-rw-r--r--  bpkg/auth.cxx | 17
-rw-r--r--  bpkg/auth.hxx | 8
-rw-r--r--  bpkg/bpkg.cli | 17
-rw-r--r--  bpkg/bpkg.cxx | 2
-rw-r--r--  bpkg/buildfile | 2
-rw-r--r--  bpkg/cfg-add.cli | 81
-rw-r--r--  bpkg/cfg-add.cxx | 324
-rw-r--r--  bpkg/cfg-add.hxx | 40
-rw-r--r--  bpkg/cfg-create.cli | 61
-rw-r--r--  bpkg/cfg-create.cxx | 162
-rw-r--r--  bpkg/cfg-create.hxx | 21
-rw-r--r--  bpkg/database.cxx | 713
-rw-r--r--  bpkg/database.hxx | 414
-rw-r--r--  bpkg/fetch-git.cxx | 1
-rw-r--r--  bpkg/forward.hxx | 6
-rwxr-xr-x  bpkg/odb.sh | 6
-rw-r--r--  bpkg/package.cxx | 189
-rw-r--r--  bpkg/package.hxx | 300
-rw-r--r--  bpkg/package.xml | 27
-rw-r--r--  bpkg/pkg-build.cli | 39
-rw-r--r--  bpkg/pkg-build.cxx | 1841
-rw-r--r--  bpkg/pkg-checkout.cxx | 49
-rw-r--r--  bpkg/pkg-checkout.hxx | 4
-rw-r--r--  bpkg/pkg-command.cxx | 49
-rw-r--r--  bpkg/pkg-command.hxx | 25
-rw-r--r--  bpkg/pkg-configure.cxx | 103
-rw-r--r--  bpkg/pkg-configure.hxx | 12
-rw-r--r--  bpkg/pkg-disfigure.cxx | 44
-rw-r--r--  bpkg/pkg-disfigure.hxx | 4
-rw-r--r--  bpkg/pkg-drop.cxx | 140
-rw-r--r--  bpkg/pkg-fetch.cxx | 51
-rw-r--r--  bpkg/pkg-fetch.hxx | 4
-rw-r--r--  bpkg/pkg-purge.cxx | 16
-rw-r--r--  bpkg/pkg-purge.hxx | 4
-rw-r--r--  bpkg/pkg-status.cli | 20
-rw-r--r--  bpkg/pkg-status.cxx | 102
-rw-r--r--  bpkg/pkg-unpack.cxx | 57
-rw-r--r--  bpkg/pkg-unpack.hxx | 6
-rw-r--r--  bpkg/pkg-update.hxx | 5
-rw-r--r--  bpkg/pointer-traits.hxx | 58
-rw-r--r--  bpkg/rep-add.cxx | 8
-rw-r--r--  bpkg/rep-add.hxx | 5
-rw-r--r--  bpkg/rep-fetch.cxx | 62
-rw-r--r--  bpkg/rep-fetch.hxx | 2
-rw-r--r--  bpkg/rep-list.cxx | 2
-rw-r--r--  bpkg/rep-remove.cxx | 36
-rw-r--r--  bpkg/rep-remove.hxx | 17
-rw-r--r--  bpkg/system-repository.cxx | 4
-rw-r--r--  bpkg/system-repository.hxx | 4
-rw-r--r--  bpkg/types-parsers.cxx | 25
-rw-r--r--  bpkg/types-parsers.hxx | 10
-rw-r--r--  bpkg/types.hxx | 65
-rw-r--r--  bpkg/utility.cxx | 14
-rw-r--r--  bpkg/utility.hxx | 9
-rw-r--r--  doc/buildfile | 1
-rwxr-xr-x  doc/cli.sh | 2
-rw-r--r--  manifest | 1
-rw-r--r--  repositories.manifest | 4
-rw-r--r--  tests/cfg-add.testscript | 190
-rw-r--r--  tests/cfg-create.testscript | 126
-rw-r--r--  tests/common.testscript | 1
-rw-r--r--  tests/common/associated/t7a/foo-1.0.0.tar.gz | bin 0 -> 361 bytes
-rw-r--r--  tests/common/associated/t7a/libbar-1.0.0.tar.gz | bin 0 -> 371 bytes
-rw-r--r--  tests/common/associated/t7a/libbaz-1.0.0.tar.gz | bin 0 -> 354 bytes
-rw-r--r--  tests/common/associated/t7a/libbox-1.0.0.tar.gz | bin 0 -> 373 bytes
-rw-r--r--  tests/common/associated/t7a/libfix-1.0.0.tar.gz | bin 0 -> 373 bytes
-rw-r--r--  tests/common/associated/t7a/repositories.manifest | 1
-rw-r--r--  tests/common/associated/t7b/foo-1.1.0.tar.gz | bin 0 -> 361 bytes
-rw-r--r--  tests/common/associated/t7b/libbar-1.1.0.tar.gz | bin 0 -> 374 bytes
-rw-r--r--  tests/common/associated/t7b/libbaz-1.1.0.tar.gz | bin 0 -> 355 bytes
-rw-r--r--  tests/common/associated/t7b/libbox-1.1.0.tar.gz | bin 0 -> 371 bytes
-rw-r--r--  tests/common/associated/t7b/repositories.manifest | 4
-rw-r--r--  tests/pkg-build.testscript | 769
l---------  tests/pkg-build/t7a | 1
l---------  tests/pkg-build/t7b | 1
-rw-r--r--  tests/pkg-drop.testscript | 256
l---------  tests/pkg-drop/t7a | 1
l---------  tests/pkg-drop/t7b | 1
78 files changed, 5491 insertions, 1155 deletions
diff --git a/bpkg/auth.cxx b/bpkg/auth.cxx
index 0693abc..79e87ea 100644
--- a/bpkg/auth.cxx
+++ b/bpkg/auth.cxx
@@ -561,7 +561,6 @@ namespace bpkg
//
static shared_ptr<certificate>
auth_cert (const common_options& co,
- const dir_path& conf,
database& db,
const optional<string>& pem,
const repository_location& rl,
@@ -603,7 +602,7 @@ namespace bpkg
//
if (pem)
{
- path f (conf / certs_dir / path (cert->id + ".pem"));
+ path f (db.config_orig / certs_dir / path (cert->id + ".pem"));
try
{
@@ -624,6 +623,7 @@ namespace bpkg
shared_ptr<const certificate>
authenticate_certificate (const common_options& co,
const dir_path* conf,
+ database* db,
const optional<string>& pem,
const repository_location& rl,
const optional<string>& dependent_trust)
@@ -642,6 +642,8 @@ namespace bpkg
if (conf == nullptr)
{
+ assert (db == nullptr);
+
// If we have no configuration, go straight to authenticating a new
// certificate.
//
@@ -650,20 +652,21 @@ namespace bpkg
? auth_real (co, fp, *pem, rl, dependent_trust).cert
: auth_dummy (co, fp.abbreviated, rl);
}
- else if (transaction::has_current ())
+ else if (db != nullptr)
{
+ assert (transaction::has_current ());
+
r = auth_cert (co,
- *conf,
- transaction::current ().database (),
+ *db,
pem,
rl,
dependent_trust);
}
else
{
- database db (open (*conf, trace));
+ database db (*conf, trace, false /* pre_attach */);
transaction t (db);
- r = auth_cert (co, *conf, db, pem, rl, dependent_trust);
+ r = auth_cert (co, db, pem, rl, dependent_trust);
t.commit ();
}
diff --git a/bpkg/auth.hxx b/bpkg/auth.hxx
index b5ae1ff..c95d1c8 100644
--- a/bpkg/auth.hxx
+++ b/bpkg/auth.hxx
@@ -26,9 +26,10 @@ namespace bpkg
// Note that if certificate is authenticated for such a use, then it is not
// persisted into the database.
//
- // If the configuration is used, then check if we are already in transaction.
- // If so, then assume the configuration database is already opened and use
- // that. Otherwise, open the database and start a new transaction.
+ // If the configuration is used and the configuration database is also
+ // specified, then assume the database is already opened with the
+ // transaction started and use that. Otherwise, open the database and start
+ // a new transaction.
//
// Note that one drawback of doing this as part of an existing transaction
// is that if things go south and the transaction gets aborted, then all the
@@ -38,6 +39,7 @@ namespace bpkg
shared_ptr<const certificate>
authenticate_certificate (const common_options&,
const dir_path* configuration,
+ database*,
const optional<string>& cert_pem,
const repository_location&,
const optional<string>& dependent_trust);
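
The comment above describes three calling modes for authenticate_certificate(). The following is only an illustrative sketch of the dispatch (which authenticate_certificate() performs internally, as shown in auth.cxx above) from the caller's side; the co, rl, and pem arguments are assumed to be prepared by the caller.

#include <bpkg/auth.hxx>
#include <bpkg/database.hxx>

namespace bpkg
{
  // Sketch: the three ways authenticate_certificate() can be called,
  // assuming co, rl, and pem are prepared by the caller.
  //
  static shared_ptr<const certificate>
  authenticate_modes (const common_options& co,
                      const repository_location& rl,
                      const optional<string>& pem,
                      const dir_path* conf,
                      database* db)
  {
    if (conf == nullptr)
      // No configuration: authenticate transiently, nothing is persisted.
      //
      return authenticate_certificate (co, nullptr, nullptr, pem, rl, nullopt);
    else if (db != nullptr)
      // Database already open with a transaction started: reuse it.
      //
      return authenticate_certificate (co, conf, db, pem, rl, nullopt);
    else
      // Configuration only: a database is opened and a new transaction is
      // started internally.
      //
      return authenticate_certificate (co, conf, nullptr, pem, rl, nullopt);
  }
}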
diff --git a/bpkg/bpkg.cli b/bpkg/bpkg.cli
index 48f655e..23be078 100644
--- a/bpkg/bpkg.cli
+++ b/bpkg/bpkg.cli
@@ -50,6 +50,11 @@ namespace bpkg
configuration is an amalgamation that contains packages as subprojects
(see \l{bpkg-cfg-create(1)} for details).
+ Build configurations can be associated with each other so that while a
+ package is built in one configuration, some of its dependencies can be
+ built in associated configurations (see \l{bpkg-cfg-create(1)} for
+ details).
+
A \i{bpkg package} is an archive or directory (potentially in a version
control system) that contains a \cb{build2} project plus the package
\cb{manifest} file. \cb{bpkg} can either use package archives/directories
@@ -168,6 +173,11 @@ namespace bpkg
"\l{bpkg-cfg-create(1)} \- create configuration"
}
+ bool cfg-add
+ {
+ "\l{bpkg-cfg-add(1)} \- associate configuration"
+ }
+
bool rep-info
{
"\l{bpkg-rep-info(1)} \- print repository information"
@@ -314,9 +324,10 @@ namespace bpkg
"\h|ENVIRONMENT|
- Commands executed by \cb{bpkg} while the build configuration database is
- open will have the \cb{BPKG_OPEN_CONFIG} environment variable set to the
- absolute and normalized configuration directory path. This can be used by
+ Commands executed by \cb{bpkg} while the current and associated build
+ configuration databases are open will have the \cb{BPKG_OPEN_CONFIGS}
+ environment variable set to the space-separated, \cb{\"}-quoted list of
+ absolute and normalized configuration directory paths. This can be used by
build system hooks and/or programs that they execute.
"
diff --git a/bpkg/bpkg.cxx b/bpkg/bpkg.cxx
index f1ee302..7697168 100644
--- a/bpkg/bpkg.cxx
+++ b/bpkg/bpkg.cxx
@@ -21,6 +21,7 @@
//
#include <bpkg/help.hxx>
+#include <bpkg/cfg-add.hxx>
#include <bpkg/cfg-create.hxx>
#include <bpkg/pkg-build.hxx>
@@ -498,6 +499,7 @@ try
#define CFG_COMMAND(CMD, TMP) COMMAND_IMPL(cfg_, "cfg-", CMD, false, TMP)
CFG_COMMAND (create, false); // Temp dir initialized manually.
+ CFG_COMMAND (add, true);
// pkg-* commands
//
diff --git a/bpkg/buildfile b/bpkg/buildfile
index cb09ca9..2016d38 100644
--- a/bpkg/buildfile
+++ b/bpkg/buildfile
@@ -15,6 +15,7 @@ import libs += libodb-sqlite%lib{odb-sqlite}
options_topics = \
bpkg-options \
+cfg-add-options \
cfg-create-options \
common-options \
configuration-options \
@@ -149,6 +150,7 @@ if $cli.configured
# cfg-* command.
#
+ cli.cxx{cfg-add-options}: cli{cfg-add}
cli.cxx{cfg-create-options}: cli{cfg-create}
# rep-* command.
diff --git a/bpkg/cfg-add.cli b/bpkg/cfg-add.cli
new file mode 100644
index 0000000..3d0f3d2
--- /dev/null
+++ b/bpkg/cfg-add.cli
@@ -0,0 +1,81 @@
+// file : bpkg/cfg-add.cli
+// license : MIT; see accompanying LICENSE file
+
+include <bpkg/configuration.cli>;
+
+"\section=1"
+"\name=bpkg-cfg-add"
+"\summary=associate configuration"
+
+namespace bpkg
+{
+ {
+ "<options> <dir>",
+
+ "\h|SYNOPSIS|
+
+ \c{\b{bpkg cfg-add} [<options>] <dir>}
+
+ \h|DESCRIPTION|
+
+ The \cb{cfg-add} command associates the specified \cb{bpkg} configuration
+ with the current configuration. See \l{bpkg-cfg-create(1)} for background
+ on associated configurations.
+
+ The associated configurations are normally referred to using names when
+ specified on the \cb{bpkg} command line. Unless overridden with the
+ \cb{--name} option, the original configuration name is used to name the
+ association. If the association is unnamed, then it can be referred to
+ using the numeric id that is automatically assigned when establishing the
+ association or using the configuration UUID.
+
+ If the specified configuration path is relative, then it is rebased
+ relative to the current configuration directory. This way, when the
+ associated configurations are moved around together, the stored relative
+ paths remain valid. If the specified directory path is absolute, then it
+ is stored as absolute unless the \cb{--relative} option is specified in
+ which case it is also rebased relative to the current configuration
+ directory.
+ "
+ }
+
+ class cfg_add_options: configuration_options
+ {
+ "\h|CFG-ADD OPTIONS|"
+
+ string --name
+ {
+ "<name>",
+ "Alternative association name. If this option is not specified, then the
+ configuration name is used as the association name (see
+ \l{bpkg-cfg-create(1)} for details)."
+ }
+
+ bool --relative
+ {
+ "Rebase the absolute associated configuration path relative to the
+ current configuration directory."
+ }
+ };
+
+ "
+ \h|DEFAULT OPTIONS FILES|
+
+ See \l{bpkg-default-options-files(1)} for an overview of the default
+ options files. For the \cb{cfg-add} command the search start directory is
+ the configuration directory. The following options files are searched for
+ in each directory and, if found, loaded in the order listed:
+
+ \
+ bpkg.options
+ bpkg-cfg-add.options
+ \
+
+ The following \cb{cfg-add} command options cannot be specified in the
+ default options files:
+
+ \
+ --directory|-d
+ \
+ "
+}
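
The rebasing behavior described above is implemented in cfg-add.cxx (below) in terms of butl path's relative(), which throws invalid_path if the path cannot be rebased (for example, the directories are on different drives on Windows). A small standalone illustration with made-up directories:

#include <iostream>

#include <libbutl/path.mxx>

using namespace butl;

int
main ()
{
  // Hypothetical configuration directories.
  //
  dir_path cfg  ("/work/project/cfg");      // Current configuration.
  dir_path host ("/work/project/host-cfg"); // Configuration being added.

  try
  {
    // This is what --relative (or a relative <dir> argument) results in:
    // the stored path becomes ../host-cfg.
    //
    dir_path r (host.relative (cfg));
    std::cout << r.string () << std::endl;
  }
  catch (const invalid_path&)
  {
    // Rebasing is impossible (e.g., different drives on Windows); cfg-add
    // fails in this case unless an absolute path is stored.
    //
  }

  return 0;
}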
diff --git a/bpkg/cfg-add.cxx b/bpkg/cfg-add.cxx
new file mode 100644
index 0000000..6528423
--- /dev/null
+++ b/bpkg/cfg-add.cxx
@@ -0,0 +1,324 @@
+// file : bpkg/cfg-add.cxx -*- C++ -*-
+// license : MIT; see accompanying LICENSE file
+
+#include <bpkg/cfg-add.hxx>
+
+#include <bpkg/package.hxx>
+#include <bpkg/package-odb.hxx>
+#include <bpkg/database.hxx>
+#include <bpkg/diagnostics.hxx>
+
+using namespace std;
+
+namespace bpkg
+{
+ shared_ptr<configuration>
+ cfg_add (database& db,
+ const dir_path& ad,
+ bool rel,
+ optional<string> name,
+ bool sys_rep)
+ {
+ tracer trace ("cfg_add");
+
+ bool name_specified (name);
+ const dir_path& cd (db.config); // Note: absolute and normalized.
+
+ // Load the self-association object from the database of the configuration
+ // being associated to obtain its name, type, and uuid.
+ //
+ database& adb (db.attach (ad, sys_rep));
+
+ string type;
+ uuid uid;
+ {
+ shared_ptr<configuration> cf (adb.load<configuration> (0));
+
+ type = move (cf->type);
+ uid = cf->uuid;
+
+ if (!name)
+ name = move (cf->name);
+ }
+
+ if (db.uuid == uid)
+ fail << "associating configuration " << ad << " with itself" <<
+ info << "uuid: " << uid;
+
+ if (name && name == db.name)
+ fail << "associating configuration " << ad << " using current "
+ << "configuration name '" << *name << "'" <<
+ info << "consider specifying alternative name with --name";
+
+ // Verify that the name and path of the configuration being associated do
+ // not clash with already associated configurations. Fail if
+ // configurations with this uuid is already associated unless the
+ // association is implicit, in which case make it explicit and update its
+ // name and path.
+ //
+ // Note that when we make an implicit association explicit, we start
+ // treating it as an implicit and explicit simultaneously. So, for
+ // example, for cfg1 the association cfg2 is explicit and the association
+ // cfg3 is both explicit and implicit:
+ //
+ // cfg2 <- cfg1 <-> cfg3
+ //
+ // Similarly, if we associate cfg1 with cfg2, the explicit association
+ // cfg2 in cfg1 also becomes both explicit and implicit, without being
+ // amended directly.
+ //
+ shared_ptr<configuration> acf;
+
+ using query = query<configuration>;
+
+ for (shared_ptr<configuration> ac:
+ pointer_result (db.query<configuration> (query::id != 0)))
+ {
+ if (uid == ac->uuid)
+ {
+ if (ac->expl)
+ fail << "configuration with uuid " << uid << " is already "
+ << "associated as " << ac->path;
+
+ // Verify the existing implicit association integrity and cache it to
+ // update later, when the name/path clash check is complete.
+ //
+ db.verify_association (*ac, adb);
+
+ acf = move (ac);
+ continue;
+ }
+
+ if (ad == ac->effective_path (cd))
+ fail << "configuration with path " << ad << " is already associated";
+
+ // If the name clashes, then fail if it was specified by the user and
+ // issue a warning and associate the configuration as unnamed otherwise.
+ //
+ if (name && name == ac->name)
+ {
+ diag_record dr (name_specified ? error : warn);
+ dr << "configuration with name " << *name << " is already "
+ << "associated as " << ac->path;
+
+ if (name_specified)
+ {
+ dr << info << "consider specifying alternative name with --name"
+ << endf;
+ }
+ else
+ {
+ dr << ", associating as unnamed";
+ name = nullopt;
+ }
+ }
+ }
+
+ // If requested, rebase the first path relative to the second or return it
+ // as is otherwise. Fail if the rebase is not possible (e.g., paths are on
+ // different drives on Windows).
+ //
+ auto rebase = [rel] (const dir_path& x, const dir_path& y) -> dir_path
+ {
+ try
+ {
+ return rel ? x.relative (y) : x;
+ }
+ catch (const invalid_path&)
+ {
+ fail << "unable to rebase " << x << " relative to " << y <<
+ info << "specify absolute configuration directory path to save it "
+ << "as absolute" << endf;
+ }
+ };
+
+ // If the implicit association already exists, then make it explicit and
+ // update its name and path. Otherwise, create a new association.
+ //
+ // Note that in the former case the current configuration must already be
+ // explicitly associated with the configuration being associated. We
+ // verify that and the association integrity.
+ //
+ if (acf != nullptr)
+ {
+ // Verify the reverse association integrity.
+ //
+ shared_ptr<configuration> cf (
+ adb.query_one<configuration> (query::uuid == db.uuid.string ()));
+
+ // Note: both sides of the association cannot be implicit.
+ //
+ if (cf == nullptr || !cf->expl)
+ fail << "configuration " << ad << " is already implicitly "
+ << "associated but current configuration " << cd << " is not "
+ << "explicitly associated with it";
+
+ adb.verify_association (*cf, db);
+
+ // Finally, turn the implicit association into explicit.
+ //
+ // Note: reuse id.
+ //
+ acf->expl = true;
+ acf->name = move (name);
+ acf->path = rebase (ad, cd); // Note: can't clash (see above).
+
+ db.update (acf);
+ }
+ else
+ {
+ // If the directory path of the configuration being associated is
+ // relative or the --relative option is specified, then rebase it
+ // relative to the current configuration directory path.
+ //
+ acf = make_shared<configuration> (uid,
+ move (name),
+ move (type),
+ rebase (ad, cd),
+ true /* explicit */);
+
+ db.persist (acf);
+
+ // Now implicitly associate ourselves with the just associated
+ // configuration. Note that we associate ourselves as unnamed.
+ //
+ shared_ptr<configuration> ccf (db.load<configuration> (0));
+
+ // What if we find the current configuration to already be implicitly
+ // associated? A potential scenario is that the current configuration
+ // was recreated from scratch after having been implicitly associated
+ // with the configuration we are now associating. In this case it feels
+ // better to overwrite the existing dead implicit association than to
+ // just fail. Let's also warn for good measure.
+ //
+ shared_ptr<configuration> cf;
+
+ for (shared_ptr<configuration> ac:
+ pointer_result (adb.query<configuration> (query::id != 0)))
+ {
+ if (cd == ac->make_effective_path (ad))
+ {
+ if (ac->expl)
+ fail << "current configuration " << cd << " is already "
+ << "associated with " << ad;
+
+ warn << "current configuration " << cd << " is already "
+ << "implicitly associated with " << ad;
+
+ cf = move (ac);
+ continue;
+ }
+
+ if (ccf->uuid == ac->uuid)
+ fail << "current configuration " << ccf->uuid
+ << " is already associated with " << ad;
+ }
+
+ // It feels natural to persist the explicitly and implicitly associated
+ // configuration paths consistently: both relative or both absolute.
+ //
+ if (cf != nullptr)
+ {
+ // The dead implicit association case.
+ //
+ // Note: reuse id.
+ //
+ cf->uuid = ccf->uuid;
+ cf->type = move (ccf->type);
+ cf->path = rebase (cd, ad);
+
+ adb.update (cf);
+ }
+ else
+ {
+ ccf = make_shared<configuration> (ccf->uuid,
+ nullopt /* name */,
+ move (ccf->type),
+ rebase (cd, ad),
+ false /* explicit */);
+
+ adb.persist (ccf);
+ }
+ }
+
+ // If explicit associations of the current database are pre-attached, then
+ // also pre-attach explicit associations of the newly associated database.
+ //
+ associated_configs& acs (db.explicit_associations ());
+
+ if (!acs.empty ())
+ {
+ acs.push_back (associated_config {*acf->id, acf->name, adb});
+ adb.attach_explicit (sys_rep);
+ }
+
+ // If the implicit associations of the newly associated database are
+ // already cached, then also add the current database to them, unless it
+ // is already there (see above for the dead association case).
+ //
+ associated_databases& ads (adb.implicit_associations (false /* attach */));
+
+ if (!ads.empty () && find (ads.begin (), ads.end (), db) == ads.end ())
+ ads.push_back (db);
+
+ return acf;
+ }
+
+ int
+ cfg_add (const cfg_add_options& o, cli::scanner& args)
+ try
+ {
+ tracer trace ("cfg_add");
+
+ dir_path c (o.directory ());
+ l4 ([&]{trace << "configuration: " << c;});
+
+ if (o.name_specified ())
+ validate_configuration_name (o.name (), "--name option value");
+
+ if (!args.more ())
+ fail << "configuration directory argument expected" <<
+ info << "run 'bpkg help cfg-add' for more information";
+
+ dir_path ad (args.next ());
+ if (ad.empty ())
+ throw invalid_path (ad.string ());
+
+ l4 ([&]{trace << "add configuration: " << ad;});
+
+ bool rel (ad.relative () || o.relative ());
+ normalize (ad, "specified associated configuration");
+
+ database db (c, trace, false /* pre_attach */, false /* sys_rep */, &ad);
+ transaction t (db);
+
+ shared_ptr<configuration> ac (
+ cfg_add (db,
+ ad,
+ rel,
+ o.name_specified () ? o.name () : optional<string> ()));
+
+ t.commit ();
+
+ if (verb && !o.no_result ())
+ {
+ diag_record dr (text);
+
+ dr << "associated configuration " << ad <<
+ info << "uuid: " << ac->uuid <<
+ info << "type: " << ac->type;
+
+ if (ac->name)
+ dr << info << "name: " << *ac->name;
+
+ dr << info << "id: " << *ac->id;
+ }
+
+ return 0;
+ }
+ catch (const invalid_path& e)
+ {
+ fail << "invalid path: '" << e.path << "'" << endf;
+ }
+}
diff --git a/bpkg/cfg-add.hxx b/bpkg/cfg-add.hxx
new file mode 100644
index 0000000..2059963
--- /dev/null
+++ b/bpkg/cfg-add.hxx
@@ -0,0 +1,40 @@
+// file : bpkg/cfg-add.hxx -*- C++ -*-
+// license : MIT; see accompanying LICENSE file
+
+#ifndef BPKG_CFG_ADD_HXX
+#define BPKG_CFG_ADD_HXX
+
+#include <bpkg/types.hxx>
+#include <bpkg/forward.hxx> // configuration
+#include <bpkg/utility.hxx>
+
+#include <bpkg/cfg-add-options.hxx>
+
+namespace bpkg
+{
+ int
+ cfg_add (const cfg_add_options&, cli::scanner& args);
+
+ // Associate the configuration specified as the directory path with the
+ // current configuration, attach the associated configuration database, and
+ // return the association. Note that it also establishes an implicit
+ // association of the current configuration with the associated one.
+ //
+ // The specified configuration path must be absolute and normalized. If the
+ // relative argument is true, then rebase this path relative to the current
+ // configuration directory path and fail if that's not possible (different
+ // drive on Windows, etc).
+ //
+ // If the current configuration database has its explicit associations
+ // pre-attached, then also pre-attach explicit associations of the newly
+ // associated database.
+ //
+ shared_ptr<configuration>
+ cfg_add (database&,
+ const dir_path&,
+ bool relative,
+ optional<string> name,
+ bool sys_rep = false);
+}
+
+#endif // BPKG_CFG_ADD_HXX
diff --git a/bpkg/cfg-create.cli b/bpkg/cfg-create.cli
index 0b32604..e367828 100644
--- a/bpkg/cfg-create.cli
+++ b/bpkg/cfg-create.cli
@@ -51,6 +51,37 @@ namespace bpkg
\
bpkg create cxx. \"?cli\"
\
+
+ Configurations can be associated with each other to allow a package to
+ be built in one configuration while its dependencies are built in one or
+ more associated configurations. This can be used to create a \"base\"
+ configuration with common dependencies that are shared between multiple
+ configurations. This mechanism is also used to provide a host
+ configuration that is used to build build-time dependencies.
+
+ Each configuration is assigned an automatically-generated UUID unless one
+ is specified with the \cb{--config-uuid} option. This UUID is used to
+ check the integrity of configuration associations. For convenience of
+ referring to associated configurations, a configuration can also be
+ assigned a name with the \cb{--name} option.
+
+ A configuration also has a type specified with the \cb{--type} option.
+ Two predefined types are \cb{host} and \cb{target}. If the type is not
+ specified explicitly, then \cb{target} is assumed. When satisfying a
+ dependency of one package on another, an associated configuration will
+ only be considered if (1) it has the same type as the dependent's
+ configuration for run-time dependencies and (2) it has the \cb{host} type for
+ build-time dependencies. Note that a host configuration is a target
+ configuration for the host machine. So to create a self-hosted
+ configuration, use type \cb{host}.
+
+ To associate a configuration we use the \l{bpkg-cfg-add(1)} command. As a
+ shortcut, a host configuration can also be associated during the
+ configuration creation with the \cb{--host-config} option. If a
+ build-time dependency is encountered in a configuration that has no
+ associated host configuration (nor is itself a host configuration), then
+ a private host configuration named \cb{host} is created automatically
+ inside the configuration's \c{.bpkg/} subdirectory.
"
}
@@ -77,6 +108,36 @@ namespace bpkg
configuration. For safety, this option requires that you specify the
configuration directory explicitly with \cb{--directory|-d}."
}
+
+ dir_path --host-config
+ {
+ "<dir>",
+ "Associate the specified host configuration with the configuration
+ being created as if by running the \l{bpkg-cfg-add(1)} command."
+ }
+
+ string --name
+ {
+ "<name>",
+ "The name of the configuration being created. If this configuration
+ is associated with another configuration using \l{bpkg-cfg-add(1)},
+ this name will be used as the association name unless overridden.
+ By default the configuration is created unnamed."
+ }
+
+ string --type = "target"
+ {
+ "<type>",
+ "The type of the configuration being created. By default, configuration
+ of type \cb{target} is created."
+ }
+
+ uuid --config-uuid
+ {
+ "<uuid>",
+ "Use the specified UUID as the configuration id instead of generating
+ one automatically."
+ }
};
"
diff --git a/bpkg/cfg-create.cxx b/bpkg/cfg-create.cxx
index f3ca80d..98d6aea 100644
--- a/bpkg/cfg-create.cxx
+++ b/bpkg/cfg-create.cxx
@@ -8,29 +8,62 @@
#include <bpkg/database.hxx>
#include <bpkg/diagnostics.hxx>
+#include <bpkg/cfg-add.hxx>
+
using namespace std;
namespace bpkg
{
- int
- cfg_create (const cfg_create_options& o, cli::scanner& args)
+ shared_ptr<configuration>
+ cfg_create (const common_options& o,
+ const dir_path& c,
+ optional<string> name,
+ string type,
+ const strings& mods,
+ const strings& vars,
+ bool existing,
+ bool wipe,
+ optional<uuid> uid,
+ const optional<dir_path>& host_config)
{
tracer trace ("cfg_create");
- if (o.existing () && o.wipe ())
- fail << "both --existing|-e and --wipe specified";
+ // Stash and restore the current transaction, if any.
+ //
+ namespace sqlite = odb::sqlite;
- if (o.wipe () && !o.directory_specified ())
- fail << "--wipe requires explicit --directory|-d";
+ sqlite::transaction* ct (nullptr);
+ if (sqlite::transaction::has_current ())
+ {
+ ct = &sqlite::transaction::current ();
+ sqlite::transaction::reset_current ();
+ }
- dir_path c (o.directory ());
- l4 ([&]{trace << "creating configuration in " << c;});
+ auto tg (make_guard ([ct] ()
+ {
+ if (ct != nullptr)
+ sqlite::transaction::current (*ct);
+ }));
+
+ // First, let's verify the host configuration's existence and type and
+ // normalize its path.
+ //
+ dir_path hc;
+ if (host_config)
+ {
+ hc = normalize (*host_config, "host configuration");
+
+ database db (hc, trace, false /* pre_attach */);
+ if (db.type != "host")
+ fail << "host configuration " << hc << " is of '" << db.type
+ << "' type";
+ }
// Verify the existing directory is compatible with our mode.
//
if (exists (c))
{
- if (o.existing ())
+ if (existing)
{
// Bail if the .bpkg/ directory already exists and is not empty.
//
@@ -49,7 +82,7 @@ namespace bpkg
//
if (!empty (c))
{
- if (!o.wipe ())
+ if (!wipe)
fail << "directory " << c << " is not empty" <<
info << "use --wipe to clean it up but be careful";
@@ -65,29 +98,9 @@ namespace bpkg
mk_p (c);
}
- // Sort arguments into modules and configuration variables.
- //
- strings mods;
- strings vars;
- while (args.more ())
- {
- string a (args.next ());
-
- if (a.find ('=') != string::npos)
- {
- vars.push_back (move (a));
- }
- else if (!a.empty ())
- {
- mods.push_back (move (a));
- }
- else
- fail << "empty string as argument";
- }
-
// Create and configure.
//
- if (o.existing ())
+ if (existing)
{
if (!mods.empty ())
fail << "module '" << mods[0] << "' specified with --existing|-e";
@@ -149,7 +162,12 @@ namespace bpkg
// Create the database.
//
- database db (open (c, trace, true));
+ shared_ptr<configuration> r (make_shared<configuration> (move (name),
+ move (type),
+ uid));
+
+ database db (c, r, trace, host_config ? &hc : nullptr);
+ transaction t (db);
// Add the special, root repository object with empty location and
// containing a single repository fragment having an empty location as
@@ -161,31 +179,97 @@ namespace bpkg
// locations and as a search starting point for held packages (see
// pkg-build for details).
//
- transaction t (db);
-
shared_ptr<repository_fragment> fr (
make_shared<repository_fragment> (repository_location ()));
db.persist (fr);
- shared_ptr<repository> r (
+ shared_ptr<repository> rep (
make_shared<repository> (repository_location ()));
- r->fragments.push_back (
+ rep->fragments.push_back (
repository::fragment_type {string () /* friendly_name */, move (fr)});
- db.persist (r);
+ db.persist (rep);
+
+ if (host_config)
+ cfg_add (db, hc, host_config->relative (), nullopt /* name */);
t.commit ();
+ return r;
+ }
+
+ int
+ cfg_create (const cfg_create_options& o, cli::scanner& args)
+ {
+ tracer trace ("cfg_create");
+
+ if (o.name_specified ())
+ validate_configuration_name (o.name (), "--name option value");
+
+ if (o.type ().empty ())
+ fail << "empty --type option value";
+
+ if (o.existing () && o.wipe ())
+ fail << "both --existing|-e and --wipe specified";
+
+ if (o.wipe () && !o.directory_specified ())
+ fail << "--wipe requires explicit --directory|-d";
+
+ dir_path c (o.directory ());
+ l4 ([&]{trace << "creating configuration in " << c;});
+
+ // Sort arguments into modules and configuration variables.
+ //
+ strings mods;
+ strings vars;
+ while (args.more ())
+ {
+ string a (args.next ());
+
+ if (a.find ('=') != string::npos)
+ vars.push_back (move (a));
+ else if (!a.empty ())
+ mods.push_back (move (a));
+ else
+ fail << "empty string as argument";
+ }
+
+ // Auto-generate the configuration UUID, unless it is specified
+ // explicitly.
+ //
+ shared_ptr<configuration> cf (
+ cfg_create (
+ o,
+ c,
+ o.name_specified () ? o.name () : optional<string> (),
+ o.type (),
+ mods,
+ vars,
+ o.existing (),
+ o.wipe (),
+ o.config_uuid_specified () ? o.config_uuid () : optional<uuid> (),
+ (o.host_config_specified ()
+ ? o.host_config ()
+ : optional<dir_path> ())));
+
if (verb && !o.no_result ())
{
normalize (c, "configuration");
+ diag_record dr (text);
+
if (o.existing ())
- text << "initialized existing configuration in " << c;
+ dr << "initialized existing configuration in " << c;
else
- text << "created new configuration in " << c;
+ dr << "created new configuration in " << c;
+
+ dr << info << "uuid: " << cf->uuid
+ << info << "type: " << cf->type;
+
+ if (cf->name)
+ dr << info << "name: " << *cf->name;
}
return 0;
diff --git a/bpkg/cfg-create.hxx b/bpkg/cfg-create.hxx
index 9831160..8b85d93 100644
--- a/bpkg/cfg-create.hxx
+++ b/bpkg/cfg-create.hxx
@@ -5,6 +5,7 @@
#define BPKG_CFG_CREATE_HXX
#include <bpkg/types.hxx>
+#include <bpkg/forward.hxx> // configuration
#include <bpkg/utility.hxx>
#include <bpkg/cfg-create-options.hxx>
@@ -14,6 +15,26 @@ namespace bpkg
int
cfg_create (const cfg_create_options&, cli::scanner& args);
+ // Create a new bpkg configuration, initialize its database (add self-
+ // association, root repository, etc), and return this configuration
+ // information. See bpkg-cfg-create(1) for arguments semantics.
+ //
+ // If there is a current transaction already open, then stash it before the
+ // database initialization and restore it afterwards (used to create private
+ // host configuration on demand).
+ //
+ shared_ptr<configuration>
+ cfg_create (const common_options&,
+ const dir_path&,
+ optional<string> name,
+ string type,
+ const strings& mods,
+ const strings& vars,
+ bool existing,
+ bool wipe,
+ optional<uuid> uid = nullopt,
+ const optional<dir_path>& host_config = nullopt);
+
default_options_files
options_files (const char* cmd,
const cfg_create_options&,
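
Together with cfg_add() above, this overload allows configurations to be created and associated programmatically (this is how the private host configuration is created on demand). Below is a rough sketch against the declared signatures; the options object and directory paths are assumed to come from the caller.

#include <bpkg/cfg-add.hxx>
#include <bpkg/cfg-create.hxx>
#include <bpkg/database.hxx>

namespace bpkg
{
  // Sketch: create a host configuration and associate it with an existing
  // target configuration, roughly what the --host-config shortcut does.
  //
  static void
  create_and_associate (const common_options& co,
                        const dir_path& cfg, // Existing target configuration.
                        dir_path host_cfg)   // Host configuration to create.
  {
    tracer trace ("create_and_associate");

    // Create the host configuration (no modules or variables, new
    // directory).
    //
    cfg_create (co,
                host_cfg,
                nullopt /* name */,
                "host" /* type */,
                strings () /* mods */,
                strings () /* vars */,
                false /* existing */,
                false /* wipe */);

    // Open the target configuration and associate the new host
    // configuration with it under the name 'host'.
    //
    normalize (host_cfg, "host configuration");

    database db (cfg, trace, false /* pre_attach */);
    transaction t (db);

    cfg_add (db, host_cfg, false /* relative */, string ("host"));

    t.commit ();
  }
}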
diff --git a/bpkg/database.cxx b/bpkg/database.cxx
index a866274..45f4b29 100644
--- a/bpkg/database.cxx
+++ b/bpkg/database.cxx
@@ -3,45 +3,27 @@
#include <bpkg/database.hxx>
+#include <map>
+
#include <odb/schema-catalog.hxx>
#include <odb/sqlite/exceptions.hxx>
+#include <libbutl/sha256.mxx>
+
#include <bpkg/package.hxx>
#include <bpkg/package-odb.hxx>
#include <bpkg/diagnostics.hxx>
-#include <bpkg/system-repository.hxx>
using namespace std;
namespace bpkg
{
- using namespace odb::sqlite;
- using odb::schema_catalog;
-
- // Use a custom connection factory to automatically set and clear the
- // BPKG_OPEN_CONFIG environment variable. A bit heavy-weight but seems like
- // the best option.
- //
- static const string open_name ("BPKG_OPEN_CONFIG");
-
- class conn_factory: public single_connection_factory // No need for pool.
- {
- public:
- conn_factory (const dir_path& d)
- {
- setenv (open_name, normalize (d, "configuration").string ());
- }
-
- virtual
- ~conn_factory ()
- {
- unsetenv (open_name);
- }
- };
+ namespace sqlite = odb::sqlite;
// Register the data migration functions.
//
- // NOTE: remember to qualify table names if using native statements.
+ // NOTE: remember to qualify table names with \"main\". if using native
+ // statements.
//
template <odb::schema_version v>
using migration_entry = odb::data_migration_entry<v, DB_SCHEMA_VERSION_BASE>;
@@ -59,84 +41,679 @@ namespace bpkg
}
});
- database
- open (const dir_path& d, tracer& tr, bool create)
+ static const migration_entry<9>
+ migrate_v9 ([] (odb::database& db)
{
- tracer trace ("open");
+ // Add the unnamed self-association of the target type.
+ //
+ shared_ptr<configuration> sc (
+ make_shared<configuration> (optional<string> (), "target"));
+
+ db.persist (sc);
+ db.execute ("UPDATE selected_package_prerequisites SET configuration = '" +
+ sc->uuid.string () + "'");
+ });
+ static inline path
+ cfg_path (const dir_path& d, bool create)
+ {
path f (d / bpkg_dir / "bpkg.sqlite3");
if (!create && !exists (f))
fail << d << " does not look like a bpkg configuration directory";
+ return f;
+ }
+
+ // The BPKG_OPEN_CONFIGS environment variable.
+ //
+ // It is automatically set to the configuration directory path and cleared
+ // in the main database constructor and destructor, respectively. The
+ // attached database configuration paths are also appended in their
+ // constructors and cleared in detach_all(). The paths are absolute,
+ // normalized, double-quoted,
+ // and separated with spaces.
+ //
+ static const string open_name ("BPKG_OPEN_CONFIGS");
+
+ struct database::impl
+ {
+ sqlite::connection_ptr conn; // Main connection.
+
+ map<dir_path, database> attached_map;
+
+ impl (sqlite::connection_ptr&& c): conn (move (c)) {}
+ };
+
+ database::
+ database (const dir_path& d,
+ configuration* create,
+ odb::tracer& tr,
+ bool pre_attach,
+ bool sys_rep,
+ const dir_path* pre_assoc)
+ : sqlite::database (
+ cfg_path (d, create != nullptr).string (),
+ SQLITE_OPEN_READWRITE | (create != nullptr ? SQLITE_OPEN_CREATE : 0),
+ true, // Enable FKs.
+ "", // Default VFS.
+ unique_ptr<sqlite::connection_factory> (
+ new sqlite::serial_connection_factory)), // Single connection.
+ config (normalize (d, "configuration")),
+ config_orig (d)
+ {
+ bpkg::tracer trace ("database");
+
+ // Cache the (single) main connection we will be using.
+ //
+ unique_ptr<impl> ig ((impl_ = new impl (connection ())));
+
try
{
- database db (f.string (),
- SQLITE_OPEN_READWRITE | (create ? SQLITE_OPEN_CREATE : 0),
- true, // Enable FKs.
- "", // Default VFS.
- unique_ptr<connection_factory> (new conn_factory (d)));
-
- db.tracer (trace);
-
- // Lock the database for as long as the connection is active. First
- // we set locking_mode to EXCLUSIVE which instructs SQLite not to
- // release any locks until the connection is closed. Then we force
- // SQLite to acquire the write lock by starting exclusive transaction.
- // See the locking_mode pragma documentation for details. This will
- // also fail if the database is inaccessible (e.g., file does not
- // exist, already used by another process, etc).
+ tracer_guard tg (*this, trace);
+
+ // Lock the database for as long as the connection is active. First we
+ // set locking_mode to EXCLUSIVE which instructs SQLite not to release
+ // any locks until the connection is closed. Then we force SQLite to
+ // acquire the write lock by starting exclusive transaction. See the
+ // locking_mode pragma documentation for details. This will also fail if
+ // the database is inaccessible (e.g., file does not exist, already used
+ // by another process, etc).
+ //
+ // Note that here we assume that any database that is ATTACHED within an
+ // exclusive transaction gets the same treatment.
//
- using odb::sqlite::transaction; // Skip the wrapper.
+ using odb::schema_catalog;
+
+ impl_->conn->execute ("PRAGMA locking_mode = EXCLUSIVE");
+
+ add_env (true /* reset */);
+ auto g (make_exception_guard ([] () {unsetenv (open_name);}));
- try
{
- db.connection ()->execute ("PRAGMA locking_mode = EXCLUSIVE");
- transaction t (db.begin_exclusive ());
+ sqlite::transaction t (impl_->conn->begin_exclusive ());
- if (create)
+ if (create != nullptr)
{
- // Create the new schema.
+ // Create the new schema and persist the self-association.
//
- if (db.schema_version () != 0)
- fail << f << ": already has database schema";
+ if (schema_version () != 0)
+ fail << sqlite::database::name () << ": already has database "
+ << "schema";
+
+ schema_catalog::create_schema (*this);
- schema_catalog::create_schema (db);
+ persist (*create); // Also assigns association id.
+
+ // Cache the configuration information.
+ //
+ cache_config (create->uuid, create->name, create->type);
}
else
{
- // Migrate the database if necessary.
+ // Migrate the associated databases cluster.
//
- schema_catalog::migrate (db);
+ migrate ();
+
+ // Cache the configuration information.
+ //
+ shared_ptr<configuration> c (load<configuration> (0));
+ cache_config (c->uuid, move (c->name), move (c->type));
+
+ // Load the system repository, if requested.
+ //
+ if (sys_rep)
+ load_system_repository ();
}
+ // Migrate the pre-associated database and the database cluster it
+ // belongs to.
+ //
+ if (pre_assoc != nullptr)
+ attach (*pre_assoc).migrate ();
+
t.commit ();
}
+
+ // Detach the (pre-)associated databases that may have been attached
+ // during migration.
+ //
+ detach_all ();
+
+ if (pre_attach)
+ {
+ sqlite::transaction t (begin_exclusive ());
+ attach_explicit (sys_rep);
+ t.commit ();
+ }
+ }
+ catch (odb::timeout&)
+ {
+ fail << "configuration " << d << " is already used by another process";
+ }
+ catch (const sqlite::database_exception& e)
+ {
+ fail << sqlite::database::name () << ": " << e.message ();
+ }
+
+ tracer (tr);
+
+ // Note: will be leaked if anything further throws.
+ //
+ ig.release ();
+ }
+
+ // NOTE: if we ever load/persist any dynamically allocated objects in this
+ // constructor, make sure such objects do not use the session or the session
+ // is temporarily suspended in the attach() function (see its implementation
+ // for the reasoning note) since the database will be moved.
+ //
+ database::
+ database (impl* i,
+ const dir_path& d,
+ std::string schema,
+ bool sys_rep)
+ : sqlite::database (i->conn,
+ cfg_path (d, false /* create */).string (),
+ move (schema)),
+ config (d),
+ impl_ (i)
+ {
+ bpkg::tracer trace ("database");
+
+ // Derive the configuration original directory path.
+ //
+ database& mdb (main_database ());
+
+ if (mdb.config_orig.relative ())
+ {
+ // Fall back to the absolute path if the configuration is on a
+ // different drive on Windows.
+ //
+ if (optional<dir_path> c = config.try_relative (current_directory ()))
+ config_orig = move (*c);
+ else
+ config_orig = config;
+ }
+ else
+ config_orig = config;
+
+ try
+ {
+ tracer_guard tg (*this, trace);
+
+ // Cache the configuration information.
+ //
+ shared_ptr<configuration> c (load<configuration> (0));
+ cache_config (c->uuid, move (c->name), move (c->type));
+
+ // Load the system repository, if requested.
+ //
+ if (sys_rep)
+ load_system_repository ();
+ }
+ catch (const sqlite::database_exception& e)
+ {
+ fail << sqlite::database::name () << ": " << e.message ();
+ }
+
+ add_env ();
+
+ // Set the tracer used by the associated configurations cluster.
+ //
+ sqlite::database::tracer (mdb.tracer ());
+ }
+
+ database::
+ ~database ()
+ {
+ if (impl_ != nullptr && // Not a moved-from database?
+ main ())
+ {
+ delete impl_;
+
+ unsetenv (open_name);
+ }
+ }
+
+ database::
+ database (database&& db)
+ : sqlite::database (move (db)),
+ uuid (db.uuid),
+ type (move (db.type)),
+ config (move (db.config)),
+ config_orig (move (db.config_orig)),
+ system_repository (move (db.system_repository)),
+ impl_ (db.impl_),
+ explicit_associations_ (move (db.explicit_associations_)),
+ implicit_associations_ (move (db.implicit_associations_))
+ {
+ db.impl_ = nullptr; // See ~database().
+ }
+
+ void database::
+ add_env (bool reset) const
+ {
+ using std::string;
+
+ string v;
+
+ if (!reset)
+ {
+ if (optional<string> e = getenv (open_name))
+ v = move (*e);
+ }
+
+ v += (v.empty () ? "\"" : " \"") + config.string () + '"';
+
+ setenv (open_name, v);
+ }
+
+ void database::
+ tracer (tracer_type* t)
+ {
+ main_database ().sqlite::database::tracer (t);
+
+ for (auto& db: impl_->attached_map)
+ db.second.sqlite::database::tracer (t);
+ }
+
+ void database::
+ migrate ()
+ {
+ using odb::schema_catalog;
+
+ odb::schema_version sv (schema_version ());
+ odb::schema_version scv (schema_catalog::current_version (*this));
+
+ if (sv != scv)
+ {
+ if (sv < schema_catalog::base_version (*this))
+ fail << "configuration " << config_orig << " is too old";
+
+ if (sv > scv)
+ fail << "configuration " << config_orig << " is too new";
+
+ // Note that we need to migrate the current database before the
+ // associated ones to properly handle association cycles.
+ //
+ schema_catalog::migrate (*this);
+
+ for (auto& c: query<configuration> (odb::query<configuration>::id != 0))
+ attach (c.make_effective_path (config)).migrate ();
+ }
+ }
+
+ void database::
+ cache_config (const uuid_type& u, optional<std::string> n, std::string t)
+ {
+ uuid = u;
+ name = move (n);
+ type = move (t);
+ }
+
+ void database::
+ load_system_repository ()
+ {
+ assert (!system_repository); // Must only be loaded once.
+
+ system_repository = bpkg::system_repository ();
+
+ // Query for all the packages with the system substate and enter their
+ // versions into system_repository as non-authoritative. This way an
+ // available_package (e.g., a stub) will automatically "see" system
+ // version, if one is known.
+ //
+ assert (transaction::has_current ());
+
+ for (const auto& p: query<selected_package> (
+ odb::query<selected_package>::substate == "system"))
+ system_repository->insert (p.name,
+ p.version,
+ false /* authoritative */);
+ }
+
+ database& database::
+ attach (const dir_path& d, bool sys_rep)
+ {
+ assert (d.absolute () && d.normalized ());
+
+ // Check if we are trying to attach the main database.
+ //
+ database& md (main_database ());
+ if (d == md.config)
+ return md;
+
+ auto& am (impl_->attached_map);
+
+ auto i (am.find (d));
+
+ if (i == am.end ())
+ {
+ // We know from the implementation that 4-character schema names are
+ // optimal. So try to come up with a unique abbreviated hash that is 4
+ // or more characters long.
+ //
+ std::string schema;
+ {
+ butl::sha256 h (d.string ());
+
+ for (size_t n (4);; ++n)
+ {
+ schema = h.abbreviated_string (n);
+
+ if (find_if (am.begin (), am.end (),
+ [&schema] (const map<dir_path, database>::value_type& v)
+ {
+ return v.second.schema () == schema;
+ }) == am.end ())
+ break;
+ }
+ }
+
+ // If attaching out of an exclusive transaction (all our transactions
+ // are exclusive), start one to force database locking (see the above
+ // locking_mode discussion for details).
+ //
+ sqlite::transaction t;
+ if (!sqlite::transaction::has_current ())
+ t.reset (begin_exclusive ());
+
+ try
+ {
+ // NOTE: we need to be careful here not to bind any persistent objects
+ // the database constructor may load/persist to the temporary database
+ // object in the session cache.
+ //
+ i = am.insert (
+ make_pair (d, database (impl_, d, move (schema), sys_rep))).first;
+ }
catch (odb::timeout&)
{
fail << "configuration " << d << " is already used by another process";
}
- // Query for all the packages with the system substate and enter their
- // versions into system_repository as non-authoritative. This way an
- // available_package (e.g., a stub) will automatically "see" system
- // version, if one is known.
+ if (!t.finalized ())
+ t.commit ();
+ }
+
+ return i->second;
+ }
+
+ void database::
+ detach_all ()
+ {
+ assert (main ());
+
+ explicit_associations_.clear ();
+ implicit_associations_.clear ();
+
+ for (auto i (impl_->attached_map.begin ());
+ i != impl_->attached_map.end (); )
+ {
+ i->second.detach ();
+ i = impl_->attached_map.erase (i);
+ }
+
+ // Remove the detached databases from the environment.
+ //
+ add_env (true /* reset */);
+ }
+
+ void database::
+ verify_association (const configuration& ac, database& adb)
+ {
+ const dir_path& c (adb.config_orig);
+
+ if (ac.uuid != adb.uuid)
+ fail << "configuration " << c << " uuid mismatch" <<
+ info << "uuid " << adb.uuid <<
+ info << (!ac.expl ? "implicitly " : "") << "associated with "
+ << config_orig << " as " << ac.uuid;
+
+ if (ac.type != adb.type)
+ fail << "configuration " << c << " type mismatch" <<
+ info << "type " << adb.type <<
+ info << (!ac.expl ? "implicitly " : "") << "associated with "
+ << config_orig << " as " << ac.type;
+
+ if (ac.effective_path (config) != adb.config)
+ fail << "configuration " << c << " path mismatch" <<
+ info << (!ac.expl ? "implicitly " : "") << "associated with "
+ << config_orig << " as " << ac.path;
+ }
+
+ void database::
+ attach_explicit (bool sys_rep)
+ {
+ assert (transaction::has_current ());
+
+ if (explicit_associations_.empty ())
+ {
+ // Note that the self-association is implicit.
//
- transaction t (db.begin ());
+ explicit_associations_.push_back (associated_config {0, name, *this});
+
+ for (auto& ac: query<configuration> (odb::query<configuration>::expl))
+ {
+ database& db (attach (ac.effective_path (config), sys_rep));
+ verify_association (ac, db);
+
+ explicit_associations_.push_back (associated_config {*ac.id,
+ move (ac.name),
+ db});
+ db.attach_explicit (sys_rep);
+ }
+ }
+ }
- for (const auto& p:
- db.query<selected_package> (
- query<selected_package>::substate == "system"))
- system_repository.insert (p.name, p.version, false);
+ associated_databases& database::
+ implicit_associations (bool ath, bool sys_rep)
+ {
+ assert (transaction::has_current ());
- t.commit ();
+ // Note that cached implicit associations must at least contain the self-
+ // association, if the databases are already attached and cached.
+ //
+ if (implicit_associations_.empty () && ath)
+ {
+ using q = odb::query<configuration>;
- db.tracer (tr); // Switch to the caller's tracer.
- return db;
+ // Make sure the self-association (zero id) comes first.
+ //
+ for (const auto& ac: query<configuration> ("ORDER BY" + q::id))
+ {
+ database& db (attach (ac.effective_path (config), sys_rep));
+
+ // Verify the association integrity and pre-attach its explicit
+ // associations, if required.
+ //
+ if (*ac.id != 0)
+ {
+ verify_association (ac, db);
+
+ // If the association is explicit, check if it is also implicit (see
+ // cfg_add() for details) and skip it if it is not.
+ //
+ if (ac.expl)
+ {
+ shared_ptr<configuration> cf (
+ db.query_one<configuration> (q::uuid == uuid.string ()));
+
+ if (cf == nullptr)
+ fail << "configuration " << db.config_orig << " is associated "
+ << "with " << config_orig << " but latter is not "
+ << "implicitly associated with former";
+
+ // While at it, verify the integrity of the other end of the
+ // association.
+ //
+ db.verify_association (*cf, *this);
+
+ if (!cf->expl)
+ continue;
+ }
+
+ // If the explicitly associated databases are pre-attached, normally
+ // to make the selected packages loadable, then we also pre-attach
+ // explicit associations of the database being attached implicitly,
+ // for the same reason. Indeed, think of loading a dependent package
+ // from the implicitly associated database as a selected package.
+ //
+ if (!explicit_associations_.empty ())
+ db.attach_explicit (sys_rep);
+ }
+
+ implicit_associations_.push_back (db);
+ }
}
- catch (const database_exception& e)
+
+ return implicit_associations_;
+ }
+
+ associated_databases database::
+ dependent_configs (bool sys_rep)
+ {
+ associated_databases r;
+
+ // Add the associated database to the resulting list if it is of the
+ // associating database type or this type is 'host'. Call itself
+ // recursively for the implicitly associated configurations.
+ //
+ auto add = [&r, sys_rep] (database& db,
+ const std::string& t,
+ const auto& add)
{
- fail << f << ": " << e.message () << endf;
+ if (!(db.type == t || t == "host") ||
+ std::find (r.begin (), r.end (), db) != r.end ())
+ return;
+
+ r.push_back (db);
+
+ const associated_databases& ads (
+ db.implicit_associations (true /* attach */, sys_rep));
+
+ // Skip the self-association.
+ //
+ for (auto i (ads.begin () + 1); i != ads.end (); ++i)
+ add (*i, db.type, add);
+ };
+
+ add (*this, type, add);
+ return r;
+ }
+
+ associated_databases database::
+ dependency_configs (optional<bool> buildtime)
+ {
+ associated_databases r;
+
+ bool allow_own_type (!buildtime || !*buildtime);
+ bool allow_host_type (!buildtime || *buildtime);
+
+ // Add the associated database to the resulting list if it is of the
+ // associating database type and allow_own_type is true or if it is of the
+ // host type and allow_host_type is true. Call itself recursively for the
+ // explicitly associated configurations.
+ //
+ // Note that the associated database of the associating database type is
+ // not added if allow_own_type is false but its own associated databases
+ // of the host type are added, if allow_host_type is true.
+ //
+ associated_databases descended; // Note: we may not add but still descend.
+ auto add = [&r, allow_own_type, allow_host_type, &descended]
+ (database& db, const std::string& t, const auto& add)
+ {
+ if (std::find (descended.begin (), descended.end (), db) !=
+ descended.end ())
+ return;
+
+ descended.push_back (db);
+
+ bool own (db.type == t);
+ bool host (db.type == "host");
+
+ if (!own && !(allow_host_type && host))
+ return;
+
+ if ((allow_own_type && own) || (allow_host_type && host))
+ r.push_back (db);
+
+ const associated_configs& acs (db.explicit_associations ());
+
+ // Skip the self-association.
+ //
+ for (auto i (acs.begin () + 1); i != acs.end (); ++i)
+ add (i->db, db.type, add);
+ };
+
+ add (*this, type, add);
+ return r;
+ }
+
+ database& database::
+ find_attached (uint64_t id)
+ {
+ assert (!explicit_associations_.empty ());
+
+ // Note that there shouldn't be too many databases, so the linear search
+ // is OK.
+ //
+ auto r (find_if (explicit_associations_.begin (),
+ explicit_associations_.end (),
+ [&id] (const associated_config& ac)
+ {
+ return ac.id == id;
+ }));
+
+ if (r == explicit_associations_.end ())
+ fail << "no configuration with id " << id << " is associated with "
+ << config_orig;
+
+ return r->db;
+ }
+
+ database& database::
+ find_attached (const std::string& name)
+ {
+ assert (!explicit_associations_.empty ());
+
+ auto r (find_if (explicit_associations_.begin (),
+ explicit_associations_.end (),
+ [&name] (const associated_config& ac)
+ {
+ return ac.name && *ac.name == name;
+ }));
+
+ if (r == explicit_associations_.end ())
+ fail << "no configuration with name '" << name << "' is associated "
+ << "with " << config_orig;
+
+ return r->db;
+ }
+
+ database& database::
+ find_dependency_config (const uuid_type& uid)
+ {
+ for (database& adb: dependency_configs ())
+ {
+ if (uid == adb.uuid)
+ return adb;
}
+
+ fail << "no configuration with uuid " << uid << " is associated with "
+ << config_orig << endf;
+ }
+
+ bool database::
+ main ()
+ {
+ return *this == main_database ();
+ }
+
+ string database::
+ string ()
+ {
+ return main () ? empty_string : '[' + config_orig.representation () + ']';
}
}
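
To make the type-based filtering in dependency_configs() easier to follow, here is a small standalone model (independent of bpkg) that applies the same inclusion and descent rules to the cfg1 (target) -> cfg2 (host) -> cfg3 (target) chain used in the database.hxx documentation below: with buildtime unset it selects cfg1 and cfg2, with buildtime false only cfg1, and with buildtime true only cfg2.

#include <iostream>
#include <optional>
#include <string>
#include <vector>
#include <algorithm>

// Standalone model of the dependency_configs() traversal rules: a config has
// a type and explicit associations; it is included if its type matches the
// associating type (runtime) or is "host" (build-time), and we only descend
// into associations of those same types.
//
struct config
{
  std::string name;
  std::string type;
  std::vector<config*> explicit_assocs;
};

static void
add (config& c,
     const std::string& t,
     std::optional<bool> buildtime,
     std::vector<config*>& r,
     std::vector<config*>& descended)
{
  if (std::find (descended.begin (), descended.end (), &c) != descended.end ())
    return;

  descended.push_back (&c);

  bool allow_own  (!buildtime || !*buildtime);
  bool allow_host (!buildtime ||  *buildtime);

  bool own  (c.type == t);
  bool host (c.type == "host");

  if (!own && !(allow_host && host))
    return;

  if ((allow_own && own) || (allow_host && host))
    r.push_back (&c);

  for (config* a: c.explicit_assocs)
    add (*a, c.type, buildtime, r, descended);
}

int
main ()
{
  config cfg3 {"cfg3", "target", {}};
  config cfg2 {"cfg2", "host",   {&cfg3}};
  config cfg1 {"cfg1", "target", {&cfg2}};

  for (std::optional<bool> bt: {std::optional<bool> (),
                                std::optional<bool> (false),
                                std::optional<bool> (true)})
  {
    std::vector<config*> r, d;
    add (cfg1, cfg1.type, bt, r, d);

    for (config* c: r)
      std::cout << c->name << ' ';
    std::cout << '\n'; // Prints "cfg1 cfg2", then "cfg1", then "cfg2".
  }

  return 0;
}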
diff --git a/bpkg/database.hxx b/bpkg/database.hxx
index 42270d8..53ca54a 100644
--- a/bpkg/database.hxx
+++ b/bpkg/database.hxx
@@ -16,6 +16,7 @@
#include <bpkg/utility.hxx>
#include <bpkg/diagnostics.hxx>
+#include <bpkg/system-repository.hxx>
namespace bpkg
{
@@ -23,41 +24,412 @@ namespace bpkg
using odb::result;
using odb::session;
- using odb::sqlite::database;
+ class configuration;
+ class database;
+
+ struct associated_config
+ {
+ uint64_t id;
+ optional<string> name;
+ reference_wrapper<database> db; // Needs to be move-assignable.
+ };
+
+ // Used for the immediate explicit associations which are normally not many
+ // (one entry for the self-association).
+ //
+ using associated_configs = small_vector<associated_config, 2>;
+
+ // In particular, this is used for implicit associations, which can
+ // potentially be many. Think of a dependency in a shared configuration
+ // with dependents in multiple implicitly associated configurations.
+ //
+ using associated_databases = small_vector<reference_wrapper<database>, 16>;
+
+ // Derive a custom database class that handles attaching/detaching
+ // additional configurations.
+ //
+ class database: public odb::sqlite::database
+ {
+ public:
+ using uuid_type = bpkg::uuid;
+
+ // Create new main database.
+ //
+ // The specified self-association object is persisted and its uuid and
+ // type are cached in the database object.
+ //
+ // If pre_associate is not NULL, then this configuration is treated as an
+ // associated configuration for schema migration purposes. If specified,
+ // this path should be absolute and normalized.
+ //
+ database (const dir_path& cfg,
+ const shared_ptr<configuration>& self,
+ odb::tracer& tr,
+ const dir_path* pre_associate = nullptr)
+ : database (cfg, self.get (), tr, false, false, pre_associate)
+ {
+ assert (self != nullptr);
+ }
+
+ // Open existing main database.
+ //
+ // If configured non-system selected packages can potentially be loaded
+ // from this database, then pass true as the pre_attach argument to
+ // recursively pre-attach the explicitly associated configuration
+ // databases, so that package prerequisites can be loaded from the
+ // associated configurations as well (see _selected_package_ref::to_ptr()
+ // implementation for details). Note that selected packages are loaded by
+ // some functions internally (package_iteration(), etc). Such functions
+ // are marked with the 'Note: loads selected packages.' note.
+ //
+ database (const dir_path& cfg,
+ odb::tracer& tr,
+ bool pre_attach,
+ bool sys_rep = false,
+ const dir_path* pre_associate = nullptr)
+ : database (cfg, nullptr, tr, pre_attach, sys_rep, pre_associate) {}
+
+ ~database ();
+
+ // Move-constructible but not move-assignable.
+ //
+ database (database&&);
+ database& operator= (database&&) = delete;
+
+ database (const database&) = delete;
+ database& operator= (const database&) = delete;
+
+ // Attach another (existing) database. The configuration directory should
+ // be absolute and normalized.
+ //
+ // Note that if the database is already attached, then the existing
+ // instance reference is returned and the sys_rep argument is ignored.
+ //
+ database&
+ attach (const dir_path&, bool sys_rep = false);
+
+ // Attach databases of all the explicitly associated configurations,
+ // recursively. Must be called inside the transaction.
+ //
+ void
+ attach_explicit (bool sys_rep = false);
+
+ // Note that while attach*() can be called on the attached database,
+ // detach_all() should only be called on the main database.
+ //
+ void
+ detach_all ();
+
+ database&
+ main_database ()
+ {
+ return static_cast<database&> (odb::sqlite::database::main_database ());
+ }
+
+ // Return true if this is the main database.
+ //
+ bool
+ main ();
+
+ // Return the explicit associations and the self-association (comes first)
+ // if the main database has been created with the pre_attach flag set to
+ // true and an empty list otherwise.
+ //
+ associated_configs&
+ explicit_associations ()
+ {
+ return explicit_associations_;
+ }
+
+ // By default attach and cache the implicitly associated configuration
+ // databases on the first call and return them along with the self-
+ // association (comes first). If attach is false, then return an empty
+ // list if associations were not yet cached by this function's previous
+ // call.
+ //
+ // Note that for implicitly associated configurations the association
+ // information (id, etc) is useless, thus we only return the databases
+ // rather than the association information.
+ //
+ associated_databases&
+ implicit_associations (bool attach = true, bool sys_rep = false);
+
+ // Return configurations of potential dependencies of packages selected in
+ // the current configuration.
+ //
+ // Specifically, return the self-association (comes first if included) and
+ // explicitly associated databases recursively, including them into the
+ // resulting list according to the following rules:
+ //
+ // - If buildtime is nullopt, then return configurations of all
+ // dependencies (runtime and build-time). In this case include
+ // configurations of the associating configuration type and the host
+ // type and do not descend into associations of different types.
+ //
+ // So, for example, for the following (not very sensible) association
+ // chain only the cfg1 and cfg2 configurations are included. The cfg3
+ // type is not host and differs from the type of cfg2 which associates
+ // it, and thus it is not included.
+ //
+ // cfg1 (this, target) -> cfg2 (host) -> cfg3 (target)
+ //
+ // - If buildtime is false, then return configurations of only runtime
+ // dependencies. In this case include configurations of only the
+ // associating configuration type and do not descend into associations
+ // of different types.
+ //
+ // So for the above association chain only cfg1 configuration is
+ // included.
+ //
+ // - If buildtime is true, then return configurations of only build-time
+ // dependencies. In this case include configurations of only the host
+ // type and do not descend into associations of different types and the
+ // host type.
+ //
+ // So for the above association chain only cfg2 configuration is
+ // included.
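+    //
+    // A hypothetical usage sketch (assumes a main database db created with
+    // pre_attach set to true and a package_name pn; mirrors what
+    // find_dependency() does):
+    //
+    //   for (database& ddb: db.dependency_configs (true /* buildtime */))
+    //   {
+    //     if (ddb.find<selected_package> (pn) != nullptr)
+    //       break; // Found the configuration with the build-time dependency.
+    //   }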
+ //
+ associated_databases
+ dependency_configs (optional<bool> buildtime = nullopt);
+
+ // Return configurations of potential dependents of packages selected in
+ // the current configuration.
+ //
+ // Specifically, return the implicitly associated configuration databases
+ // recursively, including the self-association (comes first). Only include
+ // an associated configuration into the resulting list if it is of the
+ // same type as the associating configuration or the associating
+ // configuration is of the host type (think of searching through the
+ // target configurations for dependents of a build-time dependency in host
+ // configuration).
+ //
+ associated_databases
+ dependent_configs (bool sys_rep = false);
+
+ // The following find_*() functions assume that the main database has been
+ // created with the pre_attach flag set to true.
+ //
+
+ // Return the self reference if the id is 0. Otherwise, return the
+ // database of an explicitly associated configuration with the specified
+ // association id and issue diagnostics and fail if no association is
+ // found.
+ //
+ database&
+ find_attached (uint64_t id);
+
+ // Return the self reference if this is the current configuration
+ // name. Otherwise, return the database of an explicitly associated
+ // configuration with the specified name and issue diagnostics and fail if
+ // no association is found.
+ //
+ database&
+ find_attached (const std::string& name);
+
+ // Return the dependency configuration with the specified uuid and issue
+ // diagnostics and fail if not found.
+ //
+ database&
+ find_dependency_config (const uuid_type&);
+
+ // Return an empty string for the main database and the original
+ // configuration directory path in the `[<dir>]` form otherwise.
+ //
+ // NOTE: remember to update pkg_command_vars::string() if changing the
+ // format.
+ //
+ std::string
+ string ();
+
+ // Verify that the association information (uuid, type, etc) matches the
+ // associated configuration. Issue diagnostics and fail if that's not the
+ // case.
+ //
+ void
+ verify_association (const configuration&, database&);
+
+ // Set the specified tracer for the whole associated databases cluster.
+ //
+ using tracer_type = odb::tracer;
+
+ void
+ tracer (tracer_type*);
+
+ void
+ tracer (tracer_type& t) {tracer (&t);}
+
+ using odb::sqlite::database::tracer;
+
+ public:
+ // Cached configuration information.
+ //
+ uuid_type uuid;
+ optional<std::string> name;
+ std::string type;
+
+ // Absolute and normalized configuration directory path. In particular, it
+ // is used as the configuration database identity.
+ //
+ dir_path config;
+
+ // For the main database, this is the original configuration directory
+ // path as specified by the user on the command line and `./` if
+ // unspecified. For other (associated) databases, it is the absolute
+ // configuration path if the main database's original configuration path
+ // is absolute and the path relative to the current directory otherwise.
+ // This is used in diagnostics.
+ //
+ dir_path config_orig;
+
+ // Per-configuration system repository (only loaded if sys_rep constructor
+ // argument is true).
+ //
+ optional<bpkg::system_repository> system_repository;
+
+ private:
+ struct impl;
+
+ // Create/open main database.
+ //
+ database (const dir_path& cfg,
+ configuration* create,
+ odb::tracer&,
+ bool pre_attach,
+ bool sys_rep,
+ const dir_path* pre_associate);
+
+ // Create attached database.
+ //
+ database (impl*,
+ const dir_path& cfg,
+ std::string schema,
+ bool sys_rep);
+
+ // If necessary, migrate this database and all the associated (both
+ // explicitly and implicitly) databases, recursively. Leave the associated
+ // databases attached. Must be called inside the transaction.
+ //
+ // Note that since the whole associated databases cluster is migrated at
+ // once, it is assumed that if migration is unnecessary for this database
+ // then it is also unnecessary for its associated databases.
+ //
+ void
+ migrate ();
+
+ // Cache the configuration information.
+ //
+ void
+ cache_config (const uuid_type&,
+ optional<std::string> name,
+ std::string type);
+
+ // Note: must be called inside the transaction.
+ //
+ void
+ load_system_repository ();
+
+ // Add the configuration path to the BPKG_OPEN_CONFIGS environment
+    // variable, which contains a list of space-separated, double-quoted
+ // absolute directory paths. Optionally, reset the list to this database's
+ // single path.
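+    //
+    // A hypothetical resulting value could look like this:
+    //
+    //   BPKG_OPEN_CONFIGS='"/path/to/cfg/" "/path/to/host-cfg/"'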
+ //
+ void
+ add_env (bool reset = false) const;
+
+ impl* impl_;
+
+ associated_configs explicit_associations_;
+ associated_databases implicit_associations_;
+ };
+
+ // NOTE: remember to update config_package comparison operators if changing
+ // the database comparison operators.
+ //
+ // Note that here we use the database address as the database identity since
+  // we don't expect two database instances for the same configuration to
+ // exist simultaneously due to the EXCLUSIVE locking mode (see database
+ // constructor for details).
+ //
+ inline bool
+ operator== (const database& x, const database& y)
+ {
+ return &x == &y;
+ }
+
+ inline bool
+ operator!= (const database& x, const database& y)
+ {
+ return !(x == y);
+ }
+
+ inline bool
+ operator< (const database& x, const database& y)
+ {
+ // Note that if we ever need the ordering to be consistent across runs,
+ // then we can compare the config paths or uuids.
+ //
+ return &x < &y;
+ }
+
+ inline ostream&
+ operator<< (ostream& os, const database& db)
+ {
+ string s (const_cast<database&> (db).string ());
+
+ if (!s.empty ())
+ os << ' ' << s;
+
+ return os;
+ }
   // Transaction wrapper that allows the creation of dummy transactions (start
// is false) that in reality use an existing transaction.
//
- struct transaction
+ // Note that there can be multiple databases attached to the main database
+ // and normally a transaction object is passed around together with a
+ // specific database. Thus, we don't provide the database accessor function,
+ // so that the database is always chosen deliberately.
+ //
+ class transaction
{
+ public:
using database_type = bpkg::database;
explicit
transaction (database_type& db, bool start = true)
- : db_ (db), start_ (start), t_ () // Finalized.
+ : start_ (start), t_ () // Finalized.
{
if (start)
- t_.reset (db.begin ());
+ t_.reset (db.begin_exclusive ()); // See locking_mode for details.
}
void
commit ()
{
if (start_)
+ {
t_.commit ();
+ start_ = false;
+ }
}
void
rollback ()
{
if (start_)
+ {
t_.rollback ();
+ start_ = false;
+ }
}
- database_type&
- database ()
+ void
+ start (database_type& db)
{
- return db_;
+ assert (!start_);
+
+ start_ = true;
+ t_.reset (db.begin_exclusive ());
}
static bool
@@ -66,26 +438,16 @@ namespace bpkg
return odb::sqlite::transaction::has_current ();
}
- static odb::sqlite::transaction&
- current ()
- {
- return odb::sqlite::transaction::current ();
- }
-
private:
- database_type& db_;
bool start_;
odb::sqlite::transaction t_;
};
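+
+  // A hypothetical usage sketch (assumes db is a bpkg::database and sp is a
+  // shared_ptr<selected_package> loaded from it):
+  //
+  //   transaction t (db);
+  //   db.update (sp);
+  //   t.commit ();
+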
- database
- open (const dir_path& configuration, tracer&, bool create = false);
-
struct tracer_guard
{
tracer_guard (database& db, tracer& t)
: db_ (db), t_ (db.tracer ()) {db.tracer (t);}
- ~tracer_guard () {db_.tracer (*t_);}
+ ~tracer_guard () {db_.tracer (t_);}
private:
database& db_;
@@ -128,6 +490,22 @@ namespace bpkg
{
return pointer_result_range<R> (forward<R> (r));
}
+
+ // Note that lazy_shared_ptr and lazy_weak_ptr are defined in types.hxx.
+ //
+ template <typename T>
+ inline database& lazy_shared_ptr<T>::
+ database () const
+ {
+ return static_cast<bpkg::database&> (base_type::database ());
+ }
+
+ template <typename T>
+ inline database& lazy_weak_ptr<T>::
+ database () const
+ {
+ return static_cast<bpkg::database&> (base_type::database ());
+ }
}
#endif // BPKG_DATABASE_HXX
diff --git a/bpkg/fetch-git.cxx b/bpkg/fetch-git.cxx
index 448cf49..0c2ac21 100644
--- a/bpkg/fetch-git.cxx
+++ b/bpkg/fetch-git.cxx
@@ -6,7 +6,6 @@
#include <map>
#include <libbutl/git.mxx>
-#include <libbutl/utility.mxx> // digit(), xdigit()
#include <libbutl/filesystem.mxx> // path_entry
#include <libbutl/path-pattern.mxx>
#include <libbutl/semantic-version.mxx>
diff --git a/bpkg/forward.hxx b/bpkg/forward.hxx
index becf628..6b11024 100644
--- a/bpkg/forward.hxx
+++ b/bpkg/forward.hxx
@@ -4,15 +4,13 @@
#ifndef BPKG_FORWARD_HXX
#define BPKG_FORWARD_HXX
-#include <odb/sqlite/forward.hxx>
-
namespace bpkg
{
- using odb::sqlite::database;
- struct transaction;
+ class transaction;
// <bpkg/package.hxx>
//
+ class configuration;
class repository;
class repository_fragment;
class selected_package;
diff --git a/bpkg/odb.sh b/bpkg/odb.sh
index 5cd8e02..7f494e1 100755
--- a/bpkg/odb.sh
+++ b/bpkg/odb.sh
@@ -43,9 +43,11 @@ fi
$odb "${inc[@]}" \
-DLIBODB_BUILD2 -DLIBODB_SQLITE_BUILD2 --generate-schema \
-d sqlite --std c++14 --generate-query \
- --odb-epilogue '#include <libbutl/small-vector-odb.hxx>' \
+ --odb-epilogue '#include <libbutl/small-vector-odb.hxx>' \
+ --odb-epilogue '#include <bpkg/pointer-traits.hxx>' \
--odb-epilogue '#include <bpkg/wrapper-traits.hxx>' \
- --hxx-prologue '#include <libbutl/small-vector-odb.hxx>' \
+ --hxx-prologue '#include <libbutl/small-vector-odb.hxx>' \
+ --hxx-prologue '#include <bpkg/pointer-traits.hxx>' \
--hxx-prologue '#include <bpkg/wrapper-traits.hxx>' \
--hxx-prologue '#include <bpkg/value-traits.hxx>' \
--include-with-brackets --include-prefix bpkg --guard-prefix BPKG \
diff --git a/bpkg/package.cxx b/bpkg/package.cxx
index 3532f3d..b1e9ab1 100644
--- a/bpkg/package.cxx
+++ b/bpkg/package.cxx
@@ -15,17 +15,118 @@ namespace bpkg
{
const version wildcard_version (0, "0", nullopt, nullopt, 0);
- // available_package_id
+ // configuration
//
- bool
- operator< (const available_package_id& x, const available_package_id& y)
+ configuration::
+ configuration (optional<string> n, string t, optional<uuid_type> uid)
+ : id (0),
+ name (move (n)),
+ type (move (t)),
+ expl (false)
{
- int r (x.name.compare (y.name));
- return r != 0 ? r < 0 : x.version < y.version;
+ try
+ {
+ uuid = uid ? *uid : uuid_type::generate ();
+ }
+ catch (const system_error& e)
+ {
+ fail << "unable to generate configuration uuid: " << e;
+ }
+ }
+
+ dir_path configuration::
+ effective_path (const dir_path& d) const
+ {
+ if (path.relative ())
+ {
+ dir_path r (d / path);
+
+ string what ("associated with " + d.representation () +
+ " configuration " + (name ? *name : to_string (*id)));
+
+ normalize (r, what.c_str ());
+ return r;
+ }
+ else
+ return path;
+ }
+
+ void
+ validate_configuration_name (const string& s, const char* what)
+ {
+ if (s.empty ())
+ fail << "empty " << what;
+
+ if (!(alpha (s[0]) || s[0] == '_'))
+ fail << "invalid " << what << " '" << s << "': illegal first character "
+ << "(must be alphabetic or underscore)";
+
+ for (auto i (s.cbegin () + 1), e (s.cend ()); i != e; ++i)
+ {
+ char c (*i);
+
+ if (!(alnum (c) || c == '_' || c == '-'))
+ fail << "invalid " << what << " '" << s << "': illegal character "
+ << "(must be alphabetic, digit, underscore, or dash)";
+ }
+ }
+
+ // config_package
+ //
+ string config_package::
+ string () const
+ {
+ std::string s (db.string ());
+ return !s.empty () ? name.string () + ' ' + s : name.string ();
}
// available_package
//
+ const version* available_package::
+ system_version (database& db) const
+ {
+ if (!system_version_)
+ {
+ assert (db.system_repository);
+
+ if (const system_package* sp = db.system_repository->find (id.name))
+ {
+ // Only cache if it is authoritative.
+ //
+ if (sp->authoritative)
+ system_version_ = sp->version;
+ else
+ return &sp->version;
+ }
+ }
+
+ return system_version_ ? &*system_version_ : nullptr;
+ }
+
+ pair<const version*, bool> available_package::
+ system_version_authoritative (database& db) const
+ {
+ assert (db.system_repository);
+
+ const system_package* sp (db.system_repository->find (id.name));
+
+ if (!system_version_)
+ {
+ if (sp != nullptr)
+ {
+ // Only cache if it is authoritative.
+ //
+ if (sp->authoritative)
+ system_version_ = sp->version;
+ else
+ return make_pair (&sp->version, false);
+ }
+ }
+
+ return make_pair (system_version_ ? &*system_version_ : nullptr,
+ sp != nullptr ? sp->authoritative : false);
+ }
+
odb::result<available_package>
query_available (database& db,
const package_name& name,
@@ -309,11 +410,9 @@ namespace bpkg
}
void
- check_any_available (const dir_path& c,
- transaction& t,
- const diag_record* dr)
+ check_any_available (database& db, transaction&, const diag_record* dr)
{
- database& db (t.database ());
+ const dir_path& c (db.config_orig);
if (db.query_value<repository_count> () == 0)
{
@@ -382,15 +481,62 @@ namespace bpkg
// selected_package
//
string selected_package::
- version_string () const
+ string (database& db) const
{
- return version != wildcard_version ? version.string () : "*";
+ std::string s (db.string ());
+ return !s.empty () ? string () + ' ' + s : string ();
+ }
+
+ _selected_package_ref::
+ _selected_package_ref (const lazy_shared_ptr<selected_package>& p)
+ : configuration (p.database ().uuid),
+ prerequisite (p.object_id ())
+ {
+ }
+
+ lazy_shared_ptr<selected_package> _selected_package_ref::
+ to_ptr (odb::database& db) &&
+ {
+ // Note that if this points to a different configuration, then it should
+ // already be pre-attached since it must be explicitly associated.
+ //
+ return lazy_shared_ptr<selected_package> (
+ static_cast<database&> (db).find_dependency_config (configuration),
+ move (prerequisite));
+ }
+
+ pair<shared_ptr<selected_package>, database*>
+ find_dependency (database& db, const package_name& pn, bool buildtime)
+ {
+ pair<shared_ptr<selected_package>, database*> r;
+
+ for (database& adb: db.dependency_configs (buildtime))
+ {
+ shared_ptr<selected_package> p (adb.find<selected_package> (pn));
+
+ if (p != nullptr)
+ {
+ if (r.first == nullptr)
+ {
+ r.first = move (p);
+ r.second = &adb;
+ }
+ else
+ {
+ fail << "package " << pn << " appears in multiple configurations" <<
+ info << r.first->state << " in " << r.second->config_orig <<
+ info << p->state << " in " << adb.config_orig;
+ }
+ }
+ }
+
+ return r;
}
optional<version>
package_iteration (const common_options& o,
- const dir_path& c,
- transaction& t,
+ database& db,
+ transaction&,
const dir_path& d,
const package_name& n,
const version& v,
@@ -398,7 +544,6 @@ namespace bpkg
{
tracer trace ("package_iteration");
- database& db (t.database ());
tracer_guard tg (db, trace);
if (check_external)
@@ -447,7 +592,7 @@ namespace bpkg
//
if (!changed && p->external ())
{
- dir_path src_root (p->effective_src_root (c));
+ dir_path src_root (p->effective_src_root (db.config));
// We need to complete and normalize the source directory as it may
// generally be completed against the configuration directory (unlikely
@@ -532,4 +677,18 @@ namespace bpkg
return os;
}
+
+ // package_dependent
+ //
+ odb::result<package_dependent>
+ query_dependents (database& db,
+ const package_name& dep,
+ database& dep_db)
+ {
+ using query = query<package_dependent>;
+
+ return db.query<package_dependent> (
+ "prerequisite = " + query::_val (dep.string ()) + "AND" +
+ "configuration = " + query::_val (dep_db.uuid.string ()));
+ }
}
diff --git a/bpkg/package.hxx b/bpkg/package.hxx
index cee2dd6..3457073 100644
--- a/bpkg/package.hxx
+++ b/bpkg/package.hxx
@@ -27,23 +27,12 @@
//
#define DB_SCHEMA_VERSION_BASE 6
-#pragma db model version(DB_SCHEMA_VERSION_BASE, 8, closed)
+#pragma db model version(DB_SCHEMA_VERSION_BASE, 9, closed)
namespace bpkg
{
- // Compare two lazy pointers via the pointed-to object ids.
- //
- struct compare_lazy_ptr
- {
- template <typename P>
- bool
- operator() (const P& x, const P& y) const
- {
- return x.object_id () < y.object_id ();
- }
- };
-
using optional_string = optional<string>;
+ using optional_uint64_t = optional<uint64_t>; // Preserve uint64_t alias.
// path
//
@@ -67,6 +56,10 @@ namespace bpkg
to((?) ? (?)->string () : bpkg::optional_string ()) \
from((?) ? bpkg::dir_path (*(?)) : bpkg::optional_dir_path ())
+ // uuid
+ //
+ #pragma db map type(uuid) as(string) to((?).string ()) from(bpkg::uuid (?))
+
// timestamp
//
using butl::timestamp;
@@ -122,8 +115,6 @@ namespace bpkg
#include <libbpkg/manifest.hxx>
-#include <bpkg/system-repository.hxx>
-
// Prevent assert() macro expansion in get/set expressions. This should
// appear after all #include directives since the assert() macro is
// redefined in each <assert.h> inclusion.
@@ -136,6 +127,105 @@ void assert (int);
namespace bpkg
{
+ // Associated bpkg configuration.
+ //
+ // Association with id 0 is the special self-association which captures
+ // information about the current configuration. This information is cached
+ // in associations of other configurations.
+ //
+  // Note that the associated configuration information will normally be
+  // accessed through the database object functions, which load and cache this
+ // information on the first call. This makes the session support for the
+ // configuration class redundant. Moreover, with the session support
+ // disabled the database implementation can freely move out the data from
+ // the configuration objects into the internal cache and safely load them
+ // from the temporary database objects (see database::attach() for details).
+ //
+ #pragma db object pointer(shared_ptr)
+ class configuration
+ {
+ public:
+ using uuid_type = bpkg::uuid;
+
+ // Association id.
+ //
+    // Zero for the self-association; auto-assigned for associated
+ // configurations when the object is persisted.
+ //
+ optional_uint64_t id; // Object id.
+
+ uuid_type uuid;
+ optional<string> name;
+ string type;
+ dir_path path; // Empty for the self-association.
+
+ // True if the association is created explicitly by the user rather than
+ // automatically as a reverse association.
+ //
+ bool expl;
+
+ // Database mapping.
+ //
+ #pragma db member(id) id auto
+ #pragma db member(uuid) unique
+ #pragma db member(name) unique
+ #pragma db member(path) unique
+ #pragma db member(expl) column("explicit")
+
+ public:
+ // Create the self-association. Generate the UUID, unless specified.
+ //
+ configuration (optional<string> n,
+ string t,
+ optional<uuid_type> uid = nullopt);
+
+ // Create an associated configuration.
+ //
+ configuration (const uuid_type& uid,
+ optional<string> n,
+ string t,
+ dir_path p,
+ bool e)
+ : uuid (uid),
+ name (move (n)),
+ type (move (t)),
+ path (move (p)),
+ expl (e) {}
+
+ // If the configuration path is absolute, then return it as is. Otherwise,
+ // return it completed relative to the specified associated configuration
+ // directory path and then normalized. The specified directory path should
+ // be absolute and normalized. Issue diagnostics and fail on the path
+ // conversion error.
+ //
+ // Note that the self-association object is naturally supported by this
+ // function, since its path is empty.
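+    //
+    // For example (hypothetical paths): if path is ../host/ and the specified
+    // associated configuration directory is /tmp/cfg/, then /tmp/host/ is
+    // returned.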
+ //
+ dir_path
+ effective_path (const dir_path&) const;
+
+ const dir_path&
+ make_effective_path (const dir_path& d)
+ {
+ if (path.relative ())
+ path = effective_path (d);
+
+ return path;
+ }
+
+ private:
+ friend class odb::access;
+ configuration () = default;
+ };
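+
+  // A hypothetical creation sketch (the "host" name and type are made up;
+  // assumes db is the configuration database and a transaction is open):
+  //
+  //   configuration c (optional<string> ("host"), "host" /* type */);
+  //   db.persist (c);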
+
+  // Verify that a string is a valid configuration name, that is, non-empty
+  // and containing only alphanumeric characters, '_', and '-' (except for the
+  // first character, which can only be alphabetic or '_'). Issue diagnostics
+  // and fail if that's not the case.
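+  //
+  // For example (hypothetical calls):
+  //
+  //   validate_configuration_name ("alt_host", "configuration name"); // Ok.
+  //   validate_configuration_name ("2nd-host", "configuration name"); // Fails.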
+ //
+ void
+ validate_configuration_name (const string&, const char* what);
+
// version
//
// Sometimes we need to split the version into two parts: the part
@@ -532,9 +622,6 @@ namespace bpkg
available_package_id (package_name, const bpkg::version&);
};
- bool
- operator< (const available_package_id&, const available_package_id&);
-
#pragma db object pointer(shared_ptr) session
class available_package
{
@@ -612,48 +699,13 @@ namespace bpkg
// we do not implicitly assume a wildcard version.
//
const version_type*
- system_version () const
- {
- if (!system_version_)
- {
- if (const system_package* sp = system_repository.find (id.name))
- {
- // Only cache if it is authoritative.
- //
- if (sp->authoritative)
- system_version_ = sp->version;
- else
- return &sp->version;
- }
- }
-
- return system_version_ ? &*system_version_ : nullptr;
- }
+ system_version (database&) const;
// As above but also return an indication if the version information is
// authoritative.
//
pair<const version_type*, bool>
- system_version_authoritative () const
- {
- const system_package* sp (system_repository.find (id.name));
-
- if (!system_version_)
- {
- if (sp != nullptr)
- {
- // Only cache if it is authoritative.
- //
- if (sp->authoritative)
- system_version_ = sp->version;
- else
- return make_pair (&sp->version, false);
- }
- }
-
- return make_pair (system_version_ ? &*system_version_ : nullptr,
- sp != nullptr ? sp->authoritative : false);
- }
+ system_version_authoritative (database&) const;
// Database mapping.
//
@@ -785,9 +837,7 @@ namespace bpkg
// NULL, print the error message and fail.
//
void
- check_any_available (const dir_path& configuration,
- transaction&,
- const diag_record* = nullptr);
+ check_any_available (database&, transaction&, const diag_record* = nullptr);
// package_state
//
@@ -867,12 +917,46 @@ namespace bpkg
// single constraint, we don't support multiple dependencies on the same
// package (e.g., two ranges of versions). See pkg_configure().
//
+ // Note also that the pointer can refer to a selected package in another
+ // database.
+ //
class selected_package;
+ // Note that the keys for this map need to be created with the database
+ // passed to their constructor, which is required for persisting them (see
+ // _selected_package_ref() implementation for details).
+ //
using package_prerequisites = std::map<lazy_shared_ptr<selected_package>,
optional<version_constraint>,
compare_lazy_ptr>;
+ // Database mapping for lazy_shared_ptr<selected_package> to configuration
+ // UUID and package name.
+ //
+ #pragma db value
+ struct _selected_package_ref
+ {
+ using ptr = lazy_shared_ptr<selected_package>;
+
+ uuid configuration;
+ package_name prerequisite;
+
+ explicit
+ _selected_package_ref (const ptr&);
+
+ _selected_package_ref () = default;
+
+ ptr
+ to_ptr (odb::database&) &&;
+
+ #pragma db member(configuration)
+ };
+
+ #pragma db map type(_selected_package_ref::ptr) \
+ as(_selected_package_ref) \
+ to(bpkg::_selected_package_ref (?)) \
+ from(std::move (?).to_ptr (*db))
+
#pragma db object pointer(shared_ptr) session
class selected_package
{
@@ -971,11 +1055,17 @@ namespace bpkg
// all other versions.
//
std::string
- version_string () const;
+ version_string () const
+ {
+ return version != wildcard_version ? version.string () : "*";
+ }
std::string
string () const {return package_string (name, version, system ());}
+ std::string
+ string (database&) const;
+
// Return the relative source directory completed using the configuration
// directory. Return the absolute source directory as is.
//
@@ -1004,8 +1094,8 @@ namespace bpkg
//
#pragma db member(name) id
- #pragma db member(prerequisites) id_column("package") \
- key_column("prerequisite") key_not_null value_column("")
+ #pragma db member(prerequisites) id_column("package") \
+ key_column("") value_column("")
// Explicit aggregate initialization for C++20 (private default ctor).
//
@@ -1049,6 +1139,15 @@ namespace bpkg
return os << p.string ();
}
+ // Try to find a dependency in the dependency configurations (see
+ // database::dependency_configs() for details). Return pointers to the found
+ // package and the configuration it belongs to. Return a pair of NULLs if no
+ // package is found and issue diagnostics and fail if multiple packages (in
+ // multiple configurations) are found.
+ //
+ pair<shared_ptr<selected_package>, database*>
+ find_dependency (database&, const package_name&, bool buildtime);
+
// Check if the directory containing the specified package version should be
// considered its iteration. Return the version of this iteration if that's
// the case and nullopt otherwise.
@@ -1081,9 +1180,11 @@ namespace bpkg
//
class common_options;
+ // Note: loads selected packages.
+ //
optional<version>
package_iteration (const common_options&,
- const dir_path& configuration,
+ database&,
transaction&,
const dir_path&,
const package_name&,
@@ -1179,25 +1280,22 @@ namespace bpkg
// Return a list of packages that depend on this package along with
// their constraints.
//
+ // @@ Using raw container table since ODB doesn't support containers in
+ // views yet.
+ //
/*
- #pragma db view object(selected_package) \
- container(selected_package::prerequisites = pp inner: pp.key)
+ #pragma db view container(selected_package::prerequisites = pp)
struct package_dependent
{
- #pragma db column(pp.id)
- string name;
+ #pragma db column("pp.package")
+ package_name name;
- #pragma db column(pp.value)
+ #pragma db column("pp.")
optional<version_constraint> constraint;
};
*/
- // @@ Using raw container table since ODB doesn't support containers
- // in views yet.
- //
- #pragma db view object(selected_package) \
- table("main.selected_package_prerequisites" = "pp" inner: \
- "pp.prerequisite = " + selected_package::name)
+ #pragma db view table("main.selected_package_prerequisites" = "pp")
struct package_dependent
{
#pragma db column("pp.package")
@@ -1207,6 +1305,55 @@ namespace bpkg
optional<version_constraint> constraint;
};
+  // In the specified database, query dependents of a dependency that resides
+  // in a potentially different database (yeah, it's a mouthful).
+ //
+ odb::result<package_dependent>
+ query_dependents (database& dependent_db,
+ const package_name& dependency,
+ database& dependency_db);
+
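+  // A hypothetical usage sketch (iterate over dependents of dependency dn
+  // selected in database ddb, across all of its dependent configurations):
+  //
+  //   for (database& db: ddb.dependent_configs ())
+  //     for (auto& pd: query_dependents (db, dn, ddb))
+  //       ... // pd.name and pd.constraint describe a dependent in db.
+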
+ // Database and package name pair.
+ //
+ // It is normally used as a key for maps containing data for packages across
+ // multiple associated configurations. Assumes that the respective databases
+  // are not detached during the lifetime of such maps. Considers both the
+  // package name and the database for object comparison.
+ //
+ struct config_package
+ {
+ database& db;
+ package_name name;
+
+ config_package (database& d, package_name n): db (d), name (move (n)) {}
+
+ // Create a pseudo-package (command line as a dependent, etc).
+ //
+ config_package (database& d, string n)
+ : db (d),
+ name (n.empty () ? package_name () : package_name (move (n))) {}
+
+ bool
+ operator== (const config_package& v) const
+ {
+ // See operator==(database, database).
+ //
+ return name == v.name && &db == &v.db;
+ }
+
+ bool
+ operator< (const config_package& v) const
+ {
+ // See operator==(database, database).
+ //
+ int r (name.compare (v.name));
+ return r != 0 ? (r < 0) : (&db < &v.db);
+ }
+
+ std::string
+ string () const;
+ };
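+
+  // A hypothetical usage sketch as a key in a cross-configuration map
+  // (build_data is a made-up value type):
+  //
+  //   std::map<config_package, build_data> map;
+  //   map.emplace (config_package {db, pkg.name}, build_data {});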
+
// Return a count of repositories that contain this repository fragment.
//
#pragma db view table("main.repository_fragments")
@@ -1472,6 +1619,13 @@ namespace bpkg
}
*/
+ inline bool
+ operator< (const available_package_id& x, const available_package_id& y)
+ {
+ int r (x.name.compare (y.name));
+ return r != 0 ? r < 0 : x.version < y.version;
+ }
+
template <typename T1, typename T2>
inline auto
compare_version_gt (const T1& x, const T2& y, bool revision, bool iteration)
diff --git a/bpkg/package.xml b/bpkg/package.xml
index e54829c..edb6d09 100644
--- a/bpkg/package.xml
+++ b/bpkg/package.xml
@@ -1,4 +1,31 @@
<changelog xmlns="http://www.codesynthesis.com/xmlns/odb/changelog" database="sqlite" version="1">
+ <changeset version="9">
+ <add-table name="main.configuration" kind="object">
+ <column name="id" type="INTEGER" null="true"/>
+ <column name="uuid" type="TEXT" null="true"/>
+ <column name="name" type="TEXT" null="true"/>
+ <column name="type" type="TEXT" null="true"/>
+ <column name="path" type="TEXT" null="true"/>
+ <column name="explicit" type="INTEGER" null="true"/>
+ <primary-key auto="true">
+ <column name="id"/>
+ </primary-key>
+ <index name="configuration_uuid_i" type="UNIQUE">
+ <column name="uuid"/>
+ </index>
+ <index name="configuration_name_i" type="UNIQUE">
+ <column name="name"/>
+ </index>
+ <index name="configuration_path_i" type="UNIQUE">
+ <column name="path"/>
+ </index>
+ </add-table>
+ <alter-table name="main.selected_package_prerequisites">
+ <add-column name="configuration" type="TEXT" null="true"/>
+ <drop-foreign-key name="prerequisite_fk"/>
+ </alter-table>
+ </changeset>
+
<changeset version="8">
<alter-table name="main.repository">
<add-column name="local" type="INTEGER" null="true"/>
diff --git a/bpkg/pkg-build.cli b/bpkg/pkg-build.cli
index e5a6118..804eb41 100644
--- a/bpkg/pkg-build.cli
+++ b/bpkg/pkg-build.cli
@@ -156,6 +156,16 @@ namespace bpkg
bpkg build libfoo/2.0.0 # upgrade libfoo 2.0.0 to hold,
# also hold version 2.0.0
\
+
+ A package can be built in one of the associated configurations instead of
+ the current (or host, for build-time dependencies) configuration by
+ specifying one of the \cb{--config-*} options (see \l{bpkg-cfg-create(1)}
+ for background on associated configurations). For example:
+
+ \
+ bpkg build foo { --config-name=alt-host }+ ?bison
+ \
+
"
}
@@ -243,6 +253,27 @@ namespace bpkg
are purged. Refer to the \cb{--output-purge} option in
\l{bpkg-pkg-checkout(1)} for details."
}
+
+ string --config-name
+ {
+ "<id>",
+ "Name of the associated configuration to build this package(s) in.
+ By default, the package is built in the current configuration."
+ }
+
+ uint64_t --config-id
+ {
+ "<id>",
+ "Numeric id of the associated configuration to build this package(s)
+ in. By default, the package is built in the current configuration."
+ }
+
+ uuid --config-uuid
+ {
+ "<uuid>",
+ "UUID of the associated configuration to build this package(s) in.
+ By default, the package is built in the current configuration."
+ }
};
class pkg_build_options: configuration_options,
@@ -310,6 +341,14 @@ namespace bpkg
specified as part of the build command. Refer to the \cb{--shallow}
option in \l{bpkg-rep-fetch(1)} for details."
}
+
+ bool --no-refinement
+ {
+ "Don't try to refine the configuration by offering drop any unused
+ dependencies that were potentially left behind on the previous
+ \cb{pkg-build} or \cb{pkg-drop} command execution if the command
+ is otherwise a noop (performs no new package builds, upgrades, etc)."
+ }
};
"
diff --git a/bpkg/pkg-build.cxx b/bpkg/pkg-build.cxx
index 32a680f..e15a9c9 100644
--- a/bpkg/pkg-build.cxx
+++ b/bpkg/pkg-build.cxx
@@ -20,9 +20,11 @@
#include <bpkg/common-options.hxx>
+#include <bpkg/cfg-add.hxx>
#include <bpkg/pkg-purge.hxx>
#include <bpkg/pkg-fetch.hxx>
#include <bpkg/rep-fetch.hxx>
+#include <bpkg/cfg-create.hxx>
#include <bpkg/pkg-unpack.hxx>
#include <bpkg/pkg-update.hxx>
#include <bpkg/pkg-verify.hxx>
@@ -195,7 +197,6 @@ namespace bpkg
//
static pair<shared_ptr<available_package>, shared_ptr<repository_fragment>>
make_available (const common_options& options,
- const dir_path& c,
database& db,
const shared_ptr<selected_package>& sp)
{
@@ -214,7 +215,7 @@ namespace bpkg
// moment).
//
shared_ptr<repository_fragment> af (
- db.find<repository_fragment> (
+ db.main_database ().find<repository_fragment> (
sp->repository_fragment.canonical_name ()));
// The package is in at least fetched state, which means we should
@@ -225,10 +226,10 @@ namespace bpkg
package_manifest m (
sp->state == package_state::fetched
? pkg_verify (options,
- a->absolute () ? *a : c / *a,
+ a->absolute () ? *a : db.config_orig / *a,
true /* ignore_unknown */,
false /* expand_values */)
- : pkg_verify (sp->effective_src_root (c),
+ : pkg_verify (sp->effective_src_root (db.config_orig),
true /* ignore_unknown */,
// Copy potentially fixed up version from selected package.
[&sp] (version& v) {v = sp->version;}));
@@ -311,6 +312,8 @@ namespace bpkg
//
optional<action_type> action;
+ reference_wrapper<database> db; // Needs to be move-assignable.
+
shared_ptr<selected_package> selected; // NULL if not selected.
shared_ptr<available_package> available; // Can be NULL, fake/transient.
@@ -337,12 +340,12 @@ namespace bpkg
//
struct constraint_type
{
+ reference_wrapper<database> db; // Main database for non-packages.
string dependent;
version_constraint value;
- constraint_type () = default;
- constraint_type (string d, version_constraint v)
- : dependent (move (d)), value (move (v)) {}
+ constraint_type (database& d, string dp, version_constraint v)
+ : db (d), dependent (move (dp)), value (move (v)) {}
};
vector<constraint_type> constraints;
@@ -373,12 +376,13 @@ namespace bpkg
// Set of package names that caused this package to be built or adjusted.
// Empty name signifies user selection.
//
- set<package_name> required_by;
+ set<config_package> required_by;
bool
user_selection () const
{
- return required_by.find (package_name ()) != required_by.end ();
+ return required_by.find (config_package {db.get ().main_database (),
+ ""}) != required_by.end ();
}
// Adjustment flags.
@@ -434,17 +438,28 @@ namespace bpkg
//
assert (available != nullptr &&
(system
- ? available->system_version () != nullptr
+ ? available->system_version (db) != nullptr
: !available->stub ()));
- return system ? *available->system_version () : available->version;
+ return system ? *available->system_version (db) : available->version;
}
string
available_name_version () const
{
assert (available != nullptr);
- return package_string (available->id.name, available_version (), system);
+ return package_string (available->id.name,
+ available_version (),
+ system);
+ }
+
+ string
+ available_name_version_db () const
+ {
+ string s (db.get ().string ());
+ return !s.empty ()
+ ? available_name_version () + ' ' + s
+ : available_name_version ();
}
// Merge constraints, required-by package names, hold_* flags,
@@ -453,6 +468,10 @@ namespace bpkg
void
merge (build_package&& p)
{
+ // We don't merge objects from different configurations.
+ //
+ assert (db == p.db);
+
// We don't merge into pre-entered objects, and from/into drops.
//
assert (action && *action != drop && (!p.action || *p.action != drop));
@@ -483,7 +502,7 @@ namespace bpkg
// Propagate the user-selection tag.
//
- required_by.insert (package_name ());
+ required_by.emplace (db.get ().main_database (), package_name ());
}
// Required-by package names have different semantics for different
@@ -545,7 +564,10 @@ namespace bpkg
{
assert (!pkg.action);
- auto p (map_.emplace (move (name), data_type {end (), move (pkg)}));
+ database& db (pkg.db); // Save before the move() call.
+ auto p (map_.emplace (config_package {db, move (name)},
+ data_type {end (), move (pkg)}));
+
assert (p.second);
}
@@ -555,8 +577,6 @@ namespace bpkg
//
build_package*
collect_build (const common_options& options,
- const dir_path& cd,
- database& db,
build_package pkg,
postponed_packages* recursively = nullptr)
{
@@ -569,7 +589,7 @@ namespace bpkg
assert (pkg.action && *pkg.action == build_package::build &&
pkg.available != nullptr);
- auto i (map_.find (pkg.available->id.name));
+ auto i (map_.find (pkg.db, pkg.available->id.name));
// If we already have an entry for this package name, then we
// have to pick one over the other.
@@ -636,15 +656,15 @@ namespace bpkg
//
if (auto c1 = test (p2, p1))
{
- const package_name& n (i->first);
+ const package_name& n (i->first.name);
const string& d1 (c1->dependent);
const string& d2 (c2->dependent);
fail << "unable to satisfy constraints on package " << n <<
- info << d1 << " depends on (" << n << " " << c1->value
- << ")" <<
- info << d2 << " depends on (" << n << " " << c2->value
- << ")" <<
+ info << d1 << c1->db << " depends on (" << n << " "
+ << c1->value << ")" <<
+ info << d2 << c2->db << " depends on (" << n << " "
+ << c2->value << ")" <<
info << "available " << p1->available_name_version () <<
info << "available " << p2->available_name_version () <<
info << "explicitly specify " << n << " version to manually "
@@ -654,8 +674,8 @@ namespace bpkg
swap (p1, p2);
}
- l4 ([&]{trace << "pick " << p1->available_name_version ()
- << " over " << p2->available_name_version ();});
+ l4 ([&]{trace << "pick " << p1->available_name_version_db ()
+ << " over " << p2->available_name_version_db ();});
}
// If versions are the same, then we still need to pick the entry as
// one of them can build a package from source while another
@@ -690,12 +710,14 @@ namespace bpkg
{
// This is the first time we are adding this package name to the map.
//
- l4 ([&]{trace << "add " << pkg.available_name_version ();});
+ l4 ([&]{trace << "add " << pkg.available_name_version_db ();});
// Note: copy; see emplace() below.
//
+ database& db (pkg.db); // Save before the move() call.
package_name n (pkg.available->id.name);
- i = map_.emplace (move (n), data_type {end (), move (pkg)}).first;
+ i = map_.emplace (config_package {db, move (n)},
+ data_type {end (), move (pkg)}).first;
}
build_package& p (i->second.package);
@@ -721,7 +743,7 @@ namespace bpkg
// reasoning wrong.
//
if (recursively != nullptr)
- collect_build_prerequisites (options, cd, db, p, recursively);
+ collect_build_prerequisites (options, p, recursively);
return &p;
}
@@ -737,8 +759,6 @@ namespace bpkg
//
void
collect_build_prerequisites (const common_options& options,
- const dir_path& cd,
- database& db,
const build_package& pkg,
postponed_packages* postponed)
{
@@ -761,13 +781,16 @@ namespace bpkg
make_exception_guard (
[&pkg] ()
{
- info << "while satisfying " << pkg.available_name_version ();
+ info << "while satisfying " << pkg.available_name_version_db ();
}));
const shared_ptr<available_package>& ap (pkg.available);
const shared_ptr<repository_fragment>& af (pkg.repository_fragment);
const package_name& name (ap->id.name);
+ database& pdb (pkg.db);
+ database& mdb (pdb.main_database ());
+
for (const dependency_alternatives_ex& da: ap->dependencies)
{
if (da.conditional) // @@ TODO
@@ -797,10 +820,6 @@ namespace bpkg
continue;
}
- // else
- //
- // @@ TODO: in the future we would need to at least make sure the
- // build and target machines are the same. See also pkg-configure.
}
bool system (false);
@@ -823,7 +842,12 @@ namespace bpkg
//
const version_constraint* dep_constr (nullptr);
- auto i (map_.find (dn));
+ // If the dependency package build is already in the map, then switch
+ // to its configuration.
+ //
+ database* ddb (&pdb);
+
+ auto i (map_.find_dependency (pdb, dn, da.buildtime));
if (i != map_.end ())
{
const build_package& bp (i->second.package);
@@ -849,13 +873,15 @@ namespace bpkg
if (!wildcard (*dep_constr) &&
!satisfies (*dep_constr, dp.constraint))
fail << "unable to satisfy constraints on package " << dn <<
- info << name << " depends on (" << dn << " "
+ info << name << pdb << " depends on (" << dn << " "
<< *dp.constraint << ")" <<
- info << c.dependent << " depends on (" << dn << " "
+ info << c.dependent << c.db << " depends on (" << dn << " "
<< c.value << ")" <<
info << "specify " << dn << " version to satisfy " << name
<< " constraint";
}
+
+ ddb = &bp.db.get ();
}
const dependency& d (!dep_constr
@@ -863,11 +889,16 @@ namespace bpkg
: dependency {dn, *dep_constr});
// First see if this package is already selected. If we already have
- // it in the configuraion and it satisfies our dependency version
+ // it in the configuration and it satisfies our dependency version
// constraint, then we don't want to be forcing its upgrade (or,
// worse, downgrade).
//
- shared_ptr<selected_package> dsp (db.find<selected_package> (dn));
+ // Search recursively in the explicitly associated configurations.
+ //
+ pair<shared_ptr<selected_package>, database*> spd (
+ find_dependency (*ddb, dn, da.buildtime));
+
+ shared_ptr<selected_package>& dsp (spd.first);
pair<shared_ptr<available_package>,
shared_ptr<repository_fragment>> rp;
@@ -878,8 +909,22 @@ namespace bpkg
if (dsp != nullptr)
{
+ // Fail if we end up building a dependency that is also configured
+ // in another configuration of the same type.
+ //
+ if (i != map_.end () && *ddb != *spd.second)
+ fail << "building package " << dn << " which is already "
+ << dsp->state << " in another configuration" <<
+ info << "building in " << ddb->config_orig <<
+ info << dsp->state << " in " << spd.second->config_orig <<
+ info << "use --config-* to select package configuration";
+
+ // Switch to the selected package configuration.
+ //
+ ddb = spd.second;
+
if (dsp->state == package_state::broken)
- fail << "unable to build broken package " << dn <<
+ fail << "unable to build broken package " << dn << *ddb <<
info << "use 'pkg-purge --force' to remove";
// If the constraint is imposed by the user we also need to make sure
@@ -897,11 +942,11 @@ namespace bpkg
// doesn't really matter).
//
shared_ptr<repository_fragment> root (
- db.load<repository_fragment> (""));
+ mdb.load<repository_fragment> (""));
rp = system
- ? find_available_one (db, dn, nullopt, root)
- : find_available_one (db,
+ ? find_available_one (mdb, dn, nullopt, root)
+ : find_available_one (mdb,
dn,
version_constraint (dsp->version),
root);
@@ -910,7 +955,7 @@ namespace bpkg
// (returning stub as an available package feels wrong).
//
if (dap == nullptr || dap->stub ())
- rp = make_available (options, cd, db, dsp);
+ rp = make_available (options, *ddb, dsp);
}
else
// Remember that we may be forcing up/downgrade; we will deal with
@@ -919,6 +964,88 @@ namespace bpkg
force = true;
}
+ // If this is a build-time dependency and we build it for the first
+ // time, then we need to find a suitable configuration (of the host
+ // type) to build it in.
+ //
+ // If the current configuration (ddb) is of the host type, then we use
+ // that. Otherwise, we go through its immediate explicit associations.
+ // If only one of them has the host type, then we use that. If there
+ // are multiple of them, then we fail advising the user to pick one
+ // explicitly. If there are none, then we create the private host
+ // configuration and use that.
+ //
+ // Note that if the user has explicitly specified the configuration
+ // for this dependency on the command line (using --config-*), then
+ // this configuration is used as the starting point for this search.
+ //
+ if (da.buildtime && dsp == nullptr)
+ {
+ database* hdb (nullptr);
+
+ // Note that the first returned association is for ddb itself.
+ //
+ for (const associated_config& ac: ddb->explicit_associations ())
+ {
+ database& adb (ac.db);
+
+ if (adb.type == "host")
+ {
+ // We are done if the self-association is of the host type.
+ //
+ if (ac.id == 0)
+ {
+ hdb = &adb;
+ break;
+ }
+
+ if (hdb == nullptr)
+ hdb = &adb;
+ else
+ fail << "multiple possible host configurations for build-time "
+ << "dependency (" << dp << ")" <<
+ info << hdb->config_orig <<
+ info << adb.config_orig <<
+ info << "use --config-* to select the configuration";
+ }
+ }
+
+ // If no host configuration is found, then create and associate it.
+ //
+ if (hdb == nullptr)
+ {
+ const strings mods {"cc"};
+ const strings vars {"config.config.load=~host"};
+
+ // Wipe a potentially existing un-associated private configuration
+ // left from a previous faulty run. Note that trying to reuse it
+ // would be a bad idea since it can be half-prepared, with an
+ // outdated database schema version, etc.
+ //
+ cfg_create (options,
+ ddb->config_orig / host_dir,
+ optional<string> ("host") /* name */,
+ "host" /* type */,
+ mods,
+ vars,
+ false /* existing */,
+ true /* wipe */);
+
+ // Note that we will copy the host name from the configuration
+ // unless it clashes with one of the existing associations.
+ //
+ shared_ptr<configuration> ac (cfg_add (*ddb,
+ ddb->config / host_dir,
+ true /* relative */,
+ nullopt /* name */,
+ true /* sys_rep */));
+
+ hdb = &ddb->find_attached (*ac->id);
+ }
+
+ ddb = hdb; // Switch to the host configuration.
+ }
+
// If we didn't get the available package corresponding to the
// selected package, look for any that satisfies the constraint.
//
@@ -930,7 +1057,7 @@ namespace bpkg
// prerequisites).
//
if (af == nullptr)
- fail << "package " << pkg.available_name_version ()
+ fail << "package " << pkg.available_name_version_db ()
<< " is orphaned" <<
info << "explicitly upgrade it to a new version";
@@ -976,7 +1103,7 @@ namespace bpkg
// the package is recognized. An unrecognized package means the
// broken/stale repository (see below).
//
- rp = find_available_one (db,
+ rp = find_available_one (mdb,
dn,
!system ? d.constraint : nullopt,
af);
@@ -998,7 +1125,7 @@ namespace bpkg
if (d.constraint && (!dep_constr || !wildcard (*dep_constr)))
dr << ' ' << *d.constraint;
- dr << " of package " << name;
+ dr << " of package " << name << pdb;
if (!af->location.empty () && (!dep_constr || system))
dr << info << "repository " << af->location << " appears to "
@@ -1024,17 +1151,17 @@ namespace bpkg
// version constraint). If it were, then the system version
// wouldn't be NULL and would satisfy itself.
//
- if (dap->system_version () == nullptr)
+ if (dap->system_version (*ddb) == nullptr)
fail << "dependency " << d << " of package " << name << " is "
<< "not available in source" <<
info << "specify ?sys:" << dn << " if it is available from "
<< "the system";
- if (!satisfies (*dap->system_version (), d.constraint))
+ if (!satisfies (*dap->system_version (*ddb), d.constraint))
fail << "dependency " << d << " of package " << name << " is "
<< "not available in source" <<
info << package_string (dn,
- *dap->system_version (),
+ *dap->system_version (*ddb),
true /* system */)
<< " does not satisfy the constrains";
@@ -1042,7 +1169,7 @@ namespace bpkg
}
else
{
- auto p (dap->system_version_authoritative ());
+ auto p (dap->system_version_authoritative (*ddb));
if (p.first != nullptr &&
p.second && // Authoritative.
@@ -1053,19 +1180,20 @@ namespace bpkg
build_package bp {
build_package::build,
+ *ddb,
dsp,
dap,
rp.second,
- nullopt, // Hold package.
- nullopt, // Hold version.
- {}, // Constraints.
+ nullopt, // Hold package.
+ nullopt, // Hold version.
+ {}, // Constraints.
system,
- false, // Keep output directory.
- nullopt, // Checkout root.
- false, // Checkout purge.
- strings (), // Configuration variables.
- {name}, // Required by (dependent).
- 0}; // Adjustments.
+ false, // Keep output directory.
+ nullopt, // Checkout root.
+ false, // Checkout purge.
+ strings (), // Configuration variables.
+ {config_package {pdb, name}}, // Required by (dependent).
+ 0}; // Adjustments.
// Add our constraint, if we have one.
//
@@ -1075,7 +1203,7 @@ namespace bpkg
// completeness.
//
if (dp.constraint)
- bp.constraints.emplace_back (name.string (), *dp.constraint);
+ bp.constraints.emplace_back (pdb, name.string (), *dp.constraint);
// Now collect this prerequisite. If it was actually collected
// (i.e., it wasn't already there) and we are forcing a downgrade or
@@ -1096,7 +1224,7 @@ namespace bpkg
// build foo ?sys:bar/2
//
const build_package* p (
- collect_build (options, cd, db, move (bp), postponed));
+ collect_build (options, move (bp), postponed));
if (p != nullptr && force && !dep_optional)
{
@@ -1118,9 +1246,9 @@ namespace bpkg
(f ? dr << fail :
w ? dr << warn :
dr << info)
- << "package " << name << " dependency on "
+ << "package " << name << pdb << " dependency on "
<< (c ? "(" : "") << d << (c ? ")" : "") << " is forcing "
- << (u ? "up" : "down") << "grade of " << *dsp << " to ";
+ << (u ? "up" : "down") << "grade of " << *dsp << *ddb << " to ";
// Print both (old and new) package names in full if the system
// attribution changes.
@@ -1131,7 +1259,7 @@ namespace bpkg
dr << av; // Can't be a system version so is never wildcard.
if (dsp->hold_version)
- dr << info << "package version " << *dsp << " is held";
+ dr << info << "package version " << *dsp << *ddb << " is held";
if (f)
dr << info << "explicitly request version "
@@ -1144,12 +1272,13 @@ namespace bpkg
// Collect the package being dropped.
//
void
- collect_drop (shared_ptr<selected_package> sp)
+ collect_drop (database& db, shared_ptr<selected_package> sp)
{
const package_name& nm (sp->name);
build_package p {
build_package::drop,
+ db,
move (sp),
nullptr,
nullptr,
@@ -1164,7 +1293,7 @@ namespace bpkg
{}, // Required by.
0}; // Adjustments.
- auto i (map_.find (nm));
+ auto i (map_.find (db, nm));
if (i != map_.end ())
{
@@ -1180,15 +1309,16 @@ namespace bpkg
bp = move (p);
}
else
- map_.emplace (nm, data_type {end (), move (p)});
+ map_.emplace (config_package {db, nm},
+ data_type {end (), move (p)});
}
// Collect the package being unheld.
//
void
- collect_unhold (const shared_ptr<selected_package>& sp)
+ collect_unhold (database& db, const shared_ptr<selected_package>& sp)
{
- auto i (map_.find (sp->name));
+ auto i (map_.find (db, sp->name));
// Currently, it must always be pre-entered.
//
@@ -1200,6 +1330,7 @@ namespace bpkg
{
build_package p {
build_package::adjust,
+ db,
sp,
nullptr,
nullptr,
@@ -1223,20 +1354,17 @@ namespace bpkg
void
collect_build_prerequisites (const common_options& o,
- const dir_path& cd,
database& db,
const package_name& name,
postponed_packages& postponed)
{
- auto mi (map_.find (name));
+ auto mi (map_.find (db, name));
assert (mi != map_.end ());
- collect_build_prerequisites (o, cd, db, mi->second.package, &postponed);
+ collect_build_prerequisites (o, mi->second.package, &postponed);
}
void
collect_build_postponed (const common_options& o,
- const dir_path& cd,
- database& db,
postponed_packages& pkgs)
{
// Try collecting postponed packages for as long as we are making
@@ -1247,7 +1375,7 @@ namespace bpkg
postponed_packages npkgs;
for (const build_package* p: pkgs)
- collect_build_prerequisites (o, cd, db, *p, prog ? &npkgs : nullptr);
+ collect_build_prerequisites (o, *p, prog ? &npkgs : nullptr);
assert (prog); // collect_build_prerequisites() should have failed.
prog = (npkgs != pkgs);
@@ -1256,16 +1384,25 @@ namespace bpkg
}
// Order the previously-collected package with the specified name
- // returning its positions. Recursively order the package dependencies
- // being ordered failing if a dependency cycle is detected. If reorder is
- // true, then reorder this package to be considered as "early" as
- // possible.
+ // returning its positions.
+ //
+ // If buildtime is nullopt, then search for the specified package build in
+ // only the specified database. Otherwise, treat the package as a
+ // dependency and search for the build recursively (see
+ // config_package_map::find_dependency() for details).
+ //
+    // Recursively order the package dependencies being ordered, failing if a
+ // dependency cycle is detected. If reorder is true, then reorder this
+ // package to be considered as "early" as possible.
//
iterator
- order (const package_name& name, bool reorder = true)
+ order (database& db,
+ const package_name& name,
+ optional<bool> buildtime,
+ bool reorder = true)
{
- package_names chain;
- return order (name, chain, reorder);
+ config_package_names chain;
+ return order (db, name, buildtime, chain, reorder);
}
// If a configured package is being up/down-graded then that means
@@ -1284,7 +1421,7 @@ namespace bpkg
// to also notice this.
//
void
- collect_order_dependents (database& db)
+ collect_order_dependents ()
{
// For each package on the list we want to insert all its dependents
// before it so that they get configured after the package on which
@@ -1305,18 +1442,20 @@ namespace bpkg
// Dropped package may have no dependents.
//
if (*p.action != build_package::drop && p.reconfigure ())
- collect_order_dependents (db, i);
+ collect_order_dependents (i);
}
}
void
- collect_order_dependents (database& db, iterator pos)
+ collect_order_dependents (iterator pos)
{
tracer trace ("collect_order_dependents");
assert (pos != end ());
build_package& p (*pos);
+
+ database& pdb (p.db);
const shared_ptr<selected_package>& sp (p.selected);
const package_name& n (sp->name);
@@ -1328,166 +1467,173 @@ namespace bpkg
? sp->version.compare (p.available_version ())
: 0);
- using query = query<package_dependent>;
-
- for (auto& pd: db.query<package_dependent> (query::name == n))
+ for (database& ddb: pdb.dependent_configs ())
{
- package_name& dn (pd.name);
- auto i (map_.find (dn));
-
- // First make sure the up/downgraded package still satisfies this
- // dependent.
- //
- bool check (ud != 0 && pd.constraint);
-
- // There is one tricky aspect: the dependent could be in the process
- // of being up/downgraded as well. In this case all we need to do is
- // detect this situation and skip the test since all the (new)
- // contraints of this package have been satisfied in collect_build().
- //
- if (check && i != map_.end () && i->second.position != end ())
+ for (auto& pd: query_dependents (ddb, n, pdb))
{
- build_package& dp (i->second.package);
-
- check = dp.available == nullptr ||
- (dp.selected->system () == dp.system &&
- dp.selected->version == dp.available_version ());
- }
+ package_name& dn (pd.name);
+ auto i (map_.find (ddb, dn));
- if (check)
- {
- const version& av (p.available_version ());
- const version_constraint& c (*pd.constraint);
+ // First make sure the up/downgraded package still satisfies this
+ // dependent.
+ //
+ bool check (ud != 0 && pd.constraint);
- if (!satisfies (av, c))
+ // There is one tricky aspect: the dependent could be in the process
+ // of being up/downgraded as well. In this case all we need to do is
+ // detect this situation and skip the test since all the (new)
+      // constraints of this package have been satisfied in
+ // collect_build().
+ //
+ if (check && i != map_.end () && i->second.position != end ())
{
- diag_record dr (fail);
+ build_package& dp (i->second.package);
- dr << "unable to " << (ud < 0 ? "up" : "down") << "grade "
- << "package " << *sp << " to ";
-
- // Print both (old and new) package names in full if the system
- // attribution changes.
- //
- if (p.system != sp->system ())
- dr << p.available_name_version ();
- else
- dr << av; // Can't be the wildcard otherwise would satisfy.
+ check = dp.available == nullptr ||
+ (dp.selected->system () == dp.system &&
+ dp.selected->version == dp.available_version ());
+ }
- dr << info << "because package " << dn << " depends on (" << n
- << " " << c << ")";
+ if (check)
+ {
+ const version& av (p.available_version ());
+ const version_constraint& c (*pd.constraint);
- string rb;
- if (!p.user_selection ())
+ if (!satisfies (av, c))
{
- for (const package_name& n: p.required_by)
- rb += ' ' + n.string ();
- }
+ diag_record dr (fail);
+
+ dr << "unable to " << (ud < 0 ? "up" : "down") << "grade "
+ << "package " << *sp << pdb << " to ";
+
+ // Print both (old and new) package names in full if the system
+ // attribution changes.
+ //
+ if (p.system != sp->system ())
+ dr << p.available_name_version ();
+ else
+ dr << av; // Can't be the wildcard otherwise would satisfy.
- if (!rb.empty ())
- dr << info << "package " << p.available_name_version ()
- << " required by" << rb;
+ dr << info << "because package " << dn << ddb << " depends on ("
+ << n << " " << c << ")";
- dr << info << "explicitly request up/downgrade of package " << dn;
+ string rb;
+ if (!p.user_selection ())
+ {
+ for (const config_package& cp: p.required_by)
+ rb += ' ' + cp.string ();
+ }
- dr << info << "or explicitly specify package " << n << " version "
- << "to manually satisfy these constraints";
- }
+ if (!rb.empty ())
+ dr << info << "package " << p.available_name_version ()
+ << " required by" << rb;
- // Add this contraint to the list for completeness.
- //
- p.constraints.emplace_back (dn.string (), c);
- }
+ dr << info << "explicitly request up/downgrade of package "
+ << dn;
- auto adjustment = [&dn, &n, &db] () -> build_package
- {
- shared_ptr<selected_package> dsp (db.load<selected_package> (dn));
- bool system (dsp->system ()); // Save flag before the move(dsp) call.
-
- return build_package {
- build_package::adjust,
- move (dsp),
- nullptr, // No available package/repository fragment.
- nullptr,
- nullopt, // Hold package.
- nullopt, // Hold version.
- {}, // Constraints.
- system,
- false, // Keep output directory.
- nullopt, // Checkout root.
- false, // Checkout purge.
- strings (), // Configuration variables.
- {n}, // Required by (dependency).
- build_package::adjust_reconfigure};
- };
+ dr << info << "or explicitly specify package " << n
+ << " version to manually satisfy these constraints";
+ }
- // We can have three cases here: the package is already on the
- // list, the package is in the map (but not on the list) and it
- // is in neither.
- //
- // If the existing entry is a drop, then we skip it. If it is
- // pre-entered, is an adjustment, or is a build that is not supposed
- // to be built (not in the list), then we merge it into the new
- // adjustment entry. Otherwise (is a build in the list), we just add
- // the reconfigure adjustment flag to it.
- //
- if (i != map_.end ())
- {
- build_package& dp (i->second.package);
- iterator& dpos (i->second.position);
+            // Add this constraint to the list for completeness.
+ //
+ p.constraints.emplace_back (ddb, dn.string (), c);
+ }
- if (!dp.action || // Pre-entered.
- *dp.action != build_package::build || // Non-build.
- dpos == end ()) // Build not in the list.
+ auto adjustment = [&dn, &ddb, &n, &pdb] () -> build_package
{
- // Skip the droped package.
- //
- if (dp.action && *dp.action == build_package::drop)
- continue;
+ shared_ptr<selected_package> dsp (ddb.load<selected_package> (dn));
+
+ bool system (dsp->system ()); // Save before the move(dsp) call.
+
+ return build_package {
+ build_package::adjust,
+ ddb,
+ move (dsp),
+ nullptr, // No available pkg/repo fragment.
+ nullptr,
+ nullopt, // Hold package.
+ nullopt, // Hold version.
+ {}, // Constraints.
+ system,
+ false, // Keep output directory.
+ nullopt, // Checkout root.
+ false, // Checkout purge.
+ strings (), // Configuration variables.
+ {config_package {pdb, n}}, // Required by (dependency).
+ build_package::adjust_reconfigure};
+ };
- build_package bp (adjustment ());
- bp.merge (move (dp));
- dp = move (bp);
- }
- else // Build in the list.
- dp.adjustments |= build_package::adjust_reconfigure;
-
- // It may happen that the dependent is already in the list but is
- // not properly ordered against its dependencies that get into the
- // list via another dependency path. Thus, we check if the dependent
- // is to the right of its dependency and, if that's the case,
- // reinsert it in front of the dependency.
+ // We can have three cases here: the package is already on the
+ // list, the package is in the map (but not on the list) and it
+ // is in neither.
//
- if (dpos != end ())
+ // If the existing entry is a drop, then we skip it. If it is
+ // pre-entered, is an adjustment, or is a build that is not supposed
+ // to be built (not in the list), then we merge it into the new
+ // adjustment entry. Otherwise (is a build in the list), we just add
+ // the reconfigure adjustment flag to it.
+ //
+ if (i != map_.end ())
{
- for (auto i (pos); i != end (); ++i)
+ build_package& dp (i->second.package);
+ iterator& dpos (i->second.position);
+
+ if (!dp.action || // Pre-entered.
+ *dp.action != build_package::build || // Non-build.
+ dpos == end ()) // Build not in the list.
{
- if (i == dpos)
+            // Skip the dropped package.
+ //
+ if (dp.action && *dp.action == build_package::drop)
+ continue;
+
+ build_package bp (adjustment ());
+ bp.merge (move (dp));
+ dp = move (bp);
+ }
+ else // Build in the list.
+ dp.adjustments |= build_package::adjust_reconfigure;
+
+ // It may happen that the dependent is already in the list but is
+ // not properly ordered against its dependencies that get into the
+ // list via another dependency path. Thus, we check if the
+ // dependent is to the right of its dependency and, if that's the
+ // case, reinsert it in front of the dependency.
+ //
+ if (dpos != end ())
+ {
+ for (auto i (pos); i != end (); ++i)
{
- erase (dpos);
- dpos = insert (pos, dp);
- break;
+ if (i == dpos)
+ {
+ erase (dpos);
+ dpos = insert (pos, dp);
+ break;
+ }
}
}
+ else
+ dpos = insert (pos, dp);
}
else
- dpos = insert (pos, dp);
- }
- else
- {
- i = map_.emplace (
- move (dn), data_type {end (), adjustment ()}).first;
+ {
+ // Don't move dn since it is used by adjustment().
+ //
+ i = map_.emplace (config_package {ddb, dn},
+ data_type {end (), adjustment ()}).first;
- i->second.position = insert (pos, i->second.package);
- }
+ i->second.position = insert (pos, i->second.package);
+ }
- // Recursively collect our own dependents inserting them before us.
- //
- // Note that we cannot end up with an infinite recursion for
- // configured packages due to a dependency cycle (see order() for
- // details).
- //
- collect_order_dependents (db, i->second.position);
+ // Recursively collect our own dependents inserting them before us.
+ //
+ // Note that we cannot end up with an infinite recursion for
+ // configured packages due to a dependency cycle (see order() for
+ // details).
+ //
+ collect_order_dependents (i->second.position);
+ }
}
}
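The re-insertion step above relies on std::list iterator stability: erasing an element invalidates only that element's iterator, so the dependency's position stays valid across the erase/insert. Below is a minimal standalone sketch of the same idea with plain std::string elements (hypothetical package names, not bpkg's build_package list): if the dependent turns out to be to the right of its dependency, it is erased and re-inserted in front of that position.

#include <cassert>
#include <iterator>
#include <list>
#include <string>

int main ()
{
  // The build list keeps every dependent to the left of its dependencies.
  //
  std::list<std::string> l {"libbaz", "foo", "libbar"};

  auto pos (std::next (l.begin ()));      // Dependency position ("foo").
  auto dpos (std::next (l.begin (), 2));  // Dependent position ("libbar").

  // Check if the dependent is to the right of the dependency and, if so,
  // re-insert it in front of the dependency.
  //
  for (auto i (pos); i != l.end (); ++i)
  {
    if (i == dpos)
    {
      l.erase (dpos);
      dpos = l.insert (pos, "libbar");
      break;
    }
  }

  assert ((l == std::list<std::string> {"libbaz", "libbar", "foo"}));
}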
@@ -1508,34 +1654,54 @@ namespace bpkg
}
private:
- using package_names = small_vector<reference_wrapper<const package_name>,
- 16>;
+ struct config_package_name
+ {
+ database& db;
+ const package_name& name;
+
+ bool
+ operator== (const config_package_name& v)
+ {
+ return name == v.name && db == v.db;
+ }
+ };
+ using config_package_names = small_vector<config_package_name, 16>;
iterator
- order (const package_name& name, package_names& chain, bool reorder)
+ order (database& db,
+ const package_name& name,
+ optional<bool> buildtime,
+ config_package_names& chain,
+ bool reorder)
{
// Every package that we order should have already been collected.
//
- auto mi (map_.find (name));
+ auto mi (!buildtime
+ ? map_.find (db, name)
+ : map_.find_dependency (db, name, *buildtime));
+
assert (mi != map_.end ());
build_package& p (mi->second.package);
assert (p.action); // Can't order just a pre-entered package.
+ database& pdb (p.db);
+
// Make sure there is no dependency cycle.
//
+ config_package_name cp {pdb, name};
{
- auto i (find (chain.begin (), chain.end (), name));
+ auto i (find (chain.begin (), chain.end (), cp));
if (i != chain.end ())
{
diag_record dr (fail);
- dr << "dependency cycle detected involving package " << name;
+ dr << "dependency cycle detected involving package " << name << pdb;
- auto nv = [this] (const package_name& name)
+ auto nv = [this] (const config_package_name& cp)
{
- auto mi (map_.find (name));
+ auto mi (map_.find (cp.db, cp.name));
assert (mi != map_.end ());
build_package& p (mi->second.package);
@@ -1548,14 +1714,14 @@ namespace bpkg
//
assert (p.available != nullptr);
- return p.available_name_version ();
+ return p.available_name_version_db ();
};
// Note: push_back() can invalidate the iterator.
//
size_t j (i - chain.begin ());
- for (chain.push_back (name); j != chain.size () - 1; ++j)
+ for (chain.push_back (cp); j != chain.size () - 1; ++j)
dr << info << nv (chain[j]) << " depends on " << nv (chain[j + 1]);
}
}
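For reference, here is a small self-contained sketch of this chain-based cycle check using hypothetical stand-in types (a configuration is just a string, standing in for database&): a node is identified by the (configuration, package) pair, so the same package name in two different configurations does not produce a false positive, and the offending path is printed by walking the chain from the first occurrence.

#include <algorithm>
#include <cstddef>
#include <iostream>
#include <string>
#include <vector>

struct node
{
  std::string config; // Stand-in for database&.
  std::string name;

  bool operator== (const node& v) const
  {
    return name == v.name && config == v.config;
  }
};

// Return true (and print the cycle path) if n is already on the chain.
//
static bool
check_cycle (std::vector<node>& chain, const node& n)
{
  auto i (std::find (chain.begin (), chain.end (), n));

  if (i == chain.end ())
    return false;

  std::cerr << "dependency cycle detected involving package " << n.name
            << " [" << n.config << "]\n";

  std::size_t j (i - chain.begin ()); // Save the index: push_back() invalidates i.

  for (chain.push_back (n); j != chain.size () - 1; ++j)
    std::cerr << "  " << chain[j].name << " depends on "
              << chain[j + 1].name << '\n';

  return true;
}

int main ()
{
  std::vector<node> chain {{"cfg1", "libfoo"}, {"cfg1", "libbar"}};

  check_cycle (chain, {"cfg2", "libfoo"}); // Different config: no cycle.
  check_cycle (chain, {"cfg1", "libfoo"}); // libfoo -> libbar -> libfoo.
}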
@@ -1577,7 +1743,7 @@ namespace bpkg
// position of its "earliest" prerequisite -- this is where it
// will be inserted.
//
- const shared_ptr<selected_package>& sp (p.selected);
+ const shared_ptr<selected_package>& sp (p.selected);
const shared_ptr<available_package>& ap (p.available);
bool build (*p.action == build_package::build);
@@ -1624,7 +1790,7 @@ namespace bpkg
bool order_disfigured (src_conf && disfigure (p));
- chain.push_back (name);
+ chain.push_back (cp);
// Order the build dependencies.
//
@@ -1639,13 +1805,18 @@ namespace bpkg
{
for (const auto& p: sp->prerequisites)
{
+ database& db (p.first.database ());
const package_name& name (p.first.object_id ());
// The prerequisites may not necessarily be in the map.
//
- auto i (map_.find (name));
+ auto i (map_.find (db, name));
if (i != map_.end () && i->second.package.action)
- update (order (name, chain, false /* reorder */));
+ update (order (db,
+ name,
+ nullopt /* buildtime */,
+ chain,
+ false /* reorder */));
}
// We just ordered them among other prerequisites.
@@ -1671,7 +1842,11 @@ namespace bpkg
if (da.buildtime && (dn == "build2" || dn == "bpkg"))
continue;
- update (order (d.name, chain, false /* reorder */));
+ update (order (pdb,
+ d.name,
+ da.buildtime,
+ chain,
+ false /* reorder */));
}
}
}
@@ -1682,14 +1857,19 @@ namespace bpkg
{
for (const auto& p: sp->prerequisites)
{
+ database& db (p.first.database ());
const package_name& name (p.first.object_id ());
// The prerequisites may not necessarily be in the map.
//
- auto i (map_.find (name));
+ auto i (map_.find (db, name));
if (i != map_.end () && disfigure (i->second.package))
- update (order (name, chain, false /* reorder */));
+ update (order (db,
+ name,
+ nullopt /* buildtime */,
+ chain,
+ false /* reorder */));
}
}
@@ -1705,7 +1885,49 @@ namespace bpkg
build_package package;
};
- map<package_name, data_type> map_;
+ class config_package_map: public map<config_package, data_type>
+ {
+ public:
+ using base_type = map<config_package, data_type>;
+
+ iterator
+ find (database& db, const package_name& pn)
+ {
+ return base_type::find (config_package {db, pn});
+ }
+
+ // Try to find a package build in the dependency configurations (see
+ // database::dependency_configs() for details). Return the end iterator
+    // if no build is found. Issue diagnostics and fail if multiple builds
+    // (in multiple configurations) are found.
+ //
+ iterator
+ find_dependency (database& db, const package_name& pn, bool buildtime)
+ {
+ iterator r (end ());
+
+ associated_databases adbs (db.dependency_configs (buildtime));
+
+ for (database& adb: adbs)
+ {
+ iterator i (find (adb, pn));
+ if (i != end ())
+ {
+ if (r == end ())
+ r = i;
+ else
+ fail << "building package " << pn << " in multiple "
+ << "configurations" <<
+ info << r->first.db.config_orig <<
+ info << adb.config_orig <<
+ info << "use --config-* to select package configuration";
+ }
+ }
+
+ return r;
+ }
+ };
+ config_package_map map_;
};
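To illustrate the find_dependency() semantics with a self-contained example (simplified stand-in types, not bpkg's database or config_package): the builds collected so far are keyed by a (configuration, package) pair and the candidate configurations are searched one by one; a single hit is returned, no hit yields nullopt, and more than one hit is treated as an ambiguity the user has to resolve (with --config-* in the real command).

#include <map>
#include <optional>
#include <stdexcept>
#include <string>
#include <utility>
#include <vector>

using key = std::pair<std::string, std::string>; // (configuration, package)

// The mapped value is irrelevant for the lookup; int is a placeholder.
//
std::optional<key>
find_dependency (const std::map<key, int>& builds,
                 const std::vector<std::string>& configs,
                 const std::string& package)
{
  std::optional<key> r;

  for (const std::string& c: configs)
  {
    auto i (builds.find (key (c, package)));

    if (i != builds.end ())
    {
      if (r)
        throw std::runtime_error ("building package " + package +
                                  " in multiple configurations");
      r = i->first;
    }
  }

  return r; // nullopt if the package is not being built anywhere.
}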
// Return a patch version constraint for the selected package if it has a
@@ -1759,15 +1981,16 @@ namespace bpkg
//
struct dependency_package
{
- package_name name;
- optional<version_constraint> constraint; // nullopt if unspecified.
- shared_ptr<selected_package> selected; // NULL if not present.
- bool system;
- bool patch; // Only for an empty version.
- bool keep_out;
- optional<dir_path> checkout_root;
- bool checkout_purge;
- strings config_vars; // Only if not system.
+ database& db;
+ package_name name;
+ optional<version_constraint> constraint; // nullopt if unspecified.
+ shared_ptr<selected_package> selected; // NULL if not present.
+ bool system;
+ bool patch; // Only for an empty version.
+ bool keep_out;
+ optional<dir_path> checkout_root;
+ bool checkout_purge;
+ strings config_vars; // Only if not system.
};
using dependency_packages = vector<dependency_package>;
@@ -1788,14 +2011,25 @@ namespace bpkg
//
struct evaluate_result
{
- shared_ptr<available_package> available;
+ shared_ptr<available_package> available;
shared_ptr<bpkg::repository_fragment> repository_fragment;
- bool unused;
- bool system; // Is meaningless if unused.
+ bool unused;
+ bool system; // Is meaningless if unused.
+ };
+
+ struct config_package_dependent
+ {
+ database& db;
+ shared_ptr<selected_package> package;
+ optional<version_constraint> constraint;
+
+ config_package_dependent (database& d,
+ shared_ptr<selected_package> p,
+ optional<version_constraint> c)
+ : db (d), package (move (p)), constraint (move (c)) {}
};
- using package_dependents = vector<pair<shared_ptr<selected_package>,
- optional<version_constraint>>>;
+ using config_package_dependents = vector<config_package_dependent>;
static optional<evaluate_result>
evaluate_dependency (database&,
@@ -1805,13 +2039,13 @@ namespace bpkg
bool patch,
bool explicitly,
const set<shared_ptr<repository_fragment>>&,
- const package_dependents&,
+ const config_package_dependents&,
bool ignore_unsatisfiable);
static optional<evaluate_result>
evaluate_dependency (database& db,
- const dependency_packages& deps,
const shared_ptr<selected_package>& sp,
+ const dependency_packages& deps,
bool ignore_unsatisfiable)
{
tracer trace ("evaluate_dependency");
@@ -1820,14 +2054,49 @@ namespace bpkg
const package_name& nm (sp->name);
- // Query the dependents and bail out if the dependency is unused.
+ // If there are no user expectations regarding this dependency, then we
+    // give no up/down-grade recommendation, unless there are no dependents,
+    // in which case we recommend dropping the dependency.
//
- auto pds (db.query<package_dependent> (
- query<package_dependent>::name == nm));
+ // Note that it would be easier to check for the dependent's presence
+    // first and, if present, for the user expectations afterwards. We,
+ // however, don't want to needlessly query all the explicitly associated
+ // databases (which can be many) for dependents if we can bail out
+ // earlier.
+ //
+ auto i (find_if (
+ deps.begin (), deps.end (),
+ [&nm, &db] (const dependency_package& i)
+ {
+ return i.name == nm && i.db == db;
+ }));
+
+ bool no_rec (i == deps.end ());
+
+ vector<pair<database&, package_dependent>> pds;
+ for (database& ddb: db.dependent_configs ())
+ {
+ auto ds (query_dependents (ddb, nm, db));
+
+ // Bail out if the dependency is used but there are no user expectations
+      // regarding it.
+ //
+ if (!ds.empty ())
+ {
+ if (no_rec)
+ return nullopt;
+
+ for (auto& d: ds)
+ pds.emplace_back (ddb, move (d));
+ }
+ }
+
+ // Bail out if the dependency is unused.
+ //
if (pds.empty ())
{
- l5 ([&]{trace << *sp << ": unused";});
+ l5 ([&]{trace << *sp << db << ": unused";});
return evaluate_result {nullptr /* available */,
nullptr /* repository_fragment */,
@@ -1835,16 +2104,6 @@ namespace bpkg
false /* system */};
}
- // If there are no user expectations regarding this dependency, then we
- // give no up/down-grade recommendation.
- //
- auto i (find_if (
- deps.begin (), deps.end (),
- [&nm] (const dependency_package& i) {return i.name == nm;}));
-
- if (i == deps.end ())
- return nullopt;
-
// If the selected package matches the user expectations then no package
// change is required.
//
@@ -1860,7 +2119,7 @@ namespace bpkg
dvc &&
(ssys ? sv == *dvc->min_version : satisfies (sv, dvc)))
{
- l5 ([&]{trace << *sp << ": unchanged";});
+ l5 ([&]{trace << *sp << db << ": unchanged";});
return evaluate_result {nullptr /* available */,
nullptr /* repository_fragment */,
@@ -1873,14 +2132,20 @@ namespace bpkg
// dependency.
//
set<shared_ptr<repository_fragment>> repo_frags;
- package_dependents dependents;
+ config_package_dependents dependents;
+
+ database& mdb (db.main_database ());
for (auto& pd: pds)
{
- shared_ptr<selected_package> dsp (db.load<selected_package> (pd.name));
+ database& ddb (pd.first);
+ package_dependent& dep (pd.second);
+
+ shared_ptr<selected_package> dsp (
+ ddb.load<selected_package> (dep.name));
shared_ptr<available_package> dap (
- db.find<available_package> (
+ mdb.find<available_package> (
available_package_id (dsp->name, dsp->version)));
if (dap != nullptr)
@@ -1891,7 +2156,7 @@ namespace bpkg
repo_frags.insert (pl.repository_fragment.load ());
}
- dependents.emplace_back (move (dsp), move (pd.constraint));
+ dependents.emplace_back (ddb, move (dsp), move (dep.constraint));
}
return evaluate_dependency (db,
@@ -1905,6 +2170,29 @@ namespace bpkg
ignore_unsatisfiable);
}
+ struct config_selected_package
+ {
+ database& db;
+ const shared_ptr<selected_package>& package;
+
+ config_selected_package (database& d,
+ const shared_ptr<selected_package>& p)
+ : db (d), package (p) {}
+
+ bool
+ operator== (const config_selected_package& v) const
+ {
+ return package->name == v.package->name && db == v.db;
+ }
+
+ bool
+ operator< (const config_selected_package& v) const
+ {
+ int r (package->name.compare (v.package->name));
+ return r != 0 ? (r < 0) : (db < v.db);
+ }
+ };
+
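A hedged sketch of the ordering above with plain std::string stand-ins: comparing the package name first and the configuration only on ties gives a strict weak ordering suitable for std::set, and it keeps diagnostics grouped by package name while still distinguishing the same package selected in different configurations.

#include <set>
#include <string>
#include <tuple>

struct key
{
  std::string name;   // Package name.
  std::string config; // Stand-in for database&.

  bool operator< (const key& v) const
  {
    // Name first, configuration only as a tie-breaker.
    //
    return std::tie (name, config) < std::tie (v.name, v.config);
  }
};

using key_set = std::set<key>; // E.g., unsatisfied dependents per version.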
static optional<evaluate_result>
evaluate_dependency (database& db,
const shared_ptr<selected_package>& sp,
@@ -1913,7 +2201,7 @@ namespace bpkg
bool patch,
bool explicitly,
const set<shared_ptr<repository_fragment>>& rfs,
- const package_dependents& dependents,
+ const config_package_dependents& dependents,
bool ignore_unsatisfiable)
{
tracer trace ("evaluate_dependency");
@@ -1947,7 +2235,7 @@ namespace bpkg
if (!c)
{
- l5 ([&]{trace << *sp << ": non-patchable";});
+ l5 ([&]{trace << *sp << db << ": non-patchable";});
return no_change ();
}
}
@@ -1957,7 +2245,7 @@ namespace bpkg
vector<pair<shared_ptr<available_package>,
shared_ptr<repository_fragment>>> afs (
- find_available (db,
+ find_available (db.main_database (),
nm,
c,
vector<shared_ptr<repository_fragment>> (rfs.begin (),
@@ -1967,30 +2255,21 @@ namespace bpkg
// satisfies all the dependents. Collect (and sort) unsatisfied dependents
// per the unsatisfiable version in case we need to print them.
//
- struct compare_sp
- {
- bool
- operator() (const shared_ptr<selected_package>& x,
- const shared_ptr<selected_package>& y) const
- {
- return x->name < y->name;
- }
- };
-
- using sp_set = set<reference_wrapper<const shared_ptr<selected_package>>,
- compare_sp>;
+ using sp_set = set<config_selected_package>;
vector<pair<version, sp_set>> unsatisfiable;
bool stub (false);
bool ssys (sp->system ());
- assert (!dsys || system_repository.find (nm) != nullptr);
+ assert (!dsys ||
+ (db.system_repository &&
+ db.system_repository->find (nm) != nullptr));
for (auto& af: afs)
{
shared_ptr<available_package>& ap (af.first);
- const version& av (!dsys ? ap->version : *ap->system_version ());
+ const version& av (!dsys ? ap->version : *ap->system_version (db));
// If we aim to upgrade to the latest version and it tends to be less
      // than the selected one, then what we currently have is the best that
@@ -2007,7 +2286,7 @@ namespace bpkg
//
if (!ssys)
{
- l5 ([&]{trace << *sp << ": best";});
+ l5 ([&]{trace << *sp << db << ": best";});
return no_change ();
}
@@ -2029,7 +2308,7 @@ namespace bpkg
for (const auto& dp: dependents)
{
- if (!satisfies (av, dp.second))
+ if (!satisfies (av, dp.constraint))
{
satisfactory = false;
@@ -2039,7 +2318,7 @@ namespace bpkg
if (ignore_unsatisfiable)
break;
- unsatisfied_dependents.insert (dp.first);
+ unsatisfied_dependents.emplace (dp.db, dp.package);
}
}
@@ -2064,11 +2343,11 @@ namespace bpkg
//
if (av == sv && ssys == dsys)
{
- l5 ([&]{trace << *sp << ": unchanged";});
+ l5 ([&]{trace << *sp << db << ": unchanged";});
return no_change ();
}
- l5 ([&]{trace << *sp << ": update to "
+ l5 ([&]{trace << *sp << db << ": update to "
<< package_string (nm, av, dsys);});
return evaluate_result {
@@ -2083,18 +2362,18 @@ namespace bpkg
{
assert (!dsys); // Version cannot be empty for the system package.
- l5 ([&]{trace << *sp << ": only";});
+ l5 ([&]{trace << *sp << db << ": only";});
return no_change ();
}
// If the version satisfying the desired dependency version constraint is
// unavailable or unsatisfiable for some dependents then we fail, unless
- // requested not to do so. In the later case we return the "no change"
+ // requested not to do so. In the latter case we return the "no change"
// result.
//
if (ignore_unsatisfiable)
{
- l5 ([&]{trace << package_string (nm, dvc, dsys)
+ l5 ([&]{trace << package_string (nm, dvc, dsys) << db
<< (unsatisfiable.empty ()
? ": no source"
: ": unsatisfiable");});
@@ -2118,11 +2397,11 @@ namespace bpkg
//
assert (explicitly);
- fail << "patch version for " << *sp << " is not available "
+ fail << "patch version for " << *sp << db << " is not available "
<< "from its dependents' repositories";
}
else if (!stub)
- fail << package_string (nm, dsys ? nullopt : dvc)
+ fail << package_string (nm, dsys ? nullopt : dvc) << db
<< " is not available from its dependents' repositories";
else // The only available package is a stub.
{
@@ -2131,15 +2410,15 @@ namespace bpkg
//
assert (!dvc && !dsys && ssys);
- fail << package_string (nm, dvc) << " is not available in source "
- << "from its dependents' repositories";
+ fail << package_string (nm, dvc) << db << " is not available in "
+ << "source from its dependents' repositories";
}
}
// Issue the diagnostics and fail.
//
diag_record dr (fail);
- dr << "package " << nm << " doesn't satisfy its dependents";
+ dr << "package " << nm << db << " doesn't satisfy its dependents";
// Print the list of unsatisfiable versions together with dependents they
// don't satisfy: up to three latest versions with no more than five
@@ -2152,9 +2431,9 @@ namespace bpkg
size_t n (0);
const sp_set& ps (u.second);
- for (const shared_ptr<selected_package>& p: ps)
+ for (const config_selected_package& p: ps)
{
- dr << ' ' << *p;
+ dr << ' ' << *p.package << p.db;
if (++n == 5 && ps.size () != 6) // Printing 'and 1 more' looks stupid.
break;
@@ -2178,6 +2457,7 @@ namespace bpkg
//
struct recursive_package
{
+ database& db;
package_name name;
bool upgrade; // true -- upgrade, false -- patch.
bool recursive; // true -- recursive, false -- immediate.
@@ -2191,18 +2471,18 @@ namespace bpkg
static optional<bool>
upgrade_dependencies (database& db,
const package_name& nm,
- const recursive_packages& recs,
+ const recursive_packages& rs,
bool recursion = false)
{
- auto i (find_if (recs.begin (), recs.end (),
- [&nm] (const recursive_package& i) -> bool
+ auto i (find_if (rs.begin (), rs.end (),
+ [&nm, &db] (const recursive_package& i) -> bool
{
- return i.name == nm;
+ return i.name == nm && i.db == db;
}));
optional<bool> r;
- if (i != recs.end () && i->recursive >= recursion)
+ if (i != rs.end () && i->recursive >= recursion)
{
r = i->upgrade;
@@ -2210,20 +2490,23 @@ namespace bpkg
return r;
}
- for (const auto& pd: db.query<package_dependent> (
- query<package_dependent>::name == nm))
+ for (database& ddb: db.dependent_configs ())
{
- // Note that we cannot end up with an infinite recursion for configured
- // packages due to a dependency cycle (see order() for details).
- //
- if (optional<bool> u = upgrade_dependencies (db, pd.name, recs, true))
+ for (auto& pd: query_dependents (ddb, nm, db))
{
- if (!r || *r < *u) // Upgrade wins patch.
+ // Note that we cannot end up with an infinite recursion for
+ // configured packages due to a dependency cycle (see order() for
+ // details).
+ //
+ if (optional<bool> u = upgrade_dependencies (ddb, pd.name, rs, true))
{
- r = u;
+ if (!r || *r < *u) // Upgrade wins patch.
+ {
+ r = u;
- if (*r) // Upgrade (vs patch)?
- return r;
+ if (*r) // Upgrade (vs patch)?
+ return r;
+ }
}
}
}
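The recursion above merges the recommendations of all (transitive) dependents, with an upgrade beating a patch and allowing an early return. Here is a stripped-down sketch with hypothetical in-memory maps instead of database queries (the immediate/recursive distinction is omitted and, as the real code notes for configured packages, the dependent graph is assumed acyclic).

#include <map>
#include <optional>
#include <string>
#include <vector>

using graph = std::map<std::string, std::vector<std::string>>; // pkg -> dependents
using marks = std::map<std::string, bool>; // pkg -> true (upgrade) / false (patch)

std::optional<bool>
upgrade_dependencies (const graph& g, const marks& m, const std::string& p)
{
  std::optional<bool> r;

  auto i (m.find (p));
  if (i != m.end ())
  {
    r = i->second;

    if (*r) // Upgrade (vs patch)?
      return r;
  }

  auto j (g.find (p));
  if (j != g.end ())
  {
    for (const std::string& d: j->second)
    {
      if (std::optional<bool> u = upgrade_dependencies (g, m, d))
      {
        if (!r || *r < *u) // Upgrade wins patch.
        {
          r = u;

          if (*r)
            return r;
        }
      }
    }
  }

  return r; // nullopt: no dependent asks for an upgrade or a patch.
}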
@@ -2244,8 +2527,8 @@ namespace bpkg
//
static optional<evaluate_result>
evaluate_recursive (database& db,
- const recursive_packages& recs,
const shared_ptr<selected_package>& sp,
+ const recursive_packages& recs,
bool ignore_unsatisfiable)
{
tracer trace ("evaluate_recursive");
@@ -2257,10 +2540,7 @@ namespace bpkg
// dependency.
//
set<shared_ptr<repository_fragment>> repo_frags;
- package_dependents dependents;
-
- auto pds (db.query<package_dependent> (
- query<package_dependent>::name == sp->name));
+ config_package_dependents dependents;
// Only collect repository fragments (for best version selection) of
// (immediate) dependents that have a hit (direct or indirect) in recs.
@@ -2268,39 +2548,46 @@ namespace bpkg
//
optional<bool> upgrade;
- for (const auto& pd: pds)
- {
- shared_ptr<selected_package> dsp (db.load<selected_package> (pd.name));
- dependents.emplace_back (dsp, move (pd.constraint));
+ database& mdb (db.main_database ());
- if (optional<bool> u = upgrade_dependencies (db, pd.name, recs))
+ for (database& ddb: db.dependent_configs ())
+ {
+ for (auto& pd: query_dependents (ddb, sp->name, db))
{
- if (!upgrade || *upgrade < *u) // Upgrade wins patch.
- upgrade = u;
- }
- else
- continue;
+ shared_ptr<selected_package> dsp (
+ ddb.load<selected_package> (pd.name));
- // While we already know that the dependency upgrade is required, we
- // continue to iterate over dependents, collecting the repository
- // fragments and the constraints.
- //
- shared_ptr<available_package> dap (
- db.find<available_package> (
- available_package_id (dsp->name, dsp->version)));
+ dependents.emplace_back (ddb, dsp, move (pd.constraint));
- if (dap != nullptr)
- {
- assert (!dap->locations.empty ());
+ if (optional<bool> u = upgrade_dependencies (ddb, pd.name, recs))
+ {
+ if (!upgrade || *upgrade < *u) // Upgrade wins patch.
+ upgrade = u;
+ }
+ else
+ continue;
- for (const auto& pl: dap->locations)
- repo_frags.insert (pl.repository_fragment.load ());
+ // While we already know that the dependency upgrade is required, we
+ // continue to iterate over dependents, collecting the repository
+ // fragments and the constraints.
+ //
+ shared_ptr<available_package> dap (
+ mdb.find<available_package> (
+ available_package_id (dsp->name, dsp->version)));
+
+ if (dap != nullptr)
+ {
+ assert (!dap->locations.empty ());
+
+ for (const auto& pl: dap->locations)
+ repo_frags.insert (pl.repository_fragment.load ());
+ }
}
}
if (!upgrade)
{
- l5 ([&]{trace << *sp << ": no hit";});
+ l5 ([&]{trace << *sp << db << ": no hit";});
return nullopt;
}
@@ -2323,12 +2610,10 @@ namespace bpkg
return r && r->available == nullptr ? nullopt : r;
}
- static void
- execute_plan (const pkg_build_options&,
- const dir_path&,
- database&,
- build_package_list&,
- bool simulate);
+ // Return false if the plan execution was noop.
+ //
+ static bool
+ execute_plan (const pkg_build_options&, build_package_list&, bool simulate);
using pkg_options = pkg_build_pkg_options;
@@ -2357,7 +2642,13 @@ namespace bpkg
(o.upgrade_recursive () ? 1 : 0) +
(o.patch_immediate () ? 1 : 0) +
(o.patch_recursive () ? 1 : 0)) > 1)
- fail << "multiple --(upgrade|patch)-(immediate|recursive) specified";
+ dr << fail << "multiple --(upgrade|patch)-(immediate|recursive) "
+ << "specified";
+
+ if (((o.config_id_specified () ? 1 : 0) +
+ (o.config_name_specified () ? 1 : 0) +
+ (o.config_uuid_specified () ? 1 : 0)) > 1)
+ dr << fail << "multiple --config-* specified";
if (!dr.empty () && !pkg.empty ())
dr << info << "while validating options for " << pkg;
@@ -2400,6 +2691,29 @@ namespace bpkg
}
dst.checkout_purge (src.checkout_purge () || dst.checkout_purge ());
+
+ if (!dst.config_id_specified () &&
+ !dst.config_name_specified () &&
+ !dst.config_uuid_specified ())
+ {
+ if (src.config_id_specified ())
+ {
+ dst.config_id (src.config_id ());
+ dst.config_id_specified (true);
+ }
+
+ if (src.config_name_specified ())
+ {
+ dst.config_name (src.config_name ());
+ dst.config_name_specified (true);
+ }
+
+ if (src.config_uuid_specified ())
+ {
+ dst.config_uuid (src.config_uuid ());
+ dst.config_uuid_specified (true);
+ }
+ }
}
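A minimal sketch of this merge rule using a hypothetical options struct (the real class is generated from the .cli file): the group-level --config-* values are copied into the package-level options only if the latter specify none of the three, which both lets a package-level option override the group and preserves the "at most one of --config-id|--config-name|--config-uuid" invariant checked earlier.

#include <cstdint>
#include <optional>
#include <string>

struct config_options
{
  std::optional<std::uint16_t> id;
  std::optional<std::string>   name;
  std::optional<std::string>   uuid;
};

void
merge_config (const config_options& src, config_options& dst)
{
  // Only copy if the destination specifies no --config-* option at all.
  //
  if (!dst.id && !dst.name && !dst.uuid)
  {
    if (src.id)   dst.id   = src.id;
    if (src.name) dst.name = src.name;
    if (src.uuid) dst.uuid = src.uuid;
  }
}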
static bool
@@ -2416,7 +2730,10 @@ namespace bpkg
x.patch_immediate () == y.patch_immediate () &&
x.patch_recursive () == y.patch_recursive () &&
x.checkout_root () == y.checkout_root () &&
- x.checkout_purge () == y.checkout_purge ();
+ x.checkout_purge () == y.checkout_purge () &&
+ x.config_id () == y.config_id () &&
+ x.config_name () == y.config_name () &&
+ x.config_uuid () == y.config_uuid ();
}
int
@@ -2438,7 +2755,9 @@ namespace bpkg
fail << "package name argument expected" <<
info << "run 'bpkg help pkg-build' for more information";
- database db (open (c, trace)); // Also populates the system repository.
+ // Also populates the system repository.
+ //
+ database mdb (c, trace, true /* pre_attach */, true /* sys_rep */);
// Note that the session spans all our transactions. The idea here is that
// selected_package objects in build_packages below will be cached in this
@@ -2460,6 +2779,7 @@ namespace bpkg
//
struct pkg_spec
{
+ database* db; // A pointer since we build these objects incrementally.
string packages;
repository_location location;
pkg_options options;
@@ -2513,7 +2833,7 @@ namespace bpkg
vector<repository_location> locations;
- transaction t (db);
+ transaction t (mdb);
while (args.more ())
{
@@ -2563,6 +2883,15 @@ namespace bpkg
fail << e << " grouped for argument '" << a << "'";
}
+ // Note: main database if no --config-* option is specified.
+ //
+ if (ps.options.config_name_specified ())
+ ps.db = &mdb.find_attached (ps.options.config_name ());
+ else if (ps.options.config_uuid_specified ())
+ ps.db = &mdb.find_dependency_config (ps.options.config_uuid ());
+ else
+ ps.db = &mdb.find_attached (ps.options.config_id ());
+
if (!a.empty () && a[0] == '?')
{
ps.options.dependency (true);
@@ -2647,7 +2976,7 @@ namespace bpkg
( query::local && u + " COLLATE nocase = " + query::_val (l)));
#endif
- auto rs (db.query<repository> (q));
+ auto rs (mdb.query<repository> (q));
auto i (rs.begin ());
if (i != rs.end ())
@@ -2690,10 +3019,14 @@ namespace bpkg
t.commit ();
+ // Fetch the repositories in the main configuration.
+ //
+ // Note that during this build only the repositories information from
+    // Note that during this build only the repository information from
+ //
if (!locations.empty ())
rep_fetch (o,
- c,
- db,
+ mdb,
locations,
o.fetch_shallow (),
string () /* reason for "fetching ..." */);
@@ -2710,6 +3043,7 @@ namespace bpkg
//
struct pkg_arg
{
+ reference_wrapper<database> db;
package_scheme scheme;
package_name name;
optional<version_constraint> constraint;
@@ -2720,7 +3054,8 @@ namespace bpkg
// Create the parsed package argument.
//
- auto arg_package = [] (package_scheme sc,
+ auto arg_package = [] (database& db,
+ package_scheme sc,
package_name nm,
optional<version_constraint> vc,
pkg_options os,
@@ -2728,7 +3063,8 @@ namespace bpkg
{
assert (!vc || !vc->empty ()); // May not be empty if present.
- pkg_arg r {sc, move (nm), move (vc), string (), move (os), move (vs)};
+ pkg_arg r {
+ db, sc, move (nm), move (vc), string (), move (os), move (vs)};
switch (sc)
{
@@ -2742,14 +3078,20 @@ namespace bpkg
//
assert (r.constraint->min_version == r.constraint->max_version);
- const system_package* sp (system_repository.find (r.name));
+ assert (db.system_repository);
+
+ const system_package* sp (db.system_repository->find (r.name));
// Will deal with all the duplicates later.
//
if (sp == nullptr || !sp->authoritative)
- system_repository.insert (r.name,
- *r.constraint->min_version,
- true /* authoritative */);
+ {
+ assert (db.system_repository);
+
+ db.system_repository->insert (r.name,
+ *r.constraint->min_version,
+ true /* authoritative */);
+ }
break;
}
@@ -2761,9 +3103,13 @@ namespace bpkg
// Create the unparsed package argument.
//
- auto arg_raw = [] (string v, pkg_options os, strings vs) -> pkg_arg
+ auto arg_raw = [] (database& db,
+ string v,
+ pkg_options os,
+ strings vs) -> pkg_arg
{
- return pkg_arg {package_scheme::none,
+ return pkg_arg {db,
+ package_scheme::none,
package_name (),
nullopt /* constraint */,
move (v),
@@ -2830,6 +3176,11 @@ namespace bpkg
append (v, s);
};
+ auto add_num = [&add_string] (const char* o, auto v)
+ {
+ add_string (o, to_string (v));
+ };
+
const pkg_options& o (a.options);
add_bool ("--keep-out", o.keep_out ());
@@ -2847,6 +3198,15 @@ namespace bpkg
add_bool ("--checkout-purge", o.checkout_purge ());
+ if (o.config_id_specified ())
+ add_num ("--config-id", o.config_id ());
+
+ if (o.config_name_specified ())
+ add_string ("--config-name", o.config_name ());
+
+ if (o.config_uuid_specified ())
+ add_string ("--config-uuid", o.config_uuid ().string ());
+
// Compose the option/variable group.
//
if (!s.empty () || !a.config_vars.empty ())
@@ -2879,7 +3239,7 @@ namespace bpkg
//
vector<shared_ptr<available_package>> stubs;
- transaction t (db);
+ transaction t (mdb);
// Don't fold the zero revision if building the package from source so
// that we build the exact X+0 package revision if it is specified.
@@ -2936,14 +3296,16 @@ namespace bpkg
if (sys && vc)
stubs.push_back (make_shared<available_package> (n));
- pkg_args.push_back (arg_package (sc,
+ pkg_args.push_back (arg_package (*ps.db,
+ sc,
move (n),
move (vc),
move (ps.options),
move (ps.config_vars)));
}
else // Add unparsed.
- pkg_args.push_back (arg_raw (move (ps.packages),
+ pkg_args.push_back (arg_raw (*ps.db,
+ move (ps.packages),
move (ps.options),
move (ps.config_vars)));
@@ -2955,7 +3317,7 @@ namespace bpkg
// presence of --no-fetch option.
//
shared_ptr<repository> r (
- db.find<repository> (ps.location.canonical_name ()));
+ mdb.find<repository> (ps.location.canonical_name ()));
if (r == nullptr)
fail << "repository '" << ps.location
@@ -2978,7 +3340,7 @@ namespace bpkg
{
using query = query<repository_fragment_package>;
- for (const auto& rp: db.query<repository_fragment_package> (
+ for (const auto& rp: mdb.query<repository_fragment_package> (
(query::repository_fragment::name ==
rf.fragment.load ()->name) +
order_by_version_desc (query::package::id.version)))
@@ -2993,7 +3355,7 @@ namespace bpkg
if (ps.options.patch ())
{
shared_ptr<selected_package> sp (
- db.find<selected_package> (nm));
+ ps.db->find<selected_package> (nm));
// It seems natural in the presence of --patch option to only
// patch the selected packages and not to build new packages if
@@ -3044,7 +3406,8 @@ namespace bpkg
info << "package " << pv.first << " is not present in "
<< "configuration";
else
- pkg_args.push_back (arg_package (package_scheme::none,
+ pkg_args.push_back (arg_package (*ps.db,
+ package_scheme::none,
pv.first,
version_constraint (pv.second),
ps.options,
@@ -3100,12 +3463,14 @@ namespace bpkg
optional<version_constraint> c;
shared_ptr<selected_package> sp;
+ database& pdb (*ps.db);
+
if (!sys)
{
if (!vc)
{
if (ps.options.patch () &&
- (sp = db.find<selected_package> (n)) != nullptr)
+ (sp = pdb.find<selected_package> (n)) != nullptr)
{
c = patch_constraint (sp);
@@ -3121,7 +3486,7 @@ namespace bpkg
}
shared_ptr<available_package> ap (
- find_available_one (db, n, c, rfs, false /* prereq */).first);
+ find_available_one (mdb, n, c, rfs, false /* prereq */).first);
// Fail if no available package is found or only a stub is
// available and we are building a source package.
@@ -3133,7 +3498,7 @@ namespace bpkg
// If the selected package is loaded then we aim to patch it.
//
if (sp != nullptr)
- dr << "patch version for " << *sp << " is not found in "
+ dr << "patch version for " << *sp << pdb << " is not found in "
<< r->name;
else if (ap == nullptr)
dr << "package " << pkg << " is not found in " << r->name;
@@ -3158,7 +3523,8 @@ namespace bpkg
// Don't move options and variables as they may be reused.
//
- pkg_args.push_back (arg_package (sc,
+ pkg_args.push_back (arg_package (*ps.db,
+ sc,
move (n),
move (vc),
ps.options,
@@ -3184,14 +3550,14 @@ namespace bpkg
// Check if the package is a duplicate. Return true if it is but
// harmless.
//
- map<package_name, pkg_arg> package_map;
+ map<config_package, pkg_arg> package_map;
auto check_dup = [&package_map, &arg_string, &arg_parsed]
(const pkg_arg& pa) -> bool
{
assert (arg_parsed (pa));
- auto r (package_map.emplace (pa.name, pa));
+ auto r (package_map.emplace (config_package {pa.db, pa.name}, pa));
const pkg_arg& a (r.first->second);
assert (arg_parsed (a));
@@ -3216,9 +3582,10 @@ namespace bpkg
return !r.second;
};
- transaction t (db);
+ transaction t (mdb);
- shared_ptr<repository_fragment> root (db.load<repository_fragment> (""));
+ shared_ptr<repository_fragment> root (
+ mdb.load<repository_fragment> (""));
// Here is what happens here: for unparsed package args we are going to
// try and guess whether we are dealing with a package archive, package
@@ -3226,13 +3593,15 @@ namespace bpkg
// then as a directory, and then assume it is name/version. Sometimes,
// however, it is really one of the first two but just broken. In this
// case things are really confusing since we suppress all diagnostics
- // for the first two "guesses". So what we are going to do here is re-run
- // them with full diagnostics if the name/version guess doesn't pan out.
+ // for the first two "guesses". So what we are going to do here is
+ // re-run them with full diagnostics if the name/version guess doesn't
+ // pan out.
//
bool diag (false);
for (auto i (pkg_args.begin ()); i != pkg_args.end (); )
{
- pkg_arg& pa (*i);
+ pkg_arg& pa (*i);
+ database& pdb (pa.db);
// Reduce all the potential variations (archive, directory, package
// name, package name/version) to a single available_package object.
@@ -3280,7 +3649,8 @@ namespace bpkg
fail << "package archive '" << a
<< "' may not be built as a dependency";
- pa = arg_package (package_scheme::none,
+ pa = arg_package (pdb,
+ package_scheme::none,
m.name,
version_constraint (m.version),
move (pa.options),
@@ -3358,7 +3728,7 @@ namespace bpkg
//
if (optional<version> v =
package_iteration (o,
- c,
+ pdb,
t,
d,
m.name,
@@ -3366,7 +3736,8 @@ namespace bpkg
true /* check_external */))
m.version = move (*v);
- pa = arg_package (package_scheme::none,
+ pa = arg_package (pdb,
+ package_scheme::none,
m.name,
version_constraint (m.version),
move (pa.options),
@@ -3424,7 +3795,8 @@ namespace bpkg
false /* allow_wildcard */,
false /* fold_zero_revision */));
- pa = arg_package (package_scheme::none,
+ pa = arg_package (pdb,
+ package_scheme::none,
move (n),
move (vc),
move (pa.options),
@@ -3446,7 +3818,7 @@ namespace bpkg
assert (!arg_sys (pa));
if (pa.options.patch () &&
- (sp = db.find<selected_package> (pa.name)) != nullptr)
+ (sp = pdb.find<selected_package> (pa.name)) != nullptr)
{
c = patch_constraint (sp);
@@ -3465,7 +3837,7 @@ namespace bpkg
else if (!arg_sys (pa))
c = pa.constraint;
- auto rp (find_available_one (db, pa.name, c, root));
+ auto rp (find_available_one (mdb, pa.name, c, root));
ap = move (rp.first);
af = move (rp.second);
}
@@ -3503,7 +3875,7 @@ namespace bpkg
l4 ([&]{trace << "stashing recursive package "
<< arg_string (pa);});
- rec_pkgs.push_back (recursive_package {pa.name, *u, *r});
+ rec_pkgs.push_back (recursive_package {pdb, pa.name, *u, *r});
}
}
@@ -3519,23 +3891,24 @@ namespace bpkg
// Make sure that the package is known.
//
auto apr (!pa.constraint || sys
- ? find_available (db, pa.name, nullopt)
- : find_available (db, pa.name, *pa.constraint));
+ ? find_available (mdb, pa.name, nullopt)
+ : find_available (mdb, pa.name, *pa.constraint));
if (apr.empty ())
{
diag_record dr (fail);
dr << "unknown package " << arg_string (pa, false /* options */);
- check_any_available (c, t, &dr);
+ check_any_available (mdb, t, &dr);
}
// Save before the name move.
//
- sp = db.find<selected_package> (pa.name);
+ sp = pdb.find<selected_package> (pa.name);
dep_pkgs.push_back (
- dependency_package {move (pa.name),
+ dependency_package {pdb,
+ move (pa.name),
move (pa.constraint),
move (sp),
sys,
@@ -3558,10 +3931,10 @@ namespace bpkg
// the same as the selected package).
//
if (sp == nullptr)
- sp = db.find<selected_package> (pa.name);
+ sp = pdb.find<selected_package> (pa.name);
if (sp != nullptr && sp->state == package_state::broken)
- fail << "unable to build broken package " << pa.name <<
+ fail << "unable to build broken package " << pa.name << pdb <<
info << "use 'pkg-purge --force' to remove";
bool found (true);
@@ -3585,7 +3958,7 @@ namespace bpkg
if (ap == nullptr)
{
if (pa.constraint &&
- find_available_one (db,
+ find_available_one (mdb,
pa.name,
nullopt,
root).first != nullptr)
@@ -3667,7 +4040,7 @@ namespace bpkg
// Let's help the new user out here a bit.
//
- check_any_available (c, t, &dr);
+ check_any_available (mdb, t, &dr);
}
else
{
@@ -3690,7 +4063,7 @@ namespace bpkg
{
assert (sp != nullptr && sp->system () == arg_sys (pa));
- auto rp (make_available (o, c, db, sp));
+ auto rp (make_available (o, pdb, sp));
ap = rp.first;
af = rp.second; // Could be NULL (orphan).
}
@@ -3708,6 +4081,7 @@ namespace bpkg
//
build_package p {
build_package::build,
+ pdb,
move (sp),
move (ap),
move (af),
@@ -3721,11 +4095,11 @@ namespace bpkg
: optional<dir_path> ()),
pa.options.checkout_purge (),
move (pa.config_vars),
- {package_name ()}, // Required by (command line).
+ {config_package {mdb, ""}}, // Required by (command line).
0}; // Adjustments.
l4 ([&]{trace << "stashing held package "
- << p.available_name_version ();});
+ << p.available_name_version_db ();});
// "Fix" the version the user asked for by adding the constraint.
//
@@ -3733,7 +4107,8 @@ namespace bpkg
// this build_package instance is never replaced).
//
if (pa.constraint)
- p.constraints.emplace_back ("command line", move (*pa.constraint));
+ p.constraints.emplace_back (
+ mdb, "command line", move (*pa.constraint));
hold_pkgs.push_back (move (p));
}
@@ -3741,6 +4116,10 @@ namespace bpkg
// If this is just pkg-build -u|-p, then we are upgrading all held
// packages.
//
+ // Should we also upgrade the held packages in the explicitly associated
+    // configurations, recursively? Maybe later, and we will probably need a
+    // command line option to enable this behavior.
+ //
if (hold_pkgs.empty () && dep_pkgs.empty () &&
(o.upgrade () || o.patch ()))
{
@@ -3748,15 +4127,14 @@ namespace bpkg
for (shared_ptr<selected_package> sp:
pointer_result (
- db.query<selected_package> (query::state == "configured" &&
- query::hold_package)))
+ mdb.query<selected_package> (query::state == "configured" &&
+ query::hold_package)))
{
// Let's skip upgrading system packages as they are, probably,
// configured as such for a reason.
//
if (sp->system ())
continue;
-
const package_name& name (sp->name);
optional<version_constraint> pc;
@@ -3772,7 +4150,7 @@ namespace bpkg
continue;
}
- auto apr (find_available_one (db, name, pc, root));
+ auto apr (find_available_one (mdb, name, pc, root));
shared_ptr<available_package> ap (move (apr.first));
if (ap == nullptr || ap->stub ())
@@ -3788,7 +4166,7 @@ namespace bpkg
// Let's help the new user out here a bit.
//
- check_any_available (c, t, &dr);
+ check_any_available (mdb, t, &dr);
}
// We will keep the output directory only if the external package is
@@ -3797,23 +4175,24 @@ namespace bpkg
bool keep_out (o.keep_out () && sp->external ());
build_package p {
- build_package::build,
+ build_package::build,
+ mdb,
move (sp),
move (ap),
move (apr.second),
- true, // Hold package.
- false, // Hold version.
- {}, // Constraints.
- false, // System package.
+ true, // Hold package.
+ false, // Hold version.
+ {}, // Constraints.
+ false, // System package.
keep_out,
- nullopt, // Checkout root.
- false, // Checkout purge.
- strings (), // Configuration variables.
- {package_name ()}, // Required by (command line).
- 0}; // Adjustments.
+ nullopt, // Checkout root.
+ false, // Checkout purge.
+ strings (), // Configuration variables.
+ {config_package {mdb, ""}}, // Required by (command line).
+ 0}; // Adjustments.
l4 ([&]{trace << "stashing held package "
- << p.available_name_version ();});
+ << p.available_name_version_db ();});
hold_pkgs.push_back (move (p));
@@ -3822,7 +4201,7 @@ namespace bpkg
//
if (o.immediate () || o.recursive ())
rec_pkgs.push_back (
- recursive_package {name, o.upgrade (), o.recursive ()});
+ recursive_package {mdb, name, o.upgrade (), o.recursive ()});
}
}
@@ -3885,14 +4264,15 @@ namespace bpkg
{
struct dep
{
- package_name name; // Empty if up/down-grade.
+ reference_wrapper<database> db;
+ package_name name; // Empty if up/down-grade.
// Both are NULL if drop.
//
- shared_ptr<available_package> available;
+ shared_ptr<available_package> available;
shared_ptr<bpkg::repository_fragment> repository_fragment;
- bool system;
+ bool system;
};
vector<dep> deps;
@@ -3903,7 +4283,15 @@ namespace bpkg
l4 ([&]{trace << "refining execution plan"
<< (scratch ? " from scratch" : "");});
- transaction t (db);
+ transaction t (mdb);
+
+ // Save the total number of host configurations in the associated
+ // configurations cluster to later check if any private host
+ // configurations have been created during collection of the package
+ // builds (see below).
+ //
+ size_t host_configs (
+ mdb.dependency_configs (true /* buildtime */).size ());
build_packages::postponed_packages postponed;
@@ -3924,23 +4312,25 @@ namespace bpkg
for (const dependency_package& p: dep_pkgs)
{
build_package bp {
- nullopt, // Action.
- nullptr, // Selected package.
- nullptr, // Available package/repository frag.
+ nullopt, // Action.
+ p.db,
+ nullptr, // Selected package.
+ nullptr, // Available package/repository frag.
nullptr,
- false, // Hold package.
- p.constraint.has_value (), // Hold version.
- {}, // Constraints.
+ false, // Hold package.
+ p.constraint.has_value (), // Hold version.
+ {}, // Constraints.
p.system,
p.keep_out,
p.checkout_root,
p.checkout_purge,
p.config_vars,
- {package_name ()}, // Required by (command line).
- 0}; // Adjustments.
+ {config_package {mdb, ""}}, // Required by (command line).
+ 0}; // Adjustments.
if (p.constraint)
- bp.constraints.emplace_back ("command line", *p.constraint);
+ bp.constraints.emplace_back (
+ mdb, "command line", *p.constraint);
pkgs.enter (p.name, move (bp));
}
@@ -3950,12 +4340,12 @@ namespace bpkg
// specify packages on the command line does not matter).
//
for (const build_package& p: hold_pkgs)
- pkgs.collect_build (o, c, db, p);
+ pkgs.collect_build (o, p);
// Collect all the prerequisites of the user selection.
//
for (const build_package& p: hold_pkgs)
- pkgs.collect_build_prerequisites (o, c, db, p.name (), postponed);
+ pkgs.collect_build_prerequisites (o, p.db, p.name (), postponed);
// Note that we need to collect unheld after prerequisites, not to
// overwrite the pre-entered entries before they are used to provide
@@ -3964,7 +4354,7 @@ namespace bpkg
for (const dependency_package& p: dep_pkgs)
{
if (p.selected != nullptr && p.selected->hold_package)
- pkgs.collect_unhold (p.selected);
+ pkgs.collect_unhold (p.db, p.selected);
}
scratch = false;
@@ -3977,12 +4367,16 @@ namespace bpkg
//
for (const dep& d: deps)
{
+ database& ddb (d.db);
+
if (d.available == nullptr)
- pkgs.collect_drop (db.load<selected_package> (d.name));
+ {
+ pkgs.collect_drop (ddb, ddb.load<selected_package> (d.name));
+ }
else
{
shared_ptr<selected_package> sp (
- db.find<selected_package> (d.name));
+ ddb.find<selected_package> (d.name));
// We will keep the output directory only if the external package
// is replaced with an external one (see above for details).
@@ -3997,28 +4391,41 @@ namespace bpkg
//
build_package p {
build_package::build,
+ ddb,
move (sp),
d.available,
d.repository_fragment,
- nullopt, // Hold package.
- nullopt, // Hold version.
- {}, // Constraints.
+ nullopt, // Hold package.
+ nullopt, // Hold version.
+ {}, // Constraints.
d.system,
keep_out,
- nullopt, // Checkout root.
- false, // Checkout purge.
- strings (), // Configuration variables.
- {package_name ()}, // Required by (command line).
- 0}; // Adjustments.
+ nullopt, // Checkout root.
+ false, // Checkout purge.
+ strings (), // Configuration variables.
+ {config_package {mdb, ""}}, // Required by (command line).
+ 0}; // Adjustments.
- pkgs.collect_build (o, c, db, p, &postponed /* recursively */);
+ pkgs.collect_build (o, move (p), &postponed /* recursively */);
}
}
// Handle the (combined) postponed collection.
//
if (!postponed.empty ())
- pkgs.collect_build_postponed (o, c, db, postponed);
+ pkgs.collect_build_postponed (o, postponed);
+
+ // If any private host configurations have been created while
+ // collecting the package builds, then commit the new associations and
+ // restart the transaction (there should be no changes other than
+ // that).
+ //
+ if (mdb.dependency_configs (true /* buildtime */).size () !=
+ host_configs)
+ {
+ t.commit ();
+ t.start (mdb);
+ }
// Now that we have collected all the package versions that we need to
// build, arrange them in the "dependency order", that is, with every
@@ -4034,16 +4441,19 @@ namespace bpkg
// appear (e.g., on the plan) last.
//
for (const dep& d: deps)
- pkgs.order (d.name, false /* reorder */);
+ pkgs.order (d.db,
+ d.name,
+ nullopt /* buildtime */,
+ false /* reorder */);
for (const build_package& p: reverse_iterate (hold_pkgs))
- pkgs.order (p.name ());
+ pkgs.order (p.db, p.name (), nullopt /* buildtime */);
// Collect and order all the dependents that we will need to
// reconfigure because of the up/down-grades of packages that are now
// on the list.
//
- pkgs.collect_order_dependents (db);
+ pkgs.collect_order_dependents ();
// And, finally, make sure all the packages that we need to unhold
// are on the list.
@@ -4051,7 +4461,10 @@ namespace bpkg
for (const dependency_package& p: dep_pkgs)
{
if (p.selected != nullptr && p.selected->hold_package)
- pkgs.order (p.name, false /* reorder */);
+ pkgs.order (p.db,
+ p.name,
+ nullopt /* buildtime */,
+ false /* reorder */);
}
// We are about to execute the plan on the database (but not on the
@@ -4060,36 +4473,60 @@ namespace bpkg
// below for details).
//
using selected_packages = session::object_map<selected_package>;
- auto selected_packages_session = [&db, &ses] () -> selected_packages*
+ auto sp_session = [] (const auto& tm) -> selected_packages*
{
- auto& m (ses.map ()[&db]);
- auto i (m.find (&typeid (selected_package)));
- return (i != m.end ()
+ auto i (tm.find (&typeid (selected_package)));
+ return (i != tm.end ()
? &static_cast<selected_packages&> (*i->second)
: nullptr);
};
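The sp_session() lambda pulls the selected_package cache out of a per-database, type-erased map. The sketch below illustrates the same lookup-and-downcast technique with hypothetical types (std::type_index keys instead of the type_info pointers used by the real session); it is only an illustration, not the odb session API.

#include <map>
#include <memory>
#include <string>
#include <typeindex>
#include <typeinfo>

struct cache_base {virtual ~cache_base () = default;};

template <typename T>
struct object_cache: cache_base,
                     std::map<std::string, std::shared_ptr<T>> {};

struct selected_package {std::string name;};

using type_map = std::map<std::type_index, std::unique_ptr<cache_base>>;

static object_cache<selected_package>*
sp_cache (const type_map& tm)
{
  auto i (tm.find (typeid (selected_package)));

  return i != tm.end ()
    ? static_cast<object_cache<selected_package>*> (i->second.get ())
    : nullptr;
}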
- selected_packages old_sp;
- if (const selected_packages* sp = selected_packages_session ())
- old_sp = *sp;
+ map<const odb::database*, selected_packages> old_sp;
+
+ for (const auto& dps: ses.map ())
+ {
+ if (const selected_packages* sps = sp_session (dps.second))
+ old_sp.emplace (dps.first, *sps);
+ }
// Note that we need to perform the execution on the copies of the
// build/drop_package objects to preserve the original ones. The
// selected_package objects will still be changed so we will reload
// them afterwards (see below).
//
+ // After the plan execution, save the dependents (selected packages
+      // with prerequisites) for the subsequent verification of the built
+      // packages' dependency hierarchies.
+ //
+ bool changed;
+ vector<pair<database&, shared_ptr<selected_package>>> dependents;
{
vector<build_package> tmp (pkgs.begin (), pkgs.end ());
build_package_list bl (tmp.begin (), tmp.end ());
- execute_plan (o, c, db, bl, true /* simulate */);
+ changed = execute_plan (o, bl, true /* simulate */);
+
+ if (changed)
+ {
+ for (build_package& p: bl)
+ {
+ if (p.selected != nullptr)
+ {
+ if (!p.selected->prerequisites.empty ())
+ dependents.emplace_back (p.db, move (p.selected));
+ }
+ else
+ assert (p.action && *p.action == build_package::drop);
+ }
+ }
}
// Return nullopt if no changes to the dependency are necessary. This
// value covers both the "no change is required" and the "no
// recommendation available" cases.
//
- auto eval_dep = [&db, &dep_pkgs, &rec_pkgs] (
+ auto eval_dep = [&dep_pkgs, &rec_pkgs] (
+ database& db,
const shared_ptr<selected_package>& sp,
bool ignore_unsatisfiable = true) -> optional<evaluate_result>
{
@@ -4098,7 +4535,7 @@ namespace bpkg
// See if there is an optional dependency upgrade recommendation.
//
if (!sp->hold_package)
- r = evaluate_dependency (db, dep_pkgs, sp, ignore_unsatisfiable);
+ r = evaluate_dependency (db, sp, dep_pkgs, ignore_unsatisfiable);
// If none, then see for the recursive dependency upgrade
// recommendation.
@@ -4107,7 +4544,7 @@ namespace bpkg
// configured as such for a reason.
//
if (!r && !sp->system () && !rec_pkgs.empty ())
- r = evaluate_recursive (db, rec_pkgs, sp, ignore_unsatisfiable);
+ r = evaluate_recursive (db, sp, rec_pkgs, ignore_unsatisfiable);
// Translate the "no change" result to nullopt.
//
@@ -4117,16 +4554,18 @@ namespace bpkg
// The empty version means that the package must be dropped.
//
const version ev;
- auto target_version = [&ev] (const shared_ptr<available_package>& ap,
- bool sys) -> const version&
+ auto target_version = [&ev]
+ (database& db,
+ const shared_ptr<available_package>& ap,
+ bool sys) -> const version&
{
if (ap == nullptr)
return ev;
if (sys)
{
- assert (ap->system_version () != nullptr);
- return *ap->system_version ();
+ assert (ap->system_version (db) != nullptr);
+ return *ap->system_version (db);
}
return ap->version;
@@ -4139,15 +4578,17 @@ namespace bpkg
{
bool s (false);
+ database& db (i->db);
+
// Here we scratch if evaluate changed its mind or if the resulting
// version doesn't match what we expect it to be.
//
if (auto sp = db.find<selected_package> (i->name))
{
- const version& dv (target_version (i->available, i->system));
+ const version& dv (target_version (db, i->available, i->system));
- if (optional<evaluate_result> r = eval_dep (sp))
- s = dv != target_version (r->available, r->system) ||
+ if (optional<evaluate_result> r = eval_dep (db, sp))
+ s = dv != target_version (db, r->available, r->system) ||
i->system != r->system;
else
s = dv != sp->version || i->system != sp->system ();
@@ -4164,7 +4605,24 @@ namespace bpkg
++i;
}
- if (!scratch)
+ // If the execute_plan() call was noop, there are no user expectations
+ // regarding any dependency, and no upgrade is requested, then the
+ // only possible refinement outcome can be recommendations to drop
+ // unused dependencies (that the user has refused to drop on the
+ // previous build or drop command run). Thus, if the --keep-unused|-K
+ // or --no-refinement option is also specified, then we omit the
+ // need_refinement() call altogether and assume that no refinement is
+ // required.
+ //
+ if (!changed && dep_pkgs.empty () && rec_pkgs.empty ())
+ {
+ assert (!scratch); // No reason to change any previous decision.
+
+ if (o.keep_unused () || o.no_refinement ())
+ refine = false;
+ }
+
+ if (!scratch && refine)
{
// First, we check if the refinement is required, ignoring the
// unsatisfiable dependency version constraints. If we end up
@@ -4174,12 +4632,12 @@ namespace bpkg
// make sure that the unsatisfiable dependency, if left, is
// reported.
//
- auto need_refinement = [&eval_dep, &deps, &rec_pkgs, &db, &o] (
+ auto need_refinement = [&eval_dep, &deps, &rec_pkgs, &mdb, &o] (
bool diag = false) -> bool
{
// Examine the new dependency set for any up/down-grade/drops.
//
- bool r (false); // Presumably no more refinments are necessary.
+ bool r (false); // Presumably no more refinements are necessary.
using query = query<selected_package>;
@@ -4188,22 +4646,32 @@ namespace bpkg
if (rec_pkgs.empty ())
q = q && !query::hold_package;
- for (shared_ptr<selected_package> sp:
- pointer_result (db.query<selected_package> (q)))
+ // It seems right to only evaluate dependencies in the explicitly
+ // associated configurations, recursively. Indeed, we shouldn't be
+ // up/down-grading or dropping packages in configurations that
+ // only contain dependents, some of which we may only reconfigure.
+ //
+ for (database& adb: mdb.dependency_configs ())
{
- if (optional<evaluate_result> er = eval_dep (sp, !diag))
+ for (shared_ptr<selected_package> sp:
+ pointer_result (adb.query<selected_package> (q)))
{
- // Skip unused if we were instructed to keep them.
- //
- if (o.keep_unused () && er->available == nullptr)
- continue;
+ if (optional<evaluate_result> er = eval_dep (adb, sp, !diag))
+ {
+ // Skip unused if we were instructed to keep them.
+ //
+ if (o.keep_unused () && er->available == nullptr)
+ continue;
+
+ if (!diag)
+ deps.push_back (dep {adb,
+ sp->name,
+ move (er->available),
+ move (er->repository_fragment),
+ er->system});
- if (!diag)
- deps.push_back (dep {sp->name,
- move (er->available),
- move (er->repository_fragment),
- er->system});
- r = true;
+ r = true;
+ }
}
}
@@ -4216,18 +4684,170 @@ namespace bpkg
need_refinement (true /* diag */);
}
+ // Note that we prevent building multiple instances of the same
+ // package dependency in different configurations (of the same type)
+ // while creating the build plan. However, we may potentially end up
+ // with the same dependency in multiple configurations since we do not
+ // descend into prerequisites of already configured packages which
+ // require no up/downgrade.
+ //
+ // To prevent this, we additionally verify that none of the built
+ // package dependency hierarchies contain the same runtime dependency,
+ // built in multiple configurations.
+ //
+ if (changed && !refine)
+ {
+ // Verify the specified package dependency hierarchy and return the
+ // set of packages plus their runtime dependencies, including
+ // indirect ones. Fail if a dependency cycle is detected.
+ //
+ // Note that all the encountered dependency sub-hierarchies that
+ // reside in configurations of different types (or beneath them) are
+ // also verified but not included into the resulting set.
+ //
+ using prerequisites = set<lazy_shared_ptr<selected_package>,
+ compare_lazy_ptr>;
+
+ map<config_package, prerequisites> cache;
+ small_vector<config_selected_package, 16> chain;
+
+ auto verify_dependencies = [&cache, &chain]
+ (database& db,
+ shared_ptr<selected_package> sp,
+ const auto& verify_dependencies)
+ -> const prerequisites&
+ {
+ // Return the cached value, if present.
+ //
+ config_package cp {db, sp->name};
+ {
+ auto i (cache.find (cp));
+
+ if (i != cache.end ())
+ return i->second;
+ }
+
+ // Make sure there is no dependency cycle.
+ //
+ config_selected_package csp {db, sp};
+ {
+ auto i (find (chain.begin (), chain.end (), csp));
+
+ if (i != chain.end ())
+ {
+ diag_record dr (fail);
+ dr << "dependency cycle detected involving package " << *sp
+ << db;
+
+ // Note: push_back() can invalidate the iterator.
+ //
+ size_t j (i - chain.begin ());
+
+ for (chain.push_back (csp); j != chain.size () - 1; ++j)
+ dr << info << *chain[j].package << chain[j].db
+ << " depends on "
+ << *chain[j + 1].package << chain[j + 1].db;
+ }
+ }
+
+ chain.push_back (csp);
+
+ // Verify all prerequisites, but only collect those that are from
+ // configurations of the same type.
+ //
+ // Indeed, we don't care if an associated host configuration
+ // contains a configured package that we also have configured in
+ // our target configuration. It's also fine if some of our runtime
+ // dependencies from different configurations build-time depend on
+ // the same package (of potentially different versions) configured
+ // in different host configurations.
+ //
+ prerequisites r;
+ const package_prerequisites& prereqs (sp->prerequisites);
+
+ for (const auto& prereq: prereqs)
+ {
+ const lazy_shared_ptr<selected_package>& p (prereq.first);
+ database& pdb (p.database ());
+
+          // Also validate the prerequisite sub-hierarchy if it resides in a
+          // configuration of a different type, but do not collect it.
+ //
+ const prerequisites& ps (
+ verify_dependencies (pdb, p.load (), verify_dependencies));
+
+ if (pdb.type != db.type)
+ continue;
+
+ // Collect prerequisite sub-hierarchy, checking that none of the
+ // packages are already collected.
+ //
+ for (const lazy_shared_ptr<selected_package>& p: ps)
+ {
+ // Note: compare_lazy_ptr only considers package names.
+ //
+ auto i (r.find (p));
+
+ if (i != r.end ())
+ {
+ database& db1 (p.database ());
+ database& db2 (i->database ());
+
+ if (db1 != db2)
+ {
+ bool indirect (prereqs.find (p) == prereqs.end ());
+
+ fail << "package " << p.object_id ()
+ << (indirect ? " indirectly" : "") << " required by "
+ << *sp << db << " is configured in multiple "
+ << "configurations" <<
+ info << *p.load () << db1 <<
+ info << *i->load () << db2;
+ }
+ }
+ else
+ r.insert (p);
+ }
+ }
+
+ chain.pop_back ();
+
+ // Collect the dependent package itself.
+ //
+ r.insert (lazy_shared_ptr<selected_package> (db, move (sp)));
+
+ // Cache the resulting package prerequisites set and return a
+ // reference to it.
+ //
+ auto j (cache.emplace (move (cp), move (r)));
+ assert (j.second); // A package cannot depend on itself.
+
+ return j.first->second;
+ };
+
+ for (auto& p: dependents)
+ verify_dependencies (p.first,
+ move (p.second),
+ verify_dependencies);
+ }
+
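// A minimal standalone sketch of the memoized verification above: a DFS
// that keeps the current dependency chain to detect cycles and caches each
// package's prerequisite closure. Plain strings stand in for the
// config_package/selected_package machinery, so this only approximates the
// real logic.
//
#include <algorithm>
#include <map>
#include <set>
#include <stdexcept>
#include <string>
#include <vector>

using name = std::string;
using graph = std::map<name, std::vector<name>>; // Package -> prerequisites.

static const std::set<name>&
verify (const name& p,
        const graph& g,
        std::map<name, std::set<name>>& cache,
        std::vector<name>& chain)
{
  // Return the cached closure, if present.
  //
  auto ci (cache.find (p));
  if (ci != cache.end ())
    return ci->second;

  // Make sure there is no dependency cycle.
  //
  if (std::find (chain.begin (), chain.end (), p) != chain.end ())
    throw std::runtime_error ("dependency cycle involving " + p);

  chain.push_back (p);

  std::set<name> r;

  auto gi (g.find (p));
  if (gi != g.end ())
  {
    for (const name& d: gi->second)
    {
      const std::set<name>& ds (verify (d, g, cache, chain));
      r.insert (ds.begin (), ds.end ());
    }
  }

  chain.pop_back ();
  r.insert (p); // Include the package itself.

  // A package cannot depend on itself, so the insertion always succeeds.
  //
  return cache.emplace (p, std::move (r)).first->second;
}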
// Rollback the changes to the database and reload the changed
// selected_package objects.
//
t.rollback ();
{
- transaction t (db);
+ transaction t (mdb);
// First reload all the selected_package object that could have been
// modified (conceptually, we should only modify what's on the
// plan). And in case of drop the object is removed from the session
// so we need to bring it back.
//
+ // Make sure that selected packages are only owned by the session
+ // and the build package list.
+ //
+ dependents.clear ();
+
// Note: we use the original pkgs list since the executed ones may
// contain newly created (but now gone) selected_package objects.
//
@@ -4235,58 +4855,112 @@ namespace bpkg
{
assert (p.action);
+ database& pdb (p.db);
+
if (*p.action == build_package::drop)
{
assert (p.selected != nullptr);
ses.cache_insert<selected_package> (
- db, p.selected->name, p.selected);
+ pdb, p.selected->name, p.selected);
}
if (p.selected != nullptr)
- db.reload (*p.selected);
+ pdb.reload (*p.selected);
}
// Now remove all the newly created selected_package objects from
// the session. The tricky part is to distinguish newly created ones
// from newly loaded (and potentially cached).
//
- if (selected_packages* sp = selected_packages_session ())
+ for (bool rescan (true); rescan; )
{
- for (bool rescan (true); rescan; )
- {
- rescan = false;
+ rescan = false;
- for (auto i (sp->begin ()); i != sp->end (); )
+ for (const auto& dps: ses.map ())
+ {
+ if (selected_packages* sps = sp_session (dps.second))
{
- bool erased (false);
- auto j (old_sp.find (i->first));
-
+ auto j (old_sp.find (dps.first)); // Find the database.
+
+ // Note that if a database has been introduced only during
+ // simulation, then we could just clear all its selected
+            // packages in one shot. Let's, however, be cautious and remove
+ // them iteratively to make sure that none of them are left at
+ // the end (no more rescan is necessary). If any of them is
+            // left, then that would mean that it is referenced from
+ // somewhere besides the session object, which would be a bug.
+ //
if (j == old_sp.end ())
{
- if (i->second.use_count () == 1)
+ if (!sps->empty ())
+ {
+ for (auto i (sps->begin ()); i != sps->end (); )
+ {
+ if (i->second.use_count () == 1)
+ {
+ // This might cause another object's use count to drop.
+ //
+ i = sps->erase (i);
+ rescan = true;
+ }
+ else
+ ++i;
+ }
+ }
+
+ continue;
+ }
+
+ const selected_packages& osp (j->second);
+
+ for (auto i (sps->begin ()); i != sps->end (); )
+ {
+ bool erased (false);
+ auto j (osp.find (i->first));
+
+ if (j == osp.end ())
+ {
+ if (i->second.use_count () == 1)
+ {
+ // This might cause another object's use count to drop.
+ //
+ i = sps->erase (i);
+ erased = true;
+ rescan = true;
+ }
+ }
+ // It may also happen that the object was erased from the
+ // database and then recreated. In this case we restore the
+ // pointer that is stored in the session.
+ //
+ else if (i->second != j->second)
{
// This might cause another object's use count to drop.
//
- i = sp->erase (i);
- erased = true;
+ i->second = j->second;
rescan = true;
}
+
+ if (!erased)
+ ++i;
}
- // It may also happen that the object was erased from the
- // database and then recreated. In this case we restore the
- // pointer that is stored in the session.
- //
- else if (i->second != j->second)
+ }
+ }
+
+        // Verify that all the selected packages of the databases newly
+        // introduced during the simulation are erased (see above for the
+        // verification reasoning).
+ //
+ if (!rescan)
+ {
+ for (const auto& dps: ses.map ())
+ {
+ if (const selected_packages* sps = sp_session (dps.second))
{
- // This might cause another object's use count to drop.
- //
- i->second = j->second;
- rescan = true;
+ if (old_sp.find (dps.first) == old_sp.end ())
+ assert (sps->empty ());
}
-
- if (!erased)
- ++i;
}
}
}
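// The rescan loop above boils down to the following pattern: keep erasing
// cache entries that are referenced only by the cache itself
// (use_count () == 1) until a pass makes no progress, since erasing one
// entry may release the last external reference to another. A simplified,
// self-contained illustration (not the actual session code):
//
#include <map>
#include <memory>
#include <string>

struct node { std::shared_ptr<node> dep; }; // May keep another node alive.

static void
prune (std::map<std::string, std::shared_ptr<node>>& cache)
{
  for (bool rescan (true); rescan; )
  {
    rescan = false;

    for (auto i (cache.begin ()); i != cache.end (); )
    {
      if (i->second.use_count () == 1)
      {
        // This may drop another entry's use count to 1.
        //
        i = cache.erase (i);
        rescan = true;
      }
      else
        ++i;
    }
  }
}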
@@ -4317,6 +4991,7 @@ namespace bpkg
for (const build_package& p: reverse_iterate (pkgs))
{
+ database& pdb (p.db);
const shared_ptr<selected_package>& sp (p.selected);
string act;
@@ -4325,7 +5000,7 @@ namespace bpkg
if (*p.action == build_package::drop)
{
- act = "drop " + sp->string () + " (unused)";
+ act = "drop " + sp->string (pdb) + " (unused)";
need_prompt = true;
}
else
@@ -4361,6 +5036,10 @@ namespace bpkg
}
act += ' ' + sp->name.string ();
+
+ string s (pdb.string ());
+ if (!s.empty ())
+ act += ' ' + s;
}
else
{
@@ -4403,15 +5082,15 @@ namespace bpkg
if (p.unhold ())
act += "/unhold";
- act += ' ' + p.available_name_version ();
+ act += ' ' + p.available_name_version_db ();
cause = "required by";
}
string rb;
if (!p.user_selection ())
{
- for (const package_name& n: p.required_by)
- rb += ' ' + n.string ();
+ for (const config_package& cp: p.required_by)
+ rb += ' ' + cp.string ();
// If not user-selected, then there should be another (implicit)
// reason for the action.
@@ -4491,7 +5170,7 @@ namespace bpkg
    // prerequisites got upgraded/downgraded and that the user may want to in
// addition update (that update_dependents flag above).
//
- execute_plan (o, c, db, pkgs, false /* simulate */);
+ execute_plan (o, pkgs, false /* simulate */);
if (o.configure_only ())
return 0;
@@ -4512,11 +5191,14 @@ namespace bpkg
if (*p.action != build_package::build)
continue;
+ database& db (p.db);
const shared_ptr<selected_package>& sp (p.selected);
if (!sp->system () && // System package doesn't need update.
p.user_selection ())
- upkgs.push_back (pkg_command_vars {sp,
+ upkgs.push_back (pkg_command_vars {db.config_orig,
+ db.main (),
+ sp,
strings () /* vars */,
false /* cwd */});
}
@@ -4530,28 +5212,30 @@ namespace bpkg
{
assert (p.action);
+ database& db (p.db);
+
if (*p.action == build_package::adjust && p.reconfigure ())
- upkgs.push_back (pkg_command_vars {p.selected,
+ upkgs.push_back (pkg_command_vars {db.config_orig,
+ db.main (),
+ p.selected,
strings () /* vars */,
false /* cwd */});
}
}
- pkg_update (c, o, o.for_ (), strings (), upkgs);
+ pkg_update (o, o.for_ (), strings (), upkgs);
if (verb && !o.no_result ())
{
for (const pkg_command_vars& pv: upkgs)
- text << "updated " << *pv.pkg;
+ text << "updated " << pv.string ();
}
return 0;
}
- static void
+ static bool
execute_plan (const pkg_build_options& o,
- const dir_path& c,
- database& db,
build_package_list& build_pkgs,
bool simulate)
{
@@ -4559,6 +5243,7 @@ namespace bpkg
l4 ([&]{trace << "simulate: " << (simulate ? "yes" : "no");});
+ bool r (false);
uint16_t verbose (!simulate ? verb : 0);
// disfigure
@@ -4573,12 +5258,13 @@ namespace bpkg
if (*p.action != build_package::drop && !p.reconfigure ())
continue;
+ database& pdb (p.db);
shared_ptr<selected_package>& sp (p.selected);
// Each package is disfigured in its own transaction, so that we
// always leave the configuration in a valid state.
//
- transaction t (db, !simulate /* start */);
+ transaction t (pdb, !simulate /* start */);
// Reset the flag if the package being unpacked is not an external one.
//
@@ -4612,7 +5298,9 @@ namespace bpkg
// Commits the transaction.
//
- pkg_disfigure (c, o, t, sp, !p.keep_out, simulate);
+ pkg_disfigure (o, pdb, t, sp, !p.keep_out, simulate);
+
+ r = true;
assert (sp->state == package_state::unpacked ||
sp->state == package_state::transient);
@@ -4620,7 +5308,7 @@ namespace bpkg
if (verbose && !o.no_result ())
text << (sp->state == package_state::transient
? "purged "
- : "disfigured ") << *sp;
+ : "disfigured ") << *sp << pdb;
// Selected system package is now gone from the database. Before we drop
// the object we need to make sure the hold state is preserved in the
@@ -4644,6 +5332,8 @@ namespace bpkg
{
assert (p.action);
+ database& pdb (p.db);
+
shared_ptr<selected_package>& sp (p.selected);
const shared_ptr<available_package>& ap (p.available);
@@ -4661,11 +5351,13 @@ namespace bpkg
{
assert (!sp->system ());
- transaction t (db, !simulate /* start */);
- pkg_purge (c, t, sp, simulate); // Commits the transaction.
+ transaction t (pdb, !simulate /* start */);
+ pkg_purge (pdb, t, sp, simulate); // Commits the transaction.
+
+ r = true;
if (verbose && !o.no_result ())
- text << "purged " << *sp;
+ text << "purged " << *sp << pdb;
sp = nullptr;
}
@@ -4690,11 +5382,13 @@ namespace bpkg
{
if (sp != nullptr && !sp->system ())
{
- transaction t (db, !simulate /* start */);
- pkg_purge (c, t, sp, simulate); // Commits the transaction.
+ transaction t (pdb, !simulate /* start */);
+ pkg_purge (pdb, t, sp, simulate); // Commits the transaction.
+
+ r = true;
if (verbose && !o.no_result ())
- text << "purged " << *sp;
+ text << "purged " << *sp << pdb;
if (!p.hold_package)
p.hold_package = sp->hold_package;
@@ -4721,7 +5415,7 @@ namespace bpkg
if (pl.repository_fragment.object_id () != "") // Special root?
{
- transaction t (db, !simulate /* start */);
+ transaction t (pdb, !simulate /* start */);
// Go through package repository fragments to decide if we should
// fetch, checkout or unpack depending on the available repository
@@ -4754,7 +5448,7 @@ namespace bpkg
case repository_basis::archive:
{
sp = pkg_fetch (o,
- c,
+ pdb,
t,
ap->id.name,
p.available_version (),
@@ -4766,7 +5460,7 @@ namespace bpkg
{
sp = p.checkout_root
? pkg_checkout (o,
- c,
+ pdb,
t,
ap->id.name,
p.available_version (),
@@ -4775,7 +5469,7 @@ namespace bpkg
p.checkout_purge,
simulate)
: pkg_checkout (o,
- c,
+ pdb,
t,
ap->id.name,
p.available_version (),
@@ -4786,7 +5480,7 @@ namespace bpkg
case repository_basis::directory:
{
sp = pkg_unpack (o,
- c,
+ pdb,
t,
ap->id.name,
p.available_version (),
@@ -4800,11 +5494,11 @@ namespace bpkg
//
else if (exists (pl.location))
{
- transaction t (db, !simulate /* start */);
+ transaction t (pdb, !simulate /* start */);
sp = pkg_fetch (
o,
- c,
+ pdb,
t,
pl.location, // Archive path.
true, // Replace
@@ -4814,6 +5508,8 @@ namespace bpkg
if (sp != nullptr) // Actually fetched or checked out something?
{
+ r = true;
+
assert (sp->state == package_state::fetched ||
sp->state == package_state::unpacked);
@@ -4833,19 +5529,19 @@ namespace bpkg
case repository_basis::archive:
{
assert (sp->state == package_state::fetched);
- dr << "fetched " << *sp;
+ dr << "fetched " << *sp << pdb;
break;
}
case repository_basis::directory:
{
assert (sp->state == package_state::unpacked);
- dr << "using " << *sp << " (external)";
+ dr << "using " << *sp << pdb << " (external)";
break;
}
case repository_basis::version_control:
{
assert (sp->state == package_state::unpacked);
- dr << "checked out " << *sp;
+ dr << "checked out " << *sp << pdb;
break;
}
}
@@ -4860,23 +5556,23 @@ namespace bpkg
{
if (sp != nullptr)
{
- transaction t (db, !simulate /* start */);
+ transaction t (pdb, !simulate /* start */);
// Commits the transaction.
//
- sp = pkg_unpack (o, c, t, ap->id.name, simulate);
+ sp = pkg_unpack (o, pdb, t, ap->id.name, simulate);
if (verbose && !o.no_result ())
- text << "unpacked " << *sp;
+ text << "unpacked " << *sp << pdb;
}
else
{
const package_location& pl (ap->locations[0]);
assert (pl.repository_fragment.object_id () == ""); // Special root.
- transaction t (db, !simulate /* start */);
+ transaction t (pdb, !simulate /* start */);
sp = pkg_unpack (o,
- c,
+ pdb,
t,
path_cast<dir_path> (pl.location),
true, // Replace.
@@ -4884,9 +5580,11 @@ namespace bpkg
simulate);
if (verbose && !o.no_result ())
- text << "using " << *sp << " (external)";
+ text << "using " << *sp << pdb << " (external)";
}
+ r = true;
+
assert (sp->state == package_state::unpacked);
}
@@ -4919,14 +5617,25 @@ namespace bpkg
if (sp != nullptr && sp->state == package_state::configured)
continue;
- transaction t (db, !simulate /* start */);
+ database& pdb (p.db);
+
+ transaction t (pdb, !simulate /* start */);
// Note that pkg_configure() commits the transaction.
//
if (p.system)
- sp = pkg_configure_system (ap->id.name, p.available_version (), t);
+ sp = pkg_configure_system (ap->id.name,
+ p.available_version (),
+ pdb,
+ t);
else if (ap != nullptr)
- pkg_configure (c, o, t, sp, ap->dependencies, p.config_vars, simulate);
+ pkg_configure (o,
+ pdb,
+ t,
+ sp,
+ ap->dependencies,
+ p.config_vars,
+ simulate);
else // Dependent.
{
// Must be in the unpacked state since it was disfigured on the first
@@ -4935,12 +5644,12 @@ namespace bpkg
assert (sp->state == package_state::unpacked);
package_manifest m (
- pkg_verify (sp->effective_src_root (c),
+ pkg_verify (sp->effective_src_root (pdb.config_orig),
true /* ignore_unknown */,
[&sp] (version& v) {v = sp->version;}));
- pkg_configure (c,
- o,
+ pkg_configure (o,
+ p.db,
t,
sp,
convert (move (m.dependencies)),
@@ -4948,10 +5657,12 @@ namespace bpkg
simulate);
}
+ r = true;
+
assert (sp->state == package_state::configured);
if (verbose && !o.no_result ())
- text << "configured " << *sp;
+ text << "configured " << *sp << pdb;
}
// Update the hold state.
@@ -4966,6 +5677,8 @@ namespace bpkg
if (*p.action == build_package::drop)
continue;
+ database& pdb (p.db);
+
const shared_ptr<selected_package>& sp (p.selected);
assert (sp != nullptr);
@@ -4986,19 +5699,23 @@ namespace bpkg
sp->hold_package = hp;
sp->hold_version = hv;
- transaction t (db, !simulate /* start */);
- db.update (sp);
+ transaction t (pdb, !simulate /* start */);
+ pdb.update (sp);
t.commit ();
+ r = true;
+
if (verbose > 1)
{
if (hp)
- text << "holding package " << sp->name;
+ text << "holding package " << sp->name << pdb;
if (hv)
- text << "holding version " << *sp;
+ text << "holding version " << *sp << pdb;
}
}
}
+
+ return r;
}
}
diff --git a/bpkg/pkg-checkout.cxx b/bpkg/pkg-checkout.cxx
index 3b99496..b184bfd 100644
--- a/bpkg/pkg-checkout.cxx
+++ b/bpkg/pkg-checkout.cxx
@@ -26,7 +26,8 @@ namespace bpkg
checkout (const common_options& o,
const repository_location& rl,
const dir_path& dir,
- const shared_ptr<available_package>& ap)
+ const shared_ptr<available_package>& ap,
+ database& db)
{
switch (rl.type ())
{
@@ -43,7 +44,7 @@ namespace bpkg
//
if (verb && !o.no_progress ())
text << "checking out "
- << package_string (ap->id.name, ap->version);
+ << package_string (ap->id.name, ap->version) << db;
git_checkout_submodules (o, rl, dir);
}
@@ -84,7 +85,7 @@ namespace bpkg
//
static shared_ptr<selected_package>
pkg_checkout (const common_options& o,
- dir_path c,
+ database& db,
transaction& t,
package_name n,
version v,
@@ -95,9 +96,10 @@ namespace bpkg
{
tracer trace ("pkg_checkout");
- database& db (t.database ());
tracer_guard tg (db, trace);
+ const dir_path& c (db.config_orig);
+
// See if this package already exists in this configuration.
//
shared_ptr<selected_package> p (db.find<selected_package> (n));
@@ -121,13 +123,15 @@ namespace bpkg
}
}
- check_any_available (c, t);
+ database& mdb (db.main_database ());
+
+ check_any_available (mdb, t);
// Note that here we compare including the revision (see pkg-fetch()
// implementation for more details).
//
shared_ptr<available_package> ap (
- db.find<available_package> (available_package_id (n, v)));
+ mdb.find<available_package> (available_package_id (n, v)));
if (ap == nullptr)
fail << "package " << n << " " << v << " is not available";
@@ -185,7 +189,7 @@ namespace bpkg
    // if the previous checkout has failed or been interrupted.
//
dir_path sd (repository_state (rl));
- dir_path rd (c / repos_dir / sd);
+ dir_path rd (mdb.config_orig / repos_dir / sd);
if (!exists (rd))
fail << "missing repository directory for package " << n << " " << v
@@ -217,7 +221,7 @@ namespace bpkg
// Checkout the repository fragment and fix up the working tree.
//
- checkout (o, rl, td, ap);
+ checkout (o, rl, td, ap, db);
bool fixedup (fixup (o, rl, td));
// Calculate the package path that points into the checked out fragment
@@ -258,7 +262,7 @@ namespace bpkg
// build system's actual progress.
//
if (verb == 1 && !o.no_progress ())
- text << "distributing " << n << '/' << v;
+ text << "distributing " << n << '/' << v << db;
run_b (o,
verb_b::progress,
@@ -301,7 +305,7 @@ namespace bpkg
// replacing. Once this is done, there is no going back. If things go
// badly, we can't simply abort the transaction.
//
- pkg_purge_fs (c, t, p, simulate);
+ pkg_purge_fs (db, t, p, simulate);
// Note that if the package name spelling changed then we need to update
// it, to make sure that the subsequent commands don't fail and the
@@ -315,15 +319,14 @@ namespace bpkg
}
}
- // Make the package and configuration paths absolute and normalized.
- // If the package is inside the configuration, use the relative path.
- // This way we can move the configuration around.
+ // Make the package path absolute and normalized. If the package is inside
+ // the configuration, use the relative path. This way we can move the
+ // configuration around.
//
- normalize (c, "configuration");
normalize (d, "package");
- if (d.sub (c))
- d = d.leaf (c);
+ if (d.sub (db.config))
+ d = d.leaf (db.config);
if (p != nullptr)
{
@@ -367,7 +370,7 @@ namespace bpkg
shared_ptr<selected_package>
pkg_checkout (const common_options& o,
- const dir_path& c,
+ database& db,
transaction& t,
package_name n,
version v,
@@ -377,7 +380,7 @@ namespace bpkg
bool simulate)
{
return pkg_checkout (o,
- c,
+ db,
t,
move (n),
move (v),
@@ -389,7 +392,7 @@ namespace bpkg
shared_ptr<selected_package>
pkg_checkout (const common_options& o,
- const dir_path& c,
+ database& db,
transaction& t,
package_name n,
version v,
@@ -397,7 +400,7 @@ namespace bpkg
bool simulate)
{
return pkg_checkout (o,
- c,
+ db,
t,
move (n),
move (v),
@@ -415,7 +418,7 @@ namespace bpkg
dir_path c (o.directory ());
l4 ([&]{trace << "configuration: " << c;});
- database db (open (c, trace));
+ database db (c, trace, true /* pre_attach */);
transaction t (db);
session s;
@@ -437,7 +440,7 @@ namespace bpkg
//
if (o.output_root_specified ())
p = pkg_checkout (o,
- c,
+ db,
t,
move (n),
move (v),
@@ -447,7 +450,7 @@ namespace bpkg
false /* simulate */);
else
p = pkg_checkout (o,
- c,
+ db,
t,
move (n),
move (v),
diff --git a/bpkg/pkg-checkout.hxx b/bpkg/pkg-checkout.hxx
index 47b1ad0..3a058b5 100644
--- a/bpkg/pkg-checkout.hxx
+++ b/bpkg/pkg-checkout.hxx
@@ -25,7 +25,7 @@ namespace bpkg
//
shared_ptr<selected_package>
pkg_checkout (const common_options&,
- const dir_path& configuration,
+ database&,
transaction&,
package_name,
version,
@@ -40,7 +40,7 @@ namespace bpkg
//
shared_ptr<selected_package>
pkg_checkout (const common_options&,
- const dir_path& configuration,
+ database&,
transaction&,
package_name,
version,
diff --git a/bpkg/pkg-command.cxx b/bpkg/pkg-command.cxx
index 11f10f0..6bec97b 100644
--- a/bpkg/pkg-command.cxx
+++ b/bpkg/pkg-command.cxx
@@ -18,7 +18,6 @@ namespace bpkg
{
void
pkg_command (const string& cmd,
- const dir_path& c,
const common_options& o,
const string& cmd_v,
const strings& cvars,
@@ -78,7 +77,7 @@ namespace bpkg
assert (p->state == package_state::configured);
assert (p->out_root); // Should be present since configured.
- dir_path out_root (p->effective_out_root (c));
+ dir_path out_root (p->effective_out_root (pv.config_orig));
l4 ([&]{trace << p->name << " out_root: " << out_root;});
if (bspec.back () != '(')
@@ -133,11 +132,17 @@ namespace bpkg
[&d] (const pkg_command_vars& i) {return i.pkg == d;}) ==
ps.end ())
{
+ database& db (pr.first.database ());
+
// Note: no package-specific variables (global ones still apply).
//
- ps.push_back (pkg_command_vars {d,
- strings () /* vars */,
- package_cwd});
+ ps.push_back (
+ pkg_command_vars {
+ db.config_orig,
+ db.main (),
+ d,
+ strings () /* vars */,
+ package_cwd});
if (recursive)
collect_dependencies (d, recursive, package_cwd, ps);
@@ -248,7 +253,7 @@ namespace bpkg
vector<pkg_command_vars> ps;
{
- database db (open (c, trace));
+ database db (c, trace, true /* pre_attach */);
transaction t (db);
// We need to suppress duplicate dependencies for the recursive command
@@ -256,11 +261,16 @@ namespace bpkg
//
session ses;
- auto add = [&ps, recursive, immediate, package_cwd] (
+ auto add = [&db, &ps, recursive, immediate, package_cwd] (
const shared_ptr<selected_package>& p,
strings vars)
{
- ps.push_back (pkg_command_vars {p, move (vars), package_cwd});
+ ps.push_back (
+ pkg_command_vars {db.config_orig,
+ db.main (),
+ p,
+ move (vars),
+ package_cwd});
// Note that it can only be recursive or immediate but not both.
//
@@ -310,13 +320,13 @@ namespace bpkg
<< "configuration " << c;
if (p->state != package_state::configured)
- fail << "package " << a.name << " is " << p->state <<
+ fail << "package " << a.name << db << " is " << p->state <<
info << "expected it to be configured";
if (p->substate == package_substate::system)
- fail << "cannot " << cmd << " system package " << a.name;
+ fail << "cannot " << cmd << " system package " << a.name << db;
- l4 ([&]{trace << *p;});
+ l4 ([&]{trace << *p << db;});
add (p, move (a.vars));
}
@@ -325,14 +335,27 @@ namespace bpkg
t.commit ();
}
- pkg_command (cmd, c, o, cmd_v, cvars, ps);
+ pkg_command (cmd, o, cmd_v, cvars, ps);
if (verb && !o.no_result ())
{
for (const pkg_command_vars& pv: ps)
- text << cmd << (cmd.back () != 'e' ? "ed " : "d ") << *pv.pkg;
+ text << cmd << (cmd.back () != 'e' ? "ed " : "d ") << pv.string ();
}
return 0;
}
+
+ // pkg_command_vars
+ //
+ string pkg_command_vars::
+ string () const
+ {
+ std::string r (pkg->string ());
+
+ if (!config_main)
+ r += " [" + config_orig.representation () + ']';
+
+ return r;
+ }
}
diff --git a/bpkg/pkg-command.hxx b/bpkg/pkg-command.hxx
index 40a55f2..9feba0f 100644
--- a/bpkg/pkg-command.hxx
+++ b/bpkg/pkg-command.hxx
@@ -20,6 +20,8 @@ namespace bpkg
// The command can also be performed recursively for all or immediate
// dependencies of the specified or all the held packages.
//
+ // Note: loads selected packages.
+ //
int
pkg_command (const string& cmd, // Without the 'pkg-' prefix.
const configuration_options&,
@@ -33,15 +35,34 @@ namespace bpkg
struct pkg_command_vars
{
+ // Configuration information.
+ //
+ // Used to derive the package out_root directory, issue diagnostics, etc.
+ //
+ // Note that we cannot store the database reference here since it can be
+ // closed by the time this information is used. Instead, we save the
+ // required information.
+ //
+ dir_path config_orig; // Database's config_orig.
+ bool config_main; // True if database is main.
+
shared_ptr<selected_package> pkg;
- strings vars; // Package-specific command line vars.
+ strings vars; // Package-specific command line vars.
bool cwd; // Change the working directory to the package directory.
+
+ // Return the selected package name/version followed by the configuration
+ // directory, unless this is the main configuration. For example:
+ //
+ // libfoo/1.1.0
+ // libfoo/1.1.0 [cfg/]
+ //
+ std::string
+ string () const;
};
void
pkg_command (const string& cmd,
- const dir_path& configuration,
const common_options&,
const string& cmd_variant,
const strings& common_vars,
diff --git a/bpkg/pkg-configure.cxx b/bpkg/pkg-configure.cxx
index cd55575..5c26fca 100644
--- a/bpkg/pkg-configure.cxx
+++ b/bpkg/pkg-configure.cxx
@@ -20,14 +20,13 @@ namespace bpkg
{
package_prerequisites
pkg_configure_prerequisites (const common_options& o,
- transaction& t,
+ database& db,
+ transaction&,
const dependencies& deps,
const package_name& package)
{
package_prerequisites r;
- database& db (t.database ());
-
for (const dependency_alternatives_ex& da: deps)
{
assert (!da.conditional); //@@ TODO
@@ -57,13 +56,12 @@ namespace bpkg
satisfied = true;
break;
}
- // else
- //
- // @@ TODO: in the future we would need to at least make sure the
- // build and target machines are the same. See also pkg-build.
}
- if (shared_ptr<selected_package> dp = db.find<selected_package> (n))
+ pair<shared_ptr<selected_package>, database*> spd (
+ find_dependency (db, n, da.buildtime));
+
+ if (const shared_ptr<selected_package>& dp = spd.first)
{
if (dp->state != package_state::configured)
continue;
@@ -71,27 +69,32 @@ namespace bpkg
if (!satisfies (dp->version, d.constraint))
continue;
- auto p (r.emplace (dp, d.constraint));
-
- // Currently we can only capture a single constraint, so if we
- // already have a dependency on this package and one constraint is
- // not a subset of the other, complain.
+ // See the package_prerequisites definition for details on creating
+ // the map keys with the database passed.
//
- if (!p.second)
- {
- auto& c (p.first->second);
-
- bool s1 (satisfies (c, d.constraint));
- bool s2 (satisfies (d.constraint, c));
-
- if (!s1 && !s2)
- fail << "multiple dependencies on package " << n <<
- info << n << " " << *c <<
- info << n << " " << *d.constraint;
-
- if (s2 && !s1)
- c = d.constraint;
- }
+ auto p (
+ r.emplace (lazy_shared_ptr<selected_package> (*spd.second, dp),
+ d.constraint));
+
+ // Currently we can only capture a single constraint, so if we
+ // already have a dependency on this package and one constraint is
+ // not a subset of the other, complain.
+ //
+ if (!p.second)
+ {
+ auto& c (p.first->second);
+
+ bool s1 (satisfies (c, d.constraint));
+ bool s2 (satisfies (d.constraint, c));
+
+ if (!s1 && !s2)
+ fail << "multiple dependencies on package " << n <<
+ info << n << " " << *c <<
+ info << n << " " << *d.constraint;
+
+ if (s2 && !s1)
+ c = d.constraint;
+ }
satisfied = true;
break;
@@ -106,8 +109,8 @@ namespace bpkg
}
void
- pkg_configure (const dir_path& c,
- const common_options& o,
+ pkg_configure (const common_options& o,
+ database& db,
transaction& t,
const shared_ptr<selected_package>& p,
const dependencies& deps,
@@ -119,9 +122,9 @@ namespace bpkg
assert (p->state == package_state::unpacked);
assert (p->src_root); // Must be set since unpacked.
- database& db (t.database ());
tracer_guard tg (db, trace);
+ const dir_path& c (db.config_orig);
dir_path src_root (p->effective_src_root (c));
// Calculate package's out_root.
@@ -139,10 +142,32 @@ namespace bpkg
//
assert (p->prerequisites.empty ());
- p->prerequisites = pkg_configure_prerequisites (o, t, deps, p->name);
+ p->prerequisites = pkg_configure_prerequisites (o, db, t, deps, p->name);
if (!simulate)
{
+ // Add the config.import.* variables for prerequisites from the
+ // associated configurations.
+ //
+ strings imports;
+
+ for (const auto& pp: p->prerequisites)
+ {
+ database& pdb (pp.first.database ());
+
+ if (pdb != db)
+ {
+ shared_ptr<selected_package> sp (pp.first.load ());
+
+ if (!sp->system ())
+ {
+ dir_path od (sp->effective_out_root (pdb.config));
+ imports.push_back ("config.import." + sp->name.string () + "='" +
+ od.representation () + "'");
+ }
+ }
+ }
+
// Form the buildspec.
//
string bspec;
@@ -162,7 +187,7 @@ namespace bpkg
//
try
{
- run_b (o, verb_b::quiet, vars, bspec);
+ run_b (o, verb_b::quiet, imports, vars, bspec);
}
catch (const failed&)
{
@@ -180,7 +205,7 @@ namespace bpkg
// Commits the transaction.
//
- pkg_disfigure (c, o, t, p, true /* clean */, false /* simulate */);
+ pkg_disfigure (o, db, t, p, true /* clean */, false /* simulate */);
throw;
}
}
@@ -195,11 +220,11 @@ namespace bpkg
shared_ptr<selected_package>
pkg_configure_system (const package_name& n,
const version& v,
+ database& db,
transaction& t)
{
tracer trace ("pkg_configure_system");
- database& db (t.database ());
tracer_guard tg (db, trace);
shared_ptr<selected_package> p (
@@ -269,7 +294,7 @@ namespace bpkg
if (ps == package_scheme::sys && !vars.empty ())
fail << "configuration variables specified for a system package";
- database db (open (c, trace));
+ database db (c, trace, true /* pre_attach */);
transaction t (db);
session s;
@@ -297,7 +322,7 @@ namespace bpkg
if (filter_one (root, db.query<available_package> (q)).first == nullptr)
fail << "unknown package " << n;
- p = pkg_configure_system (n, v.empty () ? wildcard_version : v, t);
+ p = pkg_configure_system (n, v.empty () ? wildcard_version : v, db, t);
}
else
{
@@ -319,8 +344,8 @@ namespace bpkg
true /* ignore_unknown */,
[&p] (version& v) {v = p->version;}));
- pkg_configure (c,
- o,
+ pkg_configure (o,
+ db,
t,
p,
convert (move (m.dependencies)),
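// A hypothetical helper mirroring how the config.import.* overrides are
// composed for cross-configuration prerequisites in pkg_configure () above;
// the package name and out_root value in the comment below are made up for
// illustration.
//
#include <string>

static std::string
import_override (const std::string& pkg, const std::string& out_root)
{
  return "config.import." + pkg + "='" + out_root + "'";
}

// import_override ("libfoo", "/tmp/host-cfg/libfoo/") returns
// "config.import.libfoo='/tmp/host-cfg/libfoo/'".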
diff --git a/bpkg/pkg-configure.hxx b/bpkg/pkg-configure.hxx
index b708df5..e9e64b6 100644
--- a/bpkg/pkg-configure.hxx
+++ b/bpkg/pkg-configure.hxx
@@ -26,8 +26,8 @@ namespace bpkg
// Configure the package, update its state, and commit the transaction.
//
void
- pkg_configure (const dir_path& configuration,
- const common_options&,
+ pkg_configure (const common_options&,
+ database&,
transaction&,
const shared_ptr<selected_package>&,
const dependencies&,
@@ -37,15 +37,21 @@ namespace bpkg
// Configure a system package and commit the transaction.
//
shared_ptr<selected_package>
- pkg_configure_system (const package_name&, const version&, transaction&);
+ pkg_configure_system (const package_name&,
+ const version&,
+ database&,
+ transaction&);
// Return package prerequisites given its dependencies. Fail if some of the
// prerequisites are not configured or don't satisfy the package's
// dependency constraints. Note that the package argument is used for
// diagnostics only.
//
+ // Note: loads selected packages.
+ //
package_prerequisites
pkg_configure_prerequisites (const common_options&,
+ database&,
transaction&,
const dependencies&,
const package_name&);
diff --git a/bpkg/pkg-disfigure.cxx b/bpkg/pkg-disfigure.cxx
index 9347bbc..1c356f0 100644
--- a/bpkg/pkg-disfigure.cxx
+++ b/bpkg/pkg-disfigure.cxx
@@ -15,8 +15,8 @@ using namespace butl;
namespace bpkg
{
void
- pkg_disfigure (const dir_path& c,
- const common_options& o,
+ pkg_disfigure (const common_options& o,
+ database& db,
transaction& t,
const shared_ptr<selected_package>& p,
bool clean,
@@ -29,28 +29,30 @@ namespace bpkg
l4 ([&]{trace << *p;});
- database& db (t.database ());
tracer_guard tg (db, trace);
// Check that we have no dependents.
//
if (p->state == package_state::configured)
{
- using query = query<package_dependent>;
-
- auto r (db.query<package_dependent> (query::name == p->name));
-
- if (!r.empty ())
+ diag_record dr;
+ for (database& ddb: db.dependent_configs ())
{
- diag_record dr;
- dr << fail << "package " << p->name << " still has dependents:";
+ auto r (query_dependents (ddb, p->name, db));
- for (const package_dependent& pd: r)
+ if (!r.empty ())
{
- dr << info << "package " << pd.name;
+ if (dr.empty ())
+ dr << fail << "package " << p->name << db << " still has "
+ << "dependents:";
- if (pd.constraint)
- dr << " on " << p->name << " " << *pd.constraint;
+ for (const package_dependent& pd: r)
+ {
+ dr << info << "package " << pd.name << ddb;
+
+ if (pd.constraint)
+ dr << " on " << p->name << " " << *pd.constraint;
+ }
}
}
}
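// The dependents check above only opens the failure diagnostics once the
// first dependent is found in any of the dependent configurations and then
// keeps appending per-dependent info lines. Roughly the same pattern with a
// plain string buffer instead of diag_record (types here are made up):
//
#include <sstream>
#include <string>
#include <vector>

struct dependent_info { std::string name; std::string config; };

static std::string
format_dependents (const std::string& pkg,
                   const std::vector<dependent_info>& deps)
{
  std::ostringstream os;

  for (const dependent_info& d: deps)
  {
    if (os.tellp () == 0) // First dependent: open the error message.
      os << "package " << pkg << " still has dependents:";

    os << "\n  info: package " << d.name << " [" << d.config << ']';
  }

  return os.str (); // Empty if there are no dependents.
}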
@@ -75,8 +77,8 @@ namespace bpkg
if (!simulate)
{
- dir_path src_root (p->effective_src_root (c));
- dir_path out_root (p->effective_out_root (c));
+ dir_path src_root (p->effective_src_root (db.config_orig));
+ dir_path out_root (p->effective_out_root (db.config_orig));
l4 ([&]{trace << "src_root: " << src_root << ", "
<< "out_root: " << out_root;});
@@ -105,8 +107,8 @@ namespace bpkg
if (src_root == out_root)
bspec = "disfigure('" + rep + "')";
else
- bspec = "disfigure('" + src_root.representation () + "'@'" +
- rep + "')";
+ bspec = "disfigure('" + src_root.representation () + "'@'" + rep +
+ "')";
}
l4 ([&]{trace << "buildspec: " << bspec;});
@@ -179,7 +181,7 @@ namespace bpkg
db.update (p);
t.commit ();
- info << "package " << p->name << " is now broken; "
+ info << "package " << p->name << db << " is now broken; "
<< "use 'pkg-purge' to remove";
throw;
}
@@ -207,7 +209,7 @@ namespace bpkg
package_name n (parse_package_name (args.next (),
false /* allow_version */));
- database db (open (c, trace));
+ database db (c, trace, true /* pre_attach */);
transaction t (db);
shared_ptr<selected_package> p (db.find<selected_package> (n));
@@ -221,7 +223,7 @@ namespace bpkg
// Commits the transaction.
//
- pkg_disfigure (c, o, t, p, !o.keep_out (), false /* simulate */);
+ pkg_disfigure (o, db, t, p, !o.keep_out (), false /* simulate */);
assert (p->state == package_state::unpacked ||
p->state == package_state::transient);
diff --git a/bpkg/pkg-disfigure.hxx b/bpkg/pkg-disfigure.hxx
index 5121050..d15b007 100644
--- a/bpkg/pkg-disfigure.hxx
+++ b/bpkg/pkg-disfigure.hxx
@@ -22,8 +22,8 @@ namespace bpkg
// for that matter).
//
void
- pkg_disfigure (const dir_path& configuration,
- const common_options&,
+ pkg_disfigure (const common_options&,
+ database&,
transaction&,
const shared_ptr<selected_package>&,
bool clean,
diff --git a/bpkg/pkg-drop.cxx b/bpkg/pkg-drop.cxx
index 6ea6769..e060e96 100644
--- a/bpkg/pkg-drop.cxx
+++ b/bpkg/pkg-drop.cxx
@@ -33,6 +33,7 @@ namespace bpkg
struct drop_package
{
+ database& db;
shared_ptr<selected_package> package;
drop_reason reason;
};
@@ -41,7 +42,9 @@ namespace bpkg
//
struct dependent_name
{
+ database& db;
package_name name;
+ database& prq_db;
package_name prq_name; // Prerequisite package name.
};
using dependent_names = vector<dependent_name>;
@@ -69,17 +72,20 @@ namespace bpkg
// Collect a package to be dropped, by default, as a user selection.
//
bool
- collect (shared_ptr<selected_package> p, drop_reason r = drop_reason::user)
+ collect (database& db,
+ shared_ptr<selected_package> p,
+ drop_reason r = drop_reason::user)
{
package_name n (p->name); // Because of move(p) below.
- return map_.emplace (move (n), data_type {end (), {move (p), r}}).second;
+ return map_.emplace (config_package {db, move (n)},
+ data_type {end (), {db, move (p), r}}).second;
}
- // Collect all the dependets of the user selection returning the list
+ // Collect all the dependents of the user selection returning the list
// of their names. Dependents of dependents are collected recursively.
//
dependent_names
- collect_dependents (database& db)
+ collect_dependents ()
{
dependent_names dns;
@@ -91,7 +97,7 @@ namespace bpkg
//
if (dp.reason != drop_reason::dependent &&
dp.package->state == package_state::configured)
- collect_dependents (db, dns, dp.package);
+ collect_dependents (pr.first.db, dp.package, dns);
}
return dns;
@@ -99,21 +105,22 @@ namespace bpkg
void
collect_dependents (database& db,
- dependent_names& dns,
- const shared_ptr<selected_package>& p)
+ const shared_ptr<selected_package>& p,
+ dependent_names& dns)
{
- using query = query<package_dependent>;
-
- for (auto& pd: db.query<package_dependent> (query::name == p->name))
+ for (database& ddb: db.dependent_configs ())
{
- const package_name& dn (pd.name);
-
- if (map_.find (dn) == map_.end ())
+ for (auto& pd: query_dependents (ddb, p->name, db))
{
- shared_ptr<selected_package> dp (db.load<selected_package> (dn));
- dns.push_back (dependent_name {dn, p->name});
- collect (dp, drop_reason::dependent);
- collect_dependents (db, dns, dp);
+ const package_name& dn (pd.name);
+
+ if (map_.find (ddb, dn) == map_.end ())
+ {
+ shared_ptr<selected_package> dp (ddb.load<selected_package> (dn));
+ dns.push_back (dependent_name {ddb, dn, db, p->name});
+ collect (ddb, dp, drop_reason::dependent);
+ collect_dependents (ddb, dp, dns);
+ }
}
}
}
@@ -123,7 +130,7 @@ namespace bpkg
// are collected recursively.
//
bool
- collect_prerequisites (database& db)
+ collect_prerequisites ()
{
bool r (false);
@@ -136,29 +143,30 @@ namespace bpkg
if ((dp.reason == drop_reason::user ||
dp.reason == drop_reason::dependent) &&
dp.package->state == package_state::configured)
- r = collect_prerequisites (db, dp.package) || r;
+ r = collect_prerequisites (dp.package) || r;
}
return r;
}
bool
- collect_prerequisites (database& db, const shared_ptr<selected_package>& p)
+ collect_prerequisites (const shared_ptr<selected_package>& p)
{
bool r (false);
for (const auto& pair: p->prerequisites)
{
const lazy_shared_ptr<selected_package>& lpp (pair.first);
+ database& pdb (lpp.database ());
- if (map_.find (lpp.object_id ()) == map_.end ())
+ if (map_.find (pdb, lpp.object_id ()) == map_.end ())
{
shared_ptr<selected_package> pp (lpp.load ());
if (!pp->hold_package) // Prune held packages.
{
- collect (pp, drop_reason::prerequisite);
- collect_prerequisites (db, pp);
+ collect (pdb, pp, drop_reason::prerequisite);
+ collect_prerequisites (pp);
r = true;
}
}
@@ -171,11 +179,11 @@ namespace bpkg
// returning its positions.
//
iterator
- order (const package_name& name)
+ order (database& db, const package_name& name)
{
// Every package that we order should have already been collected.
//
- auto mi (map_.find (name));
+ auto mi (map_.find (db, name));
assert (mi != map_.end ());
// If this package is already in the list, then that would also
@@ -214,13 +222,14 @@ namespace bpkg
{
for (const auto& pair: p->prerequisites)
{
+ database& pdb (pair.first.database ());
const package_name& pn (pair.first.object_id ());
// The prerequisites may not necessarily be in the map (e.g.,
          // a held package that we pruned).
//
- if (map_.find (pn) != map_.end ())
- update (order (pn));
+ if (map_.find (pdb, pn) != map_.end ())
+ update (order (pdb, pn));
}
}
@@ -231,7 +240,7 @@ namespace bpkg
// true if any remain.
//
bool
- filter_prerequisites (database& db)
+ filter_prerequisites ()
{
bool r (false);
@@ -244,27 +253,32 @@ namespace bpkg
if (dp.reason == drop_reason::prerequisite)
{
const shared_ptr<selected_package>& p (dp.package);
+ database& db (dp.db);
bool keep (true);
// Get our dependents (which, BTW, could only have been before us
// on the list). If they are all in the map, then we can be dropped.
//
- using query = query<package_dependent>;
-
- for (auto& pd: db.query<package_dependent> (query::name == p->name))
+ for (database& ddb: db.dependent_configs ())
{
- if (map_.find (pd.name) == map_.end ())
+ for (auto& pd: query_dependents (ddb, p->name, db))
{
- keep = false;
- break;
+ if (map_.find (ddb, pd.name) == map_.end ())
+ {
+ keep = false;
+ break;
+ }
}
+
+ if (!keep)
+ break;
}
if (!keep)
{
i = erase (i);
- map_.erase (p->name);
+ map_.erase (config_package {db, p->name});
continue;
}
@@ -284,15 +298,24 @@ namespace bpkg
drop_package package;
};
- map<package_name, data_type> map_;
+ class config_package_map: public map<config_package, data_type>
+ {
+ public:
+ using base_type = map<config_package, data_type>;
+
+ iterator
+ find (database& db, const package_name& pn)
+ {
+ return base_type::find (config_package {db, pn});
+ }
+ };
+ config_package_map map_;
};
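// The config_package_map above is essentially a std::map keyed by a
// (configuration, package name) pair with a lookup convenience. A
// simplified stand-in that uses configuration directory strings instead of
// database references:
//
#include <map>
#include <string>
#include <utility>

using config_package_key = std::pair<std::string, std::string>;

template <typename T>
struct config_package_map_sketch: std::map<config_package_key, T>
{
  using base = std::map<config_package_key, T>;

  typename base::iterator
  find (const std::string& config, const std::string& name)
  {
    return base::find (config_package_key (config, name));
  }

  using base::find; // Keep the regular key-based overloads as well.
};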
// Drop ordered list of packages.
//
static int
- pkg_drop (const dir_path& c,
- const pkg_drop_options& o,
- database& db,
+ pkg_drop (const pkg_drop_options& o,
const drop_packages& pkgs,
bool drop_prq,
bool need_prompt)
@@ -330,11 +353,11 @@ namespace bpkg
}
if (o.print_only ())
- cout << "drop " << p->name << endl;
+ cout << "drop " << p->name << dp.db << endl;
else if (verb)
// Print indented for better visual separation.
//
- text << " drop " << p->name;
+ text << " drop " << p->name << dp.db;
}
if (o.print_only ())
@@ -365,6 +388,8 @@ namespace bpkg
if (p->state != package_state::configured)
continue;
+ database& db (dp.db);
+
// Each package is disfigured in its own transaction, so that we always
// leave the configuration in a valid state.
//
@@ -372,7 +397,7 @@ namespace bpkg
// Commits the transaction.
//
- pkg_disfigure (c, o, t, p, true /* clean */, false /* simulate */);
+ pkg_disfigure (o, db, t, p, true /* clean */, false /* simulate */);
assert (p->state == package_state::unpacked ||
p->state == package_state::transient);
@@ -380,7 +405,7 @@ namespace bpkg
if (verb && !o.no_result ())
text << (p->state == package_state::transient
? "purged "
- : "disfigured ") << p->name;
+ : "disfigured ") << p->name << db;
}
if (o.disfigure_only ())
@@ -403,14 +428,16 @@ namespace bpkg
assert (p->state == package_state::fetched ||
p->state == package_state::unpacked);
+ database& db (dp.db);
+
transaction t (db);
// Commits the transaction, p is now transient.
//
- pkg_purge (c, t, p, false /* simulate */);
+ pkg_purge (db, t, p, false /* simulate */);
if (verb && !o.no_result ())
- text << "purged " << p->name;
+ text << "purged " << p->name << db;
}
return 0;
@@ -436,7 +463,7 @@ namespace bpkg
fail << "package name argument expected" <<
info << "run 'bpkg help pkg-drop' for more information";
- database db (open (c, trace));
+ database db (c, trace, true /* pre_attach */);
// Note that the session spans all our transactions. The idea here is
// that drop_package objects in the drop_packages list below will be
@@ -480,7 +507,7 @@ namespace bpkg
fail << "unable to drop broken package " << n <<
info << "use 'pkg-purge --force' to remove";
- if (pkgs.collect (move (p)))
+ if (pkgs.collect (db, move (p)))
names.push_back (move (n));
}
@@ -488,7 +515,7 @@ namespace bpkg
// already on the list. We will either have to drop those as well or
// abort.
//
- dependent_names dnames (pkgs.collect_dependents (db));
+ dependent_names dnames (pkgs.collect_dependents ());
if (!dnames.empty () && !o.drop_dependent ())
{
{
@@ -503,7 +530,8 @@ namespace bpkg
<< "as well:";
for (const dependent_name& dn: dnames)
- dr << text << dn.name << " (requires " << dn.prq_name << ")";
+ dr << text << dn.name << dn.db << " (requires " << dn.prq_name
+ << dn.prq_db << ")";
}
if (o.yes ())
@@ -526,7 +554,7 @@ namespace bpkg
// on the latter and, if that's the case and "more" cannot be dropped,
// then neither can "less".
//
- pkgs.collect_prerequisites (db);
+ pkgs.collect_prerequisites ();
// Now that we have collected all the packages we could possibly be
// dropping, arrange them in the "dependency order", that is, with
@@ -540,17 +568,17 @@ namespace bpkg
// on which it depends.
//
for (const package_name& n: names)
- pkgs.order (n);
+ pkgs.order (db, n);
for (const dependent_name& dn: dnames)
- pkgs.order (dn.name);
+ pkgs.order (dn.db, dn.name);
// Filter out prerequisites that we cannot possibly drop (e.g., they
// have dependents other than the ones we are dropping). If there are
// some that we can drop, ask the user for confirmation.
//
- if (pkgs.filter_prerequisites (db) &&
- !o.keep_unused () &&
+ if (pkgs.filter_prerequisites () &&
+ !o.keep_unused () &&
!(drop_prq = o.yes ()) && !o.no ())
{
{
@@ -563,7 +591,7 @@ namespace bpkg
{
if (dp.reason == drop_reason::prerequisite)
dr << text << (dp.package->system () ? "sys:" : "")
- << dp.package->name;
+ << dp.package->name << dp.db;
}
}
@@ -576,6 +604,6 @@ namespace bpkg
t.commit ();
}
- return pkg_drop (c, o, db, pkgs, drop_prq, need_prompt);
+ return pkg_drop (o, pkgs, drop_prq, need_prompt);
}
}
diff --git a/bpkg/pkg-fetch.cxx b/bpkg/pkg-fetch.cxx
index 24883c5..5155092 100644
--- a/bpkg/pkg-fetch.cxx
+++ b/bpkg/pkg-fetch.cxx
@@ -24,7 +24,7 @@ namespace bpkg
// Return the selected package object which may replace the existing one.
//
static shared_ptr<selected_package>
- pkg_fetch (dir_path c,
+ pkg_fetch (database& db,
transaction& t,
package_name n,
version v,
@@ -35,18 +35,16 @@ namespace bpkg
{
tracer trace ("pkg_fetch");
- database& db (t.database ());
tracer_guard tg (db, trace);
- // Make the archive and configuration paths absolute and normalized.
- // If the archive is inside the configuration, use the relative path.
- // This way we can move the configuration around.
+ // Make the archive path absolute and normalized. If the archive is
+ // inside the configuration, use the relative path. This way we can move
+ // the configuration around.
//
- normalize (c, "configuration");
normalize (a, "archive");
- if (a.sub (c))
- a = a.leaf (c);
+ if (a.sub (db.config))
+ a = a.leaf (db.config);
shared_ptr<selected_package> p (db.find<selected_package> (n));
if (p != nullptr)
@@ -55,7 +53,7 @@ namespace bpkg
// replacing. Once this is done, there is no going back. If things
// go badly, we can't simply abort the transaction.
//
- pkg_purge_fs (c, t, p, simulate);
+ pkg_purge_fs (db, t, p, simulate);
// Note that if the package name spelling changed then we need to update
// it, to make sure that the subsequent commands don't fail and the
@@ -113,14 +111,13 @@ namespace bpkg
// or fetching one.
//
static void
- pkg_fetch_check (const dir_path& c,
- transaction& t,
+ pkg_fetch_check (database& db,
+ transaction&,
const package_name& n,
bool replace)
{
tracer trace ("pkg_fetch_check");
- database& db (t.database ());
tracer_guard tg (db, trace);
if (shared_ptr<selected_package> p = db.find<selected_package> (n))
@@ -131,6 +128,7 @@ namespace bpkg
if (!replace || !s)
{
diag_record dr (fail);
+ const dir_path& c (db.config_orig);
dr << "package " << n << " already exists in configuration " << c <<
info << "version: " << p->version_string ()
@@ -145,7 +143,7 @@ namespace bpkg
shared_ptr<selected_package>
pkg_fetch (const common_options& co,
- const dir_path& c,
+ database& db,
transaction& t,
path a,
bool replace,
@@ -170,12 +168,12 @@ namespace bpkg
// Check/diagnose an already existing package.
//
- pkg_fetch_check (c, t, m.name, replace);
+ pkg_fetch_check (db, t, m.name, replace);
// Use the special root repository fragment as the repository fragment of
// this package.
//
- return pkg_fetch (c,
+ return pkg_fetch (db,
t,
move (m.name),
move (m.version),
@@ -187,7 +185,7 @@ namespace bpkg
shared_ptr<selected_package>
pkg_fetch (const common_options& co,
- const dir_path& c,
+ database& db,
transaction& t,
package_name n,
version v,
@@ -196,14 +194,15 @@ namespace bpkg
{
tracer trace ("pkg_fetch");
- database& db (t.database ());
tracer_guard tg (db, trace);
// Check/diagnose an already existing package.
//
- pkg_fetch_check (c, t, n, replace);
+ pkg_fetch_check (db, t, n, replace);
- check_any_available (c, t);
+ database& mdb (db.main_database ());
+
+ check_any_available (mdb, t);
// Note that here we compare including the revision (unlike, say in
// pkg-status). Which means one cannot just specify 1.0.0 and get 1.0.0+1
@@ -211,7 +210,7 @@ namespace bpkg
// a low-level command where some extra precision doesn't hurt.
//
shared_ptr<available_package> ap (
- db.find<available_package> (available_package_id (n, v)));
+ mdb.find<available_package> (available_package_id (n, v)));
if (ap == nullptr)
fail << "package " << n << " " << v << " is not available";
@@ -243,7 +242,7 @@ namespace bpkg
<< "from " << pl->repository_fragment->name;
auto_rmfile arm;
- path a (c / pl->location.leaf ());
+ path a (db.config_orig / pl->location.leaf ());
if (!simulate)
{
@@ -264,12 +263,12 @@ namespace bpkg
info << "fetched archive has " << sha256sum <<
info << "consider re-fetching package list and trying again" <<
info << "if problem persists, consider reporting this to "
- << "the repository maintainer";
+ << "the repository maintainer";
}
}
shared_ptr<selected_package> p (
- pkg_fetch (c,
+ pkg_fetch (db,
t,
move (n),
move (v),
@@ -290,7 +289,7 @@ namespace bpkg
dir_path c (o.directory ());
l4 ([&]{trace << "configuration: " << c;});
- database db (open (c, trace));
+ database db (c, trace, true /* pre_attach */);
transaction t (db);
session s;
@@ -305,7 +304,7 @@ namespace bpkg
info << "run 'bpkg help pkg-fetch' for more information";
p = pkg_fetch (o,
- c,
+ db,
t,
path (args.next ()),
o.replace (),
@@ -327,7 +326,7 @@ namespace bpkg
info << "run 'bpkg help pkg-fetch' for more information";
p = pkg_fetch (o,
- c,
+ db,
t,
move (n),
move (v),
diff --git a/bpkg/pkg-fetch.hxx b/bpkg/pkg-fetch.hxx
index e9d753b..9dd53f6 100644
--- a/bpkg/pkg-fetch.hxx
+++ b/bpkg/pkg-fetch.hxx
@@ -23,7 +23,7 @@ namespace bpkg
//
shared_ptr<selected_package>
pkg_fetch (const common_options&,
- const dir_path& configuration,
+ database&,
transaction&,
path archive,
bool replace,
@@ -36,7 +36,7 @@ namespace bpkg
//
shared_ptr<selected_package>
pkg_fetch (const common_options&,
- const dir_path& configuration,
+ database&,
transaction&,
package_name,
version,
diff --git a/bpkg/pkg-purge.cxx b/bpkg/pkg-purge.cxx
index f6589bb..4fe040e 100644
--- a/bpkg/pkg-purge.cxx
+++ b/bpkg/pkg-purge.cxx
@@ -15,7 +15,7 @@ using namespace butl;
namespace bpkg
{
void
- pkg_purge_fs (const dir_path& c,
+ pkg_purge_fs (database& db,
transaction& t,
const shared_ptr<selected_package>& p,
bool simulate,
@@ -26,9 +26,10 @@ namespace bpkg
assert (p->state == package_state::fetched ||
p->state == package_state::unpacked);
- database& db (t.database ());
tracer_guard tg (db, trace);
+ const dir_path& c (db.config_orig);
+
try
{
if (p->purge_src)
@@ -76,14 +77,14 @@ namespace bpkg
db.update (p);
t.commit ();
- info << "package " << p->name << " is now broken; "
+ info << "package " << p->name << db << " is now broken; "
<< "use 'pkg-purge --force' to remove";
throw;
}
}
void
- pkg_purge (const dir_path& c,
+ pkg_purge (database& db,
transaction& t,
const shared_ptr<selected_package>& p,
bool simulate)
@@ -93,11 +94,10 @@ namespace bpkg
tracer trace ("pkg_purge");
- database& db (t.database ());
tracer_guard tg (db, trace);
assert (!p->out_root);
- pkg_purge_fs (c, t, p, simulate, true);
+ pkg_purge_fs (db, t, p, simulate, true);
db.erase (p);
t.commit ();
@@ -120,7 +120,7 @@ namespace bpkg
package_name n (parse_package_name (args.next (),
false /* allow_version */));
- database db (open (c, trace));
+ database db (c, trace, true /* pre_attach */);
transaction t (db);
shared_ptr<selected_package> p (db.find<selected_package> (n));
@@ -201,7 +201,7 @@ namespace bpkg
else
{
assert (!p->out_root);
- pkg_purge_fs (c, t, p, false /* simulate */, !o.keep ());
+ pkg_purge_fs (db, t, p, false /* simulate */, !o.keep ());
}
// Finally, update the database state.
diff --git a/bpkg/pkg-purge.hxx b/bpkg/pkg-purge.hxx
index 215e468..ac82bf4 100644
--- a/bpkg/pkg-purge.hxx
+++ b/bpkg/pkg-purge.hxx
@@ -19,7 +19,7 @@ namespace bpkg
// transaction. If this fails, set the package state to broken.
//
void
- pkg_purge (const dir_path& configuration,
+ pkg_purge (database&,
transaction&,
const shared_ptr<selected_package>&,
bool simulate);
@@ -29,7 +29,7 @@ namespace bpkg
// set the package state to broken, commit the transaction, and fail.
//
void
- pkg_purge_fs (const dir_path& configuration,
+ pkg_purge_fs (database&,
transaction&,
const shared_ptr<selected_package>&,
bool simulate,
diff --git a/bpkg/pkg-status.cli b/bpkg/pkg-status.cli
index 24e1dc8..8671904 100644
--- a/bpkg/pkg-status.cli
+++ b/bpkg/pkg-status.cli
@@ -32,10 +32,12 @@ namespace bpkg
\cb{stderr}.
The status output format is regular with components separated with
- spaces. Each line starts with the package name (and version, if
- specified) followed by one of the status words listed below. Some of
- them can be optionally followed by '\cb{,}' (no spaces) and a sub-status
- word.
+ spaces. Each line starts with the package name and version (if specified)
+ followed by one of the status words listed below. Some of them can be
+ optionally followed by '\cb{,}' (no spaces) and a sub-status word. Lines
+ corresponding to dependencies from associated configurations will
+ additionally mention the configuration directory in square brackets
+ after the package name and version.
\dl|
@@ -146,6 +148,16 @@ namespace bpkg
!libfoo configured,system * available 1.1.0 1.1.1
\
+      Another example of the status output, this time including dependencies:
+
+ \
+ bpkg status -r libbaz
+ !libbaz configured 1.0.0
+ libfoo configured 1.0.0
+ bison [.bpkg/host/] configured 1.0.0
+ libbar configured 2.0.0
+ \
+
"
}
diff --git a/bpkg/pkg-status.cxx b/bpkg/pkg-status.cxx
index 655ee8b..73821a3 100644
--- a/bpkg/pkg-status.cxx
+++ b/bpkg/pkg-status.cxx
@@ -18,6 +18,7 @@ namespace bpkg
{
struct package
{
+ database& db;
package_name name;
bpkg::version version; // Empty if unspecified.
shared_ptr<selected_package> selected; // NULL if none selected.
@@ -30,7 +31,6 @@ namespace bpkg
//
static void
pkg_status (const pkg_status_options& o,
- database& db,
const packages& pkgs,
string& indent,
bool recursive,
@@ -59,6 +59,8 @@ namespace bpkg
};
vector<apkg> apkgs;
+ database& mdb (p.db.main_database ());
+
// A package with this name is known in available packages potentially
// for build.
//
@@ -66,13 +68,13 @@ namespace bpkg
bool build (false);
{
shared_ptr<repository_fragment> root (
- db.load<repository_fragment> (""));
+ mdb.load<repository_fragment> (""));
using query = query<available_package>;
query q (query::id.name == p.name);
{
- auto r (db.query<available_package> (q));
+ auto r (mdb.query<available_package> (q));
known = !r.empty ();
build = filter_one (root, move (r)).first != nullptr;
}
@@ -107,7 +109,7 @@ namespace bpkg
//
for (shared_ptr<available_package> ap:
pointer_result (
- db.query<available_package> (q)))
+ mdb.query<available_package> (q)))
{
bool build (filter (root, ap));
apkgs.push_back (apkg {move (ap), build});
@@ -130,7 +132,7 @@ namespace bpkg
// If the package name is selected, then print its exact spelling.
//
- cout << (s != nullptr ? s->name : p.name);
+ cout << (s != nullptr ? s->name : p.name) << p.db;
if (o.constraint () && p.constraint)
cout << ' ' << *p.constraint;
@@ -235,20 +237,16 @@ namespace bpkg
for (const auto& pair: s->prerequisites)
{
shared_ptr<selected_package> d (pair.first.load ());
+ database& db (pair.first.database ());
const optional<version_constraint>& c (pair.second);
- dpkgs.push_back (package {d->name, version (), move (d), c});
+ dpkgs.push_back (package {db, d->name, version (), move (d), c});
}
}
if (!dpkgs.empty ())
{
indent += " ";
- pkg_status (o,
- db,
- dpkgs,
- indent,
- recursive,
- false /* immediate */);
+ pkg_status (o, dpkgs, indent, recursive, false /* immediate */);
indent.resize (indent.size () - 2);
}
}
@@ -266,7 +264,7 @@ namespace bpkg
const dir_path& c (o.directory ());
l4 ([&]{trace << "configuration: " << c;});
- database db (open (c, trace));
+ database db (c, trace, true /* pre_attach */);
transaction t (db);
session s;
@@ -279,39 +277,69 @@ namespace bpkg
while (args.more ())
{
const char* arg (args.next ());
- package p {parse_package_name (arg),
- parse_package_version (arg,
- false /* allow_wildcard */,
- false /* fold_zero_revision */),
- nullptr /* selected */,
- nullopt /* constraint */};
-
- // Search in the packages that already exist in this configuration.
- //
- {
- query q (query::name == p.name);
- if (!p.version.empty ())
- q = q && compare_version_eq (query::version,
- canonical_version (p.version),
- p.version.revision.has_value (),
- false /* iteration */);
+ package_name pn (parse_package_name (arg));
+ version pv (parse_package_version (arg,
+ false /* allow_wildcard */,
+ false /* fold_zero_revision */));
+
+ query q (query::name == pn);
- p.selected = db.query_one<selected_package> (q);
+ if (!pv.empty ())
+ q = q && compare_version_eq (query::version,
+ canonical_version (pv),
+ pv.revision.has_value (),
+ false /* iteration */);
+
+ // Search in the packages that already exist in this and all the
+ // dependency configurations.
+ //
+ bool found (false);
+ for (database& adb: db.dependency_configs ())
+ {
+ shared_ptr<selected_package> sp (
+ adb.query_one<selected_package> (q));
+
+ if (sp != nullptr)
+ {
+ pkgs.push_back (package {adb,
+ pn,
+ pv,
+ move (sp),
+ nullopt /* constraint */});
+ found = true;
+ }
}
- pkgs.push_back (move (p));
+ if (!found)
+ {
+ pkgs.push_back (package {db,
+ move (pn),
+ move (pv),
+ nullptr /* selected */,
+ nullopt /* constraint */});
+ }
}
}
else
{
- // Find all held packages.
+ // Find all held packages in this and all the dependency
+ // configurations.
//
- for (shared_ptr<selected_package> s:
- pointer_result (
- db.query<selected_package> (query::hold_package)))
+ for (database& adb: db.dependency_configs ())
{
- pkgs.push_back (package {s->name, version (), move (s), nullopt});
+ for (shared_ptr<selected_package> s:
+ pointer_result (
+ adb.query<selected_package> (query::hold_package)))
+ {
+ pkgs.push_back (package {adb,
+ s->name,
+ version (),
+ move (s),
+ nullopt /* constraint */});
+        }
}
if (pkgs.empty ())
@@ -323,7 +351,7 @@ namespace bpkg
}
string indent;
- pkg_status (o, db, pkgs, indent, o.recursive (), o.immediate ());
+ pkg_status (o, pkgs, indent, o.recursive (), o.immediate ());
t.commit ();
return 0;
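
The pkg-status changes above stop passing a single database around and instead
iterate over db.dependency_configs () so that packages selected in any
associated configuration are reported. Below is a minimal sketch (not part of
the patch) of that lookup pattern; it assumes the bpkg headers touched by this
diff, and print_selected () is a hypothetical helper used only for
illustration.

  #include <bpkg/types.hxx>
  #include <bpkg/package.hxx>
  #include <bpkg/package-odb.hxx>
  #include <bpkg/database.hxx>
  #include <bpkg/diagnostics.hxx>

  namespace bpkg
  {
    static void
    print_selected (const dir_path& c, const package_name& pn)
    {
      tracer trace ("print_selected");

      // pre_attach makes dependency_configs () cover the explicitly
      // associated configurations in addition to the current one.
      //
      database db (c, trace, true /* pre_attach */);
      transaction t (db);

      using query = query<selected_package>;

      for (database& adb: db.dependency_configs ())
      {
        // The same package may be selected in several configurations.
        //
        if (shared_ptr<selected_package> sp =
              adb.query_one<selected_package> (query::name == pn))
          text << sp->name << adb << ' ' << sp->version_string ();
      }

      t.commit ();
    }
  }
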
diff --git a/bpkg/pkg-unpack.cxx b/bpkg/pkg-unpack.cxx
index 9685f3e..368a71c 100644
--- a/bpkg/pkg-unpack.cxx
+++ b/bpkg/pkg-unpack.cxx
@@ -24,14 +24,13 @@ namespace bpkg
// diagnose all the illegal cases.
//
static void
- pkg_unpack_check (const dir_path& c,
- transaction& t,
+ pkg_unpack_check (database& db,
+ transaction&,
const package_name& n,
bool replace)
{
tracer trace ("pkg_update_check");
- database& db (t.database ());
tracer_guard tg (db, trace);
if (shared_ptr<selected_package> p = db.find<selected_package> (n))
@@ -42,6 +41,7 @@ namespace bpkg
if (!replace || !s)
{
diag_record dr (fail);
+ const dir_path& c (db.config_orig);
dr << "package " << n << " already exists in configuration " << c <<
info << "version: " << p->version_string ()
@@ -59,7 +59,7 @@ namespace bpkg
//
static shared_ptr<selected_package>
pkg_unpack (const common_options& o,
- dir_path c,
+ database& db,
transaction& t,
package_name n,
version v,
@@ -70,7 +70,6 @@ namespace bpkg
{
tracer trace ("pkg_unpack");
- database& db (t.database ());
tracer_guard tg (db, trace);
optional<string> mc;
@@ -78,15 +77,14 @@ namespace bpkg
if (!simulate)
mc = sha256 (o, d / manifest_file);
- // Make the package and configuration paths absolute and normalized.
- // If the package is inside the configuration, use the relative path.
- // This way we can move the configuration around.
+ // Make the package path absolute and normalized. If the package is inside
+ // the configuration, use the relative path. This way we can move the
+ // configuration around.
//
- normalize (c, "configuration");
normalize (d, "package");
- if (d.sub (c))
- d = d.leaf (c);
+ if (d.sub (db.config))
+ d = d.leaf (db.config);
shared_ptr<selected_package> p (db.find<selected_package> (n));
@@ -96,7 +94,7 @@ namespace bpkg
// replacing. Once this is done, there is no going back. If things
// go badly, we can't simply abort the transaction.
//
- pkg_purge_fs (c, t, p, simulate);
+ pkg_purge_fs (db, t, p, simulate);
// Note that if the package name spelling changed then we need to update
// it, to make sure that the subsequent commands don't fail and the
@@ -150,7 +148,7 @@ namespace bpkg
shared_ptr<selected_package>
pkg_unpack (const common_options& o,
- const dir_path& c,
+ database& db,
transaction& t,
const dir_path& d,
bool replace,
@@ -177,19 +175,19 @@ namespace bpkg
// Check/diagnose an already existing package.
//
- pkg_unpack_check (c, t, m.name, replace);
+ pkg_unpack_check (db, t, m.name, replace);
// Fix-up the package version.
//
if (optional<version> v = package_iteration (
- o, c, t, d, m.name, m.version, true /* check_external */))
+ o, db, t, d, m.name, m.version, true /* check_external */))
m.version = move (*v);
// Use the special root repository fragment as the repository fragment of
// this package.
//
return pkg_unpack (o,
- c,
+ db,
t,
move (m.name),
move (m.version),
@@ -201,7 +199,7 @@ namespace bpkg
shared_ptr<selected_package>
pkg_unpack (const common_options& o,
- const dir_path& c,
+ database& db,
transaction& t,
package_name n,
version v,
@@ -210,20 +208,21 @@ namespace bpkg
{
tracer trace ("pkg_unpack");
- database& db (t.database ());
tracer_guard tg (db, trace);
// Check/diagnose an already existing package.
//
- pkg_unpack_check (c, t, n, replace);
+ pkg_unpack_check (db, t, n, replace);
- check_any_available (c, t);
+ database& mdb (db.main_database ());
+
+ check_any_available (mdb, t);
// Note that here we compare including the revision (see pkg-fetch()
// implementation for more details).
//
shared_ptr<available_package> ap (
- db.find<available_package> (available_package_id (n, v)));
+ mdb.find<available_package> (available_package_id (n, v)));
if (ap == nullptr)
fail << "package " << n << " " << v << " is not available";
@@ -253,7 +252,7 @@ namespace bpkg
const repository_location& rl (pl->repository_fragment->location);
return pkg_unpack (o,
- c,
+ db,
t,
move (n),
move (v),
@@ -265,23 +264,23 @@ namespace bpkg
shared_ptr<selected_package>
pkg_unpack (const common_options& co,
- const dir_path& c,
+ database& db,
transaction& t,
const package_name& name,
bool simulate)
{
tracer trace ("pkg_unpack");
- database& db (t.database ());
tracer_guard tg (db, trace);
+ const dir_path& c (db.config_orig);
shared_ptr<selected_package> p (db.find<selected_package> (name));
if (p == nullptr)
fail << "package " << name << " does not exist in configuration " << c;
if (p->state != package_state::fetched)
- fail << "package " << name << " is " << p->state <<
+ fail << "package " << name << db << " is " << p->state <<
info << "expected it to be fetched";
l4 ([&]{trace << *p;});
@@ -356,7 +355,7 @@ namespace bpkg
const dir_path& c (o.directory ());
l4 ([&]{trace << "configuration: " << c;});
- database db (open (c, trace));
+ database db (c, trace, true /* pre_attach */);
transaction t (db);
shared_ptr<selected_package> p;
@@ -371,7 +370,7 @@ namespace bpkg
info << "run 'bpkg help pkg-unpack' for more information";
p = pkg_unpack (o,
- c,
+ db,
t,
dir_path (args.next ()),
o.replace (),
@@ -400,9 +399,9 @@ namespace bpkg
// "unpack" it from the directory-based repository.
//
p = v.empty ()
- ? pkg_unpack (o, c, t, n, false /* simulate */)
+ ? pkg_unpack (o, db, t, n, false /* simulate */)
: pkg_unpack (o,
- c,
+ db,
t,
move (n),
move (v),
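
In the pkg-unpack changes above the functions receive database& and derive all
paths from it: db.config is the absolute, normalized configuration directory
and db.config_orig the directory as originally specified. The following sketch
(store_package_dir () is hypothetical, not part of the patch) captures the
path handling now done inline in pkg_unpack ().

  #include <bpkg/types.hxx>
  #include <bpkg/utility.hxx>  // normalize()
  #include <bpkg/database.hxx>

  namespace bpkg
  {
    // If the normalized package directory is inside the configuration, store
    // it relative so that the configuration can later be moved around.
    //
    static dir_path
    store_package_dir (database& db, dir_path d)
    {
      normalize (d, "package");

      if (d.sub (db.config))
        d = d.leaf (db.config);

      return d;
    }
  }
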
diff --git a/bpkg/pkg-unpack.hxx b/bpkg/pkg-unpack.hxx
index 107322b..c6496d6 100644
--- a/bpkg/pkg-unpack.hxx
+++ b/bpkg/pkg-unpack.hxx
@@ -23,7 +23,7 @@ namespace bpkg
//
shared_ptr<selected_package>
pkg_unpack (const common_options&,
- const dir_path& configuration,
+ database&,
transaction&,
const dir_path&,
bool replace,
@@ -34,7 +34,7 @@ namespace bpkg
//
shared_ptr<selected_package>
pkg_unpack (const common_options&,
- const dir_path& configuration,
+ database&,
transaction&,
const package_name&,
bool simulate);
@@ -45,7 +45,7 @@ namespace bpkg
//
shared_ptr<selected_package>
pkg_unpack (const common_options&,
- const dir_path& configuration,
+ database&,
transaction&,
package_name,
version,
diff --git a/bpkg/pkg-update.hxx b/bpkg/pkg-update.hxx
index d7b9536..41fead0 100644
--- a/bpkg/pkg-update.hxx
+++ b/bpkg/pkg-update.hxx
@@ -28,13 +28,12 @@ namespace bpkg
}
inline void
- pkg_update (const dir_path& configuration,
- const common_options& o,
+ pkg_update (const common_options& o,
const string& cmd_variant,
const strings& common_vars,
const vector<pkg_command_vars>& pkgs)
{
- pkg_command ("update", configuration, o, cmd_variant, common_vars, pkgs);
+ pkg_command ("update", o, cmd_variant, common_vars, pkgs);
}
}
diff --git a/bpkg/pointer-traits.hxx b/bpkg/pointer-traits.hxx
new file mode 100644
index 0000000..a63b289
--- /dev/null
+++ b/bpkg/pointer-traits.hxx
@@ -0,0 +1,58 @@
+// file : bpkg/pointer-traits.hxx -*- C++ -*-
+// license : MIT; see accompanying LICENSE file
+
+#ifndef BPKG_POINTER_TRAITS_HXX
+#define BPKG_POINTER_TRAITS_HXX
+
+#include <bpkg/types.hxx>
+
+#include <odb/pointer-traits.hxx>
+
+namespace odb
+{
+ template <typename T>
+ class pointer_traits<bpkg::lazy_shared_ptr<T>>
+ {
+ public:
+ static const pointer_kind kind = pk_shared;
+ static const bool lazy = true;
+
+ typedef T element_type;
+ typedef bpkg::lazy_shared_ptr<element_type> pointer_type;
+ typedef bpkg::shared_ptr<element_type> eager_pointer_type;
+
+ static bool
+ null_ptr (const pointer_type& p)
+ {
+ return !p;
+ }
+
+ template <class O = T>
+ static typename object_traits<O>::id_type
+ object_id (const pointer_type& p)
+ {
+ return p.template object_id<O> ();
+ }
+ };
+
+ template <typename T>
+ class pointer_traits<bpkg::lazy_weak_ptr<T>>
+ {
+ public:
+ static const pointer_kind kind = pk_weak;
+ static const bool lazy = true;
+
+ typedef T element_type;
+ typedef bpkg::lazy_weak_ptr<element_type> pointer_type;
+ typedef bpkg::lazy_shared_ptr<element_type> strong_pointer_type;
+ typedef bpkg::weak_ptr<element_type> eager_pointer_type;
+
+ static strong_pointer_type
+ lock (const pointer_type& p)
+ {
+ return p.lock ();
+ }
+ };
+}
+
+#endif // BPKG_POINTER_TRAITS_HXX
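
The specializations in this new header tell ODB that the lazy_shared_ptr and
lazy_weak_ptr classes bpkg now derives in types.hxx (further below) are lazy
shared/weak object pointers: a data member of such a type is mapped to the
pointed-to object's id and loaded on demand. A rough usage sketch, where the
dependent/prerequisite classes are purely illustrative and not the actual bpkg
schema:

  #include <bpkg/types.hxx>
  #include <bpkg/pointer-traits.hxx>

  namespace bpkg
  {
    #pragma db object
    class prerequisite
    {
    public:
      #pragma db id
      string name;
    };

    #pragma db object
    class dependent
    {
    public:
      #pragma db id
      string name;

      // Mapped by ODB to the prerequisite's object id; the object itself is
      // only loaded when requested.
      //
      lazy_shared_ptr<prerequisite> prereq;
    };

    // Load the prerequisite on demand from the database the lazy pointer
    // refers to (see the database() accessors added in types.hxx).
    //
    inline shared_ptr<prerequisite>
    load_prerequisite (const dependent& d)
    {
      database& db (d.prereq.database ());
      return db.load<prerequisite> (d.prereq.object_id ());
    }
  }
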
diff --git a/bpkg/rep-add.cxx b/bpkg/rep-add.cxx
index 6856437..81b1286 100644
--- a/bpkg/rep-add.cxx
+++ b/bpkg/rep-add.cxx
@@ -16,12 +16,12 @@ namespace bpkg
{
shared_ptr<repository>
rep_add (const common_options& o,
- transaction& t,
+ database& db,
+ transaction&,
const repository_location& rl)
{
const string& rn (rl.canonical_name ());
- database& db (t.database ());
shared_ptr<repository> r (db.find<repository> (rn));
bool updated (false);
@@ -65,7 +65,7 @@ namespace bpkg
fail << "repository location argument expected" <<
info << "run 'bpkg help rep-add' for more information";
- database db (open (c, trace));
+ database db (c, trace, false /* pre_attach */);
transaction t (db);
session s; // Repository dependencies can have cycles.
@@ -77,7 +77,7 @@ namespace bpkg
? optional<repository_type> (o.type ())
: nullopt));
- rep_add (o, t, rl);
+ rep_add (o, db, t, rl);
}
t.commit ();
diff --git a/bpkg/rep-add.hxx b/bpkg/rep-add.hxx
index 0062cdc..d5cec5d 100644
--- a/bpkg/rep-add.hxx
+++ b/bpkg/rep-add.hxx
@@ -22,7 +22,10 @@ namespace bpkg
// repository if it is not already.
//
shared_ptr<repository>
- rep_add (const common_options&, transaction&, const repository_location&);
+ rep_add (const common_options&,
+ database&,
+ transaction&,
+ const repository_location&);
}
#endif // BPKG_REP_ADD_HXX
diff --git a/bpkg/rep-fetch.cxx b/bpkg/rep-fetch.cxx
index ef4c110..436cd46 100644
--- a/bpkg/rep-fetch.cxx
+++ b/bpkg/rep-fetch.cxx
@@ -49,6 +49,7 @@ namespace bpkg
static rep_fetch_data
rep_fetch_pkg (const common_options& co,
const dir_path* conf,
+ database* db,
const repository_location& rl,
const optional<string>& dependent_trust,
bool ignore_unknown)
@@ -71,7 +72,7 @@ namespace bpkg
if (a)
{
cert = authenticate_certificate (
- co, conf, cert_pem, rl, dependent_trust);
+ co, conf, db, cert_pem, rl, dependent_trust);
a = !cert->dummy ();
}
@@ -556,6 +557,7 @@ namespace bpkg
static rep_fetch_data
rep_fetch (const common_options& co,
const dir_path* conf,
+ database* db,
const repository_location& rl,
const optional<string>& dt,
bool iu,
@@ -563,7 +565,7 @@ namespace bpkg
{
switch (rl.type ())
{
- case repository_type::pkg: return rep_fetch_pkg (co, conf, rl, dt, iu);
+ case repository_type::pkg: return rep_fetch_pkg (co, conf, db, rl, dt, iu);
case repository_type::dir: return rep_fetch_dir (co, rl, iu, ev);
case repository_type::git: return rep_fetch_git (co, conf, rl, iu, ev);
}
@@ -579,7 +581,13 @@ namespace bpkg
bool iu,
bool ev)
{
- return rep_fetch (co, conf, rl, nullopt /* dependent_trust */, iu, ev);
+ return rep_fetch (co,
+ conf,
+ nullptr /* database */,
+ rl,
+ nullopt /* dependent_trust */,
+ iu,
+ ev);
}
// Return an existing repository fragment or create a new one. Update the
@@ -591,7 +599,7 @@ namespace bpkg
static shared_ptr<repository_fragment>
rep_fragment (const common_options& co,
- const dir_path& conf,
+ database& db,
transaction& t,
const repository_location& rl,
rep_fetch_data::fragment&& fr,
@@ -601,7 +609,6 @@ namespace bpkg
{
tracer trace ("rep_fragment");
- database& db (t.database ());
tracer_guard tg (db, trace);
// Calculate the fragment location.
@@ -852,7 +859,7 @@ namespace bpkg
// details).
//
if (exists && !full_fetch)
- rep_remove_package_locations (t, rf->name);
+ rep_remove_package_locations (db, t, rf->name);
for (package_manifest& pm: fr.packages)
{
@@ -868,7 +875,7 @@ namespace bpkg
optional<version> v (
package_iteration (
co,
- conf,
+ db,
t,
path_cast<dir_path> (rl.path () / *pm.location),
pm.name,
@@ -956,7 +963,7 @@ namespace bpkg
//
static void
rep_fetch (const common_options& co,
- const dir_path& conf,
+ database& db,
transaction& t,
const shared_ptr<repository>& r,
const optional<string>& dependent_trust,
@@ -970,7 +977,6 @@ namespace bpkg
{
tracer trace ("rep_fetch(rep)");
- database& db (t.database ());
tracer_guard tg (db, trace);
// Check that the repository is not fetched yet and register it as fetched
@@ -990,7 +996,8 @@ namespace bpkg
//
if (need_auth (co, r->location))
authenticate_certificate (co,
- &conf,
+ &db.config_orig,
+ &db,
r->certificate,
r->location,
dependent_trust);
@@ -1059,7 +1066,8 @@ namespace bpkg
//
rep_fetch_data rfd (
rep_fetch (co,
- &conf,
+ &db.config_orig,
+ &db,
rl,
dependent_trust,
true /* ignore_unknow */,
@@ -1079,7 +1087,7 @@ namespace bpkg
string nm (fr.friendly_name); // Don't move, still may be used.
shared_ptr<repository_fragment> rf (rep_fragment (co,
- conf,
+ db,
t,
rl,
move (fr),
@@ -1156,7 +1164,7 @@ namespace bpkg
rm (pr);
auto fetch = [&co,
- &conf,
+ &db,
&t,
&fetched_repositories,
&removed_repositories,
@@ -1171,7 +1179,7 @@ namespace bpkg
assert (i != repo_trust.end ());
rep_fetch (co,
- conf,
+ db,
t,
r,
i->second,
@@ -1206,7 +1214,7 @@ namespace bpkg
static void
rep_fetch (const common_options& o,
- const dir_path& conf,
+ database& db,
transaction& t,
const vector<lazy_shared_ptr<repository>>& repos,
bool shallow,
@@ -1215,7 +1223,6 @@ namespace bpkg
{
tracer trace ("rep_fetch(repos)");
- database& db (t.database ());
tracer_guard tg (db, trace);
    // As a first step we fetch repositories recursively building the list of
@@ -1243,7 +1250,7 @@ namespace bpkg
//
for (const lazy_shared_ptr<repository>& r: repos)
rep_fetch (o,
- conf,
+ db,
t,
r.load (),
nullopt /* dependent_trust */,
@@ -1258,7 +1265,7 @@ namespace bpkg
// Remove dangling repositories.
//
for (const shared_ptr<repository>& r: removed_repositories)
- rep_remove (conf, t, r);
+ rep_remove (db, t, r);
// Remove dangling repository fragments.
//
@@ -1277,7 +1284,7 @@ namespace bpkg
//
assert (f == rf);
- rep_remove_fragment (conf, t, rf);
+ rep_remove_fragment (db, t, rf);
}
}
@@ -1409,7 +1416,7 @@ namespace bpkg
warn << "repository state is now broken and will be cleaned up" <<
info << "run 'bpkg rep-fetch' to update";
- rep_remove_clean (o, conf, t.database ());
+ rep_remove_clean (o, db);
}
throw;
@@ -1418,7 +1425,6 @@ namespace bpkg
void
rep_fetch (const common_options& o,
- const dir_path& conf,
database& db,
const vector<repository_location>& rls,
bool shallow,
@@ -1449,12 +1455,12 @@ namespace bpkg
// case, which is ok.
//
if (ua.find (r) == ua.end () || r.load ()->location.url () != rl.url ())
- rep_add (o, t, rl);
+ rep_add (o, db, t, rl);
repos.emplace_back (r);
}
- rep_fetch (o, conf, t, repos, shallow, false /* full_fetch */, reason);
+ rep_fetch (o, db, t, repos, shallow, false /* full_fetch */, reason);
t.commit ();
}
@@ -1471,7 +1477,11 @@ namespace bpkg
//
vector<lazy_shared_ptr<repository>> repos;
- database db (open (c, trace));
+ // Pre-attach the explicitly associated databases since we call
+ // package_iteration().
+ //
+ database db (c, trace, true /* pre_attach */);
+
transaction t (db);
session s; // Repository dependencies can have cycles.
@@ -1535,7 +1545,7 @@ namespace bpkg
//
auto i (ua.find (r));
if (i == ua.end () || i->load ()->location.url () != rl.url ())
- r = lazy_shared_ptr<repository> (db, rep_add (o, t, rl));
+ r = lazy_shared_ptr<repository> (db, rep_add (o, db, t, rl));
}
repos.emplace_back (move (r));
@@ -1562,7 +1572,7 @@ namespace bpkg
}
}
- rep_fetch (o, c, t, repos, o.shallow (), full_fetch, reason);
+ rep_fetch (o, db, t, repos, o.shallow (), full_fetch, reason);
size_t rcount (0), pcount (0);
if (verb)
diff --git a/bpkg/rep-fetch.hxx b/bpkg/rep-fetch.hxx
index 4ddce5b..7905e85 100644
--- a/bpkg/rep-fetch.hxx
+++ b/bpkg/rep-fetch.hxx
@@ -7,7 +7,6 @@
#include <libbpkg/manifest.hxx>
#include <bpkg/types.hxx>
-#include <bpkg/forward.hxx> // database
#include <bpkg/utility.hxx>
#include <bpkg/rep-fetch-options.hxx>
@@ -69,7 +68,6 @@ namespace bpkg
//
void
rep_fetch (const common_options&,
- const dir_path& conf,
database&,
const vector<repository_location>&,
bool shallow,
diff --git a/bpkg/rep-list.cxx b/bpkg/rep-list.cxx
index 5b961c0..67b25bf 100644
--- a/bpkg/rep-list.cxx
+++ b/bpkg/rep-list.cxx
@@ -107,7 +107,7 @@ namespace bpkg
fail << "unexpected argument '" << args.next () << "'" <<
info << "run 'bpkg help rep-list' for more information";
- database db (open (c, trace));
+ database db (c, trace, false /* pre_attach */);
transaction t (db);
session s; // Repository dependencies can have cycles.
diff --git a/bpkg/rep-remove.cxx b/bpkg/rep-remove.cxx
index c377fc5..aae390d 100644
--- a/bpkg/rep-remove.cxx
+++ b/bpkg/rep-remove.cxx
@@ -94,11 +94,12 @@ namespace bpkg
}
void
- rep_remove_package_locations (transaction& t, const string& fragment_name)
+ rep_remove_package_locations (database& db,
+ transaction&,
+ const string& fragment_name)
{
tracer trace ("rep_remove_package_locations");
- database& db (t.database ());
tracer_guard tg (db, trace);
using query = query<repository_fragment_package>;
@@ -141,15 +142,12 @@ namespace bpkg
}
void
- rep_remove (const dir_path& c,
- transaction& t,
- const shared_ptr<repository>& r)
+ rep_remove (database& db, transaction& t, const shared_ptr<repository>& r)
{
assert (!r->name.empty ()); // Can't be the root repository.
tracer trace ("rep_remove");
- database& db (t.database ());
tracer_guard tg (db, trace);
if (reachable (db, r))
@@ -164,7 +162,7 @@ namespace bpkg
// Remove dangling repository fragments.
//
for (const repository::fragment_type& fr: r->fragments)
- rep_remove_fragment (c, t, fr.fragment.load ());
+ rep_remove_fragment (db, t, fr.fragment.load ());
    // If no repositories remain in the database then no repository
    // fragments should remain either.
@@ -188,7 +186,7 @@ namespace bpkg
if (!d.empty ())
{
- dir_path sd (c / repos_dir / d);
+ dir_path sd (db.config_orig / repos_dir / d);
if (exists (sd))
{
@@ -219,13 +217,12 @@ namespace bpkg
}
void
- rep_remove_fragment (const dir_path& c,
+ rep_remove_fragment (database& db,
transaction& t,
const shared_ptr<repository_fragment>& rf)
{
tracer trace ("rep_remove_fragment");
- database& db (t.database ());
tracer_guard tg (db, trace);
// Bail out if the repository fragment is still used.
@@ -240,7 +237,7 @@ namespace bpkg
// it contains. Note that this must be done before the repository fragment
// removal.
//
- rep_remove_package_locations (t, rf->name);
+ rep_remove_package_locations (db, t, rf->name);
// Remove the repository fragment.
//
@@ -265,10 +262,10 @@ namespace bpkg
// Prior to removing a prerequisite/complement we need to make sure it
// still exists, which may not be the case due to the dependency cycle.
//
- auto remove = [&c, &db, &t] (const lazy_weak_ptr<repository>& rp)
+ auto remove = [&db, &t] (const lazy_weak_ptr<repository>& rp)
{
if (shared_ptr<repository> r = db.find<repository> (rp.object_id ()))
- rep_remove (c, t, r);
+ rep_remove (db, t, r);
};
for (const lazy_weak_ptr<repository>& cr: rf->complements)
@@ -285,10 +282,7 @@ namespace bpkg
}
void
- rep_remove_clean (const common_options& o,
- const dir_path& c,
- database& db,
- bool quiet)
+ rep_remove_clean (const common_options& o, database& db, bool quiet)
{
tracer trace ("rep_remove_clean");
tracer_guard tg (db, trace);
@@ -336,7 +330,7 @@ namespace bpkg
// Remove repository state subdirectories.
//
- dir_path rd (c / repos_dir);
+ dir_path rd (db.config_orig / repos_dir);
try
{
@@ -384,13 +378,13 @@ namespace bpkg
dr << info << "run 'bpkg help rep-remove' for more information";
}
- database db (open (c, trace));
+ database db (c, trace, false /* pre_attach */);
// Clean the configuration if requested.
//
if (o.clean ())
{
- rep_remove_clean (o, c, db, false /* quiet */);
+ rep_remove_clean (o, db, false /* quiet */);
return 0;
}
@@ -484,7 +478,7 @@ namespace bpkg
//
for (const lazy_shared_ptr<repository>& r: repos)
{
- rep_remove (c, t, r.load ());
+ rep_remove (db, t, r.load ());
if (verb && !o.no_result ())
text << "removed " << r.object_id ();
diff --git a/bpkg/rep-remove.hxx b/bpkg/rep-remove.hxx
index f85aec5..0fc82e8 100644
--- a/bpkg/rep-remove.hxx
+++ b/bpkg/rep-remove.hxx
@@ -5,7 +5,7 @@
#define BPKG_REP_REMOVE_HXX
#include <bpkg/types.hxx>
-#include <bpkg/forward.hxx> // database, transaction, repository
+#include <bpkg/forward.hxx> // transaction, repository
#include <bpkg/utility.hxx>
#include <bpkg/rep-remove-options.hxx>
@@ -20,15 +20,13 @@ namespace bpkg
// repository fragments.
//
void
- rep_remove (const dir_path& conf,
- transaction&,
- const shared_ptr<repository>&);
+ rep_remove (database&, transaction&, const shared_ptr<repository>&);
// Remove a repository fragment if it is not referenced by any repository,
// also removing its unreachable complements and prerequisites.
//
void
- rep_remove_fragment (const dir_path& conf,
+ rep_remove_fragment (database&,
transaction&,
const shared_ptr<repository_fragment>&);
@@ -50,16 +48,15 @@ namespace bpkg
// - Remove all available packages.
//
void
- rep_remove_clean (const common_options&,
- const dir_path& conf,
- database&,
- bool quiet = true);
+ rep_remove_clean (const common_options&, database&, bool quiet = true);
// Remove a repository fragment from locations of the available packages it
// contains. Remove packages that come from only this repository fragment.
//
void
- rep_remove_package_locations (transaction&, const string& fragment_name);
+ rep_remove_package_locations (database&,
+ transaction&,
+ const string& fragment_name);
}
#endif // BPKG_REP_REMOVE_HXX
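
The rep-remove and rep-fetch changes above follow the same convention as the
rest of the patch: functions take database& instead of a separately-passed
configuration directory and recover the directory from the database object
when they need to touch the filesystem. A small sketch of that convention
(repository_state_dir () is a hypothetical helper; the real code computes such
paths inline):

  #include <bpkg/types.hxx>
  #include <bpkg/utility.hxx>   // repos_dir
  #include <bpkg/database.hxx>

  namespace bpkg
  {
    static dir_path
    repository_state_dir (database& db, const dir_path& d)
    {
      // db.config_orig is the configuration directory as specified by the
      // user, which keeps filesystem paths and diagnostics relative to it.
      //
      return db.config_orig / repos_dir / d;
    }
  }
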
diff --git a/bpkg/system-repository.cxx b/bpkg/system-repository.cxx
index de4e61e..d7a47b7 100644
--- a/bpkg/system-repository.cxx
+++ b/bpkg/system-repository.cxx
@@ -5,9 +5,7 @@
namespace bpkg
{
- system_repository_type system_repository;
-
- const version& system_repository_type::
+ const version& system_repository::
insert (const package_name& name, const version& v, bool authoritative)
{
auto p (map_.emplace (name, system_package {v, authoritative}));
diff --git a/bpkg/system-repository.hxx b/bpkg/system-repository.hxx
index 1168ec0..f33d622 100644
--- a/bpkg/system-repository.hxx
+++ b/bpkg/system-repository.hxx
@@ -32,7 +32,7 @@ namespace bpkg
bool authoritative;
};
- class system_repository_type
+ class system_repository
{
public:
const version&
@@ -48,8 +48,6 @@ namespace bpkg
private:
std::map<package_name, system_package> map_;
};
-
- extern system_repository_type system_repository;
}
#endif // BPKG_SYSTEM_REPOSITORY_HXX
diff --git a/bpkg/types-parsers.cxx b/bpkg/types-parsers.cxx
index be95219..d5ddb28 100644
--- a/bpkg/types-parsers.cxx
+++ b/bpkg/types-parsers.cxx
@@ -67,6 +67,31 @@ namespace bpkg
parse_path (x, s);
}
+ void parser<uuid>::
+ parse (uuid& x, bool& xs, scanner& s)
+ {
+ xs = true;
+
+ const char* o (s.next ());
+
+ if (!s.more ())
+ throw missing_value (o);
+
+ const char* v (s.next ());
+
+ try
+ {
+ x = uuid (v);
+
+ if (x.nil ())
+ throw invalid_value (o, v);
+ }
+ catch (const invalid_argument&)
+ {
+ throw invalid_value (o, v);
+ }
+ }
+
void parser<auth>::
parse (auth& x, bool& xs, scanner& s)
{
diff --git a/bpkg/types-parsers.hxx b/bpkg/types-parsers.hxx
index 38b7cee..d687156 100644
--- a/bpkg/types-parsers.hxx
+++ b/bpkg/types-parsers.hxx
@@ -49,6 +49,16 @@ namespace bpkg
};
template <>
+ struct parser<uuid>
+ {
+ static void
+ parse (uuid&, bool&, scanner&);
+
+ static void
+ merge (uuid& b, const uuid& a) {b = a;}
+ };
+
+ template <>
struct parser<auth>
{
static void
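
The parser<uuid> specialization above backs options such as cfg-create's
--config-uuid: the value must parse as a UUID and must not be nil. A minimal
standalone sketch of the same validation (parse_config_uuid () is hypothetical
and only mirrors the parser logic):

  #include <string>
  #include <stdexcept>      // invalid_argument, runtime_error

  #include <bpkg/types.hxx> // uuid (butl::uuid)

  namespace bpkg
  {
    static uuid
    parse_config_uuid (const char* v)
    {
      try
      {
        uuid r (v); // Throws std::invalid_argument if not a valid UUID.

        if (!r.nil ())
          return r;

        // Fall through: a nil UUID cannot identify a configuration.
      }
      catch (const std::invalid_argument&) {}

      throw std::runtime_error (std::string ("invalid uuid value '") + v + "'");
    }
  }
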
diff --git a/bpkg/types.hxx b/bpkg/types.hxx
index 65dba60..7d8740f 100644
--- a/bpkg/types.hxx
+++ b/bpkg/types.hxx
@@ -23,6 +23,8 @@
#include <libbutl/url.mxx>
#include <libbutl/path.mxx>
+#include <libbutl/uuid.hxx>
+#include <libbutl/uuid-io.hxx>
#include <libbutl/process.mxx>
#include <libbutl/utility.mxx> // icase_compare_string,
// compare_reference_target
@@ -80,11 +82,6 @@ namespace bpkg
using butl::optional;
using butl::nullopt;
- // ODB smart pointers.
- //
- using odb::lazy_shared_ptr;
- using odb::lazy_weak_ptr;
-
// <libbutl/path.mxx>
//
using butl::path;
@@ -92,6 +89,10 @@ namespace bpkg
using butl::basic_path;
using butl::invalid_path;
+  // <libbutl/uuid.hxx>
+ //
+ using butl::uuid;
+
using butl::path_cast;
using paths = std::vector<path>;
@@ -123,6 +124,60 @@ namespace bpkg
using butl::default_options_files;
using butl::default_options_entry;
using butl::default_options;
+
+ // Derive from ODB smart pointers to return derived database (note that the
+ // database() functions are defined in database.hxx).
+ //
+ class database;
+
+ template <class T>
+ class lazy_shared_ptr: public odb::lazy_shared_ptr<T>
+ {
+ public:
+ using base_type = odb::lazy_shared_ptr<T>;
+
+ using base_type::base_type;
+
+ explicit
+ lazy_shared_ptr (base_type&& p): base_type (move (p)) {}
+
+ lazy_shared_ptr () = default;
+
+ bpkg::database&
+ database () const;
+ };
+
+ template <class T>
+ class lazy_weak_ptr: public odb::lazy_weak_ptr<T>
+ {
+ public:
+ using base_type = odb::lazy_weak_ptr<T>;
+
+ using base_type::base_type;
+
+ bpkg::database&
+ database () const;
+
+ lazy_shared_ptr<T>
+ lock () const
+ {
+ return lazy_shared_ptr<T> (base_type::lock ());
+ }
+ };
+
+ // Compare two lazy pointers via the pointed-to object ids.
+ //
+ struct compare_lazy_ptr
+ {
+ template <typename P>
+ bool
+ operator() (const P& x, const P& y) const
+ {
+ // Note: ignoring database is intentional.
+ //
+ return x.object_id () < y.object_id ();
+ }
+ };
}
// In order to be found (via ADL) these have to be either in std:: or in
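
The compare_lazy_ptr functor added above orders lazy pointers by object id
only, deliberately ignoring which database they belong to. This is what allows
containers keyed by repository pointers to span several associated
configurations, as in the following sketch (not from the patch):

  #include <set>

  #include <bpkg/types.hxx>
  #include <bpkg/package.hxx> // repository

  namespace bpkg
  {
    // Keyed by the repository object id (its canonical name); two pointers
    // referring to equally-named repositories in different databases compare
    // equal.
    //
    using repository_set =
      std::set<lazy_shared_ptr<repository>, compare_lazy_ptr>;

    inline bool
    contains (const repository_set& s, const lazy_shared_ptr<repository>& r)
    {
      return s.find (r) != s.end ();
    }
  }
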
diff --git a/bpkg/utility.cxx b/bpkg/utility.cxx
index ef61870..2f3f131 100644
--- a/bpkg/utility.cxx
+++ b/bpkg/utility.cxx
@@ -25,6 +25,7 @@ namespace bpkg
//
const dir_path certs_dir (dir_path (bpkg_dir) /= "certs");
const dir_path repos_dir (dir_path (bpkg_dir) /= "repos");
+ const dir_path host_dir (dir_path (bpkg_dir) /= "host");
const dir_path current_dir (".");
@@ -117,6 +118,19 @@ namespace bpkg
return d;
}
+ dir_path
+ current_directory ()
+ {
+ try
+ {
+ return dir_path::current_directory ();
+ }
+ catch (const system_error& e)
+ {
+ fail << "unable to obtain current directory: " << e << endf;
+ }
+ }
+
bool stderr_term;
bool
diff --git a/bpkg/utility.hxx b/bpkg/utility.hxx
index 4360118..466f9f1 100644
--- a/bpkg/utility.hxx
+++ b/bpkg/utility.hxx
@@ -42,6 +42,11 @@ namespace bpkg
using butl::icasecmp;
using butl::reverse_iterate;
+ using butl::alpha;
+ using butl::alnum;
+ using butl::digit;
+ using butl::xdigit;
+
using butl::make_guard;
using butl::make_exception_guard;
@@ -74,6 +79,7 @@ namespace bpkg
extern const dir_path bpkg_dir; // .bpkg/
extern const dir_path certs_dir; // .bpkg/certs/
extern const dir_path repos_dir; // .bpkg/repos/
+ extern const dir_path host_dir; // .bpkg/host/
extern const dir_path current_dir; // ./
// Temporary directory facility.
@@ -123,6 +129,9 @@ namespace bpkg
return move (normalize (r, what));
}
+ dir_path
+ current_directory ();
+
// Progress.
//
extern bool stderr_term; // True if stderr is a terminal.
diff --git a/doc/buildfile b/doc/buildfile
index 201d41c..42ec7c1 100644
--- a/doc/buildfile
+++ b/doc/buildfile
@@ -2,6 +2,7 @@
# license : MIT; see accompanying LICENSE file
cmds = \
+bpkg-cfg-add \
bpkg-cfg-create \
bpkg-help \
bpkg-pkg-build \
diff --git a/doc/cli.sh b/doc/cli.sh
index 995efcc..119d112 100755
--- a/doc/cli.sh
+++ b/doc/cli.sh
@@ -78,7 +78,7 @@ compile "pkg-build" $o --class-doc bpkg::pkg_build_pkg_options=exclude-base
# NOTE: remember to update a similar list in buildfile and bpkg.cli as well as
# the help topics sections in bpkg/buildfile and help.cxx.
#
-pages="cfg-create help pkg-clean pkg-configure pkg-disfigure \
+pages="cfg-add cfg-create help pkg-clean pkg-configure pkg-disfigure \
pkg-drop pkg-fetch pkg-checkout pkg-install pkg-purge pkg-status pkg-test \
pkg-uninstall pkg-unpack pkg-update pkg-verify rep-add rep-remove rep-list \
rep-create rep-fetch rep-info repository-signing repository-types \
diff --git a/manifest b/manifest
index e42542b..39c187a 100644
--- a/manifest
+++ b/manifest
@@ -20,5 +20,6 @@ depends: * bpkg >= 0.13.0
requires: ? cli ; Only required if changing .cli files.
depends: libodb [2.5.0-b.20.1 2.5.0-b.21)
depends: libodb-sqlite [2.5.0-b.20.1 2.5.0-b.21)
+depends: libsqlite3 ^3.21.0 ; ATTACH in transaction
depends: libbutl [0.14.0-a.0.1 0.14.0-a.1)
depends: libbpkg [0.14.0-a.0.1 0.14.0-a.1)
diff --git a/repositories.manifest b/repositories.manifest
index b296991..19fb7ab 100644
--- a/repositories.manifest
+++ b/repositories.manifest
@@ -11,6 +11,10 @@ location: ../libbpkg.git##HEAD
:
role: prerequisite
+location: https://git.build2.org/packaging/sqlite/sqlite.git##HEAD
+
+:
+role: prerequisite
location: https://git.codesynthesis.com/odb/libodb.git##HEAD
:
diff --git a/tests/cfg-add.testscript b/tests/cfg-add.testscript
new file mode 100644
index 0000000..d47d6e4
--- /dev/null
+++ b/tests/cfg-add.testscript
@@ -0,0 +1,190 @@
+# file : tests/cfg-add.testscript
+# license : MIT; see accompanying LICENSE file
+
+.include common.testscript
+
+cfg_create += 2>!
+
+# @@ To verify the association result use cfg-list command rather than
+# pkg-status, when implemented.
+#
+
+test.arguments += -d cfg
+
+cfg_uuid = '18f48b4b-b5d9-4712-b98c-1930df1c4228'
+acfg_uuid = '28f48b4b-b5d9-4712-b98c-1930df1c4228'
+
+: success
+:
+{
+ $cfg_create -d cfg --name 'main';
+ $cfg_create -d acfg --name 'shared' --config-uuid "$acfg_uuid" &acfg/***;
+
+  # Try to associate the configuration with itself.
+ #
+ $* cfg 2>>/~"%EOE%" != 0;
+ %error: associating configuration .+/cfg/ with itself%
+ % info: uuid: .{36}%
+ EOE
+
+  # Try to associate a configuration under the current configuration's name.
+ #
+ $* acfg --name 'main' 2>>/~"%EOE%" != 0;
+ %error: associating configuration .+/acfg/ using current configuration name 'main'%
+ info: consider specifying alternative name with --name
+ EOE
+
+ # Associate configuration.
+ #
+ $* acfg 2>>/~"%EOE%";
+ %associated configuration .+/acfg/%
+ info: uuid: $acfg_uuid
+ info: type: target
+ info: name: shared
+ info: id: 1
+ EOE
+
+ $pkg_status -d cfg libfoo >'libfoo unknown';
+ $pkg_status -d acfg libfoo >'libfoo unknown';
+
+ # Test that the recreated configuration can be implicitly re-associated.
+ #
+ rm -r cfg;
+ $cfg_create -d cfg --name 'test' --config-uuid "$cfg_uuid" &cfg/***;
+
+ $* acfg 2>>/~"%EOE%";
+ %warning: current configuration .+/cfg/ is already implicitly associated with .+/acfg/%
+ %associated configuration .+/acfg/%
+ info: uuid: $acfg_uuid
+ info: type: target
+ info: name: shared
+ info: id: 1
+ EOE
+
+ # Test that the repeated association is reported.
+ #
+ $* acfg 2>>/~%EOE% != 0;
+ %error: configuration with uuid .{36} is already associated as \.\./acfg/%
+ EOE
+
+ rm -r acfg;
+
+ $cfg_create -d acfg --name 'shared' &acfg/***;
+
+ # Test that the path clash is reported.
+ #
+ $* acfg 2>>/~%EOE% != 0;
+ %error: configuration with path .+/acfg/ is already associated%
+ EOE
+
+ # Test that the name clash is reported.
+ #
+ $cfg_create -d acfg2 --name 'shared' &acfg2/***;
+
+ $* acfg2 --name 'shared' 2>>/EOE != 0;
+ error: configuration with name shared is already associated as ../acfg/
+ info: consider specifying alternative name with --name
+ EOE
+
+ $* acfg2 2>>/~%EOE%;
+ warning: configuration with name shared is already associated as ../acfg/, associating as unnamed
+ %associated configuration .+/acfg2/%
+ % info: uuid: .{36}%
+ info: type: target
+ info: id: 2
+ EOE
+
+ # Test that the integrity check fails.
+ #
+ $pkg_status -d cfg libfoo 2>>/~"%EOE%" != 0;
+ error: configuration acfg/ uuid mismatch
+ % info: uuid .+%
+ info: associated with cfg/ as $acfg_uuid
+ EOE
+
+ # Associate the second configuration.
+ #
+ rm -r acfg;
+ $cfg_create -d acfg --name 'shared' --config-uuid "$acfg_uuid" &acfg/***;
+
+ $pkg_status -d cfg libfoo >'libfoo unknown';
+ $pkg_status -d acfg2 libfoo >'libfoo unknown';
+
+ # Test that the configuration type mismatch is reported.
+ #
+ mv cfg cfg.tmp;
+ $cfg_create -d cfg --type host --config-uuid "$cfg_uuid";
+
+ $* -d acfg2 cfg 2>>/EOE != 0;
+ error: configuration cfg/ type mismatch
+ info: type host
+ info: implicitly associated with acfg2/ as target
+ EOE
+
+ rm -r cfg;
+ mv cfg.tmp cfg;
+
+ # Make the implicit association explicit.
+ #
+ $* -d acfg2 cfg 2>>/~"%EOE%";
+ %associated configuration .+/cfg/%
+ info: uuid: $cfg_uuid
+ info: type: target
+ info: name: test
+ info: id: 1
+ EOE
+
+ $pkg_status -d cfg libfoo >'libfoo unknown';
+ $pkg_status -d acfg2 libfoo >'libfoo unknown';
+
+ $* -d acfg2 cfg 2>>/~%EOE% != 0;
+ %error: configuration with uuid .{36} is already associated as \.\./cfg/%
+ EOE
+
+ # Test that the reverse association path clash is reported.
+ #
+ rm -r cfg;
+ $cfg_create -d cfg --name 'test' &cfg/***;
+
+ $* acfg2 2>>/~%EOE% != 0;
+ %error: current configuration .+/cfg/ is already associated with .+/acfg2/%
+ EOE
+
+ # Test that the repeated reverse association is reported.
+ #
+ $cfg_create -d cfg2 --config-uuid "$cfg_uuid";
+
+ $* -d cfg2 acfg2 2>>/~"%EOE%" != 0;
+ %error: current configuration $cfg_uuid is already associated with .+/acfg2/%
+ EOE
+
+  # Make sure that the current configuration is reverse-associated as unnamed.
+ #
+ # @@ Make sure that's really the case when the cfg-list command is
+ # implemented.
+ #
+ rm -r cfg2;
+ $cfg_create -d cfg2 --name 'test' &cfg2/***;
+
+ $* -d cfg2 acfg2 2>>/~%EOE%;
+ %associated configuration .+/acfg2/%
+ % info: uuid: .{36}%
+ info: type: target
+ info: name: shared
+ info: id: 1
+ EOE
+
+ # Test that the integrity check fails.
+ #
+ $pkg_status -d cfg2 libfoo 2>>/~"%EOE%" != 0;
+ error: configuration cfg/ uuid mismatch
+ % info: uuid .+%
+ info: associated with acfg2/ as $cfg_uuid
+ EOE
+
+ rm -r cfg;
+ $cfg_create -d cfg --name 'test' --config-uuid "$cfg_uuid" &cfg/***;
+
+ $pkg_status -d cfg2 libfoo >'libfoo unknown';
+ $pkg_status -d acfg2 libfoo >'libfoo unknown'
+}
diff --git a/tests/cfg-create.testscript b/tests/cfg-create.testscript
index 9461dad..1993547 100644
--- a/tests/cfg-create.testscript
+++ b/tests/cfg-create.testscript
@@ -5,6 +5,11 @@
config_cxx = config.cxx=$quote($recall($cxx.path) $cxx.config.mode, true)
+cfg_create += 2>!
+
+# @@ To verify the creation result use cfg-list command rather than
+# pkg-status, when implemented.
+#
pkg_status += -d cfg
: non-empty
@@ -25,6 +30,8 @@ EOE
{
$* 2>>/~%EOE%;
%created new configuration in .+/cfg/%
+ % info: uuid: .{36}%
+ info: type: target
EOE
$pkg_status libfoo >'libfoo unknown'
@@ -35,6 +42,8 @@ EOE
{
$* "config.install.root='$~/opt'" 2>>/~%EOE%;
%created new configuration in .+/cfg/%
+ % info: uuid: .{36}%
+ info: type: target
EOE
$pkg_status libfoo >'libfoo unknown';
@@ -51,6 +60,8 @@ EOE
{
$* cxx $config_cxx 2>>/~%EOE%;
%created new configuration in .+/cfg/%
+ % info: uuid: .{36}%
+ info: type: target
EOE
$pkg_status libfoo >'libfoo unknown';
@@ -69,6 +80,8 @@ EOE
$* --wipe 2>>/~%EOE%;
%created new configuration in .+/cfg/%
+ % info: uuid: .{36}%
+ info: type: target
EOE
$pkg_status libfoo >'libfoo unknown'
@@ -81,8 +94,121 @@ EOE
$* --existing 2>>/~%EOE%;
%initialized existing configuration in .+/cfg/%
+ % info: uuid: .{36}%
+ info: type: target
+ EOE
+
+ $pkg_status libfoo >'libfoo unknown'
+ }
+}
+
+: name
+:
+{
+ test.arguments += -d cfg
+
+ : valid
+ :
+ {
+ $* --name foo 2>>/~%EOE% &cfg/***;
+ %created new configuration in .+/cfg/%
+ % info: uuid: .{36}%
+ info: type: target
+ info: name: foo
EOE
+ # @@ To verify the result use cfg-list, when implemented.
+ #
$pkg_status libfoo >'libfoo unknown'
}
+
+ : invalid
+ :
+ : Also use the short option.
+ :
+ $* --name 123 2>>EOE != 0
+ error: invalid --name option value '123': illegal first character (must be alphabetic or underscore)
+ EOE
+}
+
+: type
+:
+{
+ test.arguments += -d cfg
+
+ : valid
+ :
+ {
+ $* --type host 2>>/~%EOE% &cfg/***;
+ %created new configuration in .+/cfg/%
+ % info: uuid: .{36}%
+ info: type: host
+ EOE
+
+ $pkg_status libfoo >'libfoo unknown'
+ }
+
+ : invalid
+ :
+ : Also use the short option.
+ :
+ $* --type '' 2>>EOE != 0
+ error: empty --type option value
+ EOE
+}
+
+: uuid
+:
+{
+ test.arguments += -d cfg
+
+ : valid
+ :
+ {
+ $* --config-uuid '18f48b4b-b5d9-4712-b98c-1930df1c4228' 2>>/~%EOE% &cfg/***;
+ %created new configuration in .+/cfg/%
+ info: uuid: 18f48b4b-b5d9-4712-b98c-1930df1c4228
+ info: type: target
+ EOE
+
+ $pkg_status libfoo >'libfoo unknown'
+ }
+
+ : invalid
+ :
+ : Also use the short option.
+ :
+ $* --config-uuid '123' 2>>EOE != 0
+ error: invalid value '123' for option '--config-uuid'
+ EOE
+}
+
+: associate-host-config
+:
+{
+ test.arguments += -d cfg
+
+ : valid-type
+ :
+ {
+ $cfg_create -d host --type 'host' &host/***;
+
+ $* --host-config host 2>>/~%EOE% &cfg/***;
+ %created new configuration in .+/cfg/%
+ % info: uuid: .{36}%
+ info: type: target
+ EOE
+
+ $pkg_status libfoo >'libfoo unknown'
+ }
+
+ : invalid-type
+ :
+ {
+ $cfg_create -d host &host/***;
+
+ $* --host-config host 2>>/~%EOE% != 0
+ %error: host configuration .+/host/ is of 'target' type%
+ EOE
+ }
}
diff --git a/tests/common.testscript b/tests/common.testscript
index 5db8c6a..4593d11 100644
--- a/tests/common.testscript
+++ b/tests/common.testscript
@@ -32,6 +32,7 @@ test.options += --default-options $options_guard \
# (for example, to make sure that configuration post-test state is valid and is
# as expected).
#
+cfg_add = $* cfg-add
cfg_create = $* cfg-create
pkg_build = $* pkg-build
pkg_checkout = $* pkg-checkout
diff --git a/tests/common/associated/t7a/foo-1.0.0.tar.gz b/tests/common/associated/t7a/foo-1.0.0.tar.gz
new file mode 100644
index 0000000..100496d
--- /dev/null
+++ b/tests/common/associated/t7a/foo-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/associated/t7a/libbar-1.0.0.tar.gz b/tests/common/associated/t7a/libbar-1.0.0.tar.gz
new file mode 100644
index 0000000..21940a3
--- /dev/null
+++ b/tests/common/associated/t7a/libbar-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/associated/t7a/libbaz-1.0.0.tar.gz b/tests/common/associated/t7a/libbaz-1.0.0.tar.gz
new file mode 100644
index 0000000..723ac32
--- /dev/null
+++ b/tests/common/associated/t7a/libbaz-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/associated/t7a/libbox-1.0.0.tar.gz b/tests/common/associated/t7a/libbox-1.0.0.tar.gz
new file mode 100644
index 0000000..7c293e0
--- /dev/null
+++ b/tests/common/associated/t7a/libbox-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/associated/t7a/libfix-1.0.0.tar.gz b/tests/common/associated/t7a/libfix-1.0.0.tar.gz
new file mode 100644
index 0000000..471a75f
--- /dev/null
+++ b/tests/common/associated/t7a/libfix-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/associated/t7a/repositories.manifest b/tests/common/associated/t7a/repositories.manifest
new file mode 100644
index 0000000..5b70556
--- /dev/null
+++ b/tests/common/associated/t7a/repositories.manifest
@@ -0,0 +1 @@
+: 1
diff --git a/tests/common/associated/t7b/foo-1.1.0.tar.gz b/tests/common/associated/t7b/foo-1.1.0.tar.gz
new file mode 100644
index 0000000..aa929fb
--- /dev/null
+++ b/tests/common/associated/t7b/foo-1.1.0.tar.gz
Binary files differ
diff --git a/tests/common/associated/t7b/libbar-1.1.0.tar.gz b/tests/common/associated/t7b/libbar-1.1.0.tar.gz
new file mode 100644
index 0000000..9926348
--- /dev/null
+++ b/tests/common/associated/t7b/libbar-1.1.0.tar.gz
Binary files differ
diff --git a/tests/common/associated/t7b/libbaz-1.1.0.tar.gz b/tests/common/associated/t7b/libbaz-1.1.0.tar.gz
new file mode 100644
index 0000000..6b12460
--- /dev/null
+++ b/tests/common/associated/t7b/libbaz-1.1.0.tar.gz
Binary files differ
diff --git a/tests/common/associated/t7b/libbox-1.1.0.tar.gz b/tests/common/associated/t7b/libbox-1.1.0.tar.gz
new file mode 100644
index 0000000..698661d
--- /dev/null
+++ b/tests/common/associated/t7b/libbox-1.1.0.tar.gz
Binary files differ
diff --git a/tests/common/associated/t7b/repositories.manifest b/tests/common/associated/t7b/repositories.manifest
new file mode 100644
index 0000000..aed60ed
--- /dev/null
+++ b/tests/common/associated/t7b/repositories.manifest
@@ -0,0 +1,4 @@
+: 1
+location: ../t7a
+role: complement
+:
diff --git a/tests/pkg-build.testscript b/tests/pkg-build.testscript
index 50db679..a13d454 100644
--- a/tests/pkg-build.testscript
+++ b/tests/pkg-build.testscript
@@ -113,6 +113,21 @@
# | | `-- root.build
# | `-- *
# |
+# |-- t7a
+# | |-- libbaz-1.0.0.tar.gz
+# | |-- foo-1.0.0.tar.gz -> libbaz ^1.0.0
+# | |-- libbar-1.0.0.tar.gz -> * foo ^1.0.0, libbaz ^1.0.0
+# | |-- libbox-1.0.0.tar.gz -> * foo ^1.0.0, libbaz ^1.0.0
+# | |-- libfix-1.0.0.tar.gz -> libbar ^1.0.0, libbox ^1.0.0
+# | `-- repositories.manifest
+# |
+# |-- t7b -> t7a (complement repository)
+# | |-- libbaz-1.1.0.tar.gz
+# | |-- foo-1.1.0.tar.gz -> libbaz ^1.1.0
+# | |-- libbar-1.1.0.tar.gz -> * foo ^1.1.0, libbaz ^1.0.0
+# | |-- libbox-1.1.0.tar.gz -> * foo ^1.0.0
+# | `-- repositories.manifest
+# |
# `-- git
# |-- libbar.git -> style-basic.git (prerequisite repository)
# |-- libbaz.git
@@ -139,6 +154,8 @@ posix = ($cxx.target.class != 'windows')
cp -r $src/t4e $out/t4e && $rep_create $out/t4e &$out/t4e/packages.manifest
cp -r $src/t5 $out/t5 && $rep_create $out/t5 &$out/t5/packages.manifest
cp -r $src/t6 $out/t6 && $rep_create $out/t6 &$out/t6/packages.manifest
+ cp -r $src/t7a $out/t7a && $rep_create $out/t7a &$out/t7a/packages.manifest
+ cp -r $src/t7b $out/t7b && $rep_create $out/t7b &$out/t7b/packages.manifest
# Create git repositories.
#
@@ -149,6 +166,8 @@ end
config_cxx = config.cxx=$quote($recall($cxx.path) $cxx.config.mode, true)
+cfg_create += 2>!
+cfg_add += 2>!
pkg_configure += -d cfg $config_cxx 2>!
pkg_disfigure += -d cfg
pkg_drop += -d cfg --yes 2>!
@@ -408,6 +427,18 @@ test.options += --no-progress
info: while satisfying libbar/1.0.0
EOE
+ : unknown-dependency-config
+ :
+ : As above but with an associated configuration.
+ :
+ $clone_root_cfg;
+ $cfg_create -d cfg2 &cfg2/***;
+ $cfg_add -d cfg cfg2;
+ $* $src/libbar-1.0.0.tar.gz +{ --config-id 1 } 2>>~%EOE% != 0
+ %error: unknown dependency libfoo of package libbar \[cfg2.\]%
+ %info: while satisfying libbar/1.0.0 \[cfg2.\]%
+ EOE
+
: t2
:
{
@@ -513,6 +544,21 @@ test.options += --no-progress
EOO
}
+ : latest-config
+ :
+ : As above but with an associated configuration.
+ :
+ {
+ $clone_cfg;
+ $cfg_create -d cfg2 &cfg2/***;
+ $cfg_add -d cfg cfg2;
+
+ $* libbar/1.1.0 +{ --config-id 1 } >>~%EOO%
+ %new libfoo/1.1.0\+1 \[cfg2.\] \(required by libbar \[cfg2.\]\)%
+ %new libbar/1.1.0 \[cfg2.\]%
+ EOO
+ }
+
: zero
:
{
@@ -755,6 +801,24 @@ test.options += --no-progress
info: while satisfying libbaz/1.1.0
EOE
+ : unable-satisfy-config
+ :
+ : As above but with an associated configuration.
+ :
+ $clone_cfg;
+ $cfg_create -d cfg2 &cfg2/***;
+ $cfg_add -d cfg cfg2;
+ $* libbaz ?libbar +{ --config-id 1 } libfoo/1.0.0 +{ --config-id 1 } 2>>~%EOE% != 0
+ error: unable to satisfy constraints on package libfoo
+ % info: libbar \[cfg2.\] depends on \(libfoo == 1.1.0\)%
+ info: command line depends on (libfoo == 1.0.0)
+ info: available libfoo/1.1.0
+ info: available libfoo/1.0.0
+ info: explicitly specify libfoo version to manually satisfy both constraints
+ %info: while satisfying libbar/1.1.0 \[cfg2.\]%
+ info: while satisfying libbaz/1.1.0
+ EOE
+
: not-available
:
$clone_cfg;
@@ -832,6 +896,38 @@ test.options += --no-progress
$pkg_disfigure libfoo 2>'disfigured libfoo/1.1.0';
$pkg_purge libfoo 2>'purged libfoo/1.1.0'
}
+
+ : unable-downgrade-config
+ :
+ : As above but with an associated configuration.
+ :
+ {
+ $clone_cfg;
+ $cfg_create -d cfg2 &cfg2/***;
+ $rep_add -d cfg2 $rep/t4c && $rep_fetch -d cfg2;
+ $cfg_add -d cfg2 cfg;
+ $pkg_fetch libfoo/1.1.0 && $pkg_unpack libfoo && $pkg_configure libfoo;
+ $pkg_fetch libbar/1.1.0 && $pkg_unpack libbar && $pkg_configure libbar;
+
+ $* libfoo-1.2.0.tar.gz 2>>EOE != 0;
+ error: unknown package libfoo-1.2.0.tar.gz
+ EOE
+
+ $* -d cfg2 libfoo/1.0.0 +{ --config-id 1 } 2>>~%EOE% != 0;
+ %error: unable to downgrade package libfoo/1.1.0 \[cfg.\] to 1.0.0%
+ % info: because package libbar \[cfg.\] depends on \(libfoo == 1.1.0\)%
+ info: explicitly request up/downgrade of package libbar
+ info: or explicitly specify package libfoo version to manually satisfy these constraints
+ EOE
+
+ $* libfoo/1.1.0 --keep-unused >'update libfoo/1.1.0';
+
+ $pkg_disfigure libbar 2>'disfigured libbar/1.1.0';
+ $pkg_purge libbar 2>'purged libbar/1.1.0';
+
+ $pkg_disfigure libfoo 2>'disfigured libfoo/1.1.0';
+ $pkg_purge libfoo 2>'purged libfoo/1.1.0'
+ }
}
: dependent-reconfiguration
@@ -1166,6 +1262,37 @@ test.options += --no-progress
$pkg_disfigure libfoo 2>'disfigured libfoo/1.0.0';
$pkg_purge libfoo 2>'purged libfoo/1.0.0'
}
+
+ : forcing-upgrade-held-config
+ :
+ : As above but with an associated configuration.
+ :
+ {
+ $clone_cfg;
+ $cfg_create -d cfg2 &cfg2/***;
+ $cfg_add -d cfg cfg2;
+
+ $* libfoo/1.0.0 +{ --config-id 1 } 2>>~%EOE%;
+ %fetched libfoo/1.0.0 \[cfg2.\]%
+ %unpacked libfoo/1.0.0 \[cfg2.\]%
+ %configured libfoo/1.0.0 \[cfg2.\]%
+ %info: .+ is up to date%
+ %updated libfoo/1.0.0 \[cfg2.\]%
+ EOE
+
+ $pkg_status -d cfg2 libfoo >'!libfoo configured !1.0.0';
+
+ $* libbaz ?libbar +{ --config-id 1 } 2>>~%EOE% != 0;
+ %error: package libbar \[cfg2.\] dependency on \(libfoo == 1.1.0\) is forcing upgrade of libfoo/1.0.0 \[cfg2.\] to 1.1.0%
+ % info: package version libfoo/1.0.0 \[cfg2.\] is held%
+ info: explicitly request version upgrade to continue
+ %info: while satisfying libbar/1.1.0 \[cfg2.\]%
+ info: while satisfying libbaz/1.1.0
+ EOE
+
+ $pkg_disfigure -d cfg2 libfoo 2>'disfigured libfoo/1.0.0';
+ $pkg_purge -d cfg2 libfoo 2>'purged libfoo/1.0.0'
+ }
}
: drop-dependencies
@@ -2280,6 +2407,29 @@ test.options += --no-progress
$pkg_drop libbar
}
+
+ : unsatisfied-config
+ :
+ : As above but with an associated configuration.
+ :
+ {
+ $clone_cfg;
+ $cfg_create -d cfg2 &cfg2/***;
+ $cfg_add -d cfg cfg2;
+
+ $* libbar/0.0.1 +{ --config-id 1 } 2>!;
+
+ $pkg_status -d cfg2 libbaz >'libbaz configured 0.0.1';
+
+ $* ?libbaz/0.0.3 +{ --config-id 1 } 2>>~%EOE% != 0;
+ %error: package libbaz \[cfg2.\] doesn't satisfy its dependents%
+ % info: libbaz/0.0.3 doesn't satisfy libbar/0.0.1 \[cfg2.\]%
+ EOE
+
+ $pkg_status -d cfg2 libbaz >'libbaz configured 0.0.1';
+
+ $pkg_drop -d cfg2 libbar
+ }
}
: scratch
@@ -2434,7 +2584,7 @@ test.options += --no-progress
: as a dependency, so it is built incrementally.
:
{
- $cfg_create cxx $config_cxx -d cfg 2>- &cfg/***;
+ $cfg_create cxx $config_cxx -d cfg &cfg/***;
# Add libhello as the dir repository.
#
@@ -2853,7 +3003,7 @@ test.options += --no-progress
{
test.arguments += --yes
- +$cfg_create cxx $config_cxx -d cfg 2>- &cfg/***
+ +$cfg_create cxx $config_cxx -d cfg &cfg/***
: direct
:
@@ -2872,6 +3022,27 @@ test.options += --no-progress
EOE
}
+ : direct-config
+ :
+ : As above but with an associated configuration.
+ :
+ {
+ $clone_cfg;
+ $cfg_create -d cfg2 &cfg2/***;
+ $cfg_add -d cfg cfg2;
+
+ cp -r $src/libfoo-1.1.0/ libfoo;
+ echo "depends: libfoo" >+ libfoo/manifest;
+ $rep_add libfoo --type dir;
+
+ $rep_fetch;
+
+ $* libfoo +{ --config-id 1 } 2>>~%EOE% != 0
+ %error: dependency cycle detected involving package libfoo \[cfg2.\]%
+ % info: libfoo/1.1.0 \[cfg2.\] depends on libfoo/1.1.0 \[cfg2.\]%
+ EOE
+ }
+
: indirect
:
{
@@ -2950,7 +3121,7 @@ test.options += --no-progress
{
test.arguments += --yes
- +$cfg_create cxx $config_cxx -d cfg 2>- &cfg/***
+ +$cfg_create cxx $config_cxx -d cfg &cfg/***
+cp -r $src/libhello-1.0.0 ./libhello
+$rep_add libhello --type dir
+$rep_fetch
@@ -3426,6 +3597,30 @@ test.options += --no-progress
$pkg_drop libfoo
}
+ : no-patch-config
+ :
+ : As above but with an associated configuration.
+ :
+ {
+ $clone_root_cfg;
+ $cfg_create -d cfg2 &cfg2/***;
+ $cfg_add -d cfg cfg2;
+
+ $* "libfoo@$rep/t1" +{ --config-id 1 } --patch 2>>~%EOE%;
+ %.+
+ %configured libfoo/1.0.0 \[cfg2.\]%
+ %info: .+ is up to date%
+ %updated libfoo/1.0.0 \[cfg2.\]%
+ EOE
+
+ $* "libfoo@$rep/t3" +{ --config-id 1 } --patch 2>>~%EOE% != 0;
+ %.+
+ %error: patch version for libfoo/1.0.0 \[cfg2.\] is not found in pkg:build2.org/pkg-build/t3%
+ EOE
+
+ $pkg_drop -d cfg2 libfoo
+ }
+
: package-in-complement
:
{
@@ -3528,7 +3723,7 @@ test.options += --no-progress
: keep-out
:
{
- +$cfg_create cxx $config_cxx -d cfg 2>- &cfg/***
+ +$cfg_create cxx $config_cxx -d cfg &cfg/***
# Build libhello as an external package.
#
@@ -3646,7 +3841,7 @@ test.options += --no-progress
# Distribute using the dedicated configuration to avoid the 'c and cxx
# module toolchain pattern mismatch' warning.
#
- +$cfg_create cxx $config_cxx -d cfg 2>- &cfg/***
+ +$cfg_create cxx $config_cxx -d cfg &cfg/***
+$build 'dist(../../libhello/@./cfg/libhello/)' \
config.dist.root=./ \
@@ -3945,3 +4140,567 @@ else
$pkg_purge style-basic
}
}
+
+: associated-configs
+:
+{
+ : 2-configs
+ :
+ {
+ +$clone_root_cfg && $rep_add $rep/t4c && $rep_fetch
+
+ : invalid-cfg
+ :
+ {
+ +$clone_cfg
+
+ : config-id
+ :
+ {
+ $clone_cfg;
+ $* libbaz --config-id 1 2>>/EOE != 0
+ error: no configuration with id 1 is associated with cfg/
+ EOE
+ }
+
+ : config-name
+ :
+ {
+ $clone_cfg;
+ $* libbaz --config-name foo 2>>/EOE != 0
+ error: no configuration with name 'foo' is associated with cfg/
+ EOE
+ }
+
+ : config-uuid
+ :
+ {
+ $clone_cfg;
+ $* libbaz --config-uuid '18f48b4b-b5d9-4712-b98c-1930df1c4228' 2>>/EOE != 0
+ error: no configuration with uuid 18f48b4b-b5d9-4712-b98c-1930df1c4228 is associated with cfg/
+ EOE
+ }
+
+ : multiple
+ :
+ {
+ $clone_cfg;
+ $* libbaz --config-id 1 --config-name foo 2>>/EOE != 0
+ error: multiple --config-* specified
+ EOE
+ }
+ }
+
+ : baz
+ :
+ {
+ $clone_cfg;
+ $cfg_create -d cfg-bar-foo &cfg-bar-foo/***;
+ $cfg_add -d cfg cfg-bar-foo;
+
+ $* libbaz ?libbar +{ --config-id 1 } ?libfoo +{ --config-id 1 } 2>>~%EOE%;
+ %fetched libfoo/1.1.0 \[cfg-bar-foo.\]%
+ %unpacked libfoo/1.1.0 \[cfg-bar-foo.\]%
+ %fetched libbar/1.1.0 \[cfg-bar-foo.\]%
+ %unpacked libbar/1.1.0 \[cfg-bar-foo.\]%
+ fetched libbaz/1.1.0
+ unpacked libbaz/1.1.0
+ %configured libfoo/1.1.0 \[cfg-bar-foo.\]%
+ %configured libbar/1.1.0 \[cfg-bar-foo.\]%
+ configured libbaz/1.1.0
+ %info: cfg-bar-foo.+libfoo-1.1.0.+ is up to date%
+ %info: cfg-bar-foo.+libbar-1.1.0.+ is up to date%
+ %info: cfg[^-].+libbaz-1.1.0.+ is up to date%
+ %updated libfoo/1.1.0 \[cfg-bar-foo.\]%
+ %updated libbar/1.1.0 \[cfg-bar-foo.\]%
+ updated libbaz/1.1.0
+ EOE
+
+ $pkg_status -r libbaz >>/EOO;
+ !libbaz configured 1.1.0
+ libbar [cfg-bar-foo/] configured 1.1.0
+ libfoo [cfg-bar-foo/] configured 1.1.0
+ libfoo [cfg-bar-foo/] configured 1.1.0
+ EOO
+
+ $pkg_status -d cfg-bar-foo -r libbar >>EOO;
+ libbar configured 1.1.0
+ libfoo configured 1.1.0
+ EOO
+
+ $pkg_status -d cfg-bar-foo libfoo >'libfoo configured 1.1.0';
+
+ $pkg_drop libbaz;
+
+ $pkg_status libbaz libbar libfoo >>EOO
+ libbaz available 1.1.0
+ libbar available [1.1.0]
+ libfoo available [1.1.0] 1.0.0
+ EOO
+ }
+ }
+
+ : 3-configs
+ :
+ {
+ +$clone_root_cfg && $rep_add $rep/t4c && $rep_fetch
+
+ : baz
+ :
+ {
+ uuid = '28f48b4b-b5d9-4712-b98c-1930df1c4228';
+
+ $clone_cfg;
+ $cfg_create -d cfg-bar &cfg-bar/***;
+ $cfg_create -d cfg-foo --config-uuid $uuid &cfg-foo/***;
+
+ $cfg_add -d cfg cfg-bar;
+ $cfg_add -d cfg-bar cfg-foo;
+
+ $* libbar@"$rep/t4b" -d cfg-bar ?libfoo +{ --config-id 2 } --trust-yes 2>>~%EOE%;
+ added pkg:build2.org/pkg-build/t4b
+ fetching pkg:build2.org/pkg-build/t4b
+ fetching pkg:build2.org/pkg-build/t4a (prerequisite of pkg:build2.org/pkg-build/t4b)
+ %fetched libfoo/1.1.0 \[cfg-foo.\]%
+ %unpacked libfoo/1.1.0 \[cfg-foo.\]%
+ fetched libbar/1.1.0
+ unpacked libbar/1.1.0
+ %configured libfoo/1.1.0 \[cfg-foo.\]%
+ configured libbar/1.1.0
+ %info: cfg-foo.+libfoo-1.1.0.+ is up to date%
+ %info: cfg-bar.+libbar-1.1.0.+ is up to date%
+ %updated libfoo/1.1.0 \[cfg-foo.\]%
+ updated libbar/1.1.0
+ EOE
+
+ $* libfoo --config-uuid $uuid 2>>~%EOE%;
+ %info: cfg-foo.+libfoo-1.1.0.+ is up to date%
+ %updated libfoo/1.1.0 \[cfg-foo.\]%
+ EOE
+
+ $* libbaz 2>>~%EOE%;
+ fetched libbaz/1.1.0
+ unpacked libbaz/1.1.0
+ configured libbaz/1.1.0
+ %info: cfg[^-].+libbaz-1.1.0.+ is up to date%
+ updated libbaz/1.1.0
+ EOE
+
+ $pkg_status -r >>/EOO;
+ !libbaz configured 1.1.0
+ !libbar [cfg-bar/] configured !1.1.0
+ !libfoo [cfg-foo/] configured 1.1.0
+ !libfoo [cfg-foo/] configured 1.1.0
+ !libbar [cfg-bar/] configured !1.1.0
+ !libfoo [cfg-foo/] configured 1.1.0
+ !libfoo [cfg-foo/] configured 1.1.0
+ EOO
+
+ $pkg_drop libbaz;
+
+ $* ?libfoo --config-uuid $uuid; # Unhold.
+
+ $pkg_status libbaz libbar libfoo >>/EOO;
+ libbaz available 1.1.0
+ !libbar [cfg-bar/] configured !1.1.0
+ libfoo [cfg-foo/] configured 1.1.0
+ EOO
+
+ $* ?libbar +{ --config-id 1 } <'y' 2>>~%EOE%;
+ % drop libfoo/1.1.0 \[cfg-foo.\] \(unused\)%
+ % drop libbar/1.1.0 \[cfg-bar.\] \(unused\)%
+ %continue\? \[Y/n\] disfigured libbar/1.1.0 \[cfg-bar.\]%
+ %disfigured libfoo/1.1.0 \[cfg-foo.\]%
+ %purged libfoo/1.1.0 \[cfg-foo.\]%
+ %purged libbar/1.1.0 \[cfg-bar.\]%
+ EOE
+
+ $pkg_status libbar libfoo >>EOO
+ libbar available [1.1.0]
+ libfoo available [1.1.0] 1.0.0
+ EOO
+ }
+ }
+
+ : buildtime-dep
+ :
+ {
+ +$clone_root_cfg && $rep_add $rep/t7a && $rep_fetch
+
+ : external-config
+ :
+ {
+ +$clone_cfg
+ +$cfg_create -d cfg2 --type host --name alt-host &cfg2/***
+ +$cfg_add -d cfg cfg2
+
+ : upgrade-dependency
+ :
+ {
+ $clone_cfg;
+ cp -pr ../cfg2 ./;
+
+ $* libbar <'y' 2>>~%EOE%;
+ % new libbaz/1.0.0 \[cfg2.\] \(required by foo \[cfg2.\]\)%
+ % new foo/1.0.0 \[cfg2.\] \(required by libbar\)%
+ % new libbaz/1.0.0 \(required by libbar\)%
+ new libbar/1.0.0
+ %continue\? \[Y/n\] fetched libbaz/1.0.0 \[cfg2.\]%
+ %unpacked libbaz/1.0.0 \[cfg2.\]%
+ %fetched foo/1.0.0 \[cfg2.\]%
+ %unpacked foo/1.0.0 \[cfg2.\]%
+ fetched libbaz/1.0.0
+ unpacked libbaz/1.0.0
+ fetched libbar/1.0.0
+ unpacked libbar/1.0.0
+ %configured libbaz/1.0.0 \[cfg2.\]%
+ %configured foo/1.0.0 \[cfg2.\]%
+ configured libbaz/1.0.0
+ configured libbar/1.0.0
+ %info: .+libbar-1.0.0.+ is up to date%
+ updated libbar/1.0.0
+ EOE
+
+ $pkg_status -r libbar >>/EOO;
+ !libbar configured 1.0.0
+ foo [cfg2/] configured 1.0.0
+ libbaz [cfg2/] configured 1.0.0
+ libbaz configured 1.0.0
+ EOO
+
+        # While at it, make sure that when several host configurations are
+        # available, the configuration of the already selected package is
+        # chosen and we don't fail with the 'multiple possible host
+        # configurations' diagnostics.
+ #
+ $cfg_create -d cfg3 --type host &cfg3/***;
+ $cfg_add -d cfg cfg3;
+
+ $rep_add $rep/t7b && $rep_fetch;
+
+ $* libbar <'y' 2>>~%EOE%;
+ % upgrade libbaz/1.1.0 \[cfg2.\] \(required by foo \[cfg2.\]\)%
+ % upgrade foo/1.1.0 \[cfg2.\] \(required by libbar\)%
+ upgrade libbar/1.1.0
+ %continue\? \[Y/n\] disfigured libbar/1.0.0%
+ %disfigured foo/1.0.0 \[cfg2.\]%
+ %disfigured libbaz/1.0.0 \[cfg2.\]%
+ %fetched libbaz/1.1.0 \[cfg2.\]%
+ %unpacked libbaz/1.1.0 \[cfg2.\]%
+ %fetched foo/1.1.0 \[cfg2.\]%
+ %unpacked foo/1.1.0 \[cfg2.\]%
+ fetched libbar/1.1.0
+ unpacked libbar/1.1.0
+ %configured libbaz/1.1.0 \[cfg2.\]%
+ %configured foo/1.1.0 \[cfg2.\]%
+ configured libbar/1.1.0
+ %info: .+libbar-1.1.0.+ is up to date%
+ updated libbar/1.1.0
+ EOE
+
+ $pkg_status -r libbar >>/EOO;
+ !libbar configured 1.1.0
+ foo [cfg2/] configured 1.1.0
+ libbaz [cfg2/] configured 1.1.0
+ libbaz configured 1.0.0 available 1.1.0
+ EOO
+
+        # Now upgrade libbaz in cfg/ and downgrade it in cfg2/. Note that
+        # libbaz/1.1.0 doesn't exist in the t7a repository, to which
+        # libbar/1.0.0 belongs, so we build it to hold rather than as a
+        # dependency.
+ #
+ # While at it, test the --config-name option.
+ #
+ $* libbar/1.0.0 libbaz/1.1.0 ?foo/1.0.0 +{ --config-name alt-host } \
+ ?libbaz/1.0.0 +{ --config-id 1 } <'y' 2>>~%EOE%;
+ % downgrade libbaz/1.0.0 \[cfg2.\]%
+ % downgrade foo/1.0.0 \[cfg2.\]%
+ upgrade libbaz/1.1.0
+ downgrade libbar/1.0.0
+ continue? [Y/n] disfigured libbar/1.1.0
+ disfigured libbaz/1.0.0
+ %disfigured foo/1.1.0 \[cfg2.\]%
+ %disfigured libbaz/1.1.0 \[cfg2.\]%
+ %fetched libbaz/1.0.0 \[cfg2.\]%
+ %unpacked libbaz/1.0.0 \[cfg2.\]%
+ %fetched foo/1.0.0 \[cfg2.\]%
+ %unpacked foo/1.0.0 \[cfg2.\]%
+ fetched libbaz/1.1.0
+ unpacked libbaz/1.1.0
+ fetched libbar/1.0.0
+ unpacked libbar/1.0.0
+ %configured libbaz/1.0.0 \[cfg2.\]%
+ %configured foo/1.0.0 \[cfg2.\]%
+ configured libbaz/1.1.0
+ configured libbar/1.0.0
+ %info: .+libbaz-1.0.0.+ is up to date%
+ %info: .+foo-1.0.0.+ is up to date%
+ %info: .+libbaz-1.1.0.+ is up to date%
+ %info: .+libbar-1.0.0.+ is up to date%
+ %updated libbaz/1.0.0 \[cfg2.\]%
+ %updated foo/1.0.0 \[cfg2.\]%
+ updated libbaz/1.1.0
+ updated libbar/1.0.0
+ EOE
+
+ $pkg_status -r libbar >>/EOO;
+ !libbar configured !1.0.0 available 1.1.0
+ foo [cfg2/] configured !1.0.0 available 1.1.0
+ libbaz [cfg2/] configured !1.0.0 available 1.1.0
+ !libbaz configured !1.1.0
+ EOO
+
+ $pkg_drop libbar libbaz
+ }
+
+ : resolve-host-config
+ :
+ {
+ $clone_cfg;
+ cp -pr ../cfg2 ./;
+
+ $cfg_create -d cfg3 --type host &cfg3/***;
+ $cfg_add -d cfg cfg3;
+
+ $* libbar 2>>/~%EOE% != 0;
+ error: multiple possible host configurations for build-time dependency (foo ^1.0.0)
+ info: cfg2/
+ info: cfg3/
+ info: use --config-* to select the configuration
+ %info: while satisfying libbar.1.0.0%
+ EOE
+
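+      # Resolve the ambiguity by explicitly configuring the build-time
+      # dependency foo in cfg3/ (id 2).
+      #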
+ $* libbar ?foo +{ --config-id 2 } --yes 2>!;
+
+ $pkg_status -r libbar >>/EOE;
+ !libbar configured 1.0.0
+ foo [cfg3/] configured 1.0.0
+ libbaz [cfg3/] configured 1.0.0
+ libbaz configured 1.0.0
+ EOE
+
+ $pkg_drop libbar
+ }
+
+ : 3-configs
+ :
+ {
+ $clone_cfg;
+ cp -pr ../cfg2 ./;
+
+ $cfg_create -d cfg3 --type host &cfg3/***;
+ $cfg_add -d cfg2 cfg3;
+
+ $rep_add -d cfg2 $rep/t7a && $rep_fetch -d cfg2;
+
+ $* -d cfg2 libbaz +{ --config-id 2 } 2>!;
+
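+      # Note that since libbaz is already held in cfg3/, which is
+      # associated with cfg2/, foo's libbaz dependency is satisfied with
+      # that package, while libbar's own libbaz dependency is built in
+      # cfg/.
+      #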
+ $* libbar --yes 2>>~%EOE%;
+ %fetched foo/1.0.0 \[cfg2.\]%
+ %unpacked foo/1.0.0 \[cfg2.\]%
+ fetched libbaz/1.0.0
+ unpacked libbaz/1.0.0
+ fetched libbar/1.0.0
+ unpacked libbar/1.0.0
+ %configured foo/1.0.0 \[cfg2.\]%
+ configured libbaz/1.0.0
+ configured libbar/1.0.0
+ %info: .+libbar-1.0.0.+ is up to date%
+ updated libbar/1.0.0
+ EOE
+
+ $pkg_status -r libbar >>/EOE;
+ !libbar configured 1.0.0
+ foo [cfg2/] configured 1.0.0
+ !libbaz [cfg3/] configured 1.0.0
+ libbaz configured 1.0.0
+ EOE
+
+ $pkg_drop libbar
+ }
+ }
+
+ : private-config
+ :
+ {
+ $clone_cfg;
+
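+    # With no host configuration associated with cfg/, the build-time
+    # dependency foo and its libbaz dependency are expected to be
+    # configured in a private host configuration automatically created
+    # under cfg/.bpkg/host/.
+    #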
+ $* libbar <'y' 2>>~%EOE% &cfg/.bpkg/host/***;
+ % new libbaz/1.0.0 \[cfg..bpkg.host.\] \(required by foo \[cfg..bpkg.host.\]\)%
+ % new foo/1.0.0 \[cfg..bpkg.host.\] \(required by libbar\)%
+ % new libbaz/1.0.0 \(required by libbar\)%
+ new libbar/1.0.0
+ %continue\? \[Y/n\] fetched libbaz/1.0.0 \[cfg..bpkg.host.\]%
+ %unpacked libbaz/1.0.0 \[cfg..bpkg.host.\]%
+ %fetched foo/1.0.0 \[cfg..bpkg.host.\]%
+ %unpacked foo/1.0.0 \[cfg..bpkg.host.\]%
+ fetched libbaz/1.0.0
+ unpacked libbaz/1.0.0
+ fetched libbar/1.0.0
+ unpacked libbar/1.0.0
+ %configured libbaz/1.0.0 \[cfg..bpkg.host.\]%
+ %configured foo/1.0.0 \[cfg..bpkg.host.\]%
+ configured libbaz/1.0.0
+ configured libbar/1.0.0
+ %info: .+libbar-1.0.0.+ is up to date%
+ updated libbar/1.0.0
+ EOE
+
+ $pkg_status -r libbar >>/EOO;
+ !libbar configured 1.0.0
+ foo [cfg/.bpkg/host/] configured 1.0.0
+ libbaz [cfg/.bpkg/host/] configured 1.0.0
+ libbaz configured 1.0.0
+ EOO
+
+ $rep_add $rep/t7b && $rep_fetch;
+
+ $* libbar <'y' 2>>~%EOE%;
+ % upgrade libbaz/1.1.0 \[cfg..bpkg.host.\] \(required by foo \[cfg..bpkg.host.\]\)%
+ % upgrade foo/1.1.0 \[cfg..bpkg.host.\] \(required by libbar\)%
+ upgrade libbar/1.1.0
+ %continue\? \[Y/n\] disfigured libbar/1.0.0%
+ %disfigured foo/1.0.0 \[cfg..bpkg.host.\]%
+ %disfigured libbaz/1.0.0 \[cfg..bpkg.host.\]%
+ %fetched libbaz/1.1.0 \[cfg..bpkg.host.\]%
+ %unpacked libbaz/1.1.0 \[cfg..bpkg.host.\]%
+ %fetched foo/1.1.0 \[cfg..bpkg.host.\]%
+ %unpacked foo/1.1.0 \[cfg..bpkg.host.\]%
+ fetched libbar/1.1.0
+ unpacked libbar/1.1.0
+ %configured libbaz/1.1.0 \[cfg..bpkg.host.\]%
+ %configured foo/1.1.0 \[cfg..bpkg.host.\]%
+ configured libbar/1.1.0
+ %info: .+libbar-1.1.0.+ is up to date%
+ updated libbar/1.1.0
+ EOE
+
+ $pkg_status -r libbar >>/EOO;
+ !libbar configured 1.1.0
+ foo [cfg/.bpkg/host/] configured 1.1.0
+ libbaz [cfg/.bpkg/host/] configured 1.1.0
+ libbaz configured 1.0.0 available 1.1.0
+ EOO
+
+ $pkg_drop libbar
+ }
+
+ : self-hosted-config
+ :
+ {
+ $cfg_create -d cfg --type host &cfg/***;
+ $rep_add $rep/t7a && $rep_fetch;
+
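+    # Since cfg/ is itself created as a host configuration, the build-time
+    # dependency foo is configured in cfg/ rather than in a separate host
+    # configuration.
+    #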
+ $* libbar <'y' 2>>~%EOE%;
+ new libbaz/1.0.0 (required by foo libbar)
+ new foo/1.0.0 (required by libbar)
+ new libbar/1.0.0
+ continue? [Y/n] fetched libbaz/1.0.0
+ unpacked libbaz/1.0.0
+ fetched foo/1.0.0
+ unpacked foo/1.0.0
+ fetched libbar/1.0.0
+ unpacked libbar/1.0.0
+ configured libbaz/1.0.0
+ configured foo/1.0.0
+ configured libbar/1.0.0
+ %info: .+libbar-1.0.0.+ is up to date%
+ updated libbar/1.0.0
+ EOE
+
+ $pkg_status -r libbar >>EOO;
+ !libbar configured 1.0.0
+ foo configured 1.0.0
+ libbaz configured 1.0.0
+ libbaz configured 1.0.0
+ EOO
+
+ $rep_add $rep/t7b && $rep_fetch;
+
+ $* libbar <'y' 2>>~%EOE%;
+ upgrade libbaz/1.1.0 (required by foo libbar)
+ upgrade foo/1.1.0 (required by libbar)
+ upgrade libbar/1.1.0
+ continue? [Y/n] disfigured libbar/1.0.0
+ disfigured foo/1.0.0
+ disfigured libbaz/1.0.0
+ fetched libbaz/1.1.0
+ unpacked libbaz/1.1.0
+ fetched foo/1.1.0
+ unpacked foo/1.1.0
+ fetched libbar/1.1.0
+ unpacked libbar/1.1.0
+ configured libbaz/1.1.0
+ configured foo/1.1.0
+ configured libbar/1.1.0
+ %info: .+libbar-1.1.0.+ is up to date%
+ updated libbar/1.1.0
+ EOE
+
+ $pkg_status -r libbar >>EOO;
+ !libbar configured 1.1.0
+ foo configured 1.1.0
+ libbaz configured 1.1.0
+ libbaz configured 1.1.0
+ EOO
+
+ $pkg_drop libbar
+ }
+ }
+
+ : verify-dependencies
+ :
+ {
+ +$clone_root_cfg && $rep_add $rep/t7a && $rep_fetch
+
+ : fail
+ :
+ {
+ $cfg_create -d cfg2 &cfg2/***;
+ $rep_add -d cfg2 $rep/t7a && $rep_fetch -d cfg2;
+
+ $cfg_create -d cfg3 &cfg3/***;
+ $rep_add -d cfg3 $rep/t7a && $rep_fetch -d cfg3;
+
+ $* -d cfg2 --yes libbar 2>!;
+ $* -d cfg3 --yes libbox 2>!;
+
+ $clone_cfg;
+ $cfg_add -d cfg cfg2;
+ $cfg_add -d cfg cfg3;
+
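+    # With libbaz/1.0.0 ending up configured in both cfg2/ and cfg3/ (via
+    # the libbar and libbox builds above), building libfix, which
+    # indirectly requires libbaz, should fail as ambiguous.
+    #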
+ $* libfix --yes 2>>~%EOE% != 0
+ error: package libbaz indirectly required by libfix/1.0.0 is configured in multiple configurations
+ % info: libbaz/1.0.0 \[cfg3.\]%
+ % info: libbaz/1.0.0 \[cfg2.\]%
+ EOE
+ }
+
+ : succeed
+ :
+ {
+ $cfg_create -d cfg2 &cfg2/***;
+ $rep_add -d cfg2 $rep/t7b && $rep_fetch -d cfg2;
+
+ $cfg_create -d cfg3 &cfg3/***;
+ $rep_add -d cfg3 $rep/t7b && $rep_fetch -d cfg3;
+
+ $* -d cfg2 --yes libbar 2>!;
+ $* -d cfg3 --yes libbox 2>!;
+
+ $clone_cfg;
+ $cfg_add -d cfg cfg2;
+ $cfg_add -d cfg cfg3;
+
+ $* libfix --yes 2>>~%EOE%;
+ fetched libfix/1.0.0
+ unpacked libfix/1.0.0
+ configured libfix/1.0.0
+ %info: .+libfix-1.0.0.+ is up to date%
+ updated libfix/1.0.0
+ EOE
+
+ $pkg_drop libfix
+ }
+ }
+}
diff --git a/tests/pkg-build/t7a b/tests/pkg-build/t7a
new file mode 120000
index 0000000..05d62de
--- /dev/null
+++ b/tests/pkg-build/t7a
@@ -0,0 +1 @@
+../common/associated/t7a \ No newline at end of file
diff --git a/tests/pkg-build/t7b b/tests/pkg-build/t7b
new file mode 120000
index 0000000..31d6d0f
--- /dev/null
+++ b/tests/pkg-build/t7b
@@ -0,0 +1 @@
+../common/associated/t7b \ No newline at end of file
diff --git a/tests/pkg-drop.testscript b/tests/pkg-drop.testscript
index db9cf7c..ff42e58 100644
--- a/tests/pkg-drop.testscript
+++ b/tests/pkg-drop.testscript
@@ -9,16 +9,31 @@
# |-- t4a
# | |-- libfoo-1.1.0.tar.gz
# | `-- repositories.manifest
+# |
# |-- t4b -> t4a (prerequisite repository)
# | |-- libbar-1.1.0.tar.gz -> libfoo == 1.1.0
# | `-- repositories.manifest
+# |
# |-- t4c -> t4b (prerequisite repository)
# | |-- libbaz-1.1.0.tar.gz -> libfoo, libbar
# | |-- libfoo-1.0.0.tar.gz
# | `-- repositories.manifest
-# `-- t4d -> t4c (complement)
-# |-- libbiz-1.0.0.tar.gz -> libfox, libfoo, libbaz
-# |-- libfox-1.0.0.tar.gz
+# |
+# |-- t4d -> t4c (complement)
+# | |-- libbiz-1.0.0.tar.gz -> libfox, libfoo, libbaz
+# | |-- libfox-1.0.0.tar.gz
+# | `-- repositories.manifest
+# |
+# |-- t7a
+# | |-- libbaz-1.0.0.tar.gz
+# | |-- foo-1.0.0.tar.gz -> libbaz ^1.0.0
+# | |-- libbar-1.0.0.tar.gz -> * foo ^1.0.0, libbaz ^1.0.0
+# | `-- repositories.manifest
+# |
+# `-- t7b -> t7a (complement repository)
+# |-- libbaz-1.1.0.tar.gz
+# |-- foo-1.1.0.tar.gz -> libbaz ^1.1.0
+# |-- libbar-1.1.0.tar.gz -> * foo ^1.1.0, libbaz ^1.0.0
# `-- repositories.manifest
# Prepare repositories used by tests if running in the local mode.
@@ -30,8 +45,12 @@
cp -r $src/t4b $out/t4b && $rep_create $out/t4b &$out/t4b/packages.manifest
cp -r $src/t4c $out/t4c && $rep_create $out/t4c &$out/t4c/packages.manifest
cp -r $src/t4d $out/t4d && $rep_create $out/t4d &$out/t4d/packages.manifest
+ cp -r $src/t7a $out/t7a && $rep_create $out/t7a &$out/t7a/packages.manifest
+ cp -r $src/t7b $out/t7b && $rep_create $out/t7b &$out/t7b/packages.manifest
end
+cfg_create += 2>!
+cfg_add += 2>!
pkg_build += -d cfg --yes 2>!
pkg_status += -d cfg
rep_add += -d cfg 2>!
@@ -444,3 +463,234 @@ $* libfoo/1.0.0 2>>~%EOE% != 0
$* libfoo 2>'purged libfoo'
}
+
+: associated-configs
+:
+{
+ : 3-configs
+ :
+ {
+ +$clone_root_cfg && $rep_add $rep/t4c && $rep_fetch
+
+ +$cfg_create -d cfg-bar &cfg-bar/***
+ +$cfg_create -d cfg-foo &cfg-foo/***
+
+ +$cfg_add -d cfg cfg-bar
+ +$cfg_add -d cfg-bar cfg-foo
+
+ : baz
+ :
+ {
+ $clone_cfg;
+ cp -pr ../cfg-bar ./;
+ cp -pr ../cfg-foo ./;
+
+ $pkg_build libbar@"$rep/t4b" -d cfg-bar ?libfoo +{ --config-id 2 } --trust-yes 2>!;
+ $pkg_build libbaz 2>!;
+
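+      # Convert libbar [cfg-bar/] into a dependency so that dropping libbaz
+      # below also drops the no longer used libbar and libfoo [cfg-foo/].
+      #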
+ $pkg_build '?libbar' +{ --config-id 1 } 2>!;
+
+ $* libbaz <<EOI 2>>/~%EOE%
+ y
+ y
+ EOI
+ following dependencies were automatically built but will no longer be used:
+ libbar [cfg-bar/]
+ libfoo [cfg-foo/]
+ %drop unused packages\? \[Y.n\] drop libbaz%
+ drop libbar [cfg-bar/]
+ drop libfoo [cfg-foo/]
+ %continue\? \[Y.n\] disfigured libbaz%
+ disfigured libbar [cfg-bar/]
+ disfigured libfoo [cfg-foo/]
+ purged libbaz
+ purged libbar [cfg-bar/]
+ purged libfoo [cfg-foo/]
+ EOE
+ }
+
+ : foo
+ :
+ {
+ $clone_cfg;
+ cp -pr ../cfg-bar ./;
+ cp -pr ../cfg-foo ./;
+
+ $pkg_build libbar@"$rep/t4b" -d cfg-bar ?libfoo +{ --config-id 2 } --trust-yes 2>!;
+ $pkg_build libbaz 2>!;
+
+      # Make sure that dependents of a package being dropped can be found in
+      # implicitly associated configurations recursively. Note that by
+      # configuring libbar as a system package, we make libbaz the only
+      # dependent of libfoo.
+ #
+ $pkg_build '?sys:libbar' +{ --config-id 1 } 2>!;
+
+ $pkg_status -r libbaz >>/EOO;
+ !libbaz configured 1.1.0
+ libbar [cfg-bar/] configured,system !* available [1.1.0]
+ libfoo [cfg-foo/] configured 1.1.0
+ EOO
+
+ $pkg_status -d cfg-bar -r libbar >>EOO;
+ libbar configured,system !* available 1.1.0
+ EOO
+
+ $pkg_status -d cfg-foo libfoo >'libfoo configured 1.1.0';
+
+ $* -d cfg-foo libfoo <<EOI 2>>/~%EOE%;
+ y
+ y
+ y
+ EOI
+ following dependent packages will have to be dropped as well:
+ libbaz [cfg/] (requires libfoo)
+ %drop dependent packages\? \[y.N\] following dependencies were automatically built but will no longer be used:%
+ sys:libbar [cfg-bar/]
+ %drop unused packages\? \[Y.n\] drop libbaz \[cfg/\]%
+ drop libfoo
+ drop libbar [cfg-bar/]
+ %continue\? \[Y.n\] disfigured libbaz \[cfg/\]%
+ disfigured libfoo
+ purged libbar [cfg-bar/]
+ purged libbaz [cfg/]
+ purged libfoo
+ EOE
+
+ $pkg_status libbaz >'libbaz available 1.1.0';
+ $pkg_status -d cfg-bar libbar >'libbar available 1.1.0';
+ $pkg_status -d cfg-foo libfoo >'libfoo unknown'
+ }
+
+ : bar
+ :
+ {
+ $clone_cfg;
+ cp -pr ../cfg-bar ./;
+ cp -pr ../cfg-foo ./;
+
+      # Test that if we turn the implicit associations into explicit ones,
+      # then all dependents are still discovered.
+ #
+ $cfg_add -d cfg-bar cfg;
+ $cfg_add -d cfg-foo cfg-bar;
+
+ $pkg_build libbar@"$rep/t4b" -d cfg-bar ?libfoo +{ --config-id 2 } --trust-yes 2>!;
+ $pkg_build libbaz 2>!;
+
+ $pkg_status -r libbaz >>/EOO;
+ !libbaz configured 1.1.0
+ !libbar [cfg-bar/] configured !1.1.0
+ libfoo [cfg-foo/] configured 1.1.0
+ libfoo [cfg-foo/] configured 1.1.0
+ EOO
+
+ $pkg_status -d cfg-bar -r libbar >>/EOO;
+ !libbar configured !1.1.0
+ libfoo [cfg-foo/] configured 1.1.0
+ EOO
+
+ $pkg_status -d cfg-foo libfoo >'libfoo configured 1.1.0';
+
+ $* -d cfg-bar libbar <<EOI 2>>/~%EOE%;
+ y
+ y
+ y
+ EOI
+ following dependent packages will have to be dropped as well:
+ libbaz [cfg/] (requires libbar)
+ %drop dependent packages\? \[y.N\] following dependencies were automatically built but will no longer be used:%
+ libfoo [cfg-foo/]
+ %drop unused packages\? \[Y.n\] drop libbaz \[cfg/\]%
+ drop libbar
+ drop libfoo [cfg-foo/]
+ %continue\? \[Y.n\] disfigured libbaz \[cfg/\]%
+ disfigured libbar
+ disfigured libfoo [cfg-foo/]
+ purged libbaz [cfg/]
+ purged libbar
+ purged libfoo [cfg-foo/]
+ EOE
+
+ $pkg_status libbaz >'libbaz available 1.1.0';
+ $pkg_status -d cfg-bar libbar >'libbar available 1.1.0';
+ $pkg_status -d cfg-foo libfoo >'libfoo unknown'
+ }
+ }
+}
+
+: buildtime-dep
+:
+{
+ +$clone_cfg && $rep_add $rep/t7a && $rep_fetch
+ +$cfg_create -d cfg2 --type host &cfg2/***
+ +$cfg_add -d cfg cfg2
+
+ : drop-dependent
+ :
+ {
+ $clone_cfg;
+ cp -pr ../cfg2 ./;
+
+ $pkg_build libbar --yes >!;
+
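+    # Dropping libbar should also drop its no longer used dependencies: foo
+    # and libbaz in cfg2/ as well as libbaz in cfg/.
+    #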
+ $* libbar <<EOI 2>>/~%EOE%;
+ y
+ y
+ EOI
+ following dependencies were automatically built but will no longer be used:
+ foo [cfg2/]
+ libbaz [cfg2/]
+ libbaz
+ %drop unused packages\? \[Y.n\] drop libbar%
+ drop foo [cfg2/]
+ drop libbaz [cfg2/]
+ drop libbaz
+ %continue\? \[Y.n\] disfigured libbar%
+ disfigured foo [cfg2/]
+ disfigured libbaz [cfg2/]
+ disfigured libbaz
+ purged libbar
+ purged foo [cfg2/]
+ purged libbaz [cfg2/]
+ purged libbaz
+ EOE
+
+ $pkg_status -r libbar >'libbar available 1.0.0'
+ }
+
+ : drop-dependency
+ :
+ {
+ $clone_cfg;
+ cp -pr ../cfg2 ./;
+
+ $pkg_build libbar --yes >!;
+
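+    # Dropping libbaz from cfg2/ also requires dropping its dependent foo
+    # and, transitively, libbar [cfg/], which in turn leaves libbaz [cfg/]
+    # unused.
+    #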
+ $* -d cfg2 libbaz <<EOI 2>>/~%EOE%;
+ y
+ y
+ y
+ EOI
+ following dependent packages will have to be dropped as well:
+ foo (requires libbaz)
+ libbar [cfg/] (requires foo)
+ %drop dependent packages\? \[y.N\] following dependencies were automatically built but will no longer be used:%
+ libbaz [cfg/]
+ %drop unused packages\? \[Y.n\] drop libbar \[cfg.\]%
+ drop foo
+ drop libbaz
+ drop libbaz [cfg/]
+ %continue\? \[Y.n\] disfigured libbar \[cfg.\]%
+ disfigured foo
+ disfigured libbaz
+ disfigured libbaz [cfg/]
+ purged libbar [cfg/]
+ purged foo
+ purged libbaz
+ purged libbaz [cfg/]
+ EOE
+
+ $pkg_status -r libbar >'libbar available 1.0.0'
+ }
+}
diff --git a/tests/pkg-drop/t7a b/tests/pkg-drop/t7a
new file mode 120000
index 0000000..05d62de
--- /dev/null
+++ b/tests/pkg-drop/t7a
@@ -0,0 +1 @@
+../common/associated/t7a \ No newline at end of file
diff --git a/tests/pkg-drop/t7b b/tests/pkg-drop/t7b
new file mode 120000
index 0000000..31d6d0f
--- /dev/null
+++ b/tests/pkg-drop/t7b
@@ -0,0 +1 @@
+../common/associated/t7b \ No newline at end of file