path: root/bpkg/pkg-build.cxx
author     Karen Arutyunov <karen@codesynthesis.com>    2021-04-22 21:57:13 +0300
committer  Karen Arutyunov <karen@codesynthesis.com>    2021-07-09 14:06:57 +0300
commit     5d513688ae07d96910dd1eef83bdad4e9d780373 (patch)
tree       402a87bbdb79a5a2392b714553e567465c3415c1 /bpkg/pkg-build.cxx
parent     50a5c7b14250dab7941e8b0a67dfe6c442d2c5a9 (diff)
Add support for linked configurations
Diffstat (limited to 'bpkg/pkg-build.cxx')
-rw-r--r--  bpkg/pkg-build.cxx  2631
1 file changed, 2012 insertions(+), 619 deletions(-)
diff --git a/bpkg/pkg-build.cxx b/bpkg/pkg-build.cxx
index 1e80d1e..cc90c29 100644
--- a/bpkg/pkg-build.cxx
+++ b/bpkg/pkg-build.cxx
@@ -20,9 +20,11 @@
#include <bpkg/common-options.hxx>
+#include <bpkg/cfg-link.hxx>
#include <bpkg/pkg-purge.hxx>
#include <bpkg/pkg-fetch.hxx>
#include <bpkg/rep-fetch.hxx>
+#include <bpkg/cfg-create.hxx>
#include <bpkg/pkg-unpack.hxx>
#include <bpkg/pkg-update.hxx>
#include <bpkg/pkg-verify.hxx>
@@ -195,7 +197,6 @@ namespace bpkg
//
static pair<shared_ptr<available_package>, shared_ptr<repository_fragment>>
make_available (const common_options& options,
- const dir_path& c,
database& db,
const shared_ptr<selected_package>& sp)
{
@@ -214,7 +215,7 @@ namespace bpkg
// moment).
//
shared_ptr<repository_fragment> af (
- db.find<repository_fragment> (
+ db.main_database ().find<repository_fragment> (
sp->repository_fragment.canonical_name ()));
// The package is in at least fetched state, which means we should
@@ -225,10 +226,10 @@ namespace bpkg
package_manifest m (
sp->state == package_state::fetched
? pkg_verify (options,
- a->absolute () ? *a : c / *a,
+ a->absolute () ? *a : db.config_orig / *a,
true /* ignore_unknown */,
false /* expand_values */)
- : pkg_verify (sp->effective_src_root (c),
+ : pkg_verify (sp->effective_src_root (db.config_orig),
true /* ignore_unknown */,
// Copy potentially fixed up version from selected package.
[&sp] (version& v) {v = sp->version;}));
@@ -249,6 +250,34 @@ namespace bpkg
return r;
}
+ // Compare two shared pointers via the pointed-to object addresses.
+ //
+ struct compare_shared_ptr
+ {
+ template <typename P>
+ bool
+ operator() (const P& x, const P& y) const
+ {
+ return x.get () < y.get ();
+ }
+ };
+
+ // The current configuration dependents being "repointed" to prerequisites
+ // in other configurations, together with their replacement flags. The flag
+ // is true for the replacement prerequisites ("new") and false for the
+ // prerequisites being replaced ("old"). The unamended prerequisites have no
+ // entries.
+ //
+ using repointed_dependents = map<shared_ptr<selected_package>,
+ map<config_package, bool>,
+ compare_shared_ptr>;
+
+ // List of the private configuration paths, relative to the containing
+ // configuration directories (.bpkg/host/, etc), together with the
+ // containing configuration databases.
+ //
+ using private_configs = vector<pair<database&, dir_path>>;
+
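As a minimal standalone sketch (not part of the patch) of the repointed_dependents shape described above: the flag map marks the replacement ("new") prerequisites with true, the prerequisites being replaced ("old") with false, and unamended prerequisites get no entry. database, selected_package and config_package below are simplified placeholders, not the real bpkg declarations.

#include <cassert>
#include <map>
#include <memory>
#include <string>

// Simplified placeholders for the real bpkg types (assumption).
struct database         {std::string path;};
struct selected_package {std::string name;};

struct config_package
{
  database*   db;
  std::string name;

  bool
  operator< (const config_package& v) const
  {
    int r (name.compare (v.name));
    return r != 0 ? r < 0 : db < v.db;
  }
};

// Order dependents by the pointed-to object address, as compare_shared_ptr
// does in the patch.
struct compare_shared_ptr
{
  template <typename P>
  bool
  operator() (const P& x, const P& y) const {return x.get () < y.get ();}
};

using repointed_dependents = std::map<std::shared_ptr<selected_package>,
                                      std::map<config_package, bool>,
                                      compare_shared_ptr>;

int main ()
{
  database cfg  {"./"};      // Dependent's own configuration.
  database host {"./host/"}; // Configuration a prerequisite moves to.

  auto dep (std::make_shared<selected_package> (selected_package {"hello"}));

  repointed_dependents rds;
  std::map<config_package, bool>& fs (rds[dep]);

  fs[config_package {&cfg,  "libformat"}] = false; // "old": being replaced.
  fs[config_package {&host, "libformat"}] = true;  // "new": the replacement.

  // An unamended prerequisite simply has no entry.
  assert (fs.find (config_package {&cfg, "libprint"}) == fs.end ());
}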
// A "dependency-ordered" list of packages and their prerequisites.
// That is, every package on the list only possibly depending on the
// ones after it. In a nutshell, the usage is as follows: we first
@@ -311,6 +340,8 @@ namespace bpkg
//
optional<action_type> action;
+ reference_wrapper<database> db; // Needs to be move-assignable.
+
shared_ptr<selected_package> selected; // NULL if not selected.
shared_ptr<available_package> available; // Can be NULL, fake/transient.
@@ -337,12 +368,12 @@ namespace bpkg
//
struct constraint_type
{
+ reference_wrapper<database> db; // Main database for non-packages.
string dependent;
version_constraint value;
- constraint_type () = default;
- constraint_type (string d, version_constraint v)
- : dependent (move (d)), value (move (v)) {}
+ constraint_type (database& d, string dp, version_constraint v)
+ : db (d), dependent (move (dp)), value (move (v)) {}
};
vector<constraint_type> constraints;
@@ -351,12 +382,18 @@ namespace bpkg
//
bool system;
- // If the flag is set and the external package is being replaced with an
+ // If this flag is set and the external package is being replaced with an
// external one, then keep its output directory between upgrades and
// downgrades.
//
bool keep_out;
+ // If this flag is set, then don't build this package, only configure.
+ //
+ // Note: use configure_only() to query.
+ //
+ bool configure_only_;
+
// If present, then check out the package into the specified directory
// rather than into the configuration directory, if it comes from a
// version control-based repository. Optionally, remove this directory
@@ -370,20 +407,32 @@ namespace bpkg
//
strings config_vars;
- // Set of package names that caused this package to be built or adjusted.
- // Empty name signifies user selection.
+ // Set of packages (dependents or dependencies but not a mix) that caused
+ // this package to be built or adjusted. Empty name signifies user
+ // selection and can be present regardless of the required_by_dependents
+ // flag value.
//
- set<package_name> required_by;
+ set<config_package> required_by;
+
+ // If this flag is true, then required_by contains dependents.
+ //
+ // We need this because required_by packages have different semantics for
+ // different actions: dependents for regular builds and dependencies for
+ // adjustments and repointed dependent reconfiguration builds. Mixing them
+ // would break prompts/diagnostics.
+ //
+ bool required_by_dependents;
bool
user_selection () const
{
- return required_by.find (package_name ()) != required_by.end ();
+ return required_by.find (config_package {db.get ().main_database (),
+ ""}) != required_by.end ();
}
- // Adjustment flags.
+ // State flags.
//
- uint16_t adjustments;
+ uint16_t flags;
// Set if we also need to clear the hold package flag.
//
@@ -392,7 +441,7 @@ namespace bpkg
bool
unhold () const
{
- return (adjustments & adjust_unhold) != 0;
+ return (flags & adjust_unhold) != 0;
}
// Set if we also need to reconfigure this package. Note that in some
@@ -420,13 +469,26 @@ namespace bpkg
return selected != nullptr &&
selected->state == package_state::configured &&
- ((adjustments & adjust_reconfigure) != 0 ||
+ ((flags & adjust_reconfigure) != 0 ||
(*action == build &&
(selected->system () != system ||
selected->version != available_version () ||
(!system && !config_vars.empty ()))));
}
+ // Set if this build action is for repointing of a prerequisite.
+ //
+ static const uint16_t build_repoint = 0x0004;
+
+ bool
+ configure_only () const
+ {
+ assert (action);
+
+ return configure_only_ ||
+ (*action == build && (flags & build_repoint) != 0);
+ }
+
const version&
available_version () const
{
@@ -434,25 +496,40 @@ namespace bpkg
//
assert (available != nullptr &&
(system
- ? available->system_version () != nullptr
+ ? available->system_version (db) != nullptr
: !available->stub ()));
- return system ? *available->system_version () : available->version;
+ return system ? *available->system_version (db) : available->version;
}
string
available_name_version () const
{
assert (available != nullptr);
- return package_string (available->id.name, available_version (), system);
+ return package_string (available->id.name,
+ available_version (),
+ system);
}
- // Merge constraints, required-by package names, hold_* flags,
- // adjustments, and user-specified options/variables.
+ string
+ available_name_version_db () const
+ {
+ string s (db.get ().string ());
+ return !s.empty ()
+ ? available_name_version () + ' ' + s
+ : available_name_version ();
+ }
+
+ // Merge constraints, required-by package names, hold_* flags, state
+ // flags, and user-specified options/variables.
//
void
merge (build_package&& p)
{
+ // We don't merge objects from different configurations.
+ //
+ assert (db == p.db);
+
// We don't merge into pre-entered objects, and from/into drops.
//
assert (action && *action != drop && (!p.action || *p.action != drop));
@@ -472,6 +549,9 @@ namespace bpkg
if (p.keep_out)
keep_out = p.keep_out;
+ if (p.configure_only_)
+ configure_only_ = p.configure_only_;
+
if (p.checkout_root)
checkout_root = move (p.checkout_root);
@@ -483,15 +563,12 @@ namespace bpkg
// Propagate the user-selection tag.
//
- required_by.insert (package_name ());
+ required_by.emplace (db.get ().main_database (), package_name ());
}
- // Required-by package names have different semantics for different
- // actions: dependent for builds and prerequisite for adjustment. Mixing
- // them would break prompts/diagnostics, so we copy them only if actions
- // match.
+ // Copy the required-by package names only if the semantics match.
//
- if (p.action && *p.action == *action)
+ if (p.required_by_dependents == required_by_dependents)
required_by.insert (p.required_by.begin (), p.required_by.end ());
// Copy constraints.
@@ -510,9 +587,21 @@ namespace bpkg
if (!hold_version || (p.hold_version && *p.hold_version > *hold_version))
hold_version = p.hold_version;
- // Copy adjustments flags.
+ // Copy state flags.
//
- adjustments |= p.adjustments;
+ flags |= p.flags;
+
+ if (*action == build)
+ {
+ // We never merge two repointed dependent reconfigurations.
+ //
+ assert ((flags & build_repoint) == 0 ||
+ (p.flags & build_repoint) == 0);
+
+ // Upgrade repoint to the full build.
+ //
+ flags &= ~build_repoint;
+ }
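A tiny standalone sketch of the flag merge above: merging a full build into a repoint entry keeps the other state flags but clears build_repoint, since a full build subsumes the repoint sub-action. The adjust_reconfigure value is an assumption; only build_repoint (0x0004) is visible in this file.

#include <cassert>
#include <cstdint>

int main ()
{
  const std::uint16_t adjust_reconfigure (0x0001); // Assumed value.
  const std::uint16_t build_repoint      (0x0004); // As defined in this file.

  std::uint16_t flags    (adjust_reconfigure | build_repoint); // Repoint entry.
  std::uint16_t incoming (0);                                  // Full build being merged in.

  flags |= incoming;       // Copy state flags.
  flags &= ~build_repoint; // Upgrade the repoint to the full build.

  assert ((flags & build_repoint) == 0 && (flags & adjust_reconfigure) != 0);
}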
// Note that we don't copy the build_package::system flag. If it was
// set from the command line ("strong system") then we will also have
@@ -545,7 +634,10 @@ namespace bpkg
{
assert (!pkg.action);
- auto p (map_.emplace (move (name), data_type {end (), move (pkg)}));
+ database& db (pkg.db); // Save before the move() call.
+ auto p (map_.emplace (config_package {db, move (name)},
+ data_type {end (), move (pkg)}));
+
assert (p.second);
}
@@ -553,11 +645,24 @@ namespace bpkg
// version was, in fact, added to the map and NULL if it was already there
// or the existing version was preferred. So can be used as bool.
//
+ // Also, in the recursive mode:
+ //
+ // - Use the custom search function to find the package dependency
+ // databases.
+ //
+ // - For the repointed dependents collect the prerequisite replacements
+ // rather than prerequisites being replaced.
+ //
+ // - Add paths of the created private configurations, together with the
+ // containing configuration databases, into the specified list (see
+ // private_configs for details).
+ //
build_package*
collect_build (const common_options& options,
- const dir_path& cd,
- database& db,
build_package pkg,
+ const function<find_database_function>& fdb,
+ const repointed_dependents& rpt_depts,
+ private_configs& priv_cfgs,
postponed_packages* recursively = nullptr)
{
using std::swap; // ...and not list::swap().
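The custom search function mentioned in the comment above is passed as function<find_database_function>. Judging only by how it is called in this file (fdb (pdb, dn, da.buildtime) yielding a database*), its shape is roughly database* (database&, const package_name&, bool buildtime). The sketch below shows a hypothetical callback of that shape which maps packages whose configuration the user picked explicitly (e.g., via --config-*) to that configuration and returns nullptr otherwise; the types are simplified stand-ins, not the real bpkg declarations.

#include <functional>
#include <map>
#include <string>

// Simplified stand-ins (assumption).
struct database {std::string config_orig;};
using package_name = std::string;

// Assumed shape of find_database_function: return the configuration the
// dependency must be built in, or nullptr to fall back to the default
// search over linked configurations.
using find_database_function = database* (database&, const package_name&, bool);

int main ()
{
  database host {"host-cfg/"};

  // Packages whose configuration the user specified explicitly.
  std::map<package_name, database*> user_cfgs {{"byacc", &host}};

  std::function<find_database_function> fdb (
    [&user_cfgs] (database&, const package_name& n, bool /*buildtime*/)
    -> database*
    {
      auto i (user_cfgs.find (n));
      return i != user_cfgs.end () ? i->second : nullptr;
    });

  database cur {"./"};
  return fdb (cur, "byacc", true) == &host ? 0 : 1;
}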
@@ -569,7 +674,7 @@ namespace bpkg
assert (pkg.action && *pkg.action == build_package::build &&
pkg.available != nullptr);
- auto i (map_.find (pkg.available->id.name));
+ auto i (map_.find (pkg.db, pkg.available->id.name));
// If we already have an entry for this package name, then we
// have to pick one over the other.
@@ -644,15 +749,15 @@ namespace bpkg
//
if (auto c1 = test (p2, p1))
{
- const package_name& n (i->first);
+ const package_name& n (i->first.name);
const string& d1 (c1->dependent);
const string& d2 (c2->dependent);
fail << "unable to satisfy constraints on package " << n <<
- info << d1 << " depends on (" << n << " " << c1->value
- << ")" <<
- info << d2 << " depends on (" << n << " " << c2->value
- << ")" <<
+ info << d1 << c1->db << " depends on (" << n << " "
+ << c1->value << ")" <<
+ info << d2 << c2->db << " depends on (" << n << " "
+ << c2->value << ")" <<
info << "available " << p1->available_name_version () <<
info << "available " << p2->available_name_version () <<
info << "explicitly specify " << n << " version to manually "
@@ -662,8 +767,8 @@ namespace bpkg
swap (p1, p2);
}
- l4 ([&]{trace << "pick " << p1->available_name_version ()
- << " over " << p2->available_name_version ();});
+ l4 ([&]{trace << "pick " << p1->available_name_version_db ()
+ << " over " << p2->available_name_version_db ();});
}
// If versions are the same, then we still need to pick the entry as
// one of them can build a package from source while another
@@ -698,12 +803,14 @@ namespace bpkg
{
// This is the first time we are adding this package name to the map.
//
- l4 ([&]{trace << "add " << pkg.available_name_version ();});
+ l4 ([&]{trace << "add " << pkg.available_name_version_db ();});
// Note: copy; see emplace() below.
//
+ database& db (pkg.db); // Save before the move() call.
package_name n (pkg.available->id.name);
- i = map_.emplace (move (n), data_type {end (), move (pkg)}).first;
+ i = map_.emplace (config_package {db, move (n)},
+ data_type {end (), move (pkg)}).first;
}
build_package& p (i->second.package);
@@ -729,39 +836,74 @@ namespace bpkg
// reasoning wrong.
//
if (recursively != nullptr)
- collect_build_prerequisites (options, cd, db, p, recursively);
+ collect_build_prerequisites (options,
+ p,
+ recursively,
+ fdb,
+ rpt_depts,
+ priv_cfgs);
return &p;
}
- // Collect prerequisites of the package being built recursively. But first
- // "prune" this process if the package we build is a system one or is
- // already configured since that would mean all its prerequisites are
- // configured as well. Note that this is not merely an optimization: the
- // package could be an orphan in which case the below logic will fail (no
- // repository fragment in which to search for prerequisites). By skipping
- // the prerequisite check we are able to gracefully handle configured
- // orphans.
+ // Collect prerequisites of the package being built recursively.
+ //
+ // But first "prune" this process if the package we build is a system one
+ // or is already configured and is not a repointed dependent, since that
+ // would mean all its prerequisites are configured as well. Note that this
+ // is not merely an optimization: the package could be an orphan in which
+ // case the below logic will fail (no repository fragment in which to
+ // search for prerequisites). By skipping the prerequisite check we are
+ // able to gracefully handle configured orphans.
+ //
+ // For the repointed dependent, we still need to collect its prerequisite
+ // replacements to make sure its constraints over them are satisfied. Note
+ // that, as it was said above, we can potentially fail if the dependent is
+ // an orphan, but this is exactly what we need to do in that case, since
+ // we won't be able to reconfigure it anyway.
//
void
collect_build_prerequisites (const common_options& options,
- const dir_path& cd,
- database& db,
const build_package& pkg,
- postponed_packages* postponed)
+ postponed_packages* postponed,
+ const function<find_database_function>& fdb,
+ const repointed_dependents& rpt_depts,
+ private_configs& priv_cfgs)
{
tracer trace ("collect_build_prerequisites");
assert (pkg.action && *pkg.action == build_package::build);
+ if (pkg.system)
+ return;
+
const shared_ptr<selected_package>& sp (pkg.selected);
- if (pkg.system ||
- (sp != nullptr &&
- sp->state == package_state::configured &&
- sp->substate != package_substate::system &&
- sp->version == pkg.available_version ()))
- return;
+ // True if this is an up/down-grade.
+ //
+ bool ud (false);
+
+ // If this is a repointed dependent, then it points to its prerequisite
+ // replacements flag map (see repointed_dependents for details).
+ //
+ const map<config_package, bool>* rpt_prereq_flags (nullptr);
+
+ // Bail out if this is a configured non-system package and no
+ // up/down-grade nor collecting prerequisite replacements are required.
+ //
+ if (sp != nullptr &&
+ sp->state == package_state::configured &&
+ sp->substate != package_substate::system)
+ {
+ ud = sp->version != pkg.available_version ();
+
+ repointed_dependents::const_iterator i (rpt_depts.find (sp));
+ if (i != rpt_depts.end ())
+ rpt_prereq_flags = &i->second;
+
+ if (!ud && rpt_prereq_flags == nullptr)
+ return;
+ }
// Show how we got here if things go wrong.
//
@@ -769,13 +911,16 @@ namespace bpkg
make_exception_guard (
[&pkg] ()
{
- info << "while satisfying " << pkg.available_name_version ();
+ info << "while satisfying " << pkg.available_name_version_db ();
}));
const shared_ptr<available_package>& ap (pkg.available);
const shared_ptr<repository_fragment>& af (pkg.repository_fragment);
const package_name& name (ap->id.name);
+ database& pdb (pkg.db);
+ database& mdb (pdb.main_database ());
+
for (const dependency_alternatives_ex& da: ap->dependencies)
{
if (da.conditional) // @@ TODO
@@ -805,10 +950,6 @@ namespace bpkg
continue;
}
- // else
- //
- // @@ TODO: in the future we would need to at least make sure the
- // build and target machines are the same. See also pkg-configure.
}
bool system (false);
@@ -831,7 +972,12 @@ namespace bpkg
//
const version_constraint* dep_constr (nullptr);
- auto i (map_.find (dn));
+ database* ddb (fdb (pdb, dn, da.buildtime));
+
+ auto i (ddb != nullptr
+ ? map_.find (*ddb, dn)
+ : map_.find_dependency (pdb, dn, da.buildtime));
+
if (i != map_.end ())
{
const build_package& bp (i->second.package);
@@ -857,9 +1003,9 @@ namespace bpkg
if (!wildcard (*dep_constr) &&
!satisfies (*dep_constr, dp.constraint))
fail << "unable to satisfy constraints on package " << dn <<
- info << name << " depends on (" << dn << " "
+ info << name << pdb << " depends on (" << dn << " "
<< *dp.constraint << ")" <<
- info << c.dependent << " depends on (" << dn << " "
+ info << c.dependent << c.db << " depends on (" << dn << " "
<< c.value << ")" <<
info << "specify " << dn << " version to satisfy " << name
<< " constraint";
@@ -871,11 +1017,28 @@ namespace bpkg
: dependency {dn, *dep_constr});
// First see if this package is already selected. If we already have
- // it in the configuraion and it satisfies our dependency version
+ // it in the configuration and it satisfies our dependency version
// constraint, then we don't want to be forcing its upgrade (or,
// worse, downgrade).
//
- shared_ptr<selected_package> dsp (db.find<selected_package> (dn));
+ // If the prerequisite configuration is explicitly specified by the
+ // user, then search for the prerequisite in this specific
+ // configuration. Otherwise, search recursively in the explicitly
+ // linked configurations of the dependent configuration.
+ //
+ // Note that for the repointed dependent we will always find the
+ // prerequisite replacement rather than the prerequisite being
+ // replaced.
+ //
+ pair<shared_ptr<selected_package>, database*> spd (
+ ddb != nullptr
+ ? make_pair (ddb->find<selected_package> (dn), ddb)
+ : find_dependency (pdb, dn, da.buildtime));
+
+ if (ddb == nullptr)
+ ddb = &pdb;
+
+ shared_ptr<selected_package>& dsp (spd.first);
pair<shared_ptr<available_package>,
shared_ptr<repository_fragment>> rp;
@@ -886,8 +1049,34 @@ namespace bpkg
if (dsp != nullptr)
{
+ // Switch to the selected package configuration.
+ //
+ ddb = spd.second;
+
+ // If we are collecting prerequisites of the repointed dependent,
+ // then only proceed further if this is either a replacement
+ // prerequisite or an unamended prerequisite that we are
+ // up/down-grading.
+ //
+ if (rpt_prereq_flags != nullptr)
+ {
+ auto i (rpt_prereq_flags->find (config_package {*ddb, dn}));
+
+ bool unamended (i == rpt_prereq_flags->end ());
+ bool replacement (!unamended && i->second);
+
+ // We can never end up with the prerequisite being replaced, since
+ // the fdb() function should always return the replacement instead
+ // (see above).
+ //
+ assert (unamended || replacement);
+
+ if (!(replacement || (unamended && ud)))
+ continue;
+ }
+
if (dsp->state == package_state::broken)
- fail << "unable to build broken package " << dn <<
+ fail << "unable to build broken package " << dn << *ddb <<
info << "use 'pkg-purge --force' to remove";
// If the constraint is imposed by the user we also need to make sure
@@ -905,11 +1094,11 @@ namespace bpkg
// doesn't really matter).
//
shared_ptr<repository_fragment> root (
- db.load<repository_fragment> (""));
+ mdb.load<repository_fragment> (""));
rp = system
- ? find_available_one (db, dn, nullopt, root)
- : find_available_one (db,
+ ? find_available_one (mdb, dn, nullopt, root)
+ : find_available_one (mdb,
dn,
version_constraint (dsp->version),
root);
@@ -918,7 +1107,7 @@ namespace bpkg
// (returning stub as an available package feels wrong).
//
if (dap == nullptr || dap->stub ())
- rp = make_available (options, cd, db, dsp);
+ rp = make_available (options, *ddb, dsp);
}
else
// Remember that we may be forcing up/downgrade; we will deal with
@@ -927,6 +1116,117 @@ namespace bpkg
force = true;
}
+ // If this is a build-time dependency and we build it for the first
+ // time, then we need to find a suitable configuration (of the host or
+ // build2 type) to build it in.
+ //
+ // If the current configuration (ddb) is of the suitable type, then we
+ // use that. Otherwise, we go through its immediate explicit links. If
+ // only one of them has the suitable type, then we use that. If there
+ // are multiple of them, then we fail advising the user to pick one
+ // explicitly. If there are none, then we create the private
+ // configuration and use that.
+ //
+ // Note that if the user has explicitly specified the configuration
+ // for this dependency on the command line (using --config-*), then
+ // this configuration is used as the starting point for this search.
+ //
+ if (da.buildtime && dsp == nullptr)
+ {
+ database* db (nullptr);
+ const string& type (buildtime_dependency_type (dn));
+
+ // Note that the first returned link is for ddb itself.
+ //
+ for (const linked_config& lc: ddb->explicit_links ())
+ {
+ database& ldb (lc.db);
+
+ if (ldb.type == type)
+ {
+ // We are done if the self-link is of the suitable type.
+ //
+ if (lc.id == 0)
+ {
+ db = &ldb;
+ break;
+ }
+
+ if (db == nullptr)
+ db = &ldb;
+ else
+ fail << "multiple possible " << type << " configurations for "
+ << "build-time dependency (" << dp << ")" <<
+ info << db->config_orig <<
+ info << ldb.config_orig <<
+ info << "use --config-* to select the configuration";
+ }
+ }
+
+ // If no suitable configuration is found, then create and link it.
+ //
+ if (db == nullptr)
+ {
+ const strings mods {"cc"};
+
+ const strings vars {
+ "config.config.load=~" + type,
+ "config.config.persist+='config.*'@unused=drop"};
+
+ dir_path cd (bpkg_dir / dir_path (type));
+
+ // Wipe a potentially existing un-linked private configuration
+ // left from a previous faulty run. Note that trying to reuse it
+ // would be a bad idea since it can be half-prepared, with an
+ // outdated database schema version, etc.
+ //
+ cfg_create (options,
+ ddb->config_orig / cd,
+ optional<string> (type) /* name */,
+ type /* type */,
+ mods,
+ vars,
+ false /* existing */,
+ true /* wipe */);
+
+ // Note that we will copy the name from the configuration unless
+ // it clashes with one of the existing links.
+ //
+ shared_ptr<configuration> lc (cfg_link (*ddb,
+ ddb->config / cd,
+ true /* relative */,
+ nullopt /* name */,
+ true /* sys_rep */));
+
+ // Save the newly-created private configuration, together with the
+ // containing configuration database, for their subsequent re-
+ // link.
+ //
+ priv_cfgs.emplace_back (*ddb, move (cd));
+
+ db = &ddb->find_attached (*lc->id);
+ }
+
+ ddb = db; // Switch to the dependency configuration.
+ }
+
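The configuration selection performed just above (use the dependent's own configuration if it is of the suitable type, otherwise require exactly one explicitly linked configuration of that type, otherwise create a private one) boils down to the following standalone sketch. database and linked_config are simplified placeholders and pick_config() is a hypothetical helper, not bpkg code; where the sketch throws, the real code fails with --config-* advice.

#include <cstdint>
#include <stdexcept>
#include <string>
#include <vector>

// Simplified placeholders (assumption); the real types live in bpkg.
struct database      {std::string config_orig; std::string type;};
struct linked_config {std::uint64_t id; database* db;};

// Return the configuration a build-time dependency of the given type should
// be built in, or nullptr if a private configuration needs to be created
// (the real code then runs cfg_create()/cfg_link() as shown above).
database*
pick_config (const std::vector<linked_config>& links, const std::string& type)
{
  database* r (nullptr);

  for (const linked_config& lc: links) // The first entry is the self-link.
  {
    if (lc.db->type != type)
      continue;

    if (lc.id == 0) // Self-link of the suitable type: we are done.
      return lc.db;

    if (r != nullptr) // The real code fails advising to use --config-*.
      throw std::runtime_error ("multiple possible " + type + " configurations");

    r = lc.db;
  }

  return r;
}

int main ()
{
  database self {"./",      "target"};
  database host {"../host", "host"};

  std::vector<linked_config> links {{0, &self}, {1, &host}};
  return pick_config (links, "host") == &host ? 0 : 1;
}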
+ // Note that building a dependent which is not a build2 module in the
+ // same configuration with the build2 module it depends upon is an
+ // error.
+ //
+ if (da.buildtime &&
+ !build2_module (name) &&
+ build2_module (dn) &&
+ pdb == *ddb)
+ {
+ // Note that the dependent package information is printed by the
+ // above exception guard.
+ //
+ fail << "unable to build build system module " << dn << " in its "
+ << "dependent package configuration " << pdb.config_orig <<
+ info << "use --config-* to select suitable configuration";
+ }
+
// If we didn't get the available package corresponding to the
// selected package, look for any that satisfies the constraint.
//
@@ -938,7 +1238,7 @@ namespace bpkg
// prerequisites).
//
if (af == nullptr)
- fail << "package " << pkg.available_name_version ()
+ fail << "package " << pkg.available_name_version_db ()
<< " is orphaned" <<
info << "explicitly upgrade it to a new version";
@@ -984,7 +1284,7 @@ namespace bpkg
// the package is recognized. An unrecognized package means the
// broken/stale repository (see below).
//
- rp = find_available_one (db,
+ rp = find_available_one (mdb,
dn,
!system ? d.constraint : nullopt,
af);
@@ -1006,7 +1306,7 @@ namespace bpkg
if (d.constraint && (!dep_constr || !wildcard (*dep_constr)))
dr << ' ' << *d.constraint;
- dr << " of package " << name;
+ dr << " of package " << name << pdb;
if (!af->location.empty () && (!dep_constr || system))
dr << info << "repository " << af->location << " appears to "
@@ -1032,17 +1332,17 @@ namespace bpkg
// version constraint). If it were, then the system version
// wouldn't be NULL and would satisfy itself.
//
- if (dap->system_version () == nullptr)
+ if (dap->system_version (*ddb) == nullptr)
fail << "dependency " << d << " of package " << name << " is "
<< "not available in source" <<
info << "specify ?sys:" << dn << " if it is available from "
<< "the system";
- if (!satisfies (*dap->system_version (), d.constraint))
+ if (!satisfies (*dap->system_version (*ddb), d.constraint))
fail << "dependency " << d << " of package " << name << " is "
<< "not available in source" <<
info << package_string (dn,
- *dap->system_version (),
+ *dap->system_version (*ddb),
true /* system */)
<< " does not satisfy the constrains";
@@ -1050,7 +1350,7 @@ namespace bpkg
}
else
{
- auto p (dap->system_version_authoritative ());
+ auto p (dap->system_version_authoritative (*ddb));
if (p.first != nullptr &&
p.second && // Authoritative.
@@ -1061,19 +1361,22 @@ namespace bpkg
build_package bp {
build_package::build,
+ *ddb,
dsp,
dap,
rp.second,
- nullopt, // Hold package.
- nullopt, // Hold version.
- {}, // Constraints.
+ nullopt, // Hold package.
+ nullopt, // Hold version.
+ {}, // Constraints.
system,
- false, // Keep output directory.
- nullopt, // Checkout root.
- false, // Checkout purge.
- strings (), // Configuration variables.
- {name}, // Required by (dependent).
- 0}; // Adjustments.
+ false, // Keep output directory.
+ false, // Configure-only.
+ nullopt, // Checkout root.
+ false, // Checkout purge.
+ strings (), // Configuration variables.
+ {config_package {pdb, name}}, // Required by (dependent).
+ true, // Required by dependents.
+ 0}; // State flags.
// Add our constraint, if we have one.
//
@@ -1083,7 +1386,7 @@ namespace bpkg
// completeness.
//
if (dp.constraint)
- bp.constraints.emplace_back (name.string (), *dp.constraint);
+ bp.constraints.emplace_back (pdb, name.string (), *dp.constraint);
// Now collect this prerequisite. If it was actually collected
// (i.e., it wasn't already there) and we are forcing a downgrade or
@@ -1104,7 +1407,12 @@ namespace bpkg
// build foo ?sys:bar/2
//
const build_package* p (
- collect_build (options, cd, db, move (bp), postponed));
+ collect_build (options,
+ move (bp),
+ fdb,
+ rpt_depts,
+ priv_cfgs,
+ postponed));
if (p != nullptr && force && !dep_optional)
{
@@ -1126,9 +1434,9 @@ namespace bpkg
(f ? dr << fail :
w ? dr << warn :
dr << info)
- << "package " << name << " dependency on "
+ << "package " << name << pdb << " dependency on "
<< (c ? "(" : "") << d << (c ? ")" : "") << " is forcing "
- << (u ? "up" : "down") << "grade of " << *dsp << " to ";
+ << (u ? "up" : "down") << "grade of " << *dsp << *ddb << " to ";
// Print both (old and new) package names in full if the system
// attribution changes.
@@ -1139,7 +1447,7 @@ namespace bpkg
dr << av; // Can't be a system version so is never wildcard.
if (dsp->hold_version)
- dr << info << "package version " << *dsp << " is held";
+ dr << info << "package version " << *dsp << *ddb << " is held";
if (f)
dr << info << "explicitly request version "
@@ -1149,15 +1457,97 @@ namespace bpkg
}
}
+ // Collect the repointed dependents and their replaced prerequisites,
+ // recursively.
+ //
+ // If a repointed dependent is already pre-entered or collected with an
+ // action other than adjustment, then just mark it for reconfiguration
+ // unless it is already implied. Otherwise, collect the package build with
+ // the repoint sub-action and reconfigure adjustment flag.
+ //
+ void
+ collect_repointed_dependents (
+ const common_options& o,
+ database& mdb,
+ const repointed_dependents& rpt_depts,
+ build_packages::postponed_packages& postponed,
+ const function<find_database_function>& fdb,
+ private_configs& priv_cfgs)
+ {
+ for (const auto& rd: rpt_depts)
+ {
+ const shared_ptr<selected_package>& sp (rd.first);
+
+ auto i (map_.find (mdb, sp->name));
+ if (i != map_.end ())
+ {
+ build_package& b (i->second.package);
+
+ if (!b.action || *b.action != build_package::adjust)
+ {
+ if (!b.action ||
+ (*b.action != build_package::drop && !b.reconfigure ()))
+ b.flags |= build_package::adjust_reconfigure;
+
+ continue;
+ }
+ }
+
+ // The repointed dependent can be an orphan, so just create the
+ // available package from the selected package.
+ //
+ auto rp (make_available (o, mdb, sp));
+
+ // Add the prerequisite replacements as the required-by packages.
+ //
+ set<config_package> required_by;
+ for (const auto& prq: rd.second)
+ {
+ if (prq.second) // Prerequisite replacement?
+ {
+ const config_package& cp (prq.first);
+ required_by.emplace (cp.db, cp.name);
+ }
+ }
+
+ build_package p {
+ build_package::build,
+ mdb,
+ sp,
+ move (rp.first),
+ move (rp.second),
+ nullopt, // Hold package.
+ nullopt, // Hold version.
+ {}, // Constraints.
+ sp->system (),
+ false, // Keep output directory.
+ false, // Configure-only.
+ nullopt, // Checkout root.
+ false, // Checkout purge.
+ strings (), // Configuration variables.
+ move (required_by), // Required by (dependencies).
+ false, // Required by dependents.
+ build_package::adjust_reconfigure | build_package::build_repoint};
+
+ collect_build (o,
+ move (p),
+ fdb,
+ rpt_depts,
+ priv_cfgs,
+ &postponed);
+ }
+ }
+
// Collect the package being dropped.
//
void
- collect_drop (shared_ptr<selected_package> sp)
+ collect_drop (database& db, shared_ptr<selected_package> sp)
{
const package_name& nm (sp->name);
build_package p {
build_package::drop,
+ db,
move (sp),
nullptr,
nullptr,
@@ -1166,37 +1556,36 @@ namespace bpkg
{}, // Constraints.
false, // System package.
false, // Keep output directory.
+ false, // Configure-only.
nullopt, // Checkout root.
false, // Checkout purge.
strings (), // Configuration variables.
{}, // Required by.
- 0}; // Adjustments.
+ false, // Required by dependents.
+ 0}; // State flags.
- auto i (map_.find (nm));
+ auto i (map_.find (db, nm));
if (i != map_.end ())
{
build_package& bp (i->second.package);
- // Can't think of the scenario when this happens. We would start
- // collecting from scratch (see below).
- //
- assert (!bp.action || *bp.action != build_package::build);
-
- // Overwrite the existing (possibly pre-entered or adjustment) entry.
+ // Overwrite the existing (possibly pre-entered, adjustment, or
+ // repoint) entry.
//
bp = move (p);
}
else
- map_.emplace (nm, data_type {end (), move (p)});
+ map_.emplace (config_package {db, nm},
+ data_type {end (), move (p)});
}
// Collect the package being unheld.
//
void
- collect_unhold (const shared_ptr<selected_package>& sp)
+ collect_unhold (database& db, const shared_ptr<selected_package>& sp)
{
- auto i (map_.find (sp->name));
+ auto i (map_.find (db, sp->name));
// Currently, it must always be pre-entered.
//
@@ -1208,6 +1597,7 @@ namespace bpkg
{
build_package p {
build_package::adjust,
+ db,
sp,
nullptr,
nullptr,
@@ -1216,36 +1606,47 @@ namespace bpkg
{}, // Constraints.
false, // System package.
false, // Keep output directory.
+ false, // Configure-only.
nullopt, // Checkout root.
false, // Checkout purge.
strings (), // Configuration variables.
{}, // Required by.
+ false, // Required by dependents.
build_package::adjust_unhold};
p.merge (move (bp));
bp = move (p);
}
else
- bp.adjustments |= build_package::adjust_unhold;
+ bp.flags |= build_package::adjust_unhold;
}
void
collect_build_prerequisites (const common_options& o,
- const dir_path& cd,
database& db,
const package_name& name,
- postponed_packages& postponed)
+ postponed_packages& postponed,
+ const function<find_database_function>& fdb,
+ const repointed_dependents& rpt_depts,
+ private_configs& priv_cfgs)
{
- auto mi (map_.find (name));
+ auto mi (map_.find (db, name));
assert (mi != map_.end ());
- collect_build_prerequisites (o, cd, db, mi->second.package, &postponed);
+
+ collect_build_prerequisites (o,
+ mi->second.package,
+ &postponed,
+ fdb,
+ rpt_depts,
+ priv_cfgs);
}
void
collect_build_postponed (const common_options& o,
- const dir_path& cd,
- database& db,
- postponed_packages& pkgs)
+ postponed_packages& pkgs,
+ const function<find_database_function>& fdb,
+ const repointed_dependents& rpt_depts,
+ private_configs& priv_cfgs)
{
// Try collecting postponed packages for as long as we are making
// progress.
@@ -1255,7 +1656,12 @@ namespace bpkg
postponed_packages npkgs;
for (const build_package* p: pkgs)
- collect_build_prerequisites (o, cd, db, *p, prog ? &npkgs : nullptr);
+ collect_build_prerequisites (o,
+ *p,
+ prog ? &npkgs : nullptr,
+ fdb,
+ rpt_depts,
+ priv_cfgs);
assert (prog); // collect_build_prerequisites() should have failed.
prog = (npkgs != pkgs);
@@ -1264,16 +1670,27 @@ namespace bpkg
}
// Order the previously-collected package with the specified name
- // returning its positions. Recursively order the package dependencies
- // being ordered failing if a dependency cycle is detected. If reorder is
- // true, then reorder this package to be considered as "early" as
- // possible.
+ // returning its positions.
+ //
+ // If buildtime is nullopt, then search for the specified package build in
+ // only the specified configuration. Otherwise, treat the package as a
+ // dependency and use the custom search function to find its build
+ // configuration. Failing that, search for it recursively (see
+ // config_package_map::find_dependency() for details).
+ //
+ // Recursively order the package dependencies being ordered failing if a
+ // dependency cycle is detected. If reorder is true, then reorder this
+ // package to be considered as "early" as possible.
//
iterator
- order (const package_name& name, bool reorder = true)
+ order (database& db,
+ const package_name& name,
+ optional<bool> buildtime,
+ const function<find_database_function>& fdb,
+ bool reorder = true)
{
- package_names chain;
- return order (name, chain, reorder);
+ config_package_names chain;
+ return order (db, name, buildtime, chain, fdb, reorder);
}
// If a configured package is being up/down-graded then that means
@@ -1292,7 +1709,7 @@ namespace bpkg
// to also notice this.
//
void
- collect_order_dependents (database& db)
+ collect_order_dependents (const repointed_dependents& rpt_depts)
{
// For each package on the list we want to insert all its dependents
// before it so that they get configured after the package on which
@@ -1313,18 +1730,21 @@ namespace bpkg
// Dropped package may have no dependents.
//
if (*p.action != build_package::drop && p.reconfigure ())
- collect_order_dependents (db, i);
+ collect_order_dependents (i, rpt_depts);
}
}
void
- collect_order_dependents (database& db, iterator pos)
+ collect_order_dependents (iterator pos,
+ const repointed_dependents& rpt_depts)
{
tracer trace ("collect_order_dependents");
assert (pos != end ());
build_package& p (*pos);
+
+ database& pdb (p.db);
const shared_ptr<selected_package>& sp (p.selected);
const package_name& n (sp->name);
@@ -1336,166 +1756,197 @@ namespace bpkg
? sp->version.compare (p.available_version ())
: 0);
- using query = query<package_dependent>;
-
- for (auto& pd: db.query<package_dependent> (query::name == n))
+ for (database& ddb: pdb.dependent_configs ())
{
- package_name& dn (pd.name);
- auto i (map_.find (dn));
+ for (auto& pd: query_dependents (ddb, n, pdb))
+ {
+ package_name& dn (pd.name);
+ auto i (map_.find (ddb, dn));
- // First make sure the up/downgraded package still satisfies this
- // dependent.
- //
- bool check (ud != 0 && pd.constraint);
+ // Make sure the up/downgraded package still satisfies this
+ // dependent. But first "prune" if this is a replaced prerequisite
+ // of the repointed dependent.
+ //
+ // Note that the repointed dependents are always collected and have
+ // all their collected prerequisites ordered (including new and old
+ // ones). See collect_build_prerequisites() and order() for details.
+ //
+ bool check (ud != 0 && pd.constraint);
- // There is one tricky aspect: the dependent could be in the process
- // of being up/downgraded as well. In this case all we need to do is
- // detect this situation and skip the test since all the (new)
- // contraints of this package have been satisfied in collect_build().
- //
- if (check && i != map_.end () && i->second.position != end ())
- {
- build_package& dp (i->second.package);
+ if (i != map_.end () && i->second.position != end ())
+ {
+ build_package& dp (i->second.package);
- check = dp.available == nullptr ||
- (dp.selected->system () == dp.system &&
- dp.selected->version == dp.available_version ());
- }
+ const shared_ptr<selected_package>& dsp (dp.selected);
- if (check)
- {
- const version& av (p.available_version ());
- const version_constraint& c (*pd.constraint);
+ repointed_dependents::const_iterator j (rpt_depts.find (sp));
- if (!satisfies (av, c))
- {
- diag_record dr (fail);
+ if (j != rpt_depts.end ())
+ {
+ const map<config_package, bool>& prereqs_flags (j->second);
- dr << "unable to " << (ud < 0 ? "up" : "down") << "grade "
- << "package " << *sp << " to ";
+ auto k (prereqs_flags.find (config_package {pdb, n}));
- // Print both (old and new) package names in full if the system
- // attribution changes.
+ if (k != prereqs_flags.end () && !k->second)
+ continue;
+ }
+
+ // There is one tricky aspect: the dependent could be in the
+ // process of being up/downgraded as well. In this case all we
+ // need to do is detect this situation and skip the test since all
+ // the (new) constraints of this package have been satisfied in
+ // collect_build().
//
- if (p.system != sp->system ())
- dr << p.available_name_version ();
- else
- dr << av; // Can't be the wildcard otherwise would satisfy.
+ if (check)
+ {
+ check = dp.available == nullptr ||
+ (dsp->system () == dp.system &&
+ dsp->version == dp.available_version ());
+ }
+ }
- dr << info << "because package " << dn << " depends on (" << n
- << " " << c << ")";
+ if (check)
+ {
+ const version& av (p.available_version ());
+ const version_constraint& c (*pd.constraint);
- string rb;
- if (!p.user_selection ())
+ if (!satisfies (av, c))
{
- for (const package_name& n: p.required_by)
- rb += ' ' + n.string ();
- }
+ diag_record dr (fail);
- if (!rb.empty ())
- dr << info << "package " << p.available_name_version ()
- << " required by" << rb;
+ dr << "unable to " << (ud < 0 ? "up" : "down") << "grade "
+ << "package " << *sp << pdb << " to ";
- dr << info << "explicitly request up/downgrade of package " << dn;
+ // Print both (old and new) package names in full if the system
+ // attribution changes.
+ //
+ if (p.system != sp->system ())
+ dr << p.available_name_version ();
+ else
+ dr << av; // Can't be the wildcard otherwise would satisfy.
- dr << info << "or explicitly specify package " << n << " version "
- << "to manually satisfy these constraints";
- }
+ dr << info << "because package " << dn << ddb << " depends on ("
+ << n << " " << c << ")";
- // Add this contraint to the list for completeness.
- //
- p.constraints.emplace_back (dn.string (), c);
- }
+ string rb;
+ if (!p.user_selection ())
+ {
+ for (const config_package& cp: p.required_by)
+ rb += ' ' + cp.string ();
+ }
- auto adjustment = [&dn, &n, &db] () -> build_package
- {
- shared_ptr<selected_package> dsp (db.load<selected_package> (dn));
- bool system (dsp->system ()); // Save flag before the move(dsp) call.
-
- return build_package {
- build_package::adjust,
- move (dsp),
- nullptr, // No available package/repository fragment.
- nullptr,
- nullopt, // Hold package.
- nullopt, // Hold version.
- {}, // Constraints.
- system,
- false, // Keep output directory.
- nullopt, // Checkout root.
- false, // Checkout purge.
- strings (), // Configuration variables.
- {n}, // Required by (dependency).
- build_package::adjust_reconfigure};
- };
+ if (!rb.empty ())
+ dr << info << "package " << p.available_name_version ()
+ << " required by" << rb;
- // We can have three cases here: the package is already on the
- // list, the package is in the map (but not on the list) and it
- // is in neither.
- //
- // If the existing entry is a drop, then we skip it. If it is
- // pre-entered, is an adjustment, or is a build that is not supposed
- // to be built (not in the list), then we merge it into the new
- // adjustment entry. Otherwise (is a build in the list), we just add
- // the reconfigure adjustment flag to it.
- //
- if (i != map_.end ())
- {
- build_package& dp (i->second.package);
- iterator& dpos (i->second.position);
+ dr << info << "explicitly request up/downgrade of package "
+ << dn;
- if (!dp.action || // Pre-entered.
- *dp.action != build_package::build || // Non-build.
- dpos == end ()) // Build not in the list.
- {
- // Skip the droped package.
- //
- if (dp.action && *dp.action == build_package::drop)
- continue;
+ dr << info << "or explicitly specify package " << n
+ << " version to manually satisfy these constraints";
+ }
- build_package bp (adjustment ());
- bp.merge (move (dp));
- dp = move (bp);
+ // Add this constraint to the list for completeness.
+ //
+ p.constraints.emplace_back (ddb, dn.string (), c);
}
- else // Build in the list.
- dp.adjustments |= build_package::adjust_reconfigure;
-
- // It may happen that the dependent is already in the list but is
- // not properly ordered against its dependencies that get into the
- // list via another dependency path. Thus, we check if the dependent
- // is to the right of its dependency and, if that's the case,
- // reinsert it in front of the dependency.
+
+ auto adjustment = [&dn, &ddb, &n, &pdb] () -> build_package
+ {
+ shared_ptr<selected_package> dsp (ddb.load<selected_package> (dn));
+
+ bool system (dsp->system ()); // Save before the move(dsp) call.
+
+ return build_package {
+ build_package::adjust,
+ ddb,
+ move (dsp),
+ nullptr, // No available pkg/repo fragment.
+ nullptr,
+ nullopt, // Hold package.
+ nullopt, // Hold version.
+ {}, // Constraints.
+ system,
+ false, // Keep output directory.
+ false, // Configure-only.
+ nullopt, // Checkout root.
+ false, // Checkout purge.
+ strings (), // Configuration variables.
+ {config_package {pdb, n}}, // Required by (dependency).
+ false, // Required by dependents.
+ build_package::adjust_reconfigure};
+ };
+
+ // We can have three cases here: the package is already on the
+ // list, the package is in the map (but not on the list) and it
+ // is in neither.
+ //
+ // If the existing entry is a drop, then we skip it. If it is
+ // pre-entered, is an adjustment, or is a build that is not supposed
+ // to be built (not in the list), then we merge it into the new
+ // adjustment entry. Otherwise (is a build in the list), we just add
+ // the reconfigure adjustment flag to it.
//
- if (dpos != end ())
+ if (i != map_.end ())
{
- for (auto i (pos); i != end (); ++i)
+ build_package& dp (i->second.package);
+ iterator& dpos (i->second.position);
+
+ if (!dp.action || // Pre-entered.
+ *dp.action != build_package::build || // Non-build.
+ dpos == end ()) // Build not in the list.
+ {
+ // Skip the dropped package.
+ //
+ if (dp.action && *dp.action == build_package::drop)
+ continue;
+
+ build_package bp (adjustment ());
+ bp.merge (move (dp));
+ dp = move (bp);
+ }
+ else // Build in the list.
+ dp.flags |= build_package::adjust_reconfigure;
+
+ // It may happen that the dependent is already in the list but is
+ // not properly ordered against its dependencies that get into the
+ // list via another dependency path. Thus, we check if the
+ // dependent is to the right of its dependency and, if that's the
+ // case, reinsert it in front of the dependency.
+ //
+ if (dpos != end ())
{
- if (i == dpos)
+ for (auto i (pos); i != end (); ++i)
{
- erase (dpos);
- dpos = insert (pos, dp);
- break;
+ if (i == dpos)
+ {
+ erase (dpos);
+ dpos = insert (pos, dp);
+ break;
+ }
}
}
+ else
+ dpos = insert (pos, dp);
}
else
- dpos = insert (pos, dp);
- }
- else
- {
- i = map_.emplace (
- move (dn), data_type {end (), adjustment ()}).first;
+ {
+ // Don't move dn since it is used by adjustment().
+ //
+ i = map_.emplace (config_package {ddb, dn},
+ data_type {end (), adjustment ()}).first;
- i->second.position = insert (pos, i->second.package);
- }
+ i->second.position = insert (pos, i->second.package);
+ }
- // Recursively collect our own dependents inserting them before us.
- //
- // Note that we cannot end up with an infinite recursion for
- // configured packages due to a dependency cycle (see order() for
- // details).
- //
- collect_order_dependents (db, i->second.position);
+ // Recursively collect our own dependents inserting them before us.
+ //
+ // Note that we cannot end up with an infinite recursion for
+ // configured packages due to a dependency cycle (see order() for
+ // details).
+ //
+ collect_order_dependents (i->second.position, rpt_depts);
+ }
}
}
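A standalone sketch of the reordering step above: in the "dependency-ordered" list every package may only depend on packages after it, so if a dependent is found to the right of its dependency it is erased and reinserted in front of it. Plain strings stand in for the build_package entries.

#include <algorithm>
#include <cassert>
#include <list>
#include <string>

int main ()
{
  // foo depends on libfoo, so foo must appear before libfoo. Here it doesn't.
  std::list<std::string> l {"libfoo", "foo"};

  auto pos  (std::find (l.begin (), l.end (), "libfoo")); // Dependency.
  auto dpos (std::find (l.begin (), l.end (), "foo"));    // Dependent.

  // Check if the dependent is to the right of its dependency and, if so,
  // reinsert it in front of the dependency (cf. collect_order_dependents()).
  for (auto i (pos); i != l.end (); ++i)
  {
    if (i == dpos)
    {
      l.erase (dpos);
      dpos = l.insert (pos, "foo");
      break;
    }
  }

  assert (l.front () == "foo");
}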
@@ -1516,34 +1967,64 @@ namespace bpkg
}
private:
- using package_names = small_vector<reference_wrapper<const package_name>,
- 16>;
+ struct config_package_name
+ {
+ database& db;
+ const package_name& name;
+
+ bool
+ operator== (const config_package_name& v)
+ {
+ return name == v.name && db == v.db;
+ }
+ };
+ using config_package_names = small_vector<config_package_name, 16>;
iterator
- order (const package_name& name, package_names& chain, bool reorder)
+ order (database& db,
+ const package_name& name,
+ optional<bool> buildtime,
+ config_package_names& chain,
+ const function<find_database_function>& fdb,
+ bool reorder)
{
+ config_package_map::iterator mi;
+
+ if (buildtime)
+ {
+ database* ddb (fdb (db, name, *buildtime));
+
+ mi = ddb != nullptr
+ ? map_.find (*ddb, name)
+ : map_.find_dependency (db, name, *buildtime);
+ }
+ else
+ mi = map_.find (db, name);
+
// Every package that we order should have already been collected.
//
- auto mi (map_.find (name));
assert (mi != map_.end ());
build_package& p (mi->second.package);
assert (p.action); // Can't order just a pre-entered package.
+ database& pdb (p.db);
+
// Make sure there is no dependency cycle.
//
+ config_package_name cp {pdb, name};
{
- auto i (find (chain.begin (), chain.end (), name));
+ auto i (find (chain.begin (), chain.end (), cp));
if (i != chain.end ())
{
diag_record dr (fail);
- dr << "dependency cycle detected involving package " << name;
+ dr << "dependency cycle detected involving package " << name << pdb;
- auto nv = [this] (const package_name& name)
+ auto nv = [this] (const config_package_name& cp)
{
- auto mi (map_.find (name));
+ auto mi (map_.find (cp.db, cp.name));
assert (mi != map_.end ());
build_package& p (mi->second.package);
@@ -1556,14 +2037,14 @@ namespace bpkg
//
assert (p.available != nullptr);
- return p.available_name_version ();
+ return p.available_name_version_db ();
};
// Note: push_back() can invalidate the iterator.
//
size_t j (i - chain.begin ());
- for (chain.push_back (name); j != chain.size () - 1; ++j)
+ for (chain.push_back (cp); j != chain.size () - 1; ++j)
dr << info << nv (chain[j]) << " depends on " << nv (chain[j + 1]);
}
}
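The cycle check above walks a chain of the packages currently being ordered and fails if the package about to be ordered is already on it. Below is a minimal standalone version of the same idea, with plain strings instead of config_package_name and a recursive helper instead of the real order():

#include <algorithm>
#include <iostream>
#include <map>
#include <string>
#include <vector>

// Depth-first ordering with a chain of in-progress packages; a package that
// is already on the chain indicates a dependency cycle.
static bool
order (const std::string& name,
       const std::map<std::string, std::vector<std::string>>& deps,
       std::vector<std::string>& chain)
{
  if (std::find (chain.begin (), chain.end (), name) != chain.end ())
  {
    std::cerr << "dependency cycle detected involving package " << name << '\n';
    return false;
  }

  chain.push_back (name);

  auto i (deps.find (name));
  if (i != deps.end ())
    for (const std::string& d: i->second)
      if (!order (d, deps, chain))
        return false;

  chain.pop_back ();
  return true;
}

int main ()
{
  std::map<std::string, std::vector<std::string>> deps {
    {"foo", {"libbar"}}, {"libbar", {"foo"}}}; // foo -> libbar -> foo

  std::vector<std::string> chain;
  return order ("foo", deps, chain) ? 0 : 1;
}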
@@ -1585,7 +2066,7 @@ namespace bpkg
// position of its "earliest" prerequisite -- this is where it
// will be inserted.
//
- const shared_ptr<selected_package>& sp (p.selected);
+ const shared_ptr<selected_package>& sp (p.selected);
const shared_ptr<available_package>& ap (p.available);
bool build (*p.action == build_package::build);
@@ -1620,6 +2101,10 @@ namespace bpkg
// be disfigured during the plan execution, then we must order its
// (current) dependencies that also need to be disfigured.
//
+ // And yet, if the package we are ordering is a repointed dependent,
+ // then we must order not only its unamended and new prerequisites but
+ // also its replaced prerequisites, which can also be disfigured.
+ //
bool src_conf (sp != nullptr &&
sp->state == package_state::configured &&
sp->substate != package_substate::system);
@@ -1632,7 +2117,7 @@ namespace bpkg
bool order_disfigured (src_conf && disfigure (p));
- chain.push_back (name);
+ chain.push_back (cp);
// Order the build dependencies.
//
@@ -1647,13 +2132,23 @@ namespace bpkg
{
for (const auto& p: sp->prerequisites)
{
+ database& db (p.first.database ());
const package_name& name (p.first.object_id ());
// The prerequisites may not necessarily be in the map.
//
- auto i (map_.find (name));
+ // Note that for the repointed dependent we also order its new and
+ // replaced prerequisites here, since they all are in the selected
+ // package prerequisites set.
+ //
+ auto i (map_.find (db, name));
if (i != map_.end () && i->second.package.action)
- update (order (name, chain, false /* reorder */));
+ update (order (db,
+ name,
+ nullopt /* buildtime */,
+ chain,
+ fdb,
+ false /* reorder */));
}
// We just ordered them among other prerequisites.
@@ -1679,7 +2174,16 @@ namespace bpkg
if (da.buildtime && (dn == "build2" || dn == "bpkg"))
continue;
- update (order (d.name, chain, false /* reorder */));
+ // Note that for the repointed dependent we only order its new and
+ // unamended prerequisites here. Its replaced prerequisites will
+ // be ordered below.
+ //
+ update (order (pdb,
+ d.name,
+ da.buildtime,
+ chain,
+ fdb,
+ false /* reorder */));
}
}
}
@@ -1690,14 +2194,25 @@ namespace bpkg
{
for (const auto& p: sp->prerequisites)
{
+ database& db (p.first.database ());
const package_name& name (p.first.object_id ());
// The prerequisites may not necessarily be in the map.
//
- auto i (map_.find (name));
+ auto i (map_.find (db, name));
+ // Note that for the repointed dependent we also order its replaced
+ // and potentially new prerequisites here (see above). The latter is
+ // redundant (we may have already ordered them above) but harmless,
+ // since we do not reorder.
+ //
if (i != map_.end () && disfigure (i->second.package))
- update (order (name, chain, false /* reorder */));
+ update (order (db,
+ name,
+ nullopt /* buildtime */,
+ chain,
+ fdb,
+ false /* reorder */));
}
}
@@ -1713,7 +2228,49 @@ namespace bpkg
build_package package;
};
- map<package_name, data_type> map_;
+ class config_package_map: public map<config_package, data_type>
+ {
+ public:
+ using base_type = map<config_package, data_type>;
+
+ iterator
+ find (database& db, const package_name& pn)
+ {
+ return base_type::find (config_package {db, pn});
+ }
+
+ // Try to find a package build in the dependency configurations (see
+ // database::dependency_configs() for details). Return the end iterator
+ // if no build is found and issue diagnostics and fail if multiple
+ // builds (in multiple configurations) are found.
+ //
+ iterator
+ find_dependency (database& db, const package_name& pn, bool buildtime)
+ {
+ iterator r (end ());
+
+ linked_databases ldbs (db.dependency_configs (pn, buildtime));
+
+ for (database& ldb: ldbs)
+ {
+ iterator i (find (ldb, pn));
+ if (i != end ())
+ {
+ if (r == end ())
+ r = i;
+ else
+ fail << "building package " << pn << " in multiple "
+ << "configurations" <<
+ info << r->first.db.config_orig <<
+ info << ldb.config_orig <<
+ info << "use --config-* to select package configuration";
+ }
+ }
+
+ return r;
+ }
+ };
+ config_package_map map_;
};
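The essential change behind config_package_map is keying the collected builds by configuration plus package name rather than by name alone, so the same package can be collected independently in several linked configurations. A minimal standalone sketch of such a composite key follows; database and the ordering are simplified placeholders, not the real bpkg config_package.

#include <cassert>
#include <map>
#include <string>

// Simplified stand-ins (assumption).
struct database {std::string config_orig;};

struct config_package
{
  database*   db;
  std::string name;

  bool
  operator< (const config_package& v) const
  {
    int r (name.compare (v.name));
    return r != 0 ? r < 0 : db < v.db;
  }
};

int main ()
{
  database target {"./"};
  database host   {"./.bpkg/host/"};

  // With the configuration in the key, the same package name can be
  // collected independently in several (linked) configurations.
  std::map<config_package, std::string> map;
  map[{&target, "libhello"}] = "build";
  map[{&host,   "libhello"}] = "build";

  assert (map.size () == 2);
  assert (map.find ({&host, "libhello"}) != map.end ());
}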
// Return a patch version constraint for the selected package if it has a
@@ -1767,15 +2324,16 @@ namespace bpkg
//
struct dependency_package
{
- package_name name;
- optional<version_constraint> constraint; // nullopt if unspecified.
- shared_ptr<selected_package> selected; // NULL if not present.
- bool system;
- bool patch; // Only for an empty version.
- bool keep_out;
- optional<dir_path> checkout_root;
- bool checkout_purge;
- strings config_vars; // Only if not system.
+ database& db;
+ package_name name;
+ optional<version_constraint> constraint; // nullopt if unspecified.
+ shared_ptr<selected_package> selected; // NULL if not present.
+ bool system;
+ bool patch; // Only for an empty version.
+ bool keep_out;
+ optional<dir_path> checkout_root;
+ bool checkout_purge;
+ strings config_vars; // Only if not system.
};
using dependency_packages = vector<dependency_package>;
@@ -1796,30 +2354,50 @@ namespace bpkg
//
struct evaluate_result
{
- shared_ptr<available_package> available;
+ reference_wrapper<database> db;
+ shared_ptr<available_package> available;
shared_ptr<bpkg::repository_fragment> repository_fragment;
- bool unused;
- bool system; // Is meaningless if unused.
+ bool unused;
+ bool system; // Is meaningless if unused.
};
- using package_dependents = vector<pair<shared_ptr<selected_package>,
- optional<version_constraint>>>;
+ struct config_package_dependent
+ {
+ database& db;
+ shared_ptr<selected_package> package;
+ optional<version_constraint> constraint;
+
+ config_package_dependent (database& d,
+ shared_ptr<selected_package> p,
+ optional<version_constraint> c)
+ : db (d), package (move (p)), constraint (move (c)) {}
+ };
+
+ using config_package_dependents = vector<config_package_dependent>;
static optional<evaluate_result>
evaluate_dependency (database&,
const shared_ptr<selected_package>&,
const optional<version_constraint>& desired,
bool desired_sys,
+ database& desired_db,
bool patch,
bool explicitly,
const set<shared_ptr<repository_fragment>>&,
- const package_dependents&,
+ const config_package_dependents&,
bool ignore_unsatisfiable);
+ // If there are no user expectations regarding this dependency, then we give
+ // no up/down-grade recommendation, unless there are no dependents in which
+ // case we recommend to drop the dependency.
+ //
+ // Note that the user expectations are only applied for dependencies that
+ // have dependents in the current configuration.
+ //
static optional<evaluate_result>
evaluate_dependency (database& db,
- const dependency_packages& deps,
const shared_ptr<selected_package>& sp,
+ const dependency_packages& deps,
bool ignore_unsatisfiable)
{
tracer trace ("evaluate_dependency");
@@ -1828,31 +2406,64 @@ namespace bpkg
const package_name& nm (sp->name);
- // Query the dependents and bail out if the dependency is unused.
+ database& mdb (db.main_database ());
+
+ // Only search for the user expectations regarding this dependency if it
+ // has dependents in the current configuration.
+ //
+ auto mdb_deps (query_dependents (mdb, nm, db)); // Stash, don't re-query.
+ bool mdb_dep (!mdb_deps.empty ());
+
+ auto i (mdb_dep
+ ? find_if (deps.begin (), deps.end (),
+ [&nm] (const dependency_package& i)
+ {
+ return i.name == nm;
+ })
+ : deps.end ());
+
+ bool user_exp (i != deps.end () && i->db.type == db.type);
+ bool copy_dep (user_exp && i->db != db);
+
+ // If the dependency needs to be copied, then only consider its dependents
+ // in the current configuration for the version constraints, etc.
//
- auto pds (db.query<package_dependent> (
- query<package_dependent>::name == nm));
+ linked_databases dbs (copy_dep
+ ? linked_databases ({mdb})
+ : db.dependent_configs ());
+
+ vector<pair<database&, package_dependent>> pds;
+
+ for (database& ddb: dbs)
+ {
+ auto ds (ddb.main () ? move (mdb_deps) : query_dependents (ddb, nm, db));
+
+ // Bail out if the dependency is used but there are no user expectations
+ // regarding it.
+ //
+ if (!ds.empty ())
+ {
+ if (!user_exp)
+ return nullopt;
+ for (auto& d: ds)
+ pds.emplace_back (ddb, move (d));
+ }
+ }
+
+ // Bail out if the dependency is unused.
+ //
if (pds.empty ())
{
- l5 ([&]{trace << *sp << ": unused";});
+ l5 ([&]{trace << *sp << db << ": unused";});
- return evaluate_result {nullptr /* available */,
+ return evaluate_result {db,
+ nullptr /* available */,
nullptr /* repository_fragment */,
true /* unused */,
false /* system */};
}
- // If there are no user expectations regarding this dependency, then we
- // give no up/down-grade recommendation.
- //
- auto i (find_if (
- deps.begin (), deps.end (),
- [&nm] (const dependency_package& i) {return i.name == nm;}));
-
- if (i == deps.end ())
- return nullopt;
-
// If the selected package matches the user expectations then no package
// change is required.
//
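The recommendation policy spelled out in the comment before this evaluate_dependency() overload reduces to three outcomes. The helper below is only an illustration of that decision table, not the bpkg implementation; recommend() and its parameters are hypothetical.

#include <cassert>

enum class recommendation {none, drop, evaluate};

// Decide what to do with an existing dependency package:
//  - no dependents at all            -> recommend dropping it ("unused");
//  - dependents but no user
//    expectations for the package    -> no up/down-grade recommendation;
//  - dependents and expectations     -> evaluate a possible up/down-grade.
static recommendation
recommend (bool has_dependents, bool has_user_expectations)
{
  if (!has_dependents)
    return recommendation::drop;

  return has_user_expectations
    ? recommendation::evaluate
    : recommendation::none;
}

int main ()
{
  assert (recommend (false, false) == recommendation::drop);
  assert (recommend (true,  false) == recommendation::none);
  assert (recommend (true,  true)  == recommendation::evaluate);
}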
@@ -1863,14 +2474,17 @@ namespace bpkg
//
const optional<version_constraint>& dvc (i->constraint); // May be nullopt.
bool dsys (i->system);
+ database& ddb (i->db);
- if (ssys == dsys &&
- dvc &&
- (ssys ? sv == *dvc->min_version : satisfies (sv, dvc)))
+ if (ssys == dsys &&
+ dvc &&
+ (ssys ? sv == *dvc->min_version : satisfies (sv, dvc)) &&
+ db == ddb)
{
- l5 ([&]{trace << *sp << ": unchanged";});
+ l5 ([&]{trace << *sp << db << ": unchanged";});
- return evaluate_result {nullptr /* available */,
+ return evaluate_result {db,
+ nullptr /* available */,
nullptr /* repository_fragment */,
false /* unused */,
false /* system */};
@@ -1881,14 +2495,18 @@ namespace bpkg
// dependency.
//
set<shared_ptr<repository_fragment>> repo_frags;
- package_dependents dependents;
+ config_package_dependents dependents;
for (auto& pd: pds)
{
- shared_ptr<selected_package> dsp (db.load<selected_package> (pd.name));
+ database& ddb (pd.first);
+ package_dependent& dep (pd.second);
+
+ shared_ptr<selected_package> dsp (
+ ddb.load<selected_package> (dep.name));
shared_ptr<available_package> dap (
- db.find<available_package> (
+ mdb.find<available_package> (
available_package_id (dsp->name, dsp->version)));
if (dap != nullptr)
@@ -1899,13 +2517,14 @@ namespace bpkg
repo_frags.insert (pl.repository_fragment.load ());
}
- dependents.emplace_back (move (dsp), move (pd.constraint));
+ dependents.emplace_back (ddb, move (dsp), move (dep.constraint));
}
return evaluate_dependency (db,
sp,
dvc,
dsys,
+ ddb,
i->patch,
true /* explicitly */,
repo_frags,
@@ -1913,15 +2532,39 @@ namespace bpkg
ignore_unsatisfiable);
}
+ struct config_selected_package
+ {
+ database& db;
+ const shared_ptr<selected_package>& package;
+
+ config_selected_package (database& d,
+ const shared_ptr<selected_package>& p)
+ : db (d), package (p) {}
+
+ bool
+ operator== (const config_selected_package& v) const
+ {
+ return package->name == v.package->name && db == v.db;
+ }
+
+ bool
+ operator< (const config_selected_package& v) const
+ {
+ int r (package->name.compare (v.package->name));
+ return r != 0 ? (r < 0) : (db < v.db);
+ }
+ };
+
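
For illustration only (not part of the diff), a minimal standalone sketch, with hypothetical simplified types, of the ordering config_selected_package implies: entries sort by package name first, with the configuration used only as a tie-breaker.

  #include <string>

  // Hypothetical simplified analogue of config_selected_package: order by
  // package name first and use the configuration only as a tie-breaker.
  //
  struct key
  {
    std::string name;
    int cfg; // Stand-in for the database/configuration identity.

    bool
    operator< (const key& v) const
    {
      int r (name.compare (v.name));
      return r != 0 ? (r < 0) : (cfg < v.cfg);
    }
  };

With such a comparator a set keeps, say, {"libfoo", 0} and {"libfoo", 1} as distinct entries while still grouping them by name.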
static optional<evaluate_result>
evaluate_dependency (database& db,
const shared_ptr<selected_package>& sp,
const optional<version_constraint>& dvc,
bool dsys,
+ database& ddb,
bool patch,
bool explicitly,
const set<shared_ptr<repository_fragment>>& rfs,
- const package_dependents& dependents,
+ const config_package_dependents& dependents,
bool ignore_unsatisfiable)
{
tracer trace ("evaluate_dependency");
@@ -1929,9 +2572,10 @@ namespace bpkg
const package_name& nm (sp->name);
const version& sv (sp->version);
- auto no_change = [] ()
+ auto no_change = [&db] ()
{
- return evaluate_result {nullptr /* available */,
+ return evaluate_result {db,
+ nullptr /* available */,
nullptr /* repository_fragment */,
false /* unused */,
false /* system */};
@@ -1955,7 +2599,7 @@ namespace bpkg
if (!c)
{
- l5 ([&]{trace << *sp << ": non-patchable";});
+ l5 ([&]{trace << *sp << db << ": non-patchable";});
return no_change ();
}
}
@@ -1965,7 +2609,7 @@ namespace bpkg
vector<pair<shared_ptr<available_package>,
shared_ptr<repository_fragment>>> afs (
- find_available (db,
+ find_available (db.main_database (),
nm,
c,
vector<shared_ptr<repository_fragment>> (rfs.begin (),
@@ -1975,30 +2619,21 @@ namespace bpkg
// satisfies all the dependents. Collect (and sort) unsatisfied dependents
// per the unsatisfiable version in case we need to print them.
//
- struct compare_sp
- {
- bool
- operator() (const shared_ptr<selected_package>& x,
- const shared_ptr<selected_package>& y) const
- {
- return x->name < y->name;
- }
- };
-
- using sp_set = set<reference_wrapper<const shared_ptr<selected_package>>,
- compare_sp>;
+ using sp_set = set<config_selected_package>;
vector<pair<version, sp_set>> unsatisfiable;
bool stub (false);
bool ssys (sp->system ());
- assert (!dsys || system_repository.find (nm) != nullptr);
+ assert (!dsys ||
+ (db.system_repository &&
+ db.system_repository->find (nm) != nullptr));
for (auto& af: afs)
{
shared_ptr<available_package>& ap (af.first);
- const version& av (!dsys ? ap->version : *ap->system_version ());
+ const version& av (!dsys ? ap->version : *ap->system_version (db));
// If we aim to upgrade to the latest version and it tends to be less
// then the selected one, then what we currently have is the best that
@@ -2006,7 +2641,7 @@ namespace bpkg
//
// Note that we also handle a package stub here.
//
- if (!dvc && av < sv)
+ if (!dvc && av < sv && db == ddb)
{
assert (!dsys); // Version can't be empty for the system package.
@@ -2015,7 +2650,7 @@ namespace bpkg
//
if (!ssys)
{
- l5 ([&]{trace << *sp << ": best";});
+ l5 ([&]{trace << *sp << db << ": best";});
return no_change ();
}
@@ -2037,7 +2672,7 @@ namespace bpkg
for (const auto& dp: dependents)
{
- if (!satisfies (av, dp.second))
+ if (!satisfies (av, dp.constraint))
{
satisfactory = false;
@@ -2047,7 +2682,7 @@ namespace bpkg
if (ignore_unsatisfiable)
break;
- unsatisfied_dependents.insert (dp.first);
+ unsatisfied_dependents.emplace (dp.db, dp.package);
}
}
@@ -2070,39 +2705,39 @@ namespace bpkg
// match the ones of the selected package, then no package change is
// required. Otherwise, recommend an up/down-grade.
//
- if (av == sv && ssys == dsys)
+ if (av == sv && ssys == dsys && db == ddb)
{
- l5 ([&]{trace << *sp << ": unchanged";});
+ l5 ([&]{trace << *sp << db << ": unchanged";});
return no_change ();
}
- l5 ([&]{trace << *sp << ": update to "
- << package_string (nm, av, dsys);});
+ l5 ([&]{trace << *sp << db << ": update to "
+ << package_string (nm, av, dsys) << ddb;});
return evaluate_result {
- move (ap), move (af.second), false /* unused */, dsys};
+ ddb, move (ap), move (af.second), false /* unused */, dsys};
}
// If we aim to upgrade to the latest version, then what we currently have
// is the only thing that we can get, and so returning the "no change"
// result, unless we need to upgrade a package configured as system.
//
- if (!dvc && !ssys)
+ if (!dvc && !ssys && db == ddb)
{
assert (!dsys); // Version cannot be empty for the system package.
- l5 ([&]{trace << *sp << ": only";});
+ l5 ([&]{trace << *sp << db << ": only";});
return no_change ();
}
// If the version satisfying the desired dependency version constraint is
// unavailable or unsatisfiable for some dependents then we fail, unless
- // requested not to do so. In the later case we return the "no change"
+ // requested not to do so. In the latter case we return the "no change"
// result.
//
if (ignore_unsatisfiable)
{
- l5 ([&]{trace << package_string (nm, dvc, dsys)
+ l5 ([&]{trace << package_string (nm, dvc, dsys) << ddb
<< (unsatisfiable.empty ()
? ": no source"
: ": unsatisfiable");});
@@ -2126,11 +2761,11 @@ namespace bpkg
//
assert (explicitly);
- fail << "patch version for " << *sp << " is not available "
+ fail << "patch version for " << *sp << db << " is not available "
<< "from its dependents' repositories";
}
else if (!stub)
- fail << package_string (nm, dsys ? nullopt : dvc)
+ fail << package_string (nm, dsys ? nullopt : dvc) << ddb
<< " is not available from its dependents' repositories";
else // The only available package is a stub.
{
@@ -2139,15 +2774,15 @@ namespace bpkg
//
assert (!dvc && !dsys && ssys);
- fail << package_string (nm, dvc) << " is not available in source "
- << "from its dependents' repositories";
+ fail << package_string (nm, dvc) << ddb << " is not available in "
+ << "source from its dependents' repositories";
}
}
// Issue the diagnostics and fail.
//
diag_record dr (fail);
- dr << "package " << nm << " doesn't satisfy its dependents";
+ dr << "package " << nm << ddb << " doesn't satisfy its dependents";
// Print the list of unsatisfiable versions together with dependents they
// don't satisfy: up to three latest versions with no more than five
@@ -2160,9 +2795,9 @@ namespace bpkg
size_t n (0);
const sp_set& ps (u.second);
- for (const shared_ptr<selected_package>& p: ps)
+ for (const config_selected_package& p: ps)
{
- dr << ' ' << *p;
+ dr << ' ' << *p.package << p.db;
if (++n == 5 && ps.size () != 6) // Printing 'and 1 more' looks stupid.
break;
@@ -2186,6 +2821,7 @@ namespace bpkg
//
struct recursive_package
{
+ database& db;
package_name name;
bool upgrade; // true -- upgrade, false -- patch.
bool recursive; // true -- recursive, false -- immediate.
@@ -2199,18 +2835,18 @@ namespace bpkg
static optional<bool>
upgrade_dependencies (database& db,
const package_name& nm,
- const recursive_packages& recs,
+ const recursive_packages& rs,
bool recursion = false)
{
- auto i (find_if (recs.begin (), recs.end (),
- [&nm] (const recursive_package& i) -> bool
+ auto i (find_if (rs.begin (), rs.end (),
+ [&nm, &db] (const recursive_package& i) -> bool
{
- return i.name == nm;
+ return i.name == nm && i.db == db;
}));
optional<bool> r;
- if (i != recs.end () && i->recursive >= recursion)
+ if (i != rs.end () && i->recursive >= recursion)
{
r = i->upgrade;
@@ -2218,20 +2854,23 @@ namespace bpkg
return r;
}
- for (const auto& pd: db.query<package_dependent> (
- query<package_dependent>::name == nm))
+ for (database& ddb: db.dependent_configs ())
{
- // Note that we cannot end up with an infinite recursion for configured
- // packages due to a dependency cycle (see order() for details).
- //
- if (optional<bool> u = upgrade_dependencies (db, pd.name, recs, true))
+ for (auto& pd: query_dependents (ddb, nm, db))
{
- if (!r || *r < *u) // Upgrade wins patch.
+ // Note that we cannot end up with an infinite recursion for
+ // configured packages due to a dependency cycle (see order() for
+ // details).
+ //
+ if (optional<bool> u = upgrade_dependencies (ddb, pd.name, rs, true))
{
- r = u;
+ if (!r || *r < *u) // Upgrade wins patch.
+ {
+ r = u;
- if (*r) // Upgrade (vs patch)?
- return r;
+ if (*r) // Upgrade (vs patch)?
+ return r;
+ }
}
}
}
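
As an aside (illustration only, not part of the diff), here is a small worked sketch of the "upgrade wins over patch" merge used above: the recommendation is an optional boolean where true means upgrade and false means patch, so the `!r || *r < *u` comparison lets an upgrade hit override an earlier patch hit. The sketch uses std::optional in place of the library's own optional.

  #include <optional>

  int main ()
  {
    std::optional<bool> r;         // No recommendation yet.

    std::optional<bool> u (false); // Patch hit from one dependent.
    if (!r || *r < *u) r = u;      // r == false (patch).

    u = true;                      // Upgrade hit from another dependent.
    if (!r || *r < *u) r = u;      // r == true (upgrade wins).
  }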
@@ -2252,8 +2891,8 @@ namespace bpkg
//
static optional<evaluate_result>
evaluate_recursive (database& db,
- const recursive_packages& recs,
const shared_ptr<selected_package>& sp,
+ const recursive_packages& recs,
bool ignore_unsatisfiable)
{
tracer trace ("evaluate_recursive");
@@ -2265,10 +2904,7 @@ namespace bpkg
// dependency.
//
set<shared_ptr<repository_fragment>> repo_frags;
- package_dependents dependents;
-
- auto pds (db.query<package_dependent> (
- query<package_dependent>::name == sp->name));
+ config_package_dependents dependents;
// Only collect repository fragments (for best version selection) of
// (immediate) dependents that have a hit (direct or indirect) in recs.
@@ -2276,39 +2912,46 @@ namespace bpkg
//
optional<bool> upgrade;
- for (const auto& pd: pds)
- {
- shared_ptr<selected_package> dsp (db.load<selected_package> (pd.name));
- dependents.emplace_back (dsp, move (pd.constraint));
+ database& mdb (db.main_database ());
- if (optional<bool> u = upgrade_dependencies (db, pd.name, recs))
+ for (database& ddb: db.dependent_configs ())
+ {
+ for (auto& pd: query_dependents (ddb, sp->name, db))
{
- if (!upgrade || *upgrade < *u) // Upgrade wins patch.
- upgrade = u;
- }
- else
- continue;
+ shared_ptr<selected_package> dsp (
+ ddb.load<selected_package> (pd.name));
- // While we already know that the dependency upgrade is required, we
- // continue to iterate over dependents, collecting the repository
- // fragments and the constraints.
- //
- shared_ptr<available_package> dap (
- db.find<available_package> (
- available_package_id (dsp->name, dsp->version)));
+ dependents.emplace_back (ddb, dsp, move (pd.constraint));
- if (dap != nullptr)
- {
- assert (!dap->locations.empty ());
+ if (optional<bool> u = upgrade_dependencies (ddb, pd.name, recs))
+ {
+ if (!upgrade || *upgrade < *u) // Upgrade wins patch.
+ upgrade = u;
+ }
+ else
+ continue;
- for (const auto& pl: dap->locations)
- repo_frags.insert (pl.repository_fragment.load ());
+ // While we already know that the dependency upgrade is required, we
+ // continue to iterate over dependents, collecting the repository
+ // fragments and the constraints.
+ //
+ shared_ptr<available_package> dap (
+ mdb.find<available_package> (
+ available_package_id (dsp->name, dsp->version)));
+
+ if (dap != nullptr)
+ {
+ assert (!dap->locations.empty ());
+
+ for (const auto& pl: dap->locations)
+ repo_frags.insert (pl.repository_fragment.load ());
+ }
}
}
if (!upgrade)
{
- l5 ([&]{trace << *sp << ": no hit";});
+ l5 ([&]{trace << *sp << db << ": no hit";});
return nullopt;
}
@@ -2319,6 +2962,7 @@ namespace bpkg
sp,
nullopt /* desired */,
false /*desired_sys */,
+ db,
!*upgrade /* patch */,
false /* explicitly */,
repo_frags,
@@ -2331,12 +2975,13 @@ namespace bpkg
return r && r->available == nullptr ? nullopt : r;
}
- static void
+ // Return false if the plan execution was a noop.
+ //
+ static bool
execute_plan (const pkg_build_options&,
- const dir_path&,
- database&,
build_package_list&,
- bool simulate);
+ bool simulate,
+ const function<find_database_function>&);
using pkg_options = pkg_build_pkg_options;
@@ -2365,7 +3010,13 @@ namespace bpkg
(o.upgrade_recursive () ? 1 : 0) +
(o.patch_immediate () ? 1 : 0) +
(o.patch_recursive () ? 1 : 0)) > 1)
- fail << "multiple --(upgrade|patch)-(immediate|recursive) specified";
+ dr << fail << "multiple --(upgrade|patch)-(immediate|recursive) "
+ << "specified";
+
+ if (((o.config_id_specified () ? 1 : 0) +
+ (o.config_name_specified () ? 1 : 0) +
+ (o.config_uuid_specified () ? 1 : 0)) > 1)
+ dr << fail << "multiple --config-* specified";
if (!dr.empty () && !pkg.empty ())
dr << info << "while validating options for " << pkg;
@@ -2408,6 +3059,29 @@ namespace bpkg
}
dst.checkout_purge (src.checkout_purge () || dst.checkout_purge ());
+
+ if (!dst.config_id_specified () &&
+ !dst.config_name_specified () &&
+ !dst.config_uuid_specified ())
+ {
+ if (src.config_id_specified ())
+ {
+ dst.config_id (src.config_id ());
+ dst.config_id_specified (true);
+ }
+
+ if (src.config_name_specified ())
+ {
+ dst.config_name (src.config_name ());
+ dst.config_name_specified (true);
+ }
+
+ if (src.config_uuid_specified ())
+ {
+ dst.config_uuid (src.config_uuid ());
+ dst.config_uuid_specified (true);
+ }
+ }
}
static bool
@@ -2424,7 +3098,10 @@ namespace bpkg
x.patch_immediate () == y.patch_immediate () &&
x.patch_recursive () == y.patch_recursive () &&
x.checkout_root () == y.checkout_root () &&
- x.checkout_purge () == y.checkout_purge ();
+ x.checkout_purge () == y.checkout_purge () &&
+ x.config_id () == y.config_id () &&
+ x.config_name () == y.config_name () &&
+ x.config_uuid () == y.config_uuid ();
}
int
@@ -2446,7 +3123,9 @@ namespace bpkg
fail << "package name argument expected" <<
info << "run 'bpkg help pkg-build' for more information";
- database db (open (c, trace)); // Also populates the system repository.
+ // Also populates the system repository.
+ //
+ database mdb (c, trace, true /* pre_attach */, true /* sys_rep */);
// Note that the session spans all our transactions. The idea here is that
// selected_package objects in build_packages below will be cached in this
@@ -2468,6 +3147,7 @@ namespace bpkg
//
struct pkg_spec
{
+ database* db; // A pointer since we build these objects incrementally.
string packages;
repository_location location;
pkg_options options;
@@ -2521,7 +3201,7 @@ namespace bpkg
vector<repository_location> locations;
- transaction t (db);
+ transaction t (mdb);
while (args.more ())
{
@@ -2571,6 +3251,15 @@ namespace bpkg
fail << e << " grouped for argument '" << a << "'";
}
+ // Note: main database if no --config-* option is specified.
+ //
+ if (ps.options.config_name_specified ())
+ ps.db = &mdb.find_attached (ps.options.config_name ());
+ else if (ps.options.config_uuid_specified ())
+ ps.db = &mdb.find_dependency_config (ps.options.config_uuid ());
+ else
+ ps.db = &mdb.find_attached (ps.options.config_id ());
+
if (!a.empty () && a[0] == '?')
{
ps.options.dependency (true);
@@ -2655,7 +3344,7 @@ namespace bpkg
( query::local && u + " COLLATE nocase = " + query::_val (l)));
#endif
- auto rs (db.query<repository> (q));
+ auto rs (mdb.query<repository> (q));
auto i (rs.begin ());
if (i != rs.end ())
@@ -2698,10 +3387,14 @@ namespace bpkg
t.commit ();
+ // Fetch the repositories in the current configuration.
+ //
+ // Note that during this build only the repository information from
+ // the main database will be used.
+ //
if (!locations.empty ())
rep_fetch (o,
- c,
- db,
+ mdb,
locations,
o.fetch_shallow (),
string () /* reason for "fetching ..." */);
@@ -2718,6 +3411,7 @@ namespace bpkg
//
struct pkg_arg
{
+ reference_wrapper<database> db;
package_scheme scheme;
package_name name;
optional<version_constraint> constraint;
@@ -2728,7 +3422,8 @@ namespace bpkg
// Create the parsed package argument.
//
- auto arg_package = [] (package_scheme sc,
+ auto arg_package = [] (database& db,
+ package_scheme sc,
package_name nm,
optional<version_constraint> vc,
pkg_options os,
@@ -2736,7 +3431,8 @@ namespace bpkg
{
assert (!vc || !vc->empty ()); // May not be empty if present.
- pkg_arg r {sc, move (nm), move (vc), string (), move (os), move (vs)};
+ pkg_arg r {
+ db, sc, move (nm), move (vc), string (), move (os), move (vs)};
switch (sc)
{
@@ -2750,14 +3446,20 @@ namespace bpkg
//
assert (r.constraint->min_version == r.constraint->max_version);
- const system_package* sp (system_repository.find (r.name));
+ assert (db.system_repository);
+
+ const system_package* sp (db.system_repository->find (r.name));
// Will deal with all the duplicates later.
//
if (sp == nullptr || !sp->authoritative)
- system_repository.insert (r.name,
- *r.constraint->min_version,
- true /* authoritative */);
+ {
+ assert (db.system_repository);
+
+ db.system_repository->insert (r.name,
+ *r.constraint->min_version,
+ true /* authoritative */);
+ }
break;
}
@@ -2769,9 +3471,13 @@ namespace bpkg
// Create the unparsed package argument.
//
- auto arg_raw = [] (string v, pkg_options os, strings vs) -> pkg_arg
+ auto arg_raw = [] (database& db,
+ string v,
+ pkg_options os,
+ strings vs) -> pkg_arg
{
- return pkg_arg {package_scheme::none,
+ return pkg_arg {db,
+ package_scheme::none,
package_name (),
nullopt /* constraint */,
move (v),
@@ -2838,6 +3544,11 @@ namespace bpkg
append (v, s);
};
+ auto add_num = [&add_string] (const char* o, auto v)
+ {
+ add_string (o, to_string (v));
+ };
+
const pkg_options& o (a.options);
add_bool ("--keep-out", o.keep_out ());
@@ -2855,6 +3566,15 @@ namespace bpkg
add_bool ("--checkout-purge", o.checkout_purge ());
+ if (o.config_id_specified ())
+ add_num ("--config-id", o.config_id ());
+
+ if (o.config_name_specified ())
+ add_string ("--config-name", o.config_name ());
+
+ if (o.config_uuid_specified ())
+ add_string ("--config-uuid", o.config_uuid ().string ());
+
// Compose the option/variable group.
//
if (!s.empty () || !a.config_vars.empty ())
@@ -2887,7 +3607,7 @@ namespace bpkg
//
vector<shared_ptr<available_package>> stubs;
- transaction t (db);
+ transaction t (mdb);
// Don't fold the zero revision if building the package from source so
// that we build the exact X+0 package revision if it is specified.
@@ -2944,14 +3664,16 @@ namespace bpkg
if (sys && vc)
stubs.push_back (make_shared<available_package> (n));
- pkg_args.push_back (arg_package (sc,
+ pkg_args.push_back (arg_package (*ps.db,
+ sc,
move (n),
move (vc),
move (ps.options),
move (ps.config_vars)));
}
else // Add unparsed.
- pkg_args.push_back (arg_raw (move (ps.packages),
+ pkg_args.push_back (arg_raw (*ps.db,
+ move (ps.packages),
move (ps.options),
move (ps.config_vars)));
@@ -2963,7 +3685,7 @@ namespace bpkg
// presence of --no-fetch option.
//
shared_ptr<repository> r (
- db.find<repository> (ps.location.canonical_name ()));
+ mdb.find<repository> (ps.location.canonical_name ()));
if (r == nullptr)
fail << "repository '" << ps.location
@@ -2986,7 +3708,7 @@ namespace bpkg
{
using query = query<repository_fragment_package>;
- for (const auto& rp: db.query<repository_fragment_package> (
+ for (const auto& rp: mdb.query<repository_fragment_package> (
(query::repository_fragment::name ==
rf.fragment.load ()->name) +
order_by_version_desc (query::package::id.version)))
@@ -3001,7 +3723,7 @@ namespace bpkg
if (ps.options.patch ())
{
shared_ptr<selected_package> sp (
- db.find<selected_package> (nm));
+ ps.db->find<selected_package> (nm));
// It seems natural in the presence of --patch option to only
// patch the selected packages and not to build new packages if
@@ -3052,7 +3774,8 @@ namespace bpkg
info << "package " << pv.first << " is not present in "
<< "configuration";
else
- pkg_args.push_back (arg_package (package_scheme::none,
+ pkg_args.push_back (arg_package (*ps.db,
+ package_scheme::none,
pv.first,
version_constraint (pv.second),
ps.options,
@@ -3108,12 +3831,14 @@ namespace bpkg
optional<version_constraint> c;
shared_ptr<selected_package> sp;
+ database& pdb (*ps.db);
+
if (!sys)
{
if (!vc)
{
if (ps.options.patch () &&
- (sp = db.find<selected_package> (n)) != nullptr)
+ (sp = pdb.find<selected_package> (n)) != nullptr)
{
c = patch_constraint (sp);
@@ -3129,7 +3854,7 @@ namespace bpkg
}
shared_ptr<available_package> ap (
- find_available_one (db, n, c, rfs, false /* prereq */).first);
+ find_available_one (mdb, n, c, rfs, false /* prereq */).first);
// Fail if no available package is found or only a stub is
// available and we are building a source package.
@@ -3141,7 +3866,7 @@ namespace bpkg
// If the selected package is loaded then we aim to patch it.
//
if (sp != nullptr)
- dr << "patch version for " << *sp << " is not found in "
+ dr << "patch version for " << *sp << pdb << " is not found in "
<< r->name;
else if (ap == nullptr)
dr << "package " << pkg << " is not found in " << r->name;
@@ -3166,7 +3891,8 @@ namespace bpkg
// Don't move options and variables as they may be reused.
//
- pkg_args.push_back (arg_package (sc,
+ pkg_args.push_back (arg_package (*ps.db,
+ sc,
move (n),
move (vc),
ps.options,
@@ -3180,6 +3906,10 @@ namespace bpkg
imaginary_stubs = move (stubs);
}
+ // List of packages specified on the command line.
+ //
+ vector<config_package> conf_pkgs;
+
// Separate the packages specified on the command line into to hold and to
// up/down-grade as dependencies, and save dependents whose dependencies
// must be upgraded recursively.
@@ -3214,6 +3944,7 @@ namespace bpkg
if (!r.second &&
(a.scheme != pa.scheme ||
a.name != pa.name ||
+ a.db != pa.db ||
a.constraint != pa.constraint ||
!compare_options (a.options, pa.options) ||
a.config_vars != pa.config_vars))
@@ -3224,9 +3955,10 @@ namespace bpkg
return !r.second;
};
- transaction t (db);
+ transaction t (mdb);
- shared_ptr<repository_fragment> root (db.load<repository_fragment> (""));
+ shared_ptr<repository_fragment> root (
+ mdb.load<repository_fragment> (""));
// Here is what happens here: for unparsed package args we are going to
// try and guess whether we are dealing with a package archive, package
@@ -3234,13 +3966,15 @@ namespace bpkg
// then as a directory, and then assume it is name/version. Sometimes,
// however, it is really one of the first two but just broken. In this
// case things are really confusing since we suppress all diagnostics
- // for the first two "guesses". So what we are going to do here is re-run
- // them with full diagnostics if the name/version guess doesn't pan out.
+ // for the first two "guesses". So what we are going to do here is
+ // re-run them with full diagnostics if the name/version guess doesn't
+ // pan out.
//
bool diag (false);
for (auto i (pkg_args.begin ()); i != pkg_args.end (); )
{
- pkg_arg& pa (*i);
+ pkg_arg& pa (*i);
+ database& pdb (pa.db);
// Reduce all the potential variations (archive, directory, package
// name, package name/version) to a single available_package object.
@@ -3288,7 +4022,8 @@ namespace bpkg
fail << "package archive '" << a
<< "' may not be built as a dependency";
- pa = arg_package (package_scheme::none,
+ pa = arg_package (pdb,
+ package_scheme::none,
m.name,
version_constraint (m.version),
move (pa.options),
@@ -3366,7 +4101,7 @@ namespace bpkg
//
if (optional<version> v =
package_iteration (o,
- c,
+ pdb,
t,
d,
m.name,
@@ -3374,7 +4109,8 @@ namespace bpkg
true /* check_external */))
m.version = move (*v);
- pa = arg_package (package_scheme::none,
+ pa = arg_package (pdb,
+ package_scheme::none,
m.name,
version_constraint (m.version),
move (pa.options),
@@ -3432,7 +4168,8 @@ namespace bpkg
false /* allow_wildcard */,
false /* fold_zero_revision */));
- pa = arg_package (package_scheme::none,
+ pa = arg_package (pdb,
+ package_scheme::none,
move (n),
move (vc),
move (pa.options),
@@ -3454,7 +4191,7 @@ namespace bpkg
assert (!arg_sys (pa));
if (pa.options.patch () &&
- (sp = db.find<selected_package> (pa.name)) != nullptr)
+ (sp = pdb.find<selected_package> (pa.name)) != nullptr)
{
c = patch_constraint (sp);
@@ -3473,7 +4210,7 @@ namespace bpkg
else if (!arg_sys (pa))
c = pa.constraint;
- auto rp (find_available_one (db, pa.name, c, root));
+ auto rp (find_available_one (mdb, pa.name, c, root));
ap = move (rp.first);
af = move (rp.second);
}
@@ -3511,7 +4248,7 @@ namespace bpkg
l4 ([&]{trace << "stashing recursive package "
<< arg_string (pa);});
- rec_pkgs.push_back (recursive_package {pa.name, *u, *r});
+ rec_pkgs.push_back (recursive_package {pdb, pa.name, *u, *r});
}
}
@@ -3527,23 +4264,26 @@ namespace bpkg
// Make sure that the package is known.
//
auto apr (!pa.constraint || sys
- ? find_available (db, pa.name, nullopt)
- : find_available (db, pa.name, *pa.constraint));
+ ? find_available (mdb, pa.name, nullopt)
+ : find_available (mdb, pa.name, *pa.constraint));
if (apr.empty ())
{
diag_record dr (fail);
dr << "unknown package " << arg_string (pa, false /* options */);
- check_any_available (c, t, &dr);
+ check_any_available (mdb, t, &dr);
}
// Save before the name move.
//
- sp = db.find<selected_package> (pa.name);
+ sp = pdb.find<selected_package> (pa.name);
+
+ conf_pkgs.emplace_back (pdb, pa.name);
dep_pkgs.push_back (
- dependency_package {move (pa.name),
+ dependency_package {pdb,
+ move (pa.name),
move (pa.constraint),
move (sp),
sys,
@@ -3566,10 +4306,10 @@ namespace bpkg
// the same as the selected package).
//
if (sp == nullptr)
- sp = db.find<selected_package> (pa.name);
+ sp = pdb.find<selected_package> (pa.name);
if (sp != nullptr && sp->state == package_state::broken)
- fail << "unable to build broken package " << pa.name <<
+ fail << "unable to build broken package " << pa.name << pdb <<
info << "use 'pkg-purge --force' to remove";
bool found (true);
@@ -3593,7 +4333,7 @@ namespace bpkg
if (ap == nullptr)
{
if (pa.constraint &&
- find_available_one (db,
+ find_available_one (mdb,
pa.name,
nullopt,
root).first != nullptr)
@@ -3675,7 +4415,7 @@ namespace bpkg
// Let's help the new user out here a bit.
//
- check_any_available (c, t, &dr);
+ check_any_available (mdb, t, &dr);
}
else
{
@@ -3698,9 +4438,9 @@ namespace bpkg
{
assert (sp != nullptr && sp->system () == arg_sys (pa));
- auto rp (make_available (o, c, db, sp));
- ap = rp.first;
- af = rp.second; // Could be NULL (orphan).
+ auto rp (make_available (o, pdb, sp));
+ ap = move (rp.first);
+ af = move (rp.second); // Could be NULL (orphan).
}
// We will keep the output directory only if the external package is
@@ -3714,8 +4454,12 @@ namespace bpkg
// Finally add this package to the list.
//
+ // @@ Pass pa.configure_only() when support for package-specific
+ // --configure-only is added.
+ //
build_package p {
build_package::build,
+ pdb,
move (sp),
move (ap),
move (af),
@@ -3724,16 +4468,18 @@ namespace bpkg
{}, // Constraints.
arg_sys (pa),
keep_out,
+ false, // Configure-only.
(pa.options.checkout_root_specified ()
? move (pa.options.checkout_root ())
: optional<dir_path> ()),
pa.options.checkout_purge (),
move (pa.config_vars),
- {package_name ()}, // Required by (command line).
- 0}; // Adjustments.
+ {config_package {mdb, ""}}, // Required by (command line).
+ false, // Required by dependents.
+ 0}; // State flags.
l4 ([&]{trace << "stashing held package "
- << p.available_name_version ();});
+ << p.available_name_version_db ();});
// "Fix" the version the user asked for by adding the constraint.
//
@@ -3741,7 +4487,10 @@ namespace bpkg
// this build_package instance is never replaced).
//
if (pa.constraint)
- p.constraints.emplace_back ("command line", move (*pa.constraint));
+ p.constraints.emplace_back (
+ mdb, "command line", move (*pa.constraint));
+
+ conf_pkgs.emplace_back (p.db, p.name ());
hold_pkgs.push_back (move (p));
}
@@ -3749,6 +4498,10 @@ namespace bpkg
// If this is just pkg-build -u|-p, then we are upgrading all held
// packages.
//
+ // Should we also upgrade the held packages in the explicitly linked
+ // configurations, recursively? Maybe later, and we will probably need a
+ // command line option to enable this behavior.
+ //
if (hold_pkgs.empty () && dep_pkgs.empty () &&
(o.upgrade () || o.patch ()))
{
@@ -3756,15 +4509,14 @@ namespace bpkg
for (shared_ptr<selected_package> sp:
pointer_result (
- db.query<selected_package> (query::state == "configured" &&
- query::hold_package)))
+ mdb.query<selected_package> (query::state == "configured" &&
+ query::hold_package)))
{
// Let's skip upgrading system packages as they are, probably,
// configured as such for a reason.
//
if (sp->system ())
continue;
-
const package_name& name (sp->name);
optional<version_constraint> pc;
@@ -3780,7 +4532,7 @@ namespace bpkg
continue;
}
- auto apr (find_available_one (db, name, pc, root));
+ auto apr (find_available_one (mdb, name, pc, root));
shared_ptr<available_package> ap (move (apr.first));
if (ap == nullptr || ap->stub ())
@@ -3796,7 +4548,7 @@ namespace bpkg
// Let's help the new user out here a bit.
//
- check_any_available (c, t, &dr);
+ check_any_available (mdb, t, &dr);
}
// We will keep the output directory only if the external package is
@@ -3804,24 +4556,30 @@ namespace bpkg
//
bool keep_out (o.keep_out () && sp->external ());
+ // @@ Pass pa.configure_only() when support for package-specific
+ // --configure-only is added.
+ //
build_package p {
- build_package::build,
+ build_package::build,
+ mdb,
move (sp),
move (ap),
move (apr.second),
- true, // Hold package.
- false, // Hold version.
- {}, // Constraints.
- false, // System package.
+ true, // Hold package.
+ false, // Hold version.
+ {}, // Constraints.
+ false, // System package.
keep_out,
- nullopt, // Checkout root.
- false, // Checkout purge.
- strings (), // Configuration variables.
- {package_name ()}, // Required by (command line).
- 0}; // Adjustments.
+ false, // Configure-only.
+ nullopt, // Checkout root.
+ false, // Checkout purge.
+ strings (), // Configuration variables.
+ {config_package {mdb, ""}}, // Required by (command line).
+ false, // Required by dependents.
+ 0}; // State flags.
l4 ([&]{trace << "stashing held package "
- << p.available_name_version ();});
+ << p.available_name_version_db ();});
hold_pkgs.push_back (move (p));
@@ -3830,7 +4588,7 @@ namespace bpkg
//
if (o.immediate () || o.recursive ())
rec_pkgs.push_back (
- recursive_package {name, o.upgrade (), o.recursive ()});
+ recursive_package {mdb, name, o.upgrade (), o.recursive ()});
}
}
@@ -3845,6 +4603,39 @@ namespace bpkg
return 0;
}
+ // Search for the package prerequisite among packages specified on the
+ // command line and, if found, return its desired database. Return NULL
+ // otherwise. The `db` argument specifies the dependent database.
+ //
+ // Note that the semantics of a package specified on the command line is:
+ // build the package in the specified configuration (current by default)
+ // and repoint all dependents of this prerequisite in the current
+ // configuration to this new prerequisite. Thus, the function always
+ // returns NULL for dependents not in the current configuration.
+ //
+ // Also note that we rely on "small function object" optimization here.
+ //
+ const function<find_database_function> find_prereq_database (
+ [&conf_pkgs] (database& db,
+ const package_name& nm,
+ bool buildtime) -> database*
+ {
+ if (db.main ())
+ {
+ auto i (find_if (conf_pkgs.begin (), conf_pkgs.end (),
+ [&nm] (const config_package& i)
+ {
+ return i.name == nm;
+ }));
+
+ if (i != conf_pkgs.end () &&
+ i->db.type == dependency_type (db, nm, buildtime))
+ return &i->db;
+ }
+
+ return nullptr;
+ });
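
A hedged usage sketch (hypothetical caller fragment, not part of the diff): a collecting function would consult find_prereq_database() to decide into which configuration a prerequisite should go, falling back to the dependent's own database when the prerequisite was not specified on the command line. Here `dep_name` is an invented package_name variable.

  // Decide where prerequisite dep_name of a dependent residing in db
  // should be built.
  //
  database* pdb (find_prereq_database (db, dep_name, false /* buildtime */));
  database& tdb (pdb != nullptr ? *pdb : db);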
+
// Assemble the list of packages we will need to build-to-hold, still used
// dependencies to up/down-grade, and unused dependencies to drop. We call
// this the plan.
@@ -3893,17 +4684,70 @@ namespace bpkg
{
struct dep
{
- package_name name; // Empty if up/down-grade.
+ reference_wrapper<database> db;
+ package_name name; // Empty if up/down-grade.
// Both are NULL if drop.
//
- shared_ptr<available_package> available;
+ shared_ptr<available_package> available;
shared_ptr<bpkg::repository_fragment> repository_fragment;
- bool system;
+ bool system;
};
vector<dep> deps;
+ // Map the repointed dependents to the replacement flags (see
+ // repointed_dependents for details).
+ //
+ // Note that the overall plan is to add the replacement prerequisites to
+ // the repointed dependents' prerequisite sets at the beginning of the
+ // refinement loop iteration and remove them right before the plan
+ // execution simulation. This will allow the collecting/ordering
+ // functions to see both kinds of prerequisites (being replaced and
+ // their replacements) and only consider one kind or another or both, as
+ // appropriate.
+ //
+ repointed_dependents rpt_depts;
+ {
+ transaction t (mdb);
+
+ using query = query<selected_package>;
+
+ query q (query::state == "configured");
+
+ for (shared_ptr<selected_package> sp:
+ pointer_result (mdb.query<selected_package> (q)))
+ {
+ map<config_package, bool> ps; // Old/new prerequisites.
+
+ for (const auto& p: sp->prerequisites)
+ {
+ database& db (p.first.database ());
+ const package_name& name (p.first.object_id ());
+
+ auto i (find_if (conf_pkgs.begin (), conf_pkgs.end (),
+ [&name] (const config_package& i)
+ {
+ return i.name == name;
+ }));
+
+ // Only consider a prerequisite if its new configuration is of the
+ // same type as the old one.
+ //
+ if (i != conf_pkgs.end () && i->db != db && i->db.type == db.type)
+ {
+ ps.emplace (config_package {i->db, name}, true);
+ ps.emplace (config_package { db, name}, false);
+ }
+ }
+
+ if (!ps.empty ())
+ rpt_depts.emplace (move (sp), move (ps));
+ }
+
+ t.commit ();
+ }
+
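
To make the mapping above concrete, a hypothetical example (names invented for illustration): if dependent libfoo currently has prerequisite libbar in the current configuration and libbar was specified on the command line for another configuration cfg2 of the same type, then, schematically, the entry would look like:

  // rpt_depts[libfoo] = {{{cfg2, libbar}, true},   // Replacement ("new").
  //                      {{mdb,  libbar}, false}}; // Being replaced ("old").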
// Iteratively refine the plan with dependency up/down-grades/drops.
//
for (bool refine (true), scratch (true); refine; )
@@ -3911,7 +4755,57 @@ namespace bpkg
l4 ([&]{trace << "refining execution plan"
<< (scratch ? " from scratch" : "");});
- transaction t (db);
+ transaction t (mdb);
+
+ // Temporarily add the replacement prerequisites to the repointed
+ // dependents' prerequisite sets and persist the changes.
+ //
+ // Note that we don't copy the prerequisite constraints into the
+ // replacements, since they are unused in the collecting/ordering
+ // logic.
+ //
+ for (auto& rd: rpt_depts)
+ {
+ const shared_ptr<selected_package>& sp (rd.first);
+
+ for (const auto& prq: rd.second)
+ {
+ if (prq.second) // Prerequisite replacement?
+ {
+ const config_package& cp (prq.first);
+
+ auto i (sp->prerequisites.emplace (
+ lazy_shared_ptr<selected_package> (cp.db, cp.name),
+ nullopt));
+
+ // The selected package should only contain the old
+ // prerequisites at this time, so adding a replacement should
+ // always succeed.
+ //
+ assert (i.second);
+ }
+ }
+
+ mdb.update (sp);
+ }
+
+ // Private configurations that were created during collection of the
+ // package builds.
+ //
+ // Note that the private configurations are linked to their parent
+ // configurations right after being created, so that the subsequent
+ // collecting, ordering, and plan execution simulation logic can use
+ // them. However, we cannot easily commit these changes at some
+ // point, since there could also be some other changes made to the
+ // database which need to be rolled back at the end of the refinement
+ // iteration.
+ //
+ // Thus, the plan is to collect configurations where the private
+ // configurations were created and, after the transaction is rolled
+ // back, re-link these configurations and persist the changes using
+ // the new transaction.
+ //
+ private_configs priv_cfgs;
build_packages::postponed_packages postponed;
@@ -3932,23 +4826,27 @@ namespace bpkg
for (const dependency_package& p: dep_pkgs)
{
build_package bp {
- nullopt, // Action.
- nullptr, // Selected package.
- nullptr, // Available package/repository frag.
+ nullopt, // Action.
+ p.db,
+ nullptr, // Selected package.
+ nullptr, // Available package/repository frag.
nullptr,
- false, // Hold package.
- p.constraint.has_value (), // Hold version.
- {}, // Constraints.
+ false, // Hold package.
+ p.constraint.has_value (), // Hold version.
+ {}, // Constraints.
p.system,
p.keep_out,
+ false, // Configure-only.
p.checkout_root,
p.checkout_purge,
p.config_vars,
- {package_name ()}, // Required by (command line).
- 0}; // Adjustments.
+ {config_package {mdb, ""}}, // Required by (command line).
+ false, // Required by dependents.
+ 0}; // State flags.
if (p.constraint)
- bp.constraints.emplace_back ("command line", *p.constraint);
+ bp.constraints.emplace_back (
+ mdb, "command line", *p.constraint);
pkgs.enter (p.name, move (bp));
}
@@ -3958,12 +4856,22 @@ namespace bpkg
// specify packages on the command line does not matter).
//
for (const build_package& p: hold_pkgs)
- pkgs.collect_build (o, c, db, p);
+ pkgs.collect_build (o,
+ p,
+ find_prereq_database,
+ rpt_depts,
+ priv_cfgs);
// Collect all the prerequisites of the user selection.
//
for (const build_package& p: hold_pkgs)
- pkgs.collect_build_prerequisites (o, c, db, p.name (), postponed);
+ pkgs.collect_build_prerequisites (o,
+ p.db,
+ p.name (),
+ postponed,
+ find_prereq_database,
+ rpt_depts,
+ priv_cfgs);
// Note that we need to collect unheld after prerequisites, not to
// overwrite the pre-entered entries before they are used to provide
@@ -3972,9 +4880,19 @@ namespace bpkg
for (const dependency_package& p: dep_pkgs)
{
if (p.selected != nullptr && p.selected->hold_package)
- pkgs.collect_unhold (p.selected);
+ pkgs.collect_unhold (p.db, p.selected);
}
+ // Collect dependents whose dependencies need to be repointed to
+ // packages from different configurations.
+ //
+ pkgs.collect_repointed_dependents (o,
+ mdb,
+ rpt_depts,
+ postponed,
+ find_prereq_database,
+ priv_cfgs);
+
scratch = false;
}
else
@@ -3985,12 +4903,16 @@ namespace bpkg
//
for (const dep& d: deps)
{
+ database& ddb (d.db);
+
if (d.available == nullptr)
- pkgs.collect_drop (db.load<selected_package> (d.name));
+ {
+ pkgs.collect_drop (ddb, ddb.load<selected_package> (d.name));
+ }
else
{
shared_ptr<selected_package> sp (
- db.find<selected_package> (d.name));
+ ddb.find<selected_package> (d.name));
// We will keep the output directory only if the external package
// is replaced with an external one (see above for details).
@@ -4005,28 +4927,40 @@ namespace bpkg
//
build_package p {
build_package::build,
+ ddb,
move (sp),
d.available,
d.repository_fragment,
- nullopt, // Hold package.
- nullopt, // Hold version.
- {}, // Constraints.
+ nullopt, // Hold package.
+ nullopt, // Hold version.
+ {}, // Constraints.
d.system,
keep_out,
- nullopt, // Checkout root.
- false, // Checkout purge.
- strings (), // Configuration variables.
- {package_name ()}, // Required by (command line).
- 0}; // Adjustments.
-
- pkgs.collect_build (o, c, db, p, &postponed /* recursively */);
+ false, // Configure-only.
+ nullopt, // Checkout root.
+ false, // Checkout purge.
+ strings (), // Configuration variables.
+ {config_package {mdb, ""}}, // Required by (command line).
+ false, // Required by dependents.
+ 0}; // State flags.
+
+ pkgs.collect_build (o,
+ move (p),
+ find_prereq_database,
+ rpt_depts,
+ priv_cfgs,
+ &postponed /* recursively */);
}
}
// Handle the (combined) postponed collection.
//
if (!postponed.empty ())
- pkgs.collect_build_postponed (o, c, db, postponed);
+ pkgs.collect_build_postponed (o,
+ postponed,
+ find_prereq_database,
+ rpt_depts,
+ priv_cfgs);
// Now that we have collected all the package versions that we need to
// build, arrange them in the "dependency order", that is, with every
@@ -4042,16 +4976,30 @@ namespace bpkg
// appear (e.g., on the plan) last.
//
for (const dep& d: deps)
- pkgs.order (d.name, false /* reorder */);
+ pkgs.order (d.db,
+ d.name,
+ nullopt /* buildtime */,
+ find_prereq_database,
+ false /* reorder */);
for (const build_package& p: reverse_iterate (hold_pkgs))
- pkgs.order (p.name ());
+ pkgs.order (p.db,
+ p.name (),
+ nullopt /* buildtime */,
+ find_prereq_database);
+
+ for (const auto& rd: rpt_depts)
+ pkgs.order (mdb,
+ rd.first->name,
+ nullopt /* buildtime */,
+ find_prereq_database,
+ false /* reorder */);
// Collect and order all the dependents that we will need to
// reconfigure because of the up/down-grades of packages that are now
// on the list.
//
- pkgs.collect_order_dependents (db);
+ pkgs.collect_order_dependents (rpt_depts);
// And, finally, make sure all the packages that we need to unhold
// are on the list.
@@ -4059,7 +5007,39 @@ namespace bpkg
for (const dependency_package& p: dep_pkgs)
{
if (p.selected != nullptr && p.selected->hold_package)
- pkgs.order (p.name, false /* reorder */);
+ pkgs.order (p.db,
+ p.name,
+ nullopt /* buildtime */,
+ find_prereq_database,
+ false /* reorder */);
+ }
+
+ // Now that we are done collecting/ordering the package builds, erase
+ // the replacements from the repointed dependents' prerequisite sets
+ // and persist the changes.
+ //
+ for (auto& rd: rpt_depts)
+ {
+ const shared_ptr<selected_package>& sp (rd.first);
+
+ for (const auto& prq: rd.second)
+ {
+ if (prq.second) // Prerequisite replacement?
+ {
+ const config_package& cp (prq.first);
+
+ size_t n (sp->prerequisites.erase (
+ lazy_shared_ptr<selected_package> (cp.db, cp.name)));
+
+ // The selected package should always contain the prerequisite
+ // replacement at this time, so its removal should always
+ // succeed.
+ //
+ assert (n == 1);
+ }
+ }
+
+ mdb.update (sp);
}
// We are about to execute the plan on the database (but not on the
@@ -4068,36 +5048,65 @@ namespace bpkg
// below for details).
//
using selected_packages = session::object_map<selected_package>;
- auto selected_packages_session = [&db, &ses] () -> selected_packages*
+ auto sp_session = [] (const auto& tm) -> selected_packages*
{
- auto& m (ses.map ()[&db]);
- auto i (m.find (&typeid (selected_package)));
- return (i != m.end ()
+ auto i (tm.find (&typeid (selected_package)));
+ return (i != tm.end ()
? &static_cast<selected_packages&> (*i->second)
: nullptr);
};
- selected_packages old_sp;
- if (const selected_packages* sp = selected_packages_session ())
- old_sp = *sp;
+ map<const odb::database*, selected_packages> old_sp;
+
+ for (const auto& dps: ses.map ())
+ {
+ if (const selected_packages* sps = sp_session (dps.second))
+ old_sp.emplace (dps.first, *sps);
+ }
// Note that we need to perform the execution on the copies of the
// build/drop_package objects to preserve the original ones. The
// selected_package objects will still be changed so we will reload
// them afterwards (see below).
//
+ // After the plan execution simulation, save the packages being built
+ // (selected non-system packages) for the subsequent dependency
+ // hierarchies verification.
+ //
+ bool changed;
+ vector<pair<database&, shared_ptr<selected_package>>> build_pkgs;
{
vector<build_package> tmp (pkgs.begin (), pkgs.end ());
build_package_list bl (tmp.begin (), tmp.end ());
- execute_plan (o, c, db, bl, true /* simulate */);
+ changed = execute_plan (o,
+ bl,
+ true /* simulate */,
+ find_prereq_database);
+
+ if (changed)
+ {
+ for (build_package& p: bl)
+ {
+ shared_ptr<selected_package>& sp (p.selected);
+
+ if (sp != nullptr)
+ {
+ if (!sp->system ())
+ build_pkgs.emplace_back (p.db, move (sp));
+ }
+ else
+ assert (p.action && *p.action == build_package::drop);
+ }
+ }
}
// Return nullopt if no changes to the dependency are necessary. This
// value covers both the "no change is required" and the "no
// recommendation available" cases.
//
- auto eval_dep = [&db, &dep_pkgs, &rec_pkgs] (
+ auto eval_dep = [&dep_pkgs, &rec_pkgs] (
+ database& db,
const shared_ptr<selected_package>& sp,
bool ignore_unsatisfiable = true) -> optional<evaluate_result>
{
@@ -4106,7 +5115,7 @@ namespace bpkg
// See if there is an optional dependency upgrade recommendation.
//
if (!sp->hold_package)
- r = evaluate_dependency (db, dep_pkgs, sp, ignore_unsatisfiable);
+ r = evaluate_dependency (db, sp, dep_pkgs, ignore_unsatisfiable);
// If none, then see for the recursive dependency upgrade
// recommendation.
@@ -4115,7 +5124,7 @@ namespace bpkg
// configured as such for a reason.
//
if (!r && !sp->system () && !rec_pkgs.empty ())
- r = evaluate_recursive (db, rec_pkgs, sp, ignore_unsatisfiable);
+ r = evaluate_recursive (db, sp, rec_pkgs, ignore_unsatisfiable);
// Translate the "no change" result to nullopt.
//
@@ -4125,16 +5134,18 @@ namespace bpkg
// The empty version means that the package must be dropped.
//
const version ev;
- auto target_version = [&ev] (const shared_ptr<available_package>& ap,
- bool sys) -> const version&
+ auto target_version = [&ev]
+ (database& db,
+ const shared_ptr<available_package>& ap,
+ bool sys) -> const version&
{
if (ap == nullptr)
return ev;
if (sys)
{
- assert (ap->system_version () != nullptr);
- return *ap->system_version ();
+ assert (ap->system_version (db) != nullptr);
+ return *ap->system_version (db);
}
return ap->version;
@@ -4147,15 +5158,17 @@ namespace bpkg
{
bool s (false);
+ database& db (i->db);
+
// Here we scratch if evaluate changed its mind or if the resulting
// version doesn't match what we expect it to be.
//
if (auto sp = db.find<selected_package> (i->name))
{
- const version& dv (target_version (i->available, i->system));
+ const version& dv (target_version (db, i->available, i->system));
- if (optional<evaluate_result> r = eval_dep (sp))
- s = dv != target_version (r->available, r->system) ||
+ if (optional<evaluate_result> r = eval_dep (db, sp))
+ s = dv != target_version (db, r->available, r->system) ||
i->system != r->system;
else
s = dv != sp->version || i->system != sp->system ();
@@ -4172,7 +5185,24 @@ namespace bpkg
++i;
}
- if (!scratch)
+ // If the execute_plan() call was a noop, there are no user expectations
+ // regarding any dependency, and no upgrade is requested, then the
+ // only possible refinement outcome can be recommendations to drop
+ // unused dependencies (that the user has refused to drop on the
+ // previous build or drop command run). Thus, if the --keep-unused|-K
+ // or --no-refinement option is also specified, then we omit the
+ // need_refinement() call altogether and assume that no refinement is
+ // required.
+ //
+ if (!changed && dep_pkgs.empty () && rec_pkgs.empty ())
+ {
+ assert (!scratch); // No reason to change any previous decision.
+
+ if (o.keep_unused () || o.no_refinement ())
+ refine = false;
+ }
+
+ if (!scratch && refine)
{
// First, we check if the refinement is required, ignoring the
// unsatisfiable dependency version constraints. If we end up
@@ -4182,12 +5212,12 @@ namespace bpkg
// make sure that the unsatisfiable dependency, if left, is
// reported.
//
- auto need_refinement = [&eval_dep, &deps, &rec_pkgs, &db, &o] (
+ auto need_refinement = [&eval_dep, &deps, &rec_pkgs, &mdb, &o] (
bool diag = false) -> bool
{
// Examine the new dependency set for any up/down-grade/drops.
//
- bool r (false); // Presumably no more refinments are necessary.
+ bool r (false); // Presumably no more refinements are necessary.
using query = query<selected_package>;
@@ -4196,22 +5226,32 @@ namespace bpkg
if (rec_pkgs.empty ())
q = q && !query::hold_package;
- for (shared_ptr<selected_package> sp:
- pointer_result (db.query<selected_package> (q)))
+ // It seems right to only evaluate dependencies in the explicitly
+ // linked configurations, recursively. Indeed, we shouldn't be
+ // up/down-grading or dropping packages in configurations that
+ // only contain dependents, some of which we may only reconfigure.
+ //
+ for (database& ldb: mdb.dependency_configs ())
{
- if (optional<evaluate_result> er = eval_dep (sp, !diag))
+ for (shared_ptr<selected_package> sp:
+ pointer_result (ldb.query<selected_package> (q)))
{
- // Skip unused if we were instructed to keep them.
- //
- if (o.keep_unused () && er->available == nullptr)
- continue;
+ if (optional<evaluate_result> er = eval_dep (ldb, sp, !diag))
+ {
+ // Skip unused if we were instructed to keep them.
+ //
+ if (o.keep_unused () && er->available == nullptr)
+ continue;
+
+ if (!diag)
+ deps.push_back (dep {er->db,
+ sp->name,
+ move (er->available),
+ move (er->repository_fragment),
+ er->system});
- if (!diag)
- deps.push_back (dep {sp->name,
- move (er->available),
- move (er->repository_fragment),
- er->system});
- r = true;
+ r = true;
+ }
}
}
@@ -4224,18 +5264,241 @@ namespace bpkg
need_refinement (true /* diag */);
}
+ // Note that we prevent building multiple instances of the same
+ // package dependency in different configurations (of the same type)
+ // while creating the build plan. However, we may potentially end up
+ // with the same dependency in multiple configurations since we do not
+ // descend into prerequisites of already configured packages which
+ // require no up/downgrade.
+ //
+ // To prevent this, we additionally verify that none of the dependency
+ // hierarchies of the packages being built contains the same runtime
+ // dependency, built in multiple configurations.
+ //
+ // Note that we also fail for a system dependency configured in
+ // multiple configurations, since these configurations can potentially
+ // be configured differently and so these system packages can refer to
+ // different targets.
+ //
+ if (changed && !refine)
+ {
+ // Verify the specified package dependency hierarchy and return the
+ // set of packages plus their runtime dependencies, including
+ // indirect ones. Fail if a dependency cycle is detected.
+ //
+ // Also add the result into the `package_prereqs` map, to use it as
+ // a cache and for subsequent additional dependency verification.
+ //
+ // Note that all the encountered dependency sub-hierarchies that
+ // reside in configurations of different types (or beneath them) are
+ // also verified but not included in the resulting set.
+ //
+ using prerequisites = set<lazy_shared_ptr<selected_package>,
+ compare_lazy_ptr_id>;
+
+ map<config_package, prerequisites> package_prereqs;
+ small_vector<config_selected_package, 16> chain;
+
+ auto verify_dependencies = [&package_prereqs, &chain]
+ (database& db,
+ shared_ptr<selected_package> sp,
+ const auto& verify_dependencies)
+ -> const prerequisites&
+ {
+ // Return the cached value, if present.
+ //
+ config_package cp {db, sp->name};
+ {
+ auto i (package_prereqs.find (cp));
+
+ if (i != package_prereqs.end ())
+ return i->second;
+ }
+
+ // Make sure there is no dependency cycle.
+ //
+ config_selected_package csp {db, sp};
+ {
+ auto i (find (chain.begin (), chain.end (), csp));
+
+ if (i != chain.end ())
+ {
+ diag_record dr (fail);
+ dr << "dependency cycle detected involving package " << *sp
+ << db;
+
+ // Note: push_back() can invalidate the iterator.
+ //
+ size_t j (i - chain.begin ());
+
+ for (chain.push_back (csp); j != chain.size () - 1; ++j)
+ dr << info << *chain[j].package << chain[j].db
+ << " depends on "
+ << *chain[j + 1].package << chain[j + 1].db;
+ }
+ }
+
+ chain.push_back (csp);
+
+ // Verify all prerequisites, but only collect those corresponding
+ // to the runtime dependencies.
+ //
+ // Indeed, we don't care if a linked host configuration contains a
+ // configured package that we also have configured in our target
+ // configuration. It's also fine if some of our runtime
+ // dependencies from different configurations build-time depend on
+ // the same package (of potentially different versions) configured
+ // in different host configurations.
+ //
+ // Note, however, that we cannot easily determine if the
+ // prerequisite corresponds to the runtime or build-time
+ // dependency, since we only store its version constraint. The
+ // current implementation relies on the fact that the build-time
+ // dependency configuration type (host or build2) differs from the
+ // dependent configuration type (target is a common case) and
+ // doesn't work well, for example, for the self-hosted
+ // configurations. For them it can fail erroneously. We can
+ // potentially fix that by additionally storing the build-time
+ // flag besides the version constraint. However, let's first see
+ // if it ever becomes a problem.
+ //
+ prerequisites r;
+ const package_prerequisites& prereqs (sp->prerequisites);
+
+ for (const auto& prereq: prereqs)
+ {
+ const lazy_shared_ptr<selected_package>& p (prereq.first);
+ database& pdb (p.database ());
+
+ // Also validate the prerequisite sub-hierarchy in configurations of
+ // a different type, but do not collect it.
+ //
+ const prerequisites& ps (
+ verify_dependencies (pdb, p.load (), verify_dependencies));
+
+ if (pdb.type != db.type)
+ continue;
+
+ // Collect prerequisite sub-hierarchy, checking that none of the
+ // packages are already collected.
+ //
+ for (const lazy_shared_ptr<selected_package>& p: ps)
+ {
+ // Note: compare_lazy_ptr_id only considers package names.
+ //
+ auto i (r.find (p));
+
+ if (i != r.end ())
+ {
+ database& db1 (p.database ());
+ database& db2 (i->database ());
+
+ if (db1 != db2)
+ {
+ bool indirect (prereqs.find (p) == prereqs.end ());
+
+ fail << "package " << p.object_id ()
+ << (indirect ? " indirectly" : "") << " required by "
+ << *sp << db << " is configured in multiple "
+ << "configurations" <<
+ info << *p.load () << db1 <<
+ info << *i->load () << db2;
+ }
+ }
+ else
+ r.insert (p);
+ }
+ }
+
+ chain.pop_back ();
+
+ // Collect the dependent package itself.
+ //
+ r.insert (lazy_shared_ptr<selected_package> (db, move (sp)));
+
+ // Cache the resulting package prerequisites set and return a
+ // reference to it.
+ //
+ auto j (package_prereqs.emplace (move (cp), move (r)));
+ assert (j.second); // A package cannot depend on itself.
+
+ return j.first->second;
+ };
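
The lambda above passes itself as its last argument in order to recurse. As a standalone aside (hypothetical example, not part of the diff), the same C++14 idiom in isolation looks like this:

  #include <cassert>

  int main ()
  {
    // A generic lambda cannot refer to itself by name, so it receives
    // itself as an argument and forwards it on every recursive call.
    //
    auto fact = [] (int n, const auto& fact) -> int
    {
      return n > 1 ? n * fact (n - 1, fact) : 1;
    };

    assert (fact (5, fact) == 120);
  }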
+
+ for (auto& p: build_pkgs)
+ verify_dependencies (p.first,
+ move (p.second),
+ verify_dependencies);
+
+ // Now, verify that none of the build2 modules may simultaneously be
+ // built in multiple configurations, across all (potentially
+ // unrelated) dependency trees.
+ //
+ // For that we use the `package_prereqs` map: its key set refers to
+ // all the packages potentially involved in the build (explicitly
+ // or implicitly).
+ //
+ {
+ map<package_name, database&> build2_mods;
+
+ for (const auto& pp: package_prereqs)
+ {
+ const config_package& cp (pp.first);
+
+ // Skip packages other than the build2 modules.
+ //
+ if (!build2_module (cp.name))
+ continue;
+
+ // Skip build2 modules configured as system.
+ //
+ {
+ shared_ptr<selected_package> sp (
+ cp.db.find<selected_package> (cp.name));
+
+ assert (sp != nullptr);
+
+ if (sp->system ())
+ continue;
+ }
+
+ auto i (build2_mods.emplace (cp.name, cp.db));
+
+ if (!i.second)
+ {
+ database& db (i.first->second);
+
+ // The `package_prereqs` map can only contain the same package
+ // twice if databases differ.
+ //
+ assert (db != cp.db);
+
+ fail << "building build system module " << cp.name << " in "
+ << "multiple configurations" <<
+ info << db.config_orig <<
+ info << cp.db.config_orig;
+ }
+ }
+ }
+ }
+
// Rollback the changes to the database and reload the changed
// selected_package objects.
//
t.rollback ();
{
- transaction t (db);
+ transaction t (mdb);
// First reload all the selected_package object that could have been
// modified (conceptually, we should only modify what's on the
// plan). And in case of drop the object is removed from the session
// so we need to bring it back.
//
+ // Make sure that selected packages are only owned by the session
+ // and the build package list.
+ //
+ build_pkgs.clear ();
+
// Note: we use the original pkgs list since the executed ones may
// contain newly created (but now gone) selected_package objects.
//
@@ -4243,62 +5506,128 @@ namespace bpkg
{
assert (p.action);
+ database& pdb (p.db);
+
if (*p.action == build_package::drop)
{
assert (p.selected != nullptr);
ses.cache_insert<selected_package> (
- db, p.selected->name, p.selected);
+ pdb, p.selected->name, p.selected);
}
if (p.selected != nullptr)
- db.reload (*p.selected);
+ pdb.reload (*p.selected);
}
// Now remove all the newly created selected_package objects from
// the session. The tricky part is to distinguish newly created ones
// from newly loaded (and potentially cached).
//
- if (selected_packages* sp = selected_packages_session ())
+ for (bool rescan (true); rescan; )
{
- for (bool rescan (true); rescan; )
- {
- rescan = false;
+ rescan = false;
- for (auto i (sp->begin ()); i != sp->end (); )
+ for (const auto& dps: ses.map ())
+ {
+ if (selected_packages* sps = sp_session (dps.second))
{
- bool erased (false);
- auto j (old_sp.find (i->first));
-
+ auto j (old_sp.find (dps.first)); // Find the database.
+
+ // Note that if a database has been introduced only during
+ // simulation, then we could just clear all its selected
+ // packages in one shot. Let's, however, be cautious and remove
+ // them iteratively to make sure that none of them are left at
+ // the end (no more rescan is necessary). If any of them is
+ // left, then that would mean it is referenced from somewhere
+ // besides the session object, which would be a bug.
+ //
if (j == old_sp.end ())
{
- if (i->second.use_count () == 1)
+ if (!sps->empty ())
+ {
+ for (auto i (sps->begin ()); i != sps->end (); )
+ {
+ if (i->second.use_count () == 1)
+ {
+ // This might cause another object's use count to drop.
+ //
+ i = sps->erase (i);
+ rescan = true;
+ }
+ else
+ ++i;
+ }
+ }
+
+ continue;
+ }
+
+ const selected_packages& osp (j->second);
+
+ for (auto i (sps->begin ()); i != sps->end (); )
+ {
+ bool erased (false);
+ auto j (osp.find (i->first));
+
+ if (j == osp.end ())
+ {
+ if (i->second.use_count () == 1)
+ {
+ // This might cause another object's use count to drop.
+ //
+ i = sps->erase (i);
+ erased = true;
+ rescan = true;
+ }
+ }
+ // It may also happen that the object was erased from the
+ // database and then recreated. In this case we restore the
+ // pointer that is stored in the session.
+ //
+ else if (i->second != j->second)
{
// This might cause another object's use count to drop.
//
- i = sp->erase (i);
- erased = true;
+ i->second = j->second;
rescan = true;
}
+
+ if (!erased)
+ ++i;
}
- // It may also happen that the object was erased from the
- // database and then recreated. In this case we restore the
- // pointer that is stored in the session.
- //
- else if (i->second != j->second)
+ }
+ }
+
+ // Verify that all the selected packages of the databases newly
+ // introduced during simulation are erased (see above for the
+ // reasoning).
+ //
+ if (!rescan)
+ {
+ for (const auto& dps: ses.map ())
+ {
+ if (const selected_packages* sps = sp_session (dps.second))
{
- // This might cause another object's use count to drop.
- //
- i->second = j->second;
- rescan = true;
+ if (old_sp.find (dps.first) == old_sp.end ())
+ assert (sps->empty ());
}
-
- if (!erased)
- ++i;
}
}
}
+ // Re-link the private configurations created during the collection
+ // of the package builds to their parent configurations. Note that
+ // these links were lost on the previous transaction rollback.
+ //
+ for (const pair<database&, dir_path>& pc: priv_cfgs)
+ cfg_link (pc.first,
+ pc.first.config / pc.second,
+ true /* relative */,
+ nullopt /* name */,
+ true /* sys_rep */);
+
t.commit ();
}
}
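The rescan loop above relies on shared_ptr::use_count (): a session entry whose only remaining owner is the session itself (use_count () == 1) is erased, and since erasing one object can release its references to others, the scan is repeated until a full pass erases nothing. A minimal standalone sketch of this pattern (simplified types, not bpkg code):

  #include <map>
  #include <memory>
  #include <string>
  #include <cassert>

  int main ()
  {
    // A session-like cache of shared objects keyed by name.
    //
    std::map<std::string, std::shared_ptr<int>> cache;

    auto a (std::make_shared<int> (1));
    cache["a"] = a;                         // Also referenced externally.
    cache["b"] = std::make_shared<int> (2); // Owned only by the cache.

    // Iteratively erase entries owned only by the cache. Erasing one
    // entry may drop another entry's use count to one (if the erased
    // object held references to it), hence the rescan until nothing
    // more is erased.
    //
    for (bool rescan (true); rescan; )
    {
      rescan = false;

      for (auto i (cache.begin ()); i != cache.end (); )
      {
        if (i->second.use_count () == 1)
        {
          i = cache.erase (i);
          rescan = true;
        }
        else
          ++i;
      }
    }

    assert (cache.size () == 1 && cache.count ("a") == 1);
  }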
@@ -4325,6 +5654,7 @@ namespace bpkg
for (const build_package& p: reverse_iterate (pkgs))
{
+ database& pdb (p.db);
const shared_ptr<selected_package>& sp (p.selected);
string act;
@@ -4333,7 +5663,7 @@ namespace bpkg
if (*p.action == build_package::drop)
{
- act = "drop " + sp->string () + " (unused)";
+ act = "drop " + sp->string (pdb) + " (unused)";
need_prompt = true;
}
else
@@ -4369,6 +5699,10 @@ namespace bpkg
}
act += ' ' + sp->name.string ();
+
+ string s (pdb.string ());
+ if (!s.empty ())
+ act += ' ' + s;
}
else
{
@@ -4386,13 +5720,15 @@ namespace bpkg
//
if (!p.reconfigure () &&
sp->state == package_state::configured &&
- (!p.user_selection () || o.configure_only ()))
+ (!p.user_selection () ||
+ o.configure_only () ||
+ p.configure_only ()))
continue;
act = p.system
? "reconfigure"
: (p.reconfigure ()
- ? (o.configure_only ()
+ ? (o.configure_only () || p.configure_only ()
? "reconfigure"
: "reconfigure/update")
: "update");
@@ -4411,15 +5747,18 @@ namespace bpkg
if (p.unhold ())
act += "/unhold";
- act += ' ' + p.available_name_version ();
- cause = "required by";
+ act += ' ' + p.available_name_version_db ();
+ cause = p.required_by_dependents ? "required by" : "dependent of";
+
+ if (p.configure_only ())
+ update_dependents = true;
}
string rb;
if (!p.user_selection ())
{
- for (const package_name& n: p.required_by)
- rb += ' ' + n.string ();
+ for (const config_package& cp: p.required_by)
+ rb += ' ' + cp.string ();
// If not user-selected, then there should be another (implicit)
// reason for the action.
@@ -4499,7 +5838,7 @@ namespace bpkg
// prerequisites got upgraded/downgraded and that the user may want to in
// addition update (that update_dependents flag above).
//
- execute_plan (o, c, db, pkgs, false /* simulate */);
+ execute_plan (o, pkgs, false /* simulate */, find_prereq_database);
if (o.configure_only ())
return 0;
@@ -4517,14 +5856,17 @@ namespace bpkg
{
assert (p.action);
- if (*p.action != build_package::build)
+ if (*p.action != build_package::build || p.configure_only ())
continue;
+ database& db (p.db);
const shared_ptr<selected_package>& sp (p.selected);
if (!sp->system () && // System package doesn't need update.
p.user_selection ())
- upkgs.push_back (pkg_command_vars {sp,
+ upkgs.push_back (pkg_command_vars {db.config_orig,
+ db.main (),
+ sp,
strings () /* vars */,
false /* cwd */});
}
@@ -4538,35 +5880,41 @@ namespace bpkg
{
assert (p.action);
- if (*p.action == build_package::adjust && p.reconfigure ())
- upkgs.push_back (pkg_command_vars {p.selected,
+ database& db (p.db);
+
+ if ((*p.action == build_package::adjust && p.reconfigure ()) ||
+ (*p.action == build_package::build &&
+ (p.flags & build_package::build_repoint) != 0))
+ upkgs.push_back (pkg_command_vars {db.config_orig,
+ db.main (),
+ p.selected,
strings () /* vars */,
false /* cwd */});
}
}
- pkg_update (c, o, o.for_ (), strings (), upkgs);
+ pkg_update (o, o.for_ (), strings (), upkgs);
if (verb && !o.no_result ())
{
for (const pkg_command_vars& pv: upkgs)
- text << "updated " << *pv.pkg;
+ text << "updated " << pv.string ();
}
return 0;
}
- static void
+ static bool
execute_plan (const pkg_build_options& o,
- const dir_path& c,
- database& db,
build_package_list& build_pkgs,
- bool simulate)
+ bool simulate,
+ const function<find_database_function>& fdb)
{
tracer trace ("execute_plan");
l4 ([&]{trace << "simulate: " << (simulate ? "yes" : "no");});
+ bool r (false);
uint16_t verbose (!simulate ? verb : 0);
// disfigure
@@ -4581,12 +5929,13 @@ namespace bpkg
if (*p.action != build_package::drop && !p.reconfigure ())
continue;
+ database& pdb (p.db);
shared_ptr<selected_package>& sp (p.selected);
// Each package is disfigured in its own transaction, so that we
// always leave the configuration in a valid state.
//
- transaction t (db, !simulate /* start */);
+ transaction t (pdb, !simulate /* start */);
// Reset the flag if the package being unpacked is not an external one.
//
@@ -4620,7 +5969,9 @@ namespace bpkg
// Commits the transaction.
//
- pkg_disfigure (c, o, t, sp, !p.keep_out, simulate);
+ pkg_disfigure (o, pdb, t, sp, !p.keep_out, simulate);
+
+ r = true;
assert (sp->state == package_state::unpacked ||
sp->state == package_state::transient);
@@ -4628,7 +5979,7 @@ namespace bpkg
if (verbose && !o.no_result ())
text << (sp->state == package_state::transient
? "purged "
- : "disfigured ") << *sp;
+ : "disfigured ") << *sp << pdb;
// Selected system package is now gone from the database. Before we drop
// the object we need to make sure the hold state is preserved in the
@@ -4652,6 +6003,8 @@ namespace bpkg
{
assert (p.action);
+ database& pdb (p.db);
+
shared_ptr<selected_package>& sp (p.selected);
const shared_ptr<available_package>& ap (p.available);
@@ -4669,11 +6022,13 @@ namespace bpkg
{
assert (!sp->system ());
- transaction t (db, !simulate /* start */);
- pkg_purge (c, t, sp, simulate); // Commits the transaction.
+ transaction t (pdb, !simulate /* start */);
+ pkg_purge (pdb, t, sp, simulate); // Commits the transaction.
+
+ r = true;
if (verbose && !o.no_result ())
- text << "purged " << *sp;
+ text << "purged " << *sp << pdb;
sp = nullptr;
}
@@ -4698,11 +6053,13 @@ namespace bpkg
{
if (sp != nullptr && !sp->system ())
{
- transaction t (db, !simulate /* start */);
- pkg_purge (c, t, sp, simulate); // Commits the transaction.
+ transaction t (pdb, !simulate /* start */);
+ pkg_purge (pdb, t, sp, simulate); // Commits the transaction.
+
+ r = true;
if (verbose && !o.no_result ())
- text << "purged " << *sp;
+ text << "purged " << *sp << pdb;
if (!p.hold_package)
p.hold_package = sp->hold_package;
@@ -4729,7 +6086,7 @@ namespace bpkg
if (pl.repository_fragment.object_id () != "") // Special root?
{
- transaction t (db, !simulate /* start */);
+ transaction t (pdb, !simulate /* start */);
// Go through package repository fragments to decide if we should
// fetch, checkout or unpack depending on the available repository
@@ -4762,7 +6119,7 @@ namespace bpkg
case repository_basis::archive:
{
sp = pkg_fetch (o,
- c,
+ pdb,
t,
ap->id.name,
p.available_version (),
@@ -4774,7 +6131,7 @@ namespace bpkg
{
sp = p.checkout_root
? pkg_checkout (o,
- c,
+ pdb,
t,
ap->id.name,
p.available_version (),
@@ -4783,7 +6140,7 @@ namespace bpkg
p.checkout_purge,
simulate)
: pkg_checkout (o,
- c,
+ pdb,
t,
ap->id.name,
p.available_version (),
@@ -4794,7 +6151,7 @@ namespace bpkg
case repository_basis::directory:
{
sp = pkg_unpack (o,
- c,
+ pdb,
t,
ap->id.name,
p.available_version (),
@@ -4808,11 +6165,11 @@ namespace bpkg
//
else if (exists (pl.location))
{
- transaction t (db, !simulate /* start */);
+ transaction t (pdb, !simulate /* start */);
sp = pkg_fetch (
o,
- c,
+ pdb,
t,
pl.location, // Archive path.
true, // Replace
@@ -4822,6 +6179,8 @@ namespace bpkg
if (sp != nullptr) // Actually fetched or checked out something?
{
+ r = true;
+
assert (sp->state == package_state::fetched ||
sp->state == package_state::unpacked);
@@ -4841,19 +6200,19 @@ namespace bpkg
case repository_basis::archive:
{
assert (sp->state == package_state::fetched);
- dr << "fetched " << *sp;
+ dr << "fetched " << *sp << pdb;
break;
}
case repository_basis::directory:
{
assert (sp->state == package_state::unpacked);
- dr << "using " << *sp << " (external)";
+ dr << "using " << *sp << pdb << " (external)";
break;
}
case repository_basis::version_control:
{
assert (sp->state == package_state::unpacked);
- dr << "checked out " << *sp;
+ dr << "checked out " << *sp << pdb;
break;
}
}
@@ -4868,23 +6227,23 @@ namespace bpkg
{
if (sp != nullptr)
{
- transaction t (db, !simulate /* start */);
+ transaction t (pdb, !simulate /* start */);
// Commits the transaction.
//
- sp = pkg_unpack (o, c, t, ap->id.name, simulate);
+ sp = pkg_unpack (o, pdb, t, ap->id.name, simulate);
if (verbose && !o.no_result ())
- text << "unpacked " << *sp;
+ text << "unpacked " << *sp << pdb;
}
else
{
const package_location& pl (ap->locations[0]);
assert (pl.repository_fragment.object_id () == ""); // Special root.
- transaction t (db, !simulate /* start */);
+ transaction t (pdb, !simulate /* start */);
sp = pkg_unpack (o,
- c,
+ pdb,
t,
path_cast<dir_path> (pl.location),
true, // Replace.
@@ -4892,9 +6251,11 @@ namespace bpkg
simulate);
if (verbose && !o.no_result ())
- text << "using " << *sp << " (external)";
+ text << "using " << *sp << pdb << " (external)";
}
+ r = true;
+
assert (sp->state == package_state::unpacked);
}
@@ -4927,14 +6288,37 @@ namespace bpkg
if (sp != nullptr && sp->state == package_state::configured)
continue;
- transaction t (db, !simulate /* start */);
+ database& pdb (p.db);
+
+ transaction t (pdb, !simulate /* start */);
+
+ // Show how we got here if things go wrong, for example, if selecting
+ // a prerequisite is ambiguous because the dependency package is
+ // configured in multiple linked configurations.
+ //
+ auto g (
+ make_exception_guard (
+ [&p] ()
+ {
+ info << "while configuring " << p.name () << p.db;
+ }));
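make_exception_guard (from libbutl) is presumably used here so that the "while configuring" line is printed only when the scope is left via an exception, giving context to whatever diagnostics follow. A minimal sketch of such a guard, assuming C++17's std::uncaught_exceptions (illustrative only, not libbutl's implementation):

  #include <exception>
  #include <iostream>
  #include <stdexcept>
  #include <utility>

  // Runs the callback from its destructor only if the scope is being
  // unwound by an exception.
  //
  template <typename F>
  struct exception_guard
  {
    explicit exception_guard (F f)
        : f_ (std::move (f)), unc_ (std::uncaught_exceptions ()) {}

    ~exception_guard ()
    {
      if (std::uncaught_exceptions () > unc_)
        f_ ();
    }

    F f_;
    int unc_;
  };

  template <typename F>
  exception_guard<F>
  make_guard (F f) {return exception_guard<F> (std::move (f));}

  int main ()
  {
    try
    {
      auto g (make_guard ([] ()
                          {
                            std::cerr << "info: while configuring foo\n";
                          }));

      throw std::runtime_error ("unable to configure foo");
    }
    catch (const std::exception& e)
    {
      std::cerr << "error: " << e.what () << '\n';
    }
  }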
// Note that pkg_configure() commits the transaction.
//
if (p.system)
- sp = pkg_configure_system (ap->id.name, p.available_version (), t);
+ sp = pkg_configure_system (ap->id.name,
+ p.available_version (),
+ pdb,
+ t);
else if (ap != nullptr)
- pkg_configure (c, o, t, sp, ap->dependencies, p.config_vars, simulate);
+ pkg_configure (o,
+ pdb,
+ t,
+ sp,
+ ap->dependencies,
+ p.config_vars,
+ simulate,
+ fdb);
else // Dependent.
{
// Must be in the unpacked state since it was disfigured on the first
@@ -4943,23 +6327,26 @@ namespace bpkg
assert (sp->state == package_state::unpacked);
package_manifest m (
- pkg_verify (sp->effective_src_root (c),
+ pkg_verify (sp->effective_src_root (pdb.config_orig),
true /* ignore_unknown */,
[&sp] (version& v) {v = sp->version;}));
- pkg_configure (c,
- o,
+ pkg_configure (o,
+ p.db,
t,
sp,
convert (move (m.dependencies)),
p.config_vars,
- simulate);
+ simulate,
+ fdb);
}
+ r = true;
+
assert (sp->state == package_state::configured);
if (verbose && !o.no_result ())
- text << "configured " << *sp;
+ text << "configured " << *sp << pdb;
}
// Update the hold state.
@@ -4974,6 +6361,8 @@ namespace bpkg
if (*p.action == build_package::drop)
continue;
+ database& pdb (p.db);
+
const shared_ptr<selected_package>& sp (p.selected);
assert (sp != nullptr);
@@ -4994,19 +6383,23 @@ namespace bpkg
sp->hold_package = hp;
sp->hold_version = hv;
- transaction t (db, !simulate /* start */);
- db.update (sp);
+ transaction t (pdb, !simulate /* start */);
+ pdb.update (sp);
t.commit ();
+ r = true;
+
if (verbose > 1)
{
if (hp)
- text << "holding package " << sp->name;
+ text << "holding package " << sp->name << pdb;
if (hv)
- text << "holding version " << *sp;
+ text << "holding version " << *sp << pdb;
}
}
}
+
+ return r;
}
}