aboutsummaryrefslogtreecommitdiff
path: root/bpkg/pkg-build.cxx
diff options
context:
space:
mode:
Diffstat (limited to 'bpkg/pkg-build.cxx')
-rw-r--r--bpkg/pkg-build.cxx8766
1 files changed, 6173 insertions, 2593 deletions
diff --git a/bpkg/pkg-build.cxx b/bpkg/pkg-build.cxx
index 082696e..fac79c2 100644
--- a/bpkg/pkg-build.cxx
+++ b/bpkg/pkg-build.cxx
@@ -5,11 +5,11 @@
#include <map>
#include <set>
-#include <list>
-#include <cstring> // strlen()
-#include <iostream> // cout
+#include <cstring> // strlen()
+#include <sstream>
+#include <iostream> // cout
-#include <libbutl/standard-version.mxx>
+#include <libbutl/standard-version.hxx>
#include <bpkg/package.hxx>
#include <bpkg/package-odb.hxx>
@@ -20,6 +20,8 @@
#include <bpkg/common-options.hxx>
+#include <bpkg/cfg-link.hxx>
+#include <bpkg/rep-mask.hxx>
#include <bpkg/pkg-purge.hxx>
#include <bpkg/pkg-fetch.hxx>
#include <bpkg/rep-fetch.hxx>
@@ -29,2300 +31,2898 @@
#include <bpkg/pkg-checkout.hxx>
#include <bpkg/pkg-configure.hxx>
#include <bpkg/pkg-disfigure.hxx>
+#include <bpkg/package-query.hxx>
+#include <bpkg/package-skeleton.hxx>
+
#include <bpkg/system-repository.hxx>
+#include <bpkg/system-package-manager.hxx>
+
+#include <bpkg/pkg-build-collect.hxx>
using namespace std;
using namespace butl;
namespace bpkg
{
- // @@ Overall TODO:
- //
- // - Configuration vars (both passed and preserved)
+ // System package manager. Resolved lazily if and when needed. Present NULL
+ // value means no system package manager is available for this host.
//
+ static optional<unique_ptr<system_package_manager>> sys_pkg_mgr;
- // Try to find an available stub package in the imaginary system repository.
- // Such a repository contains stubs corresponding to the system packages
- // specified by the user on the command line with version information
- // (sys:libfoo/1.0, ?sys:libfoo/* but not ?sys:libfoo; the idea is that a
- // real stub won't add any extra information to such a specification so we
- // shouldn't insist on its presence). Semantically this imaginary repository
- // complements all real repositories.
+ // Current configurations as specified with --directory|-d (or the current
+ // working directory if none specified).
//
- static vector<shared_ptr<available_package>> imaginary_stubs;
+ static linked_databases current_configs;
- static shared_ptr<available_package>
- find_imaginary_stub (const package_name& name)
+ static inline bool
+ multi_config ()
{
- auto i (find_if (imaginary_stubs.begin (), imaginary_stubs.end (),
- [&name] (const shared_ptr<available_package>& p)
- {
- return p->id.name == name;
- }));
+ return current_configs.size () != 1;
+ }
- return i != imaginary_stubs.end () ? *i : nullptr;
+ static inline bool
+ current (database& db)
+ {
+ return find (current_configs.begin (), current_configs.end (), db) !=
+ current_configs.end ();
}
- // Try to find packages that optionally satisfy the specified version
- // constraint. Return the list of packages and repository fragments in which
- // each was found or empty list if none were found. Note that a stub
- // satisfies any constraint.
+ // Retrieve the repository fragments for the specified package from its
+ // ultimate dependent configurations and add them to the respective
+ // configuration-associated fragment lists.
//
- static
- vector<pair<shared_ptr<available_package>, shared_ptr<repository_fragment>>>
- find_available (database& db,
- const package_name& name,
- const optional<version_constraint>& c)
+ // If this package's repository fragment is a root fragment (package is
+ // fetched/unpacked using the existing archive/directory), then also add
+ // this repository fragment to the resulting list assuming that this
+ // package's dependencies can be resolved from this repository fragment or
+ // its complements (user-added repositories) as well.
+ //
+ static void
+ add_dependent_repo_fragments (database& db,
+ const shared_ptr<selected_package>& p,
+ config_repo_fragments& r)
{
- vector<pair<shared_ptr<available_package>,
- shared_ptr<repository_fragment>>> r;
+ available_package_id id (p->name, p->version);
- for (shared_ptr<available_package> ap:
- pointer_result (query_available (db, name, c)))
+ // Add a repository fragment to the specified list, suppressing duplicates.
+ //
+ auto add = [] (shared_ptr<repository_fragment>&& rf,
+ vector<shared_ptr<repository_fragment>>& rfs)
{
- // An available package should come from at least one fetched
- // repository fragment.
- //
- assert (!ap->locations.empty ());
+ if (find (rfs.begin (), rfs.end (), rf) == rfs.end ())
+ rfs.push_back (move (rf));
+ };
- // All repository fragments the package comes from are equally good, so
- // we pick the first one.
- //
- r.emplace_back (move (ap),
- ap->locations[0].repository_fragment.load ());
- }
+ if (p->repository_fragment.empty ()) // Root repository fragment?
+ add (db.find<repository_fragment> (empty_string), r[db]);
- // Adding a stub from the imaginary system repository to the non-empty
- // results isn't necessary but may end up with a duplicate. That's why we
- // only add it if nothing else is found.
- //
- if (r.empty ())
+ for (database& ddb: dependent_repo_configs (db))
{
- shared_ptr<available_package> ap (find_imaginary_stub (name));
+ shared_ptr<available_package> dap (ddb.find<available_package> (id));
- if (ap != nullptr)
- r.emplace_back (move (ap), nullptr);
- }
+ if (dap != nullptr)
+ {
+ assert (!dap->locations.empty ());
- return r;
+ config_repo_fragments::iterator i (r.find (ddb));
+
+ if (i == r.end ())
+ i = r.insert (ddb,
+ vector<shared_ptr<repository_fragment>> ()).first;
+
+ vector<shared_ptr<repository_fragment>>& rfs (i->second);
+
+ for (const auto& pl: dap->locations)
+ {
+ const lazy_shared_ptr<repository_fragment>& lrf (
+ pl.repository_fragment);
+
+ if (!rep_masked_fragment (lrf))
+ add (lrf.load (), rfs);
+ }
+
+ // Erase the entry from the map if it contains no fragments, which may
+ // happen if all the available package repositories are masked.
+ //
+ if (rfs.empty ())
+ r.erase (i);
+ }
+ }
}
- // As above but only look for packages from the specified list fo repository
- // fragments, their prerequisite repositories, and their complements,
- // recursively (note: recursivity applies to complements, not
- // prerequisites).
+ // Return a patch version constraint for the specified package version if it
+ // is a standard version (~ shortcut). Otherwise, if requested, issue a
+ // warning and return nullopt.
+ //
+ // Note that the function may also issue a warning and return nullopt if the
+ // package minor version reached the limit (see standard-version.cxx for
+ // details).
//
- static
- vector<pair<shared_ptr<available_package>, shared_ptr<repository_fragment>>>
- find_available (database& db,
- const package_name& name,
- const optional<version_constraint>& c,
- const vector<shared_ptr<repository_fragment>>& rfs,
- bool prereq = true)
+ static optional<version_constraint>
+ patch_constraint (const package_name& nm,
+ const version& pv,
+ bool quiet = false)
{
- // Filter the result based on the repository fragments to which each
- // version belongs.
+ // Note that we don't pass allow_stub flag so the system wildcard version
+ // will (naturally) not be patched.
//
- vector<pair<shared_ptr<available_package>,
- shared_ptr<repository_fragment>>> r (
- filter (rfs, query_available (db, name, c), prereq));
+ string vs (pv.string ());
+ optional<standard_version> v (parse_standard_version (vs));
- if (r.empty ())
+ if (!v)
{
- shared_ptr<available_package> ap (find_imaginary_stub (name));
+ if (!quiet)
+ warn << "unable to patch " << package_string (nm, pv) <<
+ info << "package is not using semantic/standard version";
- if (ap != nullptr)
- r.emplace_back (move (ap), nullptr);
+ return nullopt;
}
- return r;
- }
-
- // As above but only look for a single package from the specified repository
- // fragment, its prerequisite repositories, and their complements,
- // recursively (note: recursivity applies to complements, not
- // prerequisites). Return the package and the repository fragment in which
- // it was found or NULL for both if not found.
- //
- static pair<shared_ptr<available_package>, shared_ptr<repository_fragment>>
- find_available_one (database& db,
- const package_name& name,
- const optional<version_constraint>& c,
- const shared_ptr<repository_fragment>& rf,
- bool prereq = true)
- {
- // Filter the result based on the repository fragment to which each
- // version belongs.
+ try
+ {
+ return version_constraint ('~' + vs);
+ }
+ // Note that the only possible reason for invalid_argument exception to be
+ // thrown is that minor version reached the 99999 limit (see
+ // standard-version.cxx for details).
//
- auto r (filter_one (rf, query_available (db, name, c), prereq));
+ catch (const invalid_argument&)
+ {
+ if (!quiet)
+ warn << "unable to patch " << package_string (nm, pv) <<
+ info << "minor version limit reached";
- if (r.first == nullptr)
- r.first = find_imaginary_stub (name);
+ return nullopt;
+ }
+ }
- return r;
+ static inline optional<version_constraint>
+ patch_constraint (const shared_ptr<selected_package>& sp, bool quiet = false)
+ {
+ return patch_constraint (sp->name, sp->version, quiet);
}
- // As above but look for a single package from a list of repository
- // fragments.
+ // As above but returns a minor version constraint (^ shortcut) instead of
+ // the patch version constraint (~ shortcut).
//
- static pair<shared_ptr<available_package>, shared_ptr<repository_fragment>>
- find_available_one (database& db,
- const package_name& name,
- const optional<version_constraint>& c,
- const vector<shared_ptr<repository_fragment>>& rfs,
- bool prereq = true)
+ static optional<version_constraint>
+ minor_constraint (const package_name& nm,
+ const version& pv,
+ bool quiet = false)
{
- // Filter the result based on the repository fragments to which each
- // version belongs.
+ // Note that we don't pass allow_stub flag so the system wildcard version
+ // will (naturally) not be patched.
//
- auto r (filter_one (rfs, query_available (db, name, c), prereq));
+ string vs (pv.string ());
+ optional<standard_version> v (parse_standard_version (vs));
- if (r.first == nullptr)
- r.first = find_imaginary_stub (name);
+ if (!v)
+ {
+ if (!quiet)
+ warn << "unable to upgrade " << package_string (nm, pv)
+ << " to latest minor version" <<
+ info << "package is not using semantic/standard version";
- return r;
+ return nullopt;
+ }
+
+ try
+ {
+ return version_constraint ('^' + vs);
+ }
+ // Note that the only possible reason for invalid_argument exception to be
+ // thrown is that major version reached the 99999 limit (see
+ // standard-version.cxx for details).
+ //
+ catch (const invalid_argument&)
+ {
+ if (!quiet)
+ warn << "unable to upgrade " << package_string (nm, pv)
+ << " to latest minor version" <<
+ info << "major version limit reached";
+
+ return nullopt;
+ }
}
- // Create a transient (or fake, if you prefer) available_package object
- // corresponding to the specified selected object. Note that the package
- // locations list is left empty and that the returned repository fragment
- // could be NULL if the package is an orphan.
+ // Return true if the selected package is not configured as system and its
+ // repository fragment is not present in any ultimate dependent
+ // configuration (see dependent_repo_configs() for details) or this exact
+ // version is not available from this repository fragment nor from its
+ // complements. Also return true if the selected package repository fragment
+ // is a root fragment (package is fetched/unpacked using the existing
+ // archive/directory).
//
- // Note also that in our model we assume that make_available() is only
- // called if there is no real available_package. This makes sure that if
- // the package moves (e.g., from testing to stable), then we will be using
- // stable to resolve its dependencies.
+ // Note that the orphan definition here is stronger than in the rest of the
+ // code, since we request the available package to also be present in the
+ // repository fragment and consider packages built as existing archives or
+ // directories as orphans. It feels that such a definition aligns better
+ // with the user expectations about deorphaning.
//
- static pair<shared_ptr<available_package>, shared_ptr<repository_fragment>>
- make_available (const common_options& options,
- const dir_path& c,
- database& db,
- const shared_ptr<selected_package>& sp)
+ static bool
+ orphan_package (database& db, const shared_ptr<selected_package>& sp)
{
- assert (sp != nullptr && sp->state != package_state::broken);
+ assert (sp != nullptr);
if (sp->system ())
- return make_pair (make_shared<available_package> (sp->name, sp->version),
- nullptr);
+ return false;
- // First see if we can find its repository fragment.
- //
- // Note that this is package's "old" repository fragment and there is no
- // guarantee that its dependencies are still resolvable from it. But this
- // is our best chance (we could go nuclear and point all orphans to the
- // root repository fragment but that feels a bit too drastic at the
- // moment).
- //
- shared_ptr<repository_fragment> af (
- db.find<repository_fragment> (
- sp->repository_fragment.canonical_name ()));
+ const string& cn (sp->repository_fragment.canonical_name ());
- // The package is in at least fetched state, which means we should
- // be able to get its manifest.
- //
- const optional<path>& a (sp->archive);
+ if (cn.empty ()) // Root repository fragment?
+ return true;
- package_manifest m (
- sp->state == package_state::fetched
- ? pkg_verify (options,
- a->absolute () ? *a : c / *a,
- true /* ignore_unknown */,
- false /* expand_values */)
- : pkg_verify (sp->effective_src_root (c),
- true /* ignore_unknown */,
- // Copy potentially fixed up version from selected package.
- [&sp] (version& v) {v = sp->version;}));
+ for (database& ddb: dependent_repo_configs (db))
+ {
+ const shared_ptr<repository_fragment> rf (
+ ddb.find<repository_fragment> (cn));
- return make_pair (make_shared<available_package> (move (m)), move (af));
+ if (rf != nullptr && !rep_masked_fragment (ddb, rf))
+ {
+ auto af (
+ find_available_one (sp->name,
+ version_constraint (sp->version),
+ lazy_shared_ptr<repository_fragment> (ddb,
+ move (rf)),
+ false /* prereq */,
+ true /* revision */));
+
+ const shared_ptr<available_package>& ap (af.first);
+
+ if (ap != nullptr && !ap->stub ())
+ return false;
+ }
+ }
+
+ return true;
}
- // Return true if the version constraint represents the wildcard version.
+ // List of dependency packages (specified with ? on the command line).
//
- static inline bool
- wildcard (const version_constraint& vc)
+ // If configuration is not specified for a system dependency package (db is
+ // NULL), then the dependency is assumed to be specified for all current
+ // configurations and their explicitly linked configurations, recursively,
+ // including private configurations that can potentially be created during
+ // this run.
+ //
+ // The selected package is not NULL if the database is not NULL and the
+ // dependency package is present in this database.
+ //
+ struct dependency_package
{
- bool r (vc.min_version && *vc.min_version == wildcard_version);
+ database* db; // Can only be NULL if system.
+ package_name name;
+ optional<version_constraint> constraint; // nullopt if unspecified.
+
+ // Can only be true if constraint is specified.
+ //
+ bool hold_version;
- if (r)
- assert (vc.max_version == vc.min_version);
+ shared_ptr<selected_package> selected;
+ bool system;
+ bool existing; // Build as archive or directory.
- return r;
- }
+ // true -- upgrade, false -- patch.
+ //
+ optional<bool> upgrade; // Only for absent constraint.
+
+ bool deorphan;
+ bool keep_out;
+ bool disfigure;
+ optional<dir_path> checkout_root;
+ bool checkout_purge;
+ strings config_vars; // Only if not system.
+ const system_package_status* system_status; // See struct pkg_arg.
+ };
+ using dependency_packages = vector<dependency_package>;
- // A "dependency-ordered" list of packages and their prerequisites.
- // That is, every package on the list only possibly depending on the
- // ones after it. In a nutshell, the usage is as follows: we first
- // add one or more packages (the "initial selection"; for example, a
- // list of packages the user wants built). The list then satisfies all
- // the prerequisites of the packages that were added, recursively. At
- // the end of this process we have an ordered list of all the packages
- // that we have to build, from last to first, in order to build our
- // initial selection.
+ // Evaluate a dependency package and return a new desired version. If the
+ // result is absent (nullopt), then there are no user expectations regarding
+ // this dependency. If the result is a NULL available_package, then it is
+ // either no longer used and can be dropped, or no changes to the dependency
+ // are necessary. Otherwise, the result is available_package to
+ // upgrade/downgrade to or replace with the same version (deorphan, rebuild
+ // as an existing archive or directory, etc) as well as the repository
+ // fragment it must come from, the system flag, and the database it must be
+ // configured in.
//
- // This process is split into two phases: satisfaction of all the
- // dependencies (the collect_build() function) and ordering of the list
- // (the order() function).
+ // If the dependency is being rebuilt as an existing archive or directory we
+ // may end up with the available package version being the same as the
+ // selected package version. In this case the dependency needs to be
+ // re-fetched/re-unpacked from this archive/directory. Also note that if the
+ // dependency needs to be rebuilt as an existing archive or directory the
+ // caller may need to stash its name/database. This way on the subsequent
+ // call this function may return the "no change" recommendation rather than
+ // the "replace" recommendation.
//
- // During the satisfaction phase, we collect all the packages, their
- // prerequisites (and so on, recursively) in a map trying to satisfy
- // any version constraints. Specifically, during this step, we may
- // "upgrade" or "downgrade" a package that is already in a map as a
- // result of another package depending on it and, for example, requiring
- // a different version. One notable side-effect of this process is that
- // we may end up with a lot more packages in the map (but not in the list)
- // than we will have on the list. This is because some of the prerequisites
- // of "upgraded" or "downgraded" packages may no longer need to be built.
+ // If in the deorphan mode it turns out that the package is not an orphan
+ // and there is no version constraint specified and upgrade/patch is not
+ // requested, then assume that no changes are necessary for the dependency.
+ // Otherwise, if the package version is not constrained and no upgrade/patch
+ // is requested, then pick the version that matches the dependency version
+ // best in the following preference order:
//
- // Note also that we don't try to do exhaustive constraint satisfaction
- // (i.e., there is no backtracking). Specifically, if we have two
- // candidate packages each satisfying a constraint of its dependent
- // package, then if neither of them satisfy both constraints, then we
- // give up and ask the user to resolve this manually by explicitly
- // specifying the version that will satisfy both constraints.
+ // - same version, revision, and iteration
+ // - latest iteration of same version and revision
+ // - later revision of same version
+ // - later patch of same version
+ // - later minor of same version
+ // - latest available version, including earlier
//
- struct build_package
+ // Otherwise, always upgrade/downgrade the orphan or fail if no satisfactory
+ // version is available. Note that in the both cases (deorphan and
+ // upgrade/downgrade+deorphan) we may end up with the available package
+ // version being the same as the selected package version. In this case the
+ // dependency needs to be re-fetched from an existing repository. Also note
+ // that if the dependency needs to be deorphaned the caller may need to
+ // cache the original orphan version. This way on the subsequent calls this
+ // function still considers this package as an orphan and uses its original
+ // version to deduce the best match, which may change due, for example, to a
+ // change of the constraining dependents set.
+ //
+ // If the package version that satisfies explicitly specified dependency
+ // version constraint can not be found in the dependents repositories, then
+ // return the "no changes are necessary" result if ignore_unsatisfiable
+ // argument is true and fail otherwise. The common approach is to pass true
+ // for this argument until the execution plan is finalized, assuming that
+ // the problematic dependency might be dropped.
+ //
+ struct evaluate_result
{
- enum action_type
+ // The system, existing, upgrade, and orphan members are meaningless if
+ // the unused flag is true.
+ //
+ reference_wrapper<database> db;
+ shared_ptr<available_package> available;
+ lazy_shared_ptr<bpkg::repository_fragment> repository_fragment;
+ bool unused;
+ bool system;
+ bool existing;
+ optional<bool> upgrade;
+
+ // Original orphan version which needs to be deorphaned. May only be
+ // present for the deorphan mode.
+ //
+ optional<version> orphan;
+ };
+
+ struct dependent_constraint
+ {
+ database& db;
+ shared_ptr<selected_package> package;
+ optional<version_constraint> constraint;
+
+ dependent_constraint (database& d,
+ shared_ptr<selected_package> p,
+ optional<version_constraint> c)
+ : db (d), package (move (p)), constraint (move (c)) {}
+ };
+
+ using dependent_constraints = vector<dependent_constraint>;
+ using deorphaned_dependencies = map<package_key, version>;
+ using existing_dependencies = vector<package_key>;
+
+ static evaluate_result
+ evaluate_dependency (const common_options&,
+ database&,
+ const shared_ptr<selected_package>&,
+ const optional<version_constraint>& desired,
+ bool desired_sys,
+ bool existing,
+ database& desired_db,
+ const shared_ptr<selected_package>& desired_db_sp,
+ optional<bool> upgrade,
+ bool deorphan,
+ bool explicitly,
+ const config_repo_fragments&,
+ const dependent_constraints&,
+ const existing_dependencies&,
+ const deorphaned_dependencies&,
+ const build_packages&,
+ bool ignore_unsatisfiable);
+
+ // If there are no user expectations regarding this dependency, then we give
+ // no up/down-grade/replace recommendation, unless there are no dependents
+ // in which case we recommend to drop the dependency.
+ //
+ // Note that the user expectations are only applied for dependencies that
+ // have dependents in the current configurations.
+ //
+ static optional<evaluate_result>
+ evaluate_dependency (const common_options& o,
+ database& db,
+ const shared_ptr<selected_package>& sp,
+ const dependency_packages& deps,
+ bool no_move,
+ const existing_dependencies& existing_deps,
+ const deorphaned_dependencies& deorphaned_deps,
+ const build_packages& pkgs,
+ bool ignore_unsatisfiable)
+ {
+ tracer trace ("evaluate_dependency");
+
+ assert (sp != nullptr && !sp->hold_package);
+
+ const package_name& nm (sp->name);
+
+ auto no_change = [&db] ()
{
- build,
+ return evaluate_result {db,
+ nullptr /* available */,
+ nullptr /* repository_fragment */,
+ false /* unused */,
+ false /* system */,
+ false /* existing */,
+ nullopt /* upgrade */,
+ nullopt /* orphan */};
+ };
- // Selected package is not NULL, available package is NULL.
- //
- drop,
+ // Only search for the user expectations regarding this dependency if it
+ // has dependents in the current configurations, unless --no-move is
+ // specified.
+ //
+ // In the no-move mode consider the user-specified configurations not as a
+ // dependency new location, but as the current location of the dependency
+ // to which the expectations are applied. Note that multiple package specs
+ // for the same dependency in different configurations can be specified on
+ // the command line.
+ //
+ linked_databases cur_dbs;
+ dependency_packages::const_iterator i (deps.end ());
- // Selected package is not NULL, available package is NULL.
- //
- // This is the "only adjustments" action for a selected package.
- // Adjustment flags (see below) are unhold (the package should be
- // treated as a dependency) and reconfigure (dependent package that
- // needs to be reconfigured because its prerequisite is being
- // up/down-graded or reconfigured).
+ if (!no_move)
+ {
+ // Collect the current configurations which contain dependents for this
+ // dependency and assume no expectations if there is none.
//
- // Note that this action is "replaceable" with either drop or build
- // action but in the latter case the adjustments must be copied over.
+ for (database& cdb: current_configs)
+ {
+ if (!query_dependents (cdb, nm, db).empty ())
+ cur_dbs.push_back (cdb);
+ }
+
+ // Search for the user expectations regarding this dependency by
+ // matching the package name and configuration type, if configuration is
+ // specified, preferring entries with configuration specified and fail
+ // if there are multiple candidates.
//
- adjust
- };
+ if (!cur_dbs.empty ())
+ {
+ for (dependency_packages::const_iterator j (deps.begin ());
+ j != deps.end ();
+ ++j)
+ {
+ if (j->name == nm && (j->db == nullptr || j->db->type == db.type))
+ {
+ if (i == deps.end () || i->db == nullptr)
+ {
+ i = j;
+ }
+ else if (j->db != nullptr)
+ {
+ fail << "multiple " << db.type << " configurations specified "
+ << "for dependency package " << nm <<
+ info << i->db->config_orig <<
+ info << j->db->config_orig <<
+ info << "consider using the --no-move option";
+ }
+ }
+ }
+ }
+ }
+ else
+ {
+ for (dependency_packages::const_iterator j (deps.begin ());
+ j != deps.end ();
+ ++j)
+ {
+ if (j->name == nm && (j->db == nullptr || *j->db == db))
+ {
+ if (i == deps.end () || i->db == nullptr)
+ i = j;
- // An object with an absent action is there to "pre-enter" information
- // about a package (constraints and flags) in case it is used.
- //
- optional<action_type> action;
+ if (i->db != nullptr)
+ break;
+ }
+ }
+ }
- shared_ptr<selected_package> selected; // NULL if not selected.
- shared_ptr<available_package> available; // Can be NULL, fake/transient.
+ bool user_exp (i != deps.end ());
+ bool copy_dep (user_exp && i->db != nullptr && *i->db != db);
- // Can be NULL (orphan) or root.
+ // Collect the dependents for checking the version constraints, using
+ // their repository fragments for discovering available dependency package
+ // versions, etc.
+ //
+ // Note that if dependency needs to be copied, then we only consider its
+ // dependents in the current configurations which potentially can be
+ // repointed to it. Note that configurations of such dependents must
+ // contain the new dependency configuration in their dependency tree.
//
- shared_ptr<bpkg::repository_fragment> repository_fragment;
+ linked_databases dep_dbs;
- const package_name&
- name () const
+ if (copy_dep)
{
- return selected != nullptr ? selected->name : available->id.name;
+ for (database& db: i->db->dependent_configs ())
+ {
+ if (find (cur_dbs.begin (), cur_dbs.end (), db) != cur_dbs.end ())
+ dep_dbs.push_back (db);
+ }
+
+ // Bail out if no dependents can be repointed to the dependency.
+ //
+ if (dep_dbs.empty ())
+ {
+ l5 ([&]{trace << *sp << db << ": can't repoint";});
+ return no_change ();
+ }
}
+ else
+ dep_dbs = db.dependent_configs ();
- // Hold flags. Note that we only "increase" the hold_package value that is
- // already in the selected package.
+ // Collect the dependents but bail out if the dependency is used but there
+ // are no user expectations regarding it.
//
- optional<bool> hold_package;
- optional<bool> hold_version;
+ vector<pair<database&, package_dependent>> pds;
- // Constraint value plus, normally, the dependent package name that placed
- // this constraint but can also be some other name for the initial
- // selection (e.g., package version specified by the user on the command
- // line). This why we use the string type, rather than package_name.
- //
- struct constraint_type
+ for (database& ddb: dep_dbs)
{
- string dependent;
- version_constraint value;
+ auto ds (query_dependents (ddb, nm, db));
- constraint_type () = default;
- constraint_type (string d, version_constraint v)
- : dependent (move (d)), value (move (v)) {}
- };
+ if (!ds.empty ())
+ {
+ if (!user_exp)
+ return nullopt;
- vector<constraint_type> constraints;
+ for (auto& d: ds)
+ pds.emplace_back (ddb, move (d));
+ }
+ }
- // System package indicator. See also a note in the merge() function.
+ // Bail out if the dependency is unused.
//
- bool system;
+ if (pds.empty ())
+ {
+ l5 ([&]{trace << *sp << db << ": unused";});
- // If the flag is set and the external package is being replaced with an
- // external one, then keep its output directory between upgrades and
- // downgrades.
- //
- bool keep_out;
+ return evaluate_result {db,
+ nullptr /* available */,
+ nullptr /* repository_fragment */,
+ true /* unused */,
+ false /* system */,
+ false /* existing */,
+ nullopt /* upgrade */,
+ nullopt /* orphan */};
+ }
- // If present, then check out the package into the specified directory
- // rather than into the configuration directory, if it comes from a
- // version control-based repository. Optionally, remove this directory
- // when the package is purged.
+ // The requested dependency database, version constraint, and system flag.
//
- optional<dir_path> checkout_root;
- bool checkout_purge;
+ assert (i != deps.end ());
+
+ database& ddb (i->db != nullptr ? *i->db : db);
+ const optional<version_constraint>& dvc (i->constraint); // May be nullopt.
+ bool dsys (i->system);
+ bool existing (i->existing);
+ bool deorphan (i->deorphan);
- // Command line configuration variables. Only meaningful for non-system
- // packages.
+ // The selected package in the desired database which we copy over.
+ //
+ // It is the current dependency package, if we don't copy, and may or may
+ // not exist otherwise.
//
- strings config_vars;
+ shared_ptr<selected_package> dsp (db == ddb
+ ? sp
+ : ddb.find<selected_package> (nm));
+
+ // If a package in the desired database is already selected and matches
+ // the user expectations then no package change is required, unless the
+ // package is also being built as an existing archive or directory or
+ // needs to be deorphaned.
+ //
+ if (dsp != nullptr && dvc)
+ {
+ const version& sv (dsp->version);
+ bool ssys (dsp->system ());
- // Set of package names that caused this package to be built or adjusted.
- // Empty name signifies user selection.
+ if (!existing &&
+ !deorphan &&
+ ssys == dsys &&
+ (ssys ? sv == *dvc->min_version : satisfies (sv, dvc)))
+ {
+ l5 ([&]{trace << *dsp << ddb << ": unchanged";});
+ return no_change ();
+ }
+ }
+
+ // Build a set of repository fragments the dependent packages come from.
+ // Also cache the dependents and the constraints they apply to this
+ // dependency.
//
- set<package_name> required_by;
+ config_repo_fragments repo_frags;
+ dependent_constraints dpt_constrs;
- bool
- user_selection () const
+ for (auto& pd: pds)
{
- return required_by.find (package_name ()) != required_by.end ();
+ database& ddb (pd.first);
+ package_dependent& dep (pd.second);
+
+ shared_ptr<selected_package> p (ddb.load<selected_package> (dep.name));
+ add_dependent_repo_fragments (ddb, p, repo_frags);
+
+ dpt_constrs.emplace_back (ddb, move (p), move (dep.constraint));
}
- // Adjustment flags.
- //
- uint16_t adjustments;
+ return evaluate_dependency (o,
+ db,
+ sp,
+ dvc,
+ dsys,
+ existing,
+ ddb,
+ dsp,
+ i->upgrade,
+ deorphan,
+ true /* explicitly */,
+ repo_frags,
+ dpt_constrs,
+ existing_deps,
+ deorphaned_deps,
+ pkgs,
+ ignore_unsatisfiable);
+ }
- // Set if we also need to clear the hold package flag.
- //
- static const uint16_t adjust_unhold = 0x0001;
+ struct config_selected_package
+ {
+ database& db;
+ const shared_ptr<selected_package>& package;
+
+ config_selected_package (database& d,
+ const shared_ptr<selected_package>& p)
+ : db (d), package (p) {}
bool
- unhold () const
+ operator== (const config_selected_package& v) const
{
- return (adjustments & adjust_unhold) != 0;
+ return package->name == v.package->name && db == v.db;
}
- // Set if we also need to reconfigure this package. Note that in some
- // cases reconfigure is naturally implied. For example, if an already
- // configured package is being up/down-graded. For such cases we don't
- // guarantee that the reconfigure flag is set. We only make sure to set it
- // for cases that would otherwise miss the need for reconfiguration. As a
- // result, use the reconfigure() predicate which detects both explicit and
- // implied cases.
- //
- // At first, it may seem that this flag is redundant and having the
- // available package set to NULL is sufficient. But consider the case
- // where the user asked us to build a package that is already in the
- // configured state (so all we have to do is pkg-update). Next, add to
- // this a prerequisite package that is being upgraded. Now our original
- // package has to be reconfigured. But without this flag we won't know
- // (available for our package won't be NULL).
- //
- static const uint16_t adjust_reconfigure = 0x0002;
-
bool
- reconfigure () const
+ operator< (const config_selected_package& v) const
{
- assert (action && *action != drop);
-
- return selected != nullptr &&
- selected->state == package_state::configured &&
- ((adjustments & adjust_reconfigure) != 0 ||
- (*action == build &&
- (selected->system () != system ||
- selected->version != available_version () ||
- (!system && !config_vars.empty ()))));
+ int r (package->name.compare (v.package->name));
+ return r != 0 ? (r < 0) : (db < v.db);
}
+ };
- const version&
- available_version () const
- {
- // This should have been diagnosed before creating build_package object.
- //
- assert (available != nullptr &&
- (system
- ? available->system_version () != nullptr
- : !available->stub ()));
+ static evaluate_result
+ evaluate_dependency (const common_options& o,
+ database& db,
+ const shared_ptr<selected_package>& sp,
+ const optional<version_constraint>& dvc,
+ bool dsys,
+ bool existing,
+ database& ddb,
+ const shared_ptr<selected_package>& dsp,
+ optional<bool> upgrade,
+ bool deorphan,
+ bool explicitly,
+ const config_repo_fragments& rfs,
+ const dependent_constraints& dpt_constrs,
+ const existing_dependencies& existing_deps,
+ const deorphaned_dependencies& deorphaned_deps,
+ const build_packages& pkgs,
+ bool ignore_unsatisfiable)
+ {
+ tracer trace ("evaluate_dependency");
- return system ? *available->system_version () : available->version;
- }
+ const package_name& nm (sp->name);
- string
- available_name_version () const
+ auto no_change = [&db] ()
{
- assert (available != nullptr);
- return package_string (available->id.name, available_version (), system);
- }
+ return evaluate_result {db,
+ nullptr /* available */,
+ nullptr /* repository_fragment */,
+ false /* unused */,
+ false /* system */,
+ false /* existing */,
+ nullopt /* upgrade */,
+ nullopt /* orphan */};
+ };
- // Merge constraints, required-by package names, hold_* flags,
- // adjustments, and user-specified options/variables.
+ // Build the list of available packages for the potential up/down-grade
+ // to, in the version-descending order. If patching, then we constrain the
+ // choice with the latest patch version and place no constraints if
+ // upgrading. For a system package we will try to find the available
+ // package that matches the user-specified system version (preferable for
+ // the configuration negotiation machinery) and, if fail, fallback to
+ // picking the latest one just to make sure the package is recognized.
//
- void
- merge (build_package&& p)
+ // But first check if this package is specified as an existing archive or
+ // directory. If that's the case, then only consider its (transient)
+ // available package instead of the above.
+ //
+ bool patch (false);
+ available_packages afs;
+
+ if (existing)
{
- // We don't merge into pre-entered objects, and from/into drops.
+ // By definition such a dependency has a version specified and may not
+ // be system.
//
- assert (action && *action != drop && (!p.action || *p.action != drop));
+ assert (dvc && !dsys);
+
+ pair<shared_ptr<available_package>,
+ lazy_shared_ptr<repository_fragment>> rp (
+ find_existing (ddb, nm, *dvc));
- // Copy the user-specified options/variables.
+ // Must have been added to the existing packages registry.
//
- if (p.user_selection ())
+ assert (rp.first != nullptr);
+
+ afs.push_back (move (rp));
+ }
+ else
+ {
+ optional<version_constraint> c;
+
+ if (!dvc)
{
- // We don't allow a package specified on the command line multiple
- // times to have different sets of options/variables. Given that, it's
- // tempting to assert that the options/variables don't change if we
- // merge into a user selection. That's, however, not the case due to
- // the iterative plan refinement implementation details (--checkout-*
- // options and variables are only saved into the pre-entered
- // dependencies, etc.).
- //
- if (p.keep_out)
- keep_out = p.keep_out;
+ assert (!dsys); // The version can't be empty for the system package.
- if (p.checkout_root)
- checkout_root = move (p.checkout_root);
+ patch = (upgrade && !*upgrade);
- if (p.checkout_purge)
- checkout_purge = p.checkout_purge;
+ if (patch)
+ {
+ c = patch_constraint (sp, ignore_unsatisfiable);
+
+ if (!c)
+ {
+ l5 ([&]{trace << *sp << db << ": non-patchable";});
+ return no_change ();
+ }
+ }
+ }
+ else if (!dsys || !wildcard (*dvc))
+ c = dvc;
- if (!p.config_vars.empty ())
- config_vars = move (p.config_vars);
+ afs = find_available (nm, c, rfs);
- // Propagate the user-selection tag.
- //
- required_by.insert (package_name ());
+ if (afs.empty () && dsys && c)
+ afs = find_available (nm, nullopt, rfs);
+ }
+
+ // In the deorphan mode check that the dependency is an orphan or was
+ // deorphaned on some previous refinement iteration. If that's not the
+ // case, then just disable the deorphan mode for this dependency and, if
+ // the version is not constrained and upgrade/patch is not requested, bail
+ // out indicating that no change is required.
+ //
+ // Note that in the move mode (dsp != sp) we deorphan the dependency in
+ // its destination configuration, if present. In the worst case scenario
+ // both the source and destination selected packages may need to be
+ // deorphaned since the source selected package may also stay if some
+ // dependents were not repointed to the new dependency (remember that the
+ // move mode is actually a copy mode). We, however, have no easy way to
+ // issue recommendations for both the old and the new dependencies at the
+ // moment. Given that in the common case the old dependency gets dropped,
+ // let's keep it simple and do nothing about the old dependency and see
+ // how it goes.
+ //
+ const version* deorphaned (nullptr);
+
+ if (deorphan)
+ {
+ bool orphan (dsp != nullptr && !dsp->system () && !dsys);
+
+ if (orphan)
+ {
+ auto i (deorphaned_deps.find (package_key (ddb, nm)));
+
+ if (i == deorphaned_deps.end ())
+ orphan = orphan_package (ddb, dsp);
+ else
+ deorphaned = &i->second;
}
- // Required-by package names have different semantics for different
- // actions: dependent for builds and prerequisite for adjustment. Mixing
- // them would break prompts/diagnostics, so we copy them only if actions
- // match.
- //
- if (p.action && *p.action == *action)
- required_by.insert (p.required_by.begin (), p.required_by.end ());
+ if (!orphan)
+ {
+ if (!dvc && !upgrade)
+ {
+ l5 ([&]{trace << *sp << db << ": non-orphan";});
+ return no_change ();
+ }
- // Copy constraints.
- //
- // Note that we may duplicate them, but this is harmless.
- //
- constraints.insert (constraints.end (),
- make_move_iterator (p.constraints.begin ()),
- make_move_iterator (p.constraints.end ()));
+ deorphan = false;
+ }
+ }
- // Copy hold_* flags if they are "stronger".
- //
- if (!hold_package || (p.hold_package && *p.hold_package > *hold_package))
- hold_package = p.hold_package;
+ // Go through up/down-grade candidates and pick the first one that
+ // satisfies all the dependents. In the deorphan mode if the package
+ // version is not constrained and upgrade/patch is not requested, then
+ // pick the version that matches the dependency version best (see the
+ // function description for details). Collect (and sort) unsatisfied
+ // dependents per the unsatisfiable version in case we need to print them.
+ //
+ // NOTE: don't forget to update the find_orphan_match() lambda and the
+ // try_replace_dependency() function if changing anything deorphan-related
+ // here.
+ //
+ using sp_set = set<config_selected_package>;
- if (!hold_version || (p.hold_version && *p.hold_version > *hold_version))
- hold_version = p.hold_version;
+ vector<pair<version, sp_set>> unsatisfiable;
- // Copy adjustments flags.
- //
- adjustments |= p.adjustments;
+ bool stub (false);
+
+ assert (!dsys ||
+ (ddb.system_repository &&
+ ddb.system_repository->find (nm) != nullptr));
- // Note that we don't copy the build_package::system flag. If it was
- // set from the command line ("strong system") then we will also have
- // the '==' constraint which means that this build_package object will
- // never be replaced.
+ // Version to deorphan (original orphan version).
+ //
+ const version* dov (deorphaned != nullptr ? deorphaned :
+ deorphan ? &dsp->version :
+ nullptr);
+
+ optional<version_constraint> dopc; // Patch constraint for the above.
+ optional<version_constraint> domc; // Minor constraint for the above.
+
+ bool orphan_best_match (deorphan && !dvc && !upgrade);
+
+ if (orphan_best_match)
+ {
+ // Note that non-zero iteration makes a version non-standard, so we
+ // reset it to 0 to produce the patch/minor constraints.
//
- // For other cases ("weak system") we don't want to copy system over in
- // order not prevent, for example, system to non-system upgrade.
+ version v (dov->epoch,
+ dov->upstream,
+ dov->release,
+ dov->revision,
+ 0 /* iteration */);
+
+ dopc = patch_constraint (nm, v, true /* quiet */);
+ domc = minor_constraint (nm, v, true /* quiet */);
}
- };
- using build_package_list = list<reference_wrapper<build_package>>;
+ using available = pair<shared_ptr<available_package>,
+ lazy_shared_ptr<repository_fragment>>;
- struct build_packages: build_package_list
- {
- // Packages collection of whose prerequisites has been postponed due the
- // inability to find a version satisfying the pre-entered constraint from
- // repositories available to this package. The idea is that this
- // constraint could still be satisfied from a repository fragment of some
- // other package (that we haven't processed yet) that also depends on this
- // prerequisite.
- //
- using postponed_packages = set<const build_package*>;
+ available deorphan_latest_iteration;
+ available deorphan_later_revision;
+ available deorphan_later_patch;
+ available deorphan_later_minor;
+ available deorphan_latest_available;
- // Pre-enter a build_package without an action. No entry for this package
- // may already exists.
+ // If the dependency is deorphaned to the same version as on the previous
+ // call, then return the "no change" result. Otherwise, return the
+ // deorphan result.
//
- void
- enter (package_name name, build_package pkg)
+ auto deorphan_result = [&sp, &db,
+ &ddb, &dsp,
+ dsys,
+ deorphaned, dov,
+ existing,
+ upgrade,
+ &no_change,
+ &trace] (available&& a, const char* what)
{
- assert (!pkg.action);
+ if (deorphaned != nullptr && dsp->version == a.first->version)
+ {
+ l5 ([&]{trace << *sp << db << ": already deorphaned";});
+ return no_change ();
+ }
- auto p (map_.emplace (move (name), data_type {end (), move (pkg)}));
- assert (p.second);
- }
+ l5 ([&]{trace << *sp << db << ": deorphan to " << what << ' '
+ << package_string (sp->name, a.first->version)
+ << ddb;});
- // Collect the package being built. Return its pointer if this package
- // version was, in fact, added to the map and NULL if it was already there
- // or the existing version was preferred. So can be used as bool.
- //
- build_package*
- collect_build (const common_options& options,
- const dir_path& cd,
- database& db,
- build_package pkg,
- postponed_packages* recursively = nullptr)
+ return evaluate_result {
+ ddb, move (a.first), move (a.second),
+ false /* unused */,
+ dsys,
+ existing,
+ upgrade,
+ *dov};
+ };
+
+ auto build_result = [&ddb, dsys, existing, upgrade] (available&& a)
{
- using std::swap; // ...and not list::swap().
+ return evaluate_result {
+ ddb, move (a.first), move (a.second),
+ false /* unused */,
+ dsys,
+ existing,
+ upgrade,
+ nullopt /* orphan */};
+ };
- tracer trace ("collect_build");
+ // Note that if the selected dependency is the best that we can get, we
+ // normally issue the "no change" recommendation. However, if the
+ // configuration variables are specified for this dependency on the
+ // command line, then we issue the "reconfigure" recommendation instead.
+ //
+ // Return true, if the already selected dependency has been specified on
+ // the command line with the configuration variables, but has not yet been
+ // built on this pkg-build run.
+ //
+ auto reconfigure = [&ddb, &dsp, &nm, dsys, &pkgs] ()
+ {
+ assert (dsp != nullptr);
- // Only builds are allowed here.
- //
- assert (pkg.action && *pkg.action == build_package::build &&
- pkg.available != nullptr);
+ if (!dsys)
+ {
+ const build_package* p (pkgs.entered_build (ddb, nm));
+ return p != nullptr && !p->action && !p->config_vars.empty ();
+ }
+ else
+ return false;
+ };
- auto i (map_.find (pkg.available->id.name));
+ for (available& af: afs)
+ {
+ shared_ptr<available_package>& ap (af.first);
+ const version& av (!dsys ? ap->version : *ap->system_version (ddb));
- // If we already have an entry for this package name, then we
- // have to pick one over the other.
+ // If we aim to upgrade to the latest version and it tends to be less
+ // then the selected one, then what we currently have is the best that
+ // we can get, and so we return the "no change" result, unless we are
+ // deorphaning.
//
- // If the existing entry is a pre-entered or is non-build one, then we
- // merge it into the new build entry. Otherwise (both are builds), we
- // pick one and merge the other into it.
+ // Note that we also handle a package stub here.
//
- if (i != map_.end ())
+ if (!dvc && dsp != nullptr && av < dsp->version)
{
- build_package& bp (i->second.package);
+ assert (!dsys); // Version can't be empty for the system package.
- // Can't think of the scenario when this happens. We would start
- // collecting from scratch (see below).
+ // For the selected system package we still need to pick a source
+ // package version to downgrade to.
//
- assert (!bp.action || *bp.action != build_package::drop);
+ if (!dsp->system () && !deorphan)
+ {
+ if (reconfigure ())
+ {
+ l5 ([&]{trace << *dsp << ddb << ": reconfigure (best)";});
+ return build_result (find_available_fragment (o, ddb, dsp));
+ }
+ else
+ {
+ l5 ([&]{trace << *dsp << ddb << ": best";});
+ return no_change ();
+ }
+ }
- if (!bp.action || *bp.action != build_package::build) // Non-build.
+ // We cannot upgrade the package to a stub version, so just skip it.
+ //
+ if (ap->stub ())
{
- pkg.merge (move (bp));
- bp = move (pkg);
+ stub = true;
+ continue;
}
- else // Build.
+ }
+
+ // Check if the version satisfies all the dependents and collect
+ // unsatisfied ones.
+ //
+ bool satisfactory (true);
+ sp_set unsatisfied_dependents;
+
+ for (const auto& dp: dpt_constrs)
+ {
+ if (!satisfies (av, dp.constraint))
{
- // At the end we want p1 to point to the object that we keep
- // and p2 to the object that we merge from.
+ satisfactory = false;
+
+ // Continue to collect dependents of the unsatisfiable version if
+ // we need to print them before failing.
//
- build_package* p1 (&bp);
- build_package* p2 (&pkg);
+ if (ignore_unsatisfiable)
+ break;
- if (p1->available_version () != p2->available_version ())
- {
- using constraint_type = build_package::constraint_type;
+ unsatisfied_dependents.emplace (dp.db, dp.package);
+ }
+ }
- // If the versions differ, we have to pick one. Start with the
- // newest version since if both satisfy, then that's the one we
- // should prefer. So get the first to try into p1 and the second
- // to try -- into p2.
- //
- if (p2->available_version () > p1->available_version ())
- swap (p1, p2);
+ if (!satisfactory)
+ {
+ if (!ignore_unsatisfiable)
+ unsatisfiable.emplace_back (av, move (unsatisfied_dependents));
- // See if pv's version satisfies pc's constraints. Return the
- // pointer to the unsatisfied constraint or NULL if all are
- // satisfied.
- //
- auto test = [] (build_package* pv,
- build_package* pc) -> const constraint_type*
- {
- for (const constraint_type& c: pc->constraints)
- {
- if (!satisfies (pv->available_version (), c.value))
- return &c;
- }
+ // If the dependency is expected to be configured as system, then bail
+ // out, as an available package version will always resolve to the
+ // system one (see above).
+ //
+ if (dsys)
+ break;
- return nullptr;
- };
+ continue;
+ }
- // First see if p1 satisfies p2's constraints.
- //
- if (auto c2 = test (p1, p2))
- {
- // If not, try the other way around.
- //
- if (auto c1 = test (p2, p1))
- {
- const package_name& n (i->first);
- const string& d1 (c1->dependent);
- const string& d2 (c2->dependent);
-
- fail << "unable to satisfy constraints on package " << n <<
- info << d1 << " depends on (" << n << " " << c1->value
- << ")" <<
- info << d2 << " depends on (" << n << " " << c2->value
- << ")" <<
- info << "available " << p1->available_name_version () <<
- info << "available " << p2->available_name_version () <<
- info << "explicitly specify " << n << " version to manually "
- << "satisfy both constraints";
- }
- else
- swap (p1, p2);
- }
+ if (orphan_best_match)
+ {
+ // If the exact orphan version is encountered, then we are done.
+ //
+ if (av == *dov)
+ return deorphan_result (move (af), "exactly same version");
- l4 ([&]{trace << "pick " << p1->available_name_version ()
- << " over " << p2->available_name_version ();});
- }
- // If versions are the same, then we still need to pick the entry as
- // one of them can build a package from source while another
- // configure a system package. We prefer a user-selected entry (if
- // there is one). If none of them is user-selected we prefer a
- // source package over a system one.
- //
- else if (p2->user_selection () ||
- (!p1->user_selection () && !p2->system))
- swap (p1, p2);
-
- // See if we are replacing the object. If not, then we don't
- // need to collect its prerequisites since that should have
- // already been done. Remember, p1 points to the object we
- // want to keep.
- //
- bool replace (p1 != &i->second.package);
+ // If the available package is of the same revision as orphan but a
+ // different iteration, then save it as the latest iteration of same
+ // orphan version and revision.
+ //
+ if (deorphan_latest_iteration.first == nullptr &&
+ av.compare (*dov, false /* revision */, true /* iteration */) == 0)
+ deorphan_latest_iteration = af;
- if (replace)
- {
- swap (*p1, *p2);
- swap (p1, p2); // Setup for merge below.
- }
+ // If the available package is of the same version as orphan and its
+ // revision is greater, then save it as the later revision of same
+ // version.
+ //
+ if (deorphan_later_revision.first == nullptr &&
+ av.compare (*dov, true /* revision */) == 0 &&
+ av.compare (*dov, false /* revision */, true /* iteration */) > 0)
+ deorphan_later_revision = af;
+
+ // If the available package is of the same minor version as orphan but
+ // of the greater patch version, then save it as the later patch of
+ // same version.
+ //
+ if (deorphan_later_patch.first == nullptr &&
+ dopc && satisfies (av, *dopc) &&
+ av.compare (*dov, true /* revision */) > 0) // Patch is greater?
+ deorphan_later_patch = af;
+
+ // If the available package is of the same major version as orphan but
+ // of the greater minor version, then save it as the later minor of
+ // same version.
+ //
+ // Note that we can't easily compare minor versions here since these
+ // are bpkg version objects. Thus, we consider that this is a greater
+ // minor version if the version is greater (ignoring revisions) and
+ // the latest patch is not yet saved.
+ //
+ if (deorphan_later_minor.first == nullptr &&
+ domc && satisfies (av, *domc) &&
+ av.compare (*dov, true /* revision */) > 0 &&
+ deorphan_later_patch.first == nullptr)
+ deorphan_later_minor = af;
- p1->merge (move (*p2));
+ // Save the latest available package version.
+ //
+ if (deorphan_latest_available.first == nullptr)
+ deorphan_latest_available = move (af);
- if (!replace)
- return nullptr;
+ // If the available package version is less than the orphan revision
+ // then we can bail out from the loop, since all the versions from the
+ // preference list have already been encountered, if present.
+ //
+ if (av.compare (*dov, false /* revision */, true /* iteration */) < 0)
+ {
+ assert (deorphan_latest_iteration.first != nullptr ||
+ deorphan_later_revision.first != nullptr ||
+ deorphan_later_patch.first != nullptr ||
+ deorphan_later_minor.first != nullptr ||
+ deorphan_latest_available.first != nullptr);
+ break;
}
}
else
{
- // This is the first time we are adding this package name to the map.
+ // In the up/downgrade+deorphan mode always replace the dependency,
+ // re-fetching it from an existing repository if the version stays the
+ // same.
//
- l4 ([&]{trace << "add " << pkg.available_name_version ();});
+ if (deorphan)
+ return deorphan_result (move (af), "constrained version");
- // Note: copy; see emplace() below.
+ // For the regular up/downgrade if the best satisfactory version and
+ // the desired system flag perfectly match the ones of the selected
+ // package, then no package change is required. Otherwise, recommend
+ // an upgrade/downgrade/replacement.
+ //
+ // Note: we need to be careful here not to start yo-yo'ing for a
+ // dependency being built as an existing archive or directory. For
+ // such a dependency we need to return the "no change" recommendation
+ // if any version recommendation (which may not change) has already
+ // been returned.
//
- package_name n (pkg.available->id.name);
- i = map_.emplace (move (n), data_type {end (), move (pkg)}).first;
+ if (dsp != nullptr &&
+ av == dsp->version &&
+ dsp->system () == dsys &&
+ (!existing ||
+ find (existing_deps.begin (), existing_deps.end (),
+ package_key (ddb, nm)) != existing_deps.end ()))
+ {
+ if (reconfigure ())
+ {
+ l5 ([&]{trace << *dsp << ddb << ": reconfigure";});
+ return build_result (move (af));
+ }
+ else
+ {
+ l5 ([&]{trace << *dsp << ddb << ": unchanged";});
+ return no_change ();
+ }
+ }
+ else
+ {
+ l5 ([&]{trace << *sp << db << ": update to "
+ << package_string (nm, av, dsys) << ddb;});
+
+ return build_result (move (af));
+ }
}
+ }
+
+ if (orphan_best_match)
+ {
+ if (deorphan_latest_iteration.first != nullptr)
+ return deorphan_result (move (deorphan_latest_iteration),
+ "latest iteration");
- build_package& p (i->second.package);
+ if (deorphan_later_revision.first != nullptr)
+ return deorphan_result (move (deorphan_later_revision),
+ "later revision");
- // Recursively collect build prerequisites, if requested.
- //
- // Note that detecting dependency cycles during the satisfaction phase
- // would be premature since they may not be present in the final package
- // list. Instead we check for them during the ordering phase.
- //
- // The question, of course, is whether we can still end up with an
- // infinite recursion here? Note that for an existing map entry we only
- // recurse after the entry replacement. The infinite recursion would
- // mean that we may replace a package in the map with the same version
- // multiple times:
- //
- // ... p1 -> p2 -> ... p1
- //
- // Every replacement increases the entry version and/or tightens the
- // constraints the next replacement will need to satisfy. It feels
- // impossible that a package version can "return" into the map being
- // replaced once. So let's wait until some real use case proves this
- // reasoning wrong.
- //
- if (recursively != nullptr)
- collect_build_prerequisites (options, cd, db, p, recursively);
+ if (deorphan_later_patch.first != nullptr)
+ return deorphan_result (move (deorphan_later_patch), "later patch");
- return &p;
+ if (deorphan_later_minor.first != nullptr)
+ return deorphan_result (move (deorphan_later_minor), "later minor");
+
+ if (deorphan_latest_available.first != nullptr)
+ return deorphan_result (move (deorphan_latest_available),
+ "latest available");
}
- // Collect prerequisites of the package being built recursively. But first
- // "prune" this process if the package we build is a system one or is
- // already configured since that would mean all its prerequisites are
- // configured as well. Note that this is not merely an optimization: the
- // package could be an orphan in which case the below logic will fail (no
- // repository fragment in which to search for prerequisites). By skipping
- // the prerequisite check we are able to gracefully handle configured
- // orphans.
+ // If we aim to upgrade to the latest version, then what we currently have
+ // is the only thing that we can get, and so we return the "no change"
+ // result, unless we need to upgrade a package configured as system or to
+ // deorphan.
//
- void
- collect_build_prerequisites (const common_options& options,
- const dir_path& cd,
- database& db,
- const build_package& pkg,
- postponed_packages* postponed)
+ if (!dvc && dsp != nullptr && !dsp->system () && !deorphan)
{
- tracer trace ("collect_build_prerequisites");
-
- assert (pkg.action && *pkg.action == build_package::build);
+ assert (!dsys); // Version cannot be empty for the system package.
- const shared_ptr<selected_package>& sp (pkg.selected);
+ if (reconfigure ())
+ {
+ l5 ([&]{trace << *dsp << ddb << ": reconfigure (only)";});
+ return build_result (find_available_fragment (o, ddb, dsp));
+ }
+ else
+ {
+ l5 ([&]{trace << *dsp << ddb << ": only";});
+ return no_change ();
+ }
+ }
- if (pkg.system ||
- (sp != nullptr &&
- sp->state == package_state::configured &&
- sp->substate != package_substate::system &&
- sp->version == pkg.available_version ()))
- return;
+ // If the version satisfying the desired dependency version constraint is
+ // unavailable or unsatisfiable for some dependents then we fail, unless
+ // requested not to do so. In the latter case we return the "no change"
+ // result.
+ //
+ if (ignore_unsatisfiable)
+ {
+ l5 ([&]{trace << package_string (nm, dvc, dsys) << ddb
+ << (unsatisfiable.empty ()
+ ? ": no source"
+ : ": unsatisfiable");});
- // Show how we got here if things go wrong.
- //
- auto g (
- make_exception_guard (
- [&pkg] ()
- {
- info << "while satisfying " << pkg.available_name_version ();
- }));
+ return no_change ();
+ }
- const shared_ptr<available_package>& ap (pkg.available);
- const shared_ptr<repository_fragment>& af (pkg.repository_fragment);
- const package_name& name (ap->id.name);
+ // If there are no unsatisfiable versions then the package is not present
+ // (or is not available in source) in its dependents' repositories.
+ //
+ if (unsatisfiable.empty ())
+ {
+ diag_record dr (fail);
- for (const dependency_alternatives_ex& da: ap->dependencies)
+ if (patch)
{
- if (da.conditional) // @@ TODO
- fail << "conditional dependencies are not yet supported";
+ // Otherwise, we should have bailed out earlier returning "no change"
+ // (see above).
+ //
+ assert (dsp != nullptr && (dsp->system () || deorphan));
- if (da.size () != 1) // @@ TODO
- fail << "multiple dependency alternatives not yet supported";
+ // Patch (as any upgrade) of a system package is always explicit, so
+ // we always fail and never treat the package as being up to date.
+ //
+ assert (explicitly);
- const dependency& dp (da.front ());
- const package_name& dn (dp.name);
+ fail << "patch version for " << *sp << db << " is not available "
+ << "from its dependents' repositories";
+ }
+ else if (!stub)
+ fail << package_string (nm, dsys ? nullopt : dvc) << ddb
+ << " is not available from its dependents' repositories";
+ else // The only available package is a stub.
+ {
+ // Otherwise, we should have bailed out earlier, returning "no change"
+ // rather than setting the stub flag to true (see above).
+ //
+ assert (!dvc && !dsys && dsp != nullptr && (dsp->system () || deorphan));
- if (da.buildtime)
- {
- // Handle special names.
- //
- if (dn == "build2")
- {
- if (dp.constraint)
- satisfy_build2 (options, name, dp);
+ fail << package_string (nm, dvc) << ddb << " is not available in "
+ << "source from its dependents' repositories";
+ }
+ }
- continue;
- }
- else if (dn == "bpkg")
- {
- if (dp.constraint)
- satisfy_bpkg (options, name, dp);
+ // Issue the diagnostics and fail.
+ //
+ diag_record dr (fail);
+ dr << "package " << nm << ddb << " doesn't satisfy its dependents";
- continue;
- }
- // else
- //
- // @@ TODO: in the future we would need to at least make sure the
- // build and target machines are the same. See also pkg-configure.
- }
+ // Print the list of unsatisfiable versions together with dependents they
+ // don't satisfy: up to three latest versions with no more than five
+ // dependents each.
+ //
+ size_t nv (0);
+ for (const auto& u: unsatisfiable)
+ {
+ dr << info << package_string (nm, u.first) << " doesn't satisfy";
- bool system (false);
- bool dep_optional (false);
+ const sp_set& ps (u.second);
- // If the user specified the desired dependency version constraint,
- // then we will use it to overwrite the constraint imposed by the
- // dependent package, checking that it is still satisfied.
- //
- // Note that we can't just rely on the execution plan refinement that
- // will pick up the proper dependency version at the end of the day.
- // We may just not get to the plan execution simulation, failing due
- // to inability for dependency versions collected by two dependents to
- // satisfy each other constraints (for an example see the
- // pkg-build/dependency/apply-constraints/resolve-conflict{1,2}
- // tests).
-
- // Points to the desired dependency version constraint, if specified,
- // and is NULL otherwise. Can be used as boolean flag.
+ size_t i (0), n (ps.size ());
+ for (auto p (ps.begin ()); i != n; ++p)
+ {
+ // It would probably be nice to also print the unsatisfied constraint
+ // here, but let's keep it simple for now.
//
- const version_constraint* dep_constr (nullptr);
-
- auto i (map_.find (dn));
- if (i != map_.end ())
- {
- const build_package& bp (i->second.package);
-
- dep_optional = !bp.action; // Is pre-entered.
+ dr << (i == 0 ? " " : ", ") << *p->package << p->db;
- if (dep_optional &&
- //
- // The version constraint is specified,
- //
- bp.hold_version && *bp.hold_version)
- {
- assert (bp.constraints.size () == 1);
+ if (++i == 5 && n != 6) // Printing 'and 1 more' looks stupid.
+ break;
+ }
- const build_package::constraint_type& c (bp.constraints[0]);
+ if (i != n)
+ dr << " and " << n - i << " more";
- dep_constr = &c.value;
- system = bp.system;
+ if (++nv == 3 && unsatisfiable.size () != 4)
+ break;
+ }
- // If the user-specified dependency constraint is the wildcard
- // version, then it satisfies any dependency constraint.
- //
- if (!wildcard (*dep_constr) &&
- !satisfies (*dep_constr, dp.constraint))
- fail << "unable to satisfy constraints on package " << dn <<
- info << name << " depends on (" << dn << " "
- << *dp.constraint << ")" <<
- info << c.dependent << " depends on (" << dn << " "
- << c.value << ")" <<
- info << "specify " << dn << " version to satisfy " << name
- << " constraint";
- }
- }
+ if (nv != unsatisfiable.size ())
+ dr << info << "and " << unsatisfiable.size () - nv << " more";
- const dependency& d (!dep_constr
- ? dp
- : dependency {dn, *dep_constr});
+ dr << endf;
+ }
- // First see if this package is already selected. If we already have
- // it in the configuraion and it satisfies our dependency version
- // constraint, then we don't want to be forcing its upgrade (or,
- // worse, downgrade).
- //
- shared_ptr<selected_package> dsp (db.find<selected_package> (dn));
+ // List of dependent packages whose immediate/recursive dependencies must be
+ // upgraded and/or deorphaned (specified with -i/-r on the command line).
+ //
+ struct recursive_package
+ {
+ database& db;
+ package_name name;
- pair<shared_ptr<available_package>,
- shared_ptr<repository_fragment>> rp;
+ // Recursive/immediate upgrade/patch. Note the upgrade member is only
+ // meaningful if recursive is present.
+ //
+ optional<bool> recursive; // true -- recursive, false -- immediate.
+ bool upgrade; // true -- upgrade, false -- patch.
- shared_ptr<available_package>& dap (rp.first);
+ // Recursive/immediate deorphaning.
+ //
+ optional<bool> deorphan; // true -- recursive, false -- immediate.
+ };
+ using recursive_packages = vector<recursive_package>;
- bool force (false);
+ // Recursively check if immediate dependencies of this dependent must be
+ // upgraded or patched and/or deorphaned.
+ //
+ // Cache the results of this function calls to avoid multiple traversals of
+ // the same dependency graphs.
+ //
+ struct upgrade_dependencies_key
+ {
+ package_key dependent;
+ bool recursion;
- if (dsp != nullptr)
- {
- if (dsp->state == package_state::broken)
- fail << "unable to build broken package " << dn <<
- info << "use 'pkg-purge --force' to remove";
+ bool
+ operator< (const upgrade_dependencies_key& v) const
+ {
+ if (recursion != v.recursion)
+ return recursion < v.recursion;
- // If the constraint is imposed by the user we also need to make sure
- // that the system flags are the same.
- //
- if (satisfies (dsp->version, d.constraint) &&
- (!dep_constr || dsp->system () == system))
- {
- system = dsp->system ();
+ return dependent < v.dependent;
+ }
+ };
- // First try to find an available package for this exact version.
- // In particular, this handles the case where a package moves from
- // one repository to another (e.g., from testing to stable). For a
- // system package we pick the latest one (its exact version
- // doesn't really matter).
- //
- shared_ptr<repository_fragment> root (
- db.load<repository_fragment> (""));
-
- rp = system
- ? find_available_one (db, dn, nullopt, root)
- : find_available_one (db,
- dn,
- version_constraint (dsp->version),
- root);
-
- // A stub satisfies any version constraint so we weed them out
- // (returning stub as an available package feels wrong).
- //
- if (dap == nullptr || dap->stub ())
- rp = make_available (options, cd, db, dsp);
- }
- else
- // Remember that we may be forcing up/downgrade; we will deal with
- // it below.
- //
- force = true;
- }
+ struct upgrade_deorphan
+ {
+ optional<bool> upgrade; // true -- upgrade, false -- patch.
+ bool deorphan;
+ };
- // If we didn't get the available package corresponding to the
- // selected package, look for any that satisfies the constraint.
- //
- if (dap == nullptr)
- {
- // And if we have no repository fragment to look in, then that means
- // the package is an orphan (we delay this check until we actually
- // need the repository fragment to allow orphans without
- // prerequisites).
- //
- if (af == nullptr)
- fail << "package " << pkg.available_name_version ()
- << " is orphaned" <<
- info << "explicitly upgrade it to a new version";
-
- // We look for prerequisites only in the repositories of this
- // package (and not in all the repositories of this configuration).
- // At first this might look strange, but it also kind of makes
- // sense: we only use repositories "approved" for this package
- // version. Consider this scenario as an example: hello/1.0.0 and
- // libhello/1.0.0 in stable and libhello/2.0.0 in testing. As a
- // prerequisite of hello, which version should libhello resolve to?
- // While one can probably argue either way, resolving it to 1.0.0 is
- // the conservative choice and the user can always override it by
- // explicitly building libhello.
- //
- // Note though, that if this is a test package, then its special
- // test dependencies (main packages that refer to it) should be
- // searched upstream through the complement repositories
- // recursively, since the test packages may only belong to the main
- // package's repository and its complements.
- //
- // @@ Currently we don't implement the reverse direction search for
- // the test dependencies, effectively only supporting the common
- // case where the main and test packages belong to the same
- // repository. Will need to fix this eventually.
- //
- // Note that this logic (naturally) does not apply if the package is
- // already selected by the user (see above).
- //
- // Also note that for the user-specified dependency version
- // constraint we rely on the satisfying package version be present
- // in repositories of the first dependent met. As a result, we may
- // fail too early if such package version doesn't belong to its
- // repositories, but belongs to the ones of some dependent that
- // we haven't met yet. Can we just search all repositories for an
- // available package of the appropriate version and just take it,
- // if present? We could, but then which repository should we pick?
- // The wrong choice can introduce some unwanted repositories and
- // package versions into play. So instead, we will postpone
- // collecting the problematic dependent, expecting that some other
- // one will find the appropriate version in its repositories.
- //
- // For a system package we pick the latest version just to make sure
- // the package is recognized. An unrecognized package means the
- // broken/stale repository (see below).
- //
- rp = find_available_one (db,
- dn,
- !system ? d.constraint : nullopt,
- af);
+ using upgrade_dependencies_cache = map<upgrade_dependencies_key,
+ upgrade_deorphan>;
- if (dap == nullptr)
- {
- if (dep_constr && !system && postponed)
- {
- postponed->insert (&pkg);
- return;
- }
+ static upgrade_deorphan
+ upgrade_dependencies (database& db,
+ const package_name& nm,
+ const recursive_packages& rs,
+ upgrade_dependencies_cache& cache,
+ bool recursion = false)
+ {
+ // If the result of the upgrade_dependencies() call for these dependent
+ // and recursion flag value is cached, then return that. Otherwise, cache
+ // the calculated result prior to returning it to the caller.
+ //
+ upgrade_dependencies_key k {package_key (db, nm), recursion};
+ {
+ auto i (cache.find (k));
- diag_record dr (fail);
- dr << "unknown dependency " << dn;
+ if (i != cache.end ())
+ return i->second;
+ }
- // We need to be careful not to print the wildcard-based
- // constraint.
- //
- if (d.constraint && (!dep_constr || !wildcard (*dep_constr)))
- dr << ' ' << *d.constraint;
+ auto i (find_if (rs.begin (), rs.end (),
+ [&nm, &db] (const recursive_package& i) -> bool
+ {
+ return i.name == nm && i.db == db;
+ }));
- dr << " of package " << name;
+ upgrade_deorphan r {nullopt /* upgrade */, false /* deorphan */};
- if (!af->location.empty () && (!dep_constr || system))
- dr << info << "repository " << af->location << " appears to "
- << "be broken" <<
- info << "or the repository state could be stale" <<
- info << "run 'bpkg rep-fetch' to update";
- }
+ if (i != rs.end ())
+ {
+ if (i->recursive && *i->recursive >= recursion)
+ r.upgrade = i->upgrade;
- // If all that's available is a stub then we need to make sure the
- // package is present in the system repository and it's version
- // satisfies the constraint. If a source package is available but
- // there is a system package specified on the command line and it's
- // version satisfies the constraint then the system package should
- // be preferred. To recognize such a case we just need to check if
- // the authoritative system version is set and it satisfies the
- // constraint. If the corresponding system package is non-optional
- // it will be preferred anyway.
- //
- if (dap->stub ())
- {
- // Note that the constraint can safely be printed as it can't
- // be a wildcard (produced from the user-specified dependency
- // version constraint). If it were, then the system version
- // wouldn't be NULL and would satisfy itself.
- //
- if (dap->system_version () == nullptr)
- fail << "dependency " << d << " of package " << name << " is "
- << "not available in source" <<
- info << "specify ?sys:" << dn << " if it is available from "
- << "the system";
-
- if (!satisfies (*dap->system_version (), d.constraint))
- fail << "dependency " << d << " of package " << name << " is "
- << "not available in source" <<
- info << package_string (dn,
- *dap->system_version (),
- true /* system */)
- << " does not satisfy the constrains";
-
- system = true;
- }
- else
- {
- auto p (dap->system_version_authoritative ());
+ if (i->deorphan && *i->deorphan >= recursion)
+ r.deorphan = true;
- if (p.first != nullptr &&
- p.second && // Authoritative.
- satisfies (*p.first, d.constraint))
- system = true;
- }
- }
+ // If we both upgrade and deorphan, then we can bail out since the value
+ // may not change any further (upgrade wins over patch and deorphaning
+ // can't be canceled).
+ //
+ if (r.upgrade && *r.upgrade && r.deorphan)
+ {
+ cache[move (k)] = r;
+ return r;
+ }
+ }
- build_package bp {
- build_package::build,
- dsp,
- dap,
- rp.second,
- nullopt, // Hold package.
- nullopt, // Hold version.
- {}, // Constraints.
- system,
- false, // Keep output directory.
- nullopt, // Checkout root.
- false, // Checkout purge.
- strings (), // Configuration variables.
- {name}, // Required by (dependent).
- 0}; // Adjustments.
-
- // Add our constraint, if we have one.
- //
- // Note that we always add the constraint implied by the dependent. The
- // user-implied constraint, if present, will be added when merging from
- // the pre-entered entry. So we will have both constraints for
- // completeness.
- //
- if (dp.constraint)
- bp.constraints.emplace_back (name.string (), *dp.constraint);
-
- // Now collect this prerequisite. If it was actually collected
- // (i.e., it wasn't already there) and we are forcing a downgrade or
- // upgrade, then refuse for a held version, warn for a held package,
- // and print the info message otherwise, unless the verbosity level is
- // less than two.
- //
- // Note though that while the prerequisite was collected it could have
- // happen because it is an optional package and so not being
- // pre-collected earlier. Meanwhile the package was specified
- // explicitly and we shouldn't consider that as a dependency-driven
- // up/down-grade enforcement.
- //
- // Here is an example of the situation we need to handle properly:
- //
- // repo: foo/2(->bar/2), bar/0+1
- // build sys:bar/1
- // build foo ?sys:bar/2
+ for (database& ddb: db.dependent_configs ())
+ {
+ for (auto& pd: query_dependents_cache (ddb, nm, db))
+ {
+ // Note that we cannot end up with an infinite recursion for
+ // configured packages due to a dependency cycle (see order() for
+ // details).
//
- const build_package* p (
- collect_build (options, cd, db, move (bp), postponed));
+ upgrade_deorphan ud (
+ upgrade_dependencies (ddb, pd.name, rs, cache, true /* recursion */));
- if (p != nullptr && force && !dep_optional)
+ if (ud.upgrade || ud.deorphan)
{
- // Fail if the version is held. Otherwise, warn if the package is
- // held.
+ // Upgrade wins over patch.
//
- bool f (dsp->hold_version);
- bool w (!f && dsp->hold_package);
-
- if (f || w || verb >= 2)
- {
- const version& av (p->available_version ());
+ if (ud.upgrade && (!r.upgrade || *r.upgrade < *ud.upgrade))
+ r.upgrade = *ud.upgrade;
- bool u (av > dsp->version);
- bool c (d.constraint);
+ if (ud.deorphan)
+ r.deorphan = true;
- diag_record dr;
-
- (f ? dr << fail :
- w ? dr << warn :
- dr << info)
- << "package " << name << " dependency on "
- << (c ? "(" : "") << d << (c ? ")" : "") << " is forcing "
- << (u ? "up" : "down") << "grade of " << *dsp << " to ";
-
- // Print both (old and new) package names in full if the system
- // attribution changes.
- //
- if (dsp->system ())
- dr << p->available_name_version ();
- else
- dr << av; // Can't be a system version so is never wildcard.
-
- if (dsp->hold_version)
- dr << info << "package version " << *dsp << " is held";
-
- if (f)
- dr << info << "explicitly request version "
- << (u ? "up" : "down") << "grade to continue";
+ // If we both upgrade and deorphan, then we can bail out (see above
+ // for details).
+ //
+ if (r.upgrade && *r.upgrade && r.deorphan)
+ {
+ cache[move (k)] = r;
+ return r;
}
}
}
}
- // Collect the package being dropped.
- //
- void
- collect_drop (shared_ptr<selected_package> sp)
- {
- const package_name& nm (sp->name);
-
- build_package p {
- build_package::drop,
- move (sp),
- nullptr,
- nullptr,
- nullopt, // Hold package.
- nullopt, // Hold version.
- {}, // Constraints.
- false, // System package.
- false, // Keep output directory.
- nullopt, // Checkout root.
- false, // Checkout purge.
- strings (), // Configuration variables.
- {}, // Required by.
- 0}; // Adjustments.
-
- auto i (map_.find (nm));
-
- if (i != map_.end ())
- {
- build_package& bp (i->second.package);
+ cache[move (k)] = r;
+ return r;
+ }
- // Can't think of the scenario when this happens. We would start
- // collecting from scratch (see below).
- //
- assert (!bp.action || *bp.action != build_package::build);
+ // Evaluate a package (not necessarily dependency) and return a new desired
+ // version. If the result is absent (nullopt), then no changes to the
+ // package are necessary. Otherwise, the result is available_package to
+ // upgrade/downgrade to or replace with, as well as the repository fragment
+ // it must come from.
+ //
+ // If the system package cannot be upgraded to the source one, not being
+ // found in the dependents repositories, then return nullopt if
+ // ignore_unsatisfiable argument is true and fail otherwise (see the
+ // evaluate_dependency() function description for details).
+ //
+ static optional<evaluate_result>
+ evaluate_recursive (const common_options& o,
+ database& db,
+ const shared_ptr<selected_package>& sp,
+ const recursive_packages& recs,
+ const existing_dependencies& existing_deps,
+ const deorphaned_dependencies& deorphaned_deps,
+ const build_packages& pkgs,
+ bool ignore_unsatisfiable,
+ upgrade_dependencies_cache& cache)
+ {
+ tracer trace ("evaluate_recursive");
- // Overwrite the existing (possibly pre-entered or adjustment) entry.
- //
- bp = move (p);
- }
- else
- map_.emplace (nm, data_type {end (), move (p)});
- }
+ assert (sp != nullptr);
- // Collect the package being unheld.
+ // Build a set of repository fragment the dependent packages come from.
+ // Also cache the dependents and the constraints they apply to this
+ // dependency.
//
- void
- collect_unhold (const shared_ptr<selected_package>& sp)
+ config_repo_fragments repo_frags;
+ dependent_constraints dpt_constrs;
+
+ // Only collect repository fragments (for best version selection) of
+ // (immediate) dependents that have a hit (direct or indirect) in recs.
+ // Note, however, that we collect constraints from all the dependents.
+ //
+ upgrade_deorphan ud {nullopt /* upgrade */, false /* deorphan */};
+
+ for (database& ddb: db.dependent_configs ())
{
- auto i (map_.find (sp->name));
+ for (auto& pd: query_dependents_cache (ddb, sp->name, db))
+ {
+ shared_ptr<selected_package> p (ddb.load<selected_package> (pd.name));
- // Currently, it must always be pre-entered.
- //
- assert (i != map_.end ());
+ dpt_constrs.emplace_back (ddb, p, move (pd.constraint));
- build_package& bp (i->second.package);
+ upgrade_deorphan u (upgrade_dependencies (ddb, pd.name, recs, cache));
- if (!bp.action) // Pre-entered.
- {
- build_package p {
- build_package::adjust,
- sp,
- nullptr,
- nullptr,
- nullopt, // Hold package.
- nullopt, // Hold version.
- {}, // Constraints.
- false, // System package.
- false, // Keep output directory.
- nullopt, // Checkout root.
- false, // Checkout purge.
- strings (), // Configuration variables.
- {}, // Required by.
- build_package::adjust_unhold};
-
- p.merge (move (bp));
- bp = move (p);
+ if (u.upgrade || u.deorphan)
+ {
+ // Upgrade wins over patch.
+ //
+ if (u.upgrade && (!ud.upgrade || *ud.upgrade < *u.upgrade))
+ ud.upgrade = *u.upgrade;
+
+ if (u.deorphan)
+ ud.deorphan = true;
+ }
+ else
+ continue;
+
+ // While we already know that the dependency upgrade is required, we
+ // continue to iterate over dependents, collecting the repository
+ // fragments and the constraints.
+ //
+ add_dependent_repo_fragments (ddb, p, repo_frags);
}
- else
- bp.adjustments |= build_package::adjust_unhold;
}
- void
- collect_build_prerequisites (const common_options& o,
- const dir_path& cd,
- database& db,
- const package_name& name,
- postponed_packages& postponed)
+ if (!ud.upgrade && !ud.deorphan)
{
- auto mi (map_.find (name));
- assert (mi != map_.end ());
- collect_build_prerequisites (o, cd, db, mi->second.package, &postponed);
+ l5 ([&]{trace << *sp << db << ": no hit";});
+ return nullopt;
}
- void
- collect_build_postponed (const common_options& o,
- const dir_path& cd,
- database& db,
- postponed_packages& pkgs)
- {
- // Try collecting postponed packages for as long as we are making
- // progress.
- //
- for (bool prog (true); !pkgs.empty (); )
- {
- postponed_packages npkgs;
-
- for (const build_package* p: pkgs)
- collect_build_prerequisites (o, cd, db, *p, prog ? &npkgs : nullptr);
+ pair<shared_ptr<available_package>,
+ lazy_shared_ptr<repository_fragment>> rp (
+ find_existing (db, sp->name, nullopt /* version_constraint */));
- assert (prog); // collect_build_prerequisites() should have failed.
- prog = (npkgs != pkgs);
- pkgs.swap (npkgs);
- }
- }
+ optional<evaluate_result> r (
+ evaluate_dependency (o,
+ db,
+ sp,
+ nullopt /* desired */,
+ false /* desired_sys */,
+ rp.first != nullptr /* existing */,
+ db,
+ sp,
+ ud.upgrade,
+ ud.deorphan,
+ false /* explicitly */,
+ repo_frags,
+ dpt_constrs,
+ existing_deps,
+ deorphaned_deps,
+ pkgs,
+ ignore_unsatisfiable));
- // Order the previously-collected package with the specified name
- // returning its positions. Recursively order the package dependencies
- // being ordered failing if a dependency cycle is detected. If reorder is
- // true, then reorder this package to be considered as "early" as
- // possible.
+ // Translate the "no change" result into nullopt.
//
- iterator
- order (const package_name& name, bool reorder = true)
+ assert (!r || !r->unused);
+ return r && r->available == nullptr ? nullopt : r;
+ }
+
+ // Stack of the command line adjustments as per unsatisfied_dependents
+ // description.
+ //
+ struct cmdline_adjustment
+ {
+ enum class adjustment_type: uint8_t
{
- package_names chain;
- return order (name, chain, reorder);
- }
+ hold_existing, // Adjust constraint in existing build-to-hold spec.
+ dep_existing, // Adjust constraint in existing dependency spec.
+ hold_new, // Add new build-to-hold spec.
+ dep_new // Add new dependency spec.
+ };
- // If a configured package is being up/down-graded then that means
- // all its dependents could be affected and we have to reconfigure
- // them. This function examines every package that is already on
- // the list and collects and orders all its dependents. We also need
- // to make sure the dependents are ok with the up/downgrade.
- //
- // Should we reconfigure just the direct depends or also include
- // indirect, recursively? Consider this plauisible scenario as an
- // example: We are upgrading a package to a version that provides
- // an additional API. When its direct dependent gets reconfigured,
- // it notices this new API and exposes its own extra functionality
- // that is based on it. Now it would make sense to let its own
- // dependents (which would be our original package's indirect ones)
- // to also notice this.
+ adjustment_type type;
+ reference_wrapper<database> db;
+ package_name name;
+ bpkg::version version; // Replacement.
+
+ // Meaningful only for the *_new types.
//
- void
- collect_order_dependents (database& db)
- {
- // For each package on the list we want to insert all its dependents
- // before it so that they get configured after the package on which
- // they depend is configured (remember, our build order is reverse,
- // with the last package being built first). This applies to both
- // packages that are already on the list as well as the ones that
- // we add, recursively.
- //
- for (auto i (begin ()); i != end (); ++i)
- {
- const build_package& p (*i);
+ optional<bool> upgrade;
+ bool deorphan = false;
- // Prune if this is not a configured package being up/down-graded
- // or reconfigured.
- //
- assert (p.action);
+ // For the newly created or popped from the stack object the following
+ // three members contain the package version replacement information.
+ // Otherwise (pushed to the stack), they contain the original command line
+ // spec information.
+ //
+ shared_ptr<available_package> available; // NULL for dep_* types.
+ lazy_shared_ptr<bpkg::repository_fragment> repository_fragment; // As above.
+ optional<version_constraint> constraint;
- // Dropped package may have no dependents.
- //
- if (*p.action != build_package::drop && p.reconfigure ())
- collect_order_dependents (db, i);
- }
- }
+ // Create object of the hold_existing type.
+ //
+ cmdline_adjustment (database& d,
+ const package_name& n,
+ shared_ptr<available_package>&& a,
+ lazy_shared_ptr<bpkg::repository_fragment>&& f)
+ : type (adjustment_type::hold_existing),
+ db (d),
+ name (n),
+ version (a->version),
+ available (move (a)),
+ repository_fragment (move (f)),
+ constraint (version_constraint (version)) {}
+
+ // Create object of the dep_existing type.
+ //
+ cmdline_adjustment (database& d,
+ const package_name& n,
+ const bpkg::version& v)
+ : type (adjustment_type::dep_existing),
+ db (d),
+ name (n),
+ version (v),
+ constraint (version_constraint (version)) {}
+
+ // Create object of the hold_new type.
+ //
+ cmdline_adjustment (database& d,
+ const package_name& n,
+ shared_ptr<available_package>&& a,
+ lazy_shared_ptr<bpkg::repository_fragment>&& f,
+ optional<bool> u,
+ bool o)
+ : type (adjustment_type::hold_new),
+ db (d),
+ name (n),
+ version (a->version),
+ upgrade (u),
+ deorphan (o),
+ available (move (a)),
+ repository_fragment (move (f)),
+ constraint (version_constraint (version)) {}
+
+ // Create object of the dep_new type.
+ //
+ cmdline_adjustment (database& d,
+ const package_name& n,
+ const bpkg::version& v,
+ optional<bool> u,
+ bool o)
+ : type (adjustment_type::dep_new),
+ db (d),
+ name (n),
+ version (v),
+ upgrade (u),
+ deorphan (o),
+ constraint (version_constraint (version)) {}
+ };
+ class cmdline_adjustments
+ {
+ public:
+ cmdline_adjustments (vector<build_package>& hps, dependency_packages& dps)
+ : hold_pkgs_ (hps),
+ dep_pkgs_ (dps) {}
+
+ // Apply the specified adjustment to the command line, push the adjustment
+ // to the stack, and record the resulting command line state as the SHA256
+ // checksum.
+ //
void
- collect_order_dependents (database& db, iterator pos)
+ push (cmdline_adjustment&& a)
{
- tracer trace ("collect_order_dependents");
-
- assert (pos != end ());
-
- build_package& p (*pos);
- const shared_ptr<selected_package>& sp (p.selected);
+ using type = cmdline_adjustment::adjustment_type;
- const package_name& n (sp->name);
-
- // See if we are up/downgrading this package. In particular, the
- // available package could be NULL meaning we are just adjusting.
+ // We always set the `== <version>` constraint in the resulting spec.
//
- int ud (p.available != nullptr
- ? sp->version.compare (p.available_version ())
- : 0);
+ assert (a.constraint);
- using query = query<package_dependent>;
+ database& db (a.db);
+ const package_name& nm (a.name);
+ package_version_key cmd_line (db.main_database (), "command line");
- for (auto& pd: db.query<package_dependent> (query::name == n))
+ switch (a.type)
{
- package_name& dn (pd.name);
- auto i (map_.find (dn));
+ case type::hold_existing:
+ {
+ auto i (find_hold_pkg (a));
+ assert (i != hold_pkgs_.end ()); // As per adjustment type.
- // First make sure the up/downgraded package still satisfies this
- // dependent.
- //
- bool check (ud != 0 && pd.constraint);
+ build_package& bp (*i);
+ swap (bp.available, a.available);
+ swap (bp.repository_fragment, a.repository_fragment);
- // There is one tricky aspect: the dependent could be in the process
- // of being up/downgraded as well. In this case all we need to do is
- // detect this situation and skip the test since all the (new)
- // contraints of this package have been satisfied in collect_build().
- //
- if (check && i != map_.end () && i->second.position != end ())
- {
- build_package& dp (i->second.package);
+ if (!bp.constraints.empty ())
+ {
+ swap (bp.constraints[0].value, *a.constraint);
+ }
+ else
+ {
+ bp.constraints.emplace_back (move (*a.constraint),
+ cmd_line.db,
+ cmd_line.name.string ());
+ a.constraint = nullopt;
+ }
- check = dp.available == nullptr ||
- (dp.selected->system () == dp.system &&
- dp.selected->version == dp.available_version ());
+ break;
}
-
- if (check)
+ case type::dep_existing:
+ {
+ auto i (find_dep_pkg (a));
+ assert (i != dep_pkgs_.end ()); // As per adjustment type.
+ swap (i->constraint, a.constraint);
+ break;
+ }
+ case type::hold_new:
{
- const version& av (p.available_version ());
- const version_constraint& c (*pd.constraint);
+ // As per adjustment type.
+ //
+ assert (find_hold_pkg (a) == hold_pkgs_.end ());
- if (!satisfies (av, c))
- {
- diag_record dr (fail);
+ // Start the database transaction to perform the
+ // database::find<selected_package> call, unless we are already in
+ // the transaction.
+ //
+ transaction t (db, !transaction::has_current ());
- dr << "unable to " << (ud < 0 ? "up" : "down") << "grade "
- << "package " << *sp << " to ";
+ build_package bp {
+ build_package::build,
+ db,
+ db.find<selected_package> (nm),
+ move (a.available),
+ move (a.repository_fragment),
+ nullopt, // Dependencies.
+ nullopt, // Dependencies alternatives.
+ nullopt, // Package skeleton.
+ nullopt, // Postponed dependency alternatives.
+ false, // Recursive collection.
+ true, // Hold package.
+ false, // Hold version.
+ {}, // Constraints.
+ false, // System.
+ false, // Keep output directory.
+ false, // Disfigure (from-scratch reconf).
+ false, // Configure-only.
+ nullopt, // Checkout root.
+ false, // Checkout purge.
+ strings (), // Configuration variables.
+ a.upgrade,
+ a.deorphan,
+ {cmd_line}, // Required by (command line).
+ false, // Required by dependents.
+ (a.deorphan
+ ? build_package::build_replace
+ : uint16_t (0))};
- // Print both (old and new) package names in full if the system
- // attribution changes.
- //
- if (p.system != sp->system ())
- dr << p.available_name_version ();
- else
- dr << av; // Can't be the wildcard otherwise would satisfy.
+ t.commit ();
- dr << info << "because package " << dn << " depends on (" << n
- << " " << c << ")";
+ bp.constraints.emplace_back (move (*a.constraint),
+ cmd_line.db,
+ cmd_line.name.string ());
- string rb;
- if (!p.user_selection ())
- {
- for (const package_name& n: p.required_by)
- rb += ' ' + n.string ();
- }
+ a.constraint = nullopt;
- if (!rb.empty ())
- dr << info << "package " << p.available_name_version ()
- << " required by" << rb;
+ hold_pkgs_.push_back (move (bp));
+ break;
+ }
+ case type::dep_new:
+ {
+ // As per adjustment type.
+ //
+ assert (find_dep_pkg (a) == dep_pkgs_.end ());
- dr << info << "explicitly request up/downgrade of package " << dn;
+ // Start the database transaction to perform the
+ // database::find<selected_package> call, unless we are already in
+ // the transaction.
+ //
+ transaction t (db, !transaction::has_current ());
+
+ dep_pkgs_.push_back (
+ dependency_package {&db,
+ nm,
+ move (*a.constraint),
+ false /* hold_version */,
+ db.find<selected_package> (nm),
+ false /* system */,
+ false /* existing */,
+ a.upgrade,
+ a.deorphan,
+ false /* keep_out */,
+ false /* disfigure */,
+ nullopt /* checkout_root */,
+ false /* checkout_purge */,
+ strings () /* config_vars */,
+ nullptr /* system_status */});
- dr << info << "or explicitly specify package " << n << " version "
- << "to manually satisfy these constraints";
- }
+ t.commit ();
- // Add this contraint to the list for completeness.
- //
- p.constraints.emplace_back (dn.string (), c);
+ a.constraint = nullopt;
+ break;
}
+ }
- auto adjustment = [&dn, &n, &db] () -> build_package
- {
- shared_ptr<selected_package> dsp (db.load<selected_package> (dn));
- bool system (dsp->system ()); // Save flag before the move(dsp) call.
+ packages_.insert (package_version_key (db, nm, a.version));
+ adjustments_.push_back (move (a));
+ former_states_.insert (state ());
+ }
- return build_package {
- build_package::adjust,
- move (dsp),
- nullptr, // No available package/repository fragment.
- nullptr,
- nullopt, // Hold package.
- nullopt, // Hold version.
- {}, // Constraints.
- system,
- false, // Keep output directory.
- nullopt, // Checkout root.
- false, // Checkout purge.
- strings (), // Configuration variables.
- {n}, // Required by (dependency).
- build_package::adjust_reconfigure};
- };
+ // Roll back the latest (default) or first command line adjustment, pop it
+ // from the stack, and return the popped adjustment. Assume that the stack
+ // is not empty.
+ //
+ // Note that the returned object can be pushed to the stack again.
+ //
+ cmdline_adjustment
+ pop (bool first = false)
+ {
+ using type = cmdline_adjustment::adjustment_type;
- // We can have three cases here: the package is already on the
- // list, the package is in the map (but not on the list) and it
- // is in neither.
- //
- // If the existing entry is a drop, then we skip it. If it is
- // pre-entered, is an adjustment, or is a build that is not supposed
- // to be built (not in the list), then we merge it into the new
- // adjustment entry. Otherwise (is a build in the list), we just add
- // the reconfigure adjustment flag to it.
- //
- if (i != map_.end ())
+ assert (!empty ());
+
+ // Pop the adjustment.
+ //
+ cmdline_adjustment a (move (!first
+ ? adjustments_.back ()
+ : adjustments_.front ()));
+ if (!first)
+ adjustments_.pop_back ();
+ else
+ adjustments_.erase (adjustments_.begin ());
+
+ packages_.erase (package_version_key (a.db, a.name, a.version));
+
+ // Roll back the adjustment.
+ //
+ switch (a.type)
+ {
+ case type::hold_existing:
{
- build_package& dp (i->second.package);
- iterator& dpos (i->second.position);
+ auto i (find_hold_pkg (a));
+ assert (i != hold_pkgs_.end ());
- if (!dp.action || // Pre-entered.
- *dp.action != build_package::build || // Non-build.
- dpos == end ()) // Build not in the list.
- {
- // Skip the droped package.
- //
- if (dp.action && *dp.action == build_package::drop)
- continue;
+ build_package& bp (*i);
+ swap (bp.available, a.available);
+ swap (bp.repository_fragment, a.repository_fragment);
- build_package bp (adjustment ());
- bp.merge (move (dp));
- dp = move (bp);
- }
- else // Build in the list.
- dp.adjustments |= build_package::adjust_reconfigure;
-
- // It may happen that the dependent is already in the list but is
- // not properly ordered against its dependencies that get into the
- // list via another dependency path. Thus, we check if the dependent
- // is to the right of its dependency and, if that's the case,
- // reinsert it in front of the dependency.
+ // Must contain the replacement version.
//
- if (dpos != end ())
+ assert (!bp.constraints.empty ());
+
+ version_constraint& c (bp.constraints[0].value);
+
+ if (a.constraint) // Original spec contains a version constraint?
{
- for (auto i (pos); i != end (); ++i)
- {
- if (i == dpos)
- {
- erase (dpos);
- dpos = insert (pos, dp);
- break;
- }
- }
+ swap (c, *a.constraint);
}
else
- dpos = insert (pos, dp);
+ {
+ a.constraint = move (c);
+ bp.constraints.clear ();
+ }
+
+ break;
}
- else
+ case type::dep_existing:
+ {
+ auto i (find_dep_pkg (a));
+ assert (i != dep_pkgs_.end ());
+ swap (i->constraint, a.constraint);
+ break;
+ }
+ case type::hold_new:
{
- i = map_.emplace (
- move (dn), data_type {end (), adjustment ()}).first;
+ auto i (find_hold_pkg (a));
+ assert (i != hold_pkgs_.end ());
- i->second.position = insert (pos, i->second.package);
+ build_package& bp (*i);
+ a.available = move (bp.available);
+ a.repository_fragment = move (bp.repository_fragment);
+
+ // Must contain the replacement version.
+ //
+ assert (!bp.constraints.empty ());
+
+ a.constraint = move (bp.constraints[0].value);
+
+ hold_pkgs_.erase (i);
+ break;
}
+ case type::dep_new:
+ {
+ auto i (find_dep_pkg (a));
+ assert (i != dep_pkgs_.end ());
- // Recursively collect our own dependents inserting them before us.
- //
- // Note that we cannot end up with an infinite recursion for
- // configured packages due to a dependency cycle (see order() for
- // details).
- //
- collect_order_dependents (db, i->second.position);
+ a.constraint = move (i->constraint);
+
+ dep_pkgs_.erase (i);
+ break;
+ }
}
- }
- void
- clear ()
- {
- build_package_list::clear ();
- map_.clear ();
+ return a;
}
- void
- clear_order ()
+ // Return the specified adjustment's string representation in the
+ // following form:
+ //
+ // hold_existing: '<pkg>[ <constraint>][ <database>]' -> '<pkg> <constraint>'
+ // dep_existing: '?<pkg>[ <constraint>][ <database>]' -> '?<pkg> <constraint>'
+ // hold_new: '<pkg> <constraint>[ <database>]'
+ // dep_new: '?<pkg> <constraint>[ <database>]'
+ //
+ // Note: the adjustment is assumed to be newly created or be popped from
+ // the stack.
+ //
+ string
+ to_string (const cmdline_adjustment& a) const
{
- build_package_list::clear ();
+ using type = cmdline_adjustment::adjustment_type;
- for (auto& p: map_)
- p.second.position = end ();
- }
+ assert (a.constraint); // Since not pushed.
- private:
- using package_names = small_vector<reference_wrapper<const package_name>,
- 16>;
+ const string& s (a.db.get ().string);
- iterator
- order (const package_name& name, package_names& chain, bool reorder)
- {
- // Every package that we order should have already been collected.
- //
- auto mi (map_.find (name));
- assert (mi != map_.end ());
+ switch (a.type)
+ {
+ case type::hold_existing:
+ {
+ string r ("'" + a.name.string ());
- build_package& p (mi->second.package);
+ auto i (find_hold_pkg (a));
+ assert (i != hold_pkgs_.end ());
- assert (p.action); // Can't order just a pre-entered package.
+ const build_package& bp (*i);
+ if (!bp.constraints.empty ())
+ r += ' ' + bp.constraints[0].value.string ();
- // Make sure there is no dependency cycle.
- //
- {
- auto i (find (chain.begin (), chain.end (), name));
+ if (!s.empty ())
+ r += ' ' + s;
+
+ r += "' -> '" + a.name.string () + ' ' + a.constraint->string () +
+ "'";
- if (i != chain.end ())
+ return r;
+ }
+ case type::dep_existing:
{
- diag_record dr (fail);
- dr << "dependency cycle detected involving package " << name;
+ string r ("'?" + a.name.string ());
- auto nv = [this] (const package_name& name)
- {
- auto mi (map_.find (name));
- assert (mi != map_.end ());
+ auto i (find_dep_pkg (a));
+ assert (i != dep_pkgs_.end ());
- build_package& p (mi->second.package);
+ if (i->constraint)
+ r += ' ' + i->constraint->string ();
- assert (p.action); // See above.
+ if (!s.empty ())
+ r += ' ' + s;
- // We cannot end up with a dependency cycle for actions other than
- // build since these packages are configured and we would fail on
- // a previous run while building them.
- //
- assert (p.available != nullptr);
+ r += "' -> '?" + a.name.string () + ' ' + a.constraint->string () +
+ "'";
- return p.available_name_version ();
- };
+ return r;
+ }
+ case type::hold_new:
+ {
+ assert (find_hold_pkg (a) == hold_pkgs_.end ());
- for (chain.push_back (name); i != chain.end () - 1; ++i)
- dr << info << nv (*i) << " depends on " << nv (*(i + 1));
+ string r ("'" + a.name.string () + ' ' + a.constraint->string ());
+
+ if (!s.empty ())
+ r += ' ' + s;
+
+ r += "'";
+ return r;
}
- }
+ case type::dep_new:
+ {
+ assert (find_dep_pkg (a) == dep_pkgs_.end ());
- // If this package is already in the list, then that would also
- // mean all its prerequisites are in the list and we can just
- // return its position. Unless we want it reordered.
- //
- iterator& pos (mi->second.position);
- if (pos != end ())
- {
- if (reorder)
- erase (pos);
- else
- return pos;
+ string r ("'?" + a.name.string () + ' ' + a.constraint->string ());
+
+ if (!s.empty ())
+ r += ' ' + s;
+
+ r += "'";
+ return r;
+ }
}
- // Order all the prerequisites of this package and compute the
- // position of its "earliest" prerequisite -- this is where it
- // will be inserted.
- //
- const shared_ptr<selected_package>& sp (p.selected);
- const shared_ptr<available_package>& ap (p.available);
+ assert (false); // Can't be here.
+ return "";
+ }
- bool build (*p.action == build_package::build);
+ // Return true, if there are no adjustments in the stack.
+ //
+ bool
+ empty () const
+ {
+ return adjustments_.empty ();
+ }
- // Package build must always have the available package associated.
- //
- assert (!build || ap != nullptr);
+ // Return true, if push() has been called at least once.
+ //
+ bool
+ tried () const
+ {
+ return !former_states_.empty ();
+ }
- // Unless this package needs something to be before it, add it to
- // the end of the list.
- //
- iterator i (end ());
+ // Return the number of adjustments in the stack.
+ //
+ size_t
+ size () const
+ {
+ return adjustments_.size ();
+ }
- // Figure out if j is before i, in which case set i to j. The goal
- // here is to find the position of our "earliest" prerequisite.
- //
- auto update = [this, &i] (iterator j)
- {
- for (iterator k (j); i != j && k != end ();)
- if (++k == i)
- i = j;
- };
+ // Return true if replacing a package build with the specified version
+ // will result in a command line which has already been (unsuccessfully)
+ // tried as a starting point for the package builds re-collection.
+ //
+ bool
+ tried_earlier (database& db, const package_name& n, const version& v) const
+ {
+ if (former_states_.empty ())
+ return false;
- // Similar to collect_build(), we can prune if the package is already
- // configured, right? While in collect_build() we didn't need to add
- // prerequisites of such a package, it doesn't mean that they actually
- // never ended up in the map via another dependency path. For example,
- // some can be a part of the initial selection. And in that case we must
- // order things properly.
+ // Similar to the state() function, calculate the checksum over the
+ // packages set, but also consider the specified package version as if
+ // it were present in the set.
//
- // Also, if the package we are ordering is not a system one and needs to
- // be disfigured during the plan execution, then we must order its
- // (current) dependencies that also need to be disfigured.
+ // Note that the specified package version may not be in the set, since
+ // we shouldn't be trying to replace with the package version which is
+ // already in the command line.
//
- bool src_conf (sp != nullptr &&
- sp->state == package_state::configured &&
- sp->substate != package_substate::system);
+ sha256 cs;
- auto disfigure = [] (const build_package& p)
+ auto lt = [&db, &n, &v] (const package_version_key& pvk)
{
- return p.action && (*p.action == build_package::drop ||
- p.reconfigure ());
- };
+ if (int r = n.compare (pvk.name))
+ return r < 0;
- bool order_disfigured (src_conf && disfigure (p));
+ if (int r = v.compare (*pvk.version))
+ return r < 0;
- chain.push_back (name);
+ return db < pvk.db;
+ };
- // Order the build dependencies.
- //
- if (build && !p.system)
+ bool appended (false);
+ for (const package_version_key& p: packages_)
{
- // So here we are going to do things differently depending on
- // whether the package is already configured or not. If it is and
- // not as a system package, then that means we can use its
- // prerequisites list. Otherwise, we use the manifest data.
- //
- if (src_conf && sp->version == p.available_version ())
- {
- for (const auto& p: sp->prerequisites)
- {
- const package_name& name (p.first.object_id ());
+ assert (p.version); // Only the real packages can be here.
- // The prerequisites may not necessarily be in the map.
- //
- auto i (map_.find (name));
- if (i != map_.end () && i->second.package.action)
- update (order (name, chain, false /* reorder */));
- }
+ if (!appended && lt (p))
+ {
+ cs.append (db.config.string ());
+ cs.append (n.string ());
+ cs.append (v.string ());
- // We just ordered them among other prerequisites.
- //
- order_disfigured = false;
+ appended = true;
}
- else
- {
- // We are iterating in reverse so that when we iterate over
- // the dependency list (also in reverse), prerequisites will
- // be built in the order that is as close to the manifest as
- // possible.
- //
- for (const dependency_alternatives_ex& da:
- reverse_iterate (ap->dependencies))
- {
- assert (!da.conditional && da.size () == 1); // @@ TODO
- const dependency& d (da.front ());
- const package_name& dn (d.name);
- // Skip special names.
- //
- if (da.buildtime && (dn == "build2" || dn == "bpkg"))
- continue;
+ cs.append (p.db.get ().config.string ());
+ cs.append (p.name.string ());
+ cs.append (p.version->string ());
+ }
- update (order (d.name, chain, false /* reorder */));
- }
- }
+ if (!appended)
+ {
+ cs.append (db.config.string ());
+ cs.append (n.string ());
+ cs.append (v.string ());
}
- // Order the dependencies being disfigured.
+ return former_states_.find (cs.string ()) != former_states_.end ();
+ }
+
+ private:
+ // Return the SHA256 checksum of the current command line state.
+ //
+ string
+ state () const
+ {
+ // NOTE: remember to update tried_earlier() if changing anything here.
//
- if (order_disfigured)
+ sha256 cs;
+ for (const package_version_key& p: packages_)
{
- for (const auto& p: sp->prerequisites)
- {
- const package_name& name (p.first.object_id ());
+ assert (p.version); // Only the real packages can be here.
- // The prerequisites may not necessarily be in the map.
- //
- auto i (map_.find (name));
-
- if (i != map_.end () && disfigure (i->second.package))
- update (order (name, chain, false /* reorder */));
- }
+ cs.append (p.db.get ().config.string ());
+ cs.append (p.name.string ());
+ cs.append (p.version->string ());
}
- chain.pop_back ();
+ return cs.string ();
+ }
- return pos = insert (i, p);
+ // Find the command line package spec an adjustment applies to.
+ //
+ vector<build_package>::iterator
+ find_hold_pkg (const cmdline_adjustment& a) const
+ {
+ return find_if (hold_pkgs_.begin (), hold_pkgs_.end (),
+ [&a] (const build_package& p)
+ {
+ return p.name () == a.name && p.db == a.db;
+ });
}
- private:
- struct data_type
+ dependency_packages::iterator
+ find_dep_pkg (const cmdline_adjustment& a) const
{
- iterator position; // Note: can be end(), see collect_build().
- build_package package;
- };
+ return find_if (dep_pkgs_.begin (), dep_pkgs_.end (),
+ [&a] (const dependency_package& p)
+ {
+ return p.name == a.name &&
+ p.db != nullptr &&
+ *p.db == a.db;
+ });
+ }
- map<package_name, data_type> map_;
+ private:
+ vector<build_package>& hold_pkgs_;
+ dependency_packages& dep_pkgs_;
+
+ vector<cmdline_adjustment> adjustments_; // Adjustments stack.
+ set<package_version_key> packages_; // Replacements.
+ set<string> former_states_; // Command line seen states.
};
- // Return a patch version constraint for the selected package if it has a
- // standard version, otherwise, if requested, issue a warning and return
- // nullopt.
+ // Try to replace a collected package with a different available version,
+ // satisfactory for all its new and/or existing dependents. Return the
+ // command line adjustment if such a replacement is deduced and nullopt
+ // otherwise. In the latter case, also return the list of the being built
+ // dependents which are unsatisfied by some of the dependency available
+ // versions (unsatisfied_dpts argument).
//
- // Note that the function may also issue a warning and return nullopt if the
- // selected package minor version reached the limit (see
- // standard-version.cxx for details).
+ // Specifically, try to find the best available package version considering
+ // all the imposed constraints as per unsatisfied_dependents description. If
+ // successful, return the command line adjustment reflecting the replacement.
//
- static optional<version_constraint>
- patch_constraint (const shared_ptr<selected_package>& sp, bool quiet = false)
+ // Notes:
+ //
+ // - Doesn't perform the actual adjustment of the command line.
+ //
+ // - Expected to be called after the execution plan is fully refined. That,
+ // in particular, means that all the existing dependents are also
+ // collected and thus the constraints they impose are already in their
+ // dependencies' constraints lists.
+ //
+ // - The specified package version may or may not be satisfactory for its
+ // new and existing dependents.
+ //
+ // - The replacement is denied in the following cases:
+ //
+ // - If it turns out that the package has been specified on the command
+ // line (by the user or by us on some previous iteration) with an exact
+ // version constraint, then we cannot try any other version.
+ //
+ // - If the dependency is system, then it is either specified with the
+ // wildcard version or its exact version has been specified by the user
+ // or has been deduced by the system package manager. In the former
+ // case we actually won't be calling this function for this package
+ // since the wildcard version satisfies any constraint. Thus, an exact
+ // version has been specified/deduced for this dependency and so we
+ // cannot try any other version.
+ //
+ // - If the dependency is being built as an existing archive/directory,
+ // then its version is determined and so we cannot try any other
+ // version.
+ //
+ // - If the package is already configured with the version held and the
+ // user didn't specify this package on the command line and it is not
+ // requested to be upgraded, patched, and/or deorphaned, then we
+ // shouldn't be silently up/down-grading it.
+ //
+ optional<cmdline_adjustment>
+ try_replace_dependency (const common_options& o,
+ const build_package& p,
+ const build_packages& pkgs,
+ const vector<build_package>& hold_pkgs,
+ const dependency_packages& dep_pkgs,
+ const cmdline_adjustments& cmdline_adjs,
+ vector<package_key>& unsatisfied_dpts,
+ const char* what)
{
- const package_name& nm (sp->name);
- const version& sv (sp->version);
+ tracer trace ("try_replace_dependency");
- // Note that we don't pass allow_stub flag so the system wildcard version
- // will (naturally) not be patched.
- //
- string vs (sv.string ());
- optional<standard_version> v (parse_standard_version (vs));
+ assert (p.available != nullptr); // By definition.
- if (!v)
+ // Bail out for the system package build.
+ //
+ if (p.system)
{
- if (!quiet)
- warn << "unable to patch " << package_string (nm, sv) <<
- info << "package is not using semantic/standard version";
+ l5 ([&]{trace << "replacement of " << what << " version "
+ << p.available_name_version_db () << " is denied "
+ << "since it is being configured as system";});
return nullopt;
}
- try
- {
- return version_constraint ("~" + vs);
- }
- // Note that the only possible reason for invalid_argument exception to
- // be thrown is that minor version reached the 99999 limit (see
- // standard-version.cxx for details).
+ // Bail out for an existing package archive/directory.
//
- catch (const invalid_argument&)
+ database& db (p.db);
+ const package_name& nm (p.name ());
+ const version& ver (p.available->version);
+
+ if (find_existing (db,
+ nm,
+ nullopt /* version_constraint */).first != nullptr)
{
- if (!quiet)
- warn << "unable to patch " << package_string (nm, sv) <<
- info << "minor version limit reached";
+ l5 ([&]{trace << "replacement of " << what << " version "
+ << p.available_name_version_db () << " is denied since "
+ << "it is being built as existing archive/directory";});
return nullopt;
}
- }
-
- // List of dependency packages (specified with ? on the command line).
- //
- struct dependency_package
- {
- package_name name;
- optional<version_constraint> constraint; // nullopt if unspecified.
- shared_ptr<selected_package> selected; // NULL if not present.
- bool system;
- bool patch; // Only for an empty version.
- bool keep_out;
- optional<dir_path> checkout_root;
- bool checkout_purge;
- strings config_vars; // Only if not system.
- };
- using dependency_packages = vector<dependency_package>;
- // Evaluate a dependency package and return a new desired version. If the
- // result is absent (nullopt), then there are no user expectations regarding
- // this dependency. If the result is a NULL available_package, then it is
- // either no longer used and can be dropped, or no changes to the dependency
- // are necessary. Otherwise, the result is available_package to
- // upgrade/downgrade to as well as the repository fragment it must come
- // from, and the system flag.
- //
- // If the package version that satisfies explicitly specified dependency
- // version constraint can not be found in the dependents repositories, then
- // return the "no changes are necessary" result if ignore_unsatisfiable
- // argument is true and fail otherwise. The common approach is to pass true
- // for this argument until the execution plan is finalized, assuming that
- // the problematic dependency might be dropped.
- //
- struct evaluate_result
- {
- shared_ptr<available_package> available;
- shared_ptr<bpkg::repository_fragment> repository_fragment;
- bool unused;
- bool system; // Is meaningless if unused.
- };
+ // Find the package command line entry and stash the reference to its
+ // version constraint, if any. Bail out if the constraint is specified as
+ // an exact package version.
+ //
+ const build_package* hold_pkg (nullptr);
+ const dependency_package* dep_pkg (nullptr);
+ const version_constraint* constraint (nullptr);
- using package_dependents = vector<pair<shared_ptr<selected_package>,
- optional<version_constraint>>>;
+ for (const build_package& hp: hold_pkgs)
+ {
+ if (hp.name () == nm && hp.db == db)
+ {
+ hold_pkg = &hp;
- static optional<evaluate_result>
- evaluate_dependency (database&,
- const shared_ptr<selected_package>&,
- const optional<version_constraint>& desired,
- bool desired_sys,
- bool patch,
- bool explicitly,
- const set<shared_ptr<repository_fragment>>&,
- const package_dependents&,
- bool ignore_unsatisfiable);
+ if (!hp.constraints.empty ())
+ {
+ // Can only contain the user-specified constraint.
+ //
+ assert (hp.constraints.size () == 1);
- static optional<evaluate_result>
- evaluate_dependency (database& db,
- const dependency_packages& deps,
- const shared_ptr<selected_package>& sp,
- bool ignore_unsatisfiable)
- {
- tracer trace ("evaluate_dependency");
+ const version_constraint& c (hp.constraints[0].value);
- assert (sp != nullptr && !sp->hold_package);
+ if (c.min_version == c.max_version)
+ {
+ l5 ([&]{trace << "replacement of " << what << " version "
+ << p.available_name_version_db () << " is denied "
+ << "since it is specified on command line as '"
+ << nm << ' ' << c << "'";});
- const package_name& nm (sp->name);
+ return nullopt;
+ }
+ else
+ constraint = &c;
+ }
- // Query the dependents and bail out if the dependency is unused.
- //
- auto pds (db.query<package_dependent> (
- query<package_dependent>::name == nm));
+ break;
+ }
+ }
- if (pds.empty ())
+ if (hold_pkg == nullptr)
{
- l5 ([&]{trace << *sp << ": unused";});
+ for (const dependency_package& dp: dep_pkgs)
+ {
+ if (dp.name == nm && dp.db != nullptr && *dp.db == db)
+ {
+ dep_pkg = &dp;
- return evaluate_result {nullptr /* available */,
- nullptr /* repository_fragment */,
- true /* unused */,
- false /* system */};
+ if (dp.constraint)
+ {
+ const version_constraint& c (*dp.constraint);
+
+ if (c.min_version == c.max_version)
+ {
+ l5 ([&]{trace << "replacement of " << what << " version "
+ << p.available_name_version_db () << " is denied "
+ << "since it is specified on command line as '?"
+ << nm << ' ' << c << "'";});
+
+ return nullopt;
+ }
+ else
+ constraint = &c;
+ }
+
+ break;
+ }
+ }
}
- // If there are no user expectations regarding this dependency, then we
- // give no up/down-grade recommendation.
+ // Bail out if the selected package version is held and the package is not
+ // specified on the command line nor is being upgraded/deorphaned via its
+ // dependents recursively.
//
- auto i (find_if (
- deps.begin (), deps.end (),
- [&nm] (const dependency_package& i) {return i.name == nm;}));
+ const shared_ptr<selected_package>& sp (p.selected);
+
+ if (sp != nullptr && sp->hold_version &&
+ hold_pkg == nullptr && dep_pkg == nullptr &&
+ !p.upgrade && !p.deorphan)
+ {
+ l5 ([&]{trace << "replacement of " << what << " version "
+ << p.available_name_version_db () << " is denied since "
+ << "it is already built to hold version and it is not "
+ << "specified on command line nor is being upgraded or "
+ << "deorphaned";});
- if (i == deps.end ())
return nullopt;
+ }
- // If the selected package matches the user expectations then no package
- // change is required.
- //
- const version& sv (sp->version);
- bool ssys (sp->system ());
+ transaction t (db);
- // The requested dependency version constraint and system flag.
+ // Collect the repository fragments to search the available packages in.
//
- const optional<version_constraint>& dvc (i->constraint); // May be nullopt.
- bool dsys (i->system);
+ config_repo_fragments rfs;
- if (ssys == dsys &&
- dvc &&
- (ssys ? sv == *dvc->min_version : satisfies (sv, dvc)))
+ // Add a repository fragment to the specified list, suppressing duplicates.
+ //
+ auto add = [] (shared_ptr<repository_fragment>&& rf,
+ vector<shared_ptr<repository_fragment>>& rfs)
{
- l5 ([&]{trace << *sp << ": unchanged";});
+ if (find (rfs.begin (), rfs.end (), rf) == rfs.end ())
+ rfs.push_back (move (rf));
+ };
- return evaluate_result {nullptr /* available */,
- nullptr /* repository_fragment */,
- false /* unused */,
- false /* system */};
+ // If the package is specified as build-to-hold on the command line, then
+ // collect the root repository fragment from its database. Otherwise,
+ // collect the repository fragments its dependent packages come from.
+ //
+ if (hold_pkg != nullptr)
+ {
+ add (db.find<repository_fragment> (empty_string), rfs[db]);
}
+ else
+ {
+ // Collect the repository fragments the new dependents come from.
+ //
+ if (p.required_by_dependents)
+ {
+ for (const package_version_key& dvk: p.required_by)
+ {
+ if (dvk.version) // Real package?
+ {
+ const build_package* d (pkgs.entered_build (dvk.db, dvk.name));
- // Build a set of repository fragments the dependent packages now come
- // from. Also cache the dependents and the constraints they apply to this
- // dependency.
- //
- set<shared_ptr<repository_fragment>> repo_frags;
- package_dependents dependents;
+ // Must be collected as a package build (see
+ // build_package::required_by for details).
+ //
+ assert (d != nullptr &&
+ d->action &&
+ *d->action == build_package::build &&
+ d->available != nullptr);
- for (auto& pd: pds)
- {
- shared_ptr<selected_package> dsp (db.load<selected_package> (pd.name));
+ for (const package_location& pl: d->available->locations)
+ {
+ const lazy_shared_ptr<repository_fragment>& lrf (
+ pl.repository_fragment);
- shared_ptr<available_package> dap (
- db.find<available_package> (
- available_package_id (dsp->name, dsp->version)));
+ // Note that here we also handle dependents fetched/unpacked
+ // using the existing archive/directory adding the root
+ // repository fragments from their configurations.
+ //
+ if (!rep_masked_fragment (lrf))
+ add (lrf.load (), rfs[lrf.database ()]);
+ }
+ }
+ }
+ }
- if (dap != nullptr)
+ // Collect the repository fragments the existing dependents come from.
+ //
+ // Note that all the existing dependents are already in the map (since
+ // collect_dependents() has already been called) and are either
+ // reconfigure adjustments or non-collected recursively builds.
+ //
+ if (sp != nullptr)
{
- assert (!dap->locations.empty ());
+ for (database& ddb: db.dependent_configs ())
+ {
+ for (const auto& pd: query_dependents (ddb, nm, db))
+ {
+ const build_package* d (pkgs.entered_build (ddb, pd.name));
- for (const auto& pl: dap->locations)
- repo_frags.insert (pl.repository_fragment.load ());
- }
+ // See collect_dependents() for details.
+ //
+ assert (d != nullptr && d->action);
+
+ if ((*d->action == build_package::adjust &&
+ (d->flags & build_package::adjust_reconfigure) != 0) ||
+ (*d->action == build_package::build && !d->dependencies))
+ {
+ shared_ptr<selected_package> p (
+ ddb.load<selected_package> (pd.name));
- dependents.emplace_back (move (dsp), move (pd.constraint));
+ add_dependent_repo_fragments (ddb, p, rfs);
+ }
+ }
+ }
+ }
}
- return evaluate_dependency (db,
- sp,
- dvc,
- dsys,
- i->patch,
- true /* explicitly */,
- repo_frags,
- dependents,
- ignore_unsatisfiable);
- }
+ // Query the dependency available packages from all the collected
+ // repository fragments and select the most appropriate one. Note that
+ // this code is inspired by the evaluate_dependency() function
+ // implementation, which documents the below logic in great detail.
+ //
+ optional<version_constraint> c (constraint != nullptr
+ ? *constraint
+ : optional<version_constraint> ());
- static optional<evaluate_result>
- evaluate_dependency (database& db,
- const shared_ptr<selected_package>& sp,
- const optional<version_constraint>& dvc,
- bool dsys,
- bool patch,
- bool explicitly,
- const set<shared_ptr<repository_fragment>>& rfs,
- const package_dependents& dependents,
- bool ignore_unsatisfiable)
- {
- tracer trace ("evaluate_dependency");
+ if (!c && p.upgrade && !*p.upgrade)
+ {
+ assert (sp != nullptr); // See build_package::upgrade.
- const package_name& nm (sp->name);
- const version& sv (sp->version);
+ c = patch_constraint (sp);
- auto no_change = [] ()
- {
- return evaluate_result {nullptr /* available */,
- nullptr /* repository_fragment */,
- false /* unused */,
- false /* system */};
- };
+ assert (c); // See build_package::upgrade.
+ }
- // Build the list of available packages for the potential up/down-grade
- // to, in the version-descending order. If patching, then we constrain the
- // choice with the latest patch version and place no constraints if
- // upgrading. For a system package we also put no constraints just to make
- // sure that the package is recognized.
+ available_packages afs (find_available (nm, c, rfs));
+
+ using available = pair<shared_ptr<available_package>,
+ lazy_shared_ptr<repository_fragment>>;
+
+ available ra;
+
+ // Version to deorphan.
//
- optional<version_constraint> c;
+ const version* dov (p.deorphan ? &sp->version : nullptr);
- if (!dvc)
- {
- assert (!dsys); // The version can't be empty for the system package.
+ optional<version_constraint> dopc; // Patch constraint for the above.
+ optional<version_constraint> domc; // Minor constraint for the above.
- if (patch)
- {
- c = patch_constraint (sp, ignore_unsatisfiable);
+ bool orphan_best_match (p.deorphan && constraint == nullptr && !p.upgrade);
- if (!c)
- {
- l5 ([&]{trace << *sp << ": non-patchable";});
- return no_change ();
- }
- }
+ if (orphan_best_match)
+ {
+ // Note that non-zero iteration makes a version non-standard, so we
+ // reset it to 0 to produce the patch/minor constraints.
+ //
+ version v (dov->epoch,
+ dov->upstream,
+ dov->release,
+ dov->revision,
+ 0 /* iteration */);
+
+ dopc = patch_constraint (nm, v, true /* quiet */);
+ domc = minor_constraint (nm, v, true /* quiet */);
}
- else if (!dsys)
- c = dvc;
- vector<pair<shared_ptr<available_package>,
- shared_ptr<repository_fragment>>> afs (
- find_available (db,
- nm,
- c,
- vector<shared_ptr<repository_fragment>> (rfs.begin (),
- rfs.end ())));
+ available deorphan_latest_iteration;
+ available deorphan_later_revision;
+ available deorphan_later_patch;
+ available deorphan_later_minor;
+ available deorphan_latest_available;
- // Go through up/down-grade candidates and pick the first one that
- // satisfies all the dependents. Collect (and sort) unsatisfied dependents
- // per the unsatisfiable version in case we need to print them.
+ // Return true if a version satisfies all the dependency constraints.
+ // Otherwise, save all the being built unsatisfied dependents into the
+ // resulting list, suppressing duplicates.
//
- struct compare_sp
+ auto satisfactory = [&p, &unsatisfied_dpts] (const version& v)
{
- bool
- operator() (const shared_ptr<selected_package>& x,
- const shared_ptr<selected_package>& y) const
+ bool r (true);
+
+ for (const auto& c: p.constraints)
{
- return x->name < y->name;
+ if (!satisfies (v, c.value))
+ {
+ r = false;
+
+ if (c.dependent.version && !c.selected_dependent)
+ {
+ package_key pk (c.dependent.db, c.dependent.name);
+
+ if (find (unsatisfied_dpts.begin (),
+ unsatisfied_dpts.end (),
+ pk) == unsatisfied_dpts.end ())
+ unsatisfied_dpts.push_back (move (pk));
+ }
+ }
}
+
+ return r;
};
- using sp_set = set<reference_wrapper<const shared_ptr<selected_package>>,
- compare_sp>;
+ for (available& af: afs)
+ {
+ shared_ptr<available_package>& ap (af.first);
- vector<pair<version, sp_set>> unsatisfiable;
+ if (ap->stub ())
+ continue;
- bool stub (false);
- bool ssys (sp->system ());
+ const version& av (ap->version);
- assert (!dsys || system_repository.find (nm) != nullptr);
+ // Skip if the available package version doesn't satisfy all the
+ // constraints (note: must be checked first since has a byproduct).
+ //
+ if (!satisfactory (av))
+ continue;
- for (auto& af: afs)
- {
- shared_ptr<available_package>& ap (af.first);
- const version& av (!dsys ? ap->version : *ap->system_version ());
+ // Don't offer to replace to the same version.
+ //
+ if (av == ver)
+ continue;
+
+ // Don't repeatedly offer the same adjustments for the same command
+ // line.
+ //
+ if (cmdline_adjs.tried_earlier (db, nm, av))
+ {
+ l5 ([&]{trace << "replacement " << package_version_key (db, nm, av)
+ << " tried earlier for same command line, skipping";});
+
+ continue;
+ }
// If we aim to upgrade to the latest version and it tends to be less
// then the selected one, then what we currently have is the best that
- // we can get, and so we return the "no change" result.
- //
- // Note that we also handle a package stub here.
+ // we can get. Thus, we use the selected version as a replacement,
+ // unless it doesn't satisfy all the constraints or we are deorphaning.
//
- if (!dvc && av < sv)
+ if (constraint == nullptr && sp != nullptr)
{
- assert (!dsys); // Version can't be empty for the system package.
-
- // For the selected system package we still need to pick a source
- // package version to downgrade to.
- //
- if (!ssys)
+ const version& sv (sp->version);
+ if (av < sv && !sp->system () && !p.deorphan)
{
- l5 ([&]{trace << *sp << ": best";});
- return no_change ();
+ // Only consider the selected package if its version is satisfactory
+ // for its new dependents (note: must be checked first since has a
+ // byproduct), differs from the version being replaced, and was
+ // never used for the same command line (see above for details).
+ //
+ if (satisfactory (sv) && sv != ver)
+ {
+ if (!cmdline_adjs.tried_earlier (db, nm, sv))
+ {
+ ra = make_available_fragment (o, db, sp);
+ break;
+ }
+ else
+ l5 ([&]{trace << "selected package replacement "
+ << package_version_key (db, nm, sp->version)
+ << " tried earlier for same command line, "
+ << "skipping";});
+ }
}
+ }
- // We can not upgrade the (system) package to a stub version, so just
- // skip it.
- //
- if (ap->stub ())
+ if (orphan_best_match)
+ {
+ if (av == *dov)
{
- stub = true;
- continue;
+ ra = move (af);
+ break;
}
- }
- // Check if the version satisfies all the dependents and collect
- // unsatisfied ones.
- //
- bool satisfactory (true);
- sp_set unsatisfied_dependents;
+ if (deorphan_latest_iteration.first == nullptr &&
+ av.compare (*dov, false /* revision */, true /* iteration */) == 0)
+ deorphan_latest_iteration = af;
- for (const auto& dp: dependents)
- {
- if (!satisfies (av, dp.second))
- {
- satisfactory = false;
+ if (deorphan_later_revision.first == nullptr &&
+ av.compare (*dov, true /* revision */) == 0 &&
+ av.compare (*dov, false /* revision */, true /* iteration */) > 0)
+ deorphan_later_revision = af;
- // Continue to collect dependents of the unsatisfiable version if
- // we need to print them before failing.
- //
- if (ignore_unsatisfiable)
- break;
+ if (deorphan_later_patch.first == nullptr &&
+ dopc && satisfies (av, *dopc) &&
+ av.compare (*dov, true /* revision */) > 0) // Patch is greater?
+ deorphan_later_patch = af;
- unsatisfied_dependents.insert (dp.first);
- }
- }
+ if (deorphan_later_minor.first == nullptr &&
+ domc && satisfies (av, *domc) &&
+ av.compare (*dov, true /* revision */) > 0 &&
+ deorphan_later_patch.first == nullptr)
+ deorphan_later_minor = af;
- if (!satisfactory)
- {
- if (!ignore_unsatisfiable)
- unsatisfiable.emplace_back (av, move (unsatisfied_dependents));
+ if (deorphan_latest_available.first == nullptr)
+ deorphan_latest_available = move (af);
- // If the dependency is expected to be configured as system, then bail
- // out, as an available package version will always resolve to the
- // system one (see above).
- //
- if (dsys)
- break;
+ if (av.compare (*dov, false /* revision */, true /* iteration */) < 0)
+ {
+ assert (deorphan_latest_iteration.first != nullptr ||
+ deorphan_later_revision.first != nullptr ||
+ deorphan_later_patch.first != nullptr ||
+ deorphan_later_minor.first != nullptr ||
+ deorphan_latest_available.first != nullptr);
- continue;
+ break;
+ }
}
-
- // If the best satisfactory version and the desired system flag perfectly
- // match the ones of the selected package, then no package change is
- // required. Otherwise, recommend an up/down-grade.
- //
- if (av == sv && ssys == dsys)
+ else
{
- l5 ([&]{trace << *sp << ": unchanged";});
- return no_change ();
+ ra = move (af);
+ break;
}
-
- l5 ([&]{trace << *sp << ": update to "
- << package_string (nm, av, dsys);});
-
- return evaluate_result {
- move (ap), move (af.second), false /* unused */, dsys};
}
- // If we aim to upgrade to the latest version, then what we currently have
- // is the only thing that we can get, and so returning the "no change"
- // result, unless we need to upgrade a package configured as system.
- //
- if (!dvc && !ssys)
- {
- assert (!dsys); // Version cannot be empty for the system package.
+ shared_ptr<available_package>& rap (ra.first);
- l5 ([&]{trace << *sp << ": only";});
- return no_change ();
+ if (rap == nullptr && orphan_best_match)
+ {
+ if (deorphan_latest_iteration.first != nullptr)
+ ra = move (deorphan_latest_iteration);
+ else if (deorphan_later_revision.first != nullptr)
+ ra = move (deorphan_later_revision);
+ else if (deorphan_later_patch.first != nullptr)
+ ra = move (deorphan_later_patch);
+ else if (deorphan_later_minor.first != nullptr)
+ ra = move (deorphan_later_minor);
+ else if (deorphan_latest_available.first != nullptr)
+ ra = move (deorphan_latest_available);
}
- // If the version satisfying the desired dependency version constraint is
- // unavailable or unsatisfiable for some dependents then we fail, unless
- // requested not to do so. In the later case we return the "no change"
- // result.
+ t.commit ();
+
+ // Bail out if no appropriate replacement is found and return the
+ // command line adjustment object otherwise.
//
- if (ignore_unsatisfiable)
- {
- l5 ([&]{trace << package_string (nm, dvc, dsys)
- << (unsatisfiable.empty ()
- ? ": no source"
- : ": unsatisfiable");});
+ if (rap == nullptr)
+ return nullopt;
- return no_change ();
- }
+ optional<cmdline_adjustment> r;
- // If there are no unsatisfiable versions then the package is not present
- // (or is not available in source) in its dependents' repositories.
- //
- if (unsatisfiable.empty ())
- {
- diag_record dr (fail);
+ lazy_shared_ptr<repository_fragment>& raf (ra.second);
- if (!dvc && patch)
+ if (hold_pkg != nullptr || dep_pkg != nullptr) // Specified on command line?
+ {
+ if (hold_pkg != nullptr)
{
- assert (ssys); // Otherwise, we would bail out earlier (see above).
+ r = cmdline_adjustment (hold_pkg->db,
+ hold_pkg->name (),
+ move (rap),
+ move (raf));
- // Patch (as any upgrade) of a system package is always explicit, so
- // we always fail and never treat the package as being up to date.
- //
- assert (explicitly);
-
- fail << "patch version for " << *sp << " is not available "
- << "from its dependents' repositories";
+ if (constraint != nullptr)
+ {
+ l5 ([&]{trace << "replace " << what << " version "
+ << p.available_name_version () << " with "
+ << r->version << " by overwriting constraint "
+ << cmdline_adjs.to_string (*r) << " on command line";});
+ }
+ else
+ {
+ l5 ([&]{trace << "replace " << what << " version "
+ << p.available_name_version () << " with "
+ << r->version << " by adding constraint "
+ << cmdline_adjs.to_string (*r) << " on command line";});
+ }
}
- else if (!stub)
- fail << package_string (nm, dsys ? nullopt : dvc)
- << " is not available from its dependents' repositories";
- else // The only available package is a stub.
+ else // dep_pkg != nullptr
{
- // Note that we don't advise to "build" the package as a system one as
- // it is already as such (see above).
- //
- assert (!dvc && !dsys && ssys);
+ r = cmdline_adjustment (*dep_pkg->db, dep_pkg->name, rap->version);
- fail << package_string (nm, dvc) << " is not available in source "
- << "from its dependents' repositories";
+ if (constraint != nullptr)
+ {
+ l5 ([&]{trace << "replace " << what << " version "
+ << p.available_name_version () << " with "
+ << r->version << " by overwriting constraint "
+ << cmdline_adjs.to_string (*r) << " on command line";});
+ }
+ else
+ {
+ l5 ([&]{trace << "replace " << what << " version "
+ << p.available_name_version () << " with "
+ << r->version << " by adding constraint "
+ << cmdline_adjs.to_string (*r) << " on command line";});
+ }
}
}
-
- // Issue the diagnostics and fail.
- //
- diag_record dr (fail);
- dr << "package " << nm << " doesn't satisfy its dependents";
-
- // Print the list of unsatisfiable versions together with dependents they
- // don't satisfy: up to three latest versions with no more than five
- // dependents each.
- //
- size_t nv (0);
- for (const auto& u: unsatisfiable)
+ else // The package is not specified on the command line.
{
- dr << info << package_string (nm, u.first) << " doesn't satisfy";
+ // If the package is configured as system, then since it is not
+ // specified by the user (both hold_pkg and dep_pkg are NULL) we may
+ // only build it as system. Thus we wouldn't be here (see above).
+ //
+ assert (sp == nullptr || !sp->system ());
- size_t n (0);
- const sp_set& ps (u.second);
- for (const shared_ptr<selected_package>& p: ps)
+ // Similar to the collect lambda in collect_build_prerequisites(), issue
+ // the warning if we are forcing an up/down-grade.
+ //
+ if (sp != nullptr && (sp->hold_package || verb >= 2))
{
- dr << ' ' << *p;
+ const version& av (rap->version);
+ const version& sv (sp->version);
- if (++n == 5 && ps.size () != 6) // Printing 'and 1 more' looks stupid.
- break;
+ int ud (sv.compare (av));
+
+ if (ud != 0)
+ {
+ for (const auto& c: p.constraints)
+ {
+ if (c.dependent.version && !satisfies (sv, c.value))
+ {
+ warn << "package " << c.dependent << " dependency on ("
+ << nm << ' ' << c.value << ") is forcing "
+ << (ud < 0 ? "up" : "down") << "grade of " << *sp << db
+ << " to " << av;
+
+ break;
+ }
+ }
+ }
}
- if (n != ps.size ())
- dr << " and " << ps.size () - n << " more";
+ // For the selected built-to-hold package create the build-to-hold
+ // package spec and the dependency spec otherwise.
+ //
+ if (sp != nullptr && sp->hold_package)
+ {
+ r = cmdline_adjustment (db,
+ nm,
+ move (rap),
+ move (raf),
+ p.upgrade,
+ p.deorphan);
+
+ l5 ([&]{trace << "replace " << what << " version "
+ << p.available_name_version () << " with " << r->version
+ << " by adding package spec "
+ << cmdline_adjs.to_string (*r)
+ << " to command line";});
+ }
+ else
+ {
+ r = cmdline_adjustment (db, nm, rap->version, p.upgrade, p.deorphan);
- if (++nv == 3 && unsatisfiable.size () != 4)
- break;
+ l5 ([&]{trace << "replace " << what << " version "
+ << p.available_name_version () << " with " << r->version
+ << " by adding package spec "
+ << cmdline_adjs.to_string (*r)
+ << " to command line";});
+ }
}
- if (nv != unsatisfiable.size ())
- dr << info << "and " << unsatisfiable.size () - nv << " more";
-
- dr << endf;
+ return r;
}
- // List of dependent packages whose immediate/recursive dependencies must be
- // upgraded (specified with -i/-r on the command line).
+ // Try to replace some of the being built, potentially indirect, dependents
+ // of the specified dependency with a different available version,
+ // satisfactory for all its new and existing dependents (if any). Return the
+ // command line adjustment if such a replacement is deduced and nullopt
+ // otherwise. It is assumed that the dependency replacement has been
+ // (unsuccessfully) tried by using the try_replace_dependency() call and its
+ // resulting list of the dependents, unsatisfied by some of the dependency
+ // available versions, is also passed to the function call as the
+ // unsatisfied_dpts argument.
//
- struct recursive_package
- {
- package_name name;
- bool upgrade; // true -- upgrade, false -- patch.
- bool recursive; // true -- recursive, false -- immediate.
- };
- using recursive_packages = vector<recursive_package>;
-
- // Recursively check if immediate dependencies of this dependent must be
- // upgraded or patched. Return true if it must be upgraded, false if
- // patched, and nullopt otherwise.
+ // Specifically, try to replace the dependents in the following order by
+ // calling try_replace_dependency() for them:
//
- static optional<bool>
- upgrade_dependencies (database& db,
- const package_name& nm,
- const recursive_packages& recs,
- bool recursion = false)
+ // - Immediate dependents unsatisfied with the specified dependency. For the
+ // sake of tracing and documentation, we (naturally) call them unsatisfied
+ // dependents.
+ //
+ // - Immediate dependents satisfied with the dependency but applying the
+ // version constraint which has prevented us from picking a version which
+ // would be satisfactory to the unsatisfied dependents. Note that this
+ // information is only available for the being built unsatisfied
+ // dependents (added by collect_build() rather than collect_dependents()).
+ // We call them conflicting dependents.
+ //
+ // - Immediate dependents which apply constraint to this dependency,
+ // incompatible with constraints of some other dependents (both new and
+ // existing). We call them unsatisfiable dependents.
+ //
+ // - Immediate dependents from unsatisfied_dpts argument. We call them
+ // constraining dependents.
+ //
+ // - Dependents of all the above types of dependents, discovered by
+ // recursively calling try_replace_dependent() for them.
+ //
+ optional<cmdline_adjustment>
+ try_replace_dependent (const common_options& o,
+ const build_package& p, // Dependency.
+ const vector<unsatisfied_constraint>* ucs,
+ const build_packages& pkgs,
+ const cmdline_adjustments& cmdline_adjs,
+ const vector<package_key>& unsatisfied_dpts,
+ vector<build_package>& hold_pkgs,
+ dependency_packages& dep_pkgs,
+ set<const build_package*>& visited_dpts)
{
- auto i (find_if (recs.begin (), recs.end (),
- [&nm] (const recursive_package& i) -> bool
- {
- return i.name == nm;
- }));
+ tracer trace ("try_replace_dependent");
- optional<bool> r;
+ // Bail out if the dependent has already been visited and add it to the
+ // visited set otherwise.
+ //
+ if (!visited_dpts.insert (&p).second)
+ return nullopt;
- if (i != recs.end () && i->recursive >= recursion)
- {
- r = i->upgrade;
+ using constraint_type = build_package::constraint_type;
- if (*r) // Upgrade (vs patch)?
- return r;
- }
+ const shared_ptr<available_package>& ap (p.available);
+ assert (ap != nullptr); // By definition.
+
+ const version& av (ap->version);
+
+ // List of the dependents which we have (unsuccessfully) tried to replace
+ // together with the lists of the constraining dependents.
+ //
+ vector<pair<package_key, vector<package_key>>> dpts;
- for (const auto& pd: db.query<package_dependent> (
- query<package_dependent>::name == nm))
+ // Try to replace a dependent, unless we have already tried to replace it.
+ //
+ auto try_replace = [&o,
+ &p,
+ &pkgs,
+ &cmdline_adjs,
+ &hold_pkgs,
+ &dep_pkgs,
+ &visited_dpts,
+ &dpts,
+ &trace] (package_key dk, const char* what)
+ -> optional<cmdline_adjustment>
{
- // Note that we cannot end up with an infinite recursion for configured
- // packages due to a dependency cycle (see order() for details).
- //
- if (optional<bool> u = upgrade_dependencies (db, pd.name, recs, true))
+ if (find_if (dpts.begin (), dpts.end (),
+ [&dk] (const auto& v) {return v.first == dk;}) ==
+ dpts.end ())
{
- if (!r || *r < *u) // Upgrade wins patch.
+ const build_package* d (pkgs.entered_build (dk));
+
+ // Always come from the dependency's constraints member.
+ //
+ assert (d != nullptr);
+
+ // Skip the visited dependents since, by definition, we have already
+ // tried to replace them.
+ //
+ if (find (visited_dpts.begin (), visited_dpts.end (), d) ==
+ visited_dpts.end ())
{
- r = u;
+ l5 ([&]{trace << "try to replace " << what << ' '
+ << d->available_name_version_db () << " of dependency "
+ << p.available_name_version_db () << " with some "
+ << "other version";});
+
+ vector<package_key> uds;
+
+ if (optional<cmdline_adjustment> a = try_replace_dependency (
+ o,
+ *d,
+ pkgs,
+ hold_pkgs,
+ dep_pkgs,
+ cmdline_adjs,
+ uds,
+ what))
+ {
+ return a;
+ }
- if (*r) // Upgrade (vs patch)?
- return r;
+ dpts.emplace_back (move (dk), move (uds));
}
}
- }
- return r;
- }
+ return nullopt;
+ };
- // Evaluate a package (not necessarily dependency) and return a new desired
- // version. If the result is absent (nullopt), then no changes to the
- // package are necessary. Otherwise, the result is available_package to
- // upgrade/downgrade to as well as the repository fragment it must come
- // from.
- //
- // If the system package cannot be upgraded to the source one, not being
- // found in the dependents repositories, then return nullopt if
- // ignore_unsatisfiable argument is true and fail otherwise (see the
- // evaluate_dependency() function description for details).
- //
- static optional<evaluate_result>
- evaluate_recursive (database& db,
- const recursive_packages& recs,
- const shared_ptr<selected_package>& sp,
- bool ignore_unsatisfiable)
- {
- tracer trace ("evaluate_recursive");
+ // Try to replace unsatisfied dependents.
+ //
+ for (const constraint_type& c: p.constraints)
+ {
+ const package_version_key& dvk (c.dependent);
- assert (sp != nullptr);
+ if (dvk.version && !c.selected_dependent && !satisfies (av, c.value))
+ {
+ if (optional<cmdline_adjustment> a = try_replace (
+ package_key (dvk.db, dvk.name), "unsatisfied dependent"))
+ {
+ return a;
+ }
+ }
+ }
- // Build a set of repository fragment the dependent packages come from.
- // Also cache the dependents and the constraints they apply to this
- // dependency.
+ // Try to replace conflicting dependents.
//
- set<shared_ptr<repository_fragment>> repo_frags;
- package_dependents dependents;
+ if (ucs != nullptr)
+ {
+ for (const unsatisfied_constraint& uc: *ucs)
+ {
+ const package_version_key& dvk (uc.constraint.dependent);
- auto pds (db.query<package_dependent> (
- query<package_dependent>::name == sp->name));
+ if (dvk.version)
+ {
+ if (optional<cmdline_adjustment> a = try_replace (
+ package_key (dvk.db, dvk.name), "conflicting dependent"))
+ {
+ return a;
+ }
+ }
+ }
+ }
- // Only collect repository fragments (for best version selection) of
- // (immediate) dependents that have a hit (direct or indirect) in recs.
- // Note, however, that we collect constraints from all the dependents.
+ // Try to replace unsatisfiable dependents.
//
- optional<bool> upgrade;
-
- for (const auto& pd: pds)
+ for (const constraint_type& c1: p.constraints)
{
- shared_ptr<selected_package> dsp (db.load<selected_package> (pd.name));
- dependents.emplace_back (dsp, move (pd.constraint));
+ const package_version_key& dvk (c1.dependent);
- if (optional<bool> u = upgrade_dependencies (db, pd.name, recs))
+ if (dvk.version && !c1.selected_dependent)
{
- if (!upgrade || *upgrade < *u) // Upgrade wins patch.
- upgrade = u;
- }
- else
- continue;
+ const version_constraint& v1 (c1.value);
- // While we already know that the dependency upgrade is required, we
- // continue to iterate over dependents, collecting the repository
- // fragments and the constraints.
- //
- shared_ptr<available_package> dap (
- db.find<available_package> (
- available_package_id (dsp->name, dsp->version)));
+ bool unsatisfiable (false);
+ for (const constraint_type& c2: p.constraints)
+ {
+ const version_constraint& v2 (c2.value);
- if (dap != nullptr)
- {
- assert (!dap->locations.empty ());
+ if (!satisfies (v1, v2) && !satisfies (v2, v1))
+ {
+ unsatisfiable = true;
+ break;
+ }
+ }
- for (const auto& pl: dap->locations)
- repo_frags.insert (pl.repository_fragment.load ());
+ if (unsatisfiable)
+ {
+ if (optional<cmdline_adjustment> a = try_replace (
+ package_key (dvk.db, dvk.name), "unsatisfiable dependent"))
+ {
+ return a;
+ }
+ }
}
}
- if (!upgrade)
+ // Try to replace constraining dependents.
+ //
+ for (const auto& dk: unsatisfied_dpts)
{
- l5 ([&]{trace << *sp << ": no hit";});
- return nullopt;
+ if (optional<cmdline_adjustment> a = try_replace (
+ dk, "constraining dependent"))
+ {
+ return a;
+ }
}
- // Recommends the highest possible version.
+ // Try to replace dependents of the above dependents, recursively.
//
- optional<evaluate_result> r (
- evaluate_dependency (db,
- sp,
- nullopt /* desired */,
- false /*desired_sys */,
- !*upgrade /* patch */,
- false /* explicitly */,
- repo_frags,
- dependents,
- ignore_unsatisfiable));
+ for (const auto& dep: dpts)
+ {
+ const build_package* d (pkgs.entered_build (dep.first));
+
+ assert (d != nullptr);
+
+ if (optional<cmdline_adjustment> a = try_replace_dependent (
+ o,
+ *d,
+ nullptr /* unsatisfied_constraints */,
+ pkgs,
+ cmdline_adjs,
+ dep.second,
+ hold_pkgs,
+ dep_pkgs,
+ visited_dpts))
+ {
+ return a;
+ }
+ }
- // Translate the "no change" result into nullopt.
- //
- assert (!r || !r->unused);
- return r && r->available == nullptr ? nullopt : r;
+ return nullopt;
}
- static void
+ // Return false if the plan execution was noop. If unsatisfied dependents
+ // are specified then we are in the simulation mode.
+ //
+ static bool
execute_plan (const pkg_build_options&,
- const dir_path&,
- database&,
build_package_list&,
- bool simulate);
+ unsatisfied_dependents* simulate,
+ const function<find_database_function>&);
using pkg_options = pkg_build_pkg_options;
@@ -2338,20 +2938,39 @@ namespace bpkg
dr << fail << "both --immediate|-i and --recursive|-r specified";
// The --immediate or --recursive option can only be specified with an
- // explicit --upgrade or --patch.
+ // explicit --upgrade, --patch, or --deorphan.
//
if (const char* n = (o.immediate () ? "--immediate" :
o.recursive () ? "--recursive" : nullptr))
{
- if (!o.upgrade () && !o.patch ())
- dr << fail << n << " requires explicit --upgrade|-u or --patch|-p";
+ if (!o.upgrade () && !o.patch () && !o.deorphan ())
+ dr << fail << n << " requires explicit --upgrade|-u, --patch|-p, or "
+ << "--deorphan";
}
if (((o.upgrade_immediate () ? 1 : 0) +
(o.upgrade_recursive () ? 1 : 0) +
(o.patch_immediate () ? 1 : 0) +
(o.patch_recursive () ? 1 : 0)) > 1)
- fail << "multiple --(upgrade|patch)-(immediate|recursive) specified";
+ dr << fail << "multiple --(upgrade|patch)-(immediate|recursive) "
+ << "specified";
+
+ if (o.deorphan_immediate () && o.deorphan_recursive ())
+ dr << fail << "both --deorphan-immediate and --deorphan-recursive "
+ << "specified";
+
+ if (multi_config ())
+ {
+ if (const char* opt = o.config_name_specified () ? "--config-name" :
+ o.config_id_specified () ? "--config-id" :
+ nullptr)
+ {
+ dr << fail << opt << " specified for multiple current "
+ << "configurations" <<
+ info << "use --config-uuid to specify configurations in "
+ << "this mode";
+ }
+ }
if (!dr.empty () && !pkg.empty ())
dr << info << "while validating options for " << pkg;
@@ -2366,13 +2985,16 @@ namespace bpkg
dst.recursive (src.recursive ());
// If -r|-i was specified at the package level, then so should
- // -u|-p.
+ // -u|-p and --deorphan.
//
if (!(dst.upgrade () || dst.patch ()))
{
dst.upgrade (src.upgrade ());
dst.patch (src.patch ());
}
+
+ if (!dst.deorphan ())
+ dst.deorphan (src.deorphan ());
}
if (!(dst.upgrade_immediate () || dst.upgrade_recursive () ||
@@ -2384,8 +3006,15 @@ namespace bpkg
dst.patch_recursive (src.patch_recursive ());
}
+ if (!(dst.deorphan_immediate () || dst.deorphan_recursive ()))
+ {
+ dst.deorphan_immediate (src.deorphan_immediate ());
+ dst.deorphan_recursive (src.deorphan_recursive ());
+ }
+
dst.dependency (src.dependency () || dst.dependency ());
dst.keep_out (src.keep_out () || dst.keep_out ());
+ dst.disfigure (src.disfigure () || dst.disfigure ());
if (!dst.checkout_root_specified () && src.checkout_root_specified ())
{
@@ -2394,23 +3023,54 @@ namespace bpkg
}
dst.checkout_purge (src.checkout_purge () || dst.checkout_purge ());
+
+ if (src.config_id_specified ())
+ {
+ const vector<uint64_t>& s (src.config_id ());
+ vector<uint64_t>& d (dst.config_id ());
+ d.insert (d.end (), s.begin (), s.end ());
+
+ dst.config_id_specified (true);
+ }
+
+ if (src.config_name_specified ())
+ {
+ const strings& s (src.config_name ());
+ strings& d (dst.config_name ());
+ d.insert (d.end (), s.begin (), s.end ());
+
+ dst.config_name_specified (true);
+ }
+
+ if (src.config_uuid_specified ())
+ {
+ const vector<uuid>& s (src.config_uuid ());
+ vector<uuid>& d (dst.config_uuid ());
+ d.insert (d.end (), s.begin (), s.end ());
+
+ dst.config_uuid_specified (true);
+ }
}
static bool
compare_options (const pkg_options& x, const pkg_options& y)
{
- return x.keep_out () == y.keep_out () &&
- x.dependency () == y.dependency () &&
- x.upgrade () == y.upgrade () &&
- x.patch () == y.patch () &&
- x.immediate () == y.immediate () &&
- x.recursive () == y.recursive () &&
- x.upgrade_immediate () == y.upgrade_immediate () &&
- x.upgrade_recursive () == y.upgrade_recursive () &&
- x.patch_immediate () == y.patch_immediate () &&
- x.patch_recursive () == y.patch_recursive () &&
- x.checkout_root () == y.checkout_root () &&
- x.checkout_purge () == y.checkout_purge ();
+ return x.keep_out () == y.keep_out () &&
+ x.disfigure () == y.disfigure () &&
+ x.dependency () == y.dependency () &&
+ x.upgrade () == y.upgrade () &&
+ x.patch () == y.patch () &&
+ x.deorphan () == y.deorphan () &&
+ x.immediate () == y.immediate () &&
+ x.recursive () == y.recursive () &&
+ x.upgrade_immediate () == y.upgrade_immediate () &&
+ x.upgrade_recursive () == y.upgrade_recursive () &&
+ x.patch_immediate () == y.patch_immediate () &&
+ x.patch_recursive () == y.patch_recursive () &&
+ x.deorphan_immediate () == y.deorphan_immediate () &&
+ x.deorphan_recursive () == y.deorphan_recursive () &&
+ x.checkout_root () == y.checkout_root () &&
+ x.checkout_purge () == y.checkout_purge ();
}
int
@@ -2418,21 +3078,97 @@ namespace bpkg
{
tracer trace ("pkg_build");
- const dir_path& c (o.directory ());
- l4 ([&]{trace << "configuration: " << c;});
+ dir_paths cs;
+ const dir_paths& config_dirs (!o.directory ().empty ()
+ ? o.directory ()
+ : cs);
- validate_options (o, ""); // Global package options.
+ if (config_dirs.empty ())
+ cs.push_back (current_dir);
+
+ l4 ([&]{for (const auto& d: config_dirs) trace << "configuration: " << d;});
+
+ // Make sure that potential stdout writing failures can be detected.
+ //
+ cout.exceptions (ostream::badbit | ostream::failbit);
+
+ if (o.noop_exit_specified ())
+ {
+ if (o.print_only ())
+ fail << "--noop-exit specified with --print-only";
+
+ // We can probably use build2's --structured-result to support this.
+ //
+ if (!o.configure_only ())
+ fail << "--noop-exit is only supported in --configure-only mode";
+ }
if (o.update_dependent () && o.leave_dependent ())
fail << "both --update-dependent|-U and --leave-dependent|-L "
<< "specified" <<
info << "run 'bpkg help pkg-build' for more information";
- if (!args.more () && !o.upgrade () && !o.patch ())
+ if (o.sys_no_query () && o.sys_install ())
+ fail << "both --sys-no-query and --sys-install specified" <<
+ info << "run 'bpkg help pkg-build' for more information";
+
+ if (!args.more () && !o.upgrade () && !o.patch () && !o.deorphan ())
fail << "package name argument expected" <<
info << "run 'bpkg help pkg-build' for more information";
- database db (open (c, trace)); // Also populates the system repository.
+ // If multiple current configurations are specified, then open the first
+ // one, attach the remaining, verify that their schemas match (which may
+ // not be the case if they don't belong to the same linked database
+ // cluster), and attach their explicitly linked databases, recursively.
+ //
+ // Also populates the system repository.
+ //
+ // @@ Note that currently we don't verify the specified configurations
+ // belong to the same cluster.
+ //
+ database mdb (config_dirs[0],
+ trace,
+ true /* pre_attach */,
+ true /* sys_rep */,
+ dir_paths () /* pre_link */,
+ (config_dirs.size () == 1
+ ? empty_string
+ : '[' + config_dirs[0].representation () + ']'));
+
+ // Command line as a dependent.
+ //
+ package_version_key cmd_line (mdb, "command line");
+
+ current_configs.push_back (mdb);
+
+ if (config_dirs.size () != 1)
+ {
+ transaction t (mdb);
+
+ odb::schema_version sv (mdb.schema_version ());
+ for (auto i (config_dirs.begin () + 1); i != config_dirs.end (); ++i)
+ {
+ database& db (mdb.attach (normalize (*i, "configuration"),
+ true /* sys_rep */));
+
+ if (db.schema_version () != sv)
+ fail << "specified configurations belong to different linked "
+ << "configuration clusters" <<
+ info << mdb.config_orig <<
+ info << db.config_orig;
+
+ db.attach_explicit (true /* sys_rep */);
+
+ // Suppress duplicates.
+ //
+ if (!current (db))
+ current_configs.push_back (db);
+ }
+
+ t.commit ();
+ }
+
+ validate_options (o, ""); // Global package options.
// Note that the session spans all our transactions. The idea here is that
// selected_package objects in build_packages below will be cached in this
@@ -2440,7 +3176,7 @@ namespace bpkg
// will modify the cached instance, which means our list will always "see"
// their updated state.
//
- // Also note that rep_fetch() must be called in session.
+ // Also note that rep_fetch() and pkg_fetch() must be called in session.
//
session ses;
@@ -2452,12 +3188,24 @@ namespace bpkg
// duplicates. Note that the last repository location overrides the
// previous ones with the same canonical name.
//
+ // Also note that the dependency specs may not have the repository
+ // location specified, since they obtain the repository information via
+ // their ultimate dependent configurations.
+ //
+ // Also collect the databases specified on the command line for the held
+ // packages, to later use them as repository information sources for the
+ // dependencies. Additionally use the current configurations as repository
+ // information sources.
+ //
+ repo_configs = current_configs;
+
struct pkg_spec
{
- string packages;
- repository_location location;
- pkg_options options;
- strings config_vars;
+ reference_wrapper<database> db;
+ string packages;
+ repository_location location;
+ pkg_options options;
+ strings config_vars;
};
vector<pkg_spec> specs;
@@ -2498,16 +3246,16 @@ namespace bpkg
fail << "unexpected options group for configuration variable '"
<< v << "'";
- cvars.push_back (move (v));
+ cvars.push_back (move (trim (v)));
}
if (!cvars.empty () && !sep)
fail << "configuration variables must be separated from packages "
<< "with '--'";
- vector<repository_location> locations;
+ database_map<vector<repository_location>> locations;
- transaction t (db);
+ transaction t (mdb);
while (args.more ())
{
@@ -2520,28 +3268,33 @@ namespace bpkg
fail << "unexpected configuration variable '" << a << "'" <<
info << "use the '--' separator to treat it as a package";
- specs.emplace_back ();
- pkg_spec& ps (specs.back ());
+ pkg_options po;
+
+ // Merge the common and package-specific configuration variables
+ // (commons go first).
+ //
+ strings cvs (cvars);
try
{
- auto& po (ps.options);
-
cli::scanner& ag (args.group ());
- po.parse (ag, cli::unknown_mode::fail, cli::unknown_mode::stop);
-
- // Merge the common and package-specific configuration variables
- // (commons go first).
- //
- ps.config_vars = cvars;
while (ag.more ())
{
- string a (ag.next ());
- if (a.find ('=') == string::npos)
- fail << "unexpected group argument '" << a << "'";
+ if (!po.parse (ag) || ag.more ())
+ {
+ string a (ag.next ());
+ if (a.find ('=') == string::npos)
+ fail << "unexpected group argument '" << a << "'";
- ps.config_vars.push_back (move (a));
+ trim (a);
+
+ if (a[0] == '!')
+ fail << "global override in package-specific configuration "
+ << "variable '" << a << "'";
+
+ cvs.push_back (move (a));
+ }
}
// We have to manually merge global options into local since just
@@ -2554,15 +3307,80 @@ namespace bpkg
}
catch (const cli::exception& e)
{
- fail << e << " grouped for argument '" << a << "'";
+ fail << e << " grouped for argument " << a;
+ }
+
+ // Resolve the configuration options into the databases, suppressing
+ // duplicates.
+ //
+ // Note: main database if no --config-* option is specified, unless we
+ // are in the multi-config mode, in which case we fail.
+ //
+ linked_databases dbs;
+ auto add_db = [&dbs] (database& db)
+ {
+ if (find (dbs.begin (), dbs.end (), db) == dbs.end ())
+ dbs.push_back (db);
+ };
+
+ for (const string& nm: po.config_name ())
+ {
+ assert (!multi_config ()); // Should have failed earlier.
+ add_db (mdb.find_attached (nm));
}
+ for (uint64_t id: po.config_id ())
+ {
+ assert (!multi_config ()); // Should have failed earlier.
+ add_db (mdb.find_attached (id));
+ }
+
+ for (const uuid& uid: po.config_uuid ())
+ {
+ database* db (nullptr);
+
+ for (database& cdb: current_configs)
+ {
+ if ((db = cdb.try_find_dependency_config (uid)) != nullptr)
+ break;
+ }
+
+ if (db == nullptr)
+ fail << "no configuration with uuid " << uid << " is linked with "
+ << (!multi_config ()
+ ? mdb.config_orig.representation ()
+ : "specified current configurations");
+
+ add_db (*db);
+ }
+
+ // Note that unspecified package configuration in the multi-
+ // configurations mode is an error, unless this is a system
+ // dependency. We, however, do not parse the package scheme at this
+ // stage and so delay the potential failure.
+ //
+ if (dbs.empty ())
+ dbs.push_back (mdb);
+
if (!a.empty () && a[0] == '?')
{
- ps.options.dependency (true);
+ po.dependency (true);
a.erase (0, 1);
}
+ // If this is a package to hold, then add its databases to the
+ // repository information source list, suppressing duplicates.
+ //
+ if (!po.dependency ())
+ {
+ for (database& db: dbs)
+ {
+ if (find (repo_configs.begin (), repo_configs.end (), db) ==
+ repo_configs.end ())
+ repo_configs.push_back (db);
+ }
+ }
+
// Check if the argument has the [<packages>]@<location> form or looks
// like a URL. Find the position of <location> if that's the case and
// set it to string::npos otherwise.
@@ -2603,96 +3421,162 @@ namespace bpkg
if (l.empty ())
fail << "empty repository location in '" << a << "'";
- // Search for the repository location in the database before trying
- // to parse it. Note that the straight parsing could otherwise fail,
- // being unable to properly guess the repository type.
- //
- // Also note that the repository location URL is not unique and we
- // can potentially end up with multiple repositories. For example:
- //
- // $ bpkg add git+file:/path/to/git/repo dir+file:/path/to/git/repo
- // $ bpkg build @/path/to/git/repo
- //
- // That's why we pick the repository only if there is exactly one
- // match.
- //
- shared_ptr<repository> r;
+ if (po.dependency ())
+ fail << "unexpected repository location in '?" << a << "'" <<
+ info << "repository location cannot be specified for "
+ << "dependencies";
+
+ string pks (p > 1 ? string (a, 0, p - 1) : empty_string);
+
+ for (size_t i (0); i != dbs.size (); ++i)
{
- using query = query<repository>;
+ database& db (dbs[i]);
- // For case-insensitive filesystems (Windows) we need to match the
- // location case-insensitively against the local repository URLs
- // and case-sensitively against the remote ones.
+ // Search for the repository location in the database before
+ // trying to parse it. Note that the straight parsing could
+ // otherwise fail, being unable to properly guess the repository
+ // type.
//
- // Note that the root repository will never be matched, since its
- // location is empty.
+ // Also note that the repository location URL is not unique and we
+ // can potentially end up with multiple repositories. For example:
//
- const auto& url (query::location.url);
+ // $ bpkg add git+file:/path/to/git/repo dir+file:/path/to/git/repo
+ // $ bpkg build @/path/to/git/repo
+ //
+ // That's why we pick the repository only if there is exactly one
+ // match.
+ //
+ shared_ptr<repository> r;
+ {
+ using query = query<repository>;
+
+ // For case-insensitive filesystems (Windows) we need to match
+ // the location case-insensitively against the local repository
+ // URLs and case-sensitively against the remote ones.
+ //
+ // Note that the root repository will never be matched, since
+ // its location is empty.
+ //
+ const auto& url (query::location.url);
#ifndef _WIN32
- query q (url == l);
+ query q (url == l);
#else
- string u (url.table ());
- u += '.';
- u += url.column ();
+ string u (url.table ());
+ u += '.';
+ u += url.column ();
- query q (
- (!query::local && url == l) ||
- ( query::local && u + " COLLATE nocase = " + query::_val (l)));
+ query q (
+ (!query::local && url == l) ||
+ ( query::local && u + " COLLATE nocase = " + query::_val (l)));
#endif
- auto rs (db.query<repository> (q));
- auto i (rs.begin ());
+ auto rs (db.query<repository> (q));
+ auto i (rs.begin ());
- if (i != rs.end ())
- {
- r = i.load ();
+ if (i != rs.end ())
+ {
+ r = i.load ();
- // Fallback to parsing the location if several repositories
- // match.
- //
- if (++i != rs.end ())
- r = nullptr;
+ // Fallback to parsing the location if several repositories
+ // match.
+ //
+ if (++i != rs.end ())
+ r = nullptr;
+ }
}
- }
-
- ps.location = r != nullptr
- ? r->location
- : parse_location (l, nullopt /* type */);
- if (p > 1)
- ps.packages = string (a, 0, p - 1);
+ repository_location loc (r != nullptr
+ ? r->location
+ : parse_location (l, nullopt /* type */));
- if (!o.no_fetch ())
- {
- auto pr = [&ps] (const repository_location& i) -> bool
+ if (!o.no_fetch ())
{
- return i.canonical_name () == ps.location.canonical_name ();
- };
+ auto i (locations.find (db));
+ if (i == locations.end ())
+ i = locations.insert (db,
+ vector<repository_location> ()).first;
+
+ auto pr = [&loc] (const repository_location& i) -> bool
+ {
+ return i.canonical_name () == loc.canonical_name ();
+ };
- auto i (find_if (locations.begin (), locations.end (), pr));
+ vector<repository_location>& ls (i->second);
+ auto j (find_if (ls.begin (), ls.end (), pr));
- if (i != locations.end ())
- *i = ps.location;
+ if (j != ls.end ())
+ *j = loc;
+ else
+ ls.push_back (loc);
+ }
+
+ // Move the pkg_spec components for the last database on the list,
+ // rather than copying them.
+ //
+ if (i != dbs.size () - 1)
+ specs.push_back (pkg_spec {db, pks, move (loc), po, cvs});
else
- locations.push_back (ps.location);
+ specs.push_back (pkg_spec {db,
+ move (pks),
+ move (loc),
+ move (po),
+ move (cvs)});
}
}
else
- ps.packages = move (a);
+ {
+ // Move the pkg_spec components for the last database in the list,
+ // rather than copying them.
+ //
+ for (size_t i (0); i != dbs.size (); ++i)
+ {
+ database& db (dbs[i]);
+
+ if (i != dbs.size () - 1)
+ specs.emplace_back (pkg_spec {db,
+ a,
+ repository_location (),
+ po,
+ cvs});
+ else
+ specs.emplace_back (pkg_spec {db,
+ move (a),
+ repository_location (),
+ move (po),
+ move (cvs)});
+ }
+ }
}
t.commit ();
- if (!locations.empty ())
+ // Initialize tmp directories.
+ //
+ for (database& db: repo_configs)
+ init_tmp (db.config_orig);
+
+ // Fetch the repositories in the current configuration.
+ //
+ // Note that during this build only the repositories information from
+ // the main database will be used.
+ //
+ for (const auto& l: locations)
rep_fetch (o,
- c,
- db,
- locations,
+ l.first,
+ l.second,
o.fetch_shallow (),
string () /* reason for "fetching ..." */);
}
+ // Now, as repo_configs is filled and the repositories are fetched mask
+ // the repositories, if any.
+ //
+ if (o.mask_repository_specified () || o.mask_repository_uuid_specified ())
+ rep_mask (o.mask_repository (),
+ o.mask_repository_uuid (),
+ current_configs);
+
// Expand the package specs into individual package args, parsing them
// into the package scheme, name, and version constraint components, and
// also saving associated options and configuration variables.
@@ -2704,65 +3588,22 @@ namespace bpkg
//
struct pkg_arg
{
+ // NULL for system dependency with unspecified configuration.
+ //
+ database* db;
+
package_scheme scheme;
package_name name;
optional<version_constraint> constraint;
string value;
pkg_options options;
strings config_vars;
- };
-
- // Create the parsed package argument.
- //
- auto arg_package = [] (package_scheme sc,
- package_name nm,
- optional<version_constraint> vc,
- pkg_options os,
- strings vs) -> pkg_arg
- {
- assert (!vc || !vc->empty ()); // May not be empty if present.
-
- pkg_arg r {sc, move (nm), move (vc), string (), move (os), move (vs)};
-
- switch (sc)
- {
- case package_scheme::sys:
- {
- if (!r.constraint)
- r.constraint = version_constraint (wildcard_version);
-
- // The system package may only have an exact/wildcard version
- // specified.
- //
- assert (r.constraint->min_version == r.constraint->max_version);
-
- const system_package* sp (system_repository.find (r.name));
-
- // Will deal with all the duplicates later.
- //
- if (sp == nullptr || !sp->authoritative)
- system_repository.insert (r.name,
- *r.constraint->min_version,
- true /* authoritative */);
-
- break;
- }
- case package_scheme::none: break; // Nothing to do.
- }
- return r;
- };
-
- // Create the unparsed package argument.
- //
- auto arg_raw = [] (string v, pkg_options os, strings vs) -> pkg_arg
- {
- return pkg_arg {package_scheme::none,
- package_name (),
- nullopt /* constraint */,
- move (v),
- move (os),
- move (vs)};
+ // If scheme is sys then this member indicates whether the constraint
+ // came from the system package manager (not NULL) or user/fallback
+ // (NULL).
+ //
+ const system_package_status* system_status;
};
auto arg_parsed = [] (const pkg_arg& a) {return !a.name.empty ();};
@@ -2824,23 +3665,41 @@ namespace bpkg
append (v, s);
};
+ auto add_num = [&add_string] (const char* o, auto v)
+ {
+ add_string (o, to_string (v));
+ };
+
const pkg_options& o (a.options);
- add_bool ("--keep-out", o.keep_out ());
- add_bool ("--upgrade", o.upgrade ());
- add_bool ("--patch", o.patch ());
- add_bool ("--immediate", o.immediate ());
- add_bool ("--recursive", o.recursive ());
- add_bool ("--upgrade-immediate", o.upgrade_immediate ());
- add_bool ("--upgrade-recursive", o.upgrade_recursive ());
- add_bool ("--patch-immediate", o.patch_immediate ());
- add_bool ("--patch-recursive", o.patch_recursive ());
+ add_bool ("--keep-out", o.keep_out ());
+ add_bool ("--disfigure", o.disfigure ());
+ add_bool ("--upgrade", o.upgrade ());
+ add_bool ("--patch", o.patch ());
+ add_bool ("--deorphan", o.deorphan ());
+ add_bool ("--immediate", o.immediate ());
+ add_bool ("--recursive", o.recursive ());
+ add_bool ("--upgrade-immediate", o.upgrade_immediate ());
+ add_bool ("--upgrade-recursive", o.upgrade_recursive ());
+ add_bool ("--patch-immediate", o.patch_immediate ());
+ add_bool ("--patch-recursive", o.patch_recursive ());
+ add_bool ("--deorphan-immediate", o.deorphan_immediate ());
+ add_bool ("--deorphan-recursive", o.deorphan_recursive ());
if (o.checkout_root_specified ())
add_string ("--checkout-root", o.checkout_root ().string ());
add_bool ("--checkout-purge", o.checkout_purge ());
+ for (const string& nm: o.config_name ())
+ add_string ("--config-name", nm);
+
+ for (uint64_t id: o.config_id ())
+ add_num ("--config-id", id);
+
+ for (const uuid& uid: o.config_uuid ())
+ add_string ("--config-uuid", uid.string ());
+
// Compose the option/variable group.
//
if (!s.empty () || !a.config_vars.empty ())
@@ -2863,6 +3722,222 @@ namespace bpkg
return r;
};
+ // Figure out the system package version unless explicitly specified and
+ // add the system package authoritative information to the database's
+ // system repository unless the database is NULL or it already contains
+ // authoritative information for this package. Return the figured out
+ // system package version as constraint.
+ //
+ // Note that it is assumed that all the possible duplicates are handled
+ // elsewhere/later.
+ //
+ auto add_system_package = [&o] (database* db,
+ const package_name& nm,
+ optional<version_constraint> vc,
+ const system_package_status* sps,
+ vector<shared_ptr<available_package>>* stubs)
+ -> pair<version_constraint, const system_package_status*>
+ {
+ if (!vc)
+ {
+ assert (sps == nullptr);
+
+ // See if we should query the system package manager.
+ //
+ if (!sys_pkg_mgr)
+ sys_pkg_mgr = o.sys_no_query ()
+ ? nullptr
+ : make_consumption_system_package_manager (o,
+ host_triplet,
+ o.sys_distribution (),
+ o.sys_architecture (),
+ o.sys_install (),
+ !o.sys_no_fetch (),
+ o.sys_yes (),
+ o.sys_sudo ());
+ if (*sys_pkg_mgr != nullptr)
+ {
+ system_package_manager& spm (**sys_pkg_mgr);
+
+ // First check the cache.
+ //
+ optional<const system_package_status*> os (spm.status (nm, nullptr));
+
+ available_packages aps;
+ if (!os)
+ {
+ // If no cache hit, then collect the available packages for the
+ // mapping information.
+ //
+ aps = find_available_all (current_configs, nm);
+
+ // If no source/stub for the package (and thus no mapping), issue
+ // diagnostics consistent with other such places unless explicitly
+ // allowed by the user.
+ //
+ if (aps.empty ())
+ {
+ if (!o.sys_no_stub ())
+ fail << "unknown package " << nm <<
+ info << "consider specifying --sys-no-stub or " << nm << "/*";
+
+ // Add the stub package to the imaginary system repository (like
+ // the user-specified case below).
+ //
+ if (stubs != nullptr)
+ stubs->push_back (make_shared<available_package> (nm));
+ }
+ }
+
+ // This covers both our diagnostics below as well as anything that
+ // might be issued by status().
+ //
+ auto df = make_diag_frame (
+ [&nm] (diag_record& dr)
+ {
+ dr << info << "specify " << nm << "/* if package is not "
+ << "installed with system package manager";
+
+ dr << info << "specify --sys-no-query to disable system "
+ << "package manager interactions";
+ });
+
+ if (!os)
+ {
+ os = spm.status (nm, &aps);
+ assert (os);
+ }
+
+ if ((sps = *os) != nullptr)
+ vc = version_constraint (sps->version);
+ else
+ {
+ diag_record dr (fail);
+
+ dr << "no installed " << (o.sys_install () ? "or available " : "")
+ << "system package for " << nm;
+
+ if (!o.sys_install ())
+ dr << info << "specify --sys-install to try to install it";
+ }
+ }
+ else
+ vc = version_constraint (wildcard_version);
+ }
+ else
+ {
+ // The system package may only have an exact/wildcard version
+ // specified.
+ //
+ assert (vc->min_version == vc->max_version);
+
+ // For system packages not associated with a specific repository
+ // location add the stub package to the imaginary system repository
+ // (see below for details).
+ //
+ if (stubs != nullptr)
+ stubs->push_back (make_shared<available_package> (nm));
+ }
+
+ if (db != nullptr)
+ {
+ assert (db->system_repository);
+
+ const system_package* sp (db->system_repository->find (nm));
+
+ // Note that we don't check for the version match here since that's
+ // handled by check_dup() lambda at a later stage, which covers both
+ // db and no-db cases consistently.
+ //
+ if (sp == nullptr || !sp->authoritative)
+ db->system_repository->insert (nm,
+ *vc->min_version,
+ true /* authoritative */,
+ sps);
+ }
+
+ return make_pair (move (*vc), sps);
+ };
+
+ // Create the parsed package argument. Issue diagnostics and fail if the
+ // package specification is invalid.
+ //
+ auto arg_package = [&arg_string, &add_system_package]
+ (database* db,
+ package_scheme sc,
+ package_name nm,
+ optional<version_constraint> vc,
+ pkg_options os,
+ strings vs,
+ vector<shared_ptr<available_package>>* stubs = nullptr)
+ -> pkg_arg
+ {
+ assert (!vc || !vc->empty ()); // May not be empty if present.
+
+ if (db == nullptr)
+ assert (sc == package_scheme::sys && os.dependency ());
+
+ pkg_arg r {db,
+ sc,
+ move (nm),
+ move (vc),
+ string () /* value */,
+ move (os),
+ move (vs),
+ nullptr /* system_status */};
+
+ // Verify that the package database is specified in the multi-config
+ // mode, unless this is a system dependency package.
+ //
+ if (multi_config () &&
+ !os.config_uuid_specified () &&
+ !(db == nullptr &&
+ sc == package_scheme::sys &&
+ os.dependency ()))
+ fail << "no configuration specified for " << arg_string (r) <<
+ info << "configuration must be explicitly specified for each "
+ << "package in multi-configurations mode" <<
+ info << "use --config-uuid to specify its configuration";
+
+ switch (sc)
+ {
+ case package_scheme::sys:
+ {
+ assert (stubs != nullptr);
+
+ auto sp (add_system_package (db,
+ r.name,
+ move (r.constraint),
+ nullptr /* system_package_status */,
+ stubs));
+
+ r.constraint = move (sp.first);
+ r.system_status = sp.second;
+ break;
+ }
+ case package_scheme::none: break; // Nothing to do.
+ }
+
+ return r;
+ };
+
+ // Create the unparsed package argument.
+ //
+ auto arg_raw = [] (database& db,
+ string v,
+ pkg_options os,
+ strings vs) -> pkg_arg
+ {
+ return pkg_arg {&db,
+ package_scheme::none,
+ package_name (),
+ nullopt /* constraint */,
+ move (v),
+ move (os),
+ move (vs),
+ nullptr /* system_status */};
+ };
+
vector<pkg_arg> pkg_args;
{
// Cache the system stubs to create the imaginary system repository at
@@ -2873,18 +3948,18 @@ namespace bpkg
//
vector<shared_ptr<available_package>> stubs;
- transaction t (db);
+ transaction t (mdb);
// Don't fold the zero revision if building the package from source so
// that we build the exact X+0 package revision if it is specified.
//
- auto fold_zero_rev = [] (package_scheme sc)
+ auto version_flags = [] (package_scheme sc)
{
- bool r (false);
+ version::flags r (version::none);
switch (sc)
{
- case package_scheme::none: r = false; break;
- case package_scheme::sys: r = true; break;
+ case package_scheme::none: r = version::none; break;
+ case package_scheme::sys: r = version::fold_zero_revision; break;
}
return r;
};
@@ -2921,39 +3996,51 @@ namespace bpkg
optional<version_constraint> vc (
parse_package_version_constraint (
- s, sys, fold_zero_rev (sc), version_only (sc)));
+ s, sys, version_flags (sc), version_only (sc)));
- // For system packages not associated with a specific repository
- // location add the stub package to the imaginary system
- // repository (see above for details).
- //
- if (sys && vc)
- stubs.push_back (make_shared<available_package> (n));
+ pkg_options& o (ps.options);
- pkg_args.push_back (arg_package (sc,
+ // Disregard the (main) database for a system dependency with
+ // unspecified configuration.
+ //
+ bool no_db (sys &&
+ o.dependency () &&
+ !o.config_name_specified () &&
+ !o.config_id_specified () &&
+ !o.config_uuid_specified ());
+
+ pkg_args.push_back (arg_package (no_db ? nullptr : &ps.db.get (),
+ sc,
move (n),
move (vc),
- move (ps.options),
- move (ps.config_vars)));
+ move (o),
+ move (ps.config_vars),
+ &stubs));
}
else // Add unparsed.
- pkg_args.push_back (arg_raw (move (ps.packages),
+ pkg_args.push_back (arg_raw (ps.db,
+ move (ps.packages),
move (ps.options),
move (ps.config_vars)));
continue;
}
+ // Use it both as the package database and the source of the
+ // repository information.
+ //
+ database& pdb (ps.db);
+
// Expand the [[<packages>]@]<location> spec. Fail if the repository
// is not found in this configuration, that can be the case in the
// presence of --no-fetch option.
//
shared_ptr<repository> r (
- db.find<repository> (ps.location.canonical_name ()));
+ pdb.find<repository> (ps.location.canonical_name ()));
if (r == nullptr)
- fail << "repository '" << ps.location
- << "' does not exist in this configuration";
+ fail << "repository '" << ps.location << "' does not exist in this "
+ << "configuration";
// If no packages are specified explicitly (the argument starts with
// '@' or is a URL) then we select latest versions of all the packages
@@ -2972,7 +4059,7 @@ namespace bpkg
{
using query = query<repository_fragment_package>;
- for (const auto& rp: db.query<repository_fragment_package> (
+ for (const auto& rp: pdb.query<repository_fragment_package> (
(query::repository_fragment::name ==
rf.fragment.load ()->name) +
order_by_version_desc (query::package::id.version)))
@@ -2987,7 +4074,7 @@ namespace bpkg
if (ps.options.patch ())
{
shared_ptr<selected_package> sp (
- db.find<selected_package> (nm));
+ pdb.find<selected_package> (nm));
// It seems natural in the presence of --patch option to only
// patch the selected packages and not to build new packages if
@@ -3038,7 +4125,8 @@ namespace bpkg
info << "package " << pv.first << " is not present in "
<< "configuration";
else
- pkg_args.push_back (arg_package (package_scheme::none,
+ pkg_args.push_back (arg_package (&pdb,
+ package_scheme::none,
pv.first,
version_constraint (pv.second),
ps.options,
@@ -3065,7 +4153,7 @@ namespace bpkg
optional<version_constraint> vc (
parse_package_version_constraint (
- s, sys, fold_zero_rev (sc), version_only (sc)));
+ s, sys, version_flags (sc), version_only (sc)));
// Check if the package is present in the repository and its
// complements, recursively. If the version is not specified then
@@ -3099,7 +4187,7 @@ namespace bpkg
if (!vc)
{
if (ps.options.patch () &&
- (sp = db.find<selected_package> (n)) != nullptr)
+ (sp = pdb.find<selected_package> (n)) != nullptr)
{
c = patch_constraint (sp);
@@ -3115,7 +4203,7 @@ namespace bpkg
}
shared_ptr<available_package> ap (
- find_available_one (db, n, c, rfs, false /* prereq */).first);
+ find_available_one (pdb, n, c, rfs, false /* prereq */).first);
// Fail if no available package is found or only a stub is
// available and we are building a source package.
@@ -3127,7 +4215,7 @@ namespace bpkg
// If the selected package is loaded then we aim to patch it.
//
if (sp != nullptr)
- dr << "patch version for " << *sp << " is not found in "
+ dr << "patch version for " << *sp << pdb << " is not found in "
<< r->name;
else if (ap == nullptr)
dr << "package " << pkg << " is not found in " << r->name;
@@ -3152,11 +4240,17 @@ namespace bpkg
// Don't move options and variables as they may be reused.
//
- pkg_args.push_back (arg_package (sc,
+ // Note that this cannot be a system dependency with unspecified
+ // configuration since location is specified and so we always pass
+ // the database to the constructor.
+ //
+ pkg_args.push_back (arg_package (&pdb,
+ sc,
move (n),
move (vc),
ps.options,
- ps.config_vars));
+ ps.config_vars,
+ &stubs));
}
}
}
@@ -3166,6 +4260,10 @@ namespace bpkg
imaginary_stubs = move (stubs);
}
+ // List of package configurations specified on the command line.
+ //
+ vector<package_key> pkg_confs;
+
// Separate the packages specified on the command line into to hold and to
// up/down-grade as dependencies, and save dependents whose dependencies
// must be upgraded recursively.
@@ -3174,18 +4272,64 @@ namespace bpkg
dependency_packages dep_pkgs;
recursive_packages rec_pkgs;
+ // Note that the command line adjustments which resolve the unsatisfied
+ // dependent issue (see unsatisfied_dependents for details) may
+ // potentially be sub-optimal, since we do not perform the full
+ // backtracking by trying all the possible adjustments and picking the
+ // most optimal combination. Instead, we keep collecting adjustments until
+ // either the package builds collection succeeds or there are no more
+ // adjustment combinations to try (and we don't try all of them). As a
+ // result we, for example, may end up with some redundant constraints on
+ // the command line just because the respective dependents have been
+ // evaluated first. Generally, dropping all the redundant adjustments can
+ // potentially be quite time-consuming, since we would need to try
+ // dropping all their possible combinations. We, however, will implement
+ // the refinement for only the common case (adjustments are independent),
+ // trying to drop just one adjustment per the refinement cycle iteration
+ // and wait and see how it goes.
+ //
+ cmdline_adjustments cmdline_adjs (hold_pkgs, dep_pkgs);
+
+ // If both are present, then we are in the command line adjustments
+ // refinement cycle, where cmdline_refine_adjustment is the adjustment
+ // being currently dropped and cmdline_refine_index is its index on the
+ // stack (as it appears at the beginning of the cycle).
+ //
+ optional<cmdline_adjustment> cmdline_refine_adjustment;
+ optional<size_t> cmdline_refine_index;
+
{
// Check if the package is a duplicate. Return true if it is but
// harmless.
//
- map<package_name, pkg_arg> package_map;
+ struct sys_package_key // Like package_key but with NULL'able db.
+ {
+ package_name name;
+ database* db; // Can be NULL for system dependency.
- auto check_dup = [&package_map, &arg_string, arg_parsed] (
- const pkg_arg& pa) -> bool
+ sys_package_key (package_name n, database* d)
+ : name (move (n)), db (d) {}
+
+ bool
+ operator< (const sys_package_key& v) const
+ {
+ if (int r = name.compare (v.name))
+ return r < 0;
+
+ return db != nullptr && v.db != nullptr ? *db < *v.db :
+ db == nullptr && v.db == nullptr ? false :
+ db == nullptr;
+ }
+ };
+
+ map<sys_package_key, pkg_arg> package_map;
+
+ auto check_dup = [&package_map, &arg_string, &arg_parsed]
+ (const pkg_arg& pa) -> bool
{
assert (arg_parsed (pa));
- auto r (package_map.emplace (pa.name, pa));
+ auto r (package_map.emplace (sys_package_key {pa.name, pa.db}, pa));
const pkg_arg& a (r.first->second);
assert (arg_parsed (a));
@@ -3200,19 +4344,132 @@ namespace bpkg
if (!r.second &&
(a.scheme != pa.scheme ||
a.name != pa.name ||
+ a.db != pa.db ||
a.constraint != pa.constraint ||
!compare_options (a.options, pa.options) ||
a.config_vars != pa.config_vars))
fail << "duplicate package " << pa.name <<
- info << "first mentioned as " << arg_string (r.first->second) <<
+ info << "first mentioned as " << arg_string (a) <<
info << "second mentioned as " << arg_string (pa);
return !r.second;
};
- transaction t (db);
+ transaction t (mdb);
+
+ // Return the available package that matches the specified orphan best
+ // (see evaluate_dependency() description for details). Also return the
+ // repository fragment the package comes from. Return a pair of NULLs if
+ // no suitable package has been found.
+ //
+ auto find_orphan_match =
+ [] (const shared_ptr<selected_package>& sp,
+ const lazy_shared_ptr<repository_fragment>& root)
+ {
+ using available = pair<shared_ptr<available_package>,
+ lazy_shared_ptr<repository_fragment>>;
+
+ assert (sp != nullptr);
- shared_ptr<repository_fragment> root (db.load<repository_fragment> (""));
+ const package_name& n (sp->name);
+ const version& v (sp->version);
+ optional<version_constraint> vc {version_constraint (v)};
+
+ // Note that non-zero iteration makes a version non-standard, so we
+ // reset it to 0 to produce the patch/minor constraints.
+ //
+ version vr (v.epoch,
+ v.upstream,
+ v.release,
+ v.revision,
+ 0 /* iteration */);
+
+ optional<version_constraint> pc (
+ patch_constraint (n, vr, true /* quiet */));
+
+ optional<version_constraint> mc (
+ minor_constraint (n, vr, true /* quiet */));
+
+ // Note: explicit revision makes query_available() to always consider
+ // revisions (but not iterations) regardless of the revision argument
+ // value.
+ //
+ optional<version_constraint> verc {
+ version_constraint (version (v.epoch,
+ v.upstream,
+ v.release,
+ v.revision ? v.revision : 0,
+ 0 /* iteration */))};
+
+ optional<version_constraint> vlc {
+ version_constraint (version (v.epoch,
+ v.upstream,
+ v.release,
+ nullopt,
+ 0 /* iteration */))};
+
+ // Find the latest available non-stub package, optionally matching a
+ // constraint and considering revision. If a package is found, then
+ // cache it together with the repository fragment it comes from and
+ // return true.
+ //
+ available find_result;
+ const version* find_version (nullptr);
+ auto find = [&n,
+ &root,
+ &find_result,
+ &find_version] (const optional<version_constraint>& c,
+ bool revision = false) -> bool
+ {
+ available r (
+ find_available_one (n, c, root, false /* prereq */, revision));
+
+ const shared_ptr<available_package>& ap (r.first);
+
+ if (ap != nullptr && !ap->stub ())
+ {
+ find_result = move (r);
+ find_version = &find_result.first->version;
+ return true;
+ }
+ else
+ return false;
+ };
+
+ if (// Same version, revision, and iteration.
+ //
+ find (vc, true) ||
+ //
+ // Latest iteration of same version and revision.
+ //
+ find (verc) ||
+ //
+ // Later revision of same version.
+ //
+ (find (vlc) &&
+ find_version->compare (v,
+ false /* revision */,
+ true /* iteration */) > 0) ||
+ //
+ // Later patch of same version.
+ //
+ (pc && find (pc) &&
+ find_version->compare (v, true /* revision */) > 0) ||
+ //
+ // Later minor of same version.
+ //
+ (mc && find (mc) &&
+ find_version->compare (v, true /* revision */) > 0) ||
+ //
+ // Latest available version, including earlier.
+ //
+ find (nullopt))
+ {
+ return find_result;
+ }
+
+ return available ();
+ };
// Here is what happens here: for unparsed package args we are going to
// try and guess whether we are dealing with a package archive, package
@@ -3220,27 +4477,33 @@ namespace bpkg
// then as a directory, and then assume it is name/version. Sometimes,
// however, it is really one of the first two but just broken. In this
// case things are really confusing since we suppress all diagnostics
- // for the first two "guesses". So what we are going to do here is re-run
- // them with full diagnostics if the name/version guess doesn't pan out.
+ // for the first two "guesses". So what we are going to do here is
+ // re-run them with full diagnostics if the name/version guess doesn't
+ // pan out.
//
bool diag (false);
for (auto i (pkg_args.begin ()); i != pkg_args.end (); )
{
- pkg_arg& pa (*i);
+ pkg_arg& pa (*i);
+ database* pdb (pa.db);
// Reduce all the potential variations (archive, directory, package
// name, package name/version) to a single available_package object.
//
- shared_ptr<repository_fragment> af;
+ // Note that the repository fragment is only used for the
+ // build-to-hold packages.
+ //
+ lazy_shared_ptr<repository_fragment> af;
shared_ptr<available_package> ap;
+ bool existing (false); // True if build as an archive or directory.
if (!arg_parsed (pa))
{
- const char* package (pa.value.c_str ());
+ assert (pdb != nullptr); // Unparsed and so can't be system.
- // Is this a package archive?
- //
- bool package_arc (false);
+ lazy_shared_ptr<repository_fragment> root (*pdb, empty_string);
+
+ const char* package (pa.value.c_str ());
try
{
@@ -3255,26 +4518,18 @@ namespace bpkg
pkg_verify (o,
a,
true /* ignore_unknown */,
+ false /* ignore_toolchain */,
false /* expand_values */,
- true /* complete_depends */,
- diag));
+ true /* load_buildfiles */,
+ true /* complete_values */,
+ diag ? 2 : 1));
// This is a package archive.
//
- // Note that throwing failed from here on will be fatal.
- //
- package_arc = true;
-
l4 ([&]{trace << "archive '" << a << "': " << arg_string (pa);});
- // Supporting this would complicate things a bit, but we may add
- // support for it one day.
- //
- if (pa.options.dependency ())
- fail << "package archive '" << a
- << "' may not be built as a dependency";
-
- pa = arg_package (package_scheme::none,
+ pa = arg_package (pdb,
+ package_scheme::none,
m.name,
version_constraint (m.version),
move (pa.options),
@@ -3283,19 +4538,17 @@ namespace bpkg
af = root;
ap = make_shared<available_package> (move (m));
ap->locations.push_back (package_location {root, move (a)});
+
+ existing_packages.push_back (make_pair (ref (*pdb), ap));
+ existing = true;
}
}
catch (const invalid_path&)
{
// Not a valid path so cannot be an archive.
}
- catch (const failed&)
+ catch (const not_package&)
{
- // If this is a valid package archive but something went wrong
- // afterwards, then we are done.
- //
- if (package_arc)
- throw;
}
// Is this a package directory?
@@ -3309,8 +4562,6 @@ namespace bpkg
size_t pn (strlen (package));
if (pn != 0 && path::traits_type::is_separator (package[pn - 1]))
{
- bool package_dir (false);
-
try
{
dir_path d (package);
@@ -3320,47 +4571,52 @@ namespace bpkg
info << "'" << package << "' does not appear to be a valid "
<< "package directory: ";
+ // For better diagnostics, let's obtain the package info after
+ // pkg_verify() verifies that this is a package directory.
+ //
+ package_version_info pvi;
+
package_manifest m (
pkg_verify (
+ o,
d,
true /* ignore_unknown */,
- [&o, &d] (version& v)
+ false /* ignore_toolchain */,
+ true /* load_buildfiles */,
+ [&o, &d, &pvi] (version& v)
{
- if (optional<version> pv = package_version (o, d))
- v = move (*pv);
+ // Note that we also query subprojects since the package
+ // information will be used for the subsequent
+ // package_iteration() call.
+ //
+ pvi = package_version (o, d, b_info_flags::subprojects);
+
+ if (pvi.version)
+ v = move (*pvi.version);
},
- diag));
+ diag ? 2 : 1));
// This is a package directory.
//
- // Note that throwing failed from here on will be fatal.
- //
- package_dir = true;
-
l4 ([&]{trace << "directory '" << d << "': "
<< arg_string (pa);});
- // Supporting this would complicate things a bit, but we may
- // add support for it one day.
- //
- if (pa.options.dependency ())
- fail << "package directory '" << d
- << "' may not be built as a dependency";
-
// Fix-up the package version to properly decide if we need to
// upgrade/downgrade the package.
//
if (optional<version> v =
package_iteration (o,
- c,
+ *pdb,
t,
d,
m.name,
m.version,
+ &pvi.info,
true /* check_external */))
m.version = move (*v);
- pa = arg_package (package_scheme::none,
+ pa = arg_package (pdb,
+ package_scheme::none,
m.name,
version_constraint (m.version),
move (pa.options),
@@ -3369,19 +4625,17 @@ namespace bpkg
ap = make_shared<available_package> (move (m));
af = root;
ap->locations.push_back (package_location {root, move (d)});
+
+ existing_packages.push_back (make_pair (ref (*pdb), ap));
+ existing = true;
}
}
catch (const invalid_path&)
{
// Not a valid path so cannot be a package directory.
}
- catch (const failed&)
+ catch (const not_package&)
{
- // If this is a valid package directory but something went wrong
- // afterwards, then we are done.
- //
- if (package_dir)
- throw;
}
}
}
@@ -3395,6 +4649,7 @@ namespace bpkg
//
shared_ptr<selected_package> sp;
bool patch (false);
+ bool deorphan (false);
if (ap == nullptr)
{
@@ -3416,9 +4671,10 @@ namespace bpkg
parse_package_version_constraint (
package,
false /* allow_wildcard */,
- false /* fold_zero_revision */));
+ version::none));
- pa = arg_package (package_scheme::none,
+ pa = arg_package (pdb,
+ package_scheme::none,
move (n),
move (vc),
move (pa.options),
@@ -3429,18 +4685,28 @@ namespace bpkg
if (!pa.options.dependency ())
{
- // Either get the user-specified version or the latest allowed
- // for a source code package. For a system package we pick the
- // latest one just to make sure the package is recognized.
+ assert (pdb != nullptr);
+
+ lazy_shared_ptr<repository_fragment> root (*pdb, empty_string);
+
+ // Get the user-specified version, the latest allowed version,
+ // or the orphan best match for a source code package. For a
+ // system package we will try to find the available package that
+ // matches the user-specified system version (preferable for the
+ // configuration negotiation machinery) and, if fail, fallback
+ // to picking the latest one just to make sure the package is
+ // recognized.
//
optional<version_constraint> c;
+ bool sys (arg_sys (pa));
+
if (!pa.constraint)
{
- assert (!arg_sys (pa));
+ assert (!sys);
if (pa.options.patch () &&
- (sp = db.find<selected_package> (pa.name)) != nullptr)
+ (sp = pdb->find<selected_package> (pa.name)) != nullptr)
{
c = patch_constraint (sp);
@@ -3456,16 +4722,59 @@ namespace bpkg
patch = true;
}
}
- else if (!arg_sys (pa))
+ else if (!sys || !wildcard (*pa.constraint))
c = pa.constraint;
- auto rp (find_available_one (db, pa.name, c, root));
+ if (pa.options.deorphan ())
+ {
+ if (!sys)
+ {
+ if (sp == nullptr)
+ sp = pdb->find<selected_package> (pa.name);
+
+ if (sp != nullptr && orphan_package (*pdb, sp))
+ deorphan = true;
+ }
+
+ // If the package is not an orphan, its version is not
+ // constrained and upgrade/patch is not requested, then just
+ // skip the package.
+ //
+ if (!deorphan &&
+ !pa.constraint &&
+ !pa.options.upgrade () &&
+ !pa.options.patch ())
+ {
+ ++i;
+ continue;
+ }
+ }
+
+ pair<shared_ptr<available_package>,
+ lazy_shared_ptr<repository_fragment>> rp (
+ deorphan &&
+ !pa.constraint &&
+ !pa.options.upgrade () &&
+ !pa.options.patch ()
+ ? find_orphan_match (sp, root)
+ : find_available_one (pa.name, c, root));
+
+ if (rp.first == nullptr && sys)
+ {
+ available_packages aps (
+ find_available_all (repo_configs, pa.name));
+
+ if (!aps.empty ())
+ rp = move (aps.front ());
+ }
+
ap = move (rp.first);
af = move (rp.second);
}
}
- catch (const failed&)
+ catch (const failed& e)
{
+ assert (e.code == 1);
diag = true;
continue;
}
@@ -3477,27 +4786,66 @@ namespace bpkg
continue;
// Save (both packages to hold and dependencies) as dependents for
- // recursive upgrade.
+ // recursive upgrade/deorphaning.
//
{
- optional<bool> u;
- optional<bool> r;
+ // Recursive/immediate upgrade/patch.
+ //
+ optional<bool> r; // true -- recursive, false -- immediate.
+ optional<bool> u; // true -- upgrade, false -- patch.
+
+ // Recursive/immediate deorphaning.
+ //
+ optional<bool> d; // true -- recursive, false -- immediate.
const auto& po (pa.options);
- if (po.upgrade_immediate ()) { u = true; r = false; }
- else if (po.upgrade_recursive ()) { u = true; r = true; }
- else if ( po.patch_immediate ()) { u = false; r = false; }
- else if ( po.patch_recursive ()) { u = false; r = true; }
- else if ( po.immediate ()) { u = po.upgrade (); r = false; }
- else if ( po.recursive ()) { u = po.upgrade (); r = true; }
+ // Note that, for example, --upgrade-immediate wins over the
+ // --upgrade --recursive options pair.
+ //
+ if (po.immediate ())
+ {
+ if (po.upgrade () || po.patch ())
+ {
+ r = false;
+ u = po.upgrade ();
+ }
- if (r)
+ if (po.deorphan ())
+ d = false;
+ }
+ else if (po.recursive ())
{
- l4 ([&]{trace << "stashing recursive package "
- << arg_string (pa);});
+ if (po.upgrade () || po.patch ())
+ {
+ r = true;
+ u = po.upgrade ();
+ }
- rec_pkgs.push_back (recursive_package {pa.name, *u, *r});
+ if (po.deorphan ())
+ d = true;
+ }
+
+ if (po.upgrade_immediate ()) { u = true; r = false; }
+ else if (po.upgrade_recursive ()) { u = true; r = true; }
+ else if ( po.patch_immediate ()) { u = false; r = false; }
+ else if ( po.patch_recursive ()) { u = false; r = true; }
+
+ if (po.deorphan_immediate ()) { d = false; }
+ else if (po.deorphan_recursive ()) { d = true; }
+
+ if (r || d)
+ {
+ l4 ([&]{trace << "stash recursive package " << arg_string (pa);});
+
+ // The above options are meaningless for system packages, so we
+ // just ignore them for a system dependency with unspecified
+ // configuration.
+ //
+ if (pdb != nullptr)
+ rec_pkgs.push_back (recursive_package {*pdb, pa.name,
+ r, u && *u,
+ d});
}
}
@@ -3505,46 +4853,83 @@ namespace bpkg
//
if (pa.options.dependency ())
{
- l4 ([&]{trace << "stashing dependency package "
- << arg_string (pa);});
+ l4 ([&]{trace << "stash dependency package " << arg_string (pa);});
bool sys (arg_sys (pa));
- // Make sure that the package is known.
- //
- auto apr (!pa.constraint || sys
- ? find_available (db, pa.name, nullopt)
- : find_available (db, pa.name, *pa.constraint));
+ if (pdb != nullptr)
+ sp = pdb->find<selected_package> (pa.name);
- if (apr.empty ())
+ // Make sure that the package is known. Only allow to unhold an
+ // unknown orphaned selected package (with the view that there is
+ // a good chance it will get dropped; and if not, such an unhold
+ // should be harmless).
+ //
+ if (!existing &&
+ find_available (repo_configs,
+ pa.name,
+ !sys ? pa.constraint : nullopt).empty ())
{
- diag_record dr (fail);
+ // Don't fail if the selected package is held and satisfies the
+ // constraints, if specified. Note that we may still fail later
+ // with the "not available from its dependents' repositories"
+ // error if the dependency is requested to be deorphaned and all
+ // its dependents are orphaned.
+ //
+ if (!(sp != nullptr &&
+ sp->hold_package &&
+ (!pa.constraint || satisfies (sp->version, pa.constraint))))
+ {
+ string n (arg_string (pa, false /* options */));
- dr << "unknown package " << arg_string (pa, false /* options */);
- check_any_available (c, t, &dr);
+ diag_record dr (fail);
+ dr << "unknown package " << n;
+ if (sys)
+ {
+ // Feels like we can't end up here if the version was specified
+ // explicitly.
+ //
+ dr << info << "consider specifying " << n << "/*";
+ }
+ else
+ check_any_available (repo_configs, t, &dr);
+ }
}
- // Save before the name move.
- //
- sp = db.find<selected_package> (pa.name);
+ if (pdb != nullptr)
+ pkg_confs.emplace_back (*pdb, pa.name);
+
+ bool hold_version (pa.constraint.has_value ());
dep_pkgs.push_back (
- dependency_package {move (pa.name),
+ dependency_package {pdb,
+ move (pa.name),
move (pa.constraint),
+ hold_version,
move (sp),
sys,
- pa.options.patch (),
+ existing,
+ (pa.options.upgrade () || pa.options.patch ()
+ ? pa.options.upgrade ()
+ : optional<bool> ()),
+ pa.options.deorphan (),
pa.options.keep_out (),
+ pa.options.disfigure (),
(pa.options.checkout_root_specified ()
? move (pa.options.checkout_root ())
: optional<dir_path> ()),
pa.options.checkout_purge (),
- move (pa.config_vars)});
+ move (pa.config_vars),
+ pa.system_status});
continue;
}
// Add the held package to the list.
//
+ assert (pdb != nullptr);
+
+ lazy_shared_ptr<repository_fragment> root (*pdb, empty_string);
+
// Load the package that may have already been selected (if not done
// yet) and figure out what exactly we need to do here. The end goal
// is the available_package object corresponding to the actual
@@ -3552,15 +4937,17 @@ namespace bpkg
// the same as the selected package).
//
if (sp == nullptr)
- sp = db.find<selected_package> (pa.name);
+ sp = pdb->find<selected_package> (pa.name);
if (sp != nullptr && sp->state == package_state::broken)
- fail << "unable to build broken package " << pa.name <<
+ fail << "unable to build broken package " << pa.name << *pdb <<
info << "use 'pkg-purge --force' to remove";
bool found (true);
bool sys_advise (false);
+ bool sys (arg_sys (pa));
+
// If the package is not available from the repository we can try to
// create it from the orphaned selected package. Meanwhile that
// doesn't make sense for a system package. The only purpose to
@@ -3568,7 +4955,7 @@ namespace bpkg
// package is not in the repository then there is no dependent for it
// (otherwise the repository would be broken).
//
- if (!arg_sys (pa))
+ if (!sys)
{
// If we failed to find the requested package we can still check if
// the package name is present in the repositories and if that's the
@@ -3579,10 +4966,7 @@ namespace bpkg
if (ap == nullptr)
{
if (pa.constraint &&
- find_available_one (db,
- pa.name,
- nullopt,
- root).first != nullptr)
+ find_available_one (pa.name, nullopt, root).first != nullptr)
sys_advise = true;
}
else if (ap->stub ())
@@ -3596,17 +4980,18 @@ namespace bpkg
//
if (pa.constraint)
{
- for (;;)
+ for (;;) // Breakout loop.
{
if (ap != nullptr) // Must be that version, see above.
break;
// Otherwise, our only chance is that the already selected object
- // satisfies the version constraint.
+ // satisfies the version constraint, unless we are deorphaning.
//
- if (sp != nullptr &&
- !sp->system () &&
- satisfies (sp->version, pa.constraint))
+ if (sp != nullptr &&
+ !sp->system () &&
+ satisfies (sp->version, pa.constraint) &&
+ !deorphan)
break; // Derive ap from sp below.
found = false;
@@ -3614,13 +4999,10 @@ namespace bpkg
}
}
//
- // No explicit version was specified by the user (not relevant for a
- // system package, see above).
+ // No explicit version was specified by the user.
//
else
{
- assert (!arg_sys (pa));
-
if (ap != nullptr)
{
assert (!ap->stub ());
@@ -3629,14 +5011,17 @@ namespace bpkg
// we have a newer version, we treat it as an upgrade request;
// otherwise, why specify the package in the first place? We just
// need to check if what we already have is "better" (i.e.,
- // newer).
+ // newer), unless we are deorphaning.
//
- if (sp != nullptr && !sp->system () && ap->version < sp->version)
+ if (sp != nullptr &&
+ !sp->system () &&
+ ap->version < sp->version &&
+ !deorphan)
ap = nullptr; // Derive ap from sp below.
}
else
{
- if (sp == nullptr || sp->system ())
+ if (sp == nullptr || sp->system () || deorphan)
found = false;
// Otherwise, derive ap from sp below.
@@ -3657,15 +5042,30 @@ namespace bpkg
if (!sys_advise)
{
- dr << "unknown package " << pa.name;
+ // Note that if the package is not system and its version was
+ // explicitly specified, then we can only be here if no version of
+ // this package is available in source from the repository
+ // (otherwise we would advise to configure it as a system package;
+ // see above). Thus, let's not print its version constraint in
+ // this case.
+ //
+ // Also note that for a system package we can't end up here if the
+ // version was specified explicitly.
+ //
+ string n (package_string (pa.name, nullopt /* vc */, sys));
+
+ dr << "unknown package " << n;
// Let's help the new user out here a bit.
//
- check_any_available (c, t, &dr);
+ if (sys)
+ dr << info << "consider specifying " << n << "/*";
+ else
+ check_any_available (*pdb, t, &dr);
}
else
{
- assert (!arg_sys (pa));
+ assert (!sys);
dr << arg_string (pa, false /* options */)
<< " is not available in source";
@@ -3682,11 +5082,11 @@ namespace bpkg
//
if (ap == nullptr)
{
- assert (sp != nullptr && sp->system () == arg_sys (pa));
+ assert (sp != nullptr && sp->system () == sys);
- auto rp (make_available (o, c, db, sp));
- ap = rp.first;
- af = rp.second; // Could be NULL (orphan).
+ auto rp (make_available_fragment (o, *pdb, sp));
+ ap = move (rp.first);
+ af = move (rp.second); // Could be NULL (orphan).
}
// We will keep the output directory only if the external package is
@@ -3698,28 +5098,50 @@ namespace bpkg
bool keep_out (pa.options.keep_out () &&
sp != nullptr && sp->external ());
+ bool replace ((existing && sp != nullptr) || deorphan);
+
// Finally add this package to the list.
//
+ optional<bool> upgrade (sp != nullptr &&
+ !pa.constraint &&
+ (pa.options.upgrade () || pa.options.patch ())
+ ? pa.options.upgrade ()
+ : optional<bool> ());
+
+ // @@ Pass pa.configure_only() when support for package-specific
+ // --configure-only is added.
+ //
build_package p {
build_package::build,
+ *pdb,
move (sp),
move (ap),
move (af),
+ nullopt, // Dependencies.
+ nullopt, // Dependencies alternatives.
+ nullopt, // Package skeleton.
+ nullopt, // Postponed dependency alternatives.
+ false, // Recursive collection.
true, // Hold package.
pa.constraint.has_value (), // Hold version.
{}, // Constraints.
- arg_sys (pa),
+ sys,
keep_out,
+ pa.options.disfigure (),
+ false, // Configure-only.
(pa.options.checkout_root_specified ()
? move (pa.options.checkout_root ())
: optional<dir_path> ()),
pa.options.checkout_purge (),
move (pa.config_vars),
- {package_name ()}, // Required by (command line).
- 0}; // Adjustments.
+ upgrade,
+ deorphan,
+ {cmd_line}, // Required by (command line).
+ false, // Required by dependents.
+ replace ? build_package::build_replace : uint16_t (0)};
- l4 ([&]{trace << "stashing held package "
- << p.available_name_version ();});
+ l4 ([&]{trace << "stash held package "
+ << p.available_name_version_db ();});
// "Fix" the version the user asked for by adding the constraint.
//
@@ -3727,7 +5149,10 @@ namespace bpkg
// this build_package instance is never replaced).
//
if (pa.constraint)
- p.constraints.emplace_back ("command line", move (*pa.constraint));
+ p.constraints.emplace_back (
+ move (*pa.constraint), cmd_line.db, cmd_line.name.string ());
+
+ pkg_confs.emplace_back (p.db, p.name ());
hold_pkgs.push_back (move (p));
}
@@ -3735,88 +5160,143 @@ namespace bpkg
// If this is just pkg-build -u|-p, then we are upgrading all held
// packages.
//
+ // Should we also upgrade the held packages in the explicitly linked
+ // configurations, recursively? Maybe later and we probably will need a
+ // command line option to enable this behavior.
+ //
if (hold_pkgs.empty () && dep_pkgs.empty () &&
- (o.upgrade () || o.patch ()))
+ (o.upgrade () || o.patch () || o.deorphan ()))
{
- using query = query<selected_package>;
-
- for (shared_ptr<selected_package> sp:
- pointer_result (
- db.query<selected_package> (query::state == "configured" &&
- query::hold_package)))
+ for (database& cdb: current_configs)
{
- // Let's skip upgrading system packages as they are, probably,
- // configured as such for a reason.
- //
- if (sp->system ())
- continue;
+ lazy_shared_ptr<repository_fragment> root (cdb, empty_string);
- const package_name& name (sp->name);
+ using query = query<selected_package>;
- optional<version_constraint> pc;
-
- if (o.patch ())
+ for (shared_ptr<selected_package> sp:
+ pointer_result (
+ cdb.query<selected_package> (
+ query::state == "configured" && query::hold_package)))
{
- pc = patch_constraint (sp);
-
- // Skip the non-patchable selected package. Note that the warning
- // have already been issued in this case.
+ // Let's skip upgrading system packages as they are, probably,
+ // configured as such for a reason.
//
- if (!pc)
+ if (sp->system ())
continue;
- }
- auto apr (find_available_one (db, name, pc, root));
+ const package_name& name (sp->name);
- shared_ptr<available_package> ap (move (apr.first));
- if (ap == nullptr || ap->stub ())
- {
- diag_record dr (fail);
- dr << name << " is not available";
+ optional<version_constraint> pc;
- if (ap != nullptr)
- dr << " in source" <<
- info << "consider building it as "
- << package_string (name, version (), true /* system */)
- << " if it is available from the system";
+ if (o.patch ())
+ {
+ pc = patch_constraint (sp);
- // Let's help the new user out here a bit.
+ // Skip the non-patchable selected package. Note that the
+ // warning has already been issued in this case.
+ //
+ if (!pc)
+ continue;
+ }
+
+ bool deorphan (false);
+
+ if (o.deorphan ())
+ {
+ // If the package is not an orphan and upgrade/patch is not
+ // requested, then just skip the package.
+ //
+ if (orphan_package (cdb, sp))
+ deorphan = true;
+ else if (!o.upgrade () && !o.patch ())
+ continue;
+ }
+
+ // In the deorphan mode with no upgrade/patch requested pick the
+ // version that matches the orphan best. Otherwise, pick the patch
+ // or the latest available version, as requested.
//
- check_any_available (c, t, &dr);
- }
+ auto apr (deorphan && !o.upgrade () && !o.patch ()
+ ? find_orphan_match (sp, root)
+ : find_available_one (name, pc, root));
- // We will keep the output directory only if the external package is
- // replaced with an external one (see above for details).
- //
- bool keep_out (o.keep_out () && sp->external ());
+ shared_ptr<available_package> ap (move (apr.first));
+ if (ap == nullptr || ap->stub ())
+ {
+ diag_record dr (fail);
+ dr << name << " is not available";
- build_package p {
- build_package::build,
+ if (ap != nullptr) // Stub?
+ {
+ dr << " in source" <<
+ info << "consider building it as "
+ << package_string (name, version (), true /* system */)
+ << " if it is available from the system";
+ }
+
+ // Let's help the new user out here a bit.
+ //
+ check_any_available (cdb, t, &dr);
+ }
+
+ // We will keep the output directory only if the external package
+ // is replaced with an external one (see above for details).
+ //
+ bool keep_out (o.keep_out () && sp->external ());
+
+ // @@ Pass pa.configure_only() when support for package-specific
+ // --configure-only is added.
+ //
+ build_package p {
+ build_package::build,
+ cdb,
move (sp),
move (ap),
move (apr.second),
- true, // Hold package.
- false, // Hold version.
- {}, // Constraints.
- false, // System package.
+ nullopt, // Dependencies.
+ nullopt, // Dependencies alternatives.
+ nullopt, // Package skeleton.
+ nullopt, // Postponed dependency alternatives.
+ false, // Recursive collection.
+ true, // Hold package.
+ false, // Hold version.
+ {}, // Constraints.
+ false, // System package.
keep_out,
- nullopt, // Checkout root.
- false, // Checkout purge.
- strings (), // Configuration variables.
- {package_name ()}, // Required by (command line).
- 0}; // Adjustments.
-
- l4 ([&]{trace << "stashing held package "
- << p.available_name_version ();});
-
- hold_pkgs.push_back (move (p));
-
- // If there are also -i|-r, then we are also upgrading dependencies
- // of all held packages.
- //
- if (o.immediate () || o.recursive ())
- rec_pkgs.push_back (
- recursive_package {name, o.upgrade (), o.recursive ()});
+ o.disfigure (),
+ false, // Configure-only.
+ nullopt, // Checkout root.
+ false, // Checkout purge.
+ strings (), // Configuration variables.
+ (o.upgrade () || o.patch ()
+ ? o.upgrade ()
+ : optional<bool> ()),
+ deorphan,
+ {cmd_line}, // Required by (command line).
+ false, // Required by dependents.
+ deorphan ? build_package::build_replace : uint16_t (0)};
+
+ l4 ([&]{trace << "stash held package "
+ << p.available_name_version_db ();});
+
+ hold_pkgs.push_back (move (p));
+
+ // If there are also -i|-r, then we are also upgrading and/or
+ // deorphaning dependencies of all held packages.
+ //
+ if (o.immediate () || o.recursive ())
+ {
+ rec_pkgs.push_back (recursive_package {
+ cdb, name,
+ (o.upgrade () || o.patch ()
+ ? o.recursive ()
+ : optional<bool> ()),
+ o.upgrade (),
+ (o.deorphan ()
+ ? o.recursive ()
+ : optional<bool> ())});
+ }
+ }
}
}
@@ -3827,14 +5307,59 @@ namespace bpkg
{
assert (rec_pkgs.empty ());
+ if (o.noop_exit_specified ())
+ return o.noop_exit ();
+
info << "nothing to build";
return 0;
}
+ // Search for the package prerequisite among packages specified on the
+ // command line and, if found, return its desired database. Return NULL
+ // otherwise. The `db` argument specifies the dependent database.
+ //
+ // Note that the semantics of a package specified on the command line is:
+ // build the package in the specified configuration (current by default)
+ // and repoint all dependents in the current configuration of this
+ // prerequisite to this new prerequisite. Thus, the function always
+ // returns NULL for dependents not in the current configuration.
+ //
+ // Also note that we rely on "small function object" optimization here.
+ //
+ const function<find_database_function> find_prereq_database (
+ [&pkg_confs] (database& db,
+ const package_name& nm,
+ bool buildtime) -> database*
+ {
+ database* r (nullptr);
+
+ linked_databases ddbs (db.dependency_configs (nm, buildtime));
+
+ for (const package_key& p: pkg_confs)
+ {
+ if (p.name == nm &&
+ find (ddbs.begin (), ddbs.end (), p.db) != ddbs.end ())
+ {
+ if (r == nullptr)
+ r = &p.db.get ();
+ else
+ fail << "multiple " << p.db.get ().type << " configurations "
+ << "specified for package " << nm <<
+ info << r->config_orig <<
+ info << p.db.get ().config_orig;
+ }
+ }
+
+ return r;
+ });
+
// Assemble the list of packages we will need to build-to-hold, still used
// dependencies to up/down-grade, and unused dependencies to drop. We call
// this the plan.
//
+ // Note: for the sake of brevity we also assume the package replacement
+ // wherever we mention the package up/down-grade in this description.
+ //
// The way we do it is tricky: we first create the plan based on build-to-
// holds (i.e., the user selected). Next, to decide whether we need to
// up/down-grade or drop any dependecies we need to take into account an
@@ -3873,147 +5398,695 @@ namespace bpkg
// grade order where any subsequent entry does not affect the decision of
// the previous ones.
//
+ // Note that we also need to rebuild the plan from scratch on adding a new
+ // up/down-grade/drop if any dependency configuration negotiation has been
+ // performed, since any package replacement may affect the already
+ // negotiated configurations.
+ //
// Package managers are an easy, already solved problem, right?
//
build_packages pkgs;
{
struct dep
{
- package_name name; // Empty if up/down-grade.
+ reference_wrapper<database> db;
+ package_name name; // Empty if up/down-grade.
// Both are NULL if drop.
//
- shared_ptr<available_package> available;
- shared_ptr<bpkg::repository_fragment> repository_fragment;
+ shared_ptr<available_package> available;
+ lazy_shared_ptr<bpkg::repository_fragment> repository_fragment;
- bool system;
+ bool system;
+ bool existing; // Build as an existing archive or directory.
+ optional<bool> upgrade;
+ bool deorphan;
};
vector<dep> deps;
+ existing_dependencies existing_deps;
+ deorphaned_dependencies deorphaned_deps;
+
+ replaced_versions replaced_vers;
+ postponed_dependencies postponed_deps;
+ unacceptable_alternatives unacceptable_alts;
+
+ // Map the repointed dependents to the replacement flags (see
+ // repointed_dependents for details), unless --no-move is specified.
+ //
+ // Note that the overall plan is to add the replacement prerequisites to
+ // the repointed dependents prerequisites sets at the beginning of the
+ // refinement loop iteration and remove them right before the plan
+ // execution simulation. This will allow the collecting/ordering
+ // functions to see both kinds of prerequisites (being replaced and
+ // their replacements) and only consider one kind or another or both, as
+ // appropriate.
+ //
+ repointed_dependents rpt_depts;
+
+ if (!o.no_move ())
+ {
+ transaction t (mdb);
+
+ using query = query<selected_package>;
+
+ query q (query::state == "configured");
+
+ for (database& cdb: current_configs)
+ {
+ for (shared_ptr<selected_package> sp:
+ pointer_result (cdb.query<selected_package> (q)))
+ {
+ map<package_key, bool> ps; // Old/new prerequisites.
+
+ for (const auto& p: sp->prerequisites)
+ {
+ database& db (p.first.database ());
+ const package_name& name (p.first.object_id ());
+
+ // Note that if a prerequisite is in a configuration of the host
+ // type, it is not necessarily a build-time dependency (think of
+ // a dependent from a self-hosted configuration and its runtime
+ // dependency). However, here it doesn't really matter.
+ //
+ database* pdb (
+ find_prereq_database (cdb,
+ name,
+ (db.type == host_config_type ||
+ db.type == build2_config_type)));
+
+ if (pdb != nullptr && *pdb != db && pdb->type == db.type)
+ {
+ ps.emplace (package_key {*pdb, name}, true);
+ ps.emplace (package_key { db, name}, false);
+ }
+ }
+
+ if (!ps.empty ())
+ rpt_depts.emplace (package_key {cdb, sp->name}, move (ps));
+ }
+ }
+
+ t.commit ();
+ }
// Iteratively refine the plan with dependency up/down-grades/drops.
//
- for (bool refine (true), scratch (true); refine; )
+ // Note that we should not clean the deps list on scratch_col (scratch
+ // during the package collection) because we want to enter them before
+ // collect_build_postponed() and they could be the dependents that have
+ // the config clauses. In a sense, change to replaced_vers,
+ // postponed_deps, or unacceptable_alts maps should not affect the deps
+ // list. But not the other way around: a dependency erased from the deps
+ // list could have caused an entry in the replaced_vers, postponed_deps,
+ // and/or unacceptable_alts maps. And so we clean replaced_vers,
+ // postponed_deps, and unacceptable_alts on scratch_exe (scratch during
+ // the plan execution).
+ //
+ for (bool refine (true), scratch_exe (true), scratch_col (false);
+ refine; )
{
- l4 ([&]{trace << "refining execution plan"
+ bool scratch (scratch_exe || scratch_col);
+
+ l4 ([&]{trace << "refine package collection/plan execution"
<< (scratch ? " from scratch" : "");});
- transaction t (db);
+ transaction t (mdb);
- build_packages::postponed_packages postponed;
+ // Collect all configurations where dependency packages can
+ // potentially be built or amended during this run.
+ //
+ linked_databases dep_dbs;
- if (scratch)
+ for (database& cdb: current_configs)
{
- pkgs.clear ();
- postponed.clear ();
-
- // Pre-enter dependencies to keep track of the desired versions and
- // options specified on the command line. In particular, if the
- // version is specified and the dependency is used as part of the
- // plan, then the desired version must be used. We also need it to
- // distinguish user-driven dependency up/down-grades from the
- // dependent-driven ones, not to warn/refuse.
- //
- // Also, if a dependency package already has selected package that
- // is held, then we need to unhold it.
- //
- for (const dependency_package& p: dep_pkgs)
+ for (database& db: cdb.dependency_configs ())
{
- build_package bp {
- nullopt, // Action.
- nullptr, // Selected package.
- nullptr, // Available package/repository frag.
- nullptr,
- false, // Hold package.
- p.constraint.has_value (), // Hold version.
- {}, // Constraints.
- p.system,
- p.keep_out,
- p.checkout_root,
- p.checkout_purge,
- p.config_vars,
- {package_name ()}, // Required by (command line).
- 0}; // Adjustments.
-
- if (p.constraint)
- bp.constraints.emplace_back ("command line", *p.constraint);
-
- pkgs.enter (p.name, move (bp));
+ if (find (dep_dbs.begin (), dep_dbs.end (), db) == dep_dbs.end ())
+ dep_dbs.push_back (db);
}
+ }
- // Pre-collect user selection to make sure dependency-forced
- // up/down-grades are handled properly (i.e., the order in which we
- // specify packages on the command line does not matter).
- //
- for (const build_package& p: hold_pkgs)
- pkgs.collect_build (o, c, db, p);
+ // Temporarily add the replacement prerequisites to the repointed
+ // dependent prerequisites sets and persist the changes.
+ //
+ for (auto& rd: rpt_depts)
+ {
+ database& db (rd.first.db);
+ const package_name& nm (rd.first.name);
- // Collect all the prerequisites of the user selection.
- //
- for (const build_package& p: hold_pkgs)
- pkgs.collect_build_prerequisites (o, c, db, p.name (), postponed);
+ shared_ptr<selected_package> sp (db.load<selected_package> (nm));
+ package_prerequisites& prereqs (sp->prerequisites);
- // Note that we need to collect unheld after prerequisites, not to
- // overwrite the pre-entered entries before they are used to provide
- // additional constraints for the collected prerequisites.
- //
- for (const dependency_package& p: dep_pkgs)
+ for (const auto& prq: rd.second)
{
- if (p.selected != nullptr && p.selected->hold_package)
- pkgs.collect_unhold (p.selected);
+ if (prq.second) // Prerequisite replacement?
+ {
+ const package_key& p (prq.first);
+
+ // Find the being replaced prerequisite to copy its information
+ // into the replacement.
+ //
+ auto i (find_if (prereqs.begin (), prereqs.end (),
+ [&p] (const auto& pr)
+ {
+ return pr.first.object_id () == p.name;
+ }));
+
+ assert (i != prereqs.end ());
+
+ auto j (prereqs.emplace (
+ lazy_shared_ptr<selected_package> (p.db.get (),
+ p.name),
+ i->second));
+
+ // The selected package should only contain the old
+ // prerequisites at this time, so adding a replacement should
+ // always succeed.
+ //
+ assert (j.second);
+ }
}
- scratch = false;
+ db.update (sp);
}
- else
- pkgs.clear_order (); // Only clear the ordered list.
- // Add to the plan dependencies to up/down-grade/drop that were
- // discovered on the previous iterations.
+ // Erase the replacements from the repointed dependents prerequisite
+ // sets and persist the changes.
//
- for (const dep& d: deps)
+ auto restore_repointed_dependents = [&rpt_depts] ()
{
- if (d.available == nullptr)
- pkgs.collect_drop (db.load<selected_package> (d.name));
- else
+ for (auto& rd: rpt_depts)
{
- shared_ptr<selected_package> sp (
- db.find<selected_package> (d.name));
+ database& db (rd.first.db);
+ const package_name& nm (rd.first.name);
- // We will keep the output directory only if the external package
- // is replaced with an external one (see above for details).
+ shared_ptr<selected_package> sp (db.load<selected_package> (nm));
+
+ for (const auto& prq: rd.second)
+ {
+ if (prq.second) // Prerequisite replacement?
+ {
+ const package_key& p (prq.first);
+
+ size_t n (
+ sp->prerequisites.erase (
+ lazy_shared_ptr<selected_package> (p.db.get (), p.name)));
+
+ // The selected package should always contain the prerequisite
+ // replacement at this time, so its removal should always
+ // succeed.
+ //
+ assert (n == 1);
+ }
+ }
+
+ db.update (sp);
+ }
+ };
+
+ // Pre-enter dependency to keep track of the desired versions and
+ // options specified on the command line. In particular, if the
+ // version is specified and the dependency is used as part of the
+ // plan, then the desired version must be used. We also need it to
+ // distinguish user-driven dependency up/down-grades from the
+ // dependent-driven ones, not to warn/refuse.
+ //
+ // Also, if a dependency package already has selected package that
+ // is held, then we need to unhold it.
+ //
+ auto enter = [&pkgs, &cmd_line] (database& db,
+ const dependency_package& p)
+ {
+ // Note that we don't set the upgrade and deorphan flags based on
+ // the --upgrade, --patch, and --deorphan options since an option
+        // presence doesn't necessarily mean that the respective flag needs
+ // to be set (the package may not be selected, may not be patchable
+ // and/or an orphan, etc). The proper flags will be provided by
+ // evaluate_dependency() if/when any upgrade/deorphan recommendation
+ // is given.
+ //
+ build_package bp {
+ nullopt, // Action.
+ db,
+ nullptr, // Selected package.
+ nullptr, // Available package/repo fragment.
+ nullptr,
+ nullopt, // Dependencies.
+ nullopt, // Dependencies alternatives.
+ nullopt, // Package skeleton.
+ nullopt, // Postponed dependency alternatives.
+ false, // Recursive collection.
+ false, // Hold package.
+ p.hold_version,
+ {}, // Constraints.
+ p.system,
+ p.keep_out,
+ p.disfigure,
+ false, // Configure-only.
+ p.checkout_root,
+ p.checkout_purge,
+ p.config_vars,
+ nullopt, // Upgrade.
+ false, // Deorphan.
+ {cmd_line}, // Required by (command line).
+ false, // Required by dependents.
+ 0}; // State flags.
+
+ if (p.constraint)
+ bp.constraints.emplace_back (*p.constraint,
+ cmd_line.db,
+ cmd_line.name.string ());
+
+ pkgs.enter (p.name, move (bp));
+ };
+
+ // Add the system dependency to the database's system repository and
+ // pre-enter it to the build package map.
+ //
+ auto enter_system_dependency = [&add_system_package, &enter]
+ (database& db, const dependency_package& p)
+ {
+ // The system package may only have an exact/wildcard version
+ // specified.
+ //
+ add_system_package (&db,
+ p.name,
+ p.constraint,
+ p.system_status,
+ nullptr /* stubs */);
+ enter (db, p);
+ };
+
+ // Private configurations that were created during collection of the
+ // package builds. The list contains the private configuration paths,
+ // relative to the containing configuration directories (.bpkg/host/,
+ // etc), together with the containing configuration databases.
+ //
+ // Note that the private configurations are linked to their parent
+ // configurations right after being created, so that the subsequent
+ // collecting, ordering, and plan execution simulation logic can use
+ // them. However, we can not easily commit these changes at some
+ // point, since there could also be some other changes made to the
+      // database which need to be rolled back at the end of the refinement
+ // iteration.
+ //
+ // Thus, the plan is to collect configurations where the private
+ // configurations were created and, after the transaction is rolled
+ // back, re-link these configurations and persist the changes using
+ // the new transaction.
+ //
+ vector<pair<database&, dir_path>> priv_cfgs;
+
+ // Add a newly created private configuration to the private
+ // configurations and the dependency databases lists and pre-enter
+ // builds of system dependencies with unspecified configuration for
+ // this configuration.
+ //
+ const function<build_packages::add_priv_cfg_function> add_priv_cfg (
+ [&priv_cfgs, &dep_dbs, &dep_pkgs, &enter_system_dependency]
+ (database& pdb, dir_path&& cfg)
+ {
+ database& db (pdb.find_attached (pdb.config / cfg,
+ false /* self */));
+
+ priv_cfgs.emplace_back (pdb, move (cfg));
+
+ dep_dbs.push_back (db);
+
+ for (const dependency_package& p: dep_pkgs)
+ {
+ if (p.db == nullptr)
+ enter_system_dependency (db, p);
+ }
+ });
+
+ postponed_packages postponed_repo;
+ postponed_packages postponed_alts;
+ postponed_packages postponed_recs;
+ postponed_existing_dependencies postponed_edeps;
+ postponed_configurations postponed_cfgs;
+ strings postponed_cfgs_history;
+ unsatisfied_dependents unsatisfied_depts;
+
+ try
+ {
+ if (scratch)
+ {
+ pkgs.clear ();
+
+ if (scratch_exe)
+ {
+ replaced_vers.clear ();
+ postponed_deps.clear ();
+ unacceptable_alts.clear ();
+
+ scratch_exe = false;
+ }
+ else
+ {
+ assert (scratch_col); // See the scratch definition above.
+
+ // Reset to detect bogus entries.
+ //
+ for (auto& rv: replaced_vers)
+ rv.second.replaced = false;
+
+ for (auto& pd: postponed_deps)
+ {
+ pd.second.wout_config = false;
+ pd.second.with_config = false;
+ }
+
+ scratch_col = false;
+ }
+
+ // Pre-enter dependencies with specified configurations.
//
- bool keep_out (o.keep_out () && sp->external ());
+ for (const dependency_package& p: dep_pkgs)
+ {
+ if (p.db != nullptr)
+ enter (*p.db, p);
+ }
- // Marking upgraded dependencies as "required by command line" may
- // seem redundant as they should already be pre-entered as such
- // (see above). But remember dependencies upgraded with -i|-r?
- // Note that the required_by data member should never be empty, as
- // it is used in prompts/diagnostics.
+ // Pre-enter system dependencies with unspecified configuration
+ // for all dependency configurations, excluding those which
+ // already have this dependency pre-entered.
//
- build_package p {
- build_package::build,
- move (sp),
- d.available,
- d.repository_fragment,
- nullopt, // Hold package.
- nullopt, // Hold version.
- {}, // Constraints.
- d.system,
- keep_out,
- nullopt, // Checkout root.
- false, // Checkout purge.
- strings (), // Configuration variables.
- {package_name ()}, // Required by (command line).
- 0}; // Adjustments.
+ for (const dependency_package& p: dep_pkgs)
+ {
+ if (p.db == nullptr)
+ {
+ for (database& db: dep_dbs)
+ {
+ if (!pkgs.entered_build (db, p.name))
+ enter_system_dependency (db, p);
+ }
+ }
+ }
+
+ // Pre-collect user selection to make sure dependency-forced
+ // up/down-grades are handled properly (i.e., the order in which we
+ // specify packages on the command line does not matter).
+ //
+ for (const build_package& p: hold_pkgs)
+ pkgs.collect_build (
+ o, p, replaced_vers, postponed_cfgs, unsatisfied_depts);
+
+ // Collect all the prerequisites of the user selection.
+ //
+ // Note that some of the user-selected packages can well be
+ // dependencies whose recursive processing should be postponed.
+ //
+ for (const build_package& p: hold_pkgs)
+ {
+ package_key pk (p.db, p.name ());
+
+ auto i (postponed_deps.find (pk));
+
+ if (i != postponed_deps.end ())
+ {
+ // Even though the user selection may have a configuration, we
+ // treat it as a dependent without any configuration because
+ // it is non-negotiable, known at the outset, and thus cannot
+ // be a reason to postpone anything.
+ //
+ i->second.wout_config = true;
+
+ l5 ([&]{trace << "dep-postpone user-specified " << pk;});
+ }
+ else
+ {
+ const postponed_configuration* pcfg (
+ postponed_cfgs.find_dependency (pk));
+
+ if (pcfg != nullptr)
+ {
+ l5 ([&]{trace << "dep-postpone user-specified " << pk
+ << " since already in cluster " << *pcfg;});
+ }
+ else
+ {
+ pkgs.collect_build_prerequisites (
+ o,
+ p.db,
+ p.name (),
+ find_prereq_database,
+ add_priv_cfg,
+ rpt_depts,
+ replaced_vers,
+ postponed_repo,
+ postponed_alts,
+ 0 /* max_alt_index */,
+ postponed_recs,
+ postponed_edeps,
+ postponed_deps,
+ postponed_cfgs,
+ unacceptable_alts,
+ unsatisfied_depts);
+ }
+ }
+ }
+
+ // Note that we need to collect unheld after prerequisites, not to
+ // overwrite the pre-entered entries before they are used to
+ // provide additional constraints for the collected prerequisites.
+ //
+ for (const dependency_package& p: dep_pkgs)
+ {
+ auto unhold = [&p, &pkgs] (database& db)
+ {
+ shared_ptr<selected_package> sp (
+ p.db != nullptr
+ ? p.selected
+ : db.find<selected_package> (p.name));
- pkgs.collect_build (o, c, db, p, &postponed /* recursively */);
+ if (sp != nullptr && sp->hold_package)
+ pkgs.collect_unhold (db, sp);
+ };
+
+ if (p.db != nullptr)
+ {
+ unhold (*p.db);
+ }
+ else
+ {
+ for (database& db: dep_dbs)
+ unhold (db);
+ }
+ }
+
+ // Collect dependents whose dependencies need to be repointed to
+ // packages from different configurations.
+ //
+ pkgs.collect_repointed_dependents (o,
+ rpt_depts,
+ replaced_vers,
+ postponed_repo,
+ postponed_alts,
+ postponed_recs,
+ postponed_edeps,
+ postponed_deps,
+ postponed_cfgs,
+ unacceptable_alts,
+ unsatisfied_depts,
+ find_prereq_database,
+ add_priv_cfg);
}
+ else
+ pkgs.clear_order (); // Only clear the ordered list.
+
+ // Add to the plan dependencies to up/down-grade/drop that were
+ // discovered on the previous iterations.
+ //
+ // Note: this loop takes care of both the from-scratch and
+ // refinement cases.
+ //
+ for (const dep& d: deps)
+ {
+ database& ddb (d.db);
+
+ if (d.available == nullptr)
+ {
+ pkgs.collect_drop (o,
+ ddb,
+ ddb.load<selected_package> (d.name),
+ replaced_vers);
+ }
+ else
+ {
+ shared_ptr<selected_package> sp (
+ ddb.find<selected_package> (d.name));
+
+ // We will keep the output directory only if the external package
+ // is replaced with an external one (see above for details).
+ //
+ bool keep_out (o.keep_out () && sp->external ());
+
+ // Marking upgraded dependencies as "required by command line"
+ // may seem redundant as they should already be pre-entered as
+ // such (see above). But remember dependencies upgraded with
+ // -i|-r? Note that the required_by data member should never be
+ // empty, as it is used in prompts/diagnostics.
+ //
+ build_package p {
+ build_package::build,
+ ddb,
+ move (sp),
+ d.available,
+ d.repository_fragment,
+ nullopt, // Dependencies.
+ nullopt, // Dependencies alternatives.
+ nullopt, // Package skeleton.
+ nullopt, // Postponed dependency alternatives.
+ false, // Recursive collection.
+ nullopt, // Hold package.
+ nullopt, // Hold version.
+ {}, // Constraints.
+ d.system,
+ keep_out,
+ o.disfigure (),
+ false, // Configure-only.
+ nullopt, // Checkout root.
+ false, // Checkout purge.
+ strings (), // Configuration variables.
+ d.upgrade,
+ d.deorphan,
+ {cmd_line}, // Required by (command line).
+ false, // Required by dependents.
+ (d.existing || d.deorphan
+ ? build_package::build_replace
+ : uint16_t (0))};
+
+ package_key pk {ddb, d.name};
+
+ // Similar to the user-selected packages, collect non-
+ // recursively the dependencies for which recursive collection
+ // is postponed (see above for details).
+ //
+ auto i (postponed_deps.find (pk));
+ if (i != postponed_deps.end ())
+ {
+ i->second.wout_config = true;
+
+ // Note: not recursive.
+ //
+ pkgs.collect_build (
+ o, move (p), replaced_vers, postponed_cfgs, unsatisfied_depts);
+
+ l5 ([&]{trace << "dep-postpone user-specified dependency "
+ << pk;});
+ }
+ else
+ {
+ const postponed_configuration* pcfg (
+ postponed_cfgs.find_dependency (pk));
+
+ if (pcfg != nullptr)
+ {
+ // Note: not recursive.
+ //
+ pkgs.collect_build (o,
+ move (p),
+ replaced_vers,
+ postponed_cfgs,
+ unsatisfied_depts);
+
+ l5 ([&]{trace << "dep-postpone user-specified dependency "
+ << pk << " since already in cluster "
+ << *pcfg;});
+ }
+ else
+ {
+ build_package_refs dep_chain;
+
+ // Note: recursive.
+ //
+ pkgs.collect_build (o,
+ move (p),
+ replaced_vers,
+ postponed_cfgs,
+ unsatisfied_depts,
+ &dep_chain,
+ find_prereq_database,
+ add_priv_cfg,
+ &rpt_depts,
+ &postponed_repo,
+ &postponed_alts,
+ &postponed_recs,
+ &postponed_edeps,
+ &postponed_deps,
+ &unacceptable_alts);
+ }
+ }
+ }
+ }
+
+ // Handle the (combined) postponed collection.
+ //
+ if (find_if (postponed_recs.begin (), postponed_recs.end (),
+ [] (const build_package* p)
+ {
+ // Note that we check for the dependencies presence
+ // rather than for the recursive_collection flag
+ // (see collect_build_postponed() for details).
+ //
+ return !p->dependencies;
+ }) != postponed_recs.end () ||
+ !postponed_repo.empty () ||
+ !postponed_alts.empty () ||
+ postponed_deps.has_bogus () ||
+ !postponed_cfgs.empty ())
+ pkgs.collect_build_postponed (o,
+ replaced_vers,
+ postponed_repo,
+ postponed_alts,
+ postponed_recs,
+ postponed_edeps,
+ postponed_deps,
+ postponed_cfgs,
+ postponed_cfgs_history,
+ unacceptable_alts,
+ unsatisfied_depts,
+ find_prereq_database,
+ rpt_depts,
+ add_priv_cfg);
+
+ // Erase the bogus replacements and re-collect from scratch, if any
+ // (see replaced_versions for details).
+ //
+ replaced_vers.cancel_bogus (trace, true /* scratch */);
}
+ catch (const scratch_collection& e)
+ {
+ // Re-collect from scratch (but keep deps).
+ //
+ scratch_col = true;
- // Handle the (combined) postponed collection.
- //
- if (!postponed.empty ())
- pkgs.collect_build_postponed (o, c, db, postponed);
+ l5 ([&]{trace << "collection failed due to " << e.description
+ << (e.package != nullptr
+ ? " (" + e.package->string () + ')'
+ : empty_string)
+ << ", retry from scratch";});
+
+ // Erase the package version replacements that we didn't apply
+ // during the current (re-)collection iteration since the dependents
+ // demanding this version are not collected anymore.
+ //
+ replaced_vers.cancel_bogus (trace, false /* scratch */);
+
+ restore_repointed_dependents ();
+
+ // Commit linking of private configurations that were potentially
+ // created during the collection of the package builds with their
+ // parent configurations.
+ //
+ t.commit ();
+
+ continue;
+ }
+
+ set<package_key> depts (
+ pkgs.collect_dependents (rpt_depts, unsatisfied_depts));
// Now that we have collected all the package versions that we need to
// build, arrange them in the "dependency order", that is, with every
@@ -4025,75 +6098,224 @@ namespace bpkg
// dependencies between the specified packages).
//
// The order of dependency upgrades/downgrades/drops is not really
- // deterministic. We, however, do them before hold_pkgs so that they
- // appear (e.g., on the plan) last.
+ // deterministic. We, however, do upgrades/downgrades before hold_pkgs
+ // so that they appear (e.g., on the plan) after the packages being
+ // built to hold. We handle drops last, though, so that the unused
+    // packages are likely to get purged before the package fetches, so that
+ // the disk space they occupy can be reused.
//
for (const dep& d: deps)
- pkgs.order (d.name, false /* reorder */);
+ {
+ if (d.available != nullptr)
+ pkgs.order (d.db,
+ d.name,
+ find_prereq_database,
+ false /* reorder */);
+ }
for (const build_package& p: reverse_iterate (hold_pkgs))
- pkgs.order (p.name ());
+ pkgs.order (p.db, p.name (), find_prereq_database);
+
+ for (const auto& rd: rpt_depts)
+ pkgs.order (rd.first.db,
+ rd.first.name,
+ find_prereq_database,
+ false /* reorder */);
+
+ // Order the existing dependents which have participated in
+ // negotiation of the configuration of their dependencies.
+ //
+ for (const postponed_configuration& cfg: postponed_cfgs)
+ {
+ for (const auto& d: cfg.dependents)
+ {
+ if (d.second.existing)
+ {
+ const package_key& p (d.first);
+ pkgs.order (p.db, p.name, find_prereq_database);
+ }
+ }
+ }
- // Collect and order all the dependents that we will need to
- // reconfigure because of the up/down-grades of packages that are now
- // on the list.
+ // Order the existing dependents whose dependencies are being
+ // up/down-graded or reconfigured.
//
- pkgs.collect_order_dependents (db);
+ for (const package_key& p: depts)
+ pkgs.order (p.db, p.name, find_prereq_database, false /* reorder */);
- // And, finally, make sure all the packages that we need to unhold
- // are on the list.
+ // Order the re-collected packages (deviated dependents, etc).
+ //
+ for (build_package* p: postponed_recs)
+ {
+ assert (p->recursive_collection);
+
+ pkgs.order (p->db, p->name (), find_prereq_database);
+ }
+
+ // Make sure all the packages that we need to unhold are on the list.
//
for (const dependency_package& p: dep_pkgs)
{
- if (p.selected != nullptr && p.selected->hold_package)
- pkgs.order (p.name, false /* reorder */);
+ auto order_unheld = [&p, &pkgs, &find_prereq_database] (database& db)
+ {
+ shared_ptr<selected_package> sp (
+ p.db != nullptr
+ ? p.selected
+ : db.find<selected_package> (p.name));
+
+ if (sp != nullptr && sp->hold_package)
+ pkgs.order (db,
+ p.name,
+ find_prereq_database,
+ false /* reorder */);
+ };
+
+ if (p.db != nullptr)
+ {
+ order_unheld (*p.db);
+ }
+ else
+ {
+ for (database& db: dep_dbs)
+ order_unheld (db);
+ }
}
+ // And, finally, order the package drops.
+ //
+ for (const dep& d: deps)
+ {
+ if (d.available == nullptr)
+ pkgs.order (d.db,
+ d.name,
+ find_prereq_database,
+ false /* reorder */);
+ }
+
+ // Make sure all the postponed dependencies of existing dependents
+ // have been collected and fail if that's not the case.
+ //
+ for (const auto& pd: postponed_edeps)
+ {
+ const build_package* p (pkgs.entered_build (pd.first));
+ assert (p != nullptr && p->available != nullptr);
+
+ if (!p->recursive_collection)
+ {
+ // Feels like this shouldn't happen but who knows.
+ //
+ diag_record dr (fail);
+ dr << "package " << p->available_name_version_db () << " is not "
+ << "built due to its configured dependents deviation in "
+ << "dependency resolution" <<
+ info << "deviated dependents:";
+
+ for (const package_key& d: pd.second)
+ dr << ' ' << d;
+
+ dr << info << "please report in "
+ << "https://github.com/build2/build2/issues/302";
+ }
+ }
+
+#ifndef NDEBUG
+ pkgs.verify_ordering ();
+#endif
+ // Now, as we are done with package builds collecting/ordering, erase
+ // the replacements from the repointed dependents prerequisite sets
+ // and persist the changes.
+ //
+ restore_repointed_dependents ();
+
// We are about to execute the plan on the database (but not on the
// filesystem / actual packages). Save the session state for the
// selected_package objects so that we can restore it later (see
// below for details).
//
using selected_packages = session::object_map<selected_package>;
- auto selected_packages_session = [&db, &ses] () -> selected_packages*
+ auto sp_session = [] (const auto& tm) -> selected_packages*
{
- auto& m (ses.map ()[&db]);
- auto i (m.find (&typeid (selected_package)));
- return (i != m.end ()
+ auto i (tm.find (&typeid (selected_package)));
+ return (i != tm.end ()
? &static_cast<selected_packages&> (*i->second)
: nullptr);
};
- selected_packages old_sp;
- if (const selected_packages* sp = selected_packages_session ())
- old_sp = *sp;
+ map<const odb::database*, selected_packages> old_sp;
+
+ for (const auto& dps: ses.map ())
+ {
+ if (const selected_packages* sps = sp_session (dps.second))
+ old_sp.emplace (dps.first, *sps);
+ }
// Note that we need to perform the execution on the copies of the
// build/drop_package objects to preserve the original ones. The
// selected_package objects will still be changed so we will reload
// them afterwards (see below).
//
+ // After the plan execution simulation, save the packages being built
+ // (selected non-system packages) for the subsequent dependency
+ // hierarchies verification.
+ //
+ bool changed;
+ vector<pair<database&, shared_ptr<selected_package>>> build_pkgs;
{
vector<build_package> tmp (pkgs.begin (), pkgs.end ());
build_package_list bl (tmp.begin (), tmp.end ());
- execute_plan (o, c, db, bl, true /* simulate */);
+ changed = execute_plan (o,
+ bl,
+ &unsatisfied_depts,
+ find_prereq_database);
+
+ if (changed)
+ {
+ for (build_package& p: bl)
+ {
+ shared_ptr<selected_package>& sp (p.selected);
+
+ if (sp != nullptr)
+ {
+ if (!sp->system ())
+ build_pkgs.emplace_back (p.db, move (sp));
+ }
+ else
+ assert (p.action && *p.action == build_package::drop);
+ }
+ }
}
// Return nullopt if no changes to the dependency are necessary. This
// value covers both the "no change is required" and the "no
// recommendation available" cases.
//
- auto eval_dep = [&db, &dep_pkgs, &rec_pkgs] (
- const shared_ptr<selected_package>& sp,
- bool ignore_unsatisfiable = true) -> optional<evaluate_result>
+ auto eval_dep = [&dep_pkgs,
+ &rec_pkgs,
+ &o,
+ &existing_deps,
+ &deorphaned_deps,
+ &pkgs,
+ cache = upgrade_dependencies_cache {}] (
+ database& db,
+ const shared_ptr<selected_package>& sp,
+ bool ignore_unsatisfiable = true) mutable
+ -> optional<evaluate_result>
{
optional<evaluate_result> r;
// See if there is an optional dependency upgrade recommendation.
//
if (!sp->hold_package)
- r = evaluate_dependency (db, dep_pkgs, sp, ignore_unsatisfiable);
+ r = evaluate_dependency (o,
+ db,
+ sp,
+ dep_pkgs,
+ o.no_move (),
+ existing_deps,
+ deorphaned_deps,
+ pkgs,
+ ignore_unsatisfiable);
// If none, then see for the recursive dependency upgrade
// recommendation.
@@ -4102,7 +6324,15 @@ namespace bpkg
// configured as such for a reason.
//
if (!r && !sp->system () && !rec_pkgs.empty ())
- r = evaluate_recursive (db, rec_pkgs, sp, ignore_unsatisfiable);
+ r = evaluate_recursive (o,
+ db,
+ sp,
+ rec_pkgs,
+ existing_deps,
+ deorphaned_deps,
+ pkgs,
+ ignore_unsatisfiable,
+ cache);
// Translate the "no change" result to nullopt.
//
@@ -4112,16 +6342,18 @@ namespace bpkg
// The empty version means that the package must be dropped.
//
const version ev;
- auto target_version = [&ev] (const shared_ptr<available_package>& ap,
- bool sys) -> const version&
+ auto target_version = [&ev]
+ (database& db,
+ const shared_ptr<available_package>& ap,
+ bool sys) -> const version&
{
if (ap == nullptr)
return ev;
if (sys)
{
- assert (ap->system_version () != nullptr);
- return *ap->system_version ();
+ assert (ap->system_version (db) != nullptr);
+ return *ap->system_version (db);
}
return ap->version;
@@ -4134,15 +6366,18 @@ namespace bpkg
{
bool s (false);
+ database& db (i->db);
+ const package_name& nm (i->name);
+
// Here we scratch if evaluate changed its mind or if the resulting
// version doesn't match what we expect it to be.
//
- if (auto sp = db.find<selected_package> (i->name))
+ if (auto sp = db.find<selected_package> (nm))
{
- const version& dv (target_version (i->available, i->system));
+ const version& dv (target_version (db, i->available, i->system));
- if (optional<evaluate_result> r = eval_dep (sp))
- s = dv != target_version (r->available, r->system) ||
+ if (optional<evaluate_result> r = eval_dep (db, sp))
+ s = dv != target_version (db, r->available, r->system) ||
i->system != r->system;
else
s = dv != sp->version || i->system != sp->system ();
@@ -4152,14 +6387,44 @@ namespace bpkg
if (s)
{
- scratch = true; // Rebuild the plan from scratch.
+ scratch_exe = true; // Rebuild the plan from scratch.
+
+ package_key pk (db, nm);
+
+ auto j (find (existing_deps.begin (), existing_deps.end (), pk));
+ if (j != existing_deps.end ())
+ existing_deps.erase (j);
+
+ deorphaned_deps.erase (pk);
+
i = deps.erase (i);
}
else
++i;
}
- if (!scratch)
+ if (scratch_exe)
+ l5 ([&]{trace << "one of dependency evaluation decisions has "
+ << "changed, re-collecting from scratch";});
+
+ // If the execute_plan() call was noop, there are no user expectations
+ // regarding any dependency, and no upgrade is requested, then the
+ // only possible refinement outcome can be recommendations to drop
+ // unused dependencies (that the user has refused to drop on the
+ // previous build or drop command run). Thus, if the --keep-unused|-K
+ // or --no-refinement option is also specified, then we omit the
+ // need_refinement() call altogether and assume that no refinement is
+ // required.
+ //
+ if (!changed && dep_pkgs.empty () && rec_pkgs.empty ())
+ {
+ assert (!scratch_exe); // No reason to change any previous decision.
+
+ if (o.keep_unused () || o.no_refinement ())
+ refine = false;
+ }
+
+ if (!scratch_exe && refine)
{
// First, we check if the refinement is required, ignoring the
// unsatisfiable dependency version constraints. If we end up
@@ -4169,12 +6434,17 @@ namespace bpkg
// make sure that the unsatisfiable dependency, if left, is
// reported.
//
- auto need_refinement = [&eval_dep, &deps, rec_pkgs, &db, &o] (
- bool diag = false) -> bool
+ auto need_refinement = [&eval_dep,
+ &deps,
+ &rec_pkgs,
+ &dep_dbs,
+ &existing_deps,
+ &deorphaned_deps,
+ &o] (bool diag = false) -> bool
{
// Examine the new dependency set for any up/down-grade/drops.
//
- bool r (false); // Presumably no more refinments are necessary.
+ bool r (false); // Presumably no more refinements are necessary.
using query = query<selected_package>;
@@ -4183,22 +6453,46 @@ namespace bpkg
if (rec_pkgs.empty ())
q = q && !query::hold_package;
- for (shared_ptr<selected_package> sp:
- pointer_result (db.query<selected_package> (q)))
+ // It seems right to only evaluate dependencies in the explicitly
+ // linked configurations, recursively. Indeed, we shouldn't be
+ // up/down-grading or dropping packages in configurations that
+ // only contain dependents, some of which we may only reconfigure.
+ //
+ for (database& db: dep_dbs)
{
- if (optional<evaluate_result> er = eval_dep (sp, !diag))
+ for (shared_ptr<selected_package> sp:
+ pointer_result (db.query<selected_package> (q)))
{
- // Skip unused if we were instructed to keep them.
- //
- if (o.keep_unused () && er->available == nullptr)
- continue;
+ if (optional<evaluate_result> er = eval_dep (db, sp, !diag))
+ {
+ // Skip unused if we were instructed to keep them.
+ //
+ if (o.keep_unused () && er->available == nullptr)
+ continue;
- if (!diag)
- deps.push_back (dep {sp->name,
- move (er->available),
- move (er->repository_fragment),
- er->system});
- r = true;
+ if (!diag)
+ {
+ deps.push_back (dep {er->db,
+ sp->name,
+ move (er->available),
+ move (er->repository_fragment),
+ er->system,
+ er->existing,
+ er->upgrade,
+ er->orphan.has_value ()});
+
+ if (er->existing)
+ existing_deps.emplace_back (er->db, sp->name);
+
+ if (er->orphan)
+ {
+ deorphaned_deps[package_key (er->db, sp->name)] =
+ move (*er->orphan);
+ }
+ }
+
+ r = true;
+ }
}
}
@@ -4207,8 +6501,251 @@ namespace bpkg
refine = need_refinement ();
+ // If no further refinement is necessary, then perform the
+ // diagnostics run. Otherwise, if any dependency configuration
+ // negotiation has been performed during the current plan refinement
+ // iteration, then rebuild the plan from scratch (see above for
+      // details). Also rebuild it from scratch if any unsatisfied
+ // dependents have been ignored, since their unsatisfied constraints
+ // are now added to the dependencies' build_package::constraints
+ // lists.
+ //
if (!refine)
need_refinement (true /* diag */);
+ else if (!postponed_cfgs.empty () || !unsatisfied_depts.empty ())
+ scratch_exe = true;
+ }
+
+ // Note that we prevent building multiple instances of the same
+ // package dependency in different configurations (of the same type)
+ // while creating the build plan. However, we may potentially end up
+ // with the same dependency in multiple configurations since we do not
+ // descend into prerequisites of already configured packages which
+ // require no up/downgrade.
+ //
+ // To prevent this, we additionally verify that none of the dependency
+ // hierarchies of the packages being built contains the same runtime
+ // dependency, built in multiple configurations.
+ //
+ // Note that we also fail for a system dependency configured in
+ // multiple configurations, since these configurations can potentially
+ // be configured differently and so these system packages can refer to
+ // different targets.
+ //
+ if (changed && !refine)
+ {
+ // Verify the specified package dependency hierarchy and return the
+ // set of packages plus their runtime dependencies, including
+ // indirect ones. Fail if a dependency cycle is detected.
+ //
+ // Also add the result into the `package_prereqs` map, to use it as
+ // a cache and for subsequent additional dependency verification.
+ //
+ // Note that all the encountered dependency sub-hierarchies that
+ // reside in configurations of different types (or beneath them) are
+ // also verified but not included into the resulting set.
+ //
+ using prerequisites = set<lazy_shared_ptr<selected_package>,
+ compare_lazy_ptr_id>;
+
+ map<package_key, prerequisites> package_prereqs;
+ small_vector<config_selected_package, 16> chain;
+
+ auto verify_dependencies = [&package_prereqs, &chain]
+ (database& db,
+ shared_ptr<selected_package> sp,
+ const auto& verify_dependencies)
+ -> const prerequisites&
+ {
+ // Return the cached value, if present.
+ //
+ package_key pk {db, sp->name};
+ {
+ auto i (package_prereqs.find (pk));
+
+ if (i != package_prereqs.end ())
+ return i->second;
+ }
+
+ // Make sure there is no dependency cycle.
+ //
+ config_selected_package csp {db, sp};
+ {
+ auto i (find (chain.begin (), chain.end (), csp));
+
+ if (i != chain.end ())
+ {
+ diag_record dr (fail);
+ dr << "dependency cycle detected involving package " << *sp
+ << db;
+
+ // Note: push_back() can invalidate the iterator.
+ //
+ size_t j (i - chain.begin ());
+
+ for (chain.push_back (csp); j != chain.size () - 1; ++j)
+ dr << info << *chain[j].package << chain[j].db
+ << " depends on "
+ << *chain[j + 1].package << chain[j + 1].db;
+ }
+ }
+
+ chain.push_back (csp);
+
+ // Verify all prerequisites, but only collect those corresponding
+ // to the runtime dependencies.
+ //
+ // Indeed, we don't care if a linked host configuration contains a
+ // configured package that we also have configured in our target
+ // configuration. It's also fine if some of our runtime
+ // dependencies from different configurations build-time depend on
+ // the same package (of potentially different versions) configured
+ // in different host configurations.
+ //
+ // Note, however, that we cannot easily determine if the
+ // prerequisite corresponds to the runtime or build-time
+ // dependency, since we don't store this information for
+ // prerequisites. The current implementation relies on the fact
+ // that the build-time dependency configuration type (host or
+ // build2) differs from the dependent configuration type (target
+ // is a common case) and doesn't work well, for example, for the
+ // self-hosted configurations. For them it can fail erroneously.
+ // We can potentially fix that by additionally storing the
+ // build-time flag for a prerequisite. However, let's first see if
+ // it ever becomes a problem.
+ //
+ prerequisites r;
+ const package_prerequisites& prereqs (sp->prerequisites);
+
+ for (const auto& prereq: prereqs)
+ {
+ const lazy_shared_ptr<selected_package>& p (prereq.first);
+ database& pdb (p.database ());
+
+ // Validate prerequisite sub-hierarchy also in configuration of
+ // different type but do not collect it.
+ //
+ const prerequisites& ps (
+ verify_dependencies (pdb, p.load (), verify_dependencies));
+
+ if (pdb.type != db.type)
+ continue;
+
+ // Collect prerequisite sub-hierarchy, checking that none of the
+ // packages are already collected.
+ //
+ for (const lazy_shared_ptr<selected_package>& p: ps)
+ {
+ // Note: compare_id_lazy_ptr only considers package names.
+ //
+ auto i (r.find (p));
+
+ if (i != r.end ())
+ {
+ database& db1 (p.database ());
+ database& db2 (i->database ());
+
+ if (db1 != db2)
+ {
+ bool indirect (prereqs.find (p) == prereqs.end ());
+
+ fail << "package " << p.object_id ()
+ << (indirect ? " indirectly" : "") << " required by "
+ << *sp << db << " is configured in multiple "
+ << "configurations" <<
+ info << *p.load () << db1 <<
+ info << *i->load () << db2;
+ }
+ }
+ else
+ r.insert (p);
+ }
+ }
+
+ chain.pop_back ();
+
+ // Collect the dependent package itself.
+ //
+ r.insert (lazy_shared_ptr<selected_package> (db, move (sp)));
+
+ // Cache the resulting package prerequisites set and return a
+ // reference to it.
+ //
+ auto j (package_prereqs.emplace (move (pk), move (r)));
+ assert (j.second); // A package cannot depend on itself.
+
+ return j.first->second;
+ };
+
+ for (auto& p: build_pkgs)
+ verify_dependencies (p.first,
+ move (p.second),
+ verify_dependencies);
+
+ // Now, verify that none of the build2 modules may simultaneously be
+ // built in multiple configurations which belong to the same linked
+ // configuration cluster.
+ //
+ // For that we use the `package_prereqs` map: its key set refers to
+ // all the packages potentially involved into the build (explicitly
+ // or implicitly).
+ //
+ {
+ // List of module packages together with the linked configuration
+ // clusters they belong to.
+ //
+ vector<pair<package_key, linked_databases>> build2_mods;
+
+ for (const auto& pp: package_prereqs)
+ {
+ const package_key& pk (pp.first);
+
+ // Skip packages other than the build2 modules.
+ //
+ if (!build2_module (pk.name))
+ continue;
+
+ // Skip build2 modules configured as system.
+ //
+ {
+ shared_ptr<selected_package> sp (
+ pk.db.get ().find<selected_package> (pk.name));
+
+ assert (sp != nullptr);
+
+ if (sp->system ())
+ continue;
+ }
+
+ // Make sure the module's database doesn't belong to any other
+ // cluster this module is also configured in.
+ //
+ for (const auto& m: build2_mods)
+ {
+ if (m.first.name != pk.name)
+ continue;
+
+ // The `package_prereqs` map can only contain the same package
+ // twice if databases differ.
+ //
+ assert (m.first.db != pk.db);
+
+ const linked_databases& lcc (m.second);
+
+ if (find (lcc.begin (), lcc.end (), pk.db) != lcc.end ())
+ {
+ fail << "building build system module " << pk.name
+ << " in multiple configurations" <<
+ info << m.first.db.get ().config_orig <<
+ info << pk.db.get ().config_orig;
+ }
+ }
+
+ // Add the module and its cluster to the list.
+ //
+ build2_mods.emplace_back (pk, pk.db.get ().cluster_configs ());
+ }
+ }
}
// Rollback the changes to the database and reload the changed
@@ -4216,13 +6753,18 @@ namespace bpkg
//
t.rollback ();
{
- transaction t (db);
+ transaction t (mdb);
// First reload all the selected_package object that could have been
// modified (conceptually, we should only modify what's on the
// plan). And in case of drop the object is removed from the session
// so we need to bring it back.
//
+ // Make sure that selected packages are only owned by the session
+ // and the build package list.
+ //
+ build_pkgs.clear ();
+
// Note: we use the original pkgs list since the executed ones may
// contain newly created (but now gone) selected_package objects.
//
@@ -4230,64 +6772,314 @@ namespace bpkg
{
assert (p.action);
+ database& pdb (p.db);
+
if (*p.action == build_package::drop)
{
assert (p.selected != nullptr);
ses.cache_insert<selected_package> (
- db, p.selected->name, p.selected);
+ pdb, p.selected->name, p.selected);
}
if (p.selected != nullptr)
- db.reload (*p.selected);
+ pdb.reload (*p.selected);
}
// Now remove all the newly created selected_package objects from
// the session. The tricky part is to distinguish newly created ones
// from newly loaded (and potentially cached).
//
- if (selected_packages* sp = selected_packages_session ())
+ for (bool rescan (true); rescan; )
{
- for (bool rescan (true); rescan; )
- {
- rescan = false;
+ rescan = false;
- for (auto i (sp->begin ()); i != sp->end (); )
+ for (const auto& dps: ses.map ())
+ {
+ if (selected_packages* sps = sp_session (dps.second))
{
- bool erased (false);
- auto j (old_sp.find (i->first));
-
+ auto j (old_sp.find (dps.first)); // Find the database.
+
+ // Note that if a database has been introduced only during
+ // simulation, then we could just clear all its selected
+ // packages in one shot. Let's, however, be cautious and remove
+ // them iteratively to make sure that none of them are left at
+ // the end (no more rescan is necessary). If any of them is
+ // left, then that would mean that it is referenced from
+ // somewhere besides the session object, which would be a bug.
+ //
if (j == old_sp.end ())
{
- if (i->second.use_count () == 1)
+ if (!sps->empty ())
+ {
+ for (auto i (sps->begin ()); i != sps->end (); )
+ {
+ if (i->second.use_count () == 1)
+ {
+ // This might cause another object's use count to drop.
+ //
+ i = sps->erase (i);
+ rescan = true;
+ }
+ else
+ ++i;
+ }
+ }
+
+ continue;
+ }
+
+ const selected_packages& osp (j->second);
+
+ for (auto i (sps->begin ()); i != sps->end (); )
+ {
+ bool erased (false);
+ auto j (osp.find (i->first));
+
+ if (j == osp.end ())
+ {
+ if (i->second.use_count () == 1)
+ {
+ // This might cause another object's use count to drop.
+ //
+ i = sps->erase (i);
+ erased = true;
+ rescan = true;
+ }
+ }
+ // It may also happen that the object was erased from the
+ // database and then recreated. In this case we restore the
+ // pointer that is stored in the session.
+ //
+ else if (i->second != j->second)
{
// This might cause another object's use count to drop.
//
- i = sp->erase (i);
- erased = true;
+ i->second = j->second;
rescan = true;
}
+
+ if (!erased)
+ ++i;
}
- // It may also happen that the object was erased from the
- // database and then recreated. In this case we restore the
- // pointer that is stored in the session.
- //
- else if (i->second != j->second)
+ }
+ }
+
+ // Verify that all the selected packages of the newly introduced
+ // during simulation databases are erased (see above for the
+ // verification reasoning).
+ //
+ if (!rescan)
+ {
+ for (const auto& dps: ses.map ())
+ {
+ if (const selected_packages* sps = sp_session (dps.second))
{
- // This might cause another object's use count to drop.
- //
- i->second = j->second;
- rescan = true;
+ if (old_sp.find (dps.first) == old_sp.end ())
+ assert (sps->empty ());
}
-
- if (!erased)
- ++i;
}
}
}
+ // Re-link the private configurations that were created during the
+ // collection of the package builds with their parent
+ // configurations. Note that these links were lost on the previous
+ // transaction rollback.
+ //
+ for (const pair<database&, dir_path>& pc: priv_cfgs)
+ cfg_link (pc.first,
+ pc.first.config / pc.second,
+ true /* relative */,
+ nullopt /* name */,
+ true /* sys_rep */);
+
t.commit ();
}
+
+ if (!refine)
+ {
+ // Cleanup the package build collecting state, preparing for the
+ // re-collection from the very beginning.
+ //
+ auto prepare_recollect = [&refine,
+ &scratch_exe,
+ &deps,
+ &existing_deps,
+ &deorphaned_deps] ()
+ {
+ refine = true;
+ scratch_exe = true;
+
+ deps.clear ();
+ existing_deps.clear ();
+ deorphaned_deps.clear ();
+ };
+
+ // Issue diagnostics and fail if any existing dependents are not
+ // satisfied with their dependencies.
+ //
+ // But first, try to resolve the first encountered unsatisfied
+ // constraint by replacing the collected unsatisfactory dependency
+ // or some of its dependents with some other available package
+ // version. This version, while not being the best possible choice,
+ // must be satisfactory for all its new and existing dependents. If
+ // we succeed, punch the replacement version into the command line and
+ // recollect from the very beginning (see unsatisfied_dependents for
+ // details).
+ //
+ if (!unsatisfied_depts.empty ())
+ {
+ if (!cmdline_refine_index) // Not command line adjustments refinement?
+ {
+ const unsatisfied_dependent& dpt (unsatisfied_depts.front ());
+
+ assert (!dpt.ignored_constraints.empty ());
+
+ const ignored_constraint& ic (dpt.ignored_constraints.front ());
+
+ const build_package* p (pkgs.entered_build (ic.dependency));
+ assert (p != nullptr); // The dependency must be collected.
+
+ l5 ([&]{trace << "try to replace unsatisfactory dependency "
+ << p->available_name_version_db () << " with some "
+ << "other version";});
+
+ optional<cmdline_adjustment> a;
+ vector<package_key> unsatisfied_dpts;
+ set<const build_package*> visited_dpts;
+
+ if ((a = try_replace_dependency (o,
+ *p,
+ pkgs,
+ hold_pkgs,
+ dep_pkgs,
+ cmdline_adjs,
+ unsatisfied_dpts,
+ "unsatisfactory dependency")) ||
+ (a = try_replace_dependent (o,
+ *p,
+ &ic.unsatisfied_constraints,
+ pkgs,
+ cmdline_adjs,
+ unsatisfied_dpts,
+ hold_pkgs,
+ dep_pkgs,
+ visited_dpts)) ||
+ !cmdline_adjs.empty ())
+ {
+ if (a)
+ {
+ cmdline_adjs.push (move (*a));
+ }
+ else
+ {
+ cmdline_adjustment a (cmdline_adjs.pop ());
+
+ l5 ([&]{trace << "cannot replace any package, rolling back "
+ << "latest command line adjustment ("
+ << cmdline_adjs.to_string (a) << ')';});
+ }
+
+ prepare_recollect ();
+ }
+ else
+ unsatisfied_depts.diag (pkgs); // Issue the diagnostics and fail.
+ }
+ else // We are in the command line adjustments refinement cycle.
+ {
+ // Since we have failed to collect, then the currently dropped
+ // command line adjustment is essential. Thus, push it back to
+ // the stack, drop the next one, and retry. If this is the last
+ // adjustment in the stack, then we assume that no further
+ // refinement is possible and we just recollect, assuming that
+ // this recollection will be successful.
+ //
+ assert (cmdline_refine_adjustment); // Wouldn't be here otherwise.
+
+ l5 ([&]{trace << "attempt to refine command line adjustments by "
+ << "rolling back adjustment "
+ << cmdline_adjs.to_string (
+ *cmdline_refine_adjustment)
+ << " failed, pushing it back";});
+
+ cmdline_adjs.push (move (*cmdline_refine_adjustment));
+
+ // The index of the previously dropped adjustment must be
+ // valid.
+ //
+ assert (*cmdline_refine_index != cmdline_adjs.size ());
+
+ if (++(*cmdline_refine_index) != cmdline_adjs.size ())
+ {
+ cmdline_refine_adjustment = cmdline_adjs.pop (true /* front */);
+
+ l5 ([&]{trace << "continue with command line adjustments "
+ << "refinement cycle by rolling back adjustment "
+ << cmdline_adjs.to_string (
+ *cmdline_refine_adjustment);});
+ }
+ else
+ {
+ cmdline_refine_adjustment = nullopt;
+
+ l5 ([&]{trace << "cannot further refine command line "
+ << "adjustments, performing final collection";});
+ }
+
+ prepare_recollect ();
+ }
+ }
+ //
+ // If the collection was successful, then see if we still need to
+ // perform the command line adjustments refinement.
+ //
+ else if (cmdline_adjs.tried () &&
+ (!cmdline_refine_index ||
+ *cmdline_refine_index != cmdline_adjs.size ()))
+ {
+ // If some command line adjustment is currently being dropped,
+ // that means that this adjustment is redundant.
+ //
+ bool initial (!cmdline_refine_index);
+
+ if (!initial)
+ {
+ assert (cmdline_refine_adjustment);
+
+ l5 ([&]{trace << "command line adjustment "
+ << cmdline_adjs.to_string (
+ *cmdline_refine_adjustment)
+ << " is redundant, dropping it";});
+
+ cmdline_refine_adjustment = nullopt;
+ cmdline_refine_index = nullopt;
+ }
+
+ // We cannot remove all the adjustments during the refinement.
+ // Otherwise, we shouldn't be failing in the first place.
+ //
+ assert (!cmdline_adjs.empty ());
+
+ // If there is just a single adjustment left, then there is
+ // nothing to refine anymore.
+ //
+ if (cmdline_adjs.size () != 1)
+ {
+ cmdline_refine_adjustment = cmdline_adjs.pop (true /* front */);
+ cmdline_refine_index = 0;
+
+ l5 ([&]{trace << (initial ? "start" : "re-start") << " command "
+ << "line adjustments refinement cycle by rolling "
+ << "back first adjustment ("
+ << cmdline_adjs.to_string (
+ *cmdline_refine_adjustment)
+ << ')';});
+
+ prepare_recollect ();
+ }
+ }
+ }
}
}
@@ -4295,129 +7087,319 @@ namespace bpkg
// While at it, detect if we have any dependents that the user may want to
// update.
//
+ // For the packages being printed also print the configuration specified
+ // by the user, dependents, and via the reflect clauses. For that we will
+ // use the package skeletons, initializing them if required. Note that for
+ // a system package the skeleton may already be initialized during the
+ // dependency negotiation process. Also note that the freshly-initialized
+ // skeletons will be reused during the plan execution.
+ //
bool update_dependents (false);
// We need the plan and to ask for the user's confirmation only if some
- // implicit action (such as building prerequisite or reconfiguring
- // dependent package) is to be taken or there is a selected package which
- // version must be changed. But if the user explicitly requested it with
- // --plan, then we print it as long as it is not empty.
+ // implicit action (such as building prerequisite, reconfiguring dependent
+ // package, or installing system/distribution packages) is to be taken or
+ // there is a selected package which version must be changed. But if the
+ // user explicitly requested it with --plan, then we print it as long as
+ // it is not empty.
//
string plan;
+ sha256 csum;
bool need_prompt (false);
- if (o.print_only () || !o.yes () || o.plan_specified ())
+ if (!o.yes () ||
+ o.print_only () ||
+ o.plan_specified () ||
+ o.rebuild_checksum_specified ())
{
- bool first (true); // First entry in the plan.
+ // Map the main system/distribution packages that need to be installed
+ // to the system packages which caused their installation (see
+ // build_package::system_install() for details).
+ //
+ using package_names = vector<reference_wrapper<const package_name>>;
+ using system_map = map<string, package_names>;
+
+ system_map sys_map;
+ // Iterate in the reverse order as we will do for printing the action
+ // lines. This way a sys-install action line will be printed right
+ // before the bpkg action line of a package which appears first in the
+ // sys-install action's 'required by' list.
+ //
for (const build_package& p: reverse_iterate (pkgs))
{
- const shared_ptr<selected_package>& sp (p.selected);
+ if (const system_package_status* s = p.system_install ())
+ {
+ package_names& ps (sys_map[s->system_name]);
- string act;
+ if (find (ps.begin (), ps.end (), p.name ()) == ps.end ())
+ ps.push_back (p.name ());
+ }
+ }
+
+ // Start the transaction since we may query available packages for
+ // skeleton initializations.
+ //
+ transaction t (mdb);
+ bool first (true); // First entry in the plan.
+
+ // Print the bpkg package action lines.
+ //
+ // Also print the sys-install action lines for system/distribution
+ // packages which require installation by the system package manager.
+ // Print them before the respective system package action lines, but
+ // only once per (main) system/distribution package. For example:
+ //
+ // sys-install libssl1.1/1.1.1l (required by sys:libssl, sys:libcrypto)
+ // configure sys:libssl/1.1.1 (required by foo)
+ // configure sys:libcrypto/1.1.1 (required by bar)
+ //
+ for (auto i (pkgs.rbegin ()); i != pkgs.rend (); )
+ {
+ build_package& p (*i);
assert (p.action);
- if (*p.action == build_package::drop)
+ string act;
+
+ const system_package_status* s;
+ system_map::iterator j;
+
+ if ((s = p.system_install ()) != nullptr &&
+ (j = sys_map.find (s->system_name)) != sys_map.end ())
{
- act = "drop " + sp->string () + " (unused)";
+ act = "sys-install ";
+ act += s->system_name;
+ act += '/';
+ act += s->system_version;
+ act += " (required by ";
+
+ bool first (true);
+ for (const package_name& n: j->second)
+ {
+ if (first)
+ first = false;
+ else
+ act += ", ";
+
+ act += "sys:";
+ act += n.string ();
+ }
+
+ act += ')';
+
need_prompt = true;
+
+ // Make sure that we print this sys-install action just once.
+ //
+ sys_map.erase (j);
+
+ // Note that we don't increment i in order to re-iterate this pkgs
+ // entry.
}
else
{
- string cause;
- if (*p.action == build_package::adjust)
- {
- assert (sp != nullptr && (p.reconfigure () || p.unhold ()));
+ ++i;
- // This is a dependent needing reconfiguration.
- //
- // This is an implicit reconfiguration which requires the plan to
- // be printed. Will flag that later when composing the list of
- // prerequisites.
- //
- if (p.reconfigure ())
- {
- act = "reconfigure";
- cause = "dependent of";
-
- if (!o.configure_only ())
- update_dependents = true;
- }
-
- // This is a held package needing unhold.
- //
- if (p.unhold ())
- {
- if (act.empty ())
- act = "unhold";
- else
- act += "/unhold";
- }
+ database& pdb (p.db);
+ const shared_ptr<selected_package>& sp (p.selected);
- act += ' ' + sp->name.string ();
+ if (*p.action == build_package::drop)
+ {
+ act = "drop " + sp->string (pdb) + " (unused)";
+ need_prompt = true;
}
else
{
- // Even if we already have this package selected, we have to
- // make sure it is configured and updated.
+ // Print configuration variables.
//
- if (sp == nullptr)
- act = p.system ? "configure" : "new";
- else if (sp->version == p.available_version ())
+ // The idea here is to only print configuration for those packages
+ // for which we call pkg_configure*() in execute_plan().
+ //
+ package_skeleton* cfg (nullptr);
+
+ string cause;
+ if (*p.action == build_package::adjust)
{
- // If this package is already configured and is not part of the
- // user selection (or we are only configuring), then there is
- // nothing we will be explicitly doing with it (it might still
- // get updated indirectly as part of the user selection update).
+ assert (sp != nullptr && (p.reconfigure () || p.unhold ()));
+
+ // This is a dependent needing reconfiguration.
//
- if (!p.reconfigure () &&
- sp->state == package_state::configured &&
- (!p.user_selection () || o.configure_only ()))
- continue;
+ // This is an implicit reconfiguration which requires the plan
+ // to be printed. Will flag that later when composing the list
+ // of prerequisites.
+ //
+ if (p.reconfigure ())
+ {
+ act = "reconfigure";
+ cause = "dependent of";
- act = p.system
- ? "reconfigure"
- : (p.reconfigure ()
- ? (o.configure_only ()
- ? "reconfigure"
- : "reconfigure/update")
- : "update");
+ if (!o.configure_only ())
+ update_dependents = true;
+ }
+
+ // This is a held package needing unhold.
+ //
+ if (p.unhold ())
+ {
+ if (act.empty ())
+ act = "unhold";
+ else
+ act += "/unhold";
+ }
+
+ act += ' ' + sp->name.string ();
+
+ const string& s (pdb.string);
+ if (!s.empty ())
+ act += ' ' + s;
+
+ // This is an adjustment and so there is no available package
+ // specified for the build package object and thus the skeleton
+ // cannot be present.
+ //
+ assert (p.available == nullptr && !p.skeleton);
+
+ // We shouldn't be printing configurations for plain unholds.
+ //
+ if (p.reconfigure ())
+ {
+ // Since there is no available package specified we need to
+ // find it (or create a transient one).
+ //
+ cfg = &p.init_skeleton (o,
+ true /* load_old_dependent_config */,
+ find_available (o, pdb, sp));
+ }
}
else
{
- act = p.system
- ? "reconfigure"
- : sp->version < p.available_version ()
- ? "upgrade"
- : "downgrade";
+ assert (p.available != nullptr); // This is a package build.
- need_prompt = true;
- }
+ bool replace (p.replace ());
- if (p.unhold ())
- act += "/unhold";
+ // Even if we already have this package selected, we have to
+ // make sure it is configured and updated.
+ //
+ if (sp == nullptr)
+ {
+ act = p.system ? "configure" : "new";
- act += ' ' + p.available_name_version ();
- cause = "required by";
- }
+ // For a new non-system package the skeleton must already be
+ // initialized.
+ //
+ assert (p.system || p.skeleton.has_value ());
- string rb;
- if (!p.user_selection ())
- {
- for (const package_name& n: p.required_by)
- rb += ' ' + n.string ();
+ // Initialize the skeleton if it is not initialized yet.
+ //
+ cfg = &(p.skeleton ? *p.skeleton : p.init_skeleton (o));
+ }
+ else if (sp->version == p.available_version ())
+ {
+ // If this package is already configured and is not part of
+ // the user selection (or we are only configuring), then there
+ // is nothing we will be explicitly doing with it (it might
+ // still get updated indirectly as part of the user selection
+ // update).
+ //
+ if (!p.reconfigure () &&
+ sp->state == package_state::configured &&
+ (!p.user_selection () ||
+ o.configure_only () ||
+ p.configure_only ()))
+ continue;
+
+ act = p.system
+ ? "reconfigure"
+ : (p.reconfigure ()
+ ? (o.configure_only () || p.configure_only ()
+ ? (replace ? "replace" : "reconfigure")
+ : (replace ? "replace/update" : "reconfigure/update"))
+ : "update");
- // If not user-selected, then there should be another (implicit)
- // reason for the action.
+ if (p.reconfigure ())
+ {
+ // Initialize the skeleton if it is not initialized yet.
+ //
+ cfg = &(p.skeleton ? *p.skeleton : p.init_skeleton (o));
+ }
+ }
+ else
+ {
+ act += p.system
+ ? "reconfigure"
+ : (sp->version < p.available_version ()
+ ? (replace ? "replace/upgrade" : "upgrade")
+ : (replace ? "replace/downgrade" : "downgrade"));
+
+ // For a non-system package up/downgrade the skeleton must
+ // already be initialized.
+ //
+ assert (p.system || p.skeleton.has_value ());
+
+ // Initialize the skeleton if it is not initialized yet.
+ //
+ cfg = &(p.skeleton ? *p.skeleton : p.init_skeleton (o));
+
+ need_prompt = true;
+ }
+
+ if (p.unhold ())
+ act += "/unhold";
+
+ act += ' ' + p.available_name_version_db ();
+ cause = p.required_by_dependents ? "required by" : "dependent of";
+
+ if (p.configure_only ())
+ update_dependents = true;
+ }
+
+ // Also list dependents for the newly built user-selected
+ // dependencies.
//
- assert (!rb.empty ());
+ bool us (p.user_selection ());
+ string rb;
+ if (!us || (!p.user_selection (hold_pkgs) && sp == nullptr))
+ {
+ // Note: if we are ever tempted to truncate this, watch out for
+ // the --rebuild-checksum functionality which uses this. But
+ // then it's not clear this information is actually important:
+ // can a dependent-dependency structure change without any of
+ // the package versions changing? Doesn't feel like it should.
+ //
+ for (const package_version_key& pvk: p.required_by)
+ {
+ // Skip the command-line, etc dependents and don't print the
+ // package version (which is not always available; see
+ // build_package::required_by for details).
+ //
+ if (pvk.version) // Is it a real package?
+ {
+ rb += (rb.empty () ? " " : ", ") +
+ pvk.string (true /* ignore_version */);
+ }
+ }
- need_prompt = true;
- }
+ // If not user-selected, then there should be another (implicit)
+ // reason for the action.
+ //
+ assert (!rb.empty ());
+ }
+
+ if (!rb.empty ())
+ act += " (" + cause + rb + ')';
- if (!rb.empty ())
- act += " (" + cause + rb + ')';
+ if (cfg != nullptr && !cfg->empty_print ())
+ {
+ ostringstream os;
+ cfg->print_config (os, o.print_only () ? " " : " ");
+ act += '\n';
+ act += os.str ();
+ }
+
+ if (!us)
+ need_prompt = true;
+ }
}
if (first)
@@ -4441,7 +7423,20 @@ namespace bpkg
// Print indented for better visual separation.
//
plan += (plan.empty () ? " " : "\n ") + act;
+
+ if (o.rebuild_checksum_specified ())
+ csum.append (act);
}
+
+ t.commit ();
+ }
+
+ if (o.rebuild_checksum_specified ())
+ {
+ cout << csum.string () << endl;
+
+ if (o.rebuild_checksum () == csum.string ())
+ return o.noop_exit_specified () ? o.noop_exit () : 0;
}
if (o.print_only ())
@@ -4466,13 +7461,14 @@ namespace bpkg
// Ok, we have "all systems go". The overall action plan is as follows.
//
- // 1. disfigure up/down-graded, reconfigured [left to right]
- // 2. purge up/down-graded [right to left]
- // 3.a fetch/unpack new, up/down-graded
- // 3.b checkout new, up/down-graded
- // 4. configure all
- // 5. unhold unheld
- // 6. build user selection [right to left]
+ // 1. sys-install not installed system/distribution
+ // 2. disfigure up/down-graded, reconfigured [left to right]
+ // 3. purge up/down-graded [right to left]
+ // 4.a fetch/unpack new, up/down-graded, replaced
+ // 4.b checkout new, up/down-graded, replaced
+ // 5. configure all
+ // 6. unhold unheld
+ // 7. build user selection [right to left]
//
// Note that for some actions, e.g., purge or fetch, the order is not
// really important. We will, however, do it right to left since that
@@ -4486,10 +7482,13 @@ namespace bpkg
// prerequisites got upgraded/downgraded and that the user may want to in
// addition update (that update_dependents flag above).
//
- execute_plan (o, c, db, pkgs, false /* simulate */);
+ bool noop (!execute_plan (o,
+ pkgs,
+ nullptr /* simulate */,
+ find_prereq_database));
if (o.configure_only ())
- return 0;
+ return noop && o.noop_exit_specified () ? o.noop_exit () : 0;
// update
//
@@ -4500,18 +7499,25 @@ namespace bpkg
// First add the user selection.
//
+ // Only update user-selected packages which are specified on the command
+ // line as build to hold. Note that the dependency package will be updated
+ // implicitly via their dependents, if the latter are updated.
+ //
for (const build_package& p: reverse_iterate (pkgs))
{
assert (p.action);
- if (*p.action != build_package::build)
+ if (*p.action != build_package::build || p.configure_only ())
continue;
+ database& db (p.db);
const shared_ptr<selected_package>& sp (p.selected);
if (!sp->system () && // System package doesn't need update.
- p.user_selection ())
- upkgs.push_back (pkg_command_vars {sp,
+ p.user_selection (hold_pkgs))
+ upkgs.push_back (pkg_command_vars {db.config_orig,
+ !multi_config () && db.main (),
+ sp,
strings () /* vars */,
false /* cwd */});
}
@@ -4525,97 +7531,243 @@ namespace bpkg
{
assert (p.action);
- if (*p.action == build_package::adjust && p.reconfigure ())
- upkgs.push_back (pkg_command_vars {p.selected,
+ database& db (p.db);
+
+ // Note: don't update the re-evaluated and re-collected dependents
+ // unless they are reconfigured.
+ //
+ if ((*p.action == build_package::adjust && p.reconfigure ()) ||
+ (*p.action == build_package::build &&
+ ((p.flags & build_package::build_repoint) != 0 ||
+ ((p.flags & (build_package::build_reevaluate |
+ build_package::build_recollect)) != 0 &&
+ p.reconfigure ()))))
+ upkgs.push_back (pkg_command_vars {db.config_orig,
+ !multi_config () && db.main (),
+ p.selected,
strings () /* vars */,
false /* cwd */});
}
}
- pkg_update (c, o, o.for_ (), strings (), upkgs);
+ pkg_update (o, o.for_ (), strings (), upkgs);
if (verb && !o.no_result ())
{
for (const pkg_command_vars& pv: upkgs)
- text << "updated " << *pv.pkg;
+ text << "updated " << pv.string ();
}
return 0;
}
- static void
+ static bool
execute_plan (const pkg_build_options& o,
- const dir_path& c,
- database& db,
build_package_list& build_pkgs,
- bool simulate)
+ unsatisfied_dependents* simulate,
+ const function<find_database_function>& fdb)
{
tracer trace ("execute_plan");
l4 ([&]{trace << "simulate: " << (simulate ? "yes" : "no");});
- uint16_t verbose (!simulate ? verb : 0);
+ // If unsatisfied dependents are specified then we are in the simulation
+ // mode and thus simulate can be used as bool.
+
+ bool r (false);
+ uint16_t verb (!simulate ? bpkg::verb : 0);
+
+ bool result (verb && !o.no_result ());
+ bool progress (!result &&
+ ((verb == 1 && !o.no_progress () && stderr_term) ||
+ o.progress ()));
+
+ size_t prog_i, prog_n, prog_percent;
+
+ // sys-install
+ //
+ // Install the system/distribution packages required by the respective
+ // system packages (see build_package::system_install() for details).
+ //
+ if (!simulate && o.sys_install ())
+ {
+ // Collect the names of all the system packages being managed by the
+ // system package manager (as opposed to user/fallback), suppressing
+ // duplicates.
+ //
+ vector<package_name> ps;
+
+ for (build_package& p: build_pkgs)
+ {
+ if (p.system_status () &&
+ find (ps.begin (), ps.end (), p.name ()) == ps.end ())
+ {
+ ps.push_back (p.name ());
+ }
+ }
+
+ // Install the system/distribution packages.
+ //
+ if (!ps.empty ())
+ {
+ // Otherwise, we wouldn't get any package statuses.
+ //
+ assert (sys_pkg_mgr && *sys_pkg_mgr != nullptr);
+
+ (*sys_pkg_mgr)->install (ps);
+ }
+ }
// disfigure
//
- for (build_package& p: build_pkgs)
+ // Note: similar code in pkg-drop.
+ //
+ auto disfigure_pred = [] (const build_package& p)
{
// We are only interested in configured packages that are either being
// up/down-graded, need reconfiguration (e.g., dependents), or dropped.
//
+ if (*p.action != build_package::drop && !p.reconfigure ())
+ return false;
+
+ return true;
+ };
+
+ if (progress)
+ {
+ prog_i = 0;
+ prog_n = static_cast<size_t> (count_if (build_pkgs.begin (),
+ build_pkgs.end (),
+ disfigure_pred));
+ prog_percent = 100;
+ }
+
+ // On the package reconfiguration we will try to resolve dependencies to
+ // the same prerequisites (see pkg_configure() for details). For that, we
+ // will save prerequisites before disfiguring a package. Note, though,
+ // that this is not required for the recursively collected packages since
+ // the dependency alternatives are already selected for them.
+ //
+ map<const build_package*, vector<package_name>> previous_prerequisites;
+
+ for (build_package& p: build_pkgs)
+ {
assert (p.action);
- if (*p.action != build_package::drop && !p.reconfigure ())
+ if (!disfigure_pred (p))
continue;
+ database& pdb (p.db);
shared_ptr<selected_package>& sp (p.selected);
+ assert (sp != nullptr); // Shouldn't be here otherwise.
+
// Each package is disfigured in its own transaction, so that we
// always leave the configuration in a valid state.
//
- transaction t (db, !simulate /* start */);
+ transaction t (pdb, !simulate /* start */);
- // Reset the flag if the package being unpacked is not an external one.
+ // Figure out if an external package is being replaced with another
+ // external.
//
- if (p.keep_out && !simulate)
+ bool external (false);
+ if (!simulate)
{
- const shared_ptr<available_package>& ap (p.available);
- const package_location& pl (ap->locations[0]);
+ external = (sp->external () && p.external ());
- if (pl.repository_fragment.object_id () == "") // Special root.
- p.keep_out = !exists (pl.location); // Directory case.
- else
- {
+ // Reset the keep_out flag if the package being unpacked is not
+ // external.
+ //
+ if (p.keep_out && !external)
p.keep_out = false;
+ }
- // See if the package comes from the directory-based repository, and
- // so is external.
- //
- // Note that such repository fragments are always preferred over
- // others (see below).
+ // Save prerequisites before disfiguring the package.
+ //
+ // Note that we add the prerequisites list to the map regardless if
+ // there are any prerequisites or not to, in particular, indicate the
+ // package reconfiguration mode to the subsequent
+ // pkg_configure_prerequisites() call (see the function documentation
+ // for details).
+ //
+ if (*p.action != build_package::drop && !p.dependencies && !p.system)
+ {
+ vector<package_name>& ps (previous_prerequisites[&p]);
+
+ if (!sp->prerequisites.empty ())
+ {
+ ps.reserve (sp->prerequisites.size ());
+
+ for (const auto& pp: sp->prerequisites)
+ ps.push_back (pp.first.object_id ());
+ }
+ }
+
+ // For an external package being replaced with another external, keep
+ // the configuration unless requested not to with --disfigure.
+ //
+ bool disfigure (p.disfigure || !external);
+
+ // If the skeleton was not initialized yet (this is an existing package
+ // reconfiguration and no configuration was printed as a part of the
+ // plan, etc), then initialize it now. Whether the skeleton is newly
+ // initialized or not, make sure that the current configuration is
+ // loaded, unless the package project is not being disfigured.
+ //
+ if (*p.action != build_package::drop && !p.system)
+ {
+ if (!p.skeleton)
+ {
+ // If there is no available package specified for the build package
+ // object, then we need to find it (or create a transient one).
//
- for (const package_location& l: ap->locations)
- {
- if (l.repository_fragment.load ()->location.directory_based ())
- {
- p.keep_out = true;
- break;
- }
- }
+ p.init_skeleton (o,
+ true /* load_old_dependent_config */,
+ (p.available == nullptr
+ ? find_available (o, pdb, sp)
+ : nullptr));
}
+
+ if (disfigure)
+ p.skeleton->load_old_config ();
}
// Commits the transaction.
//
- pkg_disfigure (c, o, t, sp, !p.keep_out, simulate);
+ pkg_disfigure (o, pdb, t,
+ sp,
+ !p.keep_out /* clean */,
+ disfigure,
+ simulate);
+
+ r = true;
assert (sp->state == package_state::unpacked ||
sp->state == package_state::transient);
- if (verbose && !o.no_result ())
- text << (sp->state == package_state::transient
- ? "purged "
- : "disfigured ") << *sp;
+ if (result || progress)
+ {
+ const char* what (sp->state == package_state::transient
+ ? "purged"
+ : "disfigured");
+ if (result)
+ text << what << ' ' << *sp << pdb;
+ else if (progress)
+ {
+ size_t p ((++prog_i * 100) / prog_n);
+
+ if (prog_percent != p)
+ {
+ prog_percent = p;
+
+ diag_progress_lock pl;
+ diag_progress = ' ';
+ diag_progress += to_string (p);
+ diag_progress += "% of packages ";
+ diag_progress += what;
+ }
+ }
+ }
// Selected system package is now gone from the database. Before we drop
// the object we need to make sure the hold state is preserved in the
@@ -4633,14 +7785,26 @@ namespace bpkg
}
}
+ // Clear the progress if shown.
+ //
+ if (progress)
+ {
+ diag_progress_lock pl;
+ diag_progress.clear ();
+ }
+
// purge, fetch/unpack|checkout
//
+ pkg_checkout_cache checkout_cache (o);
for (build_package& p: reverse_iterate (build_pkgs))
{
assert (p.action);
+ database& pdb (p.db);
+
shared_ptr<selected_package>& sp (p.selected);
const shared_ptr<available_package>& ap (p.available);
+ const lazy_shared_ptr<repository_fragment>& af (p.repository_fragment);
// Purge the dropped or system package, fetch/unpack or checkout the
// other one.
@@ -4654,11 +7818,15 @@ namespace bpkg
//
if (sp != nullptr)
{
- transaction t (db, !simulate /* start */);
- pkg_purge (c, t, sp, simulate); // Commits the transaction.
+ assert (!sp->system ());
- if (verbose && !o.no_result ())
- text << "purged " << *sp;
+ transaction t (pdb, !simulate /* start */);
+ pkg_purge (pdb, t, sp, simulate); // Commits the transaction.
+
+ r = true;
+
+ if (result)
+ text << "purged " << *sp << pdb;
sp = nullptr;
}
@@ -4683,11 +7851,13 @@ namespace bpkg
{
if (sp != nullptr && !sp->system ())
{
- transaction t (db, !simulate /* start */);
- pkg_purge (c, t, sp, simulate); // Commits the transaction.
+ transaction t (pdb, !simulate /* start */);
+ pkg_purge (pdb, t, sp, simulate); // Commits the transaction.
+
+ r = true;
- if (verbose && !o.no_result ())
- text << "purged " << *sp;
+ if (result)
+ text << "purged " << *sp << pdb;
if (!p.hold_package)
p.hold_package = sp->hold_package;
@@ -4702,9 +7872,11 @@ namespace bpkg
}
// Fetch or checkout if this is a new package or if we are
- // up/down-grading.
+ // up/down-grading or replacing.
//
- if (sp == nullptr || sp->version != p.available_version ())
+ if (sp == nullptr ||
+ sp->version != p.available_version () ||
+ p.replace ())
{
sp = nullptr; // For the directory case below.
@@ -4714,7 +7886,7 @@ namespace bpkg
if (pl.repository_fragment.object_id () != "") // Special root?
{
- transaction t (db, !simulate /* start */);
+ transaction t (pdb, !simulate /* start */);
// Go through package repository fragments to decide if we should
// fetch, checkout or unpack depending on the available repository
@@ -4726,19 +7898,22 @@ namespace bpkg
for (const package_location& l: ap->locations)
{
- const repository_location& rl (
- l.repository_fragment.load ()->location);
-
- if (!basis || rl.local ()) // First or local?
+ if (!rep_masked_fragment (l.repository_fragment))
{
- basis = rl.basis ();
+ const repository_location& rl (
+ l.repository_fragment.load ()->location);
- if (rl.directory_based ())
- break;
+ if (!basis || rl.local ()) // First or local?
+ {
+ basis = rl.basis ();
+
+ if (rl.directory_based ())
+ break;
+ }
}
}
- assert (basis);
+ assert (basis); // Shouldn't be here otherwise.
// All calls commit the transaction.
//
@@ -4747,7 +7922,8 @@ namespace bpkg
case repository_basis::archive:
{
sp = pkg_fetch (o,
- c,
+ pdb,
+ af.database (),
t,
ap->id.name,
p.available_version (),
@@ -4758,28 +7934,33 @@ namespace bpkg
case repository_basis::version_control:
{
sp = p.checkout_root
- ? pkg_checkout (o,
- c,
- t,
- ap->id.name,
- p.available_version (),
- *p.checkout_root,
- true /* replace */,
- p.checkout_purge,
- simulate)
- : pkg_checkout (o,
- c,
- t,
- ap->id.name,
- p.available_version (),
- true /* replace */,
- simulate);
+ ? pkg_checkout (checkout_cache,
+ o,
+ pdb,
+ af.database (),
+ t,
+ ap->id.name,
+ p.available_version (),
+ *p.checkout_root,
+ true /* replace */,
+ p.checkout_purge,
+ simulate)
+ : pkg_checkout (checkout_cache,
+ o,
+ pdb,
+ af.database (),
+ t,
+ ap->id.name,
+ p.available_version (),
+ true /* replace */,
+ simulate);
break;
}
case repository_basis::directory:
{
sp = pkg_unpack (o,
- c,
+ pdb,
+ af.database (),
t,
ap->id.name,
p.available_version (),
@@ -4793,11 +7974,11 @@ namespace bpkg
//
else if (exists (pl.location))
{
- transaction t (db, !simulate /* start */);
+ transaction t (pdb, !simulate /* start */);
sp = pkg_fetch (
o,
- c,
+ pdb,
t,
pl.location, // Archive path.
true, // Replace
@@ -4807,10 +7988,12 @@ namespace bpkg
if (sp != nullptr) // Actually fetched or checked out something?
{
+ r = true;
+
assert (sp->state == package_state::fetched ||
sp->state == package_state::unpacked);
- if (verbose && !o.no_result ())
+ if (result)
{
const repository_location& rl (sp->repository_fragment);
@@ -4826,19 +8009,19 @@ namespace bpkg
case repository_basis::archive:
{
assert (sp->state == package_state::fetched);
- dr << "fetched " << *sp;
+ dr << "fetched " << *sp << pdb;
break;
}
case repository_basis::directory:
{
assert (sp->state == package_state::unpacked);
- dr << "using " << *sp << " (external)";
+ dr << "using " << *sp << pdb << " (external)";
break;
}
case repository_basis::version_control:
{
assert (sp->state == package_state::unpacked);
- dr << "checked out " << *sp;
+ dr << "checked out " << *sp << pdb;
break;
}
}
@@ -4853,98 +8036,489 @@ namespace bpkg
{
if (sp != nullptr)
{
- transaction t (db, !simulate /* start */);
+ transaction t (pdb, !simulate /* start */);
// Commits the transaction.
//
- sp = pkg_unpack (o, c, t, ap->id.name, simulate);
+ sp = pkg_unpack (o, pdb, t, ap->id.name, simulate);
- if (verbose && !o.no_result ())
- text << "unpacked " << *sp;
+ if (result)
+ text << "unpacked " << *sp << pdb;
}
else
{
const package_location& pl (ap->locations[0]);
assert (pl.repository_fragment.object_id () == ""); // Special root.
- transaction t (db, !simulate /* start */);
+ transaction t (pdb, !simulate /* start */);
sp = pkg_unpack (o,
- c,
+ pdb,
t,
path_cast<dir_path> (pl.location),
true, // Replace.
false, // Don't purge; commits the transaction.
simulate);
- if (verbose && !o.no_result ())
- text << "using " << *sp << " (external)";
+ if (result)
+ text << "using " << *sp << pdb << " (external)";
}
+ r = true;
+
assert (sp->state == package_state::unpacked);
}
break; // Get out from the breakout loop.
}
}
+ checkout_cache.clear (); // Detect errors.
// configure
//
+ auto configure_pred = [] (const build_package& p)
+ {
+ // Skip package drops.
+ //
+ if (*p.action == build_package::drop)
+ return false;
+
+ // We configure everything that isn't already configured.
+ //
+ if (p.selected != nullptr &&
+ p.selected->state == package_state::configured)
+ return false;
+
+ return true;
+ };
+
+ // On the first pass collect all the build_package's to be configured and
+ // calculate their configure_prerequisites_result's.
+ //
+ struct configure_package
+ {
+ reference_wrapper<build_package> pkg;
+
+ // These are unused for system packages.
+ //
+ configure_prerequisites_result res;
+ build2::variable_overrides ovrs;
+ };
+ vector<configure_package> configure_packages;
+ configure_packages.reserve (build_pkgs.size ());
+
+ // While at it also collect global configuration variable overrides from
+ // each configure_prerequisites_result::config_variables and merge them
+ // into configure_global_vars.
+ //
+ // @@ TODO: Note that the current global override semantics is quite
+ // broken in that we don't force reconfiguration of all the packages.
+ //
+#ifndef BPKG_OUTPROC_CONFIGURE
+ strings configure_global_vars;
+#endif
+
+ // Return the "would be" state of packages that would be configured
+ // by this stage.
+ //
+ function<find_package_state_function> configured_state (
+ [&configure_packages] (const shared_ptr<selected_package>& sp)
+ -> optional<pair<package_state, package_substate>>
+ {
+ for (const configure_package& cp: configure_packages)
+ {
+ const build_package& p (cp.pkg);
+
+ if (p.selected == sp)
+ return make_pair (
+ package_state::configured,
+ p.system ? package_substate::system : package_substate::none);
+ }
+
+ return nullopt;
+ });
+
for (build_package& p: reverse_iterate (build_pkgs))
{
assert (p.action);
+ if (!configure_pred (p))
+ continue;
+
shared_ptr<selected_package>& sp (p.selected);
const shared_ptr<available_package>& ap (p.available);
- if (*p.action == build_package::drop) // Skip package drops.
- continue;
-
- // Configure the package.
+ // Collect the package.
//
// At this stage the package is either selected, in which case it's a
// source code one, or just available, in which case it is a system
// one. Note that a system package gets selected as being configured.
//
+ // NOTE: remember to update the preparation of the plan to be presented
+ // to the user if changing anything here.
+ //
assert (sp != nullptr || p.system);
- // We configure everything that isn't already configured.
+ database& pdb (p.db);
+ transaction t (pdb, !simulate /* start */);
+
+ // Show how we got here if things go wrong, for example selecting a
+ // prerequisite is ambiguous due to the dependency package being
+ // configured in multiple linked configurations.
//
- if (sp != nullptr && sp->state == package_state::configured)
- continue;
+ auto g (
+ make_exception_guard (
+ [&p] ()
+ {
+ info << "while configuring " << p.name () << p.db;
+ }));
- transaction t (db, !simulate /* start */);
+ configure_prerequisites_result cpr;
+ if (p.system)
+ {
+ // We have no choice but to configure system packages on the first
+ // pass since otherwise there will be no selected package for
+ // pkg_configure_prerequisites() to find. Luckily they have no
+ // dependencies and so can be configured in any order. We will print
+ // their progress/result on the second pass in the proper order.
+ //
+ // Note: commits the transaction.
+ //
+ sp = pkg_configure_system (ap->id.name,
+ p.available_version (),
+ pdb,
+ t);
+ }
+ else
+ {
+ // Should only be called for packages whose prerequisites are saved.
+ //
+ auto prereqs = [&p, &previous_prerequisites] ()
+ {
+ auto i (previous_prerequisites.find (&p));
+ assert (i != previous_prerequisites.end ());
+ return &i->second;
+ };
+
+ // In the simulation mode unconstrain all the unsatisfactory
+ // dependencies, if any, while configuring the dependent (see
+ // build_packages::collect_dependents() for details).
+ //
+ // Note: must be called at most once.
+ //
+ auto unconstrain_deps = [simulate,
+ &p,
+ &trace,
+ deps = vector<package_key> ()] () mutable
+ {
+ if (simulate)
+ {
+ unsatisfied_dependent* ud (
+ simulate->find_dependent (package_key (p.db, p.name ())));
+
+ if (ud != nullptr)
+ {
+ assert (deps.empty ());
+
+ deps.reserve (ud->ignored_constraints.size ());
+
+ for (const auto& c: ud->ignored_constraints)
+ {
+ l5 ([&]{trace << "while configuring dependent " << p.name ()
+ << p.db << " in simulation mode unconstrain ("
+ << c.dependency << ' ' << c.constraint << ')';});
+
+ deps.emplace_back (c.dependency);
+ }
+ }
+ }
+
+ return !deps.empty () ? &deps : nullptr;
+ };
+
+ if (ap != nullptr)
+ {
+ assert (*p.action == build_package::build);
+
+ // If the package prerequisites builds are collected, then use the
+ // resulting package skeleton and the pre-selected dependency
+ // alternatives.
+ //
+ // Note that we may not collect the package prerequisites builds if
+ // the package is already configured but we still need to
+ // reconfigure it due, for example, to an upgrade of its dependency.
+ // In this case we pass to pkg_configure() the newly created package
+ // skeleton which contains the package configuration variables
+ // specified on the command line but (naturally) no reflection
+ // configuration variables. Note, however, that in this case
+ // pkg_configure() call will evaluate the reflect clauses itself and
+ // so the proper reflection variables will still end up in the
+ // package configuration.
+ //
+ // @@ Note that if we ever allow the user to override the
+ // alternative selection, this will break (and also if the user
+ // re-configures the package manually). Maybe that a good reason
+ // not to allow this? Or we could store this information in the
+ // database.
+ //
+ if (p.dependencies)
+ {
+ assert (p.skeleton);
+
+ cpr = pkg_configure_prerequisites (o,
+ pdb,
+ t,
+ *p.dependencies,
+ &*p.alternatives,
+ move (*p.skeleton),
+ nullptr /* prev_prerequisites */,
+ simulate,
+ fdb,
+ configured_state,
+ unconstrain_deps ());
+ }
+ else
+ {
+ assert (p.skeleton); // Must be initialized before disfiguring.
+
+ cpr = pkg_configure_prerequisites (o,
+ pdb,
+ t,
+ ap->dependencies,
+ nullptr /* alternatives */,
+ move (*p.skeleton),
+ prereqs (),
+ simulate,
+ fdb,
+ configured_state,
+ unconstrain_deps ());
+ }
+ }
+ else // Existing dependent.
+ {
+ // This is an adjustment of a dependent which cannot be system
+ // (otherwise it wouldn't be a dependent) and cannot become system
+ // (otherwise it would be a build).
+ //
+ assert (*p.action == build_package::adjust && !sp->system ());
+
+ // Must be in the unpacked state since it was disfigured on the
+ // first pass (see above).
+ //
+ assert (sp->state == package_state::unpacked);
+
+ // The skeleton must be initialized before disfiguring and the
+ // package can't be system.
+ //
+ assert (p.skeleton && p.skeleton->available != nullptr);
+
+ const dependencies& deps (p.skeleton->available->dependencies);
+
+          // @@ Note that on reconfiguration the dependent loses the
+ // potential configuration variables specified by the user on
+ // some previous build, which can be quite surprising. Should we
+ // store this information in the database?
+ //
+ // Note: this now works for external packages via package
+ // skeleton (which extracts user configuration).
+ //
+ cpr = pkg_configure_prerequisites (o,
+ pdb,
+ t,
+ deps,
+ nullptr /* alternatives */,
+ move (*p.skeleton),
+ prereqs (),
+ simulate,
+ fdb,
+ configured_state,
+ unconstrain_deps ());
+ }
- // Note that pkg_configure() commits the transaction.
+ t.commit ();
+
+ if (verb >= 5 && !simulate && !cpr.config_variables.empty ())
+ {
+ diag_record dr (trace);
+
+ dr << sp->name << pdb << " configuration variables:";
+
+ for (const string& cv: cpr.config_variables)
+ dr << "\n " << cv;
+ }
+
+ if (!simulate)
+ {
+#ifndef BPKG_OUTPROC_CONFIGURE
+ auto& gvs (configure_global_vars);
+
+ // Note that we keep global overrides in cpr.config_variables for
+ // diagnostics and skip them in var_override_function below.
+ //
+ for (const string& v: cpr.config_variables)
+ {
+ // Each package should have exactly the same set of global
+ // overrides by construction since we don't allow package-
+ // specific global overrides.
+ //
+ if (v[0] == '!')
+ {
+ if (find (gvs.begin (), gvs.end (), v) == gvs.end ())
+ gvs.push_back (v);
+ }
+ }
+#endif
+ // Add config.config.disfigure unless already disfigured (see the
+ // high-level pkg_configure() version for background).
+ //
+ if (ap == nullptr || !p.disfigure)
+ {
+ cpr.config_variables.push_back (
+ "config.config.disfigure='config." + sp->name.variable () + "**'");
+ }
+ }
+ }
+
+ configure_packages.push_back (configure_package {p, move (cpr), {}});
+ }
+
+ // Reuse the build state to avoid reloading the dependencies over and over
+ // again. This is a valid optimization since we are configuring in the
+ // dependency-dependent order.
+ //
+ unique_ptr<build2::context> configure_ctx;
+
+#ifndef BPKG_OUTPROC_CONFIGURE
+ if (!simulate)
+ {
+ using build2::context;
+ using build2::variable_override;
+
+ function<context::var_override_function> vof (
+ [&configure_packages] (context& ctx, size_t& i)
+ {
+ for (configure_package& cp: configure_packages)
+ {
+ for (const string& v: cp.res.config_variables)
+ {
+ if (v[0] == '!') // Skip global overrides (see above).
+ continue;
+
+ pair<char, variable_override> p (
+ ctx.parse_variable_override (v, i++, false /* buildspec */));
+
+ variable_override& vo (p.second);
+
+ // @@ TODO: put absolute scope overrides into global_vars.
+ //
+ assert (!(p.first == '!' || (vo.dir && vo.dir->absolute ())));
+
+ cp.ovrs.push_back (move (vo));
+ }
+ }
+ });
+
+ configure_ctx = pkg_configure_context (
+ o, move (configure_global_vars), vof);
+
+ // Only global in configure_global_vars.
//
- if (p.system)
- sp = pkg_configure_system (ap->id.name, p.available_version (), t);
- else if (ap != nullptr)
- pkg_configure (c, o, t, sp, ap->dependencies, p.config_vars, simulate);
- else // Dependent.
+ assert (configure_ctx->var_overrides.empty ());
+ }
+#endif
+
+ if (progress)
+ {
+ prog_i = 0;
+ prog_n = configure_packages.size ();
+ prog_percent = 100;
+ }
+
+ for (configure_package& cp: configure_packages)
+ {
+ build_package& p (cp.pkg);
+
+ const shared_ptr<selected_package>& sp (p.selected);
+
+ // Configure the package (system already configured).
+ //
+ // NOTE: remember to update the preparation of the plan to be presented
+ // to the user if changing anything here.
+ //
+ database& pdb (p.db);
+
+ if (!p.system)
{
- // Must be in the unpacked state since it was disfigured on the first
- // pass (see above).
+ const shared_ptr<available_package>& ap (p.available);
+
+ transaction t (pdb, !simulate /* start */);
+
+ // Show how we got here if things go wrong.
//
- assert (sp->state == package_state::unpacked);
-
- package_manifest m (
- pkg_verify (sp->effective_src_root (c),
- true /* ignore_unknown */,
- [&sp] (version& v) {v = sp->version;}));
-
- pkg_configure (c,
- o,
- t,
- sp,
- convert (move (m.dependencies)),
- p.config_vars,
- simulate);
+ auto g (
+ make_exception_guard (
+ [&p] ()
+ {
+ info << "while configuring " << p.name () << p.db;
+ }));
+
+ // Note that pkg_configure() commits the transaction.
+ //
+ if (ap != nullptr)
+ {
+ pkg_configure (o,
+ pdb,
+ t,
+ sp,
+ move (cp.res),
+ configure_ctx,
+ cp.ovrs,
+ simulate);
+ }
+ else // Dependent.
+ {
+ pkg_configure (o,
+ pdb,
+ t,
+ sp,
+ move (cp.res),
+ configure_ctx,
+ cp.ovrs,
+ simulate);
+ }
}
+ r = true;
+
assert (sp->state == package_state::configured);
- if (verbose && !o.no_result ())
- text << "configured " << *sp;
+ if (result)
+ text << "configured " << *sp << pdb;
+ else if (progress)
+ {
+ size_t p ((++prog_i * 100) / prog_n);
+
+ if (prog_percent != p)
+ {
+ prog_percent = p;
+
+ diag_progress_lock pl;
+ diag_progress = ' ';
+ diag_progress += to_string (p);
+ diag_progress += "% of packages configured";
+ }
+ }
+ }
+
+#ifndef BPKG_OUTPROC_CONFIGURE
+ configure_ctx.reset (); // Free.
+#endif
+
+ // Clear the progress if shown.
+ //
+ if (progress)
+ {
+ diag_progress_lock pl;
+ diag_progress.clear ();
}
// Update the hold state.
@@ -4959,6 +8533,8 @@ namespace bpkg
if (*p.action == build_package::drop)
continue;
+ database& pdb (p.db);
+
const shared_ptr<selected_package>& sp (p.selected);
assert (sp != nullptr);
@@ -4979,19 +8555,23 @@ namespace bpkg
sp->hold_package = hp;
sp->hold_version = hv;
- transaction t (db, !simulate /* start */);
- db.update (sp);
+ transaction t (pdb, !simulate /* start */);
+ pdb.update (sp);
t.commit ();
- if (verbose > 1)
+ r = true;
+
+ if (verb > 1)
{
if (hp)
- text << "holding package " << sp->name;
+ text << "holding package " << sp->name << pdb;
if (hv)
- text << "holding version " << *sp;
+ text << "holding version " << *sp << pdb;
}
}
}
+
+ return r;
}
}