diff options
Diffstat (limited to 'bpkg/pkg-build.cxx')
-rw-r--r-- | bpkg/pkg-build.cxx | 4704 |
1 files changed, 3959 insertions, 745 deletions
diff --git a/bpkg/pkg-build.cxx b/bpkg/pkg-build.cxx index 58bb793..fac79c2 100644 --- a/bpkg/pkg-build.cxx +++ b/bpkg/pkg-build.cxx @@ -21,6 +21,7 @@ #include <bpkg/common-options.hxx> #include <bpkg/cfg-link.hxx> +#include <bpkg/rep-mask.hxx> #include <bpkg/pkg-purge.hxx> #include <bpkg/pkg-fetch.hxx> #include <bpkg/rep-fetch.hxx> @@ -32,7 +33,9 @@ #include <bpkg/pkg-disfigure.hxx> #include <bpkg/package-query.hxx> #include <bpkg/package-skeleton.hxx> + #include <bpkg/system-repository.hxx> +#include <bpkg/system-package-manager.hxx> #include <bpkg/pkg-build-collect.hxx> @@ -41,10 +44,10 @@ using namespace butl; namespace bpkg { - // @@ Overall TODO: - // - // - Configuration vars (both passed and preserved) + // System package manager. Resolved lazily if and when needed. Present NULL + // value means no system package manager is available for this host. // + static optional<unique_ptr<system_package_manager>> sys_pkg_mgr; // Current configurations as specified with --directory|-d (or the current // working directory if none specified). @@ -68,11 +71,31 @@ namespace bpkg // ultimate dependent configurations and add them to the respective // configuration-associated fragment lists. // + // If this package's repository fragment is a root fragment (package is + // fetched/unpacked using the existing archive/directory), then also add + // this repository fragment to the resulting list assuming that this + // package's dependencies can be resolved from this repository fragment or + // its complements (user-added repositories) as well. + // static void add_dependent_repo_fragments (database& db, - const available_package_id& id, + const shared_ptr<selected_package>& p, config_repo_fragments& r) { + available_package_id id (p->name, p->version); + + // Add a repository fragment to the specified list, suppressing duplicates. 
+ // + auto add = [] (shared_ptr<repository_fragment>&& rf, + vector<shared_ptr<repository_fragment>>& rfs) + { + if (find (rfs.begin (), rfs.end (), rf) == rfs.end ()) + rfs.push_back (move (rf)); + }; + + if (p->repository_fragment.empty ()) // Root repository fragment? + add (db.find<repository_fragment> (empty_string), r[db]); + for (database& ddb: dependent_repo_configs (db)) { shared_ptr<available_package> dap (ddb.find<available_package> (id)); @@ -91,39 +114,93 @@ namespace bpkg for (const auto& pl: dap->locations) { - shared_ptr<repository_fragment> rf (pl.repository_fragment.load ()); + const lazy_shared_ptr<repository_fragment>& lrf ( + pl.repository_fragment); - if (find (rfs.begin (), rfs.end (), rf) == rfs.end ()) - rfs.push_back (move (rf)); + if (!rep_masked_fragment (lrf)) + add (lrf.load (), rfs); } + + // Erase the entry from the map if it contains no fragments, which may + // happen if all the available package repositories are masked. + // + if (rfs.empty ()) + r.erase (i); } } } - // Return a patch version constraint for the selected package if it has a - // standard version, otherwise, if requested, issue a warning and return - // nullopt. + // Return a patch version constraint for the specified package version if it + // is a standard version (~ shortcut). Otherwise, if requested, issue a + // warning and return nullopt. // // Note that the function may also issue a warning and return nullopt if the - // selected package minor version reached the limit (see - // standard-version.cxx for details). + // package minor version reached the limit (see standard-version.cxx for + // details). // static optional<version_constraint> + patch_constraint (const package_name& nm, + const version& pv, + bool quiet = false) + { + // Note that we don't pass allow_stub flag so the system wildcard version + // will (naturally) not be patched. 
+ // + string vs (pv.string ()); + optional<standard_version> v (parse_standard_version (vs)); + + if (!v) + { + if (!quiet) + warn << "unable to patch " << package_string (nm, pv) << + info << "package is not using semantic/standard version"; + + return nullopt; + } + + try + { + return version_constraint ('~' + vs); + } + // Note that the only possible reason for invalid_argument exception to be + // thrown is that minor version reached the 99999 limit (see + // standard-version.cxx for details). + // + catch (const invalid_argument&) + { + if (!quiet) + warn << "unable to patch " << package_string (nm, pv) << + info << "minor version limit reached"; + + return nullopt; + } + } + + static inline optional<version_constraint> patch_constraint (const shared_ptr<selected_package>& sp, bool quiet = false) { - const package_name& nm (sp->name); - const version& sv (sp->version); + return patch_constraint (sp->name, sp->version, quiet); + } + // As above but returns a minor version constraint (^ shortcut) instead of + // the patch version constraint (~ shortcut). + // + static optional<version_constraint> + minor_constraint (const package_name& nm, + const version& pv, + bool quiet = false) + { // Note that we don't pass allow_stub flag so the system wildcard version // will (naturally) not be patched. 
// - string vs (sv.string ()); + string vs (pv.string ()); optional<standard_version> v (parse_standard_version (vs)); if (!v) { if (!quiet) - warn << "unable to patch " << package_string (nm, sv) << + warn << "unable to upgrade " << package_string (nm, pv) + << " to latest minor version" << info << "package is not using semantic/standard version"; return nullopt; @@ -131,22 +208,75 @@ namespace bpkg try { - return version_constraint ("~" + vs); + return version_constraint ('^' + vs); } - // Note that the only possible reason for invalid_argument exception to - // be thrown is that minor version reached the 99999 limit (see + // Note that the only possible reason for invalid_argument exception to be + // thrown is that major version reached the 99999 limit (see // standard-version.cxx for details). // catch (const invalid_argument&) { if (!quiet) - warn << "unable to patch " << package_string (nm, sv) << - info << "minor version limit reached"; + warn << "unable to upgrade " << package_string (nm, pv) + << " to latest minor version" << + info << "major version limit reached"; return nullopt; } } + // Return true if the selected package is not configured as system and its + // repository fragment is not present in any ultimate dependent + // configuration (see dependent_repo_configs() for details) or this exact + // version is not available from this repository fragment nor from its + // complements. Also return true if the selected package repository fragment + // is a root fragment (package is fetched/unpacked using the existing + // archive/directory). + // + // Note that the orphan definition here is stronger than in the rest of the + // code, since we request the available package to also be present in the + // repository fragment and consider packages built as existing archives or + // directories as orphans. It feels that such a definition aligns better + // with the user expectations about deorphaning. 
+ // + static bool + orphan_package (database& db, const shared_ptr<selected_package>& sp) + { + assert (sp != nullptr); + + if (sp->system ()) + return false; + + const string& cn (sp->repository_fragment.canonical_name ()); + + if (cn.empty ()) // Root repository fragment? + return true; + + for (database& ddb: dependent_repo_configs (db)) + { + const shared_ptr<repository_fragment> rf ( + ddb.find<repository_fragment> (cn)); + + if (rf != nullptr && !rep_masked_fragment (ddb, rf)) + { + auto af ( + find_available_one (sp->name, + version_constraint (sp->version), + lazy_shared_ptr<repository_fragment> (ddb, + move (rf)), + false /* prereq */, + true /* revision */)); + + const shared_ptr<available_package>& ap (af.first); + + if (ap != nullptr && !ap->stub ()) + return false; + } + } + + return true; + } + // List of dependency packages (specified with ? on the command line). // // If configuration is not specified for a system dependency package (db is @@ -160,17 +290,29 @@ namespace bpkg // struct dependency_package { - database* db; // Can only be NULL if system. + database* db; // Can only be NULL if system. package_name name; - optional<version_constraint> constraint; // nullopt if unspecified. + optional<version_constraint> constraint; // nullopt if unspecified. + + // Can only be true if constraint is specified. + // + bool hold_version; + shared_ptr<selected_package> selected; bool system; - bool patch; // Only for an empty version. + bool existing; // Build as archive or directory. + + // true -- upgrade, false -- patch. + // + optional<bool> upgrade; // Only for absent constraint. + + bool deorphan; bool keep_out; bool disfigure; optional<dir_path> checkout_root; bool checkout_purge; - strings config_vars; // Only if not system. + strings config_vars; // Only if not system. + const system_package_status* system_status; // See struct pkg_arg. 
}; using dependency_packages = vector<dependency_package>; @@ -179,8 +321,44 @@ namespace bpkg // this dependency. If the result is a NULL available_package, then it is // either no longer used and can be dropped, or no changes to the dependency // are necessary. Otherwise, the result is available_package to - // upgrade/downgrade to as well as the repository fragment it must come - // from, and the system flag. + // upgrade/downgrade to or replace with the same version (deorphan, rebuild + // as an existing archive or directory, etc) as well as the repository + // fragment it must come from, the system flag, and the database it must be + // configured in. + // + // If the dependency is being rebuilt as an existing archive or directory we + // may end up with the available package version being the same as the + // selected package version. In this case the dependency needs to be + // re-fetched/re-unpacked from this archive/directory. Also note that if the + // dependency needs to be rebuilt as an existing archive or directory the + // caller may need to stash its name/database. This way on the subsequent + // call this function may return the "no change" recommendation rather than + // the "replace" recommendation. + // + // If in the deorphan mode it turns out that the package is not an orphan + // and there is no version constraint specified and upgrade/patch is not + // requested, then assume that no changes are necessary for the dependency. 
+ // Otherwise, if the package version is not constrained and no upgrade/patch + // is requested, then pick the version that matches the dependency version + // best in the following preference order: + // + // - same version, revision, and iteration + // - latest iteration of same version and revision + // - later revision of same version + // - later patch of same version + // - later minor of same version + // - latest available version, including earlier + // + // Otherwise, always upgrade/downgrade the orphan or fail if no satisfactory + // version is available. Note that in the both cases (deorphan and + // upgrade/downgrade+deorphan) we may end up with the available package + // version being the same as the selected package version. In this case the + // dependency needs to be re-fetched from an existing repository. Also note + // that if the dependency needs to be deorphaned the caller may need to + // cache the original orphan version. This way on the subsequent calls this + // function still considers this package as an orphan and uses its original + // version to deduce the best match, which may change due, for example, to a + // change of the constraining dependents set. // // If the package version that satisfies explicitly specified dependency // version constraint can not be found in the dependents repositories, then @@ -191,13 +369,21 @@ namespace bpkg // struct evaluate_result { - // The system flag is meaningless if the unused flag is true. + // The system, existing, upgrade, and orphan members are meaningless if + // the unused flag is true. // reference_wrapper<database> db; shared_ptr<available_package> available; lazy_shared_ptr<bpkg::repository_fragment> repository_fragment; bool unused; bool system; + bool existing; + optional<bool> upgrade; + + // Original orphan version which needs to be deorphaned. May only be + // present for the deorphan mode. 
+ // + optional<version> orphan; }; struct dependent_constraint @@ -213,32 +399,44 @@ namespace bpkg }; using dependent_constraints = vector<dependent_constraint>; + using deorphaned_dependencies = map<package_key, version>; + using existing_dependencies = vector<package_key>; - static optional<evaluate_result> - evaluate_dependency (database&, + static evaluate_result + evaluate_dependency (const common_options&, + database&, const shared_ptr<selected_package>&, const optional<version_constraint>& desired, bool desired_sys, + bool existing, database& desired_db, const shared_ptr<selected_package>& desired_db_sp, - bool patch, + optional<bool> upgrade, + bool deorphan, bool explicitly, const config_repo_fragments&, const dependent_constraints&, + const existing_dependencies&, + const deorphaned_dependencies&, + const build_packages&, bool ignore_unsatisfiable); // If there are no user expectations regarding this dependency, then we give - // no up/down-grade recommendation, unless there are no dependents in which - // case we recommend to drop the dependency. + // no up/down-grade/replace recommendation, unless there are no dependents + // in which case we recommend to drop the dependency. // // Note that the user expectations are only applied for dependencies that // have dependents in the current configurations. 
// static optional<evaluate_result> - evaluate_dependency (database& db, + evaluate_dependency (const common_options& o, + database& db, const shared_ptr<selected_package>& sp, const dependency_packages& deps, bool no_move, + const existing_dependencies& existing_deps, + const deorphaned_dependencies& deorphaned_deps, + const build_packages& pkgs, bool ignore_unsatisfiable) { tracer trace ("evaluate_dependency"); @@ -253,7 +451,10 @@ namespace bpkg nullptr /* available */, nullptr /* repository_fragment */, false /* unused */, - false /* system */}; + false /* system */, + false /* existing */, + nullopt /* upgrade */, + nullopt /* orphan */}; }; // Only search for the user expectations regarding this dependency if it @@ -302,7 +503,8 @@ namespace bpkg fail << "multiple " << db.type << " configurations specified " << "for dependency package " << nm << info << i->db->config_orig << - info << j->db->config_orig; + info << j->db->config_orig << + info << "consider using the --no-move option"; } } } @@ -387,7 +589,10 @@ namespace bpkg nullptr /* available */, nullptr /* repository_fragment */, true /* unused */, - false /* system */}; + false /* system */, + false /* existing */, + nullopt /* upgrade */, + nullopt /* orphan */}; } // The requested dependency database, version constraint, and system flag. @@ -397,6 +602,8 @@ namespace bpkg database& ddb (i->db != nullptr ? *i->db : db); const optional<version_constraint>& dvc (i->constraint); // May be nullopt. bool dsys (i->system); + bool existing (i->existing); + bool deorphan (i->deorphan); // The selected package in the desired database which we copy over. // @@ -408,14 +615,18 @@ namespace bpkg : ddb.find<selected_package> (nm)); // If a package in the desired database is already selected and matches - // the user expectations then no package change is required. 
+ // the user expectations then no package change is required, unless the + // package is also being built as an existing archive or directory or + // needs to be deorphaned. // if (dsp != nullptr && dvc) { const version& sv (dsp->version); bool ssys (dsp->system ()); - if (ssys == dsys && + if (!existing && + !deorphan && + ssys == dsys && (ssys ? sv == *dvc->min_version : satisfies (sv, dvc))) { l5 ([&]{trace << *dsp << ddb << ": unchanged";}); @@ -436,25 +647,27 @@ namespace bpkg package_dependent& dep (pd.second); shared_ptr<selected_package> p (ddb.load<selected_package> (dep.name)); - - add_dependent_repo_fragments ( - ddb, - available_package_id (p->name, p->version), - repo_frags); + add_dependent_repo_fragments (ddb, p, repo_frags); dpt_constrs.emplace_back (ddb, move (p), move (dep.constraint)); } - return evaluate_dependency (db, + return evaluate_dependency (o, + db, sp, dvc, dsys, + existing, ddb, dsp, - i->patch, + i->upgrade, + deorphan, true /* explicitly */, repo_frags, dpt_constrs, + existing_deps, + deorphaned_deps, + pkgs, ignore_unsatisfiable); } @@ -481,17 +694,23 @@ namespace bpkg } }; - static optional<evaluate_result> - evaluate_dependency (database& db, + static evaluate_result + evaluate_dependency (const common_options& o, + database& db, const shared_ptr<selected_package>& sp, const optional<version_constraint>& dvc, bool dsys, + bool existing, database& ddb, const shared_ptr<selected_package>& dsp, - bool patch, + optional<bool> upgrade, + bool deorphan, bool explicitly, const config_repo_fragments& rfs, const dependent_constraints& dpt_constrs, + const existing_dependencies& existing_deps, + const deorphaned_dependencies& deorphaned_deps, + const build_packages& pkgs, bool ignore_unsatisfiable) { tracer trace ("evaluate_dependency"); @@ -504,7 +723,10 @@ namespace bpkg nullptr /* available */, nullptr /* repository_fragment */, false /* unused */, - false /* system */}; + false /* system */, + false /* existing */, + nullopt /* 
upgrade */, + nullopt /* orphan */}; }; // Build the list of available packages for the potential up/down-grade @@ -515,36 +737,115 @@ namespace bpkg // the configuration negotiation machinery) and, if fail, fallback to // picking the latest one just to make sure the package is recognized. // - optional<version_constraint> c; + // But first check if this package is specified as an existing archive or + // directory. If that's the case, then only consider its (transient) + // available package instead of the above. + // + bool patch (false); + available_packages afs; - if (!dvc) + if (existing) { - assert (!dsys); // The version can't be empty for the system package. + // By definition such a dependency has a version specified and may not + // be system. + // + assert (dvc && !dsys); - if (patch) + pair<shared_ptr<available_package>, + lazy_shared_ptr<repository_fragment>> rp ( + find_existing (ddb, nm, *dvc)); + + // Must have been added to the existing packages registry. + // + assert (rp.first != nullptr); + + afs.push_back (move (rp)); + } + else + { + optional<version_constraint> c; + + if (!dvc) { - c = patch_constraint (sp, ignore_unsatisfiable); + assert (!dsys); // The version can't be empty for the system package. 
+ + patch = (upgrade && !*upgrade); - if (!c) + if (patch) { - l5 ([&]{trace << *sp << db << ": non-patchable";}); - return no_change (); + c = patch_constraint (sp, ignore_unsatisfiable); + + if (!c) + { + l5 ([&]{trace << *sp << db << ": non-patchable";}); + return no_change (); + } } } + else if (!dsys || !wildcard (*dvc)) + c = dvc; + + afs = find_available (nm, c, rfs); + + if (afs.empty () && dsys && c) + afs = find_available (nm, nullopt, rfs); } - else if (!dsys || !wildcard (*dvc)) - c = dvc; - vector<pair<shared_ptr<available_package>, - lazy_shared_ptr<repository_fragment>>> afs ( - find_available (nm, c, rfs)); + // In the deorphan mode check that the dependency is an orphan or was + // deorphaned on some previous refinement iteration. If that's not the + // case, then just disable the deorphan mode for this dependency and, if + // the version is not constrained and upgrade/patch is not requested, bail + // out indicating that no change is required. + // + // Note that in the move mode (dsp != sp) we deorphan the dependency in + // its destination configuration, if present. In the worst case scenario + // both the source and destination selected packages may need to be + // deorphaned since the source selected package may also stay if some + // dependents were not repointed to the new dependency (remember that the + // move mode is actually a copy mode). We, however, have no easy way to + // issue recommendations for both the old and the new dependencies at the + // moment. Given that in the common case the old dependency get dropped, + // let's keep it simple and do nothing about the old dependency and see + // how it goes. 
+ // + const version* deorphaned (nullptr); + + if (deorphan) + { + bool orphan (dsp != nullptr && !dsp->system () && !dsys); + + if (orphan) + { + auto i (deorphaned_deps.find (package_key (ddb, nm))); - if (afs.empty () && dsys && c) - afs = find_available (nm, nullopt, rfs); + if (i == deorphaned_deps.end ()) + orphan = orphan_package (ddb, dsp); + else + deorphaned = &i->second; + } + + if (!orphan) + { + if (!dvc && !upgrade) + { + l5 ([&]{trace << *sp << db << ": non-orphan";}); + return no_change (); + } + + deorphan = false; + } + } // Go through up/down-grade candidates and pick the first one that - // satisfies all the dependents. Collect (and sort) unsatisfied dependents - // per the unsatisfiable version in case we need to print them. + // satisfies all the dependents. In the deorphan mode if the package + // version is not constrained and upgrade/patch is not requested, then + // pick the version that matches the dependency version best (see the + // function description for details). Collect (and sort) unsatisfied + // dependents per the unsatisfiable version in case we need to print them. + // + // NOTE: don't forget to update the find_orphan_match() lambda and the + // try_replace_dependency() function if changing anything deorphan-related + // here. // using sp_set = set<config_selected_package>; @@ -556,14 +857,115 @@ namespace bpkg (ddb.system_repository && ddb.system_repository->find (nm) != nullptr)); - for (auto& af: afs) + // Version to deorphan (original orphan version). + // + const version* dov (deorphaned != nullptr ? deorphaned : + deorphan ? &dsp->version : + nullptr); + + optional<version_constraint> dopc; // Patch constraint for the above. + optional<version_constraint> domc; // Minor constraint for the above. + + bool orphan_best_match (deorphan && !dvc && !upgrade); + + if (orphan_best_match) + { + // Note that non-zero iteration makes a version non-standard, so we + // reset it to 0 to produce the patch/minor constraints. 
+ // + version v (dov->epoch, + dov->upstream, + dov->release, + dov->revision, + 0 /* iteration */); + + dopc = patch_constraint (nm, v, true /* quiet */); + domc = minor_constraint (nm, v, true /* quiet */); + } + + using available = pair<shared_ptr<available_package>, + lazy_shared_ptr<repository_fragment>>; + + available deorphan_latest_iteration; + available deorphan_later_revision; + available deorphan_later_patch; + available deorphan_later_minor; + available deorphan_latest_available; + + // If the dependency is deorphaned to the same version as on the previous + // call, then return the "no change" result. Otherwise, return the + // deorphan result. + // + auto deorphan_result = [&sp, &db, + &ddb, &dsp, + dsys, + deorphaned, dov, + existing, + upgrade, + &no_change, + &trace] (available&& a, const char* what) + { + if (deorphaned != nullptr && dsp->version == a.first->version) + { + l5 ([&]{trace << *sp << db << ": already deorphaned";}); + return no_change (); + } + + l5 ([&]{trace << *sp << db << ": deorphan to " << what << ' ' + << package_string (sp->name, a.first->version) + << ddb;}); + + return evaluate_result { + ddb, move (a.first), move (a.second), + false /* unused */, + dsys, + existing, + upgrade, + *dov}; + }; + + auto build_result = [&ddb, dsys, existing, upgrade] (available&& a) + { + return evaluate_result { + ddb, move (a.first), move (a.second), + false /* unused */, + dsys, + existing, + upgrade, + nullopt /* orphan */}; + }; + + // Note that if the selected dependency is the best that we can get, we + // normally issue the "no change" recommendation. However, if the + // configuration variables are specified for this dependency on the + // command line, then we issue the "reconfigure" recommendation instead. + // + // Return true, if the already selected dependency has been specified on + // the command line with the configuration variables, but has not yet been + // built on this pkg-build run. 
+ // + auto reconfigure = [&ddb, &dsp, &nm, dsys, &pkgs] () + { + assert (dsp != nullptr); + + if (!dsys) + { + const build_package* p (pkgs.entered_build (ddb, nm)); + return p != nullptr && !p->action && !p->config_vars.empty (); + } + else + return false; + }; + + for (available& af: afs) { shared_ptr<available_package>& ap (af.first); const version& av (!dsys ? ap->version : *ap->system_version (ddb)); // If we aim to upgrade to the latest version and it tends to be less // then the selected one, then what we currently have is the best that - // we can get, and so we return the "no change" result. + // we can get, and so we return the "no change" result, unless we are + // deorphaning. // // Note that we also handle a package stub here. // @@ -574,14 +976,21 @@ namespace bpkg // For the selected system package we still need to pick a source // package version to downgrade to. // - if (!dsp->system ()) + if (!dsp->system () && !deorphan) { - l5 ([&]{trace << *dsp << ddb << ": best";}); - return no_change (); + if (reconfigure ()) + { + l5 ([&]{trace << *dsp << ddb << ": reconfigure (best)";}); + return build_result (find_available_fragment (o, ddb, dsp)); + } + else + { + l5 ([&]{trace << *dsp << ddb << ": best";}); + return no_change (); + } } - // We can not upgrade the (system) package to a stub version, so just - // skip it. + // We can not upgrade the package to a stub version, so just skip it. // if (ap->stub ()) { @@ -627,33 +1036,161 @@ namespace bpkg continue; } - // If the best satisfactory version and the desired system flag perfectly - // match the ones of the selected package, then no package change is - // required. Otherwise, recommend an up/down-grade. - // - if (dsp != nullptr && av == dsp->version && dsp->system () == dsys) + if (orphan_best_match) { - l5 ([&]{trace << *dsp << ddb << ": unchanged";}); - return no_change (); + // If the exact orphan version is encountered, then we are done. 
+ // + if (av == *dov) + return deorphan_result (move (af), "exactly same version"); + + // If the available package is of the same revision as orphan but a + // different iteration, then save it as the latest iteration of same + // orphan version and revision. + // + if (deorphan_latest_iteration.first == nullptr && + av.compare (*dov, false /* revision */, true /* iteration */) == 0) + deorphan_latest_iteration = af; + + // If the available package is of the same version as orphan and its + // revision is greater, then save it as the later revision of same + // version. + // + if (deorphan_later_revision.first == nullptr && + av.compare (*dov, true /* revision */) == 0 && + av.compare (*dov, false /* revision */, true /* iteration */) > 0) + deorphan_later_revision = af; + + // If the available package is of the same minor version as orphan but + // of the greater patch version, then save it as the later patch of + // same version. + // + if (deorphan_later_patch.first == nullptr && + dopc && satisfies (av, *dopc) && + av.compare (*dov, true /* revision */) > 0) // Patch is greater? + deorphan_later_patch = af; + + // If the available package is of the same major version as orphan but + // of the greater minor version, then save it as the later minor of + // same version. + // + // Note that we can't easily compare minor versions here since these + // are bpkg version objects. Thus, we consider that this is a greater + // minor version if the version is greater (ignoring revisions) and + // the latest patch is not yet saved. + // + if (deorphan_later_minor.first == nullptr && + domc && satisfies (av, *domc) && + av.compare (*dov, true /* revision */) > 0 && + deorphan_later_patch.first == nullptr) + deorphan_later_minor = af; + + // Save the latest available package version. 
+ // + if (deorphan_latest_available.first == nullptr) + deorphan_latest_available = move (af); + + // If the available package version is less then the orphan revision + // then we can bail out from the loop, since all the versions from the + // preference list have already been encountered, if present. + // + if (av.compare (*dov, false /* revision */, true /* iteration */) < 0) + { + assert (deorphan_latest_iteration.first != nullptr || + deorphan_later_revision.first != nullptr || + deorphan_later_patch.first != nullptr || + deorphan_later_minor.first != nullptr || + deorphan_latest_available.first != nullptr); + break; + } + } + else + { + // In the up/downgrade+deorphan mode always replace the dependency, + // re-fetching it from an existing repository if the version stays the + // same. + // + if (deorphan) + return deorphan_result (move (af), "constrained version"); + + // For the regular up/downgrade if the best satisfactory version and + // the desired system flag perfectly match the ones of the selected + // package, then no package change is required. Otherwise, recommend + // an upgrade/downgrade/replacement. + // + // Note: we need to be careful here not to start yo-yo'ing for a + // dependency being built as an existing archive or directory. For + // such a dependency we need to return the "no change" recommendation + // if any version recommendation (which may not change) has already + // been returned. 
+ // + if (dsp != nullptr && + av == dsp->version && + dsp->system () == dsys && + (!existing || + find (existing_deps.begin (), existing_deps.end (), + package_key (ddb, nm)) != existing_deps.end ())) + { + if (reconfigure ()) + { + l5 ([&]{trace << *dsp << ddb << ": reconfigure";}); + return build_result (move (af)); + } + else + { + l5 ([&]{trace << *dsp << ddb << ": unchanged";}); + return no_change (); + } + } + else + { + l5 ([&]{trace << *sp << db << ": update to " + << package_string (nm, av, dsys) << ddb;}); + + return build_result (move (af)); + } } + } - l5 ([&]{trace << *sp << db << ": update to " - << package_string (nm, av, dsys) << ddb;}); + if (orphan_best_match) + { + if (deorphan_latest_iteration.first != nullptr) + return deorphan_result (move (deorphan_latest_iteration), + "latest iteration"); - return evaluate_result { - ddb, move (ap), move (af.second), false /* unused */, dsys}; + if (deorphan_later_revision.first != nullptr) + return deorphan_result (move (deorphan_later_revision), + "later revision"); + + if (deorphan_later_patch.first != nullptr) + return deorphan_result (move (deorphan_later_patch), "later patch"); + + if (deorphan_later_minor.first != nullptr) + return deorphan_result (move (deorphan_later_minor), "later minor"); + + if (deorphan_latest_available.first != nullptr) + return deorphan_result (move (deorphan_latest_available), + "latest available"); } // If we aim to upgrade to the latest version, then what we currently have // is the only thing that we can get, and so returning the "no change" - // result, unless we need to upgrade a package configured as system. + // result, unless we need to upgrade a package configured as system or to + // deorphan. // - if (!dvc && dsp != nullptr && !dsp->system ()) + if (!dvc && dsp != nullptr && !dsp->system () && !deorphan) { assert (!dsys); // Version cannot be empty for the system package. 
- l5 ([&]{trace << *dsp << ddb << ": only";}); - return no_change (); + if (reconfigure ()) + { + l5 ([&]{trace << *dsp << ddb << ": reconfigure (only)";}); + return build_result (find_available_fragment (o, ddb, dsp)); + } + else + { + l5 ([&]{trace << *dsp << ddb << ": only";}); + return no_change (); + } } // If the version satisfying the desired dependency version constraint is @@ -678,11 +1215,12 @@ namespace bpkg { diag_record dr (fail); - if (!dvc && patch) + if (patch) { - // Otherwise, we should have bailed out earlier (see above). + // Otherwise, we should have bailed out earlier returning "no change" + // (see above). // - assert (dsp != nullptr && dsp->system ()); + assert (dsp != nullptr && (dsp->system () || deorphan)); // Patch (as any upgrade) of a system package is always explicit, so // we always fail and never treat the package as being up to date. @@ -697,10 +1235,10 @@ namespace bpkg << " is not available from its dependents' repositories"; else // The only available package is a stub. { - // Note that we don't advise to "build" the package as a system one as - // it is already as such (see above). + // Otherwise, we should have bailed out earlier, returning "no change" + // rather then setting the stub flag to true (see above). // - assert (!dvc && !dsys && dsp != nullptr && dsp->system ()); + assert (!dvc && !dsys && dsp != nullptr && (dsp->system () || deorphan)); fail << package_string (nm, dvc) << ddb << " is not available in " << "source from its dependents' repositories"; @@ -726,6 +1264,9 @@ namespace bpkg size_t i (0), n (ps.size ()); for (auto p (ps.begin ()); i != n; ++p) { + // It would probably be nice to also print the unsatisfied constraint + // here, but let's keep it simple for now. + // dr << (i == 0 ? " " : ", ") << *p->package << p->db; if (++i == 5 && n != 6) // Printing 'and 1 more' looks stupid. 
@@ -746,41 +1287,99 @@ namespace bpkg } // List of dependent packages whose immediate/recursive dependencies must be - // upgraded (specified with -i/-r on the command line). + // upgraded and/or deorphaned (specified with -i/-r on the command line). // struct recursive_package { - database& db; - package_name name; - bool upgrade; // true -- upgrade, false -- patch. - bool recursive; // true -- recursive, false -- immediate. + database& db; + package_name name; + + // Recursive/immediate upgrade/patch. Note the upgrade member is only + // meaningful if recursive is present. + // + optional<bool> recursive; // true -- recursive, false -- immediate. + bool upgrade; // true -- upgrade, false -- patch. + + // Recursive/immediate deorphaning. + // + optional<bool> deorphan; // true -- recursive, false -- immediate. }; using recursive_packages = vector<recursive_package>; // Recursively check if immediate dependencies of this dependent must be - // upgraded or patched. Return true if it must be upgraded, false if - // patched, and nullopt otherwise. + // upgraded or patched and/or deorphaned. + // + // Cache the results of this function calls to avoid multiple traversals of + // the same dependency graphs. // - static optional<bool> + struct upgrade_dependencies_key + { + package_key dependent; + bool recursion; + + bool + operator< (const upgrade_dependencies_key& v) const + { + if (recursion != v.recursion) + return recursion < v.recursion; + + return dependent < v.dependent; + } + }; + + struct upgrade_deorphan + { + optional<bool> upgrade; // true -- upgrade, false -- patch. 
+ bool deorphan; + }; + + using upgrade_dependencies_cache = map<upgrade_dependencies_key, + upgrade_deorphan>; + + static upgrade_deorphan upgrade_dependencies (database& db, const package_name& nm, const recursive_packages& rs, + upgrade_dependencies_cache& cache, bool recursion = false) { + // If the result of the upgrade_dependencies() call for these dependent + // and recursion flag value is cached, then return that. Otherwise, cache + // the calculated result prior to returning it to the caller. + // + upgrade_dependencies_key k {package_key (db, nm), recursion}; + { + auto i (cache.find (k)); + + if (i != cache.end ()) + return i->second; + } + auto i (find_if (rs.begin (), rs.end (), [&nm, &db] (const recursive_package& i) -> bool { return i.name == nm && i.db == db; })); - optional<bool> r; + upgrade_deorphan r {nullopt /* upgrade */, false /* deorphan */}; - if (i != rs.end () && i->recursive >= recursion) + if (i != rs.end ()) { - r = i->upgrade; + if (i->recursive && *i->recursive >= recursion) + r.upgrade = i->upgrade; + + if (i->deorphan && *i->deorphan >= recursion) + r.deorphan = true; - if (*r) // Upgrade (vs patch)? + // If we both upgrade and deorphan, then we can bail out since the value + // may not change any further (upgrade wins over patch and deorphaning + // can't be canceled). + // + if (r.upgrade && *r.upgrade && r.deorphan) + { + cache[move (k)] = r; return r; + } } for (database& ddb: db.dependent_configs ()) @@ -791,27 +1390,40 @@ namespace bpkg // configured packages due to a dependency cycle (see order() for // details). // - if (optional<bool> u = upgrade_dependencies (ddb, pd.name, rs, true)) + upgrade_deorphan ud ( + upgrade_dependencies (ddb, pd.name, rs, cache, true /* recursion */)); + + if (ud.upgrade || ud.deorphan) { - if (!r || *r < *u) // Upgrade wins patch. - { - r = u; + // Upgrade wins over patch. 
+ // + if (ud.upgrade && (!r.upgrade || *r.upgrade < *ud.upgrade)) + r.upgrade = *ud.upgrade; + + if (ud.deorphan) + r.deorphan = true; - if (*r) // Upgrade (vs patch)? - return r; + // If we both upgrade and deorphan, then we can bail out (see above + // for details). + // + if (r.upgrade && *r.upgrade && r.deorphan) + { + cache[move (k)] = r; + return r; } } } } + cache[move (k)] = r; return r; } // Evaluate a package (not necessarily dependency) and return a new desired // version. If the result is absent (nullopt), then no changes to the // package are necessary. Otherwise, the result is available_package to - // upgrade/downgrade to as well as the repository fragment it must come - // from. + // upgrade/downgrade to or replace with, as well as the repository fragment + // it must come from. // // If the system package cannot be upgraded to the source one, not being // found in the dependents repositories, then return nullopt if @@ -819,10 +1431,15 @@ namespace bpkg // evaluate_dependency() function description for details). // static optional<evaluate_result> - evaluate_recursive (database& db, + evaluate_recursive (const common_options& o, + database& db, const shared_ptr<selected_package>& sp, const recursive_packages& recs, - bool ignore_unsatisfiable) + const existing_dependencies& existing_deps, + const deorphaned_dependencies& deorphaned_deps, + const build_packages& pkgs, + bool ignore_unsatisfiable, + upgrade_dependencies_cache& cache) { tracer trace ("evaluate_recursive"); @@ -839,7 +1456,7 @@ namespace bpkg // (immediate) dependents that have a hit (direct or indirect) in recs. // Note, however, that we collect constraints from all the dependents. 
// - optional<bool> upgrade; + upgrade_deorphan ud {nullopt /* upgrade */, false /* deorphan */}; for (database& ddb: db.dependent_configs ()) { @@ -849,10 +1466,17 @@ namespace bpkg dpt_constrs.emplace_back (ddb, p, move (pd.constraint)); - if (optional<bool> u = upgrade_dependencies (ddb, pd.name, recs)) + upgrade_deorphan u (upgrade_dependencies (ddb, pd.name, recs, cache)); + + if (u.upgrade || u.deorphan) { - if (!upgrade || *upgrade < *u) // Upgrade wins patch. - upgrade = u; + // Upgrade wins over patch. + // + if (u.upgrade && (!ud.upgrade || *ud.upgrade < *u.upgrade)) + ud.upgrade = *u.upgrade; + + if (u.deorphan) + ud.deorphan = true; } else continue; @@ -861,32 +1485,37 @@ namespace bpkg // continue to iterate over dependents, collecting the repository // fragments and the constraints. // - add_dependent_repo_fragments ( - ddb, - available_package_id (p->name, p->version), - repo_frags); + add_dependent_repo_fragments (ddb, p, repo_frags); } } - if (!upgrade) + if (!ud.upgrade && !ud.deorphan) { l5 ([&]{trace << *sp << db << ": no hit";}); return nullopt; } - // Recommends the highest possible version. - // + pair<shared_ptr<available_package>, + lazy_shared_ptr<repository_fragment>> rp ( + find_existing (db, sp->name, nullopt /* version_constraint */)); + optional<evaluate_result> r ( - evaluate_dependency (db, + evaluate_dependency (o, + db, sp, nullopt /* desired */, false /* desired_sys */, + rp.first != nullptr /* existing */, db, sp, - !*upgrade /* patch */, + ud.upgrade, + ud.deorphan, false /* explicitly */, repo_frags, dpt_constrs, + existing_deps, + deorphaned_deps, + pkgs, ignore_unsatisfiable)); // Translate the "no change" result into nullopt. @@ -895,12 +1524,1404 @@ namespace bpkg return r && r->available == nullptr ? nullopt : r; } - // Return false if the plan execution was noop. + // Stack of the command line adjustments as per unsatisfied_dependents + // description. 
+ // + struct cmdline_adjustment + { + enum class adjustment_type: uint8_t + { + hold_existing, // Adjust constraint in existing build-to-hold spec. + dep_existing, // Adjust constraint in existing dependency spec. + hold_new, // Add new build-to-hold spec. + dep_new // Add new dependency spec. + }; + + adjustment_type type; + reference_wrapper<database> db; + package_name name; + bpkg::version version; // Replacement. + + // Meaningful only for the *_new types. + // + optional<bool> upgrade; + bool deorphan = false; + + // For the newly created or popped from the stack object the following + // three members contain the package version replacement information. + // Otherwise (pushed to the stack), they contain the original command line + // spec information. + // + shared_ptr<available_package> available; // NULL for dep_* types. + lazy_shared_ptr<bpkg::repository_fragment> repository_fragment; // As above. + optional<version_constraint> constraint; + + // Create object of the hold_existing type. + // + cmdline_adjustment (database& d, + const package_name& n, + shared_ptr<available_package>&& a, + lazy_shared_ptr<bpkg::repository_fragment>&& f) + : type (adjustment_type::hold_existing), + db (d), + name (n), + version (a->version), + available (move (a)), + repository_fragment (move (f)), + constraint (version_constraint (version)) {} + + // Create object of the dep_existing type. + // + cmdline_adjustment (database& d, + const package_name& n, + const bpkg::version& v) + : type (adjustment_type::dep_existing), + db (d), + name (n), + version (v), + constraint (version_constraint (version)) {} + + // Create object of the hold_new type. 
+ // + cmdline_adjustment (database& d, + const package_name& n, + shared_ptr<available_package>&& a, + lazy_shared_ptr<bpkg::repository_fragment>&& f, + optional<bool> u, + bool o) + : type (adjustment_type::hold_new), + db (d), + name (n), + version (a->version), + upgrade (u), + deorphan (o), + available (move (a)), + repository_fragment (move (f)), + constraint (version_constraint (version)) {} + + // Create object of the dep_new type. + // + cmdline_adjustment (database& d, + const package_name& n, + const bpkg::version& v, + optional<bool> u, + bool o) + : type (adjustment_type::dep_new), + db (d), + name (n), + version (v), + upgrade (u), + deorphan (o), + constraint (version_constraint (version)) {} + }; + + class cmdline_adjustments + { + public: + cmdline_adjustments (vector<build_package>& hps, dependency_packages& dps) + : hold_pkgs_ (hps), + dep_pkgs_ (dps) {} + + // Apply the specified adjustment to the command line, push the adjustment + // to the stack, and record the resulting command line state as the SHA256 + // checksum. + // + void + push (cmdline_adjustment&& a) + { + using type = cmdline_adjustment::adjustment_type; + + // We always set the `== <version>` constraint in the resulting spec. + // + assert (a.constraint); + + database& db (a.db); + const package_name& nm (a.name); + package_version_key cmd_line (db.main_database (), "command line"); + + switch (a.type) + { + case type::hold_existing: + { + auto i (find_hold_pkg (a)); + assert (i != hold_pkgs_.end ()); // As per adjustment type. 
+ + build_package& bp (*i); + swap (bp.available, a.available); + swap (bp.repository_fragment, a.repository_fragment); + + if (!bp.constraints.empty ()) + { + swap (bp.constraints[0].value, *a.constraint); + } + else + { + bp.constraints.emplace_back (move (*a.constraint), + cmd_line.db, + cmd_line.name.string ()); + a.constraint = nullopt; + } + + break; + } + case type::dep_existing: + { + auto i (find_dep_pkg (a)); + assert (i != dep_pkgs_.end ()); // As per adjustment type. + swap (i->constraint, a.constraint); + break; + } + case type::hold_new: + { + // As per adjustment type. + // + assert (find_hold_pkg (a) == hold_pkgs_.end ()); + + // Start the database transaction to perform the + // database::find<selected_package> call, unless we are already in + // the transaction. + // + transaction t (db, !transaction::has_current ()); + + build_package bp { + build_package::build, + db, + db.find<selected_package> (nm), + move (a.available), + move (a.repository_fragment), + nullopt, // Dependencies. + nullopt, // Dependencies alternatives. + nullopt, // Package skeleton. + nullopt, // Postponed dependency alternatives. + false, // Recursive collection. + true, // Hold package. + false, // Hold version. + {}, // Constraints. + false, // System. + false, // Keep output directory. + false, // Disfigure (from-scratch reconf). + false, // Configure-only. + nullopt, // Checkout root. + false, // Checkout purge. + strings (), // Configuration variables. + a.upgrade, + a.deorphan, + {cmd_line}, // Required by (command line). + false, // Required by dependents. + (a.deorphan + ? build_package::build_replace + : uint16_t (0))}; + + t.commit (); + + bp.constraints.emplace_back (move (*a.constraint), + cmd_line.db, + cmd_line.name.string ()); + + a.constraint = nullopt; + + hold_pkgs_.push_back (move (bp)); + break; + } + case type::dep_new: + { + // As per adjustment type. 
+ // + assert (find_dep_pkg (a) == dep_pkgs_.end ()); + + // Start the database transaction to perform the + // database::find<selected_package> call, unless we are already in + // the transaction. + // + transaction t (db, !transaction::has_current ()); + + dep_pkgs_.push_back ( + dependency_package {&db, + nm, + move (*a.constraint), + false /* hold_version */, + db.find<selected_package> (nm), + false /* system */, + false /* existing */, + a.upgrade, + a.deorphan, + false /* keep_out */, + false /* disfigure */, + nullopt /* checkout_root */, + false /* checkout_purge */, + strings () /* config_vars */, + nullptr /* system_status */}); + + t.commit (); + + a.constraint = nullopt; + break; + } + } + + packages_.insert (package_version_key (db, nm, a.version)); + adjustments_.push_back (move (a)); + former_states_.insert (state ()); + } + + // Roll back the latest (default) or first command line adjustment, pop it + // from the stack, and return the popped adjustment. Assume that the stack + // is not empty. + // + // Note that the returned object can be pushed to the stack again. + // + cmdline_adjustment + pop (bool first = false) + { + using type = cmdline_adjustment::adjustment_type; + + assert (!empty ()); + + // Pop the adjustment. + // + cmdline_adjustment a (move (!first + ? adjustments_.back () + : adjustments_.front ())); + if (!first) + adjustments_.pop_back (); + else + adjustments_.erase (adjustments_.begin ()); + + packages_.erase (package_version_key (a.db, a.name, a.version)); + + // Roll back the adjustment. + // + switch (a.type) + { + case type::hold_existing: + { + auto i (find_hold_pkg (a)); + assert (i != hold_pkgs_.end ()); + + build_package& bp (*i); + swap (bp.available, a.available); + swap (bp.repository_fragment, a.repository_fragment); + + // Must contain the replacement version. 
+ // + assert (!bp.constraints.empty ()); + + version_constraint& c (bp.constraints[0].value); + + if (a.constraint) // Original spec contains a version constraint? + { + swap (c, *a.constraint); + } + else + { + a.constraint = move (c); + bp.constraints.clear (); + } + + break; + } + case type::dep_existing: + { + auto i (find_dep_pkg (a)); + assert (i != dep_pkgs_.end ()); + swap (i->constraint, a.constraint); + break; + } + case type::hold_new: + { + auto i (find_hold_pkg (a)); + assert (i != hold_pkgs_.end ()); + + build_package& bp (*i); + a.available = move (bp.available); + a.repository_fragment = move (bp.repository_fragment); + + // Must contain the replacement version. + // + assert (!bp.constraints.empty ()); + + a.constraint = move (bp.constraints[0].value); + + hold_pkgs_.erase (i); + break; + } + case type::dep_new: + { + auto i (find_dep_pkg (a)); + assert (i != dep_pkgs_.end ()); + + a.constraint = move (i->constraint); + + dep_pkgs_.erase (i); + break; + } + } + + return a; + } + + // Return the specified adjustment's string representation in the + // following form: + // + // hold_existing: '<pkg>[ <constraint>][ <database>]' -> '<pkg> <constraint>' + // dep_existing: '?<pkg>[ <constraint>][ <database>]' -> '?<pkg> <constraint>' + // hold_new: '<pkg> <constraint>[ <database>]' + // dep_new: '?<pkg> <constraint>[ <database>]' + // + // Note: the adjustment is assumed to be newly created or be popped from + // the stack. + // + string + to_string (const cmdline_adjustment& a) const + { + using type = cmdline_adjustment::adjustment_type; + + assert (a.constraint); // Since not pushed. 
+ + const string& s (a.db.get ().string); + + switch (a.type) + { + case type::hold_existing: + { + string r ("'" + a.name.string ()); + + auto i (find_hold_pkg (a)); + assert (i != hold_pkgs_.end ()); + + const build_package& bp (*i); + if (!bp.constraints.empty ()) + r += ' ' + bp.constraints[0].value.string (); + + if (!s.empty ()) + r += ' ' + s; + + r += "' -> '" + a.name.string () + ' ' + a.constraint->string () + + "'"; + + return r; + } + case type::dep_existing: + { + string r ("'?" + a.name.string ()); + + auto i (find_dep_pkg (a)); + assert (i != dep_pkgs_.end ()); + + if (i->constraint) + r += ' ' + i->constraint->string (); + + if (!s.empty ()) + r += ' ' + s; + + r += "' -> '?" + a.name.string () + ' ' + a.constraint->string () + + "'"; + + return r; + } + case type::hold_new: + { + assert (find_hold_pkg (a) == hold_pkgs_.end ()); + + string r ("'" + a.name.string () + ' ' + a.constraint->string ()); + + if (!s.empty ()) + r += ' ' + s; + + r += "'"; + return r; + } + case type::dep_new: + { + assert (find_dep_pkg (a) == dep_pkgs_.end ()); + + string r ("'?" + a.name.string () + ' ' + a.constraint->string ()); + + if (!s.empty ()) + r += ' ' + s; + + r += "'"; + return r; + } + } + + assert (false); // Can't be here. + return ""; + } + + // Return true, if there are no adjustments in the stack. + // + bool + empty () const + { + return adjustments_.empty (); + } + + // Return true, if push() has been called at least once. + // + bool + tried () const + { + return !former_states_.empty (); + } + + // Return the number of adjustments in the stack. + // + size_t + size () const + { + return adjustments_.size (); + } + + // Return true if replacing a package build with the specified version + // will result in a command line which has already been (unsuccessfully) + // tried as a starting point for the package builds re-collection. 
+ // + bool + tried_earlier (database& db, const package_name& n, const version& v) const + { + if (former_states_.empty ()) + return false; + + // Similar to the state() function, calculate the checksum over the + // packages set, but also consider the specified package version as if + // it were present in the set. + // + // Note that the specified package version may not be in the set, since + // we shouldn't be trying to replace with the package version which is + // already in the command line. + // + sha256 cs; + + auto lt = [&db, &n, &v] (const package_version_key& pvk) + { + if (int r = n.compare (pvk.name)) + return r < 0; + + if (int r = v.compare (*pvk.version)) + return r < 0; + + return db < pvk.db; + }; + + bool appended (false); + for (const package_version_key& p: packages_) + { + assert (p.version); // Only the real packages can be here. + + if (!appended && lt (p)) + { + cs.append (db.config.string ()); + cs.append (n.string ()); + cs.append (v.string ()); + + appended = true; + } + + cs.append (p.db.get ().config.string ()); + cs.append (p.name.string ()); + cs.append (p.version->string ()); + } + + if (!appended) + { + cs.append (db.config.string ()); + cs.append (n.string ()); + cs.append (v.string ()); + } + + return former_states_.find (cs.string ()) != former_states_.end (); + } + + private: + // Return the SHA256 checksum of the current command line state. + // + string + state () const + { + // NOTE: remember to update tried_earlier() if changing anything here. + // + sha256 cs; + for (const package_version_key& p: packages_) + { + assert (p.version); // Only the real packages can be here. + + cs.append (p.db.get ().config.string ()); + cs.append (p.name.string ()); + cs.append (p.version->string ()); + } + + return cs.string (); + } + + // Find the command line package spec an adjustment applies to. 
+ // + vector<build_package>::iterator + find_hold_pkg (const cmdline_adjustment& a) const + { + return find_if (hold_pkgs_.begin (), hold_pkgs_.end (), + [&a] (const build_package& p) + { + return p.name () == a.name && p.db == a.db; + }); + } + + dependency_packages::iterator + find_dep_pkg (const cmdline_adjustment& a) const + { + return find_if (dep_pkgs_.begin (), dep_pkgs_.end (), + [&a] (const dependency_package& p) + { + return p.name == a.name && + p.db != nullptr && + *p.db == a.db; + }); + } + + private: + vector<build_package>& hold_pkgs_; + dependency_packages& dep_pkgs_; + + vector<cmdline_adjustment> adjustments_; // Adjustments stack. + set<package_version_key> packages_; // Replacements. + set<string> former_states_; // Command line seen states. + }; + + // Try to replace a collected package with a different available version, + // satisfactory for all its new and/or existing dependents. Return the + // command line adjustment if such a replacement is deduced and nullopt + // otherwise. In the latter case, also return the list of the being built + // dependents which are unsatisfied by some of the dependency available + // versions (unsatisfied_dpts argument). + // + // Specifically, try to find the best available package version considering + // all the imposed constraints as per unsatisfied_dependents description. If + // succeed, return the command line adjustment reflecting the replacement. + // + // Notes: + // + // - Doesn't perform the actual adjustment of the command line. + // + // - Expected to be called after the execution plan is fully refined. That, + // in particular, means that all the existing dependents are also + // collected and thus the constraints they impose are already in their + // dependencies' constraints lists. + // + // - The specified package version may or may not be satisfactory for its + // new and existing dependents. 
+ //
+ // - The replacement is denied in the following cases:
+ //
+ // - If it turns out that the package has been specified on the command
+ // line (by the user or by us on some previous iteration) with an exact
+ // version constraint, then we cannot try any other version.
+ //
+ // - If the dependency is system, then it is either specified with the
+ // wildcard version or its exact version has been specified by the user
+ // or has been deduced by the system package manager. In the former
+ // case we actually won't be calling this function for this package
+ // since the wildcard version satisfies any constraint. Thus, an exact
+ // version has been specified/deduced for this dependency and so we
+ // cannot try any other version.
+ //
+ // - If the dependency is being built as an existing archive/directory,
+ // then its version is determined and so we cannot try any other
+ // version.
+ //
+ // - If the package is already configured with the version held and the
+ // user didn't specify this package on the command line and it is not
+ // requested to be upgraded, patched, and/or deorphaned, then we
+ // shouldn't be silently up/down-grading it.
+ //
+ optional<cmdline_adjustment>
+ try_replace_dependency (const common_options& o,
+ const build_package& p,
+ const build_packages& pkgs,
+ const vector<build_package>& hold_pkgs,
+ const dependency_packages& dep_pkgs,
+ const cmdline_adjustments& cmdline_adjs,
+ vector<package_key>& unsatisfied_dpts,
+ const char* what)
+ {
+ tracer trace ("try_replace_dependency");
+
+ assert (p.available != nullptr); // By definition.
+
+ // Bail out for the system package build.
+ //
+ if (p.system)
+ {
+ l5 ([&]{trace << "replacement of " << what << " version "
+ << p.available_name_version_db () << " is denied "
+ << "since it is being configured as system";});
+
+ return nullopt;
+ }
+
+ // Bail out for an existing package archive/directory. 
+ // + database& db (p.db); + const package_name& nm (p.name ()); + const version& ver (p.available->version); + + if (find_existing (db, + nm, + nullopt /* version_constraint */).first != nullptr) + { + l5 ([&]{trace << "replacement of " << what << " version " + << p.available_name_version_db () << " is denied since " + << "it is being built as existing archive/directory";}); + + return nullopt; + } + + // Find the package command line entry and stash the reference to its + // version constraint, if any. Bail out if the constraint is specified as + // an exact package version. + // + const build_package* hold_pkg (nullptr); + const dependency_package* dep_pkg (nullptr); + const version_constraint* constraint (nullptr); + + for (const build_package& hp: hold_pkgs) + { + if (hp.name () == nm && hp.db == db) + { + hold_pkg = &hp; + + if (!hp.constraints.empty ()) + { + // Can only contain the user-specified constraint. + // + assert (hp.constraints.size () == 1); + + const version_constraint& c (hp.constraints[0].value); + + if (c.min_version == c.max_version) + { + l5 ([&]{trace << "replacement of " << what << " version " + << p.available_name_version_db () << " is denied " + << "since it is specified on command line as '" + << nm << ' ' << c << "'";}); + + return nullopt; + } + else + constraint = &c; + } + + break; + } + } + + if (hold_pkg == nullptr) + { + for (const dependency_package& dp: dep_pkgs) + { + if (dp.name == nm && dp.db != nullptr && *dp.db == db) + { + dep_pkg = &dp; + + if (dp.constraint) + { + const version_constraint& c (*dp.constraint); + + if (c.min_version == c.max_version) + { + l5 ([&]{trace << "replacement of " << what << " version " + << p.available_name_version_db () << " is denied " + << "since it is specified on command line as '?" 
+ << nm << ' ' << c << "'";}); + + return nullopt; + } + else + constraint = &c; + } + + break; + } + } + } + + // Bail out if the selected package version is held and the package is not + // specified on the command line nor is being upgraded/deorphaned via its + // dependents recursively. + // + const shared_ptr<selected_package>& sp (p.selected); + + if (sp != nullptr && sp->hold_version && + hold_pkg == nullptr && dep_pkg == nullptr && + !p.upgrade && !p.deorphan) + { + l5 ([&]{trace << "replacement of " << what << " version " + << p.available_name_version_db () << " is denied since " + << "it is already built to hold version and it is not " + << "specified on command line nor is being upgraded or " + << "deorphaned";}); + + return nullopt; + } + + transaction t (db); + + // Collect the repository fragments to search the available packages in. + // + config_repo_fragments rfs; + + // Add a repository fragment to the specified list, suppressing duplicates. + // + auto add = [] (shared_ptr<repository_fragment>&& rf, + vector<shared_ptr<repository_fragment>>& rfs) + { + if (find (rfs.begin (), rfs.end (), rf) == rfs.end ()) + rfs.push_back (move (rf)); + }; + + // If the package is specified as build-to-hold on the command line, then + // collect the root repository fragment from its database. Otherwise, + // collect the repository fragments its dependent packages come from. + // + if (hold_pkg != nullptr) + { + add (db.find<repository_fragment> (empty_string), rfs[db]); + } + else + { + // Collect the repository fragments the new dependents come from. + // + if (p.required_by_dependents) + { + for (const package_version_key& dvk: p.required_by) + { + if (dvk.version) // Real package? + { + const build_package* d (pkgs.entered_build (dvk.db, dvk.name)); + + // Must be collected as a package build (see + // build_package::required_by for details). 
+ // + assert (d != nullptr && + d->action && + *d->action == build_package::build && + d->available != nullptr); + + for (const package_location& pl: d->available->locations) + { + const lazy_shared_ptr<repository_fragment>& lrf ( + pl.repository_fragment); + + // Note that here we also handle dependents fetched/unpacked + // using the existing archive/directory adding the root + // repository fragments from their configurations. + // + if (!rep_masked_fragment (lrf)) + add (lrf.load (), rfs[lrf.database ()]); + } + } + } + } + + // Collect the repository fragments the existing dependents come from. + // + // Note that all the existing dependents are already in the map (since + // collect_dependents() has already been called) and are either + // reconfigure adjustments or non-collected recursively builds. + // + if (sp != nullptr) + { + for (database& ddb: db.dependent_configs ()) + { + for (const auto& pd: query_dependents (ddb, nm, db)) + { + const build_package* d (pkgs.entered_build (ddb, pd.name)); + + // See collect_dependents() for details. + // + assert (d != nullptr && d->action); + + if ((*d->action == build_package::adjust && + (d->flags & build_package::adjust_reconfigure) != 0) || + (*d->action == build_package::build && !d->dependencies)) + { + shared_ptr<selected_package> p ( + ddb.load<selected_package> (pd.name)); + + add_dependent_repo_fragments (ddb, p, rfs); + } + } + } + } + } + + // Query the dependency available packages from all the collected + // repository fragments and select the most appropriate one. Note that + // this code is inspired by the evaluate_dependency() function + // implementation, which documents the below logic in great detail. + // + optional<version_constraint> c (constraint != nullptr + ? *constraint + : optional<version_constraint> ()); + + if (!c && p.upgrade && !*p.upgrade) + { + assert (sp != nullptr); // See build_package::upgrade. + + c = patch_constraint (sp); + + assert (c); // See build_package::upgrade. 
+ } + + available_packages afs (find_available (nm, c, rfs)); + + using available = pair<shared_ptr<available_package>, + lazy_shared_ptr<repository_fragment>>; + + available ra; + + // Version to deorphan. + // + const version* dov (p.deorphan ? &sp->version : nullptr); + + optional<version_constraint> dopc; // Patch constraint for the above. + optional<version_constraint> domc; // Minor constraint for the above. + + bool orphan_best_match (p.deorphan && constraint == nullptr && !p.upgrade); + + if (orphan_best_match) + { + // Note that non-zero iteration makes a version non-standard, so we + // reset it to 0 to produce the patch/minor constraints. + // + version v (dov->epoch, + dov->upstream, + dov->release, + dov->revision, + 0 /* iteration */); + + dopc = patch_constraint (nm, v, true /* quiet */); + domc = minor_constraint (nm, v, true /* quiet */); + } + + available deorphan_latest_iteration; + available deorphan_later_revision; + available deorphan_later_patch; + available deorphan_later_minor; + available deorphan_latest_available; + + // Return true if a version satisfies all the dependency constraints. + // Otherwise, save all the being built unsatisfied dependents into the + // resulting list, suppressing duplicates. 
+ //
+ auto satisfactory = [&p, &unsatisfied_dpts] (const version& v)
+ {
+ bool r (true);
+
+ for (const auto& c: p.constraints)
+ {
+ if (!satisfies (v, c.value))
+ {
+ r = false;
+
+ if (c.dependent.version && !c.selected_dependent)
+ {
+ package_key pk (c.dependent.db, c.dependent.name);
+
+ if (find (unsatisfied_dpts.begin (),
+ unsatisfied_dpts.end (),
+ pk) == unsatisfied_dpts.end ())
+ unsatisfied_dpts.push_back (move (pk));
+ }
+ }
+ }
+
+ return r;
+ };
+
+ for (available& af: afs)
+ {
+ shared_ptr<available_package>& ap (af.first);
+
+ if (ap->stub ())
+ continue;
+
+ const version& av (ap->version);
+
+ // Skip if the available package version doesn't satisfy all the
+ // constraints (note: must be checked first since has a byproduct).
+ //
+ if (!satisfactory (av))
+ continue;
+
+ // Don't offer to replace to the same version.
+ //
+ if (av == ver)
+ continue;
+
+ // Don't repeatedly offer the same adjustments for the same command
+ // line.
+ //
+ if (cmdline_adjs.tried_earlier (db, nm, av))
+ {
+ l5 ([&]{trace << "replacement " << package_version_key (db, nm, av)
+ << " tried earlier for same command line, skipping";});
+
+ continue;
+ }
+
+ // If we aim to upgrade to the latest version and it tends to be less
+ than the selected one, then what we currently have is the best that
+ we can get. Thus, we use the selected version as a replacement,
+ unless it doesn't satisfy all the constraints or we are deorphaning.
+ //
+ if (constraint == nullptr && sp != nullptr)
+ {
+ const version& sv (sp->version);
+ if (av < sv && !sp->system () && !p.deorphan)
+ {
+ // Only consider the selected package if its version is satisfactory
+ // for its new dependents (note: must be checked first since has a
+ // byproduct), differs from the version being replaced, and was
+ // never used for the same command line (see above for details). 
+ // + if (satisfactory (sv) && sv != ver) + { + if (!cmdline_adjs.tried_earlier (db, nm, sv)) + { + ra = make_available_fragment (o, db, sp); + break; + } + else + l5 ([&]{trace << "selected package replacement " + << package_version_key (db, nm, sp->version) + << " tried earlier for same command line, " + << "skipping";}); + } + } + } + + if (orphan_best_match) + { + if (av == *dov) + { + ra = move (af); + break; + } + + if (deorphan_latest_iteration.first == nullptr && + av.compare (*dov, false /* revision */, true /* iteration */) == 0) + deorphan_latest_iteration = af; + + if (deorphan_later_revision.first == nullptr && + av.compare (*dov, true /* revision */) == 0 && + av.compare (*dov, false /* revision */, true /* iteration */) > 0) + deorphan_later_revision = af; + + if (deorphan_later_patch.first == nullptr && + dopc && satisfies (av, *dopc) && + av.compare (*dov, true /* revision */) > 0) // Patch is greater? + deorphan_later_patch = af; + + if (deorphan_later_minor.first == nullptr && + domc && satisfies (av, *domc) && + av.compare (*dov, true /* revision */) > 0 && + deorphan_later_patch.first == nullptr) + deorphan_later_minor = af; + + if (deorphan_latest_available.first == nullptr) + deorphan_latest_available = move (af); + + if (av.compare (*dov, false /* revision */, true /* iteration */) < 0) + { + assert (deorphan_latest_iteration.first != nullptr || + deorphan_later_revision.first != nullptr || + deorphan_later_patch.first != nullptr || + deorphan_later_minor.first != nullptr || + deorphan_latest_available.first != nullptr); + + break; + } + } + else + { + ra = move (af); + break; + } + } + + shared_ptr<available_package>& rap (ra.first); + + if (rap == nullptr && orphan_best_match) + { + if (deorphan_latest_iteration.first != nullptr) + ra = move (deorphan_latest_iteration); + else if (deorphan_later_revision.first != nullptr) + ra = move (deorphan_later_revision); + else if (deorphan_later_patch.first != nullptr) + ra = move 
(deorphan_later_patch); + else if (deorphan_later_minor.first != nullptr) + ra = move (deorphan_later_minor); + else if (deorphan_latest_available.first != nullptr) + ra = move (deorphan_latest_available); + } + + t.commit (); + + // Bail out if no appropriate replacement is found and return the + // command line adjustment object otherwise. + // + if (rap == nullptr) + return nullopt; + + optional<cmdline_adjustment> r; + + lazy_shared_ptr<repository_fragment>& raf (ra.second); + + if (hold_pkg != nullptr || dep_pkg != nullptr) // Specified on command line? + { + if (hold_pkg != nullptr) + { + r = cmdline_adjustment (hold_pkg->db, + hold_pkg->name (), + move (rap), + move (raf)); + + if (constraint != nullptr) + { + l5 ([&]{trace << "replace " << what << " version " + << p.available_name_version () << " with " + << r->version << " by overwriting constraint " + << cmdline_adjs.to_string (*r) << " on command line";}); + } + else + { + l5 ([&]{trace << "replace " << what << " version " + << p.available_name_version () << " with " + << r->version << " by adding constraint " + << cmdline_adjs.to_string (*r) << " on command line";}); + } + } + else // dep_pkg != nullptr + { + r = cmdline_adjustment (*dep_pkg->db, dep_pkg->name, rap->version); + + if (constraint != nullptr) + { + l5 ([&]{trace << "replace " << what << " version " + << p.available_name_version () << " with " + << r->version << " by overwriting constraint " + << cmdline_adjs.to_string (*r) << " on command line";}); + } + else + { + l5 ([&]{trace << "replace " << what << " version " + << p.available_name_version () << " with " + << r->version << " by adding constraint " + << cmdline_adjs.to_string (*r) << " on command line";}); + } + } + } + else // The package is not specified on the command line. + { + // If the package is configured as system, then since it is not + // specified by the user (both hold_pkg and dep_pkg are NULL) we may + // only build it as system. Thus we wouldn't be here (see above). 
+ // + assert (sp == nullptr || !sp->system ()); + + // Similar to the collect lambda in collect_build_prerequisites(), issue + // the warning if we are forcing an up/down-grade. + // + if (sp != nullptr && (sp->hold_package || verb >= 2)) + { + const version& av (rap->version); + const version& sv (sp->version); + + int ud (sv.compare (av)); + + if (ud != 0) + { + for (const auto& c: p.constraints) + { + if (c.dependent.version && !satisfies (sv, c.value)) + { + warn << "package " << c.dependent << " dependency on (" + << nm << ' ' << c.value << ") is forcing " + << (ud < 0 ? "up" : "down") << "grade of " << *sp << db + << " to " << av; + + break; + } + } + } + } + + // For the selected built-to-hold package create the build-to-hold + // package spec and the dependency spec otherwise. + // + if (sp != nullptr && sp->hold_package) + { + r = cmdline_adjustment (db, + nm, + move (rap), + move (raf), + p.upgrade, + p.deorphan); + + l5 ([&]{trace << "replace " << what << " version " + << p.available_name_version () << " with " << r->version + << " by adding package spec " + << cmdline_adjs.to_string (*r) + << " to command line";}); + } + else + { + r = cmdline_adjustment (db, nm, rap->version, p.upgrade, p.deorphan); + + l5 ([&]{trace << "replace " << what << " version " + << p.available_name_version () << " with " << r->version + << " by adding package spec " + << cmdline_adjs.to_string (*r) + << " to command line";}); + } + } + + return r; + } + + // Try to replace some of the being built, potentially indirect, dependents + // of the specified dependency with a different available version, + // satisfactory for all its new and existing dependents (if any). Return the + // command line adjustment if such a replacement is deduced and nullopt + // otherwise. 
It is assumed that the dependency replacement has been + // (unsuccessfully) tried by using the try_replace_dependency() call and its + // resulting list of the dependents, unsatisfied by some of the dependency + // available versions, is also passed to the function call as the + // unsatisfied_dpts argument. + // + // Specifically, try to replace the dependents in the following order by + // calling try_replace_dependency() for them: + // + // - Immediate dependents unsatisfied with the specified dependency. For the + // sake of tracing and documentation, we (naturally) call them unsatisfied + // dependents. + // + // - Immediate dependents satisfied with the dependency but applying the + // version constraint which has prevented us from picking a version which + // would be satisfactory to the unsatisfied dependents. Note that this + // information is only available for the being built unsatisfied + // dependents (added by collect_build() rather than collect_dependents()). + // We call them conflicting dependents. + // + // - Immediate dependents which apply constraint to this dependency, + // incompatible with constraints of some other dependents (both new and + // existing). We call them unsatisfiable dependents. + // + // - Immediate dependents from unsatisfied_dpts argument. We call them + // constraining dependents. + // + // - Dependents of all the above types of dependents, discovered by + // recursively calling try_replace_dependent() for them. + // + optional<cmdline_adjustment> + try_replace_dependent (const common_options& o, + const build_package& p, // Dependency. 
+ const vector<unsatisfied_constraint>* ucs, + const build_packages& pkgs, + const cmdline_adjustments& cmdline_adjs, + const vector<package_key>& unsatisfied_dpts, + vector<build_package>& hold_pkgs, + dependency_packages& dep_pkgs, + set<const build_package*>& visited_dpts) + { + tracer trace ("try_replace_dependent"); + + // Bail out if the dependent has already been visited and add it to the + // visited set otherwise. + // + if (!visited_dpts.insert (&p).second) + return nullopt; + + using constraint_type = build_package::constraint_type; + + const shared_ptr<available_package>& ap (p.available); + assert (ap != nullptr); // By definition. + + const version& av (ap->version); + + // List of the dependents which we have (unsuccessfully) tried to replace + // together with the lists of the constraining dependents. + // + vector<pair<package_key, vector<package_key>>> dpts; + + // Try to replace a dependent, unless we have already tried to replace it. + // + auto try_replace = [&o, + &p, + &pkgs, + &cmdline_adjs, + &hold_pkgs, + &dep_pkgs, + &visited_dpts, + &dpts, + &trace] (package_key dk, const char* what) + -> optional<cmdline_adjustment> + { + if (find_if (dpts.begin (), dpts.end (), + [&dk] (const auto& v) {return v.first == dk;}) == + dpts.end ()) + { + const build_package* d (pkgs.entered_build (dk)); + + // Always come from the dependency's constraints member. + // + assert (d != nullptr); + + // Skip the visited dependents since, by definition, we have already + // tried to replace them. 
+ // + if (find (visited_dpts.begin (), visited_dpts.end (), d) == + visited_dpts.end ()) + { + l5 ([&]{trace << "try to replace " << what << ' ' + << d->available_name_version_db () << " of dependency " + << p.available_name_version_db () << " with some " + << "other version";}); + + vector<package_key> uds; + + if (optional<cmdline_adjustment> a = try_replace_dependency ( + o, + *d, + pkgs, + hold_pkgs, + dep_pkgs, + cmdline_adjs, + uds, + what)) + { + return a; + } + + dpts.emplace_back (move (dk), move (uds)); + } + } + + return nullopt; + }; + + // Try to replace unsatisfied dependents. + // + for (const constraint_type& c: p.constraints) + { + const package_version_key& dvk (c.dependent); + + if (dvk.version && !c.selected_dependent && !satisfies (av, c.value)) + { + if (optional<cmdline_adjustment> a = try_replace ( + package_key (dvk.db, dvk.name), "unsatisfied dependent")) + { + return a; + } + } + } + + // Try to replace conflicting dependents. + // + if (ucs != nullptr) + { + for (const unsatisfied_constraint& uc: *ucs) + { + const package_version_key& dvk (uc.constraint.dependent); + + if (dvk.version) + { + if (optional<cmdline_adjustment> a = try_replace ( + package_key (dvk.db, dvk.name), "conflicting dependent")) + { + return a; + } + } + } + } + + // Try to replace unsatisfiable dependents. + // + for (const constraint_type& c1: p.constraints) + { + const package_version_key& dvk (c1.dependent); + + if (dvk.version && !c1.selected_dependent) + { + const version_constraint& v1 (c1.value); + + bool unsatisfiable (false); + for (const constraint_type& c2: p.constraints) + { + const version_constraint& v2 (c2.value); + + if (!satisfies (v1, v2) && !satisfies (v2, v1)) + { + unsatisfiable = true; + break; + } + } + + if (unsatisfiable) + { + if (optional<cmdline_adjustment> a = try_replace ( + package_key (dvk.db, dvk.name), "unsatisfiable dependent")) + { + return a; + } + } + } + } + + // Try to replace constraining dependents. 
+ // + for (const auto& dk: unsatisfied_dpts) + { + if (optional<cmdline_adjustment> a = try_replace ( + dk, "constraining dependent")) + { + return a; + } + } + + // Try to replace dependents of the above dependents, recursively. + // + for (const auto& dep: dpts) + { + const build_package* d (pkgs.entered_build (dep.first)); + + assert (d != nullptr); + + if (optional<cmdline_adjustment> a = try_replace_dependent ( + o, + *d, + nullptr /* unsatisfied_constraints */, + pkgs, + cmdline_adjs, + dep.second, + hold_pkgs, + dep_pkgs, + visited_dpts)) + { + return a; + } + } + + return nullopt; + } + + // Return false if the plan execution was noop. If unsatisfied dependents + // are specified then we are in the simulation mode. // static bool execute_plan (const pkg_build_options&, build_package_list&, - bool simulate, + unsatisfied_dependents* simulate, const function<find_database_function>&); using pkg_options = pkg_build_pkg_options; @@ -917,13 +2938,14 @@ namespace bpkg dr << fail << "both --immediate|-i and --recursive|-r specified"; // The --immediate or --recursive option can only be specified with an - // explicit --upgrade or --patch. + // explicit --upgrade, --patch, or --deorphan. // if (const char* n = (o.immediate () ? "--immediate" : o.recursive () ? "--recursive" : nullptr)) { - if (!o.upgrade () && !o.patch ()) - dr << fail << n << " requires explicit --upgrade|-u or --patch|-p"; + if (!o.upgrade () && !o.patch () && !o.deorphan ()) + dr << fail << n << " requires explicit --upgrade|-u, --patch|-p, or " + << "--deorphan"; } if (((o.upgrade_immediate () ? 1 : 0) + @@ -933,6 +2955,10 @@ namespace bpkg dr << fail << "multiple --(upgrade|patch)-(immediate|recursive) " << "specified"; + if (o.deorphan_immediate () && o.deorphan_recursive ()) + dr << fail << "both --deorphan-immediate and --deorphan-recursive " + << "specified"; + if (multi_config ()) { if (const char* opt = o.config_name_specified () ? 
"--config-name" : @@ -959,13 +2985,16 @@ namespace bpkg dst.recursive (src.recursive ()); // If -r|-i was specified at the package level, then so should - // -u|-p. + // -u|-p and --deorphan. // if (!(dst.upgrade () || dst.patch ())) { dst.upgrade (src.upgrade ()); dst.patch (src.patch ()); } + + if (!dst.deorphan ()) + dst.deorphan (src.deorphan ()); } if (!(dst.upgrade_immediate () || dst.upgrade_recursive () || @@ -977,6 +3006,12 @@ namespace bpkg dst.patch_recursive (src.patch_recursive ()); } + if (!(dst.deorphan_immediate () || dst.deorphan_recursive ())) + { + dst.deorphan_immediate (src.deorphan_immediate ()); + dst.deorphan_recursive (src.deorphan_recursive ()); + } + dst.dependency (src.dependency () || dst.dependency ()); dst.keep_out (src.keep_out () || dst.keep_out ()); dst.disfigure (src.disfigure () || dst.disfigure ()); @@ -1020,19 +3055,22 @@ namespace bpkg static bool compare_options (const pkg_options& x, const pkg_options& y) { - return x.keep_out () == y.keep_out () && - x.disfigure () == y.disfigure () && - x.dependency () == y.dependency () && - x.upgrade () == y.upgrade () && - x.patch () == y.patch () && - x.immediate () == y.immediate () && - x.recursive () == y.recursive () && - x.upgrade_immediate () == y.upgrade_immediate () && - x.upgrade_recursive () == y.upgrade_recursive () && - x.patch_immediate () == y.patch_immediate () && - x.patch_recursive () == y.patch_recursive () && - x.checkout_root () == y.checkout_root () && - x.checkout_purge () == y.checkout_purge (); + return x.keep_out () == y.keep_out () && + x.disfigure () == y.disfigure () && + x.dependency () == y.dependency () && + x.upgrade () == y.upgrade () && + x.patch () == y.patch () && + x.deorphan () == y.deorphan () && + x.immediate () == y.immediate () && + x.recursive () == y.recursive () && + x.upgrade_immediate () == y.upgrade_immediate () && + x.upgrade_recursive () == y.upgrade_recursive () && + x.patch_immediate () == y.patch_immediate () && + x.patch_recursive 
() == y.patch_recursive () && + x.deorphan_immediate () == y.deorphan_immediate () && + x.deorphan_recursive () == y.deorphan_recursive () && + x.checkout_root () == y.checkout_root () && + x.checkout_purge () == y.checkout_purge (); } int @@ -1070,7 +3108,11 @@ namespace bpkg << "specified" << info << "run 'bpkg help pkg-build' for more information"; - if (!args.more () && !o.upgrade () && !o.patch ()) + if (o.sys_no_query () && o.sys_install ()) + fail << "both --sys-no-query and --sys-install specified" << + info << "run 'bpkg help pkg-build' for more information"; + + if (!args.more () && !o.upgrade () && !o.patch () && !o.deorphan ()) fail << "package name argument expected" << info << "run 'bpkg help pkg-build' for more information"; @@ -1093,6 +3135,10 @@ namespace bpkg ? empty_string : '[' + config_dirs[0].representation () + ']')); + // Command line as a dependent. + // + package_version_key cmd_line (mdb, "command line"); + current_configs.push_back (mdb); if (config_dirs.size () != 1) @@ -1118,6 +3164,8 @@ namespace bpkg if (!current (db)) current_configs.push_back (db); } + + t.commit (); } validate_options (o, ""); // Global package options. @@ -1128,7 +3176,7 @@ namespace bpkg // will modify the cached instance, which means our list will always "see" // their updated state. // - // Also note that rep_fetch() must be called in session. + // Also note that rep_fetch() and pkg_fetch() must be called in session. // session ses; @@ -1239,7 +3287,13 @@ namespace bpkg if (a.find ('=') == string::npos) fail << "unexpected group argument '" << a << "'"; - cvs.push_back (move (trim (a))); + trim (a); + + if (a[0] == '!') + fail << "global override in package-specific configuration " + << "variable '" << a << "'"; + + cvs.push_back (move (a)); } } @@ -1515,6 +3569,14 @@ namespace bpkg string () /* reason for "fetching ..." */); } + // Now, as repo_configs is filled and the repositories are fetched mask + // the repositories, if any. 
+ // + if (o.mask_repository_specified () || o.mask_repository_uuid_specified ()) + rep_mask (o.mask_repository (), + o.mask_repository_uuid (), + current_configs); + // Expand the package specs into individual package args, parsing them // into the package scheme, name, and version constraint components, and // also saving associated options and configuration variables. @@ -1536,6 +3598,12 @@ namespace bpkg string value; pkg_options options; strings config_vars; + + // If schema is sys then this member indicates whether the constraint + // came from the system package manager (not NULL) or user/fallback + // (NULL). + // + const system_package_status* system_status; }; auto arg_parsed = [] (const pkg_arg& a) {return !a.name.empty ();}; @@ -1604,16 +3672,19 @@ namespace bpkg const pkg_options& o (a.options); - add_bool ("--keep-out", o.keep_out ()); - add_bool ("--disfigure", o.disfigure ()); - add_bool ("--upgrade", o.upgrade ()); - add_bool ("--patch", o.patch ()); - add_bool ("--immediate", o.immediate ()); - add_bool ("--recursive", o.recursive ()); - add_bool ("--upgrade-immediate", o.upgrade_immediate ()); - add_bool ("--upgrade-recursive", o.upgrade_recursive ()); - add_bool ("--patch-immediate", o.patch_immediate ()); - add_bool ("--patch-recursive", o.patch_recursive ()); + add_bool ("--keep-out", o.keep_out ()); + add_bool ("--disfigure", o.disfigure ()); + add_bool ("--upgrade", o.upgrade ()); + add_bool ("--patch", o.patch ()); + add_bool ("--deorphan", o.deorphan ()); + add_bool ("--immediate", o.immediate ()); + add_bool ("--recursive", o.recursive ()); + add_bool ("--upgrade-immediate", o.upgrade_immediate ()); + add_bool ("--upgrade-recursive", o.upgrade_recursive ()); + add_bool ("--patch-immediate", o.patch_immediate ()); + add_bool ("--patch-recursive", o.patch_recursive ()); + add_bool ("--deorphan-immediate", o.deorphan_immediate ()); + add_bool ("--deorphan-recursive", o.deorphan_recursive ()); if (o.checkout_root_specified ()) add_string 
("--checkout-root", o.checkout_root ().string ()); @@ -1651,23 +3722,141 @@ namespace bpkg return r; }; - // Add the system package authoritative information to the database's - // system repository, unless it already contains authoritative information - // for this package. + // Figure out the system package version unless explicitly specified and + // add the system package authoritative information to the database's + // system repository unless the database is NULL or it already contains + // authoritative information for this package. Return the figured out + // system package version as constraint. // // Note that it is assumed that all the possible duplicates are handled // elsewhere/later. // - auto add_system_package = [] (database& db, - const package_name& nm, - const version& v) + auto add_system_package = [&o] (database* db, + const package_name& nm, + optional<version_constraint> vc, + const system_package_status* sps, + vector<shared_ptr<available_package>>* stubs) + -> pair<version_constraint, const system_package_status*> { - assert (db.system_repository); + if (!vc) + { + assert (sps == nullptr); + + // See if we should query the system package manager. + // + if (!sys_pkg_mgr) + sys_pkg_mgr = o.sys_no_query () + ? nullptr + : make_consumption_system_package_manager (o, + host_triplet, + o.sys_distribution (), + o.sys_architecture (), + o.sys_install (), + !o.sys_no_fetch (), + o.sys_yes (), + o.sys_sudo ()); + if (*sys_pkg_mgr != nullptr) + { + system_package_manager& spm (**sys_pkg_mgr); + + // First check the cache. + // + optional<const system_package_status*> os (spm.status (nm, nullptr)); + + available_packages aps; + if (!os) + { + // If no cache hit, then collect the available packages for the + // mapping information. + // + aps = find_available_all (current_configs, nm); + + // If no source/stub for the package (and thus no mapping), issue + // diagnostics consistent with other such places unless explicitly + // allowed by the user. 
+ // + if (aps.empty ()) + { + if (!o.sys_no_stub ()) + fail << "unknown package " << nm << + info << "consider specifying --sys-no-stub or " << nm << "/*"; + + // Add the stub package to the imaginary system repository (like + // the user-specified case below). + // + if (stubs != nullptr) + stubs->push_back (make_shared<available_package> (nm)); + } + } + + // This covers both our diagnostics below as well as anything that + // might be issued by status(). + // + auto df = make_diag_frame ( + [&nm] (diag_record& dr) + { + dr << info << "specify " << nm << "/* if package is not " + << "installed with system package manager"; + + dr << info << "specify --sys-no-query to disable system " + << "package manager interactions"; + }); + + if (!os) + { + os = spm.status (nm, &aps); + assert (os); + } + + if ((sps = *os) != nullptr) + vc = version_constraint (sps->version); + else + { + diag_record dr (fail); - const system_package* sp (db.system_repository->find (nm)); + dr << "no installed " << (o.sys_install () ? "or available " : "") + << "system package for " << nm; + + if (!o.sys_install ()) + dr << info << "specify --sys-install to try to install it"; + } + } + else + vc = version_constraint (wildcard_version); + } + else + { + // The system package may only have an exact/wildcard version + // specified. + // + assert (vc->min_version == vc->max_version); + + // For system packages not associated with a specific repository + // location add the stub package to the imaginary system repository + // (see below for details). + // + if (stubs != nullptr) + stubs->push_back (make_shared<available_package> (nm)); + } + + if (db != nullptr) + { + assert (db->system_repository); + + const system_package* sp (db->system_repository->find (nm)); + + // Note that we don't check for the version match here since that's + // handled by check_dup() lambda at a later stage, which covers both + // db and no-db cases consistently. 
+ // + if (sp == nullptr || !sp->authoritative) + db->system_repository->insert (nm, + *vc->min_version, + true /* authoritative */, + sps); + } - if (sp == nullptr || !sp->authoritative) - db.system_repository->insert (nm, v, true /* authoritative */); + return make_pair (move (*vc), sps); }; // Create the parsed package argument. Issue diagnostics and fail if the @@ -1679,15 +3868,23 @@ namespace bpkg package_name nm, optional<version_constraint> vc, pkg_options os, - strings vs) -> pkg_arg + strings vs, + vector<shared_ptr<available_package>>* stubs = nullptr) + -> pkg_arg { assert (!vc || !vc->empty ()); // May not be empty if present. if (db == nullptr) assert (sc == package_scheme::sys && os.dependency ()); - pkg_arg r { - db, sc, move (nm), move (vc), string (), move (os), move (vs)}; + pkg_arg r {db, + sc, + move (nm), + move (vc), + string () /* value */, + move (os), + move (vs), + nullptr /* system_status */}; // Verify that the package database is specified in the multi-config // mode, unless this is a system dependency package. @@ -1706,17 +3903,16 @@ namespace bpkg { case package_scheme::sys: { - if (!r.constraint) - r.constraint = version_constraint (wildcard_version); + assert (stubs != nullptr); - // The system package may only have an exact/wildcard version - // specified. - // - assert (r.constraint->min_version == r.constraint->max_version); - - if (db != nullptr) - add_system_package (*db, r.name, *r.constraint->min_version); + auto sp (add_system_package (db, + r.name, + move (r.constraint), + nullptr /* system_package_status */, + stubs)); + r.constraint = move (sp.first); + r.system_status = sp.second; break; } case package_scheme::none: break; // Nothing to do. 
@@ -1738,7 +3934,8 @@ namespace bpkg nullopt /* constraint */, move (v), move (os), - move (vs)}; + move (vs), + nullptr /* system_status */}; }; vector<pkg_arg> pkg_args; @@ -1801,13 +3998,6 @@ namespace bpkg parse_package_version_constraint ( s, sys, version_flags (sc), version_only (sc))); - // For system packages not associated with a specific repository - // location add the stub package to the imaginary system - // repository (see above for details). - // - if (sys && vc) - stubs.push_back (make_shared<available_package> (n)); - pkg_options& o (ps.options); // Disregard the (main) database for a system dependency with @@ -1824,7 +4014,8 @@ namespace bpkg move (n), move (vc), move (o), - move (ps.config_vars))); + move (ps.config_vars), + &stubs)); } else // Add unparsed. pkg_args.push_back (arg_raw (ps.db, @@ -2058,7 +4249,8 @@ namespace bpkg move (n), move (vc), ps.options, - ps.config_vars)); + ps.config_vars, + &stubs)); } } } @@ -2080,6 +4272,32 @@ namespace bpkg dependency_packages dep_pkgs; recursive_packages rec_pkgs; + // Note that the command line adjustments which resolve the unsatisfied + // dependent issue (see unsatisfied_dependents for details) may + // potentially be sub-optimal, since we do not perform the full + // backtracking by trying all the possible adjustments and picking the + // most optimal combination. Instead, we keep collecting adjustments until + // either the package builds collection succeeds or there are no more + // adjustment combinations to try (and we don't try all of them). As a + // result we, for example, may end up with some redundant constraints on + // the command line just because the respective dependents have been + // evaluated first. Generally, dropping all the redundant adjustments can + // potentially be quite time-consuming, since we would need to try + // dropping all their possible combinations. 
We, however, will implement + // the refinement for only the common case (adjustments are independent), + // trying to drop just one adjustment per the refinement cycle iteration + // and wait and see how it goes. + // + cmdline_adjustments cmdline_adjs (hold_pkgs, dep_pkgs); + + // If both are present, then we are in the command line adjustments + // refinement cycle, where cmdline_refine_adjustment is the adjustment + // being currently dropped and cmdline_refine_index is its index on the + // stack (as it appears at the beginning of the cycle). + // + optional<cmdline_adjustment> cmdline_refine_adjustment; + optional<size_t> cmdline_refine_index; + { // Check if the package is a duplicate. Return true if it is but // harmless. @@ -2131,7 +4349,7 @@ namespace bpkg !compare_options (a.options, pa.options) || a.config_vars != pa.config_vars)) fail << "duplicate package " << pa.name << - info << "first mentioned as " << arg_string (r.first->second) << + info << "first mentioned as " << arg_string (a) << info << "second mentioned as " << arg_string (pa); return !r.second; @@ -2139,6 +4357,120 @@ namespace bpkg transaction t (mdb); + // Return the available package that matches the specified orphan best + // (see evaluate_dependency() description for details). Also return the + // repository fragment the package comes from. Return a pair of NULLs if + // no suitable package has been found. + // + auto find_orphan_match = + [] (const shared_ptr<selected_package>& sp, + const lazy_shared_ptr<repository_fragment>& root) + { + using available = pair<shared_ptr<available_package>, + lazy_shared_ptr<repository_fragment>>; + + assert (sp != nullptr); + + const package_name& n (sp->name); + const version& v (sp->version); + optional<version_constraint> vc {version_constraint (v)}; + + // Note that non-zero iteration makes a version non-standard, so we + // reset it to 0 to produce the patch/minor constraints. 
+ // + version vr (v.epoch, + v.upstream, + v.release, + v.revision, + 0 /* iteration */); + + optional<version_constraint> pc ( + patch_constraint (n, vr, true /* quiet */)); + + optional<version_constraint> mc ( + minor_constraint (n, vr, true /* quiet */)); + + // Note: explicit revision makes query_available() to always consider + // revisions (but not iterations) regardless of the revision argument + // value. + // + optional<version_constraint> verc { + version_constraint (version (v.epoch, + v.upstream, + v.release, + v.revision ? v.revision : 0, + 0 /* iteration */))}; + + optional<version_constraint> vlc { + version_constraint (version (v.epoch, + v.upstream, + v.release, + nullopt, + 0 /* iteration */))}; + + // Find the latest available non-stub package, optionally matching a + // constraint and considering revision. If a package is found, then + // cache it together with the repository fragment it comes from and + // return true. + // + available find_result; + const version* find_version (nullptr); + auto find = [&n, + &root, + &find_result, + &find_version] (const optional<version_constraint>& c, + bool revision = false) -> bool + { + available r ( + find_available_one (n, c, root, false /* prereq */, revision)); + + const shared_ptr<available_package>& ap (r.first); + + if (ap != nullptr && !ap->stub ()) + { + find_result = move (r); + find_version = &find_result.first->version; + return true; + } + else + return false; + }; + + if (// Same version, revision, and iteration. + // + find (vc, true) || + // + // Latest iteration of same version and revision. + // + find (verc) || + // + // Later revision of same version. + // + (find (vlc) && + find_version->compare (v, + false /* revision */, + true /* iteration */) > 0) || + // + // Later patch of same version. + // + (pc && find (pc) && + find_version->compare (v, true /* revision */) > 0) || + // + // Later minor of same version. 
+ // + (mc && find (mc) && + find_version->compare (v, true /* revision */) > 0) || + // + // Latest available version, including earlier. + // + find (nullopt)) + { + return find_result; + } + + return available (); + }; + // Here is what happens here: for unparsed package args we are going to // try and guess whether we are dealing with a package archive, package // directory, or package name/version by first trying it as an archive, @@ -2163,6 +4495,7 @@ namespace bpkg // lazy_shared_ptr<repository_fragment> af; shared_ptr<available_package> ap; + bool existing (false); // True if build as an archive or directory. if (!arg_parsed (pa)) { @@ -2188,20 +4521,13 @@ namespace bpkg false /* ignore_toolchain */, false /* expand_values */, true /* load_buildfiles */, - true /* complete_depends */, + true /* complete_values */, diag ? 2 : 1)); // This is a package archive. // l4 ([&]{trace << "archive '" << a << "': " << arg_string (pa);}); - // Supporting this would complicate things a bit, but we may add - // support for it one day. - // - if (pa.options.dependency ()) - fail << "package archive '" << a - << "' may not be built as a dependency"; - pa = arg_package (pdb, package_scheme::none, m.name, @@ -2212,6 +4538,9 @@ namespace bpkg af = root; ap = make_shared<available_package> (move (m)); ap->locations.push_back (package_location {root, move (a)}); + + existing_packages.push_back (make_pair (ref (*pdb), ap)); + existing = true; } } catch (const invalid_path&) @@ -2256,7 +4585,11 @@ namespace bpkg true /* load_buildfiles */, [&o, &d, &pvi] (version& v) { - pvi = package_version (o, d); + // Note that we also query subprojects since the package + // information will be used for the subsequent + // package_iteration() call. 
+ // + pvi = package_version (o, d, b_info_flags::subprojects); if (pvi.version) v = move (*pvi.version); @@ -2268,13 +4601,6 @@ namespace bpkg l4 ([&]{trace << "directory '" << d << "': " << arg_string (pa);}); - // Supporting this would complicate things a bit, but we may - // add support for it one day. - // - if (pa.options.dependency ()) - fail << "package directory '" << d - << "' may not be built as a dependency"; - // Fix-up the package version to properly decide if we need to // upgrade/downgrade the package. // @@ -2299,6 +4625,9 @@ namespace bpkg ap = make_shared<available_package> (move (m)); af = root; ap->locations.push_back (package_location {root, move (d)}); + + existing_packages.push_back (make_pair (ref (*pdb), ap)); + existing = true; } } catch (const invalid_path&) @@ -2320,6 +4649,7 @@ namespace bpkg // shared_ptr<selected_package> sp; bool patch (false); + bool deorphan (false); if (ap == nullptr) { @@ -2359,12 +4689,13 @@ namespace bpkg lazy_shared_ptr<repository_fragment> root (*pdb, empty_string); - // Either get the user-specified version or the latest allowed - // for a source code package. For a system package we will try - // to find the available package that matches the user-specified - // system version (preferable for the configuration negotiation - // machinery) and, if fail, fallback to picking the latest one - // just to make sure the package is recognized. + // Get the user-specified version, the latest allowed version, + // or the orphan best match for a source code package. For a + // system package we will try to find the available package that + // matches the user-specified system version (preferable for the + // configuration negotiation machinery) and, if fail, fallback + // to picking the latest one just to make sure the package is + // recognized. 
// optional<version_constraint> c; @@ -2394,10 +4725,48 @@ namespace bpkg else if (!sys || !wildcard (*pa.constraint)) c = pa.constraint; - auto rp (find_available_one (pa.name, c, root)); + if (pa.options.deorphan ()) + { + if (!sys) + { + if (sp == nullptr) + sp = pdb->find<selected_package> (pa.name); - if (rp.first == nullptr && sys && c) - rp = find_available_one (pa.name, nullopt, root); + if (sp != nullptr && orphan_package (*pdb, sp)) + deorphan = true; + } + + // If the package is not an orphan, its version is not + // constrained and upgrade/patch is not requested, then just + // skip the package. + // + if (!deorphan && + !pa.constraint && + !pa.options.upgrade () && + !pa.options.patch ()) + { + ++i; + continue; + } + } + + pair<shared_ptr<available_package>, + lazy_shared_ptr<repository_fragment>> rp ( + deorphan && + !pa.constraint && + !pa.options.upgrade () && + !pa.options.patch () + ? find_orphan_match (sp, root) + : find_available_one (pa.name, c, root)); + + if (rp.first == nullptr && sys) + { + available_packages aps ( + find_available_all (repo_configs, pa.name)); + + if (!aps.empty ()) + rp = move (aps.front ()); + } ap = move (rp.first); af = move (rp.second); @@ -2417,22 +4786,55 @@ namespace bpkg continue; // Save (both packages to hold and dependencies) as dependents for - // recursive upgrade. + // recursive upgrade/deorphaning. // { - optional<bool> u; - optional<bool> r; + // Recursive/immediate upgrade/patch. + // + optional<bool> r; // true -- recursive, false -- immediate. + optional<bool> u; // true -- upgrade, false -- patch. + + // Recursive/immediate deorphaning. + // + optional<bool> d; // true -- recursive, false -- immediate. 
const auto& po (pa.options); - if (po.upgrade_immediate ()) { u = true; r = false; } - else if (po.upgrade_recursive ()) { u = true; r = true; } - else if ( po.patch_immediate ()) { u = false; r = false; } - else if ( po.patch_recursive ()) { u = false; r = true; } - else if ( po.immediate ()) { u = po.upgrade (); r = false; } - else if ( po.recursive ()) { u = po.upgrade (); r = true; } + // Note that, for example, --upgrade-immediate wins over the + // --upgrade --recursive options pair. + // + if (po.immediate ()) + { + if (po.upgrade () || po.patch ()) + { + r = false; + u = po.upgrade (); + } - if (r) + if (po.deorphan ()) + d = false; + } + else if (po.recursive ()) + { + if (po.upgrade () || po.patch ()) + { + r = true; + u = po.upgrade (); + } + + if (po.deorphan ()) + d = true; + } + + if (po.upgrade_immediate ()) { u = true; r = false; } + else if (po.upgrade_recursive ()) { u = true; r = true; } + else if ( po.patch_immediate ()) { u = false; r = false; } + else if ( po.patch_recursive ()) { u = false; r = true; } + + if (po.deorphan_immediate ()) { d = false; } + else if (po.deorphan_recursive ()) { d = true; } + + if (r || d) { l4 ([&]{trace << "stash recursive package " << arg_string (pa);}); @@ -2441,7 +4843,9 @@ namespace bpkg // configuration. // if (pdb != nullptr) - rec_pkgs.push_back (recursive_package {*pdb, pa.name, *u, *r}); + rec_pkgs.push_back (recursive_package {*pdb, pa.name, + r, u && *u, + d}); } } @@ -2453,52 +4857,70 @@ namespace bpkg bool sys (arg_sys (pa)); - // Make sure that the package is known. - // - auto apr (find_available (repo_configs, - pa.name, - !sys ? pa.constraint : nullopt)); + if (pdb != nullptr) + sp = pdb->find<selected_package> (pa.name); - if (apr.empty ()) + // Make sure that the package is known. Only allow to unhold an + // unknown orphaned selected package (with the view that there is + // a good chance it will get dropped; and if not, such an unhold + // should be harmless). 
+ // + if (!existing && + find_available (repo_configs, + pa.name, + !sys ? pa.constraint : nullopt).empty ()) { - string n (arg_string (pa, false /* options */)); - - diag_record dr (fail); - dr << "unknown package " << n; - if (sys) + // Don't fail if the selected package is held and satisfies the + // constraints, if specified. Note that we may still fail later + // with the "not available from its dependents' repositories" + // error if the dependency is requested to be deorphaned and all + // its dependents are orphaned. + // + if (!(sp != nullptr && + sp->hold_package && + (!pa.constraint || satisfies (sp->version, pa.constraint)))) { - // Feels like we can't end up here if the version was specified - // explicitly. - // - dr << info << "consider specifying " << n << "/*"; + string n (arg_string (pa, false /* options */)); + + diag_record dr (fail); + dr << "unknown package " << n; + if (sys) + { + // Feels like we can't end up here if the version was specified + // explicitly. + // + dr << info << "consider specifying " << n << "/*"; + } + else + check_any_available (repo_configs, t, &dr); } - else - check_any_available (repo_configs, t, &dr); } if (pdb != nullptr) - { - // Save before the name move. - // - sp = pdb->find<selected_package> (pa.name); - pkg_confs.emplace_back (*pdb, pa.name); - } + + bool hold_version (pa.constraint.has_value ()); dep_pkgs.push_back ( dependency_package {pdb, move (pa.name), move (pa.constraint), + hold_version, move (sp), sys, - pa.options.patch (), + existing, + (pa.options.upgrade () || pa.options.patch () + ? pa.options.upgrade () + : optional<bool> ()), + pa.options.deorphan (), pa.options.keep_out (), pa.options.disfigure (), (pa.options.checkout_root_specified () ? 
move (pa.options.checkout_root ()) : optional<dir_path> ()), pa.options.checkout_purge (), - move (pa.config_vars)}); + move (pa.config_vars), + pa.system_status}); continue; } @@ -2558,17 +4980,18 @@ namespace bpkg // if (pa.constraint) { - for (;;) + for (;;) // Breakout loop. { if (ap != nullptr) // Must be that version, see above. break; // Otherwise, our only chance is that the already selected object - // satisfies the version constraint. + // satisfies the version constraint, unless we are deorphaning. // - if (sp != nullptr && - !sp->system () && - satisfies (sp->version, pa.constraint)) + if (sp != nullptr && + !sp->system () && + satisfies (sp->version, pa.constraint) && + !deorphan) break; // Derive ap from sp below. found = false; @@ -2588,14 +5011,17 @@ namespace bpkg // we have a newer version, we treat it as an upgrade request; // otherwise, why specify the package in the first place? We just // need to check if what we already have is "better" (i.e., - // newer). + // newer), unless we are deorphaning. // - if (sp != nullptr && !sp->system () && ap->version < sp->version) + if (sp != nullptr && + !sp->system () && + ap->version < sp->version && + !deorphan) ap = nullptr; // Derive ap from sp below. } else { - if (sp == nullptr || sp->system ()) + if (sp == nullptr || sp->system () || deorphan) found = false; // Otherwise, derive ap from sp below. @@ -2672,8 +5098,16 @@ namespace bpkg bool keep_out (pa.options.keep_out () && sp != nullptr && sp->external ()); + bool replace ((existing && sp != nullptr) || deorphan); + // Finally add this package to the list. // + optional<bool> upgrade (sp != nullptr && + !pa.constraint && + (pa.options.upgrade () || pa.options.patch ()) + ? pa.options.upgrade () + : optional<bool> ()); + // @@ Pass pa.configure_only() when support for package-specific // --configure-only is added. 
// @@ -2700,9 +5134,11 @@ namespace bpkg : optional<dir_path> ()), pa.options.checkout_purge (), move (pa.config_vars), - {package_key {mdb, ""}}, // Required by (command line). + upgrade, + deorphan, + {cmd_line}, // Required by (command line). false, // Required by dependents. - 0}; // State flags. + replace ? build_package::build_replace : uint16_t (0)}; l4 ([&]{trace << "stash held package " << p.available_name_version_db ();}); @@ -2714,7 +5150,7 @@ namespace bpkg // if (pa.constraint) p.constraints.emplace_back ( - mdb, "command line", move (*pa.constraint)); + move (*pa.constraint), cmd_line.db, cmd_line.name.string ()); pkg_confs.emplace_back (p.db, p.name ()); @@ -2729,7 +5165,7 @@ namespace bpkg // command line option to enable this behavior. // if (hold_pkgs.empty () && dep_pkgs.empty () && - (o.upgrade () || o.patch ())) + (o.upgrade () || o.patch () || o.deorphan ())) { for (database& cdb: current_configs) { @@ -2763,7 +5199,26 @@ namespace bpkg continue; } - auto apr (find_available_one (name, pc, root)); + bool deorphan (false); + + if (o.deorphan ()) + { + // If the package is not an orphan and upgrade/patch is not + // requested, then just skip the package. + // + if (orphan_package (cdb, sp)) + deorphan = true; + else if (!o.upgrade () && !o.patch ()) + continue; + } + + // In the deorphan mode with no upgrade/patch requested pick the + // version that matches the orphan best. Otherwise, pick the patch + // or the latest available version, as requested. + // + auto apr (deorphan && !o.upgrade () && !o.patch () + ? find_orphan_match (sp, root) + : find_available_one (name, pc, root)); shared_ptr<available_package> ap (move (apr.first)); if (ap == nullptr || ap->stub ()) @@ -2771,11 +5226,13 @@ namespace bpkg diag_record dr (fail); dr << name << " is not available"; - if (ap != nullptr) + if (ap != nullptr) // Stub? 
+ { dr << " in source" << info << "consider building it as " - << package_string (name, version (), true /* system */) - << " if it is available from the system"; + << package_string (name, version (), true /* system */) + << " if it is available from the system"; + } // Let's help the new user out here a bit. // @@ -2792,40 +5249,53 @@ namespace bpkg // build_package p { build_package::build, - cdb, - move (sp), - move (ap), - move (apr.second), - nullopt, // Dependencies. - nullopt, // Dependencies alternatives. - nullopt, // Package skeleton. - nullopt, // Postponed dependency alternatives. - false, // Recursive collection. - true, // Hold package. - false, // Hold version. - {}, // Constraints. - false, // System package. - keep_out, - o.disfigure (), - false, // Configure-only. - nullopt, // Checkout root. - false, // Checkout purge. - strings (), // Configuration variables. - {package_key {mdb, ""}}, // Required by (command line). - false, // Required by dependents. - 0}; // State flags. + cdb, + move (sp), + move (ap), + move (apr.second), + nullopt, // Dependencies. + nullopt, // Dependencies alternatives. + nullopt, // Package skeleton. + nullopt, // Postponed dependency alternatives. + false, // Recursive collection. + true, // Hold package. + false, // Hold version. + {}, // Constraints. + false, // System package. + keep_out, + o.disfigure (), + false, // Configure-only. + nullopt, // Checkout root. + false, // Checkout purge. + strings (), // Configuration variables. + (o.upgrade () || o.patch () + ? o.upgrade () + : optional<bool> ()), + deorphan, + {cmd_line}, // Required by (command line). + false, // Required by dependents. + deorphan ? build_package::build_replace : uint16_t (0)}; l4 ([&]{trace << "stash held package " << p.available_name_version_db ();}); hold_pkgs.push_back (move (p)); - // If there are also -i|-r, then we are also upgrading dependencies - // of all held packages. 
+ // If there are also -i|-r, then we are also upgrading and/or + // deorphaning dependencies of all held packages. // if (o.immediate () || o.recursive ()) - rec_pkgs.push_back ( - recursive_package {cdb, name, o.upgrade (), o.recursive ()}); + { + rec_pkgs.push_back (recursive_package { + cdb, name, + (o.upgrade () || o.patch () + ? o.recursive () + : optional<bool> ()), + o.upgrade (), + (o.deorphan () + ? o.recursive () + : optional<bool> ())}); + } } } } @@ -2887,6 +5357,9 @@ namespace bpkg // dependencies to up/down-grade, and unused dependencies to drop. We call // this the plan. // + // Note: for the sake of brevity we also assume the package replacement + // wherever we mention the package up/down-grade in this description. + // // The way we do it is tricky: we first create the plan based on build-to- // holds (i.e., the user selected). Next, to decide whether we need to // up/down-grade or drop any dependecies we need to take into account an @@ -2925,6 +5398,11 @@ namespace bpkg // grade order where any subsequent entry does not affect the decision of // the previous ones. // + // Note that we also need to rebuild the plan from scratch on adding a new + // up/down-grade/drop if any dependency configuration negotiation has been + // performed, since any package replacement may affect the already + // negotiated configurations. + // // Package managers are an easy, already solved problem, right? // build_packages pkgs; @@ -2939,13 +5417,17 @@ namespace bpkg shared_ptr<available_package> available; lazy_shared_ptr<bpkg::repository_fragment> repository_fragment; - bool system; + bool system; + bool existing; // Build as an existing archive or directory. 
+ optional<bool> upgrade; + bool deorphan; }; vector<dep> deps; + existing_dependencies existing_deps; + deorphaned_dependencies deorphaned_deps; replaced_versions replaced_vers; postponed_dependencies postponed_deps; - postponed_positions postponed_poss; unacceptable_alternatives unacceptable_alts; // Map the repointed dependents to the replacement flags (see @@ -3013,12 +5495,12 @@ namespace bpkg // during the package collection) because we want to enter them before // collect_build_postponed() and they could be the dependents that have // the config clauses. In a sense, change to replaced_vers, - // postponed_deps, or postponed_poss maps should not affect the deps + // postponed_deps, or unacceptable_alts maps should not affect the deps // list. But not the other way around: a dependency erased from the deps // list could have caused an entry in the replaced_vers, postponed_deps, - // and/or postponed_poss maps. And so we clean replaced_vers, - // postponed_deps, and postponed_poss on scratch_exe (scratch during the - // plan execution). + // and/or unacceptable_alts maps. And so we clean replaced_vers, + // postponed_deps, and unacceptable_alts on scratch_exe (scratch during + // the plan execution). // for (bool refine (true), scratch_exe (true), scratch_col (false); refine; ) @@ -3132,8 +5614,17 @@ namespace bpkg // Also, if a dependency package already has selected package that // is held, then we need to unhold it. // - auto enter = [&mdb, &pkgs] (database& db, const dependency_package& p) + auto enter = [&pkgs, &cmd_line] (database& db, + const dependency_package& p) { + // Note that we don't set the upgrade and deorphan flags based on + // the --upgrade, --patch, and --deorphan options since an option + // presense doesn't necessarily means that the respective flag needs + // to be set (the package may not be selected, may not be patchable + // and/or an orphan, etc). 
The proper flags will be provided by + // evaluate_dependency() if/when any upgrade/deorphan recommendation + // is given. + // build_package bp { nullopt, // Action. db, @@ -3146,7 +5637,7 @@ namespace bpkg nullopt, // Postponed dependency alternatives. false, // Recursive collection. false, // Hold package. - p.constraint.has_value (), // Hold version. + p.hold_version, {}, // Constraints. p.system, p.keep_out, @@ -3155,13 +5646,16 @@ namespace bpkg p.checkout_root, p.checkout_purge, p.config_vars, - {package_key {mdb, ""}}, // Required by (command line). + nullopt, // Upgrade. + false, // Deorphan. + {cmd_line}, // Required by (command line). false, // Required by dependents. 0}; // State flags. if (p.constraint) - bp.constraints.emplace_back ( - mdb, "command line", *p.constraint); + bp.constraints.emplace_back (*p.constraint, + cmd_line.db, + cmd_line.name.string ()); pkgs.enter (p.name, move (bp)); }; @@ -3175,12 +5669,11 @@ namespace bpkg // The system package may only have an exact/wildcard version // specified. // - add_system_package (db, + add_system_package (&db, p.name, - (p.constraint - ? *p.constraint->min_version - : wildcard_version)); - + p.constraint, + p.system_status, + nullptr /* stubs */); enter (db, p); }; @@ -3227,10 +5720,13 @@ namespace bpkg } }); - postponed_packages postponed_repo; - postponed_packages postponed_alts; - postponed_configurations postponed_cfgs; - strings postponed_cfgs_history; + postponed_packages postponed_repo; + postponed_packages postponed_alts; + postponed_packages postponed_recs; + postponed_existing_dependencies postponed_edeps; + postponed_configurations postponed_cfgs; + strings postponed_cfgs_history; + unsatisfied_dependents unsatisfied_depts; try { @@ -3242,13 +5738,14 @@ namespace bpkg { replaced_vers.clear (); postponed_deps.clear (); - postponed_poss.clear (); unacceptable_alts.clear (); scratch_exe = false; } - else if (scratch_col) + else { + assert (scratch_col); // See the scratch definition above. 
+ // Reset to detect bogus entries. // for (auto& rv: replaced_vers) @@ -3260,12 +5757,6 @@ namespace bpkg pd.second.with_config = false; } - for (auto& pd: postponed_poss) - { - pd.second.skipped = false; - pd.second.reevaluated = false; - } - scratch_col = false; } @@ -3298,14 +5789,8 @@ namespace bpkg // specify packages on the command line does not matter). // for (const build_package& p: hold_pkgs) - pkgs.collect_build (o, - p, - find_prereq_database, - rpt_depts, - add_priv_cfg, - true /* initial_collection */, - replaced_vers, - postponed_cfgs); + pkgs.collect_build ( + o, p, replaced_vers, postponed_cfgs, unsatisfied_depts); // Collect all the prerequisites of the user selection. // @@ -3318,26 +5803,7 @@ namespace bpkg auto i (postponed_deps.find (pk)); - if (i == postponed_deps.end ()) - { - pkgs.collect_build_prerequisites ( - o, - p.db, - p.name (), - find_prereq_database, - rpt_depts, - add_priv_cfg, - true /* initial_collection */, - replaced_vers, - postponed_repo, - postponed_alts, - 0 /* max_alt_index */, - postponed_deps, - postponed_cfgs, - postponed_poss, - unacceptable_alts); - } - else + if (i != postponed_deps.end ()) { // Even though the user selection may have a configuration, we // treat it as a dependent without any configuration because @@ -3348,6 +5814,37 @@ namespace bpkg l5 ([&]{trace << "dep-postpone user-specified " << pk;}); } + else + { + const postponed_configuration* pcfg ( + postponed_cfgs.find_dependency (pk)); + + if (pcfg != nullptr) + { + l5 ([&]{trace << "dep-postpone user-specified " << pk + << " since already in cluster " << *pcfg;}); + } + else + { + pkgs.collect_build_prerequisites ( + o, + p.db, + p.name (), + find_prereq_database, + add_priv_cfg, + rpt_depts, + replaced_vers, + postponed_repo, + postponed_alts, + 0 /* max_alt_index */, + postponed_recs, + postponed_edeps, + postponed_deps, + postponed_cfgs, + unacceptable_alts, + unsatisfied_depts); + } + } } // Note that we need to collect unheld after 
prerequisites, not to @@ -3386,10 +5883,12 @@ namespace bpkg replaced_vers, postponed_repo, postponed_alts, + postponed_recs, + postponed_edeps, postponed_deps, postponed_cfgs, - postponed_poss, unacceptable_alts, + unsatisfied_depts, find_prereq_database, add_priv_cfg); } @@ -3426,7 +5925,7 @@ namespace bpkg // Marking upgraded dependencies as "required by command line" // may seem redundant as they should already be pre-entered as // such (see above). But remember dependencies upgraded with - // -i|-r? Note that the required_by data member should never be + // -i|-r? Note that the required_by data member should never be // empty, as it is used in prompts/diagnostics. // build_package p { @@ -3450,67 +5949,104 @@ namespace bpkg nullopt, // Checkout root. false, // Checkout purge. strings (), // Configuration variables. - {package_key {mdb, ""}}, // Required by (command line). + d.upgrade, + d.deorphan, + {cmd_line}, // Required by (command line). false, // Required by dependents. - 0}; // State flags. + (d.existing || d.deorphan + ? build_package::build_replace + : uint16_t (0))}; - build_package_refs dep_chain; + package_key pk {ddb, d.name}; - // Note: recursive. + // Similar to the user-selected packages, collect non- + // recursively the dependencies for which recursive collection + // is postponed (see above for details). // - pkgs.collect_build (o, - move (p), - find_prereq_database, - rpt_depts, - add_priv_cfg, - true /* initial_collection */, - replaced_vers, - postponed_cfgs, - &dep_chain, - &postponed_repo, - &postponed_alts, - &postponed_deps, - &postponed_poss, - &unacceptable_alts); - } - } + auto i (postponed_deps.find (pk)); + if (i != postponed_deps.end ()) + { + i->second.wout_config = true; - // Erase the bogus postponements and re-collect from scratch, if any - // (see postponed_dependencies for details). 
- // - // Note that we used to re-collect such postponements in-place but - // re-doing from scratch feels more correct (i.e., we may end up - // doing it earlier which will affect dependency alternatives). - // - postponed_deps.cancel_bogus (trace, true /* initial_collection */); + // Note: not recursive. + // + pkgs.collect_build ( + o, move (p), replaced_vers, postponed_cfgs, unsatisfied_depts); - // Now remove all the dependencies postponed during the initial - // collection since all this information is already in - // postponed_cfgs. - // - for (auto i (postponed_deps.begin ()); i != postponed_deps.end (); ) - { - if (i->second.initial_collection) - i = postponed_deps.erase (i); - else - ++i; + l5 ([&]{trace << "dep-postpone user-specified dependency " + << pk;}); + } + else + { + const postponed_configuration* pcfg ( + postponed_cfgs.find_dependency (pk)); + + if (pcfg != nullptr) + { + // Note: not recursive. + // + pkgs.collect_build (o, + move (p), + replaced_vers, + postponed_cfgs, + unsatisfied_depts); + + l5 ([&]{trace << "dep-postpone user-specified dependency " + << pk << " since already in cluster " + << *pcfg;}); + } + else + { + build_package_refs dep_chain; + + // Note: recursive. + // + pkgs.collect_build (o, + move (p), + replaced_vers, + postponed_cfgs, + unsatisfied_depts, + &dep_chain, + find_prereq_database, + add_priv_cfg, + &rpt_depts, + &postponed_repo, + &postponed_alts, + &postponed_recs, + &postponed_edeps, + &postponed_deps, + &unacceptable_alts); + } + } + } } // Handle the (combined) postponed collection. // - if (!postponed_repo.empty () || - !postponed_alts.empty () || - postponed_deps.has_bogus () || + if (find_if (postponed_recs.begin (), postponed_recs.end (), + [] (const build_package* p) + { + // Note that we check for the dependencies presence + // rather than for the recursive_collection flag + // (see collect_build_postponed() for details). 
+ // + return !p->dependencies; + }) != postponed_recs.end () || + !postponed_repo.empty () || + !postponed_alts.empty () || + postponed_deps.has_bogus () || !postponed_cfgs.empty ()) pkgs.collect_build_postponed (o, replaced_vers, postponed_repo, postponed_alts, + postponed_recs, + postponed_edeps, postponed_deps, postponed_cfgs, postponed_cfgs_history, - postponed_poss, unacceptable_alts, + unsatisfied_depts, find_prereq_database, rpt_depts, add_priv_cfg); @@ -3519,12 +6055,6 @@ namespace bpkg // (see replaced_versions for details). // replaced_vers.cancel_bogus (trace, true /* scratch */); - - // Erase the bogus existing dependent re-evaluation postponements - // and re-collect from scratch, if any (see postponed_positions for - // details). - // - postponed_poss.cancel_bogus (trace); } catch (const scratch_collection& e) { @@ -3534,7 +6064,7 @@ namespace bpkg l5 ([&]{trace << "collection failed due to " << e.description << (e.package != nullptr - ? " (" + e.package->string () + ")" + ? " (" + e.package->string () + ')' : empty_string) << ", retry from scratch";}); @@ -3555,6 +6085,9 @@ namespace bpkg continue; } + set<package_key> depts ( + pkgs.collect_dependents (rpt_depts, unsatisfied_depts)); + // Now that we have collected all the package versions that we need to // build, arrange them in the "dependency order", that is, with every // package on the list only possibly depending on the ones after @@ -3565,29 +6098,33 @@ namespace bpkg // dependencies between the specified packages). // // The order of dependency upgrades/downgrades/drops is not really - // deterministic. We, however, do them before hold_pkgs so that they - // appear (e.g., on the plan) last. + // deterministic. We, however, do upgrades/downgrades before hold_pkgs + // so that they appear (e.g., on the plan) after the packages being + // built to hold. 
We handle drops last, though, so that the unused + // packages are likely get purged before the package fetches, so that + // the disk space they occupy can be reused. // for (const dep& d: deps) - pkgs.order (d.db, - d.name, - nullopt /* buildtime */, - find_prereq_database, - false /* reorder */); + { + if (d.available != nullptr) + pkgs.order (d.db, + d.name, + find_prereq_database, + false /* reorder */); + } for (const build_package& p: reverse_iterate (hold_pkgs)) - pkgs.order (p.db, - p.name (), - nullopt /* buildtime */, - find_prereq_database); + pkgs.order (p.db, p.name (), find_prereq_database); for (const auto& rd: rpt_depts) pkgs.order (rd.first.db, rd.first.name, - nullopt /* buildtime */, find_prereq_database, - false /* reorder */); + false /* reorder */); + // Order the existing dependents which have participated in + // negotiation of the configuration of their dependencies. + // for (const postponed_configuration& cfg: postponed_cfgs) { for (const auto& d: cfg.dependents) @@ -3595,23 +6132,27 @@ namespace bpkg if (d.second.existing) { const package_key& p (d.first); - - pkgs.order (p.db, - p.name, - nullopt /* buildtime */, - find_prereq_database); + pkgs.order (p.db, p.name, find_prereq_database); } } } - // Collect and order all the dependents that we will need to - // reconfigure because of the up/down-grades of packages that are now - // on the list. + // Order the existing dependents whose dependencies are being + // up/down-graded or reconfigured. // - pkgs.collect_order_dependents (rpt_depts); + for (const package_key& p: depts) + pkgs.order (p.db, p.name, find_prereq_database, false /* reorder */); - // And, finally, make sure all the packages that we need to unhold - // are on the list. + // Order the re-collected packages (deviated dependents, etc). 
+ // + for (build_package* p: postponed_recs) + { + assert (p->recursive_collection); + + pkgs.order (p->db, p->name (), find_prereq_database); + } + + // Make sure all the packages that we need to unhold are on the list. // for (const dependency_package& p: dep_pkgs) { @@ -3625,9 +6166,8 @@ namespace bpkg if (sp != nullptr && sp->hold_package) pkgs.order (db, p.name, - nullopt /* buildtime */, find_prereq_database, - false /* reorder */); + false /* reorder */); }; if (p.db != nullptr) @@ -3641,6 +6181,43 @@ namespace bpkg } } + // And, finally, order the package drops. + // + for (const dep& d: deps) + { + if (d.available == nullptr) + pkgs.order (d.db, + d.name, + find_prereq_database, + false /* reorder */); + } + + // Make sure all the postponed dependencies of existing dependents + // have been collected and fail if that's not the case. + // + for (const auto& pd: postponed_edeps) + { + const build_package* p (pkgs.entered_build (pd.first)); + assert (p != nullptr && p->available != nullptr); + + if (!p->recursive_collection) + { + // Feels like this shouldn't happen but who knows. + // + diag_record dr (fail); + dr << "package " << p->available_name_version_db () << " is not " + << "built due to its configured dependents deviation in " + << "dependency resolution" << + info << "deviated dependents:"; + + for (const package_key& d: pd.second) + dr << ' ' << d; + + dr << info << "please report in " + << "https://github.com/build2/build2/issues/302"; + } + } + #ifndef NDEBUG pkgs.verify_ordering (); #endif @@ -3689,7 +6266,7 @@ namespace bpkg changed = execute_plan (o, bl, - true /* simulate */, + &unsatisfied_depts, find_prereq_database); if (changed) @@ -3713,20 +6290,31 @@ namespace bpkg // value covers both the "no change is required" and the "no // recommendation available" cases. 
// - auto eval_dep = [&dep_pkgs, &rec_pkgs, &o] ( - database& db, - const shared_ptr<selected_package>& sp, - bool ignore_unsatisfiable = true) -> optional<evaluate_result> + auto eval_dep = [&dep_pkgs, + &rec_pkgs, + &o, + &existing_deps, + &deorphaned_deps, + &pkgs, + cache = upgrade_dependencies_cache {}] ( + database& db, + const shared_ptr<selected_package>& sp, + bool ignore_unsatisfiable = true) mutable + -> optional<evaluate_result> { optional<evaluate_result> r; // See if there is an optional dependency upgrade recommendation. // if (!sp->hold_package) - r = evaluate_dependency (db, + r = evaluate_dependency (o, + db, sp, dep_pkgs, o.no_move (), + existing_deps, + deorphaned_deps, + pkgs, ignore_unsatisfiable); // If none, then see for the recursive dependency upgrade @@ -3736,7 +6324,15 @@ namespace bpkg // configured as such for a reason. // if (!r && !sp->system () && !rec_pkgs.empty ()) - r = evaluate_recursive (db, sp, rec_pkgs, ignore_unsatisfiable); + r = evaluate_recursive (o, + db, + sp, + rec_pkgs, + existing_deps, + deorphaned_deps, + pkgs, + ignore_unsatisfiable, + cache); // Translate the "no change" result to nullopt. // @@ -3771,11 +6367,12 @@ namespace bpkg bool s (false); database& db (i->db); + const package_name& nm (i->name); // Here we scratch if evaluate changed its mind or if the resulting // version doesn't match what we expect it to be. // - if (auto sp = db.find<selected_package> (i->name)) + if (auto sp = db.find<selected_package> (nm)) { const version& dv (target_version (db, i->available, i->system)); @@ -3791,12 +6388,25 @@ namespace bpkg if (s) { scratch_exe = true; // Rebuild the plan from scratch. 
+ + package_key pk (db, nm); + + auto j (find (existing_deps.begin (), existing_deps.end (), pk)); + if (j != existing_deps.end ()) + existing_deps.erase (j); + + deorphaned_deps.erase (pk); + i = deps.erase (i); } else ++i; } + if (scratch_exe) + l5 ([&]{trace << "one of dependency evaluation decisions has " + << "changed, re-collecting from scratch";}); + // If the execute_plan() call was noop, there are no user expectations // regarding any dependency, and no upgrade is requested, then the // only possible refinement outcome can be recommendations to drop @@ -3824,8 +6434,13 @@ namespace bpkg // make sure that the unsatisfiable dependency, if left, is // reported. // - auto need_refinement = [&eval_dep, &deps, &rec_pkgs, &dep_dbs, &o] ( - bool diag = false) -> bool + auto need_refinement = [&eval_dep, + &deps, + &rec_pkgs, + &dep_dbs, + &existing_deps, + &deorphaned_deps, + &o] (bool diag = false) -> bool { // Examine the new dependency set for any up/down-grade/drops. // @@ -3856,11 +6471,25 @@ namespace bpkg continue; if (!diag) + { deps.push_back (dep {er->db, sp->name, move (er->available), move (er->repository_fragment), - er->system}); + er->system, + er->existing, + er->upgrade, + er->orphan.has_value ()}); + + if (er->existing) + existing_deps.emplace_back (er->db, sp->name); + + if (er->orphan) + { + deorphaned_deps[package_key (er->db, sp->name)] = + move (*er->orphan); + } + } r = true; } @@ -3872,8 +6501,19 @@ namespace bpkg refine = need_refinement (); + // If no further refinement is necessary, then perform the + // diagnostics run. Otherwise, if any dependency configuration + // negotiation has been performed during the current plan refinement + // iteration, then rebuild the plan from scratch (see above for + // details). Also rebuild it from from scratch if any unsatisfied + // dependents have been ignored, since their unsatisfied constraints + // are now added to the dependencies' build_package::constraints + // lists. 
+ // if (!refine) need_refinement (true /* diag */); + else if (!postponed_cfgs.empty () || !unsatisfied_depts.empty ()) + scratch_exe = true; } // Note that we prevent building multiple instances of the same @@ -3969,10 +6609,10 @@ namespace bpkg // that the build-time dependency configuration type (host or // build2) differs from the dependent configuration type (target // is a common case) and doesn't work well, for example, for the - // self-hosted configurations. For them it can fail - // erroneously. We can potentially fix that by additionally - // storing the build-time flag for a prerequisite. However, let's - // first see if it ever becomes a problem. + // self-hosted configurations. For them it can fail erroneously. + // We can potentially fix that by additionally storing the + // build-time flag for a prerequisite. However, let's first see if + // it ever becomes a problem. // prerequisites r; const package_prerequisites& prereqs (sp->prerequisites); @@ -4256,6 +6896,190 @@ namespace bpkg t.commit (); } + + if (!refine) + { + // Cleanup the package build collecting state, preparing for the + // re-collection from the very beginning. + // + auto prepare_recollect = [&refine, + &scratch_exe, + &deps, + &existing_deps, + &deorphaned_deps] () + { + refine = true; + scratch_exe = true; + + deps.clear (); + existing_deps.clear (); + deorphaned_deps.clear (); + }; + + // Issue diagnostics and fail if any existing dependents are not + // satisfied with their dependencies. + // + // But first, try to resolve the first encountered unsatisfied + // constraint by replacing the collected unsatisfactory dependency + // or some of its dependents with some other available package + // version. This version, while not being the best possible choice, + // must be satisfactory for all its new and existing dependents. If + // succeed, punch the replacement version into the command line and + // recollect from the very beginning (see unsatisfied_dependents for + // details). 
+ // + if (!unsatisfied_depts.empty ()) + { + if (!cmdline_refine_index) // Not command line adjustments refinement? + { + const unsatisfied_dependent& dpt (unsatisfied_depts.front ()); + + assert (!dpt.ignored_constraints.empty ()); + + const ignored_constraint& ic (dpt.ignored_constraints.front ()); + + const build_package* p (pkgs.entered_build (ic.dependency)); + assert (p != nullptr); // The dependency must be collected. + + l5 ([&]{trace << "try to replace unsatisfactory dependency " + << p->available_name_version_db () << " with some " + << "other version";}); + + optional<cmdline_adjustment> a; + vector<package_key> unsatisfied_dpts; + set<const build_package*> visited_dpts; + + if ((a = try_replace_dependency (o, + *p, + pkgs, + hold_pkgs, + dep_pkgs, + cmdline_adjs, + unsatisfied_dpts, + "unsatisfactory dependency")) || + (a = try_replace_dependent (o, + *p, + &ic.unsatisfied_constraints, + pkgs, + cmdline_adjs, + unsatisfied_dpts, + hold_pkgs, + dep_pkgs, + visited_dpts)) || + !cmdline_adjs.empty ()) + { + if (a) + { + cmdline_adjs.push (move (*a)); + } + else + { + cmdline_adjustment a (cmdline_adjs.pop ()); + + l5 ([&]{trace << "cannot replace any package, rolling back " + << "latest command line adjustment (" + << cmdline_adjs.to_string (a) << ')';}); + } + + prepare_recollect (); + } + else + unsatisfied_depts.diag (pkgs); // Issue the diagnostics and fail. + } + else // We are in the command line adjustments refinement cycle. + { + // Since we have failed to collect, then the currently dropped + // command line adjustment is essential. Thus, push it back to + // the stack, drop the next one, and retry. If this is the last + // adjustment in the stack, then we assume that no further + // refinement is possible and we just recollect, assuming that + // this recollection will be successful. + // + assert (cmdline_refine_adjustment); // Wouldn't be here otherwise. 
+ + l5 ([&]{trace << "attempt to refine command line adjustments by " + << "rolling back adjustment " + << cmdline_adjs.to_string ( + *cmdline_refine_adjustment) + << " failed, pushing it back";}); + + cmdline_adjs.push (move (*cmdline_refine_adjustment)); + + // Index of the being previously dropped adjustment must be + // valid. + // + assert (*cmdline_refine_index != cmdline_adjs.size ()); + + if (++(*cmdline_refine_index) != cmdline_adjs.size ()) + { + cmdline_refine_adjustment = cmdline_adjs.pop (true /* front */); + + l5 ([&]{trace << "continue with command line adjustments " + << "refinement cycle by rolling back adjustment " + << cmdline_adjs.to_string ( + *cmdline_refine_adjustment);}); + } + else + { + cmdline_refine_adjustment = nullopt; + + l5 ([&]{trace << "cannot further refine command line " + << "adjustments, performing final collection";}); + } + + prepare_recollect (); + } + } + // + // If the collection was successful, then see if we still need to + // perform the command line adjustments refinement. + // + else if (cmdline_adjs.tried () && + (!cmdline_refine_index || + *cmdline_refine_index != cmdline_adjs.size ())) + { + // If some command line adjustment is currently being dropped, + // that means that this adjustment is redundant. + // + bool initial (!cmdline_refine_index); + + if (!initial) + { + assert (cmdline_refine_adjustment); + + l5 ([&]{trace << "command line adjustment " + << cmdline_adjs.to_string ( + *cmdline_refine_adjustment) + << " is redundant, dropping it";}); + + cmdline_refine_adjustment = nullopt; + cmdline_refine_index = nullopt; + } + + // We cannot remove all the adjustments during the refinement. + // Otherwise, we shouldn't be failing in the first place. + // + assert (!cmdline_adjs.empty ()); + + // If there is just a single adjustment left, then there is + // nothing to refine anymore. 
+ // + if (cmdline_adjs.size () != 1) + { + cmdline_refine_adjustment = cmdline_adjs.pop (true /* front */); + cmdline_refine_index = 0; + + l5 ([&]{trace << (initial ? "start" : "re-start") << " command " + << "line adjustments refinement cycle by rolling " + << "back first adjustment (" + << cmdline_adjs.to_string ( + *cmdline_refine_adjustment) + << ')';}); + + prepare_recollect (); + } + } + } } } @@ -4273,10 +7097,11 @@ namespace bpkg bool update_dependents (false); // We need the plan and to ask for the user's confirmation only if some - // implicit action (such as building prerequisite or reconfiguring - // dependent package) is to be taken or there is a selected package which - // version must be changed. But if the user explicitly requested it with - // --plan, then we print it as long as it is not empty. + // implicit action (such as building prerequisite, reconfiguring dependent + // package, or installing system/distribution packages) is to be taken or + // there is a selected package which version must be changed. But if the + // user explicitly requested it with --plan, then we print it as long as + // it is not empty. // string plan; sha256 csum; @@ -4287,6 +7112,31 @@ namespace bpkg o.plan_specified () || o.rebuild_checksum_specified ()) { + // Map the main system/distribution packages that need to be installed + // to the system packages which caused their installation (see + // build_package::system_install() for details). + // + using package_names = vector<reference_wrapper<const package_name>>; + using system_map = map<string, package_names>; + + system_map sys_map; + + // Iterate in the reverse order as we will do for printing the action + // lines. This way a sys-install action line will be printed right + // before the bpkg action line of a package which appears first in the + // sys-install action's 'required by' list. 
+ // + for (const build_package& p: reverse_iterate (pkgs)) + { + if (const system_package_status* s = p.system_install ()) + { + package_names& ps (sys_map[s->system_name]); + + if (find (ps.begin (), ps.end (), p.name ()) == ps.end ()) + ps.push_back (p.name ()); + } + } + // Start the transaction since we may query available packages for // skeleton initializations. // @@ -4294,200 +7144,262 @@ namespace bpkg bool first (true); // First entry in the plan. - for (build_package& p: reverse_iterate (pkgs)) + // Print the bpkg package action lines. + // + // Also print the sys-install action lines for system/distribution + // packages which require installation by the system package manager. + // Print them before the respective system package action lines, but + // only once per (main) system/distribution package. For example: + // + // sys-install libssl1.1/1.1.1l (required by sys:libssl, sys:libcrypto) + // configure sys:libssl/1.1.1 (required by foo) + // configure sys:libcrypto/1.1.1 (required by bar) + // + for (auto i (pkgs.rbegin ()); i != pkgs.rend (); ) { + build_package& p (*i); assert (p.action); - database& pdb (p.db); - const shared_ptr<selected_package>& sp (p.selected); - string act; - if (*p.action == build_package::drop) + const system_package_status* s; + system_map::iterator j; + + if ((s = p.system_install ()) != nullptr && + (j = sys_map.find (s->system_name)) != sys_map.end ()) { - act = "drop " + sp->string (pdb) + " (unused)"; + act = "sys-install "; + act += s->system_name; + act += '/'; + act += s->system_version; + act += " (required by "; + + bool first (true); + for (const package_name& n: j->second) + { + if (first) + first = false; + else + act += ", "; + + act += "sys:"; + act += n.string (); + } + + act += ')'; + need_prompt = true; + + // Make sure that we print this sys-install action just once. + // + sys_map.erase (j); + + // Note that we don't increment i in order to re-iterate this pkgs + // entry. 
} else { - // Print configuration variables. - // - // The idea here is to only print configuration for those packages - // for which we call pkg_configure*() in execute_plan(). - // - package_skeleton* cfg (nullptr); + ++i; - string cause; - if (*p.action == build_package::adjust) - { - assert (sp != nullptr && (p.reconfigure () || p.unhold ())); + database& pdb (p.db); + const shared_ptr<selected_package>& sp (p.selected); - // This is a dependent needing reconfiguration. + if (*p.action == build_package::drop) + { + act = "drop " + sp->string (pdb) + " (unused)"; + need_prompt = true; + } + else + { + // Print configuration variables. // - // This is an implicit reconfiguration which requires the plan to - // be printed. Will flag that later when composing the list of - // prerequisites. + // The idea here is to only print configuration for those packages + // for which we call pkg_configure*() in execute_plan(). // - if (p.reconfigure ()) - { - act = "reconfigure"; - cause = "dependent of"; + package_skeleton* cfg (nullptr); - if (!o.configure_only ()) - update_dependents = true; - } - - // This is a held package needing unhold. - // - if (p.unhold ()) + string cause; + if (*p.action == build_package::adjust) { - if (act.empty ()) - act = "unhold"; - else - act += "/unhold"; - } - - act += ' ' + sp->name.string (); + assert (sp != nullptr && (p.reconfigure () || p.unhold ())); - const string& s (pdb.string); - if (!s.empty ()) - act += ' ' + s; + // This is a dependent needing reconfiguration. + // + // This is an implicit reconfiguration which requires the plan + // to be printed. Will flag that later when composing the list + // of prerequisites. + // + if (p.reconfigure ()) + { + act = "reconfigure"; + cause = "dependent of"; - // This is an adjustment and so there is no available package - // specified for the build package object and thus the skeleton - // cannot be present. 
- // - assert (p.available == nullptr && !p.skeleton); + if (!o.configure_only ()) + update_dependents = true; + } - // We shouldn't be printing configurations for plain unholds. - // - if (p.reconfigure ()) - { - // Since there is no available package specified we need to find - // it (or create a transient one). + // This is a held package needing unhold. // - cfg = &p.init_skeleton (o, find_available (o, pdb, sp)); - } - } - else - { - assert (p.available != nullptr); // This is a package build. + if (p.unhold ()) + { + if (act.empty ()) + act = "unhold"; + else + act += "/unhold"; + } - // Even if we already have this package selected, we have to - // make sure it is configured and updated. - // - if (sp == nullptr) - { - act = p.system ? "configure" : "new"; + act += ' ' + sp->name.string (); + + const string& s (pdb.string); + if (!s.empty ()) + act += ' ' + s; - // For a new non-system package the skeleton must already be - // initialized. + // This is an adjustment and so there is no available package + // specified for the build package object and thus the skeleton + // cannot be present. // - assert (p.system || p.skeleton.has_value ()); + assert (p.available == nullptr && !p.skeleton); - // Initialize the skeleton if it is not initialized yet. + // We shouldn't be printing configurations for plain unholds. // - cfg = &(p.skeleton ? *p.skeleton : p.init_skeleton (o)); + if (p.reconfigure ()) + { + // Since there is no available package specified we need to + // find it (or create a transient one). + // + cfg = &p.init_skeleton (o, + true /* load_old_dependent_config */, + find_available (o, pdb, sp)); + } } - else if (sp->version == p.available_version ()) + else { - // If this package is already configured and is not part of the - // user selection (or we are only configuring), then there is - // nothing we will be explicitly doing with it (it might still - // get updated indirectly as part of the user selection update). 
- // - if (!p.reconfigure () && - sp->state == package_state::configured && - (!p.user_selection () || - o.configure_only () || - p.configure_only ())) - continue; + assert (p.available != nullptr); // This is a package build. - act = p.system - ? "reconfigure" - : (p.reconfigure () - ? (o.configure_only () || p.configure_only () - ? "reconfigure" - : "reconfigure/update") - : "update"); + bool replace (p.replace ()); - if (p.reconfigure ()) + // Even if we already have this package selected, we have to + // make sure it is configured and updated. + // + if (sp == nullptr) { + act = p.system ? "configure" : "new"; + + // For a new non-system package the skeleton must already be + // initialized. + // + assert (p.system || p.skeleton.has_value ()); + // Initialize the skeleton if it is not initialized yet. // cfg = &(p.skeleton ? *p.skeleton : p.init_skeleton (o)); } - } - else - { - act = p.system - ? "reconfigure" - : sp->version < p.available_version () - ? "upgrade" - : "downgrade"; - - // For a non-system package up/downgrade the skeleton must - // already be initialized. - // - assert (p.system || p.skeleton.has_value ()); + else if (sp->version == p.available_version ()) + { + // If this package is already configured and is not part of + // the user selection (or we are only configuring), then there + // is nothing we will be explicitly doing with it (it might + // still get updated indirectly as part of the user selection + // update). + // + if (!p.reconfigure () && + sp->state == package_state::configured && + (!p.user_selection () || + o.configure_only () || + p.configure_only ())) + continue; - // Initialize the skeleton if it is not initialized yet. - // - cfg = &(p.skeleton ? *p.skeleton : p.init_skeleton (o)); + act = p.system + ? "reconfigure" + : (p.reconfigure () + ? (o.configure_only () || p.configure_only () + ? (replace ? "replace" : "reconfigure") + : (replace ? 
"replace/update" : "reconfigure/update")) + : "update"); - need_prompt = true; - } + if (p.reconfigure ()) + { + // Initialize the skeleton if it is not initialized yet. + // + cfg = &(p.skeleton ? *p.skeleton : p.init_skeleton (o)); + } + } + else + { + act += p.system + ? "reconfigure" + : (sp->version < p.available_version () + ? (replace ? "replace/upgrade" : "upgrade") + : (replace ? "replace/downgrade" : "downgrade")); + + // For a non-system package up/downgrade the skeleton must + // already be initialized. + // + assert (p.system || p.skeleton.has_value ()); - if (p.unhold ()) - act += "/unhold"; + // Initialize the skeleton if it is not initialized yet. + // + cfg = &(p.skeleton ? *p.skeleton : p.init_skeleton (o)); - act += ' ' + p.available_name_version_db (); - cause = p.required_by_dependents ? "required by" : "dependent of"; + need_prompt = true; + } - if (p.configure_only ()) - update_dependents = true; - } + if (p.unhold ()) + act += "/unhold"; - // Also list dependents for the newly built user-selected - // dependencies. - // - bool us (p.user_selection ()); - string rb; - if (!us || (!p.user_selection (hold_pkgs) && sp == nullptr)) - { - // Note: if we are ever tempted to truncate this, watch out for - // the --rebuild-checksum functionality which uses this. But then - // it's not clear this information is actually important: can a - // dependent-dependency structure change without any of the - // package versions changing? Doesn't feel like it should. + act += ' ' + p.available_name_version_db (); + cause = p.required_by_dependents ? "required by" : "dependent of"; + + if (p.configure_only ()) + update_dependents = true; + } + + // Also list dependents for the newly built user-selected + // dependencies. // - for (const package_key& pk: p.required_by) + bool us (p.user_selection ()); + string rb; + if (!us || (!p.user_selection (hold_pkgs) && sp == nullptr)) { - // Skip the command-line dependent. 
+ // Note: if we are ever tempted to truncate this, watch out for + // the --rebuild-checksum functionality which uses this. But + // then it's not clear this information is actually important: + // can a dependent-dependency structure change without any of + // the package versions changing? Doesn't feel like it should. // - if (!pk.name.empty ()) - rb += (rb.empty () ? " " : ", ") + pk.string (); + for (const package_version_key& pvk: p.required_by) + { + // Skip the command-line, etc dependents and don't print the + // package version (which is not always available; see + // build_package::required_by for details). + // + if (pvk.version) // Is it a real package? + { + rb += (rb.empty () ? " " : ", ") + + pvk.string (true /* ignore_version */); + } + } + + // If not user-selected, then there should be another (implicit) + // reason for the action. + // + assert (!rb.empty ()); } - // If not user-selected, then there should be another (implicit) - // reason for the action. - // - assert (!rb.empty ()); - } + if (!rb.empty ()) + act += " (" + cause + rb + ')'; - if (!rb.empty ()) - act += " (" + cause + rb + ')'; + if (cfg != nullptr && !cfg->empty_print ()) + { + ostringstream os; + cfg->print_config (os, o.print_only () ? " " : " "); + act += '\n'; + act += os.str (); + } - if (cfg != nullptr && !cfg->empty_print ()) - { - ostringstream os; - cfg->print_config (os, o.print_only () ? " " : " "); - act += '\n'; - act += os.str (); + if (!us) + need_prompt = true; } - - if (!us) - need_prompt = true; } if (first) @@ -4549,13 +7461,14 @@ namespace bpkg // Ok, we have "all systems go". The overall action plan is as follows. // - // 1. disfigure up/down-graded, reconfigured [left to right] - // 2. purge up/down-graded [right to left] - // 3.a fetch/unpack new, up/down-graded - // 3.b checkout new, up/down-graded - // 4. configure all - // 5. unhold unheld - // 6. build user selection [right to left] + // 1. sys-install not installed system/distribution + // 2. 
disfigure up/down-graded, reconfigured [left to right] + // 3. purge up/down-graded [right to left] + // 4.a fetch/unpack new, up/down-graded, replaced + // 4.b checkout new, up/down-graded, replaced + // 5. configure all + // 6. unhold unheld + // 7. build user selection [right to left] // // Note that for some actions, e.g., purge or fetch, the order is not // really important. We will, however, do it right to left since that @@ -4571,7 +7484,7 @@ namespace bpkg // bool noop (!execute_plan (o, pkgs, - false /* simulate */, + nullptr /* simulate */, find_prereq_database)); if (o.configure_only ()) @@ -4620,10 +7533,15 @@ namespace bpkg database& db (p.db); + // Note: don't update the re-evaluated and re-collected dependents + // unless they are reconfigured. + // if ((*p.action == build_package::adjust && p.reconfigure ()) || (*p.action == build_package::build && - (p.flags & (build_package::build_repoint | - build_package::build_reevaluate)) != 0)) + ((p.flags & build_package::build_repoint) != 0 || + ((p.flags & (build_package::build_reevaluate | + build_package::build_recollect)) != 0 && + p.reconfigure ())))) upkgs.push_back (pkg_command_vars {db.config_orig, !multi_config () && db.main (), p.selected, @@ -4646,13 +7564,16 @@ namespace bpkg static bool execute_plan (const pkg_build_options& o, build_package_list& build_pkgs, - bool simulate, + unsatisfied_dependents* simulate, const function<find_database_function>& fdb) { tracer trace ("execute_plan"); l4 ([&]{trace << "simulate: " << (simulate ? "yes" : "no");}); + // If unsatisfied dependents are specified then we are in the simulation + // mode and thus simulate can be used as bool. + bool r (false); uint16_t verb (!simulate ? bpkg::verb : 0); @@ -4663,6 +7584,40 @@ namespace bpkg size_t prog_i, prog_n, prog_percent; + // sys-install + // + // Install the system/distribution packages required by the respective + // system packages (see build_package::system_install() for details). 
+ // + if (!simulate && o.sys_install ()) + { + // Collect the names of all the system packages being managed by the + // system package manager (as opposed to user/fallback), suppressing + // duplicates. + // + vector<package_name> ps; + + for (build_package& p: build_pkgs) + { + if (p.system_status () && + find (ps.begin (), ps.end (), p.name ()) == ps.end ()) + { + ps.push_back (p.name ()); + } + } + + // Install the system/distribution packages. + // + if (!ps.empty ()) + { + // Otherwise, we wouldn't get any package statuses. + // + assert (sys_pkg_mgr && *sys_pkg_mgr != nullptr); + + (*sys_pkg_mgr)->install (ps); + } + } + // disfigure // // Note: similar code in pkg-drop. @@ -4689,10 +7644,9 @@ namespace bpkg // On the package reconfiguration we will try to resolve dependencies to // the same prerequisites (see pkg_configure() for details). For that, we - // will save prerequisites before disfiguring the dependents. Note, - // though, that this is not required for dependents with the collected - // prerequisites builds since the dependency alternatives are already - // selected for them. + // will save prerequisites before disfiguring a package. Note, though, + // that this is not required for the recursively collected packages since + // the dependency alternatives are already selected for them. // map<const build_package*, vector<package_name>> previous_prerequisites; @@ -4706,6 +7660,8 @@ namespace bpkg database& pdb (p.db); shared_ptr<selected_package>& sp (p.selected); + assert (sp != nullptr); // Shouldn't be here otherwise. + // Each package is disfigured in its own transaction, so that we // always leave the configuration in a valid state. // @@ -4717,7 +7673,7 @@ namespace bpkg bool external (false); if (!simulate) { - external = sp != nullptr && sp->external () && p.external (); + external = (sp->external () && p.external ()); // Reset the keep_out flag if the package being unpacked is not // external. 
@@ -4726,31 +7682,62 @@ namespace bpkg p.keep_out = false; } - if (*p.action != build_package::drop && - !p.dependencies && - !sp->prerequisites.empty ()) + // Save prerequisites before disfiguring the package. + // + // Note that we add the prerequisites list to the map regardless if + // there are any prerequisites or not to, in particular, indicate the + // package reconfiguration mode to the subsequent + // pkg_configure_prerequisites() call (see the function documentation + // for details). + // + if (*p.action != build_package::drop && !p.dependencies && !p.system) { vector<package_name>& ps (previous_prerequisites[&p]); - ps.reserve (sp->prerequisites.size ()); + if (!sp->prerequisites.empty ()) + { + ps.reserve (sp->prerequisites.size ()); - for (const auto& pp: sp->prerequisites) - ps.push_back (pp.first.object_id ()); + for (const auto& pp: sp->prerequisites) + ps.push_back (pp.first.object_id ()); + } } // For an external package being replaced with another external, keep // the configuration unless requested not to with --disfigure. // - // Note that for other cases the preservation of the configuration is - // still a @@ TODO (the idea is to use our config.config.{save,load} - // machinery). Also see "parallel" logic in package_skeleton. + bool disfigure (p.disfigure || !external); + + // If the skeleton was not initialized yet (this is an existing package + // reconfiguration and no configuration was printed as a part of the + // plan, etc), then initialize it now. Whether the skeleton is newly + // initialized or not, make sure that the current configuration is + // loaded, unless the package project is not being disfigured. // + if (*p.action != build_package::drop && !p.system) + { + if (!p.skeleton) + { + // If there is no available package specified for the build package + // object, then we need to find it (or create a transient one). + // + p.init_skeleton (o, + true /* load_old_dependent_config */, + (p.available == nullptr + ? 
find_available (o, pdb, sp) + : nullptr)); + } + + if (disfigure) + p.skeleton->load_old_config (); + } + // Commits the transaction. // pkg_disfigure (o, pdb, t, sp, !p.keep_out /* clean */, - p.disfigure || !external /* disfigure */, + disfigure, simulate); r = true; @@ -4885,9 +7872,11 @@ namespace bpkg } // Fetch or checkout if this is a new package or if we are - // up/down-grading. + // up/down-grading or replacing. // - if (sp == nullptr || sp->version != p.available_version ()) + if (sp == nullptr || + sp->version != p.available_version () || + p.replace ()) { sp = nullptr; // For the directory case below. @@ -4909,19 +7898,22 @@ namespace bpkg for (const package_location& l: ap->locations) { - const repository_location& rl ( - l.repository_fragment.load ()->location); - - if (!basis || rl.local ()) // First or local? + if (!rep_masked_fragment (l.repository_fragment)) { - basis = rl.basis (); + const repository_location& rl ( + l.repository_fragment.load ()->location); - if (rl.directory_based ()) - break; + if (!basis || rl.local ()) // First or local? + { + basis = rl.basis (); + + if (rl.directory_based ()) + break; + } } } - assert (basis); + assert (basis); // Shouldn't be here otherwise. // All calls commit the transaction. // @@ -5048,7 +8040,7 @@ namespace bpkg // Commits the transaction. // - sp = pkg_unpack (o, pdb, af.database (), t, ap->id.name, simulate); + sp = pkg_unpack (o, pdb, t, ap->id.name, simulate); if (result) text << "unpacked " << *sp << pdb; @@ -5099,14 +8091,51 @@ namespace bpkg return true; }; - if (progress) + // On the first pass collect all the build_package's to be configured and + // calculate their configure_prerequisites_result's. + // + struct configure_package { - prog_i = 0; - prog_n = static_cast<size_t> (count_if (build_pkgs.begin (), - build_pkgs.end (), - configure_pred)); - prog_percent = 100; - } + reference_wrapper<build_package> pkg; + + // These are unused for system packages. 
+ // + configure_prerequisites_result res; + build2::variable_overrides ovrs; + }; + vector<configure_package> configure_packages; + configure_packages.reserve (build_pkgs.size ()); + + // While at it also collect global configuration variable overrides from + // each configure_prerequisites_result::config_variables and merge them + // into configure_global_vars. + // + // @@ TODO: Note that the current global override semantics is quite + // broken in that we don't force reconfiguration of all the packages. + // +#ifndef BPKG_OUTPROC_CONFIGURE + strings configure_global_vars; +#endif + + // Return the "would be" state of packages that would be configured + // by this stage. + // + function<find_package_state_function> configured_state ( + [&configure_packages] (const shared_ptr<selected_package>& sp) + -> optional<pair<package_state, package_substate>> + { + for (const configure_package& cp: configure_packages) + { + const build_package& p (cp.pkg); + + if (p.selected == sp) + return make_pair ( + package_state::configured, + p.system ? package_substate::system : package_substate::none); + } + + return nullopt; + }); for (build_package& p: reverse_iterate (build_pkgs)) { @@ -5118,7 +8147,7 @@ namespace bpkg shared_ptr<selected_package>& sp (p.selected); const shared_ptr<available_package>& ap (p.available); - // Configure the package. + // Collect the package. // // At this stage the package is either selected, in which case it's a // source code one, or just available, in which case it is a system @@ -5130,7 +8159,6 @@ namespace bpkg assert (sp != nullptr || p.system); database& pdb (p.db); - transaction t (pdb, !simulate /* start */); // Show how we got here if things go wrong, for example selecting a @@ -5144,137 +8172,319 @@ namespace bpkg info << "while configuring " << p.name () << p.db; })); - auto prereqs = [&p, &previous_prerequisites] () - { - auto i (previous_prerequisites.find (&p)); - return i != previous_prerequisites.end () ? 
&i->second : nullptr; - }; - - // Note that pkg_configure() commits the transaction. - // + configure_prerequisites_result cpr; if (p.system) { + // We have no choice but to configure system packages on the first + // pass since otherwise there will be no selected package for + // pkg_configure_prerequisites() to find. Luckily they have no + // dependencies and so can be configured in any order. We will print + // their progress/result on the second pass in the proper order. + // + // Note: commits the transaction. + // sp = pkg_configure_system (ap->id.name, p.available_version (), pdb, t); } - else if (ap != nullptr) + else { - assert (*p.action == build_package::build); - - // If the package prerequisites builds are collected, then use the - // resulting package skeleton and the pre-selected dependency - // alternatives. + // Should only be called for packages whose prerequisites are saved. // - // Note that we may not collect the package prerequisites builds if - // the package is already configured but we still need to reconfigure - // it due, for example, to an upgrade of its dependency. In this case - // we pass to pkg_configure() the newly created package skeleton which - // contains the package configuration variables specified on the - // command line but (naturally) no reflection configuration variables. - // Note, however, that in this case pkg_configure() call will evaluate - // the reflect clauses itself and so the proper reflection variables - // will still end up in the package configuration. + auto prereqs = [&p, &previous_prerequisites] () + { + auto i (previous_prerequisites.find (&p)); + assert (i != previous_prerequisites.end ()); + return &i->second; + }; + + // In the simulation mode unconstrain all the unsatisfactory + // dependencies, if any, while configuring the dependent (see + // build_packages::collect_dependents() for details). 
// - // @@ Note that if we ever allow the user to override the alternative - // selection, this will break (and also if the user re-configures - // the package manually). Maybe that a good reason not to allow - // this? Or we could store this information in the database. + // Note: must be called at most once. // - if (p.dependencies) + auto unconstrain_deps = [simulate, + &p, + &trace, + deps = vector<package_key> ()] () mutable { - assert (p.skeleton); + if (simulate) + { + unsatisfied_dependent* ud ( + simulate->find_dependent (package_key (p.db, p.name ()))); - pkg_configure (o, - pdb, - t, - sp, - *p.dependencies, - &*p.alternatives, - move (*p.skeleton), - nullptr /* previous_prerequisites */, - p.disfigure, - simulate, - fdb); + if (ud != nullptr) + { + assert (deps.empty ()); + + deps.reserve (ud->ignored_constraints.size ()); + + for (const auto& c: ud->ignored_constraints) + { + l5 ([&]{trace << "while configuring dependent " << p.name () + << p.db << " in simulation mode unconstrain (" + << c.dependency << ' ' << c.constraint << ')';}); + + deps.emplace_back (c.dependency); + } + } + } + + return !deps.empty () ? &deps : nullptr; + }; + + if (ap != nullptr) + { + assert (*p.action == build_package::build); + + // If the package prerequisites builds are collected, then use the + // resulting package skeleton and the pre-selected dependency + // alternatives. + // + // Note that we may not collect the package prerequisites builds if + // the package is already configured but we still need to + // reconfigure it due, for example, to an upgrade of its dependency. + // In this case we pass to pkg_configure() the newly created package + // skeleton which contains the package configuration variables + // specified on the command line but (naturally) no reflection + // configuration variables. 
Note, however, that in this case + // pkg_configure() call will evaluate the reflect clauses itself and + // so the proper reflection variables will still end up in the + // package configuration. + // + // @@ Note that if we ever allow the user to override the + // alternative selection, this will break (and also if the user + // re-configures the package manually). Maybe that a good reason + // not to allow this? Or we could store this information in the + // database. + // + if (p.dependencies) + { + assert (p.skeleton); + + cpr = pkg_configure_prerequisites (o, + pdb, + t, + *p.dependencies, + &*p.alternatives, + move (*p.skeleton), + nullptr /* prev_prerequisites */, + simulate, + fdb, + configured_state, + unconstrain_deps ()); + } + else + { + assert (p.skeleton); // Must be initialized before disfiguring. + + cpr = pkg_configure_prerequisites (o, + pdb, + t, + ap->dependencies, + nullptr /* alternatives */, + move (*p.skeleton), + prereqs (), + simulate, + fdb, + configured_state, + unconstrain_deps ()); + } } - else + else // Existing dependent. { - assert (sp != nullptr); // See above. + // This is an adjustment of a dependent which cannot be system + // (otherwise it wouldn't be a dependent) and cannot become system + // (otherwise it would be a build). + // + assert (*p.action == build_package::adjust && !sp->system ()); + + // Must be in the unpacked state since it was disfigured on the + // first pass (see above). + // + assert (sp->state == package_state::unpacked); - // Note that the skeleton can be present if, for example, this is a - // dependency which configuration has been negotiated but it is not - // collected recursively since it has no buildfile clauses. + // The skeleton must be initialized before disfiguring and the + // package can't be system. 
// - if (!p.skeleton) - p.init_skeleton (o); + assert (p.skeleton && p.skeleton->available != nullptr); - pkg_configure (o, - pdb, - t, - sp, - ap->dependencies, - nullptr /* alternatives */, - move (*p.skeleton), - prereqs (), - p.disfigure, - simulate, - fdb); + const dependencies& deps (p.skeleton->available->dependencies); + + // @@ Note that on reconfiguration the dependent looses the + // potential configuration variables specified by the user on + // some previous build, which can be quite surprising. Should we + // store this information in the database? + // + // Note: this now works for external packages via package + // skeleton (which extracts user configuration). + // + cpr = pkg_configure_prerequisites (o, + pdb, + t, + deps, + nullptr /* alternatives */, + move (*p.skeleton), + prereqs (), + simulate, + fdb, + configured_state, + unconstrain_deps ()); + } + + t.commit (); + + if (verb >= 5 && !simulate && !cpr.config_variables.empty ()) + { + diag_record dr (trace); + + dr << sp->name << pdb << " configuration variables:"; + + for (const string& cv: cpr.config_variables) + dr << "\n " << cv; + } + + if (!simulate) + { +#ifndef BPKG_OUTPROC_CONFIGURE + auto& gvs (configure_global_vars); + + // Note that we keep global overrides in cpr.config_variables for + // diagnostics and skip them in var_override_function below. + // + for (const string& v: cpr.config_variables) + { + // Each package should have exactly the same set of global + // overrides by construction since we don't allow package- + // specific global overrides. + // + if (v[0] == '!') + { + if (find (gvs.begin (), gvs.end (), v) == gvs.end ()) + gvs.push_back (v); + } + } +#endif + // Add config.config.disfigure unless already disfigured (see the + // high-level pkg_configure() version for background). + // + if (ap == nullptr || !p.disfigure) + { + cpr.config_variables.push_back ( + "config.config.disfigure='config." + sp->name.variable () + "**'"); + } } } - else // Dependent. 
- { - // This is an adjustment of a dependent which cannot be system - // (otherwise it wouldn't be a dependent) and cannot become system - // (otherwise it would be a build). - // - assert (*p.action == build_package::adjust && - !p.system && - !sp->system ()); - // Must be in the unpacked state since it was disfigured on the first - // pass (see above). - // - assert (sp->state == package_state::unpacked); + configure_packages.push_back (configure_package {p, move (cpr), {}}); + } - // Initialize the skeleton if it is not initialized yet. - // - // Note that the skeleton can only be present here if it was - // initialized during the preparation of the plan and so this plan - // execution is not simulated (see above for details). - // - // Also note that there is no available package specified for the - // build package object here and so we need to find it (or create a - // transient one). - // - assert (p.available == nullptr && (!p.skeleton || !simulate)); + // Reuse the build state to avoid reloading the dependencies over and over + // again. This is a valid optimization since we are configuring in the + // dependency-dependent order. + // + unique_ptr<build2::context> configure_ctx; - if (!p.skeleton) - p.init_skeleton (o, find_available (o, pdb, sp)); +#ifndef BPKG_OUTPROC_CONFIGURE + if (!simulate) + { + using build2::context; + using build2::variable_override; - assert (p.skeleton->available != nullptr); // Can't be system. + function<context::var_override_function> vof ( + [&configure_packages] (context& ctx, size_t& i) + { + for (configure_package& cp: configure_packages) + { + for (const string& v: cp.res.config_variables) + { + if (v[0] == '!') // Skip global overrides (see above). 
+ continue; - const dependencies& deps (p.skeleton->available->dependencies); + pair<char, variable_override> p ( + ctx.parse_variable_override (v, i++, false /* buildspec */)); - // @@ Note that on reconfiguration the dependent looses the potential - // configuration variables specified by the user on some previous - // build, which can be quite surprising. Should we store this - // information in the database? + variable_override& vo (p.second); + + // @@ TODO: put absolute scope overrides into global_vars. + // + assert (!(p.first == '!' || (vo.dir && vo.dir->absolute ()))); + + cp.ovrs.push_back (move (vo)); + } + } + }); + + configure_ctx = pkg_configure_context ( + o, move (configure_global_vars), vof); + + // Only global in configure_global_vars. + // + assert (configure_ctx->var_overrides.empty ()); + } +#endif + + if (progress) + { + prog_i = 0; + prog_n = configure_packages.size (); + prog_percent = 100; + } + + for (configure_package& cp: configure_packages) + { + build_package& p (cp.pkg); + + const shared_ptr<selected_package>& sp (p.selected); + + // Configure the package (system already configured). + // + // NOTE: remember to update the preparation of the plan to be presented + // to the user if changing anything here. + // + database& pdb (p.db); + + if (!p.system) + { + const shared_ptr<available_package>& ap (p.available); + + transaction t (pdb, !simulate /* start */); + + // Show how we got here if things go wrong. // - // I believe this now works for external packages via package - // skeleton (which extracts user configuration). + auto g ( + make_exception_guard ( + [&p] () + { + info << "while configuring " << p.name () << p.db; + })); + + // Note that pkg_configure() commits the transaction. 
// - pkg_configure (o, - pdb, - t, - sp, - deps, - nullptr /* alternatives */, - move (*p.skeleton), - prereqs (), - false /* disfigured */, - simulate, - fdb); + if (ap != nullptr) + { + pkg_configure (o, + pdb, + t, + sp, + move (cp.res), + configure_ctx, + cp.ovrs, + simulate); + } + else // Dependent. + { + pkg_configure (o, + pdb, + t, + sp, + move (cp.res), + configure_ctx, + cp.ovrs, + simulate); + } } r = true; @@ -5299,6 +8509,10 @@ namespace bpkg } } +#ifndef BPKG_OUTPROC_CONFIGURE + configure_ctx.reset (); // Free. +#endif + // Clear the progress if shown. // if (progress) |