-rw-r--r--  bpkg/auth.cxx                 8
-rw-r--r--  bpkg/buildfile               11
-rw-r--r--  bpkg/fetch-git.cxx          212
-rwxr-xr-x  bpkg/odb.sh                  11
-rw-r--r--  bpkg/pkg-build-collect.cxx  153
-rw-r--r--  bpkg/pkg-build-collect.hxx   16
-rw-r--r--  bpkg/pkg-build.cxx          187
-rw-r--r--  bpkg/rep-fetch.cxx            4
-rw-r--r--  bpkg/rep-mask.cxx            11
-rw-r--r--  bpkg/rep-remove.cxx          55
-rw-r--r--  bpkg/rep-remove.hxx           7
-rw-r--r--  bpkg/version.hxx.in           8
-rw-r--r--  manifest                      3
-rw-r--r--  repositories.manifest        18
-rw-r--r--  tests/pkg-build.testscript  168
15 files changed, 630 insertions, 242 deletions
diff --git a/bpkg/auth.cxx b/bpkg/auth.cxx
index 191da0a..9fb4e20 100644
--- a/bpkg/auth.cxx
+++ b/bpkg/auth.cxx
@@ -96,7 +96,7 @@ namespace bpkg
// Return true if some openssl commands (openssl x509 -fingerprint, etc) may
// issue the 'Reading certificate from stdin since no -in or -new option is
// given' warning. This is the case for the openssl version in the [3.2.0
- // 3.3.0) range (see GH issue #353 for details).
+ // 3.2.2) range (see GH issue #353 for details).
//
// Note that there is no easy way to suppress this warning on Windows and
// thus we don't define this function there.
@@ -105,8 +105,12 @@ namespace bpkg
static inline bool
openssl_warn_stdin (const common_options& co)
{
+ // Use 3.2.3 in the comparison rather than 3.2.2, to make sure that, for
+ // example, 3.2.2-dev (denoting a pre-release of 3.2.2) also falls into the
+ // range.
+ //
const semantic_version& v (openssl_version (co));
- return v >= semantic_version {3, 2, 0} && v < semantic_version {3, 3, 0};
+ return v >= semantic_version {3, 2, 0} && v < semantic_version {3, 2, 3};
}
#endif
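
The rationale for the 3.2.3 upper bound can be illustrated with a minimal, self-contained sketch. It assumes, as the comment above implies, that a suffixed snapshot such as 3.2.2-dev compares greater than the plain 3.2.2 in the ordering used here; the simplified ver type below is a stand-in and not the libbutl semantic_version semantics verbatim.

// Sketch only: a simplified version type where a non-empty suffix sorts
// after the corresponding plain release (an assumption taken from the
// comment in the patch).
#include <tuple>
#include <string>
#include <cassert>

struct ver
{
  int major, minor, patch;
  std::string extra; // For example, "dev".

  bool operator< (const ver& o) const
  {
    return std::tie (major, minor, patch, extra) <
           std::tie (o.major, o.minor, o.patch, o.extra);
  }
};

static bool
warn_stdin (const ver& v)
{
  return !(v < ver {3, 2, 0, ""}) && v < ver {3, 2, 3, ""};
}

int main ()
{
  assert (warn_stdin ({3, 2, 0, ""}));    // Affected release.
  assert (warn_stdin ({3, 2, 2, "dev"})); // Snapshot; a 3.2.2 bound would miss it.
  assert (!warn_stdin ({3, 3, 0, ""}));   // Unaffected release.
}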
diff --git a/bpkg/buildfile b/bpkg/buildfile
index 0ba60dc..8836712 100644
--- a/bpkg/buildfile
+++ b/bpkg/buildfile
@@ -15,15 +15,12 @@ import libs = build2%lib{build2}
for m: bash bin c cc cli cxx in version
import libs += build2%lib{build2-$m}
+# @@ TMP We require libsqlite3 to be an interface dependency of libbutl-odb
+# only for the database migrations to schema versions 13 and 14.
+#
import libs += libbpkg%lib{bpkg}
import libs += libbutl%lib{butl}
-import libs += libodb%lib{odb}
-import libs += libodb-sqlite%lib{odb-sqlite}
-
-# @@ TMP Only required for the database migrations to schema versions 13 and
-# 14.
-#
-import libs += libsqlite3%lib{sqlite3}
+import libs += libbutl%lib{butl-odb}
options_topics = \
bpkg-options \
diff --git a/bpkg/fetch-git.cxx b/bpkg/fetch-git.cxx
index d2c30a1..47b65da 100644
--- a/bpkg/fetch-git.cxx
+++ b/bpkg/fetch-git.cxx
@@ -6,7 +6,7 @@
#include <map>
#include <libbutl/git.hxx>
-#include <libbutl/filesystem.hxx> // path_entry
+#include <libbutl/filesystem.hxx> // path_entry(), try_rmsymlink()
#include <libbutl/path-pattern.hxx>
#include <libbutl/semantic-version.hxx>
#include <libbutl/standard-version.hxx> // parse_standard_version()
@@ -927,11 +927,19 @@ namespace bpkg
static repository_refs_map repository_refs;
- // It is assumed that sense_capabilities() function was already called for
- // the URL.
+ // If the advertized refs/commits are already cached for the specified URL,
+ // then return them from the cache. Otherwise, query them and cache. In the
+ // latter case, optionally, probe the URL first, calling the specified probe
+ // function. Otherwise (the probe function is not specified), it is assumed
+ // that the URL has already been probed (sense_capabilities() function was
+ // already called for this URL, etc).
//
+ using probe_function = void ();
+
static const refs&
- load_refs (const common_options& co, const repository_url& url)
+ load_refs (const common_options& co,
+ const repository_url& url,
+ const function<probe_function>& probe = nullptr)
{
tracer trace ("load_refs");
@@ -944,6 +952,9 @@ namespace bpkg
if ((verb && !co.no_progress ()) || co.progress ())
text << "querying " << url;
+ if (probe)
+ probe ();
+
refs rs;
for (;;) // Breakout loop.
@@ -1175,31 +1186,29 @@ namespace bpkg
return *cap;
};
- auto references = [&co, &url, &caps] (const string& refname,
- bool abbr_commit)
+ function<probe_function> probe ([&caps] () {caps ();});
+
+ auto references = [&co, &url, &probe] (const string& refname,
+ bool abbr_commit)
-> refs::search_result
{
// Make sure the URL is probed before running git-ls-remote (see
// load_refs() for details).
//
- caps ();
-
- return load_refs (co, url ()).search_names (refname, abbr_commit);
+ return load_refs (co, url (), probe).search_names (refname, abbr_commit);
};
// Return the default reference set (see repository-types(1) for details).
//
- auto default_references = [&co, &url, &caps] () -> refs::search_result
+ auto default_references = [&co, &url, &probe] () -> refs::search_result
{
// Make sure the URL is probed before running git-ls-remote (see
// load_refs() for details).
//
- caps ();
-
refs::search_result r;
vector<standard_version> vs; // Parallel to search_result.
- for (const ref& rf: load_refs (co, url ()))
+ for (const ref& rf: load_refs (co, url (), probe))
{
if (!rf.peeled && rf.name.compare (0, 11, "refs/tags/v") == 0)
{
@@ -1342,6 +1351,9 @@ namespace bpkg
{
// Reduce the reference to the commit id.
//
+ // Note that it is assumed that the URL has already been probed by
+ // the above default_references() or references() call.
+ //
const string& c (load_refs (co, url ()).peel (r).commit);
if (!rf.exclusion)
@@ -1358,18 +1370,23 @@ namespace bpkg
remove_spec (c);
}
}
+ //
// Check if this is a commit exclusion and remove the corresponding
// fetch spec if that's the case.
//
else if (rf.exclusion)
+ {
remove_spec (*rf.commit);
-
+ }
+ //
// Check if the commit is already fetched and, if that's the case, save
// it, indicating that no fetch is required.
//
else if (commit_fetched (co, dir, *rf.commit))
+ {
add_spec (*rf.commit);
-
+ }
+ //
// If the shallow fetch is possible for the commit, then we fetch it.
//
else if (shallow ())
@@ -1378,6 +1395,7 @@ namespace bpkg
add_spec (*rf.commit, strings ({*rf.commit}), true /* shallow */);
}
+ //
// If the shallow fetch is not possible for the commit but the refname
// containing the commit is specified, then we fetch the whole history
// of references the refname translates to.
@@ -1402,6 +1420,7 @@ namespace bpkg
add_spec (*rf.commit, move (specs)); // Fetch deep.
}
+ //
// Otherwise, if the refname is not specified and the commit is not
// advertised, we have to fetch the whole repository history.
//
@@ -2412,64 +2431,6 @@ namespace bpkg
}
void
- git_checkout (const common_options& co,
- const dir_path& dir,
- const string& commit)
- {
- // For some (probably valid) reason the hard reset command doesn't remove
- // a submodule directory that is not plugged into the repository anymore.
- // It also prints the non-suppressible warning like this:
- //
- // warning: unable to rmdir libbar: Directory not empty
- //
- // That's why we run the clean command afterwards. It may also be helpful
- // if we produce any untracked files in the tree between checkouts down
- // the road.
- //
- if (!run_git (co,
- co.git_option (),
- "-C", dir,
- "reset",
- "--hard",
- verb < 2 ? "-q" : nullptr,
- commit))
- fail << "unable to reset to " << commit << endg;
-
- if (!run_git (co,
- co.git_option (),
- "-C", dir,
- "clean",
- "-d",
- "-x",
- "-ff",
- verb < 2 ? "-q" : nullptr))
- fail << "unable to clean " << dir << endg;
-
- // Iterate over the registered submodules and "deinitialize" those whose
- // tip commit has changed.
- //
- // Note that not doing so will make git treat the repository worktree as
- // modified (new commits in submodule). Also the caller may proceed with
- // an inconsistent repository, having no indication that they need to
- // re-run git_checkout_submodules().
- //
- for (const submodule& sm:
- find_submodules (co, dir, dir_path () /* prefix */))
- {
- dir_path sd (dir / sm.path); // Submodule full directory path.
-
- optional<string> commit (submodule_commit (co, sd));
-
- // Note that we may re-initialize the submodule later due to the empty
- // directory (see checkout_submodules() for details). Seems that git
- // has no problem with such a re-initialization.
- //
- if (commit && *commit != sm.commit)
- rm_r (sd, false /* dir_itself */);
- }
- }
-
- void
git_checkout_submodules (const common_options& co,
const repository_location& rl,
const dir_path& dir)
@@ -2593,6 +2554,111 @@ namespace bpkg
submodule_failure ("unable to list repository files", prefix);
}
+ static void
+ git_checkout (const common_options& co,
+ const dir_path& dir,
+ const string& commit,
+#ifdef _WIN32
+ const dir_path& prefix)
+ {
+ // Note that on Windows git may incorrectly deduce the type of a symlink
+ // it needs to create. Thus, it is recommended to specify the link type
+ // for directory symlinks in the project's .gitattributes file (see the
+ // "Using Symlinks in build2 Projects" article for background). However,
+ // it turns out that if, for example, such a type has not been specified
+ // for some early package version and this have been fixed in some later
+ // version, then it may still cause problems even when this later package
+ // version is being built. That happens because during the git repository
+ // fetch, to produce the available packages list, bpkg sequentially checks
+ // out multiple package versions. Git, on the other hand, does not bother
+ // re-creating an existing symlink on check out (or git-reset which we
+ // use) even though .gitattributes indicates that its type has changed.
+ // Thus, on Windows, let's just remove all the existing symlinks prior to
+ // running git-reset.
+ //
+ for (const auto& l: find_symlinks (co, dir, prefix))
+ {
+ // Note that the symlinks may be filesystem-agnostic (see
+ // fixup_worktree() for details) and thus we check the types of the
+ // filesystem entries prior to their removal. Also note that the
+ // try_rmsymlink() implementation doesn't actually distinguish between
+ // the directory and file symlinks and thus we always remove them as the
+ // file symlinks.
+ //
+ path p (dir / l.first);
+
+ pair<bool, entry_stat> e (
+ path_entry (p, false /* follow_symlink */, true /* ignore_error */));
+
+ if (e.first && e.second.type == entry_type::symlink)
+ try_rmsymlink (p, false /* dir */, true /* ignore_error */);
+ }
+#else
+ const dir_path&)
+ {
+#endif
+
+ // For some (probably valid) reason the hard reset command doesn't remove
+ // a submodule directory that is not plugged into the repository anymore.
+ // It also prints the non-suppressible warning like this:
+ //
+ // warning: unable to rmdir libbar: Directory not empty
+ //
+ // That's why we run the clean command afterwards. It may also be helpful
+ // if we produce any untracked files in the tree between checkouts down
+ // the road.
+ //
+ if (!run_git (co,
+ co.git_option (),
+ "-C", dir,
+ "reset",
+ "--hard",
+ verb < 2 ? "-q" : nullptr,
+ commit))
+ fail << "unable to reset to " << commit << endg;
+
+ if (!run_git (co,
+ co.git_option (),
+ "-C", dir,
+ "clean",
+ "-d",
+ "-x",
+ "-ff",
+ verb < 2 ? "-q" : nullptr))
+ fail << "unable to clean " << dir << endg;
+
+ // Iterate over the registered submodules and "deinitialize" those whose
+ // tip commit has changed.
+ //
+ // Note that not doing so will make git treat the repository worktree as
+ // modified (new commits in submodule). Also the caller may proceed with
+ // an inconsistent repository, having no indication that they need to
+ // re-run git_checkout_submodules().
+ //
+ for (const submodule& sm:
+ find_submodules (co, dir, dir_path () /* prefix */))
+ {
+ dir_path sd (dir / sm.path); // Submodule full directory path.
+
+ optional<string> commit (submodule_commit (co, sd));
+
+ // Note that we may re-initialize the submodule later due to the empty
+ // directory (see checkout_submodules() for details). Seems that git
+ // has no problem with such a re-initialization.
+ //
+ if (commit && *commit != sm.commit)
+ rm_r (sd, false /* dir_itself */);
+ }
+ }
+
+ void
+ git_checkout (const common_options& co,
+ const dir_path& dir,
+ const string& commit)
+ {
+ git_checkout (co, dir, commit, dir_path () /* prefix */);
+ }
+
// Verify symlinks in a working tree of a top repository or submodule,
// recursively.
//
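
The Windows-specific part of the new git_checkout() overload above boils down to: enumerate the symlinks in the worktree and remove the corresponding filesystem entries before running git-reset, so that git re-creates them with the (possibly changed) type from .gitattributes. Below is a standalone sketch of that idea using std::filesystem instead of the find_symlinks()/try_rmsymlink() helpers from the patch; the directory walk and the .git exclusion are assumptions of this sketch, not the patch's logic verbatim.

#include <filesystem>
#include <system_error>

namespace fs = std::filesystem;

// Sketch: remove every symlink under a worktree directory so that a
// subsequent "git reset --hard" re-creates it with the correct type.
//
static void
remove_worktree_symlinks (const fs::path& dir)
{
  std::error_code ec;

  for (fs::recursive_directory_iterator i (
         dir, fs::directory_options::skip_permission_denied, ec), e;
       i != e && !ec;
       i.increment (ec))
  {
    const fs::directory_entry& de (*i);

    // Don't descend into the .git directory.
    //
    if (de.is_directory (ec) && de.path ().filename () == ".git")
    {
      i.disable_recursion_pending ();
      continue;
    }

    // Check the entry type without following the link and remove the entry
    // if it is a symlink (errors are ignored, as in the patch).
    //
    if (de.is_symlink (ec))
      fs::remove (de.path (), ec);
  }
}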
diff --git a/bpkg/odb.sh b/bpkg/odb.sh
index 75c6d2d..1387773 100755
--- a/bpkg/odb.sh
+++ b/bpkg/odb.sh
@@ -16,8 +16,9 @@ if test -d ../.bdep; then
sed -r -ne 's#^(@[^ ]+ )?([^ ]+)/ .*default.*$#\2#p')"
fi
- inc+=("-I$(echo "$cfg"/libodb-[1-9]*/)")
- inc+=("-I$(echo "$cfg"/libodb-sqlite-[1-9]*/)")
+ # Note: there is nothing generated in libbutl-odb.
+ #
+ inc+=("-I../../libbutl/libbutl-odb")
inc+=("-I$cfg/libbutl")
inc+=("-I../../libbutl")
@@ -30,11 +31,7 @@ sed -r -ne 's#^(@[^ ]+ )?([^ ]+)/ .*default.*$#\2#p')"
else
- inc+=("-I$HOME/work/odb/builds/default/libodb-sqlite-default")
- inc+=("-I$HOME/work/odb/libodb-sqlite")
-
- inc+=("-I$HOME/work/odb/builds/default/libodb-default")
- inc+=("-I$HOME/work/odb/libodb")
+ inc+=("-I../../libbutl/libbutl-odb")
inc+=(-I.. -I../../libbpkg -I../../libbutl)
diff --git a/bpkg/pkg-build-collect.cxx b/bpkg/pkg-build-collect.cxx
index 6f1195c..e99557a 100644
--- a/bpkg/pkg-build-collect.cxx
+++ b/bpkg/pkg-build-collect.cxx
@@ -1490,78 +1490,87 @@ namespace bpkg
// applied. Ignore the replacement if its version doesn't satisfy the
// dependency constraints specified by the caller. Also ignore if this is
// a drop and the required-by package names of the specified build package
- // object have the "required by dependents" semantics
+ // object have the "required by dependents" semantics.
//
auto vi (replaced_vers.find (pk));
+ const version* replacement_version (nullptr);
- if (vi != replaced_vers.end () && !vi->second.replaced)
+ if (vi != replaced_vers.end ())
{
- l5 ([&]{trace << "apply version replacement for "
- << pkg.available_name_version_db ();});
-
replaced_version& v (vi->second);
if (v.available != nullptr)
+ replacement_version = (v.system
+ ? v.available->system_version (pk.db)
+ : &v.available->version);
+
+ if (!vi->second.replaced)
{
- const version& rv (v.system
- ? *v.available->system_version (pk.db)
- : v.available->version);
+ l5 ([&]{trace << "apply version replacement for "
+ << pkg.available_name_version_db ();});
- bool replace (true);
- for (const constraint_type& c: pkg.constraints)
+ if (v.available != nullptr)
{
- if (!satisfies (rv, c.value))
+ assert (replacement_version != nullptr);
+
+ const version& rv (*replacement_version);
+
+ bool replace (true);
+ for (const constraint_type& c: pkg.constraints)
{
- replace = false;
+ if (!satisfies (rv, c.value))
+ {
+ replace = false;
- l5 ([&]{trace << "replacement to " << rv << " is denied since "
- << c.dependent << " depends on (" << pk.name << ' '
- << c.value << ')';});
+ l5 ([&]{trace << "replacement to " << rv << " is denied since "
+ << c.dependent << " depends on (" << pk.name << ' '
+ << c.value << ')';});
+ }
}
- }
- if (replace)
- {
- v.replaced = true;
+ if (replace)
+ {
+ v.replaced = true;
- pkg.available = v.available;
- pkg.repository_fragment = v.repository_fragment;
- pkg.system = v.system;
+ pkg.available = v.available;
+ pkg.repository_fragment = v.repository_fragment;
+ pkg.system = v.system;
- l5 ([&]{trace << "replacement: "
- << pkg.available_name_version_db ();});
+ l5 ([&]{trace << "replacement: "
+ << pkg.available_name_version_db ();});
+ }
}
- }
- else
- {
- if (!pkg.required_by_dependents)
+ else
{
- v.replaced = true;
+ if (!pkg.required_by_dependents)
+ {
+ v.replaced = true;
- l5 ([&]{trace << "replacement: drop";});
+ l5 ([&]{trace << "replacement: drop";});
- // We shouldn't be replacing a package build with the drop if someone
- // depends on this package.
- //
- assert (pkg.selected != nullptr);
+ // We shouldn't be replacing a package build with the drop if someone
+ // depends on this package.
+ //
+ assert (pkg.selected != nullptr);
- collect_drop (options, pkg.db, pkg.selected, replaced_vers);
- return nullptr;
- }
- else
- {
- assert (!pkg.required_by.empty ());
+ collect_drop (options, pkg.db, pkg.selected, replaced_vers);
+ return nullptr;
+ }
+ else
+ {
+ assert (!pkg.required_by.empty ());
- l5 ([&]
- {
- diag_record dr (trace);
- dr << "replacement to drop is denied since " << pk
- << " is required by ";
- for (auto b (pkg.required_by.begin ()), i (b);
- i != pkg.required_by.end ();
- ++i)
- dr << (i != b ? ", " : "") << *i;
- });
+ l5 ([&]
+ {
+ diag_record dr (trace);
+ dr << "replacement to drop is denied since " << pk
+ << " is required by ";
+ for (auto b (pkg.required_by.begin ()), i (b);
+ i != pkg.required_by.end ();
+ ++i)
+ dr << (i != b ? ", " : "") << *i;
+ });
+ }
}
}
}
@@ -1629,19 +1638,28 @@ namespace bpkg
build_package* p2 (&pkg);
// Pick with the following preference order: user selection over
- // implicit one, source package over a system one, newer version over
- // an older one. So get the preferred into p1 and the other into p2.
+ // implicit one, source package over a system one, replacement version
+ // over a non-replacement one, newer version over an older one. So get
+ // the preferred into p1 and the other into p2.
//
{
+ const version& v1 (p1->available_version ());
+ const version& v2 (p2->available_version ());
+
int us (p1->user_selection () - p2->user_selection ());
int sf (p1->system - p2->system);
+ int rv (replacement_version != nullptr
+ ? (v1 == *replacement_version) - (v2 == *replacement_version)
+ : 0);
if (us < 0 ||
(us == 0 && sf > 0) ||
(us == 0 &&
sf == 0 &&
- p2->available_version () > p1->available_version ()))
+ (rv < 0 || (rv == 0 && v2 > v1))))
+ {
swap (p1, p2);
+ }
}
// If the versions differ, pick the satisfactory one and if both are
@@ -3349,7 +3367,7 @@ namespace bpkg
pdb,
nm,
pkg.available_version (),
- false /* selected_dependent */);
+ false /* existing_dependent */);
if (!satisfies (v2, c1.value))
{
@@ -3522,7 +3540,7 @@ namespace bpkg
pdb,
nm,
pkg.available_version (),
- false /* selected_dependent */);
+ false /* existing_dependent */);
// Now collect this prerequisite. If it was actually collected
// (i.e., it wasn't already there) and we are forcing a downgrade
@@ -7490,11 +7508,16 @@ namespace bpkg
{
using constraint_type = build_package::constraint_type;
+ // Pre-entered entries are always converted to adjustments (see
+ // above).
+ //
+ assert (dp.action);
+
constraint_type c (move (*dc),
ddb,
move (dn),
dp.selected->version,
- true /* selected_dependent */);
+ *dp.action != build_package::build);
if (find_if (p.constraints.begin (), p.constraints.end (),
[&c] (const constraint_type& v)
@@ -7539,7 +7562,7 @@ namespace bpkg
const build_package& p,
string& indent,
set<package_key>& printed,
- optional<bool> selected_dependent) const
+ optional<bool> existing_dependent) const
{
using constraint_type = build_package::constraint_type;
@@ -7555,8 +7578,8 @@ namespace bpkg
for (const constraint_type& c: cs)
{
- if (!selected_dependent ||
- *selected_dependent == c.selected_dependent)
+ if (!existing_dependent ||
+ *existing_dependent == c.existing_dependent)
{
if (const build_package* d = dependent_build (c))
{
@@ -7564,7 +7587,7 @@ namespace bpkg
<< ' ' << c.value << ')';
indent += " ";
- print_constraints (dr, *d, indent, printed, selected_dependent);
+ print_constraints (dr, *d, indent, printed, existing_dependent);
indent.resize (indent.size () - 2);
}
else
@@ -7577,8 +7600,8 @@ namespace bpkg
{
for (const constraint_type& c: cs)
{
- if (!selected_dependent ||
- *selected_dependent == c.selected_dependent)
+ if (!existing_dependent ||
+ *existing_dependent == c.existing_dependent)
{
dr << '\n' << indent << "...";
break;
@@ -7593,11 +7616,11 @@ namespace bpkg
const package_key& pk,
string& indent,
set<package_key>& printed,
- optional<bool> selected_dependent) const
+ optional<bool> existing_dependent) const
{
const build_package* p (entered_build (pk));
assert (p != nullptr); // Expected to be collected.
- print_constraints (dr, *p, indent, printed, selected_dependent);
+ print_constraints (dr, *p, indent, printed, existing_dependent);
}
void build_packages::
@@ -7964,7 +7987,7 @@ namespace bpkg
dpt.db,
dpt.name,
*dpt.version,
- true /* selected_package */);
+ true /* existing_package */);
}
// Note: not recursive.
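
The candidate preference logic in the pkg-build-collect.cxx hunk above is a lexicographic comparison over several criteria, implemented by swapping the two candidates so that p1 ends up preferred. Here is a condensed standalone sketch; the candidate struct and integer version are simplified stand-ins for build_package and its version type.

// Sketch of the preference order used above: user selection over implicit,
// source over system, replacement version over non-replacement, newer
// version over older.
#include <utility>

struct candidate
{
  bool user_selection; // Explicitly requested on the command line.
  bool system;         // System (as opposed to source) package.
  int  version;        // Stand-in for the available package version.
};

// Order p1/p2 so that p1 refers to the preferred candidate.
//
static void
prefer (candidate*& p1, candidate*& p2, const int* replacement_version)
{
  int us (p1->user_selection - p2->user_selection);
  int sf (p1->system - p2->system);
  int rv (replacement_version != nullptr
          ? (p1->version == *replacement_version) -
            (p2->version == *replacement_version)
          : 0);

  if (us < 0 ||                                   // p2 user-selected, p1 not.
      (us == 0 && sf > 0) ||                      // p1 system, p2 source.
      (us == 0 && sf == 0 &&
       (rv < 0 ||                                 // p2 is the replacement.
        (rv == 0 && p2->version > p1->version)))) // p2 is newer.
    std::swap (p1, p2);
}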
diff --git a/bpkg/pkg-build-collect.hxx b/bpkg/pkg-build-collect.hxx
index f84c86f..5c8a635 100644
--- a/bpkg/pkg-build-collect.hxx
+++ b/bpkg/pkg-build-collect.hxx
@@ -221,9 +221,9 @@ namespace bpkg
package_version_key dependent;
// False for non-packages. Otherwise, indicates whether the constraint
- // comes from the selected dependent or not.
+ // comes from the existing rather than the being built dependent.
//
- bool selected_dependent;
+ bool existing_dependent;
// Create constraint for a package dependent.
//
@@ -231,17 +231,17 @@ namespace bpkg
database& db,
package_name nm,
version ver,
- bool s)
+ bool e)
: value (move (v)),
dependent (db, move (nm), move (ver)),
- selected_dependent (s) {}
+ existing_dependent (e) {}
// Create constraint for a non-package dependent.
//
constraint_type (version_constraint v, database& db, string nm)
: value (move (v)),
dependent (db, move (nm)),
- selected_dependent (false) {}
+ existing_dependent (false) {}
};
vector<constraint_type> constraints;
@@ -1683,7 +1683,7 @@ namespace bpkg
// constraints for the same package twice, printing "..." instead. Noop if
// there are no constraints for this package.
//
- // Optionally, only print constraints from the selected or being built
+ // Optionally, only print constraints from the existing or being built
// dependents (see build_package::constraint_type for details).
//
void
@@ -1691,14 +1691,14 @@ namespace bpkg
const build_package&,
string& indent,
std::set<package_key>& printed,
- optional<bool> selected_dependent = nullopt) const;
+ optional<bool> existing_dependent = nullopt) const;
void
print_constraints (diag_record&,
const package_key&,
string& indent,
std::set<package_key>& printed,
- optional<bool> selected_dependent = nullopt) const;
+ optional<bool> existing_dependent = nullopt) const;
// Verify that builds ordering is consistent across all the data
// structures and the ordering expectations are fulfilled (real build
diff --git a/bpkg/pkg-build.cxx b/bpkg/pkg-build.cxx
index fac79c2..6a9ad7d 100644
--- a/bpkg/pkg-build.cxx
+++ b/bpkg/pkg-build.cxx
@@ -2097,6 +2097,8 @@ namespace bpkg
// Specifically, try to find the best available package version considering
// all the imposed constraints as per unsatisfied_dependents description. If
// succeed, return the command line adjustment reflecting the replacement.
+ // If allow_downgrade is false, then don't return a downgrade adjustment for
+ // the package, unless it is being deorphaned.
//
// Notes:
//
@@ -2133,9 +2135,10 @@ namespace bpkg
// requested to be upgraded, patched, and/or deorphaned, then we
// shouldn't be silently up/down-grading it.
//
- optional<cmdline_adjustment>
+ static optional<cmdline_adjustment>
try_replace_dependency (const common_options& o,
const build_package& p,
+ bool allow_downgrade,
const build_packages& pkgs,
const vector<build_package>& hold_pkgs,
const dependency_packages& dep_pkgs,
@@ -2423,7 +2426,7 @@ namespace bpkg
{
r = false;
- if (c.dependent.version && !c.selected_dependent)
+ if (c.dependent.version && !c.existing_dependent)
{
package_key pk (c.dependent.db, c.dependent.name);
@@ -2473,18 +2476,21 @@ namespace bpkg
// then the selected one, then what we currently have is the best that
// we can get. Thus, we use the selected version as a replacement,
// unless it doesn't satisfy all the constraints or we are deorphaning.
+ // Bail out if we cannot stay with the selected version and downgrade is
+ // not allowed.
//
if (constraint == nullptr && sp != nullptr)
{
const version& sv (sp->version);
- if (av < sv && !sp->system () && !p.deorphan)
+ if (av < sv && !p.deorphan)
{
- // Only consider the selected package if its version is satisfactory
- // for its new dependents (note: must be checked first since has a
- // byproduct), differs from the version being replaced, and was
- // never used for the same command line (see above for details).
+ // Only consider keeping the selected non-system package if its
+ // version is satisfactory for its new dependents (note: must be
+ // checked first since it has a byproduct), differs from the version
+ // being replaced, and was never used for the same command line (see
+ // above for details).
//
- if (satisfactory (sv) && sv != ver)
+ if (!sp->system () && satisfactory (sv) && sv != ver)
{
if (!cmdline_adjs.tried_earlier (db, nm, sv))
{
@@ -2493,9 +2499,17 @@ namespace bpkg
}
else
l5 ([&]{trace << "selected package replacement "
- << package_version_key (db, nm, sp->version)
- << " tried earlier for same command line, "
- << "skipping";});
+ << package_version_key (db, nm, sv) << " tried "
+ << "earlier for same command line, skipping";});
+ }
+
+ if (!allow_downgrade)
+ {
+ l5 ([&]{trace << "downgrade for "
+ << package_version_key (db, nm, sv) << " is not "
+ << "allowed, bailing out";});
+
+ break;
}
}
}
@@ -2693,11 +2707,12 @@ namespace bpkg
// of the specified dependency with a different available version,
// satisfactory for all its new and existing dependents (if any). Return the
// command line adjustment if such a replacement is deduced and nullopt
- // otherwise. It is assumed that the dependency replacement has been
- // (unsuccessfully) tried by using the try_replace_dependency() call and its
- // resulting list of the dependents, unsatisfied by some of the dependency
- // available versions, is also passed to the function call as the
- // unsatisfied_dpts argument.
+ // otherwise. If allow_downgrade is false, then don't return a downgrade
+ // adjustment, unless the dependent is being deorphaned. It is assumed that
+ // the dependency replacement has been (unsuccessfully) tried by using the
+ // try_replace_dependency() call and its resulting list of the dependents,
+ // unsatisfied by some of the dependency available versions, is also passed
+ // to the function call as the unsatisfied_dpts argument.
//
// Specifically, try to replace the dependents in the following order by
// calling try_replace_dependency() for them:
@@ -2723,9 +2738,10 @@ namespace bpkg
// - Dependents of all the above types of dependents, discovered by
// recursively calling try_replace_dependent() for them.
//
- optional<cmdline_adjustment>
+ static optional<cmdline_adjustment>
try_replace_dependent (const common_options& o,
const build_package& p, // Dependency.
+ bool allow_downgrade,
const vector<unsatisfied_constraint>* ucs,
const build_packages& pkgs,
const cmdline_adjustments& cmdline_adjs,
@@ -2758,6 +2774,7 @@ namespace bpkg
//
auto try_replace = [&o,
&p,
+ allow_downgrade,
&pkgs,
&cmdline_adjs,
&hold_pkgs,
@@ -2793,6 +2810,7 @@ namespace bpkg
if (optional<cmdline_adjustment> a = try_replace_dependency (
o,
*d,
+ allow_downgrade,
pkgs,
hold_pkgs,
dep_pkgs,
@@ -2816,7 +2834,7 @@ namespace bpkg
{
const package_version_key& dvk (c.dependent);
- if (dvk.version && !c.selected_dependent && !satisfies (av, c.value))
+ if (dvk.version && !c.existing_dependent && !satisfies (av, c.value))
{
if (optional<cmdline_adjustment> a = try_replace (
package_key (dvk.db, dvk.name), "unsatisfied dependent"))
@@ -2851,7 +2869,7 @@ namespace bpkg
{
const package_version_key& dvk (c1.dependent);
- if (dvk.version && !c1.selected_dependent)
+ if (dvk.version && !c1.existing_dependent)
{
const version_constraint& v1 (c1.value);
@@ -2900,6 +2918,7 @@ namespace bpkg
if (optional<cmdline_adjustment> a = try_replace_dependent (
o,
*d,
+ allow_downgrade,
nullptr /* unsatisfied_constraints */,
pkgs,
cmdline_adjs,
@@ -4298,6 +4317,14 @@ namespace bpkg
optional<cmdline_adjustment> cmdline_refine_adjustment;
optional<size_t> cmdline_refine_index;
+ // If an --upgrade* or --patch* option is used on the command line, then
+ // we try to avoid any package downgrades initially. However, if the
+ // resolution fails in this mode, we fall back to allowing such
+ // downgrades. Without this logic, we may end up downgrading one package
+ // in order to upgrade another, which would be incorrect.
+ //
+ bool cmdline_allow_downgrade (true);
+
{
// Check if the package is a duplicate. Return true if it is but
// harmless.
@@ -4843,9 +4870,14 @@ namespace bpkg
// configuration.
//
if (pdb != nullptr)
+ {
+ if (u)
+ cmdline_allow_downgrade = false;
+
rec_pkgs.push_back (recursive_package {*pdb, pa.name,
r, u && *u,
d});
+ }
}
}
@@ -4901,6 +4933,10 @@ namespace bpkg
bool hold_version (pa.constraint.has_value ());
+ optional<bool> upgrade (pa.options.upgrade () || pa.options.patch ()
+ ? pa.options.upgrade ()
+ : optional<bool> ());
+
dep_pkgs.push_back (
dependency_package {pdb,
move (pa.name),
@@ -4909,9 +4945,7 @@ namespace bpkg
move (sp),
sys,
existing,
- (pa.options.upgrade () || pa.options.patch ()
- ? pa.options.upgrade ()
- : optional<bool> ()),
+ upgrade,
pa.options.deorphan (),
pa.options.keep_out (),
pa.options.disfigure (),
@@ -4921,6 +4955,10 @@ namespace bpkg
pa.options.checkout_purge (),
move (pa.config_vars),
pa.system_status});
+
+ if (upgrade)
+ cmdline_allow_downgrade = false;
+
continue;
}
@@ -5154,6 +5192,9 @@ namespace bpkg
pkg_confs.emplace_back (p.db, p.name ());
+ if (p.upgrade)
+ cmdline_allow_downgrade = false;
+
hold_pkgs.push_back (move (p));
}
@@ -5279,6 +5320,9 @@ namespace bpkg
l4 ([&]{trace << "stash held package "
<< p.available_name_version_db ();});
+ if (p.upgrade)
+ cmdline_allow_downgrade = false;
+
hold_pkgs.push_back (move (p));
// If there are also -i|-r, then we are also upgrading and/or
@@ -6761,7 +6805,7 @@ namespace bpkg
// so we need to bring it back.
//
// Make sure that selected packages are only owned by the session
- // and the build package list.
+ // and the build package (pkgs) and the dependency (dep_pkgs) lists.
//
build_pkgs.clear ();
@@ -6871,12 +6915,83 @@ namespace bpkg
//
if (!rescan)
{
+ // Return true if the specified package is loaded as a
+ // prerequisite of some dependent package, cached in the
+ // session, and contained in a different database. In this case
+ // unload this package from all such dependents.
+ //
+ auto check_unload_prereq = [&ses, &sp_session]
+ (const shared_ptr<selected_package>& sp,
+ const odb::database* db)
+ {
+ bool r (false);
+
+ for (const auto& dps: ses.map ())
+ {
+ // Skip dependents from the same database.
+ //
+ if (dps.first == db)
+ continue;
+
+ if (const selected_packages* sps = sp_session (dps.second))
+ {
+ for (const auto& p: *sps)
+ {
+ for (auto& pr: p.second->prerequisites)
+ {
+ const lazy_shared_ptr<selected_package>& lp (pr.first);
+
+ if (lp.loaded () && lp.get_eager () == sp)
+ {
+ lp.unload ();
+ r = true;
+ }
+ }
+ }
+ }
+ }
+
+ return r;
+ };
+
for (const auto& dps: ses.map ())
{
if (const selected_packages* sps = sp_session (dps.second))
{
if (old_sp.find (dps.first) == old_sp.end ())
- assert (sps->empty ());
+ {
+ // Note that the only valid reason for these packages to still
+ // be present in the session is that some of them may be
+ // referenced as prerequisites by some dependent packages
+ // from other databases and reference the remaining
+ // packages. For example:
+ //
+ // new session: A (X, 2) -> B (X, 2) -> C (Y, 2) -> D (Y, 2)
+ // old session: A
+ //
+ // Here C and D are the packages in question, package A is
+ // present in both sessions, X and Y are the databases,
+ // the numbers are the package reference counts, and the
+ // arrows denote the loaded prerequisite lazy pointers.
+ //
+ // Let's verify that's the only situation by unloading
+ // these packages from such dependent prerequisites and
+ // rescanning.
+ //
+ if (!sps->empty ())
+ {
+ for (const auto& p: *sps)
+ {
+ if (check_unload_prereq (p.second, dps.first))
+ rescan = true;
+ }
+
+ // If we didn't unload any of these packages, then we
+ // consider this a bug.
+ //
+ assert (rescan);
+ }
+ }
}
}
}
@@ -6951,6 +7066,7 @@ namespace bpkg
if ((a = try_replace_dependency (o,
*p,
+ cmdline_allow_downgrade,
pkgs,
hold_pkgs,
dep_pkgs,
@@ -6959,6 +7075,7 @@ namespace bpkg
"unsatisfactory dependency")) ||
(a = try_replace_dependent (o,
*p,
+ cmdline_allow_downgrade,
&ic.unsatisfied_constraints,
pkgs,
cmdline_adjs,
@@ -6984,7 +7101,27 @@ namespace bpkg
prepare_recollect ();
}
else
- unsatisfied_depts.diag (pkgs); // Issue the diagnostics and fail.
+ {
+ // If we fail to resolve the unsatisfied dependency
+ // constraints with downgrades disallowed, then allow
+ // downgrades and retry from the very beginning.
+ //
+ if (!cmdline_allow_downgrade)
+ {
+ l5 ([&]{trace << "cannot resolve unsatisfied dependency "
+ << "constraints, now allowing downgrades";});
+
+ cmdline_allow_downgrade = true;
+
+ prepare_recollect ();
+ }
+ else
+ {
+ // Issue the diagnostics and fail.
+ //
+ unsatisfied_depts.diag (pkgs);
+ }
+ }
}
else // We are in the command line adjustments refinement cycle.
{
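
The cmdline_allow_downgrade changes above amount to a two-pass strategy: first attempt the resolution with downgrades disallowed and, only if the unsatisfied constraints cannot be resolved, allow downgrades and recollect from scratch. A stripped-down sketch of that control flow follows; the resolve() callback and outcome enum are hypothetical stand-ins for the collection/refinement machinery, not bpkg APIs.

// Sketch of the two-pass downgrade handling: resolve with downgrades
// disallowed first and, on failure, allow downgrades and retry from the
// very beginning.
#include <functional>
#include <stdexcept>

enum class outcome {success, unsatisfied};

static void
build_plan (const std::function<outcome (bool allow_downgrade)>& resolve)
{
  // Downgrades start out disallowed here, corresponding to the case where
  // an --upgrade*/--patch* option was specified on the command line.
  //
  bool allow_downgrade (false);

  for (;;)
  {
    switch (resolve (allow_downgrade))
    {
    case outcome::success:
      return;

    case outcome::unsatisfied:
      {
        // Fall back to allowing downgrades and retry from scratch, failing
        // for real only if that was already attempted.
        //
        if (!allow_downgrade)
        {
          allow_downgrade = true;
          continue;
        }

        throw std::runtime_error ("unable to satisfy dependency constraints");
      }
    }
  }
}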
diff --git a/bpkg/rep-fetch.cxx b/bpkg/rep-fetch.cxx
index d02a064..fe25b86 100644
--- a/bpkg/rep-fetch.cxx
+++ b/bpkg/rep-fetch.cxx
@@ -1552,6 +1552,10 @@ namespace bpkg
}
}
+#ifndef NDEBUG
+ rep_remove_verify (db, t);
+#endif
+
// Make sure that the external packages are available from a single
// directory-based repository.
//
diff --git a/bpkg/rep-mask.cxx b/bpkg/rep-mask.cxx
index d7f9c6a..e670005 100644
--- a/bpkg/rep-mask.cxx
+++ b/bpkg/rep-mask.cxx
@@ -6,6 +6,7 @@
#include <bpkg/package.hxx>
#include <bpkg/package-odb.hxx>
#include <bpkg/database.hxx>
+#include <bpkg/rep-remove.hxx> // rep_remove_verify()
#include <bpkg/diagnostics.hxx>
#include <bpkg/package-query.hxx> // repo_configs
#include <bpkg/manifest-utility.hxx> // repository_name()
@@ -273,6 +274,16 @@ namespace bpkg
for (database& db: repo_configs)
{
+ // While at it, verify that the repository information has stayed
+ // consistent after the potential repository removals.
+ //
+ // Note that rep_remove() doesn't remove the available packages in the
+ // mask mode and thus we don't verify them.
+ //
+#ifndef NDEBUG
+ rep_remove_verify (db, t, false /* verify_packages */);
+#endif
+
// Add the repository location canonical name to the database-specific
// unmasked repositories or repository fragments lists. Note that
// repository location is used only for tracing.
diff --git a/bpkg/rep-remove.cxx b/bpkg/rep-remove.cxx
index ad10f56..22702a5 100644
--- a/bpkg/rep-remove.cxx
+++ b/bpkg/rep-remove.cxx
@@ -178,12 +178,6 @@ namespace bpkg
for (const repository::fragment_type& fr: r->fragments)
rep_remove_fragment (db, t, fr.fragment.load (), mask);
- // If there are no repositories stayed in the database then no repository
- // fragments should stay either.
- //
- if (db.query_value<repository_count> () == 0)
- assert (db.query_value<repository_fragment_count> () == 0);
-
// Unless in the mask repositories mode, cleanup the repository state if
// present and there are no more repositories referring this state.
//
@@ -272,20 +266,6 @@ namespace bpkg
//
db.erase (rf);
- // If there are no repository fragments stayed in the database then no
- // repositories nor packages should stay either.
- //
- // Note that a repository is removed prior to the removal of fragments it
- // contains (see rep_remove()). Also note that the packages contained in a
- // repository fragment are removed, if this is the only containing
- // fragment, prior to the fragment removal (see above).
- //
- if (db.query_value<repository_fragment_count> () == 0)
- {
- assert (db.query_value<repository_count> () == 0);
- assert (mask || db.query_value<available_package_count> () == 0);
- }
-
// Remove dangling complements and prerequisites.
//
// Prior to removing a prerequisite/complement we need to make sure it
@@ -521,6 +501,10 @@ namespace bpkg
text << "removed " << r.object_id ();
}
+#ifndef NDEBUG
+ rep_remove_verify (db, t);
+#endif
+
// If the --all option is specified then no user-added repositories should
// remain.
//
@@ -538,4 +522,35 @@ namespace bpkg
return 0;
}
+
+ void
+ rep_remove_verify (database& db, transaction&, bool verify_packages)
+ {
+ size_t rn (db.query_value<repository_count> ());
+ size_t fn (db.query_value<repository_fragment_count> ());
+
+ // If no repositories have stayed in the database, then no repository
+ // fragments should stay either.
+ //
+ assert (rn != 0 || fn == 0);
+
+ // If no repository fragments have stayed in the database, then no
+ // repository should contain any fragments and no packages should stay
+ // either.
+ //
+ // Note that repositories may not have any fragments if they have not been
+ // fetched yet or due to the refname exclusions in the repository URL
+ // fragments (see repository-types(1) for details).
+ //
+ if (fn == 0)
+ {
+ // If some repositories have stayed, then make sure that none of them
+ // have any fragments.
+ //
+ assert (rn == 0 ||
+ db.query_value<fragment_repository_count> ("repository!=''") == 0);
+
+ if (verify_packages)
+ assert (db.query_value<available_package_count> () == 0);
+ }
+ }
}
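
The new rep_remove_verify() function checks two invariants over the database counts: no fragments may remain once no repositories remain, and once no fragments remain, no repository may reference a fragment and (unless excluded, as in the mask mode) no available packages may remain. A minimal sketch of the same invariants over plain counters follows; the counts struct is a stand-in for the repository_count, repository_fragment_count, fragment_repository_count, and available_package_count queries.

// Sketch of the consistency invariants verified after repository removal.
#include <cassert>
#include <cstddef>

struct counts
{
  std::size_t repositories;            // All repositories.
  std::size_t fragments;               // All repository fragments.
  std::size_t repositories_with_frags; // Repositories referencing a fragment.
  std::size_t packages;                // Available packages.
};

static void
verify (const counts& c, bool verify_packages = true)
{
  // If no repositories remain, then no fragments may remain either.
  //
  assert (c.repositories != 0 || c.fragments == 0);

  if (c.fragments == 0)
  {
    // Any remaining repositories must not reference any fragments.
    //
    assert (c.repositories == 0 || c.repositories_with_frags == 0);

    // And, unless excluded (the mask mode), no packages may remain.
    //
    if (verify_packages)
      assert (c.packages == 0);
  }
}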
diff --git a/bpkg/rep-remove.hxx b/bpkg/rep-remove.hxx
index 0fc82e8..94a38c6 100644
--- a/bpkg/rep-remove.hxx
+++ b/bpkg/rep-remove.hxx
@@ -57,6 +57,13 @@ namespace bpkg
rep_remove_package_locations (database&,
transaction&,
const string& fragment_name);
+
+ // Verify that after all the repository/fragment removals the repository
+ // information is consistent in the database (if no repositories stayed then
+ // no fragments stayed either, etc).
+ //
+ void
+ rep_remove_verify (database&, transaction&, bool verify_packages = true);
}
#endif // BPKG_REP_REMOVE_HXX
diff --git a/bpkg/version.hxx.in b/bpkg/version.hxx.in
index 22da973..603a5f7 100644
--- a/bpkg/version.hxx.in
+++ b/bpkg/version.hxx.in
@@ -43,14 +43,6 @@ $libbutl.check(LIBBUTL_VERSION, LIBBUTL_SNAPSHOT)$
$libbpkg.check(LIBBPKG_VERSION, LIBBPKG_SNAPSHOT)$
-#include <odb/version.hxx>
-
-$libodb.check(LIBODB_VERSION, LIBODB_SNAPSHOT)$
-
-#include <odb/sqlite/version.hxx>
-
-$libodb_sqlite.check(LIBODB_SQLITE_VERSION, LIBODB_SQLITE_SNAPSHOT)$
-
// User agent.
//
#if defined(_WIN32)
diff --git a/manifest b/manifest
index e826689..07e97ff 100644
--- a/manifest
+++ b/manifest
@@ -18,9 +18,6 @@ depends: * build2 >= 0.16.0-
depends: * bpkg >= 0.16.0-
# @@ DEP Should probably become conditional dependency.
#requires: ? cli ; Only required if changing .cli files.
-depends: libodb [2.5.0-b.26.1 2.5.0-b.27)
-depends: libodb-sqlite [2.5.0-b.26.1 2.5.0-b.27)
-depends: libsqlite3 ^3.21.0 ; ATTACH in transaction
depends: libbutl [0.17.0-a.0.1 0.17.0-a.1)
depends: libbpkg [0.17.0-a.0.1 0.17.0-a.1)
depends: build2 [0.17.0-a.0.1 0.17.0-a.1)
diff --git a/repositories.manifest b/repositories.manifest
index 29cb1cf..288344c 100644
--- a/repositories.manifest
+++ b/repositories.manifest
@@ -3,24 +3,12 @@ summary: build2 package dependency manager repository
:
role: prerequisite
-location: ../build2.git##HEAD
+location: ../build2.git#HEAD
:
role: prerequisite
-location: ../libbutl.git##HEAD
+location: ../libbutl.git#HEAD
:
role: prerequisite
-location: ../libbpkg.git##HEAD
-
-:
-role: prerequisite
-location: https://git.build2.org/packaging/sqlite/sqlite.git##HEAD
-
-:
-role: prerequisite
-location: https://git.codesynthesis.com/odb/libodb.git##HEAD
-
-:
-role: prerequisite
-location: https://git.codesynthesis.com/odb/libodb-sqlite.git##HEAD
+location: ../libbpkg.git#HEAD
diff --git a/tests/pkg-build.testscript b/tests/pkg-build.testscript
index 9d19846..9326541 100644
--- a/tests/pkg-build.testscript
+++ b/tests/pkg-build.testscript
@@ -2369,8 +2369,8 @@ test.arguments += --sys-no-query
trace: execute_plan: simulate: yes
%.*
trace: pkg_build: try to replace unsatisfactory dependency libbar/2.0.0 with some other version
- trace: try_replace_dependent: try to replace unsatisfiable dependent libbaz/2.0.0 of dependency libbar/2.0.0 with some other version
- trace: try_replace_dependency: replace unsatisfiable dependent version libbaz/2.0.0 with 1.0.0 by adding constraint '?libbaz' -> '?libbaz == 1.0.0' on command line
+ trace: try_replace_dependent: try to replace unsatisfied dependent libbaz/2.0.0 of dependency libbar/2.0.0 with some other version
+ trace: try_replace_dependency: replace unsatisfied dependent version libbaz/2.0.0 with 1.0.0 by adding constraint '?libbaz' -> '?libbaz == 1.0.0' on command line
trace: pkg_build: refine package collection/plan execution from scratch
trace: execute_plan: simulate: yes
%.*
@@ -2584,8 +2584,8 @@ test.arguments += --sys-no-query
trace: execute_plan: simulate: yes
%.*
trace: pkg_build: try to replace unsatisfactory dependency libbar/2.0.0 with some other version
- trace: try_replace_dependent: try to replace unsatisfiable dependent libbaz/2.0.0 of dependency libbar/2.0.0 with some other version
- trace: try_replace_dependency: replace unsatisfiable dependent version libbaz/2.0.0 with 1.0.0 by adding constraint '?libbaz' -> '?libbaz == 1.0.0' on command line
+ trace: try_replace_dependent: try to replace unsatisfied dependent libbaz/2.0.0 of dependency libbar/2.0.0 with some other version
+ trace: try_replace_dependency: replace unsatisfied dependent version libbaz/2.0.0 with 1.0.0 by adding constraint '?libbaz' -> '?libbaz == 1.0.0' on command line
trace: pkg_build: refine package collection/plan execution from scratch
trace: collect_build: add foo/1.0.0
trace: collect_build_prerequisites: skip configured foo/1.0.0
@@ -2663,8 +2663,8 @@ test.arguments += --sys-no-query
trace: execute_plan: simulate: yes
%.*
trace: pkg_build: try to replace unsatisfactory dependency libbar/2.0.0 with some other version
- trace: try_replace_dependent: try to replace unsatisfiable dependent libbaz/2.0.0 of dependency libbar/2.0.0 with some other version
- trace: try_replace_dependency: replace unsatisfiable dependent version libbaz/2.0.0 with 1.0.0 by adding constraint '?libbaz' -> '?libbaz == 1.0.0' on command line
+ trace: try_replace_dependent: try to replace unsatisfied dependent libbaz/2.0.0 of dependency libbar/2.0.0 with some other version
+ trace: try_replace_dependency: replace unsatisfied dependent version libbaz/2.0.0 with 1.0.0 by adding constraint '?libbaz' -> '?libbaz == 1.0.0' on command line
trace: pkg_build: refine package collection/plan execution from scratch
trace: collect_build: add foo/1.0.0
trace: collect_build_prerequisites: skip configured foo/1.0.0
@@ -5217,7 +5217,7 @@ test.arguments += --sys-no-query
$pkg_drop libfoo libfox
}
- : unsatisfied-dependent
+ : unsatisfied-dependent-noop
:
: This test demonstrates a case when the dependency resolution
: machinery resolves unsatisfied dependency constraints by
@@ -5231,6 +5231,16 @@ test.arguments += --sys-no-query
{
$clone_cfg;
+ # Dependencies:
+ #
+ # libbox/2.0.0: depends: libbax == 1.0.0
+ # libbox/1.0.0: depends: libbax
+ #
+ # libbix/1.0.0: depends: libbax == 1.0.0
+ # libbix/2.0.0: depends: libbax == 2.0.0
+ #
+ # libbux: depends: libbix
+ #
$* libbox ?libbix/1.0.0 libbux 2>!;
$pkg_status -ar >>EOO;
@@ -5272,8 +5282,10 @@ test.arguments += --sys-no-query
trace: execute_plan: simulate: yes
%.*
trace: pkg_build: try to replace unsatisfactory dependency libbax/2.0.0 with some other version
- trace: try_replace_dependent: try to replace unsatisfiable dependent libbix/2.0.0 of dependency libbax/2.0.0 with some other version
- trace: try_replace_dependency: replace unsatisfiable dependent version libbix/2.0.0 with 1.0.0 by adding package spec '?libbix == 1.0.0' to command line
+ trace: try_replace_dependent: try to replace unsatisfied dependent libbox/2.0.0 of dependency libbax/2.0.0 with some other version
+ trace: try_replace_dependency: downgrade for libbox/2.0.0 is not allowed, bailing out
+ trace: try_replace_dependent: try to replace unsatisfied dependent libbix/2.0.0 of dependency libbax/2.0.0 with some other version
+ trace: try_replace_dependency: replace unsatisfied dependent version libbix/2.0.0 with 1.0.0 by adding package spec '?libbix == 1.0.0' to command line
trace: pkg_build: refine package collection/plan execution from scratch
trace: collect_build: add libbox/2.0.0
trace: collect_build: add libbux/1.0.0
@@ -5299,6 +5311,144 @@ test.arguments += --sys-no-query
$pkg_drop libbox libbux
}
+ : unsatisfied-dependent
+ :
+ : Similar to the above, but this time a noop is not a valid constraint
+ : resolution due to ?libbix/2.0.0. Thus, the dependency resolution
+ : machinery ends up with the downgrade of libbox/2.0.0 to 1.0.0.
+ :
+ {
+ $clone_cfg;
+
+ $* libbox ?libbix/1.0.0 libbux 2>!;
+
+ $pkg_status -ar >>EOO;
+ libbax configured 1.0.0 available 2.0.0
+ !libbox configured 2.0.0
+ libbax configured 1.0.0 available 2.0.0
+ libbix configured !1.0.0 available 2.0.0
+ libbax configured 1.0.0 available 2.0.0
+ !libbux configured 1.0.0
+ libbix configured !1.0.0 available 2.0.0
+ libbax configured 1.0.0 available 2.0.0
+ EOO
+
+ $* { libbox libbux ?libbax } +{ --upgrade --recursive } ?libbix/2.0.0 2>>~%EOE%;
+ %.*
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: collect_build: add libbox/2.0.0
+ trace: collect_build: add libbux/1.0.0
+ trace: collect_build_prerequisites: skip configured libbox/2.0.0
+ trace: collect_build_prerequisites: skip configured libbux/1.0.0
+ trace: execute_plan: simulate: yes
+ %.*
+ trace: evaluate_dependency: libbix/1.0.0: update to libbix/2.0.0
+ %.*
+ trace: pkg_build: refine package collection/plan execution
+ trace: collect_build_prerequisites: pre-reeval libbux/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated libbux/1.0.0: end reached
+ trace: collect_build_prerequisites: begin libbix/2.0.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libbax/2.0.0 of dependent libbix/2.0.0
+ trace: collect_build_prerequisites: skip unsatisfied existing dependent libbox of dependency libbax/2.0.0 due to constraint (libbax == 1.0.0)
+ trace: collect_build_prerequisites: skip being built existing dependent libbix of dependency libbax
+ trace: collect_build_prerequisites: begin libbax/2.0.0
+ trace: collect_build_prerequisites: end libbax/2.0.0
+ trace: collect_build_prerequisites: end libbix/2.0.0
+ trace: collect_dependents: postpone failure for existing dependent libbox unsatisfied with dependency libbax/2.0.0 (== 1.0.0)
+ trace: execute_plan: simulate: yes
+ %.*
+ trace: pkg_build: try to replace unsatisfactory dependency libbax/2.0.0 with some other version
+ trace: try_replace_dependent: try to replace unsatisfied dependent libbox/2.0.0 of dependency libbax/2.0.0 with some other version
+ trace: try_replace_dependency: downgrade for libbox/2.0.0 is not allowed, bailing out
+ trace: try_replace_dependent: try to replace unsatisfied dependent libbix/2.0.0 of dependency libbax/2.0.0 with some other version
+ trace: try_replace_dependency: replacement of unsatisfied dependent version libbix/2.0.0 is denied since it is specified on command line as '?libbix == 2.0.0'
+ trace: pkg_build: cannot resolve unsatisfied dependency constraints, now allowing downgrades
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: collect_build: add libbox/2.0.0
+ trace: collect_build: add libbux/1.0.0
+ trace: collect_build_prerequisites: skip configured libbox/2.0.0
+ trace: collect_build_prerequisites: skip configured libbux/1.0.0
+ trace: execute_plan: simulate: yes
+ %.*
+ trace: evaluate_dependency: libbix/1.0.0: update to libbix/2.0.0
+ %.*
+ trace: pkg_build: refine package collection/plan execution
+ trace: collect_build_prerequisites: pre-reeval libbux/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated libbux/1.0.0: end reached
+ trace: collect_build_prerequisites: begin libbix/2.0.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libbax/2.0.0 of dependent libbix/2.0.0
+ trace: collect_build_prerequisites: skip unsatisfied existing dependent libbox of dependency libbax/2.0.0 due to constraint (libbax == 1.0.0)
+ trace: collect_build_prerequisites: skip being built existing dependent libbix of dependency libbax
+ trace: collect_build_prerequisites: begin libbax/2.0.0
+ trace: collect_build_prerequisites: end libbax/2.0.0
+ trace: collect_build_prerequisites: end libbix/2.0.0
+ trace: collect_dependents: postpone failure for existing dependent libbox unsatisfied with dependency libbax/2.0.0 (== 1.0.0)
+ trace: execute_plan: simulate: yes
+ %.*
+ trace: pkg_build: try to replace unsatisfactory dependency libbax/2.0.0 with some other version
+ trace: try_replace_dependent: try to replace unsatisfied dependent libbox/2.0.0 of dependency libbax/2.0.0 with some other version
+ trace: try_replace_dependency: replace unsatisfied dependent version libbox/2.0.0 with 1.0.0 by adding constraint 'libbox' -> 'libbox == 1.0.0' on command line
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: collect_build: add libbox/1.0.0
+ trace: collect_build: add libbux/1.0.0
+ trace: collect_build_prerequisites: begin libbox/1.0.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libbax/1.0.0 of dependent libbox/1.0.0
+ trace: collect_build_prerequisites: skip configured libbax/1.0.0
+ trace: collect_build_prerequisites: end libbox/1.0.0
+ trace: collect_build_prerequisites: skip configured libbux/1.0.0
+ trace: execute_plan: simulate: yes
+ %.*
+ trace: evaluate_dependency: libbix/1.0.0: update to libbix/2.0.0
+ %.*
+ trace: pkg_build: refine package collection/plan execution
+ trace: collect_build_prerequisites: pre-reeval libbux/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated libbux/1.0.0: end reached
+ trace: collect_build_prerequisites: begin libbix/2.0.0
+ trace: collect_build: pick libbax/2.0.0 over libbax/1.0.0
+ trace: collect_build: libbax/1.0.0 package version needs to be replaced with libbax/2.0.0
+ trace: pkg_build: collection failed due to package version replacement, retry from scratch
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: collect_build: add libbox/1.0.0
+ trace: collect_build: add libbux/1.0.0
+ trace: collect_build_prerequisites: begin libbox/1.0.0
+ trace: collect_build: apply version replacement for libbax/1.0.0
+ trace: collect_build: replacement: libbax/2.0.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libbax/2.0.0 of dependent libbox/1.0.0
+ trace: collect_build_prerequisites: skip being built existing dependent libbox of dependency libbax
+ trace: collect_build_prerequisites: skip unsatisfied existing dependent libbix of dependency libbax/2.0.0 due to constraint (libbax == 1.0.0)
+ trace: collect_build_prerequisites: begin libbax/2.0.0
+ trace: collect_build_prerequisites: end libbax/2.0.0
+ trace: collect_build_prerequisites: end libbox/1.0.0
+ trace: collect_build_prerequisites: skip configured libbux/1.0.0
+ trace: collect_build_prerequisites: pre-reeval libbux/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated libbux/1.0.0: end reached
+ trace: collect_build_prerequisites: begin libbix/2.0.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libbax/2.0.0 of dependent libbix/2.0.0
+ trace: collect_build_prerequisites: end libbix/2.0.0
+ trace: execute_plan: simulate: yes
+ %.*
+ upgrade libbax/2.0.0
+ downgrade libbox/1.0.0
+ upgrade libbix/2.0.0
+ reconfigure libbux/1.0.0
+ trace: execute_plan: simulate: no
+ %.*
+ EOE
+
+ $pkg_status -ar >>EOO;
+ libbax configured 2.0.0
+ !libbox configured 1.0.0 available 2.0.0
+ libbax configured 2.0.0
+ libbix configured !2.0.0
+ libbax configured 2.0.0
+ !libbux configured 1.0.0
+ libbix configured !2.0.0
+ libbax configured 2.0.0
+ EOO
+
+ $pkg_drop libbox libbux
+ }
+
: indirect
:
: Test replacement of indirect dependents of an unsatisfactory