author    Karen Arutyunov <karen@codesynthesis.com>  2023-05-17 21:16:13 +0300
committer Karen Arutyunov <karen@codesynthesis.com>  2023-05-23 12:25:56 +0300
commit    f83ae9ce7a2d7f3158ca043d947b230f27dbe7bd (patch)
tree      f81571262df62bd33c8d9cdc4720371bf9d2aa5d
parent    8bb9424799cbcae6f0455e96dde9e0ecfc6a4411 (diff)
Postpone failure due to unsatisfied dependency constraint for existing dependent
-rw-r--r--  bpkg/pkg-build-collect.cxx  255
-rw-r--r--  bpkg/pkg-build-collect.hxx   65
-rw-r--r--  bpkg/pkg-build.cxx           53
-rw-r--r--  bpkg/pkg-configure.cxx       31
-rw-r--r--  bpkg/pkg-configure.hxx       26
-rw-r--r--  tests/pkg-build.testscript   79
6 files changed, 394 insertions, 115 deletions
diff --git a/bpkg/pkg-build-collect.cxx b/bpkg/pkg-build-collect.cxx
index 8442b15..2bcd515 100644
--- a/bpkg/pkg-build-collect.cxx
+++ b/bpkg/pkg-build-collect.cxx
@@ -400,6 +400,84 @@ namespace bpkg
}
}
+ // unsatisfied_dependents
+ //
+ unsatisfied_dependent* unsatisfied_dependents::
+ find_dependent (const package_key& dk)
+ {
+ auto i (find_if (begin (), end (),
+ [&dk] (const unsatisfied_dependent& v)
+ {
+ return dk == v.dependent;
+ }));
+ return i != end () ? &*i : nullptr;
+ }
+
+ void unsatisfied_dependents::
+ diag ()
+ {
+ assert (!empty ());
+
+ const unsatisfied_dependent& dpt (front ());
+
+ assert (!dpt.dependencies.empty ());
+
+ const package_key& dk (dpt.dependent);
+ build_package& p (*dpt.dependencies.front ().first);
+ const version_constraint& c (dpt.dependencies.front ().second);
+
+ database& pdb (p.db);
+ const shared_ptr<selected_package>& sp (p.selected);
+
+ const package_name& n (sp->name);
+ const version& av (p.available_version ());
+
+ // See if we are up/downgrading this package. In particular, the available
+ // package could be NULL meaning we are just adjusting.
+ //
+ int ud (p.available != nullptr
+ ? sp->version.compare (p.available_version ())
+ : 0);
+
+ // Otherwise, the dependent must be satisfied with the already configured
+ // dependency.
+ //
+ assert (ud != 0);
+
+ diag_record dr (fail);
+
+ dr << "unable to " << (ud < 0 ? "up" : "down") << "grade "
+ << "package " << *sp << pdb << " to ";
+
+ // Print both (old and new) package names in full if the system
+ // attribution changes.
+ //
+ if (p.system != sp->system ())
+ dr << p.available_name_version ();
+ else
+ dr << av; // Can't be the wildcard otherwise would satisfy.
+
+ dr << info << "because package " << dk << " depends on (" << n << " "
+ << c << ")";
+
+ string rb;
+ if (!p.user_selection ())
+ {
+ for (const package_key& pk: p.required_by)
+ rb += (rb.empty () ? " " : ", ") + pk.string ();
+ }
+
+ if (!rb.empty ())
+ dr << info << "package " << p.available_name_version ()
+ << " required by" << rb;
+
+ dr << info << "explicitly request up/downgrade of package "
+ << dk.name;
+
+ dr << info << "or explicitly specify package " << n
+ << " version to manually satisfy these constraints" << endf;
+ }
+
// postponed_configuration
//
postponed_configuration::dependency*
@@ -1131,40 +1209,39 @@ namespace bpkg
// Add the version replacement entry, call the verification function if
// specified, and throw replace_version.
//
- auto replace_ver = [&pk, &vpb, &vi, &replaced_vers]
- (const build_package& p)
- {
- replaced_version rv (p.available, p.repository_fragment, p.system);
+ auto replace_ver = [&pk, &vpb, &vi, &replaced_vers] (const build_package& p)
+ {
+ replaced_version rv (p.available, p.repository_fragment, p.system);
- if (vi != replaced_vers.end ())
- vi->second = move (rv);
- else
- replaced_vers.emplace (move (pk), move (rv));
+ if (vi != replaced_vers.end ())
+ vi->second = move (rv);
+ else
+ replaced_vers.emplace (move (pk), move (rv));
- if (vpb)
- vpb (p, true /* scratch */);
+ if (vpb)
+ vpb (p, true /* scratch */);
- throw replace_version ();
- };
+ throw replace_version ();
+ };
auto i (map_.find (pk));
- // If we already have an entry for this package name, then we have to
- // pick one over the other.
+ // If we already have an entry for this package name, then we have to pick
+ // one over the other.
//
// If the existing entry is a drop, then we override it. If the existing
// entry is a pre-entered or is non-build one, then we merge it into the
- // new build entry. Otherwise (both are builds), we pick one and merge
- // the other into it.
+ // new build entry. Otherwise (both are builds), we pick one and merge the
+ // other into it.
//
if (i != map_.end ())
{
build_package& bp (i->second.package);
// Note that we used to think that the scenario when the build could
- // replace drop could never happen since we would start collecting
- // from scratch. This has changed when we introduced replaced_versions
- // for collecting drops.
+ // replace drop could never happen since we would start collecting from
+ // scratch. This has changed when we introduced replaced_versions for
+ // collecting drops.
//
if (bp.action && *bp.action == build_package::drop) // Drop.
{
@@ -1177,16 +1254,15 @@ namespace bpkg
}
else // Build.
{
- // At the end we want p1 to point to the object that we keep
- // and p2 to the object that we merge from.
+ // At the end we want p1 to point to the object that we keep and p2 to
+ // the object that we merge from.
//
build_package* p1 (&bp);
build_package* p2 (&pkg);
// Pick with the following preference order: user selection over
- // implicit one, source package over a system one, newer version
- // over an older one. So get the preferred into p1 and the other
- // into p2.
+ // implicit one, source package over a system one, newer version over
+ // an older one. So get the preferred into p1 and the other into p2.
//
{
int us (p1->user_selection () - p2->user_selection ());
@@ -1254,8 +1330,8 @@ namespace bpkg
}
// See if we are replacing the object. If not, then we don't need to
- // collect its prerequisites since that should have already been
- // done. Remember, p1 points to the object we want to keep.
+ // collect its prerequisites since that should have already been done.
+ // Remember, p1 points to the object we want to keep.
//
bool replace (p1 != &i->second.package);
@@ -3018,7 +3094,7 @@ namespace bpkg
das.buildtime,
prereqs,
nullptr /* diag_record */,
- true /* dru_run */));
+ true /* dry_run */));
if (r.builds && r.reused)
{
@@ -3046,7 +3122,7 @@ namespace bpkg
das.buildtime,
nullptr /* prereqs */,
nullptr /* diag_record */,
- true /* dru_run */));
+ true /* dry_run */));
if (r.builds && r.reused)
{
@@ -5690,7 +5766,8 @@ namespace bpkg
}
void build_packages::
- collect_order_dependents (const repointed_dependents& rpt_depts)
+ collect_order_dependents (const repointed_dependents& rpt_depts,
+ unsatisfied_dependents& unsatisfied_depts)
{
// For each package on the list we want to insert all its dependents
// before it so that they get configured after the package on which they
@@ -5710,13 +5787,14 @@ namespace bpkg
// Dropped package may have no dependents.
//
if (*p.action != build_package::drop && p.reconfigure ())
- collect_order_dependents (i, rpt_depts);
+ collect_order_dependents (i, rpt_depts, unsatisfied_depts);
}
}
void build_packages::
collect_order_dependents (iterator pos,
- const repointed_dependents& rpt_depts)
+ const repointed_dependents& rpt_depts,
+ unsatisfied_dependents& unsatisfied_depts)
{
tracer trace ("collect_order_dependents");
@@ -5788,47 +5866,45 @@ namespace bpkg
if (check)
{
const version& av (p.available_version ());
- const version_constraint& c (*pd.constraint);
+ version_constraint& c (*pd.constraint);
+ // If the new dependency version doesn't satisfy the existing
+ // dependent, then postpone the failure in the hope that this
+ // problem will be resolved naturally (the dependent will also be
+ // up/downgraded, etc; see unsatisfied_dependents for details).
+ //
if (!satisfies (av, c))
{
- diag_record dr (fail);
+ package_key dk (ddb, dn);
+ unsatisfied_dependent* ud (unsatisfied_depts.find_dependent (dk));
- dr << "unable to " << (ud < 0 ? "up" : "down") << "grade "
- << "package " << *sp << pdb << " to ";
+ if (ud != nullptr)
+ {
+ vector<pair<build_package*, version_constraint>>& deps (
+ ud->dependencies);
- // Print both (old and new) package names in full if the system
- // attribution changes.
- //
- if (p.system != sp->system ())
- dr << p.available_name_version ();
- else
- dr << av; // Can't be the wildcard otherwise would satisfy.
+ auto i (find_if (deps.begin (), deps.end (),
+ [&p] (const auto& v) {return v.first == &p;}));
- dr << info << "because package " << dn << ddb << " depends on ("
- << n << " " << c << ")";
+ // It doesn't seem that we can be adding the same
+ // unsatisfactory dependency twice.
+ //
+ assert (i == deps.end ());
- string rb;
- if (!p.user_selection ())
+ deps.push_back (make_pair (&p, move (c)));
+ }
+ else
{
- for (const package_key& pk: p.required_by)
- rb += (rb.empty () ? " " : ", ") + pk.string ();
+ unsatisfied_depts.push_back (
+ unsatisfied_dependent {move (dk), {make_pair (&p, move (c))}});
}
-
- if (!rb.empty ())
- dr << info << "package " << p.available_name_version ()
- << " required by" << rb;
-
- dr << info << "explicitly request up/downgrade of package "
- << dn;
-
- dr << info << "or explicitly specify package " << n
- << " version to manually satisfy these constraints";
}
-
- // Add this contraint to the list for completeness.
- //
- p.constraints.emplace_back (ddb, dn.string (), c);
+ else
+ {
+ // Add this constraint to the list for completeness.
+ //
+ p.constraints.emplace_back (ddb, dn.string (), move (c));
+ }
}
auto adjustment = [&dn, &ddb, &n, &pdb] () -> build_package
@@ -5927,7 +6003,9 @@ namespace bpkg
// configured packages due to a dependency cycle (see order() for
// details).
//
- collect_order_dependents (i->second.position, rpt_depts);
+ collect_order_dependents (i->second.position,
+ rpt_depts,
+ unsatisfied_depts);
}
}
}
@@ -5987,6 +6065,14 @@ namespace bpkg
lazy_shared_ptr<selected_package> sp (db, name);
+ // Lazily search for the dependency build and detect if it is being
+ // up/downgraded. Note that we will only do that if the dependency has an
+ // existing dependent which imposes a version constraint on this
+ // dependency.
+ //
+ const build_package* dep (nullptr);
+ int ud (0);
+
for (database& ddb: db.dependent_configs ())
{
for (auto& pd: query_dependents (ddb, name, db))
@@ -6047,6 +6133,49 @@ namespace bpkg
continue;
}
+ // Ignore dependent if this dependency up/downgrade won't satisfy
+ // the dependent's constraint. The thinking here is that we will
+ // either fail for this reason later or the problem will be resolved
+ // naturally due to the execution plan refinement (see
+ // unsatisfied_dependents for details).
+ //
+ if (pd.constraint)
+ {
+ // Search for the dependency build and detect if it is being
+ // up/downgraded, if not done yet. In particular, the available
+ // package could be NULL meaning we are just adjusting.
+ //
+ if (dep == nullptr)
+ {
+ dep = entered_build (db, name);
+
+ assert (dep != nullptr); // Expected to be being built.
+
+ if (dep->available != nullptr)
+ {
+ const shared_ptr<selected_package>& sp (dep->selected);
+
+ // Expected to be selected since it has an existing dependent.
+ //
+ assert (sp != nullptr);
+
+ ud = sp->version.compare (dep->available_version ());
+ }
+ }
+
+ if (ud != 0 &&
+ !satisfies (dep->available_version (), *pd.constraint))
+ {
+ l5 ([&]{trace << "skip unsatisfied existing dependent " << pk
+ << " of dependency "
+ << dep->available_name_version_db () << " due to "
+ << "constraint (" << name << ' ' << *pd.constraint
+ << ')';});
+
+ continue;
+ }
+ }
+
r.push_back (existing_dependent {ddb, move (dsp), pos});
}
}
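
The collect_order_dependents() changes above replace an immediate failure with caching of the unsatisfied constraint. Below is a minimal standalone sketch of that cache-and-postpone pattern; the type definitions and the postpone() helper are simplified stand-ins invented for illustration rather than the actual bpkg declarations.

// Cache-and-postpone sketch: instead of failing as soon as an existing
// dependent's constraint is not satisfied by an up/downgraded dependency,
// remember the (dependent, dependency, constraint) triple for a later check.
//
#include <string>
#include <vector>
#include <utility>
#include <algorithm>
#include <iostream>

struct package_key               // Stand-in for bpkg::package_key.
{
  std::string db;
  std::string name;

  bool operator== (const package_key& v) const
  {
    return db == v.db && name == v.name;
  }
};

struct build_package             // Stand-in: just enough to identify a build.
{
  package_key key;
  std::string available_version;
};

using version_constraint = std::string; // Stand-in, e.g. "== 0.1.0".

struct unsatisfied_dependent
{
  package_key dependent;
  std::vector<std::pair<build_package*, version_constraint>> dependencies;
};

struct unsatisfied_dependents: std::vector<unsatisfied_dependent>
{
  unsatisfied_dependent*
  find_dependent (const package_key& dk)
  {
    auto i (std::find_if (begin (), end (),
                          [&dk] (const unsatisfied_dependent& v)
                          {
                            return dk == v.dependent;
                          }));
    return i != end () ? &*i : nullptr;
  }
};

// Record an unsatisfied constraint instead of failing right away.
//
void
postpone (unsatisfied_dependents& uds,
          package_key dependent,
          build_package& dependency,
          version_constraint c)
{
  if (unsatisfied_dependent* ud = uds.find_dependent (dependent))
    ud->dependencies.emplace_back (&dependency, std::move (c));
  else
    uds.push_back (
      unsatisfied_dependent {std::move (dependent),
                             {{&dependency, std::move (c)}}});
}

int
main ()
{
  build_package libbaz {{"cfg", "libbaz"}, "1.0.0"};
  unsatisfied_dependents uds;

  postpone (uds, {"cfg", "libbar"}, libbaz, "== 0.1.0");

  std::cout << uds.size () << " unsatisfied dependent(s)\n";
}
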
diff --git a/bpkg/pkg-build-collect.hxx b/bpkg/pkg-build-collect.hxx
index 6c79abe..30f993b 100644
--- a/bpkg/pkg-build-collect.hxx
+++ b/bpkg/pkg-build-collect.hxx
@@ -733,6 +733,50 @@ namespace bpkg
cancel_bogus (tracer&, bool scratch);
};
+ // Existing dependents with their up/downgraded dependencies which don't
+ // satisfy the version constraints.
+ //
+ // Note that after collecting/ordering of all the explicitly specified
+ // packages and their dependencies for the build we also collect/order their
+ // existing dependents for reconfiguration, recursively. It may happen that
+ // some of the up/downgraded dependencies don't satisfy the version
+ // constraints which some of the existing dependents impose on them. Rather
+ // than fail immediately in such a case, we postpone the failure in the hope
+ // that these problems will be resolved naturally as a result of the
+ // execution plan refinement (dependents will also be up/downgraded or
+ // dropped, dependencies will be up/downgraded to different versions,
+ // etc).
+ //
+ // Specifically, we cache such unsatisfied constraints, pretend that the
+ // dependents don't impose them and proceed with the remaining
+ // collecting/ordering, simulating the plan execution, and evaluating the
+ // dependency versions. After that we check if the execution plan is
+ // finalized or a further refinement is required. In the former case we
+ // report the first encountered unsatisfied dependency constraint and
+ // fail. Otherwise, we drop the cache and proceed with the next iteration of
+ // the execution plan refinement which may resolve these problems naturally.
+ //
+ struct unsatisfied_dependent
+ {
+ package_key dependent;
+ vector<pair<build_package*, version_constraint>> dependencies;
+ };
+
+ class unsatisfied_dependents: public vector<unsatisfied_dependent>
+ {
+ public:
+ // Try to find the dependent entry and return NULL if not found.
+ //
+ unsatisfied_dependent*
+ find_dependent (const package_key&);
+
+ // Issue the diagnostics for the first unsatisfied dependency constraint
+ // and throw failed.
+ //
+ [[noreturn]] void
+ diag ();
+ };
+
// List of dependency groups whose recursive processing should be postponed
// due to dependents with configuration clauses, together with these
// dependents (we will call them package clusters).
@@ -1376,9 +1420,12 @@ namespace bpkg
// If a configured package is being up/down-graded then that means all its
// dependents could be affected and we have to reconfigure them. This
- // function examines every package that is already on the list and collects
- // and orders all its dependents. We also need to make sure the dependents
- // are ok with the up/downgrade.
+ // function examines every package that is already on the list and
+ // collects and orders all its dependents. We also need to make sure the
+ // dependents are ok with the up/downgrade. If some dependency constraints
+ // are not satisfied, then cache them and proceed further as if no
+ // problematic constraints are imposed (see unsatisfied_dependents for
+ // details).
//
// Should we reconfigure just the direct depends or also include indirect,
// recursively? Consider this plauisible scenario as an example: We are
@@ -1389,10 +1436,13 @@ namespace bpkg
// package's indirect ones) to also notice this.
//
void
- collect_order_dependents (const repointed_dependents&);
+ collect_order_dependents (const repointed_dependents&,
+ unsatisfied_dependents&);
void
- collect_order_dependents (iterator, const repointed_dependents&);
+ collect_order_dependents (iterator,
+ const repointed_dependents&,
+ unsatisfied_dependents&);
void
clear ();
@@ -1411,7 +1461,10 @@ namespace bpkg
// Return the list of existing dependents that has a configuration clause
// for the specified dependency. Skip dependents which are being built and
// require recursive recollection or dropped (present in the map) or
- // expected to be built or dropped (present in rpt_depts or replaced_vers).
+ // expected to be built or dropped (present in rpt_depts or
+ // replaced_vers). Also skip dependents which impose the version
+ // constraint on this dependency and the dependency doesn't satisfy this
+ // constraint.
//
// Optionally, specify the function which can verify the dependent build
// and decide whether to override the default behavior and still add the
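
The comment block introducing unsatisfied_dependents above describes how the cached constraints interact with the execution plan refinement loop. The following standalone sketch models that flow with simplified stand-in types (invented for illustration, not bpkg code): the cache is rebuilt on every refinement iteration and only turns into a failure once the plan is finalized.

#include <vector>
#include <string>
#include <stdexcept>
#include <iostream>

struct unsatisfied_constraint    // Stand-in for an unsatisfied_dependent entry.
{
  std::string dependent;
  std::string dependency;
  std::string constraint;
};

int
main ()
{
  std::vector<unsatisfied_constraint> unsatisfied_depts;

  bool refine (true);
  int iteration (0);

  while (refine)
  {
    // Each iteration starts with an empty cache (mirrors how the real code
    // drops the cache and re-collects on every refinement).
    //
    unsatisfied_depts.clear ();

    // Pretend the first iteration hits an unsatisfied constraint which the
    // second one resolves (for example, the dependent gets upgraded too).
    //
    if (iteration == 0)
      unsatisfied_depts.push_back ({"libbar", "libbaz", "== 0.1.0"});

    // Pretend the plan is finalized after the second iteration.
    //
    refine = (iteration == 0);
    ++iteration;
  }

  // Only now, with the plan finalized, does an unsatisfied constraint become
  // a hard error (the real code calls unsatisfied_depts.diag ()).
  //
  if (!unsatisfied_depts.empty ())
  {
    const unsatisfied_constraint& u (unsatisfied_depts.front ());
    throw std::runtime_error ("unable to upgrade " + u.dependency +
                              ": " + u.dependent + " depends on (" +
                              u.dependency + ' ' + u.constraint + ')');
  }

  std::cout << "plan finalized in " << iteration << " iterations\n";
}
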
diff --git a/bpkg/pkg-build.cxx b/bpkg/pkg-build.cxx
index 4f5b423..e887018 100644
--- a/bpkg/pkg-build.cxx
+++ b/bpkg/pkg-build.cxx
@@ -897,12 +897,13 @@ namespace bpkg
return r && r->available == nullptr ? nullopt : r;
}
- // Return false if the plan execution was noop.
+ // Return false if the plan execution was noop. If unsatisfied dependents
+ // are specified then we are in the simulation mode.
//
static bool
execute_plan (const pkg_build_options&,
build_package_list&,
- bool simulate,
+ unsatisfied_dependents* simulate,
const function<find_database_function>&);
using pkg_options = pkg_build_pkg_options;
@@ -3374,6 +3375,7 @@ namespace bpkg
postponed_packages postponed_alts;
postponed_configurations postponed_cfgs;
strings postponed_cfgs_history;
+ unsatisfied_dependents unsatisfied_depts;
try
{
@@ -3751,7 +3753,7 @@ namespace bpkg
// reconfigure because of the up/down-grades of packages that are now
// on the list.
//
- pkgs.collect_order_dependents (rpt_depts);
+ pkgs.collect_order_dependents (rpt_depts, unsatisfied_depts);
// And, finally, make sure all the packages that we need to unhold
// are on the list.
@@ -3832,7 +3834,7 @@ namespace bpkg
changed = execute_plan (o,
bl,
- true /* simulate */,
+ &unsatisfied_depts,
find_prereq_database);
if (changed)
@@ -4385,6 +4387,13 @@ namespace bpkg
}
}
+ // Issue diagnostics and fail if the execution plan is finalized and
+ // any existing dependents are not satisfied with their
+ // dependencies.
+ //
+ if (!refine && !unsatisfied_depts.empty ())
+ unsatisfied_depts.diag ();
+
// Re-link the private configurations that were created during the
// collection of the package builds with their parent
// configurations. Note that these links were lost on the previous
@@ -4794,7 +4803,7 @@ namespace bpkg
//
bool noop (!execute_plan (o,
pkgs,
- false /* simulate */,
+ nullptr /* simulate */,
find_prereq_database));
if (o.configure_only ())
@@ -4869,13 +4878,16 @@ namespace bpkg
static bool
execute_plan (const pkg_build_options& o,
build_package_list& build_pkgs,
- bool simulate,
+ unsatisfied_dependents* simulate,
const function<find_database_function>& fdb)
{
tracer trace ("execute_plan");
l4 ([&]{trace << "simulate: " << (simulate ? "yes" : "no");});
+ // If unsatisfied dependents are specified then we are in the simulation
+ // mode and thus simulate can be used as bool.
+
bool r (false);
uint16_t verb (!simulate ? bpkg::verb : 0);
@@ -5524,7 +5536,7 @@ namespace bpkg
configured_state);
}
}
- else // Dependent.
+ else // Existing dependent.
{
// This is an adjustment of a dependent which cannot be system
// (otherwise it wouldn't be a dependent) and cannot become system
@@ -5556,6 +5568,28 @@ namespace bpkg
const dependencies& deps (p.skeleton->available->dependencies);
+ // In the simulation mode unconstrain all the unsatisfactory
+ // dependencies, if any, while configuring the dependent.
+ //
+ vector<package_key> unconstrain_deps;
+
+ if (simulate)
+ {
+ unsatisfied_dependent* ud (
+ simulate->find_dependent (package_key (pdb, p.name ())));
+
+ if (ud != nullptr)
+ {
+ unconstrain_deps.reserve (ud->dependencies.size ());
+
+ for (const auto& d: ud->dependencies)
+ {
+ const build_package& p (*d.first);
+ unconstrain_deps.emplace_back (p.db, p.name ());
+ }
+ }
+ }
+
// @@ Note that on reconfiguration the dependent looses the
// potential configuration variables specified by the user on
// some previous build, which can be quite surprising. Should we
@@ -5573,7 +5607,10 @@ namespace bpkg
prereqs (),
simulate,
fdb,
- configured_state);
+ configured_state,
+ (!unconstrain_deps.empty ()
+ ? &unconstrain_deps
+ : nullptr));
}
t.commit ();
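
The execute_plan() change above turns the bool simulate parameter into an unsatisfied_dependents pointer. A minimal standalone sketch of that idea follows, with simplified stand-in types invented for illustration: a null pointer still reads as "not simulating", while a non-null one also supplies the cache from which the per-dependent unconstrain list is assembled.

#include <string>
#include <vector>
#include <iostream>

struct unsatisfied_dependent
{
  std::string dependent;                 // Stand-in for package_key.
  std::vector<std::string> dependencies; // Stand-in for the dependency keys.
};

using unsatisfied_dependents = std::vector<unsatisfied_dependent>;

void
execute_plan (unsatisfied_dependents* simulate)
{
  // The pointer still works as the old bool flag.
  //
  bool simulating (simulate != nullptr);
  std::cout << "simulate: " << (simulating ? "yes" : "no") << '\n';

  if (simulating)
  {
    // When reconfiguring an existing dependent, gather its unsatisfactory
    // dependencies so their constraints can be ignored during simulation.
    //
    std::vector<std::string> unconstrain_deps;

    for (const unsatisfied_dependent& ud: *simulate)
      if (ud.dependent == "libbar")
        unconstrain_deps = ud.dependencies;

    std::cout << "unconstrained dependencies: "
              << unconstrain_deps.size () << '\n';
  }
}

int
main ()
{
  unsatisfied_dependents uds {{"libbar", {"libbaz"}}};

  execute_plan (&uds);    // Simulation.
  execute_plan (nullptr); // Real execution.
}
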
diff --git a/bpkg/pkg-configure.cxx b/bpkg/pkg-configure.cxx
index 45ab10b..2801539 100644
--- a/bpkg/pkg-configure.cxx
+++ b/bpkg/pkg-configure.cxx
@@ -30,6 +30,8 @@ using namespace butl;
namespace bpkg
{
+ static optional<version_constraint> absent_constraint;
+
configure_prerequisites_result
pkg_configure_prerequisites (const common_options& o,
database& db,
@@ -40,10 +42,15 @@ namespace bpkg
const vector<package_name>* prev_prereqs,
bool simulate,
const function<find_database_function>& fdb,
- const function<find_package_state_function>& fps)
+ const function<find_package_state_function>& fps,
+ const vector<package_key>* unconstrain_deps)
{
tracer trace ("pkg_configure_prerequisites");
+ // Unconstraining dependencies is only allowed in the simulation mode.
+ //
+ assert (unconstrain_deps == nullptr || simulate);
+
tracer_guard tg (db, trace);
package_prerequisites prereqs;
@@ -161,12 +168,28 @@ namespace bpkg
if (dp == nullptr)
break;
+ database& pdb (*spd.second);
+
optional<pair<package_state, package_substate>> dps;
if (fps != nullptr)
dps = fps (dp);
+ const optional<version_constraint>* dc (&d.constraint);
+
+ // Unconstrain this dependency, if requested.
+ //
+ if (unconstrain_deps != nullptr)
+ {
+ const vector<package_key>& uds (*unconstrain_deps);
+ if (find (uds.begin (), uds.end (), package_key (pdb, n)) !=
+ uds.end ())
+ {
+ dc = &absent_constraint;
+ }
+ }
+
if ((dps ? dps->first : dp->state) != package_state::configured ||
- !satisfies (dp->version, d.constraint) ||
+ !satisfies (dp->version, *dc) ||
(pps != nullptr &&
find (pps->begin (), pps->end (), dp->name) == pps->end ()))
break;
@@ -177,8 +200,8 @@ namespace bpkg
bool conf (da.prefer || da.require);
prerequisites.emplace_back (
- lazy_shared_ptr<selected_package> (*spd.second, dp),
- prerequisite_info {d.constraint,
+ lazy_shared_ptr<selected_package> (pdb, dp),
+ prerequisite_info {*dc,
make_pair (conf ? di + 1 : 0,
conf ? dai + 1 : 0)});
}
diff --git a/bpkg/pkg-configure.hxx b/bpkg/pkg-configure.hxx
index c4c2758..099e1e8 100644
--- a/bpkg/pkg-configure.hxx
+++ b/bpkg/pkg-configure.hxx
@@ -68,6 +68,9 @@ namespace bpkg
// dependency decisions" mode). Failed that, select an alternative as if no
// prerequisites are specified (the "make dependency decisions" mode).
//
+ // Optionally, remove constraints from the specified dependencies
+ // (unconstrain_deps). Only allowed in the simulation mode.
+ //
struct configure_prerequisites_result
{
package_prerequisites prerequisites;
@@ -92,17 +95,18 @@ namespace bpkg
// Note: loads selected packages.
//
configure_prerequisites_result
- pkg_configure_prerequisites (const common_options&,
- database&,
- transaction&,
- const dependencies&,
- const vector<size_t>* alternatives,
- package_skeleton&&,
- const vector<package_name>* prev_prerequisites,
- bool simulate,
- const function<find_database_function>&,
- const function<find_package_state_function>&);
-
+ pkg_configure_prerequisites (
+ const common_options&,
+ database&,
+ transaction&,
+ const dependencies&,
+ const vector<size_t>* alternatives,
+ package_skeleton&&,
+ const vector<package_name>* prev_prerequisites,
+ bool simulate,
+ const function<find_database_function>&,
+ const function<find_package_state_function>&,
+ const vector<package_key>* unconstrain_deps = nullptr);
// Configure the package, update its state, and commit the transaction.
//
diff --git a/tests/pkg-build.testscript b/tests/pkg-build.testscript
index d8ab81c..fc89df5 100644
--- a/tests/pkg-build.testscript
+++ b/tests/pkg-build.testscript
@@ -260,7 +260,7 @@
# | |-- libbar-0.1.0.tar.gz -> libbaz
# | `-- repositories.manifest
# |
-# |-- t12b -> t12b (prerequisite repository)
+# |-- t12b -> t12a (prerequisite repository)
# | |-- libbaz-0.1.0.tar.gz
# | |-- libbar-1.0.0.tar.gz -> libbaz == 0.1.0
# | |-- foo-0.1.0.tar.gz
@@ -1156,8 +1156,9 @@ test.arguments += --sys-no-query
:
{
$clone_cfg;
- $pkg_fetch libfoo/1.1.0 && $pkg_unpack libfoo && $pkg_configure libfoo;
- $pkg_fetch libbar/1.1.0 && $pkg_unpack libbar && $pkg_configure libbar;
+
+ $rep_add $rep/t4a $rep/t4b && $rep_fetch;
+ $pkg_build libfoo/1.1.0 libbar/1.1.0 -d cfg 2>!;
$* libfoo-1.2.0.tar.gz 2>>EOE != 0;
error: unknown package libfoo-1.2.0.tar.gz
@@ -1186,10 +1187,11 @@ test.arguments += --sys-no-query
{
$clone_cfg;
$cfg_create -d cfg2 &cfg2/***;
- $rep_add -d cfg2 $rep/t4c && $rep_fetch -d cfg2;
+ $rep_add -d cfg2 $rep/t4a $rep/t4b $rep/t4c && $rep_fetch -d cfg2;
$cfg_link -d cfg2 cfg;
- $pkg_fetch libfoo/1.1.0 && $pkg_unpack libfoo && $pkg_configure libfoo;
- $pkg_fetch libbar/1.1.0 && $pkg_unpack libbar && $pkg_configure libbar;
+
+ $rep_add $rep/t4a $rep/t4b && $rep_fetch;
+ $pkg_build libfoo/1.1.0 libbar/1.1.0 -d cfg 2>!;
$* libfoo-1.2.0.tar.gz 2>>EOE != 0;
error: unknown package libfoo-1.2.0.tar.gz
@@ -1214,6 +1216,49 @@ test.arguments += --sys-no-query
$pkg_disfigure libfoo 2>'disfigured libfoo/1.1.0';
$pkg_purge libfoo 2>'purged libfoo/1.1.0'
}
+
+ : able-downgrade
+ :
+ : Similar to the above unable-downgrade, but this time libfoo and libbar
+ : are configured manually and so are not held. Thus, libfoo downgrades
+ : successfully since libbar is just dropped having no dependents.
+ :
+ {
+ $clone_cfg;
+
+ $pkg_fetch libfoo/1.1.0 && $pkg_unpack libfoo && $pkg_configure libfoo;
+ $pkg_fetch libbar/1.1.0 && $pkg_unpack libbar && $pkg_configure libbar;
+
+ $* libfoo/1.0.0 >>EOO;
+ downgrade libfoo/1.0.0
+ drop libbar/1.1.0 (unused)
+ EOO
+
+ $pkg_drop libbar libfoo
+ }
+
+ : able-downgrade-config
+ :
+ : As above but with a linked configuration.
+ :
+ {
+ $clone_cfg;
+ $cfg_create -d cfg2 &cfg2/***;
+ $rep_add -d cfg2 $rep/t4c && $rep_fetch -d cfg2;
+ $cfg_link -d cfg2 cfg;
+
+ $pkg_fetch libfoo/1.1.0 && $pkg_unpack libfoo && $pkg_configure libfoo;
+ $pkg_fetch libbar/1.1.0 && $pkg_unpack libbar && $pkg_configure libbar;
+
+ test.arguments = $regex.apply($test.arguments, cfg, cfg2);
+
+ $* libfoo/1.0.0 +{ --config-id 1 } >>~%EOO%;
+ %downgrade libfoo/1.0.0 \[cfg.\]%
+ %drop libbar/1.1.0 \[cfg.\] \(unused\)%
+ EOO
+
+ $pkg_drop libbar libfoo
+ }
}
: dependent-reconfiguration
@@ -4994,21 +5039,6 @@ test.arguments += --sys-no-query
: dropped, which will happen some later execution plan refinement
: iteration.
:
- : @@ This scenario is not supported yet and fails with:
- :
- : error: unable to upgrade package libbaz/0.1.0 to 1.0.0
- : info: because package libbar depends on (libbaz == 0.1.0)
- : info: package libbaz/1.0.0 required by baz
- : info: explicitly request up/downgrade of package libbar
- : info: or explicitly specify package libbaz version to manually satisfy these constraints
- :
- : We could probably fix this postponing the constraints check in
- : collect_order_dependents() until the final execution plan is produced
- : (after all that refinement iterations). We could have an additional
- : iteration after all the refinements which would enable the constraint
- : check in collect_order_dependents().
- :
- if false
{
$clone_cfg;
@@ -5020,12 +5050,15 @@ test.arguments += --sys-no-query
libbaz configured 0.1.0 available [1.0.0]
EOO
- $* baz foo/0.1.0 2>|;
+ $* baz foo/0.1.0 2>!;
$pkg_status -r >>EOO;
+ !foo configured !0.1.0 available 1.0.0
+ !baz configured 1.0.0
+ libbaz configured 1.0.0
EOO
- $pkg_drop foo
+ $pkg_drop baz foo
}
}