author     Karen Arutyunov <karen@codesynthesis.com>  2018-03-21 21:40:28 +0300
committer  Karen Arutyunov <karen@codesynthesis.com>  2018-04-19 19:39:55 +0300
commit     15dff3c592385466406732cd6ced809dc28cf2e2 (patch)
tree       1da9f0738293eb7906d92ab010a79c689087b655 /bpkg
parent     46842f6cf74d085ced382dd0c187f6a7a578913c (diff)
Implement build plan simulation
Diffstat (limited to 'bpkg')
-rw-r--r--  bpkg/auth.cxx            2
-rw-r--r--  bpkg/cfg-create.cxx      2
-rw-r--r--  bpkg/database.cxx        3
-rw-r--r--  bpkg/database.hxx       54
-rw-r--r--  bpkg/fetch-pkg.cxx      46
-rw-r--r--  bpkg/fetch.hxx           4
-rw-r--r--  bpkg/forward.hxx         2
-rw-r--r--  bpkg/package.cxx        29
-rw-r--r--  bpkg/package.hxx        33
-rw-r--r--  bpkg/pkg-build.cxx     733
-rw-r--r--  bpkg/pkg-checkout.cxx  100
-rw-r--r--  bpkg/pkg-checkout.hxx    3
-rw-r--r--  bpkg/pkg-command.cxx     4
-rw-r--r--  bpkg/pkg-configure.cxx  97
-rw-r--r--  bpkg/pkg-configure.hxx  14
-rw-r--r--  bpkg/pkg-disfigure.cxx 134
-rw-r--r--  bpkg/pkg-disfigure.hxx   3
-rw-r--r--  bpkg/pkg-drop.cxx       15
-rw-r--r--  bpkg/pkg-fetch.cxx      69
-rw-r--r--  bpkg/pkg-fetch.hxx       6
-rw-r--r--  bpkg/pkg-purge.cxx      32
-rw-r--r--  bpkg/pkg-purge.hxx       4
-rw-r--r--  bpkg/pkg-status.cxx      2
-rw-r--r--  bpkg/pkg-unpack.cxx    242
-rw-r--r--  bpkg/pkg-unpack.hxx      9
-rw-r--r--  bpkg/rep-add.cxx         2
-rw-r--r--  bpkg/rep-fetch.cxx       4
-rw-r--r--  bpkg/rep-list.cxx        2
-rw-r--r--  bpkg/rep-remove.cxx      4
29 files changed, 1125 insertions, 529 deletions
diff --git a/bpkg/auth.cxx b/bpkg/auth.cxx
index c8ec955..89f2b56 100644
--- a/bpkg/auth.cxx
+++ b/bpkg/auth.cxx
@@ -622,7 +622,7 @@ namespace bpkg
else
{
database db (open (*conf, trace));
- transaction t (db.begin ());
+ transaction t (db);
r = auth_cert (co, *conf, db, pem, rl);
t.commit ();
}
diff --git a/bpkg/cfg-create.cxx b/bpkg/cfg-create.cxx
index 8fd3a05..9b56cad 100644
--- a/bpkg/cfg-create.cxx
+++ b/bpkg/cfg-create.cxx
@@ -98,7 +98,7 @@ namespace bpkg
// Add the special, root repository object with empty location.
//
- transaction t (db.begin ());
+ transaction t (db);
db.persist (repository (repository_location ()));
t.commit ();
diff --git a/bpkg/database.cxx b/bpkg/database.cxx
index a590eca..5b9e14f 100644
--- a/bpkg/database.cxx
+++ b/bpkg/database.cxx
@@ -31,6 +31,7 @@ namespace bpkg
try
{
+
// We don't need the thread pool.
//
unique_ptr<connection_factory> cf (new single_connection_factory);
@@ -51,6 +52,8 @@ namespace bpkg
// also fail if the database is inaccessible (e.g., file does not
// exist, already used by another process, etc).
//
+ using odb::sqlite::transaction; // Skip the wrapper.
+
try
{
db.connection ()->execute ("PRAGMA locking_mode = EXCLUSIVE");
diff --git a/bpkg/database.hxx b/bpkg/database.hxx
index f90aa6b..d97792a 100644
--- a/bpkg/database.hxx
+++ b/bpkg/database.hxx
@@ -25,7 +25,59 @@ namespace bpkg
using odb::session;
using odb::sqlite::database;
- using odb::sqlite::transaction;
+
+ // Transaction wrapper that allows the creation of dummy transactions (start
+ // is false) that in reality use an existing transaction.
+ //
+ struct transaction
+ {
+ using database_type = bpkg::database;
+
+ explicit
+ transaction (database_type& db, bool start = true)
+ : db_ (db), start_ (start), t_ () // Finalized.
+ {
+ if (start)
+ t_.reset (db.begin ());
+ }
+
+ void
+ commit ()
+ {
+ if (start_)
+ t_.commit ();
+ }
+
+ void
+ rollback ()
+ {
+ if (start_)
+ t_.rollback ();
+ }
+
+ database_type&
+ database ()
+ {
+ return db_;
+ }
+
+ static bool
+ has_current ()
+ {
+ return odb::sqlite::transaction::has_current ();
+ }
+
+ static odb::sqlite::transaction&
+ current ()
+ {
+ return odb::sqlite::transaction::current ();
+ }
+
+ private:
+ database_type& db_;
+ bool start_;
+ odb::sqlite::transaction t_;
+ };
database
open (const dir_path& configuration, tracer&, bool create = false);
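
The point of this wrapper is that the same code path can run either in its own transaction or as part of an outer one (as the pkg-build changes below do, passing !simulate as start). A minimal sketch of the intended usage; do_update() and the in_outer flag are illustrative only and not part of this patch:

  // Hypothetical helper: works both standalone and inside an outer
  // transaction.
  //
  static void
  do_update (database& db, bool in_outer)
  {
    // Start a real transaction unless the caller already has one, in which
    // case create a dummy wrapper that reuses it.
    //
    transaction t (db, !in_outer /* start */);

    // ... find/persist/update objects via db ...

    t.commit (); // No-op for a dummy wrapper; the outer transaction commits.
  }
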
diff --git a/bpkg/fetch-pkg.cxx b/bpkg/fetch-pkg.cxx
index aba95ab..5c2fec7 100644
--- a/bpkg/fetch-pkg.cxx
+++ b/bpkg/fetch-pkg.cxx
@@ -86,18 +86,16 @@ namespace bpkg
info << "re-run with -v for more information" << endf;
}
- static path
+ static void
fetch_file (const common_options& o,
const repository_url& u,
- const dir_path& d)
+ const path& df)
{
- path r (d / u.path->leaf ());
+ if (exists (df))
+ fail << "file " << df << " already exists";
- if (exists (r))
- fail << "file " << r << " already exists";
-
- auto_rmfile arm (r);
- process pr (start_fetch (o, u.string (), r));
+ auto_rmfile arm (df);
+ process pr (start_fetch (o, u.string (), df));
if (!pr.wait ())
{
@@ -109,24 +107,19 @@ namespace bpkg
}
arm.cancel ();
- return r;
}
- static path
- fetch_file (const path& f, const dir_path& d)
+ static void
+ fetch_file (const path& sf, const path& df)
{
- path r (d / f.leaf ());
-
try
{
- cpfile (f, r);
+ cpfile (sf, df);
}
catch (const system_error& e)
{
- fail << "unable to copy " << f << " to " << r << ": " << e;
+ fail << "unable to copy " << sf << " to " << df << ": " << e;
}
-
- return r;
}
// If o is nullptr, then don't calculate the checksum.
@@ -230,27 +223,27 @@ namespace bpkg
: fetch_manifest<signature_manifest> (nullptr, f, iu).first;
}
- path
+ void
pkg_fetch_archive (const common_options& o,
const repository_location& rl,
const path& a,
- const dir_path& d)
+ const path& df)
{
assert (!a.empty () && a.relative ());
assert (rl.remote () || rl.absolute ());
repository_url u (rl.url ());
- path& f (*u.path);
- f /= a;
+ path& sf (*u.path);
+ sf /= a;
auto bad_loc = [&u] () {fail << "invalid archive location " << u;};
try
{
- f.normalize ();
+ sf.normalize ();
- if (*f.begin () == "..") // Can be the case for the remote location.
+ if (*sf.begin () == "..") // Can be the case for the remote location.
bad_loc ();
}
catch (const invalid_path&)
@@ -258,8 +251,9 @@ namespace bpkg
bad_loc ();
}
- return rl.remote ()
- ? fetch_file (o, u, d)
- : fetch_file (f, d);
+ if (rl.remote ())
+ fetch_file (o, u, df);
+ else
+ fetch_file (sf, df);
}
}
diff --git a/bpkg/fetch.hxx b/bpkg/fetch.hxx
index fc6b763..e784cb6 100644
--- a/bpkg/fetch.hxx
+++ b/bpkg/fetch.hxx
@@ -40,11 +40,11 @@ namespace bpkg
const repository_location&,
bool ignore_unknown);
- path
+ void
pkg_fetch_archive (const common_options&,
const repository_location&,
const path& archive,
- const dir_path& destdir);
+ const path& dest);
// Repository type git (fetch-git.cxx).
//
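
In caller terms the change is that the destination file is now chosen by the caller rather than derived from a destination directory, which is what lets pkg-fetch skip the actual download when simulating. Compare (adapted from the pkg-fetch.cxx hunk further down):

  // Before: fetch into the configuration directory and get the path back.
  //
  // path a (pkg_fetch_archive (co, pl->repository->location, pl->location, c));

  // After: compute the destination explicitly and pass it in.
  //
  path a (c / pl->location.leaf ());
  pkg_fetch_archive (co, pl->repository->location, pl->location, a);
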
diff --git a/bpkg/forward.hxx b/bpkg/forward.hxx
index c253e72..3845003 100644
--- a/bpkg/forward.hxx
+++ b/bpkg/forward.hxx
@@ -10,7 +10,7 @@
namespace bpkg
{
using odb::sqlite::database;
- using odb::sqlite::transaction;
+ struct transaction;
// <bpkg/package.hxx>
//
diff --git a/bpkg/package.cxx b/bpkg/package.cxx
index da862ef..29460e4 100644
--- a/bpkg/package.cxx
+++ b/bpkg/package.cxx
@@ -145,6 +145,28 @@ namespace bpkg
return result ();
}
+ vector<shared_ptr<available_package>>
+ filter (const vector<shared_ptr<repository>>& rps,
+ odb::result<available_package>&& apr,
+ bool prereq)
+ {
+ vector<shared_ptr<available_package>> aps;
+
+ for (shared_ptr<available_package> ap: pointer_result (apr))
+ {
+ for (const shared_ptr<repository> r: rps)
+ {
+ if (filter (r, ap, prereq) != nullptr)
+ {
+ aps.push_back (move (ap));
+ break;
+ }
+ }
+ }
+
+ return aps;
+ }
+
// selected_package
//
string selected_package::
@@ -201,6 +223,9 @@ namespace bpkg
string mc (sha256 (o, d / manifest_file));
+ // The selected package must not be "simulated" (see pkg-build for
+ // details).
+ //
assert (p->manifest_checksum);
bool changed (mc != *p->manifest_checksum);
@@ -211,9 +236,7 @@ namespace bpkg
//
if (!changed && p->external ())
{
- dir_path src_root (p->src_root->absolute ()
- ? *p->src_root
- : c / *p->src_root);
+ dir_path src_root (p->effective_src_root (c));
// We need to complete and normalize the source directory as it may
// generally be completed against the configuration directory (unlikely
diff --git a/bpkg/package.hxx b/bpkg/package.hxx
index 58338bd..49c6bd0 100644
--- a/bpkg/package.hxx
+++ b/bpkg/package.hxx
@@ -484,7 +484,7 @@ namespace bpkg
operator size_t () const {return result;}
};
- // Only return packages that are in the specified repository, its
+ // Only return packages that are in the specified repositories, their
// complements or prerequisites (if prereq is true), recursively. While you
// could maybe come up with a (barely comprehensible) view/query to achieve
// this, doing it on the "client side" is definitely more straightforward.
@@ -504,6 +504,11 @@ namespace bpkg
const shared_ptr<available_package>&,
bool prereq = true);
+ vector<shared_ptr<available_package>>
+ filter (const vector<shared_ptr<repository>>&,
+ odb::result<available_package>&&,
+ bool prereq = true);
+
// package_state
//
enum class package_state
@@ -613,7 +618,11 @@ namespace bpkg
bool purge_src;
// The checksum of the manifest file located in the source directory.
- // Must be present if the source directory is present.
+ //
+ // Must be present if the source directory is present, unless the object
+ // is created/updated during the package build simulation (see pkg-build
+ // for details). Note that during the simulation the manifest may not be
+ // available.
//
optional<string> manifest_checksum;
@@ -659,6 +668,26 @@ namespace bpkg
string
version_string () const;
+ // Return the relative source directory completed using the configuration
+ // directory. Return the absolute source directory as is.
+ //
+ dir_path
+ effective_src_root (const dir_path& configuration) const
+ {
+ assert (src_root);
+ return src_root->absolute () ? *src_root : configuration / *src_root;
+ }
+
+ // Return the output directory completed using the configuration directory.
+ // Note that the output directory is always relative.
+ //
+ dir_path
+ effective_out_root (const dir_path& configuration) const
+ {
+ assert (out_root);
+ return configuration / *out_root;
+ }
+
// Database mapping.
//
#pragma db member(name) id
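
The two new accessors simply centralize the path-completion logic that several call sites in this patch used to spell out by hand; roughly:

  // Before (repeated at call sites):
  //
  dir_path src_root (p->src_root->absolute ()
                     ? *p->src_root
                     : c / *p->src_root);
  dir_path out_root (c / *p->out_root); // Always relative.

  // After:
  //
  dir_path src_root (p->effective_src_root (c));
  dir_path out_root (p->effective_out_root (c));
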
diff --git a/bpkg/pkg-build.cxx b/bpkg/pkg-build.cxx
index 4b703b4..395fb14 100644
--- a/bpkg/pkg-build.cxx
+++ b/bpkg/pkg-build.cxx
@@ -39,26 +39,14 @@ using namespace butl;
namespace bpkg
{
- // @@ TODO
- //
- // - Detect and complain about dependency cycles.
- // - Configuration vars (both passed and preserved)
+ // Query the available packages that optionally satisfy the specified
+ // version constraint and return them in version-descending order. Note
+ // that a stub satisfies any constraint.
//
-
- // Try to find a package that optionally satisfies the specified
- // version constraint. Look in the specified repository, its
- // prerequisite repositories, and their complements, recursively
- // (note: recursivity applies to complements, not prerequisites).
- // Return the package and the repository in which it was found or
- // NULL for both if not found. Note that a stub satisfies any
- // constraint.
- //
- static pair<shared_ptr<available_package>, shared_ptr<repository>>
- find_available (database& db,
- const string& name,
- const shared_ptr<repository>& r,
- const optional<dependency_constraint>& c,
- bool prereq = true)
+ odb::result<available_package>
+ query_available (database& db,
+ const string& name,
+ const optional<dependency_constraint>& c)
{
using query = query<available_package>;
@@ -128,11 +116,34 @@ namespace bpkg
}
q += order_by_version_desc (vm);
+ return db.query<available_package> (q);
+ }
+ // @@ TODO
+ //
+ // - Detect and complain about dependency cycles.
+ // - Configuration vars (both passed and preserved)
+ //
+
+ // Try to find a package that optionally satisfies the specified
+ // version constraint. Look in the specified repository, its
+ // prerequisite repositories, and their complements, recursively
+ // (note: recursivity applies to complements, not prerequisites).
+ // Return the package and the repository in which it was found or
+ // NULL for both if not found. Note that a stub satisfies any
+ // constraint.
+ //
+ static pair<shared_ptr<available_package>, shared_ptr<repository>>
+ find_available (database& db,
+ const string& name,
+ const shared_ptr<repository>& r,
+ const optional<dependency_constraint>& c,
+ bool prereq = true)
+ {
// Filter the result based on the repository to which each version
// belongs.
//
- return filter_one (r, db.query<available_package> (q), prereq);
+ return filter_one (r, query_available (db, name, c), prereq);
}
// Create a transient (or fake, if you prefer) available_package object
@@ -147,7 +158,7 @@ namespace bpkg
//
static pair<shared_ptr<available_package>, shared_ptr<repository>>
make_available (const common_options& options,
- const dir_path& cd,
+ const dir_path& c,
database& db,
const shared_ptr<selected_package>& sp)
{
@@ -172,12 +183,11 @@ namespace bpkg
// be able to get its manifest.
//
const optional<path>& a (sp->archive);
- const optional<dir_path>& d (sp->src_root);
package_manifest m (
sp->state == package_state::fetched
- ? pkg_verify (options, a->absolute () ? *a : cd / *a, true)
- : pkg_verify (d->absolute () ? *d : cd / *d, true));
+ ? pkg_verify (options, a->absolute () ? *a : c / *a, true)
+ : pkg_verify (sp->effective_src_root (c), true));
// Copy the possibly fixed up version from the selected package.
//
@@ -223,6 +233,12 @@ namespace bpkg
shared_ptr<available_package> available; // Can be NULL, fake/transient.
shared_ptr<bpkg::repository> repository; // Can be NULL (orphan) or root.
+ const string&
+ name () const
+ {
+ return selected != nullptr ? selected->name : available->id.name;
+ }
+
// Hold flags. Note that we only "increase" the hold_package value that is
// already in the selected package.
//
@@ -313,7 +329,7 @@ namespace bpkg
}
string
- available_name () const
+ available_name_version () const
{
assert (available != nullptr);
@@ -326,7 +342,9 @@ namespace bpkg
}
};
- struct build_packages: list<reference_wrapper<build_package>>
+ using build_packages = list<reference_wrapper<build_package>>;
+
+ struct build_package_map: build_packages
{
// Collect the package. Return its pointer if this package version was, in
// fact, added to the map and NULL if it was already there or the existing
@@ -336,7 +354,7 @@ namespace bpkg
collect (const common_options& options,
const dir_path& cd,
database& db,
- build_package&& pkg,
+ build_package pkg,
bool recursively)
{
using std::swap; // ...and not list::swap().
@@ -399,8 +417,8 @@ namespace bpkg
fail << "unable to satisfy constraints on package " << n <<
info << d1 << " depends on (" << n << " " << c1->value << ")" <<
info << d2 << " depends on (" << n << " " << c2->value << ")" <<
- info << "available " << p1->available_name () <<
- info << "available " << p2->available_name () <<
+ info << "available " << p1->available_name_version () <<
+ info << "available " << p2->available_name_version () <<
info << "explicitly specify " << n << " version to manually "
<< "satisfy both constraints";
}
@@ -408,8 +426,8 @@ namespace bpkg
swap (p1, p2);
}
- l4 ([&]{trace << "pick " << p1->available_name ()
- << " over " << p2->available_name ();});
+ l4 ([&]{trace << "pick " << p1->available_name_version ()
+ << " over " << p2->available_name_version ();});
}
// If versions are the same, then we still need to pick the entry as
// one of them can build a package from source while another configure
@@ -472,7 +490,7 @@ namespace bpkg
{
// This is the first time we are adding this package name to the map.
//
- l4 ([&]{trace << "add " << pkg.available_name ();});
+ l4 ([&]{trace << "add " << pkg.available_name_version ();});
string n (pkg.available->id.name); // Note: copy; see emplace() below.
i = map_.emplace (move (n), data_type {end (), move (pkg)}).first;
@@ -517,7 +535,7 @@ namespace bpkg
make_exception_guard (
[&pkg] ()
{
- info << "while satisfying " << pkg.available_name ();
+ info << "while satisfying " << pkg.available_name_version ();
}));
const shared_ptr<available_package>& ap (pkg.available);
@@ -614,7 +632,8 @@ namespace bpkg
// need the repository to allow orphans without prerequisites).
//
if (ar == nullptr)
- fail << "package " << pkg.available_name () << " is orphaned" <<
+ fail << "package " << pkg.available_name_version ()
+ << " is orphaned" <<
info << "explicitly upgrade it to a new version";
// We look for prerequisites only in the repositories of this
@@ -747,7 +766,7 @@ namespace bpkg
// attribution changes.
//
if (dsp->system ())
- dr << p->available_name ();
+ dr << p->available_name_version ();
else
dr << av; // Can't be a system version so is never wildcard.
@@ -975,7 +994,7 @@ namespace bpkg
// attribution changes.
//
if (p.system != sp->system ())
- dr << p.available_name ();
+ dr << p.available_name_version ();
else
dr << av; // Can't be the wildcard otherwise would satisfy.
@@ -990,7 +1009,7 @@ namespace bpkg
}
if (!rb.empty ())
- dr << info << "package " << p.available_name ()
+ dr << info << "package " << p.available_name_version ()
<< " required by" << rb;
dr << info << "explicitly request up/downgrade of package " << dn;
@@ -1178,6 +1197,161 @@ namespace bpkg
return os;
}
+ // If an upgrade/downgrade of the selected dependency to the specified
+ // version (empty means the highest possible one) is possible, then return
+ // the version to upgrade/downgrade to. Otherwise return the empty version
+ // with the reason why it is impossible. The empty reason means that the
+ // dependency is unused. Note that it should be called in session.
+ //
+ static pair<version, string>
+ evaluate_dependency (transaction& t, const string& n, const version& v)
+ {
+ tracer trace ("evaluate_dependency");
+
+ database& db (t.database ());
+ tracer_guard tg (db, trace);
+
+ shared_ptr<selected_package> sp (db.find<selected_package> (n));
+
+ if (sp == nullptr)
+ {
+ l5 ([&]{trace << n << "/" << v << ": unselected";});
+ return make_pair (version (), string ());
+ }
+
+ const version& sv (sp->version);
+
+ l6 ([&]{trace << n << "/" << v << ": current: " << sv;});
+
+ // Build the set of repositories the dependent packages now come from.
+ // Also cache the dependents and the constraints they apply to the
+ // dependency package.
+ //
+ vector<shared_ptr<repository>> repos;
+
+ vector<pair<shared_ptr<selected_package>,
+ optional<dependency_constraint>>> dependents;
+ {
+ set<shared_ptr<repository>> rps;
+
+ auto pds (db.query<package_dependent> (
+ query<package_dependent>::name == n));
+
+ if (pds.empty ())
+ {
+ l5 ([&]{trace << n << "/" << v << ": unused";});
+ return make_pair (version (), string ());
+ }
+
+ for (auto& pd: pds)
+ {
+ shared_ptr<selected_package> dsp (db.load<selected_package> (pd.name));
+
+ l6 ([&]{trace << n << "/" << v << ": dependent: "
+ << dsp->name << "/" << dsp->version;});
+
+ shared_ptr<available_package> dap (
+ db.find<available_package> (
+ available_package_id (dsp->name, dsp->version)));
+
+ if (dap != nullptr)
+ {
+ assert (!dap->locations.empty ());
+
+ for (const auto& pl: dap->locations)
+ {
+ shared_ptr<repository> r (pl.repository.load ());
+
+ if (rps.insert (r).second)
+ l6 ([&]{trace << n << "/" << v << ": " << r->location;});
+ }
+ }
+ else
+ l6 ([&]{trace << n << "/" << v << ": dependent unavailable";});
+
+ dependents.emplace_back (move (dsp), move (pd.constraint));
+ }
+
+ repos = vector<shared_ptr<repository>> (rps.begin (), rps.end ());
+ }
+
+ // Build the list of available packages that we can potentially
+ // upgrade/downgrade to, in version-descending order.
+ //
+ auto apr (v.empty ()
+ ? query_available (db, n, nullopt)
+ : query_available (db, n, dependency_constraint (v)));
+
+ vector<shared_ptr<available_package>> aps (filter (repos, move (apr)));
+
+ if (aps.empty ())
+ {
+ l5 ([&]{trace << n << "/" << v << ": unavailable";});
+ return make_pair (version (), "unavailable");
+ }
+
+ // Go through the upgrade/downgrade candidates and pick the first one that
+ // satisfies all the dependents.
+ //
+ bool highest (v.empty () || v == wildcard_version);
+
+ for (const shared_ptr<available_package>& ap: aps)
+ {
+ const version& av (ap->version);
+
+ // If we aim to upgrade to the highest possible version and it turns out
+ // to be not higher than the selected one, then just return the selected
+ // one to indicate that what we currently have is the best we can get.
+ //
+ if (highest && av <= sv)
+ {
+ l5 ([&]{trace << n << "/" << v << ": " << av
+ << " not better than current";});
+
+ return make_pair (sv, string ());
+ }
+
+ bool satisfy (true);
+
+ for (const auto& dp: dependents)
+ {
+ if (!satisfies (av, dp.second))
+ {
+ satisfy = false;
+
+ l6 ([&]{trace << n << "/" << v << ": " << av
+ << " unsatisfy selected "
+ << dp.first->name << "/" << dp.first->version;});
+
+ break;
+ }
+ }
+
+ if (satisfy)
+ {
+ l5 ([&]{trace << n << "/" << v << ": "
+ << (av > sv
+ ? "upgrade to "
+ : av < sv
+ ? "downgrade to "
+ : "leave ") << av;});
+
+ return make_pair (av, string ());
+ }
+ }
+
+ l5 ([&]{trace << n << "/" << v << ": unsatisfied";});
+ return make_pair (version (), "unsatisfied");
+ }
+
+ static void
+ execute_plan (const pkg_build_options&,
+ const dir_path&,
+ database&,
+ build_packages&,
+ bool,
+ set<shared_ptr<selected_package>>& drop_pkgs);
+
int
pkg_build (const pkg_build_options& o, cli::group_scanner& args)
{
@@ -1212,15 +1386,15 @@ namespace bpkg
database db (open (c, trace)); // Also populates the system repository.
- // Note that the session spans all our transactions. The idea here is
- // that selected_package objects in the build_packages list below will
- // be cached in this session. When subsequent transactions modify any
- // of these objects, they will modify the cached instance, which means
- // our list will always "see" their updated state.
+ // Note that the session spans all our transactions. The idea here is that
+ // selected_package objects in the build_package_map below will be cached
+ // in this session. When subsequent transactions modify any of these
+ // objects, they will modify the cached instance, which means our list
+ // will always "see" their updated state.
//
// Also note that rep_fetch() must be called in session.
//
- session s;
+ session ses;
// Preparse the (possibly grouped) package specs splitting them into the
// packages and location parts, and also parsing their options.
@@ -1240,7 +1414,7 @@ namespace bpkg
{
vector<repository_location> locations;
- transaction t (db.begin ());
+ transaction t (db);
while (args.more ())
{
@@ -1361,7 +1535,7 @@ namespace bpkg
//
vector<pkg_arg> pkg_args;
{
- transaction t (db.begin ());
+ transaction t (db);
for (pkg_spec& ps: specs)
{
@@ -1480,10 +1654,10 @@ namespace bpkg
return 0;
}
- // Assemble the list of packages we will need to build.
+ // Separate the packages specified on the command line into those to hold
+ // and those to up/down-grade as dependencies.
//
- build_packages pkgs;
- strings names;
+ vector<build_package> hold_pkgs;
{
// Check if the package is a duplicate. Return true if it is but
// harmless.
@@ -1504,7 +1678,7 @@ namespace bpkg
return !r.second;
};
- transaction t (db.begin ());
+ transaction t (db);
shared_ptr<repository> root (db.load<repository> (""));
@@ -1522,6 +1696,20 @@ namespace bpkg
{
pkg_arg& pa (*i);
+ if (pa.options.dependency ())
+ {
+ assert (false); // @@ TODO: we want to stash <pkg>/[ver] somewhere
+ // to be used during the refinement phase.
+ // It should probably be passed to
+ // evaluate_dependency().
+
+ //@@ TODO: we also need to handle "unhold"
+ //@@ TODO: we probably also need to pre-enter the version somehow if
+ // specified so that constraint resolution does not fail
+ // (i.e., this could be a manual resolution of the
+ // previously failed constraint).
+ }
+
// Reduce all the potential variations (archive, directory, package
// name, package name/version) to a single available_package object.
//
@@ -1548,7 +1736,7 @@ namespace bpkg
// This is a package archive (note that we shouldn't throw
// failed from here on).
//
- l4 ([&]{trace << "archive " << a;});
+ l4 ([&]{trace << "archive '" << a << "': " << pa;});
pa = pkg_arg (package_scheme::none,
m.name,
@@ -1600,7 +1788,7 @@ namespace bpkg
//
package_dir = true;
- l4 ([&]{trace << "directory " << d;});
+ l4 ([&]{trace << "directory '" << d << "': " << pa;});
// Fix-up the package version to properly decide if we need to
// upgrade/downgrade the package. Note that throwing failed
@@ -1674,13 +1862,11 @@ namespace bpkg
move (pa.options));
}
- l4 ([&]{trace << (pa.system () ? "system " : "")
- << "package " << pa.name << "; "
- << "version " << pa.version;});
+ l4 ([&]{trace << "package: " << pa;});
// Either get the user-specified version or the latest for a
// source code package. For a system package we peek the latest
- // one just to ensure the package is recognized.
+ // one just to make sure the package is recognized.
//
auto rp (
pa.version.empty () || pa.system ()
@@ -1691,6 +1877,11 @@ namespace bpkg
dependency_constraint (pa.version)));
ap = rp.first;
ar = rp.second;
+
+ // @@ TMP
+ //
+ if (pa.options.dependency ())
+ evaluate_dependency (t, pa.name, pa.version);
}
catch (const failed&)
{
@@ -1701,7 +1892,7 @@ namespace bpkg
// We are handling this argument.
//
- if (check_dup (*i++) || pa.options.dependency ())
+ if (check_dup (*i++))
continue;
// Load the package that may have already been selected and
@@ -1863,7 +2054,7 @@ namespace bpkg
{""}, // Required by (command line).
false}; // Reconfigure.
- l4 ([&]{trace << "collect " << p.available_name ();});
+ l4 ([&]{trace << "collect " << p.available_name_version ();});
// "Fix" the version the user asked for by adding the '==' constraint.
//
@@ -1875,38 +2066,211 @@ namespace bpkg
"command line",
dependency_constraint (pa.version));
- // Pre-collect user selection to make sure dependency-forced
- // up/down-grades are handled properly (i.e., the order in which we
- // specify packages on the command line does not matter).
- //
- pkgs.collect (o, c, db, move (p), false);
- names.push_back (pa.name);
+ hold_pkgs.push_back (move (p));
}
- // Collect all the packages prerequisites.
+ t.commit ();
+ }
+
+ // Assemble the list of packages we will need to build.
+ //
+ build_package_map pkgs;
+ {
+ // Iteratively refine the plan with dependency up/down-grades/drops.
//
- for (const string& n: names)
- pkgs.collect_prerequisites (o, c, db, n);
-
- // Now that we have collected all the package versions that we need
- // to build, arrange them in the "dependency order", that is, with
- // every package on the list only possibly depending on the ones
- // after it. Iterate over the names we have collected on the previous
- // step in reverse so that when we iterate over the packages (also in
- // reverse), things will be built as close as possible to the order
- // specified by the user (it may still get altered if there are
- // dependencies between the specified packages).
+ // @@ TODO: maybe not build_package, maybe just name & version so that
+ // we don't end up with selected_package object that has been rolled
+ // back?
//
- for (const string& n: reverse_iterate (names))
- pkgs.order (n);
+ vector<build_package> dep_pkgs;
- // Finally, collect and order all the dependents that we will need
- // to reconfigure because of the up/down-grades of packages that
- // are now on the list.
- //
- pkgs.collect_order_dependents (db);
+ for (bool refine (true), scratch (true); refine; )
+ {
+ transaction t (db);
- t.commit ();
+ if (scratch)
+ {
+ // Pre-collect user selection to make sure dependency-forced
+ // up/down-grades are handled properly (i.e., the order in which we
+ // specify packages on the command line does not matter).
+ //
+ for (const build_package& p: hold_pkgs)
+ pkgs.collect (o, c, db, p, false /* recursively */);
+
+ // Collect all the prerequisites.
+ //
+ for (const build_package& p: hold_pkgs)
+ pkgs.collect_prerequisites (o, c, db, p.name ());
+
+ scratch = false;
+ }
+
+ // Add dependencies to upgrade/downgrade/drop that were discovered on
+ // the previous iterations.
+ //
+ // Looks like keeping them as build_package objects would
+ // be natural? BUT: what if the selected_package is from
+ // the temporary changes/session?! So maybe not...
+ //
+ //@@ TODO: use empty version in build_package to indicate drop?
+ //@@ TODO: always put drops at the back of dep_pkgs so that they
+ // appear in the plan last (could also do it as a post-
+ // collection step if less hairy).
+ //
+ for (const build_package& p: dep_pkgs)
+ pkgs.collect (o, c, db, p, true /* recursively */);
+
+ // Now that we have collected all the package versions that we need to
+ // build, arrange them in the "dependency order", that is, with every
+ // package on the list only possibly depending on the ones after
+ // it. Iterate over the names we have collected on the previous step
+ // in reverse so that when we iterate over the packages (also in
+ // reverse), things will be built as close as possible to the order
+ // specified by the user (it may still get altered if there are
+ // dependencies between the specified packages).
+ //
+ // The order of dependency upgrades/downgrades/drops is not really
+ // deterministic. We, however, do them before hold_pkgs so that they
+ // appear (e.g., on the plan) last.
+ //
+
+ //@@ TODO: need to clear the list on subsequent iterations.
+
+ for (const build_package& p: dep_pkgs)
+ pkgs.order (p.name ());
+
+ for (const build_package& p: reverse_iterate (hold_pkgs))
+ pkgs.order (p.name ());
+
+ // Once we have the final plan, collect and order all the dependents
+ // that we will need to reconfigure because of the up/down-grades of
+ // packages that are now on the list.
+ //
+ pkgs.collect_order_dependents (db);
+
+ // We are about to execute the plan on the database (but not on the
+ // filesystem / actual packages). Save the session state for the
+ // selected_package objects so that we can restore it later (see
+ // below).
+ //
+ using selected_packages = session::object_map<selected_package>;
+
+ auto selected_packages_session = [&db, &ses] () -> selected_packages*
+ {
+ auto& m (ses.map ()[&db]);
+ auto i = m.find (&typeid (selected_package));
+ return (i != m.end ()
+ ? &static_cast<selected_packages&> (*i->second)
+ : nullptr);
+ };
+
+ selected_packages old_sp;
+
+ if (const selected_packages* sp = selected_packages_session ())
+ old_sp = *sp;
+
+ // We also need to perform the execution on copies of the build_package
+ // objects in order to preserve the originals. Note that the selected
+ // package objects will still be changed, so we will reload them
+ // afterwards (see below).
+ //
+ {
+ vector<build_package> tmp_pkgs (pkgs.begin (), pkgs.end ());
+ build_packages ref_pkgs (tmp_pkgs.begin (), tmp_pkgs.end ());
+
+ set<shared_ptr<selected_package>> dummy;
+ execute_plan (o, c, db, ref_pkgs, true /* simulate */, dummy);
+ }
+
+ // Verify that none of the previously-made upgrade/downgrade/drop
+ // decisions have changed.
+ //
+ /*
+ for (auto i (dep_pkgs.begin ()); i != dep_pkgs.end (); )
+ {
+ shared_ptr<selected_package> p = db.find (...);
+
+ if (evaluate_dependency (p) != p->version)
+ {
+ dep_pkgs.erase (i);
+ scratch = true; // Start from scratch.
+ }
+ else
+ ++i;
+ }
+ */
+
+ if (!scratch)
+ {
+ // Examine the new dependency set for any upgrade/downgrade/drops.
+ //
+ refine = false; // Presumably no more refinements necessary.
+
+ /*
+ for (shared_ptr<selected_package> p = ...
+ <query all dependency (non-held) packages in the database>)
+ {
+ version v (evaluate_dependency (p));
+
+ if (v != p->version)
+ {
+ dep_pkgs.push_back (p->name, v);
+ refine = true;
+ }
+ }
+ */
+ }
+
+ // Rollback the changes to the database and reload the changed
+ // objects.
+ //
+ t.rollback ();
+ {
+ transaction t (db);
+
+ // First reload all the selected_package objects that could have been
+ // modified (conceptually, we should only modify what's on the
+ // plan).
+ //
+ // Note: we use the original pkgs list since the executed one may
+ // contain newly created (but now gone) selected_package objects.
+ //
+ for (build_package& p: pkgs)
+ {
+ if (p.selected != nullptr)
+ db.reload (*p.selected);
+ }
+
+ // Now drop all the newly created selected_package objects. The
+ // tricky part is to distinguish newly created ones from newly
+ // loaded (and potentially cached).
+ //
+ if (selected_packages* sp = selected_packages_session ())
+ {
+ for (bool rescan (true); rescan; )
+ {
+ rescan = false;
+
+ for (auto i (sp->begin ()); i != sp->end (); ++i)
+ {
+ if (old_sp.find (i->first) == old_sp.end ())
+ {
+ if (i->second.use_count () == 1)
+ {
+ sp->erase (i);
+
+ // This might cause another object's use count to drop.
+ //
+ rescan = true;
+ }
+ }
+ }
+ }
+ }
+
+ t.commit ();
+ }
+ }
}
// Print what we are going to do, then ask for the user's confirmation.
@@ -1980,7 +2344,7 @@ namespace bpkg
print_plan = true;
}
- act += p.available_name ();
+ act += p.available_name_version ();
cause = "required by";
}
@@ -2070,7 +2434,78 @@ namespace bpkg
// the user may want to in addition update (that update_dependents flag
// above). This case we handle in house.
//
+
set<shared_ptr<selected_package>> drop_pkgs;
+ execute_plan (o, c, db, pkgs, false /* simulate */, drop_pkgs);
+
+ // Now that we have the final dependency state, see if we need to drop
+ // packages that are no longer necessary.
+ //
+ if (!drop_pkgs.empty ())
+ drop_pkgs = pkg_drop (
+ c, o, db, drop_pkgs, !(o.yes () || o.drop_prerequisite ()));
+
+ if (o.configure_only ())
+ return 0;
+
+ // update
+ //
+ // Here we want to update all the packages at once, to facilitate
+ // parallelism.
+ //
+ vector<pkg_command_vars> upkgs;
+
+ // First add the user selection.
+ //
+ for (const build_package& p: reverse_iterate (pkgs))
+ {
+ const shared_ptr<selected_package>& sp (p.selected);
+
+ if (!sp->system () && // System package doesn't need update.
+ p.user_selection ())
+ upkgs.push_back (pkg_command_vars {sp, strings ()});
+ }
+
+ // Then add dependents. We do it as a separate step so that they are
+ // updated after the user selection.
+ //
+ if (update_dependents)
+ {
+ for (const build_package& p: reverse_iterate (pkgs))
+ {
+ const shared_ptr<selected_package>& sp (p.selected);
+
+ if (p.reconfigure () && p.available == nullptr)
+ {
+ // Note that it is entirely possible this package got dropped so
+ // we need to check for that.
+ //
+ if (drop_pkgs.find (sp) == drop_pkgs.end ())
+ upkgs.push_back (pkg_command_vars {sp, strings ()});
+ }
+ }
+ }
+
+ pkg_update (c, o, o.for_ (), strings (), upkgs);
+
+ if (verb && !o.no_result ())
+ {
+ for (const pkg_command_vars& pv: upkgs)
+ text << "updated " << *pv.pkg;
+ }
+
+ return 0;
+ }
+
+ static void
+ execute_plan (const pkg_build_options& o,
+ const dir_path& c,
+ database& db,
+ build_packages& pkgs,
+ bool simulate,
+ set<shared_ptr<selected_package>>& drop_pkgs)
+ {
+ uint16_t verbose (!simulate ? verb : 0);
// disfigure
//
@@ -2087,7 +2522,7 @@ namespace bpkg
// Each package is disfigured in its own transaction, so that we
// always leave the configuration in a valid state.
//
- transaction t (db.begin ());
+ transaction t (db, !simulate /* start */);
// Collect prerequisites to be potentially dropped.
//
@@ -2132,11 +2567,14 @@ namespace bpkg
}
}
- pkg_disfigure (c, o, t, sp, !p.keep_out); // Commits the transaction.
+ // Commits the transaction.
+ //
+ pkg_disfigure (c, o, t, sp, !p.keep_out, simulate);
+
assert (sp->state == package_state::unpacked ||
sp->state == package_state::transient);
- if (verb && !o.no_result ())
+ if (verbose && !o.no_result ())
text << (sp->state == package_state::transient
? "purged "
: "disfigured ") << *sp;
@@ -2180,10 +2618,10 @@ namespace bpkg
{
if (sp != nullptr && !sp->system ())
{
- transaction t (db.begin ());
- pkg_purge (c, t, sp); // Commits the transaction.
+ transaction t (db, !simulate /* start */);
+ pkg_purge (c, t, sp, simulate); // Commits the transaction.
- if (verb && !o.no_result ())
+ if (verbose && !o.no_result ())
text << "purged " << *sp;
if (!p.hold_package)
@@ -2211,7 +2649,7 @@ namespace bpkg
if (pl.repository.object_id () != "") // Special root?
{
- transaction t (db.begin ());
+ transaction t (db, !simulate /* start */);
// Go through package repositories to decide if we should fetch,
// checkout or unpack depending on the available repository basis.
@@ -2246,7 +2684,8 @@ namespace bpkg
t,
ap->id.name,
p.available_version (),
- true /* replace */);
+ true /* replace */,
+ simulate);
break;
}
case repository_basis::version_control:
@@ -2256,7 +2695,8 @@ namespace bpkg
t,
ap->id.name,
p.available_version (),
- true /* replace */);
+ true /* replace */,
+ simulate);
break;
}
case repository_basis::directory:
@@ -2266,7 +2706,8 @@ namespace bpkg
t,
ap->id.name,
p.available_version (),
- true /* replace */);
+ true /* replace */,
+ simulate);
break;
}
}
@@ -2275,14 +2716,16 @@ namespace bpkg
//
else if (exists (pl.location))
{
- transaction t (db.begin ());
+ transaction t (db, !simulate /* start */);
+
sp = pkg_fetch (
o,
c,
t,
pl.location, // Archive path.
true, // Replace
- false); // Don't purge; commits the transaction.
+ false, // Don't purge; commits the transaction.
+ simulate);
}
if (sp != nullptr) // Actually fetched or checked out something?
@@ -2290,7 +2733,7 @@ namespace bpkg
assert (sp->state == package_state::fetched ||
sp->state == package_state::unpacked);
- if (verb && !o.no_result ())
+ if (verbose && !o.no_result ())
{
const repository_location& rl (sp->repository);
@@ -2333,10 +2776,13 @@ namespace bpkg
{
if (sp != nullptr)
{
- transaction t (db.begin ());
- sp = pkg_unpack (o, c, t, ap->id.name); // Commits the transaction.
+ transaction t (db, !simulate /* start */);
- if (verb && !o.no_result ())
+ // Commits the transaction.
+ //
+ sp = pkg_unpack (o, c, t, ap->id.name, simulate);
+
+ if (verbose && !o.no_result ())
text << "unpacked " << *sp;
}
else
@@ -2344,15 +2790,16 @@ namespace bpkg
const package_location& pl (ap->locations[0]);
assert (pl.repository.object_id () == ""); // Special root.
- transaction t (db.begin ());
+ transaction t (db, !simulate /* start */);
sp = pkg_unpack (o,
c,
t,
path_cast<dir_path> (pl.location),
true, // Replace.
- false); // Don't purge; commits the transaction.
+ false, // Don't purge; commits the transaction.
+ simulate);
- if (verb && !o.no_result ())
+ if (verbose && !o.no_result ())
text << "using " << *sp << " (external)";
}
@@ -2375,18 +2822,28 @@ namespace bpkg
if (sp != nullptr && sp->state == package_state::configured)
continue;
- transaction t (db.begin ());
+ transaction t (db, !simulate /* start */);
// Note that pkg_configure() commits the transaction.
//
if (p.system)
sp = pkg_configure_system (ap->id.name, p.available_version (), t);
- else
- pkg_configure (c, o, t, sp, strings ());
+ else if (ap != nullptr)
+ pkg_configure (c, o, t, sp, ap->dependencies, strings (), simulate);
+ else // Dependent.
+ {
+ // Must be in the unpacked state since it was disfigured on the first
+ // pass (see above).
+ //
+ assert (sp->state == package_state::unpacked);
+
+ package_manifest m (pkg_verify (sp->effective_src_root (c), true));
+ pkg_configure (c, o, t, sp, m.dependencies, strings (), simulate);
+ }
assert (sp->state == package_state::configured);
- if (verb && !o.no_result ())
+ if (verbose && !o.no_result ())
text << "configured " << *sp;
}
@@ -2411,7 +2868,7 @@ namespace bpkg
sp->hold_package = hp;
sp->hold_version = hv;
- transaction t (db.begin ());
+ transaction t (db, !simulate /* start */);
db.update (sp);
t.commit ();
@@ -2424,7 +2881,7 @@ namespace bpkg
drop_pkgs.erase (i);
}
- if (verb > 1)
+ if (verbose > 1)
{
if (hp)
text << "holding package " << sp->name;
@@ -2434,63 +2891,5 @@ namespace bpkg
}
}
}
-
- // Now that we have the final dependency state, see if we need to drop
- // packages that are no longer necessary.
- //
- if (!drop_pkgs.empty ())
- drop_pkgs = pkg_drop (
- c, o, db, drop_pkgs, !(o.yes () || o.drop_prerequisite ()));
-
- if (o.configure_only ())
- return 0;
-
- // update
- //
- // Here we want to update all the packages at once, to facilitate
- // parallelism.
- //
- vector<pkg_command_vars> upkgs;
-
- // First add the user selection.
- //
- for (const build_package& p: reverse_iterate (pkgs))
- {
- const shared_ptr<selected_package>& sp (p.selected);
-
- if (!sp->system () && // System package doesn't need update.
- p.user_selection ())
- upkgs.push_back (pkg_command_vars {sp, strings ()});
- }
-
- // Then add dependents. We do it as a separate step so that they are
- // updated after the user selection.
- //
- if (update_dependents)
- {
- for (const build_package& p: reverse_iterate (pkgs))
- {
- const shared_ptr<selected_package>& sp (p.selected);
-
- if (p.reconfigure () && p.available == nullptr)
- {
- // Note that it is entirely possible this package got dropped so
- // we need to check for that.
- //
- if (drop_pkgs.find (sp) == drop_pkgs.end ())
- upkgs.push_back (pkg_command_vars {sp, strings ()});
- }
- }
- }
-
- pkg_update (c, o, o.for_ (), strings (), upkgs);
-
- if (verb && !o.no_result ())
- {
- for (const pkg_command_vars& pv: upkgs)
- text << "updated " << *pv.pkg;
- }
-
- return 0;
}
}
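
The refinement loop above is the heart of the change: execute the plan against the database only (simulate), inspect the resulting dependency state, and iterate until no further up/down-grades or drops are discovered, rolling the database back each time. A self-contained toy illustration of that simulate-refine-commit idea (plain standard C++, none of it bpkg API):

  #include <set>
  #include <string>
  #include <iostream>

  int main ()
  {
    std::set<std::string> configured {"libfoo"};  // Persistent state.
    std::set<std::string> plan {"libbar"};        // Packages to build.

    for (bool refine (true); refine; )
    {
      // "Execute" the plan on a copy; discarding the copy plays the role of
      // the transaction rollback.
      //
      std::set<std::string> trial (configured);
      trial.insert (plan.begin (), plan.end ());

      refine = false;

      // Toy refinement rule: building libbar reveals a libbaz dependency.
      //
      if (trial.count ("libbar") != 0 && trial.count ("libbaz") == 0)
      {
        plan.insert ("libbaz");
        refine = true;
      }

      if (!refine)
        configured = trial; // Final plan: commit for real.
    }

    for (const std::string& p: configured)
      std::cout << p << '\n';
  }
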
diff --git a/bpkg/pkg-checkout.cxx b/bpkg/pkg-checkout.cxx
index 6e4a7a5..c53e545 100644
--- a/bpkg/pkg-checkout.cxx
+++ b/bpkg/pkg-checkout.cxx
@@ -14,6 +14,7 @@
#include <bpkg/manifest-utility.hxx>
#include <bpkg/pkg-purge.hxx>
+#include <bpkg/pkg-verify.hxx>
#include <bpkg/pkg-configure.hxx>
using namespace std;
@@ -27,15 +28,11 @@ namespace bpkg
transaction& t,
string n,
version v,
- bool replace)
+ bool replace,
+ bool simulate)
{
tracer trace ("pkg_checkout");
- dir_path d (c / dir_path (n + '-' + v.string ()));
-
- if (exists (d))
- fail << "package directory " << d << " already exists";
-
database& db (t.database ());
tracer_guard tg (db, trace);
@@ -124,46 +121,57 @@ namespace bpkg
// Verify the package prerequisites are all configured since the dist
// meta-operation generally requires all imports to be resolvable.
//
- pkg_configure_prerequisites (o, t, sd);
+ package_manifest m (pkg_verify (sd, true));
+ pkg_configure_prerequisites (o, t, m.dependencies, m.name);
- // The temporary out of source directory that is required for the dist
- // meta-operation.
- //
- auto_rmdir rmo (temp_dir / dir_path (n));
- const dir_path& od (rmo.path);
+ auto_rmdir rmd;
+ optional<string> mc;
+ dir_path d (c / dir_path (n + '-' + v.string ()));
- if (exists (od))
- rm_r (od);
+ if (!simulate)
+ {
+ if (exists (d))
+ fail << "package directory " << d << " already exists";
- // Form the buildspec.
- //
- string bspec ("dist(");
- bspec += sd.representation ();
- bspec += '@';
- bspec += od.representation ();
- bspec += ')';
+ // The temporary out of source directory that is required for the dist
+ // meta-operation.
+ //
+ auto_rmdir rmo (temp_dir / dir_path (n));
+ const dir_path& od (rmo.path);
- // Remove the resulting package distribution directory on failure.
- //
- auto_rmdir rmd (d);
+ if (exists (od))
+ rm_r (od);
- // Distribute.
- //
- // Note that on failure the package stays in the existing (working) state.
- //
- // At first it may seem we have a problem: an existing package with the
- // same name will cause a conflict since we now have multiple package
- // locations for the same package name. We are luck, however: subprojects
- // are only loaded if used and since we don't support dependency cycles,
- // the existing project should never be loaded by any of our dependencies.
- //
- run_b (o,
- c,
- bspec,
- false /* quiet */,
- strings ({"config.dist.root=" + c.representation ()}));
+ // Form the buildspec.
+ //
+ string bspec ("dist(");
+ bspec += sd.representation ();
+ bspec += '@';
+ bspec += od.representation ();
+ bspec += ')';
+
+ // Remove the resulting package distribution directory on failure.
+ //
+ rmd = auto_rmdir (d);
- string mc (sha256 (o, d / manifest_file));
+ // Distribute.
+ //
+ // Note that on failure the package stays in the existing (working) state.
+ //
+ // At first it may seem we have a problem: an existing package with the
+ // same name will cause a conflict since we now have multiple package
+ // locations for the same package name. We are lucky, however: subprojects
+ // are only loaded if used and since we don't support dependency cycles,
+ // the existing project should never be loaded by any of our dependencies.
+ //
+ run_b (o,
+ c,
+ bspec,
+ false /* quiet */,
+ strings ({"config.dist.root=" + c.representation ()}));
+
+ mc = sha256 (o, d / manifest_file);
+ }
if (p != nullptr)
{
@@ -171,7 +179,7 @@ namespace bpkg
// replacing. Once this is done, there is no going back. If things go
// badly, we can't simply abort the transaction.
//
- pkg_purge_fs (c, t, p);
+ pkg_purge_fs (c, t, p, simulate);
p->version = move (v);
p->state = package_state::unpacked;
@@ -220,7 +228,7 @@ namespace bpkg
l4 ([&]{trace << "configuration: " << c;});
database db (open (c, trace));
- transaction t (db.begin ());
+ transaction t (db);
session s;
shared_ptr<selected_package> p;
@@ -239,7 +247,13 @@ namespace bpkg
// Commits the transaction.
//
- p = pkg_checkout (o, c, t, move (n), move (v), o.replace ());
+ p = pkg_checkout (o,
+ c,
+ t,
+ move (n),
+ move (v),
+ o.replace (),
+ false /* simulate */);
if (verb && !o.no_result ())
text << "checked out " << *p;
diff --git a/bpkg/pkg-checkout.hxx b/bpkg/pkg-checkout.hxx
index b217100..084c752 100644
--- a/bpkg/pkg-checkout.hxx
+++ b/bpkg/pkg-checkout.hxx
@@ -27,7 +27,8 @@ namespace bpkg
transaction&,
string name,
version,
- bool replace);
+ bool replace,
+ bool simulate);
}
#endif // BPKG_PKG_CHECKOUT_HXX
diff --git a/bpkg/pkg-command.cxx b/bpkg/pkg-command.cxx
index f287886..a4260a8 100644
--- a/bpkg/pkg-command.cxx
+++ b/bpkg/pkg-command.cxx
@@ -67,7 +67,7 @@ namespace bpkg
assert (p->state == package_state::configured);
assert (p->out_root); // Should be present since configured.
- dir_path out_root (c / *p->out_root); // Always relative.
+ dir_path out_root (p->effective_out_root (c));
l4 ([&]{trace << p->name << " out_root: " << out_root;});
if (bspec.back () != '(')
@@ -122,7 +122,7 @@ namespace bpkg
vector<pkg_command_vars> ps;
{
database db (open (c, trace));
- transaction t (db.begin ());
+ transaction t (db);
while (args.more ())
{
diff --git a/bpkg/pkg-configure.cxx b/bpkg/pkg-configure.cxx
index e23fca1..a7e1a17 100644
--- a/bpkg/pkg-configure.cxx
+++ b/bpkg/pkg-configure.cxx
@@ -22,14 +22,14 @@ namespace bpkg
package_prerequisites
pkg_configure_prerequisites (const common_options& o,
transaction& t,
- const dir_path& source)
+ const dependencies& deps,
+ const string& package)
{
package_prerequisites r;
- package_manifest m (pkg_verify (source, true));
database& db (t.database ());
- for (const dependency_alternatives& da: m.dependencies)
+ for (const dependency_alternatives& da: deps)
{
assert (!da.conditional); //@@ TODO
@@ -45,7 +45,7 @@ namespace bpkg
if (n == "build2")
{
if (d.constraint)
- satisfy_build2 (o, m.name, d);
+ satisfy_build2 (o, package, d);
satisfied = true;
break;
@@ -53,7 +53,7 @@ namespace bpkg
else if (n == "bpkg")
{
if (d.constraint)
- satisfy_bpkg (o, m.name, d);
+ satisfy_bpkg (o, package, d);
satisfied = true;
break;
@@ -111,7 +111,9 @@ namespace bpkg
const common_options& o,
transaction& t,
const shared_ptr<selected_package>& p,
- const strings& vars)
+ const dependencies& deps,
+ const strings& vars,
+ bool simulate)
{
tracer trace ("pkg_configure");
@@ -121,12 +123,10 @@ namespace bpkg
database& db (t.database ());
tracer_guard tg (db, trace);
- // Calculate package's src_root and out_root.
- //
- dir_path src_root (p->src_root->absolute ()
- ? *p->src_root
- : c / *p->src_root);
+ dir_path src_root (p->effective_src_root (c));
+ // Calculate package's out_root.
+ //
dir_path out_root (p->external ()
? c / dir_path (p->name)
: c / dir_path (p->name + "-" + p->version.string ()));
@@ -138,47 +138,51 @@ namespace bpkg
// prerequisites list.
//
assert (p->prerequisites.empty ());
- p->prerequisites = pkg_configure_prerequisites (o, t, src_root);
-
- // Form the buildspec.
- //
- string bspec;
- // Use path representation to get canonical trailing slash.
- //
- if (src_root == out_root)
- bspec = "configure('" + out_root.representation () + "')";
- else
- bspec = "configure('" +
- src_root.representation () + "'@'" +
- out_root.representation () + "')";
+ p->prerequisites = pkg_configure_prerequisites (o, t, deps, p->name);
- l4 ([&]{trace << "buildspec: " << bspec;});
-
- // Configure.
- //
- try
+ if (!simulate)
{
- run_b (o, c, bspec, true, vars); // Run quiet.
- }
- catch (const failed&)
- {
- // If we failed to configure the package, make sure we revert
- // it back to the unpacked state by running disfigure (it is
- // valid to run disfigure on an un-configured build). And if
- // disfigure fails as well, then the package will be set into
- // the broken state.
+ // Form the buildspec.
//
+ string bspec;
- // Indicate to pkg_disfigure() we are partially configured.
+ // Use path representation to get canonical trailing slash.
//
- p->out_root = out_root.leaf ();
- p->state = package_state::broken;
+ if (src_root == out_root)
+ bspec = "configure('" + out_root.representation () + "')";
+ else
+ bspec = "configure('" +
+ src_root.representation () + "'@'" +
+ out_root.representation () + "')";
+
+ l4 ([&]{trace << "buildspec: " << bspec;});
- // Commits the transaction.
+ // Configure.
//
- pkg_disfigure (c, o, t, p, true /* clean */);
- throw;
+ try
+ {
+ run_b (o, c, bspec, true, vars); // Run quiet.
+ }
+ catch (const failed&)
+ {
+ // If we failed to configure the package, make sure we revert
+ // it back to the unpacked state by running disfigure (it is
+ // valid to run disfigure on an un-configured build). And if
+ // disfigure fails as well, then the package will be set into
+ // the broken state.
+ //
+
+ // Indicate to pkg_disfigure() we are partially configured.
+ //
+ p->out_root = out_root.leaf ();
+ p->state = package_state::broken;
+
+ // Commits the transaction.
+ //
+ pkg_disfigure (c, o, t, p, true /* clean */, false /* simulate */);
+ throw;
+ }
}
p->out_root = out_root.leaf ();
@@ -255,7 +259,7 @@ namespace bpkg
fail << "configuration variables specified for a system package";
database db (open (c, trace));
- transaction t (db.begin ());
+ transaction t (db);
session s;
shared_ptr<selected_package> p;
@@ -299,7 +303,8 @@ namespace bpkg
l4 ([&]{trace << *p;});
- pkg_configure (c, o, t, p, vars);
+ package_manifest m (pkg_verify (p->effective_src_root (c), true));
+ pkg_configure (c, o, t, p, m.dependencies, vars, false /* simulate */);
}
if (verb && !o.no_result ())
diff --git a/bpkg/pkg-configure.hxx b/bpkg/pkg-configure.hxx
index 9ad36f4..faf37bc 100644
--- a/bpkg/pkg-configure.hxx
+++ b/bpkg/pkg-configure.hxx
@@ -24,21 +24,25 @@ namespace bpkg
const common_options&,
transaction&,
const shared_ptr<selected_package>&,
- const strings& config_vars);
+ const dependencies&,
+ const strings& config_vars,
+ bool simulate);
// Configure a system package and commit the transaction.
//
shared_ptr<selected_package>
pkg_configure_system (const string& name, const version&, transaction&);
- // Verify that a directory is a valid package and return its prerequisites.
- // Fail if the directory is not a valid package or some of the prerequisites
- // are not configured or don't satisfy the package's dependency constraints.
+ // Return package prerequisites given its dependencies. Fail if some of the
+ // prerequisites are not configured or don't satisfy the package's
+ // dependency constraints. Note that the package argument is used for
+ // diagnostics only.
//
package_prerequisites
pkg_configure_prerequisites (const common_options&,
transaction&,
- const dir_path& package);
+ const dependencies&,
+ const string& package);
}
#endif // BPKG_PKG_CONFIGURE_HXX
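
The practical upshot of the new signature is that callers obtain the dependencies themselves (typically from the manifest via pkg_verify()) and pass them in, so the simulated code path never has to touch the filesystem. The pkg-configure.cxx hunk above already shows the pattern:

  package_manifest m (pkg_verify (p->effective_src_root (c), true));
  pkg_configure (c, o, t, p, m.dependencies, vars, false /* simulate */);

During simulation, pkg-build instead passes ap->dependencies from the already-loaded available_package, avoiding the manifest read entirely.
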
diff --git a/bpkg/pkg-disfigure.cxx b/bpkg/pkg-disfigure.cxx
index 4dcecaa..0e1f8a6 100644
--- a/bpkg/pkg-disfigure.cxx
+++ b/bpkg/pkg-disfigure.cxx
@@ -19,7 +19,8 @@ namespace bpkg
const common_options& o,
transaction& t,
const shared_ptr<selected_package>& p,
- bool clean)
+ bool clean,
+ bool simulate)
{
assert (p->state == package_state::configured ||
p->state == package_state::broken);
@@ -69,85 +70,84 @@ namespace bpkg
//
p->prerequisites.clear ();
- // Calculate package's src_root and out_root.
- //
assert (p->src_root); // Must be set since unpacked.
assert (p->out_root); // Must be set since configured.
- dir_path src_root (p->src_root->absolute ()
- ? *p->src_root
- : c / *p->src_root);
- dir_path out_root (c / *p->out_root); // Always relative.
+ if (!simulate)
+ {
+ dir_path src_root (p->effective_src_root (c));
+ dir_path out_root (p->effective_out_root (c));
- l4 ([&]{trace << "src_root: " << src_root << ", "
- << "out_root: " << out_root;});
+ l4 ([&]{trace << "src_root: " << src_root << ", "
+ << "out_root: " << out_root;});
- // Form the buildspec.
- //
- string bspec;
-
- // Use path representation to get canonical trailing slash.
- //
- const string& rep (out_root.representation ());
-
- if (p->state == package_state::configured)
- {
- if (clean)
- bspec = "clean('" + rep + "') ";
+ // Form the buildspec.
+ //
+ string bspec;
- bspec += "disfigure('" + rep + "')";
- }
- else
- {
- // Why do we need to specify src_root? While it's unnecessary
- // for a completely configured package, here we disfigure a
- // partially configured one.
+ // Use path representation to get canonical trailing slash.
//
- if (src_root == out_root)
- bspec = "disfigure('" + rep + "')";
- else
- bspec = "disfigure('" + src_root.representation () + "'@'" +
- rep + "')";
- }
+ const string& rep (out_root.representation ());
- l4 ([&]{trace << "buildspec: " << bspec;});
+ if (p->state == package_state::configured)
+ {
+ if (clean)
+ bspec = "clean('" + rep + "') ";
- // Disfigure.
- //
- try
- {
- if (exists (out_root))
+ bspec += "disfigure('" + rep + "')";
+ }
+ else
{
- // Note that for external packages this is just the output directory.
- // It is also possible that the buildfiles in the source directory
- // have changed in a way that they don't clean everything. So in this
- // case we just remove the output directory manually rather then
- // running 'b clean disfigure'.
+ // Why do we need to specify src_root? While it's unnecessary
+ // for a completely configured package, here we disfigure a
+ // partially configured one.
//
- if (clean && p->external ())
- rm_r (out_root);
+ if (src_root == out_root)
+ bspec = "disfigure('" + rep + "')";
else
- run_b (o, c, bspec, true); // Run quiet.
+ bspec = "disfigure('" + src_root.representation () + "'@'" +
+ rep + "')";
}
- // Make sure the out directory is gone unless it is the same as src, or
- // we didn't clean it.
- //
- if (out_root != src_root && clean && exists (out_root))
- fail << "package output directory " << out_root << " still exists";
- }
- catch (const failed&)
- {
- // If we failed to disfigure the package, set it to the broken
- // state. The user can then try to clean things up with pkg-purge.
+ l4 ([&]{trace << "buildspec: " << bspec;});
+
+ // Disfigure.
//
- p->state = package_state::broken;
- db.update (p);
- t.commit ();
+ try
+ {
+ if (exists (out_root))
+ {
+ // Note that for external packages this is just the output
+ // directory. It is also possible that the buildfiles in the source
+ // directory have changed in a way that they don't clean everything.
+ // So in this case we just remove the output directory manually
+ // rather than running 'b clean disfigure'.
+ //
+ if (clean && p->external ())
+ rm_r (out_root);
+ else
+ run_b (o, c, bspec, true); // Run quiet.
+ }
+
+ // Make sure the out directory is gone unless it is the same as src,
+ // or we didn't clean it.
+ //
+ if (out_root != src_root && clean && exists (out_root))
+ fail << "package output directory " << out_root << " still exists";
+ }
+ catch (const failed&)
+ {
+ // If we failed to disfigure the package, set it to the broken
+ // state. The user can then try to clean things up with pkg-purge.
+ //
+ p->state = package_state::broken;
+ db.update (p);
+ t.commit ();
- info << "package " << p->name << " is now broken; "
- << "use 'pkg-purge' to remove";
- throw;
+ info << "package " << p->name << " is now broken; "
+ << "use 'pkg-purge' to remove";
+ throw;
+ }
}
p->out_root = nullopt;
@@ -172,7 +172,7 @@ namespace bpkg
string n (args.next ());
database db (open (c, trace));
- transaction t (db.begin ());
+ transaction t (db);
shared_ptr<selected_package> p (db.find<selected_package> (n));
@@ -183,7 +183,9 @@ namespace bpkg
fail << "package " << n << " is " << p->state <<
info << "expected it to be configured";
- pkg_disfigure (c, o, t, p, !o.keep_out ()); // Commits the transaction.
+ // Commits the transaction.
+ //
+ pkg_disfigure (c, o, t, p, !o.keep_out (), false /* simulate */);
assert (p->state == package_state::unpacked ||
p->state == package_state::transient);
diff --git a/bpkg/pkg-disfigure.hxx b/bpkg/pkg-disfigure.hxx
index d475f03..a59e0e4 100644
--- a/bpkg/pkg-disfigure.hxx
+++ b/bpkg/pkg-disfigure.hxx
@@ -27,7 +27,8 @@ namespace bpkg
const common_options&,
transaction&,
const shared_ptr<selected_package>&,
- bool clean);
+ bool clean,
+ bool simulate);
}
#endif // BPKG_PKG_DISFIGURE_HXX
diff --git a/bpkg/pkg-drop.cxx b/bpkg/pkg-drop.cxx
index a238803..0127fb6 100644
--- a/bpkg/pkg-drop.cxx
+++ b/bpkg/pkg-drop.cxx
@@ -353,11 +353,11 @@ namespace bpkg
// Each package is disfigured in its own transaction, so that we always
// leave the configuration in a valid state.
//
- transaction t (db.begin ());
+ transaction t (db);
// Commits the transaction.
//
- pkg_disfigure (c, o, t, p, true /* clean */);
+ pkg_disfigure (c, o, t, p, true /* clean */, false /* simulate */);
assert (p->state == package_state::unpacked ||
p->state == package_state::transient);
@@ -388,8 +388,11 @@ namespace bpkg
assert (p->state == package_state::fetched ||
p->state == package_state::unpacked);
- transaction t (db.begin ());
- pkg_purge (c, t, p); // Commits the transaction, p is now transient.
+ transaction t (db);
+
+ // Commits the transaction, p is now transient.
+ //
+ pkg_purge (c, t, p, false /* simulate */);
if (verb && !o.no_result ())
text << "purged " << p->name;
@@ -444,7 +447,7 @@ namespace bpkg
//
bool print_plan (false);
{
- transaction t (db.begin ());
+ transaction t (db);
// The first step is to load and collect all the packages specified
// by the user.
@@ -586,7 +589,7 @@ namespace bpkg
//
drop_packages pkgs;
{
- transaction t (db.begin ());
+ transaction t (db);
// First add all the "caller selection" of packages to the list and
// collect their prerequisites (these will be the candidates to drop
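
The pkg-drop changes keep the existing policy of disfiguring and purging each package in its own transaction so that the configuration stays valid after every step. A rough standalone sketch of that per-item transaction pattern using the plain SQLite C API follows; the table name and statements are made up for illustration and are not bpkg's schema.

// Standalone sketch (illustrative schema and statements).
//
#include <string>
#include <vector>
#include <sqlite3.h>

int main ()
{
  sqlite3* db (nullptr);
  if (sqlite3_open ("state.db", &db) != SQLITE_OK)
    return 1;

  sqlite3_exec (db,
                "CREATE TABLE IF NOT EXISTS package (name TEXT PRIMARY KEY, state TEXT)",
                nullptr, nullptr, nullptr);

  std::vector<std::string> pkgs {"libfoo", "libbar"};

  // Each package gets its own transaction: a failure in the middle leaves
  // everything processed so far durably recorded and the database consistent.
  //
  for (const std::string& p: pkgs)
  {
    sqlite3_exec (db, "BEGIN", nullptr, nullptr, nullptr);

    std::string sql ("INSERT OR REPLACE INTO package VALUES ('" + p + "', 'dropped')");

    if (sqlite3_exec (db, sql.c_str (), nullptr, nullptr, nullptr) == SQLITE_OK)
      sqlite3_exec (db, "COMMIT", nullptr, nullptr, nullptr);
    else
    {
      sqlite3_exec (db, "ROLLBACK", nullptr, nullptr, nullptr);
      break;
    }
  }

  sqlite3_close (db);
}
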
diff --git a/bpkg/pkg-fetch.cxx b/bpkg/pkg-fetch.cxx
index 71bd968..ecc3535 100644
--- a/bpkg/pkg-fetch.cxx
+++ b/bpkg/pkg-fetch.cxx
@@ -29,7 +29,8 @@ namespace bpkg
version v,
path a,
repository_location rl,
- bool purge)
+ bool purge,
+ bool simulate)
{
tracer trace ("pkg_fetch");
@@ -53,7 +54,7 @@ namespace bpkg
// replacing. Once this is done, there is no going back. If things
// go badly, we can't simply abort the transaction.
//
- pkg_purge_fs (c, t, p);
+ pkg_purge_fs (c, t, p, simulate);
p->version = move (v);
p->state = package_state::fetched;
@@ -133,7 +134,8 @@ namespace bpkg
transaction& t,
path a,
bool replace,
- bool purge)
+ bool purge,
+ bool simulate)
{
tracer trace ("pkg_fetch");
@@ -160,7 +162,8 @@ namespace bpkg
move (m.version),
move (a),
repository_location (),
- purge);
+ purge,
+ simulate);
}
shared_ptr<selected_package>
@@ -169,7 +172,8 @@ namespace bpkg
transaction& t,
string n,
version v,
- bool replace)
+ bool replace,
+ bool simulate)
{
tracer trace ("pkg_fetch");
@@ -225,22 +229,28 @@ namespace bpkg
text << "fetching " << pl->location.leaf () << " "
<< "from " << pl->repository->name;
- path a (pkg_fetch_archive (co, pl->repository->location, pl->location, c));
- auto_rmfile arm (a);
-
- // We can't be fetching an archive for a transient object.
- //
- assert (ap->sha256sum);
+ auto_rmfile arm;
+ path a (c / pl->location.leaf ());
- const string& sha256sum (sha256 (co, a));
- if (sha256sum != *ap->sha256sum)
+ if (!simulate)
{
- fail << "checksum mismatch for " << n << " " << v <<
- info << pl->repository->name << " has " << *ap->sha256sum <<
- info << "fetched archive has " << sha256sum <<
- info << "consider re-fetching package list and trying again" <<
- info << "if problem persists, consider reporting this to "
- << "the repository maintainer";
+ pkg_fetch_archive (co, pl->repository->location, pl->location, a);
+ arm = auto_rmfile (a);
+
+ // We can't be fetching an archive for a transient object.
+ //
+ assert (ap->sha256sum);
+
+ const string& sha256sum (sha256 (co, a));
+ if (sha256sum != *ap->sha256sum)
+ {
+ fail << "checksum mismatch for " << n << " " << v <<
+ info << pl->repository->name << " has " << *ap->sha256sum <<
+ info << "fetched archive has " << sha256sum <<
+ info << "consider re-fetching package list and trying again" <<
+ info << "if problem persists, consider reporting this to "
+ << "the repository maintainer";
+ }
}
shared_ptr<selected_package> p (
@@ -250,7 +260,8 @@ namespace bpkg
move (v),
move (a),
pl->repository->location,
- true)); // Purge.
+ true /* purge */,
+ simulate));
arm.cancel ();
return p;
@@ -265,7 +276,7 @@ namespace bpkg
l4 ([&]{trace << "configuration: " << c;});
database db (open (c, trace));
- transaction t (db.begin ());
+ transaction t (db);
session s;
shared_ptr<selected_package> p;
@@ -278,7 +289,13 @@ namespace bpkg
fail << "archive path argument expected" <<
info << "run 'bpkg help pkg-fetch' for more information";
- p = pkg_fetch (o, c, t, path (args.next ()), o.replace (), o.purge ());
+ p = pkg_fetch (o,
+ c,
+ t,
+ path (args.next ()),
+ o.replace (),
+ o.purge (),
+ false /* simulate */);
}
else
{
@@ -294,7 +311,13 @@ namespace bpkg
fail << "package version expected" <<
info << "run 'bpkg help pkg-fetch' for more information";
- p = pkg_fetch (o, c, t, move (n), move (v), o.replace ());
+ p = pkg_fetch (o,
+ c,
+ t,
+ move (n),
+ move (v),
+ o.replace (),
+ false /* simulate */);
}
if (verb && !o.no_result ())
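
With the new simulate flag, pkg_fetch computes the would-be archive path up front and only downloads and checksum-verifies the archive when not simulating, while the resulting state is recorded either way. A simplified standalone sketch of that shape follows; download() and checksum() are hypothetical stand-ins, not bpkg functions.

// Standalone sketch (hypothetical helpers, not bpkg's implementation).
//
#include <iostream>
#include <optional>
#include <stdexcept>
#include <string>
#include <filesystem>

namespace fs = std::filesystem;

// Hypothetical stand-ins for the real download and sha256 verification.
//
static void download (const std::string& url, const fs::path& dst)
{
  std::cout << "would download " << url << " to " << dst << '\n';
}

static std::string checksum (const fs::path&) {return "";}

// Plan (and optionally perform) fetching an archive into the configuration
// directory. In simulate mode the destination path is computed but nothing
// is downloaded or verified.
//
static fs::path
fetch (const std::string& url,
       const std::string& leaf,
       const fs::path& cfg,
       const std::optional<std::string>& expected_sum,
       bool simulate)
{
  fs::path a (cfg / leaf);

  if (!simulate)
  {
    download (url, a);

    if (expected_sum && checksum (a) != *expected_sum)
      throw std::runtime_error ("checksum mismatch for " + leaf);
  }

  return a; // Recorded in the database in both modes.
}

int main ()
{
  fetch ("https://example.org/1/hello-1.0.0.tar.gz",
         "hello-1.0.0.tar.gz",
         fs::path ("/tmp/cfg"),
         std::nullopt,
         true /* simulate */);
}
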
diff --git a/bpkg/pkg-fetch.hxx b/bpkg/pkg-fetch.hxx
index fe51ba6..5d59764 100644
--- a/bpkg/pkg-fetch.hxx
+++ b/bpkg/pkg-fetch.hxx
@@ -26,7 +26,8 @@ namespace bpkg
transaction&,
path archive,
bool replace,
- bool purge);
+ bool purge,
+ bool simulate);
// Fetch the package from an archive-based repository and commit the
// transaction.
@@ -37,7 +38,8 @@ namespace bpkg
transaction&,
string name,
version,
- bool replace);
+ bool replace,
+ bool simulate);
}
#endif // BPKG_PKG_FETCH_HXX
diff --git a/bpkg/pkg-purge.cxx b/bpkg/pkg-purge.cxx
index e1cd113..ba717bf 100644
--- a/bpkg/pkg-purge.cxx
+++ b/bpkg/pkg-purge.cxx
@@ -18,6 +18,7 @@ namespace bpkg
pkg_purge_fs (const dir_path& c,
transaction& t,
const shared_ptr<selected_package>& p,
+ bool simulate,
bool archive)
{
tracer trace ("pkg_purge_archive");
@@ -32,10 +33,13 @@ namespace bpkg
{
if (p->purge_src)
{
- dir_path d (p->src_root->absolute () ? *p->src_root : c / *p->src_root);
+ if (!simulate)
+ {
+ dir_path d (p->effective_src_root (c));
- if (exists (d)) // Don't complain if someone did our job for us.
- rm_r (d);
+ if (exists (d)) // Don't complain if someone did our job for us.
+ rm_r (d);
+ }
p->purge_src = false;
}
@@ -50,10 +54,13 @@ namespace bpkg
{
if (p->purge_archive)
{
- path a (p->archive->absolute () ? *p->archive : c / *p->archive);
+ if (!simulate)
+ {
+ path a (p->archive->absolute () ? *p->archive : c / *p->archive);
- if (exists (a))
- rm (a);
+ if (exists (a))
+ rm (a);
+ }
p->purge_archive = false;
}
@@ -78,7 +85,8 @@ namespace bpkg
void
pkg_purge (const dir_path& c,
transaction& t,
- const shared_ptr<selected_package>& p)
+ const shared_ptr<selected_package>& p,
+ bool simulate)
{
assert (p->state == package_state::fetched ||
p->state == package_state::unpacked);
@@ -89,7 +97,7 @@ namespace bpkg
tracer_guard tg (db, trace);
assert (!p->out_root);
- pkg_purge_fs (c, t, p, true);
+ pkg_purge_fs (c, t, p, simulate, true);
db.erase (p);
t.commit ();
@@ -112,7 +120,7 @@ namespace bpkg
string n (args.next ());
database db (open (c, trace));
- transaction t (db.begin ());
+ transaction t (db);
shared_ptr<selected_package> p (db.find<selected_package> (n));
@@ -162,7 +170,7 @@ namespace bpkg
{
if (p->out_root)
{
- dir_path d (c / *p->out_root); // Always relative.
+ dir_path d (p->effective_out_root (c));
if (exists (d))
fail << "output directory of broken package " << n
@@ -172,7 +180,7 @@ namespace bpkg
if (p->purge_src)
{
- dir_path d (p->src_root->absolute () ? *p->src_root : c / *p->src_root);
+ dir_path d (p->effective_src_root (c));
if (exists (d))
fail << "source directory of broken package " << n
@@ -192,7 +200,7 @@ namespace bpkg
else
{
assert (!p->out_root);
- pkg_purge_fs (c, t, p, !o.keep ());
+ pkg_purge_fs (c, t, p, false /* simulate */, !o.keep ());
}
// Finally, update the database state.
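
pkg_purge_fs now resolves package directories via effective_src_root()/effective_out_root() instead of checking for an absolute path inline. A tiny sketch of what such resolution amounts to is below; it reflects the intent visible in the replaced lines, not the helpers' actual implementation.

// Standalone sketch (assumption about the helpers' intent).
//
#include <iostream>
#include <filesystem>

namespace fs = std::filesystem;

// Resolve a possibly relative package directory against the configuration
// directory.
//
static fs::path
effective_dir (const fs::path& d, const fs::path& cfg)
{
  return d.is_absolute () ? d : cfg / d;
}

int main ()
{
  std::cout << effective_dir ("hello-1.0.0", "/tmp/cfg") << '\n'; // "/tmp/cfg/hello-1.0.0"
  std::cout << effective_dir ("/src/hello", "/tmp/cfg") << '\n';  // "/src/hello"
}
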
diff --git a/bpkg/pkg-purge.hxx b/bpkg/pkg-purge.hxx
index 9468a02..d9c26c5 100644
--- a/bpkg/pkg-purge.hxx
+++ b/bpkg/pkg-purge.hxx
@@ -22,7 +22,8 @@ namespace bpkg
void
pkg_purge (const dir_path& configuration,
transaction&,
- const shared_ptr<selected_package>&);
+ const shared_ptr<selected_package>&,
+ bool simulate);
// Remove package's filesystem objects (the source directory and, if
// the archive argument is true, the package archive). If this fails,
@@ -32,6 +33,7 @@ namespace bpkg
pkg_purge_fs (const dir_path& configuration,
transaction&,
const shared_ptr<selected_package>&,
+ bool simulate,
bool archive = true);
}
diff --git a/bpkg/pkg-status.cxx b/bpkg/pkg-status.cxx
index 8250766..ea39b25 100644
--- a/bpkg/pkg-status.cxx
+++ b/bpkg/pkg-status.cxx
@@ -265,7 +265,7 @@ namespace bpkg
l4 ([&]{trace << "configuration: " << c;});
database db (open (c, trace));
- transaction t (db.begin ());
+ transaction t (db);
session s;
packages pkgs;
diff --git a/bpkg/pkg-unpack.cxx b/bpkg/pkg-unpack.cxx
index 41c72f4..d790613 100644
--- a/bpkg/pkg-unpack.cxx
+++ b/bpkg/pkg-unpack.cxx
@@ -71,14 +71,18 @@ namespace bpkg
version v,
dir_path d,
repository_location rl,
- bool purge)
+ bool purge,
+ bool simulate)
{
tracer trace ("pkg_unpack");
database& db (t.database ());
tracer_guard tg (db, trace);
- string mc (sha256 (o, d / manifest_file));
+ optional<string> mc;
+
+ if (!simulate)
+ mc = sha256 (o, d / manifest_file);
// Make the package and configuration paths absolute and normalized.
// If the package is inside the configuration, use the relative path.
@@ -98,7 +102,7 @@ namespace bpkg
// replacing. Once this is done, there is no going back. If things
// go badly, we can't simply abort the transaction.
//
- pkg_purge_fs (c, t, p);
+ pkg_purge_fs (c, t, p, simulate);
p->version = move (v);
p->state = package_state::unpacked;
@@ -142,7 +146,8 @@ namespace bpkg
transaction& t,
const dir_path& d,
bool replace,
- bool purge)
+ bool purge,
+ bool simulate)
{
tracer trace ("pkg_unpack");
@@ -177,7 +182,8 @@ namespace bpkg
move (m.version),
d,
repository_location (),
- purge);
+ purge,
+ simulate);
}
shared_ptr<selected_package>
@@ -186,7 +192,8 @@ namespace bpkg
transaction& t,
string n,
version v,
- bool replace)
+ bool replace,
+ bool simulate)
{
tracer trace ("pkg_unpack");
@@ -245,14 +252,16 @@ namespace bpkg
move (v),
path_cast<dir_path> (rl.path () / pl->location),
rl,
- false); // Purge.
+ false /* purge */,
+ simulate);
}
shared_ptr<selected_package>
pkg_unpack (const common_options& co,
const dir_path& c,
transaction& t,
- const string& name)
+ const string& name,
+ bool simulate)
{
tracer trace ("pkg_unpack");
@@ -272,13 +281,6 @@ namespace bpkg
assert (p->archive); // Should have archive in the fetched state.
- // If the archive path is not absolute, then it must be relative
- // to the configuration.
- //
- path a (p->archive->absolute () ? *p->archive : c / *p->archive);
-
- l4 ([&]{trace << "archive: " << a;});
-
// Extract the package directory.
//
// Also, since we must have verified the archive during fetch,
@@ -289,119 +291,134 @@ namespace bpkg
if (exists (d))
fail << "package directory " << d << " already exists";
- // What should we do if tar or something after it fails? Cleaning
- // up the package directory sounds like the right thing to do.
- //
- auto_rmdir arm (d);
+ auto_rmdir arm;
+ optional<string> mc;
- cstrings args;
-
- // See if we need to decompress.
- //
+ if (!simulate)
{
- string e (a.extension ());
-
- if (e == "gz") args.push_back ("gzip");
- else if (e == "bzip2") args.push_back ("bzip2");
- else if (e == "xz") args.push_back ("xz");
- else if (e != "tar")
- fail << "unknown compression method in package " << a;
- }
+ // If the archive path is not absolute, then it must be relative
+ // to the configuration.
+ //
+ path a (p->archive->absolute () ? *p->archive : c / *p->archive);
- size_t i (0); // The tar command line start.
- if (!args.empty ())
- {
- args.push_back ("-dc");
- args.push_back (a.string ().c_str ());
- args.push_back (nullptr);
- i = args.size ();
- }
+ l4 ([&]{trace << "archive: " << a;});
- args.push_back (co.tar ().string ().c_str ());
+ // What should we do if tar or something after it fails? Cleaning
+ // up the package directory sounds like the right thing to do.
+ //
+ arm = auto_rmdir (d);
- // Add extra options.
- //
- for (const string& o: co.tar_option ())
- args.push_back (o.c_str ());
+ cstrings args;
- // -C/--directory -- change to directory.
- //
- args.push_back ("-C");
+ // See if we need to decompress.
+ //
+ {
+ string e (a.extension ());
-#ifndef _WIN32
- args.push_back (c.string ().c_str ());
-#else
- // Note that tar misinterprets -C option's absolute paths on Windows,
- // unless only forward slashes are used as directory separators:
- //
- // tar -C c:\a\cfg --force-local -xf c:\a\cfg\libbutl-0.7.0.tar.gz
- // tar: c\:\a\\cfg: Cannot open: No such file or directory
- // tar: Error is not recoverable: exiting now
- //
- string cwd (c.string ());
- replace (cwd.begin (), cwd.end (), '\\', '/');
+ if (e == "gz") args.push_back ("gzip");
+ else if (e == "bzip2") args.push_back ("bzip2");
+ else if (e == "xz") args.push_back ("xz");
+ else if (e != "tar")
+ fail << "unknown compression method in package " << a;
+ }
- args.push_back (cwd.c_str ());
+ size_t i (0); // The tar command line start.
+ if (!args.empty ())
+ {
+ args.push_back ("-dc");
+ args.push_back (a.string ().c_str ());
+ args.push_back (nullptr);
+ i = args.size ();
+ }
- // An archive name that has a colon in it specifies a file or device on a
- // remote machine. That makes it impossible to use absolute Windows paths
- // unless we add the --force-local option. Note that BSD tar doesn't
- // support this option.
- //
- args.push_back ("--force-local");
-#endif
+ args.push_back (co.tar ().string ().c_str ());
- args.push_back ("-xf");
- args.push_back (i == 0 ? a.string ().c_str () : "-");
- args.push_back (nullptr);
- args.push_back (nullptr); // Pipe end.
+ // Add extra options.
+ //
+ for (const string& o: co.tar_option ())
+ args.push_back (o.c_str ());
- size_t what;
- try
- {
- process_path dpp;
- process_path tpp;
+ // -C/--directory -- change to directory.
+ //
+ args.push_back ("-C");
- process dpr;
- process tpr;
+#ifndef _WIN32
+ args.push_back (c.string ().c_str ());
+#else
+ // Note that tar misinterprets the -C option's absolute paths on Windows,
+ // unless only forward slashes are used as directory separators:
+ //
+ // tar -C c:\a\cfg --force-local -xf c:\a\cfg\libbutl-0.7.0.tar.gz
+ // tar: c\:\a\\cfg: Cannot open: No such file or directory
+ // tar: Error is not recoverable: exiting now
+ //
+ string cwd (c.string ());
+ replace (cwd.begin (), cwd.end (), '\\', '/');
- if (i != 0)
- dpp = process::path_search (args[what = 0]);
+ args.push_back (cwd.c_str ());
- tpp = process::path_search (args[what = i]);
+ // An archive name that has a colon in it specifies a file or device on a
+ // remote machine. That makes it impossible to use absolute Windows paths
+ // unless we add the --force-local option. Note that BSD tar doesn't
+ // support this option.
+ //
+ args.push_back ("--force-local");
+#endif
- if (verb >= 2)
- print_process (args);
+ args.push_back ("-xf");
+ args.push_back (i == 0 ? a.string ().c_str () : "-");
+ args.push_back (nullptr);
+ args.push_back (nullptr); // Pipe end.
- if (i != 0)
+ size_t what;
+ try
{
- dpr = process (dpp, &args[what = 0], 0, -1);
- tpr = process (tpp, &args[what = i], dpr);
+ process_path dpp;
+ process_path tpp;
+
+ process dpr;
+ process tpr;
+
+ if (i != 0)
+ dpp = process::path_search (args[what = 0]);
+
+ tpp = process::path_search (args[what = i]);
+
+ if (verb >= 2)
+ print_process (args);
+
+ if (i != 0)
+ {
+ dpr = process (dpp, &args[what = 0], 0, -1);
+ tpr = process (tpp, &args[what = i], dpr);
+ }
+ else
+ tpr = process (tpp, &args[what = 0]);
+
+ // While it is reasonable to assume that the child process issued
+ // diagnostics, tar, specifically, doesn't mention the archive name.
+ //
+ if (!(what = i, tpr.wait ()) ||
+ !(what = 0, dpr.wait ()))
+ fail << "unable to extract package archive " << a;
}
- else
- tpr = process (tpp, &args[what = 0]);
+ catch (const process_error& e)
+ {
+ error << "unable to execute " << args[what] << ": " << e;
- // While it is reasonable to assuming the child process issued
- // diagnostics, tar, specifically, doesn't mention the archive name.
- //
- if (!(what = i, tpr.wait ()) ||
- !(what = 0, dpr.wait ()))
- fail << "unable to extract package archive " << a;
- }
- catch (const process_error& e)
- {
- error << "unable to execute " << args[what] << ": " << e;
+ if (e.child)
+ exit (1);
- if (e.child)
- exit (1);
+ throw failed ();
+ }
- throw failed ();
+ mc = sha256 (co, d / manifest_file);
}
p->src_root = d.leaf (); // For now assuming to be in configuration.
p->purge_src = true;
- p->manifest_checksum = sha256 (co, d / manifest_file);
+ p->manifest_checksum = move (mc);
p->state = package_state::unpacked;
@@ -422,7 +439,7 @@ namespace bpkg
l4 ([&]{trace << "configuration: " << c;});
database db (open (c, trace));
- transaction t (db.begin ());
+ transaction t (db);
shared_ptr<selected_package> p;
bool external (o.existing ());
@@ -435,8 +452,13 @@ namespace bpkg
fail << "package directory argument expected" <<
info << "run 'bpkg help pkg-unpack' for more information";
- p = pkg_unpack (
- o, c, t, dir_path (args.next ()), o.replace (), o.purge ());
+ p = pkg_unpack (o,
+ c,
+ t,
+ dir_path (args.next ()),
+ o.replace (),
+ o.purge (),
+ false /* simulate */);
}
else
{
@@ -460,8 +482,14 @@ namespace bpkg
// "unpack" it from the directory-based repository.
//
p = v.empty ()
- ? pkg_unpack (o, c, t, n)
- : pkg_unpack (o, c, t, move (n), move (v), o.replace ());
+ ? pkg_unpack (o, c, t, n, false /* simulate */)
+ : pkg_unpack (o,
+ c,
+ t,
+ move (n),
+ move (v),
+ o.replace (),
+ false /* simulate */);
}
if (verb && !o.no_result ())
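
The unpack path above selects a decompressor from the archive extension, pipes it into tar -xf -, and on Windows rewrites the -C directory to forward slashes and adds --force-local. The following standalone sketch only assembles and prints such a command line; the paths and the exact argument layout are illustrative assumptions.

// Standalone sketch (illustrative command-line assembly only).
//
#include <algorithm>
#include <iostream>
#include <string>
#include <vector>

int main ()
{
  std::string archive ("/tmp/cfg/libhello-1.0.0.tar.gz");
  std::string cfg ("/tmp/cfg");

  // Pick a decompressor based on the extension ("tar" means none).
  //
  std::string ext (archive.substr (archive.rfind ('.') + 1));

  std::string decomp;
  if      (ext == "gz")    decomp = "gzip";
  else if (ext == "bzip2") decomp = "bzip2";
  else if (ext == "xz")    decomp = "xz";
  else if (ext != "tar")
  {
    std::cerr << "unknown compression method in " << archive << '\n';
    return 1;
  }

#ifdef _WIN32
  // tar misreads backslashes in the -C argument, so use forward slashes.
  //
  std::replace (cfg.begin (), cfg.end (), '\\', '/');
#endif

  std::vector<std::string> tar {"tar", "-C", cfg,
                                "-xf", decomp.empty () ? archive : std::string ("-")};
#ifdef _WIN32
  tar.insert (tar.begin () + 1, "--force-local");
#endif

  if (!decomp.empty ())
    std::cout << decomp << " -dc " << archive << " | ";

  for (const std::string& a: tar)
    std::cout << a << ' ';
  std::cout << '\n';
}
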
diff --git a/bpkg/pkg-unpack.hxx b/bpkg/pkg-unpack.hxx
index 83acb97..fd92e4c 100644
--- a/bpkg/pkg-unpack.hxx
+++ b/bpkg/pkg-unpack.hxx
@@ -26,7 +26,8 @@ namespace bpkg
transaction&,
const dir_path&,
bool replace,
- bool purge);
+ bool purge,
+ bool simulate);
// Unpack the fetched package and commit the transaction.
//
@@ -34,7 +35,8 @@ namespace bpkg
pkg_unpack (const common_options&,
const dir_path& configuration,
transaction&,
- const string& name);
+ const string& name,
+ bool simulate);
// Unpack the package as a source directory from a directory-based
// repository and commit the transaction.
@@ -45,7 +47,8 @@ namespace bpkg
transaction&,
string name,
version,
- bool replace);
+ bool replace,
+ bool simulate);
}
#endif // BPKG_PKG_UNPACK_HXX
diff --git a/bpkg/rep-add.cxx b/bpkg/rep-add.cxx
index 704da68..3569584 100644
--- a/bpkg/rep-add.cxx
+++ b/bpkg/rep-add.cxx
@@ -67,7 +67,7 @@ namespace bpkg
info << "run 'bpkg help rep-add' for more information";
database db (open (c, trace));
- transaction t (db.begin ());
+ transaction t (db);
session s; // Repository dependencies can have cycles.
while (args.more ())
diff --git a/bpkg/rep-fetch.cxx b/bpkg/rep-fetch.cxx
index 9558301..6d37f9f 100644
--- a/bpkg/rep-fetch.cxx
+++ b/bpkg/rep-fetch.cxx
@@ -877,7 +877,7 @@ namespace bpkg
vector<lazy_shared_ptr<repository>> repos;
repos.reserve (rls.size ());
- transaction t (db.begin ());
+ transaction t (db);
shared_ptr<repository> root (db.load<repository> (""));
repository::complements_type& ua (root->complements); // User-added repos.
@@ -913,7 +913,7 @@ namespace bpkg
vector<lazy_shared_ptr<repository>> repos;
database db (open (c, trace));
- transaction t (db.begin ());
+ transaction t (db);
session s; // Repository dependencies can have cycles.
shared_ptr<repository> root (db.load<repository> (""));
diff --git a/bpkg/rep-list.cxx b/bpkg/rep-list.cxx
index 3ee52a9..8d308dd 100644
--- a/bpkg/rep-list.cxx
+++ b/bpkg/rep-list.cxx
@@ -101,7 +101,7 @@ namespace bpkg
info << "run 'bpkg help rep-list' for more information";
database db (open (c, trace));
- transaction t (db.begin ());
+ transaction t (db);
session s; // Repository dependencies can have cycles.
shared_ptr<repository> root (db.load<repository> (""));
diff --git a/bpkg/rep-remove.cxx b/bpkg/rep-remove.cxx
index 53a7de1..f161c71 100644
--- a/bpkg/rep-remove.cxx
+++ b/bpkg/rep-remove.cxx
@@ -198,7 +198,7 @@ namespace bpkg
{
// Note that we don't rely on being in session nor create one.
//
- transaction t (db.begin ());
+ transaction t (db);
db.erase_query<available_package> ();
@@ -296,7 +296,7 @@ namespace bpkg
//
vector<lazy_shared_ptr<repository>> repos;
- transaction t (db.begin ());
+ transaction t (db);
session s; // Repository dependencies can have cycles.
shared_ptr<repository> root (db.load<repository> (""));