From cbd8296c7b86f7fc368d1133da3be3670b7923be Mon Sep 17 00:00:00 2001 From: Boris Kolpackov Date: Wed, 9 Dec 2015 11:32:50 +0200 Subject: Clean up command names, add aliases --- bpkg/bpkg-options.cli | 78 +-- bpkg/bpkg.cxx | 22 +- bpkg/build | 17 - bpkg/build-options.cli | 63 --- bpkg/build.cxx | 1224 -------------------------------------------- bpkg/buildfile | 20 +- bpkg/cfg-add | 17 + bpkg/cfg-add-options.cli | 30 ++ bpkg/cfg-add.cxx | 70 +++ bpkg/cfg-fetch | 17 + bpkg/cfg-fetch-options.cli | 28 + bpkg/cfg-fetch.cxx | 245 +++++++++ bpkg/common-options.cli | 2 +- bpkg/drop | 17 - bpkg/drop-options.cli | 62 --- bpkg/drop.cxx | 512 ------------------ bpkg/package | 2 +- bpkg/pkg-build | 17 + bpkg/pkg-build-options.cli | 63 +++ bpkg/pkg-build.cxx | 1224 ++++++++++++++++++++++++++++++++++++++++++++ bpkg/pkg-drop | 17 + bpkg/pkg-drop-options.cli | 62 +++ bpkg/pkg-drop.cxx | 512 ++++++++++++++++++ bpkg/pkg-fetch.cxx | 4 +- bpkg/rep-add | 17 - bpkg/rep-add-options.cli | 30 -- bpkg/rep-add.cxx | 70 --- bpkg/rep-fetch | 17 - bpkg/rep-fetch-options.cli | 28 - bpkg/rep-fetch.cxx | 245 --------- tests/test.sh | 398 +++++++------- 31 files changed, 2559 insertions(+), 2571 deletions(-) delete mode 100644 bpkg/build delete mode 100644 bpkg/build-options.cli delete mode 100644 bpkg/build.cxx create mode 100644 bpkg/cfg-add create mode 100644 bpkg/cfg-add-options.cli create mode 100644 bpkg/cfg-add.cxx create mode 100644 bpkg/cfg-fetch create mode 100644 bpkg/cfg-fetch-options.cli create mode 100644 bpkg/cfg-fetch.cxx delete mode 100644 bpkg/drop delete mode 100644 bpkg/drop-options.cli delete mode 100644 bpkg/drop.cxx create mode 100644 bpkg/pkg-build create mode 100644 bpkg/pkg-build-options.cli create mode 100644 bpkg/pkg-build.cxx create mode 100644 bpkg/pkg-drop create mode 100644 bpkg/pkg-drop-options.cli create mode 100644 bpkg/pkg-drop.cxx delete mode 100644 bpkg/rep-add delete mode 100644 bpkg/rep-add-options.cli delete mode 100644 bpkg/rep-add.cxx delete mode 100644 bpkg/rep-fetch delete mode 100644 bpkg/rep-fetch-options.cli delete mode 100644 bpkg/rep-fetch.cxx diff --git a/bpkg/bpkg-options.cli b/bpkg/bpkg-options.cli index cfdf9d9..8cd4f9c 100644 --- a/bpkg/bpkg-options.cli +++ b/bpkg/bpkg-options.cli @@ -39,101 +39,101 @@ namespace bpkg "" } - bool build + bool cfg-create|create { - "...", - "Build one or more packages \- \l{bpkg-build-options(1)}.", + "", + "Create configuration.", "" } - bool drop + bool cfg-add|add { - "...", - "Drop one or more packages.", + "", + "Add repository to configuration.", "" } - bool pkg-verify + bool cfg-fetch|fetch { - "", - "Verify archive is a valid package.", + "", + "Fetch list of available packages.", "" } - bool pkg-status + bool pkg-status|status { - "", // [/]: 24 + "", "Print package status.", "" } - bool pkg-fetch + bool pkg-build|build { - "/", - "Fetch package archive.", + "", + "Build package(s) \- \l{bpkg-pkg-build(1)}.", "" } - bool pkg-unpack + bool pkg-drop|drop { "", - "Unpack package archive.", + "Drop package(s).", "" } - bool pkg-purge + bool pkg-update|update { "", - "Purge package.", + "Update package.", "" } - bool pkg-configure + bool pkg-clean|clean { "", - "Configure package.", + "Clean package.", "" } - bool pkg-disfigure + bool pkg-verify { - "", - "Disfigure package.", + "", + "Verify archive is a valid package.", "" } - bool pkg-update + bool pkg-fetch { - "", - "Update package.", + "/", + "Fetch package archive.", "" } - bool pkg-clean + bool pkg-unpack { "", - "Clean package.", + "Unpack package archive.", "" } - bool 
cfg-create + bool pkg-configure { - "[]", - "Create configuration.", + "", + "Configure package.", "" } - bool rep-add + bool pkg-disfigure { - "", - "Add repository to configuration.", + "", + "Disfigure package.", "" } - bool rep-fetch + bool pkg-purge { - "", - "Fetch available packages list.", + "", + "Purge package.", "" } @@ -147,7 +147,7 @@ namespace bpkg bool rep-create { "[]", - "Create repository manifest file.", + "Generate repository packages manifest.", "" } }; @@ -160,7 +160,7 @@ namespace bpkg bool common-options { - "Options common for all commands \- \l{bpkg-common-options(1)}." + "Details on common options \- \l{bpkg-common-options(1)}." } }; diff --git a/bpkg/bpkg.cxx b/bpkg/bpkg.cxx index 2f483c3..6b2ef85 100644 --- a/bpkg/bpkg.cxx +++ b/bpkg/bpkg.cxx @@ -16,9 +16,8 @@ // #include -#include -#include - +#include +#include #include #include #include @@ -29,10 +28,10 @@ #include #include +#include #include +#include -#include -#include #include #include @@ -172,17 +171,12 @@ try break; \ } - // High-level commands. - // -#define COMMAND(CMD) COMMAND_IMPL(, "", CMD) - - COMMAND(build); - COMMAND(drop); - // pkg-* commands // #define PKG_COMMAND(CMD) COMMAND_IMPL(pkg_, "pkg-", CMD) + PKG_COMMAND (build); + PKG_COMMAND (drop); PKG_COMMAND (verify); PKG_COMMAND (status); PKG_COMMAND (fetch); @@ -197,14 +191,14 @@ try // #define CFG_COMMAND(CMD) COMMAND_IMPL(cfg_, "cfg-", CMD) + CFG_COMMAND (add); CFG_COMMAND (create); + CFG_COMMAND (fetch); // rep-* commands // #define REP_COMMAND(CMD) COMMAND_IMPL(rep_, "rep-", CMD) - REP_COMMAND (add); - REP_COMMAND (fetch); REP_COMMAND (info); REP_COMMAND (create); diff --git a/bpkg/build b/bpkg/build deleted file mode 100644 index a5c047d..0000000 --- a/bpkg/build +++ /dev/null @@ -1,17 +0,0 @@ -// file : bpkg/build -*- C++ -*- -// copyright : Copyright (c) 2014-2015 Code Synthesis Ltd -// license : MIT; see accompanying LICENSE file - -#ifndef BPKG_BUILD -#define BPKG_BUILD - -#include -#include - -namespace bpkg -{ - int - build (const build_options&, cli::scanner& args); -} - -#endif // BPKG_BUILD diff --git a/bpkg/build-options.cli b/bpkg/build-options.cli deleted file mode 100644 index 77cfd72..0000000 --- a/bpkg/build-options.cli +++ /dev/null @@ -1,63 +0,0 @@ -// file : bpkg/build-options.cli -// copyright : Copyright (c) 2014-2015 Code Synthesis Ltd -// license : MIT; see accompanying LICENSE file - -include ; - -"\section=1" -"\name=bpkg-build" -"\summary=build one or more packages" - -namespace bpkg -{ - { - " ", - - "\h|SYNOPSIS| - - \c{\b{bpkg build} [] ([/] | | )...} - - \h|DESCRIPTION| - - The \cb{build} command builds one or more packages including all their - prerequisites. Each package can be specified as just the name () with - optional package version () in which case the package will be - automatically fetched from one of the configuration's source repositories - (see the \cb{rep-add} and \cb{rep-fetch} commands). Alternatively, the - package can be specified as either the path to the package source archive - () or package source directory (). See the \cb{pkg-fetch} and - \cb{pkg-unpack} commands for more information on the semantics of - specifying the package as an archive or directory. - - Packages that are specified explicitly on the command line will be - \i{held}, that is, they will not be considered for automatic removal if - they no longer have any dependents. 
Packages that are specified with the - explicit package version () or as an archive or directory, will, in - addition, have their versions held, that is, they will not be - automatically upgraded. - - The \cb{build} command also supports several \cb{--*-only} options that - allow you to limit the amount of work that will be done." - } - - class build_options: configuration_options - { - "\h|BUILD OPTIONS|" - - bool --yes|-y - { - "Assume the answer to all prompts is \cb{yes}." - } - - bool --configure-only|-c - { - "Configure all the packages but don't update." - } - - bool --print-only|-p - { - "Print to \cb{STDOUT} what would be done without actually doing - anything." - } - }; -} diff --git a/bpkg/build.cxx b/bpkg/build.cxx deleted file mode 100644 index 3cb96f4..0000000 --- a/bpkg/build.cxx +++ /dev/null @@ -1,1224 +0,0 @@ -// file : bpkg/build.cxx -*- C++ -*- -// copyright : Copyright (c) 2014-2015 Code Synthesis Ltd -// license : MIT; see accompanying LICENSE file - -#include - -#include -#include -#include // make_move_iterator() -#include // cout -#include // find() -#include // reference_wrapper - -#include // reverse_iterate() - -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -#include -#include -#include -#include -#include -#include - -using namespace std; -using namespace butl; - -namespace bpkg -{ - // @@ TODO - // - // - Detect and complain about dependency cycles. - // - Configuration vars (both passed and preserved) - // - - // Try to find a package that optionally satisfies the specified - // version constraint. Look in the specified repository, its - // prerequisite repositories, and their complements, recursively - // (note: recursivity applies to complements, not prerequisites). - // Return the package and the repository in which it was found or - // NULL for both if not found. - // - std::pair, shared_ptr> - find_available (database& db, - const string& name, - const shared_ptr& r, - const optional& c) - { - using query = query; - - query q (query::id.name == name); - const auto& vm (query::id.version); - - // If there is a constraint, then translate it to the query. Otherwise, - // get the latest version. - // - bool order (true); - if (c) - { - const version& v (c->version); - - // Note that the constraint's version is always rhs (libfoo >= 1.2.3). - // - switch (c->operation) - { - case comparison::eq: q = q && vm == v; order = false; break; - case comparison::lt: q = q && vm < v; break; - case comparison::gt: q = q && vm > v; break; - case comparison::le: q = q && vm <= v; break; - case comparison::ge: q = q && vm >= v; break; - } - } - - if (order) - q += order_by_version_desc (vm); - - // Filter the result based on the repository to which each version - // belongs. - // - return filter_one (r, db.query (q)); - } - - // Create a transient (or fake, if you prefer) available_package - // object corresponding to the specified selected object. Note - // that the package locations list is left empty and that the - // returned repository could be NULL if the package is an orphan. - // - std::pair, shared_ptr> - make_available (const common_options& options, - const dir_path& cd, - database& db, - const shared_ptr& sp) - { - assert (sp != nullptr && sp->state != package_state::broken); - - // First see if we can find its repository. - // - shared_ptr ar ( - db.find ( - sp->repository.canonical_name ())); - - // The package is in at least fetched state, which means we should - // be able to get its manifest. 
- // - const optional& a (sp->archive); - const optional& d (sp->src_root); - - package_manifest m ( - sp->state == package_state::fetched - ? pkg_verify (options, a->absolute () ? *a : cd / *a, true) - : pkg_verify (d->absolute () ? *d : cd / *d, true)); - - return make_pair (make_shared (move (m)), move (ar)); - } - - // A "dependency-ordered" list of packages and their prerequisites. - // That is, every package on the list only possibly depending on the - // ones after it. In a nutshell, the usage is as follows: we first - // add one or more packages (the "initial selection"; for example, a - // list of packages the user wants built). The list then satisfies all - // the prerequisites of the packages that were added, recursively. At - // the end of this process we have an ordered list of all the packages - // that we have to build, from last to first, in order to build our - // initial selection. - // - // This process is split into two phases: satisfaction of all the - // dependencies (the collect() function) and ordering of the list - // (the order() function). - // - // During the satisfaction phase, we collect all the packages, their - // prerequisites (and so on, recursively) in a map trying to satisfy - // any dependency constraints. Specifically, during this step, we may - // "upgrade" or "downgrade" a package that is already in a map as a - // result of another package depending on it and, for example, requiring - // a different version. One notable side-effect of this process is that - // we may end up with a lot more packages in the map than we will have - // on the list. This is because some of the prerequisites of "upgraded" - // or "downgraded" packages may no longer need to be built. - // - // Note also that we don't try to do exhaustive constraint satisfaction - // (i.e., there is no backtracking). Specifically, if we have two - // candidate packages each satisfying a constraint of its dependent - // package, then if neither of them satisfy both constraints, then we - // give up and ask the user to resolve this manually by explicitly - // specifying the version that will satisfy both constraints. - // - struct build_package - { - shared_ptr selected; // NULL if not selected. - shared_ptr available; // Can be NULL, fake/transient. - shared_ptr repository; // Can be NULL (orphan) or root. - - // Hold flags. Note that we can only "increase" the values that are - // already in the selected package. - // - bool hold_package; - bool hold_version; - - // Constraint value plus, normally, the dependent package name that - // placed this constraint but can also be some other name for the - // initial selection (e.g., package version specified by the user - // on the command line). - // - struct constraint_type - { - string dependent; - dependency_constraint value; - - constraint_type () = default; - constraint_type (string d, dependency_constraint v) - : dependent (move (d)), value (move (v)) {} - }; - - vector constraints; - - // True if we need to reconfigure this package. If available package - // is NULL, then reconfigure must be true (this is a dependent that - // needs to be reconfigured because its prerequisite is being up/down- - // graded or reconfigured). Note that in some cases reconfigure is - // naturally implied. For example, if an already configured package - // is being up/down-graded. For such cases we don't guarantee that - // the reconfigure flag is true. We only make sure to set it for - // cases that would otherwise miss the need for the reconfiguration. 
- // As a result, use the reconfigure() accessor which detects both - // explicit and implied cases. - // - // At first, it may seem that this flag is redundant and having the - // available package set to NULL is sufficient. But consider the case - // where the user asked us to build a package that is already in the - // configured state (so all we have to do is pkg-update). Next, add - // to this a prerequisite package that is being upgraded. Now our - // original package has to be reconfigured. But without this flag - // we won't know (available for our package won't be NULL). - // - bool reconfigure_; - - bool - reconfigure () const - { - return selected != nullptr && - selected->state == package_state::configured && - (reconfigure_ || // Must be checked first, available could be NULL. - selected->version != available->version); - } - }; - - struct build_packages: list> - { - // Collect the package. Return true if this package version was, - // in fact, added to the map and false if it was already there - // or the existing version was preferred. - // - bool - collect (const common_options& options, - const dir_path& cd, - database& db, - build_package&& pkg) - { - using std::swap; // ...and not list::swap(). - - tracer trace ("collect"); - - assert (pkg.available != nullptr); // No dependents allowed here. - auto i (map_.find (pkg.available->id.name)); - - // If we already have an entry for this package name, then we - // have to pick one over the other. - // - if (i != map_.end ()) - { - const string& n (i->first); - - // At the end we want p1 to point to the object that we keep - // and p2 to the object whose constraints we should copy. - // - build_package* p1 (&i->second.package); - build_package* p2 (&pkg); - - // If versions are the same, then all we have to do is copy the - // constraint (p1/p2 already point to where we would want them to). - // - if (p1->available->version != p2->available->version) - { - using constraint_type = build_package::constraint_type; - - // If the versions differ, we have to pick one. Start with the - // newest version since if both satisfy, then that's the one we - // should prefer. So get the first to try into p1 and the second - // to try -- into p2. - // - if (p2->available->version > p1->available->version) - swap (p1, p2); - - // See if pv's version satisfies pc's constraints. Return the - // pointer to the unsatisfied constraint or NULL if all are - // satisfied. - // - auto test = [] (build_package* pv, build_package* pc) - -> const constraint_type* - { - for (const constraint_type& c: pc->constraints) - if (!satisfies (pv->available->version, c.value)) - return &c; - - return nullptr; - }; - - // First see if p1 satisfies p2's constraints. - // - if (auto c2 = test (p1, p2)) - { - // If not, try the other way around. - // - if (auto c1 = test (p2, p1)) - { - const string& d1 (c1->dependent); - const string& d2 (c2->dependent); - - fail << "unable to satisfy constraints on package " << n << - info << d1 << " depends on (" << n << " " << c1->value << ")" << - info << d2 << " depends on (" << n << " " << c2->value << ")" << - info << "available " << n << " " << p1->available->version << - info << "available " << n << " " << p2->available->version << - info << "explicitly specify " << n << " version to manually " - << "satisfy both constraints"; - } - else - swap (p1, p2); - } - - level4 ([&]{trace << "pick " << n << " " << p1->available->version - << " over " << p2->available->version;}); - } - - // See if we are replacing the object. 
If not, then we don't - // need to collect its prerequisites since that should have - // already been done. Remember, p1 points to the object we - // want to keep. - // - bool replace (p1 != &i->second.package); - - if (replace) - { - swap (*p1, *p2); - swap (p1, p2); // Setup for constraints copying below. - } - - p1->constraints.insert (p1->constraints.end (), - make_move_iterator (p2->constraints.begin ()), - make_move_iterator (p2->constraints.end ())); - - if (!replace) - return false; - } - else - { - string n (pkg.available->id.name); // Note: copy; see emplace() below. - - level4 ([&]{trace << "add " << n << " " << pkg.available->version;}); - - // This is the first time we are adding this package name to the - // map. If it is already selected, then we need to make sure that - // packages that already depend on it (called dependents) are ok - // with the up/downgrade. We will also have to keep doing this - // every time we choose a new available package above. So what - // we are going to do is copy the dependents' constrains over to - // our constraint list; this way they will be automatically taken - // into account by the rest of the logic. - // - const shared_ptr& sp (pkg.selected); - const shared_ptr& ap (pkg.available); - - int r; - if (sp != nullptr && - sp->state == package_state::configured && - (r = sp->version.compare (ap->version)) != 0) - { - using query = query; - - for (const auto& pd: db.query (query::name == n)) - { - if (!pd.constraint) - continue; - - const version& v (ap->version); - const dependency_constraint& c (*pd.constraint); - - if (satisfies (v, c)) - { - pkg.constraints.emplace_back (pd.name, c); - continue; - } - - fail << "unable to " << (r < 0 ? "up" : "down") << "grade " - << "package " << n << " " << sp->version << " to " << v << - info << pd.name << " depends on (" << n << " " << c << ")" << - info << "explicitly specify " << n << " version to manually " - << "satisfy this constraint"; - } - } - - i = map_.emplace (move (n), data_type {end (), move (pkg)}).first; - } - - // Now collect all the prerequisites recursively. But first "prune" - // this process if the package is already configured since that would - // mean all its prerequisites are configured as well. Note that this - // is not merely an optimization: the package could be an orphan in - // which case the below logic will fail (no repository in which to - // search for prerequisites). By skipping the prerequisite check we - // are able to gracefully handle configured orphans. - // - const build_package& p (i->second.package); - const shared_ptr& sp (p.selected); - const shared_ptr& ap (p.available); - - if (sp != nullptr && - sp->version == ap->version && - sp->state == package_state::configured) - return true; - - // Show how we got here if things go wrong. - // - auto g ( - make_exception_guard ( - [&ap] () - { - info << "while satisfying " << ap->id.name << " " << ap->version; - })); - - const shared_ptr& ar (p.repository); - const string& name (ap->id.name); - - for (const dependency_alternatives& da: ap->dependencies) - { - if (da.conditional) // @@ TODO - fail << "conditional dependencies are not yet supported"; - - if (da.size () != 1) // @@ TODO - fail << "multiple dependency alternatives not yet supported"; - - const dependency& d (da.front ()); - - // The first step is to always find the available package even - // if, in the end, it won't be the one we select. If we cannot - // find the package then that means the repository is broken. 
- // And if we have no repository to look in, then that means the - // package is an orphan (we delay this check until we actually - // need the repository to allow orphans without prerequisites). - // - if (ar == nullptr) - fail << "package " << name << " " << ap->version << " is orphaned" << - info << "explicitly upgrade it to a new version"; - - auto rp (find_available (db, d.name, ar, d.constraint)); - - if (rp.first == nullptr) - { - diag_record dr; - dr << fail << "unknown prerequisite " << d << " of package " << name; - - if (!ar->location.empty ()) - dr << info << "repository " << ar->location << " appears to " - << "be broken"; - } - - // Next see if this package is already selected. If we already - // have it in the configuraion and it satisfies our dependency - // constraint, then we don't want to be forcing its upgrade (or, - // worse, downgrade). - // - bool force (false); - shared_ptr dsp (db.find (d.name)); - if (dsp != nullptr) - { - if (dsp->state == package_state::broken) - fail << "unable to build broken package " << d.name << - info << "use 'pkg-purge --force' to remove"; - - if (satisfies (dsp->version, d.constraint)) - rp = make_available (options, cd, db, dsp); - else - // Remember that we may be forcing up/downgrade; we will deal - // with it below. - // - force = true; - } - - build_package dp { - dsp, - rp.first, - rp.second, - false, // Hold package. - false, // Hold version. - {}, // Constraints. - false}; // Reconfigure. - - // Add our constraint, if we have one. - // - if (d.constraint) - dp.constraints.emplace_back (name, *d.constraint); - - // Now collect this prerequisite. If it was actually collected - // (i.e., it wasn't already there) and we are forcing an upgrade - // and the version is not held, then warn, unless we are running - // quiet. Downgrade or upgrade of a held version -- refuse. - // - if (collect (options, cd, db, move (dp)) && force) - { - const version& sv (dsp->version); - const version& av (rp.first->version); - - bool u (av > sv); - bool f (dsp->hold_version || !u); // Fail if downgrade or held. - - if (verb || f) - { - bool c (d.constraint); - diag_record dr; - - (f ? dr << fail : dr << warn) - << "package " << name << " dependency on " - << (c ? "(" : "") << d << (c ? ")" : "") << " is forcing " - << (u ? "up" : "down") << "grade of " << d.name << " " << sv - << " to " << av; - - if (dsp->hold_version) - dr << info << "package version " << d.name << " " << sv - << " is held"; - - if (f) - dr << info << "explicitly request version " - << (u ? "up" : "down") << "grade to continue"; - } - } - } - - return true; - } - - // Order the previously-collected package with the specified name - // returning its positions. If reorder is true, then reorder this - // package to be considered as "early" as possible. - // - iterator - order (const string& name, bool reorder = true) - { - // Every package that we order should have already been collected. - // - auto mi (map_.find (name)); - assert (mi != map_.end ()); - - // If this package is already in the list, then that would also - // mean all its prerequisites are in the list and we can just - // return its position. Unless we want it reordered. - // - iterator& pos (mi->second.position); - if (pos != end ()) - { - if (reorder) - erase (pos); - else - return pos; - } - - // Order all the prerequisites of this package and compute the - // position of its "earliest" prerequisite -- this is where it - // will be inserted. 
- // - build_package& p (mi->second.package); - const shared_ptr& sp (p.selected); - const shared_ptr& ap (p.available); - - assert (ap != nullptr); // No dependents allowed here. - - // Unless this package needs something to be before it, add it to - // the end of the list. - // - iterator i (end ()); - - // Figure out if j is before i, in which case set i to j. The goal - // here is to find the position of our "earliest" prerequisite. - // - auto update = [this, &i] (iterator j) - { - for (iterator k (j); i != j && k != end ();) - if (++k == i) - i = j; - }; - - // Similar to collect(), we can prune if the package is already - // configured, right? Not so fast. While in collect() we didn't - // need to add prerequisites of such a package, it doesn't mean - // that they actually never ended up in the map via another way. - // For example, some can be a part of the initial selection. And - // in that case we must order things properly. - // - // So here we are going to do things differently depending on - // whether the package is already configured or not. If it is, - // then that means we can use its prerequisites list. Otherwise, - // we use the manifest data. - // - if (sp != nullptr && - sp->version == ap->version && - sp->state == package_state::configured) - { - for (const auto& p: sp->prerequisites) - { - const string& name (p.first.object_id ()); - - // The prerequisites may not necessarily be in the map. - // - if (map_.find (name) != map_.end ()) - update (order (name, false)); - } - } - else - { - // We are iterating in reverse so that when we iterate over - // the dependency list (also in reverse), prerequisites will - // be built in the order that is as close to the manifest as - // possible. - // - for (const dependency_alternatives& da: - reverse_iterate (p.available->dependencies)) - { - assert (!da.conditional && da.size () == 1); // @@ TODO - const dependency& d (da.front ()); - - update (order (d.name, false)); - } - } - - return pos = insert (i, p); - } - - // If a configured package is being up/down-graded then that means - // all its dependents could be affected and we have to reconfigure - // them. This function examines every package that is already on - // the list and collects and orders all its dependents. - // - // Should we reconfigure just the direct depends or also include - // indirect, recursively? Consider this plauisible scenario as an - // example: We are upgrading a package to a version that provides - // an additional API. When its direct dependent gets reconfigured, - // it notices this new API and exposes its own extra functionality - // that is based on it. Now it would make sense to let its own - // dependents (which would be our original package's indirect ones) - // to also notice this. - // - void - collect_order_dependents (database& db) - { - // For each package on the list we want to insert all its dependents - // before it so that they get configured after the package on which - // they depend is configured (remember, our build order is reverse, - // with the last package being built first). This applies to both - // packages that are already on the list as well as the ones that - // we add, recursively. - // - for (auto i (begin ()); i != end (); ++i) - { - const build_package& p (*i); - - // Prune if this is not a configured package being up/down-graded - // or reconfigured. 
- // - if (p.reconfigure ()) - collect_order_dependents (db, i); - } - } - - void - collect_order_dependents (database& db, iterator pos) - { - tracer trace ("collect_order_dependents"); - - const build_package& p (*pos); - const string& n (p.selected->name); - - using query = query; - - for (auto& pd: db.query (query::name == n)) - { - string& dn (pd.name); - - // We can have three cases here: the package is already on the - // list, the package is in the map (but not on the list) and it - // is in neither. - // - auto i (map_.find (dn)); - - if (i != map_.end ()) - { - build_package& dp (i->second.package); - - // Force reconfiguration in both cases. - // - dp.reconfigure_ = true; - - if (i->second.position == end ()) - { - // Clean the build_package object up to make sure we don't - // inadvertently force up/down-grade. - // - dp.available = nullptr; - dp.repository = nullptr; - - i->second.position = insert (pos, dp); - } - } - else - { - shared_ptr dsp (db.load (dn)); - - i = map_.emplace ( - move (dn), - data_type - { - end (), - build_package { - move (dsp), - nullptr, - nullptr, - false, // Hold package. - false, // Hold version. - {}, // Constraints. - true} // Reconfigure. - }).first; - - i->second.position = insert (pos, i->second.package); - } - - // Collect our own dependents inserting them before us. - // - collect_order_dependents (db, i->second.position); - } - } - - private: - struct data_type - { - iterator position; // Note: can be end(), see collect(). - build_package package; - }; - - map map_; - }; - - int - build (const build_options& o, cli::scanner& args) - { - tracer trace ("build"); - - const dir_path& c (o.directory ()); - level4 ([&]{trace << "configuration: " << c;}); - - if (!args.more ()) - fail << "package name argument expected" << - info << "run 'bpkg help build' for more information"; - - database db (open (c, trace)); - - // Note that the session spans all our transactions. The idea here is - // that selected_package objects in the build_packages list below will - // be cached in this session. When subsequent transactions modify any - // of these objects, they will modify the cached instance, which means - // our list will always "see" their updated state. - // - session s; - - // Assemble the list of packages we will need to build. - // - build_packages pkgs; - strings names; - { - transaction t (db.begin ()); - - shared_ptr root (db.load ("")); - - while (args.more ()) - { - const char* s (args.next ()); - - // Reduce all the potential variations (archive, directory, package - // name, package name/version) to a single available_package object. - // - string n; - version v; - - shared_ptr ar; - shared_ptr ap; - - // Is this a package archive? - // - try - { - path a (s); - if (exists (a)) - { - package_manifest m (pkg_verify (o, a, true, false)); - - // This is a package archive (note that we shouldn't throw - // failed from here on). - // - level4 ([&]{trace << "archive " << a;}); - n = m.name; - v = m.version; - ar = root; - ap = make_shared (move (m)); - ap->locations.push_back (package_location {root, move (a)}); - } - } - catch (const invalid_path&) - { - // Not a valid path so cannot be an archive. - } - catch (const failed&) - { - // Not a valid package archive. - } - - // Is this a package directory? - // - try - { - dir_path d (s); - if (exists (d)) - { - package_manifest m (pkg_verify (d, true, false)); - - // This is a package directory (note that we shouldn't throw - // failed from here on). 
- // - level4 ([&]{trace << "directory " << d;}); - n = m.name; - v = m.version; - ap = make_shared (move (m)); - ar = root; - ap->locations.push_back (package_location {root, move (d)}); - } - } - catch (const invalid_path&) - { - // Not a valid path so cannot be an archive. - } - catch (const failed&) - { - // Not a valid package archive. - } - - // Then it got to be a package name with optional version. - // - if (ap == nullptr) - { - n = parse_package_name (s); - v = parse_package_version (s); - level4 ([&]{trace << "package " << n << "; version " << v;}); - - // Either get the user-specified version or the latest. - // - auto rp ( - v.empty () - ? find_available (db, n, root, nullopt) - : find_available (db, n, root, - dependency_constraint {comparison::eq, v})); - - ap = rp.first; - ar = rp.second; - } - - // Load the package that may have already been selected and - // figure out what exactly we need to do here. The end goal - // is the available_package object corresponding to the actual - // package that we will be building (which may or may not be - // the same as the selected package). - // - shared_ptr sp (db.find (n)); - - if (sp != nullptr && sp->state == package_state::broken) - fail << "unable to build broken package " << n << - info << "use 'pkg-purge --force' to remove"; - - bool found (true); - - // If the user asked for a specific version, then that's what - // we ought to be building. - // - if (!v.empty ()) - { - for (;;) - { - if (ap != nullptr) // Must be that version, see above. - break; - - // Otherwise, our only chance is that the already selected - // object is that exact version. - // - if (sp != nullptr && sp->version == v) - break; // Derive ap from sp below. - - found = false; - break; - } - } - // - // No explicit version was specified by the user. - // - else - { - if (ap != nullptr) - { - // Even if this package is already in the configuration, should - // we have a newer version, we treat it as an upgrade request; - // otherwise, why specify the package in the first place? We just - // need to check if what we already have is "better" (i.e., newer). - // - if (sp != nullptr && ap->id.version < sp->version) - ap = nullptr; // Derive ap from sp below. - } - else - { - if (sp == nullptr) - found = false; - - // Otherwise, derive ap from sp below. - } - } - - if (!found) - { - diag_record dr; - - dr << fail << "unknown package " << n; - if (!v.empty ()) - dr << " " << v; - - // Let's help the new user out here a bit. - // - if (db.query_value () == 0) - dr << info << "configuration " << c << " has no repositories" - << info << "use 'bpkg rep-add' to add a repository"; - else if (db.query_value () == 0) - dr << info << "configuration " << c << " has no available packages" - << info << "use 'bpkg rep-fetch' to fetch available packages " - << "list"; - } - - // If the available_package object is still NULL, then it means - // we need to get one corresponding to the selected package. - // - if (ap == nullptr) - { - assert (sp != nullptr); - - auto rp (make_available (o, c, db, sp)); - ap = rp.first; - ar = rp.second; // Could be NULL (orphan). - } - - // Finally add this package to the list. - // - level4 ([&]{trace << "collect " << ap->id.name << " " - << ap->version;}); - - build_package p { - move (sp), - move (ap), - move (ar), - true, // Hold package. - !v.empty (), // Hold version. - {}, // Constraints. - false}; // Reconfigure. - - // "Fix" the version the user asked for by adding the '==' constraint. 
- // - if (!v.empty ()) - p.constraints.emplace_back ( - "command line", - dependency_constraint {comparison::eq, v}); - - pkgs.collect (o, c, db, move (p)); - names.push_back (n); - } - - // Now that we have collected all the package versions that we need - // to build, arrange them in the "dependency order", that is, with - // every package on the list only possibly depending on the ones - // after it. Iterate over the names we have collected on the previous - // step in reverse so that when we iterate over the packages (also in - // reverse), things will be built as close as possible to the order - // specified by the user (it may still get altered if there are - // dependencies between the specified packages). - // - for (const string& n: reverse_iterate (names)) - pkgs.order (n); - - // Finally, collect and order all the dependents that we will need - // to reconfigure because of the up/down-grades of packages that - // are now on the list. - // - pkgs.collect_order_dependents (db); - - t.commit (); - } - - // Print what we are going to do, then ask for the user's confirmation. - // - if (o.print_only () || !o.yes ()) - { - for (const build_package& p: reverse_iterate (pkgs)) - { - const shared_ptr& sp (p.selected); - const shared_ptr& ap (p.available); - - const char* act; - string n; - version v; - - if (ap == nullptr) - { - // This is a dependent needing reconfiguration. - // - assert (sp != nullptr && p.reconfigure ()); - - n = sp->name; - act = "reconfigure"; - } - else - { - n = ap->id.name; - v = ap->version; - - // Even if we already have this package selected, we have to - // make sure it is configured and updated. - // - if (sp == nullptr || sp->version == v) - act = p.reconfigure () ? "reconfigure/build" : "build"; - else - act = sp->version < v ? "upgrade" : "downgrade"; - } - - if (o.print_only ()) - cout << act << " " << n << (v.empty () ? "" : " ") << v << endl; - else if (verb) - text << act << " " << n << (v.empty () ? "" : " ") << v; - } - } - - if (o.print_only ()) - return 0; - - // Ask the user if we should continue. - // - if (!(o.yes () || yn_prompt ("continue? [Y/n]", 'y'))) - return 1; - - // Ok, we have "all systems go". The overall action plan is as follows. - // - // 1. disfigure up/down-graded, reconfigured [left to right] - // 2. purge up/down-graded - // 3. fetch new, up/down-graded - // 4. unpack new, up/down-graded - // 5. configure all [right to left] - // 6. build user selection [right to left] - // - // Note that for some actions, e.g., purge or fetch, the order is not - // really important. We will, however, do it right to left since that - // is the order closest to that of the user selection. - // - // We are also going to combine purge/fetch/unpack into a single step - // and use the replace mode so it will become just fetch/unpack. - // - - // disfigure - // - for (const build_package& p: pkgs) - { - // We are only interested in configured packages that are either - // up/down-graded or need reconfiguration (e.g., dependents). - // - if (!p.reconfigure ()) - continue; - - const shared_ptr& sp (p.selected); - - // Each package is disfigured in its own transaction, so that we - // always leave the configuration in a valid state. - // - transaction t (db.begin ()); - pkg_disfigure (c, o, t, sp); // Commits the transaction. 
- assert (sp->state == package_state::unpacked); - - if (verb) - text << "disfigured " << sp->name << " " << sp->version; - } - - // fetch/unpack - // - for (build_package& p: reverse_iterate (pkgs)) - { - shared_ptr& sp (p.selected); - const shared_ptr& ap (p.available); - - if (ap == nullptr) // Skip dependents. - continue; - - // Fetch if this is a new package or if we are up/down-grading. - // - if (sp == nullptr || sp->version != ap->version) - { - sp.reset (); // For the directory case below. - - // Distinguish between the package and archive/directory cases. - // - const package_location& pl (ap->locations[0]); // Got to have one. - - if (pl.repository.object_id () != "") // Special root? - { - transaction t (db.begin ()); - sp = pkg_fetch (o, - c, - t, - ap->id.name, - ap->version, - true); // Replace; commits the transaction. - } - else if (exists (pl.location)) // Directory case is handled by unpack. - { - transaction t (db.begin ()); - sp = pkg_fetch (o, - c, - t, - pl.location, // Archive path. - true, // Replace - false); // Don't purge; commits the transaction. - } - - if (sp != nullptr) // Actually unpacked something? - { - assert (sp->state == package_state::fetched); - - if (verb) - text << "fetched " << sp->name << " " << sp->version; - } - } - - // Unpack. Note that the package can still be NULL if this is the - // directory case (see the fetch code above). - // - if (sp == nullptr || sp->state == package_state::fetched) - { - if (sp != nullptr) - { - transaction t (db.begin ()); - sp = pkg_unpack (o, c, t, ap->id.name); // Commits the transaction. - } - else - { - const package_location& pl (ap->locations[0]); - assert (pl.repository.object_id () == ""); // Special root. - - transaction t (db.begin ()); - sp = pkg_unpack (c, - t, - path_cast (pl.location), - true, // Replace. - false); // Don't purge; commits the transaction. - } - - assert (sp->state == package_state::unpacked); - - if (verb) - text << "unpacked " << sp->name << " " << sp->version; - } - } - - // configure - // - for (const build_package& p: reverse_iterate (pkgs)) - { - const shared_ptr& sp (p.selected); - - assert (sp != nullptr); - - // We configure everything that isn't already configured. - // - if (sp->state == package_state::configured) - continue; - - transaction t (db.begin ()); - pkg_configure (c, o, t, sp, strings ()); // Commits the transaction. - assert (sp->state == package_state::configured); - - if (verb) - text << "configured " << sp->name << " " << sp->version; - } - - // Small detour: update the hold state. While we could have tried - // to "weave" it into one of the previous actions, things there - // are already convoluted enough. - // - for (const build_package& p: reverse_iterate (pkgs)) - { - const shared_ptr& sp (p.selected); - assert (sp != nullptr); - - // Note that we should only "increase" the hold state. - // - bool hp (p.hold_package && sp->hold_package != p.hold_package); - bool hv (p.hold_version && sp->hold_version != p.hold_version); - - if (hp || hv) - { - if (hp) sp->hold_package = true; - if (hv) sp->hold_version = true; - - transaction t (db.begin ()); - db.update (sp); - t.commit (); - - if (verb > 1) - { - if (hp) - text << "hold package " << sp->name; - - if (hv) - text << "hold version " << sp->name << " " << sp->version; - } - } - } - - if (o.configure_only ()) - return 0; - - // update - // - for (const build_package& p: reverse_iterate (pkgs)) - { - const shared_ptr& sp (p.selected); - - // Update the user selection only. 
- // - if (find (names.begin (), names.end (), sp->name) == names.end ()) - continue; - - pkg_update (c, o, sp); - - if (verb) - text << "updated " << sp->name << " " << sp->version; - } - - return 0; - } -} diff --git a/bpkg/buildfile b/bpkg/buildfile index 2e49f17..580f303 100644 --- a/bpkg/buildfile +++ b/bpkg/buildfile @@ -10,20 +10,22 @@ import libs += libodb-sqlite%lib{odb-sqlite} exe{bpkg}: \ {hxx }{ bpkg-version } \ { cxx}{ bpkg } {hxx ixx cxx}{ bpkg-options } \ -{hxx cxx}{ build } {hxx ixx cxx}{ build-options } \ +{hxx cxx}{ cfg-add } {hxx ixx cxx}{ cfg-add-options } \ {hxx cxx}{ cfg-create } {hxx ixx cxx}{ cfg-create-options } \ +{hxx cxx}{ cfg-fetch } {hxx ixx cxx}{ cfg-fetch-options } \ {hxx ixx cxx}{ common-options } \ {hxx ixx cxx}{ configuration-options } \ {hxx cxx}{ database } \ {hxx cxx}{ diagnostics } \ -{hxx cxx}{ drop } {hxx ixx cxx}{ drop-options } \ {hxx cxx}{ fetch } \ {hxx }{ forward } \ {hxx cxx}{ help } {hxx ixx cxx}{ help-options } \ {hxx cxx}{ manifest-utility } \ {hxx ixx cxx}{ package } \ {hxx ixx cxx}{ package-odb } file{ package.xml } \ +{hxx cxx}{ pkg-build } {hxx ixx cxx}{ pkg-build-options } \ {hxx }{ pkg-clean } {hxx ixx cxx}{ pkg-clean-options } \ +{hxx cxx}{ pkg-drop } {hxx ixx cxx}{ pkg-drop-options } \ {hxx cxx}{ pkg-command } \ {hxx cxx}{ pkg-configure } {hxx ixx cxx}{ pkg-configure-options } \ {hxx cxx}{ pkg-disfigure } {hxx ixx cxx}{ pkg-disfigure-options } \ @@ -33,9 +35,7 @@ exe{bpkg}: \ {hxx cxx}{ pkg-unpack } {hxx ixx cxx}{ pkg-unpack-options } \ {hxx }{ pkg-update } {hxx ixx cxx}{ pkg-update-options } \ {hxx cxx}{ pkg-verify } {hxx ixx cxx}{ pkg-verify-options } \ -{hxx cxx}{ rep-add } {hxx ixx cxx}{ rep-add-options } \ {hxx cxx}{ rep-create } {hxx ixx cxx}{ rep-create-options } \ -{hxx cxx}{ rep-fetch } {hxx ixx cxx}{ rep-fetch-options } \ {hxx cxx}{ rep-info } {hxx ixx cxx}{ rep-info-options } \ {hxx cxx}{ satisfaction } \ {hxx }{ types } \ @@ -62,14 +62,14 @@ if! $cli.loaded {hxx ixx cxx}{configuration-options}: cli{configuration-options} {hxx ixx cxx}{bpkg-options}: cli{bpkg-options} -# High-level commands. +# Help command. # {hxx ixx cxx}{help-options}: cli{help-options} -{hxx ixx cxx}{build-options}: cli{build-options} -{hxx ixx cxx}{drop-options}: cli{drop-options} # pkg-* command. # +{hxx ixx cxx}{pkg-build-options}: cli{pkg-build-options} +{hxx ixx cxx}{pkg-drop-options}: cli{pkg-drop-options} {hxx ixx cxx}{pkg-status-options}: cli{pkg-status-options} {hxx ixx cxx}{pkg-fetch-options}: cli{pkg-fetch-options} {hxx ixx cxx}{pkg-unpack-options}: cli{pkg-unpack-options} @@ -86,8 +86,8 @@ if! $cli.loaded # rep-* command. # -{hxx ixx cxx}{rep-add-options}: cli{rep-add-options} -{hxx ixx cxx}{rep-fetch-options}: cli{rep-fetch-options} +{hxx ixx cxx}{cfg-add-options}: cli{cfg-add-options} +{hxx ixx cxx}{cfg-fetch-options}: cli{cfg-fetch-options} {hxx ixx cxx}{rep-info-options}: cli{rep-info-options} {hxx ixx cxx}{rep-create-options}: cli{rep-create-options} @@ -97,7 +97,7 @@ cli.options += -I $src_root --include-with-brackets --include-prefix bpkg \ --guard-prefix BPKG --cxx-prologue "#include " \ --cli-namespace bpkg::cli --generate-file-scanner --generate-specifier \ --generate-parse --ansi-color --page-usage 'bpkg::print_$name$_' \ ---include-base-last --long-usage --option-length 20 +--include-base-last --long-usage --option-length 22 cli.cxx{common-options}: cli.options += --short-usage # Both short and long. cli.cxx{bpkg-options}: cli.options += --suppress-undocumented # --help, etc. 
diff --git a/bpkg/cfg-add b/bpkg/cfg-add new file mode 100644 index 0000000..9f8ddeb --- /dev/null +++ b/bpkg/cfg-add @@ -0,0 +1,17 @@ +// file : bpkg/cfg-add -*- C++ -*- +// copyright : Copyright (c) 2014-2015 Code Synthesis Ltd +// license : MIT; see accompanying LICENSE file + +#ifndef BPKG_CFG_ADD +#define BPKG_CFG_ADD + +#include +#include + +namespace bpkg +{ + int + cfg_add (const cfg_add_options&, cli::scanner& args); +} + +#endif // BPKG_CFG_ADD diff --git a/bpkg/cfg-add-options.cli b/bpkg/cfg-add-options.cli new file mode 100644 index 0000000..185fcfd --- /dev/null +++ b/bpkg/cfg-add-options.cli @@ -0,0 +1,30 @@ +// file : bpkg/cfg-add-options.cli +// copyright : Copyright (c) 2014-2015 Code Synthesis Ltd +// license : MIT; see accompanying LICENSE file + +include ; + +"\section=1" +"\name=bpkg-cfg-add" +"\summary=add repository to configuration" + +/* +"\h{SYNOPSIS} + +bpkg cfg-add [] " + +"\h{DESCRIPTION} + +The \cb{cfg-add} command adds the specified source repository to the +configuration. Note that this command doesn't fetch the available +packages list for the newly added repository. To do that, use the +\cb{cfg-fetch} command. +" +*/ + +namespace bpkg +{ + class cfg_add_options: configuration_options + { + }; +} diff --git a/bpkg/cfg-add.cxx b/bpkg/cfg-add.cxx new file mode 100644 index 0000000..33c4ba5 --- /dev/null +++ b/bpkg/cfg-add.cxx @@ -0,0 +1,70 @@ +// file : bpkg/cfg-add.cxx -*- C++ -*- +// copyright : Copyright (c) 2014-2015 Code Synthesis Ltd +// license : MIT; see accompanying LICENSE file + +#include + +#include // invalid_argument + +#include +#include +#include +#include +#include +#include +#include + +using namespace std; +using namespace butl; + +namespace bpkg +{ + int + cfg_add (const cfg_add_options& o, cli::scanner& args) + { + tracer trace ("cfg_add"); + + dir_path c (o.directory ()); + level4 ([&]{trace << "configuration: " << c;}); + + if (!args.more ()) + fail << "repository location argument expected" << + info << "run 'bpkg help cfg-add' for more information"; + + repository_location rl (parse_location (args.next ())); + const string& rn (rl.canonical_name ()); + + // Create the new repository and add is as a complement to the root. + // + database db (open (c, trace)); + transaction t (db.begin ()); + session s; // Repository dependencies can have cycles. + + // It is possible that this repository is already in the database. + // For example, it might be a prerequisite of one of the already + // added repository. 
+ // + shared_ptr r (db.find (rl.canonical_name ())); + + if (r == nullptr) + { + r.reset (new repository (rl)); + db.persist (r); + } + + shared_ptr root (db.load ("")); + + if (!root->complements.insert (lazy_shared_ptr (db, r)).second) + { + fail << rn << " is already a repository of this configuration"; + } + + db.update (root); + t.commit (); + + if (verb) + text << "added repository " << rn; + + return 0; + } +} diff --git a/bpkg/cfg-fetch b/bpkg/cfg-fetch new file mode 100644 index 0000000..d311c24 --- /dev/null +++ b/bpkg/cfg-fetch @@ -0,0 +1,17 @@ +// file : bpkg/cfg-fetch -*- C++ -*- +// copyright : Copyright (c) 2014-2015 Code Synthesis Ltd +// license : MIT; see accompanying LICENSE file + +#ifndef BPKG_CFG_FETCH +#define BPKG_CFG_FETCH + +#include +#include + +namespace bpkg +{ + int + cfg_fetch (const cfg_fetch_options&, cli::scanner& args); +} + +#endif // BPKG_CFG_FETCH diff --git a/bpkg/cfg-fetch-options.cli b/bpkg/cfg-fetch-options.cli new file mode 100644 index 0000000..696627f --- /dev/null +++ b/bpkg/cfg-fetch-options.cli @@ -0,0 +1,28 @@ +// file : bpkg/cfg-fetch-options.cli +// copyright : Copyright (c) 2014-2015 Code Synthesis Ltd +// license : MIT; see accompanying LICENSE file + +include ; + +"\section=1" +"\name=bpkg-cfg-fetch" +"\summary=fetch available packages list" + +/* +"\h{SYNOPSIS} + +bpkg cfg-fetch []" + +"\h{DESCRIPTION} + +The \cb{cfg-fetch} command recursively fetches the prerequisite repository +and available package lists for all the repositories that were added +(\cb{cfg-add}) to the configuration." +*/ + +namespace bpkg +{ + class cfg_fetch_options: configuration_options + { + }; +} diff --git a/bpkg/cfg-fetch.cxx b/bpkg/cfg-fetch.cxx new file mode 100644 index 0000000..f765678 --- /dev/null +++ b/bpkg/cfg-fetch.cxx @@ -0,0 +1,245 @@ +// file : bpkg/cfg-fetch.cxx -*- C++ -*- +// copyright : Copyright (c) 2014-2015 Code Synthesis Ltd +// license : MIT; see accompanying LICENSE file + +#include + +#include +#include + +#include + +#include +#include +#include +#include +#include +#include +#include + +using namespace std; +using namespace butl; + +namespace bpkg +{ + static void + cfg_fetch (const common_options& co, + transaction& t, + const shared_ptr& r) + { + tracer trace ("cfg_fetch(rep)"); + + database& db (t.database ()); + tracer_guard tg (db, trace); + + const repository_location& rl (r->location); + level4 ([&]{trace << r->name << " " << rl;}); + assert (rl.absolute () || rl.remote ()); + + // The fetch_*() functions below will be quiet at level 1, which + // can be quite confusing if the download hangs. + // + if (verb >= (rl.remote () ? 1 : 2)) + text << "fetching " << r->name; + + r->fetched = true; // Mark as being fetched. + + // Load the 'repositories' file and use it to populate the + // prerequisite and complement repository sets. + // + repository_manifests rms (fetch_repositories (co, rl, true)); + + for (repository_manifest& rm: rms) + { + repository_role rr (rm.effective_role ()); + + if (rr == repository_role::base) + continue; // Entry for this repository. + + // If the location is relative, complete it using this repository + // as a base. + // + if (rm.location.relative ()) + { + try + { + rm.location = repository_location (rm.location, rl); + } + catch (const invalid_argument& e) + { + fail << "invalid relative repository location '" << rm.location + << "': " << e.what () << + info << "base repository location is " << rl; + } + } + + // We might already have this repository in the database. 
+ // + shared_ptr pr ( + db.find ( + rm.location.canonical_name ())); + + if (pr == nullptr) + { + pr = make_shared (move (rm.location)); + db.persist (pr); // Enter into session, important if recursive. + } + + // Load the prerequisite repository unless it has already been + // (or is already being) fetched. + // + if (!pr->fetched) + cfg_fetch (co, t, pr); + + // @@ What if we have duplicated? Ideally, we would like to check + // this once and as early as possible. The original idea was to + // do it during manifest parsing and serialization. But at that + // stage we have no way of completing relative locations (which + // is required to calculate canonical names). Current thinking is + // that we should have something like rep-verify (similar to + // pkg-verify) that performs (potentially expensive) repository + // verifications, including making sure prerequisites can be + // satisfied from the listed repositories, etc. Perhaps we can + // also re-use some of that functionality here. I.e., instead of + // calling the "naked" fetch_repositories() above, we will call + // a function from rep-verify that will perform extra verifications. + // + // @@ Also check for self-prerequisite. + // + switch (rr) + { + case repository_role::complement: + { + level4 ([&]{trace << pr->name << " complement of " << r->name;}); + r->complements.insert (lazy_shared_ptr (db, pr)); + break; + } + case repository_role::prerequisite: + { + level4 ([&]{trace << pr->name << " prerequisite of " << r->name;}); + r->prerequisites.insert (lazy_weak_ptr (db, pr)); + break; + } + case repository_role::base: + assert (false); + } + } + + // Load the 'packages' file. + // + // @@ We need to check that that 'repositories' file hasn't + // changed since. + // + package_manifests pms (fetch_packages (co, rl, true)); + + // "Suspend" session while persisting packages to reduce memory + // consumption. + // + session& s (session::current ()); + session::reset_current (); + + for (package_manifest& pm: pms) + { + // We might already have this package in the database. + // + bool persist (false); + + shared_ptr p ( + db.find ( + available_package_id (pm.name, pm.version))); + + if (p == nullptr) + { + p = make_shared (move (pm)); + persist = true; + } + + // This repository shouldn't already be in the location set since + // that would mean it has already been loaded and we shouldn't be + // here. + // + p->locations.push_back ( + package_location {lazy_shared_ptr (db, r), + move (*pm.location)}); + + if (persist) + db.persist (p); + else + db.update (p); + } + + session::current (s); // "Resume". + + // Save the changes to the repository object. + // + db.update (r); + } + + int + cfg_fetch (const cfg_fetch_options& o, cli::scanner&) + { + tracer trace ("cfg_fetch"); + + dir_path c (o.directory ()); + level4 ([&]{trace << "configuration: " << c;}); + + database db (open (c, trace)); + transaction t (db.begin ()); + session s; // Repository dependencies can have cycles. + + shared_ptr root (db.load ("")); + const auto& ua (root->complements); // User-added repositories. + + if (ua.empty ()) + fail << "configuration " << c << " has no repositories" << + info << "use 'bpkg cfg-add' to add a repository"; + + // Clean repositories and available packages. At the end only + // repositories that were explicitly added by the user and the + // special root repository should remain. 
+ // + db.erase_query (); + + for (shared_ptr r: pointer_result (db.query ())) + { + if (r == root) + { + level5 ([&]{trace << "skipping root";}); + } + else if (ua.find (lazy_shared_ptr (db, r)) != ua.end ()) + { + level4 ([&]{trace << "cleaning " << r->name;}); + + r->complements.clear (); + r->prerequisites.clear (); + r->fetched = false; + db.update (r); + } + else + { + level4 ([&]{trace << "erasing " << r->name;}); + db.erase (r); + } + } + + // Now recursively fetch prerequisite/complement repositories and + // their packages. + // + for (const lazy_shared_ptr& lp: ua) + cfg_fetch (o, t, lp.load ()); + + size_t rcount, pcount; + if (verb) + { + rcount = db.query_value (); + pcount = db.query_value (); + } + + t.commit (); + + if (verb) + text << pcount << " package(s) in " << rcount << " repository(s)"; + + return 0; + } +} diff --git a/bpkg/common-options.cli b/bpkg/common-options.cli index 02cc605..86955d7 100644 --- a/bpkg/common-options.cli +++ b/bpkg/common-options.cli @@ -93,7 +93,7 @@ namespace bpkg path --fetch { "", - "The fetch program to be used to download remote resources. Currently, + "The fetch program to be used to download resources. Currently, \cb{bpkg} recognizes \cb{curl}, \cb{wget}, and \cb{fetch}. Note that the last component of must contain one of these names as a substring in order for \cb{bpkg} to recognize which program is being diff --git a/bpkg/drop b/bpkg/drop deleted file mode 100644 index 0054045..0000000 --- a/bpkg/drop +++ /dev/null @@ -1,17 +0,0 @@ -// file : bpkg/drop -*- C++ -*- -// copyright : Copyright (c) 2014-2015 Code Synthesis Ltd -// license : MIT; see accompanying LICENSE file - -#ifndef BPKG_DROP -#define BPKG_DROP - -#include -#include - -namespace bpkg -{ - int - drop (const drop_options&, cli::scanner& args); -} - -#endif // BPKG_DROP diff --git a/bpkg/drop-options.cli b/bpkg/drop-options.cli deleted file mode 100644 index 6901398..0000000 --- a/bpkg/drop-options.cli +++ /dev/null @@ -1,62 +0,0 @@ -// file : bpkg/drop-options.cli -// copyright : Copyright (c) 2014-2015 Code Synthesis Ltd -// license : MIT; see accompanying LICENSE file - -include ; - -"\section=1" -"\name=bpkg-drop" -"\summary=drop one or more packages" - -/* -"\h{SYNOPSIS} - -bpkg drop [] ..." - -"\h{DESCRIPTION} - -The \cb{drop} command drops one or more packages from the configuration. -If the packages being dropped still have dependents, then those will have -to be drop as well and you will be prompted for a confirmation. Similarly, -if the packages being dropped have prerequisites that are no longer needed, -you will be offered to drop those as well. - -The \cb{drop} command also supports several \cb{--*-only} options that allow -you to limit the amount of work that will be done. -*/ - -namespace bpkg -{ - class drop_options: configuration_options - { - bool --yes|-y - { - "Assume the answer to all prompts is \cb{yes}. Note that this option - does not apply to the dropping of dependents; use \cb{--drop-dependent} - for that." - } - - bool --no|-n - { - "Assume the answer to all prompts is \cb{no}. Only makes sense together - with \cb{--print-only|-p}." - } - - bool --drop-dependent - { - "Don't warn about or ask for confirmation of dropping dependent - packages." - } - - bool --disfigure-only - { - "Disfigure all the packages but don't purge." - } - - bool --print-only|-p - { - "Print to \cb{STDOUT} what would be done without actually doing - anything." 
- } - }; -} diff --git a/bpkg/drop.cxx b/bpkg/drop.cxx deleted file mode 100644 index 87c475d..0000000 --- a/bpkg/drop.cxx +++ /dev/null @@ -1,512 +0,0 @@ -// file : bpkg/drop.cxx -*- C++ -*- -// copyright : Copyright (c) 2014-2015 Code Synthesis Ltd -// license : MIT; see accompanying LICENSE file - -#include - -#include -#include -#include -#include // cout -#include // reference_wrapper - -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -#include -#include - -using namespace std; -using namespace butl; - -namespace bpkg -{ - enum class drop_reason - { - user, // User selection. - dependent, // Dependent of a user or another dependent. - prerequisite // Prerequisite of a user, dependent, or another prerequisite. - }; - - struct drop_package - { - shared_ptr package; - drop_reason reason; - }; - - // List of packages that are dependent on the user selection. - // - struct dependent_name - { - string name; - string prq_name; // Prerequisite package name. - }; - using dependent_names = vector; - - // A "dependency-ordered" list of packages and their prerequisites. - // That is, every package on the list only possibly depending on the - // ones after it. In a nutshell, the usage is as follows: we first add - // the packages specified by the user (the "user selection"). We then - // collect all the dependent packages of the user selection, if any. - // These will either have to be dropped as well or we cannot continue. - // If the user gave the go ahead to drop the dependents, then, for our - // purposes, this list of dependents can from now own be treated as if - // it was a part of the user selection. The next step is to collect all - // the non-held prerequisites of the user selection with the goal of - // figuring out which ones will no longer be needed and offering to - // drop them as well. This part is a bit tricky and has to be done in - // three steps: We first collect all the prerequisites that we could - // possibly be dropping. We then order all the packages. And, finally, - // we filter out prerequisites that we cannot drop. See the comment to - // the call to collect_prerequisites() for details on why it has to be - // done this way. - // - struct drop_packages: list> - { - // Collect a package to be dropped, by default, as a user selection. - // - bool - collect (shared_ptr p, drop_reason r = drop_reason::user) - { - string n (p->name); // Because of move(p) below. - return map_.emplace (move (n), data_type {end (), {move (p), r}}).second; - } - - // Collect all the dependets of the user selection retutning the list - // of their names. Dependents of dependents are collected recursively. - // - dependent_names - collect_dependents (database& db) - { - dependent_names dns; - - for (const auto& pr: map_) - { - const drop_package& dp (pr.second.package); - - // Unconfigured package cannot have any dependents. 
- // - if (dp.reason == drop_reason::user && - dp.package->state == package_state::configured) - collect_dependents (db, dns, dp.package); - } - - return dns; - } - - void - collect_dependents (database& db, - dependent_names& dns, - const shared_ptr& p) - { - using query = query; - - for (auto& pd: db.query (query::name == p->name)) - { - const string& dn (pd.name); - - if (map_.find (dn) == map_.end ()) - { - shared_ptr dp (db.load (dn)); - dns.push_back (dependent_name {dn, p->name}); - collect (dp, drop_reason::dependent); - collect_dependents (db, dns, dp); - } - } - } - - // Collect prerequisites of the user selection and its dependents, - // returning true if any were collected. Prerequisites of prerequisites - // are collected recursively. - // - bool - collect_prerequisites (database& db) - { - bool r (false); - - for (const auto& pr: map_) - { - const drop_package& dp (pr.second.package); - - // Unconfigured package cannot have any prerequisites. - // - if ((dp.reason == drop_reason::user || - dp.reason == drop_reason::dependent) && - dp.package->state == package_state::configured) - r = collect_prerequisites (db, dp.package) || r; - } - - return r; - } - - bool - collect_prerequisites (database& db, const shared_ptr& p) - { - bool r (false); - - for (const auto& pair: p->prerequisites) - { - const string& pn (pair.first.object_id ()); - - if (map_.find (pn) == map_.end ()) - { - shared_ptr pp (db.load (pn)); - - if (!pp->hold_package) // Prune held packages. - { - collect (pp, drop_reason::prerequisite); - collect_prerequisites (db, pp); - r = true; - } - } - } - - return r; - } - - // Order the previously-collected package with the specified name - // returning its positions. - // - iterator - order (const string& name) - { - // Every package that we order should have already been collected. - // - auto mi (map_.find (name)); - assert (mi != map_.end ()); - - // If this package is already in the list, then that would also - // mean all its prerequisites are in the list and we can just - // return its position. - // - iterator& pos (mi->second.position); - if (pos != end ()) - return pos; - - // Order all the prerequisites of this package and compute the - // position of its "earliest" prerequisite -- this is where it - // will be inserted. - // - drop_package& dp (mi->second.package); - const shared_ptr& p (dp.package); - - // Unless this package needs something to be before it, add it to - // the end of the list. - // - iterator i (end ()); - - // Figure out if j is before i, in which case set i to j. The goal - // here is to find the position of our "earliest" prerequisite. - // - auto update = [this, &i] (iterator j) - { - for (iterator k (j); i != j && k != end ();) - if (++k == i) - i = j; - }; - - // Only configured packages have prerequisites. - // - if (p->state == package_state::configured) - { - for (const auto& pair: p->prerequisites) - { - const string& pn (pair.first.object_id ()); - - // The prerequisites may not necessarily be in the map (e.g., - // a held package that we prunned). - // - if (map_.find (pn) != map_.end ()) - update (order (pn)); - } - } - - return pos = insert (i, dp); - } - - // Remove prerequisite packages that we cannot possibly drop, returning - // true if any remain. - // - bool - filter_prerequisites (database& db) - { - bool r (false); - - // Iterate from "more" to "less"-dependent. 
- // - for (auto i (begin ()); i != end (); ) - { - const drop_package& dp (*i); - - if (dp.reason == drop_reason::prerequisite) - { - const shared_ptr& p (dp.package); - - bool keep (true); - - // Get our dependents (which, BTW, could only have been before us - // on the list). If they are all in the map, then we can be dropped. - // - using query = query; - - for (auto& pd: db.query (query::name == p->name)) - { - if (map_.find (pd.name) == map_.end ()) - { - keep = false; - break; - } - } - - if (!keep) - { - i = erase (i); - map_.erase (p->name); - continue; - } - - r = true; - } - - ++i; - } - - return r; - } - - private: - struct data_type - { - iterator position; // Note: can be end(), see collect(). - drop_package package; - }; - - map map_; - }; - - int - drop (const drop_options& o, cli::scanner& args) - { - tracer trace ("drop"); - - if (o.yes () && o.no ()) - fail << "both --yes|-y and --no|-n specified"; - - const dir_path& c (o.directory ()); - level4 ([&]{trace << "configuration: " << c;}); - - if (!args.more ()) - fail << "package name argument expected" << - info << "run 'bpkg help drop' for more information"; - - database db (open (c, trace)); - - // Note that the session spans all our transactions. The idea here is - // that drop_package objects in the drop_packages list below will be - // cached in this session. When subsequent transactions modify any of - // these objects, they will modify the cached instance, which means - // our list will always "see" their updated state. - // - session s; - - // Assemble the list of packages we will need to drop. - // - drop_packages pkgs; - bool drop_prq (false); - { - transaction t (db.begin ()); - - // The first step is to load and collect all the packages specified - // by the user. - // - strings names; - while (args.more ()) - { - string n (args.next ()); - level4 ([&]{trace << "package " << n;}); - - shared_ptr p (db.find (n)); - - if (p == nullptr) - fail << "package " << n << " does not exist in configuration " << c; - - if (p->state == package_state::broken) - fail << "unable to drop broken package " << n << - info << "use 'pkg-purge --force' to remove"; - - if (pkgs.collect (move (p))) - names.push_back (move (n)); - } - - // The next step is to see if there are any dependents that are not - // already on the list. We will either have to drop those as well or - // abort. - // - dependent_names dnames (pkgs.collect_dependents (db)); - if (!dnames.empty () && !o.drop_dependent ()) - { - { - diag_record dr (text); - - dr << "following dependent packages will have to be dropped " - << "as well:"; - - for (const dependent_name& dn: dnames) - dr << text << dn.name - << " (because dropping " << dn.prq_name << ")"; - } - - if (o.yes ()) - fail << "refusing to drop dependent packages with just --yes" << - info << "specify --drop-dependent to confirm"; - - if (o.no () || !yn_prompt ("drop dependent packages? [y/N]", 'n')) - return 1; - } - - // Collect all the prerequisites that are not held. These will be - // the candidates to drop as well. Note that we cannot make the - // final decision who we can drop until we have the complete and - // ordered list of all the packages that we could potentially be - // dropping. The ordered part is important: we will have to decide - // about the "more dependent" prerequisite before we can decide - // about the "less dependent" one since the former could be depending - // on the latter and, if that's the case and "more" cannot be dropped, - // then neither can "less". 
- // - pkgs.collect_prerequisites (db); - - // Now that we have collected all the packages we could possibly be - // dropping, arrange them in the "dependency order", that is, with - // every package on the list only possibly depending on the ones - // after it. - // - // First order the user selection so that we stay as close to the - // order specified by the user as possible. Then order the dependent - // packages. Since each of them depends on one or more packages from - // the user selection, it will be inserted before the first package - // on which it depends. - // - for (const string& n: names) - pkgs.order (n); - - for (const dependent_name& dn: dnames) - pkgs.order (dn.name); - - // Filter out prerequisites that we cannot possibly drop (e.g., they - // have dependents other than the ones we are dropping). If there are - // some that we can drop, ask the user for confirmation. - // - if (pkgs.filter_prerequisites (db) && !(drop_prq = o.yes ()) && !o.no ()) - { - { - diag_record dr (text); - - dr << "following prerequisite packages were automatically " - << "built and will no longer be necessary:"; - - for (const drop_package& dp: pkgs) - { - if (dp.reason == drop_reason::prerequisite) - dr << text << dp.package->name; - } - } - - drop_prq = yn_prompt ("drop prerequisite packages? [Y/n]", 'y'); - } - - t.commit (); - } - - // Print what we are going to do, then ask for the user's confirmation. - // - if (o.print_only () || !(o.yes () || o.no ())) - { - for (const drop_package& dp: pkgs) - { - // Skip prerequisites if we weren't instructed to drop them. - // - if (dp.reason == drop_reason::prerequisite && !drop_prq) - continue; - - const shared_ptr& p (dp.package); - - if (o.print_only ()) - cout << "drop " << p->name << endl; - else if (verb) - text << "drop " << p->name; - } - - if (o.print_only ()) - return 0; - } - - // Ask the user if we should continue. - // - if (o.no () || !(o.yes () || yn_prompt ("continue? [Y/n]", 'y'))) - return 1; - - // All that's left to do is first disfigure configured packages and - // then purge all of them. We do both left to right (i.e., from more - // dependent to less dependent). For disfigure this order is required. - // For purge, it will be the order closest to the one specified by the - // user. - // - for (const drop_package& dp: pkgs) - { - // Skip prerequisites if we weren't instructed to drop them. - // - if (dp.reason == drop_reason::prerequisite && !drop_prq) - continue; - - const shared_ptr& p (dp.package); - - if (p->state != package_state::configured) - continue; - - // Each package is disfigured in its own transaction, so that we - // always leave the configuration in a valid state. - // - transaction t (db.begin ()); - pkg_disfigure (c, o, t, p); // Commits the transaction. - assert (p->state == package_state::unpacked); - - if (verb) - text << "disfigured " << p->name; - } - - if (o.disfigure_only ()) - return 0; - - // Purge. - // - for (const drop_package& dp: pkgs) - { - // Skip prerequisites if we weren't instructed to drop them. - // - if (dp.reason == drop_reason::prerequisite && !drop_prq) - continue; - - const shared_ptr& p (dp.package); - - assert (p->state == package_state::fetched || - p->state == package_state::unpacked); - - transaction t (db.begin ()); - pkg_purge (c, t, p); // Commits the transaction, p is now transient. 
- - if (verb) - text << "purged " << p->name; - } - - return 0; - } -} diff --git a/bpkg/package b/bpkg/package index 4269605..f07a2c1 100644 --- a/bpkg/package +++ b/bpkg/package @@ -370,7 +370,7 @@ namespace bpkg // Repository from which this package came. Note that it is not // a pointer to the repository object because it could be wiped - // out (e.g., as a result of rep-fetch). We call such packages + // out (e.g., as a result of cfg-fetch). We call such packages // "orphans". While we can get a list of orphan's prerequisites // (by loading its manifest), we wouldn't know which repository // to use as a base to resolve them. As a result, an orphan that diff --git a/bpkg/pkg-build b/bpkg/pkg-build new file mode 100644 index 0000000..562321f --- /dev/null +++ b/bpkg/pkg-build @@ -0,0 +1,17 @@ +// file : bpkg/pkg-build -*- C++ -*- +// copyright : Copyright (c) 2014-2015 Code Synthesis Ltd +// license : MIT; see accompanying LICENSE file + +#ifndef BPKG_PKG_BUILD +#define BPKG_PKG_BUILD + +#include +#include + +namespace bpkg +{ + int + pkg_build (const pkg_build_options&, cli::scanner& args); +} + +#endif // BPKG_PKG_BUILD diff --git a/bpkg/pkg-build-options.cli b/bpkg/pkg-build-options.cli new file mode 100644 index 0000000..ceeef65 --- /dev/null +++ b/bpkg/pkg-build-options.cli @@ -0,0 +1,63 @@ +// file : bpkg/pkg-build-options.cli +// copyright : Copyright (c) 2014-2015 Code Synthesis Ltd +// license : MIT; see accompanying LICENSE file + +include ; + +"\section=1" +"\name=bpkg-pkg-build" +"\summary=build one or more packages" + +namespace bpkg +{ + { + " ", + + "\h|SYNOPSIS| + + \c{\b{bpkg pkg-build} [] ([/] | | )...} + + \h|DESCRIPTION| + + The \cb{pkg-build} command builds one or more packages including all their + prerequisites. Each package can be specified as just the name () with + optional package version () in which case the package will be + automatically fetched from one of the configuration's source repositories + (see the \cb{cfg-add} and \cb{cfg-fetch} commands). Alternatively, the + package can be specified as either the path to the package source archive + () or package source directory (). See the \cb{pkg-fetch} and + \cb{pkg-unpack} commands for more information on the semantics of + specifying the package as an archive or directory. + + Packages that are specified explicitly on the command line will be + \i{held}, that is, they will not be considered for automatic removal if + they no longer have any dependents. Packages that are specified with the + explicit package version () or as an archive or directory, will, in + addition, have their versions held, that is, they will not be + automatically upgraded. + + The \cb{pkg-build} command also supports several \cb{--*-only} options + that allow you to limit the amount of work that will be done." + } + + class pkg_build_options: configuration_options + { + "\h|PKG-BUILD OPTIONS|" + + bool --yes|-y + { + "Assume the answer to all prompts is \cb{yes}." + } + + bool --configure-only|-c + { + "Configure all the packages but don't update." + } + + bool --print-only|-p + { + "Print to \cb{STDOUT} what would be done without actually doing + anything." 
+ } + }; +} diff --git a/bpkg/pkg-build.cxx b/bpkg/pkg-build.cxx new file mode 100644 index 0000000..cb108eb --- /dev/null +++ b/bpkg/pkg-build.cxx @@ -0,0 +1,1224 @@ +// file : bpkg/pkg-build.cxx -*- C++ -*- +// copyright : Copyright (c) 2014-2015 Code Synthesis Ltd +// license : MIT; see accompanying LICENSE file + +#include + +#include +#include +#include // make_move_iterator() +#include // cout +#include // find() +#include // reference_wrapper + +#include // reverse_iterate() + +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include +#include +#include + +using namespace std; +using namespace butl; + +namespace bpkg +{ + // @@ TODO + // + // - Detect and complain about dependency cycles. + // - Configuration vars (both passed and preserved) + // + + // Try to find a package that optionally satisfies the specified + // version constraint. Look in the specified repository, its + // prerequisite repositories, and their complements, recursively + // (note: recursivity applies to complements, not prerequisites). + // Return the package and the repository in which it was found or + // NULL for both if not found. + // + std::pair, shared_ptr> + find_available (database& db, + const string& name, + const shared_ptr& r, + const optional& c) + { + using query = query; + + query q (query::id.name == name); + const auto& vm (query::id.version); + + // If there is a constraint, then translate it to the query. Otherwise, + // get the latest version. + // + bool order (true); + if (c) + { + const version& v (c->version); + + // Note that the constraint's version is always rhs (libfoo >= 1.2.3). + // + switch (c->operation) + { + case comparison::eq: q = q && vm == v; order = false; break; + case comparison::lt: q = q && vm < v; break; + case comparison::gt: q = q && vm > v; break; + case comparison::le: q = q && vm <= v; break; + case comparison::ge: q = q && vm >= v; break; + } + } + + if (order) + q += order_by_version_desc (vm); + + // Filter the result based on the repository to which each version + // belongs. + // + return filter_one (r, db.query (q)); + } + + // Create a transient (or fake, if you prefer) available_package + // object corresponding to the specified selected object. Note + // that the package locations list is left empty and that the + // returned repository could be NULL if the package is an orphan. + // + std::pair, shared_ptr> + make_available (const common_options& options, + const dir_path& cd, + database& db, + const shared_ptr& sp) + { + assert (sp != nullptr && sp->state != package_state::broken); + + // First see if we can find its repository. + // + shared_ptr ar ( + db.find ( + sp->repository.canonical_name ())); + + // The package is in at least fetched state, which means we should + // be able to get its manifest. + // + const optional& a (sp->archive); + const optional& d (sp->src_root); + + package_manifest m ( + sp->state == package_state::fetched + ? pkg_verify (options, a->absolute () ? *a : cd / *a, true) + : pkg_verify (d->absolute () ? *d : cd / *d, true)); + + return make_pair (make_shared (move (m)), move (ar)); + } + + // A "dependency-ordered" list of packages and their prerequisites. + // That is, every package on the list only possibly depending on the + // ones after it. In a nutshell, the usage is as follows: we first + // add one or more packages (the "initial selection"; for example, a + // list of packages the user wants built). 
The list then satisfies all + // the prerequisites of the packages that were added, recursively. At + // the end of this process we have an ordered list of all the packages + // that we have to build, from last to first, in order to build our + // initial selection. + // + // This process is split into two phases: satisfaction of all the + // dependencies (the collect() function) and ordering of the list + // (the order() function). + // + // During the satisfaction phase, we collect all the packages, their + // prerequisites (and so on, recursively) in a map trying to satisfy + // any dependency constraints. Specifically, during this step, we may + // "upgrade" or "downgrade" a package that is already in a map as a + // result of another package depending on it and, for example, requiring + // a different version. One notable side-effect of this process is that + // we may end up with a lot more packages in the map than we will have + // on the list. This is because some of the prerequisites of "upgraded" + // or "downgraded" packages may no longer need to be built. + // + // Note also that we don't try to do exhaustive constraint satisfaction + // (i.e., there is no backtracking). Specifically, if we have two + // candidate packages each satisfying a constraint of its dependent + // package, then if neither of them satisfy both constraints, then we + // give up and ask the user to resolve this manually by explicitly + // specifying the version that will satisfy both constraints. + // + struct build_package + { + shared_ptr selected; // NULL if not selected. + shared_ptr available; // Can be NULL, fake/transient. + shared_ptr repository; // Can be NULL (orphan) or root. + + // Hold flags. Note that we can only "increase" the values that are + // already in the selected package. + // + bool hold_package; + bool hold_version; + + // Constraint value plus, normally, the dependent package name that + // placed this constraint but can also be some other name for the + // initial selection (e.g., package version specified by the user + // on the command line). + // + struct constraint_type + { + string dependent; + dependency_constraint value; + + constraint_type () = default; + constraint_type (string d, dependency_constraint v) + : dependent (move (d)), value (move (v)) {} + }; + + vector constraints; + + // True if we need to reconfigure this package. If available package + // is NULL, then reconfigure must be true (this is a dependent that + // needs to be reconfigured because its prerequisite is being up/down- + // graded or reconfigured). Note that in some cases reconfigure is + // naturally implied. For example, if an already configured package + // is being up/down-graded. For such cases we don't guarantee that + // the reconfigure flag is true. We only make sure to set it for + // cases that would otherwise miss the need for the reconfiguration. + // As a result, use the reconfigure() accessor which detects both + // explicit and implied cases. + // + // At first, it may seem that this flag is redundant and having the + // available package set to NULL is sufficient. But consider the case + // where the user asked us to build a package that is already in the + // configured state (so all we have to do is pkg-update). Next, add + // to this a prerequisite package that is being upgraded. Now our + // original package has to be reconfigured. But without this flag + // we won't know (available for our package won't be NULL). 
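+    // A schematic example (package names are hypothetical): libhello is
+    // already configured against libfoo 1.0.0 and the user runs
+    // 'bpkg pkg-build libhello libfoo/2.0.0'. For libhello the selected
+    // and available versions match, so nothing about it alone says
+    // "reconfigure"; it is the upgrade of libfoo that forces us to set
+    // this flag for libhello.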
+ // + bool reconfigure_; + + bool + reconfigure () const + { + return selected != nullptr && + selected->state == package_state::configured && + (reconfigure_ || // Must be checked first, available could be NULL. + selected->version != available->version); + } + }; + + struct build_packages: list> + { + // Collect the package. Return true if this package version was, + // in fact, added to the map and false if it was already there + // or the existing version was preferred. + // + bool + collect (const common_options& options, + const dir_path& cd, + database& db, + build_package&& pkg) + { + using std::swap; // ...and not list::swap(). + + tracer trace ("collect"); + + assert (pkg.available != nullptr); // No dependents allowed here. + auto i (map_.find (pkg.available->id.name)); + + // If we already have an entry for this package name, then we + // have to pick one over the other. + // + if (i != map_.end ()) + { + const string& n (i->first); + + // At the end we want p1 to point to the object that we keep + // and p2 to the object whose constraints we should copy. + // + build_package* p1 (&i->second.package); + build_package* p2 (&pkg); + + // If versions are the same, then all we have to do is copy the + // constraint (p1/p2 already point to where we would want them to). + // + if (p1->available->version != p2->available->version) + { + using constraint_type = build_package::constraint_type; + + // If the versions differ, we have to pick one. Start with the + // newest version since if both satisfy, then that's the one we + // should prefer. So get the first to try into p1 and the second + // to try -- into p2. + // + if (p2->available->version > p1->available->version) + swap (p1, p2); + + // See if pv's version satisfies pc's constraints. Return the + // pointer to the unsatisfied constraint or NULL if all are + // satisfied. + // + auto test = [] (build_package* pv, build_package* pc) + -> const constraint_type* + { + for (const constraint_type& c: pc->constraints) + if (!satisfies (pv->available->version, c.value)) + return &c; + + return nullptr; + }; + + // First see if p1 satisfies p2's constraints. + // + if (auto c2 = test (p1, p2)) + { + // If not, try the other way around. + // + if (auto c1 = test (p2, p1)) + { + const string& d1 (c1->dependent); + const string& d2 (c2->dependent); + + fail << "unable to satisfy constraints on package " << n << + info << d1 << " depends on (" << n << " " << c1->value << ")" << + info << d2 << " depends on (" << n << " " << c2->value << ")" << + info << "available " << n << " " << p1->available->version << + info << "available " << n << " " << p2->available->version << + info << "explicitly specify " << n << " version to manually " + << "satisfy both constraints"; + } + else + swap (p1, p2); + } + + level4 ([&]{trace << "pick " << n << " " << p1->available->version + << " over " << p2->available->version;}); + } + + // See if we are replacing the object. If not, then we don't + // need to collect its prerequisites since that should have + // already been done. Remember, p1 points to the object we + // want to keep. + // + bool replace (p1 != &i->second.package); + + if (replace) + { + swap (*p1, *p2); + swap (p1, p2); // Setup for constraints copying below. + } + + p1->constraints.insert (p1->constraints.end (), + make_move_iterator (p2->constraints.begin ()), + make_move_iterator (p2->constraints.end ())); + + if (!replace) + return false; + } + else + { + string n (pkg.available->id.name); // Note: copy; see emplace() below. 
+ + level4 ([&]{trace << "add " << n << " " << pkg.available->version;}); + + // This is the first time we are adding this package name to the + // map. If it is already selected, then we need to make sure that + // packages that already depend on it (called dependents) are ok + // with the up/downgrade. We will also have to keep doing this + // every time we choose a new available package above. So what + // we are going to do is copy the dependents' constrains over to + // our constraint list; this way they will be automatically taken + // into account by the rest of the logic. + // + const shared_ptr& sp (pkg.selected); + const shared_ptr& ap (pkg.available); + + int r; + if (sp != nullptr && + sp->state == package_state::configured && + (r = sp->version.compare (ap->version)) != 0) + { + using query = query; + + for (const auto& pd: db.query (query::name == n)) + { + if (!pd.constraint) + continue; + + const version& v (ap->version); + const dependency_constraint& c (*pd.constraint); + + if (satisfies (v, c)) + { + pkg.constraints.emplace_back (pd.name, c); + continue; + } + + fail << "unable to " << (r < 0 ? "up" : "down") << "grade " + << "package " << n << " " << sp->version << " to " << v << + info << pd.name << " depends on (" << n << " " << c << ")" << + info << "explicitly specify " << n << " version to manually " + << "satisfy this constraint"; + } + } + + i = map_.emplace (move (n), data_type {end (), move (pkg)}).first; + } + + // Now collect all the prerequisites recursively. But first "prune" + // this process if the package is already configured since that would + // mean all its prerequisites are configured as well. Note that this + // is not merely an optimization: the package could be an orphan in + // which case the below logic will fail (no repository in which to + // search for prerequisites). By skipping the prerequisite check we + // are able to gracefully handle configured orphans. + // + const build_package& p (i->second.package); + const shared_ptr& sp (p.selected); + const shared_ptr& ap (p.available); + + if (sp != nullptr && + sp->version == ap->version && + sp->state == package_state::configured) + return true; + + // Show how we got here if things go wrong. + // + auto g ( + make_exception_guard ( + [&ap] () + { + info << "while satisfying " << ap->id.name << " " << ap->version; + })); + + const shared_ptr& ar (p.repository); + const string& name (ap->id.name); + + for (const dependency_alternatives& da: ap->dependencies) + { + if (da.conditional) // @@ TODO + fail << "conditional dependencies are not yet supported"; + + if (da.size () != 1) // @@ TODO + fail << "multiple dependency alternatives not yet supported"; + + const dependency& d (da.front ()); + + // The first step is to always find the available package even + // if, in the end, it won't be the one we select. If we cannot + // find the package then that means the repository is broken. + // And if we have no repository to look in, then that means the + // package is an orphan (we delay this check until we actually + // need the repository to allow orphans without prerequisites). 
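+        // (An orphan here is a configured package whose repository has
+        // since been removed from the configuration, for example by a
+        // later cfg-fetch; it can stay as it is but we have no base
+        // repository against which to resolve any of its dependencies.)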
+ // + if (ar == nullptr) + fail << "package " << name << " " << ap->version << " is orphaned" << + info << "explicitly upgrade it to a new version"; + + auto rp (find_available (db, d.name, ar, d.constraint)); + + if (rp.first == nullptr) + { + diag_record dr; + dr << fail << "unknown prerequisite " << d << " of package " << name; + + if (!ar->location.empty ()) + dr << info << "repository " << ar->location << " appears to " + << "be broken"; + } + + // Next see if this package is already selected. If we already + // have it in the configuraion and it satisfies our dependency + // constraint, then we don't want to be forcing its upgrade (or, + // worse, downgrade). + // + bool force (false); + shared_ptr dsp (db.find (d.name)); + if (dsp != nullptr) + { + if (dsp->state == package_state::broken) + fail << "unable to build broken package " << d.name << + info << "use 'pkg-purge --force' to remove"; + + if (satisfies (dsp->version, d.constraint)) + rp = make_available (options, cd, db, dsp); + else + // Remember that we may be forcing up/downgrade; we will deal + // with it below. + // + force = true; + } + + build_package dp { + dsp, + rp.first, + rp.second, + false, // Hold package. + false, // Hold version. + {}, // Constraints. + false}; // Reconfigure. + + // Add our constraint, if we have one. + // + if (d.constraint) + dp.constraints.emplace_back (name, *d.constraint); + + // Now collect this prerequisite. If it was actually collected + // (i.e., it wasn't already there) and we are forcing an upgrade + // and the version is not held, then warn, unless we are running + // quiet. Downgrade or upgrade of a held version -- refuse. + // + if (collect (options, cd, db, move (dp)) && force) + { + const version& sv (dsp->version); + const version& av (rp.first->version); + + bool u (av > sv); + bool f (dsp->hold_version || !u); // Fail if downgrade or held. + + if (verb || f) + { + bool c (d.constraint); + diag_record dr; + + (f ? dr << fail : dr << warn) + << "package " << name << " dependency on " + << (c ? "(" : "") << d << (c ? ")" : "") << " is forcing " + << (u ? "up" : "down") << "grade of " << d.name << " " << sv + << " to " << av; + + if (dsp->hold_version) + dr << info << "package version " << d.name << " " << sv + << " is held"; + + if (f) + dr << info << "explicitly request version " + << (u ? "up" : "down") << "grade to continue"; + } + } + } + + return true; + } + + // Order the previously-collected package with the specified name + // returning its positions. If reorder is true, then reorder this + // package to be considered as "early" as possible. + // + iterator + order (const string& name, bool reorder = true) + { + // Every package that we order should have already been collected. + // + auto mi (map_.find (name)); + assert (mi != map_.end ()); + + // If this package is already in the list, then that would also + // mean all its prerequisites are in the list and we can just + // return its position. Unless we want it reordered. + // + iterator& pos (mi->second.position); + if (pos != end ()) + { + if (reorder) + erase (pos); + else + return pos; + } + + // Order all the prerequisites of this package and compute the + // position of its "earliest" prerequisite -- this is where it + // will be inserted. + // + build_package& p (mi->second.package); + const shared_ptr& sp (p.selected); + const shared_ptr& ap (p.available); + + assert (ap != nullptr); // No dependents allowed here. + + // Unless this package needs something to be before it, add it to + // the end of the list. 
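+      // To illustrate (hypothetical names): ordering libbaz which depends
+      // on libfoo first orders libfoo (appending it to the list) and then
+      // inserts libbaz in front of it, giving the list (libbaz libfoo).
+      // Since the build order is the reverse of the list, libfoo ends up
+      // built before libbaz, as required.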
+ // + iterator i (end ()); + + // Figure out if j is before i, in which case set i to j. The goal + // here is to find the position of our "earliest" prerequisite. + // + auto update = [this, &i] (iterator j) + { + for (iterator k (j); i != j && k != end ();) + if (++k == i) + i = j; + }; + + // Similar to collect(), we can prune if the package is already + // configured, right? Not so fast. While in collect() we didn't + // need to add prerequisites of such a package, it doesn't mean + // that they actually never ended up in the map via another way. + // For example, some can be a part of the initial selection. And + // in that case we must order things properly. + // + // So here we are going to do things differently depending on + // whether the package is already configured or not. If it is, + // then that means we can use its prerequisites list. Otherwise, + // we use the manifest data. + // + if (sp != nullptr && + sp->version == ap->version && + sp->state == package_state::configured) + { + for (const auto& p: sp->prerequisites) + { + const string& name (p.first.object_id ()); + + // The prerequisites may not necessarily be in the map. + // + if (map_.find (name) != map_.end ()) + update (order (name, false)); + } + } + else + { + // We are iterating in reverse so that when we iterate over + // the dependency list (also in reverse), prerequisites will + // be built in the order that is as close to the manifest as + // possible. + // + for (const dependency_alternatives& da: + reverse_iterate (p.available->dependencies)) + { + assert (!da.conditional && da.size () == 1); // @@ TODO + const dependency& d (da.front ()); + + update (order (d.name, false)); + } + } + + return pos = insert (i, p); + } + + // If a configured package is being up/down-graded then that means + // all its dependents could be affected and we have to reconfigure + // them. This function examines every package that is already on + // the list and collects and orders all its dependents. + // + // Should we reconfigure just the direct depends or also include + // indirect, recursively? Consider this plauisible scenario as an + // example: We are upgrading a package to a version that provides + // an additional API. When its direct dependent gets reconfigured, + // it notices this new API and exposes its own extra functionality + // that is based on it. Now it would make sense to let its own + // dependents (which would be our original package's indirect ones) + // to also notice this. + // + void + collect_order_dependents (database& db) + { + // For each package on the list we want to insert all its dependents + // before it so that they get configured after the package on which + // they depend is configured (remember, our build order is reverse, + // with the last package being built first). This applies to both + // packages that are already on the list as well as the ones that + // we add, recursively. + // + for (auto i (begin ()); i != end (); ++i) + { + const build_package& p (*i); + + // Prune if this is not a configured package being up/down-graded + // or reconfigured. 
+ // + if (p.reconfigure ()) + collect_order_dependents (db, i); + } + } + + void + collect_order_dependents (database& db, iterator pos) + { + tracer trace ("collect_order_dependents"); + + const build_package& p (*pos); + const string& n (p.selected->name); + + using query = query; + + for (auto& pd: db.query (query::name == n)) + { + string& dn (pd.name); + + // We can have three cases here: the package is already on the + // list, the package is in the map (but not on the list) and it + // is in neither. + // + auto i (map_.find (dn)); + + if (i != map_.end ()) + { + build_package& dp (i->second.package); + + // Force reconfiguration in both cases. + // + dp.reconfigure_ = true; + + if (i->second.position == end ()) + { + // Clean the build_package object up to make sure we don't + // inadvertently force up/down-grade. + // + dp.available = nullptr; + dp.repository = nullptr; + + i->second.position = insert (pos, dp); + } + } + else + { + shared_ptr dsp (db.load (dn)); + + i = map_.emplace ( + move (dn), + data_type + { + end (), + build_package { + move (dsp), + nullptr, + nullptr, + false, // Hold package. + false, // Hold version. + {}, // Constraints. + true} // Reconfigure. + }).first; + + i->second.position = insert (pos, i->second.package); + } + + // Collect our own dependents inserting them before us. + // + collect_order_dependents (db, i->second.position); + } + } + + private: + struct data_type + { + iterator position; // Note: can be end(), see collect(). + build_package package; + }; + + map map_; + }; + + int + pkg_build (const pkg_build_options& o, cli::scanner& args) + { + tracer trace ("pkg_build"); + + const dir_path& c (o.directory ()); + level4 ([&]{trace << "configuration: " << c;}); + + if (!args.more ()) + fail << "package name argument expected" << + info << "run 'bpkg help pkg-build' for more information"; + + database db (open (c, trace)); + + // Note that the session spans all our transactions. The idea here is + // that selected_package objects in the build_packages list below will + // be cached in this session. When subsequent transactions modify any + // of these objects, they will modify the cached instance, which means + // our list will always "see" their updated state. + // + session s; + + // Assemble the list of packages we will need to build. + // + build_packages pkgs; + strings names; + { + transaction t (db.begin ()); + + shared_ptr root (db.load ("")); + + while (args.more ()) + { + const char* s (args.next ()); + + // Reduce all the potential variations (archive, directory, package + // name, package name/version) to a single available_package object. + // + string n; + version v; + + shared_ptr ar; + shared_ptr ap; + + // Is this a package archive? + // + try + { + path a (s); + if (exists (a)) + { + package_manifest m (pkg_verify (o, a, true, false)); + + // This is a package archive (note that we shouldn't throw + // failed from here on). + // + level4 ([&]{trace << "archive " << a;}); + n = m.name; + v = m.version; + ar = root; + ap = make_shared (move (m)); + ap->locations.push_back (package_location {root, move (a)}); + } + } + catch (const invalid_path&) + { + // Not a valid path so cannot be an archive. + } + catch (const failed&) + { + // Not a valid package archive. + } + + // Is this a package directory? + // + try + { + dir_path d (s); + if (exists (d)) + { + package_manifest m (pkg_verify (d, true, false)); + + // This is a package directory (note that we shouldn't throw + // failed from here on). 
+ // + level4 ([&]{trace << "directory " << d;}); + n = m.name; + v = m.version; + ap = make_shared (move (m)); + ar = root; + ap->locations.push_back (package_location {root, move (d)}); + } + } + catch (const invalid_path&) + { + // Not a valid path so cannot be an archive. + } + catch (const failed&) + { + // Not a valid package archive. + } + + // Then it got to be a package name with optional version. + // + if (ap == nullptr) + { + n = parse_package_name (s); + v = parse_package_version (s); + level4 ([&]{trace << "package " << n << "; version " << v;}); + + // Either get the user-specified version or the latest. + // + auto rp ( + v.empty () + ? find_available (db, n, root, nullopt) + : find_available (db, n, root, + dependency_constraint {comparison::eq, v})); + + ap = rp.first; + ar = rp.second; + } + + // Load the package that may have already been selected and + // figure out what exactly we need to do here. The end goal + // is the available_package object corresponding to the actual + // package that we will be building (which may or may not be + // the same as the selected package). + // + shared_ptr sp (db.find (n)); + + if (sp != nullptr && sp->state == package_state::broken) + fail << "unable to build broken package " << n << + info << "use 'pkg-purge --force' to remove"; + + bool found (true); + + // If the user asked for a specific version, then that's what + // we ought to be building. + // + if (!v.empty ()) + { + for (;;) + { + if (ap != nullptr) // Must be that version, see above. + break; + + // Otherwise, our only chance is that the already selected + // object is that exact version. + // + if (sp != nullptr && sp->version == v) + break; // Derive ap from sp below. + + found = false; + break; + } + } + // + // No explicit version was specified by the user. + // + else + { + if (ap != nullptr) + { + // Even if this package is already in the configuration, should + // we have a newer version, we treat it as an upgrade request; + // otherwise, why specify the package in the first place? We just + // need to check if what we already have is "better" (i.e., newer). + // + if (sp != nullptr && ap->id.version < sp->version) + ap = nullptr; // Derive ap from sp below. + } + else + { + if (sp == nullptr) + found = false; + + // Otherwise, derive ap from sp below. + } + } + + if (!found) + { + diag_record dr; + + dr << fail << "unknown package " << n; + if (!v.empty ()) + dr << " " << v; + + // Let's help the new user out here a bit. + // + if (db.query_value () == 0) + dr << info << "configuration " << c << " has no repositories" + << info << "use 'bpkg cfg-add' to add a repository"; + else if (db.query_value () == 0) + dr << info << "configuration " << c << " has no available packages" + << info << "use 'bpkg cfg-fetch' to fetch available packages " + << "list"; + } + + // If the available_package object is still NULL, then it means + // we need to get one corresponding to the selected package. + // + if (ap == nullptr) + { + assert (sp != nullptr); + + auto rp (make_available (o, c, db, sp)); + ap = rp.first; + ar = rp.second; // Could be NULL (orphan). + } + + // Finally add this package to the list. + // + level4 ([&]{trace << "collect " << ap->id.name << " " + << ap->version;}); + + build_package p { + move (sp), + move (ap), + move (ar), + true, // Hold package. + !v.empty (), // Hold version. + {}, // Constraints. + false}; // Reconfigure. + + // "Fix" the version the user asked for by adding the '==' constraint. 
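+        // That is, 'bpkg pkg-build libfoo/1.2.3' (hypothetical package)
+        // behaves as if something depended on (libfoo == 1.2.3): should a
+        // collected dependency later require an incompatible libfoo
+        // version, we fail with a diagnostic rather than silently
+        // overriding the version the user explicitly asked for.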
+ // + if (!v.empty ()) + p.constraints.emplace_back ( + "command line", + dependency_constraint {comparison::eq, v}); + + pkgs.collect (o, c, db, move (p)); + names.push_back (n); + } + + // Now that we have collected all the package versions that we need + // to build, arrange them in the "dependency order", that is, with + // every package on the list only possibly depending on the ones + // after it. Iterate over the names we have collected on the previous + // step in reverse so that when we iterate over the packages (also in + // reverse), things will be built as close as possible to the order + // specified by the user (it may still get altered if there are + // dependencies between the specified packages). + // + for (const string& n: reverse_iterate (names)) + pkgs.order (n); + + // Finally, collect and order all the dependents that we will need + // to reconfigure because of the up/down-grades of packages that + // are now on the list. + // + pkgs.collect_order_dependents (db); + + t.commit (); + } + + // Print what we are going to do, then ask for the user's confirmation. + // + if (o.print_only () || !o.yes ()) + { + for (const build_package& p: reverse_iterate (pkgs)) + { + const shared_ptr& sp (p.selected); + const shared_ptr& ap (p.available); + + const char* act; + string n; + version v; + + if (ap == nullptr) + { + // This is a dependent needing reconfiguration. + // + assert (sp != nullptr && p.reconfigure ()); + + n = sp->name; + act = "reconfigure"; + } + else + { + n = ap->id.name; + v = ap->version; + + // Even if we already have this package selected, we have to + // make sure it is configured and updated. + // + if (sp == nullptr || sp->version == v) + act = p.reconfigure () ? "reconfigure/build" : "build"; + else + act = sp->version < v ? "upgrade" : "downgrade"; + } + + if (o.print_only ()) + cout << act << " " << n << (v.empty () ? "" : " ") << v << endl; + else if (verb) + text << act << " " << n << (v.empty () ? "" : " ") << v; + } + } + + if (o.print_only ()) + return 0; + + // Ask the user if we should continue. + // + if (!(o.yes () || yn_prompt ("continue? [Y/n]", 'y'))) + return 1; + + // Ok, we have "all systems go". The overall action plan is as follows. + // + // 1. disfigure up/down-graded, reconfigured [left to right] + // 2. purge up/down-graded + // 3. fetch new, up/down-graded + // 4. unpack new, up/down-graded + // 5. configure all [right to left] + // 6. build user selection [right to left] + // + // Note that for some actions, e.g., purge or fetch, the order is not + // really important. We will, however, do it right to left since that + // is the order closest to that of the user selection. + // + // We are also going to combine purge/fetch/unpack into a single step + // and use the replace mode so it will become just fetch/unpack. + // + + // disfigure + // + for (const build_package& p: pkgs) + { + // We are only interested in configured packages that are either + // up/down-graded or need reconfiguration (e.g., dependents). + // + if (!p.reconfigure ()) + continue; + + const shared_ptr& sp (p.selected); + + // Each package is disfigured in its own transaction, so that we + // always leave the configuration in a valid state. + // + transaction t (db.begin ()); + pkg_disfigure (c, o, t, sp); // Commits the transaction. 
+ assert (sp->state == package_state::unpacked); + + if (verb) + text << "disfigured " << sp->name << " " << sp->version; + } + + // fetch/unpack + // + for (build_package& p: reverse_iterate (pkgs)) + { + shared_ptr& sp (p.selected); + const shared_ptr& ap (p.available); + + if (ap == nullptr) // Skip dependents. + continue; + + // Fetch if this is a new package or if we are up/down-grading. + // + if (sp == nullptr || sp->version != ap->version) + { + sp.reset (); // For the directory case below. + + // Distinguish between the package and archive/directory cases. + // + const package_location& pl (ap->locations[0]); // Got to have one. + + if (pl.repository.object_id () != "") // Special root? + { + transaction t (db.begin ()); + sp = pkg_fetch (o, + c, + t, + ap->id.name, + ap->version, + true); // Replace; commits the transaction. + } + else if (exists (pl.location)) // Directory case is handled by unpack. + { + transaction t (db.begin ()); + sp = pkg_fetch (o, + c, + t, + pl.location, // Archive path. + true, // Replace + false); // Don't purge; commits the transaction. + } + + if (sp != nullptr) // Actually unpacked something? + { + assert (sp->state == package_state::fetched); + + if (verb) + text << "fetched " << sp->name << " " << sp->version; + } + } + + // Unpack. Note that the package can still be NULL if this is the + // directory case (see the fetch code above). + // + if (sp == nullptr || sp->state == package_state::fetched) + { + if (sp != nullptr) + { + transaction t (db.begin ()); + sp = pkg_unpack (o, c, t, ap->id.name); // Commits the transaction. + } + else + { + const package_location& pl (ap->locations[0]); + assert (pl.repository.object_id () == ""); // Special root. + + transaction t (db.begin ()); + sp = pkg_unpack (c, + t, + path_cast (pl.location), + true, // Replace. + false); // Don't purge; commits the transaction. + } + + assert (sp->state == package_state::unpacked); + + if (verb) + text << "unpacked " << sp->name << " " << sp->version; + } + } + + // configure + // + for (const build_package& p: reverse_iterate (pkgs)) + { + const shared_ptr& sp (p.selected); + + assert (sp != nullptr); + + // We configure everything that isn't already configured. + // + if (sp->state == package_state::configured) + continue; + + transaction t (db.begin ()); + pkg_configure (c, o, t, sp, strings ()); // Commits the transaction. + assert (sp->state == package_state::configured); + + if (verb) + text << "configured " << sp->name << " " << sp->version; + } + + // Small detour: update the hold state. While we could have tried + // to "weave" it into one of the previous actions, things there + // are already convoluted enough. + // + for (const build_package& p: reverse_iterate (pkgs)) + { + const shared_ptr& sp (p.selected); + assert (sp != nullptr); + + // Note that we should only "increase" the hold state. + // + bool hp (p.hold_package && sp->hold_package != p.hold_package); + bool hv (p.hold_version && sp->hold_version != p.hold_version); + + if (hp || hv) + { + if (hp) sp->hold_package = true; + if (hv) sp->hold_version = true; + + transaction t (db.begin ()); + db.update (sp); + t.commit (); + + if (verb > 1) + { + if (hp) + text << "hold package " << sp->name; + + if (hv) + text << "hold version " << sp->name << " " << sp->version; + } + } + } + + if (o.configure_only ()) + return 0; + + // update + // + for (const build_package& p: reverse_iterate (pkgs)) + { + const shared_ptr& sp (p.selected); + + // Update the user selection only. 
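+      // (That is, only the packages explicitly named on the command line
+      // are updated here; their prerequisites have already been configured
+      // above and, presumably, will be updated by the build system as
+      // needed when these packages are updated.)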
+ // + if (find (names.begin (), names.end (), sp->name) == names.end ()) + continue; + + pkg_update (c, o, sp); + + if (verb) + text << "updated " << sp->name << " " << sp->version; + } + + return 0; + } +} diff --git a/bpkg/pkg-drop b/bpkg/pkg-drop new file mode 100644 index 0000000..cb70ed0 --- /dev/null +++ b/bpkg/pkg-drop @@ -0,0 +1,17 @@ +// file : bpkg/pkg-drop -*- C++ -*- +// copyright : Copyright (c) 2014-2015 Code Synthesis Ltd +// license : MIT; see accompanying LICENSE file + +#ifndef BPKG_PKG_DROP +#define BPKG_PKG_DROP + +#include +#include + +namespace bpkg +{ + int + pkg_drop (const pkg_drop_options&, cli::scanner& args); +} + +#endif // BPKG_PKG_DROP diff --git a/bpkg/pkg-drop-options.cli b/bpkg/pkg-drop-options.cli new file mode 100644 index 0000000..6409ee8 --- /dev/null +++ b/bpkg/pkg-drop-options.cli @@ -0,0 +1,62 @@ +// file : bpkg/pkg-drop-options.cli +// copyright : Copyright (c) 2014-2015 Code Synthesis Ltd +// license : MIT; see accompanying LICENSE file + +include ; + +"\section=1" +"\name=bpkg-pkg-drop" +"\summary=drop one or more packages" + +/* +"\h{SYNOPSIS} + +bpkg pkg-drop [] ..." + +"\h{DESCRIPTION} + +The \cb{pkg-drop} command drops one or more packages from the configuration. +If the packages being dropped still have dependents, then those will have +to be drop as well and you will be prompted for a confirmation. Similarly, +if the packages being dropped have prerequisites that are no longer needed, +you will be offered to drop those as well. + +The \cb{pkg-drop} command also supports several \cb{--*-only} options that +allow you to limit the amount of work that will be done. +*/ + +namespace bpkg +{ + class pkg_drop_options: configuration_options + { + bool --yes|-y + { + "Assume the answer to all prompts is \cb{yes}. Note that this option + does not apply to the dropping of dependents; use \cb{--drop-dependent} + for that." + } + + bool --no|-n + { + "Assume the answer to all prompts is \cb{no}. Only makes sense together + with \cb{--print-only|-p}." + } + + bool --drop-dependent + { + "Don't warn about or ask for confirmation of dropping dependent + packages." + } + + bool --disfigure-only + { + "Disfigure all the packages but don't purge." + } + + bool --print-only|-p + { + "Print to \cb{STDOUT} what would be done without actually doing + anything." + } + }; +} diff --git a/bpkg/pkg-drop.cxx b/bpkg/pkg-drop.cxx new file mode 100644 index 0000000..f7d897d --- /dev/null +++ b/bpkg/pkg-drop.cxx @@ -0,0 +1,512 @@ +// file : bpkg/pkg-drop.cxx -*- C++ -*- +// copyright : Copyright (c) 2014-2015 Code Synthesis Ltd +// license : MIT; see accompanying LICENSE file + +#include + +#include +#include +#include +#include // cout +#include // reference_wrapper + +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include + +using namespace std; +using namespace butl; + +namespace bpkg +{ + enum class drop_reason + { + user, // User selection. + dependent, // Dependent of a user or another dependent. + prerequisite // Prerequisite of a user, dependent, or another prerequisite. + }; + + struct drop_package + { + shared_ptr package; + drop_reason reason; + }; + + // List of packages that are dependent on the user selection. + // + struct dependent_name + { + string name; + string prq_name; // Prerequisite package name. + }; + using dependent_names = vector; + + // A "dependency-ordered" list of packages and their prerequisites. 
+  // That is, every package on the list only possibly depending on the
+  // ones after it. In a nutshell, the usage is as follows: we first add
+  // the packages specified by the user (the "user selection"). We then
+  // collect all the dependent packages of the user selection, if any.
+  // These will either have to be dropped as well or we cannot continue.
+  // If the user gave the go-ahead to drop the dependents, then, for our
+  // purposes, this list of dependents can from now on be treated as if
+  // it was a part of the user selection. The next step is to collect all
+  // the non-held prerequisites of the user selection with the goal of
+  // figuring out which ones will no longer be needed and offering to
+  // drop them as well. This part is a bit tricky and has to be done in
+  // three steps: We first collect all the prerequisites that we could
+  // possibly be dropping. We then order all the packages. And, finally,
+  // we filter out prerequisites that we cannot drop. See the comment to
+  // the call to collect_prerequisites() for details on why it has to be
+  // done this way.
+  //
+  struct drop_packages: list<reference_wrapper<drop_package>>
+  {
+    // Collect a package to be dropped, by default, as a user selection.
+    //
+    bool
+    collect (shared_ptr<selected_package> p, drop_reason r = drop_reason::user)
+    {
+      string n (p->name); // Because of move(p) below.
+      return map_.emplace (move (n), data_type {end (), {move (p), r}}).second;
+    }
+
+    // Collect all the dependents of the user selection, returning the
+    // list of their names. Dependents of dependents are collected
+    // recursively.
+    //
+    dependent_names
+    collect_dependents (database& db)
+    {
+      dependent_names dns;
+
+      for (const auto& pr: map_)
+      {
+        const drop_package& dp (pr.second.package);
+
+        // Unconfigured package cannot have any dependents.
+        //
+        if (dp.reason == drop_reason::user &&
+            dp.package->state == package_state::configured)
+          collect_dependents (db, dns, dp.package);
+      }
+
+      return dns;
+    }
+
+    void
+    collect_dependents (database& db,
+                        dependent_names& dns,
+                        const shared_ptr<selected_package>& p)
+    {
+      using query = query<package_dependent>;
+
+      for (auto& pd: db.query<package_dependent> (query::name == p->name))
+      {
+        const string& dn (pd.name);
+
+        if (map_.find (dn) == map_.end ())
+        {
+          shared_ptr<selected_package> dp (db.load<selected_package> (dn));
+          dns.push_back (dependent_name {dn, p->name});
+          collect (dp, drop_reason::dependent);
+          collect_dependents (db, dns, dp);
+        }
+      }
+    }
+
+    // Collect prerequisites of the user selection and its dependents,
+    // returning true if any were collected. Prerequisites of prerequisites
+    // are collected recursively.
+    //
+    bool
+    collect_prerequisites (database& db)
+    {
+      bool r (false);
+
+      for (const auto& pr: map_)
+      {
+        const drop_package& dp (pr.second.package);
+
+        // Unconfigured package cannot have any prerequisites.
+        //
+        if ((dp.reason == drop_reason::user ||
+             dp.reason == drop_reason::dependent) &&
+            dp.package->state == package_state::configured)
+          r = collect_prerequisites (db, dp.package) || r;
+      }
+
+      return r;
+    }
+
+    bool
+    collect_prerequisites (database& db, const shared_ptr<selected_package>& p)
+    {
+      bool r (false);
+
+      for (const auto& pair: p->prerequisites)
+      {
+        const string& pn (pair.first.object_id ());
+
+        if (map_.find (pn) == map_.end ())
+        {
+          shared_ptr<selected_package> pp (db.load<selected_package> (pn));
+
+          if (!pp->hold_package) // Prune held packages.
+          {
+            collect (pp, drop_reason::prerequisite);
+            collect_prerequisites (db, pp);
+            r = true;
+          }
+        }
+      }
+
+      return r;
+    }
+
+    // Order the previously-collected package with the specified name
+    // returning its position.
+    //
+    iterator
+    order (const string& name)
+    {
+      // Every package that we order should have already been collected.
+      //
+      auto mi (map_.find (name));
+      assert (mi != map_.end ());
+
+      // If this package is already in the list, then that would also
+      // mean all its prerequisites are in the list and we can just
+      // return its position.
+      //
+      iterator& pos (mi->second.position);
+      if (pos != end ())
+        return pos;
+
+      // Order all the prerequisites of this package and compute the
+      // position of its "earliest" prerequisite -- this is where it
+      // will be inserted.
+      //
+      drop_package& dp (mi->second.package);
+      const shared_ptr& p (dp.package);
+
+      // Unless this package needs something to be before it, add it to
+      // the end of the list.
+      //
+      iterator i (end ());
+
+      // Figure out if j is before i, in which case set i to j. The goal
+      // here is to find the position of our "earliest" prerequisite.
+      //
+      auto update = [this, &i] (iterator j)
+      {
+        for (iterator k (j); i != j && k != end ();)
+          if (++k == i)
+            i = j;
+      };
+
+      // Only configured packages have prerequisites.
+      //
+      if (p->state == package_state::configured)
+      {
+        for (const auto& pair: p->prerequisites)
+        {
+          const string& pn (pair.first.object_id ());
+
+          // The prerequisites may not necessarily be in the map (e.g.,
+          // a held package that we pruned).
+          //
+          if (map_.find (pn) != map_.end ())
+            update (order (pn));
+        }
+      }
+
+      return pos = insert (i, dp);
+    }
+
+    // Remove prerequisite packages that we cannot possibly drop, returning
+    // true if any remain.
+    //
+    bool
+    filter_prerequisites (database& db)
+    {
+      bool r (false);
+
+      // Iterate from "more" to "less"-dependent.
+      //
+      for (auto i (begin ()); i != end (); )
+      {
+        const drop_package& dp (*i);
+
+        if (dp.reason == drop_reason::prerequisite)
+        {
+          const shared_ptr& p (dp.package);
+
+          bool keep (true);
+
+          // Get our dependents (which, BTW, could only have been before us
+          // on the list). If they are all in the map, then we can be dropped.
+          //
+          using query = query;
+
+          for (auto& pd: db.query (query::name == p->name))
+          {
+            if (map_.find (pd.name) == map_.end ())
+            {
+              keep = false;
+              break;
+            }
+          }
+
+          if (!keep)
+          {
+            i = erase (i);
+            map_.erase (p->name);
+            continue;
+          }
+
+          r = true;
+        }
+
+        ++i;
+      }
+
+      return r;
+    }
+
+  private:
+    struct data_type
+    {
+      iterator position; // Note: can be end(), see collect().
+      drop_package package;
+    };
+
+    map map_;
+  };
+
+  int
+  pkg_drop (const pkg_drop_options& o, cli::scanner& args)
+  {
+    tracer trace ("pkg_drop");
+
+    if (o.yes () && o.no ())
+      fail << "both --yes|-y and --no|-n specified";
+
+    const dir_path& c (o.directory ());
+    level4 ([&]{trace << "configuration: " << c;});
+
+    if (!args.more ())
+      fail << "package name argument expected" <<
+        info << "run 'bpkg help pkg-drop' for more information";
+
+    database db (open (c, trace));
+
+    // Note that the session spans all our transactions. The idea here is
+    // that drop_package objects in the drop_packages list below will be
+    // cached in this session. When subsequent transactions modify any of
+    // these objects, they will modify the cached instance, which means
+    // our list will always "see" their updated state.
+    //
+    session s;
+
+    // Assemble the list of packages we will need to drop.
+    //
+    drop_packages pkgs;
+    bool drop_prq (false);
+    {
+      transaction t (db.begin ());
+
+      // The first step is to load and collect all the packages specified
+      // by the user.
+ // + strings names; + while (args.more ()) + { + string n (args.next ()); + level4 ([&]{trace << "package " << n;}); + + shared_ptr p (db.find (n)); + + if (p == nullptr) + fail << "package " << n << " does not exist in configuration " << c; + + if (p->state == package_state::broken) + fail << "unable to drop broken package " << n << + info << "use 'pkg-purge --force' to remove"; + + if (pkgs.collect (move (p))) + names.push_back (move (n)); + } + + // The next step is to see if there are any dependents that are not + // already on the list. We will either have to drop those as well or + // abort. + // + dependent_names dnames (pkgs.collect_dependents (db)); + if (!dnames.empty () && !o.drop_dependent ()) + { + { + diag_record dr (text); + + dr << "following dependent packages will have to be dropped " + << "as well:"; + + for (const dependent_name& dn: dnames) + dr << text << dn.name + << " (because dropping " << dn.prq_name << ")"; + } + + if (o.yes ()) + fail << "refusing to drop dependent packages with just --yes" << + info << "specify --drop-dependent to confirm"; + + if (o.no () || !yn_prompt ("drop dependent packages? [y/N]", 'n')) + return 1; + } + + // Collect all the prerequisites that are not held. These will be + // the candidates to drop as well. Note that we cannot make the + // final decision who we can drop until we have the complete and + // ordered list of all the packages that we could potentially be + // dropping. The ordered part is important: we will have to decide + // about the "more dependent" prerequisite before we can decide + // about the "less dependent" one since the former could be depending + // on the latter and, if that's the case and "more" cannot be dropped, + // then neither can "less". + // + pkgs.collect_prerequisites (db); + + // Now that we have collected all the packages we could possibly be + // dropping, arrange them in the "dependency order", that is, with + // every package on the list only possibly depending on the ones + // after it. + // + // First order the user selection so that we stay as close to the + // order specified by the user as possible. Then order the dependent + // packages. Since each of them depends on one or more packages from + // the user selection, it will be inserted before the first package + // on which it depends. + // + for (const string& n: names) + pkgs.order (n); + + for (const dependent_name& dn: dnames) + pkgs.order (dn.name); + + // Filter out prerequisites that we cannot possibly drop (e.g., they + // have dependents other than the ones we are dropping). If there are + // some that we can drop, ask the user for confirmation. + // + if (pkgs.filter_prerequisites (db) && !(drop_prq = o.yes ()) && !o.no ()) + { + { + diag_record dr (text); + + dr << "following prerequisite packages were automatically " + << "built and will no longer be necessary:"; + + for (const drop_package& dp: pkgs) + { + if (dp.reason == drop_reason::prerequisite) + dr << text << dp.package->name; + } + } + + drop_prq = yn_prompt ("drop prerequisite packages? [Y/n]", 'y'); + } + + t.commit (); + } + + // Print what we are going to do, then ask for the user's confirmation. + // + if (o.print_only () || !(o.yes () || o.no ())) + { + for (const drop_package& dp: pkgs) + { + // Skip prerequisites if we weren't instructed to drop them. 
+ // + if (dp.reason == drop_reason::prerequisite && !drop_prq) + continue; + + const shared_ptr& p (dp.package); + + if (o.print_only ()) + cout << "drop " << p->name << endl; + else if (verb) + text << "drop " << p->name; + } + + if (o.print_only ()) + return 0; + } + + // Ask the user if we should continue. + // + if (o.no () || !(o.yes () || yn_prompt ("continue? [Y/n]", 'y'))) + return 1; + + // All that's left to do is first disfigure configured packages and + // then purge all of them. We do both left to right (i.e., from more + // dependent to less dependent). For disfigure this order is required. + // For purge, it will be the order closest to the one specified by the + // user. + // + for (const drop_package& dp: pkgs) + { + // Skip prerequisites if we weren't instructed to drop them. + // + if (dp.reason == drop_reason::prerequisite && !drop_prq) + continue; + + const shared_ptr& p (dp.package); + + if (p->state != package_state::configured) + continue; + + // Each package is disfigured in its own transaction, so that we + // always leave the configuration in a valid state. + // + transaction t (db.begin ()); + pkg_disfigure (c, o, t, p); // Commits the transaction. + assert (p->state == package_state::unpacked); + + if (verb) + text << "disfigured " << p->name; + } + + if (o.disfigure_only ()) + return 0; + + // Purge. + // + for (const drop_package& dp: pkgs) + { + // Skip prerequisites if we weren't instructed to drop them. + // + if (dp.reason == drop_reason::prerequisite && !drop_prq) + continue; + + const shared_ptr& p (dp.package); + + assert (p->state == package_state::fetched || + p->state == package_state::unpacked); + + transaction t (db.begin ()); + pkg_purge (c, t, p); // Commits the transaction, p is now transient. + + if (verb) + text << "purged " << p->name; + } + + return 0; + } +} diff --git a/bpkg/pkg-fetch.cxx b/bpkg/pkg-fetch.cxx index 1718c5b..8926a9d 100644 --- a/bpkg/pkg-fetch.cxx +++ b/bpkg/pkg-fetch.cxx @@ -180,11 +180,11 @@ namespace bpkg if (db.query_value () == 0) fail << "configuration " << c << " has no repositories" << - info << "use 'bpkg rep-add' to add a repository"; + info << "use 'bpkg cfg-add' to add a repository"; if (db.query_value () == 0) fail << "configuration " << c << " has no available packages" << - info << "use 'bpkg rep-fetch' to fetch available packages list"; + info << "use 'bpkg cfg-fetch' to fetch available packages list"; shared_ptr ap ( db.find (available_package_id (n, v))); diff --git a/bpkg/rep-add b/bpkg/rep-add deleted file mode 100644 index 10d85f4..0000000 --- a/bpkg/rep-add +++ /dev/null @@ -1,17 +0,0 @@ -// file : bpkg/rep-add -*- C++ -*- -// copyright : Copyright (c) 2014-2015 Code Synthesis Ltd -// license : MIT; see accompanying LICENSE file - -#ifndef BPKG_REP_ADD -#define BPKG_REP_ADD - -#include -#include - -namespace bpkg -{ - int - rep_add (const rep_add_options&, cli::scanner& args); -} - -#endif // BPKG_REP_ADD diff --git a/bpkg/rep-add-options.cli b/bpkg/rep-add-options.cli deleted file mode 100644 index e9abf51..0000000 --- a/bpkg/rep-add-options.cli +++ /dev/null @@ -1,30 +0,0 @@ -// file : bpkg/rep-add-options.cli -// copyright : Copyright (c) 2014-2015 Code Synthesis Ltd -// license : MIT; see accompanying LICENSE file - -include ; - -"\section=1" -"\name=bpkg-rep-add" -"\summary=add repository to configuration" - -/* -"\h{SYNOPSIS} - -bpkg rep-add [] " - -"\h{DESCRIPTION} - -The \cb{rep-add} command adds the specified source repository to the -configuration. 
Note that this command doesn't fetch the available -packages list for the newly added repository. To do that, use the -\cb{rep-fetch} command. -" -*/ - -namespace bpkg -{ - class rep_add_options: configuration_options - { - }; -} diff --git a/bpkg/rep-add.cxx b/bpkg/rep-add.cxx deleted file mode 100644 index 0c06ce6..0000000 --- a/bpkg/rep-add.cxx +++ /dev/null @@ -1,70 +0,0 @@ -// file : bpkg/rep-add.cxx -*- C++ -*- -// copyright : Copyright (c) 2014-2015 Code Synthesis Ltd -// license : MIT; see accompanying LICENSE file - -#include - -#include // invalid_argument - -#include -#include -#include -#include -#include -#include -#include - -using namespace std; -using namespace butl; - -namespace bpkg -{ - int - rep_add (const rep_add_options& o, cli::scanner& args) - { - tracer trace ("rep_add"); - - dir_path c (o.directory ()); - level4 ([&]{trace << "configuration: " << c;}); - - if (!args.more ()) - fail << "repository location argument expected" << - info << "run 'bpkg help rep-add' for more information"; - - repository_location rl (parse_location (args.next ())); - const string& rn (rl.canonical_name ()); - - // Create the new repository and add is as a complement to the root. - // - database db (open (c, trace)); - transaction t (db.begin ()); - session s; // Repository dependencies can have cycles. - - // It is possible that this repository is already in the database. - // For example, it might be a prerequisite of one of the already - // added repository. - // - shared_ptr r (db.find (rl.canonical_name ())); - - if (r == nullptr) - { - r.reset (new repository (rl)); - db.persist (r); - } - - shared_ptr root (db.load ("")); - - if (!root->complements.insert (lazy_shared_ptr (db, r)).second) - { - fail << rn << " is already a repository of this configuration"; - } - - db.update (root); - t.commit (); - - if (verb) - text << "added repository " << rn; - - return 0; - } -} diff --git a/bpkg/rep-fetch b/bpkg/rep-fetch deleted file mode 100644 index 9175755..0000000 --- a/bpkg/rep-fetch +++ /dev/null @@ -1,17 +0,0 @@ -// file : bpkg/rep-fetch -*- C++ -*- -// copyright : Copyright (c) 2014-2015 Code Synthesis Ltd -// license : MIT; see accompanying LICENSE file - -#ifndef BPKG_REP_FETCH -#define BPKG_REP_FETCH - -#include -#include - -namespace bpkg -{ - int - rep_fetch (const rep_fetch_options&, cli::scanner& args); -} - -#endif // BPKG_REP_FETCH diff --git a/bpkg/rep-fetch-options.cli b/bpkg/rep-fetch-options.cli deleted file mode 100644 index 8cb9ddb..0000000 --- a/bpkg/rep-fetch-options.cli +++ /dev/null @@ -1,28 +0,0 @@ -// file : bpkg/rep-fetch-options.cli -// copyright : Copyright (c) 2014-2015 Code Synthesis Ltd -// license : MIT; see accompanying LICENSE file - -include ; - -"\section=1" -"\name=bpkg-rep-fetch" -"\summary=fetch available packages list" - -/* -"\h{SYNOPSIS} - -bpkg rep-fetch []" - -"\h{DESCRIPTION} - -The \cb{rep-fetch} command recursively fetches the prerequisite repository -and available package lists for all the repositories that were added -(\cb{rep-add}) to the configuration." 
-*/ - -namespace bpkg -{ - class rep_fetch_options: configuration_options - { - }; -} diff --git a/bpkg/rep-fetch.cxx b/bpkg/rep-fetch.cxx deleted file mode 100644 index 1136e98..0000000 --- a/bpkg/rep-fetch.cxx +++ /dev/null @@ -1,245 +0,0 @@ -// file : bpkg/rep-fetch.cxx -*- C++ -*- -// copyright : Copyright (c) 2014-2015 Code Synthesis Ltd -// license : MIT; see accompanying LICENSE file - -#include - -#include -#include - -#include - -#include -#include -#include -#include -#include -#include -#include - -using namespace std; -using namespace butl; - -namespace bpkg -{ - static void - rep_fetch (const common_options& co, - transaction& t, - const shared_ptr& r) - { - tracer trace ("rep_fetch(rep)"); - - database& db (t.database ()); - tracer_guard tg (db, trace); - - const repository_location& rl (r->location); - level4 ([&]{trace << r->name << " " << rl;}); - assert (rl.absolute () || rl.remote ()); - - // The fetch_*() functions below will be quiet at level 1, which - // can be quite confusing if the download hangs. - // - if (verb >= (rl.remote () ? 1 : 2)) - text << "fetching " << r->name; - - r->fetched = true; // Mark as being fetched. - - // Load the 'repositories' file and use it to populate the - // prerequisite and complement repository sets. - // - repository_manifests rms (fetch_repositories (co, rl, true)); - - for (repository_manifest& rm: rms) - { - repository_role rr (rm.effective_role ()); - - if (rr == repository_role::base) - continue; // Entry for this repository. - - // If the location is relative, complete it using this repository - // as a base. - // - if (rm.location.relative ()) - { - try - { - rm.location = repository_location (rm.location, rl); - } - catch (const invalid_argument& e) - { - fail << "invalid relative repository location '" << rm.location - << "': " << e.what () << - info << "base repository location is " << rl; - } - } - - // We might already have this repository in the database. - // - shared_ptr pr ( - db.find ( - rm.location.canonical_name ())); - - if (pr == nullptr) - { - pr = make_shared (move (rm.location)); - db.persist (pr); // Enter into session, important if recursive. - } - - // Load the prerequisite repository unless it has already been - // (or is already being) fetched. - // - if (!pr->fetched) - rep_fetch (co, t, pr); - - // @@ What if we have duplicated? Ideally, we would like to check - // this once and as early as possible. The original idea was to - // do it during manifest parsing and serialization. But at that - // stage we have no way of completing relative locations (which - // is required to calculate canonical names). Current thinking is - // that we should have something like rep-verify (similar to - // pkg-verify) that performs (potentially expensive) repository - // verifications, including making sure prerequisites can be - // satisfied from the listed repositories, etc. Perhaps we can - // also re-use some of that functionality here. I.e., instead of - // calling the "naked" fetch_repositories() above, we will call - // a function from rep-verify that will perform extra verifications. - // - // @@ Also check for self-prerequisite. 
- // - switch (rr) - { - case repository_role::complement: - { - level4 ([&]{trace << pr->name << " complement of " << r->name;}); - r->complements.insert (lazy_shared_ptr (db, pr)); - break; - } - case repository_role::prerequisite: - { - level4 ([&]{trace << pr->name << " prerequisite of " << r->name;}); - r->prerequisites.insert (lazy_weak_ptr (db, pr)); - break; - } - case repository_role::base: - assert (false); - } - } - - // Load the 'packages' file. - // - // @@ We need to check that that 'repositories' file hasn't - // changed since. - // - package_manifests pms (fetch_packages (co, rl, true)); - - // "Suspend" session while persisting packages to reduce memory - // consumption. - // - session& s (session::current ()); - session::reset_current (); - - for (package_manifest& pm: pms) - { - // We might already have this package in the database. - // - bool persist (false); - - shared_ptr p ( - db.find ( - available_package_id (pm.name, pm.version))); - - if (p == nullptr) - { - p = make_shared (move (pm)); - persist = true; - } - - // This repository shouldn't already be in the location set since - // that would mean it has already been loaded and we shouldn't be - // here. - // - p->locations.push_back ( - package_location {lazy_shared_ptr (db, r), - move (*pm.location)}); - - if (persist) - db.persist (p); - else - db.update (p); - } - - session::current (s); // "Resume". - - // Save the changes to the repository object. - // - db.update (r); - } - - int - rep_fetch (const rep_fetch_options& o, cli::scanner&) - { - tracer trace ("rep_fetch"); - - dir_path c (o.directory ()); - level4 ([&]{trace << "configuration: " << c;}); - - database db (open (c, trace)); - transaction t (db.begin ()); - session s; // Repository dependencies can have cycles. - - shared_ptr root (db.load ("")); - const auto& ua (root->complements); // User-added repositories. - - if (ua.empty ()) - fail << "configuration " << c << " has no repositories" << - info << "use 'bpkg rep-add' to add a repository"; - - // Clean repositories and available packages. At the end only - // repositories that were explicitly added by the user and the - // special root repository should remain. - // - db.erase_query (); - - for (shared_ptr r: pointer_result (db.query ())) - { - if (r == root) - { - level5 ([&]{trace << "skipping root";}); - } - else if (ua.find (lazy_shared_ptr (db, r)) != ua.end ()) - { - level4 ([&]{trace << "cleaning " << r->name;}); - - r->complements.clear (); - r->prerequisites.clear (); - r->fetched = false; - db.update (r); - } - else - { - level4 ([&]{trace << "erasing " << r->name;}); - db.erase (r); - } - } - - // Now recursively fetch prerequisite/complement repositories and - // their packages. 
- // - for (const lazy_shared_ptr& lp: ua) - rep_fetch (o, t, lp.load ()); - - size_t rcount, pcount; - if (verb) - { - rcount = db.query_value (); - pcount = db.query_value (); - } - - t.commit (); - - if (verb) - text << pcount << " package(s) in " << rcount << " repository(s)"; - - return 0; - } -} diff --git a/tests/test.sh b/tests/test.sh index e3c613c..2473bf3 100755 --- a/tests/test.sh +++ b/tests/test.sh @@ -236,58 +236,58 @@ stat libfoo unknown ## -## rep-add +## cfg-add ## test cfg-create --wipe -fail rep-add # repository location expected -fail rep-add stable # invalid location -fail rep-add http:// # invalid location +fail cfg-add # repository location expected +fail cfg-add stable # invalid location +fail cfg-add http:// # invalid location # relative path # -test rep-add ./1/bar/stable -fail rep-add ./1/../1/bar/stable # duplicate +test cfg-add ./1/bar/stable +fail cfg-add ./1/../1/bar/stable # duplicate # absolute path # -test rep-add /tmp/1/foo/stable -fail rep-add /tmp/1/../1/foo/stable # duplicate +test cfg-add /tmp/1/foo/stable +fail cfg-add /tmp/1/../1/foo/stable # duplicate # remote URL # -test rep-add http://pkg.example.org/1/testing -fail rep-add http://www.example.org/1/testing # duplicate +test cfg-add http://pkg.example.org/1/testing +fail cfg-add http://www.example.org/1/testing # duplicate ## -## rep-fetch +## cfg-fetch ## test cfg-create --wipe -fail rep-fetch # no repositories +fail cfg-fetch # no repositories # hello repository # test cfg-create --wipe -test rep-add $rep/common/hello -test rep-fetch -test rep-fetch +test cfg-add $rep/common/hello +test cfg-fetch +test cfg-fetch # bar/unstable repository # test cfg-create --wipe -test rep-add $rep/common/bar/unstable -test rep-fetch -test rep-fetch +test cfg-add $rep/common/bar/unstable +test cfg-fetch +test cfg-fetch # both # test cfg-create --wipe -test rep-add $rep/common/hello -test rep-add $rep/common/bar/unstable -test rep-fetch -test rep-fetch +test cfg-add $rep/common/hello +test cfg-add $rep/common/bar/unstable +test cfg-fetch +test cfg-fetch ## @@ -304,14 +304,14 @@ fail pkg-fetch libfoo # package version expected fail pkg-fetch libfoo/1/2/3 # invalid package version fail pkg-fetch libfoo/1.0.0 # no repositories -test rep-add $rep/fetch/t1 +test cfg-add $rep/fetch/t1 fail pkg-fetch libfoo/1.0.0 # no packages -test rep-fetch +test cfg-fetch fail pkg-fetch libfoo/2+1.0.0 # not available test cfg-create --wipe -test rep-add $rep/fetch/t1 -test rep-fetch +test cfg-add $rep/fetch/t1 +test cfg-fetch test pkg-fetch libfoo/1.0.0 stat libfoo/1.0.0 fetched fail pkg-fetch libfoo/1.0.0 @@ -334,8 +334,8 @@ test pkg-purge libfoo # hello # test cfg-create --wipe -test rep-add $rep/common/hello -test rep-fetch +test cfg-add $rep/common/hello +test cfg-fetch test pkg-fetch libhello/1.0.0 test pkg-purge libhello @@ -348,8 +348,8 @@ fail pkg-unpack -r # replace only with existing fail pkg-unpack -e # package directory expected fail pkg-unpack # package name expected -test rep-add $rep/fetch/t1 -test rep-fetch +test cfg-add $rep/fetch/t1 +test cfg-fetch # existing # @@ -391,8 +391,8 @@ test pkg-purge libfoo # hello # test cfg-create --wipe -test rep-add $rep/common/hello -test rep-fetch +test cfg-add $rep/common/hello +test cfg-fetch test pkg-fetch libhello/1.0.0 test pkg-unpack libhello test pkg-purge libhello @@ -499,8 +499,8 @@ stat libfoo unknown ## pkg-configure/pkg-disfigure ## test cfg-create --wipe -test rep-add $rep/common/hello -test rep-fetch +test cfg-add $rep/common/hello +test cfg-fetch fail pkg-configure # 
package name expected fail pkg-configure config.dist.root=/tmp # ditto @@ -589,8 +589,8 @@ stat libhello unknown # test rep-create repository/1/depend/stable test cfg-create --wipe -test rep-add $rep/depend/stable -test rep-fetch +test cfg-add $rep/depend/stable +test cfg-fetch test pkg-fetch libbar/1.0.0 test pkg-unpack libbar @@ -666,8 +666,8 @@ test rep-create repository/1/status/unstable test cfg-create --wipe stat libfoo/1.0.0 "unknown" stat libfoo "unknown" -test rep-add $rep/status/stable -test rep-fetch +test cfg-add $rep/status/stable +test cfg-fetch stat libfoo/1.0.0 "available" stat libfoo "available 1.0.0" test pkg-fetch libfoo/1.0.0 @@ -677,21 +677,21 @@ stat libfoo "fetched 1.0.0" # multiple versions/revisions # test cfg-create --wipe -test rep-add $rep/status/extra -test rep-fetch +test cfg-add $rep/status/extra +test cfg-fetch stat libbar "available 1.1.0-1" -test rep-add $rep/status/stable -test rep-fetch +test cfg-add $rep/status/stable +test cfg-fetch stat libbar "available 1.1.0-1 1.0.0" test cfg-create --wipe -test rep-add $rep/status/testing -test rep-fetch +test cfg-add $rep/status/testing +test cfg-fetch stat libbar "available 1.1.0 1.0.0-1 1.0.0" test cfg-create --wipe -test rep-add $rep/status/unstable -test rep-fetch +test cfg-add $rep/status/unstable +test cfg-fetch stat libbar "available 2.0.0 1.1.0 1.0.0-1 1.0.0" test pkg-fetch libbar/1.0.0-1 stat libbar "fetched 1.0.0-1; available 2.0.0 1.1.0" @@ -704,8 +704,8 @@ stat libbar "fetched 2.0.0" ## pkg-update ## test cfg-create --wipe -test rep-add $rep/common/hello -test rep-fetch +test cfg-add $rep/common/hello +test cfg-fetch fail pkg-update # package name expected fail pkg-update libhello # no such package @@ -738,8 +738,8 @@ test pkg-purge libhello ## pkg-clean ## test cfg-create --wipe -test rep-add $rep/common/hello -test rep-fetch +test cfg-add $rep/common/hello +test cfg-fetch fail pkg-clean # package name expected fail pkg-clean libhello # no such package @@ -778,8 +778,8 @@ test pkg-purge libhello # build and clean package # test cfg-create --wipe cxx -test rep-add $rep/common/hello -test rep-fetch +test cfg-add $rep/common/hello +test cfg-fetch test pkg-fetch libhello/1.0.0 test pkg-unpack libhello test pkg-configure libhello @@ -788,14 +788,8 @@ test pkg-clean libhello test pkg-disfigure libhello test pkg-purge libhello - -## -## High-level commands. -## - - ## -## build +## pkg-build ## # 1 (libfoo) @@ -803,47 +797,47 @@ test pkg-purge libhello test rep-create repository/1/satisfy/t1 test cfg-create --wipe -fail build -p # package name expected -fail build -p libfoo # unknown package -fail build -p libfoo/1.0.0 # unknown package -test build -p repository/1/satisfy/libfoo-1.1.0.tar.gz <