From 886868dc67e069734b44d81d9f56d48a0a47538e Mon Sep 17 00:00:00 2001
From: Karen Arutyunov
Date: Tue, 23 Aug 2022 23:16:40 +0300
Subject: Split pkg-build.cxx

---
 bpkg/package-query.cxx | 588 +++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 588 insertions(+)
 create mode 100644 bpkg/package-query.cxx
(limited to 'bpkg/package-query.cxx')

diff --git a/bpkg/package-query.cxx b/bpkg/package-query.cxx
new file mode 100644
index 0000000..8d7b652
--- /dev/null
+++ b/bpkg/package-query.cxx
@@ -0,0 +1,588 @@
// file      : bpkg/package-query.cxx -*- C++ -*-
// license   : MIT; see accompanying LICENSE file

#include <bpkg/package-query.hxx>

#include <bpkg/package.hxx>
#include <bpkg/package-odb.hxx>
#include <bpkg/database.hxx>

using namespace std;

namespace bpkg
{
  vector<shared_ptr<available_package>> imaginary_stubs;

  shared_ptr<available_package>
  find_imaginary_stub (const package_name& name)
  {
    auto i (find_if (imaginary_stubs.begin (), imaginary_stubs.end (),
                     [&name] (const shared_ptr<available_package>& p)
                     {
                       return p->id.name == name;
                     }));

    return i != imaginary_stubs.end () ? *i : nullptr;
  }

  linked_databases repo_configs;

  linked_databases
  dependent_repo_configs (database& db)
  {
    linked_databases r;
    for (database& ddb: db.dependent_configs ())
    {
      if (find (repo_configs.begin (), repo_configs.end (), ddb) !=
          repo_configs.end ())
        r.push_back (ddb);
    }

    return r;
  }

  odb::result<available_package>
  query_available (database& db,
                   const package_name& name,
                   const optional<version_constraint>& c,
                   bool order,
                   bool revision)
  {
    using query = query<available_package>;

    query q (query::id.name == name);
    const auto& vm (query::id.version);

    // If there is a constraint, then translate it to the query. Otherwise,
    // get the latest version or stub versions if present.
    //
    if (c)
    {
      assert (c->complete ());

      query qs (compare_version_eq (vm,
                                    canonical_version (wildcard_version),
                                    false /* revision */,
                                    false /* iteration */));

      if (c->min_version &&
          c->max_version &&
          *c->min_version == *c->max_version)
      {
        const version& v (*c->min_version);

        q = q &&
          (compare_version_eq (vm,
                               canonical_version (v),
                               revision || v.revision.has_value (),
                               revision /* iteration */) ||
           qs);
      }
      else
      {
        query qr (true);

        if (c->min_version)
        {
          const version& v (*c->min_version);
          canonical_version cv (v);
          bool rv (revision || v.revision);

          if (c->min_open)
            qr = compare_version_gt (vm, cv, rv, revision /* iteration */);
          else
            qr = compare_version_ge (vm, cv, rv, revision /* iteration */);
        }

        if (c->max_version)
        {
          const version& v (*c->max_version);
          canonical_version cv (v);
          bool rv (revision || v.revision);

          if (c->max_open)
            qr = qr && compare_version_lt (vm, cv, rv, revision);
          else
            qr = qr && compare_version_le (vm, cv, rv, revision);
        }

        q = q && (qr || qs);
      }
    }

    if (order)
      q += order_by_version_desc (vm);

    return db.query<available_package> (q);
  }

  // Check if the package is available from the specified repository fragment,
  // its prerequisite repositories, or one of their complements, recursively.
  // Return the first repository fragment that contains the package or NULL if
  // none are.
  //
  // Note that we can end up with a repository dependency cycle since the
  // root repository can be the default complement for dir and git
  // repositories (see rep_fetch() implementation for details). Thus we need
  // to make sure that the repository fragment is not in the dependency chain
  // yet.
  //
  using repository_fragments =
    vector<reference_wrapper<const shared_ptr<repository_fragment>>>;

  static shared_ptr<repository_fragment>
  find (const shared_ptr<repository_fragment>& rf,
        const shared_ptr<available_package>& ap,
        repository_fragments& chain,
        bool prereq)
  {
    // Prerequisites are not searched through recursively.
    //
    assert (!prereq || chain.empty ());

    if (find_if (chain.begin (), chain.end (),
                 [&rf] (const shared_ptr<repository_fragment>& i) -> bool
                 {
                   return i == rf;
                 }) != chain.end ())
      return nullptr;

    chain.emplace_back (rf);

    unique_ptr<repository_fragments, void (*)(repository_fragments*)> deleter (
      &chain, [] (repository_fragments* rf) {rf->pop_back ();});

    const auto& cs (rf->complements);
    const auto& ps (rf->prerequisites);

    for (const package_location& pl: ap->locations)
    {
      const lazy_shared_ptr<repository_fragment>& lrf (pl.repository_fragment);

      // First check the repository itself.
      //
      if (lrf.object_id () == rf->name)
        return rf;

      // Then check all the complements and prerequisites repository fragments
      // without loading them. Though, we still need to load complement and
      // prerequisite repositories.
      //
      auto pr = [&lrf] (const repository::fragment_type& i)
      {
        return i.fragment == lrf;
      };

      for (const lazy_weak_ptr<repository>& r: cs)
      {
        const auto& frs (r.load ()->fragments);

        if (find_if (frs.begin (), frs.end (), pr) != frs.end ())
          return lrf.load ();
      }

      if (prereq)
      {
        for (const lazy_weak_ptr<repository>& r: ps)
        {
          const auto& frs (r.load ()->fragments);

          if (find_if (frs.begin (), frs.end (), pr) != frs.end ())
            return lrf.load ();
        }
      }

      // Finally, load the complements and prerequisites and check them
      // recursively.
      //
      for (const lazy_weak_ptr<repository>& cr: cs)
      {
        for (const auto& fr: cr.load ()->fragments)
        {
          // Should we consider prerequisites of our complements as our
          // prerequisites? I'd say not.
          //
          if (shared_ptr<repository_fragment> r =
              find (fr.fragment.load (), ap, chain, false))
            return r;
        }
      }

      if (prereq)
      {
        for (const lazy_weak_ptr<repository>& pr: ps)
        {
          for (const auto& fr: pr.load ()->fragments)
          {
            if (shared_ptr<repository_fragment> r =
                find (fr.fragment.load (), ap, chain, false))
              return r;
          }
        }
      }
    }

    return nullptr;
  }

  shared_ptr<repository_fragment>
  filter (const shared_ptr<repository_fragment>& r,
          const shared_ptr<available_package>& ap,
          bool prereq)
  {
    repository_fragments chain;
    return find (r, ap, chain, prereq);
  }

  vector<shared_ptr<available_package>>
  filter (const shared_ptr<repository_fragment>& r,
          result<available_package>&& apr,
          bool prereq)
  {
    vector<shared_ptr<available_package>> aps;

    for (shared_ptr<available_package> ap: pointer_result (apr))
    {
      if (filter (r, ap, prereq) != nullptr)
        aps.push_back (move (ap));
    }

    return aps;
  }

  pair<shared_ptr<available_package>, shared_ptr<repository_fragment>>
  filter_one (const shared_ptr<repository_fragment>& r,
              result<available_package>&& apr,
              bool prereq)
  {
    using result = pair<shared_ptr<available_package>,
                        shared_ptr<repository_fragment>>;

    for (shared_ptr<available_package> ap: pointer_result (apr))
    {
      if (shared_ptr<repository_fragment> pr = filter (r, ap, prereq))
        return result (move (ap), move (pr));
    }

    return result ();
  }

  vector<pair<shared_ptr<available_package>, shared_ptr<repository_fragment>>>
  filter (const vector<shared_ptr<repository_fragment>>& rps,
          odb::result<available_package>&& apr,
          bool prereq)
  {
    vector<pair<shared_ptr<available_package>,
                shared_ptr<repository_fragment>>> aps;

    for (shared_ptr<available_package> ap: pointer_result (apr))
    {
      for (const shared_ptr<repository_fragment>& r: rps)
      {
        if (shared_ptr<repository_fragment> rf = filter (r, ap, prereq))
        {
          aps.emplace_back (move (ap), move (rf));
          break;
        }
      }
    }

    return aps;
  }

  pair<shared_ptr<available_package>, shared_ptr<repository_fragment>>
  filter_one (const vector<shared_ptr<repository_fragment>>& rps,
              odb::result<available_package>&& apr,
              bool prereq)
  {
    using result = pair<shared_ptr<available_package>,
                        shared_ptr<repository_fragment>>;

    for (shared_ptr<available_package> ap: pointer_result (apr))
    {
      for (const shared_ptr<repository_fragment>& r: rps)
      {
        if (shared_ptr<repository_fragment> rf = filter (r, ap, prereq))
          return result (move (ap), move (rf));
      }
    }

    return result ();
  }

  // Sort the available package fragments in the package version descending
  // order and suppress duplicate packages.
  //
  static void
  sort_dedup (vector<pair<shared_ptr<available_package>,
                          lazy_shared_ptr<repository_fragment>>>& pfs)
  {
    sort (pfs.begin (), pfs.end (),
          [] (const auto& x, const auto& y)
          {
            return x.first->version > y.first->version;
          });

    pfs.erase (unique (pfs.begin(), pfs.end(),
                       [] (const auto& x, const auto& y)
                       {
                         return x.first->version == y.first->version;
                       }),
               pfs.end ());
  }

  vector<pair<shared_ptr<available_package>,
              lazy_shared_ptr<repository_fragment>>>
  find_available (const linked_databases& dbs,
                  const package_name& name,
                  const optional<version_constraint>& c)
  {
    vector<pair<shared_ptr<available_package>,
                lazy_shared_ptr<repository_fragment>>> r;

    for (database& db: dbs)
    {
      for (shared_ptr<available_package> ap:
             pointer_result (query_available (db, name, c)))
      {
        // An available package should come from at least one fetched
        // repository fragment.
        //
        assert (!ap->locations.empty ());

        // All repository fragments the package comes from are equally good, so
        // we pick the first one.
        //
        r.emplace_back (move (ap), ap->locations[0].repository_fragment);
      }
    }

    // If there are multiple databases specified, then sort the result in the
    // package version descending order and suppress duplicates.
    //
    if (dbs.size () > 1)
      sort_dedup (r);

    // Adding a stub from the imaginary system repository to the non-empty
    // results isn't necessary but may end up with a duplicate. That's why we
    // only add it if nothing else is found.
    //
    if (r.empty ())
    {
      if (shared_ptr<available_package> ap = find_imaginary_stub (name))
        r.emplace_back (move (ap), nullptr);
    }

    return r;
  }

  vector<pair<shared_ptr<available_package>,
              lazy_shared_ptr<repository_fragment>>>
  find_available (const package_name& name,
                  const optional<version_constraint>& c,
                  const config_repo_fragments& rfs,
                  bool prereq)
  {
    vector<pair<shared_ptr<available_package>,
                lazy_shared_ptr<repository_fragment>>> r;

    for (const auto& dfs: rfs)
    {
      database& db (dfs.first);
      for (auto& af: filter (dfs.second,
                             query_available (db, name, c),
                             prereq))
      {
        r.emplace_back (
          move (af.first),
          lazy_shared_ptr<repository_fragment> (db, move (af.second)));
      }
    }

    if (rfs.size () > 1)
      sort_dedup (r);

    if (r.empty ())
    {
      if (shared_ptr<available_package> ap = find_imaginary_stub (name))
        r.emplace_back (move (ap), nullptr);
    }

    return r;
  }

  vector<shared_ptr<available_package>>
  find_available (const package_name& name,
                  const optional<version_constraint>& c,
                  const lazy_shared_ptr<repository_fragment>& rf,
                  bool prereq)
  {
    vector<shared_ptr<available_package>> r;

    database& db (rf.database ());
    for (auto& ap: filter (rf.load (), query_available (db, name, c), prereq))
      r.emplace_back (move (ap));

    if (r.empty ())
    {
      if (shared_ptr<available_package> ap = find_imaginary_stub (name))
        r.emplace_back (move (ap));
    }

    return r;
  }

  pair<shared_ptr<available_package>,
       lazy_shared_ptr<repository_fragment>>
  find_available_one (const package_name& name,
                      const optional<version_constraint>& c,
                      const lazy_shared_ptr<repository_fragment>& rf,
                      bool prereq,
                      bool revision)
  {
    // Filter the result based on the repository fragment to which each
    // version belongs.
    //
    database& db (rf.database ());
    auto r (
      filter_one (rf.load (),
                  query_available (db, name, c, true /* order */, revision),
                  prereq));

    if (r.first == nullptr)
      r.first = find_imaginary_stub (name);

    return make_pair (r.first,
                      (r.second != nullptr
                       ? lazy_shared_ptr<repository_fragment> (db,
                                                               move (r.second))
                       : nullptr));
  }

  pair<shared_ptr<available_package>, shared_ptr<repository_fragment>>
  find_available_one (database& db,
                      const package_name& name,
                      const optional<version_constraint>& c,
                      const vector<shared_ptr<repository_fragment>>& rfs,
                      bool prereq,
                      bool revision)
  {
    // Filter the result based on the repository fragments to which each
    // version belongs.
    //
    auto r (
      filter_one (rfs,
                  query_available (db, name, c, true /* order */, revision),
                  prereq));

    if (r.first == nullptr)
      r.first = find_imaginary_stub (name);

    return r;
  }

  pair<shared_ptr<available_package>,
       lazy_shared_ptr<repository_fragment>>
  find_available_one (const linked_databases& dbs,
                      const package_name& name,
                      const optional<version_constraint>& c,
                      bool prereq,
                      bool revision)
  {
    for (database& db: dbs)
    {
      auto r (
        filter_one (db.load<repository_fragment> (""),
                    query_available (db, name, c, true /* order */, revision),
                    prereq));

      if (r.first != nullptr)
        return make_pair (
          move (r.first),
          lazy_shared_ptr<repository_fragment> (db, move (r.second)));
    }

    return make_pair (find_imaginary_stub (name), nullptr);
  }

  shared_ptr<available_package>
  find_available (const common_options& options,
                  database& db,
                  const shared_ptr<selected_package>& sp)
  {
    available_package_id pid (sp->name, sp->version);
    for (database& ddb: dependent_repo_configs (db))
    {
      shared_ptr<available_package> ap (ddb.find<available_package> (pid));

      if (ap != nullptr && !ap->stub ())
        return ap;
    }

    return make_available (options, db, sp);
  }

  pair<shared_ptr<available_package>,
       lazy_shared_ptr<repository_fragment>>
  find_available_fragment (const common_options& options,
                           database& db,
                           const shared_ptr<selected_package>& sp)
  {
    available_package_id pid (sp->name, sp->version);
    for (database& ddb: dependent_repo_configs (db))
    {
      shared_ptr<available_package> ap (ddb.find<available_package> (pid));

      if (ap != nullptr && !ap->stub ())
      {
        if (shared_ptr<repository_fragment> f = ddb.find<repository_fragment> (
              sp->repository_fragment.canonical_name ()))
          return make_pair (ap,
                            lazy_shared_ptr<repository_fragment> (ddb,
                                                                  move (f)));
      }
    }

    return make_pair (find_available (options, db, sp), nullptr);
  }

  pair<shared_ptr<available_package>,
       lazy_shared_ptr<repository_fragment>>
  make_available_fragment (const common_options& options,
                           database& db,
                           const shared_ptr<selected_package>& sp)
  {
    shared_ptr<available_package> ap (make_available (options, db, sp));

    if (sp->system ())
      return make_pair (move (ap), nullptr);

    // First see if we can find its repository fragment.
    //
    // Note that this is package's "old" repository fragment and there is no
    // guarantee that its dependencies are still resolvable from it. But this
    // is our best chance (we could go nuclear and point all orphans to the
    // root repository fragment but that feels a bit too drastic at the
    // moment).
    //
    // Also note that the repository information for this selected package can
    // potentially be in one of the ultimate dependent configurations as
    // determined at the time of the run when the package was configured. This
    // configurations set may differ from the current one, but let's try
    // anyway.
    //
    lazy_shared_ptr<repository_fragment> rf;

    for (database& ddb: dependent_repo_configs (db))
    {
      if (shared_ptr<repository_fragment> f = ddb.find<repository_fragment> (
            sp->repository_fragment.canonical_name ()))
      {
        rf = lazy_shared_ptr<repository_fragment> (ddb, move (f));
        break;
      }
    }

    return make_pair (move (ap), move (rf));
  }
}
--
cgit v1.1
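The find() helper in the patch keeps its recursion chain balanced with a unique_ptr whose custom deleter pops the chain on every return path. The following is a minimal standalone sketch of that scope-guard idiom, separate from the patch itself; all names in it are illustrative, not bpkg's.

#include <iostream>
#include <memory>
#include <vector>

using chain_type = std::vector<int>;

static std::size_t
descend (chain_type& chain, int depth)
{
  chain.emplace_back (depth);

  // Pop the entry we just added when this scope is left, whichever return
  // below is taken (same role as the deleter over repository_fragments in
  // find() above).
  //
  std::unique_ptr<chain_type, void (*) (chain_type*)> guard (
    &chain, [] (chain_type* c) {c->pop_back ();});

  if (depth == 3)
    return chain.size (); // Deepest level: four entries on the chain.

  return descend (chain, depth + 1);
}

int
main ()
{
  chain_type chain;
  std::cout << "max depth: " << descend (chain, 0) << '\n'; // Prints 4.
  std::cout << "after:     " << chain.size () << '\n';      // Prints 0.
}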
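Similarly, sort_dedup() is the standard sort-then-unique pattern: order the candidates by descending version, then erase adjacent entries with equal versions so that one candidate per version survives. A self-contained sketch of the same pattern, with int standing in for the version type (illustrative only, not the bpkg code):

#include <algorithm>
#include <iostream>
#include <string>
#include <utility>
#include <vector>

int
main ()
{
  // pair<version, origin>: the same version can be offered by several
  // origins (several databases/repositories in the real code).
  //
  std::vector<std::pair<int, std::string>> pkgs {
    {1, "a"}, {3, "b"}, {2, "c"}, {3, "d"}, {1, "e"}};

  std::sort (pkgs.begin (), pkgs.end (),
             [] (const auto& x, const auto& y)
             {
               return x.first > y.first; // Version-descending.
             });

  pkgs.erase (std::unique (pkgs.begin (), pkgs.end (),
                           [] (const auto& x, const auto& y)
                           {
                             return x.first == y.first;
                           }),
              pkgs.end ());

  // One entry per version remains: 3, 2, 1 (which origin survives among
  // equal versions is unspecified, as in the original).
  //
  for (const auto& p: pkgs)
    std::cout << p.first << ' ' << p.second << '\n';
}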