From 7fa57f8522ed7741c00ed3cd64cf956716aebd2c Mon Sep 17 00:00:00 2001 From: Karen Arutyunov Date: Mon, 11 Mar 2024 21:23:21 +0300 Subject: Add support for build auxiliary machines/configurations --- libbrep/build-extra.sql | 34 +++ libbrep/build-package.hxx | 24 +- libbrep/build.cxx | 50 +++- libbrep/build.hxx | 45 ++- libbrep/build.xml | 73 +++++ libbrep/common.hxx | 63 ++++- libbrep/package.cxx | 31 +-- libbrep/package.hxx | 42 ++- libbrep/package.xml | 91 ++++++ load/load.cxx | 3 + mod/build-result-module.cxx | 2 + mod/build.cxx | 150 ++++++++++ mod/build.hxx | 19 ++ mod/mod-build-configs.cxx | 3 + mod/mod-build-log.cxx | 27 +- mod/mod-build-result.cxx | 175 ++---------- mod/mod-build-task.cxx | 664 ++++++++++++++++++++++++++++++++++---------- mod/mod-upload.cxx | 7 +- mod/module.cli | 72 +++-- monitor/monitor.cxx | 13 + 20 files changed, 1212 insertions(+), 376 deletions(-) diff --git a/libbrep/build-extra.sql b/libbrep/build-extra.sql index c15ddc1..374cf73 100644 --- a/libbrep/build-extra.sql +++ b/libbrep/build-extra.sql @@ -6,12 +6,16 @@ -- package-extra.sql file for details. -- +DROP FOREIGN TABLE IF EXISTS build_package_config_auxiliaries; + DROP FOREIGN TABLE IF EXISTS build_package_config_constraints; DROP FOREIGN TABLE IF EXISTS build_package_config_builds; DROP FOREIGN TABLE IF EXISTS build_package_configs; +DROP FOREIGN TABLE IF EXISTS build_package_auxiliaries; + DROP FOREIGN TABLE IF EXISTS build_package_constraints; DROP FOREIGN TABLE IF EXISTS build_package_builds; @@ -187,6 +191,22 @@ CREATE FOREIGN TABLE build_package_constraints ( comment TEXT NOT NULL) SERVER package_server OPTIONS (table_name 'package_build_constraints'); +-- The foreign table for the build_package object auxiliaries member (that is +-- of a container type). +-- +CREATE FOREIGN TABLE build_package_auxiliaries ( + tenant TEXT NOT NULL, + name CITEXT NOT NULL, + version_epoch INTEGER NOT NULL, + version_canonical_upstream TEXT NOT NULL, + version_canonical_release TEXT NOT NULL COLLATE "C", + version_revision INTEGER NOT NULL, + index BIGINT NOT NULL, + environment_name TEXT NOT NULL, + config TEXT NOT NULL, + comment TEXT NOT NULL) +SERVER package_server OPTIONS (table_name 'package_build_auxiliaries'); + -- The foreign tables for the build_package object configs member (that is a -- container of values containing containers. -- @@ -236,3 +256,17 @@ CREATE FOREIGN TABLE build_package_config_constraints ( target TEXT NULL, comment TEXT NOT NULL) SERVER package_server OPTIONS (table_name 'package_build_config_constraints'); + +CREATE FOREIGN TABLE build_package_config_auxiliaries ( + tenant TEXT NOT NULL, + name CITEXT NOT NULL, + version_epoch INTEGER NOT NULL, + version_canonical_upstream TEXT NOT NULL, + version_canonical_release TEXT NOT NULL COLLATE "C", + version_revision INTEGER NOT NULL, + config_index BIGINT NOT NULL, + index BIGINT NOT NULL, + environment_name TEXT NOT NULL, + config TEXT NOT NULL, + comment TEXT NOT NULL) +SERVER package_server OPTIONS (table_name 'package_build_config_auxiliaries'); diff --git a/libbrep/build-package.hxx b/libbrep/build-package.hxx index a0e1082..8377158 100644 --- a/libbrep/build-package.hxx +++ b/libbrep/build-package.hxx @@ -132,11 +132,13 @@ namespace brep lazy_shared_ptr internal_repository; bool buildable; - // Mapped to the package object builds, build_constraints, and - // build_configs members using the PostgreSQL foreign table mechanism. 
+ // Mapped to the package object builds, build_constraints, + // build_auxiliaries, and build_configs members using the PostgreSQL + // foreign table mechanism. // build_class_exprs builds; build_constraints constraints; + build_auxiliaries auxiliaries; build_package_configs configs; // Group the builds and constraints members of this object as well as of @@ -191,7 +193,7 @@ namespace brep #pragma db member(requirements_tests_section) load(lazy) update(always) - // builds and constraints + // builds, constraints, and auxiliaries // #pragma db member(builds) id_column("") value_column("") \ section(constraints_section) @@ -199,9 +201,12 @@ namespace brep #pragma db member(constraints) id_column("") value_column("") \ section(constraints_section) + #pragma db member(auxiliaries) id_column("") value_column("") \ + section(constraints_section) + // configs // - // Note that build_package_config::{builds,constraints} are + // Note that build_package_config::{builds,constraints,auxiliaries} are // persisted/loaded via the separate nested containers (see commons.hxx // for details). // @@ -229,6 +234,17 @@ namespace brep id_column("") key_column("") value_column("") \ section(constraints_section) + #pragma db member(config_auxiliaries) \ + virtual(build_auxiliaries_map) \ + after(config_constraints) \ + get(odb::nested_get ( \ + brep::build_package_config_auxiliaries (this.configs))) \ + set(brep::build_package_config_auxiliaries as; \ + odb::nested_set (as, std::move (?)); \ + move (as).to_configs (this.configs)) \ + id_column("") key_column("") value_column("") \ + section(constraints_section) + #pragma db member(constraints_section) load(lazy) update(always) private: diff --git a/libbrep/build.cxx b/libbrep/build.cxx index 8fadfa3..13f0818 100644 --- a/libbrep/build.cxx +++ b/libbrep/build.cxx @@ -65,7 +65,8 @@ namespace brep string tnm, version tvr, optional inr, optional afp, optional ach, - string mnm, string msm, + build_machine mcn, + vector ams, string ccs, string mcs) : id (package_id (move (tnt), move (pnm), pvr), @@ -86,8 +87,8 @@ namespace brep timestamp (timestamp_type::clock::now ()), force (force_state::unforced), agent_fingerprint (move (afp)), agent_challenge (move (ach)), - machine (move (mnm)), - machine_summary (move (msm)), + machine (move (mcn)), + auxiliary_machines (move (ams)), controller_checksum (move (ccs)), machine_checksum (move (mcs)) { @@ -121,6 +122,43 @@ namespace brep } build:: + build (string tnt, + package_name_type pnm, + version pvr, + target_triplet trg, + string tcf, + string pcf, + string tnm, version tvr, + result_status rst, + operation_results ors, + build_machine mcn, + vector ams) + : id (package_id (move (tnt), move (pnm), pvr), + move (trg), + move (tcf), + move (pcf), + move (tnm), tvr), + tenant (id.package.tenant), + package_name (id.package.name), + package_version (move (pvr)), + target (id.target), + target_config_name (id.target_config_name), + package_config_name (id.package_config_name), + toolchain_name (id.toolchain_name), + toolchain_version (move (tvr)), + state (build_state::built), + timestamp (timestamp_type::clock::now ()), + force (force_state::unforced), + status (rst), + soft_timestamp (timestamp), + hard_timestamp (timestamp), + machine (move (mcn)), + auxiliary_machines (move (ams)), + results (move (ors)) + { + } + + build:: build (build&& b) : id (move (b.id)), tenant (id.package.tenant), @@ -141,7 +179,8 @@ namespace brep agent_fingerprint (move (b.agent_fingerprint)), agent_challenge (move (b.agent_challenge)), machine 
(move (b.machine)), - machine_summary (move (b.machine_summary)), + auxiliary_machines (move (b.auxiliary_machines)), + auxiliary_machines_section (move (b.auxiliary_machines_section)), results (move (b.results)), results_section (move (b.results_section)), controller_checksum (move (b.controller_checksum)), @@ -170,7 +209,8 @@ namespace brep agent_fingerprint = move (b.agent_fingerprint); agent_challenge = move (b.agent_challenge); machine = move (b.machine); - machine_summary = move (b.machine_summary); + auxiliary_machines = move (b.auxiliary_machines); + auxiliary_machines_section = move (b.auxiliary_machines_section); results = move (b.results); results_section = move (b.results_section); controller_checksum = move (b.controller_checksum); diff --git a/libbrep/build.hxx b/libbrep/build.hxx index 1e9a9fc..be7cdf5 100644 --- a/libbrep/build.hxx +++ b/libbrep/build.hxx @@ -28,7 +28,7 @@ // #define LIBBREP_BUILD_SCHEMA_VERSION_BASE 20 -#pragma db model version(LIBBREP_BUILD_SCHEMA_VERSION_BASE, 24, closed) +#pragma db model version(LIBBREP_BUILD_SCHEMA_VERSION_BASE, 25, closed) // We have to keep these mappings at the global scope instead of inside the // brep namespace because they need to be also effective in the bbot namespace @@ -230,6 +230,13 @@ namespace brep using bbot::operation_results; + #pragma db value + struct build_machine + { + string name; + string summary; + }; + #pragma db object pointer(shared_ptr) session class build { @@ -249,7 +256,8 @@ namespace brep optional interactive, optional agent_fingerprint, optional agent_challenge, - string machine, string machine_summary, + build_machine, + vector auxiliary_machines, string controller_checksum, string machine_checksum); @@ -262,6 +270,21 @@ namespace brep string package_config_name, string toolchain_name, version toolchain_version); + // Create the build object with the built state, the specified status and + // operation results, all the timestamps set to now, and the force state + // set to unforced. + // + build (string tenant, + package_name_type, version, + target_triplet, + string target_config_name, + string package_config_name, + string toolchain_name, version toolchain_version, + result_status, + operation_results, + build_machine, + vector auxiliary_machines = {}); + // Move-only type. // build (build&&); @@ -325,8 +348,9 @@ namespace brep optional agent_fingerprint; optional agent_challenge; - string machine; - string machine_summary; + build_machine machine; + vector auxiliary_machines; + odb::section auxiliary_machines_section; // Note that the logs are stored as std::string/TEXT which is Ok since // they are UTF-8 and our database is UTF-8. 
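
The hunks above replace the plain machine/machine_summary strings with a build_machine value (name plus summary) and move the auxiliary machines into the lazily-loaded auxiliary_machines_section. A minimal consumer-side sketch of what that means for code reading a build object follows; it is not part of the patch and assumes the ODB-generated libbrep/build-odb.hxx header and an already-open odb::database (the section must be loaded explicitly, just like results_section):

#include <iostream>

#include <odb/database.hxx>
#include <odb/transaction.hxx>

#include <libbrep/build.hxx>
#include <libbrep/build-odb.hxx> // Assumed ODB-generated header name.

static void
print_machines (odb::database& db, const brep::build_id& id)
{
  using namespace brep;

  odb::transaction t (db.begin ());

  if (auto b = db.find<build> (id))
  {
    // The primary build machine is loaded together with the object.
    //
    std::cout << "build machine: " << b->machine.name << " -- "
              << b->machine.summary << std::endl;

    // The auxiliary machines live in a lazy section and must be loaded
    // explicitly before use.
    //
    db.load (*b, b->auxiliary_machines_section);

    for (const build_machine& m: b->auxiliary_machines)
      std::cout << "auxiliary machine: " << m.name << " -- "
                << m.summary << std::endl;
  }

  t.commit ();
}

This mirrors what the mod-build-log.cxx changes further down in the patch do when printing the build and auxiliary machine records into the log header.
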
@@ -368,6 +392,19 @@ namespace brep // #pragma db member(timestamp) index + #pragma db member(machine) transient + + #pragma db member(machine_name) virtual(std::string) \ + access(machine.name) column("machine") + + #pragma db member(machine_summary) virtual(std::string) \ + access(machine.summary) + + #pragma db member(auxiliary_machines) id_column("") value_column("") \ + section(auxiliary_machines_section) + + #pragma db member(auxiliary_machines_section) load(lazy) update(always) + #pragma db member(results) id_column("") value_column("") \ section(results_section) diff --git a/libbrep/build.xml b/libbrep/build.xml index e757aba..0dc37ee 100644 --- a/libbrep/build.xml +++ b/libbrep/build.xml @@ -1,4 +1,77 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/libbrep/common.hxx b/libbrep/common.hxx index 9c398c8..6220149 100644 --- a/libbrep/common.hxx +++ b/libbrep/common.hxx @@ -347,6 +347,13 @@ namespace brep #pragma db value(build_constraint) definition + // build_auxiliaries + // + using bpkg::build_auxiliary; + using build_auxiliaries = vector; + + #pragma db value(build_auxiliary) definition + // email // using bpkg::email; @@ -360,10 +367,6 @@ namespace brep #pragma db value(build_package_config) definition - // @@ TMP AUXILIARY - // - #pragma db member(build_package_config::auxiliaries) transient - // 1 for the default configuration which is always present. // using build_package_configs = small_vector; @@ -376,11 +379,12 @@ namespace brep // Note that ODB doesn't support containers of value types which contain // containers. Thus, we will persist/load - // package_build_config::{builds,constraint} via the separate nested - // containers using the adapter classes. + // package_build_config::{builds,constraint,auxiliaries} via the separate + // nested containers using the adapter classes. + // + // build_package_config::builds // #pragma db member(build_package_config::builds) transient - #pragma db member(build_package_config::constraints) transient using build_class_expr_key = odb::nested_key; using build_class_exprs_map = std::map; @@ -419,6 +423,10 @@ namespace brep } }; + // build_package_config::constraints + // + #pragma db member(build_package_config::constraints) transient + using build_constraint_key = odb::nested_key; using build_constraints_map = std::map; @@ -456,6 +464,47 @@ namespace brep } }; + // build_package_config::auxiliaries + // + #pragma db member(build_package_config::auxiliaries) transient + + using build_auxiliary_key = odb::nested_key; + using build_auxiliaries_map = std::map; + + #pragma db value(build_auxiliary_key) + #pragma db member(build_auxiliary_key::outer) column("config_index") + #pragma db member(build_auxiliary_key::inner) column("index") + + // Adapter for build_package_config::auxiliaries. + // + class build_package_config_auxiliaries: + public small_vector // 1 as for build_package_configs. + { + public: + build_package_config_auxiliaries () = default; + + explicit + build_package_config_auxiliaries (const build_package_configs& cs) + { + reserve (cs.size ()); + for (const build_package_config& c: cs) + push_back (c.auxiliaries); + } + + void + to_configs (build_package_configs& cs) && + { + // Note that the empty trailing entries will be missing (see ODB's + // nested-container.hxx for details). 
+ // + assert (size () <= cs.size ()); + + auto i (cs.begin ()); + for (build_auxiliaries& bas: *this) + i++->auxiliaries = move (bas); + } + }; + // The primary reason why a package is unbuildable by the build bot // controller service. // diff --git a/libbrep/package.cxx b/libbrep/package.cxx index 2320547..37795f0 100644 --- a/libbrep/package.cxx +++ b/libbrep/package.cxx @@ -81,6 +81,7 @@ namespace brep small_vector ts, build_class_exprs bs, build_constraints_type bc, + build_auxiliaries_type ac, build_package_configs bcs, optional lc, optional fr, @@ -114,28 +115,17 @@ namespace brep tests (move (ts)), builds (move (bs)), build_constraints (move (bc)), + build_auxiliaries (move (ac)), + build_configs (move (bcs)), internal_repository (move (rp)), location (move (lc)), fragment (move (fr)), sha256sum (move (sh)) { - // Add the default build configuration at the beginning, unless it is - // specified explicitly. + // The default configuration is always added by the package manifest + // parser (see libbpkg/manifest.cxx for details). // - if (find_if (bcs.begin (), bcs.end (), - [] (const build_package_config& c) - {return c.name == "default";}) != bcs.end ()) - { - build_configs = move (bcs); - } - else - { - build_configs.reserve (bcs.size () + 1); - build_configs.emplace_back ("default"); - build_configs.insert (build_configs.end (), - make_move_iterator (bcs.begin ()), - make_move_iterator (bcs.end ())); - } + assert (find ("default", build_configs) != nullptr); if (stub ()) unbuildable_reason = brep::unbuildable_reason::stub; @@ -152,6 +142,8 @@ namespace brep version_type vr, build_class_exprs bs, build_constraints_type bc, + build_auxiliaries_type ac, + build_package_configs bcs, shared_ptr rp) : id (rp->tenant, move (nm), vr), tenant (id.tenant), @@ -159,11 +151,18 @@ namespace brep version (move (vr)), builds (move (bs)), build_constraints (move (bc)), + build_auxiliaries (move (ac)), + build_configs (move (bcs)), buildable (false), unbuildable_reason (stub () ? brep::unbuildable_reason::stub : brep::unbuildable_reason::external) { + // The default configuration is always added by the package manifest + // parser (see libbpkg/manifest.cxx for details). + // + assert (find ("default", build_configs) != nullptr); + assert (!rp->internal); other_repositories.emplace_back (move (rp)); } diff --git a/libbrep/package.hxx b/libbrep/package.hxx index 9bb9af9..96e51a3 100644 --- a/libbrep/package.hxx +++ b/libbrep/package.hxx @@ -20,7 +20,7 @@ // #define LIBBREP_PACKAGE_SCHEMA_VERSION_BASE 27 -#pragma db model version(LIBBREP_PACKAGE_SCHEMA_VERSION_BASE, 30, closed) +#pragma db model version(LIBBREP_PACKAGE_SCHEMA_VERSION_BASE, 31, closed) namespace brep { @@ -453,11 +453,11 @@ namespace brep using dependencies_type = brep::dependencies; using requirements_type = brep::requirements; using build_constraints_type = brep::build_constraints; + using build_auxiliaries_type = brep::build_auxiliaries; // Create internal package object. // - // Note: adds the default build package config at the first position if it - // is not present yet. + // Note: the default build package config is expected to always be present. 
// package (package_name, version_type, @@ -485,6 +485,7 @@ namespace brep small_vector tests, build_class_exprs, build_constraints_type, + build_auxiliaries_type, build_package_configs, optional location, optional fragment, @@ -500,14 +501,20 @@ namespace brep // // External package can also be a separate test for some primary package // (and belong to a complement but yet external repository), and so we may - // need its build class expressions and constraints to decide if to build - // it together with the primary package or not (see test-exclude task - // manifest value for details). + // need its build class expressions, constraints, and configurations to + // decide if to build it together with the primary package or not (see + // test-exclude task manifest value for details). Additionally, when the + // test package is being built the auxiliary machines may also be + // required. + // + // Note: the default build package config is expected to always be present. // package (package_name name, version_type, build_class_exprs, build_constraints_type, + build_auxiliaries_type, + build_package_configs, shared_ptr); bool @@ -561,11 +568,12 @@ namespace brep requirements_type requirements; // Note: foreign-mapped in build. small_vector tests; // Note: foreign-mapped in build. - // Common build classes/constraints that apply to all configurations - // unless overridden. + // Common build classes, constraints, and auxiliaries that apply to all + // configurations unless overridden. // build_class_exprs builds; // Note: foreign-mapped in build. build_constraints_type build_constraints; // Note: foreign-mapped in build. + build_auxiliaries_type build_auxiliaries; // Note: foreign-mapped in build. build_package_configs build_configs; // Note: foreign-mapped in build. @@ -718,9 +726,14 @@ namespace brep #pragma db member(build_constraints) id_column("") value_column("") \ section(build_section) + // build_auxiliaries + // + #pragma db member(build_auxiliaries) id_column("") value_column("") \ + section(build_section) + // build_configs // - // Note that build_package_config::{builds,constraints} are + // Note that build_package_config::{builds,constraints,auxiliaries} are // persisted/loaded via the separate nested containers (see commons.hxx // for details). 
// @@ -749,6 +762,17 @@ namespace brep id_column("") key_column("") value_column("") \ section(build_section) + #pragma db member(build_config_auxiliaries) \ + virtual(build_auxiliaries_map) \ + after(build_config_constraints) \ + get(odb::nested_get ( \ + brep::build_package_config_auxiliaries (this.build_configs))) \ + set(brep::build_package_config_auxiliaries as; \ + odb::nested_set (as, std::move (?)); \ + move (as).to_configs (this.build_configs)) \ + id_column("") key_column("") value_column("") \ + section(build_section) + #pragma db member(build_section) load(lazy) update(always) // other_repositories diff --git a/libbrep/package.xml b/libbrep/package.xml index 6852f75..966861b 100644 --- a/libbrep/package.xml +++ b/libbrep/package.xml @@ -1,4 +1,95 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/load/load.cxx b/load/load.cxx index b8b4e1f..14b8374 100644 --- a/load/load.cxx +++ b/load/load.cxx @@ -621,6 +621,7 @@ load_packages (const shared_ptr& rp, move (ts), move (pm.builds), move (pm.build_constraints), + move (pm.build_auxiliaries), move (pm.build_configs), move (pm.location), move (pm.fragment), @@ -634,6 +635,8 @@ load_packages (const shared_ptr& rp, move (pm.version), move (pm.builds), move (pm.build_constraints), + move (pm.build_auxiliaries), + move (pm.build_configs), rp); db.persist (p); diff --git a/mod/build-result-module.cxx b/mod/build-result-module.cxx index 7823e3a..68fbe4c 100644 --- a/mod/build-result-module.cxx +++ b/mod/build-result-module.cxx @@ -4,7 +4,9 @@ #include #include +#include #include +#include namespace brep { diff --git a/mod/build.cxx b/mod/build.cxx index 4abd416..5c37acb 100644 --- a/mod/build.cxx +++ b/mod/build.cxx @@ -3,12 +3,22 @@ #include +#include +#include +#include + +#include +#include + #include +#include + #include namespace brep { + using namespace std; using namespace web; string @@ -57,4 +67,144 @@ namespace brep "&tv=" + b.toolchain_version.string () + "&reason="; } + + void + send_notification_email (const options::build_email_notification& o, + const odb::core::connection_ptr& conn, + const build& b, + const build_package& p, + const build_package_config& pc, + const string& what, + const basic_mark& error, + const basic_mark* trace) + { + using namespace odb::core; + using namespace butl; + + assert (b.state == build_state::built && b.status); + + // Bail out if sending build notification emails is disabled for this + // toolchain for this package. + // + { + const map& tes (o.build_toolchain_email ()); + auto i (tes.find (b.id.toolchain_name)); + build_email mode (i != tes.end () ? 
i->second : build_email::latest); + + if (mode == build_email::none) + { + return; + } + else if (mode == build_email::latest) + { + transaction t (conn->begin ()); + database& db (t.database ()); + + const auto& id (query::build_package::id); + + buildable_package lp ( + db.query_value ( + (id.tenant == b.tenant && id.name == b.package_name) + + order_by_version_desc (id.version) + + "LIMIT 1")); + + t.commit (); + + if (lp.package->version != p.version) + return; + } + } + + string subj (what + ' ' + + to_string (*b.status) + ": " + + b.package_name.string () + '/' + + b.package_version.string () + ' ' + + b.target_config_name + '/' + + b.target.string () + ' ' + + b.package_config_name + ' ' + + b.toolchain_name + '-' + b.toolchain_version.string ()); + + // Send notification emails to the interested parties. + // + auto send_email = [&b, &subj, &o, &error, trace] (const string& to) + { + try + { + if (trace != nullptr) + *trace << "email '" << subj << "' to " << to; + + // Redirect the diagnostics to webserver error log. + // + sendmail sm ([trace] (const char* args[], size_t n) + { + if (trace != nullptr) + *trace << process_args {args, n}; + }, + 2, + o.email (), + subj, + {to}); + + if (b.results.empty ()) + { + sm.out << "No operation results available." << endl; + } + else + { + const string& host (o.host ()); + const dir_path& root (o.root ()); + + ostream& os (sm.out); + + os << "combined: " << *b.status << endl << endl + << " " << build_log_url (host, root, b) << endl << endl; + + for (const auto& r: b.results) + os << r.operation << ": " << r.status << endl << endl + << " " << build_log_url (host, root, b, &r.operation) + << endl << endl; + + os << "Force rebuild (enter the reason, use '+' instead of spaces):" + << endl << endl + << " " << build_force_url (host, root, b) << endl; + } + + sm.out.close (); + + if (!sm.wait ()) + error << "sendmail " << *sm.exit; + } + // Handle process_error and io_error (both derive from system_error). + // + catch (const system_error& e) + { + error << "sendmail error: " << e; + } + }; + + // Send the build notification email if a non-empty package build email is + // specified. + // + if (const optional& e = pc.effective_email (p.build_email)) + { + if (!e->empty ()) + send_email (*e); + } + + // Send the build warning/error notification emails, if requested. + // + if (*b.status >= result_status::warning) + { + if (const optional& e = + pc.effective_warning_email (p.build_warning_email)) + send_email (*e); + } + + if (*b.status >= result_status::error) + { + if (const optional& e = + pc.effective_error_email (p.build_error_email)) + send_email (*e); + } + } } diff --git a/mod/build.hxx b/mod/build.hxx index f0846be..07e4411 100644 --- a/mod/build.hxx +++ b/mod/build.hxx @@ -4,10 +4,16 @@ #ifndef MOD_BUILD_HXX #define MOD_BUILD_HXX +#include // odb::core::connection_ptr + #include #include #include +#include + +#include +#include // Various package build-related utilities. // @@ -25,6 +31,19 @@ namespace brep // string build_force_url (const string& host, const dir_path& root, const build&); + + // Send the notification email for the specified package configuration + // build. The build is expected to be in the built state. + // + void + send_notification_email (const options::build_email_notification&, + const odb::core::connection_ptr&, + const build&, + const build_package&, + const build_package_config&, + const string& what, // build, rebuild, etc. 
+ const basic_mark& error, + const basic_mark* trace); } #endif // MOD_BUILD_HXX diff --git a/mod/mod-build-configs.cxx b/mod/mod-build-configs.cxx index 74d502a..9282544 100644 --- a/mod/mod-build-configs.cxx +++ b/mod/mod-build-configs.cxx @@ -37,6 +37,9 @@ init (scanner& s) if (options_->build_config_specified ()) build_config_module::init (*options_); + + if (options_->root ().empty ()) + options_->root (dir_path ("/")); } bool brep::build_configs:: diff --git a/mod/mod-build-log.cxx b/mod/mod-build-log.cxx index fae506b..c8e803b 100644 --- a/mod/mod-build-log.cxx +++ b/mod/mod-build-log.cxx @@ -229,9 +229,14 @@ handle (request& rq, response& rs) b = move (pb.build); if (b->state != build_state::built) + { config_expired ("state is " + to_string (b->state)); + } else + { build_db_->load (*b, b->results_section); + build_db_->load (*b, b->auxiliary_machines_section); + } t.commit (); } @@ -250,14 +255,20 @@ handle (request& rq, response& rs) if (!b->tenant.empty ()) os << options_->tenant_name () << ": " << b->tenant << endl << endl; - os << "package: " << b->package_name << endl - << "version: " << b->package_version << endl - << "toolchain: " << b->toolchain_name << '-' << b->toolchain_version << endl - << "target: " << b->target << endl - << "tgt config: " << b->target_config_name << endl - << "pkg config: " << b->package_config_name << endl - << "machine: " << b->machine << " (" << b->machine_summary << ")" << endl - << "timestamp: "; + os << "package: " << b->package_name << endl + << "version: " << b->package_version << endl + << "toolchain: " << b->toolchain_name << '-' + << b->toolchain_version << endl + << "target: " << b->target << endl + << "target config: " << b->target_config_name << endl + << "package config: " << b->package_config_name << endl + << "build machine: " << b->machine.name << " -- " + << b->machine.summary << endl; + + for (const build_machine& m: b->auxiliary_machines) + os << "auxiliary machine: " << m.name << " -- " << m.summary << endl; + + os << "timestamp: "; butl::to_stream (os, b->timestamp, diff --git a/mod/mod-build-result.cxx b/mod/mod-build-result.cxx index 22613eb..77018d9 100644 --- a/mod/mod-build-result.cxx +++ b/mod/mod-build-result.cxx @@ -6,12 +6,8 @@ #include #include -#include -#include -#include #include #include -#include #include @@ -19,10 +15,10 @@ #include #include -#include -#include +#include +#include -#include // *_url() +#include // send_notification_email() #include #include @@ -171,11 +167,6 @@ handle (request& rq, response&) tc = i->second; } - auto print_args = [&trace, this] (const char* args[], size_t n) - { - l2 ([&]{trace << process_args {args, n};}); - }; - // Load and update the package build configuration (if present). // // NULL if the package build doesn't exist or is not updated for any reason @@ -189,15 +180,11 @@ handle (request& rq, response&) // Not NULL if bld is not NULL. // shared_ptr pkg; - const build_package_config* cfg (nullptr); + build_package_config* cfg (nullptr); - // True if the built package version is the latest buildable version of this - // package in the tenant. + // Don't send email to the build-email address for the success-to-success + // status change, unless the build was forced. // - // Note: is only meaningful if bld is not NULL. 
- // - bool latest_version (false); - bool build_notify (false); bool unforced (true); @@ -314,6 +301,12 @@ handle (request& rq, response&) // greater then its soft_timestamp as an indication that the build // object represents the interrupted rebuild (see the build_task // handler for details). + // + // @@ Actually, we also unable to restore the pre-rebuild machine + // and auxiliary machines, which are also displayed in the build + // log and may potentially be confusing. Should we drop them from + // the log in this case or replace with the "machine: unknown" + // record? build_db_->update (b); } @@ -414,9 +407,6 @@ handle (request& rq, response&) unforced = (b->force == force_state::unforced); - // Don't send email to the build-email address for the - // success-to-success status change, unless the build was forced. - // build_notify = !(rs == result_status::success && b->status && *b->status == rs && @@ -477,27 +467,7 @@ handle (request& rq, response&) build_db_->load (*pkg, pkg->constraints_section); if (!exclude (*cfg, pkg->builds, pkg->constraints, *tc)) - { bld = b; - - // While at it, check if the built package version is the latest - // buildable version of this package. - // - // Ideally we would like to avoid this query unless necessary - // (mode is latest and package manifest has build-*-email - // values), but that will make things quite hairy so let's - // keep it simple for now. - // - const auto& id (query::build_package::id); - - buildable_package p ( - build_db_->query_value ( - (id.tenant == b->tenant && id.name == b->package_name) + - order_by_version_desc (id.version) + - "LIMIT 1")); - - latest_version = (p.package->version == b->package_version); - } } } else @@ -552,115 +522,22 @@ handle (request& rq, response&) update_tenant_service_state (conn, b.tenant, f); } - if (bld == nullptr) - return true; - - // Bail out if sending build notification emails is disabled for this - // toolchain for this package. - // + if (bld != nullptr) { - const map& tes (options_->build_toolchain_email ()); - auto i (tes.find (bld->id.toolchain_name)); - build_email mode (i != tes.end () ? i->second : build_email::latest); - - if (mode == build_email::none || - (mode == build_email::latest && !latest_version)) - return true; - } - - string subj ((unforced ? "build " : "rebuild ") + - to_string (*bld->status) + ": " + - bld->package_name.string () + '/' + - bld->package_version.string () + ' ' + - bld->target_config_name + '/' + - bld->target.string () + ' ' + - bld->package_config_name + ' ' + - bld->toolchain_name + '-' + bld->toolchain_version.string ()); - - // Send notification emails to the interested parties. - // - auto send_email = [&bld, &subj, &error, &trace, &print_args, this] - (const string& to) - { - try - { - l2 ([&]{trace << "email '" << subj << "' to " << to;}); - - // Redirect the diagnostics to webserver error log. - // - // Note: if using this somewhere else, then need to factor out all this - // exit status handling code. - // - sendmail sm (print_args, - 2, - options_->email (), - subj, - {to}); - - if (bld->results.empty ()) - sm.out << "No operation results available." 
<< endl; - else - { - const string& host (options_->host ()); - const dir_path& root (options_->root ()); - - ostream& os (sm.out); - - assert (bld->status); - os << "combined: " << *bld->status << endl << endl - << " " << build_log_url (host, root, *bld) << endl << endl; - - for (const auto& r: bld->results) - os << r.operation << ": " << r.status << endl << endl - << " " << build_log_url (host, root, *bld, &r.operation) - << endl << endl; - - os << "Force rebuild (enter the reason, use '+' instead of spaces):" - << endl << endl - << " " << build_force_url (host, root, *bld) << endl; - } - - sm.out.close (); - - if (!sm.wait ()) - error << "sendmail " << *sm.exit; - } - // Handle process_error and io_error (both derive from system_error). + // Don't sent the notification email for success-to-success status change, + // etc. // - catch (const system_error& e) - { - error << "sendmail error: " << e; - } - }; - - // Send the build notification email if a non-empty package build email is - // specified. - // - if (build_notify) - { - if (const optional& e = cfg->effective_email (pkg->build_email)) - { - if (!e->empty ()) - send_email (*pkg->build_email); - } - } - - assert (bld->status); - - // Send the build warning/error notification emails, if requested. - // - if (*bld->status >= result_status::warning) - { - if (const optional& e = - cfg->effective_warning_email (pkg->build_warning_email)) - send_email (*e); - } - - if (*bld->status >= result_status::error) - { - if (const optional& e = - cfg->effective_error_email (pkg->build_error_email)) - send_email (*e); + if (!build_notify) + (cfg->email ? cfg->email : pkg->build_email) = email (); + + send_notification_email (*options_, + conn, + *bld, + *pkg, + *cfg, + unforced ? "build" : "rebuild", + error, + verb_ >= 2 ? &trace : nullptr); } return true; diff --git a/mod/mod-build-task.cxx b/mod/mod-build-task.cxx index e04688f..b0b74b1 100644 --- a/mod/mod-build-task.cxx +++ b/mod/mod-build-task.cxx @@ -32,9 +32,9 @@ #include #include -#include - +#include // send_notification_email() #include +#include using namespace std; using namespace butl; @@ -52,10 +52,12 @@ rand (size_t min_val, size_t max_val) // Note that size_t is not whitelisted as a type the // uniform_int_distribution class template can be instantiated with. // - return static_cast ( - uniform_int_distribution ( - static_cast (min_val), - static_cast (max_val)) (rand_gen)); + return min_val == max_val + ? min_val + : static_cast ( + uniform_int_distribution ( + static_cast (min_val), + static_cast (max_val)) (rand_gen)); } brep::build_task:: @@ -227,9 +229,16 @@ handle (request& rq, response& rs) agent_fp = move (tqm.fingerprint); } - task_response_manifest tsm; + // The resulting task manifest and the related build, package, and + // configuration objects. Note that the latter 3 are only meaningful if the + // session in the task manifest is not empty. + // + task_response_manifest task_response; + shared_ptr task_build; + shared_ptr task_package; + const build_package_config* task_config; - auto serialize_task_response_manifest = [&tsm, &rs] () + auto serialize_task_response_manifest = [&task_response, &rs] () { // @@ Probably it would be a good idea to also send some cache control // headers to avoid caching by HTTP proxies. 
That would require @@ -238,7 +247,7 @@ handle (request& rq, response& rs) manifest_serializer s (rs.content (200, "text/manifest;charset=utf-8"), "task_response_manifest"); - tsm.serialize (s); + task_response.serialize (s); }; interactive_mode imode (tqm.effective_interactive_mode ()); @@ -278,12 +287,13 @@ handle (request& rq, response& rs) for (const build_target_config& c: *target_conf_) { - for (auto& m: tqm.machines) + for (machine_header_manifest& m: tqm.machines) { - // The same story as in exclude() from build-config.cxx. - // + if (m.effective_role () == machine_role::build) try { + // The same story as in exclude() from build-target-config.cxx. + // if (path_match (dash_components_to_path (m.name), dash_components_to_path (c.machine_pattern), dir_path () /* start */, @@ -298,6 +308,36 @@ handle (request& rq, response& rs) } } + // Collect the auxiliary configurations/machines available for the build. + // + struct auxiliary_config_machine + { + string config; + const machine_header_manifest* machine; + }; + + vector auxiliary_config_machines; + + for (const machine_header_manifest& m: tqm.machines) + { + if (m.effective_role () == machine_role::auxiliary) + { + // Derive the auxiliary configuration name by stripping the first + // (architecture) component from the machine name. + // + size_t p (m.name.find ('-')); + + if (p == string::npos || p == 0 || p == m.name.size () - 1) + throw invalid_request (400, + (string ("no ") + + (p == 0 ? "architecture" : "OS") + + " component in machine name '" + m.name + "'")); + + auxiliary_config_machines.push_back ( + auxiliary_config_machine {string (m.name, p + 1), &m}); + } + } + // Go through package build configurations until we find one that has no // build target configuration present in the database, or is in the building // state but expired (collectively called unbuilt). If such a target @@ -319,8 +359,10 @@ handle (request& rq, response& rs) // transaction. // auto task = [this] (const build& b, - build_package&& p, - build_package_config&& pc, + const build_package& p, + const build_package_config& pc, + small_vector&& tests, + vector&& ams, optional&& interactive, const config_machine& cm) -> task_response_manifest { @@ -351,81 +393,27 @@ handle (request& rq, response& rs) if (r->certificate_fingerprint) fps.emplace_back (move (*r->certificate_fingerprint)); - // Exclude external test packages which exclude the task build - // configuration. - // - small_vector tests; - - build_db_->load (p, p.requirements_tests_section); - - for (const build_test_dependency& td: p.tests) - { - // Don't exclude unresolved external tests. - // - // Note that this may result in the build task failure. However, - // silently excluding such tests could end up with missed software - // bugs which feels much worse. - // - if (td.package != nullptr) - { - shared_ptr p (td.package.load ()); - - // Use the default test package configuration. - // - // Note that potentially the test package default configuration may - // contain some (bpkg) arguments associated, but we currently don't - // provide build bot worker with such information. This, however, is - // probably too far fetched so let's keep it simple for now. - // - const build_package_config* pc (find ("default", p->configs)); - assert (pc != nullptr); // Must always be present. - - // Use the `all` class as a least restrictive default underlying - // build class set. 
Note that we should only apply the explicit - // build restrictions to the external test packages (think about - // the `builds: all` and `builds: -windows` manifest values for - // the primary and external test packages, respectively). - // - build_db_->load (*p, p->constraints_section); - - if (exclude (*pc, - p->builds, - p->constraints, - *cm.config, - nullptr /* reason */, - true /* default_all_ucs */)) - continue; - } - - tests.emplace_back (move (td.name), - td.type, - td.buildtime, - move (td.constraint), - move (td.enable), - move (td.reflect)); - } - - package_name& pn (p.id.name); + const package_name& pn (p.id.name); bool module_pkg (pn.string ().compare (0, 10, "libbuild2-") == 0); // Note that the auxiliary environment is crafted by the bbot agent // after the auxiliary machines are booted. // - task_manifest task (move (pn), - move (p.version), + task_manifest task (pn, + p.version, move (r->location), move (fps), - move (p.requirements), + p.requirements, move (tests), b.dependency_checksum, cm.machine->name, - {} /* auxiliary_machines */, // @@ TMP AUXILIARY + move (ams), cm.config->target, cm.config->environment, nullopt /* auxiliary_environment */, cm.config->args, - move (pc.arguments), + pc.arguments, belongs (*cm.config, module_pkg ? "build2" : "host"), cm.config->warning_regexes, move (interactive), @@ -836,9 +824,9 @@ handle (request& rq, response& rs) // If there are any non-archived interactive build tenants, then the // chosen randomization approach doesn't really work since interactive // tenants must be preferred over non-interactive ones, which is - // achieved by proper ordering of the package query result (see - // below). Thus, we just disable randomization if there are any - // interactive tenants. + // achieved by proper ordering of the package query result (see below). + // Thus, we just disable randomization if there are any interactive + // tenants. // // But shouldn't we randomize the order between packages in multiple // interactive tenants? Given that such a tenant may only contain a @@ -992,6 +980,12 @@ handle (request& rq, response& rs) // Return the machine id as a machine checksum. // + // Note that we don't include auxiliary machine ids into this checksum + // since a different machine will most likely get picked for a pattern. + // And we view all auxiliary machines that match a pattern as equal for + // testing purposes (in other words, pattern is not the way to get + // coverage). + // auto machine_checksum = [] (const machine_header_manifest& m) { return m.id; @@ -1102,7 +1096,232 @@ handle (request& rq, response& rs) return r; }; - for (bool done (false); tsm.session.empty () && !done; ) + // Collect the auxiliary machines required for testing of the specified + // package configuration and the external test packages, if present for + // the specified target configuration (task_auxiliary_machines), + // together with the auxiliary machines information that needs to be + // persisted in the database as a part of the build object + // (build_auxiliary_machines, which is parallel to + // task_auxiliary_machines). While at it collect the involved test + // dependencies. Return nullopt if any auxiliary configuration patterns + // may not be resolved to the auxiliary machines (no matching + // configuration, auxiliary machines RAM limit is exceeded, etc). 
+ // + // Note that if the same auxiliary environment name is used for multiple + // packages (for example, for the main and tests packages or for the + // tests and examples packages, etc), then a shared auxiliary machine is + // used for all these packages. In this case all the respective + // configuration patterns must match the configuration derived from this + // machine name. If they don't, then return nullopt. The thinking here + // is that on the next task request a machine whose derived + // configuration matches all the patterns can potentially be picked. + // + struct collect_auxiliaries_result + { + vector task_auxiliary_machines; + vector build_auxiliary_machines; + small_vector tests; + }; + + auto collect_auxiliaries = [&tqm, &auxiliary_config_machines, this] + (const shared_ptr& p, + const build_package_config& pc, + const build_target_config& tc) + -> optional + { + // The list of the picked build auxiliary machines together with the + // environment names they have been picked for. + // + vector> picked_machines; + + // Try to randomly pick the auxiliary machine that matches the + // specified pattern and which can be supplied with the minimum + // required RAM, if specified. Return false if such a machine is not + // available. If a machine is already picked for the specified + // environment name, then return true if the machine's configuration + // matches the specified pattern and false otherwise. + // + auto pick_machine = + [&tqm, + &picked_machines, + used_ram = uint64_t (0), + available_machines = auxiliary_config_machines] + (const build_auxiliary& ba) mutable -> bool + { + vector ams; // Indexes of the available matching machines. + optional ar (tqm.auxiliary_ram); + + // If the machine configuration name pattern (which is legal) or any + // of the machine configuration names (illegal) are invalid paths, + // then we assume we cannot pick the machine. + // + try + { + // The same story as in exclude() from build-target-config.cxx. + // + auto match = [pattern = dash_components_to_path (ba.config)] + (const string& config) + { + return path_match (dash_components_to_path (config), + pattern, + dir_path () /* start */, + path_match_flags::match_absent); + }; + + // Check if a machine is already picked for the specified + // environment name. + // + for (const auto& m: picked_machines) + { + if (m.second == ba.environment_name) + return match (m.first.config); + } + + // Collect the matching machines from the list of the available + // machines and bail out if there are none. + // + for (size_t i (0); i != available_machines.size (); ++i) + { + const auxiliary_config_machine& m (available_machines[i]); + optional mr (m.machine->ram_minimum); + + if (match (m.config) && (!mr || !ar || used_ram + *mr <= *ar)) + ams.push_back (i); + } + + if (ams.empty ()) + return false; + } + catch (const invalid_path&) + { + return false; + } + + // Pick the matching machine randomly. + // + size_t i (ams[rand (0, ams.size () - 1)]); + auxiliary_config_machine& cm (available_machines[i]); + + // Bump the used RAM. + // + if (optional r = cm.machine->ram_minimum) + used_ram += *r; + + // Move out the picked machine from the available machines list. + // + picked_machines.emplace_back (move (cm), ba.environment_name); + available_machines.erase (available_machines.begin () + i); + return true; + }; + + // Collect auxiliary machines for the main package build configuration. 
+ // + for (const build_auxiliary& ba: + pc.effective_auxiliaries (p->auxiliaries)) + { + if (!pick_machine (ba)) + return nullopt; // No matched auxiliary machine. + } + + // Collect the test packages and the auxiliary machines for their + // default build configurations. Exclude external test packages which + // exclude the task build configuration. + // + small_vector tests; + + build_db_->load (*p, p->requirements_tests_section); + + for (const build_test_dependency& td: p->tests) + { + // Don't exclude unresolved external tests. + // + // Note that this may result in the build task failure. However, + // silently excluding such tests could end up with missed software + // bugs which feels much worse. + // + if (td.package != nullptr) + { + shared_ptr tp (td.package.load ()); + + // Use the default test package configuration. + // + // Note that potentially the test package default configuration + // may contain some (bpkg) arguments associated, but we currently + // don't provide build bot worker with such information. This, + // however, is probably too far fetched so let's keep it simple + // for now. + // + const build_package_config* tpc (find ("default", tp->configs)); + assert (tpc != nullptr); // Must always be present. + + // Use the `all` class as a least restrictive default underlying + // build class set. Note that we should only apply the explicit + // build restrictions to the external test packages (think about + // the `builds: all` and `builds: -windows` manifest values for + // the primary and external test packages, respectively). + // + build_db_->load (*tp, tp->constraints_section); + + if (exclude (*tpc, + tp->builds, + tp->constraints, + tc, + nullptr /* reason */, + true /* default_all_ucs */)) + continue; + + for (const build_auxiliary& ba: + tpc->effective_auxiliaries (tp->auxiliaries)) + { + if (!pick_machine (ba)) + return nullopt; // No matched auxiliary machine. + } + } + + tests.emplace_back (td.name, + td.type, + td.buildtime, + td.constraint, + td.enable, + td.reflect); + } + + vector tms; + vector bms; + + tms.reserve (picked_machines.size ()); + bms.reserve (picked_machines.size ()); + + for (pair& pm: picked_machines) + { + const machine_header_manifest& m (*pm.first.machine); + tms.push_back (auxiliary_machine {m.name, move (pm.second)}); + bms.push_back (build_machine {m.name, m.summary}); + } + + return collect_auxiliaries_result { + move (tms), move (bms), move (tests)}; + }; + + // While at it, collect the aborted for various reasons builds + // (interactive builds in multiple configurations, builds with too many + // auxiliary machines, etc) to send the notification emails at the end + // of the request handling. + // + struct aborted_build + { + shared_ptr b; + shared_ptr p; + const build_package_config* pc; + const char* what; + }; + vector aborted_builds; + + // Note: is only used for crafting of the notification email subjects. 
+ // + bool unforced (true); + + for (bool done (false); task_response.session.empty () && !done; ) { transaction t (conn->begin ()); @@ -1248,9 +1467,9 @@ handle (request& rq, response& rs) // struct build_config { - package_id pid; - string pc; - const build_target_config* tc; + shared_ptr p; + const build_package_config* pc; + const build_target_config* tc; }; small_vector build_configs; @@ -1272,8 +1491,7 @@ handle (request& rq, response& rs) for (const auto& tc: *target_conf_) { if (!exclude (pc, p->builds, p->constraints, tc)) - build_configs.push_back ( - build_config {p->id, pc.name, &tc}); + build_configs.push_back (build_config {p, &pc, &tc}); } } } @@ -1287,10 +1505,11 @@ handle (request& rq, response& rs) // for (build_config& c: build_configs) { - const string& pc (c.pc); + shared_ptr& p (c.p); + const string& pc (c.pc->name); const build_target_config& tc (*c.tc); - build_id bid (c.pid, + build_id bid (p->id, tc.target, tc.name, pc, @@ -1305,40 +1524,30 @@ handle (request& rq, response& rs) if (b == nullptr) { - b = make_shared ( - move (bid.package.tenant), - move (bid.package.name), - p->version, - move (bid.target), - move (bid.target_config_name), - move (bid.package_config_name), - move (bid.toolchain_name), - toolchain_version, - nullopt /* interactive */, - nullopt /* agent_fp */, - nullopt /* agent_challenge */, - "brep" /* machine */, - "build task module" /* machine_summary */, - "" /* controller_checksum */, - "" /* machine_checksum */); - - b->state = build_state::built; - b->status = result_status::abort; - - b->soft_timestamp = b->timestamp; - b->hard_timestamp = b->soft_timestamp; - - // Mark the section as loaded, so results are updated. - // - b->results_section.load (); - - b->results.push_back ( - operation_result { - "configure", - result_status::abort, - "error: multiple configurations for interactive build\n"}); + b = make_shared (move (bid.package.tenant), + move (bid.package.name), + p->version, + move (bid.target), + move (bid.target_config_name), + move (bid.package_config_name), + move (bid.toolchain_name), + toolchain_version, + result_status::abort, + operation_results ({ + operation_result { + "configure", + result_status::abort, + "error: multiple configurations " + "for interactive build\n"}}), + build_machine { + "brep", "build task module"}); build_db_->persist (b); + + // Schedule the build notification email. + // + aborted_builds.push_back (aborted_build { + move (b), move (p), c.pc, "build"}); } } @@ -1351,7 +1560,7 @@ handle (request& rq, response& rs) } } - for (build_package_config& pc: p->configs) + for (const build_package_config& pc: p->configs) { pkg_config = pc.name; @@ -1389,17 +1598,23 @@ handle (request& rq, response& rs) if (!configs.empty ()) { // Find the first build configuration that is not excluded by - // the package configuration. + // the package configuration and for which all the requested + // auxiliary machines can be provided. 
// auto i (configs.begin ()); auto e (configs.end ()); build_db_->load (*p, p->constraints_section); - for (; - i != e && - exclude (pc, p->builds, p->constraints, *i->second.config); - ++i) ; + optional aux; + for (; i != e; ++i) + { + const build_target_config& tc (*i->second.config); + + if (!exclude (pc, p->builds, p->constraints, tc) && + (aux = collect_auxiliaries (p, pc, tc))) + break; + } if (i != e) { @@ -1440,8 +1655,9 @@ handle (request& rq, response& rs) move (login), move (agent_fp), move (cl), - mh.name, - move (mh.summary), + build_machine { + mh.name, move (mh.summary)}, + move (aux->build_auxiliary_machines), controller_checksum (*cm.config), machine_checksum (*cm.machine)); @@ -1467,6 +1683,8 @@ handle (request& rq, response& rs) b->state = build_state::building; b->interactive = move (login); + unforced = (b->force == force_state::unforced); + // Switch the force state not to reissue the task after the // forced rebuild timeout. Note that the result handler will // still recognize that the rebuild was forced. @@ -1479,8 +1697,15 @@ handle (request& rq, response& rs) b->agent_fingerprint = move (agent_fp); b->agent_challenge = move (cl); - b->machine = mh.name; - b->machine_summary = move (mh.summary); + b->machine = build_machine {mh.name, move (mh.summary)}; + + // Mark the section as loaded, so auxiliary_machines are + // updated. + // + b->auxiliary_machines_section.load (); + + b->auxiliary_machines = + move (aux->build_auxiliary_machines); string ccs (controller_checksum (*cm.config)); string mcs (machine_checksum (*cm.machine)); @@ -1552,8 +1777,17 @@ handle (request& rq, response& rs) } } - tsm = task ( - *b, move (*p), move (pc), move (bp.interactive), cm); + task_response = task (*b, + *p, + pc, + move (aux->tests), + move (aux->task_auxiliary_machines), + move (bp.interactive), + cm); + + task_build = move (b); + task_package = move (p); + task_config = &pc; break; // Bail out from the package configurations loop. } @@ -1563,7 +1797,7 @@ handle (request& rq, response& rs) // If the task response manifest is prepared, then bail out from the // package loop, commit the transaction and respond. // - if (!tsm.session.empty ()) + if (!task_response.session.empty ()) break; } @@ -1573,7 +1807,7 @@ handle (request& rq, response& rs) // If we don't have an unbuilt package, then let's see if we have a // build configuration to rebuild. // - if (tsm.session.empty () && !rebuilds.empty ()) + if (task_response.session.empty () && !rebuilds.empty ()) { // Sort the configuration rebuild list with the following sort // priority: @@ -1636,9 +1870,10 @@ handle (request& rq, response& rs) assert (i != conf_machines.end ()); const config_machine& cm (i->second); - // Rebuild the package if still present, is buildable, doesn't - // exclude the configuration, and matches the request's - // interactive mode. + // Rebuild the package configuration if still present, is + // buildable, doesn't exclude the target configuration, can be + // provided with all the requested auxiliary machines, and + // matches the request's interactive mode. // // Note that while change of the latter seems rather far fetched, // let's check it for good measure. 
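
Outside of the patch, the machine-picking idea described in the collect_auxiliaries()/pick_machine() comments above (derive the auxiliary configuration name by dropping the leading architecture component of the machine name, keep a running RAM total against the agent's auxiliary-ram limit, and pick randomly among the remaining matches) can be reduced to a short self-contained sketch. It deliberately simplifies: a plain string comparison stands in for the dash-component path_match() call, and the aux_machine type below is illustrative rather than the real machine_header_manifest:

#include <cstddef>
#include <cstdint>
#include <optional>
#include <random>
#include <string>
#include <vector>

struct aux_machine
{
  std::string name;                 // E.g., "x86_64-linux_debian_12-postgresql_16".
  std::optional<std::uint64_t> ram; // ram-minimum.
};

// Derive the auxiliary configuration name by stripping the first
// (architecture) component from the machine name.
//
static std::string
config_name (const std::string& machine)
{
  std::string::size_type p (machine.find ('-'));
  return p != std::string::npos ? std::string (machine, p + 1) : machine;
}

// Pick a random machine whose configuration matches the requested one and
// whose ram-minimum still fits into the remaining budget, updating the
// budget. Return nullptr if no machine qualifies.
//
static const aux_machine*
pick (const std::vector<aux_machine>& machines,
      const std::string& requested_config,
      std::uint64_t& ram_budget,
      std::mt19937& rng)
{
  std::vector<const aux_machine*> candidates;

  for (const aux_machine& m: machines)
  {
    if (config_name (m.name) == requested_config &&
        (!m.ram || *m.ram <= ram_budget))
      candidates.push_back (&m);
  }

  if (candidates.empty ())
    return nullptr;

  std::uniform_int_distribution<std::uint64_t> d (0, candidates.size () - 1);
  const aux_machine* r (candidates[static_cast<std::size_t> (d (rng))]);

  if (r->ram)
    ram_budget -= *r->ram;

  return r;
}
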
@@ -1664,7 +1899,11 @@ handle (request& rq, response& rs) { build_db_->load (*p, p->constraints_section); - if (!exclude (*pc, p->builds, p->constraints, *cm.config)) + const build_target_config& tc (*cm.config); + + optional aux; + if (!exclude (*pc, p->builds, p->constraints, tc) && + (aux = collect_auxiliaries (p, *pc, tc))) { assert (b->status); @@ -1684,14 +1923,23 @@ handle (request& rq, response& rs) ? tqm.interactive_login : nullopt; + unforced = (b->force == force_state::unforced); + // Can't move from, as may need them on the next iteration. // b->agent_fingerprint = agent_fp; b->agent_challenge = cl; const machine_header_manifest& mh (*cm.machine); - b->machine = mh.name; - b->machine_summary = mh.summary; + b->machine = build_machine {mh.name, mh.summary}; + + // Mark the section as loaded, so auxiliary_machines are + // updated. + // + b->auxiliary_machines_section.load (); + + b->auxiliary_machines = + move (aux->build_auxiliary_machines); // Issue the hard rebuild if the timeout expired, rebuild is // forced, or the configuration or machine has changed. @@ -1752,8 +2000,17 @@ handle (request& rq, response& rs) } } - tsm = task ( - *b, move (*p), move (*pc), move (t->interactive), cm); + task_response = task (*b, + *p, + *pc, + move (aux->tests), + move (aux->task_auxiliary_machines), + move (t->interactive), + cm); + + task_build = move (b); + task_package = move (p); + task_config = pc; } } } @@ -1765,13 +2022,13 @@ handle (request& rq, response& rs) // Just try with the next rebuild. But first, reset the task // response manifest that we may have prepared. // - tsm = task_response_manifest (); + task_response = task_response_manifest (); } // If the task response manifest is prepared, then bail out from the // package configuration rebuilds loop and respond. // - if (!tsm.session.empty ()) + if (!task_response.session.empty ()) break; } } @@ -1842,6 +2099,131 @@ handle (request& rq, response& rs) if (auto f = tsb->build_building (ss, b)) update_tenant_service_state (conn, b.tenant, f); } + + // If the task response manifest is prepared, then check that the number + // of the build auxiliary machines is less than 10. If that's not the + // case, then turn the build into the built state with the abort status. + // + if (task_response.task->auxiliary_machines.size () > 9) + { + // Respond with the no-task manifest. + // + task_response = task_response_manifest (); + + // If the package tenant has a third-party service state associated + // with it, then check if the tenant_service_build_built callback is + // registered for the type of the associated service. If it is, then + // stash the state, the build object, and the callback pointer for the + // subsequent service `built` notification. + // + const tenant_service_build_built* tsb (nullptr); + optional>> tss; + { + transaction t (conn->begin ()); + + shared_ptr b (build_db_->find (task_build->id)); + + // For good measure, check that the build object is in the building + // state and has not been updated. + // + if (b->state == build_state::building && + b->timestamp == task_build->timestamp) + { + b->state = build_state::built; + b->status = result_status::abort; + b->force = force_state::unforced; + + // Cleanup the interactive build login information. + // + b->interactive = nullopt; + + // Cleanup the authentication data. 
+ // + b->agent_fingerprint = nullopt; + b->agent_challenge = nullopt; + + b->timestamp = system_clock::now (); + b->soft_timestamp = b->timestamp; + b->hard_timestamp = b->soft_timestamp; + + // Mark the section as loaded, so results are updated. + // + b->results_section.load (); + + b->results = operation_results ({ + operation_result { + "configure", + result_status::abort, + "error: not more than 9 auxiliary machines are allowed"}}); + + b->agent_checksum = nullopt; + b->worker_checksum = nullopt; + b->dependency_checksum = nullopt; + + build_db_->update (b); + + // Schedule the `built` notification, if the + // tenant_service_build_built callback is registered for the + // tenant. + // + shared_ptr t ( + build_db_->load (b->tenant)); + + if (t->service) + { + auto i (tenant_service_map_.find (t->service->type)); + + if (i != tenant_service_map_.end ()) + { + tsb = dynamic_cast ( + i->second.get ()); + + // If required, stash the service notification information. + // + if (tsb != nullptr) + tss = make_pair (move (*t->service), b); + } + } + + // Schedule the build notification email. + // + aborted_builds.push_back ( + aborted_build {move (b), + move (task_package), + task_config, + unforced ? "build" : "rebuild"}); + } + + t.commit (); + } + + // If a third-party service needs to be notified about the built + // package, then call the tenant_service_build_built::build_built() + // callback function and update the service state, if requested. + // + if (tsb != nullptr) + { + assert (tss); // Wouldn't be here otherwise. + + const tenant_service& ss (tss->first); + const build& b (*tss->second); + + if (auto f = tsb->build_built (ss, b)) + update_tenant_service_state (conn, b.tenant, f); + } + } + + // Send notification emails for all the aborted builds. + // + for (const aborted_build& ab: aborted_builds) + send_notification_email (*options_, + conn, + *ab.b, + *ab.p, + *ab.pc, + ab.what, + error, + verb_ >= 2 ? &trace : nullptr); } } diff --git a/mod/mod-upload.cxx b/mod/mod-upload.cxx index 1474363..9f8b9de 100644 --- a/mod/mod-upload.cxx +++ b/mod/mod-upload.cxx @@ -82,9 +82,6 @@ init (scanner& s) build_result_module::init (*options_, *options_); } - - if (options_->root ().empty ()) - options_->root (dir_path ("/")); } bool brep::upload:: @@ -499,8 +496,8 @@ handle (request& rq, response& rs) s.next ("toolchain-version", sess.toolchain_version.string ()); s.next ("repository-name", rep->canonical_name); - s.next ("machine-name", bld->machine); - s.next ("machine-summary", bld->machine_summary); + s.next ("machine-name", bld->machine.name); + s.next ("machine-summary", bld->machine.summary); // Serialize the request parameters. // diff --git a/mod/module.cli b/mod/module.cli index ec86b7b..a107ffe 100644 --- a/mod/module.cli +++ b/mod/module.cli @@ -31,7 +31,7 @@ namespace brep } }; - class handler + class repository_url { string host { @@ -51,7 +51,29 @@ namespace brep Specify '\cb{/}' to use the web server root (\cb{http://example.org/})." } + }; + + class build_email_notification: repository_email, repository_url + { + std::map build-toolchain-email + { + "=", + "Enable or disable package build notification emails. The valid + values are \cb{none}, \cb{latest}, and \cb{all}. If \cb{all} is + specified for a toolchain name, then emails are sent according to the + \cb{build-*email} package manifest values when all versions of a + package are built with this toolchain. 
If \cb{latest} is specified, + then for this toolchain name the emails are only sent for the latest + version of a package. If \cb{none} is specified, then no emails are + sent for this toolchain name. By default the \cb{latest} mode is + assumed. Repeat this option to enable/disable emails for multiple + toolchains. See \l{bpkg#manifest-package Package Manifest} for + details on \cb{build-*email} values." + } + }; + class handler + { string tenant-name = "tenant" { "", @@ -486,7 +508,7 @@ namespace brep // Handler options. // - class packages: search, package_db, page, handler + class packages: search, package_db, page, repository_url, handler { string search-title = "Packages" { @@ -504,13 +526,18 @@ namespace brep } }; - class package_details: package, search, package_db, page, handler + class package_details: package, package_db, + search, + page, + repository_url, + handler { }; class package_version_details: package, package_db, build, build_db, page, + repository_url, handler { dir_path bindist-root @@ -538,11 +565,14 @@ namespace brep } }; - class repository_details: package_db, page, handler + class repository_details: package_db, page, repository_url, handler { }; - class build_task: build, build_db, build_upload, handler + class build_task: build, build_db, + build_upload, + build_email_notification, + handler { size_t build-task-request-max-size = 102400 { @@ -583,7 +613,9 @@ namespace brep } }; - class build_result: build, package_db, build_db, repository_email, handler + class build_result: build, build_db, + build_email_notification, + handler { size_t build-result-request-max-size = 10485760 { @@ -593,25 +625,9 @@ namespace brep face of recoverable failures (deadlock, loss of connection, etc). The default is 10M." } - - std::map build-toolchain-email - { - "=", - "Enable or disable package build notification emails. The valid - values are \cb{none}, \cb{latest}, and \cb{all}. If \cb{all} is - specified for a toolchain name, then emails are sent according to the - \cb{build-*email} package manifest values when all versions of a - package are built with this toolchain. If \cb{latest} is specified, - then for this toolchain name the emails are only sent for the latest - version of a package. If \cb{none} is specified, then no emails are - sent for this toolchain name. By default the \cb{latest} mode is - assumed. Repeat this option to enable/disable emails for multiple - toolchains. See \l{bpkg#manifest-package Package Manifest} for - details on \cb{build-*email} values." - } }; - class build_log: build, build_db, handler + class build_log: build, build_db, repository_url, handler { }; @@ -619,7 +635,7 @@ namespace brep { }; - class builds: build, build_db, page, handler + class builds: build, build_db, page, repository_url, handler { uint16_t build-page-entries = 20 { @@ -634,7 +650,7 @@ namespace brep } }; - class build_configs: build, page, handler + class build_configs: build, page, repository_url, handler { uint16_t build-config-page-entries = 20 { @@ -649,7 +665,7 @@ namespace brep } }; - class submit: page, repository_email, handler + class submit: page, repository_email, repository_url, handler { dir_path submit-data { @@ -784,7 +800,7 @@ namespace brep { }; - class ci: ci_start, page, handler + class ci: ci_start, page, repository_url, handler { // Classic CI-specific options. 
// @@ -810,7 +826,7 @@ namespace brep { }; - class repository_root: handler + class repository_root: repository_url, handler { string root-global-view = "packages" { diff --git a/monitor/monitor.cxx b/monitor/monitor.cxx index 27e8b84..2f49f81 100644 --- a/monitor/monitor.cxx +++ b/monitor/monitor.cxx @@ -832,6 +832,19 @@ namespace brep { for (const build_target_config& tc: configs) { + // Note that we also don't build a package configuration if we + // are unable to assign all the required auxiliary machines + // for the build (see mod/mod-build-task.cxx for details). + // That means that we will also report delays which happen due + // to such an inability, which can potentially be not only + // because of the infrastructural problem but also because of + // an error in the package manifest (build auxiliary + // configuration pattern doesn't match any machine + // configuration anymore, etc). It doesn't seem easy to + // distinguish here which type of problem causes a delay. + // Thus, for now let's wait and see if it ever becomes a + // problem. + // if (exclude (pc, p->builds, p->constraints, -- cgit v1.1
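The monitor comment above notes that mod/mod-build-task.cxx additionally refuses to build a package configuration when the requested auxiliary machines cannot all be assigned, and that delays caused by this are indistinguishable here from infrastructural ones. The stand-alone sketch below shows the general shape of that assignment: every auxiliary entry (an environment name plus a configuration name pattern) must match some available machine, otherwise the configuration cannot be provided. The types and the '*'-only glob matching are simplifications for illustration, not the actual collect_auxiliaries() implementation:

#include <map>
#include <string>
#include <vector>
#include <optional>

// Simplified stand-ins for a package's build auxiliary request and for an
// available auxiliary machine.
//
struct auxiliary_request
{
  std::string environment_name;
  std::string config_pattern;
};

struct auxiliary_machine
{
  std::string name;
};

// Match name against pattern where '*' matches any substring (a
// simplification of the real matching semantics).
//
static bool
glob_match (const char* p, const char* n)
{
  if (*p == '\0')
    return *n == '\0';

  if (*p == '*')
    return glob_match (p + 1, n) || (*n != '\0' && glob_match (p, n + 1));

  return *n != '\0' && *p == *n && glob_match (p + 1, n + 1);
}

// Assign a machine to every requested auxiliary environment. Return
// nullopt if some pattern matches no machine, in which case the target
// configuration cannot be provided with the requested auxiliaries and is
// skipped by the task handler.
//
static std::optional<std::map<std::string, std::string>>
assign_auxiliaries (const std::vector<auxiliary_request>& requests,
                    const std::vector<auxiliary_machine>& machines)
{
  std::map<std::string, std::string> r;

  for (const auxiliary_request& a: requests)
  {
    const auxiliary_machine* m (nullptr);

    for (const auxiliary_machine& c: machines)
    {
      if (glob_match (a.config_pattern.c_str (), c.name.c_str ()))
      {
        m = &c;
        break;
      }
    }

    if (m == nullptr)
      return std::nullopt;

    r[a.environment_name] = m->name;
  }

  return r;
}

The real collect_auxiliaries() additionally produces the task and build auxiliary machine lists seen in the mod/mod-build-task.cxx hunks above and is subject to the 9-machine cap enforced earlier in the handler; the nullopt-on-no-match outcome is what both the task handler and, indirectly, the monitor's delay reporting key on.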