Diffstat (limited to 'mod')
-rw-r--r--  mod/build-result-module.cxx      2
-rw-r--r--  mod/build.cxx                  150
-rw-r--r--  mod/build.hxx                   19
-rw-r--r--  mod/mod-build-configs.cxx        3
-rw-r--r--  mod/mod-build-log.cxx           27
-rw-r--r--  mod/mod-build-result.cxx       175
-rw-r--r--  mod/mod-build-task.cxx         664
-rw-r--r--  mod/mod-upload.cxx               7
-rw-r--r--  mod/module.cli                  72
9 files changed, 788 insertions, 331 deletions
diff --git a/mod/build-result-module.cxx b/mod/build-result-module.cxx
index 7823e3a..68fbe4c 100644
--- a/mod/build-result-module.cxx
+++ b/mod/build-result-module.cxx
@@ -4,7 +4,9 @@
#include <mod/build-result-module.hxx>
#include <libbutl/openssl.hxx>
+#include <libbutl/fdstream.hxx>
#include <libbutl/process-io.hxx>
+#include <libbutl/semantic-version.hxx>
namespace brep
{
diff --git a/mod/build.cxx b/mod/build.cxx
index 4abd416..5c37acb 100644
--- a/mod/build.cxx
+++ b/mod/build.cxx
@@ -3,12 +3,22 @@
#include <mod/build.hxx>
+#include <odb/database.hxx>
+#include <odb/connection.hxx>
+#include <odb/transaction.hxx>
+
+#include <libbutl/sendmail.hxx>
+#include <libbutl/process-io.hxx>
+
#include <web/server/mime-url-encoding.hxx>
+#include <libbrep/build-package-odb.hxx>
+
#include <mod/utility.hxx>
namespace brep
{
+ using namespace std;
using namespace web;
string
@@ -57,4 +67,144 @@ namespace brep
"&tv=" + b.toolchain_version.string () +
"&reason=";
}
+
+ void
+ send_notification_email (const options::build_email_notification& o,
+ const odb::core::connection_ptr& conn,
+ const build& b,
+ const build_package& p,
+ const build_package_config& pc,
+ const string& what,
+ const basic_mark& error,
+ const basic_mark* trace)
+ {
+ using namespace odb::core;
+ using namespace butl;
+
+ assert (b.state == build_state::built && b.status);
+
+ // Bail out if sending build notification emails is disabled for this
+ // toolchain for this package.
+ //
+ {
+ const map<string, build_email>& tes (o.build_toolchain_email ());
+ auto i (tes.find (b.id.toolchain_name));
+ build_email mode (i != tes.end () ? i->second : build_email::latest);
+
+ if (mode == build_email::none)
+ {
+ return;
+ }
+ else if (mode == build_email::latest)
+ {
+ transaction t (conn->begin ());
+ database& db (t.database ());
+
+ const auto& id (query<buildable_package>::build_package::id);
+
+ buildable_package lp (
+ db.query_value<buildable_package> (
+ (id.tenant == b.tenant && id.name == b.package_name) +
+ order_by_version_desc (id.version) +
+ "LIMIT 1"));
+
+ t.commit ();
+
+ if (lp.package->version != p.version)
+ return;
+ }
+ }
+
+ string subj (what + ' ' +
+ to_string (*b.status) + ": " +
+ b.package_name.string () + '/' +
+ b.package_version.string () + ' ' +
+ b.target_config_name + '/' +
+ b.target.string () + ' ' +
+ b.package_config_name + ' ' +
+ b.toolchain_name + '-' + b.toolchain_version.string ());
+
+ // Send notification emails to the interested parties.
+ //
+ auto send_email = [&b, &subj, &o, &error, trace] (const string& to)
+ {
+ try
+ {
+ if (trace != nullptr)
+ *trace << "email '" << subj << "' to " << to;
+
+ // Redirect the diagnostics to webserver error log.
+ //
+ sendmail sm ([trace] (const char* args[], size_t n)
+ {
+ if (trace != nullptr)
+ *trace << process_args {args, n};
+ },
+ 2,
+ o.email (),
+ subj,
+ {to});
+
+ if (b.results.empty ())
+ {
+ sm.out << "No operation results available." << endl;
+ }
+ else
+ {
+ const string& host (o.host ());
+ const dir_path& root (o.root ());
+
+ ostream& os (sm.out);
+
+ os << "combined: " << *b.status << endl << endl
+ << " " << build_log_url (host, root, b) << endl << endl;
+
+ for (const auto& r: b.results)
+ os << r.operation << ": " << r.status << endl << endl
+ << " " << build_log_url (host, root, b, &r.operation)
+ << endl << endl;
+
+ os << "Force rebuild (enter the reason, use '+' instead of spaces):"
+ << endl << endl
+ << " " << build_force_url (host, root, b) << endl;
+ }
+
+ sm.out.close ();
+
+ if (!sm.wait ())
+ error << "sendmail " << *sm.exit;
+ }
+ // Handle process_error and io_error (both derive from system_error).
+ //
+ catch (const system_error& e)
+ {
+ error << "sendmail error: " << e;
+ }
+ };
+
+ // Send the build notification email if a non-empty package build email is
+ // specified.
+ //
+ if (const optional<email>& e = pc.effective_email (p.build_email))
+ {
+ if (!e->empty ())
+ send_email (*e);
+ }
+
+ // Send the build warning/error notification emails, if requested.
+ //
+ if (*b.status >= result_status::warning)
+ {
+ if (const optional<email>& e =
+ pc.effective_warning_email (p.build_warning_email))
+ send_email (*e);
+ }
+
+ if (*b.status >= result_status::error)
+ {
+ if (const optional<email>& e =
+ pc.effective_error_email (p.build_error_email))
+ send_email (*e);
+ }
+ }
}
diff --git a/mod/build.hxx b/mod/build.hxx
index f0846be..07e4411 100644
--- a/mod/build.hxx
+++ b/mod/build.hxx
@@ -4,10 +4,16 @@
#ifndef MOD_BUILD_HXX
#define MOD_BUILD_HXX
+#include <odb/forward.hxx> // odb::core::connection_ptr
+
#include <libbrep/types.hxx>
#include <libbrep/utility.hxx>
#include <libbrep/build.hxx>
+#include <libbrep/build-package.hxx>
+
+#include <mod/diagnostics.hxx>
+#include <mod/module-options.hxx>
// Various package build-related utilities.
//
@@ -25,6 +31,19 @@ namespace brep
//
string
build_force_url (const string& host, const dir_path& root, const build&);
+
+ // Send the notification email for the specified package configuration
+ // build. The build is expected to be in the built state.
+ //
+ void
+ send_notification_email (const options::build_email_notification&,
+ const odb::core::connection_ptr&,
+ const build&,
+ const build_package&,
+ const build_package_config&,
+ const string& what, // build, rebuild, etc.
+ const basic_mark& error,
+ const basic_mark* trace);
}
#endif // MOD_BUILD_HXX
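
A rough usage sketch of the new API (hypothetical handler context: conn stands for a previously obtained odb connection; options_, error, trace, and verb_ for the usual brep handler members; and b, p, pc for the already-loaded build, package, and package configuration objects — see the mod-build-result.cxx and mod-build-task.cxx changes below for the actual call sites):

  // Notify the interested parties about a package configuration build that
  // has reached the built state.
  //
  // assert (b.state == build_state::built && b.status);
  //
  send_notification_email (*options_,  // options::build_email_notification
                           conn,       // odb::core::connection_ptr
                           b,          // build (in the built state)
                           p,          // build_package
                           pc,         // build_package_config
                           "build",    // what ("build", "rebuild", etc.)
                           error,
                           verb_ >= 2 ? &trace : nullptr);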
diff --git a/mod/mod-build-configs.cxx b/mod/mod-build-configs.cxx
index 74d502a..9282544 100644
--- a/mod/mod-build-configs.cxx
+++ b/mod/mod-build-configs.cxx
@@ -37,6 +37,9 @@ init (scanner& s)
if (options_->build_config_specified ())
build_config_module::init (*options_);
+
+ if (options_->root ().empty ())
+ options_->root (dir_path ("/"));
}
bool brep::build_configs::
diff --git a/mod/mod-build-log.cxx b/mod/mod-build-log.cxx
index fae506b..c8e803b 100644
--- a/mod/mod-build-log.cxx
+++ b/mod/mod-build-log.cxx
@@ -229,9 +229,14 @@ handle (request& rq, response& rs)
b = move (pb.build);
if (b->state != build_state::built)
+ {
config_expired ("state is " + to_string (b->state));
+ }
else
+ {
build_db_->load (*b, b->results_section);
+ build_db_->load (*b, b->auxiliary_machines_section);
+ }
t.commit ();
}
@@ -250,14 +255,20 @@ handle (request& rq, response& rs)
if (!b->tenant.empty ())
os << options_->tenant_name () << ": " << b->tenant << endl << endl;
- os << "package: " << b->package_name << endl
- << "version: " << b->package_version << endl
- << "toolchain: " << b->toolchain_name << '-' << b->toolchain_version << endl
- << "target: " << b->target << endl
- << "tgt config: " << b->target_config_name << endl
- << "pkg config: " << b->package_config_name << endl
- << "machine: " << b->machine << " (" << b->machine_summary << ")" << endl
- << "timestamp: ";
+ os << "package: " << b->package_name << endl
+ << "version: " << b->package_version << endl
+ << "toolchain: " << b->toolchain_name << '-'
+ << b->toolchain_version << endl
+ << "target: " << b->target << endl
+ << "target config: " << b->target_config_name << endl
+ << "package config: " << b->package_config_name << endl
+ << "build machine: " << b->machine.name << " -- "
+ << b->machine.summary << endl;
+
+ for (const build_machine& m: b->auxiliary_machines)
+ os << "auxiliary machine: " << m.name << " -- " << m.summary << endl;
+
+ os << "timestamp: ";
butl::to_stream (os,
b->timestamp,
diff --git a/mod/mod-build-result.cxx b/mod/mod-build-result.cxx
index 22613eb..77018d9 100644
--- a/mod/mod-build-result.cxx
+++ b/mod/mod-build-result.cxx
@@ -6,12 +6,8 @@
#include <odb/database.hxx>
#include <odb/transaction.hxx>
-#include <libbutl/sendmail.hxx>
-#include <libbutl/fdstream.hxx>
-#include <libbutl/process-io.hxx>
#include <libbutl/manifest-parser.hxx>
#include <libbutl/manifest-serializer.hxx>
-#include <libbutl/semantic-version.hxx>
#include <libbbot/manifest.hxx>
@@ -19,10 +15,10 @@
#include <libbrep/build.hxx>
#include <libbrep/build-odb.hxx>
-#include <libbrep/package.hxx>
-#include <libbrep/package-odb.hxx>
+#include <libbrep/build-package.hxx>
+#include <libbrep/build-package-odb.hxx>
-#include <mod/build.hxx> // *_url()
+#include <mod/build.hxx> // send_notification_email()
#include <mod/module-options.hxx>
#include <mod/tenant-service.hxx>
@@ -171,11 +167,6 @@ handle (request& rq, response&)
tc = i->second;
}
- auto print_args = [&trace, this] (const char* args[], size_t n)
- {
- l2 ([&]{trace << process_args {args, n};});
- };
-
// Load and update the package build configuration (if present).
//
// NULL if the package build doesn't exist or is not updated for any reason
@@ -189,15 +180,11 @@ handle (request& rq, response&)
// Not NULL if bld is not NULL.
//
shared_ptr<build_package> pkg;
- const build_package_config* cfg (nullptr);
+ build_package_config* cfg (nullptr);
- // True if the built package version is the latest buildable version of this
- // package in the tenant.
+ // Don't send email to the build-email address for the success-to-success
+ // status change, unless the build was forced.
//
- // Note: is only meaningful if bld is not NULL.
- //
- bool latest_version (false);
-
bool build_notify (false);
bool unforced (true);
@@ -314,6 +301,12 @@ handle (request& rq, response&)
// greater than its soft_timestamp as an indication that the build
// object represents the interrupted rebuild (see the build_task
// handler for details).
+ //
+ // @@ Actually, we are also unable to restore the pre-rebuild machine
+ // and auxiliary machines, which are also displayed in the build
+ // log and may potentially be confusing. Should we drop them from
+ // the log in this case or replace them with a "machine: unknown"
+ // record?
build_db_->update (b);
}
@@ -414,9 +407,6 @@ handle (request& rq, response&)
unforced = (b->force == force_state::unforced);
- // Don't send email to the build-email address for the
- // success-to-success status change, unless the build was forced.
- //
build_notify = !(rs == result_status::success &&
b->status &&
*b->status == rs &&
@@ -477,27 +467,7 @@ handle (request& rq, response&)
build_db_->load (*pkg, pkg->constraints_section);
if (!exclude (*cfg, pkg->builds, pkg->constraints, *tc))
- {
bld = b;
-
- // While at it, check if the built package version is the latest
- // buildable version of this package.
- //
- // Ideally we would like to avoid this query unless necessary
- // (mode is latest and package manifest has build-*-email
- // values), but that will make things quite hairy so let's
- // keep it simple for now.
- //
- const auto& id (query<buildable_package>::build_package::id);
-
- buildable_package p (
- build_db_->query_value<buildable_package> (
- (id.tenant == b->tenant && id.name == b->package_name) +
- order_by_version_desc (id.version) +
- "LIMIT 1"));
-
- latest_version = (p.package->version == b->package_version);
- }
}
}
else
@@ -552,115 +522,22 @@ handle (request& rq, response&)
update_tenant_service_state (conn, b.tenant, f);
}
- if (bld == nullptr)
- return true;
-
- // Bail out if sending build notification emails is disabled for this
- // toolchain for this package.
- //
+ if (bld != nullptr)
{
- const map<string, build_email>& tes (options_->build_toolchain_email ());
- auto i (tes.find (bld->id.toolchain_name));
- build_email mode (i != tes.end () ? i->second : build_email::latest);
-
- if (mode == build_email::none ||
- (mode == build_email::latest && !latest_version))
- return true;
- }
-
- string subj ((unforced ? "build " : "rebuild ") +
- to_string (*bld->status) + ": " +
- bld->package_name.string () + '/' +
- bld->package_version.string () + ' ' +
- bld->target_config_name + '/' +
- bld->target.string () + ' ' +
- bld->package_config_name + ' ' +
- bld->toolchain_name + '-' + bld->toolchain_version.string ());
-
- // Send notification emails to the interested parties.
- //
- auto send_email = [&bld, &subj, &error, &trace, &print_args, this]
- (const string& to)
- {
- try
- {
- l2 ([&]{trace << "email '" << subj << "' to " << to;});
-
- // Redirect the diagnostics to webserver error log.
- //
- // Note: if using this somewhere else, then need to factor out all this
- // exit status handling code.
- //
- sendmail sm (print_args,
- 2,
- options_->email (),
- subj,
- {to});
-
- if (bld->results.empty ())
- sm.out << "No operation results available." << endl;
- else
- {
- const string& host (options_->host ());
- const dir_path& root (options_->root ());
-
- ostream& os (sm.out);
-
- assert (bld->status);
- os << "combined: " << *bld->status << endl << endl
- << " " << build_log_url (host, root, *bld) << endl << endl;
-
- for (const auto& r: bld->results)
- os << r.operation << ": " << r.status << endl << endl
- << " " << build_log_url (host, root, *bld, &r.operation)
- << endl << endl;
-
- os << "Force rebuild (enter the reason, use '+' instead of spaces):"
- << endl << endl
- << " " << build_force_url (host, root, *bld) << endl;
- }
-
- sm.out.close ();
-
- if (!sm.wait ())
- error << "sendmail " << *sm.exit;
- }
- // Handle process_error and io_error (both derive from system_error).
+ // Don't send the notification email for the success-to-success status
+ // change, etc. (this is achieved by resetting the effective package build
+ // email to the empty value).
//
- catch (const system_error& e)
- {
- error << "sendmail error: " << e;
- }
- };
-
- // Send the build notification email if a non-empty package build email is
- // specified.
- //
- if (build_notify)
- {
- if (const optional<email>& e = cfg->effective_email (pkg->build_email))
- {
- if (!e->empty ())
- send_email (*pkg->build_email);
- }
- }
-
- assert (bld->status);
-
- // Send the build warning/error notification emails, if requested.
- //
- if (*bld->status >= result_status::warning)
- {
- if (const optional<email>& e =
- cfg->effective_warning_email (pkg->build_warning_email))
- send_email (*e);
- }
-
- if (*bld->status >= result_status::error)
- {
- if (const optional<email>& e =
- cfg->effective_error_email (pkg->build_error_email))
- send_email (*e);
+ if (!build_notify)
+ (cfg->email ? cfg->email : pkg->build_email) = email ();
+
+ send_notification_email (*options_,
+ conn,
+ *bld,
+ *pkg,
+ *cfg,
+ unforced ? "build" : "rebuild",
+ error,
+ verb_ >= 2 ? &trace : nullptr);
}
return true;
diff --git a/mod/mod-build-task.cxx b/mod/mod-build-task.cxx
index e04688f..b0b74b1 100644
--- a/mod/mod-build-task.cxx
+++ b/mod/mod-build-task.cxx
@@ -32,9 +32,9 @@
#include <libbrep/build-package.hxx>
#include <libbrep/build-package-odb.hxx>
-#include <mod/build-target-config.hxx>
-
+#include <mod/build.hxx> // send_notification_email()
#include <mod/module-options.hxx>
+#include <mod/build-target-config.hxx>
using namespace std;
using namespace butl;
@@ -52,10 +52,12 @@ rand (size_t min_val, size_t max_val)
// Note that size_t is not whitelisted as a type the
// uniform_int_distribution class template can be instantiated with.
//
- return static_cast<size_t> (
- uniform_int_distribution<unsigned long long> (
- static_cast<unsigned long long> (min_val),
- static_cast<unsigned long long> (max_val)) (rand_gen));
+ return min_val == max_val
+ ? min_val
+ : static_cast<size_t> (
+ uniform_int_distribution<unsigned long long> (
+ static_cast<unsigned long long> (min_val),
+ static_cast<unsigned long long> (max_val)) (rand_gen));
}
brep::build_task::
@@ -227,9 +229,16 @@ handle (request& rq, response& rs)
agent_fp = move (tqm.fingerprint);
}
- task_response_manifest tsm;
+ // The resulting task manifest and the related build, package, and
+ // configuration objects. Note that the latter 3 are only meaningful if the
+ // session in the task manifest is not empty.
+ //
+ task_response_manifest task_response;
+ shared_ptr<build> task_build;
+ shared_ptr<build_package> task_package;
+ const build_package_config* task_config;
- auto serialize_task_response_manifest = [&tsm, &rs] ()
+ auto serialize_task_response_manifest = [&task_response, &rs] ()
{
// @@ Probably it would be a good idea to also send some cache control
// headers to avoid caching by HTTP proxies. That would require
@@ -238,7 +247,7 @@ handle (request& rq, response& rs)
manifest_serializer s (rs.content (200, "text/manifest;charset=utf-8"),
"task_response_manifest");
- tsm.serialize (s);
+ task_response.serialize (s);
};
interactive_mode imode (tqm.effective_interactive_mode ());
@@ -278,12 +287,13 @@ handle (request& rq, response& rs)
for (const build_target_config& c: *target_conf_)
{
- for (auto& m: tqm.machines)
+ for (machine_header_manifest& m: tqm.machines)
{
- // The same story as in exclude() from build-config.cxx.
- //
+ if (m.effective_role () == machine_role::build)
try
{
+ // The same story as in exclude() from build-target-config.cxx.
+ //
if (path_match (dash_components_to_path (m.name),
dash_components_to_path (c.machine_pattern),
dir_path () /* start */,
@@ -298,6 +308,36 @@ handle (request& rq, response& rs)
}
}
+ // Collect the auxiliary configurations/machines available for the build.
+ //
+ struct auxiliary_config_machine
+ {
+ string config;
+ const machine_header_manifest* machine;
+ };
+
+ vector<auxiliary_config_machine> auxiliary_config_machines;
+
+ for (const machine_header_manifest& m: tqm.machines)
+ {
+ if (m.effective_role () == machine_role::auxiliary)
+ {
+ // Derive the auxiliary configuration name by stripping the first
+ // (architecture) component from the machine name.
+ //
+ size_t p (m.name.find ('-'));
+
+ if (p == string::npos || p == 0 || p == m.name.size () - 1)
+ throw invalid_request (400,
+ (string ("no ") +
+ (p == 0 ? "architecture" : "OS") +
+ " component in machine name '" + m.name + "'"));
+
+ auxiliary_config_machines.push_back (
+ auxiliary_config_machine {string (m.name, p + 1), &m});
+ }
+ }
+
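
For illustration, the derivation above keeps everything after the first dash of the machine name; with a hypothetical auxiliary machine name (the name is made up, the logic mirrors the string (m.name, p + 1) call) it works out as follows:

  std::string name ("x86_64-linux_debian_12-postgresql_16"); // <arch>-<config>
  std::size_t p (name.find ('-'));                           // p == 6
  // p is neither npos, nor 0, nor the last position, so the name is valid.
  std::string config (name, p + 1); // "linux_debian_12-postgresql_16"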
// Go through package build configurations until we find one that has no
// build target configuration present in the database, or is in the building
// state but expired (collectively called unbuilt). If such a target
@@ -319,8 +359,10 @@ handle (request& rq, response& rs)
// transaction.
//
auto task = [this] (const build& b,
- build_package&& p,
- build_package_config&& pc,
+ const build_package& p,
+ const build_package_config& pc,
+ small_vector<bpkg::test_dependency, 1>&& tests,
+ vector<auxiliary_machine>&& ams,
optional<string>&& interactive,
const config_machine& cm) -> task_response_manifest
{
@@ -351,81 +393,27 @@ handle (request& rq, response& rs)
if (r->certificate_fingerprint)
fps.emplace_back (move (*r->certificate_fingerprint));
- // Exclude external test packages which exclude the task build
- // configuration.
- //
- small_vector<bpkg::test_dependency, 1> tests;
-
- build_db_->load (p, p.requirements_tests_section);
-
- for (const build_test_dependency& td: p.tests)
- {
- // Don't exclude unresolved external tests.
- //
- // Note that this may result in the build task failure. However,
- // silently excluding such tests could end up with missed software
- // bugs which feels much worse.
- //
- if (td.package != nullptr)
- {
- shared_ptr<build_package> p (td.package.load ());
-
- // Use the default test package configuration.
- //
- // Note that potentially the test package default configuration may
- // contain some (bpkg) arguments associated, but we currently don't
- // provide build bot worker with such information. This, however, is
- // probably too far fetched so let's keep it simple for now.
- //
- const build_package_config* pc (find ("default", p->configs));
- assert (pc != nullptr); // Must always be present.
-
- // Use the `all` class as a least restrictive default underlying
- // build class set. Note that we should only apply the explicit
- // build restrictions to the external test packages (think about
- // the `builds: all` and `builds: -windows` manifest values for
- // the primary and external test packages, respectively).
- //
- build_db_->load (*p, p->constraints_section);
-
- if (exclude (*pc,
- p->builds,
- p->constraints,
- *cm.config,
- nullptr /* reason */,
- true /* default_all_ucs */))
- continue;
- }
-
- tests.emplace_back (move (td.name),
- td.type,
- td.buildtime,
- move (td.constraint),
- move (td.enable),
- move (td.reflect));
- }
-
- package_name& pn (p.id.name);
+ const package_name& pn (p.id.name);
bool module_pkg (pn.string ().compare (0, 10, "libbuild2-") == 0);
// Note that the auxiliary environment is crafted by the bbot agent
// after the auxiliary machines are booted.
//
- task_manifest task (move (pn),
- move (p.version),
+ task_manifest task (pn,
+ p.version,
move (r->location),
move (fps),
- move (p.requirements),
+ p.requirements,
move (tests),
b.dependency_checksum,
cm.machine->name,
- {} /* auxiliary_machines */, // @@ TMP AUXILIARY
+ move (ams),
cm.config->target,
cm.config->environment,
nullopt /* auxiliary_environment */,
cm.config->args,
- move (pc.arguments),
+ pc.arguments,
belongs (*cm.config, module_pkg ? "build2" : "host"),
cm.config->warning_regexes,
move (interactive),
@@ -836,9 +824,9 @@ handle (request& rq, response& rs)
// If there are any non-archived interactive build tenants, then the
// chosen randomization approach doesn't really work since interactive
// tenants must be preferred over non-interactive ones, which is
- // achieved by proper ordering of the package query result (see
- // below). Thus, we just disable randomization if there are any
- // interactive tenants.
+ // achieved by proper ordering of the package query result (see below).
+ // Thus, we just disable randomization if there are any interactive
+ // tenants.
//
// But shouldn't we randomize the order between packages in multiple
// interactive tenants? Given that such a tenant may only contain a
@@ -992,6 +980,12 @@ handle (request& rq, response& rs)
// Return the machine id as a machine checksum.
//
+ // Note that we don't include auxiliary machine ids in this checksum
+ // since a different machine will most likely get picked for a pattern.
+ // And we view all auxiliary machines that match a pattern as equal for
+ // testing purposes (in other words, a pattern is not a way to get
+ // coverage).
+ //
auto machine_checksum = [] (const machine_header_manifest& m)
{
return m.id;
@@ -1102,7 +1096,232 @@ handle (request& rq, response& rs)
return r;
};
- for (bool done (false); tsm.session.empty () && !done; )
+ // Collect the auxiliary machines required for testing of the specified
+ // package configuration and the external test packages, if present for
+ // the specified target configuration (task_auxiliary_machines),
+ // together with the auxiliary machines information that needs to be
+ // persisted in the database as a part of the build object
+ // (build_auxiliary_machines, which is parallel to
+ // task_auxiliary_machines). While at it, collect the involved test
+ // dependencies. Return nullopt if any of the auxiliary configuration
+ // patterns cannot be resolved to auxiliary machines (no matching
+ // configuration, the auxiliary machines RAM limit is exceeded, etc).
+ //
+ // Note that if the same auxiliary environment name is used for multiple
+ // packages (for example, for the main and tests packages or for the
+ // tests and examples packages, etc), then a shared auxiliary machine is
+ // used for all these packages. In this case all the respective
+ // configuration patterns must match the configuration derived from this
+ // machine name. If they don't, then return nullopt. The thinking here
+ // is that on the next task request a machine whose derived
+ // configuration matches all the patterns can potentially be picked.
+ //
+ struct collect_auxiliaries_result
+ {
+ vector<auxiliary_machine> task_auxiliary_machines;
+ vector<build_machine> build_auxiliary_machines;
+ small_vector<bpkg::test_dependency, 1> tests;
+ };
+
+ auto collect_auxiliaries = [&tqm, &auxiliary_config_machines, this]
+ (const shared_ptr<build_package>& p,
+ const build_package_config& pc,
+ const build_target_config& tc)
+ -> optional<collect_auxiliaries_result>
+ {
+ // The list of the picked build auxiliary machines together with the
+ // environment names they have been picked for.
+ //
+ vector<pair<auxiliary_config_machine, string>> picked_machines;
+
+ // Try to randomly pick the auxiliary machine that matches the
+ // specified pattern and which can be supplied with the minimum
+ // required RAM, if specified. Return false if such a machine is not
+ // available. If a machine is already picked for the specified
+ // environment name, then return true if the machine's configuration
+ // matches the specified pattern and false otherwise.
+ //
+ auto pick_machine =
+ [&tqm,
+ &picked_machines,
+ used_ram = uint64_t (0),
+ available_machines = auxiliary_config_machines]
+ (const build_auxiliary& ba) mutable -> bool
+ {
+ vector<size_t> ams; // Indexes of the available matching machines.
+ optional<uint64_t> ar (tqm.auxiliary_ram);
+
+ // If the machine configuration name pattern (which is legal) or any
+ // of the machine configuration names (illegal) are invalid paths,
+ // then we assume we cannot pick the machine.
+ //
+ try
+ {
+ // The same story as in exclude() from build-target-config.cxx.
+ //
+ auto match = [pattern = dash_components_to_path (ba.config)]
+ (const string& config)
+ {
+ return path_match (dash_components_to_path (config),
+ pattern,
+ dir_path () /* start */,
+ path_match_flags::match_absent);
+ };
+
+ // Check if a machine is already picked for the specified
+ // environment name.
+ //
+ for (const auto& m: picked_machines)
+ {
+ if (m.second == ba.environment_name)
+ return match (m.first.config);
+ }
+
+ // Collect the matching machines from the list of the available
+ // machines and bail out if there are none.
+ //
+ for (size_t i (0); i != available_machines.size (); ++i)
+ {
+ const auxiliary_config_machine& m (available_machines[i]);
+ optional<uint64_t> mr (m.machine->ram_minimum);
+
+ if (match (m.config) && (!mr || !ar || used_ram + *mr <= *ar))
+ ams.push_back (i);
+ }
+
+ if (ams.empty ())
+ return false;
+ }
+ catch (const invalid_path&)
+ {
+ return false;
+ }
+
+ // Pick the matching machine randomly.
+ //
+ size_t i (ams[rand (0, ams.size () - 1)]);
+ auxiliary_config_machine& cm (available_machines[i]);
+
+ // Bump the used RAM.
+ //
+ if (optional<uint64_t> r = cm.machine->ram_minimum)
+ used_ram += *r;
+
+ // Move out the picked machine from the available machines list.
+ //
+ picked_machines.emplace_back (move (cm), ba.environment_name);
+ available_machines.erase (available_machines.begin () + i);
+ return true;
+ };
+
+ // Collect auxiliary machines for the main package build configuration.
+ //
+ for (const build_auxiliary& ba:
+ pc.effective_auxiliaries (p->auxiliaries))
+ {
+ if (!pick_machine (ba))
+ return nullopt; // No matched auxiliary machine.
+ }
+
+ // Collect the test packages and the auxiliary machines for their
+ // default build configurations. Exclude external test packages which
+ // exclude the task build configuration.
+ //
+ small_vector<bpkg::test_dependency, 1> tests;
+
+ build_db_->load (*p, p->requirements_tests_section);
+
+ for (const build_test_dependency& td: p->tests)
+ {
+ // Don't exclude unresolved external tests.
+ //
+ // Note that this may result in the build task failure. However,
+ // silently excluding such tests could end up with missed software
+ // bugs which feels much worse.
+ //
+ if (td.package != nullptr)
+ {
+ shared_ptr<build_package> tp (td.package.load ());
+
+ // Use the default test package configuration.
+ //
+ // Note that potentially the test package default configuration
+ // may contain some (bpkg) arguments associated, but we currently
+ // don't provide build bot worker with such information. This,
+ // however, is probably too far fetched so let's keep it simple
+ // for now.
+ //
+ const build_package_config* tpc (find ("default", tp->configs));
+ assert (tpc != nullptr); // Must always be present.
+
+ // Use the `all` class as a least restrictive default underlying
+ // build class set. Note that we should only apply the explicit
+ // build restrictions to the external test packages (think about
+ // the `builds: all` and `builds: -windows` manifest values for
+ // the primary and external test packages, respectively).
+ //
+ build_db_->load (*tp, tp->constraints_section);
+
+ if (exclude (*tpc,
+ tp->builds,
+ tp->constraints,
+ tc,
+ nullptr /* reason */,
+ true /* default_all_ucs */))
+ continue;
+
+ for (const build_auxiliary& ba:
+ tpc->effective_auxiliaries (tp->auxiliaries))
+ {
+ if (!pick_machine (ba))
+ return nullopt; // No matched auxiliary machine.
+ }
+ }
+
+ tests.emplace_back (td.name,
+ td.type,
+ td.buildtime,
+ td.constraint,
+ td.enable,
+ td.reflect);
+ }
+
+ vector<auxiliary_machine> tms;
+ vector<build_machine> bms;
+
+ tms.reserve (picked_machines.size ());
+ bms.reserve (picked_machines.size ());
+
+ for (pair<auxiliary_config_machine, string>& pm: picked_machines)
+ {
+ const machine_header_manifest& m (*pm.first.machine);
+ tms.push_back (auxiliary_machine {m.name, move (pm.second)});
+ bms.push_back (build_machine {m.name, m.summary});
+ }
+
+ return collect_auxiliaries_result {
+ move (tms), move (bms), move (tests)};
+ };
+
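
To illustrate the RAM-limited picking above with hypothetical numbers: if the task request specifies an auxiliary_ram limit of 8 (in the manifest's RAM units) and two auxiliary environments end up with matching machines whose ram_minimum values are 6 and 4, then after the first machine is picked used_ram becomes 6, the second candidate no longer fits (6 + 4 > 8), pick_machine() returns false, and collect_auxiliaries() returns nullopt, so the next target configuration (or a later task request) gets a chance instead.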
+ // While at it, collect the builds aborted for various reasons
+ // (interactive builds in multiple configurations, builds with too many
+ // auxiliary machines, etc) to send the notification emails at the end
+ // of the request handling.
+ //
+ struct aborted_build
+ {
+ shared_ptr<build> b;
+ shared_ptr<build_package> p;
+ const build_package_config* pc;
+ const char* what;
+ };
+ vector<aborted_build> aborted_builds;
+
+ // Note: only used for crafting the notification email subjects.
+ //
+ bool unforced (true);
+
+ for (bool done (false); task_response.session.empty () && !done; )
{
transaction t (conn->begin ());
@@ -1248,9 +1467,9 @@ handle (request& rq, response& rs)
//
struct build_config
{
- package_id pid;
- string pc;
- const build_target_config* tc;
+ shared_ptr<build_package> p;
+ const build_package_config* pc;
+ const build_target_config* tc;
};
small_vector<build_config, 1> build_configs;
@@ -1272,8 +1491,7 @@ handle (request& rq, response& rs)
for (const auto& tc: *target_conf_)
{
if (!exclude (pc, p->builds, p->constraints, tc))
- build_configs.push_back (
- build_config {p->id, pc.name, &tc});
+ build_configs.push_back (build_config {p, &pc, &tc});
}
}
}
@@ -1287,10 +1505,11 @@ handle (request& rq, response& rs)
//
for (build_config& c: build_configs)
{
- const string& pc (c.pc);
+ shared_ptr<build_package>& p (c.p);
+ const string& pc (c.pc->name);
const build_target_config& tc (*c.tc);
- build_id bid (c.pid,
+ build_id bid (p->id,
tc.target,
tc.name,
pc,
@@ -1305,40 +1524,30 @@ handle (request& rq, response& rs)
if (b == nullptr)
{
- b = make_shared<build> (
- move (bid.package.tenant),
- move (bid.package.name),
- p->version,
- move (bid.target),
- move (bid.target_config_name),
- move (bid.package_config_name),
- move (bid.toolchain_name),
- toolchain_version,
- nullopt /* interactive */,
- nullopt /* agent_fp */,
- nullopt /* agent_challenge */,
- "brep" /* machine */,
- "build task module" /* machine_summary */,
- "" /* controller_checksum */,
- "" /* machine_checksum */);
-
- b->state = build_state::built;
- b->status = result_status::abort;
-
- b->soft_timestamp = b->timestamp;
- b->hard_timestamp = b->soft_timestamp;
-
- // Mark the section as loaded, so results are updated.
- //
- b->results_section.load ();
-
- b->results.push_back (
- operation_result {
- "configure",
- result_status::abort,
- "error: multiple configurations for interactive build\n"});
+ b = make_shared<build> (move (bid.package.tenant),
+ move (bid.package.name),
+ p->version,
+ move (bid.target),
+ move (bid.target_config_name),
+ move (bid.package_config_name),
+ move (bid.toolchain_name),
+ toolchain_version,
+ result_status::abort,
+ operation_results ({
+ operation_result {
+ "configure",
+ result_status::abort,
+ "error: multiple configurations "
+ "for interactive build\n"}}),
+ build_machine {
+ "brep", "build task module"});
build_db_->persist (b);
+
+ // Schedule the build notification email.
+ //
+ aborted_builds.push_back (aborted_build {
+ move (b), move (p), c.pc, "build"});
}
}
@@ -1351,7 +1560,7 @@ handle (request& rq, response& rs)
}
}
- for (build_package_config& pc: p->configs)
+ for (const build_package_config& pc: p->configs)
{
pkg_config = pc.name;
@@ -1389,17 +1598,23 @@ handle (request& rq, response& rs)
if (!configs.empty ())
{
// Find the first build configuration that is not excluded by
- // the package configuration.
+ // the package configuration and for which all the requested
+ // auxiliary machines can be provided.
//
auto i (configs.begin ());
auto e (configs.end ());
build_db_->load (*p, p->constraints_section);
- for (;
- i != e &&
- exclude (pc, p->builds, p->constraints, *i->second.config);
- ++i) ;
+ optional<collect_auxiliaries_result> aux;
+ for (; i != e; ++i)
+ {
+ const build_target_config& tc (*i->second.config);
+
+ if (!exclude (pc, p->builds, p->constraints, tc) &&
+ (aux = collect_auxiliaries (p, pc, tc)))
+ break;
+ }
if (i != e)
{
@@ -1440,8 +1655,9 @@ handle (request& rq, response& rs)
move (login),
move (agent_fp),
move (cl),
- mh.name,
- move (mh.summary),
+ build_machine {
+ mh.name, move (mh.summary)},
+ move (aux->build_auxiliary_machines),
controller_checksum (*cm.config),
machine_checksum (*cm.machine));
@@ -1467,6 +1683,8 @@ handle (request& rq, response& rs)
b->state = build_state::building;
b->interactive = move (login);
+ unforced = (b->force == force_state::unforced);
+
// Switch the force state not to reissue the task after the
// forced rebuild timeout. Note that the result handler will
// still recognize that the rebuild was forced.
@@ -1479,8 +1697,15 @@ handle (request& rq, response& rs)
b->agent_fingerprint = move (agent_fp);
b->agent_challenge = move (cl);
- b->machine = mh.name;
- b->machine_summary = move (mh.summary);
+ b->machine = build_machine {mh.name, move (mh.summary)};
+
+ // Mark the section as loaded, so auxiliary_machines are
+ // updated.
+ //
+ b->auxiliary_machines_section.load ();
+
+ b->auxiliary_machines =
+ move (aux->build_auxiliary_machines);
string ccs (controller_checksum (*cm.config));
string mcs (machine_checksum (*cm.machine));
@@ -1552,8 +1777,17 @@ handle (request& rq, response& rs)
}
}
- tsm = task (
- *b, move (*p), move (pc), move (bp.interactive), cm);
+ task_response = task (*b,
+ *p,
+ pc,
+ move (aux->tests),
+ move (aux->task_auxiliary_machines),
+ move (bp.interactive),
+ cm);
+
+ task_build = move (b);
+ task_package = move (p);
+ task_config = &pc;
break; // Bail out from the package configurations loop.
}
@@ -1563,7 +1797,7 @@ handle (request& rq, response& rs)
// If the task response manifest is prepared, then bail out from the
// package loop, commit the transaction and respond.
//
- if (!tsm.session.empty ())
+ if (!task_response.session.empty ())
break;
}
@@ -1573,7 +1807,7 @@ handle (request& rq, response& rs)
// If we don't have an unbuilt package, then let's see if we have a
// build configuration to rebuild.
//
- if (tsm.session.empty () && !rebuilds.empty ())
+ if (task_response.session.empty () && !rebuilds.empty ())
{
// Sort the configuration rebuild list with the following sort
// priority:
@@ -1636,9 +1870,10 @@ handle (request& rq, response& rs)
assert (i != conf_machines.end ());
const config_machine& cm (i->second);
- // Rebuild the package if still present, is buildable, doesn't
- // exclude the configuration, and matches the request's
- // interactive mode.
+ // Rebuild the package configuration if still present, is
+ // buildable, doesn't exclude the target configuration, can be
+ // provided with all the requested auxiliary machines, and
+ // matches the request's interactive mode.
//
// Note that while change of the latter seems rather far fetched,
// let's check it for good measure.
@@ -1664,7 +1899,11 @@ handle (request& rq, response& rs)
{
build_db_->load (*p, p->constraints_section);
- if (!exclude (*pc, p->builds, p->constraints, *cm.config))
+ const build_target_config& tc (*cm.config);
+
+ optional<collect_auxiliaries_result> aux;
+ if (!exclude (*pc, p->builds, p->constraints, tc) &&
+ (aux = collect_auxiliaries (p, *pc, tc)))
{
assert (b->status);
@@ -1684,14 +1923,23 @@ handle (request& rq, response& rs)
? tqm.interactive_login
: nullopt;
+ unforced = (b->force == force_state::unforced);
+
// Can't move from, as may need them on the next iteration.
//
b->agent_fingerprint = agent_fp;
b->agent_challenge = cl;
const machine_header_manifest& mh (*cm.machine);
- b->machine = mh.name;
- b->machine_summary = mh.summary;
+ b->machine = build_machine {mh.name, mh.summary};
+
+ // Mark the section as loaded, so auxiliary_machines are
+ // updated.
+ //
+ b->auxiliary_machines_section.load ();
+
+ b->auxiliary_machines =
+ move (aux->build_auxiliary_machines);
// Issue the hard rebuild if the timeout expired, rebuild is
// forced, or the configuration or machine has changed.
@@ -1752,8 +2000,17 @@ handle (request& rq, response& rs)
}
}
- tsm = task (
- *b, move (*p), move (*pc), move (t->interactive), cm);
+ task_response = task (*b,
+ *p,
+ *pc,
+ move (aux->tests),
+ move (aux->task_auxiliary_machines),
+ move (t->interactive),
+ cm);
+
+ task_build = move (b);
+ task_package = move (p);
+ task_config = pc;
}
}
}
@@ -1765,13 +2022,13 @@ handle (request& rq, response& rs)
// Just try with the next rebuild. But first, reset the task
// response manifest that we may have prepared.
//
- tsm = task_response_manifest ();
+ task_response = task_response_manifest ();
}
// If the task response manifest is prepared, then bail out from the
// package configuration rebuilds loop and respond.
//
- if (!tsm.session.empty ())
+ if (!task_response.session.empty ())
break;
}
}
@@ -1842,6 +2099,131 @@ handle (request& rq, response& rs)
if (auto f = tsb->build_building (ss, b))
update_tenant_service_state (conn, b.tenant, f);
}
+
+ // If the task response manifest is prepared, then check that the number
+ // of the build auxiliary machines is less than 10. If that's not the
+ // case, then turn the build into the built state with the abort status.
+ //
+ if (task_response.task->auxiliary_machines.size () > 9)
+ {
+ // Respond with the no-task manifest.
+ //
+ task_response = task_response_manifest ();
+
+ // If the package tenant has a third-party service state associated
+ // with it, then check if the tenant_service_build_built callback is
+ // registered for the type of the associated service. If it is, then
+ // stash the state, the build object, and the callback pointer for the
+ // subsequent service `built` notification.
+ //
+ const tenant_service_build_built* tsb (nullptr);
+ optional<pair<tenant_service, shared_ptr<build>>> tss;
+ {
+ transaction t (conn->begin ());
+
+ shared_ptr<build> b (build_db_->find<build> (task_build->id));
+
+ // For good measure, check that the build object is in the building
+ // state and has not been updated.
+ //
+ if (b->state == build_state::building &&
+ b->timestamp == task_build->timestamp)
+ {
+ b->state = build_state::built;
+ b->status = result_status::abort;
+ b->force = force_state::unforced;
+
+ // Cleanup the interactive build login information.
+ //
+ b->interactive = nullopt;
+
+ // Cleanup the authentication data.
+ //
+ b->agent_fingerprint = nullopt;
+ b->agent_challenge = nullopt;
+
+ b->timestamp = system_clock::now ();
+ b->soft_timestamp = b->timestamp;
+ b->hard_timestamp = b->soft_timestamp;
+
+ // Mark the section as loaded, so results are updated.
+ //
+ b->results_section.load ();
+
+ b->results = operation_results ({
+ operation_result {
+ "configure",
+ result_status::abort,
+ "error: not more than 9 auxiliary machines are allowed"}});
+
+ b->agent_checksum = nullopt;
+ b->worker_checksum = nullopt;
+ b->dependency_checksum = nullopt;
+
+ build_db_->update (b);
+
+ // Schedule the `built` notification, if the
+ // tenant_service_build_built callback is registered for the
+ // tenant.
+ //
+ shared_ptr<build_tenant> t (
+ build_db_->load<build_tenant> (b->tenant));
+
+ if (t->service)
+ {
+ auto i (tenant_service_map_.find (t->service->type));
+
+ if (i != tenant_service_map_.end ())
+ {
+ tsb = dynamic_cast<const tenant_service_build_built*> (
+ i->second.get ());
+
+ // If required, stash the service notification information.
+ //
+ if (tsb != nullptr)
+ tss = make_pair (move (*t->service), b);
+ }
+ }
+
+ // Schedule the build notification email.
+ //
+ aborted_builds.push_back (
+ aborted_build {move (b),
+ move (task_package),
+ task_config,
+ unforced ? "build" : "rebuild"});
+ }
+
+ t.commit ();
+ }
+
+ // If a third-party service needs to be notified about the built
+ // package, then call the tenant_service_build_built::build_built()
+ // callback function and update the service state, if requested.
+ //
+ if (tsb != nullptr)
+ {
+ assert (tss); // Wouldn't be here otherwise.
+
+ const tenant_service& ss (tss->first);
+ const build& b (*tss->second);
+
+ if (auto f = tsb->build_built (ss, b))
+ update_tenant_service_state (conn, b.tenant, f);
+ }
+ }
+
+ // Send notification emails for all the aborted builds.
+ //
+ for (const aborted_build& ab: aborted_builds)
+ send_notification_email (*options_,
+ conn,
+ *ab.b,
+ *ab.p,
+ *ab.pc,
+ ab.what,
+ error,
+ verb_ >= 2 ? &trace : nullptr);
}
}
diff --git a/mod/mod-upload.cxx b/mod/mod-upload.cxx
index 1474363..9f8b9de 100644
--- a/mod/mod-upload.cxx
+++ b/mod/mod-upload.cxx
@@ -82,9 +82,6 @@ init (scanner& s)
build_result_module::init (*options_, *options_);
}
-
- if (options_->root ().empty ())
- options_->root (dir_path ("/"));
}
bool brep::upload::
@@ -499,8 +496,8 @@ handle (request& rq, response& rs)
s.next ("toolchain-version", sess.toolchain_version.string ());
s.next ("repository-name", rep->canonical_name);
- s.next ("machine-name", bld->machine);
- s.next ("machine-summary", bld->machine_summary);
+ s.next ("machine-name", bld->machine.name);
+ s.next ("machine-summary", bld->machine.summary);
// Serialize the request parameters.
//
diff --git a/mod/module.cli b/mod/module.cli
index ec86b7b..a107ffe 100644
--- a/mod/module.cli
+++ b/mod/module.cli
@@ -31,7 +31,7 @@ namespace brep
}
};
- class handler
+ class repository_url
{
string host
{
@@ -51,7 +51,29 @@ namespace brep
Specify '\cb{/}' to use the web server root
(\cb{http://example.org/})."
}
+ };
+
+ class build_email_notification: repository_email, repository_url
+ {
+ std::map<string, build_email> build-toolchain-email
+ {
+ "<name>=<mode>",
+ "Enable or disable package build notification emails. The valid <mode>
+ values are \cb{none}, \cb{latest}, and \cb{all}. If \cb{all} is
+ specified for a toolchain name, then emails are sent according to the
+ \cb{build-*email} package manifest values when all versions of a
+ package are built with this toolchain. If \cb{latest} is specified,
+ then for this toolchain name the emails are only sent for the latest
+ version of a package. If \cb{none} is specified, then no emails are
+ sent for this toolchain name. By default the \cb{latest} mode is
+ assumed. Repeat this option to enable/disable emails for multiple
+ toolchains. See \l{bpkg#manifest-package Package Manifest} for
+ details on \cb{build-*email} values."
+ }
+ };
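
For example (the toolchain name is hypothetical), build-toolchain-email public=none would suppress the notification emails for the public toolchain while leaving the default latest behavior in effect for all other toolchains.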
+ class handler
+ {
string tenant-name = "tenant"
{
"<name>",
@@ -486,7 +508,7 @@ namespace brep
// Handler options.
//
- class packages: search, package_db, page, handler
+ class packages: search, package_db, page, repository_url, handler
{
string search-title = "Packages"
{
@@ -504,13 +526,18 @@ namespace brep
}
};
- class package_details: package, search, package_db, page, handler
+ class package_details: package, package_db,
+ search,
+ page,
+ repository_url,
+ handler
{
};
class package_version_details: package, package_db,
build, build_db,
page,
+ repository_url,
handler
{
dir_path bindist-root
@@ -538,11 +565,14 @@ namespace brep
}
};
- class repository_details: package_db, page, handler
+ class repository_details: package_db, page, repository_url, handler
{
};
- class build_task: build, build_db, build_upload, handler
+ class build_task: build, build_db,
+ build_upload,
+ build_email_notification,
+ handler
{
size_t build-task-request-max-size = 102400
{
@@ -583,7 +613,9 @@ namespace brep
}
};
- class build_result: build, package_db, build_db, repository_email, handler
+ class build_result: build, build_db,
+ build_email_notification,
+ handler
{
size_t build-result-request-max-size = 10485760
{
@@ -593,25 +625,9 @@ namespace brep
face of recoverable failures (deadlock, loss of connection, etc). The
default is 10M."
}
-
- std::map<string, build_email> build-toolchain-email
- {
- "<name>=<mode>",
- "Enable or disable package build notification emails. The valid <mode>
- values are \cb{none}, \cb{latest}, and \cb{all}. If \cb{all} is
- specified for a toolchain name, then emails are sent according to the
- \cb{build-*email} package manifest values when all versions of a
- package are built with this toolchain. If \cb{latest} is specified,
- then for this toolchain name the emails are only sent for the latest
- version of a package. If \cb{none} is specified, then no emails are
- sent for this toolchain name. By default the \cb{latest} mode is
- assumed. Repeat this option to enable/disable emails for multiple
- toolchains. See \l{bpkg#manifest-package Package Manifest} for
- details on \cb{build-*email} values."
- }
};
- class build_log: build, build_db, handler
+ class build_log: build, build_db, repository_url, handler
{
};
@@ -619,7 +635,7 @@ namespace brep
{
};
- class builds: build, build_db, page, handler
+ class builds: build, build_db, page, repository_url, handler
{
uint16_t build-page-entries = 20
{
@@ -634,7 +650,7 @@ namespace brep
}
};
- class build_configs: build, page, handler
+ class build_configs: build, page, repository_url, handler
{
uint16_t build-config-page-entries = 20
{
@@ -649,7 +665,7 @@ namespace brep
}
};
- class submit: page, repository_email, handler
+ class submit: page, repository_email, repository_url, handler
{
dir_path submit-data
{
@@ -784,7 +800,7 @@ namespace brep
{
};
- class ci: ci_start, page, handler
+ class ci: ci_start, page, repository_url, handler
{
// Classic CI-specific options.
//
@@ -810,7 +826,7 @@ namespace brep
{
};
- class repository_root: handler
+ class repository_root: repository_url, handler
{
string root-global-view = "packages"
{