-rw-r--r--  .gitignore                                      6
-rw-r--r--  AUTHORS                                         7
-rw-r--r--  CONTRIBUTING.md                                13
-rw-r--r--  LICENSE                                        22
-rw-r--r--  NEWS                                           41
-rw-r--r--  bbot/agent/agent.cli                          139
-rw-r--r--  bbot/agent/agent.cxx                         3276
-rw-r--r--  bbot/agent/agent.hxx                            6
-rw-r--r--  bbot/agent/http-service.cxx                   364
-rw-r--r--  bbot/agent/http-service.hxx                    71
-rw-r--r--  bbot/agent/machine.cxx                        148
-rw-r--r--  bbot/agent/machine.hxx                         15
-rw-r--r--  bbot/agent/tftp.cxx                             4
-rw-r--r--  bbot/agent/tftp.hxx                             4
-rw-r--r--  bbot/bbot-agent@.service                       12
-rw-r--r--  bbot/bootstrap-manifest.cxx                     6
-rw-r--r--  bbot/bootstrap-manifest.hxx                     4
-rw-r--r--  bbot/bootstrap-manifest.test.cxx                9
-rw-r--r--  bbot/bootstrap-manifest.test.testscript         2
-rw-r--r--  bbot/buildfile                                 33
-rw-r--r--  bbot/common.cli                                 2
-rw-r--r--  bbot/diagnostics.cxx                            2
-rw-r--r--  bbot/diagnostics.hxx                            8
-rw-r--r--  bbot/machine-manifest.cxx                      60
-rw-r--r--  bbot/machine-manifest.hxx                      19
-rw-r--r--  bbot/machine-manifest.test.cxx                  9
-rw-r--r--  bbot/machine-manifest.test.testscript         160
-rw-r--r--  bbot/types-parsers.cxx                          2
-rw-r--r--  bbot/types-parsers.hxx                          2
-rw-r--r--  bbot/types.hxx                                 26
-rw-r--r--  bbot/utility.hxx                               36
-rw-r--r--  bbot/utility.txx                               25
-rw-r--r--  bbot/version.hxx.in                             2
-rw-r--r--  bbot/worker/worker.cli                          2
-rw-r--r--  bbot/worker/worker.cxx                       6047
-rw-r--r--  build/bootstrap.build                           2
-rw-r--r--  build/export.build                              2
-rw-r--r--  build/root.build                               11
-rw-r--r--  buildfile                                       2
-rw-r--r--  doc/buildfile                                   2
-rwxr-xr-x  doc/cli.sh                                      4
-rw-r--r--  doc/manual.cli                               1838
m---------  doc/style                                       0
-rw-r--r--  etc/bootstrap/bbot-bootstrap-clang.bat          6
-rwxr-xr-x  etc/bootstrap/bbot-bootstrap-freebsd.sh         2
-rwxr-xr-x  etc/bootstrap/bbot-bootstrap-linux.sh           4
-rwxr-xr-x  etc/bootstrap/bbot-bootstrap-macos.sh           2
-rw-r--r--  etc/bootstrap/bbot-bootstrap-mingw.bat          2
-rw-r--r--  etc/bootstrap/bbot-bootstrap-msvc-14.bat        2
-rw-r--r--  etc/bootstrap/bbot-bootstrap-msvc.bat           5
-rw-r--r--  etc/bootstrap/bbot-bootstrap.service           10
-rwxr-xr-x  etc/bootstrap/bbot-bootstrap.sh                24
-rw-r--r--  etc/buildfile                                   2
-rwxr-xr-x  etc/environments/default-aarch64               40
-rw-r--r--  etc/environments/default-clang.bat              9
-rwxr-xr-x  etc/environments/default-emcc                   2
-rw-r--r--  etc/environments/default-mingw.bat              2
-rw-r--r--  etc/environments/default-msvc-14.bat            2
-rw-r--r--  etc/environments/default-msvc.bat               9
-rwxr-xr-x  etc/environments/default-x86_64 (renamed from etc/environments/default)  4
-rw-r--r--  manifest                                       18
-rw-r--r--  tests/agent/buildfile                           2
-rw-r--r--  tests/agent/testscript                          6
-rw-r--r--  tests/build/bootstrap.build                     2
-rw-r--r--  tests/build/root.build                          2
-rw-r--r--  tests/buildfile                                 2
-rw-r--r--  tests/integration/buildfile                     2
-rw-r--r--  tests/integration/testscript                  408
-rw-r--r--  tests/integration/tftp-map                      2
-rw-r--r--  tests/machine/buildfile                         2
-rw-r--r--  tests/machine/testscript                        4
-rw-r--r--  tests/worker/bootstrap.testscript               2
-rw-r--r--  tests/worker/build.testscript                  14
-rw-r--r--  tests/worker/buildfile                          2
-rw-r--r--  tests/worker/startup.testscript                 2
75 files changed, 11004 insertions(+), 2038 deletions(-)
diff --git a/.gitignore b/.gitignore
index c3de2e7..5046596 100644
--- a/.gitignore
+++ b/.gitignore
@@ -5,10 +5,16 @@
*.d
*.t
*.i
+*.i.*
*.ii
+*.ii.*
*.o
*.obj
+*.gcm
+*.pcm
+*.ifc
*.so
+*.dylib
*.dll
*.a
*.lib
diff --git a/AUTHORS b/AUTHORS
new file mode 100644
index 0000000..9780708
--- /dev/null
+++ b/AUTHORS
@@ -0,0 +1,7 @@
+This file contains information about the build2 authors for copyright
+purposes.
+
+The copyright for the code is held by the contributors of the code. The
+revision history in the version control system is the primary source of
+authorship information for copyright purposes. Contributors that have
+requested to also be noted explicitly in this file are listed below:
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index a3216bb..6bfc34f 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -1,3 +1,16 @@
This project is part of the `build2` toolchain; see its
[Community](https://build2.org/community.xhtml) page for various ways to
contribute.
+
+The copyright for the code is held by the contributors of the code (see the
+`AUTHORS` file). The code is licensed under permissive open source licensing
+terms (see the `LICENSE` file). When you contribute code to this project, you
+license it under these terms. Before contributing please make sure that these
+terms are acceptable to you (and to your employer(s), if they have rights to
+intellectual property that you create) and that the code being contributed is
+your original creation.
+
+The revision history in the version control system is the primary source of
+authorship information for copyright purposes. If, however, you would like
+to also be noted explicitly, please include the appropriate change to the
+`AUTHORS` file along with your contribution.
diff --git a/LICENSE b/LICENSE
index 9020145..e63bcec 100644
--- a/LICENSE
+++ b/LICENSE
@@ -1,5 +1,21 @@
-Copyright (c) 2014-2021 Code Synthesis Ltd.
-
-TBC; All rights reserved
-
-@@ Keep manual under MIT? Also man pages.
+MIT License
+
+Copyright (c) 2014-2024 the build2 authors (see the AUTHORS file).
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/NEWS b/NEWS
index 8515e55..bf4df52 100644
--- a/NEWS
+++ b/NEWS
@@ -1,3 +1,44 @@
+Version 0.16.0
+
+ * New bpkg.bindist.*, bbot.sys-install.*, bbot.install.ldconfig steps.
+
+ * Support for build artifact upload (bbot.upload step).
+
+ * Support for controller URL priorities, build job interrupts, CPU boosting.
+
+ * Support for *-package-config package manifest value functionality.
+
+ * Support for the `none` interactive CI breakpoint.
+
+ * Support for aarch64 in the environment scripts.
+
+ * New --install option in the bootstrap scripts.
+
+ * New --{bootstrap,build}-startup agent options.
+
+Version 0.15.0
+
+ * Support for MSVC 17 (2022) in bootstrap and environment batch files.
+
+ * Packages are now configured with config.<pkg>.develop=false to force
+ skeleton load.
+
+Version 0.14.0
+
+ * New worker script with the target/host configuration split awareness.
+
+ * Support for interactive builds.
+
+ * Support for soft and hard rebuilds.
+
+ * Re-distribution phase for checked out packages.
+
+ * Emscripten environment script.
+
+ * Compression of result manifests uploaded by worker.
+
+ * Truncation of large operation result logs.
+
Version 0.13.0

 * Build logs are now UTF-8-sanitized by the worker.
diff --git a/bbot/agent/agent.cli b/bbot/agent/agent.cli
index b50a43a..23765cf 100644
--- a/bbot/agent/agent.cli
+++ b/bbot/agent/agent.cli
@@ -1,5 +1,5 @@
// file : bbot/agent.cli
-// license : TBC; see accompanying LICENSE file
+// license : MIT; see accompanying LICENSE file
include <libbbot/manifest.hxx>;
@@ -12,19 +12,77 @@ include <bbot/common.cli>;
namespace bbot
{
{
- "<options> <url>",
+ "<options> <priority> <url>",
"
\h|SYNOPSIS|
\c{\b{bbot-agent --help}\n
\b{bbot-agent --version}\n
- \b{bbot-agent} [<options>] <url>...}
+ \b{bbot-agent} [<options>] [<priority>=]<url>...}
\h|DESCRIPTION|
\cb{bbot-agent} @@ TODO.
+ The controller URL <priority> is a four or five-digit decimal value. If it
+ is absent, then \cb{0} (lowest priority) is assumed. URLs with equal
+ priority are queried at random.
+
+ The <priority> value has the \c{[\i{F}]\i{DCBA}} form which encodes four
+ priority levels (\ci{DCBA}) each occupying one decimal digit (so there are
+ 10 distinct priorities in each level) plus the optional boost flag
+ (\ci{F}). These levels offer different trade-offs between the speed of
+ completing a higher priority task and potentially discarding work that has
+ already been done.
+
+ The first priority level (\ci{A}) is a simple preference: among the URLs
+ with equal values for other levels (\ci{DCB}), those with higher first
+ level priorities are queried first.
+
+ The second priority level (\ci{B}) has the semantics of the first level
+ plus it prevents URLs with lower second priority level from being
+ queried until the task with a higher second priority level has completed,
+ effectively conserving the resources for the higher priority task.
+
+ The third priority level (\ci{C}) has the semantics of the second level
+ plus it may interrupt one lower third priority level task in order to
+ perform the higher third priority task (the interrupt is necessary if the
+ desired machine is used by the lower priority task or the number of tasks
+ already being performed is the maximum allowed to be performed
+ concurrently; see \cb{--instance-max}).
+
+ Finally, the fourth priority level (\ci{D}) has the semantics of the third
+ level except that not one but all the lower fourth priority level tasks
+ are interrupting, effectively dedicating all the available resources to
+ the higher priority task. This level can also be combined with the boost
+ flag \ci{F}. If this flag is \cb{1} then the higher priority task's CPU
+ number (\cb{--cpu}) is boosted to the full number of available hardware
+ threads (or, to view it another way, the fourth priority level has 20
+ possible values, not 10, with the first 0-9 being without the boost while
+ the last 10-19 being with the boost). Note that this boosting semantics
+ may not be accurate if the agent is executed with CPU affinity. Also note
+ that there is no corresponding RAM boosting and it's possible that in some
+ configurations the amount of RAM will be insufficient for the boosted CPU
+ count.
+
+ Note that the priority levels are hierarchical in the sense that within a
+ given higher level URLs can be further prioritized using the lower
+ levels. As an example, consider a deployment with three controller URLs:
+ background package rebuilds (\cb{pkg.example.org}), user-initiated CI
+ (\cb{ci.example.org}), and user-initiated interactive CI
+ (\cb{ici.example.org}). Given the following priorities:
+
+ \
+ 0000=https://pkg.example.org
+ 0100=https://ci.example.org
+ 0101=https://ici.example.org
+ \
+
+ Both types of CI tasks will interrupt one background rebuild task if
+ necessary while the interactive CI tasks will be merely preferred over
+ non-interactive.
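
To make the [F]DCBA encoding concrete, here is a minimal decoding sketch (an
illustrative helper, not part of the agent):

    #include <cstdint>

    struct priority
    {
      bool boost;          // F: boost CPU count to all hardware threads.
      unsigned d, c, b, a; // D: interrupt all, C: interrupt one,
                           // B: conserve resources, A: preference.
    };

    // Decode a [F]DCBA value, e.g., 10101 -> boost, D=0, C=1, B=0, A=1.
    //
    static priority
    decode_priority (std::uint64_t v)
    {
      priority p;
      p.a = v % 10; v /= 10;
      p.b = v % 10; v /= 10;
      p.c = v % 10; v /= 10;
      p.d = v % 10; v /= 10;
      p.boost = (v != 0); // Optional fifth digit (1 if present).
      return p;
    }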
+
Note that on termination \cb{bbot-agent} may leave behind a machine lock
and working machine snapshot. It is expected that the caller (normally
Build OS monitor) cleans them up before restarting the agent.
@@ -61,10 +119,20 @@ namespace bbot
"<num>",
"Toolchain number, 1 by default. If agents are running for several
toolchains, then each of them should have a unique toolchain number
- between 1 and 99. This number is used as an offset for network ports,
+ between 1 and 9. This number is used as an offset for network ports,
interfaces, etc."
}
+ string --toolchain-lock // Note: string to allow empty path.
+ {
+ "<path>",
+ "Absolute path to the global toolchain lock file. If unspecified, then
+ \c{\b{/var/lock/bbot-agent-}\i{toolchain-name}\b{.lock}} is used by
+ default. If an empty path is specified, then no global locking is
+ performed. If one of the \cb{--fake-*} options is specified, then no
+ locking is performed by default."
+ }
+
standard_version --toolchain-ver
{
"<stdver>",
@@ -101,16 +169,44 @@ namespace bbot
network ports, interfaces, etc."
}
+ uint16_t --instance-max = 0
+ {
+ "<num>",
+ "Maximum number of instances that can perform tasks concurrently. If the
+ number of instances that have been started is greater than this number
+ (normally by just one), then when the maximum number of tasks is
+ already being performed, the extra instances operate in the \i{priority
+ monitor} mode: they only query controller URLs with priorities higher
+ than of the existing tasks and can only perform a task by interrupting
+ one of them. If the maximum number of instances is \cb{0} (default),
+ then it is assumed the number of instances started is the maximum
+ number, essentially disabling the priority monitor functionality."
+ }
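
The resulting policy can be sketched as follows (mirroring the
enumerate_machines() logic in agent.cxx below; the names are as in that
file):

    // An instance enters the priority monitor mode when the number of
    // machines locked by other processes has reached --instance-max.
    //
    size_t busy (0);
    for (const bootstrapped_machine& m: machines)
      if (!m.lock.locked ())
        ++busy;

    bool monitor (inst_max != 0 && busy >= inst_max);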
+
size_t --cpu = 1
{
"<num>",
"Number of CPUs (threads) to use, 1 by default."
}
- size_t --ram (1024 * 1024) // 1G
+ size_t --build-ram (4 * 1024 * 1024) // 4GiB
{
"<num>",
- "Amount of RAM (in kB) to use, 1G by default."
+ "Amount of RAM (in KiB) to use for the build machine, 4GiB by default."
+ }
+
+ size_t --auxiliary-ram = 0
+ {
+ "<num>",
+ "Amount of RAM (in KiB) to use for auxiliary machines. To disable
+ running auxiliary machines, specify \cb{0}. If unspecified, then
+ currently the behavior is the same as specifying \cb{0} but this
+ may change in the future (for example, to support a more dynamic
+ allocation strategy)."
+
+ // Note: it's not going to be easy to set it to unspecified in
+ // bbot-agent@.service so we may have to invent some special value,
+ // like `auto`.
}
string --bridge = "br1"
@@ -155,21 +251,35 @@ namespace bbot
}
// Low 23401+, 23501+, 23601+, etc., all look good collision-wise with
- // with anything useful.
+ // anything useful.
//
uint16_t --tftp-port = 23400
{
"<num>",
"TFTP server port base, 23400 by default. The actual port is calculated
- by adding an offset calculated based on the toolchain and instance
- numbers."
+ by adding an offset calculated based on the toolchain, instance, and
+ machine numbers."
+ }
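
The exact offset formula is not part of this patch; the following sketch is
an assumption consistent with the "23401+, 23501+, 23601+" comment in
agent.cxx:

    #include <cstdint>

    // Hypothetical layout: one hundred ports per toolchain, ten per
    // instance, one per machine (0 = build, 1+ = auxiliary).
    //
    std::uint16_t
    tftp_port (std::uint16_t base,      // --tftp-port (23400 by default)
               std::uint16_t toolchain, // --toolchain-num, 1-based
               std::uint16_t instance,  // --instance, 1-based
               std::uint16_t machine)   // machine number
    {
      return base +
        (toolchain - 1) * 100 +
        (instance - 1) * 10 +
        machine + 1;
    }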
+
+ size_t --bootstrap-startup = 300
+ {
+ "<sec>",
+ "Maximum number of seconds to wait for build machine bootstrap startup,
+ 300 (5 minutes) by default."
}
size_t --bootstrap-timeout = 3600
{
"<sec>",
- "Maximum number of seconds to wait for machine bootstrap completion,
- 3600 (60 minutes) by default."
+ "Maximum number of seconds to wait for build machine bootstrap
+ completion, 3600 (60 minutes) by default."
+ }
+
+ size_t --bootstrap-auxiliary = 900
+ {
+ "<sec>",
+ "Maximum number of seconds to wait for auxiliary machine bootstrap
+ completion, 900 (15 minutes) by default."
}
size_t --bootstrap-retries = 2
@@ -179,6 +289,13 @@ namespace bbot
by default."
}
+ size_t --build-startup = 240
+ {
+ "<sec>",
+ "Maximum number of seconds to wait for build startup, 240 (4 minutes) by
+ default. This value is used for both build and auxiliary machines."
+ }
+
size_t --build-timeout = 5400
{
"<sec>",
diff --git a/bbot/agent/agent.cxx b/bbot/agent/agent.cxx
index 60f7271..75f7228 100644
--- a/bbot/agent/agent.cxx
+++ b/bbot/agent/agent.cxx
@@ -1,15 +1,16 @@
// file : bbot/agent/agent.cxx -*- C++ -*-
-// license : TBC; see accompanying LICENSE file
+// license : MIT; see accompanying LICENSE file
#include <bbot/agent/agent.hxx>
#include <pwd.h> // getpwuid()
#include <limits.h> // PATH_MAX
-#include <signal.h> // signal()
-#include <stdlib.h> // rand_r()
-#include <unistd.h> // sleep(), getuid(), fsync(), [f]stat()
+#include <signal.h> // signal(), kill()
+#include <stdlib.h> // rand_r(), strto[u]ll()
+#include <string.h> // strchr()
+#include <unistd.h> // sleep(), getpid(), getuid(), fsync(), [f]stat()
#include <ifaddrs.h> // getifaddrs(), freeifaddrs()
-#include <sys/types.h> // stat
+#include <sys/types.h> // stat, pid_t
#include <sys/stat.h> // [f]stat()
#include <sys/file.h> // flock()
@@ -19,15 +20,22 @@
#include <sys/ioctl.h>
#include <sys/socket.h>
+#include <map>
+#include <atomic>
#include <chrono>
+#include <thread> // thread::hardware_concurrency()
#include <random>
+#include <iomanip> // setw()
+#include <numeric> // iota()
#include <iostream>
#include <system_error> // generic_category()
-#include <libbutl/pager.mxx>
-#include <libbutl/sha256.mxx>
-#include <libbutl/openssl.mxx>
-#include <libbutl/filesystem.mxx> // dir_iterator, try_rmfile(), readsymlink()
+#include <libbutl/pager.hxx>
+#include <libbutl/base64.hxx>
+#include <libbutl/sha256.hxx>
+#include <libbutl/openssl.hxx>
+#include <libbutl/filesystem.hxx> // dir_iterator, try_rmfile(), readsymlink()
+#include <libbutl/semantic-version.hxx>
#include <libbbot/manifest.hxx>
@@ -40,6 +48,7 @@
#include <bbot/agent/tftp.hxx>
#include <bbot/agent/machine.hxx>
+#include <bbot/agent/http-service.hxx>
using namespace butl;
using namespace bbot;
@@ -47,6 +56,57 @@ using namespace bbot;
using std::cout;
using std::endl;
+// If RAM minimum is not specified for a machine, then let's assume something
+// plausible like 256MiB. This way we won't end up with degenerate cases where
+// we attempt to start a machine with an absurdly small amount of RAM.
+//
+const std::uint64_t default_ram_minimum = 262144;
+
+static inline std::uint64_t
+effective_ram_minimum (const machine_header_manifest& m)
+{
+ // Note: neither ram_minimum nor ram_maximum should be 0.
+ //
+ assert ((!m.ram_minimum || *m.ram_minimum != 0) &&
+ (!m.ram_maximum || *m.ram_maximum != 0));
+
+ return (m.ram_minimum
+ ? *m.ram_minimum
+ : (m.ram_maximum && *m.ram_maximum < default_ram_minimum
+ ? *m.ram_maximum
+ : default_ram_minimum));
+}
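
For instance (hypothetical values, in KiB):

    // ram_minimum  ram_maximum  effective_ram_minimum()
    // -----------  -----------  -----------------------
    // 524288       (any)        524288  (explicit minimum wins)
    // (absent)     131072       131072  (maximum below the 256MiB default)
    // (absent)     1048576      262144  (the 256MiB default)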
+
+static std::mt19937 rand_gen (std::random_device {} ());
+
+// According to the standard, atomic's use in the signal handler is only safe
+// if it's lock-free.
+//
+#if !defined(ATOMIC_INT_LOCK_FREE) || ATOMIC_INT_LOCK_FREE != 2
+#error int is not lock-free on this architecture
+#endif
+
+// While we can use memory_order_relaxed in a single-threaded program, let's
+// use consume/release in case this process becomes multi-threaded in the
+// future.
+//
+static std::atomic<unsigned int> sigurs1;
+
+using std::memory_order_consume;
+using std::memory_order_release;
+
+extern "C" void
+handle_signal (int sig)
+{
+ switch (sig)
+ {
+ case SIGHUP: exit (3); // Unimplemented feature.
+ case SIGTERM: exit (0);
+ case SIGUSR1: sigurs1.fetch_add (1, std::memory_order_release); break;
+ default: assert (false);
+ }
+}
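
A registration sketch (illustrative; the agent's actual setup code is not
part of this hunk):

    #include <signal.h>

    // Install handle_signal() for the signals it dispatches on.
    //
    static void
    install_signal_handlers ()
    {
      signal (SIGHUP,  &handle_signal);
      signal (SIGTERM, &handle_signal);
      signal (SIGUSR1, &handle_signal);
    }

    // Elsewhere, the interrupt counter is read with the matching
    // (consume) memory order:
    //
    static unsigned int
    interrupt_count ()
    {
      return sigurs1.load (memory_order_consume);
    }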
+
namespace bbot
{
agent_options ops;
@@ -55,10 +115,12 @@ namespace bbot
string tc_name;
uint16_t tc_num;
+ path tc_lock; // Empty if no locking.
standard_version tc_ver;
string tc_id;
- uint16_t inst;
+ uint16_t inst; // 1-based.
+ uint16_t inst_max; // 0 if priority monitoring is disabled.
uint16_t offset;
@@ -112,16 +174,16 @@ btrfs_exit (tracer& t, A&&... a)
"btrfs", forward<A> (a)...);
}
-// Bootstrap the machine. Return the bootstrapped machine manifest if
-// successful and nullopt otherwise (in which case the machine directory
-// should be cleaned and the machine ignored for now).
+// Bootstrap a build machine. Return the bootstrapped machine manifest if
+// successful and nullopt otherwise (in which case the caller should clean up
+// the machine directory and ignore the machine for now).
//
static optional<bootstrapped_machine_manifest>
-bootstrap_machine (const dir_path& md,
- const machine_manifest& mm,
- optional<bootstrapped_machine_manifest> obmm)
+bootstrap_build_machine (const dir_path& md,
+ const machine_manifest& mm,
+ optional<bootstrapped_machine_manifest> obmm)
{
- tracer trace ("bootstrap_machine", md.string ().c_str ());
+ tracer trace ("bootstrap_build_machine", md.string ().c_str ());
bootstrapped_machine_manifest r {
mm,
@@ -143,10 +205,12 @@ bootstrap_machine (const dir_path& md,
else
try
{
+ // Note: similar code in bootstrap_auxiliary_machine().
+
// Start the TFTP server (server chroot is --tftp). Map:
//
- // GET requests to .../toolchains/<name>/*
- // PUT requests to .../bootstrap/<name>-<instance>/*
+ // GET requests to .../toolchains/<toolchain>/*
+ // PUT requests to .../bootstrap/<toolchain>-<instance>/*
//
const string in_name (tc_name + '-' + to_string (inst));
auto_rmdir arm ((dir_path (ops.tftp ()) /= "bootstrap") /= in_name);
@@ -170,7 +234,7 @@ bootstrap_machine (const dir_path& md,
{
tftp_server tftpd ("Gr ^/?(.+)$ /toolchains/" + tc_name + "/\\1\n" +
"Pr ^/?(.+)$ /bootstrap/" + in_name + "/\\1\n",
- ops.tftp_port () + offset);
+ ops.tftp_port () + offset + 0 /* build machine */);
l3 ([&]{trace << "tftp server on port " << tftpd.port ();});
@@ -179,6 +243,9 @@ bootstrap_machine (const dir_path& md,
unique_ptr<machine> m (
start_machine (md,
mm,
+ 0 /* machine_num (build) */,
+ ops.cpu (),
+ ops.build_ram (),
obmm ? obmm->machine.mac : nullopt,
ops.bridge (),
tftpd.port (),
@@ -193,8 +260,11 @@ bootstrap_machine (const dir_path& md,
make_exception_guard (
[&m, &md] ()
{
- info << "trying to force machine " << md << " down";
- try {m->forcedown (false);} catch (const failed&) {}
+ if (m != nullptr)
+ {
+ info << "trying to force machine " << md << " down";
+ try {m->forcedown (false);} catch (const failed&) {}
+ }
}));
// What happens if the bootstrap process hangs? The simple thing would
@@ -218,6 +288,8 @@ bootstrap_machine (const dir_path& md,
m->wait (false);
m->cleanup ();
info << "resuming after machine suspension";
+
+ // Note: snapshot cleaned up by the caller.
}
catch (const failed&) {}
@@ -250,13 +322,14 @@ bootstrap_machine (const dir_path& md,
};
// The first request should be the toolchain download. Wait for up to
- // 5 minutes for that to arrive. In a sense we use it as an indication
- // that the machine has booted and the bootstrap process has started.
- // Why wait so long you may wonder? Well, we may be using a new MAC
- // address and operating systems like Windows may need to digest that.
+ // 5 minutes (by default) for that to arrive. In a sense we use it as
+ // an indication that the machine has booted and the bootstrap process
+ // has started. Why wait so long you may wonder? Well, we may be using
+ // a new MAC address and operating systems like Windows may need to
+ // digest that.
//
size_t to;
- const size_t startup_to (5 * 60);
+ const size_t startup_to (ops.bootstrap_startup ());
const size_t bootstrap_to (ops.bootstrap_timeout ());
const size_t shutdown_to (5 * 60);
@@ -268,7 +341,9 @@ bootstrap_machine (const dir_path& md,
break;
if (!check_machine ())
- return nullopt;
+ {
+ return nullopt; // Note: snapshot cleaned up by the caller.
+ }
}
// This can mean two things: machine mis-configuration or what we
@@ -289,6 +364,7 @@ bootstrap_machine (const dir_path& md,
m->print_info (dr);
try {m->forcedown (false);} catch (const failed&) {}
+ m = nullptr; // Disable exceptions guard above.
continue;
}
@@ -311,7 +387,9 @@ bootstrap_machine (const dir_path& md,
// The exit/upload is racy so we re-check.
//
if (!(file_not_empty (mf) || file_not_empty (mfo)))
- return nullopt;
+ {
+ return nullopt; // Note: snapshot cleaned up by the caller.
+ }
}
bool old (false);
@@ -361,7 +439,329 @@ bootstrap_machine (const dir_path& md,
return r;
}
-// Machine locking.
+// Bootstrap an auxiliary machine. Return the bootstrapped machine manifest if
+// successful and nullopt otherwise (in which case the caller should clean up
+// the machine directory and ignore the machine for now).
+//
+static vector<size_t>
+divide_auxiliary_ram (const vector<const machine_header_manifest*>&);
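
The definition of divide_auxiliary_ram() appears later in the file and is
not part of this excerpt; the sketch below shows a plausible (assumed)
proportional division, only to make the calls that follow readable:

    static vector<size_t>
    divide_auxiliary_ram (const vector<const machine_header_manifest*>& ms)
    {
      size_t total (ops.auxiliary_ram ()); // KiB.

      // Sum the effective minimums; an empty result means the machines
      // cannot all fit.
      //
      size_t need (0);
      for (const machine_header_manifest* m: ms)
        need += effective_ram_minimum (*m);

      if (need == 0 || need > total)
        return {};

      // Divide proportionally to the minimums, capping at each machine's
      // maximum, if specified.
      //
      vector<size_t> r;
      for (const machine_header_manifest* m: ms)
      {
        size_t ram (effective_ram_minimum (*m) * total / need);

        if (m->ram_maximum && ram > *m->ram_maximum)
          ram = *m->ram_maximum;

        r.push_back (ram);
      }

      return r;
    }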
+
+static optional<bootstrapped_machine_manifest>
+bootstrap_auxiliary_machine (const dir_path& md,
+ const machine_manifest& mm,
+ optional<bootstrapped_machine_manifest> obmm)
+{
+ tracer trace ("bootstrap_auxiliary_machine", md.string ().c_str ());
+
+ bootstrapped_machine_manifest r {
+ mm,
+ toolchain_manifest {}, // Unused for auxiliary.
+ bootstrap_manifest {} // Unused for auxiliary.
+ };
+
+ if (ops.fake_bootstrap ())
+ {
+ r.machine.mac = "de:ad:be:ef:de:ad";
+ }
+ else
+ try
+ {
+ // Similar to bootstrap_build_machine() except here we just wait for the
+ // upload of the environment.
+
+ // Start the TFTP server (server chroot is --tftp). Map:
+ //
+ // GET requests to /dev/null
+ // PUT requests to .../bootstrap/<toolchain>-<instance>/*
+ //
+ const string in_name (tc_name + '-' + to_string (inst));
+ auto_rmdir arm ((dir_path (ops.tftp ()) /= "bootstrap") /= in_name);
+ try_mkdir_p (arm.path);
+
+ // Environment upload.
+ //
+ path ef (arm.path / "environment");
+ try_rmfile (ef);
+
+ // Note that unlike build, here we use the same VM snapshot for retries,
+ // which is not ideal.
+ //
+ for (size_t retry (0);; ++retry)
+ {
+ tftp_server tftpd ("Gr ^/?(.+)$ " + string ("/dev/null") + '\n' +
+ "Pr ^/?(.+)$ /bootstrap/" + in_name + "/\\1\n",
+ ops.tftp_port () + offset + 1 /* auxiliary machine */);
+
+ l3 ([&]{trace << "tftp server on port " << tftpd.port ();});
+
+ // If the machine specified RAM minimum, use that to make sure the
+ // machine can actually function with this amount of RAM. Otherwise, use
+ // the minium of RAM maximum (if specified) and the available auxiliary
+ // RAM (so we know this machine will at least work alone). For the
+ // latter case use divide_auxiliary_ram() to be consistent with the
+ // build case (see that function implementation for nuances).
+ //
+ size_t ram;
+ if (mm.ram_minimum)
+ ram = *mm.ram_minimum;
+ else
+ {
+ vector<size_t> rams (divide_auxiliary_ram ({&mm}));
+ assert (!rams.empty ()); // We should have skipped such a machine.
+ ram = rams.front ();
+ }
+
+ // Start the machine.
+ //
+ unique_ptr<machine> m (
+ start_machine (md,
+ mm,
+ 1 /* machine_num (first auxiliary) */,
+ ops.cpu (),
+ ram,
+ obmm ? obmm->machine.mac : nullopt,
+ ops.bridge (),
+ tftpd.port (),
+ false /* pub_vnc */));
+
+ {
+ // NOTE: see bootstrap_build_machine() for comments.
+
+ auto mg (
+ make_exception_guard (
+ [&m, &md] ()
+ {
+ if (m != nullptr)
+ {
+ info << "trying to force machine " << md << " down";
+ try {m->forcedown (false);} catch (const failed&) {}
+ }
+ }));
+
+ auto soft_fail = [&md, &m] (const char* msg)
+ {
+ {
+ diag_record dr (error);
+ dr << msg << " for machine " << md << ", suspending";
+ m->print_info (dr);
+ }
+
+ try
+ {
+ m->suspend (false);
+ m->wait (false);
+ m->cleanup ();
+ info << "resuming after machine suspension";
+
+ // Note: snapshot cleaned up by the caller.
+ }
+ catch (const failed&) {}
+
+ return nullopt;
+ };
+
+ auto check_machine = [&md, &m] ()
+ {
+ try
+ {
+ size_t t (0);
+ if (!m->wait (t /* seconds */, false /* fail_hard */))
+ return true; // Still running.
+
+ // Exited successfully.
+ }
+ catch (const failed&)
+ {
+ // Failed, exit code diagnostics has already been issued.
+ }
+
+ diag_record dr (error);
+ dr << "machine " << md << " exited unexpectedly";
+ m->print_info (dr);
+
+ return false;
+ };
+
+ // Wait up to the specified timeout for the auxiliary machine to
+ // bootstrap. Note that such a machine may do extra setup work on the
+ // first boot (such as install some packages, etc) which may take some
+ // time.
+ //
+ size_t to;
+ const size_t bootstrap_to (ops.bootstrap_auxiliary ());
+ const size_t shutdown_to (5 * 60);
+
+ // Serve TFTP requests while periodically checking for the environment
+ // file.
+ //
+ for (to = bootstrap_to; to != 0; )
+ {
+ if (tftpd.serve (to, 2))
+ continue;
+
+ if (!check_machine ())
+ {
+ if (!file_not_empty (ef))
+ {
+ return nullopt; // Note: snapshot cleaned up by the caller.
+ }
+ }
+
+ if (file_not_empty (ef))
+ {
+ if (!tftpd.serve (to, 5))
+ break;
+ }
+ }
+
+ if (to == 0)
+ {
+ if (retry > ops.bootstrap_retries ())
+ return soft_fail ("bootstrap timeout");
+
+ // Note: keeping the logs behind (no cleanup).
+
+ diag_record dr (warn);
+ dr << "machine " << mm.name << " mis-booted, retrying";
+ m->print_info (dr);
+
+ try {m->forcedown (false);} catch (const failed&) {}
+ m = nullptr; // Disable exceptions guard above.
+ continue;
+ }
+
+ l3 ([&]{trace << "completed bootstrap in " << bootstrap_to - to << "s";});
+
+ // Shut the machine down cleanly.
+ //
+ if (!m->shutdown ((to = shutdown_to)))
+ return soft_fail ("bootstrap shutdown timeout");
+
+ l3 ([&]{trace << "completed shutdown in " << shutdown_to - to << "s";});
+
+ m->cleanup ();
+ }
+
+ r.machine.mac = m->mac; // Save the MAC address.
+
+ break;
+ }
+ }
+ catch (const system_error& e)
+ {
+ fail << "bootstrap error: " << e;
+ }
+
+ serialize_manifest (r, md / "manifest", "bootstrapped machine");
+ return r;
+}
+
+// Global toolchain lock.
+//
+// The overall locking protocol is as follows:
+//
+// 1. Before enumerating the machines each agent instance acquires the global
+// toolchain lock.
+//
+// 2. As the agent enumerates over the machines, it tries to acquire the lock
+// for each machine.
+//
+// 3. If the agent encounters a machine that it needs to bootstrap, it
+// releases all the other machine locks followed by the global lock,
+// proceeds to bootstrap the machine, releases its lock, and restarts the
+// process from scratch.
+//
+// 4. Otherwise, upon receiving a task response for one of the machines (plus,
+// potentially, a number of auxiliary machines), the agent releases all the
+// other machine locks followed by the global lock, proceeds to perform the
+// task on the selected machine(s), releases their locks, and restarts the
+// process from scratch.
+//
+// One notable implication of this protocol is that the machine locks are
+// only acquired while holding the global toolchain lock but can be released
+// while not holding this lock.
+//
+// (Note that because of this implication it can theoretically be possible
+// to omit acquiring all the machine locks during the enumeration process,
+// instead only acquiring the lock of the machine we need to bootstrap or
+// build. However, the current approach is simpler since we still need
+// to detect machines that are already locked, which entails acquiring
+// the lock anyway.)
+//
+// Note that unlike the machine lock below, here we don't bother with removing
+// the lock file.
+//
+class toolchain_lock
+{
+public:
+ toolchain_lock () = default; // Empty lock.
+
+ // Note: returns true if locking is disabled.
+ //
+ bool
+ locked () const
+ {
+ return tc_lock.empty () || fl_;
+ }
+
+ void
+ unlock (bool ignore_errors = false)
+ {
+ if (fl_)
+ {
+ fl_ = false; // We have tried.
+
+ if (flock (fd_.get (), LOCK_UN) != 0 && !ignore_errors)
+ throw_generic_error (errno);
+ }
+ }
+
+ ~toolchain_lock ()
+ {
+ unlock (true /* ignore_errors */);
+ }
+
+ toolchain_lock (toolchain_lock&&) = default;
+ toolchain_lock& operator= (toolchain_lock&&) = default;
+
+ toolchain_lock (const toolchain_lock&) = delete;
+ toolchain_lock& operator= (const toolchain_lock&) = delete;
+
+ // Implementation details.
+ //
+public:
+ explicit
+ toolchain_lock (auto_fd&& fd)
+ : fd_ (move (fd)), fl_ (true) {}
+
+private:
+ auto_fd fd_;
+ bool fl_ = false;
+};
+
+// Note: returns empty lock if toolchain locking is disabled.
+//
+static optional<toolchain_lock>
+lock_toolchain (unsigned int timeout)
+{
+ if (tc_lock.empty ())
+ return toolchain_lock ();
+
+ auto_fd fd (fdopen (tc_lock, fdopen_mode::out | fdopen_mode::create));
+
+ for (; flock (fd.get (), LOCK_EX | LOCK_NB) != 0; sleep (1), --timeout)
+ {
+ if (errno != EWOULDBLOCK)
+ throw_generic_error (errno);
+
+ if (timeout == 0)
+ return nullopt;
+ }
+
+ return toolchain_lock (move (fd));
+}
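
A usage sketch of the protocol above (condensed from the
enumerate_machines() logic further below):

    // Acquire the global toolchain lock, waiting in 60-second increments.
    //
    optional<toolchain_lock> tl;
    while (!(tl = lock_toolchain (60 /* seconds */)))
      info << "unable to acquire global toolchain lock " << tc_lock
           << " for 60s";

    // ... lock machines with lock_machine (*tl, ...) while holding tl ...

    // The machine locks may outlive the global lock: release it before
    // the lengthy bootstrap or build.
    //
    tl->unlock ();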
+
+// Per-toolchain machine lock.
//
// We use flock(2) which is straightforward. The tricky part is cleaning the
// file up. Here we may have a race when two processes are trying to open &
@@ -374,14 +774,29 @@ bootstrap_machine (const dir_path& md,
// guaranteed to be atomic (in case later we want to support exclusive
// bootstrap and shared build).
//
+// Note also that we per-toolchain lock auxiliary machines even though they
+// are not toolchain-specific. Doing it this way allows us to handle both
+// types of machines consistently with regards to priorities, interrupts, etc.
+// It also means we will have each auxiliary machine available per-toolchain
+// rather than a single machine shared between all the toolchains, which is
+// a good thing.
+//
class machine_lock
{
public:
- machine_lock () = default; // Empty lock.
+ // A lock is either locked by this process or it contains information about
+ // the process holding the lock.
+ //
+ pid_t pid; // Process using the machine.
+ optional<uint64_t> prio; // Task priority (absent means being bootstrapped
+ // or have been suspended).
- ~machine_lock ()
+ machine_lock () = default; // Uninitialized lock.
+
+ bool
+ locked () const
{
- unlock (true /* ignore_errors */);
+ return fl_;
}
void
@@ -391,13 +806,69 @@ public:
{
fl_ = false; // We have tried.
- try_rmfile (fp_, ignore_errors);
+ if (fd_ != nullfd)
+ {
+ try_rmfile (fp_, ignore_errors);
- if (flock (fd_.get (), LOCK_UN) != 0 && !ignore_errors)
- throw_generic_error (errno);
+ if (flock (fd_.get (), LOCK_UN) != 0 && !ignore_errors)
+ throw_generic_error (errno);
+ }
}
}
+ // Write the holding process information to the lock file.
+ //
+ // Must be called while holding the toolchain lock (see the lock_machine()
+ // implementation for rationale).
+ //
+ void
+ bootstrap (const toolchain_lock& tl)
+ {
+ assert (tl.locked () && fl_);
+
+ if (fd_ != nullfd)
+ write (nullopt);
+ }
+
+ void
+ perform_task (const toolchain_lock& tl, uint64_t prio)
+ {
+ assert (tl.locked () && fl_);
+
+ if (fd_ != nullfd)
+ write (prio);
+ }
+
+ // Truncate the holding process information after the call to perform_task()
+ // so that it doesn't contain the priority, marking the machine as being
+ // suspended.
+ //
+ // Note that this one can be called without holding the toolchain lock.
+ //
+ void
+ suspend_task ()
+ {
+ assert (fl_);
+
+ if (fd_ != nullfd)
+ {
+ assert (tp_ != 0); // Must be called after perform_task().
+
+ // While there is no direct statement to this effect in POSIX, the
+ // consensus on the internet is that truncation is atomic, in the sense
+ // that the reader shouldn't see partially truncated content. Feels like
+ // it should be doubly so when actually truncating as opposed to
+ // extending the size, which is what we do.
+ //
+ fdtruncate (fd_.get (), tp_);
+ }
+ }
+
+ ~machine_lock ()
+ {
+ unlock (true /* ignore_errors */);
+ }
+
machine_lock (machine_lock&&) = default;
machine_lock& operator= (machine_lock&&) = default;
@@ -407,30 +878,119 @@ public:
// Implementation details.
//
public:
+ // If fd is nullfd, treat it as a fake lock (used for fake machines).
+ //
machine_lock (path&& fp, auto_fd&& fd)
: fp_ (move (fp)), fd_ (move (fd)), fl_ (true) {}
+ machine_lock (pid_t pi, optional<uint64_t> pr)
+ : pid (pi), prio (pr), fl_ (false) {}
+
private:
- path fp_;
- auto_fd fd_;
- bool fl_ = false;
+ void
+ write (optional<uint64_t> prio)
+ {
+ pid_t pid (getpid ());
+
+ string l (to_string (pid));
+
+ if (prio)
+ {
+ tp_ = l.size (); // Truncate position.
+
+ l += ' ';
+ l += to_string (*prio);
+ }
+
+ auto n (fdwrite (fd_.get (), l.c_str (), l.size ()));
+
+ if (n == -1)
+ throw_generic_ios_failure (errno);
+
+ if (static_cast<size_t> (n) != l.size ())
+ throw_generic_ios_failure (EFBIG);
+ }
+
+private:
+ path fp_;
+ auto_fd fd_;
+ bool fl_ = false;
+ uint64_t tp_ = 0; // Truncate position.
};
-// Try to lock the machine given its -<toolchain> directory.
+// Try to lock the machine given its -<toolchain> directory. Return unlocked
+// lock with pid/prio if already in use. Must be called while holding the
+// toolchain lock.
//
-static optional<machine_lock>
-lock_machine (const dir_path& tp)
+static machine_lock
+lock_machine (const toolchain_lock& tl, const dir_path& tp)
{
+ assert (tl.locked ());
+
path fp (tp + ".lock"); // The -<toolchain>.lock file.
for (;;)
{
- auto_fd fd (fdopen (fp, fdopen_mode::out | fdopen_mode::create));
+ auto_fd fd (fdopen (fp, (fdopen_mode::in |
+ fdopen_mode::out |
+ fdopen_mode::create)));
if (flock (fd.get (), LOCK_EX | LOCK_NB) != 0)
{
if (errno == EWOULDBLOCK)
- return nullopt;
+ {
+ // The file should contain a line in the following format:
+ //
+ // <pid>[ <prio>]
+ //
+ char buf[64]; // Sufficient for 2 64-bit numbers (20 decimals max).
+
+ auto sn (fdread (fd.get (), buf, sizeof (buf)));
+
+ if (sn == -1)
+ throw_generic_ios_failure (errno);
+
+ size_t n (static_cast<size_t> (sn));
+
+ // While there would be a race between locking the file then writing
+ // to it in one process and reading from it in another process, we are
+ // protected by the global toolchain lock, which must be held by both
+ // sides during this dance.
+ //
+ assert (n > 0 && n < sizeof (buf));
+ buf[n] = '\0';
+
+ // Note also that it's possible that by the time we read the pid/prio
+ // the lock has already been released. But this case is no different
+ // from the lock being released after we have read pid/prio but before
+ // acting on this information (e.g., trying to interrupt the other
+ // process), which we have to deal with anyway.
+ //
+ pid_t pid;
+ optional<uint64_t> prio;
+ {
+ char* p (strchr (buf, ' '));
+ char* e;
+
+ {
+ errno = 0;
+ pid = strtoll (buf, &e, 10); // Note: pid_t is signed.
+ assert (errno != ERANGE &&
+ e != buf &&
+ (p != nullptr ? e == p : *e == '\0'));
+ }
+
+ if (p != nullptr)
+ {
+ ++p;
+ errno = 0;
+ prio = strtoull (p, &e, 10);
+ assert (errno != ERANGE && e != p && *e == '\0');
+ }
+ }
+
+ return machine_lock (pid, prio);
+ }
throw_generic_error (errno);
}
@@ -444,7 +1004,7 @@ lock_machine (const dir_path& tp)
if (st1.st_ino == st2.st_ino)
return machine_lock (move (fp), move (fd));
- // Note: unlocked by close().
+ // Retry (note: lock is unlocked by auto_fd::close()).
}
}
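
Putting the pieces together, a condensed sketch of how these locks are used
by the bootstrap/build paths below (the priority value is illustrative):

    machine_lock ml (lock_machine (tl, tp)); // Requires toolchain lock tl.

    if (ml.locked ())
    {
      ml.perform_task (tl, 101 /* prio */); // Write "<pid> <prio>".
      tl.unlock ();                         // Machine lock remains held.

      // ... perform the task; if it has to be abandoned mid-way:
      //
      ml.suspend_task (); // Truncate to "<pid>": mark as suspended.
    }
    else
    {
      // Busy: ml.pid identifies the holder and ml.prio, if present, its
      // task priority (absent means bootstrapping or suspended).
    }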
@@ -461,375 +1021,1131 @@ snapshot_path (const dir_path& tp)
to_string (inst));
}
-// Return available machines, (re-)bootstrapping them if necessary.
+// Compare bbot and library versions returning -1 if older, 0 if the same,
+// and +1 if newer.
+//
+static int
+compare_bbot (const bootstrap_manifest& m)
+{
+ auto cmp = [&m] (const string& n, const char* v) -> int
+ {
+ standard_version sv (v);
+ auto i = m.versions.find (n);
+
+ return (i == m.versions.end () || i->second < sv
+ ? -1
+ : i->second > sv ? 1 : 0);
+ };
+
+ // Start from the top assuming a new dependency cannot be added without
+ // changing the dependent's version.
+ //
+ int r;
+ return (
+ (r = cmp ("bbot", BBOT_VERSION_STR)) != 0 ? r :
+ (r = cmp ("libbbot", LIBBBOT_VERSION_STR)) != 0 ? r :
+ (r = cmp ("libbpkg", LIBBPKG_VERSION_STR)) != 0 ? r :
+ (r = cmp ("libbutl", LIBBUTL_VERSION_STR)) != 0 ? r : 0);
+};
+
+// Return the global toolchain lock and the list of available machines,
+// (re-)bootstrapping them if necessary.
+//
+// Note that this function returns both machines that this process managed to
+// lock as well as the machines locked by other processes (including those
+// that are being bootstrapped or that have been suspended), in case the
+// caller needs to interrupt one of them for a higher-priority task. In the
+// latter case, the manifest is empty if the machine is bootstrapping or
+// suspended and only has the machine_manifest information otherwise. (The
+// bootstrapping/suspended machines have to be returned to get the correct
+// count of currently active instances for the inst_max comparison.)
+//
+// Note that both build and auxiliary machines are returned. For auxiliary,
+// toolchain and bootstrap manifests are unused and therefore always empty.
//
struct bootstrapped_machine
{
- dir_path path;
- bootstrapped_machine_manifest manifest;
machine_lock lock;
+ const dir_path path;
+ bootstrapped_machine_manifest manifest;
};
using bootstrapped_machines = vector<bootstrapped_machine>;
-static bootstrapped_machines
+static pair<toolchain_lock, bootstrapped_machines>
enumerate_machines (const dir_path& machines)
try
{
tracer trace ("enumerate_machines", machines.string ().c_str ());
- bootstrapped_machines r;
-
- if (ops.fake_machine_specified ())
+ for (;;) // From-scratch retry loop for after bootstrap (see below).
{
- auto mh (
- parse_manifest<machine_header_manifest> (
- ops.fake_machine (), "machine header"));
-
- r.push_back (
- bootstrapped_machine {
- dir_path (ops.machines ()) /= mh.name, // For diagnostics.
- bootstrapped_machine_manifest {
- machine_manifest {
- move (mh.id),
- move (mh.name),
- move (mh.summary),
- machine_type::kvm,
- string ("de:ad:be:ef:de:ad"),
- nullopt,
- strings ()},
- toolchain_manifest {tc_id},
- bootstrap_manifest {}},
- machine_lock ()});
-
- return r;
- }
-
- // Notice and warn if there are no machines (as opposed to all of them being
- // locked).
- //
- bool none (true);
+ pair<toolchain_lock, bootstrapped_machines> pr;
- // The first level are machine volumes.
- //
- for (const dir_entry& ve: dir_iterator (machines,
- false /* ignore_dangling */))
- {
- const string vn (ve.path ().string ());
+ {
+ optional<toolchain_lock> l;
+ while (!(l = lock_toolchain (60 /* seconds */)))
+ {
+ // One typical situation where this can happen is when another agent
+ // takes a while to request a task (e.g., due to network issues). So
+ // this is an info as opposed to a warning.
+ //
+ info << "unable to acquire global toolchain lock " << tc_lock
+ << " for 60s";
+ }
+ pr.first = move (*l);
+ }
- // Ignore hidden directories.
- //
- if (ve.type () != entry_type::directory || vn[0] == '.')
- continue;
+ toolchain_lock& tl (pr.first);
+ bootstrapped_machines& r (pr.second);
- const dir_path vd (dir_path (machines) /= vn);
+ if (ops.fake_machine_specified ())
+ {
+ auto mh (
+ parse_manifest<machine_header_manifest> (
+ ops.fake_machine (), "machine header"));
+
+ r.push_back (
+ bootstrapped_machine {
+ machine_lock (path (), nullfd), // Fake lock.
+ dir_path (ops.machines ()) /= mh.name, // For diagnostics.
+ bootstrapped_machine_manifest {
+ machine_manifest {
+ move (mh.id),
+ move (mh.name),
+ move (mh.summary),
+ machine_type::kvm,
+ string ("de:ad:be:ef:de:ad"),
+ nullopt,
+ strings ()},
+ toolchain_manifest {tc_id},
+ bootstrap_manifest {}}});
+
+ return pr;
+ }
- // Inside we have machines.
+ // Notice and warn if there are no build machines (as opposed to all of
+ // them being busy).
//
- try
+ bool none (true);
+
+ // We used to (re)-bootstrap machines as we are iterating. But with the
+ // introduction of the priority monitoring functionality we need to
+ // respect the --instance-max value. Which means we first need to try to
+ // lock all the machines in order to determine how many of them are busy
+ // then check this count against --instance-max, and only bootstrap if we
+ // are not over the limit. Which means we have to store all the
+ // information about a (first) machine that needs bootstrapping until
+ // after we have enumerated all of them.
+ //
+ struct pending_bootstrap
{
- for (const dir_entry& me: dir_iterator (vd, false /* ignore_dangling */))
- {
- const string mn (me.path ().string ());
+ machine_lock ml;
+ dir_path tp; // -<toolchain>
+ dir_path xp; // -<toolchain>-<xxx>
+ machine_manifest mm;
+ optional<bootstrapped_machine_manifest> bmm;
+ };
+ optional<pending_bootstrap> pboot;
- if (me.type () != entry_type::directory || mn[0] == '.')
- continue;
+ // The first level are machine volumes.
+ //
+ for (const dir_entry& ve: dir_iterator (machines, dir_iterator::no_follow))
+ {
+ const string vn (ve.path ().string ());
- const dir_path md (dir_path (vd) /= mn);
+ // Ignore hidden directories.
+ //
+ if (ve.type () != entry_type::directory || vn[0] == '.')
+ continue;
- // Our endgoal here is to obtain a bootstrapped snapshot of this
- // machine while watching out for potential race conditions (other
- // instances as well as machines being added/upgraded/removed; see the
- // manual for details).
- //
- // So here is our overall plan:
- //
- // 1. Resolve current subvolume link for our bootstrap protocol.
- //
- // 2. Lock the machine. This excludes any other instance from trying
- // to perform the following steps.
- //
- // 3. If there is no link, cleanup old bootstrap (if any) and ignore
- // this machine.
- //
- // 4. Try to create a snapshot of current subvolume (this operation is
- // atomic). If failed (e.g., someone changed the link and removed
- // the subvolume in the meantime), retry from #1.
- //
- // 5. Compare the snapshot to the already bootstrapped version (if
- // any) and see if we need to re-bootstrap. If so, use the snapshot
- // as a starting point. Rename to bootstrapped at the end (atomic).
- //
- dir_path lp (dir_path (md) /= (mn + '-' + bs_prot)); // -<P>
- dir_path tp (dir_path (md) /= (mn + '-' + tc_name)); // -<toolchain>
+ const dir_path vd (dir_path (machines) /= vn);
- auto delete_bootstrapped = [&tp, &trace] () // Delete -<toolchain>.
+ // Inside we have machines.
+ //
+ try
+ {
+ for (const dir_entry& me: dir_iterator (vd, dir_iterator::no_follow))
{
- run_btrfs (trace, "property", "set", "-ts", tp, "ro", "false");
- run_btrfs (trace, "subvolume", "delete", tp);
- };
+ const string mn (me.path ().string ());
- for (size_t retry (0);; ++retry)
- {
- if (retry != 0)
- sleep (1);
+ if (me.type () != entry_type::directory || mn[0] == '.')
+ continue;
- // Resolve the link to subvolume path.
+ const dir_path md (dir_path (vd) /= mn);
+
+ // Our endgoal here is to obtain a bootstrapped snapshot of this
+ // machine while watching out for potential race conditions (other
+ // instances as well as machines being added/upgraded/removed; see
+ // the manual for details).
+ //
+ // So here is our overall plan:
//
- dir_path sp; // <name>-<P>.<R>
+ // 1. Resolve current subvolume link for our bootstrap protocol.
+ //
+ // 2. Lock the machine. This excludes any other instance from trying
+ // to perform the following steps.
+ //
+ // 3. If there is no link, cleanup old bootstrap (if any) and ignore
+ // this machine.
+ //
+ // 4. Try to create a snapshot of current subvolume (this operation
+ // is atomic). If failed (e.g., someone changed the link and
+ // removed the subvolume in the meantime), retry from #1.
+ //
+ // 5. Compare the snapshot to the already bootstrapped version (if
+ // any) and see if we need to re-bootstrap. If so, use the
+ // snapshot as a starting point. Rename to bootstrapped at the
+ // end (atomic).
+ //
+ dir_path lp (dir_path (md) /= (mn + '-' + bs_prot)); // -<P>
+ dir_path tp (dir_path (md) /= (mn + '-' + tc_name)); // -<toolchain>
- try
+ auto delete_bootstrapped = [&tp, &trace] () // Delete -<toolchain>.
{
- sp = path_cast<dir_path> (readsymlink (lp));
+ run_btrfs (trace, "property", "set", "-ts", tp, "ro", "false");
+ run_btrfs (trace, "subvolume", "delete", tp);
+ };
- if (sp.relative ())
- sp = md / sp;
- }
- catch (const system_error& e)
+ for (size_t retry (0);; ++retry)
{
- // Leave the subvolume path empty if the subvolume link doesn't
- // exist and fail on any other error.
+ if (retry != 0)
+ sleep (1);
+
+ // Resolve the link to subvolume path.
//
- if (e.code ().category () != std::generic_category () ||
- e.code ().value () != ENOENT)
- fail << "unable to read subvolume link " << lp << ": " << e;
- }
+ dir_path sp; // <name>-<P>.<R>
- none = none && sp.empty ();
+ try
+ {
+ sp = path_cast<dir_path> (readsymlink (lp));
- // Try to lock the machine, skipping it if already locked.
- //
- optional<machine_lock> ml (lock_machine (tp));
+ if (sp.relative ())
+ sp = md / sp;
+ }
+ catch (const system_error& e)
+ {
+ // Leave the subvolume path empty if the subvolume link doesn't
+ // exist and fail on any other error.
+ //
+ if (e.code ().category () != std::generic_category () ||
+ e.code ().value () != ENOENT)
+ fail << "unable to read subvolume link " << lp << ": " << e;
+ }
- if (!ml)
- {
- l4 ([&]{trace << "skipping " << md << ": locked";});
- break;
- }
+ // Try to lock the machine.
+ //
+ machine_lock ml (lock_machine (tl, tp));
- bool te (dir_exists (tp));
+ if (!ml.locked ())
+ {
+ machine_manifest mm;
+ if (ml.prio)
+ {
+ // Get the machine manifest (subset of the steps performed for
+ // the locked case below).
+ //
+ // Note that it's possible the machine we get is not what was
+ // originally locked by the other process (e.g., it has been
+ // upgraded since). It's also possible that if and when we
+ // interrupt and lock this machine, it will be a different
+ // machine (e.g., it has been upgraded since we read this
+ // machine manifest). To deal with all of that we will be
+ // reloading this information if/when we acquire the lock to
+ // this machine.
+ //
+ if (sp.empty ())
+ {
+ l3 ([&]{trace << "skipping " << md << ": no subvolume link";});
+ break;
+ }
+
+ l3 ([&]{trace << "keeping " << md << ": locked by " << ml.pid
+ << " with priority " << *ml.prio;});
+
+ mm = parse_manifest<machine_manifest> (
+ sp / "manifest", "machine");
+
+ none = none && mm.effective_role () == machine_role::auxiliary;
+ }
+ else // Bootstrapping/suspended.
+ {
+ l3 ([&]{trace << "keeping " << md << ": being bootstrapped "
+ << "or suspened by " << ml.pid;});
- // If the resolution fails, then this means there is no current
- // machine subvolume (for this bootstrap protocol). In this case we
- // clean up our toolchain subvolume (-<toolchain>, if any) and
- // ignore this machine.
- //
- if (sp.empty ())
- {
- if (te)
- delete_bootstrapped ();
+ // Assume it is a build machine (we cannot determine whether
+ // it is build or auxiliary without loading its manifest).
+ //
+ none = false;
+ }
- l3 ([&]{trace << "skipping " << md << ": no subvolume link";});
- break;
- }
+ // Add the machine to the lists and bail out.
+ //
+ r.push_back (bootstrapped_machine {
+ move (ml),
+ move (tp),
+ bootstrapped_machine_manifest {move (mm), {}, {}}});
- // <name>-<toolchain>-<xxx>
- //
- const dir_path xp (snapshot_path (tp));
+ break;
+ }
- if (btrfs_exit (trace, "subvolume", "snapshot", sp, xp) != 0)
- {
- if (retry >= 10)
- fail << "unable to snapshot subvolume " << sp;
+ bool te (dir_exists (tp));
- continue;
- }
+ // If the resolution fails, then this means there is no current
+ // machine subvolume (for this bootstrap protocol). In this case
+ // we clean up our toolchain subvolume (-<toolchain>, if any) and
+ // ignore this machine.
+ //
+ if (sp.empty ())
+ {
+ if (te)
+ delete_bootstrapped ();
- // Load the (original) machine manifest.
- //
- auto mm (
- parse_manifest<machine_manifest> (sp / "manifest", "machine"));
+ l3 ([&]{trace << "skipping " << md << ": no subvolume link";});
+ break;
+ }
- // If we already have <name>-<toolchain>, see if it needs to be re-
- // bootstrapped. Things that render it obsolete:
- //
- // 1. New machine revision (compare machine ids).
- // 2. New toolchain (compare toolchain ids).
- // 3. New bbot/libbbot (compare versions).
- //
- // The last case has a complication: what should we do if we have
- // bootstrapped a newer version of bbot? This would mean that we are
- // about to be stopped and upgraded (and the upgraded version will
- // probably be able to use the result). So we simply ignore this
- // machine for this run.
+ // <name>-<toolchain>-<xxx>
+ //
+ dir_path xp (snapshot_path (tp));
- // Return -1 if older, 0 if the same, and +1 if newer.
- //
- auto compare_bbot = [] (const bootstrap_manifest& m) -> int
- {
- auto cmp = [&m] (const string& n, const char* v) -> int
+ if (btrfs_exit (trace, "subvolume", "snapshot", sp, xp) != 0)
{
- standard_version sv (v);
- auto i = m.versions.find (n);
+ if (retry >= 10)
+ fail << "unable to snapshot subvolume " << sp;
- return (i == m.versions.end () || i->second < sv
- ? -1
- : i->second > sv ? 1 : 0);
- };
+ continue;
+ }
- // Start from the top assuming a new dependency cannot be added
- // without changing the dependent's version.
+ // Load the (original) machine manifest.
//
- int r;
- return
- (r = cmp ("bbot", BBOT_VERSION_STR)) != 0 ? r :
- (r = cmp ("libbbot", LIBBBOT_VERSION_STR)) != 0 ? r :
- (r = cmp ("libbpkg", LIBBPKG_VERSION_STR)) != 0 ? r :
- (r = cmp ("libbutl", LIBBUTL_VERSION_STR)) != 0 ? r : 0;
- };
+ machine_manifest mm (
+ parse_manifest<machine_manifest> (sp / "manifest", "machine"));
- optional<bootstrapped_machine_manifest> bmm;
- if (te)
- {
- bmm = parse_manifest<bootstrapped_machine_manifest> (
- tp / "manifest", "bootstrapped machine");
+ bool aux (mm.effective_role () == machine_role::auxiliary);
- if (bmm->machine.id != mm.id)
+ // Skip machines for which we don't have sufficient RAM.
+ //
+ if (effective_ram_minimum (mm) >
+ (aux ? ops.auxiliary_ram () : ops.build_ram ()))
{
- l3 ([&]{trace << "re-bootstrapping " << tp << ": new machine";});
- te = false;
+ l3 ([&]{trace << "skipping " << md << ": insufficient RAM";});
+ run_btrfs (trace, "subvolume", "delete", xp);
+ break;
}
- if (!tc_id.empty () && bmm->toolchain.id != tc_id)
- {
- l3 ([&]{trace << "re-bootstrapping " << tp << ": new toolchain";});
- te = false;
- }
+ none = none && aux;
- if (int i = compare_bbot (bmm->bootstrap))
+ // If we already have <name>-<toolchain>, see if it needs to be
+ // re-bootstrapped. Things that render it obsolete:
+ //
+ // 1. New machine revision (compare machine ids).
+ // 2. New toolchain (compare toolchain ids, not auxiliary).
+ // 3. New bbot/libbbot (compare versions, not auxiliary).
+ //
+ // The last case has a complication: what should we do if we have
+ // bootstrapped a newer version of bbot? This would mean that we
+ // are about to be stopped and upgraded (and the upgraded version
+ // will probably be able to use the result). So we simply ignore
+ // this machine for this run.
+ //
+ // Note: see similar code in the machine interruption logic.
+ //
+ optional<bootstrapped_machine_manifest> bmm;
+ if (te)
{
- if (i < 0)
+ bmm = parse_manifest<bootstrapped_machine_manifest> (
+ tp / "manifest", "bootstrapped machine");
+
+ if (bmm->machine.id != mm.id)
{
- l3 ([&]{trace << "re-bootstrapping " << tp << ": new bbot";});
+ l3 ([&]{trace << "re-bootstrap " << tp << ": new machine";});
te = false;
}
- else
+
+ if (!aux)
{
- l3 ([&]{trace << "ignoring " << tp << ": old bbot";});
- run_btrfs (trace, "subvolume", "delete", xp);
- break;
+ if (!tc_id.empty () && bmm->toolchain.id != tc_id)
+ {
+ l3 ([&]{trace << "re-bootstrap " << tp << ": new toolchain";});
+ te = false;
+ }
+
+ if (int i = compare_bbot (bmm->bootstrap))
+ {
+ if (i < 0)
+ {
+ l3 ([&]{trace << "re-bootstrap " << tp << ": new bbot";});
+ te = false;
+ }
+ else
+ {
+ l3 ([&]{trace << "ignoring " << tp << ": old bbot";});
+ run_btrfs (trace, "subvolume", "delete", xp);
+ break;
+ }
+ }
}
+
+ if (!te)
+ delete_bootstrapped ();
}
+ else
+ l3 ([&]{trace << "bootstrap " << tp;});
if (!te)
- delete_bootstrapped ();
- }
- else
- l3 ([&]{trace << "bootstrapping " << tp;});
+ {
+ // Ignore any other machines that need bootstrapping.
+ //
+ if (!pboot)
+ {
+ pboot = pending_bootstrap {
+ move (ml), move (tp), move (xp), move (mm), move (bmm)};
+ }
+ else
+ run_btrfs (trace, "subvolume", "delete", xp);
- if (!te)
- {
- // Use the <name>-<toolchain>-<xxx> snapshot that we have made to
- // bootstrap the new machine. Then atomically rename it to
- // <name>-<toolchain>.
- //
- // Also release all the machine locks that we have acquired so far
- // since the bootstrap will take a while and other instances might
- // be able to use them.
+ break;
+ }
+ else
+ run_btrfs (trace, "subvolume", "delete", xp);
+
+ // Add the machine to the lists.
//
- r.clear ();
+ r.push_back (
+ bootstrapped_machine {move (ml), move (tp), move (*bmm)});
- bmm = bootstrap_machine (xp, mm, move (bmm));
+ break;
+ } // Retry loop.
+ } // Inner dir_iterator loop.
+ }
+ catch (const system_error& e)
+ {
+ fail << "unable to iterate over " << vd << ": " << e;
+ }
+ } // Outer dir_iterator loop.
- if (!bmm)
- {
- l3 ([&]{trace << "ignoring " << tp << ": failed to bootstrap";});
- run_btrfs (trace, "subvolume", "delete", xp);
- break;
- }
+ // See if there is a pending bootstrap and whether we can perform it.
+ //
+ // What should we do if we can't (i.e., we are in the priority monitor
+ // mode)? Well, we could have found some machines that are already
+ // bootstrapped (busy or not) and there may be a higher-priority task for
+ // one of them, so it feels natural to return whatever we've got.
+ //
+ if (pboot)
+ {
+ dir_path& tp (pboot->tp);
+ dir_path& xp (pboot->xp);
- try
- {
- mvdir (xp, tp);
- }
- catch (const system_error& e)
- {
- fail << "unable to rename " << xp << " to " << tp;
- }
+ // Determine how many machines are busy (locked by other processes) and
+ // make sure it's below the --instance-max limit, if specified.
+ //
+ // We should only count build machines unless being bootstrapped (see
+ // above).
+ //
+ if (inst_max != 0)
+ {
+ size_t busy (0);
+ for (const bootstrapped_machine& m: r)
+ {
+ if (!m.lock.locked () &&
+ (!m.lock.prio ||
+ m.manifest.machine.effective_role () != machine_role::auxiliary))
+ ++busy;
+ }
- l2 ([&]{trace << "bootstrapped " << bmm->machine.name;});
+ assert (busy <= inst_max);
- // Check the bootstrapped bbot version as above and ignore this
- // machine if it's newer than us.
- //
- if (int i = compare_bbot (bmm->bootstrap))
- {
- if (i > 0)
- {
- l3 ([&]{trace << "ignoring " << tp << ": old bbot";});
- break;
- }
- else
- warn << "bootstrapped " << tp << " bbot worker is older "
- << "than agent; assuming test setup";
- }
- }
+ if (busy == inst_max)
+ {
+ l3 ([&]{trace << "instance max reached attempting to bootstrap "
+ << tp;});
+ run_btrfs (trace, "subvolume", "delete", xp);
+ return pr;
+ }
+ }
+
+ machine_lock& ml (pboot->ml);
+
+ l3 ([&]{trace << "bootstrapping " << tp;});
+
+  // Use the <name>-<toolchain>-<xxx> snapshot that we have made to bootstrap
+  // the new machine. Then atomically rename it to <name>-<toolchain>.
+ //
+ // Also release all the machine locks that we have acquired so far as
+ // well as the global toolchain lock, since the bootstrap will take a
+ // while and other instances might be able to use them. Because we are
+ // releasing the global lock, we have to restart the enumeration process
+ // from scratch.
+ //
+ r.clear ();
+ ml.bootstrap (tl);
+ tl.unlock ();
+
+ bool aux (pboot->mm.effective_role () == machine_role::auxiliary);
+
+ optional<bootstrapped_machine_manifest> bmm (
+ aux
+ ? bootstrap_auxiliary_machine (xp, pboot->mm, move (pboot->bmm))
+ : bootstrap_build_machine (xp, pboot->mm, move (pboot->bmm)));
+
+ if (!bmm)
+ {
+ l3 ([&]{trace << "ignoring " << tp << ": failed to bootstrap";});
+ run_btrfs (trace, "subvolume", "delete", xp);
+ continue;
+ }
+
+ try
+ {
+ mvdir (xp, tp);
+ }
+ catch (const system_error& e)
+ {
+ fail << "unable to rename " << xp << " to " << tp;
+ }
+
+ l2 ([&]{trace << "bootstrapped " << bmm->machine.name;});
+
+ // Check the bootstrapped bbot version as above and ignore this build
+ // machine if it's newer than us.
+ //
+ if (!aux)
+ {
+ if (int i = compare_bbot (bmm->bootstrap))
+ {
+ if (i > 0)
+ l3 ([&]{trace << "ignoring " << tp << ": old bbot";});
else
- run_btrfs (trace, "subvolume", "delete", xp);
+ warn << "bootstrapped " << tp << " bbot worker is older "
+ << "than agent; assuming test setup";
+ }
+ }
- // Add the machine to the lists.
- //
- r.push_back (
- bootstrapped_machine {move (tp), move (*bmm), move (*ml)});
+ continue; // Re-enumerate from scratch.
+ }
+
+ if (none)
+ warn << "no build machines for toolchain " << tc_name;
+
+ return pr;
+
+ } // From-scratch retry loop.
+
+ // Unreachable.
+}
+catch (const system_error& e)
+{
+ fail << "unable to iterate over " << machines << ": " << e << endf;
+}
+
+// Perform the build task throwing interrupt if it has been interrupted.
+//
+struct interrupt {};
+
+// Start an auxiliary machine (steps 1-3 described in perform_task() below).
+//
+// Note that if the returned machine is NULL, then it means it has failed to
+// start up (in which case the diagnostics have already been issued and the
+// snapshot cleaned up).
+//
+// Note: can throw interrupt.
+//
+struct auxiliary_machine_result
+{
+ dir_path snapshot;
+ unique_ptr<bbot::machine> machine;
+};
+
+using auxiliary_machine_results = vector<auxiliary_machine_result>;
+
+static pair<auxiliary_machine_result, string /* environment */>
+start_auxiliary_machine (bootstrapped_machine& am,
+ const string& env_name,
+ uint16_t machine_num,
+ size_t ram,
+ const string& in_name, // <toolchain>-<instance>
+ const dir_path& tftp_put_dir,
+ optional<size_t> boost_cpus)
+try
+{
+ tracer trace ("start_auxiliary_machine", am.path.string ().c_str ());
+ // NOTE: a simplified version of perform_task() below.
+
+ machine_lock& ml (am.lock);
+ const dir_path& md (am.path);
+ const bootstrapped_machine_manifest& mm (am.manifest);
+
+ path ef (tftp_put_dir / "environment"); // Environment upload file.
+ path efm (ef + '-' + mm.machine.name); // Environment upload saved file.
+ try_rmfile (ef);
+ try_rmfile (efm);
+
+ // <name>-<toolchain>-<xxx>
+ //
+ const dir_path xp (snapshot_path (md));
+
+ for (size_t retry (0);; ++retry)
+ {
+ if (retry != 0)
+ run_btrfs (trace, "subvolume", "delete", xp);
+
+ run_btrfs (trace, "subvolume", "snapshot", md, xp);
+
+ // Start the TFTP server. Map:
+ //
+ // GET requests to /dev/null
+ // PUT requests to .../build/<toolchain>-<instance>/put/*
+ //
+ // Note that we only need to run the TFTP server until we get the
+ // environment upload. Which means we could have reused the same port as
+ // the build machine. But let's keep things parallel to the VNC ports and
+  // use a separate TFTP port for each auxiliary machine.
+ //
+ tftp_server tftpd ("Gr ^/?(.+)$ " + string ("/dev/null") + '\n' +
+ "Pr ^/?(.+)$ /build/" + in_name + "/put/\\1\n",
+ ops.tftp_port () + offset + machine_num);
+
+ l3 ([&]{trace << "tftp server on port " << tftpd.port ();});
+
+ // Note: the machine handling logic is similar to bootstrap. Except here
+ // we have to cleanup the snapshot ourselves in case of suspension or
+ // unexpected exit.
+
+ // Start the machine.
+ //
+  // Note that for now we don't support logging into auxiliary machines in
+  // the interactive mode. Maybe one day.
+ //
+ unique_ptr<machine> m (
+ start_machine (xp,
+ mm.machine,
+ machine_num,
+ boost_cpus ? *boost_cpus : ops.cpu (),
+ ram,
+ mm.machine.mac,
+ ops.bridge (),
+ tftpd.port (),
+ false /* public_vnc */));
+
+ auto mg (
+ make_exception_guard (
+ [&m, &xp] ()
+ {
+ if (m != nullptr)
+ {
+ info << "trying to force machine " << xp << " down";
+ try {m->forcedown (false);} catch (const failed&) {}
+ }
+ }));
+
+ auto soft_fail = [&trace, &ml, &xp, &m] (const char* msg)
+ {
+ {
+ diag_record dr (error);
+ dr << msg << " for machine " << xp << ", suspending";
+ m->print_info (dr);
+ }
+
+ try
+ {
+ // Update the information in the machine lock to signal that the
+ // machine is suspended and cannot be interrupted.
+ //
+ ml.suspend_task ();
+
+ m->suspend (false);
+ m->wait (false);
+ m->cleanup ();
+ run_btrfs (trace, "subvolume", "delete", xp);
+ info << "resuming after machine suspension";
+ }
+ catch (const failed&) {}
+
+ return make_pair (auxiliary_machine_result {move (xp), nullptr},
+ string ());
+ };
+
+ auto check_machine = [&xp, &m] ()
+ {
+ try
+ {
+ size_t t (0);
+ if (!m->wait (t /* seconds */, false /* fail_hard */))
+ return true;
+ }
+ catch (const failed&) {}
+
+ diag_record dr (warn);
+ dr << "machine " << xp << " exited unexpectedly";
+ m->print_info (dr);
+
+ return false;
+ };
+
+ auto check_interrupt = [&trace, &xp, &m] ()
+ {
+ if (sigurs1.load (std::memory_order_consume) == 0)
+ return;
+
+ l2 ([&]{trace << "machine " << xp << " interruped";});
+
+ try {m->forcedown (false);} catch (const failed&) {}
+ m->cleanup ();
+ m = nullptr; // Disable exceptions guard above.
+ run_btrfs (trace, "subvolume", "delete", xp);
+
+ throw interrupt ();
+ };
+
+ // Wait for up to 4 minutes (by default) for the environment upload (the
+ // same logic as in bootstrap_auxiliary_machine() except here the machine
+ // cannot just exit).
+ //
+ size_t to;
+ const size_t startup_to (ops.build_startup ());
+
+ for (to = startup_to; to != 0; )
+ {
+ check_interrupt ();
+
+ if (tftpd.serve (to, 2))
+ continue;
+
+ if (!check_machine ())
+ {
+ // An auxiliary machine should not just exit.
+ //
+ return make_pair (auxiliary_machine_result {move (xp), nullptr},
+ string ());
+ }
+
+ if (file_not_empty (ef))
+ {
+ if (!tftpd.serve (to, 5))
break;
+ }
+ }
+
+ if (to == 0)
+ {
+ if (retry > ops.build_retries ())
+ return soft_fail ("build startup timeout");
+
+ // Note: keeping the logs behind (no cleanup).
+
+ diag_record dr (warn);
+ dr << "machine " << mm.machine.name << " mis-booted, retrying";
+ m->print_info (dr);
+
+ try {m->forcedown (false);} catch (const failed&) {}
+ m = nullptr; // Disable exceptions guard above.
+ continue;
+ }
+
+ l3 ([&]{trace << "completed startup in " << startup_to - to << "s";});
+
+ // Read the uploaded environment and, if necessary, append the name prefix
+ // (which we first make a valid C identifier and uppercase).
+ //
+ // Note that it may seem like a good idea to validate the format here.
+ // But that means we will essentially need to parse it twice (here and in
+  // worker). Plus, in worker we can communicate some diagnostics by writing
+ // it to the build log (here all we can easily do is abort the task). So
+ // here we just append the name prefix to trimmed non-blank/comment lines.
+ //
+ string env_pfx (env_name.empty ()
+ ? string ()
+ : ucase (sanitize_identifier (env_name)) + '_');
+ string env;
+ try
+ {
+ ifdstream is (ef, ifdstream::badbit);
+ for (string l; !eof (getline (is, l)); )
+ {
+ trim (l);
+
+ if (!env_pfx.empty () && !l.empty () && l.front () != '#')
+ l.insert (0, env_pfx);
+
+ env += l; env += '\n';
+ }
+ }
+ catch (const io_error& e)
+ {
+ fail << "unable to read from " << ef << ": " << e;
+ }
+
+ // Rename and keep the environment file for debugging (it will be removed
+ // at the end as part of the tftp_put_dir cleanup).
+ //
+ mvfile (ef, efm);
+
+ return make_pair (auxiliary_machine_result {move (xp), move (m)},
+ move (env));
+ }
+
+ // Unreachable.
+}
+catch (const system_error& e)
+{
+ fail << "auxiliary machine startup error: " << e << endf;
+}
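
[To make the name prefixing above concrete, here is a minimal standalone
sketch of the transformation, assuming a hypothetical environment name mysql
and approximating libbutl's sanitize_identifier()/ucase() with a simple
character loop:]

#include <cctype>
#include <iostream>
#include <sstream>
#include <string>

int main ()
{
  // Hypothetical environment name; rough stand-in for
  // ucase (sanitize_identifier (env_name)) + '_' in the code above.
  //
  std::string env_name ("mysql");

  std::string env_pfx;
  for (char c: env_name)
    env_pfx += std::isalnum (static_cast<unsigned char> (c))
      ? static_cast<char> (std::toupper (static_cast<unsigned char> (c)))
      : '_';
  env_pfx += '_';

  // Stand-in for the TFTP-uploaded environment file.
  //
  std::istringstream is ("# server settings\nPORT=3306\nUSER=test\n");

  std::string env;
  for (std::string l; std::getline (is, l); )
  {
    // The real code also trims leading/trailing whitespace here.
    //
    if (!l.empty () && l.front () != '#')
      l.insert (0, env_pfx);

    env += l; env += '\n';
  }

  std::cout << env;
  // # server settings
  // MYSQL_PORT=3306
  // MYSQL_USER=test
}
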
+
+// Divide the auxiliary RAM among the specified machines.
+//
+// Issue diagnostics and return empty vector if the auxiliary RAM is
+// insufficient.
+//
+static vector<size_t> // Parallel to mms.
+divide_auxiliary_ram (const vector<const machine_header_manifest*>& mms)
+{
+ size_t ram (ops.auxiliary_ram ());
+
+ vector<size_t> rams;
+ vector<size_t> rnds; // Allocation rounds (see below).
+
+ // First pass: allocate the minimums.
+ //
+ for (const machine_header_manifest* mm: mms)
+ {
+ size_t v (effective_ram_minimum (*mm));
+
+ assert (!mm->ram_maximum || v <= *mm->ram_maximum); // Sanity check.
+
+ rams.push_back (v);
+ rnds.push_back (0);
+
+ if (ram >= v)
+ ram -= v;
+ else
+ {
+ diag_record dr (error);
+ dr << "insufficient auxiliary RAM " << ops.auxiliary_ram () << "KiB";
+
+ for (size_t i (0); i != rams.size (); ++i)
+ dr << info << mms[i]->name << " requires minimum " << rams[i] << "KiB";
+
+ return {};
+ }
+ }
+
+ // Second pass: distribute the remaining RAM.
+ //
+ // We are going to do it in the ram_minimum increments to avoid ending up
+ // with odd amounts (while Linux can probably grok anything, who knows about
+ // Windows).
+ //
+ // To make the distribution fair we are going to count how many times we
+ // have increased each machine's allocation (the rnds vector).
+ //
+ for (size_t a (1); ram != 0; ) // Allocation round.
+ {
+ // Find a machine that would be satisfied with the least amount of RAM but
+ // which hasn't yet been given anything on this allocation round.
+ //
+ size_t min_i; // Min index.
+ size_t min_v (0); // Min value.
+
+ // We are done if we couldn't give out any RAM and haven't seen any
+ // machines that have already been given something on this allocation
+ // round.
+ //
+ bool done (true);
+
+ for (size_t i (0); i != rams.size (); ++i)
+ {
+ if (rnds[i] != a)
+ {
+ const machine_header_manifest& mm (*mms[i]);
+
+ size_t o (rams[i]);
+ size_t v (effective_ram_minimum (mm));
+
+ // Don't allocate past maximum.
+ //
+ if (mm.ram_maximum && *mm.ram_maximum < o + v)
+ {
+ v = *mm.ram_maximum - o;
+
+ if (v == 0)
+ continue;
+ }
+
+ if (v <= ram && (min_v == 0 || min_v > v))
+ {
+ min_i = i;
+ min_v = v;
}
}
+ else
+ done = false;
}
- catch (const system_error& e)
+
+ if (min_v != 0)
{
- fail << "unable to iterate over " << vd << ": " << e;
+ rnds[min_i] = a;
+ rams[min_i] += min_v;
+ ram -= min_v;
+ }
+ else
+ {
+ if (done)
+ break;
+
+ ++a; // Next allocation round.
}
}
- if (none)
- warn << "no build machines for toolchain " << tc_name;
+ return rams;
+}
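
[A worked example, using the same numbers as the #if 0 test in main() below:
with --auxiliary-ram 4194304 (4GiB), m1 requiring a 512MiB minimum with no
maximum and m2 requiring a 1GiB minimum with a 1.5GiB maximum, the first pass
allocates 512MiB and 1GiB, leaving 2.5GiB. Round one then gives each machine
one more minimum-sized increment (m1: 1GiB, m2: 1.5GiB, reaching its maximum).
After that only m1 can grow, in 512MiB increments, so it ends up with 2.5GiB
and m2 with 1.5GiB, consuming all of the 4GiB.]
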
- return r;
+// Stop all the auxiliary machines and clear the passed list.
+//
+static void
+stop_auxiliary_machines (auxiliary_machine_results& amrs)
+{
+ tracer trace ("stop_auxiliary_machines");
+
+ if (!amrs.empty ())
+ {
+ // Do it in two passes to make sure all the machines are at least down.
+ //
+ for (const auxiliary_machine_result& amr: amrs)
+ {
+ if (amr.machine != nullptr)
+ {
+ try {amr.machine->forcedown (false);} catch (const failed&) {}
+ }
+ }
+
+ // Make sure we don't retry the above even if the below fails.
+ //
+ auxiliary_machine_results tmp;
+ tmp.swap (amrs);
+
+ for (const auxiliary_machine_result& amr: tmp)
+ {
+ if (amr.machine != nullptr)
+ {
+ amr.machine->cleanup ();
+ run_btrfs (trace, "subvolume", "delete", amr.snapshot);
+ }
+ }
+ }
}
-catch (const system_error& e)
+
+// Start all the auxiliary machines and patch in their combined environment
+// into tm.auxiliary_environment.
+//
+// Return the started machines or an empty list if any of them failed to
+// start up (which means this function should only be called for non-empty
+// ams).
+//
+// Note that the order of auxiliary machines in ams may not match that in
+// tm.auxiliary_machines.
+//
+static auxiliary_machine_results
+start_auxiliary_machines (const vector<bootstrapped_machine*>& ams,
+ task_manifest& tm,
+ const string& in_name, // <toolchain>-<instance>
+ const dir_path& tftp_put_dir,
+ optional<size_t> boost_cpus)
{
- fail << "unable to iterate over " << machines << ": " << e << endf;
+ tracer trace ("start_auxiliary_machines");
+
+ size_t n (tm.auxiliary_machines.size ());
+
+ assert (n != 0 && ams.size () == n);
+
+ auxiliary_machine_results amrs;
+
+ // Divide the auxiliary RAM among the machines.
+ //
+ vector<size_t> rams;
+ {
+ vector<const machine_header_manifest*> mms;
+ mms.reserve (n);
+ for (bootstrapped_machine* am: ams)
+ mms.push_back (&am->manifest.machine);
+
+ rams = divide_auxiliary_ram (mms);
+ if (rams.empty ())
+ return amrs;
+
+ if (verb > 3) // l3
+ for (size_t i (0); i != n; ++i)
+ trace << mms[i]->name << " allocated " << rams[i] << "KiB";
+ }
+
+ // Start the machines.
+ //
+ // Let's use the order in which they were specified in the task manifest
+ // (which will naturally be the order in which they are specified in the
+ // package manifest). This way amrs and tm.auxiliary_machines will be
+ // parallel.
+ //
+ string envs; // Combined environments.
+
+ auto amg (
+ make_exception_guard (
+ [&amrs] ()
+ {
+ if (!amrs.empty ())
+ {
+ info << "trying to force auxiliary machines down";
+ stop_auxiliary_machines (amrs);
+ }
+ }));
+
+ for (size_t i (0); i != n; ++i)
+ {
+ const auxiliary_machine& tam (tm.auxiliary_machines[i]);
+
+ auto b (ams.begin ()), e (ams.end ());
+ auto j (find_if (b, e,
+ [&tam] (const bootstrapped_machine* m)
+ {
+ return m->manifest.machine.name == tam.name;
+ }));
+ assert (j != e);
+
+ // Note: can throw interrupt.
+ //
+ pair<auxiliary_machine_result, string> p (
+ start_auxiliary_machine (**j,
+ tam.environment_name,
+ i + 1,
+ rams[j - b], // Parallel to ams.
+ in_name,
+ tftp_put_dir,
+ boost_cpus));
+
+ if (p.first.machine == nullptr)
+ {
+ if (!amrs.empty ())
+ {
+ info << "trying to force auxiliary machines down";
+ stop_auxiliary_machines (amrs); // amrs is now empty.
+ }
+
+ return amrs;
+ }
+
+ amrs.push_back (move (p.first));
+
+ // Add the machine name as a header before its environment.
+ //
+ if (i != 0) envs += '\n';
+ envs += "# "; envs += tam.name; envs += '\n';
+ envs += "#\n";
+ envs += p.second; // Always includes trailing newline.
+ }
+
+ tm.auxiliary_environment = move (envs);
+
+ return amrs;
}
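
[For example, for two hypothetical auxiliary machines x86_64-linux-mysql and
x86_64-linux-redis with environment names mysql and redis, the combined value
patched into tm.auxiliary_environment would look along these lines:

# x86_64-linux-mysql
#
MYSQL_HOST=10.0.0.2

# x86_64-linux-redis
#
REDIS_HOST=10.0.0.3]
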
-static result_manifest
-perform_task (const dir_path& md,
- const bootstrapped_machine_manifest& mm,
- const task_manifest& tm)
+struct perform_task_result
+{
+ auto_rmdir work_dir; // <tftp>/build/<toolchain>-<instance>/
+ result_manifest manifest;
+
+ // Uploaded archive, if any (somewhere inside work_dir).
+ //
+ optional<path> upload_archive;
+
+ // Create the special empty result.
+ //
+ perform_task_result () = default;
+
+ // Create task result without build artifacts.
+ //
+ explicit
+ perform_task_result (auto_rmdir&& d, result_manifest&& m)
+ : work_dir (move (d)),
+ manifest (move (m)) {}
+
+ // Create task result with build artifacts.
+ //
+ perform_task_result (auto_rmdir&& d, result_manifest&& m, path&& a)
+ : work_dir (move (d)),
+ manifest (move (m)),
+ upload_archive (move (a)) {}
+};
+
+// Note that the task manifest is not const since we may need to patch in the
+// auxiliary_environment value.
+//
+static perform_task_result
+perform_task (toolchain_lock tl, // Note: assumes ownership.
+ bootstrapped_machine& bm, // Build machine.
+ const vector<bootstrapped_machine*>& ams, // Auxiliary machines.
+ task_manifest& tm,
+ optional<size_t> boost_cpus)
try
{
- tracer trace ("perform_task", md.string ().c_str ());
+ tracer trace ("perform_task", bm.path.string ().c_str ());
+
+ // Arm the interrupt handler and release the global toolchain lock.
+ //
+ // Note that there can be no interrupt while we are holding the global lock.
+ //
+ sigurs1.store (0, std::memory_order_release);
+ tl.unlock ();
+
+ machine_lock& ml (bm.lock);
+ const dir_path& md (bm.path);
+ const bootstrapped_machine_manifest& mm (bm.manifest);
+
+ const string in_name (tc_name + '-' + to_string (inst));
+ auto_rmdir arm ((dir_path (ops.tftp ()) /= "build") /= in_name);
+
+ try_mkdir_p (arm.path);
result_manifest r {
tm.name,
tm.version,
result_status::abort,
- operation_results {}};
+ operation_results {},
+ nullopt /* worker_checksum */,
+ nullopt /* dependency_checksum */};
if (ops.fake_build ())
- return r;
+ return perform_task_result (move (arm), move (r));
// The overall plan is as follows:
//
- // 1. Snapshot the (bootstrapped) machine.
+ // 1. Snapshot the (bootstrapped) build machine.
//
// 2. Save the task manifest to the TFTP directory (to be accessed by the
// worker).
//
// 3. Start the TFTP server and the machine.
//
- // 4. Serve TFTP requests while watching out for the result manifest.
+ // 4. Serve TFTP requests while watching out for the result manifest and
+ // interrupts.
//
// 5. Clean up (force the machine down and delete the snapshot).
//
+ // If the task requires any auxiliary machines, then for each such machine
+ // perform the following steps 1-3 before step 1 above, and step 4 after
+ // step 5 above (that is, start all the auxiliary machines before the build
+ // machine and clean them up after):
+ //
+ // 1. Snapshot the (bootstrapped) auxiliary machine.
+ //
+ // 2. Start the TFTP server and the machine.
+ //
+  // 3. Handle TFTP upload requests until the environment upload is received.
+ //
+ // 4. Clean up (force the machine down and delete the snapshot).
// TFTP server mapping (server chroot is --tftp):
//
- // GET requests to .../build/<name>-<instance>/get/*
- // PUT requests to .../build/<name>-<instance>/put/*
+ // GET requests to .../build/<toolchain>-<instance>/get/*
+ // PUT requests to .../build/<toolchain>-<instance>/put/*
//
- const string in_name (tc_name + '-' + to_string (inst));
- auto_rmdir arm ((dir_path (ops.tftp ()) /= "build") /= in_name);
-
dir_path gd (dir_path (arm.path) /= "get");
dir_path pd (dir_path (arm.path) /= "put");
@@ -838,11 +2154,15 @@ try
path tf (gd / "task.manifest"); // Task manifest file.
path rf (pd / "result.manifest.lz4"); // Result manifest file.
-
- serialize_manifest (tm, tf, "task");
+ path af (pd / "upload.tar"); // Archive of build artifacts to upload.
if (ops.fake_machine_specified ())
{
+ // Note: not handling interrupts here. Nor starting any auxiliary
+ // machines, naturally.
+
+ serialize_manifest (tm, tf, "task");
+
// Simply wait for the file to appear.
//
for (size_t i (0);; sleep (1))
@@ -863,7 +2183,40 @@ try
}
else
{
+ // Start the auxiliary machines if any.
+ //
+    // If anything goes wrong, force them all down (failing that, the machine
+    // destructor will block waiting for their exit).
+ //
+ auxiliary_machine_results amrs;
+
+ auto amg (
+ make_exception_guard (
+ [&amrs] ()
+ {
+ if (!amrs.empty ())
+ {
+ info << "trying to force auxiliary machines down";
+ stop_auxiliary_machines (amrs);
+ }
+ }));
+
+ if (!ams.empty ())
+ {
+ amrs = start_auxiliary_machines (ams, tm, in_name, pd, boost_cpus);
+
+ if (amrs.empty ())
+ return perform_task_result (move (arm), move (r)); // Abort.
+ }
+
+ // Note: tm.auxiliary_environment patched in by start_auxiliary_machines().
+ //
+ serialize_manifest (tm, tf, "task");
+
+ // Start the build machine and perform the build.
+ //
try_rmfile (rf);
+ try_rmfile (af);
// <name>-<toolchain>-<xxx>
//
@@ -880,32 +2233,45 @@ try
//
tftp_server tftpd ("Gr ^/?(.+)$ /build/" + in_name + "/get/\\1\n" +
"Pr ^/?(.+)$ /build/" + in_name + "/put/\\1\n",
- ops.tftp_port () + offset);
+ ops.tftp_port () + offset + 0 /* build machine */);
l3 ([&]{trace << "tftp server on port " << tftpd.port ();});
- // Start the machine.
+ // Note: the machine handling logic is similar to bootstrap. Except here
+ // we have to cleanup the snapshot ourselves in case of suspension or
+ // unexpected exit.
//
- unique_ptr<machine> m (
- start_machine (xp,
- mm.machine,
- mm.machine.mac,
- ops.bridge (),
- tftpd.port (),
- tm.interactive.has_value ()));
-
- // Note: the machine handling logic is similar to bootstrap.
+ // NOTE: see similar code in start_auxiliary_machine() above.
//
{
+ // Start the machine.
+ //
+ unique_ptr<machine> m (
+ start_machine (xp,
+ mm.machine,
+ 0 /* machine_num (build) */,
+ boost_cpus ? *boost_cpus : ops.cpu (),
+ ops.build_ram (),
+ mm.machine.mac,
+ ops.bridge (),
+ tftpd.port (),
+ tm.interactive.has_value () /* public_vnc */));
+
auto mg (
make_exception_guard (
[&m, &xp] ()
{
- info << "trying to force machine " << xp << " down";
- try {m->forcedown (false);} catch (const failed&) {}
+ if (m != nullptr)
+ {
+ info << "trying to force machine " << xp << " down";
+ try {m->forcedown (false);} catch (const failed&) {}
+ }
}));
- auto soft_fail = [&xp, &m, &r] (const char* msg)
+ auto soft_fail = [&trace,
+ &amrs,
+ &ml, &xp, &m,
+ &arm, &r] (const char* msg)
{
{
diag_record dr (error);
@@ -913,16 +2279,36 @@ try
m->print_info (dr);
}
+ // What should we do about auxiliary machines? We could force them
+ // all down before suspending (and thus freeing them for use). That
+ // is the easy option. We could suspend them as well, but that feels
+ // like it will be a pain (will need to resume all of them when done
+ // investigating). Theoretically we could just let them run, but
+ // that won't play well with our interrupt logic since someone may
+        // attempt to interrupt us via one of them. So let's go with the easy
+        // option for now.
+        //
+        // Note: always stop/suspend the build machine before the auxiliary
+        // machines to avoid any errors due to the auxiliary machines being
+        // unavailable.
try
{
+ // Update the information in the machine lock to signal that the
+ // machine is suspended and cannot be interrupted.
+ //
+ ml.suspend_task ();
+
m->suspend (false);
+ stop_auxiliary_machines (amrs);
m->wait (false);
m->cleanup ();
+ m = nullptr; // Disable exceptions guard above.
+ run_btrfs (trace, "subvolume", "delete", xp);
info << "resuming after machine suspension";
}
catch (const failed&) {}
- return r;
+ return perform_task_result (move (arm), move (r));
};
auto check_machine = [&xp, &m] ()
@@ -933,9 +2319,7 @@ try
if (!m->wait (t /* seconds */, false /* fail_hard */))
return true;
}
- catch (const failed&)
- {
- }
+ catch (const failed&) {}
diag_record dr (warn);
dr << "machine " << xp << " exited unexpectedly";
@@ -944,26 +2328,76 @@ try
return false;
};
+ auto check_auxiliary_machines = [&amrs] ()
+ {
+ for (auxiliary_machine_result& amr: amrs)
+ {
+ try
+ {
+ size_t t (0);
+ if (!amr.machine->wait (t /* seconds */, false /* fail_hard */))
+ continue;
+ }
+ catch (const failed&) {}
+
+ diag_record dr (warn);
+ dr << "machine " << amr.snapshot << " exited unexpectedly";
+ amr.machine->print_info (dr);
+
+ return false;
+ }
+
+ return true;
+ };
+
+ auto check_interrupt = [&trace, &amrs, &xp, &m] ()
+ {
+ if (sigurs1.load (std::memory_order_consume) == 0)
+ return;
+
+ l2 ([&]{trace << "machine " << xp << " interruped";});
+
+ try {m->forcedown (false);} catch (const failed&) {}
+ stop_auxiliary_machines (amrs);
+ m->cleanup ();
+ m = nullptr; // Disable exceptions guard above.
+ run_btrfs (trace, "subvolume", "delete", xp);
+
+ throw interrupt ();
+ };
+
// The first request should be the task manifest download. Wait for up
- // to 2 minutes for that to arrive (again, that long to deal with
- // flaky Windows networking). In a sense we use it as an indication
- // that the machine has booted and the worker process has started.
+ // to 4 minutes (by default) for that to arrive (again, that long to
+ // deal with flaky Windows networking, etc). In a sense we use it as
+ // an indication that the machine has booted and the worker process
+ // has started.
//
size_t to;
- const size_t startup_to (120);
+ const size_t startup_to (ops.build_startup ());
const size_t build_to (tm.interactive
? ops.interactive_timeout ()
: ops.build_timeout ());
- // Wait periodically making sure the machine is still alive.
+ // Wait periodically making sure the machine is still alive and
+ // checking for interrupts.
//
for (to = startup_to; to != 0; )
{
+ check_interrupt ();
+
if (tftpd.serve (to, 2))
break;
- if (!check_machine ())
- return r;
+ bool bm; // Build machine still running.
+ if (!(bm = check_machine ()) || !check_auxiliary_machines ())
+ {
+ if (bm)
+ try {m->forcedown (false);} catch (const failed&) {}
+ stop_auxiliary_machines (amrs);
+ m = nullptr; // Disable exceptions guard above.
+ run_btrfs (trace, "subvolume", "delete", xp);
+ return perform_task_result (move (arm), move (r));
+ }
}
if (to == 0)
@@ -978,26 +2412,38 @@ try
m->print_info (dr);
try {m->forcedown (false);} catch (const failed&) {}
+ m = nullptr; // Disable exceptions guard above.
continue;
}
l3 ([&]{trace << "completed startup in " << startup_to - to << "s";});
- // Next the worker builds things and then uploads the result manifest.
- // So on our side we serve TFTP requests while checking for the
- // manifest file. To workaround some obscure filesystem races (the
- // file's mtime/size is updated several seconds later; maybe tmpfs
- // issue?), we periodically re-check.
+ // Next the worker builds things and then uploads optional archive of
+ // build artifacts and the result manifest afterwards. So on our side
+ // we serve TFTP requests while checking for the manifest file. To
+ // workaround some obscure filesystem races (the file's mtime/size is
+ // updated several seconds later; maybe tmpfs issue?), we periodically
+ // re-check.
//
for (to = build_to; to != 0; )
{
+ check_interrupt ();
+
if (tftpd.serve (to, 2))
continue;
- if (!check_machine ())
+ bool bm; // Build machine still running.
+ if (!(bm = check_machine ()) || !check_auxiliary_machines ())
{
- if (!file_not_empty (rf))
- return r;
+ if (bm || !file_not_empty (rf))
+ {
+ if (bm)
+ try {m->forcedown (false);} catch (const failed&) {}
+ stop_auxiliary_machines (amrs);
+ m = nullptr; // Disable exceptions guard above.
+ run_btrfs (trace, "subvolume", "delete", xp);
+ return perform_task_result (move (arm), move (r));
+ }
}
if (file_not_empty (rf))
@@ -1050,7 +2496,9 @@ try
// lease instead of a new one.
//
try {m->forcedown (false);} catch (const failed&) {}
+ stop_auxiliary_machines (amrs);
m->cleanup ();
+ m = nullptr; // Disable exceptions guard above.
}
}
@@ -1059,7 +2507,7 @@ try
}
}
- // Update package name/version if the returned value as "unknown".
+ // Update package name/version if the returned value is "unknown".
//
if (r.version == bpkg::version ("0"))
{
@@ -1069,23 +2517,16 @@ try
r.version = tm.version;
}
- return r;
+ return (!r.status || !file_exists (af)
+ ? perform_task_result (move (arm), move (r))
+ : perform_task_result (move (arm), move (r), move (af)));
}
catch (const system_error& e)
{
fail << "build error: " << e << endf;
}
-extern "C" void
-handle_signal (int sig)
-{
- switch (sig)
- {
- case SIGHUP: exit (3); // Unimplemented feature.
- case SIGTERM: exit (0);
- default: assert (false);
- }
-}
+static const string agent_checksum ("2"); // Logic version.
int
main (int argc, char* argv[])
@@ -1096,6 +2537,21 @@ try
verb = ops.verbose ();
+#if 0
+ // ./bbot-agent --auxiliary-ram 4194304
+ //
+ machine_header_manifest m1 {
+ "m1", "m1", "m1", machine_role::auxiliary, 512*1024, nullopt};
+ machine_header_manifest m2 {
+ "m2", "m2", "m2", machine_role::auxiliary, 1024*1024, 3*512*1024};
+ vector<const machine_header_manifest*> mms {&m1, &m2};
+ vector<size_t> rams (divide_auxiliary_ram (mms));
+ for (size_t i (0); i != rams.size (); ++i)
+ text << mms[i]->name << ' ' << rams[i] / 1024;
+
+ return 0;
+#endif
+
// @@ systemd 231 added JOURNAL_STREAM environment variable which allows
// detecting if stderr is connected to the journal.
//
@@ -1175,7 +2631,7 @@ try
<< "libbpkg " << LIBBPKG_VERSION_ID << endl
<< "libbutl " << LIBBUTL_VERSION_ID << endl
<< "Copyright (c) " << BBOT_COPYRIGHT << "." << endl
- << "TBC; All rights reserved" << endl;
+ << "This is free software released under the MIT license." << endl;
return 0;
}
@@ -1194,40 +2650,102 @@ try
tc_name = ops.toolchain_name ();
tc_num = ops.toolchain_num ();
+
+ if (ops.toolchain_lock_specified ())
+ {
+ const string& l (ops.toolchain_lock ());
+
+ if (!l.empty ())
+ {
+ tc_lock = path (l);
+
+ if (!tc_lock.absolute ())
+ fail << "--toolchain-lock value '" << l << "' should be absolute path";
+ }
+ }
+ else if (!(ops.fake_bootstrap () ||
+ ops.fake_build () ||
+ ops.fake_machine_specified () ||
+ ops.fake_request_specified ()))
+ tc_lock = path ("/var/lock/bbot-agent-" + tc_name + ".lock");
+
tc_ver = (ops.toolchain_ver_specified ()
? ops.toolchain_ver ()
: standard_version (BBOT_VERSION_STR));
tc_id = ops.toolchain_id ();
- if (tc_num == 0 || tc_num > 99)
- fail << "invalid --toolchain-num value " << tc_num;
+ if (tc_num == 0 || tc_num > 9)
+ fail << "--toolchain-num value " << tc_num << " out of range";
inst = ops.instance ();
if (inst == 0 || inst > 99)
- fail << "invalid --instance value " << inst;
+ fail << "--instance value " << inst << " out of range";
- offset = (tc_num - 1) * 100 + inst;
+ inst_max = ops.instance_max ();
- // Controller URLs.
+  // The last decimal position is used for the machine number, 0 for the
+  // build machine, non-0 for auxiliary machines (of which we can have at
+  // most 9).
+  //
- if (argc < 2 &&
- !ops.dump_machines () &&
- !ops.fake_request_specified ())
- {
- fail << "controller url expected" <<
- info << "run " << argv[0] << " --help for details";
- }
+ offset = (tc_num - 1) * 1000 + inst * 10;
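
[For example, with --toolchain-num 2 and --instance 3, offset is
(2 - 1) * 1000 + 3 * 10 = 1030: the build machine's TFTP server listens on
--tftp-port plus 1030 while auxiliary machines 1-9 get ports plus 1031
through 1039 (see start_auxiliary_machine() above).]
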
- strings controllers;
+ // Controller priority to URLs map.
+ //
+ std::map<uint64_t, strings> controllers;
for (int i (1); i != argc; ++i)
- controllers.push_back (argv[i]);
+ {
+ // [<prio>=]<url>
+ //
+ string a (argv[i]);
+
+ // See if we have priority, falling back to priority 0 if absent.
+ //
+ uint64_t prio (0);
+
+ // Note that we can also have `=` in <url> (e.g., parameters) so we will
+ // only consider `=` as ours if prior to it we only have digits.
+ //
+ size_t p (a.find ('='));
+ if (p != string::npos && a.find_first_not_of ("0123456789") == p)
+ {
+ // Require exactly four or five digits in case we later need to extend
+ // the priority levels beyond the 10 possible values (e.g., DDCCBBAA).
+ //
+ if (p != 4 && p != 5)
+ fail << "four or five-digit controller url priority expected in '"
+ << a << "'";
+
+ char* e;
+ errno = 0;
+ prio = strtoull (a.c_str (), &e, 10);
+ assert (errno != ERANGE && e == a.c_str () + p);
+
+ if (prio > 19999)
+ fail << "out of bounds controller url priority in '" << a << "'";
+
+ a.erase (0, p + 1);
+ }
+
+ controllers[prio].push_back (move (a));
+ }
+
+ if (controllers.empty ())
+ {
+ if (ops.dump_machines () || ops.fake_request_specified ())
+ {
+ controllers[0].push_back ("https://example.org");
+ }
+ else
+ fail << "controller url expected" <<
+ info << "run " << argv[0] << " --help for details";
+ }
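
[A hypothetical invocation illustrating the [<prio>=]<url> syntax:

bbot-agent 0100=https://ci.example.org 00200=https://ci2.example.org https://fallback.example.org

The first two controllers get priorities 100 and 200 (exactly four or five
digits before =) while the last gets priority 0. A URL such as
https://host/?a=b is also taken to have no priority since the characters
preceding its first = are not all digits.]
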
// Handle SIGHUP and SIGTERM.
//
if (signal (SIGHUP, &handle_signal) == SIG_ERR ||
- signal (SIGTERM, &handle_signal) == SIG_ERR)
+ signal (SIGTERM, &handle_signal) == SIG_ERR ||
+ signal (SIGUSR1, &handle_signal) == SIG_ERR)
fail << "unable to set signal handler: "
<< system_error (errno, std::generic_category ()); // Sanitize.
@@ -1267,7 +2785,8 @@ try
dr <<
info << "cpu(s) " << ops.cpu () <<
- info << "ram(kB) " << ops.ram () <<
+ info << "build ram(KiB) " << ops.build_ram () <<
+ info << "auxil ram(KiB) " << ops.auxiliary_ram () <<
info << "bridge " << ops.bridge ();
if (fingerprint)
@@ -1281,8 +2800,19 @@ try
info << "toolchain id " << tc_id <<
info << "instance num " << inst;
- for (const string& u: controllers)
- dr << info << "controller url " << u;
+ if (inst_max != 0)
+ dr << info << "instance max " << inst_max;
+
+ // Note: keep last since don't restore fill/setw.
+ //
+ for (const pair<const uint64_t, strings>& p: controllers)
+ {
+ for (const string& u: p.second)
+ {
+ dr.os.fill ('0');
+ dr << info << "controller url " << std::setw (4) << p.first << '=' << u;
+ }
+ }
}
// The work loop. The steps we go through are:
@@ -1296,9 +2826,11 @@ try
// 4. If a build task is returned, do it, upload the result, and go to #1
// (immediately).
//
- auto rand_sleep = [g = std::mt19937 (std::random_device {} ())] () mutable
+ // NOTE: consider updating agent_checksum if making any logic changes.
+ //
+ auto rand_sleep = [] ()
{
- return std::uniform_int_distribution<unsigned int> (50, 60) (g);
+ return std::uniform_int_distribution<unsigned int> (50, 60) (rand_gen);
};
optional<interactive_mode> imode;
@@ -1307,78 +2839,259 @@ try
if (ops.interactive () != interactive_mode::false_)
{
imode = ops.interactive ();
- ilogin = machine_vnc (true /* public */);
+ ilogin = machine_vnc (0 /* machine_num */, true /* public */);
+ }
+
+ // Use the pkeyutl openssl command for signing the task response challenge
+ // if openssl version is greater or equal to 3.0.0 and the rsautl command
+ // otherwise.
+ //
+ // Note that openssl 3.0.0 deprecates rsautl in favor of pkeyutl.
+ //
+ const char* sign_cmd;
+
+ try
+ {
+ optional<openssl_info> oi (openssl::info (trace, 2, ops.openssl ()));
+
+ sign_cmd = oi &&
+ oi->name == "OpenSSL" &&
+ oi->version >= semantic_version {3, 0, 0}
+ ? "pkeyutl"
+ : "rsautl";
+ }
+ catch (const system_error& e)
+ {
+ fail << "unable to obtain openssl version: " << e << endf;
}
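
[With the pkeyutl command selected, the challenge signing performed below is
roughly equivalent to running (key path hypothetical):

openssl pkeyutl -sign -inkey /etc/bbot/agent.key

with the challenge written to its stdin (and rsautl substituted on pre-3.0.0
versions).]
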
for (unsigned int sleep (0);; ::sleep (sleep), sleep = 0)
{
- bootstrapped_machines ms (enumerate_machines (ops.machines ()));
-
- // Prepare task request.
- //
- task_request_manifest tq {
- hname,
- tc_name,
- tc_ver,
- imode,
- ilogin,
- fingerprint,
- machine_header_manifests {}
- };
+ pair<toolchain_lock, bootstrapped_machines> er (
+ enumerate_machines (ops.machines ()));
- // Note: below we assume tq.size () == ms.size ().
- //
- for (const bootstrapped_machine& m: ms)
- tq.machines.emplace_back (m.manifest.machine.id,
- m.manifest.machine.name,
- m.manifest.machine.summary);
+ toolchain_lock& tl (er.first);
+ bootstrapped_machines& ms (er.second);
- if (ops.dump_machines ())
+ // Determine the existing task priority range (using [0,0] if there are
+    // none) as well as whether we should operate in the priority monitor
+ // mode.
+ //
+ uint64_t prio_min (~uint64_t (0));
+ uint64_t prio_max (0);
+ bool prio_mon (false);
{
- for (const machine_header_manifest& m: tq.machines)
- serialize_manifest (m, cout, "stdout", "machine");
+ uint16_t busy (0); // Number of build machines locked by other processes.
+ bool task (false); // There is a build machine performing a task.
- return 0;
- }
+ for (const bootstrapped_machine& m: ms)
+ {
+ if (!m.lock.locked ())
+ {
+ if (m.lock.prio) // Not bootstrapping/suspended.
+ {
+ if (m.manifest.machine.effective_role () != machine_role::auxiliary)
+ {
+ ++busy;
+ task = true;
- if (tq.machines.empty ())
- {
- // Normally this means all the machines are locked so sleep a bit less.
- //
- sleep = rand_sleep () / 2;
- continue;
+ if (prio_min > *m.lock.prio)
+ prio_min = *m.lock.prio;
+
+ if (prio_max < *m.lock.prio)
+ prio_max = *m.lock.prio;
+ }
+ }
+ else
+ ++busy; // Assume build machine (see enumerate_machines()).
+ }
+ }
+
+ if (prio_min > prio_max) // No tasks.
+ prio_min = prio_max;
+
+ if (inst_max != 0)
+ {
+ assert (busy <= inst_max);
+
+ if (busy == inst_max)
+ {
+ if (!task) // All bootstrapping/suspended.
+ {
+ sleep = rand_sleep ();
+ continue;
+ }
+
+ l2 ([&]{trace << "priority monitor, range [" << prio_min << ", "
+ << prio_max << "]";});
+
+ prio_mon = true;
+ }
+ }
}
- // Send task requests.
- //
- // Note that we have to do it while holding the lock on all the machines
- // since we don't know which machine we will need.
+ // If we get a task, these contain all the corresponding information.
//
- string url;
+ task_request_manifest tq;
task_response_manifest tr;
+ uint64_t prio;
+ string url;
- if (ops.fake_request_specified ())
+ // Iterate over controller priorities in reverse, that is, from highest to
+ // lowest (see the agent(1) man page for background on the priority
+ // levels).
+ //
+ // The following factors determine the lower bound of priorities we should
+ // consider:
+ //
+ // 1. If in the priority monitor mode, then we should only consider
+ // priorities that can interrupt the existing task with the lowest
+ // priority.
+ //
+ // Here is a representative sample of existing/interrupt priorities
+ // from which we derive the below formulae (remember that we only start
+ // interrupting from priority level 3):
+ //
+ // existing interrupt
+ // -------- ---------
+ // 5 >= 100
+ // 55 >= 100
+ // 555 >= 600
+ // 999 >= 1000
+ // 5055 >= 5100
+ // 5555 >= 5600
+ // 9999 >= 10000
+ //
+ // Essentially, what we need to do is discard the lowest 2 levels and
+ // add 100, moving the priority to the next 3rd level.
+ //
+ // 2. Otherwise, we should factor in the "don't ask for lower-priority
+ // tasks" semantics that applies from the second priority level.
+ //
+ // Note also that the other half of this logic is below where we determine
+ // which machines we offer for each priority.
+ //
+ auto ce (controllers.end ());
+ auto cb (controllers.lower_bound (
+ prio_mon ? ((prio_min / 100) * 100) + 100 :
+ prio_max >= 10 ? prio_max - 1 : // Including this priority.
+ 0)); // Any priority.
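
[Plugging in the numbers from the table above: in the priority monitor mode
with prio_min 555 the lower bound is ((555 / 100) * 100) + 100 = 600 and with
prio_min 999 it is 900 + 100 = 1000. Outside of the monitor mode with
prio_max 55 the bound is 54, that is, controllers at priority 54 and above,
including 55 itself, are asked.]
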
+
+ for (; cb != ce; )
{
- auto t (parse_manifest<task_manifest> (ops.fake_request (), "task"));
+ const pair<const uint64_t, strings>& pu (*--ce);
- tr = task_response_manifest {
- "fake-session", // Dummy session.
- nullopt, // No challenge.
- url, // Empty result URL.
- move (t)};
+ prio = pu.first;
+ const strings& urls (pu.second);
- url = "http://example.org";
- }
- else
- {
- // Note that after completing each task we always start from the
- // beginning of the list. This fact can be used to implement a poor
- // man's priority system where we will continue serving the first listed
- // controller for as long as it has tasks (and maybe in the future we
- // will implement a proper priority system).
+ // Prepare task request (it will be the same within a given priority).
+ //
+ tq = task_request_manifest {
+ hname,
+ tc_name,
+ tc_ver,
+ imode,
+ ilogin,
+ fingerprint,
+ ops.auxiliary_ram (),
+ machine_header_manifests {}};
+
+ // Determine which machines we need to offer for this priority.
+ //
+ bool aux_only (true); // Only auxiliary machines are available.
+ {
+        bool interruptable (false); // There is a build machine we can interrupt.
+ for (const bootstrapped_machine& m: ms)
+ {
+ const machine_manifest& mm (m.manifest.machine);
+ machine_role role (mm.effective_role ());
+
+ if (!m.lock.locked ())
+ {
+ if (!m.lock.prio) // Skip bootstrapping/suspended.
+ continue;
+
+ uint64_t eprio (*m.lock.prio);
+
+ // Determine if our priority can interrupt the existing task.
+ //
+ // Based on the above discussion of the priority lower bound
+          // determination (and some meditation) it's clear that we can
+ // only interrupt the existing task if our priority is (at least)
+ // on a higher 3rd level.
+ //
+ if ((prio / 100) <= (eprio / 100))
+ continue;
+
+ if (role != machine_role::auxiliary)
+ interruptable = true;
+ }
+
+ tq.machines.emplace_back (mm.id,
+ mm.name,
+ mm.summary,
+ role,
+ effective_ram_minimum (mm),
+ mm.ram_maximum);
+
+ aux_only = aux_only && role == machine_role::auxiliary;
+ }
+
+ // Sanity check: in the priority monitor mode we should only ask for a
+ // task if we can interrupt one (this should be taken care of by the
+ // priority lower bound calculation above).
+ //
+ assert (!prio_mon || interruptable);
+ }
+
+ if (ops.dump_machines ())
+ {
+ for (const machine_header_manifest& m: tq.machines)
+ serialize_manifest (m, cout, "stdout", "machine");
+
+ return 0;
+ }
+
+ if (aux_only)
+ tq.machines.clear ();
+
+ if (tq.machines.empty ())
+ {
+ // If we have no build machines for this priority then we won't have
+ // any for any lower priority so bail out.
+ //
+ break;
+ }
+
+ // Send task requests.
+ //
+ // Note that we have to do it while holding the lock on all the machines
+ // since we don't know which machine(s) we will need.
//
- for (const string& u: controllers)
+ vector<strings::const_iterator> rurls (urls.size ());
+ std::iota (rurls.begin (), rurls.end (), urls.begin ());
+ std::shuffle (rurls.begin (), rurls.end (), rand_gen);
+
+ for (strings::const_iterator i: rurls)
{
+ const string& u (*i);
+
+ if (ops.fake_request_specified ())
+ {
+ auto t (parse_manifest<task_manifest> (ops.fake_request (), "task"));
+
+ tr = task_response_manifest {
+ "fake-session", // Dummy session.
+ nullopt, // No challenge.
+ string (), // Empty result URL.
+ vector<upload_url> (),
+ agent_checksum,
+ move (t)};
+
+ url = u;
+ break;
+ }
+
task_response_manifest r;
try
@@ -1401,7 +3114,11 @@ try
try
{
- serialize_manifest (tq, c.out, u, "task request", false);
+ serialize_manifest (tq,
+ c.out,
+ u,
+ "task request",
+ false /* fail_hard */);
}
catch (const failed&) {f = true;}
@@ -1436,8 +3153,8 @@ try
{
const task_manifest& t (*r.task);
- // For security reasons let's require the repository location to be
- // remote.
+ // For security reasons let's require the repository location to
+ // be remote.
//
if (t.repository.local ())
{
@@ -1460,13 +3177,27 @@ try
l2 ([&]{trace << "task for " << t.name << '/' << t.version << " "
<< "on " << t.machine << " "
- << "from " << u;});
+ << "from " << u << " "
+ << "priority " << prio;});
tr = move (r);
url = u;
break;
}
- }
+ } // url loop.
+
+ if (!tr.session.empty ()) // Got a task.
+ break;
+
+ } // prio loop.
+
+ if (tq.machines.empty ()) // No machines (auxiliary-only already handled).
+ {
+ // Normally this means all the machines are busy so sleep a bit less.
+ //
+ l2 ([&]{trace << "all machines are busy, sleeping";});
+ sleep = rand_sleep () / 2;
+ continue;
}
if (tr.session.empty ()) // No task from any of the controllers.
@@ -1478,22 +3209,69 @@ try
// We have a build task.
//
- // First find the index of the machine we were asked to use (and verify it
- // is one of those we sent). Also unlock all the other machines.
+ task_manifest& t (*tr.task);
+
+ // First verify the requested machines are from those we sent in tq and
+ // their roles match.
+ //
+ // Also verify the same machine is not picked multiple times by blanking
+ // out the corresponding entry in tq.machines. (Currently we are only
+ // capable of running one instance of each machine though we may want to
+ // relax that in the future, at which point we should send as many entries
+ // for the same machine in the task request as we are capable of running,
+ // applying the priority logic for each entry, etc).
//
- size_t i (ms.size ());
- for (size_t j (0); j != ms.size (); ++j)
{
- if (tq.machines[j].name == tr.task->machine)
- i = j;
- else
- ms[j].lock.unlock ();
+ auto check = [&tq, &url] (const string& name, machine_role r)
+ {
+ auto i (find_if (tq.machines.begin (), tq.machines.end (),
+ [&name] (const machine_header_manifest& mh)
+ {
+ return mh.name == name; // Yes, names, not ids.
+ }));
+
+ if (i == tq.machines.end ())
+ {
+ error << "task from " << url << " for unknown machine " << name;
+ return false;
+ }
+
+ if (i->effective_role () != r)
+ {
+ error << "task from " << url << " with mismatched role "
+ << " for machine " << name;
+ return false;
+ }
+
+ i->name.clear (); // Blank out.
+
+ return true;
+ };
+
+ auto check_aux = [&check] (const vector<auxiliary_machine>& ams)
+ {
+ for (const auxiliary_machine& am: ams)
+ if (!check (am.name, machine_role::auxiliary))
+ return false;
+ return true;
+ };
+
+ if (!check (t.machine, machine_role::build) ||
+ !check_aux (t.auxiliary_machines))
+ {
+ if (ops.dump_task ())
+ return 0;
+
+ continue;
+ }
}
- if (i == ms.size ())
+ // Also verify there are no more than 9 auxiliary machines (see the offset
+ // global variable for details).
+ //
+ if (t.auxiliary_machines.size () > 9)
{
- error << "task from " << url << " for unknown machine "
- << tr.task->machine;
+ error << "task from " << url << " with more than 9 auxiliary machines";
if (ops.dump_task ())
return 0;
@@ -1501,8 +3279,6 @@ try
continue;
}
- task_manifest& t (*tr.task);
-
if (ops.dump_task ())
{
serialize_manifest (t, cout, "stdout", "task");
@@ -1515,16 +3291,349 @@ try
if (!ops.trust ().empty ())
t.trust = ops.trust ();
-      const dir_path& d (ms[i].path); // The <name>-<toolchain> directory.
-      const bootstrapped_machine_manifest& m (ms[i].manifest);
+ // Reset the worker checksum if the task's agent checksum doesn't match
+ // the current one.
+ //
+ // Note that since the checksums are hierarchical, such reset will trigger
+ // resets of the "subordinate" checksums (dependency checksum, etc).
+ //
+ if (!tr.agent_checksum || *tr.agent_checksum != agent_checksum)
+ t.worker_checksum = nullopt;
+
+ // Handle interrupts.
+ //
+ // Note that the interrupt can be triggered both by another process (the
+ // interrupt exception is thrown from perform_task()) as well as by this
+ // process in case we were unable to interrupt the other process (seeing
+ // that we have already received a task, responding with an interrupt
+ // feels like the most sensible option).
+ //
+ perform_task_result r;
+ bootstrapped_machine* pm (nullptr); // Build machine.
+ vector<bootstrapped_machine*> ams; // Auxiliary machines.
+ try
+ {
+ // First find the bootstrapped_machine instance in ms corresponding to
+ // the requested build machine. Also unlock all the other machines.
+ //
+ // While at it also see if we need to interrupt the selected machine (if
+ // busy), one of the existing (if we are at the max allowed instances,
+ // that is in the priority monitor mode), or all existing (if this is a
+ // priority level 4 task).
+ //
+ // Auxiliary machines complicate the matter a bit: we may now need to
+ // interrupt some subset of {build machine, auxiliary machines} that are
+ // necessary to perform this task. Note, however, that auxiliary
+ // machines are always subordinate to build machines, meaning that if
+ // there is a busy auxiliary machine, then there will be a busy build
+      // machine with the same pid/priority (and so if we interrupt one
+ // auxiliary, then we will also interrupt the corresponding build plus
+ // any other auxiliaries it may be using). Based on that let's try to
+ // divide and conquer this by first dealing with build machines and then
+ // adding any auxiliary ones.
+ //
+ vector<bootstrapped_machine*> ims; // Machines to be interrupted.
+ size_t imt (0); // Number of "target" machines to interrupt (see below).
+
+ // First pass: build machines.
+ //
+ for (bootstrapped_machine& m: ms)
+ {
+ const machine_manifest& mm (m.manifest.machine);
- result_manifest r (perform_task (ms[i].path, ms[i].manifest, t));
+ if (mm.effective_role () == machine_role::auxiliary)
+ continue;
- ms[i].lock.unlock (); // No need to hold the lock any longer.
+ if (mm.name == t.machine)
+ {
+ assert (pm == nullptr); // Sanity check.
+ pm = &m;
+ }
+ else if (m.lock.locked ())
+ m.lock.unlock ();
+ else if (m.lock.prio) // Not bootstrapping/suspended.
+ {
+ // Only consider machines that we can interrupt (see above).
+ //
+ if ((prio / 100) > (*m.lock.prio / 100))
+ {
+ if (prio >= 1000) // Priority level 4 (interrupt all).
+ ims.push_back (&m);
+ else if (prio_mon)
+ {
+ // Find the lowest priority task to interrupt.
+ //
+ if (ims.empty ())
+ ims.push_back (&m);
+ else if (*m.lock.prio < *ims.back ()->lock.prio)
+ ims.back () = &m;
+ }
+ }
+ }
+ }
+
+ assert (pm != nullptr); // Sanity check.
+
+ if (!pm->lock.locked ())
+ {
+ assert (pm->lock.prio); // Sanity check (not bootstrapping/suspended).
+
+ if (prio >= 1000)
+ ims.insert (ims.begin (), pm); // Interrupt first (see below).
+ else
+ ims = {pm};
+
+ imt++;
+ }
+
+ // Second pass: auxiliary machines.
+ //
+ for (bootstrapped_machine& m: ms)
+ {
+ const machine_manifest& mm (m.manifest.machine);
+
+ if (mm.effective_role () != machine_role::auxiliary)
+ continue;
+
+ if (find_if (t.auxiliary_machines.begin (), t.auxiliary_machines.end (),
+ [&mm] (const auxiliary_machine& am)
+ {
+ return am.name == mm.name;
+ }) != t.auxiliary_machines.end ())
+ {
+ if (!m.lock.locked ())
+ {
+ assert (m.lock.prio); // Sanity check (not bootstrapping/suspended).
+
+ if (ims.empty ())
+ {
+ ims.push_back (&m);
+ }
+ else if (ims.front () == pm)
+ {
+ ims.insert (ims.begin () + 1, &m); // Interrupt early (see below).
+ }
+ else if (prio < 1000 && prio_mon && ams.empty () /* first */)
+ {
+ // Tricky: replace the lowest priority task we have picked on
+ // the first pass with this one.
+ //
+ assert (ims.size () == 1); // Sanity check.
+ ims.back () = &m;
+ }
+ else
+ ims.insert (ims.begin (), &m); // Interrupt first (see below).
+
+ imt++;
+ }
+
+ ams.push_back (&m);
+ }
+ else if (m.lock.locked ())
+ m.lock.unlock ();
+ }
+
+ // Note: the order of machines may not match.
+ //
+ assert (ams.size () == t.auxiliary_machines.size ()); // Sanity check.
+
+ assert (!prio_mon || !ims.empty ()); // We should have at least one.
+
+ // Move the toolchain lock into this scope so that it's automatically
+ // released on any failure (on the happy path it is released by
+ // perform_task()).
+ //
+ toolchain_lock& rtl (tl);
+ toolchain_lock tl (move (rtl));
+
+ // Interrupt the machines, if necessary.
+ //
+ // Note that if we are interrupting multiple machines, then the target
+ // build machine, if needs to be interrupted, must be first, followed
+ // but all the target auxiliary machines. This way if we are unable to
+ // successfully interrupt them, we don't interrupt the rest.
+ //
+ vector<pid_t> pids; // Avoid re-interrupting the same pid.
+ for (size_t i (0); i != ims.size (); ++i)
+ {
+ bootstrapped_machine* im (ims[i]);
+
+ // Sanity checks.
+ //
+ assert (!im->lock.locked () && im->lock.prio);
+ assert (im != pm || i == 0);
+
+        const dir_path& tp (im->path); // <name>-<toolchain> path.
+ pid_t pid (im->lock.pid);
+
+ l2 ([&]{trace << "interrupting "
+ << (i < imt ? "target" : "lower priority")
+ << " machine " << tp << ", pid " << pid;});
+
+ // The plan is to send the interrupt and then wait for the lock.
+ //
+ // Note that the interrupt cannot be "lost" (or attributed to a
+ // different task) since we are sending it while holding the global
+ // lock and the other process arms it also while holding the global
+ // lock.
+ //
+ // But what can happen is the other task becomes suspended, which we
+ // will not be able to interrupt.
+ //
+ if (find (pids.begin (), pids.end (), pid) == pids.end ())
+ {
+ if (kill (pid, SIGUSR1) == -1)
+ {
+ // Ignore the case where there is no such process (e.g., the other
+ // process has terminated in which case the lock should be
+ // released automatically).
+ //
+ if (errno != ESRCH)
+ throw_generic_error (errno);
+ }
+
+ pids.push_back (pid);
+ }
+
+        // If we are interrupting additional machines in order to free up
+        // resources, there is no use acquiring their locks (or failing if
+        // unable to) since this is merely a performance optimization.
+ //
+ if (i >= imt)
+ continue;
+
+ // Try to lock the machine.
+ //
+ // While this normally shouldn't take long, there could be parts of
+ // the perform_task() logic that we do not interrupt and that may take
+ // some time.
+ //
+ machine_lock ml;
+
+ size_t retry (0);
+ for (; retry != 31; ++retry)
+ {
+ if (retry != 0)
+ ::sleep (1);
+
+ ml = lock_machine (tl, tp);
+
+ if (ml.locked ())
+ break;
+
+ if (ml.pid != pid)
+ {
+ error << "interrupted machine " << tp << " changed pid";
+ throw interrupt ();
+ }
+
+ if (!ml.prio) // Got suspended.
+ {
+ l2 ([&]{trace << "interrupted machine " << tp << " suspended";});
+ throw interrupt ();
+ }
+ }
+
+ if (!ml.locked ())
+ {
+ warn << "unable to lock interrupted machine " << tp << " within "
+ << (retry - 1) << "s";
+ throw interrupt ();
+ }
+
+ // This is an interrupted machine (build or auxiliary) that we will be
+ // using. See if it needs a re-bootstrap, the same as in
+ // enumerate_machines(). If not, then transfer the bootstrap manifest
+ // and lock.
+ //
+ const machine_manifest& mm (im->manifest.machine);
+
+ bootstrapped_machine_manifest bmm (
+ parse_manifest<bootstrapped_machine_manifest> (
+ tp / "manifest", "bootstrapped machine"));
+
+ bool rb (false);
+
+ if (bmm.machine.id != mm.id)
+ {
+ l3 ([&]{trace << "re-bootstrap " << tp << ": new machine";});
+ rb = true;
+ }
+
+ if (im == pm) // Only for build machine.
+ {
+ if (!tc_id.empty () && bmm.toolchain.id != tc_id)
+ {
+ l3 ([&]{trace << "re-bootstrap " << tp << ": new toolchain";});
+ rb = true;
+ }
+
+ if (int i = compare_bbot (bmm.bootstrap))
+ {
+ if (i < 0)
+ {
+ l3 ([&]{trace << "re-bootstrap " << tp << ": new bbot";});
+ rb = true;
+ }
+ else
+ {
+ l3 ([&]{trace << "ignoring " << tp << ": old bbot";});
+ rb = true;
+ }
+ }
+ }
+
+ // We are not going to try to re-bootstrap this machine "inline".
+ //
+ if (rb)
+ throw interrupt ();
+
+ im->manifest = move (bmm);
+ im->lock = move (ml);
+ }
+
+ // Check if we need to boost the number of CPUs to the full hardware
+ // concurrency.
+ //
+ optional<size_t> bcpus;
+ if (prio >= 10000)
+ bcpus = std::thread::hardware_concurrency ();
+
+ pm->lock.perform_task (tl, prio); // Build machine.
+ for (bootstrapped_machine* am: ams) // Auxiliary machines.
+ am->lock.perform_task (tl, prio);
+
+ r = perform_task (move (tl), *pm, ams, t, bcpus);
+ }
+ catch (const interrupt&)
+ {
+ // Note: no work_dir.
+ //
+ r = perform_task_result (
+ auto_rmdir (),
+ result_manifest {
+ t.name,
+ t.version,
+ result_status::interrupt,
+ operation_results {},
+ nullopt /* worker_checksum */,
+ nullopt /* dependency_checksum */});
+ }
+
+ // No need to hold the locks any longer.
+ //
+ if (pm != nullptr && pm->lock.locked ())
+ pm->lock.unlock ();
+
+ for (bootstrapped_machine* am: ams)
+ if (am->lock.locked ())
+ am->lock.unlock ();
+
+ result_manifest& rm (r.manifest);
if (ops.dump_result ())
{
- serialize_manifest (r, cout, "stdout", "result");
+ serialize_manifest (rm, cout, "stdout", "result");
return 0;
}
@@ -1539,7 +3648,7 @@ try
openssl os (trace,
fdstream_mode::text, path ("-"), 2,
- ops.openssl (), "rsautl",
+ ops.openssl (), sign_cmd,
ops.openssl_option (), "-sign", "-inkey", ops.auth_key ());
os.out << *tr.challenge;
@@ -1560,9 +3669,258 @@ try
fail << "unable to sign task response challenge: " << e;
}
+ // Re-package the build artifacts, if present, into the type/instance-
+ // specific archives and upload them to the type-specific URLs, if
+ // provided.
+ //
+ // Note that the initial upload archive content is organized as a bunch of
+ // upload/<type>/<instance>/*, where the second level directories are the
+ // upload types and the third level sub-directories are their instances.
+ // The resulting <instance>.tar archives content (which is what we submit
+ // to the type-specific handler) are organized as <instance>/*.
+ //
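
[For instance (type and instance names hypothetical), an archive containing
upload/bindist/debian/* and upload/bindist/fedora/* is re-packaged into
debian.tar and fedora.tar, each rooted at its instance directory, with both
posted to the URL registered for the bindist upload type.]
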
+ if (r.upload_archive && !tr.upload_urls.empty ())
+ {
+ const path& ua (*r.upload_archive);
+
+ // Extract the archive content into the parent directory of the archive
+ // file. But first, make sure the resulting directory doesn't exist.
+ //
+ // Note that while we don't assume where inside the working directory
+ // the archive is, we do assume that there is nothing clashing/precious
+ // in the upload/ directory which we are going to cleanup.
+ //
+ dir_path d (ua.directory ());
+
+ const dir_path ud (d / dir_path ("upload"));
+ try_rmdir_r (ud);
+
+ try
+ {
+ process_exit pe (
+ process_run_callback (
+ trace,
+ fdopen_null (), // Don't expect to read from stdin.
+ 2, // Redirect stdout to stderr.
+ 2,
+ "tar",
+ "-xf", ua,
+ "-C", d));
+
+ if (!pe)
+ fail << "tar " << pe;
+ }
+ catch (const system_error& e)
+ {
+ // There must be something wrong with the setup or there is no space
+ // left on the disk, thus the failure is fatal.
+ //
+ fail << "unable to extract build artifacts from archive: " << e;
+ }
+
+ try_rmfile (ua); // Let's free up the disk space.
+
+ // To decrease nesting a bit, let's collect the type-specific upload
+ // directories and the corresponding URLs first. This way we can also
+ // create the archive files as the upload/ directory sub-entries without
+ // interfering with iterating over this directory.
+ //
+ vector<pair<dir_path, string>> urls;
+
+ try
+ {
+ for (const dir_entry& te: dir_iterator (ud, dir_iterator::no_follow))
+ {
+ const string& t (te.path ().string ());
+
+ // Can only be a result of the worker malfunction, thus the failure
+ // is fatal.
+ //
+ if (te.type () != entry_type::directory)
+ fail << "unexpected filesystem entry '" << t << "' in " << ud;
+
+ auto i (find_if (tr.upload_urls.begin (), tr.upload_urls.end (),
+ [&t] (const upload_url& u) {return u.type == t;}));
+
+ if (i == tr.upload_urls.end ())
+ continue;
+
+ urls.emplace_back (ud / path_cast<dir_path> (te.path ()), i->url);
+ }
+ }
+ catch (const system_error& e)
+ {
+ fail << "unable to iterate over " << ud << ": " << e;
+ }
+
+ // Now create archives and upload.
+ //
+ for (const pair<dir_path, string>& p: urls)
+ {
+ const dir_path& td (p.first); // <type>/
+ const string& url (p.second);
+
+ try
+ {
+ for (const dir_entry& ie: dir_iterator (td, dir_iterator::no_follow))
+ {
+ const string& i (ie.path ().string ()); // <instance>
+
+ // Can only be a result of the worker malfunction, thus the
+ // failure is fatal.
+ //
+ if (ie.type () != entry_type::directory)
+ fail << "unexpected filesystem entry '" << i << "' in " << td;
+
+ // Archive the upload instance files and, while at it, calculate
+ // the resulting archive checksum.
+ //
+ sha256 sha;
+ auto_rmfile ari (ud / (i + ".tar"));
+
+ try
+ {
+ // Instruct tar to print the archive to stdout.
+ //
+ fdpipe in_pipe (fdopen_pipe (fdopen_mode::binary));
+
+ process pr (
+ process_start_callback (
+ trace,
+ fdopen_null (), // Don't expect to read from stdin.
+ in_pipe,
+ 2 /* stderr */,
+ "tar",
+ "--format", "ustar",
+ "-c",
+ "-C", td,
+ i));
+
+ // Shouldn't throw, unless something is severely damaged.
+ //
+ in_pipe.out.close ();
+
+ ifdstream is (
+ move (in_pipe.in), fdstream_mode::skip, ifdstream::badbit);
+
+ ofdstream os (ari.path, fdopen_mode::binary);
+
+ char buf[8192];
+ while (!eof (is))
+ {
+ is.read (buf, sizeof (buf));
+
+ if (size_t n = static_cast<size_t> (is.gcount ()))
+ {
+ sha.append (buf, n);
+ os.write (buf, n);
+ }
+ }
+
+ os.close ();
+
+ if (!pr.wait ())
+ fail << "tar " << *pr.exit;
+ }
+ catch (const system_error& e)
+ {
+ // There must be something wrong with the setup or there is no
+ // space left on the disk, thus the failure is fatal.
+ //
+ fail << "unable to archive " << td << i << "/: " << e;
+ }
+
+ // Post the upload instance archive.
+ //
+ using namespace http_service;
+
+ parameters params ({
+ {parameter::text, "session", tr.session},
+ {parameter::text, "instance", i},
+ {parameter::file, "archive", ari.path.string ()},
+ {parameter::text, "sha256sum", sha.string ()}});
+
+ if (challenge)
+ params.push_back ({
+ parameter::text, "challenge", base64_encode (*challenge)});
+
+ result pr (post (ops, url, params));
+
+ // Turn the potential upload failure into the "upload" operation
+ // error, amending the task result manifest.
+ //
+ if (pr.error)
+ {
+ // The "upload" operation result must be present (otherwise
+ // there would be nothing to upload). We can assume it is last.
+ //
+ assert (!rm.results.empty ());
+
+ operation_result& r (rm.results.back ());
+
+ // The "upload" operation result must be the last, if present.
+ //
+ assert (r.operation == "upload");
+
+ auto log = [&r, indent = false] (const string& t,
+ const string& l) mutable
+ {
+ if (indent)
+ r.log += " ";
+ else
+ indent = true;
+
+ r.log += t;
+ r.log += ": ";
+ r.log += l;
+ r.log += '\n';
+ };
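+
+ // For example (hypothetical values), the resulting log fragment could
+ // look like:
+ //
+ // error: unable to upload bindist/debian build artifacts
+ //   error: curl exited with code 22
+ //   reason: archive checksum mismatch
+ //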
+
+ log ("error",
+ "unable to upload " + td.leaf ().string () + '/' + i +
+ " build artifacts");
+
+ log ("error", *pr.error);
+
+ if (!pr.message.empty ())
+ log ("reason", pr.message);
+
+ if (pr.reference)
+ log ("reference", *pr.reference);
+
+ for (const manifest_name_value& nv: pr.body)
+ {
+ if (!nv.name.empty ())
+ log (nv.name, nv.value);
+ }
+
+ r.status |= result_status::error;
+ rm.status |= r.status;
+
+ break;
+ }
+ }
+
+ // Bail out on the instance archive upload failure.
+ //
+ if (!rm.status)
+ break;
+ }
+ catch (const system_error& e)
+ {
+ fail << "unable to iterate over " << td << ": " << e;
+ }
+ }
+ }
+
+ result_status rs (rm.status);
+
// Upload the result.
//
- result_request_manifest rq {tr.session, move (challenge), move (r)};
+ result_request_manifest rq {tr.session,
+ move (challenge),
+ agent_checksum,
+ move (rm)};
{
const string& u (*tr.result_url);
@@ -1586,7 +3944,20 @@ try
try
{
- serialize_manifest (rq, c.out, u, "task request");
+ // Don't break lines in the manifest values so as not to further
+ // increase the size of the result request manifest encoded
+ // representation. Note that this manifest can contain quite a few
+ // lines in the operation logs, potentially truncated to fit the upload
+ // limit (see worker/worker.cxx for details). Breaking these lines can
+ // increase the request size beyond this limit and so we could end up
+ // with a request failure.
+ //
+ serialize_manifest (rq,
+ c.out,
+ u,
+ "result request",
+ true /* fail_hard */,
+ true /* long_lines */);
}
catch (const failed&) {f = true;}
@@ -1602,8 +3973,9 @@ try
}
}
- l2 ([&]{trace << "built " << t.name << '/' << t.version << " "
- << "on " << t.machine << " "
+ l2 ([&]{trace << "built " << t.name << '/' << t.version << ' '
+ << "status " << rs << ' '
+ << "on " << t.machine << ' '
<< "for " << url;});
}
}
diff --git a/bbot/agent/agent.hxx b/bbot/agent/agent.hxx
index ba3719e..9c8400f 100644
--- a/bbot/agent/agent.hxx
+++ b/bbot/agent/agent.hxx
@@ -1,5 +1,5 @@
// file : bbot/agent/agent.hxx -*- C++ -*-
-// license : TBC; see accompanying LICENSE file
+// license : MIT; see accompanying LICENSE file
#ifndef BBOT_AGENT_AGENT_HXX
#define BBOT_AGENT_AGENT_HXX
@@ -22,14 +22,14 @@ namespace bbot
extern standard_version tc_ver; // Toolchain version.
extern string tc_id; // Toolchain id.
- extern uint16_t inst; // Instance number.
+ extern uint16_t inst; // Instance number (1-based).
extern string hname; // Our host name.
extern string hip; // Our IP address.
extern uid_t uid; // Our effective user id.
extern string uname; // Our effective user name.
- extern uint16_t offset; // Agent offset.
+ extern uint16_t offset; // Agent offset (10-9990; used for ports).
// Random number generator (currently not MT-safe and limited to RAND_MAX).
//
diff --git a/bbot/agent/http-service.cxx b/bbot/agent/http-service.cxx
new file mode 100644
index 0000000..1921edc
--- /dev/null
+++ b/bbot/agent/http-service.cxx
@@ -0,0 +1,364 @@
+// file : bbot/agent/http-service.cxx -*- C++ -*-
+// license : MIT; see accompanying LICENSE file
+
+#include <bbot/agent/http-service.hxx>
+
+#include <libbutl/curl.hxx>
+
+#include <bbot/diagnostics.hxx>
+
+using namespace std;
+using namespace butl;
+
+namespace bbot
+{
+ namespace http_service
+ {
+ result
+ post (const agent_options& o, const string& u, const parameters& params)
+ {
+ tracer trace ("http_service::post");
+
+ using parser = manifest_parser;
+ using parsing = manifest_parsing;
+ using name_value = manifest_name_value;
+
+ // The overall plan is to post the data using the curl program, read
+ // the HTTP response status and content type, read and parse the body
+ // according to the content type, and obtain the result message and
+ // optional reference in both the request success and failure cases.
+ //
+ // The successful request response (HTTP status code 200) is expected to
+ // contain the result manifest (text/manifest content type). The faulty
+ // response (HTTP status code other than 200) can either contain the
+ // result manifest or a plain text error description (text/plain content
+ // type) or some other content (for example text/html). We will return
+ // the manifest message value, if available, or the first line of the
+ // plain text error description or, as a last resort, construct the
+ // message from the HTTP status code and reason phrase. We will also
+ // return the error description if anything goes wrong with the HTTP
+ // request or the response manifest status value is not 200.
+ //
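+ // For reference, a result manifest response (hypothetical values)
+ // could look like:
+ //
+ // : 1
+ // status: 200
+ // message: upload is queued
+ // reference: 4d2a6e8c
+ //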
+ string message;
+ optional<uint16_t> status; // Request result manifest status value.
+ optional<string> reference;
+ vector<name_value> body;
+ optional<string> error;
+
+ // None of the 3XX redirect code semantics assume automatic re-posting.
+ // We will treat all such codes as failures, adding the location header
+ // value to the message for troubleshooting.
+ //
+ optional<string> location;
+
+ // Convert the submit arguments to curl's --form* options and cache the
+ // pointer to the file_text parameter value, if present, for writing
+ // into curl's stdin.
+ //
+ strings fos;
+ const string* file_text (nullptr);
+
+ for (const parameter& p: params)
+ {
+ if (p.type == parameter::file_text)
+ {
+ assert (file_text == nullptr);
+ file_text = &p.value;
+ }
+
+ fos.emplace_back (p.type == parameter::file ||
+ p.type == parameter::file_text
+ ? "--form"
+ : "--form-string");
+
+ fos.emplace_back (
+ p.type == parameter::file ? p.name + "=@" + p.value :
+ p.type == parameter::file_text ? p.name + "=@-" :
+ p.name + '=' + p.value);
+ }
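+
+ // For example (hypothetical parameters), {text, "session", "123"} maps
+ // to --form-string session=123 while {file, "archive", "a.tar"} maps
+ // to --form archive=@a.tar.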
+
+ // Note that we prefer the low-level process API for running curl over
+ // using butl::curl because the latter is restrictive and inconvenient
+ // in this context.
+ //
+ // Start curl program.
+ //
+ // Text mode seems appropriate.
+ //
+ fdpipe in_pipe;
+ fdpipe out_pipe;
+ process pr;
+
+ try
+ {
+ in_pipe = fdopen_pipe ();
+
+ out_pipe = (file_text != nullptr
+ ? fdopen_pipe ()
+ : fdpipe {fdopen_null (), nullfd});
+
+ pr = process_start_callback (trace,
+ out_pipe.in.get () /* stdin */,
+ in_pipe /* stdout */,
+ 2 /* stderr */,
+ "curl",
+
+ // Include the response headers in the
+ // output so we can get the status
+ // code/reason, content type, and the
+ // redirect location.
+ //
+ "--include",
+
+ "--max-time", o.request_timeout (),
+ "--connect-timeout", o.connect_timeout (),
+ fos,
+ u);
+
+ // Shouldn't throw, unless something is severely damaged.
+ //
+ in_pipe.out.close ();
+ out_pipe.in.close ();
+ }
+ catch (const process_error& e)
+ {
+ fail << "unable to execute curl: " << e;
+ }
+ catch (const io_error& e)
+ {
+ fail << "unable to open pipe: " << e;
+ }
+
+ auto finish = [&pr, &error] (bool io_read = false, bool io_write = false)
+ {
+ if (!pr.wait ())
+ error = "curl " + to_string (*pr.exit);
+ else if (io_read)
+ error = "error reading curl output";
+ else if (io_write)
+ error = "error writing curl input";
+ };
+
+ bool io_write (false);
+ bool io_read (false);
+
+ try
+ {
+ // First we read the HTTP response status line and headers. At this
+ // stage we will read until the empty line (containing just CRLF). Not
+ // being able to reach such a line is an error, which is the reason
+ // for the exception mask choice.
+ //
+ ifdstream is (
+ move (in_pipe.in),
+ fdstream_mode::skip,
+ ifdstream::badbit | ifdstream::failbit | ifdstream::eofbit);
+
+ if (file_text != nullptr)
+ {
+ ofdstream os (move (out_pipe.out));
+ os << *file_text;
+ os.close ();
+
+ // Indicate to the potential IO error handling that we are done with
+ // writing.
+ //
+ file_text = nullptr;
+ }
+
+ auto bad_response = [] (const string& d) {throw runtime_error (d);};
+
+ curl::http_status rs;
+
+ try
+ {
+ rs = curl::read_http_status (is, false /* skip_headers */);
+ }
+ catch (const invalid_argument& e)
+ {
+ bad_response (
+ string ("unable to read HTTP response status line: ") + e.what ());
+ }
+
+ // Read through the response headers until the empty line is
+ // encountered and obtain the content type and/or the redirect
+ // location, if present.
+ //
+ optional<string> ctype;
+
+ // Check if the line contains the specified header and return its
+ // value if that's the case. Return nullopt otherwise.
+ //
+ // Note that we don't expect the header values that we are interested
+ // in to span multiple lines.
+ //
+ string l;
+ auto header = [&l] (const char* name) -> optional<string>
+ {
+ size_t n (string::traits_type::length (name));
+ if (!(icasecmp (name, l, n) == 0 && l[n] == ':'))
+ return nullopt;
+
+ string r;
+ size_t p (l.find_first_not_of (' ', n + 1)); // The value begin.
+ if (p != string::npos)
+ {
+ size_t e (l.find_last_not_of (' ')); // The value end.
+ assert (e != string::npos && e >= p);
+
+ r = string (l, p, e - p + 1);
+ }
+
+ return optional<string> (move (r));
+ };
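+
+ // For example (hypothetical header line), for l containing
+ // "Content-Type: text/manifest;charset=utf-8", header ("Content-Type")
+ // returns "text/manifest;charset=utf-8".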
+
+ while (!(l = curl::read_http_response_line (is)).empty ())
+ {
+ if (optional<string> v = header ("Content-Type"))
+ ctype = move (v);
+ else if (optional<string> v = header ("Location"))
+ {
+ if ((rs.code >= 301 && rs.code <= 303) || rs.code == 307)
+ location = move (v);
+ }
+ }
+
+ assert (!eof (is)); // Would have already failed otherwise.
+
+ // Now parse the response payload if the content type is specified and
+ // is recognized (text/manifest or text/plain), skip it (with the
+ // ifdstream's close() function) otherwise.
+ //
+ // Note that eof and getline() fail conditions are not errors anymore,
+ // so we adjust the exception mask accordingly.
+ //
+ is.exceptions (ifdstream::badbit);
+
+ if (ctype)
+ {
+ if (icasecmp ("text/manifest", *ctype, 13) == 0)
+ {
+ parser p (is, "manifest");
+ name_value nv (p.next ());
+
+ if (nv.empty ())
+ bad_response ("empty manifest");
+
+ const string& n (nv.name);
+ string& v (nv.value);
+
+ // The format version pair is verified by the parser.
+ //
+ assert (n.empty () && v == "1");
+
+ body.push_back (move (nv)); // Save the format version pair.
+
+ auto bad_value = [&p, &nv] (const string& d) {
+ throw parsing (p.name (), nv.value_line, nv.value_column, d);};
+
+ // Get and verify the HTTP status.
+ //
+ nv = p.next ();
+ if (n != "status")
+ bad_value ("no status specified");
+
+ uint16_t c (curl::parse_http_status_code (v));
+ if (c == 0)
+ bad_value ("invalid HTTP status '" + v + '\'');
+
+ if (c != rs.code)
+ bad_value ("status " + v + " doesn't match HTTP response "
+ "code " + to_string (rs.code));
+
+ // Get the message.
+ //
+ nv = p.next ();
+ if (n != "message" || v.empty ())
+ bad_value ("no message specified");
+
+ message = move (v);
+
+ // Try to get an optional reference.
+ //
+ nv = p.next ();
+
+ if (n == "reference")
+ {
+ if (v.empty ())
+ bad_value ("empty reference specified");
+
+ reference = move (v);
+
+ nv = p.next ();
+ }
+
+ // Save the remaining name/value pairs.
+ //
+ for (; !nv.empty (); nv = p.next ())
+ body.push_back (move (nv));
+
+ status = c;
+ }
+ else if (icasecmp ("text/plain", *ctype, 10) == 0)
+ getline (is, message); // Can result in an empty message.
+ }
+
+ is.close (); // Detect errors.
+
+ // The only meaningful result we expect is the manifest (the status
+ // code is not necessarily 200). We are unable to interpret any other
+ // cases and so report them as a bad response.
+ //
+ if (!status)
+ {
+ if (rs.code == 200)
+ bad_response ("manifest expected");
+
+ if (message.empty ())
+ {
+ message = "HTTP status code " + to_string (rs.code);
+
+ if (!rs.reason.empty ())
+ message += " (" + lcase (rs.reason) + ')';
+ }
+
+ if (location)
+ message += ", new location: " + *location;
+
+ bad_response ("bad server response");
+ }
+ }
+ catch (const io_error&)
+ {
+ // Presumably the child process failed and issued diagnostics so let
+ // finish() try to deal with that first.
+ //
+ (file_text != nullptr ? io_write : io_read) = true;
+ }
+ // Handle all parsing errors, including the manifest_parsing exception
+ // that inherits from the runtime_error exception.
+ //
+ // Note that the io_error class inherits from the runtime_error class,
+ // so this catch-clause must go last.
+ //
+ catch (const runtime_error& e)
+ {
+ finish (); // Sets the error variable on process failure.
+
+ if (!error)
+ error = e.what ();
+ }
+
+ if (!error)
+ finish (io_read, io_write);
+
+ assert (error || (status && !message.empty ()));
+
+ if (!error && *status != 200)
+ error = "status code " + to_string (*status);
+
+ return result {
+ move (error), move (message), move (reference), move (body)};
+ }
+ }
+}
diff --git a/bbot/agent/http-service.hxx b/bbot/agent/http-service.hxx
new file mode 100644
index 0000000..b50c6b7
--- /dev/null
+++ b/bbot/agent/http-service.hxx
@@ -0,0 +1,71 @@
+// file : bbot/agent/http-service.hxx -*- C++ -*-
+// license : MIT; see accompanying LICENSE file
+
+#ifndef BBOT_AGENT_HTTP_SERVICE_HXX
+#define BBOT_AGENT_HTTP_SERVICE_HXX
+
+#include <libbutl/manifest-types.hxx>
+
+#include <bbot/types.hxx>
+#include <bbot/utility.hxx>
+
+#include <bbot/agent/agent-options.hxx>
+
+// NOTE: this implementation is inspired by bdep's http_service::post()
+// function. The key difference is the result::error member which is used
+// to return rather than fail on upload errors.
+
+namespace bbot
+{
+ namespace http_service
+ {
+ // If type is file, then the value is a path to be uploaded. If type is
+ // file_text, then the value is the file content to be uploaded.
+ //
+ struct parameter
+ {
+ enum {text, file, file_text} type;
+ string name;
+ string value;
+ };
+ using parameters = vector<parameter>;
+
+ struct result
+ {
+ // If error is present, then it contains the description of why the
+ // upload failed. In this case message contains additional information.
+ //
+ optional<string> error;
+ string message;
+ optional<string> reference;
+
+ // Does not include status, message, or reference.
+ //
+ vector<butl::manifest_name_value> body;
+ };
+
+ // Submit text parameters and/or upload files to an HTTP service via the
+ // POST method. Use the multipart/form-data content type if any files are
+ // uploaded and application/x-www-form-urlencoded otherwise.
+ //
+ // Note: currently only one file_text parameter can be specified.
+ //
+ // Return the response manifest message and reference (if present, see
+ // below) and the rest of the manifest values, if any. If unable to
+ // retrieve the response manifest, the message can also be set to the
+ // first line of the plain text error description or, as a last resort,
+ // constructed from the HTTP status code and reason phrase. Issue
+ // diagnostics and throw failed if something is wrong with the setup
+ // (unable to execute curl, etc.).
+ //
+ // Note that the HTTP service is expected to respond with the result
+ // manifest that starts with the 'status' (HTTP status code) and 'message'
+ // (diagnostics message) values, optionally followed by 'reference' and
+ // then other manifest values.
+ //
+ result
+ post (const agent_options&, const string& url, const parameters&);
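+
+ // A minimal usage sketch (hypothetical URL and values; assumes an
+ // agent_options instance ops):
+ //
+ // parameters ps {
+ //   {parameter::text, "session", "123"},
+ //   {parameter::file, "archive", "a.tar"}};
+ //
+ // result r (post (ops, "https://example.org/upload", ps));
+ //
+ // if (r.error)
+ //   fail << "unable to upload: " << *r.error;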
+ }
+}
+
+#endif // BBOT_AGENT_HTTP_SERVICE_HXX
diff --git a/bbot/agent/machine.cxx b/bbot/agent/machine.cxx
index a7dc192..74c9b93 100644
--- a/bbot/agent/machine.cxx
+++ b/bbot/agent/machine.cxx
@@ -1,5 +1,5 @@
// file : bbot/agent/machine.cxx -*- C++ -*-
-// license : TBC; see accompanying LICENSE file
+// license : MIT; see accompanying LICENSE file
#include <bbot/agent/machine.hxx>
@@ -83,9 +83,9 @@ namespace bbot
}
static string
- create_tap (const string& br, uint16_t port)
+ create_tap (const string& br, uint16_t machine_num, uint16_t port)
{
- string t ("tap" + to_string (offset));
+ string t ("tap" + to_string (offset + machine_num));
tracer trace ("create_tap", t.c_str ());
@@ -126,8 +126,10 @@ namespace bbot
string bridge; // Bridge interface to which this tap belongs
uint16_t port; // UDP port to forward TFTP traffic to.
- tap (string b, uint16_t p)
- : iface (create_tap (b, p)), bridge (move (b)), port (p) {}
+ tap (string b, uint16_t machine_num, uint16_t p)
+ : iface (create_tap (b, machine_num, p)),
+ bridge (move (b)),
+ port (p) {}
~tap ()
{
@@ -169,6 +171,9 @@ namespace bbot
public:
kvm_machine (const dir_path&,
const machine_manifest&,
+ uint16_t machine_num,
+ size_t cpus,
+ size_t ram,
const optional<string>& mac,
const string& br_iface,
uint16_t tftp_port,
@@ -213,71 +218,69 @@ namespace bbot
kvm_machine::
kvm_machine (const dir_path& md,
const machine_manifest& mm,
+ uint16_t m_num,
+ size_t cpus,
+ size_t ram,
const optional<string>& omac,
const string& br,
- uint16_t port,
+ uint16_t tftp_port,
bool pub_vnc)
: machine (mm.mac ? *mm.mac : // Fixed mac from machine manifest.
omac ? *omac : // Generated mac from previous bootstrap.
generate_mac ()),
kvm ("kvm"),
- net (br, port),
- vnc (machine_vnc (pub_vnc)),
+ net (br, m_num, tftp_port),
+ vnc (machine_vnc (m_num, pub_vnc)),
monitor ("/tmp/monitor-" + tc_name + '-' + to_string (inst))
{
tracer trace ("kvm_machine", md.string ().c_str ());
+ // Monitor path.
+ //
+ if (m_num != 0)
+ {
+ monitor += '-';
+ monitor += to_string (m_num);
+ }
+
if (sizeof (sockaddr_un::sun_path) <= monitor.size ())
throw invalid_argument ("monitor unix socket path too long");
// Machine name.
//
// While we currently can only have one running machine per toolchain, add
- // the instance number for debuggability.
+ // the instance number and non-0 machine number for debuggability.
//
string name (mm.name + '-' + tc_name + '-' + to_string (inst));
+ if (m_num != 0)
+ {
+ name += '-';
+ name += to_string (m_num);
+ }
+
// Machine log. Note that it is only removed with an explicit cleanup()
// call.
//
log = path ("/tmp/" + path::traits_type::temp_name (name) + ".log");
- // Map logical CPUs to sockets/cores/threads keeping the number of and
- // cores even. Failed that, QEMU just makes it a machine with that number
- // of sockets and some operating systems (like Windows) can only do two.
+ // Map logical CPUs to sockets/cores/threads keeping the number of sockets
+ // and cores even. Failing that, QEMU just makes it a machine with that
+ // number of sockets and some operating systems (like Windows) can only do
+ // two.
//
// Note that for best results you may want to adjust (e.g., by over-
+ // committing) the number of CPUs to be a power of 2.
//
- size_t cpus (ops.cpu ()), cores (cpus);
+ size_t cores (cpus);
- size_t sockets (cores >= 16 && cores % 4 == 0 ? 2 :
- cores >= 64 && cores % 8 == 0 ? 4 : 1);
+ size_t sockets (cores >= 256 && cores % 8 == 0 ? 4 :
+ cores >= 128 && cores % 4 == 0 ? 2 : 1);
cores /= sockets;
- size_t threads (cores >= 8 && cores % 4 == 0 ? 2 : 1);
+ size_t threads (cores >= 16 && cores % 4 == 0 ? 2 : 1);
cores /= threads;
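+
+ // For example, 8 CPUs map to 1 socket x 8 cores x 1 thread, 128 CPUs
+ // to 2 sockets x 32 cores x 2 threads, and 256 CPUs to 4 sockets x 32
+ // cores x 2 threads.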
- // We probably don't want to commit all the available RAM to the VM since
- // some of it could be used on the host side for caching, etc. So the
- // heuristics that we will use is 4G or 1G per CPU, whichever is greater
- // and the rest divide equally between the host and the VM.
- //
- // But the experience showed that we actually want to be able to precisely
- // control the amount of RAM assigned to VMs (e.g., for tmpfs size) and
- // without back-fudging for this heuristics.
- //
-#if 0
- size_t ram ((cpus < 4 ? 4 : cpus) * 1024 * 1024); // Kb.
-
- if (ram > ops.ram ())
- ram = ops.ram ();
- else
- ram += (ops.ram () - ram) / 2;
-#else
- size_t ram (ops.ram ());
-#endif
-
// If we have options, use that instead of the default network and
// disk configuration.
//
@@ -309,11 +312,21 @@ namespace bbot
}
else
{
- auto add = [&os] (string o, string v)
+ // @@ TMP: libstud-optional issue #1.
+ //
+#if 0
+ auto add = [&os] (string o, optional<string> v = {})
{
os.push_back (move (o));
- os.push_back (move (v));
+ if (v) os.push_back (move (*v));
};
+#else
+ auto add = [&os] (string o, string v = {})
+ {
+ os.push_back (move (o));
+ if (!v.empty ()) os.push_back (move (v));
+ };
+#endif
// Network.
//
@@ -334,6 +347,17 @@ namespace bbot
//"-drive", "if=none,id=disk0,format=raw,file=disk.img"
//"-device", "virtio-scsi-pci,id=scsi"
//"-device", "scsi-hd,drive=disk0"
+
+ // USB settings.
+ //
+ // These options should make graphical VMs usable from VNC.
+ //
+ // Note that the "standard" USB bus may not be available on
+ // architectures other than x86 (e.g., aarch64).
+ //
+ add ("-usb");
+ add ("-device", "usb-kbd");
+ add ("-device", "usb-tablet");
}
// Setup QMP (QEMU Machine Protocol) monitor to act as a log.
@@ -373,6 +397,7 @@ namespace bbot
2, // 1>&2 (QMP goes to stdout)
qmp_out,
process_env (kvm, md, env), // Run from the machine's directory.
+ "-enable-kvm",
"-name", name + ",debug-threads=on",
"-S", // Start suspended.
"-boot", "c", // Boot from disk.
@@ -382,14 +407,10 @@ namespace bbot
// RTC settings.
//
"-rtc", "clock=vm,driftfix=slew",
+#ifdef __x86_64__
"-no-hpet",
"-global", "kvm-pit.lost_tick_policy=discard",
-
- // USB settings.
- //
- // This option should make graphical VMs usable from VNC.
- //
- "-usb", "-device", "usb-tablet",
+#endif
// These can override the above but not below.
//
@@ -397,7 +418,7 @@ namespace bbot
// RAM and CPU configuration.
//
- "-m", to_string (ram / 1024) + "M",
+ "-m", to_string (ram / 1024) + 'M',
"-smp", (to_string (cpus) +
",sockets=" + to_string (sockets) +
",cores=" + to_string (cores) +
@@ -413,7 +434,7 @@ namespace bbot
// collision-wise with anything useful.
//
"-vnc",
- (pub_vnc ? ":" : "127.0.0.1:") + to_string (offset), // 5900 + offset
+ (pub_vnc ? ":" : "127.0.0.1:") + to_string (offset + m_num), // 5900-base
// QMP.
//
@@ -422,7 +443,7 @@ namespace bbot
// Monitor.
//
- "-chardev", "socket,id=mon,path=" + monitor.string () + ",server,nowait",
+ "-chardev", "socket,id=mon,path=" + monitor.string () + ",server=on,wait=off",
"-mon", "chardev=mon,mode=readline");
qmp_out.close ();
@@ -441,7 +462,8 @@ namespace bbot
}
catch (const io_error& e)
{
- fail << "unable to initialize QMP: " << e;
+ fail << "unable to initialize QMP: " << e <<
+ info << "see " << log;
}
// Start execution.
@@ -452,7 +474,8 @@ namespace bbot
}
catch (const system_error& e)
{
- fail << "unable to communicate with qemu monitor: " << e;
+ fail << "unable to communicate with qemu monitor: " << e <<
+ info << "see " << log;
}
}
@@ -504,7 +527,8 @@ namespace bbot
if (r)
return true;
- fail << "unable to communicate with qemu monitor: " << e;
+ fail << "unable to communicate with qemu monitor: " << e <<
+ info << "see " << log;
}
return wait (seconds);
@@ -525,7 +549,8 @@ namespace bbot
if (wait (t, fh))
return;
- fail (fh) << "unable to communicate with qemu monitor: " << e;
+ fail (fh) << "unable to communicate with qemu monitor: " << e <<
+ info << "see " << log;
}
wait (fh);
@@ -540,7 +565,8 @@ namespace bbot
}
catch (const system_error& e)
{
- fail (fh) << "unable to communicate with qemu monitor: " << e;
+ fail (fh) << "unable to communicate with qemu monitor: " << e <<
+ info << "see " << log;
}
}
@@ -575,7 +601,8 @@ namespace bbot
}
catch (const process_error& e)
{
- fail (fh) << "unable to wait for " << kvm << ": " << e << endf;
+ fail (fh) << "unable to wait for " << kvm << ": " << e <<
+ info << "see " << log << endf;
}
}
@@ -645,30 +672,37 @@ namespace bbot
unique_ptr<machine>
start_machine (const dir_path& md,
const machine_manifest& mm,
+ uint16_t machine_num,
+ size_t cpus,
+ size_t ram,
const optional<string>& mac,
const string& br_iface,
uint16_t tftp_port,
bool pub_vnc)
{
+ assert (machine_num < 10);
+
switch (mm.type)
{
case machine_type::kvm:
return make_unique<kvm_machine> (
- md, mm, mac, br_iface, tftp_port, pub_vnc);
+ md, mm, machine_num, cpus, ram, mac, br_iface, tftp_port, pub_vnc);
case machine_type::nspawn:
- assert (false); //@@ TODO
+ assert (false); // @@ TODO
}
return nullptr;
}
string
- machine_vnc (bool pub)
+ machine_vnc (uint16_t num, bool pub)
{
+ assert (num < 10);
+
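+ // For example (hypothetical values), with offset 20 and machine number
+ // 1, the private endpoint is "127.0.0.1:5921".
+ //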
string r (pub ? hip : "127.0.0.1");
r += ':';
- r += to_string (5900 + offset);
+ r += to_string (5900 + offset + num);
return r;
}
}
diff --git a/bbot/agent/machine.hxx b/bbot/agent/machine.hxx
index 9a47d12..13646db 100644
--- a/bbot/agent/machine.hxx
+++ b/bbot/agent/machine.hxx
@@ -1,5 +1,5 @@
// file : bbot/agent/machine.hxx -*- C++ -*-
-// license : TBC; see accompanying LICENSE file
+// license : MIT; see accompanying LICENSE file
#ifndef BBOT_AGENT_MACHINE_HXX
#define BBOT_AGENT_MACHINE_HXX
@@ -78,19 +78,28 @@ namespace bbot
class machine_manifest;
+ // The machine number should be between 0 and 9, with 0 for the build
+ // machine and 1-9 for the auxiliary machines.
+ //
+ // Note that tftp_port is not a base (in other words, it is expected to
+ // already be appropriately offset).
+ //
unique_ptr<machine>
start_machine (const dir_path&,
const machine_manifest&,
+ uint16_t machine_num,
+ size_t cpus,
+ size_t ram, // In KiB.
const optional<string>& mac,
const string& br_iface,
uint16_t tftp_port,
- bool pub_vnc);
+ bool public_vnc);
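+
+ // A hypothetical call for the first auxiliary machine (machine number
+ // 1) might look like:
+ //
+ // start_machine (md, mm, 1, cpus, ram, mac, "br1", tftp_port, false);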
// Return the machine's public or private VNC session endpoint in the
// '<ip>:<port>' form.
//
string
- machine_vnc (bool pub_vnc);
+ machine_vnc (uint16_t machine_num, bool public_vnc);
}
#endif // BBOT_AGENT_MACHINE_HXX
diff --git a/bbot/agent/tftp.cxx b/bbot/agent/tftp.cxx
index 0df0d1b..58aaabc 100644
--- a/bbot/agent/tftp.cxx
+++ b/bbot/agent/tftp.cxx
@@ -1,5 +1,5 @@
// file : bbot/agent/tftp.cxx -*- C++ -*-
-// license : TBC; see accompanying LICENSE file
+// license : MIT; see accompanying LICENSE file
#include <bbot/agent/tftp.hxx>
@@ -126,7 +126,7 @@ namespace bbot
ops.tftp ());
// This is not really accurate since tftpd will, for example, serve
- // an upload request until it is complete. But it's close anough for
+ // an upload request until it is complete. But it's close enough for
// our needs.
//
sec -= (inc - static_cast<size_t> (timeout.tv_sec));
diff --git a/bbot/agent/tftp.hxx b/bbot/agent/tftp.hxx
index 2d02b29..5306dd1 100644
--- a/bbot/agent/tftp.hxx
+++ b/bbot/agent/tftp.hxx
@@ -1,5 +1,5 @@
// file : bbot/agent/tftp.hxx -*- C++ -*-
-// license : TBC; see accompanying LICENSE file
+// license : MIT; see accompanying LICENSE file
#ifndef BBOT_AGENT_TFTP_HXX
#define BBOT_AGENT_TFTP_HXX
@@ -29,7 +29,7 @@ namespace bbot
port () const;
// Wait for a TFTP request for up to the specified number of seconds. If
- // increment is not 0, then wait in the specified incremenets (i.e., wait
+ // increment is not 0, then wait in the specified increments (i.e., wait
// for up to that number of seconds; useful when one needs to also
// periodically check for something else). Update the timeout value as
// well as return true if a request was served and false otherwise.
diff --git a/bbot/bbot-agent@.service b/bbot/bbot-agent@.service
index 294fde7..253cc61 100644
--- a/bbot/bbot-agent@.service
+++ b/bbot/bbot-agent@.service
@@ -14,7 +14,8 @@ Type=simple
Environment=VERBOSE=3
Environment=CPU=1
-Environment=RAM=1048576
+Environment=RAM_BUILD=4194304
+Environment=RAM_AUXIL=0
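+
+# Note: the RAM values are in KiB; for example (hypothetical sizing),
+# RAM_BUILD=4194304 gives the build machine 4GiB.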
Environment=BRIDGE=br1
Environment=AUTH_KEY=
@@ -22,6 +23,7 @@ Environment=AUTH_KEY=
Environment=INTERACTIVE=false
Environment=BOOTSTRAP_TIMEOUT=3600
+Environment=BOOTSTRAP_AUXILIARY=900
Environment=BOOTSTRAP_RETRIES=2
Environment=BUILD_TIMEOUT=5400
@@ -38,6 +40,8 @@ Environment=TOOLCHAIN_NUM=1
Environment=TOOLCHAIN_VER=
Environment=TOOLCHAIN_ID=
+Environment=INSTANCE_MAX=0
+
Environment="CONTROLLER_URL="
Environment="CONTROLLER_TRUST="
@@ -47,11 +51,13 @@ ExecStart=/build/bots/default/bin/bbot-agent \
--systemd-daemon \
--verbose ${VERBOSE} \
--cpu ${CPU} \
- --ram ${RAM} \
+ --build-ram ${RAM_BUILD} \
+ --auxiliary-ram ${RAM_AUXIL} \
--bridge ${BRIDGE} \
--auth-key ${AUTH_KEY} \
--interactive ${INTERACTIVE} \
--bootstrap-timeout ${BOOTSTRAP_TIMEOUT} \
+ --bootstrap-auxiliary ${BOOTSTRAP_AUXILIARY} \
--bootstrap-retries ${BOOTSTRAP_RETRIES} \
--build-timeout ${BUILD_TIMEOUT} \
--build-retries ${BUILD_RETRIES} \
@@ -63,11 +69,13 @@ ExecStart=/build/bots/default/bin/bbot-agent \
--toolchain-num ${TOOLCHAIN_NUM} \
--toolchain-ver ${TOOLCHAIN_VER} \
--toolchain-id ${TOOLCHAIN_ID} \
+ --instance-max ${INSTANCE_MAX} \
--instance %i \
$CONTROLLER_TRUST \
$CONTROLLER_URL
Nice=0
+CPUAffinity=
User=build
Group=build
WorkingDirectory=~
diff --git a/bbot/bootstrap-manifest.cxx b/bbot/bootstrap-manifest.cxx
index d8597d6..ff24abb 100644
--- a/bbot/bootstrap-manifest.cxx
+++ b/bbot/bootstrap-manifest.cxx
@@ -1,10 +1,10 @@
// file : bbot/bootstrap-manifest.cxx -*- C++ -*-
-// license : TBC; see accompanying LICENSE file
+// license : MIT; see accompanying LICENSE file
#include <bbot/bootstrap-manifest.hxx>
-#include <libbutl/manifest-parser.mxx>
-#include <libbutl/manifest-serializer.mxx>
+#include <libbutl/manifest-parser.hxx>
+#include <libbutl/manifest-serializer.hxx>
using namespace butl;
diff --git a/bbot/bootstrap-manifest.hxx b/bbot/bootstrap-manifest.hxx
index 5474882..ccad8a1 100644
--- a/bbot/bootstrap-manifest.hxx
+++ b/bbot/bootstrap-manifest.hxx
@@ -1,5 +1,5 @@
// file : bbot/bootstrap-manifest.hxx -*- C++ -*-
-// license : TBC; see accompanying LICENSE file
+// license : MIT; see accompanying LICENSE file
#ifndef BBOT_BOOTSTRAP_MANIFEST_HXX
#define BBOT_BOOTSTRAP_MANIFEST_HXX
@@ -35,7 +35,7 @@ namespace bbot
: versions (move (v)) {}
public:
- bootstrap_manifest () = default; // VC export.
+ bootstrap_manifest () = default;
bootstrap_manifest (butl::manifest_parser&, bool ignore_unknown = false);
bootstrap_manifest (butl::manifest_parser&,
butl::manifest_name_value start,
diff --git a/bbot/bootstrap-manifest.test.cxx b/bbot/bootstrap-manifest.test.cxx
index 1310631..26e027a 100644
--- a/bbot/bootstrap-manifest.test.cxx
+++ b/bbot/bootstrap-manifest.test.cxx
@@ -1,17 +1,20 @@
// file : bbot/bootstrap-manifest.test.cxx -*- C++ -*-
-// license : TBC; see accompanying LICENSE file
+// license : MIT; see accompanying LICENSE file
#include <ios> // ios_base::failbit, ios_base::badbit
#include <iostream>
-#include <libbutl/manifest-parser.mxx>
-#include <libbutl/manifest-serializer.mxx>
+#include <libbutl/manifest-parser.hxx>
+#include <libbutl/manifest-serializer.hxx>
#include <bbot/types.hxx>
#include <bbot/utility.hxx>
#include <bbot/bootstrap-manifest.hxx>
+#undef NDEBUG
+#include <cassert>
+
using namespace std;
using namespace butl;
using namespace bbot;
diff --git a/bbot/bootstrap-manifest.test.testscript b/bbot/bootstrap-manifest.test.testscript
index 9d835eb..decddff 100644
--- a/bbot/bootstrap-manifest.test.testscript
+++ b/bbot/bootstrap-manifest.test.testscript
@@ -1,5 +1,5 @@
# file : bbot/bootstrap-manifest.test.testscript
-# license : TBC; see accompanying LICENSE file
+# license : MIT; see accompanying LICENSE file
: bootstrap-manifest
:
diff --git a/bbot/buildfile b/bbot/buildfile
index ce74c7a..bbca810 100644
--- a/bbot/buildfile
+++ b/bbot/buildfile
@@ -1,5 +1,5 @@
# file : bbot/buildfile
-# license : TBC; see accompanying LICENSE file
+# license : MIT; see accompanying LICENSE file
# Systemd .service file.
#
@@ -21,22 +21,16 @@ import libs += libbbot%lib{bbot}
# Agent is a systemd service.
#
-# @@ Have to package on Linux.
-#
-if ($cxx.target.class == "linux")
-{
- ./: exe{bbot-agent} service{'bbot-agent@'}
-
- exe{bbot-agent}: agent/{hxx ixx txx cxx}{+agent} agent/libue{bbot}
+./: exe{bbot-agent} \
+ service{'bbot-agent@'}: include = ($cxx.target.class == 'linux')
- agent/
- {
- libue{bbot}: {hxx ixx txx cxx}{* -agent -agent-options -*.test...} \
- {hxx ixx cxx}{agent-options} \
- ../libue{bbot}
- }
+exe{bbot-agent}: agent/{hxx ixx txx cxx}{+agent} agent/libue{bbot}
- cxx.libs += -ldl
+agent/
+{
+ libue{bbot}: {hxx ixx txx cxx}{* -agent -agent-options -*.test...} \
+ {hxx ixx cxx}{agent-options} \
+ ../libue{bbot}
}
./: exe{bbot-worker}: worker/{hxx ixx txx cxx}{+worker} worker/libue{bbot}
@@ -53,6 +47,9 @@ libue{bbot}: {hxx ixx txx cxx}{* -common-options -version -*.test...} \
{hxx}{version} \
$libs
+if ($cxx.target.class == 'linux')
+ cxx.libs += -ldl
+
hxx{version}: in{version} $src_root/manifest
# Unit tests.
@@ -94,13 +91,15 @@ if $cli.configured
--cxx-prologue "#include <bbot/types-parsers.hxx>" \
--cli-namespace bbot::cli --generate-specifier --generate-parse
+ # No usage.
+ #
cli.cxx{common-options}: cli.options += --include-prefix bbot \
---guard-prefix BBOT # No usage.
+--guard-prefix BBOT --generate-vector-scanner --generate-group-scanner
# Usage options.
#
cli.options += --suppress-undocumented --long-usage --ansi-color \
---page-usage 'bbot::print_$name$_' --option-length 23
+--page-usage 'bbot::print_$name$_' --option-length 25
agent/cli.cxx{agent-options}: cli.options += --include-prefix bbot/agent \
--guard-prefix BBOT_AGENT
diff --git a/bbot/common.cli b/bbot/common.cli
index 494f4fa..15c4bea 100644
--- a/bbot/common.cli
+++ b/bbot/common.cli
@@ -1,5 +1,5 @@
// file : bbot/common.cli
-// license : TBC; see accompanying LICENSE file
+// license : MIT; see accompanying LICENSE file
include <bbot/types.hxx>;
diff --git a/bbot/diagnostics.cxx b/bbot/diagnostics.cxx
index fc85f3a..39f95aa 100644
--- a/bbot/diagnostics.cxx
+++ b/bbot/diagnostics.cxx
@@ -1,5 +1,5 @@
// file : bbot/diagnostics.cxx -*- C++ -*-
-// license : TBC; see accompanying LICENSE file
+// license : MIT; see accompanying LICENSE file
#include <bbot/diagnostics.hxx>
diff --git a/bbot/diagnostics.hxx b/bbot/diagnostics.hxx
index 63441ed..b98e7c5 100644
--- a/bbot/diagnostics.hxx
+++ b/bbot/diagnostics.hxx
@@ -1,10 +1,10 @@
// file : bbot/diagnostics.hxx -*- C++ -*-
-// license : TBC; see accompanying LICENSE file
+// license : MIT; see accompanying LICENSE file
#ifndef BBOT_DIAGNOSTICS_HXX
#define BBOT_DIAGNOSTICS_HXX
-#include <libbutl/diagnostics.mxx>
+#include <libbutl/diagnostics.hxx>
#include <bbot/types.hxx> // Note: not <bbot/utility>.
@@ -134,9 +134,9 @@ namespace bbot
indent,
nullptr,
data,
- [](const diag_record& r)
+ [](const diag_record& r, butl::diag_writer* w)
{
- r.flush ();
+ r.flush (w);
throw failed ();
}) {}
diff --git a/bbot/machine-manifest.cxx b/bbot/machine-manifest.cxx
index 0292824..bddf4d5 100644
--- a/bbot/machine-manifest.cxx
+++ b/bbot/machine-manifest.cxx
@@ -1,14 +1,14 @@
// file : bbot/machine-manifest.cxx -*- C++ -*-
-// license : TBC; see accompanying LICENSE file
+// license : MIT; see accompanying LICENSE file
#include <bbot/machine-manifest.hxx>
#include <sstream>
-#include <libbutl/tab-parser.mxx>
-#include <libbutl/string-parser.mxx>
-#include <libbutl/manifest-parser.mxx>
-#include <libbutl/manifest-serializer.mxx>
+#include <libbutl/tab-parser.hxx>
+#include <libbutl/string-parser.hxx>
+#include <libbutl/manifest-parser.hxx>
+#include <libbutl/manifest-serializer.hxx>
using namespace std;
using namespace butl;
@@ -41,7 +41,7 @@ namespace bbot
{
if (t == "kvm") return machine_type::kvm;
else if (t == "nspawn") return machine_type::nspawn;
- else throw invalid_argument ("invalid machine type '" + t + "'");
+ else throw invalid_argument ("invalid machine type '" + t + '\'');
}
// machine_manifest
@@ -326,24 +326,36 @@ namespace bbot
if (!machine.mac)
bad_name ("mac address must be present in machine manifest");
- nv = p.next ();
- if (nv.empty ())
- bad_value ("toolchain manifest expected");
+ if (machine.effective_role () == machine_role::build)
+ {
+ nv = p.next ();
+ if (nv.empty ())
+ bad_value ("toolchain manifest expected");
- toolchain = toolchain_manifest (p, nv, iu);
+ toolchain = toolchain_manifest (p, nv, iu);
- nv = p.next ();
- if (nv.empty ())
- bad_value ("bootstrap manifest expected");
+ nv = p.next ();
+ if (nv.empty ())
+ bad_value ("bootstrap manifest expected");
- bootstrap = bootstrap_manifest (p, nv, iu);
+ bootstrap = bootstrap_manifest (p, nv, iu);
- // Make sure this is the end.
- //
- nv = p.next ();
- if (!nv.empty ())
- throw parsing (p.name (), nv.name_line, nv.name_column,
- "single bootstrapped machine manifest expected");
+ // Make sure this is the end.
+ //
+ nv = p.next ();
+ if (!nv.empty ())
+ throw parsing (p.name (), nv.name_line, nv.name_column,
+ "single bootstrapped machine manifest expected");
+ }
+ else
+ {
+ // Make sure this is the end.
+ //
+ nv = p.next ();
+ if (!nv.empty ())
+ throw parsing (p.name (), nv.name_line, nv.name_column,
+ "single machine manifest expected");
+ }
}
void bootstrapped_machine_manifest::
@@ -359,8 +371,12 @@ namespace bbot
"mac address must be present in machine manifest");
machine.serialize (s);
- toolchain.serialize (s);
- bootstrap.serialize (s);
+
+ if (machine.effective_role () == machine_role::build)
+ {
+ toolchain.serialize (s);
+ bootstrap.serialize (s);
+ }
s.next ("", ""); // End of stream.
}
diff --git a/bbot/machine-manifest.hxx b/bbot/machine-manifest.hxx
index c29493f..d500957 100644
--- a/bbot/machine-manifest.hxx
+++ b/bbot/machine-manifest.hxx
@@ -1,5 +1,5 @@
// file : bbot/machine-manifest.hxx -*- C++ -*-
-// license : TBC; see accompanying LICENSE file
+// license : MIT; see accompanying LICENSE file
#ifndef BBOT_MACHINE_MANIFEST_HXX
#define BBOT_MACHINE_MANIFEST_HXX
@@ -49,14 +49,20 @@ namespace bbot
strings c)
: machine_header_manifest (std::move (i),
std::move (n),
- std::move (s)),
+ std::move (s),
+ //
+ // @@ TMP AUXILIARY
+ //
+ nullopt /* role */,
+ nullopt /* ram_minimum */,
+ nullopt /* ram_maximum */),
type (t),
mac (std::move (m)),
options (std::move (o)),
changes (std::move (c)) {}
public:
- machine_manifest () = default; // VC export.
+ machine_manifest () = default;
machine_manifest (butl::manifest_parser&, bool ignore_unknown = false);
machine_manifest (butl::manifest_parser&,
butl::manifest_name_value start,
@@ -80,7 +86,7 @@ namespace bbot
toolchain_manifest (string i): id (i) {}
public:
- toolchain_manifest () = default; // VC export.
+ toolchain_manifest () = default;
toolchain_manifest (butl::manifest_parser&, bool ignore_unknown = false);
toolchain_manifest (butl::manifest_parser&,
butl::manifest_name_value start,
@@ -91,7 +97,8 @@ namespace bbot
};
// The manifest stored in <name>-<toolchain>/ consists of the machine
- // manifest (original), toolchain manifest, and bootstrap manifest.
+ // manifest (original) and, if this is a build machine, toolchain manifest and
+ // bootstrap manifest.
//
class bootstrapped_machine_manifest
{
@@ -106,7 +113,7 @@ namespace bbot
: machine (move (m)), toolchain (move (t)), bootstrap (move (b)) {}
public:
- bootstrapped_machine_manifest () = default; // VC export.
+ bootstrapped_machine_manifest () = default;
bootstrapped_machine_manifest (butl::manifest_parser&,
bool ignore_unknown = false);
diff --git a/bbot/machine-manifest.test.cxx b/bbot/machine-manifest.test.cxx
index 099e0f7..061f04e 100644
--- a/bbot/machine-manifest.test.cxx
+++ b/bbot/machine-manifest.test.cxx
@@ -1,17 +1,20 @@
// file : bbot/machine-manifest.test.cxx -*- C++ -*-
-// license : TBC; see accompanying LICENSE file
+// license : MIT; see accompanying LICENSE file
#include <ios> // ios_base::failbit, ios_base::badbit
#include <iostream>
-#include <libbutl/manifest-parser.mxx>
-#include <libbutl/manifest-serializer.mxx>
+#include <libbutl/manifest-parser.hxx>
+#include <libbutl/manifest-serializer.hxx>
#include <bbot/types.hxx>
#include <bbot/utility.hxx>
#include <bbot/machine-manifest.hxx>
+#undef NDEBUG
+#include <cassert>
+
using namespace std;
using namespace butl;
using namespace bbot;
diff --git a/bbot/machine-manifest.test.testscript b/bbot/machine-manifest.test.testscript
index 5ef1a45..e358ff3 100644
--- a/bbot/machine-manifest.test.testscript
+++ b/bbot/machine-manifest.test.testscript
@@ -1,5 +1,5 @@
# file : bbot/machine-manifest.test.testscript
-# license : TBC; see accompanying LICENSE file
+# license : MIT; see accompanying LICENSE file
: machine-manifest
:
@@ -19,7 +19,7 @@
type: kvm
mac: de:ad:be:ef:de:ad
options: -device "virtio-scsi-pci,id=scsi" -device "scsi-hd,drive=disk0"
- changes: \
+ changes:\
0.7.0
- mac is changed to de:ad:be:ef:de:ad
- increased disk size to 30GB
@@ -34,7 +34,7 @@
name: windows_10-msvc_14
summary: Windows 10 build 1607 with VC 14 update 3
type: kvm
- options: \
+ options:\
-device "virtio-scsi-pci,id=scsi"
-device "scsi-hd,drive=disk0"
\
@@ -150,7 +150,7 @@
id: a2b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855
name: windows_10-msvc_14
summary: Windows 10 build 1607 with VC 14 update 3
- options: \
+ options:\
-device "virtio-scsi-pci,id=scsi"
-device "scsi-hd,drive=disk0
\
@@ -238,27 +238,6 @@
{
test.options += -bm
- : valid
- :
- {
- : all-values
- :
- $* <<EOF >>EOF
- : 1
- :
- id: a2b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855
- name: windows_10-msvc_14
- summary: Windows 10 build 1607 with VC 14 update 3
- type: kvm
- mac: de:ad:be:ef:de:ad
- :
- id: a2b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855
- :
- bbot-version: 1.1.2
- libbbot-version: 1.1.1
- EOF
- }
-
: unknown
:
$* <<EOI 2>"stdin:2:1: error: unknown name 'x' in bootstrapped machine manifest" == 1
@@ -272,40 +251,107 @@
: 1
EOI
- : no-machine-mac
+ : build-role
:
- $* <<EOI 2>'stdin:2:1: error: mac address must be present in machine manifest' == 1
- : 1
- :
- id: a2b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855
- name: windows_10-msvc_14
- summary: Windows 10 build 1607 with VC 14 update 3
- type: kvm
- EOI
+ {
+ : valid
+ :
+ {
+ : all-values
+ :
+ $* <<EOF >>EOF
+ : 1
+ :
+ id: a2b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855
+ name: windows_10-msvc_14
+ summary: Windows 10 build 1607 with VC 14 update 3
+ type: kvm
+ mac: de:ad:be:ef:de:ad
+ :
+ id: a2b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855
+ :
+ bbot-version: 1.1.2
+ libbbot-version: 1.1.1
+ EOF
+ }
- : no-toolchain
- :
- $* <<EOI 2>'stdin:8:1: error: toolchain manifest expected' == 1
- : 1
- :
- id: a2b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855
- name: windows_10-msvc_14
- summary: Windows 10 build 1607 with VC 14 update 3
- type: kvm
- mac: de:ad:be:ef:de:ad
- EOI
+ : no-machine-mac
+ :
+ $* <<EOI 2>'stdin:2:1: error: mac address must be present in machine manifest' == 1
+ : 1
+ :
+ id: a2b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855
+ name: windows_10-msvc_14
+ summary: Windows 10 build 1607 with VC 14 update 3
+ type: kvm
+ EOI
- : no-bootstrap
- :
- $* <<EOI 2>'stdin:10:1: error: bootstrap manifest expected' == 1
- : 1
- :
- id: a2b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855
- name: windows_10-msvc_14
- summary: Windows 10 build 1607 with VC 14 update 3
- type: kvm
- mac: de:ad:be:ef:de:ad
+ : no-toolchain
+ :
+ $* <<EOI 2>'stdin:8:1: error: toolchain manifest expected' == 1
+ : 1
+ :
+ id: a2b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855
+ name: windows_10-msvc_14
+ summary: Windows 10 build 1607 with VC 14 update 3
+ type: kvm
+ mac: de:ad:be:ef:de:ad
+ EOI
+
+ : no-bootstrap
+ :
+ $* <<EOI 2>'stdin:10:1: error: bootstrap manifest expected' == 1
+ : 1
+ :
+ id: a2b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855
+ name: windows_10-msvc_14
+ summary: Windows 10 build 1607 with VC 14 update 3
+ type: kvm
+ mac: de:ad:be:ef:de:ad
+ :
+ id: a2b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855
+ EOI
+ }
+
+ : auxiliary-role
:
- id: a2b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855
- EOI
+ {
+ : valid
+ :
+ {
+ : all-values
+ :
+ $* <<EOF >>EOF
+ : 1
+ :
+ id: x86_64-linux_debian_12-postgresql_15-1.0
+ name: x86_64-linux_debian_12-postgresql_15
+ summary: Debian 12 "bookworm" with PostgreSQL 15.6.0 (auxiliary machine)
+ role: auxiliary
+ ram-minimum: 1048576
+ type: kvm
+ mac: e6:38:72:53:61:ae
+ changes:\
+ 1.0
+ - clone off linux_debian_12-small-1.0
+ - postgresql-15 15.6.0+deb12u1
+ \
+ EOF
+ }
+
+ : unexpected-manifest
+ :
+ $* <<EOI 2>'stdin:10:1: error: single machine manifest expected' == 1
+ : 1
+ :
+ id: x86_64-linux_debian_12-postgresql_15-1.0
+ name: x86_64-linux_debian_12-postgresql_15
+ summary: Debian 12 "bookworm" with PostgreSQL 15.6.0 (auxiliary machine)
+ role: auxiliary
+ ram-minimum: 1048576
+ type: kvm
+ mac: e6:38:72:53:61:ae
+ :
+ EOI
+ }
}
diff --git a/bbot/types-parsers.cxx b/bbot/types-parsers.cxx
index c4eff70..ae98caa 100644
--- a/bbot/types-parsers.cxx
+++ b/bbot/types-parsers.cxx
@@ -1,5 +1,5 @@
// file : bbot/types-parsers.cxx -*- C++ -*-
-// license : TBC; see accompanying LICENSE file
+// license : MIT; see accompanying LICENSE file
#include <bbot/types-parsers.hxx>
diff --git a/bbot/types-parsers.hxx b/bbot/types-parsers.hxx
index 23fc95b..f1414a5 100644
--- a/bbot/types-parsers.hxx
+++ b/bbot/types-parsers.hxx
@@ -1,5 +1,5 @@
// file : bbot/types-parsers.hxx -*- C++ -*-
-// license : TBC; see accompanying LICENSE file
+// license : MIT; see accompanying LICENSE file
// CLI parsers, included into the generated source files.
//
diff --git a/bbot/types.hxx b/bbot/types.hxx
index a39abe2..ea08bc6 100644
--- a/bbot/types.hxx
+++ b/bbot/types.hxx
@@ -1,5 +1,5 @@
// file : bbot/types.hxx -*- C++ -*-
-// license : TBC; see accompanying LICENSE file
+// license : MIT; see accompanying LICENSE file
#ifndef BBOT_TYPES_HXX
#define BBOT_TYPES_HXX
@@ -19,13 +19,13 @@
#include <stdexcept> // logic_error, invalid_argument, runtime_error
#include <system_error>
-#include <libbutl/path.mxx>
-#include <libbutl/optional.mxx>
-#include <libbutl/fdstream.mxx>
+#include <libbutl/path.hxx>
+#include <libbutl/optional.hxx>
+#include <libbutl/fdstream.hxx>
#include <libbutl/lz4-stream.hxx>
-#include <libbutl/vector-view.mxx>
-#include <libbutl/small-vector.mxx>
-#include <libbutl/standard-version.mxx>
+#include <libbutl/vector-view.hxx>
+#include <libbutl/small-vector.hxx>
+#include <libbutl/standard-version.hxx>
namespace bbot
{
@@ -66,12 +66,12 @@ namespace bbot
using std::generic_category;
using io_error = std::ios_base::failure;
- // <libbutl/optional.mxx>
+ // <libbutl/optional.hxx>
//
using butl::optional;
using butl::nullopt;
- // <libbutl/fdstream.mxx>
+ // <libbutl/fdstream.hxx>
//
using butl::auto_fd;
using butl::ifdstream;
@@ -82,15 +82,15 @@ namespace bbot
using olz4stream = butl::lz4::ostream;
using ilz4stream = butl::lz4::istream;
- // <libbutl/vector-view.mxx>
+ // <libbutl/vector-view.hxx>
//
using butl::vector_view;
- // <libbutl/small-vector.mxx>
+ // <libbutl/small-vector.hxx>
//
using butl::small_vector;
- // <libbutl/path.mxx>
+ // <libbutl/path.hxx>
//
using butl::path;
using butl::dir_path;
@@ -100,7 +100,7 @@ namespace bbot
using paths = std::vector<path>;
using dir_paths = std::vector<dir_path>;
- // <libbutl/standard-version.mxx>
+ // <libbutl/standard-version.hxx>
//
using butl::standard_version;
}
diff --git a/bbot/utility.hxx b/bbot/utility.hxx
index b93f8b6..7758db4 100644
--- a/bbot/utility.hxx
+++ b/bbot/utility.hxx
@@ -1,22 +1,23 @@
// file : bbot/utility.hxx -*- C++ -*-
-// license : TBC; see accompanying LICENSE file
+// license : MIT; see accompanying LICENSE file
#ifndef BBOT_UTILITY_HXX
#define BBOT_UTILITY_HXX
-#include <memory> // make_shared()
-#include <string> // to_string(), stoull()
-#include <utility> // move(), forward(), declval(), make_pair()
-#include <cassert> // assert()
-#include <iterator> // make_move_iterator()
+#include <memory> // make_shared()
+#include <string> // to_string(), stoull()
+#include <utility> // move(), forward(), declval(), make_pair()
+#include <cassert> // assert()
+#include <iterator> // make_move_iterator()
+#include <algorithm> // *
#include <libbutl/ft/lang.hxx>
-#include <libbutl/curl.mxx>
-#include <libbutl/process.mxx>
-#include <libbutl/process-io.mxx>
-#include <libbutl/utility.mxx> // icasecmp(), reverse_iterate(), etc
-#include <libbutl/filesystem.mxx>
+#include <libbutl/curl.hxx>
+#include <libbutl/process.hxx>
+#include <libbutl/process-io.hxx>
+#include <libbutl/utility.hxx> // icasecmp(), reverse_iterate(), etc
+#include <libbutl/filesystem.hxx>
#include <bbot/types.hxx>
#include <bbot/version.hxx>
@@ -33,9 +34,10 @@ namespace bbot
using std::to_string;
using std::stoull;
- // <libbutl/utility.mxx>
+ // <libbutl/utility.hxx>
//
using butl::icasecmp;
+ using butl::sanitize_identifier;
using butl::reverse_iterate;
using butl::make_guard;
@@ -45,7 +47,7 @@ namespace bbot
using butl::setenv;
using butl::unsetenv;
- // <libbutl/filesystem.mxx>
+ // <libbutl/filesystem.hxx>
//
using butl::auto_rmdir;
using butl::auto_rmfile;
@@ -147,7 +149,10 @@ namespace bbot
template <typename T>
void
- serialize_manifest (const T&, const path&, const char* what);
+ serialize_manifest (const T&,
+ const path&,
+ const char* what,
+ bool long_lines = false);
template <typename T>
void
@@ -155,7 +160,8 @@ namespace bbot
ostream&,
const string& name,
const char* what,
- bool fail_hard = true);
+ bool fail_hard = true,
+ bool long_lines = false);
}
#include <bbot/utility.txx>
diff --git a/bbot/utility.txx b/bbot/utility.txx
index 3199a61..d2aad10 100644
--- a/bbot/utility.txx
+++ b/bbot/utility.txx
@@ -1,12 +1,12 @@
// file : bbot/utility.txx -*- C++ -*-
-// license : TBC; see accompanying LICENSE file
+// license : MIT; see accompanying LICENSE file
#include <iostream> // cin
-#include <libbutl/fdstream.mxx>
+#include <libbutl/fdstream.hxx>
-#include <libbutl/manifest-parser.mxx>
-#include <libbutl/manifest-serializer.mxx>
+#include <libbutl/manifest-parser.hxx>
+#include <libbutl/manifest-serializer.hxx>
#include <bbot/diagnostics.hxx>
@@ -152,9 +152,11 @@ namespace bbot
bool d (f.extension () == "lz4");
- ifdstream ifs (f, (d
- ? ifdstream::badbit
- : ifdstream::badbit | ifdstream::failbit));
+ ifdstream ifs (f,
+ (d ? fdopen_mode::binary : fdopen_mode::none),
+ (d
+ ? ifdstream::badbit
+ : ifdstream::badbit | ifdstream::failbit));
ilz4stream izs;
if (d)
@@ -206,7 +208,7 @@ namespace bbot
template <typename T>
void
- serialize_manifest (const T& m, const path& f, const char* what)
+ serialize_manifest (const T& m, const path& f, const char* what, bool ll)
{
using namespace std;
using namespace butl;
@@ -216,7 +218,7 @@ namespace bbot
ofdstream ofs (f, fdopen_mode::binary);
auto_rmfile arm (f); // Try to remove on failure ignoring errors.
- serialize_manifest (m, ofs, f.string (), what, true);
+ serialize_manifest (m, ofs, f.string (), what, true, ll);
ofs.close ();
arm.cancel ();
@@ -233,13 +235,14 @@ namespace bbot
ostream& os,
const string& name,
const char* what,
- bool fh)
+ bool fh,
+ bool ll)
{
using namespace butl;
try
{
- manifest_serializer s (os, name);
+ manifest_serializer s (os, name, ll);
m.serialize (s);
return;
}
diff --git a/bbot/version.hxx.in b/bbot/version.hxx.in
index 46fb7e0..1720804 100644
--- a/bbot/version.hxx.in
+++ b/bbot/version.hxx.in
@@ -1,5 +1,5 @@
// file : bbot/version.hxx.in -*- C++ -*-
-// license : TBC; see accompanying LICENSE file
+// license : MIT; see accompanying LICENSE file
#ifndef BBOT_VERSION // Note: using the version macro itself.
diff --git a/bbot/worker/worker.cli b/bbot/worker/worker.cli
index 0edefe0..cd7159a 100644
--- a/bbot/worker/worker.cli
+++ b/bbot/worker/worker.cli
@@ -1,5 +1,5 @@
// file : bbot/worker.cli
-// license : TBC; see accompanying LICENSE file
+// license : MIT; see accompanying LICENSE file
include <bbot/common.cli>;
diff --git a/bbot/worker/worker.cxx b/bbot/worker/worker.cxx
index eb7f50b..8fb7796 100644
--- a/bbot/worker/worker.cxx
+++ b/bbot/worker/worker.cxx
@@ -1,26 +1,28 @@
// file : bbot/worker.cxx -*- C++ -*-
-// license : TBC; see accompanying LICENSE file
+// license : MIT; see accompanying LICENSE file
#ifndef _WIN32
# include <signal.h> // signal()
#else
-# include <libbutl/win32-utility.hxx>
+# include <libbutl/win32-utility.hxx> // SetErrorMode(), Sleep()
#endif
#include <map>
#include <regex>
-#include <cstring> // strchr()
+#include <cstring> // strchr(), strncmp()
#include <sstream>
#include <iostream>
-#include <algorithm> // find(), find_if(), remove_if()
-#include <libbutl/b.mxx>
-#include <libbutl/pager.mxx>
-#include <libbutl/prompt.mxx>
-#include <libbutl/utility.mxx> // to_utf8()
-#include <libbutl/timestamp.mxx>
-#include <libbutl/filesystem.mxx>
-#include <libbutl/string-parser.mxx>
+#include <libbutl/b.hxx>
+#include <libbutl/pager.hxx>
+#include <libbutl/prompt.hxx>
+#include <libbutl/utility.hxx> // to_utf8(), eof()
+#include <libbutl/timestamp.hxx>
+#include <libbutl/filesystem.hxx>
+#include <libbutl/string-parser.hxx>
+#include <libbutl/manifest-serializer.hxx>
+
+#include <libbutl/json/parser.hxx>
#include <libbbot/manifest.hxx>
@@ -60,7 +62,7 @@ namespace bbot
const size_t tftp_get_retries (3); // Task request retries (see startup()).
}
-bool
+static bool
exists (const dir_path& d)
try
{
@@ -71,6 +73,17 @@ catch (const system_error& e)
fail << "unable to stat path " << d << ": " << e << endf;
}
+static bool
+exists (const path& f)
+try
+{
+ return file_exists (f, true /* follow_symlinks */);
+}
+catch (const system_error& e)
+{
+ fail << "unable to stat path " << f << ": " << e << endf;
+}
+
static dir_path
current_directory ()
try
@@ -82,11 +95,23 @@ catch (const system_error& e)
fail << "unable to obtain current directory: " << e << endf;
}
-static dir_path
-change_wd (tracer& t, string* log, const dir_path& d, bool create = false)
-try
+static void
+#ifndef _WIN32
+mk_p (tracer& t, string* log, const dir_path& d, bool sudo = false)
{
- if (create)
+ if (sudo)
+ {
+ if (log != nullptr)
+ *log += "sudo mkdir -p " + d.representation () + '\n';
+
+ run_io (t, 0, 1, 2, "sudo", "mkdir", "-p", d);
+ }
+ else
+#else
+mk_p (tracer& t, string* log, const dir_path& d, bool = false)
+{
+#endif
+ try
{
if (verb >= 3)
t << "mkdir -p " << d;
@@ -96,21 +121,55 @@ try
try_mkdir_p (d);
}
+ catch (const system_error& e)
+ {
+ fail << "unable to create directory " << d << ": " << e << endf;
+ }
+}
- dir_path r (current_directory ());
+static void
+mk (tracer& t, string* log, const dir_path& d)
+try
+{
+ if (verb >= 3)
+ t << "mkdir " << d;
+ if (log != nullptr)
+ *log += "mkdir " + d.representation () + '\n';
+
+ try_mkdir (d);
+}
+catch (const system_error& e)
+{
+ fail << "unable to create directory " << d << ": " << e << endf;
+}
+
+static bool
+empty (const dir_path& d)
+try
+{
+ return dir_empty (d);
+}
+catch (const system_error& e)
+{
+ fail << "unable to scan directory " << d << ": " << e << endf;
+}
+
+static void
+cp_into (tracer& t, string* log, const path& p, const dir_path& d)
+try
+{
if (verb >= 3)
- t << "cd " << d;
+ t << "cp " << p << ' ' << d;
if (log != nullptr)
- *log += "cd " + d.representation () + '\n';
+ *log += "cp " + p.string () + ' ' + d.representation () + '\n';
- dir_path::current_directory (d);
- return r;
+ cpfile_into (p, d);
}
catch (const system_error& e)
{
- fail << "unable to change current directory to " << d << ": " << e << endf;
+ fail << "unable to copy file " << p << " into " << d << ": " << e << endf;
}
static void
@@ -133,6 +192,25 @@ catch (const system_error& e)
}
static void
+mv_into (tracer& t, string* log, const path& from, const dir_path& into)
+try
+{
+ if (verb >= 3)
+ t << "mv " << from << ' ' << into;
+
+ if (log != nullptr)
+ *log += "mv " + from.representation () + ' ' + into.representation () +
+ "\n";
+
+ mventry_into (from, into);
+}
+catch (const system_error& e)
+{
+ fail << "unable to move entry '" << from << "' into '" << into << "': " << e
+ << endf;
+}
+
+static void
rm_r (tracer& t, string* log, const dir_path& d)
try
{
@@ -149,77 +227,330 @@ catch (const system_error& e)
fail << "unable to remove directory " << d << ": " << e << endf;
}
+static dir_path
+change_wd (tracer& t, string* log, const dir_path& d, bool create = false)
+try
+{
+ if (create)
+ mk_p (t, log, d);
+
+ dir_path r (current_directory ());
+
+ if (verb >= 3)
+ t << "cd " << d;
+
+ if (log != nullptr)
+ *log += "cd " + d.representation () + '\n';
+
+ dir_path::current_directory (d);
+ return r;
+}
+catch (const system_error& e)
+{
+ fail << "unable to change current directory to " << d << ": " << e << endf;
+}
+
// Step IDs.
//
+// NOTE: keep ids ordered according to the sequence of steps and remember to
+// update unreachable breakpoint checks if changing anything here.
+//
enum class step_id
{
- bpkg_module_create,
- bpkg_module_configure_add,
- bpkg_module_configure_fetch,
- bpkg_module_configure_build,
- bpkg_module_update,
- bpkg_module_test,
- bpkg_create,
+ // Common fallbacks for bpkg_*_create/b_test_installed_create and
+ // bpkg_*_configure_build/b_test_installed_configure, respectively. Note:
+ // not breakpoints.
+ //
+ b_create,
+ b_configure,
+
+ // Note that bpkg_module_* options are only used if the main package is a
+ // build system module (using just ~build2 otherwise). They also have no
+ // fallback (build system modules are just too different to try to handle
+ // them together with target and host; e.g., install root). However,
+ // bpkg_module_create is complemented with arguments from un-prefixed step
+ // ids, the same way as other *.create[_for_*] steps (note that un-prefixed
+ // steps are not fallbacks, they are always added first).
+ //
+ bpkg_create, // Breakpoint and base.
+ bpkg_target_create, //: b_create, bpkg_create
+ bpkg_host_create, //: b_create, bpkg_create
+ bpkg_module_create, //: no fallback
+
+ bpkg_link,
+
bpkg_configure_add,
bpkg_configure_fetch,
- bpkg_configure_build,
+
+  // Global (as opposed to package-specific) bpkg-pkg-build options (apply
+  // to all *_configure_build* steps). Note: not a breakpoint.
+ //
+ bpkg_global_configure_build,
+
+ // Note that bpkg_configure_build serves as a breakpoint for the
+ // bpkg-pkg-build call that configures (at once) the main package and all
+ // its external tests.
+ //
+ bpkg_configure_build, // Breakpoint and base.
+ bpkg_target_configure_build, //: b_configure, bpkg_configure_build
+ bpkg_host_configure_build, //: b_configure, bpkg_configure_build
+ bpkg_module_configure_build, //: b_configure, bpkg_configure_build
+
bpkg_update,
bpkg_test,
- bpkg_test_separate_configure_build,
- bpkg_test_separate_update,
- bpkg_test_separate_test,
+
+ // Note that separate test packages are configured as part of the
+ // bpkg_configure_build step above with options taken from
+  // bpkg_{target,host}_configure_build, depending on the tests package type.
+ //
+ bpkg_test_separate_update, //: bpkg_update
+ bpkg_test_separate_test, //: bpkg_test
+
+ // Note that we only perform the installation tests if this is a target
+ // package or a self-hosted configuration. Also note that this step is
+ // considered disabled if any of the bpkg_bindist_* steps is explicitly
+ // enabled.
+ //
bpkg_install,
- b_test_installed_create,
- b_test_installed_configure,
+
+ bbot_install_ldconfig, // Note: disabled by default.
+
+ // Note that the bpkg_bindist_* steps are mutually exclusive and the latest
+ // status change for them (via the leading +/- characters in the prefix)
+ // overrides all the previous ones. Disabled by default.
+ //
+ bpkg_bindist_debian,
+ bpkg_bindist_fedora,
+ bpkg_bindist_archive,
+
+ // Note that this step is considered disabled unless one of the
+ // bpkg_bindist_* steps is explicitly enabled. Note: not a breakpoint.
+ //
+ bbot_sys_install,
+
+ bbot_sys_install_apt_get_update,
+ bbot_sys_install_apt_get_install,
+ bbot_sys_install_dnf_install,
+ bbot_sys_install_tar_extract,
+
+ bbot_sys_install_ldconfig, // Note: disabled by default.
+
+ // Note: skipped for modules.
+ //
+ b_test_installed_create, //: b_create
+ b_test_installed_configure, //: b_configure
b_test_installed_test,
- bpkg_test_installed_create,
- bpkg_test_installed_configure_add,
- bpkg_test_installed_configure_fetch,
- bpkg_test_separate_installed_configure_build,
- bpkg_test_separate_installed_update,
- bpkg_test_separate_installed_test,
+
+ // Note that for a host package this can involve both run-time and build-
+ // time tests (which means we may also need a shared configuration for
+ // modules).
+ //
+  // The *_for_{target,host,module} suffixes denote the main package type,
+  // not the configuration being created, which will always be target (more
+  // precisely, target or host, but host only in the self-hosted case, which
+  // means it's the same as target).
+ //
+  // Note that if this is a non-self-hosted configuration, we can only end up
+  // here when building a target package and so can just use the *_create and
+  // *_build values in buildtabs.
+ //
+ bpkg_test_separate_installed_create, // Breakpoint and base.
+ bpkg_test_separate_installed_create_for_target, //: bpkg_test_separate_installed_create
+ bpkg_test_separate_installed_create_for_host, //: bpkg_test_separate_installed_create
+ bpkg_test_separate_installed_create_for_module, //: no fallback
+
+  bpkg_test_separate_installed_link, // Breakpoint only.
+ bpkg_test_separate_installed_configure_add, //: bpkg_configure_add
+ bpkg_test_separate_installed_configure_fetch, //: bpkg_configure_fetch
+
+ bpkg_test_separate_installed_configure_build, // Breakpoint and base.
+ bpkg_test_separate_installed_configure_build_for_target, //: bpkg_test_separate_installed_configure_build
+ bpkg_test_separate_installed_configure_build_for_host, //: bpkg_test_separate_installed_configure_build
+ bpkg_test_separate_installed_configure_build_for_module, //: bpkg_test_separate_installed_configure_build
+
+ bpkg_test_separate_installed_update, //: bpkg_update
+ bpkg_test_separate_installed_test, //: bpkg_test
+
+ bbot_sys_uninstall_apt_get_remove,
+ bbot_sys_uninstall_dnf_remove,
+
bpkg_uninstall,
+
+ bbot_bindist_upload, // Note: disabled by default, not a breakpoint.
+
+  // Note that this step is considered disabled if the upload/ directory is
+  // empty. Note: not a breakpoint.
+ //
+ bbot_upload,
+
+ bbot_upload_tar_create,
+ bbot_upload_tar_list,
+
end
};
static const strings step_id_str {
- "bpkg.module.create",
- "bpkg.module.configure.add",
- "bpkg.module.configure.fetch",
- "bpkg.module.configure.build",
- "bpkg.module.update",
- "bpkg.module.test",
+ "b.create",
+ "b.configure",
+
"bpkg.create",
+ "bpkg.target.create",
+ "bpkg.host.create",
+ "bpkg.module.create",
+
+ "bpkg.link",
+
"bpkg.configure.add",
"bpkg.configure.fetch",
+
+ "bpkg.global.configure.build",
+
"bpkg.configure.build",
+ "bpkg.target.configure.build",
+ "bpkg.host.configure.build",
+ "bpkg.module.configure.build",
+
"bpkg.update",
"bpkg.test",
- "bpkg.test-separate.configure.build",
+
"bpkg.test-separate.update",
"bpkg.test-separate.test",
+
"bpkg.install",
+ "bbot.install.ldconfig",
+
+ "bpkg.bindist.debian",
+ "bpkg.bindist.fedora",
+ "bpkg.bindist.archive",
+
+ "bbot.sys-install",
+ "bbot.sys-install.apt-get.update",
+ "bbot.sys-install.apt-get.install",
+ "bbot.sys-install.dnf.install",
+ "bbot.sys-install.tar.extract",
+ "bbot.sys-install.ldconfig",
+
"b.test-installed.create",
"b.test-installed.configure",
"b.test-installed.test",
- "bpkg.test-installed.create",
- "bpkg.test-installed.configure.add",
- "bpkg.test-installed.configure.fetch",
+
+ "bpkg.test-separate-installed.create",
+ "bpkg.test-separate-installed.create_for_target",
+ "bpkg.test-separate-installed.create_for_host",
+ "bpkg.test-separate-installed.create_for_module",
+
+ "bpkg.test-separate-installed.link",
+ "bpkg.test-separate-installed.configure.add",
+ "bpkg.test-separate-installed.configure.fetch",
+
"bpkg.test-separate-installed.configure.build",
+ "bpkg.test-separate-installed.configure.build_for_target",
+ "bpkg.test-separate-installed.configure.build_for_host",
+ "bpkg.test-separate-installed.configure.build_for_module",
+
"bpkg.test-separate-installed.update",
"bpkg.test-separate-installed.test",
+
+ "bbot.sys-uninstall.apt-get.remove",
+ "bbot.sys-uninstall.dnf.remove",
+
"bpkg.uninstall",
+
+ "bbot.bindist.upload",
+
+ "bbot.upload",
+ "bbot.upload.tar.create",
+ "bbot.upload.tar.list",
+
"end"};
+static inline const string&
+to_string (step_id s)
+{
+ return step_id_str[static_cast<size_t> (s)];
+}
+
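For illustration, a minimal standalone sketch (not from this commit; the helper name is hypothetical) of how a dot-separated step id decomposes into the argument-lookup prefixes, from least to most specific, that the step_args() machinery below enumerates:

  #include <string>
  #include <vector>

  // Hypothetical helper: e.g., "bpkg.target.create" ->
  // {"bpkg", "bpkg.target", "bpkg.target.create"}.
  //
  static std::vector<std::string>
  lookup_prefixes (const std::string& sid)
  {
    std::vector<std::string> r;

    for (std::string::size_type n (0);; ++n)
    {
      n = sid.find ('.', n);

      r.push_back (n == std::string::npos ? sid : std::string (sid, 0, n));

      if (n == std::string::npos)
        break;
    }

    return r;
  }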
using std::regex;
namespace regex_constants = std::regex_constants;
using regexes = vector<regex>;
+// UTF-8-sanitize and log the line. Also print it to a tracer, if specified,
+// or to stderr otherwise at verbosity level 3 or higher.
+//
+static void
+log_line (string&& l, string& log, tracer* trace = nullptr)
+{
+ if (verb >= 3)
+ {
+ if (trace != nullptr)
+ *trace << l;
+ else
+ text << l;
+ }
+
+ to_utf8 (l, '?', codepoint_types::graphic, U"\n\r\t");
+
+ log += l;
+ log += '\n';
+}
+
+#ifndef _WIN32
+const char* comment_begin ("#");
+#else
+const char* comment_begin ("rem");
+#endif
+
+static void
+log_step_id (tracer& t, string& log, step_id id)
+{
+ string ts (to_string (system_clock::now (),
+ "%Y-%m-%d %H:%M:%S %Z",
+ true /* special */,
+ true /* local */));
+
+ const string& sid (to_string (id));
+
+ l3 ([&]{t << "step id: " << sid << ' ' << ts;});
+
+ log += comment_begin;
+ log += " step id: ";
+ log += sid;
+ log += ' ';
+ log += ts;
+ log += '\n';
+}
+
+// Add the specified string to the log as a comment. Unless the string is
+// empty (e.g., a blank line to separate comments), also trace it.
+//
+static void
+log_comment (tracer& t, string& log, const string& s)
+{
+ if (!s.empty ())
+ l3 ([&]{t << s;});
+
+ log += comment_begin;
+
+ if (!s.empty ())
+ {
+ log += ' ';
+ log += s;
+ }
+
+ log += '\n';
+}
+
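As a usage sketch (assumed timestamp; on POSIX comment_begin is "#"), the two helpers above produce log text along these lines:

  log_step_id (t, log, step_id::bpkg_update);
  log_comment (t, log, "updating packages");

  // Appends to the log:
  //
  //   # step id: bpkg.update 2023-05-09 13:44:26 UTC
  //   # updating packages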
// Run the worker script command. Name is used for logging and diagnostics
// only. Match lines read from the command's stderr against the regular
// expressions and return the warning result status (instead of success) in
// case of a match. Save the executed command into last_cmd.
//
+// Redirect stdout to stderr if the out_* arguments are not specified (out_str
+// is NULL and out_file is empty; they must never both be specified).
+// Otherwise, save the process output into the variable referenced by out_str,
+// if specified, and to the file referenced by out_file otherwise. Note: in
+// the former case it is assumed that the output will always fit into the pipe
+// buffer.
+//
// If bkp_step is present and is equal to the command step, then prior to
// running this command ask the user whether to continue or abort the task
// execution. If bkp_status is present, then ask for that if the command
@@ -228,36 +559,28 @@ using regexes = vector<regex>;
// For the special end step no command is executed. In this case only the user
// is potentially prompted and the step is traced/logged.
//
+// If specified, the pre-run callback is called after the step id is logged
+// but before the command is logged/executed.
+//
+using pre_run_function = void ();
+
template <typename... A>
static result_status
run_cmd (step_id step,
tracer& t,
- string& log, const regexes& warn_detect,
+ string& log,
+ const function<pre_run_function>& pre_run,
+ optional<string>* out_str, const path& out_file,
+ const regexes& warn_detect,
const string& name,
const optional<step_id>& bkp_step,
const optional<result_status>& bkp_status,
+ const strings& aux_env,
string& last_cmd,
const process_env& pe,
A&&... a)
{
- // UTF-8-sanitize and log the diagnostics. Also print the raw diagnostics
- // to stderr at verbosity level 3 or higher.
- //
- auto add = [&log, &t] (string&& s, bool trace = true)
- {
- if (verb >= 3)
- {
- if (trace)
- t << s;
- else
- text << s;
- }
-
- to_utf8 (s, '?', codepoint_types::graphic, U"\n\r\t");
-
- log += s;
- log += '\n';
- };
+ assert (out_str == nullptr || out_file.empty ());
string next_cmd;
@@ -266,60 +589,54 @@ run_cmd (step_id step,
//
struct abort {};
- auto prompt = [&last_cmd, &next_cmd, &add] (const string& what)
+ auto prompt = [&aux_env, &last_cmd, &next_cmd, &t, &log] (const string& what)
{
diag_record dr (text);
dr << '\n'
<< what << '\n'
- << " current dir: " << current_directory () << '\n'
- << " environment: " << ops.env_script () << ' ' << ops.env_target ();
+ << " current dir: " << current_directory () << '\n'
+ << " environment: " << ops.env_script () << ' ' << ops.env_target ();
+
+ if (!aux_env.empty ())
+ {
+ dr << '\n'
+ << " auxiliary environment:";
+
+ for (const string& e: aux_env)
+ dr << '\n'
+ << " " << e;
+ }
if (!last_cmd.empty ())
dr << '\n'
- << " last command: " << last_cmd;
+ << " last command: " << last_cmd;
if (!next_cmd.empty ())
dr << '\n'
- << " next command: " << next_cmd;
+ << " next command: " << next_cmd;
dr.flush ();
if (!yn_prompt (
"continue execution (or you may shutdown the machine)? [y/n]"))
{
- add ("execution aborted by interactive user");
+ log_line ("execution aborted by interactive user", log, &t);
throw abort ();
}
};
- auto prompt_step = [step, &t, &log, &bkp_step, &prompt] ()
+ auto prompt_step = [step, &t, &log, &bkp_step, &prompt, &pre_run] ()
{
- const string& sid (step_id_str[static_cast<size_t> (step)]);
-
// Prompt the user if the breakpoint is reached.
//
if (bkp_step && *bkp_step == step)
- prompt (sid + " step reached");
+ prompt (to_string (step) + " step reached");
- string ts (to_string (system_clock::now (),
- "%Y-%m-%d %H:%M:%S %Z",
- true /* special */,
- true /* local */));
+ log_step_id (t, log, step);
- // Log the step id and the command to be executed.
- //
- l3 ([&]{t << "step id: " << sid << ' ' << ts;});
-
-#ifndef _WIN32
- log += "# step id: ";
-#else
- log += "rem step id: ";
-#endif
- log += sid;
- log += ' ';
- log += ts;
- log += '\n';
+ if (pre_run)
+ pre_run ();
};
try
@@ -335,6 +652,8 @@ run_cmd (step_id step,
prompt_step ();
+ // Log the command to be executed.
+ //
t (c, n);
log += next_cmd;
@@ -347,27 +666,47 @@ run_cmd (step_id step,
{
try
{
- fdpipe pipe (fdopen_pipe ()); // Text mode seems appropriate.
+ // Redirect stdout to stderr, if the caller is not interested in it.
+ //
+ // Text mode seems appropriate.
+ //
+ fdpipe out_pipe (out_str != nullptr ? fdopen_pipe () : fdpipe ());
+ fdpipe err_pipe (fdopen_pipe ());
+
+ // If the output file is specified, then open "half-pipe".
+ //
+ if (!out_file.empty ())
+ try
+ {
+ out_pipe.out = fdopen (out_file,
+ fdopen_mode::out | fdopen_mode::create);
+ }
+ catch (const io_error& e)
+ {
+ fail << "unable to open " << out_file << ": " << e;
+ }
process pr (
- process_start_callback (cmdc,
- fdopen_null (), // Never reads from stdin.
- 2, // 1>&2
- pipe,
- pe,
- forward<A> (a)...));
+ process_start_callback (
+ cmdc,
+ fdopen_null (), // Never reads from stdin.
+ out_pipe.out != nullfd ? out_pipe.out.get () : 2,
+ err_pipe,
+ pe,
+ forward<A> (a)...));
- pipe.out.close ();
+ out_pipe.out.close ();
+ err_pipe.out.close ();
{
// Skip on exception.
//
- ifdstream is (move (pipe.in), fdstream_mode::skip);
+ ifdstream is (move (err_pipe.in),
+ fdstream_mode::skip,
+ ifdstream::badbit);
- for (string l; is.peek () != ifdstream::traits_type::eof (); )
+ for (string l; !eof (getline (is, l)); )
{
- getline (is, l);
-
// Match the log line with the warning-detecting regular
// expressions until the first match.
//
@@ -392,17 +731,27 @@ run_cmd (step_id step,
}
}
- add (move (l), false /* trace */);
+ log_line (move (l), log);
}
}
if (!pr.wait ())
{
const process_exit& e (*pr.exit);
- add (name + " " + to_string (e));
+ log_line (name + ' ' + to_string (e), log, &t);
r = e.normal () ? result_status::error : result_status::abnormal;
}
+ // Only read the buffered output if the process terminated normally.
+ //
+ if (out_str != nullptr && pr.exit->normal ())
+ {
+ // Note: shouldn't throw since the output is buffered.
+ //
+ ifdstream is (move (out_pipe.in));
+ *out_str = is.read_text ();
+ }
+
last_cmd = move (next_cmd);
if (bkp_status && r >= *bkp_status)
@@ -434,6 +783,61 @@ run_cmd (step_id step,
}
}
+template <typename... A>
+static result_status
+run_cmd (step_id step,
+ tracer& t,
+ string& log,
+ optional<string>* out_str, const path& out_file,
+ const regexes& warn_detect,
+ const string& name,
+ const optional<step_id>& bkp_step,
+ const optional<result_status>& bkp_status,
+ const strings& aux_env,
+ string& last_cmd,
+ const process_env& pe,
+ A&&... a)
+{
+ return run_cmd (step,
+ t,
+ log,
+ nullptr /* pre_run */,
+ out_str, out_file,
+ warn_detect,
+ name,
+ bkp_step, bkp_status, aux_env, last_cmd,
+ pe,
+ forward<A> (a)...);
+}
+
+template <typename V, typename... A>
+static result_status
+run_bpkg (step_id step,
+ const V& envvars,
+ tracer& t,
+ string& log,
+ const function<pre_run_function>& pre_run,
+ optional<string>& out,
+ const regexes& warn_detect,
+ const optional<step_id>& bkp_step,
+ const optional<result_status>& bkp_status,
+ const strings& aux_env,
+ string& last_cmd,
+ const char* verbosity,
+ const string& cmd, A&&... a)
+{
+ return run_cmd (step,
+ t,
+ log,
+ pre_run,
+ &out, path () /* out_file */,
+ warn_detect,
+ "bpkg " + cmd,
+ bkp_step, bkp_status, aux_env, last_cmd,
+ process_env ("bpkg", envvars),
+ verbosity, cmd, forward<A> (a)...);
+}
+
template <typename V, typename... A>
static result_status
run_bpkg (step_id step,
@@ -442,15 +846,68 @@ run_bpkg (step_id step,
string& log, const regexes& warn_detect,
const optional<step_id>& bkp_step,
const optional<result_status>& bkp_status,
+ const strings& aux_env,
+ string& last_cmd,
+ const char* verbosity,
+ const string& cmd, A&&... a)
+{
+ return run_cmd (step,
+ t,
+ log,
+ nullptr /* out_str */, path () /* out_file */,
+ warn_detect,
+ "bpkg " + cmd,
+ bkp_step, bkp_status, aux_env, last_cmd,
+ process_env ("bpkg", envvars),
+ verbosity, cmd, forward<A> (a)...);
+}
+
+template <typename... A>
+static result_status
+run_bpkg (step_id step,
+ tracer& t,
+ string& log,
+ const function<pre_run_function>& pre_run,
+ optional<string>& out,
+ const regexes& warn_detect,
+ const optional<step_id>& bkp_step,
+ const optional<result_status>& bkp_status,
+ const strings& aux_env,
+ string& last_cmd,
+ const char* verbosity,
+ const string& cmd, A&&... a)
+{
+ const char* const* envvars (nullptr);
+
+ return run_bpkg (step,
+ envvars,
+ t,
+ log,
+ pre_run,
+ out,
+ warn_detect,
+ bkp_step, bkp_status, aux_env, last_cmd,
+ verbosity, cmd, forward<A> (a)...);
+}
+
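A hedged usage sketch of the output-capturing overloads above (the step and bpkg arguments are hypothetical): the command's stdout is captured into the string while its stderr is still logged and matched against the warning regexes:

  optional<string> out;

  result_status r (
    run_bpkg (step_id::bpkg_bindist_debian,
              trace, log,
              nullptr /* pre_run */,
              out,
              wre,
              bkp_step, bkp_status, aux_env, last_cmd,
              "-v",
              "bindist", pkg));

  if (out) // Only present if the process terminated normally.
  {
    // Parse the (e.g., JSON) output.
  }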
+template <typename V, typename... A>
+static result_status
+run_bpkg (step_id step,
+ const V& envvars,
+ tracer& t,
+ string& log, const path& out, const regexes& warn_detect,
+ const optional<step_id>& bkp_step,
+ const optional<result_status>& bkp_status,
+ const strings& aux_env,
string& last_cmd,
const char* verbosity,
const string& cmd, A&&... a)
{
return run_cmd (step,
t,
- log, warn_detect,
+ log, nullptr /* out_str */, out, warn_detect,
"bpkg " + cmd,
- bkp_step, bkp_status, last_cmd,
+ bkp_step, bkp_status, aux_env, last_cmd,
process_env ("bpkg", envvars),
verbosity, cmd, forward<A> (a)...);
}
@@ -459,9 +916,32 @@ template <typename... A>
static result_status
run_bpkg (step_id step,
tracer& t,
+ string& log, const path& out, const regexes& warn_detect,
+ const optional<step_id>& bkp_step,
+ const optional<result_status>& bkp_status,
+ const strings& aux_env,
+ string& last_cmd,
+ const char* verbosity,
+ const string& cmd, A&&... a)
+{
+ const char* const* envvars (nullptr);
+
+ return run_bpkg (step,
+ envvars,
+ t,
+ log, out, warn_detect,
+ bkp_step, bkp_status, aux_env, last_cmd,
+ verbosity, cmd, forward<A> (a)...);
+}
+
+template <typename... A>
+static result_status
+run_bpkg (step_id step,
+ tracer& t,
string& log, const regexes& warn_detect,
const optional<step_id>& bkp_step,
const optional<result_status>& bkp_status,
+ const strings& aux_env,
string& last_cmd,
const char* verbosity,
const string& cmd, A&&... a)
@@ -472,7 +952,7 @@ run_bpkg (step_id step,
envvars,
t,
log, warn_detect,
- bkp_step, bkp_status, last_cmd,
+ bkp_step, bkp_status, aux_env, last_cmd,
verbosity, cmd, forward<A> (a)...);
}
@@ -484,6 +964,7 @@ run_b (step_id step,
string& log, const regexes& warn_detect,
const optional<step_id>& bkp_step,
const optional<result_status>& bkp_status,
+ const strings& aux_env,
string& last_cmd,
const char* verbosity,
const strings& buildspecs, A&&... a)
@@ -498,10 +979,11 @@ run_b (step_id step,
}
return run_cmd (step,
- t,
- log, warn_detect,
+ t, log,
+ nullptr /* out_str */, path () /* out_file */,
+ warn_detect,
name,
- bkp_step, bkp_status, last_cmd,
+ bkp_step, bkp_status, aux_env, last_cmd,
process_env ("b", envvars),
verbosity, buildspecs, forward<A> (a)...);
}
@@ -514,15 +996,17 @@ run_b (step_id step,
string& log, const regexes& warn_detect,
const optional<step_id>& bkp_step,
const optional<result_status>& bkp_status,
+ const strings& aux_env,
string& last_cmd,
const char* verbosity,
const string& buildspec, A&&... a)
{
return run_cmd (step,
- t,
- log, warn_detect,
+ t, log,
+ nullptr /* out_str */, path () /* out_file */,
+ warn_detect,
"b " + buildspec,
- bkp_step, bkp_status, last_cmd,
+ bkp_step, bkp_status, aux_env, last_cmd,
process_env ("b", envvars),
verbosity, buildspec, forward<A> (a)...);
}
@@ -534,6 +1018,7 @@ run_b (step_id step,
string& log, const regexes& warn_detect,
const optional<step_id>& bkp_step,
const optional<result_status>& bkp_status,
+ const strings& aux_env,
string& last_cmd,
const char* verbosity,
const string& buildspec, A&&... a)
@@ -543,10 +1028,131 @@ run_b (step_id step,
envvars,
t,
log, warn_detect,
- bkp_step, bkp_status, last_cmd,
+ bkp_step, bkp_status, aux_env, last_cmd,
verbosity, buildspec, forward<A> (a)...);
}
+template <typename... A>
+static result_status
+run_ldconfig (step_id step,
+ tracer& t,
+ string& log, const regexes& warn_detect,
+ const optional<step_id>& bkp_step,
+ const optional<result_status>& bkp_status,
+ const strings& aux_env,
+ string& last_cmd,
+ A&&... a)
+{
+ return run_cmd (step,
+ t,
+ log,
+                  nullptr /* out_str */, path () /* out_file */,
+ warn_detect,
+ "sudo ldconfig",
+ bkp_step, bkp_status, aux_env, last_cmd,
+ process_env ("sudo"),
+ "ldconfig", forward<A> (a)...);
+}
+
+template <typename... A>
+static result_status
+run_apt_get (step_id step,
+ tracer& t,
+ string& log, const regexes& warn_detect,
+ const optional<step_id>& bkp_step,
+ const optional<result_status>& bkp_status,
+ const strings& aux_env,
+ string& last_cmd,
+ const string& cmd, A&&... a)
+{
+ // Note: dumps some of its diagnostics to stdout.
+ //
+ return run_cmd (step,
+ t,
+ log,
+                  nullptr /* out_str */, path () /* out_file */,
+ warn_detect,
+ "sudo apt-get " + cmd,
+ bkp_step, bkp_status, aux_env, last_cmd,
+ process_env ("sudo"),
+ "apt-get", cmd, forward<A> (a)...);
+}
+
+template <typename... A>
+static result_status
+run_dnf (step_id step,
+ tracer& t,
+ string& log, const regexes& warn_detect,
+ const optional<step_id>& bkp_step,
+ const optional<result_status>& bkp_status,
+ const strings& aux_env,
+ string& last_cmd,
+ const string& cmd, A&&... a)
+{
+ // Note: dumps some of its diagnostics to stdout.
+ //
+ return run_cmd (step,
+ t,
+ log,
+                  nullptr /* out_str */, path () /* out_file */,
+ warn_detect,
+ "sudo dnf " + cmd,
+ bkp_step, bkp_status, aux_env, last_cmd,
+ process_env ("sudo"),
+ "dnf", cmd, forward<A> (a)...);
+}
+
+#ifndef _WIN32
+template <typename... A>
+static result_status
+run_tar (step_id step,
+ tracer& t,
+ string& log, const regexes& warn_detect,
+ const optional<step_id>& bkp_step,
+ const optional<result_status>& bkp_status,
+ const strings& aux_env,
+ string& last_cmd,
+ bool sudo,
+ A&&... a)
+{
+ return run_cmd (step,
+ t,
+ log,
+                  nullptr /* out_str */, path () /* out_file */,
+ warn_detect,
+ sudo ? "sudo tar" : "tar",
+ bkp_step, bkp_status, aux_env, last_cmd,
+ process_env (sudo ? "sudo" : "tar"),
+ sudo ? "tar" : nullptr, forward<A> (a)...);
+}
+#else
+template <typename... A>
+static result_status
+run_tar (step_id step,
+ tracer& t,
+ string& log, const regexes& warn_detect,
+ const optional<step_id>& bkp_step,
+ const optional<result_status>& bkp_status,
+ const strings& aux_env,
+ string& last_cmd,
+ bool /* sudo */,
+ A&&... a)
+{
+  // Note: using bsdtar, which can unpack .zip archives (and is also not an
+  // MSYS executable).
+ //
+ return run_cmd (step,
+ t,
+ log,
+                  nullptr /* out_str */, path () /* out_file */,
+ warn_detect,
+ "bsdtar",
+ bkp_step, bkp_status, aux_env, last_cmd,
+ process_env ("bsdtar"),
+ forward<A> (a)...);
+}
+#endif
+
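A hedged usage sketch of run_tar() (the step and tar arguments are hypothetical; compare with the bbot.upload.tar.* steps above):

  run_tar (step_id::bbot_upload_tar_create,
           trace, log, wre,
           bkp_step, bkp_status, aux_env, last_cmd,
           false /* sudo */,
           "-cf", "upload.tar", "upload/");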
// Upload compressed manifest to the specified TFTP URL with curl. Issue
// diagnostics and throw failed on invalid manifest or process management
// errors and throw io_error for input/output errors or non-zero curl exit.
@@ -567,6 +1173,11 @@ upload_manifest (tracer& trace,
// other hand, uploading from a file appears to work reliably (we still
// get an odd error on Windows from time to time with larger uploads).
//
+  // Let's not break lines in the manifest values so as not to further
+  // increase the size of the encoded manifest representation. Also, here we
+  // don't care much about the readability of the manifest since it will only
+  // be read by the bbot agent anyway.
+ //
#if 0
// Note: need to add compression support if re-enable this.
tftp_curl c (trace,
@@ -577,7 +1188,7 @@ upload_manifest (tracer& trace,
"--tftp-blksize", tftp_blksize,
"--max-time", tftp_put_timeout);
- manifest_serializer s (c.out, url);
+ manifest_serializer s (c.out, url, true /* long_lines */);
m.serialize (s);
c.out.close ();
#else
@@ -585,9 +1196,9 @@ upload_manifest (tracer& trace,
try
{
tmp = auto_rmfile (path::temp_path (what + "-manifest.lz4"));
- ofdstream ofs (tmp.path);
+ ofdstream ofs (tmp.path, fdopen_mode::binary);
olz4stream ozs (ofs, 9, 5 /* 256KB */, nullopt /* content_size */);
- manifest_serializer s (ozs, tmp.path.string ());
+ manifest_serializer s (ozs, tmp.path.string (), true /* long_lines */);
m.serialize (s);
ozs.close ();
ofs.close ();
@@ -632,24 +1243,39 @@ upload_manifest (tracer& trace,
}
}
+static strings
+parse_auxiliary_environment (const string&, const char*); // See below.
+
+static const string worker_checksum ("5"); // Logic version.
+
static int bbot::
build (size_t argc, const char* argv[])
{
- using namespace bpkg;
+ using std::map;
+ using std::multimap;
using string_parser::unquote;
+ using serializer = manifest_serializer;
+ using serialization = manifest_serialization;
+
+ using namespace bpkg;
+
tracer trace ("build");
// Our overall plan is as follows:
//
// 1. Parse the task manifest (it should be in CWD).
//
- // 2. Run bpkg to create the configuration, add the repository, and
- // configure, build, test, optionally install, test installed and
- // uninstall the package all while saving the logs in the result manifest.
+ // 2. Run bpkg to create the package/tests configurations, add the
+ // repository to them, and configure, build, test, optionally install or
+ // alternatively bindist and sys-install, test installed, and
+ // (sys-)uninstall the package all while saving the logs in the result
+ // manifest.
//
- // 3. Upload the result manifest.
+ // 3. Upload the result manifest and, optionally, the build artifacts.
+ //
+ // NOTE: consider updating worker_checksum if making any logic changes.
//
// Note also that we are being "watched" by the startup version of us which
// will upload an appropriate result in case we exit with an error. So here
@@ -659,42 +1285,35 @@ build (size_t argc, const char* argv[])
task_manifest tm (
parse_manifest<task_manifest> (path ("task.manifest"), "task"));
+ // Reset the dependency checksum if the task's worker checksum doesn't match
+ // the current one.
+ //
+ if (!tm.worker_checksum || *tm.worker_checksum != worker_checksum)
+ tm.dependency_checksum = nullopt;
+
result_manifest rm {
tm.name,
tm.version,
result_status::success,
- operation_results {}
+ operation_results {},
+ worker_checksum,
+ nullopt /* dependency_checksum */
};
- // Reserve storage large enough to hold all the potential operation results
- // without reallocations. Note that this is not an optimization but is
- // required to make sure that element references are not invalidated when
- // new results are added.
- //
- size_t max_results (6);
- rm.results.reserve (max_results);
-
- auto add_result = [&rm, max_results] (string o) -> operation_result&
+ auto add_result = [&rm] (string o) -> operation_result&
{
- assert (rm.results.size () < max_results);
-
rm.results.push_back (
operation_result {move (o), result_status::success, ""});
return rm.results.back ();
};
- // Note that we don't consider the build system module configuring and
- // testing during the "pre-step" as separate operations and share the
- // operation logs with the "main" configure and test steps (see below).
- // Thus, we save pointers to the added result objects for the subsequent
- // use.
- //
- operation_result* configure_result (nullptr);
- operation_result* test_result (nullptr);
-
dir_path rwd; // Root working directory.
+ // Archive of the build artifacts for upload.
+ //
+ path upload_archive ("upload.tar");
+
// Resolve the breakpoint specified by the interactive manifest value into
// the step id or the result status breakpoint. If the breakpoint is
// invalid, then log the error and abort the build. Note that we reuse the
@@ -704,8 +1323,49 @@ build (size_t argc, const char* argv[])
optional<result_status> bkp_status;
string last_cmd; // Used in the user prompt.
+ // Parse the auxiliary environment, if present, to dump it into the
+ // configure operation log and to use it in the interactive build user
+ // prompt. Note that this environment is already set by the parent process.
+ //
+ strings aux_env;
+
for (;;) // The "breakout" loop.
{
+ auto fail_operation = [&trace] (operation_result& r,
+ const string& e,
+ result_status s,
+ const string& name = "",
+ uint64_t line = 0,
+ uint64_t column = 0)
+ {
+ string prefix;
+
+ if (!name.empty ())
+ {
+ prefix += name;
+ prefix += ':';
+
+ if (line != 0)
+ {
+ prefix += to_string (line);
+ prefix += ':';
+
+ if (column != 0)
+ {
+ prefix += to_string (column);
+ prefix += ':';
+ }
+ }
+
+ prefix += ' ';
+ }
+
+ l3 ([&]{trace << prefix << e;});
+
+ r.log += prefix + "error: " + e + '\n';
+ r.status = s;
+ };
+
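For example (assumed values), a call such as

  fail_operation (add_result ("configure"),
                  "invalid value",
                  result_status::abort,
                  "package.manifest", 3, 5);

appends "package.manifest:3:5: error: invalid value" to the configure operation log and sets its status to abort.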
// Regular expressions that detect different forms of build2 toolchain
    // warnings. Accidentally (or not), they also cover GCC and Clang warnings
// (for the English locale).
@@ -723,7 +1383,7 @@ build (size_t argc, const char* argv[])
for (const string& re: tm.unquoted_warning_regex ())
wre.emplace_back (re, f);
- if (tm.interactive)
+ if (tm.interactive && *tm.interactive != "none")
{
const string& b (*tm.interactive);
@@ -745,171 +1405,571 @@ build (size_t argc, const char* argv[])
if (!bkp_step && !bkp_status)
{
- string e ("invalid interactive build breakpoint '" + b + "'");
-
- l3 ([&]{trace << e;});
-
- operation_result& r (add_result ("configure"));
-
- r.log = "error: " + e + '\n';
- r.status = result_status::abort;
+ fail_operation (add_result ("configure"),
+ "invalid interactive build breakpoint '" + b + '\'',
+ result_status::abort);
break;
}
}
+ // Parse the auxiliary environment, if present.
+ //
+ if (tm.auxiliary_environment)
+ {
+      // Note: cannot throw since it has already been called successfully by
+      // the parent process.
+ //
+ aux_env = parse_auxiliary_environment (*tm.auxiliary_environment,
+ comment_begin);
+ }
+
// Split the argument into prefix (empty if not present) and unquoted
- // value. Return nullopt if the prefix is invalid.
+ // value (absent if not present) and determine the step status. If the
+    // prefix is present and starts with the '+' or '-' character, then the
+ // respective step needs to be enabled/disabled. Return nullopt if the
+ // prefix is invalid.
+ //
+ // Note that arguments with absent values are normally used to
+ // enable/disable steps and are omitted from the command lines.
//
- auto parse_arg = [] (const string& a) -> optional<pair<string, string>>
+ struct argument
+ {
+ string prefix;
+
+ // Absent if the argument value is an empty unquoted string.
+ //
+ optional<string> value;
+
+ // True - enable, false - disable, nullopt - neutral.
+ //
+ optional<bool> step_status;
+ };
+
+ auto parse_arg = [] (const string& a) -> optional<argument>
{
size_t p (a.find_first_of (":=\"'"));
+ auto value = [] (const string& v)
+ {
+ return !v.empty () ? unquote (v) : optional<string> ();
+ };
+
if (p == string::npos || a[p] != ':') // No prefix.
- return make_pair (string (), unquote (a));
+ return argument {string (), value (a), nullopt};
+
+ string prefix (a, 0, p);
+
+ optional<bool> enable;
+ if (prefix[0] == '+' || prefix[0] == '-')
+ {
+ enable = (prefix[0] == '+');
+
+ prefix.erase (0, 1);
+
+ if (prefix != "bpkg.update" &&
+ prefix != "bpkg.test" &&
+ prefix != "bpkg.test-separate.update" &&
+ prefix != "bpkg.test-separate.test" &&
+ prefix != "bpkg.install" &&
+ prefix != "bbot.install.ldconfig" &&
+ prefix != "bpkg.bindist.debian" &&
+ prefix != "bpkg.bindist.fedora" &&
+ prefix != "bpkg.bindist.archive" &&
+ prefix != "bbot.sys-install" &&
+ prefix != "bbot.sys-install.ldconfig" &&
+ prefix != "b.test-installed.test" &&
+ prefix != "bpkg.test-separate-installed.update" &&
+ prefix != "bpkg.test-separate-installed.test" &&
+ prefix != "bbot.bindist.upload" &&
+ prefix != "bbot.upload")
+ {
+ return nullopt; // Prefix is invalid.
+ }
+ }
for (const string& id: step_id_str)
{
- if (a.compare (0, p, id, 0, p) == 0 &&
- (id.size () == p || (id.size () > p && id[p] == '.')))
- return make_pair (a.substr (0, p), unquote (a.substr (p + 1)));
+ size_t n (prefix.size ());
+ if (id.compare (0, n, prefix) == 0 &&
+ (id.size () == n || (id.size () > n && id[n] == '.')))
+ return argument {move (prefix), value (a.substr (p + 1)), enable};
}
return nullopt; // Prefix is invalid.
};
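To illustrate (hypothetical inputs), parse_arg() is expected to behave along these lines:

  // "config.cc.coptions=-O2"     -> {"", "config.cc.coptions=-O2", nullopt}
  // "bpkg.create:config.cxx=g++" -> {"bpkg.create", "config.cxx=g++", nullopt}
  // "+bpkg.bindist.debian:"      -> {"bpkg.bindist.debian", absent, enable}
  // "-bpkg.install:"             -> {"bpkg.install", absent, disable}
  // "foo.bar:value"              -> nullopt (invalid prefix)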
- // Enter split arguments into a map. Those without a prefix are
- // entered for the *.create steps.
+ // Keep track of explicitly enabled/disabled steps.
+ //
+ map<string, bool> step_statuses;
+
+ // Return true if the step is explicitly enabled via a +<prefix>:[<value>]
+ // environment/configuration argument.
//
- auto add_arg = [] (std::multimap<string, string>& args,
- pair<string, string>&& a)
+ auto step_enabled = [&step_statuses] (step_id step) -> bool
{
- if (!a.first.empty ())
- args.emplace (move (a));
- else
+ auto i (step_statuses.find (to_string (step)));
+ return i != step_statuses.end () && i->second;
+ };
+
+ // Return true if the step is explicitly disabled via a -<prefix>:[<value>]
+ // environment/configuration argument.
+ //
+ auto step_disabled = [&step_statuses] (step_id step) -> bool
+ {
+ auto i (step_statuses.find (to_string (step)));
+ return i != step_statuses.end () && !i->second;
+ };
+
+ // Save a step status.
+ //
+ // Note that since the bpkg.bindist.* steps are mutually exclusive we only
+ // keep the latest status change (see above for details).
+ //
+ auto step_status = [&step_statuses] (const string& step, bool status)
+ {
+ if (step.compare (0, 13, "bpkg.bindist.") == 0)
{
- args.emplace ("bpkg.create", a.second);
- args.emplace ("b.test-installed.create", a.second);
- args.emplace ("bpkg.test-installed.create", move (a.second));
+ step_statuses.erase ("bpkg.bindist.debian");
+ step_statuses.erase ("bpkg.bindist.fedora");
+ step_statuses.erase ("bpkg.bindist.archive");
}
+
+ step_statuses[step] = status;
};
- // Parse configuration arguments. Report failures to the bbot controller.
+ // Parse the environment, target configuration, and build package
+ // configuration arguments.
+ //
+ // NOTE: keep this parsing order intact so that, for example, a build
+ // package configuration argument can override step status specified
+ // by a target configuration argument.
+ //
+
+ // Parse environment arguments.
+ //
+ multimap<string, string> modules;
+ multimap<string, string> env_args;
+
+ for (size_t i (1); i != argc; ++i)
+ {
+ const char* a (argv[i]);
+ optional<argument> v (parse_arg (a));
+
+ if (!v)
+ fail << "invalid environment argument prefix in '" << a << "'";
+
+ bool mod (v->value &&
+ (*v->value)[0] != '-' &&
+ v->value->find ('=') == string::npos);
+
+ if (mod &&
+ !v->prefix.empty () &&
+ v->prefix != "b.create" &&
+ v->prefix != "bpkg.create" &&
+ v->prefix != "bpkg.target.create" &&
+ v->prefix != "bpkg.host.create" &&
+ v->prefix != "bpkg.module.create" &&
+ v->prefix != "b.test-installed.create" &&
+ v->prefix != "bpkg.test-separate-installed.create" &&
+ v->prefix != "bpkg.test-separate-installed.create_for_target" &&
+ v->prefix != "bpkg.test-separate-installed.create_for_host" &&
+ v->prefix != "bpkg.test-separate-installed.create_for_module")
+ fail << "invalid module prefix in '" << a << "'";
+
+ if (v->step_status)
+ step_status (v->prefix, *v->step_status);
+
+ if (v->value)
+ (mod ? modules : env_args).emplace (make_pair (move (v->prefix),
+ move (*v->value)));
+ }
+
+ // Parse target configuration arguments. Report failures to the bbot
+ // controller.
//
- std::multimap<string, string> config_args;
+ multimap<string, string> tgt_args;
- for (const string& c: tm.config)
+ for (const string& c: tm.target_config)
{
- optional<pair<string, string>> v (parse_arg (c));
+ optional<argument> v (parse_arg (c));
if (!v)
{
rm.status |= result_status::abort;
- l3 ([&]{trace << "invalid configuration argument prefix in "
+ l3 ([&]{trace << "invalid target configuration argument prefix in "
<< "'" << c << "'";});
break;
}
- if (v->second[0] != '-' && v->second.find ('=') == string::npos)
+ if (v->value &&
+ (*v->value)[0] != '-' &&
+ v->value->find ('=') == string::npos)
{
rm.status |= result_status::abort;
- l3 ([&]{trace << "invalid configuration argument '" << c << "'";});
+ l3 ([&]{trace << "invalid target configuration argument '" << c
+ << "'";});
break;
}
- add_arg (config_args, move (*v));
+ if (v->step_status)
+ step_status (v->prefix, *v->step_status);
+
+ if (v->value)
+ tgt_args.emplace (make_pair (move (v->prefix), move (*v->value)));
}
if (!rm.status)
break;
- // Parse environment arguments.
+    // Parse the build package configuration represented as a whitespace-
+    // separated list of the following potentially quoted bpkg-pkg-build
+ // command arguments:
//
- std::multimap<string, string> modules;
- std::multimap<string, string> env_args;
-
- for (size_t i (1); i != argc; ++i)
+ // <option>...
+ // <config-var>...
+ // ([{ <config-var>... }+] (?[sys:]|sys:)<pkg-name>[<version-spec>])...
+ // ( { <config-var>... }+ <pkg-name>)...
+ //
+ // If the package configuration is specified, then parse it into the
+ // following lists/maps:
+ //
+ // - The prefixed global options and configuration variables map
+ // (pkg_args). Added to the command lines at the corresponding steps
+ // after potential environment and target configuration arguments.
+ //
+ // - The unprefixed global options list (pkg_config_opts). Specified after
+ // all the prefixed global options on the bpkg-pkg-build command line at
+ // the bpkg.configure.build step.
+ //
+ // - The main package-specific configuration variables list
+ // (pkg_config_vars). Specified for the main package only on the
+ // bpkg-pkg-build command line at the bpkg.configure.build step,
+ // wherever it is configured. Also specified on the b-configure command
+ // line at the b.test-installed.configure step.
+ //
+ // - The main package-specific dependency packages list
+ // (pkg_config_main_deps), potentially with their own configuration
+ // variables (but not options). Only configured where the main package
+ // is configured with the bpkg-pkg-build command line at the
+ // bpkg.configure.build step.
+ //
+ // - The global system dependency packages list (pkg_config_glob_deps).
+ // Configured in all configurations with the bpkg-pkg-build command line
+ // at the bpkg.configure.build step.
+ //
+ // - The main and external test package-specific configuration variables
+ // map (pkg_config_pkgs). Specified on the bpkg-pkg-build command lines
+ // at the bpkg.configure.build and
+ // bpkg.test-separate-installed.configure.build steps. Package names
+ // other than the main and external test package names are silently
+ // ignored.
+ //
+ multimap<string, string> pkg_args;
+ strings pkg_config_opts;
+ strings pkg_config_vars;
+ vector<pair<string, strings>> pkg_config_main_deps; // ?<pkg>, sys:<pkg>
+ vector<pair<string, strings>> pkg_config_glob_deps; // ?sys:<pkg>
+ map<string, strings> pkg_config_pkgs; // <pkg>
+
+ if (!tm.package_config.empty ())
{
- const char* a (argv[i]);
- optional<pair<string, string>> v (parse_arg (a));
+ struct abort {};
- if (!v)
- fail << "invalid environment argument prefix in '" << a << "'";
+ auto fail = [&tm, &add_result, &fail_operation] (const string& d,
+ bool throw_abort = true)
+ {
+ fail_operation (add_result ("configure"),
+ "invalid package configuration: " + d +
+ "\n info: package configuration: '" +
+ tm.package_config + '\'',
+ result_status::abort);
- bool mod (v->second[0] != '-' && v->second.find ('=') == string::npos);
+ if (throw_abort)
+ throw abort ();
+ };
- if (mod && !v->first.empty () &&
- v->first != "bpkg.create" &&
- v->first != "b.test-installed.create" &&
- v->first != "bpkg.test-installed.create")
- fail << "invalid module prefix in '" << a << "'";
+ try
+ {
+ strings argsv (string_parser::parse_quoted (tm.package_config,
+ false /* unquote */));
+
+ cli::vector_scanner scanv (argsv);
+ cli::group_scanner args (scanv);
+
+ while (args.more ())
+ {
+ string a (args.next ());
+
+ // Unless the argument is an unquoted dependency (starts with `?` or
+ // `sys:`), first try to interpret it as a prefixed option/variable
+ // and/or step id status (enabled/disabled).
+ //
+ if (!(a[0] == '?' || a.compare (0, 4, "sys:") == 0))
+ {
+ optional<argument> v (parse_arg (a));
+
+          // Note that we only treat an argument as prefixed if the prefix
+          // is a known step id. Otherwise, we interpret the argument as an
+          // unprefixed global option, a variable, or a package spec.
+ //
+ if (v && !v->prefix.empty ())
+ {
+ if (v->value &&
+ (*v->value)[0] != '-' &&
+ v->value->find ('=') == string::npos)
+ fail ("invalid prefixed argument '" + a + '\'');
- add_arg (mod ? modules : env_args, move (*v));
+ if (args.group ().more ())
+ fail ("unexpected options group for prefixed argument '" + a +
+ '\'');
+
+ if (v->step_status)
+ step_status (v->prefix, *v->step_status);
+
+ if (v->value)
+ pkg_args.emplace (make_pair (move (v->prefix),
+ move (*v->value)));
+
+ continue;
+ }
+ }
+
+ a = unquote (a);
+
+ // Return true if the argument is an option.
+ //
+ // Note that options with values can only be specified using
+ // the single argument notation.
+ //
+ auto opt = [] (const string& a)
+ {
+ // Make sure that -- or - is always followed by some characters.
+ //
+ return a.compare (0, 2, "--") == 0 ? a.size () > 2 :
+ a[0] == '-' ? a.size () > 1 :
+ false ;
+ };
+
+ // Return true if the argument is a configuration variable.
+ //
+ auto var = [] (const string& a)
+ {
+ // Note: we need to be careful not to misinterpret
+ // '?libfoo == 1.0.0' as a variable.
+ //
+ return a.compare (0, 7, "config.") == 0 &&
+ a.find ('=') != string::npos;
+ };
+
+ bool o (opt (a));
+ bool v (var (a));
+
+ cli::scanner& ag (args.group ());
+
+ if (o) // Option.
+ {
+ if (ag.more ())
+ fail ("unexpected options group for option '" + a + '\'');
+
+ pkg_config_opts.push_back (move (a));
+ }
+ else if (v) // Configuration variable.
+ {
+ if (ag.more ())
+ fail ("unexpected options group for configuration variable '" +
+ a + '\'');
+
+ pkg_config_vars.push_back (move (a));
+ }
+ else // Dependency or build-to-hold package.
+ {
+            // Note that we consider a system package as a dependency
+            // regardless of whether it is prefixed with '?' or not.
+ //
+ strings vars;
+ while (ag.more ())
+ {
+ string da (unquote (ag.next ()));
+ if (!var (da))
+ fail ("argument is not a configuration variable for "
+ "dependency " + a + ": '" + da + '\'');
+
+ vars.push_back (move (da));
+ }
+
+ // Add the system dependency packages (prefixed with `?sys:`) to
+ // a separate list, to specify them globally on the
+ // bpkg-pkg-build command line for configuring them in all the
+ // (being) created configurations.
+ //
+ // Note, though, that we will handle the build-to-hold system
+            // packages (prefixed with `sys:`) in the same way as non-system
+ // dependencies, since such an auto-configuration is only
+ // supported by bpkg-pkg-build for system dependencies. In the
+ // future, we may support that on the bbot worker level by, for
+ // example, specifying all the configurations manually for the
+ // build-to-hold system packages and also specifying them as a
+ // system dependencies globally. We need to be careful to make
+ // sure that these dependencies are also auto-configured for the
+ // private configurations potentially created by bpkg-pkg-build.
+ //
+ // Also note that in the future we may allow package-specific
+ // --config-uuid options to only configure such packages in the
+ // specified configurations. We may also invent the special
+ // 00000000-0000-0000-0000-000000000005 configuration id to, for
+ // example, only configure them at the
+ // bpkg.test-separate-installed.configure.build step.
+ //
+ if (a.compare (0, 5, "?sys:") == 0) // Global system dependency.
+ {
+ pkg_config_glob_deps.push_back (make_pair (move (a),
+ move (vars)));
+ }
+ else if (a[0] == '?' || // Main package dependency.
+ a.compare (0, 4, "sys:") == 0)
+ {
+ pkg_config_main_deps.push_back (make_pair (move (a),
+ move (vars)));
+ }
+ else // Build-to-hold package.
+ {
+ if (vars.empty ())
+ fail ("no configuration variables specified for package '" +
+ a + '\'');
+
+ auto i (pkg_config_pkgs.find (a));
+
+ if (i == pkg_config_pkgs.end ())
+ {
+ pkg_config_pkgs.emplace (move (a), move (vars));
+ }
+ else
+ {
+ strings& vs (i->second);
+ vs.insert (vs.end (),
+ make_move_iterator (vars.begin ()),
+ make_move_iterator (vars.end ()));
+ }
+ }
+ }
+ }
+ }
+ catch (const cli::exception& e)
+ {
+ fail (e.what (), false /* throw_abort */);
+ break;
+ }
+ catch (const string_parser::invalid_string& e)
+ {
+ fail (e.what (), false /* throw_abort */);
+ break;
+ }
+ catch (const abort&)
+ {
+ break;
+ }
}
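As an illustration (hypothetical package configuration value), an input such as

  +bpkg.bindist.archive: bpkg.create:config.cc.coptions=-O3
  config.hello.extras=true { config.libfoo.test=true }+ ?libfoo ?sys:libssl

is expected to parse into:

  step_statuses        bpkg.bindist.archive -> enabled
  pkg_args             bpkg.create -> config.cc.coptions=-O3
  pkg_config_vars      config.hello.extras=true
  pkg_config_main_deps ?libfoo (with config.libfoo.test=true)
  pkg_config_glob_deps ?sys:libssl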
- // Return command arguments for the specified step id. Arguments with more
- // specific prefixes come last.
+ // Return command arguments for the specified step id, complementing
+ // *.create[_for_*] steps with un-prefixed arguments. If no arguments are
+ // specified for the step then use the specified fallbacks, potentially
+ // both. Arguments with more specific prefixes come last. Optionally,
+ // search for arguments starting from the specified step id rather than
+ // from the least specific one (tool id).
//
- auto step_args = [] (const std::multimap<string, string>& args,
+ auto step_args = [] (const multimap<string, string>& args,
step_id step,
- optional<step_id> fallback = nullopt) -> strings
+ optional<step_id> fallback1 = nullopt,
+ optional<step_id> fallback2 = nullopt,
+ optional<step_id> start_step = nullopt) -> cstrings
{
- strings r;
- const string& sid (step_id_str[static_cast<size_t> (step)]);
+ cstrings r;
- // If no arguments found for the step id, then use the fallback step id,
- // if specified.
+ // Add arguments for a specified, potentially empty, prefix.
//
- const string& s (args.find (sid) == args.end () && fallback
- ? step_id_str[static_cast<size_t> (*fallback)]
- : sid);
-
- for (size_t n (0);; ++n)
+ auto add_args = [&args, &r] (const string& prefix)
{
- n = s.find ('.', n);
-
- auto range (
- args.equal_range (n == string::npos ? s : string (s, 0, n)));
+ auto range (args.equal_range (prefix));
for (auto i (range.first); i != range.second; ++i)
- r.emplace_back (i->second);
+ r.emplace_back (i->second.c_str ());
+ };
- if (n == string::npos)
+ // Add un-prefixed arguments if this is one of the *.create[_for_*]
+ // steps.
+ //
+ switch (step)
+ {
+ case step_id::b_create:
+ case step_id::bpkg_create:
+ case step_id::bpkg_target_create:
+ case step_id::bpkg_host_create:
+ case step_id::bpkg_module_create:
+ case step_id::b_test_installed_create:
+ case step_id::bpkg_test_separate_installed_create:
+ case step_id::bpkg_test_separate_installed_create_for_target:
+ case step_id::bpkg_test_separate_installed_create_for_host:
+ case step_id::bpkg_test_separate_installed_create_for_module:
+ {
+ add_args ("");
break;
+ }
+ default: break;
}
- return r;
- };
+ auto add_step_args = [&add_args] (step_id step,
+ optional<step_id> start_step = nullopt)
+ {
+ const string& s (to_string (step));
- // Search for config.install.root variable. If it is present and has a
- // non-empty value, then test the package installation and uninstall. Note
- // that passing [null] value would be meaningless, so we don't recognize
- // it as a special one. While at it, cache the bpkg.create args for later
- // use.
- //
- dir_path install_root;
- strings cargs (step_args (config_args, step_id::bpkg_create));
- {
- size_t n (19);
- auto space = [] (char c) {return c == ' ' || c == '\t';};
+ size_t n;
- for (const string& s: reverse_iterate (cargs))
- {
- if (s.compare (0, n, "config.install.root") == 0 &&
- (s[n] == '=' || space (s[n])))
+ if (start_step)
{
- while (space (s[n])) ++n; // Skip spaces.
- if (s[n] == '=') ++n; // Skip the equal sign.
- while (space (s[n])) ++n; // Skip spaces.
+ const string& ss (to_string (*start_step));
- // Note that the config.install.root variable value may
- // potentially be quoted.
- //
- install_root = dir_path (unquote (string (s, n, s.size () - n)));
- break;
+ assert (s.size () >= ss.size () &&
+ s.compare (0, ss.size (), ss) == 0 &&
+ (s.size () == ss.size () || s[ss.size ()] == '.'));
+
+ n = ss.size ();
}
+ else
+ n = 0;
+
+ for (;; ++n)
+ {
+ n = s.find ('.', n);
+
+ add_args (n == string::npos ? s : string (s, 0, n));
+
+ if (n == string::npos)
+ break;
+ }
+ };
+
+ // If no arguments found for the step id, then use the fallback step
+ // ids, if specified.
+ //
+ if (args.find (to_string (step)) != args.end ())
+ {
+ add_step_args (step, start_step);
}
- }
+ else
+ {
+ // Note that if we ever need to specify fallback pairs with common
+ // ancestors, we may want to suppress duplicate ancestor step ids.
+ //
+ if (fallback1)
+ add_step_args (*fallback1);
+
+ if (fallback2)
+ add_step_args (*fallback2);
+ }
+
+ return r;
+ };
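A resolution sketch (assumed argument maps): for step bpkg.target.create with the b.create and bpkg.create fallbacks,

  // If the map contains a "bpkg.target.create" entry, arguments are
  // collected for the prefixes "", "bpkg", "bpkg.target", and
  // "bpkg.target.create" (un-prefixed ones first).
  //
  // Otherwise, they are collected for the fallback prefixes "", "b",
  // "b.create", "bpkg", and "bpkg.create".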
// bpkg-rep-fetch trust options.
//
@@ -935,6 +1995,7 @@ build (size_t argc, const char* argv[])
const version& ver (tm.version);
const string repo (tm.repository.string ());
const dir_path pkg_dir (pkg + '-' + ver.string ());
+ const string pkg_var (tm.name.variable ());
// Specify the revision explicitly for the bpkg-build command not to end
// up with a race condition building the latest revision rather than the
@@ -951,7 +2012,7 @@ build (size_t argc, const char* argv[])
// Query the project's build system information with `b info`.
//
auto prj_info = [&trace] (const dir_path& d,
- bool ext_mods,
+ b_info_flags fl,
const char* what)
{
// Note that the `b info` diagnostics won't be copied into any of the
@@ -962,7 +2023,7 @@ build (size_t argc, const char* argv[])
//
try
{
- return b_info (d, ext_mods, verb, trace);
+ return b_info (d, fl, verb, trace);
}
catch (const b_error& e)
{
@@ -974,10 +2035,14 @@ build (size_t argc, const char* argv[])
}
};
- b_project_info prj; // Package project information.
-
rwd = current_directory ();
+ // Create directory for the build artifacts to archive and upload.
+ //
+ dir_path upload_dir ("upload");
+
+ mk (trace, nullptr /* log */, upload_dir);
+
// If the package comes from a version control-based repository, then we
// will also test its dist meta-operation. Specifically, we will checkout
// the package outside the configuration directory passing --checkout-root
@@ -988,6 +2053,11 @@ build (size_t argc, const char* argv[])
dir_path dist_root (rwd / dir_path ("dist"));
dir_path dist_src (dist_root / pkg_dir);
+ dir_path dist_install_root (rwd / dir_path ("dist-install"));
+ dir_path dist_install_src (dist_install_root / pkg_dir);
+
+ dir_path dist_installed_root (rwd / dir_path ("dist-installed"));
+
// Redistribute the package source directory (pkg_dir) checked out into
  // a directory other than the configuration directory (dist_root) and
// replace it with the newly created distribution. Assume that the current
@@ -996,12 +2066,12 @@ build (size_t argc, const char* argv[])
// for the build2 process. Return true if the dist meta-operation
// succeeds.
//
- auto redist = [&trace, &wre, &bkp_step, &bkp_status, &last_cmd]
+ auto redist = [&trace, &wre, &bkp_step, &bkp_status, &aux_env, &last_cmd]
(step_id step,
operation_result& r,
const dir_path& dist_root,
const dir_path& pkg_dir, // <name>-<version>
- const char* import = nullptr,
+ const optional<string>& import = nullopt,
const small_vector<string, 1>& envvars = {})
{
// Temporarily change the current directory to the distribution root
@@ -1026,7 +2096,7 @@ build (size_t argc, const char* argv[])
step,
envvars,
trace, r.log, wre,
- bkp_step, bkp_status, last_cmd,
+ bkp_step, bkp_status, aux_env, last_cmd,
"-v",
"config.dist.root=" + redist_root.string (),
import,
@@ -1045,998 +2115,3763 @@ build (size_t argc, const char* argv[])
return true;
};
- // The module phase.
+ // Note that if this is not a self-hosted configuration, then we do not
+  // build external runtime tests nor run internal ones for host or module
+ // packages because the assumption is that they have been built/run (and
+ // with buildtab settings such as warnings, etc) when testing the
+ // self-hosted configuration this non-self-hosted one is based on. Also,
+ // by the same reason, we don't install tools or modules nor generate the
+ // binary distribution packages for them for non-self-hosted
+ // configurations.
+ //
+  // Actually, it could make sense to build and install tools and modules
+ // from a target configuration in this case. But that means for a
+ // non-self-hosted configuration a tool/module may want to test two
+ // things: its output build and its own build, which means we would need a
+ // way to control which of the two things (or both) are to be tested
+ // (think of two cross-compiler configurations, Emscripten and MinGW: for
+ // the former a source code generator would normally only want to test the
+ // output while for the latter -- both; maybe we could have a `cross-host`
+ // class, meaning that the configuration is not host itself but its target
+ // is). In any case, seeing that there is no way to verify such own build
+ // works, we ignore this for now.
+ //
+ // Also note that build system modules can only have external build-time
+ // tests (which is verified by bpkg-rep-fetch) and target packages cannot
+ // have external build-time tests (which we verify ourselves).
+ //
+ bool selfhost (tm.host && *tm.host);
+
+ // Detect if the package is of the target, host, or module type.
+ //
+ auto requirement = [&tm] (const char* id)
+ {
+ return find_if (tm.requirements.begin (),
+ tm.requirements.end (),
+ [id] (const requirement_alternatives& r)
+ {
+ if (r.size () == 1)
+ {
+ const requirement_alternative& a (r[0]);
+ return find (a.begin (), a.end (), id) != a.end ();
+ }
+
+ return false;
+ }) != tm.requirements.end ();
+ };
+
+ bool module_pkg (pkg.compare (0, 10, "libbuild2-") == 0);
+ bool bootstrap (module_pkg && requirement ("bootstrap"));
+ bool host_pkg (!module_pkg && requirement ("host"));
+ bool target_pkg (!module_pkg && !host_pkg);
+
+ // Don't generate binary packages for tools or modules for non-self-hosted
+ // configurations (see above for details).
+ //
+ optional<step_id> bindist;
+
+ if (target_pkg || selfhost)
+ {
+ if (step_enabled (step_id::bpkg_bindist_debian))
+ bindist = step_id::bpkg_bindist_debian;
+ else if (step_enabled (step_id::bpkg_bindist_fedora))
+ bindist = step_id::bpkg_bindist_fedora;
+ else if (step_enabled (step_id::bpkg_bindist_archive))
+ bindist = step_id::bpkg_bindist_archive;
+ }
+
+ bool sys_install (bindist && !step_disabled (step_id::bbot_sys_install));
+ bool bindist_upload (bindist && step_enabled (step_id::bbot_bindist_upload));
+
+ // Unless a bpkg.bindist.* step is enabled or the bpkg.install step is
+ // disabled, search for the config.install.root variable. If it is
+ // present and has a non-empty value, then test the package install and
+ // uninstall. Note that passing the [null] value would be meaningless, so
+ // we don't recognize it as a special one.
+ //
+ // Note that the host package can only be installed for a self-hosted
+ // configuration, using a bpkg configuration of the target type.
+ //
+ // Also note that the module package is always installed for a self-hosted
+ // configuration (and never otherwise), using the config.install.root
+ // value specified for the ~build2 configuration.
+ //
+ // If present, indicates that the install, test installed, and uninstall
+ // operations need to be tested.
+ //
+ // Note that the main package may not support the install operation. We,
+ // however, can only detect that after the package is configured. If
+ // that's the case, we will later disable the steps which cannot be
+ // performed for such a package (bpkg.install, bpkg.bindist.*, etc).
+ //
+ optional<dir_path> install_root;
+
+ // While building and running tests against the installation created
+ // either from source or from the archive distribution package we will
+ // make the bin/ subdirectory of config.install.root, if specified, the
+ // first entry in the PATH environment variable, except for build system
+ // modules which supposedly don't install any executables.
+ //
+ // Note that normally the config.install.root is expected to be prefixed
+ // with the bpkg.target.create or, as a fallback, b.create or bpkg.create
+ // step ids. However, for testing relocatable installations it can be
+ // desirable to extract the archive distribution package content at the
+ // bbot.sys-install.tar.extract step into a different installation
+ // directory. If that's the case, then this directory needs to also be
+ // specified as bbot.sys-install:config.install.root. If specified, this
+ // directory will be preferred as a base for forming the bin/ directory
+ // path.
+ //
+ optional<dir_path> install_bin;
+
+ auto config_install_root = [&step_args, &tgt_args]
+ (step_id s,
+ optional<step_id> f1 = nullopt,
+ optional<step_id> f2 = nullopt)
+ -> optional<dir_path>
+ {
+ size_t n (19); // strlen ("config.install.root")
+ auto space = [] (char c) {return c == ' ' || c == '\t';};
+
+ for (const char* a: reverse_iterate (step_args (tgt_args, s, f1, f2)))
+ {
+ if (strncmp (a, "config.install.root", n) == 0 &&
+ (a[n] == '=' || space (a[n])))
+ {
+ while (space (a[n])) ++n; // Skip spaces.
+ if (a[n] == '=') ++n; // Skip the equal sign.
+ while (space (a[n])) ++n; // Skip spaces.
+
+ // Note that the config.install.root variable value may potentially
+ // be quoted.
+ //
+ return dir_path (unquote (a + n));
+ }
+ }
+
+ return nullopt;
+ };
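+
+ // For example (hypothetical values), any of the following arguments
+ // would yield /opt/install:
+ //
+ // config.install.root=/opt/install
+ // config.install.root = /opt/install
+ // config.install.root='/opt/install'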
+
+ if ((target_pkg || selfhost) &&
+ !bindist &&
+ !step_disabled (step_id::bpkg_install))
+ {
+ if (!module_pkg)
+ {
+ install_root = config_install_root (step_id::bpkg_target_create,
+ step_id::b_create,
+ step_id::bpkg_create);
+
+ if (install_root)
+ install_bin = *install_root / dir_path ("bin");
+ }
+ else
+ install_root = dir_path ();
+ }
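+
+ // For example (hypothetical value), a target configuration argument
+ // along the lines of
+ //
+ // bpkg.target.create:config.install.root=/opt/install
+ //
+ // would result in install_root=/opt/install and install_bin=
+ // /opt/install/bin (which is later prepended to PATH; see above).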
+
+ // Split external test packages into the runtime and build-time lists.
+ //
+ // Note that runtime and build-time test packages are always configured in
+ // different bpkg configurations, since they can depend on different
+ // versions of the same package.
+ //
+ small_vector<test_dependency, 1> runtime_tests;
+ small_vector<test_dependency, 1> buildtime_tests;
+
+ for (test_dependency& t: tm.tests)
+ {
+ if (t.buildtime)
+ buildtime_tests.push_back (move (t));
+ else if (target_pkg || selfhost)
+ runtime_tests.push_back (move (t));
+ }
+
+ bool has_buildtime_tests (!buildtime_tests.empty ());
+ bool has_runtime_tests (!runtime_tests.empty ());
+
+ // Abort if a target package has external build-time tests.
+ //
+ if (target_pkg && has_buildtime_tests)
+ {
+ fail_operation (
+ add_result ("configure"),
+ "build-time tests in package not marked with `requires: host`",
+ result_status::abort);
+
+ break;
+ }
+
+ // Create the required build configurations.
+ //
+ // Note that if this is a target package, then we intentionally do not
+ // create the host or module configurations, letting the automatic private
+ // configuration creation take its course (since that would probably be
+ // the most typical usage scenario).
+ //
+ // Also note that we may need a separate target configuration to build the
+ // host package for installation. This is required to avoid a potential
+ // conflict between the main package and a tool it may try to run during
+ // the build. We also do the same for module packages which, while they
+ // cannot have build-time dependencies, could have private code
+ // generators. This configuration needs to have the target type (so that
+ // it uses any build-time dependencies from build-host/module
+ // configurations). Note also that we currently only do this for
+ // self-hosted configurations (since we don't install otherwise, see
+ // above).
+ //
+ dir_path target_conf ("build");
+ dir_path host_conf ("build-host");
+ dir_path module_conf ("build-module");
+ dir_path install_conf ("build-install");
+
+ // Main package config.
+ //
+ const dir_path& main_pkg_conf (target_pkg ? target_conf :
+ host_pkg ? host_conf :
+ module_conf);
+
+ // Create the target configuration if this is a target package or if the
+ // host/module package has external build-time tests.
//
+ bool create_target (target_pkg || has_buildtime_tests);
- // If this is a build system module, perform a "pre-step" by building it
- // in a separate configuration reproducing the one used to build build2
- // itself. Note that the configuration and the environment options and
- // variables are not passed to commands that may affect this
- // configuration.
+ // Create the host configuration if this is a host package.
+ //
+ // Also create it for the module package with external build-time tests.
+ // The idea is to be able to test a tool which might only be tested via
+ // the module. To be precise, we need to check that the tests package has
+ // a build-time dependency (on the tool) but that's not easy to do and so
+ // we will create a host configuration if a module has any build-time
+ // tests.
+ //
+ bool create_host (host_pkg || (module_pkg && has_buildtime_tests));
+
+ // Create the module configuration if the package is a build system
+ // module.
+ //
+ // Also create it for the host package with the external build-time tests,
+ // so that a single build2 configuration is used for both target and host
+ // packages (this is important in case they happen to use the same
+ // module).
//
- bool module (pkg.compare (0, 10, "libbuild2-") == 0);
- dir_path module_dir ("build-module");
+ bool create_module (module_pkg || (host_pkg && has_buildtime_tests));
- // If this is a build system module that requires bootstrap, then its
- // importation into the dependent (test) projects cannot be configured and
- // the corresponding config.import.* variable needs to be specified on the
- // bpkg/build2 command line as a global override, whenever required.
+ // Create the configuration for installing the main package (potentially
+ // as a part of generating binary distribution package) of the host or
+ // module type, unless it's not supposed to be installed.
//
- // Note that such a module must be explicitly marked with `requires:
- // bootstrap` in its manifest. This can only be detected after the module
- // is configured and its manifest available.
+ bool create_install (!target_pkg && (install_root || bindist));
+
+ // Configuration where the package will be installed from.
//
- bool bootstrap (false);
+ dir_path effective_install_conf (
+ rwd / (create_install ? install_conf : main_pkg_conf));
- // Note that we will parse the package manifest right after the package is
- // configured.
+ // The root configuration through which we will be configuring the
+ // cluster (note: it does not necessarily match the main package type).
+ //
+ // In other words, this is the configuration that will be specified for
+ // bpkg-pkg-build as the current configuration (via -d). It must be the
+ // configuration that links to all the other configurations, except
+ // install.
+ //
+ // Note that the install configuration, if present, is either the
+ // cluster's "second root" (for a host package) or is an independent
+ // cluster (for a module package). In either case it needs to additionally
+ // be specified as a current configuration on the command line.
+ //
+ const dir_path& root_conf (create_target ? target_conf :
+ create_host ? host_conf :
+ module_conf);
+
+ // Note that bpkg doesn't support configuring bootstrap module
+ // dependents well, not distinguishing such modules from regular ones
+ // (see pkg_configure() for details). Thus, we need to pass the
+ // !config.import.* global override wherever required ourselves.
//
- package_manifest pm;
- path mf ("manifest");
+ // Also note that since this override is global, it may only be specified
+ // globally on the bpkg command line (as opposed to package-specific
+ // overrides).
+ //
+ optional<string> bootstrap_import;
+
+ if (bootstrap)
+ bootstrap_import = "!config.import." + pkg_var + '=' +
+ (rwd / main_pkg_conf).string ();
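+
+ // For example (hypothetical package and path), for a libbuild2-hello
+ // module built in /tmp/build this would produce an override along the
+ // lines of:
+ //
+ // !config.import.libbuild2_hello=/tmp/build/build-module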
- if (module)
+ // Configure.
+ //
{
- // Configure.
+ operation_result* pr (&add_result ("configure"));
+ operation_result& r (*pr); // @@ TMP: Apple Clang 14.0.3 ICE
+
+ // If we have an auxiliary environment, show it in the logs.
//
+ if (!aux_env.empty ())
{
- operation_result& r (add_result ("configure"));
- configure_result = &r;
+ for (const string& e: aux_env)
+ {
+ r.log += e;
+ r.log += '\n';
+ }
- // Noop, just for the log record.
+ // Add a trailing blank line to separate this from the rest.
//
- change_wd (trace, &r.log, rwd);
+ r.log += '\n';
+ }
- // b create(<dir>) config.config.load=~build2
- //
- // [bpkg.module.create]
- //
- // Note also that we suppress warnings about unused config.* values,
- // such CLI configuration.
- //
- // What if a module wants to use CLI? The current thinking is that we
- // will be "whitelisting" base (i.e., those that can plausibly be used
- // by multiple modules) libraries and tools for use by build system
- // modules. So if and when we whitelist CLI, we will add it here, next
- // to cc.
- //
- r.status |= run_b (
- step_id::bpkg_module_create,
+ // Noop, just for the log record.
+ //
+ change_wd (trace, &r.log, rwd);
+
+ // If we end up with multiple current configurations (root and install)
+ // then when running the bpkg-pkg-build command we need to specify the
+ // configuration for each package explicitly via --config-uuid.
+ //
+ // While it's tempting to use the --config-name option instead of
+ // --config-uuid, that wouldn't work well for multiple current
+ // configurations. For --config-name the configuration search is carried
+ // out among configurations explicitly linked to the main configuration
+ // only. That's in contrast to --config-uuid, for which the whole
+ // configuration cluster is searched (see the bpkg-pkg-build
+ // implementation for details).
+ //
+ // Let's not generate random UUIDs but use some predefined values which
+ // we can easily recognize in the build logs.
+ //
+ const char* target_uuid ("00000000-0000-0000-0000-000000000001");
+ const char* host_uuid ("00000000-0000-0000-0000-000000000002");
+ const char* module_uuid ("00000000-0000-0000-0000-000000000003");
+ const char* install_uuid ("00000000-0000-0000-0000-000000000004");
+
+ // Let's however distinguish the target package as a simple common case
+ // and simplify the configuration creation and package configuration
+ // commands, making them more readable in the build log. For this simple
+ // case only one configuration needs to be created explicitly and so it
+ // doesn't need the UUID. Also, there is no need for any package-specific
+ // options for the bpkg-pkg-build command in this case.
+ //
+ // Create the target configuration.
+ //
+ // bpkg create <env-modules> <env-config-args>
+ // <tgt-config-args>
+ // <pkg-config-args>
+ //
+ if (create_target)
+ {
+ step_id b (step_id::bpkg_create); // Breakpoint.
+ step_id s (step_id::bpkg_target_create); // Step.
+ step_id f1 (step_id::b_create); // First fallback.
+ step_id f2 (step_id::bpkg_create); // Second fallback.
+
+ r.status |= run_bpkg (
+ b,
trace, r.log, wre,
- bkp_step, bkp_status, last_cmd,
+ bkp_step, bkp_status, aux_env, last_cmd,
"-V",
- "create(" + module_dir.representation () + ",cc)",
- "config.config.load=~build2",
- "config.config.persist+='config.*'@unused=drop");
+ "create",
+ "-d", target_conf,
+ !target_pkg ? cstrings ({"--uuid", target_uuid}) : cstrings (),
+ step_args (modules, s, f1, f2),
+ step_args (env_args, s, f1, f2),
+ step_args (tgt_args, s, f1, f2),
+ step_args (pkg_args, s, f1, f2));
if (!r.status)
break;
+ }
- change_wd (trace, &r.log, module_dir);
+ // Create the host configurations.
+ //
+ if (create_host)
+ {
+ step_id b (step_id::bpkg_create);
+
+ if (host_pkg && selfhost)
+ {
+ // Create the host configuration.
+ //
+ {
+ step_id s (step_id::bpkg_host_create);
+ step_id f1 (step_id::b_create);
+ step_id f2 (step_id::bpkg_create);
- // bpkg create --existing
+ // bpkg create --type host <env-modules> <env-config-args>
+ // <tgt-config-args>
+ // <pkg-config-args>
+ //
+ r.status |= run_bpkg (
+ b,
+ trace, r.log, wre,
+ bkp_step, bkp_status, aux_env, last_cmd,
+ "-V",
+ "create",
+ "-d", host_conf,
+ "--type", "host",
+ "--uuid", host_uuid,
+ step_args (modules, s, f1, f2),
+ step_args (env_args, s, f1, f2),
+ step_args (tgt_args, s, f1, f2),
+ step_args (pkg_args, s, f1, f2));
+
+ if (!r.status)
+ break;
+ }
+
+ // Create the install configuration.
+ //
+ // bpkg create <env-modules> <env-config-args>
+ // <tgt-config-args>
+ // <pkg-config-args>
+ //
+ if (create_install)
+ {
+ step_id s (step_id::bpkg_target_create);
+ step_id f1 (step_id::b_create);
+ step_id f2 (step_id::bpkg_create);
+
+ r.status |= run_bpkg (
+ b,
+ trace, r.log, wre,
+ bkp_step, bkp_status, aux_env, last_cmd,
+ "-V",
+ "create",
+ "-d", install_conf,
+ "--uuid", install_uuid,
+ step_args (modules, s, f1, f2),
+ step_args (env_args, s, f1, f2),
+ step_args (tgt_args, s, f1, f2),
+ step_args (pkg_args, s, f1, f2));
+
+ if (!r.status)
+ break;
+ }
+ }
+ else
+ {
+ // b create(<dir>) config.config.load=~host
+ //
+ // Note also that we suppress warnings about unused config.* values.
+ //
+ r.status |= run_b (
+ b,
+ trace, r.log, wre,
+ bkp_step, bkp_status, aux_env, last_cmd,
+ "-V",
+ "create(" + host_conf.representation () + ",cc)",
+ "config.config.load=~host",
+ "config.config.persist+='config.*'@unused=drop");
+
+ if (!r.status)
+ break;
+
+ // bpkg create --existing --type host
+ //
+ r.status |= run_bpkg (
+ b,
+ trace, r.log, wre,
+ bkp_step, bkp_status, aux_env, last_cmd,
+ "-v",
+ "create",
+ "--existing",
+ "-d", host_conf,
+ "--type", "host",
+ "--uuid", host_uuid);
+
+ if (!r.status)
+ break;
+ }
+ }
+
+ // Create the module configurations.
+ //
+ if (create_module)
+ {
+ step_id b (step_id::bpkg_create);
+
+ // Create the module configuration.
//
- r.status |= run_bpkg (
- step_id::bpkg_module_create,
- trace, r.log, wre,
- bkp_step, bkp_status, last_cmd,
- "-v",
- "create",
- "--existing");
+ {
+ // b create(<dir>) config.config.load=~build2 [<env-config-args>
+ // <tgt-config-args>
+ // <pkg-config-args>]
+ //
+ // Note also that we suppress warnings about unused config.* values.
+ //
+ // What if a module wants to use CLI? The current thinking is that
+ // we will be "whitelisting" base (i.e., those that can plausibly be
+ // used by multiple modules) libraries and tools for use by build
+ // system modules. So if and when we whitelist CLI, we will add it
+ // here, next to cc.
+ //
+ string mods;
+ cstrings eas;
+ cstrings cas;
+ cstrings pas;
- if (!r.status)
- break;
+ if (module_pkg && selfhost)
+ {
+ step_id s (step_id::bpkg_module_create);
+
+ for (const char* m: step_args (modules, s))
+ {
+ if (!mods.empty ())
+ mods += ' ';
+
+ mods += m;
+ }
+
+ eas = step_args (env_args, s);
+ cas = step_args (tgt_args, s);
+ pas = step_args (pkg_args, s);
+ }
+ else
+ mods = "cc";
+
+ r.status |= run_b (
+ b,
+ trace, r.log, wre,
+ bkp_step, bkp_status, aux_env, last_cmd,
+ "-V",
+ "create(" + module_conf.representation () + ',' + mods + ')',
+ "config.config.load=~build2",
+ "config.config.persist+='config.*'@unused=drop",
+ eas,
+ cas,
+ pas);
- // bpkg add <env-config-args> <config-args> <repository-url>
+ if (!r.status)
+ break;
+
+ // bpkg create --existing --type build2
+ //
+ r.status |= run_bpkg (
+ b,
+ trace, r.log, wre,
+ bkp_step, bkp_status, aux_env, last_cmd,
+ "-v",
+ "create",
+ "--existing",
+ "-d", module_conf,
+ "--type", "build2",
+ "--uuid", module_uuid);
+
+ if (!r.status)
+ break;
+ }
+
+ // Create the install configuration.
//
- // bpkg.module.configure.add (bpkg.configure.add)
+ if (create_install && module_pkg)
+ {
+ step_id s (step_id::bpkg_module_create);
+
+ string mods;
+ for (const char* m: step_args (modules, s))
+ {
+ if (!mods.empty ())
+ mods += ' ';
+
+ mods += m;
+ }
+
+ // b create(<dir>) config.config.load=~build2 [<env-config-args>
+ // <tgt-config-args>
+ // <pkg-config-args>]
+ //
+ r.status |= run_b (
+ b,
+ trace, r.log, wre,
+ bkp_step, bkp_status, aux_env, last_cmd,
+ "-V",
+ "create(" + install_conf.representation () + ',' + mods + ')',
+ "config.config.load=~build2",
+ "config.config.persist+='config.*'@unused=drop",
+ step_args (env_args, s),
+ step_args (tgt_args, s),
+ step_args (pkg_args, s));
+
+ if (!r.status)
+ break;
+
+ // bpkg create --existing
+ //
+ r.status |= run_bpkg (
+ b,
+ trace, r.log, wre,
+ bkp_step, bkp_status, aux_env, last_cmd,
+ "-v",
+ "create",
+ "--existing",
+ "-d", install_conf,
+ "--uuid", install_uuid);
+
+ if (!r.status)
+ break;
+ }
+ }
+
+ // Link the configurations.
+ //
+ // bpkg link -d <dir> <dir>
+ //
+ {
+ step_id b (step_id::bpkg_link);
+
+ if (create_target)
+ {
+ if (create_host)
+ {
+ r.status |= run_bpkg (
+ b,
+ trace, r.log, wre,
+ bkp_step, bkp_status, aux_env, last_cmd,
+ "-v",
+ "link",
+ "-d", target_conf,
+ host_conf);
+
+ if (!r.status)
+ break;
+ }
+
+ if (create_module)
+ {
+ r.status |= run_bpkg (
+ b,
+ trace, r.log, wre,
+ bkp_step, bkp_status, aux_env, last_cmd,
+ "-v",
+ "link",
+ "-d", target_conf,
+ module_conf);
+
+ if (!r.status)
+ break;
+ }
+ }
+
+ if (create_host)
+ {
+ if (create_module)
+ {
+ r.status |= run_bpkg (
+ b,
+ trace, r.log, wre,
+ bkp_step, bkp_status, aux_env, last_cmd,
+ "-v",
+ "link",
+ "-d", host_conf,
+ module_conf);
+
+ if (!r.status)
+ break;
+ }
+ }
+
+ // Link the install configuration only for the host package. Note that
+ // the module package cannot have build-time dependencies and so
+ // doesn't need configurations for them.
//
+ if (create_install && host_pkg)
+ {
+ r.status |= run_bpkg (
+ b,
+ trace, r.log, wre,
+ bkp_step, bkp_status, aux_env, last_cmd,
+ "-v",
+ "link",
+ "-d", install_conf,
+ host_conf);
+
+ if (!r.status)
+ break;
+
+ if (create_module)
+ {
+ r.status |= run_bpkg (
+ b,
+ trace, r.log, wre,
+ bkp_step, bkp_status, aux_env, last_cmd,
+ "-v",
+ "link",
+ "-d", install_conf,
+ module_conf);
+
+ if (!r.status)
+ break;
+ }
+ }
+ }
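+
+ // To illustrate (hypothetical case: a host package with external
+ // build-time tests and an install configuration), the links created
+ // above would be:
+ //
+ // build -> build-host, build-module
+ // build-host -> build-module
+ // build-install -> build-host, build-module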
+
+ // Fetch repositories into the main package configuration, the target
+ // configuration for external build-time tests, if any, and the install
+ // configuration, if present.
+ //
+ // bpkg add <env-config-args> <tgt-config-args> <pkg-config-args>
+ // <repository-url>
+ //
+ {
+ step_id b (step_id::bpkg_configure_add);
+ step_id s (step_id::bpkg_configure_add);
+
r.status |= run_bpkg (
- step_id::bpkg_module_configure_add,
+ b,
trace, r.log, wre,
- bkp_step, bkp_status, last_cmd,
+ bkp_step, bkp_status, aux_env, last_cmd,
"-v",
"add",
-
- step_args (env_args,
- step_id::bpkg_module_configure_add,
- step_id::bpkg_configure_add),
-
- step_args (config_args,
- step_id::bpkg_module_configure_add,
- step_id::bpkg_configure_add),
-
+ "-d", main_pkg_conf,
+ step_args (env_args, s),
+ step_args (tgt_args, s),
+ step_args (pkg_args, s),
repo);
if (!r.status)
break;
+ }
+
+ // bpkg fetch <env-config-args> <tgt-config-args> <pkg-config-args>
+ // <trust-options>
+ //
+ {
+ step_id b (step_id::bpkg_configure_fetch);
+ step_id s (step_id::bpkg_configure_fetch);
- // bpkg fetch <env-config-args> <config-args> <trust-options>
- //
- // bpkg.module.configure.fetch (bpkg.configure.fetch)
- //
r.status |= run_bpkg (
- step_id::bpkg_module_configure_fetch,
+ b,
trace, r.log, wre,
- bkp_step, bkp_status, last_cmd,
+ bkp_step, bkp_status, aux_env, last_cmd,
"-v",
"fetch",
-
- step_args (env_args,
- step_id::bpkg_module_configure_fetch,
- step_id::bpkg_configure_fetch),
-
- step_args (config_args,
- step_id::bpkg_module_configure_fetch,
- step_id::bpkg_configure_fetch),
-
+ "-d", main_pkg_conf,
+ step_args (env_args, s),
+ step_args (tgt_args, s),
+ step_args (pkg_args, s),
trust_ops);
if (!r.status)
break;
+ }
- // bpkg build --configure-only <package-name>/<package-version>
- //
- // [bpkg.module.configure.build]
+ if (create_install)
+ {
+ // bpkg add <env-config-args> <tgt-config-args> <pkg-config-args>
+ // <repository-url>
//
- r.status |= run_bpkg (
- step_id::bpkg_module_configure_build,
- trace, r.log, wre,
- bkp_step, bkp_status, last_cmd,
- "-v",
- "build",
- "--configure-only",
- "--checkout-root", dist_root,
- "--yes",
- pkg_rev);
-
- if (!r.status)
- break;
+ {
+ step_id b (step_id::bpkg_configure_add);
+ step_id s (step_id::bpkg_configure_add);
- rm.status |= r.status;
+ r.status |= run_bpkg (
+ b,
+ trace, r.log, wre,
+ bkp_step, bkp_status, aux_env, last_cmd,
+ "-v",
+ "add",
+ "-d", install_conf,
+ step_args (env_args, s),
+ step_args (tgt_args, s),
+ step_args (pkg_args, s),
+ repo);
- bool dist (exists (dist_src));
- const dir_path& src_dir (dist ? dist_src : pkg_dir);
+ if (!r.status)
+ break;
+ }
- // Note that being unable to parse the package manifest is likely to
- // be an infrastructure problem, given that the package has been
- // successfully configured.
+ // bpkg fetch <env-config-args> <tgt-config-args> <pkg-config-args>
+ // <trust-options>
//
- pm = parse_manifest<package_manifest> (src_dir / mf, "package");
+ {
+ step_id b (step_id::bpkg_configure_fetch);
+ step_id s (step_id::bpkg_configure_fetch);
- bootstrap = find_if (pm.requirements.begin (),
- pm.requirements.end (),
- [] (const requirement_alternatives& r)
- {
- return r.size () == 1 && r[0] == "bootstrap";
- }) != pm.requirements.end ();
+ r.status |= run_bpkg (
+ b,
+ trace, r.log, wre,
+ bkp_step, bkp_status, aux_env, last_cmd,
+ "-v",
+ "fetch",
+ "-d", install_conf,
+ step_args (env_args, s),
+ step_args (tgt_args, s),
+ step_args (pkg_args, s),
+ trust_ops);
+
+ if (!r.status)
+ break;
+ }
+ }
- if (dist)
+ if (has_buildtime_tests)
+ {
+ // bpkg add <env-config-args> <tgt-config-args> <pkg-config-args>
+ // <repository-url>
+ //
{
- // Note that we reuse the configure operation log for the dist
- // meta-operation.
- //
- if (!redist (step_id::bpkg_module_configure_build,
- r,
- dist_root,
- pkg_dir))
+ step_id b (step_id::bpkg_configure_add);
+ step_id s (step_id::bpkg_configure_add);
+
+ r.status |= run_bpkg (
+ b,
+ trace, r.log, wre,
+ bkp_step, bkp_status, aux_env, last_cmd,
+ "-v",
+ "add",
+ "-d", target_conf,
+ step_args (env_args, s),
+ step_args (tgt_args, s),
+ step_args (pkg_args, s),
+ repo);
+
+ if (!r.status)
break;
+ }
- rm.status |= r.status;
+ // bpkg fetch <env-config-args> <tgt-config-args> <pkg-config-args>
+ // <trust-options>
+ //
+ {
+ step_id b (step_id::bpkg_configure_fetch);
+ step_id s (step_id::bpkg_configure_fetch);
+
+ r.status |= run_bpkg (
+ b,
+ trace, r.log, wre,
+ bkp_step, bkp_status, aux_env, last_cmd,
+ "-v",
+ "fetch",
+ "-d", target_conf,
+ step_args (env_args, s),
+ step_args (tgt_args, s),
+ step_args (pkg_args, s),
+ trust_ops);
+
+ if (!r.status)
+ break;
}
}
- // Update.
+ // Configure all the packages using a single bpkg-pkg-build command.
+ //
+ // First, prepare the common and package arguments.
+ //
+ // If no variables are specified in the package configuration, then add
+ // the config.<pkg>.develop=false variable for the main package instead
+ // to trigger its package skeleton creation and loading. Also add this
+ // variable for the external test packages for the same purpose. This
+ // way we make sure that these packages can be used as dependencies of
+ // dependents with configuration clauses.
//
+ // Also add the dependency packages specified in the package
+ // configuration, if any, to configurations where the main package is
+ // being configured.
+ //
+ // Should we also add the dependency packages to configurations where
+ // the test packages are being configured? It feels like we shouldn't.
+ // Moreover, in the future we may decide to support specifying the tests
+ // package configuration in the tests manifest value or some such. In
+ // this case a test package may have its own dependencies to be
+ // configured. What we could probably do now is to never share a bpkg
+ // configuration between the main package and the tests packages if we
+ // configure any dependencies in it. Note that such dependencies may
+ // potentially be unsatisfactory for the test packages (unsatisfactory
+ // version, etc). This, however, seems rather far-fetched so let's keep
+ // it simple for now.
+ //
+ strings common_args;
+ strings pkgs;
+
+ if (target_pkg) // The simple common case (see above)?
{
- operation_result& r (add_result ("update"));
+ // The overall command looks like this (but some parts may be omitted):
+ //
+ // bpkg build --configure-only <env-config-args>
+ // <tgt-config-args>
+ // <pkg-config-args>
+ // <pkg-config-opts>
+ // --
+ // { <pkg-config-vars>|config.<pkg-name>.develop=false }+ <pkg>
+ // { <rtt-config-vars>|config.<runtime-test-name>.develop=false }+ <runtime-test>...
+ // { <dep-config-vars> }+ <main-dep>...
+ // <main-dep>...
+ // <glob-dep>...
+ //
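+ // For example (hypothetical package names and version), with no
+ // configuration variables specified the package list following `--`
+ // could expand to:
+ //
+ // { config.libhello.develop=false }+ libhello/1.2.3
+ // { config.libhello_tests.develop=false }+ libhello-tests
+ //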
+ step_id s (step_id::bpkg_target_configure_build);
+ step_id f1 (step_id::b_configure);
+ step_id f2 (step_id::bpkg_configure_build);
+
+ cstrings eas (step_args (env_args, s, f1, f2));
+ cstrings cas (step_args (tgt_args, s, f1, f2));
+ cstrings pas (step_args (pkg_args, s, f1, f2));
+
+ common_args.push_back ("--checkout-root");
+ common_args.push_back (dist_root.string ());
+
+ common_args.insert (common_args.end (), eas.begin (), eas.end ());
+ common_args.insert (common_args.end (), cas.begin (), cas.end ());
+ common_args.insert (common_args.end (), pas.begin (), pas.end ());
- // Noop, just for the log record to reduce the potential confusion for
- // the combined log reader due to the configure operation log sharing
- // (see above for details).
+ // Add the main package.
//
- change_wd (trace, &r.log, current_directory ());
+ pkgs.push_back ("{");
- // bpkg update <package-name>
+ // @@ config.<pkg>.develop=false
//
- // [bpkg.module.update]
+ // Only add the config.<pkg>.develop variable if there are no package
+ // configuration variables specified.
//
- r.status |= run_bpkg (
- step_id::bpkg_module_update,
- trace, r.log, wre,
- bkp_step, bkp_status, last_cmd,
- "-v",
- "update",
- pkg);
+ auto i (pkg_config_pkgs.find (tm.name.string ()));
- if (!r.status)
- break;
+ if (!pkg_config_vars.empty () || i != pkg_config_pkgs.end ())
+ {
+ if (!pkg_config_vars.empty ())
+ pkgs.insert (pkgs.end (),
+ pkg_config_vars.begin (), pkg_config_vars.end ());
- rm.status |= r.status;
- }
+ if (i != pkg_config_pkgs.end ())
+ pkgs.insert (pkgs.end (), i->second.begin (), i->second.end ());
+ }
+#if 1
+ else
+ pkgs.push_back ("config." + pkg_var + ".develop=false");
+#endif
- // Run the package internal tests if the test operation is supported by
- // the project.
- //
- prj = prj_info (pkg_dir, true /* ext_mods */, "project");
+ pkgs.push_back ("}+");
+
+ pkgs.push_back (pkg_rev);
+
+ // Add the runtime test packages.
+ //
+ for (const auto& t: runtime_tests)
+ {
+ pkgs.push_back ("{");
+
+ // @@ config.<pkg>.develop=false
+ //
+ // Only add the config.<pkg>.develop variable if there are no
+ // package configuration variables specified.
+ //
+ auto i (pkg_config_pkgs.find (t.name.string ()));
+
+ if (i != pkg_config_pkgs.end ())
+ pkgs.insert (pkgs.end (), i->second.begin (), i->second.end ());
+#if 1
+ else
+ pkgs.push_back ("config." + t.name.variable () + ".develop=false");
+#endif
- if (find (prj.operations.begin (), prj.operations.end (), "test") !=
- prj.operations.end ())
+ pkgs.push_back ("}+");
+
+ // Add test dependency package constraints (strip the potential
+ // reflection variable assignment; for example 'bar > 1.0.0').
+ //
+ pkgs.push_back (t.dependency::string ());
+ }
+
+ // Add the main package dependencies.
+ //
+ for (const pair<string, strings>& d: pkg_config_main_deps)
+ {
+ if (!d.second.empty ())
+ {
+ pkgs.push_back ("{");
+ pkgs.insert (pkgs.end (), d.second.begin (), d.second.end ());
+ pkgs.push_back ("}+");
+ }
+
+ pkgs.push_back (d.first);
+ }
+ }
+ else
{
- operation_result& r (add_result ("test"));
- test_result = &r;
+ // The overall command looks like this (but some parts may be omitted):
+ //
+ // bpkg build --configure-only <env-config-args>
+ // <tgt-config-args>
+ // <pkg-config-args>
+ // <pkg-config-opts>
+ // --
+ // { <build-config> <env-config-args>
+ // <tgt-config-args>
+ // <pkg-config-args>
+ // <pkg-config-vars>|config.<pkg-name>.develop=false }+ <pkg>
+ //
+ // { <build-config> <env-config-args>
+ // <tgt-config-args>
+ // <pkg-config-args>
+ // <rtt-config-vars>|config.<runtime-test-name>.develop=false }+ <runtime-test>...
+ //
+ // { <install-config> <env-config-args>
+ // <tgt-config-args>
+ // <pkg-config-args>
+ // <pkg-config-vars> }+ <pkg>
+ //
+ // { <target-config> <env-config-args>
+ // <tgt-config-args>
+ // <pkg-config-args>
+ // <btt-config-vars>|config.<buildtime-test-name>.develop=false }+ <buildtime-test>...
+ //
+ // { <build-config> <install-config> <dep-config-vars> }+ <main-dep>...
+ // { <build-config> <install-config> }+ { <main-dep>... }
+ // <glob-dep>...
+ //
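+ // For example (hypothetical module package, path, and version), the
+ // main package entry could expand to:
+ //
+ // { --config-uuid 00000000-0000-0000-0000-000000000003
+ // --checkout-root /tmp/build/dist
+ // config.libbuild2_hello.develop=false }+ libbuild2-hello/1.2.3
+ //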
- // Use --package-cwd to help ported to build2 third-party packages a
- // bit (see bpkg-pkg-test(1) for details).
+ // Main package configuration name.
//
- // Note that internal tests that load the module itself don't make
- // much sense, thus we don't pass the config.import.* variable on
- // the command line for modules that require bootstrap.
+ const char* conf_uuid (host_pkg ? host_uuid : module_uuid);
+
+ // Add the main package args.
//
- // bpkg test <package-name>
+ // Also add the external runtime test packages here since they share
+ // the configuration directory with the main package.
//
- // [bpkg.module.test]
+ {
+ step_id s (target_pkg ? step_id::bpkg_target_configure_build :
+ host_pkg ? step_id::bpkg_host_configure_build :
+ step_id::bpkg_module_configure_build);
+
+ step_id f1 (step_id::b_configure);
+ step_id f2 (step_id::bpkg_configure_build);
+
+ cstrings eas (step_args (env_args, s, f1, f2));
+ cstrings cas (step_args (tgt_args, s, f1, f2));
+ cstrings pas (step_args (pkg_args, s, f1, f2));
+
+ // Add the main package.
+ //
+ {
+ pkgs.push_back ("{");
+
+ pkgs.push_back ("--config-uuid");
+ pkgs.push_back (conf_uuid);
+
+ pkgs.push_back ("--checkout-root");
+ pkgs.push_back (dist_root.string ());
+
+ pkgs.insert (pkgs.end (), eas.begin (), eas.end ());
+ pkgs.insert (pkgs.end (), cas.begin (), cas.end ());
+ pkgs.insert (pkgs.end (), pas.begin (), pas.end ());
+
+ // @@ config.<pkg>.develop=false
+ //
+ // Only add the config.<pkg>.develop variable if there are no
+ // package configuration variables specified.
+ //
+ auto i (pkg_config_pkgs.find (tm.name.string ()));
+
+ if (!pkg_config_vars.empty () || i != pkg_config_pkgs.end ())
+ {
+ if (!pkg_config_vars.empty ())
+ pkgs.insert (pkgs.end (),
+ pkg_config_vars.begin (), pkg_config_vars.end ());
+
+ if (i != pkg_config_pkgs.end ())
+ pkgs.insert (pkgs.end (), i->second.begin (), i->second.end ());
+ }
+#if 1
+ else
+ pkgs.push_back ("config." + pkg_var + ".develop=false");
+#endif
+
+ pkgs.push_back ("}+");
+
+ pkgs.push_back (pkg_rev);
+ }
+
+ // Add the runtime test packages.
+ //
+ for (const auto& t: runtime_tests)
+ {
+ pkgs.push_back ("{");
+
+ pkgs.push_back ("--config-uuid");
+ pkgs.push_back (conf_uuid);
+
+ pkgs.push_back ("--checkout-root");
+ pkgs.push_back (dist_root.string ());
+
+ pkgs.insert (pkgs.end (), eas.begin (), eas.end ());
+ pkgs.insert (pkgs.end (), cas.begin (), cas.end ());
+ pkgs.insert (pkgs.end (), pas.begin (), pas.end ());
+
+ // @@ config.<pkg>.develop=false
+ //
+ // Only add the config.<pkg>.develop variable if there are no
+ // package configuration variables specified.
+ //
+ auto i (pkg_config_pkgs.find (t.name.string ()));
+
+ if (i != pkg_config_pkgs.end ())
+ pkgs.insert (pkgs.end (), i->second.begin (), i->second.end ());
+#if 1
+ else
+ pkgs.push_back ("config." + t.name.variable () + ".develop=false");
+#endif
+
+ pkgs.push_back ("}+");
+
+ // Strip the potential reflection variable assignment.
+ //
+ pkgs.push_back (t.dependency::string ());
+ }
+ }
+
+ // Add the main package configured in the install configuration and
+ // the external build-time test packages.
//
+ {
+ step_id s (step_id::bpkg_target_configure_build);
+ step_id f1 (step_id::b_configure);
+ step_id f2 (step_id::bpkg_configure_build);
+
+ cstrings eas (step_args (env_args, s, f1, f2));
+ cstrings cas (step_args (tgt_args, s, f1, f2));
+ cstrings pas (step_args (pkg_args, s, f1, f2));
+
+ // Add the main package.
+ //
+ if (create_install)
+ {
+ common_args.push_back ("-d");
+ common_args.push_back (install_conf.string ());
+
+ pkgs.push_back ("{");
+
+ pkgs.push_back ("--config-uuid");
+ pkgs.push_back (install_uuid);
+
+ // Note that we do another re-distribution (with a separate
+ // --checkout-root) in case the package is missing files that
+ // are only used during installation.
+ //
+ pkgs.push_back ("--checkout-root");
+ pkgs.push_back (dist_install_root.string ());
+
+ pkgs.insert (pkgs.end (), eas.begin (), eas.end ());
+ pkgs.insert (pkgs.end (), cas.begin (), cas.end ());
+ pkgs.insert (pkgs.end (), pas.begin (), pas.end ());
+
+ pkgs.insert (pkgs.end (),
+ pkg_config_vars.begin (), pkg_config_vars.end ());
+
+ auto i (pkg_config_pkgs.find (tm.name.string ()));
+
+ if (i != pkg_config_pkgs.end ())
+ pkgs.insert (pkgs.end (), i->second.begin (), i->second.end ());
+
+ pkgs.push_back ("}+");
+
+ pkgs.push_back (pkg_rev);
+ }
+
+ // Add the build-time test packages.
+ //
+ // @@ config.<pkg>.develop=false
+ //
+ for (const auto& t: buildtime_tests)
+ {
+ pkgs.push_back ("{");
+
+ pkgs.push_back ("--config-uuid");
+ pkgs.push_back (target_uuid);
+
+ pkgs.push_back ("--checkout-root");
+ pkgs.push_back (dist_root.string ());
+
+ pkgs.insert (pkgs.end (), eas.begin (), eas.end ());
+ pkgs.insert (pkgs.end (), cas.begin (), cas.end ());
+ pkgs.insert (pkgs.end (), pas.begin (), pas.end ());
+
+ // @@ config.<pkg>.develop=false
+ //
+ // Only add the config.<pkg>.develop variable if there are no
+ // package configuration variables specified.
+ //
+ auto i (pkg_config_pkgs.find (t.name.string ()));
+
+ if (i != pkg_config_pkgs.end ())
+ pkgs.insert (pkgs.end (), i->second.begin (), i->second.end ());
+#if 1
+ else
+ pkgs.push_back ("config." + t.name.variable () + ".develop=false");
+#endif
+
+ pkgs.push_back ("}+");
+
+ // Strip the build-time mark and potential reflection variable
+ // assignment.
+ //
+ pkgs.push_back (t.dependency::string ());
+ }
+ }
+
+ // Add the main package dependencies to those configurations where
+ // the main package is configured.
+ //
+ {
+ // Add dependencies which have some configuration variables
+ // specified and count the number of others.
+ //
+ size_t no_vars (0);
+ for (const pair<string, strings>& d: pkg_config_main_deps)
+ {
+ if (!d.second.empty ())
+ {
+ pkgs.push_back ("{");
+
+ pkgs.push_back ("--config-uuid");
+ pkgs.push_back (conf_uuid);
+
+ if (create_install)
+ {
+ pkgs.push_back ("--config-uuid");
+ pkgs.push_back (install_uuid);
+ }
+
+ pkgs.insert (pkgs.end (), d.second.begin (), d.second.end ());
+
+ pkgs.push_back ("}+");
+
+ pkgs.push_back (d.first);
+ }
+ else
+ ++no_vars;
+ }
+
+ // Add dependencies which have no configuration variables specified.
+ //
+ if (no_vars != 0)
+ {
+ pkgs.push_back ("{");
+
+ pkgs.push_back ("--config-uuid");
+ pkgs.push_back (conf_uuid);
+
+ if (create_install)
+ {
+ pkgs.push_back ("--config-uuid");
+ pkgs.push_back (install_uuid);
+ }
+
+ pkgs.push_back ("}+");
+
+ if (no_vars != 1)
+ pkgs.push_back ("{");
+
+ for (const pair<string, strings>& d: pkg_config_main_deps)
+ {
+ if (d.second.empty ())
+ pkgs.push_back (d.first);
+ }
+
+ if (no_vars != 1)
+ pkgs.push_back ("}");
+ }
+ }
+ }
+
+ // Add the global system dependencies.
+ //
+ for (const pair<string, strings>& d: pkg_config_glob_deps)
+ pkgs.push_back (d.first);
+
+ // Finally, configure all the packages.
+ //
+ {
+ step_id b (step_id::bpkg_configure_build);
+ step_id s (step_id::bpkg_global_configure_build);
+
+ optional<string> dependency_checksum;
+
+ // Only log configuration UUIDs if they are specified on the command
+ // line.
+ //
+ function<pre_run_function> log_uuids (
+ [&r, &trace,
+ target_uuid, host_uuid, module_uuid, install_uuid,
+ target_pkg] ()
+ {
+ if (!target_pkg)
+ {
+ auto log = [&r, &trace] (const char* uuid, const char* name)
+ {
+ string s (uuid);
+ s += " - ";
+ s += name;
+
+ log_comment (trace, r.log, s);
+ };
+
+ log_comment (trace, r.log, "");
+
+ log (target_uuid, "target");
+ log (host_uuid, "host");
+ log (module_uuid, "module");
+ log (install_uuid, "install");
+
+ log_comment (trace, r.log, "");
+ }
+ });
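+
+ // The resulting log comments would look like this:
+ //
+ // 00000000-0000-0000-0000-000000000001 - target
+ // 00000000-0000-0000-0000-000000000002 - host
+ // 00000000-0000-0000-0000-000000000003 - module
+ // 00000000-0000-0000-0000-000000000004 - install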
+
r.status |= run_bpkg (
- step_id::bpkg_module_test,
- trace, r.log, wre,
- bkp_step, bkp_status, last_cmd,
+ b,
+ trace, r.log,
+ log_uuids,
+ dependency_checksum,
+ wre,
+ bkp_step, bkp_status, aux_env, last_cmd,
"-v",
- "test",
- "--package-cwd",
- pkg);
+ "build",
+ "--configure-only",
+ "--rebuild-checksum",
+ tm.dependency_checksum ? *tm.dependency_checksum : "",
+ "--yes",
+ "-d", root_conf,
+ step_args (env_args, s),
+ step_args (tgt_args, s),
+ step_args (pkg_args, s),
+ common_args,
+ pkg_config_opts,
+ (has_runtime_tests || has_buildtime_tests
+ ? bootstrap_import
+ : nullopt),
+ "--",
+ pkgs);
+
+ // The dependency checksum is tricky, here are the possibilities:
+ //
+ // - absent: bpkg terminated abnormally (or was not executed due to
+ // a breakpoint) -- nothing to do here.
+ //
+ // - empty: bpkg terminated normally with error before calculating the
+ // checksum -- nothing to do here either.
+ //
+ // - one line: bpkg checksum that we want.
+ //
+ // - many lines: someone else (e.g., buildfile) printed to stdout,
+ // which we consider an error.
+ //
+ if (dependency_checksum && !dependency_checksum->empty ())
+ {
+ string& s (*dependency_checksum);
+
+ // Make sure that the output contains a single line, and bail out
+ // with the error status if that's not the case.
+ //
+ if (s.find ('\n') == s.size () - 1)
+ {
+ s.pop_back ();
+
+ // If the dependency checksum didn't change, then save it to the
+ // result manifest, clean the logs and bail out with the skip
+ // result status.
+ //
+ if (tm.dependency_checksum && *tm.dependency_checksum == s)
+ {
+ l3 ([&]{trace << "skip";});
+
+ rm.status = result_status::skip;
+ rm.dependency_checksum = move (s);
+ rm.results.clear ();
+ break;
+ }
+
+ // Save the (new) dependency checksum to the result manifest.
+ //
+ // Also note that we save the checksum even if bpkg failed after the
+ // checksum was printed. As a result, we won't be rebuilding the
+ // package until the error is fixed (in the package or the worker) and
+ // the checksum changes, which feels like proper behavior.
+ rm.dependency_checksum = move (s);
+ }
+ else
+ fail_operation (r,
+ "unexpected bpkg output:\n'" + s + '\'',
+ result_status::error);
+ }
if (!r.status)
break;
+ }
- rm.status |= r.status;
+ // Redistribute the main package in both build and install
+ // configurations, if required (test packages will be handled later).
+ //
+ if (exists (dist_src))
+ {
+ change_wd (trace, &r.log, main_pkg_conf);
+
+ step_id b (step_id::bpkg_configure_build);
+
+ if (!redist (b, r, dist_root, pkg_dir))
+ break;
}
- }
- // The main phase.
- //
+ if (exists (dist_install_src))
+ {
+ change_wd (trace, &r.log, rwd / install_conf);
- // Use the global override for modules that require bootstrap.
- //
- string module_import (
- module
- ? ((bootstrap ? "!config.import." : "config.import.") +
- tm.name.variable () + "=" + (rwd / module_dir).string ())
- : "");
+ step_id b (step_id::bpkg_configure_build);
- // Configure.
+ if (!redist (b, r, dist_install_root, pkg_dir))
+ break;
+ }
+
+ rm.status |= r.status;
+ }
+
+#ifdef _WIN32
+ // Give Windows a chance to (presumably) scan any files we may have just
+ // unpacked. Failing that, if we try to overwrite any such file (e.g., a
+ // generated header) we may end up with a permission denied error. Note
+ // also that this is in addition to the 2-second retry we have in our
+ // fdopen() implementation, which is not always enough.
//
- dir_path build_dir ("build"); // Configuration directory name.
- dir_path pkg_config (rwd / (module ? module_dir : build_dir));
+ Sleep (5000);
+#endif
+
+ auto fail_unreached_breakpoint = [&bkp_step, &fail_operation]
+ (operation_result& r)
{
- operation_result& r (configure_result != nullptr
- ? *configure_result
- : add_result ("configure"));
+ assert (bkp_step);
- change_wd (trace, &r.log, rwd);
+ fail_operation (r,
+ "interactive build breakpoint " +
+ to_string (*bkp_step) + " cannot be reached",
+ result_status::abort);
+ };
- // bpkg create <env-modules> <env-config-args> <config-args>
- //
- // bpkg.create
+ // Note that if the bpkg.update step is disabled, we also skip all the
+ // test and install related steps.
+ //
+ if (!step_disabled (step_id::bpkg_update))
+ {
+ // Update the main package.
//
{
- // If the package is a build system module, then make sure it is
- // importable in this configuration (see above about bootstrap).
+ operation_result* pr (&add_result ("update"));
+ operation_result& r (*pr); // @@ TMP: Apple Clang 14.0.3 ICE
+
+ change_wd (trace, &r.log, rwd / main_pkg_conf);
+
+ // bpkg update <env-config-args> <tgt-config-args> <pkg-config-args>
+ // <package-name>
//
+ step_id b (step_id::bpkg_update);
+ step_id s (step_id::bpkg_update);
+
r.status |= run_bpkg (
- step_id::bpkg_create,
+ b,
trace, r.log, wre,
- bkp_step, bkp_status, last_cmd,
- "-V",
- "create",
- "-d", build_dir.string (),
- "--wipe",
- step_args (modules, step_id::bpkg_create),
- step_args (env_args, step_id::bpkg_create),
- cargs,
- module && !bootstrap ? module_import.c_str () : nullptr);
+ bkp_step, bkp_status, aux_env, last_cmd,
+ "-v",
+ "update",
+ step_args (env_args, s),
+ step_args (tgt_args, s),
+ step_args (pkg_args, s),
+ pkg);
if (!r.status)
break;
+
+ rm.status |= r.status;
}
- change_wd (trace, &r.log, build_dir);
+ b_project_info prj (
+ prj_info (pkg_dir,
+ b_info_flags::ext_mods | b_info_flags::subprojects,
+ "project"));
- // bpkg add <env-config-args> <config-args> <repository-url>
+ // If the package turned out to be non-installable, then disable all the
+ // steps which cannot be performed for such a package.
//
- // bpkg.configure.add
- //
- r.status |= run_bpkg (
- step_id::bpkg_configure_add,
- trace, r.log, wre,
- bkp_step, bkp_status, last_cmd,
- "-v",
- "add",
- step_args (env_args, step_id::bpkg_configure_add),
- step_args (config_args, step_id::bpkg_configure_add),
- repo);
-
- if (!r.status)
- break;
+ if (find (prj.operations.begin (),
+ prj.operations.end (),
+ "install") == prj.operations.end ())
+ {
+ install_root = nullopt;
+ bindist = nullopt;
+ sys_install = false;
+ bindist_upload = false;
+ }
- // bpkg fetch <env-config-args> <config-args> <trust-options>
+ // Re-distribute the external test packages if they come from a version
+ // control-based repository, then update and test them in the bpkg
+ // configuration in the current working directory. Optionally pass the
+ // config.import.* variable override and/or set the environment
+ // variables for the bpkg processes. Return true if all operations for
+ // all packages succeeded.
//
- // bpkg.configure.fetch
+ // Pass true as the installed argument to use the test separate installed
+ // phase step ids (bpkg.test-separate-installed.*) and false to use the
+ // test separate phase step ids (bpkg.test-separate.*). In both cases fall
+ // back to the main phase step ids (bpkg.*) when no environment/
+ // configuration arguments are specified for them.
//
- r.status |= run_bpkg (
- step_id::bpkg_configure_fetch,
- trace, r.log, wre,
- bkp_step, bkp_status, last_cmd,
- "-v",
- "fetch",
- step_args (env_args, step_id::bpkg_configure_fetch),
- step_args (config_args, step_id::bpkg_configure_fetch),
- trust_ops);
+ auto test = [&trace, &wre,
+ &bkp_step, &bkp_status, &aux_env, &last_cmd,
+ &step_args, &env_args, &tgt_args, &pkg_args,
+ &bootstrap_import,
+ &redist]
+ (operation_result& r,
+ const small_vector<test_dependency, 1>& tests,
+ const dir_path& dist_root,
+ bool installed,
+ bool update_only,
+ const small_vector<string, 1>& envvars = {})
+ {
+ const optional<string>& import (!installed
+ ? bootstrap_import
+ : nullopt);
- if (!r.status)
- break;
+ for (const test_dependency& td: tests)
+ {
+ const string& pkg (td.name.string ());
+
+ // Re-distribute.
+ //
+ if (exists (dist_root))
+ {
+ // Note that re-distributing the test package is a bit tricky
+ // since we don't know its version and so cannot deduce its
+ // source directory name easily. We could potentially run the
+ // bpkg-status command after the package is configured and parse
+ // the output to obtain the version. Let's, however, keep it
+ // simple and find the source directory using the package
+ // directory name pattern.
+ //
+ try
+ {
+ dir_path pkg_dir;
+
+ // Note: doesn't follow symlinks.
+ //
+ path_search (dir_path (pkg + "-*/"),
+ [&pkg_dir] (path&& pe, const string&, bool interm)
+ {
+ if (!interm)
+ pkg_dir = path_cast<dir_path> (move (pe));
+
+ return interm;
+ },
+ dist_root,
+ path_match_flags::none);
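+
+ // For example (hypothetical version), for the foo-tests package this
+ // pattern would match a source directory such as foo-tests-1.2.3/
+ // under dist_root.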
+
+ if (!pkg_dir.empty ())
+ {
+ step_id b (
+ installed
+ ? step_id::bpkg_test_separate_installed_configure_build
+ : step_id::bpkg_configure_build);
+
+ if (!redist (b, r, dist_root, pkg_dir, import, envvars))
+ return false;
+ }
+ }
+ catch (const system_error& e)
+ {
+ fail << "unable to scan directory " << dist_root << ": " << e;
+ }
+ }
+
+ // Update.
+ //
+ // bpkg update <env-config-args> <tgt-config-args> <pkg-config-args>
+ // <package-name>
+ //
+ {
+ step_id b (installed
+ ? step_id::bpkg_test_separate_installed_update
+ : step_id::bpkg_test_separate_update);
+
+ step_id s (b);
+
+ step_id f (step_id::bpkg_update);
+
+ r.status |= run_bpkg (
+ b,
+ envvars,
+ trace, r.log, wre,
+ bkp_step, bkp_status, aux_env, last_cmd,
+ "-v",
+ "update",
+ step_args (env_args, s, f),
+ step_args (tgt_args, s, f),
+ step_args (pkg_args, s, f),
+ import,
+ pkg);
+
+ if (!r.status)
+ return false;
+ }
- // bpkg build --configure-only <env-config-args> <config-args>
- // <package-name>/<package-version>
+ // Test.
+ //
+ // Note that we assume that the package supports the test operation
+ // since this is its main purpose.
+ //
+ // bpkg test <env-config-args> <tgt-config-args> <pkg-config-args>
+ // <package-name>
+ //
+ if (!update_only)
+ {
+ step_id b (installed
+ ? step_id::bpkg_test_separate_installed_test
+ : step_id::bpkg_test_separate_test);
+
+ step_id s (b);
+
+ step_id f (step_id::bpkg_test);
+
+ r.status |= run_bpkg (
+ b,
+ envvars,
+ trace, r.log, wre,
+ bkp_step, bkp_status, aux_env, last_cmd,
+ "-v",
+ "test",
+ "--package-cwd", // See above for details.
+ step_args (env_args, s, f),
+ step_args (tgt_args, s, f),
+ step_args (pkg_args, s, f),
+ import,
+ pkg);
+
+ if (!r.status)
+ return false;
+ }
+ }
+
+ return true;
+ };
+
+ // Test the main package.
//
- // bpkg.configure.build
+ // Run the internal tests if the test operation is supported by the
+ // project, but only for the target package or if the configuration is
+ // self-hosted.
//
- if (!module) // Note: the module is already built in the pre-step.
+ bool has_internal_tests ((target_pkg || selfhost) &&
+ find (prj.operations.begin (),
+ prj.operations.end (),
+ "test") != prj.operations.end ());
+
+ if (has_internal_tests || has_runtime_tests || has_buildtime_tests)
{
- r.status |= run_bpkg (
- step_id::bpkg_configure_build,
- trace, r.log, wre,
- bkp_step, bkp_status, last_cmd,
- "-v",
- "build",
- "--configure-only",
- "--checkout-root", dist_root,
- "--yes",
- step_args (env_args, step_id::bpkg_configure_build),
- step_args (config_args, step_id::bpkg_configure_build),
- "--",
- pkg_rev);
+ operation_result* pr (&add_result ("test"));
+ operation_result& r (*pr); // @@ TMP: Apple Clang 14.0.3 ICE
- if (!r.status)
- break;
+ // Run internal tests.
+ //
+ if (has_internal_tests && !step_disabled (step_id::bpkg_test))
+ {
+ // Use --package-cwd to help third-party packages ported to build2 a
+ // bit (see bpkg-pkg-test(1) for details).
+ //
+ // Note that internal tests that load the module itself don't make
+ // much sense, thus we don't pass the config.import.* variable on
+ // the command line for modules that require bootstrap.
+ //
+ // bpkg test <env-config-args> <tgt-config-args> <pkg-config-args>
+ // <package-name>
+ //
+ step_id b (step_id::bpkg_test);
+ step_id s (step_id::bpkg_test);
- bool dist (exists (dist_src));
- const dir_path& src_dir (dist ? dist_src : pkg_dir);
+ r.status |= run_bpkg (
+ b,
+ trace, r.log, wre,
+ bkp_step, bkp_status, aux_env, last_cmd,
+ "-v",
+ "test",
+ "--package-cwd",
+ step_args (env_args, s),
+ step_args (tgt_args, s),
+ step_args (pkg_args, s),
+ pkg);
- pm = parse_manifest<package_manifest> (src_dir / mf, "package");
+ if (!r.status)
+ break;
+ }
+ //
+ // Fail if the breakpoint refers to the bpkg.test step but the package
+ // has no internal tests or this step is disabled.
+ //
+ else if (bkp_step && *bkp_step == step_id::bpkg_test)
+ {
+ fail_unreached_breakpoint (r);
+ break;
+ }
- if (dist)
+ // External tests.
+ //
+ // Note that if the bpkg.test-separate.update step is disabled, we
+ // also skip bpkg.test-separate.test.
+ //
+ if ((has_runtime_tests || has_buildtime_tests) &&
+ !step_disabled (step_id::bpkg_test_separate_update))
{
- if (!redist (step_id::bpkg_configure_build, r, dist_root, pkg_dir))
+ bool update_only (step_disabled (step_id::bpkg_test_separate_test));
+
+ // Fail if the breakpoint refers to the bpkg.test-separate.test step
+ // but this step is disabled.
+ //
+ if (update_only &&
+ bkp_step &&
+ *bkp_step == step_id::bpkg_test_separate_test)
+ {
+ fail_unreached_breakpoint (r);
break;
+ }
- rm.status |= r.status;
+ // External runtime tests.
+ //
+ // Note that we assume that these packages belong to the dependent
+ // package's repository or its complement repositories, recursively.
+ // Thus, we test them in the configuration used to build the
+ // dependent package.
+ //
+ if (has_runtime_tests)
+ {
+ if (!test (r,
+ runtime_tests,
+ dist_root,
+ false /* installed */,
+ update_only))
+ break;
+ }
+
+ // External build-time tests.
+ //
+ if (has_buildtime_tests)
+ {
+ change_wd (trace, &r.log, rwd / target_conf);
+
+ if (!test (r,
+ buildtime_tests,
+ dist_root,
+ false /* installed */,
+ update_only))
+ break;
+ }
+ }
+ //
+ // Fail if the breakpoint refers to some of the bpkg.test-separate.*
+ // steps but the package either has no external tests or the
+ // bpkg.test-separate.update step is disabled.
+ //
+ else if (bkp_step &&
+ *bkp_step >= step_id::bpkg_test_separate_update &&
+ *bkp_step <= step_id::bpkg_test_separate_test)
+ {
+ fail_unreached_breakpoint (r);
+ break;
}
+
+ rm.status |= r.status;
+ }
+ //
+ // Fail if the breakpoint refers to some of the test steps but the
+ // package has no tests.
+ //
+ else if (bkp_step &&
+ *bkp_step >= step_id::bpkg_test &&
+ *bkp_step <= step_id::bpkg_test_separate_test)
+ {
+ fail_unreached_breakpoint (add_result ("test"));
+ break;
}
- rm.status |= r.status;
- }
+ // Install from source.
+ //
+ if (install_root)
+ {
+ operation_result* pr (&add_result ("install"));
+ operation_result& r (*pr); // @@ TMP: Apple Clang 14.0.3 ICE
- // Update.
- //
- if (!module) // Note: the module is already built in the pre-step.
- {
- operation_result& r (add_result ("update"));
+ change_wd (trace, &r.log, effective_install_conf);
+
+ // Note that for a host or module package we no longer need the target
+ // configuration, if present. So let's free up some disk space.
+ //
+ if (!target_pkg && create_target)
+ rm_r (trace, &r.log, rwd / target_conf);
+
+ // Install.
+ //
+ {
+ // bpkg install <env-config-args> <tgt-config-args> <pkg-config-args>
+ // <package-name>
+ //
+ step_id b (step_id::bpkg_install);
+ step_id s (step_id::bpkg_install);
+
+ r.status |= run_bpkg (
+ b,
+ trace, r.log, wre,
+ bkp_step, bkp_status, aux_env, last_cmd,
+ "-v",
+ "install",
+ step_args (env_args, s),
+ step_args (tgt_args, s),
+ step_args (pkg_args, s),
+ pkg);
+
+ if (!r.status)
+ break;
+ }
+
+ // Run ldconfig.
+ //
+ if (step_enabled (step_id::bbot_install_ldconfig))
+ {
+ // sudo ldconfig <env-config-args> <tgt-config-args> <pkg-config-args>
+ //
+ step_id b (step_id::bbot_install_ldconfig);
+ step_id s (step_id::bbot_install_ldconfig);
+ step_id ss (step_id::bbot_install_ldconfig);
+
+ r.status |= run_ldconfig (
+ b,
+ trace, r.log, wre,
+ bkp_step, bkp_status, aux_env, last_cmd,
+ step_args (env_args, s, nullopt, nullopt, ss),
+ step_args (tgt_args, s, nullopt, nullopt, ss),
+ step_args (pkg_args, s, nullopt, nullopt, ss));
- // bpkg update <env-config-args> <config-args> <package-name>
+ if (!r.status)
+ break;
+ }
+ //
+ // Fail if the breakpoint refers to the bbot.install.ldconfig step but
+ // this step is disabled.
+ //
+ else if (bkp_step && *bkp_step == step_id::bbot_install_ldconfig)
+ {
+ fail_unreached_breakpoint (r);
+ break;
+ }
+
+ rm.status |= r.status;
+ }
//
- // bpkg.update
+ // Fail if the breakpoint refers to the bpkg.install related steps but
+ // the package is not supposed to be installed from source.
//
- r.status |= run_bpkg (
- step_id::bpkg_update,
- trace, r.log, wre,
- bkp_step, bkp_status, last_cmd,
- "-v",
- "update",
- step_args (env_args, step_id::bpkg_update),
- step_args (config_args, step_id::bpkg_update),
- pkg);
-
- if (!r.status)
+ else if (bkp_step &&
+ *bkp_step >= step_id::bpkg_install &&
+ *bkp_step <= step_id::bbot_install_ldconfig)
+ {
+ fail_unreached_breakpoint (add_result ("install"));
break;
+ }
- rm.status |= r.status;
- }
+ // Generate the binary distribution package.
+ //
+ // Note that if the bbot.bindist.upload step is enabled, it makes sense
+ // to only copy the generated binary distribution files to the
+ // upload/bindist/<distribution>/ directory after the binary
+ // distribution packages are tested, i.e., after the potential
+ // bbot.sys-uninstall.* steps.
+ //
+ // The following bindist_* structures contain a subset of members of the
+ // corresponding structures described in the STRUCTURED RESULT section
+ // of the bpkg-pkg-bindist(1) man page. Note: these are needed later
+ // for uninstall and upload.
+ //
+ struct bindist_os_release
+ {
+ string name_id;
+ optional<string> version_id;
+ };
- // Run the package internal tests if the test operation is supported by
- // the project, except for the build system module which is taken care of
- // in the pre-step.
- //
- bool internal_tests;
+ struct bindist_file
+ {
+ string type;
+ bbot::path path; // Absolute and normalized.
+ optional<string> system_name;
+ };
- if (module)
- {
- internal_tests = false;
- }
- else
- {
- prj = prj_info (pkg_dir, true /* ext_mods */, "project");
+ struct bindist_package
+ {
+ string name;
+ string version;
+ optional<string> system_version;
+ vector<bindist_file> files;
+ };
- internal_tests = find (prj.operations.begin (),
- prj.operations.end (),
- "test") != prj.operations.end ();
- }
+ struct bindist_result_type
+ {
+ string distribution;
+ string architecture;
+ bindist_os_release os_release;
+ bindist_package package;
+ vector<bindist_package> dependencies;
+ };
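+
+ // For orientation, a minimal bindist-result.json that these structures
+ // are populated from might look along these lines (hypothetical
+ // values; see the STRUCTURED RESULT section of bpkg-pkg-bindist(1)
+ // for the full format):
+ //
+ // {
+ //   "distribution": "debian",
+ //   "architecture": "amd64",
+ //   "os_release": { "name_id": "debian", "version_id": "12" },
+ //   "package": {
+ //     "name": "libhello",
+ //     "version": "1.2.3",
+ //     "system_version": "1.2.3-0~debian12",
+ //     "files": [ { "type": "deb",
+ //                  "path": "/build/bindist/libhello_1.2.3_amd64.deb",
+ //                  "system_name": "libhello" } ]
+ //   },
+ //   "dependencies": []
+ // }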
- // Run the package external tests, if specified. But first filter them
- // against the test-exclude task manifest values using the package names.
- //
- // Note that a proper implementation should also make sure that the
- // excluded test package version matches the version that will supposedly
- // be configured by bpkg and probably abort the build if that's not the
- // case. Such a mismatch can happen due to some valid reasons (the
- // repository was updated since the task was issued, etc) and should
- // probably be followed with automatic rebuild (the flake monitor idea).
- // Anyway, this all requires additional thinking, so let's keep it simple
- // for now.
- //
- // Filter the external test dependencies in place.
- //
- pm.tests.erase (
- remove_if (pm.tests.begin (), pm.tests.end (),
- [&tm] (const test_dependency& td)
- {
- return find_if (tm.test_exclusions.begin (),
- tm.test_exclusions.end (),
- [&td] (const package& te)
- {
- return te.name == td.name;
- }) != tm.test_exclusions.end ();
- }),
- pm.tests.end ());
-
- bool external_tests (!pm.tests.empty ());
-
- // Configure, re-distribute if comes from a version control-based
- // repository, update, and test packages in the bpkg configuration in the
- // current working directory. Optionally pass the config.import.* variable
- // override and/or set the environment variables for bpkg processes.
- // Return true if all operations for all packages succeed.
- //
- // Pass true as the installed argument to use the test separate installed
- // phase step ids (bpkg.test-separate-installed.*) and the test separate
- // phase step ids (bpkg.test-separate.*) otherwise. In both cases fall
- // back to the main phase step ids (bpkg.*) when no environment/
- // configuration arguments are specified for them.
- //
- // Pass true as the sys_dep argument to configure the dependent package as
- // a system dependency, which is normally required for testing modules and
- // installed dependents. Note that bpkg configures the dependent package
- // as a special dependency for the test package.
- //
- auto test = [&trace, &wre,
- &bkp_step, &bkp_status, &last_cmd,
- &step_args, &config_args, &env_args,
- &pm,
- &redist]
- (operation_result& r,
- const dir_path& dist_root,
- bool installed,
- bool sys_dep,
- const char* import = nullptr,
- const small_vector<string, 1>& envvars = {})
- {
- for (const test_dependency& td: pm.tests)
+ bindist_result_type bindist_result;
+
+ const dir_path& bindist_conf (
+ create_install ? install_conf : main_pkg_conf);
+
+ // Make it absolute for the sake of diagnostics.
+ //
+ path bindist_result_file (rwd / bindist_conf / "bindist-result.json");
+
+ if (bindist)
{
- const string& pkg (td.name.string ());
+ operation_result* pr (&add_result ("bindist"));
+ operation_result& r (*pr); // @@ TMP: Apple Clang 14.0.3 ICE
+
+ // Fail if the breakpoint refers to a bpkg.bindist.* step but this
+ // step differs from the enabled one.
+ //
+ if (bkp_step &&
+ (*bkp_step == step_id::bpkg_bindist_debian ||
+ *bkp_step == step_id::bpkg_bindist_fedora ||
+ *bkp_step == step_id::bpkg_bindist_archive) &&
+ *bkp_step != *bindist)
+ {
+ fail_unreached_breakpoint (r);
+ break;
+ }
+
+ change_wd (trace, &r.log, rwd);
- // Configure.
+ // Note that for a host or module package we don't need the target
+ // configuration anymore, if present. So let's free up the space a
+ // little bit.
//
- // bpkg build --configure-only <env-config-args> <config-args>
- // '<package-name>[ <version-constraint>]'
+ if (!target_pkg && create_target)
+ rm_r (trace, &r.log, rwd / target_conf);
+
+ string distribution;
+ dir_path output_root;
+
+ switch (*bindist)
+ {
+ case step_id::bpkg_bindist_debian:
+ {
+ distribution = "debian";
+ output_root = dir_path ("bindist");
+ break;
+ }
+ case step_id::bpkg_bindist_fedora:
+ {
+ distribution = "fedora";
+ break;
+ }
+ case step_id::bpkg_bindist_archive:
+ {
+ distribution = "archive";
+ output_root = dir_path ("bindist");
+ break;
+ }
+ default: assert (false);
+ }
+
+ // bpkg bindist --distribution <distribution>
+ // <env-config-args> <tgt-config-args> <pkg-config-args>
+ // <package-name>
//
- // bpkg.test-separate[-installed].configure.build (bpkg.configure.build)
+ // Note that if we are installing the result, we need to generate
+ // packages for all the dependencies unless they are included in the
+ // package (with --recursive). The way we are going to arrange for
+ // this is by specifying --recursive=separate first and letting any
+ // user --recursive option override that.
//
- step_id s (installed
- ? step_id::bpkg_test_separate_installed_configure_build
- : step_id::bpkg_test_separate_configure_build);
+ step_id b (*bindist);
+ step_id s (*bindist);
r.status |= run_bpkg (
- s,
- envvars,
- trace, r.log, wre,
- bkp_step, bkp_status, last_cmd,
+ b,
+ trace, r.log, bindist_result_file, wre,
+ bkp_step, bkp_status, aux_env, last_cmd,
"-v",
- "build",
- "--configure-only",
- "--checkout-root", dist_root,
- "--yes",
- step_args (env_args, s, step_id::bpkg_configure_build),
- step_args (config_args, s, step_id::bpkg_configure_build),
- import,
- "--",
- td.string (),
- sys_dep ? ("?sys:" + pm.name.string ()).c_str () : nullptr);
+ "bindist",
+ "--distribution", distribution,
+ sys_install ? cstrings ({"--recursive", "separate"}) : cstrings (),
+ "--structured-result", "json",
+ (!output_root.empty ()
+ ? cstrings ({"--output-root", output_root.string ().c_str ()})
+ : cstrings ()),
+ "-d", bindist_conf,
+ step_args (env_args, s),
+ step_args (tgt_args, s),
+ step_args (pkg_args, s),
+ pkg);
if (!r.status)
- return false;
+ break;
- // Note that re-distributing the test package is a bit tricky since we
- // don't know its version and so cannot deduce its source directory
- // name easily. We could potentially run the bpkg-status command after
- // the package is configured and parse the output to obtain the
- // version. Let's, however, keep it simple and find the source
- // directory using the package directory name pattern.
+ // Parse the structured result JSON.
//
- if (exists (dist_root))
try
{
- dir_path pkg_dir;
+ ifdstream is (bindist_result_file);
+ json::parser p (is, bindist_result_file.string ());
+
+ using event = json::event;
+
+ auto bad_json = [&p] (string d)
+ {
+ throw json::invalid_json_input (p.input_name,
+ p.line (),
+ p.column (),
+ p.position (),
+ move (d));
+ };
+
+ // Parse bindist_os_release object.
+ //
+ auto parse_os_release = [&p] ()
+ {
+ // enter: after begin_object
+ // leave: after end_object
+
+ bindist_os_release r;
+
+ // Skip unknown/uninteresting members.
+ //
+ while (p.next_expect (event::name, event::end_object))
+ {
+ const string& n (p.name ());
+
+ if (n == "name_id")
+ {
+ r.name_id = p.next_expect_string ();
+ }
+ else if (n == "version_id")
+ {
+ r.version_id = p.next_expect_string ();
+ }
+ else
+ p.next_expect_value_skip ();
+ }
+
+ return r;
+ };
+
+ // Parse a bindist_file object.
+ //
+ auto parse_file = [&p, &bad_json] ()
+ {
+ // enter: after begin_object
+ // leave: after end_object
+
+ bindist_file r;
+
+ // Skip unknown/uninteresting members.
+ //
+ while (p.next_expect (event::name, event::end_object))
+ {
+ const string& n (p.name ());
- path_search (dir_path (pkg + "-*/"),
- [&pkg_dir] (path&& pe, const string&, bool interm)
- {
- if (!interm)
- pkg_dir = path_cast<dir_path> (move (pe));
+ if (n == "type")
+ {
+ r.type = p.next_expect_string ();
+ }
+ else if (n == "path")
+ {
+ try
+ {
+ r.path =
+ path (p.next_expect_string ()).complete ().normalize ();
+ }
+ catch (const invalid_path& e)
+ {
+ bad_json ("invalid package file path '" + e.path + "'");
+ }
+ }
+ else if (n == "system_name")
+ {
+ r.system_name = p.next_expect_string ();
+ }
+ else
+ p.next_expect_value_skip ();
+ }
- return interm;
- },
- dist_root);
+ return r;
+ };
- if (!pkg_dir.empty () &&
- !redist (s, r, dist_root, pkg_dir, import, envvars))
- return false;
+ // Parse a bindist_package object.
+ //
+ auto parse_package = [&p, &parse_file] ()
+ {
+ // enter: after begin_object
+ // leave: after end_object
+
+ bindist_package r;
+
+ // Skip unknown/uninteresting members.
+ //
+ while (p.next_expect (event::name, event::end_object))
+ {
+ const string& n (p.name ());
+
+ if (n == "name")
+ {
+ r.name = p.next_expect_string ();
+ }
+ else if (n == "version")
+ {
+ r.version = p.next_expect_string ();
+ }
+ else if (n == "system_version")
+ {
+ r.system_version = p.next_expect_string ();
+ }
+ else if (n == "files")
+ {
+ p.next_expect (event::begin_array);
+
+ while (p.next_expect (event::begin_object, event::end_array))
+ r.files.push_back (parse_file ());
+ }
+ else
+ p.next_expect_value_skip ();
+ }
+
+ return r;
+ };
+
+ // Parse the bindist_result.
+ //
+ // Note that if the bbot.bindist.upload step is enabled, then we
+ // require bindist_result.os_release.version_id to be present. This
+ // way the uploaded binary package can be published for a specific
+ // version of the distribution.
+ //
+ p.next_expect (event::begin_object);
+
+ while (p.next_expect (event::name, event::end_object))
+ {
+ const string& n (p.name ());
+
+ if (n == "distribution")
+ {
+ bindist_result.distribution = p.next_expect_string ();
+
+ if (bindist_result.distribution != distribution)
+ bad_json ("expected distribution '" + distribution +
+ "' instead of '" + bindist_result.distribution + "'");
+ }
+ else if (n == "architecture")
+ {
+ bindist_result.architecture = p.next_expect_string ();
+ }
+ else if (n == "os_release")
+ {
+ p.next_expect (event::begin_object);
+ bindist_result.os_release = parse_os_release ();
+
+ if (!bindist_result.os_release.version_id && bindist_upload)
+ bad_json ("version_id must be present if bbot.bindist.upload "
+ "step is enabled");
+ }
+ else if (n == "package")
+ {
+ p.next_expect (event::begin_object);
+ bindist_result.package = parse_package ();
+ }
+ else if (n == "dependencies")
+ {
+ p.next_expect (event::begin_array);
+
+ while (p.next_expect (event::begin_object, event::end_array))
+ bindist_result.dependencies.push_back (parse_package ());
+ }
+ else
+ p.next_expect_value_skip ();
+ }
+ }
+ catch (const json::invalid_json_input& e)
+ {
+ fail_operation (
+ r,
+ string ("invalid bpkg-pkg-bindist result json input: ") +
+ e.what (),
+ result_status::abort,
+ e.name,
+ e.line,
+ e.column);
+
+ // Fall through.
}
- catch (const system_error& e)
+ catch (const io_error& e)
{
- fail << "unable to scan directory " << dist_root << ": " << e;
+ fail << "unable to read " << bindist_result_file << ": " << e;
}
- // Update.
+ if (!r.status)
+ break;
+
+ log_line ("generated " + distribution + " package for " + pkg + '/' +
+ ver.string () + ':',
+ r.log);
+
+ for (const bindist_file& f: bindist_result.package.files)
+ log_line (" " + f.path.string (), r.log);
+
+ rm.status |= r.status;
+ }
+ //
+ // Fail if the breakpoint refers to a bpkg.bindist.* step but this step
+ // is disabled.
+ //
+ else if (bkp_step &&
+ (*bkp_step == step_id::bpkg_bindist_debian ||
+ *bkp_step == step_id::bpkg_bindist_fedora ||
+ *bkp_step == step_id::bpkg_bindist_archive))
+ {
+ fail_unreached_breakpoint (add_result ("bindist"));
+ break;
+ }
+
+ // Install from the binary distribution package generated on a
+ // bpkg.bindist.* step.
+ //
+ if (sys_install)
+ {
+ operation_result* pr (&add_result ("sys-install"));
+ operation_result& r (*pr); // @@ TMP: Apple Clang 14.0.3 ICE
+
+ // Fail if the breakpoint refers to the bbot.sys-install step since
+ // it has no specific command associated.
+ //
+ if (bkp_step && *bkp_step == step_id::bbot_sys_install)
+ {
+ fail_unreached_breakpoint (r);
+ break;
+ }
+
+ // Noop, just for the log record.
+ //
+ change_wd (trace, &r.log, rwd);
+
+ // Collect the binary package files.
//
- // bpkg update <env-config-args> <config-args> <package-name>
+ // Specifically, for now we only treat files that have the system
+ // name specified as package files.
//
- // bpkg.test-separate[-installed].update (bpkg.update)
+ cstrings pfs;
+
+ auto add_package_files = [&pfs] (const vector<bindist_file>& bfs)
+ {
+ for (const bindist_file& f: bfs)
+ {
+ if (f.system_name)
+ pfs.push_back (f.path.string ().c_str ());
+ }
+ };
+
+ add_package_files (bindist_result.package.files);
+
+ for (const bindist_package& d: bindist_result.dependencies)
+ add_package_files (d.files);
+
+ // Install for the `debian` distribution.
//
- s = installed
- ? step_id::bpkg_test_separate_installed_update
- : step_id::bpkg_test_separate_update;
+ if (*bindist == step_id::bpkg_bindist_debian)
+ {
+ // Update package index.
+ //
+ {
+ // sudo apt-get update <env-config-args>
+ // <tgt-config-args>
+ // <pkg-config-args>
+ //
+ step_id b (step_id::bbot_sys_install_apt_get_update);
+ step_id s (step_id::bbot_sys_install_apt_get_update);
+ step_id ss (step_id::bbot_sys_install_apt_get_update);
+
+ r.status |= run_apt_get (
+ b,
+ trace, r.log, wre,
+ bkp_step, bkp_status, aux_env, last_cmd,
+ "update",
+ "--assume-yes",
+ step_args (env_args, s, nullopt, nullopt, ss),
+ step_args (tgt_args, s, nullopt, nullopt, ss),
+ step_args (pkg_args, s, nullopt, nullopt, ss));
+
+ if (!r.status)
+ break;
+ }
- r.status |= run_bpkg (
- s,
- envvars,
- trace, r.log, wre,
- bkp_step, bkp_status, last_cmd,
- "-v",
- "update",
- step_args (env_args, s, step_id::bpkg_update),
- step_args (config_args, s, step_id::bpkg_update),
- import,
- pkg);
+ // Install.
+ //
+ {
+ // sudo apt-get install <env-config-args>
+ // <tgt-config-args>
+ // <pkg-config-args>
+ // <distribution-package-file>...
+ //
+ // Note that apt-get install requires a directory separator in an
+ // argument for it to be treated as a file rather than a name. The
+ // paths we pass are absolute.
+ //
+ step_id b (step_id::bbot_sys_install_apt_get_install);
+ step_id s (step_id::bbot_sys_install_apt_get_install);
+ step_id ss (step_id::bbot_sys_install_apt_get_install);
+
+ r.status |= run_apt_get (
+ b,
+ trace, r.log, wre,
+ bkp_step, bkp_status, aux_env, last_cmd,
+ "install",
+ "--assume-yes",
+ step_args (env_args, s, nullopt, nullopt, ss),
+ step_args (tgt_args, s, nullopt, nullopt, ss),
+ step_args (pkg_args, s, nullopt, nullopt, ss),
+ pfs);
+
+ if (!r.status)
+ break;
+ }
+ }
+ //
+ // Fail if the breakpoint refers to a bbot.sys-install.apt-get.* step
+ // but the distribution is other than `debian`.
+ //
+ else if (bkp_step &&
+ *bkp_step >= step_id::bbot_sys_install_apt_get_update &&
+ *bkp_step <= step_id::bbot_sys_install_apt_get_install)
+ {
+ fail_unreached_breakpoint (r);
+ break;
+ }
+ //
+ // Install for the `fedora` distribution.
+ //
+ else if (*bindist == step_id::bpkg_bindist_fedora)
+ {
+ // sudo dnf install <env-config-args>
+ // <tgt-config-args>
+ // <pkg-config-args>
+ // <distribution-package-file>...
+ //
+ step_id b (step_id::bbot_sys_install_dnf_install);
+ step_id s (step_id::bbot_sys_install_dnf_install);
+ step_id ss (step_id::bbot_sys_install_dnf_install);
- if (!r.status)
- return false;
+ r.status |= run_dnf (
+ b,
+ trace, r.log, wre,
+ bkp_step, bkp_status, aux_env, last_cmd,
+ "install",
+ "--refresh",
+ "--assumeyes",
+ step_args (env_args, s, nullopt, nullopt, ss),
+ step_args (tgt_args, s, nullopt, nullopt, ss),
+ step_args (pkg_args, s, nullopt, nullopt, ss),
+ pfs);
- // Test.
+ if (!r.status)
+ break;
+ }
+ //
+ // Fail if the breakpoint refers to a bbot.sys-install.dnf.* step but
+ // the distribution is other than `fedora`.
//
- // Note that we assume that the package supports the test operation
- // since this is its main purpose.
+ else if (bkp_step && *bkp_step == step_id::bbot_sys_install_dnf_install)
+ {
+ fail_unreached_breakpoint (r);
+ break;
+ }
+ //
+ // Install for the `archive` distribution.
//
- // bpkg test <env-config-args> <config-args> <package-name>
+ // Since there is no easy way to extract from multiple archives with a
+ // single command, we run tar in a loop.
//
- // bpkg.test-separate[-installed].test (bpkg.test)
+ // Note that it is assumed that the --directory and --strip-components
+ // options are passed via <*-config-args>. The extracted executables
+ // can be arranged to be found by setting config.install.root for
+ // bpkg.target.create, etc (the same way as for installing from
+ // source).
//
- s = installed
- ? step_id::bpkg_test_separate_installed_test
- : step_id::bpkg_test_separate_test;
+ else if (*bindist == step_id::bpkg_bindist_archive)
+ {
+ // If the bbot.sys-install:config.install.root variable is
+ // specified, then make sure the directory it refers to exists by
+ // the time we run `tar -xf`, so that this command doesn't fail
+ // trying to extract into a non-existent directory. Note that we do
+ // that regardless of whether the package is a build system module
+ // or not.
+ //
+ optional<dir_path> ir (
+ config_install_root (step_id::bbot_sys_install));
- r.status |= run_bpkg (
- s,
- envvars,
- trace, r.log, wre,
- bkp_step, bkp_status, last_cmd,
- "-v",
- "test",
- "--package-cwd", // See above for details.
- step_args (env_args, s, step_id::bpkg_test),
- step_args (config_args, s, step_id::bpkg_test),
- import,
- pkg);
+ if (ir)
+ mk_p (trace, &r.log, *ir, true /* sudo */);
- if (!r.status)
- return false;
- }
+ if (!module_pkg)
+ {
+ if (!ir)
+ ir = config_install_root (step_id::bpkg_target_create,
+ step_id::b_create,
+ step_id::bpkg_create);
- return true;
- };
+ if (ir)
+ install_bin = *ir / dir_path ("bin");
+ }
- if (internal_tests || external_tests)
- {
- operation_result& r (test_result != nullptr
- ? *test_result
- : add_result ("test"));
+ for (const char* f: pfs)
+ {
+ // [sudo] tar -xf <distribution-package-file> <env-config-args>
+ // <tgt-config-args>
+ // <pkg-config-args>
+ //
+ step_id b (step_id::bbot_sys_install_tar_extract);
+ step_id s (step_id::bbot_sys_install_tar_extract);
+ step_id ss (step_id::bbot_sys_install_tar_extract);
+
+ r.status |= run_tar (
+ b,
+ trace, r.log, wre,
+ bkp_step, bkp_status, aux_env, last_cmd,
+ true /* sudo */,
+ "-xf",
+ f,
+ step_args (env_args, s, nullopt, nullopt, ss),
+ step_args (tgt_args, s, nullopt, nullopt, ss),
+ step_args (pkg_args, s, nullopt, nullopt, ss));
+
+ if (!r.status)
+ break;
+ }
- // Noop, just for the log record to reduce the potential confusion for
- // the combined log reader due to updating the build system module in a
- // separate configuration (see above for details).
- //
- if (module)
- change_wd (trace, &r.log, current_directory ());
+ if (!r.status)
+ break;
- // Run internal tests.
- //
- if (internal_tests) // Note: false for modules (see above).
- {
- // bpkg test <env-config-args> <config-args> <package-name>
+ // Run ldconfig.
+ //
+ if (step_enabled (step_id::bbot_sys_install_ldconfig))
+ {
+ // sudo ldconfig <env-config-args>
+ // <tgt-config-args>
+ // <pkg-config-args>
+ //
+ step_id b (step_id::bbot_sys_install_ldconfig);
+ step_id s (step_id::bbot_sys_install_ldconfig);
+ step_id ss (step_id::bbot_sys_install_ldconfig);
+
+ r.status |= run_ldconfig (
+ b,
+ trace, r.log, wre,
+ bkp_step, bkp_status, aux_env, last_cmd,
+ step_args (env_args, s, nullopt, nullopt, ss),
+ step_args (tgt_args, s, nullopt, nullopt, ss),
+ step_args (pkg_args, s, nullopt, nullopt, ss));
+
+ if (!r.status)
+ break;
+ }
+ //
+ // Fail if the breakpoint refers to the bbot.sys-install.ldconfig
+ // step but this step is disabled.
+ //
+ else if (bkp_step && *bkp_step == step_id::bbot_sys_install_ldconfig)
+ {
+ fail_unreached_breakpoint (r);
+ break;
+ }
+ }
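+
+ // For illustration, with bbot.sys-install:config.install.root set to
+ // /usr/local and the --directory/--strip-components options passed
+ // via the configuration arguments as noted above, each iteration
+ // would run something along these lines (hypothetical archive path):
+ //
+ // sudo tar -xf /build/bindist/libhello-1.2.3-x86_64-linux-gnu.tar.xz \
+ //   --directory=/usr/local --strip-components=1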
//
- // bpkg.test
+ // Fail if the breakpoint refers to a
+ // bbot.sys-install.{tar.extract,ldconfig} step but the distribution
+ // is other than `archive`.
//
- r.status |= run_bpkg (
- step_id::bpkg_test,
- trace, r.log, wre,
- bkp_step, bkp_status, last_cmd,
- "-v",
- "test",
- "--package-cwd", // See above for details.
- step_args (env_args, step_id::bpkg_test),
- step_args (config_args, step_id::bpkg_test),
- pkg);
-
- if (!r.status)
+ else if (bkp_step &&
+ *bkp_step >= step_id::bbot_sys_install_tar_extract &&
+ *bkp_step <= step_id::bbot_sys_install_ldconfig)
+ {
+ fail_unreached_breakpoint (r);
break;
+ }
+ else
+ assert (false);
+
+ rm.status |= r.status;
+ }
+ //
+ // Fail if the breakpoint refers to a bbot.sys-install.* step but this
+ // step is disabled.
+ //
+ else if (bkp_step &&
+ *bkp_step >= step_id::bbot_sys_install &&
+ *bkp_step <= step_id::bbot_sys_install_ldconfig)
+ {
+ fail_unreached_breakpoint (add_result ("sys-install"));
+ break;
}
- // Run external tests.
+ // Now, if the package is installed, either from source or from the
+ // binary distribution package, the overall plan is as follows:
+ //
+ // 1. If the package has subprojects that support the test operation,
+ // then configure, build, and test them out of the source tree
+ // against the installed package using the build system directly.
//
- // Note that we assume that these packages belong to the dependent
- // package's repository or its complement repositories, recursively.
- // Thus, we test them in the configuration used to build the dependent
- // package (except for the build system module).
+ // 2. If any of the test packages are specified, then configure, build,
+ // and test them in a separate bpkg configuration(s) against the
+ // installed package.
//
- if (external_tests)
+ if (install_root || sys_install)
{
- // The test separate phase.
+ // Run the internal tests if the project contains "testable"
+ // subprojects, but not for a module.
//
- if (!test (r,
- dist_root,
- false /* installed */,
- module,
- bootstrap ? module_import.c_str () : nullptr))
- break;
+ has_internal_tests = false;
+
+ dir_paths subprj_dirs; // "Testable" package subprojects.
- // Back to the main phase.
+ // Collect the "testable" subprojects.
//
- }
+ if (!module_pkg)
+ {
+ assert (!rm.results.empty ());
- rm.status |= r.status;
- }
+ // Result of the install or sys-install operation.
+ //
+ operation_result& r (rm.results.back ());
- // Install the package, optionally test the installation and uninstall
- // afterwards.
- //
- // These operations are triggered by presence of config.install.root
- // configuration variable having a non-empty value for
- // bpkg.configure.create step.
- //
- if (install_root.empty ())
- break;
+ change_wd (trace, &r.log, effective_install_conf);
- // Now the overall plan is as follows:
- //
- // 1. Install the package.
- //
- // 2. If the package has subprojects that support the test operation, then
- // configure, build, and test them out of the source tree against the
- // installed package.
- //
- // 3. If any of the test packages are specified, then configure, build,
- // and test them in a separate bpkg configuration against the installed
- // package.
- //
- // 4. Uninstall the package.
- //
- // Install.
- //
- {
- operation_result& r (add_result ("install"));
+ for (const b_project_info::subproject& sp: prj.subprojects)
+ {
+ // Retrieve the subproject information similar to how we've done it
+ // for the package.
+ //
+ b_project_info si (prj_info (pkg_dir / sp.path,
+ b_info_flags::ext_mods,
+ "subproject"));
- change_wd (trace, &r.log, pkg_config);
+ const strings& ops (si.operations);
+ if (find (ops.begin (), ops.end (), "test") != ops.end ())
+ subprj_dirs.push_back (sp.path);
+ }
- // bpkg install <env-config-args> <config-args> <package-name>
- //
- // bpkg.install
- //
- r.status |= run_bpkg (
- step_id::bpkg_install,
- trace, r.log, wre,
- bkp_step, bkp_status, last_cmd,
- "-v",
- "install",
- step_args (env_args, step_id::bpkg_install),
- step_args (config_args, step_id::bpkg_install),
- pkg);
+ has_internal_tests = !subprj_dirs.empty ();
+ }
- if (!r.status)
- break;
+ if (has_internal_tests || has_runtime_tests || has_buildtime_tests)
+ {
+ operation_result* pr (&add_result ("test-installed"));
+ operation_result& r (*pr); // @@ TMP: Apple Clang 14.0.3 ICE
- rm.status |= r.status;
- }
+ change_wd (trace, &r.log, rwd);
- // The test installed phase.
- //
+ // Make sure that the installed package executables are properly
+ // imported when configuring and running tests, unless we are testing
+ // the build system module (that supposedly doesn't install any
+ // executables).
+ //
+ small_vector<string, 1> envvars;
- // Make sure that the installed package executables are properly imported
- // when configuring and running tests, unless we are testing the build
- // system module (that supposedly doesn't install any executables).
- //
- small_vector<string, 1> envvars;
+ if (install_bin)
+ {
+ // Note that we add the $config.install.root/bin directory at the
+ // beginning of the PATH environment variable value, so the
+ // installed executables are found first.
+ //
+ const string& ib (install_bin->string ());
- dir_paths subprj_dirs; // "Testable" package subprojects.
+ log_comment (trace, r.log,
+ "add " + ib + " to PATH environment variable");
- // We expect the build system modules to not have any testable subprojects
- // but to have external tests package instead.
- //
- if (module)
- internal_tests = false;
- else
- {
- // Note that we add the $config.install.root/bin directory at the
- // beginning of the PATH environment variable value, so the installed
- // executables are found first.
- //
- string paths ("PATH=" + (install_root / "bin").string ());
+ string paths ("PATH=" + ib);
- if (optional<string> s = getenv ("PATH"))
- {
- paths += path::traits_type::path_separator;
- paths += *s;
- }
+ if (optional<string> s = getenv ("PATH"))
+ {
+ paths += path::traits_type::path_separator;
+ paths += *s;
+ }
- envvars.push_back (move (paths));
+ envvars.push_back (move (paths));
+ }
- // Collect the "testable" subprojects.
- //
- for (const b_project_info::subproject& sp: prj.subprojects)
- {
- // Retrieve the subproject information similar to how we've done it
- // for the package.
- //
- b_project_info si (prj_info (pkg_dir / sp.path,
- true /* ext_mods */,
- "subproject"));
+ // Run internal tests.
+ //
+ if (has_internal_tests)
+ {
+ // Create the configuration.
+ //
+ // b create(<dir>, <env-modules>) <env-config-args>
+ // <tgt-config-args>
+ // <pkg-config-args>
+ //
+ // Amalgamation directory that will contain configuration
+ // subdirectory for package tests out of source tree build.
+ //
+ dir_path out_dir ("build-installed");
- const strings& ops (si.operations);
- if (find (ops.begin (), ops.end (), "test") != ops.end ())
- subprj_dirs.push_back (sp.path);
- }
+ {
+ step_id b (step_id::b_test_installed_create);
+ step_id s (step_id::b_test_installed_create);
+ step_id f (step_id::b_create);
- // If there are any "testable" subprojects, then configure them
- // (sequentially) and test/build in parallel afterwards.
- //
- internal_tests = !subprj_dirs.empty ();
- }
+ string mods; // build2 create meta-operation parameters.
- if (internal_tests || external_tests)
- {
- operation_result& r (add_result ("test-installed"));
+ for (const char* m: step_args (modules, s, f))
+ {
+ mods += mods.empty () ? ", " : " ";
+ mods += m;
+ }
- change_wd (trace, &r.log, rwd);
+ r.status |= run_b (
+ b,
+ trace, r.log, wre,
+ bkp_step, bkp_status, aux_env, last_cmd,
+ "-V",
+ "create('" + out_dir.representation () + '\'' + mods + ')',
+ step_args (env_args, s, f),
+ step_args (tgt_args, s, f),
+ step_args (pkg_args, s, f));
+
+ if (!r.status)
+ break;
+ }
- // Run internal tests.
- //
- if (internal_tests)
- {
- string mods; // build2 create meta-operation parameters.
+ // Configure testable subprojects sequentially and test/build in
+ // parallel afterwards.
+ //
+ // It feels right to also pass the main package configuration
+ // variables when configuring the internal tests, since they may
+ // need to align with the main package setup (enable some
+ // testscripts, etc).
+ //
+ strings test_specs;
+ for (const dir_path& d: subprj_dirs)
+ {
+ // b configure(<subprj-src-dir>@<subprj-out-dir>) <env-config-args>
+ // <tgt-config-args>
+ // <pkg-config-args>
+ // <pkg-vars>
+ //
+ step_id b (step_id::b_test_installed_configure);
+ step_id s (step_id::b_test_installed_configure);
+ step_id f (step_id::b_configure);
+
+ dir_path subprj_src_dir (exists (dist_src)
+ ? dist_src / d
+ : main_pkg_conf / pkg_dir / d);
+
+ dir_path subprj_out_dir (out_dir / d);
+
+ r.status |= run_b (
+ b,
+ envvars,
+ trace, r.log, wre,
+ bkp_step, bkp_status, aux_env, last_cmd,
+ "-v",
+ "configure('" +
+ subprj_src_dir.representation () + "'@'" +
+ subprj_out_dir.representation () + "')",
+ step_args (env_args, s, f),
+ step_args (tgt_args, s, f),
+ step_args (pkg_args, s, f),
+ pkg_config_vars);
+
+ if (!r.status)
+ break;
+
+ test_specs.push_back (
+ "test('" + subprj_out_dir.representation () + "')");
+ }
+
+ if (!r.status)
+ break;
+
+ // Build/test subprojects.
+ //
+ // b test(<subprj-out-dir>)... <env-config-args>
+ // <tgt-config-args>
+ // <pkg-config-args>
+ //
+ if (!step_disabled (step_id::b_test_installed_test))
+ {
+ step_id b (step_id::b_test_installed_test);
+ step_id s (step_id::b_test_installed_test);
+
+ r.status |= run_b (
+ b,
+ envvars,
+ trace, r.log, wre,
+ bkp_step, bkp_status, aux_env, last_cmd,
+ "-v",
+ test_specs,
+ step_args (env_args, s),
+ step_args (tgt_args, s),
+ step_args (pkg_args, s));
+
+ if (!r.status)
+ break;
+ }
+ //
+ // Fail if the breakpoint refers to the b.test-installed.test step
+ // but this step is disabled.
+ //
+ else if (bkp_step && *bkp_step == step_id::b_test_installed_test)
+ {
+ fail_unreached_breakpoint (r);
+ break;
+ }
+ }
+ //
+ // Fail if the breakpoint refers to some of the b.test-installed.*
+ // steps but the package doesn't have any internal tests.
+ //
+ else if (bkp_step &&
+ *bkp_step >= step_id::b_test_installed_create &&
+ *bkp_step <= step_id::b_test_installed_test)
+ {
+ fail_unreached_breakpoint (r);
+ break;
+ }
+
+ // Run runtime and build-time tests.
+ //
+ // Note that we only build runtime tests for target packages and for
+ // host packages in self-hosted configurations.
+ //
+ if (has_runtime_tests || has_buildtime_tests)
+ {
+ // Create the required build configurations.
+ //
+ dir_path target_conf ("build-installed-bpkg");
+ dir_path host_conf ("build-installed-bpkg-host");
+ dir_path module_conf ("build-installed-bpkg-module");
+
+ // Create the target configuration if this is a target package
+ // having external runtime tests or a host/module package having
+ // external build-time tests.
+ //
+ bool create_target (target_pkg || has_buildtime_tests);
+
+ // Note that even if there are no runtime tests for a host/module
+ // package, we still need to create the host/build2 configuration
+ // to configure the system package in.
+ //
+ bool create_host (host_pkg || module_pkg);
+
+ bool create_module (module_pkg ||
+ (host_pkg && has_buildtime_tests));
+
+ // Note: a module package cannot have runtime tests and so the
+ // module configuration is only created to serve build-time tests.
+ // Thus, the host or target configuration is always created as
+ // well and the module configuration is never a root
+ // configuration.
+ //
+ assert (create_target || create_host);
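+
+ // To summarize the configuration creation logic above per main
+ // package type:
+ //
+ // package  target conf           host conf  module conf
+ // -------  --------------------  ---------  -------------------
+ // target   always                never      never
+ // host     if build-time tests   always     if build-time tests
+ // module   if build-time tests   always     always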
+
+ // Root configuration through which we will be configuring the
+ // cluster.
+ //
+ const dir_path& root_conf (create_target ? target_conf : host_conf);
+
+ // Runtime tests configuration. Should only be used if there are
+ // any.
+ //
+ const dir_path& runtime_tests_conf (target_pkg
+ ? target_conf
+ : host_conf);
+
+ // Create the target configuration.
+ //
+ // bpkg create <env-modules> <env-config-args>
+ // <tgt-config-args>
+ // <pkg-config-args>
+ //
+ if (create_target)
+ {
+ step_id b (step_id::bpkg_test_separate_installed_create);
+
+ // Note that here and below the _for_* step ids are determined
+ // by the main package type (and, yes, that means we will use
+ // the same step ids for target and host configuration -- that,
+ // however, should be ok since host configuration will only be
+ // created in the self-hosted case).
+ //
+ step_id s (
+ target_pkg
+ ? step_id::bpkg_test_separate_installed_create_for_target
+ : host_pkg
+ ? step_id::bpkg_test_separate_installed_create_for_host
+ : step_id::bpkg_test_separate_installed_create_for_module);
+
+ // Note: no fallback for modules.
+ //
+ optional<step_id> f (!module_pkg
+ ? step_id::bpkg_test_separate_installed_create
+ : optional<step_id> ());
+
+ r.status |= run_bpkg (
+ b,
+ trace, r.log, wre,
+ bkp_step, bkp_status, aux_env, last_cmd,
+ "-V",
+ "create",
+ "-d", target_conf,
+ step_args (modules, s, f),
+ step_args (env_args, s, f),
+ step_args (tgt_args, s, f),
+ step_args (pkg_args, s, f));
+
+ if (!r.status)
+ break;
+ }
+
+ // Create the host configuration.
+ //
+ if (create_host)
+ {
+ // bpkg create --type host <env-modules> <env-config-args>
+ // <tgt-config-args>
+ // <pkg-config-args>
+ //
+ step_id b (step_id::bpkg_test_separate_installed_create);
+
+ step_id s (host_pkg
+ ? step_id::bpkg_test_separate_installed_create_for_host
+ : step_id::bpkg_test_separate_installed_create_for_module);
+
+ // Note: no fallback for modules.
+ //
+ optional<step_id> f (!module_pkg
+ ? step_id::bpkg_test_separate_installed_create
+ : optional<step_id> ());
+
+ r.status |= run_bpkg (
+ b,
+ trace, r.log, wre,
+ bkp_step, bkp_status, aux_env, last_cmd,
+ "-V",
+ "create",
+ "-d", host_conf,
+ "--type", "host",
+ "--name", "host",
+ step_args (modules, s, f),
+ step_args (env_args, s, f),
+ step_args (tgt_args, s, f),
+ step_args (pkg_args, s, f));
+
+ if (!r.status)
+ break;
+ }
+
+ // Create the module configuration.
+ //
+ // Note that we never build any tests in it but only configure the
+ // system package. Note, however, that the host/module package
+ // build-time tests can potentially build some other modules here.
+ //
+ if (create_module)
+ {
+ // b create(<dir>) config.config.load=~build2
+ //
+ step_id b (step_id::bpkg_test_separate_installed_create);
+
+ r.status |= run_b (
+ b,
+ trace, r.log, wre,
+ bkp_step, bkp_status, aux_env, last_cmd,
+ "-V",
+ "create(" + module_conf.representation () + ",cc)",
+ "config.config.load=~build2",
+ "config.config.persist+='config.*'@unused=drop");
+
+ if (!r.status)
+ break;
+
+ // bpkg create --existing --type build2
+ //
+ r.status |= run_bpkg (
+ b,
+ trace, r.log, wre,
+ bkp_step, bkp_status, aux_env, last_cmd,
+ "-v",
+ "create",
+ "--existing",
+ "-d", module_conf,
+ "--type", "build2",
+ "--name", "module");
+
+ if (!r.status)
+ break;
+ }
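+
+ // Expanded, and shell quoting aside, the two commands above amount
+ // to roughly:
+ //
+ // b -V 'create(build-installed-bpkg-module/,cc)' \
+ //   config.config.load=~build2 \
+ //   "config.config.persist+='config.*'@unused=drop"
+ //
+ // bpkg -v create --existing -d build-installed-bpkg-module \
+ //   --type build2 --name module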
+
+ // Link the configurations.
+ //
+ // bpkg link -d <dir> <dir>
+ //
+ {
+ step_id b (step_id::bpkg_test_separate_installed_link);
+
+ if (create_target)
+ {
+ if (create_host)
+ {
+ r.status |= run_bpkg (
+ b,
+ trace, r.log, wre,
+ bkp_step, bkp_status, aux_env, last_cmd,
+ "-v",
+ "link",
+ "-d", target_conf,
+ host_conf);
+
+ if (!r.status)
+ break;
+ }
+
+ if (create_module)
+ {
+ r.status |= run_bpkg (
+ b,
+ trace, r.log, wre,
+ bkp_step, bkp_status, aux_env, last_cmd,
+ "-v",
+ "link",
+ "-d", target_conf,
+ module_conf);
+
+ if (!r.status)
+ break;
+ }
+ }
+
+ if (create_host)
+ {
+ if (create_module)
+ {
+ r.status |= run_bpkg (
+ b,
+ trace, r.log, wre,
+ bkp_step, bkp_status, aux_env, last_cmd,
+ "-v",
+ "link",
+ "-d", host_conf,
+ module_conf);
+
+ if (!r.status)
+ break;
+ }
+ }
+ }
+
+ // Add and fetch the repositories.
+ //
+ if (has_runtime_tests)
+ {
+ // bpkg add <env-config-args> <tgt-config-args> <pkg-config-args>
+ // <repository-url>
+ //
+ {
+ step_id b (step_id::bpkg_test_separate_installed_configure_add);
+ step_id s (step_id::bpkg_test_separate_installed_configure_add);
+ step_id f (step_id::bpkg_configure_add);
+
+ r.status |= run_bpkg (
+ b,
+ trace, r.log, wre,
+ bkp_step, bkp_status, aux_env, last_cmd,
+ "-v",
+ "add",
+ "-d", runtime_tests_conf,
+ step_args (env_args, s, f),
+ step_args (tgt_args, s, f),
+ step_args (pkg_args, s, f),
+ repo);
+
+ if (!r.status)
+ break;
+ }
+
+ // bpkg fetch <env-config-args> <tgt-config-args> <pkg-config-args>
+ // <trust-options>
+ //
+ {
+ step_id b (step_id::bpkg_test_separate_installed_configure_fetch);
+ step_id s (step_id::bpkg_test_separate_installed_configure_fetch);
+ step_id f (step_id::bpkg_configure_fetch);
+
+ r.status |= run_bpkg (
+ b,
+ trace, r.log, wre,
+ bkp_step, bkp_status, aux_env, last_cmd,
+ "-v",
+ "fetch",
+ "-d", runtime_tests_conf,
+ step_args (env_args, s, f),
+ step_args (tgt_args, s, f),
+ step_args (pkg_args, s, f),
+ trust_ops);
+
+ if (!r.status)
+ break;
+ }
+ }
+
+ if (has_buildtime_tests)
+ {
+ // bpkg add <env-config-args> <tgt-config-args> <pkg-config-args>
+ // <repository-url>
+ //
+ {
+ step_id b (step_id::bpkg_test_separate_installed_configure_add);
+ step_id s (step_id::bpkg_test_separate_installed_configure_add);
+ step_id f (step_id::bpkg_configure_add);
+
+ r.status |= run_bpkg (
+ b,
+ trace, r.log, wre,
+ bkp_step, bkp_status, aux_env, last_cmd,
+ "-v",
+ "add",
+ "-d", target_conf,
+ step_args (env_args, s, f),
+ step_args (tgt_args, s, f),
+ step_args (pkg_args, s, f),
+ repo);
+
+ if (!r.status)
+ break;
+ }
+
+ // bpkg fetch <env-config-args> <tgt-config-args> <pkg-config-args>
+ // <trust-options>
+ //
+ {
+ step_id b (step_id::bpkg_test_separate_installed_configure_fetch);
+ step_id s (step_id::bpkg_test_separate_installed_configure_fetch);
+ step_id f (step_id::bpkg_configure_fetch);
+
+ r.status |= run_bpkg (
+ b,
+ trace, r.log, wre,
+ bkp_step, bkp_status, aux_env, last_cmd,
+ "-v",
+ "fetch",
+ "-d", target_conf,
+ step_args (env_args, s, f),
+ step_args (tgt_args, s, f),
+ step_args (pkg_args, s, f),
+ trust_ops);
+
+ if (!r.status)
+ break;
+ }
+ }
+
+ // Configure all the packages using a single bpkg-pkg-build command.
+ //
+ // bpkg build --configure-only <env-config-args>
+ // <tgt-config-args>
+ // <pkg-config-args>
+ // { <config> <rtt-config-vars> }+ <runtime-test>...
+ // { <config> }+ { <runtime-test>... }
+ // { <btt-config-vars> }+ <buildtime-test>...
+ // ?sys:<pkg>
+ // <glob-dep>...
+ //
+ strings pkgs;
+
+ if (has_runtime_tests)
+ {
+ // Note that only host package runtime tests may (but need not) be
+ // configured in a linked configuration, in which case --config-name
+ // must be specified for them.
+ //
+ assert (!module_pkg);
+
+ string conf_name (runtime_tests_conf == root_conf
+ ? ""
+ : "host");
+
+ // If there are any runtime tests with configuration variables,
+ // then add them to the command line as follows:
+ //
+ // { --config-name <name> <config-var>... }+ <runtime-test>...
+ //
+ // Also count the number of runtime tests without configuration
+ // variables.
+ //
+ size_t no_vars (0);
+ for (const auto& t: runtime_tests)
+ {
+ auto i (pkg_config_pkgs.find (t.name.string ()));
+
+ if (i != pkg_config_pkgs.end ())
+ {
+ pkgs.push_back ("{");
+
+ if (!conf_name.empty ())
+ {
+ pkgs.push_back ("--config-name");
+ pkgs.push_back (conf_name);
+ }
+
+ pkgs.insert (pkgs.end (),
+ i->second.begin (), i->second.end ());
+
+ pkgs.push_back ("}+");
+
+ // Strip the potential reflection variable assignment.
+ //
+ pkgs.push_back (t.dependency::string ());
+ }
+ else
+ ++no_vars;
+ }
+
+ // If there are any runtime tests without configuration
+ // variables, then add them to the command line as follows:
+ //
+ // { --config-name <name> }+ { <runtime-test>... }
+ //
+ if (no_vars != 0)
+ {
+ bool og (!conf_name.empty ());
+
+ if (og)
+ {
+ pkgs.push_back ("{");
+
+ pkgs.push_back ("--config-name");
+ pkgs.push_back (conf_name);
+
+ pkgs.push_back ("}+");
+ }
+
+ if (og && no_vars != 1)
+ pkgs.push_back ("{");
+
+ for (const auto& t: runtime_tests)
+ {
+ if (pkg_config_pkgs.find (t.name.string ()) ==
+ pkg_config_pkgs.end ())
+ {
+ // Strip the potential reflection variable assignment.
+ //
+ pkgs.push_back (t.dependency::string ());
+ }
+ }
+
+ if (og && no_vars != 1)
+ pkgs.push_back ("}");
+ }
+ }
+
+ if (has_buildtime_tests)
+ {
+ for (const auto& t: buildtime_tests)
+ {
+ auto i (pkg_config_pkgs.find (t.name.string ()));
+
+ if (i != pkg_config_pkgs.end ())
+ {
+ pkgs.push_back ("{");
+
+ pkgs.insert (pkgs.end (),
+ i->second.begin (), i->second.end ());
+
+ pkgs.push_back ("}+");
+ }
+
+ // Strip the build-time mark and potential reflection variable
+ // assignment.
+ //
+ pkgs.push_back (t.dependency::string ());
+ }
+ }
+
+ pkgs.push_back ("?sys:" + pkg_rev);
+
+ // Add the global system dependencies.
+ //
+ for (const pair<string, strings>& d: pkg_config_glob_deps)
+ pkgs.push_back (d.first);
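+
+ // As an illustration, for a host package with one runtime test that
+ // carries configuration variables, one that does not, and a
+ // build-time test, pkgs might end up as (hypothetical names):
+ //
+ // { --config-name host config.libhello-tests.extra=true }+ libhello-tests
+ // { --config-name host }+ libhello-examples
+ // libhello-build-tests
+ // ?sys:libhello/1.2.3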
+
+ // Finally, configure all the test packages.
+ //
+ {
+ step_id b (step_id::bpkg_test_separate_installed_configure_build);
+
+ step_id g (step_id::bpkg_global_configure_build); // Global.
+
+ step_id s (
+ target_pkg
+ ? step_id::bpkg_test_separate_installed_configure_build_for_target
+ : host_pkg
+ ? step_id::bpkg_test_separate_installed_configure_build_for_host
+ : step_id::bpkg_test_separate_installed_configure_build_for_module);
+
+ step_id f (step_id::bpkg_test_separate_installed_configure_build);
+
+ r.status |= run_bpkg (
+ b,
+ envvars,
+ trace, r.log, wre,
+ bkp_step, bkp_status, aux_env, last_cmd,
+ "-v",
+ "build",
+ "--configure-only",
+ "--checkout-root", dist_installed_root,
+ "--yes",
+ "-d", root_conf,
+ step_args (env_args, g),
+ step_args (env_args, s, f),
+ step_args (tgt_args, g),
+ step_args (tgt_args, s, f),
+ step_args (pkg_args, g),
+ step_args (pkg_args, s, f),
+ "--",
+ pkgs);
+
+ if (!r.status)
+ break;
+ }
+
+#ifdef _WIN32
+ Sleep (5000); // See above.
+#endif
- for (const string& m: step_args (modules,
- step_id::b_test_installed_create))
+ // Note that if the bpkg.test-separate-installed.update step is
+ // disabled, we also skip bpkg.test-separate-installed.test.
+ //
+ if (!step_disabled (step_id::bpkg_test_separate_installed_update))
+ {
+ bool update_only (
+ step_disabled (step_id::bpkg_test_separate_installed_test));
+
+ // Fail if the breakpoint refers to the
+ // bpkg.test-separate-installed.test step but this step is
+ // disabled.
+ //
+ if (update_only &&
+ bkp_step &&
+ *bkp_step == step_id::bpkg_test_separate_installed_test)
+ {
+ fail_unreached_breakpoint (r);
+ break;
+ }
+
+ // External runtime tests.
+ //
+ if (has_runtime_tests)
+ {
+ const dir_path& runtime_tests_conf (target_pkg
+ ? target_conf
+ : host_conf);
+
+ change_wd (trace, &r.log, runtime_tests_conf);
+
+ if (!test (r,
+ runtime_tests,
+ dist_installed_root,
+ true /* installed */,
+ update_only,
+ envvars))
+ break;
+ }
+
+ // External build-time tests.
+ //
+ if (has_buildtime_tests)
+ {
+ change_wd (trace, &r.log, rwd / target_conf);
+
+ if (!test (r,
+ buildtime_tests,
+ dist_installed_root,
+ true /* installed */,
+ update_only,
+ envvars))
+ break;
+ }
+ }
+ //
+ // Fail if the breakpoint refers to some of the
+ // bpkg.test-separate-installed.{update,test} steps but the
+ // bpkg.test-separate-installed.update step is disabled.
+ //
+ else if (bkp_step &&
+ *bkp_step >= step_id::bpkg_test_separate_installed_update &&
+ *bkp_step <= step_id::bpkg_test_separate_installed_test)
+ {
+ fail_unreached_breakpoint (r);
+ break;
+ }
+ }
+ //
+ // Fail if the breakpoint refers to some of the
+ // bpkg.test-separate-installed.* steps but the package has no
+ // external tests.
+ //
+ else if (bkp_step &&
+ *bkp_step >= step_id::bpkg_test_separate_installed_create &&
+ *bkp_step <= step_id::bpkg_test_separate_installed_test)
+ {
+ fail_unreached_breakpoint (r);
+ break;
+ }
+
+ rm.status |= r.status;
+ }
+ //
+ // Fail if the breakpoint refers to some of the test installed steps
+ // but the package has no tests.
+ //
+ else if (bkp_step &&
+ *bkp_step >= step_id::b_test_installed_create &&
+ *bkp_step <= step_id::bpkg_test_separate_installed_test)
{
- mods += mods.empty () ? ", " : " ";
- mods += m;
+ fail_unreached_breakpoint (add_result ("test-installed"));
+ break;
}
+ }
+ //
+ // Fail if the breakpoint refers to some of the test installed steps but
+ // the package is not supposed to be installed either from source or
+ // from the binary distribution package.
+ //
+ else if (bkp_step &&
+ *bkp_step >= step_id::b_test_installed_create &&
+ *bkp_step <= step_id::bpkg_test_separate_installed_test)
+ {
+ fail_unreached_breakpoint (add_result ("test-installed"));
+ break;
+ }
- // b create(<dir>, <env-modules>) <env-config-args> <config-args>
- //
- // b.test-installed.create
+ // Uninstall, if installed from the binary distribution package.
+ //
+ // Note: noop for the archive distribution.
+ //
+ if (sys_install &&
+ (*bindist == step_id::bpkg_bindist_debian ||
+ *bindist == step_id::bpkg_bindist_fedora))
+ {
+ operation_result* pr (&add_result ("sys-uninstall"));
+ operation_result& r (*pr); // @@ TMP: Apple Clang 14.0.3 ICE
+
+ // Noop, just for the log record.
//
- // Amalgamation directory that will contain configuration subdirectory
- // for package tests out of source tree build.
+ change_wd (trace, &r.log, rwd);
+
+ // Collect the binary package system names.
//
- dir_path out_dir ("build-installed");
+ cstrings pns;
- r.status |= run_b (
- step_id::b_test_installed_create,
- trace, r.log, wre,
- bkp_step, bkp_status, last_cmd,
- "-V",
- "create('" + out_dir.representation () + "'" + mods + ")",
- step_args (env_args, step_id::b_test_installed_create),
- step_args (config_args, step_id::b_test_installed_create));
+ auto add_package_names = [&pns] (const vector<bindist_file>& bfs)
+ {
+ for (const bindist_file& f: bfs)
+ {
+ if (f.system_name)
+ pns.push_back (f.system_name->c_str ());
+ }
+ };
- if (!r.status)
- break;
+ add_package_names (bindist_result.package.files);
- // Configure subprojects and create buildspecs for their testing.
+ for (const bindist_package& d: bindist_result.dependencies)
+ add_package_names (d.files);
+
+ // Uninstall for the `debian` distribution.
//
- strings test_specs;
- for (const dir_path& d: subprj_dirs)
+ if (*bindist == step_id::bpkg_bindist_debian)
{
- // b configure(<subprj-src-dir>@<subprj-out-dir>) <env-config-args>
- // <config-args>
- //
- // b.test-installed.configure
+ // sudo apt-get remove <env-config-args>
+ // <tgt-config-args>
+ // <pkg-config-args>
+ // <distribution-package-name>...
//
- dir_path subprj_src_dir (exists (dist_src)
- ? dist_src / d
- : build_dir / pkg_dir / d);
+ step_id b (step_id::bbot_sys_uninstall_apt_get_remove);
+ step_id s (step_id::bbot_sys_uninstall_apt_get_remove);
+ step_id ss (step_id::bbot_sys_uninstall_apt_get_remove);
- dir_path subprj_out_dir (out_dir / d);
-
- r.status |= run_b (
- step_id::b_test_installed_configure,
- envvars,
+ r.status |= run_apt_get (
+ b,
trace, r.log, wre,
- bkp_step, bkp_status, last_cmd,
- "-v",
- "configure('" +
- subprj_src_dir.representation () + "'@'" +
- subprj_out_dir.representation () + "')",
- step_args (env_args, step_id::b_test_installed_configure),
- step_args (config_args, step_id::b_test_installed_configure));
+ bkp_step, bkp_status, aux_env, last_cmd,
+ "remove",
+ "--assume-yes",
+ step_args (env_args, s, nullopt, nullopt, ss),
+ step_args (tgt_args, s, nullopt, nullopt, ss),
+ step_args (pkg_args, s, nullopt, nullopt, ss),
+ pns);
if (!r.status)
break;
-
- test_specs.push_back (
- "test('" + subprj_out_dir.representation () + "')");
}
-
- if (!r.status)
+ //
+ // Fail if the breakpoint refers to the
+ // bbot.sys-uninstall.apt-get.remove step but the distribution is
+ // other than `debian`.
+ //
+ else if (bkp_step &&
+ *bkp_step == step_id::bbot_sys_uninstall_apt_get_remove)
+ {
+ fail_unreached_breakpoint (r);
break;
+ }
+ //
+ // Uninstall for the `fedora` distribution.
+ //
+ else if (*bindist == step_id::bpkg_bindist_fedora)
+ {
+ // sudo dnf remove <env-config-args>
+ // <tgt-config-args>
+ // <pkg-config-args>
+ // <distribution-package-name>...
+ //
+ step_id b (step_id::bbot_sys_uninstall_dnf_remove);
+ step_id s (step_id::bbot_sys_uninstall_dnf_remove);
+ step_id ss (step_id::bbot_sys_uninstall_dnf_remove);
+
+ r.status |= run_dnf (
+ b,
+ trace, r.log, wre,
+ bkp_step, bkp_status, aux_env, last_cmd,
+ "remove",
+ "--assumeyes",
+ step_args (env_args, s, nullopt, nullopt, ss),
+ step_args (tgt_args, s, nullopt, nullopt, ss),
+ step_args (pkg_args, s, nullopt, nullopt, ss),
+ pns);
- // Build/test subprojects.
+ if (!r.status)
+ break;
+ }
//
- // b test(<subprj-out-dir>)... <env-config-args> <config-args>
+ // Fail if the breakpoint refers to the bbot.sys-uninstall.dnf.remove
+ // step but the distribution is other than `fedora`.
//
- // b.test-installed.test
+ else if (bkp_step &&
+ *bkp_step == step_id::bbot_sys_uninstall_dnf_remove)
+ {
+ fail_unreached_breakpoint (r);
+ break;
+ }
+ else
+ assert (false);
+
+ rm.status |= r.status;
+ }
+ //
+ // Fail if the breakpoint refers to a bbot.sys-uninstall.* step but
+ // this step is disabled.
+ //
+ else if (bkp_step &&
+ *bkp_step >= step_id::bbot_sys_uninstall_apt_get_remove &&
+ *bkp_step <= step_id::bbot_sys_uninstall_dnf_remove)
+ {
+ fail_unreached_breakpoint (add_result ("sys-uninstall"));
+ break;
+ }
+
+ // Uninstall, if installed from source.
+ //
+ if (install_root)
+ {
+ operation_result* pr (&add_result ("uninstall"));
+ operation_result& r (*pr); // @@ TMP: Apple Clang 14.0.3 ICE
+
+ change_wd (trace, &r.log, effective_install_conf);
+
+ // bpkg uninstall <env-config-args> <tgt-config-args> <pkg-config-args>
+ // <package-name>
//
- r.status |= run_b (
- step_id::b_test_installed_test,
- envvars,
+ step_id b (step_id::bpkg_uninstall);
+ step_id s (step_id::bpkg_uninstall);
+
+ r.status |= run_bpkg (
+ b,
trace, r.log, wre,
- bkp_step, bkp_status, last_cmd,
+ bkp_step, bkp_status, aux_env, last_cmd,
"-v",
- test_specs,
- step_args (env_args, step_id::b_test_installed_test),
- step_args (config_args, step_id::b_test_installed_test));
+ "uninstall",
+ step_args (env_args, s),
+ step_args (tgt_args, s),
+ step_args (pkg_args, s),
+ pkg);
if (!r.status)
break;
+
+ rm.status |= r.status;
+ }
+ //
+ // Fail if the breakpoint refers to the bpkg.uninstall step but the
+ // package was not installed from source.
+ //
+ else if (bkp_step && *bkp_step == step_id::bpkg_uninstall)
+ {
+ fail_unreached_breakpoint (add_result ("uninstall"));
+ break;
}
- // Run external tests.
+ // Now, after the package is fully tested, let's prepare the build
+ // artifacts for the upload, using the upload operation log.
+ //
+
+ // Prepare the bindist artifacts.
+ //
+ // Move the binary distribution files generated for the main package and
+ // bindist-result.json to the upload/bindist/<distribution>/ directory.
+ // Also serialize the subset of the bindist result as
+ // bindist-result.manifest.
//
- if (external_tests)
+ // Fail if the breakpoint refers to the bbot.bindist.upload step since
+ // it has no specific command associated.
+ //
+ if (bkp_step && *bkp_step == step_id::bbot_bindist_upload)
+ {
+ fail_unreached_breakpoint (add_result ("upload"));
+ break;
+ }
+
+ if (bindist_upload)
{
- // Configure.
+ operation_result* pr (&add_result ("upload"));
+ operation_result& r (*pr); // @@ TMP: Apple Clang 14.0.3 ICE
+
+ change_wd (trace, &r.log, rwd);
+
+ dir_path d (upload_dir /
+ dir_path ("bindist") /
+ dir_path (bindist_result.distribution));
+
+ log_step_id (trace, r.log, step_id::bbot_bindist_upload);
+
+ mk_p (trace, &r.log, d);
+
+ // Move a file to the upload/bindist/<distribution>/ directory.
//
- // bpkg create <env-modules> <env-config-args> <config-args>
+ // On Windows copy the file instead of moving it, so as not to fail
+ // if it is being scanned by Windows Defender or some such.
//
- // bpkg.test-installed.create (bpkg.create)
+ auto mv = [&d, &r, &rwd, &trace] (const path& p)
+ {
+#ifndef _WIN32
+ bool mv (true);
+#else
+ bool mv (false);
+#endif
+ // Use a relative path, if possible, to keep the operation log tidy
+ // (this won't be the case on Fedora).
+ //
+ const path& rp (p.sub (rwd) ? p.leaf (rwd) : p);
+
+ if (mv)
+ mv_into (trace, &r.log, rp, d);
+ else
+ cp_into (trace, &r.log, rp, d);
+ };
+
+ // Main package files.
//
- dir_path config_dir ("build-installed-bpkg");
+ for (const bindist_file& f: bindist_result.package.files)
+ mv (f.path);
- r.status |= run_bpkg (
- step_id::bpkg_test_installed_create,
- trace, r.log, wre,
- bkp_step, bkp_status, last_cmd,
- "-V",
- "create",
- "-d", config_dir.string (),
- "--wipe",
+ // Bindist result JSON.
+ //
+ mv (bindist_result_file);
- step_args (modules,
- step_id::bpkg_test_installed_create,
- step_id::bpkg_create),
+ // Bindist result manifest.
+ //
+ path mf (d / "bindist-result.manifest");
- step_args (env_args,
- step_id::bpkg_test_installed_create,
- step_id::bpkg_create),
+ try
+ {
+ ofdstream os (mf);
+ serializer s (os, mf.string ());
- step_args (config_args,
- step_id::bpkg_test_installed_create,
- step_id::bpkg_create));
+ // Serialize package manifest.
+ //
+ s.next ("", "1"); // Start of manifest.
- if (!r.status)
- break;
+ s.next ("distribution", bindist_result.distribution);
+ s.next ("architecture", bindist_result.architecture);
- change_wd (trace, &r.log, config_dir);
+ s.next ("os-release-name-id", bindist_result.os_release.name_id);
- // bpkg add <env-config-args> <config-args> <repository-url>
- //
- // bpkg.test-installed.configure.add (bpkg.configure.add)
- //
- r.status |= run_bpkg (
- step_id::bpkg_test_installed_configure_add,
- trace, r.log, wre,
- bkp_step, bkp_status, last_cmd,
- "-v",
- "add",
+ // Should have failed earlier.
+ //
+ assert (bindist_result.os_release.version_id);
- step_args (env_args,
- step_id::bpkg_test_installed_configure_add,
- step_id::bpkg_configure_add),
+ s.next ("os-release-version-id",
+ *bindist_result.os_release.version_id);
- step_args (config_args,
- step_id::bpkg_test_installed_configure_add,
- step_id::bpkg_configure_add),
+ s.next ("package-name", bindist_result.package.name);
+ s.next ("package-version", bindist_result.package.version);
- repo);
+ if (bindist_result.package.system_version)
+ s.next ("package-system-version",
+ *bindist_result.package.system_version);
- if (!r.status)
- break;
+ s.next ("", ""); // End of manifest.
- // bpkg fetch <env-config-args> <config-args> <trust-options>
- //
- // bpkg.test-installed.configure.fetch (bpkg.configure.fetch)
- //
- r.status |= run_bpkg (
- step_id::bpkg_test_installed_configure_fetch,
- trace, r.log, wre,
- bkp_step, bkp_status, last_cmd,
- "-v",
- "fetch",
+ // Serialize package file manifests.
+ //
+ for (const bindist_file& f: bindist_result.package.files)
+ {
+ s.next ("", "1"); // Start of manifest.
- step_args (env_args,
- step_id::bpkg_test_installed_configure_fetch,
- step_id::bpkg_configure_fetch),
+ s.next ("package-file-type", f.type);
- step_args (config_args,
- step_id::bpkg_test_installed_configure_fetch,
- step_id::bpkg_configure_fetch),
+ // Note: the simple path representation is POSIX.
+ //
+ s.next ("package-file-path", f.path.leaf ().string ());
- trust_ops);
+ if (f.system_name)
+ s.next ("package-file-system-name", *f.system_name);
- if (!r.status)
- break;
+ s.next ("", ""); // End of manifest.
+ }
+
+ s.next ("", ""); // End of stream.
+
+ os.close ();
+ }
+ catch (const io_error& e)
+ {
+ fail << "unable to write to '" << mf << "': " << e;
+ }
+ catch (const serialization& e)
+ {
+ fail << "unable to serialize bindist result: " << e;
+ }
+ }
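+
+ // The resulting bindist-result.manifest might look along these lines
+ // (hypothetical values):
+ //
+ // : 1
+ // distribution: debian
+ // architecture: amd64
+ // os-release-name-id: debian
+ // os-release-version-id: 12
+ // package-name: libhello
+ // package-version: 1.2.3
+ // package-system-version: 1.2.3-0~debian12
+ // :
+ // package-file-type: deb
+ // package-file-path: libhello_1.2.3_amd64.deb
+ // package-file-system-name: libhello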
+
+ // Create the archive of the build artifacts for subsequent upload, if
+ // the upload/ directory is not empty.
+ //
+ // Note that the archive will be uploaded later, right before uploading
+ // the result manifest, unless the task has been aborted by the user or
+ // the result status is error or worse.
+ //
+ if (!empty (rwd / upload_dir) && !step_disabled (step_id::bbot_upload))
+ {
+ // The upload operation log must have been added as part of the
+ // build artifacts preparation for upload.
+ //
+ operation_result& r (rm.results.back ());
- // The test separate installed phase.
+ // Fail if the breakpoint refers to the bbot.upload step since it has
+ // no specific command associated.
//
- if (!test (r,
- rwd / dir_path ("dist-installed"),
- true /* installed */,
- true /* sys_dep */,
- nullptr /* import */,
- envvars))
+ if (bkp_step && *bkp_step == step_id::bbot_upload)
+ {
+ fail_unreached_breakpoint (r);
break;
+ }
+
+ change_wd (trace, &r.log, rwd);
- // Back to the test installed phase.
+ // Archive the build artifacts.
//
- }
+ {
+ // tar -cf upload.tar <env-config-args>
+ // <tgt-config-args>
+ // <pkg-config-args>
+ // upload/
+ //
+ step_id b (step_id::bbot_upload_tar_create);
+ step_id s (step_id::bbot_upload_tar_create);
+ step_id ss (step_id::bbot_upload_tar_create);
- rm.status |= r.status;
- }
+ // Make sure the archive is portable.
+ //
+ // Note that OpenBSD tar does not support --format but it appears
+ // ustar is the default (see bpkg/system-package-manager-archive.cxx
+ // for details).
+ //
+ r.status |= run_tar (
+ b,
+ trace, r.log, wre,
+ bkp_step, bkp_status, aux_env, last_cmd,
+ false /* sudo */,
+#ifndef __OpenBSD__
+ "--format", "ustar",
+#endif
+ "-cf",
+ upload_archive,
+ step_args (env_args, s, nullopt, nullopt, ss),
+ step_args (tgt_args, s, nullopt, nullopt, ss),
+ step_args (pkg_args, s, nullopt, nullopt, ss),
+ upload_dir);
- // Back to the main phase.
- //
- // Uninstall.
- //
- {
- operation_result& r (add_result ("uninstall"));
+ if (!r.status)
+ break;
+ }
+
+ // It feels useful to also print the archive content to the log.
+ //
+ {
+ // tar -tf upload.tar <env-config-args>
+ // <tgt-config-args>
+ // <pkg-config-args>
+ //
+ step_id b (step_id::bbot_upload_tar_list);
+ step_id s (step_id::bbot_upload_tar_list);
+ step_id ss (step_id::bbot_upload_tar_list);
- change_wd (trace, &r.log, pkg_config);
+ r.status |= run_tar (
+ b,
+ trace, r.log, wre,
+ bkp_step, bkp_status, aux_env, last_cmd,
+ false /* sudo */,
+ "-tf",
+ upload_archive,
+ step_args (env_args, s, nullopt, nullopt, ss),
+ step_args (tgt_args, s, nullopt, nullopt, ss),
+ step_args (pkg_args, s, nullopt, nullopt, ss));
- // bpkg uninstall <env-config-args> <config-args> <package-name>
+ if (!r.status)
+ break;
+ }
+
+ rm.status |= r.status;
+ }
//
- // bpkg.uninstall
+ // Fail if the breakpoint refers to any of the bbot.upload.* steps but
+ // there is either nothing to upload or the bbot.upload step is
+ // disabled.
//
- r.status |= run_bpkg (
- step_id::bpkg_uninstall,
- trace, r.log, wre,
- bkp_step, bkp_status, last_cmd,
- "-v",
- "uninstall",
- step_args (env_args, step_id::bpkg_uninstall),
- step_args (config_args, step_id::bpkg_uninstall),
- pkg);
+ else if (bkp_step &&
+ *bkp_step >= step_id::bbot_upload &&
+ *bkp_step <= step_id::bbot_upload_tar_list)
+ {
+ // If the upload operation log has been added as part of the build
+ // artifacts preparation for upload, then use this log to report the
+ // error. Otherwise, add a new log for that.
+ //
+ // @@ TMP: Apple Clang 14.0.3 ICE
+ //
+ operation_result* pr (&rm.results.back ());
- if (!r.status)
- break;
+ if (pr->operation != "upload")
+ pr = &add_result ("upload");
- rm.status |= r.status;
+ fail_unreached_breakpoint (*pr);
+ break;
+ }
+ }
+ //
+ // Fail if the breakpoint refers to bpkg.update or any dependent step but
+ // the bpkg.update step is disabled.
+ //
+ else if (bkp_step &&
+ *bkp_step >= step_id::bpkg_update &&
+ *bkp_step <= step_id::bbot_upload)
+ {
+ fail_unreached_breakpoint (add_result ("update"));
+ break;
}
break;
@@ -2048,16 +5883,6 @@ build (size_t argc, const char* argv[])
rm.status |= r.status; // Merge last in case of a break.
- // Also merge statuses of the configure and test operations, which logs
- // can potentially be shared across multiple steps and which results may
- // not be the last in the list.
- //
- if (configure_result != nullptr)
- rm.status |= configure_result->status;
-
- if (test_result != nullptr)
- rm.status |= test_result->status;
-
// Unless there is an error (or worse) encountered, log the special 'end'
// step and, if this step is specified in the interactive manifest value,
// ask the user if to continue the task execution.
@@ -2067,9 +5892,11 @@ build (size_t argc, const char* argv[])
if (!error)
{
r.status |= run_cmd (step_id::end,
- trace, r.log, regexes (),
+ trace, r.log,
+ nullptr /* out_str */, path () /* out_file */,
+ regexes (),
"" /* name */,
- bkp_step, bkp_status, last_cmd,
+ bkp_step, bkp_status, aux_env, last_cmd,
process_env ());
rm.status |= r.status;
@@ -2116,11 +5943,45 @@ build (size_t argc, const char* argv[])
}
}
else
- assert (rm.status == result_status::abort);
+ assert (rm.status == result_status::abort ||
+ rm.status == result_status::skip);
if (!rwd.empty ())
+ {
change_wd (trace, nullptr /* log */, rwd);
+ // Upload the build artifacts archive, if it exists.
+ //
+ bool error (!rm.status);
+ if (exists (upload_archive) && !error)
+ {
+ const string url (
+ "tftp://" + ops.tftp_host () + '/' + upload_archive.string ());
+
+ try
+ {
+ tftp_curl c (trace,
+ upload_archive,
+ nullfd,
+ curl::put,
+ url,
+ "--tftp-blksize", tftp_blksize,
+ "--max-time", tftp_put_timeout);
+
+ if (!c.wait ())
+ fail << "curl " << *c.exit;
+ }
+ catch (const process_error& e)
+ {
+ fail << "unable to execute curl: " << e;
+ }
+ catch (const system_error& e)
+ {
+ fail << "unable to upload build artifacts to " << url << ": " << e;
+ }
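+
+ // Note: the above amounts to roughly the following curl command line
+ // (block size and timeout stand for the configured values):
+ //
+ //   curl --tftp-blksize <blksize> --max-time <timeout> \
+ //        -T upload.tar tftp://<host>/upload.tar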
+ }
+ }
+
// Upload the result.
//
const string url ("tftp://" + ops.tftp_host () + "/result.manifest.lz4");
@@ -2145,6 +6006,109 @@ build (size_t argc, const char* argv[])
return 3;
}
+// Parse the task_manifest::auxiliary_environment value into the list of
+// environment variable assignments as expected by the process API. Throw
+// invalid_argument if the auxiliary environment is invalid.
+//
+// If comment is not NULL, then add blank and comment lines prefixed with this
+// string (which is normally either '#' or 'rem'). This mode is used to print
+// the environment into the build log.
+//
+static strings
+parse_auxiliary_environment (const string& s, const char* comment = nullptr)
+{
+ strings r;
+
+ // Note: parse observing blanks.
+ //
+ for (size_t b (0), e (0), m (0), n (s.size ());
+ next_word (s, n, b, e, m, '\n', '\r'), b != n; )
+ {
+ string line (s, b, e - b);
+
+ if (trim (line).empty ()) // Blank.
+ {
+ if (comment != nullptr)
+ r.push_back (comment);
+
+ continue;
+ }
+
+ if (line.front () == '#') // Comment.
+ {
+ if (comment != nullptr)
+ {
+ line.erase (0, 1);
+ line.insert (0, comment);
+ r.push_back (move (line));
+ }
+
+ continue;
+ }
+
+ size_t p (line.find ('='));
+
+ if (p == string::npos)
+ throw invalid_argument ("missing '=' in '" + line + '\'');
+
+ string name (line, 0, p);
+
+ if (trim_right (name).empty () ||
+ name.find_first_not_of (
+ "ABCDEFGHIJKLMNOPQRSTUVWXYZ_0123456789") != string::npos)
+ {
+ throw invalid_argument ("invalid variable name '" + name + '\'');
+ }
+
+ // Disallow certain well-known environment variables.
+ //
+ if (name == "PATH"
+#if defined(_WIN32)
+#elif defined(__APPLE__)
+ || name == "DYLD_LIBRARY_PATH"
+#else // Linux, FreeBSD, NetBSD, OpenBSD
+ || name == "LD_LIBRARY_PATH"
+#endif
+ )
+ {
+ throw invalid_argument ("disallowed variable name '" + name + '\'');
+ }
+
+ line.erase (0, p + 1); // Value.
+
+ // Note: we allow empty values.
+ //
+ if (!trim_left (line).empty ())
+ {
+ // Unquote.
+ //
+ char c (line.front ());
+ if (c == '"' || c == '\'')
+ {
+ if (line.size () == 1 || line.back () != c)
+ throw invalid_argument ("invalid quoted value '" + line + '\'');
+
+ line.pop_back ();
+ line.erase (0, 1);
+ }
+ }
+
+ // Reassemble.
+ //
+ line.insert (0, 1, '=');
+ line.insert (0, name);
+
+ r.push_back (move (line));
+ }
+
+ // Pop the final blank line comment.
+ //
+ if (comment != nullptr && !r.empty () && r.back () == comment)
+ r.pop_back ();
+
+ return r;
+}
+
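+// For illustration (names and values hypothetical), given the input
+//
+//   # database
+//   DATABASE_HOST=192.168.0.1
+//   DATABASE_PORT='1245'
+//
+// parse_auxiliary_environment (s) would return the strings
+// "DATABASE_HOST=192.168.0.1" and "DATABASE_PORT=1245", while
+// parse_auxiliary_environment (s, "#") would also preserve the comment
+// line for printing into the build log.
+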
static int
startup ()
{
@@ -2154,11 +6118,13 @@ startup ()
//
// 1. Download the task manifest into the build directory (CWD).
//
- // 2. Parse it and get the target.
+ // 2. Parse it and get the target, environment name, and auxiliary
+ // environment.
//
- // 3. Find the environment setup executable for this target.
+ // 3. Find the environment setup executable for this name.
//
- // 4. Execute the environment setup executable.
+ // 4. Execute the environment setup executable for this target in the
+ // auxiliary environment.
//
// 5. If the environment setup executable fails, then upload the (failed)
// result ourselves.
@@ -2173,6 +6139,33 @@ startup ()
//
task_manifest tm;
+ auto upload_result = [&trace, &tm] (result_status rs,
+ operation_results&& ors)
+ {
+ const string url ("tftp://" + ops.tftp_host () + "/result.manifest.lz4");
+
+ // If we failed before being able to parse the task manifest, use the
+ // "unknown" values for the package name and version.
+ //
+ result_manifest rm {
+ tm.name.empty () ? bpkg::package_name ("unknown") : tm.name,
+ tm.version.empty () ? bpkg::version ("0") : tm.version,
+ rs,
+ move (ors),
+ worker_checksum,
+ nullopt /* dependency_checksum */
+ };
+
+ try
+ {
+ upload_manifest (trace, url, rm, "result");
+ }
+ catch (const io_error& e)
+ {
+ fail << "unable to upload result manifest to " << url << ": " << e;
+ }
+ };
+
try
{
// Download the task.
@@ -2254,6 +6247,31 @@ startup ()
fail << "no default environment setup executable in " << env_dir;
}
+ // Auxiliary environment.
+ //
+ strings aux_env;
+ if (tm.auxiliary_environment)
+ {
+ try
+ {
+ aux_env = parse_auxiliary_environment (*tm.auxiliary_environment);
+ }
+ catch (const invalid_argument& e)
+ {
+ // Note: include the (unparsed) environment in the log so that we can
+ // see what we are dealing with.
+ //
+ operation_result r {
+ "configure",
+ result_status::abort,
+ *tm.auxiliary_environment + "\n" +
+ "error: invalid auxiliary environment: " + e.what () + '\n'};
+
+ upload_result (result_status::abort, {move (r)});
+ return 1;
+ }
+ }
+
// Run it.
//
strings os;
@@ -2291,7 +6309,12 @@ startup ()
// result manifest. There is no reason to retry (most likely there is
// nobody listening on the other end anymore).
//
- switch (run_io_exit (trace, 0, 2, 2, pp, tg, argv0.effect_string (), os))
+ switch (run_io_exit (trace,
+ 0 /* stdin */, 2 /* stdout */, 2 /* stderr */,
+ process_env (pp, aux_env),
+ tg,
+ argv0.effect_string (),
+ os))
{
case 3:
case 2: return 1;
@@ -2301,27 +6324,7 @@ startup ()
}
catch (const failed&)
{
- const string url ("tftp://" + ops.tftp_host () + "/result.manifest.lz4");
-
- // If we failed before being able to parse the task manifest, use the
- // "unknown" values for the package name and version.
- //
- result_manifest rm {
- tm.name.empty () ? bpkg::package_name ("unknown") : tm.name,
- tm.version.empty () ? bpkg::version ("0") : tm.version,
- result_status::abnormal,
- operation_results {}
- };
-
- try
- {
- upload_manifest (trace, url, rm, "result");
- }
- catch (const io_error& e)
- {
- fail << "unable to upload result manifest to " << url << ": " << e;
- }
-
+ upload_result (result_status::abnormal, operation_results {});
return 1;
}
}
@@ -2410,7 +6413,7 @@ try
<< "libbpkg " << LIBBPKG_VERSION_ID << endl
<< "libbutl " << LIBBUTL_VERSION_ID << endl
<< "Copyright (c) " << BBOT_COPYRIGHT << "." << endl
- << "TBC; All rights reserved" << endl;
+ << "This is free software released under the MIT license." << endl;
return 0;
}
diff --git a/build/bootstrap.build b/build/bootstrap.build
index 7c9673c..945349c 100644
--- a/build/bootstrap.build
+++ b/build/bootstrap.build
@@ -1,5 +1,5 @@
# file : build/bootstrap.build
-# license : TBC; see accompanying LICENSE file
+# license : MIT; see accompanying LICENSE file
project = bbot
diff --git a/build/export.build b/build/export.build
index be0ae07..f5e0fdd 100644
--- a/build/export.build
+++ b/build/export.build
@@ -1,5 +1,5 @@
# file : build/export.build
-# license : TBC; see accompanying LICENSE file
+# license : MIT; see accompanying LICENSE file
$out_root/
{
diff --git a/build/root.build b/build/root.build
index d16d0e9..ad832c7 100644
--- a/build/root.build
+++ b/build/root.build
@@ -1,5 +1,5 @@
# file : build/root.build
-# license : TBC; see accompanying LICENSE file
+# license : MIT; see accompanying LICENSE file
cxx.std = latest
@@ -14,10 +14,17 @@ if ($cxx.target.system == 'win32-msvc')
cxx.poptions += -D_CRT_SECURE_NO_WARNINGS -D_SCL_SECURE_NO_WARNINGS
if ($cxx.class == 'msvc')
- cxx.coptions += /wd4251 /wd4275 /wd4800 /wd4819
+ cxx.coptions += /wd4251 /wd4275 /wd4800
elif ($cxx.id == 'gcc')
+{
cxx.coptions += -Wno-maybe-uninitialized -Wno-free-nonheap-object # libbutl
+ if ($cxx.version.major >= 13)
+ cxx.coptions += -Wno-dangling-reference
+}
+elif ($cxx.id.type == 'clang' && $cxx.version.major >= 15)
+ cxx.coptions += -Wno-unqualified-std-cast-call
+
cxx.poptions =+ "-I$out_root" "-I$src_root"
# Load the cli module but only if it's available. This way a distribution
diff --git a/buildfile b/buildfile
index 354dfe6..333d727 100644
--- a/buildfile
+++ b/buildfile
@@ -1,5 +1,5 @@
# file : buildfile
-# license : TBC; see accompanying LICENSE file
+# license : MIT; see accompanying LICENSE file
./: {*/ -tests/ -build/} \
doc{INSTALL NEWS README} legal{LICENSE} \
diff --git a/doc/buildfile b/doc/buildfile
index 0cb2afa..e583047 100644
--- a/doc/buildfile
+++ b/doc/buildfile
@@ -1,5 +1,5 @@
# file : doc/buildfile
-# license : TBC; see accompanying LICENSE file
+# license : MIT; see accompanying LICENSE file
define css: file
css{*}: extension = css
diff --git a/doc/cli.sh b/doc/cli.sh
index bc1361b..ae36428 100755
--- a/doc/cli.sh
+++ b/doc/cli.sh
@@ -1,6 +1,6 @@
#! /usr/bin/env bash
-version=0.14.0-a.0.z
+version=0.17.0-a.0.z
trap 'exit 1' ERR
set -o errtrace # Trap in functions.
@@ -95,6 +95,8 @@ function compile_doc () # <file> <prefix> <suffix>
--html-prologue-file doc-prologue.xhtml \
--html-epilogue-file doc-epilogue.xhtml \
--link-regex '%bpkg([-.].+)%../../bpkg/doc/bpkg$1%' \
+--link-regex '%bpkg(#.+)?%../../bpkg/doc/build2-package-manager-manual.xhtml$1%' \
+--link-regex '%brep(#.+)?%../../brep/doc/build2-repository-interface-manual.xhtml$1%' \
--output-prefix "$2" \
--output-suffix "$3" \
"$1"
diff --git a/doc/manual.cli b/doc/manual.cli
index 4326a5b..2fa3248 100644
--- a/doc/manual.cli
+++ b/doc/manual.cli
@@ -41,12 +41,24 @@ that are executed on the build host. Inside virtual machines/containers,
agent. Virtual machines and containers running a \c{bbot} instance in the
worker mode are collectively called \i{build machines}.
+In addition to a build machine, a build task may also require one or more
+\i{auxiliary machines} which provide additional components that are required
+for building or testing a package and that are impossible or impractical to
+provide as part of the build machine itself.
+
Let's now examine the workflow in the other direction, that is, from a worker
-to a controller. Once a build machine is booted (by the agent), the worker
-inside connects to the TFTP server running on the build host and downloads the
-\i{build task manifest}. It then proceeds to perform the build task and
-uploads the \i{build result manifest} (which includes build logs) to the TFTP
-server.
+to a controller. Once a build machine (plus auxiliary machines, if any) is
+booted (by the agent), the worker inside the build machine connects to the
+TFTP server running on the build host and downloads the \i{build task
+manifest}. It then proceeds to perform the build task and uploads the \i{build
+artifacts archive}, if any, followed by the \i{build result manifest} (which
+includes build logs) to the TFTP server.
+
+Unlike build machines, auxiliary machines are not expected to run \c{bbot}.
+Instead, on boot, they are expected to upload to the TFTP server a list of
+environment variables to propagate to the build machine (see the
+\c{auxiliary-environment} task manifest value as well as \l{#arch-worker
+Worker Logic} for details).
Once an agent receives a build task for a specific build machine, it goes
through the following steps. First, it creates a directory on its TFTP server
@@ -79,23 +91,29 @@ agents. In this case we say that the \i{controller act as an agent}. The
controller may also be configured to monitor build sources, such as SCM
repositories, directly in which case it generates build tasks itself.
-In this architecture the build results are propagated up the chain: from a
-worker, to its agent, to its controller, and so on. A controller that is the
-final destination of a build result uses email to notify interested parties of
-the outcome. For example, \c{brep} would send a notification to the package
-owner if the build failed. Similarly, a \c{bbot} controller that monitors a
-\cb{git} repository would send an email to a committer if their commit caused a
-build failure. The email would include a link (normally HTTP/HTTPS) to the
-build logs hosted by the controller.
+In this architecture the build results and optional build artifacts are
+propagated up the chain: from a worker, to its agent, to its controller, and
+so on. A controller that is the final destination of a build result uses email
+to notify interested parties of the outcome. For example, \c{brep} would send
+a notification to the package owner if the build failed. Similarly, a \c{bbot}
+controller that monitors a \cb{git} repository would send an email to a
+committer if their commit caused a build failure. The email would include a
+link (normally HTTP/HTTPS) to the build logs hosted by the controller. The
+build artifacts, such as generated binary distribution packages, are normally
+made available for the interested parties to download. See \l{brep#upload
+Build Artifacts Upload} for details on the \c{brep} controller's
+implementation of the build artifacts upload handling.
\h#arch-machine-config|Configurations|
-The \c{bbot} architecture distinguishes between a \i{machine configuration}
-and a \i{build configuration}. The machine configuration captures the
-operating system, installed compiler toolchain, and so on. The same build
-machine may be used to \"generate\" multiple \i{build configurations}. For
-example, the same machine can normally be used to produce 32/64-bit and
-debug/optimized builds.
+The \c{bbot} architecture distinguishes between a \i{build machine
+configuration}, \i{build target configuration}, and a \i{build package
+configuration}. The machine configuration captures the operating system,
+installed compiler toolchain, and so on. The same build machine may be used to
+\"generate\" multiple \i{build target configurations}. For example, the same
+machine can normally be used to produce debug/optimized builds.
+
+\h2#arch-machine-config-build-machine|Build Machine Configuration|
The machine configuration is \i{approximately} encoded in its \i{machine
name}. The machine name is a list of components separated with \c{-}.
@@ -106,31 +124,31 @@ component.
The encoding is approximate in the sense that it captures only what's important
to distinguish in a particular \c{bbot} deployment.
-The first component normally identifies the operating system and has the
-following recommended form:
+The first three components normally identify the architecture, operating
+system, and optional variant. They have the following recommended form:
\
-[<arch>_][<class>_]<os>[_<version>]
+<arch>-[<class>_]<os>[_<version>][-<variant>]
\
For example:
\
-windows
-windows_10
-windows_10.1607
-i686_windows_xp
-bsd_freebsd_10
-linux_centos_6.2
-linux_ubuntu_16.04
-macos_10.12
+x86_64-windows
+x86_64-windows_10
+x86_64-windows_10.1607
+x86_64-windows_10-devmode
+x86_64-bsd_freebsd_10
+x86_64-linux_ubuntu_16.04
+x86_64-linux_rhel_9.2-bindist
+aarch64-macos_10.12
\
-The second component normally identifies the installed compiler toolchain and
+The last component normally identifies the installed compiler toolchain and
has the following recommended form:
\
-<id>[<version>][<vendor>][<runtime>]
+<id>[_<version>][_<vendor>][_<runtime>]
\
For example:
@@ -140,34 +158,100 @@ gcc
gcc_6
gcc_6.3
gcc_6.3_mingw_w64
+clang_3.9
clang_3.9_libc++
-clang_3.9_libstdc++
msvc_14
-msvc_14u3
-icc
+msvc_14.3
+clang_15.0_msvc_msvc_17.6
+clang_16.0_llvm_msvc_17.6
\
Some examples of complete machine names:
\
-windows_10-msvc_14u3
-macos_10.12-clang_10.0
-linux_ubuntu_16.04-gcc_6.3
+x86_64-windows_10-msvc_14.3
+x86_64-macos_10.12-clang_10.0
+aarch64-linux_ubuntu_16.04-gcc_6.3
+aarch64-linux_rhel_9.2-bindist-gcc_11
\
-Similarly, the build configuration is encoded in a \i{configuration name}
-using the same format. As described in \l{#arch-controller Controller Logic},
-build configurations are generated from machine configurations. As a result,
-it usually makes sense to have the first component identify the operating
-systems and the second component \- the toolchain with the rest identifying a
-particular build configuration variant, for example, optimized, sanitized,
-etc. For example:
+\h2#arch-machine-config-build-target-config|Build Target Configuration|
+
+Similarly, the build target configuration is encoded in a \i{configuration
+name} using the same overall format. As described in \l{#arch-controller
+Controller Logic}, target configurations are generated from machine
+configurations. As a result, it usually makes sense to have the first
+component identify the operating system and the second component \- the
+compiler toolchain with the rest identifying a particular target configuration
+variant, for example, optimized, sanitized, etc:
\
-windows-vc_14-O2
-linux-gcc_6-O3_asan
+[<class>_]<os>[_<version>]-<toolchain>[-<variant>]
\
+For example:
+
+\
+windows_10-msvc_17.6
+windows_10-msvc_17.6-O2
+windows_10-msvc_17.6-static_O2
+windows_10-msvc_17.6-relocatable
+windows_10-clang_16.0_llvm_msvc_17.6_lld
+linux_debian_12-clang_16_libc++-static_O3
+\
+
+Note that there is no \c{<arch>} component in a build target configuration:
+this information is best conveyed as part of \c{<target>} as described in
+\l{#arch-controller Controller Logic}.
+
+\h2#arch-machine-config-build-package-config|Build Package Configuration|
+
+A package can be built in multiple package configurations per target
+configuration. A build package configuration normally specifies the options
+and/or the package configuration variables that need to be used for the
+build. It may also include information regarding the dependency packages
+that need to be additionally configured. The build package configurations
+originate from the package manifest \c{*-build-config}, \c{*-builds},
+\c{*-build-include}, and \c{*-build-exclude} values. See
+\l{bpkg#manifest-package Package Manifest} for more information on these
+values.
+
+
+\h2#arch-machine-config-auxiliary|Auxiliary Machines and Configurations|
+
+Besides the build machine and the build configuration that is derived from it,
+a package build may also involve one or more \i{auxiliary machines} and the
+corresponding \i{auxiliary configurations}.
+
+An auxiliary machine provides additional components that are required for
+building or testing a package and that are impossible or impractical to
+provide as part of the build machine itself. For example, a package may need
+access to a suitably configured database, such as PostgreSQL, in order to run
+its tests.
+
+The auxiliary machine name follows the same overall format as the build
+machine name except that the last component captures the information about the
+additional component in question rather than the compiler toolchain. For
+example:
+
+\
+x86_64-linux_debian_12-postgresql_16
+aarch64-linux_debian_12-mysql_8
+\
+
+The auxiliary configuration name is automatically derived from the machine
+name by removing the \c{<arch>} component. For example:
+
+\
+linux_debian_12-postgresql_16
+linux_debian_12-mysql_8
+\
+
+\N|Note that there is no generation of multiple auxiliary configurations from
+the same auxiliary machine since that would require some communication of the
+desired configuration variant to the machine.|
+
+
\h#arch-machine-header|Machine Header Manifest|
@@ TODO: need ref to general manifest overview in bpkg, or, better yet,
@@ -182,26 +266,41 @@ followed by the detailed description of each value in subsequent sections.
id: <machine-id>
name: <machine-name>
summary: <string>
+[role]: build|auxiliary
+[ram-minimum]: <kib>
+[ram-maximum]: <kib>
\
For example:
\
-id: windows_10-msvc_14-1.3
-name: windows_10-msvc_14
+id: x86_64-windows_10-msvc_14-1.3
+name: x86_64-windows_10-msvc_14
summary: Windows 10 build 1607 with VC 14 update 3
\
+\
+id: aarch64-linux_debian_12-postgresql_16-1.0
+name: aarch64-linux_debian_12-postgresql_16
+summary: Debian 12 with PostgreSQL 16 test user/database
+role: auxiliary
+ram-minimum: 2097152
+ram-maximum: 4194304
+\
+
\h2#arch-machine-header-id|\c{id}|
\
id: <machine-id>
\
-The uniquely machine version/revision/build identifies. For virtual machines
+The unique machine version/revision/build identifier. For virtual machines
this can be the disk image checksum. For a container this can be a UUID that is
re-generated every time a container filesystem is altered.
+Note that we assume that a different machine identifier is assigned on any
+change that may affect the build result.
+
\h2#arch-machine-header-name|\c{name}|
@@ -221,11 +320,34 @@ summary: <string>
The one-line description of the machine.
+\h2#arch-machine-header-role|\c{role}|
+
+\
+[role]: build|auxiliary
+\
+
+The machine role. If unspecified, then \c{build} is assumed.
+
+
+\h2#arch-machine-header-ram|\c{ram-minimum}, \c{ram-maximum}|
+
+\
+[ram-minimum]: <kib>
+[ram-maximum]: <kib>
+\
+
+The minimum and the maximum amount of RAM in KiB that the machine requires.
+The maximum amount is interpreted as the amount beyond which there will be no
+benefit. If unspecified, then it is assumed, respectively, that the machine
+will run with whatever minimum amount a deployment provides and that it will
+always benefit from more RAM. Neither value should be \c{0}.
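+
+For example, the values \c{2097152} and \c{4194304} in the machine header
+manifest example above correspond to 2 GiB and 4 GiB, respectively (1 GiB =
+1048576 KiB).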
+
+
\h#arch-machine|Machine Manifest|
The build machine manifest contains the complete description of a build
machine on the build host (see the Build OS documentation for their origin and
-location). The machine manifest starts with the machine manifest header with
+location). The machine manifest starts with the machine header manifest with
all the header values appearing before any non-header values. The non-header
part of the manifest synopsis is presented next followed by the detailed
description of each value in subsequent sections.
@@ -304,7 +426,8 @@ changes: 1.2: increased disk size to 30GB
Or:
\
-changes:\
+changes:
+\\
1.1
- initial version
@@ -330,14 +453,24 @@ version: <package-version>
repository-url: <repository-url>
[repository-type]: pkg|git|dir
[trust]: <repository-fp>
-[test-exclude]: <package-name>/<package-version>
+[requires]: <package-requirements>
+[tests]: <dependency-package>
+[examples]: <dependency-package>
+[benchmarks]: <dependency-package>
+[dependency-checksum]: <checksum>
machine: <machine-name>
+[auxiliary-machine]: <machine-name>
+[auxiliary-machine-<name>]: <machine-name>
target: <target-triplet>
[environment]: <environment-name>
-[config]: <config-args>
+[auxiliary-environment]: <environment-vars>
+[target-config]: <tgt-config-args>
+[package-config]: <pkg-config-args>
+[host]: true|false
[warning-regex]: <warning-regex>
[interactive]: <breakpoint>
+[worker-checksum]: <checksum>
\
@@ -378,6 +511,7 @@ The repository type (see \c{repository-url} for details). Alternatively, the
repository type can be specified as part of the URL scheme. See
\l{bpkg-repository-types(1)} for details.
+
\h2#arch-task-trust|\c{trust}|
\
@@ -396,19 +530,27 @@ some agents may only trust their internally-specified fingerprints to prevent
the \"man in the middle\" attacks.
-\h2#arch-task-test-exclude|\c{test-exclude}|
+\h2#arch-task-requires-tests-examples-benchmarks|\c{requires, tests, examples, benchmarks}|
+
+The primary package manifest values that need to be known by the \c{bbot}
+worker before it retrieves the primary package manifest. See
+\l{bpkg#manifest-package Package Manifest} for more information on these
+values.
+
+The controller copies these values from the primary package manifest, except
+those \c{tests}, \c{examples}, and \c{benchmarks} values which should be
+excluded from building due to their \c{builds}, \c{build-include}, and
+\c{build-exclude} manifest values.
+
+
+\h2#arch-task-dependency-checksum|\c{dependency-checksum}|
\
-[test-exclude]: <package-name>/<package-version>
+[dependency-checksum]: <checksum>
\
-The separate test, example, or benchmark package to exclude from building
-together with the primary package. This value may be specified multiple times.
-
-The controller adds this value for packages specified via the \c{tests},
-\c{examples}, and \c{benchmarks} primary package manifest values which should
-be excluded from building due to their \c{builds}, \c{build-include}, and
-\c{build-exclude} manifest values.
+The package dependency checksum received as a part of the previous build task
+result (see \l{#arch-result Result Manifest}).
\h2#arch-task-machine|\c{machine}|
@@ -420,6 +562,21 @@ machine: <machine-name>
The name of the build machine to use.
+\h2#arch-task-auxiliary-machine|\c{auxiliary-machine}|
+
+\
+[auxiliary-machine]: <machine-name>
+[auxiliary-machine-<name>]: <machine-name>
+\
+
+The names of the auxiliary machines to use. These values correspond to the
+\c{build-auxiliary} and \c{build-auxiliary-<name>} values in the package
+manifest. While in the package manifest each value specifies an auxiliary
+configuration pattern, here it specifies the concrete auxiliary machine name
+that was picked by the controller from the list of available auxiliary
+machines (sent as part of the task request) that match this pattern.
+
+
\h2#arch-task-target|\c{target}|
\
@@ -445,30 +602,113 @@ The name of the build environment to use. See \l{#arch-worker Worker Logic}
for details.
-\h2#arch-task-config|\c{config}|
+\h2#arch-task-auxiliary-environment|\c{auxiliary-environment}|
+
+\
+[auxiliary-environment]: <environment-vars>
+\
+
+The environment variables describing the auxiliary machines. If any
+\c{auxiliary-machine*} values are specified, then after starting such
+machines, the agent prepares a combined list of environment variables that
+were uploaded by these machines and passes it to the worker in this value.
+
+The format of this value is a list of environment variable assignments,
+one per line, in the form:
+
+\
+<name>=<value>
+\
+
+Whitespace before \c{<name>}, around \c{=}, and after \c{<value>}, as well as
+blank lines and lines that start with \c{#} are ignored. The \c{<name>} part
+must only contain capital alphabetic, numeric, and \c{_} characters. The
+\c{<value>} part as a whole can be single ('\ ') or double (\"\ \")
+quoted. For example:
+
+\
+DATABASE_HOST=192.168.0.1
+DATABASE_PORT=1245
+DATABASE_USER='John \"Johnny\" Doe'
+DATABASE_NAME=\" test database \"
+\
+
+If the corresponding machine is specified as \c{auxiliary-machine-<name>},
+then its environment variables are prefixed with capitalized \c{<name>_}. For
+example:
+
+\
+auxiliary-machine-pgsql: x86_64-linux_debian_12-postgresql_16
+auxiliary-environment:
+\\
+PGSQL_DATABASE_HOST=192.168.0.1
+PGSQL_DATABASE_PORT=1245
+...
+\\
+\
+
+
+\h2#arch-task-target-config|\c{target-config}|
+
+\
+[target-config]: <tgt-config-args>
+\
+
+The additional target configuration options and variables. A single level of
+quotes (either single or double) is removed in each value before being passed
+to \c{bpkg}. For example, the following value:
+
+\
+target-config: config.cc.coptions=\"-O3 -stdlib='libc++'\"
+\
+
+Will be passed to \c{bpkg} as the following (single) argument:
\
-[config]: <config-args>
+config.cc.coptions=-O3 -stdlib='libc++'
\
-The additional configuration options and variables. A single level of quotes
+Values can be separated with spaces or newlines. See \l{#arch-controller
+Controller Logic} for details.
+
+
+\h2#arch-task-package-config|\c{package-config}|
+
+\
+[package-config]: <pkg-config-args>
+\
+
+The primary package manifest \c{*-build-config} value for the build
+configuration the build task is issued for. See \l{bpkg#manifest-package
+Package Manifest} for more information on this value. A single level of quotes
(either single or double) is removed in each value before being passed to
\c{bpkg}. For example, the following value:
\
-config: config.cc.coptions=\"-O3 -stdlib='libc++'\"
+package-config: \"?libcurl ~7.76.0\"
\
Will be passed to \c{bpkg} as the following (single) argument:
\
-config.cc.coptions=-O3 -stdlib='libc++'
+?libcurl ~7.76.0
\
Values can be separated with spaces or newlines. See \l{#arch-controller
Controller Logic} for details.
+\h2#arch-task-host|\c{host}|
+
+\
+[host]: true|false
+\
+
+If \c{true}, then the build target configuration is self-hosted. If not
+specified, \c{false} is assumed. See \l{#arch-controller Controller Logic} for
+details.
+
+
\h2#arch-task-warning-regex|\c{warning-regex}|
\
@@ -516,8 +756,19 @@ specified \c{interactive-mode} with either the \c{true} or \c{both} value in
the task request.
The breakpoint can either be a primary step id of the worker script or the
-special \c{error} or \c{warning} value. See \l{#arch-worker Worker Logic} for
-details.
+special \c{error} or \c{warning} value. There is also the special \c{none}
+value which never interrupts the task execution. See \l{#arch-worker Worker
+Logic} for details.
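+
+For example, a task manifest could contain (step id for illustration only):
+
+\
+interactive: bpkg.configure.build
+\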
+
+
+\h2#arch-task-worker-checksum|\c{worker-checksum}|
+
+\
+[worker-checksum]: <checksum>
+\
+
+The worker checksum received as a part of the previous build task result (see
+\l{#arch-result Result Manifest}).
\h#arch-result|Result Manifest|
@@ -535,15 +786,26 @@ status: <status>
[update-status]: <status>
[test-status]: <status>
[install-status]: <status>
+[bindist-status]: <status>
+[sys-install-status]: <status>
[test-installed-status]: <status>
+[sys-uninstall-status]: <status>
[uninstall-status]: <status>
+[upload-status]: <status>
[configure-log]: <text>
[update-log]: <text>
[test-log]: <text>
[install-log]: <text>
+[bindist-log]: <text>
+[sys-install-log]: <text>
[test-installed-log]: <text>
+[sys-uninstall-log]: <text>
[uninstall-log]: <text>
+[upload-log]: <text>
+
+[worker-checksum]: <checksum>
+[dependency-checksum]: <checksum>
\
@@ -574,11 +836,13 @@ status: <status>
The overall (cumulative) build result status. Valid values are:
\
+skip # Package update and subsequent operations were skipped.
success # All operations completed successfully.
warning # One or more operations completed with warnings.
error # One or more operations completed with errors.
abort # One or more operations were aborted.
abnormal # One or more operations terminated abnormally.
+interrupt # Task execution has been interrupted.
\
The \c{abort} status indicates that the operation has been aborted by
@@ -591,9 +855,18 @@ include 'log:' with commands that start VM, for completeness?).
The \c{abnormal} status indicates that the operation has terminated
abnormally, for example, due to the package manager or build system crash.
+The \c{interrupt} status indicates that the task execution has been
+interrupted, for example, to reassign resources to a higher priority task.
+
Note that the overall \c{status} value should appear before any per-operation
\c{*-status} values.
+The \c{skip} status indicates that the build task checksums received from the
+controller have not changed and the task execution has therefore been skipped
+under the assumption that it would have produced the same result. See
+\c{agent-checksum}, \c{worker-checksum}, and \c{dependency-checksum} for
+details.
+
\h2#arch-result-x-status|\c{*-status}|
@@ -611,8 +884,12 @@ configure
update
test
install
+bindist
+sys-install
test-installed
+sys-uninstall
uninstall
+upload
\
@@ -628,13 +905,38 @@ the list of supported operation names refer to the \c{*-status} value
description.
+\h2#arch-result-dependency-checksum|\c{dependency-checksum}|
+
+\
+[dependency-checksum]: <checksum>
+\
+
+The package dependency checksum obtained as a byproduct of the package
+configuration operation. See \l{bpkg-pkg-build(1)} command's
+\c{--rebuild-checksum} option for details.
+
+
+\h2#arch-result-worker-checksum|\c{worker-checksum}|
+
+\
+[worker-checksum]: <checksum>
+\
+
+The version of the worker logic used to perform the package build task.
+
+
\h#arch-task-req|Task Request Manifest|
An agent (or controller acting as an agent) sends a task request to its
controller via HTTP/HTTPS POST method (@@ URL/API endpoint). The task request
-starts with the task request manifest followed by a list of machine manifests.
-The task request manifest synopsis is presented next followed by the detailed
-description of each value in subsequent sections.
+starts with the task request manifest followed by a list of machine header
+manifests. The task request manifest synopsis is presented next followed by
+the detailed description of each value in subsequent sections.
+
+\N|The controller is expected to pick each offered machine header manifest
+only once. If an agent is capable of running multiple instances of the same
+machine, then it must send the matching number of machine header manifests for
+such a machine.|
\
agent: <name>
@@ -643,6 +945,7 @@ toolchain-version: <standard-version>
[interactive-mode]: false|true|both
[interactive-login]: <login>
[fingerprint]: <agent-fingerprint>
+[auxiliary-ram]: <kib>
\
@@ -709,6 +1012,18 @@ authentication in which case it should respond with the 401 (unauthorized)
HTTP status code.
+\h2#arch-task-req-auxiliary-ram|\c{auxiliary-ram}|
+
+\
+[auxiliary-ram]: <kib>
+\
+
+The amount of RAM in KiB that is available for running auxiliary machines. If
+unspecified, then assume there is no hard limit (that is, the agent can
+allocate up to the host's available RAM minus the amount required to run the
+build machine).
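+
+For example, an agent that can dedicate 16 GiB of RAM to auxiliary machines
+would send \c{auxiliary-ram: 16777216}.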
+
+
\h#arch-task-res|Task Response Manifest|
A controller sends the task response manifest in response to the task request
@@ -722,6 +1037,8 @@ subsequent sections.
session: <id>
[challenge]: <text>
[result-url]: <url>
+[*-upload-url]: <url>
+[agent-checksum]: <checksum>
\
@@ -759,6 +1076,28 @@ private key and then \c{base64}-encoding the result.
The URL to POST (upload) the result request to.
+\h2#arch-task-res-upload-url|\c{*-upload-url}|
+
+\
+[*-upload-url]: <url>
+\
+
+The URLs to upload the build artifacts to, if any, via the HTTP \c{POST}
+method using the \c{multipart/form-data} content type (see \l{brep#upload
+Build Artifacts Upload} for details on the upload protocol). The substring
+matched by \c{*} in \c{*-upload-url} denotes the upload type.
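+
+As an illustration only (URL and form part name hypothetical), such an
+upload could be performed with:
+
+\
+curl -F archive=@upload.tar https://example.org/upload/bindist
+\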
+
+
+\h2#arch-task-res-agent-checksum|\c{agent-checksum}|
+
+\
+[agent-checksum]: <checksum>
+\
+
+The agent checksum received as a part of the previous build task result
+request (see \l{#arch-result-req Result Request Manifest}).
+
+
\h#arch-result-req|Result Request Manifest|
On completion of a task an agent (or controller acting as an agent) sends the
@@ -772,6 +1111,7 @@ description of each value in subsequent sections.
\
session: <id>
[challenge]: <text>
+[agent-checksum]: <checksum>
\
@@ -795,6 +1135,15 @@ response. It must be present only if the \c{challenge} value was present in
the task response.
+\h2#arch-result-req-agent-checksum|\c{agent-checksum}|
+
+\
+[agent-checksum]: <checksum>
+\
+
+The version of the agent logic used to perform the package build task.
+
+
\h#arch-worker|Worker Logic|
The \c{bbot} worker builds each package in a \i{build environment} that is
@@ -802,20 +1151,24 @@ established for a particular build target. The environment has three
components: the execution environment (environment variables, etc), build
system modules, as well as configuration options and variables.
-Setting up of the environment is performed by an executable (script, batch
-file, etc). Specifically, upon receiving a build task, if it specifies the
-environment name then the worker looks for the environment setup executable
-with this name in a specific directory and for the executable called
-\c{default} otherwise. Not being able to locate the environment executable is
-an error.
-
-Once the environment setup executable is determined, the worker re-executes
-itself as that executable passing to it as command line arguments the target
-name, the path to the \c{bbot} worker to be executed once the environment is
-setup, and any additional options that need to be propagated to the re-executed
-worker. The environment setup executable is executed in the build directory as
-its current working directory. The build directory contains the build task
-\c{task.manifest} file.
+Setting up of the execution environment is performed by an executable (script,
+batch file, etc). Specifically, upon receiving a build task, if it specifies
+the environment name then the worker looks for the environment setup
+executable with this name in a specific directory and for the executable
+called \c{default} otherwise. Not being able to locate the environment
+executable is an error.
+
+In addition to the environment executable, if the task requires any auxiliary
+machines, then the \c{auxiliary-environment} value from the task manifest is
+incorporated into the execution environment.
+
+Specifically, once the environment setup executable is determined, the worker
+re-executes itself, in the auxiliary environment, as that executable, passing
+to it as command line arguments the target name, the path to the \c{bbot}
+worker to be executed once the environment is set up, and any additional
+options that need to be propagated to the re-executed worker. The environment
+setup executable is executed in the build directory as its current working
+directory. The build directory contains the build task \c{task.manifest} file.
The environment setup executable sets up the necessary execution environment
for example by adjusting \c{PATH} or running a suitable \c{vcvars} batch file.
@@ -828,16 +1181,56 @@ modules (\c{<env-modules>}) and the list of configuration options and variables
The re-executed \c{bbot} worker then proceeds to test the package from the
repository by executing the following commands, collectively called a
\i{worker script}. Each command has a unique \i{step id} that can be used as a
-breakpoint as well as a prefix in the \c{<config-args>},
+breakpoint and normally as a prefix in the \c{<tgt-config-args>},
\c{<env-config-args>}, and \c{<env-modules>} values as discussed in
-\l{#arch-controller Controller Logic}. Some step ids have fallback step ids
-(listed in parenthesis) which are used in the absence of the primary step id
-values. The \c{<>}-values are from the task manifest and the environment.
-
-\
-# bpkg.create
+\l{#arch-controller Controller Logic} as well as in the \c{<pkg-config-args>}
+values (see below). The \c{<>}-values are from the task manifest and the
+environment though some are assigned by the worker during the script execution
+(configuration directories, UUIDs, etc). In particular, the
+\c{<pkg-config-args>} (prefixed global options and variables),
+\c{<pkg-config-opts>} (unprefixed options), \c{<pkg-config-vars>} (unprefixed
+variables), \c{<dependency-name>}, \c{<dependency-version-constraint>}, and
+\c{<dep-config-vars>} values result from parsing the
+\l{#arch-task-package-config \c{package-config}} task manifest value.
+
+Some prefix step ids have fallback step ids which are used in the absence of
+the primary step id values. If the prefix step id differs from the breakpoint
+step id and/or has fallback step ids, then they are listed in parentheses:
+the prefix id before the colon and the fallback ids after it.
+
+Some commands have no target configuration or environment options or
+variables. Such commands have only breakpoint step ids associated, which are
+listed in square brackets.
+
+Note that the worker script varies for different primary package types. The
+\c{bbot} worker classifies the primary package based on the configuration type
+in which it is built: \c{module} (build system module packages), \c{host}
+(packages such as source code generators, marked with the \c{requires: host}
+manifest value; see \l{bpkg#manifest-package Package Manifest} for details),
+and \c{target} (all other packages).
+
+Note also that the \c{*.configure.build} step configures potentially multiple
+packages (primary package, tests, etc) in potentially multiple configurations
+by always using the \c{bpkg.global.configure.build} prefix step id for global
+(as opposed to package-specific) \l{bpkg-pkg-build(1)} options. The
+\c{bpkg.global.configure.build} prefix id has no fallback ids.
+
+Note finally that if no configuration variables are specified in the main
+package configuration, then the worker adds the
+\c{config.<name>.develop=false} configuration variable for the main package at
+the \c{bpkg.configure.build} step to trigger its package skeleton creation and
+loading. It also adds this variable for external test packages at this step,
+for the same purpose. This makes sure that these packages can be used as
+dependencies of dependents with configuration clauses. To keep the below
+listings concise, these variables are not shown.
+
+Worker script for \c{target} packages:
+
+\
+# bpkg.create (bpkg.target.create : b.create, bpkg.create)
#
-bpkg -V create <env-modules> <env-config-args> <config-args>
+bpkg -V create <env-modules> \\
+ <env-config-args> <tgt-config-args> <pkg-config-args>
# bpkg.configure.add
#
@@ -847,9 +1240,20 @@ bpkg -v add <repository-url>
#
bpkg -v fetch --trust <repository-fp>
-# bpkg.configure.build
+# bpkg.configure.build (
+# bpkg.global.configure.build,
+# (bpkg.target.configure.build : b.configure, bpkg.configure.build))
#
-bpkg -v build --yes --configure-only <package-name>/<package-version>
+bpkg -v build --configure-only \\
+ <env-config-args> <tgt-config-args> <pkg-config-args> \\
+ [<pkg-config-opts>] \\
+ [{ <pkg-config-vars> }+] <package-name>/<package-version> \\
+ [([{ <test-config-vars> }+] \\
+ <test-package-name>[ <test-version-constraint>])...] \\
+ [([{ <dep-config-vars> }+] \\
+ (?|sys:)<dependency-name> \\
+ [<dependency-version-constraint>])...] \\
+ [?sys:<dependency-name>[ <dependency-version-constraint>]...]
# bpkg.update
#
@@ -863,138 +1267,1010 @@ bpkg -v update <package-name>
bpkg -v test <package-name>
}
-# For each package referred to by the tests, examples, or benchmarks
-# package manifest values and not excluded by the test-exclude task
-# manifest values:
+# For each (runtime) tests, examples, or benchmarks package referred
+# to by the task manifest:
#
{
- # bpkg.test-separate.configure.build (bpkg.configure.build)
- #
- bpkg -v build --yes --configure-only \\
- '<package-name> [<version-constraint>]'
-
- # bpkg.test-separate.update (bpkg.update)
+ # bpkg.test-separate.update ( : bpkg.update)
#
bpkg -v update <package-name>
- # bpkg.test-separate.test (bpkg.test)
+ # bpkg.test-separate.test ( : bpkg.test)
#
bpkg -v test <package-name>
}
-# If config.install.root is specified:
+# If the install operation is supported by the package,
+# config.install.root is specified, and no
+# bpkg.bindist.{debian,fedora,archive} step is enabled:
#
{
# bpkg.install
#
bpkg -v install <package-name>
+ # If bbot.install.ldconfig step is enabled:
+ #
+ {
+ # bbot.install.ldconfig
+ #
+ sudo ldconfig
+ }
+}
+
+# If the install operation is supported by the package and
+# bpkg.bindist.{debian,fedora,archive} step is enabled:
+#
+{
+ # bpkg.bindist.{debian,fedora,archive}
+ #
+ bpkg -v bindist --distribution <distribution> \\
+ <env-config-args> <tgt-config-args> <pkg-config-args> \\
+ <package-name>
+}
+
+# If the install operation is supported by the package and
+# bbot.sys-install step is enabled:
+#
+{
+ # If <distribution> is 'debian':
+ #
+ {
+ # bbot.sys-install.apt-get.update
+ #
+ sudo apt-get update
+
+ # bbot.sys-install.apt-get.install
+ #
+ sudo apt-get install <distribution-package-file>...
+ }
+ #
+ # Otherwise, if <distribution> is 'fedora':
+ #
+ {
+ # bbot.sys-install.dnf.install
+ #
+ sudo dnf install <distribution-package-file>...
+ }
+ #
+ # Otherwise, if <distribution> is 'archive':
+ #
+ {
+ # For each package file:
+ #
+ {
+ # bbot.sys-install.tar.extract
+ #
+ [sudo] tar -xf <distribution-package-file> \\
+ <env-config-args> <tgt-config-args> <pkg-config-args>
+ }
+
+ # If bbot.sys-install.ldconfig step is enabled:
+ #
+ {
+ # bbot.sys-install.ldconfig
+ #
+ sudo ldconfig
+ }
+ }
+}
+
+# If the main package is installed either from source or from the
+# binary distribution package:
+#
+{
# If the package contains subprojects that support the test
# operation:
#
{
- # b.test-installed.create
+ # b.test-installed.create ( : b.create)
#
- b -V create <env-modules> <env-config-args> <config-args>
+ b -V create <env-modules> \\
+ <env-config-args> <tgt-config-args> <pkg-config-args>
- # b.test-installed.configure
+ # For each test subproject:
#
- b -v configure
+ {
+ # b.test-installed.configure ( : b.configure)
+ #
+ b -v configure [<pkg-config-vars>]
+ }
# b.test-installed.test
#
b -v test
}
- # If any of the tests, examples, or benchmarks package manifest
- # values are specified and are not all excluded by the test-exclude
- # task manifest values:
+ # If task manifest refers to any (runtime) tests, examples, or
+ # benchmarks packages:
#
{
- # bpkg.test-installed.create (bpkg.create)
+ # bpkg.test-separate-installed.create (
+ # bpkg.test-separate-installed.create_for_target :
+ # bpkg.test-separate-installed.create)
#
- bpkg -V create <env-modules> <env-config-args> <config-args>
+ bpkg -V create <env-modules> \\
+ <env-config-args> <tgt-config-args> <pkg-config-args>
- # bpkg.test-installed.configure.add (bpkg.configure.add)
+ # bpkg.test-separate-installed.configure.add (
+ # : bpkg.configure.add)
#
bpkg -v add <repository-url>
- # bpkg.test-installed.configure.fetch (bpkg.configure.fetch)
+ # bpkg.test-separate-installed.configure.fetch (
+ # : bpkg.configure.fetch)
#
bpkg -v fetch --trust <repository-fp>
- # For each package referred to by the tests, examples, or
- # benchmarks package manifest values and not excluded by the
- # test-exclude task manifest values:
+ # bpkg.test-separate-installed.configure.build (
+ # bpkg.global.configure.build,
+ # (bpkg.test-separate-installed.configure.build_for_target :
+ # bpkg.test-separate-installed.configure.build))
+ #
+ bpkg -v build --configure-only \\
+ <env-config-args> <tgt-config-args> <pkg-config-args> \\
+ ([{ <test-config-vars> }+] \\
+ <test-package-name>[ <test-version-constraint>])... \\
+ ?sys:<package-name>/<package-version> \\
+ [?sys:<dependency-name>[ <dependency-version-constraint>]...]
+
+ # For each (runtime) tests, examples, or benchmarks package
+ # referred to by the task manifest:
#
{
- # bpkg.test-separate-installed.configure.build (
- # bpkg.configure.build)
- #
- bpkg -v build --yes --configure-only \\
- '<package-name> [<version-constraint>]'
-
- # bpkg.test-separate-installed.update (bpkg.update)
+ # bpkg.test-separate-installed.update ( : bpkg.update)
#
bpkg -v update <package-name>
- # bpkg.test-separate-installed.test (bpkg.test)
+ # bpkg.test-separate-installed.test ( : bpkg.test)
#
bpkg -v test <package-name>
}
}
+}
+
+# If the main package is installed from the binary distribution package:
+#
+{
+ # If <distribution> is 'debian':
+ #
+ {
+ # bbot.sys-uninstall.apt-get.remove
+ #
+ sudo apt-get remove <distribution-package-name>...
+ }
+ #
+ # Otherwise, if <distribution> is 'fedora':
+ #
+ {
+ # bbot.sys-uninstall.dnf.remove
+ #
+ sudo dnf remove <distribution-package-name>...
+ }
+ #
+ # Otherwise, if <distribution> is 'archive':
+ #
+ {
+ # Noop.
+ }
+}
+# If the main package is installed from source:
+#
+{
# bpkg.uninstall
#
bpkg -v uninstall <package-name>
}
+# If the install operation is supported by the package and
+# bbot.bindist.upload step is enabled:
+#
+{
+ # Move the generated binary distribution files to the
+ # upload/bindist/<distribution>/ directory.
+}
+
+# If bbot.upload step is enabled and upload/ directory is not empty:
+#
+{
+ # bbot.upload.tar.create
+ #
+ tar -cf upload.tar upload/
+
+ # bbot.upload.tar.list
+ #
+ tar -tf upload.tar upload/
+}
+
# end
#
# This step id can only be used as a breakpoint.
\
-For details on configuring and testing installation refer to
-\l{#arch-controller Controller Logic}.
+Worker script for \c{host} packages:
-If the package is a build system module, then it is built and tested (using
-the bundled tests) in a separate configuration that mimics the one used to
-build \c{build2} itself. Note that the configuration and environment options
-and variables are not passed to commands that may affect this configuration.
-Such commands, therefore, have associated step ids that can only be used
-as breakpoints (listed in square brackets):
+\
+# If configuration is self-hosted:
+#
+{
+ # bpkg.create (bpkg.host.create : b.create, bpkg.create)
+ #
+ bpkg -V create --type host -d <host-conf> <env-modules> \\
+ <env-config-args> <tgt-config-args> <pkg-config-args>
+}
+#
+# Otherwise:
+#
+{
+ # [bpkg.create]
+ #
+ b -V create(<host-conf>, cc) config.config.load=~host
+ bpkg -v create --existing --type host -d <host-conf>
+}
+
+# bpkg.configure.add
+#
+bpkg -v add -d <host-conf> <repository-url>
+
+# bpkg.configure.fetch
+#
+bpkg -v fetch -d <host-conf> --trust <repository-fp>
+
+# If configuration is self-hosted and config.install.root is specified:
+#
+{
+ # bpkg.create (bpkg.target.create : b.create, bpkg.create)
+ #
+ bpkg -V create -d <install-conf> <env-modules> \\
+ <env-config-args> <tgt-config-args> <pkg-config-args>
+
+ # [bpkg.link]
+ #
+ bpkg -v link -d <install-conf> <host-conf>
+
+ # bpkg.configure.add
+ #
+ bpkg -v add -d <install-conf> <repository-url>
+
+ # bpkg.configure.fetch
+ #
+ bpkg -v fetch -d <install-conf> --trust <repository-fp>
+}
+
+# If task manifest refers to any build-time tests, examples, or
+# benchmarks packages:
+#
+{
+ # bpkg.create (bpkg.target.create : b.create, bpkg.create)
+ #
+ bpkg -V create -d <target-conf> <env-modules> \\
+ <env-config-args> <tgt-config-args> <pkg-config-args>
+
+ # [bpkg.create]
+ #
+ b -V create(<module-conf>, cc) config.config.load=~build2
+ bpkg -v create --existing --type build2 -d <module-conf>
+
+ # [bpkg.link]
+ #
+ bpkg -v link -d <target-conf> <host-conf>
+ bpkg -v link -d <target-conf> <module-conf>
+ bpkg -v link -d <host-conf> <module-conf>
+
+ # If configuration is self-hosted and config.install.root is
+ # specified:
+ #
+ {
+ # [bpkg.link]
+ #
+ bpkg -v link -d <install-conf> <module-conf>
+ }
+
+ # bpkg.configure.add
+ #
+ bpkg -v add -d <target-conf> <repository-url>
+
+ # bpkg.configure.fetch
+ #
+ bpkg -v fetch -d <target-conf> --trust <repository-fp>
+}
+
+# bpkg.configure.build (bpkg.global.configure.build)
+#
+# Notes:
+#
+# - Some parts may be omitted.
+#
+# - Parts related to different configurations have different prefix
+# step ids:
+#
+# bpkg.host.configure.build for <host-uuid>
+# bpkg.target.configure.build for <install-uuid>
+# bpkg.target.configure.build for <target-uuid>
+#
+# - All parts have the same fallback step ids: b.configure and
+# bpkg.configure.build.
+#
+bpkg -v build --configure-only \\
+<env-config-args> <tgt-config-args> <pkg-config-args> \\
+[<pkg-config-opts>] \\
+\\
+{ --config-uuid <host-uuid> \\
+ <env-config-args> <tgt-config-args> <pkg-config-args> \\
+ [<pkg-config-vars>] }+ \\
+<package-name>/<package-version> \\
+\\
+{ --config-uuid <install-uuid> \\
+ <env-config-args> <tgt-config-args> <pkg-config-args> \\
+ [<pkg-config-vars>] }+ \\
+<package-name>/<package-version> \\
+\\
+({ --config-uuid <host-uuid> \\
+ <env-config-args> <tgt-config-args> <pkg-config-args> \\
+ [<test-config-vars>] }+ \\
+ <runtime-test-package-name>[ <test-version-constraint>])... \\
+\\
+({ --config-uuid <target-uuid> \\
+ <env-config-args> <tgt-config-args> <pkg-config-args> \\
+ [<test-config-vars>] }+ \\
+ <buildtime-test-package-name>[ <test-version-constraint>])... \\
+\\
+({ --config-uuid <host-uuid> [--config-uuid <install-uuid>] \\
+ [<dep-config-vars>] }+ \\
+ (?|sys:)<dependency-name>[ <dependency-version-constraint>])... \\
+\\
+[?sys:<dependency-name>[ <dependency-version-constraint>]...]
+
+# bpkg.update
+#
+bpkg -v update -d <host-conf> <package-name>
+
+# If the test operation is supported by the package:
+#
+{
+ # bpkg.test
+ #
+ bpkg -v test -d <host-conf> <package-name>
+}
+
+# If configuration is self-hosted, then for each runtime tests,
+# examples, or benchmarks package referred to by the task manifest:
+#
+{
+ # bpkg.test-separate.update ( : bpkg.update)
+ #
+ bpkg -v update -d <host-conf> <package-name>
+
+ # bpkg.test-separate.test ( : bpkg.test)
+ #
+ bpkg -v test -d <host-conf> <package-name>
+}
+
+# For each build-time tests, examples, or benchmarks package referred
+# to by the task manifest:
+#
+{
+ # bpkg.test-separate.update ( : bpkg.update)
+ #
+ bpkg -v update -d <target-conf> <package-name>
+
+ # bpkg.test-separate.test ( : bpkg.test)
+ #
+ bpkg -v test -d <target-conf> <package-name>
+}
+
+# If configuration is self-hosted, the install operation is supported
+# by the package, config.install.root is specified, and no
+# bpkg.bindist.{debian,fedora,archive} step is enabled:
+#
+{
+ # bpkg.install
+ #
+ bpkg -v install -d <install-conf> <package-name>
+
+ # If bbot.install.ldconfig step is enabled:
+ #
+ {
+ # bbot.install.ldconfig
+ #
+ sudo ldconfig
+ }
+}
+
+# If configuration is self-hosted, the install operation is supported
+# by the package, and bpkg.bindist.{debian,fedora,archive} step is
+# enabled:
+#
+{
+ # bpkg.bindist.{debian,fedora,archive}
+ #
+ bpkg -v bindist --distribution <distribution> \\
+ <env-config-args> <tgt-config-args> <pkg-config-args> \\
+ <package-name>
+}
+
+# If the install operation is supported by the package and
+# bbot.sys-install step is enabled:
+#
+{
+ # If <distribution> is 'debian':
+ #
+ {
+ # bbot.sys-install.apt-get.update
+ #
+ sudo apt-get update
+
+ # bbot.sys-install.apt-get.install
+ #
+ sudo apt-get install <distribution-package-file>...
+ }
+ #
+ # Otherwise, if <distribution> is 'fedora':
+ #
+ {
+ # bbot.sys-install.dnf.install
+ #
+ sudo dnf install <distribution-package-file>...
+ }
+ #
+ # Otherwise, if <distribution> is 'archive':
+ #
+ {
+ # For each package file:
+ #
+ {
+ # bbot.sys-install.tar.extract
+ #
+ [sudo] tar -xf <distribution-package-file> \\
+ <env-config-args> <tgt-config-args> <pkg-config-args>
+ }
+
+ # If bbot.sys-install.ldconfig step is enabled:
+ #
+ {
+ # bbot.sys-install.ldconfig
+ #
+ sudo ldconfig
+ }
+ }
+}
+
+# If the main package is installed either from source or from the
+# binary distribution package:
+#
+{
+ # If the package contains subprojects that support the test
+ # operation:
+ #
+ {
+ # b.test-installed.create ( : b.create)
+ #
+ b -V create <env-modules> \\
+ <env-config-args> <tgt-config-args> <pkg-config-args>
+
+ # For each test subproject:
+ #
+ {
+ # b.test-installed.configure ( : b.configure)
+ #
+ b -v configure [<pkg-config-vars>]
+ }
+
+ # b.test-installed.test
+ #
+ b -v test
+ }
+
+ # If task manifest refers to any tests, examples, or benchmarks
+ # packages:
+ #
+ {
+ # bpkg.test-separate-installed.create (
+ # bpkg.test-separate-installed.create_for_host :
+ # bpkg.test-separate-installed.create)
+ #
+ bpkg -V create --type host -d <host-conf> <env-modules> \\
+ <env-config-args> <tgt-config-args> <pkg-config-args>
+
+ # If task manifest refers to any runtime tests, examples, or
+ # benchmarks packages:
+ #
+ {
+ # bpkg.test-separate-installed.configure.add (
+ # : bpkg.configure.add)
+ #
+ bpkg -v add -d <host-conf> <repository-url>
+
+ # bpkg.test-separate-installed.configure.fetch (
+ # : bpkg.configure.fetch)
+ #
+ bpkg -v fetch -d <host-conf> --trust <repository-fp>
+ }
+
+ # If task manifest refers to any build-time tests, examples, or
+ # benchmarks packages:
+ #
+ {
+ # bpkg.test-separate-installed.create (
+ # bpkg.test-separate-installed.create_for_host :
+ # bpkg.test-separate-installed.create)
+ #
+ bpkg -V create -d <target-conf> <env-modules> \\
+ <env-config-args> <tgt-config-args> <pkg-config-args>
+
+ # [bpkg.test-separate-installed.create]
+ #
+ b -V create(<module-conf>, cc) config.config.load=~build2
+ bpkg -v create --existing --type build2 -d <module-conf>
+
+ # [bpkg.test-separate-installed.link]
+ #
+ bpkg -v link -d <target-conf> <host-conf>
+ bpkg -v link -d <target-conf> <module-conf>
+ bpkg -v link -d <host-conf> <module-conf>
+
+ # bpkg.test-separate-installed.configure.add (
+ # : bpkg.configure.add)
+ #
+ bpkg -v add -d <target-conf> <repository-url>
+
+ # bpkg.test-separate-installed.configure.fetch (
+ # : bpkg.configure.fetch)
+ #
+ bpkg -v fetch -d <target-conf> --trust <repository-fp>
+ }
+
+ # bpkg.test-separate-installed.configure.build (
+ # bpkg.global.configure.build,
+ # (bpkg.test-separate-installed.configure.build_for_host :
+ # bpkg.test-separate-installed.configure.build))
+ #
+  # Note that either the runtime or the build-time tests related part
+  # (but not both) may be omitted.
+ #
+ bpkg -v build --configure-only \\
+ <env-config-args> <tgt-config-args> <pkg-config-args> \\
+ \\
+ ({ --config-name <host-conf> [<test-config-vars>] }+ \\
+ <runtime-test-package-name>[ <test-version-constraint>])... \\
+ \\
+ ({ --config-name <target-conf> [<test-config-vars>] }+ \\
+ <buildtime-test-package-name>[ <test-version-constraint>])... \\
+ \\
+ ?sys:<package-name>/<package-version> \\
+ \\
+ [?sys:<dependency-name>[ <dependency-version-constraint>]...]
+
+ # For each tests, examples, or benchmarks package referred
+ # to by the task manifest:
+ #
+ {
+ # bpkg.test-separate-installed.update ( : bpkg.update)
+ #
+ bpkg -v update <package-name>
+
+ # bpkg.test-separate-installed.test ( : bpkg.test)
+ #
+ bpkg -v test <package-name>
+ }
+ }
+}
+
+# If the main package is installed from the binary distribution package:
+#
+{
+ # If <distribution> is 'debian':
+ #
+ {
+ # bbot.sys-uninstall.apt-get.remove
+ #
+ sudo apt-get remove <distribution-package-name>...
+ }
+ #
+ # Otherwise, if <distribution> is 'fedora':
+ #
+ {
+ # bbot.sys-uninstall.dnf.remove
+ #
+ sudo dnf remove <distribution-package-name>...
+ }
+ #
+ # Otherwise, if <distribution> is 'archive':
+ #
+ {
+ # Noop.
+ }
+}
+
+# If the main package is installed from source:
+#
+{
+ # bpkg.uninstall
+ #
+ bpkg -v uninstall -d <install-conf> <package-name>
+}
+
+# If the install operation is supported by the package and
+# bbot.bindist.upload step is enabled:
+#
+{
+ # Move the generated binary distribution files to the
+ # upload/bindist/<distribution>/ directory.
+}
+
+# If bbot.upload step is enabled and upload/ directory is not empty:
+#
+{
+ # bbot.upload.tar.create
+ #
+ tar -cf upload.tar upload/
+
+ # bbot.upload.tar.list
+ #
+ tar -tf upload.tar upload/
+}
+
+# end
+#
+# This step id can only be used as a breakpoint.
+\
+
+Worker script for \c{module} packages:
\
-# [bpkg.module.create]
+# If configuration is self-hosted:
+#
+{
+ # bpkg.create (bpkg.module.create)
+ #
+ b -V create(<module-conf>, <env-modules>) config.config.load=~build2 \\
+ <env-config-args> <tgt-config-args> <pkg-config-args>
+ bpkg -v create --existing --type build2 -d <module-conf>
+}
+#
+# Otherwise:
#
-b -V create config.config.load=~build2
-bpkg -v create --existing
+{
+ # [bpkg.create]
+ #
+ b -V create(<module-conf>, cc) config.config.load=~build2
+ bpkg -v create --existing --type build2 -d <module-conf>
+}
-# bpkg.module.configure.add (bpkg.configure.add)
+# bpkg.configure.add
#
-bpkg -v add <repository-url>
+bpkg -v add -d <module-conf> <repository-url>
-# bpkg.module.configure.fetch (bpkg.configure.fetch)
+# bpkg.configure.fetch
#
-bpkg -v fetch --trust <repository-fp>
+bpkg -v fetch -d <module-conf> --trust <repository-fp>
+
+# If configuration is self-hosted and config.install.root is specified:
+#
+{
+ # bpkg.create (bpkg.module.create)
+ #
+ b -V create(<install-conf>, <env-modules>) \\
+ config.config.load=~build2 \\
+ <env-config-args> <tgt-config-args> <pkg-config-args>
+ bpkg -v create --existing --type build2 -d <install-conf>
+
+ # bpkg.configure.add
+ #
+ bpkg -v add -d <install-conf> <repository-url>
-# [bpkg.module.configure.build]
+ # bpkg.configure.fetch
+ #
+ bpkg -v fetch -d <install-conf> --trust <repository-fp>
+}
+
+# If task manifest refers to any (build-time) tests, examples, or
+# benchmarks packages:
#
-bpkg -v build --yes --configure-only <package-name>/<package-version>
+{
+ # bpkg.create (bpkg.target.create : b.create, bpkg.create)
+ #
+ bpkg -V create -d <target-conf> <env-modules> \\
+ <env-config-args> <tgt-config-args> <pkg-config-args>
-# [bpkg.module.update]
+ # [bpkg.create]
+ #
+ b -V create(<host-conf>, cc) config.config.load=~host
+ bpkg -v create --existing --type host -d <host-conf>
+
+ # [bpkg.link]
+ #
+ bpkg -v link -d <target-conf> <host-conf>
+ bpkg -v link -d <target-conf> <module-conf>
+ bpkg -v link -d <host-conf> <module-conf>
+
+ # bpkg.configure.add
+ #
+ bpkg -v add -d <target-conf> <repository-url>
+
+ # bpkg.configure.fetch
+ #
+ bpkg -v fetch -d <target-conf> --trust <repository-fp>
+}
+
+# bpkg.configure.build (bpkg.global.configure.build)
#
-bpkg -v update <package-name>
+# Notes:
+#
+# - Some parts may be omitted.
+#
+# - Parts related to different configurations have different prefix
+# step ids:
+#
+# bpkg.module.configure.build for <module-uuid>
+# bpkg.target.configure.build for <install-uuid>
+# bpkg.target.configure.build for <target-uuid>
+#
+# - All parts have the same fallback step ids: b.configure and
+# bpkg.configure.build.
+#
+bpkg -v build --configure-only \\
+<env-config-args> <tgt-config-args> <pkg-config-args> \\
+[<pkg-config-opts>] \\
+\\
+{ --config-uuid <module-uuid> \\
+ <env-config-args> <tgt-config-args> <pkg-config-args> \\
+ [<pkg-config-vars>] }+ \\
+<package-name>/<package-version> \\
+\\
+{ --config-uuid <install-uuid> \\
+ <env-config-args> <tgt-config-args> <pkg-config-args> \\
+ [<pkg-config-vars>] }+ \\
+<package-name>/<package-version> \\
+\\
+({ --config-uuid <target-uuid> \\
+ <env-config-args> <tgt-config-args> <pkg-config-args> \\
+ [<test-config-vars>] }+ \\
+ <buildtime-test-package-name>[ <test-version-constraint>])... \\
+\\
+({ --config-uuid <host-uuid> [--config-uuid <install-uuid>] \\
+ [<dep-config-vars>] }+ \\
+ (?|sys:)<dependency-name>[ <dependency-version-constraint>])... \\
+\\
+[?sys:<dependency-name>[ <dependency-version-constraint>]...]
+
+# bpkg.update
+#
+bpkg -v update -d <module-conf> <package-name>
# If the test operation is supported by the package:
#
{
- # [bpkg.module.test]
+ # bpkg.test
#
- bpkg -v test <package-name>
+ bpkg -v test -d <module-conf> <package-name>
+}
+
+# For each (build-time) tests, examples, or benchmarks package referred
+# to by the task manifest:
+#
+{
+ # bpkg.test-separate.update ( : bpkg.update)
+ #
+ bpkg -v update -d <target-conf> <package-name>
+
+ # bpkg.test-separate.test ( : bpkg.test)
+ #
+ bpkg -v test -d <target-conf> <package-name>
+}
+
+# If configuration is self-hosted, the install operation is supported
+# by the package, config.install.root is specified, and no
+# bpkg.bindist.{debian,fedora,archive} step is enabled:
+#
+{
+ # bpkg.install
+ #
+ bpkg -v install -d <install-conf> <package-name>
+
+ # If bbot.install.ldconfig step is enabled:
+ #
+ {
+ # bbot.install.ldconfig
+ #
+ sudo ldconfig
+ }
+}
+
+# If configuration is self-hosted, the install operation is supported
+# by the package, and bpkg.bindist.{debian,fedora,archive} step is
+# enabled:
+#
+{
+ # bpkg.bindist.{debian,fedora,archive}
+ #
+ bpkg -v bindist --distribution <distribution> \\
+ <env-config-args> <tgt-config-args> <pkg-config-args> \\
+ <package-name>
+}
+
+# If the install operation is supported by the package and
+# bbot.sys-install step is enabled:
+#
+{
+ # If <distribution> is 'debian':
+ #
+ {
+ # bbot.sys-install.apt-get.update
+ #
+ sudo apt-get update
+
+ # bbot.sys-install.apt-get.install
+ #
+ sudo apt-get install <distribution-package-file>...
+ }
+ #
+ # Otherwise, if <distribution> is 'fedora':
+ #
+ {
+ # bbot.sys-install.dnf.install
+ #
+ sudo dnf install <distribution-package-file>...
+ }
+ #
+ # Otherwise, if <distribution> is 'archive':
+ #
+ {
+ # For each package file:
+ #
+ {
+ # bbot.sys-install.tar.extract
+ #
+ [sudo] tar -xf <distribution-package-file> \\
+ <env-config-args> <tgt-config-args> <pkg-config-args>
+ }
+
+ # If bbot.sys-install.ldconfig step is enabled:
+ #
+ {
+ # bbot.sys-install.ldconfig
+ #
+ sudo ldconfig
+ }
+ }
}
+
+# If the main package is installed either from source or from the
+# binary distribution package:
+#
+{
+ # If task manifest refers to any (build-time) tests, examples, or
+ # benchmarks packages:
+ #
+ {
+ # [bpkg.test-separate-installed.create]
+ #
+ b -V create(<module-conf>, cc) config.config.load=~build2
+ bpkg -v create --existing --type build2 -d <module-conf>
+
+ # bpkg.test-separate-installed.create (
+ # bpkg.test-separate-installed.create_for_module :
+ # bpkg.test-separate-installed.create)
+ #
+ bpkg -V create -d <target-conf> <env-modules> \\
+ <env-config-args> <tgt-config-args> <pkg-config-args>
+
+ # bpkg.test-separate-installed.create (
+ # bpkg.test-separate-installed.create_for_module :
+ # bpkg.test-separate-installed.create)
+ #
+ bpkg -V create --type host -d <host-conf> <env-modules> \\
+ <env-config-args> <tgt-config-args> <pkg-config-args>
+
+ # [bpkg.test-separate-installed.link]
+ #
+ bpkg -v link -d <target-conf> <host-conf>
+ bpkg -v link -d <target-conf> <module-conf>
+ bpkg -v link -d <host-conf> <module-conf>
+
+ # bpkg.test-separate-installed.configure.add (
+ # : bpkg.configure.add)
+ #
+ bpkg -v add -d <target-conf> <repository-url>
+
+ # bpkg.test-separate-installed.configure.fetch (
+ # : bpkg.configure.fetch)
+ #
+ bpkg -v fetch -d <target-conf> --trust <repository-fp>
+
+ # bpkg.test-separate-installed.configure.build (
+ # bpkg.global.configure.build,
+ # (bpkg.test-separate-installed.configure.build_for_module :
+ # bpkg.test-separate-installed.configure.build))
+ #
+ bpkg -v build --configure-only \\
+ <env-config-args> <tgt-config-args> <pkg-config-args> \\
+ \\
+ ({ --config-name <target-conf> [<test-config-vars>] }+ \\
+ <buildtime-test-package-name>[ <test-version-constraint>])... \\
+ \\
+ ?sys:<package-name>/<package-version> \\
+ \\
+ [?sys:<dependency-name>[ <dependency-version-constraint>]...]
+
+ # For each (build-time) tests, examples, or benchmarks package
+ # referred to by the task manifest:
+ #
+ {
+ # bpkg.test-separate-installed.update ( : bpkg.update)
+ #
+ bpkg -v update -d <target-conf> <package-name>
+
+ # bpkg.test-separate-installed.test ( : bpkg.test)
+ #
+ bpkg -v test -d <target-conf> <package-name>
+ }
+ }
+}
+
+# If the main package is installed from the binary distribution package:
+#
+{
+ # If <distribution> is 'debian':
+ #
+ {
+ # bbot.sys-uninstall.apt-get.remove
+ #
+ sudo apt-get remove <distribution-package-name>...
+ }
+ #
+ # Otherwise, if <distribution> is 'fedora':
+ #
+ {
+ # bbot.sys-uninstall.dnf.remove
+ #
+ sudo dnf remove <distribution-package-name>...
+ }
+ #
+ # Otherwise, if <distribution> is 'archive':
+ #
+ {
+ # Noop.
+ }
+}
+
+# If the main package is installed from source:
+#
+{
+ # bpkg.uninstall
+ #
+ bpkg -v uninstall -d <install-conf> <package-name>
+}
+
+# If the install operation is supported by the package and
+# bbot.bindist.upload step is enabled:
+#
+{
+ # Move the generated binary distribution files to the
+ # upload/bindist/<distribution>/ directory.
+}
+
+# If bbot.upload step is enabled and upload/ directory is not empty:
+#
+{
+ # bbot.upload.tar.create
+ #
+ tar -cf upload.tar upload/
+
+ # bbot.upload.tar.list
+ #
+ tar -tf upload.tar upload/
+}
+
+# end
+#
+# This step id can only be used as a breakpoint.
\
+For details on configuring and testing installation refer to
+\l{#arch-controller Controller Logic}.
+
If a primary or test package comes from a version control-based repository,
then its \c{dist} meta-operation is also tested as a part of the
\c{bpkg[.*].configure.build} steps by re-distributing the source directory in
@@ -1040,82 +2316,183 @@ shift
exec \"$@\" cc config.c=\"gcc-9 $mode\" config.cxx=\"g++-9 $mode\"
\
+\h2#arch-worker-bindist-result|Bindist Result Manifest|
+
+At the \c{bbot.bindist.upload} step the worker also creates the
+\c{bindist-result.json} and \c{bindist-result.manifest} files in the
+\c{upload/bindist/<distribution>/} directory, next to the generated binary
+distribution package files. The \c{bindist-result.json} file contains the
+structured JSON output of the \l{bpkg-pkg-bindist(1)} command. The
+\c{bindist-result.manifest} file contains the subset of the information from
+\c{bindist-result.json}. Specifically, it starts with the binary distribution
+package header manifest followed by a list of package file manifests. The
+manifest values are:
+
+\
+distribution:
+architecture:
+os-release-name-id:
+os-release-version-id:
+package-name:
+package-version:
+[package-system-version]:
+
+package-file-type:
+package-file-path:
+[package-file-system-name]:
+\
+
+The manifest values derive from the corresponding JSON object values and
+preserve their semantics. The only differences are that the
+\c{os-release-version-id} value may not be absent and the
+\c{package-file-path} values are relative to the
+\c{upload/bindist/<distribution>/} directory and are in the POSIX
+representation. See \l{bpkg-pkg-bindist(1)} for the JSON values semantics.
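+
+As an example, a \c{bindist-result.manifest} file for a Debian package could
+look along these lines (all the concrete values below are hypothetical and
+for illustration only):
+
+\
+distribution: debian
+architecture: amd64
+os-release-name-id: debian
+os-release-version-id: 12
+package-name: libhello
+package-version: 1.0.0
+package-system-version: 1.0.0-0~debian12
+
+package-file-type: deb
+package-file-path: libhello_1.0.0-0~debian12_amd64.deb
+
+package-file-type: deb
+package-file-path: libhello-dev_1.0.0-0~debian12_amd64.deb
+\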
+
\h#arch-controller|Controller Logic|
A \c{bbot} controller that issues its own build tasks maps available build
-machines (as reported by agents) to \i{build configurations} according to the
-\c{buildtab} configuration file. Blank lines and lines that start with \c{#}
-are ignored. All other lines in this file have the following format:
+machines (as reported by agents) to \i{build target configurations} according
+to the \c{buildtab} configuration file. Blank lines and lines that start with
+\c{#} are ignored. All other lines in this file have the following format:
\
-<machine-pattern> <config> <target>[/<environment>] <classes> [<config-arg>]* [<warning-regex>]*
+<machine-pattern> <target-config> <target>[/<environment>] <classes> [<tgt-config-arg>]* [<warning-regex>]*
-<config-arg> = [<prefix>:](<variable>|<option>)
-<prefix> = <tool>[.<phase>][.<operation>[.<command>]]
+<tgt-config-arg> = [[+|-]<prefix>:](<variable>|<option>) | \\
+ (+|-)<prefix>:
+<prefix> = <tool>[.<cfg-type>][.<phase>][.<operation>[.<command>]]
\
Where \c{<machine-pattern>} is a filesystem wildcard pattern that is matched
-against available machine names, \c{<config>} is the configuration name,
-\c{<target>} is the build target, optional \c{<environment>} is the build
-environment name, \c{<classes>} is a space-separated list of configuration
-classes that is matched against the package \c{builds} values, optional
-\c{<config-arg>} list is additional configuration options and variables, and
-optional \c{<warning-regex>} list is additional regular expressions that
-should be used to detect warnings in the logs.
-
-The build configurations can belong to multiple classes with their names
-reflecting some common configuration aspects, such as the operating system,
-compiler, build options, etc. Predefined class names are \c{default}, \c{all},
-and \c{none}. The default configurations are built by default. A configuration
-must also belong to the \c{all} unless it is hidden. Valid custom class names
-must contain only alpha-numeric characters, \c{_}, \c{+}, \c{-}, and \c{.},
-except as the first character for the last three. Class names that start with
-\c{_} are reserved for the future hidden/special class functionality.
-
-Regular expressions must start with \c{~}, to be distinguished from
-configuration options and variables. Note that the \c{<config-arg>} and
-\c{<warning-regex>} lists have the same quoting semantics as in the \c{config}
-and the \c{warning-regex} value in the build task manifest. The matched
-machine name, the target, the environment name, configuration
-options/variables, and regular expressions are included into the build task
-manifest.
-
-Values in the \c{<config-arg>} list can be opionally prefixed with the \i{step
-id} or a leading portion thereof to restrict it to a specific step, operation,
-phase, or tool in the \i{worker script} (see \l{#arch-worker Worker
-Logic}). Unprefixed values only apply to the \c{bpkg.create},
-\c{b.test-installed.create}, and \c{bpkg.test-installed.create} steps. Note
-that options with values can only be specified using the single argument
+against available machine names, \c{<target-config>} is the target
+configuration name, \c{<target>} is the build target, optional
+\c{<environment>} is the build environment name, \c{<classes>} is a
+space-separated list of configuration classes that is matched against the
+package configuration \c{*-builds} values, optional \c{<tgt-config-arg>} list
+is additional configuration options and variables, and optional
+\c{<warning-regex>} list is additional regular expressions that should be used
+to detect warnings in the logs.
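+
+For example, here is a single hypothetical buildtab entry (more realistic
+examples follow below):
+
+\
+linux*-gcc_12 linux-gcc_12 x86_64-linux-gnu \"all default gcc\" config.cc.coptions=-O2 ~\"warning: \"
+\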
+
+The build target configurations can belong to multiple classes with their
+names reflecting some common configuration aspects, such as the operating
+system, compiler, build options, etc. Predefined class names are \c{default},
+\c{all}, \c{hidden}, \c{none}, \c{host}, and \c{build2}. The default target
+configurations are built by default. A configuration must also belong to the
+\c{all}, \c{hidden}, or some special-purpose configuration class. The latter
+is intended for testing optional functionality that packages are not normally
+expected to provide (for example, relocatable installation). A
+configuration that is self-hosted must also belong to the \c{host} class and,
+if it is also self-hosted for build system modules, to the \c{build2}
+class. Valid custom class names must contain only alpha-numeric characters,
+\c{_}, \c{+}, \c{-}, and \c{.}, except as the first character for the last
+three. Class names that start with \c{_} are reserved for the future
+hidden/special class functionality.
+
+Regular expressions must start with \c{~}, to be distinguished from target
+configuration options and variables. Note that the \c{<tgt-config-arg>} and
+\c{<warning-regex>} lists have the same quoting semantics as in the
+\c{target-config} and the \c{warning-regex} value in the build task
+manifest. The matched machine name, the target, the environment name,
+configuration options/variables, and regular expressions are included into the
+build task manifest.
+
+Values in the \c{<tgt-config-arg>} list can be optionally prefixed with the
+\i{step id} or a leading portion thereof to restrict them to a specific step,
+operation, phase, or tool in the \i{worker script} (see \l{#arch-worker Worker
+Logic}). The prefix can optionally begin with the \c{+} or \c{-} character (in
+this case the argument can be omitted) to enable or disable the respective
+step. The steps which can be enabled or disabled are:
+
+\
+bpkg.update
+bpkg.test
+bpkg.test-separate.update
+bpkg.test-separate.test
+
+# Disabled if bpkg.bindist.* is enabled.
+#
+bpkg.install
+
+# Disabled by default.
+#
+bbot.install.ldconfig
+
+# Disabled by default.
+#
+bpkg.bindist.{debian,fedora,archive}
+
+# Disabled if bpkg.bindist.* is disabled.
+#
+bbot.sys-install
+
+# Disabled by default.
+#
+bbot.sys-install.ldconfig
+
+b.test-installed.test
+bpkg.test-separate-installed.update
+bpkg.test-separate-installed.test
+
+# Disabled by default.
+#
+bbot.bindist.upload
+
+bbot.upload
+\
+
+Note that the \c{bpkg.bindist.*} steps are mutually exclusive and only the
+last step status change via the \c{(+|-)bpkg.bindist.*} prefix is considered.
+
+Unprefixed values only apply to the \c{*.create[_for_*]} steps. Note that
+options with values can only be specified using the single argument
notation. For example:
\
-bpkg:--fetch-timeout=600 bpkg.configure.fetch:--fetch-timeout=60 b:-j1
+bpkg:--fetch-timeout=600 \\
+bpkg.configure.fetch:--fetch-timeout=60 \\
++bpkg.bindist.debian: \\
+b:-j1
\
Note that each machine name is matched against every pattern and all the
-patterns that match produce a configuration. If a machine does not match any
-pattern, then it is ignored (meaning that this controller is not interested in
-testing its packages with this machine). If multiple machines match the same
-pattern, then only a single configuration using any of the machines is
-produced (meaning that this controller considers these machines equivalent).
-
-As an example, let's say we have a machine named \c{windows_10-vc_14u3}. If we
+patterns that match produce a target configuration. If a machine does not
+match any pattern, then it is ignored (meaning that this controller is not
+interested in testing its packages with this machine). If multiple machines
+match the same pattern, then only a single target configuration using any of
+the machines is produced (meaning that this controller considers these
+machines equivalent).
+
+As an example, let's say we have a machine named \c{windows_10-msvc_14.3}. If we
wanted to test both 32 and 64-bit as well as debug and optimized builds, then
-we could have generated the following configurations:
+we could have generated the following target configurations:
+
+\
+windows*-msvc_14* windows-msvc_14-Z7 i686-microsoft-win32-msvc14.0 \"all default msvc i686 debug\" config.cc.coptions=/Z7 config.cc.loptions=/DEBUG ~\"warning C4\d{3}: \"
+
+windows*-msvc_14* windows-msvc_14-O2 i686-microsoft-win32-msvc14.0 \"all default msvc i686 optimized\" config.cc.coptions=/O2 ~\"warning C4\d{3}: \"
+
+windows*-msvc_14* windows-msvc_14-Z7 x86_64-microsoft-win32-msvc14.0 \"all default msvc x86_64 debug\" config.cc.coptions=/Z7 config.cc.loptions=/DEBUG ~\"warning C4\d{3}: \"
+
+windows*-msvc_14* windows-msvc_14-O2 x86_64-microsoft-win32-msvc14.0 \"all default msvc x86_64 optimized\" config.cc.coptions=/O2 ~\"warning C4\d{3}: \"
\
-windows*-msvc_14* windows-msvc_14-32-Z7 i686-microsoft-win32-msvc14.0 \"all default msvc i686 debug\" config.cc.coptions=/Z7 config.cc.loptions=/DEBUG ~\"warning C4\d{3}: \"
-windows*-msvc_14* windows-msvc_14-32-O2 i686-microsoft-win32-msvc14.0 \"all default msvc i686 optimized\" config.cc.coptions=/O2 ~\"warning C4\d{3}: \"
+In the above example we could handle both \c{i686} and \c{x86_64}
+architectures with the same machine, but this may not always be possible and
+we may have to use different machines for different configuration/target
+combinations. For example:
+
+\
+x86_64_linux_debian_11*-gcc_12.2 linux_debian_11-gcc_12.2 i686-linux-gnu ...
-windows*-msvc_14* windows-msvc_14-64-Z7 x86_64-microsoft-win32-msvc14.0 \"all default msvc x86_64 debug\" config.cc.coptions=/Z7 config.cc.loptions=/DEBUG ~\"warning C4\d{3}: \"
+x86_64_linux_debian_11*-gcc_12.2 linux_debian_11-gcc_12.2 x86_64-linux-gnu ...
-windows*-msvc_14* windows-msvc_14-64-O2 x86_64-microsoft-win32-msvc14.0 \"all default msvc x86_64 optimized\" config.cc.coptions=/O2 ~\"warning C4\d{3}: \"
+aarch64_linux_debian_11*-gcc_12.2 linux_debian_11-gcc_12.2 aarch64-linux-gnu ...
\
As another example, let's say we have \c{linux_fedora_25-gcc_6} and
\c{linux_ubuntu_16.04-gcc_6}. If all we cared about is testing GCC 6 64-bit
-builds on Linux, then our configurations could look like this:
+builds on Linux, then our target configurations could look like this:
\
linux*-gcc_6 linux-gcc_6-g x86_64-linux-gnu \"all default gcc debug\" config.cc.coptions=-g
@@ -1123,11 +2500,12 @@ linux*-gcc_6 linux-gcc_6-g x86_64-linux-gnu \"all default gcc debug\" config.cc.
linux*-gcc_6 linux-gcc_6-O3 x86_64-linux-gnu \"all default gcc optimized\" config.cc.coptions=-O3
\
-A build configuration class can derive from another class in which case
-configurations that belong to the derived class are treated as also belonging
-to the base class (or classes, recursively). The derived and base class names
-are separated with \c{:} (no leading or trailing spaces allowed) and the base
-must be present in the first mentioning of the derived class. For example:
+A build target configuration class can derive from another class, in which
+case target configurations that belong to the derived class are treated as
+also belonging to the base class (or classes, recursively). The derived and
+base class names are separated with \c{:} (no leading or trailing spaces
+allowed) and the base must be present in the first mention of the derived
+class. For example:
\
linux*-gcc_6 linux-gcc_6-g x86_64-linux-gnu \"all gcc-6+ debug\" config.cc.coptions=-g
@@ -1151,15 +2529,17 @@ linux*-gcc_6 linux-gcc_6 x86_64-linux-gnu \"all gcc-6+ \"
linux*-gcc_8 linux-gcc_8 x86_64-linux-gnu \"all gcc-8+:gcc-7+\"
\
-If the \c{<config-arg>} list contains the \c{config.install.root} variable
-that applies to the \c{bpkg.create} step, then in addition to building and
-possibly running tests, the \c{bbot} worker will also test installing and
-uninstalling each package. Furthermore, if the package contains subprojects
-that support the test operation and/or refers to other packages via the
-\c{tests}, \c{examples}, or \c{benchmarks} manifest values which are not
-excluded by the \c{test-exclude} task manifest values, then the worker will
-additionally build such subprojects/packages against the installation and run
-their tests (test installed and test separate installed phases).
+If the \c{<tgt-config-arg>} list contains the \c{config.install.root} variable
+that applies to the \c{bpkg.target.create} or, as a fallback, \c{b.create} or
+\c{bpkg.create} steps, then in addition to building and possibly running
+tests, the \c{bbot} worker will also test installing and uninstalling each
+package (unless replaced with the \c{bbot.sys-install} step). Furthermore, if
+the package contains subprojects that support the test operation and/or refers
+to other packages via the \c{tests}, \c{examples}, or \c{benchmarks} manifest
+values which are not excluded by the \c{bbot} controller, then the worker will
+additionally build such subprojects/packages against the installation (created
+either from source or from the binary distribution package) and run their
+tests (test installed and test separate installed phases).
Two types of installations can be tested: \i{system} and \i{private}. A system
installation uses a well-known location, such as \c{/usr} or \c{/usr/local},
@@ -1170,8 +2550,8 @@ preferable, it may not be always usable because of the potential conflicts
with the already installed software, for example, by the system package
manager.
-As an example, the following two configurations could be used to test system
-and private installations:
+As an example, the following two target configurations could be used to test
+system and private installations:
\
linux*-gcc* linux-gcc-sysinstall x86_64-linux-gnu \"all default gcc\" config.install.root=/usr config.install.sudo=sudo
@@ -1179,7 +2559,23 @@ linux*-gcc* linux-gcc-sysinstall x86_64-linux-gnu \"all default gcc\" config.ins
linux*-gcc* linux-gcc-prvinstall x86_64-linux-gnu \"all default gcc\" config.install.root=/tmp/install config.cc.poptions=-I/tmp/install/include config.cc.loptions=-L/tmp/install/lib config.bin.rpath=/tmp/install/lib
\
-Note also that while building and running tests against the installation the
-worker makes the \c{bin} subdirectory of \c{config.install.root} the first
-entry in the \c{PATH} environment variable.
+Note also that while building and running tests against the installation
+created either from source or from the archive distribution package, the
+worker makes the \c{bin} subdirectory of \c{config.install.root} the first
+entry in the \c{PATH} environment variable, except for build system modules,
+which presumably don't install any executables. As was mentioned earlier,
+the \c{config.install.root} variable is normally expected to be prefixed with
+the \c{bpkg.target.create} or, as a fallback, \c{b.create} or \c{bpkg.create}
+step ids. However, for testing relocatable installations it can be desirable
+to extract the archive distribution package content at the
+\c{bbot.sys-install.tar.extract} step into a different installation
+directory. In that case, this directory also needs to be specified as
+\c{bbot.sys-install:config.install.root}. If specified, this directory is
+preferred as the base for forming the \c{bin/} directory path.
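+
+As an example, the following hypothetical \c{<tgt-config-arg>} list (the
+directory paths are for illustration only) could be used to test a
+relocatable archive installation:
+
+\
+bpkg.create:config.install.root=/usr/local \\
++bpkg.bindist.archive: \\
+bpkg.bindist.archive:config.install.relocatable=true \\
+bbot.sys-install.tar.extract:--directory=/tmp/sys-install \\
+bbot.sys-install.tar.extract:--strip-components=3 \\
+bbot.sys-install:config.install.root=/tmp/sys-install
+\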
+
+The \c{bbot} controller normally issues a build task by picking an unbuilt
+package configuration and one of the produced (via the machine name matches)
+target configurations that is not excluded from building by this package
+configuration's \c{*-builds}, \c{*-build-include}, and \c{*-build-exclude}
+manifest values.
"
diff --git a/doc/style b/doc/style
-Subproject 10f31a8bea8e5817fccf01978009c1ecaf3eabf
+Subproject b72eb624d13b1628e27e9f6c0b3c80853e8e015
diff --git a/etc/bootstrap/bbot-bootstrap-clang.bat b/etc/bootstrap/bbot-bootstrap-clang.bat
index 79c4009..99124e9 100644
--- a/etc/bootstrap/bbot-bootstrap-clang.bat
+++ b/etc/bootstrap/bbot-bootstrap-clang.bat
@@ -1,7 +1,7 @@
@echo off
rem file : etc/bootstrap/bbot-bootstrap-clang.bat
-rem license : TBC; see accompanying LICENSE file
+rem license : MIT; see accompanying LICENSE file
setlocal EnableExtensions EnableDelayedExpansion
@@ -52,7 +52,9 @@ rem See also adding MSVC-bundled Clang to PATH below.
set "MSVC="
-set "MSVC=C:\Program Files (x86)\Microsoft Visual Studio\2019\Community"
+rem set "MSVC=C:\Program Files (x86)\Microsoft Visual Studio\2019\Community"
+set "MSVC=C:\Program Files\Microsoft Visual Studio\2022\Community"
+
set "VCVARS=VC\Auxiliary\Build\vcvars64.bat"
set "CXX=clang++"
diff --git a/etc/bootstrap/bbot-bootstrap-freebsd.sh b/etc/bootstrap/bbot-bootstrap-freebsd.sh
index 4668dfd..58eba53 100755
--- a/etc/bootstrap/bbot-bootstrap-freebsd.sh
+++ b/etc/bootstrap/bbot-bootstrap-freebsd.sh
@@ -1,7 +1,7 @@
#!/bin/sh
# file : etc/bootstrap/bbot-bootstrap-freebsd.sh
-# license : TBC; see accompanying LICENSE file
+# license : MIT; see accompanying LICENSE file
# Note: install GNU make with 'pkg install gmake'.
#
diff --git a/etc/bootstrap/bbot-bootstrap-linux.sh b/etc/bootstrap/bbot-bootstrap-linux.sh
index 71a1dda..8037c27 100755
--- a/etc/bootstrap/bbot-bootstrap-linux.sh
+++ b/etc/bootstrap/bbot-bootstrap-linux.sh
@@ -1,7 +1,7 @@
#!/bin/sh
# file : etc/bootstrap/bbot-bootstrap-linux.sh
-# license : TBC; see accompanying LICENSE file
+# license : MIT; see accompanying LICENSE file
if ! jobs="$(nproc)"; then
jobs=1
@@ -12,7 +12,7 @@ fi
cd /tmp
ulimit -c unlimited
-# Note: bootstrap script options must come before build.sh options/arguments
+# Note: bootstrap script options must come before build.sh options/arguments.
#
"$(dirname "$0")/bbot-bootstrap.sh" --make make --jobs "$jobs" "$@"
diff --git a/etc/bootstrap/bbot-bootstrap-macos.sh b/etc/bootstrap/bbot-bootstrap-macos.sh
index a83b0ec..fd28e8b 100755
--- a/etc/bootstrap/bbot-bootstrap-macos.sh
+++ b/etc/bootstrap/bbot-bootstrap-macos.sh
@@ -1,7 +1,7 @@
#!/bin/sh
# file : etc/bootstrap/bbot-bootstrap-macos.sh
-# license : TBC; see accompanying LICENSE file
+# license : MIT; see accompanying LICENSE file
# open -a Terminal -n -F ~/bbot-bootstrap-macos.sh
diff --git a/etc/bootstrap/bbot-bootstrap-mingw.bat b/etc/bootstrap/bbot-bootstrap-mingw.bat
index bea22d9..823bc1c 100644
--- a/etc/bootstrap/bbot-bootstrap-mingw.bat
+++ b/etc/bootstrap/bbot-bootstrap-mingw.bat
@@ -1,7 +1,7 @@
@echo off
rem file : etc/bootstrap/bbot-bootstrap-mingw.bat
-rem license : TBC; see accompanying LICENSE file
+rem license : MIT; see accompanying LICENSE file
setlocal EnableExtensions EnableDelayedExpansion
diff --git a/etc/bootstrap/bbot-bootstrap-msvc-14.bat b/etc/bootstrap/bbot-bootstrap-msvc-14.bat
index efa1ecd..34ecf54 100644
--- a/etc/bootstrap/bbot-bootstrap-msvc-14.bat
+++ b/etc/bootstrap/bbot-bootstrap-msvc-14.bat
@@ -1,7 +1,7 @@
@echo off
rem file : etc/bootstrap/bbot-bootstrap-msvc-14.bat
-rem license : TBC; see accompanying LICENSE file
+rem license : MIT; see accompanying LICENSE file
setlocal EnableExtensions EnableDelayedExpansion
diff --git a/etc/bootstrap/bbot-bootstrap-msvc.bat b/etc/bootstrap/bbot-bootstrap-msvc.bat
index ba08fd5..7506f1e 100644
--- a/etc/bootstrap/bbot-bootstrap-msvc.bat
+++ b/etc/bootstrap/bbot-bootstrap-msvc.bat
@@ -1,7 +1,7 @@
@echo off
rem file : etc/bootstrap/bbot-bootstrap-msvc.bat
-rem license : TBC; see accompanying LICENSE file
+rem license : MIT; see accompanying LICENSE file
setlocal EnableExtensions EnableDelayedExpansion
@@ -45,7 +45,8 @@ goto start
:start
rem set "MSVC=C:\Program Files (x86)\Microsoft Visual Studio\2017\Community"
-set "MSVC=C:\Program Files (x86)\Microsoft Visual Studio\2019\Community"
+rem set "MSVC=C:\Program Files (x86)\Microsoft Visual Studio\2019\Community"
+set "MSVC=C:\Program Files\Microsoft Visual Studio\2022\Community"
set "VCVARS=VC\Auxiliary\Build\vcvars64.bat"
set "BUILD=C:\tmp"
diff --git a/etc/bootstrap/bbot-bootstrap.service b/etc/bootstrap/bbot-bootstrap.service
index 6dbf365..5d6c023 100644
--- a/etc/bootstrap/bbot-bootstrap.service
+++ b/etc/bootstrap/bbot-bootstrap.service
@@ -13,13 +13,23 @@ Group=build
# Old versions of systemd have no '~'.
WorkingDirectory=/home/build
+# Uncomment this if there is an X server running (e.g., Xvfb).
+#
+#Environment=DISPLAY=:99
+
Environment=CXX=g++
+Environment=INSTALL=/usr/local
Environment=BUILD=/tmp
Environment=ENVIRONMENTS=/home/build/environments
+# Note: bootstrap script options must come before build.sh options/arguments.
+#
ExecStart=/usr/local/bin/bbot-bootstrap-linux.sh \
+--install ${INSTALL} \
--build ${BUILD} \
--environments ${ENVIRONMENTS} \
+--private \
+--sudo sudo \
${CXX}
StandardInput=tty-force
diff --git a/etc/bootstrap/bbot-bootstrap.sh b/etc/bootstrap/bbot-bootstrap.sh
index 2204de7..e3231bf 100755
--- a/etc/bootstrap/bbot-bootstrap.sh
+++ b/etc/bootstrap/bbot-bootstrap.sh
@@ -1,7 +1,7 @@
#!/bin/sh
# file : etc/bootstrap/bbot-bootstrap.sh
-# license : TBC; see accompanying LICENSE file
+# license : MIT; see accompanying LICENSE file
usage="Usage: $0 [<options>] [<build-options>]"
@@ -32,6 +32,7 @@ run ()
# Defaults that can be changed via command line.
#
+install=
build=/tmp
environments="$HOME/environments"
make=
@@ -39,8 +40,19 @@ jobs=
# Parse options.
#
+# Note that if --install is specified, any necessary --private/--sudo options
+# will need to be specified as <build-options>.
+#
while test $# -ne 0; do
case $1 in
+ --install)
+ shift
+ if test $# -eq 0; then
+ error "missing install directory after --install"
+ fi
+ install="$1"
+ shift
+ ;;
--build)
shift
if test $# -eq 0; then
@@ -84,14 +96,18 @@ done
# Note: build_options is array-like (expanded unquoted).
#
tftp="196.254.111.222"
-install="/usr/local"
-build_options=
verbose=3
timeout=600
+build_options=
#install="/tmp/bbot-install"
#tftp="127.0.0.1:55123"
-#build_options="--install-dir $install"
+
+if test -n "$install"; then
+ build_options="$build_options --install-dir $install"
+else
+ install="/usr/local"
+fi
# If make was specified, add it to build_options.
#
diff --git a/etc/buildfile b/etc/buildfile
index c421d23..a21410a 100644
--- a/etc/buildfile
+++ b/etc/buildfile
@@ -1,5 +1,5 @@
# file : etc/buildfile
-# license : TBC; see accompanying LICENSE file
+# license : MIT; see accompanying LICENSE file
./: file{** -buildfile}
diff --git a/etc/environments/default-aarch64 b/etc/environments/default-aarch64
new file mode 100755
index 0000000..3fb5ccb
--- /dev/null
+++ b/etc/environments/default-aarch64
@@ -0,0 +1,40 @@
+#!/bin/sh
+
+# file : etc/environments/default-aarch64
+# license : MIT; see accompanying LICENSE file
+
+#
+# Environment setup script for C/C++ compilation.
+#
+
+# NOTE: don't forget to adjust the target mode selection below.
+#
+c=gcc
+cxx=g++
+
+# $1 - target
+# $2 - bbot executable (if absent, then run $SHELL)
+# $3+ - bbot options
+
+set -e # Exit on errors.
+
+# Based on target determine what we are building.
+#
+mode=
+case "$1" in
+ aarch64-*)
+ ;;
+ *)
+ echo "unknown target: '$1'" 1>&2
+ exit 1
+ ;;
+esac
+shift
+
+if test $# -ne 0; then
+ exec "$@" cc config.c="$c $mode" config.cxx="$cxx $mode"
+else
+ echo "config.c=$c $mode" 1>&2
+ echo "config.cxx=$cxx $mode" 1>&2
+ exec $SHELL -i
+fi
diff --git a/etc/environments/default-clang.bat b/etc/environments/default-clang.bat
index 75861ff..3868889 100644
--- a/etc/environments/default-clang.bat
+++ b/etc/environments/default-clang.bat
@@ -1,7 +1,7 @@
@echo off
rem file : etc/environments/default-clang.bat
-rem license : TBC; see accompanying LICENSE file
+rem license : MIT; see accompanying LICENSE file
rem
rem Environment setup script for C/C++ compilation with Clang targeting
@@ -24,8 +24,11 @@ set "MSVC="
rem set "MSVC=C:\Program Files (x86)\Microsoft Visual Studio\2017\Community"
rem set "MSVC_VER=14.1"
-set "MSVC=C:\Program Files (x86)\Microsoft Visual Studio\2019\Community"
-set "MSVC_VER=14.2"
+rem set "MSVC=C:\Program Files (x86)\Microsoft Visual Studio\2019\Community"
+rem set "MSVC_VER=14.2"
+
+set "MSVC=C:\Program Files\Microsoft Visual Studio\2022\Community"
+set "MSVC_VER=14.3"
set "VCVARS32=VC\Auxiliary\Build\vcvarsamd64_x86.bat"
set "VCVARS64=VC\Auxiliary\Build\vcvars64.bat"
diff --git a/etc/environments/default-emcc b/etc/environments/default-emcc
index 509542f..9a8e595 100755
--- a/etc/environments/default-emcc
+++ b/etc/environments/default-emcc
@@ -1,7 +1,7 @@
#!/bin/sh
# file : etc/environments/default-emcc
-# license : TBC; see accompanying LICENSE file
+# license : MIT; see accompanying LICENSE file
#
# Environment setup script for C/C++ compilation with Emscripten using
diff --git a/etc/environments/default-mingw.bat b/etc/environments/default-mingw.bat
index 9adee8f..e8bdf3d 100644
--- a/etc/environments/default-mingw.bat
+++ b/etc/environments/default-mingw.bat
@@ -1,7 +1,7 @@
@echo off
rem file : etc/environments/default-mingw.bat
-rem license : TBC; see accompanying LICENSE file
+rem license : MIT; see accompanying LICENSE file
rem
rem Environment setup script for C/C++ compilation with MinGW GCC.
diff --git a/etc/environments/default-msvc-14.bat b/etc/environments/default-msvc-14.bat
index f5e64eb..cc5ab15 100644
--- a/etc/environments/default-msvc-14.bat
+++ b/etc/environments/default-msvc-14.bat
@@ -1,7 +1,7 @@
@echo off
rem file : etc/environments/default-msvc-14.bat
-rem license : TBC; see accompanying LICENSE file
+rem license : MIT; see accompanying LICENSE file
rem
rem Environment setup script for C/C++ compilation with Visual Studio 14.
diff --git a/etc/environments/default-msvc.bat b/etc/environments/default-msvc.bat
index 563a81a..c07f825 100644
--- a/etc/environments/default-msvc.bat
+++ b/etc/environments/default-msvc.bat
@@ -1,7 +1,7 @@
@echo off
rem file : etc/environments/default-msvc.bat
-rem license : TBC; see accompanying LICENSE file
+rem license : MIT; see accompanying LICENSE file
rem
rem Environment setup script for C/C++ compilation with Visual Studio.
@@ -17,8 +17,11 @@ set "MSVC="
rem set "MSVC=C:\Program Files (x86)\Microsoft Visual Studio\2017\Community"
rem set "MSVC_VER=14.1"
-set "MSVC=C:\Program Files (x86)\Microsoft Visual Studio\2019\Community"
-set "MSVC_VER=14.2"
+rem set "MSVC=C:\Program Files (x86)\Microsoft Visual Studio\2019\Community"
+rem set "MSVC_VER=14.2"
+
+set "MSVC=C:\Program Files\Microsoft Visual Studio\2022\Community"
+set "MSVC_VER=14.3"
set "VCVARS32=VC\Auxiliary\Build\vcvarsamd64_x86.bat"
set "VCVARS64=VC\Auxiliary\Build\vcvars64.bat"
diff --git a/etc/environments/default b/etc/environments/default-x86_64
index c7c7953..e324fa1 100755
--- a/etc/environments/default
+++ b/etc/environments/default-x86_64
@@ -1,7 +1,7 @@
#!/bin/sh
-# file : etc/environments/default
-# license : TBC; see accompanying LICENSE file
+# file : etc/environments/default-x86_64
+# license : MIT; see accompanying LICENSE file
#
# Environment setup script for C/C++ compilation.
diff --git a/manifest b/manifest
index 2c855b5..5fcb879 100644
--- a/manifest
+++ b/manifest
@@ -1,9 +1,9 @@
: 1
name: bbot
-version: 0.14.0-a.0.z
+version: 0.17.0-a.0.z
project: build2
summary: build2 build bot
-license: other: TODO ; License is not yet decided, currently all rights reserved.
+license: MIT
topics: continuous integration and testing, build automation, build toolchain
keywords: CI CD
description-file: README
@@ -13,11 +13,11 @@ doc-url: https://build2.org/doc.xhtml
src-url: https://git.build2.org/cgit/bbot/tree/
email: users@build2.org
build-warning-email: builds@build2.org
-builds: host
+builds: all : &host
requires: c++14
-depends: * build2 >= 0.13.0
-depends: * bpkg >= 0.13.0
-# @@ Should probably become conditional dependency.
-requires: ? cli ; Only required if changing .cli files.
-depends: libbutl [0.14.0-a.0.1 0.14.0-a.1)
-depends: libbbot [0.14.0-a.0.1 0.14.0-a.1)
+depends: * build2 >= 0.16.0-
+depends: * bpkg >= 0.16.0-
+# @@ DEP Should probably become conditional dependency.
+#requires: ? cli ; Only required if changing .cli files.
+depends: libbutl [0.17.0-a.0.1 0.17.0-a.1)
+depends: libbbot [0.17.0-a.0.1 0.17.0-a.1)
diff --git a/tests/agent/buildfile b/tests/agent/buildfile
index 236674f..ea4659e 100644
--- a/tests/agent/buildfile
+++ b/tests/agent/buildfile
@@ -1,5 +1,5 @@
# file : tests/agent/buildfile
-# license : TBC; see accompanying LICENSE file
+# license : MIT; see accompanying LICENSE file
# Setup the bbot-agent that we are testing.
#
diff --git a/tests/agent/testscript b/tests/agent/testscript
index c977c26..69ca968 100644
--- a/tests/agent/testscript
+++ b/tests/agent/testscript
@@ -1,5 +1,5 @@
# file : tests/agent/testscript
-# license : TBC; see accompanying LICENSE file
+# license : MIT; see accompanying LICENSE file
# The /build/machines directory should be on a btrfs filesystem and have the
# following layout/contents:
@@ -18,8 +18,8 @@
test.options = --verbose 3
-cp = $src_base/btrfs-cpdir -f /build/machines.orig /build/machines
-rm = $src_base/btrfs-rmdir /build/machines
+cp = [cmdline] $src_base/btrfs-cpdir -f /build/machines.orig /build/machines
+rm = [cmdline] $src_base/btrfs-rmdir /build/machines
: dump-machines
:
diff --git a/tests/build/bootstrap.build b/tests/build/bootstrap.build
index b2c4297..c9187a6 100644
--- a/tests/build/bootstrap.build
+++ b/tests/build/bootstrap.build
@@ -1,5 +1,5 @@
# file : tests/build/bootstrap.build
-# license : TBC; see accompanying LICENSE file
+# license : MIT; see accompanying LICENSE file
project = # Unnamed subproject.
diff --git a/tests/build/root.build b/tests/build/root.build
index 640afbb..cb9ba03 100644
--- a/tests/build/root.build
+++ b/tests/build/root.build
@@ -1,5 +1,5 @@
# file : tests/build/root.build
-# license : TBC; see accompanying LICENSE file
+# license : MIT; see accompanying LICENSE file
# Configure C++ module and specify the test target for cross-testing.
#
diff --git a/tests/buildfile b/tests/buildfile
index a5c185f..556ed55 100644
--- a/tests/buildfile
+++ b/tests/buildfile
@@ -1,4 +1,4 @@
# file : tests/buildfile
-# license : TBC; see accompanying LICENSE file
+# license : MIT; see accompanying LICENSE file
./: {*/ -build/}
diff --git a/tests/integration/buildfile b/tests/integration/buildfile
index 6167f06..43cb07f 100644
--- a/tests/integration/buildfile
+++ b/tests/integration/buildfile
@@ -1,5 +1,5 @@
# file : tests/integration/buildfile
-# license : TBC; see accompanying LICENSE file
+# license : MIT; see accompanying LICENSE file
# Setup the bbot-agent and bbot-worker that we are testing.
#
diff --git a/tests/integration/testscript b/tests/integration/testscript
index 1408946..988859f 100644
--- a/tests/integration/testscript
+++ b/tests/integration/testscript
@@ -1,5 +1,8 @@
# file : tests/integration/testscript
-# license : TBC; see accompanying LICENSE file
+# license : MIT; see accompanying LICENSE file
+
+# NOTE: to see the worker execution progress run the test with the
+# --no-diag-buffer option.
#\
# Requirement:
@@ -48,17 +51,90 @@ controller = https://stage.build2.org/?build-task
wait = 1 # Seconds.
controller = --fake-request ../task --dump-result
+# Note that we also need to make sure that the installed package libraries are
+# properly imported when configuring and running tests, and that the installed
+# executables are runnable.
+#
+config = "bpkg.create:config.install.root=\"'$~/install'\" \
+bpkg.configure.fetch:--fetch-timeout=60 \
+bpkg.global.configure.build:--fetch-timeout=60 \
+bpkg.create:config.bin.rpath=\"'$~/install/lib'\" \
+config.cc.coptions=-Wall \
+b.test-installed.configure:\"config.cc.loptions=-L'$~/install/lib'\" \
+bpkg.test-separate-installed.create:\"config.cc.loptions=-L'$~/install/lib'\""
+
pkg = libhello
-ver = 1.0.0+7
+ver = 1.0.0+11
#rep_url = "https://git.build2.org/hello/libhello.git#1.0"
#rep_type = git
rep_url = https://stage.build2.org/1
rep_type = pkg
rfp = yes
+#host='host: true'
+#dependency_checksum = 'dependency-checksum: e6f10587696020674c260669f4e7000a0139df72467bff9770aea2f2b8b57ba0'
+
+#package_config = 'package-config: { config.libhello.extras=true }+ libhello'
+#package_config = 'package-config: +bbot.install.ldconfig:'
+#package_config = 'package-config: -bpkg.install:'
+#\
+package_config = "package-config:
+\\
++bpkg.bindist.archive:--archive-build-meta=
+bpkg.bindist.archive:config.install.relocatable=true
+
++bbot.bindist.upload:
+
+bbot.sys-install.tar.extract:--directory=$~
+bbot.sys-install.tar.extract:--strip-components=1
+
+b.test-installed.configure:config.cc.loptions='-L$~/usr/local/lib'
+bpkg.test-separate-installed.create:config.cc.loptions='-L$~/usr/local/lib'
+
+bpkg.create:config.bin.rpath=[null]
+\\"
+config = "$config bpkg.create:config.install.root=\"'$~/usr/local'\""
++export LD_LIBRARY_PATH="$~/usr/local/lib:$getenv('LD_LIBRARY_PATH')"
+#\
+#\
+sys_install_dir = [dir_path] $~/sys-install/
+package_config = "package-config:
+\\
++bpkg.bindist.archive:--archive-build-meta=
+bpkg.bindist.archive:config.install.relocatable=true
+
++bbot.bindist.upload:
+
+b.test-installed.configure:config.cc.loptions='-L$sys_install_dir/lib'
+bpkg.test-separate-installed.create:config.cc.loptions='-L$sys_install_dir/lib'
+
+bpkg.create:config.bin.rpath=[null]
+\\"
+config = "$config bpkg.create:config.install.root=\"'$~/usr/local'\" \
+bbot.sys-install:config.install.root=\"'$sys_install_dir'\" \
+bbot.sys-install.tar.extract:--directory=\"$sys_install_dir\" \
+bbot.sys-install.tar.extract:--strip-components=3"
++export LD_LIBRARY_PATH="$sys_install_dir/lib:$getenv('LD_LIBRARY_PATH')"
+#\
+#\
+package_config = 'package-config:
+\
++bpkg.bindist.fedora:
++bbot.bindist.upload:
+bpkg.create:config.bin.rpath=[null]
+\'
+#\
+#\
+package_config = 'package-config:\
+bpkg.configure.fetch:--fetch-timeout=120 -bpkg.install:
+config.libhello.develop=true
+sys:libuuid-c++ --sys-install --sys-no-stub --sys-yes
+\
+'
+#\
#\
pkg = hello
-ver = 1.0.0+6
+ver = 1.0.0+8
rep_url = "https://git.build2.org/hello/hello.git"
rep_type = git
rfp = yes
@@ -66,95 +142,348 @@ rfp = yes
#\
pkg = libstudxml
-ver = 1.1.0-b.9.20210202082911.e729667b0f34
+ver = 1.1.0-b.10
rep_url = https://stage.build2.org/1
rep_type = pkg
rfp = yes
#\
-#\
# To make sure that the test-installed phase succeeds use the build2 driver
-# installed into ~/install/bin.
+# installed into a writable directory, for example, ~/install/bin.
#
+#\
pkg = libbuild2-hello
-ver = 0.1.0-a.0.20201019074759.bba32abb6d3d
+ver = 0.1.0
rep_url = "https://github.com/build2/libbuild2-hello.git#master"
rep_type = git
#rep_url = https://stage.build2.org/1
#rep_type = pkg
rfp = yes
+tests="tests: * libbuild2-hello-tests == $ver"
+host='host: true'
#\
-
+#package_config = 'package-config: -bpkg.install:'
+#\
+package_config = 'package-config:
+\
++bpkg.bindist.fedora:
+bpkg.module.create:config.bin.rpath=[null]
+\'
#\
+
# Use the build2 driver installed into ~/install/bin (see above).
#
+#\
pkg = libbuild2-kconfig
-ver = 0.1.0-a.0.20210108084836.3687e4b95226
+ver = 0.3.0-a.0.20221118053819.f702eb65da87
rep_url = "https://github.com/build2/libbuild2-kconfig.git#master"
rep_type = git
#ver = 0.1.0-a.0.20200910053253.a71aa3f3938b
#rep_url = https://stage.build2.org/1
#rep_type = pkg
rfp = yes
+requires = 'requires: bootstrap'
+tests = "tests: * libbuild2-kconfig-tests == $ver
+examples: * kconfig-hello == $ver"
+host = 'host: true'
+#\
+#\
+package_config = 'package-config:
+\
+{ config.libbuild2_kconfig-tests.extras=true }+ libbuild2-kconfig-tests
+\'
+#\
+#package_config = 'package-config: config.libbuild2_kconfig.develop=true'
+#package_config = 'package-config: -bpkg.install:'
+#\
+package_config = "package-config:
+\\
++bpkg.bindist.archive:--archive-build-meta=
+bbot.sys-install.tar.extract:--directory=$~
+bbot.sys-install.tar.extract:--strip-components=1
+
++bbot.bindist.upload:
+
+bpkg.create:config.bin.rpath=[null]
+\\"
+config = "$config bpkg.create:config.install.root=\"'$~/usr/local'\""
++export LD_LIBRARY_PATH="$~/usr/local/lib:$getenv('LD_LIBRARY_PATH')"
+#\
+#\
+package_config = 'package-config:
+\
++bpkg.bindist.fedora:
+bpkg.module.create:config.bin.rpath=[null]
+\'
+#\
+#dependency_checksum = 'dependency-checksum: 72ae02bed9a05aaf022147297a99b84d63b712e15d05cc073551da39003e87e8'
+
+# Use the build2 driver installed into ~/install/bin (see above).
+#
+#\
+pkg = libbuild2-autoconf
+ver = 0.2.0
+rep_url = "https://github.com/build2/libbuild2-autoconf.git#master"
+rep_type = git
+rfp = yes
+tests = "tests: * libbuild2-autoconf-tests == $ver"
+host = 'host: true'
#\
#\
pkg = curl
-ver = 7.67.0+8
-rep_url = https://pkg.cppget.org/1/testing
+ver = 7.84.0
+rep_url = https://pkg.cppget.org/1/stable
rep_type = pkg
rfp = yes
#\
#\
+pkg = bpkg
+ver = 0.16.0-a.0.20230201123204.d956e69e8b55
+rep_url = https://stage.build2.org/1
+rep_type = pkg
+rfp = yes
+package_config = 'package-config:
+\
+config.bpkg.tests.remote=true
+?libodb-sqlite +{ config.libodb_sqlite.develop=true }
+?cli +{ config.cli.develop=true }
+?sys:libsqlite3 --sys-install
+\'
+#\
+
+#\
pkg = cli
-ver = 1.2.0-b.7.20210311174126.7aba3e27228e
-rep_url = "https://git.codesynthesis.com/cli/cli.git#adhoc-recipe"
+ver = 1.2.0
+rep_url = "https://git.codesynthesis.com/cli/cli.git#master"
rep_type = git
#rep_url = https://stage.build2.org/1
#rep_type = pkg
rfp = yes
+requires='requires: host'
+tests="tests: * cli-tests == $ver
+examples: * cli-examples == $ver"
+host='host: true'
+#\
+#\
+package_config = "package-config:
+\\
+{ config.cli.extras=true }+ cli
+{ config.cli_tests.extras=true }+ cli-tests
+{ config.cli_examples.extras=true }+ cli-examples
+\\"
+#\
+#\
+package_config = "package-config:
+\\
++bpkg.bindist.archive:--archive-build-meta=
+bbot.sys-install.tar.extract:--directory=$~
+bbot.sys-install.tar.extract:--strip-components=1
+
++bbot.bindist.upload:
+
+bpkg.create:config.bin.rpath=[null]
+\\"
+config = "$config bpkg.create:config.install.root=\"'$~/usr/local'\""
++export LD_LIBRARY_PATH="$~/usr/local/lib:$getenv('LD_LIBRARY_PATH')"
#\
+#\
+package_config = 'package-config:
+\
++bpkg.bindist.fedora:
+
++bbot.bindist.upload:
+
+bpkg.create:config.bin.rpath=[null]
+\'
+#\
+#package_config = 'package-config: ?libcutl +{ config.libcutl.develop=true }'
+#package_config = 'package-config: -bpkg.install:'
#\
pkg = libxsd
-ver = 4.2.0-b.1.20210302135218.6a71bc57f6eb
+ver = 4.2.0-b.3.20220224113525.516981000564
rep_url = "https://git.codesynthesis.com/xsd/xsd.git#master"
rep_type = git
#rep_url = https://stage.build2.org/1
#rep_type = pkg
rfp = yes
+tests="tests: libxsd-tests == $ver"
+#\
+
+#\
+pkg = xsd
+ver = 4.2.0-b.4.20230320140030.aafc60b2e901
+rep_url = "https://git.codesynthesis.com/xsd/xsd.git#master"
+rep_type = git
+#rep_url = https://queue.stage.build2.org/1
+#rep_type = pkg
+rfp = yes
+requires='requires: host'
+tests="tests: * xsd-tests == $ver
+examples: * xsd-examples == $ver"
+host='host: true'
+#\
+#\
+package_config = "package-config:
+\\
+?sys:libxerces-c --sys-install --sys-yes
+{ config.xsd_tests.extras=true }+ xsd-tests
++bpkg.bindist.archive:--archive-build-meta=
+bbot.sys-install.tar.extract:--directory=$~
+bbot.sys-install.tar.extract:--strip-components=1
+bpkg.create:config.bin.rpath=[null]
+\\"
+config = "$config bpkg.create:config.install.root=\"'$~/usr/local'\""
++export LD_LIBRARY_PATH="$~/usr/local/lib:$getenv('LD_LIBRARY_PATH')"
+#\
+#\
+package_config = 'package-config:
+\
+?sys:libxerces-c --sys-install --sys-yes
++bpkg.bindist.fedora:
+bpkg.create:config.bin.rpath=[null]
+\'
+#\
+#package_config = 'package-config:
+# \
+#?libxerces-c +{ config.libxerces_c.network=true }
+#?libcurl/7.76.0
+#?sys:libz/*
+# \'
+
+#dependency_checksum = 'dependency-checksum: 40a0ad4546d836a3afc83a9e7da22f2b5d224af4e62996d88f7103eaee23e9e1'
+
+#\
+pkg = libxerces-c
+ver = 3.2.4
+rep_url = https://pkg.cppget.org/1/stable
+rep_type = pkg
+rfp = yes
+package_config = 'package-config:
+\
+config.libxerces_c.network=true
+"?libcurl ~7.76.0"
+sys:libz/*
+-bpkg.update:
+\'
+#\
+
+#\
+pkg = odb
+ver = 2.5.0-b.22.20220629083600.4a9af07ee566
+rep_url = "https://git.codesynthesis.com/odb/odb.git#master"
+rep_type = git
+rfp = yes
+requires='requires: host'
+host='host: true'
#\
#\
pkg = libcmark-gfm-extensions
-ver = 0.29.0-a.1+7
-rep_url = https://pkg.cppget.org/1/alpha
+ver = 0.29.0-a.4
+rep_url = https://stage.build2.org/1
rep_type = pkg
rfp = yes
+host='host: true'
#\
#\
pkg = non-existing
ver = 0.1.0
-rep_url = https://pkg.cppget.org/1/alpha
+rep_url = https://stage.build2.org/1
rep_type = pkg
rfp = yes
#\
-# Note that we also need to make sure that the installed package libraries are
-# properly imported when configuring and running tests, and that the installed
-# executables are runnable.
-#
-config = "\"config.install.root='$~/install'\" \
-bpkg:--fetch-timeout=60 \
-\"config.bin.rpath='$~/install/lib'\" \
-config.cc.coptions=-Wall \
-b.test-installed.configure:\"config.cc.loptions=-L'$~/install/lib'\" \
-bpkg.test-installed.create:\"config.cc.loptions=-L'$~/install/lib'\""
+#\
+pkg = fmt
+ver = 8.1.1
+rep_url = "https://github.com/build2-packaging/fmt.git"
+#rep_url = "git+file:/tmp/fmt#master"
+rep_type = git
+rfp = yes
+#\
-#interactive="interactive: bpkg.configure.build"
+#\
+pkg = libodb-sqlite
+ver = 2.5.0-b.26.20240131175206.1c7f67f47770
+rep_url = "https://git.codesynthesis.com/var/scm/odb/odb.git#multi-package"
+rep_type = git
+rfp = yes
+tests="tests: odb-tests == $ver"' ? (!$defined(config.odb_tests.database)) config.odb_tests.database=sqlite'
+#\
+#\
+package_config = 'package-config:
+\
+{ config.odb_tests.multi_database=true }+ odb-tests
+\'
+#\
+
+#\
+pkg = libodb-pgsql
+ver = 2.5.0-b.26.20240131175206.1c7f67f47770
+rep_url = "https://git.codesynthesis.com/var/scm/odb/odb.git#multi-package"
+rep_type = git
+rfp = yes
+tests="tests: odb-tests == $ver"' ? (!$defined(config.odb_tests.database)) config.odb_tests.database=pgsql'
+#\
+#\
+package_config = 'package-config:
+\
+{ config.odb_tests.database="sqlite pgsql" }+ odb-tests
+\'
+#\
+
+#\
+pkg = odb-tests
+ver = 2.5.0-b.26.20240131175206.1c7f67f47770
+rep_url = "https://git.codesynthesis.com/var/scm/odb/odb.git#multi-package"
+rep_type = git
+rfp = yes
+#\
+#\
+package_config = 'package-config:
+\
+config.odb_tests.database="sqlite pgsql"
+\'
+#\
+
+#\
+pkg = libodb-oracle
+ver = 2.5.0-b.26.20240201133448.3fa01c83a095
+rep_url = "https://git.codesynthesis.com/var/scm/odb/odb.git#multi-package"
+rep_type = git
+rfp = yes
+package_config = 'package-config:
+\
+config.cc.poptions+=-I/usr/include/oracle/12.2/client64 config.cc.loptions+=-L/usr/lib/oracle/12.2/client64/lib
+\'
+#\
+
+#\
+pkg = libodb-qt
+ver = 2.5.0-b.26.20240201180613.633ad7ccad39
+rep_url = "https://git.codesynthesis.com/var/scm/odb/odb.git#multi-package"
+rep_type = git
+rfp = yes
+#\
+
+#interactive="interactive: b.test-installed.configure"
#interactive="interactive: warning"
+#\
+aux_env = 'auxiliary-environment:
+\
+# x86_64-linux_debian_12-postgresql_15
+#
+DATABASE_HOST=10.0.213.126
+DATABASE_PORT=5432
+DATABASE_USER=test
+DATABASE_NAME=test
+\
+'
+#\
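The commented-out auxiliary-environment block above is what an auxiliary database machine (here x86_64-linux_debian_12-postgresql_15) would contribute to the task manifest. A minimal sketch of how a package's tests could consume these variables, assuming a psql client is available on the build machine (only the DATABASE_* names come from the block above; everything else is illustrative):

# Hypothetical smoke test against the auxiliary PostgreSQL instance.
psql --host="$DATABASE_HOST" --port="$DATABASE_PORT" \
     --username="$DATABASE_USER" --dbname="$DATABASE_NAME" \
     --command='SELECT 1'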
+cat <<"EOI" >=task
: 1
@@ -163,10 +492,17 @@ bpkg.test-installed.create:\"config.cc.loptions=-L'$~/install/lib'\""
repository-url: $rep_url
repository-type: $rep_type
trust: $rfp
+ $requires
+ $tests
machine: $machine
target: $target
- config: $config
+ $aux_env
+ target-config: $config
+ $package_config
$interactive
+ $host
+ worker-checksum: 1
+ $dependency_checksum
EOI
+if ("$environment" != "")
@@ -178,7 +514,7 @@ end
tftp = 127.0.0.1:55123
a = $0
-+ sed -e 's/-agent$/-worker/' <"$0" | set w
++sed -e 's/-agent$/-worker/' <"$0" | set w
: agent
:
@@ -209,9 +545,15 @@ a = $0
chmod ugo+x $env;
sleep $wait;
$w --verbose 3 --startup --tftp-host $tftp --environments $~ \
- &?build-module/*** &?build/*** \
+ &?build/*** &?build-host/*** &?build-module/*** &?build-install/*** \
&?build-installed/*** &?build-installed-bpkg/*** \
- &?dist/*** &?redist/*** \
+ &?build-installed-bpkg-module/*** &?build-installed-bpkg-host/*** \
+ &?dist/*** &?redist/*** &?bindist/*** \
+ &?dist-install/*** &?redist-install/*** \
&?dist-installed/*** &?redist-installed/*** \
- &task.manifest <| 2>|
+ &?../usr/*** &?upload/*** &?upload.tar \
+ &?../sys-install/*** \
+ &task.manifest <| 2>|;
+
+ sudo rm -rf ../sys-install/
}
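For reference, with the libodb-pgsql block and the auxiliary environment above uncommented, the task manifest written by the cat heredoc might expand roughly as follows. This is a sketch, not output from this diff: the name: and version: fields and the machine/target values are assumptions for illustration.

: 1
name: libodb-pgsql
version: 2.5.0-b.26.20240131175206.1c7f67f47770
repository-url: https://git.codesynthesis.com/var/scm/odb/odb.git#multi-package
repository-type: git
trust: yes
tests: odb-tests == 2.5.0-b.26.20240131175206.1c7f67f47770 ? (!$defined(config.odb_tests.database)) config.odb_tests.database=pgsql
machine: <machine name>
target: x86_64-linux-gnu
auxiliary-environment:
\
DATABASE_HOST=10.0.213.126
DATABASE_PORT=5432
DATABASE_USER=test
DATABASE_NAME=test
\
target-config: <contents of $config>
worker-checksum: 1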
diff --git a/tests/integration/tftp-map b/tests/integration/tftp-map
index 7a2b59a..cab223e 100644
--- a/tests/integration/tftp-map
+++ b/tests/integration/tftp-map
@@ -1,5 +1,5 @@
# file : tests/integration/tftp-map
-# license : TBC; see accompanying LICENSE file
+# license : MIT; see accompanying LICENSE file
# Test working directories relative to out_base:
#
diff --git a/tests/machine/buildfile b/tests/machine/buildfile
index 061d474..a29df00 100644
--- a/tests/machine/buildfile
+++ b/tests/machine/buildfile
@@ -1,5 +1,5 @@
# file : tests/machine/buildfile
-# license : TBC; see accompanying LICENSE file
+# license : MIT; see accompanying LICENSE file
# Set up the bbot-agent that we are testing.
#
diff --git a/tests/machine/testscript b/tests/machine/testscript
index f5e4320..76921ed 100644
--- a/tests/machine/testscript
+++ b/tests/machine/testscript
@@ -1,5 +1,5 @@
# file : tests/machine/testscript
-# license : TBC; see accompanying LICENSE file
+# license : MIT; see accompanying LICENSE file
# Note that if interrupted (^C) during machine execution, then you have to
# delete the iptables rules manually. To list them, use -S; to delete, -D:
@@ -7,7 +7,7 @@
# iptables -t nat -S
# iptables -S
-test.options = --cpu 8 --ram 10485760 --verbose 3
+test.options = --cpu 8 --build-ram 10485760 --verbose 3
tftp = /build/tftp
machines = /btrfs/boris/machines # @@ TODO
diff --git a/tests/worker/bootstrap.testscript b/tests/worker/bootstrap.testscript
index 01a0ce7..e84fef4 100644
--- a/tests/worker/bootstrap.testscript
+++ b/tests/worker/bootstrap.testscript
@@ -1,5 +1,5 @@
# file : tests/worker/bootstrap.testscript
-# license : TBC; see accompanying LICENSE file
+# license : MIT; see accompanying LICENSE file
test.options = --bootstrap
diff --git a/tests/worker/build.testscript b/tests/worker/build.testscript
index a28b504..d362663 100644
--- a/tests/worker/build.testscript
+++ b/tests/worker/build.testscript
@@ -1,5 +1,5 @@
# file : tests/worker/build.testscript
-# license : TBC; see accompanying LICENSE file
+# license : MIT; see accompanying LICENSE file
# Note: requires TFTP server (see buildfile).
@@ -45,13 +45,13 @@ cat manifest >>~"%EOO%"
configure-status: success
update-status: success
test-status: success
- configure-log: \\
+ configure-log:\\
%.*%+
\\
- update-log: \\
+ update-log:\\
%.*%+
\\
- test-log: \\
+ test-log:\\
%.*%+
\\
EOO
@@ -93,7 +93,7 @@ cat manifest >>~"%EOO%"
version: 1.2.3
status: error
configure-status: error
- configure-log: \\
+ configure-log:\\
%.*%+
\\
EOO
@@ -127,10 +127,10 @@ cat manifest >>~"%EOO%"
status: error
configure-status: success
update-status: error
- configure-log: \\
+ configure-log:\\
%.*%+
\\
- update-log: \\
+ update-log:\\
%.*%+
\\
EOO
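The %.*%+ lines in these expected-output here-documents are testscript regex lines: % introduces a regular expression and the trailing + allows it to match one or more output lines, which is how the variable-length log bodies between the \\ delimiters are matched. A minimal standalone sketch, assuming stock build2 testscript semantics:

echo 'status: success' >>~"%EOO%"
%status: .+%
EOO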
diff --git a/tests/worker/buildfile b/tests/worker/buildfile
index fae11bb..b43544f 100644
--- a/tests/worker/buildfile
+++ b/tests/worker/buildfile
@@ -1,5 +1,5 @@
# file : tests/worker/buildfile
-# license : TBC; see accompanying LICENSE file
+# license : MIT; see accompanying LICENSE file
#\
diff --git a/tests/worker/startup.testscript b/tests/worker/startup.testscript
index 7d68048..d78ae8b 100644
--- a/tests/worker/startup.testscript
+++ b/tests/worker/startup.testscript
@@ -1,5 +1,5 @@
# file : tests/worker/startup.testscript
-# license : TBC; see accompanying LICENSE file
+# license : MIT; see accompanying LICENSE file
# Note: requires TFTP server (see buildfile).
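These worker tests talk to the worker over TFTP (the integration testscript above uses 127.0.0.1:55123). A quick, hypothetical way to check that the test TFTP server is reachable before running them, assuming curl was built with TFTP support and that a task.manifest has been made available under the mapped directory:

# Fetch the task manifest from the test TFTP server.
curl -s tftp://127.0.0.1:55123/task.manifest -o task.manifest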