aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--NEWS6
-rw-r--r--bbot/agent/agent.cxx467
-rw-r--r--bbot/agent/http-service.cxx31
-rw-r--r--bbot/machine-manifest.hxx32
-rw-r--r--bbot/machine-manifest.test.testscript9
-rw-r--r--bbot/worker/worker.cxx61
-rwxr-xr-xdoc/cli.sh2
-rw-r--r--doc/manual.cli16
-rw-r--r--manifest10
-rw-r--r--repositories.manifest4
-rw-r--r--tests/integration/testscript20
11 files changed, 365 insertions, 293 deletions
diff --git a/NEWS b/NEWS
index bf4df52..29154da 100644
--- a/NEWS
+++ b/NEWS
@@ -1,3 +1,9 @@
+Version 0.17.0
+
+ * Support for auxiliary machines. See the bbot manual for details.
+
+ * Support for bbot.sys-install:config.install.root variable in worker.
+
Version 0.16.0
* New bpkg.bindist.*, bbot.sys-install.*, bbot.install.ldconfig steps.
diff --git a/bbot/agent/agent.cxx b/bbot/agent/agent.cxx
index 75f7228..b6f1783 100644
--- a/bbot/agent/agent.cxx
+++ b/bbot/agent/agent.cxx
@@ -1073,10 +1073,11 @@ using bootstrapped_machines = vector<bootstrapped_machine>;
static pair<toolchain_lock, bootstrapped_machines>
enumerate_machines (const dir_path& machines)
-try
{
tracer trace ("enumerate_machines", machines.string ().c_str ());
+ size_t dir_iter_retries (0); // Directory iteration retry count (see below).
+
for (;;) // From-scratch retry loop for after bootstrap (see below).
{
pair<toolchain_lock, bootstrapped_machines> pr;
@@ -1116,7 +1117,10 @@ try
machine_type::kvm,
string ("de:ad:be:ef:de:ad"),
nullopt,
- strings ()},
+ strings (),
+ nullopt,
+ nullopt,
+ nullopt},
toolchain_manifest {tc_id},
bootstrap_manifest {}}});
@@ -1149,280 +1153,313 @@ try
// The first level are machine volumes.
//
- for (const dir_entry& ve: dir_iterator (machines, dir_iterator::no_follow))
+ try
{
- const string vn (ve.path ().string ());
-
- // Ignore hidden directories.
- //
- if (ve.type () != entry_type::directory || vn[0] == '.')
- continue;
-
- const dir_path vd (dir_path (machines) /= vn);
-
- // Inside we have machines.
- //
- try
+ bool dir_iter_retry (false);
+ for (const dir_entry& ve:
+ dir_iterator (machines, dir_iterator::no_follow))
{
- for (const dir_entry& me: dir_iterator (vd, dir_iterator::no_follow))
- {
- const string mn (me.path ().string ());
+ const string vn (ve.path ().string ());
- if (me.type () != entry_type::directory || mn[0] == '.')
- continue;
-
- const dir_path md (dir_path (vd) /= mn);
+ // Ignore hidden directories.
+ //
+ if (ve.type () != entry_type::directory || vn[0] == '.')
+ continue;
- // Our endgoal here is to obtain a bootstrapped snapshot of this
- // machine while watching out for potential race conditions (other
- // instances as well as machines being added/upgraded/removed; see
- // the manual for details).
- //
- // So here is our overall plan:
- //
- // 1. Resolve current subvolume link for our bootstrap protocol.
- //
- // 2. Lock the machine. This excludes any other instance from trying
- // to perform the following steps.
- //
- // 3. If there is no link, cleanup old bootstrap (if any) and ignore
- // this machine.
- //
- // 4. Try to create a snapshot of current subvolume (this operation
- // is atomic). If failed (e.g., someone changed the link and
- // removed the subvolume in the meantime), retry from #1.
- //
- // 5. Compare the snapshot to the already bootstrapped version (if
- // any) and see if we need to re-bootstrap. If so, use the
- // snapshot as a starting point. Rename to bootstrapped at the
- // end (atomic).
- //
- dir_path lp (dir_path (md) /= (mn + '-' + bs_prot)); // -<P>
- dir_path tp (dir_path (md) /= (mn + '-' + tc_name)); // -<toolchain>
+ const dir_path vd (dir_path (machines) /= vn);
- auto delete_bootstrapped = [&tp, &trace] () // Delete -<toolchain>.
+ // Inside we have machines.
+ //
+ try
+ {
+ for (const dir_entry& me: dir_iterator (vd, dir_iterator::no_follow))
{
- run_btrfs (trace, "property", "set", "-ts", tp, "ro", "false");
- run_btrfs (trace, "subvolume", "delete", tp);
- };
+ const string mn (me.path ().string ());
- for (size_t retry (0);; ++retry)
- {
- if (retry != 0)
- sleep (1);
+ if (me.type () != entry_type::directory || mn[0] == '.')
+ continue;
+
+ const dir_path md (dir_path (vd) /= mn);
- // Resolve the link to subvolume path.
+ // Our end goal here is to obtain a bootstrapped snapshot of this
+ // machine while watching out for potential race conditions (other
+ // instances as well as machines being added/upgraded/removed; see
+ // the manual for details).
+ //
+ // So here is our overall plan:
+ //
+ // 1. Resolve current subvolume link for our bootstrap protocol.
+ //
+ // 2. Lock the machine. This excludes any other instance from
+ // trying to perform the following steps.
+ //
+ // 3. If there is no link, cleanup old bootstrap (if any) and
+ // ignore this machine.
+ //
+ // 4. Try to create a snapshot of current subvolume (this
+ // operation is atomic). If failed (e.g., someone changed the
+ // link and removed the subvolume in the meantime), retry from
+ // #1.
+ //
+ // 5. Compare the snapshot to the already bootstrapped version (if
+ // any) and see if we need to re-bootstrap. If so, use the
+ // snapshot as a starting point. Rename to bootstrapped at the
+ // end (atomic).
//
- dir_path sp; // <name>-<P>.<R>
+ dir_path lp (dir_path (md) /= (mn + '-' + bs_prot)); // -<P>
+ dir_path tp (dir_path (md) /= (mn + '-' + tc_name)); // -<toolchain>
- try
+ auto delete_bootstrapped = [&tp, &trace] () // Delete -<toolchain>.
{
- sp = path_cast<dir_path> (readsymlink (lp));
+ run_btrfs (trace, "property", "set", "-ts", tp, "ro", "false");
+ run_btrfs (trace, "subvolume", "delete", tp);
+ };
- if (sp.relative ())
- sp = md / sp;
- }
- catch (const system_error& e)
+ for (size_t retry (0);; ++retry)
{
- // Leave the subvolume path empty if the subvolume link doesn't
- // exist and fail on any other error.
- //
- if (e.code ().category () != std::generic_category () ||
- e.code ().value () != ENOENT)
- fail << "unable to read subvolume link " << lp << ": " << e;
- }
+ if (retry != 0)
+ sleep (1);
- // Try to lock the machine.
- //
- machine_lock ml (lock_machine (tl, tp));
+ // Resolve the link to subvolume path.
+ //
+ dir_path sp; // <name>-<P>.<R>
- if (!ml.locked ())
- {
- machine_manifest mm;
- if (ml.prio)
+ try
{
- // Get the machine manifest (subset of the steps performed for
- // the locked case below).
- //
- // Note that it's possible the machine we get is not what was
- // originally locked by the other process (e.g., it has been
- // upgraded since). It's also possible that if and when we
- // interrupt and lock this machine, it will be a different
- // machine (e.g., it has been upgraded since we read this
- // machine manifest). To deal with all of that we will be
- // reloading this information if/when we acquire the lock to
- // this machine.
- //
- if (sp.empty ())
- {
- l3 ([&]{trace << "skipping " << md << ": no subvolume link";});
- break;
- }
-
- l3 ([&]{trace << "keeping " << md << ": locked by " << ml.pid
- << " with priority " << *ml.prio;});
-
- mm = parse_manifest<machine_manifest> (
- sp / "manifest", "machine");
+ sp = path_cast<dir_path> (readsymlink (lp));
- none = none && mm.effective_role () == machine_role::auxiliary;
+ if (sp.relative ())
+ sp = md / sp;
}
- else // Bootstrapping/suspended.
+ catch (const system_error& e)
{
- l3 ([&]{trace << "keeping " << md << ": being bootstrapped "
- << "or suspened by " << ml.pid;});
-
- // Assume it is a build machine (we cannot determine whether
- // it is build or auxiliary without loading its manifest).
+ // Leave the subvolume path empty if the subvolume link
+ // doesn't exist and fail on any other error.
//
- none = false;
+ if (e.code ().category () != std::generic_category () ||
+ e.code ().value () != ENOENT)
+ fail << "unable to read subvolume link " << lp << ": " << e;
}
- // Add the machine to the lists and bail out.
+ // Try to lock the machine.
//
- r.push_back (bootstrapped_machine {
- move (ml),
- move (tp),
- bootstrapped_machine_manifest {move (mm), {}, {}}});
+ machine_lock ml (lock_machine (tl, tp));
- break;
- }
+ if (!ml.locked ())
+ {
+ machine_manifest mm;
+ if (ml.prio)
+ {
+ // Get the machine manifest (subset of the steps performed
+ // for the locked case below).
+ //
+ // Note that it's possible the machine we get is not what
+ // was originally locked by the other process (e.g., it has
+ // been upgraded since). It's also possible that if and when
+ // we interrupt and lock this machine, it will be a
+ // different machine (e.g., it has been upgraded since we
+ // read this machine manifest). To deal with all of that we
+ // will be reloading this information if/when we acquire the
+ // lock to this machine.
+ //
+ if (sp.empty ())
+ {
+ l3 ([&]{trace << "skipping " << md << ": no subvolume link";});
+ break;
+ }
- bool te (dir_exists (tp));
+ l3 ([&]{trace << "keeping " << md << ": locked by " << ml.pid
+ << " with priority " << *ml.prio;});
- // If the resolution fails, then this means there is no current
- // machine subvolume (for this bootstrap protocol). In this case
- // we clean up our toolchain subvolume (-<toolchain>, if any) and
- // ignore this machine.
- //
- if (sp.empty ())
- {
- if (te)
- delete_bootstrapped ();
+ mm = parse_manifest<machine_manifest> (
+ sp / "manifest", "machine");
- l3 ([&]{trace << "skipping " << md << ": no subvolume link";});
- break;
- }
+ none =
+ none && mm.effective_role () == machine_role::auxiliary;
+ }
+ else // Bootstrapping/suspended.
+ {
+ l3 ([&]{trace << "keeping " << md << ": being bootstrapped "
+ << "or suspended by " << ml.pid;});
- // <name>-<toolchain>-<xxx>
- //
- dir_path xp (snapshot_path (tp));
+ // Assume it is a build machine (we cannot determine whether
+ // it is build or auxiliary without loading its manifest).
+ //
+ none = false;
+ }
- if (btrfs_exit (trace, "subvolume", "snapshot", sp, xp) != 0)
- {
- if (retry >= 10)
- fail << "unable to snapshot subvolume " << sp;
+ // Add the machine to the lists and bail out.
+ //
+ r.push_back (bootstrapped_machine {
+ move (ml),
+ move (tp),
+ bootstrapped_machine_manifest {move (mm), {}, {}}});
- continue;
- }
+ break;
+ }
- // Load the (original) machine manifest.
- //
- machine_manifest mm (
- parse_manifest<machine_manifest> (sp / "manifest", "machine"));
+ bool te (dir_exists (tp));
- bool aux (mm.effective_role () == machine_role::auxiliary);
+ // If the resolution fails, then this means there is no current
+ // machine subvolume (for this bootstrap protocol). In this case
+ // we clean up our toolchain subvolume (-<toolchain>, if any)
+ // and ignore this machine.
+ //
+ if (sp.empty ())
+ {
+ if (te)
+ delete_bootstrapped ();
- // Skip machines for which we don't have sufficient RAM.
- //
- if (effective_ram_minimum (mm) >
- (aux ? ops.auxiliary_ram () : ops.build_ram ()))
- {
- l3 ([&]{trace << "skipping " << md << ": insufficient RAM";});
- run_btrfs (trace, "subvolume", "delete", xp);
- break;
- }
+ l3 ([&]{trace << "skipping " << md << ": no subvolume link";});
+ break;
+ }
- none = none && aux;
+ // <name>-<toolchain>-<xxx>
+ //
+ dir_path xp (snapshot_path (tp));
- // If we already have <name>-<toolchain>, see if it needs to be
- // re-bootstrapped. Things that render it obsolete:
- //
- // 1. New machine revision (compare machine ids).
- // 2. New toolchain (compare toolchain ids, not auxiliary).
- // 3. New bbot/libbbot (compare versions, not auxiliary).
- //
- // The last case has a complication: what should we do if we have
- // bootstrapped a newer version of bbot? This would mean that we
- // are about to be stopped and upgraded (and the upgraded version
- // will probably be able to use the result). So we simply ignore
- // this machine for this run.
- //
- // Note: see similar code in the machine interruption logic.
- //
- optional<bootstrapped_machine_manifest> bmm;
- if (te)
- {
- bmm = parse_manifest<bootstrapped_machine_manifest> (
- tp / "manifest", "bootstrapped machine");
+ if (btrfs_exit (trace, "subvolume", "snapshot", sp, xp) != 0)
+ {
+ if (retry >= 10)
+ fail << "unable to snapshot subvolume " << sp;
+
+ continue;
+ }
+
+ // Load the (original) machine manifest.
+ //
+ machine_manifest mm (
+ parse_manifest<machine_manifest> (sp / "manifest", "machine"));
+
+ bool aux (mm.effective_role () == machine_role::auxiliary);
- if (bmm->machine.id != mm.id)
+ // Skip machines for which we don't have sufficient RAM.
+ //
+ if (effective_ram_minimum (mm) >
+ (aux ? ops.auxiliary_ram () : ops.build_ram ()))
{
- l3 ([&]{trace << "re-bootstrap " << tp << ": new machine";});
- te = false;
+ l3 ([&]{trace << "skipping " << md << ": insufficient RAM";});
+ run_btrfs (trace, "subvolume", "delete", xp);
+ break;
}
- if (!aux)
+ none = none && aux;
+
+ // If we already have <name>-<toolchain>, see if it needs to be
+ // re-bootstrapped. Things that render it obsolete:
+ //
+ // 1. New machine revision (compare machine ids).
+ // 2. New toolchain (compare toolchain ids, not auxiliary).
+ // 3. New bbot/libbbot (compare versions, not auxiliary).
+ //
+ // The last case has a complication: what should we do if we
+ // have bootstrapped a newer version of bbot? This would mean
+ // that we are about to be stopped and upgraded (and the
+ // upgraded version will probably be able to use the result). So
+ // we simply ignore this machine for this run.
+ //
+ // Note: see similar code in the machine interruption logic.
+ //
+ optional<bootstrapped_machine_manifest> bmm;
+ if (te)
{
- if (!tc_id.empty () && bmm->toolchain.id != tc_id)
+ bmm = parse_manifest<bootstrapped_machine_manifest> (
+ tp / "manifest", "bootstrapped machine");
+
+ if (bmm->machine.id != mm.id)
{
- l3 ([&]{trace << "re-bootstrap " << tp << ": new toolchain";});
+ l3 ([&]{trace << "re-bootstrap " << tp << ": new machine";});
te = false;
}
- if (int i = compare_bbot (bmm->bootstrap))
+ if (!aux)
{
- if (i < 0)
+ if (!tc_id.empty () && bmm->toolchain.id != tc_id)
{
- l3 ([&]{trace << "re-bootstrap " << tp << ": new bbot";});
+ l3 ([&]{trace << "re-bootstrap " << tp << ": new toolchain";});
te = false;
}
- else
+
+ if (int i = compare_bbot (bmm->bootstrap))
{
- l3 ([&]{trace << "ignoring " << tp << ": old bbot";});
- run_btrfs (trace, "subvolume", "delete", xp);
- break;
+ if (i < 0)
+ {
+ l3 ([&]{trace << "re-bootstrap " << tp << ": new bbot";});
+ te = false;
+ }
+ else
+ {
+ l3 ([&]{trace << "ignoring " << tp << ": old bbot";});
+ run_btrfs (trace, "subvolume", "delete", xp);
+ break;
+ }
}
}
+
+ if (!te)
+ delete_bootstrapped ();
}
+ else
+ l3 ([&]{trace << "bootstrap " << tp;});
if (!te)
- delete_bootstrapped ();
- }
- else
- l3 ([&]{trace << "bootstrap " << tp;});
-
- if (!te)
- {
- // Ignore any other machines that need bootstrapping.
- //
- if (!pboot)
{
- pboot = pending_bootstrap {
- move (ml), move (tp), move (xp), move (mm), move (bmm)};
+ // Ignore any other machines that need bootstrapping.
+ //
+ if (!pboot)
+ {
+ pboot = pending_bootstrap {
+ move (ml), move (tp), move (xp), move (mm), move (bmm)};
+ }
+ else
+ run_btrfs (trace, "subvolume", "delete", xp);
+
+ break;
}
else
run_btrfs (trace, "subvolume", "delete", xp);
+ // Add the machine to the lists.
+ //
+ r.push_back (
+ bootstrapped_machine {move (ml), move (tp), move (*bmm)});
+
break;
- }
- else
- run_btrfs (trace, "subvolume", "delete", xp);
+ } // Retry loop.
+ } // Inner dir_iterator loop.
+ }
+ catch (const system_error& e)
+ {
+ // Once in a while we get ENOENT while iterating over the machines
+ // volume directory. This directory contains the machine directories
+ // (not btrfs subvolumes) and is not being changed when we get this
+ // error. Maybe this is due to directory sizes/timestamps changes,
+ // but then we would expect to get this error a lot more often..? So
+ // this feels like a btrfs bug which we are going to retry a few
+ // times. See GH issue #349 for additional information.
+ //
+ dir_iter_retry = (dir_iter_retries++ != 3);
- // Add the machine to the lists.
- //
- r.push_back (
- bootstrapped_machine {move (ml), move (tp), move (*bmm)});
+ (dir_iter_retry
+ ? warn
+ : error) << "unable to iterate over " << vd << ": " << e;
+ if (dir_iter_retry)
break;
- } // Retry loop.
- } // Inner dir_iterator loop.
- }
- catch (const system_error& e)
- {
- fail << "unable to iterate over " << vd << ": " << e;
- }
- } // Outer dir_iterator loop.
+ else
+ throw failed ();
+ }
+ } // Outer dir_iterator loop.
+
+ if (dir_iter_retry)
+ continue; // Re-enumerate from scratch.
+ else
+ dir_iter_retries = 0; // Reset for re-enumeration due to other reasons.
+ }
+ catch (const system_error& e)
+ {
+ fail << "unable to iterate over " << machines << ": " << e;
+ }
// See if there is a pending bootstrap and whether we can perform it.
//
@@ -1533,10 +1570,6 @@ try
// Unreachable.
}
-catch (const system_error& e)
-{
- fail << "unable to iterate over " << machines << ": " << e << endf;
-}
// Perform the build task throwing interrupt if it has been interrupted.
//
diff --git a/bbot/agent/http-service.cxx b/bbot/agent/http-service.cxx
index 1921edc..3214885 100644
--- a/bbot/agent/http-service.cxx
+++ b/bbot/agent/http-service.cxx
@@ -4,6 +4,7 @@
#include <bbot/agent/http-service.hxx>
#include <libbutl/curl.hxx>
+#include <libbutl/semantic-version.hxx>
#include <bbot/diagnostics.hxx>
@@ -14,6 +15,8 @@ namespace bbot
{
namespace http_service
{
+ static optional<semantic_version> curl_version;
+
result
post (const agent_options& o, const string& u, const parameters& params)
{
@@ -77,6 +80,17 @@ namespace bbot
p.name + '=' + p.value);
}
+ // Query the curl's version, if not done yet. If something goes wrong,
+ // set the version to 0.0.0 so that we treat it as a really old curl.
+ //
+ if (!curl_version)
+ {
+ if (optional<semantic_version> v = curl::version (path ("curl")))
+ curl_version = move (*v);
+ else
+ curl_version = semantic_version {0, 0, 0};
+ }
+
// Note that we prefer the low-level process API for running curl over
// using butl::curl because in this context it is restrictive and
// inconvenient.
@@ -110,6 +124,23 @@ namespace bbot
//
"--include",
+ // Note that in the presence of the
+ // --include|-i option, the output may
+ // include the CONNECT request response
+ // headers if curl tunnels through a
+ // proxy. To suppress these headers we
+ // also add the
+ // --suppress-connect-headers option for
+ // the curl versions 7.54.0 (when the
+ // option was invented) and above. For
+ // the earlier versions we just don't
+ // support the tunneling.
+ //
+ (*curl_version >=
+ semantic_version {7, 54, 0}
+ ? "--suppress-connect-headers"
+ : nullptr),
+
"--max-time", o.request_timeout (),
"--connect-timeout", o.connect_timeout (),
fos,
diff --git a/bbot/machine-manifest.hxx b/bbot/machine-manifest.hxx
index d500957..b488425 100644
--- a/bbot/machine-manifest.hxx
+++ b/bbot/machine-manifest.hxx
@@ -40,26 +40,26 @@ namespace bbot
strings
unquoted_options () const; // Return empty if absent.
- machine_manifest (std::string i,
- std::string n,
- std::string s,
+ machine_manifest (string i,
+ string n,
+ string s,
machine_type t,
optional<string> m,
optional<strings> o,
- strings c)
- : machine_header_manifest (std::move (i),
- std::move (n),
- std::move (s),
- //
- // @@ TMP AUXILIARY
- //
- nullopt /* role */,
- nullopt /* ram_minimum */,
- nullopt /* ram_maximum */),
+ strings c,
+ optional<machine_role> r,
+ optional<uint64_t> rmn,
+ optional<uint64_t> rmx)
+ : machine_header_manifest (move (i),
+ move (n),
+ move (s),
+ r,
+ rmn,
+ rmx),
type (t),
- mac (std::move (m)),
- options (std::move (o)),
- changes (std::move (c)) {}
+ mac (move (m)),
+ options (move (o)),
+ changes (move (c)) {}
public:
machine_manifest () = default;
diff --git a/bbot/machine-manifest.test.testscript b/bbot/machine-manifest.test.testscript
index e358ff3..f6a2eae 100644
--- a/bbot/machine-manifest.test.testscript
+++ b/bbot/machine-manifest.test.testscript
@@ -19,7 +19,8 @@
type: kvm
mac: de:ad:be:ef:de:ad
options: -device "virtio-scsi-pci,id=scsi" -device "scsi-hd,drive=disk0"
- changes:\
+ changes:
+ \
0.7.0
- mac is changed to de:ad:be:ef:de:ad
- increased disk size to 30GB
@@ -34,7 +35,8 @@
name: windows_10-msvc_14
summary: Windows 10 build 1607 with VC 14 update 3
type: kvm
- options:\
+ options:
+ \
-device "virtio-scsi-pci,id=scsi"
-device "scsi-hd,drive=disk0"
\
@@ -331,7 +333,8 @@
ram-minimum: 1048576
type: kvm
mac: e6:38:72:53:61:ae
- changes:\
+ changes:
+ \
1.0
- clone off linux_debian_12-small-1.0
- postgresql-15 15.6.0+deb12u1
diff --git a/bbot/worker/worker.cxx b/bbot/worker/worker.cxx
index 8fb7796..b019337 100644
--- a/bbot/worker/worker.cxx
+++ b/bbot/worker/worker.cxx
@@ -2402,8 +2402,7 @@ build (size_t argc, const char* argv[])
// Configure.
//
{
- operation_result* pr (&add_result ("configure"));
- operation_result& r (*pr); // @@ TMP: Apple Clang 14.0.3 ICE
+ operation_result& r (add_result ("configure"));
// If we have auxiliary environment, show it in the logs.
//
@@ -2549,7 +2548,7 @@ build (size_t argc, const char* argv[])
}
else
{
- // b create(<dir>) config.config.load=~host
+ // b create(<dir>) config.config.load=~host-no-warnings
//
// Note also that we suppress warnings about unused config.* values.
//
@@ -2559,7 +2558,7 @@ build (size_t argc, const char* argv[])
bkp_step, bkp_status, aux_env, last_cmd,
"-V",
"create(" + host_conf.representation () + ",cc)",
- "config.config.load=~host",
+ "config.config.load=~host-no-warnings",
"config.config.persist+='config.*'@unused=drop");
if (!r.status)
@@ -2592,9 +2591,14 @@ build (size_t argc, const char* argv[])
// Create the module configuration.
//
{
- // b create(<dir>) config.config.load=~build2 [<env-config-args>
- // <tgt-config-args>
- // <pkg-config-args>]
+ // b create(<dir>) config.config.load=~build2[-no-warnings]
+ // [<env-config-args>
+ // <tgt-config-args>
+ // <pkg-config-args>]
+ //
+ // If the main package is not a build system module or the
+ // configuration is not self-hosted, then we load the
+ // ~build2-no-warnings configuration rather than ~build2.
//
// Note also that we suppress warnings about unused config.* values.
//
@@ -2634,7 +2638,9 @@ build (size_t argc, const char* argv[])
bkp_step, bkp_status, aux_env, last_cmd,
"-V",
"create(" + module_conf.representation () + ',' + mods + ')',
- "config.config.load=~build2",
+ (module_pkg && selfhost
+ ? "config.config.load=~build2"
+ : "config.config.load=~build2-no-warnings"),
"config.config.persist+='config.*'@unused=drop",
eas,
cas,
@@ -2675,9 +2681,9 @@ build (size_t argc, const char* argv[])
mods += m;
}
- // b create(<dir>) config.config.load=~build2 [<env-config-args>
- // <tgt-config-args>
- // <pkg-config-args>]
+ // b create(<dir>) config.config.load=~build2 <env-config-args>
+ // <tgt-config-args>
+ // <pkg-config-args>
//
r.status |= run_b (
b,
@@ -3551,8 +3557,7 @@ build (size_t argc, const char* argv[])
// Update the main package.
//
{
- operation_result* pr (&add_result ("update"));
- operation_result& r (*pr); // @@ TMP: Apple Clang 14.0.3 ICE
+ operation_result& r (add_result ("update"));
change_wd (trace, &r.log, rwd / main_pkg_conf);
@@ -3759,8 +3764,7 @@ build (size_t argc, const char* argv[])
if (has_internal_tests || has_runtime_tests || has_buildtime_tests)
{
- operation_result* pr (&add_result ("test"));
- operation_result& r (*pr); // @@ TMP: Apple Clang 14.0.3 ICE
+ operation_result& r (add_result ("test"));
// Run internal tests.
//
@@ -3887,8 +3891,7 @@ build (size_t argc, const char* argv[])
//
if (install_root)
{
- operation_result* pr (&add_result ("install"));
- operation_result& r (*pr); // @@ TMP: Apple Clang 14.0.3 ICE
+ operation_result& r (add_result ("install"));
change_wd (trace, &r.log, effective_install_conf);
@@ -4022,8 +4025,7 @@ build (size_t argc, const char* argv[])
if (bindist)
{
- operation_result* pr (&add_result ("bindist"));
- operation_result& r (*pr); // @@ TMP: Apple Clang 14.0.3 ICE
+ operation_result& r (add_result ("bindist"));
// Fail if the breakpoint refers to a bpkg.bindist.* step but this
// step differs from the enabled one.
@@ -4335,8 +4337,7 @@ build (size_t argc, const char* argv[])
//
if (sys_install)
{
- operation_result* pr (&add_result ("sys-install"));
- operation_result& r (*pr); // @@ TMP: Apple Clang 14.0.3 ICE
+ operation_result& r (add_result ("sys-install"));
// Fail if the breakpoint refers to the bbot.sys-install step since
// it has no specific command associated.
@@ -4660,8 +4661,7 @@ build (size_t argc, const char* argv[])
if (has_internal_tests || has_runtime_tests || has_buildtime_tests)
{
- operation_result* pr (&add_result ("test-installed"));
- operation_result& r (*pr); // @@ TMP: Apple Clang 14.0.3 ICE
+ operation_result& r (add_result ("test-installed"));
change_wd (trace, &r.log, rwd);
@@ -4972,7 +4972,7 @@ build (size_t argc, const char* argv[])
//
if (create_module)
{
- // b create(<dir>) config.config.load=~build2
+ // b create(<dir>) config.config.load=~build2-no-warnings
//
step_id b (step_id::bpkg_test_separate_installed_create);
@@ -4982,7 +4982,7 @@ build (size_t argc, const char* argv[])
bkp_step, bkp_status, aux_env, last_cmd,
"-V",
"create(" + module_conf.representation () + ",cc)",
- "config.config.load=~build2",
+ "config.config.load=~build2-no-warnings",
"config.config.persist+='config.*'@unused=drop");
if (!r.status)
@@ -5456,8 +5456,7 @@ build (size_t argc, const char* argv[])
(*bindist == step_id::bpkg_bindist_debian ||
*bindist == step_id::bpkg_bindist_fedora))
{
- operation_result* pr (&add_result ("sys-uninstall"));
- operation_result& r (*pr); // @@ TMP: Apple Clang 14.0.3 ICE
+ operation_result& r (add_result ("sys-uninstall"));
// Noop, just for the log record.
//
@@ -5578,8 +5577,7 @@ build (size_t argc, const char* argv[])
//
if (install_root)
{
- operation_result* pr (&add_result ("uninstall"));
- operation_result& r (*pr); // @@ TMP: Apple Clang 14.0.3 ICE
+ operation_result& r (add_result ("uninstall"));
change_wd (trace, &r.log, effective_install_conf);
@@ -5637,8 +5635,7 @@ build (size_t argc, const char* argv[])
if (bindist_upload)
{
- operation_result* pr (&add_result ("upload"));
- operation_result& r (*pr); // @@ TMP: Apple Clang 14.0.3 ICE
+ operation_result& r (add_result ("upload"));
change_wd (trace, &r.log, rwd);
@@ -5851,8 +5848,6 @@ build (size_t argc, const char* argv[])
// artifacts preparation for upload, then use this log to report the
// error. Otherwise, add the new log for that.
//
- // @@ TMP: Apple Clang 14.0.3 ICE
- //
operation_result* pr (&rm.results.back ());
if (pr->operation != "upload")
diff --git a/doc/cli.sh b/doc/cli.sh
index ae36428..0d76c29 100755
--- a/doc/cli.sh
+++ b/doc/cli.sh
@@ -1,6 +1,6 @@
#! /usr/bin/env bash
-version=0.17.0-a.0.z
+version=0.18.0-a.0.z
trap 'exit 1' ERR
set -o errtrace # Trap in functions.
diff --git a/doc/manual.cli b/doc/manual.cli
index 2fa3248..64f2179 100644
--- a/doc/manual.cli
+++ b/doc/manual.cli
@@ -1505,7 +1505,7 @@ Worker script for \c{host} packages:
{
# [bpkg.create]
#
- b -V create(<host-conf>, cc) config.config.load=~host
+ b -V create(<host-conf>, cc) config.config.load=~host-no-warnings
bpkg -v create --existing --type host -d <host-conf>
}
@@ -1549,7 +1549,7 @@ bpkg -v fetch -d <host-conf> --trust <repository-fp>
# [bpkg.create]
#
- b -V create(<module-conf>, cc) config.config.load=~build2
+ b -V create(<module-conf>, cc) config.config.load=~build2-no-warnings
bpkg -v create --existing --type build2 -d <module-conf>
# [bpkg.link]
@@ -1801,7 +1801,9 @@ bpkg -v update -d <host-conf> <package-name>
# [bpkg.test-separate-installed.create]
#
- b -V create(<module-conf>, cc) config.config.load=~build2
+ b -V create(<module-conf>, cc) \\
+ config.config.load=~build2-no-warnings
+
bpkg -v create --existing --type build2 -d <module-conf>
# [bpkg.test-separate-installed.link]
@@ -1934,7 +1936,7 @@ Worker script for \c{module} packages:
{
# [bpkg.create]
#
- b -V create(<module-conf>, cc) config.config.load=~build2
+ b -V create(<module-conf>, cc) config.config.load=~build2-no-warnings
bpkg -v create --existing --type build2 -d <module-conf>
}
@@ -1976,7 +1978,7 @@ bpkg -v fetch -d <module-conf> --trust <repository-fp>
# [bpkg.create]
#
- b -V create(<host-conf>, cc) config.config.load=~host
+ b -V create(<host-conf>, cc) config.config.load=~host-no-warnings
bpkg -v create --existing --type host -d <host-conf>
# [bpkg.link]
@@ -2146,7 +2148,9 @@ bpkg -v update -d <module-conf> <package-name>
{
# [bpkg.test-separate-installed.create]
#
- b -V create(<module-conf>, cc) config.config.load=~build2
+ b -V create(<module-conf>, cc) \\
+ config.config.load=~build2-no-warnings
+
bpkg -v create --existing --type build2 -d <module-conf>
# bpkg.test-separate-installed.create (
diff --git a/manifest b/manifest
index 5fcb879..73bca1e 100644
--- a/manifest
+++ b/manifest
@@ -1,6 +1,6 @@
: 1
name: bbot
-version: 0.17.0-a.0.z
+version: 0.18.0-a.0.z
project: build2
summary: build2 build bot
license: MIT
@@ -15,9 +15,9 @@ email: users@build2.org
build-warning-email: builds@build2.org
builds: all : &host
requires: c++14
-depends: * build2 >= 0.16.0-
-depends: * bpkg >= 0.16.0-
+depends: * build2 >= 0.16.0
+depends: * bpkg >= 0.16.0
# @@ DEP Should probably become conditional dependency.
#requires: ? cli ; Only required if changing .cli files.
-depends: libbutl [0.17.0-a.0.1 0.17.0-a.1)
-depends: libbbot [0.17.0-a.0.1 0.17.0-a.1)
+depends: libbutl [0.18.0-a.0.1 0.18.0-a.1)
+depends: libbbot [0.18.0-a.0.1 0.18.0-a.1)
diff --git a/repositories.manifest b/repositories.manifest
index b10bd68..3a28818 100644
--- a/repositories.manifest
+++ b/repositories.manifest
@@ -3,8 +3,8 @@ summary: build2 build bot repository
:
role: prerequisite
-location: ../libbutl.git##HEAD
+location: ../libbutl.git#HEAD
:
role: prerequisite
-location: ../libbbot.git##HEAD
+location: ../libbbot.git#HEAD
diff --git a/tests/integration/testscript b/tests/integration/testscript
index 988859f..2dcd849 100644
--- a/tests/integration/testscript
+++ b/tests/integration/testscript
@@ -64,7 +64,7 @@ b.test-installed.configure:\"config.cc.loptions=-L'$~/install/lib'\" \
bpkg.test-separate-installed.create:\"config.cc.loptions=-L'$~/install/lib'\""
pkg = libhello
-ver = 1.0.0+11
+ver = 1.0.0+12
#rep_url = "https://git.build2.org/hello/libhello.git#1.0"
#rep_type = git
rep_url = https://stage.build2.org/1
@@ -153,14 +153,14 @@ rfp = yes
#
#\
pkg = libbuild2-hello
-ver = 0.1.0
+ver = 0.2.0
rep_url = "https://github.com/build2/libbuild2-hello.git#master"
rep_type = git
#rep_url = https://stage.build2.org/1
#rep_type = pkg
rfp = yes
-tests="tests: * libbuild2-hello-tests == $ver"
-host='host: true'
+tests = "tests: * libbuild2-hello-tests == $ver"
+host = 'host: true'
#\
#package_config = 'package-config: -bpkg.install:'
#\
@@ -175,7 +175,7 @@ bpkg.module.create:config.bin.rpath=[null]
#
#\
pkg = libbuild2-kconfig
-ver = 0.3.0-a.0.20221118053819.f702eb65da87
+ver = 0.3.0
rep_url = "https://github.com/build2/libbuild2-kconfig.git#master"
rep_type = git
#ver = 0.1.0-a.0.20200910053253.a71aa3f3938b
@@ -409,7 +409,7 @@ rfp = yes
#\
pkg = libodb-sqlite
ver = 2.5.0-b.26.20240131175206.1c7f67f47770
-rep_url = "https://git.codesynthesis.com/var/scm/odb/odb.git#multi-package"
+rep_url = "https://git.codesynthesis.com/var/scm/odb/odb.git"
rep_type = git
rfp = yes
tests="tests: odb-tests == $ver"' ? (!$defined(config.odb_tests.database)) config.odb_tests.database=sqlite'
@@ -424,7 +424,7 @@ package_config = 'package-config:
#\
pkg = libodb-pgsql
ver = 2.5.0-b.26.20240131175206.1c7f67f47770
-rep_url = "https://git.codesynthesis.com/var/scm/odb/odb.git#multi-package"
+rep_url = "https://git.codesynthesis.com/var/scm/odb/odb.git"
rep_type = git
rfp = yes
tests="tests: odb-tests == $ver"' ? (!$defined(config.odb_tests.database)) config.odb_tests.database=pgsql'
@@ -439,7 +439,7 @@ package_config = 'package-config:
#\
pkg = odb-tests
ver = 2.5.0-b.26.20240131175206.1c7f67f47770
-rep_url = "https://git.codesynthesis.com/var/scm/odb/odb.git#multi-package"
+rep_url = "https://git.codesynthesis.com/var/scm/odb/odb.git"
rep_type = git
rfp = yes
#\
@@ -453,7 +453,7 @@ config.odb_tests.database="sqlite pgsql"
#\
pkg = libodb-oracle
ver = 2.5.0-b.26.20240201133448.3fa01c83a095
-rep_url = "https://git.codesynthesis.com/var/scm/odb/odb.git#multi-package"
+rep_url = "https://git.codesynthesis.com/var/scm/odb/odb.git"
rep_type = git
rfp = yes
package_config = 'package-config:
@@ -465,7 +465,7 @@ config.cc.poptions+=-I/usr/include/oracle/12.2/client64 config.cc.loptions+=-L/u
#\
pkg = libodb-qt
ver = 2.5.0-b.26.20240201180613.633ad7ccad39
-rep_url = "https://git.codesynthesis.com/var/scm/odb/odb.git#multi-package"
+rep_url = "https://git.codesynthesis.com/var/scm/odb/odb.git"
rep_type = git
rfp = yes
#\