// file : bbot/worker.cxx -*- C++ -*- // license : TBC; see accompanying LICENSE file #ifndef _WIN32 # include // signal() #else # include #endif #include #include #include // strchr() #include #include #include // find(), find_if(), remove_if() #include #include #include #include // to_utf8() #include #include #include #include #include #include #include #include #include using namespace butl; using namespace bbot; using std::cout; using std::endl; namespace bbot { int main (int argc, char* argv[]); static int build (size_t argc, const char* argv[]); process_path argv0; worker_options ops; dir_path env_dir; // Note that upload can be quite large and take a while to upload under high // load. // const size_t tftp_blksize (1468); // Between 512 (default) and 65464. const size_t tftp_put_timeout (3600); // 1 hour (also the default). const size_t tftp_get_timeout (10); // 10 seconds. const size_t tftp_get_retries (3); // Task request retries (see startup()). } bool exists (const dir_path& d) try { return dir_exists (d); } catch (const system_error& e) { fail << "unable to stat path " << d << ": " << e << endf; } static dir_path current_directory () try { return dir_path::current_directory (); } catch (const system_error& e) { fail << "unable to obtain current directory: " << e << endf; } static dir_path change_wd (tracer& t, string* log, const dir_path& d, bool create = false) try { if (create) { if (verb >= 3) t << "mkdir -p " << d; if (log != nullptr) *log += "mkdir -p " + d.representation () + '\n'; try_mkdir_p (d); } dir_path r (current_directory ()); if (verb >= 3) t << "cd " << d; if (log != nullptr) *log += "cd " + d.representation () + '\n'; dir_path::current_directory (d); return r; } catch (const system_error& e) { fail << "unable to change current directory to " << d << ": " << e << endf; } static void mv (tracer& t, string* log, const dir_path& from, const dir_path& to) try { if (verb >= 3) t << "mv " << from << ' ' << to; if (log != nullptr) *log += "mv " 
+ from.representation () + ' ' + to.representation () + "\n";

  mvdir (from, to);
}
catch (const system_error& e)
{
  fail << "unable to move directory '" << from << "' to '" << to << "': " << e << endf;
}

// Remove the directory d recursively, tracing/logging the equivalent rm -r
// command.
//
static void
rm_r (tracer& t, string* log, const dir_path& d)
try
{
  if (verb >= 3)
    t << "rm -r " << d;

  if (log != nullptr)
    *log += "rm -r " + d.representation () + '\n';

  rmdir_r (d);
}
catch (const system_error& e)
{
  fail << "unable to remove directory " << d << ": " << e << endf;
}

// Step IDs.
//
enum class step_id
{
  bpkg_module_create,
  bpkg_module_configure_add,
  bpkg_module_configure_fetch,
  bpkg_module_configure_build,
  bpkg_module_update,
  bpkg_module_test,
  bpkg_create,
  bpkg_configure_add,
  bpkg_configure_fetch,
  bpkg_configure_build,
  bpkg_update,
  bpkg_test,
  bpkg_test_separate_configure_build,
  bpkg_test_separate_update,
  bpkg_test_separate_test,
  bpkg_install,
  b_test_installed_create,
  b_test_installed_configure,
  b_test_installed_test,
  bpkg_test_installed_create,
  bpkg_test_installed_configure_add,
  bpkg_test_installed_configure_fetch,
  bpkg_test_separate_installed_configure_build,
  bpkg_test_separate_installed_update,
  bpkg_test_separate_installed_test,
  bpkg_uninstall,
  end
};

// String representations of the step ids. Must be indexed by (and so match
// the order of) the step_id enumerators above.
//
static const strings step_id_str {
  "bpkg.module.create",
  "bpkg.module.configure.add",
  "bpkg.module.configure.fetch",
  "bpkg.module.configure.build",
  "bpkg.module.update",
  "bpkg.module.test",
  "bpkg.create",
  "bpkg.configure.add",
  "bpkg.configure.fetch",
  "bpkg.configure.build",
  "bpkg.update",
  "bpkg.test",
  "bpkg.test-separate.configure.build",
  "bpkg.test-separate.update",
  "bpkg.test-separate.test",
  "bpkg.install",
  "b.test-installed.create",
  "b.test-installed.configure",
  "b.test-installed.test",
  "bpkg.test-installed.create",
  "bpkg.test-installed.configure.add",
  "bpkg.test-installed.configure.fetch",
  "bpkg.test-separate-installed.configure.build",
  "bpkg.test-separate-installed.update",
  "bpkg.test-separate-installed.test",
  "bpkg.uninstall",
  "end"};

using std::regex;
namespace regex_constants =
std::regex_constants;

// NOTE(review): the template argument list (presumably <regex>) appears to
// have been stripped from this copy -- restore from the original.
//
using regexes = vector;

// Run the worker script command. Name is used for logging and diagnostics
// only. Match lines read from the command's stderr against the regular
// expressions and return the warning result status (instead of success) in
// case of a match. Save the executed command into last_cmd.
//
// If bkp_step is present and is equal to the command step, then prior to
// running this command ask the user if to continue or abort the task
// execution. If bkp_status is present, then ask for that if the command
// execution results with the specified or more critical status.
//
// For the special end step no command is executed. In this case only the user
// is potentially prompted and the step is traced/logged.
//
template
static result_status
run_cmd (step_id step,
         tracer& t,
         string& log,
         const regexes& warn_detect,
         const string& name,
         const optional& bkp_step,
         const optional& bkp_status,
         string& last_cmd,
         const process_env& pe,
         A&&... a)
{
  // UTF-8-sanitize and log the diagnostics. Also print the raw diagnostics
  // to stderr at verbosity level 3 or higher.
  //
  auto add = [&log, &t] (string&& s, bool trace = true)
  {
    if (verb >= 3)
    {
      if (trace)
        t << s;
      else
        text << s;
    }

    to_utf8 (s, '?', codepoint_types::graphic, U"\n\r\t");

    log += s;
    log += '\n';
  };

  string next_cmd;

  // Prompt the user if to continue the task execution and, if they refuse,
  // log this and throw abort.
  //
  struct abort {};

  auto prompt = [&last_cmd, &next_cmd, &add] (const string& what)
  {
    diag_record dr (text);

    dr << '\n'
       << what << '\n'
       << " current dir: " << current_directory () << '\n'
       << " environment: " << ops.env_script () << ' ' << ops.env_target ();

    if (!last_cmd.empty ())
      dr << '\n'
         << " last command: " << last_cmd;

    if (!next_cmd.empty ())
      dr << '\n'
         << " next command: " << next_cmd;

    dr.flush ();

    if (!yn_prompt (
          "continue execution (or you may shutdown the machine)? [y/n]"))
    {
      add ("execution aborted by interactive user");
      throw abort ();
    }
  };

  // Trace/log the step id and optionally prompt the user if the breakpoint
  // step is reached.
  //
  auto prompt_step = [step, &t, &log, &bkp_step, &prompt] ()
  {
    const string& sid (step_id_str[static_cast (step)]);

    // Prompt the user if the breakpoint is reached.
    //
    if (bkp_step && *bkp_step == step)
      prompt (sid + " step reached");

    string ts (to_string (system_clock::now (),
                          "%Y-%m-%d %H:%M:%S %Z",
                          true /* special */,
                          true /* local */));

    // Log the step id and the command to be executed.
    //
    l3 ([&]{t << "step id: " << sid << ' ' << ts;});

#ifndef _WIN32
    log += "# step id: ";
#else
    log += "rem step id: ";
#endif
    log += sid;
    log += ' ';
    log += ts;
    log += '\n';
  };

  try
  {
    // Trace, log, and save the command line.
    //
    auto cmdc = [&t, &log, &next_cmd, &prompt_step] (const char* c[], size_t n)
    {
      std::ostringstream os;
      process::print (os, c, n);
      next_cmd = os.str ();

      prompt_step ();

      t (c, n);

      log += next_cmd;
      log += '\n';
    };

    result_status r (result_status::success);

    if (step != step_id::end)
    {
      try
      {
        fdpipe pipe (fdopen_pipe ()); // Text mode seems appropriate.

        process pr (
          process_start_callback (cmdc,
                                  fdopen_null (), // Never reads from stdin.
                                  2,              // 1>&2
                                  pipe,
                                  pe,
                                  forward (a)...));

        pipe.out.close ();

        {
          // Skip on exception.
          //
          ifdstream is (move (pipe.in), fdstream_mode::skip);

          for (string l; is.peek () != ifdstream::traits_type::eof (); )
          {
            getline (is, l);

            // Match the log line with the warning-detecting regular
            // expressions until the first match.
            //
            if (r != result_status::warning)
            {
              for (const regex& re: warn_detect)
              {
                // Only examine the first 512 bytes. Long lines (e.g., linker
                // command lines) could trigger implementation-specific
                // limitations (like stack overflow). Plus, it is a
                // performance concern.
                //
                if (regex_search (l.begin (),
                                  (l.size () < 512
                                   ? l.end ()
                                   : l.begin () + 512),
                                  re))
                {
                  r = result_status::warning;
                  break;
                }
              }
            }

            add (move (l), false /* trace */);
          }
        }

        if (!pr.wait ())
        {
          const process_exit& e (*pr.exit);
          add (name + " " + to_string (e));
          r = e.normal () ? result_status::error : result_status::abnormal;
        }

        last_cmd = move (next_cmd);

        if (bkp_status && r >= *bkp_status)
        {
          next_cmd.clear (); // Note: used by prompt().
          prompt (!r ? "error occured" : "warning is issued");
        }
      }
      catch (const process_error& e)
      {
        fail << "unable to execute " << name << ": " << e;
      }
      catch (const io_error& e)
      {
        fail << "unable to read " << name << " diagnostics: " << e;
      }
    }
    else
    {
      next_cmd.clear (); // Note: used by prompt_step().
      prompt_step ();
    }

    return r;
  }
  catch (const abort&)
  {
    return result_status::abort;
  }
}

// Run the bpkg program with the specified verbosity, command, and arguments,
// setting the given environment variables for the process (see run_cmd()
// for the rest of the arguments' semantics).
//
template
static result_status
run_bpkg (step_id step,
          const V& envvars,
          tracer& t,
          string& log,
          const regexes& warn_detect,
          const optional& bkp_step,
          const optional& bkp_status,
          string& last_cmd,
          const char* verbosity,
          const string& cmd,
          A&&... a)
{
  return run_cmd (step,
                  t,
                  log,
                  warn_detect,
                  "bpkg " + cmd,
                  bkp_step, bkp_status, last_cmd,
                  process_env ("bpkg", envvars),
                  verbosity, cmd, forward (a)...);
}

// As above but with no environment variables.
//
template
static result_status
run_bpkg (step_id step,
          tracer& t,
          string& log,
          const regexes& warn_detect,
          const optional& bkp_step,
          const optional& bkp_status,
          string& last_cmd,
          const char* verbosity,
          const string& cmd,
          A&&... a)
{
  const char* const* envvars (nullptr);

  return run_bpkg (step,
                   envvars,
                   t, log, warn_detect, bkp_step, bkp_status, last_cmd,
                   verbosity, cmd, forward (a)...);
}

// Run the b program with the specified verbosity, buildspec(s), and
// arguments, setting the given environment variables for the process (see
// run_cmd() for the rest of the arguments' semantics).
//
template
static result_status
run_b (step_id step,
       const V& envvars,
       tracer& t,
       string& log,
       const regexes& warn_detect,
       const optional& bkp_step,
       const optional& bkp_status,
       string& last_cmd,
       const char* verbosity,
       const strings& buildspecs,
       A&&...
a)
{
  // Use the space-separated list of buildspecs as the command name for
  // logging/diagnostics.
  //
  string name ("b");
  for (const string& s: buildspecs)
  {
    if (!name.empty ())
      name += ' ';

    name += s;
  }

  return run_cmd (step,
                  t,
                  log,
                  warn_detect,
                  name,
                  bkp_step, bkp_status, last_cmd,
                  process_env ("b", envvars),
                  verbosity, buildspecs, forward (a)...);
}

// As above but with a single buildspec.
//
template
static result_status
run_b (step_id step,
       const V& envvars,
       tracer& t,
       string& log,
       const regexes& warn_detect,
       const optional& bkp_step,
       const optional& bkp_status,
       string& last_cmd,
       const char* verbosity,
       const string& buildspec,
       A&&... a)
{
  return run_cmd (step,
                  t,
                  log,
                  warn_detect,
                  "b " + buildspec,
                  bkp_step, bkp_status, last_cmd,
                  process_env ("b", envvars),
                  verbosity, buildspec, forward (a)...);
}

// As above but with no environment variables.
//
template
static result_status
run_b (step_id step,
       tracer& t,
       string& log,
       const regexes& warn_detect,
       const optional& bkp_step,
       const optional& bkp_status,
       string& last_cmd,
       const char* verbosity,
       const string& buildspec,
       A&&... a)
{
  const char* const* envvars (nullptr);

  return run_b (step,
                envvars,
                t, log, warn_detect, bkp_step, bkp_status, last_cmd,
                verbosity, buildspec, forward (a)...);
}

// Upload compressed manifest to the specified TFTP URL with curl. Issue
// diagnostics and throw failed on invalid manifest or process management
// errors and throw io_error for input/output errors or non-zero curl exit.
//
template
static void
upload_manifest (tracer& trace,
                 const string& url,
                 const T& m,
                 const string& what)
{
  try
  {
    // Piping the data directly into curl's stdin sometimes results in the
    // broken pipe error on the client and partial/truncated upload on the
    // server. This happens quite regularly on older Linux distributions
    // (e.g., Debian 8, Ubuntu 16.04) but also sometimes on Windows. On the
    // other hand, uploading from a file appears to work reliably (we still
    // get an odd error on Windows from time to time with larger uploads).
    //
#if 0
    // Note: need to add compression support if re-enable this.
    //
    tftp_curl c (trace,
                 path ("-"),
                 nullfd,
                 curl::put,
                 url,
                 "--tftp-blksize", tftp_blksize,
                 "--max-time", tftp_put_timeout);

    manifest_serializer s (c.out, url);
    m.serialize (s);
    c.out.close ();
#else
    // Save the LZ4-compressed manifest into a temporary file and upload that
    // (the file is automatically removed on scope exit).
    //
    auto_rmfile tmp;

    try
    {
      tmp = auto_rmfile (path::temp_path (what + "-manifest.lz4"));

      ofdstream ofs (tmp.path);
      olz4stream ozs (ofs, 9, 5 /* 256KB */, nullopt /* content_size */);

      manifest_serializer s (ozs, tmp.path.string ());
      m.serialize (s);

      ozs.close ();
      ofs.close ();
    }
    catch (const io_error& e) // In case not derived from system_error.
    {
      fail << "unable to save " << what << " manifest: " << e;
    }
    catch (const system_error& e)
    {
      fail << "unable to save " << what << " manifest: " << e;
    }

    tftp_curl c (trace,
                 tmp.path,
                 nullfd,
                 curl::put,
                 url,
                 "--tftp-blksize", tftp_blksize,
                 "--max-time", tftp_put_timeout);
#endif

    if (!c.wait ())
      throw_generic_ios_failure (EIO, "non-zero curl exit code");
  }
  catch (const manifest_serialization& e)
  {
    fail << "invalid " << what << " manifest: " << e.description;
  }
  catch (const process_error& e)
  {
    fail << "unable to execute curl: " << e;
  }
  catch (const system_error& e)
  {
    // Translate the system error into an ios_base::failure of the matching
    // category so the caller can handle it as io_error.
    //
    const auto& c (e.code ());

    if (c.category () == generic_category ())
      throw_generic_ios_failure (c.value (), e.what ());
    else
      throw_system_ios_failure (c.value (), e.what ());
  }
}

static int bbot::
build (size_t argc, const char* argv[])
{
  using namespace bpkg;

  using string_parser::unquote;

  tracer trace ("build");

  // Our overall plan is as follows:
  //
  // 1. Parse the task manifest (it should be in CWD).
  //
  // 2. Run bpkg to create the configuration, add the repository, and
  //    configure, build, test, optionally install, test installed and
  //    uninstall the package all while saving the logs in the result
  //    manifest.
  //
  // 3. Upload the result manifest.
  //
  // Note also that we are being "watched" by the startup version of us which
  // will upload an appropriate result in case we exit with an error.
So here // for abnormal situations (like a failure to parse the manifest), we just // fail. // task_manifest tm ( parse_manifest (path ("task.manifest"), "task")); result_manifest rm { tm.name, tm.version, result_status::success, operation_results {} }; // Reserve storage large enough to hold all the potential operation results // without reallocations. Note that this is not an optimization but is // required to make sure that element references are not invalidated when // new results are added. // size_t max_results (6); rm.results.reserve (max_results); auto add_result = [&rm, max_results] (string o) -> operation_result& { assert (rm.results.size () < max_results); rm.results.push_back ( operation_result {move (o), result_status::success, ""}); return rm.results.back (); }; // Note that we don't consider the build system module configuring and // testing during the "pre-step" as separate operations and share the // operation logs with the "main" configure and test steps (see below). // Thus, we save pointers to the added result objects for the subsequent // use. // operation_result* configure_result (nullptr); operation_result* test_result (nullptr); dir_path rwd; // Root working directory. // Resolve the breakpoint specified by the interactive manifest value into // the step id or the result status breakpoint. If the breakpoint is // invalid, then log the error and abort the build. Note that we reuse the // configure operation log here not to complicate things. // optional bkp_step; optional bkp_status; string last_cmd; // Used in the user prompt. for (;;) // The "breakout" loop. { // Regular expressions that detect different forms of build2 toolchain // warnings. Accidently (or not), they also cover GCC and Clang warnings // (for the English locale). // // The expressions will be matched multiple times, so let's make the // matching faster, with the potential cost of making regular expressions // creation slower. 
// regex::flag_type f (regex_constants::optimize); // ECMAScript is implied. regexes wre { regex ("^warning: ", f), regex ("^.+: warning: ", f)}; for (const string& re: tm.unquoted_warning_regex ()) wre.emplace_back (re, f); if (tm.interactive) { const string& b (*tm.interactive); if (b == "error") bkp_status = result_status::error; else if (b == "warning") bkp_status = result_status::warning; else { for (size_t i (0); i < step_id_str.size (); ++i) { if (b == step_id_str[i]) { bkp_step = static_cast (i); break; } } } if (!bkp_step && !bkp_status) { string e ("invalid interactive build breakpoint '" + b + "'"); l3 ([&]{trace << e;}); operation_result& r (add_result ("configure")); r.log = "error: " + e + '\n'; r.status = result_status::abort; break; } } // Split the argument into prefix (empty if not present) and unquoted // value. Return nullopt if the prefix is invalid. // auto parse_arg = [] (const string& a) -> optional> { size_t p (a.find_first_of (":=\"'")); if (p == string::npos || a[p] != ':') // No prefix. return make_pair (string (), unquote (a)); for (const string& id: step_id_str) { if (a.compare (0, p, id, 0, p) == 0 && (id.size () == p || (id.size () > p && id[p] == '.'))) return make_pair (a.substr (0, p), unquote (a.substr (p + 1))); } return nullopt; // Prefix is invalid. }; // Enter split arguments into a map. Those without a prefix are // entered for the *.create steps. // auto add_arg = [] (std::multimap& args, pair&& a) { if (!a.first.empty ()) args.emplace (move (a)); else { args.emplace ("bpkg.create", a.second); args.emplace ("b.test-installed.create", a.second); args.emplace ("bpkg.test-installed.create", move (a.second)); } }; // Parse configuration arguments. Report failures to the bbot controller. 
// std::multimap config_args; for (const string& c: tm.config) { optional> v (parse_arg (c)); if (!v) { rm.status |= result_status::abort; l3 ([&]{trace << "invalid configuration argument prefix in " << "'" << c << "'";}); break; } if (v->second[0] != '-' && v->second.find ('=') == string::npos) { rm.status |= result_status::abort; l3 ([&]{trace << "invalid configuration argument '" << c << "'";}); break; } add_arg (config_args, move (*v)); } if (!rm.status) break; // Parse environment arguments. // std::multimap modules; std::multimap env_args; for (size_t i (1); i != argc; ++i) { const char* a (argv[i]); optional> v (parse_arg (a)); if (!v) fail << "invalid environment argument prefix in '" << a << "'"; bool mod (v->second[0] != '-' && v->second.find ('=') == string::npos); if (mod && !v->first.empty () && v->first != "bpkg.create" && v->first != "b.test-installed.create" && v->first != "bpkg.test-installed.create") fail << "invalid module prefix in '" << a << "'"; add_arg (mod ? modules : env_args, move (*v)); } // Return command arguments for the specified step id. Arguments with more // specific prefixes come last. // auto step_args = [] (const std::multimap& args, step_id step, optional fallback = nullopt) -> strings { strings r; const string& sid (step_id_str[static_cast (step)]); // If no arguments found for the step id, then use the fallback step id, // if specified. // const string& s (args.find (sid) == args.end () && fallback ? step_id_str[static_cast (*fallback)] : sid); for (size_t n (0);; ++n) { n = s.find ('.', n); auto range ( args.equal_range (n == string::npos ? s : string (s, 0, n))); for (auto i (range.first); i != range.second; ++i) r.emplace_back (i->second); if (n == string::npos) break; } return r; }; // Search for config.install.root variable. If it is present and has a // non-empty value, then test the package installation and uninstall. Note // that passing [null] value would be meaningless, so we don't recognize // it as a special one. 
While at it, cache the bpkg.create args for later // use. // dir_path install_root; strings cargs (step_args (config_args, step_id::bpkg_create)); { size_t n (19); auto space = [] (char c) {return c == ' ' || c == '\t';}; for (const string& s: reverse_iterate (cargs)) { if (s.compare (0, n, "config.install.root") == 0 && (s[n] == '=' || space (s[n]))) { while (space (s[n])) ++n; // Skip spaces. if (s[n] == '=') ++n; // Skip the equal sign. while (space (s[n])) ++n; // Skip spaces. // Note that the config.install.root variable value may // potentially be quoted. // install_root = dir_path (unquote (string (s, n, s.size () - n))); break; } } } // bpkg-rep-fetch trust options. // cstrings trust_ops; { const char* t ("--trust-no"); for (const string& fp: tm.trust) { if (fp == "yes") t = "--trust-yes"; else { trust_ops.push_back ("--trust"); trust_ops.push_back (fp.c_str ()); } } trust_ops.push_back (t); } const string& pkg (tm.name.string ()); const version& ver (tm.version); const string repo (tm.repository.string ()); const dir_path pkg_dir (pkg + '-' + ver.string ()); // Specify the revision explicitly for the bpkg-build command not to end // up with a race condition building the latest revision rather than the // zero revision. // const string pkg_rev (pkg + '/' + version (ver.epoch, ver.upstream, ver.release, ver.effective_revision (), ver.iteration).string ()); // Query the project's build system information with `b info`. // auto prj_info = [&trace] (const dir_path& d, bool ext_mods, const char* what) { // Note that the `b info` diagnostics won't be copied into any of the // build logs. This is fine as this is likely to be an infrastructure // problem, given that the project distribution has been successfully // created. It's actually not quite clear which log this diagnostics // could go into. // try { return b_info (d, ext_mods, verb, trace); } catch (const b_error& e) { if (e.normal ()) throw failed (); // Assume the build2 process issued diagnostics. 
fail << "unable to query " << what << ' ' << d << " info: " << e << endf; } }; b_project_info prj; // Package project information. rwd = current_directory (); // If the package comes from a version control-based repository, then we // will also test its dist meta-operation. Specifically, we will checkout // the package outside the configuration directory passing --checkout-root // to the configure-only pkg-build command, re-distribute the checked out // directory in the load distribution mode, and then use this distribution // as a source to build the package. // dir_path dist_root (rwd / dir_path ("dist")); dir_path dist_src (dist_root / pkg_dir); // Redistribute the package source directory (pkg_dir) checked out into // the directory other than the configuration directory (dist_root) and // replace it with the newly created distribution. Assume that the current // directory is the package configuration directory. Optionally pass the // config.import.* variable override and/or set the environment variables // for the build2 process. Return true if the dist meta-operation // succeeds. // auto redist = [&trace, &wre, &bkp_step, &bkp_status, &last_cmd] (step_id step, operation_result& r, const dir_path& dist_root, const dir_path& pkg_dir, // - const char* import = nullptr, const small_vector& envvars = {}) { // Temporarily change the current directory to the distribution root // parent directory from the configuration directory to shorten the // command line paths and try to avoid the '..' path prefix. // dir_path dp (dist_root.directory ()); dir_path dn (dist_root.leaf ()); // Redistribute the package using the configured output directory. // dir_path cnf_dir (change_wd (trace, &r.log, dp)); dir_path out_dir (cnf_dir.relative (dp) / pkg_dir); dir_path src_dir (dn / pkg_dir); // Create the re-distribution root directory next to the distribution // root. 
// dir_path redist_root ("re" + dn.string ()); r.status |= run_b ( step, envvars, trace, r.log, wre, bkp_step, bkp_status, last_cmd, "-v", "config.dist.root=" + redist_root.string (), import, ("dist('" + src_dir.representation () + "'@'" + out_dir.representation () + "')")); if (!r.status) return false; // Replace the package source directory with the re-distribution result. // rm_r (trace, &r.log, src_dir); mv (trace, &r.log, redist_root / pkg_dir, src_dir); change_wd (trace, &r.log, cnf_dir); // Return back to the configuration. return true; }; // The module phase. // // If this is a build system module, perform a "pre-step" by building it // in a separate configuration reproducing the one used to build build2 // itself. Note that the configuration and the environment options and // variables are not passed to commands that may affect this // configuration. // bool module (pkg.compare (0, 10, "libbuild2-") == 0); dir_path module_dir ("build-module"); // If this is a build system module that requires bootstrap, then its // importation into the dependent (test) projects cannot be configured and // the corresponding config.import.* variable needs to be specified on the // bpkg/build2 command line as a global override, whenever required. // // Note that such a module must be explicitly marked with `requires: // bootstrap` in its manifest. This can only be detected after the module // is configured and its manifest available. // bool bootstrap (false); // Note that we will parse the package manifest right after the package is // configured. // package_manifest pm; path mf ("manifest"); if (module) { // Configure. // { operation_result& r (add_result ("configure")); configure_result = &r; // Noop, just for the log record. // change_wd (trace, &r.log, rwd); // b create() config.config.load=~build2 // // [bpkg.module.create] // // Note also that we suppress warnings about unused config.* values, // such CLI configuration. // // What if a module wants to use CLI? 
The current thinking is that we // will be "whitelisting" base (i.e., those that can plausibly be used // by multiple modules) libraries and tools for use by build system // modules. So if and when we whitelist CLI, we will add it here, next // to cc. // r.status |= run_b ( step_id::bpkg_module_create, trace, r.log, wre, bkp_step, bkp_status, last_cmd, "-V", "create(" + module_dir.representation () + ",cc)", "config.config.load=~build2", "config.config.persist+='config.*'@unused=drop"); if (!r.status) break; change_wd (trace, &r.log, module_dir); // bpkg create --existing // r.status |= run_bpkg ( step_id::bpkg_module_create, trace, r.log, wre, bkp_step, bkp_status, last_cmd, "-v", "create", "--existing"); if (!r.status) break; // bpkg add // // bpkg.module.configure.add (bpkg.configure.add) // r.status |= run_bpkg ( step_id::bpkg_module_configure_add, trace, r.log, wre, bkp_step, bkp_status, last_cmd, "-v", "add", step_args (env_args, step_id::bpkg_module_configure_add, step_id::bpkg_configure_add), step_args (config_args, step_id::bpkg_module_configure_add, step_id::bpkg_configure_add), repo); if (!r.status) break; // bpkg fetch // // bpkg.module.configure.fetch (bpkg.configure.fetch) // r.status |= run_bpkg ( step_id::bpkg_module_configure_fetch, trace, r.log, wre, bkp_step, bkp_status, last_cmd, "-v", "fetch", step_args (env_args, step_id::bpkg_module_configure_fetch, step_id::bpkg_configure_fetch), step_args (config_args, step_id::bpkg_module_configure_fetch, step_id::bpkg_configure_fetch), trust_ops); if (!r.status) break; // bpkg build --configure-only / // // [bpkg.module.configure.build] // r.status |= run_bpkg ( step_id::bpkg_module_configure_build, trace, r.log, wre, bkp_step, bkp_status, last_cmd, "-v", "build", "--configure-only", "--checkout-root", dist_root, "--yes", pkg_rev); if (!r.status) break; rm.status |= r.status; bool dist (exists (dist_src)); const dir_path& src_dir (dist ? 
dist_src : pkg_dir); // Note that being unable to parse the package manifest is likely to // be an infrastructure problem, given that the package has been // successfully configured. // pm = parse_manifest (src_dir / mf, "package"); bootstrap = find_if (pm.requirements.begin (), pm.requirements.end (), [] (const requirement_alternatives& r) { return r.size () == 1 && r[0] == "bootstrap"; }) != pm.requirements.end (); if (dist) { // Note that we reuse the configure operation log for the dist // meta-operation. // if (!redist (step_id::bpkg_module_configure_build, r, dist_root, pkg_dir)) break; rm.status |= r.status; } } // Update. // { operation_result& r (add_result ("update")); // Noop, just for the log record to reduce the potential confusion for // the combined log reader due to the configure operation log sharing // (see above for details). // change_wd (trace, &r.log, current_directory ()); // bpkg update // // [bpkg.module.update] // r.status |= run_bpkg ( step_id::bpkg_module_update, trace, r.log, wre, bkp_step, bkp_status, last_cmd, "-v", "update", pkg); if (!r.status) break; rm.status |= r.status; } // Run the package internal tests if the test operation is supported by // the project. // prj = prj_info (pkg_dir, true /* ext_mods */, "project"); if (find (prj.operations.begin (), prj.operations.end (), "test") != prj.operations.end ()) { operation_result& r (add_result ("test")); test_result = &r; // Use --package-cwd to help ported to build2 third-party packages a // bit (see bpkg-pkg-test(1) for details). // // Note that internal tests that load the module itself don't make // much sense, thus we don't pass the config.import.* variable on // the command line for modules that require bootstrap. // // bpkg test // // [bpkg.module.test] // r.status |= run_bpkg ( step_id::bpkg_module_test, trace, r.log, wre, bkp_step, bkp_status, last_cmd, "-v", "test", "--package-cwd", pkg); if (!r.status) break; rm.status |= r.status; } } // The main phase. 
// // Use the global override for modules that require bootstrap. // string module_import ( module ? ((bootstrap ? "!config.import." : "config.import.") + tm.name.variable () + "=" + (rwd / module_dir).string ()) : ""); // Configure. // dir_path build_dir ("build"); // Configuration directory name. dir_path pkg_config (rwd / (module ? module_dir : build_dir)); { operation_result& r (configure_result != nullptr ? *configure_result : add_result ("configure")); change_wd (trace, &r.log, rwd); // bpkg create // // bpkg.create // { // If the package is a build system module, then make sure it is // importable in this configuration (see above about bootstrap). // r.status |= run_bpkg ( step_id::bpkg_create, trace, r.log, wre, bkp_step, bkp_status, last_cmd, "-V", "create", "-d", build_dir.string (), "--wipe", step_args (modules, step_id::bpkg_create), step_args (env_args, step_id::bpkg_create), cargs, module && !bootstrap ? module_import.c_str () : nullptr); if (!r.status) break; } change_wd (trace, &r.log, build_dir); // bpkg add // // bpkg.configure.add // r.status |= run_bpkg ( step_id::bpkg_configure_add, trace, r.log, wre, bkp_step, bkp_status, last_cmd, "-v", "add", step_args (env_args, step_id::bpkg_configure_add), step_args (config_args, step_id::bpkg_configure_add), repo); if (!r.status) break; // bpkg fetch // // bpkg.configure.fetch // r.status |= run_bpkg ( step_id::bpkg_configure_fetch, trace, r.log, wre, bkp_step, bkp_status, last_cmd, "-v", "fetch", step_args (env_args, step_id::bpkg_configure_fetch), step_args (config_args, step_id::bpkg_configure_fetch), trust_ops); if (!r.status) break; // bpkg build --configure-only // / // // bpkg.configure.build // if (!module) // Note: the module is already built in the pre-step. 
{ r.status |= run_bpkg ( step_id::bpkg_configure_build, trace, r.log, wre, bkp_step, bkp_status, last_cmd, "-v", "build", "--configure-only", "--checkout-root", dist_root, "--yes", step_args (env_args, step_id::bpkg_configure_build), step_args (config_args, step_id::bpkg_configure_build), "--", pkg_rev); if (!r.status) break; bool dist (exists (dist_src)); const dir_path& src_dir (dist ? dist_src : pkg_dir); pm = parse_manifest (src_dir / mf, "package"); if (dist) { if (!redist (step_id::bpkg_configure_build, r, dist_root, pkg_dir)) break; rm.status |= r.status; } } rm.status |= r.status; } // Update. // if (!module) // Note: the module is already built in the pre-step. { operation_result& r (add_result ("update")); // bpkg update // // bpkg.update // r.status |= run_bpkg ( step_id::bpkg_update, trace, r.log, wre, bkp_step, bkp_status, last_cmd, "-v", "update", step_args (env_args, step_id::bpkg_update), step_args (config_args, step_id::bpkg_update), pkg); if (!r.status) break; rm.status |= r.status; } // Run the package internal tests if the test operation is supported by // the project, except for the build system module which is taken care of // in the pre-step. // bool internal_tests; if (module) { internal_tests = false; } else { prj = prj_info (pkg_dir, true /* ext_mods */, "project"); internal_tests = find (prj.operations.begin (), prj.operations.end (), "test") != prj.operations.end (); } // Run the package external tests, if specified. But first filter them // against the test-exclude task manifest values using the package names. // // Note that a proper implementation should also make sure that the // excluded test package version matches the version that will supposedly // be configured by bpkg and probably abort the build if that's not the // case. Such a mismatch can happen due to some valid reasons (the // repository was updated since the task was issued, etc) and should // probably be followed with automatic rebuild (the flake monitor idea). 
// Anyway, this all requires additional thinking, so let's keep it simple // for now. // // Filter the external test dependencies in place. // pm.tests.erase ( remove_if (pm.tests.begin (), pm.tests.end (), [&tm] (const test_dependency& td) { return find_if (tm.test_exclusions.begin (), tm.test_exclusions.end (), [&td] (const package& te) { return te.name == td.name; }) != tm.test_exclusions.end (); }), pm.tests.end ()); bool external_tests (!pm.tests.empty ()); // Configure, re-distribute if comes from a version control-based // repository, update, and test packages in the bpkg configuration in the // current working directory. Optionally pass the config.import.* variable // override and/or set the environment variables for bpkg processes. // Return true if all operations for all packages succeed. // // Pass true as the installed argument to use the test separate installed // phase step ids (bpkg.test-separate-installed.*) and the test separate // phase step ids (bpkg.test-separate.*) otherwise. In both cases fall // back to the main phase step ids (bpkg.*) when no environment/ // configuration arguments are specified for them. // // Pass true as the sys_dep argument to configure the dependent package as // a system dependency, which is normally required for testing modules and // installed dependents. Note that bpkg configures the dependent package // as a special dependency for the test package. // auto test = [&trace, &wre, &bkp_step, &bkp_status, &last_cmd, &step_args, &config_args, &env_args, &pm, &redist] (operation_result& r, const dir_path& dist_root, bool installed, bool sys_dep, const char* import = nullptr, const small_vector& envvars = {}) { for (const test_dependency& td: pm.tests) { const string& pkg (td.name.string ()); // Configure. // // bpkg build --configure-only // '[ ]' // // bpkg.test-separate[-installed].configure.build (bpkg.configure.build) // step_id s (installed ? 
step_id::bpkg_test_separate_installed_configure_build : step_id::bpkg_test_separate_configure_build); r.status |= run_bpkg ( s, envvars, trace, r.log, wre, bkp_step, bkp_status, last_cmd, "-v", "build", "--configure-only", "--checkout-root", dist_root, "--yes", step_args (env_args, s, step_id::bpkg_configure_build), step_args (config_args, s, step_id::bpkg_configure_build), import, "--", td.string (), sys_dep ? ("?sys:" + pm.name.string ()).c_str () : nullptr); if (!r.status) return false; // Note that re-distributing the test package is a bit tricky since we // don't know its version and so cannot deduce its source directory // name easily. We could potentially run the bpkg-status command after // the package is configured and parse the output to obtain the // version. Let's, however, keep it simple and find the source // directory using the package directory name pattern. // if (exists (dist_root)) try { dir_path pkg_dir; path_search (dir_path (pkg + "-*/"), [&pkg_dir] (path&& pe, const string&, bool interm) { if (!interm) pkg_dir = path_cast (move (pe)); return interm; }, dist_root); if (!pkg_dir.empty () && !redist (s, r, dist_root, pkg_dir, import, envvars)) return false; } catch (const system_error& e) { fail << "unable to scan directory " << dist_root << ": " << e; } // Update. // // bpkg update // // bpkg.test-separate[-installed].update (bpkg.update) // s = installed ? step_id::bpkg_test_separate_installed_update : step_id::bpkg_test_separate_update; r.status |= run_bpkg ( s, envvars, trace, r.log, wre, bkp_step, bkp_status, last_cmd, "-v", "update", step_args (env_args, s, step_id::bpkg_update), step_args (config_args, s, step_id::bpkg_update), import, pkg); if (!r.status) return false; // Test. // // Note that we assume that the package supports the test operation // since this is its main purpose. // // bpkg test // // bpkg.test-separate[-installed].test (bpkg.test) // s = installed ? 
step_id::bpkg_test_separate_installed_test : step_id::bpkg_test_separate_test; r.status |= run_bpkg ( s, envvars, trace, r.log, wre, bkp_step, bkp_status, last_cmd, "-v", "test", "--package-cwd", // See above for details. step_args (env_args, s, step_id::bpkg_test), step_args (config_args, s, step_id::bpkg_test), import, pkg); if (!r.status) return false; } return true; }; if (internal_tests || external_tests) { operation_result& r (test_result != nullptr ? *test_result : add_result ("test")); // Noop, just for the log record to reduce the potential confusion for // the combined log reader due to updating the build system module in a // separate configuration (see above for details). // if (module) change_wd (trace, &r.log, current_directory ()); // Run internal tests. // if (internal_tests) // Note: false for modules (see above). { // bpkg test // // bpkg.test // r.status |= run_bpkg ( step_id::bpkg_test, trace, r.log, wre, bkp_step, bkp_status, last_cmd, "-v", "test", "--package-cwd", // See above for details. step_args (env_args, step_id::bpkg_test), step_args (config_args, step_id::bpkg_test), pkg); if (!r.status) break; } // Run external tests. // // Note that we assume that these packages belong to the dependent // package's repository or its complement repositories, recursively. // Thus, we test them in the configuration used to build the dependent // package (except for the build system module). // if (external_tests) { // The test separate phase. // if (!test (r, dist_root, false /* installed */, module, bootstrap ? module_import.c_str () : nullptr)) break; // Back to the main phase. // } rm.status |= r.status; } // Install the package, optionally test the installation and uninstall // afterwards. // // These operations are triggered by presence of config.install.root // configuration variable having a non-empty value for // bpkg.configure.create step. // if (install_root.empty ()) break; // Now the overall plan is as follows: // // 1. Install the package. 
// // 2. If the package has subprojects that support the test operation, then // configure, build, and test them out of the source tree against the // installed package. // // 3. If any of the test packages are specified, then configure, build, // and test them in a separate bpkg configuration against the installed // package. // // 4. Uninstall the package. // // Install. // { operation_result& r (add_result ("install")); change_wd (trace, &r.log, pkg_config); // bpkg install // // bpkg.install // r.status |= run_bpkg ( step_id::bpkg_install, trace, r.log, wre, bkp_step, bkp_status, last_cmd, "-v", "install", step_args (env_args, step_id::bpkg_install), step_args (config_args, step_id::bpkg_install), pkg); if (!r.status) break; rm.status |= r.status; } // The test installed phase. // // Make sure that the installed package executables are properly imported // when configuring and running tests, unless we are testing the build // system module (that supposedly doesn't install any executables). // small_vector envvars; dir_paths subprj_dirs; // "Testable" package subprojects. // We expect the build system modules to not have any testable subprojects // but to have external tests package instead. // if (module) internal_tests = false; else { // Note that we add the $config.install.root/bin directory at the // beginning of the PATH environment variable value, so the installed // executables are found first. // string paths ("PATH=" + (install_root / "bin").string ()); if (optional s = getenv ("PATH")) { paths += path::traits_type::path_separator; paths += *s; } envvars.push_back (move (paths)); // Collect the "testable" subprojects. // for (const b_project_info::subproject& sp: prj.subprojects) { // Retrieve the subproject information similar to how we've done it // for the package. 
// b_project_info si (prj_info (pkg_dir / sp.path, true /* ext_mods */, "subproject")); const strings& ops (si.operations); if (find (ops.begin (), ops.end (), "test") != ops.end ()) subprj_dirs.push_back (sp.path); } // If there are any "testable" subprojects, then configure them // (sequentially) and test/build in parallel afterwards. // internal_tests = !subprj_dirs.empty (); } if (internal_tests || external_tests) { operation_result& r (add_result ("test-installed")); change_wd (trace, &r.log, rwd); // Run internal tests. // if (internal_tests) { string mods; // build2 create meta-operation parameters. for (const string& m: step_args (modules, step_id::b_test_installed_create)) { mods += mods.empty () ? ", " : " "; mods += m; } // b create(, ) // // b.test-installed.create // // Amalgamation directory that will contain configuration subdirectory // for package tests out of source tree build. // dir_path out_dir ("build-installed"); r.status |= run_b ( step_id::b_test_installed_create, trace, r.log, wre, bkp_step, bkp_status, last_cmd, "-V", "create('" + out_dir.representation () + "'" + mods + ")", step_args (env_args, step_id::b_test_installed_create), step_args (config_args, step_id::b_test_installed_create)); if (!r.status) break; // Configure subprojects and create buildspecs for their testing. // strings test_specs; for (const dir_path& d: subprj_dirs) { // b configure(@) // // // b.test-installed.configure // dir_path subprj_src_dir (exists (dist_src) ? 
dist_src / d : build_dir / pkg_dir / d); dir_path subprj_out_dir (out_dir / d); r.status |= run_b ( step_id::b_test_installed_configure, envvars, trace, r.log, wre, bkp_step, bkp_status, last_cmd, "-v", "configure('" + subprj_src_dir.representation () + "'@'" + subprj_out_dir.representation () + "')", step_args (env_args, step_id::b_test_installed_configure), step_args (config_args, step_id::b_test_installed_configure)); if (!r.status) break; test_specs.push_back ( "test('" + subprj_out_dir.representation () + "')"); } if (!r.status) break; // Build/test subprojects. // // b test()... // // b.test-installed.test // r.status |= run_b ( step_id::b_test_installed_test, envvars, trace, r.log, wre, bkp_step, bkp_status, last_cmd, "-v", test_specs, step_args (env_args, step_id::b_test_installed_test), step_args (config_args, step_id::b_test_installed_test)); if (!r.status) break; } // Run external tests. // if (external_tests) { // Configure. // // bpkg create // // bpkg.test-installed.create (bpkg.create) // dir_path config_dir ("build-installed-bpkg"); r.status |= run_bpkg ( step_id::bpkg_test_installed_create, trace, r.log, wre, bkp_step, bkp_status, last_cmd, "-V", "create", "-d", config_dir.string (), "--wipe", step_args (modules, step_id::bpkg_test_installed_create, step_id::bpkg_create), step_args (env_args, step_id::bpkg_test_installed_create, step_id::bpkg_create), step_args (config_args, step_id::bpkg_test_installed_create, step_id::bpkg_create)); if (!r.status) break; change_wd (trace, &r.log, config_dir); // bpkg add // // bpkg.test-installed.configure.add (bpkg.configure.add) // r.status |= run_bpkg ( step_id::bpkg_test_installed_configure_add, trace, r.log, wre, bkp_step, bkp_status, last_cmd, "-v", "add", step_args (env_args, step_id::bpkg_test_installed_configure_add, step_id::bpkg_configure_add), step_args (config_args, step_id::bpkg_test_installed_configure_add, step_id::bpkg_configure_add), repo); if (!r.status) break; // bpkg fetch // // 
bpkg.test-installed.configure.fetch (bpkg.configure.fetch) // r.status |= run_bpkg ( step_id::bpkg_test_installed_configure_fetch, trace, r.log, wre, bkp_step, bkp_status, last_cmd, "-v", "fetch", step_args (env_args, step_id::bpkg_test_installed_configure_fetch, step_id::bpkg_configure_fetch), step_args (config_args, step_id::bpkg_test_installed_configure_fetch, step_id::bpkg_configure_fetch), trust_ops); if (!r.status) break; // The test separate installed phase. // if (!test (r, rwd / dir_path ("dist-installed"), true /* installed */, true /* sys_dep */, nullptr /* import */, envvars)) break; // Back to the test installed phase. // } rm.status |= r.status; } // Back to the main phase. // // Uninstall. // { operation_result& r (add_result ("uninstall")); change_wd (trace, &r.log, pkg_config); // bpkg uninstall // // bpkg.uninstall // r.status |= run_bpkg ( step_id::bpkg_uninstall, trace, r.log, wre, bkp_step, bkp_status, last_cmd, "-v", "uninstall", step_args (env_args, step_id::bpkg_uninstall), step_args (config_args, step_id::bpkg_uninstall), pkg); if (!r.status) break; rm.status |= r.status; } break; } if (!rm.results.empty ()) { operation_result& r (rm.results.back ()); rm.status |= r.status; // Merge last in case of a break. // Also merge statuses of the configure and test operations, which logs // can potentially be shared across multiple steps and which results may // not be the last in the list. // if (configure_result != nullptr) rm.status |= configure_result->status; if (test_result != nullptr) rm.status |= test_result->status; // Unless there is an error (or worse) encountered, log the special 'end' // step and, if this step is specified in the interactive manifest value, // ask the user if to continue the task execution. 
// bool error (!rm.status); if (!error) { r.status |= run_cmd (step_id::end, trace, r.log, regexes (), "" /* name */, bkp_step, bkp_status, last_cmd, process_env ()); rm.status |= r.status; } // Truncate logs if they would exceed the upload limit. // // @@ TMP: currently this limit is hard-coded. In the future it should be // sent along with the task manifest. // const size_t upload_limit (10 * 1024 * 1024); { // Reserve 10K for other manifest values (alternatively, we could do it // exactly in upload_manifest()). // const size_t manifest_size (10 * 1024); size_t n (manifest_size); for (const operation_result& r: rm.results) n += r.log.size (); if (n > upload_limit) { // Divide the size equally among all the operations and truncate any // that exceed their allowance. This way we will get some information // for each operation. // n = (upload_limit - manifest_size) / rm.results.size (); for (operation_result& r: rm.results) { if (r.log.size () <= n) continue; // We need to be careful not to truncate it in the middle of UTF-8 // sequence. So we look for the last newline that still fits. // size_t p (n - 80 /* for the "log truncated" note */); for (; p != 0 && r.log[p] != '\n'; --p) ; r.log.resize (p != 0 ? p + 1 : 0); // Keep the newline. r.log += "-------------------------------LOG TRUNCATED-------------------------------\n"; } } } } else assert (rm.status == result_status::abort); if (!rwd.empty ()) change_wd (trace, nullptr /* log */, rwd); // Upload the result. // const string url ("tftp://" + ops.tftp_host () + "/result.manifest.lz4"); try { upload_manifest (trace, url, rm, "result"); // We use exit code 2 to signal abnormal termination but where we managed // to upload the result manifest. See startup() for details. // return rm.status != result_status::abnormal ? 0 : 2; } catch (const io_error& e) { error << "unable to upload result manifest to " << url << ": " << e; } // We use exit code 3 to signal an unsuccessful attempt to upload the result // manifest. 
See startup() for details. // return 3; } static int startup () { tracer trace ("startup"); // Our overall plan is as follows: // // 1. Download the task manifest into the build directory (CWD). // // 2. Parse it and get the target. // // 3. Find the environment setup executable for this target. // // 4. Execute the environment setup executable. // // 5. If the environment setup executable fails, then upload the (failed) // result ourselves. // const string url ("tftp://" + ops.tftp_host () + "/task.manifest"); const path mf ("task.manifest"); // If we fail, try to upload the result manifest (abnormal termination). The // idea is that the machine gets suspended and we can investigate what's // going on by logging in and examining the diagnostics (e.g., via // journalctl, etc). // task_manifest tm; try { // Download the task. // // We are downloading from our host so there shouldn't normally be any // connectivity issues. Unless, of course, we are on Windows where all // kinds of flakiness is business as usual. Note that having a long enough // timeout is not enough: if we try to connect before the network is up, // we will keep waiting forever, even after it is up. So we have to // timeout and try again. This is also pretty bad (unlike, say during // bootstrap which doesn't happen very often) since we are wasting the // machine time. So we are going to log it as a warning and not merely a // trace since if this is a common occurrence, then something has to be // done about it. // for (size_t retry (1);; ++retry) { try { tftp_curl c (trace, nullfd, mf, curl::get, url, "--tftp-blksize", tftp_blksize, "--max-time", tftp_get_timeout); if (!c.wait ()) throw_generic_error (EIO); break; } catch (const system_error& e) { bool bail (retry > tftp_get_retries); diag_record dr (bail ? error : warn); dr << "unable to download task manifest from " << url << " on " << retry << " try: " << e; if (bail) throw failed (); } } // Parse it. 
// tm = parse_manifest (mf, "task"); // Find the environment setup executable. // // While the executable path contains a directory (so the PATH search does // not apply) we still use process::path_search() to automatically handle // appending platform-specific executable extensions (.exe/.bat, etc). // process_path pp; if (tm.environment) { try { pp = process::try_path_search (env_dir / *tm.environment, false /* init */); } catch (const invalid_path& e) { fail << "invalid environment name '" << e.path << "': " << e; } if (pp.empty ()) fail << "no environment setup executable in " << env_dir << " " << "for environment name '" << *tm.environment << "'"; } else { pp = process::try_path_search (env_dir / "default", false /* init */); if (pp.empty ()) fail << "no default environment setup executable in " << env_dir; } // Run it. // strings os; string tg (tm.target.string ()); // Use the name=value notation for options to minimize the number of // arguments passed to the environment setup executable. Note that the // etc/environments/default-*.bat scripts can only handle the limited // number of arguments. // if (ops.systemd_daemon ()) os.push_back ("--systemd-daemon"); if (ops.verbose_specified ()) os.push_back ("--verbose=" + to_string (ops.verbose ())); if (ops.tftp_host_specified ()) os.push_back ("--tftp-host=" + ops.tftp_host ()); os.push_back (string ("--env-script=") + pp.effect_string ()); os.push_back ("--env-target=" + tg); // Note that we use the effective (absolute) path instead of recall since // we may have changed the CWD. // // Also note that the worker can ask the user if to continue the task // execution when the interactive build breakpoint is reached. Thus, we // don't redirect stdin to /dev/null. // // Exit code 2 signals abnormal termination but where the worker uploaded // the result itself. // // Exit code 3 signals an unsuccessful attempt by the worker to upload the // result manifest. 
There is no reason to retry (most likely there is // nobody listening on the other end anymore). // switch (run_io_exit (trace, 0, 2, 2, pp, tg, argv0.effect_string (), os)) { case 3: case 2: return 1; case 0: return 0; default: fail << "process " << pp << " exited with non-zero code" << endf; } } catch (const failed&) { const string url ("tftp://" + ops.tftp_host () + "/result.manifest.lz4"); // If we failed before being able to parse the task manifest, use the // "unknown" values for the package name and version. // result_manifest rm { tm.name.empty () ? bpkg::package_name ("unknown") : tm.name, tm.version.empty () ? bpkg::version ("0") : tm.version, result_status::abnormal, operation_results {} }; try { upload_manifest (trace, url, rm, "result"); } catch (const io_error& e) { fail << "unable to upload result manifest to " << url << ": " << e; } return 1; } } static int bootstrap () { bootstrap_manifest bm { bootstrap_manifest::versions_type { {"bbot", standard_version (BBOT_VERSION_STR)}, {"libbbot", standard_version (LIBBBOT_VERSION_STR)}, {"libbpkg", standard_version (LIBBPKG_VERSION_STR)}, {"libbutl", standard_version (LIBBUTL_VERSION_STR)} } }; serialize_manifest (bm, cout, "stdout", "bootstrap"); return 0; } int bbot:: main (int argc, char* argv[]) try { tracer trace ("main"); // This is a little hack to make our baseutils for Windows work when called // with absolute path. In a nutshell, MSYS2's exec*p() doesn't search in the // parent's executable directory, only in PATH. And since we are running // without a shell (that would read /etc/profile which sets PATH to some // sensible values), we are only getting Win32 PATH values. And MSYS2 /bin // is not one of them. So what we are going to do is add /bin at the end of // PATH (which will be passed as is by the MSYS2 machinery). This will make // MSYS2 search in /bin (where our baseutils live). And for everyone else // this should be harmless since it is not a valid Win32 path. 
// #ifdef _WIN32 { string mp; if (optional p = getenv ("PATH")) { mp = move (*p); mp += ';'; } mp += "/bin"; setenv ("PATH", mp); } #endif // On POSIX ignore SIGPIPE which is signaled to a pipe-writing process if // the pipe reading end is closed. Note that by default this signal // terminates a process. Also note that there is no way to disable this // behavior on a file descriptor basis or for the write() function call. // // On Windows disable displaying error reporting dialog box. Note that the // error mode is inherited by child processes. // #ifndef _WIN32 if (signal (SIGPIPE, SIG_IGN) == SIG_ERR) fail << "unable to ignore broken pipe (SIGPIPE) signal: " << system_error (errno, std::generic_category ()); // Sanitize. #else SetErrorMode (SetErrorMode (0) | // Returns the current mode. SEM_FAILCRITICALERRORS | SEM_NOGPFAULTERRORBOX); #endif cli::argv_scanner scan (argc, argv, true); ops.parse (scan); verb = ops.verbose (); // @@ systemd 231 added JOURNAL_STREAM environment variable which allows // detecting if stderr is connected to the journal. // if (ops.systemd_daemon ()) systemd_diagnostics (false); // Version. // if (ops.version ()) { cout << "bbot-worker " << BBOT_VERSION_ID << endl << "libbbot " << LIBBBOT_VERSION_ID << endl << "libbpkg " << LIBBPKG_VERSION_ID << endl << "libbutl " << LIBBUTL_VERSION_ID << endl << "Copyright (c) " << BBOT_COPYRIGHT << "." << endl << "TBC; All rights reserved" << endl; return 0; } // Help. // if (ops.help ()) { pager p ("bbot-worker help", false); print_bbot_worker_usage (p.stream ()); // If the pager failed, assume it has issued some diagnostics. // return p.wait () ? 0 : 1; } // Figure out our mode. 
// if (ops.bootstrap () && ops.startup ()) fail << "--bootstrap and --startup are mutually exclusive"; enum class mode {boot, start, build} m (mode::build); if (ops.bootstrap ()) m = mode::boot; if (ops.startup ()) m = mode::start; if (ops.systemd_daemon ()) { info << "bbot worker " << BBOT_VERSION_ID; } // Figure out our path (used for re-exec). // argv0 = process::path_search (argv[0], true); // Sort out the build directory. // if (ops.build_specified ()) change_wd (trace, nullptr /* log */, ops.build (), true /* create */); // Sort out the environment directory. // try { env_dir = ops.environments_specified () ? ops.environments () : dir_path::home_directory (); if (!dir_exists (env_dir)) throw_generic_error (ENOENT); } catch (const system_error& e) { fail << "invalid environment directory: " << e; } int r (1); switch (m) { case mode::boot: r = bootstrap (); break; case mode::start: r = startup (); break; case mode::build: r = build (static_cast (argc), const_cast (argv)); break; } return r; } catch (const failed&) { return 1; // Diagnostics has already been issued. } catch (const cli::exception& e) { error << e; return 1; } int main (int argc, char* argv[]) { return bbot::main (argc, argv); }