From 18394bc05dc4cadb2dc193cfeb78598c70447869 Mon Sep 17 00:00:00 2001
From: Boris Kolpackov
Date: Wed, 19 Oct 2022 10:26:22 +0200
Subject: Add support for post hoc prerequisites

Unlike normal and ad hoc prerequisites, a post hoc prerequisite is built
after the target, not before. It may also form a dependency cycle together
with normal/ad hoc prerequisites. In other words, all this form of
dependency guarantees is that a post hoc prerequisite will be built if its
dependent target is built.

See the NEWS file for details and an example.
---
 NEWS                           |  34 ++++++-
 libbuild2/algorithm.cxx        | 214 +++++++++++++++++++++++++++++++----------
 libbuild2/algorithm.hxx        |  19 +++-
 libbuild2/algorithm.ixx        |  20 +++-
 libbuild2/config/operation.cxx |   6 +-
 libbuild2/context.cxx          |   4 +
 libbuild2/context.hxx          |  27 +++++-
 libbuild2/dist/operation.cxx   |  30 +++---
 libbuild2/dist/rule.cxx        |   4 +
 libbuild2/operation.cxx        | 199 +++++++++++++++++++++++++++++++-------
 libbuild2/operation.hxx        |  34 +++----
 libbuild2/target.cxx           |   7 +-
 libbuild2/target.hxx           |  13 ++-
 libbuild2/types.hxx            |   2 +
 14 files changed, 476 insertions(+), 137 deletions(-)

diff --git a/NEWS b/NEWS
index 4a42ec3..180056c 100644
--- a/NEWS
+++ b/NEWS
@@ -1,9 +1,41 @@
-Version 0.15.0
+Version 0.16.0
 
   * The in.substitution variable has been renamed to in.mode. The original
    name is still recognized for backwards compatibility.
 
+  * Support for post hoc prerequisites.
+
+    Unlike normal and ad hoc prerequisites, a post hoc prerequisite is built
+    after the target, not before. It may also form a dependency cycle together
+    with normal/ad hoc prerequisites. In other words, all this form of
+    dependency guarantees is that a post hoc prerequisite will be built if its
+    dependent target is built.
+
+    A canonical example where this can be useful is a library with a plugin:
+    the plugin depends on the library while the library would like to make
+    sure the plugin is built whenever the library is built so that programs
+    that link the library can be executed without having to specify an
+    explicit dependency on the plugin (at least for the dynamic linking case):
+
+      lib{hello}: ...
+      lib{hello-plugin}: ... lib{hello}
+      libs{hello}: libs{hello-plugin}: include = posthoc
+
+    Note that there is no guarantee that post hoc prerequisites will be built
+    before the dependents of the target "see" it as built. Rather, it is
+    guaranteed that post hoc prerequisites will be built before the end of the
+    overall build (more precisely, before the current operation completes).
+    As a result, post hoc prerequisites should not be relied upon if the
+    result (for example, a source code generator) is expected to be used
+    during the build (more precisely, within the same operation).
+
+    Note also that the post hoc semantics is not the same as order-only in
+    GNU make. In fact, it is an even more "relaxed" form of dependency.
+    Specifically, while an order-only prerequisite is guaranteed to be built
+    before the target, a post hoc prerequisite is only guaranteed to be built
+    before the end of the overall build.
+
 Version 0.15.0
 
   * Generated C/C++ headers and ad hoc sources are now updated during match.

diff --git a/libbuild2/algorithm.cxx b/libbuild2/algorithm.cxx
index 696a09d..16a6728 100644
--- a/libbuild2/algorithm.cxx
+++ b/libbuild2/algorithm.cxx
@@ -812,6 +812,69 @@ namespace build2
     return re;
   }
 
+  // If anything goes wrong, set target state to failed and return false.
+  //
+  // Note: must be called while holding target_lock.
+  //
+  static bool
+  match_posthoc (action a, target& t)
+  {
+    // The plan is to, while holding the lock, search and collect all the post
+    // hoc prerequisites and add an entry to context::current_posthoc_targets.
+    // The actual matching happens as a post-pass in the meta-operation's
+    // match function.
+    //
+    // While it may seem like we could do matching here by unlocking (or
+    // unstacking) the lock for this target, that will only work for simple
+    // cases. In particular, consider:
+    //
+    // lib{foo}: ...
+    // lib{plug}: ... lib{foo}
+    // libs{foo}: libs{plug}: include = posthoc
+    //
+    // The chain we will end up with:
+    //
+    // lib{foo}->libs{foo}=>libs{plug}->lib{foo}
+    //
+    // This will trip up the cycle detection for group lib{foo}, not for
+    // libs{foo}.
+    //
+    // In the end, matching (and execution) "inline" (i.e., as we match/
+    // execute the corresponding target) appears to be unworkable in the
+    // face of cycles.
+
+    // @@ Anything we need to do for group members (see through)? Feels quite
+    // far-fetched.
+    //
+    vector<const target*> pts;
+    try
+    {
+      for (const prerequisite& p: group_prerequisites (t))
+      {
+        if (include (a, t, p) == include_type::posthoc)
+        {
+          pts.push_back (&search (t, p)); // May fail.
+        }
+      }
+    }
+    catch (const failed&)
+    {
+      t.state[a].state = target_state::failed;
+      return false;
+    }
+
+    if (!pts.empty ())
+    {
+      context& ctx (t.ctx);
+
+      mlock l (ctx.current_posthoc_targets_mutex);
+      ctx.current_posthoc_targets.push_back (
+        context::posthoc_target {a, t, move (pts)});
+    }
+
+    return true;
+  }
+
   // If step is true then perform only one step of the match/apply sequence.
   //
   // If try_match is true, then indicate whether there is a rule match with
@@ -977,7 +1040,20 @@ namespace build2
       return make_pair (false, target_state::unknown);
 
     if (task_count == nullptr)
-      return match_impl (l, false /* step */, try_match);
+    {
+      pair<bool, target_state> r (match_impl (l, false /*step*/, try_match));
+
+      if (r.first &&
+          r.second != target_state::failed &&
+          l.offset == target::offset_applied &&
+          ct.has_group_prerequisites ()) // Already matched.
+      {
+        if (!match_posthoc (a, *l.target))
+          r.second = target_state::failed;
+      }
+
+      return r;
+    }
 
     // Pass "disassembled" lock since the scheduler queue doesn't support
     // task destruction.
@@ -1003,9 +1079,18 @@ namespace build2
         {
           phase_lock pl (t.ctx, run_phase::match); // Throws.
           {
+            // Note: target_lock must be unlocked within the match phase.
+            //
             target_lock l {a, &t, offset, first}; // Reassemble.
-            match_impl (l, false /* step */, try_match);
-            // Unlock within the match phase.
+
+            pair<bool, target_state> r (
+              match_impl (l, false /* step */, try_match));
+
+            if (r.first &&
+                r.second != target_state::failed &&
+                l.offset == target::offset_applied &&
+                t.has_group_prerequisites ()) // Already matched.
+              match_posthoc (a, t);
           }
         }
         catch (const failed&) {} // Phase lock failure.
@@ -1033,7 +1118,7 @@ namespace build2
   }
 
   static group_view
-  resolve_members_impl (action a, const target& g, target_lock&& l)
+  resolve_members_impl (action a, const target& g, target_lock l)
   {
     // Note that we will be unlocked if the target is already applied.
     //
@@ -1048,9 +1133,11 @@ namespace build2
     {
       // Match (locked).
       //
-      if (match_impl (l, true).second == target_state::failed)
+      if (match_impl (l, true /* step */).second == target_state::failed)
        throw failed ();
 
+      // Note: only matched so no call to match_posthoc().
+
       if ((r = g.group_members (a)).members != nullptr)
        break;
 
@@ -1082,7 +1169,16 @@ namespace build2
       // Apply (locked).
// - if (match_impl (l, true).second == target_state::failed) + pair s (match_impl (l, true /* step */)); + + if (s.second != target_state::failed && + g.has_group_prerequisites ()) // Already matched. + { + if (!match_posthoc (a, *l.target)) + s.second = target_state::failed; + } + + if (s.second == target_state::failed) throw failed (); if ((r = g.group_members (a)).members != nullptr) @@ -1152,9 +1248,22 @@ namespace build2 } void - resolve_group_impl (action, const target&, target_lock l) + resolve_group_impl (action a, const target& t, target_lock l) { - match_impl (l, true /* step */, true /* try_match */); + pair r ( + match_impl (l, true /* step */, true /* try_match */)); + + if (r.first && + r.second != target_state::failed && + l.offset == target::offset_applied && + t.has_group_prerequisites ()) // Already matched. + { + if (!match_posthoc (a, *l.target)) + r.second = target_state::failed; + } + + if (r.first && r.second == target_state::failed) + throw failed (); } template @@ -1220,7 +1329,7 @@ namespace build2 } void - match_members (action a, target& t, const target* const* ts, size_t n) + match_members (action a, const target& t, const target* const* ts, size_t n) { // Pretty much identical to match_prerequisite_range() except we don't // search. @@ -1254,7 +1363,7 @@ namespace build2 void match_members (action a, - target& t, + const target& t, prerequisite_targets& ts, size_t s, pair imv) @@ -2118,6 +2227,7 @@ namespace build2 size_t exec (ctx.count_executed ()); size_t busy (ctx.count_busy ()); + optional r; if (s.task_count.compare_exchange_strong ( tc, busy, @@ -2130,8 +2240,9 @@ namespace build2 { // There could still be scope operations. // - if (t.is_a ()) - execute_recipe (a, t, nullptr /* recipe */); + r = t.is_a () + ? execute_recipe (a, t, nullptr /* recipe */) + : s.state; s.task_count.store (exec, memory_order_release); ctx.sched.resume (s.task_count); @@ -2139,23 +2250,25 @@ namespace build2 else { if (task_count == nullptr) - return execute_impl (a, t); - - // Pass our diagnostics stack (this is safe since we expect the - // caller to wait for completion before unwinding its diag stack). - // - if (ctx.sched.async (start_count, - *task_count, - [a] (const diag_frame* ds, target& t) - { - diag_frame::stack_guard dsg (ds); - execute_impl (a, t); - }, - diag_frame::stack (), - ref (t))) - return target_state::unknown; // Queued. - - // Executed synchronously, fall through. + r = execute_impl (a, t); + else + { + // Pass our diagnostics stack (this is safe since we expect the + // caller to wait for completion before unwinding its diag stack). + // + if (ctx.sched.async (start_count, + *task_count, + [a] (const diag_frame* ds, target& t) + { + diag_frame::stack_guard dsg (ds); + execute_impl (a, t); + }, + diag_frame::stack (), + ref (t))) + return target_state::unknown; // Queued. + + // Executed synchronously, fall through. + } } } else @@ -2166,7 +2279,7 @@ namespace build2 else assert (tc == exec); } - return t.executed_state (a, false); + return r ? 
*r : t.executed_state (a, false /* fail */); } target_state @@ -2187,6 +2300,7 @@ namespace build2 size_t exec (ctx.count_executed ()); size_t busy (ctx.count_busy ()); + optional r; if (s.task_count.compare_exchange_strong ( tc, busy, @@ -2196,31 +2310,31 @@ namespace build2 if (s.state == target_state::unknown) { if (task_count == nullptr) - return execute_impl (a, t); - - if (ctx.sched.async (start_count, - *task_count, - [a] (const diag_frame* ds, target& t) - { - diag_frame::stack_guard dsg (ds); - execute_impl (a, t); - }, - diag_frame::stack (), - ref (t))) - return target_state::unknown; // Queued. - - // Executed synchronously, fall through. + r = execute_impl (a, t); + else + { + if (ctx.sched.async (start_count, + *task_count, + [a] (const diag_frame* ds, target& t) + { + diag_frame::stack_guard dsg (ds); + execute_impl (a, t); + }, + diag_frame::stack (), + ref (t))) + return target_state::unknown; // Queued. + + // Executed synchronously, fall through. + } } else { assert (s.state == target_state::unchanged || s.state == target_state::failed); - if (s.state == target_state::unchanged) - { - if (t.is_a ()) - execute_recipe (a, t, nullptr /* recipe */); - } + r = s.state == target_state::unchanged && t.is_a () + ? execute_recipe (a, t, nullptr /* recipe */) + : s.state; s.task_count.store (exec, memory_order_release); ctx.sched.resume (s.task_count); @@ -2234,7 +2348,7 @@ namespace build2 else assert (tc == exec); } - return t.executed_state (a, false); + return r ? *r : t.executed_state (a, false /* fail */); } bool diff --git a/libbuild2/algorithm.hxx b/libbuild2/algorithm.hxx index e558d3a..756c3fe 100644 --- a/libbuild2/algorithm.hxx +++ b/libbuild2/algorithm.hxx @@ -195,6 +195,8 @@ namespace build2 explicit operator bool () const {return target != nullptr;} + // Note: achieved offset is preserved. + // void unlock (); @@ -374,6 +376,12 @@ namespace build2 pair match_sync (action, const target&, unmatch); + // As above but without incrementing the target's dependents count. Should + // be executed with execute_direct_*(). + // + target_state + match_direct_sync (action, const target&, bool fail = true); + // Start asynchronous match. Return target_state::postponed if the // asynchronous operation has been started and target_state::busy if the // target has already been busy. Regardless of the result, match_complete() @@ -486,11 +494,11 @@ namespace build2 // target pointers are skipped. // LIBBUILD2_SYMEXPORT void - match_members (action, target&, const target* const*, size_t); + match_members (action, const target&, const target* const*, size_t); template inline void - match_members (action a, target& t, const target* (&ts)[N]) + match_members (action a, const target& t, const target* (&ts)[N]) { match_members (a, t, ts, N); } @@ -501,7 +509,7 @@ namespace build2 // LIBBUILD2_SYMEXPORT void match_members (action a, - target& t, + const target& t, prerequisite_targets& ts, size_t start = 0, pair include = {0, 0}); @@ -613,7 +621,7 @@ namespace build2 // translates target_state::failed to the failed exception. // target_state - execute_direct_sync (action, const target&); + execute_direct_sync (action, const target&, bool fail = true); target_state execute_direct_async (action, const target&, @@ -795,8 +803,9 @@ namespace build2 // Call straight or reverse depending on the current mode. 
// + template target_state - execute_members (action, const target&, const target*[], size_t); + execute_members (action, const target&, T[], size_t); template inline target_state diff --git a/libbuild2/algorithm.ixx b/libbuild2/algorithm.ixx index 417a10e..7c87a72 100644 --- a/libbuild2/algorithm.ixx +++ b/libbuild2/algorithm.ixx @@ -424,6 +424,19 @@ namespace build2 return r; } + inline target_state + match_direct_sync (action a, const target& t, bool fail) + { + assert (t.ctx.phase == run_phase::match); + + target_state r (match_impl (a, t, 0, nullptr).second); + + if (fail && r == target_state::failed) + throw failed (); + + return r; + } + inline pair try_match_sync (action a, const target& t, bool fail) { @@ -787,7 +800,7 @@ namespace build2 execute_direct_impl (action, const target&, size_t, atomic_count*); inline target_state - execute_direct_sync (action a, const target& t) + execute_direct_sync (action a, const target& t, bool fail) { target_state r (execute_direct_impl (a, t, 0, nullptr)); @@ -800,7 +813,7 @@ namespace build2 r = t.executed_state (a, false); } - if (r == target_state::failed) + if (r == target_state::failed && fail) throw failed (); return r; @@ -960,8 +973,9 @@ namespace build2 p.first, static_cast (p.second)); } + template inline target_state - execute_members (action a, const target& t, const target* ts[], size_t n) + execute_members (action a, const target& t, T ts[], size_t n) { return t.ctx.current_mode == execution_mode::first ? straight_execute_members (a, t, ts, n, 0) diff --git a/libbuild2/config/operation.cxx b/libbuild2/config/operation.cxx index c6fea07..3dd8729 100644 --- a/libbuild2/config/operation.cxx +++ b/libbuild2/config/operation.cxx @@ -874,7 +874,9 @@ namespace build2 fail (l) << "forwarding to source directory " << rs.src_path (); } else - load (params, rs, buildfile, out_base, src_base, l); // Normal load. + // Normal load. + // + perform_load (params, rs, buildfile, out_base, src_base, l); } static void @@ -894,7 +896,7 @@ namespace build2 ts.push_back (&rs); } else - search (params, rs, bs, bf, tk, l, ts); // Normal search. + perform_search (params, rs, bs, bf, tk, l, ts); // Normal search. } static void diff --git a/libbuild2/context.cxx b/libbuild2/context.cxx index f46a3eb..e44a688 100644 --- a/libbuild2/context.cxx +++ b/libbuild2/context.cxx @@ -737,6 +737,10 @@ namespace build2 dependency_count.store (0, memory_order_relaxed); target_count.store (0, memory_order_relaxed); skip_count.store (0, memory_order_relaxed); + + // Clear accumulated targets with post hoc prerequisites. + // + current_posthoc_targets.clear (); } bool run_phase_mutex:: diff --git a/libbuild2/context.hxx b/libbuild2/context.hxx index 1d46309..40a8bdd 100644 --- a/libbuild2/context.hxx +++ b/libbuild2/context.hxx @@ -388,12 +388,22 @@ namespace build2 const variable_overrides& var_overrides; // Project and relative scope. function_map& functions; - // Enter project-wide (as opposed to global) variable overrides. + // Current targets with post hoc prerequisites. // - void - enter_project_overrides (scope& rs, - const dir_path& out_base, - const variable_overrides&); + // Note that we don't expect many of these so a simple mutex should be + // sufficient. Note also that we may end up adding more entries as we + // match existing so use list for node and iterator stability. See + // match_poshoc() for details. 
+ // + struct posthoc_target + { + build2::action action; + reference_wrapper target; + vector prerequisite_targets; + }; + + list current_posthoc_targets; + mutex current_posthoc_targets_mutex; // Global scope. // @@ -639,6 +649,13 @@ namespace build2 optional module_context = nullptr, const loaded_modules_lock* inherited_mudules_lock = nullptr); + // Enter project-wide (as opposed to global) variable overrides. + // + void + enter_project_overrides (scope& rs, + const dir_path& out_base, + const variable_overrides&); + // Set current meta-operation and operation. // void diff --git a/libbuild2/dist/operation.cxx b/libbuild2/dist/operation.cxx index 91d2321..9a662df 100644 --- a/libbuild2/dist/operation.cxx +++ b/libbuild2/dist/operation.cxx @@ -85,7 +85,7 @@ namespace build2 if (auto* m = rs.find_module (module::name)) m->distributed = true; - load (vs, rs, bf, out_base, src_base, l); + perform_load (vs, rs, bf, out_base, src_base, l); } // Enter the specified source file as a target of type T. The path is @@ -301,17 +301,17 @@ namespace build2 const operation_info* poif (ops[pid]); ctx.current_operation (*poif, oif, false /* diag_noise */); action a (dist_id, poif->id, oif->id); - match (params, a, ts, - 1 /* diag (failures only) */, - false /* progress */); + perform_match (params, a, ts, + 1 /* diag (failures only) */, + false /* progress */); } } ctx.current_operation (*oif, nullptr, false /* diag_noise */); action a (dist_id, oif->id); - match (params, a, ts, - 1 /* diag (failures only) */, - false /* progress */); + perform_match (params, a, ts, + 1 /* diag (failures only) */, + false /* progress */); if (auto po = oif->post_operation) { @@ -320,9 +320,9 @@ namespace build2 const operation_info* poif (ops[pid]); ctx.current_operation (*poif, oif, false /* diag_noise */); action a (dist_id, poif->id, oif->id); - match (params, a, ts, - 1 /* diag (failures only) */, - false /* progress */); + perform_match (params, a, ts, + 1 /* diag (failures only) */, + false /* progress */); } } } @@ -1092,6 +1092,8 @@ namespace build2 // given the prescribed semantics of adhoc (match/execute but otherwise // ignore) is followed. // + // Note that we don't need to do anything for posthoc. + // if (i == include_type::excluded) { l5 ([&]{trace << "overriding exclusion of " << p;}); @@ -1116,11 +1118,11 @@ namespace build2 nullptr, // meta-operation pre &dist_operation_pre, &dist_load_load, - &search, // normal search - nullptr, // no match (see dist_execute()). + &perform_search, // normal search + nullptr, // no match (see dist_execute()). &dist_load_execute, - nullptr, // operation post - nullptr, // meta-operation post + nullptr, // operation post + nullptr, // meta-operation post &dist_include }; diff --git a/libbuild2/dist/rule.cxx b/libbuild2/dist/rule.cxx index ac3d440..e47f1f8 100644 --- a/libbuild2/dist/rule.cxx +++ b/libbuild2/dist/rule.cxx @@ -79,6 +79,10 @@ namespace build2 !p.dir.sub (out_root)) continue; + // @@ TODO: this can actually be order-dependent: for example + // libs{} prerequisite may be unknown because we haven't + // matched the lib{} group yet. 
+ // fail << "prerequisite " << k << " is not existing source file " << "nor known output target" << endf; } diff --git a/libbuild2/operation.cxx b/libbuild2/operation.cxx index fc569a9..908ebd6 100644 --- a/libbuild2/operation.cxx +++ b/libbuild2/operation.cxx @@ -60,7 +60,7 @@ namespace build2 true, // bootstrap_outer nullptr, // meta-operation pre nullptr, // operation pre - &load, + &perform_load, nullptr, // search nullptr, // match nullptr, // execute @@ -72,12 +72,12 @@ namespace build2 // perform // void - load (const values&, - scope& root, - const path& bf, - const dir_path& out_base, - const dir_path& src_base, - const location&) + perform_load (const values&, + scope& root, + const path& bf, + const dir_path& out_base, + const dir_path& src_base, + const location&) { // Load project's root.build. // @@ -96,15 +96,15 @@ namespace build2 } void - search (const values&, - const scope&, - const scope& bs, - const path& bf, - const target_key& tk, - const location& l, - action_targets& ts) + perform_search (const values&, + const scope&, + const scope& bs, + const path& bf, + const target_key& tk, + const location& l, + action_targets& ts) { - tracer trace ("search"); + tracer trace ("perform_search"); context& ctx (bs.ctx); phase_lock pl (ctx, run_phase::match); @@ -248,9 +248,10 @@ namespace build2 } void - match (const values&, action a, action_targets& ts, uint16_t diag, bool prog) + perform_match (const values&, action a, action_targets& ts, + uint16_t diag, bool prog) { - tracer trace ("match"); + tracer trace ("perform_match"); if (ts.empty ()) return; @@ -311,6 +312,7 @@ namespace build2 // many we have started. Wait with unlocked phase to allow phase // switching. // + bool fail (false); size_t i (0), n (ts.size ()); { atomic_count task_count (0); @@ -326,16 +328,69 @@ namespace build2 // Bail out if the target has failed and we weren't instructed to // keep going. // - if (s == target_state::failed && !ctx.keep_going) + if (s == target_state::failed) { - ++i; - break; + fail = true; + + if (!ctx.keep_going) + { + ++i; + break; + } } } wg.wait (); } + // If we have any targets with post hoc prerequisites, match those. + // + // See match_posthoc() for the overall approach description. + // + bool posthoc_fail (false); + if (!ctx.current_posthoc_targets.empty () && (!fail || ctx.keep_going)) + { + // Note that on each iteration we may end up with new entries at the + // back. Since we start and end each iteration in serial execution, we + // don't need to mess with the mutex. + // + for (const context::posthoc_target& p: ctx.current_posthoc_targets) + { + action a (p.action); // May not be the same as argument action. + const target& t (p.target); + + auto df = make_diag_frame ( + [a, &t](const diag_record& dr) + { + if (verb != 0) + dr << info << "while matching to " << diag_do (t.ctx, a) + << " post hoc prerequisites of " << t; + }); + + // Cannot use normal match because incrementing dependency counts in + // the face of cycles does not work well (we will deadlock for the + // reverse execution mode). + // + // @@ TODO: match in parallel. + // + for (const target* pt: p.prerequisite_targets) + { + target_state s (match_direct_sync (a, *pt, false /* fail */)); + + if (s == target_state::failed) + { + posthoc_fail = true; + + if (!ctx.keep_going) + break; + } + } + + if (posthoc_fail && !ctx.keep_going) + break; + } + } + // Clear the progress if present. // if (mg) @@ -346,15 +401,25 @@ namespace build2 // We are now running serially. 
Re-examine targets that we have matched. // - bool fail (false); for (size_t j (0); j != n; ++j) { action_target& at (ts[j]); const target& t (at.as ()); - target_state s (j < i - ? match_complete (a, t, false) - : target_state::postponed); + // We cannot attribute post hoc failures to specific targets so it + // seems the best we can do is just fail them all. + // + target_state s; + if (j < i) + { + s = match_complete (a, t, false); + + if (posthoc_fail) + s = /*t.state[a].state =*/ target_state::failed; + } + else + s = target_state::postponed; + switch (s) { case target_state::postponed: @@ -405,16 +470,52 @@ namespace build2 } void - execute (const values&, action a, action_targets& ts, - uint16_t diag, bool prog) + perform_execute (const values&, action a, action_targets& ts, + uint16_t diag, bool prog) { - tracer trace ("execute"); + tracer trace ("perform_execute"); if (ts.empty ()) return; context& ctx (ts[0].as ().ctx); + bool posthoc_fail (false); + auto execute_posthoc = [&ctx, &posthoc_fail] () + { + for (const context::posthoc_target& p: ctx.current_posthoc_targets) + { + action a (p.action); // May not be the same as argument action. + const target& t (p.target); + + auto df = make_diag_frame ( + [a, &t](const diag_record& dr) + { + if (verb != 0) + dr << info << "while " << diag_doing (t.ctx, a) + << " post hoc prerequisites of " << t; + }); + + // @@ TODO: execute in parallel. + // + for (const target* pt: p.prerequisite_targets) + { + target_state s (execute_direct_sync (a, *pt, false /* fail */)); + + if (s == target_state::failed) + { + posthoc_fail = true; + + if (!ctx.keep_going) + break; + } + } + + if (posthoc_fail && !ctx.keep_going) + break; + } + }; + // Reverse the order of targets if the execution mode is 'last'. // if (ctx.current_mode == execution_mode::last) @@ -422,6 +523,7 @@ namespace build2 phase_lock pl (ctx, run_phase::execute); // Never switched. + bool fail (false); { // Tune the scheduler. // @@ -478,9 +580,18 @@ namespace build2 } } + // In the 'last' execution mode run post hoc first. + // + if (ctx.current_mode == execution_mode::last) + { + if (!ctx.current_posthoc_targets.empty ()) + execute_posthoc (); + } + // Similar logic to execute_members(): first start asynchronous // execution of all the top-level targets. // + if (!posthoc_fail || ctx.keep_going) { atomic_count task_count (0); wait_guard wg (ctx, task_count); @@ -496,13 +607,24 @@ namespace build2 // Bail out if the target has failed and we weren't instructed to // keep going. // - if (s == target_state::failed && !ctx.keep_going) - break; + if (s == target_state::failed) + { + fail = true; + + if (!ctx.keep_going) + break; + } } wg.wait (); } + if (ctx.current_mode == execution_mode::first) + { + if (!ctx.current_posthoc_targets.empty () && (!fail || ctx.keep_going)) + execute_posthoc (); + } + // We are now running serially. // @@ -538,15 +660,22 @@ namespace build2 // Re-examine all the targets and print diagnostics. // - bool fail (false); for (action_target& at: ts) { const target& t (at.as ()); + // Similar to match we cannot attribute post hoc failures to specific + // targets so it seems the best we can do is just fail them all. + // + if (!posthoc_fail) + at.state = t.executed_state (a, false); + else + at.state = /*t.state[a].state =*/ target_state::failed; + // Note that here we call executed_state() directly instead of // execute_complete() since we know there is no need to wait. 
// - switch ((at.state = t.executed_state (a, false))) + switch (at.state) { case target_state::unknown: { @@ -623,10 +752,10 @@ namespace build2 true, // bootstrap_outer nullptr, // meta-operation pre nullptr, // operation pre - &load, - &search, - &match, - &execute, + &perform_load, + &perform_search, + &perform_match, + &perform_execute, nullptr, // operation post nullptr, // meta-operation post nullptr // include diff --git a/libbuild2/operation.hxx b/libbuild2/operation.hxx index 4eb2658..2ff82ad 100644 --- a/libbuild2/operation.hxx +++ b/libbuild2/operation.hxx @@ -147,36 +147,36 @@ namespace build2 // scope. // LIBBUILD2_SYMEXPORT void - load (const values&, - scope&, - const path&, - const dir_path&, - const dir_path&, - const location&); + perform_load (const values&, + scope&, + const path&, + const dir_path&, + const dir_path&, + const location&); // Search and match the target. This is the default implementation // that does just that and adds a pointer to the target to the list. // LIBBUILD2_SYMEXPORT void - search (const values&, - const scope&, - const scope&, - const path&, - const target_key&, - const location&, - action_targets&); + perform_search (const values&, + const scope&, + const scope&, + const path&, + const target_key&, + const location&, + action_targets&); LIBBUILD2_SYMEXPORT void - match (const values&, action, action_targets&, - uint16_t diag, bool prog); + perform_match (const values&, action, action_targets&, + uint16_t diag, bool prog); // Execute the action on the list of targets. This is the default // implementation that does just that while issuing appropriate // diagnostics (unless quiet). // LIBBUILD2_SYMEXPORT void - execute (const values&, action, const action_targets&, - uint16_t diag, bool prog); + perform_execute (const values&, action, const action_targets&, + uint16_t diag, bool prog); LIBBUILD2_SYMEXPORT extern const meta_operation_info mo_noop; LIBBUILD2_SYMEXPORT extern const meta_operation_info mo_perform; diff --git a/libbuild2/target.cxx b/libbuild2/target.cxx index a466951..6bd6cc1 100644 --- a/libbuild2/target.cxx +++ b/libbuild2/target.cxx @@ -566,9 +566,10 @@ namespace build2 if (const string* v = cast_null (p.vars[ctx.var_include])) { - if (*v == "false") r = include_type::excluded; - else if (*v == "adhoc") r = include_type::adhoc; - else if (*v == "true") r = include_type::normal; + if (*v == "false") r = include_type::excluded; + else if (*v == "true") r = include_type::normal; + else if (*v == "adhoc") r = include_type::adhoc; + else if (*v == "posthoc") r = include_type::posthoc; else fail << "invalid " << *ctx.var_include << " variable value " << "'" << *v << "' specified for prerequisite " << p; diff --git a/libbuild2/target.hxx b/libbuild2/target.hxx index 6387b8f..8e1c3a1 100644 --- a/libbuild2/target.hxx +++ b/libbuild2/target.hxx @@ -38,16 +38,19 @@ namespace build2 // Prerequisite inclusion/exclusion (see include() function below). // + // Note that posthoc is handled internally and should normally be treated by + // the rules the same as excluded. + // class include_type { public: - enum value {excluded, adhoc, normal}; + enum value {excluded, posthoc, adhoc, normal}; include_type (value v): v_ (v) {} include_type (bool v): v_ (v ? 
normal : excluded) {} operator value () const {return v_;} - explicit operator bool () const {return v_ != excluded;} + explicit operator bool () const {return v_ == normal || v_ == adhoc;} private: value v_; @@ -713,6 +716,12 @@ namespace build2 static const size_t offset_executed = 5; // Recipe has been executed. static const size_t offset_busy = 6; // Match/execute in progress. + // @@ PERF There is a lot of data below that is only needed for "output" + // as opposed to "source" targets (data pads, prerequisite_targets, + // etc). Maybe we should move this stuff to an optional extra (like we + // have for the root scope). Maybe we could even allocate it as part of + // the target's memory block or some such? + // Inner/outer operation state. See for details. // class LIBBUILD2_SYMEXPORT opstate diff --git a/libbuild2/types.hxx b/libbuild2/types.hxx index c260aeb..6b4022d 100644 --- a/libbuild2/types.hxx +++ b/libbuild2/types.hxx @@ -15,6 +15,7 @@ #include #include +#include #include #include #include @@ -114,6 +115,7 @@ namespace build2 using std::multiset; using std::array; using std::vector; + using std::list; using butl::vector_view; // using butl::small_vector; // -- cgit v1.1
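
For illustration, below is a minimal sketch (not part of the commit above) of
how a typical rule-side prerequisite loop interacts with the new posthoc
value. Because include_type's boolean conversion is now true only for normal
and adhoc, a loop that simply tests the result of include() skips post hoc
prerequisites automatically; they are instead collected by match_posthoc()
and matched/executed as a post-pass in perform_match() and perform_execute().
The helper name match_regular_prerequisites() and the particular match_sync()
overload used are assumptions made for this sketch.

#include <libbuild2/target.hxx>
#include <libbuild2/algorithm.hxx>

namespace build2
{
  // Hypothetical helper (not part of this commit): search and match every
  // prerequisite of t except those that are excluded or marked as posthoc.
  // Assumes it is called during the match phase. Because include_type
  // converts to true only for normal and adhoc, post hoc prerequisites fall
  // out of this loop naturally and are left to match_posthoc() and the
  // end-of-operation post-pass.
  //
  static void
  match_regular_prerequisites (action a, target& t)
  {
    for (const prerequisite& p: group_prerequisites (t))
    {
      include_type pi (include (a, t, p));

      if (!pi) // False for excluded and posthoc.
        continue;

      const target& pt (search (t, p)); // Resolve prerequisite to target.
      match_sync (a, pt);               // Match it before t's recipe runs.
    }
  }
}

A rule that does need to react to post hoc prerequisites explicitly can
compare the include() result against include_type::posthoc, as
match_posthoc() itself does, but per the note added to target.hxx it should
normally treat them the same as excluded.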