aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorBoris Kolpackov <boris@codesynthesis.com>2019-08-22 16:08:45 +0200
committerBoris Kolpackov <boris@codesynthesis.com>2019-08-22 16:16:32 +0200
commit708b092956f10b5c05641f90d55b209e887d52de (patch)
tree418b2ba2b214fc6af857d75301e5301d4fe0322b
parent4f5b6cb7ed4e05e98cce7e692462f49e24b7a39a (diff)
Run phase
-rw-r--r--build2/bin/target.cxx14
-rw-r--r--build2/cc/compile-rule.cxx6
-rw-r--r--build2/cc/link-rule.cxx2
-rw-r--r--build2/cc/utility.cxx2
-rw-r--r--libbuild2/algorithm.cxx51
-rw-r--r--libbuild2/algorithm.hxx10
-rw-r--r--libbuild2/algorithm.ixx23
-rw-r--r--libbuild2/config/operation.cxx2
-rw-r--r--libbuild2/context.cxx87
-rw-r--r--libbuild2/context.hxx182
-rw-r--r--libbuild2/context.ixx18
-rw-r--r--libbuild2/file.ixx4
-rw-r--r--libbuild2/operation.cxx25
-rw-r--r--libbuild2/scope.hxx4
-rw-r--r--libbuild2/target.cxx12
-rw-r--r--libbuild2/target.ixx8
-rw-r--r--libbuild2/test/rule.cxx2
-rw-r--r--libbuild2/test/script/parser.cxx2
-rw-r--r--libbuild2/types.hxx1
-rw-r--r--libbuild2/variable.cxx6
-rw-r--r--libbuild2/variable.hxx12
-rw-r--r--libbuild2/variable.ixx2
22 files changed, 255 insertions, 220 deletions
diff --git a/build2/bin/target.cxx b/build2/bin/target.cxx
index 2ef3d87..6ea0988 100644
--- a/build2/bin/target.cxx
+++ b/build2/bin/target.cxx
@@ -264,13 +264,13 @@ namespace build2
{
// Casts are MT-aware (during serial load).
//
- E* e (phase == run_phase::load
+ E* e (ctx.phase == run_phase::load
? const_cast<E*> (ctx.targets.find<E> (dir, out, n))
: nullptr);
- A* a (phase == run_phase::load
+ A* a (ctx.phase == run_phase::load
? const_cast<A*> (ctx.targets.find<A> (dir, out, n))
: nullptr);
- S* s (phase == run_phase::load
+ S* s (ctx.phase == run_phase::load
? const_cast<S*> (ctx.targets.find<S> (dir, out, n))
: nullptr);
@@ -328,10 +328,10 @@ namespace build2
libul_factory (context& ctx,
const target_type&, dir_path dir, dir_path out, string n)
{
- libua* a (phase == run_phase::load
+ libua* a (ctx.phase == run_phase::load
? const_cast<libua*> (ctx.targets.find<libua> (dir, out, n))
: nullptr);
- libus* s (phase == run_phase::load
+ libus* s (ctx.phase == run_phase::load
? const_cast<libus*> (ctx.targets.find<libus> (dir, out, n))
: nullptr);
@@ -411,10 +411,10 @@ namespace build2
{
// Casts are MT-aware (during serial load).
//
- liba* a (phase == run_phase::load
+ liba* a (ctx.phase == run_phase::load
? const_cast<liba*> (ctx.targets.find<liba> (dir, out, n))
: nullptr);
- libs* s (phase == run_phase::load
+ libs* s (ctx.phase == run_phase::load
? const_cast<libs*> (ctx.targets.find<libs> (dir, out, n))
: nullptr);
diff --git a/build2/cc/compile-rule.cxx b/build2/cc/compile-rule.cxx
index 9dede21..f600f76 100644
--- a/build2/cc/compile-rule.cxx
+++ b/build2/cc/compile-rule.cxx
@@ -559,7 +559,7 @@ namespace build2
// @@ MT perf: so we are going to switch the phase and execute for
// any generated header.
//
- phase_switch ps (run_phase::execute);
+ phase_switch ps (t.ctx, run_phase::execute);
target_state ns (execute_direct (a, t));
if (ns != os && ns != target_state::unchanged)
@@ -698,7 +698,7 @@ namespace build2
// Start asynchronous matching of prerequisites. Wait with unlocked
// phase to allow phase switching.
//
- wait_guard wg (target::count_busy (), t[a].task_count, true);
+ wait_guard wg (t.ctx, target::count_busy (), t[a].task_count, true);
size_t start (pts.size ()); // Index of the first to be added.
for (prerequisite_member p: group_prerequisite_members (a, t))
@@ -5113,7 +5113,7 @@ namespace build2
{
// Switch the phase to load then create and load the subproject.
//
- phase_switch phs (run_phase::load);
+ phase_switch phs (rs.ctx, run_phase::load);
// Re-test again now that we are in exclusive phase (another thread
// could have already created and loaded the subproject).
diff --git a/build2/cc/link-rule.cxx b/build2/cc/link-rule.cxx
index 09109c2..57772a9 100644
--- a/build2/cc/link-rule.cxx
+++ b/build2/cc/link-rule.cxx
@@ -1198,7 +1198,7 @@ namespace build2
// Wait with unlocked phase to allow phase switching.
//
- wait_guard wg (target::count_busy (), t[a].task_count, true);
+ wait_guard wg (t.ctx, target::count_busy (), t[a].task_count, true);
i = start;
for (prerequisite_member p: group_prerequisite_members (a, t))
diff --git a/build2/cc/utility.cxx b/build2/cc/utility.cxx
index ff807c9..e9d4ce3 100644
--- a/build2/cc/utility.cxx
+++ b/build2/cc/utility.cxx
@@ -70,7 +70,7 @@ namespace build2
// Called by the compile rule during execute.
//
- return phase == run_phase::match && !exist
+ return x.ctx.phase == run_phase::match && !exist
? &search (x, tt, x.dir, x.out, x.name)
: search_existing (x.ctx, tt, x.dir, x.out, x.name);
}
diff --git a/libbuild2/algorithm.cxx b/libbuild2/algorithm.cxx
index 298f945..0af18c8 100644
--- a/libbuild2/algorithm.cxx
+++ b/libbuild2/algorithm.cxx
@@ -22,7 +22,7 @@ namespace build2
const target&
search (const target& t, const prerequisite& p)
{
- assert (phase == run_phase::match);
+ assert (t.ctx.phase == run_phase::match);
const target* r (p.target.load (memory_order_consume));
@@ -35,13 +35,15 @@ namespace build2
const target*
search_existing (const prerequisite& p)
{
- assert (phase == run_phase::match || phase == run_phase::execute);
+ context& ctx (p.scope.ctx);
+
+ assert (ctx.phase == run_phase::match || ctx.phase == run_phase::execute);
const target* r (p.target.load (memory_order_consume));
if (r == nullptr)
{
- r = search_existing (p.scope.ctx, p.key ());
+ r = search_existing (ctx, p.key ());
if (r != nullptr)
search_custom (p, *r);
@@ -53,7 +55,7 @@ namespace build2
const target&
search (const target& t, const prerequisite_key& pk)
{
- assert (phase == run_phase::match);
+ assert (t.ctx.phase == run_phase::match);
// If this is a project-qualified prerequisite, then this is import's
// business.
@@ -70,7 +72,7 @@ namespace build2
const target*
search_existing (context& ctx, const prerequisite_key& pk)
{
- assert (phase == run_phase::match || phase == run_phase::execute);
+ assert (ctx.phase == run_phase::match || ctx.phase == run_phase::execute);
return pk.proj
? import_existing (ctx, pk)
@@ -80,7 +82,7 @@ namespace build2
const target&
search (const target& t, name n, const scope& s)
{
- assert (phase == run_phase::match);
+ assert (t.ctx.phase == run_phase::match);
auto rp (s.find_target_type (n, location ()));
const target_type* tt (rp.first);
@@ -108,7 +110,8 @@ namespace build2
const target*
search_existing (const name& cn, const scope& s, const dir_path& out)
{
- assert (phase == run_phase::match || phase == run_phase::execute);
+ assert (s.ctx.phase == run_phase::match ||
+ s.ctx.phase == run_phase::execute);
name n (cn);
auto rp (s.find_target_type (n, location ()));
@@ -166,7 +169,7 @@ namespace build2
target_lock
lock_impl (action a, const target& ct, optional<scheduler::work_queue> wq)
{
- assert (phase == run_phase::match);
+ assert (ct.ctx.phase == run_phase::match);
// Most likely the target's state is (count_touched - 1), that is, 0 or
// previously executed, so let's start with that.
@@ -206,7 +209,7 @@ namespace build2
// to switch the phase to load. Which would result in a deadlock
// unless we release the phase.
//
- phase_unlock ul;
+ phase_unlock ul (ct.ctx);
e = sched.wait (busy - 1, task_count, *wq);
}
@@ -245,7 +248,7 @@ namespace build2
void
unlock_impl (action a, target& t, size_t offset)
{
- assert (phase == run_phase::match);
+ assert (t.ctx.phase == run_phase::match);
atomic_count& task_count (t[a].task_count);
@@ -642,7 +645,7 @@ namespace build2
try
{
- phase_lock pl (run_phase::match); // Can throw.
+ phase_lock pl (t.ctx, run_phase::match); // Throws.
{
target_lock l {a, &t, offset}; // Reassemble.
match_impl (l, false /* step */, try_match);
@@ -732,7 +735,7 @@ namespace build2
// to execute it now.
//
{
- phase_switch ps (run_phase::execute);
+ phase_switch ps (g.ctx, run_phase::execute);
execute_direct (a, g);
}
@@ -755,7 +758,7 @@ namespace build2
// We can be called during execute though everything should have been
// already resolved.
//
- switch (phase)
+ switch (g.ctx.phase)
{
case run_phase::match:
{
@@ -797,7 +800,7 @@ namespace build2
// Start asynchronous matching of prerequisites. Wait with unlocked phase
// to allow phase switching.
//
- wait_guard wg (target::count_busy (), t[a].task_count, true);
+ wait_guard wg (t.ctx, target::count_busy (), t[a].task_count, true);
size_t i (pts.size ()); // Index of the first to be added.
for (auto&& p: forward<R> (r))
@@ -854,7 +857,7 @@ namespace build2
// Pretty much identical to match_prerequisite_range() except we don't
// search.
//
- wait_guard wg (target::count_busy (), t[a].task_count, true);
+ wait_guard wg (t.ctx, target::count_busy (), t[a].task_count, true);
for (size_t i (0); i != n; ++i)
{
@@ -1740,14 +1743,14 @@ namespace build2
template <typename T>
target_state
- straight_execute_members (action a, atomic_count& tc,
+ straight_execute_members (context& ctx, action a, atomic_count& tc,
T ts[], size_t n, size_t p)
{
target_state r (target_state::unchanged);
// Start asynchronous execution of prerequisites.
//
- wait_guard wg (target::count_busy (), tc);
+ wait_guard wg (ctx, target::count_busy (), tc);
n += p;
for (size_t i (p); i != n; ++i)
@@ -1795,14 +1798,14 @@ namespace build2
template <typename T>
target_state
- reverse_execute_members (action a, atomic_count& tc,
+ reverse_execute_members (context& ctx, action a, atomic_count& tc,
T ts[], size_t n, size_t p)
{
// Pretty much as straight_execute_members() but in reverse order.
//
target_state r (target_state::unchanged);
- wait_guard wg (target::count_busy (), tc);
+ wait_guard wg (ctx, target::count_busy (), tc);
n = p - n;
for (size_t i (p); i != n; )
@@ -1846,19 +1849,19 @@ namespace build2
//
template LIBBUILD2_SYMEXPORT target_state
straight_execute_members<const target*> (
- action, atomic_count&, const target*[], size_t, size_t);
+ context&, action, atomic_count&, const target*[], size_t, size_t);
template LIBBUILD2_SYMEXPORT target_state
reverse_execute_members<const target*> (
- action, atomic_count&, const target*[], size_t, size_t);
+ context&, action, atomic_count&, const target*[], size_t, size_t);
template LIBBUILD2_SYMEXPORT target_state
straight_execute_members<prerequisite_target> (
- action, atomic_count&, prerequisite_target[], size_t, size_t);
+ context&, action, atomic_count&, prerequisite_target[], size_t, size_t);
template LIBBUILD2_SYMEXPORT target_state
reverse_execute_members<prerequisite_target> (
- action, atomic_count&, prerequisite_target[], size_t, size_t);
+ context&, action, atomic_count&, prerequisite_target[], size_t, size_t);
pair<optional<target_state>, const target*>
execute_prerequisites (const target_type* tt,
@@ -1877,7 +1880,7 @@ namespace build2
//
target_state rs (target_state::unchanged);
- wait_guard wg (target::count_busy (), t[a].task_count);
+ wait_guard wg (t.ctx, target::count_busy (), t[a].task_count);
for (size_t i (0); i != n; ++i)
{
diff --git a/libbuild2/algorithm.hxx b/libbuild2/algorithm.hxx
index 2ca83e3..d772469 100644
--- a/libbuild2/algorithm.hxx
+++ b/libbuild2/algorithm.hxx
@@ -613,18 +613,20 @@ namespace build2
//
template <typename T>
target_state
- straight_execute_members (action, atomic_count&, T[], size_t, size_t);
+ straight_execute_members (context&, action, atomic_count&,
+ T[], size_t, size_t);
template <typename T>
target_state
- reverse_execute_members (action, atomic_count&, T[], size_t, size_t);
+ reverse_execute_members (context&, action, atomic_count&,
+ T[], size_t, size_t);
template <typename T>
inline target_state
straight_execute_members (action a, const target& t,
T ts[], size_t c, size_t s)
{
- return straight_execute_members (a, t[a].task_count, ts, c, s);
+ return straight_execute_members (t.ctx, a, t[a].task_count, ts, c, s);
}
template <typename T>
@@ -632,7 +634,7 @@ namespace build2
reverse_execute_members (action a, const target& t,
T ts[], size_t c, size_t s)
{
- return reverse_execute_members (a, t[a].task_count, ts, c, s);
+ return reverse_execute_members (t.ctx, a, t[a].task_count, ts, c, s);
}
// Call straight or reverse depending on the current mode.
diff --git a/libbuild2/algorithm.ixx b/libbuild2/algorithm.ixx
index 8bd69c9..b409b7c 100644
--- a/libbuild2/algorithm.ixx
+++ b/libbuild2/algorithm.ixx
@@ -13,7 +13,8 @@ namespace build2
inline const target&
search_custom (const prerequisite& p, const target& t)
{
- assert (phase == run_phase::match || phase == run_phase::execute);
+ assert (t.ctx.phase == run_phase::match ||
+ t.ctx.phase == run_phase::execute);
const target* e (nullptr);
if (!p.target.compare_exchange_strong (
@@ -281,7 +282,7 @@ namespace build2
inline target_state
match (action a, const target& t, bool fail)
{
- assert (phase == run_phase::match);
+ assert (t.ctx.phase == run_phase::match);
target_state r (match (a, t, 0, nullptr).second);
@@ -296,7 +297,7 @@ namespace build2
inline pair<bool, target_state>
try_match (action a, const target& t, bool fail)
{
- assert (phase == run_phase::match);
+ assert (t.ctx.phase == run_phase::match);
pair<bool, target_state> r (
match (a, t, 0, nullptr, true /* try_match */));
@@ -315,7 +316,7 @@ namespace build2
inline bool
match (action a, const target& t, unmatch um)
{
- assert (phase == run_phase::match);
+ assert (t.ctx.phase == run_phase::match);
target_state s (match (a, t, 0, nullptr).second);
@@ -355,7 +356,7 @@ namespace build2
size_t sc, atomic_count& tc,
bool fail)
{
- assert (phase == run_phase::match);
+ assert (t.ctx.phase == run_phase::match);
target_state r (match (a, t, sc, &tc).second);
if (fail && !keep_going && r == target_state::failed)
@@ -406,7 +407,7 @@ namespace build2
inline void
match_recipe (target_lock& l, recipe r)
{
- assert (phase == run_phase::match && l.target != nullptr);
+ assert (l.target != nullptr && l.target->ctx.phase == run_phase::match);
(*l.target)[l.action].rule = nullptr; // No rule.
set_recipe (l, move (r));
@@ -416,7 +417,7 @@ namespace build2
inline recipe
match_delegate (action a, target& t, const rule& dr, bool try_match)
{
- assert (phase == run_phase::match);
+ assert (t.ctx.phase == run_phase::match);
// Note: we don't touch any of the t[a] state since that was/will be set
// for the delegating rule.
@@ -450,7 +451,7 @@ namespace build2
if (a.outer ())
a = a.inner_action ();
- switch (phase)
+ switch (t.ctx.phase)
{
case run_phase::match:
{
@@ -611,7 +612,8 @@ namespace build2
{
assert (a.outer ());
auto& p (t.prerequisite_targets[a]);
- return straight_execute_members (a.inner_action (),
+ return straight_execute_members (t.ctx,
+ a.inner_action (),
t[a].task_count,
p.data (),
c == 0 ? p.size () - s : c,
@@ -623,7 +625,8 @@ namespace build2
{
assert (a.outer ());
auto& p (t.prerequisite_targets[a]);
- return reverse_execute_members (a.inner_action (),
+ return reverse_execute_members (t.ctx,
+ a.inner_action (),
t[a].task_count,
p.data (),
c == 0 ? p.size () : c,
diff --git a/libbuild2/config/operation.cxx b/libbuild2/config/operation.cxx
index e235a3a..6abfd33 100644
--- a/libbuild2/config/operation.cxx
+++ b/libbuild2/config/operation.cxx
@@ -554,7 +554,7 @@ namespace build2
rs->ctx.current_oif (*oif);
- phase_lock pl (run_phase::match);
+ phase_lock pl (t.ctx, run_phase::match);
match (action (configure_id, id), t);
}
}
diff --git a/libbuild2/context.cxx b/libbuild2/context.cxx
index 720e8d8..82a2cbb 100644
--- a/libbuild2/context.cxx
+++ b/libbuild2/context.cxx
@@ -46,13 +46,14 @@ namespace build2
variable_pool var_pool;
variable_overrides var_overrides;
- data (context& c): scopes (c), targets (c), var_pool (true /* global */) {}
+ data (context& c): scopes (c), targets (c), var_pool (&c /* global */) {}
};
context::
context (scheduler& s, const strings& cmd_vars)
: data_ (new data (*this)),
sched (s),
+ phase_mutex (phase),
scopes (data_->scopes),
global_scope (create_global_scope (data_->scopes)),
targets (data_->targets),
@@ -546,11 +547,6 @@ namespace build2
scheduler sched;
- run_phase phase;
- run_phase_mutex phase_mutex;
-
- size_t load_generation;
-
bool run_phase_mutex::
lock (run_phase p)
{
@@ -576,13 +572,13 @@ namespace build2
//
if (u)
{
- phase = p;
+ phase_ = p;
r = !fail_;
}
- else if (phase != p)
+ else if (phase_ != p)
{
sched.deactivate (false /* external */);
- for (; phase != p; v->wait (l)) ;
+ for (; phase_ != p; v->wait (l)) ;
r = !fail_;
l.unlock (); // Important: activate() can block.
sched.activate (false /* external */);
@@ -631,10 +627,10 @@ namespace build2
{
condition_variable* v;
- if (lc_ != 0) {phase = run_phase::load; v = &lv_;}
- else if (mc_ != 0) {phase = run_phase::match; v = &mv_;}
- else if (ec_ != 0) {phase = run_phase::execute; v = &ev_;}
- else {phase = run_phase::load; v = nullptr;}
+ if (lc_ != 0) {phase_ = run_phase::load; v = &lv_;}
+ else if (mc_ != 0) {phase_ = run_phase::match; v = &mv_;}
+ else if (ec_ != 0) {phase_ = run_phase::execute; v = &ev_;}
+ else {phase_ = run_phase::load; v = nullptr;}
if (v != nullptr)
{
@@ -681,7 +677,7 @@ namespace build2
if (u)
{
- phase = n;
+ phase_ = n;
r = !fail_;
// Notify others that could be waiting for this phase.
@@ -695,7 +691,7 @@ namespace build2
else // phase != n
{
sched.deactivate (false /* external */);
- for (; phase != n; v->wait (l)) ;
+ for (; phase_ != n; v->wait (l)) ;
r = !fail_;
l.unlock (); // Important: activate() can block.
sched.activate (false /* external */);
@@ -735,22 +731,25 @@ namespace build2
phase_lock* phase_lock_instance;
phase_lock::
- phase_lock (run_phase p)
- : p (p)
+ phase_lock (context& c, run_phase p)
+ : ctx (c), phase (p)
{
- if (phase_lock* l = phase_lock_instance)
- assert (l->p == p);
+ phase_lock* pl (phase_lock_instance);
+
+ if (pl != nullptr && &pl->ctx == &ctx)
+ assert (pl->phase == phase);
else
{
- if (!phase_mutex.lock (p))
+ if (!ctx.phase_mutex.lock (phase))
{
- phase_mutex.unlock (p);
+ ctx.phase_mutex.unlock (phase);
throw failed ();
}
+ prev = pl;
phase_lock_instance = this;
- //text << this_thread::get_id () << " phase acquire " << p;
+ //text << this_thread::get_id () << " phase acquire " << phase;
}
}
@@ -759,8 +758,8 @@ namespace build2
{
if (phase_lock_instance == this)
{
- phase_lock_instance = nullptr;
- phase_mutex.unlock (p);
+ phase_lock_instance = prev;
+ ctx.phase_mutex.unlock (phase);
//text << this_thread::get_id () << " phase release " << p;
}
@@ -769,13 +768,15 @@ namespace build2
// phase_unlock
//
phase_unlock::
- phase_unlock (bool u)
+ phase_unlock (context& ctx, bool u)
: l (u ? phase_lock_instance : nullptr)
{
if (u)
{
- phase_lock_instance = nullptr;
- phase_mutex.unlock (l->p);
+ assert (&l->ctx == &ctx);
+
+ phase_lock_instance = nullptr; // Note: not l->prev.
+ ctx.phase_mutex.unlock (l->phase);
//text << this_thread::get_id () << " phase unlock " << l->p;
}
@@ -786,7 +787,7 @@ namespace build2
{
if (l != nullptr)
{
- bool r (phase_mutex.lock (l->p));
+ bool r (l->ctx.phase_mutex.lock (l->phase));
phase_lock_instance = l;
// Fail unless we are already failing. Note that we keep the phase
@@ -802,19 +803,22 @@ namespace build2
// phase_switch
//
phase_switch::
- phase_switch (run_phase n)
- : o (phase), n (n)
+ phase_switch (context& ctx, run_phase n)
+ : old_phase (ctx.phase), new_phase (n)
{
- if (!phase_mutex.relock (o, n))
+ phase_lock* pl (phase_lock_instance);
+ assert (&pl->ctx == &ctx);
+
+ if (!ctx.phase_mutex.relock (old_phase, new_phase))
{
- phase_mutex.relock (n, o);
+ ctx.phase_mutex.relock (new_phase, old_phase);
throw failed ();
}
- phase_lock_instance->p = n;
+ pl->phase = new_phase;
- if (n == run_phase::load) // Note: load lock is exclusive.
- load_generation++;
+ if (new_phase == run_phase::load) // Note: load lock is exclusive.
+ ctx.load_generation++;
//text << this_thread::get_id () << " phase switch " << o << " " << n;
}
@@ -822,18 +826,21 @@ namespace build2
phase_switch::
~phase_switch () noexcept (false)
{
+ phase_lock* pl (phase_lock_instance);
+ run_phase_mutex& pm (pl->ctx.phase_mutex);
+
// If we are coming off a failed load phase, mark the phase_mutex as
// failed to terminate all other threads since the build state may no
// longer be valid.
//
- if (n == run_phase::load && uncaught_exception ())
+ if (new_phase == run_phase::load && uncaught_exception ())
{
- mlock l (phase_mutex.m_);
- phase_mutex.fail_ = true;
+ mlock l (pm.m_);
+ pm.fail_ = true;
}
- bool r (phase_mutex.relock (n, o));
- phase_lock_instance->p = o;
+ bool r (pm.relock (new_phase, old_phase));
+ pl->phase = old_phase;
// Similar logic to ~phase_unlock().
//
diff --git a/libbuild2/context.hxx b/libbuild2/context.hxx
index f51645e..d61d063 100644
--- a/libbuild2/context.hxx
+++ b/libbuild2/context.hxx
@@ -41,79 +41,6 @@ namespace build2
//
LIBBUILD2_SYMEXPORT extern scheduler sched;
- // @@ CTX: document (backlinks, non-overlap etc). RW story.
- //
- class LIBBUILD2_SYMEXPORT context
- {
- struct data;
- unique_ptr<data> data_;
-
- public:
- scheduler& sched;
-
- const scope_map& scopes;
- const scope& global_scope;
-
- target_set& targets;
-
- const variable_pool& var_pool;
- const variable_overrides& var_overrides; // Project and relative scope.
-
- public:
- explicit
- context (scheduler&, const strings& cmd_vars = {});
-
- void
- current_mif (const meta_operation_info&);
-
- void
- current_oif (const operation_info& inner,
- const operation_info* outer = nullptr,
- bool diag_noise = true);
-
- context (context&&) = delete;
- context& operator= (context&&) = delete;
-
- context (const context&) = delete;
- context& operator= (const context&) = delete;
-
- ~context ();
- };
-
- // In order to perform each operation the build system goes through the
- // following phases:
- //
- // load - load the buildfiles
- // match - search prerequisites and match rules
- // execute - execute the matched rule
- //
- // The build system starts with a "serial load" phase and then continues
- // with parallel match and execute. Match, however, can be interrupted
- // both with load and execute.
- //
- // Match can be interrupted with "exclusive load" in order to load
- // additional buildfiles. Similarly, it can be interrupted with (parallel)
- // execute in order to build targetd required to complete the match (for
- // example, generated source code or source code generators themselves).
- //
- // Such interruptions are performed by phase change that is protected by
- // phase_mutex (which is also used to synchronize the state changes between
- // phases).
- //
- // Serial load can perform arbitrary changes to the build state. Exclusive
- // load, however, can only perform "island appends". That is, it can create
- // new "nodes" (variables, scopes, etc) but not (semantically) change
- // already existing nodes or invalidate any references to such (the idea
- // here is that one should be able to load additional buildfiles as long as
- // they don't interfere with the existing build state). The "islands" are
- // identified by the load_generation number (0 for the initial/serial
- // load). It is incremented in case of a phase switch and can be stored in
- // various "nodes" to verify modifications are only done "within the
- // islands".
- //
- LIBBUILD2_SYMEXPORT extern run_phase phase;
- LIBBUILD2_SYMEXPORT extern size_t load_generation;
-
// A "tri-mutex" that keeps all the threads in one of the three phases. When
// a thread wants to switch a phase, it has to wait for all the other
// threads to do the same (or release their phase locks). The load phase is
@@ -160,12 +87,11 @@ namespace build2
bool
relock (run_phase unlock, run_phase lock);
- public:
- run_phase_mutex ()
- : fail_ (false), lc_ (0), mc_ (0), ec_ (0)
- {
- phase = run_phase::load;
- }
+ private:
+ friend class context;
+
+ run_phase_mutex (run_phase& p)
+ : phase_ (p), fail_ (false), lc_ (0), mc_ (0), ec_ (0) {}
private:
friend struct phase_lock;
@@ -182,6 +108,8 @@ namespace build2
// When the mutex is unlocked (all three counters become zero, the phase
// is always changed to load (this is also the initial state).
//
+ run_phase& phase_;
+
mutex m_;
bool fail_;
@@ -197,7 +125,81 @@ namespace build2
mutex lm_;
};
- extern run_phase_mutex phase_mutex;
+ // @@ CTX: document (backlinks, non-overlap etc). RW story.
+ //
+ class LIBBUILD2_SYMEXPORT context
+ {
+ struct data;
+ unique_ptr<data> data_;
+
+ public:
+ scheduler& sched;
+
+ // In order to perform each operation the build system goes through the
+ // following phases:
+ //
+ // load - load the buildfiles
+ // match - search prerequisites and match rules
+ // execute - execute the matched rule
+ //
+ // The build system starts with a "serial load" phase and then continues
+ // with parallel match and execute. Match, however, can be interrupted
+ // both with load and execute.
+ //
+ // Match can be interrupted with "exclusive load" in order to load
+ // additional buildfiles. Similarly, it can be interrupted with (parallel)
+  // execute in order to build targets required to complete the match (for
+ // example, generated source code or source code generators themselves).
+ //
+ // Such interruptions are performed by phase change that is protected by
+ // phase_mutex (which is also used to synchronize the state changes
+ // between phases).
+ //
+ // Serial load can perform arbitrary changes to the build state. Exclusive
+ // load, however, can only perform "island appends". That is, it can
+ // create new "nodes" (variables, scopes, etc) but not (semantically)
+ // change already existing nodes or invalidate any references to such (the
+ // idea here is that one should be able to load additional buildfiles as
+ // long as they don't interfere with the existing build state). The
+ // "islands" are identified by the load_generation number (0 for the
+ // initial/serial load). It is incremented in case of a phase switch and
+ // can be stored in various "nodes" to verify modifications are only done
+ // "within the islands".
+ //
+ run_phase phase = run_phase::load;
+ run_phase_mutex phase_mutex;
+ size_t load_generation = 0;
+
+ // Scopes, targets, and variables.
+ //
+ const scope_map& scopes;
+ const scope& global_scope;
+
+ target_set& targets;
+
+ const variable_pool& var_pool;
+ const variable_overrides& var_overrides; // Project and relative scope.
+
+ public:
+ explicit
+ context (scheduler&, const strings& cmd_vars = {});
+
+ void
+ current_mif (const meta_operation_info&);
+
+ void
+ current_oif (const operation_info& inner,
+ const operation_info* outer = nullptr,
+ bool diag_noise = true);
+
+ context (context&&) = delete;
+ context& operator= (context&&) = delete;
+
+ context (const context&) = delete;
+ context& operator= (const context&) = delete;
+
+ ~context ();
+ };
// Grab a new phase lock releasing it on destruction. The lock can be
// "owning" or "referencing" (recursive).
@@ -253,7 +255,7 @@ namespace build2
//
struct LIBBUILD2_SYMEXPORT phase_lock
{
- explicit phase_lock (run_phase);
+ explicit phase_lock (context&, run_phase);
~phase_lock ();
phase_lock (phase_lock&&) = delete;
@@ -262,7 +264,9 @@ namespace build2
phase_lock& operator= (phase_lock&&) = delete;
phase_lock& operator= (const phase_lock&) = delete;
- run_phase p;
+ context& ctx;
+ phase_lock* prev; // From another context.
+ run_phase phase;
};
// Assuming we have a lock on the current phase, temporarily release it
@@ -270,7 +274,7 @@ namespace build2
//
struct LIBBUILD2_SYMEXPORT phase_unlock
{
- phase_unlock (bool unlock = true);
+ phase_unlock (context&, bool unlock = true);
~phase_unlock () noexcept (false);
phase_lock* l;
@@ -281,10 +285,10 @@ namespace build2
//
struct LIBBUILD2_SYMEXPORT phase_switch
{
- explicit phase_switch (run_phase);
+ explicit phase_switch (context&, run_phase);
~phase_switch () noexcept (false);
- run_phase o, n;
+ run_phase old_phase, new_phase;
};
// Wait for a task count optionally and temporarily unlocking the phase.
@@ -295,11 +299,12 @@ namespace build2
wait_guard (); // Empty.
- explicit
- wait_guard (atomic_count& task_count,
+ wait_guard (context&,
+ atomic_count& task_count,
bool phase = false);
- wait_guard (size_t start_count,
+ wait_guard (context&,
+ size_t start_count,
atomic_count& task_count,
bool phase = false);
@@ -314,6 +319,7 @@ namespace build2
wait_guard (const wait_guard&) = delete;
wait_guard& operator= (const wait_guard&) = delete;
+ context* ctx;
size_t start_count;
atomic_count* task_count;
bool phase;
diff --git a/libbuild2/context.ixx b/libbuild2/context.ixx
index f947bd7..4750de0 100644
--- a/libbuild2/context.ixx
+++ b/libbuild2/context.ixx
@@ -8,19 +8,19 @@ namespace build2
//
inline wait_guard::
wait_guard ()
- : start_count (0), task_count (nullptr), phase (false)
+ : ctx (nullptr), start_count (0), task_count (nullptr), phase (false)
{
}
inline wait_guard::
- wait_guard (atomic_count& tc, bool p)
- : wait_guard (0, tc, p)
+ wait_guard (context& c, atomic_count& tc, bool p)
+ : wait_guard (c, 0, tc, p)
{
}
inline wait_guard::
- wait_guard (size_t sc, atomic_count& tc, bool p)
- : start_count (sc), task_count (&tc), phase (p)
+ wait_guard (context& c, size_t sc, atomic_count& tc, bool p)
+ : ctx (&c), start_count (sc), task_count (&tc), phase (p)
{
}
@@ -33,7 +33,10 @@ namespace build2
inline wait_guard::
wait_guard (wait_guard&& x)
- : start_count (x.start_count), task_count (x.task_count), phase (x.phase)
+ : ctx (x.ctx),
+ start_count (x.start_count),
+ task_count (x.task_count),
+ phase (x.phase)
{
x.task_count = nullptr;
}
@@ -44,6 +47,7 @@ namespace build2
if (&x != this)
{
assert (task_count == nullptr);
+ ctx = x.ctx;
start_count = x.start_count; task_count = x.task_count; phase = x.phase;
x.task_count = nullptr;
}
@@ -53,7 +57,7 @@ namespace build2
inline void wait_guard::
wait ()
{
- phase_unlock u (phase);
+ phase_unlock u (*ctx, phase);
sched.wait (start_count, *task_count);
task_count = nullptr;
}
diff --git a/libbuild2/file.ixx b/libbuild2/file.ixx
index 59b53bb..564fc11 100644
--- a/libbuild2/file.ixx
+++ b/libbuild2/file.ixx
@@ -18,14 +18,14 @@ namespace build2
inline const target&
import (context& ctx, const prerequisite_key& pk)
{
- assert (phase == run_phase::match);
+ assert (ctx.phase == run_phase::match);
return *import (ctx, pk, false);
}
inline const target*
import_existing (context& ctx, const prerequisite_key& pk)
{
- assert (phase == run_phase::match || phase == run_phase::execute);
+ assert (ctx.phase == run_phase::match || ctx.phase == run_phase::execute);
return import (ctx, pk, true);
}
}
diff --git a/libbuild2/operation.cxx b/libbuild2/operation.cxx
index 879d416..289b893 100644
--- a/libbuild2/operation.cxx
+++ b/libbuild2/operation.cxx
@@ -101,9 +101,10 @@ namespace build2
{
tracer trace ("search");
- phase_lock pl (run_phase::match);
+ context& ctx (bs.ctx);
+ phase_lock pl (ctx, run_phase::match);
- const target* t (bs.ctx.targets.find (tk, trace));
+ const target* t (ctx.targets.find (tk, trace));
// Only do the implied buildfile if we haven't loaded one. Failed that we
// may try go this route even though we've concluded the implied buildfile
@@ -131,8 +132,13 @@ namespace build2
{
tracer trace ("match");
+ if (ts.empty ())
+ return;
+
+ context& ctx (ts[0].as_target ().ctx);
+
{
- phase_lock l (run_phase::match);
+ phase_lock l (ctx, run_phase::match);
// Setup progress reporting if requested.
//
@@ -165,7 +171,7 @@ namespace build2
size_t i (0), n (ts.size ());
{
atomic_count task_count (0);
- wait_guard wg (task_count, true);
+ wait_guard wg (ctx, task_count, true);
for (; i != n; ++i)
{
@@ -245,7 +251,7 @@ namespace build2
// Phase restored to load.
//
- assert (phase == run_phase::load);
+ assert (ctx.phase == run_phase::load);
}
void
@@ -254,6 +260,11 @@ namespace build2
{
tracer trace ("execute");
+ if (ts.empty ())
+ return;
+
+ context& ctx (ts[0].as_target ().ctx);
+
// Reverse the order of targets if the execution mode is 'last'.
//
if (current_mode == execution_mode::last)
@@ -268,7 +279,7 @@ namespace build2
default: assert (false); // Not yet supported.
}
- phase_lock pl (run_phase::execute); // Never switched.
+ phase_lock pl (ctx, run_phase::execute); // Never switched.
// Set the dry-run flag.
//
@@ -318,7 +329,7 @@ namespace build2
//
{
atomic_count task_count (0);
- wait_guard wg (task_count);
+ wait_guard wg (ctx, task_count);
for (const action_target& at: ts)
{
diff --git a/libbuild2/scope.hxx b/libbuild2/scope.hxx
index c3816dd..94cb506 100644
--- a/libbuild2/scope.hxx
+++ b/libbuild2/scope.hxx
@@ -342,7 +342,7 @@ namespace build2
scope&
rw () const
{
- assert (phase == run_phase::load);
+ assert (ctx.phase == run_phase::load);
return const_cast<scope&> (*this);
}
@@ -462,7 +462,7 @@ namespace build2
scope_map&
rw () const
{
- assert (phase == run_phase::load);
+ assert (ctx.phase == run_phase::load);
return const_cast<scope_map&> (*this);
}
diff --git a/libbuild2/target.cxx b/libbuild2/target.cxx
index 9749e94..977c75c 100644
--- a/libbuild2/target.cxx
+++ b/libbuild2/target.cxx
@@ -365,7 +365,7 @@ namespace build2
// We sometimes call insert() even if we expect to find an existing
// target in order to keep the same code (see cc/search_library()).
//
- assert (phase != run_phase::execute);
+ assert (ctx.phase != run_phase::execute);
optional<string> e (
tt.fixed_extension != nullptr
@@ -430,7 +430,7 @@ namespace build2
{
// The implied flag can only be cleared during the load phase.
//
- assert (phase == run_phase::load);
+ assert (ctx.phase == run_phase::load);
// Clear the implied flag.
//
@@ -536,7 +536,7 @@ namespace build2
//
const mtime_target* t (this);
- switch (phase)
+ switch (ctx.phase)
{
case run_phase::load: break;
case run_phase::match:
@@ -893,11 +893,11 @@ namespace build2
//
bool retest (false);
- assert (phase == run_phase::match);
+ assert (t.ctx.phase == run_phase::match);
{
// Switch the phase to load.
//
- phase_switch ps (run_phase::load);
+ phase_switch ps (t.ctx, run_phase::load);
// This is subtle: while we were fussing around another thread may
// have loaded the buildfile. So re-test now that we are in exclusive
@@ -937,7 +937,7 @@ namespace build2
}
}
}
- assert (phase == run_phase::match);
+ assert (t.ctx.phase == run_phase::match);
// If we loaded/implied the buildfile, examine the target again.
//
diff --git a/libbuild2/target.ixx b/libbuild2/target.ixx
index fea87f3..e7fef17 100644
--- a/libbuild2/target.ixx
+++ b/libbuild2/target.ixx
@@ -88,7 +88,7 @@ namespace build2
inline pair<bool, target_state> target::
matched_state_impl (action a) const
{
- assert (phase == run_phase::match);
+ assert (ctx.phase == run_phase::match);
// Note that the "tried" state is "final".
//
@@ -110,7 +110,7 @@ namespace build2
inline target_state target::
executed_state_impl (action a) const
{
- assert (phase == run_phase::execute);
+ assert (ctx.phase == run_phase::execute);
return (group_state (a) ? group->state : state)[a].state;
}
@@ -422,7 +422,7 @@ namespace build2
inline timestamp mtime_target::
load_mtime (const path& p) const
{
- assert (phase == run_phase::execute &&
+ assert (ctx.phase == run_phase::execute &&
!group_state (action () /* inner */));
duration::rep r (mtime_.load (memory_order_consume));
@@ -440,7 +440,7 @@ namespace build2
inline bool mtime_target::
newer (timestamp mt) const
{
- assert (phase == run_phase::execute);
+ assert (ctx.phase == run_phase::execute);
timestamp mp (mtime ());
diff --git a/libbuild2/test/rule.cxx b/libbuild2/test/rule.cxx
index 50eb105..61bfba4 100644
--- a/libbuild2/test/rule.cxx
+++ b/libbuild2/test/rule.cxx
@@ -490,7 +490,7 @@ namespace build2
wait_guard wg;
if (!dry_run)
- wg = wait_guard (target::count_busy (), t[a].task_count);
+ wg = wait_guard (t.ctx, target::count_busy (), t[a].task_count);
// Result vector.
//
diff --git a/libbuild2/test/script/parser.cxx b/libbuild2/test/script/parser.cxx
index a4aeff2..43c3849 100644
--- a/libbuild2/test/script/parser.cxx
+++ b/libbuild2/test/script/parser.cxx
@@ -2896,7 +2896,7 @@ namespace build2
if (exec_scope)
{
atomic_count task_count (0);
- wait_guard wg (task_count);
+ wait_guard wg (g->root.test_target.ctx, task_count);
// Start asynchronous execution of inner scopes keeping track of
// how many we have handled.
diff --git a/libbuild2/types.hxx b/libbuild2/types.hxx
index 61880ed..b14a365 100644
--- a/libbuild2/types.hxx
+++ b/libbuild2/types.hxx
@@ -334,7 +334,6 @@ namespace build2
LIBBUILD2_SYMEXPORT ostream&
operator<< (ostream&, run_phase); // utility.cxx
- LIBBUILD2_SYMEXPORT extern run_phase phase;
}
// In order to be found (via ADL) these have to be either in std:: or in
diff --git a/libbuild2/variable.cxx b/libbuild2/variable.cxx
index 84c1fb3..86109d2 100644
--- a/libbuild2/variable.cxx
+++ b/libbuild2/variable.cxx
@@ -1243,7 +1243,7 @@ namespace build2
const bool* o,
bool pat)
{
- assert (!global_ || phase == run_phase::load);
+ assert (!global_ || global_->phase == run_phase::load);
// Apply pattern.
//
@@ -1318,7 +1318,7 @@ namespace build2
bool retro,
bool match)
{
- assert (!global_ || phase == run_phase::load);
+ assert (!global_ || global_->phase == run_phase::load);
size_t pn (p.size ());
@@ -1432,7 +1432,7 @@ namespace build2
pair<reference_wrapper<value>, bool> variable_map::
insert (const variable& var, bool typed)
{
- assert (!global_ || phase == run_phase::load);
+ assert (!global_ || ctx->phase == run_phase::load);
auto p (m_.emplace (var, value_data (typed ? var.type : nullptr)));
value_data& r (p.first->second);
diff --git a/libbuild2/variable.hxx b/libbuild2/variable.hxx
index d153cb0..4befa51 100644
--- a/libbuild2/variable.hxx
+++ b/libbuild2/variable.hxx
@@ -1162,14 +1162,14 @@ namespace build2
void
clear () {map_.clear ();}
- variable_pool (): variable_pool (false) {}
+ variable_pool (): variable_pool (nullptr) {}
- // RW access.
+ // RW access (only for the global pool).
//
variable_pool&
rw () const
{
- assert (phase == run_phase::load);
+ assert (global_->phase == run_phase::load);
return const_cast<variable_pool&> (*this);
}
@@ -1251,15 +1251,15 @@ namespace build2
private:
std::multiset<pattern> patterns_;
- // Global pool flag.
+ // Global pool flag/context.
//
private:
friend class context;
explicit
- variable_pool (bool global): global_ (global) {}
+ variable_pool (context* global): global_ (global) {}
- bool global_;
+ context* global_;
};
}
diff --git a/libbuild2/variable.ixx b/libbuild2/variable.ixx
index f0bde09..e5353ed 100644
--- a/libbuild2/variable.ixx
+++ b/libbuild2/variable.ixx
@@ -764,7 +764,7 @@ namespace build2
{
// We assume typification is not modification so no version increment.
//
- if (phase == run_phase::load)
+ if (ctx->phase == run_phase::load)
{
if (v.type != var.type)
build2::typify (const_cast<value_data&> (v), *var.type, &var);