Diffstat (limited to 'libbuild2')

 -rw-r--r--  libbuild2/algorithm.cxx |  2
 -rw-r--r--  libbuild2/context.cxx   | 34
 -rw-r--r--  libbuild2/context.hxx   | 10
 -rw-r--r--  libbuild2/context.ixx   |  2
 -rw-r--r--  libbuild2/scheduler.cxx | 25
 -rw-r--r--  libbuild2/scheduler.hxx | 34
 -rw-r--r--  libbuild2/scheduler.ixx | 14
 -rw-r--r--  libbuild2/scheduler.txx | 38

 8 files changed, 128 insertions, 31 deletions
diff --git a/libbuild2/algorithm.cxx b/libbuild2/algorithm.cxx
index 3d8b89c..62c500d 100644
--- a/libbuild2/algorithm.cxx
+++ b/libbuild2/algorithm.cxx
@@ -284,7 +284,7 @@ namespace build2
// to switch the phase to load. Which would result in a deadlock
// unless we release the phase.
//
- phase_unlock u (ct.ctx, true /* unlock */, true /* delay */);
+ phase_unlock u (ct.ctx, true /* delay */);
e = ctx.sched->wait (busy - 1, task_count, u, *wq);
}
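
For context, the constructor change here folds the old bool unlock flag into
a now-nullable context pointer (see the context.hxx hunk below). A sketch of
how the old and new spellings correspond (illustrative only, not part of the
commit):

  // Before: phase_unlock (context&, bool unlock = true, bool delay = false)
  //
  phase_unlock u1 (ctx, true  /* unlock */, true /* delay */);
  phase_unlock u2 (ctx, false /* unlock */, true /* delay */); // No-op guard.

  // After: phase_unlock (context*, bool delay = false)
  //
  phase_unlock u1 (ctx, true /* delay */);     // unlock == true
  phase_unlock u2 (nullptr, true /* delay */); // unlock == false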
diff --git a/libbuild2/context.cxx b/libbuild2/context.cxx
index c0442f0..6e4fd6f 100644
--- a/libbuild2/context.cxx
+++ b/libbuild2/context.cxx
@@ -1124,35 +1124,35 @@ namespace build2
// phase_unlock
//
phase_unlock::
- phase_unlock (context& c, bool u, bool d)
- : ctx (u ? &c : nullptr), lock (nullptr)
+ phase_unlock (context* c, bool d)
+ : ctx (c), lock_ (nullptr)
{
- if (u && !d)
+ if (ctx != nullptr && !d)
unlock ();
}
void phase_unlock::
unlock ()
{
- if (ctx != nullptr && lock == nullptr)
+ if (ctx != nullptr && lock_ == nullptr)
{
- lock = phase_lock_instance;
- assert (&lock->ctx == ctx);
+ lock_ = phase_lock_instance;
+ assert (&lock_->ctx == ctx);
phase_lock_instance = nullptr; // Note: not lock->prev.
- ctx->phase_mutex.unlock (lock->phase);
+ ctx->phase_mutex.unlock (lock_->phase);
- //text << this_thread::get_id () << " phase unlock " << lock->phase;
+ //text << this_thread::get_id () << " phase unlock " << lock_->phase;
}
}
- phase_unlock::
- ~phase_unlock () noexcept (false)
+ void phase_unlock::
+ lock ()
{
- if (lock != nullptr)
+ if (lock_ != nullptr)
{
- bool r (ctx->phase_mutex.lock (lock->phase));
- phase_lock_instance = lock;
+ bool r (ctx->phase_mutex.lock (lock_->phase));
+ phase_lock_instance = lock_;
// Fail unless we are already failing. Note that we keep the phase
// locked since there will be phase_lock down the stack to unlock it.
@@ -1160,10 +1160,16 @@ namespace build2
if (!r && !uncaught_exception ())
throw failed ();
- //text << this_thread::get_id () << " phase lock " << lock->phase;
+ //text << this_thread::get_id () << " phase lock " << lock_->phase;
}
}
+ phase_unlock::
+ ~phase_unlock () noexcept (false)
+ {
+ lock ();
+ }
+
// phase_switch
//
phase_switch::
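
Worth noting: splitting the re-lock logic out of the destructor into a named
lock() member means phase_unlock now satisfies the informal unlock()/lock()
contract that scheduler::serialize() (added in scheduler.txx below) expects
of its template lock argument. A minimal sketch of that duck-typed contract
(hypothetical names, for illustration only):

  // Any type with these two members can serve as serialize()'s lock.
  //
  struct lock_like
  {
    void unlock () {} // Give up the resource before blocking.
    void lock ()   {} // Re-acquire it once we are serialized.
  };

  template <typename L>
  void serialize_sketch (L& l)
  {
    l.unlock ();
    // ... wait until we are the only active thread ...
    l.lock ();
  }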
diff --git a/libbuild2/context.hxx b/libbuild2/context.hxx
index 2dec54a..33fc892 100644
--- a/libbuild2/context.hxx
+++ b/libbuild2/context.hxx
@@ -849,14 +849,20 @@ namespace build2
//
struct LIBBUILD2_SYMEXPORT phase_unlock
{
- phase_unlock (context&, bool unlock = true, bool delay = false);
+ explicit phase_unlock (context*, bool delay = false);
+ explicit phase_unlock (context& ctx, bool delay = false)
+ : phase_unlock (&ctx, delay) {}
+
~phase_unlock () noexcept (false);
void
unlock ();
+ void
+ lock ();
+
context* ctx;
- phase_lock* lock;
+ phase_lock* lock_;
};
// Assuming we have a lock on the current phase, temporarily switch to a
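
A sketch of call sites for the reworked interface (illustrative, assuming a
context ctx in scope; not code from this commit):

  // Unlock immediately for the duration of the scope.
  //
  phase_unlock u1 (ctx);

  // Delay: stay locked until unlock() is called explicitly (for example,
  // by scheduler::wait() just before it actually suspends the thread).
  //
  phase_unlock u2 (ctx, true /* delay */);

  // No-op guard: with a NULL context, unlock(), lock(), and the destructor
  // all do nothing.
  //
  phase_unlock u3 (nullptr);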
diff --git a/libbuild2/context.ixx b/libbuild2/context.ixx
index 7b2a405..6c8c428 100644
--- a/libbuild2/context.ixx
+++ b/libbuild2/context.ixx
@@ -56,7 +56,7 @@ namespace build2
inline void wait_guard::
wait ()
{
- phase_unlock u (*ctx, phase, true /* delay */);
+ phase_unlock u (phase ? ctx : nullptr, true /* delay */);
ctx->sched->wait (start_count, *task_count, u);
task_count = nullptr;
}
diff --git a/libbuild2/scheduler.cxx b/libbuild2/scheduler.cxx
index 5027f90..e3fbcc1 100644
--- a/libbuild2/scheduler.cxx
+++ b/libbuild2/scheduler.cxx
@@ -93,12 +93,11 @@ namespace build2
}
void scheduler::
- deactivate (bool external)
+ deactivate_impl (bool external, lock&& rl)
{
- if (max_active_ == 1) // Serial execution.
- return;
+ // Note: assume non-serial execution.
- lock l (mutex_);
+ lock l (move (rl)); // Make sure unlocked on exception.
active_--;
waiting_++;
@@ -131,11 +130,10 @@ namespace build2
}
}
- void scheduler::
- activate (bool external, bool collision)
+ scheduler::lock scheduler::
+ activate_impl (bool external, bool collision)
{
- if (max_active_ == 1) // Serial execution.
- return;
+ // Note: assume non-serial execution.
lock l (mutex_);
@@ -160,6 +158,8 @@ namespace build2
if (shutdown_)
throw_generic_error (ECANCELED);
+
+ return l;
}
void scheduler::
@@ -207,7 +207,10 @@ namespace build2
deallocate (size_t n)
{
if (max_active_ == 1) // Serial execution.
+ {
+ assert (n == 0);
return;
+ }
lock l (mutex_);
active_ -= n;
@@ -216,13 +219,15 @@ namespace build2
size_t scheduler::
suspend (size_t start_count, const atomic_count& task_count)
{
+ assert (max_active_ != 1); // Suspend during serial execution?
+
wait_slot& s (
wait_queue_[
hash<const atomic_count*> () (&task_count) % wait_queue_size_]);
// This thread is no longer active.
//
- deactivate (false /* external */);
+ deactivate_impl (false /* external */, lock (mutex_));
// Note that the task count is checked while holding the lock. We also
// have to notify while holding the lock (see resume()). The aim here
@@ -259,7 +264,7 @@ namespace build2
// This thread is no longer waiting.
//
- activate (false /* external */, collision);
+ activate_impl (false /* external */, collision);
return tc;
}
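
The _impl refactoring above threads the scheduler mutex through the
deactivate/activate cycle: deactivate_impl() consumes an already-held lock
(releasing it on the way out, exceptions included), while activate_impl()
returns a freshly acquired one. A minimal sketch of this lock-handoff shape
(hypothetical names, assuming a std::mutex-based lock):

  #include <mutex>
  #include <utility>

  using lock = std::unique_lock<std::mutex>;

  std::mutex mutex_;

  void deactivate_impl (lock&& rl)
  {
    lock l (std::move (rl)); // Take ownership; unlocks even on exception.
    // ... mark this thread inactive under the lock ...
  }

  lock activate_impl ()
  {
    lock l (mutex_);
    // ... mark this thread active ...
    return l; // Hand the still-held lock back to the caller.
  }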
diff --git a/libbuild2/scheduler.hxx b/libbuild2/scheduler.hxx
index d3b8ceb..3cc206e 100644
--- a/libbuild2/scheduler.hxx
+++ b/libbuild2/scheduler.hxx
@@ -192,13 +192,15 @@ namespace build2
//
// The external flag indicates whether the wait is for an event external
// to the scheduler, that is, triggered by something other than one of the
- // threads managed by the scheduler.
+ // threads managed by the scheduler. This is used to suspend deadlock
+ // detection (which is based on progress, and progress cannot be
+ // measured for external events).
//
void
deactivate (bool external);
void
- activate (bool external, bool = false);
+ activate (bool external);
// Sleep for the specified duration, deactivating the thread before going
// to sleep and re-activating it after waking up (which means this
@@ -217,7 +219,7 @@ namespace build2
// Allocate additional active thread count to the current active thread,
// for example, to be "passed" to an external program:
//
- // scheduler::alloc_guard ag (ctx.sched, ctx.sched.max_active () / 2);
+ // scheduler::alloc_guard ag (*ctx.sched, ctx.sched->max_active () / 2);
// args.push_back ("-flto=" + to_string (1 + ag.n));
// run (args);
// ag.deallocate ();
@@ -242,6 +244,22 @@ namespace build2
void
deallocate (size_t);
+ // Similar to allocate() but reserve all the available threads, blocking
+ // until this becomes possible. The specified lock is unlocked before
+ // deactivating and re-locked after activating (this can be used to
+ // unlock the phase). Typical usage:
+ //
+ // scheduler::alloc_guard ag (*ctx.sched,
+ // phase_unlock (ctx, true /* delay */));
+ //
+ // Or, without unlocking the phase:
+ //
+ // scheduler::alloc_guard ag (*ctx.sched, phase_unlock (nullptr));
+ //
+ template <typename L>
+ size_t
+ serialize (L& lock);
+
struct alloc_guard
{
size_t n;
@@ -249,6 +267,10 @@ namespace build2
alloc_guard (): n (0), s_ (nullptr) {}
alloc_guard (scheduler& s, size_t m): n (s.allocate (m)), s_ (&s) {}
+ template <typename L,
+ typename std::enable_if<!std::is_integral<L>::value, int>::type = 0>
+ alloc_guard (scheduler& s, L&& l): n (s.serialize (l)), s_ (&s) {}
+
alloc_guard (alloc_guard&& x) noexcept
: n (x.n), s_ (x.s_) {x.s_ = nullptr;}
@@ -939,6 +961,12 @@ namespace build2
private:
optional<size_t>
wait_impl (size_t, const atomic_count&, work_queue);
+
+ void
+ deactivate_impl (bool, lock&&);
+
+ lock
+ activate_impl (bool, bool);
};
}
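
One subtlety in the alloc_guard addition above: without the enable_if
constraint, the new forwarding constructor would also be selected for
integral arguments (an exact template match beats the conversion to size_t),
hijacking calls meant for the count overload. A self-contained sketch of the
technique (hypothetical guard type, for illustration):

  #include <cstddef>
  #include <type_traits>

  struct any_lock { void lock () {} void unlock () {} };

  struct guard
  {
    explicit guard (std::size_t) {}  // Count overload.

    template <typename L,
              typename std::enable_if<!std::is_integral<L>::value,
                                      int>::type = 0>
    explicit guard (L&&) {}          // Lock overload.
  };

  int main ()
  {
    guard g1 (1);  // Integral: enable_if disables the template, so the
                   // size_t overload is chosen, as intended.
    any_lock l;
    guard g2 (l);  // Non-integral: the template overload is chosen.
  }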
diff --git a/libbuild2/scheduler.ixx b/libbuild2/scheduler.ixx
index 96eaee1..f46d035 100644
--- a/libbuild2/scheduler.ixx
+++ b/libbuild2/scheduler.ixx
@@ -44,6 +44,20 @@ namespace build2
return suspend (start_count, task_count);
}
+ inline void scheduler::
+ deactivate (bool external)
+ {
+ if (max_active_ != 1) // Serial execution.
+ deactivate_impl (external, lock (mutex_));
+ }
+
+ inline void scheduler::
+ activate (bool external)
+ {
+ if (max_active_ != 1) // Serial execution.
+ activate_impl (external, false /* collision */);
+ }
+
inline scheduler::queue_mark::
queue_mark (scheduler& s)
: tq_ (s.queue ())
diff --git a/libbuild2/scheduler.txx b/libbuild2/scheduler.txx
index 460c4d4..87c9384 100644
--- a/libbuild2/scheduler.txx
+++ b/libbuild2/scheduler.txx
@@ -137,4 +137,42 @@ namespace build2
if (tc.fetch_sub (1, memory_order_release) - 1 <= t.start_count)
s.resume (tc); // Resume waiters, if any.
}
+
+ template <typename L>
+ size_t scheduler::
+ serialize (L& el)
+ {
+ if (max_active_ == 1) // Serial execution.
+ return 0;
+
+ lock l (mutex_);
+
+ if (active_ == 1)
+ active_ = max_active_;
+ else
+ {
+ // Wait until we are the only active thread.
+ //
+ el.unlock ();
+
+ while (active_ != 1)
+ {
+ // While it would have been more efficient to implement this via the
+ // condition variable notifications, that logic is already twisted
+ // enough (and took a considerable time to debug). So for now we keep
+ // it simple and do sleep and re-check. Make the sleep external so as
+ // not to trip up the deadlock detection.
+ //
+ deactivate_impl (true /* external */, move (l));
+ active_sleep (std::chrono::milliseconds (10));
+ l = activate_impl (true /* external */, false /* collision */);
+ }
+
+ active_ = max_active_;
+ l.unlock (); // Important: unlock before re-locking the external lock!
+ el.lock ();
+ }
+
+ return max_active_ - 1;
+ }
}
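
Putting the pieces together, the intended call-site shape combines
serialize() (via the new alloc_guard constructor) with a delayed
phase_unlock. A hedged sketch based on the usage comments in scheduler.hxx
above (ctx, args, and run() assumed to be in scope):

  // Become the only active thread, releasing the phase while we wait, and
  // reserve all the other threads for an external tool.
  //
  scheduler::alloc_guard ag (*ctx.sched,
                             phase_unlock (ctx, true /* delay */));

  // serialize() returned max_active - 1, so 1 + ag.n is the total thread
  // count available to the tool.
  //
  args.push_back ("-flto=" + to_string (1 + ag.n));
  run (args);
  ag.deallocate ();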