-rw-r--r--   build2/b.cxx         |  6
-rw-r--r--   build2/context       |  4
-rw-r--r--   build2/context.cxx   |  4
-rw-r--r--   build2/scope         | 12
-rw-r--r--   build2/scope.cxx     | 95
-rw-r--r--   build2/types         |  7
-rw-r--r--   build2/variable      | 50
-rw-r--r--   build2/variable.cxx  |  3
-rw-r--r--   build2/variable.txx  | 69
9 files changed, 181 insertions, 69 deletions
diff --git a/build2/b.cxx b/build2/b.cxx
index 14258b3..01d9378 100644
--- a/build2/b.cxx
+++ b/build2/b.cxx
@@ -273,7 +273,7 @@ main (int argc, char* argv[])
 
   keep_going = !ops.serial_stop ();
 
-  // Start up the scheduler.
+  // Start up the scheduler and allocate lock shards.
   //
   size_t jobs (0);
 
@@ -296,6 +296,10 @@ main (int argc, char* argv[])
 
   sched.startup (jobs);
 
+  variable_cache_mutex_shard_size = sched.shard_size ();
+  variable_cache_mutex_shard.reset (
+    new shared_mutex[variable_cache_mutex_shard_size]);
+
   // Trace some overall environment information.
   //
   if (verb >= 5)
diff --git a/build2/context b/build2/context
index 3c731a5..79f04a2 100644
--- a/build2/context
+++ b/build2/context
@@ -253,10 +253,6 @@ namespace build2
   //
   extern atomic_count dependency_count;
 
-  // Variable override value cache.
-  //
-  extern variable_override_cache var_override_cache;
-
   // Reset the build state. In particular, this removes all the targets,
   // scopes, and variables.
   //
diff --git a/build2/context.cxx b/build2/context.cxx
index cbb2656..8b4fd52 100644
--- a/build2/context.cxx
+++ b/build2/context.cxx
@@ -60,8 +60,6 @@ namespace build2
 
   atomic_count dependency_count;
 
-  variable_override_cache var_override_cache;
-
   variable_overrides
   reset (const strings& cmd_vars)
   {
@@ -77,8 +75,6 @@ namespace build2
 
     variable_overrides vos;
 
-    var_override_cache.clear ();
-
     targets.clear ();
     sm.clear ();
     vp.clear ();
diff --git a/build2/scope b/build2/scope
index 64aaf24..2b86200 100644
--- a/build2/scope
+++ b/build2/scope
@@ -177,6 +177,18 @@ namespace build2
     //
     variable_type_map target_vars;
 
+    // Variable override cache (project root and global scope only).
+    //
+    // The key is the variable plus the innermost (scope-wise) variable map to
+    // which this override applies. See find_override() for details.
+    //
+    // Note: since it can be modified on any lookup (including during the
+    // execute phase), the cache is protected by its own mutex shard.
+
+    mutable
+    variable_cache<pair<const variable*, const variable_map*>>
+    override_cache;
+
     // Meta/operations supported by this project (set on the root
     // scope only).
     //
diff --git a/build2/scope.cxx b/build2/scope.cxx
index 6f21636..d7e9d8e 100644
--- a/build2/scope.cxx
+++ b/build2/scope.cxx
@@ -329,19 +329,7 @@ namespace build2
     if (inner_proj == nullptr)
       inner_proj = global_scope;
 
-    // Implementing proper caching is tricky so for now we are going to re-
-    // calculate the value every time. Later, the plan is to use value
-    // versioning (incremented on every update) to detect stem value changes.
-    // We also need to watch out for the change of the stem itself in addition
-    // to its value (think of a new variable set since last lookup which is
-    // now a new stem). Thus stem_vars in variable_override_value.
-    //
-    // @@ MT
-    //
-    variable_override_value& cache (
-      var_override_cache[make_pair (inner_vars, &var)]);
-
-    // Now find our "stem", that is the value to which we will be appending
+    // Now find our "stem", that is, the value to which we will be appending
     // suffixes and prepending prefixes. This is either the original or the
     // __override, provided it applies. We may also not have either.
     //
@@ -404,41 +392,51 @@ namespace build2
        break;
     }
 
-    // If there is a stem, set it as the initial value of the cache.
-    // Otherwise, start with a NULL value.
-    //
-    // Note: very similar logic as in the target type/pattern specific cache
-    // population code above.
+    // Check the cache.
     //
+    pair<value&, ulock> cache (
+      inner_proj->override_cache.insert (
+        make_pair (&var, inner_vars), stem));
-    // Un-typify the cache. This can be necessary, for example, if we are
-    // changing from one value-typed stem to another.
+    value& cv (cache.first);
+    bool cl (cache.second.owns_lock ());
+
+    // If cache miss/invalidation, update the value.
     //
-    if (!stem.defined () || cache.value.type != stem->type)
+    if (cl)
     {
-      cache.value = nullptr;
-      cache.value.type = nullptr; // Un-typify.
-    }
+      // Note: very similar logic as in the target type/pattern specific cache
+      // population code above.
+      //
-    if (stem.defined ())
-    {
-      cache.value = *stem;
-      cache.stem_vars = stem.vars;
+      // Un-typify the cache. This can be necessary, for example, if we are
+      // changing from one value-typed stem to another.
+      //
+      if (!stem.defined () || cv.type != stem->type)
+      {
+        cv = nullptr;
+        cv.type = nullptr; // Un-typify.
+      }
+
+      if (stem.defined ())
+        cv = *stem;
+
+      // Typify the cache value. If the stem is the original, then the type
+      // would get propagated automatically. But the stem could also be the
+      // override, which is kept untyped. Or the stem might not be there at
+      // all while we still need to apply prefixes/suffixes in the type-aware
+      // way.
+      //
+      if (cv.type == nullptr && var.type != nullptr)
+        typify (cv, *var.type, &var);
     }
-    else
-      cache.stem_vars = nullptr; // No stem.
 
-    // Typify the cache value. If the stem is the original, then the type
-    // would get propagated automatically. But the stem could also be the
-    // override, which is kept untyped. Or the stem might not be there at all
-    // while we still need to apply prefixes/suffixes in the type-aware way.
+    // Now apply override prefixes and suffixes (if updating the cache). Also
+    // calculate the vars and depth of the result, which will be those of the
+    // stem or prefix/suffix that applies, whichever is the innermost.
     //
-    if (cache.value.type == nullptr && var.type != nullptr)
-      typify (cache.value, *var.type, &var);
-
-    // Now apply override prefixes and suffixes. Also calculate the vars and
-    // depth of the result, which will be those of the stem or prefix/suffix
-    // that applies, whichever is the innermost.
+    // Note: we could probably cache this information instead of recalculating
+    // it every time.
     //
     size_t depth (stem_depth);
     const variable_map* vars (stem.vars);
@@ -468,13 +466,16 @@ namespace build2
       //
       auto l (find (o, ".__prefix"));
 
-      if (l) // No sense to prepend/append if NULL.
+      if (cl)
       {
-        cache.value.prepend (names (cast<names> (l)), &var);
-      }
-      else if ((l = find (o, ".__suffix")))
-      {
-        cache.value.append (names (cast<names> (l)), &var);
+        if (l) // No sense to prepend/append if NULL.
+        {
+          cv.prepend (names (cast<names> (l)), &var);
+        }
+        else if ((l = find (o, ".__suffix")))
+        {
+          cv.append (names (cast<names> (l)), &var);
+        }
       }
 
       if (l.defined ())
@@ -502,7 +503,7 @@ namespace build2
     // Use the location of the innermost value that contributed as the
     // location of the result.
     //
-    return make_pair (lookup (&cache.value, vars), depth);
+    return make_pair (lookup (&cv, vars), depth);
   }
 
   value& scope::
diff --git a/build2/types b/build2/types
index 0e6012b..82ee889 100644
--- a/build2/types
+++ b/build2/types
@@ -15,7 +15,7 @@
 #include <cstdint>          // uint{8,16,32,64}_t, *_MIN, *_MAX
 #include <istream>
 #include <ostream>
-#include <functional>       // function, reference_wrapper
+#include <functional>       // hash, function, reference_wrapper
 #include <initializer_list>
 
 #include <mutex>
@@ -61,6 +61,8 @@ namespace build2
   using std::function;
   using std::reference_wrapper;
 
+  using std::hash;
+
   using std::initializer_list;
 
   using std::unique_ptr;
@@ -101,6 +103,9 @@ namespace build2
   using slock = ulock;
 #endif
 
+  using std::defer_lock;
+  using std::adopt_lock;
+
   // Exceptions.
   //
   // While <exception> is included, there is no using for std::exception --
diff --git a/build2/variable b/build2/variable
index 6c579dd..35530ca 100644
--- a/build2/variable
+++ b/build2/variable
@@ -1219,11 +1219,7 @@ namespace build2
 
     lookup
     find (const target_type&, const string& tname, const variable&) const;
 
-    // In many places we assume that we can store a reference to the returned
-    // variable value (e.g., install::lookup_install()). As a result, in case
-    // of append/prepend where we calculate the value dynamically, we have to
-    // cache it (note, however, that if the value becomes stale, there is no
-    // guarantee the references remain valid).
+    //
     // The key is the combination of the "original value identity" (as a
     // pointer to the value in one of the variable_pattern_map's) and the
@@ -1241,17 +1237,47 @@ namespace build2
     map_type map_;
   };
 
-  // Variable override cache.
+  // Value caching. Used for overrides as well as target type/pattern-specific
+  // append/prepend.
+  //
+  // In many places we assume that we can store a reference to the returned
+  // variable value (e.g., install::lookup_install()). As a result, in these
+  // cases where we calculate the value dynamically, we have to cache it
+  // (note, however, that if the value becomes stale, there is no guarantee
+  // the references remain valid).
   //
-  struct variable_override_value
+  template <typename K>
+  class variable_cache
   {
-    variable_map::value_data value;
-    const variable_map* stem_vars = nullptr; // NULL means there is no stem.
+  public:
+    // If the returned unique lock is locked, then the value has been
+    // invalidated.
+    //
+    pair<value&, ulock>
+    insert (K, const lookup& stem);
+
+  private:
+    struct entry_type
+    {
+      build2::value value;
+
+      // Location of the stem as well as the version on which this cache
+      // value is based. Used to track the location and value of the stem
+      // for cache invalidation. NULL/0 means there is no stem.
+      //
+      const variable_map* stem_vars = nullptr;
+      size_t stem_version = 0;
+    };
+
+    using map_type = std::map<K, entry_type>;
+
+    map_type m_;
   };
 
-  using variable_override_cache = std::map<pair<const variable_map*,
-                                                const variable*>,
-                                           variable_override_value>;
+  // Allocated in main().
+  //
+  extern size_t variable_cache_mutex_shard_size;
+  extern unique_ptr<shared_mutex[]> variable_cache_mutex_shard;
 }
 
 #include <build2/variable.ixx>
diff --git a/build2/variable.cxx b/build2/variable.cxx
index 7e105e6..09790b4 100644
--- a/build2/variable.cxx
+++ b/build2/variable.cxx
@@ -1283,4 +1283,7 @@ namespace build2
 
     return lookup ();
   }
+
+  size_t variable_cache_mutex_shard_size;
+  unique_ptr<shared_mutex[]> variable_cache_mutex_shard;
 }
diff --git a/build2/variable.txx b/build2/variable.txx
index b993200..5b212f0 100644
--- a/build2/variable.txx
+++ b/build2/variable.txx
@@ -552,4 +552,73 @@ namespace build2
     &map_compare<K, V>,
     &default_empty<map<K, V>>
   };
+
+  // variable_cache
+  //
+  template <typename K>
+  pair<value&, ulock> variable_cache<K>::
+  insert (K k, const lookup& stem)
+  {
+    const variable_map* vars (stem.vars); // NULL if undefined.
+    size_t ver (
+      stem.defined ()
+      ? static_cast<const variable_map::value_data*> (stem.value)->version
+      : 0);
+
+    shared_mutex& m (
+      variable_cache_mutex_shard[
+        hash<variable_cache*> () (this) % variable_cache_mutex_shard_size]);
+
+    slock sl (m);
+    ulock ul (m, defer_lock);
+
+    auto i (m_.find (k));
+
+    // Cache hit.
+    //
+    if (i != m_.end () &&
+        i->second.stem_vars == vars &&
+        i->second.stem_version == ver)
+      return pair<value&, ulock> (i->second.value, move (ul));
+
+    // Relock for exclusive access. Note that it is entirely possible
+    // that between unlock and lock someone else has updated the entry.
+    //
+    sl.unlock ();
+    ul.lock ();
+
+    // Note that the cache entries are never removed so we can reuse the
+    // iterator.
+    //
+    pair<typename map_type::iterator, bool> p (i, i == m_.end ());
+
+    if (p.second)
+      p = m_.emplace (move (k), entry_type {value (nullptr), vars, ver});
+
+    entry_type& e (p.first->second);
+
+    // Cache miss.
+    //
+    if (p.second)
+      ;
+    //
+    // Cache invalidation.
+    //
+    else if (e.stem_vars != vars || e.stem_version != ver)
+    {
+      if (e.stem_vars != vars)
+        e.stem_vars = vars;
+      else
+        assert (e.stem_version <= ver);
+
+      e.stem_version = ver;
+    }
+    //
+    // Cache hit.
+    //
+    else
+      ul.unlock ();
+
+    return pair<value&, ulock> (e.value, move (ul));
+  }
 }
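
The pattern used throughout this change -- hash the cache address into a shard of shared mutexes, read under a shared lock, and upgrade to an exclusive lock on a miss or version mismatch -- can be illustrated with the following minimal, self-contained sketch. It is not build2 code: names such as versioned_cache, shard_size, and mutex_shard are made up for illustration, the stem location/version pair tracked by variable_cache is collapsed into a single version number, and it assumes C++17 for std::shared_mutex.

#include <map>
#include <mutex>        // unique_lock, defer_lock
#include <memory>       // unique_ptr
#include <string>
#include <cstddef>      // size_t
#include <utility>      // move, pair
#include <functional>   // hash
#include <shared_mutex> // shared_mutex, shared_lock

// Shard of mutexes sized once at startup (cf. the b.cxx hunk above).
//
static std::size_t shard_size;
static std::unique_ptr<std::shared_mutex[]> mutex_shard;

struct entry
{
  std::string value;
  std::size_t version = 0; // 0 means "never computed"; callers pass >= 1.
};

class versioned_cache
{
public:
  // Return the cached entry plus a unique lock that is locked only if the
  // caller must (re)compute the value (cache miss or version mismatch).
  //
  std::pair<entry&, std::unique_lock<std::shared_mutex>>
  insert (const std::string& key, std::size_t version)
  {
    std::shared_mutex& m (
      mutex_shard[std::hash<versioned_cache*> () (this) % shard_size]);

    std::shared_lock<std::shared_mutex> sl (m);           // Reader lock.
    std::unique_lock<std::shared_mutex> ul (m, std::defer_lock);

    auto i (map_.find (key));

    if (i != map_.end () && i->second.version == version) // Hit.
      return {i->second, std::move (ul)};                 // ul stays unlocked.

    sl.unlock (); // Drop the shared lock, then...
    ul.lock ();   // ...re-acquire exclusively (someone may get here first).

    entry& e (map_[key]); // Insert or re-find under the exclusive lock.

    if (e.version == version) // Someone else updated it between the locks.
      ul.unlock ();
    else
      e.version = version;    // Caller fills in e.value while holding ul.

    return {e, std::move (ul)};
  }

private:
  std::map<std::string, entry> map_; // Entries are never erased, so returned
                                     // references stay valid.
};

int main ()
{
  shard_size = 4;
  mutex_shard.reset (new std::shared_mutex[shard_size]);

  versioned_cache c;
  auto r (c.insert ("foo", 1));

  if (r.second.owns_lock ())    // Miss: compute and store the value.
    r.first.value = "computed"; // The lock releases when r goes out of scope.
}

As in variable_cache<K>::insert() above, the returned lock doubles as the miss/invalidation signal: if it owns the mutex, the caller is expected to update the value before releasing it; otherwise the cached value is current and only a reference is handed out.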