--- a/mfbt/HashTable.h
+++ b/mfbt/HashTable.h
@@ -48,23 +48,28 @@
#include "mozilla/ReentrancyGuard.h"
#include "mozilla/TypeTraits.h"
#include "mozilla/UniquePtr.h"
namespace mozilla {
template<class>
struct DefaultHasher;
+
template<class, class>
class HashMapEntry;
+
namespace detail {
+
template<typename T>
class HashTableEntry;
+
template<class T, class HashPolicy, class AllocPolicy>
class HashTable;
+
} // namespace detail
/*****************************************************************************/
// The "generation" of a hash table is an opaque value indicating the state of
// modification of the hash table through its lifetime. If the generation of
// a hash table compares equal at times T1 and T2, then lookups in the hash
// table, pointers to (or into) hash table entries, etc. at time T1 are valid
@@ -92,79 +97,87 @@ using Generation = Opaque<uint64_t>;
// called by HashMap must not call back into the same HashMap object.
// - Due to the lack of exception handling, the user must call |init()|.
template<class Key,
class Value,
class HashPolicy = DefaultHasher<Key>,
class AllocPolicy = MallocAllocPolicy>
class HashMap
{
- typedef HashMapEntry<Key, Value> TableEntry;
+ using TableEntry = HashMapEntry<Key, Value>;
struct MapHashPolicy : HashPolicy
{
using Base = HashPolicy;
- typedef Key KeyType;
- static const Key& getKey(TableEntry& e) { return e.key(); }
- static void setKey(TableEntry& e, Key& k)
+ using KeyType = Key;
+
+ static const Key& getKey(TableEntry& aEntry) { return aEntry.key(); }
+
+ static void setKey(TableEntry& aEntry, Key& aKey)
{
- HashPolicy::rekey(e.mutableKey(), k);
+ HashPolicy::rekey(aEntry.mutableKey(), aKey);
}
};
- typedef detail::HashTable<TableEntry, MapHashPolicy, AllocPolicy> Impl;
- Impl impl;
+ using Impl = detail::HashTable<TableEntry, MapHashPolicy, AllocPolicy>;
+ Impl mImpl;
public:
- typedef typename HashPolicy::Lookup Lookup;
- typedef TableEntry Entry;
+ using Lookup = typename HashPolicy::Lookup;
+ using Entry = TableEntry;
// HashMap construction is fallible (due to OOM); thus the user must call
// init after constructing a HashMap and check the return value.
- explicit HashMap(AllocPolicy a = AllocPolicy())
- : impl(a)
+ explicit HashMap(AllocPolicy aPolicy = AllocPolicy())
+ : mImpl(aPolicy)
{
}
- MOZ_MUST_USE bool init(uint32_t len = 16) { return impl.init(len); }
- bool initialized() const { return impl.initialized(); }
+
+ MOZ_MUST_USE bool init(uint32_t aLen = 16) { return mImpl.init(aLen); }
+
+ bool initialized() const { return mImpl.initialized(); }
// Return whether the given lookup value is present in the map. E.g.:
//
- // typedef HashMap<int,char> HM;
+ // using HM = HashMap<int,char>;
// HM h;
// if (HM::Ptr p = h.lookup(3)) {
// const HM::Entry& e = *p; // p acts like a pointer to Entry
-  //   assert(p->key == 3);     // Entry contains the key
-  //   char val = p->value;     // and value
+  //   assert(p->key() == 3);   // Entry contains the key
+  //   char val = p->value();   // and value
// }
//
// Also see the definition of Ptr in HashTable above (with T = Entry).
- typedef typename Impl::Ptr Ptr;
- MOZ_ALWAYS_INLINE Ptr lookup(const Lookup& l) const { return impl.lookup(l); }
+ using Ptr = typename Impl::Ptr;
+ MOZ_ALWAYS_INLINE Ptr lookup(const Lookup& aLookup) const
+ {
+ return mImpl.lookup(aLookup);
+ }
// Like lookup, but does not assert if two threads call lookup at the same
// time. Only use this method when none of the threads will modify the map.
- MOZ_ALWAYS_INLINE Ptr readonlyThreadsafeLookup(const Lookup& l) const
+ MOZ_ALWAYS_INLINE Ptr readonlyThreadsafeLookup(const Lookup& aLookup) const
{
- return impl.readonlyThreadsafeLookup(l);
+ return mImpl.readonlyThreadsafeLookup(aLookup);
}
// Assuming |p.found()|, remove |*p|.
- void remove(Ptr p) { impl.remove(p); }
+ void remove(Ptr aPtr) { mImpl.remove(aPtr); }
// Like |lookup(l)|, but on miss, |p = lookupForAdd(l)| allows efficient
// insertion of Key |k| (where |HashPolicy::match(k,l) == true|) using
// |add(p,k,v)|. After |add(p,k,v)|, |p| points to the new Entry. E.g.:
//
- // typedef HashMap<int,char> HM;
+ // using HM = HashMap<int,char>;
// HM h;
// HM::AddPtr p = h.lookupForAdd(3);
// if (!p) {
- // if (!h.add(p, 3, 'a'))
+ // if (!h.add(p, 3, 'a')) {
// return false;
+ // }
// }
// const HM::Entry& e = *p; // p acts like a pointer to Entry
-  //   assert(p->key == 3);     // Entry contains the key
-  //   char val = p->value;     // and value
+  //   assert(p->key() == 3);   // Entry contains the key
+  //   char val = p->value();   // and value
//
// Also see the definition of AddPtr in HashTable above (with T = Entry).
//
// N.B. The caller must ensure that no mutating hash table operations
@@ -172,187 +185,205 @@ public:
// looking up the key a second time, the caller may use the more efficient
// relookupOrAdd method. This method reuses part of the hashing computation
// to more efficiently insert the key if it has not been added. For
// example, a mutation-handling version of the previous example:
//
// HM::AddPtr p = h.lookupForAdd(3);
// if (!p) {
// call_that_may_mutate_h();
- // if (!h.relookupOrAdd(p, 3, 'a'))
+ // if (!h.relookupOrAdd(p, 3, 'a')) {
// return false;
+ // }
// }
// const HM::Entry& e = *p;
-  //   assert(p->key == 3);
-  //   char val = p->value;
+  //   assert(p->key() == 3);
+  //   char val = p->value();
- typedef typename Impl::AddPtr AddPtr;
- MOZ_ALWAYS_INLINE AddPtr lookupForAdd(const Lookup& l) const
+ //
+ using AddPtr = typename Impl::AddPtr;
+ MOZ_ALWAYS_INLINE AddPtr lookupForAdd(const Lookup& aLookup) const
{
- return impl.lookupForAdd(l);
+ return mImpl.lookupForAdd(aLookup);
}
template<typename KeyInput, typename ValueInput>
- MOZ_MUST_USE bool add(AddPtr& p, KeyInput&& k, ValueInput&& v)
+ MOZ_MUST_USE bool add(AddPtr& aPtr, KeyInput&& aKey, ValueInput&& aValue)
{
- return impl.add(p, std::forward<KeyInput>(k), std::forward<ValueInput>(v));
+ return mImpl.add(
+ aPtr, std::forward<KeyInput>(aKey), std::forward<ValueInput>(aValue));
}
template<typename KeyInput>
- MOZ_MUST_USE bool add(AddPtr& p, KeyInput&& k)
+ MOZ_MUST_USE bool add(AddPtr& aPtr, KeyInput&& aKey)
{
- return impl.add(p, std::forward<KeyInput>(k), Value());
+ return mImpl.add(aPtr, std::forward<KeyInput>(aKey), Value());
}
template<typename KeyInput, typename ValueInput>
- MOZ_MUST_USE bool relookupOrAdd(AddPtr& p, KeyInput&& k, ValueInput&& v)
+ MOZ_MUST_USE bool relookupOrAdd(AddPtr& aPtr,
+ KeyInput&& aKey,
+ ValueInput&& aValue)
{
- return impl.relookupOrAdd(
- p, k, std::forward<KeyInput>(k), std::forward<ValueInput>(v));
+ return mImpl.relookupOrAdd(aPtr,
+ aKey,
+ std::forward<KeyInput>(aKey),
+ std::forward<ValueInput>(aValue));
}
// |all()| returns a Range containing |count()| elements. E.g.:
//
- // typedef HashMap<int,char> HM;
+ // using HM = HashMap<int,char>;
// HM h;
- // for (HM::Range r = h.all(); !r.empty(); r.popFront())
+ // for (HM::Range r = h.all(); !r.empty(); r.popFront()) {
// char c = r.front().value();
+ // }
//
// Also see the definition of Range in HashTable above (with T = Entry).
- typedef typename Impl::Range Range;
- Range all() const { return impl.all(); }
+ using Range = typename Impl::Range;
+ Range all() const { return mImpl.all(); }
-  // Typedef for the enumeration class. An Enum may be used to examine and
+  // Type for the enumeration class. An Enum may be used to examine and
// remove table entries:
//
- // typedef HashMap<int,char> HM;
+ // using HM = HashMap<int,char>;
// HM s;
- // for (HM::Enum e(s); !e.empty(); e.popFront())
- // if (e.front().value() == 'l')
+ // for (HM::Enum e(s); !e.empty(); e.popFront()) {
+ // if (e.front().value() == 'l') {
// e.removeFront();
+ // }
+ // }
//
// Table resize may occur in Enum's destructor. Also see the definition of
// Enum in HashTable above (with T = Entry).
- typedef typename Impl::Enum Enum;
+ using Enum = typename Impl::Enum;
// Remove all entries. This does not shrink the table. For that consider
// using the finish() method.
- void clear() { impl.clear(); }
+ void clear() { mImpl.clear(); }
// Remove all entries. Unlike clear() this method tries to shrink the table.
// Unlike finish() it does not require the map to be initialized again.
- void clearAndShrink() { impl.clearAndShrink(); }
+ void clearAndShrink() { mImpl.clearAndShrink(); }
// Remove all the entries and release all internal buffers. The map must
// be initialized again before any use.
- void finish() { impl.finish(); }
+ void finish() { mImpl.finish(); }
// Does the table contain any entries?
- bool empty() const { return impl.empty(); }
+ bool empty() const { return mImpl.empty(); }
// Number of live elements in the map.
- uint32_t count() const { return impl.count(); }
+ uint32_t count() const { return mImpl.count(); }
-  // Total number of allocation in the dynamic table. Note: resize will
-  // happen well before count() == capacity().
+  // Total number of allocated entries in the dynamic table. Note: resize
+  // will happen well before count() == capacity().
- size_t capacity() const { return impl.capacity(); }
+ size_t capacity() const { return mImpl.capacity(); }
- // Don't just call |impl.sizeOfExcludingThis()| because there's no
- // guarantee that |impl| is the first field in HashMap.
- size_t sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const
+ // Don't just call |mImpl.sizeOfExcludingThis()| because there's no
+ // guarantee that |mImpl| is the first field in HashMap.
+ size_t sizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const
{
- return impl.sizeOfExcludingThis(mallocSizeOf);
+ return mImpl.sizeOfExcludingThis(aMallocSizeOf);
}
- size_t sizeOfIncludingThis(MallocSizeOf mallocSizeOf) const
+ size_t sizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const
{
- return mallocSizeOf(this) + impl.sizeOfExcludingThis(mallocSizeOf);
+ return aMallocSizeOf(this) + mImpl.sizeOfExcludingThis(aMallocSizeOf);
}
- Generation generation() const { return impl.generation(); }
+ Generation generation() const { return mImpl.generation(); }
/************************************************** Shorthand operations */
- bool has(const Lookup& l) const { return impl.lookup(l).found(); }
+ bool has(const Lookup& aLookup) const
+ {
+ return mImpl.lookup(aLookup).found();
+ }
- // Overwrite existing value with v. Return false on oom.
+ // Overwrite existing value with aValue. Return false on oom.
template<typename KeyInput, typename ValueInput>
- MOZ_MUST_USE bool put(KeyInput&& k, ValueInput&& v)
+ MOZ_MUST_USE bool put(KeyInput&& aKey, ValueInput&& aValue)
{
- AddPtr p = lookupForAdd(k);
+ AddPtr p = lookupForAdd(aKey);
if (p) {
- p->value() = std::forward<ValueInput>(v);
+ p->value() = std::forward<ValueInput>(aValue);
return true;
}
- return add(p, std::forward<KeyInput>(k), std::forward<ValueInput>(v));
+ return add(
+ p, std::forward<KeyInput>(aKey), std::forward<ValueInput>(aValue));
}
// Like put, but assert that the given key is not already present.
template<typename KeyInput, typename ValueInput>
- MOZ_MUST_USE bool putNew(KeyInput&& k, ValueInput&& v)
+ MOZ_MUST_USE bool putNew(KeyInput&& aKey, ValueInput&& aValue)
{
- return impl.putNew(
- k, std::forward<KeyInput>(k), std::forward<ValueInput>(v));
+ return mImpl.putNew(
+ aKey, std::forward<KeyInput>(aKey), std::forward<ValueInput>(aValue));
}
// Only call this to populate an empty map after reserving space with init().
template<typename KeyInput, typename ValueInput>
- void putNewInfallible(KeyInput&& k, ValueInput&& v)
+ void putNewInfallible(KeyInput&& aKey, ValueInput&& aValue)
{
- impl.putNewInfallible(
- k, std::forward<KeyInput>(k), std::forward<ValueInput>(v));
+ mImpl.putNewInfallible(
+ aKey, std::forward<KeyInput>(aKey), std::forward<ValueInput>(aValue));
}
- // Add (k,defaultValue) if |k| is not found. Return a false-y Ptr on oom.
- Ptr lookupWithDefault(const Key& k, const Value& defaultValue)
+ // Add (aKey,aDefaultValue) if |aKey| is not found. Return a false-y Ptr on
+ // oom.
+ Ptr lookupWithDefault(const Key& aKey, const Value& aDefaultValue)
{
- AddPtr p = lookupForAdd(k);
- if (p)
+ AddPtr p = lookupForAdd(aKey);
+ if (p) {
return p;
- bool ok = add(p, k, defaultValue);
+ }
+ bool ok = add(p, aKey, aDefaultValue);
MOZ_ASSERT_IF(!ok, !p); // p is left false-y on oom.
(void)ok;
return p;
}
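+
+  // For example, a minimal sketch combining the shorthand operations above
+  // (|m| is assumed to be an initialized HashMap<int, char>, aliased as HM):
+  //
+  //   if (!m.put(3, 'a')) {
+  //     return false;                // OOM while inserting.
+  //   }
+  //   assert(m.has(3));
+  //   if (HM::Ptr p = m.lookupWithDefault(4, 'b')) {
+  //     assert(p->value() == 'b');   // 4 was absent, so 'b' was added.
+  //   }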
// Remove if present.
- void remove(const Lookup& l)
+ void remove(const Lookup& aLookup)
{
- if (Ptr p = lookup(l))
+ if (Ptr p = lookup(aLookup)) {
remove(p);
+ }
}
// Infallibly rekey one entry, if necessary.
// Requires template parameters Key and HashPolicy::Lookup to be the same
// type.
- void rekeyIfMoved(const Key& old_key, const Key& new_key)
+ void rekeyIfMoved(const Key& aOldKey, const Key& aNewKey)
{
- if (old_key != new_key)
- rekeyAs(old_key, new_key, new_key);
+ if (aOldKey != aNewKey) {
+ rekeyAs(aOldKey, aNewKey, aNewKey);
+ }
}
// Infallibly rekey one entry if present, and return whether that happened.
- bool rekeyAs(const Lookup& old_lookup,
- const Lookup& new_lookup,
- const Key& new_key)
+ bool rekeyAs(const Lookup& aOldLookup,
+ const Lookup& aNewLookup,
+ const Key& aNewKey)
{
- if (Ptr p = lookup(old_lookup)) {
- impl.rekeyAndMaybeRehash(p, new_lookup, new_key);
+ if (Ptr p = lookup(aOldLookup)) {
+ mImpl.rekeyAndMaybeRehash(p, aNewLookup, aNewKey);
return true;
}
return false;
}
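+
+  // For example, a sketch of fixing up the table after a key object was
+  // mutated in place (assuming Key and Lookup are the same type, as
+  // rekeyIfMoved requires):
+  //
+  //   if (m.rekeyAs(oldKey, newKey, newKey)) {
+  //     // The entry is now stored under newKey.
+  //   }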
// HashMap is movable
- HashMap(HashMap&& rhs)
- : impl(std::move(rhs.impl))
+ HashMap(HashMap&& aRhs)
+ : mImpl(std::move(aRhs.mImpl))
{
}
- void operator=(HashMap&& rhs)
+ void operator=(HashMap&& aRhs)
{
- MOZ_ASSERT(this != &rhs, "self-move assignment is prohibited");
- impl = std::move(rhs.impl);
+ MOZ_ASSERT(this != &aRhs, "self-move assignment is prohibited");
+ mImpl = std::move(aRhs.mImpl);
}
private:
-  // HashMap is not copyable or assignable
+  // HashMap is not copyable or assignable.
HashMap(const HashMap& hm) = delete;
HashMap& operator=(const HashMap& hm) = delete;
friend class Impl::Enum;
@@ -378,252 +409,268 @@ private:
template<class T,
class HashPolicy = DefaultHasher<T>,
class AllocPolicy = MallocAllocPolicy>
class HashSet
{
struct SetOps : HashPolicy
{
using Base = HashPolicy;
- typedef T KeyType;
- static const KeyType& getKey(const T& t) { return t; }
- static void setKey(T& t, KeyType& k) { HashPolicy::rekey(t, k); }
+ using KeyType = T;
+
+ static const KeyType& getKey(const T& aT) { return aT; }
+ static void setKey(T& aT, KeyType& aKey) { HashPolicy::rekey(aT, aKey); }
};
- typedef detail::HashTable<const T, SetOps, AllocPolicy> Impl;
- Impl impl;
+ using Impl = detail::HashTable<const T, SetOps, AllocPolicy>;
+ Impl mImpl;
public:
- typedef typename HashPolicy::Lookup Lookup;
- typedef T Entry;
+ using Lookup = typename HashPolicy::Lookup;
+ using Entry = T;
// HashSet construction is fallible (due to OOM); thus the user must call
// init after constructing a HashSet and check the return value.
-  explicit HashSet(AllocPolicy a = AllocPolicy())
-    : impl(a)
+  explicit HashSet(AllocPolicy aPolicy = AllocPolicy())
+    : mImpl(aPolicy)
{
}
- MOZ_MUST_USE bool init(uint32_t len = 16) { return impl.init(len); }
- bool initialized() const { return impl.initialized(); }
+
+ MOZ_MUST_USE bool init(uint32_t aLen = 16) { return mImpl.init(aLen); }
+
+ bool initialized() const { return mImpl.initialized(); }
-  // Return whether the given lookup value is present in the map. E.g.:
+  // Return whether the given lookup value is present in the set. E.g.:
//
- // typedef HashSet<int> HS;
+ // using HS = HashSet<int>;
// HS h;
// if (HS::Ptr p = h.lookup(3)) {
// assert(*p == 3); // p acts like a pointer to int
// }
//
// Also see the definition of Ptr in HashTable above.
- typedef typename Impl::Ptr Ptr;
- MOZ_ALWAYS_INLINE Ptr lookup(const Lookup& l) const { return impl.lookup(l); }
+ using Ptr = typename Impl::Ptr;
+ MOZ_ALWAYS_INLINE Ptr lookup(const Lookup& aLookup) const
+ {
+ return mImpl.lookup(aLookup);
+ }
// Like lookup, but does not assert if two threads call lookup at the same
-  // time. Only use this method when none of the threads will modify the map.
+  // time. Only use this method when none of the threads will modify the set.
- MOZ_ALWAYS_INLINE Ptr readonlyThreadsafeLookup(const Lookup& l) const
+ MOZ_ALWAYS_INLINE Ptr readonlyThreadsafeLookup(const Lookup& aLookup) const
{
- return impl.readonlyThreadsafeLookup(l);
+ return mImpl.readonlyThreadsafeLookup(aLookup);
}
- // Assuming |p.found()|, remove |*p|.
- void remove(Ptr p) { impl.remove(p); }
+ // Assuming |aPtr.found()|, remove |*aPtr|.
+ void remove(Ptr aPtr) { mImpl.remove(aPtr); }
// Like |lookup(l)|, but on miss, |p = lookupForAdd(l)| allows efficient
// insertion of T value |t| (where |HashPolicy::match(t,l) == true|) using
// |add(p,t)|. After |add(p,t)|, |p| points to the new element. E.g.:
//
- // typedef HashSet<int> HS;
+ // using HS = HashSet<int>;
// HS h;
// HS::AddPtr p = h.lookupForAdd(3);
// if (!p) {
- // if (!h.add(p, 3))
+ // if (!h.add(p, 3)) {
// return false;
+ // }
// }
// assert(*p == 3); // p acts like a pointer to int
//
// Also see the definition of AddPtr in HashTable above.
//
// N.B. The caller must ensure that no mutating hash table operations
// occur between a pair of |lookupForAdd| and |add| calls. To avoid
// looking up the key a second time, the caller may use the more efficient
// relookupOrAdd method. This method reuses part of the hashing computation
// to more efficiently insert the key if it has not been added. For
// example, a mutation-handling version of the previous example:
//
// HS::AddPtr p = h.lookupForAdd(3);
// if (!p) {
// call_that_may_mutate_h();
- // if (!h.relookupOrAdd(p, 3, 3))
+ // if (!h.relookupOrAdd(p, 3, 3)) {
// return false;
+ // }
// }
// assert(*p == 3);
//
// Note that relookupOrAdd(p,l,t) performs Lookup using |l| and adds the
// entry |t|, where the caller ensures match(l,t).
- typedef typename Impl::AddPtr AddPtr;
- MOZ_ALWAYS_INLINE AddPtr lookupForAdd(const Lookup& l) const
+ using AddPtr = typename Impl::AddPtr;
+ MOZ_ALWAYS_INLINE AddPtr lookupForAdd(const Lookup& aLookup) const
{
- return impl.lookupForAdd(l);
+ return mImpl.lookupForAdd(aLookup);
}
template<typename U>
- MOZ_MUST_USE bool add(AddPtr& p, U&& u)
+ MOZ_MUST_USE bool add(AddPtr& aPtr, U&& aU)
{
- return impl.add(p, std::forward<U>(u));
+ return mImpl.add(aPtr, std::forward<U>(aU));
}
template<typename U>
- MOZ_MUST_USE bool relookupOrAdd(AddPtr& p, const Lookup& l, U&& u)
+ MOZ_MUST_USE bool relookupOrAdd(AddPtr& aPtr, const Lookup& aLookup, U&& aU)
{
- return impl.relookupOrAdd(p, l, std::forward<U>(u));
+ return mImpl.relookupOrAdd(aPtr, aLookup, std::forward<U>(aU));
}
// |all()| returns a Range containing |count()| elements:
//
- // typedef HashSet<int> HS;
+ // using HS = HashSet<int>;
// HS h;
- // for (HS::Range r = h.all(); !r.empty(); r.popFront())
+ // for (HS::Range r = h.all(); !r.empty(); r.popFront()) {
// int i = r.front();
+ // }
//
// Also see the definition of Range in HashTable above.
- typedef typename Impl::Range Range;
- Range all() const { return impl.all(); }
+ using Range = typename Impl::Range;
+ Range all() const { return mImpl.all(); }
-  // Typedef for the enumeration class. An Enum may be used to examine and
+  // Type for the enumeration class. An Enum may be used to examine and
// remove table entries:
//
- // typedef HashSet<int> HS;
+ // using HS = HashSet<int>;
// HS s;
- // for (HS::Enum e(s); !e.empty(); e.popFront())
- // if (e.front() == 42)
+ // for (HS::Enum e(s); !e.empty(); e.popFront()) {
+ // if (e.front() == 42) {
// e.removeFront();
+ // }
+ // }
//
// Table resize may occur in Enum's destructor. Also see the definition of
// Enum in HashTable above.
- typedef typename Impl::Enum Enum;
+ using Enum = typename Impl::Enum;
// Remove all entries. This does not shrink the table. For that consider
// using the finish() method.
- void clear() { impl.clear(); }
+ void clear() { mImpl.clear(); }
// Remove all entries. Unlike clear() this method tries to shrink the table.
// Unlike finish() it does not require the set to be initialized again.
- void clearAndShrink() { impl.clearAndShrink(); }
+ void clearAndShrink() { mImpl.clearAndShrink(); }
// Remove all the entries and release all internal buffers. The set must
// be initialized again before any use.
- void finish() { impl.finish(); }
+ void finish() { mImpl.finish(); }
// Does the table contain any entries?
- bool empty() const { return impl.empty(); }
+ bool empty() const { return mImpl.empty(); }
-  // Number of live elements in the map.
+  // Number of live elements in the set.
- uint32_t count() const { return impl.count(); }
+ uint32_t count() const { return mImpl.count(); }
-  // Total number of allocation in the dynamic table. Note: resize will
-  // happen well before count() == capacity().
+  // Total number of allocated entries in the dynamic table. Note: resize
+  // will happen well before count() == capacity().
- size_t capacity() const { return impl.capacity(); }
+ size_t capacity() const { return mImpl.capacity(); }
- // Don't just call |impl.sizeOfExcludingThis()| because there's no
- // guarantee that |impl| is the first field in HashSet.
- size_t sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const
+ // Don't just call |mImpl.sizeOfExcludingThis()| because there's no
+ // guarantee that |mImpl| is the first field in HashSet.
+ size_t sizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const
{
- return impl.sizeOfExcludingThis(mallocSizeOf);
+ return mImpl.sizeOfExcludingThis(aMallocSizeOf);
}
- size_t sizeOfIncludingThis(MallocSizeOf mallocSizeOf) const
+ size_t sizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const
{
- return mallocSizeOf(this) + impl.sizeOfExcludingThis(mallocSizeOf);
+ return aMallocSizeOf(this) + mImpl.sizeOfExcludingThis(aMallocSizeOf);
}
- Generation generation() const { return impl.generation(); }
+ Generation generation() const { return mImpl.generation(); }
/************************************************** Shorthand operations */
- bool has(const Lookup& l) const { return impl.lookup(l).found(); }
+ bool has(const Lookup& aLookup) const
+ {
+ return mImpl.lookup(aLookup).found();
+ }
- // Add |u| if it is not present already. Return false on oom.
+ // Add |aU| if it is not present already. Return false on oom.
template<typename U>
- MOZ_MUST_USE bool put(U&& u)
+ MOZ_MUST_USE bool put(U&& aU)
{
- AddPtr p = lookupForAdd(u);
- return p ? true : add(p, std::forward<U>(u));
+ AddPtr p = lookupForAdd(aU);
+ return p ? true : add(p, std::forward<U>(aU));
}
// Like put, but assert that the given key is not already present.
template<typename U>
- MOZ_MUST_USE bool putNew(U&& u)
+ MOZ_MUST_USE bool putNew(U&& aU)
{
- return impl.putNew(u, std::forward<U>(u));
+ return mImpl.putNew(aU, std::forward<U>(aU));
}
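+
+  // Like the one-argument putNew(), but with a separately-provided lookup
+  // value, for callers whose Lookup type differs from the stored element.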
template<typename U>
- MOZ_MUST_USE bool putNew(const Lookup& l, U&& u)
+ MOZ_MUST_USE bool putNew(const Lookup& aLookup, U&& aU)
{
- return impl.putNew(l, std::forward<U>(u));
+ return mImpl.putNew(aLookup, std::forward<U>(aU));
}
// Only call this to populate an empty set after reserving space with init().
template<typename U>
- void putNewInfallible(const Lookup& l, U&& u)
+ void putNewInfallible(const Lookup& aLookup, U&& aU)
{
- impl.putNewInfallible(l, std::forward<U>(u));
+ mImpl.putNewInfallible(aLookup, std::forward<U>(aU));
}
- void remove(const Lookup& l)
+ void remove(const Lookup& aLookup)
{
- if (Ptr p = lookup(l))
+ if (Ptr p = lookup(aLookup)) {
remove(p);
+ }
}
// Infallibly rekey one entry, if present.
// Requires template parameters T and HashPolicy::Lookup to be the same type.
- void rekeyIfMoved(const Lookup& old_value, const T& new_value)
+ void rekeyIfMoved(const Lookup& aOldValue, const T& aNewValue)
{
- if (old_value != new_value)
- rekeyAs(old_value, new_value, new_value);
+ if (aOldValue != aNewValue) {
+ rekeyAs(aOldValue, aNewValue, aNewValue);
+ }
}
// Infallibly rekey one entry if present, and return whether that happened.
- bool rekeyAs(const Lookup& old_lookup,
- const Lookup& new_lookup,
- const T& new_value)
+ bool rekeyAs(const Lookup& aOldLookup,
+ const Lookup& aNewLookup,
+ const T& aNewValue)
{
- if (Ptr p = lookup(old_lookup)) {
- impl.rekeyAndMaybeRehash(p, new_lookup, new_value);
+ if (Ptr p = lookup(aOldLookup)) {
+ mImpl.rekeyAndMaybeRehash(p, aNewLookup, aNewValue);
return true;
}
return false;
}
// Infallibly replace the current key at |p| with an equivalent key.
// Specifically, both HashPolicy::hash and HashPolicy::match must return
// identical results for the new and old key when applied against all
// possible matching values.
- void replaceKey(Ptr p, const T& new_value)
+ void replaceKey(Ptr aPtr, const T& aNewValue)
{
- MOZ_ASSERT(p.found());
- MOZ_ASSERT(*p != new_value);
- MOZ_ASSERT(HashPolicy::hash(*p) == HashPolicy::hash(new_value));
- MOZ_ASSERT(HashPolicy::match(*p, new_value));
- const_cast<T&>(*p) = new_value;
+ MOZ_ASSERT(aPtr.found());
+ MOZ_ASSERT(*aPtr != aNewValue);
+ MOZ_ASSERT(HashPolicy::hash(*aPtr) == HashPolicy::hash(aNewValue));
+ MOZ_ASSERT(HashPolicy::match(*aPtr, aNewValue));
+ const_cast<T&>(*aPtr) = aNewValue;
}
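+
+  // For example, a sketch of pointer canonicalization (assuming |s| is an
+  // initialized HashSet whose policy hashes and matches by pointee contents,
+  // not by address):
+  //
+  //   if (HS::Ptr p = s.lookup(value)) {
+  //     s.replaceKey(p, canonicalValue); // equal per the policy, distinct T
+  //   }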
// HashSet is movable
- HashSet(HashSet&& rhs)
- : impl(std::move(rhs.impl))
+ HashSet(HashSet&& aRhs)
+ : mImpl(std::move(aRhs.mImpl))
{
}
- void operator=(HashSet&& rhs)
+ void operator=(HashSet&& aRhs)
{
- MOZ_ASSERT(this != &rhs, "self-move assignment is prohibited");
- impl = std::move(rhs.impl);
+ MOZ_ASSERT(this != &aRhs, "self-move assignment is prohibited");
+ mImpl = std::move(aRhs.mImpl);
}
private:
- // HashSet is not copyable or assignable
+ // HashSet is not copyable or assignable.
HashSet(const HashSet& hs) = delete;
HashSet& operator=(const HashSet& hs) = delete;
friend class Impl::Enum;
};
/*****************************************************************************/
@@ -653,45 +700,54 @@ private:
// }
// Pointer hashing policy that uses HashGeneric() to create good hashes for
// pointers. Note that we don't shift out the lowest k bits to generate a
// good distribution for arena allocated pointers.
template<typename Key>
struct PointerHasher
{
- typedef Key Lookup;
- static HashNumber hash(const Lookup& l)
+ using Lookup = Key;
+
+ static HashNumber hash(const Lookup& aLookup)
{
- size_t word = reinterpret_cast<size_t>(l);
+ size_t word = reinterpret_cast<size_t>(aLookup);
return HashGeneric(word);
}
- static bool match(const Key& k, const Lookup& l) { return k == l; }
- static void rekey(Key& k, const Key& newKey) { k = newKey; }
+
+ static bool match(const Key& aKey, const Lookup& aLookup)
+ {
+ return aKey == aLookup;
+ }
+
+ static void rekey(Key& aKey, const Key& aNewKey) { aKey = aNewKey; }
};
// Default hash policy: just use the 'lookup' value. This of course only
// works if the lookup value is integral. HashTable applies ScrambleHashCode to
// the result of the 'hash' which means that it is 'ok' if the lookup value is
// not well distributed over the HashNumber domain.
template<class Key>
struct DefaultHasher
{
- typedef Key Lookup;
- static HashNumber hash(const Lookup& l)
+ using Lookup = Key;
+
+ static HashNumber hash(const Lookup& aLookup)
{
-    // Hash if can implicitly cast to hash number type.
+    // This works when the lookup value implicitly converts to HashNumber.
- return l;
+ return aLookup;
}
- static bool match(const Key& k, const Lookup& l)
+
+ static bool match(const Key& aKey, const Lookup& aLookup)
{
// Use builtin or overloaded operator==.
- return k == l;
+ return aKey == aLookup;
}
- static void rekey(Key& k, const Key& newKey) { k = newKey; }
+
+ static void rekey(Key& aKey, const Key& aNewKey) { aKey = aNewKey; }
};
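+
+// For example, a custom policy needs the same four members shown above. A
+// hypothetical case-insensitive character policy (not defined in this header)
+// could look like:
+//
+//   struct CaseInsensitiveCharHasher
+//   {
+//     using Lookup = char;
+//     static HashNumber hash(const Lookup& aLookup)
+//     {
+//       return tolower(aLookup);
+//     }
+//     static bool match(const char& aKey, const Lookup& aLookup)
+//     {
+//       return tolower(aKey) == tolower(aLookup);
+//     }
+//     static void rekey(char& aKey, const char& aNewKey) { aKey = aNewKey; }
+//   };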
// Specialize hashing policy for pointer types. It assumes that the type is
// at least word-aligned. For types with smaller size use PointerHasher.
template<class T>
struct DefaultHasher<T*> : PointerHasher<T*>
{
};
@@ -699,66 +755,77 @@ struct DefaultHasher<T*> : PointerHasher
// Specialize hashing policy for mozilla::UniquePtr to proxy the UniquePtr's
// raw pointer to PointerHasher.
template<class T, class D>
struct DefaultHasher<UniquePtr<T, D>>
{
using Lookup = UniquePtr<T, D>;
using PtrHasher = PointerHasher<T*>;
- static HashNumber hash(const Lookup& l) { return PtrHasher::hash(l.get()); }
- static bool match(const UniquePtr<T, D>& k, const Lookup& l)
+ static HashNumber hash(const Lookup& aLookup)
+ {
+ return PtrHasher::hash(aLookup.get());
+ }
+
+ static bool match(const UniquePtr<T, D>& aKey, const Lookup& aLookup)
{
- return PtrHasher::match(k.get(), l.get());
+ return PtrHasher::match(aKey.get(), aLookup.get());
}
- static void rekey(UniquePtr<T, D>& k, UniquePtr<T, D>&& newKey)
+
+ static void rekey(UniquePtr<T, D>& aKey, UniquePtr<T, D>&& aNewKey)
{
- k = std::move(newKey);
+ aKey = std::move(aNewKey);
}
};
// For doubles, we can xor the two uint32s.
template<>
struct DefaultHasher<double>
{
- typedef double Lookup;
- static HashNumber hash(double d)
+ using Lookup = double;
+
+ static HashNumber hash(double aVal)
{
static_assert(sizeof(HashNumber) == 4,
"subsequent code assumes a four-byte hash");
- uint64_t u = BitwiseCast<uint64_t>(d);
+ uint64_t u = BitwiseCast<uint64_t>(aVal);
return HashNumber(u ^ (u >> 32));
}
- static bool match(double lhs, double rhs)
+
+ static bool match(double aLhs, double aRhs)
{
- return BitwiseCast<uint64_t>(lhs) == BitwiseCast<uint64_t>(rhs);
+ return BitwiseCast<uint64_t>(aLhs) == BitwiseCast<uint64_t>(aRhs);
}
};
template<>
struct DefaultHasher<float>
{
- typedef float Lookup;
- static HashNumber hash(float f)
+ using Lookup = float;
+
+ static HashNumber hash(float aVal)
{
static_assert(sizeof(HashNumber) == 4,
"subsequent code assumes a four-byte hash");
- return HashNumber(BitwiseCast<uint32_t>(f));
+ return HashNumber(BitwiseCast<uint32_t>(aVal));
}
- static bool match(float lhs, float rhs)
+
+ static bool match(float aLhs, float aRhs)
{
- return BitwiseCast<uint32_t>(lhs) == BitwiseCast<uint32_t>(rhs);
+ return BitwiseCast<uint32_t>(aLhs) == BitwiseCast<uint32_t>(aRhs);
}
};
// A hash policy that compares C strings.
struct CStringHasher
{
- typedef const char* Lookup;
- static HashNumber hash(Lookup l) { return HashString(l); }
+ using Lookup = const char*;
+
+ static HashNumber hash(Lookup aLookup) { return HashString(aLookup); }
+
-  static bool match(const char* key, Lookup lookup)
-  {
-    return strcmp(key, lookup) == 0;
-  }
+  static bool match(const char* aKey, Lookup aLookup)
+  {
+    return strcmp(aKey, aLookup) == 0;
+  }
};
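+
+// For example, a sketch of a set keyed by C strings (only the pointers are
+// stored, so the strings must outlive the table):
+//
+//   using CStringSet = HashSet<const char*, CStringHasher>;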
// Fallible hashing interface.
//
@@ -769,44 +836,44 @@ struct CStringHasher
// This is used by MovableCellHasher to handle the fact that generating a unique
// ID for cell pointer may fail due to OOM.
template<typename HashPolicy>
struct FallibleHashMethods
{
// Return true if a hashcode is already available for its argument. Once
// this returns true for a specific argument it must continue to do so.
template<typename Lookup>
- static bool hasHash(Lookup&& l)
+ static bool hasHash(Lookup&& aLookup)
{
return true;
}
// Fallible method to ensure a hashcode exists for its argument and create
// one if not. Returns false on error, e.g. out of memory.
template<typename Lookup>
- static bool ensureHash(Lookup&& l)
+ static bool ensureHash(Lookup&& aLookup)
{
return true;
}
};
template<typename HashPolicy, typename Lookup>
static bool
-HasHash(Lookup&& l)
+HasHash(Lookup&& aLookup)
{
return FallibleHashMethods<typename HashPolicy::Base>::hasHash(
- std::forward<Lookup>(l));
+ std::forward<Lookup>(aLookup));
}
template<typename HashPolicy, typename Lookup>
static bool
-EnsureHash(Lookup&& l)
+EnsureHash(Lookup&& aLookup)
{
return FallibleHashMethods<typename HashPolicy::Base>::ensureHash(
- std::forward<Lookup>(l));
+ std::forward<Lookup>(aLookup));
}
/*****************************************************************************/
// Both HashMap and HashSet are implemented by a single HashTable that is even
// more heavily parameterized than the other two. This leaves HashTable gnarly
// and extremely coupled to HashMap and HashSet; thus code should not use
// HashTable directly.
@@ -821,39 +888,40 @@ class HashMapEntry
friend class detail::HashTable;
template<class>
friend class detail::HashTableEntry;
template<class, class, class, class>
friend class HashMap;
public:
template<typename KeyInput, typename ValueInput>
- HashMapEntry(KeyInput&& k, ValueInput&& v)
- : key_(std::forward<KeyInput>(k))
- , value_(std::forward<ValueInput>(v))
+ HashMapEntry(KeyInput&& aKey, ValueInput&& aValue)
+ : key_(std::forward<KeyInput>(aKey))
+ , value_(std::forward<ValueInput>(aValue))
{
}
- HashMapEntry(HashMapEntry&& rhs)
- : key_(std::move(rhs.key_))
- , value_(std::move(rhs.value_))
+ HashMapEntry(HashMapEntry&& aRhs)
+ : key_(std::move(aRhs.key_))
+ , value_(std::move(aRhs.value_))
{
}
- void operator=(HashMapEntry&& rhs)
+ void operator=(HashMapEntry&& aRhs)
{
- key_ = std::move(rhs.key_);
- value_ = std::move(rhs.value_);
+ key_ = std::move(aRhs.key_);
+ value_ = std::move(aRhs.value_);
}
- typedef Key KeyType;
- typedef Value ValueType;
+ using KeyType = Key;
+ using ValueType = Value;
const Key& key() const { return key_; }
Key& mutableKey() { return key_; }
+
const Value& value() const { return value_; }
Value& value() { return value_; }
private:
HashMapEntry(const HashMapEntry&) = delete;
void operator=(const HashMapEntry&) = delete;
};
@@ -873,28 +941,28 @@ class HashTableEntry
{
private:
using NonConstT = typename RemoveConst<T>::Type;
static const HashNumber sFreeKey = 0;
static const HashNumber sRemovedKey = 1;
static const HashNumber sCollisionBit = 1;
- HashNumber keyHash = sFreeKey;
- alignas(NonConstT) unsigned char valueData_[sizeof(NonConstT)];
+ HashNumber mKeyHash = sFreeKey;
+ alignas(NonConstT) unsigned char mValueData[sizeof(NonConstT)];
private:
template<class, class, class>
friend class HashTable;
// Some versions of GCC treat it as a -Wstrict-aliasing violation (ergo a
- // -Werror compile error) to reinterpret_cast<> |valueData_| to |T*|, even
+ // -Werror compile error) to reinterpret_cast<> |mValueData| to |T*|, even
// through |void*|. Placing the latter cast in these separate functions
// breaks the chain such that affected GCC versions no longer warn/error.
- void* rawValuePtr() { return valueData_; }
+ void* rawValuePtr() { return mValueData; }
-  static bool isLiveHash(HashNumber hash) { return hash > sRemovedKey; }
+  static bool isLiveHash(HashNumber aHash) { return aHash > sRemovedKey; }
HashTableEntry(const HashTableEntry&) = delete;
void operator=(const HashTableEntry&) = delete;
NonConstT* valuePtr() { return reinterpret_cast<NonConstT*>(rawValuePtr()); }
@@ -905,450 +973,460 @@ private:
MOZ_MAKE_MEM_UNDEFINED(ptr, sizeof(*ptr));
}
public:
HashTableEntry() = default;
~HashTableEntry()
{
- if (isLive())
+ if (isLive()) {
destroyStoredT();
+ }
MOZ_MAKE_MEM_UNDEFINED(this, sizeof(*this));
}
void destroy()
{
MOZ_ASSERT(isLive());
destroyStoredT();
}
- void swap(HashTableEntry* other)
+ void swap(HashTableEntry* aOther)
{
- if (this == other)
+ if (this == aOther) {
return;
+ }
MOZ_ASSERT(isLive());
- if (other->isLive()) {
- Swap(*valuePtr(), *other->valuePtr());
+ if (aOther->isLive()) {
+ Swap(*valuePtr(), *aOther->valuePtr());
} else {
- *other->valuePtr() = std::move(*valuePtr());
+ *aOther->valuePtr() = std::move(*valuePtr());
destroy();
}
- Swap(keyHash, other->keyHash);
+ Swap(mKeyHash, aOther->mKeyHash);
}
T& get()
{
MOZ_ASSERT(isLive());
return *valuePtr();
}
NonConstT& getMutable()
{
MOZ_ASSERT(isLive());
return *valuePtr();
}
- bool isFree() const { return keyHash == sFreeKey; }
+ bool isFree() const { return mKeyHash == sFreeKey; }
void clearLive()
{
MOZ_ASSERT(isLive());
- keyHash = sFreeKey;
+ mKeyHash = sFreeKey;
destroyStoredT();
}
void clear()
{
- if (isLive())
+ if (isLive()) {
destroyStoredT();
-
+ }
MOZ_MAKE_MEM_UNDEFINED(this, sizeof(*this));
- keyHash = sFreeKey;
+ mKeyHash = sFreeKey;
}
- bool isRemoved() const { return keyHash == sRemovedKey; }
+ bool isRemoved() const { return mKeyHash == sRemovedKey; }
void removeLive()
{
MOZ_ASSERT(isLive());
- keyHash = sRemovedKey;
+ mKeyHash = sRemovedKey;
destroyStoredT();
}
- bool isLive() const { return isLiveHash(keyHash); }
+ bool isLive() const { return isLiveHash(mKeyHash); }
void setCollision()
{
MOZ_ASSERT(isLive());
- keyHash |= sCollisionBit;
+ mKeyHash |= sCollisionBit;
}
- void unsetCollision() { keyHash &= ~sCollisionBit; }
+ void unsetCollision() { mKeyHash &= ~sCollisionBit; }
- bool hasCollision() const { return keyHash & sCollisionBit; }
+ bool hasCollision() const { return mKeyHash & sCollisionBit; }
- bool matchHash(HashNumber hn) { return (keyHash & ~sCollisionBit) == hn; }
+  bool matchHash(HashNumber aHn) { return (mKeyHash & ~sCollisionBit) == aHn; }
- HashNumber getKeyHash() const { return keyHash & ~sCollisionBit; }
+ HashNumber getKeyHash() const { return mKeyHash & ~sCollisionBit; }
template<typename... Args>
- void setLive(HashNumber hn, Args&&... args)
+ void setLive(HashNumber aHashNumber, Args&&... aArgs)
{
MOZ_ASSERT(!isLive());
- keyHash = hn;
- new (valuePtr()) T(std::forward<Args>(args)...);
+ mKeyHash = aHashNumber;
+ new (valuePtr()) T(std::forward<Args>(aArgs)...);
MOZ_ASSERT(isLive());
}
};
template<class T, class HashPolicy, class AllocPolicy>
class HashTable : private AllocPolicy
{
friend class mozilla::ReentrancyGuard;
- typedef typename RemoveConst<T>::Type NonConstT;
- typedef typename HashPolicy::KeyType Key;
- typedef typename HashPolicy::Lookup Lookup;
+ using NonConstT = typename RemoveConst<T>::Type;
+ using Key = typename HashPolicy::KeyType;
+ using Lookup = typename HashPolicy::Lookup;
public:
using Entry = HashTableEntry<T>;
// A nullable pointer to a hash table element. A Ptr |p| can be tested
// either explicitly |if (p.found()) p->...| or using boolean conversion
// |if (p) p->...|. Ptr objects must not be used after any mutating hash
// table operations unless |generation()| is tested.
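+  // E.g., a caller that must hold a Ptr across a possible mutation can record
+  // |generation()| beforehand and redo the lookup when it has changed.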
class Ptr
{
friend class HashTable;
- Entry* entry_;
+ Entry* mEntry;
#ifdef DEBUG
- const HashTable* table_;
- Generation generation;
+ const HashTable* mTable;
+ Generation mGeneration;
#endif
protected:
- Ptr(Entry& entry, const HashTable& tableArg)
- : entry_(&entry)
+ Ptr(Entry& aEntry, const HashTable& aTable)
+ : mEntry(&aEntry)
#ifdef DEBUG
- , table_(&tableArg)
- , generation(tableArg.generation())
+ , mTable(&aTable)
+ , mGeneration(aTable.generation())
#endif
{
}
public:
Ptr()
- : entry_(nullptr)
+ : mEntry(nullptr)
#ifdef DEBUG
- , table_(nullptr)
- , generation(0)
+ , mTable(nullptr)
+ , mGeneration(0)
#endif
{
}
- bool isValid() const { return !!entry_; }
+ bool isValid() const { return !!mEntry; }
bool found() const
{
- if (!isValid())
+ if (!isValid()) {
return false;
+ }
#ifdef DEBUG
- MOZ_ASSERT(generation == table_->generation());
+ MOZ_ASSERT(mGeneration == mTable->generation());
#endif
- return entry_->isLive();
+ return mEntry->isLive();
}
explicit operator bool() const { return found(); }
- bool operator==(const Ptr& rhs) const
+ bool operator==(const Ptr& aRhs) const
{
- MOZ_ASSERT(found() && rhs.found());
- return entry_ == rhs.entry_;
+ MOZ_ASSERT(found() && aRhs.found());
+ return mEntry == aRhs.mEntry;
}
- bool operator!=(const Ptr& rhs) const
+ bool operator!=(const Ptr& aRhs) const
{
#ifdef DEBUG
- MOZ_ASSERT(generation == table_->generation());
+ MOZ_ASSERT(mGeneration == mTable->generation());
#endif
- return !(*this == rhs);
+ return !(*this == aRhs);
}
T& operator*() const
{
#ifdef DEBUG
MOZ_ASSERT(found());
- MOZ_ASSERT(generation == table_->generation());
+ MOZ_ASSERT(mGeneration == mTable->generation());
#endif
- return entry_->get();
+ return mEntry->get();
}
T* operator->() const
{
#ifdef DEBUG
MOZ_ASSERT(found());
- MOZ_ASSERT(generation == table_->generation());
+ MOZ_ASSERT(mGeneration == mTable->generation());
#endif
- return &entry_->get();
+ return &mEntry->get();
}
};
// A Ptr that can be used to add a key after a failed lookup.
class AddPtr : public Ptr
{
friend class HashTable;
- HashNumber keyHash;
+
+ HashNumber mKeyHash;
#ifdef DEBUG
- uint64_t mutationCount;
+ uint64_t mMutationCount;
#endif
- AddPtr(Entry& entry, const HashTable& tableArg, HashNumber hn)
- : Ptr(entry, tableArg)
- , keyHash(hn)
+ AddPtr(Entry& aEntry, const HashTable& aTable, HashNumber aHashNumber)
+ : Ptr(aEntry, aTable)
+ , mKeyHash(aHashNumber)
#ifdef DEBUG
- , mutationCount(tableArg.mutationCount)
+ , mMutationCount(aTable.mMutationCount)
#endif
{
}
public:
AddPtr()
- : keyHash(0)
+ : mKeyHash(0)
{
}
};
// A collection of hash table entries. The collection is enumerated by
// calling |front()| followed by |popFront()| as long as |!empty()|. As
// with Ptr/AddPtr, Range objects must not be used after any mutating hash
// table operation unless the |generation()| is tested.
class Range
{
protected:
friend class HashTable;
- Range(const HashTable& tableArg, Entry* c, Entry* e)
- : cur(c)
- , end(e)
+ Range(const HashTable& aTable, Entry* aCur, Entry* aEnd)
+ : mCur(aCur)
+ , mEnd(aEnd)
#ifdef DEBUG
- , table_(&tableArg)
- , mutationCount(tableArg.mutationCount)
- , generation(tableArg.generation())
- , validEntry(true)
+ , mTable(&aTable)
+ , mMutationCount(aTable.mMutationCount)
+ , mGeneration(aTable.generation())
+ , mValidEntry(true)
#endif
{
- while (cur < end && !cur->isLive())
- ++cur;
+ while (mCur < mEnd && !mCur->isLive()) {
+ ++mCur;
+ }
}
- Entry* cur;
- Entry* end;
+ Entry* mCur;
+ Entry* mEnd;
#ifdef DEBUG
- const HashTable* table_;
- uint64_t mutationCount;
- Generation generation;
- bool validEntry;
+ const HashTable* mTable;
+ uint64_t mMutationCount;
+ Generation mGeneration;
+ bool mValidEntry;
#endif
public:
Range()
- : cur(nullptr)
- , end(nullptr)
+ : mCur(nullptr)
+ , mEnd(nullptr)
#ifdef DEBUG
- , table_(nullptr)
- , mutationCount(0)
- , generation(0)
- , validEntry(false)
+ , mTable(nullptr)
+ , mMutationCount(0)
+ , mGeneration(0)
+ , mValidEntry(false)
#endif
{
}
bool empty() const
{
#ifdef DEBUG
- MOZ_ASSERT(generation == table_->generation());
- MOZ_ASSERT(mutationCount == table_->mutationCount);
+ MOZ_ASSERT(mGeneration == mTable->generation());
+ MOZ_ASSERT(mMutationCount == mTable->mMutationCount);
#endif
- return cur == end;
+ return mCur == mEnd;
}
T& front() const
{
MOZ_ASSERT(!empty());
#ifdef DEBUG
- MOZ_ASSERT(validEntry);
- MOZ_ASSERT(generation == table_->generation());
- MOZ_ASSERT(mutationCount == table_->mutationCount);
+ MOZ_ASSERT(mValidEntry);
+ MOZ_ASSERT(mGeneration == mTable->generation());
+ MOZ_ASSERT(mMutationCount == mTable->mMutationCount);
#endif
- return cur->get();
+ return mCur->get();
}
void popFront()
{
MOZ_ASSERT(!empty());
#ifdef DEBUG
- MOZ_ASSERT(generation == table_->generation());
- MOZ_ASSERT(mutationCount == table_->mutationCount);
+ MOZ_ASSERT(mGeneration == mTable->generation());
+ MOZ_ASSERT(mMutationCount == mTable->mMutationCount);
#endif
- while (++cur < end && !cur->isLive())
+ while (++mCur < mEnd && !mCur->isLive()) {
continue;
+ }
#ifdef DEBUG
- validEntry = true;
+ mValidEntry = true;
#endif
}
};
// A Range whose lifetime delimits a mutating enumeration of a hash table.
// Since rehashing when elements were removed during enumeration would be
// bad, it is postponed until the Enum is destructed. Since the Enum's
// destructor touches the hash table, the user must ensure that the hash
// table is still alive when the destructor runs.
class Enum : public Range
{
friend class HashTable;
- HashTable& table_;
- bool rekeyed;
- bool removed;
+ HashTable& mTable;
+ bool mRekeyed;
+ bool mRemoved;
// Enum is movable but not copyable.
Enum(const Enum&) = delete;
void operator=(const Enum&) = delete;
public:
template<class Map>
explicit Enum(Map& map)
: Range(map.all())
- , table_(map.impl)
- , rekeyed(false)
- , removed(false)
+ , mTable(map.mImpl)
+ , mRekeyed(false)
+ , mRemoved(false)
{
}
- MOZ_IMPLICIT Enum(Enum&& other)
- : Range(other)
- , table_(other.table_)
- , rekeyed(other.rekeyed)
- , removed(other.removed)
+ MOZ_IMPLICIT Enum(Enum&& aOther)
+ : Range(aOther)
+ , mTable(aOther.mTable)
+ , mRekeyed(aOther.mRekeyed)
+ , mRemoved(aOther.mRemoved)
{
- other.rekeyed = false;
- other.removed = false;
+ aOther.mRekeyed = false;
+ aOther.mRemoved = false;
}
// Removes the |front()| element from the table, leaving |front()|
// invalid until the next call to |popFront()|. For example:
//
// HashSet<int> s;
- // for (HashSet<int>::Enum e(s); !e.empty(); e.popFront())
- // if (e.front() == 42)
+ // for (HashSet<int>::Enum e(s); !e.empty(); e.popFront()) {
+ // if (e.front() == 42) {
// e.removeFront();
+ // }
+ // }
void removeFront()
{
- table_.remove(*this->cur);
- removed = true;
+ mTable.remove(*this->mCur);
+ mRemoved = true;
#ifdef DEBUG
- this->validEntry = false;
- this->mutationCount = table_.mutationCount;
+ this->mValidEntry = false;
+ this->mMutationCount = mTable.mMutationCount;
#endif
}
NonConstT& mutableFront()
{
MOZ_ASSERT(!this->empty());
#ifdef DEBUG
- MOZ_ASSERT(this->validEntry);
- MOZ_ASSERT(this->generation == this->Range::table_->generation());
- MOZ_ASSERT(this->mutationCount == this->Range::table_->mutationCount);
+ MOZ_ASSERT(this->mValidEntry);
+ MOZ_ASSERT(this->mGeneration == this->Range::mTable->generation());
+ MOZ_ASSERT(this->mMutationCount == this->Range::mTable->mMutationCount);
#endif
- return this->cur->getMutable();
+ return this->mCur->getMutable();
}
// Removes the |front()| element and re-inserts it into the table with
// a new key at the new Lookup position. |front()| is invalid after
// this operation until the next call to |popFront()|.
- void rekeyFront(const Lookup& l, const Key& k)
+ void rekeyFront(const Lookup& aLookup, const Key& aKey)
{
- MOZ_ASSERT(&k != &HashPolicy::getKey(this->cur->get()));
- Ptr p(*this->cur, table_);
- table_.rekeyWithoutRehash(p, l, k);
- rekeyed = true;
+ MOZ_ASSERT(&aKey != &HashPolicy::getKey(this->mCur->get()));
+ Ptr p(*this->mCur, mTable);
+ mTable.rekeyWithoutRehash(p, aLookup, aKey);
+ mRekeyed = true;
#ifdef DEBUG
- this->validEntry = false;
- this->mutationCount = table_.mutationCount;
+ this->mValidEntry = false;
+ this->mMutationCount = mTable.mMutationCount;
#endif
}
- void rekeyFront(const Key& k) { rekeyFront(k, k); }
+ void rekeyFront(const Key& aKey) { rekeyFront(aKey, aKey); }
// Potentially rehashes the table.
~Enum()
{
- if (rekeyed) {
- table_.gen++;
- table_.checkOverRemoved();
+ if (mRekeyed) {
+ mTable.mGen++;
+ mTable.checkOverRemoved();
}
- if (removed)
- table_.compactIfUnderloaded();
+ if (mRemoved) {
+ mTable.compactIfUnderloaded();
+ }
}
};
// HashTable is movable
- HashTable(HashTable&& rhs)
- : AllocPolicy(rhs)
+ HashTable(HashTable&& aRhs)
+ : AllocPolicy(aRhs)
{
- PodAssign(this, &rhs);
- rhs.table = nullptr;
+ PodAssign(this, &aRhs);
+ aRhs.mTable = nullptr;
}
- void operator=(HashTable&& rhs)
+ void operator=(HashTable&& aRhs)
{
- MOZ_ASSERT(this != &rhs, "self-move assignment is prohibited");
- if (table)
- destroyTable(*this, table, capacity());
- PodAssign(this, &rhs);
- rhs.table = nullptr;
+ MOZ_ASSERT(this != &aRhs, "self-move assignment is prohibited");
+ if (mTable) {
+ destroyTable(*this, mTable, capacity());
+ }
+ PodAssign(this, &aRhs);
+ aRhs.mTable = nullptr;
}
private:
// HashTable is not copyable or assignable
HashTable(const HashTable&) = delete;
void operator=(const HashTable&) = delete;
-private:
static const size_t CAP_BITS = 30;
public:
- uint64_t gen : 56; // entry storage generation number
- uint64_t hashShift : 8; // multiplicative hash shift
- Entry* table; // entry storage
- uint32_t entryCount; // number of entries in table
- uint32_t removedCount; // removed entry sentinels in table
+ uint64_t mGen : 56; // entry storage generation number
+ uint64_t mHashShift : 8; // multiplicative hash shift
+ Entry* mTable; // entry storage
+ uint32_t mEntryCount; // number of entries in mTable
+ uint32_t mRemovedCount; // removed entry sentinels in mTable
#ifdef DEBUG
- uint64_t mutationCount;
+ uint64_t mMutationCount;
mutable bool mEntered;
+
// Note that some updates to these stats are not thread-safe. See the
// comment on the three-argument overloading of HashTable::lookup().
mutable struct Stats
{
- uint32_t searches; // total number of table searches
- uint32_t steps; // hash chain links traversed
- uint32_t hits; // searches that found key
- uint32_t misses; // searches that didn't find key
- uint32_t addOverRemoved; // adds that recycled a removed entry
- uint32_t removes; // calls to remove
- uint32_t removeFrees; // calls to remove that freed the entry
- uint32_t grows; // table expansions
- uint32_t shrinks; // table contractions
- uint32_t compresses; // table compressions
- uint32_t rehashes; // tombstone decontaminations
- } stats;
+ uint32_t mSearches; // total number of table searches
+ uint32_t mSteps; // hash chain links traversed
+ uint32_t mHits; // searches that found key
+ uint32_t mMisses; // searches that didn't find key
+ uint32_t mAddOverRemoved; // adds that recycled a removed entry
+ uint32_t mRemoves; // calls to remove
+ uint32_t mRemoveFrees; // calls to remove that freed the entry
+ uint32_t mGrows; // table expansions
+ uint32_t mShrinks; // table contractions
+ uint32_t mCompresses; // table compressions
+ uint32_t mRehashes; // tombstone decontaminations
+ } mStats;
#define METER(x) x
#else
#define METER(x)
#endif
// The default initial capacity is 32 (enough to hold 16 elements), but it
// can be as low as 4.
static const uint32_t sMinCapacity = 4;
@@ -1360,320 +1438,330 @@ public:
static const uint8_t sAlphaDenominator = 4;
static const uint8_t sMinAlphaNumerator = 1; // min alpha: 1/4
static const uint8_t sMaxAlphaNumerator = 3; // max alpha: 3/4
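+  //
+  // E.g., with these bounds a table of capacity 32 counts as overloaded once
+  // live entries plus removed sentinels reach 24 (3/4), and as underloaded
+  // once live entries drop to 8 (1/4) or fewer.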
static const HashNumber sFreeKey = Entry::sFreeKey;
static const HashNumber sRemovedKey = Entry::sRemovedKey;
static const HashNumber sCollisionBit = Entry::sCollisionBit;
- void setTableSizeLog2(uint32_t sizeLog2)
+ void setTableSizeLog2(uint32_t aSizeLog2)
{
- hashShift = kHashNumberBits - sizeLog2;
+ mHashShift = kHashNumberBits - aSizeLog2;
}
- static bool isLiveHash(HashNumber hash) { return Entry::isLiveHash(hash); }
+ static bool isLiveHash(HashNumber aHash) { return Entry::isLiveHash(aHash); }
- static HashNumber prepareHash(const Lookup& l)
+ static HashNumber prepareHash(const Lookup& aLookup)
{
- HashNumber keyHash = ScrambleHashCode(HashPolicy::hash(l));
+ HashNumber keyHash = ScrambleHashCode(HashPolicy::hash(aLookup));
// Avoid reserved hash codes.
- if (!isLiveHash(keyHash))
+ if (!isLiveHash(keyHash)) {
keyHash -= (sRemovedKey + 1);
+ }
return keyHash & ~sCollisionBit;
}
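+
+  // Hash codes 0 (sFreeKey) and 1 (sRemovedKey) are reserved to mark free and
+  // removed entries, so prepareHash() remaps any scrambled code that would
+  // collide with them.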
enum FailureBehavior
{
DontReportFailure = false,
ReportFailure = true
};
- static Entry* createTable(AllocPolicy& alloc,
- uint32_t capacity,
- FailureBehavior reportFailure = ReportFailure)
+ static Entry* createTable(AllocPolicy& aAllocPolicy,
+ uint32_t aCapacity,
+ FailureBehavior aReportFailure = ReportFailure)
{
- Entry* table = reportFailure
- ? alloc.template pod_malloc<Entry>(capacity)
- : alloc.template maybe_pod_malloc<Entry>(capacity);
+ Entry* table = aReportFailure
+ ? aAllocPolicy.template pod_malloc<Entry>(aCapacity)
+ : aAllocPolicy.template maybe_pod_malloc<Entry>(aCapacity);
if (table) {
- for (uint32_t i = 0; i < capacity; i++)
+ for (uint32_t i = 0; i < aCapacity; i++) {
new (&table[i]) Entry();
+ }
}
return table;
}
- static Entry* maybeCreateTable(AllocPolicy& alloc, uint32_t capacity)
+ static Entry* maybeCreateTable(AllocPolicy& aAllocPolicy, uint32_t aCapacity)
{
- Entry* table = alloc.template maybe_pod_malloc<Entry>(capacity);
+ Entry* table = aAllocPolicy.template maybe_pod_malloc<Entry>(aCapacity);
if (table) {
- for (uint32_t i = 0; i < capacity; i++)
+ for (uint32_t i = 0; i < aCapacity; i++) {
new (&table[i]) Entry();
+ }
}
return table;
}
- static void destroyTable(AllocPolicy& alloc,
- Entry* oldTable,
- uint32_t capacity)
+ static void destroyTable(AllocPolicy& aAllocPolicy,
+ Entry* aOldTable,
+ uint32_t aCapacity)
{
- Entry* end = oldTable + capacity;
- for (Entry* e = oldTable; e < end; ++e)
+ Entry* end = aOldTable + aCapacity;
+ for (Entry* e = aOldTable; e < end; ++e) {
e->~Entry();
- alloc.free_(oldTable, capacity);
+ }
+ aAllocPolicy.free_(aOldTable, aCapacity);
}
public:
- explicit HashTable(AllocPolicy ap)
- : AllocPolicy(ap)
- , gen(0)
- , hashShift(kHashNumberBits)
- , table(nullptr)
- , entryCount(0)
- , removedCount(0)
+ explicit HashTable(AllocPolicy aAllocPolicy)
+ : AllocPolicy(aAllocPolicy)
+ , mGen(0)
+ , mHashShift(kHashNumberBits)
+ , mTable(nullptr)
+ , mEntryCount(0)
+ , mRemovedCount(0)
#ifdef DEBUG
- , mutationCount(0)
+ , mMutationCount(0)
, mEntered(false)
#endif
{
}
- MOZ_MUST_USE bool init(uint32_t length)
+ MOZ_MUST_USE bool init(uint32_t aLen)
{
MOZ_ASSERT(!initialized());
// Reject all lengths whose initial computed capacity would exceed
- // sMaxCapacity. Round that maximum length down to the nearest power
- // of two for speedier code.
- if (MOZ_UNLIKELY(length > sMaxInit)) {
+    // sMaxCapacity. Round that maximum length down to the nearest power of
+    // two for speedier code.
+ if (MOZ_UNLIKELY(aLen > sMaxInit)) {
this->reportAllocOverflow();
return false;
}
static_assert((sMaxInit * sAlphaDenominator) / sAlphaDenominator ==
sMaxInit,
"multiplication in numerator below could overflow");
static_assert(sMaxInit * sAlphaDenominator <=
UINT32_MAX - sMaxAlphaNumerator,
"numerator calculation below could potentially overflow");
- // Compute the smallest capacity allowing |length| elements to be
- // inserted without rehashing: ceil(length / max-alpha). (Ceiling
+ // Compute the smallest capacity allowing |aLen| elements to be
+ // inserted without rehashing: ceil(aLen / max-alpha). (Ceiling
// integral division: <http://stackoverflow.com/a/2745086>.)
uint32_t newCapacity =
- (length * sAlphaDenominator + sMaxAlphaNumerator - 1) /
- sMaxAlphaNumerator;
- if (newCapacity < sMinCapacity)
+ (aLen * sAlphaDenominator + sMaxAlphaNumerator - 1) / sMaxAlphaNumerator;
+ if (newCapacity < sMinCapacity) {
newCapacity = sMinCapacity;
+ }
// Round up capacity to next power-of-two.
uint32_t log2 = mozilla::CeilingLog2(newCapacity);
newCapacity = 1u << log2;
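+    // E.g., the default aLen of 16 gives ceil(16 * 4 / 3) = 22, which rounds
+    // up to a capacity of 32, matching the documented default.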
- MOZ_ASSERT(newCapacity >= length);
+ MOZ_ASSERT(newCapacity >= aLen);
MOZ_ASSERT(newCapacity <= sMaxCapacity);
- table = createTable(*this, newCapacity);
- if (!table)
+ mTable = createTable(*this, newCapacity);
+ if (!mTable) {
return false;
-
+ }
setTableSizeLog2(log2);
- METER(memset(&stats, 0, sizeof(stats)));
+ METER(memset(&mStats, 0, sizeof(mStats)));
return true;
}
- bool initialized() const { return !!table; }
+ bool initialized() const { return !!mTable; }
~HashTable()
{
- if (table)
- destroyTable(*this, table, capacity());
+ if (mTable) {
+ destroyTable(*this, mTable, capacity());
+ }
}
private:
- HashNumber hash1(HashNumber hash0) const { return hash0 >> hashShift; }
+ HashNumber hash1(HashNumber aHash0) const { return aHash0 >> mHashShift; }
struct DoubleHash
{
- HashNumber h2;
- HashNumber sizeMask;
+ HashNumber mHash2;
+ HashNumber mSizeMask;
};
- DoubleHash hash2(HashNumber curKeyHash) const
+ DoubleHash hash2(HashNumber aCurKeyHash) const
{
- uint32_t sizeLog2 = kHashNumberBits - hashShift;
- DoubleHash dh = { ((curKeyHash << sizeLog2) >> hashShift) | 1,
+ uint32_t sizeLog2 = kHashNumberBits - mHashShift;
+ DoubleHash dh = { ((aCurKeyHash << sizeLog2) >> mHashShift) | 1,
(HashNumber(1) << sizeLog2) - 1 };
return dh;
}
- static HashNumber applyDoubleHash(HashNumber h1, const DoubleHash& dh)
+ static HashNumber applyDoubleHash(HashNumber aHash1,
+ const DoubleHash& aDoubleHash)
{
- return (h1 - dh.h2) & dh.sizeMask;
+ return (aHash1 - aDoubleHash.mHash2) & aDoubleHash.mSizeMask;
}
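+
+  // Together, hash1() and applyDoubleHash() implement double hashing: probe
+  // |i| visits (hash1(k) - i * mHash2) & mSizeMask. Because mHash2 is forced
+  // odd above, it is coprime with the power-of-two table size, so the probe
+  // sequence visits every slot before repeating.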
bool overloaded()
{
static_assert(sMaxCapacity <= UINT32_MAX / sMaxAlphaNumerator,
"multiplication below could overflow");
- return entryCount + removedCount >=
+ return mEntryCount + mRemovedCount >=
capacity() * sMaxAlphaNumerator / sAlphaDenominator;
}
// Would the table be underloaded if it had the given capacity and entryCount?
- static bool wouldBeUnderloaded(uint32_t capacity, uint32_t entryCount)
+ static bool wouldBeUnderloaded(uint32_t aCapacity, uint32_t aEntryCount)
{
static_assert(sMaxCapacity <= UINT32_MAX / sMinAlphaNumerator,
"multiplication below could overflow");
- return capacity > sMinCapacity &&
- entryCount <= capacity * sMinAlphaNumerator / sAlphaDenominator;
+ return aCapacity > sMinCapacity &&
+ aEntryCount <= aCapacity * sMinAlphaNumerator / sAlphaDenominator;
}
- bool underloaded() { return wouldBeUnderloaded(capacity(), entryCount); }
+ bool underloaded() { return wouldBeUnderloaded(capacity(), mEntryCount); }
- static MOZ_ALWAYS_INLINE bool match(Entry& e, const Lookup& l)
+ static MOZ_ALWAYS_INLINE bool match(Entry& aEntry, const Lookup& aLookup)
{
- return HashPolicy::match(HashPolicy::getKey(e.get()), l);
+ return HashPolicy::match(HashPolicy::getKey(aEntry.get()), aLookup);
}
// Warning: in order for readonlyThreadsafeLookup() to be safe this
// function must not modify the table in any way when |collisionBit| is 0.
// (The use of the METER() macro to increment stats violates this
// restriction but we will live with that for now because it's enabled so
// rarely.)
- MOZ_ALWAYS_INLINE Entry& lookup(const Lookup& l,
- HashNumber keyHash,
- uint32_t collisionBit) const
+ MOZ_ALWAYS_INLINE Entry& lookup(const Lookup& aLookup,
+ HashNumber aKeyHash,
+ uint32_t aCollisionBit) const
{
- MOZ_ASSERT(isLiveHash(keyHash));
- MOZ_ASSERT(!(keyHash & sCollisionBit));
- MOZ_ASSERT(collisionBit == 0 || collisionBit == sCollisionBit);
- MOZ_ASSERT(table);
- METER(stats.searches++);
+ MOZ_ASSERT(isLiveHash(aKeyHash));
+ MOZ_ASSERT(!(aKeyHash & sCollisionBit));
+ MOZ_ASSERT(aCollisionBit == 0 || aCollisionBit == sCollisionBit);
+ MOZ_ASSERT(mTable);
+ METER(mStats.mSearches++);
// Compute the primary hash address.
- HashNumber h1 = hash1(keyHash);
- Entry* entry = &table[h1];
+ HashNumber h1 = hash1(aKeyHash);
+ Entry* entry = &mTable[h1];
// Miss: return space for a new entry.
if (entry->isFree()) {
- METER(stats.misses++);
+ METER(mStats.mMisses++);
return *entry;
}
// Hit: return entry.
- if (entry->matchHash(keyHash) && match(*entry, l)) {
- METER(stats.hits++);
+ if (entry->matchHash(aKeyHash) && match(*entry, aLookup)) {
+ METER(mStats.mHits++);
return *entry;
}
// Collision: double hash.
- DoubleHash dh = hash2(keyHash);
+ DoubleHash dh = hash2(aKeyHash);
// Save the first removed entry pointer so we can recycle later.
Entry* firstRemoved = nullptr;
while (true) {
if (MOZ_UNLIKELY(entry->isRemoved())) {
- if (!firstRemoved)
+ if (!firstRemoved) {
firstRemoved = entry;
+ }
} else {
- if (collisionBit == sCollisionBit)
+ if (aCollisionBit == sCollisionBit) {
entry->setCollision();
+ }
}
- METER(stats.steps++);
+ METER(mStats.mSteps++);
h1 = applyDoubleHash(h1, dh);
- entry = &table[h1];
+ entry = &mTable[h1];
if (entry->isFree()) {
- METER(stats.misses++);
+ METER(mStats.mMisses++);
return firstRemoved ? *firstRemoved : *entry;
}
- if (entry->matchHash(keyHash) && match(*entry, l)) {
- METER(stats.hits++);
+ if (entry->matchHash(aKeyHash) && match(*entry, aLookup)) {
+ METER(mStats.mHits++);
return *entry;
}
}
}
- // This is a copy of lookup hardcoded to the assumptions:
- // 1. the lookup is a lookupForAdd
- // 2. the key, whose |keyHash| has been passed is not in the table,
- // 3. no entries have been removed from the table.
- // This specialized search avoids the need for recovering lookup values
- // from entries, which allows more flexible Lookup/Key types.
+ // This is a copy of lookup() hardcoded to the assumptions:
+ // 1. the lookup is for an add, i.e. it comes from lookupForAdd();
+ // 2. the key, whose hash was passed in as |aKeyHash|, is not in the
+ //    table; and
+ // 3. no entries have been removed from the table.
+ // This specialized search avoids the need to recover lookup values from
+ // entries, which allows more flexible Lookup/Key types.
- Entry& findFreeEntry(HashNumber keyHash)
+ Entry& findFreeEntry(HashNumber aKeyHash)
{
- MOZ_ASSERT(!(keyHash & sCollisionBit));
- MOZ_ASSERT(table);
- METER(stats.searches++);
+ MOZ_ASSERT(!(aKeyHash & sCollisionBit));
+ MOZ_ASSERT(mTable);
+ METER(mStats.mSearches++);
- // We assume 'keyHash' has already been distributed.
+ // We assume 'aKeyHash' has already been distributed.
// Compute the primary hash address.
- HashNumber h1 = hash1(keyHash);
- Entry* entry = &table[h1];
+ HashNumber h1 = hash1(aKeyHash);
+ Entry* entry = &mTable[h1];
// Miss: return space for a new entry.
if (!entry->isLive()) {
- METER(stats.misses++);
+ METER(mStats.mMisses++);
return *entry;
}
// Collision: double hash.
- DoubleHash dh = hash2(keyHash);
+ DoubleHash dh = hash2(aKeyHash);
while (true) {
MOZ_ASSERT(!entry->isRemoved());
entry->setCollision();
- METER(stats.steps++);
+ METER(mStats.mSteps++);
h1 = applyDoubleHash(h1, dh);
- entry = &table[h1];
+ entry = &mTable[h1];
if (!entry->isLive()) {
- METER(stats.misses++);
+ METER(mStats.mMisses++);
return *entry;
}
}
}
enum RebuildStatus
{
NotOverloaded,
Rehashed,
RehashFailed
};
- RebuildStatus changeTableSize(int deltaLog2,
- FailureBehavior reportFailure = ReportFailure)
+ RebuildStatus changeTableSize(int aDeltaLog2,
+ FailureBehavior aReportFailure = ReportFailure)
{
- // Look, but don't touch, until we succeed in getting new entry store.
+ // Look, but don't touch, until we succeed in getting the new entry store.
- Entry* oldTable = table;
+ Entry* oldTable = mTable;
uint32_t oldCap = capacity();
- uint32_t newLog2 = kHashNumberBits - hashShift + deltaLog2;
+ uint32_t newLog2 = kHashNumberBits - mHashShift + aDeltaLog2;
uint32_t newCapacity = 1u << newLog2;
if (MOZ_UNLIKELY(newCapacity > sMaxCapacity)) {
- if (reportFailure)
+ if (aReportFailure) {
this->reportAllocOverflow();
+ }
return RehashFailed;
}
- Entry* newTable = createTable(*this, newCapacity, reportFailure);
- if (!newTable)
+ Entry* newTable = createTable(*this, newCapacity, aReportFailure);
+ if (!newTable) {
return RehashFailed;
+ }
// We can't fail from here on, so update table parameters.
setTableSizeLog2(newLog2);
- removedCount = 0;
- gen++;
- table = newTable;
+ mRemovedCount = 0;
+ mGen++;
+ mTable = newTable;
// Copy only live entries, leaving removed ones behind.
Entry* end = oldTable + oldCap;
for (Entry* src = oldTable; src < end; ++src) {
if (src->isLive()) {
HashNumber hn = src->getKeyHash();
findFreeEntry(hn).setLive(
hn, std::move(const_cast<typename Entry::NonConstT&>(src->get())));
src->destroy();
}
}
@@ -1685,386 +1773,401 @@ private:
// All entries have been destroyed, no need to destroyTable.
this->free_(oldTable, oldCap);
return Rehashed;
}
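Note that aDeltaLog2 is a shift on the current log2 capacity, so this one function serves growing (+1), shrinking (-1), and the tombstone-compressing rebuild at unchanged size (0). A sketch of the capacity arithmetic, using the representation capacity == 1 << (kHashNumberBits - mHashShift) maintained by setTableSizeLog2() (32-bit width assumed, as above):

```cpp
#include <cstdint>

// New capacity for a given shift state and delta; the sMaxCapacity overflow
// check performed by changeTableSize() is elided here.
uint32_t newCapacityFor(uint32_t hashShift, int deltaLog2)
{
  constexpr uint32_t kHashNumberBits = 32; // assumption, as above
  uint32_t newLog2 = kHashNumberBits - hashShift + deltaLog2;
  return 1u << newLog2; // e.g. hashShift 25 (128 slots), delta +1 -> 256
}
```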
bool shouldCompressTable()
{
// Compress if a quarter or more of all entries are removed.
- return removedCount >= (capacity() >> 2);
+ return mRemovedCount >= (capacity() >> 2);
}
- RebuildStatus checkOverloaded(FailureBehavior reportFailure = ReportFailure)
+ RebuildStatus checkOverloaded(FailureBehavior aReportFailure = ReportFailure)
{
- if (!overloaded())
+ if (!overloaded()) {
return NotOverloaded;
+ }
int deltaLog2;
if (shouldCompressTable()) {
- METER(stats.compresses++);
+ METER(mStats.mCompresses++);
deltaLog2 = 0;
} else {
- METER(stats.grows++);
+ METER(mStats.mGrows++);
deltaLog2 = 1;
}
- return changeTableSize(deltaLog2, reportFailure);
+ return changeTableSize(deltaLog2, aReportFailure);
}
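checkOverloaded() thus chooses between two rebuilds: when tombstones fill a quarter or more of the slots it rebuilds at the same size, reclaiming them without allocating a larger store; otherwise it doubles the table. The decision, restated as a standalone sketch:

```cpp
#include <cstdint>

// Delta handed to changeTableSize() once the table tests overloaded:
// 0 compresses tombstones in a same-size rebuild, 1 doubles the capacity.
int overloadDeltaLog2(uint32_t capacity, uint32_t removedCount)
{
  return removedCount >= (capacity >> 2) ? 0 : 1;
}
```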
// Infallibly rehash the table if we are overloaded with removals.
void checkOverRemoved()
{
if (overloaded()) {
- if (checkOverloaded(DontReportFailure) == RehashFailed)
+ if (checkOverloaded(DontReportFailure) == RehashFailed) {
rehashTableInPlace();
+ }
}
}
- void remove(Entry& e)
+ void remove(Entry& aEntry)
{
- MOZ_ASSERT(table);
- METER(stats.removes++);
+ MOZ_ASSERT(mTable);
+ METER(mStats.mRemoves++);
- if (e.hasCollision()) {
- e.removeLive();
- removedCount++;
+ if (aEntry.hasCollision()) {
+ aEntry.removeLive();
+ mRemovedCount++;
} else {
- METER(stats.removeFrees++);
- e.clearLive();
+ METER(mStats.mRemoveFrees++);
+ aEntry.clearLive();
}
- entryCount--;
+ mEntryCount--;
#ifdef DEBUG
- mutationCount++;
+ mMutationCount++;
#endif
}
void checkUnderloaded()
{
if (underloaded()) {
- METER(stats.shrinks++);
+ METER(mStats.mShrinks++);
(void)changeTableSize(-1, DontReportFailure);
}
}
// Resize the table down to the largest capacity which doesn't underload the
// table. Since we call checkUnderloaded() on every remove, you only need
// to call this after a bulk removal of items done without calling remove().
void compactIfUnderloaded()
{
int32_t resizeLog2 = 0;
uint32_t newCapacity = capacity();
- while (wouldBeUnderloaded(newCapacity, entryCount)) {
+ while (wouldBeUnderloaded(newCapacity, mEntryCount)) {
newCapacity = newCapacity >> 1;
resizeLog2--;
}
- if (resizeLog2 != 0)
+ if (resizeLog2 != 0) {
(void)changeTableSize(resizeLog2, DontReportFailure);
+ }
}
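The loop computes the (negative) log2 delta down to the largest capacity that would not be underloaded, then applies it in a single resize. A standalone sketch, reusing wouldBeUnderloaded() from the earlier load-factor sketch:

```cpp
#include <cstdint>

bool wouldBeUnderloaded(uint32_t capacity, uint32_t entryCount); // sketched earlier

// Negative log2 delta to the largest non-underloaded capacity; 0 when the
// current capacity is already acceptable.
int32_t compactionDeltaLog2(uint32_t capacity, uint32_t entryCount)
{
  int32_t resizeLog2 = 0;
  while (wouldBeUnderloaded(capacity, entryCount)) {
    capacity >>= 1;
    --resizeLog2;
  }
  return resizeLog2; // e.g. 1024 slots holding 20 entries -> -4 (64 slots)
}
```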
- // This is identical to changeTableSize(currentSize), but without requiring
- // a second table. We do this by recycling the collision bits to tell us if
- // the element is already inserted or still waiting to be inserted. Since
- // already-inserted elements win any conflicts, we get the same table as we
- // would have gotten through random insertion order.
+ // This is identical to changeTableSize(0) (i.e. a rebuild at the current
+ // capacity), but without requiring a second table. We do this by recycling
+ // the collision bits to tell us whether an element is already inserted or
+ // still waiting to be inserted. Since already-inserted elements win any
+ // conflicts, we get the same table as we would have gotten through random
+ // insertion order.
void rehashTableInPlace()
{
- METER(stats.rehashes++);
- removedCount = 0;
- gen++;
- for (size_t i = 0; i < capacity(); ++i)
- table[i].unsetCollision();
-
+ METER(mStats.mRehashes++);
+ mRemovedCount = 0;
+ mGen++;
+ for (size_t i = 0; i < capacity(); ++i) {
+ mTable[i].unsetCollision();
+ }
for (size_t i = 0; i < capacity();) {
- Entry* src = &table[i];
+ Entry* src = &mTable[i];
if (!src->isLive() || src->hasCollision()) {
++i;
continue;
}
HashNumber keyHash = src->getKeyHash();
HashNumber h1 = hash1(keyHash);
DoubleHash dh = hash2(keyHash);
- Entry* tgt = &table[h1];
+ Entry* tgt = &mTable[h1];
while (true) {
if (!tgt->hasCollision()) {
src->swap(tgt);
tgt->setCollision();
break;
}
h1 = applyDoubleHash(h1, dh);
- tgt = &table[h1];
+ tgt = &mTable[h1];
}
}
// TODO: this algorithm leaves collision bits on *all* elements, even if
// they are on no collision path. We have the option of setting the
// collision bits correctly on a subsequent pass or skipping the rehash
// unless we are totally filled with tombstones: benchmark to find out
// which approach is best.
}
- // Note: |l| may be a reference to a piece of |u|, so this function
- // must take care not to use |l| after moving |u|.
+ // Note: |aLookup| may be a reference to a piece of |aArgs|, so this
+ // function must take care not to use |aLookup| after moving |aArgs|.
//
// Prefer to use putNewInfallible; this function does not check
// invariants.
template<typename... Args>
- void putNewInfallibleInternal(const Lookup& l, Args&&... args)
+ void putNewInfallibleInternal(const Lookup& aLookup, Args&&... aArgs)
{
- MOZ_ASSERT(table);
+ MOZ_ASSERT(mTable);
- HashNumber keyHash = prepareHash(l);
+ HashNumber keyHash = prepareHash(aLookup);
Entry* entry = &findFreeEntry(keyHash);
MOZ_ASSERT(entry);
if (entry->isRemoved()) {
- METER(stats.addOverRemoved++);
- removedCount--;
+ METER(mStats.mAddOverRemoved++);
+ mRemovedCount--;
keyHash |= sCollisionBit;
}
- entry->setLive(keyHash, std::forward<Args>(args)...);
- entryCount++;
+ entry->setLive(keyHash, std::forward<Args>(aArgs)...);
+ mEntryCount++;
#ifdef DEBUG
- mutationCount++;
+ mMutationCount++;
#endif
}
public:
void clear()
{
- Entry* end = table + capacity();
- for (Entry* e = table; e < end; ++e)
+ Entry* end = mTable + capacity();
+ for (Entry* e = mTable; e < end; ++e) {
e->clear();
-
- removedCount = 0;
- entryCount = 0;
+ }
+ mRemovedCount = 0;
+ mEntryCount = 0;
#ifdef DEBUG
- mutationCount++;
+ mMutationCount++;
#endif
}
void clearAndShrink()
{
clear();
compactIfUnderloaded();
}
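The teardown operations trade memory against reuse cost: clear() destroys every entry but keeps the entry store, clearAndShrink() additionally compacts the now-empty store, and finish() below frees the store outright. A hedged usage sketch against the public HashMap wrapper, assuming it forwards these methods as it does the others in this header:

```cpp
#include "mozilla/HashTable.h" // exported name of this header (assumed)

using HM = mozilla::HashMap<int, char>;

void teardownModes(HM& aMap)
{
  aMap.clear();          // destroy all entries; keep the entry store
  aMap.clearAndShrink(); // clear(), then shrink the now-empty store
  aMap.finish();         // free the store; init() is required before reuse
}
```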
void finish()
{
#ifdef DEBUG
MOZ_ASSERT(!mEntered);
#endif
- if (!table)
+ if (!mTable) {
return;
+ }
- destroyTable(*this, table, capacity());
- table = nullptr;
- gen++;
- entryCount = 0;
- removedCount = 0;
+ destroyTable(*this, mTable, capacity());
+ mTable = nullptr;
+ mGen++;
+ mEntryCount = 0;
+ mRemovedCount = 0;
#ifdef DEBUG
- mutationCount++;
+ mMutationCount++;
#endif
}
Range all() const
{
- MOZ_ASSERT(table);
- return Range(*this, table, table + capacity());
+ MOZ_ASSERT(mTable);
+ return Range(*this, mTable, mTable + capacity());
}
bool empty() const
{
- MOZ_ASSERT(table);
- return !entryCount;
+ MOZ_ASSERT(mTable);
+ return !mEntryCount;
}
uint32_t count() const
{
- MOZ_ASSERT(table);
- return entryCount;
+ MOZ_ASSERT(mTable);
+ return mEntryCount;
}
uint32_t capacity() const
{
- MOZ_ASSERT(table);
- return 1u << (kHashNumberBits - hashShift);
+ MOZ_ASSERT(mTable);
+ return 1u << (kHashNumberBits - mHashShift);
}
Generation generation() const
{
- MOZ_ASSERT(table);
- return Generation(gen);
+ MOZ_ASSERT(mTable);
+ return Generation(mGen);
}
- size_t sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const
+ size_t sizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const
{
- return mallocSizeOf(table);
+ return aMallocSizeOf(mTable);
}
- size_t sizeOfIncludingThis(MallocSizeOf mallocSizeOf) const
+ size_t sizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const
{
- return mallocSizeOf(this) + sizeOfExcludingThis(mallocSizeOf);
+ return aMallocSizeOf(this) + sizeOfExcludingThis(aMallocSizeOf);
}
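MallocSizeOf (from mozilla/MemoryReporting.h) is a size_t (*)(const void*) callback, in practice something like moz_malloc_size_of inside a memory reporter. A minimal sketch with a stub callback (HM as in the previous sketch):

```cpp
#include <cstddef>

// Stub standing in for a real MallocSizeOf callback.
static size_t StubMallocSizeOf(const void*) { return 0; }

size_t entryStoreBytes(const HM& aMap)
{
  // Counts only the heap-allocated entry store; prefer
  // sizeOfIncludingThis() when the map object itself is heap-allocated.
  return aMap.sizeOfExcludingThis(StubMallocSizeOf);
}
```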
- MOZ_ALWAYS_INLINE Ptr lookup(const Lookup& l) const
+ MOZ_ALWAYS_INLINE Ptr lookup(const Lookup& aLookup) const
{
ReentrancyGuard g(*this);
- if (!HasHash<HashPolicy>(l))
+ if (!HasHash<HashPolicy>(aLookup)) {
return Ptr();
- HashNumber keyHash = prepareHash(l);
- return Ptr(lookup(l, keyHash, 0), *this);
+ }
+ HashNumber keyHash = prepareHash(aLookup);
+ return Ptr(lookup(aLookup, keyHash, 0), *this);
}
- MOZ_ALWAYS_INLINE Ptr readonlyThreadsafeLookup(const Lookup& l) const
+ MOZ_ALWAYS_INLINE Ptr readonlyThreadsafeLookup(const Lookup& aLookup) const
{
- if (!HasHash<HashPolicy>(l))
+ if (!HasHash<HashPolicy>(aLookup)) {
return Ptr();
- HashNumber keyHash = prepareHash(l);
- return Ptr(lookup(l, keyHash, 0), *this);
+ }
+ HashNumber keyHash = prepareHash(aLookup);
+ return Ptr(lookup(aLookup, keyHash, 0), *this);
}
- MOZ_ALWAYS_INLINE AddPtr lookupForAdd(const Lookup& l) const
+ MOZ_ALWAYS_INLINE AddPtr lookupForAdd(const Lookup& aLookup) const
{
ReentrancyGuard g(*this);
- if (!EnsureHash<HashPolicy>(l))
+ if (!EnsureHash<HashPolicy>(aLookup)) {
return AddPtr();
- HashNumber keyHash = prepareHash(l);
+ }
+ HashNumber keyHash = prepareHash(aLookup);
// Directly call the constructor in the return statement to avoid
// excess copying when building with Visual Studio 2017.
// See bug 1385181.
- return AddPtr(lookup(l, keyHash, sCollisionBit), *this, keyHash);
+ return AddPtr(lookup(aLookup, keyHash, sCollisionBit), *this, keyHash);
}
template<typename... Args>
- MOZ_MUST_USE bool add(AddPtr& p, Args&&... args)
+ MOZ_MUST_USE bool add(AddPtr& aPtr, Args&&... aArgs)
{
ReentrancyGuard g(*this);
- MOZ_ASSERT(table);
- MOZ_ASSERT_IF(p.isValid(), p.table_ == this);
- MOZ_ASSERT(!p.found());
- MOZ_ASSERT(!(p.keyHash & sCollisionBit));
+ MOZ_ASSERT(mTable);
+ MOZ_ASSERT_IF(aPtr.isValid(), aPtr.mTable == this);
+ MOZ_ASSERT(!aPtr.found());
+ MOZ_ASSERT(!(aPtr.mKeyHash & sCollisionBit));
- // Check for error from ensureHash() here.
+ // Check for error from EnsureHash() here.
- if (!p.isValid())
+ if (!aPtr.isValid()) {
return false;
+ }
- MOZ_ASSERT(p.generation == generation());
+ MOZ_ASSERT(aPtr.mGeneration == generation());
#ifdef DEBUG
- MOZ_ASSERT(p.mutationCount == mutationCount);
+ MOZ_ASSERT(aPtr.mMutationCount == mMutationCount);
#endif
// Changing an entry from removed to live does not affect whether we
// are overloaded and can be handled separately.
- if (p.entry_->isRemoved()) {
- if (!this->checkSimulatedOOM())
+ if (aPtr.mEntry->isRemoved()) {
+ if (!this->checkSimulatedOOM()) {
return false;
- METER(stats.addOverRemoved++);
- removedCount--;
- p.keyHash |= sCollisionBit;
+ }
+ METER(mStats.mAddOverRemoved++);
+ mRemovedCount--;
+ aPtr.mKeyHash |= sCollisionBit;
} else {
- // Preserve the validity of |p.entry_|.
+ // Preserve the validity of |aPtr.mEntry|.
RebuildStatus status = checkOverloaded();
- if (status == RehashFailed)
+ if (status == RehashFailed) {
return false;
- if (status == NotOverloaded && !this->checkSimulatedOOM())
+ }
+ if (status == NotOverloaded && !this->checkSimulatedOOM()) {
return false;
- if (status == Rehashed)
- p.entry_ = &findFreeEntry(p.keyHash);
+ }
+ if (status == Rehashed) {
+ aPtr.mEntry = &findFreeEntry(aPtr.mKeyHash);
+ }
}
- p.entry_->setLive(p.keyHash, std::forward<Args>(args)...);
- entryCount++;
+ aPtr.mEntry->setLive(aPtr.mKeyHash, std::forward<Args>(aArgs)...);
+ mEntryCount++;
#ifdef DEBUG
- mutationCount++;
- p.generation = generation();
- p.mutationCount = mutationCount;
+ mMutationCount++;
+ aPtr.mGeneration = generation();
+ aPtr.mMutationCount = mMutationCount;
#endif
return true;
}
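add() is the second half of the lookupForAdd() protocol: the AddPtr carries the key hash (plus, in DEBUG builds, the generation and mutation counts) from the failed lookup, and add() revalidates it, recycles a tombstone or handles a pending rehash by recomputing mEntry, and constructs the new entry in place. A hedged usage sketch of the intended pattern (HM as above):

```cpp
// Insert-or-update with a single hash computation and probe walk.
bool putOrUpdate(HM& aMap, int aKey, char aValue)
{
  HM::AddPtr p = aMap.lookupForAdd(aKey);
  if (p) {
    p->value() = aValue;            // found: update the entry in place
    return true;
  }
  return aMap.add(p, aKey, aValue); // not found: add it; false on OOM
}
```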
- // Note: |l| may be a reference to a piece of |u|, so this function
- // must take care not to use |l| after moving |u|.
+ // Note: |aLookup| may be a reference to a piece of |aArgs|, so this
+ // function must take care not to use |aLookup| after moving |aArgs|.
template<typename... Args>
- void putNewInfallible(const Lookup& l, Args&&... args)
+ void putNewInfallible(const Lookup& aLookup, Args&&... aArgs)
{
- MOZ_ASSERT(!lookup(l).found());
+ MOZ_ASSERT(!lookup(aLookup).found());
ReentrancyGuard g(*this);
- putNewInfallibleInternal(l, std::forward<Args>(args)...);
+ putNewInfallibleInternal(aLookup, std::forward<Args>(aArgs)...);
}
- // Note: |l| may be alias arguments in |args|, so this function must take
- // care not to use |l| after moving |args|.
+ // Note: |aLookup| may alias arguments in |aArgs|, so this function must
+ // take care not to use |aLookup| after moving |aArgs|.
template<typename... Args>
- MOZ_MUST_USE bool putNew(const Lookup& l, Args&&... args)
+ MOZ_MUST_USE bool putNew(const Lookup& aLookup, Args&&... aArgs)
{
- if (!this->checkSimulatedOOM())
+ if (!this->checkSimulatedOOM()) {
return false;
-
- if (!EnsureHash<HashPolicy>(l))
+ }
+ if (!EnsureHash<HashPolicy>(aLookup)) {
return false;
-
- if (checkOverloaded() == RehashFailed)
+ }
+ if (checkOverloaded() == RehashFailed) {
return false;
-
- putNewInfallible(l, std::forward<Args>(args)...);
+ }
+ putNewInfallible(aLookup, std::forward<Args>(aArgs)...);
return true;
}
- // Note: |l| may be a reference to a piece of |u|, so this function
- // must take care not to use |l| after moving |u|.
+ // Note: |aLookup| may be a reference to a piece of |aArgs|, so this
+ // function must take care not to use |aLookup| after moving |aArgs|.
template<typename... Args>
- MOZ_MUST_USE bool relookupOrAdd(AddPtr& p, const Lookup& l, Args&&... args)
+ MOZ_MUST_USE bool relookupOrAdd(AddPtr& aPtr,
+ const Lookup& aLookup,
+ Args&&... aArgs)
{
- // Check for error from ensureHash() here.
+ // Check for error from EnsureHash() here.
- if (!p.isValid())
+ if (!aPtr.isValid()) {
return false;
-
+ }
#ifdef DEBUG
- p.generation = generation();
- p.mutationCount = mutationCount;
+ aPtr.mGeneration = generation();
+ aPtr.mMutationCount = mMutationCount;
#endif
{
ReentrancyGuard g(*this);
- MOZ_ASSERT(prepareHash(l) == p.keyHash); // l has not been destroyed
- p.entry_ = &lookup(l, p.keyHash, sCollisionBit);
+ // Check that aLookup has not been destroyed.
+ MOZ_ASSERT(prepareHash(aLookup) == aPtr.mKeyHash);
+ aPtr.mEntry = &lookup(aLookup, aPtr.mKeyHash, sCollisionBit);
}
- return p.found() || add(p, std::forward<Args>(args)...);
+ return aPtr.found() || add(aPtr, std::forward<Args>(aArgs)...);
}
- void remove(Ptr p)
+ void remove(Ptr aPtr)
{
- MOZ_ASSERT(table);
+ MOZ_ASSERT(mTable);
ReentrancyGuard g(*this);
- MOZ_ASSERT(p.found());
- MOZ_ASSERT(p.generation == generation());
- remove(*p.entry_);
+ MOZ_ASSERT(aPtr.found());
+ MOZ_ASSERT(aPtr.mGeneration == generation());
+ remove(*aPtr.mEntry);
checkUnderloaded();
}
- void rekeyWithoutRehash(Ptr p, const Lookup& l, const Key& k)
+ void rekeyWithoutRehash(Ptr aPtr, const Lookup& aLookup, const Key& aKey)
{
- MOZ_ASSERT(table);
+ MOZ_ASSERT(mTable);
ReentrancyGuard g(*this);
- MOZ_ASSERT(p.found());
- MOZ_ASSERT(p.generation == generation());
- typename HashTableEntry<T>::NonConstT t(std::move(*p));
- HashPolicy::setKey(t, const_cast<Key&>(k));
- remove(*p.entry_);
- putNewInfallibleInternal(l, std::move(t));
+ MOZ_ASSERT(aPtr.found());
+ MOZ_ASSERT(aPtr.mGeneration == generation());
+ typename HashTableEntry<T>::NonConstT t(std::move(*aPtr));
+ HashPolicy::setKey(t, const_cast<Key&>(aKey));
+ remove(*aPtr.mEntry);
+ putNewInfallibleInternal(aLookup, std::move(t));
}
- void rekeyAndMaybeRehash(Ptr p, const Lookup& l, const Key& k)
+ void rekeyAndMaybeRehash(Ptr aPtr, const Lookup& aLookup, const Key& aKey)
{
- rekeyWithoutRehash(p, l, k);
+ rekeyWithoutRehash(aPtr, aLookup, aKey);
checkOverRemoved();
}
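rekeyAndMaybeRehash() is the safer of the two: rekeyWithoutRehash() moves the entry out, retargets the key through HashPolicy::setKey(), removes the stale slot and reinserts under aLookup, and checkOverRemoved() then rehashes in place if that removal tipped the table over the tombstone threshold. A hedged sketch of the calling pattern at this (detail::HashTable) level; the public containers wrap it in their own rekey helpers:

```cpp
// Move an entry from aOldLookup to aNewKey if present. Illustrative only;
// Table is any instantiation of the HashTable defined in this header.
template<typename Table, typename Lookup, typename Key>
void rekeyIfPresent(Table& aTable, const Lookup& aOldLookup,
                    const Lookup& aNewLookup, const Key& aNewKey)
{
  if (auto p = aTable.lookup(aOldLookup)) {
    aTable.rekeyAndMaybeRehash(p, aNewLookup, aNewKey);
  }
}
```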
#undef METER
};
} // namespace detail
} // namespace mozilla