--- a/mfbt/HashTable.h
+++ b/mfbt/HashTable.h
@@ -46,21 +46,25 @@
#include "mozilla/Opaque.h"
#include "mozilla/PodOperations.h"
#include "mozilla/ReentrancyGuard.h"
#include "mozilla/TypeTraits.h"
#include "mozilla/UniquePtr.h"
namespace mozilla {
-template <class> struct DefaultHasher;
-template <class, class> class HashMapEntry;
+template<class>
+struct DefaultHasher;
+template<class, class>
+class HashMapEntry;
namespace detail {
- template <typename T> class HashTableEntry;
- template <class T, class HashPolicy, class AllocPolicy> class HashTable;
+template<typename T>
+class HashTableEntry;
+template<class T, class HashPolicy, class AllocPolicy>
+class HashTable;
} // namespace detail
/*****************************************************************************/
// The "generation" of a hash table is an opaque value indicating the state of
// modification of the hash table through its lifetime. If the generation of
// a hash table compares equal at times T1 and T2, then lookups in the hash
// table, pointers to (or into) hash table entries, etc. at time T1 are valid
@@ -82,259 +86,281 @@ using Generation = Opaque<uint64_t>;
// - see Hash Policy section below
// AllocPolicy:
// - see AllocPolicy.h
//
// Note:
// - HashMap is not reentrant: Key/Value/HashPolicy/AllocPolicy members
// called by HashMap must not call back into the same HashMap object.
// - Due to the lack of exception handling, the user must call |init()|.
-template <class Key,
- class Value,
- class HashPolicy = DefaultHasher<Key>,
- class AllocPolicy = MallocAllocPolicy>
+template<class Key,
+ class Value,
+ class HashPolicy = DefaultHasher<Key>,
+ class AllocPolicy = MallocAllocPolicy>
class HashMap
{
- typedef HashMapEntry<Key, Value> TableEntry;
-
- struct MapHashPolicy : HashPolicy
- {
- using Base = HashPolicy;
- typedef Key KeyType;
- static const Key& getKey(TableEntry& e) { return e.key(); }
- static void setKey(TableEntry& e, Key& k) { HashPolicy::rekey(e.mutableKey(), k); }
- };
+ typedef HashMapEntry<Key, Value> TableEntry;
- typedef detail::HashTable<TableEntry, MapHashPolicy, AllocPolicy> Impl;
- Impl impl;
+ struct MapHashPolicy : HashPolicy
+ {
+ using Base = HashPolicy;
+ typedef Key KeyType;
+ static const Key& getKey(TableEntry& e) { return e.key(); }
+ static void setKey(TableEntry& e, Key& k)
+ {
+ HashPolicy::rekey(e.mutableKey(), k);
+ }
+ };
- public:
- typedef typename HashPolicy::Lookup Lookup;
- typedef TableEntry Entry;
+ typedef detail::HashTable<TableEntry, MapHashPolicy, AllocPolicy> Impl;
+ Impl impl;
- // HashMap construction is fallible (due to OOM); thus the user must call
- // init after constructing a HashMap and check the return value.
- explicit HashMap(AllocPolicy a = AllocPolicy()) : impl(a) {}
- MOZ_MUST_USE bool init(uint32_t len = 16) { return impl.init(len); }
- bool initialized() const { return impl.initialized(); }
+public:
+ typedef typename HashPolicy::Lookup Lookup;
+ typedef TableEntry Entry;
- // Return whether the given lookup value is present in the map. E.g.:
- //
- // typedef HashMap<int,char> HM;
- // HM h;
- // if (HM::Ptr p = h.lookup(3)) {
- // const HM::Entry& e = *p; // p acts like a pointer to Entry
- // assert(p->key == 3); // Entry contains the key
- // char val = p->value; // and value
- // }
- //
- // Also see the definition of Ptr in HashTable above (with T = Entry).
- typedef typename Impl::Ptr Ptr;
- MOZ_ALWAYS_INLINE Ptr lookup(const Lookup& l) const { return impl.lookup(l); }
+ // HashMap construction is fallible (due to OOM); thus the user must call
+ // init after constructing a HashMap and check the return value.
+ explicit HashMap(AllocPolicy a = AllocPolicy())
+ : impl(a)
+ {
+ }
+ MOZ_MUST_USE bool init(uint32_t len = 16) { return impl.init(len); }
+ bool initialized() const { return impl.initialized(); }
- // Like lookup, but does not assert if two threads call lookup at the same
- // time. Only use this method when none of the threads will modify the map.
- MOZ_ALWAYS_INLINE Ptr readonlyThreadsafeLookup(const Lookup& l) const {
- return impl.readonlyThreadsafeLookup(l);
- }
+ // Return whether the given lookup value is present in the map. E.g.:
+ //
+ // typedef HashMap<int,char> HM;
+ // HM h;
+ // if (HM::Ptr p = h.lookup(3)) {
+ // const HM::Entry& e = *p; // p acts like a pointer to Entry
+ // assert(p->key == 3); // Entry contains the key
+ // char val = p->value; // and value
+ // }
+ //
+ // Also see the definition of Ptr in HashTable above (with T = Entry).
+ typedef typename Impl::Ptr Ptr;
+ MOZ_ALWAYS_INLINE Ptr lookup(const Lookup& l) const { return impl.lookup(l); }
- // Assuming |p.found()|, remove |*p|.
- void remove(Ptr p) { impl.remove(p); }
+ // Like lookup, but does not assert if two threads call lookup at the same
+ // time. Only use this method when none of the threads will modify the map.
+ MOZ_ALWAYS_INLINE Ptr readonlyThreadsafeLookup(const Lookup& l) const
+ {
+ return impl.readonlyThreadsafeLookup(l);
+ }
+
+ // Assuming |p.found()|, remove |*p|.
+ void remove(Ptr p) { impl.remove(p); }
- // Like |lookup(l)|, but on miss, |p = lookupForAdd(l)| allows efficient
- // insertion of Key |k| (where |HashPolicy::match(k,l) == true|) using
- // |add(p,k,v)|. After |add(p,k,v)|, |p| points to the new Entry. E.g.:
- //
- // typedef HashMap<int,char> HM;
- // HM h;
- // HM::AddPtr p = h.lookupForAdd(3);
- // if (!p) {
- // if (!h.add(p, 3, 'a'))
- // return false;
- // }
- // const HM::Entry& e = *p; // p acts like a pointer to Entry
- // assert(p->key == 3); // Entry contains the key
- // char val = p->value; // and value
- //
- // Also see the definition of AddPtr in HashTable above (with T = Entry).
- //
- // N.B. The caller must ensure that no mutating hash table operations
- // occur between a pair of |lookupForAdd| and |add| calls. To avoid
- // looking up the key a second time, the caller may use the more efficient
- // relookupOrAdd method. This method reuses part of the hashing computation
- // to more efficiently insert the key if it has not been added. For
- // example, a mutation-handling version of the previous example:
- //
- // HM::AddPtr p = h.lookupForAdd(3);
- // if (!p) {
- // call_that_may_mutate_h();
- // if (!h.relookupOrAdd(p, 3, 'a'))
- // return false;
- // }
- // const HM::Entry& e = *p;
- // assert(p->key == 3);
- // char val = p->value;
- typedef typename Impl::AddPtr AddPtr;
- MOZ_ALWAYS_INLINE AddPtr lookupForAdd(const Lookup& l) const {
- return impl.lookupForAdd(l);
- }
+ // Like |lookup(l)|, but on miss, |p = lookupForAdd(l)| allows efficient
+ // insertion of Key |k| (where |HashPolicy::match(k,l) == true|) using
+ // |add(p,k,v)|. After |add(p,k,v)|, |p| points to the new Entry. E.g.:
+ //
+ // typedef HashMap<int,char> HM;
+ // HM h;
+ // HM::AddPtr p = h.lookupForAdd(3);
+ // if (!p) {
+ // if (!h.add(p, 3, 'a'))
+ // return false;
+ // }
+ // const HM::Entry& e = *p; // p acts like a pointer to Entry
+ // assert(p->key == 3); // Entry contains the key
+ // char val = p->value; // and value
+ //
+ // Also see the definition of AddPtr in HashTable above (with T = Entry).
+ //
+ // N.B. The caller must ensure that no mutating hash table operations
+ // occur between a pair of |lookupForAdd| and |add| calls. To avoid
+ // looking up the key a second time, the caller may use the more efficient
+ // relookupOrAdd method. This method reuses part of the hashing computation
+ // to more efficiently insert the key if it has not been added. For
+ // example, a mutation-handling version of the previous example:
+ //
+ // HM::AddPtr p = h.lookupForAdd(3);
+ // if (!p) {
+ // call_that_may_mutate_h();
+ // if (!h.relookupOrAdd(p, 3, 'a'))
+ // return false;
+ // }
+ // const HM::Entry& e = *p;
+ // assert(p->key == 3);
+ // char val = p->value;
+ typedef typename Impl::AddPtr AddPtr;
+ MOZ_ALWAYS_INLINE AddPtr lookupForAdd(const Lookup& l) const
+ {
+ return impl.lookupForAdd(l);
+ }
- template<typename KeyInput, typename ValueInput>
- MOZ_MUST_USE bool add(AddPtr& p, KeyInput&& k, ValueInput&& v) {
- return impl.add(p,
- std::forward<KeyInput>(k),
- std::forward<ValueInput>(v));
- }
+ template<typename KeyInput, typename ValueInput>
+ MOZ_MUST_USE bool add(AddPtr& p, KeyInput&& k, ValueInput&& v)
+ {
+ return impl.add(p, std::forward<KeyInput>(k), std::forward<ValueInput>(v));
+ }
- template<typename KeyInput>
- MOZ_MUST_USE bool add(AddPtr& p, KeyInput&& k) {
- return impl.add(p, std::forward<KeyInput>(k), Value());
- }
+ template<typename KeyInput>
+ MOZ_MUST_USE bool add(AddPtr& p, KeyInput&& k)
+ {
+ return impl.add(p, std::forward<KeyInput>(k), Value());
+ }
- template<typename KeyInput, typename ValueInput>
- MOZ_MUST_USE bool relookupOrAdd(AddPtr& p, KeyInput&& k, ValueInput&& v) {
- return impl.relookupOrAdd(p, k,
- std::forward<KeyInput>(k),
- std::forward<ValueInput>(v));
- }
+ template<typename KeyInput, typename ValueInput>
+ MOZ_MUST_USE bool relookupOrAdd(AddPtr& p, KeyInput&& k, ValueInput&& v)
+ {
+ return impl.relookupOrAdd(
+ p, k, std::forward<KeyInput>(k), std::forward<ValueInput>(v));
+ }
- // |all()| returns a Range containing |count()| elements. E.g.:
- //
- // typedef HashMap<int,char> HM;
- // HM h;
- // for (HM::Range r = h.all(); !r.empty(); r.popFront())
- // char c = r.front().value();
- //
- // Also see the definition of Range in HashTable above (with T = Entry).
- typedef typename Impl::Range Range;
- Range all() const { return impl.all(); }
+ // |all()| returns a Range containing |count()| elements. E.g.:
+ //
+ // typedef HashMap<int,char> HM;
+ // HM h;
+ // for (HM::Range r = h.all(); !r.empty(); r.popFront())
+ // char c = r.front().value();
+ //
+ // Also see the definition of Range in HashTable above (with T = Entry).
+ typedef typename Impl::Range Range;
+ Range all() const { return impl.all(); }
- // Typedef for the enumeration class. An Enum may be used to examine and
- // remove table entries:
- //
- // typedef HashMap<int,char> HM;
- // HM s;
- // for (HM::Enum e(s); !e.empty(); e.popFront())
- // if (e.front().value() == 'l')
- // e.removeFront();
- //
- // Table resize may occur in Enum's destructor. Also see the definition of
- // Enum in HashTable above (with T = Entry).
- typedef typename Impl::Enum Enum;
+ // Typedef for the enumeration class. An Enum may be used to examine and
+ // remove table entries:
+ //
+ // typedef HashMap<int,char> HM;
+ // HM s;
+ // for (HM::Enum e(s); !e.empty(); e.popFront())
+ // if (e.front().value() == 'l')
+ // e.removeFront();
+ //
+ // Table resize may occur in Enum's destructor. Also see the definition of
+ // Enum in HashTable above (with T = Entry).
+ typedef typename Impl::Enum Enum;
- // Remove all entries. This does not shrink the table. For that consider
- // using the finish() method.
- void clear() { impl.clear(); }
+ // Remove all entries. This does not shrink the table. For that consider
+ // using the finish() method.
+ void clear() { impl.clear(); }
+
+ // Remove all entries. Unlike clear() this method tries to shrink the table.
+ // Unlike finish() it does not require the map to be initialized again.
+ void clearAndShrink() { impl.clearAndShrink(); }
- // Remove all entries. Unlike clear() this method tries to shrink the table.
- // Unlike finish() it does not require the map to be initialized again.
- void clearAndShrink() { impl.clearAndShrink(); }
+ // Remove all the entries and release all internal buffers. The map must
+ // be initialized again before any use.
+ void finish() { impl.finish(); }
- // Remove all the entries and release all internal buffers. The map must
- // be initialized again before any use.
- void finish() { impl.finish(); }
+ // Does the table contain any entries?
+ bool empty() const { return impl.empty(); }
+
+ // Number of live elements in the map.
+ uint32_t count() const { return impl.count(); }
- // Does the table contain any entries?
- bool empty() const { return impl.empty(); }
+  // Total number of entry slots in the dynamic table. Note: resize will
+ // happen well before count() == capacity().
+ size_t capacity() const { return impl.capacity(); }
- // Number of live elements in the map.
- uint32_t count() const { return impl.count(); }
-
- // Total number of allocation in the dynamic table. Note: resize will
- // happen well before count() == capacity().
- size_t capacity() const { return impl.capacity(); }
+ // Don't just call |impl.sizeOfExcludingThis()| because there's no
+ // guarantee that |impl| is the first field in HashMap.
+ size_t sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const
+ {
+ return impl.sizeOfExcludingThis(mallocSizeOf);
+ }
+ size_t sizeOfIncludingThis(MallocSizeOf mallocSizeOf) const
+ {
+ return mallocSizeOf(this) + impl.sizeOfExcludingThis(mallocSizeOf);
+ }
- // Don't just call |impl.sizeOfExcludingThis()| because there's no
- // guarantee that |impl| is the first field in HashMap.
- size_t sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const {
- return impl.sizeOfExcludingThis(mallocSizeOf);
- }
- size_t sizeOfIncludingThis(MallocSizeOf mallocSizeOf) const {
- return mallocSizeOf(this) + impl.sizeOfExcludingThis(mallocSizeOf);
- }
+ Generation generation() const { return impl.generation(); }
+
+ /************************************************** Shorthand operations */
+
+ bool has(const Lookup& l) const { return impl.lookup(l).found(); }
- Generation generation() const {
- return impl.generation();
+ // Overwrite existing value with v. Return false on oom.
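+  // E.g. (sketch):
+  //
+  //   HashMap<int, char> h;
+  //   if (!h.init() || !h.put(3, 'a'))
+  //     return false;
+  //   MOZ_ASSERT(h.has(3));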
+ template<typename KeyInput, typename ValueInput>
+ MOZ_MUST_USE bool put(KeyInput&& k, ValueInput&& v)
+ {
+ AddPtr p = lookupForAdd(k);
+ if (p) {
+ p->value() = std::forward<ValueInput>(v);
+ return true;
}
-
- /************************************************** Shorthand operations */
-
- bool has(const Lookup& l) const {
- return impl.lookup(l).found();
- }
+ return add(p, std::forward<KeyInput>(k), std::forward<ValueInput>(v));
+ }
- // Overwrite existing value with v. Return false on oom.
- template<typename KeyInput, typename ValueInput>
- MOZ_MUST_USE bool put(KeyInput&& k, ValueInput&& v) {
- AddPtr p = lookupForAdd(k);
- if (p) {
- p->value() = std::forward<ValueInput>(v);
- return true;
- }
- return add(p, std::forward<KeyInput>(k), std::forward<ValueInput>(v));
- }
+ // Like put, but assert that the given key is not already present.
+ template<typename KeyInput, typename ValueInput>
+ MOZ_MUST_USE bool putNew(KeyInput&& k, ValueInput&& v)
+ {
+ return impl.putNew(
+ k, std::forward<KeyInput>(k), std::forward<ValueInput>(v));
+ }
- // Like put, but assert that the given key is not already present.
- template<typename KeyInput, typename ValueInput>
- MOZ_MUST_USE bool putNew(KeyInput&& k, ValueInput&& v) {
- return impl.putNew(k, std::forward<KeyInput>(k), std::forward<ValueInput>(v));
- }
+ // Only call this to populate an empty map after reserving space with init().
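+  // E.g. (sketch), where |n|, |keys|, and |values| are whatever the caller
+  // has on hand:
+  //
+  //   HashMap<int, char> h;
+  //   if (!h.init(n))
+  //     return false;
+  //   for (uint32_t i = 0; i < n; i++)
+  //     h.putNewInfallible(keys[i], values[i]);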
+ template<typename KeyInput, typename ValueInput>
+ void putNewInfallible(KeyInput&& k, ValueInput&& v)
+ {
+ impl.putNewInfallible(
+ k, std::forward<KeyInput>(k), std::forward<ValueInput>(v));
+ }
- // Only call this to populate an empty map after reserving space with init().
- template<typename KeyInput, typename ValueInput>
- void putNewInfallible(KeyInput&& k, ValueInput&& v) {
- impl.putNewInfallible(k, std::forward<KeyInput>(k), std::forward<ValueInput>(v));
- }
+ // Add (k,defaultValue) if |k| is not found. Return a false-y Ptr on oom.
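+  // E.g. (sketch), where |counts| is an initialized HashMap<char, uint32_t>:
+  //
+  //   if (HashMap<char, uint32_t>::Ptr p = counts.lookupWithDefault(c, 0))
+  //     p->value()++;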
+ Ptr lookupWithDefault(const Key& k, const Value& defaultValue)
+ {
+ AddPtr p = lookupForAdd(k);
+ if (p)
+ return p;
+ bool ok = add(p, k, defaultValue);
+ MOZ_ASSERT_IF(!ok, !p); // p is left false-y on oom.
+ (void)ok;
+ return p;
+ }
- // Add (k,defaultValue) if |k| is not found. Return a false-y Ptr on oom.
- Ptr lookupWithDefault(const Key& k, const Value& defaultValue) {
- AddPtr p = lookupForAdd(k);
- if (p)
- return p;
- bool ok = add(p, k, defaultValue);
- MOZ_ASSERT_IF(!ok, !p); // p is left false-y on oom.
- (void)ok;
- return p;
- }
+ // Remove if present.
+ void remove(const Lookup& l)
+ {
+ if (Ptr p = lookup(l))
+ remove(p);
+ }
- // Remove if present.
- void remove(const Lookup& l) {
- if (Ptr p = lookup(l))
- remove(p);
- }
-
- // Infallibly rekey one entry, if necessary.
- // Requires template parameters Key and HashPolicy::Lookup to be the same type.
- void rekeyIfMoved(const Key& old_key, const Key& new_key) {
- if (old_key != new_key)
- rekeyAs(old_key, new_key, new_key);
- }
+ // Infallibly rekey one entry, if necessary.
+ // Requires template parameters Key and HashPolicy::Lookup to be the same
+ // type.
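+  // E.g. (sketch), for a map keyed by object address after the object was
+  // relocated:
+  //
+  //   map.rekeyIfMoved(oldAddress, newAddress);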
+ void rekeyIfMoved(const Key& old_key, const Key& new_key)
+ {
+ if (old_key != new_key)
+ rekeyAs(old_key, new_key, new_key);
+ }
- // Infallibly rekey one entry if present, and return whether that happened.
- bool rekeyAs(const Lookup& old_lookup, const Lookup& new_lookup, const Key& new_key) {
- if (Ptr p = lookup(old_lookup)) {
- impl.rekeyAndMaybeRehash(p, new_lookup, new_key);
- return true;
- }
- return false;
+ // Infallibly rekey one entry if present, and return whether that happened.
+ bool rekeyAs(const Lookup& old_lookup,
+ const Lookup& new_lookup,
+ const Key& new_key)
+ {
+ if (Ptr p = lookup(old_lookup)) {
+ impl.rekeyAndMaybeRehash(p, new_lookup, new_key);
+ return true;
}
+ return false;
+ }
- // HashMap is movable
- HashMap(HashMap&& rhs) : impl(std::move(rhs.impl)) {}
- void operator=(HashMap&& rhs) {
- MOZ_ASSERT(this != &rhs, "self-move assignment is prohibited");
- impl = std::move(rhs.impl);
- }
+ // HashMap is movable
+ HashMap(HashMap&& rhs)
+ : impl(std::move(rhs.impl))
+ {
+ }
+ void operator=(HashMap&& rhs)
+ {
+ MOZ_ASSERT(this != &rhs, "self-move assignment is prohibited");
+ impl = std::move(rhs.impl);
+ }
- private:
- // HashMap is not copyable or assignable
- HashMap(const HashMap& hm) = delete;
- HashMap& operator=(const HashMap& hm) = delete;
+private:
+ // HashMap is not copyable or assignable
+ HashMap(const HashMap& hm) = delete;
+ HashMap& operator=(const HashMap& hm) = delete;
- friend class Impl::Enum;
+ friend class Impl::Enum;
};
/*****************************************************************************/
// A performant, STL-like container providing a hash-based set of values. In
// particular, HashSet calls constructors and destructors of all objects added
// so non-PODs may be used safely.
//
@@ -344,245 +370,264 @@ class HashMap
// - see Hash Policy section below
// AllocPolicy:
// - see AllocPolicy.h
//
// Note:
// - HashSet is not reentrant: T/HashPolicy/AllocPolicy members called by
// HashSet must not call back into the same HashSet object.
// - Due to the lack of exception handling, the user must call |init()|.
-template <class T,
- class HashPolicy = DefaultHasher<T>,
- class AllocPolicy = MallocAllocPolicy>
+template<class T,
+ class HashPolicy = DefaultHasher<T>,
+ class AllocPolicy = MallocAllocPolicy>
class HashSet
{
- struct SetOps : HashPolicy
- {
- using Base = HashPolicy;
- typedef T KeyType;
- static const KeyType& getKey(const T& t) { return t; }
- static void setKey(T& t, KeyType& k) { HashPolicy::rekey(t, k); }
- };
+ struct SetOps : HashPolicy
+ {
+ using Base = HashPolicy;
+ typedef T KeyType;
+ static const KeyType& getKey(const T& t) { return t; }
+ static void setKey(T& t, KeyType& k) { HashPolicy::rekey(t, k); }
+ };
- typedef detail::HashTable<const T, SetOps, AllocPolicy> Impl;
- Impl impl;
+ typedef detail::HashTable<const T, SetOps, AllocPolicy> Impl;
+ Impl impl;
- public:
- typedef typename HashPolicy::Lookup Lookup;
- typedef T Entry;
-
- // HashSet construction is fallible (due to OOM); thus the user must call
- // init after constructing a HashSet and check the return value.
- explicit HashSet(AllocPolicy a = AllocPolicy()) : impl(a) {}
- MOZ_MUST_USE bool init(uint32_t len = 16) { return impl.init(len); }
- bool initialized() const { return impl.initialized(); }
+public:
+ typedef typename HashPolicy::Lookup Lookup;
+ typedef T Entry;
- // Return whether the given lookup value is present in the map. E.g.:
- //
- // typedef HashSet<int> HS;
- // HS h;
- // if (HS::Ptr p = h.lookup(3)) {
- // assert(*p == 3); // p acts like a pointer to int
- // }
- //
- // Also see the definition of Ptr in HashTable above.
- typedef typename Impl::Ptr Ptr;
- MOZ_ALWAYS_INLINE Ptr lookup(const Lookup& l) const { return impl.lookup(l); }
+ // HashSet construction is fallible (due to OOM); thus the user must call
+ // init after constructing a HashSet and check the return value.
+ explicit HashSet(AllocPolicy a = AllocPolicy())
+ : impl(a)
+ {
+ }
+ MOZ_MUST_USE bool init(uint32_t len = 16) { return impl.init(len); }
+ bool initialized() const { return impl.initialized(); }
- // Like lookup, but does not assert if two threads call lookup at the same
- // time. Only use this method when none of the threads will modify the map.
- MOZ_ALWAYS_INLINE Ptr readonlyThreadsafeLookup(const Lookup& l) const {
- return impl.readonlyThreadsafeLookup(l);
- }
+  // Return whether the given lookup value is present in the set. E.g.:
+ //
+ // typedef HashSet<int> HS;
+ // HS h;
+ // if (HS::Ptr p = h.lookup(3)) {
+ // assert(*p == 3); // p acts like a pointer to int
+ // }
+ //
+ // Also see the definition of Ptr in HashTable above.
+ typedef typename Impl::Ptr Ptr;
+ MOZ_ALWAYS_INLINE Ptr lookup(const Lookup& l) const { return impl.lookup(l); }
- // Assuming |p.found()|, remove |*p|.
- void remove(Ptr p) { impl.remove(p); }
+ // Like lookup, but does not assert if two threads call lookup at the same
+  // time. Only use this method when none of the threads will modify the set.
+ MOZ_ALWAYS_INLINE Ptr readonlyThreadsafeLookup(const Lookup& l) const
+ {
+ return impl.readonlyThreadsafeLookup(l);
+ }
+
+ // Assuming |p.found()|, remove |*p|.
+ void remove(Ptr p) { impl.remove(p); }
- // Like |lookup(l)|, but on miss, |p = lookupForAdd(l)| allows efficient
- // insertion of T value |t| (where |HashPolicy::match(t,l) == true|) using
- // |add(p,t)|. After |add(p,t)|, |p| points to the new element. E.g.:
- //
- // typedef HashSet<int> HS;
- // HS h;
- // HS::AddPtr p = h.lookupForAdd(3);
- // if (!p) {
- // if (!h.add(p, 3))
- // return false;
- // }
- // assert(*p == 3); // p acts like a pointer to int
- //
- // Also see the definition of AddPtr in HashTable above.
- //
- // N.B. The caller must ensure that no mutating hash table operations
- // occur between a pair of |lookupForAdd| and |add| calls. To avoid
- // looking up the key a second time, the caller may use the more efficient
- // relookupOrAdd method. This method reuses part of the hashing computation
- // to more efficiently insert the key if it has not been added. For
- // example, a mutation-handling version of the previous example:
- //
- // HS::AddPtr p = h.lookupForAdd(3);
- // if (!p) {
- // call_that_may_mutate_h();
- // if (!h.relookupOrAdd(p, 3, 3))
- // return false;
- // }
- // assert(*p == 3);
- //
- // Note that relookupOrAdd(p,l,t) performs Lookup using |l| and adds the
- // entry |t|, where the caller ensures match(l,t).
- typedef typename Impl::AddPtr AddPtr;
- MOZ_ALWAYS_INLINE AddPtr lookupForAdd(const Lookup& l) const {
- return impl.lookupForAdd(l);
- }
+ // Like |lookup(l)|, but on miss, |p = lookupForAdd(l)| allows efficient
+ // insertion of T value |t| (where |HashPolicy::match(t,l) == true|) using
+ // |add(p,t)|. After |add(p,t)|, |p| points to the new element. E.g.:
+ //
+ // typedef HashSet<int> HS;
+ // HS h;
+ // HS::AddPtr p = h.lookupForAdd(3);
+ // if (!p) {
+ // if (!h.add(p, 3))
+ // return false;
+ // }
+ // assert(*p == 3); // p acts like a pointer to int
+ //
+ // Also see the definition of AddPtr in HashTable above.
+ //
+ // N.B. The caller must ensure that no mutating hash table operations
+ // occur between a pair of |lookupForAdd| and |add| calls. To avoid
+ // looking up the key a second time, the caller may use the more efficient
+ // relookupOrAdd method. This method reuses part of the hashing computation
+ // to more efficiently insert the key if it has not been added. For
+ // example, a mutation-handling version of the previous example:
+ //
+ // HS::AddPtr p = h.lookupForAdd(3);
+ // if (!p) {
+ // call_that_may_mutate_h();
+ // if (!h.relookupOrAdd(p, 3, 3))
+ // return false;
+ // }
+ // assert(*p == 3);
+ //
+ // Note that relookupOrAdd(p,l,t) performs Lookup using |l| and adds the
+ // entry |t|, where the caller ensures match(l,t).
+ typedef typename Impl::AddPtr AddPtr;
+ MOZ_ALWAYS_INLINE AddPtr lookupForAdd(const Lookup& l) const
+ {
+ return impl.lookupForAdd(l);
+ }
- template <typename U>
- MOZ_MUST_USE bool add(AddPtr& p, U&& u) {
- return impl.add(p, std::forward<U>(u));
- }
+ template<typename U>
+ MOZ_MUST_USE bool add(AddPtr& p, U&& u)
+ {
+ return impl.add(p, std::forward<U>(u));
+ }
- template <typename U>
- MOZ_MUST_USE bool relookupOrAdd(AddPtr& p, const Lookup& l, U&& u) {
- return impl.relookupOrAdd(p, l, std::forward<U>(u));
- }
+ template<typename U>
+ MOZ_MUST_USE bool relookupOrAdd(AddPtr& p, const Lookup& l, U&& u)
+ {
+ return impl.relookupOrAdd(p, l, std::forward<U>(u));
+ }
- // |all()| returns a Range containing |count()| elements:
- //
- // typedef HashSet<int> HS;
- // HS h;
- // for (HS::Range r = h.all(); !r.empty(); r.popFront())
- // int i = r.front();
- //
- // Also see the definition of Range in HashTable above.
- typedef typename Impl::Range Range;
- Range all() const { return impl.all(); }
+ // |all()| returns a Range containing |count()| elements:
+ //
+ // typedef HashSet<int> HS;
+ // HS h;
+ // for (HS::Range r = h.all(); !r.empty(); r.popFront())
+ // int i = r.front();
+ //
+ // Also see the definition of Range in HashTable above.
+ typedef typename Impl::Range Range;
+ Range all() const { return impl.all(); }
+
+ // Typedef for the enumeration class. An Enum may be used to examine and
+ // remove table entries:
+ //
+ // typedef HashSet<int> HS;
+ // HS s;
+ // for (HS::Enum e(s); !e.empty(); e.popFront())
+ // if (e.front() == 42)
+ // e.removeFront();
+ //
+ // Table resize may occur in Enum's destructor. Also see the definition of
+ // Enum in HashTable above.
+ typedef typename Impl::Enum Enum;
- // Typedef for the enumeration class. An Enum may be used to examine and
- // remove table entries:
- //
- // typedef HashSet<int> HS;
- // HS s;
- // for (HS::Enum e(s); !e.empty(); e.popFront())
- // if (e.front() == 42)
- // e.removeFront();
- //
- // Table resize may occur in Enum's destructor. Also see the definition of
- // Enum in HashTable above.
- typedef typename Impl::Enum Enum;
+ // Remove all entries. This does not shrink the table. For that consider
+ // using the finish() method.
+ void clear() { impl.clear(); }
+
+ // Remove all entries. Unlike clear() this method tries to shrink the table.
+ // Unlike finish() it does not require the set to be initialized again.
+ void clearAndShrink() { impl.clearAndShrink(); }
- // Remove all entries. This does not shrink the table. For that consider
- // using the finish() method.
- void clear() { impl.clear(); }
+ // Remove all the entries and release all internal buffers. The set must
+ // be initialized again before any use.
+ void finish() { impl.finish(); }
+
+ // Does the table contain any entries?
+ bool empty() const { return impl.empty(); }
- // Remove all entries. Unlike clear() this method tries to shrink the table.
- // Unlike finish() it does not require the set to be initialized again.
- void clearAndShrink() { impl.clearAndShrink(); }
+  // Number of live elements in the set.
+ uint32_t count() const { return impl.count(); }
- // Remove all the entries and release all internal buffers. The set must
- // be initialized again before any use.
- void finish() { impl.finish(); }
-
- // Does the table contain any entries?
- bool empty() const { return impl.empty(); }
+  // Total number of entry slots in the dynamic table. Note: resize will
+ // happen well before count() == capacity().
+ size_t capacity() const { return impl.capacity(); }
- // Number of live elements in the map.
- uint32_t count() const { return impl.count(); }
-
- // Total number of allocation in the dynamic table. Note: resize will
- // happen well before count() == capacity().
- size_t capacity() const { return impl.capacity(); }
+ // Don't just call |impl.sizeOfExcludingThis()| because there's no
+ // guarantee that |impl| is the first field in HashSet.
+ size_t sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const
+ {
+ return impl.sizeOfExcludingThis(mallocSizeOf);
+ }
+ size_t sizeOfIncludingThis(MallocSizeOf mallocSizeOf) const
+ {
+ return mallocSizeOf(this) + impl.sizeOfExcludingThis(mallocSizeOf);
+ }
- // Don't just call |impl.sizeOfExcludingThis()| because there's no
- // guarantee that |impl| is the first field in HashSet.
- size_t sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const {
- return impl.sizeOfExcludingThis(mallocSizeOf);
- }
- size_t sizeOfIncludingThis(MallocSizeOf mallocSizeOf) const {
- return mallocSizeOf(this) + impl.sizeOfExcludingThis(mallocSizeOf);
- }
+ Generation generation() const { return impl.generation(); }
+
+ /************************************************** Shorthand operations */
+
+ bool has(const Lookup& l) const { return impl.lookup(l).found(); }
- Generation generation() const {
- return impl.generation();
- }
-
- /************************************************** Shorthand operations */
+ // Add |u| if it is not present already. Return false on oom.
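+  // E.g. (sketch):
+  //
+  //   HashSet<int> s;
+  //   if (!s.init() || !s.put(42))
+  //     return false;
+  //   MOZ_ASSERT(s.has(42));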
+ template<typename U>
+ MOZ_MUST_USE bool put(U&& u)
+ {
+ AddPtr p = lookupForAdd(u);
+ return p ? true : add(p, std::forward<U>(u));
+ }
- bool has(const Lookup& l) const {
- return impl.lookup(l).found();
- }
+ // Like put, but assert that the given key is not already present.
+ template<typename U>
+ MOZ_MUST_USE bool putNew(U&& u)
+ {
+ return impl.putNew(u, std::forward<U>(u));
+ }
- // Add |u| if it is not present already. Return false on oom.
- template <typename U>
- MOZ_MUST_USE bool put(U&& u) {
- AddPtr p = lookupForAdd(u);
- return p ? true : add(p, std::forward<U>(u));
- }
+ template<typename U>
+ MOZ_MUST_USE bool putNew(const Lookup& l, U&& u)
+ {
+ return impl.putNew(l, std::forward<U>(u));
+ }
- // Like put, but assert that the given key is not already present.
- template <typename U>
- MOZ_MUST_USE bool putNew(U&& u) {
- return impl.putNew(u, std::forward<U>(u));
- }
-
- template <typename U>
- MOZ_MUST_USE bool putNew(const Lookup& l, U&& u) {
- return impl.putNew(l, std::forward<U>(u));
- }
+ // Only call this to populate an empty set after reserving space with init().
+ template<typename U>
+ void putNewInfallible(const Lookup& l, U&& u)
+ {
+ impl.putNewInfallible(l, std::forward<U>(u));
+ }
- // Only call this to populate an empty set after reserving space with init().
- template <typename U>
- void putNewInfallible(const Lookup& l, U&& u) {
- impl.putNewInfallible(l, std::forward<U>(u));
- }
+ void remove(const Lookup& l)
+ {
+ if (Ptr p = lookup(l))
+ remove(p);
+ }
- void remove(const Lookup& l) {
- if (Ptr p = lookup(l))
- remove(p);
- }
-
- // Infallibly rekey one entry, if present.
- // Requires template parameters T and HashPolicy::Lookup to be the same type.
- void rekeyIfMoved(const Lookup& old_value, const T& new_value) {
- if (old_value != new_value)
- rekeyAs(old_value, new_value, new_value);
- }
+ // Infallibly rekey one entry, if present.
+ // Requires template parameters T and HashPolicy::Lookup to be the same type.
+ void rekeyIfMoved(const Lookup& old_value, const T& new_value)
+ {
+ if (old_value != new_value)
+ rekeyAs(old_value, new_value, new_value);
+ }
- // Infallibly rekey one entry if present, and return whether that happened.
- bool rekeyAs(const Lookup& old_lookup, const Lookup& new_lookup, const T& new_value) {
- if (Ptr p = lookup(old_lookup)) {
- impl.rekeyAndMaybeRehash(p, new_lookup, new_value);
- return true;
- }
- return false;
+ // Infallibly rekey one entry if present, and return whether that happened.
+ bool rekeyAs(const Lookup& old_lookup,
+ const Lookup& new_lookup,
+ const T& new_value)
+ {
+ if (Ptr p = lookup(old_lookup)) {
+ impl.rekeyAndMaybeRehash(p, new_lookup, new_value);
+ return true;
}
+ return false;
+ }
- // Infallibly replace the current key at |p| with an equivalent key.
- // Specifically, both HashPolicy::hash and HashPolicy::match must return
- // identical results for the new and old key when applied against all
- // possible matching values.
- void replaceKey(Ptr p, const T& new_value) {
- MOZ_ASSERT(p.found());
- MOZ_ASSERT(*p != new_value);
- MOZ_ASSERT(HashPolicy::hash(*p) == HashPolicy::hash(new_value));
- MOZ_ASSERT(HashPolicy::match(*p, new_value));
- const_cast<T&>(*p) = new_value;
- }
+ // Infallibly replace the current key at |p| with an equivalent key.
+ // Specifically, both HashPolicy::hash and HashPolicy::match must return
+ // identical results for the new and old key when applied against all
+ // possible matching values.
+ void replaceKey(Ptr p, const T& new_value)
+ {
+ MOZ_ASSERT(p.found());
+ MOZ_ASSERT(*p != new_value);
+ MOZ_ASSERT(HashPolicy::hash(*p) == HashPolicy::hash(new_value));
+ MOZ_ASSERT(HashPolicy::match(*p, new_value));
+ const_cast<T&>(*p) = new_value;
+ }
- // HashSet is movable
- HashSet(HashSet&& rhs) : impl(std::move(rhs.impl)) {}
- void operator=(HashSet&& rhs) {
- MOZ_ASSERT(this != &rhs, "self-move assignment is prohibited");
- impl = std::move(rhs.impl);
- }
+ // HashSet is movable
+ HashSet(HashSet&& rhs)
+ : impl(std::move(rhs.impl))
+ {
+ }
+ void operator=(HashSet&& rhs)
+ {
+ MOZ_ASSERT(this != &rhs, "self-move assignment is prohibited");
+ impl = std::move(rhs.impl);
+ }
- private:
- // HashSet is not copyable or assignable
- HashSet(const HashSet& hs) = delete;
- HashSet& operator=(const HashSet& hs) = delete;
+private:
+ // HashSet is not copyable or assignable
+ HashSet(const HashSet& hs) = delete;
+ HashSet& operator=(const HashSet& hs) = delete;
- friend class Impl::Enum;
+ friend class Impl::Enum;
};
/*****************************************************************************/
// Hash Policy
//
// A hash policy P for a hash table with key-type Key must provide:
// - a type |P::Lookup| to use to lookup table entries;
@@ -605,1383 +650,1423 @@ class HashSet
// if (!p) {
// assert(P::match(k, l)); // must hold
// h.add(p, k);
// }
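//
// For example (sketch), a policy that keys entries holding a hypothetical
// |Employee| record by an integer id, so lookups need not construct a whole
// record:
//
//   struct EmployeeIdHasher
//   {
//     typedef uint32_t Lookup;
//     static HashNumber hash(const Lookup& l) { return HashGeneric(l); }
//     static bool match(const Employee& k, const Lookup& l)
//     {
//       return k.id() == l;
//     }
//   };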
// Pointer hashing policy that uses HashGeneric() to create good hashes for
// pointers. Note that we don't shift out the lowest k bits to generate a
// good distribution for arena allocated pointers.
-template <typename Key>
+template<typename Key>
struct PointerHasher
{
- typedef Key Lookup;
- static HashNumber hash(const Lookup& l) {
- size_t word = reinterpret_cast<size_t>(l);
- return HashGeneric(word);
- }
- static bool match(const Key& k, const Lookup& l) {
- return k == l;
- }
- static void rekey(Key& k, const Key& newKey) {
- k = newKey;
- }
+ typedef Key Lookup;
+ static HashNumber hash(const Lookup& l)
+ {
+ size_t word = reinterpret_cast<size_t>(l);
+ return HashGeneric(word);
+ }
+ static bool match(const Key& k, const Lookup& l) { return k == l; }
+ static void rekey(Key& k, const Key& newKey) { k = newKey; }
};
// Default hash policy: just use the 'lookup' value. This of course only
// works if the lookup value is integral. HashTable applies ScrambleHashCode to
// the result of the 'hash' which means that it is 'ok' if the lookup value is
// not well distributed over the HashNumber domain.
-template <class Key>
+template<class Key>
struct DefaultHasher
{
- typedef Key Lookup;
- static HashNumber hash(const Lookup& l) {
- // Hash if can implicitly cast to hash number type.
- return l;
- }
- static bool match(const Key& k, const Lookup& l) {
- // Use builtin or overloaded operator==.
- return k == l;
- }
- static void rekey(Key& k, const Key& newKey) {
- k = newKey;
- }
+ typedef Key Lookup;
+ static HashNumber hash(const Lookup& l)
+ {
+    // Hash by implicit cast to the hash number type.
+ return l;
+ }
+ static bool match(const Key& k, const Lookup& l)
+ {
+ // Use builtin or overloaded operator==.
+ return k == l;
+ }
+ static void rekey(Key& k, const Key& newKey) { k = newKey; }
};
// Specialize hashing policy for pointer types. It assumes that the type is
// at least word-aligned. For types with smaller size use PointerHasher.
-template <class T>
+template<class T>
struct DefaultHasher<T*> : PointerHasher<T*>
-{};
+{
+};
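+// E.g. (sketch): any table keyed by pointers picks this up automatically:
+//
+//   HashSet<SomeType*> ptrs; // hashes via PointerHasher<SomeType*>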
// Specialize hashing policy for mozilla::UniquePtr to proxy the UniquePtr's
// raw pointer to PointerHasher.
-template <class T, class D>
+template<class T, class D>
struct DefaultHasher<UniquePtr<T, D>>
{
- using Lookup = UniquePtr<T, D>;
- using PtrHasher = PointerHasher<T*>;
+ using Lookup = UniquePtr<T, D>;
+ using PtrHasher = PointerHasher<T*>;
- static HashNumber hash(const Lookup& l) {
- return PtrHasher::hash(l.get());
- }
- static bool match(const UniquePtr<T, D>& k, const Lookup& l) {
- return PtrHasher::match(k.get(), l.get());
- }
- static void rekey(UniquePtr<T, D>& k, UniquePtr<T, D>&& newKey) {
- k = std::move(newKey);
- }
+ static HashNumber hash(const Lookup& l) { return PtrHasher::hash(l.get()); }
+ static bool match(const UniquePtr<T, D>& k, const Lookup& l)
+ {
+ return PtrHasher::match(k.get(), l.get());
+ }
+ static void rekey(UniquePtr<T, D>& k, UniquePtr<T, D>&& newKey)
+ {
+ k = std::move(newKey);
+ }
};
// For doubles, we can xor the two uint32s.
-template <>
+template<>
struct DefaultHasher<double>
{
- typedef double Lookup;
- static HashNumber hash(double d) {
- static_assert(sizeof(HashNumber) == 4,
- "subsequent code assumes a four-byte hash");
- uint64_t u = BitwiseCast<uint64_t>(d);
- return HashNumber(u ^ (u >> 32));
- }
- static bool match(double lhs, double rhs) {
- return BitwiseCast<uint64_t>(lhs) == BitwiseCast<uint64_t>(rhs);
- }
+ typedef double Lookup;
+ static HashNumber hash(double d)
+ {
+ static_assert(sizeof(HashNumber) == 4,
+ "subsequent code assumes a four-byte hash");
+ uint64_t u = BitwiseCast<uint64_t>(d);
+ return HashNumber(u ^ (u >> 32));
+ }
+ static bool match(double lhs, double rhs)
+ {
+ return BitwiseCast<uint64_t>(lhs) == BitwiseCast<uint64_t>(rhs);
+ }
};
-template <>
+template<>
struct DefaultHasher<float>
{
- typedef float Lookup;
- static HashNumber hash(float f) {
- static_assert(sizeof(HashNumber) == 4,
- "subsequent code assumes a four-byte hash");
- return HashNumber(BitwiseCast<uint32_t>(f));
- }
- static bool match(float lhs, float rhs) {
- return BitwiseCast<uint32_t>(lhs) == BitwiseCast<uint32_t>(rhs);
- }
+ typedef float Lookup;
+ static HashNumber hash(float f)
+ {
+ static_assert(sizeof(HashNumber) == 4,
+ "subsequent code assumes a four-byte hash");
+ return HashNumber(BitwiseCast<uint32_t>(f));
+ }
+ static bool match(float lhs, float rhs)
+ {
+ return BitwiseCast<uint32_t>(lhs) == BitwiseCast<uint32_t>(rhs);
+ }
};
// A hash policy that compares C strings.
struct CStringHasher
{
- typedef const char* Lookup;
- static HashNumber hash(Lookup l) {
- return HashString(l);
- }
- static bool match(const char* key, Lookup lookup) {
- return strcmp(key, lookup) == 0;
- }
+ typedef const char* Lookup;
+ static HashNumber hash(Lookup l) { return HashString(l); }
+ static bool match(const char* key, Lookup lookup)
+ {
+ return strcmp(key, lookup) == 0;
+ }
};
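//
// E.g. (sketch): a set of C strings keyed by their contents rather than by
// pointer identity (the set stores the pointers, so the strings must
// outlive it):
//
//   HashSet<const char*, CStringHasher> names;
//   if (!names.init() || !names.putNew("main"))
//     return false;
//   MOZ_ASSERT(names.has("main"));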
// Fallible hashing interface.
//
// Most of the time generating a hash code is infallible so this class provides
// default methods that always succeed. Specialize this class for your own hash
// policy to provide fallible hashing.
//
// This is used by MovableCellHasher to handle the fact that generating a unique
// ID for a cell pointer may fail due to OOM.
-template <typename HashPolicy>
+template<typename HashPolicy>
struct FallibleHashMethods
{
- // Return true if a hashcode is already available for its argument. Once
- // this returns true for a specific argument it must continue to do so.
- template <typename Lookup> static bool hasHash(Lookup&& l) { return true; }
+ // Return true if a hashcode is already available for its argument. Once
+ // this returns true for a specific argument it must continue to do so.
+ template<typename Lookup>
+ static bool hasHash(Lookup&& l)
+ {
+ return true;
+ }
- // Fallible method to ensure a hashcode exists for its argument and create
- // one if not. Returns false on error, e.g. out of memory.
- template <typename Lookup> static bool ensureHash(Lookup&& l) { return true; }
+ // Fallible method to ensure a hashcode exists for its argument and create
+ // one if not. Returns false on error, e.g. out of memory.
+ template<typename Lookup>
+ static bool ensureHash(Lookup&& l)
+ {
+ return true;
+ }
};
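//
// A policy whose hash codes may fail to materialize specializes this
// template. A sketch, with a hypothetical policy |MyHashPolicy| and
// hypothetical helpers |hasUniqueId| and |ensureUniqueId|:
//
//   template<>
//   struct FallibleHashMethods<MyHashPolicy>
//   {
//     template<typename Lookup>
//     static bool hasHash(Lookup&& l) { return hasUniqueId(l); }
//     template<typename Lookup>
//     static bool ensureHash(Lookup&& l) { return ensureUniqueId(l); }
//   };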
-template <typename HashPolicy, typename Lookup>
+template<typename HashPolicy, typename Lookup>
static bool
-HasHash(Lookup&& l) {
- return FallibleHashMethods<typename HashPolicy::Base>::hasHash(std::forward<Lookup>(l));
+HasHash(Lookup&& l)
+{
+ return FallibleHashMethods<typename HashPolicy::Base>::hasHash(
+ std::forward<Lookup>(l));
}
-template <typename HashPolicy, typename Lookup>
+template<typename HashPolicy, typename Lookup>
static bool
-EnsureHash(Lookup&& l) {
- return FallibleHashMethods<typename HashPolicy::Base>::ensureHash(std::forward<Lookup>(l));
+EnsureHash(Lookup&& l)
+{
+ return FallibleHashMethods<typename HashPolicy::Base>::ensureHash(
+ std::forward<Lookup>(l));
}
/*****************************************************************************/
// Both HashMap and HashSet are implemented by a single HashTable that is even
// more heavily parameterized than the other two. This leaves HashTable gnarly
// and extremely coupled to HashMap and HashSet; thus code should not use
// HashTable directly.
-template <class Key, class Value>
+template<class Key, class Value>
class HashMapEntry
{
- Key key_;
- Value value_;
-
- template <class, class, class> friend class detail::HashTable;
- template <class> friend class detail::HashTableEntry;
- template <class, class, class, class> friend class HashMap;
+ Key key_;
+ Value value_;
- public:
- template<typename KeyInput, typename ValueInput>
- HashMapEntry(KeyInput&& k, ValueInput&& v)
- : key_(std::forward<KeyInput>(k)),
- value_(std::forward<ValueInput>(v))
- {}
+ template<class, class, class>
+ friend class detail::HashTable;
+ template<class>
+ friend class detail::HashTableEntry;
+ template<class, class, class, class>
+ friend class HashMap;
+
+public:
+ template<typename KeyInput, typename ValueInput>
+ HashMapEntry(KeyInput&& k, ValueInput&& v)
+ : key_(std::forward<KeyInput>(k))
+ , value_(std::forward<ValueInput>(v))
+ {
+ }
- HashMapEntry(HashMapEntry&& rhs)
- : key_(std::move(rhs.key_)),
- value_(std::move(rhs.value_))
- {}
+ HashMapEntry(HashMapEntry&& rhs)
+ : key_(std::move(rhs.key_))
+ , value_(std::move(rhs.value_))
+ {
+ }
- void operator=(HashMapEntry&& rhs) {
- key_ = std::move(rhs.key_);
- value_ = std::move(rhs.value_);
- }
+ void operator=(HashMapEntry&& rhs)
+ {
+ key_ = std::move(rhs.key_);
+ value_ = std::move(rhs.value_);
+ }
- typedef Key KeyType;
- typedef Value ValueType;
+ typedef Key KeyType;
+ typedef Value ValueType;
- const Key& key() const { return key_; }
- Key& mutableKey() { return key_; }
- const Value& value() const { return value_; }
- Value& value() { return value_; }
+ const Key& key() const { return key_; }
+ Key& mutableKey() { return key_; }
+ const Value& value() const { return value_; }
+ Value& value() { return value_; }
- private:
- HashMapEntry(const HashMapEntry&) = delete;
- void operator=(const HashMapEntry&) = delete;
+private:
+ HashMapEntry(const HashMapEntry&) = delete;
+ void operator=(const HashMapEntry&) = delete;
};
-template <typename K, typename V>
-struct IsPod<HashMapEntry<K, V> >
+template<typename K, typename V>
+struct IsPod<HashMapEntry<K, V>>
: IntegralConstant<bool, IsPod<K>::value && IsPod<V>::value>
-{};
+{
+};
namespace detail {
-template <class T, class HashPolicy, class AllocPolicy>
+template<class T, class HashPolicy, class AllocPolicy>
class HashTable;
-template <typename T>
+template<typename T>
class HashTableEntry
{
- private:
- using NonConstT = typename RemoveConst<T>::Type;
+private:
+ using NonConstT = typename RemoveConst<T>::Type;
+
+ static const HashNumber sFreeKey = 0;
+ static const HashNumber sRemovedKey = 1;
+ static const HashNumber sCollisionBit = 1;
+
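+  // A slot is free when |keyHash == sFreeKey| and is a tombstone left by a
+  // removal when |keyHash == sRemovedKey|; any other value marks a live
+  // entry, whose low bit doubles as the collision marker (see setCollision()
+  // below).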
+ HashNumber keyHash = sFreeKey;
+ alignas(NonConstT) unsigned char valueData_[sizeof(NonConstT)];
+
+private:
+ template<class, class, class>
+ friend class HashTable;
+
+ // Some versions of GCC treat it as a -Wstrict-aliasing violation (ergo a
+ // -Werror compile error) to reinterpret_cast<> |valueData_| to |T*|, even
+ // through |void*|. Placing the latter cast in these separate functions
+ // breaks the chain such that affected GCC versions no longer warn/error.
+ void* rawValuePtr() { return valueData_; }
+
+ static bool isLiveHash(HashNumber hash) { return hash > sRemovedKey; }
+
+ HashTableEntry(const HashTableEntry&) = delete;
+ void operator=(const HashTableEntry&) = delete;
+
+ NonConstT* valuePtr() { return reinterpret_cast<NonConstT*>(rawValuePtr()); }
+
+ void destroyStoredT()
+ {
+ NonConstT* ptr = valuePtr();
+ ptr->~T();
+ MOZ_MAKE_MEM_UNDEFINED(ptr, sizeof(*ptr));
+ }
+
+public:
+ HashTableEntry() = default;
- static const HashNumber sFreeKey = 0;
- static const HashNumber sRemovedKey = 1;
- static const HashNumber sCollisionBit = 1;
+ ~HashTableEntry()
+ {
+ if (isLive())
+ destroyStoredT();
+
+ MOZ_MAKE_MEM_UNDEFINED(this, sizeof(*this));
+ }
+
+ void destroy()
+ {
+ MOZ_ASSERT(isLive());
+ destroyStoredT();
+ }
- HashNumber keyHash = sFreeKey;
- alignas(NonConstT) unsigned char valueData_[sizeof(NonConstT)];
+ void swap(HashTableEntry* other)
+ {
+ if (this == other)
+ return;
+ MOZ_ASSERT(isLive());
+ if (other->isLive()) {
+ Swap(*valuePtr(), *other->valuePtr());
+ } else {
+ *other->valuePtr() = std::move(*valuePtr());
+ destroy();
+ }
+ Swap(keyHash, other->keyHash);
+ }
- private:
- template <class, class, class> friend class HashTable;
+ T& get()
+ {
+ MOZ_ASSERT(isLive());
+ return *valuePtr();
+ }
+
+ NonConstT& getMutable()
+ {
+ MOZ_ASSERT(isLive());
+ return *valuePtr();
+ }
+
+ bool isFree() const { return keyHash == sFreeKey; }
- // Some versions of GCC treat it as a -Wstrict-aliasing violation (ergo a
- // -Werror compile error) to reinterpret_cast<> |valueData_| to |T*|, even
- // through |void*|. Placing the latter cast in these separate functions
- // breaks the chain such that affected GCC versions no longer warn/error.
- void* rawValuePtr() { return valueData_; }
+ void clearLive()
+ {
+ MOZ_ASSERT(isLive());
+ keyHash = sFreeKey;
+ destroyStoredT();
+ }
+
+ void clear()
+ {
+ if (isLive())
+ destroyStoredT();
+
+ MOZ_MAKE_MEM_UNDEFINED(this, sizeof(*this));
+ keyHash = sFreeKey;
+ }
+
+ bool isRemoved() const { return keyHash == sRemovedKey; }
+
+ void removeLive()
+ {
+ MOZ_ASSERT(isLive());
+ keyHash = sRemovedKey;
+ destroyStoredT();
+ }
+
+ bool isLive() const { return isLiveHash(keyHash); }
+
+ void setCollision()
+ {
+ MOZ_ASSERT(isLive());
+ keyHash |= sCollisionBit;
+ }
+
+ void unsetCollision() { keyHash &= ~sCollisionBit; }
+
+ bool hasCollision() const { return keyHash & sCollisionBit; }
+
+ bool matchHash(HashNumber hn) { return (keyHash & ~sCollisionBit) == hn; }
+
+ HashNumber getKeyHash() const { return keyHash & ~sCollisionBit; }
- static bool isLiveHash(HashNumber hash)
+ template<typename... Args>
+ void setLive(HashNumber hn, Args&&... args)
+ {
+ MOZ_ASSERT(!isLive());
+ keyHash = hn;
+ new (valuePtr()) T(std::forward<Args>(args)...);
+ MOZ_ASSERT(isLive());
+ }
+};
+
+template<class T, class HashPolicy, class AllocPolicy>
+class HashTable : private AllocPolicy
+{
+ friend class mozilla::ReentrancyGuard;
+
+ typedef typename RemoveConst<T>::Type NonConstT;
+ typedef typename HashPolicy::KeyType Key;
+ typedef typename HashPolicy::Lookup Lookup;
+
+public:
+ using Entry = HashTableEntry<T>;
+
+ // A nullable pointer to a hash table element. A Ptr |p| can be tested
+ // either explicitly |if (p.found()) p->...| or using boolean conversion
+ // |if (p) p->...|. Ptr objects must not be used after any mutating hash
+ // table operations unless |generation()| is tested.
+ class Ptr
+ {
+ friend class HashTable;
+
+ Entry* entry_;
+#ifdef DEBUG
+ const HashTable* table_;
+ Generation generation;
+#endif
+
+ protected:
+ Ptr(Entry& entry, const HashTable& tableArg)
+ : entry_(&entry)
+#ifdef DEBUG
+ , table_(&tableArg)
+ , generation(tableArg.generation())
+#endif
{
- return hash > sRemovedKey;
- }
-
- HashTableEntry(const HashTableEntry&) = delete;
- void operator=(const HashTableEntry&) = delete;
-
- NonConstT* valuePtr() { return reinterpret_cast<NonConstT*>(rawValuePtr()); }
-
- void destroyStoredT() {
- NonConstT* ptr = valuePtr();
- ptr->~T();
- MOZ_MAKE_MEM_UNDEFINED(ptr, sizeof(*ptr));
}
public:
- HashTableEntry() = default;
-
- ~HashTableEntry() {
- if (isLive())
- destroyStoredT();
-
- MOZ_MAKE_MEM_UNDEFINED(this, sizeof(*this));
- }
-
- void destroy() {
- MOZ_ASSERT(isLive());
- destroyStoredT();
- }
-
- void swap(HashTableEntry* other) {
- if (this == other)
- return;
- MOZ_ASSERT(isLive());
- if (other->isLive()) {
- Swap(*valuePtr(), *other->valuePtr());
- } else {
- *other->valuePtr() = std::move(*valuePtr());
- destroy();
- }
- Swap(keyHash, other->keyHash);
- }
-
- T& get() {
- MOZ_ASSERT(isLive());
- return *valuePtr();
- }
-
- NonConstT& getMutable() {
- MOZ_ASSERT(isLive());
- return *valuePtr();
- }
-
- bool isFree() const {
- return keyHash == sFreeKey;
+ Ptr()
+ : entry_(nullptr)
+#ifdef DEBUG
+ , table_(nullptr)
+ , generation(0)
+#endif
+ {
}
- void clearLive() {
- MOZ_ASSERT(isLive());
- keyHash = sFreeKey;
- destroyStoredT();
- }
-
- void clear() {
- if (isLive())
- destroyStoredT();
+ bool isValid() const { return !!entry_; }
- MOZ_MAKE_MEM_UNDEFINED(this, sizeof(*this));
- keyHash = sFreeKey;
- }
-
- bool isRemoved() const {
- return keyHash == sRemovedKey;
- }
-
- void removeLive() {
- MOZ_ASSERT(isLive());
- keyHash = sRemovedKey;
- destroyStoredT();
+ bool found() const
+ {
+ if (!isValid())
+ return false;
+#ifdef DEBUG
+ MOZ_ASSERT(generation == table_->generation());
+#endif
+ return entry_->isLive();
}
- bool isLive() const {
- return isLiveHash(keyHash);
- }
-
- void setCollision() {
- MOZ_ASSERT(isLive());
- keyHash |= sCollisionBit;
- }
+ explicit operator bool() const { return found(); }
- void unsetCollision() {
- keyHash &= ~sCollisionBit;
- }
-
- bool hasCollision() const {
- return keyHash & sCollisionBit;
- }
-
- bool matchHash(HashNumber hn) {
- return (keyHash & ~sCollisionBit) == hn;
- }
-
- HashNumber getKeyHash() const {
- return keyHash & ~sCollisionBit;
+ bool operator==(const Ptr& rhs) const
+ {
+ MOZ_ASSERT(found() && rhs.found());
+ return entry_ == rhs.entry_;
}
- template <typename... Args>
- void setLive(HashNumber hn, Args&&... args)
+ bool operator!=(const Ptr& rhs) const
{
- MOZ_ASSERT(!isLive());
- keyHash = hn;
- new (valuePtr()) T(std::forward<Args>(args)...);
- MOZ_ASSERT(isLive());
+#ifdef DEBUG
+ MOZ_ASSERT(generation == table_->generation());
+#endif
+ return !(*this == rhs);
}
-};
-
-template <class T, class HashPolicy, class AllocPolicy>
-class HashTable : private AllocPolicy
-{
- friend class mozilla::ReentrancyGuard;
-
- typedef typename RemoveConst<T>::Type NonConstT;
- typedef typename HashPolicy::KeyType Key;
- typedef typename HashPolicy::Lookup Lookup;
-
- public:
- using Entry = HashTableEntry<T>;
-
- // A nullable pointer to a hash table element. A Ptr |p| can be tested
- // either explicitly |if (p.found()) p->...| or using boolean conversion
- // |if (p) p->...|. Ptr objects must not be used after any mutating hash
- // table operations unless |generation()| is tested.
- class Ptr
- {
- friend class HashTable;
-
- Entry* entry_;
-#ifdef DEBUG
- const HashTable* table_;
- Generation generation;
-#endif
- protected:
- Ptr(Entry& entry, const HashTable& tableArg)
- : entry_(&entry)
+ T& operator*() const
+ {
#ifdef DEBUG
- , table_(&tableArg)
- , generation(tableArg.generation())
-#endif
- {}
-
- public:
- Ptr()
- : entry_(nullptr)
-#ifdef DEBUG
- , table_(nullptr)
- , generation(0)
+ MOZ_ASSERT(found());
+ MOZ_ASSERT(generation == table_->generation());
#endif
- {}
-
- bool isValid() const {
- return !!entry_;
- }
-
- bool found() const {
- if (!isValid())
- return false;
-#ifdef DEBUG
- MOZ_ASSERT(generation == table_->generation());
-#endif
- return entry_->isLive();
- }
-
- explicit operator bool() const {
- return found();
- }
-
- bool operator==(const Ptr& rhs) const {
- MOZ_ASSERT(found() && rhs.found());
- return entry_ == rhs.entry_;
- }
+ return entry_->get();
+ }
- bool operator!=(const Ptr& rhs) const {
-#ifdef DEBUG
- MOZ_ASSERT(generation == table_->generation());
-#endif
- return !(*this == rhs);
- }
-
- T& operator*() const {
-#ifdef DEBUG
- MOZ_ASSERT(found());
- MOZ_ASSERT(generation == table_->generation());
-#endif
- return entry_->get();
- }
-
- T* operator->() const {
-#ifdef DEBUG
- MOZ_ASSERT(found());
- MOZ_ASSERT(generation == table_->generation());
-#endif
- return &entry_->get();
- }
- };
-
- // A Ptr that can be used to add a key after a failed lookup.
- class AddPtr : public Ptr
+ T* operator->() const
{
- friend class HashTable;
- HashNumber keyHash;
-#ifdef DEBUG
- uint64_t mutationCount;
-#endif
-
- AddPtr(Entry& entry, const HashTable& tableArg, HashNumber hn)
- : Ptr(entry, tableArg)
- , keyHash(hn)
#ifdef DEBUG
- , mutationCount(tableArg.mutationCount)
+ MOZ_ASSERT(found());
+ MOZ_ASSERT(generation == table_->generation());
#endif
- {}
-
- public:
- AddPtr() : keyHash(0) {}
- };
-
- // A collection of hash table entries. The collection is enumerated by
- // calling |front()| followed by |popFront()| as long as |!empty()|. As
- // with Ptr/AddPtr, Range objects must not be used after any mutating hash
- // table operation unless the |generation()| is tested.
- class Range
- {
- protected:
- friend class HashTable;
+ return &entry_->get();
+ }
+ };
- Range(const HashTable& tableArg, Entry* c, Entry* e)
- : cur(c)
- , end(e)
+ // A Ptr that can be used to add a key after a failed lookup.
+ class AddPtr : public Ptr
+ {
+ friend class HashTable;
+ HashNumber keyHash;
#ifdef DEBUG
- , table_(&tableArg)
- , mutationCount(tableArg.mutationCount)
- , generation(tableArg.generation())
- , validEntry(true)
-#endif
- {
- while (cur < end && !cur->isLive())
- ++cur;
- }
-
- Entry* cur;
- Entry* end;
-#ifdef DEBUG
- const HashTable* table_;
- uint64_t mutationCount;
- Generation generation;
- bool validEntry;
+ uint64_t mutationCount;
#endif
- public:
- Range()
- : cur(nullptr)
- , end(nullptr)
-#ifdef DEBUG
- , table_(nullptr)
- , mutationCount(0)
- , generation(0)
- , validEntry(false)
-#endif
- {}
-
- bool empty() const {
-#ifdef DEBUG
- MOZ_ASSERT(generation == table_->generation());
- MOZ_ASSERT(mutationCount == table_->mutationCount);
-#endif
- return cur == end;
- }
-
- T& front() const {
- MOZ_ASSERT(!empty());
-#ifdef DEBUG
- MOZ_ASSERT(validEntry);
- MOZ_ASSERT(generation == table_->generation());
- MOZ_ASSERT(mutationCount == table_->mutationCount);
-#endif
- return cur->get();
- }
-
- void popFront() {
- MOZ_ASSERT(!empty());
-#ifdef DEBUG
- MOZ_ASSERT(generation == table_->generation());
- MOZ_ASSERT(mutationCount == table_->mutationCount);
-#endif
- while (++cur < end && !cur->isLive())
- continue;
+ AddPtr(Entry& entry, const HashTable& tableArg, HashNumber hn)
+ : Ptr(entry, tableArg)
+ , keyHash(hn)
#ifdef DEBUG
- validEntry = true;
-#endif
- }
- };
-
- // A Range whose lifetime delimits a mutating enumeration of a hash table.
- // Since rehashing when elements were removed during enumeration would be
- // bad, it is postponed until the Enum is destructed. Since the Enum's
- // destructor touches the hash table, the user must ensure that the hash
- // table is still alive when the destructor runs.
- class Enum : public Range
- {
- friend class HashTable;
-
- HashTable& table_;
- bool rekeyed;
- bool removed;
-
- // Enum is movable but not copyable.
- Enum(const Enum&) = delete;
- void operator=(const Enum&) = delete;
-
- public:
- template<class Map>
- explicit Enum(Map& map)
- : Range(map.all()), table_(map.impl), rekeyed(false), removed(false) {}
-
- MOZ_IMPLICIT Enum(Enum&& other)
- : Range(other), table_(other.table_), rekeyed(other.rekeyed), removed(other.removed)
- {
- other.rekeyed = false;
- other.removed = false;
- }
-
- // Removes the |front()| element from the table, leaving |front()|
- // invalid until the next call to |popFront()|. For example:
- //
- // HashSet<int> s;
- // for (HashSet<int>::Enum e(s); !e.empty(); e.popFront())
- // if (e.front() == 42)
- // e.removeFront();
- void removeFront() {
- table_.remove(*this->cur);
- removed = true;
-#ifdef DEBUG
- this->validEntry = false;
- this->mutationCount = table_.mutationCount;
-#endif
- }
-
- NonConstT& mutableFront() {
- MOZ_ASSERT(!this->empty());
-#ifdef DEBUG
- MOZ_ASSERT(this->validEntry);
- MOZ_ASSERT(this->generation == this->Range::table_->generation());
- MOZ_ASSERT(this->mutationCount == this->Range::table_->mutationCount);
-#endif
- return this->cur->getMutable();
- }
-
- // Removes the |front()| element and re-inserts it into the table with
- // a new key at the new Lookup position. |front()| is invalid after
- // this operation until the next call to |popFront()|.
- void rekeyFront(const Lookup& l, const Key& k) {
- MOZ_ASSERT(&k != &HashPolicy::getKey(this->cur->get()));
- Ptr p(*this->cur, table_);
- table_.rekeyWithoutRehash(p, l, k);
- rekeyed = true;
-#ifdef DEBUG
- this->validEntry = false;
- this->mutationCount = table_.mutationCount;
+ , mutationCount(tableArg.mutationCount)
#endif
- }
-
- void rekeyFront(const Key& k) {
- rekeyFront(k, k);
- }
-
- // Potentially rehashes the table.
- ~Enum() {
- if (rekeyed) {
- table_.gen++;
- table_.checkOverRemoved();
- }
-
- if (removed)
- table_.compactIfUnderloaded();
- }
- };
-
- // HashTable is movable
- HashTable(HashTable&& rhs)
- : AllocPolicy(rhs)
{
- PodAssign(this, &rhs);
- rhs.table = nullptr;
- }
- void operator=(HashTable&& rhs) {
- MOZ_ASSERT(this != &rhs, "self-move assignment is prohibited");
- if (table)
- destroyTable(*this, table, capacity());
- PodAssign(this, &rhs);
- rhs.table = nullptr;
- }
-
- private:
- // HashTable is not copyable or assignable
- HashTable(const HashTable&) = delete;
- void operator=(const HashTable&) = delete;
-
- private:
- static const size_t CAP_BITS = 30;
-
- public:
- uint64_t gen:56; // entry storage generation number
- uint64_t hashShift:8; // multiplicative hash shift
- Entry* table; // entry storage
- uint32_t entryCount; // number of entries in table
- uint32_t removedCount; // removed entry sentinels in table
-
-#ifdef DEBUG
- uint64_t mutationCount;
- mutable bool mEntered;
- // Note that some updates to these stats are not thread-safe. See the
- // comment on the three-argument overloading of HashTable::lookup().
- mutable struct Stats
- {
- uint32_t searches; // total number of table searches
- uint32_t steps; // hash chain links traversed
- uint32_t hits; // searches that found key
- uint32_t misses; // searches that didn't find key
- uint32_t addOverRemoved; // adds that recycled a removed entry
- uint32_t removes; // calls to remove
- uint32_t removeFrees; // calls to remove that freed the entry
- uint32_t grows; // table expansions
- uint32_t shrinks; // table contractions
- uint32_t compresses; // table compressions
- uint32_t rehashes; // tombstone decontaminations
- } stats;
-# define METER(x) x
-#else
-# define METER(x)
-#endif
-
- // The default initial capacity is 32 (enough to hold 16 elements), but it
- // can be as low as 4.
- static const uint32_t sMinCapacity = 4;
- static const uint32_t sMaxInit = 1u << (CAP_BITS - 1);
- static const uint32_t sMaxCapacity = 1u << CAP_BITS;
-
- // Hash-table alpha is conceptually a fraction, but to avoid floating-point
- // math we implement it as a ratio of integers.
- static const uint8_t sAlphaDenominator = 4;
- static const uint8_t sMinAlphaNumerator = 1; // min alpha: 1/4
- static const uint8_t sMaxAlphaNumerator = 3; // max alpha: 3/4
-
- static const HashNumber sFreeKey = Entry::sFreeKey;
- static const HashNumber sRemovedKey = Entry::sRemovedKey;
- static const HashNumber sCollisionBit = Entry::sCollisionBit;
-
- void setTableSizeLog2(uint32_t sizeLog2)
- {
- hashShift = kHashNumberBits - sizeLog2;
- }
-
- static bool isLiveHash(HashNumber hash)
- {
- return Entry::isLiveHash(hash);
- }
-
- static HashNumber prepareHash(const Lookup& l)
- {
- HashNumber keyHash = ScrambleHashCode(HashPolicy::hash(l));
-
- // Avoid reserved hash codes.
- if (!isLiveHash(keyHash))
- keyHash -= (sRemovedKey + 1);
- return keyHash & ~sCollisionBit;
- }
-
- enum FailureBehavior { DontReportFailure = false, ReportFailure = true };
-
- static Entry* createTable(AllocPolicy& alloc, uint32_t capacity,
- FailureBehavior reportFailure = ReportFailure)
- {
- Entry* table = reportFailure
- ? alloc.template pod_malloc<Entry>(capacity)
- : alloc.template maybe_pod_malloc<Entry>(capacity);
- if (table) {
- for (uint32_t i = 0; i < capacity; i++)
- new (&table[i]) Entry();
- }
- return table;
- }
-
- static Entry* maybeCreateTable(AllocPolicy& alloc, uint32_t capacity)
- {
- Entry* table = alloc.template maybe_pod_malloc<Entry>(capacity);
- if (table) {
- for (uint32_t i = 0; i < capacity; i++)
- new (&table[i]) Entry();
- }
- return table;
- }
-
- static void destroyTable(AllocPolicy& alloc, Entry* oldTable, uint32_t capacity)
- {
- Entry* end = oldTable + capacity;
- for (Entry* e = oldTable; e < end; ++e)
- e->~Entry();
- alloc.free_(oldTable, capacity);
}
public:
- explicit HashTable(AllocPolicy ap)
- : AllocPolicy(ap)
- , gen(0)
- , hashShift(kHashNumberBits)
- , table(nullptr)
- , entryCount(0)
- , removedCount(0)
-#ifdef DEBUG
- , mutationCount(0)
- , mEntered(false)
-#endif
- {}
-
- MOZ_MUST_USE bool init(uint32_t length)
+ AddPtr()
+ : keyHash(0)
{
- MOZ_ASSERT(!initialized());
-
- // Reject all lengths whose initial computed capacity would exceed
- // sMaxCapacity. Round that maximum length down to the nearest power
- // of two for speedier code.
- if (MOZ_UNLIKELY(length > sMaxInit)) {
- this->reportAllocOverflow();
- return false;
- }
-
- static_assert((sMaxInit * sAlphaDenominator) / sAlphaDenominator == sMaxInit,
- "multiplication in numerator below could overflow");
- static_assert(sMaxInit * sAlphaDenominator <= UINT32_MAX - sMaxAlphaNumerator,
- "numerator calculation below could potentially overflow");
-
- // Compute the smallest capacity allowing |length| elements to be
- // inserted without rehashing: ceil(length / max-alpha). (Ceiling
- // integral division: <http://stackoverflow.com/a/2745086>.)
- uint32_t newCapacity =
- (length * sAlphaDenominator + sMaxAlphaNumerator - 1) / sMaxAlphaNumerator;
- if (newCapacity < sMinCapacity)
- newCapacity = sMinCapacity;
-
- // Round up capacity to next power-of-two.
- uint32_t log2 = mozilla::CeilingLog2(newCapacity);
- newCapacity = 1u << log2;
-
- MOZ_ASSERT(newCapacity >= length);
- MOZ_ASSERT(newCapacity <= sMaxCapacity);
-
- table = createTable(*this, newCapacity);
- if (!table)
- return false;
-
- setTableSizeLog2(log2);
- METER(memset(&stats, 0, sizeof(stats)));
- return true;
}
-
- bool initialized() const
- {
- return !!table;
- }
-
- ~HashTable()
- {
- if (table)
- destroyTable(*this, table, capacity());
- }
-
- private:
- HashNumber hash1(HashNumber hash0) const
- {
- return hash0 >> hashShift;
- }
-
- struct DoubleHash
- {
- HashNumber h2;
- HashNumber sizeMask;
- };
-
- DoubleHash hash2(HashNumber curKeyHash) const
- {
- uint32_t sizeLog2 = kHashNumberBits - hashShift;
- DoubleHash dh = {
- ((curKeyHash << sizeLog2) >> hashShift) | 1,
- (HashNumber(1) << sizeLog2) - 1
- };
- return dh;
- }
-
- static HashNumber applyDoubleHash(HashNumber h1, const DoubleHash& dh)
- {
- return (h1 - dh.h2) & dh.sizeMask;
- }
-
- bool overloaded()
- {
- static_assert(sMaxCapacity <= UINT32_MAX / sMaxAlphaNumerator,
- "multiplication below could overflow");
- return entryCount + removedCount >=
- capacity() * sMaxAlphaNumerator / sAlphaDenominator;
- }
-
- // Would the table be underloaded if it had the given capacity and entryCount?
- static bool wouldBeUnderloaded(uint32_t capacity, uint32_t entryCount)
- {
- static_assert(sMaxCapacity <= UINT32_MAX / sMinAlphaNumerator,
- "multiplication below could overflow");
- return capacity > sMinCapacity &&
- entryCount <= capacity * sMinAlphaNumerator / sAlphaDenominator;
- }
+ };
- bool underloaded()
- {
- return wouldBeUnderloaded(capacity(), entryCount);
- }
-
- static MOZ_ALWAYS_INLINE bool match(Entry& e, const Lookup& l)
- {
- return HashPolicy::match(HashPolicy::getKey(e.get()), l);
- }
-
- // Warning: in order for readonlyThreadsafeLookup() to be safe this
- // function must not modify the table in any way when |collisionBit| is 0.
- // (The use of the METER() macro to increment stats violates this
- // restriction but we will live with that for now because it's enabled so
- // rarely.)
- MOZ_ALWAYS_INLINE Entry&
- lookup(const Lookup& l, HashNumber keyHash, uint32_t collisionBit) const
- {
- MOZ_ASSERT(isLiveHash(keyHash));
- MOZ_ASSERT(!(keyHash & sCollisionBit));
- MOZ_ASSERT(collisionBit == 0 || collisionBit == sCollisionBit);
- MOZ_ASSERT(table);
- METER(stats.searches++);
-
- // Compute the primary hash address.
- HashNumber h1 = hash1(keyHash);
- Entry* entry = &table[h1];
-
- // Miss: return space for a new entry.
- if (entry->isFree()) {
- METER(stats.misses++);
- return *entry;
- }
-
- // Hit: return entry.
- if (entry->matchHash(keyHash) && match(*entry, l)) {
- METER(stats.hits++);
- return *entry;
- }
-
- // Collision: double hash.
- DoubleHash dh = hash2(keyHash);
-
- // Save the first removed entry pointer so we can recycle later.
- Entry* firstRemoved = nullptr;
-
- while (true) {
- if (MOZ_UNLIKELY(entry->isRemoved())) {
- if (!firstRemoved)
- firstRemoved = entry;
- } else {
- if (collisionBit == sCollisionBit)
- entry->setCollision();
- }
+ // A collection of hash table entries. The collection is enumerated by
+ // calling |front()| followed by |popFront()| as long as |!empty()|. As
+ // with Ptr/AddPtr, Range objects must not be used after any mutating hash
+ // table operation unless the |generation()| is tested.
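+  //
+  // For example (a sketch; |s| is an initialized, populated HashSet<int>):
+  //
+  //   int sum = 0;
+  //   for (HashSet<int>::Range r = s.all(); !r.empty(); r.popFront())
+  //     sum += r.front();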
+ class Range
+ {
+ protected:
+ friend class HashTable;
- METER(stats.steps++);
- h1 = applyDoubleHash(h1, dh);
-
- entry = &table[h1];
- if (entry->isFree()) {
- METER(stats.misses++);
- return firstRemoved ? *firstRemoved : *entry;
- }
-
- if (entry->matchHash(keyHash) && match(*entry, l)) {
- METER(stats.hits++);
- return *entry;
- }
- }
- }
-
- // This is a copy of lookup hardcoded to the assumptions:
- // 1. the lookup is a lookupForAdd
- // 2. the key, whose |keyHash| has been passed is not in the table,
- // 3. no entries have been removed from the table.
- // This specialized search avoids the need for recovering lookup values
- // from entries, which allows more flexible Lookup/Key types.
- Entry& findFreeEntry(HashNumber keyHash)
+ Range(const HashTable& tableArg, Entry* c, Entry* e)
+ : cur(c)
+ , end(e)
+#ifdef DEBUG
+ , table_(&tableArg)
+ , mutationCount(tableArg.mutationCount)
+ , generation(tableArg.generation())
+ , validEntry(true)
+#endif
{
- MOZ_ASSERT(!(keyHash & sCollisionBit));
- MOZ_ASSERT(table);
- METER(stats.searches++);
-
- // We assume 'keyHash' has already been distributed.
-
- // Compute the primary hash address.
- HashNumber h1 = hash1(keyHash);
- Entry* entry = &table[h1];
-
- // Miss: return space for a new entry.
- if (!entry->isLive()) {
- METER(stats.misses++);
- return *entry;
- }
-
- // Collision: double hash.
- DoubleHash dh = hash2(keyHash);
-
- while (true) {
- MOZ_ASSERT(!entry->isRemoved());
- entry->setCollision();
-
- METER(stats.steps++);
- h1 = applyDoubleHash(h1, dh);
-
- entry = &table[h1];
- if (!entry->isLive()) {
- METER(stats.misses++);
- return *entry;
- }
- }
+ while (cur < end && !cur->isLive())
+ ++cur;
}
- enum RebuildStatus { NotOverloaded, Rehashed, RehashFailed };
-
- RebuildStatus changeTableSize(int deltaLog2, FailureBehavior reportFailure = ReportFailure)
- {
- // Look, but don't touch, until we succeed in getting new entry store.
- Entry* oldTable = table;
- uint32_t oldCap = capacity();
- uint32_t newLog2 = kHashNumberBits - hashShift + deltaLog2;
- uint32_t newCapacity = 1u << newLog2;
- if (MOZ_UNLIKELY(newCapacity > sMaxCapacity)) {
- if (reportFailure)
- this->reportAllocOverflow();
- return RehashFailed;
- }
-
- Entry* newTable = createTable(*this, newCapacity, reportFailure);
- if (!newTable)
- return RehashFailed;
-
- // We can't fail from here on, so update table parameters.
- setTableSizeLog2(newLog2);
- removedCount = 0;
- gen++;
- table = newTable;
-
- // Copy only live entries, leaving removed ones behind.
- Entry* end = oldTable + oldCap;
- for (Entry* src = oldTable; src < end; ++src) {
- if (src->isLive()) {
- HashNumber hn = src->getKeyHash();
- findFreeEntry(hn).setLive(
- hn, std::move(const_cast<typename Entry::NonConstT&>(src->get())));
- }
-
- src->~Entry();
- }
-
- // All entries have been destroyed, no need to destroyTable.
- this->free_(oldTable, oldCap);
- return Rehashed;
- }
-
- bool shouldCompressTable()
- {
- // Compress if a quarter or more of all entries are removed.
- return removedCount >= (capacity() >> 2);
- }
-
- RebuildStatus checkOverloaded(FailureBehavior reportFailure = ReportFailure)
- {
- if (!overloaded())
- return NotOverloaded;
-
- int deltaLog2;
- if (shouldCompressTable()) {
- METER(stats.compresses++);
- deltaLog2 = 0;
- } else {
- METER(stats.grows++);
- deltaLog2 = 1;
- }
-
- return changeTableSize(deltaLog2, reportFailure);
- }
-
- // Infallibly rehash the table if we are overloaded with removals.
- void checkOverRemoved()
- {
- if (overloaded()) {
- if (checkOverloaded(DontReportFailure) == RehashFailed)
- rehashTableInPlace();
- }
- }
-
- void remove(Entry& e)
- {
- MOZ_ASSERT(table);
- METER(stats.removes++);
-
- if (e.hasCollision()) {
- e.removeLive();
- removedCount++;
- } else {
- METER(stats.removeFrees++);
- e.clearLive();
- }
- entryCount--;
+ Entry* cur;
+ Entry* end;
#ifdef DEBUG
- mutationCount++;
+ const HashTable* table_;
+ uint64_t mutationCount;
+ Generation generation;
+ bool validEntry;
#endif
- }
-
- void checkUnderloaded()
- {
- if (underloaded()) {
- METER(stats.shrinks++);
- (void) changeTableSize(-1, DontReportFailure);
- }
- }
-
- // Resize the table down to the largest capacity which doesn't underload the
- // table. Since we call checkUnderloaded() on every remove, you only need
- // to call this after a bulk removal of items done without calling remove().
- void compactIfUnderloaded()
- {
- int32_t resizeLog2 = 0;
- uint32_t newCapacity = capacity();
- while (wouldBeUnderloaded(newCapacity, entryCount)) {
- newCapacity = newCapacity >> 1;
- resizeLog2--;
- }
-
- if (resizeLog2 != 0)
- (void) changeTableSize(resizeLog2, DontReportFailure);
- }
-
- // This is identical to changeTableSize(currentSize), but without requiring
- // a second table. We do this by recycling the collision bits to tell us if
- // the element is already inserted or still waiting to be inserted. Since
- // already-inserted elements win any conflicts, we get the same table as we
- // would have gotten through random insertion order.
- void rehashTableInPlace()
- {
- METER(stats.rehashes++);
- removedCount = 0;
- gen++;
- for (size_t i = 0; i < capacity(); ++i)
- table[i].unsetCollision();
-
- for (size_t i = 0; i < capacity();) {
- Entry* src = &table[i];
-
- if (!src->isLive() || src->hasCollision()) {
- ++i;
- continue;
- }
-
- HashNumber keyHash = src->getKeyHash();
- HashNumber h1 = hash1(keyHash);
- DoubleHash dh = hash2(keyHash);
- Entry* tgt = &table[h1];
- while (true) {
- if (!tgt->hasCollision()) {
- src->swap(tgt);
- tgt->setCollision();
- break;
- }
-
- h1 = applyDoubleHash(h1, dh);
- tgt = &table[h1];
- }
- }
-
- // TODO: this algorithm leaves collision bits on *all* elements, even if
- // they are on no collision path. We have the option of setting the
- // collision bits correctly on a subsequent pass or skipping the rehash
- // unless we are totally filled with tombstones: benchmark to find out
- // which approach is best.
- }
-
- // Note: |l| may be a reference to a piece of |u|, so this function
- // must take care not to use |l| after moving |u|.
- //
- // Prefer to use putNewInfallible; this function does not check
- // invariants.
- template <typename... Args>
- void putNewInfallibleInternal(const Lookup& l, Args&&... args)
- {
- MOZ_ASSERT(table);
-
- HashNumber keyHash = prepareHash(l);
- Entry* entry = &findFreeEntry(keyHash);
- MOZ_ASSERT(entry);
-
- if (entry->isRemoved()) {
- METER(stats.addOverRemoved++);
- removedCount--;
- keyHash |= sCollisionBit;
- }
-
- entry->setLive(keyHash, std::forward<Args>(args)...);
- entryCount++;
-#ifdef DEBUG
- mutationCount++;
-#endif
- }
public:
- void clear()
- {
- Entry* end = table + capacity();
- for (Entry* e = table; e < end; ++e)
- e->clear();
-
- removedCount = 0;
- entryCount = 0;
+ Range()
+ : cur(nullptr)
+ , end(nullptr)
#ifdef DEBUG
- mutationCount++;
+ , table_(nullptr)
+ , mutationCount(0)
+ , generation(0)
+ , validEntry(false)
#endif
- }
-
- void clearAndShrink()
- {
- clear();
- compactIfUnderloaded();
- }
-
- void finish()
{
-#ifdef DEBUG
- MOZ_ASSERT(!mEntered);
-#endif
- if (!table)
- return;
-
- destroyTable(*this, table, capacity());
- table = nullptr;
- gen++;
- entryCount = 0;
- removedCount = 0;
-#ifdef DEBUG
- mutationCount++;
-#endif
- }
-
- Range all() const
- {
- MOZ_ASSERT(table);
- return Range(*this, table, table + capacity());
}
bool empty() const
{
- MOZ_ASSERT(table);
- return !entryCount;
+#ifdef DEBUG
+ MOZ_ASSERT(generation == table_->generation());
+ MOZ_ASSERT(mutationCount == table_->mutationCount);
+#endif
+ return cur == end;
}
- uint32_t count() const
+ T& front() const
{
- MOZ_ASSERT(table);
- return entryCount;
+ MOZ_ASSERT(!empty());
+#ifdef DEBUG
+ MOZ_ASSERT(validEntry);
+ MOZ_ASSERT(generation == table_->generation());
+ MOZ_ASSERT(mutationCount == table_->mutationCount);
+#endif
+ return cur->get();
}
- uint32_t capacity() const
+ void popFront()
{
- MOZ_ASSERT(table);
- return 1u << (kHashNumberBits - hashShift);
+ MOZ_ASSERT(!empty());
+#ifdef DEBUG
+ MOZ_ASSERT(generation == table_->generation());
+ MOZ_ASSERT(mutationCount == table_->mutationCount);
+#endif
+ while (++cur < end && !cur->isLive())
+ continue;
+#ifdef DEBUG
+ validEntry = true;
+#endif
+ }
+ };
+
+ // A Range whose lifetime delimits a mutating enumeration of a hash table.
+ // Since rehashing when elements were removed during enumeration would be
+ // bad, it is postponed until the Enum is destructed. Since the Enum's
+ // destructor touches the hash table, the user must ensure that the hash
+ // table is still alive when the destructor runs.
+ class Enum : public Range
+ {
+ friend class HashTable;
+
+ HashTable& table_;
+ bool rekeyed;
+ bool removed;
+
+ // Enum is movable but not copyable.
+ Enum(const Enum&) = delete;
+ void operator=(const Enum&) = delete;
+
+ public:
+ template<class Map>
+ explicit Enum(Map& map)
+ : Range(map.all())
+ , table_(map.impl)
+ , rekeyed(false)
+ , removed(false)
+ {
}
- Generation generation() const
+ MOZ_IMPLICIT Enum(Enum&& other)
+ : Range(other)
+ , table_(other.table_)
+ , rekeyed(other.rekeyed)
+ , removed(other.removed)
{
- MOZ_ASSERT(table);
- return Generation(gen);
+ other.rekeyed = false;
+ other.removed = false;
}
- size_t sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const
+ // Removes the |front()| element from the table, leaving |front()|
+ // invalid until the next call to |popFront()|. For example:
+ //
+ // HashSet<int> s;
+ // for (HashSet<int>::Enum e(s); !e.empty(); e.popFront())
+ // if (e.front() == 42)
+ // e.removeFront();
+ void removeFront()
{
- return mallocSizeOf(table);
- }
-
- size_t sizeOfIncludingThis(MallocSizeOf mallocSizeOf) const
- {
- return mallocSizeOf(this) + sizeOfExcludingThis(mallocSizeOf);
+ table_.remove(*this->cur);
+ removed = true;
+#ifdef DEBUG
+ this->validEntry = false;
+ this->mutationCount = table_.mutationCount;
+#endif
}
- MOZ_ALWAYS_INLINE Ptr lookup(const Lookup& l) const
+ NonConstT& mutableFront()
{
- ReentrancyGuard g(*this);
- if (!HasHash<HashPolicy>(l))
- return Ptr();
- HashNumber keyHash = prepareHash(l);
- return Ptr(lookup(l, keyHash, 0), *this);
+ MOZ_ASSERT(!this->empty());
+#ifdef DEBUG
+ MOZ_ASSERT(this->validEntry);
+ MOZ_ASSERT(this->generation == this->Range::table_->generation());
+ MOZ_ASSERT(this->mutationCount == this->Range::table_->mutationCount);
+#endif
+ return this->cur->getMutable();
}
- MOZ_ALWAYS_INLINE Ptr readonlyThreadsafeLookup(const Lookup& l) const
+ // Removes the |front()| element and re-inserts it into the table with
+ // a new key at the new Lookup position. |front()| is invalid after
+ // this operation until the next call to |popFront()|.
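+    //
+    // For example (a sketch; |oldKey| and |newKey| are placeholders):
+    //
+    //   HashSet<int> s;
+    //   for (HashSet<int>::Enum e(s); !e.empty(); e.popFront())
+    //     if (e.front() == oldKey)
+    //       e.rekeyFront(newKey);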
+ void rekeyFront(const Lookup& l, const Key& k)
{
- if (!HasHash<HashPolicy>(l))
- return Ptr();
- HashNumber keyHash = prepareHash(l);
- return Ptr(lookup(l, keyHash, 0), *this);
+ MOZ_ASSERT(&k != &HashPolicy::getKey(this->cur->get()));
+ Ptr p(*this->cur, table_);
+ table_.rekeyWithoutRehash(p, l, k);
+ rekeyed = true;
+#ifdef DEBUG
+ this->validEntry = false;
+ this->mutationCount = table_.mutationCount;
+#endif
}
- MOZ_ALWAYS_INLINE AddPtr lookupForAdd(const Lookup& l) const
+ void rekeyFront(const Key& k) { rekeyFront(k, k); }
+
+ // Potentially rehashes the table.
+ ~Enum()
{
- ReentrancyGuard g(*this);
- if (!EnsureHash<HashPolicy>(l))
- return AddPtr();
- HashNumber keyHash = prepareHash(l);
- // Directly call the constructor in the return statement to avoid
- // excess copying when building with Visual Studio 2017.
- // See bug 1385181.
- return AddPtr(lookup(l, keyHash, sCollisionBit), *this, keyHash);
+ if (rekeyed) {
+ table_.gen++;
+ table_.checkOverRemoved();
+ }
+
+ if (removed)
+ table_.compactIfUnderloaded();
}
+ };
+
+ // HashTable is movable
+ HashTable(HashTable&& rhs)
+ : AllocPolicy(rhs)
+ {
+ PodAssign(this, &rhs);
+ rhs.table = nullptr;
+ }
+ void operator=(HashTable&& rhs)
+ {
+ MOZ_ASSERT(this != &rhs, "self-move assignment is prohibited");
+ if (table)
+ destroyTable(*this, table, capacity());
+ PodAssign(this, &rhs);
+ rhs.table = nullptr;
+ }
- template <typename... Args>
- MOZ_MUST_USE bool add(AddPtr& p, Args&&... args)
- {
- ReentrancyGuard g(*this);
- MOZ_ASSERT(table);
- MOZ_ASSERT_IF(p.isValid(), p.table_ == this);
- MOZ_ASSERT(!p.found());
- MOZ_ASSERT(!(p.keyHash & sCollisionBit));
+private:
+ // HashTable is not copyable or assignable
+ HashTable(const HashTable&) = delete;
+ void operator=(const HashTable&) = delete;
+
+private:
+ static const size_t CAP_BITS = 30;
+
+public:
+ uint64_t gen : 56; // entry storage generation number
+ uint64_t hashShift : 8; // multiplicative hash shift
+ Entry* table; // entry storage
+ uint32_t entryCount; // number of entries in table
+ uint32_t removedCount; // removed entry sentinels in table
- // Check for error from ensureHash() here.
- if (!p.isValid())
- return false;
-
- MOZ_ASSERT(p.generation == generation());
#ifdef DEBUG
- MOZ_ASSERT(p.mutationCount == mutationCount);
+ uint64_t mutationCount;
+ mutable bool mEntered;
+ // Note that some updates to these stats are not thread-safe. See the
+ // comment on the three-argument overloading of HashTable::lookup().
+ mutable struct Stats
+ {
+ uint32_t searches; // total number of table searches
+ uint32_t steps; // hash chain links traversed
+ uint32_t hits; // searches that found key
+ uint32_t misses; // searches that didn't find key
+ uint32_t addOverRemoved; // adds that recycled a removed entry
+ uint32_t removes; // calls to remove
+ uint32_t removeFrees; // calls to remove that freed the entry
+ uint32_t grows; // table expansions
+ uint32_t shrinks; // table contractions
+ uint32_t compresses; // table compressions
+ uint32_t rehashes; // tombstone decontaminations
+ } stats;
+#define METER(x) x
+#else
+#define METER(x)
#endif
- // Changing an entry from removed to live does not affect whether we
- // are overloaded and can be handled separately.
- if (p.entry_->isRemoved()) {
- if (!this->checkSimulatedOOM())
- return false;
- METER(stats.addOverRemoved++);
- removedCount--;
- p.keyHash |= sCollisionBit;
- } else {
- // Preserve the validity of |p.entry_|.
- RebuildStatus status = checkOverloaded();
- if (status == RehashFailed)
- return false;
- if (status == NotOverloaded && !this->checkSimulatedOOM())
- return false;
- if (status == Rehashed)
- p.entry_ = &findFreeEntry(p.keyHash);
- }
+ // The default initial capacity is 32 (enough to hold 16 elements), but it
+ // can be as low as 4.
+ static const uint32_t sMinCapacity = 4;
+ static const uint32_t sMaxInit = 1u << (CAP_BITS - 1);
+ static const uint32_t sMaxCapacity = 1u << CAP_BITS;
+
+ // Hash-table alpha is conceptually a fraction, but to avoid floating-point
+ // math we implement it as a ratio of integers.
+ static const uint8_t sAlphaDenominator = 4;
+ static const uint8_t sMinAlphaNumerator = 1; // min alpha: 1/4
+ static const uint8_t sMaxAlphaNumerator = 3; // max alpha: 3/4
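+  //
+  // For example, with the values above a table of capacity 32 counts as
+  // overloaded once entryCount + removedCount reaches 32 * 3/4 = 24, and as
+  // underloaded once entryCount falls to 32 * 1/4 = 8 or below (see
+  // overloaded() and wouldBeUnderloaded() below).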
+
+ static const HashNumber sFreeKey = Entry::sFreeKey;
+ static const HashNumber sRemovedKey = Entry::sRemovedKey;
+ static const HashNumber sCollisionBit = Entry::sCollisionBit;
+
+ void setTableSizeLog2(uint32_t sizeLog2)
+ {
+ hashShift = kHashNumberBits - sizeLog2;
+ }
+
+ static bool isLiveHash(HashNumber hash) { return Entry::isLiveHash(hash); }
+
+ static HashNumber prepareHash(const Lookup& l)
+ {
+ HashNumber keyHash = ScrambleHashCode(HashPolicy::hash(l));
+
+ // Avoid reserved hash codes.
+ if (!isLiveHash(keyHash))
+ keyHash -= (sRemovedKey + 1);
+ return keyHash & ~sCollisionBit;
+ }
+
+ enum FailureBehavior
+ {
+ DontReportFailure = false,
+ ReportFailure = true
+ };
- p.entry_->setLive(p.keyHash, std::forward<Args>(args)...);
- entryCount++;
+ static Entry* createTable(AllocPolicy& alloc,
+ uint32_t capacity,
+ FailureBehavior reportFailure = ReportFailure)
+ {
+ Entry* table = reportFailure
+ ? alloc.template pod_malloc<Entry>(capacity)
+ : alloc.template maybe_pod_malloc<Entry>(capacity);
+ if (table) {
+ for (uint32_t i = 0; i < capacity; i++)
+ new (&table[i]) Entry();
+ }
+ return table;
+ }
+
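+  // Identical to createTable(alloc, capacity, DontReportFailure).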
+ static Entry* maybeCreateTable(AllocPolicy& alloc, uint32_t capacity)
+ {
+ Entry* table = alloc.template maybe_pod_malloc<Entry>(capacity);
+ if (table) {
+ for (uint32_t i = 0; i < capacity; i++)
+ new (&table[i]) Entry();
+ }
+ return table;
+ }
+
+ static void destroyTable(AllocPolicy& alloc,
+ Entry* oldTable,
+ uint32_t capacity)
+ {
+ Entry* end = oldTable + capacity;
+ for (Entry* e = oldTable; e < end; ++e)
+ e->~Entry();
+ alloc.free_(oldTable, capacity);
+ }
+
+public:
+ explicit HashTable(AllocPolicy ap)
+ : AllocPolicy(ap)
+ , gen(0)
+ , hashShift(kHashNumberBits)
+ , table(nullptr)
+ , entryCount(0)
+ , removedCount(0)
#ifdef DEBUG
- mutationCount++;
- p.generation = generation();
- p.mutationCount = mutationCount;
+ , mutationCount(0)
+ , mEntered(false)
#endif
- return true;
+ {
+ }
+
+ MOZ_MUST_USE bool init(uint32_t length)
+ {
+ MOZ_ASSERT(!initialized());
+
+ // Reject all lengths whose initial computed capacity would exceed
+ // sMaxCapacity. Round that maximum length down to the nearest power
+ // of two for speedier code.
+ if (MOZ_UNLIKELY(length > sMaxInit)) {
+ this->reportAllocOverflow();
+ return false;
}
- // Note: |l| may be a reference to a piece of |u|, so this function
- // must take care not to use |l| after moving |u|.
- template <typename... Args>
- void putNewInfallible(const Lookup& l, Args&&... args)
- {
- MOZ_ASSERT(!lookup(l).found());
- ReentrancyGuard g(*this);
- putNewInfallibleInternal(l, std::forward<Args>(args)...);
+ static_assert((sMaxInit * sAlphaDenominator) / sAlphaDenominator ==
+ sMaxInit,
+ "multiplication in numerator below could overflow");
+ static_assert(sMaxInit * sAlphaDenominator <=
+ UINT32_MAX - sMaxAlphaNumerator,
+ "numerator calculation below could potentially overflow");
+
+ // Compute the smallest capacity allowing |length| elements to be
+ // inserted without rehashing: ceil(length / max-alpha). (Ceiling
+ // integral division: <http://stackoverflow.com/a/2745086>.)
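+    //
+    // For example, |length| 16 gives (16 * 4 + 3 - 1) / 3 = 22, which the
+    // power-of-two rounding below raises to 32, matching the default
+    // capacity of 32 (enough for 16 elements) noted above.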
+ uint32_t newCapacity =
+ (length * sAlphaDenominator + sMaxAlphaNumerator - 1) /
+ sMaxAlphaNumerator;
+ if (newCapacity < sMinCapacity)
+ newCapacity = sMinCapacity;
+
+ // Round up capacity to next power-of-two.
+ uint32_t log2 = mozilla::CeilingLog2(newCapacity);
+ newCapacity = 1u << log2;
+
+ MOZ_ASSERT(newCapacity >= length);
+ MOZ_ASSERT(newCapacity <= sMaxCapacity);
+
+ table = createTable(*this, newCapacity);
+ if (!table)
+ return false;
+
+ setTableSizeLog2(log2);
+ METER(memset(&stats, 0, sizeof(stats)));
+ return true;
+ }
+
+ bool initialized() const { return !!table; }
+
+ ~HashTable()
+ {
+ if (table)
+ destroyTable(*this, table, capacity());
+ }
+
+private:
+ HashNumber hash1(HashNumber hash0) const { return hash0 >> hashShift; }
+
+ struct DoubleHash
+ {
+ HashNumber h2;
+ HashNumber sizeMask;
+ };
+
+ DoubleHash hash2(HashNumber curKeyHash) const
+ {
+ uint32_t sizeLog2 = kHashNumberBits - hashShift;
+ DoubleHash dh = { ((curKeyHash << sizeLog2) >> hashShift) | 1,
+ (HashNumber(1) << sizeLog2) - 1 };
+ return dh;
+ }
+
+ static HashNumber applyDoubleHash(HashNumber h1, const DoubleHash& dh)
+ {
+ return (h1 - dh.h2) & dh.sizeMask;
+ }
+
+ bool overloaded()
+ {
+ static_assert(sMaxCapacity <= UINT32_MAX / sMaxAlphaNumerator,
+ "multiplication below could overflow");
+ return entryCount + removedCount >=
+ capacity() * sMaxAlphaNumerator / sAlphaDenominator;
+ }
+
+ // Would the table be underloaded if it had the given capacity and entryCount?
+ static bool wouldBeUnderloaded(uint32_t capacity, uint32_t entryCount)
+ {
+ static_assert(sMaxCapacity <= UINT32_MAX / sMinAlphaNumerator,
+ "multiplication below could overflow");
+ return capacity > sMinCapacity &&
+ entryCount <= capacity * sMinAlphaNumerator / sAlphaDenominator;
+ }
+
+ bool underloaded() { return wouldBeUnderloaded(capacity(), entryCount); }
+
+ static MOZ_ALWAYS_INLINE bool match(Entry& e, const Lookup& l)
+ {
+ return HashPolicy::match(HashPolicy::getKey(e.get()), l);
+ }
+
+  // Warning: in order for readonlyThreadsafeLookup() to be safe, this
+  // function must not modify the table in any way when |collisionBit| is 0.
+ // (The use of the METER() macro to increment stats violates this
+ // restriction but we will live with that for now because it's enabled so
+ // rarely.)
+ MOZ_ALWAYS_INLINE Entry& lookup(const Lookup& l,
+ HashNumber keyHash,
+ uint32_t collisionBit) const
+ {
+ MOZ_ASSERT(isLiveHash(keyHash));
+ MOZ_ASSERT(!(keyHash & sCollisionBit));
+ MOZ_ASSERT(collisionBit == 0 || collisionBit == sCollisionBit);
+ MOZ_ASSERT(table);
+ METER(stats.searches++);
+
+ // Compute the primary hash address.
+ HashNumber h1 = hash1(keyHash);
+ Entry* entry = &table[h1];
+
+ // Miss: return space for a new entry.
+ if (entry->isFree()) {
+ METER(stats.misses++);
+ return *entry;
+ }
+
+ // Hit: return entry.
+ if (entry->matchHash(keyHash) && match(*entry, l)) {
+ METER(stats.hits++);
+ return *entry;
+ }
+
+ // Collision: double hash.
+ DoubleHash dh = hash2(keyHash);
+
+ // Save the first removed entry pointer so we can recycle later.
+ Entry* firstRemoved = nullptr;
+
+ while (true) {
+ if (MOZ_UNLIKELY(entry->isRemoved())) {
+ if (!firstRemoved)
+ firstRemoved = entry;
+ } else {
+ if (collisionBit == sCollisionBit)
+ entry->setCollision();
+ }
+
+ METER(stats.steps++);
+ h1 = applyDoubleHash(h1, dh);
+
+ entry = &table[h1];
+ if (entry->isFree()) {
+ METER(stats.misses++);
+ return firstRemoved ? *firstRemoved : *entry;
+ }
+
+ if (entry->matchHash(keyHash) && match(*entry, l)) {
+ METER(stats.hits++);
+ return *entry;
+ }
+ }
+ }
+
+ // This is a copy of lookup hardcoded to the assumptions:
+  //   1. the lookup is a lookupForAdd,
+  //   2. the key whose |keyHash| has been passed is not in the table, and
+ // 3. no entries have been removed from the table.
+ // This specialized search avoids the need for recovering lookup values
+ // from entries, which allows more flexible Lookup/Key types.
+ Entry& findFreeEntry(HashNumber keyHash)
+ {
+ MOZ_ASSERT(!(keyHash & sCollisionBit));
+ MOZ_ASSERT(table);
+ METER(stats.searches++);
+
+ // We assume 'keyHash' has already been distributed.
+
+ // Compute the primary hash address.
+ HashNumber h1 = hash1(keyHash);
+ Entry* entry = &table[h1];
+
+ // Miss: return space for a new entry.
+ if (!entry->isLive()) {
+ METER(stats.misses++);
+ return *entry;
+ }
+
+ // Collision: double hash.
+ DoubleHash dh = hash2(keyHash);
+
+ while (true) {
+ MOZ_ASSERT(!entry->isRemoved());
+ entry->setCollision();
+
+ METER(stats.steps++);
+ h1 = applyDoubleHash(h1, dh);
+
+ entry = &table[h1];
+ if (!entry->isLive()) {
+ METER(stats.misses++);
+ return *entry;
+ }
+ }
+ }
+
+ enum RebuildStatus
+ {
+ NotOverloaded,
+ Rehashed,
+ RehashFailed
+ };
+
+ RebuildStatus changeTableSize(int deltaLog2,
+ FailureBehavior reportFailure = ReportFailure)
+ {
+ // Look, but don't touch, until we succeed in getting new entry store.
+ Entry* oldTable = table;
+ uint32_t oldCap = capacity();
+ uint32_t newLog2 = kHashNumberBits - hashShift + deltaLog2;
+ uint32_t newCapacity = 1u << newLog2;
+ if (MOZ_UNLIKELY(newCapacity > sMaxCapacity)) {
+ if (reportFailure)
+ this->reportAllocOverflow();
+ return RehashFailed;
+ }
+
+ Entry* newTable = createTable(*this, newCapacity, reportFailure);
+ if (!newTable)
+ return RehashFailed;
+
+ // We can't fail from here on, so update table parameters.
+ setTableSizeLog2(newLog2);
+ removedCount = 0;
+ gen++;
+ table = newTable;
+
+ // Copy only live entries, leaving removed ones behind.
+ Entry* end = oldTable + oldCap;
+ for (Entry* src = oldTable; src < end; ++src) {
+ if (src->isLive()) {
+ HashNumber hn = src->getKeyHash();
+ findFreeEntry(hn).setLive(
+ hn, std::move(const_cast<typename Entry::NonConstT&>(src->get())));
+ }
+
+ src->~Entry();
}
- // Note: |l| may be alias arguments in |args|, so this function must take
- // care not to use |l| after moving |args|.
- template <typename... Args>
- MOZ_MUST_USE bool putNew(const Lookup& l, Args&&... args)
- {
- if (!this->checkSimulatedOOM())
- return false;
+ // All entries have been destroyed, no need to destroyTable.
+ this->free_(oldTable, oldCap);
+ return Rehashed;
+ }
+
+ bool shouldCompressTable()
+ {
+ // Compress if a quarter or more of all entries are removed.
+ return removedCount >= (capacity() >> 2);
+ }
+
+ RebuildStatus checkOverloaded(FailureBehavior reportFailure = ReportFailure)
+ {
+ if (!overloaded())
+ return NotOverloaded;
+
+ int deltaLog2;
+ if (shouldCompressTable()) {
+ METER(stats.compresses++);
+ deltaLog2 = 0;
+ } else {
+ METER(stats.grows++);
+ deltaLog2 = 1;
+ }
+
+ return changeTableSize(deltaLog2, reportFailure);
+ }
+
+ // Infallibly rehash the table if we are overloaded with removals.
+ void checkOverRemoved()
+ {
+ if (overloaded()) {
+ if (checkOverloaded(DontReportFailure) == RehashFailed)
+ rehashTableInPlace();
+ }
+ }
+
+ void remove(Entry& e)
+ {
+ MOZ_ASSERT(table);
+ METER(stats.removes++);
+
+ if (e.hasCollision()) {
+ e.removeLive();
+ removedCount++;
+ } else {
+ METER(stats.removeFrees++);
+ e.clearLive();
+ }
+ entryCount--;
+#ifdef DEBUG
+ mutationCount++;
+#endif
+ }
+
+ void checkUnderloaded()
+ {
+ if (underloaded()) {
+ METER(stats.shrinks++);
+ (void)changeTableSize(-1, DontReportFailure);
+ }
+ }
- if (!EnsureHash<HashPolicy>(l))
- return false;
+ // Resize the table down to the largest capacity which doesn't underload the
+ // table. Since we call checkUnderloaded() on every remove, you only need
+ // to call this after a bulk removal of items done without calling remove().
+ void compactIfUnderloaded()
+ {
+ int32_t resizeLog2 = 0;
+ uint32_t newCapacity = capacity();
+ while (wouldBeUnderloaded(newCapacity, entryCount)) {
+ newCapacity = newCapacity >> 1;
+ resizeLog2--;
+ }
+
+ if (resizeLog2 != 0)
+ (void)changeTableSize(resizeLog2, DontReportFailure);
+ }
+
+  // This is identical to changeTableSize(0), but without requiring
+ // a second table. We do this by recycling the collision bits to tell us if
+ // the element is already inserted or still waiting to be inserted. Since
+ // already-inserted elements win any conflicts, we get the same table as we
+ // would have gotten through random insertion order.
+ void rehashTableInPlace()
+ {
+ METER(stats.rehashes++);
+ removedCount = 0;
+ gen++;
+ for (size_t i = 0; i < capacity(); ++i)
+ table[i].unsetCollision();
+
+ for (size_t i = 0; i < capacity();) {
+ Entry* src = &table[i];
+
+ if (!src->isLive() || src->hasCollision()) {
+ ++i;
+ continue;
+ }
- if (checkOverloaded() == RehashFailed)
- return false;
+ HashNumber keyHash = src->getKeyHash();
+ HashNumber h1 = hash1(keyHash);
+ DoubleHash dh = hash2(keyHash);
+ Entry* tgt = &table[h1];
+ while (true) {
+ if (!tgt->hasCollision()) {
+ src->swap(tgt);
+ tgt->setCollision();
+ break;
+ }
+
+ h1 = applyDoubleHash(h1, dh);
+ tgt = &table[h1];
+ }
+ }
- putNewInfallible(l, std::forward<Args>(args)...);
- return true;
+    // TODO: this algorithm leaves collision bits on *all* elements, even
+    // those not on any collision path. We have the option of setting the
+ // collision bits correctly on a subsequent pass or skipping the rehash
+ // unless we are totally filled with tombstones: benchmark to find out
+ // which approach is best.
+ }
+
+  // Note: |l| may be a reference to a piece of an argument in |args|, so
+  // this function must take care not to use |l| after moving |args|.
+ //
+ // Prefer to use putNewInfallible; this function does not check
+ // invariants.
+ template<typename... Args>
+ void putNewInfallibleInternal(const Lookup& l, Args&&... args)
+ {
+ MOZ_ASSERT(table);
+
+ HashNumber keyHash = prepareHash(l);
+ Entry* entry = &findFreeEntry(keyHash);
+ MOZ_ASSERT(entry);
+
+ if (entry->isRemoved()) {
+ METER(stats.addOverRemoved++);
+ removedCount--;
+ keyHash |= sCollisionBit;
}
- // Note: |l| may be a reference to a piece of |u|, so this function
- // must take care not to use |l| after moving |u|.
- template <typename... Args>
- MOZ_MUST_USE bool relookupOrAdd(AddPtr& p, const Lookup& l, Args&&... args)
- {
- // Check for error from ensureHash() here.
- if (!p.isValid())
- return false;
+ entry->setLive(keyHash, std::forward<Args>(args)...);
+ entryCount++;
+#ifdef DEBUG
+ mutationCount++;
+#endif
+ }
+
+public:
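+  // Remove all live entries, keeping the existing entry storage and
+  // capacity.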
+ void clear()
+ {
+ Entry* end = table + capacity();
+ for (Entry* e = table; e < end; ++e)
+ e->clear();
+
+ removedCount = 0;
+ entryCount = 0;
+#ifdef DEBUG
+ mutationCount++;
+#endif
+ }
+
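+  // Remove all entries, then shrink the entry storage if the now-empty
+  // table is underloaded.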
+ void clearAndShrink()
+ {
+ clear();
+ compactIfUnderloaded();
+ }
+
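+  // Free the entry storage and return the table to the uninitialized
+  // state; init() must be called again before further use.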
+ void finish()
+ {
+#ifdef DEBUG
+ MOZ_ASSERT(!mEntered);
+#endif
+ if (!table)
+ return;
+
+ destroyTable(*this, table, capacity());
+ table = nullptr;
+ gen++;
+ entryCount = 0;
+ removedCount = 0;
+#ifdef DEBUG
+ mutationCount++;
+#endif
+ }
+
+ Range all() const
+ {
+ MOZ_ASSERT(table);
+ return Range(*this, table, table + capacity());
+ }
+
+ bool empty() const
+ {
+ MOZ_ASSERT(table);
+ return !entryCount;
+ }
+
+ uint32_t count() const
+ {
+ MOZ_ASSERT(table);
+ return entryCount;
+ }
+
+ uint32_t capacity() const
+ {
+ MOZ_ASSERT(table);
+ return 1u << (kHashNumberBits - hashShift);
+ }
+
+ Generation generation() const
+ {
+ MOZ_ASSERT(table);
+ return Generation(gen);
+ }
+
+ size_t sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const
+ {
+ return mallocSizeOf(table);
+ }
+
+ size_t sizeOfIncludingThis(MallocSizeOf mallocSizeOf) const
+ {
+ return mallocSizeOf(this) + sizeOfExcludingThis(mallocSizeOf);
+ }
+
+ MOZ_ALWAYS_INLINE Ptr lookup(const Lookup& l) const
+ {
+ ReentrancyGuard g(*this);
+ if (!HasHash<HashPolicy>(l))
+ return Ptr();
+ HashNumber keyHash = prepareHash(l);
+ return Ptr(lookup(l, keyHash, 0), *this);
+ }
+
+ MOZ_ALWAYS_INLINE Ptr readonlyThreadsafeLookup(const Lookup& l) const
+ {
+ if (!HasHash<HashPolicy>(l))
+ return Ptr();
+ HashNumber keyHash = prepareHash(l);
+ return Ptr(lookup(l, keyHash, 0), *this);
+ }
+
+ MOZ_ALWAYS_INLINE AddPtr lookupForAdd(const Lookup& l) const
+ {
+ ReentrancyGuard g(*this);
+ if (!EnsureHash<HashPolicy>(l))
+ return AddPtr();
+ HashNumber keyHash = prepareHash(l);
+ // Directly call the constructor in the return statement to avoid
+ // excess copying when building with Visual Studio 2017.
+ // See bug 1385181.
+ return AddPtr(lookup(l, keyHash, sCollisionBit), *this, keyHash);
+ }
+
+ template<typename... Args>
+ MOZ_MUST_USE bool add(AddPtr& p, Args&&... args)
+ {
+ ReentrancyGuard g(*this);
+ MOZ_ASSERT(table);
+ MOZ_ASSERT_IF(p.isValid(), p.table_ == this);
+ MOZ_ASSERT(!p.found());
+ MOZ_ASSERT(!(p.keyHash & sCollisionBit));
+
+ // Check for error from ensureHash() here.
+ if (!p.isValid())
+ return false;
+
+ MOZ_ASSERT(p.generation == generation());
+#ifdef DEBUG
+ MOZ_ASSERT(p.mutationCount == mutationCount);
+#endif
+
+ // Changing an entry from removed to live does not affect whether we
+ // are overloaded and can be handled separately.
+ if (p.entry_->isRemoved()) {
+ if (!this->checkSimulatedOOM())
+ return false;
+ METER(stats.addOverRemoved++);
+ removedCount--;
+ p.keyHash |= sCollisionBit;
+ } else {
+ // Preserve the validity of |p.entry_|.
+ RebuildStatus status = checkOverloaded();
+ if (status == RehashFailed)
+ return false;
+ if (status == NotOverloaded && !this->checkSimulatedOOM())
+ return false;
+ if (status == Rehashed)
+ p.entry_ = &findFreeEntry(p.keyHash);
+ }
+
+ p.entry_->setLive(p.keyHash, std::forward<Args>(args)...);
+ entryCount++;
+#ifdef DEBUG
+ mutationCount++;
+ p.generation = generation();
+ p.mutationCount = mutationCount;
+#endif
+ return true;
+ }
+
+  // Note: |l| may be a reference to a piece of an argument in |args|, so
+  // this function must take care not to use |l| after moving |args|.
+ template<typename... Args>
+ void putNewInfallible(const Lookup& l, Args&&... args)
+ {
+ MOZ_ASSERT(!lookup(l).found());
+ ReentrancyGuard g(*this);
+ putNewInfallibleInternal(l, std::forward<Args>(args)...);
+ }
+
+  // Note: |l| may alias arguments in |args|, so this function must take
+  // care not to use |l| after moving |args|.
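+  //
+  // For example (a sketch of how a map-like wrapper forwards, passing the
+  // key both as the Lookup and as an entry-constructor argument):
+  //
+  //   if (!impl.putNew(k, k, std::move(v)))
+  //     return false; // OOM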
+ template<typename... Args>
+ MOZ_MUST_USE bool putNew(const Lookup& l, Args&&... args)
+ {
+ if (!this->checkSimulatedOOM())
+ return false;
+
+ if (!EnsureHash<HashPolicy>(l))
+ return false;
+
+ if (checkOverloaded() == RehashFailed)
+ return false;
+
+ putNewInfallible(l, std::forward<Args>(args)...);
+ return true;
+ }
+
+  // Note: |l| may be a reference to a piece of an argument in |args|, so
+  // this function must take care not to use |l| after moving |args|.
+ template<typename... Args>
+ MOZ_MUST_USE bool relookupOrAdd(AddPtr& p, const Lookup& l, Args&&... args)
+ {
+ // Check for error from ensureHash() here.
+ if (!p.isValid())
+ return false;
#ifdef DEBUG
- p.generation = generation();
- p.mutationCount = mutationCount;
+ p.generation = generation();
+ p.mutationCount = mutationCount;
#endif
- {
- ReentrancyGuard g(*this);
- MOZ_ASSERT(prepareHash(l) == p.keyHash); // l has not been destroyed
- p.entry_ = &lookup(l, p.keyHash, sCollisionBit);
- }
- return p.found() || add(p, std::forward<Args>(args)...);
- }
-
- void remove(Ptr p)
{
- MOZ_ASSERT(table);
- ReentrancyGuard g(*this);
- MOZ_ASSERT(p.found());
- MOZ_ASSERT(p.generation == generation());
- remove(*p.entry_);
- checkUnderloaded();
+ ReentrancyGuard g(*this);
+ MOZ_ASSERT(prepareHash(l) == p.keyHash); // l has not been destroyed
+ p.entry_ = &lookup(l, p.keyHash, sCollisionBit);
}
+ return p.found() || add(p, std::forward<Args>(args)...);
+ }
+
+ void remove(Ptr p)
+ {
+ MOZ_ASSERT(table);
+ ReentrancyGuard g(*this);
+ MOZ_ASSERT(p.found());
+ MOZ_ASSERT(p.generation == generation());
+ remove(*p.entry_);
+ checkUnderloaded();
+ }
- void rekeyWithoutRehash(Ptr p, const Lookup& l, const Key& k)
- {
- MOZ_ASSERT(table);
- ReentrancyGuard g(*this);
- MOZ_ASSERT(p.found());
- MOZ_ASSERT(p.generation == generation());
- typename HashTableEntry<T>::NonConstT t(std::move(*p));
- HashPolicy::setKey(t, const_cast<Key&>(k));
- remove(*p.entry_);
- putNewInfallibleInternal(l, std::move(t));
- }
+ void rekeyWithoutRehash(Ptr p, const Lookup& l, const Key& k)
+ {
+ MOZ_ASSERT(table);
+ ReentrancyGuard g(*this);
+ MOZ_ASSERT(p.found());
+ MOZ_ASSERT(p.generation == generation());
+ typename HashTableEntry<T>::NonConstT t(std::move(*p));
+ HashPolicy::setKey(t, const_cast<Key&>(k));
+ remove(*p.entry_);
+ putNewInfallibleInternal(l, std::move(t));
+ }
- void rekeyAndMaybeRehash(Ptr p, const Lookup& l, const Key& k)
- {
- rekeyWithoutRehash(p, l, k);
- checkOverRemoved();
- }
+ void rekeyAndMaybeRehash(Ptr p, const Lookup& l, const Key& k)
+ {
+ rekeyWithoutRehash(p, l, k);
+ checkOverRemoved();
+ }
#undef METER
};
} // namespace detail
} // namespace mozilla
-#endif /* mozilla_HashTable_h */
+#endif /* mozilla_HashTable_h */