insert(P&& value) {
+ return insert_impl(KeySelect()(value), std::forward(value));
+ }
+
+ template
+ iterator insert_hint(const_iterator hint, P&& value) {
+ if (hint != cend() &&
+ compare_keys(KeySelect()(*hint), KeySelect()(value))) {
+ return mutable_iterator(hint);
+ }
+
+ return insert(std::forward(value)).first;
+ }
+
+ template
+ void insert(InputIt first, InputIt last) {
+ if (std::is_base_of<
+ std::forward_iterator_tag,
+ typename std::iterator_traits::iterator_category>::value) {
+ const auto nb_elements_insert = std::distance(first, last);
+ const size_type nb_free_buckets = m_load_threshold - size();
+ tsl_rh_assert(m_load_threshold >= size());
+
+ if (nb_elements_insert > 0 &&
+ nb_free_buckets < size_type(nb_elements_insert)) {
+ reserve(size() + size_type(nb_elements_insert));
+ }
+ }
+
+ for (; first != last; ++first) {
+ insert(*first);
+ }
+ }
+
+ template
+ std::pair insert_or_assign(K&& key, M&& obj) {
+ auto it = try_emplace(std::forward(key), std::forward(obj));
+ if (!it.second) {
+ it.first.value() = std::forward(obj);
+ }
+
+ return it;
+ }
+
+ template
+ iterator insert_or_assign(const_iterator hint, K&& key, M&& obj) {
+ if (hint != cend() && compare_keys(KeySelect()(*hint), key)) {
+ auto it = mutable_iterator(hint);
+ it.value() = std::forward(obj);
+
+ return it;
+ }
+
+ return insert_or_assign(std::forward(key), std::forward(obj)).first;
+ }
+
+ template
+ std::pair emplace(Args&&... args) {
+ return insert(value_type(std::forward(args)...));
+ }
+
+ template
+ iterator emplace_hint(const_iterator hint, Args&&... args) {
+ return insert_hint(hint, value_type(std::forward(args)...));
+ }
+
+ template
+ std::pair try_emplace(K&& key, Args&&... args) {
+ return insert_impl(key, std::piecewise_construct,
+ std::forward_as_tuple(std::forward(key)),
+ std::forward_as_tuple(std::forward(args)...));
+ }
+
+ template
+ iterator try_emplace_hint(const_iterator hint, K&& key, Args&&... args) {
+ if (hint != cend() && compare_keys(KeySelect()(*hint), key)) {
+ return mutable_iterator(hint);
+ }
+
+ return try_emplace(std::forward(key), std::forward(args)...).first;
+ }
+
+ /**
+ * Here to avoid `template size_type erase(const K& key)` being used
+ * when we use an `iterator` instead of a `const_iterator`.
+ */
+ iterator erase(iterator pos) {
+ erase_from_bucket(pos);
+
+ /**
+ * Erase bucket used a backward shift after clearing the bucket.
+ * Check if there is a new value in the bucket, if not get the next
+ * non-empty.
+ */
+ if (pos.m_bucket->empty()) {
+ ++pos;
+ }
+
+ m_try_shrink_on_next_insert = true;
+
+ return pos;
+ }
+
+ iterator erase(const_iterator pos) { return erase(mutable_iterator(pos)); }
+
+ iterator erase(const_iterator first, const_iterator last) {
+ if (first == last) {
+ return mutable_iterator(first);
+ }
+
+ auto first_mutable = mutable_iterator(first);
+ auto last_mutable = mutable_iterator(last);
+ for (auto it = first_mutable.m_bucket; it != last_mutable.m_bucket; ++it) {
+ if (!it->empty()) {
+ it->clear();
+ m_nb_elements--;
+ }
+ }
+
+ if (last_mutable == end()) {
+ m_try_shrink_on_next_insert = true;
+ return end();
+ }
+
+ /*
+ * Backward shift on the values which come after the deleted values.
+ * We try to move the values closer to their ideal bucket.
+ */
+ std::size_t icloser_bucket =
+ static_cast(first_mutable.m_bucket - m_buckets);
+ std::size_t ito_move_closer_value =
+ static_cast(last_mutable.m_bucket - m_buckets);
+ tsl_rh_assert(ito_move_closer_value > icloser_bucket);
+
+ const std::size_t ireturn_bucket =
+ ito_move_closer_value -
+ std::min(
+ ito_move_closer_value - icloser_bucket,
+ std::size_t(
+ m_buckets[ito_move_closer_value].dist_from_ideal_bucket()));
+
+ while (ito_move_closer_value < m_bucket_count &&
+ m_buckets[ito_move_closer_value].dist_from_ideal_bucket() > 0) {
+ icloser_bucket =
+ ito_move_closer_value -
+ std::min(
+ ito_move_closer_value - icloser_bucket,
+ std::size_t(
+ m_buckets[ito_move_closer_value].dist_from_ideal_bucket()));
+
+ tsl_rh_assert(m_buckets[icloser_bucket].empty());
+ const distance_type new_distance = distance_type(
+ m_buckets[ito_move_closer_value].dist_from_ideal_bucket() -
+ (ito_move_closer_value - icloser_bucket));
+ m_buckets[icloser_bucket].set_value_of_empty_bucket(
+ new_distance, m_buckets[ito_move_closer_value].truncated_hash(),
+ std::move(m_buckets[ito_move_closer_value].value()));
+ m_buckets[ito_move_closer_value].clear();
+
+ ++icloser_bucket;
+ ++ito_move_closer_value;
+ }
+
+ m_try_shrink_on_next_insert = true;
+
+ return iterator(m_buckets + ireturn_bucket);
+ }
+
+ template
+ size_type erase(const K& key) {
+ return erase(key, hash_key(key));
+ }
+
+ template
+ size_type erase(const K& key, std::size_t hash) {
+ auto it = find(key, hash);
+ if (it != end()) {
+ erase_from_bucket(it);
+ m_try_shrink_on_next_insert = true;
+
+ return 1;
+ } else {
+ return 0;
+ }
+ }
+
+ void swap(robin_hash& other) {
+ using std::swap;
+
+ swap(static_cast(*this), static_cast(other));
+ swap(static_cast(*this), static_cast(other));
+ swap(static_cast(*this), static_cast(other));
+ swap(m_buckets_data, other.m_buckets_data);
+ swap(m_buckets, other.m_buckets);
+ swap(m_bucket_count, other.m_bucket_count);
+ swap(m_nb_elements, other.m_nb_elements);
+ swap(m_load_threshold, other.m_load_threshold);
+ swap(m_min_load_factor, other.m_min_load_factor);
+ swap(m_max_load_factor, other.m_max_load_factor);
+ swap(m_grow_on_next_insert, other.m_grow_on_next_insert);
+ swap(m_try_shrink_on_next_insert, other.m_try_shrink_on_next_insert);
+ }
+
+ /*
+ * Lookup
+ */
+ template ::value>::type* = nullptr>
+ typename U::value_type& at(const K& key) {
+ return at(key, hash_key(key));
+ }
+
+ template ::value>::type* = nullptr>
+ typename U::value_type& at(const K& key, std::size_t hash) {
+ return const_cast(
+ static_cast(this)->at(key, hash));
+ }
+
+ template ::value>::type* = nullptr>
+ const typename U::value_type& at(const K& key) const {
+ return at(key, hash_key(key));
+ }
+
+ template ::value>::type* = nullptr>
+ const typename U::value_type& at(const K& key, std::size_t hash) const {
+ auto it = find(key, hash);
+ if (it != cend()) {
+ return it.value();
+ } else {
+ TSL_RH_THROW_OR_TERMINATE(std::out_of_range, "Couldn't find key.");
+ }
+ }
+
+ template ::value>::type* = nullptr>
+ typename U::value_type& operator[](K&& key) {
+ return try_emplace(std::forward(key)).first.value();
+ }
+
+ template
+ size_type count(const K& key) const {
+ return count(key, hash_key(key));
+ }
+
+ template
+ size_type count(const K& key, std::size_t hash) const {
+ if (find(key, hash) != cend()) {
+ return 1;
+ } else {
+ return 0;
+ }
+ }
+
+ template
+ iterator find(const K& key) {
+ return find_impl(key, hash_key(key));
+ }
+
+ template
+ iterator find(const K& key, std::size_t hash) {
+ return find_impl(key, hash);
+ }
+
+ template
+ const_iterator find(const K& key) const {
+ return find_impl(key, hash_key(key));
+ }
+
+ template
+ const_iterator find(const K& key, std::size_t hash) const {
+ return find_impl(key, hash);
+ }
+
+ template
+ bool contains(const K& key) const {
+ return contains(key, hash_key(key));
+ }
+
+ template
+ bool contains(const K& key, std::size_t hash) const {
+ return count(key, hash) != 0;
+ }
+
+ template
+ std::pair equal_range(const K& key) {
+ return equal_range(key, hash_key(key));
+ }
+
+ template
+ std::pair equal_range(const K& key, std::size_t hash) {
+ iterator it = find(key, hash);
+ return std::make_pair(it, (it == end()) ? it : std::next(it));
+ }
+
+ template
+ std::pair equal_range(const K& key) const {
+ return equal_range(key, hash_key(key));
+ }
+
+ template
+ std::pair equal_range(
+ const K& key, std::size_t hash) const {
+ const_iterator it = find(key, hash);
+ return std::make_pair(it, (it == cend()) ? it : std::next(it));
+ }
+
+ /*
+ * Bucket interface
+ */
+ size_type bucket_count() const { return m_bucket_count; }
+
+ size_type max_bucket_count() const {
+ return std::min(GrowthPolicy::max_bucket_count(),
+ m_buckets_data.max_size());
+ }
+
+ /*
+ * Hash policy
+ */
+ float load_factor() const {
+ if (bucket_count() == 0) {
+ return 0;
+ }
+
+ return float(m_nb_elements) / float(bucket_count());
+ }
+
+ float min_load_factor() const { return m_min_load_factor; }
+
+ float max_load_factor() const { return m_max_load_factor; }
+
+ void min_load_factor(float ml) {
+ m_min_load_factor = clamp(ml, float(MINIMUM_MIN_LOAD_FACTOR),
+ float(MAXIMUM_MIN_LOAD_FACTOR));
+ }
+
+ void max_load_factor(float ml) {
+ m_max_load_factor = clamp(ml, float(MINIMUM_MAX_LOAD_FACTOR),
+ float(MAXIMUM_MAX_LOAD_FACTOR));
+ m_load_threshold = size_type(float(bucket_count()) * m_max_load_factor);
+ }
+
+ void rehash(size_type count) {
+ count = std::max(count,
+ size_type(std::ceil(float(size()) / max_load_factor())));
+ rehash_impl(count);
+ }
+
+ void reserve(size_type count) {
+ rehash(size_type(std::ceil(float(count) / max_load_factor())));
+ }
+
+ /*
+ * Observers
+ */
+ hasher hash_function() const { return static_cast(*this); }
+
+ key_equal key_eq() const { return static_cast(*this); }
+
+ /*
+ * Other
+ */
+ iterator mutable_iterator(const_iterator pos) {
+ return iterator(const_cast(pos.m_bucket));
+ }
+
+ template
+ void serialize(Serializer& serializer) const {
+ serialize_impl(serializer);
+ }
+
+ template
+ void deserialize(Deserializer& deserializer, bool hash_compatible) {
+ deserialize_impl(deserializer, hash_compatible);
+ }
+
+ private:
+ template
+ std::size_t hash_key(const K& key) const {
+ return Hash::operator()(key);
+ }
+
+ template
+ bool compare_keys(const K1& key1, const K2& key2) const {
+ return KeyEqual::operator()(key1, key2);
+ }
+
+ std::size_t bucket_for_hash(std::size_t hash) const {
+ const std::size_t bucket = GrowthPolicy::bucket_for_hash(hash);
+ tsl_rh_assert(bucket < m_bucket_count ||
+ (bucket == 0 && m_bucket_count == 0));
+
+ return bucket;
+ }
+
+ template ::value>::type* =
+ nullptr>
+ std::size_t next_bucket(std::size_t index) const noexcept {
+ tsl_rh_assert(index < bucket_count());
+
+ return (index + 1) & this->m_mask;
+ }
+
+ template ::value>::type* =
+ nullptr>
+ std::size_t next_bucket(std::size_t index) const noexcept {
+ tsl_rh_assert(index < bucket_count());
+
+ index++;
+ return (index != bucket_count()) ? index : 0;
+ }
+
+ template
+ iterator find_impl(const K& key, std::size_t hash) {
+ return mutable_iterator(
+ static_cast(this)->find(key, hash));
+ }
+
+ template
+ const_iterator find_impl(const K& key, std::size_t hash) const {
+ std::size_t ibucket = bucket_for_hash(hash);
+ distance_type dist_from_ideal_bucket = 0;
+
+ while (dist_from_ideal_bucket <=
+ m_buckets[ibucket].dist_from_ideal_bucket()) {
+ if (TSL_RH_LIKELY(
+ (!USE_STORED_HASH_ON_LOOKUP ||
+ m_buckets[ibucket].bucket_hash_equal(hash)) &&
+ compare_keys(KeySelect()(m_buckets[ibucket].value()), key))) {
+ return const_iterator(m_buckets + ibucket);
+ }
+
+ ibucket = next_bucket(ibucket);
+ dist_from_ideal_bucket++;
+ }
+
+ return cend();
+ }
+
+ void erase_from_bucket(iterator pos) {
+ pos.m_bucket->clear();
+ m_nb_elements--;
+
+ /**
+ * Backward shift, swap the empty bucket, previous_ibucket, with the values
+ * on its right, ibucket, until we cross another empty bucket or if the
+ * other bucket has a distance_from_ideal_bucket == 0.
+ *
+ * We try to move the values closer to their ideal bucket.
+ */
+ std::size_t previous_ibucket =
+ static_cast(pos.m_bucket - m_buckets);
+ std::size_t ibucket = next_bucket(previous_ibucket);
+
+ while (m_buckets[ibucket].dist_from_ideal_bucket() > 0) {
+ tsl_rh_assert(m_buckets[previous_ibucket].empty());
+
+ const distance_type new_distance =
+ distance_type(m_buckets[ibucket].dist_from_ideal_bucket() - 1);
+ m_buckets[previous_ibucket].set_value_of_empty_bucket(
+ new_distance, m_buckets[ibucket].truncated_hash(),
+ std::move(m_buckets[ibucket].value()));
+ m_buckets[ibucket].clear();
+
+ previous_ibucket = ibucket;
+ ibucket = next_bucket(ibucket);
+ }
+ }
+
+ template
+ std::pair insert_impl(const K& key,
+ Args&&... value_type_args) {
+ const std::size_t hash = hash_key(key);
+
+ std::size_t ibucket = bucket_for_hash(hash);
+ distance_type dist_from_ideal_bucket = 0;
+
+ while (dist_from_ideal_bucket <=
+ m_buckets[ibucket].dist_from_ideal_bucket()) {
+ if ((!USE_STORED_HASH_ON_LOOKUP ||
+ m_buckets[ibucket].bucket_hash_equal(hash)) &&
+ compare_keys(KeySelect()(m_buckets[ibucket].value()), key)) {
+ return std::make_pair(iterator(m_buckets + ibucket), false);
+ }
+
+ ibucket = next_bucket(ibucket);
+ dist_from_ideal_bucket++;
+ }
+
+ if (rehash_on_extreme_load()) {
+ ibucket = bucket_for_hash(hash);
+ dist_from_ideal_bucket = 0;
+
+ while (dist_from_ideal_bucket <=
+ m_buckets[ibucket].dist_from_ideal_bucket()) {
+ ibucket = next_bucket(ibucket);
+ dist_from_ideal_bucket++;
+ }
+ }
+
+ if (m_buckets[ibucket].empty()) {
+ m_buckets[ibucket].set_value_of_empty_bucket(
+ dist_from_ideal_bucket, bucket_entry::truncate_hash(hash),
+ std::forward(value_type_args)...);
+ } else {
+ insert_value(ibucket, dist_from_ideal_bucket,
+ bucket_entry::truncate_hash(hash),
+ std::forward(value_type_args)...);
+ }
+
+ m_nb_elements++;
+ /*
+ * The value will be inserted in ibucket in any case, either because it was
+ * empty or by stealing the bucket (robin hood).
+ */
+ return std::make_pair(iterator(m_buckets + ibucket), true);
+ }
+
+ template
+ void insert_value(std::size_t ibucket, distance_type dist_from_ideal_bucket,
+ truncated_hash_type hash, Args&&... value_type_args) {
+ value_type value(std::forward(value_type_args)...);
+ insert_value_impl(ibucket, dist_from_ideal_bucket, hash, value);
+ }
+
+ void insert_value(std::size_t ibucket, distance_type dist_from_ideal_bucket,
+ truncated_hash_type hash, value_type&& value) {
+ insert_value_impl(ibucket, dist_from_ideal_bucket, hash, value);
+ }
+
+ /*
+ * We don't use `value_type&& value` as last argument due to a bug in MSVC
+ * when `value_type` is a pointer, The compiler is not able to see the
+ * difference between `std::string*` and `std::string*&&` resulting in a
+ * compilation error.
+ *
+ * The `value` will be in a moved state at the end of the function.
+ */
+ void insert_value_impl(std::size_t ibucket,
+ distance_type dist_from_ideal_bucket,
+ truncated_hash_type hash, value_type& value) {
+ m_buckets[ibucket].swap_with_value_in_bucket(dist_from_ideal_bucket, hash,
+ value);
+ ibucket = next_bucket(ibucket);
+ dist_from_ideal_bucket++;
+
+ while (!m_buckets[ibucket].empty()) {
+ if (dist_from_ideal_bucket >
+ m_buckets[ibucket].dist_from_ideal_bucket()) {
+ if (dist_from_ideal_bucket >=
+ bucket_entry::DIST_FROM_IDEAL_BUCKET_LIMIT) {
+ /**
+ * The number of probes is really high, rehash the map on the next
+ * insert. Difficult to do now as rehash may throw an exception.
+ */
+ m_grow_on_next_insert = true;
+ }
+
+ m_buckets[ibucket].swap_with_value_in_bucket(dist_from_ideal_bucket,
+ hash, value);
+ }
+
+ ibucket = next_bucket(ibucket);
+ dist_from_ideal_bucket++;
+ }
+
+ m_buckets[ibucket].set_value_of_empty_bucket(dist_from_ideal_bucket, hash,
+ std::move(value));
+ }
+
+ void rehash_impl(size_type count) {
+ robin_hash new_table(count, static_cast(*this),
+ static_cast(*this), get_allocator(),
+ m_min_load_factor, m_max_load_factor);
+
+ const bool use_stored_hash =
+ USE_STORED_HASH_ON_REHASH(new_table.bucket_count());
+ for (auto& bucket : m_buckets_data) {
+ if (bucket.empty()) {
+ continue;
+ }
+
+ const std::size_t hash =
+ use_stored_hash ? bucket.truncated_hash()
+ : new_table.hash_key(KeySelect()(bucket.value()));
+
+ new_table.insert_value_on_rehash(new_table.bucket_for_hash(hash), 0,
+ bucket_entry::truncate_hash(hash),
+ std::move(bucket.value()));
+ }
+
+ new_table.m_nb_elements = m_nb_elements;
+ new_table.swap(*this);
+ }
+
+ void clear_and_shrink() noexcept {
+ GrowthPolicy::clear();
+ m_buckets_data.clear();
+ m_buckets = static_empty_bucket_ptr();
+ m_bucket_count = 0;
+ m_nb_elements = 0;
+ m_load_threshold = 0;
+ m_grow_on_next_insert = false;
+ m_try_shrink_on_next_insert = false;
+ }
+
+ void insert_value_on_rehash(std::size_t ibucket,
+ distance_type dist_from_ideal_bucket,
+ truncated_hash_type hash, value_type&& value) {
+ while (true) {
+ if (dist_from_ideal_bucket >
+ m_buckets[ibucket].dist_from_ideal_bucket()) {
+ if (m_buckets[ibucket].empty()) {
+ m_buckets[ibucket].set_value_of_empty_bucket(dist_from_ideal_bucket,
+ hash, std::move(value));
+ return;
+ } else {
+ m_buckets[ibucket].swap_with_value_in_bucket(dist_from_ideal_bucket,
+ hash, value);
+ }
+ }
+
+ dist_from_ideal_bucket++;
+ ibucket = next_bucket(ibucket);
+ }
+ }
+
+ /**
+ * Grow the table if m_grow_on_next_insert is true or we reached the
+ * max_load_factor. Shrink the table if m_try_shrink_on_next_insert is true
+ * (an erase occurred) and we're below the min_load_factor.
+ *
+ * Return true if the table has been rehashed.
+ */
+ bool rehash_on_extreme_load() {
+ if (m_grow_on_next_insert || size() >= m_load_threshold) {
+ rehash_impl(GrowthPolicy::next_bucket_count());
+ m_grow_on_next_insert = false;
+
+ return true;
+ }
+
+ if (m_try_shrink_on_next_insert) {
+ m_try_shrink_on_next_insert = false;
+ if (m_min_load_factor != 0.0f && load_factor() < m_min_load_factor) {
+ reserve(size() + 1);
+
+ return true;
+ }
+ }
+
+ return false;
+ }
+
+ template
+ void serialize_impl(Serializer& serializer) const {
+ const slz_size_type version = SERIALIZATION_PROTOCOL_VERSION;
+ serializer(version);
+
+ // Indicate if the truncated hash of each bucket is stored. Use a
+ // std::int16_t instead of a bool to avoid the need for the serializer to
+ // support an extra 'bool' type.
+ const std::int16_t hash_stored_for_bucket =
+ static_cast(STORE_HASH);
+ serializer(hash_stored_for_bucket);
+
+ const slz_size_type nb_elements = m_nb_elements;
+ serializer(nb_elements);
+
+ const slz_size_type bucket_count = m_buckets_data.size();
+ serializer(bucket_count);
+
+ const float min_load_factor = m_min_load_factor;
+ serializer(min_load_factor);
+
+ const float max_load_factor = m_max_load_factor;
+ serializer(max_load_factor);
+
+ for (const bucket_entry& bucket : m_buckets_data) {
+ if (bucket.empty()) {
+ const std::int16_t empty_bucket =
+ bucket_entry::EMPTY_MARKER_DIST_FROM_IDEAL_BUCKET;
+ serializer(empty_bucket);
+ } else {
+ const std::int16_t dist_from_ideal_bucket =
+ bucket.dist_from_ideal_bucket();
+ serializer(dist_from_ideal_bucket);
+ if (STORE_HASH) {
+ const std::uint32_t truncated_hash = bucket.truncated_hash();
+ serializer(truncated_hash);
+ }
+ serializer(bucket.value());
+ }
+ }
+ }
+
+ template
+ void deserialize_impl(Deserializer& deserializer, bool hash_compatible) {
+ tsl_rh_assert(m_buckets_data.empty()); // Current hash table must be empty
+
+ const slz_size_type version =
+ deserialize_value(deserializer);
+ // For now we only have one version of the serialization protocol.
+ // If it doesn't match there is a problem with the file.
+ if (version != SERIALIZATION_PROTOCOL_VERSION) {
+ TSL_RH_THROW_OR_TERMINATE(std::runtime_error,
+ "Can't deserialize the ordered_map/set. "
+ "The protocol version header is invalid.");
+ }
+
+ const bool hash_stored_for_bucket =
+ deserialize_value(deserializer) ? true : false;
+ if (hash_compatible && STORE_HASH != hash_stored_for_bucket) {
+ TSL_RH_THROW_OR_TERMINATE(
+ std::runtime_error,
+ "Can't deserialize a map with a different StoreHash "
+ "than the one used during the serialization when "
+ "hash compatibility is used");
+ }
+
+ const slz_size_type nb_elements =
+ deserialize_value(deserializer);
+ const slz_size_type bucket_count_ds =
+ deserialize_value(deserializer);
+ const float min_load_factor = deserialize_value(deserializer);
+ const float max_load_factor = deserialize_value(deserializer);
+
+ if (min_load_factor < MINIMUM_MIN_LOAD_FACTOR ||
+ min_load_factor > MAXIMUM_MIN_LOAD_FACTOR) {
+ TSL_RH_THROW_OR_TERMINATE(
+ std::runtime_error,
+ "Invalid min_load_factor. Check that the serializer "
+ "and deserializer support floats correctly as they "
+ "can be converted implicitly to ints.");
+ }
+
+ if (max_load_factor < MINIMUM_MAX_LOAD_FACTOR ||
+ max_load_factor > MAXIMUM_MAX_LOAD_FACTOR) {
+ TSL_RH_THROW_OR_TERMINATE(
+ std::runtime_error,
+ "Invalid max_load_factor. Check that the serializer "
+ "and deserializer support floats correctly as they "
+ "can be converted implicitly to ints.");
+ }
+
+ this->min_load_factor(min_load_factor);
+ this->max_load_factor(max_load_factor);
+
+ if (bucket_count_ds == 0) {
+ tsl_rh_assert(nb_elements == 0);
+ return;
+ }
+
+ if (!hash_compatible) {
+ reserve(numeric_cast(nb_elements,
+ "Deserialized nb_elements is too big."));
+ for (slz_size_type ibucket = 0; ibucket < bucket_count_ds; ibucket++) {
+ const distance_type dist_from_ideal_bucket =
+ deserialize_value(deserializer);
+ if (dist_from_ideal_bucket !=
+ bucket_entry::EMPTY_MARKER_DIST_FROM_IDEAL_BUCKET) {
+ if (hash_stored_for_bucket) {
+ TSL_RH_UNUSED(deserialize_value(deserializer));
+ }
+
+ insert(deserialize_value(deserializer));
+ }
+ }
+
+ tsl_rh_assert(nb_elements == size());
+ } else {
+ m_bucket_count = numeric_cast(
+ bucket_count_ds, "Deserialized bucket_count is too big.");
+
+ GrowthPolicy::operator=(GrowthPolicy(m_bucket_count));
+ // GrowthPolicy should not modify the bucket count we got from
+ // deserialization
+ if (m_bucket_count != bucket_count_ds) {
+ TSL_RH_THROW_OR_TERMINATE(std::runtime_error,
+ "The GrowthPolicy is not the same even "
+ "though hash_compatible is true.");
+ }
+
+ m_nb_elements = numeric_cast(
+ nb_elements, "Deserialized nb_elements is too big.");
+ m_buckets_data.resize(m_bucket_count);
+ m_buckets = m_buckets_data.data();
+
+ for (bucket_entry& bucket : m_buckets_data) {
+ const distance_type dist_from_ideal_bucket =
+ deserialize_value(deserializer);
+ if (dist_from_ideal_bucket !=
+ bucket_entry::EMPTY_MARKER_DIST_FROM_IDEAL_BUCKET) {
+ truncated_hash_type truncated_hash = 0;
+ if (hash_stored_for_bucket) {
+ tsl_rh_assert(hash_stored_for_bucket);
+ truncated_hash = deserialize_value(deserializer);
+ }
+
+ bucket.set_value_of_empty_bucket(
+ dist_from_ideal_bucket, truncated_hash,
+ deserialize_value(deserializer));
+ }
+ }
+
+ if (!m_buckets_data.empty()) {
+ m_buckets_data.back().set_as_last_bucket();
+ }
+ }
+ }
+
+ public:
+ static const size_type DEFAULT_INIT_BUCKETS_SIZE = 0;
+
+ static constexpr float DEFAULT_MAX_LOAD_FACTOR = 0.5f;
+ static constexpr float MINIMUM_MAX_LOAD_FACTOR = 0.2f;
+ static constexpr float MAXIMUM_MAX_LOAD_FACTOR = 0.95f;
+
+ static constexpr float DEFAULT_MIN_LOAD_FACTOR = 0.0f;
+ static constexpr float MINIMUM_MIN_LOAD_FACTOR = 0.0f;
+ static constexpr float MAXIMUM_MIN_LOAD_FACTOR = 0.15f;
+
+ static_assert(MINIMUM_MAX_LOAD_FACTOR < MAXIMUM_MAX_LOAD_FACTOR,
+ "MINIMUM_MAX_LOAD_FACTOR should be < MAXIMUM_MAX_LOAD_FACTOR");
+ static_assert(MINIMUM_MIN_LOAD_FACTOR < MAXIMUM_MIN_LOAD_FACTOR,
+ "MINIMUM_MIN_LOAD_FACTOR should be < MAXIMUM_MIN_LOAD_FACTOR");
+ static_assert(MAXIMUM_MIN_LOAD_FACTOR < MINIMUM_MAX_LOAD_FACTOR,
+ "MAXIMUM_MIN_LOAD_FACTOR should be < MINIMUM_MAX_LOAD_FACTOR");
+
+ private:
+ /**
+ * Protocol version currenlty used for serialization.
+ */
+ static const slz_size_type SERIALIZATION_PROTOCOL_VERSION = 1;
+
+ /**
+ * Return an always valid pointer to an static empty bucket_entry with
+ * last_bucket() == true.
+ */
+ bucket_entry* static_empty_bucket_ptr() noexcept {
+ static bucket_entry empty_bucket(true);
+ return &empty_bucket;
+ }
+
+ private:
+ buckets_container_type m_buckets_data;
+
+ /**
+ * Points to m_buckets_data.data() if !m_buckets_data.empty() otherwise points
+ * to static_empty_bucket_ptr. This variable is useful to avoid the cost of
+ * checking if m_buckets_data is empty when trying to find an element.
+ *
+ * TODO Remove m_buckets_data and only use a pointer instead of a
+ * pointer+vector to save some space in the robin_hash object. Manage the
+ * Allocator manually.
+ */
+ bucket_entry* m_buckets;
+
+ /**
+ * Used a lot in find, avoid the call to m_buckets_data.size() which is a bit
+ * slower.
+ */
+ size_type m_bucket_count;
+
+ size_type m_nb_elements;
+
+ size_type m_load_threshold;
+
+ float m_min_load_factor;
+ float m_max_load_factor;
+
+ bool m_grow_on_next_insert;
+
+ /**
+ * We can't shrink down the map on erase operations as the erase methods need
+ * to return the next iterator. Shrinking the map would invalidate all the
+ * iterators and we could not return the next iterator in a meaningful way, On
+ * erase, we thus just indicate on erase that we should try to shrink the hash
+ * table on the next insert if we go below the min_load_factor.
+ */
+ bool m_try_shrink_on_next_insert;
+};
+
+} // namespace detail_robin_hash
+
+} // namespace tsl
+
+#endif
diff --git a/src/external/tsl/robin_map.h b/src/external/tsl/robin_map.h
new file mode 100644
index 00000000..fc860407
--- /dev/null
+++ b/src/external/tsl/robin_map.h
@@ -0,0 +1,807 @@
+/**
+ * MIT License
+ *
+ * Copyright (c) 2017 Thibaut Goetghebuer-Planchon
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef TSL_ROBIN_MAP_H
+#define TSL_ROBIN_MAP_H
+
#include <cstddef>
#include <functional>
#include <initializer_list>
#include <memory>
#include <type_traits>
#include <utility>
+
+#include "robin_hash.h"
+
+namespace tsl {
+
+/**
+ * Implementation of a hash map using open-addressing and the robin hood hashing
+ * algorithm with backward shift deletion.
+ *
 * For operations modifying the hash map (insert, erase, rehash, ...), the
 * strong exception guarantee is only guaranteed when the expression
 * `std::is_nothrow_swappable<std::pair<Key, T>>::value &&
 * std::is_nothrow_move_constructible<std::pair<Key, T>>::value` is true,
 * otherwise if an exception is thrown during the swap or the move, the hash map
 * may end up in a undefined state. Per the standard a `Key` or `T` with a
 * noexcept copy constructor and no move constructor also satisfies the
 * `std::is_nothrow_move_constructible<std::pair<Key, T>>::value` criterion (and
 * will thus guarantee the strong exception for the map).
+ *
+ * When `StoreHash` is true, 32 bits of the hash are stored alongside the
+ * values. It can improve the performance during lookups if the `KeyEqual`
+ * function takes time (if it engenders a cache-miss for example) as we then
+ * compare the stored hashes before comparing the keys. When
+ * `tsl::rh::power_of_two_growth_policy` is used as `GrowthPolicy`, it may also
 * speed-up the rehash process as we can avoid to recalculate the hash. When it
 * is detected that storing the hash will not incur any memory penalty due to
 * alignment (i.e. `sizeof(tsl::detail_robin_hash::bucket_entry<ValueType, true>) ==
 * sizeof(tsl::detail_robin_hash::bucket_entry<ValueType, false>)`)
 * and `tsl::rh::power_of_two_growth_policy` is used, the hash will be stored
 * even if `StoreHash` is false so that we can speed-up the rehash (but it will
 * not be used on lookups unless `StoreHash` is true).
+ *
+ * `GrowthPolicy` defines how the map grows and consequently how a hash value is
+ * mapped to a bucket. By default the map uses
+ * `tsl::rh::power_of_two_growth_policy`. This policy keeps the number of
+ * buckets to a power of two and uses a mask to map the hash to a bucket instead
+ * of the slow modulo. Other growth policies are available and you may define
+ * your own growth policy, check `tsl::rh::power_of_two_growth_policy` for the
+ * interface.
+ *
+ * `std::pair` must be swappable.
+ *
+ * `Key` and `T` must be copy and/or move constructible.
+ *
+ * If the destructor of `Key` or `T` throws an exception, the behaviour of the
+ * class is undefined.
+ *
+ * Iterators invalidation:
+ * - clear, operator=, reserve, rehash: always invalidate the iterators.
+ * - insert, emplace, emplace_hint, operator[]: if there is an effective
+ * insert, invalidate the iterators.
+ * - erase: always invalidate the iterators.
+ */
+template ,
+ class KeyEqual = std::equal_to,
+ class Allocator = std::allocator>,
+ bool StoreHash = false,
+ class GrowthPolicy = tsl::rh::power_of_two_growth_policy<2>>
+class robin_map {
+ private:
+ template
+ using has_is_transparent = tsl::detail_robin_hash::has_is_transparent;
+
+ class KeySelect {
+ public:
+ using key_type = Key;
+
+ const key_type& operator()(const std::pair& key_value) const
+ noexcept {
+ return key_value.first;
+ }
+
+ key_type& operator()(std::pair& key_value) noexcept {
+ return key_value.first;
+ }
+ };
+
+ class ValueSelect {
+ public:
+ using value_type = T;
+
+ const value_type& operator()(const std::pair& key_value) const
+ noexcept {
+ return key_value.second;
+ }
+
+ value_type& operator()(std::pair& key_value) noexcept {
+ return key_value.second;
+ }
+ };
+
+ using ht = detail_robin_hash::robin_hash, KeySelect,
+ ValueSelect, Hash, KeyEqual,
+ Allocator, StoreHash, GrowthPolicy>;
+
+ public:
+ using key_type = typename ht::key_type;
+ using mapped_type = T;
+ using value_type = typename ht::value_type;
+ using size_type = typename ht::size_type;
+ using difference_type = typename ht::difference_type;
+ using hasher = typename ht::hasher;
+ using key_equal = typename ht::key_equal;
+ using allocator_type = typename ht::allocator_type;
+ using reference = typename ht::reference;
+ using const_reference = typename ht::const_reference;
+ using pointer = typename ht::pointer;
+ using const_pointer = typename ht::const_pointer;
+ using iterator = typename ht::iterator;
+ using const_iterator = typename ht::const_iterator;
+
+ public:
+ /*
+ * Constructors
+ */
+ robin_map() : robin_map(ht::DEFAULT_INIT_BUCKETS_SIZE) {}
+
+ explicit robin_map(size_type bucket_count, const Hash& hash = Hash(),
+ const KeyEqual& equal = KeyEqual(),
+ const Allocator& alloc = Allocator())
+ : m_ht(bucket_count, hash, equal, alloc) {}
+
+ robin_map(size_type bucket_count, const Allocator& alloc)
+ : robin_map(bucket_count, Hash(), KeyEqual(), alloc) {}
+
+ robin_map(size_type bucket_count, const Hash& hash, const Allocator& alloc)
+ : robin_map(bucket_count, hash, KeyEqual(), alloc) {}
+
+ explicit robin_map(const Allocator& alloc)
+ : robin_map(ht::DEFAULT_INIT_BUCKETS_SIZE, alloc) {}
+
+ template
+ robin_map(InputIt first, InputIt last,
+ size_type bucket_count = ht::DEFAULT_INIT_BUCKETS_SIZE,
+ const Hash& hash = Hash(), const KeyEqual& equal = KeyEqual(),
+ const Allocator& alloc = Allocator())
+ : robin_map(bucket_count, hash, equal, alloc) {
+ insert(first, last);
+ }
+
+ template
+ robin_map(InputIt first, InputIt last, size_type bucket_count,
+ const Allocator& alloc)
+ : robin_map(first, last, bucket_count, Hash(), KeyEqual(), alloc) {}
+
+ template
+ robin_map(InputIt first, InputIt last, size_type bucket_count,
+ const Hash& hash, const Allocator& alloc)
+ : robin_map(first, last, bucket_count, hash, KeyEqual(), alloc) {}
+
+ robin_map(std::initializer_list init,
+ size_type bucket_count = ht::DEFAULT_INIT_BUCKETS_SIZE,
+ const Hash& hash = Hash(), const KeyEqual& equal = KeyEqual(),
+ const Allocator& alloc = Allocator())
+ : robin_map(init.begin(), init.end(), bucket_count, hash, equal, alloc) {}
+
+ robin_map(std::initializer_list init, size_type bucket_count,
+ const Allocator& alloc)
+ : robin_map(init.begin(), init.end(), bucket_count, Hash(), KeyEqual(),
+ alloc) {}
+
+ robin_map(std::initializer_list init, size_type bucket_count,
+ const Hash& hash, const Allocator& alloc)
+ : robin_map(init.begin(), init.end(), bucket_count, hash, KeyEqual(),
+ alloc) {}
+
+ robin_map& operator=(std::initializer_list ilist) {
+ m_ht.clear();
+
+ m_ht.reserve(ilist.size());
+ m_ht.insert(ilist.begin(), ilist.end());
+
+ return *this;
+ }
+
+ allocator_type get_allocator() const { return m_ht.get_allocator(); }
+
+ /*
+ * Iterators
+ */
+ iterator begin() noexcept { return m_ht.begin(); }
+ const_iterator begin() const noexcept { return m_ht.begin(); }
+ const_iterator cbegin() const noexcept { return m_ht.cbegin(); }
+
+ iterator end() noexcept { return m_ht.end(); }
+ const_iterator end() const noexcept { return m_ht.end(); }
+ const_iterator cend() const noexcept { return m_ht.cend(); }
+
+ /*
+ * Capacity
+ */
+ bool empty() const noexcept { return m_ht.empty(); }
+ size_type size() const noexcept { return m_ht.size(); }
+ size_type max_size() const noexcept { return m_ht.max_size(); }
+
+ /*
+ * Modifiers
+ */
+ void clear() noexcept { m_ht.clear(); }
+
+ std::pair insert(const value_type& value) {
+ return m_ht.insert(value);
+ }
+
+ template ::value>::type* = nullptr>
+ std::pair insert(P&& value) {
+ return m_ht.emplace(std::forward(value));
+ }
+
+ std::pair insert(value_type&& value) {
+ return m_ht.insert(std::move(value));
+ }
+
+ iterator insert(const_iterator hint, const value_type& value) {
+ return m_ht.insert_hint(hint, value);
+ }
+
+ template ::value>::type* = nullptr>
+ iterator insert(const_iterator hint, P&& value) {
+ return m_ht.emplace_hint(hint, std::forward(value));
+ }
+
+ iterator insert(const_iterator hint, value_type&& value) {
+ return m_ht.insert_hint(hint, std::move(value));
+ }
+
+ template
+ void insert(InputIt first, InputIt last) {
+ m_ht.insert(first, last);
+ }
+
+ void insert(std::initializer_list ilist) {
+ m_ht.insert(ilist.begin(), ilist.end());
+ }
+
+ template
+ std::pair