diff --git a/definitions/memory_layout.yaml b/definitions/memory_layout.yaml
index 6af8844..8c76f62 100644
--- a/definitions/memory_layout.yaml
+++ b/definitions/memory_layout.yaml
@@ -7,8 +7,11 @@
     size: 1T
     shared: true
 
+- name: heapmap
+  size: 32G
+
 - name: heap
-  size: 64G
+  size: 32G
 
 - name: stacks
   size: 64G
diff --git a/src/kernel/heap_allocator.cpp b/src/kernel/heap_allocator.cpp
index 5fc196a..edee718 100644
--- a/src/kernel/heap_allocator.cpp
+++ b/src/kernel/heap_allocator.cpp
@@ -9,69 +9,43 @@
 #include "heap_allocator.h"
 #include "memory.h"
 
-struct heap_allocator::mem_header
+uint32_t & get_map_key(heap_allocator::block_info &info) { return info.offset; }
+
+struct heap_allocator::free_header
 {
-    mem_header(mem_header *prev, mem_header *next, uint8_t order) :
-        m_prev(prev), m_next(next)
-    {
-        set_order(order);
-    }
-
-    inline void set_order(uint8_t order) {
-        m_prev = reinterpret_cast<mem_header *>(
-            reinterpret_cast<uintptr_t>(prev()) | (order & 0x3f));
-    }
-
-    inline void set_used(bool used) {
-        m_next = reinterpret_cast<mem_header *>(
-            reinterpret_cast<uintptr_t>(next()) | (used ? 1 : 0));
-    }
-
-    inline void set_next(mem_header *next) {
-        bool u = used();
-        m_next = next;
-        set_used(u);
-    }
-
-    inline void set_prev(mem_header *prev) {
-        uint8_t s = order();
-        m_prev = prev;
-        set_order(s);
+    void clear(unsigned new_order) {
+        prev = next = nullptr;
+        order = new_order;
     }
 
     void remove() {
-        if (next()) next()->set_prev(prev());
-        if (prev()) prev()->set_next(next());
-        set_prev(nullptr);
-        set_next(nullptr);
+        if (next) next->prev = prev;
+        if (prev) prev->next = next;
+        prev = next = nullptr;
     }
 
-    inline mem_header * next() { return util::mask_pointer(m_next, 0x3f); }
-    inline mem_header * prev() { return util::mask_pointer(m_prev, 0x3f); }
-
-    inline mem_header * buddy() const {
-        return reinterpret_cast<mem_header *>(
-            reinterpret_cast<uintptr_t>(this) ^ (1 << order()));
+    inline free_header * buddy() const {
+        return reinterpret_cast<free_header *>(
+            reinterpret_cast<uintptr_t>(this) ^ (1 << order));
     }
 
     inline bool eldest() const { return this < buddy(); }
 
-    inline uint8_t order() const { return reinterpret_cast<uintptr_t>(m_prev) & 0x3f; }
-    inline bool used() const { return reinterpret_cast<uintptr_t>(m_next) & 0x1; }
-
-private:
-    mem_header *m_prev;
-    mem_header *m_next;
+    free_header *prev;
+    free_header *next;
+    unsigned order;
 };
 
 heap_allocator::heap_allocator() : m_start {0}, m_end {0} {}
 
-heap_allocator::heap_allocator(uintptr_t start, size_t size) :
+heap_allocator::heap_allocator(uintptr_t start, size_t size, uintptr_t heapmap) :
     m_start {start},
-    m_end {start+size},
-    m_blocks {0},
-    m_allocated_size {0}
+    m_end {start},
+    m_maxsize {size},
+    m_allocated_size {0},
+    m_map (reinterpret_cast<block_info *>(heapmap), 512)
+
 {
     memset(m_free, 0, sizeof(m_free));
 }
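An aside for readers new to buddy allocators, not part of the patch: free_header::buddy() finds a block's partner by flipping the single address bit corresponding to the block's order. A minimal standalone sketch of that arithmetic with made-up addresses, assuming blocks are naturally aligned to their own size (which new_block() arranges later in this file):

    #include <cassert>
    #include <cstdint>

    // Illustrative only: mirrors free_header::buddy() with plain integers.
    static uintptr_t buddy_of(uintptr_t addr, unsigned order) {
        return addr ^ (uintptr_t{1} << order);
    }

    int main() {
        // Two adjacent 64-byte (order 6) blocks are each other's buddy.
        assert(buddy_of(0x1000, 6) == 0x1040);
        assert(buddy_of(0x1040, 6) == 0x1000);
        // After merging to order 7, the buddy is the neighboring 128-byte block.
        assert(buddy_of(0x1000, 7) == 0x1080);
        return 0;
    }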
@@ -79,12 +53,10 @@ heap_allocator::heap_allocator(uintptr_t start, size_t size) :
 
 void *
 heap_allocator::allocate(size_t length)
 {
-    size_t total = length + sizeof(mem_header);
-
     if (length == 0)
         return nullptr;
 
-    unsigned order = util::log2(total);
+    unsigned order = util::log2(length);
     if (order < min_order)
         order = min_order;
@@ -94,10 +66,16 @@ heap_allocator::allocate(size_t length)
 
     util::scoped_lock lock {m_lock};
 
-    mem_header *header = pop_free(order);
-    header->set_used(true);
     m_allocated_size += (1 << order);
-    return header + 1;
+
+    free_header *block = pop_free(order);
+    if (!block && !split_off(order, block)) {
+        return new_block(order);
+    }
+
+    m_map[map_key(block)].free = false;
+
+    return block;
 }
 
 void
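Another illustrative aside: allocate() now sizes a request by taking the log2 of the raw length (no more in-band header) and clamping it to min_order. The sketch below assumes util::log2 rounds up to the next power of two, which is how the call above uses it; the sizes are made up:

    #include <cassert>
    #include <cstddef>

    // Illustrative stand-in for util::log2, assumed here to round up,
    // e.g. a 65-byte request becomes an order-7 (128-byte) block.
    static unsigned ceil_log2(size_t n) {
        unsigned order = 0;
        while ((size_t{1} << order) < n) ++order;
        return order;
    }

    int main() {
        const unsigned min_order = 6;   // 64-byte minimum block
        size_t requests[] = {1, 64, 65, 100, 4096};
        unsigned expected[] = {6, 6, 7, 7, 12};
        for (int i = 0; i < 5; ++i) {
            unsigned order = ceil_log2(requests[i]);
            if (order < min_order) order = min_order;
            assert(order == expected[i]);
        }
        return 0;
    }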
@@ -111,70 +89,107 @@ heap_allocator::free(void *p)
 
     util::scoped_lock lock {m_lock};
 
-    mem_header *header = reinterpret_cast<mem_header *>(p);
-    header -= 1; // p points after the header
-    header->set_used(false);
-    m_allocated_size -= (1 << header->order());
+    free_header *block = reinterpret_cast<free_header *>(p);
+    block_info *info = m_map.find(map_key(block));
+    kassert(info, "Attempt to free pointer not known to the heap");
+    if (!info) return;
 
-    while (header->order() != max_order) {
-        auto order = header->order();
+    m_allocated_size -= (1 << info->order);
 
-        mem_header *buddy = header->buddy();
-        if (buddy->used() || buddy->order() != order)
-            break;
-
-        if (get_free(order) == buddy)
-            get_free(order) = buddy->next();
-
-        buddy->remove();
-
-        header = header->eldest() ? header : buddy;
-        header->set_order(order + 1);
-    }
-
-    uint8_t order = header->order();
-    header->set_next(get_free(order));
-    get_free(order) = header;
-    if (header->next())
-        header->next()->set_prev(header);
+    block->clear(info->order);
+    block = merge_block(block);
+    register_free_block(block, block->order);
 }
 
-void
-heap_allocator::ensure_block(unsigned order)
-{
-    if (get_free(order) != nullptr)
-        return;
-
-    if (order == max_order) {
-        size_t bytes = (1 << max_order);
-        uintptr_t next = m_start + m_blocks * bytes;
-        if (next + bytes <= m_end) {
-            mem_header *nextp = reinterpret_cast<mem_header *>(next);
-            new (nextp) mem_header(nullptr, nullptr, order);
-            get_free(order) = nextp;
-            ++m_blocks;
-        }
-    } else {
-        mem_header *orig = pop_free(order + 1);
-        if (orig) {
-            mem_header *next = util::offset_pointer(orig, 1 << order);
-            new (next) mem_header(orig, nullptr, order);
-
-            orig->set_next(next);
-            orig->set_order(order);
-            get_free(order) = orig;
-        }
-    }
-}
-
-heap_allocator::mem_header *
+heap_allocator::free_header *
 heap_allocator::pop_free(unsigned order)
 {
-    ensure_block(order);
-    mem_header *block = get_free(order);
+    free_header *block = get_free(order);
     if (block) {
-        get_free(order) = block->next();
+        get_free(order) = block->next;
         block->remove();
     }
     return block;
 }
+
+heap_allocator::free_header *
+heap_allocator::merge_block(free_header *block)
+{
+    // The lock needs to be held while calling merge_block
+
+    unsigned order = block->order;
+    while (order < max_order) {
+        block_info *info = m_map.find(map_key(block->buddy()));
+        if (!info || !info->free || info->order != order)
+            break;
+
+        free_header *buddy = block->buddy();
+        if (get_free(order) == buddy)
+            get_free(order) = buddy->next;
+        buddy->remove();
+
+        block = block->eldest() ? block : buddy;
+
+        m_map.erase(map_key(block->buddy()));
+        block->order = m_map[map_key(block)].order = ++order;
+    }
+
+    return block;
+}
+
+void *
+heap_allocator::new_block(unsigned order)
+{
+    // The lock needs to be held while calling new_block
+
+    // Add the largest blocks possible until m_end is
+    // aligned to be a block of the requested order
+    unsigned current = address_order(m_end);
+    while (current < order) {
+        register_free_block(reinterpret_cast<free_header *>(m_end), current);
+        m_end += 1 << current;
+        current = address_order(m_end);
+    }
+
+    void *block = reinterpret_cast<void *>(m_end);
+    m_end += 1 << order;
+    m_map[map_key(block)].order = order;
+    return block;
+}
+
+void
+heap_allocator::register_free_block(free_header *block, unsigned order)
+{
+    // The lock needs to be held while calling register_free_block
+
+    block_info &info = m_map[map_key(block)];
+    info.free = true;
+    info.order = order;
+
+    block->clear(order);
+    block->next = get_free(order);
+    get_free(order) = block;
+}
+
+bool
+heap_allocator::split_off(unsigned order, free_header *&block)
+{
+    // The lock needs to be held while calling split_off
+
+    const unsigned next = order + 1;
+    if (next > max_order) {
+        block = nullptr;
+        return false;
+    }
+
+    block = pop_free(next);
+    if (!block && !split_off(next, block))
+        return false;
+
+    block->order = order;
+    free_header *buddy = block->buddy();
+    register_free_block(buddy, order);
+    m_map[map_key(block)].order = order;
+    return true;
+}
diff --git a/src/kernel/heap_allocator.h b/src/kernel/heap_allocator.h
index 6837a6e..456922c 100644
--- a/src/kernel/heap_allocator.h
+++ b/src/kernel/heap_allocator.h
@@ -5,7 +5,7 @@
 #include <stddef.h>
 #include <stdint.h>
-
+#include <util/node_map.h>
 
 /// Allocator for a given heap range
 class heap_allocator
@@ -15,9 +15,10 @@ public:
     heap_allocator();
 
     /// Constructor. The given memory area must already have been reserved.
-    /// \arg start Starting address of the heap
-    /// \arg size Size of the heap in bytes
-    heap_allocator(uintptr_t start, size_t size);
+    /// \arg start   Starting address of the heap
+    /// \arg size    Size of the heap in bytes
+    /// \arg heapmap Starting address of the heap tracking map
+    heap_allocator(uintptr_t start, size_t size, uintptr_t heapmap);
 
     /// Allocate memory from the area managed.
     /// \arg length The amount of memory to allocate, in bytes
@@ -30,34 +31,74 @@ public:
     void free(void *p);
 
     /// Minimum block size is (2^min_order). Must be at least 6.
-    static const unsigned min_order = 6;
+    static const unsigned min_order = 6;  // 2^6 == 64 B
 
-    /// Maximum block size is (2^max_order). Must be less than 64.
-    static const unsigned max_order = 22;
+    /// Maximum block size is (2^max_order). Must be less than 32 + min_order.
+    static const unsigned max_order = 22; // 2^22 == 4 MiB
 
 protected:
-    class mem_header;
+    struct free_header;
+    struct block_info
+    {
+        uint32_t offset;
+        uint8_t order;
+        bool free;
+    };
+    friend uint32_t & get_map_key(block_info &info);
 
-    /// Ensure there is a block of a given order, recursively splitting
-    /// \arg order Order (2^N) of the block we want
-    void ensure_block(unsigned order);
+    inline uint32_t map_key(void *p) const {
+        return static_cast<uint32_t>(
+            (reinterpret_cast<uintptr_t>(p) - m_start) >> min_order);
+    }
+
+    using block_map = util::inplace_map<uint32_t, block_info>;
+
+    /// Get the largest block size order that aligns with this address
+    inline unsigned address_order(uintptr_t addr) {
+        unsigned tz = __builtin_ctzll(addr);
+        return tz > max_order ? max_order : tz;
+    }
 
     /// Helper accessor for the list of blocks of a given order
     /// \arg order Order (2^N) of the block we want
     /// \returns A mutable reference to the head of the list
-    mem_header *& get_free(unsigned order) { return m_free[order - min_order]; }
+    free_header *& get_free(unsigned order) { return m_free[order - min_order]; }
 
-    /// Helper to get a block of the given order, growing if necessary
+    /// Helper to remove and return the first block in the free
+    /// list for the given order.
+    free_header * pop_free(unsigned order);
+
+    /// Merge the given block with any currently free buddies to
+    /// create the largest block possible.
+    /// \arg block The current block
+    /// \returns The fully-merged block
+    free_header * merge_block(free_header *block);
+
+    /// Create a new block of the given order past the end of the existing
+    /// heap. The block will be marked as non-free.
+    /// \arg order The requested size order
+    /// \returns A pointer to the block's memory
+    void * new_block(unsigned order);
+
+    /// Register the given block as free with the given order.
+    /// \arg block The newly-created or freed block
+    /// \arg order The size order to set on the block
+    void register_free_block(free_header *block, unsigned order);
+
+    /// Helper to get a block of the given order by splitting existing
+    /// larger blocks. Returns false if there were no larger blocks.
     /// \arg order Order (2^N) of the block we want
-    /// \returns A detached block of the given order
-    mem_header * pop_free(unsigned order);
+    /// \arg block [out] Receives a pointer to the requested block
+    /// \returns True if a split was done
    bool split_off(unsigned order, free_header *&block);
 
     uintptr_t m_start, m_end;
-    size_t m_blocks;
-    mem_header *m_free[max_order - min_order + 1];
+    size_t m_maxsize;
+    free_header *m_free[max_order - min_order + 1];
 
     size_t m_allocated_size;
     util::spinlock m_lock;
+    block_map m_map;
 
     heap_allocator(const heap_allocator &) = delete;
 };
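An illustrative aside, not part of the patch: the two inline helpers above are the whole bridge between heap addresses and map entries. map_key() drops the low min_order bits of the offset from the heap base, and address_order() caps an address's natural alignment at max_order. A standalone sketch with a made-up, 64-bit heap base (uses the GCC/Clang __builtin_ctzll builtin, as the header does):

    #include <cassert>
    #include <cstdint>

    static const unsigned min_order = 6, max_order = 22;

    // Mirrors heap_allocator::map_key(), with the heap base passed explicitly.
    static uint32_t map_key(uintptr_t heap_start, uintptr_t p) {
        return static_cast<uint32_t>((p - heap_start) >> min_order);
    }

    // Mirrors heap_allocator::address_order().
    static unsigned address_order(uintptr_t addr) {
        unsigned tz = __builtin_ctzll(addr);
        return tz > max_order ? max_order : tz;
    }

    int main() {
        const uintptr_t heap = 0x400000000;      // example heap base (2^34)
        assert(map_key(heap, heap) == 0);        // first 64-byte block
        assert(map_key(heap, heap + 64) == 1);   // next block
        assert(map_key(heap, heap + 4096) == 64);
        assert(address_order(heap) == max_order);        // base exceeds 4 MiB alignment
        assert(address_order(heap + (1 << 12)) == 12);   // 4 KiB-aligned address
        return 0;
    }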
diff --git a/src/kernel/memory_bootstrap.cpp b/src/kernel/memory_bootstrap.cpp
index 393bf85..4533e58 100644
--- a/src/kernel/memory_bootstrap.cpp
+++ b/src/kernel/memory_bootstrap.cpp
@@ -37,6 +37,9 @@ frame_allocator &g_frame_allocator = __g_frame_allocator_storage.value;
 static util::no_construct<obj::vm_area_untracked> __g_kernel_heap_area_storage;
 obj::vm_area_untracked &g_kernel_heap_area = __g_kernel_heap_area_storage.value;
 
+static util::no_construct<obj::vm_area_untracked> __g_kernel_heapmap_area_storage;
+obj::vm_area_untracked &g_kernel_heapmap_area = __g_kernel_heapmap_area_storage.value;
+
 static util::no_construct<obj::vm_area_guarded> __g_kernel_stacks_storage;
 obj::vm_area_guarded &g_kernel_stacks = __g_kernel_stacks_storage.value;
 
@@ -73,7 +76,6 @@ memory_initialize_pre_ctors(bootproto::args &kargs)
 
     page_table *kpml4 = static_cast<page_table *>(kargs.pml4);
 
-    new (&g_kernel_heap) heap_allocator {mem::heap_offset, mem::heap_size};
     frame_block *blocks = reinterpret_cast<frame_block *>(mem::bitmap_offset);
     new (&g_frame_allocator) frame_allocator {blocks, kargs.frame_blocks.count};
 
@@ -87,13 +89,20 @@ memory_initialize_pre_ctors(bootproto::args &kargs)
         reg = reg->next;
     }
 
+
     obj::process *kp = obj::process::create_kernel_process(kpml4);
     vm_space &vm = kp->space();
 
     obj::vm_area *heap = new (&g_kernel_heap_area)
         obj::vm_area_untracked(mem::heap_size, vm_flags::write);
+    obj::vm_area *heap_map = new (&g_kernel_heapmap_area)
+        obj::vm_area_untracked(mem::heapmap_size, vm_flags::write);
+
     vm.add(mem::heap_offset, heap);
+    vm.add(mem::heapmap_offset, heap_map);
+
+    new (&g_kernel_heap) heap_allocator {mem::heap_offset, mem::heap_size,
+        mem::heapmap_offset};
 
     obj::vm_area *stacks = new (&g_kernel_stacks) obj::vm_area_guarded {
         mem::stacks_offset,
diff --git a/src/libraries/util/util.module b/src/libraries/util/util.module
index f11e648..9da9cf7 100644
--- a/src/libraries/util/util.module
+++ b/src/libraries/util/util.module
@@ -20,6 +20,7 @@ module("util",
     "util/map.h",
     "util/misc.h",
     "util/no_construct.h",
+    "util/node_map.h",
     "util/pointers.h",
     "util/spinlock.h",
     "util/util.h",
diff --git a/src/libraries/util/util/hash.h b/src/libraries/util/util/hash.h
index 50a2fc6..7b8921f 100644
--- a/src/libraries/util/util/hash.h
+++ b/src/libraries/util/util/hash.h
@@ -59,13 +59,30 @@ constexpr inline typename types::sized_uint::type hash_fold(typename types::s
     return (value2 >> types::sized_uint::bits) ^ (value2 & types::sized_uint::mask);
 }
 
+inline uint64_t splitmix64(uint64_t v) {
+    // From splitmix64, http://xorshift.di.unimi.it/splitmix64.c
+    v = (v ^ (v >> 30)) * 0xbf58476d1ce4e5b9ull;
+    v = (v ^ (v >> 27)) * 0x94d049bb133111ebull;
+    v = v ^ (v >> 31);
+    return v;
+}
+
+inline uint32_t inthash32(uint32_t v) {
+    // From the H2 database's integer hash
+    // https://github.com/h2database/h2database
+    v = ((v >> 16) ^ v) * 0x45d9f3b;
+    v = ((v >> 16) ^ v) * 0x45d9f3b;
+    v = (v >> 16) ^ v;
+    return v;
+}
+
 template <typename T>
 inline uint64_t hash(const T &v) {
     return fnv1a::hash64(reinterpret_cast<const void *>(&v), sizeof(T));
 }
 
-template <> inline uint64_t hash<uint8_t> (const uint8_t &i) { return i; }
-template <> inline uint64_t hash<uint16_t>(const uint16_t &i) { return i; }
-template <> inline uint64_t hash<uint32_t>(const uint32_t &i) { return i; }
-template <> inline uint64_t hash<uint64_t>(const uint64_t &i) { return i; }
+template <> inline uint64_t hash<uint8_t> (const uint8_t &i) { return splitmix64(i); }
+template <> inline uint64_t hash<uint16_t>(const uint16_t &i) { return splitmix64(i); }
+template <> inline uint64_t hash<uint32_t>(const uint32_t &i) { return splitmix64(i); }
+template <> inline uint64_t hash<uint64_t>(const uint64_t &i) { return splitmix64(i); }
 template <> inline uint64_t hash<const char *>(const char * const &s) { return fnv1a::hash64_string(s); }
 
 } // namespace util
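An aside on the hash change: presumably the motivation for routing the integer specializations through splitmix64 is that the heap map's keys are block offsets, which are highly regular, and an identity hash would drop most of them into the same few slots of a power-of-two table. A small sketch, copying the splitmix64 routine above and using an arbitrary 64-slot table purely for illustration:

    #include <cstdint>
    #include <cstdio>

    // Copy of the splitmix64 finalizer added in util/hash.h above.
    static uint64_t splitmix64(uint64_t v) {
        v = (v ^ (v >> 30)) * 0xbf58476d1ce4e5b9ull;
        v = (v ^ (v >> 27)) * 0x94d049bb133111ebull;
        v = v ^ (v >> 31);
        return v;
    }

    int main() {
        // With an identity hash, keys 0..7 occupy eight consecutive slots of a
        // small table (and strided keys collide heavily); after mixing they scatter.
        const uint64_t mask = 0x3f; // e.g. a 64-slot table
        for (uint64_t key = 0; key < 8; ++key)
            printf("key %llu -> slot %llu\n",
                   (unsigned long long)key,
                   (unsigned long long)(splitmix64(key) & mask));
        return 0;
    }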
diff --git a/src/libraries/util/util/node_map.h b/src/libraries/util/util/node_map.h
new file mode 100644
index 0000000..3ae4c0e
--- /dev/null
+++ b/src/libraries/util/util/node_map.h
@@ -0,0 +1,235 @@
+#pragma once
+/// \file node_map.h
+/// Definition of a hash table collection for use in kernel space, where the values
+/// are the hash nodes themselves - the hash key is part of the value.
+///
+/// Thanks to the following people for inspiration of this implementation:
+///
+/// Sebastian Sylvan
+/// https://www.sebastiansylvan.com/post/robin-hood-hashing-should-be-your-default-hash-table-implementation/
+///
+/// Emmanuel Goossaert
+/// http://codecapsule.com/2013/11/11/robin-hood-hashing/
+/// http://codecapsule.com/2013/11/17/robin-hood-hashing-backward-shift-deletion/
+
+#include <stddef.h>
+#include <string.h>
+#include <utility>
+#include <util/hash.h>
+
+namespace util {
+
+using growth_func = void*(*)(void*, size_t, size_t);
+
+inline void * default_realloc(void *p, size_t oldsize, size_t newsize) {
+    char *newp = new char[newsize];
+    memcpy(newp, p, oldsize);
+    delete [] reinterpret_cast<char *>(p);
+    return newp;
+}
+
+inline void * null_realloc(void *p, size_t oldsize, size_t newsize) { return p; }
+
+/// Hash map where the values are the hash nodes themselves. (ie, the
+/// hash key is a part of the value.) Growth is done with realloc, so
+/// the map tries to grow in-place if it can.
+template <typename K, typename V, growth_func realloc = default_realloc>
+class node_map
+{
+public:
+    using key_type = K;
+    using node_type = V;
+
+    static constexpr size_t max_load = 90;
+    static constexpr size_t min_capacity = 8;
+
+    inline size_t count() const { return m_count; }
+    inline size_t capacity() const { return m_capacity; }
+    inline size_t threshold() const { return (m_capacity * max_load) / 100; }
+
+    /// Default constructor. Creates an empty map with the given capacity.
+    node_map(size_t capacity = 0) :
+        m_count {0},
+        m_capacity {0},
+        m_nodes {nullptr}
+    {
+        if (capacity) {
+            m_capacity = 1 << log2(capacity);
+            m_nodes = reinterpret_cast<node_type *>(
+                realloc(nullptr, 0, m_capacity * sizeof(node_type)));
+            for (size_t i = 0; i < m_capacity; ++i)
+                get_map_key(m_nodes[i]) = invalid_id;
+        }
+    }
+
+    /// Existing buffer constructor. Uses the given buffer as initial
+    /// capacity.
+    node_map(node_type *buffer, size_t capacity) :
+        m_count {0},
+        m_capacity {capacity},
+        m_nodes {buffer}
+    {
+        for (size_t i = 0; i < m_capacity; ++i)
+            get_map_key(m_nodes[i]) = invalid_id;
+    }
+
+    virtual ~node_map() {
+        for (size_t i = 0; i < m_capacity; ++i)
+            m_nodes[i].~node_type();
+        delete [] reinterpret_cast<char *>(m_nodes);
+    }
+
+    node_type & operator[](const key_type &key) {
+        size_t slot;
+        if (lookup(key, slot))
+            return m_nodes[slot];
+
+        // Value-initialize so that non-key fields of trivial node types
+        // (eg block_info.free) start out zeroed rather than indeterminate.
+        node_type new_node {};
+        get_map_key(new_node) = key;
+        return insert(std::move(new_node));
+    }
+
+    node_type * find(const key_type &key) {
+        size_t slot;
+        if (!lookup(key, slot))
+            return nullptr;
+        return &m_nodes[slot];
+    }
+
+    const node_type * find(const key_type &key) const {
+        size_t slot;
+        if (!lookup(key, slot))
+            return nullptr;
+        return &m_nodes[slot];
+    }
+
+    node_type & insert(node_type&& node) {
+        if (++m_count > threshold()) grow();
+
+        key_type &key = get_map_key(node);
+        size_t slot = mod(hash(key));
+        size_t dist = 0;
+
+        // Track where the caller's node ends up, so the returned reference
+        // (relied on by operator[]) stays correct even when later iterations
+        // displace other entries further along the table.
+        node_type *placed = nullptr;
+
+        while (true) {
+            node_type &node_at_slot = m_nodes[slot];
+            key_type &key_at_slot = get_map_key(node_at_slot);
+
+            if (open(key_at_slot)) {
+                node_at_slot = node;
+                return placed ? *placed : node_at_slot;
+            }
+
+            size_t psl_at_slot = psl(key_at_slot, slot);
+            if (dist > psl_at_slot) {
+                std::swap(node, node_at_slot);
+                if (!placed)
+                    placed = &node_at_slot;
+                dist = psl_at_slot;
+            }
+
+            slot = mod(slot + 1);
+            ++dist;
+        }
+    }
+
+    bool erase(const key_type &key) {
+        size_t slot;
+        if (!lookup(key, slot))
+            return false;
+
+        node_type &node = m_nodes[slot];
+        node.~node_type();
+        get_map_key(node) = invalid_id;
+        --m_count;
+
+        // Backward-shift deletion: pull later entries toward their ideal
+        // slots, wrapping the index so it stays inside the table.
+        while (fixup(slot))
+            slot = mod(slot + 1);
+        return true;
+    }
+
+protected:
+    inline size_t mod(size_t slot) const { return slot & (m_capacity - 1); }
+    inline bool open(const key_type &key) const { return key == invalid_id; }
+
+    inline size_t psl(const key_type &key, size_t slot) const {
+        return mod(slot + m_capacity - mod(hash(key)));
+    }
+
+    bool fixup(size_t slot) {
+        size_t next_slot = mod(slot+1);
+        node_type &next = m_nodes[next_slot];
+        key_type &next_key = get_map_key(next);
+
+        if (open(next_key) || psl(next_key, next_slot) == 0)
+            return false;
+
+        m_nodes[slot] = std::move(next);
+        next.~node_type();
+        next_key = invalid_id;
+        return true;
+    }
+
+    void grow() {
+        node_type *old_nodes = m_nodes;
+        size_t old_capacity = m_capacity;
+        size_t new_capacity = m_capacity * 2;
+
+        if (new_capacity < min_capacity)
+            new_capacity = min_capacity;
+
+        m_nodes = reinterpret_cast<node_type *>(
+            realloc(m_nodes, old_capacity * sizeof(node_type),
+                new_capacity * sizeof(node_type)));
+
+        for (size_t i = old_capacity; i < new_capacity; ++i)
+            get_map_key(m_nodes[i]) = invalid_id;
+
+        m_capacity = new_capacity;
+
+        for (size_t slot = 0; slot < old_capacity; ++slot) {
+            node_type &node = m_nodes[slot];
+            key_type &key = get_map_key(node);
+            size_t target = mod(hash(key));
+
+            if (open(key) || target < old_capacity)
+                continue;
+
+            --m_count;
+            insert(std::move(node));
+            node.~node_type();
+            key = invalid_id;
+
+            size_t fixer = slot;
+            while (fixup(fixer++) && fixer < old_capacity);
+        }
+    }
+
+    bool lookup(const key_type &key, size_t &slot) const {
+        if (!m_count)
+            return false;
+
+        size_t dist = 0;
+        slot = mod(hash(key));
+
+        while (true) {
+            key_type &key_at_slot = get_map_key(m_nodes[slot]);
+
+            if (key_at_slot == key)
+                return true;
+
+            if (open(key_at_slot) || dist > psl(key_at_slot, slot))
+                return false;
+
+            slot = mod(slot + 1);
+            ++dist;
+        }
+    }
+
+private:
+    size_t m_count;
+    size_t m_capacity;
+    node_type *m_nodes;
+};
+
+template <typename K, typename V>
+using inplace_map = node_map<K, V, null_realloc>;
+
+} // namespace util
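An aside for readers new to robin-hood hashing: an entry's probe-sequence length (PSL) is how far it sits past its ideal slot. insert() above steals a slot whenever the incoming entry has probed further than the resident one, and erase() shifts later entries back until it finds one already at its ideal slot. A tiny sketch of the wrap-around arithmetic psl() performs, with the ideal slot passed in directly instead of recomputed from the hash, and an assumed 8-slot table:

    #include <cassert>
    #include <cstddef>

    // Mirrors node_map::psl(): distance from a key's ideal slot to where it
    // actually lives, in a power-of-two table (here capacity = 8).
    static size_t psl(size_t ideal_slot, size_t actual_slot, size_t capacity) {
        return (actual_slot + capacity - ideal_slot) & (capacity - 1);
    }

    int main() {
        const size_t cap = 8;
        assert(psl(3, 3, cap) == 0);  // stored in its ideal slot
        assert(psl(3, 5, cap) == 2);  // displaced two slots forward
        assert(psl(6, 1, cap) == 3);  // probing wrapped around the table end
        return 0;
    }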
diff --git a/src/user/test_runner/tests/constexpr_hash.cpp b/src/user/test_runner/tests/constexpr_hash.cpp
index fedec81..4c92692 100644
--- a/src/user/test_runner/tests/constexpr_hash.cpp
+++ b/src/user/test_runner/tests/constexpr_hash.cpp
@@ -1,6 +1,10 @@
 #include <util/hash.h>
 #include "test_case.h"
 
+constexpr static uint64_t hash1_expected = 0x3d8342e701016873;
+constexpr static uint64_t hash3_expected = 0xf0ac589d837f11b8;
+constexpr static uint64_t hash4_expected = 0x034742bc87c5c1bc;
+
 class hash_tests :
     public test::fixture
 {
@@ -8,19 +12,17 @@ class hash_tests :
 
 TEST_CASE( hash_tests, equality_test64 )
 {
-    const auto hash1 = static_cast("hash1!"_id);
-    CHECK( hash1 == 210, "hash gave unexpected value");
+    const uint64_t hash1 = "hash1!"_id;
+    const uint64_t hash2 = "hash1!"_id;
+    const uint64_t hash3 = "not hash1!"_id;
+    const uint64_t hash4 = "another thing that's longer"_id;
 
-    const auto hash2 = static_cast("hash1!"_id);
-    CHECK(hash1 == hash2, "hashes of equal strings should be equal");
-
-    const auto hash3 = static_cast("not hash1!"_id);
-    CHECK(hash1 != hash3, "hashes of different strings should not be equal");
-    CHECK(hash3 == 37, "hash gave unexpected value");
-
-    const auto hash4 = static_cast("another thing that's longer"_id);
-    CHECK(hash1 != hash4, "hashes of different strings should not be equal");
-    CHECK(hash4 == 212, "hash gave unexpected value");
+    CHECK( hash1 == hash1_expected, "hash gave unexpected value");
+    CHECK( hash1 == hash2, "hashes of equal strings should be equal");
+    CHECK( hash1 != hash3, "hashes of different strings should not be equal");
+    CHECK( hash3 == hash3_expected, "hash gave unexpected value");
+    CHECK( hash1 != hash4, "hashes of different strings should not be equal");
+    CHECK( hash4 == hash4_expected, "hash gave unexpected value");
 }
 
 TEST_CASE( hash_tests, equality_test8 )
diff --git a/src/user/test_runner/tests/map.cpp b/src/user/test_runner/tests/map.cpp
index 509d5b1..576eb91 100644
--- a/src/user/test_runner/tests/map.cpp
+++ b/src/user/test_runner/tests/map.cpp
@@ -1,5 +1,6 @@
 #include <vector>
 #include <util/map.h>
+#include <util/node_map.h>
 #include "test_case.h"
 #include "test_rng.h"
@@ -9,9 +10,59 @@ struct map_tests :
 {
 };
 
+struct map_item
+{
+    uint64_t key;
+    uint64_t value;
+};
+
+uint64_t & get_map_key(map_item &mi) { return mi.key; }
+
+TEST_CASE( map_tests, node_map )
+{
+    util::node_map<uint64_t, map_item> map;
+    map.insert({12, 14});
+    map.insert({13, 15});
+    map.insert({14, 16});
+    map.insert({15, 17});
+    map.insert({16, 18});
+    map.insert({20, 22});
+    map.insert({24, 26});
+
+    CHECK( map.count() == 7, "Map returned incorrect count()" );
+
+    auto *item = map.find(12);
+    CHECK( item, "Did not find inserted item" );
+    CHECK( item && item->key == 12 && item->value == 14,
+        "Found incorrect item" );
+
+    item = map.find(40);
+    CHECK( !item, "Found non-inserted item" );
+
+    bool found = map.erase(12);
+    CHECK( found, "Failed to delete inserted item" );
+
+    item = map.find(12);
+    CHECK( !item, "Found item after delete" );
+
+    // Force the map to grow
+    map.insert({35, 38});
+    map.insert({36, 39});
+    map.insert({37, 40});
+
+    CHECK( map.count() == 9, "Map returned incorrect count()" );
+
+    item = map.find(13);
+    CHECK( item, "Did not find inserted item after grow()" );
+    CHECK( item && item->key == 13 && item->value == 15,
+        "Found incorrect item after grow()" );
+}
+
 TEST_CASE( map_tests, insert )
 {
     test::rng rng {12345};
+    double foo = 1.02345;
+    foo *= 4.6e4;
 
     std::vector ints;
     for (int i = 0; i < 1000; ++i)