Merge pull request #573 from evoskuil/master

Add arraymap.reset(), remove dead code, arraymap index guard.

evoskuil authored Feb 25, 2025
2 parents 6b5e24a + 02910ee commit d62bb47
Showing 12 changed files with 151 additions and 245 deletions.
45 changes: 25 additions & 20 deletions include/bitcoin/database/impl/primitives/arrayhead.ipp
@@ -33,13 +33,13 @@ CLASS::arrayhead(storage& head, const Link& buckets) NOEXCEPT
}

TEMPLATE
size_t CLASS::size() const NOEXCEPT
inline size_t CLASS::size() const NOEXCEPT
{
return file_.size();
}

TEMPLATE
size_t CLASS::buckets() const NOEXCEPT
inline size_t CLASS::buckets() const NOEXCEPT
{
const auto count = position_to_link(size()).value;
BC_ASSERT(count < Link::terminal);
@@ -55,8 +55,9 @@ bool CLASS::enabled() const NOEXCEPT
TEMPLATE
inline Link CLASS::index(size_t key) const NOEXCEPT
{
if (key >= buckets())
return {};
// buckets() takes a table lock via file.size().
////if (key >= buckets()) return {};
////BC_ASSERT_MSG(key < buckets(), "index overflow");

// Put index does not validate, allowing for head expansion.
return putter_index(key);
@@ -70,28 +71,32 @@ inline Link CLASS::putter_index(size_t key) const NOEXCEPT
}

TEMPLATE
bool CLASS::create() NOEXCEPT
bool CLASS::clear() NOEXCEPT
{
if (is_nonzero(file_.size()))
const auto ptr = file_.get();
if (!ptr)
return false;

const auto allocation = link_to_position(initial_buckets_);
const auto start = file_.allocate(allocation);
// Retains head size, since head is array not map, and resets body logical
// count to zero, which is picked up in arraymap::reset(). Body file size
// remains unchanged and subject to initialization size at each startup. So
// there is no reduction until restart, which can include config change.
std::fill_n(ptr->data(), size(), system::bit_all<uint8_t>);
return set_body_count(zero);
}

// Guards addition overflow in manager_.get (start must be valid).
if (start == storage::eof)
TEMPLATE
bool CLASS::create() NOEXCEPT
{
if (is_nonzero(size()))
return false;

const auto ptr = file_.get(start);
if (!ptr)
// Guards addition overflow in manager_.get (start must be valid).
if (file_.allocate(link_to_position(initial_buckets_)) == storage::eof)
return false;

BC_ASSERT_MSG(verify(), "unexpected body size");

// std::memset/fill_n have identical performance (on win32).
////std::memset(ptr->data(), system::bit_all<uint8_t>, size());
std::fill_n(ptr->data(), size(), system::bit_all<uint8_t>);
return set_body_count(zero);
BC_ASSERT_MSG(verify(), "unexpected head size");
return clear();
}

TEMPLATE
@@ -104,7 +109,7 @@ TEMPLATE
bool CLASS::get_body_count(Link& count) const NOEXCEPT
{
const auto ptr = file_.get();
if (!ptr || Link::size > file_.size())
if (!ptr || Link::size > size())
return false;

count = array_cast<Link::size>(ptr->data());
Expand All @@ -115,7 +120,7 @@ TEMPLATE
bool CLASS::set_body_count(const Link& count) NOEXCEPT
{
const auto ptr = file_.get();
if (!ptr || Link::size > file_.size())
if (!ptr || Link::size > size())
return false;

array_cast<Link::size>(ptr->data()) = count;
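A minimal sketch of what the new arrayhead::clear() amounts to, for readers skimming the diff. This is illustrative only and not part of the commit: a std::vector stands in for the memory-mapped head file, link_size stands in for Link::size, and the layout is assumed to be a leading body-count word followed by bucket links.

// Illustrative sketch only (not part of the diff): simplified model of clear().
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <vector>

constexpr std::size_t link_size = 4;     // hypothetical stand-in for Link::size
constexpr uint8_t terminal_fill = 0xff;  // system::bit_all<uint8_t>

// Assumed head layout: [ body count | bucket links... ]. The head size is kept,
// every byte is set to 0xff so all bucket links read as terminal, then the
// leading count word is rewritten to zero (the set_body_count(zero) step).
inline bool clear_head(std::vector<uint8_t>& head)
{
    if (head.size() < link_size)
        return false;

    std::fill(head.begin(), head.end(), terminal_fill);
    std::fill(head.begin(), head.begin() + link_size, uint8_t{ 0 });
    return true;
}

As the retained comment notes, only the head and the logical body count change here; reduction of the body itself is left to arraymap::reset(), and the body file size does not shrink until restart.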
14 changes: 8 additions & 6 deletions include/bitcoin/database/impl/primitives/arraymap.ipp
@@ -49,6 +49,14 @@ bool CLASS::close() NOEXCEPT
return head_.set_body_count(body_.count());
}

TEMPLATE
bool CLASS::reset() NOEXCEPT
{
Link count{};
return head_.clear() && head_.get_body_count(count) &&
body_.truncate(count);
}

TEMPLATE
bool CLASS::backup() NOEXCEPT
{
@@ -128,12 +136,6 @@ code CLASS::reload() NOEXCEPT
// query interface
// ----------------------------------------------------------------------------

TEMPLATE
inline bool CLASS::exists(size_t key) const NOEXCEPT
{
return !at(key).is_terminal();
}

TEMPLATE
inline Link CLASS::at(size_t key) const NOEXCEPT
{
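The new reset() is only a few lines, but the ordering matters: the head must be cleared first so that the body count it reports is zero before the body is truncated. A restated sketch against hypothetical Head/Body/Link template parameters (illustrative only, not the repository types):

// Illustrative sketch only (not part of the diff): the sequence reset() performs.
template <typename Head, typename Body, typename Link>
bool reset_table(Head& head, Body& body)
{
    Link count{};
    return head.clear()                // mark all head links terminal, zero the count.
        && head.get_body_count(count)  // read back the now-zero logical count.
        && body.truncate(count);       // drop body records; the file itself shrinks
                                       // only at restart, per the arrayhead comment.
}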
25 changes: 8 additions & 17 deletions include/bitcoin/database/impl/primitives/hashhead.ipp
@@ -36,13 +36,13 @@ CLASS::hashhead(storage& head, const Link& buckets) NOEXCEPT
}

TEMPLATE
size_t CLASS::size() const NOEXCEPT
inline size_t CLASS::size() const NOEXCEPT
{
return link_to_position(buckets_);
}

TEMPLATE
size_t CLASS::buckets() const NOEXCEPT
inline size_t CLASS::buckets() const NOEXCEPT
{
return buckets_;
}
@@ -64,7 +64,7 @@ bool CLASS::create() NOEXCEPT
if (!ptr)
return false;

BC_ASSERT_MSG(verify(), "unexpected body size");
BC_ASSERT_MSG(verify(), "unexpected head size");

// std::memset/fill_n have identical performance (on win32).
////std::memset(ptr->data(), system::bit_all<uint8_t>, allocation);
@@ -105,20 +105,11 @@ inline Link CLASS::index(const Key& key) const NOEXCEPT
{
BC_ASSERT_MSG(is_nonzero(buckets_), "hash table requires buckets");

// TODO: for greater flexibility, inject hash function through template.
if constexpr (Hash)
{
// djb2_hash exhibits very poor uniqueness result for sequential keys.
return system::djb2_hash(key) % buckets_;
}
else
{
// unique_hash assumes sufficient uniqueness in low order key bytes.
return system::unique_hash(key) % buckets_;

// TODO: restrict buckets to power of two and replace modulo above with
// return and(sub1(buckets), unique_hash(key)) [and() is much faster].
}
// unique_hash assumes sufficient uniqueness in low order key bytes.
return system::unique_hash(key) % buckets_;

// TODO: restrict buckets to power of two and replace modulo above with
// return and(sub1(buckets), unique_hash(key)) [and() is much faster].
}

TEMPLATE
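The retained TODO (restrict buckets to a power of two and replace the modulo with a bitwise and) rests on the identity h % b == h & (b - 1) for any nonzero power-of-two b. A small self-contained check, illustrative only and not from the repository:

// Illustrative sketch only (not part of the diff): mask form of the bucket index.
#include <cstddef>
#include <cstdint>

constexpr std::size_t bucket_index(uint64_t hash, std::size_t buckets) noexcept
{
    // Assumes buckets is a nonzero power of two.
    return hash & (buckets - 1u);
}

static_assert(bucket_index(42u, 16) == 42u % 16, "mask must match modulo");
static_assert(bucket_index(0xdeadbeefu, 1024) == 0xdeadbeefu % 1024,
    "mask must match modulo");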
9 changes: 6 additions & 3 deletions include/bitcoin/database/primitives/arrayhead.hpp
@@ -41,23 +41,26 @@ class arrayhead
arrayhead(storage& head, const Link& buckets) NOEXCEPT;

/// Sizing is dynamic (thread safe).
size_t size() const NOEXCEPT;
size_t buckets() const NOEXCEPT;
inline size_t size() const NOEXCEPT;
inline size_t buckets() const NOEXCEPT;

/// Configure initial buckets to zero to disable the table.
bool enabled() const NOEXCEPT;

/// Create from empty head file (not thread safe).
bool create() NOEXCEPT;

/// Clear the existing index of all links.
bool clear() NOEXCEPT;

/// False if head file size incorrect (not thread safe).
bool verify() const NOEXCEPT;

/// Unsafe if verify false (not thread safe).
bool get_body_count(Link& count) const NOEXCEPT;
bool set_body_count(const Link& count) NOEXCEPT;

/// Convert natural key to head bucket index (validated).
/// Convert natural key to head bucket index (unvalidated).
inline Link index(size_t key) const NOEXCEPT;

/// Convert natural key to head bucket index (unvalidated).
8 changes: 3 additions & 5 deletions include/bitcoin/database/primitives/arraymap.hpp
@@ -50,6 +50,7 @@ class arraymap

bool create() NOEXCEPT;
bool close() NOEXCEPT;
bool reset() NOEXCEPT;
bool backup() NOEXCEPT;
bool restore() NOEXCEPT;
bool verify() const NOEXCEPT;
@@ -87,13 +88,10 @@ class arraymap
/// Query interface, iterator is not thread safe.
/// -----------------------------------------------------------------------

/// True if an instance of object with key exists.
inline bool exists(size_t key) const NOEXCEPT;

/// Return element link at key or terminal if not found/error.
/// Return element link at key, terminal if not found/error (unverified).
inline Link at(size_t key) const NOEXCEPT;

/// Get first element matching the search key, false if not found/error.
/// Get first element matching key, false if not found/error (unverified).
template <typename Element, if_equal<Element::size, Size> = true>
inline bool at(size_t key, Element& element) const NOEXCEPT;

10 changes: 5 additions & 5 deletions include/bitcoin/database/primitives/hashhead.hpp
@@ -27,7 +27,7 @@
namespace libbitcoin {
namespace database {

template <typename Link, typename Key, bool Hash>
template <typename Link, typename Key>
class hashhead
{
public:
@@ -39,8 +39,8 @@ class hashhead
hashhead(storage& head, const Link& buckets) NOEXCEPT;

/// Sizing (thread safe).
size_t size() const NOEXCEPT;
size_t buckets() const NOEXCEPT;
inline size_t size() const NOEXCEPT;
inline size_t buckets() const NOEXCEPT;

/// Create from empty head file (not thread safe).
bool create() NOEXCEPT;
@@ -86,8 +86,8 @@ class hashhead
} // namespace database
} // namespace libbitcoin

#define TEMPLATE template <typename Link, typename Key, bool Hash>
#define CLASS hashhead<Link, Key, Hash>
#define TEMPLATE template <typename Link, typename Key>
#define CLASS hashhead<Link, Key>

#include <bitcoin/database/impl/primitives/hashhead.ipp>

10 changes: 5 additions & 5 deletions include/bitcoin/database/primitives/hashmap.hpp
@@ -35,7 +35,7 @@ namespace database {
/// Readers and writers are always prepositioned at data, and are limited to
/// the extent the record/slab size is known (limit can always be removed).
/// Streams are always initialized from first element byte up to file limit.
template <typename Link, typename Key, size_t Size, bool Hash>
template <typename Link, typename Key, size_t Size>
class hashmap
{
public:
@@ -196,7 +196,7 @@ class hashmap
static constexpr auto key_size = array_count<Key>;
static constexpr auto index_size = Link::size + key_size;

using head = database::hashhead<Link, Key, Hash>;
using head = database::hashhead<Link, Key>;
using body = database::manager<Link, Key, Size>;

// Thread safe (index/top/push).
@@ -209,13 +209,13 @@

template <typename Element>
using hash_map = hashmap<linkage<Element::pk>, system::data_array<Element::sk>,
Element::size, Element::hash_function>;
Element::size>;

} // namespace database
} // namespace libbitcoin

#define TEMPLATE template <typename Link, typename Key, size_t Size, bool Hash>
#define CLASS hashmap<Link, Key, Size, Hash>
#define TEMPLATE template <typename Link, typename Key, size_t Size>
#define CLASS hashmap<Link, Key, Size>

#include <bitcoin/database/impl/primitives/hashmap.ipp>

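With the trailing Hash parameter removed from hashmap (and hashhead), a table element type no longer declares hash_function; pk, sk and size are enough. A hypothetical element for illustration only (names and sizes below are not from the schema):

// Illustrative sketch only (not part of the diff): element shape after this change.
#include <cstddef>

struct example_element
{
    static constexpr std::size_t pk = 4;    // link (primary key) width in bytes.
    static constexpr std::size_t sk = 32;   // search key width in bytes.
    static constexpr std::size_t size = 65; // record size in bytes.
};

// hash_map<example_element> now expands to
// hashmap<linkage<4>, system::data_array<32>, 65>, with no trailing bool.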
9 changes: 0 additions & 9 deletions include/bitcoin/database/tables/schema.hpp
@@ -133,7 +133,6 @@ namespace schema
// record hashmap
struct header
{
static constexpr bool hash_function = false;
static constexpr size_t pk = schema::block;
static constexpr size_t sk = schema::hash;
static constexpr size_t minsize =
@@ -157,7 +156,6 @@
// record hashmap
struct transaction
{
static constexpr bool hash_function = false;
static constexpr size_t pk = schema::tx;
static constexpr size_t sk = schema::hash;
static constexpr size_t minsize =
@@ -209,7 +207,6 @@
// record multimap
struct point
{
static constexpr bool hash_function = false;
static constexpr size_t pk = schema::ins_;
static constexpr size_t sk = schema::hash + schema::index;
static constexpr size_t minsize = zero;
@@ -252,7 +249,6 @@
// slab hashmap
struct txs
{
static constexpr bool hash_function = false;
static constexpr size_t pk = schema::txs_;
static constexpr size_t sk = schema::header::pk;
static constexpr size_t minsize =
@@ -286,7 +282,6 @@
// address record count is output count.
struct address
{
static constexpr bool hash_function = false;
static constexpr size_t pk = schema::outs_;
static constexpr size_t sk = schema::hash;
static constexpr size_t minsize = schema::put;
@@ -300,7 +295,6 @@
// record hashmap
struct strong_tx
{
static constexpr bool hash_function = false;
static constexpr size_t pk = schema::tx;
static constexpr size_t sk = schema::transaction::pk;
static constexpr size_t minsize =
@@ -333,7 +327,6 @@
// slab hashmap
struct validated_bk
{
static constexpr bool hash_function = false;
static constexpr size_t pk = schema::bk_slab;
static constexpr size_t sk = schema::header::pk;
static constexpr size_t minsize =
@@ -348,7 +341,6 @@
// slab modest (sk:4) multimap, with low multiple rate.
struct validated_tx
{
static constexpr bool hash_function = false;
static constexpr size_t pk = schema::tx_slab;
static constexpr size_t sk = schema::transaction::pk;
static constexpr size_t minsize =
@@ -368,7 +360,6 @@
// slab hashmap
struct neutrino
{
static constexpr bool hash_function = false;
static constexpr size_t pk = schema::neutrino_;
static constexpr size_t sk = schema::header::pk;
static constexpr size_t minsize =
21 changes: 20 additions & 1 deletion test/primitives/arrayhead.cpp
@@ -88,7 +88,7 @@ BOOST_AUTO_TEST_CASE(arrayhead__get_body_count__created__zero)
BOOST_REQUIRE_EQUAL(count, zero);
}

BOOST_AUTO_TEST_CASE(arrayhead__set_body_count__get__expected)
BOOST_AUTO_TEST_CASE(arrayhead__set_body_count__get_body_count__expected)
{
data_chunk data;
test::chunk_storage store{ data };
@@ -103,6 +103,25 @@ BOOST_AUTO_TEST_CASE(arrayhead__set_body_count__get__expected)
BOOST_REQUIRE_EQUAL(count, expected);
}

BOOST_AUTO_TEST_CASE(arrayhead__clear__get_body_count__zero)
{
data_chunk data;
test::chunk_storage store{ data };
test_header head{ store, buckets };
BOOST_REQUIRE(head.create());

constexpr auto expected = 42u;
BOOST_REQUIRE(head.set_body_count(expected));

link count{};
BOOST_REQUIRE(head.get_body_count(count));
BOOST_REQUIRE_EQUAL(count, expected);

BOOST_REQUIRE(head.clear());
BOOST_REQUIRE(head.get_body_count(count));
BOOST_REQUIRE_EQUAL(count, zero);
}

BOOST_AUTO_TEST_CASE(arrayhead__at__key__terminal)
{
test::chunk_storage store;