12 changes: 4 additions & 8 deletions include/bitcoin/database/impl/primitives/hashmap.ipp
@@ -555,16 +555,12 @@ bool CLASS::write(Link& previous, const memory_ptr& ptr, const Link& link,
return false;

// Commit element to search (terminal is a valid bucket index).
bool collision{};
if (!head_.push(collision, link, next, key))
bool search{};
if (!head_.push(search, link, next, key))
return false;

// If filter collision set previous stack head for conflict resolution.
if (collision)
previous = next;
else
previous = Link::terminal;

// If collision set previous stack head for conflict resolution search.
previous = search ? Link{ next } : Link{};
return true;
}

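The rewritten tail of write() folds the old collision branch into a single conditional assignment: on a filter collision the caller receives the prior stack head in previous, otherwise a default-constructed (terminal) link. A minimal sketch of that contract follows, assuming, as the new code implies, that a default-constructed Link carries the terminal sentinel; the Link type here is a hypothetical stand-in, not the template's actual linkage class.

#include <cassert>
#include <cstdint>

// Hypothetical stand-in for the linkage type: a default-constructed value is the
// terminal sentinel, matching the new "previous = search ? Link{ next } : Link{};".
struct Link
{
    static constexpr uint64_t terminal = ~0ull;
    uint64_t value{ terminal };
};

// Mirrors the rewritten tail of write(): a collision hands back the prior stack head.
Link previous_after_push(bool collision, uint64_t next)
{
    return collision ? Link{ next } : Link{};
}

int main()
{
    // No collision: the caller sees terminal and skips the conflict-resolution search.
    assert(previous_after_push(false, 7).value == Link::terminal);

    // Collision: the caller receives the prior stack head and begins the search there.
    assert(previous_after_push(true, 7).value == 7u);
    return 0;
}
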
81 changes: 50 additions & 31 deletions include/bitcoin/database/impl/query/archive_write.ipp
@@ -91,10 +91,11 @@
code CLASS::set_code(const tx_link& tx_fk, const transaction& tx) NOEXCEPT
{
// This is the only multitable write query (except initialize/genesis).
using namespace system;

if (tx.is_empty())
return error::tx_empty;

using namespace system;
using ix = linkage<schema::index>;
const auto& ins = tx.inputs_ptr();
const auto& ous = tx.outputs_ptr();
@@ -105,24 +106,31 @@ code CLASS::set_code(const tx_link& tx_fk, const transaction& tx) NOEXCEPT
// ========================================================================
const auto scope = store_.get_transactor();

// If dirty we must guard against duplicates.
const auto dirty = store_.is_dirty();

// Allocate contiguously and store inputs.
input_link in_fk{};
if (!store_.input.put_link(in_fk, table::input::put_ref{ {}, tx }))
if (!store_.input.put_link(in_fk,
table::input::put_ref{ {}, tx }))
return error::tx_input_put;

// Allocate contiguously and store outputs.
output_link out_fk{};
if (!store_.output.put_link(out_fk, table::output::put_ref{ {}, tx_fk, tx }))
if (!store_.output.put_link(out_fk,
table::output::put_ref{ {}, tx_fk, tx }))
return error::tx_output_put;

// Allocate and contiguously store input links.
ins_link ins_fk{};
if (!store_.ins.put_link(ins_fk, table::ins::put_ref{ {}, in_fk, tx_fk, tx }))
if (!store_.ins.put_link(ins_fk,
table::ins::put_ref{ {}, in_fk, tx_fk, tx }))
return error::tx_ins_put;

// Allocate and contiguously store output links.
outs_link outs_fk{};
if (!store_.outs.put_link(outs_fk, table::outs::put_ref{ {}, out_fk, tx }))
if (!store_.outs.put_link(outs_fk,
table::outs::put_ref{ {}, out_fk, tx }))
return error::tx_outs_put;

// Create tx record.
@@ -148,41 +156,54 @@ code CLASS::set_code(const tx_link& tx_fk, const transaction& tx) NOEXCEPT
return error::tx_point_allocate;

for (const auto& in: *ins)
if (!store_.point.put(ins_fk++, in->point(), table::point::record{}))
if (!store_.point.put(ins_fk++, in->point(),
table::point::record{}))
return error::tx_null_point_put;
}
else
{
// Expand synchronizes keys with ins_fk, entries dropped into same offset.
// Expand synchronizes keys with ins_fk, entries set into same offset.
// Allocate contiguous points (at sequential keys matching ins_fk).
if (!store_.point.expand(ins_fk + inputs))
return error::tx_point_allocate;

// Collect duplicates to store in duplicate table.
std::vector<chain::point> twins{};
auto ptr = store_.point.get_memory();

// This must be set after tx.set and before tx.commit, since searchable and
// produces an association to tx.link, and is also an integral part of tx.
for (const auto& in: *ins)
// Must be set after tx.set and before tx.commit, since searchable and
// produces association to tx.link, and is also an integral part of tx.
if (dirty)
{
bool duplicate{};
if (!store_.point.put(duplicate, ptr, ins_fk++, in->point(),
table::point::record{}))
return error::tx_point_put;

if (duplicate)
twins.push_back(in->point());
// Collect duplicates to store in duplicate table.
std::vector<chain::point> twins{};
auto ptr = store_.point.get_memory();
for (const auto& in: *ins)
{
bool duplicate{};
if (!store_.point.put(duplicate, ptr, ins_fk++, in->point(),
table::point::record{}))
return error::tx_point_put;

if (duplicate)
twins.push_back(in->point());
}

ptr.reset();

// As few duplicates are expected, duplicate domain is only 2^16.
// Return of tx_duplicate_put implies link domain has overflowed.
for (const auto& twin: twins)
if (!store_.duplicate.exists(twin))
if (!store_.duplicate.put(twin, table::duplicate::record{}))
return error::tx_duplicate_put;
}
else
{
auto ptr = store_.point.get_memory();
for (const auto& in: *ins)
if (!store_.point.put(ptr, ins_fk++, in->point(),
table::point::record{}))
return error::tx_point_put;

ptr.reset();

// As few duplicates are expected, the duplicate domain is only 2^16.
// Return of tx_duplicate_put implies that the link domain has overflowed.
for (const auto& twin: twins)
if (!store_.duplicate.exists(twin))
if (!store_.duplicate.put(twin, table::duplicate::record{}))
return error::tx_duplicate_put;
ptr.reset();
}
}

// Commit address index records (hashmap).
@@ -361,8 +382,6 @@ code CLASS::set_code(const block& block, const header_link& key,

code ec{};
auto fk = tx_fks;

// Each tx is set under a distinct transactor.
for (const auto& tx: *block.transactions_ptr())
if ((ec = set_code(fk++, *tx)))
return ec;
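
The substance of this change is that duplicate detection on point puts now runs only when the store reports itself dirty (opened over pre-existing data or remapped after a fault); a clean, freshly created store takes a plain put path with no duplicate bookkeeping. A compressed sketch of that control flow, assuming the two tables are reduced to in-memory sets and put_checked() stands in for the collision-reporting store_.point.put(...) overload:

#include <cstdint>
#include <unordered_set>
#include <vector>

using point_key = uint64_t;            // stand-in for chain::point

struct tables
{
    std::unordered_set<point_key> points{};
    std::unordered_set<point_key> duplicates{};

    // Plain put, no collision reporting.
    bool put(const point_key& key)
    {
        points.insert(key);
        return true;
    }

    // Collision-reporting put: duplicate is true when the key already existed.
    bool put_checked(bool& duplicate, const point_key& key)
    {
        duplicate = !points.insert(key).second;
        return true;
    }
};

bool store_points(tables& store, bool dirty, const std::vector<point_key>& ins)
{
    if (!dirty)
    {
        // Clean store: no pre-existing keys are possible, so skip detection.
        for (const auto& in: ins)
            if (!store.put(in))
                return false;

        return true;
    }

    // Dirty store: a prior unclean run may already hold some of these points,
    // so detect collisions and record them in the (small) duplicate table.
    std::vector<point_key> twins{};
    for (const auto& in: ins)
    {
        bool duplicate{};
        if (!store.put_checked(duplicate, in))
            return false;

        if (duplicate)
            twins.push_back(in);
    }

    for (const auto& twin: twins)
        if (store.duplicates.find(twin) == store.duplicates.end())
            store.duplicates.insert(twin);

    return true;
}

int main()
{
    tables store{};
    store.points.insert(42);                        // simulate a leftover point
    return store_points(store, true, { 41, 42 })    // dirty path detects the twin
        && store.duplicates.count(42) == 1 ? 0 : 1;
}
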
10 changes: 5 additions & 5 deletions include/bitcoin/database/impl/query/optional.ipp
@@ -45,9 +45,9 @@ inline code CLASS::parallel_address_transform(std::atomic_bool& cancel,
std::atomic_bool fail{};
std::vector<outpoint> outpoints(links.size());
std::transform(parallel, links.begin(), links.end(), outpoints.begin(),
[&functor, &fail](const auto& link) NOEXCEPT
[&functor, &cancel, &fail](const auto& link) NOEXCEPT
{
return functor(link, fail);
return functor(link, cancel, fail);
});

out.clear();
@@ -96,7 +96,7 @@ code CLASS::get_address_outputs_turbo(std::atomic_bool& cancel, outpoints& out,
return ec;

return parallel_address_transform(cancel, out, links,
[this, &cancel](const auto& link, auto& fail) NOEXCEPT
[this](const auto& link, auto& cancel, auto& fail) NOEXCEPT
{
if (cancel || fail) return outpoint{};
auto outpoint = get_spent(link);
@@ -139,7 +139,7 @@ code CLASS::get_confirmed_unspent_outputs_turbo(std::atomic_bool& cancel,
return ec;

return parallel_address_transform(cancel, out, links,
[this, &cancel](const auto& link, auto& fail) NOEXCEPT
[this](const auto& link, auto& cancel, auto& fail) NOEXCEPT
{
if (cancel || fail || !is_confirmed_unspent(link))
return outpoint{};
@@ -185,7 +185,7 @@ code CLASS::get_minimum_unspent_outputs_turbo(std::atomic_bool& cancel,
return ec;

return parallel_address_transform(cancel, out, links,
[this, &cancel, minimum](const auto& link, auto& fail) NOEXCEPT
[this, minimum](const auto& link, auto& cancel, auto& fail) NOEXCEPT
{
if (cancel || fail || !is_confirmed_unspent(link))
return outpoint{};
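
The lambda changes here stop capturing cancel in each caller and instead thread it through parallel_address_transform, so the functor receives both the cancel and fail flags as parameters. A small standalone sketch of that pattern, assuming a parallel std::transform over links; link, outpoint, and the doubling functor are placeholders, not the query's real types:

#include <algorithm>
#include <atomic>
#include <cstdint>
#include <execution>
#include <vector>

using link = uint32_t;
using outpoint = uint64_t;

template <typename Functor>
std::vector<outpoint> parallel_transform(std::atomic_bool& cancel,
    const std::vector<link>& links, Functor&& functor) noexcept
{
    std::atomic_bool fail{};
    std::vector<outpoint> out(links.size());

    // The transform forwards both flags, so the functor decides when to bail out.
    std::transform(std::execution::par_unseq, links.begin(), links.end(),
        out.begin(), [&functor, &cancel, &fail](const auto& value) noexcept
        {
            return functor(value, cancel, fail);
        });

    return out;
}

int main()
{
    std::atomic_bool cancel{};
    const std::vector<link> links{ 1, 2, 3, 4 };

    // The caller no longer captures cancel itself; it receives it as a parameter.
    const auto out = parallel_transform(cancel, links,
        [](const auto& value, auto& cancel, auto& fail) noexcept
        {
            if (cancel || fail) return outpoint{};
            return outpoint{ value } * 2u;
        });

    return out.size() == links.size() ? 0 : 1;
}
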
12 changes: 11 additions & 1 deletion include/bitcoin/database/impl/store.ipp
@@ -24,6 +24,7 @@
#include <bitcoin/database/boost.hpp>
#include <bitcoin/database/define.hpp>
#include <bitcoin/database/file/file.hpp>
#include <bitcoin/database/tables/schema.hpp>

// TODO: evaluate performance benefits of concurrency.

@@ -542,7 +543,7 @@ code CLASS::reload(const event_handler& handler) NOEXCEPT
}

code ec{ error::success };
const auto reload = [&handler](code& ec, auto& storage,
const auto reload = [&handler, this](code& ec, auto& storage,
table_t table) NOEXCEPT
{
if (!ec)
@@ -552,6 +553,7 @@ code CLASS::reload(const event_handler& handler) NOEXCEPT
{
handler(event_t::load_file, table);
ec = storage.reload();
this->dirty_ = true;
}
}
};
@@ -763,6 +765,8 @@ code CLASS::open_load(const event_handler& handler) NOEXCEPT
load(ec, filter_tx_head_, table_t::filter_tx_head);
load(ec, filter_tx_body_, table_t::filter_tx_body);

// create, open, and restore each invoke open_load.
dirty_ = header_body_.size() > schema::header::minrow;
return ec;
}

@@ -1149,6 +1153,12 @@ const typename CLASS::transactor CLASS::get_transactor() NOEXCEPT
return transactor{ transactor_mutex_ };
}

TEMPLATE
bool CLASS::is_dirty() const NOEXCEPT
{
return dirty_;
}

TEMPLATE
code CLASS::get_fault() const NOEXCEPT
{
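
Taken together, the store now carries a single dirty_ bit: it starts true, open_load() clears it only when the header body holds no more than the genesis row, reload() during fault recovery sets it again, and is_dirty() exposes it. A compact sketch of that lifecycle, assuming the size check reduces to a header row count with a minrow of one; both are assumptions drawn from the diff, not the schema's actual constants:

#include <cstddef>

class store_state
{
public:
    // create, open, and restore all funnel through open_load in the real store.
    void open_load(size_t header_rows) noexcept
    {
        // Only a freshly initialized store (genesis row alone) is considered clean.
        dirty_ = header_rows > minrow_;
    }

    void reload() noexcept
    {
        // A remap/reload after a disk-full style fault marks the store dirty again.
        dirty_ = true;
    }

    bool is_dirty() const noexcept { return dirty_; }

private:
    static constexpr size_t minrow_ = 1;  // assumption: one genesis header row
    bool dirty_{ true };                  // dirty until proven clean by open_load
};

int main()
{
    store_state store{};
    if (!store.is_dirty()) return 1;      // constructed: dirty

    store.open_load(1);                   // genesis only: clean
    if (store.is_dirty()) return 1;

    store.open_load(2);                   // pre-existing headers: dirty
    return store.is_dirty() ? 0 : 1;
}
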
4 changes: 4 additions & 0 deletions include/bitcoin/database/store.hpp
@@ -90,6 +90,9 @@ class store
/// Get a transactor object.
const transactor get_transactor() NOEXCEPT;

/// Determine if the store is non-empty/initialized.
bool is_dirty() const NOEXCEPT;

/// Get first fault code or error::success.
code get_fault() const NOEXCEPT;

@@ -229,6 +232,7 @@ class store
flush_lock flush_lock_;
interprocess_lock process_lock_;
std::shared_timed_mutex transactor_mutex_{};
bool dirty_{ true };

private:
using path = std::filesystem::path;
64 changes: 63 additions & 1 deletion test/store.cpp
@@ -20,7 +20,7 @@
#include "mocks/blocks.hpp"
#include "mocks/map_store.hpp"

// these are the slow tests (mmap)
// these include the slow tests (mmap)

BOOST_FIXTURE_TEST_SUITE(store_tests, test::directory_setup_fixture)

@@ -45,6 +45,68 @@ BOOST_AUTO_TEST_CASE(store__construct__default_configuration__referenced)
BOOST_REQUIRE_EQUAL(&instance.configuration(), &configuration);
}

BOOST_AUTO_TEST_CASE(store__is_dirty__uninitialized__true)
{
const settings configuration{};
store<map> instance{ configuration };
BOOST_REQUIRE(instance.is_dirty());
}

BOOST_AUTO_TEST_CASE(store__is_dirty__initialized__false)
{
settings configuration{};
configuration.path = TEST_DIRECTORY;
store<map> instance{ configuration };
query<store<map>> query_{ instance };
BOOST_REQUIRE(!instance.create(events));
BOOST_REQUIRE(query_.initialize(test::genesis));
BOOST_REQUIRE(!instance.is_dirty());
BOOST_REQUIRE(!instance.close(events));
}

BOOST_AUTO_TEST_CASE(store__is_dirty__open__false)
{
settings configuration{};
configuration.path = TEST_DIRECTORY;
store<map> instance{ configuration };
query<store<map>> query_{ instance };
BOOST_REQUIRE(!instance.create(events));
BOOST_REQUIRE(query_.initialize(test::genesis));
BOOST_REQUIRE(!instance.is_dirty());
BOOST_REQUIRE(!instance.close(events));
}

BOOST_AUTO_TEST_CASE(store__is_dirty__open_add_header__false)
{
settings configuration{};
configuration.path = TEST_DIRECTORY;
store<map> instance{ configuration };
query<store<map>> query_{ instance };
BOOST_REQUIRE(!instance.create(events));
BOOST_REQUIRE(query_.initialize(test::genesis));
BOOST_REQUIRE(query_.set(system::chain::header{}, context{}, false));
BOOST_REQUIRE(!instance.is_dirty());
BOOST_REQUIRE(!instance.close(events));
}

BOOST_AUTO_TEST_CASE(store__is_dirty__open_with_two_headers__true)
{
settings configuration{};
configuration.path = TEST_DIRECTORY;

store<map> instance1{ configuration };
query<store<map>> query1_{ instance1 };
BOOST_REQUIRE(!instance1.create(events));
BOOST_REQUIRE(query1_.initialize(test::genesis));
BOOST_REQUIRE(query1_.set(system::chain::header{}, context{}, false));
BOOST_REQUIRE(!instance1.close(events));

store<map> instance2{ configuration };
BOOST_REQUIRE(!instance2.open(events));
BOOST_REQUIRE(instance2.is_dirty());
BOOST_REQUIRE(!instance2.close(events));
}

BOOST_AUTO_TEST_CASE(store__paths__default_configuration__expected)
{
const settings configuration{};