template <typename P> class Chronometer
+{
+public:
+ Chronometer() = default;
+ ~Chronometer() = default;
+
+ void start(const P phase, const std::uint16_t iteration, const mx::util::core_set &core_set)
+ {
+ _current_phase = phase;
+ _current_iteration = iteration;
+ _core_set = core_set;
+
+ _perf.start();
+ _start = std::chrono::steady_clock::now();
+ }
+
+ InterimResult stop(const std::uint64_t count_operations)
+ {
+ const auto end = std::chrono::steady_clock::now();
+ _perf.stop();
+
+        const auto milliseconds = std::chrono::duration_cast<std::chrono::milliseconds>(end - _start);
+
+ return {count_operations,
+ _current_phase,
+ _current_iteration,
+ _core_set.size(),
+ milliseconds,
+ _perf.counter(),
+ statistic_map(mx::tasking::profiling::Statistic::Executed),
+ statistic_map(mx::tasking::profiling::Statistic::ExecutedReader),
+ statistic_map(mx::tasking::profiling::Statistic::ExecutedWriter),
+ statistic_map(mx::tasking::profiling::Statistic::Scheduled),
+ statistic_map(mx::tasking::profiling::Statistic::ScheduledOnChannel),
+ statistic_map(mx::tasking::profiling::Statistic::ScheduledOffChannel),
+ statistic_map(mx::tasking::profiling::Statistic::Fill)};
+ }
+
+ void add(PerfCounter &performance_counter) { _perf.add(performance_counter); }
+
+private:
+ std::uint16_t _current_iteration{0U};
+ P _current_phase;
+ mx::util::core_set _core_set;
+ alignas(64) Perf _perf;
+ alignas(64) std::chrono::steady_clock::time_point _start;
+
+    std::unordered_map<std::uint16_t, std::uint64_t> statistic_map(
+ const mx::tasking::profiling::Statistic::Counter counter)
+ {
+        std::unordered_map<std::uint16_t, std::uint64_t> statistics;
+ for (auto i = 0U; i < mx::tasking::runtime::channels(); ++i)
+ {
+ statistics[i] = mx::tasking::runtime::statistic(counter, i);
+ }
+ return statistics;
+ }
+};
+} // namespace benchmark
\ No newline at end of file
diff --git a/repos/mml/src/app/blinktree/benchmark/cores.cpp b/repos/mml/src/app/blinktree/benchmark/cores.cpp
new file mode 100644
index 0000000000..ef700cfd64
--- /dev/null
+++ b/repos/mml/src/app/blinktree/benchmark/cores.cpp
@@ -0,0 +1,100 @@
+#include "cores.h"
+#include <mx/system/topology.h>
+#include <regex>
+#include <sstream>
+
+using namespace benchmark;
+
+Cores::Cores(const std::uint16_t min_cores, const std::uint16_t max_cores, const std::uint16_t steps,
+ const mx::util::core_set::Order order)
+{
+ this->add_for_range(min_cores, max_cores, steps, order);
+}
+
+Cores::Cores(const std::string &cores, const std::uint16_t steps, const mx::util::core_set::Order order)
+{
+ const std::regex single_core_regex("(\\d+)$");
+ const std::regex from_core_regex("(\\d+):$");
+ const std::regex core_range_regex("(\\d+):(\\d+)");
+
+ std::stringstream stream(cores);
+ std::string token;
+ while (std::getline(stream, token, ';'))
+ {
+ std::smatch match;
+
+ if (std::regex_match(token, match, single_core_regex))
+ {
+ const auto core = std::stoi(match[1].str());
+ this->add_for_range(core, core, steps, order);
+ }
+ else if (std::regex_match(token, match, from_core_regex))
+ {
+ this->add_for_range(std::stoi(match[1].str()), mx::system::topology::count_cores(), steps, order);
+ }
+ else if (std::regex_match(token, match, core_range_regex))
+ {
+ this->add_for_range(std::stoi(match[1].str()), std::stoi(match[2].str()), steps, order);
+ }
+ }
+}
+
+void Cores::add_for_range(const std::uint16_t min_cores, const std::uint16_t max_cores, const std::uint16_t steps,
+ const mx::util::core_set::Order order)
+{
+ if (min_cores == 0U || min_cores == max_cores)
+ {
+ this->_core_sets.push_back(mx::util::core_set::build(max_cores, order));
+ }
+ else
+ {
+ auto cores = min_cores;
+ if (cores % steps != 0U)
+ {
+ this->_core_sets.push_back(mx::util::core_set::build(cores, order));
+ cores++;
+ }
+
+ for (auto count_cores = cores; count_cores <= max_cores; count_cores++)
+ {
+ if (count_cores % steps == 0U)
+ {
+ this->_core_sets.push_back(mx::util::core_set::build(count_cores, order));
+ }
+ }
+
+ if (max_cores % steps != 0U)
+ {
+ this->_core_sets.push_back(mx::util::core_set::build(max_cores, order));
+ }
+ }
+}
+
+std::string Cores::dump(const std::uint8_t indent) const
+{
+ std::stringstream stream;
+
+ for (auto i = 0U; i < this->_core_sets.size(); ++i)
+ {
+ if (i > 0U)
+ {
+ stream << "\n";
+ }
+ const auto &core_set = this->_core_sets[i];
+ if (indent > 0U)
+ {
+ stream << std::string(indent, ' ');
+ }
+ stream << core_set.size() << ": " << core_set;
+ }
+ stream << std::flush;
+
+ return stream.str();
+}
+
+namespace benchmark {
+std::ostream &operator<<(std::ostream &stream, const Cores &cores)
+{
+ return stream << cores.dump(0U) << std::endl;
+}
+} // namespace benchmark
\ No newline at end of file
diff --git a/repos/mml/src/app/blinktree/benchmark/cores.h b/repos/mml/src/app/blinktree/benchmark/cores.h
new file mode 100644
index 0000000000..30d3eb532f
--- /dev/null
+++ b/repos/mml/src/app/blinktree/benchmark/cores.h
@@ -0,0 +1,53 @@
+#pragma once
+
+#include <cstdint>
+#include <mx/util/core_set.h>
+#include <ostream>
+#include <string>
+#include <vector>
+
+namespace benchmark {
+/**
+ * Set of core_sets used for a benchmark that should be performed over
+ * different core counts to benchmark scalability.
+ * Can be created from min and max cores (e.g., 1 core to 32 cores) or from a
+ * string identifying the cores (e.g., "1:32").
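+ *
+ * Usage sketch (values are illustrative):
+ *     benchmark::Cores cores{"1:8", 2U, mx::util::core_set::Order::NUMAAware};
+ *     const auto &first_set = cores.next(); // first core_set of the series, here a single core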
+ */
+class Cores
+{
+ friend std::ostream &operator<<(std::ostream &stream, const Cores &cores);
+
+public:
+ Cores(std::uint16_t min_cores, std::uint16_t max_cores, std::uint16_t steps, mx::util::core_set::Order order);
+ Cores(const std::string &cores, std::uint16_t steps, mx::util::core_set::Order order);
+ Cores(Cores &&) noexcept = default;
+
+ ~Cores() = default;
+
+ const mx::util::core_set &next()
+ {
+ const auto current_index = _current_index++;
+ if (current_index < _core_sets.size())
+ {
+ return _core_sets[current_index];
+ }
+
+ return _empty_core_set;
+ }
+
+    [[nodiscard]] const mx::util::core_set &current() const noexcept { return _core_sets[_current_index - 1]; }
+ [[nodiscard]] std::size_t size() const noexcept { return _core_sets.size(); }
+
+ void reset() { _current_index = 0U; }
+
+ [[nodiscard]] std::string dump(std::uint8_t indent) const;
+
+private:
+    std::vector<mx::util::core_set> _core_sets;
+ std::uint16_t _current_index = 0U;
+ const mx::util::core_set _empty_core_set;
+
+ void add_for_range(std::uint16_t min_cores, std::uint16_t max_cores, std::uint16_t steps,
+ mx::util::core_set::Order order);
+};
+} // namespace benchmark
\ No newline at end of file
diff --git a/repos/mml/src/app/blinktree/benchmark/perf.cpp b/repos/mml/src/app/blinktree/benchmark/perf.cpp
new file mode 100644
index 0000000000..366e671854
--- /dev/null
+++ b/repos/mml/src/app/blinktree/benchmark/perf.cpp
@@ -0,0 +1,71 @@
+#include "perf.h"
+
+using namespace benchmark;
+
+/**
+ * Counter "Instructions Retired"
+ * Counts when the last uop of an instruction retires.
+ */
+[[maybe_unused]] PerfCounter Perf::INSTRUCTIONS = {"instr", PERF_TYPE_HARDWARE, PERF_COUNT_HW_INSTRUCTIONS};
+
+/**
+ */
+[[maybe_unused]] PerfCounter Perf::CYCLES = {"cycles", PERF_TYPE_HARDWARE, PERF_COUNT_HW_CPU_CYCLES};
+
+/**
+ */
+[[maybe_unused]] PerfCounter Perf::L1_MISSES = {"l1-miss", PERF_TYPE_HW_CACHE,
+ PERF_COUNT_HW_CACHE_L1D | (PERF_COUNT_HW_CACHE_OP_READ << 8) |
+ (PERF_COUNT_HW_CACHE_RESULT_MISS << 16)};
+
+/**
+ * Counter "LLC Misses"
+ * Accesses to the LLC in which the data is not present(miss).
+ */
+[[maybe_unused]] PerfCounter Perf::LLC_MISSES = {"llc-miss", PERF_TYPE_HARDWARE, PERF_COUNT_HW_CACHE_MISSES};
+
+/**
+ * Counter "LLC Reference"
+ * Accesses to the LLC, in which the data is present(hit) or not present(miss)
+ */
+[[maybe_unused]] PerfCounter Perf::LLC_REFERENCES = {"llc-ref", PERF_TYPE_HARDWARE, PERF_COUNT_HW_CACHE_REFERENCES};
+
+/**
+ * Micro architecture "Skylake"
+ * Counter "CYCLE_ACTIVITY.STALLS_MEM_ANY"
+ * EventSel=A3H,UMask=14H, CMask=20
+ * Execution stalls while memory subsystem has an outstanding load.
+ */
+PerfCounter Perf::STALLS_MEM_ANY = {"memory-stall", PERF_TYPE_RAW, 0x145314a3};
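+
+/*
+ * Reading the raw config values: they pack the event specification from the
+ * comments as (CMask << 24) | flag bits 0x53 (USR|OS|INT|EN) | (UMask << 8) | EventSel.
+ * For example, 0x145314a3 above decomposes into CMask=0x14 (20), UMask=0x14, EventSel=0xA3.
+ */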
+
+/**
+ * Micro architecture "Skylake"
+ * Counter "SW_PREFETCH_ACCESS.NTA"
+ * EventSel=32H,UMask=01H
+ * Number of PREFETCHNTA instructions executed.
+ */
+[[maybe_unused]] PerfCounter Perf::SW_PREFETCH_ACCESS_NTA = {"sw-prefetch-nta", PERF_TYPE_RAW, 0x530132};
+
+/**
+ * Micro architecture "Skylake"
+ * Counter "SW_PREFETCH_ACCESS.T0"
+ * EventSel=32H,UMask=02H
+ * Number of PREFETCHT0 instructions executed.
+ */
+[[maybe_unused]] PerfCounter Perf::SW_PREFETCH_ACCESS_T0 = {"sw-prefetch-t0", PERF_TYPE_RAW, 0x530232};
+
+/**
+ * Micro architecture "Skylake"
+ * Counter "SW_PREFETCH_ACCESS.T1_T2"
+ * EventSel=32H,UMask=04H
+ * Number of PREFETCHT1 or PREFETCHT2 instructions executed.
+ */
+[[maybe_unused]] PerfCounter Perf::SW_PREFETCH_ACCESS_T1_T2 = {"sw-prefetch-t1t2", PERF_TYPE_RAW, 0x530432};
+
+/**
+ * Micro architecture "Skylake"
+ * Counter "SW_PREFETCH_ACCESS.PREFETCHW"
+ * EventSel=32H,UMask=08H
+ * Number of PREFETCHW instructions executed.
+ */
+[[maybe_unused]] PerfCounter Perf::SW_PREFETCH_ACCESS_WRITE = {"sw-prefetch-w", PERF_TYPE_RAW, 0x530832};
\ No newline at end of file
diff --git a/repos/mml/src/app/blinktree/benchmark/perf.h b/repos/mml/src/app/blinktree/benchmark/perf.h
new file mode 100644
index 0000000000..9925d5f964
--- /dev/null
+++ b/repos/mml/src/app/blinktree/benchmark/perf.h
@@ -0,0 +1,157 @@
+#pragma once
+#include <algorithm>
+//#include <asm/unistd.h>
+#include <cstdint>
+//#include <sys/ioctl.h> // TODO: Find Genode equivalent
+#include <linux/perf_event.h>
+#include <string>
+#include <unistd.h>
+#include <vector>
+
+/*
+ * For more performance counters, take a look at the manual from Intel:
+ * https://software.intel.com/sites/default/files/managed/8b/6e/335279_performance_monitoring_events_guide.pdf
+ *
+ * To get event ids from the manual's specification, see libpfm4:
+ * http://www.bnikolic.co.uk/blog/hpc-prof-events.html
+ * Clone, make, and use examples/check_events to generate the event id code from an event:
+ *     ./check_events <event>:<umask>[:c=<cmask>]
+ * Example:
+ *     ./check_events cycle_activity:0x14:c=20
+ */
+
+namespace benchmark {
+
+/**
+ * Represents a Linux Performance Counter.
+ */
+class PerfCounter
+{
+public:
+ PerfCounter(std::string &&name, const std::uint64_t type, const std::uint64_t event_id) : _name(std::move(name))
+ {
+ /*std::memset(&_perf_event_attribute, 0, sizeof(perf_event_attr));
+ _perf_event_attribute.type = type;
+ _perf_event_attribute.size = sizeof(perf_event_attr);
+ _perf_event_attribute.config = event_id;
+ _perf_event_attribute.disabled = true;
+ _perf_event_attribute.inherit = 1;
+ _perf_event_attribute.exclude_kernel = false;
+ _perf_event_attribute.exclude_hv = false;
+ _perf_event_attribute.read_format = PERF_FORMAT_TOTAL_TIME_ENABLED | PERF_FORMAT_TOTAL_TIME_RUNNING;*/
+ }
+
+ ~PerfCounter() = default;
+
+ bool open()
+ {
+ /*_file_descriptor = syscall(__NR_perf_event_open, &_perf_event_attribute, 0, -1, -1, 0);*/
+ return _file_descriptor >= 0;
+ }
+
+ bool start()
+ {
+ //ioctl(_file_descriptor, PERF_EVENT_IOC_RESET, 0);
+ //ioctl(_file_descriptor, PERF_EVENT_IOC_ENABLE, 0);
+ return ::read(_file_descriptor, &_prev, sizeof(read_format)) == sizeof(read_format);
+ }
+
+ bool stop()
+ {
+ //const auto is_read = ::read(_file_descriptor, &_data, sizeof(read_format)) == sizeof(read_format);
+ //ioctl(_file_descriptor, PERF_EVENT_IOC_DISABLE, 0);
+ return false; // is_read;
+ }
+
+ [[nodiscard]] double read() const
+ {
+        const auto multiplexing_correction = static_cast<double>(_data.time_enabled - _prev.time_enabled) /
+                                             static_cast<double>(_data.time_running - _prev.time_running);
+        return static_cast<double>(_data.value - _prev.value) * multiplexing_correction;
+ }
+
+ [[nodiscard]] const std::string &name() const { return _name; }
+ explicit operator const std::string &() const { return name(); }
+
+ bool operator==(const std::string &name) const { return _name == name; }
+
+private:
+ struct read_format
+ {
+ std::uint64_t value = 0;
+ std::uint64_t time_enabled = 0;
+ std::uint64_t time_running = 0;
+ };
+
+ const std::string _name;
+ std::int32_t _file_descriptor = -1;
+ //perf_event_attr _perf_event_attribute{};
+ read_format _prev{};
+ read_format _data{};
+};
+
+/**
+ * Holds a set of performance counters and starts/stops them together.
+ */
+class Perf
+{
+public:
+ [[maybe_unused]] static PerfCounter INSTRUCTIONS;
+ [[maybe_unused]] static PerfCounter CYCLES;
+ [[maybe_unused]] static PerfCounter L1_MISSES;
+    [[maybe_unused]] static PerfCounter LLC_MISSES;
+ [[maybe_unused]] static PerfCounter LLC_REFERENCES;
+ [[maybe_unused]] static PerfCounter STALLED_CYCLES_BACKEND;
+ [[maybe_unused]] static PerfCounter STALLS_MEM_ANY;
+ [[maybe_unused]] static PerfCounter SW_PREFETCH_ACCESS_NTA;
+ [[maybe_unused]] static PerfCounter SW_PREFETCH_ACCESS_T0;
+ [[maybe_unused]] static PerfCounter SW_PREFETCH_ACCESS_T1_T2;
+ [[maybe_unused]] static PerfCounter SW_PREFETCH_ACCESS_WRITE;
+
+ Perf() noexcept = default;
+ ~Perf() noexcept = default;
+
+ bool add(PerfCounter &counter_)
+ {
+ if (counter_.open())
+ {
+ _counter.push_back(counter_);
+ return true;
+ }
+
+ return false;
+ }
+
+ void start()
+ {
+ for (auto &counter_ : _counter)
+ {
+ counter_.start();
+ }
+ }
+
+ void stop()
+ {
+ for (auto &counter_ : _counter)
+ {
+ counter_.stop();
+ }
+ }
+
+ double operator[](const std::string &name) const
+ {
+ auto counter_iterator = std::find(_counter.begin(), _counter.end(), name);
+ if (counter_iterator != _counter.end())
+ {
+ return counter_iterator->read();
+ }
+
+ return 0.0;
+ }
+
+    std::vector<PerfCounter> &counter() { return _counter; }
+
+private:
+    std::vector<PerfCounter> _counter;
+};
+} // namespace benchmark
\ No newline at end of file
diff --git a/repos/mml/src/app/blinktree/benchmark/phase.h b/repos/mml/src/app/blinktree/benchmark/phase.h
new file mode 100644
index 0000000000..7c5a93671a
--- /dev/null
+++ b/repos/mml/src/app/blinktree/benchmark/phase.h
@@ -0,0 +1,9 @@
+#pragma once
+#include <cstdint>
+namespace benchmark {
+enum class phase : std::uint8_t
+{
+ FILL = 0U,
+ MIXED = 1U
+};
+}
\ No newline at end of file
diff --git a/repos/mml/src/app/blinktree/benchmark/string_util.cpp b/repos/mml/src/app/blinktree/benchmark/string_util.cpp
new file mode 100644
index 0000000000..a08c54c610
--- /dev/null
+++ b/repos/mml/src/app/blinktree/benchmark/string_util.cpp
@@ -0,0 +1,15 @@
+#include "string_util.h"
+#include <sstream>
+
+using namespace benchmark;
+
+void string_util::split(const std::string &text, const char delimiter,
+                        const std::function<void(const std::string &)> &callback)
+{
+ std::stringstream stream(text);
+ std::string token;
+ while (std::getline(stream, token, delimiter))
+ {
+ callback(token);
+ }
+}
\ No newline at end of file
diff --git a/repos/mml/src/app/blinktree/benchmark/string_util.h b/repos/mml/src/app/blinktree/benchmark/string_util.h
new file mode 100644
index 0000000000..eaa64fe3dc
--- /dev/null
+++ b/repos/mml/src/app/blinktree/benchmark/string_util.h
@@ -0,0 +1,13 @@
+#pragma once
+
+#include <functional>
+#include <string>
+
+namespace benchmark {
+class string_util
+{
+public:
+    static void split(const std::string &text, char delimiter,
+                      const std::function<void(const std::string &)> &callback);
+};
+} // namespace benchmark
\ No newline at end of file
diff --git a/repos/mml/src/app/blinktree/benchmark/workload.cpp b/repos/mml/src/app/blinktree/benchmark/workload.cpp
new file mode 100644
index 0000000000..3d98355f77
--- /dev/null
+++ b/repos/mml/src/app/blinktree/benchmark/workload.cpp
@@ -0,0 +1,20 @@
+#include "workload.h"
+#include <limits>
+
+using namespace benchmark;
+
+std::pair<std::uint64_t, std::uint64_t> Workload::next(const std::uint64_t count) noexcept
+{
+ const auto index = this->_current_index.fetch_add(count, std::memory_order_relaxed);
+ const auto workload_size = this->_workload_set[this->_current_phase].size();
+
+    return index < workload_size ? std::make_pair(index, std::min(count, workload_size - index))
+                                 : std::make_pair(std::numeric_limits<std::uint64_t>::max(), 0UL);
+}
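+
+// Example: concurrent callers of next(500) each claim a disjoint
+// [index, index + 500) slice of the current phase's workload; once the
+// workload is exhausted, the returned count is 0 and the index is the
+// maximum of std::uint64_t.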
+
+namespace benchmark {
+std::ostream &operator<<(std::ostream &stream, const Workload &workload)
+{
+ return stream << workload._workload_set << std::flush;
+}
+} // namespace benchmark
diff --git a/repos/mml/src/app/blinktree/benchmark/workload.h b/repos/mml/src/app/blinktree/benchmark/workload.h
new file mode 100644
index 0000000000..d790a18c1c
--- /dev/null
+++ b/repos/mml/src/app/blinktree/benchmark/workload.h
@@ -0,0 +1,58 @@
+#pragma once
+
+#include "phase.h"
+#include "workload_set.h"
+#include <atomic>
+#include <cstdint>
+#include <string>
+#include <utility>
+
+namespace benchmark {
+class Workload
+{
+ friend std::ostream &operator<<(std::ostream &stream, const Workload &workload);
+
+public:
+ Workload() noexcept = default;
+ ~Workload() noexcept = default;
+
+ [[maybe_unused]] void build(const std::string &fill_workload_file, const std::string &mixed_workload_file)
+ {
+ _workload_set.build(fill_workload_file, mixed_workload_file);
+ }
+
+ [[maybe_unused]] void build(const std::uint64_t fill_inserts, const std::uint64_t mixed_inserts,
+ const std::uint64_t mixed_lookups, const std::uint64_t mixed_updates,
+ const std::uint64_t mixed_deletes)
+ {
+ _workload_set.build(fill_inserts, mixed_inserts, mixed_lookups, mixed_updates, mixed_deletes);
+ }
+
+ [[maybe_unused]] void shuffle() { _workload_set.shuffle(); }
+
+    std::pair<std::uint64_t, std::uint64_t> next(std::uint64_t count) noexcept;
+
+ [[nodiscard]] std::uint64_t size() const noexcept { return _workload_set[_current_phase].size(); }
+ [[nodiscard]] bool empty() const noexcept { return _workload_set[_current_phase].empty(); }
+ [[nodiscard]] bool empty(const phase phase) const noexcept { return _workload_set[phase].empty(); }
+
+ void reset(const phase phase) noexcept
+ {
+ _current_phase = phase;
+ _current_index = 0;
+ }
+
+ const NumericTuple &operator[](const std::size_t index) const noexcept
+ {
+ return _workload_set[_current_phase][index];
+ }
+ bool operator==(const phase phase) const noexcept { return _current_phase == phase; }
+ explicit operator phase() const noexcept { return _current_phase; }
+
+private:
+ NumericWorkloadSet _workload_set;
+ phase _current_phase = phase::FILL;
+
+ alignas(64) std::atomic_uint64_t _current_index{0U};
+};
+} // namespace benchmark
\ No newline at end of file
diff --git a/repos/mml/src/app/blinktree/benchmark/workload_set.cpp b/repos/mml/src/app/blinktree/benchmark/workload_set.cpp
new file mode 100644
index 0000000000..fbb5413011
--- /dev/null
+++ b/repos/mml/src/app/blinktree/benchmark/workload_set.cpp
@@ -0,0 +1,168 @@
+#include "workload_set.h"
+
+#include <algorithm>
+#include <fstream>
+#include <iostream>
+#include <mutex>
+#include <random>
+#include <thread>
+
+using namespace benchmark;
+
+void NumericWorkloadSet::build(const std::string &fill_workload_file, const std::string &mixed_workload_file)
+{
+    auto parse = [](auto &file_stream, std::vector<NumericTuple> &data_set) -> bool {
+ std::srand(1337);
+ std::string op_name;
+ std::uint64_t key{};
+
+ bool contains_update = false;
+
+ while (file_stream >> op_name >> key)
+ {
+ if (op_name == "INSERT")
+ {
+ contains_update = true;
+ data_set.emplace_back(NumericTuple{NumericTuple::INSERT, key, std::rand()});
+ }
+ else if (op_name == "READ")
+ {
+ data_set.emplace_back(NumericTuple{NumericTuple::LOOKUP, key});
+ }
+ else if (op_name == "UPDATE")
+ {
+ contains_update = true;
+ data_set.emplace_back(NumericTuple{NumericTuple::UPDATE, key, std::rand()});
+ }
+ }
+
+ return contains_update;
+ };
+
+ std::mutex out_mutex;
+ std::thread fill_thread{[this, &out_mutex, &parse, &fill_workload_file]() {
+ std::ifstream fill_file(fill_workload_file);
+ if (fill_file.good())
+ {
+            parse(fill_file, this->_data_sets[static_cast<std::size_t>(phase::FILL)]);
+ }
+ else
+ {
+ std::lock_guard lock{out_mutex};
+ std::cerr << "Could not open workload file '" << fill_workload_file << "'." << std::endl;
+ }
+ }};
+
+ std::thread mixed_thread{[this, &out_mutex, &parse, &mixed_workload_file]() {
+ std::ifstream mixed_file(mixed_workload_file);
+ if (mixed_file.good())
+ {
+ this->_mixed_phase_contains_update =
+                parse(mixed_file, this->_data_sets[static_cast<std::size_t>(phase::MIXED)]);
+ }
+ else
+ {
+ std::lock_guard lock{out_mutex};
+ std::cerr << "Could not open workload file '" << mixed_workload_file << "'." << std::endl;
+ }
+ }};
+
+ fill_thread.join();
+ mixed_thread.join();
+}
+
+void NumericWorkloadSet::build(const std::uint64_t fill_inserts, const std::uint64_t mixed_inserts,
+ const std::uint64_t mixed_lookups, const std::uint64_t mixed_updates,
+ const std::uint64_t mixed_deletes)
+{
+ std::srand(1337);
+    this->_data_sets[static_cast<std::size_t>(phase::FILL)].reserve(fill_inserts);
+    this->_data_sets[static_cast<std::size_t>(phase::MIXED)].reserve(mixed_inserts + mixed_lookups + mixed_updates +
+ mixed_deletes);
+
+ for (auto i = 0U; i < fill_inserts; ++i)
+ {
+        this->_data_sets[static_cast<std::size_t>(phase::FILL)].emplace_back(
+ NumericTuple{NumericTuple::INSERT, i + 1U, std::rand()});
+ }
+
+ this->_mixed_phase_contains_update = mixed_inserts > 0U || mixed_deletes > 0U || mixed_updates > 0U;
+
+ for (auto i = fill_inserts; i < fill_inserts + mixed_inserts; ++i)
+ {
+        this->_data_sets[static_cast<std::size_t>(phase::MIXED)].emplace_back(
+ NumericTuple{NumericTuple::INSERT, i + 1U, std::rand()});
+ }
+
+ for (auto i = 0U; i < mixed_lookups; ++i)
+ {
+        this->_data_sets[static_cast<std::size_t>(phase::MIXED)].push_back(
+            {NumericTuple::LOOKUP, this->_data_sets[static_cast<std::size_t>(phase::FILL)][i % fill_inserts].key()});
+ }
+
+ for (auto i = 0U; i < mixed_updates; ++i)
+ {
+        this->_data_sets[static_cast<std::size_t>(phase::MIXED)].push_back(
+            {NumericTuple::UPDATE, this->_data_sets[static_cast<std::size_t>(phase::FILL)][i % fill_inserts].key(),
+ std::rand()});
+ }
+
+ for (auto i = 0U; i < mixed_deletes; ++i)
+ {
+        this->_data_sets[static_cast<std::size_t>(phase::MIXED)].push_back(
+            {NumericTuple::DELETE, this->_data_sets[static_cast<std::size_t>(phase::FILL)][i % fill_inserts].key()});
+ }
+}
+
+void NumericWorkloadSet::shuffle()
+{
+ std::srand(1337U + 42U);
+ std::random_device random_device;
+ std::mt19937 mersenne_twister_engine(random_device());
+
+    std::shuffle(this->_data_sets[static_cast<std::size_t>(phase::FILL)].begin(),
+                 this->_data_sets[static_cast<std::size_t>(phase::FILL)].end(), mersenne_twister_engine);
+    std::shuffle(this->_data_sets[static_cast<std::size_t>(phase::MIXED)].begin(),
+                 this->_data_sets[static_cast<std::size_t>(phase::MIXED)].end(), mersenne_twister_engine);
+}
+
+std::ostream &NumericWorkloadSet::nice_print(std::ostream &stream, const std::size_t number) noexcept
+{
+ if (number >= 1000000U)
+ {
+ return stream << (number / 1000000U) << "m";
+ }
+
+ if (number >= 1000U)
+ {
+ return stream << (number / 1000U) << "k";
+ }
+
+ return stream << number;
+}
+
+namespace benchmark {
+std::ostream &operator<<(std::ostream &stream, const NumericWorkloadSet &workload)
+{
+ const auto has_fill_and_mixed = workload[phase::FILL].empty() == false && workload[phase::MIXED].empty() == false;
+
+ if (workload[phase::FILL].empty() == false)
+ {
+ stream << "fill: ";
+ NumericWorkloadSet::nice_print(stream, workload[phase::FILL].size());
+ }
+
+ if (has_fill_and_mixed)
+ {
+ stream << " / ";
+ }
+
+ if (workload[phase::MIXED].empty() == false)
+ {
+ stream << (workload._mixed_phase_contains_update ? "mixed: " : "read-only: ");
+ NumericWorkloadSet::nice_print(stream, workload[phase::MIXED].size());
+ }
+
+ return stream << std::flush;
+}
+} // namespace benchmark
diff --git a/repos/mml/src/app/blinktree/benchmark/workload_set.h b/repos/mml/src/app/blinktree/benchmark/workload_set.h
new file mode 100644
index 0000000000..c64dbd0d8b
--- /dev/null
+++ b/repos/mml/src/app/blinktree/benchmark/workload_set.h
@@ -0,0 +1,74 @@
+#pragma once
+
+#include "phase.h"
+#include <array>
+#include <cstdint>
+#include <ostream>
+#include <string>
+#include <vector>
+
+namespace benchmark {
+class NumericTuple
+{
+public:
+ enum Type
+ {
+ INSERT,
+ LOOKUP,
+ UPDATE,
+ DELETE
+ };
+
+ constexpr NumericTuple(const Type type, const std::uint64_t key) : _type(type), _key(key) {}
+ constexpr NumericTuple(const Type type, const std::uint64_t key, const std::int64_t value)
+ : _type(type), _key(key), _value(value)
+ {
+ }
+
+ NumericTuple(NumericTuple &&) noexcept = default;
+ NumericTuple(const NumericTuple &) = default;
+
+ ~NumericTuple() = default;
+
+ NumericTuple &operator=(NumericTuple &&) noexcept = default;
+
+ [[nodiscard]] std::uint64_t key() const { return _key; };
+ [[nodiscard]] std::int64_t value() const { return _value; }
+
+ bool operator==(const Type type) const { return _type == type; }
+
+private:
+ Type _type;
+ std::uint64_t _key;
+ std::int64_t _value = 0;
+};
+
+class NumericWorkloadSet
+{
+ friend std::ostream &operator<<(std::ostream &stream, const NumericWorkloadSet &workload_set);
+
+public:
+ NumericWorkloadSet() = default;
+ ~NumericWorkloadSet() = default;
+
+ void build(const std::string &fill_workload_file, const std::string &mixed_workload_file);
+ void build(std::uint64_t fill_inserts, std::uint64_t mixed_inserts, std::uint64_t mixed_lookups,
+ std::uint64_t mixed_updates, std::uint64_t mixed_deletes);
+ void shuffle();
+
+    [[nodiscard]] const std::vector<NumericTuple> &fill() const noexcept { return _data_sets[0]; }
+    [[nodiscard]] const std::vector<NumericTuple> &mixed() const noexcept { return _data_sets[1]; }
+    const std::vector<NumericTuple> &operator[](const phase phase) const noexcept
+    {
+        return _data_sets[static_cast<std::size_t>(phase)];
+    }
+
+ explicit operator bool() const { return fill().empty() == false || mixed().empty() == false; }
+
+private:
+    std::array<std::vector<NumericTuple>, 2> _data_sets;
+ bool _mixed_phase_contains_update = false;
+
+ static std::ostream &nice_print(std::ostream &stream, std::size_t number) noexcept;
+};
+} // namespace benchmark
\ No newline at end of file
diff --git a/repos/mml/src/app/blinktree/blinktree_benchmark/README.md b/repos/mml/src/app/blinktree/blinktree_benchmark/README.md
new file mode 100644
index 0000000000..06f1770410
--- /dev/null
+++ b/repos/mml/src/app/blinktree/blinktree_benchmark/README.md
@@ -0,0 +1,79 @@
+# BLinkTree Benchmark
+The BLinkTree-benchmark stores `8` byte numeric keys and values.
+Call `./bin/blinktree_benchmark -h` for help and parameters.
+
+## How to generate YCSB workload
+* Workload specifications are given as files in `workloads_specification/`.
+* Call `make ycsb-a` and `make ycsb-c` to generate workloads **A** and **C**.
+* Workload files are stored in `workloads/`
+* Use `./bin/blinktree_benchmark -f <fill_workload_file> <mixed_workload_file>` to pass the desired workload files.
+* Default (if not specified) is `-f workloads/fill_randint_workloada workloads/mixed_randint_workloada`; a full example follows below.
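+
+For example, generating and then running workload **A** (with the `make` targets and paths from the list above):
+
+    make ycsb-a
+    ./bin/blinktree_benchmark 1 -f workloads/fill_randint_workloada workloads/mixed_randint_workloada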
+
+## Important CLI arguments
+* The first argument is the number of cores:
+ * `./bin/blinktree_benchmark 1` for using a single core.
+ * `./bin/blinktree_benchmark 1:24` for using cores `1` up to `24`.
+* `-i <iterations>` specifies the number of repetitions of each workload.
+* `-s <steps>` sets the step width for increasing the number of cores:
+ * `-s 1` will increase the used cores by one (core ids: `0,1,2,3,4,5,6,7,..,23`).
+ * `-s 2` will skip every second core (core ids: `0,1,3,5,7,..23`).
+* `-pd <distance>` specifies the prefetch distance.
+* `-p` or `--perf` will activate performance counters (results will be printed to the console and the output file).
+* `--latched` will enable latches for synchronization (default off).
+* `--exclusive` forces the tasks to access tree nodes exclusively (e.g. by using spinlocks or core-based sequencing) (default off).
+* `--sync4me` will use built-in synchronization selection to choose the matching primitive based on annotations.
+* `-o <file>` will write the results in **json** format to the given file.
+
+## Understanding the output
+Once started, the benchmark prints a summary of the configured cores and the workload:
+
+ core configuration:
+ 1: 0
+ 2: 0 1
+ 4: 0 1 2 3
+    workload: fill: 5m / read-only: 5m
+
+Here, we configured the benchmark to use one to four cores; each line of the core configuration displays the number of cores and the core identifiers.
+
+Afterwards, the benchmark runs and prints the results of every iteration:
+
+ 1 1 0 1478 ms 3.38295e+06 op/s
+ 1 1 1 1237 ms 4.04204e+06 op/s
+ 2 1 0 964 ms 5.18672e+06 op/s
+ 2 1 1 675 ms 7.40741e+06 op/s
+ 4 1 0 935 ms 5.34759e+06 op/s
+ 4 1 1 532 ms 9.3985e+06 op/s
+
+* The first column is the number of used cores.
+* The second column displays the iteration of the benchmark (configured by `-i X`).
+* The third column is the phase identifier: `0` for the initialization phase (inserts only) and `1` for the workload phase (read-only in this example).
+* After that, the elapsed time and the throughput are printed.
+* If `--perf` is enabled, the output is extended by the configured perf counters, labeled like the throughput.
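+
+For example, the first line above reads: one core, iteration `1`, phase `0` (fill), `1478 ms` elapsed, i.e. roughly `3.4` million operations per second.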
+
+## Plot the results
+When using `-o FILE`, the results will be written to the given file, using `JSON` format.
+The plot script `scripts/plot_blinktree_benchmark INPUT_FILE [INPUT_FILE ...]` will aggregate and plot the results using one or more of those `JSON` files.
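+
+For example, to compare two of the runs from the examples below:
+
+    scripts/plot_blinktree_benchmark optimistic.json rwlocked.json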
+
+## Examples
+
+###### Running workload A using optimistic synchronization
+
+ ./bin/blinktree_benchmark 1: -s 2 -i 3 -pd 3 -p -f workloads/fill_randint_workloada workloads/mixed_randint_workloada -o optimistic.json
+
+###### Running workload A using best matching synchronization
+
+ ./bin/blinktree_benchmark 1: -s 2 -i 3 -pd 3 -p --sync4me -f workloads/fill_randint_workloada workloads/mixed_randint_workloada -o sync4me.json
+
+###### Running workload A using reader/writer-locks
+
+ ./bin/blinktree_benchmark 1: -s 2 -i 3 -pd 3 -p --latched -f workloads/fill_randint_workloada workloads/mixed_randint_workloada -o rwlocked.json
+
+###### Running workload A using core-based sequencing
+
+ ./bin/blinktree_benchmark 1: -s 2 -i 3 -pd 3 -p --exclusive -f workloads/fill_randint_workloada workloads/mixed_randint_workloada -o core-sequenced.json
+
+###### Running workload A using spin-locks
+
+ ./bin/blinktree_benchmark 1: -s 2 -i 3 -pd 3 -p --latched --exclusive -f workloads/fill_randint_workloada workloads/mixed_randint_workloada -o spinlocked.json
+
+
\ No newline at end of file
diff --git a/repos/mml/src/app/blinktree/blinktree_benchmark/benchmark.cpp b/repos/mml/src/app/blinktree/blinktree_benchmark/benchmark.cpp
new file mode 100644
index 0000000000..24117ba79f
--- /dev/null
+++ b/repos/mml/src/app/blinktree/blinktree_benchmark/benchmark.cpp
@@ -0,0 +1,199 @@
+#include "benchmark.h"
+#include <fstream>
+#include <iostream>
+#include <mx/tasking/config.h>
+#include <mx/tasking/runtime.h>
+#include <nlohmann/json.hpp>
+
+using namespace application::blinktree_benchmark;
+
+Benchmark::Benchmark(benchmark::Cores &&cores, const std::uint16_t iterations, std::string &&fill_workload_file,
+ std::string &&mixed_workload_file, const bool use_performance_counter,
+ const mx::synchronization::isolation_level node_isolation_level,
+ const mx::synchronization::protocol preferred_synchronization_method,
+ const bool print_tree_statistics, const bool check_tree, std::string &&result_file_name,
+ std::string &&statistic_file_name, std::string &&tree_file_name, const bool profile)
+ : _cores(std::move(cores)), _iterations(iterations), _node_isolation_level(node_isolation_level),
+ _preferred_synchronization_method(preferred_synchronization_method),
+ _print_tree_statistics(print_tree_statistics), _check_tree(check_tree),
+ _result_file_name(std::move(result_file_name)), _statistic_file_name(std::move(statistic_file_name)),
+ _tree_file_name(std::move(tree_file_name)), _profile(profile)
+{
+ if (use_performance_counter)
+ {
+ this->_chronometer.add(benchmark::Perf::CYCLES);
+ this->_chronometer.add(benchmark::Perf::INSTRUCTIONS);
+ this->_chronometer.add(benchmark::Perf::STALLS_MEM_ANY);
+ this->_chronometer.add(benchmark::Perf::SW_PREFETCH_ACCESS_NTA);
+ this->_chronometer.add(benchmark::Perf::SW_PREFETCH_ACCESS_WRITE);
+ }
+
+ std::cout << "core configuration: \n" << this->_cores.dump(2) << std::endl;
+
+ this->_workload.build(fill_workload_file, mixed_workload_file);
+ if (this->_workload.empty(benchmark::phase::FILL) && this->_workload.empty(benchmark::phase::MIXED))
+ {
+ std::exit(1);
+ }
+
+ std::cout << "workload: " << this->_workload << "\n" << std::endl;
+}
+
+void Benchmark::start()
+{
+ // Reset tree.
+ if (this->_tree == nullptr)
+ {
+        this->_tree = std::make_unique<db::index::blinktree::BLinkTree<std::uint64_t, std::int64_t>>(
+ this->_node_isolation_level, this->_preferred_synchronization_method);
+ }
+
+ // Reset request scheduler.
+ if (this->_request_scheduler.empty() == false)
+ {
+ this->_request_scheduler.clear();
+ }
+
+ // Create one request scheduler per core.
+ for (auto core_index = 0U; core_index < this->_cores.current().size(); core_index++)
+ {
+ const auto channel_id = core_index;
+        auto *request_scheduler = mx::tasking::runtime::new_task<RequestSchedulerTask>(
+ 0U, core_index, channel_id, this->_workload, this->_cores.current(), this->_tree.get(), this);
+ mx::tasking::runtime::spawn(*request_scheduler, 0U);
+ this->_request_scheduler.push_back(request_scheduler);
+ }
+ this->_open_requests = this->_request_scheduler.size();
+
+ // Start measurement.
+ if (this->_profile)
+ {
+ mx::tasking::runtime::profile(this->profile_file_name());
+ }
+    this->_chronometer.start(static_cast<benchmark::phase>(this->_workload),
+ this->_current_iteration + 1, this->_cores.current());
+}
+
+const mx::util::core_set &Benchmark::core_set()
+{
+    if (this->_current_iteration == std::numeric_limits<std::uint16_t>::max())
+ {
+ // This is the very first time we start the benchmark.
+ this->_current_iteration = 0U;
+ return this->_cores.next();
+ }
+
+ // Switch from fill to mixed phase.
+ if (this->_workload == benchmark::phase::FILL && this->_workload.empty(benchmark::phase::MIXED) == false)
+ {
+ this->_workload.reset(benchmark::phase::MIXED);
+ return this->_cores.current();
+ }
+ this->_workload.reset(benchmark::phase::FILL);
+
+ // Run the next iteration.
+ if (++this->_current_iteration < this->_iterations)
+ {
+ return this->_cores.current();
+ }
+ this->_current_iteration = 0U;
+
+ // At this point, all phases and all iterations for the current core configuration
+ // are done. Increase the cores.
+ return this->_cores.next();
+}
+
+void Benchmark::requests_finished()
+{
+ const auto open_requests = --this->_open_requests;
+
+ if (open_requests == 0U) // All request schedulers are done.
+ {
+ // Stop and print time (and performance counter).
+ const auto result = this->_chronometer.stop(this->_workload.size());
+ mx::tasking::runtime::stop();
+ std::cout << result << std::endl;
+
+ // Dump results to file.
+ if (this->_result_file_name.empty() == false)
+ {
+ std::ofstream result_file_stream(this->_result_file_name, std::ofstream::app);
+ result_file_stream << result.to_json().dump() << std::endl;
+ }
+
+ // Dump statistics to file.
+ if constexpr (mx::tasking::config::task_statistics())
+ {
+ if (this->_statistic_file_name.empty() == false)
+ {
+ std::ofstream statistic_file_stream(this->_statistic_file_name, std::ofstream::app);
+ nlohmann::json statistic_json;
+ statistic_json["iteration"] = result.iteration();
+ statistic_json["cores"] = result.core_count();
+ statistic_json["phase"] = result.phase();
+ statistic_json["scheduled"] = nlohmann::json();
+                statistic_json["scheduled-on-core"] = nlohmann::json();
+                statistic_json["scheduled-off-core"] = nlohmann::json();
+ statistic_json["executed"] = nlohmann::json();
+ statistic_json["executed-reader"] = nlohmann::json();
+ statistic_json["executed-writer"] = nlohmann::json();
+                statistic_json["fill"] = nlohmann::json();
+ for (auto i = 0U; i < this->_cores.current().size(); i++)
+ {
+ const auto core_id = std::int32_t{this->_cores.current()[i]};
+ const auto core_id_string = std::to_string(core_id);
+ statistic_json["scheduled"][core_id_string] =
+ result.scheduled_tasks(core_id) / double(result.operation_count());
+ statistic_json["scheduled-on-core"][core_id_string] =
+ result.scheduled_tasks_on_core(core_id) / double(result.operation_count());
+ statistic_json["scheduled-off-core"][core_id_string] =
+ result.scheduled_tasks_off_core(core_id) / double(result.operation_count());
+ statistic_json["executed"][core_id_string] =
+ result.executed_tasks(core_id) / double(result.operation_count());
+ statistic_json["executed-reader"][core_id_string] =
+ result.executed_reader_tasks(core_id) / double(result.operation_count());
+ statistic_json["executed-writer"][core_id_string] =
+ result.executed_writer_tasks(core_id) / double(result.operation_count());
+ statistic_json["fill"][core_id_string] =
+ result.worker_fills(core_id) / double(result.operation_count());
+ }
+
+ statistic_file_stream << statistic_json.dump(2) << std::endl;
+ }
+ }
+
+ // Check and print the tree.
+ if (this->_check_tree)
+ {
+ this->_tree->check();
+ }
+
+ if (this->_print_tree_statistics)
+ {
+ this->_tree->print_statistics();
+ }
+
+ const auto is_last_phase =
+ this->_workload == benchmark::phase::MIXED || this->_workload.empty(benchmark::phase::MIXED);
+
+ // Dump the tree.
+ if (this->_tree_file_name.empty() == false && is_last_phase)
+ {
+ std::ofstream tree_file_stream(this->_tree_file_name);
+            tree_file_stream << static_cast<nlohmann::json>(*(this->_tree)).dump() << std::endl;
+ }
+
+        // Delete the tree to free the whole memory.
+ if (is_last_phase)
+ {
+ this->_tree.reset(nullptr);
+ }
+ }
+}
+
+std::string Benchmark::profile_file_name() const
+{
+ return "profiling-" + std::to_string(this->_cores.current().size()) + "-cores" + "-phase-" +
+           std::to_string(static_cast<std::uint16_t>(static_cast<benchmark::phase>(this->_workload))) + "-iteration-" +
+ std::to_string(this->_current_iteration) + ".json";
+}
\ No newline at end of file
diff --git a/repos/mml/src/app/blinktree/blinktree_benchmark/benchmark.h b/repos/mml/src/app/blinktree/blinktree_benchmark/benchmark.h
new file mode 100644
index 0000000000..ae0b789b4b
--- /dev/null
+++ b/repos/mml/src/app/blinktree/blinktree_benchmark/benchmark.h
@@ -0,0 +1,103 @@
+#pragma once
+
+#include "listener.h"
+#include "request_scheduler.h"
+#include <atomic>
+#include <benchmark/chronometer.h>
+#include <benchmark/cores.h>
+#include <benchmark/workload.h>
+#include <cstdint>
+#include <db/index/blinktree/b_link_tree.h>
+#include <memory>
+#include <mx/synchronization/synchronization.h>
+#include <mx/util/core_set.h>
+#include <string>
+#include <vector>
+
+namespace application::blinktree_benchmark {
+/**
+ * Benchmark executing the task-based BLink-Tree.
+ */
+class Benchmark final : public Listener
+{
+public:
+ Benchmark(benchmark::Cores &&, std::uint16_t iterations, std::string &&fill_workload_file,
+ std::string &&mixed_workload_file, bool use_performance_counter,
+ mx::synchronization::isolation_level node_isolation_level,
+ mx::synchronization::protocol preferred_synchronization_method, bool print_tree_statistics,
+ bool check_tree, std::string &&result_file_name, std::string &&statistic_file_name,
+ std::string &&tree_file_name, bool profile);
+
+ ~Benchmark() noexcept override = default;
+
+ /**
+ * @return Core set the benchmark should run in the current iteration.
+ */
+ const mx::util::core_set &core_set();
+
+ /**
+ * Callback for request tasks to notify they are out of
+ * new requests.
+ */
+ void requests_finished() override;
+
+ /**
+ * Starts the benchmark after initialization.
+ */
+ void start();
+
+private:
+ // Collection of cores the benchmark should run on.
+ benchmark::Cores _cores;
+
+ // Number of iterations the benchmark should use.
+ const std::uint16_t _iterations;
+
+ // Current iteration within the actual core set.
+    std::uint16_t _current_iteration = std::numeric_limits<std::uint16_t>::max();
+
+ // Workload to get requests from.
+ benchmark::Workload _workload;
+
+ // Tree to run requests on.
+    std::unique_ptr<db::index::blinktree::BLinkTree<std::uint64_t, std::int64_t>> _tree;
+
+ // The synchronization mechanism to use for tree nodes.
+ const mx::synchronization::isolation_level _node_isolation_level;
+
+ // Preferred synchronization method.
+ const mx::synchronization::protocol _preferred_synchronization_method;
+
+ // If true, the tree statistics (height, number of nodes, ...) will be printed.
+ const bool _print_tree_statistics;
+
+ // If true, the tree will be checked for consistency after each iteration.
+ const bool _check_tree;
+
+ // Name of the file to print results to.
+ const std::string _result_file_name;
+
+ // Name of the file to print further statistics.
+ const std::string _statistic_file_name;
+
+ // Name of the file to serialize the tree to.
+ const std::string _tree_file_name;
+
+ // If true, use idle profiling.
+ const bool _profile;
+
+ // Number of open request tasks; used for tracking the benchmark.
+ alignas(64) std::atomic_uint16_t _open_requests = 0;
+
+ // List of request schedulers.
+    alignas(64) std::vector<RequestSchedulerTask *> _request_scheduler;
+
+ // Chronometer for starting/stopping time and performance counter.
+    alignas(64) benchmark::Chronometer<benchmark::phase> _chronometer;
+
+ /**
+ * @return Name of the file to write profiling results to.
+ */
+ [[nodiscard]] std::string profile_file_name() const;
+};
+} // namespace application::blinktree_benchmark
\ No newline at end of file
diff --git a/repos/mml/src/app/blinktree/blinktree_benchmark/config.h b/repos/mml/src/app/blinktree/blinktree_benchmark/config.h
new file mode 100644
index 0000000000..2144075be8
--- /dev/null
+++ b/repos/mml/src/app/blinktree/blinktree_benchmark/config.h
@@ -0,0 +1,17 @@
+#pragma once
+
+namespace application::blinktree_benchmark {
+class config
+{
+public:
+ /**
+ * @return Number of requests that will be started at a time by the request scheduler.
+ */
+ static constexpr auto batch_size() noexcept { return 500U; }
+
+ /**
+ * @return Number of maximal open requests, system-wide.
+ */
+ static constexpr auto max_parallel_requests() noexcept { return 1500U; }
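+
+    // Example: with 4 cores, each request scheduler may keep at most
+    // max_parallel_requests() / 4 = 375 requests in flight and refills its
+    // local buffer in batches of up to batch_size() = 500 (see request_scheduler.h).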
+};
+} // namespace application::blinktree_benchmark
\ No newline at end of file
diff --git a/repos/mml/src/app/blinktree/blinktree_benchmark/listener.h b/repos/mml/src/app/blinktree/blinktree_benchmark/listener.h
new file mode 100644
index 0000000000..5a911fa6b5
--- /dev/null
+++ b/repos/mml/src/app/blinktree/blinktree_benchmark/listener.h
@@ -0,0 +1,15 @@
+#pragma once
+
+namespace application::blinktree_benchmark {
+/**
+ * The listener will be used to notify the benchmark that request tasks are
+ * done and no more work is available.
+ */
+class Listener
+{
+public:
+ constexpr Listener() = default;
+ virtual ~Listener() = default;
+ virtual void requests_finished() = 0;
+};
+} // namespace application::blinktree_benchmark
\ No newline at end of file
diff --git a/repos/mml/src/app/blinktree/blinktree_benchmark/main.cpp b/repos/mml/src/app/blinktree/blinktree_benchmark/main.cpp
new file mode 100644
index 0000000000..c7bd6d5b49
--- /dev/null
+++ b/repos/mml/src/app/blinktree/blinktree_benchmark/main.cpp
@@ -0,0 +1,190 @@
+#include "benchmark.h"
+#include <argparse/argparse.hpp>
+#include <cstdint>
+#include <iostream>
+#include <libc/component.h>
+#include <mx/system/environment.h>
+#include <mx/tasking/runtime.h>
+#include <mx/util/core_set.h>
+#include <tuple>
+
+using namespace application::blinktree_benchmark;
+
+/**
+ * Instantiates the BLink-Tree benchmark with CLI arguments.
+ * @param count_arguments Number of CLI arguments.
+ * @param arguments Arguments itself.
+ *
+ * @return Instance of the benchmark and parameters for tasking runtime.
+ */
+std::tuple<Benchmark *, std::uint16_t, bool> create_benchmark(int count_arguments, char **arguments);
+
+/**
+ * Starts the benchmark.
+ *
+ * @param count_arguments Number of CLI arguments.
+ * @param arguments Arguments itself.
+ *
+ * @return Return code of the application.
+ */
+int main(int count_arguments, char **arguments)
+{
+    if (mx::system::Environment::is_numa_balancing_enabled())
+    {
+        std::cout << "[Warn] NUMA balancing may be enabled, set '/proc/sys/kernel/numa_balancing' to '0'" << std::endl;
+    }
+
+ auto [benchmark, prefetch_distance, use_system_allocator] = create_benchmark(count_arguments, arguments);
+ if (benchmark == nullptr)
+ {
+ return 1;
+ }
+
+ mx::util::core_set cores{};
+
+ while ((cores = benchmark->core_set()))
+ {
+ mx::tasking::runtime_guard _(use_system_allocator, cores, prefetch_distance);
+ benchmark->start();
+ }
+
+ delete benchmark;
+
+ return 0;
+}
+
+std::tuple<Benchmark *, std::uint16_t, bool> create_benchmark(int count_arguments, char **arguments)
+{
+ // Set up arguments.
+ argparse::ArgumentParser argument_parser("blinktree_benchmark");
+ argument_parser.add_argument("cores")
+ .help("Range of the number of cores (1 for using 1 core, 1: for using 1 up to available cores, 1:4 for using "
+ "cores from 1 to 4).")
+ .default_value(std::string("1"));
+ /* Not used for the moment.
+ argument_parser.add_argument("-c", "--channels-per-core")
+ .help("Number of how many channels used per core.")
+ .default_value(std::uint16_t(1))
+ .action([](const std::string &value) { return std::uint16_t(std::stoi(value)); });
+ */
+ argument_parser.add_argument("-s", "--steps")
+ .help("Steps, how number of cores is increased (1,2,4,6,.. for -s 2).")
+ .default_value(std::uint16_t(2))
+ .action([](const std::string &value) { return std::uint16_t(std::stoi(value)); });
+ argument_parser.add_argument("-i", "--iterations")
+ .help("Number of iterations for each workload")
+ .default_value(std::uint16_t(1))
+ .action([](const std::string &value) { return std::uint16_t(std::stoi(value)); });
+ argument_parser.add_argument("-sco", "--system-core-order")
+ .help("Use systems core order. If not, cores are ordered by node id (should be preferred).")
+ .implicit_value(true)
+ .default_value(false);
+ argument_parser.add_argument("-p", "--perf")
+ .help("Use performance counter.")
+ .implicit_value(true)
+ .default_value(false);
+ argument_parser.add_argument("--exclusive")
+ .help("Are all node accesses exclusive?")
+ .implicit_value(true)
+ .default_value(false);
+ argument_parser.add_argument("--latched")
+ .help("Prefer latch for synchronization?")
+ .implicit_value(true)
+ .default_value(false);
+ argument_parser.add_argument("--olfit")
+ .help("Prefer OLFIT for synchronization?")
+ .implicit_value(true)
+ .default_value(false);
+ argument_parser.add_argument("--sync4me")
+ .help("Let the tasking layer decide the synchronization primitive.")
+ .implicit_value(true)
+ .default_value(false);
+ argument_parser.add_argument("--print-stats")
+ .help("Print tree statistics after every iteration.")
+ .implicit_value(true)
+ .default_value(false);
+ argument_parser.add_argument("--disable-check")
+ .help("Disable tree check while benchmarking.")
+ .implicit_value(true)
+ .default_value(false);
+ argument_parser.add_argument("-f", "--workload-files")
+ .help("Files containing the workloads (workloads/fill workloads/mixed for example).")
+ .nargs(2)
+ .default_value(
+ std::vector{"workloads/fill_randint_workloada", "workloads/mixed_randint_workloada"});
+ argument_parser.add_argument("-pd", "--prefetch-distance")
+ .help("Distance of prefetched data objects (0 = disable prefetching).")
+ .default_value(std::uint16_t(0))
+ .action([](const std::string &value) { return std::uint16_t(std::stoi(value)); });
+ argument_parser.add_argument("--system-allocator")
+ .help("Use the systems malloc interface to allocate tasks (default disabled).")
+ .implicit_value(true)
+ .default_value(false);
+ argument_parser.add_argument("-ot", "--out-tree")
+ .help("Name of the file, the tree will be written in json format.")
+ .default_value(std::string(""));
+ argument_parser.add_argument("-os", "--out-statistics")
+ .help("Name of the file, the task statistics will be written in json format.")
+ .default_value(std::string(""));
+ argument_parser.add_argument("-o", "--out")
+ .help("Name of the file, the results will be written to.")
+ .default_value(std::string(""));
+ argument_parser.add_argument("--profiling")
+ .help("Enable profiling (default disabled).")
+ .implicit_value(true)
+ .default_value(false);
+
+ // Parse arguments.
+ try
+ {
+ argument_parser.parse_args(count_arguments, arguments);
+ }
+ catch (std::runtime_error &e)
+ {
+ std::cout << argument_parser << std::endl;
+ return {nullptr, 0U, false};
+ }
+
+ auto order =
+        argument_parser.get<bool>("-sco") ? mx::util::core_set::Order::Ascending : mx::util::core_set::Order::NUMAAware;
+ auto cores =
+        benchmark::Cores({argument_parser.get<std::string>("cores"), argument_parser.get<std::uint16_t>("-s"), order});
+    auto workload_files = argument_parser.get<std::vector<std::string>>("-f");
+    const auto isolation_level = argument_parser.get<bool>("--exclusive")
+ ? mx::synchronization::isolation_level::Exclusive
+ : mx::synchronization::isolation_level::ExclusiveWriter;
+ auto preferred_synchronization_method = mx::synchronization::protocol::Queue;
+    if (argument_parser.get<bool>("--latched"))
+ {
+ preferred_synchronization_method = mx::synchronization::protocol::Latch;
+ }
+    else if (argument_parser.get<bool>("--olfit"))
+ {
+ preferred_synchronization_method = mx::synchronization::protocol::OLFIT;
+ }
+    else if (argument_parser.get<bool>("--sync4me"))
+ {
+ preferred_synchronization_method = mx::synchronization::protocol::None;
+ }
+
+ // Create the benchmark.
+ auto *benchmark =
+        new Benchmark(std::move(cores), argument_parser.get<std::uint16_t>("-i"), std::move(workload_files[0]),
+                      std::move(workload_files[1]), argument_parser.get<bool>("-p"), isolation_level,
+                      preferred_synchronization_method, argument_parser.get<bool>("--print-stats"),
+                      argument_parser.get<bool>("--disable-check") == false, argument_parser.get<std::string>("-o"),
+                      argument_parser.get<std::string>("-os"), argument_parser.get<std::string>("-ot"),
+                      argument_parser.get<bool>("--profiling"));
+
+    return {benchmark, argument_parser.get<std::uint16_t>("-pd"), argument_parser.get<bool>("--system-allocator")};
+}
+
+void Libc::Component::construct(Libc::Env &env)
+{
+    std::cout << "Starting B-link tree benchmark" << std::endl;
+
+    mx::system::Environment::set_env(&env);
+
+    Libc::with_libc([&]() {
+        // main() expects a mutable argv array; the output flag and its value are separate arguments.
+        static char arg0[] = "blinktree";
+        static char arg1[] = "1:4";
+        static char arg2[] = "-o";
+        static char arg3[] = "/dev/log";
+        static char *argv[] = {arg0, arg1, arg2, arg3};
+        main(4, argv);
+    });
+}
diff --git a/repos/mml/src/app/blinktree/blinktree_benchmark/request_scheduler.h b/repos/mml/src/app/blinktree/blinktree_benchmark/request_scheduler.h
new file mode 100644
index 0000000000..677a29f4d0
--- /dev/null
+++ b/repos/mml/src/app/blinktree/blinktree_benchmark/request_scheduler.h
@@ -0,0 +1,252 @@
+#pragma once
+
+#include "config.h"
+#include "listener.h"
+#include <algorithm>
+#include <benchmark/workload.h>
+#include <cstdint>
+#include <db/index/blinktree/b_link_tree.h>
+#include <db/index/blinktree/insert_value_task.h>
+#include <db/index/blinktree/lookup_task.h>
+#include <db/index/blinktree/update_task.h>
+#include <limits>
+#include <mx/resource/resource_interface.h>
+#include <mx/tasking/runtime.h>
+#include <mx/tasking/task.h>
+#include <mx/util/core_set.h>
+#include <mx/util/reference_counter.h>
+
+namespace application::blinktree_benchmark {
+
+class RequestIndex
+{
+public:
+    static RequestIndex make_finished() { return RequestIndex{std::numeric_limits<std::uint64_t>::max(), 0UL}; }
+ static RequestIndex make_no_new() { return RequestIndex{0UL, 0UL}; }
+
+ RequestIndex(const std::uint64_t index, const std::uint64_t count) noexcept : _index(index), _count(count) {}
+    explicit RequestIndex(std::pair<std::uint64_t, std::uint64_t> &&index_and_count) noexcept
+ : _index(std::get<0>(index_and_count)), _count(std::get<1>(index_and_count))
+ {
+ }
+ RequestIndex(RequestIndex &&) noexcept = default;
+ RequestIndex(const RequestIndex &) = default;
+ ~RequestIndex() noexcept = default;
+
+ RequestIndex &operator=(RequestIndex &&) noexcept = default;
+
+ [[nodiscard]] std::uint64_t index() const noexcept { return _index; }
+ [[nodiscard]] std::uint64_t count() const noexcept { return _count; }
+
+    [[nodiscard]] bool is_finished() const noexcept { return _index == std::numeric_limits<std::uint64_t>::max(); }
+ [[nodiscard]] bool has_new() const noexcept { return _count > 0UL; }
+
+ RequestIndex &operator-=(const std::uint64_t count) noexcept
+ {
+ _count -= count;
+ _index += count;
+ return *this;
+ }
+
+private:
+ std::uint64_t _index;
+ std::uint64_t _count;
+};
+
+/**
+ * The RequestContainer manages the workload and allocates new batches of requests
+ * that will be scheduled by the request scheduler.
+ */
+class RequestContainer
+{
+public:
+ RequestContainer(const std::uint16_t core_id, const std::uint64_t max_open_requests,
+ benchmark::Workload &workload) noexcept
+ : _finished_requests(core_id), _local_buffer(workload.next(config::batch_size())),
+ _max_pending_requests(max_open_requests), _workload(workload)
+ {
+ }
+
+ ~RequestContainer() noexcept = default;
+
+ /**
+ * Allocates the next requests to spawn.
+ *
+     * @return RequestIndex holding the workload index and the number of tuples to request.
+     *         A count of zero means no new requests right now; is_finished() marks the end of the workload.
+ */
+ RequestIndex next() noexcept
+ {
+ const auto finished_requests = _finished_requests.load();
+
+ const auto pending_requests = _scheduled_requests - finished_requests;
+ if (pending_requests >= _max_pending_requests)
+ {
+ // Too many open requests somewhere in the system.
+ return RequestIndex::make_no_new();
+ }
+
+ if (_local_buffer.has_new() == false)
+ {
+ _local_buffer = RequestIndex{_workload.next(config::batch_size())};
+ }
+
+ if (_local_buffer.has_new())
+ {
+ // How many requests can be scheduled without reaching the request limit?
+ const auto free_requests = _max_pending_requests - pending_requests;
+
+ // Try to spawn all free requests, but at least those in the local buffer.
+ const auto count = std::min(free_requests, _local_buffer.count());
+
+ _scheduled_requests += count;
+
+ const auto index = RequestIndex{_local_buffer.index(), count};
+ _local_buffer -= count;
+
+ return index;
+ }
+
+ // Do we have to wait for pending requests or are we finished?
+ return pending_requests > 0UL ? RequestIndex::make_no_new() : RequestIndex::make_finished();
+ }
+
+ /**
+     * Callback after a value was inserted.
+ */
+ void inserted(const std::uint16_t core_id, const std::uint64_t /*key*/, const std::int64_t /*value*/) noexcept
+ {
+ task_finished(core_id);
+ }
+
+ /**
+     * Callback after a value was updated.
+ */
+ void updated(const std::uint16_t core_id, const std::uint64_t /*key*/, const std::int64_t /*value*/) noexcept
+ {
+ task_finished(core_id);
+ }
+
+ /**
+     * Callback after a value was removed.
+ */
+ void removed(const std::uint16_t core_id, const std::uint64_t /*key*/) noexcept { task_finished(core_id); }
+
+ /**
+     * Callback after a value was found.
+ */
+ void found(const std::uint16_t core_id, const std::uint64_t /*key*/, const std::int64_t /*value*/) noexcept
+ {
+ task_finished(core_id);
+ }
+
+ /**
+     * Callback when a value is missing.
+ */
+ void missing(const std::uint16_t core_id, const std::uint64_t /*key*/) noexcept { task_finished(core_id); }
+
+ const benchmark::NumericTuple &operator[](const std::size_t index) const noexcept { return _workload[index]; }
+
+private:
+ // Number of requests finished by tasks.
+ mx::util::reference_counter_64 _finished_requests;
+
+ // Number of tasks scheduled by the owning request scheduler.
+ std::uint64_t _scheduled_requests = 0UL;
+
+    // Local buffer holding request items already claimed from the global workload but not yet scheduled.
+ RequestIndex _local_buffer;
+
+ // Number of requests that can be distributed by this scheduler,
+ // due to system-wide maximal parallel requests.
+ const std::uint64_t _max_pending_requests;
+
+ // Workload to get requests from.
+ benchmark::Workload &_workload;
+
+ /**
+ * Updates the counter of finished requests.
+ */
+ void task_finished(const std::uint16_t core_id) { _finished_requests.add(core_id); }
+};
+
+/**
+ * The RequestSchedulerTask owns its request container and sets up requests for the BLink-Tree.
+ */
+class RequestSchedulerTask final : public mx::tasking::TaskInterface
+{
+public:
+ RequestSchedulerTask(const std::uint16_t core_id, const std::uint16_t channel_id, benchmark::Workload &workload,
+ const mx::util::core_set &core_set,
+                         db::index::blinktree::BLinkTree<std::uint64_t, std::int64_t> *tree, Listener *listener)
+ : _tree(tree), _listener(listener)
+ {
+ this->annotate(mx::tasking::priority::low);
+ this->is_readonly(false);
+
+        const auto container = mx::tasking::runtime::new_resource<RequestContainer>(
+ sizeof(RequestContainer), mx::resource::hint{channel_id}, core_id,
+ config::max_parallel_requests() / core_set.size(), workload);
+ this->annotate(container, sizeof(RequestContainer));
+ }
+
+ ~RequestSchedulerTask() final = default;
+
+ mx::tasking::TaskResult execute(const std::uint16_t core_id, const std::uint16_t channel_id) override
+ {
+ // Get some new requests from the container.
+        auto &request_container = *mx::resource::ptr_cast<RequestContainer>(this->annotated_resource());
+ const auto next_requests = request_container.next();
+
+ if (next_requests.has_new())
+ {
+ for (auto i = next_requests.index(); i < next_requests.index() + next_requests.count(); ++i)
+ {
+ mx::tasking::TaskInterface *task{nullptr};
+ const auto &tuple = request_container[i];
+ if (tuple == benchmark::NumericTuple::INSERT)
+ {
+ task = mx::tasking::runtime::new_task<
+                    db::index::blinktree::InsertValueTask<std::uint64_t, std::int64_t, RequestContainer>>(
+ core_id, tuple.key(), tuple.value(), _tree, request_container);
+ task->is_readonly(_tree->height() > 1U);
+ }
+ else if (tuple == benchmark::NumericTuple::LOOKUP)
+ {
+ task = mx::tasking::runtime::new_task<
+                    db::index::blinktree::LookupTask<std::uint64_t, std::int64_t, RequestContainer>>(
+ core_id, tuple.key(), request_container);
+
+ task->is_readonly(true);
+ }
+ else if (tuple == benchmark::NumericTuple::UPDATE)
+ {
+ task = mx::tasking::runtime::new_task<
+                    db::index::blinktree::UpdateTask<std::uint64_t, std::int64_t, RequestContainer>>(
+ core_id, tuple.key(), tuple.value(), request_container);
+ task->is_readonly(_tree->height() > 1U);
+ }
+
+ task->annotate(_tree->root(), db::index::blinktree::config::node_size() / 4U);
+ mx::tasking::runtime::spawn(*task, channel_id);
+ }
+ }
+ else if (next_requests.is_finished())
+ {
+ // All requests are done. Notify the benchmark and die.
+ _listener->requests_finished();
+            mx::tasking::runtime::delete_resource<RequestContainer>(this->annotated_resource());
+ return mx::tasking::TaskResult::make_remove();
+ }
+
+ return mx::tasking::TaskResult::make_succeed(this);
+ }
+
+private:
+ // The tree to send requests to.
+    db::index::blinktree::BLinkTree<std::uint64_t, std::int64_t> *_tree;
+
+    // Benchmark listener to notify when all requests are done.
+ Listener *_listener;
+};
+} // namespace application::blinktree_benchmark
\ No newline at end of file
diff --git a/repos/mml/src/app/blinktree/db/index/blinktree/b_link_tree.h b/repos/mml/src/app/blinktree/db/index/blinktree/b_link_tree.h
new file mode 100644
index 0000000000..7f14f30cac
--- /dev/null
+++ b/repos/mml/src/app/blinktree/db/index/blinktree/b_link_tree.h
@@ -0,0 +1,366 @@
+#pragma once
+
+#include "config.h"
+#include "node.h"
+#include "node_consistency_checker.h"
+#include "node_iterator.h"
+#include "node_statistics.h"
+#include <cstdint>
+#include <iostream>
+#include <mx/resource/ptr.h>
+#include <mx/synchronization/synchronization.h>
+#include <mx/tasking/runtime.h>
+#include <nlohmann/json.hpp>
+#include <ostream>
+#include <utility>
+
+namespace db::index::blinktree {
+
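+/**
+ * A B-Link-Tree in the style of Lehman and Yao: every node carries a high key
+ * and a link to its right sibling, so an operation that races with a split can
+ * recover by following the sibling link instead of restarting at the root.
+ * Nodes are mx::tasking resources; synchronization is delegated to the tasking
+ * runtime via the given isolation level and preferred protocol.
+ */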
+template <typename K, typename V> class BLinkTree
+{
+public:
+ BLinkTree(const mx::synchronization::isolation_level isolation_level,
+ const mx::synchronization::protocol preferred_synchronization_protocol)
+ : _isolation_level(isolation_level), _preferred_synchronization_protocol(preferred_synchronization_protocol),
+ _root(create_node(NodeType::Leaf, mx::resource::ptr{}, true))
+ {
+ }
+
+    ~BLinkTree() { mx::tasking::runtime::delete_resource<Node<K, V>>(_root); }
+
+ /**
+ * @return Root node of the tree.
+ */
+ [[nodiscard]] mx::resource::ptr root() const { return _root; }
+
+ /**
+ * @return Height of the tree.
+ */
+ [[nodiscard]] std::uint16_t height() const { return _height; }
+
+ /**
+ * @return True, when the tree does not contain any value.
+ */
+ [[nodiscard]] bool empty() const
+ {
+        return static_cast<bool>(_root) == false || _root.template get<Node<K, V>>()->size() == 0;
+ }
+
+ /**
+ * Creates a node of type inner.
+ *
+ * @param is_branch True, when the children of the new inner node will be leaf nodes.
+ * @param parent Parent of the new inner node.
+     * @param is_root True, when the new inner node will be the root.
+ * @return Inner node.
+ */
+ [[nodiscard]] mx::resource::ptr create_inner_node(const bool is_branch, const mx::resource::ptr parent,
+ const bool is_root = false) const
+ {
+ const auto inner_type = is_branch ? NodeType::Inner | NodeType::Branch : NodeType::Inner;
+ return create_node(inner_type, parent, is_root);
+ }
+
+ /**
+ * Creates a node of type leaf.
+ *
+ * @param parent Parent of the new leaf node.
+ * @return Leaf node.
+ */
+ [[nodiscard]] mx::resource::ptr create_leaf_node(const mx::resource::ptr parent) const
+ {
+ return create_node(NodeType::Leaf, parent, false);
+ }
+
+ /**
+ * Creates a new root node, containing two separators (to the left and right).
+ * The new root node will be set in the tree.
+ *
+ * @param left Link to the "smaller" child node.
+ * @param right Link to the "greater" child node.
+ * @param key Separator key.
+ */
+ void create_new_root(mx::resource::ptr left, mx::resource::ptr right, K key);
+
+ /**
+ * Splits an inner node.
+ *
+ * @param inner_node Node to split.
+ * @param key Key to insert after split.
+ * @param separator Separator to insert after split.
+ * @return Pointer and high key of the new node.
+ */
+    std::pair<mx::resource::ptr, K> split(mx::resource::ptr inner_node, K key, mx::resource::ptr separator) const;
+
+ /**
+ * Splits a leaf node.
+ *
+ * @param leaf_node Node to split.
+ * @param key Key to insert after split.
+ * @param value Value to insert after split.
+ * @return Pointer to the leaf node and key for parent.
+ */
+    std::pair<mx::resource::ptr, K> split(mx::resource::ptr leaf_node, K key, V value) const;
+
+ /**
+     * @return Begin iterator for iterating over nodes.
+ */
+    NodeIterator<K, V> begin() const { return NodeIterator<K, V>(mx::resource::ptr_cast<Node<K, V>>(_root)); }
+
+ /**
+ * @return End iterator (aka empty node iterator).
+ */
+    NodeIterator<K, V> end() const { return {}; }
+
+ /**
+ * Checks the consistency of the tree.
+ */
+ void check() const;
+
+ /**
+     * Dumps statistics such as height, number of (inner/leaf) nodes, and number of records.
+ */
+ void print_statistics() const;
+
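+    /**
+     * Serializes the tree to JSON. Example of the resulting shape
+     * (illustrative values):
+     *   {"height": 2, "root": {"channel_id": 0, "is_leaf": false, "size": 1,
+     *    "children": [{"channel_id": 1, "is_leaf": true, "size": 42}, ...]}}
+     */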
+ explicit operator nlohmann::json() const
+ {
+ nlohmann::json out;
+ out["height"] = _height;
+ out["root"] = node_to_json(_root);
+
+ return out;
+ }
+
+protected:
+ // Height of the tree.
+ std::uint8_t _height = 1;
+
+ // Isolation of tasks accessing a node.
+ const mx::synchronization::isolation_level _isolation_level;
+
+ // Select a preferred method for synchronization.
+ const mx::synchronization::protocol _preferred_synchronization_protocol;
+
+ // Pointer to the root.
+ alignas(64) mx::resource::ptr _root;
+
+ /**
+ * Creates a new node.
+ *
+ * @param node_type Type of the node.
+ * @param parent Parent of the node.
+ * @param is_root True, if the new node will be the root.
+ * @return Pointer to the new node.
+ */
+ [[nodiscard]] mx::resource::ptr create_node(const NodeType node_type, const mx::resource::ptr parent,
+ const bool is_root) const
+ {
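+        // The hint bundles the synchronization setup with the predicted usage so the
+        // runtime can choose placement and a synchronization primitive for the node.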
+        const auto is_inner = static_cast<bool>(node_type & NodeType::Inner);
+        return mx::tasking::runtime::new_resource<Node<K, V>>(
+ config::node_size(),
+ mx::resource::hint{_isolation_level, _preferred_synchronization_protocol,
+ predict_access_frequency(is_inner, is_root), predict_read_write_ratio(is_inner)},
+ node_type, parent);
+ }
+
+ /**
+ * Creates a hint for tasking regarding usage of the node.
+ *
+     * @param is_inner True, if the node is an inner node.
+     * @param is_root True, if the node is the root.
+ * @return Hint for usage prediction which will be used for allocating resources.
+ */
+ [[nodiscard]] static mx::resource::hint::expected_access_frequency predict_access_frequency(const bool is_inner,
+ const bool is_root)
+ {
+ if (is_root)
+ {
+ return mx::resource::hint::expected_access_frequency::excessive;
+ }
+
+ if (is_inner)
+ {
+ return mx::resource::hint::expected_access_frequency::high;
+ }
+
+ return mx::resource::hint::expected_access_frequency::normal;
+ }
+
+ /**
+     * Creates a hint for the expected read/write ratio:
+     * inner nodes are written rarely, while leaf
+     * nodes are written noticeably more often.
+ *
+ * @param is_inner True, when the node is an inner node.
+ * @return Predicted read/write ratio.
+ */
+ [[nodiscard]] static mx::resource::hint::expected_read_write_ratio predict_read_write_ratio(const bool is_inner)
+ {
+ return is_inner ? mx::resource::hint::expected_read_write_ratio::heavy_read
+ : mx::resource::hint::expected_read_write_ratio::balanced;
+ }
+
+ /**
+ * Serializes a tree node to json format.
+ *
+ * @param node Node to serialize.
+ * @return JSON representation of the node.
+ */
+ [[nodiscard]] nlohmann::json node_to_json(mx::resource::ptr node) const
+ {
+ auto out = nlohmann::json();
+        auto node_ptr = mx::resource::ptr_cast<Node<K, V>>(node);
+
+ out["channel_id"] = node.channel_id();
+ out["is_leaf"] = node_ptr->is_leaf();
+ out["size"] = node_ptr->size();
+
+ if (node_ptr->is_inner())
+ {
+ auto children = nlohmann::json::array();
+ for (auto i = 0U; i <= node_ptr->size(); ++i)
+ {
+ children.push_back(node_to_json(node_ptr->separator(i)));
+ }
+ out["children"] = children;
+ }
+
+ return out;
+ }
+};
+
+template <typename K, typename V>
+void BLinkTree<K, V>::create_new_root(const mx::resource::ptr left, const mx::resource::ptr right, const K key)
+{
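+    // The former root is demoted: update its predicted access frequency from
+    // the root-level ("excessive") prediction to the non-root one.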
+    const auto is_left_inner = mx::resource::ptr_cast<Node<K, V>>(left)->is_inner();
+ mx::tasking::runtime::modify_predicted_usage(left, predict_access_frequency(is_left_inner, true),
+ predict_access_frequency(is_left_inner, false));
+
+    auto root = this->create_inner_node(mx::resource::ptr_cast<Node<K, V>>(left)->is_leaf(), mx::resource::ptr{}, true);
+
+    left.template get<Node<K, V>>()->parent(root);
+    right.template get<Node<K, V>>()->parent(root);
+
+    root.template get<InnerNode<K, V>>()->separator(0, left);
+    root.template get<InnerNode<K, V>>()->insert(0, right, key);
+
+ this->_height++;
+ this->_root = root;
+}
+
+template <typename K, typename V>
+std::pair<mx::resource::ptr, K> BLinkTree<K, V>::split(const mx::resource::ptr inner_node, const K key,
+                                                       const mx::resource::ptr separator) const
+{
+    constexpr std::uint16_t left_size = InnerNode<K, V>::max_keys / 2;
+    constexpr std::uint16_t right_size = InnerNode<K, V>::max_keys - left_size;
+
+    auto node_ptr = mx::resource::ptr_cast<InnerNode<K, V>>(inner_node);
+
+ K key_up;
+ auto new_inner_node = this->create_inner_node(node_ptr->is_branch(), node_ptr->parent());
+    auto new_node_ptr = mx::resource::ptr_cast<InnerNode<K, V>>(new_inner_node);
+
+ new_node_ptr->high_key(node_ptr->high_key());
+
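+    // Three cases, depending on where the new key falls relative to the median:
+    // (1) left of the median: the median key moves up, the key is inserted left;
+    // (2) exactly between the halves: the key itself is pushed up as separator;
+    // (3) right of the median: the median key moves up, the key is inserted right.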
+ if (key < node_ptr->inner_key(left_size - 1))
+ {
+ node_ptr->move(new_inner_node, left_size, right_size);
+ new_node_ptr->separator(0, node_ptr->separator(left_size));
+ new_node_ptr->size(right_size);
+ node_ptr->size(left_size - 1);
+ key_up = node_ptr->inner_key(left_size - 1);
+ const auto index = node_ptr->index(key);
+        separator.template get<Node<K, V>>()->parent(inner_node);
+ node_ptr->insert(index, separator, key);
+ }
+ else if (key < node_ptr->inner_key(left_size))
+ {
+ node_ptr->move(new_inner_node, left_size, right_size);
+ new_node_ptr->separator(0, separator);
+ key_up = key;
+ node_ptr->size(left_size);
+ new_node_ptr->size(right_size);
+ }
+ else
+ {
+ node_ptr->move(new_inner_node, left_size + 1, right_size - 1);
+ new_node_ptr->separator(0, node_ptr->separator(left_size + 1));
+ node_ptr->size(left_size);
+ new_node_ptr->size(right_size - 1);
+ key_up = node_ptr->inner_key(left_size);
+
+ const auto index = new_node_ptr->index(key);
+ new_node_ptr->insert(index, separator, key);
+ }
+
+ new_node_ptr->right_sibling(node_ptr->right_sibling());
+ node_ptr->right_sibling(new_inner_node);
+ node_ptr->high_key(key_up);
+
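+    // Re-parent all separators that moved to the new node.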
+ for (auto index = 0U; index <= new_node_ptr->size(); ++index)
+ {
+        new_node_ptr->separator(index).template get<Node<K, V>>()->parent(new_inner_node);
+ }
+
+ return {new_inner_node, key_up};
+}
+
+template <typename K, typename V>
+std::pair<mx::resource::ptr, K> BLinkTree<K, V>::split(const mx::resource::ptr leaf_node_ptr, const K key,
+                                                       const V value) const
+{
+    auto *leaf_node = mx::resource::ptr_cast<LeafNode<K, V>>(leaf_node_ptr);
+
+    constexpr std::uint16_t left_size = LeafNode<K, V>::max_items / 2;
+    constexpr std::uint16_t right_size = LeafNode<K, V>::max_items - left_size;
+
+ auto new_leaf_node_ptr = this->create_leaf_node(leaf_node->parent());
+    auto *new_leaf_node = mx::resource::ptr_cast<LeafNode<K, V>>(new_leaf_node_ptr);
+
+ leaf_node->move(new_leaf_node_ptr, left_size, right_size);
+ if (leaf_node->right_sibling() != nullptr)
+ {
+ new_leaf_node->right_sibling(leaf_node->right_sibling());
+ }
+ new_leaf_node->high_key(leaf_node->high_key());
+ new_leaf_node->size(right_size);
+ leaf_node->size(left_size);
+ leaf_node->right_sibling(new_leaf_node_ptr);
+
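+    // Insert the pending record on whichever side of the split now covers its key.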
+ if (key < new_leaf_node->leaf_key(0))
+ {
+ leaf_node->insert(leaf_node->index(key), value, key);
+ }
+ else
+ {
+ new_leaf_node->insert(new_leaf_node->index(key), value, key);
+ }
+
+ leaf_node->high_key(new_leaf_node->leaf_key(0));
+
+ return {new_leaf_node_ptr, new_leaf_node->leaf_key(0)};
+}
+
+template <typename K, typename V> void BLinkTree<K, V>::print_statistics() const
+{
+    NodeStatistics<K, V> statistics(this->height());
+
+ for (auto node : *this)
+ {
+ statistics += node;
+ }
+
+ std::cout << statistics << std::endl;
+}
+
+template <typename K, typename V> void BLinkTree<K, V>::check() const
+{
+ for (auto node : *this)
+ {
+        NodeConsistencyChecker<K, V>::check_and_print_errors(node, std::cerr);
+ }
+}
+} // namespace db::index::blinktree
\ No newline at end of file
diff --git a/repos/mml/src/app/blinktree/db/index/blinktree/config.h b/repos/mml/src/app/blinktree/db/index/blinktree/config.h
new file mode 100644
index 0000000000..964afd76d5
--- /dev/null
+++ b/repos/mml/src/app/blinktree/db/index/blinktree/config.h
@@ -0,0 +1,11 @@
+#pragma once
+
+#include <cstdint>
+
+namespace db::index::blinktree {
+class config
+{
+public:
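+    // Size of a single tree node (inner or leaf) in bytes.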
+ static constexpr auto node_size() { return 1024U; }
+};
+} // namespace db::index::blinktree
\ No newline at end of file
diff --git a/repos/mml/src/app/blinktree/db/index/blinktree/insert_separator_task.h b/repos/mml/src/app/blinktree/db/index/blinktree/insert_separator_task.h
new file mode 100644
index 0000000000..c43a653f06
--- /dev/null
+++ b/repos/mml/src/app/blinktree/db/index/blinktree/insert_separator_task.h
@@ -0,0 +1,63 @@
+#pragma once
+
+#include "b_link_tree.h"
+#include "node.h"
+#include "task.h"
+#include <mx/tasking/task.h>
+
+namespace db::index::blinktree {
+template <typename K, typename V, typename L> class InsertSeparatorTask final : public Task<K, V, L>
+{
+public:
+    constexpr InsertSeparatorTask(const K key, const mx::resource::ptr separator, BLinkTree<K, V> *tree,
+                                  L &listener) noexcept
+        : Task<K, V, L>(key, listener), _tree(tree), _separator(separator)
+ {
+ }
+
+ ~InsertSeparatorTask() override = default;
+
+ mx::tasking::TaskResult execute(std::uint16_t core_id, std::uint16_t channel_id) override;
+
+private:
+    BLinkTree<K, V> *_tree;
+ mx::resource::ptr _separator;
+};
+
+template <typename K, typename V, typename L>
+mx::tasking::TaskResult InsertSeparatorTask