Mirror of https://github.com/mmueller41/genode.git (synced 2026-01-21 12:32:56 +01:00)
Ported blinktree benchmark
79
repos/ealanos/src/app/blinktree_benchmark/README.md
Normal file
@@ -0,0 +1,79 @@
# BLinkTree Benchmark

The BLinkTree benchmark stores `8`-byte numeric keys and values.

Call `./bin/blinktree_benchmark -h` for help and parameters.


## How to generate YCSB workload

* Workloads are specified by the files in `workloads_specification/`.
* Call `make ycsb-a` and `make ycsb-c` to generate workloads **A** and **C**.
* The generated workload files are stored in `workloads/`.
* Use `./bin/blinktree_benchmark -f <fill-file> <mixed-file>` to pass the desired workload (see the sketch after this list).
* The default (if not specified) is `-f workloads/fill_randint_workloada workloads/mixed_randint_workloada`.
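
A minimal end-to-end sketch, assuming that `make ycsb-a` produces the workload A files referenced by the default `-f` argument:

    # generate workload A (assumed to create the fill/mixed files below in workloads/)
    make ycsb-a
    # run the benchmark on 1 up to 4 cores with the generated workload
    ./bin/blinktree_benchmark 1:4 -f workloads/fill_randint_workloada workloads/mixed_randint_workloada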

## Important CLI arguments

* The first argument is the range of cores to use:
    * `./bin/blinktree_benchmark 1` for using a single core.
    * `./bin/blinktree_benchmark 1:24` for scaling from `1` up to `24` cores.
* `-i <NUMBER>` specifies the number of repetitions of each workload.
* `-s <NUMBER>` sets the step width for increasing the number of cores:
    * `-s 1` will increase the used cores by one (core ids: `0,1,2,3,4,5,6,7,..,23`).
    * `-s 2` will skip every second core (core ids: `0,1,3,5,7,..,23`).
* `-pd <NUMBER>` specifies the prefetch distance.
* `-p` or `--perf` will activate performance counters (results are printed to the console and the output file).
* `--latched` will enable latches for synchronization (default: off).
* `--exclusive` forces the tasks to access tree nodes exclusively, e.g. by using spinlocks or core-based sequencing (default: off).
* `--sync4me` will use the built-in synchronization selection to choose the matching primitive based on annotations.
* `-o <FILE>` will write the results in **json** format to the given file.


## Understanding the output

After starting, the benchmark prints a summary of the configured cores and the workload:

    core configuration:
      1: 0
      2: 0 1
      4: 0 1 2 3
    workload: fill: 5m / readonly: 5m

Here, the benchmark is configured to use one to four cores; each line of the core configuration displays the number of cores and the core identifiers.

Afterwards, the benchmark runs and prints the results for every iteration:

    1    1    0    1478 ms    3.38295e+06 op/s
    1    1    1    1237 ms    4.04204e+06 op/s
    2    1    0     964 ms    5.18672e+06 op/s
    2    1    1     675 ms    7.40741e+06 op/s
    4    1    0     935 ms    5.34759e+06 op/s
    4    1    1     532 ms    9.3985e+06 op/s

* The first column is the number of used cores.
* The second column displays the iteration of the benchmark (configured by `-i X`).
* The third column is the phase identifier: `0` for the initialization phase (inserts only) and `1` for the workload phase (read-only here).
* After that, the time and the throughput are written.
* If `--perf` is enabled, the output is extended by labeled performance counters (analogous to the throughput column).
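
For example, the result line `4    1    1    532 ms    9.3985e+06 op/s` reads as: four cores, first iteration, mixed (read-only) phase, finished in `532 ms` at roughly `9.4` million operations per second.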

## Plot the results

When using `-o FILE`, the results are written to the given file in `JSON` format (one record per result line, as sketched below).
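
A sketch of a single record's fields; the field names follow the result serialization in `benchmark/chronometer.h`, the values are illustrative, and labeled per-operation perf-counter fields are added when `--perf` is used:

    { "iteration": 1, "cores": 4, "phase": 1, "throughput": 9.3985e+06 }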
The plot script `scripts/plot_blinktree_benchmark INPUT_FILE [INPUT_FILE ...]` aggregates and plots the results from one or more of those `JSON` files.
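
As an illustration, two of the result files produced by the examples below could be aggregated and plotted like this:

    scripts/plot_blinktree_benchmark optimistic.json spinlocked.json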

## Examples


###### Running workload A using optimistic synchronization

    ./bin/blinktree_benchmark 1: -s 2 -i 3 -pd 3 -p -f workloads/fill_randint_workloada workloads/mixed_randint_workloada -o optimistic.json

###### Running workload A using best matching synchronization

    ./bin/blinktree_benchmark 1: -s 2 -i 3 -pd 3 -p --sync4me -f workloads/fill_randint_workloada workloads/mixed_randint_workloada -o sync4me.json

###### Running workload A using reader/writer-locks

    ./bin/blinktree_benchmark 1: -s 2 -i 3 -pd 3 -p --latched -f workloads/fill_randint_workloada workloads/mixed_randint_workloada -o rwlocked.json

###### Running workload A using core-based sequencing

    ./bin/blinktree_benchmark 1: -s 2 -i 3 -pd 3 -p --exclusive -f workloads/fill_randint_workloada workloads/mixed_randint_workloada -o core-sequenced.json

###### Running workload A using spin-locks

    ./bin/blinktree_benchmark 1: -s 2 -i 3 -pd 3 -p --latched --exclusive -f workloads/fill_randint_workloada workloads/mixed_randint_workloada -o spinlocked.json
240
repos/ealanos/src/app/blinktree_benchmark/benchmark.cpp
Normal file
@@ -0,0 +1,240 @@
|
||||
#include "benchmark.h"
|
||||
#include <cstdlib>
|
||||
#include <iostream>
|
||||
#include <sstream>
|
||||
#include <json.hpp>
|
||||
#include <memory>
|
||||
#include <mx/memory/global_heap.h>
|
||||
#include <mx/system/topology.h>
|
||||
#include <base/log.h>
|
||||
|
||||
using namespace application::blinktree_benchmark;
|
||||
|
||||
Benchmark::Benchmark(Libc::Env &env, benchmark::Cores &&cores, const std::uint16_t iterations, std::string &&fill_workload_file,
|
||||
std::string &&mixed_workload_file, const bool use_performance_counter,
|
||||
const mx::synchronization::isolation_level node_isolation_level,
|
||||
const mx::synchronization::protocol preferred_synchronization_method,
|
||||
const bool print_tree_statistics, const bool check_tree, std::string &&result_file_name,
|
||||
std::string &&statistic_file_name, std::string &&tree_file_name, const bool profile)
|
||||
: _cores(std::move(cores)), _iterations(iterations), _node_isolation_level(node_isolation_level),
|
||||
_preferred_synchronization_method(preferred_synchronization_method),
|
||||
_print_tree_statistics(print_tree_statistics), _check_tree(check_tree),
|
||||
_result_file_name(std::move(result_file_name)), _statistic_file_name(std::move(statistic_file_name)),
|
||||
_tree_file_name(std::move(tree_file_name)), _profile(profile), _workload(env)
|
||||
{
|
||||
if (use_performance_counter)
|
||||
{
|
||||
this->_chronometer.add(benchmark::Perf::CYCLES);
|
||||
this->_chronometer.add(benchmark::Perf::INSTRUCTIONS);
|
||||
this->_chronometer.add(benchmark::Perf::L1_ITLB_MISSES);
|
||||
this->_chronometer.add(benchmark::Perf::L1_DTLB_MISSES);
|
||||
//this->_chronometer.add(benchmark::Perf::LLC_MISSES);
|
||||
|
||||
|
||||
//this->_chronometer.add(benchmark::Perf::STALLS_MEM_ANY);
|
||||
|
||||
//this->_chronometer.add(benchmark::Perf::SW_PREFETCH_ACCESS_NTA);
|
||||
//this->_chronometer.add(benchmark::Perf::SW_PREFETCH_ACCESS_WRITE);
|
||||
}
|
||||
std::cout << "core configuration: \n" << this->_cores.dump(2) << std::endl;
|
||||
|
||||
this->_workload.build(fill_workload_file, mixed_workload_file);
|
||||
if (this->_workload.empty(benchmark::phase::FILL) && this->_workload.empty(benchmark::phase::MIXED))
|
||||
{
|
||||
std::exit(1);
|
||||
}
|
||||
|
||||
std::cout << "workload: " << this->_workload << "\n" << std::endl;
|
||||
}
|
||||
|
||||
void Benchmark::start()
|
||||
{
|
||||
// Reset tree.
|
||||
if (this->_tree == nullptr)
|
||||
{
|
||||
this->_tree = std::make_unique<db::index::blinktree::BLinkTree<std::uint64_t, std::int64_t>>(
|
||||
this->_node_isolation_level, this->_preferred_synchronization_method);
|
||||
}
|
||||
|
||||
// Reset request scheduler.
|
||||
if (this->_request_scheduler.empty() == false)
|
||||
{
|
||||
this->_request_scheduler.clear();
|
||||
}
|
||||
|
||||
auto *start_task = mx::tasking::runtime::new_task<StartMeasurementTask>(0U, *this);
|
||||
mx::tasking::runtime::spawn(*start_task, 0U);
|
||||
|
||||
// Create one request scheduler per core.
|
||||
for (auto core_index = 0U; core_index < this->_cores.current().size(); core_index++)
|
||||
{
|
||||
const auto channel_id = core_index;
|
||||
auto *request_scheduler = mx::tasking::runtime::new_task<RequestSchedulerTask>(
|
||||
0U, core_index, channel_id, this->_workload, this->_cores.current(), this->_tree.get(), this);
|
||||
mx::tasking::runtime::spawn(*request_scheduler, 0U);
|
||||
this->_request_scheduler.push_back(request_scheduler);
|
||||
}
|
||||
this->_open_requests = this->_request_scheduler.size();
|
||||
|
||||
// Start measurement.
|
||||
if (this->_profile)
|
||||
{
|
||||
mx::tasking::runtime::profile(this->profile_file_name());
|
||||
}
|
||||
/*this->_chronometer.start(static_cast<std::uint16_t>(static_cast<benchmark::phase>(this->_workload)),
|
||||
this->_current_iteration + 1, this->_cores.current());*/
|
||||
//Genode::log("Timer started ");
|
||||
}
|
||||
|
||||
const mx::util::core_set &Benchmark::core_set()
|
||||
{
|
||||
if (this->_current_iteration == std::numeric_limits<std::uint16_t>::max())
|
||||
{
|
||||
// This is the very first time we start the benchmark.
|
||||
this->_current_iteration = 0U;
|
||||
return this->_cores.next();
|
||||
}
|
||||
|
||||
// Switch from fill to mixed phase.
|
||||
if (this->_workload == benchmark::phase::FILL && this->_workload.empty(benchmark::phase::MIXED) == false)
|
||||
{
|
||||
this->_workload.reset(benchmark::phase::MIXED);
|
||||
return this->_cores.current();
|
||||
}
|
||||
this->_workload.reset(benchmark::phase::FILL);
|
||||
|
||||
// Run the next iteration.
|
||||
if (++this->_current_iteration < this->_iterations)
|
||||
{
|
||||
return this->_cores.current();
|
||||
}
|
||||
this->_current_iteration = 0U;
|
||||
|
||||
// At this point, all phases and all iterations for the current core configuration
|
||||
// are done. Increase the cores.
|
||||
return this->_cores.next();
|
||||
}
|
||||
|
||||
void Benchmark::requests_finished()
|
||||
{
|
||||
const auto open_requests = --this->_open_requests;
|
||||
|
||||
if (open_requests == 0U) // All request schedulers are done.
|
||||
{
|
||||
std::uint16_t core_id = mx::tasking::runtime::my_channel();
|
||||
if (core_id != 0)
|
||||
{
|
||||
this->_open_requests++;
|
||||
auto *stop_task = mx::tasking::runtime::new_task<StopMeasurementTask>(0U, *this);
|
||||
stop_task->annotate(static_cast<mx::tasking::TaskInterface::channel>(0));
|
||||
mx::tasking::runtime::spawn(*stop_task, core_id);
|
||||
return;
|
||||
}
|
||||
|
||||
// Stop and print time (and performance counter).
|
||||
//Genode::log("Stopping timer");
|
||||
const auto result = this->_chronometer.stop(this->_workload.size());
|
||||
mx::tasking::runtime::stop();
|
||||
mx::tasking::runtime::reset_usage_predictions();
|
||||
|
||||
//_end = Genode::Trace::timestamp();
|
||||
|
||||
std::cout << "core: " << mx::system::topology::core_id() << result.to_json().dump() << std::endl;
|
||||
|
||||
|
||||
// std::cout << result << std::endl;
|
||||
// Dump results to file.
|
||||
if (this->_result_file_name.empty() == false)
|
||||
{
|
||||
//std::ofstream result_file_stream(this->_result_file_name, std::ofstream::app);
|
||||
//result_file_stream << result.to_json().dump() << std::endl;
|
||||
}
|
||||
|
||||
// Dump statistics to file.
|
||||
if constexpr (mx::tasking::config::task_statistics())
|
||||
{
|
||||
/*if (this->_statistic_file_name.empty() == false)
|
||||
{
|
||||
std::ofstream statistic_file_stream(this->_statistic_file_name, std::ofstream::app);
|
||||
nlohmann::json statistic_json;
|
||||
statistic_json["iteration"] = result.iteration();
|
||||
statistic_json["cores"] = result.core_count();
|
||||
statistic_json["phase"] = result.phase();
|
||||
statistic_json["scheduled"] = nlohmann::json();
|
||||
statistic_json["scheduled-on-channel"] = nlohmann::json();
|
||||
statistic_json["scheduled-off-channel"] = nlohmann::json();
|
||||
statistic_json["executed"] = nlohmann::json();
|
||||
statistic_json["executed-reader"] = nlohmann::json();
|
||||
statistic_json["executed-writer"] = nlohmann::json();
|
||||
statistic_json["buffer-fills"] = nlohmann::json();
|
||||
for (auto i = 0U; i < this->_cores.current().size(); i++)
|
||||
{
|
||||
const auto core_id = std::int32_t{this->_cores.current()[i]};
|
||||
const auto core_id_string = std::to_string(core_id);
|
||||
statistic_json["scheduled"][core_id_string] =
|
||||
result.scheduled_tasks(core_id) / double(result.operation_count());
|
||||
statistic_json["scheduled-on-core"][core_id_string] =
|
||||
result.scheduled_tasks_on_core(core_id) / double(result.operation_count());
|
||||
statistic_json["scheduled-off-core"][core_id_string] =
|
||||
result.scheduled_tasks_off_core(core_id) / double(result.operation_count());
|
||||
statistic_json["executed"][core_id_string] =
|
||||
result.executed_tasks(core_id) / double(result.operation_count());
|
||||
statistic_json["executed-reader"][core_id_string] =
|
||||
result.executed_reader_tasks(core_id) / double(result.operation_count());
|
||||
statistic_json["executed-writer"][core_id_string] =
|
||||
result.executed_writer_tasks(core_id) / double(result.operation_count());
|
||||
statistic_json["fill"][core_id_string] =
|
||||
result.worker_fills(core_id) / double(result.operation_count());
|
||||
}
|
||||
|
||||
statistic_file_stream << statistic_json.dump(2) << std::endl;
|
||||
}*/
|
||||
}
|
||||
|
||||
// Check and print the tree.
|
||||
if (this->_check_tree)
|
||||
{
|
||||
this->_tree->check();
|
||||
}
|
||||
|
||||
if (this->_print_tree_statistics)
|
||||
{
|
||||
this->_tree->print_statistics();
|
||||
}
|
||||
|
||||
const auto is_last_phase =
|
||||
this->_workload == benchmark::phase::MIXED || this->_workload.empty(benchmark::phase::MIXED);
|
||||
|
||||
// Dump the tree.
|
||||
/*if (this->_tree_file_name.empty() == false && is_last_phase)
|
||||
{
|
||||
std::ofstream tree_file_stream(this->_tree_file_name);
|
||||
tree_file_stream << static_cast<nlohmann::json>(*(this->_tree)).dump() << std::endl;
|
||||
}*/
|
||||
|
||||
// Delete the tree to free the whole memory.
|
||||
if (is_last_phase)
|
||||
{
|
||||
this->_tree.reset(nullptr);
|
||||
}
|
||||
|
||||
if (this->core_set()) {
|
||||
this->_chronometer.start(static_cast<std::uint16_t>(static_cast<benchmark::phase>(this->_workload)),
|
||||
this->_current_iteration + 1, this->_cores.current());
|
||||
auto *restart_task = mx::tasking::runtime::new_task<RestartTask>(0U, *this);
|
||||
restart_task->annotate(static_cast<mx::tasking::TaskInterface::channel>(0));
|
||||
mx::tasking::runtime::spawn(*restart_task, core_id);
|
||||
mx::tasking::runtime::resume();
|
||||
} else {
|
||||
Genode::log("Benchmark finished.");
|
||||
mx::tasking::runtime::stop();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
std::string Benchmark::profile_file_name() const
|
||||
{
|
||||
return "profiling-" + std::to_string(this->_cores.current().size()) + "-cores" + "-phase-" +
|
||||
std::to_string(static_cast<std::uint16_t>(static_cast<benchmark::phase>(this->_workload))) + "-iteration-" +
|
||||
std::to_string(this->_current_iteration) + ".json";
|
||||
}
|
||||
166
repos/ealanos/src/app/blinktree_benchmark/benchmark.h
Normal file
@@ -0,0 +1,166 @@
|
||||
#pragma once
|
||||
|
||||
#include "listener.h"
|
||||
#include "request_scheduler.h"
|
||||
#include <array>
|
||||
#include <atomic>
|
||||
#include <benchmark/chronometer.h>
|
||||
#include <benchmark/cores.h>
|
||||
#include <benchmark/workload.h>
|
||||
#include <cstdint>
|
||||
#include <db/index/blinktree/b_link_tree.h>
|
||||
#include <memory>
|
||||
#include <mx/util/core_set.h>
|
||||
#include <string>
|
||||
#include <vector>
|
||||
#include <libc/component.h>
|
||||
#include <mx/tasking/task.h>
|
||||
|
||||
#include <trace/timestamp.h>
|
||||
#include <base/log.h>
|
||||
|
||||
namespace application::blinktree_benchmark {
|
||||
/**
|
||||
* Benchmark executing the task-based BLink-Tree.
|
||||
*/
|
||||
class Benchmark final : public Listener
|
||||
{
|
||||
public:
|
||||
|
||||
Benchmark(Libc::Env &env, benchmark::Cores &&, std::uint16_t iterations, std::string &&fill_workload_file,
|
||||
std::string &&mixed_workload_file, bool use_performance_counter,
|
||||
mx::synchronization::isolation_level node_isolation_level,
|
||||
mx::synchronization::protocol preferred_synchronization_method, bool print_tree_statistics,
|
||||
bool check_tree, std::string &&result_file_name, std::string &&statistic_file_name,
|
||||
std::string &&tree_file_name, bool profile);
|
||||
|
||||
~Benchmark() noexcept override = default;
|
||||
|
||||
/**
|
||||
* @return Core set the benchmark should run in the current iteration.
|
||||
*/
|
||||
const mx::util::core_set &core_set();
|
||||
|
||||
/**
|
||||
* Callback for request tasks to notify they are out of
|
||||
* new requests.
|
||||
*/
|
||||
void requests_finished() override;
|
||||
|
||||
/**
|
||||
* Starts the benchmark after initialization.
|
||||
*/
|
||||
void start();
|
||||
|
||||
void start_chronometer() {
|
||||
this->_chronometer.start(static_cast<std::uint16_t>(static_cast<benchmark::phase>(this->_workload)),
|
||||
this->_current_iteration + 1, this->_cores.current());
|
||||
}
|
||||
|
||||
private:
|
||||
// Collection of cores the benchmark should run on.
|
||||
benchmark::Cores _cores;
|
||||
|
||||
// Number of iterations the benchmark should use.
|
||||
const std::uint16_t _iterations;
|
||||
|
||||
// Current iteration within the actual core set.
|
||||
std::uint16_t _current_iteration = std::numeric_limits<std::uint16_t>::max();
|
||||
|
||||
// Workload to get requests from.
|
||||
benchmark::Workload _workload;
|
||||
|
||||
// Tree to run requests on.
|
||||
std::unique_ptr<db::index::blinktree::BLinkTree<std::uint64_t, std::int64_t>> _tree;
|
||||
|
||||
// The synchronization mechanism to use for tree nodes.
|
||||
const mx::synchronization::isolation_level _node_isolation_level;
|
||||
|
||||
// Preferred synchronization method.
|
||||
const mx::synchronization::protocol _preferred_synchronization_method;
|
||||
|
||||
// If true, the tree statistics (height, number of nodes, ...) will be printed.
|
||||
const bool _print_tree_statistics;
|
||||
|
||||
// If true, the tree will be checked for consistency after each iteration.
|
||||
const bool _check_tree;
|
||||
|
||||
// Name of the file to print results to.
|
||||
const std::string _result_file_name;
|
||||
|
||||
// Name of the file to print further statistics.
|
||||
const std::string _statistic_file_name;
|
||||
|
||||
// Name of the file to serialize the tree to.
|
||||
const std::string _tree_file_name;
|
||||
|
||||
// If true, use idle profiling.
|
||||
const bool _profile;
|
||||
|
||||
// Number of open request tasks; used for tracking the benchmark.
|
||||
alignas(64) std::atomic_uint16_t _open_requests = 0;
|
||||
|
||||
// List of request schedulers.
|
||||
alignas(64) std::vector<RequestSchedulerTask *> _request_scheduler;
|
||||
|
||||
// Chronometer for starting/stopping time and performance counter.
|
||||
alignas(64) benchmark::Chronometer<std::uint16_t> _chronometer;
|
||||
|
||||
/**
|
||||
* @return Name of the file to write profiling results to.
|
||||
*/
|
||||
[[nodiscard]] std::string profile_file_name() const;
|
||||
|
||||
friend class StartMeasurementTask;
|
||||
friend class StopMeasurementTask;
|
||||
};
|
||||
|
||||
class StartMeasurementTask : public mx::tasking::TaskInterface
|
||||
{
|
||||
private:
|
||||
Benchmark &_benchmark;
|
||||
|
||||
public:
|
||||
constexpr StartMeasurementTask(Benchmark& benchmark) : _benchmark(benchmark) {}
|
||||
~StartMeasurementTask() override = default;
|
||||
|
||||
mx::tasking::TaskResult execute(const std::uint16_t core_id, const std::uint16_t channel_id) override
|
||||
{
|
||||
//_benchmark._chronometer.start(static_cast<std::uint16_t>(static_cast<benchmark::phase>(_benchmark._workload)), _benchmark._current_iteration + 1, _benchmark._cores.current());
|
||||
//_benchmark._start = Genode::Trace::timestamp();
|
||||
return mx::tasking::TaskResult::make_remove();
|
||||
}
|
||||
};
|
||||
|
||||
class StopMeasurementTask : public mx::tasking::TaskInterface
|
||||
{
|
||||
private:
|
||||
Benchmark &_benchmark;
|
||||
|
||||
public:
|
||||
constexpr StopMeasurementTask(Benchmark& benchmark) : _benchmark(benchmark) {}
|
||||
~StopMeasurementTask() override = default;
|
||||
|
||||
mx::tasking::TaskResult execute(const std::uint16_t core_id, const std::uint16_t channel_id) override
|
||||
{
|
||||
_benchmark.requests_finished();
|
||||
return mx::tasking::TaskResult::make_remove();
|
||||
}
|
||||
};
|
||||
|
||||
class RestartTask : public mx::tasking::TaskInterface
|
||||
{
|
||||
private:
|
||||
Benchmark &_benchmark;
|
||||
|
||||
public:
|
||||
constexpr RestartTask(Benchmark &benchmark) : _benchmark(benchmark) {}
|
||||
~RestartTask() override = default;
|
||||
|
||||
mx::tasking::TaskResult execute(const std::uint16_t core_id, const std::uint16_t channel_id) override
|
||||
{
|
||||
_benchmark.start();
|
||||
return mx::tasking::TaskResult::make_remove();
|
||||
}
|
||||
};
|
||||
} // namespace application::blinktree_benchmark
|
||||
17
repos/ealanos/src/app/blinktree_benchmark/config.h
Normal file
@@ -0,0 +1,17 @@
#pragma once

namespace application::blinktree_benchmark {
class config
{
public:
    /**
     * @return Number of requests that will be started at a time by the request scheduler.
     */
    static constexpr auto batch_size() noexcept { return 500U; }

    /**
     * @return Maximum number of open requests, system-wide.
     */
    static constexpr auto max_parallel_requests() noexcept { return 1500U; }
};
} // namespace application::blinktree_benchmark
15
repos/ealanos/src/app/blinktree_benchmark/listener.h
Normal file
@@ -0,0 +1,15 @@
#pragma once

namespace application::blinktree_benchmark {
/**
 * The listener will be used to notify the benchmark that request tasks are
 * done and no more work is available.
 */
class Listener
{
public:
    constexpr Listener() = default;
    virtual ~Listener() = default;
    virtual void requests_finished() = 0;
};
} // namespace application::blinktree_benchmark
219
repos/ealanos/src/app/blinktree_benchmark/main.cpp
Normal file
@@ -0,0 +1,219 @@
|
||||
#include "benchmark.h"
|
||||
#include <argparse.hpp>
|
||||
#include <benchmark/cores.h>
|
||||
#include <mx/memory/global_heap.h>
|
||||
#include <mx/system/environment.h>
|
||||
//#include <mx/system/thread.h>
|
||||
#include <mx/tasking/runtime.h>
|
||||
#include <mx/util/core_set.h>
|
||||
#include <tuple>
|
||||
#include <libc/component.h>
|
||||
#include <cstring>
|
||||
#include <cstdio>
|
||||
|
||||
using namespace application::blinktree_benchmark;
|
||||
|
||||
/**
|
||||
* Instantiates the BLink-Tree benchmark with CLI arguments.
|
||||
* @param count_arguments Number of CLI arguments.
|
||||
* @param arguments Arguments themselves.
|
||||
*
|
||||
* @return Instance of the benchmark and parameters for tasking runtime.
|
||||
*/
|
||||
std::tuple<Benchmark *, std::uint16_t, bool> create_benchmark(Libc::Env& env, int count_arguments, char **arguments);
|
||||
|
||||
/**
|
||||
* Starts the benchmark.
|
||||
*
|
||||
* @param count_arguments Number of CLI arguments.
|
||||
* @param arguments Arguments themselves.
|
||||
*
|
||||
* @return Return code of the application.
|
||||
*/
|
||||
extern "C" void wait_for_continue();
|
||||
int bt_main(Libc::Env &env, int count_arguments, char **arguments)
|
||||
{
|
||||
if (mx::system::Environment::is_numa_balancing_enabled())
|
||||
{
|
||||
std::cout << "[Warn] NUMA balancing may be enabled, set '/proc/sys/kernel/numa_balancing' to '0'" << std::endl;
|
||||
}
|
||||
|
||||
auto [benchmark, prefetch_distance, use_system_allocator] = create_benchmark(env, count_arguments, arguments);
|
||||
if (benchmark == nullptr)
|
||||
{
|
||||
return 1;
|
||||
}
|
||||
Genode::log("Using system allocator = ", (use_system_allocator? "true" : "false"));
|
||||
|
||||
mx::util::core_set cores{};
|
||||
|
||||
//cores = benchmark->core_set();
|
||||
while ((cores = benchmark->core_set()))
|
||||
{
|
||||
mx::tasking::runtime_guard _(env, use_system_allocator, cores, prefetch_distance);
|
||||
benchmark->start_chronometer();
|
||||
benchmark->start();
|
||||
//wait_for_continue();
|
||||
}
|
||||
Genode::log("Benchmark finished.");
|
||||
|
||||
delete benchmark;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
std::tuple<Benchmark *, std::uint16_t, bool> create_benchmark(Libc::Env &env, int count_arguments, char **arguments)
|
||||
{
|
||||
// Set up arguments.
|
||||
argparse::ArgumentParser argument_parser("blinktree_benchmark");
|
||||
argument_parser.add_argument("cores")
|
||||
.help("Range of the number of cores (1 for using 1 core, 1: for using 1 up to available cores, 1:4 for using "
|
||||
"cores from 1 to 4).")
|
||||
.default_value(std::string("1"));
|
||||
/* Not used for the moment.
|
||||
argument_parser.add_argument("-c", "--channels-per-core")
|
||||
.help("Number of how many channels used per core.")
|
||||
.default_value(std::uint16_t(1))
|
||||
.action([](const std::string &value) { return std::uint16_t(std::stoi(value)); });
|
||||
*/
|
||||
argument_parser.add_argument("-s", "--steps")
|
||||
.help("Steps, how number of cores is increased (1,2,4,6,.. for -s 2).")
|
||||
.default_value(std::uint16_t(2))
|
||||
.action([](const std::string &value) { return std::uint16_t(std::stoi(value)); });
|
||||
argument_parser.add_argument("-i", "--iterations")
|
||||
.help("Number of iterations for each workload")
|
||||
.default_value(std::uint16_t(1))
|
||||
.action([](const std::string &value) { return std::uint16_t(std::stoi(value)); });
|
||||
argument_parser.add_argument("-sco", "--system-core-order")
|
||||
.help("Use systems core order. If not, cores are ordered by node id (should be preferred).")
|
||||
.implicit_value(true)
|
||||
.default_value(false);
|
||||
argument_parser.add_argument("-p", "--perf")
|
||||
.help("Use performance counter.")
|
||||
.implicit_value(true)
|
||||
.default_value(false);
|
||||
argument_parser.add_argument("--exclusive")
|
||||
.help("Are all node accesses exclusive?")
|
||||
.implicit_value(true)
|
||||
.default_value(false);
|
||||
argument_parser.add_argument("--latched")
|
||||
.help("Prefer latch for synchronization?")
|
||||
.implicit_value(true)
|
||||
.default_value(false);
|
||||
argument_parser.add_argument("--olfit")
|
||||
.help("Prefer OLFIT for synchronization?")
|
||||
.implicit_value(true)
|
||||
.default_value(false);
|
||||
argument_parser.add_argument("--sync4me")
|
||||
.help("Let the tasking layer decide the synchronization primitive.")
|
||||
.implicit_value(true)
|
||||
.default_value(false);
|
||||
argument_parser.add_argument("--print-stats")
|
||||
.help("Print tree statistics after every iteration.")
|
||||
.implicit_value(true)
|
||||
.default_value(false);
|
||||
argument_parser.add_argument("--disable-check")
|
||||
.help("Disable tree check while benchmarking.")
|
||||
.implicit_value(true)
|
||||
.default_value(false);
|
||||
argument_parser.add_argument("-f", "--workload-files")
|
||||
.help("Files containing the workloads (workloads/fill workloads/mixed for example).")
|
||||
.nargs(2)
|
||||
.default_value(
|
||||
std::vector<std::string>{"workloads/fill_randint_workloada", "workloads/mixed_randint_workloada"});
|
||||
argument_parser.add_argument("-pd", "--prefetch-distance")
|
||||
.help("Distance of prefetched data objects (0 = disable prefetching).")
|
||||
.default_value(std::uint16_t(0))
|
||||
.action([](const std::string &value) { return std::uint16_t(std::stoi(value)); });
|
||||
argument_parser.add_argument("--system-allocator")
|
||||
.help("Use the systems malloc interface to allocate tasks (default disabled).")
|
||||
.implicit_value(true)
|
||||
.default_value(false);
|
||||
argument_parser.add_argument("-ot", "--out-tree")
|
||||
.help("Name of the file, the tree will be written in json format.")
|
||||
.default_value(std::string(""));
|
||||
argument_parser.add_argument("-os", "--out-statistics")
|
||||
.help("Name of the file, the task statistics will be written in json format.")
|
||||
.default_value(std::string(""));
|
||||
argument_parser.add_argument("-o", "--out")
|
||||
.help("Name of the file, the results will be written to.")
|
||||
.default_value(std::string(""));
|
||||
argument_parser.add_argument("--profiling")
|
||||
.help("Enable profiling (default disabled).")
|
||||
.implicit_value(true)
|
||||
.default_value(false);
|
||||
|
||||
// Parse arguments.
|
||||
try
|
||||
{
|
||||
argument_parser.parse_args(count_arguments, arguments);
|
||||
}
|
||||
catch (std::runtime_error &e)
|
||||
{
|
||||
std::cout << argument_parser << std::endl;
|
||||
return {nullptr, 0U, false};
|
||||
}
|
||||
|
||||
auto order =
|
||||
argument_parser.get<bool>("-sco") ? mx::util::core_set::Order::Ascending : mx::util::core_set::Order::NUMAAware;
|
||||
auto cores =
|
||||
benchmark::Cores({argument_parser.get<std::string>("cores"), argument_parser.get<std::uint16_t>("-s"), order});
|
||||
auto workload_files = argument_parser.get<std::vector<std::string>>("-f");
|
||||
const auto isolation_level = argument_parser.get<bool>("--exclusive")
|
||||
? mx::synchronization::isolation_level::Exclusive
|
||||
: mx::synchronization::isolation_level::ExclusiveWriter;
|
||||
auto preferred_synchronization_method = mx::synchronization::protocol::Queue;
|
||||
if (argument_parser.get<bool>("--latched"))
|
||||
{
|
||||
preferred_synchronization_method = mx::synchronization::protocol::Latch;
|
||||
Genode::log("Set synchronization method to latch");
|
||||
}
|
||||
else if (argument_parser.get<bool>("--olfit"))
|
||||
{
|
||||
preferred_synchronization_method = mx::synchronization::protocol::OLFIT;
|
||||
Genode::log("Set synchronization method to OLFIT");
|
||||
}
|
||||
else if (argument_parser.get<bool>("--sync4me"))
|
||||
{
|
||||
preferred_synchronization_method = mx::synchronization::protocol::None;
|
||||
Genode::log("Set synchronization method to None");
|
||||
} else {
|
||||
Genode::log("Set synchronization method to Queue");
|
||||
}
|
||||
|
||||
Genode::log("Isolation level ", (isolation_level == mx::synchronization::isolation_level::Exclusive) ? "exclusive readers/writers" : "exclusive writers/parallel readers");
|
||||
|
||||
// Create the benchmark.
|
||||
//Genode::Heap _heap{env.ram(), env.rm()};
|
||||
auto *benchmark =
|
||||
new Benchmark(env, std::move(cores), argument_parser.get<std::uint16_t>("-i"), std::move(workload_files[0]),
|
||||
std::move(workload_files[1]), argument_parser.get<bool>("-p"), isolation_level,
|
||||
preferred_synchronization_method, argument_parser.get<bool>("--print-stats"),
|
||||
argument_parser.get<bool>("--disable-check") == false, argument_parser.get<std::string>("-o"),
|
||||
argument_parser.get<std::string>("-os"), argument_parser.get<std::string>("-ot"),
|
||||
argument_parser.get<bool>("--profiling"));
|
||||
|
||||
return {benchmark, argument_parser.get<std::uint16_t>("-pd"), argument_parser.get<bool>("--system-allocator")};
|
||||
}
|
||||
|
||||
void Libc::Component::construct(Libc::Env &env) {
|
||||
|
||||
mx::system::Environment::set_env(&env);
|
||||
|
||||
auto sys_cores = mx::util::core_set::build(64);
|
||||
mx::system::Environment::set_cores(&sys_cores);
|
||||
|
||||
std::uint16_t cores = 60;
|
||||
//env.cpu().affinity_space().total();
|
||||
|
||||
char cores_arg[10];
|
||||
sprintf(cores_arg, "%d", cores);
|
||||
|
||||
char *args[] = {"blinktree_benchmark", "-i", "200", "--olfit", cores_arg};
|
||||
|
||||
Libc::with_libc([&]()
|
||||
{
|
||||
std::cout << "Starting B-link tree benchmark" << std::endl;
|
||||
bt_main(env, 5, args);
|
||||
});
|
||||
}
|
||||
252
repos/ealanos/src/app/blinktree_benchmark/request_scheduler.h
Normal file
@@ -0,0 +1,252 @@
|
||||
#pragma once
|
||||
|
||||
#include "config.h"
|
||||
#include "listener.h"
|
||||
#include <atomic>
|
||||
#include <benchmark/workload.h>
|
||||
#include <cstdint>
|
||||
#include <db/index/blinktree/b_link_tree.h>
|
||||
#include <db/index/blinktree/config.h>
|
||||
#include <db/index/blinktree/insert_value_task.h>
|
||||
#include <db/index/blinktree/lookup_task.h>
|
||||
#include <db/index/blinktree/update_task.h>
|
||||
#include <mx/resource/resource.h>
|
||||
#include <mx/tasking/runtime.h>
|
||||
#include <mx/tasking/task.h>
|
||||
#include <mx/util/core_set.h>
|
||||
#include <mx/util/reference_counter.h>
|
||||
|
||||
namespace application::blinktree_benchmark {
|
||||
|
||||
class RequestIndex
|
||||
{
|
||||
public:
|
||||
static RequestIndex make_finished() { return RequestIndex{std::numeric_limits<decltype(_index)>::max(), 0UL}; }
|
||||
static RequestIndex make_no_new() { return RequestIndex{0UL, 0UL}; }
|
||||
|
||||
RequestIndex(const std::uint64_t index, const std::uint64_t count) noexcept : _index(index), _count(count) {}
|
||||
explicit RequestIndex(std::pair<std::uint64_t, std::uint64_t> &&index_and_count) noexcept
|
||||
: _index(std::get<0>(index_and_count)), _count(std::get<1>(index_and_count))
|
||||
{
|
||||
}
|
||||
RequestIndex(RequestIndex &&) noexcept = default;
|
||||
RequestIndex(const RequestIndex &) = default;
|
||||
~RequestIndex() noexcept = default;
|
||||
|
||||
RequestIndex &operator=(RequestIndex &&) noexcept = default;
|
||||
|
||||
[[nodiscard]] std::uint64_t index() const noexcept { return _index; }
|
||||
[[nodiscard]] std::uint64_t count() const noexcept { return _count; }
|
||||
|
||||
[[nodiscard]] bool is_finished() const noexcept { return _index == std::numeric_limits<decltype(_index)>::max(); }
|
||||
[[nodiscard]] bool has_new() const noexcept { return _count > 0UL; }
|
||||
|
||||
RequestIndex &operator-=(const std::uint64_t count) noexcept
|
||||
{
|
||||
_count -= count;
|
||||
_index += count;
|
||||
return *this;
|
||||
}
|
||||
|
||||
private:
|
||||
std::uint64_t _index;
|
||||
std::uint64_t _count;
|
||||
};
|
||||
|
||||
/**
|
||||
* The RequestContainer manages the workload and allocates new batches of requests
|
||||
* that will be scheduled by the request scheduler.
|
||||
*/
|
||||
class RequestContainer
|
||||
{
|
||||
public:
|
||||
RequestContainer(const std::uint16_t core_id, const std::uint64_t max_open_requests,
|
||||
benchmark::Workload &workload) noexcept
|
||||
: _finished_requests(core_id), _local_buffer(workload.next(config::batch_size())),
|
||||
_max_pending_requests(max_open_requests), _workload(workload)
|
||||
{
|
||||
}
|
||||
|
||||
~RequestContainer() noexcept = default;
|
||||
|
||||
/**
|
||||
* Allocates the next requests to spawn.
|
||||
*
|
||||
* @return Workload index and the number of tuples to request;
*         a "finished" marker is returned when no more requests are available.
|
||||
*/
|
||||
RequestIndex next() noexcept
|
||||
{
|
||||
const auto finished_requests = _finished_requests.load();
|
||||
|
||||
const auto pending_requests = _scheduled_requests - finished_requests;
|
||||
if (pending_requests >= _max_pending_requests)
|
||||
{
|
||||
// Too many open requests somewhere in the system.
|
||||
return RequestIndex::make_no_new();
|
||||
}
|
||||
|
||||
if (_local_buffer.has_new() == false)
|
||||
{
|
||||
_local_buffer = RequestIndex{_workload.next(config::batch_size())};
|
||||
}
|
||||
|
||||
if (_local_buffer.has_new())
|
||||
{
|
||||
// How many requests can be scheduled without reaching the request limit?
|
||||
const auto free_requests = _max_pending_requests - pending_requests;
|
||||
|
||||
// Try to spawn all free requests, but at least those in the local buffer.
|
||||
const auto count = std::min(free_requests, _local_buffer.count());
|
||||
|
||||
_scheduled_requests += count;
|
||||
|
||||
const auto index = RequestIndex{_local_buffer.index(), count};
|
||||
_local_buffer -= count;
|
||||
|
||||
return index;
|
||||
}
|
||||
|
||||
// Do we have to wait for pending requests or are we finished?
|
||||
return pending_requests > 0UL ? RequestIndex::make_no_new() : RequestIndex::make_finished();
|
||||
}
|
||||
|
||||
/**
|
||||
* Callback after a value was inserted.
|
||||
*/
|
||||
void inserted(const std::uint16_t core_id, const std::uint64_t /*key*/, const std::int64_t /*value*/) noexcept
|
||||
{
|
||||
task_finished(core_id);
|
||||
}
|
||||
|
||||
/**
|
||||
* Callback after a value was updated.
|
||||
*/
|
||||
void updated(const std::uint16_t core_id, const std::uint64_t /*key*/, const std::int64_t /*value*/) noexcept
|
||||
{
|
||||
task_finished(core_id);
|
||||
}
|
||||
|
||||
/**
|
||||
* Callback after a value was removed.
|
||||
*/
|
||||
void removed(const std::uint16_t core_id, const std::uint64_t /*key*/) noexcept { task_finished(core_id); }
|
||||
|
||||
/**
|
||||
* Callback after a value was found.
|
||||
*/
|
||||
void found(const std::uint16_t core_id, const std::uint64_t /*key*/, const std::int64_t /*value*/) noexcept
|
||||
{
|
||||
task_finished(core_id);
|
||||
}
|
||||
|
||||
/**
|
||||
* Callback when a value is missing.
|
||||
*/
|
||||
void missing(const std::uint16_t core_id, const std::uint64_t /*key*/) noexcept { task_finished(core_id); }
|
||||
|
||||
const benchmark::NumericTuple &operator[](const std::size_t index) const noexcept { return _workload[index]; }
|
||||
|
||||
private:
|
||||
// Number of requests finished by tasks.
|
||||
mx::util::reference_counter_64 _finished_requests;
|
||||
|
||||
// Number of tasks scheduled by the owning request scheduler.
|
||||
std::uint64_t _scheduled_requests = 0UL;
|
||||
|
||||
// Local buffer holding request items that were taken from the global workload but are not yet scheduled.
|
||||
RequestIndex _local_buffer;
|
||||
|
||||
// Maximum number of pending requests this scheduler may distribute,
// derived from the system-wide limit of parallel requests.
|
||||
const std::uint64_t _max_pending_requests;
|
||||
|
||||
// Workload to get requests from.
|
||||
benchmark::Workload &_workload;
|
||||
|
||||
/**
|
||||
* Updates the counter of finished requests.
|
||||
*/
|
||||
void task_finished(const std::uint16_t core_id) { _finished_requests.add(core_id); }
|
||||
};
|
||||
|
||||
/**
|
||||
* The RequestScheduler owns its own request container and sets up requests for the BLink-Tree.
|
||||
*/
|
||||
class RequestSchedulerTask final : public mx::tasking::TaskInterface
|
||||
{
|
||||
public:
|
||||
RequestSchedulerTask(const std::uint16_t core_id, const std::uint16_t channel_id, benchmark::Workload &workload,
|
||||
const mx::util::core_set &core_set,
|
||||
db::index::blinktree::BLinkTree<std::uint64_t, std::int64_t> *tree, Listener *listener)
|
||||
: _tree(tree), _listener(listener)
|
||||
{
|
||||
this->annotate(mx::tasking::priority::low);
|
||||
this->is_readonly(false);
|
||||
|
||||
const auto container = mx::tasking::runtime::new_resource<RequestContainer>(
|
||||
sizeof(RequestContainer), mx::resource::hint{channel_id}, core_id,
|
||||
config::max_parallel_requests() / core_set.size(), workload);
|
||||
this->annotate(container, sizeof(RequestContainer));
|
||||
}
|
||||
|
||||
~RequestSchedulerTask() final = default;
|
||||
|
||||
mx::tasking::TaskResult execute(const std::uint16_t core_id, const std::uint16_t channel_id) override
|
||||
{
|
||||
// Get some new requests from the container.
|
||||
auto &request_container = *mx::resource::ptr_cast<RequestContainer>(this->annotated_resource());
|
||||
const auto next_requests = request_container.next();
|
||||
|
||||
if (next_requests.has_new())
|
||||
{
|
||||
for (auto i = next_requests.index(); i < next_requests.index() + next_requests.count(); ++i)
|
||||
{
|
||||
mx::tasking::TaskInterface *task{nullptr};
|
||||
const auto &tuple = request_container[i];
|
||||
if (tuple == benchmark::NumericTuple::INSERT)
|
||||
{
|
||||
task = mx::tasking::runtime::new_task<
|
||||
db::index::blinktree::InsertValueTask<std::uint64_t, std::int64_t, RequestContainer>>(
|
||||
core_id, tuple.key(), tuple.value(), _tree, request_container);
|
||||
task->is_readonly(_tree->height() > 1U);
|
||||
}
|
||||
else if (tuple == benchmark::NumericTuple::LOOKUP)
|
||||
{
|
||||
task = mx::tasking::runtime::new_task<
|
||||
db::index::blinktree::LookupTask<std::uint64_t, std::int64_t, RequestContainer>>(
|
||||
core_id, tuple.key(), request_container);
|
||||
|
||||
task->is_readonly(true);
|
||||
}
|
||||
else if (tuple == benchmark::NumericTuple::UPDATE)
|
||||
{
|
||||
task = mx::tasking::runtime::new_task<
|
||||
db::index::blinktree::UpdateTask<std::uint64_t, std::int64_t, RequestContainer>>(
|
||||
core_id, tuple.key(), tuple.value(), request_container);
|
||||
task->is_readonly(_tree->height() > 1U);
|
||||
}
|
||||
|
||||
task->annotate(_tree->root(), db::index::blinktree::config::node_size() / 4U);
|
||||
mx::tasking::runtime::spawn(*task, channel_id);
|
||||
}
|
||||
}
|
||||
else if (next_requests.is_finished())
|
||||
{
|
||||
// All requests are done. Notify the benchmark and die.
|
||||
_listener->requests_finished();
|
||||
mx::tasking::runtime::delete_resource<RequestContainer>(this->annotated_resource());
|
||||
return mx::tasking::TaskResult::make_remove();
|
||||
}
|
||||
|
||||
return mx::tasking::TaskResult::make_succeed(this);
|
||||
}
|
||||
|
||||
private:
|
||||
// The tree to send requests to.
|
||||
db::index::blinktree::BLinkTree<std::uint64_t, std::int64_t> *_tree;
|
||||
|
||||
// Benchmark listener to notify when the requests are done.
|
||||
Listener *_listener;
|
||||
};
|
||||
} // namespace application::blinktree_benchmark
|
||||
33
repos/ealanos/src/app/blinktree_benchmark/target.mk
Normal file
@@ -0,0 +1,33 @@
|
||||
MXINC_DIR=$(REP_DIR)/src/app/blinktree
|
||||
GENODE_GCC_TOOLCHAIN_DIR ?= /usr/local/genode/tool/23.05
|
||||
|
||||
TARGET = blinktree_benchmark
|
||||
# source files for the benchmark framework
|
||||
SRC_MXBENCH = ${REP_DIR}/src/lib/benchmark/workload_set.cpp
|
||||
SRC_MXBENCH += ${REP_DIR}/src/lib/benchmark/workload.cpp
|
||||
SRC_MXBENCH += ${REP_DIR}/src/lib/benchmark/cores.cpp
|
||||
SRC_MXBENCH += ${REP_DIR}/src/lib/benchmark/string_util.cpp
|
||||
SRC_MXBENCH += ${REP_DIR}/src/lib/benchmark/perf.cpp
|
||||
# source files for blinktree benchmark
|
||||
SRC_BTREE += main.cpp
|
||||
SRC_BTREE += benchmark.cpp
|
||||
|
||||
INC_DIR += /usr/local/genode/tool/lib/clang/14.0.5/include/
|
||||
INC_DIR += $(REP_DIR)/src/lib
|
||||
INC_DIR += $(REP_DIR)/include
|
||||
INC_DIR += $(REP_DIR)/include/ealanos/util
|
||||
INC_DIR += $(call select_from_repositories,src/lib/libc)
|
||||
INC_DIR += $(call select_from_repositories,src/lib/libc)/spec/x86_64
|
||||
vpath %.h ${INC_DIR}
|
||||
LD_OPT += --allow-multiple-definition
|
||||
|
||||
SRC_CC = ${SRC_MXBENCH} ${SRC_BTREE}
|
||||
LIBS += base libc stdcxx mxtasking
|
||||
EXT_OBJECTS += /usr/local/genode/tool/lib/libatomic.a /usr/local/genode/tool/23.05/lib/gcc/x86_64-pc-elf/12.3.0/libgcc_eh.a /usr/local/genode/tool/lib/clang/14.0.5/lib/linux/libclang_rt.builtins-x86_64.a
|
||||
CUSTOM_CC = /usr/local/genode/tool/bin/clang
|
||||
CUSTOM_CXX = /usr/local/genode/tool/bin/clang++
|
||||
CC_OPT += --target=x86_64-genode --sysroot=/does/not/exist --gcc-toolchain=$(GENODE_GCC_TOOLCHAIN_DIR) -Wno-error -O2 -g -DNDEBUG -I$(MXINC_DIR) -std=c++20 -msse4.2 -DUSE_SSE2#-D_GLIBCXX_ATOMIC_BUILTINS_8 -D__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8
|
||||
#CC_OPT += -femulated-tls -DCLANG_CXX11_ATOMICS
|
||||
CC_CXX_WARN_STRICT =
|
||||
CUSTOM_CXX_LIB := $(CROSS_DEV_PREFIX)g++
|
||||
#CXX_LD += $(CROSS_DEV_PREFIX)g++
|
||||
229
repos/ealanos/src/lib/benchmark/chronometer.h
Normal file
@@ -0,0 +1,229 @@
|
||||
#pragma once
|
||||
|
||||
#include "perf.h"
|
||||
#include "phase.h"
|
||||
#include <chrono>
|
||||
#include <json.hpp>
|
||||
#include <mx/tasking/config.h>
|
||||
#include <mx/tasking/profiling/statistic.h>
|
||||
#include <mx/tasking/runtime.h>
|
||||
#include <mx/system/environment.h>
|
||||
#include <mx/util/core_set.h>
|
||||
#include <numeric>
|
||||
#include <ostream>
|
||||
#include <unordered_map>
|
||||
#include <utility>
|
||||
#include <vector>
|
||||
|
||||
namespace benchmark {
|
||||
/**
|
||||
* The InterimResult is produced by the chronometer and holds
* all results of a single benchmark run.
|
||||
*/
|
||||
template <typename P> class InterimResult
|
||||
{
|
||||
friend std::ostream &operator<<(std::ostream &stream, const InterimResult &result)
|
||||
{
|
||||
stream << result.core_count() << "\t" << result.iteration() << "\t" << result.phase() << "\t"
|
||||
<< result.time().count() << " ms"
|
||||
<< "\t" << result.throughput() << " op/s";
|
||||
|
||||
for (const auto &[name, value] : result.performance_counter())
|
||||
{
|
||||
const auto value_per_operation = value / double(result.operation_count());
|
||||
stream << "\t" << value_per_operation << " " << name << "/op";
|
||||
}
|
||||
|
||||
if constexpr (mx::tasking::config::task_statistics())
|
||||
{
|
||||
stream << "\t" << result.executed_writer_tasks() / double(result.operation_count()) << " writer/op";
|
||||
stream << "\t" << result.executed_reader_tasks() / double(result.operation_count()) << " reader/op";
|
||||
stream << "\t" << result.scheduled_tasks_on_core() / double(result.operation_count()) << " on-channel/op";
|
||||
stream << "\t" << result.scheduled_tasks_off_core() / double(result.operation_count()) << " off-channel/op";
|
||||
stream << "\t" << result.worker_fills() / double(result.operation_count()) << " fills/op";
|
||||
}
|
||||
|
||||
return stream << std::flush;
|
||||
}
|
||||
|
||||
public:
|
||||
InterimResult(const std::uint64_t operation_count, const P &phase, const std::uint16_t iteration,
|
||||
const std::uint16_t core_count, const std::chrono::milliseconds time,
|
||||
std::vector<PerfCounter> &counter,
|
||||
std::unordered_map<std::uint16_t, std::uint64_t> executed_tasks,
|
||||
std::unordered_map<std::uint16_t, std::uint64_t> executed_reader_tasks,
|
||||
std::unordered_map<std::uint16_t, std::uint64_t> executed_writer_tasks,
|
||||
std::unordered_map<std::uint16_t, std::uint64_t> scheduled_tasks,
|
||||
std::unordered_map<std::uint16_t, std::uint64_t> scheduled_tasks_on_core,
|
||||
std::unordered_map<std::uint16_t, std::uint64_t> scheduled_tasks_off_core,
|
||||
std::unordered_map<std::uint16_t, std::uint64_t> worker_fills)
|
||||
: _operation_count(operation_count), _phase(phase), _iteration(iteration), _core_count(core_count), _time(time),
|
||||
_executed_tasks(std::move(executed_tasks)), _executed_reader_tasks(std::move(executed_reader_tasks)),
|
||||
_executed_writer_tasks(std::move(executed_writer_tasks)), _scheduled_tasks(std::move(scheduled_tasks)),
|
||||
_scheduled_tasks_on_core(std::move(scheduled_tasks_on_core)),
|
||||
_scheduled_tasks_off_core(std::move(scheduled_tasks_off_core)), _worker_fills(std::move(worker_fills))
|
||||
{
|
||||
for (auto &c : counter)
|
||||
{
|
||||
_performance_counter.emplace_back(std::make_pair(c.name(), c.read()));
|
||||
}
|
||||
}
|
||||
|
||||
~InterimResult() = default;
|
||||
|
||||
std::uint64_t operation_count() const noexcept { return _operation_count; }
|
||||
const P &phase() const noexcept { return _phase; }
|
||||
std::uint16_t iteration() const noexcept { return _iteration; }
|
||||
std::uint16_t core_count() const noexcept { return _core_count; }
|
||||
std::chrono::milliseconds time() const noexcept { return _time; }
|
||||
double throughput() const { return _operation_count / (_time.count() / 1000.0); }
|
||||
const std::vector<std::pair<std::string, double>> &performance_counter() const noexcept
|
||||
{
|
||||
return _performance_counter;
|
||||
}
|
||||
|
||||
[[maybe_unused]] std::uint64_t executed_tasks() const noexcept { return sum(_executed_tasks); }
|
||||
[[maybe_unused]] std::uint64_t executed_reader_tasks() const noexcept { return sum(_executed_reader_tasks); }
|
||||
[[maybe_unused]] std::uint64_t executed_writer_tasks() const noexcept { return sum(_executed_writer_tasks); }
|
||||
[[maybe_unused]] std::uint64_t scheduled_tasks() const noexcept { return sum(_scheduled_tasks); }
|
||||
[[maybe_unused]] std::uint64_t scheduled_tasks_on_core() const noexcept { return sum(_scheduled_tasks_on_core); }
|
||||
[[maybe_unused]] std::uint64_t scheduled_tasks_off_core() const noexcept { return sum(_scheduled_tasks_off_core); }
|
||||
[[maybe_unused]] std::uint64_t worker_fills() const noexcept { return sum(_worker_fills); }
|
||||
|
||||
std::uint64_t executed_tasks(const std::uint16_t channel_id) const noexcept
|
||||
{
|
||||
return _executed_tasks.at(channel_id);
|
||||
}
|
||||
std::uint64_t executed_reader_tasks(const std::uint16_t channel_id) const noexcept
|
||||
{
|
||||
return _executed_reader_tasks.at(channel_id);
|
||||
}
|
||||
std::uint64_t executed_writer_tasks(const std::uint16_t channel_id) const noexcept
|
||||
{
|
||||
return _executed_writer_tasks.at(channel_id);
|
||||
}
|
||||
std::uint64_t scheduled_tasks(const std::uint16_t channel_id) const noexcept
|
||||
{
|
||||
return _scheduled_tasks.at(channel_id);
|
||||
}
|
||||
std::uint64_t scheduled_tasks_on_core(const std::uint16_t channel_id) const noexcept
|
||||
{
|
||||
return _scheduled_tasks_on_core.at(channel_id);
|
||||
}
|
||||
std::uint64_t scheduled_tasks_off_core(const std::uint16_t channel_id) const noexcept
|
||||
{
|
||||
return _scheduled_tasks_off_core.at(channel_id);
|
||||
}
|
||||
std::uint64_t worker_fills(const std::uint16_t channel_id) const noexcept { return _worker_fills.at(channel_id); }
|
||||
|
||||
[[nodiscard]] nlohmann::json to_json() const noexcept
|
||||
{
|
||||
auto json = nlohmann::json{};
|
||||
json["iteration"] = iteration();
|
||||
json["cores"] = core_count();
|
||||
json["phase"] = phase();
|
||||
json["throughput"] = throughput();
|
||||
for (const auto &[name, value] : performance_counter())
|
||||
{
|
||||
json[name] = value / double(operation_count());
|
||||
}
|
||||
|
||||
if constexpr (mx::tasking::config::task_statistics())
|
||||
{
|
||||
json["executed-writer-tasks"] = executed_writer_tasks() / double(operation_count());
|
||||
json["executed-reader-tasks"] = executed_reader_tasks() / double(operation_count());
|
||||
json["scheduled-tasks-on-channel"] = scheduled_tasks_on_core() / double(operation_count());
|
||||
json["scheduled-tasks-off-channel"] = scheduled_tasks_off_core() / double(operation_count());
|
||||
json["buffer-fills"] = worker_fills() / double(operation_count());
|
||||
}
|
||||
|
||||
return json;
|
||||
}
|
||||
|
||||
private:
|
||||
const std::uint64_t _operation_count;
|
||||
const P &_phase;
|
||||
const std::uint16_t _iteration;
|
||||
const std::uint16_t _core_count;
|
||||
const std::chrono::milliseconds _time;
|
||||
std::vector<std::pair<std::string, double>> _performance_counter;
|
||||
const std::unordered_map<std::uint16_t, std::uint64_t> _executed_tasks;
|
||||
const std::unordered_map<std::uint16_t, std::uint64_t> _executed_reader_tasks;
|
||||
const std::unordered_map<std::uint16_t, std::uint64_t> _executed_writer_tasks;
|
||||
const std::unordered_map<std::uint16_t, std::uint64_t> _scheduled_tasks;
|
||||
const std::unordered_map<std::uint16_t, std::uint64_t> _scheduled_tasks_on_core;
|
||||
const std::unordered_map<std::uint16_t, std::uint64_t> _scheduled_tasks_off_core;
|
||||
const std::unordered_map<std::uint16_t, std::uint64_t> _worker_fills;
|
||||
|
||||
std::uint64_t sum(const std::unordered_map<std::uint16_t, std::uint64_t> &map) const noexcept
|
||||
{
|
||||
return std::accumulate(map.begin(), map.end(), 0U,
|
||||
[](const auto &current, const auto &item) { return current + item.second; });
|
||||
}
|
||||
};
|
||||
/**
|
||||
* The Chronometer is the "benchmark clock", which will be started and stopped
|
||||
* before and after each benchmark run. On stopping, the chronometer will calculate
|
||||
* used time, persist performance counter values, and mx::tasking statistics.
|
||||
*/
|
||||
template <typename P> class Chronometer
|
||||
{
|
||||
public:
|
||||
Chronometer() = default;
|
||||
~Chronometer() = default;
|
||||
|
||||
void start(const P phase, const std::uint16_t iteration, const mx::util::core_set &core_set)
|
||||
{
|
||||
_current_phase = phase;
|
||||
_current_iteration = iteration;
|
||||
_core_set = core_set;
|
||||
_perf.start();
|
||||
|
||||
//_start = std::chrono::steady_clock::now();
|
||||
_start = Genode::Trace::timestamp();
|
||||
}
|
||||
|
||||
InterimResult<P> stop(const std::uint64_t count_operations)
|
||||
{
|
||||
const auto end = Genode::Trace::timestamp();
|
||||
//const auto end = std::chrono::steady_clock::now();
|
||||
_perf.stop();
|
||||
|
||||
//const auto milliseconds = std::chrono::duration_cast<std::chrono::milliseconds>(end-_start);
|
||||
const auto milliseconds = std::chrono::milliseconds((end-_start)/mx::system::Environment::get_cpu_freq());
|
||||
|
||||
return {count_operations,
|
||||
_current_phase,
|
||||
_current_iteration,
|
||||
mx::tasking::runtime::workers_count(),
|
||||
milliseconds,
|
||||
_perf.counter(),
|
||||
statistic_map(mx::tasking::profiling::Statistic::Executed),
|
||||
statistic_map(mx::tasking::profiling::Statistic::ExecutedReader),
|
||||
statistic_map(mx::tasking::profiling::Statistic::ExecutedWriter),
|
||||
statistic_map(mx::tasking::profiling::Statistic::Scheduled),
|
||||
statistic_map(mx::tasking::profiling::Statistic::ScheduledOnChannel),
|
||||
statistic_map(mx::tasking::profiling::Statistic::ScheduledOffChannel),
|
||||
statistic_map(mx::tasking::profiling::Statistic::Fill)};
|
||||
}
|
||||
void add(PerfCounter &performance_counter) { _perf.add(performance_counter); }
|
||||
private:
|
||||
std::uint16_t _current_iteration{0U};
|
||||
P _current_phase;
|
||||
mx::util::core_set _core_set;
|
||||
alignas(64) Perf _perf;
|
||||
//alignas(64) std::chrono::steady_clock::time_point _start;
|
||||
alignas(64) size_t _start;
|
||||
|
||||
std::unordered_map<std::uint16_t, std::uint64_t> statistic_map(
|
||||
const mx::tasking::profiling::Statistic::Counter counter)
|
||||
{
|
||||
std::unordered_map<std::uint16_t, std::uint64_t> statistics;
|
||||
for (auto i = 0U; i < mx::tasking::runtime::channels(); ++i)
|
||||
{
|
||||
statistics[i] = mx::tasking::runtime::statistic(counter, i);
|
||||
}
|
||||
return statistics;
|
||||
}
|
||||
};
|
||||
} // namespace benchmark
|
||||
100
repos/ealanos/src/lib/benchmark/cores.cpp
Normal file
@@ -0,0 +1,100 @@
|
||||
#include "cores.h"
|
||||
#include <mx/system/topology.h>
|
||||
#include <regex>
|
||||
#include <sstream>
|
||||
|
||||
using namespace benchmark;
|
||||
|
||||
Cores::Cores(const std::uint16_t min_cores, const std::uint16_t max_cores, const std::uint16_t steps,
|
||||
const mx::util::core_set::Order order)
|
||||
{
|
||||
this->add_for_range(min_cores, max_cores, steps, order);
|
||||
}
|
||||
|
||||
Cores::Cores(const std::string &cores, const std::uint16_t steps, const mx::util::core_set::Order order)
|
||||
{
|
||||
const std::regex single_core_regex("(\\d+)$");
|
||||
const std::regex from_core_regex("(\\d+):$");
|
||||
const std::regex core_range_regex("(\\d+):(\\d+)");
|
||||
|
||||
std::stringstream stream(cores);
|
||||
std::string token;
|
||||
while (std::getline(stream, token, ';'))
|
||||
{
|
||||
std::smatch match;
|
||||
|
||||
if (std::regex_match(token, match, single_core_regex))
|
||||
{
|
||||
const auto core = std::stoi(match[1].str());
|
||||
this->add_for_range(core, core, steps, order);
|
||||
}
|
||||
else if (std::regex_match(token, match, from_core_regex))
|
||||
{
|
||||
this->add_for_range(std::stoi(match[1].str()), mx::system::topology::count_cores(), steps, order);
|
||||
}
|
||||
else if (std::regex_match(token, match, core_range_regex))
|
||||
{
|
||||
this->add_for_range(std::stoi(match[1].str()), std::stoi(match[2].str()), steps, order);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void Cores::add_for_range(const std::uint16_t min_cores, const std::uint16_t max_cores, const std::uint16_t steps,
|
||||
const mx::util::core_set::Order order)
|
||||
{
|
||||
if (min_cores == 0U || min_cores == max_cores)
|
||||
{
|
||||
this->_core_sets.push_back(mx::util::core_set::build(max_cores, order));
|
||||
}
|
||||
else
|
||||
{
|
||||
auto cores = min_cores;
|
||||
if (cores % steps != 0U)
|
||||
{
|
||||
this->_core_sets.push_back(mx::util::core_set::build(cores, order));
|
||||
cores++;
|
||||
}
|
||||
|
||||
for (auto count_cores = cores; count_cores <= max_cores; count_cores++)
|
||||
{
|
||||
if (count_cores % steps == 0U)
|
||||
{
|
||||
this->_core_sets.push_back(mx::util::core_set::build(count_cores, order));
|
||||
}
|
||||
}
|
||||
|
||||
if (max_cores % steps != 0U)
|
||||
{
|
||||
this->_core_sets.push_back(mx::util::core_set::build(max_cores, order));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
std::string Cores::dump(const std::uint8_t indent) const
|
||||
{
|
||||
std::stringstream stream;
|
||||
|
||||
for (auto i = 0U; i < this->_core_sets.size(); ++i)
|
||||
{
|
||||
if (i > 0U)
|
||||
{
|
||||
stream << "\n";
|
||||
}
|
||||
const auto &core_set = this->_core_sets[i];
|
||||
if (indent > 0U)
|
||||
{
|
||||
stream << std::string(indent, ' ');
|
||||
}
|
||||
stream << core_set.size() << ": " << core_set;
|
||||
}
|
||||
stream << std::flush;
|
||||
|
||||
return stream.str();
|
||||
}
|
||||
|
||||
namespace benchmark {
|
||||
std::ostream &operator<<(std::ostream &stream, const Cores &cores)
|
||||
{
|
||||
return stream << cores.dump(0U) << std::endl;
|
||||
}
|
||||
} // namespace benchmark
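
The string-based constructor above accepts ';'-separated tokens of the forms "3" (a single core count), "4:" (from four cores up to all cores reported by the topology), and "1:24" (a range). A minimal usage sketch follows; it is not part of the ported sources, the helper function is hypothetical, and core_set is assumed to print via the same operator<< that dump() uses:

    #include "cores.h"
    #include <iostream>

    void print_core_configuration(const mx::util::core_set::Order order)
    {
        // "1:24" selects one up to 24 cores, stepping by 2.
        benchmark::Cores cores{"1:24", 2U, order};

        for (std::size_t i = 0U; i < cores.size(); ++i)
        {
            const auto &core_set = cores.next();
            std::cout << core_set.size() << ": " << core_set << '\n';
        }
    }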
|
||||
53
repos/ealanos/src/lib/benchmark/cores.h
Normal file
@@ -0,0 +1,53 @@
|
||||
#pragma once
|
||||
|
||||
#include <cstdint>
|
||||
#include <mx/util/core_set.h>
|
||||
#include <ostream>
|
||||
#include <string>
|
||||
#include <vector>
|
||||
|
||||
namespace benchmark {
|
||||
/**
|
||||
* Set of core_sets used for a benchmark that should be performed over
|
||||
* different core counts to benchmark scalability.
|
||||
* Can be created from min and max cores (i.e. 1 core to 32 cores) or from
|
||||
* string identifying the cores (i.e. "1:32").
|
||||
*/
|
||||
class Cores
|
||||
{
|
||||
friend std::ostream &operator<<(std::ostream &stream, const Cores &cores);
|
||||
|
||||
public:
|
||||
Cores(std::uint16_t min_cores, std::uint16_t max_cores, std::uint16_t steps, mx::util::core_set::Order order);
|
||||
Cores(const std::string &cores, std::uint16_t steps, mx::util::core_set::Order order);
|
||||
Cores(Cores &&) noexcept = default;
|
||||
|
||||
~Cores() = default;
|
||||
|
||||
const mx::util::core_set &next()
|
||||
{
|
||||
const auto current_index = _current_index++;
|
||||
if (current_index < _core_sets.size())
|
||||
{
|
||||
return _core_sets[current_index];
|
||||
}
|
||||
|
||||
return _empty_core_set;
|
||||
}
|
||||
|
||||
[[nodiscard]] const mx::util::core_set ¤t() const noexcept { return _core_sets[_current_index - 1]; }
|
||||
[[nodiscard]] std::size_t size() const noexcept { return _core_sets.size(); }
|
||||
|
||||
void reset() { _current_index = 0U; }
|
||||
|
||||
[[nodiscard]] std::string dump(std::uint8_t indent) const;
|
||||
|
||||
private:
|
||||
std::vector<mx::util::core_set> _core_sets;
|
||||
std::uint16_t _current_index = 0U;
|
||||
const mx::util::core_set _empty_core_set;
|
||||
|
||||
void add_for_range(std::uint16_t min_cores, std::uint16_t max_cores, std::uint16_t steps,
|
||||
mx::util::core_set::Order order);
|
||||
};
|
||||
} // namespace benchmark
|
||||
70
repos/ealanos/src/lib/benchmark/perf.cpp
Normal file
@@ -0,0 +1,70 @@
|
||||
#include "perf.h"
|
||||
|
||||
using namespace benchmark;
|
||||
|
||||
/**
|
||||
* Counter "Instructions Retired"
|
||||
* Counts when the last uop of an instruction retires.
|
||||
*/
|
||||
[[maybe_unused]] PerfCounter Perf::INSTRUCTIONS = {"instr", 0xc0, 0x0};
|
||||
|
||||
/**
|
||||
*/
|
||||
[[maybe_unused]] PerfCounter Perf::CYCLES = {"cycles", 0x76, 0x0};
|
||||
|
||||
/**
|
||||
*/
|
||||
[[maybe_unused]] PerfCounter Perf::L1_DTLB_MISSES = {"l1-dtlb-miss", 0x45, 0xff};
|
||||
[[maybe_unused]] PerfCounter Perf::L1_ITLB_MISSES = {"l1-itlb-miss", 0x85, 0x0};
|
||||
|
||||
/**
|
||||
* Counter "LLC Misses"
|
||||
* Accesses to the LLC in which the data is not present (miss).
|
||||
*/
|
||||
[[maybe_unused]] PerfCounter Perf::LLC_MISSES = {"llc-miss", 0x6, 0xff};
|
||||
|
||||
/**
|
||||
* Counter "LLC Reference"
|
||||
* Accesses to the LLC, in which the data is present (hit) or not present (miss).
|
||||
*/
|
||||
[[maybe_unused]] PerfCounter Perf::LLC_REFERENCES = {"llc-ref", 0x4, 0xff};
|
||||
|
||||
/**
|
||||
* Micro architecture "Skylake"
|
||||
* Counter "CYCLE_ACTIVITY.STALLS_MEM_ANY"
|
||||
* EventSel=A3H,UMask=14H, CMask=20
|
||||
* Execution stalls while memory subsystem has an outstanding load.
|
||||
*/
|
||||
//PerfCounter Perf::STALLS_MEM_ANY = {"memory-stall", PERF_TYPE_RAW, 0x145314a3};
|
||||
|
||||
/**
|
||||
* Micro architecture "Skylake"
|
||||
* Counter "SW_PREFETCH_ACCESS.NTA"
|
||||
* EventSel=32H,UMask=01H
|
||||
* Number of PREFETCHNTA instructions executed.
|
||||
*/
|
||||
[[maybe_unused]] PerfCounter Perf::SW_PREFETCH_ACCESS_NTA = {"sw-prefetch-nta", 0x4b, 0x4};
|
||||
|
||||
/**
|
||||
* Micro architecture "Skylake"
|
||||
* Counter "SW_PREFETCH_ACCESS.T0"
|
||||
* EventSel=32H,UMask=02H
|
||||
* Number of PREFETCHT0 instructions executed.
|
||||
*/
|
||||
//[[maybe_unused]] PerfCounter Perf::SW_PREFETCH_ACCESS_T0 = {"sw-prefetch-t0", Genode::Trace::Performance_counter::Type::CORE, 0x4b, };
|
||||
|
||||
/**
|
||||
* Micro architecture "Skylake"
|
||||
* Counter "SW_PREFETCH_ACCESS.T1_T2"
|
||||
* EventSel=32H,UMask=04H
|
||||
* Number of PREFETCHT1 or PREFETCHT2 instructions executed.
|
||||
*/
|
||||
//[[maybe_unused]] PerfCounter Perf::SW_PREFETCH_ACCESS_T1_T2 = {"sw-prefetch-t1t2", PERF_TYPE_RAW, 0x530432};
|
||||
|
||||
/**
|
||||
* Micro architecture "Skylake"
|
||||
* Counter "SW_PREFETCH_ACCESS.PREFETCHW"
|
||||
* EventSel=32H,UMask=08H
|
||||
* Number of PREFETCHW instructions executed.
|
||||
*/
|
||||
[[maybe_unused]] PerfCounter Perf::SW_PREFETCH_ACCESS_WRITE = {"sw-prefetch-w", 0x4b, 0x2};
|
||||
176
repos/ealanos/src/lib/benchmark/perf.h
Normal file
@@ -0,0 +1,176 @@
|
||||
#pragma once
|
||||
#include <algorithm>
|
||||
#include <cstring>
|
||||
#include <iostream>
|
||||
#include <string>
|
||||
#include <vector>
|
||||
//#include <base/trace/perf.h>
|
||||
|
||||
|
||||
/*
|
||||
* For more Performance Counter take a look into the Manual from Intel:
|
||||
* https://software.intel.com/sites/default/files/managed/8b/6e/335279_performance_monitoring_events_guide.pdf
|
||||
*
|
||||
* To get event ids from manual specification see libpfm4:
|
||||
* http://www.bnikolic.co.uk/blog/hpc-prof-events.html
|
||||
* Clone, Make, use examples/check_events to generate event id code from event:
|
||||
* ./check_events <category>:<umask>[:c=<cmask>]
|
||||
* Example:
|
||||
* ./cycle_activity:0x14:c=20
|
||||
*/
|
||||
|
||||
namespace benchmark {
|
||||
|
||||
/**
|
||||
* Represents a Linux Performance Counter.
|
||||
*/
|
||||
class PerfCounter
|
||||
{
|
||||
public:
|
||||
PerfCounter(std::string &&name, const std::uint64_t event_id, const std::uint64_t mask) : _name(std::move(name)), _event_id(event_id), _mask(mask)
|
||||
{
|
||||
|
||||
}
|
||||
|
||||
~PerfCounter() = default;
|
||||
|
||||
bool open()
|
||||
{
|
||||
/*try {
|
||||
_counter = Genode::Trace::Performance_counter::acquire(_type);
|
||||
} catch (Genode::Trace::Pfc_no_avail) {
|
||||
std::cerr << "Failed to open performance counters." << std::endl;
|
||||
}
|
||||
|
||||
try {
|
||||
Genode::Trace::Performance_counter::setup(_counter, _event_id, _mask, (_type ==
|
||||
Genode::Trace::Performance_counter::Type::CORE ? 0x30000 : 0x550f000000000000)); } catch
|
||||
(Genode::Trace::Pfc_access_error &e) { std::cerr << "Error while setting up performance
|
||||
counter: " << e.error_code() << std::endl;
|
||||
}*/
|
||||
|
||||
return false; //_counter >= 0;
|
||||
}
|
||||
|
||||
bool start()
|
||||
{
|
||||
/*try {
|
||||
Genode::Trace::Performance_counter::start(_counter);
|
||||
_prev.value = static_cast<std::uint64_t>(Genode::Trace::Performance_counter::read(_counter));
|
||||
}
|
||||
catch (Genode::Trace::Pfc_access_error &e)
|
||||
{
|
||||
std::cerr << "Failed to start counter " << _counter << " " << _name << ": " << static_cast<uint16_t>(e.error_code()) << std::endl;
|
||||
}*/
|
||||
return true; // the counter readout above is disabled; _prev.value is unsigned, so the former ">= 0" check was always true
|
||||
}
|
||||
|
||||
bool stop()
|
||||
{
|
||||
/*try {
|
||||
_data.value = Genode::Trace::Performance_counter::read(_counter);
|
||||
Genode::Trace::Performance_counter::stop(_counter);
|
||||
Genode::Trace::Performance_counter::reset(_counter);
|
||||
}
|
||||
catch (Genode::Trace::Pfc_access_error &e)
|
||||
{
|
||||
std::cerr << "Failed to stop counter: " << e.error_code() << std::endl;
|
||||
}
|
||||
// const auto is_read = ::read(_file_descriptor, &_data, sizeof(read_format)) == sizeof(read_format);
|
||||
// ioctl(_file_descriptor, PERF_EVENT_IOC_DISABLE, 0);*/
|
||||
return true; // is_read; the readout above is disabled and _data.value is unsigned, so ">= 0" was always true
|
||||
}
|
||||
|
||||
[[nodiscard]] double read() const
|
||||
{
|
||||
return static_cast<double>(_data.value - _prev.value);
|
||||
}
|
||||
|
||||
[[nodiscard]] const std::string &name() const { return _name; }
|
||||
explicit operator const std::string &() const { return name(); }
|
||||
|
||||
bool operator==(const std::string &name) const { return _name == name; }
|
||||
|
||||
private:
|
||||
struct read_format
|
||||
{
|
||||
std::uint64_t value = 0;
|
||||
std::uint64_t time_enabled = 0;
|
||||
std::uint64_t time_running = 0;
|
||||
};
|
||||
|
||||
const std::string _name;
|
||||
//Genode::Trace::Performance_counter::Type _type;
|
||||
std::uint64_t _event_id;
|
||||
std::uint64_t _mask;
|
||||
//Genode::Trace::Performance_counter::Counter _counter;
|
||||
read_format _prev{};
|
||||
read_format _data{};
|
||||
};
|
||||
|
||||
/**
|
||||
* Holds a set of performance counter and starts/stops them together.
|
||||
*/
|
||||
class Perf
|
||||
{
|
||||
public:
|
||||
[[maybe_unused]] static PerfCounter INSTRUCTIONS;
|
||||
[[maybe_unused]] static PerfCounter CYCLES;
|
||||
[[maybe_unused]] static PerfCounter L1_DTLB_MISSES;
|
||||
[[maybe_unused]] static PerfCounter L1_ITLB_MISSES;
|
||||
[[maybe_unused]] static PerfCounter LLC_MISSES;
|
||||
[[maybe_unused]] static PerfCounter LLC_REFERENCES;
|
||||
//[[maybe_unused]] static PerfCounter STALLED_CYCLES_BACKEND;
|
||||
//[[maybe_unused]] static PerfCounter STALLS_MEM_ANY;
|
||||
[[maybe_unused]] static PerfCounter SW_PREFETCH_ACCESS_NTA;
|
||||
//[[maybe_unused]] static PerfCounter SW_PREFETCH_ACCESS_T0;
|
||||
//[[maybe_unused]] static PerfCounter SW_PREFETCH_ACCESS_T1_T2;
|
||||
[[maybe_unused]] static PerfCounter SW_PREFETCH_ACCESS_WRITE;
|
||||
|
||||
Perf() noexcept = default;
|
||||
~Perf() noexcept = default;
|
||||
|
||||
bool add(PerfCounter &counter_)
|
||||
{
|
||||
if (counter_.open())
|
||||
{
|
||||
_counter.push_back(counter_);
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
void start()
|
||||
{
|
||||
for (auto &counter_ : _counter)
|
||||
{
|
||||
counter_.start();
|
||||
}
|
||||
}
|
||||
|
||||
void stop()
|
||||
{
|
||||
for (auto &counter_ : _counter)
|
||||
{
|
||||
counter_.stop();
|
||||
}
|
||||
}
|
||||
|
||||
double operator[](const std::string &name) const
|
||||
{
|
||||
auto counter_iterator = std::find(_counter.begin(), _counter.end(), name);
|
||||
if (counter_iterator != _counter.end())
|
||||
{
|
||||
return counter_iterator->read();
|
||||
}
|
||||
|
||||
return 0.0;
|
||||
}
|
||||
|
||||
std::vector<PerfCounter> &counter() { return _counter; }
|
||||
|
||||
private:
|
||||
std::vector<PerfCounter> _counter;
|
||||
};
|
||||
} // namespace benchmark
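
A hedged sketch of the intended call sequence for Perf: counters are registered with add(), started and stopped around the measured code, and read back by name. The helper function and the std::function workload are illustrative only; note that in this port PerfCounter::open() currently returns false, so add() drops the counters and every read yields 0.0:

    #include "perf.h"
    #include <functional>
    #include <iostream>

    void measure(const std::function<void()> &workload)
    {
        benchmark::Perf perf;

        // add() keeps a counter only if its open() call succeeds.
        perf.add(benchmark::Perf::INSTRUCTIONS);
        perf.add(benchmark::Perf::LLC_MISSES);

        perf.start();
        workload();
        perf.stop();

        // operator[] looks counters up by name; unknown names read as 0.0.
        std::cout << "instr: " << perf["instr"] << ", llc-miss: " << perf["llc-miss"] << std::endl;
    }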
|
||||
9
repos/ealanos/src/lib/benchmark/phase.h
Normal file
@@ -0,0 +1,9 @@
|
||||
#pragma once
|
||||
#include <cstdint>
|
||||
namespace benchmark {
|
||||
enum class phase : std::uint8_t
|
||||
{
|
||||
FILL = 0U,
|
||||
MIXED = 1U
|
||||
};
|
||||
}
|
||||
15
repos/ealanos/src/lib/benchmark/string_util.cpp
Normal file
@@ -0,0 +1,15 @@
|
||||
#include "string_util.h"
|
||||
#include <sstream>
|
||||
|
||||
using namespace benchmark;
|
||||
|
||||
void string_util::split(const std::string &text, const char delimiter,
|
||||
const std::function<void(const std::string &line)> &callback)
|
||||
{
|
||||
std::stringstream stream(text);
|
||||
std::string token;
|
||||
while (std::getline(stream, token, delimiter))
|
||||
{
|
||||
callback(token);
|
||||
}
|
||||
}
|
||||
13
repos/ealanos/src/lib/benchmark/string_util.h
Normal file
@@ -0,0 +1,13 @@
|
||||
#pragma once
|
||||
|
||||
#include <functional>
|
||||
#include <string>
|
||||
|
||||
namespace benchmark {
|
||||
class string_util
|
||||
{
|
||||
public:
|
||||
static void split(const std::string &text, char delimiter,
|
||||
const std::function<void(const std::string &line)> &callback);
|
||||
};
|
||||
} // namespace benchmark
|
||||
20
repos/ealanos/src/lib/benchmark/workload.cpp
Normal file
@@ -0,0 +1,20 @@
|
||||
#include "workload.h"
|
||||
#include <limits>
|
||||
|
||||
using namespace benchmark;
|
||||
|
||||
std::pair<std::uint64_t, std::uint64_t> Workload::next(const std::uint64_t count) noexcept
|
||||
{
|
||||
const auto index = this->_current_index.fetch_add(count, std::memory_order_relaxed);
|
||||
const auto workload_size = this->_workload_set[this->_current_phase].size();
|
||||
|
||||
return index < workload_size ? std::make_pair(index, std::min(count, workload_size - index))
|
||||
: std::make_pair(std::numeric_limits<std::uint64_t>::max(), 0UL);
|
||||
}
|
||||
|
||||
namespace benchmark {
|
||||
std::ostream &operator<<(std::ostream &stream, const Workload &workload)
|
||||
{
|
||||
return stream << workload._workload_set << std::flush;
|
||||
}
|
||||
} // namespace benchmark
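
A small consumer sketch (assumed usage, not taken from the sources) showing how next() hands out disjoint batches of the current phase until the returned pair's second member drops to zero:

    #include "workload.h"
    #include <cstdint>

    void consume(benchmark::Workload &workload)
    {
        constexpr std::uint64_t batch_size = 64U;
        std::uint64_t checksum = 0U;

        while (true)
        {
            // Atomically reserves up to batch_size tuples of the current phase.
            const auto [index, count] = workload.next(batch_size);
            if (count == 0U)
            {
                break; // phase exhausted
            }

            for (auto i = index; i < index + count; ++i)
            {
                checksum += workload[i].key();
            }
        }

        (void)checksum; // keep the example free of unused-variable warnings
    }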
|
||||
59
repos/ealanos/src/lib/benchmark/workload.h
Normal file
@@ -0,0 +1,59 @@
|
||||
#pragma once
|
||||
|
||||
#include "phase.h"
|
||||
#include "workload_set.h"
|
||||
#include <array>
|
||||
#include <atomic>
|
||||
#include <cstdint>
|
||||
#include <utility>
|
||||
#include <libc/component.h>
|
||||
|
||||
namespace benchmark {
|
||||
class Workload
|
||||
{
|
||||
friend std::ostream &operator<<(std::ostream &stream, const Workload &workload);
|
||||
|
||||
public:
|
||||
Workload(Libc::Env &env) : _workload_set(env) {}
|
||||
~Workload() noexcept = default;
|
||||
|
||||
[[maybe_unused]] void build(const std::string &fill_workload_file, const std::string &mixed_workload_file)
|
||||
{
|
||||
_workload_set.build(fill_workload_file, mixed_workload_file);
|
||||
}
|
||||
|
||||
[[maybe_unused]] void build(const std::uint64_t fill_inserts, const std::uint64_t mixed_inserts,
|
||||
const std::uint64_t mixed_lookups, const std::uint64_t mixed_updates,
|
||||
const std::uint64_t mixed_deletes)
|
||||
{
|
||||
_workload_set.build(fill_inserts, mixed_inserts, mixed_lookups, mixed_updates, mixed_deletes);
|
||||
}
|
||||
|
||||
[[maybe_unused]] void shuffle() { _workload_set.shuffle(); }
|
||||
|
||||
std::pair<std::uint64_t, std::uint64_t> next(std::uint64_t count) noexcept;
|
||||
|
||||
[[nodiscard]] std::uint64_t size() const noexcept { return _workload_set[_current_phase].size(); }
|
||||
[[nodiscard]] bool empty() const noexcept { return _workload_set[_current_phase].empty(); }
|
||||
[[nodiscard]] bool empty(const phase phase) const noexcept { return _workload_set[phase].empty(); }
|
||||
|
||||
void reset(const phase phase) noexcept
|
||||
{
|
||||
_current_phase = phase;
|
||||
_current_index = 0;
|
||||
}
|
||||
|
||||
const NumericTuple &operator[](const std::size_t index) const noexcept
|
||||
{
|
||||
return _workload_set[_current_phase][index];
|
||||
}
|
||||
bool operator==(const phase phase) const noexcept { return _current_phase == phase; }
|
||||
explicit operator phase() const noexcept { return _current_phase; }
|
||||
|
||||
private:
|
||||
NumericWorkloadSet _workload_set;
|
||||
phase _current_phase = phase::FILL;
|
||||
|
||||
alignas(64) std::atomic_uint64_t _current_index{0U};
|
||||
};
|
||||
} // namespace benchmark
|
||||
178
repos/ealanos/src/lib/benchmark/workload_set.cpp
Normal file
@@ -0,0 +1,178 @@
|
||||
#include "workload_set.h"
|
||||
|
||||
#include <algorithm>
|
||||
#include <fstream>
|
||||
#include <iostream>
|
||||
#include <mutex>
|
||||
#include <random>
|
||||
#include <thread>
|
||||
|
||||
using namespace benchmark;
|
||||
|
||||
void NumericWorkloadSet::build(const std::string &fill_workload_file, const std::string &mixed_workload_file)
|
||||
{
|
||||
auto parse = [](auto &file_stream, std::vector<NumericTuple> &data_set) -> bool {
|
||||
std::srand(1337);
|
||||
std::string op_name;
|
||||
std::uint64_t key{};
|
||||
|
||||
bool contains_update = false;
|
||||
|
||||
while (file_stream >> op_name >> key)
|
||||
{
|
||||
if (op_name == "INSERT")
|
||||
{
|
||||
contains_update = true;
|
||||
data_set.emplace_back(NumericTuple{NumericTuple::INSERT, key, std::rand()});
|
||||
}
|
||||
else if (op_name == "READ")
|
||||
{
|
||||
data_set.emplace_back(NumericTuple{NumericTuple::LOOKUP, key});
|
||||
}
|
||||
else if (op_name == "UPDATE")
|
||||
{
|
||||
contains_update = true;
|
||||
data_set.emplace_back(NumericTuple{NumericTuple::UPDATE, key, std::rand()});
|
||||
}
|
||||
}
|
||||
|
||||
return contains_update;
|
||||
};
|
||||
|
||||
// std::mutex out_mutex;
|
||||
|
||||
std::thread fill_thread{[this, &parse, &fill_workload_file]() {
|
||||
std::ifstream fill_file(fill_workload_file);
|
||||
if (fill_file.good())
|
||||
{
|
||||
parse(fill_file, this->_data_sets[static_cast<std::size_t>(phase::FILL)]);
|
||||
}
|
||||
else
|
||||
{
|
||||
//std::lock_guard lock{out_mutex};
|
||||
std::cerr << "Could not open workload file '" << fill_workload_file << "'." << std::endl;
|
||||
}
|
||||
}};
|
||||
//Genode::Mutex out_mutex;
|
||||
|
||||
// Fill_thread fill_thread(_env, out_mutex, fill_workload_file, parse, *this);
|
||||
|
||||
// fill_thread.start();
|
||||
|
||||
|
||||
std::thread mixed_thread{[this, &parse, &mixed_workload_file]() {
|
||||
std::ifstream mixed_file(mixed_workload_file);
|
||||
if (mixed_file.good())
|
||||
{
|
||||
this->_mixed_phase_contains_update =
|
||||
parse(mixed_file, this->_data_sets[static_cast<std::size_t>(phase::MIXED)]);
|
||||
}
|
||||
else
|
||||
{
|
||||
//std::lock_guard lock{out_mutex};
|
||||
std::cerr << "Could not open workload file '" << mixed_workload_file << "'." << std::endl;
|
||||
}
|
||||
}};
|
||||
|
||||
//Mixed_thread mixed_thread(_env, out_mutex, mixed_workload_file, parse, *this);
|
||||
//mixed_thread.start();
|
||||
|
||||
fill_thread.join();
|
||||
mixed_thread.join();
|
||||
}
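
For reference, the parse lambda above expects plain-text workload files with one operation per line, as produced by a YCSB-style generator; the keys below are illustrative:

    INSERT 4417
    INSERT 9831
    READ 4417
    UPDATE 9831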
|
||||
|
||||
void NumericWorkloadSet::build(const std::uint64_t fill_inserts, const std::uint64_t mixed_inserts,
|
||||
const std::uint64_t mixed_lookups, const std::uint64_t mixed_updates,
|
||||
const std::uint64_t mixed_deletes)
|
||||
{
|
||||
std::srand(1337);
|
||||
this->_data_sets[static_cast<std::uint8_t>(phase::FILL)].reserve(fill_inserts);
|
||||
this->_data_sets[static_cast<std::uint8_t>(phase::MIXED)].reserve(mixed_inserts + mixed_lookups + mixed_updates +
|
||||
mixed_deletes);
|
||||
|
||||
for (auto i = 0U; i < fill_inserts; ++i)
|
||||
{
|
||||
this->_data_sets[static_cast<std::uint8_t>(phase::FILL)].emplace_back(
|
||||
NumericTuple{NumericTuple::INSERT, i + 1U, std::rand()});
|
||||
}
|
||||
|
||||
this->_mixed_phase_contains_update = mixed_inserts > 0U || mixed_deletes > 0U || mixed_updates > 0U;
|
||||
|
||||
for (auto i = fill_inserts; i < fill_inserts + mixed_inserts; ++i)
|
||||
{
|
||||
this->_data_sets[static_cast<std::uint8_t>(phase::MIXED)].emplace_back(
|
||||
NumericTuple{NumericTuple::INSERT, i + 1U, std::rand()});
|
||||
}
|
||||
|
||||
for (auto i = 0U; i < mixed_lookups; ++i)
|
||||
{
|
||||
this->_data_sets[static_cast<std::uint8_t>(phase::MIXED)].push_back(
|
||||
{NumericTuple::LOOKUP, this->_data_sets[static_cast<std::uint16_t>(phase::FILL)][i % fill_inserts].key()});
|
||||
}
|
||||
|
||||
for (auto i = 0U; i < mixed_updates; ++i)
|
||||
{
|
||||
this->_data_sets[static_cast<std::size_t>(phase::MIXED)].push_back(
|
||||
{NumericTuple::UPDATE, this->_data_sets[static_cast<std::uint16_t>(phase::FILL)][i % fill_inserts].key(),
|
||||
std::rand()});
|
||||
}
|
||||
|
||||
for (auto i = 0U; i < mixed_deletes; ++i)
|
||||
{
|
||||
this->_data_sets[static_cast<std::uint8_t>(phase::MIXED)].push_back(
|
||||
{NumericTuple::DELETE, this->_data_sets[static_cast<std::uint16_t>(phase::FILL)][i % fill_inserts].key()});
|
||||
}
|
||||
}
|
||||
|
||||
void NumericWorkloadSet::shuffle()
|
||||
{
|
||||
std::mt19937 mersenne_twister_engine(1337U + 42U); // fixed seed keeps the shuffle reproducible across runs
|
||||
|
||||
std::shuffle(this->_data_sets[static_cast<std::uint8_t>(phase::FILL)].begin(),
|
||||
this->_data_sets[static_cast<std::uint8_t>(phase::FILL)].end(), mersenne_twister_engine);
|
||||
std::shuffle(this->_data_sets[static_cast<std::uint8_t>(phase::MIXED)].begin(),
|
||||
this->_data_sets[static_cast<std::uint8_t>(phase::MIXED)].end(), mersenne_twister_engine);
|
||||
}
|
||||
|
||||
std::ostream &NumericWorkloadSet::nice_print(std::ostream &stream, const std::size_t number) noexcept
|
||||
{
|
||||
if (number >= 1000000U)
|
||||
{
|
||||
return stream << (number / 1000000U) << "m";
|
||||
}
|
||||
|
||||
if (number >= 1000U)
|
||||
{
|
||||
return stream << (number / 1000U) << "k";
|
||||
}
|
||||
|
||||
return stream << number;
|
||||
}
|
||||
|
||||
namespace benchmark {
|
||||
std::ostream &operator<<(std::ostream &stream, const NumericWorkloadSet &workload)
|
||||
{
|
||||
const auto has_fill_and_mixed = workload[phase::FILL].empty() == false && workload[phase::MIXED].empty() == false;
|
||||
|
||||
if (workload[phase::FILL].empty() == false)
|
||||
{
|
||||
stream << "fill: ";
|
||||
NumericWorkloadSet::nice_print(stream, workload[phase::FILL].size());
|
||||
}
|
||||
|
||||
if (has_fill_and_mixed)
|
||||
{
|
||||
stream << " / ";
|
||||
}
|
||||
|
||||
if (workload[phase::MIXED].empty() == false)
|
||||
{
|
||||
stream << (workload._mixed_phase_contains_update ? "mixed: " : "read-only: ");
|
||||
NumericWorkloadSet::nice_print(stream, workload[phase::MIXED].size());
|
||||
}
|
||||
|
||||
return stream << std::flush;
|
||||
}
|
||||
} // namespace benchmark
|
||||
133
repos/ealanos/src/lib/benchmark/workload_set.h
Normal file
@@ -0,0 +1,133 @@
|
||||
#pragma once
|
||||
|
||||
#include "phase.h"
|
||||
#include <array>
|
||||
#include <cstdint>
|
||||
#include <iostream>
|
||||
#include <fstream>
|
||||
#include <string>
|
||||
#include <vector>
|
||||
#include <libc/component.h>
|
||||
namespace benchmark {
|
||||
class NumericTuple
|
||||
{
|
||||
public:
|
||||
enum Type
|
||||
{
|
||||
INSERT,
|
||||
LOOKUP,
|
||||
UPDATE,
|
||||
DELETE
|
||||
};
|
||||
|
||||
constexpr NumericTuple(const Type type, const std::uint64_t key) : _type(type), _key(key) {}
|
||||
constexpr NumericTuple(const Type type, const std::uint64_t key, const std::int64_t value)
|
||||
: _type(type), _key(key), _value(value)
|
||||
{
|
||||
}
|
||||
|
||||
NumericTuple(NumericTuple &&) noexcept = default;
|
||||
NumericTuple(const NumericTuple &) = default;
|
||||
|
||||
~NumericTuple() = default;
|
||||
|
||||
NumericTuple &operator=(NumericTuple &&) noexcept = default;
|
||||
|
||||
[[nodiscard]] std::uint64_t key() const { return _key; };
|
||||
[[nodiscard]] std::int64_t value() const { return _value; }
|
||||
|
||||
bool operator==(const Type type) const { return _type == type; }
|
||||
|
||||
private:
|
||||
Type _type;
|
||||
std::uint64_t _key;
|
||||
std::int64_t _value = 0;
|
||||
};
|
||||
|
||||
class NumericWorkloadSet
|
||||
{
|
||||
friend std::ostream &operator<<(std::ostream &stream, const NumericWorkloadSet &workload_set);
|
||||
friend class Fill_thread;
|
||||
friend class Mixed_thread;
|
||||
|
||||
public:
|
||||
NumericWorkloadSet(Libc::Env &env) : _env(env) {}
|
||||
~NumericWorkloadSet() = default;
|
||||
Libc::Env &_env;
|
||||
|
||||
void build(const std::string &fill_workload_file, const std::string &mixed_workload_file);
|
||||
void build(std::uint64_t fill_inserts, std::uint64_t mixed_inserts, std::uint64_t mixed_lookups,
|
||||
std::uint64_t mixed_updates, std::uint64_t mixed_deletes);
|
||||
void shuffle();
|
||||
|
||||
[[nodiscard]] const std::vector<NumericTuple> &fill() const noexcept { return _data_sets[0]; }
|
||||
[[nodiscard]] const std::vector<NumericTuple> &mixed() const noexcept { return _data_sets[1]; }
|
||||
const std::vector<NumericTuple> &operator[](const phase phase) const noexcept
|
||||
{
|
||||
return _data_sets[static_cast<std::uint16_t>(phase)];
|
||||
}
|
||||
|
||||
explicit operator bool() const { return fill().empty() == false || mixed().empty() == false; }
|
||||
|
||||
private:
|
||||
std::array<std::vector<NumericTuple>, 2> _data_sets;
|
||||
bool _mixed_phase_contains_update = false;
|
||||
|
||||
static std::ostream &nice_print(std::ostream &stream, std::size_t number) noexcept;
|
||||
};
|
||||
|
||||
class Fill_thread : public Genode::Thread
|
||||
{
|
||||
private:
|
||||
//Genode::Mutex &_mutex;
|
||||
const std::string _fill_workload_file; // stored by value: the constructor's by-value argument would otherwise dangle
|
||||
bool (*parse)(std::ifstream &, std::vector<NumericTuple> &);
|
||||
NumericWorkloadSet &_workload_set;
|
||||
|
||||
public:
|
||||
Fill_thread(Libc::Env &env, Genode::Mutex &mutex, std::string fill_workload_name, bool (*parse)(std::ifstream&, std::vector<NumericTuple>&), NumericWorkloadSet &workload_set)
|
||||
: Genode::Thread(env, Name("btree::fill_thread"), 4*4096), _fill_workload_file(fill_workload_name), _workload_set(workload_set)
|
||||
{
|
||||
this->parse = parse;
|
||||
}
|
||||
|
||||
void entry() {
|
||||
std::ifstream fill_file(_fill_workload_file);
|
||||
if (fill_file.good()) {
|
||||
parse(fill_file, _workload_set._data_sets[static_cast<std::size_t>(phase::FILL)]);
|
||||
} else {
|
||||
//_mutex.acquire();
|
||||
std::cerr << "Could not open workload file '" << _fill_workload_file << "'." << std::endl;
|
||||
//_mutex.release();
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
class Mixed_thread : public Genode::Thread
|
||||
{
|
||||
private:
|
||||
const std::string _mixed_workload_file; // stored by value: the constructor's by-value argument would otherwise dangle
|
||||
bool (*parse)(std::ifstream &, std::vector<NumericTuple> &);
|
||||
NumericWorkloadSet &_workload_set;
|
||||
|
||||
public:
|
||||
Mixed_thread(Libc::Env &env, Genode::Mutex &mutex, std::string mixed_workload_name, bool (*parse)(std::ifstream&, std::vector<NumericTuple>&), NumericWorkloadSet &workload_set)
|
||||
: Genode::Thread(env, Name("btree::mixed_thread"), 4*4096),
|
||||
_mixed_workload_file(mixed_workload_name), _workload_set(workload_set)
|
||||
{
|
||||
this->parse = parse;
|
||||
}
|
||||
|
||||
void entry()
|
||||
{
|
||||
std::ifstream mixed_file(_mixed_workload_file);
|
||||
if (mixed_file.good()) {
|
||||
_workload_set._mixed_phase_contains_update = parse(mixed_file, _workload_set._data_sets[static_cast<std::size_t>(phase::MIXED)]);
|
||||
} else {
|
||||
//_mutex.acquire();
|
||||
std::cerr << "Could not open workload file '" << _mixed_workload_file << "'." << std::endl;
|
||||
//_mutex.release();
|
||||
}
|
||||
}
|
||||
};
|
||||
} // namespace benchmark
|
||||
366
repos/ealanos/src/lib/db/index/blinktree/b_link_tree.h
Normal file
@@ -0,0 +1,366 @@
|
||||
#pragma once
|
||||
|
||||
#include "config.h"
|
||||
#include "node.h"
|
||||
#include "node_consistency_checker.h"
|
||||
#include "node_iterator.h"
|
||||
#include "node_statistics.h"
|
||||
#include <atomic>
|
||||
#include <cassert>
|
||||
#include <cstdlib>
|
||||
#include <fstream>
|
||||
#include <iostream>
|
||||
#include <json.hpp>
|
||||
#include <mx/resource/resource.h>
|
||||
#include <mx/tasking/runtime.h>
|
||||
#include <utility>
|
||||
#include <vector>
|
||||
|
||||
namespace db::index::blinktree {
|
||||
|
||||
template <typename K, typename V> class BLinkTree
|
||||
{
|
||||
public:
|
||||
BLinkTree(const mx::synchronization::isolation_level isolation_level,
|
||||
const mx::synchronization::protocol preferred_synchronization_protocol)
|
||||
: _isolation_level(isolation_level), _preferred_synchronization_protocol(preferred_synchronization_protocol),
|
||||
_root(create_node(NodeType::Leaf, mx::resource::ptr{}, true))
|
||||
{
|
||||
}
|
||||
|
||||
~BLinkTree() { mx::tasking::runtime::delete_resource<Node<K, V>>(_root); }
|
||||
|
||||
/**
|
||||
* @return Root node of the tree.
|
||||
*/
|
||||
[[nodiscard]] mx::resource::ptr root() const { return _root; }
|
||||
|
||||
/**
|
||||
* @return Height of the tree.
|
||||
*/
|
||||
[[nodiscard]] std::uint16_t height() const { return _height; }
|
||||
|
||||
/**
|
||||
* @return True, when the tree does not contain any value.
|
||||
*/
|
||||
[[nodiscard]] bool empty() const
|
||||
{
|
||||
return static_cast<bool>(_root) == false || _root.template get<Node<K, V>>()->size() == 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates a node of type inner.
|
||||
*
|
||||
* @param is_branch True, when the children of the new inner node will be leaf nodes.
|
||||
* @param parent Parent of the new inner node.
|
||||
* @param is_root True, when the new inner node will be the root.
|
||||
* @return Inner node.
|
||||
*/
|
||||
[[nodiscard]] mx::resource::ptr create_inner_node(const bool is_branch, const mx::resource::ptr parent,
|
||||
const bool is_root = false) const
|
||||
{
|
||||
const auto inner_type = is_branch ? NodeType::Inner | NodeType::Branch : NodeType::Inner;
|
||||
return create_node(inner_type, parent, is_root);
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates a node of type leaf.
|
||||
*
|
||||
* @param parent Parent of the new leaf node.
|
||||
* @return Leaf node.
|
||||
*/
|
||||
[[nodiscard]] mx::resource::ptr create_leaf_node(const mx::resource::ptr parent) const
|
||||
{
|
||||
return create_node(NodeType::Leaf, parent, false);
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates a new root node, containing two separators (to the left and right).
|
||||
* The new root node will be set in the tree.
|
||||
*
|
||||
* @param left Link to the "smaller" child node.
|
||||
* @param right Link to the "greater" child node.
|
||||
* @param key Separator key.
|
||||
*/
|
||||
void create_new_root(mx::resource::ptr left, mx::resource::ptr right, K key);
|
||||
|
||||
/**
|
||||
* Splits an inner node.
|
||||
*
|
||||
* @param inner_node Node to split.
|
||||
* @param key Key to insert after split.
|
||||
* @param separator Separator to insert after split.
|
||||
* @return Pointer and high key of the new node.
|
||||
*/
|
||||
std::pair<mx::resource::ptr, K> split(mx::resource::ptr inner_node, K key, mx::resource::ptr separator) const;
|
||||
|
||||
/**
|
||||
* Splits a leaf node.
|
||||
*
|
||||
* @param leaf_node Node to split.
|
||||
* @param key Key to insert after split.
|
||||
* @param value Value to insert after split.
|
||||
* @return Pointer to the leaf node and key for parent.
|
||||
*/
|
||||
std::pair<mx::resource::ptr, K> split(mx::resource::ptr leaf_node, K key, V value) const;
|
||||
|
||||
/**
|
||||
* @return Begin iterator for iterating over nodes.
|
||||
*/
|
||||
NodeIterator<K, V> begin() const { return NodeIterator(mx::resource::ptr_cast<Node<K, V>>(_root)); }
|
||||
|
||||
/**
|
||||
* @return End iterator (aka empty node iterator).
|
||||
*/
|
||||
NodeIterator<K, V> end() const { return {}; }
|
||||
|
||||
/**
|
||||
* Checks the consistency of the tree.
|
||||
*/
|
||||
void check() const;
|
||||
|
||||
/**
|
||||
* Dumps statistics such as the height, the number of (inner/leaf) nodes, and the number of records.
|
||||
*/
|
||||
void print_statistics() const;
|
||||
|
||||
explicit operator nlohmann::json() const
|
||||
{
|
||||
nlohmann::json out;
|
||||
out["height"] = _height;
|
||||
out["root"] = node_to_json(_root);
|
||||
|
||||
return out;
|
||||
}
|
||||
|
||||
protected:
|
||||
// Height of the tree.
|
||||
std::uint8_t _height = 1;
|
||||
|
||||
// Isolation of tasks accessing a node.
|
||||
const mx::synchronization::isolation_level _isolation_level;
|
||||
|
||||
// Select a preferred method for synchronization.
|
||||
const mx::synchronization::protocol _preferred_synchronization_protocol;
|
||||
|
||||
// Pointer to the root.
|
||||
alignas(64) mx::resource::ptr _root;
|
||||
|
||||
/**
|
||||
* Creates a new node.
|
||||
*
|
||||
* @param node_type Type of the node.
|
||||
* @param parent Parent of the node.
|
||||
* @param is_root True, if the new node will be the root.
|
||||
* @return Pointer to the new node.
|
||||
*/
|
||||
[[nodiscard]] mx::resource::ptr create_node(const NodeType node_type, const mx::resource::ptr parent,
|
||||
const bool is_root) const
|
||||
{
|
||||
const auto is_inner = static_cast<bool>(node_type & NodeType::Inner);
|
||||
return mx::tasking::runtime::new_resource<Node<K, V>>(
|
||||
config::node_size(),
|
||||
mx::resource::hint{_isolation_level, _preferred_synchronization_protocol,
|
||||
predict_access_frequency(is_inner, is_root), predict_read_write_ratio(is_inner)},
|
||||
node_type, parent);
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates a hint for tasking regarding usage of the node.
|
||||
*
|
||||
* @param is_inner True, if the node is an inner node.
|
||||
* @param is_root True, if the node is the root.
|
||||
* @return Hint for usage prediction which will be used for allocating resources.
|
||||
*/
|
||||
[[nodiscard]] static mx::resource::hint::expected_access_frequency predict_access_frequency(const bool is_inner,
|
||||
const bool is_root)
|
||||
{
|
||||
if (is_root)
|
||||
{
|
||||
return mx::resource::hint::expected_access_frequency::excessive;
|
||||
}
|
||||
|
||||
if (is_inner)
|
||||
{
|
||||
return mx::resource::hint::expected_access_frequency::high;
|
||||
}
|
||||
|
||||
return mx::resource::hint::expected_access_frequency::normal;
|
||||
}
|
||||
|
||||
/**
|
||||
* Create a hint for the read/write ratio.
|
||||
* Inner nodes will be written very little while
|
||||
* leaf nodes will be written more often.
|
||||
*
|
||||
* @param is_inner True, when the node is an inner node.
|
||||
* @return Predicted read/write ratio.
|
||||
*/
|
||||
[[nodiscard]] static mx::resource::hint::expected_read_write_ratio predict_read_write_ratio(const bool is_inner)
|
||||
{
|
||||
return is_inner ? mx::resource::hint::expected_read_write_ratio::heavy_read
|
||||
: mx::resource::hint::expected_read_write_ratio::balanced;
|
||||
}
|
||||
|
||||
/**
|
||||
* Serializes a tree node to json format.
|
||||
*
|
||||
* @param node Node to serialize.
|
||||
* @return JSON representation of the node.
|
||||
*/
|
||||
[[nodiscard]] nlohmann::json node_to_json(mx::resource::ptr node) const
|
||||
{
|
||||
auto out = nlohmann::json();
|
||||
auto node_ptr = mx::resource::ptr_cast<Node<K, V>>(node);
|
||||
|
||||
out["channel_id"] = node.channel_id();
|
||||
out["is_leaf"] = node_ptr->is_leaf();
|
||||
out["size"] = node_ptr->size();
|
||||
|
||||
if (node_ptr->is_inner())
|
||||
{
|
||||
auto children = nlohmann::json::array();
|
||||
for (auto i = 0U; i <= node_ptr->size(); ++i)
|
||||
{
|
||||
children.push_back(node_to_json(node_ptr->separator(i)));
|
||||
}
|
||||
out["children"] = children;
|
||||
}
|
||||
|
||||
return out;
|
||||
}
|
||||
};
|
||||
|
||||
template <typename K, typename V>
|
||||
void BLinkTree<K, V>::create_new_root(const mx::resource::ptr left, const mx::resource::ptr right, const K key)
|
||||
{
|
||||
const auto is_left_inner = mx::resource::ptr_cast<Node<K, V>>(left)->is_inner();
|
||||
mx::tasking::runtime::modify_predicted_usage(left, predict_access_frequency(is_left_inner, true),
|
||||
predict_access_frequency(is_left_inner, false));
|
||||
|
||||
auto root = this->create_inner_node(mx::resource::ptr_cast<Node<K, V>>(left)->is_leaf(), mx::resource::ptr{}, true);
|
||||
|
||||
left.template get<Node<K, V>>()->parent(root);
|
||||
right.template get<Node<K, V>>()->parent(root);
|
||||
|
||||
root.template get<Node<K, V>>()->separator(0, left);
|
||||
root.template get<Node<K, V>>()->insert(0, right, key);
|
||||
|
||||
this->_height++;
|
||||
this->_root = root;
|
||||
}
|
||||
|
||||
template <typename K, typename V>
|
||||
std::pair<mx::resource::ptr, K> BLinkTree<K, V>::split(const mx::resource::ptr inner_node, const K key,
|
||||
const mx::resource::ptr separator) const
|
||||
{
|
||||
constexpr std::uint16_t left_size = InnerNode<K, V>::max_keys / 2;
|
||||
constexpr std::uint16_t right_size = InnerNode<K, V>::max_keys - left_size;
|
||||
|
||||
auto node_ptr = mx::resource::ptr_cast<Node<K, V>>(inner_node);
|
||||
|
||||
K key_up;
|
||||
auto new_inner_node = this->create_inner_node(node_ptr->is_branch(), node_ptr->parent());
|
||||
auto new_node_ptr = mx::resource::ptr_cast<Node<K, V>>(new_inner_node);
|
||||
|
||||
new_node_ptr->high_key(node_ptr->high_key());
|
||||
|
||||
if (key < node_ptr->inner_key(left_size - 1))
|
||||
{
|
||||
node_ptr->move(new_inner_node, left_size, right_size);
|
||||
new_node_ptr->separator(0, node_ptr->separator(left_size));
|
||||
new_node_ptr->size(right_size);
|
||||
node_ptr->size(left_size - 1);
|
||||
key_up = node_ptr->inner_key(left_size - 1);
|
||||
const auto index = node_ptr->index(key);
|
||||
separator.template get<Node<K, V>>()->parent(inner_node);
|
||||
node_ptr->insert(index, separator, key);
|
||||
}
|
||||
else if (key < node_ptr->inner_key(left_size))
|
||||
{
|
||||
node_ptr->move(new_inner_node, left_size, right_size);
|
||||
new_node_ptr->separator(0, separator);
|
||||
key_up = key;
|
||||
node_ptr->size(left_size);
|
||||
new_node_ptr->size(right_size);
|
||||
}
|
||||
else
|
||||
{
|
||||
node_ptr->move(new_inner_node, left_size + 1, right_size - 1);
|
||||
new_node_ptr->separator(0, node_ptr->separator(left_size + 1));
|
||||
node_ptr->size(left_size);
|
||||
new_node_ptr->size(right_size - 1);
|
||||
key_up = node_ptr->inner_key(left_size);
|
||||
|
||||
const auto index = new_node_ptr->index(key);
|
||||
new_node_ptr->insert(index, separator, key);
|
||||
}
|
||||
|
||||
new_node_ptr->right_sibling(node_ptr->right_sibling());
|
||||
node_ptr->right_sibling(new_inner_node);
|
||||
node_ptr->high_key(key_up);
|
||||
|
||||
for (auto index = 0U; index <= new_node_ptr->size(); ++index)
|
||||
{
|
||||
new_node_ptr->separator(index).template get<Node<K, V>>()->parent(new_inner_node);
|
||||
}
|
||||
|
||||
return {new_inner_node, key_up};
|
||||
}
|
||||
|
||||
template <typename K, typename V>
|
||||
std::pair<mx::resource::ptr, K> BLinkTree<K, V>::split(const mx::resource::ptr leaf_node_ptr, const K key,
|
||||
const V value) const
|
||||
{
|
||||
auto *leaf_node = mx::resource::ptr_cast<Node<K, V>>(leaf_node_ptr);
|
||||
|
||||
constexpr std::uint16_t left_size = LeafNode<K, V>::max_items / 2;
|
||||
constexpr std::uint16_t right_size = LeafNode<K, V>::max_items - left_size;
|
||||
|
||||
auto new_leaf_node_ptr = this->create_leaf_node(leaf_node->parent());
|
||||
auto *new_leaf_node = mx::resource::ptr_cast<Node<K, V>>(new_leaf_node_ptr);
|
||||
|
||||
leaf_node->move(new_leaf_node_ptr, left_size, right_size);
|
||||
if (leaf_node->right_sibling() != nullptr)
|
||||
{
|
||||
new_leaf_node->right_sibling(leaf_node->right_sibling());
|
||||
}
|
||||
new_leaf_node->high_key(leaf_node->high_key());
|
||||
new_leaf_node->size(right_size);
|
||||
leaf_node->size(left_size);
|
||||
leaf_node->right_sibling(new_leaf_node_ptr);
|
||||
|
||||
if (key < new_leaf_node->leaf_key(0))
|
||||
{
|
||||
leaf_node->insert(leaf_node->index(key), value, key);
|
||||
}
|
||||
else
|
||||
{
|
||||
new_leaf_node->insert(new_leaf_node->index(key), value, key);
|
||||
}
|
||||
|
||||
leaf_node->high_key(new_leaf_node->leaf_key(0));
|
||||
|
||||
return {new_leaf_node_ptr, new_leaf_node->leaf_key(0)};
|
||||
}
|
||||
|
||||
template <typename K, typename V> void BLinkTree<K, V>::print_statistics() const
|
||||
{
|
||||
NodeStatistics<K, V> statistics(this->height());
|
||||
|
||||
for (auto node : *this)
|
||||
{
|
||||
statistics += node;
|
||||
}
|
||||
|
||||
std::cout << statistics << std::endl;
|
||||
}
|
||||
|
||||
template <typename K, typename V> void BLinkTree<K, V>::check() const
|
||||
{
|
||||
for (auto node : *this)
|
||||
{
|
||||
NodeConsistencyChecker<K, V>::check_and_print_errors(node, std::cerr);
|
||||
}
|
||||
}
|
||||
} // namespace db::index::blinktree
|
||||
11
repos/ealanos/src/lib/db/index/blinktree/config.h
Normal file
@@ -0,0 +1,11 @@
|
||||
#pragma once
|
||||
|
||||
#include <mx/synchronization/synchronization.h>
|
||||
|
||||
namespace db::index::blinktree {
|
||||
class config
|
||||
{
|
||||
public:
|
||||
static constexpr auto node_size() { return 1024U; }
|
||||
};
|
||||
} // namespace db::index::blinktree
|
||||
63
repos/ealanos/src/lib/db/index/blinktree/insert_separator_task.h
Normal file
@@ -0,0 +1,63 @@
|
||||
#pragma once
|
||||
|
||||
#include "b_link_tree.h"
|
||||
#include "node.h"
|
||||
#include "task.h"
|
||||
#include <mx/tasking/runtime.h>
|
||||
|
||||
namespace db::index::blinktree {
|
||||
template <typename K, typename V, class L> class InsertSeparatorTask final : public Task<K, V, L>
|
||||
{
|
||||
public:
|
||||
constexpr InsertSeparatorTask(const K key, const mx::resource::ptr separator, BLinkTree<K, V> *tree,
|
||||
L &listener) noexcept
|
||||
: Task<K, V, L>(key, listener), _tree(tree), _separator(separator)
|
||||
{
|
||||
}
|
||||
|
||||
~InsertSeparatorTask() override = default;
|
||||
|
||||
mx::tasking::TaskResult execute(std::uint16_t core_id, std::uint16_t channel_id) override;
|
||||
|
||||
private:
|
||||
BLinkTree<K, V> *_tree;
|
||||
mx::resource::ptr _separator;
|
||||
};
|
||||
|
||||
template <typename K, typename V, class L>
|
||||
mx::tasking::TaskResult InsertSeparatorTask<K, V, L>::execute(const std::uint16_t core_id,
|
||||
const std::uint16_t /*channel_id*/)
|
||||
{
|
||||
auto *annotated_node = this->annotated_resource().template get<Node<K, V>>();
|
||||
|
||||
// Is the node related to the key?
|
||||
if (annotated_node->high_key() <= this->_key)
|
||||
{
|
||||
this->annotate(annotated_node->right_sibling(), config::node_size() / 4U);
|
||||
return mx::tasking::TaskResult::make_succeed(this);
|
||||
}
|
||||
|
||||
// At this point, we are accessing the related leaf and we are in writer mode.
|
||||
if (!annotated_node->full())
|
||||
{
|
||||
const auto index = annotated_node->index(this->_key);
|
||||
annotated_node->insert(index, this->_separator, this->_key);
|
||||
this->_separator.template get<Node<K, V>>()->parent(this->annotated_resource());
|
||||
this->_listener.inserted(core_id, this->_key, 0U);
|
||||
return mx::tasking::TaskResult::make_remove();
|
||||
}
|
||||
|
||||
auto [right, key] = this->_tree->split(this->annotated_resource(), this->_key, this->_separator);
|
||||
if (annotated_node->parent() != nullptr)
|
||||
{
|
||||
this->_separator = right;
|
||||
this->_key = key;
|
||||
this->annotate(annotated_node->parent(), config::node_size() / 4U);
|
||||
return mx::tasking::TaskResult::make_succeed(this);
|
||||
}
|
||||
|
||||
this->_tree->create_new_root(this->annotated_resource(), right, key);
|
||||
this->_listener.inserted(core_id, this->_key, 0U);
|
||||
return mx::tasking::TaskResult::make_remove();
|
||||
}
|
||||
} // namespace db::index::blinktree
|
||||
85
repos/ealanos/src/lib/db/index/blinktree/insert_value_task.h
Normal file
@@ -0,0 +1,85 @@
|
||||
#pragma once
|
||||
|
||||
#include "b_link_tree.h"
|
||||
#include "insert_separator_task.h"
|
||||
#include "node.h"
|
||||
#include "task.h"
|
||||
#include <mx/tasking/runtime.h>
|
||||
#include <vector>
|
||||
|
||||
namespace db::index::blinktree {
|
||||
template <typename K, typename V, class L> class InsertValueTask final : public Task<K, V, L>
|
||||
{
|
||||
public:
|
||||
constexpr InsertValueTask(const K key, const V value, BLinkTree<K, V> *tree, L &listener) noexcept
|
||||
: Task<K, V, L>(key, listener), _tree(tree), _value(value)
|
||||
{
|
||||
}
|
||||
|
||||
~InsertValueTask() override = default;
|
||||
|
||||
mx::tasking::TaskResult execute(std::uint16_t core_id, std::uint16_t channel_id) override;
|
||||
|
||||
private:
|
||||
BLinkTree<K, V> *_tree;
|
||||
const V _value;
|
||||
};
|
||||
|
||||
template <typename K, typename V, class L>
|
||||
mx::tasking::TaskResult InsertValueTask<K, V, L>::execute(const std::uint16_t core_id,
|
||||
const std::uint16_t /*channel_id*/)
|
||||
{
|
||||
auto *annotated_node = this->annotated_resource().template get<Node<K, V>>();
|
||||
|
||||
// Is the node related to the key?
|
||||
if (annotated_node->high_key() <= this->_key)
|
||||
{
|
||||
this->annotate(annotated_node->right_sibling(), config::node_size() / 4U);
|
||||
return mx::tasking::TaskResult::make_succeed(this);
|
||||
}
|
||||
|
||||
// If we are accessing an inner node, pick the next related child.
|
||||
if (annotated_node->is_inner())
|
||||
{
|
||||
const auto child = annotated_node->child(this->_key);
|
||||
this->annotate(child, config::node_size() / 4U);
|
||||
this->is_readonly(!annotated_node->is_branch());
|
||||
return mx::tasking::TaskResult::make_succeed(this);
|
||||
}
|
||||
|
||||
// Is it a leaf, but we are still reading? Upgrade to writer.
|
||||
if (annotated_node->is_leaf() && this->is_readonly())
|
||||
{
|
||||
this->is_readonly(false);
|
||||
return mx::tasking::TaskResult::make_succeed(this);
|
||||
}
|
||||
|
||||
// At this point, we are accessing the related leaf and we are in writer mode.
|
||||
const auto index = annotated_node->index(this->_key);
|
||||
if (index < annotated_node->size() && annotated_node->leaf_key(index) == this->_key)
|
||||
{
|
||||
this->_listener.inserted(core_id, this->_key, this->_value);
|
||||
return mx::tasking::TaskResult::make_remove();
|
||||
}
|
||||
|
||||
if (annotated_node->full() == false)
|
||||
{
|
||||
annotated_node->insert(index, this->_value, this->_key);
|
||||
this->_listener.inserted(core_id, this->_key, this->_value);
|
||||
return mx::tasking::TaskResult::make_remove();
|
||||
}
|
||||
|
||||
auto [right, key] = this->_tree->split(this->annotated_resource(), this->_key, this->_value);
|
||||
if (annotated_node->parent() != nullptr)
|
||||
{
|
||||
auto *task = mx::tasking::runtime::new_task<InsertSeparatorTask<K, V, L>>(core_id, key, right, this->_tree,
|
||||
this->_listener);
|
||||
task->annotate(annotated_node->parent(), config::node_size() / 4U);
|
||||
return mx::tasking::TaskResult::make_succeed_and_remove(task);
|
||||
}
|
||||
|
||||
this->_tree->create_new_root(this->annotated_resource(), right, key);
|
||||
this->_listener.inserted(core_id, this->_key, this->_value);
|
||||
return mx::tasking::TaskResult::make_remove();
|
||||
}
|
||||
} // namespace db::index::blinktree
|
||||
13
repos/ealanos/src/lib/db/index/blinktree/listener.h
Normal file
@@ -0,0 +1,13 @@
|
||||
#pragma once
|
||||
|
||||
namespace db::index::blinktree {
|
||||
template <typename K, typename V> class Listener
|
||||
{
|
||||
public:
|
||||
virtual void inserted(std::uint16_t core_id, K key, V value) = 0;
|
||||
virtual void updated(std::uint16_t core_id, K key, V value) = 0;
|
||||
virtual void removed(std::uint16_t core_id, K key) = 0;
|
||||
virtual void found(std::uint16_t core_id, K key, V value) = 0;
|
||||
virtual void missing(std::uint16_t core_id, K key) = 0;
|
||||
};
|
||||
} // namespace db::index::blinktree
|
||||
54
repos/ealanos/src/lib/db/index/blinktree/lookup_task.h
Normal file
@@ -0,0 +1,54 @@
|
||||
#pragma once
|
||||
|
||||
#include "b_link_tree.h"
|
||||
#include "insert_separator_task.h"
|
||||
#include "node.h"
|
||||
#include "task.h"
|
||||
#include <optional>
|
||||
|
||||
namespace db::index::blinktree {
|
||||
template <typename K, typename V, class L> class LookupTask final : public Task<K, V, L>
|
||||
{
|
||||
public:
|
||||
LookupTask(const K key, L &listener) noexcept : Task<K, V, L>(key, listener) {}
|
||||
|
||||
~LookupTask() override { this->_listener.found(_core_id, this->_key, _value); }
|
||||
|
||||
mx::tasking::TaskResult execute(std::uint16_t core_id, std::uint16_t channel_id) override;
|
||||
|
||||
private:
|
||||
V _value{}; // value-initialized so the listener never sees an indeterminate value if the key is missing
|
||||
std::uint16_t _core_id{0U};
|
||||
};
|
||||
|
||||
template <typename K, typename V, typename L>
|
||||
mx::tasking::TaskResult LookupTask<K, V, L>::execute(const std::uint16_t core_id, const std::uint16_t /*channel_id*/)
|
||||
{
|
||||
auto *annotated_node = this->annotated_resource().template get<Node<K, V>>();
|
||||
|
||||
// Is the node related to the key?
|
||||
if (annotated_node->high_key() <= this->_key)
|
||||
{
|
||||
this->annotate(annotated_node->right_sibling(), config::node_size() / 4U);
|
||||
return mx::tasking::TaskResult::make_succeed(this);
|
||||
}
|
||||
|
||||
// If we are accessing an inner node, pick the next related child.
|
||||
if (annotated_node->is_inner())
|
||||
{
|
||||
const auto child = annotated_node->child(this->_key);
|
||||
this->annotate(child, config::node_size() / 4U);
|
||||
return mx::tasking::TaskResult::make_succeed(this);
|
||||
}
|
||||
|
||||
// We are accessing the correct leaf.
|
||||
const auto index = annotated_node->index(this->_key);
|
||||
if (annotated_node->leaf_key(index) == this->_key)
|
||||
{
|
||||
this->_value = annotated_node->value(index);
|
||||
}
|
||||
_core_id = core_id;
|
||||
|
||||
return mx::tasking::TaskResult::make_remove();
|
||||
}
|
||||
} // namespace db::index::blinktree
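
A hedged sketch of how such a lookup is typically issued: the task is allocated through the tasking runtime and annotated with the tree's root, exactly as the insert path does for its follow-up task. The helper function itself is hypothetical, and the final hand-over to the scheduler is omitted because the corresponding runtime call is not part of this diff:

    #include "lookup_task.h"

    template <typename K, typename V, class L>
    void start_lookup(db::index::blinktree::BLinkTree<K, V> &tree, L &listener, const K key,
                      const std::uint16_t core_id)
    {
        using namespace db::index::blinktree;

        // Allocate the task on the given core, mirroring the new_task<> call in insert_value_task.h.
        auto *task = mx::tasking::runtime::new_task<LookupTask<K, V, L>>(core_id, key, listener);

        // Start the traversal at the root; the prefetch size hint matches the one used by the tasks above.
        task->annotate(tree.root(), config::node_size() / 4U);

        // Scheduling the task (handing it to the runtime) is intentionally left out here.
    }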
|
||||
388
repos/ealanos/src/lib/db/index/blinktree/node.h
Normal file
@@ -0,0 +1,388 @@
|
||||
#pragma once
|
||||
#include "config.h"
|
||||
#include <array>
|
||||
#include <atomic>
|
||||
#include <cstdint>
|
||||
#include <cstring>
|
||||
#include <mx/resource/resource.h>
|
||||
#include <mx/resource/resource_interface.h>
|
||||
#include <mx/tasking/runtime.h>
|
||||
|
||||
namespace db::index::blinktree {
|
||||
|
||||
template <typename K, typename V> class Node;
|
||||
|
||||
/**
|
||||
* Node type.
|
||||
*/
|
||||
enum NodeType : std::uint8_t
|
||||
{
|
||||
Leaf = 1U << 0U,
|
||||
Inner = 1U << 1U,
|
||||
Branch = 1U << 2U
|
||||
};
|
||||
|
||||
inline NodeType operator|(const NodeType a, const NodeType b) noexcept
|
||||
{
|
||||
return static_cast<NodeType>(static_cast<std::uint8_t>(a) | static_cast<std::uint8_t>(b));
|
||||
}
|
||||
|
||||
/**
|
||||
* Header for every node
|
||||
*/
|
||||
template <typename K, typename V> struct NodeHeader
|
||||
{
|
||||
static constexpr std::uint16_t node_size =
|
||||
config::node_size() - sizeof(NodeHeader<K, V>) - sizeof(mx::resource::ResourceInterface);
|
||||
|
||||
// Type of the node.
|
||||
const NodeType node_type;
|
||||
|
||||
// High key.
|
||||
K high_key{std::numeric_limits<K>::max()};
|
||||
|
||||
// Link to the right sibling.
|
||||
mx::resource::ptr right_sibling;
|
||||
|
||||
// Link to the parent. Alignment needed by some CPU architectures (e.g. arm) because of atomicity.
|
||||
alignas(8) std::atomic<mx::resource::ptr> parent;
|
||||
|
||||
// Number of records in the node.
|
||||
std::uint16_t size{0U};
|
||||
|
||||
[[maybe_unused]] NodeHeader(const NodeType node_type_, const mx::resource::ptr parent_) : node_type(node_type_)
|
||||
{
|
||||
this->parent.store(parent_);
|
||||
}
|
||||
|
||||
~NodeHeader() = default;
|
||||
#ifdef __GNUG__
|
||||
};
|
||||
#else
|
||||
} __attribute__((packed));
|
||||
#endif
|
||||
|
||||
/**
|
||||
* Representation of an inner node.
|
||||
*/
|
||||
template <typename K, typename V> struct InnerNode
|
||||
{
|
||||
static constexpr std::uint16_t max_keys =
|
||||
(NodeHeader<K, V>::node_size - sizeof(mx::resource::ptr)) / (sizeof(K) + sizeof(mx::resource::ptr));
|
||||
static constexpr std::uint16_t max_separators = max_keys + 1;
|
||||
|
||||
// Memory for keys.
|
||||
std::array<K, InnerNode::max_keys> keys;
|
||||
|
||||
// Memory for separators.
|
||||
std::array<mx::resource::ptr, InnerNode::max_separators> separators;
|
||||
};
|
||||
|
||||
/**
|
||||
* Representation of a leaf node.
|
||||
*/
|
||||
template <typename K, typename V> struct LeafNode
|
||||
{
|
||||
static constexpr std::uint16_t max_items = NodeHeader<K, V>::node_size / (sizeof(K) + sizeof(V));
|
||||
|
||||
// Memory for keys.
|
||||
std::array<K, LeafNode::max_items> keys;
|
||||
|
||||
// Memory for payloads.
|
||||
std::array<V, LeafNode::max_items> values;
|
||||
};
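
A short worked example of the capacity formulas above, assuming 8-byte keys and values and the 1024-byte node size from config.h; the concrete header overhead is only illustrative, since sizeof(NodeHeader) and the ResourceInterface vtable depend on the target:

    // node_size                 = 1024 - sizeof(NodeHeader<K, V>) - sizeof(mx::resource::ResourceInterface)
    // LeafNode::max_items       = node_size / (sizeof(K) + sizeof(V)) = node_size / 16
    // InnerNode::max_keys       = (node_size - sizeof(ptr)) / (sizeof(K) + sizeof(ptr))
    // InnerNode::max_separators = max_keys + 1
    //
    // With ~48 bytes of combined header overhead this gives roughly 61 leaf records
    // and 60 keys (61 separators) per 1 KiB inner node.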
|
||||
|
||||
/**
|
||||
* Abstract node representation.
|
||||
*/
|
||||
template <typename K, typename V> class Node final : public mx::resource::ResourceInterface
|
||||
{
|
||||
public:
|
||||
constexpr Node(const NodeType node_type, const mx::resource::ptr parent) : _header(node_type, parent)
|
||||
{
|
||||
        static_assert(sizeof(Node<K, V>) <= config::node_size());
    }

    ~Node() override
    {
        if (is_inner())
        {
            for (auto i = 0U; i <= _header.size; ++i)
            {
                if (_inner_node.separators[i] != nullptr)
                {
                    mx::tasking::runtime::delete_resource<Node<K, V>>(_inner_node.separators[i]);
                }
            }
        }
    }

    void on_reclaim() override { this->~Node(); }

    /**
     * @return True, if this node is a leaf node.
     */
    [[nodiscard]] bool is_leaf() const noexcept { return _header.node_type & NodeType::Leaf; }

    /**
     * @return True, if this node is an inner node.
     */
    [[nodiscard]] bool is_inner() const noexcept { return _header.node_type & NodeType::Inner; }

    /**
     * @return True, if this node is an inner node and children are leaf nodes.
     */
    [[nodiscard]] bool is_branch() const noexcept { return _header.node_type & NodeType::Branch; }

    /**
     * @return Number of records stored in the node.
     */
    [[nodiscard]] std::uint16_t size() const noexcept { return _header.size; }

    /**
     * Updates the number of records stored in the node.
     * @param size New number of records.
     */
    void size(const std::uint16_t size) noexcept { _header.size = size; }

    /**
     * @return High key of the node.
     */
    K high_key() const noexcept { return _header.high_key; }

    /**
     * Updates the high key.
     * @param high_key New high key.
     */
    [[maybe_unused]] void high_key(const K high_key) noexcept { _header.high_key = high_key; }

    /**
     * @return Pointer to the right sibling.
     */
    [[nodiscard]] mx::resource::ptr right_sibling() const noexcept { return _header.right_sibling; }

    /**
     * Updates the right sibling.
     * @param right_sibling Pointer to the new right sibling.
     */
    [[maybe_unused]] void right_sibling(const mx::resource::ptr right_sibling) noexcept
    {
        _header.right_sibling = right_sibling;
    }

    /**
     * @return Pointer to the parent node.
     */
    [[nodiscard]] mx::resource::ptr parent() const noexcept { return _header.parent; }

    /**
     * Updates the parent node.
     * @param parent Pointer to the new parent node.
     */
    void parent(const mx::resource::ptr parent) noexcept { _header.parent = parent; }

    /**
     * Read the value at a given index.
     * @param index Index.
     * @return Value at the index.
     */
    V value(const std::uint16_t index) const noexcept { return _leaf_node.values[index]; }

    /**
     * Update the value at a given index.
     * @param index Index.
     * @param value New value.
     */
    void value(const std::uint16_t index, const V value) noexcept { _leaf_node.values[index] = value; }

    /**
     * Read the separator at a given index.
     * @param index Index.
     * @return Separator at the index.
     */
    [[nodiscard]] mx::resource::ptr separator(const std::uint16_t index) const noexcept
    {
        return _inner_node.separators[index];
    }

    /**
     * Update the separator for a given index.
     * @param index Index.
     * @param separator New separator for the index.
     */
    void separator(const std::uint16_t index, const mx::resource::ptr separator) noexcept
    {
        _inner_node.separators[index] = separator;
    }

    /**
     * Read the key from the leaf node.
     * @param index Index.
     * @return Key at the index.
     */
    K leaf_key(const std::uint16_t index) const noexcept { return _leaf_node.keys[index]; }

    /**
     * Read the key from the inner node.
     * @param index Index.
     * @return Key at the index.
     */
    K inner_key(const std::uint16_t index) const noexcept { return _inner_node.keys[index]; }

    /**
     * @return True, if the node can not store further records.
     */
    [[nodiscard]] bool full() const noexcept
    {
        const auto max_size = is_leaf() ? LeafNode<K, V>::max_items : InnerNode<K, V>::max_keys;
        return _header.size >= max_size;
    }

    /**
     * Calculates the index for a given key.
     * @param key Key.
     * @return Index for the key.
     */
    std::uint16_t index(K key) const noexcept;

    /**
     * Calculates the child for a given key using binary search.
     * @param key Key.
     * @return Child for the key.
     */
    mx::resource::ptr child(K key) const noexcept;

    /**
     * Inserts a record into an inner node.
     * @param index Index.
     * @param separator Separator.
     * @param key Key.
     */
    void insert(std::uint16_t index, mx::resource::ptr separator, K key);

    /**
     * Inserts a record into a leaf node.
     * @param index Index.
     * @param value Payload.
     * @param key Key.
     */
    void insert(std::uint16_t index, V value, K key);

    /**
     * Moves a range of records into another node.
     * @param destination Other node.
     * @param from_index Start index.
     * @param count Number of records to move.
     */
    void move(mx::resource::ptr destination, std::uint16_t from_index, std::uint16_t count);

    /**
     * Searches a separator within an inner node.
     * @param separator Separator to search.
     * @return True, if the separator was found.
     */
    [[nodiscard]] bool contains(mx::resource::ptr separator) const noexcept;

private:
    NodeHeader<K, V> _header;
    union {
        InnerNode<K, V> _inner_node;
        LeafNode<K, V> _leaf_node;
    };
};

template <typename K, typename V> std::uint16_t Node<K, V>::index(const K key) const noexcept
{
    const auto keys = this->is_leaf() ? this->_leaf_node.keys.cbegin() : this->_inner_node.keys.cbegin();
    const auto iterator = std::lower_bound(keys, keys + this->size(), key);

    return std::distance(keys, iterator);
}

template <typename K, typename V> mx::resource::ptr Node<K, V>::child(const K key) const noexcept
{
    std::int16_t low = 0;
    std::int16_t high = size() - 1;
    while (low <= high)
    {
        const auto mid = (low + high) >> 1U; // Will work for size() - 1 < max(std::int32_t)/2
        if (this->inner_key(mid) <= key)
        {
            low = mid + 1;
        }
        else
        {
            high = mid - 1;
        }
    }

    return this->_inner_node.separators[high + 1U];
}

template <typename K, typename V>
void Node<K, V>::insert(const std::uint16_t index, const mx::resource::ptr separator, const K key)
{
    if (index < this->size())
    {
        const auto offset = this->size() - index;
        std::memmove(static_cast<void *>(&this->_inner_node.keys[index + 1]),
                     static_cast<void *>(&this->_inner_node.keys[index]), offset * sizeof(K));
        std::memmove(static_cast<void *>(&this->_inner_node.separators[index + 2]),
                     static_cast<void *>(&this->_inner_node.separators[index + 1]), offset * sizeof(mx::resource::ptr));
    }

    this->_inner_node.keys[index] = key;
    this->_inner_node.separators[index + 1] = separator;
    ++this->_header.size;
}

template <typename K, typename V> void Node<K, V>::insert(const std::uint16_t index, const V value, const K key)
{
    if (index < this->size())
    {
        const auto offset = this->size() - index;
        std::memmove(static_cast<void *>(&this->_leaf_node.keys[index + 1]),
                     static_cast<void *>(&this->_leaf_node.keys[index]), offset * sizeof(K));
        std::memmove(static_cast<void *>(&this->_leaf_node.values[index + 1]),
                     static_cast<void *>(&this->_leaf_node.values[index]), offset * sizeof(V));
    }

    this->_leaf_node.keys[index] = key;
    this->_leaf_node.values[index] = value;
    ++this->_header.size;
}

template <typename K, typename V>
void Node<K, V>::move(const mx::resource::ptr destination, const std::uint16_t from_index, const std::uint16_t count)
{
    auto *node = mx::resource::ptr_cast<Node<K, V>>(destination);
    if (this->is_leaf())
    {
        std::memcpy(static_cast<void *>(&node->_leaf_node.keys[0]),
                    static_cast<void *>(&this->_leaf_node.keys[from_index]), count * sizeof(K));
        std::memcpy(static_cast<void *>(&node->_leaf_node.values[0]),
                    static_cast<void *>(&this->_leaf_node.values[from_index]), count * sizeof(V));
    }
    else
    {
        std::memcpy(static_cast<void *>(&node->_inner_node.keys[0]),
                    static_cast<void *>(&this->_inner_node.keys[from_index]), count * sizeof(K));
        std::memcpy(static_cast<void *>(&node->_inner_node.separators[1]),
                    static_cast<void *>(&this->_inner_node.separators[from_index + 1]),
                    count * sizeof(mx::resource::ptr));
    }
}

template <typename K, typename V> bool Node<K, V>::contains(const mx::resource::ptr separator) const noexcept
{
    for (auto i = 0U; i <= this->size(); ++i)
    {
        if (this->_inner_node.separators[i] == separator)
        {
            return true;
        }
    }

    return false;
}
} // namespace db::index::blinktree
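The routing rule implemented by Node<K, V>::child() above is: an inner node with n keys owns n + 1 separators, and a probe key is sent to the separator at the position of the first key strictly greater than it (keys equal to a separator key go to the right-hand child). A minimal standalone sketch of that rule, using plain std containers instead of the actual mx::resource separator pointers:

    // Standalone illustration of the child-routing rule in Node<K, V>::child();
    // plain std containers stand in for the mx::resource separator pointers.
    #include <algorithm>
    #include <array>
    #include <cassert>
    #include <cstdint>
    #include <iterator>

    int main()
    {
        const std::array<std::uint64_t, 3> keys{{10U, 20U, 30U}}; // inner-node keys
        // separators[0] -> (-inf, 10), separators[1] -> [10, 20),
        // separators[2] -> [20, 30), separators[3] -> [30, high_key)

        const std::uint64_t probe = 20U;
        const auto child_index =
            std::distance(keys.cbegin(), std::upper_bound(keys.cbegin(), keys.cend(), probe));
        assert(child_index == 2); // same index the binary search in child() computes
        return 0;
    }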
@@ -0,0 +1,185 @@
#pragma once
#include <ostream>

#include "node.h"

namespace db::index::blinktree {
/**
 * Validates tree nodes and checks consistency.
 */
template <typename K, typename V> class NodeConsistencyChecker
{
public:
    /**
     * Validates the node and prints errors to the given stream.
     * @param node Node to validate.
     * @param stream Stream to print errors.
     */
    static void check_and_print_errors(Node<K, V> *node, std::ostream &stream);

private:
    static void check_high_key_valid(Node<K, V> *node, std::ostream &stream);
    static void check_key_order_valid(Node<K, V> *node, std::ostream &stream);
    static void check_no_null_separator(Node<K, V> *node, std::ostream &stream);
    static void check_children_order_valid(Node<K, V> *node, std::ostream &stream);
    static void check_level_valid(Node<K, V> *node, std::ostream &stream);
    static void check_and_print_parent(Node<K, V> *node, std::ostream &stream);
};

template <typename K, typename V>
void NodeConsistencyChecker<K, V>::check_and_print_errors(Node<K, V> *node, std::ostream &stream)
{
    check_high_key_valid(node, stream);
    check_key_order_valid(node, stream);
    check_no_null_separator(node, stream);
    check_children_order_valid(node, stream);
    check_level_valid(node, stream);

    // check_and_print_parent(node, stream);
}

template <typename K, typename V>
void NodeConsistencyChecker<K, V>::check_high_key_valid(Node<K, V> *node, std::ostream &stream)
{
    if (node->is_leaf())
    {
        if (node->leaf_key(node->size() - 1) >= node->high_key())
        {
            stream << "[HighKey ] Leaf " << node << ": Key[" << node->size() - 1
                   << "] (=" << node->leaf_key(node->size() - 1) << ") >= " << node->high_key() << std::endl;
        }
    }
    else
    {
        if (node->inner_key(node->size() - 1) >= node->high_key())
        {
            stream << "[HighKey ] Inner " << node << ": Key[" << node->size() - 1
                   << "] (=" << node->inner_key(node->size() - 1) << ") >= " << node->high_key() << std::endl;
        }
    }
}

template <typename K, typename V>
void NodeConsistencyChecker<K, V>::check_key_order_valid(Node<K, V> *node, std::ostream &stream)
{
    for (auto index = 1U; index < node->size(); index++)
    {
        if (node->is_leaf())
        {
            if (node->leaf_key(index - 1U) >= node->leaf_key(index))
            {
                stream << "[KeyOrder ] Leaf " << node << ": Key[" << index - 1U << "] (=" << node->leaf_key(index - 1U)
                       << ") >= Key[" << index << "] (=" << node->leaf_key(index) << ")" << std::endl;
            }
        }
        else
        {
            if (node->inner_key(index - 1) >= node->inner_key(index))
            {
                stream << "[KeyOrder ] Inner " << node << ": Key[" << index - 1 << "] (=" << node->inner_key(index - 1)
                       << ") >= Key[" << index << "] (=" << node->inner_key(index) << ")" << std::endl;
            }
        }
    }
}

template <typename K, typename V>
void NodeConsistencyChecker<K, V>::check_no_null_separator(Node<K, V> *node, std::ostream &stream)
{
    if (node->is_inner())
    {
        for (auto index = 0U; index <= node->size(); index++)
        {
            if (node->separator(index) == nullptr)
            {
                stream << "[Separator ] Inner " << node << ": Separator[" << index << "] is empty." << std::endl;
            }
        }
    }
}

template <typename K, typename V>
void NodeConsistencyChecker<K, V>::check_children_order_valid(Node<K, V> *node, std::ostream &stream)
{
    if (node->is_inner())
    {
        for (auto index = 0U; index < node->size(); index++)
        {
            auto child = node->separator(index).template get<Node<K, V>>();
            const auto child_last_key =
                child->is_leaf() ? child->leaf_key(child->size() - 1U) : child->inner_key(child->size() - 1U);
            if (child_last_key >= node->inner_key(index))
            {
                stream << "[ChildOrder] Inner " << node << ": Key[" << index << "] (=" << node->inner_key(index)
                       << ") <= Separator[" << index << "].Key[" << child->size() - 1U << "] (=" << child_last_key
                       << ")" << std::endl;
            }
        }
    }
}

template <typename K, typename V>
void NodeConsistencyChecker<K, V>::check_level_valid(Node<K, V> *node, std::ostream &stream)
{
    if (node->right_sibling() && node->is_leaf() != node->right_sibling().template get<Node<K, V>>()->is_leaf())
    {
        stream << "[Level ] Leaf " << node << ": Is marked as leaf, but right sibling is not" << std::endl;
    }

    if (node->is_inner())
    {
        for (auto index = 0U; index < node->size(); index++)
        {
            if (node->separator(index).template get<Node<K, V>>()->is_leaf() !=
                node->separator(index + 1U).template get<Node<K, V>>()->is_leaf())
            {
                stream << "[Level ] Inner " << node << ": Separator[" << index
                       << "] is marked as is_leaf = " << node->separator(index).template get<Node<K, V>>()->is_leaf()
                       << " but Separator[" << index + 1U << "] is not" << std::endl;
            }
        }
    }
}

template <typename K, typename V>
void NodeConsistencyChecker<K, V>::check_and_print_parent(Node<K, V> *node, std::ostream &stream)
{
    const auto parent = node->parent();
    if (parent)
    {
        if (parent.template get<Node<K, V>>()->contains(mx::resource::ptr(node)) == false)
        {
            stream << "Wrong parent(1) for node " << node << " (leaf: " << node->is_leaf() << ")" << std::endl;
        }
        else
        {
            auto index = 0U;
            for (; index <= parent.template get<Node<K, V>>()->size(); index++)
            {
                if (parent.template get<Node<K, V>>()->separator(index).template get<Node<K, V>>() == node)
                {
                    break;
                }
            }

            if (index < parent.template get<Node<K, V>>()->size())
            {
                const auto key =
                    node->is_leaf() ? node->leaf_key(node->size() - 1U) : node->inner_key(node->size() - 1);
                if ((key < parent.template get<Node<K, V>>()->inner_key(index)) == false)
                {
                    stream << "Wrong parent(2) for node " << node << " (leaf: " << node->is_leaf() << ")" << std::endl;
                }
            }
            else
            {
                const auto key = node->is_leaf() ? node->leaf_key(0U) : node->inner_key(0U);
                if ((key >= parent.template get<Node<K, V>>()->inner_key(index - 1U)) == false)
                {
                    stream << "Wrong parent(3) for node " << node << " (leaf: " << node->is_leaf() << ")" << std::endl;
                }
            }
        }
    }
}
} // namespace db::index::blinktree
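All checks run through the single public entry point; a plausible call site, assuming 8-byte numeric keys and values as in the benchmark and a node pointer obtained from the tree:

    #include <cstdint>
    #include <iostream>
    #include "node_consistency_checker.h"

    // `node` is assumed to point to a live tree node.
    void validate(db::index::blinktree::Node<std::uint64_t, std::uint64_t> *node)
    {
        // Prints one line per violated invariant (high key, key order, separators, child order, level).
        db::index::blinktree::NodeConsistencyChecker<std::uint64_t, std::uint64_t>::check_and_print_errors(node, std::cerr);
    }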
44
repos/ealanos/src/lib/db/index/blinktree/node_iterator.h
Normal file
@@ -0,0 +1,44 @@
#pragma once

#include "node.h"
#include <mx/resource/resource.h>

namespace db::index::blinktree {
/**
 * Iterator for iterating over nodes of a tree.
 */
template <typename K, typename V> class NodeIterator
{
public:
    NodeIterator() = default;
    explicit NodeIterator(Node<K, V> *root) : _current_node(root), _first_node_in_level(root) {}
    ~NodeIterator() = default;

    Node<K, V> *&operator*() { return _current_node; }

    NodeIterator<K, V> &operator++()
    {
        if (_current_node->right_sibling())
        {
            _current_node = _current_node->right_sibling().template get<Node<K, V>>();
        }
        else if (_current_node->is_inner())
        {
            _first_node_in_level = _first_node_in_level->separator(0).template get<Node<K, V>>();
            _current_node = _first_node_in_level;
        }
        else
        {
            _current_node = nullptr;
        }

        return *this;
    }

    bool operator!=(const NodeIterator<K, V> &other) const { return _current_node != other._current_node; }

private:
    Node<K, V> *_current_node = nullptr;
    Node<K, V> *_first_node_in_level = nullptr;
};
} // namespace db::index::blinktree
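Since operator++ follows right-sibling links first and only descends via separator(0) at the end of a level, a single pass visits every node level by level and stops after the right-most leaf. Combined with the consistency checker, this gives a whole-tree validation sketch (same assumptions as above: 8-byte keys/values, root pointer available from the tree):

    #include <cstdint>
    #include <iostream>
    #include "node_consistency_checker.h"
    #include "node_iterator.h"

    using Node64 = db::index::blinktree::Node<std::uint64_t, std::uint64_t>;
    using Iterator64 = db::index::blinktree::NodeIterator<std::uint64_t, std::uint64_t>;

    void validate_tree(Node64 *root)
    {
        // A default-constructed iterator (current node == nullptr) acts as the end sentinel.
        for (auto iterator = Iterator64{root}; iterator != Iterator64{}; ++iterator)
        {
            db::index::blinktree::NodeConsistencyChecker<std::uint64_t, std::uint64_t>::check_and_print_errors(
                *iterator, std::cerr);
        }
    }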
72
repos/ealanos/src/lib/db/index/blinktree/node_statistics.h
Normal file
@@ -0,0 +1,72 @@
#pragma once

#include "config.h"
#include "node.h"
#include <cstdint>
#include <ostream>

namespace db::index::blinktree {
/**
 * Collects and prints statistics of a set of nodes.
 */
template <typename K, typename V> class NodeStatistics
{
public:
    explicit NodeStatistics(const std::uint16_t height) : _tree_height(height) {}
    ~NodeStatistics() = default;

    NodeStatistics &operator+=(Node<K, V> *node)
    {
        this->_count_inner_nodes += node->is_inner();
        this->_count_leaf_nodes += node->is_leaf();

        if (node->is_leaf())
        {
            this->_count_leaf_node_keys += node->size();
        }
        else
        {
            this->_count_inner_node_keys += node->size();
        }

        return *this;
    }

    friend std::ostream &operator<<(std::ostream &stream, const NodeStatistics<K, V> &tree_statistics)
    {
        const auto count_nodes = tree_statistics._count_leaf_nodes + tree_statistics._count_inner_nodes;
        const auto size_in_bytes = count_nodes * config::node_size();
        stream << "Statistics of the Tree: \n"
               << " Node size: " << sizeof(Node<K, V>) << " B\n"
               << " Header size: " << sizeof(NodeHeader<K, V>) << " B\n"
               << " Inner keys: " << InnerNode<K, V>::max_keys << " (" << sizeof(K) * InnerNode<K, V>::max_keys
               << " B)\n"
               << " Leaf keys: " << LeafNode<K, V>::max_items << " (" << sizeof(K) * LeafNode<K, V>::max_items
               << " B)\n"
               << " Tree height: " << tree_statistics._tree_height << "\n"
               << " Inner nodes: " << tree_statistics._count_inner_nodes << "\n"
               << " Inner entries: " << tree_statistics._count_inner_node_keys << "\n"
               << " Leaf nodes: " << tree_statistics._count_leaf_nodes << "\n"
               << " Leaf entries: " << tree_statistics._count_leaf_node_keys << "\n"
               << " Tree size: " << size_in_bytes / 1024.0 / 1024.0 << " MB";

        return stream;
    }

private:
    // Number of inner nodes.
    std::uint64_t _count_inner_nodes = 0U;

    // Number of leaf nodes.
    std::uint64_t _count_leaf_nodes = 0U;

    // Number of records located in inner nodes.
    std::uint64_t _count_inner_node_keys = 0U;

    // Number of records located in leaf nodes.
    std::uint64_t _count_leaf_node_keys = 0U;

    // Height of the tree.
    const std::uint16_t _tree_height;
};
} // namespace db::index::blinktree
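Together with NodeIterator, the statistics can be gathered in one level-order pass; a sketch, assuming 8-byte keys/values and that the root pointer and tree height come from the tree object:

    #include <cstdint>
    #include <iostream>
    #include "node_iterator.h"
    #include "node_statistics.h"

    using namespace db::index::blinktree;

    void print_statistics(Node<std::uint64_t, std::uint64_t> *root, const std::uint16_t height)
    {
        NodeStatistics<std::uint64_t, std::uint64_t> statistics{height};
        for (auto iterator = NodeIterator<std::uint64_t, std::uint64_t>{root};
             iterator != NodeIterator<std::uint64_t, std::uint64_t>{}; ++iterator)
        {
            statistics += *iterator; // counts inner/leaf nodes and their entries
        }
        std::cout << statistics << std::endl;
    }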
16
repos/ealanos/src/lib/db/index/blinktree/task.h
Normal file
@@ -0,0 +1,16 @@
#pragma once

#include <mx/tasking/task.h>

namespace db::index::blinktree {
template <typename K, typename V, class L> class Task : public mx::tasking::TaskInterface
{
public:
    constexpr Task(const K key, L &listener) : _listener(listener), _key(key) {}
    ~Task() override = default;

protected:
    L &_listener;
    K _key;
};
} // namespace db::index::blinktree
69
repos/ealanos/src/lib/db/index/blinktree/update_task.h
Normal file
@@ -0,0 +1,69 @@
#pragma once

#include "b_link_tree.h"
#include "insert_separator_task.h"
#include "node.h"
#include "task.h"
#include <iostream>

namespace db::index::blinktree {
template <typename K, typename V, class L> class UpdateTask final : public Task<K, V, L>
{
public:
    constexpr UpdateTask(const K key, const V value, L &listener) noexcept : Task<K, V, L>(key, listener), _value(value)
    {
    }

    ~UpdateTask() override = default;

    mx::tasking::TaskResult execute(std::uint16_t core_id, std::uint16_t channel_id) override;

private:
    const V _value;
};

template <typename K, typename V, typename L>
mx::tasking::TaskResult UpdateTask<K, V, L>::execute(const std::uint16_t core_id, const std::uint16_t /*channel_id*/)
{
    auto *node = this->annotated_resource().template get<Node<K, V>>();

    // Is the node related to the key?
    if (node->high_key() <= this->_key)
    {
        this->annotate(node->right_sibling(), config::node_size() / 4U);
        return mx::tasking::TaskResult::make_succeed(this);
    }

    // If we are accessing an inner node, pick the next related child.
    if (node->is_inner())
    {
        const auto child = node->child(this->_key);
        this->annotate(child, config::node_size() / 4U);
        this->is_readonly(!node->is_branch());
        return mx::tasking::TaskResult::make_succeed(this);
    }

    // If the task is still reading, but this is a leaf,
    // spawn again as writer.
    if (node->is_leaf() && this->is_readonly())
    {
        this->is_readonly(false);
        return mx::tasking::TaskResult::make_succeed(this);
    }

    // We are accessing the correct leaf.
    const auto index = node->index(this->_key);
    const auto key = node->leaf_key(index);
    if (key == this->_key)
    {
        node->value(index, this->_value);
        this->_listener.updated(core_id, key, this->_value);
    }
    else
    {
        this->_listener.missing(core_id, key);
    }

    return mx::tasking::TaskResult::make_remove();
}
} // namespace db::index::blinktree