Mirror of https://github.com/mmueller41/genode.git, synced 2026-01-21 12:32:56 +01:00
Revised blinktree to include core allocation times in measurements.
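In short, the hunks below move the chronometer start out of the task classes: Benchmark gains a public start_chronometer() helper, bt_main calls it right after the mx::tasking runtime_guard is set up, and Benchmark::requests_finished() now decides itself whether to restart the chronometer and spawn a RestartTask or to stop the runtime. The underlying idea, widening the measured interval so that setup work such as core allocation is counted, can be shown with a plain std::chrono sketch; the names and numbers below are made up for illustration and are not taken from this repository:

#include <chrono>
#include <cstdio>
#include <thread>
#include <vector>

// Illustration only: measure a workload *including* the cost of spinning up
// worker threads, by starting the clock before the workers are created.
int main()
{
    using clock = std::chrono::steady_clock;

    const auto start = clock::now();          // clock starts before "core allocation"

    std::vector<std::thread> workers;
    for (int i = 0; i < 4; ++i)
        workers.emplace_back([] {             // stand-in for allocating cores/channels
            volatile long sum = 0;
            for (long j = 0; j < 1'000'000; ++j)
                sum += j;                     // stand-in for the actual workload
        });
    for (auto &worker : workers)
        worker.join();

    const auto end = clock::now();
    std::printf("elapsed (setup + work): %lld us\n",
                static_cast<long long>(
                    std::chrono::duration_cast<std::chrono::microseconds>(end - start).count()));
    return 0;
}

Moving the first clock::now() call below the thread-creation loop would exclude the setup cost again, which is the behaviour this commit replaces.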
@@ -81,7 +81,7 @@ void Benchmark::start()
     {
         mx::tasking::runtime::profile(this->profile_file_name());
     }
-    /*his->_chronometer.start(static_cast<std::uint16_t>(static_cast<benchmark::phase>(this->_workload)),
+    /*this->_chronometer.start(static_cast<std::uint16_t>(static_cast<benchmark::phase>(this->_workload)),
                                this->_current_iteration + 1, this->_cores.current());*/
     //Genode::log("Timer started ");
 }
@@ -217,11 +217,17 @@ void Benchmark::requests_finished()
         this->_tree.reset(nullptr);
     }
 
-    auto *restart_task = mx::tasking::runtime::new_task<RestartTask>(0U, *this);
-    restart_task->annotate(static_cast<mx::tasking::TaskInterface::channel>(0));
-    mx::tasking::runtime::spawn(*restart_task, core_id);
-    mx::tasking::runtime::resume();
+    if (this->core_set()) {
+        this->_chronometer.start(static_cast<std::uint16_t>(static_cast<benchmark::phase>(this->_workload)),
+                                 this->_current_iteration + 1, this->_cores.current());
+        auto *restart_task = mx::tasking::runtime::new_task<RestartTask>(0U, *this);
+        restart_task->annotate(static_cast<mx::tasking::TaskInterface::channel>(0));
+        mx::tasking::runtime::spawn(*restart_task, core_id);
+        mx::tasking::runtime::resume();
+    } else {
+        Genode::log("Benchmark finished.");
+        mx::tasking::runtime::stop();
+    }
     }
 }
 
@@ -52,6 +52,11 @@ public:
      */
     void start();
 
+    void start_chronometer() {
+        this->_chronometer.start(static_cast<std::uint16_t>(static_cast<benchmark::phase>(this->_workload)),
+                                 this->_current_iteration + 1, this->_cores.current());
+    }
+
 private:
     // Collection of cores the benchmark should run on.
     benchmark::Cores _cores;
@@ -121,8 +126,7 @@ class StartMeasurementTask : public mx::tasking::TaskInterface
 
     mx::tasking::TaskResult execute(const std::uint16_t core_id, const std::uint16_t channel_id) override
     {
-        Genode::log("Starting timer");
-        _benchmark._chronometer.start(static_cast<std::uint16_t>(static_cast<benchmark::phase>(_benchmark._workload)), _benchmark._current_iteration + 1, _benchmark._cores.current());
+        //_benchmark._chronometer.start(static_cast<std::uint16_t>(static_cast<benchmark::phase>(_benchmark._workload)), _benchmark._current_iteration + 1, _benchmark._cores.current());
         //_benchmark._start = Genode::Trace::timestamp();
         return mx::tasking::TaskResult::make_remove();
     }
@@ -155,12 +159,7 @@ class RestartTask : public mx::tasking::TaskInterface
 
     mx::tasking::TaskResult execute(const std::uint16_t core_id, const std::uint16_t channel_id) override
     {
-        if (_benchmark.core_set()) {
-            _benchmark.start();
-        } else {
-            Genode::log("Benchmark finished.");
-            mx::tasking::runtime::stop();
-        }
+        _benchmark.start();
         return mx::tasking::TaskResult::make_remove();
     }
 };
@@ -51,6 +51,7 @@ int bt_main(Libc::Env &env, int count_arguments, char **arguments)
     while ((cores = benchmark->core_set()))
     {
         mx::tasking::runtime_guard _(false, cores, prefetch_distance);
+        benchmark->start_chronometer();
         benchmark->start();
         //wait_for_continue();
     }
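Read together with the header and requests_finished() hunks above, the per-iteration control flow now looks roughly like the condensed sketch below. It is assembled from the hunks only; the surrounding declarations of benchmark, cores, and prefetch_distance are assumed from context and not shown in this diff:

while ((cores = benchmark->core_set()))
{
    mx::tasking::runtime_guard _(false, cores, prefetch_distance);  // brings up the runtime for this core set
    benchmark->start_chronometer();   // new helper from the header hunk above
    benchmark->start();               // enqueues the workload; its in-task chronometer call stays commented out
}
// Benchmark::requests_finished() then either restarts the chronometer and spawns a
// RestartTask (which now only calls _benchmark.start()), or logs "Benchmark finished."
// and calls mx::tasking::runtime::stop().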
@@ -203,13 +204,13 @@ void Libc::Component::construct(Libc::Env &env) {
     mx::system::Environment::set_cores(&sys_cores);
 
     mx::memory::GlobalHeap::myself();
-    std::uint16_t cores = 64;
+    std::uint16_t cores = 59;
     //env.cpu().affinity_space().total();
 
     char cores_arg[10];
     sprintf(cores_arg, "%d", cores);
 
-    char *args[] = {"blinktree_benchmark", "-i", "20", "--olfit", cores_arg};
+    char *args[] = {"blinktree_benchmark", "-i", "200", "--olfit", cores_arg};
 
     Libc::with_libc([&]()
     {