mirror of https://github.com/mmueller41/genode.git
mxtasking: Made channel count independent from habitat size.
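With this change the constructor creates one channel for every id in the application's core_set in a dedicated loop, rather than only alongside each started worker, and the vacant-channel allocator is built on the heap with a size taken from core_set.size() at run time. The sketch below is a minimal, self-contained illustration of that relationship; it uses hypothetical names and values, not the mxtasking API:

    #include <cstddef>
    #include <cstdint>
    #include <iostream>
    #include <vector>

    int main()
    {
        // Core ids granted to the application (the core_set); hypothetical values.
        std::vector<std::uint16_t> core_set{0, 1, 2, 3};
        // Workers actually spawned right now ("habitat size"); may be smaller.
        std::size_t worker_count = 2;

        // One channel (task queue) per core id, independent of the current worker count.
        std::size_t channel_count = 0;
        for (auto channel_id : core_set) {
            (void)channel_id;  // a real scheduler would construct Channel(channel_id, ...) here
            ++channel_count;
        }

        // Queues advertised as stealable: every channel except the foreman's own queue.
        std::size_t stealable_queues = channel_count - 1;

        std::cout << "channels: " << channel_count
                  << ", workers: " << worker_count
                  << ", stealable queues: " << stealable_queues << "\n";
    }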
@@ -1,5 +1,6 @@
 #include "scheduler.h"
 #include "mx/system/environment.h"
+#include "mx/util/field_alloc.h"
 #include "tukija/syscall-generic.h"
 #include <cassert>
 #include <mx/memory/global_heap.h>
@@ -25,6 +26,7 @@ Scheduler::Scheduler(const mx::util::core_set &core_set, const std::uint16_t pre
     : _core_set(core_set), _count_channels(core_set.size()), _worker({}), _channel_numa_node_map({0U}),
       _epoch_manager(core_set.size(), resource_allocator, _is_running), _statistic(_count_channels)
 {
+    this->_vacant_channels_alloc = new (memory::GlobalHeap::allocate_cache_line_aligned(sizeof(util::Field_Allocator<config::max_cores()>))) util::Field_Allocator<config::max_cores()>(core_set.size());
     this->_worker.fill(nullptr);
     this->_channel_numa_node_map.fill(0U);
     Genode::log("Initializing scheduler");
@@ -37,20 +39,23 @@ Scheduler::Scheduler(const mx::util::core_set &core_set, const std::uint16_t pre
         auto ptr = memory::GlobalHeap::allocate(this->_channel_numa_node_map[worker_id], sizeof(Worker));
         this->_worker[worker_id] =
             new (ptr)
-                Worker(worker_id, core_id, this->_channel_numa_node_map[worker_id], this->_is_running, _vacant_channels_alloc, _remainder_channel_count,
+                Worker(worker_id, core_id, this->_channel_numa_node_map[worker_id], this->_is_running, *_vacant_channels_alloc, _remainder_channel_count,
                        prefetch_distance, this->_epoch_manager[worker_id], this->_epoch_manager.global_epoch(),
                        this->_statistic);
-        ptr = memory::GlobalHeap::allocate(this->_channel_numa_node_map[worker_id], sizeof(Channel));
-        this->_channels[worker_id] =
-            new (ptr) Channel(worker_id, this->_channel_numa_node_map[worker_id], prefetch_distance);
-        Genode::log("Channel ", worker_id, " created at ", _channels[worker_id]);
     }
 
+    for (auto channel_id : core_set) {
+        auto ptr = memory::GlobalHeap::allocate(this->_channel_numa_node_map[channel_id], sizeof(Channel));
+        this->_channels[channel_id] =
+            new (ptr) Channel(channel_id, this->_channel_numa_node_map[channel_id], prefetch_distance);
+        Genode::log("Channel ", channel_id, " created at ", _channels[channel_id]);
+    }
+
     Genode::log("Using ", _count_channels, " Channels");
     Genode::log("CPU freq: ", mx::system::Environment::get_cpu_freq(), "kHz");
     /* We need to state the actual number of channels here. But as _count_channels only denotes the
        number of channels from the restricted core_set, defined by the application, we could end up reporting a number below the actual number of channels. This could lead to the queue stealing misbehaving, e.g. not all queues being taken by the workers. Furthermore, we must subtract the queue used by the foreman which is not stealable by definition.*/
-    Tukija::Cip::cip()->channel_info.count = worker_count - 1;
+    Tukija::Cip::cip()->channel_info.count = core_set.size() - 1;
 }
 
 Scheduler::~Scheduler() noexcept
@@ -83,8 +83,8 @@ public:
         //Genode::log("Allocation before resume ", allocation);
 
         unsigned current_cores = Tukija::Cip::cip()->cores_current.count();
         allocate_cores(_count_channels - current_cores);
         _is_running = true;
     }
 
     [[nodiscard]] inline Worker *my_self() noexcept {
@@ -281,7 +281,7 @@ private:
 
     alignas(64) std::array<Channel *, config::max_cores()> _channels{nullptr};
 
-    alignas(64) mx::util::Field_Allocator<config::max_cores()> _vacant_channels_alloc{63};
+    alignas(64) mx::util::Field_Allocator<config::max_cores()> *_vacant_channels_alloc{nullptr};
     alignas(64) std::atomic<std::int32_t> _remainder_channel_count{0};
 
     // Map of channel id to NUMA region id.
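_vacant_channels_alloc turns from an inline Field_Allocator with a hard-coded capacity of 63 into a pointer: the constructor placement-constructs the allocator in cache-line-aligned heap memory, sizes it with core_set.size(), and each Worker now receives it by dereference (*_vacant_channels_alloc). The comment kept in the constructor above gives the reasoning for reporting core_set.size() - 1 stealable queues (the foreman's queue is not stealable). Below is a minimal sketch of the cache-line-aligned placement-new pattern, with a hypothetical stand-in type instead of util::Field_Allocator and memory::GlobalHeap::allocate_cache_line_aligned():

    #include <cstddef>
    #include <cstdint>
    #include <new>

    // Hypothetical stand-in for util::Field_Allocator: a bitmap of vacant channel
    // slots whose occupancy is chosen at run time instead of compile time.
    struct VacantSlots {
        std::uint64_t bitmap;
        explicit VacantSlots(std::size_t count)
            : bitmap(count >= 64 ? ~0ULL : ((1ULL << count) - 1ULL)) {}
    };

    int main()
    {
        // Stand-in for the cache-line-aligned allocation: 64-byte-aligned raw storage.
        void *raw = ::operator new(sizeof(VacantSlots), std::align_val_t{64});

        // Placement-construct with the run-time channel count, as the constructor
        // does for _vacant_channels_alloc with core_set.size().
        VacantSlots *vacant = new (raw) VacantSlots(4);

        // Tear down explicitly because the object lives in manually managed storage.
        vacant->~VacantSlots();
        ::operator delete(raw, std::align_val_t{64});
    }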