Update TaskingProfiler to a newer version

- fixed double execution of tasks when the profiler is enabled (added else-branches in the worker)
- added NUMA-aware allocation of the profiling arrays (per-node Regional_heap)
- changed std::cout to Genode::log()
This commit is contained in:
2023-06-16 11:44:29 +02:00
parent 6fd37d3f73
commit 1b70c89b01
6 changed files with 136 additions and 87 deletions

View File

@@ -30,7 +30,7 @@ public:
// memory is unsafe. // memory is unsafe.
static constexpr auto memory_reclamation() { return memory_reclamation_scheme::UpdateEpochPeriodically; } static constexpr auto memory_reclamation() { return memory_reclamation_scheme::UpdateEpochPeriodically; }
static constexpr auto tasking_array_length() { return 90000; } static constexpr auto tasking_array_length() { return 9000000; }
static constexpr auto use_task_queue_length() { return true; } static constexpr auto use_task_queue_length() { return true; }

View File

@@ -42,8 +42,9 @@ public:
} }
}; };
void printFloatUS(std::uint64_t ns) std::string printFloatUS(std::uint64_t ns)
{ {
char* str = new char[100];
std::uint64_t remainder = ns % 1000; std::uint64_t remainder = ns % 1000;
std::uint64_t front = ns / 1000; std::uint64_t front = ns / 1000;
char strRemainder[4]; char strRemainder[4];
@@ -55,12 +56,17 @@ void printFloatUS(std::uint64_t ns)
} }
strRemainder[3] = '\0'; strRemainder[3] = '\0';
std::cout << front << '.' << strRemainder; sprintf(str, "%lu.%s", front, strRemainder);
std::string message(str);
return message;
} }
void TaskingProfiler::init(std::uint16_t corenum) void TaskingProfiler::init(std::uint16_t corenum)
{ {
Genode::log("Hello from TaskingProfiler::init!");
relTime = std::chrono::high_resolution_clock::now(); relTime = std::chrono::high_resolution_clock::now();
corenum++; corenum++;
@@ -76,6 +82,14 @@ void TaskingProfiler::init(std::uint16_t corenum)
Topology::Numa_region const &node = mx::system::Environment::node(numa_id); Topology::Numa_region const &node = mx::system::Environment::node(numa_id);
void *cast_evade = static_cast<void*>(new Genode::Regional_heap(mx::system::Environment::ram(), mx::system::Environment::rm(), const_cast<Topology::Numa_region&>(node))); void *cast_evade = static_cast<void*>(new Genode::Regional_heap(mx::system::Environment::ram(), mx::system::Environment::rm(), const_cast<Topology::Numa_region&>(node)));
task_data[i] = static_cast<task_info*>(cast_evade); task_data[i] = static_cast<task_info*>(cast_evade);
/*
task_data[i] = new task_info[mx::tasking::config::tasking_array_length()];
for(size_t j = mx::tasking::config::tasking_array_length(); j > 0; j--)
{
task_data[i][j] = {0, 0, NULL, tinit, tinit};
__builtin_prefetch(&task_data[i][j], 1, 3);
}
*/
} }
//create an array of pointers to queue_info structs //create an array of pointers to queue_info structs
@@ -86,6 +100,14 @@ void TaskingProfiler::init(std::uint16_t corenum)
Topology::Numa_region const &node = mx::system::Environment::node(numa_id); Topology::Numa_region const &node = mx::system::Environment::node(numa_id);
void *cast_evade = static_cast<void*>(new Genode::Regional_heap(mx::system::Environment::ram(), mx::system::Environment::rm(), const_cast<Topology::Numa_region&>(node))); void *cast_evade = static_cast<void*>(new Genode::Regional_heap(mx::system::Environment::ram(), mx::system::Environment::rm(), const_cast<Topology::Numa_region&>(node)));
queue_data[i] = static_cast<queue_info*>(cast_evade); queue_data[i] = static_cast<queue_info*>(cast_evade);
/*
queue_data[i] = new queue_info[mx::tasking::config::tasking_array_length()];
for(size_t j = mx::tasking::config::tasking_array_length(); j > 0; j--)
{
queue_data[i][j] = {0, tinit};
__builtin_prefetch(&queue_data[i][j], 1, 3);
}
*/
} }
//make prefetch tasks for each cpu //make prefetch tasks for each cpu
@@ -94,6 +116,7 @@ void TaskingProfiler::init(std::uint16_t corenum)
//prefetchTask->annotate(i); //prefetchTask->annotate(i);
mx::tasking::runtime::spawn(*prefetchTask); mx::tasking::runtime::spawn(*prefetchTask);
} }
task_id_counter = new std::uint64_t[total_cores]{0}; task_id_counter = new std::uint64_t[total_cores]{0};
queue_id_counter = new std::uint64_t[total_cores]{0}; queue_id_counter = new std::uint64_t[total_cores]{0};
@@ -126,8 +149,6 @@ void TaskingProfiler::endTask(std::uint16_t cpu_core, std::uint64_t id)
std::chrono::time_point<std::chrono::high_resolution_clock> end = std::chrono::high_resolution_clock::now(); std::chrono::time_point<std::chrono::high_resolution_clock> end = std::chrono::high_resolution_clock::now();
overhead += std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count(); overhead += std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
TaskingProfiler::getInstance().saveProfile();
} }
void TaskingProfiler::enqueue(std::uint16_t corenum){ void TaskingProfiler::enqueue(std::uint16_t corenum){
@@ -147,9 +168,9 @@ void TaskingProfiler::saveProfile()
{ {
Genode::log("Saving Profile"); Genode::log("Saving Profile");
std::uint64_t overhead_ms = overhead/1000000; std::uint64_t overhead_ms = overhead/1000000;
std::cout << "Overhead-Time: " << overhead << "ns in ms: " << overhead_ms << "ms" << std::endl; Genode::log("Overhead-Time: ", overhead_ms*1000000, "ns in ms: ", overhead_ms, "ms");
std::uint64_t taskQueueOverhead_ms = taskQueueOverhead/1000000; std::uint64_t taskQueueOverhead_ms = taskQueueOverhead/1000000;
std::cout << "TaskQueueOverhead-Time: " << taskQueueOverhead << "ns in ms: " << taskQueueOverhead_ms << "ms" << std::endl; Genode::log("TaskQueueOverhead-Time: ", taskQueueOverhead_ms*1000000, "ns in ms: ", taskQueueOverhead_ms, "ms");
//get the number of tasks overal //get the number of tasks overal
std::uint64_t tasknum = 0; std::uint64_t tasknum = 0;
@@ -157,8 +178,8 @@ void TaskingProfiler::saveProfile()
{ {
tasknum += task_id_counter[i]; tasknum += task_id_counter[i];
} }
std::cout << "Number of tasks: " << tasknum << std::endl; Genode::log("Number of tasks: ", tasknum);
std::cout << "Overhead-Time per Task: " << overhead/tasknum << "ns" << std::endl; Genode::log("Overhead-Time per Task: ", overhead/tasknum, "ns");
bool first = false; bool first = false;
std::uint64_t firstTime = 0; std::uint64_t firstTime = 0;
@@ -171,19 +192,20 @@ void TaskingProfiler::saveProfile()
std::uint64_t start = 0; std::uint64_t start = 0;
std::uint64_t end = 0; std::uint64_t end = 0;
char* name; char* name;
std::string timestampString;
std::uint64_t taskCounter = 0; std::uint64_t taskCounter = 0;
std::uint64_t queueCounter = 0; std::uint64_t queueCounter = 0;
std::cout << "--Save--" << std::endl; Genode::log("--Save--");
std::cout << "{\"traceEvents\":[" << std::endl; Genode::log("{\"traceEvents\":[");
//Events //Events
for(std::uint16_t cpu_id = 0; cpu_id < total_cores; cpu_id++) for(std::uint16_t cpu_id = 0; cpu_id < total_cores; cpu_id++)
{ {
//Metadata Events for each core (CPU instead of process as name,...) //Metadata Events for each core (CPU instead of process as name,...)
std::cout << "{\"name\":\"process_name\",\"ph\":\"M\",\"pid\":" << cpu_id << ",\"tid\":" << cpu_id << ",\"args\":{\"name\":\"CPU\"}}," << std::endl; Genode::log("{\"name\":\"process_name\",\"ph\":\"M\",\"pid\":", cpu_id, ",\"tid\":", cpu_id, ",\"args\":{\"name\":\"CPU\"}},");
std::cout << "{\"name\":\"process_sort_index\",\"ph\":\"M\",\"pid\":" << cpu_id << ",\"tid\":" << cpu_id << ",\"args\":{\"name\":" << cpu_id << "}}," << std::endl; Genode::log("{\"name\":\"process_sort_index\",\"ph\":\"M\",\"pid\":", cpu_id, ",\"tid\":", cpu_id, ",\"args\":{\"name\":", cpu_id, "}},");
@@ -193,9 +215,8 @@ void TaskingProfiler::saveProfile()
queueCounter = 1; queueCounter = 1;
//Initial taskQueueLength is zero //Initial taskQueueLength is zero
std::cout << "{\"pid\":" << cpu_id << ",\"name\":\"CPU" << cpu_id << "\",\"ph\":\"C\",\"ts\":"; timestampString = printFloatUS(0);
printFloatUS(0); Genode::log("{\"pid\":", cpu_id, ",\"name\":\"CPU", cpu_id, "\",\"ph\":\"C\",\"ts\":", timestampString.c_str(), ",\"args\":{\"TaskQueueLength\":", taskQueueLength, "}},");
std::cout << ",\"args\":{\"TaskQueueLength\":" << taskQueueLength << "}}," << std::endl;
//go through all tasks and queues //go through all tasks and queues
while(taskCounter<task_id_counter[cpu_id] || queueCounter<queue_id_counter[cpu_id]){ while(taskCounter<task_id_counter[cpu_id] || queueCounter<queue_id_counter[cpu_id]){
@@ -226,17 +247,23 @@ void TaskingProfiler::saveProfile()
} }
} }
//if the queue element is before the task element, it is an enqueue //if the queue element is before the task element, it is an enqueue
if(qi.timestamp < ti._start && queueCounter <= queue_id_counter[cpu_id]){ //if same time, we print queue first
if(qi.timestamp <= ti._start && queueCounter <= queue_id_counter[cpu_id]){
queueCounter++; queueCounter++;
taskQueueLength++; taskQueueLength++;
std::cout << "{\"pid\":" << cpu_id << ",\"name\":\"CPU" << cpu_id << "\",\"ph\":\"C\",\"ts\":";
if(timestamp - firstTime == 0){ if(timestamp - firstTime == 0){
printFloatUS(10); timestampString = printFloatUS(10);
} }
else{ else{
printFloatUS(timestamp-firstTime); if(timestamp - firstTime > 0){
timestampString = printFloatUS(timestamp-firstTime);
}
else{
timestampString = printFloatUS(10);
}
} }
std::cout << ",\"args\":{\"TaskQueueLength\":" << taskQueueLength << "}}," << std::endl; Genode::log("{\"pid\":", cpu_id, ",\"name\":\"CPU", cpu_id, "\",\"ph\":\"C\",\"ts\":", timestampString.c_str(), ",\"args\":{\"TaskQueueLength\":", taskQueueLength, "}},");
} }
//else we print the task itself and a dequeue //else we print the task itself and a dequeue
@@ -245,40 +272,41 @@ void TaskingProfiler::saveProfile()
taskQueueLength--; taskQueueLength--;
//taskQueueLength //taskQueueLength
std::cout << "{\"pid\":" << cpu_id << ",\"name\":\"CPU" << cpu_id << "\",\"ph\":\"C\",\"ts\":"; if (start-firstTime > 0){
printFloatUS(start-firstTime); timestampString = printFloatUS(start-firstTime);
std::cout << ",\"args\":{\"TaskQueueLength\":" << taskQueueLength << "}}," << std::endl; }
else{
timestampString = printFloatUS(10);
}
Genode::log("{\"pid\":", cpu_id, ",\"name\":\"CPU", cpu_id, "\",\"ph\":\"C\",\"ts\":", timestampString.c_str(), ",\"args\":{\"TaskQueueLength\":", taskQueueLength, "}},");
//if the endtime of the last task is too large (cannot display right) //if the endtime of the last task is too large (cannot display right)
if(taskCounter == task_id_counter[cpu_id] && ti._end == tinit){ if(taskCounter == task_id_counter[cpu_id] && ti._end == tinit){
end = start + 1000; end = start + 1000;
} }
//Task itself //Task itself
std::cout << "{\"pid\":" << cpu_id << ",\"tid\":" << cpu_id << ",\"ts\":"; Genode::log("{\"pid\":", cpu_id, ",\"tid\":", cpu_id, ",\"ts\":", timestampString.c_str(), ",\"dur\":", printFloatUS(end-start).c_str(), ",\"ph\":\"X\",\"name\":\"", std::string(name).c_str(), "\",\"args\":{\"type\":", ti.type, "}},");
printFloatUS(start-firstTime);
std::cout << ",\"dur\":"; //reset throughput
printFloatUS(end-start); //Genode::log("{\"pid\":", cpu_id, ",\"name\":\"CPU", cpu_id, "\",\"ph\":\"C\",\"ts\":", timestampString.c_str(), ",\"args\":{\"TaskThroughput\":", 0, "}},");
std::cout << ",\"ph\":\"X\",\"name\":\"" << name << "\",\"args\":{\"type\":" << ti.type << "}}," << std::endl;
//reset throughput if there is a gap of more than 1us
if (start - lastEndTime > 1000 && lastEndTime != 0){
std::cout << "{\"pid\":" << cpu_id << ",\"name\":\"CPU" << cpu_id << "\",\"ph\":\"C\",\"ts\":";
printFloatUS(lastEndTime - firstTime);
std::cout << ",\"args\":{\"TaskThroughput\":";
//Tasks per microsecond is zero
std::cout << 0;
std::cout << "}}," << std::endl;
}
duration = end - start; duration = end - start;
if (end - start < 0){
duration = 1000;
}
//Task Throughput //Task Throughput
std::cout << "{\"pid\":" << cpu_id << ",\"name\":\"CPU" << cpu_id << "\",\"ph\":\"C\",\"ts\":"; if (duration == 0){
printFloatUS(start-firstTime); //if duration is zero, we set throughput to zero
std::cout << ",\"args\":{\"TaskThroughput\":"; //ask what to do if duration is zero
//Tasks per microsecond duration = 1;
throughput = 1000/duration; throughput = 0;
std::cout << throughput; }
std::cout << "}}," << std::endl; else{
throughput = 1000/duration;
}
Genode::log("{\"pid\":", cpu_id, ",\"name\":\"CPU", cpu_id, "\",\"ph\":\"C\",\"ts\":", timestampString.c_str(), ",\"args\":{\"TaskThroughput\":", throughput, "}},");
lastEndTime = end; lastEndTime = end;
} }
} }
@@ -297,43 +325,41 @@ void TaskingProfiler::saveProfile()
name = abi::__cxa_demangle(ti.name, 0, 0, 0); name = abi::__cxa_demangle(ti.name, 0, 0, 0);
//Task itself //Task itself
std::cout << "{\"pid\":" << cpu_id << ",\"tid\":" << cpu_id << ",\"ts\":"; timestampString = printFloatUS(start-firstTime);
printFloatUS(start-firstTime); Genode::log("{\"pid\":", cpu_id, ",\"tid\":", cpu_id, ",\"ts\":", timestampString.c_str(), ",\"dur\":", printFloatUS(end-start).c_str(), ",\"ph\":\"X\",\"name\":\"", std::string(name).c_str(), "\",\"args\":{\"type\":", ti.type, "}},");
std::cout << ",\"dur\":";
printFloatUS(end-start);
std::cout << ",\"ph\":\"X\",\"name\":\"" << name << "\",\"args\":{\"type\":" << ti.type << "}}," << std::endl;
//reset throughput if there is a gap of more than 1us //reset throughput if there is a gap of more than 1us
if (start - lastEndTime > 1000){ if (start - lastEndTime > 1000){
std::cout << "{\"pid\":" << cpu_id << ",\"name\":\"CPU" << cpu_id << "\",\"ph\":\"C\",\"ts\":"; timestampString = printFloatUS(lastEndTime - firstTime);
printFloatUS(lastEndTime-firstTime); Genode::log("{\"pid\":", cpu_id, ",\"name\":\"CPU", cpu_id, "\",\"ph\":\"C\",\"ts\":", timestampString.c_str(), ",\"args\":{\"TaskThroughput\":", 0, "}},");
std::cout << ",\"args\":{\"TaskThroughput\":";
//Tasks per microsecond is zero
std::cout << 0;
std::cout << "}}," << std::endl;
} }
duration = end - start; duration = end - start;
//Task Throughput //Task Throughput
std::cout << "{\"pid\":" << cpu_id << ",\"name\":\"CPU" << cpu_id << "\",\"ph\":\"C\",\"ts\":"; timestampString = printFloatUS(start-firstTime);
printFloatUS(start-firstTime);
std::cout << ",\"args\":{\"TaskThroughput\":";
//Tasks per microsecond
throughput = 1000/duration; throughput = 1000/duration;
Genode::log("{\"pid\":", cpu_id, ",\"name\":\"CPU", cpu_id, "\",\"ph\":\"C\",\"ts\":", timestampString.c_str(), ",\"args\":{\"TaskThroughput\":", throughput, "}},");
std::cout << throughput;
std::cout << "}}," << std::endl;
lastEndTime = end; lastEndTime = end;
} }
} }
lastEndTime = 0; lastEndTime = 0;
} }
//sample Task (so we dont need to remove the last comma) //sample Task (so we dont need to remove the last comma)
std::cout << "{\"name\":\"sample\",\"ph\":\"P\",\"ts\":0,\"pid\":5,\"tid\":0}"; Genode::log("{\"name\":\"sample\",\"ph\":\"P\",\"ts\":0,\"pid\":5,\"tid\":0}");
std::cout << "]}" << std::endl;; Genode::log("]}");
} }
//Destructor
TaskingProfiler::~TaskingProfiler()
{
for(std::uint16_t cpu_id = 0; cpu_id < total_cores; cpu_id++)
{
delete[] task_data[cpu_id];
}
delete[] task_data;
delete[] task_id_counter;
delete[] queue_id_counter;
}
//Code for the TaskingProfiler::printTP function //Code for the TaskingProfiler::printTP function

View File

@@ -28,6 +28,9 @@ public:
std::chrono::high_resolution_clock::time_point timestamp; std::chrono::high_resolution_clock::time_point timestamp;
}; };
//Destructor
~TaskingProfiler();
private: private:
TaskingProfiler() {}; TaskingProfiler() {};
std::chrono::time_point<std::chrono::high_resolution_clock> relTime; std::chrono::time_point<std::chrono::high_resolution_clock> relTime;

View File

@@ -10,6 +10,8 @@
#include <mx/util/core_set.h> #include <mx/util/core_set.h>
#include <utility> #include <utility>
#include "profiling/tasking_profiler.h"
namespace mx::tasking { namespace mx::tasking {
/** /**
* The runtime is the central access structure to MxTasking. * The runtime is the central access structure to MxTasking.
@@ -83,8 +85,11 @@ public:
_resource_builder = std::make_unique<resource::Builder>(*_scheduler, *_resource_allocator); _resource_builder = std::make_unique<resource::Builder>(*_scheduler, *_resource_allocator);
} }
//TaskingProfiler::getInstance().init(core_set.max_core_id()); // Create new tasking Profiler.
if constexpr (config::use_tasking_profiler()){
TaskingProfiler::getInstance().init(core_set.max_core_id());
}
return true; return true;
} }
@@ -108,7 +113,8 @@ public:
* Spawns the given task. * Spawns the given task.
* @param task Task to be scheduled. * @param task Task to be scheduled.
*/ */
static void spawn(TaskInterface &task) noexcept { _scheduler->schedule(task); } static void spawn(TaskInterface &task) noexcept {
_scheduler->schedule(task); }
/** /**
* @return Number of available channels. * @return Number of available channels.
@@ -267,6 +273,11 @@ public:
{ {
} }
~runtime_guard() noexcept { runtime::start_and_wait(); } ~runtime_guard() noexcept { runtime::start_and_wait();
if constexpr (config::use_tasking_profiler())
{
TaskingProfiler::getInstance().~TaskingProfiler();
}
}
}; };
} // namespace mx::tasking } // namespace mx::tasking

View File

@@ -7,7 +7,8 @@
#include <vector> #include <vector>
#include <base/log.h> #include <base/log.h>
#include "profiling/taskin_profiler.h" #include "profiling/tasking_profiler.h"
using namespace mx::tasking; using namespace mx::tasking;
@@ -29,16 +30,10 @@ Scheduler::Scheduler(const mx::util::core_set &core_set, const std::uint16_t pre
prefetch_distance, this->_epoch_manager[worker_id], this->_epoch_manager.global_epoch(), prefetch_distance, this->_epoch_manager[worker_id], this->_epoch_manager.global_epoch(),
this->_statistic); this->_statistic);
} }
if constexpr (config::use_tasking_profiler()){
TaskingProfiler::getInstance().init(core_set.max_core_id());
}
} }
Scheduler::~Scheduler() noexcept Scheduler::~Scheduler() noexcept
{ {
if constexpr (config::use_tasking_profiler()){
TaskingProfiler::getInstance().saveProfile();
}
for (auto *worker : this->_worker) for (auto *worker : this->_worker)
{ {
std::uint8_t node_id = worker->channel().numa_node_id(); std::uint8_t node_id = worker->channel().numa_node_id();
@@ -55,7 +50,6 @@ void Scheduler::start_and_wait()
for (auto channel_id = 0U; channel_id < this->_core_set.size(); ++channel_id) for (auto channel_id = 0U; channel_id < this->_core_set.size(); ++channel_id)
{ {
worker_threads[channel_id] = std::thread([this, channel_id] { this->_worker[channel_id]->execute(); }); worker_threads[channel_id] = std::thread([this, channel_id] { this->_worker[channel_id]->execute(); });
//system::thread::pin(worker_threads[channel_id], this->_worker[channel_id]->core_id()); //system::thread::pin(worker_threads[channel_id], this->_worker[channel_id]->core_id());
} }
@@ -78,6 +72,11 @@ void Scheduler::start_and_wait()
worker_thread.join(); worker_thread.join();
} }
if constexpr (config::use_tasking_profiler()){
TaskingProfiler::getInstance().saveProfile();
}
if constexpr (config::memory_reclamation() != config::None) if constexpr (config::memory_reclamation() != config::None)
{ {
// At this point, no task will execute on any resource; // At this point, no task will execute on any resource;

View File

@@ -109,15 +109,19 @@ void Worker::execute()
result = this->execute_optimistic(core_id, channel_id, task); result = this->execute_optimistic(core_id, channel_id, task);
TaskingProfiler::getInstance().endTask(channel_id, task_id_profiler); TaskingProfiler::getInstance().endTask(channel_id, task_id_profiler);
} }
result = this->execute_optimistic(core_id, channel_id, task); else{
result = this->execute_optimistic(core_id, channel_id, task);
}
break; break;
case synchronization::primitive::OLFIT: case synchronization::primitive::OLFIT:
if constexpr (config::use_tasking_profiler()){ if constexpr (config::use_tasking_profiler()){
task_id_profiler = TaskingProfiler::getInstance().startTask(core_id, 0, typeid(*task).name()); task_id_profiler = TaskingProfiler::getInstance().startTask(core_id, 0, typeid(*task).name());
result = this->execute_olfit(core_id, channel_id, task); result = this->execute_olfit(core_id, channel_id, task);
TaskingProfiler::getInstance().endTask(channel_id, task_id_profiler); TaskingProfiler::getInstance().endTask(channel_id, task_id_profiler);
} }
result = this->execute_olfit(core_id, channel_id, task); else{
result = this->execute_olfit(core_id, channel_id, task);
}
break; break;
case synchronization::primitive::ScheduleAll: case synchronization::primitive::ScheduleAll:
case synchronization::primitive::None: case synchronization::primitive::None:
@@ -125,24 +129,30 @@ void Worker::execute()
task_id_profiler = TaskingProfiler::getInstance().startTask(core_id, 0, typeid(*task).name()); task_id_profiler = TaskingProfiler::getInstance().startTask(core_id, 0, typeid(*task).name());
result = task->execute(core_id, channel_id); result = task->execute(core_id, channel_id);
TaskingProfiler::getInstance().endTask(channel_id, task_id_profiler); TaskingProfiler::getInstance().endTask(channel_id, task_id_profiler);
}
else{
result = task->execute(core_id, channel_id);
} }
result = task->execute(core_id, channel_id);
break; break;
case synchronization::primitive::ReaderWriterLatch: case synchronization::primitive::ReaderWriterLatch:
if constexpr (config::use_tasking_profiler()){ if constexpr (config::use_tasking_profiler()){
task_id_profiler = TaskingProfiler::getInstance().startTask(core_id, 0, typeid(*task).name()); task_id_profiler = TaskingProfiler::getInstance().startTask(core_id, 0, typeid(*task).name());
result = Worker::execute_reader_writer_latched(core_id, channel_id, task); result = Worker::execute_reader_writer_latched(core_id, channel_id, task);
TaskingProfiler::getInstance().endTask(channel_id, task_id_profiler); TaskingProfiler::getInstance().endTask(channel_id, task_id_profiler);
} }
result = Worker::execute_reader_writer_latched(core_id, channel_id, task); else{
result = Worker::execute_reader_writer_latched(core_id, channel_id, task);
}
break; break;
case synchronization::primitive::ExclusiveLatch: case synchronization::primitive::ExclusiveLatch:
if constexpr (config::use_tasking_profiler()){ if constexpr (config::use_tasking_profiler()){
task_id_profiler = TaskingProfiler::getInstance().startTask(core_id, 0, typeid(*task).name()); task_id_profiler = TaskingProfiler::getInstance().startTask(core_id, 0, typeid(*task).name());
result = Worker::execute_exclusive_latched(core_id, channel_id, task); result = Worker::execute_exclusive_latched(core_id, channel_id, task);
TaskingProfiler::getInstance().endTask(channel_id, task_id_profiler); TaskingProfiler::getInstance().endTask(channel_id, task_id_profiler);
} }
result = Worker::execute_exclusive_latched(core_id, channel_id, task); else{
result = Worker::execute_exclusive_latched(core_id, channel_id, task);
}
break; break;
} }