From 7af276ac81cb68d197bf13938adbab8a3c195ec1 Mon Sep 17 00:00:00 2001
From: Alexander Boettcher
Date: Fri, 19 Feb 2021 17:02:27 +0100
Subject: [PATCH] cpu_balancer: add support to provide PD service

The balancer now announces a PD service in addition to its CPU service.
Each PD session requested at the balancer is created at the parent and
the parent's session capability is handed back to the client unmodified.

Issue #3837
---
 repos/os/recipes/pkg/cpu_balancer/runtime        |   2 +-
 .../recipes/pkg/cpu_balancer_config/runtime      |  10 +-
 repos/os/run/cpu_balancer.run                    |   8 +-
 repos/os/src/server/cpu_balancer/component.cc    | 153 +++++++++++++-----
 repos/os/src/server/cpu_balancer/schedule.cc     |   3 +-
 5 files changed, 131 insertions(+), 45 deletions(-)

diff --git a/repos/os/recipes/pkg/cpu_balancer/runtime b/repos/os/recipes/pkg/cpu_balancer/runtime
index 2c63568dda..10db4d8240 100644
--- a/repos/os/recipes/pkg/cpu_balancer/runtime
+++ b/repos/os/recipes/pkg/cpu_balancer/runtime
@@ -1,6 +1,6 @@
-
+

diff --git a/repos/os/recipes/pkg/cpu_balancer_config/runtime b/repos/os/recipes/pkg/cpu_balancer_config/runtime
index be8cfaf3a9..6e53432524 100644
--- a/repos/os/recipes/pkg/cpu_balancer_config/runtime
+++ b/repos/os/recipes/pkg/cpu_balancer_config/runtime
@@ -1,6 +1,6 @@
-
+
@@ -10,6 +10,9 @@
+
+
+
@@ -50,7 +53,10 @@
-
+
+
+
+

diff --git a/repos/os/run/cpu_balancer.run b/repos/os/run/cpu_balancer.run
index 309cc9b479..f48150e9d4 100644
--- a/repos/os/run/cpu_balancer.run
+++ b/repos/os/run/cpu_balancer.run
@@ -24,7 +24,7 @@ create_boot_directory
 import_from_depot [depot_user]/src/report_rom
 append config {
-
+
@@ -65,7 +65,10 @@ append_if [expr $report_config eq "yes"] config {
 append config {
-
+
+
+
+

diff --git a/repos/os/src/server/cpu_balancer/component.cc b/repos/os/src/server/cpu_balancer/component.cc
index a81e30e56d..631b1f1654 100644
--- a/repos/os/src/server/cpu_balancer/component.cc
+++ b/repos/os/src/server/cpu_balancer/component.cc
@@ -19,6 +19,7 @@
 #include
 #include
+#include

 #include "session.h"
 #include "config.h"

@@ -26,6 +27,8 @@ namespace Cpu {
 	struct Balancer;
+	struct Pd_root;
+	struct Cpu_pd_session;

 	using Genode::Affinity;
 	using Genode::Attached_rom_dataspace;
@@ -38,6 +41,7 @@ namespace Cpu {
 	using Genode::Signal_handler;
 	using Genode::Sliced_heap;
 	using Genode::Typed_root;
+	using Genode::Pd_session;
 }

 template
@@ -68,46 +72,12 @@ auto retry(T &env, FUNC func, HANDLER handler,
 typedef Genode::Registry > Sleeper_list;
 typedef Genode::Tslab, 4096> Tslab_sleeper;

-struct Cpu::Balancer : Rpc_object>
-{
-	Genode::Env            &env;
-	Attached_rom_dataspace  config        { env, "config" };
-	Timer::Connection       timer         { env };
-	Sliced_heap             slice         { env.ram(), env.rm() };
-	Child_list              list          { };
-	Constructible           trace         { };
-	Constructible           reporter      { };
-	uint64_t                timer_us      { 1000 * 1000UL };
-	Session_label           label         { };
-	unsigned                report_size   { 4096 * 1 };
-	Tslab_sleeper           alloc_thread  { slice };
-	Sleeper_list            sleeper       { };
-	bool                    verbose       { false };
-	bool                    update_report { false };
-	bool                    use_sleeper   { true };
-
-	Signal_handler signal_config {
-		env.ep(), *this, &Balancer::handle_config };
-
-	void handle_config();
-	void handle_timeout();
-
-	/*
-	 * Need extra EP to avoid dead-lock/live-lock (depending on kernel)
-	 * due to down-calls by this component, e.g. parent.upgrade(...), and
-	 * up-calls by parent using this CPU service, e.g. to create initial thread
-	 *
-	 * Additionally, a list_mutex is required due to having 2 EPs now.
-	 */
-	Entrypoint ep { env, 4 * 4096, "live/dead-lock", Affinity::Location() };
-
-	Signal_handler signal_timeout {
-		ep, *this, &Balancer::handle_timeout };
-
-	Genode::Mutex list_mutex { };

+namespace Cpu {

 	template <typename FUNC>
-	Session_capability _withdraw_quota(Root::Session_args const &args, FUNC const &fn)
+	Session_capability withdraw_quota(Sliced_heap &slice,
+	                                  Root::Session_args const &args,
+	                                  FUNC const &fn)
 	{
 		/*
 		 * We need to decrease 'ram_quota' by
@@ -152,6 +122,110 @@ struct Cpu::Balancer : Rpc_object>
 		return fn(argbuf);
 	}
+}
+
+struct Cpu::Cpu_pd_session
+{
+	Parent::Client parent_client { };
+	Client_id      id;
+
+	Genode::Capability<Pd_session> pd_cap;
+
+	Cpu_pd_session(Genode::Env &env,
+	               Root::Session_args const &args,
+	               Affinity const &affinity)
+	:
+		id(parent_client, env.id_space()),
+		pd_cap(env.session<Pd_session>(id.id(), args, affinity))
+	{ }
+
+	virtual ~Cpu_pd_session() { }
+};
+
+struct Cpu::Pd_root : Rpc_object<Typed_root<Pd_session> >
+{
+	Env         &env;
+	Sliced_heap  slice { env.ram(), env.rm() };
+
+	Registry<Registered<Cpu_pd_session> > sessions { };
+
+	Session_capability session(Root::Session_args const &args,
+	                           Affinity const &affinity) override
+	{
+		return withdraw_quota(slice, args,
+		                      [&] (char const * const session_args) {
+			return (new (slice) Registered<Cpu_pd_session>(sessions, env,
+			                                               session_args,
+			                                               affinity))->pd_cap;
+		});
+	}
+
+	void upgrade(Session_capability const, Root::Upgrade_args const &) override
+	{
+		/*
+		 * The PD cap (of the parent) is passed through to the client
+		 * directly, so we should not get any upgrades here.
+		 */
+		Genode::warning("Pd upgrade unexpected");
+	}
+
+	void close(Session_capability const cap) override
+	{
+		if (!cap.valid()) {
+			Genode::error("unknown cap");
+			return;
+		}
+
+		sessions.for_each([&](auto &session) {
+			if (session.pd_cap == cap)
+				destroy(slice, &session);
+		});
+	}
+
+	Pd_root(Genode::Env &env) : env(env)
+	{
+		env.parent().announce(env.ep().manage(*this));
+	}
+};
+
+struct Cpu::Balancer : Rpc_object>
+{
+	Genode::Env            &env;
+	Attached_rom_dataspace  config        { env, "config" };
+	Timer::Connection       timer         { env };
+	Sliced_heap             slice         { env.ram(), env.rm() };
+	Child_list              list          { };
+	Constructible           trace         { };
+	Constructible           reporter      { };
+	uint64_t                timer_us      { 1000 * 1000UL };
+	Session_label           label         { };
+	unsigned                report_size   { 4096 * 1 };
+	Tslab_sleeper           alloc_thread  { slice };
+	Sleeper_list            sleeper       { };
+	bool                    verbose       { false };
+	bool                    update_report { false };
+	bool                    use_sleeper   { true };
+
+	Cpu::Pd_root pd { env };
+
+	Signal_handler signal_config {
+		env.ep(), *this, &Balancer::handle_config };
+
+	void handle_config();
+	void handle_timeout();
+
+	/*
+	 * Need extra EP to avoid dead-lock/live-lock (depending on kernel)
+	 * due to down-calls by this component, e.g. parent.upgrade(...), and
+	 * up-calls by parent using this CPU service, e.g. to create the initial thread
+	 *
+	 * Additionally, a list_mutex is required due to having 2 EPs now.
+	 */
+	Entrypoint ep { env, 4 * 4096, "live/dead-lock", Affinity::Location() };
+
+	Signal_handler signal_timeout {
+		ep, *this, &Balancer::handle_timeout };
+
+	Genode::Mutex list_mutex { };

 	/***********************
 	 ** Session interface **
 	 ***********************/
@@ -160,7 +234,8 @@ struct Cpu::Balancer : Rpc_object>
 	Session_capability session(Root::Session_args const &args,
 	                           Affinity const &affinity) override
 	{
-		return _withdraw_quota(args, [&] (char const * const session_args) {
+		return withdraw_quota(slice, args,
+		                      [&] (char const * const session_args) {
 			if (verbose)
 				log("new session '", args.string(), "' -> '", session_args, "' ",
 				    affinity.space().width(), "x", affinity.space().height(), " ",
diff --git a/repos/os/src/server/cpu_balancer/schedule.cc b/repos/os/src/server/cpu_balancer/schedule.cc
index e84ce6a6c5..44cbf1c2ee 100644
--- a/repos/os/src/server/cpu_balancer/schedule.cc
+++ b/repos/os/src/server/cpu_balancer/schedule.cc
@@ -50,7 +50,8 @@ void Cpu::Session::update_threads(Trace &trace, Session_label const &cpu_balance
 			log("[", _label, "] name='", name, "' at ",
 			    current_loc.xpos(), "x", current_loc.ypos(),
 			    " has ec/sc time ", time.thread_context, "/",
-			    time.scheduling_context);
+			    time.scheduling_context,
+			    " policy=", policy.string());
 		});
 	} catch (Genode::Trace::Nonexistent_subject) { /* how could that be ? */