Merge branch 'ealan' into gpgpu

Marcel Lütke Dreimann
2023-09-27 14:46:20 +02:00
90 changed files with 3292 additions and 390 deletions

View File

@@ -3,11 +3,25 @@
{
"name": "EalánOS",
"includePath": [
"${workspaceFolder}/depot/genodelabs/api/libc/**",
"${workspaceFolder}/depot/genodelabs/api/stdcxx/**",
"${workspaceFolder}/repos/base/**",
"${workspaceFolder}/repos/base-nova/**",
"${workspaceFolder}/repos/**"
"${workspaceFolder}/repos/**",
"${workspaceFolder}/repos/mml/**",
"${workspaceFolder}/repos/libports/include/**",
"${workspaceFolder}/contrib/mxtasking-07a3844690ae8eb15832d93e29567a5a8e6e45af/include/**",
"${workspaceFolder}/contrib/libpfm4-b0ec09148c2be9f4a96203a3d2de4ebed6ce2da0/include/**",
"${workspaceFolder}/contrib/libc-c7cd230b11ca71979f32950803bc78b45adfa0ce/include/libc/**",
"${workspaceFolder}/contrib/libc-c7cd230b11ca71979f32950803bc78b45adfa0ce/include/spec/x86_64/libc",
"${workspaceFolder}/contrib/libc-c7cd230b11ca71979f32950803bc78b45adfa0ce/include/libc/sys/**",
"${workspaceFolder}/contrib/stdcxx-d2865c41fafbbf66051d38e7b742c4d5bc2f05a3/include/stdcxx/",
"${workspaceFolder}/contrib/stdcxx-d2865c41fafbbf66051d38e7b742c4d5bc2f05a3/include/stdcxx/std",
"${workspaceFolder}/contrib/stdcxx-d2865c41fafbbf66051d38e7b742c4d5bc2f05a3/include/stdcxx/c_std",
"${workspaceFolder}/repos/libports/include/spec/x86_64/stdcxx",
"${workspaceFolder}/repos/base-nova/src/core/include/**",
"${workspaceFolder}/repos/base-nova/src/include/**",
"${workspaceFolder}/repos/base-nova/include/**",
"${workspaceFolder}/repos/base/src/core/include/**",
"${workspaceFolder}/repos/base/src/include/**",
"${workspaceFolder}/repos/base/include/**",
"/usr/local/genode/tool/21.05/lib/gcc/x86_64-pc-elf/10.3.0/include"
],
"defines": [
"__GENODE__",
@@ -16,30 +30,35 @@
"_GLIBCXX_ATOMIC_BUILTINS_4",
"_GLIBCXX_NO_OBSOLETE_ISINF_ISNAN_DYNAMIC"
],
"compilerPath": "/usr/local/genode/tool/21.05/bin/genode-x86-g++",
"compilerPath": "/usr/local/genode/tool/21.05/bin/genode-x86-gcc",
"cStandard": "gnu17",
"cppStandard": "gnu++17",
"intelliSenseMode": "${default}",
"intelliSenseMode": "linux-gcc-x64",
"compilerArgs": [
"-nostdinc",
"-m64",
"-mcmodel=large",
"-MMD",
"-MP",
"-MT"
"-m64"
],
"configurationProvider": "ms-vscode.cmake-tools"
"configurationProvider": "ms-vscode.makefile-tools",
"forcedInclude": [
"${workspaceFolder}/contrib/libc-c7cd230b11ca71979f32950803bc78b45adfa0ce/include/libc/stdint.h"
],
"mergeConfigurations": false,
"browse": {
"limitSymbolsToIncludedHeaders": true
}
},
{
"name": "Genode",
"includePath": [
"${workspaceFolder}/**"
"${workspaceFolder}/**",
"${workspaceFolder}/repos/base/**"
],
"defines": [],
"compilerPath": "/usr/bin/clang",
"cStandard": "c17",
"cppStandard": "c++14",
"intelliSenseMode": "linux-clang-x64"
"intelliSenseMode": "linux-clang-x64",
"configurationProvider": "ms-vscode.makefile-tools"
}
],
"version": 4

.vscode/settings.json (vendored), 124 lines changed
View File

@@ -5,8 +5,128 @@
"initializer_list": "cpp",
"streambuf": "cpp",
"tuple": "cpp",
"memory": "cpp"
"memory": "cpp",
"*.def": "cpp",
"array": "cpp",
"deque": "cpp",
"forward_list": "cpp",
"list": "cpp",
"string": "cpp",
"vector": "cpp",
"any": "cpp",
"executor": "cpp",
"internet": "cpp",
"io_context": "cpp",
"memory_resource": "cpp",
"socket": "cpp",
"string_view": "cpp",
"timer": "cpp",
"functional": "cpp",
"rope": "cpp",
"slist": "cpp",
"coroutine": "cpp",
"future": "cpp",
"scoped_allocator": "cpp",
"valarray": "cpp",
"regex": "cpp",
"cstdint": "cpp",
"bitset": "cpp",
"random": "cpp",
"optional": "cpp",
"dynamic_bitset": "cpp",
"mutex": "cpp",
"shared_mutex": "cpp",
"algorithm": "cpp",
"atomic": "cpp",
"bit": "cpp",
"cassert": "cpp",
"cctype": "cpp",
"cerrno": "cpp",
"chrono": "cpp",
"ciso646": "cpp",
"clocale": "cpp",
"cmath": "cpp",
"compare": "cpp",
"concepts": "cpp",
"cstddef": "cpp",
"cstdio": "cpp",
"cstdlib": "cpp",
"cstring": "cpp",
"ctime": "cpp",
"cwchar": "cpp",
"cwctype": "cpp",
"map": "cpp",
"unordered_map": "cpp",
"exception": "cpp",
"fstream": "cpp",
"ios": "cpp",
"iosfwd": "cpp",
"iostream": "cpp",
"istream": "cpp",
"iterator": "cpp",
"limits": "cpp",
"new": "cpp",
"numeric": "cpp",
"ostream": "cpp",
"queue": "cpp",
"ranges": "cpp",
"ratio": "cpp",
"sstream": "cpp",
"stdexcept": "cpp",
"system_error": "cpp",
"thread": "cpp",
"type_traits": "cpp",
"typeinfo": "cpp",
"utility": "cpp",
"variant": "cpp",
"charconv": "cpp",
"cfenv": "cpp",
"cinttypes": "cpp",
"csetjmp": "cpp",
"csignal": "cpp",
"cstdarg": "cpp",
"cuchar": "cpp",
"set": "cpp",
"unordered_set": "cpp",
"codecvt": "cpp",
"condition_variable": "cpp",
"iomanip": "cpp",
"*.run": "xml",
"span": "cpp"
},
"vscode-as-git-mergetool.settingsAssistantOnStartup": false,
"makefile.makeDirectory": "build/x86_64"
"makefile.makeDirectory": "build/x86_64",
"C_Cpp.errorSquiggles": "enabledIfIncludesResolve",
"C_Cpp.default.cppStandard": "gnu++17",
"C_Cpp.default.cStandard": "gnu17",
"C_Cpp.workspaceSymbols": "Just My Code",
"C_Cpp.inlayHints.parameterNames.enabled": true,
"C_Cpp.inlayHints.autoDeclarationTypes.showOnLeft": true,
"C_Cpp.intelliSenseMemoryLimit": 16384,
"makefile.makefilePath": "",
"makefile.dryrunSwitches": [
"--keep-going",
"--print-directory",
"KERNEL=nova",
"BOARD=pc",
"run/vscode",
"VERBOSE="
],
"C_Cpp.default.intelliSenseMode": "linux-gcc-x64",
"C_Cpp.default.mergeConfigurations": true,
"C_Cpp.autocompleteAddParentheses": true,
"C_Cpp.intelliSenseCacheSize": 20480,
"makefile.buildBeforeLaunch": false,
"makefile.extensionOutputFolder": ".vscode",
"makefile.configurationCachePath": ".vscode/configurationCache.log",
"explorer.excludeGitIgnore": true,
"makefile.buildLog": ".vscode/build.log",
"definition-autocompletion.update_index_on_change": true,
"definition-autocompletion.update_index_interval": 5,
"C_Cpp.intelliSenseEngineFallback": "disabled",
"makefile.extensionLog": ".vscode/extension.log",
"makefile.ignoreDirectoryCommands": false,
"html.format.wrapLineLength": 80,
"editor.wordWrap": "bounded",
"editor.wordWrapColumn": 90
}

View File

@@ -1 +1 @@
52fcb4b19aa032eaba5484a69c3c4c491c2a6915
cda426e631c475c21b8ff443bfa01f90f38d95a3

View File

@@ -4,7 +4,7 @@ DOWNLOADS := nova.git
# feature/numa branch
URL(nova) := https://github.com/mmueller41/NOVA.git
REV(nova) := 4707840843206d63f72ba9238756355d16b52be3
REV(nova) := 8d024288da12cb3a0366641e62c53876b7d827dd
DIR(nova) := src/kernel/nova
PATCHES := $(sort $(wildcard $(REP_DIR)/patches/*.patch))

View File

@@ -46,7 +46,7 @@ void Genode::Trace::Performance_counter::start(unsigned counter)
{
Nova::uint8_t rc;
Nova::mword_t type = (counter >> 4);
Nova::mword_t sel = type == Performance_counter::CORE ? counter : counter >>4;
Nova::mword_t sel = type == Performance_counter::CORE ? counter : counter & 0xf;
if ((rc = Nova::hpc_start(sel, type)) != Nova::NOVA_OK)
throw Genode::Trace::Pfc_access_error(rc);
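Note on the selector fix above: the counter argument apparently encodes the counter class in the bits above the low nibble and the per-class counter index in the low nibble; the old code reused the shifted value (the class) as the selector and thereby lost the index for non-core counters. A minimal sketch of the assumed encoding (the helper is illustrative and not part of this commit):

    /* assumed encoding: counter class in the upper bits, index in the low nibble */
    unsigned encode_counter(unsigned type, unsigned index)
    {
        return (type << 4) | (index & 0xf);
    }

    /* decoding as performed in Performance_counter::start() above */
    Nova::mword_t type = counter >> 4;                         /* counter class          */
    Nova::mword_t sel  = type == Performance_counter::CORE
                       ? counter : counter & 0xf;              /* index within the class */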

View File

@@ -93,6 +93,7 @@ class Genode::Affinity
return Affinity::Space(node.attribute_value("width", 0U),
node.attribute_value("height", 0U));
}
};
@@ -236,6 +237,30 @@ class Genode::Affinity
}
};
namespace Genode {
static inline void print(Output &out, const Affinity::Space &space)
{
Genode::print(out, "(");
Genode::print(out, space.width());
Genode::print(out, ",");
Genode::print(out, space.height());
Genode::print(out, ")");
}
static inline void print(Output &out, const Affinity::Location &loc)
{
Genode::print(out, "(");
Genode::print(out, loc.xpos());
Genode::print(out, ",");
Genode::print(out, loc.ypos());
Genode::print(out, ",");
Genode::print(out, loc.width());
Genode::print(out, "×");
Genode::print(out, loc.height());
Genode::print(out, ")");
}
}
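With these print overloads in place, affinity spaces and locations can be passed directly to Genode::log. A short usage sketch (the concrete values are illustrative):

    Genode::Affinity::Space    const space(64, 1);
    Genode::Affinity::Location const loc(1, 0, 2, 1);
    Genode::log("space ", space);   /* prints "(64,1)"    */
    Genode::log("loc ",   loc);     /* prints "(1,0,2×1)" */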
Genode::Affinity::Location Genode::Affinity::Space::location_of_index(int index) const
{

View File

@@ -300,6 +300,8 @@ class Genode::Child : protected Rpc_object<Parent>,
/* arguments fetched by the child in response to a yield signal */
Mutex _yield_request_mutex { };
Resource_args _yield_request_args { };
Mutex _resource_gain_mutex { };
Resource_args _gained_resources { };
/* number of unanswered heartbeat signals */
unsigned _outstanding_heartbeats = 0;
@@ -782,6 +784,14 @@ class Genode::Child : protected Rpc_object<Parent>,
*/
void yield(Resource_args const &args);
/**
* Bestow resources on the child
*
* By calling this method, the child is notified that it has
* gained the specified amount of resources.
*/
void accept(Resource_args const &args);
/**
* Notify the child about newly available resources
*/
@@ -818,6 +828,7 @@ class Genode::Child : protected Rpc_object<Parent>,
void resource_request(Resource_args const &) override;
void yield_sigh(Signal_context_capability) override;
Resource_args yield_request() override;
Resource_args gained_resources() override;
void yield_response() override;
void heartbeat_sigh(Signal_context_capability) override;
void heartbeat_response() override;
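The new accept()/gained_resources() pair complements the existing yield protocol in the opposite direction: the parent bestows a quantum of resources, the child is woken via its resource-avail signal handler, queries the granted arguments, and acknowledges with yield_response(). A hedged sketch of both sides, modeled after the grant benchmark added later in this commit (names and quantities are illustrative):

    /* parent side: 'child' is the Genode::Child hosting the subsystem */
    Genode::Parent::Resource_args const award("ram_quota=5M,cpu_quota=10");
    child.accept(award);

    /* child side: handler registered via _env.parent().resource_avail_sigh(_grant_handler) */
    void _handle_grant()
    {
        Genode::Parent::Resource_args const args = _env.parent().gained_resources();
        Genode::size_t const gained_ram =
            Genode::Arg_string::find_arg(args.string(), "ram_quota").ulong_value(0);
        Genode::log("gained ", gained_ram, " bytes of RAM quota");
        _env.parent().yield_response();   /* acknowledge the grant */
    }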

View File

@@ -42,6 +42,10 @@ struct Genode::Cpu_session_client : Rpc_client<Cpu_session>
Affinity::Space affinity_space() const override {
return call<Rpc_affinity_space>(); }
void move(const Affinity::Location loc) override {
call<Rpc_move>(loc);
}
Dataspace_capability trace_control() override {
return call<Rpc_trace_control>(); }

View File

@@ -22,7 +22,7 @@ namespace Genode { struct Cpu_connection; }
struct Genode::Cpu_connection : Connection<Cpu_session>, Cpu_session_client
{
enum { RAM_QUOTA = 36*1024 };
enum { RAM_QUOTA = 72*1024 };
/**
* Constructor

View File

@@ -138,6 +138,12 @@ struct Genode::Cpu_session : Session
*/
virtual Affinity::Space affinity_space() const = 0;
/**
* @brief Update affinity location of this CPU session
*
*/
virtual void move(const Genode::Affinity::Location ) = 0;
/**
* Translate generic priority value to kernel-specific priority levels
*
@@ -249,6 +255,7 @@ struct Genode::Cpu_session : Session
GENODE_RPC(Rpc_migrate_thread, void, migrate_thread, Thread_capability, Affinity::Location);
GENODE_RPC(Rpc_exception_sigh, void, exception_sigh, Signal_context_capability);
GENODE_RPC(Rpc_affinity_space, Affinity::Space, affinity_space);
GENODE_RPC(Rpc_move, void, move, Affinity::Location);
GENODE_RPC(Rpc_trace_control, Dataspace_capability, trace_control);
GENODE_RPC(Rpc_ref_account, int, ref_account, Cpu_session_capability);
GENODE_RPC(Rpc_transfer_quota, int, transfer_quota, Cpu_session_capability, size_t);
@@ -257,7 +264,7 @@ struct Genode::Cpu_session : Session
GENODE_RPC_INTERFACE(Rpc_create_thread, Rpc_kill_thread, Rpc_exception_sigh,
Rpc_affinity_space, Rpc_trace_control, Rpc_ref_account,
Rpc_transfer_quota, Rpc_quota, Rpc_native_cpu, Rpc_migrate_thread);
Rpc_transfer_quota, Rpc_quota, Rpc_native_cpu, Rpc_migrate_thread, Rpc_move);
};
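The new move() RPC lets a client re-pin its CPU session to another location within the affinity space; core merely records the new location (see Cpu_session_component::move() further down). A minimal usage sketch, assuming 'env' is the component's Genode::Env:

    Genode::Affinity::Space    const space = env.cpu().affinity_space();
    Genode::Affinity::Location const loc   = space.location_of_index(2);
    env.cpu().move(loc);   /* the session now reports the updated location */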

View File

@@ -67,6 +67,8 @@ struct Genode::Parent_client : Rpc_client<Parent>
call<Rpc_yield_sigh>(sigh); }
Resource_args yield_request() override { return call<Rpc_yield_request>(); }
Resource_args gained_resources() override { return call<Rpc_gained_resources>(); }
void yield_response() override { call<Rpc_yield_response>(); }

View File

@@ -281,6 +281,12 @@ class Genode::Parent
*/
virtual void yield_response() = 0;
/**
* Obtain information about the resources gained, e.g. from a resource request
*
*/
virtual Resource_args gained_resources() = 0;
/*
* Health monitoring
*/
@@ -334,6 +340,7 @@ class Genode::Parent
Resource_args const &);
GENODE_RPC(Rpc_yield_sigh, void, yield_sigh, Signal_context_capability);
GENODE_RPC(Rpc_yield_request, Resource_args, yield_request);
GENODE_RPC(Rpc_gained_resources, Resource_args, gained_resources);
GENODE_RPC(Rpc_yield_response, void, yield_response);
GENODE_RPC(Rpc_heartbeat_sigh, void, heartbeat_sigh, Signal_context_capability);
GENODE_RPC(Rpc_heartbeat_response, void, heartbeat_response);
@@ -343,7 +350,7 @@ class Genode::Parent
Rpc_close, Rpc_session_response, Rpc_main_thread,
Rpc_deliver_session_cap, Rpc_resource_avail_sigh,
Rpc_resource_request, Rpc_yield_sigh,
Rpc_yield_request, Rpc_yield_response,
Rpc_yield_request, Rpc_yield_response, Rpc_gained_resources,
Rpc_heartbeat_sigh, Rpc_heartbeat_response);
};

View File

@@ -41,4 +41,9 @@ struct Genode::Topo_session_client : Rpc_client<Topo_session>
unsigned node_count() override {
return call<Rpc_node_count>();
}
void reconstruct(const Affinity affinity) override
{
call<Rpc_reconstruct>(affinity);
}
};

View File

@@ -49,10 +49,12 @@ struct Genode::Topo_session : Session
virtual Topology::Numa_region node_affinity_of(Affinity::Location const &) = 0;
virtual Topology::Numa_region node_at_id(unsigned node_id) = 0;
virtual unsigned node_count() = 0;
virtual void reconstruct(const Affinity) = 0;
GENODE_RPC(Rpc_node_affinity, Topology::Numa_region, node_affinity_of, Affinity::Location const &);
GENODE_RPC(Rpc_node_id, Topology::Numa_region, node_at_id, unsigned);
GENODE_RPC(Rpc_node_count, unsigned, node_count);
GENODE_RPC(Rpc_reconstruct, void, reconstruct, Affinity);
GENODE_RPC_INTERFACE(Rpc_node_affinity, Rpc_node_id, Rpc_node_count);
GENODE_RPC_INTERFACE(Rpc_node_affinity, Rpc_node_id, Rpc_node_count, Rpc_reconstruct);
};
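reconstruct() lets a client rebuild its topology model after its affinity has changed, for instance when a cell is grown or shrunk; the session component stores the new affinity and re-runs construct() (see the Topo_session_component changes below). A hedged sketch, where 'topo' stands for an already established Topo session client:

    Genode::Affinity::Space    const space(64, 1);
    Genode::Affinity::Location const loc(0, 0, 32, 1);
    topo.reconstruct(Genode::Affinity(space, loc));   /* rebuilds the NUMA model for the new location */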

View File

@@ -212,6 +212,8 @@ _ZN6Genode18Signal_transmitterC1ENS_10CapabilityINS_14Signal_contextEEE T
_ZN6Genode18Signal_transmitterC2ENS_10CapabilityINS_14Signal_contextEEE T
_ZN6Genode20env_session_id_spaceEv T
_ZN6Genode21cache_invalidate_dataEmm T
_ZN6Genode22Topo_session_component11reconstructENS_8AffinityE W
_ZThn136_N6Genode22Topo_session_component11reconstructENS_8AffinityE W
_ZN6Genode25env_stack_area_region_mapE B 8
_ZN6Genode27cache_clean_invalidate_dataEmm T
_ZN6Genode28env_stack_area_ram_allocatorE B 8
@@ -261,6 +263,7 @@ _ZN6Genode5Child23initiate_env_pd_sessionEv T
_ZN6Genode5Child4exitEi T
_ZN6Genode5Child5closeENS_8Id_spaceINS_6Parent6ClientEE2IdE T
_ZN6Genode5Child5yieldERKNS_6StringILm160EEE T
_ZN6Genode5Child6acceptERKNS_6StringILm160EEE T
_ZN6Genode5Child7sessionENS_8Id_spaceINS_6Parent6ClientEE2IdERKNS_13Rpc_in_bufferILm64EEERKNS6_ILm160EEERKNS_8AffinityE T
_ZN6Genode5Child7upgradeENS_8Id_spaceINS_6Parent6ClientEE2IdERKNS_13Rpc_in_bufferILm160EEE T
_ZN6Genode5Child8announceERKNS_13Rpc_in_bufferILm64EEE T
@@ -424,7 +427,7 @@ _ZTVN6Genode18Allocator_avl_baseE D 128
_ZTVN6Genode4HeapE D 72
_ZTVN6Genode4SlabE D 72
_ZTVN6Genode5Child14Initial_threadE D 48
_ZTVN6Genode5ChildE D 440
_ZTVN6Genode5ChildE D 456
_ZTVN6Genode6OutputE D 48
_ZTVN6Genode6ThreadE D 48
_ZTVN6Genode7ConsoleE D 48

View File

@@ -255,7 +255,7 @@ ALL_INC_DIR += $(LIBGCC_INC_DIR)
ALL_INC_DIR += $(HOST_INC_DIR)
VERBOSE ?= @
VERBOSE_DIR ?= --no-print-directory
VERBOSE_DIR ?= --print-directory
MSG_LINK ?= @$(ECHO) " LINK "
MSG_COMP ?= @$(ECHO) " COMPILE "

View File

@@ -1 +1 @@
2022-10-11 1574044ae0ee33a9ad3bdadb3c487c47d4f45bff
2023-07-07-a b51a6785a1bcdaf85c2627190b7320fb52517138

View File

@@ -171,6 +171,10 @@ Affinity::Space Cpu_session_component::affinity_space() const
return Affinity::Space(_location.width(), _location.height());
}
void Cpu_session_component::move(const Affinity::Location destination)
{
_location = destination;
}
Dataspace_capability Cpu_session_component::trace_control()
{

View File

@@ -172,6 +172,7 @@ class Genode::Cpu_session_component : public Session_object<Cpu_session>,
void migrate_thread(Thread_capability, Affinity::Location) override;
void exception_sigh(Signal_context_capability) override;
Affinity::Space affinity_space() const override;
void move(const Affinity::Location) override;
Dataspace_capability trace_control() override;
int ref_account(Cpu_session_capability c) override;
int transfer_quota(Cpu_session_capability, size_t) override;

View File

@@ -19,6 +19,8 @@
#include <topo_session_component.h>
#include <base/log.h>
namespace Genode {
class Topo_root : public Root_component<Topo_session_component>
@@ -35,8 +37,10 @@ namespace Genode {
if (ram_quota < Trace::Control_area::SIZE)
throw Insufficient_ram_quota();
if (!affinity.valid())
if (!affinity.valid()) {
log("Location ", affinity.location(), " not within space ", affinity.space());
throw Service_denied();
}
return new (md_alloc())
Topo_session_component(*this->ep(),

View File

@@ -30,7 +30,7 @@ namespace Genode {
class Genode::Topo_session_component : public Session_object<Topo_session>
{
private:
Genode::Affinity &_affinity;
Genode::Affinity _affinity;
Sliced_heap _md_alloc;
Topology::Numa_region _node_affinities[Genode::Platform::MAX_SUPPORTED_CPUS][Genode::Platform::MAX_SUPPORTED_CPUS];
@@ -44,9 +44,10 @@ class Genode::Topo_session_component : public Session_object<Topo_session>
Diag const &diag,
Ram_allocator &ram_alloc,
Region_map &local_rm,
Affinity &affinity
Affinity affinity
);
void construct();
/**
* @brief Topology session interface
@@ -65,5 +66,11 @@ class Genode::Topo_session_component : public Session_object<Topo_session>
unsigned node_count() override
{
return _node_count;
}
}
void reconstruct(Affinity affinity) override
{
_affinity = affinity;
construct();
}
};

View File

@@ -25,19 +25,24 @@ Topo_session_component::Topo_session_component(Rpc_entrypoint &session_ep,
Diag const &diag,
Ram_allocator &ram_alloc,
Region_map &local_rm,
Affinity &affinity)
Affinity affinity)
: Session_object(session_ep, resources, label, diag),
_affinity(affinity),
_md_alloc(ram_alloc, local_rm),
_node_count(0)
{
Affinity::Location location = affinity.location();
construct();
}
void Topo_session_component::construct()
{
Affinity::Location location = _affinity.location();
const unsigned height = location.height();
unsigned width = location.width();
unsigned curr_node_id = 0;
Topology::Numa_region *node_created = new (_md_alloc) Topology::Numa_region[64]();
Genode::log("[", label, "] Creating new topology model of size ", width, "x", height);
Genode::log("[", label(), "] Creating new topology model of size ", width, "x", height);
for (unsigned x = 0; x < width; x++)
{
@@ -53,14 +58,14 @@ Topo_session_component::Topo_session_component(Rpc_entrypoint &session_ep,
unsigned cpu_id = platform_specific().kernel_cpu_id(loc);
unsigned native_id = platform_specific().domain_of_cpu(cpu_id);
log("[", label, "] CPU (", x, "x", y, ") is native CPU ", cpu_id, " on node ", native_id);
log("[", label(), "] CPU (", x, "x", y, ") is native CPU ", cpu_id, " on node ", native_id);
if (node_created[native_id].core_count() == 0)
{
_nodes[curr_node_id] = _node_affinities[x][y] = Topology::Numa_region(curr_node_id, native_id);
_node_affinities[x][y].increment_core_count();
node_created[native_id] = _node_affinities[x][y];
log("[", label, "] Found new native NUMA region ", native_id, " for CPU (", x, "x", y, ")");
log("[", label(), "] Found new native NUMA region ", native_id, " for CPU (", x, "x", y, ")");
_node_count++;
curr_node_id++;
}
@@ -71,4 +76,5 @@ Topo_session_component::Topo_session_component(Rpc_entrypoint &session_ep,
}
}
}
}
}

View File

@@ -45,6 +45,15 @@ void Child::yield(Resource_args const &args)
Signal_transmitter(_yield_sigh).submit();
}
void Child::accept(Resource_args const &args)
{
Mutex::Guard guard{_resource_gain_mutex};
_gained_resources = args;
if (_resource_avail_sigh.valid())
Signal_transmitter(_resource_avail_sigh).submit();
}
void Child::notify_resource_avail() const
{
@@ -691,6 +700,12 @@ Parent::Resource_args Child::yield_request()
return _yield_request_args;
}
Parent::Resource_args Child::gained_resources()
{
Mutex::Guard guard(_resource_gain_mutex);
return _gained_resources;
}
void Child::yield_response() { _policy.yield_response(); }

View File

@@ -0,0 +1,94 @@
set build_components {
core init hoitaja timer app/grant_bench
}
source ${genode_dir}/repos/base/run/platform_drv.inc
append_platform_drv_build_components
build $build_components
create_boot_directory
set config {
<config>
<affinity-space width="64" height="1"/>
<parent-provides>
<service name="LOG"/>
<service name="PD"/>
<service name="CPU"/>
<service name="ROM"/>
<service name="RAM"/>
<service name="IRQ"/>
<service name="IO_MEM"/>
<service name="IO_PORT"/>
<service name="CAP"/>
<service name="RM"/>
<service name="SIGNAL"/>
<service name="TOPO"/>
</parent-provides>
<default-route>
<any-service><parent/><any-child/></any-service>
</default-route>
<default caps="200"/>
<start name="timer">
<resource name="RAM" quantum="16M"/>
<provides><service name="Timer"/></provides>
<route>
<any-service><parent/><any-child/></any-service>
</route>
</start>
<start name="hoitaja" caps="62000">
<resource name="RAM" quantum="250G"/>
<affinity xpos="0" ypos="0" width="64" height="1"/>
<config>
<affinity-space width="64" height="1"/>
<parent-provides>
<service name="LOG"/>
<service name="PD"/>
<service name="CPU"/>
<service name="ROM"/>
<service name="RAM"/>
<service name="IRQ"/>
<service name="IO_MEM"/>
<service name="IO_PORT"/>
<service name="CAP"/>
<service name="RM"/>
<service name="SIGNAL"/>
<service name="TOPO"/>
<service name="Timer"/>
</parent-provides>
<default-route>
<any-service><parent/><any-child/></any-service>
</default-route>
<default caps="200"/>
<start name="parent">
<affinity xpos="1" ypos="0" width="1" height="1"/>
<binary name="benchmark_resource_award"/>
<resource name="RAM" quantum="64M"/>
<config child="false">
</config>
<route>
<any-service><parent/><any-child/></any-service>
</route>
</start>
</config>
<route>
<service name="Timer"> <child name="timer"/> </service>
<any-service><parent/><any-child/></any-service>
</route>
</start>
</config>
}
install_config $config
set boot_modules {
core init hoitaja timer vfs.lib.so ld.lib.so benchmark_resource_award
}
append_platform_drv_boot_modules
build_boot_image $boot_modules
append qemu_args "-nographic "
run_genode_until forever

View File

@@ -1,5 +1,5 @@
set build_components {
core init timer app/blinktree
core init timer app/blinktree app/top
}
source ${genode_dir}/repos/base/run/platform_drv.inc
@@ -22,8 +22,9 @@ set config {
<service name="IO_PORT"/>
<service name="RM"/>
<service name="TOPO"/>
<service name="TRACE"/>
</parent-provides>
<affinity-space width="64" height="1"/>
<affinity-space width="32" height="1"/>
<default-route>
<any-service><parent/><any-child/></any-service>
</default-route>
@@ -40,9 +41,10 @@ set config {
}
append config {
<start name="blinktree" caps="1000">
<affinity xpos="1" ypos="0" width="63" height="1"/>
<resource name="RAM" quantum="240G"/>
<start name="blinktree1" caps="1000">
<binary name="blinktree"/>
<affinity xpos="1" ypos="0" width="16" height="1"/>
<resource name="RAM" quantum="75G"/>
<!--<resource name="CPU" quantum="90"/>-->
<route>
<service name="Timer"> <child name="timer"/> </service>
@@ -62,13 +64,158 @@ append config {
<libc stdout="/dev/log" stderr="/dev/log" rtc="/dev/rtc"/>
</config>
</start>
<!--
<start name="top">
<resource name="RAM" quantum="2M"/>
<affinity xpos="0" ypos="0" width="1" height="1"/>
<route>
<service name="Timer"> <child name="timer"/> </service>
<any-service> <parent/> </any-service>
</route>
<config period_ms="1000" sort_time="ec"/>
</start>-->
<!--
<start name="blinktree2" caps="1000">
<binary name="blinktree"/>
<affinity xpos="1" ypos="0" width="1" height="1"/>
<resource name="RAM" quantum="60G"/>
<route>
<service name="Timer"> <child name="timer"/> </service>
<any-service> <parent/> <any-child/> </any-service>
</route>
<config>
<vfs>
<dir name="dev">
<log/>
<inline name="rtc">2022-07-20 14:30</inline>
</dir>
<dir name="workloads">
<rom name="fill_randint_workloada"/>
<rom name="mixed_randint_workloada"/>
</dir>
</vfs>
<libc stdout="/dev/log" stderr="/dev/log" rtc="/dev/rtc"/>
</config>
</start>
<start name="blinktree3" caps="1000">
<binary name="blinktree"/>
<affinity xpos="1" ypos="0" width="1" height="1"/>
<resource name="RAM" quantum="60G"/>
<route>
<service name="Timer"> <child name="timer"/> </service>
<any-service> <parent/> <any-child/> </any-service>
</route>
<config>
<vfs>
<dir name="dev">
<log/>
<inline name="rtc">2022-07-20 14:30</inline>
</dir>
<dir name="workloads">
<rom name="fill_randint_workloada"/>
<rom name="mixed_randint_workloada"/>
</dir>
</vfs>
<libc stdout="/dev/log" stderr="/dev/log" rtc="/dev/rtc"/>
</config>
</start>
<start name="blinktree4" caps="1000">
<binary name="blinktree"/>
<affinity xpos="32" ypos="0" width="1" height="1"/>
<resource name="RAM" quantum="60G"/>
<route>
<service name="Timer"> <child name="timer"/> </service>
<any-service> <parent/> <any-child/> </any-service>
</route>
<config>
<vfs>
<dir name="dev">
<log/>
<inline name="rtc">2022-07-20 14:30</inline>
</dir>
<dir name="workloads">
<rom name="fill_randint_workloada"/>
<rom name="mixed_randint_workloada"/>
</dir>
</vfs>
<libc stdout="/dev/log" stderr="/dev/log" rtc="/dev/rtc"/>
</config>
</start>
<start name="blinktree5" caps="1000">
<binary name="blinktree"/>
<affinity xpos="1" ypos="0" width="63" height="1"/>
<resource name="RAM" quantum="60G"/>
<route>
<service name="Timer"> <child name="timer"/> </service>
<any-service> <parent/> <any-child/> </any-service>
</route>
<config>
<vfs>
<dir name="dev">
<log/>
<inline name="rtc">2022-07-20 14:30</inline>
</dir>
<dir name="workloads">
<rom name="fill_randint_workloada"/>
<rom name="mixed_randint_workloada"/>
</dir>
</vfs>
<libc stdout="/dev/log" stderr="/dev/log" rtc="/dev/rtc"/>
</config>
</start>
<start name="blinktree6" caps="1000">
<binary name="blinktree"/>
<affinity xpos="1" ypos="0" width="63" height="1"/>
<resource name="RAM" quantum="60G"/>
<route>
<service name="Timer"> <child name="timer"/> </service>
<any-service> <parent/> <any-child/> </any-service>
</route>
<config>
<vfs>
<dir name="dev">
<log/>
<inline name="rtc">2022-07-20 14:30</inline>
</dir>
<dir name="workloads">
<rom name="fill_randint_workloada"/>
<rom name="mixed_randint_workloada"/>
</dir>
</vfs>
<libc stdout="/dev/log" stderr="/dev/log" rtc="/dev/rtc"/>
</config>
</start>
<start name="blinktree7" caps="1000">
<binary name="blinktree"/>
<affinity xpos="1" ypos="0" width="63" height="1"/>
<resource name="RAM" quantum="30G"/>
<route>
<service name="Timer"> <child name="timer"/> </service>
<any-service> <parent/> <any-child/> </any-service>
</route>
<config>
<vfs>
<dir name="dev">
<log/>
<inline name="rtc">2022-07-20 14:30</inline>
</dir>
<dir name="workloads">
<rom name="fill_randint_workloada"/>
<rom name="mixed_randint_workloada"/>
</dir>
</vfs>
<libc stdout="/dev/log" stderr="/dev/log" rtc="/dev/rtc"/>
</config>
</start>-->
</config>
}
install_config $config
set boot_modules {
core init timer vfs.lib.so ld.lib.so libm.lib.so libc.lib.so stdcxx.lib.so posix.lib.so blinktree fill_randint_workloada mixed_randint_workloada
core init timer vfs.lib.so ld.lib.so libm.lib.so libc.lib.so stdcxx.lib.so posix.lib.so blinktree top fill_randint_workloada mixed_randint_workloada
}
append_platform_drv_boot_modules

View File

@@ -0,0 +1,114 @@
set build_components {
core init hoitaja timer app/persistent_cell app/volatile_cell
}
source ${genode_dir}/repos/base/run/platform_drv.inc
append_platform_drv_build_components
build $build_components
create_boot_directory
set config {
<config>
<!--<affinity-space width="64" height="1"/>-->
<parent-provides>
<service name="LOG"/>
<service name="PD"/>
<service name="CPU"/>
<service name="ROM"/>
<service name="RAM"/>
<service name="IRQ"/>
<service name="IO_MEM"/>
<service name="IO_PORT"/>
<service name="CAP"/>
<service name="RM"/>
<service name="SIGNAL"/>
<service name="TOPO"/>
</parent-provides>
<default-route>
<any-service><parent/><any-child/></any-service>
</default-route>
<default caps="200"/>
<start name="timer">
<resource name="RAM" quantum="16M"/>
<provides><service name="Timer"/></provides>
<route>
<any-service><parent/><any-child/></any-service>
</route>
</start>
<start name="hoitaja" caps="62000">
<resource name="RAM" quantum="250G"/>
<!--<affinity xpos="0" ypos="0" width="64" height="1"/>-->
<config prio_levels="32">
<affinity-space width="32" height="1"/>
<parent-provides>
<service name="LOG"/>
<service name="PD"/>
<service name="CPU"/>
<service name="ROM"/>
<service name="RAM"/>
<service name="IRQ"/>
<service name="IO_MEM"/>
<service name="IO_PORT"/>
<service name="CAP"/>
<service name="RM"/>
<service name="SIGNAL"/>
<service name="TOPO"/>
<service name="Timer"/>
</parent-provides>
<default-route>
<any-service><parent/><any-child/></any-service>
</default-route>
<default caps="200"/>
<start name="persistent_cell">
<affinity xpos="1" ypos="0" width="1" height="1"/>
<binary name="persistent_cell"/>
<resource name="RAM" quantum="64M"/>
<config child="false">
</config>
<route>
<any-service><parent/><any-child/></any-service>
</route>
</start>
<start name="persitent_2" priority="-2">
<affinity xpos="2" ypos="0" width="1" height="1"/>
<binary name="persistent_cell"/>
<resource name="RAM" quantum="64M"/>
<config child="false">
</config>
<route>
<any-service><parent/><any-child/></any-service>
</route>
</start>
<start name="volatile" priority="-2">
<affinity xpos="2" ypos="0" width="1" height="1"/>
<binary name="volatile_cell"/>
<resource name="RAM" quantum="64M"/>
<config child="false">
</config>
<route>
<any-service><parent/><any-child/></any-service>
</route>
</start>
</config>
<route>
<service name="Timer"> <child name="timer"/> </service>
<any-service><parent/><any-child/></any-service>
</route>
</start>
</config>
}
install_config $config
set boot_modules {
core init hoitaja timer vfs.lib.so ld.lib.so persistent_cell volatile_cell
}
append_platform_drv_boot_modules
build_boot_image $boot_modules
append qemu_args "-nographic "
run_genode_until forever

View File

@@ -0,0 +1,68 @@
set build_components {
core init timer app/pfm_test
}
source ${genode_dir}/repos/base/run/platform_drv.inc
append_platform_drv_build_components
build $build_components
create_boot_directory
set config {
<config>
<parent-provides>
<service name="LOG"/>
<service name="PD"/>
<service name="CPU"/>
<service name="ROM"/>
<service name="RAM"/>
<service name="IRQ"/>
<service name="IO_MEM"/>
<service name="IO_PORT"/>
<service name="CAP"/>
<service name="RM"/>
<service name="SIGNAL"/>
<service name="TOPO"/>
</parent-provides>
<default-route>
<any-service><parent/><any-child/></any-service>
</default-route>
<default caps="200"/>
<start name="timer">
<resource name="RAM" quantum="1M"/>
<provides><service name="Timer"/></provides>
<route>
<any-service><parent/><any-child/></any-service>
</route>
</start>
}
append config {
<start name="pfm_test">
<resource name="RAM" quantum="10M"/>
<config>
<vfs> <dir name="dev"> <log/> <inline name="rtc">2022-07-20 14:30</inline> </dir> </vfs>
<libc stdout="/dev/log" stderr="/dev/log" rtc="/dev/rtc"/>
</config>
<route>
<service name="Timer"><child name="timer"/></service>
<any-service><parent/><any-child/></any-service>
</route>
</start>
</config>
}
install_config $config
set boot_modules {
core init timer vfs.lib.so ld.lib.so posix.lib.so libc.lib.so libm.lib.so stdcxx.lib.so pfm_test
}
append_platform_drv_boot_modules
build_boot_image $boot_modules
append qemu_args "-nographic "
run_genode_until forever

View File

@@ -0,0 +1,114 @@
set build_components {
core init hoitaja timer app/yield_bench
}
source ${genode_dir}/repos/base/run/platform_drv.inc
append_platform_drv_build_components
build $build_components
create_boot_directory
set config {
<config>
<!--<affinity-space width="64" height="1"/>-->
<parent-provides>
<service name="LOG"/>
<service name="PD"/>
<service name="CPU"/>
<service name="ROM"/>
<service name="RAM"/>
<service name="IRQ"/>
<service name="IO_MEM"/>
<service name="IO_PORT"/>
<service name="CAP"/>
<service name="RM"/>
<service name="SIGNAL"/>
<service name="TOPO"/>
</parent-provides>
<default-route>
<any-service><parent/><any-child/></any-service>
</default-route>
<default caps="200"/>
<start name="timer">
<resource name="RAM" quantum="16M"/>
<provides><service name="Timer"/></provides>
<route>
<any-service><parent/><any-child/></any-service>
</route>
</start>
<start name="hoitaja" caps="62000">
<resource name="RAM" quantum="250G"/>
<!--<affinity xpos="0" ypos="0" width="64" height="1"/>-->
<config prio_levels="32">
<affinity-space width="64" height="1"/>
<parent-provides>
<service name="LOG"/>
<service name="PD"/>
<service name="CPU"/>
<service name="ROM"/>
<service name="RAM"/>
<service name="IRQ"/>
<service name="IO_MEM"/>
<service name="IO_PORT"/>
<service name="CAP"/>
<service name="RM"/>
<service name="SIGNAL"/>
<service name="TOPO"/>
<service name="Timer"/>
</parent-provides>
<default-route>
<any-service><parent/><any-child/></any-service>
</default-route>
<default caps="200"/>
<start name="parent">
<affinity xpos="1" ypos="0" width="1" height="1"/>
<binary name="benchmark_resource_yield"/>
<resource name="RAM" quantum="64M"/>
<config child="false">
</config>
<route>
<any-service><parent/><any-child/></any-service>
</route>
</start>
<start name="child" priority="-2">
<affinity xpos="2" ypos="0" width="1" height="1"/>
<binary name="benchmark_resource_yield"/>
<resource name="RAM" quantum="64M"/>
<config child="false">
</config>
<route>
<any-service><parent/><any-child/></any-service>
</route>
</start>
<start name="child2" priority="-2">
<affinity xpos="2" ypos="0" width="1" height="1"/>
<binary name="benchmark_resource_yield"/>
<resource name="RAM" quantum="64M"/>
<config child="false">
</config>
<route>
<any-service><parent/><any-child/></any-service>
</route>
</start>
</config>
<route>
<service name="Timer"> <child name="timer"/> </service>
<any-service><parent/><any-child/></any-service>
</route>
</start>
</config>
}
install_config $config
set boot_modules {
core init hoitaja timer vfs.lib.so ld.lib.so benchmark_resource_yield
}
append_platform_drv_boot_modules
build_boot_image $boot_modules
append qemu_args "-nographic "
run_genode_until forever

repos/mml/run/vscode.run (new file, 12 lines)
View File

@@ -0,0 +1,12 @@
set build_components {
core init timer hoitaja app/blinktree app/hello_mxtask app/hpc_test app/yield_bench app/persistent_cell app/volatile_cell test/resource_yield app/grant_bench app/top app/cpu_burner
}
source ${genode_dir}/repos/base/run/platform_drv.inc
append_platform_drv_build_components
build $build_components
create_boot_directory

View File

@@ -14,7 +14,8 @@ using namespace benchmark;
/**
*/
[[maybe_unused]] PerfCounter Perf::L1_MISSES = {"l1-miss", Genode::Trace::Performance_counter::Type::CORE, 0x43, 0x5b};
[[maybe_unused]] PerfCounter Perf::L1_DTLB_MISSES = {"l1-dtlb-miss", Genode::Trace::Performance_counter::Type::CORE, 0x45, 0xff};
[[maybe_unused]] PerfCounter Perf::L1_ITLB_MISSES = {"l1-itlb-miss", Genode::Trace::Performance_counter::Type::CORE, 0x85, 0x0};
/**
* Counter "LLC Misses"

View File

@@ -59,7 +59,7 @@ public:
}
catch (Genode::Trace::Pfc_access_error &e)
{
std::cerr << "Failed to start counter: " << e.error_code() << std::endl;
std::cerr << "Failed to start counter " << _counter << " " << _name << ": " << static_cast<uint16_t>(e.error_code()) << std::endl;
}
return _prev.value >= 0;
}
@@ -115,7 +115,8 @@ class Perf
public:
[[maybe_unused]] static PerfCounter INSTRUCTIONS;
[[maybe_unused]] static PerfCounter CYCLES;
[[maybe_unused]] static PerfCounter L1_MISSES;
[[maybe_unused]] static PerfCounter L1_DTLB_MISSES;
[[maybe_unused]] static PerfCounter L1_ITLB_MISSES;
[[maybe_unused]] static PerfCounter LLC_MISSES;
[[maybe_unused]] static PerfCounter LLC_REFERENCES;
//[[maybe_unused]] static PerfCounter STALLED_CYCLES_BACKEND;

View File

@@ -26,9 +26,15 @@ Benchmark::Benchmark(Libc::Env &env, benchmark::Cores &&cores, const std::uint16
{
this->_chronometer.add(benchmark::Perf::CYCLES);
this->_chronometer.add(benchmark::Perf::INSTRUCTIONS);
this->_chronometer.add(benchmark::Perf::L1_ITLB_MISSES);
this->_chronometer.add(benchmark::Perf::L1_DTLB_MISSES);
//this->_chronometer.add(benchmark::Perf::LLC_MISSES);
//this->_chronometer.add(benchmark::Perf::STALLS_MEM_ANY);
this->_chronometer.add(benchmark::Perf::SW_PREFETCH_ACCESS_NTA);
this->_chronometer.add(benchmark::Perf::SW_PREFETCH_ACCESS_WRITE);
//this->_chronometer.add(benchmark::Perf::SW_PREFETCH_ACCESS_NTA);
//this->_chronometer.add(benchmark::Perf::SW_PREFETCH_ACCESS_WRITE);
}
std::cout << "core configuration: \n" << this->_cores.dump(2) << std::endl;

View File

@@ -205,7 +205,7 @@ void Libc::Component::construct(Libc::Env &env) {
char cores_arg[10];
sprintf(cores_arg, "%d", cores);
char *args[] = {"blinktree_benchmark", "-i", "4", "-pd", "3", "-p", cores_arg};
char *args[] = {"blinktree_benchmark", "-i", "6", "-pd", "3", "-p", cores_arg};
Libc::with_libc([&]()
{

View File

@@ -0,0 +1,351 @@
/*
* \brief Test for yielding resources
* \author Norman Feske
* \date 2013-10-05
*
* This test exercises the protocol between a parent and child, which is used
* by the parent to regain resources from a child subsystem.
*
* The program acts in either one of two roles, the parent or the child. The
* role is determined by reading a config argument.
*
* The child periodically allocates chunks of RAM until its RAM quota is
* depleted. Once it observes a yield request from the parent, however, it
* cooperatively releases as many resources as the parent requested.
*
* The parent waits a while to give the child the chance to allocate RAM. It
* then sends a yield request and waits for a response. When getting the
* response, it validates whether the child complied with the request.
*/
/*
* Copyright (C) 2013-2017 Genode Labs GmbH
*
* This file is part of the Genode OS framework, which is distributed
* under the terms of the GNU Affero General Public License version 3.
*/
/* Genode includes */
#include <util/arg_string.h>
#include <base/component.h>
#include <base/attached_rom_dataspace.h>
#include <base/log.h>
#include <timer_session/connection.h>
#include <os/static_parent_services.h>
#include <os/dynamic_rom_session.h>
#include <base/child.h>
#include <trace/timestamp.h>
namespace Test {
class Child;
class Parent;
using namespace Genode;
}
/****************
** Child role **
****************/
/**
* The child eats more and more RAM. However, when receiving a yield request,
* it releases the requested amount of resources.
*/
class Test::Child
{
private:
struct Ram_chunk : List<Ram_chunk>::Element
{
Env &env;
size_t const size;
Ram_dataspace_capability ds_cap;
Ram_chunk(Env &env, size_t size)
:
env(env),size(size), ds_cap(env.ram().alloc(size))
{ }
~Ram_chunk() { env.ram().free(ds_cap); }
};
Env &_env;
Heap _heap { _env.ram(), _env.rm() };
bool const _expand;
List<Ram_chunk> _ram_chunks { };
Timer::Connection _timer { _env };
Signal_handler<Child> _grant_handler;
Genode::uint64_t const _period_ms;
void _handle_grant();
public:
Child(Env &, Xml_node);
void main();
};
void Test::Child::_handle_grant()
{
/* request yield request arguments */
unsigned long start = Genode::Trace::timestamp();
[[maybe_unused]] Genode::Parent::Resource_args const args = _env.parent().gained_resources();
unsigned long end = Genode::Trace::timestamp();
// Genode::Parent::Resource_args const args = _env.parent().yield_request();
_env.parent().yield_response();
log("{\"grant-handle-et\": ", (end - start)/2000, "}");
// size_t const gained_ram_quota = Arg_string::find_arg(args.string(), "ram_quota").ulong_value(0);
// log("Gained RAM quota: ", gained_ram_quota);
/*
log("yield request: ", args.string());
size_t const requested_ram_quota =
Arg_string::find_arg(args.string(), "ram_quota").ulong_value(0);
log("got request to free ", requested_ram_quota, " MB of RAM");
size_t const requested_cpu_quota =
Arg_string::find_arg(args.string(), "cpu_quota").ulong_value(0);
log("released ", requested_cpu_quota, " portions of cpu_quota");
size_t const requested_gpu_quota =
Arg_string::find_arg(args.string(), "gpus").ulong_value(0);
log("got request to release ", requested_gpu_quota, " gpus");*/
}
Test::Child::Child(Env &env, Xml_node config)
:
_env(env),
_expand(config.attribute_value("expand", false)),
_grant_handler(_env.ep(), *this, &Child::_handle_grant),
_period_ms(config.attribute_value("period_ms", (Genode::uint64_t)500))
{
/* register yield signal handler */
_env.parent().resource_avail_sigh(_grant_handler);
}
/*****************
** Parent role **
*****************/
/**
* The parent grants resource requests as long as it has free resources.
* Once in a while, it politely requests the child to yield resources.
*/
class Test::Parent
{
private:
Env &_env;
Timer::Connection _timer { _env };
void _print_status()
{
log("quota: ", _child.pd().ram_quota().value / 1024, " KiB "
"used: ", _child.pd().used_ram().value / 1024, " KiB");
}
size_t _used_ram_prior_yield = 0;
/* perform the test three times */
unsigned _cnt = 5000;
unsigned long _start = 0;
unsigned long _end = 0;
unsigned long _sent = 0;
enum State { WAIT, YIELD_REQUESTED, YIELD_GOT_RESPONSE };
State _state = WAIT;
void _schedule_one_second_timeout()
{
//log("wait ", _wait_cnt, "/", _wait_secs);
_timer.trigger_once(10000);
}
void _init()
{
_state = WAIT;
_schedule_one_second_timeout();
}
void _bestow_resources()
{
/* remember quantum of resources used by the child */
//_used_ram_prior_yield = _child.pd().used_ram().value;
//log("request yield (ram prior yield: ", _used_ram_prior_yield);
/* issue yield request */
Genode::Parent::Resource_args award("ram_quota=5M,cpu_quota=10,gpus=1");
_start = Genode::Trace::timestamp();
_child.accept(award);
_sent = Genode::Trace::timestamp();
_state = YIELD_REQUESTED;
}
void _handle_timeout()
{
//_print_status();
_bestow_resources();
_schedule_one_second_timeout();
}
void _yield_response()
{
_end = Genode::Trace::timestamp();
log("{\"bestow-rtt\": ", (_end-_start)/2000, ", \"bestow-transmit\": ", (_sent-_start)/2000, ",\"bestow-acked\":", (_end-_sent)/2000,"}");
_state = YIELD_GOT_RESPONSE;
//_print_status();
if (_cnt-- > 0) {
_init();
} else {
log("--- test-resource_yield finished ---");
_env.parent().exit(0);
}
}
Signal_handler<Parent> _timeout_handler {
_env.ep(), *this, &Parent::_handle_timeout };
struct Policy : public Genode::Child_policy
{
Env &_env;
Parent &_parent;
Static_parent_services<Pd_session, Cpu_session, Rom_session,
Log_session, Timer::Session, Topo_session>
_parent_services { _env };
Cap_quota const _cap_quota { 50 };
Ram_quota const _ram_quota { 10*1024*1024 };
Binary_name const _binary_name { "benchmark_resource_award" };
/*
* Config ROM service
*/
struct Config_producer : Dynamic_rom_session::Content_producer
{
void produce_content(char *dst, Genode::size_t dst_len) override
{
Xml_generator xml(dst, dst_len, "config", [&] () {
xml.attribute("child", "yes"); });
}
} _config_producer { };
Dynamic_rom_session _config_session { _env.ep().rpc_ep(),
ref_pd(), _env.rm(),
_config_producer };
typedef Genode::Local_service<Dynamic_rom_session> Config_service;
Config_service::Single_session_factory _config_factory { _config_session };
Config_service _config_service { _config_factory };
void yield_response() override
{
_parent._yield_response();
}
Policy(Parent &parent, Env &env) : _env(env), _parent(parent) { }
Name name() const override { return "child"; }
Binary_name binary_name() const override { return _binary_name; }
Pd_session &ref_pd() override { return _env.pd(); }
Pd_session_capability ref_pd_cap() const override { return _env.pd_session_cap(); }
void init(Pd_session &pd, Pd_session_capability pd_cap) override
{
pd.ref_account(ref_pd_cap());
ref_pd().transfer_quota(pd_cap, _cap_quota);
ref_pd().transfer_quota(pd_cap, _ram_quota);
}
Route resolve_session_request(Service::Name const &service_name,
Session_label const &label,
Session::Diag const diag) override
{
auto route = [&] (Service &service) {
return Route { .service = service,
.label = label,
.diag = diag }; };
if (service_name == "ROM" && label == "child -> config")
return route(_config_service);
Service *service_ptr = nullptr;
_parent_services.for_each([&] (Service &s) {
if (!service_ptr && service_name == s.name())
service_ptr = &s; });
if (!service_ptr)
throw Service_denied();
return route(*service_ptr);
}
};
Policy _policy { *this, _env };
Genode::Child _child { _env.rm(), _env.ep().rpc_ep(), _policy };
public:
class Insufficient_yield { };
/**
* Constructor
*/
Parent(Env &env) : _env(env)
{
_timer.sigh(_timeout_handler);
_init();
}
};
/***************
** Component **
***************/
void Component::construct(Genode::Env &env)
{
using namespace Genode;
/*
* Read value '<config child="" />' attribute to decide whether to perform
* the child or the parent role.
*/
static Attached_rom_dataspace config(env, "config");
bool const is_child = config.xml().attribute_value("child", false);
if (is_child) {
log("--- test-resource_yield child role started ---");
static Test::Child child(env, config.xml());
} else {
log("--- test-resource_yield parent role started ---");
static Test::Parent parent(env);
}
}

View File

@@ -0,0 +1,3 @@
TARGET = benchmark_resource_award
SRC_CC = main.cc
LIBS = base

View File

@@ -0,0 +1,36 @@
#include <base/component.h>
#include <base/env.h>
#include <base/log.h>
#include <timer_session/connection.h>
namespace Hoitaja_test {
struct Persistent_cell;
}
struct Hoitaja_test::Persistent_cell
{
Genode::Env &_env;
Timer::Connection _timer{_env};
void _handle_timeout()
{
Genode::log("My affinity is ", _env.cpu().affinity_space());
Genode::log("My PD cap is ", _env.pd_session_cap());
_timer.trigger_once(5 * 1000 * 1000);
}
Genode::Signal_handler<Persistent_cell> _timeout_handler{
_env.ep(), *this, &Persistent_cell::_handle_timeout};
Persistent_cell(Genode::Env &env) : _env(env)
{
Genode::log("My affinity is ", _env.cpu().affinity_space());
Genode::log("My PD cap is ", _env.pd().address_space());
_timer.sigh(_timeout_handler);
_timer.trigger_once(5 * 1000 * 1000);
}
};
void Component::construct(Genode::Env &env) { static Hoitaja_test::Persistent_cell cell(env); }

View File

@@ -0,0 +1,3 @@
TARGET = persistent_cell
SRC_CC = persistent_cell.cc
LIBS += base

View File

@@ -0,0 +1,109 @@
/**
* @file main.cc
* @author Michael Müller (michael.mueller@uos.de)
* @brief Some Tests for using Performance Counters with libpfm and the NOVA syscalls
* @version 0.1
* @date 2022-12-14
*
* @copyright Copyright (c) 2022
*
*/
#include <iostream>
#include <cstring>
#include <cstdlib>
#include <thread>
#include <chrono>
#include <nova/syscall-generic.h>
#include <nova/syscalls.h>
extern "C" {
#include <perfmon/err.h>
#include <perfmon/pfmlib.h>
}
int main(void)
{
pfm_pmu_info_t pinfo;
pfm_pmu_encode_arg_t e;
pfm_event_info_t info;
int ret;
ret = pfm_initialize();
if (ret != PFM_SUCCESS) {
std::cerr << "cannot initialize libpfm: " << pfm_strerror(ret) << std::endl;
return EXIT_FAILURE;
}
memset(&pinfo, 0, sizeof(pfm_pmu_info_t));
ret = pfm_get_pmu_info(PFM_PMU_AMD64_FAM17H_ZEN1, &pinfo);
if (ret != PFM_SUCCESS)
{
std::cerr << "Failed to find PMU" << std::endl;
return -EXIT_FAILURE;
}
if (!pinfo.is_present) {
std::cerr << "No AMD PMU present" << std::endl;
return -EXIT_FAILURE;
}
memset(&e, 0, sizeof(e));
char *fqstr = nullptr;
e.fstr = &fqstr;
do
{
ret = pfm_get_os_event_encoding("ITLB_RELOADS", PFM_PLM0 | PFM_PLM3, PFM_OS_NONE, &e);
if (ret == PFM_ERR_TOOSMALL) {
free(e.codes);
e.codes = NULL;
e.count = 0;
continue;
} else if (ret != PFM_SUCCESS) { /* fail only on a real error; success exits the loop */
std::cerr << "No such event" << std::endl;
return EXIT_FAILURE;
}
} while (ret != PFM_SUCCESS);
memset(&info, 0, sizeof(info));
ret = pfm_get_event_info(e.idx, PFM_OS_NONE, &info);
if (ret) {
std::cerr << "Failed to get event info" << std::endl;
return EXIT_FAILURE;
}
std::cout << "Event found : " << fqstr << std::endl;
std::cout << "Code : " << info.code << std::endl;
Nova::uint8_t rc = 0;
Nova::mword_t umask = 0x6;
Nova::mword_t flags = 0x0;
if ((rc = Nova::hpc_ctrl(Nova::HPC_SETUP, 0, 1, info.code, umask, flags)) != Nova::NOVA_OK) {
std::cerr << "Failed to setup HPC 0 for event" << std::endl;
return EXIT_FAILURE;
}
if ((rc = Nova::hpc_start(0, 1))) {
std::cerr << "Failed to start counter" << std::endl;
return EXIT_FAILURE;
}
std::cout << "Successfully set up hardware performance counter 0" << std::endl;
for (;;) {
std::this_thread::sleep_for(std::chrono::milliseconds(100));
Nova::mword_t value;
if ((rc = Nova::hpc_read(0, 1, value)) != Nova::NOVA_OK) {
std::cerr << "Failed to read HPC" << std::endl;
return EXIT_FAILURE;
}
std::cout << "Counter value: " << value << std::endl;
}
return EXIT_SUCCESS;
}

View File

@@ -0,0 +1,4 @@
TARGET = pfm_test
SRC_CC = main.cc
LIBS += base posix libm libc stdcxx libpfm4
CC_OPT += -Wno-error -fpermissive -Wno-error=conversion

View File

@@ -0,0 +1,3 @@
TARGET = volatile_cell
SRC_CC = volatile_cell.cc
LIBS += base

View File

@@ -0,0 +1,35 @@
#include <base/component.h>
#include <base/env.h>
#include <timer_session/connection.h>
#include <base/log.h>
namespace Hoitaja_test {
class Volatile_cell;
}
class Hoitaja_test::Volatile_cell
{
private:
Genode::Env &_env;
Timer::Connection _timer{_env};
void _handle_timeout()
{
Genode::log("My time has come. Exiting ...");
_env.parent().exit(0);
}
Genode::Signal_handler<Volatile_cell> _timeout_handler{
_env.ep(), *this, &Volatile_cell::_handle_timeout};
public:
Volatile_cell(Genode::Env &env) : _env(env)
{
Genode::log("My affinity space is ", _env.cpu().affinity_space());
_timer.sigh(_timeout_handler);
_timer.trigger_once(30 * 1000 * 1000);
}
};
void Component::construct(Genode::Env &env) { static Hoitaja_test::Volatile_cell cell(env); }

View File

@@ -0,0 +1,346 @@
/*
* \brief Test for yielding resources
* \author Norman Feske
* \date 2013-10-05
*
* This test exercises the protocol between a parent and child, which is used
* by the parent to regain resources from a child subsystem.
*
* The program acts in either one of two roles, the parent or the child. The
* role is determined by reading a config argument.
*
* The child periodically allocates chunks of RAM until its RAM quota is
* depleted. Once it observes a yield request from the parent, however, it
* cooperatively releases as many resources as the parent requested.
*
* The parent waits a while to give the child the chance to allocate RAM. It
* then sends a yield request and waits for a response. When getting the
* response, it validates whether the child complied with the request.
*/
/*
* Copyright (C) 2013-2017 Genode Labs GmbH
*
* This file is part of the Genode OS framework, which is distributed
* under the terms of the GNU Affero General Public License version 3.
*/
/* Genode includes */
#include <util/arg_string.h>
#include <base/component.h>
#include <base/attached_rom_dataspace.h>
#include <base/log.h>
#include <timer_session/connection.h>
#include <os/static_parent_services.h>
#include <os/dynamic_rom_session.h>
#include <base/child.h>
#include <trace/timestamp.h>
namespace Test {
class Child;
class Parent;
using namespace Genode;
}
/****************
** Child role **
****************/
/**
* The child eats more and more RAM. However, when receiving a yield request,
* it releases the requested amount of resources.
*/
class Test::Child
{
private:
struct Ram_chunk : List<Ram_chunk>::Element
{
Env &env;
size_t const size;
Ram_dataspace_capability ds_cap;
Ram_chunk(Env &env, size_t size)
:
env(env),size(size), ds_cap(env.ram().alloc(size))
{ }
~Ram_chunk() { env.ram().free(ds_cap); }
};
Env &_env;
Heap _heap { _env.ram(), _env.rm() };
bool const _expand;
List<Ram_chunk> _ram_chunks { };
Timer::Connection _timer { _env };
Signal_handler<Child> _yield_handler;
uint64_t const _period_ms;
void _handle_yield();
public:
Child(Env &, Xml_node);
void main();
};
void Test::Child::_handle_yield()
{
/* request yield request arguments */
//Genode::Parent::Resource_args const args = _env.parent().yield_request();
_env.parent().yield_response();
/*
log("yield request: ", args.string());
size_t const requested_ram_quota =
Arg_string::find_arg(args.string(), "ram_quota").ulong_value(0);
log("got request to free ", requested_ram_quota, " MB of RAM");
size_t const requested_cpu_quota =
Arg_string::find_arg(args.string(), "cpu_quota").ulong_value(0);
log("released ", requested_cpu_quota, " portions of cpu_quota");
size_t const requested_gpu_quota =
Arg_string::find_arg(args.string(), "gpus").ulong_value(0);
log("got request to release ", requested_gpu_quota, " gpus");*/
}
Test::Child::Child(Env &env, Xml_node config)
:
_env(env),
_expand(config.attribute_value("expand", false)),
_yield_handler(_env.ep(), *this, &Child::_handle_yield),
_period_ms(config.attribute_value("period_ms", (uint64_t)500))
{
/* register yield signal handler */
_env.parent().yield_sigh(_yield_handler);
}
/*****************
** Parent role **
*****************/
/**
* The parent grants resource requests as long as it has free resources.
* Once in a while, it politely requests the child to yield resources.
*/
class Test::Parent
{
private:
Env &_env;
Timer::Connection _timer { _env };
void _print_status()
{
log("quota: ", _child.pd().ram_quota().value / 1024, " KiB "
"used: ", _child.pd().used_ram().value / 1024, " KiB");
}
size_t _used_ram_prior_yield = 0;
/* perform the test three times */
unsigned _cnt = 5000;
unsigned long _start = 0;
unsigned long _end = 0;
unsigned long _sent = 0;
enum State { WAIT, YIELD_REQUESTED, YIELD_GOT_RESPONSE };
State _state = WAIT;
void _schedule_one_second_timeout()
{
//log("wait ", _wait_cnt, "/", _wait_secs);
_timer.trigger_once(10000);
}
void _init()
{
_state = WAIT;
_schedule_one_second_timeout();
}
void _request_yield()
{
/* remember quantum of resources used by the child */
_used_ram_prior_yield = _child.pd().used_ram().value;
//log("request yield (ram prior yield: ", _used_ram_prior_yield);
/* issue yield request */
Genode::Parent::Resource_args yield_args("ram_quota=5M,cpu_quota=10,gpus=1");
_start = Genode::Trace::timestamp();
_child.yield(yield_args);
_sent = Genode::Trace::timestamp();
_state = YIELD_REQUESTED;
}
void _handle_timeout()
{
//_print_status();
_request_yield();
_schedule_one_second_timeout();
}
void _yield_response()
{
_end = Genode::Trace::timestamp();
log("{\"yield-rtt\": ", (_end-_start)/2000, ", \"yield-request\": ", (_sent-_start)/2000, ",\"yield-response\":", (_end-_sent)/2000,"}");
_state = YIELD_GOT_RESPONSE;
//_print_status();
if (_cnt-- > 0) {
_init();
} else {
log("--- test-resource_yield finished ---");
_env.parent().exit(0);
}
}
Signal_handler<Parent> _timeout_handler {
_env.ep(), *this, &Parent::_handle_timeout };
struct Policy : public Genode::Child_policy
{
Env &_env;
Parent &_parent;
Static_parent_services<Pd_session, Cpu_session, Rom_session,
Log_session, Timer::Session, Topo_session>
_parent_services { _env };
Cap_quota const _cap_quota { 50 };
Ram_quota const _ram_quota { 10*1024*1024 };
Binary_name const _binary_name { "benchmark_resource_yield" };
/*
* Config ROM service
*/
struct Config_producer : Dynamic_rom_session::Content_producer
{
void produce_content(char *dst, Genode::size_t dst_len) override
{
Xml_generator xml(dst, dst_len, "config", [&] () {
xml.attribute("child", "yes"); });
}
} _config_producer { };
Dynamic_rom_session _config_session { _env.ep().rpc_ep(),
ref_pd(), _env.rm(),
_config_producer };
typedef Genode::Local_service<Dynamic_rom_session> Config_service;
Config_service::Single_session_factory _config_factory { _config_session };
Config_service _config_service { _config_factory };
void yield_response() override
{
_parent._yield_response();
}
Policy(Parent &parent, Env &env) : _env(env), _parent(parent) { }
Name name() const override { return "child"; }
Binary_name binary_name() const override { return _binary_name; }
Pd_session &ref_pd() override { return _env.pd(); }
Pd_session_capability ref_pd_cap() const override { return _env.pd_session_cap(); }
void init(Pd_session &pd, Pd_session_capability pd_cap) override
{
pd.ref_account(ref_pd_cap());
ref_pd().transfer_quota(pd_cap, _cap_quota);
ref_pd().transfer_quota(pd_cap, _ram_quota);
}
Route resolve_session_request(Service::Name const &service_name,
Session_label const &label,
Session::Diag const diag) override
{
auto route = [&] (Service &service) {
return Route { .service = service,
.label = label,
.diag = diag }; };
if (service_name == "ROM" && label == "child -> config")
return route(_config_service);
Service *service_ptr = nullptr;
_parent_services.for_each([&] (Service &s) {
if (!service_ptr && service_name == s.name())
service_ptr = &s; });
if (!service_ptr)
throw Service_denied();
return route(*service_ptr);
}
};
Policy _policy { *this, _env };
Genode::Child _child { _env.rm(), _env.ep().rpc_ep(), _policy };
public:
class Insufficient_yield { };
/**
* Constructor
*/
Parent(Env &env) : _env(env)
{
_timer.sigh(_timeout_handler);
_init();
}
};
/***************
** Component **
***************/
void Component::construct(Genode::Env &env)
{
using namespace Genode;
/*
* Read value '<config child="" />' attribute to decide whether to perform
* the child or the parent role.
*/
static Attached_rom_dataspace config(env, "config");
bool const is_child = config.xml().attribute_value("child", false);
if (is_child) {
log("--- test-resource_yield child role started ---");
static Test::Child child(env, config.xml());
} else {
log("--- test-resource_yield parent role started ---");
static Test::Parent parent(env);
}
}

View File

@@ -0,0 +1,3 @@
TARGET = benchmark_resource_yield
SRC_CC = main.cc
LIBS = base

View File

@@ -15,7 +15,7 @@
#define _LIB__SANDBOX__ALIAS_H_
/* local includes */
#include <types.h>
#include <sandbox/types.h>
namespace Sandbox { struct Alias; }

View File

@@ -23,19 +23,57 @@
#include <sandbox/sandbox.h>
/* local includes */
#include <types.h>
#include <verbose.h>
#include <report.h>
#include <name_registry.h>
#include <service.h>
#include <utils.h>
#include <route_model.h>
#include <sandbox/types.h>
#include <sandbox/verbose.h>
#include <sandbox/report.h>
#include <sandbox/name_registry.h>
#include <sandbox/service.h>
#include <sandbox/utils.h>
#include <sandbox/route_model.h>
namespace Sandbox { class Child; }
class Sandbox::Child : Child_policy, Routed_service::Wakeup
{
public:
/**
* Resources assigned to the child
*/
struct Resources
{
long prio_levels_log2;
long priority;
Affinity affinity;
Ram_quota assigned_ram_quota;
Cap_quota assigned_cap_quota;
Cpu_quota assigned_cpu_quota;
Ram_quota effective_ram_quota() const
{
return Genode::Child::effective_quota(assigned_ram_quota);
}
Cap_quota effective_cap_quota() const
{
/* capabilities consumed by 'Genode::Child' */
Cap_quota const effective =
Genode::Child::effective_quota(assigned_cap_quota);
/* capabilities additionally consumed by init */
enum {
STATIC_COSTS = 1 /* possible heap backing-store
allocation for session object */
+ 1 /* buffered XML start node */
+ 2 /* dynamic ROM for config */
+ 2 /* dynamic ROM for session requester */
};
if (effective.value < STATIC_COSTS)
return Cap_quota{0};
return Cap_quota{effective.value - STATIC_COSTS};
}
};
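/*
* Worked example (numbers assumed): with an assigned_cap_quota of 50 caps,
* 'Genode::Child::effective_quota' first deducts the caps needed for the
* child's environment sessions. If, say, 40 caps remain, the STATIC_COSTS
* of 1 + 1 + 2 + 2 = 6 caps reserved by init are subtracted as well,
* leaving 34 caps for the child's own session requests.
*/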
typedef String<80> Version;
@@ -73,7 +111,7 @@ class Sandbox::Child : Child_policy, Routed_service::Wakeup
enum class Sample_state_result { CHANGED, UNCHANGED };
private:
protected:
friend class Child_registry;
@@ -109,7 +147,8 @@ class Sandbox::Child : Child_policy, Routed_service::Wakeup
* The child is no longer referenced by config model and can
* safely be destructed.
*/
ABANDONED
ABANDONED,
};
State _state = State::INITIAL;
@@ -202,48 +241,11 @@ class Sandbox::Child : Child_policy, Routed_service::Wakeup
return _heartbeat_enabled && (_state == State::ALIVE);
}
/**
* Resources assigned to the child
*/
struct Resources
{
long prio_levels_log2;
long priority;
Affinity affinity;
Ram_quota assigned_ram_quota;
Cap_quota assigned_cap_quota;
Cpu_quota assigned_cpu_quota;
Ram_quota effective_ram_quota() const
{
return Genode::Child::effective_quota(assigned_ram_quota);
}
Cap_quota effective_cap_quota() const
{
/* capabilities consumed by 'Genode::Child' */
Cap_quota const effective =
Genode::Child::effective_quota(assigned_cap_quota);
/* capabilities additionally consumed by init */
enum {
STATIC_COSTS = 1 /* possible heap backing-store
allocation for session object */
+ 1 /* buffered XML start node */
+ 2 /* dynamic ROM for config */
+ 2 /* dynamic ROM for session requester */
};
if (effective.value < STATIC_COSTS)
return Cap_quota{0};
return Cap_quota{effective.value - STATIC_COSTS};
}
};
static
Resources _resources_from_start_node(Xml_node start_node, Prio_levels prio_levels,
Affinity::Space const &affinity_space,
Affinity::Location const &location,
Cap_quota default_cap_quota)
{
unsigned cpu_percent = 0;
@@ -269,7 +271,8 @@ class Sandbox::Child : Child_policy, Routed_service::Wakeup
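/* a 'location' x-position of -1 means "not externally assigned"; in that
* case the affinity location is taken from the <affinity> sub node of the
* start node, as before */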
return Resources { log2(prio_levels.value),
priority_from_xml(start_node, prio_levels),
Affinity(affinity_space,
affinity_location_from_xml(affinity_space, start_node)),
(location.xpos() == -1 ? affinity_location_from_xml(affinity_space, start_node) : location)),
//affinity_location_from_xml(affinity_space, start_node)),
Ram_quota { ram_bytes },
Cap_quota { caps },
Cpu_quota { cpu_percent } };
@@ -555,6 +558,7 @@ class Sandbox::Child : Child_policy, Routed_service::Wakeup
Cpu_quota_transfer &cpu_quota_transfer,
Prio_levels prio_levels,
Affinity::Space const &affinity_space,
Affinity::Location const &location,
Registry<Parent_service> &parent_services,
Registry<Routed_service> &child_services,
Registry<Local_service> &local_services);
@@ -674,6 +678,7 @@ class Sandbox::Child : Child_policy, Routed_service::Wakeup
Sample_state_result sample_state();
/****************************
** Child-policy interface **
****************************/

View File

@@ -15,10 +15,10 @@
#define _LIB__SANDBOX__CHILD_REGISTRY_H_
/* local includes */
#include <child.h>
#include <name_registry.h>
#include <alias.h>
#include <report.h>
#include <sandbox/child.h>
#include <sandbox/name_registry.h>
#include <sandbox/alias.h>
#include <sandbox/report.h>
namespace Sandbox { struct Child_registry; }

View File

@@ -18,7 +18,7 @@
#include <util/list_model.h>
/* local includes */
#include <heartbeat.h>
#include <sandbox/heartbeat.h>
namespace Sandbox {

View File

@@ -15,8 +15,8 @@
#define _LIB__SANDBOX__HEARTBEAT_H_
/* local includes */
#include <state_reporter.h>
#include <child_registry.h>
#include <sandbox/state_reporter.h>
#include <sandbox/child_registry.h>
#include <util/noncopyable.h>
namespace Sandbox { class Heartbeat; }

View File

@@ -0,0 +1,264 @@
#include <sandbox/child.h>
#include <sandbox/alias.h>
#include <sandbox/server.h>
#include <sandbox/heartbeat.h>
#include <sandbox/config_model.h>
#include <base/env.h>
#include <base/heap.h>
#include <base/attached_rom_dataspace.h>
#include <util/xml_node.h>
#include <util/noncopyable.h>
#include <base/registry.h>
#include <base/service.h>
#pragma once
namespace Sandbox {
class Library;
}
namespace Genode {
class Sandbox;
}
struct Sandbox::Library : ::Sandbox::State_reporter::Producer,
::Sandbox::Child::Default_route_accessor,
::Sandbox::Child::Default_caps_accessor,
::Sandbox::Child::Ram_limit_accessor,
::Sandbox::Child::Cap_limit_accessor,
::Sandbox::Child::Cpu_limit_accessor,
::Sandbox::Child::Cpu_quota_transfer,
::Sandbox::Start_model::Factory,
::Sandbox::Parent_provides_model::Factory
{
using Routed_service = ::Sandbox::Routed_service;
using Parent_service = ::Sandbox::Parent_service;
using Local_service = ::Genode::Sandbox::Local_service_base;
using Report_detail = ::Sandbox::Report_detail;
using Child_registry = ::Sandbox::Child_registry;
using Verbose = ::Sandbox::Verbose;
using State_reporter = ::Sandbox::State_reporter;
using Heartbeat = ::Sandbox::Heartbeat;
using Server = ::Sandbox::Server;
using Alias = ::Sandbox::Alias;
using Child = ::Sandbox::Child;
using Prio_levels = ::Sandbox::Prio_levels;
using Ram_info = ::Sandbox::Ram_info;
using Cap_info = ::Sandbox::Cap_info;
using Cpu_quota = ::Sandbox::Cpu_quota;
using Config_model = ::Sandbox::Config_model;
using Start_model = ::Sandbox::Start_model;
using Preservation = ::Sandbox::Preservation;
public:
Env &_env;
Heap &_heap;
Registry<Parent_service> _parent_services { };
Registry<Routed_service> _child_services { };
Registry<Local_service> &_local_services;
Child_registry _children { };
/*
* Global parameters obtained from config
*/
Reconstructible<Verbose> _verbose { };
Config_model::Version _version { };
Constructible<Buffered_xml> _default_route { };
Cap_quota _default_caps { 0 };
Prio_levels _prio_levels { };
Constructible<Affinity::Space> _affinity_space { };
Preservation _preservation { };
Affinity::Space _effective_affinity_space() const
{
return _affinity_space.constructed() ? *_affinity_space
: Affinity::Space { 1, 1 };
}
State_reporter _state_reporter;
Heartbeat _heartbeat { _env, _children, _state_reporter };
/*
* Internal representation of the XML configuration
*/
Config_model _config_model { };
/*
* Variables for tracking the side effects of updating the config model
*/
bool _server_appeared_or_disappeared = false;
bool _state_report_outdated = false;
unsigned _child_cnt = 0;
Cpu_quota _avail_cpu { .percent = 100 };
Cpu_quota _transferred_cpu { .percent = 0 };
Ram_quota _avail_ram() const
{
Ram_quota avail_ram = _env.pd().avail_ram();
if (_preservation.ram.value > avail_ram.value) {
error("RAM preservation exceeds available memory");
return Ram_quota { 0 };
}
/* deduct preserved quota from available quota */
return Ram_quota { avail_ram.value - _preservation.ram.value };
}
Cap_quota _avail_caps() const
{
Cap_quota avail_caps { _env.pd().avail_caps().value };
if (_preservation.caps.value > avail_caps.value) {
error("Capability preservation exceeds available capabilities");
return Cap_quota { 0 };
}
/* deduct preserved quota from available quota */
return Cap_quota { avail_caps.value - _preservation.caps.value };
}
/**
* Child::Ram_limit_accessor interface
*/
Ram_quota resource_limit(Ram_quota const &) const override
{
return _avail_ram();
}
/**
* Child::Cap_limit_accessor interface
*/
Cap_quota resource_limit(Cap_quota const &) const override { return _avail_caps(); }
/**
* Child::Cpu_limit_accessor interface
*/
Cpu_quota resource_limit(Cpu_quota const &) const override { return _avail_cpu; }
/**
* Child::Cpu_quota_transfer interface
*/
void transfer_cpu_quota(Cpu_session_capability cap, Cpu_quota quota) override
{
Cpu_quota const remaining { 100 - min(100u, _transferred_cpu.percent) };
/* prevent division by zero in 'quota_lim_upscale' */
if (remaining.percent == 0)
return;
size_t const fraction =
Cpu_session::quota_lim_upscale(quota.percent, remaining.percent);
_env.cpu().transfer_quota(cap, fraction);
_transferred_cpu.percent += quota.percent;
}
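/*
* Illustration (assumed numbers): with nothing transferred yet, a request
* of 25% yields quota_lim_upscale(25, 100), i.e., one quarter of init's
* own CPU quota. A second 25% request is scaled against the remaining
* 75%, quota_lim_upscale(25, 75), which again corresponds to 25 of the
* original 100 percent points.
*/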
/**
* State_reporter::Producer interface
*/
void produce_state_report(Xml_generator &xml, Report_detail const &detail) const override
{
if (detail.init_ram())
xml.node("ram", [&] () { Ram_info::from_pd(_env.pd()).generate(xml); });
if (detail.init_caps())
xml.node("caps", [&] () { Cap_info::from_pd(_env.pd()).generate(xml); });
if (detail.children())
_children.report_state(xml, detail);
}
/**
* State_reporter::Producer interface
*/
Child::Sample_state_result sample_children_state() override
{
return _children.sample_state();
}
/**
* Default_route_accessor interface
*/
Xml_node default_route() override
{
return _default_route.constructed() ? _default_route->xml()
: Xml_node("<empty/>");
}
/**
* Default_caps_accessor interface
*/
Cap_quota default_caps() override { return _default_caps; }
void _update_aliases_from_config(Xml_node const &);
void _update_parent_services_from_config(Xml_node const &);
void _update_children_config(Xml_node const &);
void _destroy_abandoned_parent_services();
virtual void _destroy_abandoned_children();
Server _server { _env, _heap, _child_services, _state_reporter };
/**
* Sandbox::Start_model::Factory
*/
virtual Child &create_child(Xml_node const &) override;
/**
* Sandbox::Start_model::Factory
*/
virtual void update_child(Child &, Xml_node const &) override;
/**
* Sandbox::Start_model::Factory
*/
Alias &create_alias(Child_policy::Name const &name) override
{
Alias &alias = *new (_heap) Alias(name);
_children.insert_alias(&alias);
return alias;
}
/**
* Sandbox::Start_model::Factory
*/
void destroy_alias(Alias &alias) override
{
_children.remove_alias(&alias);
destroy(_heap, &alias);
}
/**
* Sandbox::Start_model::Factory
*/
bool ready_to_create_child(Start_model::Name const &,
Start_model::Version const &) const override;
/**
* Sandbox::Parent_provides_model::Factory
*/
Parent_service &create_parent_service(Service::Name const &name) override
{
return *new (_heap) Parent_service(_parent_services, _env, name);
}
Library(Env &env, Heap &heap, Registry<Local_service> &local_services,
Genode::Sandbox::State_handler &state_handler)
:
_env(env), _heap(heap), _local_services(local_services),
_state_reporter(_env, *this, state_handler)
{ }
virtual void apply_config(Xml_node const &);
virtual void generate_state_report(Xml_generator &xml) const
{
_state_reporter.generate(xml);
}
};

View File

@@ -18,7 +18,7 @@
#include <base/child.h>
/* local includes */
#include <types.h>
#include <sandbox/types.h>
namespace Sandbox { struct Name_registry; }

View File

@@ -19,7 +19,7 @@
#include <util/xml_node.h>
/* local includes */
#include <types.h>
#include <sandbox/types.h>
namespace Sandbox {
struct Report_update_trigger;

View File

@@ -15,7 +15,7 @@
#define _ROUTE_MODEL_H_
/* local includes */
#include <types.h>
#include <sandbox/types.h>
namespace Sandbox {

View File

@@ -21,7 +21,7 @@
#include <base/heap.h>
namespace Genode { class Sandbox; }
namespace Sandbox { class Library; }
class Genode::Sandbox : Noncopyable
{
@@ -50,9 +50,7 @@ class Genode::Sandbox : Noncopyable
Heap _heap;
class Library;
Library &_library;
::Sandbox::Library &_library;
Registry<Local_service_base> _local_services { };
@@ -60,7 +58,9 @@ class Genode::Sandbox : Noncopyable
Sandbox(Env &, State_handler &);
void apply_config(Xml_node const &);
virtual ~Sandbox() = default;
virtual void apply_config(Xml_node const &);
/**
* Generate state report as configured by the <report> config node

View File

@@ -19,10 +19,10 @@
#include <os/buffered_xml.h>
/* local includes */
#include <types.h>
#include <service.h>
#include <state_reporter.h>
#include <config_model.h>
#include <sandbox/types.h>
#include <sandbox/service.h>
#include <sandbox/state_reporter.h>
#include <sandbox/config_model.h>
namespace Sandbox { class Server; }

View File

@@ -19,8 +19,8 @@
#include <sandbox/sandbox.h>
/* local includes */
#include "report.h"
#include "child.h"
#include <sandbox/report.h>
#include <sandbox/child.h>
namespace Sandbox { class State_reporter; }

View File

@@ -19,7 +19,7 @@
#include <util/xml_node.h>
/* local includes */
#include <types.h>
#include <sandbox/types.h>
namespace Sandbox { struct Verbose; }

View File

@@ -0,0 +1,38 @@
/*
* \brief Suoritin - Task-based CPU Client Interface
* \author Michael Müller
* \date 2023-07-12
*/
/*
* Copyright (C) 2010-2020 Genode Labs GmbH
* Copyright (C) 2023 Michael Müller, Osnabrück University
*
* This file is part of EalánOS, which is distributed
* under the terms of the GNU Affero General Public License version 3.
*/
#pragma once
#include <suoritin/session.h>
#include <base/rpc_client.h>
#include <base/affinity.h>
struct Tukija::Suoritin::Client : Genode::Rpc_client<Tukija::Suoritin::Session>
{
explicit Client(Genode::Capability<Tukija::Suoritin::Session> session)
: Rpc_client<Tukija::Suoritin::Session>(session) { }
void create_channel() override
{
call<Rpc_create_channel>();
}
void register_worker(Genode::Thread::Name const &name, Genode::Thread_capability cap) override
{
call<Rpc_register_worker>(name, cap);
}
Capability interface_cap() override {
return call<Rpc_suoritin_cap>();
}
};

View File

@@ -0,0 +1,46 @@
/*
* \brief Suoritin - Task-based CPU Connection
* \author Michael Müller
* \date 2023-07-12
*/
/*
* Copyright (C) 2010-2020 Genode Labs GmbH
* Copyright (C) 2023 Michael Müller, Osnabrück University
*
* This file is part of EalánOS, which is distributed
* under the terms of the GNU Affero General Public License version 3.
*/
#pragma once
#include <suoritin/client.h>
#include <base/connection.h>
namespace Tukija {
namespace Suoritin {
struct Connection;
}
}
struct Tukija::Suoritin::Connection : Genode::Connection<Tukija::Suoritin::Session>, Tukija::Suoritin::Client
{
enum
{
RAM_QUOTA = 32768UL
};
Connection(Genode::Env &env, const char *label="", Genode::Affinity const &affinity = Genode::Affinity())
: Genode::Connection<Tukija::Suoritin::Session>(env, session(env.parent(), affinity, "ram_quota=%u, cap_quota=%u, label=\"%s\"", RAM_QUOTA, CAP_QUOTA, label)), Tukija::Suoritin::Client(cap()) {}
void create_channel() override {
Tukija::Suoritin::Client::create_channel();
}
void register_worker(Genode::Thread::Name const &name, Genode::Thread_capability cap) override {
Tukija::Suoritin::Client::register_worker(name, cap);
}
Tukija::Suoritin::Capability interface_cap() override {
return Tukija::Suoritin::Client::interface_cap();
}
};

View File

@@ -0,0 +1,101 @@
/*
* \brief Suoritin - Task-based CPU Service
* \author Michael Müller
* \date 2023-07-12
*/
/*
* Copyright (C) 2010-2020 Genode Labs GmbH
* Copyright (C) 2023 Michael Müller, Osnabrück University
*
* This file is part of EalánOS, which is distributed
* under the terms of the GNU Affero General Public License version 3.
*/
#pragma once
#include <session/session.h>
#include <base/affinity.h>
#include <base/registry.h>
#include <cpu_session/client.h>
namespace Tukija {
namespace Suoritin {
struct Session;
struct Client;
class Capability;
struct Channel;
struct Worker;
typedef Genode::Registry<Genode::Registered<Channel>> Channel_list;
typedef Genode::Registry<Genode::Registered<Worker>> Worker_registry;
}
}
class Tukija::Suoritin::Capability
{
private:
Genode::Ram_dataspace_capability _worker_space;
Genode::Ram_dataspace_capability _channel_space;
public:
Capability(Genode::Ram_dataspace_capability worker_space, Genode::Ram_dataspace_capability channel_space) : _worker_space(worker_space), _channel_space(channel_space) {}
Genode::Ram_dataspace_capability worker_interface() { return _worker_space; }
Genode::Ram_dataspace_capability channel_space() { return _channel_space; }
};
struct Tukija::Suoritin::Worker : Genode::Interface
{
Genode::Thread_capability _cap{};
Genode::Thread::Name _name{};
};
struct Tukija::Suoritin::Channel : Genode::Interface
{
unsigned long _id{0};
unsigned long _length{0};
unsigned long _occupancy{0};
};
struct Tukija::Suoritin::Session : Genode::Session
{
static const char *service_name() { return "TASKING"; }
enum
{
CAP_QUOTA = 6
};
/**
* @brief List of all channels, i.e. worker queues, of the client cell
*
*/
Channel_list _channels{};
/**
* @brief List of worker threads for this client
*
*/
Worker_registry _workers{};
virtual ~Session() { }
/************************
** internal interface **
************************/
Channel_list &channels() { return _channels; }
Worker_registry &workers() { return _workers; }
/********************************
** Suoritin session interface **
********************************/
virtual void create_channel() = 0;
virtual void register_worker(Genode::Thread::Name const &name, Genode::Thread_capability cap) = 0;
virtual Capability interface_cap() = 0;
GENODE_RPC(Rpc_create_channel, void, create_channel);
GENODE_RPC(Rpc_register_worker, void, register_worker, Genode::Thread::Name const&, Genode::Thread_capability);
GENODE_RPC(Rpc_suoritin_cap, Tukija::Suoritin::Capability, interface_cap);
GENODE_RPC_INTERFACE(Rpc_create_channel, Rpc_register_worker, Rpc_suoritin_cap);
};
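/*
* Client-side usage sketch (illustrative only; 'env' and 'worker' stand for
* a component's Genode::Env and an already constructed Genode::Thread):
*
* Tukija::Suoritin::Connection suoritin(env, "my-runtime");
* suoritin.create_channel();
* suoritin.register_worker(worker.name(), worker.cap());
* Tukija::Suoritin::Capability ifaces = suoritin.interface_cap();
*/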

View File

@@ -1,5 +1,5 @@
SRC_CC = library.cc child.cc server.cc config_model.cc
INC_DIR += $(REP_DIR)/src/lib/sandbox
INC_DIR += $(REP_DIR)/include/sandbox
LIBS += base
SHARED_LIB = yes

View File

@@ -1 +1 @@
2022-10-11 c19f6134a15baf12d7c14f77ee8204a19bd1d2af
2023-07-07-a 36058ca42f3cc198dd7e776037ac196469423e4a

View File

@@ -68,7 +68,7 @@ struct Trace_subject_registry
return nullptr;
}
enum { MAX_CPUS_X = 16, MAX_CPUS_Y = 4, MAX_ELEMENTS_PER_CPU = 6};
enum { MAX_CPUS_X = 64, MAX_CPUS_Y = 1, MAX_ELEMENTS_PER_CPU = 6};
/* accumulated execution time on all CPUs */
unsigned long long total_first [MAX_CPUS_X][MAX_CPUS_Y];

View File

@@ -0,0 +1,80 @@
#pragma once
#include <base/log.h>
#include <base/child.h>
#include <os/session_requester.h>
#include <os/session_policy.h>
#include <os/buffered_xml.h>
#include <sandbox/sandbox.h>
#include <sandbox/child.h>
#include <sandbox/service.h>
#include <sandbox/types.h>
#include <sandbox/verbose.h>
#include <sandbox/report.h>
#include <sandbox/name_registry.h>
#include <sandbox/service.h>
#include <sandbox/utils.h>
#include <sandbox/route_model.h>
#include <state_handler.h>
namespace Hoitaja {
class Cell;
}
class Hoitaja::Cell : public ::Sandbox::Child
{
private:
State_handler &_state_handler;
public:
friend class Habitat;
Cell(Genode::Env &env,
Genode::Allocator &alloc,
::Sandbox::Verbose const &verbose,
::Sandbox::Child::Id id,
::Sandbox::Report_update_trigger &report_update_trigger,
Genode::Xml_node start_node,
::Sandbox::Child::Default_route_accessor &default_route_accessor,
::Sandbox::Child::Default_caps_accessor &default_caps_accessor,
::Sandbox::Name_registry &name_registry,
::Sandbox::Child::Ram_limit_accessor &ram_limit_accessor,
::Sandbox::Child::Cap_limit_accessor &cap_limit_accessor,
::Sandbox::Child::Cpu_limit_accessor &cpu_limit_accessor,
::Sandbox::Child::Cpu_quota_transfer &cpu_quota_transfer,
::Sandbox::Prio_levels prio_levels,
Genode::Affinity::Space const &affinity_space,
Genode::Affinity::Location const &location,
Genode::Registry<::Sandbox::Parent_service> &parent_services,
Genode::Registry<::Sandbox::Routed_service> &child_services,
Genode::Registry<::Sandbox::Child::Local_service> &local_services,
State_handler &state_handler)
: ::Sandbox::Child(env, alloc, verbose, id, report_update_trigger, start_node, default_route_accessor, default_caps_accessor, name_registry, ram_limit_accessor, cap_limit_accessor, cpu_limit_accessor, cpu_quota_transfer, prio_levels, affinity_space, location, parent_services, child_services, local_services), _state_handler(state_handler)
{ }
virtual ~Cell() { };
struct Resources &resources() { return _resources; }
void update_affinity(Genode::Affinity affinity) {
Genode::log("Updating affinity to ", affinity.location(), " in space ", affinity.space());
_resources.affinity = affinity;
Genode::log("Moving CPU session ", _env.cpu_session_cap());
_env.cpu().move(affinity.location());
if (_child.active()) {
_child.cpu().move(affinity.location());
_child.topo().reconstruct(affinity);
}
}
void exit(int exit_value) override
{
::Sandbox::Child::exit(exit_value);
_state_handler.handle_habitat_state(*this);
}
void yield(Genode::Parent::Resource_args &args) {
_child.yield(args);
}
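/*
* Note: the core allocator shrinks a cell by passing resource args of the
* form "cores=<n>" (see Core_allocator::_shrink) through this method; the
* cell's runtime is expected to interpret the request and give up the
* corresponding number of CPU cores.
*/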
};

View File

@@ -0,0 +1,47 @@
/*
* \brief Hoitaja — Cell Controller
* \author Michael Müller, Norman Feske (Init)
* \date 2023-04-20
*/
/*
* Copyright (C) 2010-2017 Genode Labs GmbH
* Copyright (C) 2023 Michael Müller, Osnabrück University
*
* This file is part of EalánOS, which is distributed
* under the terms of the GNU Affero General Public License version 3.
*/
#pragma once
#include <sandbox/child.h>
namespace Hoitaja
{
class Cell_controller;
}
class Hoitaja::Cell_controller
{
public:
void create_cell();
void destroy_cell();
/**
* @brief Determine which cells shall be shrunk
*
* @return Sandbox::Child* List of cells to shrink
*/
Sandbox::Child *cells_to_shrink();
/**
* @brief Determine which cells shall be grown
*
* @return Sandbox::Child* List of cells to grow
*/
Sandbox::Child *cells_to_grow();
/**
* @brief Regather performance metrics for next adaptation cycle
*
*/
void update_metrics();
};

View File

@@ -0,0 +1,214 @@
<?xml version="1.0"?>
<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema">
<xs:include schemaLocation="base_types.xsd"/>
<xs:complexType name="template_service">
<xs:choice minOccurs="1" maxOccurs="3">
<xs:element name="parent"/>
<xs:element name="any-child"/>
<xs:element name="child">
<xs:complexType>
<xs:attribute name="name" type="xs:string" />
<xs:attribute name="label" type="Session_label" />
</xs:complexType>
</xs:element>
</xs:choice>
<xs:attribute name="name" type="xs:string" />
<xs:attribute name="label" type="Session_label" />
<xs:attribute name="label_prefix" type="Session_label" />
<xs:attribute name="label_suffix" type="Session_label" />
<xs:attribute name="label_last" type="Session_label" />
<xs:attribute name="unscoped_label" type="Session_label" />
</xs:complexType>
<xs:complexType name="template_route">
<xs:choice minOccurs="0" maxOccurs="unbounded">
<xs:element name="service" type="template_service"/>
<xs:element name="any-service" type="template_service"/>
</xs:choice>
</xs:complexType>
<xs:element name="config">
<xs:complexType>
<xs:choice minOccurs="0" maxOccurs="unbounded">
<xs:element name="service">
<xs:complexType>
<xs:choice minOccurs="1" maxOccurs="unbounded">
<xs:element name="default-policy">
<xs:complexType>
<xs:choice minOccurs="1" maxOccurs="1">
<xs:element name="child">
<xs:complexType>
<xs:attribute name="name" type="xs:string" />
<xs:attribute name="label" type="Session_label" />
</xs:complexType>
</xs:element><!-- child -->
</xs:choice>
</xs:complexType>
</xs:element><!-- default-policy -->
<xs:element name="policy">
<xs:complexType>
<xs:choice minOccurs="1" maxOccurs="1">
<xs:element name="child">
<xs:complexType>
<xs:attribute name="name" type="xs:string" />
<xs:attribute name="label" type="Session_label" />
</xs:complexType>
</xs:element><!-- child -->
</xs:choice>
<xs:attribute name="label" type="Session_label" />
<xs:attribute name="label_prefix" type="Session_label" />
<xs:attribute name="label_suffix" type="Session_label" />
<xs:attribute name="label_last" type="Session_label" />
<xs:attribute name="unscoped_label" type="Session_label" />
</xs:complexType>
</xs:element><!-- policy -->
</xs:choice>
<xs:attribute name="name" type="xs:string" />
</xs:complexType>
</xs:element><!-- "service" -->
<xs:element name="affinity-space">
<xs:complexType>
<xs:attribute name="width" type="xs:int" />
<xs:attribute name="height" type="xs:int" />
</xs:complexType>
</xs:element> <!-- "affinity-space" -->
<xs:element name="parent-provides">
<xs:complexType>
<xs:sequence>
<xs:element name="service" minOccurs="0" maxOccurs="unbounded">
<xs:complexType>
<xs:attribute name="name" type="xs:string" />
</xs:complexType>
</xs:element>
</xs:sequence>
</xs:complexType>
</xs:element> <!-- "parent-provides" -->
<xs:element name="alias">
<xs:complexType>
<xs:attribute name="name" type="xs:string" />
<xs:attribute name="child" type="xs:string" />
</xs:complexType>
</xs:element> <!-- "alias" -->
<xs:element name="default-route" type="template_route"/>
<xs:element name="default">
<xs:complexType>
<xs:attribute name="caps" type="xs:int" />
</xs:complexType>
</xs:element> <!-- "default" -->
<xs:element name="report">
<xs:complexType>
<xs:attribute name="ids" type="Boolean" />
<xs:attribute name="requested" type="Boolean" />
<xs:attribute name="provided" type="Boolean" />
<xs:attribute name="session_args" type="Boolean" />
<xs:attribute name="child_caps" type="Boolean" />
<xs:attribute name="child_ram" type="Boolean" />
<xs:attribute name="init_caps" type="Boolean" />
<xs:attribute name="init_ram" type="Boolean" />
<xs:attribute name="delay_ms" type="xs:int" />
<xs:attribute name="buffer" type="Number_of_bytes" />
</xs:complexType>
</xs:element> <!-- "report" -->
<xs:element name="heartbeat">
<xs:complexType>
<xs:attribute name="rate_ms" type="xs:int" />
</xs:complexType>
</xs:element> <!-- "heartbeat" -->
<xs:element name="resource">
<xs:complexType>
<xs:attribute name="name" type="xs:string" />
<xs:attribute name="preserve" type="Number_of_bytes" />
</xs:complexType>
</xs:element> <!-- "resource" -->
<xs:element name="start" minOccurs="1" maxOccurs="unbounded">
<xs:complexType>
<xs:choice minOccurs="0" maxOccurs="unbounded">
<xs:element name="binary">
<xs:complexType>
<xs:attribute name="name" type="xs:string" />
</xs:complexType>
</xs:element> <!-- "binary" -->
<xs:element name="heartbeat" />
<xs:element name="affinity">
<xs:complexType>
<xs:attribute name="xpos" type="xs:int" />
<xs:attribute name="ypos" type="xs:int" />
<xs:attribute name="width" type="xs:int" />
<xs:attribute name="height" type="xs:int" />
</xs:complexType>
</xs:element> <!-- "affinity" -->
<xs:element name="resource">
<xs:complexType>
<xs:attribute name="name" type="xs:string" />
<xs:attribute name="quantum" type="Number_of_bytes" />
</xs:complexType>
</xs:element> <!-- "resource" -->
<xs:element name="exit">
<xs:complexType>
<xs:attribute name="propagate" type="Boolean" />
</xs:complexType>
</xs:element> <!-- "exit" -->
<xs:element name="provides">
<xs:complexType>
<xs:choice maxOccurs="unbounded">
<xs:element name="service">
<xs:complexType>
<xs:attribute name="name" type="xs:string" />
</xs:complexType>
</xs:element>
</xs:choice>
</xs:complexType>
</xs:element> <!-- "provides" -->
<xs:element name="route" type="template_route"/>
<xs:element name="config">
<xs:complexType>
<xs:sequence>
<xs:any minOccurs="0" maxOccurs="unbounded" processContents="skip" />
</xs:sequence>
<xs:anyAttribute processContents="skip"/>
</xs:complexType>
</xs:element> <!-- "config" -->
</xs:choice>
<xs:attribute name="name" type="xs:string" />
<xs:attribute name="ld" type="Boolean" />
<xs:attribute name="caps" type="xs:int" />
<xs:attribute name="priority" type="xs:int" />
<xs:attribute name="managing_system" type="Boolean" />
</xs:complexType>
</xs:element> <!-- "start" -->
</xs:choice>
<xs:attribute name="prio_levels" type="xs:int" />
<xs:attribute name="verbose" type="Boolean" />
<xs:attribute name="ld_verbose" type="Boolean" />
</xs:complexType>
</xs:element> <!-- "config" -->
</xs:schema>

View File

@@ -0,0 +1,109 @@
/*
* \brief Hoitaja — Core Allocator
* \author Michael Müller, Norman Feske (Init)
* \date 2023-04-20
*/
/*
* Copyright (C) 2010-2017 Genode Labs GmbH
* Copyright (C) 2023 Michael Müller, Osnabrück University
*
* This file is part of EalánOS, which is distributed
* under the terms of the GNU Affero General Public License version 3.
*/
#pragma once
/* Genode includes */
#include <sandbox/child.h>
#include <sandbox/library.h>
#include <sandbox/utils.h>
#include <sandbox/types.h>
#include <util/string.h>
/** Hoitaja includes **/
#include "load_controller.h"
#include "cell_controller.h"
#include <cell.h>
namespace Hoitaja
{
class Core_allocator;
}
class Hoitaja::Core_allocator
{
private:
Genode::Affinity::Space &_affinity_space;
::Sandbox::Prio_levels &_prio_levels;
double _resource_coeff; // Coefficient used for calculating resource shares
void _shrink(Cell &cell, unsigned int cores)
{
char yield_args_str[16];
Genode::snprintf(yield_args_str, sizeof(yield_args_str), "cores=%u", cores);
Genode::Parent::Resource_args yield_args(yield_args_str);
cell.yield(yield_args);
}
public:
inline unsigned int _calculate_resource_share(long priority) {
double ref_share = static_cast<double>(_affinity_space.total()) / _resource_coeff;
return static_cast<unsigned int>((1.0 / static_cast<double>(priority)) * ref_share);
}
Core_allocator(Genode::Affinity::Space &affinity_space, ::Sandbox::Prio_levels prio_levels) : _affinity_space(affinity_space), _prio_levels(prio_levels), _resource_coeff(0.0)
{
Genode::log("Created core allocator for ", affinity_space.total(), " cores and ", prio_levels.value, " priorities.");
}
Genode::Affinity::Location allocate_cores_for_cell(Genode::Xml_node const &start_node)
{
// Calculate affinity from global affinity space and priority
long priority = ::Sandbox::priority_from_xml(start_node, _prio_levels);
priority = (priority == 0) ? 1 : priority;
_resource_coeff += (1.0/static_cast<double>(priority)); // treat priority 0 same as 1, to avoid division by zero here
unsigned int cores_share = _calculate_resource_share(priority);
Genode::log("Child ", start_node.attribute_value("name", Genode::String<8>("unknown")), "'s share is ", cores_share, " of ", _affinity_space.total(), " cores, coeff=", _resource_coeff, " priority=", priority);
return Genode::Affinity::Location( _affinity_space.total()-cores_share, 0, cores_share, 1 ); /* always use the core_share last cores, for now */
}
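/*
* Worked example (assumed numbers): on a 64-core affinity space, admitting
* a priority-1 cell first gives coeff = 1.0 and a share of 64 cores. When
* a priority-2 cell is admitted, coeff grows to 1.5, so its share is
* (1/2) * (64/1.5), i.e. about 21 cores; a subsequent update() pass then
* rebalances the priority-1 cell to (1/1) * (64/1.5), i.e. about 42 cores.
*/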
void free_cores_from_cell(Cell &cell)
{
/* Remove the cell's coefficient from the global resource coefficient.
* This is necessary to redistribute the freed resources correctly. We do
* not trigger the redistribution itself here, because the child has not
* been fully destroyed yet, so its resources might still be occupied at
* this point. */
_resource_coeff -= 1.0 / static_cast<double>(cell.resources().priority);
}
/**
* @brief Update core allocations for cells reported by Cell controller
*
*/
void update(Hoitaja::Cell &cell, int *xpos) {
Cell::Resources resources = cell.resources();
long priority = (resources.priority == 0)? 1 : resources.priority;
unsigned int cores_share = _calculate_resource_share(priority);
unsigned int const current_cores = resources.affinity.location().width() * resources.affinity.location().height();
unsigned int const cores_to_reclaim = (current_cores > cores_share) ? current_cores - cores_share : 0;
Genode::Affinity::Location location(*xpos - cores_share, resources.affinity.location().ypos(), cores_share, resources.affinity.location().height());
cell.update_affinity(Genode::Affinity(resources.affinity.space(), location));
*xpos = location.xpos();
// TODO: Update affinity of existing sessions for cell
// TODO: Send yield request to cell
log("Need to reclaim ", cores_to_reclaim, " cores from ", cell.name());
if (cores_to_reclaim > 0) {
_shrink(cell, cores_to_reclaim);
}
}
};

View File

@@ -0,0 +1,119 @@
#include "habitat.h"
#include <sandbox/utils.h>
::Sandbox::Child &Hoitaja::Habitat::create_child(Genode::Xml_node const &start_node)
{
if (_affinity_space.constructed() && !_core_allocator.constructed())
_core_allocator.construct(*_affinity_space, _prio_levels);
Genode::Affinity::Location allocation = _core_allocator->allocate_cores_for_cell(start_node);
if (allocation.width() < 1) {
Genode::error("failed to create child ", start_node.attribute_value("name", Child_policy::Name()), ": not enough CPU cores left.");
throw ::Sandbox::Start_model::Factory::Creation_failed();
}
// Allocate `cores_share` cores from the Core Allocator and set the child's affinity accordingly
// TODO: Implement core allocation
try {
Hoitaja::Cell &child = *new (_heap)
Hoitaja::Cell(_env, _heap, *_verbose,
Child::Id { ++_child_cnt }, _state_reporter,
start_node, *this, *this, _children, *this, *this, *this, *this,
_prio_levels, _effective_affinity_space(), allocation,
_parent_services, _child_services, _local_services, _habitat_handler);
_children.insert(static_cast<::Sandbox::Child *>(&child));
maintain_cells();
_avail_cpu.percent -= min(_avail_cpu.percent, child.cpu_quota().percent);
if (start_node.has_sub_node("provides"))
_server_appeared_or_disappeared = true;
_state_report_outdated = true;
return static_cast<::Sandbox::Child&>(child);
}
catch (Rom_connection::Rom_connection_failed) {
/*
* The binary does not exist. An error message is printed
* by the Rom_connection constructor.
*/
}
catch (Out_of_ram) {
warning("memory exhausted during child creation"); }
catch (Out_of_caps) {
warning("local capabilities exhausted during child creation"); }
catch (Child::Missing_name_attribute) {
warning("skipped startup of nameless child"); }
catch (Region_map::Region_conflict) {
warning("failed to attach dataspace to local address space "
"during child construction"); }
catch (Region_map::Invalid_dataspace) {
warning("attempt to attach invalid dataspace to local address space "
"during child construction"); }
catch (Service_denied) {
warning("failed to create session during child construction"); }
throw ::Sandbox::Start_model::Factory::Creation_failed();
}
void Hoitaja::Habitat::_destroy_abandoned_children()
{
_children.for_each_child([&] (Child &child) {
if (!child.abandoned())
return;
/* make the child's services unavailable */
child.destroy_services();
child.close_all_sessions();
_state_report_outdated = true;
/* destroy child once all environment sessions are gone */
if (child.env_sessions_closed()) {
_core_allocator->free_cores_from_cell(static_cast<Hoitaja::Cell &>(child));
_children.remove(&child);
Cpu_quota const child_cpu_quota = child.cpu_quota();
destroy(_heap, &child);
/* replenish available CPU quota */
_avail_cpu.percent += child_cpu_quota.percent;
_transferred_cpu.percent -= min(_transferred_cpu.percent,
child_cpu_quota.percent);
}
});
/* We might have formerly occupied resources again now, so redistribute them */
maintain_cells();
}
void Hoitaja::Habitat::maintain_cells()
{
int xpos = _affinity_space->total();
_children.for_each_child([&](Child &child)
{
log(child.name(), " ram: ", child.ram_quota());
Cell &cell = static_cast<Cell&>(child);
_core_allocator->update(cell, &xpos); });
}
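/*
* Note: cells are packed from the right edge of the affinity space. 'xpos'
* starts at the total core count and Core_allocator::update() places each
* cell directly to the left of the previously placed one, so cores not
* covered by any share end up at the low end of the space.
*/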
void Hoitaja::Habitat::update(Cell &cell)
{
if (cell._exited) {
if (cell._exit_value != 0)
Genode::error(cell.name(), " exited with exit status ", cell._exit_value);
_children.remove(static_cast<Sandbox::Child *>(&cell));
_core_allocator->free_cores_from_cell(cell);
/* Update resource allocations, as there are new resources available */
maintain_cells();
}
}

View File

@@ -0,0 +1,69 @@
#include <sandbox/child.h>
#include <sandbox/alias.h>
#include <sandbox/server.h>
#include <sandbox/heartbeat.h>
#include <sandbox/config_model.h>
#include <sandbox/library.h>
#include <sandbox/sandbox.h>
#include <base/log.h>
#include <base/registry.h>
#include <base/service.h>
#include <base/heap.h>
#include <util/reconstructible.h>
/* Hoitaja includes */
#include <core_allocator.h>
#include <cell.h>
#include <state_handler.h>
#pragma once
namespace Hoitaja {
class Habitat;
using namespace Genode;
}
struct Hoitaja::Habitat : public Sandbox::Library
{
public:
friend class Genode::Sandbox::Local_service_base;
State_handler &_habitat_handler;
Heap _heap;
Genode::Constructible<Hoitaja::Core_allocator> _core_allocator;
Registry<Genode::Sandbox::Local_service_base>
_local_services{};
void apply_config(Xml_node const &config) override {
log("Hoitaja is applying new config.");
Sandbox::Library::apply_config(config);
}
void generate_state_report(Xml_generator &xml) const override {
log("Generating new state report for Hoitaja.");
Sandbox::Library::generate_state_report(xml);
}
void maintain_cells();
/**
* @brief Update cell's resource allocations
*
* @param cell whose resource allocations needs updating
*/
void update(Cell &cell);
Habitat(Env &env, State_handler &habitat_handler, Genode::Sandbox::State_handler &handler)
: Sandbox::Library(env, _heap, _local_services, handler), _habitat_handler(habitat_handler), _heap(env.ram(), env.rm()), _core_allocator()
{
}
Sandbox::Child &create_child(Xml_node const &) override;
void _destroy_abandoned_children() override;
};

View File

@@ -0,0 +1,2 @@
#pragma once

View File

@@ -0,0 +1,28 @@
/*
* \brief Hoitaja — Load Controller
* \author Michael Müller, Norman Feske (Init)
* \date 2023-04-20
*/
/*
* Copyright (C) 2010-2017 Genode Labs GmbH
* Copyright (C) 2023 Michael Müller, Osnabrück University
*
* This file is part of EalánOS, which is distributed
* under the terms of the GNU Affero General Public License version 3.
*/
#pragma once
#include <trace_session/connection.h>
#include <base/affinity.h>
namespace Hoitaja {
class Load_controller;
}
class Hoitaja::Load_controller
{
public:
unsigned short *cpu_loads();
Genode::Affinity::Location *idle_cores();
};

View File

@@ -0,0 +1,146 @@
/*
* \brief Hoitaja — Cell Management Component based on Init
* \author Michael Müller, Norman Feske (Init)
* \date 2023-04-20
*/
/*
* Copyright (C) 2010-2017 Genode Labs GmbH
* Copyright (C) 2023 Michael Müller, Osnabrück University
*
* This file is part of EalánOS, which is distributed
* under the terms of the GNU Affero General Public License version 3.
*/
/* Genode includes */
#include <base/component.h>
#include <base/attached_rom_dataspace.h>
#include <habitat.h>
#include <os/reporter.h>
#include <timer_session/connection.h>
/** Hoitaja components **/
/* Filtering components */
#include "load_controller.h"
#include "cell_controller.h"
#include "hyperthread_controller.h"
#include "memory_controller.h"
#include "numa_controller.h"
/* Core Allocator */
#include "core_allocator.h"
/* State Handler */
#include "state_handler.h"
namespace Hoitaja {
using namespace Genode;
struct Main;
}
struct Hoitaja::Main : Genode::Sandbox::State_handler, Hoitaja::State_handler
{
Env &_env;
Habitat _sandbox { _env, *this, *this };
Timer::Connection _timer{_env};
Attached_rom_dataspace _config { _env, "config" };
void _handle_resource_avail() { }
Signal_handler<Main> _resource_avail_handler {
_env.ep(), *this, &Main::_handle_resource_avail };
Constructible<Reporter> _reporter { };
size_t _report_buffer_size = 0;
void _handle_config()
{
_config.update();
Xml_node const config = _config.xml();
bool reporter_enabled = false;
config.with_optional_sub_node("report", [&] (Xml_node report) {
reporter_enabled = true;
/* (re-)construct reporter whenever the buffer size is changed */
Number_of_bytes const buffer_size =
report.attribute_value("buffer", Number_of_bytes(4096));
if (buffer_size != _report_buffer_size || !_reporter.constructed()) {
_report_buffer_size = buffer_size;
_reporter.construct(_env, "state", "state", _report_buffer_size);
}
});
if (_reporter.constructed())
_reporter->enabled(reporter_enabled);
_sandbox.apply_config(config);
}
Signal_handler<Main> _config_handler {
_env.ep(), *this, &Main::_handle_config };
void _handle_timeout()
{
//log("Hoitaja's entering its maintance cycle");
// For now just print all cells created by Hoitaja
//_handle_config();
//_timer.trigger_once(1000 * 1000);
}
Signal_handler<Main> _timeout_handler{
_env.ep(), *this, &Main::_handle_timeout};
/**
* Sandbox::State_handler interface
*/
void handle_sandbox_state() override
{
Genode::log("Habitat state changed");
/*
try {
Reporter::Xml_generator xml(*_reporter, [&] () {
_sandbox.generate_state_report(xml); });
}
catch (Xml_generator::Buffer_exceeded) {
error("state report exceeds maximum size");
try to reflect the error condition as state report
try {
Reporter::Xml_generator xml(*_reporter, [&] () {
xml.attribute("error", "report buffer exceeded"); });
}
catch (...) { }
}*/
}
void handle_habitat_state(Cell &cell) override
{
Genode::log("Habitat changed");
_sandbox.update(cell);
}
Main(Env &env) : _env(env)
{
_config.sigh(_config_handler);
_timer.sigh(_timeout_handler);
/* prevent init from blocking for resource upgrades (never satisfied by core) */
_env.parent().resource_avail_sigh(_resource_avail_handler);
_timer.trigger_once(1000 * 1000);
_handle_config();
}
};
void Component::construct(Genode::Env &env) { static Hoitaja::Main main(env); }

View File

@@ -0,0 +1,2 @@
#pragma once

View File

@@ -0,0 +1,2 @@
#pragma once

View File

@@ -0,0 +1,13 @@
#pragma once
#include <util/interface.h>
namespace Hoitaja {
struct State_handler;
class Cell;
}
struct Hoitaja::State_handler : Genode::Interface
{
virtual void handle_habitat_state(Cell &cell) = 0;
};

View File

@@ -0,0 +1,131 @@
/* Genode includes */
#include <base/session_object.h>
#include <base/affinity.h>
#include <base/attached_dataspace.h>
#include <base/heap.h>
#include <base/session_object.h>
#include <suoritin/session.h>
#include <suoritin/client.h>
#include <suoritin/connection.h>
namespace Tukija {
namespace Suoritin {
class Session_component;
template <class T> class Allocator;
}
}
template <class T>
class Tukija::Suoritin::Allocator : public Genode::Allocator
{
using size_t = Genode::size_t;
private:
Genode::Region_map::Local_addr _dataspace{};
size_t _interface_size;
Genode::Region_map::Local_addr _pos {_dataspace};
public:
Allocator(Genode::Env &env, Genode::Ram_dataspace_capability *_interface_cap, size_t interface_size) : _interface_size(interface_size)
{
*_interface_cap = env.ram().alloc(interface_size);
_dataspace = static_cast<T *>(env.rm().attach(*_interface_cap));
}
Alloc_result try_alloc(size_t) override
{
T *pos = _pos;
if (pos >= static_cast<T*>(_dataspace) + _interface_size)
return Alloc_result(Genode::Ram_allocator::Alloc_error::OUT_OF_RAM);
pos++;
return Alloc_result(static_cast<void *>(pos));
}
void free(void *, size_t) override
{ }
size_t overhead(size_t) const override { return 0; }
bool need_size_for_free() const override { return false; }
T *interface() { return _dataspace; }
};
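/*
* The allocator above is a simple bump allocator over a RAM dataspace that
* the session also exports to its client via 'interface_cap()'. The
* Session_component below keeps one instance per object type, roughly
* (sketch, sizes assumed):
*
* Allocator<Genode::Registered<Worker>> workers(env, &cap, n*sizeof(Genode::Registered<Worker>));
* Worker *w = new (workers) Genode::Registered<Worker>(registry);
*/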
class Tukija::Suoritin::Session_component : public Genode::Session_object<Tukija::Suoritin::Session>
{
private:
Genode::Affinity _affinity;
Genode::Env &_env;
Genode::Ram_dataspace_capability _workers_interface_cap{};
Genode::Ram_dataspace_capability _channels_interface_cap{};
Allocator<Genode::Registered<Worker>> _worker_allocator;
Allocator<Genode::Registered<Channel>> _channel_allocator;
unsigned long no_channels{0};
unsigned long no_workers{0};
template <class T, typename FUNC>
void construct(FUNC const &fn, Allocator<Genode::Registered<T>> &alloc, Genode::Registry<Genode::Registered<T>> &registry) {
T* object = nullptr;
try {
try {
object = new (alloc) Genode::Registered<T>(registry);
fn(object);
} catch (Genode::Allocator::Out_of_memory) {
Genode::error("Out of RAM on registering worker.");
throw;
}
} catch (...) {
if (object)
destroy(alloc, object);
Genode::error("Exception caught registering worker");
throw;
}
}
public:
Session_component(Genode::Rpc_entrypoint &session_ep,
Genode::Session::Resources const &resources,
Genode::Session::Label const &label,
Genode::Session::Diag const &diag,
Genode::Env &env,
Genode::Affinity &affinity)
: Session_object(session_ep, resources, label, diag), _affinity(affinity.space().total() ? affinity : Genode::Affinity(Genode::Affinity::Space(1,1), Genode::Affinity::Location(0,0,1,1))),
_env(env), _worker_allocator(env, &_workers_interface_cap, _affinity.space().total()*sizeof(Genode::Registered<Worker>)),
_channel_allocator(env, &_channels_interface_cap, _affinity.space().total()*sizeof(Genode::Registered<Channel>))
{
}
void create_channel() override
{
try {
construct<Channel>([&](Channel *) {}, _channel_allocator, _channels);
}
catch (...)
{
Genode::error("Faild to create channel");
}
}
void register_worker(Genode::Thread::Name const &name, Genode::Thread_capability cap) override {
try {
construct<Worker>([&](Worker *worker)
{ worker->_cap = cap;
worker->_name = name; }, _worker_allocator, _workers);
}
catch (...)
{
Genode::error("Failed to register worker");
}
}
Capability interface_cap() override {
return Capability{_workers_interface_cap, _channels_interface_cap};
}
};

View File

@@ -0,0 +1,11 @@
TARGET = hoitaja
SRC_CC = main.cc habitat.cc suoritin/component.cc
LIBS = base
INC_DIR += $(PRG_DIR)
CONFIG_XSD = config.xsd
# statically link sandbox library to avoid dependency from sandbox.lib.so
SRC_CC += library.cc child.cc server.cc config_model.cc
INC_DIR += $(REP_DIR)/src/lib/sandbox
vpath %.cc $(REP_DIR)/src/lib/sandbox

View File

@@ -25,11 +25,11 @@ namespace Init {
}
struct Init::Main : Sandbox::State_handler
struct Init::Main : Genode::Sandbox::State_handler
{
Env &_env;
Sandbox _sandbox { _env, *this };
Genode::Sandbox _sandbox { _env, *this };
Attached_rom_dataspace _config { _env, "config" };

View File

@@ -15,7 +15,7 @@
#include <vm_session/vm_session.h>
/* local includes */
#include <child.h>
#include <sandbox/child.h>
void Sandbox::Child::destroy_services()
@@ -679,6 +679,7 @@ Genode::Affinity Sandbox::Child::filter_session_affinity(Affinity const &session
Affinity::Space const &child_space = _resources.affinity.space();
Affinity::Location const &child_location = _resources.affinity.location();
Genode::log("[Hoitaja->", this->name(),"] Using cell's affinity ", child_location, " in ", child_space, " for filtering session affinity.");
/* check if no valid affinity space was specified */
if (session_affinity.space().total() == 0)
return Affinity(child_space, child_location);
@@ -686,18 +687,24 @@ Genode::Affinity Sandbox::Child::filter_session_affinity(Affinity const &session
Affinity::Space const &session_space = session_affinity.space();
Affinity::Location const &session_location = session_affinity.location();
Genode::log("Scaling to session affinity ", session_location, " in ", session_space);
/* scale resolution of resulting space */
Affinity::Space space(child_space.multiply(session_space));
Affinity::Location child_session(child_location.xpos(), child_location.ypos(),
child_location.width() * session_location.width(),
child_location.height() * session_location.height());
Genode::log("Scaled session affinity to ", child_session, " in ", space);
/* subordinate session affinity to child affinity subspace */
Affinity::Location location(child_session
.multiply_position(session_space)
.transpose(session_location.xpos() * child_location.width(),
session_location.ypos() * child_location.height()));
Genode::log("Session affinity subordinated to ", location, " in ", space);
return Affinity(space, location);
}
@@ -743,6 +750,7 @@ Sandbox::Child::Child(Env &env,
Cpu_quota_transfer &cpu_quota_transfer,
Prio_levels prio_levels,
Affinity::Space const &affinity_space,
Affinity::Location const &location,
Registry<Parent_service> &parent_services,
Registry<Routed_service> &child_services,
Registry<Local_service> &local_services)
@@ -759,7 +767,7 @@ Sandbox::Child::Child(Env &env,
_cpu_quota_transfer(cpu_quota_transfer),
_name_registry(name_registry),
_heartbeat_enabled(start_node.has_sub_node("heartbeat")),
_resources(_resources_from_start_node(start_node, prio_levels, affinity_space,
_resources(_resources_from_start_node(start_node, prio_levels, affinity_space, location,
default_caps_accessor.default_caps())),
_parent_services(parent_services),
_child_services(child_services),

View File

@@ -11,7 +11,7 @@
* under the terms of the GNU Affero General Public License version 3.
*/
#include <config_model.h>
#include <sandbox/config_model.h>
using namespace Sandbox;

View File

@@ -16,254 +16,11 @@
#include <sandbox/sandbox.h>
/* local includes */
#include <child.h>
#include <alias.h>
#include <server.h>
#include <heartbeat.h>
#include <config_model.h>
struct Genode::Sandbox::Library : ::Sandbox::State_reporter::Producer,
::Sandbox::Child::Default_route_accessor,
::Sandbox::Child::Default_caps_accessor,
::Sandbox::Child::Ram_limit_accessor,
::Sandbox::Child::Cap_limit_accessor,
::Sandbox::Child::Cpu_limit_accessor,
::Sandbox::Child::Cpu_quota_transfer,
::Sandbox::Start_model::Factory,
::Sandbox::Parent_provides_model::Factory
{
using Routed_service = ::Sandbox::Routed_service;
using Parent_service = ::Sandbox::Parent_service;
using Local_service = ::Genode::Sandbox::Local_service_base;
using Report_detail = ::Sandbox::Report_detail;
using Child_registry = ::Sandbox::Child_registry;
using Verbose = ::Sandbox::Verbose;
using State_reporter = ::Sandbox::State_reporter;
using Heartbeat = ::Sandbox::Heartbeat;
using Server = ::Sandbox::Server;
using Alias = ::Sandbox::Alias;
using Child = ::Sandbox::Child;
using Prio_levels = ::Sandbox::Prio_levels;
using Ram_info = ::Sandbox::Ram_info;
using Cap_info = ::Sandbox::Cap_info;
using Cpu_quota = ::Sandbox::Cpu_quota;
using Config_model = ::Sandbox::Config_model;
using Start_model = ::Sandbox::Start_model;
using Preservation = ::Sandbox::Preservation;
Env &_env;
Heap &_heap;
Registry<Parent_service> _parent_services { };
Registry<Routed_service> _child_services { };
Registry<Local_service> &_local_services;
Child_registry _children { };
/*
* Global parameters obtained from config
*/
Reconstructible<Verbose> _verbose { };
Config_model::Version _version { };
Constructible<Buffered_xml> _default_route { };
Cap_quota _default_caps { 0 };
Prio_levels _prio_levels { };
Constructible<Affinity::Space> _affinity_space { };
Preservation _preservation { };
Affinity::Space _effective_affinity_space() const
{
return _affinity_space.constructed() ? *_affinity_space
: Affinity::Space { 1, 1 };
}
State_reporter _state_reporter;
Heartbeat _heartbeat { _env, _children, _state_reporter };
/*
* Internal representation of the XML configuration
*/
Config_model _config_model { };
/*
* Variables for tracking the side effects of updating the config model
*/
bool _server_appeared_or_disappeared = false;
bool _state_report_outdated = false;
unsigned _child_cnt = 0;
Cpu_quota _avail_cpu { .percent = 100 };
Cpu_quota _transferred_cpu { .percent = 0 };
Ram_quota _avail_ram() const
{
Ram_quota avail_ram = _env.pd().avail_ram();
if (_preservation.ram.value > avail_ram.value) {
error("RAM preservation exceeds available memory");
return Ram_quota { 0 };
}
/* deduce preserved quota from available quota */
return Ram_quota { avail_ram.value - _preservation.ram.value };
}
Cap_quota _avail_caps() const
{
Cap_quota avail_caps { _env.pd().avail_caps().value };
if (_preservation.caps.value > avail_caps.value) {
error("Capability preservation exceeds available capabilities");
return Cap_quota { 0 };
}
/* deduce preserved quota from available quota */
return Cap_quota { avail_caps.value - _preservation.caps.value };
}
/**
* Child::Ram_limit_accessor interface
*/
Ram_quota resource_limit(Ram_quota const &) const override
{
return _avail_ram();
}
/**
* Child::Cap_limit_accessor interface
*/
Cap_quota resource_limit(Cap_quota const &) const override { return _avail_caps(); }
/**
* Child::Cpu_limit_accessor interface
*/
Cpu_quota resource_limit(Cpu_quota const &) const override { return _avail_cpu; }
/**
* Child::Cpu_quota_transfer interface
*/
void transfer_cpu_quota(Cpu_session_capability cap, Cpu_quota quota) override
{
Cpu_quota const remaining { 100 - min(100u, _transferred_cpu.percent) };
/* prevent division by zero in 'quota_lim_upscale' */
if (remaining.percent == 0)
return;
size_t const fraction =
Cpu_session::quota_lim_upscale(quota.percent, remaining.percent);
_env.cpu().transfer_quota(cap, fraction);
_transferred_cpu.percent += quota.percent;
}
/**
* State_reporter::Producer interface
*/
void produce_state_report(Xml_generator &xml, Report_detail const &detail) const override
{
if (detail.init_ram())
xml.node("ram", [&] () { Ram_info::from_pd(_env.pd()).generate(xml); });
if (detail.init_caps())
xml.node("caps", [&] () { Cap_info::from_pd(_env.pd()).generate(xml); });
if (detail.children())
_children.report_state(xml, detail);
}
/**
* State_reporter::Producer interface
*/
Child::Sample_state_result sample_children_state() override
{
return _children.sample_state();
}
/**
* Default_route_accessor interface
*/
Xml_node default_route() override
{
return _default_route.constructed() ? _default_route->xml()
: Xml_node("<empty/>");
}
/**
* Default_caps_accessor interface
*/
Cap_quota default_caps() override { return _default_caps; }
void _update_aliases_from_config(Xml_node const &);
void _update_parent_services_from_config(Xml_node const &);
void _update_children_config(Xml_node const &);
void _destroy_abandoned_parent_services();
void _destroy_abandoned_children();
Server _server { _env, _heap, _child_services, _state_reporter };
/**
* Sandbox::Start_model::Factory
*/
Child &create_child(Xml_node const &) override;
/**
* Sandbox::Start_model::Factory
*/
void update_child(Child &, Xml_node const &) override;
/**
* Sandbox::Start_model::Factory
*/
Alias &create_alias(Child_policy::Name const &name) override
{
Alias &alias = *new (_heap) Alias(name);
_children.insert_alias(&alias);
return alias;
}
/**
* Sandbox::Start_model::Factory
*/
void destroy_alias(Alias &alias) override
{
_children.remove_alias(&alias);
destroy(_heap, &alias);
}
/**
* Sandbox::Start_model::Factory
*/
bool ready_to_create_child(Start_model::Name const &,
Start_model::Version const &) const override;
/**
* Sandbox::Parent_provides_model::Factory
*/
Parent_service &create_parent_service(Service::Name const &name) override
{
return *new (_heap) Parent_service(_parent_services, _env, name);
}
Library(Env &env, Heap &heap, Registry<Local_service> &local_services,
State_handler &state_handler)
:
_env(env), _heap(heap), _local_services(local_services),
_state_reporter(_env, *this, state_handler)
{ }
void apply_config(Xml_node const &);
void generate_state_report(Xml_generator &xml) const
{
_state_reporter.generate(xml);
}
};
#include <sandbox/library.h>
void Genode::Sandbox::Library::_destroy_abandoned_parent_services()
void Sandbox::Library::_destroy_abandoned_parent_services()
{
_parent_services.for_each([&] (Parent_service &service) {
if (service.abandoned())
@@ -271,7 +28,7 @@ void Genode::Sandbox::Library::_destroy_abandoned_parent_services()
}
void Genode::Sandbox::Library::_destroy_abandoned_children()
void Sandbox::Library::_destroy_abandoned_children()
{
_children.for_each_child([&] (Child &child) {
@@ -300,7 +57,7 @@ void Genode::Sandbox::Library::_destroy_abandoned_children()
}
bool Genode::Sandbox::Library::ready_to_create_child(Start_model::Name const &name,
bool Sandbox::Library::ready_to_create_child(Start_model::Name const &name,
Start_model::Version const &version) const
{
bool exists = false;
@@ -328,7 +85,7 @@ bool Genode::Sandbox::Library::ready_to_create_child(Start_model::Name const
}
::Sandbox::Child &Genode::Sandbox::Library::create_child(Xml_node const &start_node)
::Sandbox::Child &Sandbox::Library::create_child(Xml_node const &start_node)
{
if (!_affinity_space.constructed() && start_node.has_sub_node("affinity"))
warning("affinity-space configuration missing, "
@@ -340,7 +97,7 @@ bool Genode::Sandbox::Library::ready_to_create_child(Start_model::Name const
Child(_env, _heap, *_verbose,
Child::Id { ++_child_cnt }, _state_reporter,
start_node, *this, *this, _children, *this, *this, *this, *this,
_prio_levels, _effective_affinity_space(),
_prio_levels, _effective_affinity_space(), Affinity::Location(-1, -1, 0, 0),
_parent_services, _child_services, _local_services);
_children.insert(&child);
@@ -378,7 +135,7 @@ bool Genode::Sandbox::Library::ready_to_create_child(Start_model::Name const
}
void Genode::Sandbox::Library::update_child(Child &child, Xml_node const &start)
void Sandbox::Library::update_child(Child &child, Xml_node const &start)
{
if (child.abandoned())
return;
@@ -395,7 +152,7 @@ void Genode::Sandbox::Library::update_child(Child &child, Xml_node const &start)
}
void Genode::Sandbox::Library::apply_config(Xml_node const &config)
void Sandbox::Library::apply_config(Xml_node const &config)
{
_server_appeared_or_disappeared = false;
_state_report_outdated = false;
@@ -630,5 +387,5 @@ void Genode::Sandbox::generate_state_report(Xml_generator &xml) const
Genode::Sandbox::Sandbox(Env &env, State_handler &state_handler)
:
_heap(env.ram(), env.rm()),
_library(*new (_heap) Library(env, _heap, _local_services, state_handler))
_library(*new (_heap) ::Sandbox::Library(env, _heap, _local_services, state_handler))
{ }
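For context, a hedged sketch of how a component could drive the Sandbox facade constructed above: instantiate it with the environment and a state handler, then pass the <config> node to apply_config(). The config-ROM wiring and the handler method name are assumptions made for illustration, not taken from this commit.

/* hedged usage sketch of the Sandbox facade; handler name and config-ROM
 * wiring are assumptions, not part of this commit */
#include <base/component.h>
#include <base/attached_rom_dataspace.h>
#include <os/sandbox.h>

struct Main : Genode::Sandbox::State_handler
{
	Genode::Env &_env;

	Genode::Attached_rom_dataspace _config { _env, "config" };

	Genode::Sandbox _sandbox { _env, *this };

	/* invoked whenever the sandbox has new child state to report */
	void handle_sandbox_state() override { }

	Main(Genode::Env &env) : _env(env)
	{
		/* hand the <config> node to the sandbox, which starts the children */
		_sandbox.apply_config(_config.xml());
	}
};

void Component::construct(Genode::Env &env) { static Main main(env); }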
View File
@@ -16,7 +16,7 @@
#include <os/session_policy.h>
/* local includes */
#include "server.h"
#include <sandbox/server.h>
/******************************
View File
@@ -134,6 +134,16 @@ void Test::Child::_handle_yield()
size_t const requested_ram_quota =
Arg_string::find_arg(args.string(), "ram_quota").ulong_value(0);
size_t const requested_cpu_quota =
Arg_string::find_arg(args.string(), "cpu_quota").ulong_value(0);
log("released ", requested_cpu_quota, " portions of cpu_quota");
size_t const requested_gpu_quota =
Arg_string::find_arg(args.string(), "gpus").ulong_value(0);
log("got request to release ", requested_gpu_quota, " gpus");
/* free chunks of RAM to comply with the request */
size_t released_quota = 0;
while (released_quota < requested_ram_quota) {
@@ -208,6 +218,10 @@ class Test::Parent
unsigned _wait_cnt = 0;
unsigned long _start = 0;
unsigned long _end = 0;
enum State { WAIT, YIELD_REQUESTED, YIELD_GOT_RESPONSE };
State _state = WAIT;
@@ -232,7 +246,9 @@ class Test::Parent
log("request yield (ram prior yield: ", _used_ram_prior_yield);
/* issue yield request */
Genode::Parent::Resource_args yield_args("ram_quota=5M");
Genode::Parent::Resource_args yield_args("ram_quota=5M,cpu_quota=10,gpus=1");
_start = _timer.elapsed_us();
_child.yield(yield_args);
_state = YIELD_REQUESTED;
@@ -251,7 +267,9 @@ class Test::Parent
void _yield_response()
{
log("got yield response");
_end = _timer.elapsed_us();
log("got yield response after ", (_end-_start), "us");
_state = YIELD_GOT_RESPONSE;
_print_status();
@@ -281,7 +299,7 @@ class Test::Parent
Parent &_parent;
Static_parent_services<Pd_session, Cpu_session, Rom_session,
Log_session, Timer::Session>
Log_session, Timer::Session, Topo_session>
_parent_services { _env };
Cap_quota const _cap_quota { 50 };
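The yield test above now issues a composite resource string ("ram_quota=5M,cpu_quota=10,gpus=1") instead of plain RAM. A small hedged sketch of how such a string is decomposed with Arg_string, mirroring the parsing added to the child's yield handler; the helper function itself is hypothetical and only for illustration.

/* hedged sketch: picking apart a composite yield string with Arg_string;
 * 'parse_yield_args' is a hypothetical helper, not part of this commit */
#include <base/log.h>
#include <util/arg_string.h>

static void parse_yield_args(char const *args)
{
	using namespace Genode;

	/* each find_arg() extracts one key and falls back to 0 if it is absent */
	size_t const ram  = Arg_string::find_arg(args, "ram_quota").ulong_value(0);
	size_t const cpu  = Arg_string::find_arg(args, "cpu_quota").ulong_value(0);
	size_t const gpus = Arg_string::find_arg(args, "gpus").ulong_value(0);

	log("yield request: ram=", ram, " cpu_quota=", cpu, " gpus=", gpus);
}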
View File
@@ -254,7 +254,7 @@ lappend boot_modules libc.lib.so vfs.lib.so vfs_pipe.lib.so vfs
build_boot_image $boot_modules
append qemu_args " -nographic -m 800 "
append qemu_args " -nographic "
# wait until Noux started
run_genode_until {.*\[init -> vfs\] creating build directory\.\.\..*\n} $boot_timeout
View File
@@ -64,7 +64,7 @@ INSTALL_DIR := $(CURDIR)/bin
export BASE_DIR ?= ../base
export REPOSITORIES ?= $(BASE_DIR:%base=%base-linux) $(BASE_DIR)
export VERBOSE ?= @
export VERBOSE_DIR ?= --no-print-directory
export VERBOSE_DIR ?= --print-directory
export VERBOSE_MK ?= @
export LIB_CACHE_DIR ?= $(BUILD_BASE_DIR)/var/libcache
export LIB_PROGRESS_LOG ?= $(BUILD_BASE_DIR)/progress.log
View File
@@ -9,7 +9,7 @@ VERBOSE ?= @
ECHO := echo -e
HASHSUM := sha1sum
MAKEFLAGS += --no-print-directory
MAKEFLAGS += --print-directory
BRIGHT_COL ?= \x1b[01;33m
DARK_COL ?= \x1b[00;33m
View File
@@ -7,7 +7,7 @@
#
ifndef VERBOSE
MAKEFLAGS += --no-print-directory
MAKEFLAGS += --print-directory
endif
export GENODE_DIR := $(realpath $(dir $(MAKEFILE_LIST))/../..)
View File
@@ -54,7 +54,7 @@ proc run_image { {unused ""} } {
if {[image_disk_size] == 0} {
# resize image to only needed size and get size of resized image
exec resize2fs -M [run_dir].partition 2>/dev/null
exec /sbin/resize2fs -M [run_dir].partition 2>/dev/null
set disk_size_b [expr [regsub {\s.*} [exec wc -c [run_dir].partition] {}]]
set disk_size_kb [expr $disk_size_b / 1024]
}