diff --git a/.vscode/c_cpp_properties.json b/.vscode/c_cpp_properties.json new file mode 100644 index 0000000000..ef46a9e63a --- /dev/null +++ b/.vscode/c_cpp_properties.json @@ -0,0 +1,78 @@ +{ + "configurations": [ + { + "name": "EalánOS", + "includePath": [ + "${workspaceFolder}/depot/genodelabs/api/libc/**", + "${workspaceFolder}/depot/genodelabs/api/stdcxx/**", + "${workspaceFolder}/repos/**", + "${workspaceFolder}/repos/mml/**", + "${workspaceFolder}/repos/libports/include/**", + "${workspaceFolder}/contrib/mxtasking-07a3844690ae8eb15832d93e29567a5a8e6e45af/include/**", + "${workspaceFolder}/contrib/libpfm4-b0ec09148c2be9f4a96203a3d2de4ebed6ce2da0/include/**", + "${workspaceFolder}/contrib/libc-c7cd230b11ca71979f32950803bc78b45adfa0ce/include/libc/**", + "${workspaceFolder}/contrib/libc-c7cd230b11ca71979f32950803bc78b45adfa0ce/include/spec/x86_64/libc", + "${workspaceFolder}/contrib/libc-c7cd230b11ca71979f32950803bc78b45adfa0ce/include/libc/sys/**", + "${workspaceFolder}/contrib/stdcxx-d2865c41fafbbf66051d38e7b742c4d5bc2f05a3/include/stdcxx/", + "${workspaceFolder}/contrib/stdcxx-d2865c41fafbbf66051d38e7b742c4d5bc2f05a3/include/stdcxx/std", + "${workspaceFolder}/contrib/stdcxx-d2865c41fafbbf66051d38e7b742c4d5bc2f05a3/include/stdcxx/c_std", + "${workspaceFolder}/repos/libports/include/spec/x86_64/stdcxx", + "${workspaceFolder}/repos/base-nova/src/core/include/**", + "${workspaceFolder}/repos/base-nova/src/include/**", + "${workspaceFolder}/repos/base-nova/include/**", + "${workspaceFolder}/repos/base/src/core/include/**", + "${workspaceFolder}/repos/base/src/include/**", + "${workspaceFolder}/repos/base/include/**", + "/usr/local/genode/tool/21.05/lib/gcc/x86_64-pc-elf/10.3.0/include", + "/home/mml/loopbench/**" + ], + "defines": [ + "__GENODE__", + "__FreeBSD__=12", + "_GLIBCXX_HAVE_MBSTATE_T", + "_GLIBCXX_ATOMIC_BUILTINS_4", + "_GLIBCXX_NO_OBSOLETE_ISINF_ISNAN_DYNAMIC" + ], + "compilerPath": "/usr/local/genode/tool/21.05/bin/genode-x86-gcc", + "cStandard": 
"gnu17", + "cppStandard": "gnu++17", + "intelliSenseMode": "linux-gcc-x64", + "compilerArgs": [ + "-nostdinc", + "-m64" + ], + "configurationProvider": "ms-vscode.makefile-tools", + "forcedInclude": [ + "${workspaceFolder}/contrib/libc-c7cd230b11ca71979f32950803bc78b45adfa0ce/include/libc/stdint.h" + ], + "mergeConfigurations": true, + "browse": { + "limitSymbolsToIncludedHeaders": true, + "path": [ + "${workspaceFolder}/contrib/libc-c7cd230b11ca71979f32950803bc78b45adfa0ce/include/libc/**", + "${workspaceFolder}/contrib/libc-c7cd230b11ca71979f32950803bc78b45adfa0ce/include/spec/x86_64/libc", + "${workspaceFolder}/contrib/libc-c7cd230b11ca71979f32950803bc78b45adfa0ce/include/libc/sys/**", + "${workspaceFolder}/contrib/stdcxx-d2865c41fafbbf66051d38e7b742c4d5bc2f05a3/include/stdcxx/", + "${workspaceFolder}/contrib/stdcxx-d2865c41fafbbf66051d38e7b742c4d5bc2f05a3/include/stdcxx/std", + "${workspaceFolder}/contrib/stdcxx-d2865c41fafbbf66051d38e7b742c4d5bc2f05a3/include/stdcxx/c_std", + "${workspaceFolder}/repos/libports/include/spec/x86_64/stdcxx" + ] + } + }, + { + "name": "Genode", + "includePath": [ + "${workspaceFolder}/**", + "${workspaceFolder}/repos/base/**" + ], + "defines": [], + "compilerPath": "/usr/local/genode/tool/21.05/bin/genode-x86-gcc", + "cStandard": "c17", + "cppStandard": "c++20", + "intelliSenseMode": "${default}", + "configurationProvider": "ms-vscode.makefile-tools", + "mergeConfigurations": true + } + ], + "version": 4 +} \ No newline at end of file diff --git a/.vscode/settings.json b/.vscode/settings.json new file mode 100644 index 0000000000..a4f0c0c941 --- /dev/null +++ b/.vscode/settings.json @@ -0,0 +1,167 @@ +{ + "files.associations": { + "*.rasi": "css", + "*.bbmodel": "json", + "*.sublime-snippet": "xml", + "*.hbs": "html", + "*.ejs": "html", + "*.emu": "html", + "lesskey": "lesskey", + "*.Xresources": "xdefaults", + "i3/config": "i3", + "i3/*.conf": "i3", + "polybar/config": "ini", + "polybar/*.conf": "ini", + "*.S": "gas", + 
"*.html.en": "html", + "*.html.de": "html", + "stop_token": "cpp", + "*.tcc": "cpp", + "initializer_list": "cpp", + "streambuf": "cpp", + "tuple": "cpp", + "memory": "cpp", + "*.def": "cpp", + "array": "cpp", + "deque": "cpp", + "forward_list": "cpp", + "list": "cpp", + "string": "cpp", + "vector": "cpp", + "any": "cpp", + "executor": "cpp", + "internet": "cpp", + "io_context": "cpp", + "memory_resource": "cpp", + "socket": "cpp", + "string_view": "cpp", + "timer": "cpp", + "functional": "cpp", + "rope": "cpp", + "slist": "cpp", + "coroutine": "cpp", + "future": "cpp", + "scoped_allocator": "cpp", + "valarray": "cpp", + "regex": "cpp", + "cstdint": "cpp", + "bitset": "cpp", + "random": "cpp", + "optional": "cpp", + "dynamic_bitset": "cpp", + "mutex": "cpp", + "shared_mutex": "cpp", + "algorithm": "cpp", + "atomic": "cpp", + "bit": "cpp", + "cassert": "cpp", + "cctype": "cpp", + "cerrno": "cpp", + "chrono": "cpp", + "ciso646": "cpp", + "clocale": "cpp", + "cmath": "cpp", + "compare": "cpp", + "concepts": "cpp", + "cstddef": "cpp", + "cstdio": "cpp", + "cstdlib": "cpp", + "cstring": "cpp", + "ctime": "cpp", + "cwchar": "cpp", + "cwctype": "cpp", + "map": "cpp", + "unordered_map": "cpp", + "exception": "cpp", + "fstream": "cpp", + "ios": "cpp", + "iosfwd": "cpp", + "iostream": "cpp", + "istream": "cpp", + "iterator": "cpp", + "limits": "cpp", + "new": "cpp", + "numeric": "cpp", + "ostream": "cpp", + "queue": "cpp", + "ranges": "cpp", + "ratio": "cpp", + "sstream": "cpp", + "stdexcept": "cpp", + "system_error": "cpp", + "thread": "cpp", + "type_traits": "cpp", + "typeinfo": "cpp", + "utility": "cpp", + "variant": "cpp", + "charconv": "cpp", + "cfenv": "cpp", + "cinttypes": "cpp", + "csetjmp": "cpp", + "csignal": "cpp", + "cstdarg": "cpp", + "cuchar": "cpp", + "set": "cpp", + "unordered_set": "cpp", + "codecvt": "cpp", + "condition_variable": "cpp", + "iomanip": "cpp", + "*.run": "xml", + "span": "cpp", + "config.h": "c", + "bench.h": "c", + "hash_map": "cpp", + 
"hash_set": "cpp", + "strstream": "cpp", + "decimal": "cpp", + "buffer": "cpp", + "netfwd": "cpp", + "propagate_const": "cpp", + "source_location": "cpp", + "complex": "cpp", + "numbers": "cpp", + "typeindex": "cpp", + "bool_set": "cpp" + }, + "vscode-as-git-mergetool.settingsAssistantOnStartup": false, + "makefile.makeDirectory": "build/x86_64", + "C_Cpp.errorSquiggles": "enabledIfIncludesResolve", + "C_Cpp.default.cppStandard": "gnu++17", + "C_Cpp.default.cStandard": "gnu17", + "C_Cpp.workspaceSymbols": "Just My Code", + "C_Cpp.inlayHints.parameterNames.enabled": true, + "C_Cpp.inlayHints.autoDeclarationTypes.showOnLeft": true, + "C_Cpp.intelliSenseMemoryLimit": 16384, + "makefile.makefilePath": "", + "makefile.dryrunSwitches": [ + "--keep-going", + "--print-directory", + "KERNEL=nova", + "BOARD=pc", + "run/vscode", + "VERBOSE=" + ], + "C_Cpp.default.intelliSenseMode": "linux-gcc-x64", + "C_Cpp.default.mergeConfigurations": true, + "C_Cpp.autocompleteAddParentheses": true, + "C_Cpp.intelliSenseCacheSize": 20480, + "makefile.buildBeforeLaunch": false, + "makefile.extensionOutputFolder": ".vscode", + "makefile.configurationCachePath": ".vscode/configurationCache.log", + "explorer.excludeGitIgnore": true, + "makefile.buildLog": ".vscode/build.log", + "definition-autocompletion.update_index_on_change": true, + "definition-autocompletion.update_index_interval": 5, + "C_Cpp.intelliSenseEngineFallback": "enabled", + "makefile.extensionLog": ".vscode/extension.log", + "makefile.ignoreDirectoryCommands": false, + "html.format.wrapLineLength": 80, + "editor.wordWrap": "bounded", + "editor.wordWrapColumn": 90, + "editor.fontSize": 13, + "terminal.integrated.shellIntegration.suggestEnabled": true, + "git.mergeEditor": true, + "merge-conflict.autoNavigateNextConflict.enabled": true, + "git.ignoreLimitWarning": true, + "customizeUI.statusBarPosition": "under-panel" +} \ No newline at end of file diff --git a/README.md b/README.md new file mode 100644 index 
0000000000..1a5ddae5a8 --- /dev/null +++ b/README.md @@ -0,0 +1,24 @@ +# EalánOS — An Operating System for Heterogeneous Many-core Systems + +EalánOS is a research operating system, based on the [Genode OS Framework](https://genode.org/), that explores new architectural designs and resource management strategies for many-core systems with heterogeneous computing and memory resources. It is a reference implementation of the [MxKernel](https://mxkernel.org/) architecture. + +## MxKernel Architecture +The MxKernel is a new operating system architecture inspired by many-core operating systems, such as [FOS](https://dl.acm.org/doi/abs/10.1145/1531793.1531805) and [Tessellation](https://www.usenix.org/event/hotpar09/tech/full_papers/liu/liu_html/), as well as hypervisors, exokernels and unikernels. +Novel approaches of the MxKernel include the use of tasks, short-lived closed units of work, instead of threads as the control-flow abstraction, and the concept of elastic cells as the process abstraction. The architecture was first described in the paper [MxKernel: Rethinking Operating System Architecture for Many-core Hardware](https://ess.cs.uos.de/research/projects/MxKernel/sfma-mxkernel.pdf) presented at the [9th Workshop on Systems for Multi-core and Heterogeneous Architectures](https://sites.google.com/site/sfma2019eurosys/). + +## Task-based programming +EalánOS promotes task-parallel programming by including the [MxTasking](https://github.com/jmuehlig/mxtasking.git) task-parallel runtime library. MxTasking improves on the common task-parallel programming paradigm by allowing tasks to be annotated with hints about the tasks' behavior, such as memory accesses. These annotations are used by the runtime environment to implement advanced features, like automatic prefetching of data and automatic synchronization of concurrent memory accesses. 
+ +## Documentation +Because EalánOS is based on Genode, the primary documentation, for now, can be found in the book [Genode Foundations](https://genode.org/documentation/genode-foundations-22-05.pdf). + +## Features added to Genode +EalánOS extends the Genode OS framework with functionality needed by and helpful for many-core systems with non-uniform memory access (NUMA), such as +- A topology service that allows querying NUMA information from within a Genode component. +- A port of [MxTasking](https://github.com/jmuehlig/mxtasking.git), a task-based framework designed to aid in developing parallel applications. +- (WiP) An extension of Genode's RAM service that enables applications to allocate memory from a specific NUMA region, similar to libnuma's `numa_alloc_on_node`, and thus improve NUMA-locality of internal data objects. +- (WiP) An interface for using Hardware Performance Monitoring Counters inside Genode components. Currently, performance counters are only implemented for AMD's Zen1 microarchitecture. + +### Acknowledgement +The work on EalánOS and the MxKernel architecture is supported by the German Research Foundation (DFG) as part of the priority program 2037 "[Scalable Data Management on Future Hardware](https://dfg-spp2037.de/)" under Grant numbers SP968/9-1 and SP968/9-2. +The MxTasking framework is developed as part of the same DFG project at the [DBIS group at TU Dortmund University](http://dbis.cs.tu-dortmund.de/cms/de/home/index.html) and funded under Grant numbers TE1117/2-1. 
\ No newline at end of file diff --git a/repos/base-hw/lib/mk/bootstrap-hw.inc b/repos/base-hw/lib/mk/bootstrap-hw.inc index 16e2ecf94f..6788c8e1b6 100644 --- a/repos/base-hw/lib/mk/bootstrap-hw.inc +++ b/repos/base-hw/lib/mk/bootstrap-hw.inc @@ -10,6 +10,7 @@ SRC_CC += lib/base/allocator_avl.cc SRC_CC += lib/base/avl_tree.cc SRC_CC += lib/base/elf_binary.cc SRC_CC += lib/base/heap.cc +SRC_CC += lib/base/regional_heap.cc SRC_CC += lib/base/registry.cc SRC_CC += lib/base/log.cc SRC_CC += lib/base/output.cc diff --git a/repos/base-nova/include/nova/cap_map.h b/repos/base-nova/include/nova/cap_map.h index 4ee78bf29e..e5b1d3d221 100644 --- a/repos/base-nova/include/nova/cap_map.h +++ b/repos/base-nova/include/nova/cap_map.h @@ -23,6 +23,8 @@ #include +#include + #include #include @@ -36,9 +38,10 @@ namespace Genode { addr_t _base = 0; addr_t _last = 0; - enum { + enum + { HEADER = sizeof(_base) + sizeof(_mutex) + sizeof(_last), - CAP_RANGE_SIZE = 4096, + CAP_RANGE_SIZE = 131072, WORDS = (CAP_RANGE_SIZE - HEADER - sizeof(Avl_node)) / sizeof(addr_t), }; @@ -51,8 +54,8 @@ namespace Genode { Cap_range(addr_t base) : _base(base) { - static_assert(sizeof(*this) == CAP_RANGE_SIZE, - "Cap_range misconfigured"); + //static_assert(sizeof(*this) == CAP_RANGE_SIZE, + //"Cap_range misconfigured"); for (unsigned i = 0; i < elements(); i++) _cap_array[i] = 0; diff --git a/repos/base-nova/include/nova/syscall-generic.h b/repos/base-nova/include/nova/syscall-generic.h index db8fce16c7..85f5c6b8ac 100644 --- a/repos/base-nova/include/nova/syscall-generic.h +++ b/repos/base-nova/include/nova/syscall-generic.h @@ -3,8 +3,9 @@ * \author Norman Feske * \author Sebastian Sumpf * \author Alexander Boettcher + * \author Michael Müller * \author Benjamin Lamowski - * \date 2009-12-27 + * \date 2022-12-13 */ /* @@ -36,6 +37,7 @@ #define _INCLUDE__NOVA__SYSCALL_GENERIC_H_ #include +#include namespace Nova { @@ -65,6 +67,16 @@ namespace Nova { NOVA_ASSIGN_PCI = 0xd, NOVA_ASSIGN_GSI = 0xe, 
NOVA_PD_CTRL = 0xf, + NOVA_YIELD = 0x10, + NOVA_MXINIT = 0x11, + NOVA_ALLOC_CORES= 0x12, + NOVA_CORE_ALLOC = 0x13, + NOVA_CREATE_CELL= 0x14, + NOVA_CELL_CTRL = 0x15, + NOVA_CONS_CTRL = 0x16, + NOVA_CPUID = 0x17, + NOVA_RESERVE_CPU= 0x18, + NOVA_CREATE_HAB = 0x19, }; /** @@ -135,11 +147,19 @@ namespace Nova { bool has_feature_svm() const { return feature_flags & (1 << 2); } struct Cpu_desc { + enum Vendor + { + UNKNOWN, + INTEL, + AMD + }; + uint8_t flags; uint8_t thread; uint8_t core; uint8_t package; uint8_t acpi_id; + uint8_t vendor; uint8_t family; uint8_t model; uint8_t stepping:4; @@ -179,6 +199,36 @@ namespace Nova { return desc ? desc->flags & 0x1 : false; } + unsigned numa_nodes() const { + unsigned node_num = 1; + unsigned long nodes = 0x0; + unsigned long last_node = 0; + + for (unsigned cpu = 0; cpu < cpu_max(); cpu++) { + Cpu_desc const *c = cpu_desc_of_cpu(cpu); + if (c->numa_id != last_node && !(nodes & (1<numa_id))) { + node_num++; + nodes |= (1 << c->numa_id); + } + } + return node_num; + } + + unsigned numa_nodes() const { + unsigned node_num = 1; + unsigned long nodes = 0x0; + unsigned long last_node = 0; + + for (unsigned cpu = 0; cpu < cpu_max(); cpu++) { + Cpu_desc const *c = cpu_desc_of_cpu(cpu); + if (c->numa_id != last_node && !(nodes & (1<numa_id))) { + node_num++; + nodes |= (1 << c->numa_id); + } + } + return node_num; + } + /** * Resort CPU ids such, that * - the boot CPU id is ever logical CPU id 0 @@ -196,12 +246,7 @@ namespace Nova { unsigned const num_cpus = cpus(); bool too_many_cpus = false; unsigned cpu_i = 0; - - /* fallback lambda in case re-ordering fails */ - auto remap_failure = [&] { - for (uint16_t i = 0; i < max_cpus; i++) { map_cpus[i] = i; } - return false; - }; + unsigned const num_nodes = numa_nodes(); /* assign boot cpu ever the virtual cpu id 0 */ Cpu_desc const * const boot = cpu_desc_of_cpu(boot_cpu); @@ -211,45 +256,25 @@ namespace Nova { map_cpus[cpu_i++] = (uint8_t)boot_cpu; if (cpu_i >= num_cpus) return true; - 
if (cpu_i >= max_cpus) - return remap_failure(); - /* assign cores + SMT threads first and skip E-cores */ - bool done = for_all_cpus([&](auto const &cpu, auto const kernel_cpu_id) { - if (kernel_cpu_id == boot_cpu) - return false; + for (uint8_t node = 0; node < num_nodes; node++) { + for (unsigned i = 0; i < num_cpus; i++) { + if (i == boot_cpu || !is_cpu_enabled(i)) + continue; - /* handle normal or P-core */ - if (cpu.e_core()) - return false; + Cpu_desc const *c = cpu_desc_of_cpu(i); + if (!(c->numa_id == node)) + continue; - map_cpus[cpu_i++] = (uint8_t)kernel_cpu_id; + cpu_numa_map[i] = c->numa_id; + map_cpus[cpu_i++] = (uint8_t)i; - too_many_cpus = !!(cpu_i >= max_cpus); + if (cpu_i >= num_cpus) + return true; + } + } - return (cpu_i >= num_cpus || too_many_cpus); - }); - - if (done) - return too_many_cpus ? remap_failure() : true; - - /* assign remaining E-cores */ - done = for_all_cpus([&](auto &cpu, auto &kernel_cpu_id) { - if (kernel_cpu_id == boot_cpu) - return false; - - /* handle solely E-core */ - if (!cpu.e_core()) - return false; - - map_cpus[cpu_i++] = (uint16_t)kernel_cpu_id; - - too_many_cpus = !!(cpu_i >= max_cpus); - - return (cpu_i >= num_cpus || too_many_cpus); - }); - - return too_many_cpus ? 
remap_failure() : done; + return false; } /** @@ -323,6 +348,28 @@ namespace Nova { SC_EC_TIME = 3, }; + /** + * Hpc operations + * + */ + enum Hpc_op + { + HPC_SETUP = 6U, + HPC_START = 7U, + HPC_STOP = 8U, + HPC_RESET = 9U, + HPC_READ = 10U, + }; + + /** + * Cell operations + */ + enum Cell_op + { + SHRINK = 0, + GROW = 1, + }; + /** * Pd operations */ @@ -612,7 +659,7 @@ namespace Nova { public: - enum { DEFAULT_QUANTUM = 10000, DEFAULT_PRIORITY = 64 }; + enum { DEFAULT_QUANTUM = 1500, DEFAULT_PRIORITY = 64 }; Qpd(mword_t quantum = DEFAULT_QUANTUM, mword_t priority = DEFAULT_PRIORITY) @@ -891,5 +938,13 @@ namespace Nova { SM_SEL_EC = 0x1d, /* convention on Genode */ }; + /** + * Console operations + */ + enum Cons_op + { + LOCK = 0, + UNLOCK = 1, + }; } #endif /* _INCLUDE__NOVA__SYSCALL_GENERIC_H_ */ diff --git a/repos/base-nova/include/spec/64bit/nova/syscalls.h b/repos/base-nova/include/spec/64bit/nova/syscalls.h index f116d73585..82e28b7594 100644 --- a/repos/base-nova/include/spec/64bit/nova/syscalls.h +++ b/repos/base-nova/include/spec/64bit/nova/syscalls.h @@ -37,6 +37,7 @@ #include #include +#include #define ALWAYS_INLINE __attribute__((always_inline)) @@ -45,7 +46,7 @@ namespace Nova { ALWAYS_INLINE inline mword_t rdi(Syscall s, uint8_t flags, mword_t sel) { - return sel << 8 | (flags & 0xf) << 4 | s; + return sel << 9 | (flags & 0xf) << 5 | s; } @@ -155,7 +156,7 @@ namespace Nova { ALWAYS_INLINE inline uint8_t call(mword_t pt) { - return syscall_1(NOVA_CALL, 0, pt, 0); + return syscall_1(NOVA_CALL, 0, 0, pt); } @@ -254,6 +255,36 @@ namespace Nova { return util_time(NOVA_EC_CTRL, ec, Ec_op::EC_TIME, time); } + ALWAYS_INLINE + inline uint8_t hpc_ctrl(Hpc_op op, mword_t sel, mword_t type, mword_t &p1, mword_t &p2, mword_t &p3) + { + uint8_t res = syscall_6(NOVA_EC_CTRL, op, sel, type, p1, p2, p3); + return res; + } + + ALWAYS_INLINE + inline uint8_t hpc_read(mword_t sel, mword_t type, mword_t &value) + { + return syscall_5(NOVA_EC_CTRL, HPC_READ, sel, 
type, value); + } + + ALWAYS_INLINE + inline uint8_t hpc_start(mword_t sel, mword_t type) + { + return syscall_1(NOVA_EC_CTRL, HPC_START, sel, type); + } + + ALWAYS_INLINE + inline uint8_t hpc_stop(mword_t sel, mword_t type) + { + return syscall_1(NOVA_EC_CTRL, HPC_STOP, sel, type); + } + + ALWAYS_INLINE + inline uint8_t hpc_reset(mword_t sel, mword_t type, mword_t val) + { + return syscall_2(NOVA_EC_CTRL, HPC_RESET, sel, type, val); + } ALWAYS_INLINE inline uint8_t create_sc(mword_t sc, mword_t pd, mword_t ec, Qpd qpd) @@ -416,5 +447,76 @@ namespace Nova { msi_data = cpu; return syscall_5(NOVA_ASSIGN_GSI, flags.value(), sm, msi_addr, msi_data, si); } + + ALWAYS_INLINE + inline uint8_t yield(bool release_core = true, bool block = true) + { + Nova::uint8_t flags = block ? release_core : 3; + return syscall_0(NOVA_YIELD, flags, 0); + } + + ALWAYS_INLINE + inline uint8_t mxinit(mword_t rip, mword_t id, mword_t channel) + { + return syscall_2(NOVA_MXINIT, 0, id, rip, channel); + } + + ALWAYS_INLINE + inline uint8_t alloc_cores(mword_t count, mword_t &allocated, mword_t &remainder) + { + Nova::mword_t rest = 0; + Nova::mword_t null = 0; + Nova::uint8_t res = syscall_6(NOVA_ALLOC_CORES, 0, 0, count, allocated, rest, null); + remainder = rest; + return res; + } + + ALWAYS_INLINE + inline uint8_t wake_core(mword_t core) + { + return syscall_1(NOVA_RESERVE_CPU, 0, 0, core); + } + + ALWAYS_INLINE + inline uint8_t core_allocation(mword_t &allocation, bool mask = false) + { + return syscall_5(NOVA_CORE_ALLOC, static_cast(mask), 0, allocation, allocation); + } + + ALWAYS_INLINE + inline uint8_t cpu_id(mword_t &cpuid) + { + return syscall_5(NOVA_CPUID, 0, 0, cpuid, cpuid); + } + + ALWAYS_INLINE + inline uint8_t create_cell(mword_t pd, mword_t prio, mword_t mask, mword_t start, mword_t count) + { + return syscall_5(NOVA_CREATE_CELL, static_cast(prio), pd, mask, start, count); + } + + ALWAYS_INLINE + inline uint8_t update_cell(mword_t pd, mword_t mask, mword_t index) + { + return 
syscall_2(NOVA_CELL_CTRL, Cell_op::GROW, pd, mask, index); + } + + ALWAYS_INLINE + inline uint8_t create_habitat(mword_t start_cpu, mword_t size) + { + return syscall_2(NOVA_CREATE_HAB, 0, 0, start_cpu, size); + } + + ALWAYS_INLINE + inline uint8_t acquire_console() + { + return syscall_0(NOVA_CONS_CTRL, Nova::Cons_op::LOCK); + } + + ALWAYS_INLINE + inline uint8_t release_console() + { + return syscall_0(NOVA_CONS_CTRL, Nova::Cons_op::UNLOCK); + } } #endif /* _INCLUDE__SPEC__64BIT__NOVA__SYSCALLS_H_ */ diff --git a/repos/base-nova/lib/mk/base-nova-common.mk b/repos/base-nova/lib/mk/base-nova-common.mk index e990dc33f7..f395bc6486 100644 --- a/repos/base-nova/lib/mk/base-nova-common.mk +++ b/repos/base-nova/lib/mk/base-nova-common.mk @@ -14,6 +14,7 @@ SRC_CC += stack_area_addr.cc SRC_CC += cap_map.cc SRC_CC += capability.cc SRC_CC += signal_transmitter.cc +SRC_CC += perf.cc # # Prevent the compiler from deleting null pointer checks related to 'this == 0' diff --git a/repos/base-nova/lib/mk/core-nova.inc b/repos/base-nova/lib/mk/core-nova.inc index 206f936546..31dbcdd6fc 100644 --- a/repos/base-nova/lib/mk/core-nova.inc +++ b/repos/base-nova/lib/mk/core-nova.inc @@ -43,7 +43,8 @@ SRC_CC += stack_area.cc \ signal_receiver.cc \ vm_session_component.cc \ vm_session_common.cc \ - heartbeat.cc + heartbeat.cc \ + topo_session_component.cc INC_DIR += $(REP_DIR)/src/core/include \ $(REP_DIR)/src/include \ @@ -77,4 +78,5 @@ vpath platform_rom_modules.cc $(GEN_CORE_DIR) vpath stack_area.cc $(GEN_CORE_DIR) vpath heartbeat.cc $(GEN_CORE_DIR) vpath vm_session_common.cc $(GEN_CORE_DIR) +vpath topo_session_component.cc $(GEN_CORE_DIR) vpath %.cc $(REP_DIR)/src/core diff --git a/repos/base-nova/ports/nova.hash b/repos/base-nova/ports/nova.hash index 2101237a0c..ecb39db6d6 100644 --- a/repos/base-nova/ports/nova.hash +++ b/repos/base-nova/ports/nova.hash @@ -1 +1 @@ -d58086480d6a21a06bbd956e2d2e605d0f39b6b2 +968b205c5c1b6a93846626cf7307b19eb6bac891 diff --git 
a/repos/base-nova/ports/nova.port b/repos/base-nova/ports/nova.port index cc0e8e0ac2..ac04cb3edb 100644 --- a/repos/base-nova/ports/nova.port +++ b/repos/base-nova/ports/nova.port @@ -2,9 +2,9 @@ LICENSE := GPLv2 VERSION := git DOWNLOADS := nova.git -# r10 branch -URL(nova) := https://github.com/alex-ab/NOVA.git -REV(nova) := fc9ad04ecec3911302451fcbf6cd87063be66ad0 +# feature/numa branch +URL(nova) := https://github.com/mmueller41/NOVA.git +REV(nova) := tukija DIR(nova) := src/kernel/nova PATCHES := $(sort $(wildcard $(REP_DIR)/patches/*.patch)) diff --git a/repos/base-nova/recipes/api/base-nova/hash b/repos/base-nova/recipes/api/base-nova/hash index 9c9994f2f2..211cd99a21 100644 --- a/repos/base-nova/recipes/api/base-nova/hash +++ b/repos/base-nova/recipes/api/base-nova/hash @@ -1 +1 @@ -2024-10-07 d1a751a3b41d145c3a97b3431ae1f006050fee10 +2024-09-13 1567ef314f1b68c8871e29ffc0ba19e9dbaaee8a diff --git a/repos/base-nova/recipes/src/base-nova/hash b/repos/base-nova/recipes/src/base-nova/hash index 02d5a1981a..241611d6e5 100644 --- a/repos/base-nova/recipes/src/base-nova/hash +++ b/repos/base-nova/recipes/src/base-nova/hash @@ -1 +1 @@ -2024-12-10 bb446406fbb1173c3f243fe323d5cad8423ff958 +2024-11-26 12d153ce5fc0661b11808621eaffeed3a18aa460 diff --git a/repos/base-nova/src/core/core_log_out.cc b/repos/base-nova/src/core/core_log_out.cc index 5eba5717cd..be31c052c3 100644 --- a/repos/base-nova/src/core/core_log_out.cc +++ b/repos/base-nova/src/core/core_log_out.cc @@ -18,13 +18,13 @@ #include #include -void Core::Core_log::out(char const c) +void Genode::Core_log::out(char const c) { enum { CLOCK = 0, BAUDRATE = 115200 }; - static X86_uart uart(0x3f8/*Bios_data_area::singleton()->serial_port()*/, + static X86_uart uart(0x2f8/*Bios_data_area::singleton()->serial_port()*/, CLOCK, BAUDRATE); if (c == '\n') uart.put_char('\r'); uart.put_char(c); -} +} \ No newline at end of file diff --git a/repos/base-nova/src/core/include/platform.h 
b/repos/base-nova/src/core/include/platform.h index 2b40e106d9..d5a26f2963 100644 --- a/repos/base-nova/src/core/include/platform.h +++ b/repos/base-nova/src/core/include/platform.h @@ -19,6 +19,8 @@ #include #include #include +#include +#include namespace Core { class Platform; } @@ -51,7 +53,10 @@ class Core::Platform : public Platform_generic /* map of virtual cpu ids in Genode to kernel cpu ids */ uint16_t map_cpu_ids[MAX_SUPPORTED_CPUS]; - uint8_t cpu_numa_map[MAX_SUPPORTED_CPUS]; + uint8_t cpu_numa_map[MAX_SUPPORTED_CPUS]; + /* map of kernel NUMA region to Genode memory ranges */ + Genode::Range_allocator::Range numa_mem_ranges[MAX_SUPPORTED_CPUS]; // TODO: Add new macro for max of numa regions + addr_t _map_pages(addr_t phys_page, addr_t pages, bool guard_page = false); @@ -121,10 +126,27 @@ class Core::Platform : public Platform_generic unsigned pager_index(Affinity::Location location) const; unsigned kernel_cpu_id(Affinity::Location location) const; + /** + * @brief ID of NUMA region the CPU belongs to + * + * @param kernel_cpu_id id of CPU + * @return unsigned ID of corresponding NUMA region + */ unsigned domain_of_cpu(unsigned kernel_cpu_id) const { return cpu_numa_map[kernel_cpu_id]; } + /** + * @brief Return memory range of a given NUMA region + * + * @param numa_id ID of NUMA region + * @return Genode::Range_allocator::Range physical address range for this NUMA region + */ + + Genode::Range_allocator::Range &mem_range(unsigned numa_id) { + return numa_mem_ranges[numa_id]; + } + Affinity::Location sanitize(Affinity::Location location) { return Affinity::Location(location.xpos() % _cpus.width(), location.ypos() % _cpus.height(), @@ -136,15 +158,17 @@ class Core::Platform : public Platform_generic */ unsigned core_pd_sel() const { return _core_pd_sel; } - void for_each_location(auto const &fn) - { - for (unsigned x = 0; x < _cpus.width(); x++) { - for (unsigned y = 0; y < _cpus.height(); y++) { - Affinity::Location location(x, y, 1, 1); - fn(location); + 
template + void for_each_location(FUNC const &fn) + { + for (unsigned x = 0; x < _cpus.width(); x++) { + for (unsigned y = 0; y < _cpus.height(); y++) { + Affinity::Location location(x, y, 1, 1); + fn(location); + } } } - } -}; + }; +} #endif /* _CORE__INCLUDE__PLATFORM_H_ */ diff --git a/repos/base-nova/src/core/pd_session_support.cc b/repos/base-nova/src/core/pd_session_support.cc index 248038b390..6d0974c07b 100644 --- a/repos/base-nova/src/core/pd_session_support.cc +++ b/repos/base-nova/src/core/pd_session_support.cc @@ -108,6 +108,58 @@ Pd_session::Map_result Pd_session_component::map(Pd_session::Virt_range const vi return Map_result::OK; } +void _calculate_mask_for_location(Nova::mword_t *core_mask, const Affinity::Location &loc) +{ + for (unsigned y = loc.ypos(); y < loc.ypos() + loc.height(); y++) + { + for (unsigned x = loc.xpos(); x < loc.xpos()+loc.width(); x++) + { + unsigned kernel_cpu = platform_specific().kernel_cpu_id(Affinity::Location(x, y, loc.width(), loc.height())); + unsigned i = kernel_cpu / (sizeof(Nova::mword_t) * 8); + unsigned b = kernel_cpu % (sizeof(Nova::mword_t) * 8); + core_mask[i] |= (1UL << b); + + Genode::log("core_mask[", i, "]=", core_mask[i], " i=", i, "b=", b, "kernel_cpu=", kernel_cpu); + } + } +} + + +void Pd_session_component::create_cell(long prioritiy, const Affinity::Location &loc) +{ + Nova::uint8_t err = Nova::NOVA_OK; + unsigned num_cpus = platform_specific().MAX_SUPPORTED_CPUS; + unsigned num_vect = num_cpus / (sizeof(Nova::mword_t) * 8); + Nova::mword_t core_mask[num_vect]; + + Genode::memset(core_mask, 0, sizeof(core_mask)); + + _calculate_mask_for_location(core_mask, loc); + + log("Requested to create new cell for <", this->label(), "> of priority ", prioritiy, " at ", loc); + for (unsigned i = 0; i < num_vect; i++) { + if ((err = Nova::create_cell(_pd->pd_sel(), prioritiy, core_mask[i], i, 1) != Nova::NOVA_OK)) + { + error("Could not create new cell: ", err); + } + } +} + +void 
Pd_session_component::update_cell(const Affinity::Location &loc) +{ + Nova::uint8_t err = Nova::NOVA_OK; + unsigned num_cpus = platform_specific().affinity_space().total(); + unsigned num_vect = num_cpus / (sizeof(Nova::mword_t) * 8); + Nova::mword_t core_mask[num_vect]; + + _calculate_mask_for_location(core_mask, loc); + + for (unsigned i = 0; i < num_vect; i++) { + if ((err = Nova::update_cell(_pd->pd_sel(), core_mask[i], i))) { + error("Failed to update cell <", label(), ">: ", err); + } + } +} using State = Genode::Pd_session::Managing_system_state; diff --git a/repos/base-nova/src/core/platform.cc b/repos/base-nova/src/core/platform.cc index ca974d40dd..295f90d7df 100644 --- a/repos/base-nova/src/core/platform.cc +++ b/repos/base-nova/src/core/platform.cc @@ -103,7 +103,7 @@ addr_t Core::Platform::_map_pages(addr_t const phys_addr, addr_t const pages, *****************************/ -enum { CORE_PAGER_UTCB_ADDR = 0xbff02000 }; +enum { CORE_PAGER_UTCB_ADDR = 0xfff02000 }; /** @@ -200,7 +200,7 @@ static void page_fault_handler() static addr_t core_pager_stack_top() { - enum { STACK_SIZE = 4*1024 }; + enum { STACK_SIZE = 8*1024 }; static char stack[STACK_SIZE]; return (addr_t)&stack[STACK_SIZE - sizeof(addr_t)]; } @@ -445,6 +445,7 @@ Core::Platform::Platform() size_t kernel_memory = 0; log("Found ", num_mem_desc, " memory entries in HIP"); + memset(numa_mem_ranges, 0, sizeof(Genode::Range_allocator::Range) * MAX_SUPPORTED_CPUS); /* * All "available" ram must be added to our physical allocator before all * non "available" regions that overlaps with ram get removed. 
@@ -476,6 +477,11 @@ Core::Platform::Platform() _io_mem_alloc.remove_range((addr_t)base, (size_t)size); ram_alloc().add_range((addr_t)base, (size_t)size); + log("Add mem range ", reinterpret_cast(base), "-", reinterpret_cast(base + size), " for node ", mem_desc->domain); + if (numa_mem_ranges[mem_desc->domain].start == 0) + numa_mem_ranges[mem_desc->domain] = {base, base + size}; + else if (base > numa_mem_ranges[mem_desc->domain].end) + numa_mem_ranges[mem_desc->domain].end = base + size; } addr_t hyp_log = 0; @@ -795,13 +801,13 @@ Core::Platform::Platform() log(Number_of_bytes(kernel_memory), " kernel memory"); log(""); /* add capability selector ranges to map */ - unsigned const first_index = 0x2000; + unsigned const first_index = 0x0000; unsigned index = first_index; for (unsigned i = 0; i < 32; i++) { void * phys_ptr = nullptr; - ram_alloc().alloc_aligned(get_page_size(), get_page_size_log2()).with_result( + ram_alloc().alloc_aligned(128*get_page_size(), get_page_size_log2()).with_result( [&] (void *ptr) { phys_ptr = ptr; }, [&] (Range_allocator::Alloc_error) { /* covered by nullptr test below */ }); @@ -809,7 +815,7 @@ Core::Platform::Platform() break; addr_t phys_addr = reinterpret_cast(phys_ptr); - addr_t core_local_addr = _map_pages(phys_addr, 1); + addr_t core_local_addr = _map_pages(phys_addr, 128); if (!core_local_addr) { ram_alloc().free(phys_ptr); @@ -895,6 +901,7 @@ Core::Platform::Platform() (unsigned)(sc_idle_base + kernel_cpu_id), "killed"); }); + log("Added idle ECs to trace sources"); /* add exception handler EC for core and EC root thread to trace sources */ struct Core_trace_source : public Trace::Source::Info_accessor, @@ -943,6 +950,7 @@ Core::Platform::Platform() registry.insert(this); } }; + log("Added exception handler EC"); new (core_mem_alloc()) Core_trace_source(Trace::sources(), @@ -953,6 +961,7 @@ Core::Platform::Platform() Core_trace_source(Trace::sources(), Affinity::Location(0, 0, _cpus.width(), 1), hip.sel_exc + 1, "root"); + 
log("Created trace sources"); } diff --git a/repos/base-nova/src/core/ram_dataspace_support.cc b/repos/base-nova/src/core/ram_dataspace_support.cc index fbc0cb1c62..c3afb04ae6 100644 --- a/repos/base-nova/src/core/ram_dataspace_support.cc +++ b/repos/base-nova/src/core/ram_dataspace_support.cc @@ -56,29 +56,35 @@ static inline void * alloc_region(Dataspace_component &ds, const size_t size) void Ram_dataspace_factory::_clear_ds(Dataspace_component &ds) { + size_t const page_rounded_size = align_addr(ds.size(), get_page_size_log2()); - size_t memset_count = page_rounded_size / 4; + size_t memset_count = page_rounded_size / 32; addr_t memset_ptr = ds.core_local_addr(); - if ((memset_count * 4 == page_rounded_size) && !(memset_ptr & 0x3)) - asm volatile ("rep stosl" : "+D" (memset_ptr), "+c" (memset_count) + if ((memset_count * 32 == page_rounded_size) && !(memset_ptr & 0x3)) + { + asm volatile ("rep stosq" : "+D" (memset_ptr), "+c" (memset_count) : "a" (0) : "memory"); - else + } else memset(reinterpret_cast(memset_ptr), 0, page_rounded_size); +} + +void Ram_dataspace_factory::_unmap_ds_from_core(Dataspace_component &ds) +{ + size_t const page_rounded_size = align_addr(ds.size(), get_page_size_log2()); /* we don't keep any core-local mapping */ unmap_local(*reinterpret_cast(Thread::myself()->utcb()), - ds.core_local_addr(), - page_rounded_size >> get_page_size_log2()); + ds.core_local_addr(), + page_rounded_size >> get_page_size_log2()); - platform().region_alloc().free((void*)ds.core_local_addr(), - page_rounded_size); + platform().region_alloc().free((void *)ds.core_local_addr(), + page_rounded_size); ds.assign_core_local_addr(nullptr); } - void Ram_dataspace_factory::_export_ram_ds(Dataspace_component &ds) { size_t page_rounded_size = align_addr(ds.size(), get_page_size_log2()); diff --git a/repos/base-nova/src/lib/base/perf.cc b/repos/base-nova/src/lib/base/perf.cc new file mode 100644 index 0000000000..fd51679e4d --- /dev/null +++ 
b/repos/base-nova/src/lib/base/perf.cc @@ -0,0 +1,86 @@ + +/* + * \brief Performance Counter infrastructure, NOVA-specific implemantation + * \author Michael Müller + * \date 2022-12-15 + */ + +#include + +#include +#include +#include + +unsigned long Genode::Trace::Performance_counter::private_freemask { 0xffff }; +unsigned long Genode::Trace::Performance_counter::shared_freemask { 0xffff0000 }; + +void Genode::Trace::Performance_counter::_init_masks() +{ + Nova::Hip::Cpu_desc::Vendor vendor = Nova::Hip::Cpu_desc::AMD; + if (vendor == Nova::Hip::Cpu_desc::AMD) + { + private_freemask = 0x3f; // 6 core performance counters + shared_freemask = 0x1f0000; // 5 L3 complex performance counters + } + else if (vendor == Nova::Hip::Cpu_desc::INTEL) + { + private_freemask = 0x7fff; + shared_freemask = 0x7fff0000; // 15 CBO performance counters + } +} + +void Genode::Trace::Performance_counter::setup(unsigned counter, uint64_t event, uint64_t mask, uint64_t flags) +{ + Nova::mword_t evt = event; + Nova::mword_t msk = mask; + Nova::mword_t flg = flags; + Nova::uint8_t rc; + Nova::mword_t type = (counter >>4); + Nova::mword_t sel = type == Performance_counter::CORE ? counter : counter & 0xf; + + if ((rc = (Nova::hpc_ctrl(Nova::HPC_SETUP, sel, type, evt, msk, flg))) != Nova::NOVA_OK) + throw Genode::Trace::Pfc_access_error(rc); +} + +void Genode::Trace::Performance_counter::start(unsigned counter) +{ + Nova::uint8_t rc; + Nova::mword_t type = (counter >> 4); + Nova::mword_t sel = type == Performance_counter::CORE ? counter : counter & 0xf; + + if ((rc = Nova::hpc_start(sel, type)) != Nova::NOVA_OK) + throw Genode::Trace::Pfc_access_error(rc); +} + +void Genode::Trace::Performance_counter::stop(unsigned counter) +{ + Nova::uint8_t rc; + Nova::mword_t type = (counter >>4); + Nova::mword_t sel = type == Performance_counter::CORE ? 
counter : counter & 0xf; + + if ((rc = Nova::hpc_stop(sel, type)) != Nova::NOVA_OK) + throw Genode::Trace::Pfc_access_error(rc); +} + +void Genode::Trace::Performance_counter::reset(unsigned counter, unsigned val) +{ + Nova::uint8_t rc; + Nova::mword_t type = (counter >>4); + Nova::mword_t sel = type == Performance_counter::CORE ? counter : counter & 0xf; + + if ((rc = Nova::hpc_reset(sel, type, val)) != Nova::NOVA_OK) + throw Genode::Trace::Pfc_access_error(rc); +} + +Genode::uint64_t Genode::Trace::Performance_counter::read(unsigned counter) +{ + Nova::uint8_t rc; + Nova::mword_t value = 0; + Nova::mword_t type = (counter >>4); + Nova::mword_t sel = type == Performance_counter::CORE ? counter : counter & 0xf; + + if ((rc = Nova::hpc_read(sel, type, value)) != Nova::NOVA_OK) + throw Genode::Trace::Pfc_access_error(rc); + + return static_cast(value); +} \ No newline at end of file diff --git a/repos/base/include/base/affinity.h b/repos/base/include/base/affinity.h index a41bcc4a24..2d45725ba2 100644 --- a/repos/base/include/base/affinity.h +++ b/repos/base/include/base/affinity.h @@ -93,6 +93,7 @@ class Genode::Affinity return Affinity::Space(node.attribute_value("width", 0U), node.attribute_value("height", 0U)); } + }; @@ -166,6 +167,13 @@ class Genode::Affinity node.attribute_value("height", default_height)); } + bool operator==(Location const &rhs) { + return this->_xpos == rhs._xpos && this->_ypos == rhs._ypos && this->height() == rhs.height() && this->width() == rhs.width(); + } + + bool operator!=(Location const &rhs) { + return !(*this == rhs); + } }; private: @@ -236,6 +244,30 @@ class Genode::Affinity } }; +namespace Genode { + static inline void print(Output &out, const Affinity::Space &space) + { + Genode::print(out, "("); + Genode::print(out, space.width()); + Genode::print(out, ","); + Genode::print(out, space.height()); + Genode::print(out, ")"); + } + + static inline void print(Output &out, const Affinity::Location &loc) + { + Genode::print(out, 
"("); + Genode::print(out, loc.xpos()); + Genode::print(out, ","); + Genode::print(out, loc.ypos()); + Genode::print(out, ","); + Genode::print(out, loc.width()); + Genode::print(out, "×"); + Genode::print(out, loc.height()); + Genode::print(out, ")"); + } +} + Genode::Affinity::Location Genode::Affinity::Space::location_of_index(int index) const { diff --git a/repos/base/include/base/attached_ram_dataspace.h b/repos/base/include/base/attached_ram_dataspace.h index 1f641f06e9..267da5ed4f 100644 --- a/repos/base/include/base/attached_ram_dataspace.h +++ b/repos/base/include/base/attached_ram_dataspace.h @@ -14,6 +14,7 @@ #ifndef _INCLUDE__BASE__ATTACHED_RAM_DATASPACE_H_ #define _INCLUDE__BASE__ATTACHED_RAM_DATASPACE_H_ +#include #include #include #include @@ -112,6 +113,7 @@ class Genode::Attached_ram_dataspace _size(size), _ram(&ram), _rm(&rm), _cache(cache) { _alloc_and_attach(); + memset(_local_addr, 0, _size); } /** diff --git a/repos/base/include/base/child.h b/repos/base/include/base/child.h index 7c70c369bd..2d30aa2f2a 100644 --- a/repos/base/include/base/child.h +++ b/repos/base/include/base/child.h @@ -26,6 +26,7 @@ #include #include #include +#include #include #include @@ -334,6 +335,8 @@ class Genode::Child : protected Rpc_object, /* arguments fetched by the child in response to a yield signal */ Mutex _yield_request_mutex { }; Resource_args _yield_request_args { }; + Mutex _resource_gain_mutex { }; + Resource_args _gained_resources { }; /* number of unanswered heartbeat signals */ unsigned _outstanding_heartbeats = 0; @@ -613,6 +616,7 @@ class Genode::Child : protected Rpc_object, Env_connection _cpu { *this, Env::cpu(), _policy.name() }; Env_connection _log { *this, Env::log(), _policy.name() }; Env_connection _binary { *this, Env::binary(), _policy.binary_name() }; + Env_connection _topo { *this, Env::topo(), _policy.name() }; Constructible > _linker { }; @@ -764,6 +768,7 @@ class Genode::Child : protected Rpc_object, Ram_allocator &ram() { return 
_pd.session(); } Ram_allocator const &ram() const { return _pd.session(); } Cpu_session &cpu() { return _cpu.session(); } + Topo_session &topo() { return _topo.session(); } Pd_session &pd() { return _pd.session(); } Pd_session const &pd() const { return _pd.session(); } @@ -782,6 +787,14 @@ class Genode::Child : protected Rpc_object, */ void yield(Resource_args const &args); + /** + * Bestow resources on the child + * + * By calling this method, the child will be notified about + * the having gained the specified amount of resources. + */ + void accept(Resource_args const &args); + /** * Notify the child about newly available resources */ @@ -818,6 +831,7 @@ class Genode::Child : protected Rpc_object, void resource_request(Resource_args const &) override; void yield_sigh(Signal_context_capability) override; Resource_args yield_request() override; + Resource_args gained_resources() override; void yield_response() override; void heartbeat_sigh(Signal_context_capability) override; void heartbeat_response() override; diff --git a/repos/base/include/base/env.h b/repos/base/include/base/env.h index 57842dad8b..3425abbe67 100644 --- a/repos/base/include/base/env.h +++ b/repos/base/include/base/env.h @@ -18,6 +18,7 @@ #include #include #include +#include namespace Genode { struct Env; } @@ -48,6 +49,12 @@ struct Genode::Env : Interface */ Ram_allocator &ram() { return pd(); } + /** + * @brief Topology model + * + */ + virtual Topo_session &topo() = 0; + /** * Entrypoint for handling RPC requests and signals */ @@ -63,6 +70,12 @@ struct Genode::Env : Interface */ virtual Pd_session_capability pd_session_cap() = 0; + /** + * @brief Return the Topo-session capability + * + */ + virtual Topo_session_capability topo_session_cap() = 0; + /** * ID space of sessions obtained from the parent */ diff --git a/repos/base/include/base/local_connection.h b/repos/base/include/base/local_connection.h index dbacb8bca9..264951f8d9 100644 --- a/repos/base/include/base/local_connection.h +++ 
b/repos/base/include/base/local_connection.h @@ -93,8 +93,15 @@ struct Genode::Local_connection_base : Noncopyable if (_session_state->phase == Session_state::INSUFFICIENT_RAM_QUOTA || _session_state->phase == Session_state::INSUFFICIENT_CAP_QUOTA) - warning("giving up to increase session quota for ", service.name(), " session " + { + warning("[", label, "] giving up to increase session quota for ", service.name(), " session " "after ", (int)NUM_ATTEMPTS, " attempts"); + if (_session_state->phase == Session_state::INSUFFICIENT_RAM_QUOTA) + warning("Insufficient RAM quota: ", resources.ram_quota.value); + + if (_session_state->phase == Session_state::INSUFFICIENT_CAP_QUOTA) + warning("Insufficient CAP quota ", resources.cap_quota.value); + } } void close() diff --git a/repos/base/include/base/ram_allocator.h b/repos/base/include/base/ram_allocator.h index a11d3f6a4d..316fd06d74 100644 --- a/repos/base/include/base/ram_allocator.h +++ b/repos/base/include/base/ram_allocator.h @@ -40,6 +40,8 @@ struct Genode::Ram_allocator : Interface struct Denied : Exception { }; + typedef unsigned Numa_id; + /** * Allocate RAM dataspace * @@ -50,6 +52,7 @@ struct Genode::Ram_allocator : Interface * \return capability to RAM dataspace, or error code of type 'Alloc_error' */ virtual Alloc_result try_alloc(size_t size, Cache cache = CACHED) = 0; + virtual Alloc_result try_alloc(size_t size, Numa_id numa_id, Cache cache = CACHED) = 0; /** * Allocate RAM dataspace @@ -154,6 +157,10 @@ class Genode::Constrained_ram_allocator : public Ram_allocator ); } + Alloc_result try_alloc(size_t size, Numa_id, Cache cache = CACHED) override { + return this->Constrained_ram_allocator::try_alloc(size, cache); /* overriden in platform specific code */ + } + void free(Ram_dataspace_capability ds) override { size_t const size = _ram_alloc.dataspace_size(ds); diff --git a/repos/base/include/base/regional_heap.h b/repos/base/include/base/regional_heap.h new file mode 100644 index 0000000000..f9332383f1 --- 
/dev/null +++ b/repos/base/include/base/regional_heap.h @@ -0,0 +1,230 @@ +/* + * \brief Heap partition + * \author Norman Feske + * \date 2006-05-15 + */ + +/* + * Copyright (C) 2022 Michael Müller + * Copyright (C) 2006-2017 Genode Labs GmbH + * + * This file is part of the Genode OS framework, which is distributed + * under the terms of the GNU Affero General Public License version 3. + */ + +#ifndef _INCLUDE__BASE__REGIONAL_HEAP_H_ +#define _INCLUDE__BASE__REGIONAL_HEAP_H_ + +#include +#include +#include +#include +#include +#include +#include + +namespace Genode { + + class Regional_heap; + class Sliced_regional_heap; +} + + +/** + * Heap that uses dataspaces as backing store and has a physical memory range limited to a single NUMA region + * + * The heap class provides an allocator that uses a list of dataspaces of a RAM + * allocator as backing store. One dataspace may be used for holding multiple + * blocks. + */ +class Genode::Regional_heap : public Allocator +{ + private: + + class Dataspace : public List::Element + { + private: + + /* + * Noncopyable + */ + Dataspace(Dataspace const &); + Dataspace &operator = (Dataspace const &); + + public: + + Ram_dataspace_capability cap; + void *local_addr; + size_t size; + + Dataspace(Ram_dataspace_capability c, void *local_addr, size_t size) + : cap(c), local_addr(local_addr), size(size) { } + }; + + /* + * This structure exists only to make sure that the dataspaces are + * destroyed after the AVL allocator. 
+ */ + class Dataspace_pool : public List + { + private: + + /* + * Noncopyable + */ + Dataspace_pool(Dataspace_pool const &); + Dataspace_pool &operator = (Dataspace_pool const &); + + public: + + Ram_allocator *ram_alloc; /* backing store */ + Region_map *region_map; + + Dataspace_pool(Ram_allocator *ram, Region_map *rm) + : ram_alloc(ram), region_map(rm) { } + + ~Dataspace_pool(); + + void remove_and_free(Dataspace &); + + void reassign_resources(Ram_allocator *ram, Region_map *rm) { + ram_alloc = ram, region_map = rm; } + }; + + Mutex mutable _mutex { }; + Reconstructible _alloc; /* local allocator */ + Dataspace_pool _ds_pool; /* list of dataspaces */ + size_t _quota_limit { 0 }; + size_t _quota_used { 0 }; + size_t _chunk_size { 0 }; + Ram_allocator::Numa_id _numa_id; /* ID of NUMA region to serve allocations from */ + + using Alloc_ds_result = Attempt; + + /** + * Allocate a new dataspace of the specified size + * + * \param size number of bytes to allocate + * \param enforce_separate_metadata if true, the new dataspace + * will not contain any meta data + */ + Alloc_ds_result _allocate_dataspace(size_t size, bool enforce_separate_metadata); + + /** + * Try to allocate block at our local allocator + */ + Alloc_result _try_local_alloc(size_t size); + + /** + * Unsynchronized implementation of 'try_alloc' + */ + Alloc_result _unsynchronized_alloc(size_t size); + + public: + + enum { UNLIMITED = ~0 }; + + Regional_heap(Ram_allocator *ram_allocator, + Region_map *region_map, + Topology::Numa_region ®ion, + size_t quota_limit = UNLIMITED, + void *static_addr = 0, + size_t static_size = 0); + + Regional_heap(Ram_allocator &ram, Region_map &rm, Topology::Numa_region ®ion) : Regional_heap(&ram, &rm, region) { } + + ~Regional_heap(); + + /** + * Reconfigure quota limit + * + * \return negative error code if new quota limit is higher than + * currently used quota. 
+ */ + int quota_limit(size_t new_quota_limit); + + /** + * Re-assign RAM allocator and region map + */ + void reassign_resources(Ram_allocator *ram, Region_map *rm) { + _ds_pool.reassign_resources(ram, rm); } + + /** + * Call 'fn' with the start and size of each backing-store region + */ + template + void for_each_region(FN const &fn) const + { + Mutex::Guard guard(_mutex); + for (Dataspace const *ds = _ds_pool.first(); ds; ds = ds->next()) + fn(ds->local_addr, ds->size); + } + + + /************************* + ** Allocator interface ** + *************************/ + + Alloc_result try_alloc(size_t) override; + void free(void *, size_t) override; + size_t consumed() const override { return _quota_used; } + size_t overhead(size_t size) const override { return _alloc->overhead(size); } + bool need_size_for_free() const override { return false; } +}; + + +/** + * Heap that allocates each block at a separate dataspace + */ +class Genode::Sliced_regional_heap : public Allocator +{ + private: + + /** + * Meta-data header placed in front of each allocated block + */ + struct Block : List::Element + { + Ram_dataspace_capability const ds; + size_t const size; + + Block(Ram_dataspace_capability ds, size_t size) : ds(ds), size(size) + { } + }; + + Ram_allocator &_ram_alloc; /* RAM allocator for backing store */ + Region_map &_region_map; /* region map of the address space */ + size_t _consumed = 0; /* number of allocated bytes */ + List _blocks { }; /* list of allocated blocks */ + Mutex _mutex { }; /* serialize allocations */ + + public: + + /** + * Return size of header prepended to each allocated block in bytes + */ + static constexpr size_t meta_data_size() { return sizeof(Block); } + + /** + * Constructor + */ + Sliced_regional_heap(Ram_allocator &ram_alloc, Region_map ®ion_map); + + /** + * Destructor + */ + ~Sliced_regional_heap(); + + + /************************* + ** Allocator interface ** + *************************/ + + Alloc_result try_alloc(size_t) override; + 
void free(void *, size_t) override; + size_t consumed() const override { return _consumed; } + size_t overhead(size_t size) const override; + bool need_size_for_free() const override { return false; } +}; + +#endif /* _INCLUDE__BASE__HEAP_H_ */ diff --git a/repos/base/include/base/thread.h b/repos/base/include/base/thread.h index af3c86d929..f01673c565 100644 --- a/repos/base/include/base/thread.h +++ b/repos/base/include/base/thread.h @@ -48,6 +48,7 @@ class Genode::Thread using Location = Affinity::Location; using Name = Cpu_session::Name; using Weight = Cpu_session::Weight; + typedef unsigned Numa_id; struct Stack_info { addr_t base; addr_t top; addr_t libc_tls_pointer_offset; }; @@ -432,6 +433,8 @@ class Genode::Thread * Thread affinity */ Affinity::Location affinity() const { return _affinity; } + + void pin(Affinity::Location &loc); }; #endif /* _INCLUDE__BASE__THREAD_H_ */ diff --git a/repos/base/include/base/trace/perf.h b/repos/base/include/base/trace/perf.h new file mode 100644 index 0000000000..e2eb6ee81b --- /dev/null +++ b/repos/base/include/base/trace/perf.h @@ -0,0 +1,93 @@ +/* + * \brief Performance Counter infrastructure + * \author Michael Müller + * \date 2022-12-15 + */ + +#pragma once + +#include + +namespace Genode { namespace Trace { + + class Pfc_no_avail { + }; + + class Performance_counter + { + + private: + static unsigned long private_freemask; + static unsigned long shared_freemask; + + static unsigned _alloc(unsigned long *free_mask) + { + unsigned long current_mask, new_mask; + unsigned bit; + + do + { + current_mask = *free_mask; + bit = __builtin_ffsl(current_mask); + new_mask = current_mask & ~(1 << (bit - 1)); + } while (!__atomic_compare_exchange(free_mask, ¤t_mask, &new_mask, true, __ATOMIC_ACQ_REL, __ATOMIC_RELAXED)); + + if (!bit) // Allocation failed + throw Pfc_no_avail(); + + return bit - 1; // number of the allocated counter + } + + static void _init_masks(); + + public: + typedef unsigned int Counter; + + enum Type + { + 
CORE = 0, + CACHE = 1 + }; + + static unsigned acquire(Type type) { + return (type == Type::CORE) ? alloc_core() : alloc_cbo(); + } + + static unsigned alloc_cbo() { + if (shared_freemask == 0xffff0000) + _init_masks(); + return _alloc(&shared_freemask); + } + + static unsigned alloc_core() { + if (private_freemask == 0xffff) + _init_masks(); + return _alloc(&private_freemask); + } + + static void release(unsigned counter) { + bool core = static_cast(counter >> 4); + if (core) + private_freemask |= (1 << counter); + else + shared_freemask |= (1 << counter); + } + + static void setup(unsigned counter, Genode::uint64_t event, Genode::uint64_t mask, Genode::uint64_t flags); + static void start(unsigned counter); + static void stop(unsigned counter); + static void reset(unsigned counter, unsigned val=0); + static uint64_t read(unsigned counter); + }; + + class Pfc_access_error { + private: + Genode::uint8_t _rc; + + public: + Pfc_access_error(uint8_t rc) : _rc(rc) {} + Genode::uint8_t error_code() { return _rc; } + }; + + } +} \ No newline at end of file diff --git a/repos/base/include/cpu_session/client.h b/repos/base/include/cpu_session/client.h index 48ed7ad007..5fa2e8940d 100644 --- a/repos/base/include/cpu_session/client.h +++ b/repos/base/include/cpu_session/client.h @@ -33,12 +33,19 @@ struct Genode::Cpu_session_client : Rpc_client void kill_thread(Thread_capability thread) override { call(thread); } + void migrate_thread(Thread_capability thread, Affinity::Location loc) override { + call(thread, loc); } + void exception_sigh(Signal_context_capability sigh) override { call(sigh); } Affinity::Space affinity_space() const override { return call(); } + void move(const Affinity::Location loc) override { + call(loc); + } + Dataspace_capability trace_control() override { return call(); } diff --git a/repos/base/include/cpu_session/connection.h b/repos/base/include/cpu_session/connection.h index aedbb5d385..bf56413284 100644 --- 
a/repos/base/include/cpu_session/connection.h +++ b/repos/base/include/cpu_session/connection.h @@ -22,6 +22,8 @@ namespace Genode { struct Cpu_connection; } struct Genode::Cpu_connection : Connection, Cpu_session_client { + enum { RAM_QUOTA = 72*1024 }; + /** * Constructor * diff --git a/repos/base/include/cpu_session/cpu_session.h b/repos/base/include/cpu_session/cpu_session.h index f47e6c1993..d313c9d8d0 100644 --- a/repos/base/include/cpu_session/cpu_session.h +++ b/repos/base/include/cpu_session/cpu_session.h @@ -21,6 +21,7 @@ #include #include #include +#include namespace Genode { @@ -105,6 +106,14 @@ struct Genode::Cpu_session : Session */ virtual void kill_thread(Thread_capability thread) = 0; + /** + * Migrate a thread to a new location + * + * \param thread capability of the thread to migrate + * \param loc component-local location to migrate the thread to + */ + virtual void migrate_thread(Thread_capability thread, Genode::Affinity::Location loc) = 0; + /** * Register default signal handler for exceptions * @@ -125,6 +134,12 @@ struct Genode::Cpu_session : Session */ virtual Affinity::Space affinity_space() const = 0; + /** + * @brief Update affinity location of this CPU session + * + */ + virtual void move(const Genode::Affinity::Location ) = 0; + /** * Translate generic priority value to kernel-specific priority levels * @@ -231,8 +246,10 @@ struct Genode::Cpu_session : Session Capability, Name const &, Affinity::Location, Weight, addr_t); GENODE_RPC(Rpc_kill_thread, void, kill_thread, Thread_capability); + GENODE_RPC(Rpc_migrate_thread, void, migrate_thread, Thread_capability, Affinity::Location); GENODE_RPC(Rpc_exception_sigh, void, exception_sigh, Signal_context_capability); GENODE_RPC(Rpc_affinity_space, Affinity::Space, affinity_space); + GENODE_RPC(Rpc_move, void, move, Affinity::Location); GENODE_RPC(Rpc_trace_control, Dataspace_capability, trace_control); GENODE_RPC(Rpc_ref_account, int, ref_account, Cpu_session_capability); 
GENODE_RPC(Rpc_transfer_quota, int, transfer_quota, Cpu_session_capability, size_t); @@ -241,7 +258,7 @@ struct Genode::Cpu_session : Session GENODE_RPC_INTERFACE(Rpc_create_thread, Rpc_kill_thread, Rpc_exception_sigh, Rpc_affinity_space, Rpc_trace_control, Rpc_ref_account, - Rpc_transfer_quota, Rpc_quota, Rpc_native_cpu); + Rpc_transfer_quota, Rpc_quota, Rpc_native_cpu, Rpc_migrate_thread, Rpc_move); }; diff --git a/repos/base/include/parent/client.h b/repos/base/include/parent/client.h index e160c4b938..f318636ec3 100644 --- a/repos/base/include/parent/client.h +++ b/repos/base/include/parent/client.h @@ -67,6 +67,8 @@ struct Genode::Parent_client : Rpc_client call(sigh); } Resource_args yield_request() override { return call(); } + + Resource_args gained_resources() override { return call(); } void yield_response() override { call(); } diff --git a/repos/base/include/parent/parent.h b/repos/base/include/parent/parent.h index 99f783c54c..20b3d70eb7 100644 --- a/repos/base/include/parent/parent.h +++ b/repos/base/include/parent/parent.h @@ -71,7 +71,8 @@ class Genode::Parent static Client::Id log() { return { 3 }; } static Client::Id binary() { return { 4 }; } static Client::Id linker() { return { 5 }; } - static Client::Id last() { return { 5 }; } + static Client::Id topo() { return { 6 }; } + static Client::Id last() { return { 6 }; } /** * True if session ID refers to an environment session @@ -283,6 +284,12 @@ class Genode::Parent */ virtual void yield_response() = 0; + /** + * Obtain information about the resources gained, e.g. 
from a resource request + * + */ + virtual Resource_args gained_resources() = 0; + /* * Health monitoring */ @@ -328,6 +335,7 @@ class Genode::Parent Resource_args const &); GENODE_RPC(Rpc_yield_sigh, void, yield_sigh, Signal_context_capability); GENODE_RPC(Rpc_yield_request, Resource_args, yield_request); + GENODE_RPC(Rpc_gained_resources, Resource_args, gained_resources); GENODE_RPC(Rpc_yield_response, void, yield_response); GENODE_RPC(Rpc_heartbeat_sigh, void, heartbeat_sigh, Signal_context_capability); GENODE_RPC(Rpc_heartbeat_response, void, heartbeat_response); @@ -337,7 +345,7 @@ class Genode::Parent Rpc_close, Rpc_session_response, Rpc_main_thread, Rpc_deliver_session_cap, Rpc_resource_avail_sigh, Rpc_resource_request, Rpc_yield_sigh, - Rpc_yield_request, Rpc_yield_response, + Rpc_yield_request, Rpc_yield_response, Rpc_gained_resources, Rpc_heartbeat_sigh, Rpc_heartbeat_response); }; diff --git a/repos/base/include/pd_session/client.h b/repos/base/include/pd_session/client.h index 5c8007d10a..507be653ea 100644 --- a/repos/base/include/pd_session/client.h +++ b/repos/base/include/pd_session/client.h @@ -80,6 +80,11 @@ struct Genode::Pd_session_client : Rpc_client { return call(size, cache); } + + Alloc_result try_alloc(size_t size, Ram_allocator::Numa_id numa_id, Cache cache = CACHED) override + { + return call(size, numa_id, cache); + } void free(Ram_dataspace_capability ds) override { call(ds); } @@ -106,6 +111,16 @@ struct Genode::Pd_session_client : Rpc_client Attach_dma_result attach_dma(Dataspace_capability ds, addr_t at) override { return call(ds, at); } + + void create_cell(long prioritiy, const Affinity::Location &loc) override + { + call(prioritiy, loc); + } + + void update_cell(const Affinity::Location &loc) override + { + call(loc); + } }; #endif /* _INCLUDE__PD_SESSION__CLIENT_H_ */ diff --git a/repos/base/include/pd_session/pd_session.h b/repos/base/include/pd_session/pd_session.h index c487129690..4b61ff78e1 100644 --- 
a/repos/base/include/pd_session/pd_session.h +++ b/repos/base/include/pd_session/pd_session.h @@ -21,7 +21,7 @@ #include #include #include - +#include namespace Genode { struct Pd_session; struct Pd_session_client; @@ -29,7 +29,6 @@ namespace Genode { struct Signal_context; } - struct Genode::Pd_session : Session, Ram_allocator { /** @@ -273,6 +272,11 @@ struct Genode::Pd_session : Session, Ram_allocator */ Ram_quota avail_ram() const { return { ram_quota().value - used_ram().value }; } + /** + * \brief Create new dataspace factory for given NUMA node + * \param + */ + /***************************************** ** Access to kernel-specific interface ** @@ -345,6 +349,12 @@ struct Genode::Pd_session : Session, Ram_allocator */ virtual Attach_dma_result attach_dma(Dataspace_capability, addr_t at) = 0; + /* + * Create a new cell at the kernel for this protection domain + */ + virtual void create_cell(long prioritiy, const Affinity::Location &loc) = 0; + + virtual void update_cell(const Affinity::Location &loc) = 0; /********************* ** RPC declaration ** @@ -370,6 +380,7 @@ struct Genode::Pd_session : Session, Ram_allocator GENODE_RPC(Rpc_cap_quota, Cap_quota, cap_quota); GENODE_RPC(Rpc_used_caps, Cap_quota, used_caps); GENODE_RPC(Rpc_try_alloc, Alloc_result, try_alloc, size_t, Cache); + GENODE_RPC(Rpc_try_alloc_numa, Alloc_result, try_alloc, size_t, Ram_allocator::Numa_id, Cache); GENODE_RPC(Rpc_free, void, free, Ram_dataspace_capability); GENODE_RPC(Rpc_transfer_ram_quota, Transfer_ram_quota_result, transfer_quota, Capability, Ram_quota); @@ -382,16 +393,21 @@ struct Genode::Pd_session : Session, Ram_allocator GENODE_RPC(Rpc_attach_dma, Attach_dma_result, attach_dma, Dataspace_capability, addr_t); + GENODE_RPC(Rpc_create_cell, void, create_cell, long, Affinity::Location const &); + + GENODE_RPC(Rpc_update_cell, void, update_cell, Affinity::Location const &); + GENODE_RPC_INTERFACE(Rpc_assign_parent, Rpc_assign_pci, Rpc_map, Rpc_signal_source, 
Rpc_free_signal_source, Rpc_alloc_context, Rpc_free_context, Rpc_submit, Rpc_alloc_rpc_cap, Rpc_free_rpc_cap, Rpc_address_space, Rpc_stack_area, Rpc_linker_area, Rpc_ref_account, Rpc_transfer_cap_quota, Rpc_cap_quota, Rpc_used_caps, - Rpc_try_alloc, Rpc_free, + Rpc_try_alloc, Rpc_try_alloc_numa, Rpc_free, Rpc_transfer_ram_quota, Rpc_ram_quota, Rpc_used_ram, Rpc_native_pd, Rpc_system_control_cap, - Rpc_dma_addr, Rpc_attach_dma); + Rpc_dma_addr, Rpc_attach_dma, + Rpc_create_cell, Rpc_update_cell); }; #endif /* _INCLUDE__PD_SESSION__PD_SESSION_H_ */ diff --git a/repos/base/include/topo_session/capability.h b/repos/base/include/topo_session/capability.h new file mode 100644 index 0000000000..17f7a1a01b --- /dev/null +++ b/repos/base/include/topo_session/capability.h @@ -0,0 +1,22 @@ +/* + * \brief Topo-session capability type + * \author Michael Müller + * \date 2022-10-06 + */ + +/* + * Copyright (C) 2022 Michael Müller + * + * This file is part of EalanOS, witch is based on Genode OS framework + * distributed under the terms of the GNU Affero General Public License version 3. + */ + +#pragma once + +#include +#include + +namespace Genode +{ + typedef Capability Topo_session_capability; +} // namespace Genode \ No newline at end of file diff --git a/repos/base/include/topo_session/client.h b/repos/base/include/topo_session/client.h new file mode 100644 index 0000000000..a7ec0a0bb9 --- /dev/null +++ b/repos/base/include/topo_session/client.h @@ -0,0 +1,59 @@ +/* + * \brief Client-side topology session interface + * \author Michael Müller + * \date 2022-10-06 + * + * A topology session stores the component's view on the hardware topology, i.e. it's location within the NUMA topology. + */ + +/* + * Copyright (C) 2022 Michael Müller + * + * This file is part of EalánOS which is based on the Genode OS framework + * released under the terms of the GNU Affero General Public License version 3. 
+ */ + +#pragma once + +#include +#include +#include +#include + +namespace Genode { + struct Topo_session_client; + struct Node; +} + +struct Genode::Topo_session_client : Rpc_client +{ + explicit Topo_session_client(Topo_session_capability session) + : Rpc_client(session) { } + + Topology::Numa_region node_affinity_of(Affinity::Location const &loc) override { + return call(loc); + } + + Topology::Numa_region node_at_id(unsigned node_id) override { + return call(node_id); + } + + unsigned node_count() override { + return call(); + } + + void reconstruct(const Affinity affinity) override + { + call(affinity); + } + + unsigned phys_id(const Affinity::Location &loc) override + { + return call(loc); + } + + Affinity::Space const global_affinity_space() override + { + return call(); + } +}; \ No newline at end of file diff --git a/repos/base/include/topo_session/connection.h b/repos/base/include/topo_session/connection.h new file mode 100644 index 0000000000..f2ff65b994 --- /dev/null +++ b/repos/base/include/topo_session/connection.h @@ -0,0 +1,50 @@ +/* + * \brief Topology session interface + * \author Michael Müller + * \date 2022-10-06 + * + * A topology session stores the component's view on the hardware topology, i.e. it's location within the NUMA topology. + */ + +/* + * Copyright (C) 2022 Michael Müller + * + * This file is part of EalánOS which is based on the Genode OS framework + * released under the terms of the GNU Affero General Public License version 3. 
+ */ + +#pragma once + +#include +#include +#include + +namespace Genode { + struct Topo_connection; +} + +struct Genode::Topo_connection : Connection, Topo_session_client +{ + enum + { + RAM_QUOTA = 2097152UL + }; + + Topo_connection(Env &env, const char *label = "", Affinity const &affinity = Affinity()) + : + Connection(env, + session(env.parent(), affinity, "ram_quota=%u, cap_quota=%u, label=\"%s\"", RAM_QUOTA, CAP_QUOTA, label)), + Topo_session_client(cap()) {} + + Topology::Numa_region node_affinity_of(Affinity::Location const &loc) override { + return Topo_session_client::node_affinity_of(loc); + } + + Topology::Numa_region node_at_id(unsigned node_id) override { + return Topo_session_client::node_at_id(node_id); + } + + unsigned node_count() override { + return Topo_session_client::node_count(); + } +}; \ No newline at end of file diff --git a/repos/base/include/topo_session/node.h b/repos/base/include/topo_session/node.h new file mode 100644 index 0000000000..ffc7d631e2 --- /dev/null +++ b/repos/base/include/topo_session/node.h @@ -0,0 +1,58 @@ +/* + * \brief Representation of a NUMA node + * \author Michael Müller + * \date 2022-10-06 + * + * A topology session stores the component's view on the hardware topology, i.e. it's location within the NUMA topology. + */ + +/* + * Copyright (C) 2022 Michael Müller + * + * This file is part of EalánOS which is based on the Genode OS framework + * released under the terms of the GNU Affero General Public License version 3. 
+ */ + +#pragma once + +#include + +namespace Topology { + struct Numa_region; +} + +struct Topology::Numa_region : Genode::List::Element +{ + /* ID presented to component */ + unsigned _id; + + unsigned _core_count; + Genode::List neighbours; + + /* Physical NUMA node ID */ + unsigned _native_id; + + Numa_region() : _id(0), _core_count(0), neighbours(), _native_id(0) { } + Numa_region(unsigned id, unsigned native_id) : _id(id), _core_count(0), neighbours(), _native_id(native_id) {} + Numa_region(Numa_region ©) : _id(copy.id()), _core_count(copy.core_count()), neighbours(), _native_id(copy.native_id()) { + } + + unsigned native_id() { return _native_id; } + unsigned id() { return _id; } + unsigned core_count() { return _core_count; } + void core_count(unsigned count) { _core_count = count; } + void increment_core_count() { _core_count++; } + Numa_region &operator=(const Numa_region ©) { + if (this == ©) + return *this; + + this->_id = copy._id; + this->_core_count = copy._core_count; + this->_native_id = copy._native_id; + + /* At the moment, we do not copy the list of neighbours, as it is not used by any our applications. */ + /* TODO: Copy list onf neighbours, as soons as any application is going to use that information. */ + + return *this; + } +}; \ No newline at end of file diff --git a/repos/base/include/topo_session/topo_session.h b/repos/base/include/topo_session/topo_session.h new file mode 100644 index 0000000000..1d3b0c8d0e --- /dev/null +++ b/repos/base/include/topo_session/topo_session.h @@ -0,0 +1,64 @@ +/* + * \brief Topology session interface + * \author Michael Müller + * \date 2022-10-06 + * + * A topology session stores the component's view on the hardware topology, i.e. it's location within the NUMA topology. + */ + +/* + * Copyright (C) 2022 Michael Müller + * + * This file is part of EalánOS which is based on the Genode OS framework + * released under the terms of the GNU Affero General Public License version 3. 
+ */ + +#pragma once + +#include +#include + +namespace Genode { + + struct Topo_session; + struct Topo_session_client; +} + +namespace Topology +{ + struct Numa_region; +} // namespace EalanOS + +struct Genode::Topo_session : Session +{ + /** + * \nooapi + * + */ + static const char *service_name() { return "TOPO"; } + + enum + { + CAP_QUOTA = 2 + }; + + typedef Topo_session_client Client; + + virtual ~Topo_session() { } + + virtual Topology::Numa_region node_affinity_of(Affinity::Location const &) = 0; + virtual Topology::Numa_region node_at_id(unsigned node_id) = 0; + virtual unsigned node_count() = 0; + virtual void reconstruct(const Affinity) = 0; + virtual unsigned phys_id(Affinity::Location const &) = 0; + virtual Affinity::Space const global_affinity_space() = 0; + + GENODE_RPC(Rpc_node_affinity, Topology::Numa_region, node_affinity_of, Affinity::Location const &); + GENODE_RPC(Rpc_node_id, Topology::Numa_region, node_at_id, unsigned); + GENODE_RPC(Rpc_node_count, unsigned, node_count); + GENODE_RPC(Rpc_reconstruct, void, reconstruct, Affinity); + GENODE_RPC(Rpc_phys_id, unsigned, phys_id, Affinity::Location const &); + GENODE_RPC(Rpc_total_core_count, Affinity::Space const, global_affinity_space); + + GENODE_RPC_INTERFACE(Rpc_node_affinity, Rpc_node_id, Rpc_node_count, Rpc_reconstruct, Rpc_phys_id, Rpc_total_core_count); +}; \ No newline at end of file diff --git a/repos/base/lib/mk/base-common.inc b/repos/base/lib/mk/base-common.inc index fc66779e1d..cef38996ba 100644 --- a/repos/base/lib/mk/base-common.inc +++ b/repos/base/lib/mk/base-common.inc @@ -8,6 +8,7 @@ SRC_CC += avl_tree.cc SRC_CC += slab.cc SRC_CC += allocator_avl.cc SRC_CC += heap.cc sliced_heap.cc +SRC_CC += regional_heap.cc SRC_CC += registry.cc SRC_CC += output.cc SRC_CC += child.cc diff --git a/repos/base/lib/symbols/ld b/repos/base/lib/symbols/ld index 5c705f5af1..774550697a 100644 --- a/repos/base/lib/symbols/ld +++ b/repos/base/lib/symbols/ld @@ -38,6 +38,13 @@ # under the terms of the 
GNU Affero General Public License version 3. # +__atomic_store T +__atomic_load T +_Unwind_GetLanguageSpecificData T +_Unwind_GetRegionStart T +_Unwind_GetIP T +_Unwind_SetGR T +_Unwind_SetIP T _Z11genode_exiti T _Z16main_thread_utcbv T _Z22__ldso_raise_exceptionv T @@ -45,6 +52,9 @@ _ZN5Timer10Connection11set_timeoutEN6Genode12MicrosecondsERNS1_15Timeout_handler _ZN5Timer10Connection9curr_timeEv T _ZN5Timer10ConnectionC1ERN6Genode3EnvERNS1_10EntrypointERKNS1_13Session_labelE T _ZN5Timer10ConnectionC2ERN6Genode3EnvERNS1_10EntrypointERKNS1_13Session_labelE T +_ZN6Genode5Trace19Performance_counter15shared_freemaskE D 8 +_ZN6Genode5Trace19Performance_counter16private_freemaskE D 8 +_ZN6Genode5Trace19Performance_counter11_init_masksEv T _ZN6Genode10Entrypoint16_dispatch_signalERNS_6SignalE T _ZN6Genode10Entrypoint22Signal_proxy_component6signalEv T _ZN6Genode10Entrypoint25_process_incoming_signalsEv T @@ -142,6 +152,26 @@ _ZN6Genode17Region_map_client6detachEm T _ZN6Genode17Region_map_client9dataspaceEv T _ZN6Genode17Region_map_clientC1ENS_10CapabilityINS_10Region_mapEEE T _ZN6Genode17Region_map_clientC2ENS_10CapabilityINS_10Region_mapEEE T +_ZN6Genode13Regional_heap11quota_limitEm T +_ZN6Genode13Regional_heap14Dataspace_pool15remove_and_freeERNS0_9DataspaceE T +_ZN6Genode13Regional_heap14Dataspace_poolD1Ev T +_ZN6Genode13Regional_heap14Dataspace_poolD2Ev T +_ZN6Genode13Regional_heap16_try_local_allocEm T +_ZN6Genode13Regional_heap19_allocate_dataspaceEmb T +_ZN6Genode13Regional_heap21_unsynchronized_allocEm T +_ZN6Genode13Regional_heap4freeEPvm T +_ZN6Genode13Regional_heap9try_allocEm T +_ZN6Genode13Regional_heapC1EPNS_13Ram_allocatorEPNS_10Region_mapERN8Topology11Numa_regionEmPvm T +_ZN6Genode13Regional_heapC2EPNS_13Ram_allocatorEPNS_10Region_mapERN8Topology11Numa_regionEmPvm T +_ZN6Genode13Regional_heapD0Ev T +_ZN6Genode13Regional_heapD1Ev T +_ZN6Genode13Regional_heapD2Ev T +_ZNK6Genode13Regional_heap18need_size_for_freeEv T +_ZNK6Genode13Regional_heap8consumedEv T 
+_ZNK6Genode13Regional_heap8overheadEm T +_ZTIN6Genode13Regional_heapE T +_ZTSN6Genode13Regional_heapE T +_ZTVN6Genode13Regional_heapE T _ZN6Genode17Rm_session_client6createEm T _ZN6Genode17Rm_session_client7destroyENS_10CapabilityINS_10Region_mapEEE T _ZN6Genode17Rm_session_clientC1ENS_10CapabilityINS_10Rm_sessionEEE T @@ -174,6 +204,8 @@ _ZN6Genode18Signal_transmitterC1ENS_10CapabilityINS_14Signal_contextEEE T _ZN6Genode18Signal_transmitterC2ENS_10CapabilityINS_14Signal_contextEEE T _ZN6Genode20env_session_id_spaceEv T _ZN6Genode21cache_invalidate_dataEmm T +_ZN6Genode22Topo_session_component11reconstructENS_8AffinityE W +_ZThn136_N6Genode22Topo_session_component11reconstructENS_8AffinityE W _ZN6Genode25env_stack_area_region_mapE B 8 _ZN6Genode27cache_clean_invalidate_dataEmm T _ZN6Genode28env_stack_area_ram_allocatorE B 8 @@ -223,6 +255,7 @@ _ZN6Genode5Child23initiate_env_pd_sessionEv T _ZN6Genode5Child4exitEi T _ZN6Genode5Child5closeENS_8Id_spaceINS_6Parent6ClientEE2IdE T _ZN6Genode5Child5yieldERKNS_6StringILm160EEE T +_ZN6Genode5Child6acceptERKNS_6StringILm160EEE T _ZN6Genode5Child7sessionENS_8Id_spaceINS_6Parent6ClientEE2IdERKNS_13Rpc_in_bufferILm64EEERKNS6_ILm160EEERKNS_8AffinityE T _ZN6Genode5Child7upgradeENS_8Id_spaceINS_6Parent6ClientEE2IdERKNS_13Rpc_in_bufferILm160EEE T _ZN6Genode5Child8announceERKNS_13Rpc_in_bufferILm64EEE T @@ -239,6 +272,11 @@ _ZN6Genode5Trace6Logger17_evaluate_controlEv T _ZN6Genode5Trace6Logger3logEPKcm T _ZN6Genode5Trace6LoggerC1Ev T _ZN6Genode5Trace6LoggerC2Ev T +_ZN6Genode5Trace19Performance_counter4readEj T +_ZN6Genode5Trace19Performance_counter4stopEj T +_ZN6Genode5Trace19Performance_counter5resetEjj T +_ZN6Genode5Trace19Performance_counter5setupEjyyy T +_ZN6Genode5Trace19Performance_counter5startEj T _ZN6Genode5Trace18Partitioned_buffer4initEm T _ZN6Genode5Trace18Partitioned_buffer6commitEm T _ZN6Genode5Trace18Partitioned_buffer7reserveEm T @@ -271,6 +309,7 @@ _ZN6Genode6Thread23stack_area_virtual_baseEv T 
_ZN6Genode6Thread23stack_area_virtual_sizeEv T _ZN6Genode6Thread4joinEv T _ZN6Genode6Thread4nameEPcm T +_ZN6Genode6Thread3pinERNS_8Affinity8LocationE T _ZN6Genode6Thread4utcbEv T _ZN6Genode6Thread5startEv T _ZN6Genode6Thread6myselfEv T @@ -374,7 +413,7 @@ _ZTVN6Genode18Allocator_avl_baseE D 128 _ZTVN6Genode4HeapE D 72 _ZTVN6Genode4SlabE D 72 _ZTVN6Genode5Child14Initial_threadE D 48 -_ZTVN6Genode5ChildE D 440 +_ZTVN6Genode5ChildE D 456 _ZTVN6Genode6OutputE D 48 _ZTVN6Genode6ThreadE D 48 _ZTVN6Genode7ConsoleE D 48 @@ -599,8 +638,12 @@ _ZTVSt8bad_cast D 40 _ZTVSt9bad_alloc D 40 _ZTVSt9exception D 40 _ZTVSt9type_info D 64 +_ZdaPvSt11align_val_t W _ZdlPv W _ZdlPvm W +_ZdlPvSt11align_val_t W +_ZdlPvmSt11align_val_t W +_ZnwmSt11align_val_t W __aeabi_atexit W __aeabi_unwind_cpp_pr0 T __aeabi_unwind_cpp_pr1 T diff --git a/repos/base/mk/global.mk b/repos/base/mk/global.mk index 6d0dc55043..32d86197c4 100644 --- a/repos/base/mk/global.mk +++ b/repos/base/mk/global.mk @@ -257,7 +257,7 @@ ALL_INC_DIR += $(LIBGCC_INC_DIR) ALL_INC_DIR += $(HOST_INC_DIR) VERBOSE ?= @ -VERBOSE_DIR ?= --no-print-directory +VERBOSE_DIR ?= --print-directory MSG_LINK ?= @$(ECHO) " LINK " MSG_COMP ?= @$(ECHO) " COMPILE " diff --git a/repos/base/src/core/core_log.cc b/repos/base/src/core/core_log.cc index 88a043804c..7dfaf6124a 100644 --- a/repos/base/src/core/core_log.cc +++ b/repos/base/src/core/core_log.cc @@ -51,8 +51,10 @@ void Core::init_core_log(Core_log_range const &r) { range = r; } void Core::Core_log::output(char const * str) { + acquire(); for (unsigned i = 0; i < Genode::strlen(str); i++) { out(str[i]); out_mem(str[i]); } + release(); } diff --git a/repos/base/src/core/cpu_session_component.cc b/repos/base/src/core/cpu_session_component.cc index e00093ea92..81e8db242a 100644 --- a/repos/base/src/core/cpu_session_component.cc +++ b/repos/base/src/core/cpu_session_component.cc @@ -15,6 +15,7 @@ /* Genode includes */ #include +#include /* core includes */ #include @@ -22,6 +23,7 @@ #include 
#include + using namespace Core; @@ -149,6 +151,10 @@ void Cpu_session_component::kill_thread(Thread_capability thread_cap) } } +void Cpu_session_component::migrate_thread(Thread_capability thread_cap, Affinity::Location loc) +{ + _thread_ep.apply(thread_cap, [&] (Cpu_thread_component *t) { t->affinity(_thread_affinity(loc)); }); +} void Cpu_session_component::exception_sigh(Signal_context_capability sigh) { @@ -170,6 +176,10 @@ Affinity::Space Cpu_session_component::affinity_space() const return Affinity::Space(_location.width(), _location.height()); } +void Cpu_session_component::move(const Affinity::Location destination) +{ + _location = destination; +} Dataspace_capability Cpu_session_component::trace_control() { diff --git a/repos/base/src/core/include/account.h b/repos/base/src/core/include/account.h index 81412178b8..92ce3438fb 100644 --- a/repos/base/src/core/include/account.h +++ b/repos/base/src/core/include/account.h @@ -149,11 +149,16 @@ class Core::Account /* make sure to stay within the initial limit */ if (amount.value > _transferrable_quota().value) + { + Genode::log("Limit exceeded : ", _label); throw Limit_exceeded(); + } - /* downgrade from this account */ - if (!_quota_guard.try_downgrade(amount)) - throw Limit_exceeded(); + /* downgrade from this account */ + if (!_quota_guard.try_downgrade(amount)) { + Genode::log("Limit exceeded: ", _label); + throw Limit_exceeded(); + } } /* credit to 'other' */ diff --git a/repos/base/src/core/include/core_env.h b/repos/base/src/core/include/core_env.h index fef3b37bf8..3338c4fbf7 100644 --- a/repos/base/src/core/include/core_env.h +++ b/repos/base/src/core/include/core_env.h @@ -17,6 +17,7 @@ /* Genode includes */ #include +#include /* base-internal includes */ #include @@ -92,6 +93,8 @@ class Core::Core_env : public Noncopyable Pd_session *pd_session() { return &_pd_session; } Cpu_session *cpu_session() { ASSERT_NEVER_CALLED; } Cpu_session_capability cpu_session_cap() { ASSERT_NEVER_CALLED; } + 
Topo_session *topo_session() override { ASSERT_NEVER_CALLED; } + Topo_session_capability topo_session_cap() override { ASSERT_NEVER_CALLED; } Pd_session_capability pd_session_cap() { return _pd_session.cap(); } }; diff --git a/repos/base/src/core/include/core_log.h b/repos/base/src/core/include/core_log.h index 88d1b92e7b..70f4de2265 100644 --- a/repos/base/src/core/include/core_log.h +++ b/repos/base/src/core/include/core_log.h @@ -33,6 +33,10 @@ namespace Core { struct Core::Core_log { + void acquire(); + + void release(); + void out(char const c); void output(char const * str); diff --git a/repos/base/src/core/include/cpu_session_component.h b/repos/base/src/core/include/cpu_session_component.h index 78559171e2..da276213bc 100644 --- a/repos/base/src/core/include/cpu_session_component.h +++ b/repos/base/src/core/include/cpu_session_component.h @@ -169,8 +169,10 @@ class Core::Cpu_session_component : public Session_object, Create_thread_result create_thread(Capability, Name const &, Affinity::Location, Weight, addr_t) override; void kill_thread(Thread_capability) override; + void migrate_thread(Thread_capability, Affinity::Location) override; void exception_sigh(Signal_context_capability) override; Affinity::Space affinity_space() const override; + void move(const Affinity::Location) override; Dataspace_capability trace_control() override; int ref_account(Cpu_session_capability c) override; int transfer_quota(Cpu_session_capability, size_t) override; diff --git a/repos/base/src/core/include/cpu_thread_component.h b/repos/base/src/core/include/cpu_thread_component.h index 626f222464..4f3da327d9 100644 --- a/repos/base/src/core/include/cpu_thread_component.h +++ b/repos/base/src/core/include/cpu_thread_component.h @@ -174,6 +174,7 @@ class Core::Cpu_thread_component : public Rpc_object, _address_space_region_map.add_client(_rm_client); _platform_thread.pager(_rm_client); + _platform_thread.affinity(location); _trace_sources.insert(&_trace_source); } diff --git 
a/repos/base/src/core/include/pd_session_component.h b/repos/base/src/core/include/pd_session_component.h index a897de79e0..2ea3127751 100644 --- a/repos/base/src/core/include/pd_session_component.h +++ b/repos/base/src/core/include/pd_session_component.h @@ -340,6 +340,8 @@ class Core::Pd_session_component : public Session_object Alloc_result try_alloc(size_t, Cache) override; + Alloc_result try_alloc(size_t, Ram_allocator::Numa_id, Cache) override; + void free(Ram_dataspace_capability) override; size_t dataspace_size(Ram_dataspace_capability) const override; @@ -372,6 +374,13 @@ class Core::Pd_session_component : public Session_object addr_t dma_addr(Ram_dataspace_capability) override; Attach_dma_result attach_dma(Dataspace_capability, addr_t) override; + + /****************************************** + ** Support for EalánOS cells ** + ******************************************/ + void create_cell(long prioritiy, const Affinity::Location &loc) override; + + void update_cell(const Affinity::Location &loc) override; }; #endif /* _CORE__INCLUDE__PD_SESSION_COMPONENT_H_ */ diff --git a/repos/base/src/core/include/ram_dataspace_factory.h b/repos/base/src/core/include/ram_dataspace_factory.h index c00a2858fb..69fba6607a 100644 --- a/repos/base/src/core/include/ram_dataspace_factory.h +++ b/repos/base/src/core/include/ram_dataspace_factory.h @@ -45,7 +45,7 @@ class Core::Ram_dataspace_factory : public Ram_allocator, Rpc_entrypoint &_ep; Range_allocator &_phys_alloc; - Phys_range const _phys_range; + Phys_range _phys_range; /* @@ -82,6 +82,11 @@ class Core::Ram_dataspace_factory : public Ram_allocator, */ void _clear_ds(Dataspace_component &ds); + /** + * Remove core-local mappings of dataspace + */ + void _unmap_ds_from_core(Dataspace_component &ds); + public: Ram_dataspace_factory(Rpc_entrypoint &ep, @@ -109,6 +114,7 @@ class Core::Ram_dataspace_factory : public Ram_allocator, *****************************/ Alloc_result try_alloc(size_t, Cache) override; + Alloc_result 
try_alloc(size_t, Ram_allocator::Numa_id, Cache) override; void free(Ram_dataspace_capability) override; size_t dataspace_size(Ram_dataspace_capability ds) const override; }; diff --git a/repos/base/src/core/include/synced_ram_allocator.h b/repos/base/src/core/include/synced_ram_allocator.h index 6b94e1dde6..5ac67fa768 100644 --- a/repos/base/src/core/include/synced_ram_allocator.h +++ b/repos/base/src/core/include/synced_ram_allocator.h @@ -41,6 +41,12 @@ class Core::Synced_ram_allocator : public Ram_allocator Mutex::Guard mutex_guard(_mutex); return _alloc.try_alloc(size, cache); } + + Alloc_result try_alloc(size_t size, Ram_allocator::Numa_id numa_id, Cache cache) override + { + Mutex::Guard mutex_guard(_mutex); + return _alloc.try_alloc(size, numa_id, cache); + } void free(Ram_dataspace_capability ds) override { diff --git a/repos/base/src/core/include/topo_root.h b/repos/base/src/core/include/topo_root.h new file mode 100644 index 0000000000..52e369d8eb --- /dev/null +++ b/repos/base/src/core/include/topo_root.h @@ -0,0 +1,71 @@ +/* + * \brief Topology service root component + * \author Michael Müller + * \date 2022-10-06 + * + * A topology session stores the component's view on the hardware topology, i.e. it's location within the NUMA topology. + */ + +/* + * Copyright (C) 2022 Michael Müller + * + * This file is part of EalánOS which is based on the Genode OS framework + * released under the terms of the GNU Affero General Public License version 3. 
+ */ + +#pragma once + +#include + +#include + +#include + +namespace Genode { + + class Topo_root : public Root_component + { + private: + Ram_allocator &_ram_alloc; + Region_map &_local_rm; + + protected: + + Topo_session_component *_create_session(char const *args, Affinity const &affinity) override { + size_t ram_quota = Arg_string::find_arg(args, "ram_quota").ulong_value(0); + + if (ram_quota < Trace::Control_area::SIZE) + throw Insufficient_ram_quota(); + + if (!affinity.valid()) { + log("Location ", affinity.location(), " not within space ", affinity.space()); + throw Service_denied(); + } + + return new (md_alloc()) + Topo_session_component(*this->ep(), + session_resources_from_args(args), + session_label_from_args(args), + session_diag_from_args(args), + _ram_alloc, _local_rm, + const_cast(affinity)); + } + + void _upgrade_session(Topo_session_component *topo, const char *args) override + { + topo->upgrade(ram_quota_from_args(args)); + topo->upgrade(cap_quota_from_args(args)); + } + + public: + + Topo_root(Ram_allocator &ram_alloc, + Region_map &local_rm, + Rpc_entrypoint &session_ep, + Allocator &md_alloc) + : + Root_component(&session_ep, &md_alloc), + _ram_alloc(ram_alloc), _local_rm(local_rm) + { } + }; +} \ No newline at end of file diff --git a/repos/base/src/core/include/topo_session_component.h b/repos/base/src/core/include/topo_session_component.h new file mode 100644 index 0000000000..2632bbdedc --- /dev/null +++ b/repos/base/src/core/include/topo_session_component.h @@ -0,0 +1,86 @@ +/* + * \brief Topology session interface + * \author Michael Müller + * \date 2022-10-06 + * + * A topology session stores the component's view on the hardware topology, i.e. it's location within the NUMA topology. + */ + +/* + * Copyright (C) 2022 Michael Müller + * + * This file is part of EalánOS which is based on the Genode OS framework + * released under the terms of the GNU Affero General Public License version 3. 
+ */ + +#pragma once + +/* Genode includes */ +#include +#include +#include +#include +#include +#include + +namespace Genode { + class Topo_session_component; +} + +class Genode::Topo_session_component : public Session_object +{ + private: + Genode::Affinity _affinity; + Sliced_heap _md_alloc; + + Topology::Numa_region _node_affinities[Genode::Platform::MAX_SUPPORTED_CPUS][Genode::Platform::MAX_SUPPORTED_CPUS]; + unsigned _node_count; + Topology::Numa_region _nodes[64]; + + public: + Topo_session_component(Rpc_entrypoint &session_ep, + Resources const &resources, + Label const &label, + Diag const &diag, + Ram_allocator &ram_alloc, + Region_map &local_rm, + Affinity affinity + ); + + void construct(); + + /** + * @brief Topology session interface + */ + + Topology::Numa_region node_affinity_of(Affinity::Location const &loc) override + { + return _node_affinities[loc.xpos()][loc.ypos()]; + } + + Topology::Numa_region node_at_id(unsigned numa_id) override + { + return _nodes[numa_id]; + } + + unsigned node_count() override + { + return _node_count; + } + + void reconstruct(Affinity affinity) override + { + _affinity = affinity; + construct(); + } + + unsigned phys_id(const Affinity::Location &loc) override + { + return platform_specific().kernel_cpu_id(loc); + } + + Affinity::Space const global_affinity_space() override + { + return platform_specific().affinity_space(); + } +}; diff --git a/repos/base/src/core/main.cc b/repos/base/src/core/main.cc index 66064508dd..9b393c29f4 100644 --- a/repos/base/src/core/main.cc +++ b/repos/base/src/core/main.cc @@ -38,6 +38,7 @@ #include #include #include +#include #include using namespace Core; @@ -184,7 +185,7 @@ class Core_child : public Child_policy Pd_session &ref_pd() override { return _core_pd; } Pd_session_capability ref_pd_cap() const override { return _core_pd_cap; } - size_t session_alloc_batch_size() const override { return 128; } + size_t session_alloc_batch_size() const override { return 2*128; } Id_space 
&server_id_space() override { return _server_ids; } }; @@ -278,6 +279,7 @@ void Genode::bootstrap_component(Genode::Platform &) platform().irq_alloc(), sliced_heap); static Trace_root trace_root (core_ram_alloc, local_rm, ep, sliced_heap, Core::Trace::sources(), trace_policies); + static Topo_root topo_root(core_ram_alloc, local_rm, ep, sliced_heap); static Core_service rom_service (services, rom_root); static Core_service rm_service (services, rm_root); @@ -287,6 +289,7 @@ void Genode::bootstrap_component(Genode::Platform &) static Core_service io_mem_service (services, io_mem_root); static Core_service irq_service (services, irq_root); static Core_service trace_service (services, trace_root); + static Core_service topo_service(services, topo_root); /* make platform-specific services known to service pool */ platform_add_local_services(ep, sliced_heap, services, Core::Trace::sources(), core_ram_alloc); @@ -294,7 +297,7 @@ void Genode::bootstrap_component(Genode::Platform &) size_t const avail_ram_quota = core_pd.avail_ram().value; size_t const avail_cap_quota = core_pd.avail_caps().value; - size_t const preserved_ram_quota = 224*1024; + size_t const preserved_ram_quota = 224*1024+(1<<20); size_t const preserved_cap_quota = 1000; if (avail_ram_quota < preserved_ram_quota) { diff --git a/repos/base/src/core/pd_session_component.cc b/repos/base/src/core/pd_session_component.cc index ad9a88649a..b0f8d4b6bb 100644 --- a/repos/base/src/core/pd_session_component.cc +++ b/repos/base/src/core/pd_session_component.cc @@ -71,6 +71,58 @@ Pd_session_component::try_alloc(size_t ds_size, Cache cache) ); } +Ram_allocator::Alloc_result +Pd_session_component::try_alloc(size_t ds_size, Ram_allocator::Numa_id numa_id, Cache cache) +{ + /* zero-sized dataspaces are not allowed */ + if (!ds_size) + return Alloc_error::DENIED; + + /* dataspace allocation granularity is page size */ + ds_size = align_addr(ds_size, 12); + + using Result = Ram_allocator::Alloc_result; + using Reservation = 
Genode::Reservation; + + /* track quota use */ + return _ram_quota_guard().with_reservation(Ram_quota{ds_size}, + + [&] (Reservation &ram_reservation) -> Result { + + /* + * In the worst case, we need to allocate a new slab block for + * the meta data of the dataspace to be created. Therefore, we + * temporarily withdraw the slab block size here to trigger an + * exception if the account does not have enough room for the meta + * data. + */ + Ram_quota const overhead { Ram_dataspace_factory::SLAB_BLOCK_SIZE }; + + if (!_ram_quota_guard().have_avail(overhead)) { + ram_reservation.cancel(); + return Ram_allocator::Alloc_error::OUT_OF_RAM; + } + + /* + * Each dataspace is an RPC object and thereby consumes a + * capability. + */ + return _cap_quota_guard().with_reservation(Cap_quota{1}, + + [&] (Genode::Reservation &) -> Result { + return _ram_ds_factory.try_alloc(ds_size, numa_id, cache); + }, + [&] () -> Result { + ram_reservation.cancel(); + return Ram_allocator::Alloc_error::OUT_OF_CAPS; + } + ); + }, + [&] () -> Result { + return Ram_allocator::Alloc_error::OUT_OF_RAM; + } + ); +} void Pd_session_component::free(Ram_dataspace_capability ds_cap) { diff --git a/repos/base/src/core/ram_dataspace_factory.cc b/repos/base/src/core/ram_dataspace_factory.cc index 8c37503834..889864b4c8 100644 --- a/repos/base/src/core/ram_dataspace_factory.cc +++ b/repos/base/src/core/ram_dataspace_factory.cc @@ -13,6 +13,8 @@ /* core includes */ #include +#include +#include using namespace Core; @@ -126,6 +128,7 @@ Ram_dataspace_factory::try_alloc(size_t ds_size, Cache cache) Dataspace_component &ds = *ds_ptr; /* create native shared memory representation of dataspace */ +#ifdef ZERO_AT_ALLOC try { _export_ram_ds(ds); } catch (Core_virtual_memory_exhausted) { warning("could not export RAM dataspace of size ", ds.size()); @@ -140,8 +143,8 @@ Ram_dataspace_factory::try_alloc(size_t ds_size, Cache cache) * function must also make sure to flush all cache lines related to the * address range 
used by the dataspace. */ - _clear_ds(ds); - + _unmap_ds_from_core(ds); +#endif Dataspace_capability ds_cap = _ep.manage(&ds); phys_alloc_guard.keep = true; @@ -149,6 +152,14 @@ Ram_dataspace_factory::try_alloc(size_t ds_size, Cache cache) return static_cap_cast(ds_cap); } +Ram_allocator::Alloc_result Ram_dataspace_factory::try_alloc(size_t size, Ram_allocator::Numa_id numa_id, Cache cached=CACHED) +{ + Ram_dataspace_factory::Phys_range old = {_phys_range.start, _phys_range.end}; + _phys_range = {platform_specific().mem_range(numa_id).start, platform_specific().mem_range(numa_id).end}; + Ram_allocator::Alloc_result result = Ram_dataspace_factory::try_alloc(size, cached); + _phys_range = {old.start, old.end}; + return result; +} void Ram_dataspace_factory::free(Ram_dataspace_capability ds_cap) { @@ -176,8 +187,25 @@ void Ram_dataspace_factory::free(Ram_dataspace_capability ds_cap) }); /* call dataspace destructor and free memory */ - if (ds) + if (ds) { + try { _export_ram_ds(*ds); } + catch (Core_virtual_memory_exhausted) { + warning("could not export RAM dataspace of size ", ds->size()); + + /* cleanup unneeded resources */ + destroy(_ds_slab, ds); + return; + } + + /* + * Fill new dataspaces with zeros. For non-cached RAM dataspaces, this + * function must also make sure to flush all cache lines related to the + * address range used by the dataspace. 
+ */ + _clear_ds(*ds); + _unmap_ds_from_core(*ds); destroy(_ds_slab, ds); + } } diff --git a/repos/base/src/core/stack_area.cc b/repos/base/src/core/stack_area.cc index a8116e91d1..6ad9ea6f80 100644 --- a/repos/base/src/core/stack_area.cc +++ b/repos/base/src/core/stack_area.cc @@ -132,6 +132,10 @@ struct Stack_area_ram_allocator : Ram_allocator Alloc_result try_alloc(size_t, Cache) override { return reinterpret_cap_cast(Native_capability()); } + Alloc_result try_alloc(size_t, Ram_allocator::Numa_id, Cache) override { + return reinterpret_cap_cast(Native_capability()); } + + void free(Ram_dataspace_capability) override { } size_t dataspace_size(Ram_dataspace_capability) const override { return 0; } diff --git a/repos/base/src/core/topo_session_component.cc b/repos/base/src/core/topo_session_component.cc new file mode 100644 index 0000000000..736ff8ae17 --- /dev/null +++ b/repos/base/src/core/topo_session_component.cc @@ -0,0 +1,80 @@ +/* + * \brief Topology session interface + * \author Michael Müller + * \date 2022-10-06 + * + * A topology session stores the component's view on the hardware topology, i.e. it's location within the NUMA topology. + */ + +/* + * Copyright (C) 2022 Michael Müller + * + * This file is part of EalánOS which is based on the Genode OS framework + * released under the terms of the GNU Affero General Public License version 3. 
+ */ +#include +#include +#include +#include + +using namespace Genode; + +Topo_session_component::Topo_session_component(Rpc_entrypoint &session_ep, + Resources const &resources, + Label const &label, + Diag const &diag, + Ram_allocator &ram_alloc, + Region_map &local_rm, + Affinity affinity) + : Session_object(session_ep, resources, label, diag), + _affinity(affinity), + _md_alloc(ram_alloc, local_rm), + _node_count(0) +{ + construct(); +} + +void Topo_session_component::construct() +{ + Affinity::Location location = _affinity.location(); + const unsigned height = location.height(); + unsigned width = location.width(); + unsigned curr_node_id = 0; + Topology::Numa_region *node_created = new (_md_alloc) Topology::Numa_region[64](); + + //Genode::log("[", label(), "] Creating new topology model of size ", width, "x", height); + + for (unsigned x = 0; x < width; x++) + { + for (unsigned y = 0; y < height; y++) + { + /* Map component's affinity matrix to its position in the affinity space. + * In order to get the correct physical CPU id for a coordinate in the affinity matrix of a component, + * we need the global coordination for it relative to the whole affinity space. + * But, every component's maintains a local view on its affinity matrix starting by (0,0), since + * affinity locations can have arbitrary coordinates in the affinity space, we need to transpose the + * component's affinity matrix to the global view of the affinity space. 
*/ + Affinity::Location loc = location.transpose(x, y); + unsigned cpu_id = platform_specific().kernel_cpu_id(loc); + unsigned native_id = platform_specific().domain_of_cpu(cpu_id); + + //log("[", label(), "] CPU (", x, "x", y, ") is native CPU ", cpu_id, " on node ", native_id); + + if (node_created[native_id].core_count() == 0) + { + _nodes[curr_node_id] = _node_affinities[x][y] = Topology::Numa_region(curr_node_id, native_id); + _node_affinities[x][y].increment_core_count(); + node_created[native_id] = _node_affinities[x][y]; + //log("[", label(), "] Found new native NUMA region ", native_id, " for CPU (", x, "x", y, ")"); + _node_count++; + curr_node_id++; + } + else + { + (_node_affinities[x][y] = node_created[native_id]).increment_core_count(); + _nodes[curr_node_id].increment_core_count(); + } + } + } + +} \ No newline at end of file diff --git a/repos/base/src/include/base/internal/expanding_topo_session_client.h b/repos/base/src/include/base/internal/expanding_topo_session_client.h new file mode 100644 index 0000000000..654577e6a2 --- /dev/null +++ b/repos/base/src/include/base/internal/expanding_topo_session_client.h @@ -0,0 +1,51 @@ +/* + * \brief Topology session client that upgrades its session quota on demand + * \author Michael Müller + * \date 2022-10-06 + * + * A topology session stores the component's view on the hardware topology, i.e. it's location within the NUMA topology. + */ + +/* + * Copyright (C) 2022 Michael Müller + * + * This file is part of EalánOS which is based on the Genode OS framework + * released under the terms of the GNU Affero General Public License version 3. 
+ */ + +#pragma once + +#include +#include + +#include + +namespace Genode { + struct Expanding_topo_session_client; +} + +struct Genode::Expanding_topo_session_client : Upgradeable_client +{ + Expanding_topo_session_client(Parent &parent, Genode::Topo_session_capability cap, Parent::Client::Id id) + : + Upgradeable_client + (parent, static_cap_cast(cap), id) + { } + + Topology::Numa_region node_affinity_of(Affinity::Location const &loc) override + { + return retry( + [&]() + { + return retry( + [&]() + { + return Topo_session_client::node_affinity_of(loc); + }, + [&]() + { upgrade_caps(2); }); + }, + [&]() + { upgrade_ram(8 * 1024); }); + } +}; \ No newline at end of file diff --git a/repos/base/src/lib/base/child.cc b/repos/base/src/lib/base/child.cc index 98834b8a60..5e9dc53010 100644 --- a/repos/base/src/lib/base/child.cc +++ b/repos/base/src/lib/base/child.cc @@ -45,6 +45,15 @@ void Child::yield(Resource_args const &args) Signal_transmitter(_yield_sigh).submit(); } +void Child::accept(Resource_args const &args) +{ + Mutex::Guard guard{_resource_gain_mutex}; + + _gained_resources = args; + + if (_resource_avail_sigh.valid()) + Signal_transmitter(_resource_avail_sigh).submit(); +} void Child::notify_resource_avail() const { @@ -690,6 +699,12 @@ Parent::Resource_args Child::yield_request() return _yield_request_args; } +Parent::Resource_args Child::gained_resources() +{ + Mutex::Guard guard(_resource_gain_mutex); + + return _gained_resources; +} void Child::yield_response() { _policy.yield_response(); } @@ -734,7 +749,7 @@ void Child::_try_construct_env_dependent_members() { /* check if the environment sessions are complete */ if (!_pd.cap().valid() || !_cpu.cap().valid() || !_log.cap().valid() - || !_binary.cap().valid()) + || !_binary.cap().valid() || !_topo.cap().valid()) return; /* @@ -803,6 +818,7 @@ void Child::initiate_env_sessions() _cpu .initiate(); _log .initiate(); _binary.initiate(); + _topo.initiate(); /* * Issue environment-session request for 
obtaining the linker binary. We @@ -885,6 +901,7 @@ void Child::close_all_sessions() * Issue close requests to the providers of the environment sessions, * which may be async services. */ + _topo.close(); _log.close(); _binary.close(); if (_linker.constructed()) @@ -898,6 +915,7 @@ void Child::close_all_sessions() _discard_env_session(Env::log()); _discard_env_session(Env::binary()); _discard_env_session(Env::linker()); + _discard_env_session(Env::topo()); /* * Remove dynamically created sessions from the child's ID space. diff --git a/repos/base/src/lib/base/component.cc b/repos/base/src/lib/base/component.cc index 530df9ae3e..838484bf15 100644 --- a/repos/base/src/lib/base/component.cc +++ b/repos/base/src/lib/base/component.cc @@ -21,6 +21,7 @@ /* base-internal includes */ #include +#include #include namespace Genode { struct Component_env; } @@ -86,6 +87,7 @@ struct Genode::Component_env : Env Region_map &rm() override { return _rm; } Pd_session &pd() override { return _pd; } Entrypoint &ep() override { return _ep; } + Genode::Topo_session &topo() override { return *Genode::env_deprecated()->topo_session(); } Cpu_session_capability cpu_session_cap() override { return _cpu_cap; } Pd_session_capability pd_session_cap() override { return _pd_cap; } diff --git a/repos/base/src/lib/base/env_session_id_space.cc b/repos/base/src/lib/base/env_session_id_space.cc index 4749f42469..d45fe80e8b 100644 --- a/repos/base/src/lib/base/env_session_id_space.cc +++ b/repos/base/src/lib/base/env_session_id_space.cc @@ -34,7 +34,8 @@ Id_space &Genode::env_session_id_space() cpu { dummy, id_space, Parent::Env::cpu() }, log { dummy, id_space, Parent::Env::log() }, binary { dummy, id_space, Parent::Env::binary() }, - linker { dummy, id_space, Parent::Env::linker() }; + linker { dummy, id_space, Parent::Env::linker() }, + topo { dummy, id_space, Parent::Env::topo() }; return id_space; } diff --git a/repos/base/src/lib/base/regional_heap.cc b/repos/base/src/lib/base/regional_heap.cc new 
file mode 100644 index 0000000000..2ca2a36d08 --- /dev/null +++ b/repos/base/src/lib/base/regional_heap.cc @@ -0,0 +1,348 @@ +/* + * \brief Implementation of Genode heap partition + * \author Norman Feske + * \date 2006-05-17 + */ + +/* + * Copyright (C) 2006-2017 Genode Labs GmbH + * + * This file is part of the Genode OS framework, which is distributed + * under the terms of the GNU Affero General Public License version 3. + */ + +#include +#include +#include +#include + +using namespace Genode; + + +namespace { + + enum { + MIN_CHUNK_SIZE = 4*1024, /* in machine words */ + MAX_CHUNK_SIZE = 256*1024, + /* + * Allocation sizes >= this value are considered as big + * allocations, which get their own dataspace. In contrast + * to smaller allocations, this memory is released to + * the RAM session when 'free()' is called. + */ + BIG_ALLOCATION_THRESHOLD = 64*1024 /* in bytes */ + }; +} + + +void Regional_heap::Dataspace_pool::remove_and_free(Dataspace &ds) +{ + /* + * read dataspace capability and modify _ds_list before detaching + * possible backing store for Dataspace - we rely on LIFO list + * manipulation here! + */ + + Ram_dataspace_capability ds_cap = ds.cap; + void *ds_local_addr = ds.local_addr; + + remove(&ds); + + /* + * Call 'Dataspace' destructor to properly release the RAM dataspace + * capabilities. Note that we don't free the 'Dataspace' object at the + * local allocator because this is already done by the 'Regional_heap' + * destructor prior executing the 'Dataspace_pool' destructor. 
+ */ + ds.~Dataspace(); + + region_map->detach(ds_local_addr); + ram_alloc->free(ds_cap); +} + + +Regional_heap::Dataspace_pool::~Dataspace_pool() +{ + /* free all ram_dataspaces */ + for (Dataspace *ds; (ds = first()); ) + remove_and_free(*ds); +} + + +int Regional_heap::quota_limit(size_t new_quota_limit) +{ + if (new_quota_limit < _quota_used) return -1; + _quota_limit = new_quota_limit; + return 0; +} + + +Regional_heap::Alloc_ds_result +Regional_heap::_allocate_dataspace(size_t size, bool enforce_separate_metadata) +{ + using Result = Alloc_ds_result; + + return _ds_pool.ram_alloc->try_alloc(size, _numa_id).convert( + + [&] (Ram_dataspace_capability ds_cap) -> Result { + + struct Alloc_guard + { + Ram_allocator &ram; + Ram_dataspace_capability ds; + bool keep = false; + + Alloc_guard(Ram_allocator &ram, Ram_dataspace_capability ds) + : ram(ram), ds(ds) { } + + ~Alloc_guard() { if (!keep) ram.free(ds); } + + } alloc_guard(*_ds_pool.ram_alloc, ds_cap); + + struct Attach_guard + { + Region_map &rm; + struct { void *ptr = nullptr; }; + bool keep = false; + + Attach_guard(Region_map &rm) : rm(rm) { } + + ~Attach_guard() { if (!keep && ptr) rm.detach(ptr); } + + } attach_guard(*_ds_pool.region_map); + + try { + attach_guard.ptr = _ds_pool.region_map->attach(ds_cap); + } + catch (Out_of_ram) { return Alloc_error::OUT_OF_RAM; } + catch (Out_of_caps) { return Alloc_error::OUT_OF_CAPS; } + catch (Region_map::Invalid_dataspace) { return Alloc_error::DENIED; } + catch (Region_map::Region_conflict) { return Alloc_error::DENIED; } + + Alloc_result metadata = Alloc_error::DENIED; + + /* allocate the 'Dataspace' structure */ + if (enforce_separate_metadata) { + metadata = _unsynchronized_alloc(sizeof(Regional_heap::Dataspace)); + + } else { + + /* add new local address range to our local allocator */ + _alloc->add_range((addr_t)attach_guard.ptr, size).with_result( + [&] (Range_allocator::Range_ok) { + metadata = _alloc->alloc_aligned(sizeof(Regional_heap::Dataspace), 
log2(64U)); }, + [&] (Alloc_error error) { + metadata = error; }); + } + + return metadata.convert( + [&] (void *md_ptr) -> Result { + Dataspace &ds = *construct_at(md_ptr, ds_cap, + attach_guard.ptr, size); + _ds_pool.insert(&ds); + alloc_guard.keep = attach_guard.keep = true; + return &ds; + }, + [&] (Alloc_error error) { + return error; }); + }, + [&] (Alloc_error error) { + return error; }); +} + + +Allocator::Alloc_result Regional_heap::_try_local_alloc(size_t size) +{ + return _alloc->alloc_aligned(size, log2(64U)).convert( + + [&] (void *ptr) { + _quota_used += size; + return ptr; }, + + [&] (Alloc_error error) { + return error; }); +} + + +Allocator::Alloc_result Regional_heap::_unsynchronized_alloc(size_t size) +{ + if (size >= BIG_ALLOCATION_THRESHOLD) { + + /* + * big allocation + * + * In this case, we allocate one dataspace without any meta data in it + * and return its local address without going through the allocator. + */ + + /* align to 4K page */ + size_t const dataspace_size = align_addr(size, 12); + + return _allocate_dataspace(dataspace_size, true).convert( + + [&] (Dataspace *ds_ptr) { + _quota_used += ds_ptr->size; + return ds_ptr->local_addr; }, + + [&] (Alloc_error error) { + return error; }); + } + + /* try allocation at our local allocator */ + { + Alloc_result result = _try_local_alloc(size); + if (result.ok()) + return result; + } + + size_t dataspace_size = size + + Allocator_avl::slab_block_size() + + sizeof(Regional_heap::Dataspace); + /* align to 4K page */ + dataspace_size = align_addr(dataspace_size, 12); + + /* + * '_chunk_size' is a multiple of 4K, so 'dataspace_size' becomes + * 4K-aligned, too. 
+ */ + size_t const request_size = _chunk_size * sizeof(umword_t); + + Alloc_ds_result result = Alloc_error::DENIED; + + if (dataspace_size < request_size) { + + result = _allocate_dataspace(request_size, false); + if (result.ok()) { + + /* + * Exponentially increase chunk size with each allocated chunk until + * we hit 'MAX_CHUNK_SIZE'. + */ + _chunk_size = min(2*_chunk_size, (size_t)MAX_CHUNK_SIZE); + } + } else { + result = _allocate_dataspace(dataspace_size, false); + } + + if (result.failed()) + return result.convert( + [&] (Dataspace *) { return Alloc_error::DENIED; }, + [&] (Alloc_error error) { return error; }); + + /* allocate originally requested block */ + return _try_local_alloc(size); +} + + +Allocator::Alloc_result Regional_heap::try_alloc(size_t size) +{ + + if (size == 0) + error("attempt to allocate zero-size block from heap"); + + /* serialize access of heap functions */ + Mutex::Guard guard(_mutex); + + /* check requested allocation against quota limit */ + if (size + _quota_used > _quota_limit) + return Alloc_error::DENIED; + + return _unsynchronized_alloc(size); +} + + +void Regional_heap::free(void *addr, size_t) +{ + /* serialize access of heap functions */ + Mutex::Guard guard(_mutex); + + using Size_at_error = Allocator_avl::Size_at_error; + + Allocator_avl::Size_at_result size_at_result = _alloc->size_at(addr); + + if (size_at_result.ok()) { + /* forward request to our local allocator */ + size_at_result.with_result( + [&] (size_t size) { + /* forward request to our local allocator */ + _alloc->free(addr, size); + _quota_used -= size; + }, + [&] (Size_at_error) { }); + + return; + } + + if (size_at_result == Size_at_error::MISMATCHING_ADDR) { + /* address was found in local allocator but is not a block start address */ + error("heap could not free memory block: given address ", addr, + " is not a block start adress"); + return; + } + + /* + * Block could not be found in local allocator. 
So it is either a big + * allocation or invalid address. + */ + + Regional_heap::Dataspace *ds = nullptr; + for (ds = _ds_pool.first(); ds; ds = ds->next()) + if (((addr_t)addr >= (addr_t)ds->local_addr) && + ((addr_t)addr <= (addr_t)ds->local_addr + ds->size - 1)) + break; + + if (!ds) { + //warning("heap could not free memory block: invalid address"); + throw Region_map::Invalid_dataspace(); + return; + } + + _quota_used -= ds->size; + + _ds_pool.remove_and_free(*ds); + _alloc->free(ds); +} + + +Regional_heap::Regional_heap(Ram_allocator *ram_alloc, + Region_map *region_map, + Topology::Numa_region ®ion, + size_t quota_limit, + void *static_addr, + size_t static_size) +: + _alloc(nullptr), + _ds_pool(ram_alloc, region_map), + _quota_limit(quota_limit), _quota_used(0), + _chunk_size(MIN_CHUNK_SIZE), + _numa_id(region.native_id()) +{ + if (static_addr) + _alloc->add_range((addr_t)static_addr, static_size); +} + + +Regional_heap::~Regional_heap() +{ + /* + * Revert allocations of heap-internal 'Dataspace' objects. Otherwise, the + * subsequent destruction of the 'Allocator_avl' would detect those blocks + * as dangling allocations. + * + * Since no new allocations can occur at the destruction time of the + * 'Regional_heap', it is safe to release the 'Dataspace' objects at the allocator + * yet still access them afterwards during the destruction of the + * 'Allocator_avl'. + */ + for (Regional_heap::Dataspace *ds = _ds_pool.first(); ds; ds = ds->next()) + _alloc->free(ds, sizeof(Dataspace)); + + /* + * Destruct 'Allocator_avl' before destructing the dataspace pool. This + * order is important because some dataspaces of the dataspace pool are + * used as backing store for the allocator's meta data. If we destroyed + * the object pool before the allocator, the subsequent attempt to destruct + * the allocator would access no-longer-present backing store. 
+ */ + _alloc.destruct(); +} diff --git a/repos/base/src/lib/base/thread.cc b/repos/base/src/lib/base/thread.cc index b8ebd7d83b..70f4a961a7 100644 --- a/repos/base/src/lib/base/thread.cc +++ b/repos/base/src/lib/base/thread.cc @@ -241,6 +241,11 @@ size_t Thread::stack_area_virtual_size() return Genode::stack_area_virtual_size(); } +void Thread::pin(Affinity::Location &loc) +{ + _cpu_session->migrate_thread(_thread_cap, loc); + _affinity = loc; +} Thread::Thread(size_t weight, const char *name, size_t stack_size, Type type, Cpu_session *cpu_session, Affinity::Location affinity) diff --git a/repos/gems/recipes/raw/drivers_nic-pc/drivers.config b/repos/gems/recipes/raw/drivers_nic-pc/drivers.config index 309ac11198..ccd6af3e6a 100644 --- a/repos/gems/recipes/raw/drivers_nic-pc/drivers.config +++ b/repos/gems/recipes/raw/drivers_nic-pc/drivers.config @@ -11,6 +11,7 @@ + @@ -18,8 +19,8 @@ - - + + @@ -27,12 +28,13 @@ + - + @@ -45,26 +47,28 @@ + - + + - - + + @@ -77,6 +81,7 @@ + @@ -85,32 +90,15 @@ - - - - - - - - - - - - - - - - - - - - - + + + + diff --git a/repos/libports/lib/import/import-libpfm4.mk b/repos/libports/lib/import/import-libpfm4.mk new file mode 100644 index 0000000000..ba6094e1cf --- /dev/null +++ b/repos/libports/lib/import/import-libpfm4.mk @@ -0,0 +1 @@ +INC_DIR += $(call select_from_ports,libpfm4)/include \ No newline at end of file diff --git a/repos/libports/lib/import/import-stdcxx.mk b/repos/libports/lib/import/import-stdcxx.mk index 0431165dcb..aeb5948698 100644 --- a/repos/libports/lib/import/import-stdcxx.mk +++ b/repos/libports/lib/import/import-stdcxx.mk @@ -36,7 +36,7 @@ include $(call select_from_repositories,lib/import/import-libc.mk) CC_OPT += -D_GLIBCXX_HAVE_MBSTATE_T # use compiler-builtin atomic operations -CC_OPT += -D_GLIBCXX_ATOMIC_BUILTINS_4 +CC_OPT += -D_GLIBCXX_ATOMIC_BUILTINS_8 -D__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8 # No isinf isnan CC_OPT += -D_GLIBCXX_NO_OBSOLETE_ISINF_ISNAN_DYNAMIC diff --git 
a/repos/libports/lib/mk/libpfm4.mk b/repos/libports/lib/mk/libpfm4.mk new file mode 100644 index 0000000000..c998bfad52 --- /dev/null +++ b/repos/libports/lib/mk/libpfm4.mk @@ -0,0 +1,204 @@ +LIBPFM4_DIR := $(call select_from_ports,libpfm4)/src/lib/libpfm4 + +CC_OPT += -D_REENTRANT -fvisibility=hidden + +SRC_CC = $(LIBPFM4_DIR)/lib/pfmlib_common.c + +# build libpfm only for x86_64 for now +CONFIG_PFMLIB_ARCH_X86_64=y +CONFIG_PFMLIB_ARCH_X86=y + +CONFIG_PFMLIB_SHARED?=n +CONFIG_PFMLIB_DEBUG?=y +CONFIG_PFMLIB_NOPYTHON?=y + +# +# list all library support modules +# +ifeq ($(CONFIG_PFMLIB_ARCH_IA64),y) +INCARCH = $(INC_IA64) +#SRCS += pfmlib_gen_ia64.c pfmlib_itanium.c pfmlib_itanium2.c pfmlib_montecito.c +CFLAGS += -DCONFIG_PFMLIB_ARCH_IA64 +endif + +ifeq ($(CONFIG_PFMLIB_ARCH_X86),y) + +ifeq ($(SYS),Linux) +SRCS += pfmlib_intel_x86_perf_event.c pfmlib_amd64_perf_event.c \ + pfmlib_intel_netburst_perf_event.c \ + pfmlib_intel_snbep_unc_perf_event.c +endif + +INCARCH = $(INC_X86) +SRCS += pfmlib_amd64.c pfmlib_intel_core.c pfmlib_intel_x86.c \ + pfmlib_intel_x86_arch.c pfmlib_intel_atom.c \ + pfmlib_intel_nhm_unc.c pfmlib_intel_nhm.c \ + pfmlib_intel_wsm.c \ + pfmlib_intel_snb.c pfmlib_intel_snb_unc.c \ + pfmlib_intel_ivb.c pfmlib_intel_ivb_unc.c \ + pfmlib_intel_hsw.c \ + pfmlib_intel_bdw.c \ + pfmlib_intel_skl.c \ + pfmlib_intel_icl.c \ + pfmlib_intel_spr.c \ + pfmlib_intel_rapl.c \ + pfmlib_intel_snbep_unc.c \ + pfmlib_intel_snbep_unc_cbo.c \ + pfmlib_intel_snbep_unc_ha.c \ + pfmlib_intel_snbep_unc_imc.c \ + pfmlib_intel_snbep_unc_pcu.c \ + pfmlib_intel_snbep_unc_qpi.c \ + pfmlib_intel_snbep_unc_ubo.c \ + pfmlib_intel_snbep_unc_r2pcie.c \ + pfmlib_intel_snbep_unc_r3qpi.c \ + pfmlib_intel_ivbep_unc_cbo.c \ + pfmlib_intel_ivbep_unc_ha.c \ + pfmlib_intel_ivbep_unc_imc.c \ + pfmlib_intel_ivbep_unc_pcu.c \ + pfmlib_intel_ivbep_unc_qpi.c \ + pfmlib_intel_ivbep_unc_ubo.c \ + pfmlib_intel_ivbep_unc_r2pcie.c \ + pfmlib_intel_ivbep_unc_r3qpi.c \ + pfmlib_intel_ivbep_unc_irp.c 
\ + pfmlib_intel_hswep_unc_cbo.c \ + pfmlib_intel_hswep_unc_ha.c \ + pfmlib_intel_hswep_unc_imc.c \ + pfmlib_intel_hswep_unc_pcu.c \ + pfmlib_intel_hswep_unc_qpi.c \ + pfmlib_intel_hswep_unc_ubo.c \ + pfmlib_intel_hswep_unc_r2pcie.c \ + pfmlib_intel_hswep_unc_r3qpi.c \ + pfmlib_intel_hswep_unc_irp.c \ + pfmlib_intel_hswep_unc_sbo.c \ + pfmlib_intel_bdx_unc_cbo.c \ + pfmlib_intel_bdx_unc_ubo.c \ + pfmlib_intel_bdx_unc_sbo.c \ + pfmlib_intel_bdx_unc_ha.c \ + pfmlib_intel_bdx_unc_imc.c \ + pfmlib_intel_bdx_unc_irp.c \ + pfmlib_intel_bdx_unc_pcu.c \ + pfmlib_intel_bdx_unc_qpi.c \ + pfmlib_intel_bdx_unc_r2pcie.c \ + pfmlib_intel_bdx_unc_r3qpi.c \ + pfmlib_intel_skx_unc_cha.c \ + pfmlib_intel_skx_unc_iio.c \ + pfmlib_intel_skx_unc_imc.c \ + pfmlib_intel_skx_unc_irp.c \ + pfmlib_intel_skx_unc_m2m.c \ + pfmlib_intel_skx_unc_m3upi.c \ + pfmlib_intel_skx_unc_pcu.c \ + pfmlib_intel_skx_unc_ubo.c \ + pfmlib_intel_skx_unc_upi.c \ + pfmlib_intel_knc.c \ + pfmlib_intel_slm.c \ + pfmlib_intel_tmt.c \ + pfmlib_intel_knl.c \ + pfmlib_intel_knl_unc_imc.c \ + pfmlib_intel_knl_unc_edc.c \ + pfmlib_intel_knl_unc_cha.c \ + pfmlib_intel_knl_unc_m2pcie.c \ + pfmlib_intel_glm.c \ + pfmlib_intel_netburst.c \ + pfmlib_amd64_k7.c pfmlib_amd64_k8.c pfmlib_amd64_fam10h.c \ + pfmlib_amd64_fam11h.c pfmlib_amd64_fam12h.c \ + pfmlib_amd64_fam14h.c pfmlib_amd64_fam15h.c \ + pfmlib_amd64_fam17h.c pfmlib_amd64_fam16h.c \ + pfmlib_amd64_fam19h.c pfmlib_amd64_rapl.c \ + pfmlib_amd64_fam19h_l3.c + +CFLAGS += -DCONFIG_PFMLIB_ARCH_X86 + +ifeq ($(CONFIG_PFMLIB_ARCH_I386),y) +SRCS += pfmlib_intel_coreduo.c pfmlib_intel_p6.c +CFLAGS += -DCONFIG_PFMLIB_ARCH_I386 +endif + +ifeq ($(CONFIG_PFMLIB_ARCH_X86_64),y) +CFLAGS += -DCONFIG_PFMLIB_ARCH_X86_64 +endif + +endif + +ifeq ($(CONFIG_PFMLIB_ARCH_POWERPC),y) + +ifeq ($(SYS),Linux) +SRCS += pfmlib_powerpc_perf_event.c +endif + +INCARCH = $(INC_POWERPC) +SRCS += pfmlib_powerpc.c pfmlib_power4.c pfmlib_ppc970.c pfmlib_power5.c \ + pfmlib_power6.c pfmlib_power7.c 
pfmlib_torrent.c pfmlib_power8.c \ + pfmlib_power9.c pfmlib_powerpc_nest.c pfmlib_power10.c +CFLAGS += -DCONFIG_PFMLIB_ARCH_POWERPC +endif + +ifeq ($(CONFIG_PFMLIB_ARCH_S390X),y) + +ifeq ($(SYS),Linux) +SRCS += pfmlib_s390x_perf_event.c +endif + +INCARCH = $(INC_S390X) +SRCS += pfmlib_s390x_cpumf.c +CFLAGS += -DCONFIG_PFMLIB_ARCH_S390X +endif + +ifeq ($(CONFIG_PFMLIB_ARCH_SPARC),y) + +ifeq ($(SYS),Linux) +SRCS += pfmlib_sparc_perf_event.c +endif + +INCARCH = $(INC_SPARC) +SRCS += pfmlib_sparc.c pfmlib_sparc_ultra12.c pfmlib_sparc_ultra3.c pfmlib_sparc_ultra4.c pfmlib_sparc_niagara.c +CFLAGS += -DCONFIG_PFMLIB_ARCH_SPARC +endif + +ifeq ($(CONFIG_PFMLIB_ARCH_ARM),y) + +ifeq ($(SYS),Linux) +SRCS += pfmlib_arm_perf_event.c +endif + +INCARCH = $(INC_ARM) +SRCS += pfmlib_arm.c pfmlib_arm_armv7_pmuv1.c pfmlib_arm_armv6.c pfmlib_arm_armv8.c pfmlib_tx2_unc_perf_event.c pfmlib_kunpeng_unc_perf_event.c +CFLAGS += -DCONFIG_PFMLIB_ARCH_ARM +endif + +ifeq ($(CONFIG_PFMLIB_ARCH_ARM64),y) + +ifeq ($(SYS),Linux) +SRCS += pfmlib_arm_perf_event.c +endif + +INCARCH = $(INC_ARM64) +SRCS += pfmlib_arm.c pfmlib_arm_armv8.c pfmlib_tx2_unc_perf_event.c pfmlib_kunpeng_unc_perf_event.c +CFLAGS += -DCONFIG_PFMLIB_ARCH_ARM64 +endif + +ifeq ($(CONFIG_PFMLIB_ARCH_MIPS),y) + +ifeq ($(SYS),Linux) +SRCS += pfmlib_mips_perf_event.c +endif + +INCARCH = $(INC_MIPS) +SRCS += pfmlib_mips.c pfmlib_mips_74k.c +CFLAGS += -DCONFIG_PFMLIB_ARCH_MIPS +endif + +ifeq ($(CONFIG_PFMLIB_CELL),y) +INCARCH = $(INC_CELL) +#SRCS += pfmlib_cell.c +CFLAGS += -DCONFIG_PFMLIB_CELL +endif + +SRC_CC += $(addprefix $(LIBPFM4_DIR)/lib/,$(SRCS)) +vpath %.c $(LIBPFM4_DIR)/lib + +CC_OPT += $(CFLAGS) + +INC_DIR += $(LIBPFM4_DIR)/include $(LIBPFM4_DIR)/lib/events +vpath %.h $(INC_DIR) + +LIBS += base libm libc diff --git a/repos/libports/lib/mk/mxtasking.mk b/repos/libports/lib/mk/mxtasking.mk index f78b4f5d97..19f7cc6386 100644 --- a/repos/libports/lib/mk/mxtasking.mk +++ b/repos/libports/lib/mk/mxtasking.mk @@ -1,19 +1,27 @@ 
MXTASKING_DIR := $(call select_from_ports,mxtasking)/src/lib/mxtasking +GENODE_GCC_TOOLCHAIN_DIR := /usr/local/genode/tool/21.05 SRC_CC = $(shell find $(MXTASKING_DIR)/src/mx -name '*.cpp') vpath %.cpp $(MXTASKING_DIR)/src/mx INC_DIR += $(MXTASKING_DIR)/src $(MXTASKING_DIR)/lib vpath %.h ${INC_DIR} +INC_DIR += $(call select_from_repositories,src/lib/libc) +INC_DIR += $(call select_from_repositories,src/lib/libc)/spec/x86_64 -CC_OPT += -pedantic -Wall \ +CUSTOM_CXX = /usr/local/genode/tool/bin/clang++ +CUSTOM_CC = /usr/local/genode/tool/bin/clang + +CC_OPT += --target=x86_64-genode --sysroot=/does/not/exist --gcc-toolchain=$(GENODE_GCC_TOOLCHAIN_DIR) -DCLANG_CXX11_ATOMICS +CC_OPT += -std=c++20 -pedantic -Wall \ -Wno-invalid-offsetof -Wcast-align -Wcast-qual -Wctor-dtor-privacy -Wdisabled-optimization \ -Wformat=2 -Winit-self -Wmissing-declarations -Wmissing-include-dirs -Woverloaded-virtual \ -Wredundant-decls -Wshadow -Wsign-promo -Wstrict-overflow=5 -Wswitch-default -Wundef \ - -Wno-unused -Wold-style-cast -Wno-uninitialized -O1 -g3 -fno-aligned-new + -Wno-unused -Wold-style-cast -Wno-uninitialized -O2 -g -CC_OPT += $(addprefix -I ,$(INC_DIR)) +CC_OPT += $(addprefix -I ,$(INC_DIR)) CC_CXX_WARN_STRICT = LIBS += base libm libc stdcxx +EXT_OBJECTS += /usr/local/genode/tool/lib/clang/14.0.5/lib/linux/libclang_rt.builtins-x86_64.a /usr/local/genode/tool/lib/libatomic.a #SHARED_LIB = yes diff --git a/repos/libports/lib/symbols/libc b/repos/libports/lib/symbols/libc index 0bde0b6ea0..de073087a1 100644 --- a/repos/libports/lib/symbols/libc +++ b/repos/libports/lib/symbols/libc @@ -1183,8 +1183,12 @@ _ZTVSt8bad_cast D 40 _ZTVSt9bad_alloc D 40 _ZTVSt9exception D 40 _ZTVSt9type_info D 64 +_ZdaPvSt11align_val_t W _ZdlPv W _ZdlPvm W +_ZdlPvSt11align_val_t W +_ZdlPvmSt11align_val_t W +_ZnwmSt11align_val_t W __aeabi_atexit T __aeabi_unwind_cpp_pr0 T __aeabi_unwind_cpp_pr1 T diff --git a/repos/libports/lib/symbols/stdcxx b/repos/libports/lib/symbols/stdcxx index 
e7a2643251..3fcd01da1b 100644 --- a/repos/libports/lib/symbols/stdcxx +++ b/repos/libports/lib/symbols/stdcxx @@ -129,6 +129,7 @@ _ZNK11__gnu_debug16_Error_formatter8_M_errorEv T _ZNK11__gnu_debug19_Safe_iterator_base11_M_singularEv T _ZNK11__gnu_debug19_Safe_iterator_base14_M_can_compareERKS0_ T _ZNK11__gnu_debug25_Safe_local_iterator_base16_M_get_containerEv T +_ZNSo6sentryD2Ev W _ZNKSt10bad_typeid4whatEv T _ZNKSt10error_code23default_error_conditionEv T _ZNKSt11logic_error4whatEv T @@ -136,6 +137,7 @@ _ZNKSt12bad_weak_ptr4whatEv T _ZNKSt12future_error4whatEv T _ZNKSt13random_device13_M_getentropyEv T _ZNKSt13runtime_error4whatEv T +_ZNSt14basic_ofstreamIcSt11char_traitsIcEEC1ERKNSt7__cxx1112basic_stringIcS1_SaIcEEESt13_Ios_Openmode W _ZNKSt16bad_array_length4whatEv T _ZNKSt17bad_function_call4whatEv T _ZNKSt19__iosfail_type_info11__do_upcastEPKN10__cxxabiv117__class_type_infoEPPv T @@ -151,6 +153,7 @@ _ZNKSt6locale4nameEv T _ZNKSt6locale5facet11_M_cow_shimEPKNS_2idE T _ZNKSt6locale5facet11_M_sso_shimEPKNS_2idE T _ZNKSt6localeeqERKS_ T +_ZNSt14basic_ifstreamIcSt11char_traitsIcEEC1ERKNSt7__cxx1112basic_stringIcS1_SaIcEEESt13_Ios_Openmode W _ZNKSt8__detail20_Prime_rehash_policy11_M_next_bktEm T _ZNKSt8__detail20_Prime_rehash_policy14_M_need_rehashEmmm T _ZNKSt8bad_cast4whatEv T @@ -320,6 +323,7 @@ _ZNSt13__future_base12_Result_baseD1Ev T _ZNSt13__future_base12_Result_baseD2Ev T _ZNSt13__future_base13_State_baseV211_Make_ready6_M_setEv T _ZNSt13__future_base13_State_baseV211_Make_ready6_S_runEPv T +_ZNSt13basic_filebufIcSt11char_traitsIcEED2Ev W _ZNSt13basic_fstreamIcSt11char_traitsIcEE5closeEv T _ZNSt13random_device14_M_init_pretr1ERKNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEE T _ZNSt13random_device14_M_init_pretr1ERKSs T diff --git a/repos/libports/ports/libpfm4.hash b/repos/libports/ports/libpfm4.hash new file mode 100644 index 0000000000..6eeb6653d7 --- /dev/null +++ b/repos/libports/ports/libpfm4.hash @@ -0,0 +1 @@ 
+b0ec09148c2be9f4a96203a3d2de4ebed6ce2da0 diff --git a/repos/libports/ports/libpfm4.port b/repos/libports/ports/libpfm4.port new file mode 100644 index 0000000000..f0d7542ca3 --- /dev/null +++ b/repos/libports/ports/libpfm4.port @@ -0,0 +1,13 @@ +LICENSE := PD +DOWNLOADS := libpfm4.git +VERSION := git + +URL(libpfm4) := https://github.com/wcohen/libpfm4.git +REV(libpfm4) := 8aaaf1747e96031a47ed6bd9337ff61a21f8cc64 +DIR(libpfm4) := src/lib/libpfm4 + +DIRS += include +DIRS += include/perfmon + +DIR_CONTENT(include) += src/lib/libpfm4/include/perfmon +DIR_CONTENT(include/perfmon) += src/lib/libpfm4/include/perfmon/*.h \ No newline at end of file diff --git a/repos/libports/ports/mxtasking.hash b/repos/libports/ports/mxtasking.hash index 55a1d66141..78dfa7fce1 100644 --- a/repos/libports/ports/mxtasking.hash +++ b/repos/libports/ports/mxtasking.hash @@ -1 +1 @@ -dafcd5b6d7029c2626ead3b36f755a9fbd5acb13 +6074b9fabac4e9ad3df2ec7fb39bbae5b6ffa520 diff --git a/repos/libports/ports/mxtasking.port b/repos/libports/ports/mxtasking.port index 69155a9dcd..67151fb939 100644 --- a/repos/libports/ports/mxtasking.port +++ b/repos/libports/ports/mxtasking.port @@ -3,7 +3,7 @@ DOWNLOADS := mxtasking.git VERSION := git URL(mxtasking) := https://github.com/mmueller41/mxtasking.git -REV(mxtasking) := c81b9168104be5fceebf35674b867bb965e95d43 +REV(mxtasking) := yritys DIR(mxtasking) := src/lib/mxtasking DIRS += include/mx/memory @@ -42,6 +42,12 @@ DIR_CONTENT(include/mx/util) += src/lib/mxtasking/src/mx/util/vector.h DIR_CONTENT(include/mx/util) += src/lib/mxtasking/src/mx/util/mpsc_queue.h DIR_CONTENT(include/mx/util) += src/lib/mxtasking/src/mx/util/queue.h DIR_CONTENT(include/mx/util) += src/lib/mxtasking/src/mx/util/random.h +DIR_CONTENT(include/mx/util) += src/lib/mxtasking/src/mx/util/bits.h +DIR_CONTENT(include/mx/util) += src/lib/mxtasking/src/mx/util/compiler.h +DIR_CONTENT(include/mx/util) += src/lib/mxtasking/src/mx/util/atomic.h +DIR_CONTENT(include/mx/util) += 
src/lib/mxtasking/src/mx/util/bit_alloc.h +DIR_CONTENT(include/mx/util) += src/lib/mxtasking/src/mx/util/field_alloc.h +DIR_CONTENT(include/mx/util) += src/lib/mxtasking/src/mx/util/util.h DIR_CONTENT(include/mx/system) += src/lib/mxtasking/src/mx/system/builtin.h DIR_CONTENT(include/mx/system) += src/lib/mxtasking/src/mx/system/cpuid.h DIR_CONTENT(include/mx/system) += src/lib/mxtasking/src/mx/system/cache.h diff --git a/repos/libports/recipes/src/libpfm4/api b/repos/libports/recipes/src/libpfm4/api new file mode 100644 index 0000000000..954b4ab6ae --- /dev/null +++ b/repos/libports/recipes/src/libpfm4/api @@ -0,0 +1 @@ +libpfm4 \ No newline at end of file diff --git a/repos/libports/recipes/src/libpfm4/content.mk b/repos/libports/recipes/src/libpfm4/content.mk new file mode 100644 index 0000000000..1301d4f0ba --- /dev/null +++ b/repos/libports/recipes/src/libpfm4/content.mk @@ -0,0 +1,17 @@ +MIRROR_FROM_REP_DIR := lib/mk/libpfm4.mk lib/import/import-libpfm4.mk + +content: src/lib/libpfm4 COPYING $(MIRROR_FROM_REP_DIR) + +PORT_DIR := $(call port_dir,$(REP_DIR)/ports/libpfm4) + +src/lib/libpfm4: + mkdir -p $@ + cp -r $(PORT_DIR)/src/lib/libpfm4/* $@ + rm -rf $@/.git + echo "LIBS = libpfm4" > $@/target.mk + +$(MIRROR_FROM_REP_DIR): + $(mirror_from_rep_dir) + +LICENSE: + echo "libpfm license, see src/lib/libpfm4/COPYING" > $@ \ No newline at end of file diff --git a/repos/libports/recipes/src/libpfm4/used_api b/repos/libports/recipes/src/libpfm4/used_api new file mode 100644 index 0000000000..186e29c4c6 --- /dev/null +++ b/repos/libports/recipes/src/libpfm4/used_api @@ -0,0 +1,3 @@ +base +libm +libc \ No newline at end of file diff --git a/repos/libports/run/acpica.run b/repos/libports/run/acpica.run index 336353ece0..570efada90 100644 --- a/repos/libports/run/acpica.run +++ b/repos/libports/run/acpica.run @@ -39,13 +39,14 @@ set config { + - + diff --git a/repos/libports/run/netty.inc b/repos/libports/run/netty.inc index 96c8b795e0..100aa4b716 100644 --- 
a/repos/libports/run/netty.inc +++ b/repos/libports/run/netty.inc @@ -22,6 +22,7 @@ import_from_depot [depot_user]/src/[base_src] \ [depot_user]/src/dynamic_rom \ [depot_user]/src/init \ [depot_user]/src/libc \ + [depot_user]/src/stdcxx \ [depot_user]/src/nic_router \ [depot_user]/src/vfs_audit \ [depot_user]/src/vfs_[ipstack] \ @@ -39,6 +40,7 @@ append config { + @@ -48,12 +50,12 @@ append config { - + - + @@ -64,7 +66,7 @@ append config { - + @@ -73,7 +75,7 @@ append config { - + - + @@ -106,14 +108,14 @@ append config { append_if [use_dynamic_rom] config { - + - <} [ipstack] { ip_addr="10.0.3.55" netmask="255.255.255.0" gateway="10.0.3.1" nameserver="8.8.8.8"/> + @@ -268,4 +270,4 @@ append_qemu_nic_args "host=10.0.2.1,dhcpstart=10.0.2.55,hostfwd=tcp::10080-:80,h run_genode_until forever -# vi: set ft=tcl : +# vi: set ft=tcl diff --git a/repos/libports/src/lib/libc/component.cc b/repos/libports/src/lib/libc/component.cc index 237ec974dc..f89036e8d1 100644 --- a/repos/libports/src/lib/libc/component.cc +++ b/repos/libports/src/lib/libc/component.cc @@ -77,4 +77,4 @@ void Component::construct(Genode::Env &env) * Default stack size for libc-using components */ Genode::size_t Libc::Component::stack_size() __attribute__((weak)); -Genode::size_t Libc::Component::stack_size() { return 32UL*1024*sizeof(long); } +Genode::size_t Libc::Component::stack_size() { return 96UL*1024*sizeof(long); } diff --git a/repos/libports/src/lib/libc/internal/env.h b/repos/libports/src/lib/libc/internal/env.h index 815fe23184..370e11e02b 100644 --- a/repos/libports/src/lib/libc/internal/env.h +++ b/repos/libports/src/lib/libc/internal/env.h @@ -100,6 +100,7 @@ class Libc::Env_implementation : public Libc::Env, public Config_accessor Region_map &rm() override { return _env.rm(); } Pd_session &pd() override { return _env.pd(); } Entrypoint &ep() override { return _env.ep(); } + Topo_session &topo() override { return _env.topo(); } Cpu_session_capability cpu_session_cap() override { return 
_env.cpu_session_cap(); } @@ -110,6 +111,10 @@ class Libc::Env_implementation : public Libc::Env, public Config_accessor Id_space &id_space() override { return _env.id_space(); } + Topo_session_capability topo_session_cap() override { + return _env.topo_session_cap(); + } + Session_capability session(Parent::Service_name const &name, Parent::Client::Id id, Parent::Session_args const &args, diff --git a/repos/libports/src/lib/libc/internal/malloc_ram_allocator.h b/repos/libports/src/lib/libc/internal/malloc_ram_allocator.h index faf28e9a00..749468d2f7 100644 --- a/repos/libports/src/lib/libc/internal/malloc_ram_allocator.h +++ b/repos/libports/src/lib/libc/internal/malloc_ram_allocator.h @@ -64,6 +64,18 @@ struct Libc::Malloc_ram_allocator : Ram_allocator [&] (Alloc_error error) { return error; }); } + + Alloc_result try_alloc(size_t size, Ram_allocator::Numa_id numa_id, Cache cache) override + { + return _ram.try_alloc(size, numa_id, cache).convert( + + [&] (Ram_dataspace_capability cap) { + new (_md_alloc) Registered(_dataspaces, cap); + return cap; }, + + [&] (Alloc_error error) { + return error; }); + } void free(Ram_dataspace_capability ds_cap) override { diff --git a/repos/libports/src/lib/libc/malloc.cc b/repos/libports/src/lib/libc/malloc.cc index 1a9e67b3b7..6c4bca7e36 100644 --- a/repos/libports/src/lib/libc/malloc.cc +++ b/repos/libports/src/lib/libc/malloc.cc @@ -86,7 +86,7 @@ class Libc::Malloc SLAB_START = 5, /* 32 bytes (log2) */ SLAB_STOP = 11, /* 2048 bytes (log2) */ NUM_SLABS = (SLAB_STOP - SLAB_START) + 1, - DEFAULT_ALIGN = 16 + DEFAULT_ALIGN = 64 }; struct Metadata diff --git a/repos/libports/src/test/netty/tcp/main.cc b/repos/libports/src/test/netty/tcp/main.cc index 4c15a07a56..8ea8a72566 100644 --- a/repos/libports/src/test/netty/tcp/main.cc +++ b/repos/libports/src/test/netty/tcp/main.cc @@ -13,7 +13,7 @@ /* Local includes */ #include - +#include namespace Netty { struct Tcp; } @@ -59,56 +59,69 @@ void Netty::Tcp::server(int const sd, bool 
const nonblock, bool const read_write Genode::log("okay, accept will not block"); } - Genode::log("test in ", nonblock ? "non-blocking" : "blocking", " mode"); + //Genode::log("test in ", nonblock ? "non-blocking" : "blocking", " mode"); int const cd = accept(sd, pcaddr, &scaddr); - Genode::log("cd=", cd); + //Genode::log("cd=", cd); if (cd == -1) DIE("accept"); - getnames(cd); + //getnames(cd); - size_t count = 0; - static char data[64*1024]; if (nonblock) nonblocking(cd); - while (true) { - int ret = read_write - ? read(cd, data, sizeof(data)) - : recv(cd, data, sizeof(data), 0); + auto con_handler = std::thread{[cd, read_write, nonblock]() + { + size_t count = 0; + static char data[64*1024]; + while (true) + { + //GENODE_LOG_TSC_NAMED(10, "netty_read"); + int ret = read_write + ? read(cd, data, sizeof(data)) + : recv(cd, data, sizeof(data), 0); - if (ret == 0) { - Genode::log("experienced EOF"); - break; - } + if (ret == 0) + { + // Genode::log("experienced EOF"); + break; + } - if (ret > 0) { - /* echo received data */ - ret = read_write - ? write(cd, data, ret) - : send(cd, data, ret, 0); - if (ret == -1) DIE(read_write ? "write" : "send"); + if (ret > 0) + { + //GENODE_LOG_TSC_NAMED(10, "netty_write"); + /* echo received data */ + ret = read_write + ? write(cd, data, ret) + : send(cd, data, ret, 0); + if (ret == -1) + DIE(read_write ? "write" : "send"); - count += ret; - continue; - } + count += ret; + continue; + } - if (!nonblock || errno != EAGAIN) - DIE(read_write ? "read" : "recv"); + if (!nonblock || errno != EAGAIN) + DIE(read_write ? 
"read" : "recv"); - Genode::log("block in select because of EAGAIN"); - fd_set read_fds; FD_ZERO(&read_fds); FD_SET(cd, &read_fds); - ret = select(cd + 1, &read_fds, nullptr, nullptr, nullptr); - if (ret == -1) DIE("select"); - } + Genode::log("block in select because of EAGAIN"); + fd_set read_fds; + FD_ZERO(&read_fds); + FD_SET(cd, &read_fds); + ret = select(cd + 1, &read_fds, nullptr, nullptr, nullptr); + if (ret == -1) + DIE("select"); - Genode::log("echoed ", count, " bytes"); + ret = shutdown(cd, SHUT_RDWR); + if (ret == -1) DIE("shutdown"); - ret = shutdown(cd, SHUT_RDWR); - if (ret == -1) DIE("shutdown"); + ret = close(cd); + if (ret == -1) DIE("close"); + } + }}; + con_handler.detach(); - ret = close(cd); - if (ret == -1) DIE("close"); + //Genode::log("echoed ", count, " bytes"); } } diff --git a/repos/libports/src/test/netty/tcp/target.mk b/repos/libports/src/test/netty/tcp/target.mk index 14113d8715..b330e810cf 100644 --- a/repos/libports/src/test/netty/tcp/target.mk +++ b/repos/libports/src/test/netty/tcp/target.mk @@ -1,6 +1,6 @@ TARGET = test-netty_tcp SRC_CC = main.cc netty.cc -LIBS = base libc +LIBS = base libc stdcxx INC_DIR += $(PRG_DIR)/.. diff --git a/repos/mml/include/mxip/arch/cc.h b/repos/mml/include/mxip/arch/cc.h new file mode 100644 index 0000000000..b09a2aaefd --- /dev/null +++ b/repos/mml/include/mxip/arch/cc.h @@ -0,0 +1,66 @@ +/* + * \brief Some size definitions and macros needed by LwIP. + * \author Stefan Kalkowski + * \author Emery Hemingway + * \date 2009-11-10 + */ + +/* + * Copyright (C) 2009-2017 Genode Labs GmbH + * + * This file is part of the Genode OS framework, which is distributed + * under the terms of the GNU Affero General Public License version 3. 
+ */ + +#ifndef __LWIP__ARCH__CC_H__ +#define __LWIP__ARCH__CC_H__ + +#ifdef LITTLE_ENDIAN +#undef LITTLE_ENDIAN +#endif + +#ifdef BIG_ENDIAN +#undef BIG_ENDIAN +#endif + +#ifndef LWIP_RAND +genode_uint32_t genode_rand(); +#define LWIP_RAND() genode_rand() +#endif + +#include +#include +#include + +#ifndef LWIP_PLATFORM_DIAG +void lwip_printf(const char *format, ...); +#define LWIP_PLATFORM_DIAG(x) do { lwip_printf x; } while(0) +#endif /* LWIP_PLATFORM_DIAG */ + + +#ifdef GENODE_RELEASE +#define LWIP_PLATFORM_ASSERT(x) +#else /* GENODE_RELEASE */ +void lwip_platform_assert(char const* msg, char const *file, int line); +#define LWIP_PLATFORM_ASSERT(x) \ + do { \ + lwip_platform_assert(x, __FILE__, __LINE__); \ + } while (0) +#endif /* GENODE_RELEASE */ + + +/* + * XXX: Should these be inlined? + */ +void genode_memcpy( void *dst, const void *src, size_t len); +void *genode_memmove(void *dst, const void *src, size_t len); + +void genode_free(void *ptr); +void *genode_malloc(unsigned long size); +void *genode_calloc(unsigned long number, unsigned long size); + +#define mem_clib_free genode_free +#define mem_clib_malloc genode_malloc +#define mem_clib_calloc genode_calloc + +#endif /* __LWIP__ARCH__CC_H__ */ diff --git a/repos/mml/include/mxip/arch/perf.h b/repos/mml/include/mxip/arch/perf.h new file mode 100644 index 0000000000..eea977777f --- /dev/null +++ b/repos/mml/include/mxip/arch/perf.h @@ -0,0 +1,20 @@ +/* + * \brief Header file with macros needed by LwIP. + * \author Stefan Kalkowski + * \date 2009-11-10 + */ + +/* + * Copyright (C) 2009-2017 Genode Labs GmbH + * + * This file is part of the Genode OS framework, which is distributed + * under the terms of the GNU Affero General Public License version 3. 
+ */ + +#ifndef __LWIP__ARCH__PERF_H__ +#define __LWIP__ARCH__PERF_H__ + +#define PERF_START +#define PERF_STOP(x) + +#endif /* __LWIP__ARCH__PERF_H__ */ diff --git a/repos/mml/include/mxip/genode_init.h b/repos/mml/include/mxip/genode_init.h new file mode 100644 index 0000000000..6076a5839b --- /dev/null +++ b/repos/mml/include/mxip/genode_init.h @@ -0,0 +1,26 @@ +/* + * \brief Genode native lwIP initalization + * \author Emery Hemingway + * \date 2017-08-21 + */ + +/* + * Copyright (C) 2017 Genode Labs GmbH + * + * This file is part of the Genode OS framework, which is distributed + * under the terms of the GNU Affero General Public License version 3. + */ + +#ifndef _INCLUDE__LWIP__GENODE_INIT_H_ +#define _INCLUDE__LWIP__GENODE_INIT_H_ + +#include +#include + +namespace Mxip { + void mxip_init(mx::memory::dynamic::Allocator &heap, ::Timer::Connection &timer); + + Genode::Mutex &mutex(); +} + +#endif diff --git a/repos/mml/include/mxip/lwipopts.h b/repos/mml/include/mxip/lwipopts.h new file mode 100644 index 0000000000..41ac70f175 --- /dev/null +++ b/repos/mml/include/mxip/lwipopts.h @@ -0,0 +1,154 @@ +/* + * \brief Configuration file for LwIP, adapt it to your needs. + * \author Stefan Kalkowski + * \author Emery Hemingway + * \date 2009-11-10 + * + * See lwip/src/include/lwip/opt.h for all options + */ + +/* + * Copyright (C) 2009-2017 Genode Labs GmbH + * + * This file is part of the Genode OS framework, which is distributed + * under the terms of the GNU Affero General Public License version 3. + */ + +#ifndef __LWIP__LWIPOPTS_H__ +#define __LWIP__LWIPOPTS_H__ + +/* Genode includes */ +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * Use lwIP without OS-awareness + */ +#define NO_SYS 1 +#define SYS_LIGHTWEIGHT_PROT 0 + +#define LWIP_DNS 0 /* DNS support */ +#define LWIP_DHCP 0 /* DHCP support */ +#define LWIP_SOCKET 0 /* LwIP socket API */ +#define LWIP_NETIF_LOOPBACK 0 /* Looping back to same address? 
*/ +#define LWIP_STATS 0 /* disable stating */ +#define LWIP_ICMP 0 +#define LWIP_SNMP 0 +#define LWIP_TCP_TIMESTAMPS 0 +#define TCP_LISTEN_BACKLOG 255 +#define TCP_MSS 1460 +#define TCP_WND (46 * TCP_MSS) +#define TCP_SND_BUF (46 * TCP_MSS) +#define LWIP_WND_SCALE 3 +#define TCP_RCV_SCALE 2 +#define TCP_SND_QUEUELEN ((512 * (TCP_SND_BUF) + (TCP_MSS - 1))/(TCP_MSS)) + +#define LWIP_NETIF_STATUS_CALLBACK 1 /* callback function used for interface changes */ +#define LWIP_NETIF_LINK_CALLBACK 1 /* callback function used for link-state changes */ +#define LWIP_SUPPORT_CUSTOM_PBUF 1 + +#define LWIP_SINGLE_NETIF 1 + +#define TCP_QUEUE_OOSEQ 1 +#define LWIP_PCB_ARRAY 1 +/*********************************** + ** Checksum calculation settings ** + ***********************************/ + +/* checksum calculation for outgoing packets can be disabled if the hardware supports it */ +#define LWIP_CHECKSUM_ON_COPY 1 /* calculate checksum during memcpy */ + +/********************* + ** Memory settings ** + *********************/ + +#define MEM_LIBC_MALLOC 1 +#define MEMP_MEM_MALLOC 1 +#define MEMP_MEM_INIT 0 +#define MEMP_NUM_TCP_SEG (2*TCP_SND_QUEUELEN) +/* MEM_ALIGNMENT > 4 e.g. 
for x86_64 are not supported, see Genode issue #817 */ +#define MEM_ALIGNMENT 4 + +#define DEFAULT_ACCEPTMBOX_SIZE 128 +#define TCPIP_MBOX_SIZE 128 + +#define RECV_BUFSIZE_DEFAULT (512*1024) + +#define PBUF_POOL_SIZE 8192 + +#define MEMP_NUM_SYS_TIMEOUT 64 +#define MEMP_NUM_TCP_PCB 512 +#define MEMP_NUM_PBUF (128*4096) + +#ifndef MEMCPY +#define MEMCPY(dst,src,len) genode_memcpy(dst,src,len) +#endif + +#ifndef MEMMOVE +#define MEMMOVE(dst,src,len) genode_memmove(dst,src,len) +#endif + +/******************** + ** Debug settings ** + ********************/ +#define LWIP_NOASSERT 1 + +/* #define LWIP_DEBUG */ +/* #define DHCP_DEBUG LWIP_DBG_ON */ +/* #define ETHARP_DEBUG LWIP_DBG_ON */ +/* #define NETIF_DEBUG LWIP_DBG_ON */ +/* #define PBUF_DEBUG LWIP_DBG_ON */ +/* #define API_LIB_DEBUG LWIP_DBG_ON */ +/* #define API_MSG_DEBUG LWIP_DBG_ON */ +/* #define SOCKETS_DEBUG LWIP_DBG_ON */ +/* #define ICMP_DEBUG LWIP_DBG_ON */ +/* #define INET_DEBUG LWIP_DBG_ON */ +/* #define IP_DEBUG LWIP_DBG_ON */ +/* #define IP_REASS_DEBUG LWIP_DBG_ON */ +/* #define RAW_DEBUG LWIP_DBG_ON */ +/* #define MEM_DEBUG LWIP_DBG_ON */ +/* #define MEMP_DEBUG LWIP_DBG_ON */ +/* #define SYS_DEBUG LWIP_DBG_ON */ +/* #define TCP_DEBUG LWIP_DBG_ON */ + + +/* + ---------------------------------- + ---------- DHCP options ---------- + ---------------------------------- +*/ + +#define LWIP_DHCP_CHECK_LINK_UP 1 + + +/* + ---------------------------------------------- + ---------- Sequential layer options ---------- + ---------------------------------------------- +*/ +/* no Netconn API */ +#define LWIP_NETCONN 0 + + +/* + --------------------------------------- + ---------- IPv6 options --------------- + --------------------------------------- +*/ + +#define LWIP_IPV6 0 +#define IPV6_FRAG_COPYHEADER 1 + +#define LWIP_IPV4 1 +#define IPV4_FRAG_COPYHEADER 0 +#define IP_REASSEMBLY 0 +#define IP_FRAG 0 + +#ifdef __cplusplus +} +#endif + +#endif /* __LWIP__LWIPOPTS_H__ */ diff --git 
a/repos/mml/include/mxip/mxnic_netif.h b/repos/mml/include/mxip/mxnic_netif.h new file mode 100644 index 0000000000..040fecbe13 --- /dev/null +++ b/repos/mml/include/mxip/mxnic_netif.h @@ -0,0 +1,555 @@ +/* + * \brief LwIP netif for the Nic session + * \author Emery Hemingway + * \date 2016-09-28 + * + * If you want to use the lwIP API in a native Genode + * component then this is the Nic client to use. + */ + +/* + * Copyright (C) 2016-2017 Genode Labs GmbH + * + * This file is part of the Genode OS framework, which is distributed + * under the terms of the GNU Affero General Public License version 3. + */ + +#ifndef __LWIP__NIC_NETIF_H__ +#define __LWIP__NIC_NETIF_H__ + +#if ETH_PAD_SIZE +#error ETH_PAD_SIZE defined but unsupported by lwip/nic_netif.h +#endif + +#ifndef __cplusplus +#error lwip/nic_netif.h is a C++ only header +#endif + +/* Genode includes */ +#include +#include +#include +#include +#include + +/* MxTasking includes */ +#include +#include +#include + +namespace Lwip { + +extern "C" { +/* LwIP includes */ +#include +#include +#if LWIP_IPV6 +#include +#endif +#include +#include +#include +#include +#include +} + + class Nic_netif; + class Receive_task; + class Tx_ready_task; + class Link_state_task; + class Finished_rx_task; + + extern "C" { + + static void nic_netif_pbuf_free(pbuf *p); + static err_t nic_netif_init(struct netif *netif); + static err_t nic_netif_linkoutput(struct netif *netif, struct pbuf *p); + static void nic_netif_status_callback(struct netif *netif); + } + + /** + * Metadata for packet backed pbufs + */ + struct Nic_netif_pbuf + { + struct pbuf_custom p { }; + Nic_netif &netif; + Nic::Packet_descriptor packet; + + Nic_netif_pbuf(Nic_netif &nic, Nic::Packet_descriptor &pkt) + : netif(nic), packet(pkt) + { + p.custom_free_function = nic_netif_pbuf_free; + } + }; + +} + +class Lwip::Nic_netif +{ + friend class Lwip::Receive_task; + friend class Lwip::Tx_ready_task; + friend class Lwip::Link_state_task; + +private: + enum + { + 
PACKET_SIZE = Nic::Packet_allocator::DEFAULT_PACKET_SIZE, + BUF_SIZE = 128*PACKET_SIZE, + }; + + Genode::Tslab _pbuf_alloc; + + Nic::Packet_allocator _nic_tx_alloc; + Nic::Connection _nic; + + Genode::Entrypoint &_ep; + + struct netif _netif + { }; + + ip_addr_t ip { }; + ip_addr_t nm { }; + ip_addr_t gw { }; + + Genode::Io_signal_handler _link_state_handler; + Genode::Io_signal_handler _rx_packet_handler; + Genode::Io_signal_handler _tx_ready_handler; + + bool _dhcp { false }; + + std::unique_ptr _handler_allocator{nullptr}; + + public: + + void free_pbuf(Nic_netif_pbuf &pbuf) + { + bool message_once = true; + while (!_nic.rx()->ready_to_ack()) { + if (message_once) + Genode::error("Nic rx acknowledge queue congested."); + message_once = false; + _ep.wait_and_dispatch_one_io_signal(); + } + + _nic.rx()->try_ack_packet(pbuf.packet); + wake_up_nic_server(); + + destroy(_pbuf_alloc, &pbuf); + } + + Lwip::pbuf *alloc_pbuf(size_t len, const char *payload) + { + Lwip::pbuf_custom *pbuf = new (this->_pbuf_alloc) Lwip::pbuf_custom(); + + Lwip::pbuf *p = pbuf_alloced_custom(PBUF_TRANSPORT, len, PBUF_RAM, pbuf, static_cast(const_cast(payload)), len); + + return p; + } + + /************************* + ** Nic signal handlers ** + *************************/ + + void handle_link_state(); + void handle_rx_packets(); + + /** + * Handle tx ack_avail and ready_to_submit signals + */ + void handle_tx_ready(); + + void configure(Genode::Xml_node const &config) + { + _dhcp = config.attribute_value("dhcp", false); + + typedef Genode::String Str; + Str ip_str = config.attribute_value("ip_addr", Str()); + + Genode::log("Static IP: ", ip_str); + + if (_dhcp && ip_str != "") { + _dhcp = false; + netif_set_down(&_netif); + Genode::error("refusing to configure lwIP interface with both DHCP and a static IPv4 address"); + return; + } + + netif_set_up(&_netif); + + if (ip_str != "") { + ip_addr_t ipaddr; + if (!ipaddr_aton(ip_str.string(), &ipaddr)) { + Genode::error("lwIP configured with 
invalid IP address '",ip_str,"'"); + throw ip_str; + } + + netif_set_ipaddr(&_netif, ip_2_ip4(&ipaddr)); + + if (config.has_attribute("netmask")) { + Str str = config.attribute_value("netmask", Str()); + ip_addr_t ip; + ipaddr_aton(str.string(), &ip); + netif_set_netmask(&_netif, ip_2_ip4(&ip)); + } + + if (config.has_attribute("gateway")) { + Str str = config.attribute_value("gateway", Str()); + ip_addr_t ip; + ipaddr_aton(str.string(), &ip); + netif_set_gw(&_netif, ip_2_ip4(&ip)); + } + + } + + if (config.has_attribute("nameserver")) { + /* + * LwIP does not use DNS internally, but the application + * should expect "dns_getserver" to work regardless of + * how the netif configures itself. + */ + Str str = config.attribute_value("nameserver", Str()); + ip_addr_t ip; + ipaddr_aton(str.string(), &ip); + //dns_setserver(0, &ip); + } + + handle_link_state(); + } + + Nic_netif(Genode::Env &env, + Genode::Allocator &alloc, + Genode::Xml_node config) + : + _pbuf_alloc(alloc), _nic_tx_alloc(&alloc), + _nic(env, &_nic_tx_alloc, + BUF_SIZE, BUF_SIZE, + config.attribute_value("label", Genode::String<160>("lwip")).string()), _ep(env.ep()), + _link_state_handler(env.ep(), *this, &Nic_netif::handle_link_state), + _rx_packet_handler( env.ep(), *this, &Nic_netif::handle_rx_packets), + _tx_ready_handler( env.ep(), *this, &Nic_netif::handle_tx_ready) + { + Genode::memset(&_netif, 0x00, sizeof(_netif)); + + _handler_allocator.reset(new (mx::memory::GlobalHeap::allocate_cache_line_aligned(sizeof(mx::memory::dynamic::Allocator))) mx::memory::dynamic::Allocator()); + + { + ip4_addr_t v4dummy; + IP4_ADDR(&v4dummy, 0, 0, 0, 0); + + netif* r = netif_add(&_netif, &v4dummy, &v4dummy, &v4dummy, + this, nic_netif_init, ethernet_input); + if (r == NULL) { + Genode::error("failed to initialize Nic to lwIP interface"); + throw r; + } + } + + netif_set_default(&_netif); + netif_set_status_callback( + &_netif, nic_netif_status_callback); + nic_netif_status_callback(&_netif); + + configure(config); 
+ } + + virtual ~Nic_netif() { } + + Lwip::netif& lwip_netif() { return _netif; } + + /** + * Status callback to override in subclass + */ + virtual void status_callback() { } + + /** + * Callback issued by lwIP to initialize netif struct + * + * \noapi + */ + err_t init() + { + /* + * XXX: hostname and MTU could probably be + * set in the Nic client constructor + */ + +#if LWIP_NETIF_HOSTNAME + /* Initialize interface hostname */ + _netif.hostname = ""; +#endif /* LWIP_NETIF_HOSTNAME */ + + Genode::log("Setting name to en"); + _netif.name[0] = 'e'; + _netif.name[1] = 'n'; + + Genode::log("Setting callbacks"); + _netif.output = etharp_output; +#if LWIP_IPV6 + _netif.output_ip6 = ethip6_output; +#endif /* LWIP_IPV6 */ + + _netif.linkoutput = nic_netif_linkoutput; + + /* Set physical MAC address */ + Genode::log("Setting MAC address"); + Nic::Mac_address const mac = _nic.mac_address(); + for(int i=0; i<6; ++i) + _netif.hwaddr[i] = mac.addr[i]; + + Genode::log("Setting MTU and flags"); + _netif.mtu = 1500; /* XXX: just a guess */ + _netif.hwaddr_len = ETHARP_HWADDR_LEN; + _netif.flags = NETIF_FLAG_BROADCAST | + NETIF_FLAG_ETHARP | + NETIF_FLAG_LINK_UP; + + /* set Nic session signal handlers */ + Genode::log("Setting NIC handlers"); + _nic.link_state_sigh(_link_state_handler); + _nic.rx_channel()->sigh_packet_avail(_rx_packet_handler); + _nic.rx_channel()->sigh_ready_to_ack(_rx_packet_handler); + _nic.tx_channel()->sigh_ready_to_submit(_tx_ready_handler); + _nic.tx_channel()->sigh_ack_avail (_tx_ready_handler); + + Genode::log("Finished init of netif"); + return ERR_OK; + } + + /** + * Callback issued by lwIP to write a Nic packet + * + * \noapi + */ + err_t linkoutput(struct pbuf *p) + { + auto &tx = *_nic.tx(); + //GENODE_LOG_TSC(1); + + /* flush acknowledgements */ + while (tx.ack_avail()) + tx.release_packet(tx.get_acked_packet()); + + if (!tx.ready_to_submit()) { + Genode::error("lwIP: Nic packet queue congested, cannot send packet"); + return ERR_WOULDBLOCK; + } 
+ + Nic::Packet_descriptor packet; + try { packet = tx.alloc_packet(p->tot_len); } + catch (...) { + Genode::error("lwIP: Nic packet allocation failed, cannot send packet"); + return ERR_WOULDBLOCK; + } + + /* + * We iterate over the pbuf chain until we have read the entire + * pbuf into the packet. + */ + char *dst = tx.packet_content(packet); + for(struct pbuf *q = p; q != 0; q = q->next) { + char const *src = (char*)q->payload; + Genode::memcpy(dst, src, q->len); + dst += q->len; + } + + tx.try_submit_packet(packet); + wake_up_nic_server(); + LINK_STATS_INC(link.xmit); + return ERR_OK; + } + + bool ready() + { + return netif_is_up(&_netif) && + !ip_addr_isany(&_netif.ip_addr); + } + + void wake_up_nic_server() + { + _nic.rx()->wakeup(); + _nic.tx()->wakeup(); + } +}; + +class Lwip::Finished_rx_task : public mx::tasking::TaskInterface +{ + public: + Finished_rx_task(Lwip::Nic_netif &netif, Nic_netif_pbuf *pbuf) : _netif(netif), _pbuf(pbuf) {} + + mx::tasking::TaskResult execute(std::uint16_t, std::uint16_t) override + { + //Genode::log("Executing finished rx task"); + _netif.free_pbuf(*_pbuf); + + return mx::tasking::TaskResult::make_null(); + } + + private: + Lwip::Nic_netif &_netif; + struct Lwip::Nic_netif_pbuf *_pbuf; +}; + +class Lwip::Receive_task : public mx::tasking::TaskInterface +{ +public: + Receive_task(Lwip::pbuf *pbuf, struct netif &netif, Lwip::Nic_netif &net, Lwip::Nic_netif_pbuf *npbuf) : _netif(netif), _pbuf(pbuf), _npbuf(npbuf), _net(net) {} + + mx::tasking::TaskResult execute(std::uint16_t, std::uint16_t) override + { + Lwip::err_t rc = _netif.input(_pbuf, &_netif); + + + if (rc != Lwip::ERR_OK) + { + Genode::error("error forwarding Nic packet to lwIP: error=", static_cast(rc)); + pbuf_free(_pbuf); + } + + _net._handler_allocator->free(this); + return mx::tasking::TaskResult::make_null(); + } + +private: + struct netif &_netif; + struct Lwip::pbuf *_pbuf; + Lwip::Nic_netif_pbuf *_npbuf; + Lwip::Nic_netif &_net; +}; + +class Lwip::Tx_ready_task 
: public mx::tasking::TaskInterface +{ + public: + Tx_ready_task(Nic::Connection &nic, Lwip::Nic_netif &netif) : _nic(nic), _netif(netif) {} + mx::tasking::TaskResult execute(std::uint16_t, std::uint16_t) override + { + auto &tx = *_nic.tx(); + bool progress = false; + + while (tx.ack_avail()) + { + tx.release_packet(tx.try_get_acked_packet()); + progress = true; + } + + if (progress) + _netif.wake_up_nic_server(); + + _netif._handler_allocator->free(this); + return mx::tasking::TaskResult::make_null(); + + /* notify subclass to resume pending transmissions */ + //status_callback(); + } + + private: + Nic::Connection &_nic; + Lwip::Nic_netif &_netif; +}; + +class Lwip::Link_state_task : public mx::tasking::TaskInterface +{ + public: + Link_state_task(Nic::Connection &nic, Lwip::netif &netif, Lwip::Nic_netif &nic_netif, bool dhcp) : _nic(nic), _nic_netif(nic_netif), _netif(netif), _dhcp(dhcp) {} + + mx::tasking::TaskResult execute(std::uint16_t, std::uint16_t) override + { + /* + * if the application wants to be informed of the + * link state then it should use 'set_link_callback' + */ + if (_nic.link_state()) { + netif_set_link_up(&_netif); + /*if (_dhcp) { + err_t err = dhcp_start(&_netif); + if (err != ERR_OK) { + Genode::error("failed to configure lwIP interface with DHCP, error ", -err); + } + } else { + //dhcp_inform(&_netif); + }*/ + } else { + netif_set_link_down(&_netif); + if (_dhcp) { + //dhcp_release_and_stop(&_netif); + } + } + _nic_netif._handler_allocator->free(this); + return mx::tasking::TaskResult::make_null(); + } + private: + Nic::Connection &_nic; + Lwip::Nic_netif &_nic_netif; + Lwip::netif &_netif; + bool _dhcp; +}; + + +/************************** + ** LwIP netif callbacks ** + **************************/ + +namespace Lwip +{ + extern "C" { + +/** + * Free a packet buffer backed pbuf + */ +static void nic_netif_pbuf_free(pbuf *p) +{ + Nic_netif_pbuf *nic_pbuf = reinterpret_cast(p); + nic_pbuf->netif.free_pbuf(*nic_pbuf); +} + + +/** + * 
Initialize the netif + */ +static err_t nic_netif_init(struct netif *netif) +{ + Lwip::Nic_netif *nic_netif = (Lwip::Nic_netif *)netif->state; + return nic_netif->init(); +} + + +/** + * Send a raw packet to the Nic session + */ +static err_t nic_netif_linkoutput(struct netif *netif, struct pbuf *p) +{ + Lwip::Nic_netif *nic_netif = (Lwip::Nic_netif *)netif->state; + return nic_netif->linkoutput(p); +} + + +static void nic_netif_status_callback(struct netif *netif) +{ + Lwip::Nic_netif *nic_netif = (Lwip::Nic_netif *)netif->state; + + if (netif_is_up(netif)) { + /*if (IP_IS_V6_VAL(netif->ip_addr)) { + Genode::log("lwIP Nic interface up" + ", address=",(char const*)ip6addr_ntoa(netif_ip6_addr(netif, 0))); + } else */if (!ip4_addr_isany(netif_ip4_addr(netif))) { + typedef Genode::String Str; + Str address((char const*)ip4addr_ntoa(netif_ip4_addr(netif))); + Str netmask((char const*)ip4addr_ntoa(netif_ip4_netmask(netif))); + Str gateway((char const*)ip4addr_ntoa(netif_ip4_gw(netif))); + + Genode::log("lwIP Nic interface up" + " address=", address, + " netmask=", netmask, + " gateway=", gateway); + } + } else { + Genode::log("lwIP Nic interface down"); + } + + nic_netif->status_callback(); +} + + } +} + +#endif /* __LWIP__NIC_NETIF_H__ */ diff --git a/repos/mml/lib/import/import-mxip.mk b/repos/mml/lib/import/import-mxip.mk new file mode 100644 index 0000000000..0176efdfea --- /dev/null +++ b/repos/mml/lib/import/import-mxip.mk @@ -0,0 +1,2 @@ +INC_DIR += $(call select_from_ports,lwip)/include/lwip +INC_DIR += $(call select_from_repositories,include/mxip) diff --git a/repos/mml/lib/mk/mxip.mk b/repos/mml/lib/mk/mxip.mk new file mode 100644 index 0000000000..ac0c7ad764 --- /dev/null +++ b/repos/mml/lib/mk/mxip.mk @@ -0,0 +1,49 @@ +# +# lwIP TCP/IP library +# +# The library implements TCP and UDP as well as DNS and DHCP. 
+# + +LWIP_PORT_DIR := $(call select_from_ports,mxip) +LWIPDIR := $(LWIP_PORT_DIR)/src/lib/lwip/src + +-include $(LWIPDIR)/Filelists.mk + +# Genode platform files +SRC_CC = printf.cc rand.cc sys_arch.cc mxnic_netif.cc + +# Core files +SRC_C += $(notdir $(COREFILES)) + +# IPv4 files +SRC_C += $(notdir $(CORE4FILES)) + +# IPv6 files +SRC_C += $(notdir $(CORE6FILES)) + +# Network interface files +SRC_C += $(notdir $(NETIFFILES)) + +INC_DIR += $(REP_DIR)/include/mxip \ + $(LWIP_PORT_DIR)/include/lwip \ + $(LWIPDIR)/include \ + $(LWIPDIR)/include/ipv4 \ + $(LWIPDIR)/include/api \ + $(LWIPDIR)/include/netif \ + +vpath %.cc $(REP_DIR)/src/lib/mxip/platform +vpath %.c $(sort $(dir \ + $(COREFILES) $(CORE4FILES) $(CORE6FILES) $(NETIFFILES))) + +GENODE_GCC_TOOLCHAIN_DIR ?= /usr/local/genode/tool/21.05 + +CUSTOM_CXX = /usr/local/genode/tool/bin/clang++ +CUSTOM_CC = /usr/local/genode/tool/bin/clang + +CC_OPT := --target=x86_64-genode --sysroot=/does/not/exist --gcc-toolchain=$(GENODE_GCC_TOOLCHAIN_DIR) -DCLANG_CXX11_ATOMICS -Wno-error=all -Wno-error=conversion -Wno-error=effc++ -Wno-error=unknown-attributes -g -DNDEBUG -I$(MXINC_DIR) -std=c++20 -mssse3 #-D_GLIBCXX_ATOMIC_BUILTINS_8 -D__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8 + +CC_OLEVEL = -O3 + +LIBS += libm libc stdcxx mxtasking +EXT_OBJECTS += /usr/local/genode/tool/lib/clang/14.0.5/lib/linux/libclang_rt.builtins-x86_64.a /usr/local/genode/tool/lib/libatomic.a + diff --git a/repos/mml/ports/ciao-ip.hash b/repos/mml/ports/ciao-ip.hash new file mode 100644 index 0000000000..d3d8276f72 --- /dev/null +++ b/repos/mml/ports/ciao-ip.hash @@ -0,0 +1 @@ +9428aff1932ed69e452867f71012ee91412fc1fa diff --git a/repos/mml/ports/ciao-ip.port b/repos/mml/ports/ciao-ip.port new file mode 100644 index 0000000000..5bdbb6a2f5 --- /dev/null +++ b/repos/mml/ports/ciao-ip.port @@ -0,0 +1,146 @@ +LICENSE := GPL +DOWNLOADS := ciao-ip.git +VERSION := git + +URL(ciao-ip) := git@ess-git.inf.uos.de:software/ciao-ip.git +REV(ciao-ip) := master +DIR(ciao-ip) := 
src/lib/ciao-ip + +CC_OPT += "--target x86_64-linux-gnu" +#ifndef CIAO_IP_CONFIG +# CIAO_IP_CONFIG = $(realpath $(dir $(PORT)))/.ciao-ip-config +#endif + +# Include the configuration file +#include $(CIAO_IP_CONFIG) + +#ACXX := $(which ag++) +#ASPECTSRC = $(shell find -L ./src/lib/ciao-ip/src/ -name "*.ah" -not -name ".*"|cut -b 3-) + +# Apply rules for conditional compilation, i.e., remove files if not selected +#include $(realpath $(dir $(PORT)))/ciao-ip-config.mk + +#ACXXFLAGS += $(foreach file,$(ASPECTSRC),-a $(file)) -p ./src/lib/ciao-ip/src/ + +DIRS += include/ciao-ip/hw/hal +DIRS += include/ciao-ip/ipstack/router +DIRS += include/ciao-ip/ipstack/api +DIRS += include/ciao-ip/ipstack/arp +DIRS += include/ciao-ip/ipstack/arp/ipv4 +DIRS += include/ciao-ip/ipstack/demux +DIRS += include/ciao-ip/ipstack/icmp +DIRS += include/ciao-ip/ipstack/ipv4/ipv4_icmp +DIRS += include/ciao-ip/ipstack/ipv4/ipv4_tcp +DIRS += include/ciao-ip/ipstack/ipv4/ipv4_udp +DIRS += include/ciao-ip/ipstack/ipv4 +DIRS += include/ciao-ip/ipstack/router +DIRS += include/ciao-ip/ipstack/tcp/tcp_history +DIRS += include/ciao-ip/ipstack/tcp/tcp_receivebuffer +DIRS += include/ciao-ip/ipstack/tcp/sws +DIRS += include/ciao-ip/ipstack/tcp/statemachine +DIRS += include/ciao-ip/ipstack/tcp/tcp_options +DIRS += include/ciao-ip/ipstack/tcp +DIRS += include/ciao-ip/ipstack/udp +DIRS += include/ciao-ip/ipstack/util +DIRS += include/ciao-ip/ipstack +DIRS += include/ciao-ip/util + + + +# for f in $(find build -name "*.ah"); do DST=$(dirname $f | sed -s 's/build\/header/include\/ciao-ip/' | sed -s "s/include/DIR_CONTENT(include/" | sed -s "s/$/) += /"); echo $DST $f; done +DIR_CONTENT(include/ciao-ip/hw/hal) += src/lib/ciao-ip/build/header/hw/hal/NetworkDevice.h +DIR_CONTENT(include/ciao-ip/ipstack/icmp) += src/lib/ciao-ip/build/header/ipstack/icmp/ICMP.h +DIR_CONTENT(include/ciao-ip/ipstack/router) += src/lib/ciao-ip/build/header/ipstack/router/Interface.h +DIR_CONTENT(include/ciao-ip/ipstack/router) += 
src/lib/ciao-ip/build/header/ipstack/router/Router.h +DIR_CONTENT(include/ciao-ip/ipstack/util) += src/lib/ciao-ip/build/header/ipstack/util/MempoolBase.h +DIR_CONTENT(include/ciao-ip/ipstack/util) += src/lib/ciao-ip/build/header/ipstack/util/RingbufferBase.h +DIR_CONTENT(include/ciao-ip/ipstack/util) += src/lib/ciao-ip/build/header/ipstack/util/Mempool.h +DIR_CONTENT(include/ciao-ip/ipstack/util) += src/lib/ciao-ip/build/header/ipstack/util/Ringbuffer.h +DIR_CONTENT(include/ciao-ip/ipstack) += src/lib/ciao-ip/build/header/ipstack/Eth_Frame.h +DIR_CONTENT(include/ciao-ip/ipstack/as) += src/lib/ciao-ip/build/header/ipstack/as/EventSupport.h +DIR_CONTENT(include/ciao-ip/ipstack) += src/lib/ciao-ip/build/header/ipstack/Clock.h +DIR_CONTENT(include/ciao-ip/ipstack) += src/lib/ciao-ip/build/header/ipstack/IPStack_Config_kconf.h +DIR_CONTENT(include/ciao-ip/ipstack/udp) += src/lib/ciao-ip/build/header/ipstack/udp/UDP.h +DIR_CONTENT(include/ciao-ip/ipstack/udp) += src/lib/ciao-ip/build/header/ipstack/udp/UDP_Socket.h +DIR_CONTENT(include/ciao-ip/ipstack/tcp) += src/lib/ciao-ip/build/header/ipstack/tcp/TCP.h +DIR_CONTENT(include/ciao-ip/ipstack/tcp/tcp_receivebuffer) += src/lib/ciao-ip/build/header/ipstack/tcp/tcp_receivebuffer/TCP_ReceiveBuffer.h +DIR_CONTENT(include/ciao-ip/ipstack/tcp/tcp_receivebuffer) += src/lib/ciao-ip/build/header/ipstack/tcp/tcp_receivebuffer/TCP_RecvElement.h +DIR_CONTENT(include/ciao-ip/ipstack/tcp) += src/lib/ciao-ip/build/header/ipstack/tcp/TCP_Socket.h +DIR_CONTENT(include/ciao-ip/ipstack/tcp) += src/lib/ciao-ip/build/header/ipstack/tcp/TCP_Config.h +DIR_CONTENT(include/ciao-ip/ipstack/tcp/tcp_history) += src/lib/ciao-ip/build/header/ipstack/tcp/tcp_history/TCP_Record.h +DIR_CONTENT(include/ciao-ip/ipstack/tcp/tcp_history) += src/lib/ciao-ip/build/header/ipstack/tcp/tcp_history/TCP_History.h +DIR_CONTENT(include/ciao-ip/ipstack/api) += src/lib/ciao-ip/build/header/ipstack/api/Setup.h +DIR_CONTENT(include/ciao-ip/ipstack/api) += 
src/lib/ciao-ip/build/header/ipstack/api/IPv4_UDP_Socket.h +DIR_CONTENT(include/ciao-ip/ipstack/api) += src/lib/ciao-ip/build/header/ipstack/api/IPv4_TCP_Socket.h +DIR_CONTENT(include/ciao-ip/ipstack/demux) += src/lib/ciao-ip/build/header/ipstack/demux/Demux.h +DIR_CONTENT(include/ciao-ip/ipstack/ipv4/ipv4_tcp) += src/lib/ciao-ip/build/header/ipstack/ipv4/ipv4_tcp/IPv4_TCP_Socket.h +DIR_CONTENT(include/ciao-ip/ipstack/ipv4) += src/lib/ciao-ip/build/header/ipstack/ipv4/IPv4.h +DIR_CONTENT(include/ciao-ip/ipstack/ipv4) += src/lib/ciao-ip/build/header/ipstack/ipv4/IPv4_Socket.h +DIR_CONTENT(include/ciao-ip/ipstack/ipv4/ipv4_udp) += src/lib/ciao-ip/build/header/ipstack/ipv4/ipv4_udp/IPv4_UDP_Socket.h +DIR_CONTENT(include/ciao-ip/ipstack/ipv4) += src/lib/ciao-ip/build/header/ipstack/ipv4/InternetChecksum.h +DIR_CONTENT(include/ciao-ip/ipstack) += src/lib/ciao-ip/build/header/ipstack/IPStack_Config.h +DIR_CONTENT(include/ciao-ip/ipstack/arp) += src/lib/ciao-ip/build/header/ipstack/arp/ARP_Cache.h +DIR_CONTENT(include/ciao-ip/ipstack/arp) += src/lib/ciao-ip/build/header/ipstack/arp/ARP.h +DIR_CONTENT(include/ciao-ip/ipstack/arp/ipv4) += src/lib/ciao-ip/build/header/ipstack/arp/ipv4/Eth_ARP_IPv4_Packet.h +DIR_CONTENT(include/ciao-ip/util) += src/lib/ciao-ip/build/header/util/types.h +DIR_CONTENT(include/ciao-ip/ipstack) += src/lib/ciao-ip/build/header/ipstack/Little_Endian.ah +DIR_CONTENT(include/ciao-ip/ipstack/router) += src/lib/ciao-ip/build/header/ipstack/router/Interface_Delegation.ah +DIR_CONTENT(include/ciao-ip/ipstack/router) += src/lib/ciao-ip/build/header/ipstack/router/Router.ah +DIR_CONTENT(include/ciao-ip/ipstack/router) += src/lib/ciao-ip/build/header/ipstack/router/Interface.ah +DIR_CONTENT(include/ciao-ip/ipstack/tcp/sws) += src/lib/ciao-ip/build/header/ipstack/tcp/sws/SWS_SenderAvoidance.ah +DIR_CONTENT(include/ciao-ip/ipstack/tcp/sws) += src/lib/ciao-ip/build/header/ipstack/tcp/sws/SWS_ReceiverAvoidance.ah +DIR_CONTENT(include/ciao-ip/ipstack/tcp/sws) += 
src/lib/ciao-ip/build/header/ipstack/tcp/sws/SWS_SenderAvoidance_Slice.ah +DIR_CONTENT(include/ciao-ip/ipstack/tcp/statemachine) += src/lib/ciao-ip/build/header/ipstack/tcp/statemachine/synsent.ah +DIR_CONTENT(include/ciao-ip/ipstack/tcp/statemachine) += src/lib/ciao-ip/build/header/ipstack/tcp/statemachine/synrcvd.ah +DIR_CONTENT(include/ciao-ip/ipstack/tcp/statemachine) += src/lib/ciao-ip/build/header/ipstack/tcp/statemachine/listen.ah +DIR_CONTENT(include/ciao-ip/ipstack/tcp/statemachine) += src/lib/ciao-ip/build/header/ipstack/tcp/statemachine/closed.ah +DIR_CONTENT(include/ciao-ip/ipstack/tcp/statemachine) += src/lib/ciao-ip/build/header/ipstack/tcp/statemachine/closewait.ah +DIR_CONTENT(include/ciao-ip/ipstack/tcp/statemachine) += src/lib/ciao-ip/build/header/ipstack/tcp/statemachine/finwait2.ah +DIR_CONTENT(include/ciao-ip/ipstack/tcp/statemachine) += src/lib/ciao-ip/build/header/ipstack/tcp/statemachine/closing.ah +DIR_CONTENT(include/ciao-ip/ipstack/tcp/statemachine) += src/lib/ciao-ip/build/header/ipstack/tcp/statemachine/established.ah +DIR_CONTENT(include/ciao-ip/ipstack/tcp/statemachine) += src/lib/ciao-ip/build/header/ipstack/tcp/statemachine/finwait1.ah +DIR_CONTENT(include/ciao-ip/ipstack/tcp/statemachine) += src/lib/ciao-ip/build/header/ipstack/tcp/statemachine/TCP_Statemachine.ah +DIR_CONTENT(include/ciao-ip/ipstack/tcp/statemachine) += src/lib/ciao-ip/build/header/ipstack/tcp/statemachine/lastack.ah +DIR_CONTENT(include/ciao-ip/ipstack/tcp/statemachine) += src/lib/ciao-ip/build/header/ipstack/tcp/statemachine/synsent_dummy.ah +DIR_CONTENT(include/ciao-ip/ipstack/tcp/statemachine) += src/lib/ciao-ip/build/header/ipstack/tcp/statemachine/timewait.ah +DIR_CONTENT(include/ciao-ip/ipstack/tcp/tcp_options) += src/lib/ciao-ip/build/header/ipstack/tcp/tcp_options/MSS.ah +DIR_CONTENT(include/ciao-ip/ipstack/tcp/tcp_options) += src/lib/ciao-ip/build/header/ipstack/tcp/tcp_options/MSS_TCP_Segment_Slice.ah 
+DIR_CONTENT(include/ciao-ip/ipstack/tcp/tcp_options) += src/lib/ciao-ip/build/header/ipstack/tcp/tcp_options/MSS_TCP_Socket_Slice.ah +DIR_CONTENT(include/ciao-ip/ipstack/tcp/tcp_history) += src/lib/ciao-ip/build/header/ipstack/tcp/tcp_history/TCP_Record_RetransmissionCounter.ah +DIR_CONTENT(include/ciao-ip/ipstack/tcp/tcp_history) += src/lib/ciao-ip/build/header/ipstack/tcp/tcp_history/TCP_Record_RetransmissionCounter_Slice.ah +DIR_CONTENT(include/ciao-ip/ipstack/demux) += src/lib/ciao-ip/build/header/ipstack/demux/Demux.ah +DIR_CONTENT(include/ciao-ip/ipstack/ipv4/ipv4_tcp) += src/lib/ciao-ip/build/header/ipstack/ipv4/ipv4_tcp/IPv4_TCP_Listen.ah +DIR_CONTENT(include/ciao-ip/ipstack/ipv4/ipv4_tcp) += src/lib/ciao-ip/build/header/ipstack/ipv4/ipv4_tcp/IPv4_TCP_Tx_Checksumming.ah +DIR_CONTENT(include/ciao-ip/ipstack/ipv4/ipv4_tcp) += src/lib/ciao-ip/build/header/ipstack/ipv4/ipv4_tcp/IPv4_TCP_Socket_Listen_Slice.ah +DIR_CONTENT(include/ciao-ip/ipstack/ipv4/ipv4_tcp) += src/lib/ciao-ip/build/header/ipstack/ipv4/ipv4_tcp/Demux_IPv4_TCP_Slice.ah +DIR_CONTENT(include/ciao-ip/ipstack/ipv4/ipv4_tcp) += src/lib/ciao-ip/build/header/ipstack/ipv4/ipv4_tcp/IPv4_TCP_Receive.ah +DIR_CONTENT(include/ciao-ip/ipstack/ipv4/ipv4_tcp) += src/lib/ciao-ip/build/header/ipstack/ipv4/ipv4_tcp/Demux_IPv4_TCP_Listen_Slice.ah +DIR_CONTENT(include/ciao-ip/ipstack/ipv4) += src/lib/ciao-ip/build/header/ipstack/ipv4/Demux_IPv4_Slice.ah +DIR_CONTENT(include/ciao-ip/ipstack/ipv4) += src/lib/ciao-ip/build/header/ipstack/ipv4/IPv4_Receive_Ethernet.ah +DIR_CONTENT(include/ciao-ip/ipstack/ipv4) += src/lib/ciao-ip/build/header/ipstack/ipv4/Router_IPv4_Slice.ah +DIR_CONTENT(include/ciao-ip/ipstack/ipv4) += src/lib/ciao-ip/build/header/ipstack/ipv4/IPv4_Receive.ah +DIR_CONTENT(include/ciao-ip/ipstack/ipv4) += src/lib/ciao-ip/build/header/ipstack/ipv4/IPv4_Socket_Ethernet.ah +DIR_CONTENT(include/ciao-ip/ipstack/ipv4/ipv4_icmp) += src/lib/ciao-ip/build/header/ipstack/ipv4/ipv4_icmp/IPv4_ICMP_Ethernet.ah 
+DIR_CONTENT(include/ciao-ip/ipstack/ipv4/ipv4_icmp) += src/lib/ciao-ip/build/header/ipstack/ipv4/ipv4_icmp/IPv4_ICMP_Receive.ah +DIR_CONTENT(include/ciao-ip/ipstack/ipv4/ipv4_icmp) += src/lib/ciao-ip/build/header/ipstack/ipv4/ipv4_icmp/IPv4_ICMP_Echo_Reply.ah +DIR_CONTENT(include/ciao-ip/ipstack/ipv4/ipv4_icmp) += src/lib/ciao-ip/build/header/ipstack/ipv4/ipv4_icmp/Demux_IPv4_ICMP_Slice.ah +DIR_CONTENT(include/ciao-ip/ipstack/ipv4) += src/lib/ciao-ip/build/header/ipstack/ipv4/IPv4.ah +DIR_CONTENT(include/ciao-ip/ipstack/ipv4) += src/lib/ciao-ip/build/header/ipstack/ipv4/Interface_IPv4_Slice.ah +DIR_CONTENT(include/ciao-ip/ipstack) += src/lib/ciao-ip/build/header/ipstack/Receive_Ethernet.ah +DIR_CONTENT(include/ciao-ip/ipstack/arp) += src/lib/ciao-ip/build/header/ipstack/arp/ARP_Ethernet.ah +DIR_CONTENT(include/ciao-ip/ipstack/arp) += src/lib/ciao-ip/build/header/ipstack/arp/ARP.ah +DIR_CONTENT(include/ciao-ip/ipstack/arp) += src/lib/ciao-ip/build/header/ipstack/arp/Demux_ARP_Slice.ah +DIR_CONTENT(include/ciao-ip/ipstack/arp/ipv4) += src/lib/ciao-ip/build/header/ipstack/arp/ipv4/ARP_Cache_IPv4_Slice.ah +DIR_CONTENT(include/ciao-ip/ipstack/arp/ipv4) += src/lib/ciao-ip/build/header/ipstack/arp/ipv4/ARP_Cache_IPv4_Send_Receive_Slice.ah +DIR_CONTENT(include/ciao-ip/ipstack/arp/ipv4) += src/lib/ciao-ip/build/header/ipstack/arp/ipv4/ARP_Cache_IPv4_Send_Slice.ah +DIR_CONTENT(include/ciao-ip/ipstack/arp/ipv4) += src/lib/ciao-ip/build/header/ipstack/arp/ipv4/IPv4_ARP_Send.ah +DIR_CONTENT(include/ciao-ip/ipstack/arp/ipv4) += src/lib/ciao-ip/build/header/ipstack/arp/ipv4/IPv4_Socket_Ethernet_ARP_Slice.ah +DIR_CONTENT(include/ciao-ip/ipstack/arp/ipv4) += src/lib/ciao-ip/build/header/ipstack/arp/ipv4/IPv4_ARP.ah +DIR_CONTENT(include/ciao-ip/ipstack/arp/ipv4) += src/lib/ciao-ip/build/header/ipstack/arp/ipv4/IPv4_Socket_Ethernet_ARP.ah +DIR_CONTENT(include/ciao-ip/ipstack/arp/ipv4) += src/lib/ciao-ip/build/header/ipstack/arp/ipv4/IPv4_ARP_Receive.ah 
+DIR_CONTENT(include/ciao-ip/ipstack/arp/ipv4) += src/lib/ciao-ip/build/header/ipstack/arp/ipv4/IPv4_ARP_Send_Receive.ah + +headers: + ACXXFLAGS=$(CC_OPT) make -C src/lib/ciao-ip header + +_dirs: headers \ No newline at end of file diff --git a/repos/mml/ports/mxip.hash b/repos/mml/ports/mxip.hash new file mode 100644 index 0000000000..edaa2b4e63 --- /dev/null +++ b/repos/mml/ports/mxip.hash @@ -0,0 +1 @@ +e310a8d13995d4b3110cb727a298d781b4f03504 diff --git a/repos/mml/ports/mxip.port b/repos/mml/ports/mxip.port new file mode 100644 index 0000000000..461b4202af --- /dev/null +++ b/repos/mml/ports/mxip.port @@ -0,0 +1,23 @@ +LICENSE := BSD +VERSION := 2.1.2 +DOWNLOADS := lwip.archive + +URL(lwip) := http://git.savannah.nongnu.org/cgit/lwip.git/snapshot/lwip-STABLE-2_1_2_RELEASE.tar.gz +SHA(lwip) := da6a3e07944505e6add328f6efafea4ad670700731f36b1ba54bd43d4f35243e +DIR(lwip) := src/lib/lwip + +UNPACKED_DIR := src/lib/lwip + +PATCHES := $(wildcard $(REP_DIR)/src/lib/lwip/*.patch) +PATCH_OPT := -p1 -d src/lib/lwip + +DIRS := \ + include/lwip/lwip \ + include/lwip/lwip/priv \ + include/lwip/lwip/prot \ + include/lwip/netif \ + +DIR_CONTENT(include/lwip/lwip/priv) := $(UNPACKED_DIR)/src/include/lwip/priv/*.h +DIR_CONTENT(include/lwip/lwip/prot) := $(UNPACKED_DIR)/src/include/lwip/prot/*.h +DIR_CONTENT(include/lwip/lwip) := $(UNPACKED_DIR)/src/include/lwip/*.h +DIR_CONTENT(include/lwip/netif) := $(UNPACKED_DIR)/src/include/netif/*.h diff --git a/repos/mml/recipes/api/mxip/content.mk b/repos/mml/recipes/api/mxip/content.mk new file mode 100644 index 0000000000..8c28bde149 --- /dev/null +++ b/repos/mml/recipes/api/mxip/content.mk @@ -0,0 +1,20 @@ +MIRROR_FROM_REP_DIR := \ + $(shell cd $(REP_DIR); find include/mxip src/lib/mxip -type f) \ + lib/import/import-mxip.mk \ + lib/mk/mxip.mk \ + +PORT_DIR := $(call port_dir,$(REP_DIR)/ports/mxip) + +MIRROR_FROM_PORT_DIR := $(shell cd $(PORT_DIR); find include src -type f) + +content: $(MIRROR_FROM_REP_DIR) $(MIRROR_FROM_PORT_DIR) 
LICENSE + +$(MIRROR_FROM_REP_DIR): + $(mirror_from_rep_dir) + +$(MIRROR_FROM_PORT_DIR): + mkdir -p $(dir $@) + cp -r $(PORT_DIR)/$@ $@ + +LICENSE: + cp $(PORT_DIR)/src/lib/lwip/COPYING $@ diff --git a/repos/mml/recipes/api/mxip/used_api b/repos/mml/recipes/api/mxip/used_api new file mode 100644 index 0000000000..7a5f1ab72b --- /dev/null +++ b/repos/mml/recipes/api/mxip/used_api @@ -0,0 +1,5 @@ +base +libm +libc +stdcxx +mxtasking \ No newline at end of file diff --git a/repos/mml/run/antagonist.run b/repos/mml/run/antagonist.run new file mode 100644 index 0000000000..ce03327a20 --- /dev/null +++ b/repos/mml/run/antagonist.run @@ -0,0 +1,95 @@ +set build_components { + core init hoitaja timer app/antagonist +} + +source ${genode_dir}/repos/base/run/platform_drv.inc +append_platform_drv_build_components +build $build_components +create_boot_directory + +install_config { + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 2022-07-20 14:30 + + + + + + + + + + + + + +} +set boot_modules { + core init hoitaja timer vfs.lib.so libm.lib.so libc.lib.so stdcxx.lib.so ld.lib.so stress_genode +} +build_boot_image $boot_modules +append qemu_args "-nographic" +run_genode_until forever diff --git a/repos/mml/run/bestow_resources.run b/repos/mml/run/bestow_resources.run new file mode 100644 index 0000000000..97718b6303 --- /dev/null +++ b/repos/mml/run/bestow_resources.run @@ -0,0 +1,65 @@ +set build_components { + core init hoitaja timer app/grant_bench +} + +source ${genode_dir}/repos/base/run/platform_drv.inc +append_platform_drv_build_components + +build $build_components + +create_boot_directory + +set config { + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + } + + install_config $config + + set boot_modules { + core init hoitaja timer vfs.lib.so ld.lib.so benchmark_resource_award +} + +append_platform_drv_boot_modules + +build_boot_image $boot_modules +append 
qemu_args "-nographic " + +run_genode_until forever \ No newline at end of file diff --git a/repos/mml/run/blinktree.run b/repos/mml/run/blinktree.run index 24f929c092..c005253351 100644 --- a/repos/mml/run/blinktree.run +++ b/repos/mml/run/blinktree.run @@ -1,5 +1,5 @@ set build_components { - core init timer app/blinktree + core init timer app/blinktree hoitaja } source ${genode_dir}/repos/base/run/platform_drv.inc @@ -21,14 +21,16 @@ set config { + + - - + + @@ -36,41 +38,210 @@ set config { } -append_platform_drv_config - append config { - - - - - - - - - - - - 2022-07-20 14:30 - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 2022-07-20 14:30 + + + + + + + + + + + + + + + + + + + + 2022-07-20 14:30 + + + + + + + + + + + + + + + + + + 2022-07-20 14:30 + + + + + + + + + + + + + + + + + } install_config $config set boot_modules { - core init timer vfs.lib.so ld.lib.so libm.lib.so libc.lib.so stdcxx.lib.so posix.lib.so blinktree fill_randint_workloada mixed_randint_workloada + core init timer vfs.lib.so ld.lib.so libm.lib.so libc.lib.so stdcxx.lib.so hoitaja blinktree fill_randint_workloada mixed_randint_workloada } append_platform_drv_boot_modules build_boot_image $boot_modules append qemu_args "-nographic" + run_genode_until forever + +set rounds 100 +set succeeded 0 + +for {set r 0} {$r < $rounds} {incr r} { + run_genode_until {\[init -> hoitaja -> blinktree1\] Finished\.} 300 + kill_spawned [output_spawn_id] + incr succeeded +} + +puts "$succeeded of $rounds rounds succeeded." 
+ diff --git a/repos/mml/run/blinktree_server.run b/repos/mml/run/blinktree_server.run new file mode 100644 index 0000000000..a9b4801d4f --- /dev/null +++ b/repos/mml/run/blinktree_server.run @@ -0,0 +1,175 @@ +set build_components { + core init timer app/blinktree_server hoitaja server/nic_router +} + + +create_boot_directory +import_from_depot [depot_user]/pkg/[drivers_nic_pkg] \ + +build $build_components +set config { + + + + + + + + + + + + + + + + + + + + + + + + + + +} + +append config { + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 2022-07-20 14:30 + + + + + + + + + + + + + + + + + + + } + + install_config $config + + build_boot_image { core init timer ld.lib.so libm.lib.so libc.lib.so stdcxx.lib.so vfs.lib.so blinktree_daemon hoitaja nic_router } + + append qemu_args "-nographic" + append_qemu_nic_args "host=10.0.2.1,dhcpstart=10.0.2.55,hostfwd=tcp::12345-:12345,hostfwd=tcp::18080-:12345,hostfwd=udp::10007-:7,hostfwd=udp::17070-:7070" + + run_genode_until forever + + set rounds 100 + set succeeded 0 + + for {set r 0} {$r < $rounds} {incr r} { + run_genode_until {\[init -> hoitaja -> blinktree1\] Finished\.} 300 + kill_spawned [output_spawn_id] + incr succeeded + } + + puts "$succeeded of $rounds rounds succeeded." 
+ diff --git a/repos/mml/run/blinktree_with_antagonist.run b/repos/mml/run/blinktree_with_antagonist.run new file mode 100644 index 0000000000..867d285940 --- /dev/null +++ b/repos/mml/run/blinktree_with_antagonist.run @@ -0,0 +1,136 @@ +set build_components { + core init timer app/blinktree app/antagonist hoitaja +} + +source ${genode_dir}/repos/base/run/platform_drv.inc +append_platform_drv_build_components +build $build_components + +create_boot_directory + + +set config { + + + + + + + + + + + + + + + + + + + + + + + + + + +} + +append config { + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 2022-07-20 14:30 + + + + + + + + + + + + + + + + + + + + 2022-07-20 14:30 + + + + + + + + + + + + + +} + +install_config $config + +set boot_modules { + core init timer vfs.lib.so ld.lib.so libm.lib.so libc.lib.so stdcxx.lib.so hoitaja blinktree stress_genode fill_randint_workloada mixed_randint_workloada +} + +append_platform_drv_boot_modules + +build_boot_image $boot_modules +append qemu_args "-nographic" + +run_genode_until forever + +set rounds 100 +set succeeded 0 + +for {set r 0} {$r < $rounds} {incr r} { + run_genode_until {\[init -> hoitaja -> blinktree1\] Finished\.} 300 + kill_spawned [output_spawn_id] + incr succeeded +} + +puts "$succeeded of $rounds rounds succeeded." 
+ diff --git a/repos/mml/run/hello_mxtask.run b/repos/mml/run/hello_mxtask.run index de73b12439..39730e2cd0 100644 --- a/repos/mml/run/hello_mxtask.run +++ b/repos/mml/run/hello_mxtask.run @@ -1,5 +1,5 @@ set build_components { - core init timer app/hello_mxtask + core init timer hoitaja app/hello_mxtask } source ${genode_dir}/repos/base/run/platform_drv.inc @@ -19,14 +19,18 @@ set config { + + + + - + @@ -34,20 +38,50 @@ set config { } -append_platform_drv_config - append config { - - - - - - 2022-07-20 14:30 - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 2022-07-20 14:30 + + + + + + + + + } @@ -55,11 +89,11 @@ append config { install_config $config set boot_modules { - core init timer vfs.lib.so libm.lib.so libc.lib.so stdcxx.lib.so ld.lib.so hello_mxtask + core init timer vfs.lib.so libm.lib.so libc.lib.so stdcxx.lib.so ld.lib.so hello_mxtask hoitaja } append_platform_drv_boot_modules build_boot_image $boot_modules -append qemu_args "-nographic -m 64" +append qemu_args "-nographic" run_genode_until forever \ No newline at end of file diff --git a/repos/mml/run/hoitaja_example.run b/repos/mml/run/hoitaja_example.run new file mode 100644 index 0000000000..1b28a90a51 --- /dev/null +++ b/repos/mml/run/hoitaja_example.run @@ -0,0 +1,114 @@ +set build_components { + core init hoitaja timer app/persistent_cell app/volatile_cell +} + +source ${genode_dir}/repos/base/run/platform_drv.inc +append_platform_drv_build_components + +build $build_components + +create_boot_directory + +set config { + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + } + + install_config $config + + set boot_modules { + core init hoitaja timer vfs.lib.so ld.lib.so persistent_cell volatile_cell +} + +append_platform_drv_boot_modules + +build_boot_image $boot_modules +append qemu_args "-nographic " + +run_genode_until forever \ No newline at end 
of file diff --git a/repos/mml/run/hpc_test.run b/repos/mml/run/hpc_test.run new file mode 100644 index 0000000000..11e9f26d63 --- /dev/null +++ b/repos/mml/run/hpc_test.run @@ -0,0 +1,80 @@ +set build_components { + core init timer app/hpc_test +} + +source ${genode_dir}/repos/base/run/platform_drv.inc +append_platform_drv_build_components + +build $build_components + +create_boot_directory + +set config { + + + + + + + + + + + + + + + + + + + + + + + + + + +} + +append config { + + + + + 2022-07-20 14:30 + + + + + + + + + + + + 2022-07-20 14:30 + + + + + + + + +} + +install_config $config + +set boot_modules { + core init timer vfs.lib.so ld.lib.so posix.lib.so libc.lib.so libm.lib.so stdcxx.lib.so hpc_test +} + +append_platform_drv_boot_modules + +build_boot_image $boot_modules +append qemu_args "-nographic " + +run_genode_until forever \ No newline at end of file diff --git a/repos/mml/run/libpfm_test.run b/repos/mml/run/libpfm_test.run new file mode 100644 index 0000000000..12d1aec044 --- /dev/null +++ b/repos/mml/run/libpfm_test.run @@ -0,0 +1,68 @@ +set build_components { + core init timer app/libpfm_test +} + +source ${genode_dir}/repos/base/run/platform_drv.inc +append_platform_drv_build_components + +build $build_components + +create_boot_directory + +set config { + + + + + + + + + + + + + + + + + + + + + + + + + + + +} + +append config { + + + + 2022-07-20 14:30 + + + + + + + + +} + +install_config $config + +set boot_modules { + core init timer vfs.lib.so ld.lib.so posix.lib.so libc.lib.so libm.lib.so stdcxx.lib.so libpfm_test +} + +append_platform_drv_boot_modules + +build_boot_image $boot_modules +append qemu_args "-nographic " + +run_genode_until forever \ No newline at end of file diff --git a/repos/mml/run/livedemo.run b/repos/mml/run/livedemo.run new file mode 100644 index 0000000000..5e0aa96dd9 --- /dev/null +++ b/repos/mml/run/livedemo.run @@ -0,0 +1,116 @@ +set build_components { + core init timer app/blinktree +} + +build $build_components + 
+create_boot_directory + +set config { + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 2022-07-20 14:30 + + + + + + + + + + + + + + + + + + + + + + 2022-07-20 14:30 + + + + + + + + + + + + + + + + + + + + + + 2022-07-20 14:30 + + + + + + + + + + + + + +} + +install_config $config + +set boot_modules { + core init timer vfs.lib.so ld.lib.so libm.lib.so libc.lib.so stdcxx.lib.so posix.lib.so blinktree fill_randint_workloada mixed_randint_workloada +} + +build_boot_image $boot_modules +append qemu_args "-nographic" +run_genode_until forever \ No newline at end of file diff --git a/repos/mml/run/loopbench.run b/repos/mml/run/loopbench.run new file mode 100644 index 0000000000..0186cdb92a --- /dev/null +++ b/repos/mml/run/loopbench.run @@ -0,0 +1,108 @@ +set build_components { + core init timer hoitaja app/loopbench app/volatile_cell +} + +source ${genode_dir}/repos/base/run/platform_drv.inc +append_platform_drv_build_components +build $build_components + +create_boot_directory + +set config { + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +} + + +append config { + + + + + + + 2022-07-20 14:30 + + + + + + + + + + + + 2022-07-20 14:30 + + + + + + + + + + + + +} + +install_config $config + +set boot_modules { + core init timer vfs.lib.so libm.lib.so libc.lib.so stdcxx.lib.so ld.lib.so hoitaja loopbench volatile_cell +} + +append_platform_drv_boot_modules +build_boot_image $boot_modules +append qemu_args "-nographic" + +run_genode_until forever \ No newline at end of file diff --git a/repos/mml/run/pfm_test.run b/repos/mml/run/pfm_test.run new file mode 100644 index 0000000000..b82a4adedb --- /dev/null +++ b/repos/mml/run/pfm_test.run @@ -0,0 +1,68 @@ +set build_components { + core init timer app/pfm_test +} + +source ${genode_dir}/repos/base/run/platform_drv.inc +append_platform_drv_build_components + +build $build_components + +create_boot_directory + +set config { + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + +} + +append config { + + + + 2022-07-20 14:30 + + + + + + + + +} + +install_config $config + +set boot_modules { + core init timer vfs.lib.so ld.lib.so posix.lib.so libc.lib.so libm.lib.so stdcxx.lib.so pfm_test +} + +append_platform_drv_boot_modules + +build_boot_image $boot_modules +append qemu_args "-nographic " + +run_genode_until forever \ No newline at end of file diff --git a/repos/mml/run/portal_experiment.run b/repos/mml/run/portal_experiment.run new file mode 100644 index 0000000000..b0b786cdcf --- /dev/null +++ b/repos/mml/run/portal_experiment.run @@ -0,0 +1,141 @@ +set build_components { + core init hoitaja timer app/portal_experiment app/volatile_cell +} + +source ${genode_dir}/repos/base/run/platform_drv.inc +append_platform_drv_build_components +build $build_components +create_boot_directory + +install_config { + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 2022-07-20 14:30 + + + + + + + + + + + + + + +} +set boot_modules { + core init hoitaja timer vfs.lib.so libm.lib.so libc.lib.so stdcxx.lib.so ld.lib.so portal_experiment volatile_cell +} +build_boot_image $boot_modules +append qemu_args "-nographic" +run_genode_until forever diff --git a/repos/mml/run/raw_nic.run b/repos/mml/run/raw_nic.run new file mode 100644 index 0000000000..f2c730a1ea --- /dev/null +++ b/repos/mml/run/raw_nic.run @@ -0,0 +1,124 @@ +create_boot_directory +import_from_depot [depot_user]/src/[base_src] \ + [depot_user]/pkg/[drivers_nic_pkg] \ + [depot_user]/src/init \ + [depot_user]/src/libc \ + [depot_user]/src/stdcxx \ + [depot_user]/src/nic_router \ + [depot_user]/src/vfs_audit \ + [depot_user]/src/vfs + +append config { + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +} + +append qemu_args " -nographic " +append_qemu_nic_args 
"host=10.0.2.1,dhcpstart=10.0.2.55,hostfwd=tcp::10080-:80,hostfwd=tcp::18080-:8080,hostfwd=udp::10007-:7,hostfwd=udp::17070-:7070" + +build { app/raw_nic app/ping } + +install_config $config +build_boot_image { raw_nic ping } +run_genode_until forever \ No newline at end of file diff --git a/repos/mml/run/resource_yield.run b/repos/mml/run/resource_yield.run new file mode 100644 index 0000000000..f210d87341 --- /dev/null +++ b/repos/mml/run/resource_yield.run @@ -0,0 +1,114 @@ +set build_components { + core init hoitaja timer app/yield_bench +} + +source ${genode_dir}/repos/base/run/platform_drv.inc +append_platform_drv_build_components + +build $build_components + +create_boot_directory + +set config { + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + } + + install_config $config + + set boot_modules { + core init hoitaja timer vfs.lib.so ld.lib.so benchmark_resource_yield +} + +append_platform_drv_boot_modules + +build_boot_image $boot_modules +append qemu_args "-nographic " + +run_genode_until forever \ No newline at end of file diff --git a/repos/mml/run/suoritin_tester.run b/repos/mml/run/suoritin_tester.run new file mode 100644 index 0000000000..2bf13a2c10 --- /dev/null +++ b/repos/mml/run/suoritin_tester.run @@ -0,0 +1,94 @@ +set build_components { + core init hoitaja timer app/suoritin_test +} + +source ${genode_dir}/repos/base/run/platform_drv.inc +append_platform_drv_build_components + +build $build_components + +create_boot_directory + +set config { + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + } + + install_config $config + + set boot_modules { + core init hoitaja timer vfs.lib.so ld.lib.so suoritin_tester +} + +append_platform_drv_boot_modules + +build_boot_image $boot_modules +append qemu_args "-nographic " + +run_genode_until forever 
\ No newline at end of file diff --git a/repos/mml/run/thread_test.run b/repos/mml/run/thread_test.run index 05aec30223..e1a38d50fa 100644 --- a/repos/mml/run/thread_test.run +++ b/repos/mml/run/thread_test.run @@ -15,10 +15,12 @@ install_config { + + @@ -29,6 +31,7 @@ install_config { + @@ -40,4 +43,5 @@ set boot_modules { core init timer vfs.lib.so ld.lib.so libm.lib.so libc.lib.so stdcxx.lib.so thread_test } build_boot_image $boot_modules -append qemu_args "-nographic -m 64" +append qemu_args "-nographic" +run_genode_until forever diff --git a/repos/mml/run/vscode.run b/repos/mml/run/vscode.run new file mode 100644 index 0000000000..c4b62c9b4b --- /dev/null +++ b/repos/mml/run/vscode.run @@ -0,0 +1,12 @@ +set build_components { + core init timer hoitaja app/blinktree app/hello_mxtask app/hpc_test app/yield_bench app/persistent_cell app/volatile_cell test/resource_yield app/grant_bench app/top app/cpu_burner app/lwip_raw app/blinktree_server +} + +source ${genode_dir}/repos/base/run/platform_drv.inc +append_platform_drv_build_components +build $build_components + +create_boot_directory + + + diff --git a/repos/mml/src/app/antagonist/cpu.h b/repos/mml/src/app/antagonist/cpu.h new file mode 100644 index 0000000000..1acf27c13d --- /dev/null +++ b/repos/mml/src/app/antagonist/cpu.h @@ -0,0 +1,26 @@ +/* + * cpu.h - basic definitions for x86_64 CPUs + */ + +#pragma once + +/* + * Endianness + */ + +#define __LITTLE_ENDIAN 1234 +#define __BIG_ENDIAN 4321 + +#define __BYTE_ORDER __LITTLE_ENDIAN + + +/* + * Word Size + */ + +#define __32BIT_WORDS 32 +#define __64BIT_WORDS 64 + +#define __WORD_SIZE __64BIT_WORDS + +#define CACHE_LINE_SIZE 64 diff --git a/repos/mml/src/app/antagonist/main.cc b/repos/mml/src/app/antagonist/main.cc new file mode 100644 index 0000000000..a5562a9759 --- /dev/null +++ b/repos/mml/src/app/antagonist/main.cc @@ -0,0 +1,254 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + 
+#define CALLS 100 +#define CORES 14 +#define HYPERCALL + + //Genode::Trace::timestamp(); +static Genode::Trace::Timestamp rdtsc_cost = 0; +Genode::Env *genv = nullptr; +static Genode::Trace::Timestamp start = 0; +static const unsigned long loops = 10000UL; +static Nova::mword_t channel = 0; +static std::atomic counter(0); +static std::atomic ready{false}; +static std::atomic restart{true}; +static std::atomic yield_ctr{-(31-CORES)}; +static unsigned long tsc_freq_khz = 0; +int cores, i; + +struct Channel { + unsigned long yield_flag : 1, + op : 2, + tnum : 61; + unsigned long delta_alloc; + unsigned long delta_activate; + unsigned long delta_setflag; + unsigned long delta_findborrower; + unsigned long delta_block; + unsigned long delta_enter; + unsigned long delta_return; +}; + +struct Cell : public Genode::Thread +{ + Genode::uint16_t _id; + Libc::Env &env; + Timer::Connection &_timer; + + static void *pthread_entry(void *args) { + Cell *cell = reinterpret_cast(args); + cell->entry(); + return nullptr; + } + + void entry() override + { + Genode::Trace::Timestamp latency = 0; + Nova::mword_t channel_id = 0; + Nova::uint64_t count_allocs = 0; + Nova::cpu_id(channel_id); + struct Channel *channels = reinterpret_cast(channel); + struct Channel volatile *my_channel = &channels[channel_id]; + + unsigned long _tsc_freq_ghz = tsc_freq_khz / 1000000UL; + + //Genode::log("Started worker", _id, " on CPU with affinity ", channel_id, Genode::Thread::myself()->affinity(), " signal channel: ", my_channel->yield_flag, " at ", my_channel); + + for (cores = CORES; cores <= 14; cores+=4) { + for (i = 0; i < CALLS; ) { + + if ((i == 0 && yield_ctr >= cores-1) || (i > 0 && yield_ctr >= cores-1)) + ready = true; + + if (_id != 0 && restart.load()) { + yield_ctr.fetch_add(1); + // Genode::log("Worker ", _id, "yielded, yield_ctr = ", yield_ctr.load()); + Nova::yield(); + } + + //Genode::log("Worker ", _id, " on CPU ", channel_id, " woke up"); + counter.fetch_add(1); + if (counter >= 
cores-1) { + ready = true; + // Genode::log("{\"allocation:\": ", allocation, ", \"id\":", _id, ",\"clk_total\":", (end-::start), ", \"mean_clk\":", (end-::start)/count_allocs ,", \"count\": ", count_allocs, "\"channel-id\":", channel_id, "},"); + } + + if (my_channel->op == 2) { + Nova::mword_t allocation = 0; + Genode::Trace::Timestamp now = Genode::Trace::timestamp(); + Nova::core_allocation(allocation); + my_channel->delta_return = now - my_channel->delta_return; + Genode::log("{\"iteration\": ", i, ", \"cores\":", cores, ", \"d_block\": ", my_channel->delta_block / _tsc_freq_ghz, ", \"d_enter\":", my_channel->delta_enter / _tsc_freq_ghz, ", \"d_return\":", my_channel->delta_return / _tsc_freq_ghz, ", \"op\": \"yield\"},"); + } + my_channel->op = 0; + if (_id == 0) { + //Genode::log("Waiting on start signal"); + while (ready.load() == false) + __builtin_ia32_pause(); + + //Genode::log("Got start signal"); + _timer.msleep(2); + + //Genode::log("Woke up for new iteration"); + ready = false; + restart = false; + ::start = Genode::Trace::timestamp(); + } + + Genode::Trace::Timestamp end = 0; + while (_id==0) + { + + if (_id == 0) { + Nova::mword_t allocated = 0; + //Genode::log("Allocating 4 cores"); + + my_channel->tnum = i; + my_channel->op = 1; /* 1 for alloc, 2 for yield */ + + my_channel->delta_enter = Genode::Trace::timestamp(); + Nova::uint8_t rc = Nova::alloc_cores(cores, allocated); + if (rc == Nova::NOVA_OK) + { + + while(ready.load() == false) + __builtin_ia32_pause(); + end = Genode::Trace::timestamp(); + my_channel->delta_return = end - my_channel->delta_return; + latency += (end - ::start) / _tsc_freq_ghz; + Nova::mword_t allocation = 0; + Genode::log("{\"iteration\": ", i, ", \"cores\":", cores, ", \"delta_enter:\" ", my_channel->delta_enter / _tsc_freq_ghz, ", \"delta_alloc\": ", my_channel->delta_alloc / _tsc_freq_ghz, ", \"delta_activate:\": ", my_channel->delta_activate / _tsc_freq_ghz, ", \"delta_setflag\": ", my_channel->delta_setflag / 
_tsc_freq_ghz, ", \"delta_return\": ", my_channel->delta_return / _tsc_freq_ghz, "},"); + Nova::core_allocation(allocation); + restart = true; + counter = 0; + yield_ctr = 0; + //if (i%100==0) { + + Genode::log("{\"iteration\": ", i, ", \"cores\":", cores, ", \"allocation\": ", allocation, ",\"start\": ", ::start, ", \"end\": ", end, " ,\"ns\": ", (latency), "},"); + my_channel->delta_setflag = 0; + latency = 0; + //} + i++; + break; + } else { + //Genode::log("cores allocated: ", allocated); + break; + // Genode::log("cores allocated: ", allocated); + } + count_allocs++; + } + } + //Genode::log("Finished allocation. Waiting for yield signal, id = ", channel_id, "\n"); + while (restart.load() == false) { + Channel volatile *res = __atomic_load_n(&my_channel, __ATOMIC_SEQ_CST); + if (res->yield_flag) { + Genode::log("Got yield signal on channel ", channel_id); + Nova::yield(true); + } + } + } + } + Genode::log("Benchmak finished."); + } + Cell(Libc::Env &env, Timer::Connection &timer, Genode::uint16_t id, Location const &location) + : Thread(env, Name("test_", location.xpos(), "x", location.ypos()), 4 * 4096, location, Weight(), env.cpu()), _id(id), env(env), _timer(timer) + { } +}; + + +void Libc::Component::construct(Libc::Env &env) +{ + Nova::uint8_t res = 0; + genv = &env; + + Libc::with_libc([&]() + { + Timer::Connection _timer{env}; + + Genode::Ram_dataspace_capability ds = env.ram().alloc(4096); + channel = env.rm().attach(ds); + + Genode::memset(reinterpret_cast(channel), 0, 4096); + + //Genode::Heap _heap{env.ram(), env.rm()}; + + //Genode::log("Registering MxTasking entrypoint"); + if ((res = Nova::mxinit(0, 0, channel))) { + Genode::error("Failed to init MxTasking: ", res); + } + Genode::log("Registered MxTasking, yielding ..."); + + try { + Genode::Attached_rom_dataspace info(env, "platform_info"); + tsc_freq_khz = info.xml().sub_node("hardware").sub_node("tsc") + .attribute_value("freq_khz", 0ULL); + } catch (...) 
{ }; + + start = Genode::Trace::timestamp(); + for (unsigned c = 0; c < 1000; c++) { + //Genode::Trace::Timestamp start = Genode::Trace::timestamp(); + + /*Nova::uint8_t rc = Nova::yield(); + if (rc != Nova::NOVA_OK) + break;*/ + Genode::Trace::timestamp(); + // Genode::Trace::Timestamp end = Genode::Trace::timestamp(); + // delay += (end - start); + } + Genode::Trace::Timestamp end = Genode::Trace::timestamp(); + rdtsc_cost = (end - start) / 1000 / 2; + + Genode::log("My affinity is ", env.cpu().affinity_space(), " of size ", env.cpu().affinity_space().total()); + Genode::log("Will create workers for affinity space: ", env.topo().global_affinity_space()); + start = Genode::Trace::timestamp(); + Genode::Thread *me = Genode::Thread::myself(); + + unsigned long cpuid = 0; + Nova::cpu_id(cpuid); + + Genode::Affinity::Space space = env.topo().global_affinity_space(); + Genode::log("My main thread is on phys. CPU ", cpuid); + + pthread_t workers[space.total()]; + std::cout << "Creating workers" << std::endl; + Genode::Trace::Timestamp thread_start = Genode::Trace::timestamp(); + for (Genode::uint16_t cpu = 1; cpu < space.total(); cpu++) + { + Genode::String<32> const name{"worker", cpu}; + if (cpu == (space.total() - cpuid)) + continue; + Cell *worker = new Cell(env, _timer, cpu, space.location_of_index(cpu)); + Libc::pthread_create_from_session(&workers[cpu], Cell::pthread_entry, worker, 4 * 4096, name.string(), &env.cpu(), space.location_of_index(cpu)); + // Genode::log("Created worker for CPU ", cpu); + // worker->start(); + } + Genode::Trace::Timestamp thread_stop = Genode::Trace::timestamp(); + Genode::log("Took ", (thread_stop - thread_start) / 2000, " μs to start workers"); + + pthread_t main_pt{}; + + Genode::Affinity::Location loc = me->affinity(); + //Genode::log("Starting main worker on CPU ", cpuid); + Cell *main_cell = new Cell(env, _timer, 0, loc); + + //Cell *main = new (_heap) Cell(env, 0, Genode::Affinity::Location(20,0)); + 
/*Libc::pthread_create_from_thread(&main_pt, *main, &main); + main->start();*/ + // Nova::yield(false); + //_timer.msleep(10000); + Libc::pthread_create_from_session(&main_pt, Cell::pthread_entry, main_cell, 8 * 4096, "main_worker", &env.cpu(), loc); + pthread_join(main_pt, 0); }); + Genode::log("Leaving component"); +} \ No newline at end of file diff --git a/repos/mml/src/app/antagonist/mem.h b/repos/mml/src/app/antagonist/mem.h new file mode 100644 index 0000000000..dc0fa8ac46 --- /dev/null +++ b/repos/mml/src/app/antagonist/mem.h @@ -0,0 +1,61 @@ +/* + * mem.h - memory management + */ + +#pragma once + +#include "types.h" + +enum { + PGSHIFT_4KB = 12, + PGSHIFT_2MB = 21, + PGSHIFT_1GB = 30, +}; + +enum { + PGSIZE_4KB = (1 << PGSHIFT_4KB), /* 4096 bytes */ + PGSIZE_2MB = (1 << PGSHIFT_2MB), /* 2097152 bytes */ + PGSIZE_1GB = (1 << PGSHIFT_1GB), /* 1073741824 bytes */ +}; + +#define PGMASK_4KB (PGSIZE_4KB - 1) +#define PGMASK_2MB (PGSIZE_2MB - 1) +#define PGMASK_1GB (PGSIZE_1GB - 1) + +/* page numbers */ +#define PGN_4KB(la) (((uintptr_t)(la)) >> PGSHIFT_4KB) +#define PGN_2MB(la) (((uintptr_t)(la)) >> PGSHIFT_2MB) +#define PGN_1GB(la) (((uintptr_t)(la)) >> PGSHIFT_1GB) + +#define PGOFF_4KB(la) (((uintptr_t)(la)) & PGMASK_4KB) +#define PGOFF_2MB(la) (((uintptr_t)(la)) & PGMASK_2MB) +#define PGOFF_1GB(la) (((uintptr_t)(la)) & PGMASK_1GB) + +#define PGADDR_4KB(la) (((uintptr_t)(la)) & ~((uintptr_t)PGMASK_4KB)) +#define PGADDR_2MB(la) (((uintptr_t)(la)) & ~((uintptr_t)PGMASK_2MB)) +#define PGADDR_1GB(la) (((uintptr_t)(la)) & ~((uintptr_t)PGMASK_1GB)) + +typedef unsigned long physaddr_t; /* physical addresses */ +typedef unsigned long virtaddr_t; /* virtual addresses */ + +#ifndef MAP_FAILED +#define MAP_FAILED ((void *)-1) +#endif + +typedef unsigned int mem_key_t; + +extern void *mem_map_anom(void *base, size_t len, size_t pgsize, int node); +extern void *mem_map_file(void *base, size_t len, int fd, off_t offset); +extern void *mem_map_shm(mem_key_t key, void *base, 
size_t len, + size_t pgsize, bool exclusive); +extern void *mem_map_shm_rdonly(mem_key_t key, void *base, size_t len, + size_t pgsize); +extern int mem_unmap_shm(void *base); +extern int mem_lookup_page_phys_addrs(void *addr, size_t len, size_t pgsize, + physaddr_t *maddrs); + +static inline int +mem_lookup_page_phys_addr(void *addr, size_t pgsize, physaddr_t *paddr) +{ + return mem_lookup_page_phys_addrs(addr, pgsize, pgsize, paddr); +} diff --git a/repos/mml/src/app/antagonist/stress_linux.cc b/repos/mml/src/app/antagonist/stress_linux.cc new file mode 100644 index 0000000000..2ad780b7a5 --- /dev/null +++ b/repos/mml/src/app/antagonist/stress_linux.cc @@ -0,0 +1,145 @@ + +#include "synthetic_worker.h" + +#include +#include +#include + +#include +#include +#include + +#include +#include +#include + +#include + +namespace +{ + + int threads; + uint64_t n; + std::string worker_spec; + + class SyntheticWork : public mx::tasking::TaskInterface + { + private: + SyntheticWorker *_w{nullptr}; + uint64_t *_cnt; + + public: + SyntheticWork(SyntheticWorker *w, uint64_t *cnt) : _w(w), _cnt(cnt) {} + ~SyntheticWork() override = default; + + mx::tasking::TaskResult execute(const std::uint16_t , const std::uint16_t) override + { + _w->Work(n); + (*_cnt)++; + //mx::tasking::runtime::scheduler().allocate_cores(64); + return mx::tasking::TaskResult::make_succeed(this); + } + }; + + void + MainHandler(void *arg) + { + std::vector cnt(threads); + + auto cores = mx::util::core_set::build(threads); + std::cout << "Core set to use: " << cores << std::endl; + mx::tasking::runtime::init(cores, 0, false); + + for (int i = 0; i < threads; ++i) + { + Genode::log("Creating synthetic worker ", i); + auto *w = SyntheticWorkerFactory(worker_spec); + if (w == nullptr) { + std::cerr << "Failed to create worker." 
<< std::endl; + exit(1); + } + auto *work = mx::tasking::runtime::new_task(i, w, &cnt[i]); + work->annotate(static_cast(i)); + mx::tasking::runtime::spawn(*work, mx::system::topology::core_id()); + } + + auto monitor = std::thread([&]() + { + uint64_t last_total = 0; + auto last = std::chrono::steady_clock::now(); + while (1) { + std::chrono::seconds sec(1); + std::this_thread::sleep_for(sec); + auto now = std::chrono::steady_clock::now(); + uint64_t total = 0; + double duration = + std::chrono::duration_cast>(now - last) + .count(); + for (int i = 0; i < threads; i++) total += cnt[i]; + std::cerr << static_cast(total - last_total) / duration + << std::endl; + last_total = total; + last = now; + } }); + mx::tasking::runtime::start_and_wait(); + monitor.join(); + + // never returns + } + +} // anonymous namespace + +void PrintUsage() +{ + std::cerr << "usage: [#threads] [#n] [worker_spec] " + << std::endl; +} + +int main(int argc, char *argv[]) +{ + int ret; + if (argc < 4) + { + PrintUsage(); + return -EINVAL; + } + + threads = std::stoi(argv[1], nullptr, 0); + n = std::stoul(argv[2], nullptr, 0); + worker_spec = std::string(argv[3]); + + // ret = base_init(); + if (ret) + return ret; + + // ret = base_init_thread(); + if (ret) + return ret; + + MainHandler(NULL); + + return 0; +} + +void Libc::Component::construct(Libc::Env &env) { + + mx::system::Environment::set_env(&env); + + auto sys_cores = mx::util::core_set::build(64); + mx::system::Environment::set_cores(&sys_cores); + + mx::memory::GlobalHeap::myself(); + std::uint16_t cores = 64; + //env.cpu().affinity_space().total(); + + char cores_arg[10]; + sprintf(cores_arg, "%d", cores); + + char *args[] = {"stress_genode", cores_arg, "1", "cacheantagonist:4090880"}; + + Libc::with_libc([&]() + { + std::cout << "Starting Cache Antagonist" << std::endl; + main(4, args); + }); +} \ No newline at end of file diff --git a/repos/mml/src/app/antagonist/synthetic_worker.cc 
b/repos/mml/src/app/antagonist/synthetic_worker.cc new file mode 100644 index 0000000000..1d3921c4d6 --- /dev/null +++ b/repos/mml/src/app/antagonist/synthetic_worker.cc @@ -0,0 +1,315 @@ +// synthetic_worker.cc - support for generation of synthetic work + +extern "C" +{ + #include "mem.h" +#include +#include +} + +#include "synthetic_worker.h" +#include "util.h" + +#include +#include +#include +#include +#include +#include +#include + +#include + +#include + +bool synth_barrier_wait() { } + +namespace +{ + + void *memcpy_ermsb(void *dst, const void *src, size_t n) + { + asm volatile("rep movsb" : "+D"(dst), "+S"(src), "+c"(n)::"memory"); + return dst; + } + + inline void clflush(volatile void *p) { asm volatile("clflush (%0)" ::"r"(p)); } + + // Store data (indicated by the param c) to the cache line using the + // non-temporal store. + inline void nt_cacheline_store(char *p, int c) + { + /*__m128i i = _mm_set_epi8(c, c, c, c, c, c, c, c, c, c, c, c, c, c, c, c); + _mm_stream_si128((__m128i *)&p[0], i); + _mm_stream_si128((__m128i *)&p[16], i); + _mm_stream_si128((__m128i *)&p[32], i); + _mm_stream_si128((__m128i *)&p[48], i);*/ + } + +} // anonymous namespace + +void SqrtWorker::Work(uint64_t n) +{ + constexpr double kNumber = 2350845.545; + for (uint64_t i = 0; i < n; ++i) + { + volatile double v = sqrt(i * kNumber); + std::ignore = v; // silences compiler warning + } +} + +#define SQRT(src_var, dest_var, src_xmm, dest_xmm) \ + asm volatile("movq %1, %%" src_xmm \ + "\n" \ + "sqrtsd %%" src_xmm ", %%" dest_xmm \ + "\n" \ + "movq %%" dest_xmm ", %0 \n" \ + : "=r"(dest_var) \ + : "g"(src_var) \ + : src_xmm, dest_xmm, "memory") + +void AsmSqrtWorker::Work(uint64_t n) +{ + constexpr double kNumber = 2350845.545; + double src_0, src_1, src_2, src_3; + double dest_0, dest_1, dest_2, dest_3; + for (uint64_t i = 0; i < n; i += 4) + { + src_0 = i * kNumber; + src_1 = (i + 1) * kNumber; + src_2 = (i + 2) * kNumber; + src_3 = (i + 3) * kNumber; + SQRT(src_0, dest_0, 
"xmm0", "xmm1"); + SQRT(src_1, dest_1, "xmm2", "xmm3"); + SQRT(src_2, dest_2, "xmm4", "xmm5"); + SQRT(src_3, dest_3, "xmm6", "xmm7"); + } +} + +StridedMemtouchWorker *StridedMemtouchWorker::Create(std::size_t size, + std::size_t stride) +{ + char *buf = new char[size](); + return new StridedMemtouchWorker(buf, size, stride); +} + +void StridedMemtouchWorker::Work(uint64_t n) +{ + for (uint64_t i = 0; i < n; ++i) + { + volatile char c = buf_[(stride_ * i) % size_]; + std::ignore = c; // silences compiler warning + } +} + +/* TODO: MemStreamWorker is currently broken as clang lacks the intrinsics needed */ +MemStreamWorker *MemStreamWorker::Create(std::size_t size) +{ + void *addr; + int prot, flags; + + prot = PROT_READ | PROT_WRITE; + flags = MAP_PRIVATE | MAP_ANONYMOUS; + // | MAP_POPULATE | MAP_HUGETLB | + // (PGSHIFT_2MB << MAP_HUGE_SHIFT); + + addr = mmap(NULL, size, prot, flags, -1, 0); + if (addr == MAP_FAILED) + return nullptr; + + memset(addr, 0xAB, size); + return new MemStreamWorker(static_cast(addr), size); +} + +MemStreamWorker::~MemStreamWorker() +{ + munmap((void *)buf_, (size_)); +} + +void MemStreamWorker::Work(uint64_t n) +{ + if (n > size_) + n = size_; + for (uint64_t i = 0; i < n; ++i) + { + volatile char c = buf_[i]; + std::ignore = c; // silences compiler warning + } +} + +RandomMemtouchWorker *RandomMemtouchWorker::Create(std::size_t size, + unsigned int seed) +{ + char *buf = new char[size](); + std::vector v(size); + std::iota(std::begin(v), std::end(v), 0); + std::mt19937 g(seed); + std::shuffle(v.begin(), v.end(), g); + return new RandomMemtouchWorker(buf, std::move(v)); +} + +void RandomMemtouchWorker::Work(uint64_t n) +{ + for (uint64_t i = 0; i < n; ++i) + buf_[schedule_[i % schedule_.size()]]++; +} + +CacheAntagonistWorker *CacheAntagonistWorker::Create(std::size_t size) +{ + char *buf = new char[size](); + return new CacheAntagonistWorker(buf, size); +} + +void CacheAntagonistWorker::Work(uint64_t n) +{ + for (uint64_t i = 0; i < n; 
++i) + memcpy_ermsb(&buf_[0], &buf_[size_ / 2], size_ / 2); +} + +MemBWAntagonistWorker *MemBWAntagonistWorker::Create(std::size_t size, + int nop_period, + int nop_num) +{ + // non-temporal store won't bypass cache when accessing the remote memory. + auto numa_id = mx::system::topology::node_id(mx::system::topology::core_id()); + char *buf = reinterpret_cast(mx::memory::GlobalHeap::allocate(numa_id, size)); + // numa_alloc_* will allocate memory in pages, therefore it must be cacheline + // aligned. + if (reinterpret_cast(buf) % CACHELINE_SIZE != 0) + { + // Should never be executed. + Genode::error("The allocated memory should be cacheline size aligned."); + return nullptr; + } + // Flush the cache explicitly. Non-temporal store will still write into cache + // if the corresponding data is already at cache. + for (std::size_t i = 0; i < size; i += CACHELINE_SIZE) + { + clflush(reinterpret_cast(buf + i)); + } + return new MemBWAntagonistWorker(buf, size, nop_period, nop_num); +} + +void MemBWAntagonistWorker::Work(uint64_t n) +{ + int cnt = 0; + for (uint64_t k = 0; k < n; k++) + { + for (std::size_t i = 0; i < size_; i += CACHELINE_SIZE) + { + nt_cacheline_store(buf_ + i, 0); + if (cnt++ == nop_period_) + { + cnt = 0; + for (int j = 0; j < nop_num_; j++) + { + asm(""); + } + } + } + } +} + +DynamicCacheAntagonistWorker *DynamicCacheAntagonistWorker::Create( + std::size_t size, int period, int nop_num) +{ + char *buf = new char[size](); + return new DynamicCacheAntagonistWorker(buf, size, period, nop_num); +} + +void DynamicCacheAntagonistWorker::Work(uint64_t n) +{ + double *ptr = reinterpret_cast(buf_); + size_t offset = size_ / 2 / sizeof(double); + for (uint64_t i = 0; i < n; ++i) + { + for (size_t j = 0; j < offset; j++) + { + ptr[j + offset] = ptr[j]; + if (cnt_++ == period_) + { + //synth_barrier_wait(); + cnt_ = 0; + for (int k = 0; k < nop_num_; k++) + { + asm(""); + } + } + } + } +} + +SyntheticWorker *SyntheticWorkerFactory(std::string s) +{ + 
std::vector tokens = split(s, ':'); + + // the first token is the type of worker, must be specified + if (tokens.size() < 1) + return nullptr; + + if (tokens[0] == "sqrt") + { + if (tokens.size() != 1) + return nullptr; + return new SqrtWorker(); + } + else if (tokens[0] == "asmsqrt") + { + if (tokens.size() != 1) + return nullptr; + return new AsmSqrtWorker(); + } + else if (tokens[0] == "stridedmem") + { + if (tokens.size() != 3) + return nullptr; + unsigned long size = std::stoul(tokens[1], nullptr, 0); + unsigned long stride = std::stoul(tokens[2], nullptr, 0); + return StridedMemtouchWorker::Create(size, stride); + } + else if (tokens[0] == "randmem") + { + if (tokens.size() != 3) + return nullptr; + unsigned long size = std::stoul(tokens[1], nullptr, 0); + unsigned long seed = std::stoul(tokens[2], nullptr, 0); + if (seed > std::numeric_limits::max()) + return nullptr; + return RandomMemtouchWorker::Create(size, seed); + } + else if (tokens[0] == "memstream") + { + if (tokens.size() != 2) + return nullptr; + unsigned long size = std::stoul(tokens[1], nullptr, 0); + return MemStreamWorker::Create(size); + } + else if (tokens[0] == "cacheantagonist") + { + if (tokens.size() != 2) + return nullptr; + unsigned long size = std::stoul(tokens[1], nullptr, 0); + return CacheAntagonistWorker::Create(size); + } + else if (tokens[0] == "membwantagonist") + { + if (tokens.size() != 4) + return nullptr; + unsigned long size = std::stoul(tokens[1], nullptr, 0); + unsigned long nop_period = std::stoul(tokens[2], nullptr, 0); + unsigned long nop_num = std::stoul(tokens[3], nullptr, 0); + return MemBWAntagonistWorker::Create(size, nop_period, nop_num); + } + else if (tokens[0] == "dynamiccacheantagonist") + { + if (tokens.size() != 4) + return nullptr; + unsigned long size = std::stoul(tokens[1], nullptr, 0); + unsigned long period = std::stoul(tokens[2], nullptr, 0); + unsigned long long nop_num = std::stoul(tokens[3], nullptr, 0); + return 
DynamicCacheAntagonistWorker::Create(size, period, nop_num); + } + + // invalid type of worker + return nullptr; +} diff --git a/repos/mml/src/app/antagonist/synthetic_worker.h b/repos/mml/src/app/antagonist/synthetic_worker.h new file mode 100644 index 0000000000..f7a86685c0 --- /dev/null +++ b/repos/mml/src/app/antagonist/synthetic_worker.h @@ -0,0 +1,166 @@ +// synthetic_worker.h - support for generation of synthetic work + +#pragma once + +//#include +#include +#include +#include +#include +#include + +#define CACHELINE_SIZE (64) + +class SyntheticWorker +{ +public: + virtual ~SyntheticWorker() {} + // Perform n iterations of fake work. + virtual void Work(uint64_t n) = 0; +}; + +class SqrtWorker : public SyntheticWorker +{ +public: + SqrtWorker() {} + ~SqrtWorker() {} + + // Performs n iterations of sqrt(). + void Work(uint64_t n); +}; + +class AsmSqrtWorker : public SyntheticWorker +{ +public: + AsmSqrtWorker() {} + ~AsmSqrtWorker() {} + + // Performs n iterations of sqrt(). + void Work(uint64_t n); +}; + +class StridedMemtouchWorker : public SyntheticWorker +{ +public: + ~StridedMemtouchWorker() { delete buf_; } + + // Creates a strided memory touching worker. + static StridedMemtouchWorker *Create(std::size_t size, size_t stride); + + // Performs n strided memory touches. + void Work(uint64_t n); + +private: + StridedMemtouchWorker(char *buf, std::size_t size, size_t stride) + : buf_(buf), size_(size), stride_(stride) {} + + volatile char *buf_; + std::size_t size_; + std::size_t stride_; +}; + +class MemStreamWorker : public SyntheticWorker +{ +public: + ~MemStreamWorker(); + + // Creates a memory streaming worker. + static MemStreamWorker *Create(std::size_t size); + + // Performs n memory reads. 
+ void Work(uint64_t n); + +private: + MemStreamWorker(char *buf, std::size_t size) : buf_(buf), size_(size) {} + + volatile char *buf_; + std::size_t size_; +}; + +class RandomMemtouchWorker : public SyntheticWorker +{ +public: + ~RandomMemtouchWorker() { delete buf_; } + + // Creates a random memory touching worker. + static RandomMemtouchWorker *Create(std::size_t size, unsigned int seed); + + // Performs n random memory touches. + void Work(uint64_t n); + +private: + RandomMemtouchWorker(char *buf, std::vector schedule) + : buf_(buf), schedule_(std::move(schedule)) {} + + volatile char *buf_; + std::vector schedule_; +}; + +class CacheAntagonistWorker : public SyntheticWorker +{ +public: + ~CacheAntagonistWorker() { delete buf_; } + + // Creates a cache antagonist worker. + static CacheAntagonistWorker *Create(std::size_t size); + + // Perform n cache accesses. + void Work(uint64_t n); + +private: + CacheAntagonistWorker(char *buf, std::size_t size) : buf_(buf), size_(size) {} + + char *buf_; + std::size_t size_; +}; + +class MemBWAntagonistWorker : public SyntheticWorker +{ +public: + ~MemBWAntagonistWorker() { free(buf_); } + + // Creates a memory bandwidth antagonist worker. It allocates an array whose + // size is indicated by the parameter. + static MemBWAntagonistWorker *Create(std::size_t size, int nop_period, + int nop_num); + + // Perform n times array stores. + void Work(uint64_t n); + +private: + MemBWAntagonistWorker(char *buf, std::size_t size, int nop_period, + int nop_num) + : buf_(buf), size_(size), nop_period_(nop_period), nop_num_(nop_num) {} + + char *buf_; + std::size_t size_; + int nop_period_; + int nop_num_; +}; + +class DynamicCacheAntagonistWorker : public SyntheticWorker +{ +public: + ~DynamicCacheAntagonistWorker() { delete buf_; } + + // Creates a cache antagonist worker. + static DynamicCacheAntagonistWorker *Create(std::size_t size, int period, + int nop_num); + + // Perform n cache accesses. 
+ void Work(uint64_t n); + +private: + DynamicCacheAntagonistWorker(char *buf, std::size_t size, int period, + int nop_num) + : buf_(buf), size_(size), period_(period), nop_num_(nop_num) {} + + char *buf_; + std::size_t size_; + int period_; + int nop_num_; + int cnt_; +}; + +// Parses a string to generate one of the above fake workers. +SyntheticWorker *SyntheticWorkerFactory(std::string s); diff --git a/repos/mml/src/app/antagonist/target.mk b/repos/mml/src/app/antagonist/target.mk new file mode 100644 index 0000000000..87052693a5 --- /dev/null +++ b/repos/mml/src/app/antagonist/target.mk @@ -0,0 +1,16 @@ +MXINC_DIR=$(REP_DIR)/src/app/antagonist +GENODE_GCC_TOOLCHAIN_DIR ?= /usr/local/genode/tool/21.05 + +TARGET = stress_genode +# soure file for benchmark framework + +SRC_CC = stress_linux.cc synthetic_worker.cc util.cc +LIBS += base libc stdcxx mxtasking +EXT_OBJECTS += /usr/local/genode/tool/lib/clang/14.0.5/lib/linux/libclang_rt.builtins-x86_64.a /usr/local/genode/tool/lib/libatomic.a +CUSTOM_CC = /usr/local/genode/tool/bin/clang +CUSTOM_CXX = /usr/local/genode/tool/bin/clang++ +CC_OPT += --target=x86_64-genode --sysroot=/does/not/exist --gcc-toolchain=$(GENODE_GCC_TOOLCHAIN_DIR) -Wno-error -O2 -g -DNDEBUG -I$(MXINC_DIR) -std=c++20 #-D_GLIBCXX_ATOMIC_BUILTINS_8 -D__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8 +#CC_OPT += -femulated-tls -DCLANG_CXX11_ATOMICS +CC_CXX_WARN_STRICT = +CUSTOM_CXX_LIB := $(CROSS_DEV_PREFIX)g++ +#CXX_LD += $(CROSS_DEV_PREFIX)g++ diff --git a/repos/mml/src/app/antagonist/types.h b/repos/mml/src/app/antagonist/types.h new file mode 100644 index 0000000000..a28c9a2b5d --- /dev/null +++ b/repos/mml/src/app/antagonist/types.h @@ -0,0 +1,50 @@ +/* + * types.h - primitive type definitions + */ + +#pragma once + +#include +#include "cpu.h" + +typedef unsigned char uint8_t; +typedef unsigned short uint16_t; +typedef unsigned int uint32_t; + +typedef signed char int8_t; +typedef signed short int16_t; +typedef signed int int32_t; + +#ifndef __WORD_SIZE 
+#error __WORD_SIZE is undefined +#endif + +#if __WORD_SIZE == __64BIT_WORDS + +typedef unsigned long uint64_t; +typedef signed long int64_t; + +#else /* __WORDSIZE == __64BIT_WORDS */ + +typedef unsigned long long uint64_t; +typedef signed long long int64_t; + +#endif /* __WORDSIZE == __64BIT_WORDS */ + +typedef unsigned long uintptr_t; +typedef long intptr_t; +typedef long off_t; +typedef unsigned long size_t; +typedef long ssize_t; + +typedef struct { + volatile int locked; +} spinlock_t; + +typedef struct { + volatile int cnt; +} atomic_t; + +typedef struct { + volatile long cnt; +} atomic64_t; diff --git a/repos/mml/src/app/antagonist/util.cc b/repos/mml/src/app/antagonist/util.cc new file mode 100644 index 0000000000..d7cb303509 --- /dev/null +++ b/repos/mml/src/app/antagonist/util.cc @@ -0,0 +1,14 @@ +#include "util.h" + +std::vector split(const std::string &text, char sep) +{ + std::vector tokens; + std::string::size_type start = 0, end = 0; + while ((end = text.find(sep, start)) != std::string::npos) + { + tokens.push_back(text.substr(start, end - start)); + start = end + 1; + } + tokens.push_back(text.substr(start)); + return tokens; +} diff --git a/repos/mml/src/app/antagonist/util.h b/repos/mml/src/app/antagonist/util.h new file mode 100644 index 0000000000..46b7006a15 --- /dev/null +++ b/repos/mml/src/app/antagonist/util.h @@ -0,0 +1,6 @@ +#pragma once +#include +#include +#include + +std::vector split(const std::string &text, char sep); diff --git a/repos/mml/src/app/blinktree/benchmark/chronometer.h b/repos/mml/src/app/blinktree/benchmark/chronometer.h index 7014b1aba4..4e2c006dec 100644 --- a/repos/mml/src/app/blinktree/benchmark/chronometer.h +++ b/repos/mml/src/app/blinktree/benchmark/chronometer.h @@ -1,14 +1,13 @@ #pragma once -#ifdef PERF_SUPPORT #include "perf.h" -#endif #include "phase.h" #include #include #include #include #include +#include #include #include #include @@ -50,7 +49,7 @@ template class InterimResult public: InterimResult(const 
std::uint64_t operation_count, const P &phase, const std::uint16_t iteration, const std::uint16_t core_count, const std::chrono::milliseconds time, - /*std::vector &counter,*/ + std::vector &counter, std::unordered_map executed_tasks, std::unordered_map executed_reader_tasks, std::unordered_map executed_writer_tasks, @@ -64,12 +63,10 @@ public: _scheduled_tasks_on_core(std::move(scheduled_tasks_on_core)), _scheduled_tasks_off_core(std::move(scheduled_tasks_off_core)), _worker_fills(std::move(worker_fills)) { -#ifdef PERF_SUPPORT for (auto &c : counter) { _performance_counter.emplace_back(std::make_pair(c.name(), c.read())); } -#endif } ~InterimResult() = default; @@ -180,27 +177,27 @@ public: _current_phase = phase; _current_iteration = iteration; _core_set = core_set; -#ifdef PERF_SUPPORT _perf.start(); -#endif - _start = std::chrono::steady_clock::now(); + + //_start = std::chrono::steady_clock::now(); + _start = Genode::Trace::timestamp(); } InterimResult

stop(const std::uint64_t count_operations) { - const auto end = std::chrono::steady_clock::now(); -#ifdef PERF_SUPPORT + const auto end = Genode::Trace::timestamp(); + //const auto end = std::chrono::steady_clock::now(); _perf.stop(); -#endif - const auto milliseconds = std::chrono::duration_cast(end - _start); + //const auto milliseconds = std::chrono::duration_cast(end-_start); + const auto milliseconds = std::chrono::milliseconds((end-_start)/2000000UL); return {count_operations, _current_phase, _current_iteration, - _core_set.size(), + mx::tasking::runtime::workers_count(), milliseconds, - //_perf.counter(), + _perf.counter(), statistic_map(mx::tasking::profiling::Statistic::Executed), statistic_map(mx::tasking::profiling::Statistic::ExecutedReader), statistic_map(mx::tasking::profiling::Statistic::ExecutedWriter), @@ -209,17 +206,14 @@ public: statistic_map(mx::tasking::profiling::Statistic::ScheduledOffChannel), statistic_map(mx::tasking::profiling::Statistic::Fill)}; } -#ifdef PERF_SUPPORT void add(PerfCounter &performance_counter) { _perf.add(performance_counter); } -#endif private: std::uint16_t _current_iteration{0U}; P _current_phase; mx::util::core_set _core_set; -#ifdef PERF_SUPPORT alignas(64) Perf _perf; -#endif - alignas(64) std::chrono::steady_clock::time_point _start; + //alignas(64) std::chrono::steady_clock::time_point _start; + alignas(64) size_t _start; std::unordered_map statistic_map( const mx::tasking::profiling::Statistic::Counter counter) diff --git a/repos/mml/src/app/blinktree/benchmark/perf.cpp b/repos/mml/src/app/blinktree/benchmark/perf.cpp index 366e671854..9e6c8be876 100644 --- a/repos/mml/src/app/blinktree/benchmark/perf.cpp +++ b/repos/mml/src/app/blinktree/benchmark/perf.cpp @@ -6,29 +6,28 @@ using namespace benchmark; * Counter "Instructions Retired" * Counts when the last uop of an instruction retires. 
*/ -[[maybe_unused]] PerfCounter Perf::INSTRUCTIONS = {"instr", PERF_TYPE_HARDWARE, PERF_COUNT_HW_INSTRUCTIONS}; +[[maybe_unused]] PerfCounter Perf::INSTRUCTIONS = {"instr", Genode::Trace::Performance_counter::Type::CORE, 0xc0, 0x0}; /** */ -[[maybe_unused]] PerfCounter Perf::CYCLES = {"cycles", PERF_TYPE_HARDWARE, PERF_COUNT_HW_CPU_CYCLES}; +[[maybe_unused]] PerfCounter Perf::CYCLES = {"cycles", Genode::Trace::Performance_counter::Type::CORE, 0x76, 0x0}; /** */ -[[maybe_unused]] PerfCounter Perf::L1_MISSES = {"l1-miss", PERF_TYPE_HW_CACHE, - PERF_COUNT_HW_CACHE_L1D | (PERF_COUNT_HW_CACHE_OP_READ << 8) | - (PERF_COUNT_HW_CACHE_RESULT_MISS << 16)}; +[[maybe_unused]] PerfCounter Perf::L1_DTLB_MISSES = {"l1-dtlb-miss", Genode::Trace::Performance_counter::Type::CORE, 0x45, 0xff}; +[[maybe_unused]] PerfCounter Perf::L1_ITLB_MISSES = {"l1-itlb-miss", Genode::Trace::Performance_counter::Type::CORE, 0x85, 0x0}; /** * Counter "LLC Misses" * Accesses to the LLC in which the data is not present(miss). */ -[[maybe_unused]] PerfCounter Perf::LLC_MISSES = {"llc-miss", PERF_TYPE_HARDWARE, PERF_COUNT_HW_CACHE_MISSES}; +[[maybe_unused]] PerfCounter Perf::LLC_MISSES = {"llc-miss", Genode::Trace::Performance_counter::Type::CACHE, 0x6, 0xff}; /** * Counter "LLC Reference" * Accesses to the LLC, in which the data is present(hit) or not present(miss) */ -[[maybe_unused]] PerfCounter Perf::LLC_REFERENCES = {"llc-ref", PERF_TYPE_HARDWARE, PERF_COUNT_HW_CACHE_REFERENCES}; +[[maybe_unused]] PerfCounter Perf::LLC_REFERENCES = {"llc-ref", Genode::Trace::Performance_counter::Type::CACHE, 0x4, 0xff}; /** * Micro architecture "Skylake" @@ -36,7 +35,7 @@ using namespace benchmark; * EventSel=A3H,UMask=14H, CMask=20 * Execution stalls while memory subsystem has an outstanding load. 
*/ -PerfCounter Perf::STALLS_MEM_ANY = {"memory-stall", PERF_TYPE_RAW, 0x145314a3}; +//PerfCounter Perf::STALLS_MEM_ANY = {"memory-stall", PERF_TYPE_RAW, 0x145314a3}; /** * Micro architecture "Skylake" @@ -44,7 +43,7 @@ PerfCounter Perf::STALLS_MEM_ANY = {"memory-stall", PERF_TYPE_RAW, 0x145314a3}; * EventSel=32H,UMask=01H * Number of PREFETCHNTA instructions executed. */ -[[maybe_unused]] PerfCounter Perf::SW_PREFETCH_ACCESS_NTA = {"sw-prefetch-nta", PERF_TYPE_RAW, 0x530132}; +[[maybe_unused]] PerfCounter Perf::SW_PREFETCH_ACCESS_NTA = {"sw-prefetch-nta", Genode::Trace::Performance_counter::Type::CORE, 0x4b, 0x4}; /** * Micro architecture "Skylake" @@ -52,7 +51,7 @@ PerfCounter Perf::STALLS_MEM_ANY = {"memory-stall", PERF_TYPE_RAW, 0x145314a3}; * EventSel=32H,UMask=02H * Number of PREFETCHT0 instructions executed. */ -[[maybe_unused]] PerfCounter Perf::SW_PREFETCH_ACCESS_T0 = {"sw-prefetch-t0", PERF_TYPE_RAW, 0x530232}; +//[[maybe_unused]] PerfCounter Perf::SW_PREFETCH_ACCESS_T0 = {"sw-prefetch-t0", Genode::Trace::Performance_counter::Type::CORE, 0x4b, }; /** * Micro architecture "Skylake" @@ -60,7 +59,7 @@ PerfCounter Perf::STALLS_MEM_ANY = {"memory-stall", PERF_TYPE_RAW, 0x145314a3}; * EventSel=32H,UMask=04H * Number of PREFETCHT1 or PREFETCHT2 instructions executed. */ -[[maybe_unused]] PerfCounter Perf::SW_PREFETCH_ACCESS_T1_T2 = {"sw-prefetch-t1t2", PERF_TYPE_RAW, 0x530432}; +//[[maybe_unused]] PerfCounter Perf::SW_PREFETCH_ACCESS_T1_T2 = {"sw-prefetch-t1t2", PERF_TYPE_RAW, 0x530432}; /** * Micro architecture "Skylake" @@ -68,4 +67,4 @@ PerfCounter Perf::STALLS_MEM_ANY = {"memory-stall", PERF_TYPE_RAW, 0x145314a3}; * EventSel=32H,UMask=08H * Number of PREFETCHW instructions executed. 
*/ -[[maybe_unused]] PerfCounter Perf::SW_PREFETCH_ACCESS_WRITE = {"sw-prefetch-w", PERF_TYPE_RAW, 0x530832}; \ No newline at end of file +[[maybe_unused]] PerfCounter Perf::SW_PREFETCH_ACCESS_WRITE = {"sw-prefetch-w", Genode::Trace::Performance_counter::Type::CORE, 0x4b, 0x2}; \ No newline at end of file diff --git a/repos/mml/src/app/blinktree/benchmark/perf.h b/repos/mml/src/app/blinktree/benchmark/perf.h index 544a675fad..4b61020b74 100644 --- a/repos/mml/src/app/blinktree/benchmark/perf.h +++ b/repos/mml/src/app/blinktree/benchmark/perf.h @@ -1,12 +1,11 @@ #pragma once #include -#include #include -#include // TODO: Find Genode equivalent +#include #include -#include -#include #include +#include + /* * For more Performance Counter take a look into the Manual from Intel: @@ -28,46 +27,62 @@ namespace benchmark { class PerfCounter { public: - PerfCounter(std::string &&name, const std::uint64_t type, const std::uint64_t event_id) : _name(std::move(name)) + PerfCounter(std::string &&name, const Genode::Trace::Performance_counter::Type type, const std::uint64_t event_id, const std::uint64_t mask) : _name(std::move(name)), _type(type), _event_id(static_cast(event_id)), _mask(static_cast(mask)) { - /*std::memset(&_perf_event_attribute, 0, sizeof(perf_event_attr)); - _perf_event_attribute.type = type; - _perf_event_attribute.size = sizeof(perf_event_attr); - _perf_event_attribute.config = event_id; - _perf_event_attribute.disabled = true; - _perf_event_attribute.inherit = 1; - _perf_event_attribute.exclude_kernel = false; - _perf_event_attribute.exclude_hv = false; - _perf_event_attribute.read_format = PERF_FORMAT_TOTAL_TIME_ENABLED | PERF_FORMAT_TOTAL_TIME_RUNNING;*/ + } ~PerfCounter() = default; bool open() { - /*_file_descriptor = syscall(__NR_perf_event_open, &_perf_event_attribute, 0, -1, -1, 0);*/ - return _file_descriptor >= 0; + try { + _counter = Genode::Trace::Performance_counter::acquire(_type); + } catch (Genode::Trace::Pfc_no_avail) { + std::cerr << 
"Failed to open performance counters." << std::endl; + } + + try { + Genode::Trace::Performance_counter::setup(_counter, _event_id, _mask, (_type == Genode::Trace::Performance_counter::Type::CORE ? 0x30000 : 0x550f000000000000)); + } catch (Genode::Trace::Pfc_access_error &e) { + std::cerr << "Error while setting up performance counter: " << e.error_code() << std::endl; + } + + return _counter >= 0; } bool start() { - //ioctl(_file_descriptor, PERF_EVENT_IOC_RESET, 0); - //ioctl(_file_descriptor, PERF_EVENT_IOC_ENABLE, 0); - return ::read(_file_descriptor, &_prev, sizeof(read_format)) == sizeof(read_format); + try { + Genode::Trace::Performance_counter::start(_counter); + _prev.value = static_cast(Genode::Trace::Performance_counter::read(_counter)); + } + catch (Genode::Trace::Pfc_access_error &e) + { + std::cerr << "Failed to start counter " << _counter << " " << _name << ": " << static_cast(e.error_code()) << std::endl; + } + return _prev.value >= 0; } bool stop() { - //const auto is_read = ::read(_file_descriptor, &_data, sizeof(read_format)) == sizeof(read_format); - //ioctl(_file_descriptor, PERF_EVENT_IOC_DISABLE, 0); - return false; // is_read; + try { + _data.value = Genode::Trace::Performance_counter::read(_counter); + Genode::Trace::Performance_counter::stop(_counter); + Genode::Trace::Performance_counter::reset(_counter); + } + catch (Genode::Trace::Pfc_access_error &e) + { + std::cerr << "Failed to stop counter: " << e.error_code() << std::endl; + } + // const auto is_read = ::read(_file_descriptor, &_data, sizeof(read_format)) == sizeof(read_format); + // ioctl(_file_descriptor, PERF_EVENT_IOC_DISABLE, 0); + return _data.value >= 0; // is_read; } [[nodiscard]] double read() const { - const auto multiplexing_correction = static_cast(_data.time_enabled - _prev.time_enabled) / - static_cast(_data.time_running - _prev.time_running); - return static_cast(_data.value - _prev.value) * multiplexing_correction; + return static_cast(_data.value - _prev.value); } 
[[nodiscard]] const std::string &name() const { return _name; } @@ -84,8 +99,10 @@ private: }; const std::string _name; - std::int32_t _file_descriptor = -1; - //perf_event_attr _perf_event_attribute{}; + Genode::Trace::Performance_counter::Type _type; + Genode::uint64_t _event_id; + Genode::uint64_t _mask; + Genode::Trace::Performance_counter::Counter _counter; read_format _prev{}; read_format _data{}; }; @@ -98,14 +115,15 @@ class Perf public: [[maybe_unused]] static PerfCounter INSTRUCTIONS; [[maybe_unused]] static PerfCounter CYCLES; - [[maybe_unused]] static PerfCounter L1_MISSES; + [[maybe_unused]] static PerfCounter L1_DTLB_MISSES; + [[maybe_unused]] static PerfCounter L1_ITLB_MISSES; [[maybe_unused]] [[maybe_unused]] static PerfCounter LLC_MISSES; [[maybe_unused]] static PerfCounter LLC_REFERENCES; - [[maybe_unused]] static PerfCounter STALLED_CYCLES_BACKEND; - [[maybe_unused]] static PerfCounter STALLS_MEM_ANY; + //[[maybe_unused]] static PerfCounter STALLED_CYCLES_BACKEND; + //[[maybe_unused]] static PerfCounter STALLS_MEM_ANY; [[maybe_unused]] static PerfCounter SW_PREFETCH_ACCESS_NTA; - [[maybe_unused]] static PerfCounter SW_PREFETCH_ACCESS_T0; - [[maybe_unused]] static PerfCounter SW_PREFETCH_ACCESS_T1_T2; + //[[maybe_unused]] static PerfCounter SW_PREFETCH_ACCESS_T0; + //[[maybe_unused]] static PerfCounter SW_PREFETCH_ACCESS_T1_T2; [[maybe_unused]] static PerfCounter SW_PREFETCH_ACCESS_WRITE; Perf() noexcept = default; diff --git a/repos/mml/src/app/blinktree/benchmark/workload_set.h b/repos/mml/src/app/blinktree/benchmark/workload_set.h index c800b79591..9ea0b7c3c8 100644 --- a/repos/mml/src/app/blinktree/benchmark/workload_set.h +++ b/repos/mml/src/app/blinktree/benchmark/workload_set.h @@ -79,15 +79,14 @@ private: class Fill_thread : public Genode::Thread { private: - Genode::Mutex &_mutex; + //Genode::Mutex &_mutex; const std::string &_fill_workload_file; bool (*parse)(std::ifstream &, std::vector &); NumericWorkloadSet &_workload_set; public: 
Fill_thread(Libc::Env &env, Genode::Mutex &mutex, std::string fill_workload_name, bool (*parse)(std::ifstream&, std::vector&), NumericWorkloadSet &workload_set) - : Genode::Thread(env, Name("btree::fill_thread"), 4*4096), - _mutex(mutex), _fill_workload_file(fill_workload_name), _workload_set(workload_set) + : Genode::Thread(env, Name("btree::fill_thread"), 4*4096), _fill_workload_file(fill_workload_name), _workload_set(workload_set) { this->parse = parse; } @@ -107,7 +106,6 @@ class Fill_thread : public Genode::Thread class Mixed_thread : public Genode::Thread { private: - Genode::Mutex &_mutex; const std::string &_mixed_workload_file; bool (*parse)(std::ifstream &, std::vector &); NumericWorkloadSet &_workload_set; @@ -115,7 +113,7 @@ class Mixed_thread : public Genode::Thread public: Mixed_thread(Libc::Env &env, Genode::Mutex &mutex, std::string mixed_workload_name, bool (*parse)(std::ifstream&, std::vector&), NumericWorkloadSet &workload_set) : Genode::Thread(env, Name("btree::mixed_thread"), 4*4096), - _mutex(mutex), _mixed_workload_file(mixed_workload_name), _workload_set(workload_set) + _mixed_workload_file(mixed_workload_name), _workload_set(workload_set) { this->parse = parse; } diff --git a/repos/mml/src/app/blinktree/blinktree_benchmark/benchmark.cpp b/repos/mml/src/app/blinktree/blinktree_benchmark/benchmark.cpp index c081cc56f9..9ccb51db30 100644 --- a/repos/mml/src/app/blinktree/blinktree_benchmark/benchmark.cpp +++ b/repos/mml/src/app/blinktree/blinktree_benchmark/benchmark.cpp @@ -5,6 +5,7 @@ #include #include #include +#include #include using namespace application::blinktree_benchmark; @@ -21,16 +22,20 @@ Benchmark::Benchmark(Libc::Env &env, benchmark::Cores &&cores, const std::uint16 _result_file_name(std::move(result_file_name)), _statistic_file_name(std::move(statistic_file_name)), _tree_file_name(std::move(tree_file_name)), _profile(profile), _workload(env) { -#ifdef PERF_SUPPORT if (use_performance_counter) { 
this->_chronometer.add(benchmark::Perf::CYCLES); this->_chronometer.add(benchmark::Perf::INSTRUCTIONS); - this->_chronometer.add(benchmark::Perf::STALLS_MEM_ANY); - this->_chronometer.add(benchmark::Perf::SW_PREFETCH_ACCESS_NTA); - this->_chronometer.add(benchmark::Perf::SW_PREFETCH_ACCESS_WRITE); + this->_chronometer.add(benchmark::Perf::L1_ITLB_MISSES); + this->_chronometer.add(benchmark::Perf::L1_DTLB_MISSES); + //this->_chronometer.add(benchmark::Perf::LLC_MISSES); + + + //this->_chronometer.add(benchmark::Perf::STALLS_MEM_ANY); + + //this->_chronometer.add(benchmark::Perf::SW_PREFETCH_ACCESS_NTA); + //this->_chronometer.add(benchmark::Perf::SW_PREFETCH_ACCESS_WRITE); } -#endif std::cout << "core configuration: \n" << this->_cores.dump(2) << std::endl; this->_workload.build(fill_workload_file, mixed_workload_file); @@ -57,6 +62,9 @@ void Benchmark::start() this->_request_scheduler.clear(); } + auto *start_task = mx::tasking::runtime::new_task(0U, *this); + mx::tasking::runtime::spawn(*start_task, 0U); + // Create one request scheduler per core. for (auto core_index = 0U; core_index < this->_cores.current().size(); core_index++) { @@ -73,8 +81,9 @@ void Benchmark::start() { mx::tasking::runtime::profile(this->profile_file_name()); } - this->_chronometer.start(static_cast(static_cast(this->_workload)), - this->_current_iteration + 1, this->_cores.current()); + /*this->_chronometer.start(static_cast(static_cast(this->_workload)), + this->_current_iteration + 1, this->_cores.current());*/ + //Genode::log("Timer started "); } const mx::util::core_set &Benchmark::core_set() @@ -112,22 +121,39 @@ void Benchmark::requests_finished() if (open_requests == 0U) // All request schedulers are done. 
{ + std::uint16_t core_id = mx::tasking::runtime::my_channel(); + if (core_id != 0) + { + this->_open_requests++; + auto *stop_task = mx::tasking::runtime::new_task(0U, *this); + stop_task->annotate(static_cast(0)); + mx::tasking::runtime::spawn(*stop_task, core_id); + return; + } + // Stop and print time (and performance counter). + //Genode::log("Stopping timer"); const auto result = this->_chronometer.stop(this->_workload.size()); mx::tasking::runtime::stop(); + mx::tasking::runtime::reset_usage_predictions(); - Genode::log(result.core_count(), "\t", result.iteration(), "\t", result.phase(), "\t", result.time().count(), " ms\t", result.throughput(), " op/s"); + //_end = Genode::Trace::timestamp(); + + std::cout << "core: " << mx::system::topology::core_id() << result.to_json().dump() << std::endl; + + + // std::cout << result << std::endl; // Dump results to file. - /*if (this->_result_file_name.empty() == false) + if (this->_result_file_name.empty() == false) { - std::ofstream result_file_stream(this->_result_file_name, std::ofstream::app); - result_file_stream << result.to_json().dump() << std::endl; + //std::ofstream result_file_stream(this->_result_file_name, std::ofstream::app); + //result_file_stream << result.to_json().dump() << std::endl; } // Dump statistics to file. if constexpr (mx::tasking::config::task_statistics()) { - if (this->_statistic_file_name.empty() == false) + /*if (this->_statistic_file_name.empty() == false) { std::ofstream statistic_file_stream(this->_statistic_file_name, std::ofstream::app); nlohmann::json statistic_json; @@ -162,8 +188,8 @@ void Benchmark::requests_finished() } statistic_file_stream << statistic_json.dump(2) << std::endl; - } - }*/ + }*/ + } // Check and print the tree. 
if (this->_check_tree) @@ -191,6 +217,18 @@ void Benchmark::requests_finished() { this->_tree.reset(nullptr); } + + if (this->core_set()) { + this->_chronometer.start(static_cast(static_cast(this->_workload)), + this->_current_iteration + 1, this->_cores.current()); + auto *restart_task = mx::tasking::runtime::new_task(0U, *this); + restart_task->annotate(static_cast(0)); + mx::tasking::runtime::spawn(*restart_task, core_id); + mx::tasking::runtime::resume(); + } else { + Genode::log("Benchmark finished."); + mx::tasking::runtime::stop(); + } } } diff --git a/repos/mml/src/app/blinktree/blinktree_benchmark/benchmark.h b/repos/mml/src/app/blinktree/blinktree_benchmark/benchmark.h index c00f2d2d1e..19c8dc012d 100644 --- a/repos/mml/src/app/blinktree/blinktree_benchmark/benchmark.h +++ b/repos/mml/src/app/blinktree/blinktree_benchmark/benchmark.h @@ -14,6 +14,10 @@ #include #include #include +#include + +#include +#include namespace application::blinktree_benchmark { /** @@ -22,6 +26,7 @@ namespace application::blinktree_benchmark { class Benchmark final : public Listener { public: + Benchmark(Libc::Env &env, benchmark::Cores &&, std::uint16_t iterations, std::string &&fill_workload_file, std::string &&mixed_workload_file, bool use_performance_counter, mx::synchronization::isolation_level node_isolation_level, @@ -47,6 +52,11 @@ public: */ void start(); + void start_chronometer() { + this->_chronometer.start(static_cast(static_cast(this->_workload)), + this->_current_iteration + 1, this->_cores.current()); + } + private: // Collection of cores the benchmark should run on. benchmark::Cores _cores; @@ -100,5 +110,57 @@ private: * @return Name of the file to write profiling results to. 
*/ [[nodiscard]] std::string profile_file_name() const; + + friend class StartMeasurementTask; + friend class StopMeasurementTask; +}; + +class StartMeasurementTask : public mx::tasking::TaskInterface +{ + private: + Benchmark &_benchmark; + + public: + constexpr StartMeasurementTask(Benchmark& benchmark) : _benchmark(benchmark) {} + ~StartMeasurementTask() override = default; + + mx::tasking::TaskResult execute(const std::uint16_t core_id, const std::uint16_t channel_id) override + { + //_benchmark._chronometer.start(static_cast(static_cast(_benchmark._workload)), _benchmark._current_iteration + 1, _benchmark._cores.current()); + //_benchmark._start = Genode::Trace::timestamp(); + return mx::tasking::TaskResult::make_remove(); + } +}; + +class StopMeasurementTask : public mx::tasking::TaskInterface +{ + private: + Benchmark &_benchmark; + + public: + constexpr StopMeasurementTask(Benchmark& benchmark) : _benchmark(benchmark) {} + ~StopMeasurementTask() override = default; + + mx::tasking::TaskResult execute(const std::uint16_t core_id, const std::uint16_t channel_id) override + { + _benchmark.requests_finished(); + return mx::tasking::TaskResult::make_remove(); + } +}; + +class RestartTask : public mx::tasking::TaskInterface +{ + private: + Benchmark &_benchmark; + + public: + constexpr RestartTask(Benchmark &benchmark) : _benchmark(benchmark) {} + ~RestartTask() override = default; + + mx::tasking::TaskResult execute(const std::uint16_t core_id, const std::uint16_t channel_id) override + { + _benchmark.start(); + return mx::tasking::TaskResult::make_remove(); + } }; } // namespace application::blinktree_benchmark \ No newline at end of file diff --git a/repos/mml/src/app/blinktree/blinktree_benchmark/main.cpp b/repos/mml/src/app/blinktree/blinktree_benchmark/main.cpp index 7b55f52d1e..33824519c5 100644 --- a/repos/mml/src/app/blinktree/blinktree_benchmark/main.cpp +++ b/repos/mml/src/app/blinktree/blinktree_benchmark/main.cpp @@ -1,6 +1,7 @@ #include 
"benchmark.h" #include #include +#include #include //#include #include @@ -8,6 +9,7 @@ #include #include #include +#include using namespace application::blinktree_benchmark; @@ -41,6 +43,7 @@ int bt_main(Libc::Env &env, int count_arguments, char **arguments) { return 1; } + Genode::log("Using system allocator = ", (use_system_allocator? "true" : "false")); mx::util::core_set cores{}; @@ -48,6 +51,7 @@ int bt_main(Libc::Env &env, int count_arguments, char **arguments) while ((cores = benchmark->core_set())) { mx::tasking::runtime_guard _(false, cores, prefetch_distance); + benchmark->start_chronometer(); benchmark->start(); //wait_for_continue(); } @@ -162,16 +166,23 @@ std::tuple create_benchmark(Libc::Env &env, in if (argument_parser.get("--latched")) { preferred_synchronization_method = mx::synchronization::protocol::Latch; + Genode::log("Set synchronization method to latch"); } else if (argument_parser.get("--olfit")) { preferred_synchronization_method = mx::synchronization::protocol::OLFIT; + Genode::log("Set synchronization method to OLFIT"); } else if (argument_parser.get("--sync4me")) { preferred_synchronization_method = mx::synchronization::protocol::None; + Genode::log("Set synchronization method to None"); + } else { + Genode::log("Set synchronization method to Queue"); } + Genode::log("Isolation level ", (isolation_level == mx::synchronization::isolation_level::Exclusive) ? "exclusive readers/writers" : "exclusive writers/parallel readers"); + // Create the benchmark. 
//Genode::Heap _heap{env.ram(), env.rm()}; auto *benchmark = @@ -189,16 +200,21 @@ void Libc::Component::construct(Libc::Env &env) { mx::system::Environment::set_env(&env); - std::uint16_t cores = env.cpu().affinity_space().total(); + auto sys_cores = mx::util::core_set::build(64); + mx::system::Environment::set_cores(&sys_cores); + + mx::memory::GlobalHeap::myself(); + std::uint16_t cores = 59; + //env.cpu().affinity_space().total(); char cores_arg[10]; - snprintf(cores_arg, 9, "1:%d", cores); + sprintf(cores_arg, "%d", cores); - char *args[] = {"blinktree_benchmark", cores_arg, "-pd", "3", "--sync4me"}; + char *args[] = {"blinktree_benchmark", "-i", "200", "--olfit", cores_arg}; Libc::with_libc([&]() { std::cout << "Starting B-link tree benchmark" << std::endl; - bt_main(env, 2, args); + bt_main(env, 5, args); }); } diff --git a/repos/mml/src/app/blinktree/target.mk b/repos/mml/src/app/blinktree/target.mk index 06ea86154a..55d290be6c 100644 --- a/repos/mml/src/app/blinktree/target.mk +++ b/repos/mml/src/app/blinktree/target.mk @@ -1,4 +1,5 @@ MXINC_DIR=$(REP_DIR)/src/app/blinktree +GENODE_GCC_TOOLCHAIN_DIR ?= /usr/local/genode/tool/21.05 TARGET = blinktree # soure file for benchmark framework @@ -6,11 +7,18 @@ SRC_MXBENCH = benchmark/workload_set.cpp SRC_MXBENCH += benchmark/workload.cpp SRC_MXBENCH += benchmark/cores.cpp SRC_MXBENCH += benchmark/string_util.cpp +SRC_MXBENCH += benchmark/perf.cpp # source files for blinktree benchmark SRC_BTREE += blinktree_benchmark/main.cpp SRC_BTREE += blinktree_benchmark/benchmark.cpp SRC_CC = ${SRC_MXBENCH} ${SRC_BTREE} -LIBS += base libc stdcxx mxtasking -CC_OPT += -Wno-error -fno-aligned-new -I$(MXINC_DIR) +LIBS += base libc stdcxx mxtasking +EXT_OBJECTS += /usr/local/genode/tool/lib/clang/14.0.5/lib/linux/libclang_rt.builtins-x86_64.a /usr/local/genode/tool/lib/libatomic.a +CUSTOM_CC = /usr/local/genode/tool/bin/clang +CUSTOM_CXX = /usr/local/genode/tool/bin/clang++ +CC_OPT += --target=x86_64-genode 
--sysroot=/does/not/exist --gcc-toolchain=$(GENODE_GCC_TOOLCHAIN_DIR) -Wno-error -O2 -g -DNDEBUG -I$(MXINC_DIR) -std=c++20 #-D_GLIBCXX_ATOMIC_BUILTINS_8 -D__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8 +#CC_OPT += -femulated-tls -DCLANG_CXX11_ATOMICS CC_CXX_WARN_STRICT = +CUSTOM_CXX_LIB := $(CROSS_DEV_PREFIX)g++ +#CXX_LD += $(CROSS_DEV_PREFIX)g++ diff --git a/repos/mml/src/app/blinktree_server/README.md b/repos/mml/src/app/blinktree_server/README.md new file mode 100644 index 0000000000..06f1770410 --- /dev/null +++ b/repos/mml/src/app/blinktree_server/README.md @@ -0,0 +1,79 @@ +# BLinkTree Benchmark +The BLinkTree-benchmark stores `8` byte numeric keys and values. +Call `./bin/blinktree_benchmark -h` for help and parameters. + +## How to generate YCSB workload +* Workload specifications are done by files in `workloads_specification/`. +* Call `make ycsb-a` and `make ycsb-c` to generate workloads **A** and **C**. +* Workload files are stored in `workloads/` +* Use `./bin/blinktree_benchmark -f ` to pass the desired workload. +* Default (if not specified) is `-f workloads/fill_randint_workloada workloads/mixed_randint_workloada`. + +## Important CLI arguments +* The first argument is the number of cores: + * `./bin/blinktree_benchmark 1` for using a single core. + * `./bin/blinktree_benchmark 1:24` for using cores `1` up to `24`. +* `-i ` specifies the number of repetitions of each workload. +* `-s ` steps of the cores: + * `-s 1` will increase the used cores by one (core ids: `0,1,2,3,4,5,6,7,..,23`). + * `-s 2` will skip every second core (core ids: `0,1,3,5,7,..23`). +* `-pd ` specifies the prefetch distance. +* `-p` or `--perf` will activate performance counter (result will be printed to console and output file). +* `--latched` will enable latches for synchronization (default off). +* `--exclusive` forces the tasks to access tree nodes exclusively (e.g. by using spinlocks or core-based sequencing) (default off). 
+* `--sync4me` will use built-in synchronization selection to choose the matching primitive based on annotations. +* `-o ` will write the results in **json** format to the given file. + +## Understanding the output +After started, the benchmark will print a summary of configured cores and workload: + + core configuration: + 1: 0 + 2: 0 1 + 4: 0 1 2 3 + workload: fill: 5m / readonly: 5m + +Here, we configured the benchmark to use one to four cores; each line of the core configuration displays the number of cores and the core identifiers. + +Following, the benchmark will be started and print the results for every iteration: + + 1 1 0 1478 ms 3.38295e+06 op/s + 1 1 1 1237 ms 4.04204e+06 op/s + 2 1 0 964 ms 5.18672e+06 op/s + 2 1 1 675 ms 7.40741e+06 op/s + 4 1 0 935 ms 5.34759e+06 op/s + 4 1 1 532 ms 9.3985e+06 op/s + +* The first column is the number of used cores. +* The second column displays the iteration of the benchmark (configured by `-i X`). +* Thirdly, the phase-identifier will be printed: `0` for initialization phase (which will be only inserts) and `1` for the workload phase (which is read-only here). +* After that, the time and throughput are written. +* If `--perf` is enabled, the output will be extended by some perf counters, which are labeled (like throughput). + +## Plot the results +When using `-o FILE`, the results will be written to the given file, using `JSON` format. +The plot script `scripts/plot_blinktree_benchmark INPUT_FILE [INPUT_FILE ...]` will aggregate and plot the results using one or more of those `JSON` files. 
+ +## Examples + +###### Running workload A using optimistic synchronization + + ./bin/blinktree_benchmark 1: -s 2 -i 3 -pd 3 -p -f workloads/fill_randint_workloada workloads/mixed_randint_workloada -o optimistic.json + +###### Running workload A using best matching synchronization + + ./bin/blinktree_benchmark 1: -s 2 -i 3 -pd 3 -p --sync4me -f workloads/fill_randint_workloada workloads/mixed_randint_workloada -o sync4me.json + +###### Running workload A using reader/writer-locks + + ./bin/blinktree_benchmark 1: -s 2 -i 3 -pd 3 -p --latched -f workloads/fill_randint_workloada workloads/mixed_randint_workloada -o rwlocked.json + +###### Running workload A using core-based sequencing + + ./bin/blinktree_benchmark 1: -s 2 -i 3 -pd 3 -p --exclusive -f workloads/fill_randint_workloada workloads/mixed_randint_workloada -o core-sequenced.json + +###### Running workload A using spin-locks + + ./bin/blinktree_benchmark 1: -s 2 -i 3 -pd 3 -p --latched --exclusive -f workloads/fill_randint_workloada workloads/mixed_randint_workloada -o spinlocked.json + + \ No newline at end of file diff --git a/repos/mml/src/app/blinktree_server/benchmark.cpp b/repos/mml/src/app/blinktree_server/benchmark.cpp new file mode 100644 index 0000000000..24117ba79f --- /dev/null +++ b/repos/mml/src/app/blinktree_server/benchmark.cpp @@ -0,0 +1,199 @@ +#include "benchmark.h" +#include +#include +#include +#include +#include + +using namespace application::blinktree_benchmark; + +Benchmark::Benchmark(benchmark::Cores &&cores, const std::uint16_t iterations, std::string &&fill_workload_file, + std::string &&mixed_workload_file, const bool use_performance_counter, + const mx::synchronization::isolation_level node_isolation_level, + const mx::synchronization::protocol preferred_synchronization_method, + const bool print_tree_statistics, const bool check_tree, std::string &&result_file_name, + std::string &&statistic_file_name, std::string &&tree_file_name, const bool profile) + : 
_cores(std::move(cores)), _iterations(iterations), _node_isolation_level(node_isolation_level), + _preferred_synchronization_method(preferred_synchronization_method), + _print_tree_statistics(print_tree_statistics), _check_tree(check_tree), + _result_file_name(std::move(result_file_name)), _statistic_file_name(std::move(statistic_file_name)), + _tree_file_name(std::move(tree_file_name)), _profile(profile) +{ + if (use_performance_counter) + { + this->_chronometer.add(benchmark::Perf::CYCLES); + this->_chronometer.add(benchmark::Perf::INSTRUCTIONS); + this->_chronometer.add(benchmark::Perf::STALLS_MEM_ANY); + this->_chronometer.add(benchmark::Perf::SW_PREFETCH_ACCESS_NTA); + this->_chronometer.add(benchmark::Perf::SW_PREFETCH_ACCESS_WRITE); + } + + std::cout << "core configuration: \n" << this->_cores.dump(2) << std::endl; + + this->_workload.build(fill_workload_file, mixed_workload_file); + if (this->_workload.empty(benchmark::phase::FILL) && this->_workload.empty(benchmark::phase::MIXED)) + { + std::exit(1); + } + + std::cout << "workload: " << this->_workload << "\n" << std::endl; +} + +void Benchmark::start() +{ + // Reset tree. + if (this->_tree == nullptr) + { + this->_tree = std::make_unique>( + this->_node_isolation_level, this->_preferred_synchronization_method); + } + + // Reset request scheduler. + if (this->_request_scheduler.empty() == false) + { + this->_request_scheduler.clear(); + } + + // Create one request scheduler per core. + for (auto core_index = 0U; core_index < this->_cores.current().size(); core_index++) + { + const auto channel_id = core_index; + auto *request_scheduler = mx::tasking::runtime::new_task( + 0U, core_index, channel_id, this->_workload, this->_cores.current(), this->_tree.get(), this); + mx::tasking::runtime::spawn(*request_scheduler, 0U); + this->_request_scheduler.push_back(request_scheduler); + } + this->_open_requests = this->_request_scheduler.size(); + + // Start measurement. 
+ if (this->_profile) + { + mx::tasking::runtime::profile(this->profile_file_name()); + } + this->_chronometer.start(static_cast(static_cast(this->_workload)), + this->_current_iteration + 1, this->_cores.current()); +} + +const mx::util::core_set &Benchmark::core_set() +{ + if (this->_current_iteration == std::numeric_limits::max()) + { + // This is the very first time we start the benchmark. + this->_current_iteration = 0U; + return this->_cores.next(); + } + + // Switch from fill to mixed phase. + if (this->_workload == benchmark::phase::FILL && this->_workload.empty(benchmark::phase::MIXED) == false) + { + this->_workload.reset(benchmark::phase::MIXED); + return this->_cores.current(); + } + this->_workload.reset(benchmark::phase::FILL); + + // Run the next iteration. + if (++this->_current_iteration < this->_iterations) + { + return this->_cores.current(); + } + this->_current_iteration = 0U; + + // At this point, all phases and all iterations for the current core configuration + // are done. Increase the cores. + return this->_cores.next(); +} + +void Benchmark::requests_finished() +{ + const auto open_requests = --this->_open_requests; + + if (open_requests == 0U) // All request schedulers are done. + { + // Stop and print time (and performance counter). + const auto result = this->_chronometer.stop(this->_workload.size()); + mx::tasking::runtime::stop(); + std::cout << result << std::endl; + + // Dump results to file. + if (this->_result_file_name.empty() == false) + { + std::ofstream result_file_stream(this->_result_file_name, std::ofstream::app); + result_file_stream << result.to_json().dump() << std::endl; + } + + // Dump statistics to file. 
+ if constexpr (mx::tasking::config::task_statistics()) + { + if (this->_statistic_file_name.empty() == false) + { + std::ofstream statistic_file_stream(this->_statistic_file_name, std::ofstream::app); + nlohmann::json statistic_json; + statistic_json["iteration"] = result.iteration(); + statistic_json["cores"] = result.core_count(); + statistic_json["phase"] = result.phase(); + statistic_json["scheduled"] = nlohmann::json(); + statistic_json["scheduled-on-channel"] = nlohmann::json(); + statistic_json["scheduled-off-channel"] = nlohmann::json(); + statistic_json["executed"] = nlohmann::json(); + statistic_json["executed-reader"] = nlohmann::json(); + statistic_json["executed-writer"] = nlohmann::json(); + statistic_json["buffer-fills"] = nlohmann::json(); + for (auto i = 0U; i < this->_cores.current().size(); i++) + { + const auto core_id = std::int32_t{this->_cores.current()[i]}; + const auto core_id_string = std::to_string(core_id); + statistic_json["scheduled"][core_id_string] = + result.scheduled_tasks(core_id) / double(result.operation_count()); + statistic_json["scheduled-on-core"][core_id_string] = + result.scheduled_tasks_on_core(core_id) / double(result.operation_count()); + statistic_json["scheduled-off-core"][core_id_string] = + result.scheduled_tasks_off_core(core_id) / double(result.operation_count()); + statistic_json["executed"][core_id_string] = + result.executed_tasks(core_id) / double(result.operation_count()); + statistic_json["executed-reader"][core_id_string] = + result.executed_reader_tasks(core_id) / double(result.operation_count()); + statistic_json["executed-writer"][core_id_string] = + result.executed_writer_tasks(core_id) / double(result.operation_count()); + statistic_json["fill"][core_id_string] = + result.worker_fills(core_id) / double(result.operation_count()); + } + + statistic_file_stream << statistic_json.dump(2) << std::endl; + } + } + + // Check and print the tree. 
+ if (this->_check_tree) + { + this->_tree->check(); + } + + if (this->_print_tree_statistics) + { + this->_tree->print_statistics(); + } + + const auto is_last_phase = + this->_workload == benchmark::phase::MIXED || this->_workload.empty(benchmark::phase::MIXED); + + // Dump the tree. + if (this->_tree_file_name.empty() == false && is_last_phase) + { + std::ofstream tree_file_stream(this->_tree_file_name); + tree_file_stream << static_cast(*(this->_tree)).dump() << std::endl; + } + + // Delete the tree to free the hole memory. + if (is_last_phase) + { + this->_tree.reset(nullptr); + } + } +} + +std::string Benchmark::profile_file_name() const +{ + return "profiling-" + std::to_string(this->_cores.current().size()) + "-cores" + "-phase-" + + std::to_string(static_cast(static_cast(this->_workload))) + "-iteration-" + + std::to_string(this->_current_iteration) + ".json"; +} \ No newline at end of file diff --git a/repos/mml/src/app/blinktree_server/benchmark.h b/repos/mml/src/app/blinktree_server/benchmark.h new file mode 100644 index 0000000000..ae0b789b4b --- /dev/null +++ b/repos/mml/src/app/blinktree_server/benchmark.h @@ -0,0 +1,103 @@ +#pragma once + +#include "listener.h" +#include "request_scheduler.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace application::blinktree_benchmark { +/** + * Benchmark executing the task-based BLink-Tree. 
+ */ +class Benchmark final : public Listener +{ +public: + Benchmark(benchmark::Cores &&, std::uint16_t iterations, std::string &&fill_workload_file, + std::string &&mixed_workload_file, bool use_performance_counter, + mx::synchronization::isolation_level node_isolation_level, + mx::synchronization::protocol preferred_synchronization_method, bool print_tree_statistics, + bool check_tree, std::string &&result_file_name, std::string &&statistic_file_name, + std::string &&tree_file_name, bool profile); + + ~Benchmark() noexcept override = default; + + /** + * @return Core set the benchmark should run in the current iteration. + */ + const mx::util::core_set &core_set(); + + /** + * Callback for request tasks to notify they are out of + * new requests. + */ + void requests_finished() override; + + /** + * Starts the benchmark after initialization. + */ + void start(); + +private: + // Collection of cores the benchmark should run on. + benchmark::Cores _cores; + + // Number of iterations the benchmark should use. + const std::uint16_t _iterations; + + // Current iteration within the actual core set. + std::uint16_t _current_iteration = std::numeric_limits::max(); + + // Workload to get requests from. + benchmark::Workload _workload; + + // Tree to run requests on. + std::unique_ptr> _tree; + + // The synchronization mechanism to use for tree nodes. + const mx::synchronization::isolation_level _node_isolation_level; + + // Preferred synchronization method. + const mx::synchronization::protocol _preferred_synchronization_method; + + // If true, the tree statistics (height, number of nodes, ...) will be printed. + const bool _print_tree_statistics; + + // If true, the tree will be checked for consistency after each iteration. + const bool _check_tree; + + // Name of the file to print results to. + const std::string _result_file_name; + + // Name of the file to print further statistics. 
+ const std::string _statistic_file_name; + + // Name of the file to serialize the tree to. + const std::string _tree_file_name; + + // If true, use idle profiling. + const bool _profile; + + // Number of open request tasks; used for tracking the benchmark. + alignas(64) std::atomic_uint16_t _open_requests = 0; + + // List of request schedulers. + alignas(64) std::vector _request_scheduler; + + // Chronometer for starting/stopping time and performance counter. + alignas(64) benchmark::Chronometer _chronometer; + + /** + * @return Name of the file to write profiling results to. + */ + [[nodiscard]] std::string profile_file_name() const; +}; +} // namespace application::blinktree_benchmark \ No newline at end of file diff --git a/repos/mml/src/app/blinktree_server/config.h b/repos/mml/src/app/blinktree_server/config.h new file mode 100644 index 0000000000..2144075be8 --- /dev/null +++ b/repos/mml/src/app/blinktree_server/config.h @@ -0,0 +1,17 @@ +#pragma once + +namespace application::blinktree_benchmark { +class config +{ +public: + /** + * @return Number of requests that will be started at a time by the request scheduler. + */ + static constexpr auto batch_size() noexcept { return 500U; } + + /** + * @return Number of maximal open requests, system-wide. + */ + static constexpr auto max_parallel_requests() noexcept { return 1500U; } +}; +} // namespace application::blinktree_benchmark \ No newline at end of file diff --git a/repos/mml/src/app/blinktree_server/listener.h b/repos/mml/src/app/blinktree_server/listener.h new file mode 100644 index 0000000000..5a911fa6b5 --- /dev/null +++ b/repos/mml/src/app/blinktree_server/listener.h @@ -0,0 +1,15 @@ +#pragma once + +namespace application::blinktree_benchmark { +/** + * The listener will be used to notify the benchmark that request tasks are + * done and no more work is available. 
+ */ +class Listener +{ +public: + constexpr Listener() = default; + virtual ~Listener() = default; + virtual void requests_finished() = 0; +}; +} // namespace application::blinktree_benchmark \ No newline at end of file diff --git a/repos/mml/src/app/blinktree_server/main.cpp b/repos/mml/src/app/blinktree_server/main.cpp new file mode 100644 index 0000000000..d821ad939a --- /dev/null +++ b/repos/mml/src/app/blinktree_server/main.cpp @@ -0,0 +1,159 @@ +#include "benchmark.h" +#include +#include +#include +#include +#include +#include +#include "server.h" +#include +#include + +/* Genode includes */ +#include + +using namespace application::blinktree_server; + +/** + * Instantiates the BLink-Tree server with CLI arguments. + * @param count_arguments Number of CLI arguments. + * @param arguments Arguments itself. + * + * @return Instance of the server. + */ +std::tuple create_server(int count_arguments, char **arguments); + +/** + * Starts the server. + * + * @param count_arguments Number of CLI arguments. + * @param arguments Arguments itself. + * + * @return Return code of the application. + */ +int bt_main(int count_arguments, char **arguments) +{ + if (mx::system::Environment::is_numa_balancing_enabled()) + { + std::cout << "[Warn] NUMA balancing may be enabled, set '/proc/sys/kernel/numa_balancing' to '0'" << std::endl; + } + + auto [server, prefetch_distance, use_system_allocator] = create_server(count_arguments, arguments); + + if (server != nullptr) + { + /// Wait for the server to finish. + server->run(); + + delete server; + } + + return 0; +} + +std::tuple create_server(int count_arguments, char **arguments) +{ + /* + // Set up arguments. 
+ argparse::ArgumentParser argument_parser("blinktree_server"); + argument_parser.add_argument("cores") + .help("Number of cores to use.") + .default_value(std::uint16_t(1)) + .action([](const std::string &value) { return std::uint16_t(std::stoi(value)); }); + argument_parser.add_argument("--port") + .help("Port of the server") + .default_value(std::uint64_t(12345)) + .action([](const std::string &value) { return std::uint64_t(std::stoi(value)); }); + argument_parser.add_argument("-sco", "--system-core-order") + .help("Use systems core order. If not, cores are ordered by node id (should be preferred).") + .implicit_value(true) + .default_value(false); + argument_parser.add_argument("--exclusive") + .help("Are all node accesses exclusive?") + .implicit_value(true) + .default_value(false); + argument_parser.add_argument("--latched") + .help("Prefer latch for synchronization?") + .implicit_value(true) + .default_value(false); + argument_parser.add_argument("--olfit") + .help("Prefer OLFIT for synchronization?") + .implicit_value(true) + .default_value(false); + argument_parser.add_argument("--sync4me") + .help("Let the tasking layer decide the synchronization primitive.") + .implicit_value(true) + .default_value(false); + argument_parser.add_argument("-pd", "--prefetch-distance") + .help("Distance of prefetched data objects (0 = disable prefetching).") + .default_value(std::uint16_t(0)) + .action([](const std::string &value) { return std::uint16_t(std::stoi(value)); }); + argument_parser.add_argument("--system-allocator") + .help("Use the systems malloc interface to allocate tasks (default disabled).") + .implicit_value(true) + .default_value(false); + + // Parse arguments. + try + { + argument_parser.parse_args(count_arguments, arguments); + } + catch (std::runtime_error &e) + { + std::cout << argument_parser << std::endl; + return {nullptr, 0U, false}; + } + + auto order = + argument_parser.get("-sco") ? 
mx::util::core_set::Order::Ascending : mx::util::core_set::Order::NUMAAware; + auto cores = mx::util::core_set::build(argument_parser.get("cores")-1, order); + const auto isolation_level = argument_parser.get("--exclusive") + ? mx::synchronization::isolation_level::Exclusive + : mx::synchronization::isolation_level::ExclusiveWriter; + auto preferred_synchronization_method = mx::synchronization::protocol::Queue; + if (argument_parser.get("--latched")) + { + preferred_synchronization_method = mx::synchronization::protocol::Latch; + } + else if (argument_parser.get("--olfit")) + { + preferred_synchronization_method = mx::synchronization::protocol::OLFIT; + } + else if (argument_parser.get("--sync4me")) + { + preferred_synchronization_method = mx::synchronization::protocol::None; + } + */ + // Create the benchmark. + //auto *server = new Server(argument_parser.get("--port"), std::move(cores), argument_parser.get("-pd"), isolation_level, preferred_synchronization_method); + + auto cores = mx::util::core_set::build(64); + + auto *server = new Server(12345, std::move(cores), 3, mx::synchronization::isolation_level::ExclusiveWriter, mx::synchronization::protocol::OLFIT); + + return {server, 3, false}; + // return {server, argument_parser.get("-pd"), argument_parser.get("--system-allocator")}; +} + +void Libc::Component::construct(Libc::Env &env) { + + mx::system::Environment::set_env(&env); + + auto sys_cores = mx::util::core_set::build(64); + mx::system::Environment::set_cores(&sys_cores); + + mx::memory::GlobalHeap::myself(); + std::uint16_t cores = 64; + //env.cpu().affinity_space().total(); + + char cores_arg[10]; + sprintf(cores_arg, "%d", cores); + + char *args[] = {"blinktree_server", cores_arg}; + + Libc::with_libc([&]() + { + std::cout << "Starting B-link tree server" << std::endl; + bt_main(2, args); + }); +} \ No newline at end of file diff --git a/repos/mml/src/app/blinktree_server/network/config.h b/repos/mml/src/app/blinktree_server/network/config.h new file 
mode 100644 index 0000000000..026c610a96 --- /dev/null +++ b/repos/mml/src/app/blinktree_server/network/config.h @@ -0,0 +1,9 @@ +#pragma once + +namespace application::blinktree_server::network { +class config +{ +public: + static constexpr auto max_connections() noexcept { return 64U; } +}; +} // namespace mx::io::network \ No newline at end of file diff --git a/repos/mml/src/app/blinktree_server/network/server.cpp b/repos/mml/src/app/blinktree_server/network/server.cpp new file mode 100644 index 0000000000..e2ff91ccbe --- /dev/null +++ b/repos/mml/src/app/blinktree_server/network/server.cpp @@ -0,0 +1,515 @@ +#include "server.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +using namespace application::blinktree_server::network; + +mx::tasking::TaskResult RequestTask::execute(const std::uint16_t core_id, const std::uint16_t channel_id) +{ + mx::tasking::TaskInterface* request_task; + + if (this->_type == Type::Insert) + { + request_task = mx::tasking::runtime::new_task< + db::index::blinktree::InsertValueTask>( + core_id, this->_key, this->_value, this->_tree, this->_response_handler); + + request_task->annotate(this->_tree->root(), db::index::blinktree::config::node_size() / 4U); + request_task->is_readonly(true); + } + else if (this->_type == Type::Lookup) + { + request_task = mx::tasking::runtime::new_task< + db::index::blinktree::LookupTask>( + core_id, this->_key, this->_response_handler); + + request_task->annotate(this->_tree->root(), db::index::blinktree::config::node_size() / 4U); + request_task->is_readonly(true); + } + else if(this->_type == Type::Update) + { + request_task = mx::tasking::runtime::new_task< + db::index::blinktree::UpdateTask>( + core_id, this->_key, this->_value, this->_response_handler); + + request_task->annotate(this->_tree->root(), db::index::blinktree::config::node_size() / 4U); + request_task->is_readonly(true); + } + else + { + this->_tree->check(); + 
this->_tree->print_statistics(); + return mx::tasking::TaskResult::make_null(); + } + + return mx::tasking::TaskResult::make_succeed_and_remove(request_task); +} + +void ResponseHandler::inserted(const std::uint16_t /*core*/, const std::uint64_t key, const std::int64_t /*value*/) +{ + _server->send(_s, std::to_string(key)); + Server::free_handler_task(core_id, static_cast(this)); +} + +void ResponseHandler::updated(const std::uint16_t /*core_id*/, const std::uint64_t key, const std::int64_t /*value*/) +{ + _server-> send(_s, std::to_string(key)); + Server::free_handler_task(core_id, static_cast(this)); +} + +void ResponseHandler::removed(const std::uint16_t /*core_id*/, const std::uint64_t key) +{ + _server-> send(_s, std::to_string(key)); + Server::free_handler_task(core_id, static_cast(this)); +} + +void ResponseHandler::found(const std::uint16_t /*core_id*/, const std::uint64_t /*key*/, const std::int64_t value) +{ + _server-> send(_s, std::to_string(value)); + Server::free_handler_task(core_id, static_cast(this)); +} + +void ResponseHandler::missing(const std::uint16_t /*core_id*/, const std::uint64_t key) +{ + _server-> send(_s, std::to_string(key)); + Server::free_handler_task(core_id, static_cast(this)); +} + +Server *Server::_myself; + +ReceiveTask *Server::_receive_tasks = nullptr; + +Server::Server(Libc::Env &env, + const std::uint64_t port, + const std::uint16_t count_channels, Timer::Connection &timer, Genode::Heap &alloc) noexcept + : _port(port), _socket(nullptr), _client_sockets({nullptr}), + _count_channels(count_channels), _env{env}, _config(env, "config"), _alloc(alloc), _timer(timer), _netif(env, _alloc, _config.xml()) +{ + Server::_myself = this; + this->_buffer.fill('\0'); + + _receive_tasks = static_cast(mx::memory::GlobalHeap::allocate_cache_line_aligned(65536 * sizeof(ReceiveTask))); + + _handler_allocator.reset(new (mx::memory::GlobalHeap::allocate_cache_line_aligned(sizeof(mx::memory::dynamic::Allocator))) 
mx::memory::dynamic::Allocator()); + + _task_allocator.reset(new (mx::memory::GlobalHeap::allocate_cache_line_aligned(sizeof(mx::memory::dynamic::Allocator))) mx::memory::dynamic::Allocator()); +} + +Server::~Server() { +} + +bool Server::listen(db::index::blinktree::BLinkTree* tree) +{ + _socket = Lwip::tcp_new(); + + if (!_socket) { + Genode::error("Failed to create server socket"); + return false; + } + + Lwip::err_t rc = Lwip::tcp_bind(_socket, &Lwip::ip_addr_any, _port); + if (rc != Lwip::ERR_OK) { + Genode::error("Failed to bind server socket to port ", _port); + return false; + } + + _socket = Lwip::tcp_listen_with_backlog(_socket, 64); + Lwip::tcp_accept(_socket, &Server::_handle_tcp_connect); + + this->_tree = tree; + + return true; +} + +void Server::parse(struct Server::state *s, std::string &message) +{ + RequestTask::Type request_type; + + std::uint64_t i = s->id; + + if (message[0] == 'D') + { + auto response_handler = new (_handler_allocator->allocate(0, 64, sizeof(ResponseHandler))) ResponseHandler(this, s, 0); + //auto *request_task = new (&this->_request_tasks[i]) RequestTask{this->_tree, *response_handler}; + auto *request_task = mx::tasking::runtime::new_task(0, this->_tree, *response_handler); + request_task->annotate(std::uint16_t(0U)); + mx::tasking::runtime::spawn(*request_task); + } + else + { + switch (message[0]) + { + case 'I': + request_type = RequestTask::Type::Insert; + break; + case 'U': + request_type = RequestTask::Type::Update; + break; + default: + request_type = RequestTask::Type::Lookup; + } + + auto key = 0ULL; + auto index = 2U; // Skip request type and comma. 
+ while (message[index] >= '0' && message[index] <= '9') + { + key = key * 10 + (message[index++] - '0'); + } + + auto channel_id = std::uint16_t(this->_next_worker_id.fetch_add(1U) % this->_count_channels); + if (request_type == RequestTask::Type::Insert || request_type == RequestTask::Type::Lookup) + { + auto value = 0LL; + ++index; + while (message[index] >= '0' && message[index] <= '9') + { + value = value * 10 + (message[index++] - '0'); + } + + auto response_handler = new (_handler_allocator->allocate(mx::system::topology::node_id(channel_id), 64, sizeof(ResponseHandler))) ResponseHandler(this, s, channel_id); + auto *request_task = mx::tasking::runtime::new_task(channel_id, this->_tree, request_type, key, value, *response_handler); + request_task->annotate(channel_id); + mx::tasking::runtime::spawn(*request_task); + } + else + { + //auto *request_task = new (&this->_request_tasks[i]) RequestTask{this->_tree, RequestTask::Type::Lookup, key, this->_response_handlers[i]}; + auto response_handler = new (_handler_allocator->allocate(mx::system::topology::node_id(channel_id), 64, sizeof(ResponseHandler))) ResponseHandler(this, s, channel_id); + auto *request_task = mx::tasking::runtime::new_task(channel_id, this->_tree, request_type, key, *response_handler); + request_task->annotate(channel_id); + mx::tasking::runtime::spawn(*request_task); + } + mx::tasking::runtime::scheduler().allocate_cores(64); + } +} + +class Send_task : public mx::tasking::TaskInterface +{ + private: + struct Server::state *_s; + std::string _message; + + public: + Send_task(Server::state *s, std::string message) : _s(s), _message(message) {} + + mx::tasking::TaskResult execute(std::uint16_t, std::uint16_t) override + { + using namespace Lwip; + Lwip::pbuf *ptr = nullptr; + + if (_s->state == Server::CLOSED || _s->state == Server::CLOSING) { + Genode::warning("Tried to send over socket that is to be closed"); + Server::free_task(static_cast(this)); + return 
mx::tasking::TaskResult::make_null(); + } + + ptr = Lwip::pbuf_alloc(Lwip::PBUF_TRANSPORT, _message.length(), Lwip::PBUF_RAM); + + if (!(_s->pcb) || !_s) { + Genode::error("Tried sending over invalid pcb"); + Server::free_task(static_cast(this)); + return mx::tasking::TaskResult::make_null(); + } + + if (!ptr) + { + Genode::error("No memory for sending packet."); + Server::free_task(static_cast(this)); + return mx::tasking::TaskResult::make_null(); + } + + if (ptr >= reinterpret_cast(0x7FFF80000000UL) || _s->pcb >= reinterpret_cast(0x7FFF80000000UL)) + { + Genode::error("Allocated buffer or pcb is at non-canonical address. Aborting. ptr=", static_cast(ptr), " pcb=", static_cast(_s->pcb), " s=", static_cast(_s)); + Server::free_task(static_cast(this)); + return mx::tasking::TaskResult::make_null(); + } + + ptr->payload = static_cast(const_cast(_message.c_str())); + ptr->len = _message.length(); + + if (ptr->len > tcp_sndbuf(_s->pcb)) + Genode::warning("Not enough space in send buffer"); + + Lwip::err_t rc = Lwip::ERR_OK; + { + rc = Lwip::tcp_write(_s->pcb, ptr->payload, ptr->len, TCP_WRITE_FLAG_COPY); + } + if (rc == Lwip::ERR_OK) + { + Lwip::tcp_output(_s->pcb); + Lwip::pbuf_free(ptr); + } else { + if (_s->tx == nullptr) + _s->tx = ptr; + else { + Lwip::pbuf_cat(_s->tx, ptr); + } + } + Server::free_task(static_cast(this)); + return mx::tasking::TaskResult::make_null(); + } +}; + +void +Server::send(struct state *s, std::string &&message) +{ + + const auto length = std::uint64_t(message.size()); + auto response = std::string(length + sizeof(length), '\0'); + + // Write header + std::memcpy(response.data(), static_cast(&length), sizeof(length)); + + // Write data + std::memmove(response.data() + sizeof(length), message.data(), length); + + auto task = new (Server::get_instance()->_task_allocator->allocate(0, 64, sizeof(Send_task))) Send_task(s, response); + task->annotate(static_cast(s->channel_id)); + mx::tasking::runtime::spawn(*task); +} + +std::uint16_t 
Server::add_client(Lwip::tcp_pcb* client_socket) +{ + for (auto i = 0U; i < this->_client_sockets.size(); ++i) + { + if (this->_client_sockets[i] == 0U) + { + this->_client_sockets[i] = client_socket; + return i; + } + } + + return std::numeric_limits::max(); +} + +void Server::stop() noexcept +{ + this->_is_running = false; +} + +class Close_task : public mx::tasking::TaskInterface +{ + private: + Server::state &_s; + + public: + Close_task(Server::state &s) : _s(s) {} + + mx::tasking::TaskResult execute(std::uint16_t, std::uint16_t) + { + Genode::log("Closing connection for ", static_cast(_s.pcb) , " and state object ", static_cast(&_s)); + Server::tcpbtree_close(_s.pcb, &_s); + _s.state = Server::CLOSED; + Server::free_task(static_cast(this)); + return mx::tasking::TaskResult::make_null(); + } +}; + +/*********** + * LWIP callback function definitions + ***********/ +Lwip::err_t Server::_handle_tcp_connect(void *arg, struct Lwip::tcp_pcb *newpcb, Lwip::err_t err) +{ + + struct state *s; + + static uint64_t count_connections = 0; + + LWIP_UNUSED_ARG(arg); + + if ((err != Lwip::ERR_OK) || (newpcb == NULL)) { + return Lwip::ERR_VAL; + } + + //Genode::log("Incoming request"); + + s = new (Lwip::mem_malloc(sizeof(struct state))) state(); // static_cast(Lwip::mem_malloc(sizeof(struct state))); + + if (!s) { + Genode::error("Failed to allocate state object for new connection."); + return Lwip::ERR_MEM; + } + //Genode::log("New connection #", count_connections, ": arg=", arg, " pcb=", newpcb, " s=", s, " &s=", static_cast(&s)); + + s->state = states::ACCEPTED; + s->pcb = newpcb; + s->retries = 0; + s->p = nullptr; + s->tx = nullptr; + s->channel_id = 0; //count_connections % Server::get_instance()->_count_channels; + + Lwip::tcp_backlog_accepted(newpcb); + /* Register callback functions */ + Lwip::tcp_arg(newpcb, s); + Lwip::tcp_recv(newpcb, &Server::_handle_tcp_recv); + Lwip::tcp_err(newpcb, &Server::_handle_tcp_error); + Lwip::tcp_poll(newpcb, 
&Server::_handle_tcp_poll, 50); + Lwip::tcp_sent(newpcb, &Server::_handle_tcp_sent); + newpcb->flags |= TF_NODELAY; + + return Lwip::ERR_OK; +} + +Lwip::err_t Server::_handle_tcp_recv(void *arg, struct Lwip::tcp_pcb *tpcb, struct Lwip::pbuf *p, Lwip::err_t err) +{ + static std::uint16_t next_receive_task = 0; + struct state *s; + Lwip::err_t rc = Lwip::ERR_OK; + + std::uint16_t next_channel_id = 0; + + s = static_cast(arg); + + if (err != Lwip::ERR_OK) { + return err; + } + + if (p == nullptr) { + s->state = CLOSING; + auto task = new (Server::get_instance()->_task_allocator->allocate(0, 64, sizeof(Close_task))) Close_task(*s); + if (!task) { + Genode::warning("Failed to allocate close task"); + return Lwip::ERR_MEM; + } + task->annotate(static_cast(s->channel_id)); + mx::tasking::runtime::spawn(*task); + Lwip::pbuf_free(p); + rc = Lwip::ERR_OK; + } else if (err != Lwip::ERR_OK) { + rc = err; + } else if (s->state == states::ACCEPTED) { + s->state == states::RECEIVED; + + // TODO: parse message and spawn request task here + rc = Lwip::ERR_OK; + { + ReceiveTask *task = new (Server::get_instance()->_task_allocator->allocate(0, 64, sizeof(ReceiveTask))) ReceiveTask(s, p); + if (!task) { + Genode::warning("Could not allocate request handler task"); + return Lwip::ERR_MEM; + } + task->annotate(static_cast(s->channel_id)); + mx::tasking::runtime::spawn(*task); + } + Lwip::tcp_recved(s->pcb, p->len); + //Server::get_instance()->send(s, "Nope"); + } + else if (s->state == states::RECEIVED) + { + ReceiveTask *task = new (Server::get_instance()->_task_allocator->allocate(0, 64, sizeof(ReceiveTask))) ReceiveTask(s, p); + if (!task) { + Genode::warning("Could not allocate request handler task"); + return Lwip::ERR_MEM; + } + task->annotate(static_cast(s->channel_id)); + mx::tasking::runtime::spawn(*task); + Lwip::tcp_recved(s->pcb, p->len); + //Server::get_instance()->send(s, "Nope"); + + rc = Lwip::ERR_OK; + } + else + { + Lwip::tcp_recved(tpcb, p->tot_len); + 
Lwip::pbuf_free(p); + rc = Lwip::ERR_OK; + } + + return rc; +} + +Lwip::err_t Server::_handle_tcp_poll(void *arg, struct Lwip::tcp_pcb *tpcb) +{ + Lwip::err_t rc; + struct state *s; + + //GENODE_LOG_TSC(1); + s = static_cast(arg); + + if (s) { + if (s->tx) { + rc = Lwip::tcp_write(tpcb, s->tx->payload, s->tx->len, 1); + if (rc == Lwip::ERR_OK) { + Lwip::tcp_output(tpcb); + Lwip::pbuf *ptr = s->tx; + if (ptr->next) { + s->tx = ptr->next; + Lwip::pbuf_ref(s->tx); + } + Lwip::tcp_recved(tpcb, ptr->len); + Lwip::pbuf_free(ptr); + } + // TODO: process remaning pbuf entry + } else { + /*if (s->state == states::CLOSING) { + Server::tcpbtree_close(tpcb, s); + }*/ + } + rc = Lwip::ERR_OK; + } else { + Lwip::tcp_abort(tpcb); + rc = Lwip::ERR_ABRT; + } + + return Lwip::ERR_OK; +} + + +Lwip::err_t Server::_handle_tcp_sent(void *arg, struct Lwip::tcp_pcb *tpcb, std::uint16_t len) +{ + //GENODE_LOG_TSC(1); + struct state *s = static_cast(arg); + s->retries = 0; + + if (s->tx) { + Lwip::err_t rc = Lwip::tcp_write(tpcb, s->tx->payload, s->tx->len, 1); + if (rc == Lwip::ERR_OK) { + Lwip::tcp_output(tpcb); + Lwip::pbuf *ptr = s->tx; + if (ptr->next) { + s->tx = ptr->next; + Lwip::pbuf_ref(s->tx); + } + Lwip::tcp_recved(tpcb, ptr->len); + Lwip::pbuf_free(ptr); + } + tcp_sent(tpcb, &Server::_handle_tcp_sent); // Genode::log("In _handle_tcp_sent"); + } + + return Lwip::ERR_OK; +} + +mx::tasking::TaskResult application::blinktree_server::network::ReceiveTask::execute(std::uint16_t core_id, std::uint16_t channel_id) +{ + Lwip::err_t rc = Lwip::ERR_OK; + + /*rc = Lwip::tcp_write(_state->pcb, _pbuf->payload, _pbuf->len, 3); + Lwip::tcp_output(_state->pcb); + if (rc == Lwip::ERR_OK) { + Lwip::tcp_recved(_state->pcb, _pbuf->tot_len); + Lwip::pbuf_free(_pbuf); + } else if (rc == Lwip::ERR_MEM) { + Genode::warning("Out of memory"); + }*/ + + //Genode::log("Executing application task"); + //Server::get_instance()->send(_state, "Nope"); + // Server::tcp_send(_state->pcb, _state); + + std::string 
request = std::string(static_cast(_pbuf->payload), _pbuf->len); + Server::get_instance()->parse(_state, request); + + Lwip::pbuf_free(_pbuf); + + Server::free_task(static_cast(this)); + return mx::tasking::TaskResult::make_null(); +} \ No newline at end of file diff --git a/repos/mml/src/app/blinktree_server/network/server.h b/repos/mml/src/app/blinktree_server/network/server.h new file mode 100644 index 0000000000..91e1cee63f --- /dev/null +++ b/repos/mml/src/app/blinktree_server/network/server.h @@ -0,0 +1,287 @@ +#pragma once + +#include "config.h" +#include +#include +#include +#include +#include +#include + +/* B-link tree includes */ +#include +#include + +/* lwIP wrapper for Genode's NIC session */ +#include +#include +#include + +/* Genode includes */ +#include +#include +#include + +/* MxTasking includes*/ +#include +#include +#include +#include +#include + +/* lwIP includes */ +namespace Lwip { + extern "C" { + #include + #include + #include + } +} + +namespace application::blinktree_server::network { + + class ResponseHandler; + class RequestTask; + class ReceiveTask; + class Server + { + public: + enum states + { + NONE = 0, + ACCEPTED, + RECEIVED, + CLOSING, + CLOSED + }; + + struct state + { + std::uint8_t state; + std::uint8_t retries; + struct Lwip::tcp_pcb *pcb; + struct Lwip::pbuf *p; + struct Lwip::pbuf *tx; + std::uint16_t channel_id; + std::uint64_t id; + }; + Server(Libc::Env &env, std::uint64_t port, + std::uint16_t count_channels, Timer::Connection &timer, Genode::Heap &alloc) noexcept; + ~Server(); + + [[nodiscard]] std::uint16_t port() const noexcept { return _port; } + void stop() noexcept; + void send(struct Server::state *s, std::string &&message); + bool listen(db::index::blinktree::BLinkTree *tree); + void parse(struct Server::state *s, std::string &message); + + [[nodiscard]] bool is_running() const noexcept { return _is_running; } + + static void tcp_send(struct Lwip::tcp_pcb *tpcb, struct state *s) + { + using namespace Lwip; + 
struct Lwip::pbuf *ptr; + Lwip::err_t rc = Lwip::ERR_OK; + + + while ((rc == Lwip::ERR_OK) && (s->tx != nullptr) /* && (s->tx->len <= tcp_sndbuf(tpcb) */) + { + ptr = s->tx; + // Genode::log("Sending response"); + rc = Lwip::tcp_write(tpcb, ptr->payload, ptr->len, 1); + if (rc == Lwip::ERR_OK) + { + std::uint16_t plen; + + plen = ptr->len; + + s->tx = ptr->next; + if (s->tx != nullptr) + { + Lwip::pbuf_ref(s->tx); + } + Lwip::tcp_output(tpcb); + Lwip::pbuf_free(ptr); + } + else if (rc == Lwip::ERR_MEM) + { + Genode::warning("Low on memory. Defering to poll()"); + s->tx = ptr; + } + else + { + Genode::warning("An error ", static_cast(rc), " occured."); + } + } + } + + static void tcpbtree_close(struct Lwip::tcp_pcb *tpcb, struct state *s) + { + if (s->pcb != tpcb) { + Genode::error("Tried closing connection with invalid session state"); + return; + } + Lwip::tcp_arg(tpcb, NULL); + Lwip::tcp_sent(tpcb, NULL); + Lwip::tcp_recv(tpcb, NULL); + Lwip::tcp_poll(tpcb, NULL, 0); + Lwip::tcp_err(tpcb, nullptr); + + Server::tcp_free(s); + + Lwip::tcp_close(tpcb); + } + + /* tcp_recv */ + static Lwip::err_t _handle_tcp_recv(void *arg, struct Lwip::tcp_pcb *tpcb, struct Lwip::pbuf *p, Lwip::err_t err); + + /* tcp_err */ + static void _handle_tcp_error(void *arg, Lwip::err_t err) + { + struct state *s; + LWIP_UNUSED_ARG(err); + + s = static_cast(arg); + + Server::tcp_free(s); + } + + /* tcp_poll */ + static Lwip::err_t _handle_tcp_poll(void *arg, struct Lwip::tcp_pcb *tpcb); + + /* tcp_sent */ + static Lwip::err_t _handle_tcp_sent(void *arg, struct Lwip::tcp_pcb *tpcb, std::uint16_t len); + + /* helper function for free */ + static void tcp_free(struct state *s) + { + // Genode::log("Freeing state obj s=", s); + if (s) + { + if (s->p) + Lwip::pbuf_free(s->p); + if (s->tx) + Lwip::pbuf_free(s->tx); + delete s; // Lwip::mem_free(s); + } + } + + static Server *get_instance() { return _myself; } + + static void free_handler_task(std::uint16_t core_id, void* task) + { + 
Server::get_instance()->_handler_allocator->free(task); + } + + static void free_task(void* task) + { + Server::get_instance()->_task_allocator->free(task); + } + + private: + static Server *_myself; + const std::uint64_t _port; + struct Lwip::tcp_pcb *_socket; + Libc::Env &_env; + + std::array _client_sockets; + std::array _buffer; + static ReceiveTask *_receive_tasks; + + alignas(64) bool _is_running = true; + alignas(64) std::atomic_uint64_t _next_worker_id{0U}; + const std::uint16_t _count_channels; + + std::uint16_t add_client(Lwip::tcp_pcb *client_socket); + + /* Genode environment for NIC session */ + Genode::Attached_rom_dataspace _config; + Genode::Heap &_alloc; + Timer::Connection &_timer; + + /* lwIP network device (NIC session wrapper) */ + Lwip::Nic_netif _netif; + db::index::blinktree::BLinkTree *_tree{nullptr}; + + std::unique_ptr _handler_allocator{nullptr}; + std::unique_ptr _task_allocator{nullptr}; + + /************************************************ + * lwIP callback API: TCP callback functions + ************************************************/ + + /* tcp_accept */ + static Lwip::err_t + _handle_tcp_connect(void *arg, struct Lwip::tcp_pcb *newpcb, Lwip::err_t err); + + + + /* helper function for close() */ + +}; + +class alignas(64) ResponseHandler final : public db::index::blinktree::Listener +{ +public: + ResponseHandler(Server* server, Server::state *s, std::uint16_t _core_id) : _server(server), _s(s), core_id(_core_id) { } + ResponseHandler(ResponseHandler&&) noexcept = default; + ~ResponseHandler() = default; + + void inserted(std::uint16_t core_id, const std::uint64_t key, const std::int64_t value) override; + void updated(std::uint16_t core_id, const std::uint64_t key, const std::int64_t value) override; + void removed(std::uint16_t core_id, const std::uint64_t key) override; + void found(std::uint16_t core_id, const std::uint64_t key, const std::int64_t value) override; + void missing(std::uint16_t core_id, const std::uint64_t key) 
override; + +private: + Server* _server; + Server::state *_s; + std::uint16_t core_id{0}; +}; + +class alignas(64) RequestTask final : public mx::tasking::TaskInterface +{ +public: + enum Type { Insert, Update, Lookup, Debug }; + + RequestTask(db::index::blinktree::BLinkTree* tree, const Type type, const std::uint64_t key, ResponseHandler& response_handler) noexcept + : _tree(tree), _type(type), _key(key), _response_handler(response_handler) { } + RequestTask(db::index::blinktree::BLinkTree* tree, const Type type, const std::uint64_t key, const std::int64_t value, ResponseHandler& response_handler) noexcept + : _tree(tree), _type(type), _key(key), _value(value), _response_handler(response_handler) { } + RequestTask(db::index::blinktree::BLinkTree* tree, ResponseHandler& response_handler) noexcept + : _tree(tree), _type(Type::Debug), _response_handler(response_handler) { } + ~RequestTask() noexcept = default; + + mx::tasking::TaskResult execute(std::uint16_t core_id, std::uint16_t channel_id) override; + +private: + db::index::blinktree::BLinkTree* _tree; + Type _type; + std::uint64_t _key; + std::uint64_t _value; + ResponseHandler& _response_handler; +}; + +class alignas(64) ReceiveTask final : public mx::tasking::TaskInterface +{ + public: + ReceiveTask(Server::state *state, Lwip::pbuf *pb) : _state(state), _pbuf(pb) {} + + mx::tasking::TaskResult execute(std::uint16_t core_id, std::uint16_t channel_id) override; + + private: + Server::state *_state; + Lwip::pbuf *_pbuf; +}; + +class alignas(64) AcceptTask final : public mx::tasking::TaskInterface +{ + public: + AcceptTask(Lwip::tcp_pcb *newpcb) : _pcb(newpcb) {} + + mx::tasking::TaskResult execute(std::uint16_t core_id, std::uint16_t channel_id) override; + + private: + Lwip::tcp_pcb *_pcb; +}; +} // namespace mx::io::network \ No newline at end of file diff --git a/repos/mml/src/app/blinktree_server/request_scheduler.h b/repos/mml/src/app/blinktree_server/request_scheduler.h new file mode 100644 index 
/**
 * Value type describing a batch of workload requests: the position of the
 * first request and how many requests the batch still contains. Two sentinel
 * states exist: "finished" (workload exhausted) and "no new" (nothing
 * schedulable at the moment).
 */
class RequestIndex
{
public:
    /// Sentinel: the whole workload has been processed.
    static RequestIndex make_finished()
    {
        constexpr auto end_marker = std::numeric_limits<std::uint64_t>::max();
        return RequestIndex{end_marker, 0UL};
    }

    /// Sentinel: no requests can be scheduled right now (but not finished).
    static RequestIndex make_no_new() { return RequestIndex{0UL, 0UL}; }

    RequestIndex(const std::uint64_t index, const std::uint64_t count) noexcept
        : _index(index), _count(count)
    {
    }

    explicit RequestIndex(std::pair<std::uint64_t, std::uint64_t> &&index_and_count) noexcept
        : _index(index_and_count.first), _count(index_and_count.second)
    {
    }

    RequestIndex(RequestIndex &&) noexcept = default;
    RequestIndex(const RequestIndex &) = default;
    ~RequestIndex() noexcept = default;

    RequestIndex &operator=(RequestIndex &&) noexcept = default;

    /// Workload position of the first request in this batch.
    [[nodiscard]] std::uint64_t index() const noexcept { return _index; }

    /// Number of requests remaining in this batch.
    [[nodiscard]] std::uint64_t count() const noexcept { return _count; }

    /// True when this is the make_finished() sentinel.
    [[nodiscard]] bool is_finished() const noexcept
    {
        return _index == std::numeric_limits<std::uint64_t>::max();
    }

    /// True while the batch still holds unscheduled requests.
    [[nodiscard]] bool has_new() const noexcept { return _count > 0UL; }

    /// Consume `count` requests from the front of the batch.
    RequestIndex &operator-=(const std::uint64_t count) noexcept
    {
        _index += count;
        _count -= count;
        return *this;
    }

private:
    std::uint64_t _index;
    std::uint64_t _count;
};
+ */ +class RequestContainer +{ +public: + RequestContainer(const std::uint16_t core_id, const std::uint64_t max_open_requests, + benchmark::Workload &workload) noexcept + : _finished_requests(core_id), _local_buffer(workload.next(config::batch_size())), + _max_pending_requests(max_open_requests), _workload(workload) + { + } + + ~RequestContainer() noexcept = default; + + /** + * Allocates the next requests to spawn. + * + * @return Pair of workload-index and number of tuples to request. + * When the number is negative, no more requests are available. + */ + RequestIndex next() noexcept + { + const auto finished_requests = _finished_requests.load(); + + const auto pending_requests = _scheduled_requests - finished_requests; + if (pending_requests >= _max_pending_requests) + { + // Too many open requests somewhere in the system. + return RequestIndex::make_no_new(); + } + + if (_local_buffer.has_new() == false) + { + _local_buffer = RequestIndex{_workload.next(config::batch_size())}; + } + + if (_local_buffer.has_new()) + { + // How many requests can be scheduled without reaching the request limit? + const auto free_requests = _max_pending_requests - pending_requests; + + // Try to spawn all free requests, but at least those in the local buffer. + const auto count = std::min(free_requests, _local_buffer.count()); + + _scheduled_requests += count; + + const auto index = RequestIndex{_local_buffer.index(), count}; + _local_buffer -= count; + + return index; + } + + // Do we have to wait for pending requests or are we finished? + return pending_requests > 0UL ? RequestIndex::make_no_new() : RequestIndex::make_finished(); + } + + /** + * Callback after inserted a value. + */ + void inserted(const std::uint16_t core_id, const std::uint64_t /*key*/, const std::int64_t /*value*/) noexcept + { + task_finished(core_id); + } + + /** + * Callback after updated a value. 
+ */ + void updated(const std::uint16_t core_id, const std::uint64_t /*key*/, const std::int64_t /*value*/) noexcept + { + task_finished(core_id); + } + + /** + * Callback after removed a value. + */ + void removed(const std::uint16_t core_id, const std::uint64_t /*key*/) noexcept { task_finished(core_id); } + + /** + * Callback after found a value. + */ + void found(const std::uint16_t core_id, const std::uint64_t /*key*/, const std::int64_t /*value*/) noexcept + { + task_finished(core_id); + } + + /** + * Callback on missing a value. + */ + void missing(const std::uint16_t core_id, const std::uint64_t /*key*/) noexcept { task_finished(core_id); } + + const benchmark::NumericTuple &operator[](const std::size_t index) const noexcept { return _workload[index]; } + +private: + // Number of requests finished by tasks. + mx::util::reference_counter_64 _finished_requests; + + // Number of tasks scheduled by the owning request scheduler. + std::uint64_t _scheduled_requests = 0UL; + + // Local buffer holding not scheduled, but from global worker owned request items. + RequestIndex _local_buffer; + + // Number of requests that can be distributed by this scheduler, + // due to system-wide maximal parallel requests. + const std::uint64_t _max_pending_requests; + + // Workload to get requests from. + benchmark::Workload &_workload; + + /** + * Updates the counter of finished requests. + */ + void task_finished(const std::uint16_t core_id) { _finished_requests.add(core_id); } +}; + +/** + * The RequestScheduler own its own request container and sets up requests for the BLink-Tree. 
+ */ +class RequestSchedulerTask final : public mx::tasking::TaskInterface +{ +public: + RequestSchedulerTask(const std::uint16_t core_id, const std::uint16_t channel_id, benchmark::Workload &workload, + const mx::util::core_set &core_set, + db::index::blinktree::BLinkTree *tree, Listener *listener) + : _tree(tree), _listener(listener) + { + this->annotate(mx::tasking::priority::low); + this->is_readonly(false); + + const auto container = mx::tasking::runtime::new_resource( + sizeof(RequestContainer), mx::resource::hint{channel_id}, core_id, + config::max_parallel_requests() / core_set.size(), workload); + this->annotate(container, sizeof(RequestContainer)); + } + + ~RequestSchedulerTask() final = default; + + mx::tasking::TaskResult execute(const std::uint16_t core_id, const std::uint16_t channel_id) override + { + // Get some new requests from the container. + auto &request_container = *mx::resource::ptr_cast(this->annotated_resource()); + const auto next_requests = request_container.next(); + + if (next_requests.has_new()) + { + for (auto i = next_requests.index(); i < next_requests.index() + next_requests.count(); ++i) + { + mx::tasking::TaskInterface *task{nullptr}; + const auto &tuple = request_container[i]; + if (tuple == benchmark::NumericTuple::INSERT) + { + task = mx::tasking::runtime::new_task< + db::index::blinktree::InsertValueTask>( + core_id, tuple.key(), tuple.value(), _tree, request_container); + task->is_readonly(_tree->height() > 1U); + } + else if (tuple == benchmark::NumericTuple::LOOKUP) + { + task = mx::tasking::runtime::new_task< + db::index::blinktree::LookupTask>( + core_id, tuple.key(), request_container); + + task->is_readonly(true); + } + else if (tuple == benchmark::NumericTuple::UPDATE) + { + task = mx::tasking::runtime::new_task< + db::index::blinktree::UpdateTask>( + core_id, tuple.key(), tuple.value(), request_container); + task->is_readonly(_tree->height() > 1U); + } + + task->annotate(_tree->root(), 
db::index::blinktree::config::node_size() / 4U); + mx::tasking::runtime::spawn(*task, channel_id); + } + } + else if (next_requests.is_finished()) + { + // All requests are done. Notify the benchmark and die. + _listener->requests_finished(); + mx::tasking::runtime::delete_resource(this->annotated_resource()); + return mx::tasking::TaskResult::make_remove(); + } + + return mx::tasking::TaskResult::make_succeed(this); + } + +private: + // The tree to send requests to. + db::index::blinktree::BLinkTree *_tree; + + // Benchmark listener to notify on requests are done. + Listener *_listener; +}; +} // namespace application::blinktree_benchmark \ No newline at end of file diff --git a/repos/mml/src/app/blinktree_server/server.cpp b/repos/mml/src/app/blinktree_server/server.cpp new file mode 100644 index 0000000000..fcd9737ad2 --- /dev/null +++ b/repos/mml/src/app/blinktree_server/server.cpp @@ -0,0 +1,47 @@ +#include "server.h" +#include "network/server.h" +#include +#include +#include +#include + +using namespace application::blinktree_server; + +Server::Server(const std::uint64_t port, mx::util::core_set &&cores, const std::uint16_t prefetch_distance, const mx::synchronization::isolation_level node_isolation_level, const mx::synchronization::protocol preferred_synchronization_method) + : _port(port), _cores(std::move(cores)), _prefetch_distance(prefetch_distance), _node_isolation_level(node_isolation_level), _preferred_synchronization_method(preferred_synchronization_method) +{ +} + +void Server::run() +{ + network::Server* server; + + mx::tasking::runtime::init(this->_cores, this->_prefetch_distance, /* use mx tasking's task allocator*/ false); + + this->_tree = std::make_unique>( + this->_node_isolation_level, this->_preferred_synchronization_method); + + Libc::Env &env = mx::system::Environment::env(); + + static mx::memory::dynamic::Allocator *alloc = new (mx::memory::GlobalHeap::allocate_cache_line_aligned(sizeof(mx::memory::dynamic::Allocator))) 
mx::memory::dynamic::Allocator(); + + static Timer::Connection timer{env}; + + static Genode::Heap _alloc{env.ram(), env.rm()}; + + Mxip::mxip_init(*alloc, timer); + server = new network::Server{env, this->_port, mx::tasking::runtime::channels(), timer, _alloc}; + + std::cout << "Waiting for requests on port :" << this->_port << std::endl; + auto network_thread = std::thread{[server, tree = this->_tree.get()]() { + server->listen(tree); + }}; + mx::tasking::runtime::start_and_wait(); + + + + network_thread.join(); + + + //delete server; +} \ No newline at end of file diff --git a/repos/mml/src/app/blinktree_server/server.h b/repos/mml/src/app/blinktree_server/server.h new file mode 100644 index 0000000000..af0d868cbf --- /dev/null +++ b/repos/mml/src/app/blinktree_server/server.h @@ -0,0 +1,30 @@ +#pragma once + +#include +#include + +namespace application::blinktree_server { +class Server +{ +public: + Server(std::uint64_t port, mx::util::core_set&& cores, std::uint16_t prefetch_distance, mx::synchronization::isolation_level node_isolation_level, mx::synchronization::protocol preferred_synchronization_method); + + void run(); +private: + const std::uint64_t _port; + + const std::uint16_t _prefetch_distance; + + /// Cores. + mx::util::core_set _cores; + + // The synchronization mechanism to use for tree nodes. + const mx::synchronization::isolation_level _node_isolation_level; + + // Preferred synchronization method. + const mx::synchronization::protocol _preferred_synchronization_method; + + /// Tree. 
+ std::unique_ptr> _tree; +}; +} \ No newline at end of file diff --git a/repos/mml/src/app/blinktree_server/target.mk b/repos/mml/src/app/blinktree_server/target.mk new file mode 100644 index 0000000000..2147dfb97b --- /dev/null +++ b/repos/mml/src/app/blinktree_server/target.mk @@ -0,0 +1,28 @@ +MXINC_DIR=$(REP_DIR)/src/app/blinktree_server +MXINC_DIR+=-I$(REP_DIR)/src/app/blinktree +GENODE_GCC_TOOLCHAIN_DIR ?= /usr/local/genode/tool/21.05 +MXBENCH_DIR=$(REP_DIR)/src/app/blinktree + +TARGET = blinktree_daemon +# soure file for benchmark framework +SRC_MXBENCH = ${MXBENCH_DIR}/benchmark/workload_set.cpp +SRC_MXBENCH += ${MXBENCH_DIR}/benchmark/workload.cpp +SRC_MXBENCH += ${MXBENCH_DIR}/benchmark/cores.cpp +SRC_MXBENCH += ${MXBENCH_DIR}/benchmark/string_util.cpp +SRC_MXBENCH += ${MXBENCH_DIR}/benchmark/perf.cpp +# source files for blinktree benchmark +SRC_BTREE += main.cpp +SRC_BTREE += server.cpp +SRC_BTREE += network/server.cpp + +SRC_CC = ${SRC_MXBENCH} ${SRC_BTREE} +LIBS += base libc stdcxx mxtasking mxip +EXT_OBJECTS += /usr/local/genode/tool/lib/clang/14.0.5/lib/linux/libclang_rt.builtins-x86_64.a /usr/local/genode/tool/lib/libatomic.a +CUSTOM_CC = /usr/local/genode/tool/bin/clang +CUSTOM_CXX = /usr/local/genode/tool/bin/clang++ +CC_OPT := --target=x86_64-genode --sysroot=/does/not/exist --gcc-toolchain=$(GENODE_GCC_TOOLCHAIN_DIR) -Wno-error -g -DNDEBUG -I$(MXINC_DIR) -std=c++20 #-D_GLIBCXX_ATOMIC_BUILTINS_8 -D__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8 +CC_OPT += -I$(MXBENCH_DIR) +CC_OLEVEL = -O3 +CC_CXX_WARN_STRICT = +CUSTOM_CXX_LIB := $(CROSS_DEV_PREFIX)g++ +#CXX_LD += $(CROSS_DEV_PREFIX)g++ diff --git a/repos/mml/src/app/grant_bench/main.cc b/repos/mml/src/app/grant_bench/main.cc new file mode 100644 index 0000000000..0b67b6ff82 --- /dev/null +++ b/repos/mml/src/app/grant_bench/main.cc @@ -0,0 +1,351 @@ +/* + * \brief Test for yielding resources + * \author Norman Feske + * \date 2013-10-05 + * + * This test exercises the protocol between a parent and child, 
which is used + * by the parent to regain resources from a child subsystem. + * + * The program acts in either one of two roles, the parent or the child. The + * role is determined by reading a config argument. + * + * The child periodically allocates chunks of RAM until its RAM quota is + * depleted. Once it observes a yield request from the parent, however, it + * cooperatively releases as much resources as requested by the parent. + * + * The parent wait a while to give the child the chance to allocate RAM. It + * then sends a yield request and waits for a response. When getting the + * response, it validates whether the child complied to the request or not. + */ + +/* + * Copyright (C) 2013-2017 Genode Labs GmbH + * + * This file is part of the Genode OS framework, which is distributed + * under the terms of the GNU Affero General Public License version 3. + */ + +/* Genode includes */ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace Test { + class Child; + class Parent; + using namespace Genode; +} + + +/**************** + ** Child role ** + ****************/ + +/** + * The child eats more and more RAM. However, when receiving a yield request, + * it releases the requested amount of resources. 
+ */ +class Test::Child +{ + private: + + struct Ram_chunk : List::Element + { + Env &env; + + size_t const size; + + Ram_dataspace_capability ds_cap; + + Ram_chunk(Env &env, size_t size) + : + env(env),size(size), ds_cap(env.ram().alloc(size)) + { } + + ~Ram_chunk() { env.ram().free(ds_cap); } + }; + + Env &_env; + Heap _heap { _env.ram(), _env.rm() }; + bool const _expand; + List _ram_chunks { }; + Timer::Connection _timer { _env }; + Signal_handler _grant_handler; + Genode::uint64_t const _period_ms; + + void _handle_grant(); + + + + public: + + Child(Env &, Xml_node); + void main(); +}; + + +void Test::Child::_handle_grant() +{ + /* request yield request arguments */ + unsigned long start = Genode::Trace::timestamp(); + [[maybe_unused]] Genode::Parent::Resource_args const args = _env.parent().gained_resources(); + unsigned long end = Genode::Trace::timestamp(); + // Genode::Parent::Resource_args const args = _env.parent().yield_request(); + + _env.parent().yield_response(); + log("{\"grant-handle-et\": ", (end - start)/2000, "}"); + // size_t const gained_ram_quota = Arg_string::find_arg(args.string(), "ram_quota").ulong_value(0); + + // log("Gained RAM quota: ", gained_ram_quota); + /* + log("yield request: ", args.string()); + + size_t const requested_ram_quota = + Arg_string::find_arg(args.string(), "ram_quota").ulong_value(0); + + log("got request to free ", requested_ram_quota, " MB of RAM"); + + size_t const requested_cpu_quota = + Arg_string::find_arg(args.string(), "cpu_quota").ulong_value(0); + + log("released ", requested_cpu_quota, " portions of cpu_quota"); + + size_t const requested_gpu_quota = + Arg_string::find_arg(args.string(), "gpus").ulong_value(0); + + log("got request to release ", requested_gpu_quota, " gpus");*/ +} + + +Test::Child::Child(Env &env, Xml_node config) +: + _env(env), + _expand(config.attribute_value("expand", false)), + _grant_handler(_env.ep(), *this, &Child::_handle_grant), + _period_ms(config.attribute_value("period_ms", 
(Genode::uint64_t)500)) +{ + /* register yield signal handler */ + _env.parent().resource_avail_sigh(_grant_handler); +} + + +/***************** + ** Parent role ** + *****************/ + +/** + * The parent grants resource requests as long as it has free resources. + * Once in a while, it politely requests the child to yield resources. + */ +class Test::Parent +{ + private: + + Env &_env; + + Timer::Connection _timer { _env }; + + void _print_status() + { + log("quota: ", _child.pd().ram_quota().value / 1024, " KiB " + "used: ", _child.pd().used_ram().value / 1024, " KiB"); + } + + size_t _used_ram_prior_yield = 0; + + /* perform the test three times */ + unsigned _cnt = 5000; + + unsigned long _start = 0; + + unsigned long _end = 0; + unsigned long _sent = 0; + + enum State { WAIT, YIELD_REQUESTED, YIELD_GOT_RESPONSE }; + State _state = WAIT; + + void _schedule_one_second_timeout() + { + //log("wait ", _wait_cnt, "/", _wait_secs); + _timer.trigger_once(10000); + } + + void _init() + { + _state = WAIT; + _schedule_one_second_timeout(); + } + + void _bestow_resources() + { + /* remember quantum of resources used by the child */ + //_used_ram_prior_yield = _child.pd().used_ram().value; + + //log("request yield (ram prior yield: ", _used_ram_prior_yield); + + /* issue yield request */ + Genode::Parent::Resource_args award("ram_quota=5M,cpu_quota=10,gpus=1"); + + _start = Genode::Trace::timestamp(); + _child.accept(award); + _sent = Genode::Trace::timestamp(); + + _state = YIELD_REQUESTED; + } + + void _handle_timeout() + { + //_print_status(); + _bestow_resources(); + _schedule_one_second_timeout(); + } + + void _yield_response() + { + _end = Genode::Trace::timestamp(); + log("{\"bestow-rtt\": ", (_end-_start)/2000, ", \"bestow-transmit\": ", (_sent-_start)/2000, ",\"bestow-acked\":", (_end-_sent)/2000,"}"); + + _state = YIELD_GOT_RESPONSE; + + //_print_status(); + + if (_cnt-- > 0) { + _init(); + } else { + log("--- test-resource_yield finished ---"); + 
_env.parent().exit(0); + } + } + + Signal_handler _timeout_handler { + _env.ep(), *this, &Parent::_handle_timeout }; + + struct Policy : public Genode::Child_policy + { + Env &_env; + + Parent &_parent; + + Static_parent_services + _parent_services { _env }; + + Cap_quota const _cap_quota { 50 }; + Ram_quota const _ram_quota { 10*1024*1024 }; + Binary_name const _binary_name { "benchmark_resource_award" }; + + /* + * Config ROM service + */ + + struct Config_producer : Dynamic_rom_session::Content_producer + { + void produce_content(char *dst, Genode::size_t dst_len) override + { + Xml_generator xml(dst, dst_len, "config", [&] () { + xml.attribute("child", "yes"); }); + } + } _config_producer { }; + + Dynamic_rom_session _config_session { _env.ep().rpc_ep(), + ref_pd(), _env.rm(), + _config_producer }; + + typedef Genode::Local_service Config_service; + + Config_service::Single_session_factory _config_factory { _config_session }; + Config_service _config_service { _config_factory }; + + void yield_response() override + { + _parent._yield_response(); + } + + Policy(Parent &parent, Env &env) : _env(env), _parent(parent) { } + + Name name() const override { return "child"; } + + Binary_name binary_name() const override { return _binary_name; } + + Pd_session &ref_pd() override { return _env.pd(); } + Pd_session_capability ref_pd_cap() const override { return _env.pd_session_cap(); } + + void init(Pd_session &pd, Pd_session_capability pd_cap) override + { + pd.ref_account(ref_pd_cap()); + ref_pd().transfer_quota(pd_cap, _cap_quota); + ref_pd().transfer_quota(pd_cap, _ram_quota); + } + + Route resolve_session_request(Service::Name const &service_name, + Session_label const &label, + Session::Diag const diag) override + { + auto route = [&] (Service &service) { + return Route { .service = service, + .label = label, + .diag = diag }; }; + + if (service_name == "ROM" && label == "child -> config") + return route(_config_service); + + Service *service_ptr = nullptr; + 
_parent_services.for_each([&] (Service &s) { + if (!service_ptr && service_name == s.name()) + service_ptr = &s; }); + + if (!service_ptr) + throw Service_denied(); + + return route(*service_ptr); + } + }; + + Policy _policy { *this, _env }; + + Genode::Child _child { _env.rm(), _env.ep().rpc_ep(), _policy }; + + public: + + class Insufficient_yield { }; + + /** + * Constructor + */ + Parent(Env &env) : _env(env) + { + _timer.sigh(_timeout_handler); + _init(); + } +}; + + +/*************** + ** Component ** + ***************/ + +void Component::construct(Genode::Env &env) +{ + using namespace Genode; + + /* + * Read value '' attribute to decide whether to perform + * the child or the parent role. + */ + static Attached_rom_dataspace config(env, "config"); + bool const is_child = config.xml().attribute_value("child", false); + + if (is_child) { + log("--- test-resource_yield child role started ---"); + static Test::Child child(env, config.xml()); + } else { + log("--- test-resource_yield parent role started ---"); + static Test::Parent parent(env); + } +} diff --git a/repos/mml/src/app/grant_bench/target.mk b/repos/mml/src/app/grant_bench/target.mk new file mode 100644 index 0000000000..5c5b4a3e48 --- /dev/null +++ b/repos/mml/src/app/grant_bench/target.mk @@ -0,0 +1,3 @@ +TARGET = benchmark_resource_award +SRC_CC = main.cc +LIBS = base diff --git a/repos/mml/src/app/hello_mxtask/main.cc b/repos/mml/src/app/hello_mxtask/main.cc index 3231f19215..010d995ad3 100644 --- a/repos/mml/src/app/hello_mxtask/main.cc +++ b/repos/mml/src/app/hello_mxtask/main.cc @@ -40,7 +40,7 @@ public: { //std::cout << "Hello World" << std::endl; - Genode::log("Hello world"); + Genode::log("Hello world from channel ", channel_id); // Stop MxTasking runtime after this task. 
return mx::tasking::TaskResult::make_stop(); } @@ -60,7 +60,7 @@ void Libc::Component::construct(Libc::Env &env) //mx::system::Environment::env = &env; Genode::log("Initialized system environment for MxTasking"); Genode::log("Running on core ", mx::system::topology::core_id()); - const auto cores = mx::util::core_set::build(1); + const auto cores = mx::util::core_set::build(64); std::vector tasks; diff --git a/repos/mml/src/app/hpc_test/main.cc b/repos/mml/src/app/hpc_test/main.cc new file mode 100644 index 0000000000..c23f10d69e --- /dev/null +++ b/repos/mml/src/app/hpc_test/main.cc @@ -0,0 +1,89 @@ +/** + * @file main.cc + * @author Michael Müller (michael.mueller@uos.de) + * @brief Some test for programing hardware performance counters in NOVA + * @version 0.1 + * @date 2022-12-14 + * + * @copyright Copyright (c) 2022 + * + */ + +#include +#include + +#include +#include +#include +#include + +int main(void) +{ + Nova::mword_t event = 0x26; + Nova::mword_t mask = 0x00; + Nova::mword_t flags = 0x70000; + Nova::uint8_t rc; + + if ((rc = Nova::hpc_ctrl(Nova::HPC_SETUP, 0, 1, event, mask, flags)) != Nova::NOVA_OK) { + std::cerr << "Failed to setup performance counter 0" << std::endl; + return -1; + } + + std::cout << "Counter 0 setup" << std::endl; + event = 0x60; + mask = 0xfe; + if ((rc = Nova::hpc_ctrl(Nova::HPC_SETUP, 1, 1, event, mask, flags)) != Nova::NOVA_OK) + { + std::cerr << "Failed to setup performance counter 1, rc = " << static_cast(rc) << std::endl; + return -1; + } + + event = 0x62; + mask = 0x1; + if ((rc = Nova::hpc_ctrl(Nova::HPC_SETUP, 2, 1, event, mask, flags)) != Nova::NOVA_OK) + { + std::cerr << "Failed to setup performance counter 2, rc = " << static_cast(rc) << std::endl; + return -1; + } + if ((rc = Nova::hpc_start(0, 1)) != Nova::NOVA_OK) { + std::cerr << "Failed to start counter 0" << std::endl; + return -2; + } + + if ((rc = Nova::hpc_start(1, 1)) != Nova::NOVA_OK) { + std::cerr << "Failed to start counter 0" << std::endl; + return -2; + } 
+ + if ((rc = Nova::hpc_start(2, 1)) != Nova::NOVA_OK) { + std::cerr << "Failed to start counter 0" << std::endl; + return -2; + } + + for (;;) { + std::this_thread::sleep_for(std::chrono::milliseconds(2000)); + Nova::mword_t count = 0; + + _mm_clflush(&count); + if ((rc = Nova::hpc_read(0, 1, count)) != Nova::NOVA_OK) + { + std::cerr << "Failed to read counter 0" << std::endl; + } + std::cout << count << " cache line flushes" << std::endl; + + Nova::mword_t latency = 0; + if ((rc = Nova::hpc_read(2, 1, latency)) != Nova::NOVA_OK) + { + std::cerr << "Failed to read counter 1" << std::endl; + } + Nova::mword_t l2_requests = 0; + if ((rc = Nova::hpc_read(1, 1, l2_requests)) != Nova::NOVA_OK) + { + std::cerr << "Failed to read counter 1" << std::endl; + } + count = (latency * 4) / l2_requests; + std::cout << "L2 latency:" << count << " cycles" << std::endl; + } + + return 0; +} diff --git a/repos/mml/src/app/hpc_test/target.mk b/repos/mml/src/app/hpc_test/target.mk new file mode 100644 index 0000000000..0d72ae45a4 --- /dev/null +++ b/repos/mml/src/app/hpc_test/target.mk @@ -0,0 +1,5 @@ +TARGET = hpc_test +SRC_CC = trace_pfc.cc +LIBS += base posix libm libc stdcxx +CC_OPT += -Wno-error -Wno-permissive -fpermissive -Wno-error=conversion + diff --git a/repos/mml/src/app/hpc_test/trace_pfc.cc b/repos/mml/src/app/hpc_test/trace_pfc.cc new file mode 100644 index 0000000000..15fa27beb0 --- /dev/null +++ b/repos/mml/src/app/hpc_test/trace_pfc.cc @@ -0,0 +1,105 @@ +/** + * @file trace_pfc.cc + * @author Michael Müller (michael.mueller@uos.de) + * @brief Tests for Genode wrappers around Performance counter syscalls in NOVA + * @version 0.1 + * @date 2022-12-15 + * + * @copyright Copyright (c) 2022 + * + */ + +#include + +#include +#include +#include +#include + +using namespace Genode; + +int main(void) +{ + Trace::Performance_counter::Counter ctr_clflush, ctr_l2_latency, ctr_l2_requests, /*ctr_l3_miss,*/ ctr_l2_prefetch; + + try { + ctr_clflush = 
Trace::Performance_counter::alloc_core(); + ctr_l2_latency = Trace::Performance_counter::alloc_core(); + ctr_l2_requests = Trace::Performance_counter::alloc_core(); + ctr_l2_prefetch = Trace::Performance_counter::acquire(Trace::Performance_counter::Type::CORE); + // ctr_l3_miss = Trace::Performance_counter::alloc_cbo(); + } + catch (Trace::Pfc_no_avail) + { + std::cout << "Unable to allocate performance counters." << std::endl; + return -1; + } + + std::cout << "Performance counter allocation successful." << std::endl; + + try { + Trace::Performance_counter::setup(ctr_clflush, 0x26, 0x00, 0x70000); + Trace::Performance_counter::setup(ctr_l2_latency, 0x62, 0x01, 0x30000); + Trace::Performance_counter::setup(ctr_l2_requests, 0x60, 0xfe, 0x30000); + Trace::Performance_counter::setup(ctr_l2_prefetch, 0xc0, 0x00, 0x30000); + //Trace::Performance_counter::setup(ctr_l3_miss, 0x6, 0xff, 0x550f000000000000); + } catch (Trace::Pfc_access_error &e) { + std::cerr << "PFC access failed. rc=" << e.error_code() << std::endl; + return -1; + } + + std::cout << "Performance counters successfully set up." << std::endl; + + try { + Trace::Performance_counter::start(ctr_clflush); + Trace::Performance_counter::start(ctr_l2_latency); + Trace::Performance_counter::start(ctr_l2_requests); + Trace::Performance_counter::start(ctr_l2_prefetch); + //Trace::Performance_counter::start(ctr_l3_miss); + } catch (Trace::Pfc_access_error &e) { + std::cerr << "PFC access failed. rc=" << e.error_code() << std::endl; + return -1; + } + + std::cout << "Performance counters started." 
<< std::endl; + + for (;;) { + Genode::uint64_t clflushes, latency, requests, /*l3_misses,*/ l2_prefetches; + clflushes = latency = requests = l2_prefetches = 0; + + std::this_thread::sleep_for(std::chrono::seconds(2)); + _mm_clflush(&clflushes); + _mm_clflush(&clflushes); + + try { + clflushes = Trace::Performance_counter::read(ctr_clflush); + latency = Trace::Performance_counter::read(ctr_l2_latency); + requests = Trace::Performance_counter::read(ctr_l2_requests); + l2_prefetches = Trace::Performance_counter::read(ctr_l2_prefetch); + //l3_misses = Trace::Performance_counter::read(ctr_l3_miss); + } catch (Trace::Pfc_access_error &e) { + std::cerr << "PFC access failed. rc=" << e.error_code() << std::endl; + return 1; + } + + std::cout << clflushes << " cache line flushes." << std::endl; + //std::cout << "L2 latency: " << (latency * 4) / requests << " cycles." << std::endl; + std::cout << l2_prefetches << " L2 prefetch requests." << std::endl; + /* + try { + Trace::Performance_counter::stop(ctr_l2_prefetch); + Trace::Performance_counter::reset(ctr_l2_prefetch, 0xdeadbeef); + Trace::Performance_counter::start(ctr_l2_prefetch); + std::cout << Trace::Performance_counter::read(ctr_l2_prefetch) << " L2 prefetches after context-switch" << std::endl; + Trace::Performance_counter::stop(ctr_l2_prefetch); + Trace::Performance_counter::reset(ctr_l2_prefetch, l2_prefetches); + Trace::Performance_counter::start(ctr_l2_prefetch); + } catch (Trace::Pfc_access_error &e) { + std::cerr << "PFC access failed. 
rc=" << e.error_code() << std::endl; + } +*/ + // std::cout << l3_misses << " L3 misses" << std::endl; + } + + return 0; +} \ No newline at end of file diff --git a/repos/mml/src/app/libpfm_test/check_events.c b/repos/mml/src/app/libpfm_test/check_events.c new file mode 100644 index 0000000000..9edaebd7e1 --- /dev/null +++ b/repos/mml/src/app/libpfm_test/check_events.c @@ -0,0 +1,174 @@ +/* + * check_events.c - show event encoding + * + * Copyright (c) 2009 Google, Inc + * Contributed by Stephane Eranian + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, + * INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A + * PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF + * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE + * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * This file is part of libpfm, a performance monitoring support library for + * applications on Linux. + */ +#include +#include +#include +#include +#include +#include +#include + +#include + +int pmu_is_present(pfm_pmu_t p) +{ + pfm_pmu_info_t pinfo; + int ret; + + memset(&pinfo, 0, sizeof(pinfo)); + ret = pfm_get_pmu_info(p, &pinfo); + return ret == PFM_SUCCESS ? 
pinfo.is_present : 0; +} + +int main(int argc, const char **argv) +{ + pfm_pmu_info_t pinfo; + pfm_pmu_encode_arg_t e; + const char *arg[3]; + const char **p; + char *fqstr; + pfm_event_info_t info; + int j, ret; + pfm_pmu_t i; + int total_supported_events = 0; + int total_available_events = 0; + + unsigned long low, high, msr; + msr = 0xc0010200; + + asm volatile("rdmsr" + : "=a"(low), "=d"(high) + : "c"(msr)); /* + * Initialize pfm library (required before we can use it) + */ + ret = pfm_initialize(); + if (ret != PFM_SUCCESS) + errx(1, "cannot initialize library: %s\n", pfm_strerror(ret)); + + memset(&pinfo, 0, sizeof(pinfo)); + memset(&info, 0, sizeof(info)); + + printf("Supported PMU models:\n"); + for (i = PFM_PMU_NONE; i < PFM_PMU_MAX; i++) + { + ret = pfm_get_pmu_info(i, &pinfo); + if (ret != PFM_SUCCESS) + continue; + + printf("\t[%d, %s, \"%s\"]\n", i, pinfo.name, pinfo.desc); + } + + printf("Detected PMU models:\n"); + for (i = PFM_PMU_NONE; i < PFM_PMU_MAX; i++) + { + ret = pfm_get_pmu_info(i, &pinfo); + if (ret != PFM_SUCCESS) + continue; + if (pinfo.is_present) + { + printf("\t[%d, %s, \"%s\"]\n", i, pinfo.name, pinfo.desc); + total_supported_events += pinfo.nevents; + } + total_available_events += pinfo.nevents; + } + + printf("Total events: %d available, %d supported\n", total_available_events, total_supported_events); + + /* + * be nice to user! 
+ */ + if (argc < 2 && pmu_is_present(PFM_PMU_PERF_EVENT)) + { + arg[0] = "PERF_COUNT_HW_CPU_CYCLES"; + arg[1] = "PERF_COUNT_HW_INSTRUCTIONS"; + arg[2] = NULL; + p = arg; + } + else + { + p = argv + 1; + } + + if (!*p) + errx(1, "you must pass at least one event"); + + memset(&e, 0, sizeof(e)); + while (*p) + { + /* + * extract raw event encoding + * + * For perf_event encoding, use + * #include + * and the function: + * pfm_get_perf_event_encoding() + */ + fqstr = NULL; + e.fstr = &fqstr; + ret = pfm_get_os_event_encoding(*p, PFM_PLM0 | PFM_PLM3, PFM_OS_NONE, &e); + if (ret != PFM_SUCCESS) + { + /* + * codes is too small for this event + * free and let the library resize + */ + if (ret == PFM_ERR_TOOSMALL) + { + free(e.codes); + e.codes = NULL; + e.count = 0; + free(fqstr); + continue; + } + if (ret == PFM_ERR_NOTFOUND && strstr(*p, "::")) + errx(1, "%s: try setting LIBPFM_ENCODE_INACTIVE=1", pfm_strerror(ret)); + errx(1, "cannot encode event %s: %s", *p, pfm_strerror(ret)); + } + ret = pfm_get_event_info(e.idx, PFM_OS_NONE, &info); + if (ret != PFM_SUCCESS) + errx(1, "cannot get event info: %s", pfm_strerror(ret)); + + ret = pfm_get_pmu_info(info.pmu, &pinfo); + if (ret != PFM_SUCCESS) + errx(1, "cannot get PMU info: %s", pfm_strerror(ret)); + + printf("Requested Event: %s\n", *p); + printf("Actual Event: %s\n", fqstr); + printf("PMU : %s\n", pinfo.desc); + printf("IDX : %d\n", e.idx); + printf("Codes :"); + for (j = 0; j < e.count; j++) + printf(" 0x%" PRIx64, e.codes[j]); + putchar('\n'); + + free(fqstr); + p++; + } + if (e.codes) + free(e.codes); + return 0; +} \ No newline at end of file diff --git a/repos/mml/src/app/libpfm_test/showevtinfo.c b/repos/mml/src/app/libpfm_test/showevtinfo.c new file mode 100644 index 0000000000..3c775c8da0 --- /dev/null +++ b/repos/mml/src/app/libpfm_test/showevtinfo.c @@ -0,0 +1,1020 @@ +/* + * showevtinfo.c - show event information + * + * Copyright (c) 2010 Google, Inc + * Contributed by Stephane Eranian + * + * Permission 
is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, + * INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A + * PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF + * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE + * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * This file is part of libpfm, a performance monitoring support library for + * applications on Linux. 
+ */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#define MAXBUF 1024 +#define COMBO_MAX 18 + +static struct +{ + int compact; + int sort; + uint8_t encode; + uint8_t combo; + uint8_t combo_lim; + uint8_t name_only; + uint8_t desc; + char *csv_sep; + pfm_event_info_t efilter; + pfm_event_attr_info_t ufilter; + pfm_os_t os; + uint64_t mask; +} options; + +typedef struct +{ + uint64_t code; + int idx; +} code_info_t; + +static void show_event_info_compact(pfm_event_info_t *info); + +static const char *srcs[PFM_ATTR_CTRL_MAX] = { + [PFM_ATTR_CTRL_UNKNOWN] = "???", + [PFM_ATTR_CTRL_PMU] = "PMU", + [PFM_ATTR_CTRL_PERF_EVENT] = "perf_event", +}; + +#ifdef PFMLIB_WINDOWS +int set_env_var(const char *var, const char *value, int ov) +{ + size_t len; + char *str; + int ret; + + len = strlen(var) + 1 + strlen(value) + 1; + + str = malloc(len); + if (!str) + return PFM_ERR_NOMEM; + + sprintf(str, "%s=%s", var, value); + + ret = putenv(str); + + free(str); + + return ret ? 
PFM_ERR_INVAL : PFM_SUCCESS; +} +#else +static inline int +set_env_var(const char *var, const char *value, int ov) +{ + return setenv(var, value, ov); +} +#endif + +static int +event_has_pname(char *s) +{ + char *p; + return (p = strchr(s, ':')) && *(p + 1) == ':'; +} + +static int +print_codes(char *buf, int plm, int max_encoding) +{ + uint64_t *codes = NULL; + int j, ret, count = 0; + + ret = pfm_get_event_encoding(buf, PFM_PLM0 | PFM_PLM3, NULL, NULL, &codes, &count); + if (ret != PFM_SUCCESS) + { + if (ret == PFM_ERR_NOTFOUND) + errx(1, "encoding failed, try setting env variable LIBPFM_ENCODE_INACTIVE=1"); + return -1; + } + for (j = 0; j < max_encoding; j++) + { + if (j < count) + printf("0x%" PRIx64, codes[j]); + printf("%s", options.csv_sep); + } + free(codes); + return 0; +} + +static int +check_valid(char *buf, int plm) +{ + uint64_t *codes = NULL; + int ret, count = 0; + + ret = pfm_get_event_encoding(buf, PFM_PLM0 | PFM_PLM3, NULL, NULL, &codes, &count); + if (ret != PFM_SUCCESS) + return -1; + free(codes); + return 0; +} + +static int +match_ufilters(pfm_event_attr_info_t *info) +{ + uint32_t ufilter1 = 0; + uint32_t ufilter2 = 0; + + if (options.ufilter.is_dfl) + ufilter1 |= 0x1; + + if (info->is_dfl) + ufilter2 |= 0x1; + + if (options.ufilter.is_precise) + ufilter1 |= 0x2; + + if (info->is_precise) + ufilter2 |= 0x2; + + if (!ufilter1) + return 1; + + /* at least one filter matches */ + return ufilter1 & ufilter2; +} + +static int +match_efilters(pfm_event_info_t *info) +{ + pfm_event_attr_info_t ainfo; + int n = 0; + int i, ret; + + if (options.efilter.is_precise && !info->is_precise) + return 0; + + memset(&ainfo, 0, sizeof(ainfo)); + ainfo.size = sizeof(ainfo); + + pfm_for_each_event_attr(i, info) + { + ret = pfm_get_event_attr_info(info->idx, i, options.os, &ainfo); + if (ret != PFM_SUCCESS) + continue; + if (match_ufilters(&ainfo)) + return 1; + if (ainfo.type == PFM_ATTR_UMASK) + n++; + } + return n ? 
0 : 1; +} + +static void +show_event_info_combo(pfm_event_info_t *info) +{ + pfm_event_attr_info_t *ainfo; + pfm_pmu_info_t pinfo; + char buf[MAXBUF]; + size_t len; + int numasks = 0; + int i, j, ret; + uint64_t total, m, u; + + memset(&pinfo, 0, sizeof(pinfo)); + + pinfo.size = sizeof(pinfo); + + ret = pfm_get_pmu_info(info->pmu, &pinfo); + if (ret != PFM_SUCCESS) + errx(1, "cannot get PMU info"); + + ainfo = calloc(info->nattrs, sizeof(*ainfo)); + if (!ainfo) + err(1, "event %s : ", info->name); + + /* + * extract attribute information and count number + * of umasks + * + * we cannot just drop non umasks because we need + * to keep attributes in order for the enumeration + * of 2^n + */ + pfm_for_each_event_attr(i, info) + { + ainfo[i].size = sizeof(*ainfo); + + ret = pfm_get_event_attr_info(info->idx, i, options.os, &ainfo[i]); + if (ret != PFM_SUCCESS) + errx(1, "cannot get attribute info: %s", pfm_strerror(ret)); + + if (ainfo[i].type == PFM_ATTR_UMASK) + numasks++; + } + if (numasks > options.combo_lim) + { + warnx("event %s has too many umasks to print all combinations, dropping to simple enumeration", info->name); + free(ainfo); + show_event_info_compact(info); + return; + } + + if (numasks) + { + if (info->nattrs > (int)((sizeof(total) << 3))) + { + warnx("too many umasks, cannot show all combinations for event %s", info->name); + goto end; + } + total = 1ULL << info->nattrs; + + for (u = 1; u < total; u++) + { + len = sizeof(buf); + len -= snprintf(buf, len, "%s::%s", pinfo.name, info->name); + if (len <= 0) + { + warnx("event name too long%s", info->name); + goto end; + } + for (m = u, j = 0; m; m >>= 1, j++) + { + if (m & 0x1ULL) + { + /* we have hit a non umasks attribute, skip */ + if (ainfo[j].type != PFM_ATTR_UMASK) + break; + + if (len < (1 + strlen(ainfo[j].name))) + { + warnx("umasks combination too long for event %s", buf); + break; + } + strncat(buf, ":", len - 1); + buf[len - 1] = '\0'; + len--; + strncat(buf, ainfo[j].name, len - 1); + 
buf[len - 1] = '\0'; + len -= strlen(ainfo[j].name); + } + } + /* if found a valid umask combination, check encoding */ + if (m == 0) + { + if (options.encode) + ret = print_codes(buf, PFM_PLM0 | PFM_PLM3, pinfo.max_encoding); + else + ret = check_valid(buf, PFM_PLM0 | PFM_PLM3); + if (!ret) + printf("%s\n", buf); + } + } + } + else + { + snprintf(buf, sizeof(buf) - 1, "%s::%s", pinfo.name, info->name); + buf[sizeof(buf) - 1] = '\0'; + + ret = options.encode ? print_codes(buf, PFM_PLM0 | PFM_PLM3, pinfo.max_encoding) : 0; + if (!ret) + printf("%s\n", buf); + } +end: + free(ainfo); +} + +static void +show_event_info_compact(pfm_event_info_t *info) +{ + pfm_event_attr_info_t ainfo; + pfm_pmu_info_t pinfo; + char buf[MAXBUF]; + int i, ret, um = 0; + + memset(&ainfo, 0, sizeof(ainfo)); + memset(&pinfo, 0, sizeof(pinfo)); + + pinfo.size = sizeof(pinfo); + ainfo.size = sizeof(ainfo); + + ret = pfm_get_pmu_info(info->pmu, &pinfo); + if (ret != PFM_SUCCESS) + errx(1, "cannot get pmu info: %s", pfm_strerror(ret)); + + if (options.name_only) + { + if (options.encode) + printf("0x%-10" PRIx64, info->code); + printf("%s\n", info->name); + return; + } + pfm_for_each_event_attr(i, info) + { + ret = pfm_get_event_attr_info(info->idx, i, options.os, &ainfo); + if (ret != PFM_SUCCESS) + errx(1, "cannot get attribute info: %s", pfm_strerror(ret)); + + if (ainfo.type != PFM_ATTR_UMASK) + continue; + + if (!match_ufilters(&ainfo)) + continue; + + snprintf(buf, sizeof(buf) - 1, "%s::%s:%s", pinfo.name, info->name, ainfo.name); + buf[sizeof(buf) - 1] = '\0'; + + ret = 0; + if (options.encode) + { + ret = print_codes(buf, PFM_PLM0 | PFM_PLM3, pinfo.max_encoding); + } + if (!ret) + { + printf("%s", buf); + if (options.desc) + { + printf("%s", options.csv_sep); + printf("\"%s. 
%s.\"", info->desc, ainfo.desc); + } + putchar('\n'); + } + um++; + } + if (um == 0) + { + if (!match_efilters(info)) + return; + + snprintf(buf, sizeof(buf) - 1, "%s::%s", pinfo.name, info->name); + buf[sizeof(buf) - 1] = '\0'; + if (options.encode) + { + ret = print_codes(buf, PFM_PLM0 | PFM_PLM3, pinfo.max_encoding); + if (ret) + return; + } + printf("%s", buf); + if (options.desc) + { + printf("%s", options.csv_sep); + printf("\"%s.\"", info->desc); + } + putchar('\n'); + } +} + +int compare_codes(const void *a, const void *b) +{ + const code_info_t *aa = a; + const code_info_t *bb = b; + uint64_t m = options.mask; + + if ((aa->code & m) < (bb->code & m)) + return -1; + if ((aa->code & m) == (bb->code & m)) + return 0; + return 1; +} + +static void +print_event_flags(pfm_event_info_t *info) +{ + int n = 0; + int spec = info->is_speculative; + + if (info->is_precise) + { + printf("[precise] "); + n++; + } + + if (info->support_hw_smpl) + { + printf("[hw_smpl] "); + n++; + } + + if (spec > PFM_EVENT_INFO_SPEC_NA) + { + printf("[%s] ", spec == PFM_EVENT_INFO_SPEC_TRUE ? "speculative" : "non-speculative"); + n++; + } + + if (!n) + printf("None"); +} + +static void +print_attr_flags(pfm_event_attr_info_t *info) +{ + int n = 0; + int spec = info->is_speculative; + + if (info->is_dfl) + { + printf("[default] "); + n++; + } + + if (info->is_precise) + { + printf("[precise] "); + n++; + } + + if (info->support_hw_smpl) + { + printf("[hw_smpl] "); + n++; + } + + if (spec > PFM_EVENT_INFO_SPEC_NA) + { + printf("[%s] ", spec == PFM_EVENT_INFO_SPEC_TRUE ? 
"speculative" : "non-speculative"); + n++; + } + + if (!n) + printf("None "); +} + +static void +show_event_info(pfm_event_info_t *info) +{ + pfm_event_attr_info_t ainfo; + pfm_pmu_info_t pinfo; + int mod = 0, um = 0; + int i, ret; + const char *src; + + if (options.name_only) + { + printf("%s\n", info->name); + return; + } + + memset(&ainfo, 0, sizeof(ainfo)); + memset(&pinfo, 0, sizeof(pinfo)); + + pinfo.size = sizeof(pinfo); + ainfo.size = sizeof(ainfo); + + if (!match_efilters(info)) + return; + ret = pfm_get_pmu_info(info->pmu, &pinfo); + if (ret) + errx(1, "cannot get pmu info: %s", pfm_strerror(ret)); + + printf("#-----------------------------\n" + "IDX : %d\n" + "PMU name : %s (%s)\n" + "Name : %s\n" + "Equiv : %s\n", + info->idx, + pinfo.name, + pinfo.desc, + info->name, + info->equiv ? info->equiv : "None"); + + printf("Flags : "); + print_event_flags(info); + putchar('\n'); + + printf("Desc : %s\n", info->desc ? info->desc : "no description available"); + printf("Code : 0x%" PRIx64 "\n", info->code); + + pfm_for_each_event_attr(i, info) + { + ret = pfm_get_event_attr_info(info->idx, i, options.os, &ainfo); + if (ret != PFM_SUCCESS) + errx(1, "cannot retrieve event %s attribute info: %s", info->name, pfm_strerror(ret)); + + if (ainfo.ctrl >= PFM_ATTR_CTRL_MAX) + { + warnx("event: %s has unsupported attribute source %d", info->name, ainfo.ctrl); + ainfo.ctrl = PFM_ATTR_CTRL_UNKNOWN; + } + src = srcs[ainfo.ctrl]; + switch (ainfo.type) + { + case PFM_ATTR_UMASK: + if (!match_ufilters(&ainfo)) + continue; + + printf("Umask-%02u : 0x%02" PRIx64 " : %s : [%s] : ", + um, + ainfo.code, + src, + ainfo.name); + + print_attr_flags(&ainfo); + + putchar(':'); + + if (ainfo.equiv) + printf(" Alias to %s", ainfo.equiv); + else + printf(" %s", ainfo.desc); + + putchar('\n'); + um++; + break; + case PFM_ATTR_MOD_BOOL: + printf("Modif-%02u : 0x%02" PRIx64 " : %s : [%s] : %s (boolean)\n", mod, ainfo.code, src, ainfo.name, ainfo.desc); + mod++; + break; + case 
PFM_ATTR_MOD_INTEGER: + printf("Modif-%02u : 0x%02" PRIx64 " : %s : [%s] : %s (integer)\n", mod, ainfo.code, src, ainfo.name, ainfo.desc); + mod++; + break; + default: + printf("Attr-%02u : 0x%02" PRIx64 " : %s : [%s] : %s\n", i, ainfo.code, ainfo.name, src, ainfo.desc); + } + } +} + +static int +show_info(char *event, regex_t *preg) +{ + pfm_pmu_info_t pinfo; + pfm_event_info_t info; + pfm_pmu_t j; + int i, ret, match = 0, pname; + size_t len, l = 0; + char *fullname = NULL; + + memset(&pinfo, 0, sizeof(pinfo)); + memset(&info, 0, sizeof(info)); + + pinfo.size = sizeof(pinfo); + info.size = sizeof(info); + + pname = event_has_pname(event); + + /* + * scan all supported events, incl. those + * from undetected PMU models + */ + pfm_for_all_pmus(j) + { + + ret = pfm_get_pmu_info(j, &pinfo); + if (ret != PFM_SUCCESS) + continue; + + /* no pmu prefix, just look for detected PMU models */ + if (!pname && !pinfo.is_present) + continue; + + for (i = pinfo.first_event; i != -1; i = pfm_get_event_next(i)) + { + ret = pfm_get_event_info(i, options.os, &info); + if (ret != PFM_SUCCESS) + errx(1, "cannot get event info: %s", pfm_strerror(ret)); + + len = strlen(info.name) + strlen(pinfo.name) + 1 + 2; + if (len > l) + { + l = len; + fullname = realloc(fullname, l); + if (!fullname) + err(1, "cannot allocate memory"); + } + sprintf(fullname, "%s::%s", pinfo.name, info.name); + + if (regexec(preg, fullname, 0, NULL, 0) == 0) + { + if (options.compact) + if (options.combo) + show_event_info_combo(&info); + else + show_event_info_compact(&info); + else + show_event_info(&info); + match++; + } + } + } + if (fullname) + free(fullname); + + return match; +} + +static int +show_info_sorted(char *event, regex_t *preg) +{ + pfm_pmu_info_t pinfo; + pfm_event_info_t info; + pfm_pmu_t j; + int i, ret, n, match = 0; + size_t len, l = 0; + char *fullname = NULL; + code_info_t *codes; + + memset(&pinfo, 0, sizeof(pinfo)); + memset(&info, 0, sizeof(info)); + + pinfo.size = sizeof(pinfo); + 
info.size = sizeof(info); + + pfm_for_all_pmus(j) + { + + ret = pfm_get_pmu_info(j, &pinfo); + if (ret != PFM_SUCCESS) + continue; + + codes = malloc(pinfo.nevents * sizeof(*codes)); + if (!codes) + err(1, "cannot allocate memory\n"); + + /* scans all supported events */ + n = 0; + for (i = pinfo.first_event; i != -1; i = pfm_get_event_next(i)) + { + + ret = pfm_get_event_info(i, options.os, &info); + if (ret != PFM_SUCCESS) + errx(1, "cannot get event info: %s", pfm_strerror(ret)); + + if (info.pmu != j) + continue; + + codes[n].idx = info.idx; + codes[n].code = info.code; + n++; + } + qsort(codes, n, sizeof(*codes), compare_codes); + for (i = 0; i < n; i++) + { + ret = pfm_get_event_info(codes[i].idx, options.os, &info); + if (ret != PFM_SUCCESS) + errx(1, "cannot get event info: %s", pfm_strerror(ret)); + + len = strlen(info.name) + strlen(pinfo.name) + 1 + 2; + if (len > l) + { + l = len; + fullname = realloc(fullname, l); + if (!fullname) + err(1, "cannot allocate memory"); + } + sprintf(fullname, "%s::%s", pinfo.name, info.name); + + if (regexec(preg, fullname, 0, NULL, 0) == 0) + { + if (options.compact) + show_event_info_compact(&info); + else + show_event_info(&info); + match++; + } + } + free(codes); + } + if (fullname) + free(fullname); + + return match; +} + +static void +usage(void) +{ + printf("showevtinfo [-L] [-E] [-h] [-s] [-m mask]\n" + "-L\t\tlist one event per line (compact mode)\n" + "-E\t\tlist one event per line with encoding (compact mode)\n" + "-M\t\tdisplay all valid unit masks combination (use with -L or -E)\n" + "-h\t\tget help\n" + "-s\t\tsort event by PMU and by code based on -m mask\n" + "-l\t\tmaximum number of umasks to list all combinations (default: %d)\n" + "-F\t\tshow only events and attributes with certain flags (precise,...)\n" + "-m mask\t\thexadecimal event code mask, bits to match when sorting\n" + "-x sep\t\tuse sep as field separator in compact mode\n" + "-D\t\t\tprint event description in compact mode\n" + "-O os\t\tshow 
attributes for the specific operating system\n", + COMBO_MAX); +} + +/* + * keep: [pmu::]event + * drop everything else + */ +static void +drop_event_attributes(char *str) +{ + char *p; + + p = strchr(str, ':'); + if (!p) + return; + + str = p + 1; + /* keep PMU name */ + if (*str == ':') + str++; + + /* stop string at 1st attribute */ + p = strchr(str, ':'); + if (p) + *p = '\0'; +} + +#define EVENT_FLAGS(n, f, l) \ + { \ + .name = n, .ebit = f, .ubit = l \ + } +struct attr_flags +{ + const char *name; + int ebit; /* bit position in pfm_event_info_t.flags, -1 means ignore */ + int ubit; /* bit position in pfm_event_attr_info_t.flags, -1 means ignore */ +}; + +static const struct attr_flags event_flags[] = { + EVENT_FLAGS("precise", 0, 1), + EVENT_FLAGS("pebs", 0, 1), + EVENT_FLAGS("default", -1, 0), + EVENT_FLAGS("dfl", -1, 0), + EVENT_FLAGS(NULL, 0, 0)}; + +static void +parse_filters(char *arg) +{ + const struct attr_flags *attr; + char *p; + + while (arg) + { + p = strchr(arg, ','); + if (p) + *p++ = 0; + + for (attr = event_flags; attr->name; attr++) + { + if (!strcasecmp(attr->name, arg)) + { + switch (attr->ebit) + { + case 0: + options.efilter.is_precise = 1; + break; + case -1: + break; + default: + errx(1, "unknown event flag %d", attr->ebit); + } + switch (attr->ubit) + { + case 0: + options.ufilter.is_dfl = 1; + break; + case 1: + options.ufilter.is_precise = 1; + break; + case -1: + break; + default: + errx(1, "unknown umaks flag %d", attr->ubit); + } + break; + } + } + arg = p; + } +} + +static const struct +{ + char *name; + pfm_os_t os; +} supported_oses[] = { + {.name = "none", .os = PFM_OS_NONE}, + {.name = "raw", .os = PFM_OS_NONE}, + {.name = "pmu", .os = PFM_OS_NONE}, + + {.name = "perf", .os = PFM_OS_PERF_EVENT}, + {.name = "perf_ext", .os = PFM_OS_PERF_EVENT_EXT}, + { + .name = NULL, + }}; + +static const char *pmu_types[] = { + "unknown type", + "core", + "uncore", + "OS generic", +}; + +static void +setup_os(char *ostr) +{ + int i; + + for 
(i = 0; supported_oses[i].name; i++) + { + if (!strcmp(supported_oses[i].name, ostr)) + { + options.os = supported_oses[i].os; + return; + } + } + fprintf(stderr, "unknown OS layer %s, choose from:", ostr); + for (i = 0; supported_oses[i].name; i++) + { + if (i) + fputc(',', stderr); + fprintf(stderr, " %s", supported_oses[i].name); + } + fputc('\n', stderr); + exit(1); +} + +int main(int argc, char **argv) +{ + static char *argv_all[2] = {".*", NULL}; + pfm_pmu_info_t pinfo; + char *endptr = NULL; + char default_sep[2] = "\t"; + char *ostr = NULL; + char **args; + pfm_pmu_t i; + int match; + regex_t preg; + int ret, c; + + memset(&pinfo, 0, sizeof(pinfo)); + + pinfo.size = sizeof(pinfo); + + while ((c = getopt(argc, argv, "hELsm:MNl:F:x:DO:")) != -1) + { + switch (c) + { + case 'L': + options.compact = 1; + break; + case 'F': + parse_filters(optarg); + break; + case 'E': + options.compact = 1; + options.encode = 1; + break; + case 'M': + options.combo = 1; + break; + case 'N': + options.name_only = 1; + break; + case 's': + options.sort = 1; + break; + case 'D': + options.desc = 1; + break; + case 'l': + options.combo_lim = atoi(optarg); + break; + case 'x': + options.csv_sep = optarg; + break; + case 'O': + ostr = optarg; + break; + case 'm': + options.mask = strtoull(optarg, &endptr, 16); + if (*endptr) + errx(1, "mask must be in hexadecimal\n"); + break; + case 'h': + usage(); + exit(0); + default: + errx(1, "unknown option error"); + } + } + /* to allow encoding of events from non detected PMU models */ + ret = set_env_var("LIBPFM_ENCODE_INACTIVE", "1", 1); + if (ret != PFM_SUCCESS) + errx(1, "cannot force inactive encoding"); + + ret = pfm_initialize(); + if (ret != PFM_SUCCESS) + errx(1, "cannot initialize libpfm: %s", pfm_strerror(ret)); + + if (options.mask == 0) + options.mask = ~0; + + if (optind == argc) + { + args = argv_all; + } + else + { + args = argv + optind; + } + if (!options.csv_sep) + options.csv_sep = default_sep; + + /* avoid combinatorial 
explosion */ + if (options.combo_lim == 0) + options.combo_lim = COMBO_MAX; + + if (ostr) + setup_os(ostr); + else + options.os = PFM_OS_NONE; + + if (!options.compact) + { + int total_supported_events = 0; + int total_available_events = 0; + + printf("Supported PMU models:\n"); + pfm_for_all_pmus(i) + { + ret = pfm_get_pmu_info(i, &pinfo); + if (ret != PFM_SUCCESS) + continue; + + printf("\t[%d, %s, \"%s\"]\n", i, pinfo.name, pinfo.desc); + } + + printf("Detected PMU models:\n"); + pfm_for_all_pmus(i) + { + ret = pfm_get_pmu_info(i, &pinfo); + if (ret != PFM_SUCCESS) + continue; + + if (pinfo.is_present) + { + if (pinfo.type >= PFM_PMU_TYPE_MAX) + pinfo.type = PFM_PMU_TYPE_UNKNOWN; + + printf("\t[%d, %s, \"%s\", %d events, %d max encoding, %d counters, %s PMU]\n", + i, + pinfo.name, + pinfo.desc, + pinfo.nevents, + pinfo.max_encoding, + pinfo.num_cntrs + pinfo.num_fixed_cntrs, + pmu_types[pinfo.type]); + + total_supported_events += pinfo.nevents; + } + total_available_events += pinfo.nevents; + } + printf("Total events: %d available, %d supported\n", total_available_events, total_supported_events); + } + + while (*args) + { + /* drop umasks and modifiers */ + drop_event_attributes(*args); + if (regcomp(&preg, *args, REG_ICASE)) + errx(1, "error in regular expression for event \"%s\"", *argv); + + if (options.sort) + match = show_info_sorted(*args, &preg); + else + match = show_info(*args, &preg); + + if (match == 0) + errx(1, "event %s not found", *args); + + args++; + } + + regfree(&preg); + + pfm_terminate(); + + return 0; +} diff --git a/repos/mml/src/app/libpfm_test/target.mk b/repos/mml/src/app/libpfm_test/target.mk new file mode 100644 index 0000000000..5134b51d97 --- /dev/null +++ b/repos/mml/src/app/libpfm_test/target.mk @@ -0,0 +1,5 @@ +TARGET = libpfm_test +SRC_CC = check_events.c +LIBS += base posix libm libc stdcxx libpfm4 +CC_OPT += -Wno-error -Wno-permissive -fpermissive + diff --git a/repos/mml/src/app/loopbench/bench.c 
b/repos/mml/src/app/loopbench/bench.c new file mode 120000 index 0000000000..c97a0bd9c6 --- /dev/null +++ b/repos/mml/src/app/loopbench/bench.c @@ -0,0 +1 @@ +/home/mml/loopbench/bench.c \ No newline at end of file diff --git a/repos/mml/src/app/loopbench/bench.h b/repos/mml/src/app/loopbench/bench.h new file mode 120000 index 0000000000..7101d0b07d --- /dev/null +++ b/repos/mml/src/app/loopbench/bench.h @@ -0,0 +1 @@ +/home/mml/loopbench/bench.h \ No newline at end of file diff --git a/repos/mml/src/app/loopbench/config.h b/repos/mml/src/app/loopbench/config.h new file mode 120000 index 0000000000..2d5b04b6fb --- /dev/null +++ b/repos/mml/src/app/loopbench/config.h @@ -0,0 +1 @@ +/home/mml/loopbench/config.h \ No newline at end of file diff --git a/repos/mml/src/app/loopbench/loop.c b/repos/mml/src/app/loopbench/loop.c new file mode 120000 index 0000000000..28a2cf5277 --- /dev/null +++ b/repos/mml/src/app/loopbench/loop.c @@ -0,0 +1 @@ +/home/mml/loopbench/loop.c \ No newline at end of file diff --git a/repos/mml/src/app/loopbench/loop.h b/repos/mml/src/app/loopbench/loop.h new file mode 120000 index 0000000000..2d8b57437d --- /dev/null +++ b/repos/mml/src/app/loopbench/loop.h @@ -0,0 +1 @@ +/home/mml/loopbench/loop.h \ No newline at end of file diff --git a/repos/mml/src/app/loopbench/loop_bench_caladan.cpp b/repos/mml/src/app/loopbench/loop_bench_caladan.cpp new file mode 120000 index 0000000000..441ec26714 --- /dev/null +++ b/repos/mml/src/app/loopbench/loop_bench_caladan.cpp @@ -0,0 +1 @@ +/home/mml/loopbench/loop_bench_caladan.cpp \ No newline at end of file diff --git a/repos/mml/src/app/loopbench/loop_bench_concord.c b/repos/mml/src/app/loopbench/loop_bench_concord.c new file mode 120000 index 0000000000..33f38615b4 --- /dev/null +++ b/repos/mml/src/app/loopbench/loop_bench_concord.c @@ -0,0 +1 @@ +/home/mml/loopbench/loop_bench_concord.c \ No newline at end of file diff --git a/repos/mml/src/app/loopbench/loop_bench_mxtasking.cpp 
b/repos/mml/src/app/loopbench/loop_bench_mxtasking.cpp new file mode 120000 index 0000000000..48db951a40 --- /dev/null +++ b/repos/mml/src/app/loopbench/loop_bench_mxtasking.cpp @@ -0,0 +1 @@ +/home/mml/loopbench/loop_bench_mxtasking.cpp \ No newline at end of file diff --git a/repos/mml/src/app/loopbench/loop_bench_pthreads.cpp b/repos/mml/src/app/loopbench/loop_bench_pthreads.cpp new file mode 120000 index 0000000000..46e9806934 --- /dev/null +++ b/repos/mml/src/app/loopbench/loop_bench_pthreads.cpp @@ -0,0 +1 @@ +/home/mml/loopbench/loop_bench_pthreads.cpp \ No newline at end of file diff --git a/repos/mml/src/app/loopbench/profiling.c b/repos/mml/src/app/loopbench/profiling.c new file mode 120000 index 0000000000..06b4830055 --- /dev/null +++ b/repos/mml/src/app/loopbench/profiling.c @@ -0,0 +1 @@ +/home/mml/loopbench/profiling.c \ No newline at end of file diff --git a/repos/mml/src/app/loopbench/profiling.h b/repos/mml/src/app/loopbench/profiling.h new file mode 120000 index 0000000000..496a8045c5 --- /dev/null +++ b/repos/mml/src/app/loopbench/profiling.h @@ -0,0 +1 @@ +/home/mml/loopbench/profiling.h \ No newline at end of file diff --git a/repos/mml/src/app/loopbench/target.mk b/repos/mml/src/app/loopbench/target.mk new file mode 100644 index 0000000000..26cfb9356a --- /dev/null +++ b/repos/mml/src/app/loopbench/target.mk @@ -0,0 +1,10 @@ +TARGET = loopbench +SRC_CC = loop_bench_mxtasking.cpp \ + loop.c \ + bench.c \ + profiling.c + +LIBS += base libc stdcxx mxtasking +CC_OPT += -Wno-error -fno-aligned-new -g +CC_OLEVEL = -O0 +CC_CXX_WARN_STRICT = diff --git a/repos/mml/src/app/persistent_cell/persistent_cell.cc b/repos/mml/src/app/persistent_cell/persistent_cell.cc new file mode 100644 index 0000000000..bc3db06588 --- /dev/null +++ b/repos/mml/src/app/persistent_cell/persistent_cell.cc @@ -0,0 +1,36 @@ +#include +#include +#include +#include + +namespace Hoitaja_test { + struct Persistent_cell; +} + + +struct Hoitaja_test::Persistent_cell +{ + Genode::Env 
&_env; + Timer::Connection _timer{_env}; + + void _handle_timeout() + { + Genode::log("My affinity is ", _env.cpu().affinity_space()); + Genode::log("My PD cap is ", _env.pd_session_cap()); + _timer.trigger_once(5 * 1000 * 1000); + } + + Genode::Signal_handler _timeout_handler{ + _env.ep(), *this, &Persistent_cell::_handle_timeout}; + + Persistent_cell(Genode::Env &env) : _env(env) + { + Genode::log("My affinity is ", _env.cpu().affinity_space()); + Genode::log("My PD cap is ", _env.pd().address_space()); + _timer.sigh(_timeout_handler); + + _timer.trigger_once(5 * 1000 * 1000); + } +}; + +void Component::construct(Genode::Env &env) { static Hoitaja_test::Persistent_cell cell(env); } \ No newline at end of file diff --git a/repos/mml/src/app/persistent_cell/target.mk b/repos/mml/src/app/persistent_cell/target.mk new file mode 100644 index 0000000000..a8ef285365 --- /dev/null +++ b/repos/mml/src/app/persistent_cell/target.mk @@ -0,0 +1,3 @@ +TARGET = persistent_cell +SRC_CC = persistent_cell.cc +LIBS += base \ No newline at end of file diff --git a/repos/mml/src/app/pfm_test/main.cc b/repos/mml/src/app/pfm_test/main.cc new file mode 100644 index 0000000000..98953cf817 --- /dev/null +++ b/repos/mml/src/app/pfm_test/main.cc @@ -0,0 +1,109 @@ +/** + * @file main.cc + * @author Michael Müller (michael.mueller@uos.de) + * @brief Some Tests for using Performance Counters with libpfm and the NOVA syscalls + * @version 0.1 + * @date 2022-12-14 + * + * @copyright Copyright (c) 2022 + * + */ +#include +#include +#include +#include +#include + +#include +#include + +extern "C" { +#include +#include +} + +int main(void) +{ + pfm_pmu_info_t pinfo; + pfm_pmu_encode_arg_t e; + pfm_event_info_t info; + int ret; + + ret = pfm_initialize(); + if (ret != PFM_SUCCESS) { + std::cerr << "cannot initialize libpfm: " << pfm_strerror(ret) << std::endl; + return EXIT_FAILURE; + } + + memset(&pinfo, 0, sizeof(pfm_pmu_info_t)); + + ret = pfm_get_pmu_info(PFM_PMU_AMD64_FAM17H_ZEN1, &pinfo); + 
if (ret != PFM_SUCCESS) + { + std::cerr << "Failed to find PMU" << std::endl; + return -EXIT_FAILURE; + } + + if (!pinfo.is_present) { + std::cerr << "No AMD PMU present" << std::endl; + return -EXIT_FAILURE; + } + + memset(&e, 0, sizeof(e)); + + char *fqstr = nullptr; + e.fstr = &fqstr; + + do + { + ret = pfm_get_os_event_encoding("ITLB_RELOADS", PFM_PLM0 | PFM_PLM3, PFM_OS_NONE, &e); + if (ret == PFM_ERR_TOOSMALL) { + free(e.codes); + e.codes = NULL; + e.count = 0; + continue; + } else { + std::cerr << "No such event" << std::endl; + return EXIT_FAILURE; + } + } while (ret != PFM_SUCCESS); + + memset(&info, 0, sizeof(info)); + + ret = pfm_get_event_info(e.idx, PFM_OS_NONE, &info); + if (ret) { + std::cerr << "Failed to get event info" << std::endl; + return EXIT_FAILURE; + } + + std::cout << "Event found : " << fqstr << std::endl; + std::cout << "Code : " << info.code << std::endl; + + Nova::uint8_t rc = 0; + Nova::mword_t umask = 0x6; + Nova::mword_t flags = 0x0; + + if ((rc = Nova::hpc_ctrl(Nova::HPC_SETUP, 0, 1, info.code, umask, flags)) != Nova::NOVA_OK) { + std::cerr << "Failed to setup HPC 0 for event" << std::endl; + return EXIT_FAILURE; + } + + if ((rc = Nova::hpc_start(0, 1))) { + std::cerr << "Failed to start counter" << std::endl; + return EXIT_FAILURE; + } + + std::cout << "Successfully set up hardware performance counter 0" << std::endl; + + for (;;) { + std::this_thread::sleep_for(std::chrono::milliseconds(100)); + Nova::mword_t value; + if ((rc = Nova::hpc_read(0, 1, value)) != Nova::NOVA_OK) { + std::cerr << "Failed to read HPC" << std::endl; + return EXIT_FAILURE; + } + std::cout << "Counter value: " << value << std::endl; + } + + return EXIT_SUCCESS; +} \ No newline at end of file diff --git a/repos/mml/src/app/pfm_test/target.mk b/repos/mml/src/app/pfm_test/target.mk new file mode 100644 index 0000000000..5f61b6d5ed --- /dev/null +++ b/repos/mml/src/app/pfm_test/target.mk @@ -0,0 +1,4 @@ +TARGET = pfm_test +SRC_CC = main.cc +LIBS += base posix 
libm libc stdcxx libpfm4 +CC_OPT += -Wno-error -fpermissive -Wno-error=conversion \ No newline at end of file diff --git a/repos/mml/src/app/portal_experiment/main.cc b/repos/mml/src/app/portal_experiment/main.cc new file mode 100644 index 0000000000..a5562a9759 --- /dev/null +++ b/repos/mml/src/app/portal_experiment/main.cc @@ -0,0 +1,254 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define CALLS 100 +#define CORES 14 +#define HYPERCALL + + //Genode::Trace::timestamp(); +static Genode::Trace::Timestamp rdtsc_cost = 0; +Genode::Env *genv = nullptr; +static Genode::Trace::Timestamp start = 0; +static const unsigned long loops = 10000UL; +static Nova::mword_t channel = 0; +static std::atomic counter(0); +static std::atomic ready{false}; +static std::atomic restart{true}; +static std::atomic yield_ctr{-(31-CORES)}; +static unsigned long tsc_freq_khz = 0; +int cores, i; + +struct Channel { + unsigned long yield_flag : 1, + op : 2, + tnum : 61; + unsigned long delta_alloc; + unsigned long delta_activate; + unsigned long delta_setflag; + unsigned long delta_findborrower; + unsigned long delta_block; + unsigned long delta_enter; + unsigned long delta_return; +}; + +struct Cell : public Genode::Thread +{ + Genode::uint16_t _id; + Libc::Env &env; + Timer::Connection &_timer; + + static void *pthread_entry(void *args) { + Cell *cell = reinterpret_cast(args); + cell->entry(); + return nullptr; + } + + void entry() override + { + Genode::Trace::Timestamp latency = 0; + Nova::mword_t channel_id = 0; + Nova::uint64_t count_allocs = 0; + Nova::cpu_id(channel_id); + struct Channel *channels = reinterpret_cast(channel); + struct Channel volatile *my_channel = &channels[channel_id]; + + unsigned long _tsc_freq_ghz = tsc_freq_khz / 1000000UL; + + //Genode::log("Started worker", _id, " on CPU with affinity ", channel_id, Genode::Thread::myself()->affinity(), " signal channel: ", my_channel->yield_flag, " 
at ", my_channel); + + for (cores = CORES; cores <= 14; cores+=4) { + for (i = 0; i < CALLS; ) { + + if ((i == 0 && yield_ctr >= cores-1) || (i > 0 && yield_ctr >= cores-1)) + ready = true; + + if (_id != 0 && restart.load()) { + yield_ctr.fetch_add(1); + // Genode::log("Worker ", _id, "yielded, yield_ctr = ", yield_ctr.load()); + Nova::yield(); + } + + //Genode::log("Worker ", _id, " on CPU ", channel_id, " woke up"); + counter.fetch_add(1); + if (counter >= cores-1) { + ready = true; + // Genode::log("{\"allocation:\": ", allocation, ", \"id\":", _id, ",\"clk_total\":", (end-::start), ", \"mean_clk\":", (end-::start)/count_allocs ,", \"count\": ", count_allocs, "\"channel-id\":", channel_id, "},"); + } + + if (my_channel->op == 2) { + Nova::mword_t allocation = 0; + Genode::Trace::Timestamp now = Genode::Trace::timestamp(); + Nova::core_allocation(allocation); + my_channel->delta_return = now - my_channel->delta_return; + Genode::log("{\"iteration\": ", i, ", \"cores\":", cores, ", \"d_block\": ", my_channel->delta_block / _tsc_freq_ghz, ", \"d_enter\":", my_channel->delta_enter / _tsc_freq_ghz, ", \"d_return\":", my_channel->delta_return / _tsc_freq_ghz, ", \"op\": \"yield\"},"); + } + my_channel->op = 0; + if (_id == 0) { + //Genode::log("Waiting on start signal"); + while (ready.load() == false) + __builtin_ia32_pause(); + + //Genode::log("Got start signal"); + _timer.msleep(2); + + //Genode::log("Woke up for new iteration"); + ready = false; + restart = false; + ::start = Genode::Trace::timestamp(); + } + + Genode::Trace::Timestamp end = 0; + while (_id==0) + { + + if (_id == 0) { + Nova::mword_t allocated = 0; + //Genode::log("Allocating 4 cores"); + + my_channel->tnum = i; + my_channel->op = 1; /* 1 for alloc, 2 for yield */ + + my_channel->delta_enter = Genode::Trace::timestamp(); + Nova::uint8_t rc = Nova::alloc_cores(cores, allocated); + if (rc == Nova::NOVA_OK) + { + + while(ready.load() == false) + __builtin_ia32_pause(); + end = 
Genode::Trace::timestamp(); + my_channel->delta_return = end - my_channel->delta_return; + latency += (end - ::start) / _tsc_freq_ghz; + Nova::mword_t allocation = 0; + Genode::log("{\"iteration\": ", i, ", \"cores\":", cores, ", \"delta_enter:\" ", my_channel->delta_enter / _tsc_freq_ghz, ", \"delta_alloc\": ", my_channel->delta_alloc / _tsc_freq_ghz, ", \"delta_activate:\": ", my_channel->delta_activate / _tsc_freq_ghz, ", \"delta_setflag\": ", my_channel->delta_setflag / _tsc_freq_ghz, ", \"delta_return\": ", my_channel->delta_return / _tsc_freq_ghz, "},"); + Nova::core_allocation(allocation); + restart = true; + counter = 0; + yield_ctr = 0; + //if (i%100==0) { + + Genode::log("{\"iteration\": ", i, ", \"cores\":", cores, ", \"allocation\": ", allocation, ",\"start\": ", ::start, ", \"end\": ", end, " ,\"ns\": ", (latency), "},"); + my_channel->delta_setflag = 0; + latency = 0; + //} + i++; + break; + } else { + //Genode::log("cores allocated: ", allocated); + break; + // Genode::log("cores allocated: ", allocated); + } + count_allocs++; + } + } + //Genode::log("Finished allocation. 
Waiting for yield signal, id = ", channel_id, "\n"); + while (restart.load() == false) { + Channel volatile *res = __atomic_load_n(&my_channel, __ATOMIC_SEQ_CST); + if (res->yield_flag) { + Genode::log("Got yield signal on channel ", channel_id); + Nova::yield(true); + } + } + } + } + Genode::log("Benchmak finished."); + } + Cell(Libc::Env &env, Timer::Connection &timer, Genode::uint16_t id, Location const &location) + : Thread(env, Name("test_", location.xpos(), "x", location.ypos()), 4 * 4096, location, Weight(), env.cpu()), _id(id), env(env), _timer(timer) + { } +}; + + +void Libc::Component::construct(Libc::Env &env) +{ + Nova::uint8_t res = 0; + genv = &env; + + Libc::with_libc([&]() + { + Timer::Connection _timer{env}; + + Genode::Ram_dataspace_capability ds = env.ram().alloc(4096); + channel = env.rm().attach(ds); + + Genode::memset(reinterpret_cast(channel), 0, 4096); + + //Genode::Heap _heap{env.ram(), env.rm()}; + + //Genode::log("Registering MxTasking entrypoint"); + if ((res = Nova::mxinit(0, 0, channel))) { + Genode::error("Failed to init MxTasking: ", res); + } + Genode::log("Registered MxTasking, yielding ..."); + + try { + Genode::Attached_rom_dataspace info(env, "platform_info"); + tsc_freq_khz = info.xml().sub_node("hardware").sub_node("tsc") + .attribute_value("freq_khz", 0ULL); + } catch (...) 
{ }; + + start = Genode::Trace::timestamp(); + for (unsigned c = 0; c < 1000; c++) { + //Genode::Trace::Timestamp start = Genode::Trace::timestamp(); + + /*Nova::uint8_t rc = Nova::yield(); + if (rc != Nova::NOVA_OK) + break;*/ + Genode::Trace::timestamp(); + // Genode::Trace::Timestamp end = Genode::Trace::timestamp(); + // delay += (end - start); + } + Genode::Trace::Timestamp end = Genode::Trace::timestamp(); + rdtsc_cost = (end - start) / 1000 / 2; + + Genode::log("My affinity is ", env.cpu().affinity_space(), " of size ", env.cpu().affinity_space().total()); + Genode::log("Will create workers for affinity space: ", env.topo().global_affinity_space()); + start = Genode::Trace::timestamp(); + Genode::Thread *me = Genode::Thread::myself(); + + unsigned long cpuid = 0; + Nova::cpu_id(cpuid); + + Genode::Affinity::Space space = env.topo().global_affinity_space(); + Genode::log("My main thread is on phys. CPU ", cpuid); + + pthread_t workers[space.total()]; + std::cout << "Creating workers" << std::endl; + Genode::Trace::Timestamp thread_start = Genode::Trace::timestamp(); + for (Genode::uint16_t cpu = 1; cpu < space.total(); cpu++) + { + Genode::String<32> const name{"worker", cpu}; + if (cpu == (space.total() - cpuid)) + continue; + Cell *worker = new Cell(env, _timer, cpu, space.location_of_index(cpu)); + Libc::pthread_create_from_session(&workers[cpu], Cell::pthread_entry, worker, 4 * 4096, name.string(), &env.cpu(), space.location_of_index(cpu)); + // Genode::log("Created worker for CPU ", cpu); + // worker->start(); + } + Genode::Trace::Timestamp thread_stop = Genode::Trace::timestamp(); + Genode::log("Took ", (thread_stop - thread_start) / 2000, " μs to start workers"); + + pthread_t main_pt{}; + + Genode::Affinity::Location loc = me->affinity(); + //Genode::log("Starting main worker on CPU ", cpuid); + Cell *main_cell = new Cell(env, _timer, 0, loc); + + //Cell *main = new (_heap) Cell(env, 0, Genode::Affinity::Location(20,0)); + 
/*Libc::pthread_create_from_thread(&main_pt, *main, &main); + main->start();*/ + // Nova::yield(false); + //_timer.msleep(10000); + Libc::pthread_create_from_session(&main_pt, Cell::pthread_entry, main_cell, 8 * 4096, "main_worker", &env.cpu(), loc); + pthread_join(main_pt, 0); }); + Genode::log("Leaving component"); +} \ No newline at end of file diff --git a/repos/mml/src/app/portal_experiment/target.mk b/repos/mml/src/app/portal_experiment/target.mk new file mode 100644 index 0000000000..ec8ed005cc --- /dev/null +++ b/repos/mml/src/app/portal_experiment/target.mk @@ -0,0 +1,8 @@ +TARGET = portal_experiment +SRC_CC = main.cc +LIBS = base libc stdcxx +INC_DIR += $(call select_from_repositories,src/lib/libc) +INC_DIR += $(call select_from_repositories,src/lib/libc)/spec/x86_64 +ifdef HYPERCALL +CC_OPT += -DHYPERCALL +endif \ No newline at end of file diff --git a/repos/mml/src/app/raw_nic/main.cpp b/repos/mml/src/app/raw_nic/main.cpp new file mode 100644 index 0000000000..655305a6f4 --- /dev/null +++ b/repos/mml/src/app/raw_nic/main.cpp @@ -0,0 +1,102 @@ +#include +#include +#include +#include +#include +#include + +/* lwIP includes */ +#include + +class Raw_nic +{ + private: + Nic::Packet_allocator _nic_tx_alloc; + Nic::Connection _nic; + Timer::Connection _timer; + Genode::Mutex _mutex{}; + + unsigned char _mac[6]; + + Genode::Io_signal_handler _link_state_handler; + Genode::Io_signal_handler _rx_packet_handler; + Genode::Io_signal_handler _tx_ready_handler; + + enum + { + PACKET_SIZE = Nic::Packet_allocator::DEFAULT_PACKET_SIZE, + BUFF_SIZE = Nic::Session::QUEUE_SIZE * PACKET_SIZE + }; + + public: + void handle_link_state() + { + Genode::log("Link state requested."); + } + + void handle_rx_packets() + { + auto &rx = *_nic.rx(); + + while (rx.packet_avail() && rx.ready_to_ack() ) { + Nic::Packet_descriptor packet = rx.get_packet(); + Genode::log("Received packet of size ", packet.size()); + + //void *eth_frame = rx.packet_content(packet); + + //demux(eth_frame, 
packet.size()); + if (!rx.ready_to_ack()) + return; + + rx.acknowledge_packet(packet); + } + } + + void handle_tx_ready() + { + auto &tx = *_nic.tx(); + + while (tx.ack_avail()) + tx.release_packet(tx.get_acked_packet()); + + Genode::log("Send packets"); + } + + void send(const void* pkt, unsigned pkt_size ) + { + try { + Nic::Packet_descriptor pkt_desc = _nic.tx()->alloc_packet(pkt_size); + void *pkt_base = _nic.tx()->packet_content(pkt_desc); + + Genode::memcpy(pkt_base, pkt, pkt_size); + + _nic.tx()->submit_packet(pkt_desc); + } catch (Nic::Packet_stream_source::Packet_alloc_failed) { + Genode::warning("Packet allocation has failed."); + } + } + + Raw_nic(Genode::Env &env, Genode::Allocator &alloc) : _nic_tx_alloc(&alloc), _nic(env, &_nic_tx_alloc, BUFF_SIZE, BUFF_SIZE), _timer(env), _link_state_handler(env.ep(), *this, &Raw_nic::handle_link_state), _rx_packet_handler(env.ep(), *this, &Raw_nic::handle_rx_packets), _tx_ready_handler(env.ep(), *this, &Raw_nic::handle_tx_ready) { + Genode::log("Created NIC session."); + Genode::log("Registering callbacks"); + _nic.link_state_sigh(_link_state_handler); + _nic.rx_channel()->sigh_packet_avail(_rx_packet_handler); + _nic.rx_channel()->sigh_ready_to_ack(_rx_packet_handler); + _nic.tx_channel()->sigh_ready_to_submit(_tx_ready_handler); + _nic.tx_channel()->sigh_ack_avail (_tx_ready_handler); + Genode::log("Callbacks registered. 
Waiting for incoming packets..."); + Genode::log("MAC address from NIC session: ", _nic.mac_address()); + _nic.mac_address().copy(_mac); + Genode::log("Mac address read: ", _mac[0], ":", _mac[1], ":", _mac[2], ":", _mac[3], ":", _mac[4], ":", _mac[5]); + + Lwip::genode_init(alloc, _timer); + } +}; + +void Libc::Component::construct(Libc::Env &env) +{ + static Genode::Heap heap{env.ram(), env.rm()}; + + static Raw_nic _raw_nic(env, heap); + +} \ No newline at end of file diff --git a/repos/mml/src/app/raw_nic/target.mk b/repos/mml/src/app/raw_nic/target.mk new file mode 100644 index 0000000000..7999f80592 --- /dev/null +++ b/repos/mml/src/app/raw_nic/target.mk @@ -0,0 +1,3 @@ +TARGET = raw_nic +SRC_CC += main.cpp +LIBS = base libm libc stdcxx lwip \ No newline at end of file diff --git a/repos/mml/src/app/suoritin_test/main.cc b/repos/mml/src/app/suoritin_test/main.cc new file mode 100644 index 0000000000..c080a139ba --- /dev/null +++ b/repos/mml/src/app/suoritin_test/main.cc @@ -0,0 +1,122 @@ +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include +class Worker : public Genode::Thread { + + public: + Worker(Genode::Env &env) + : Genode::Thread(env, Name("worker foo"), 4 * 4096) { } + + void entry() override + { + while(true) { + + } + } +}; + +struct Suoritin_tester +{ + Genode::Env &_env; + Genode::Heap _heap{_env.ram(), _env.rm()}; + + Genode::Attached_rom_dataspace _config{_env, "config"}; + + Suoritin_tester(Genode::Env &env) : _env(env) + { + bool greedy = false; // Shall this cell always request more resources? 
default: no + //Genode::log("Hello from Suoritin tester"); + Tukija::Suoritin::Connection suoritin {_env}; + //Genode::log("Created TASKING session"); + + //Genode::log("Reading config"); + _config.update(); + + if (_config.valid()) { + greedy = _config.xml().attribute_value("greedy", false); + } + + /* Create a single dummy worker */ + Worker *foo = new (_heap) Worker(_env); + Worker *bar = new (_heap) Worker(_env); + + //Genode::log("Querying dataspace capabilities for shared mem interface"); + Genode::Dataspace_capability workers_cap = suoritin.worker_if(); + Genode::Dataspace_capability channels_cap = suoritin.channel_if(); + Genode::Dataspace_capability evt_cap = suoritin.event_channel(); + + //Genode::log("Mapping interface into virtual address space"); + Tukija::Suoritin::Worker* workers = env.rm().attach(workers_cap); + Tukija::Suoritin::Channel *channels = env.rm().attach(channels_cap); + //Tukija::Suoritin::Event_channel *evtchn = env.rm().attach(evt_cap); + + //Genode::log("Registering dummy worker named foo"); + suoritin.register_worker(Genode::Thread::Name("foo"), foo->cap()); + + //Genode::log("Creating one dummy task queue aka channel "); + suoritin.create_channel(*workers); + + suoritin.register_worker(Genode::Thread::Name("bar"), bar->cap()); + //Genode::log("Interfaces mapped succesfully"); + //Genode::log("Workers interface: ", workers); + //Genode::log("Channels interface: ", channels); + + //Genode::log("Writing dummy values"); + if (greedy) + channels[0].length(0xDEADBEEF); + else + channels[0].length(0x20); + // //Genode::log("Channel 0 has length ", channels[0].length()); + + suoritin.create_channel(workers[1]); + channels[1].length(0xF00); + + // Genode::log("Waiting for parent to react"); + + Genode::Trace::Timestamp rpc_delay = 0; + + if (greedy) { + Nova::mword_t response = 0; + Genode::Trace::Timestamp start = Genode::Trace::timestamp(); + + for (unsigned long i = 0; i < 2000; i++) { + channels[0].length(0xDEADBEEF); + + // 
Genode::Trace::Timestamp start = Genode::Trace::timestamp(); + //Nova::hpc_read(0, 1, response); + suoritin.request_cores(); + // Genode::Trace::Timestamp end = Genode::Trace::timestamp(); + // rpc_delay += (end - start); + + /*while (!(__atomic_load_n(&evtchn->parent_flag, __ATOMIC_SEQ_CST))) + __builtin_ia32_pause(); + __atomic_store_n(&evtchn->parent_flag, false, __ATOMIC_SEQ_CST);*/ + } + Genode::Trace::Timestamp end = Genode::Trace::timestamp(); + Genode::log("{\"response\": \"", response, "\", \"delay\": ", (end-start)/4000, ", \"rpc\":", rpc_delay/4000, "},"); + } + + //Genode::log("Exiting"); + while(true) + ; + //_env.parent().exit(0); + } +}; + +void Component::construct(Genode::Env &env) { + try { + static Suoritin_tester tester(env); + } catch (Genode::Quota_guard::Limit_exceeded) { + Genode::error("Failed: Caps exceeded."); + } +} \ No newline at end of file diff --git a/repos/mml/src/app/suoritin_test/target.mk b/repos/mml/src/app/suoritin_test/target.mk new file mode 100644 index 0000000000..4deed34456 --- /dev/null +++ b/repos/mml/src/app/suoritin_test/target.mk @@ -0,0 +1,3 @@ +TARGET = suoritin_tester +SRC_CC = main.cc +LIBS = base \ No newline at end of file diff --git a/repos/mml/src/app/sythetic_worker.h b/repos/mml/src/app/sythetic_worker.h new file mode 100644 index 0000000000..4f0a0b4c07 --- /dev/null +++ b/repos/mml/src/app/sythetic_worker.h @@ -0,0 +1,167 @@ +// synthetic_worker.h - support for generation of synthetic work + +#pragma once + +#include +#include +#include +#include +#include +#include +#include + +#define CACHELINE_SIZE (64) + +class SyntheticWorker +{ +public: + virtual ~SyntheticWorker() {} + // Perform n iterations of fake work. + virtual void Work(uint64_t n) = 0; +}; + +class SqrtWorker : public SyntheticWorker +{ +public: + SqrtWorker() {} + ~SqrtWorker() {} + + // Performs n iterations of sqrt(). 
+ void Work(uint64_t n); +}; + +class AsmSqrtWorker : public SyntheticWorker +{ +public: + AsmSqrtWorker() {} + ~AsmSqrtWorker() {} + + // Performs n iterations of sqrt(). + void Work(uint64_t n); +}; + +class StridedMemtouchWorker : public SyntheticWorker +{ +public: + ~StridedMemtouchWorker() { delete buf_; } + + // Creates a strided memory touching worker. + static StridedMemtouchWorker *Create(std::size_t size, size_t stride); + + // Performs n strided memory touches. + void Work(uint64_t n); + +private: + StridedMemtouchWorker(char *buf, std::size_t size, size_t stride) + : buf_(buf), size_(size), stride_(stride) {} + + volatile char *buf_; + std::size_t size_; + std::size_t stride_; +}; + +class MemStreamWorker : public SyntheticWorker +{ +public: + ~MemStreamWorker(); + + // Creates a memory streaming worker. + static MemStreamWorker *Create(std::size_t size); + + // Performs n memory reads. + void Work(uint64_t n); + +private: + MemStreamWorker(char *buf, std::size_t size) : buf_(buf), size_(size) {} + + volatile char *buf_; + std::size_t size_; +}; + +class RandomMemtouchWorker : public SyntheticWorker +{ +public: + ~RandomMemtouchWorker() { delete buf_; } + + // Creates a random memory touching worker. + static RandomMemtouchWorker *Create(std::size_t size, unsigned int seed); + + // Performs n random memory touches. + void Work(uint64_t n); + +private: + RandomMemtouchWorker(char *buf, std::vector schedule) + : buf_(buf), schedule_(std::move(schedule)) {} + + volatile char *buf_; + std::vector schedule_; +}; + +class CacheAntagonistWorker : public SyntheticWorker +{ +public: + ~CacheAntagonistWorker() { delete buf_; } + + // Creates a cache antagonist worker. + static CacheAntagonistWorker *Create(std::size_t size); + + // Perform n cache accesses. 
+ void Work(uint64_t n); + +private: + CacheAntagonistWorker(char *buf, std::size_t size) : buf_(buf), size_(size) {} + + char *buf_; + std::size_t size_; +}; + +class MemBWAntagonistWorker : public SyntheticWorker +{ +public: + ~MemBWAntagonistWorker() { numa_free(buf_, size_); } + + // Creates a memory bandwidth antagonist worker. It allocates an array whose + // size is indicated by the parameter. + static MemBWAntagonistWorker *Create(std::size_t size, int nop_period, + int nop_num); + + // Perform n times array stores. + void Work(uint64_t n); + +private: + MemBWAntagonistWorker(char *buf, std::size_t size, int nop_period, + int nop_num) + : buf_(buf), size_(size), nop_period_(nop_period), nop_num_(nop_num) {} + + char *buf_; + std::size_t size_; + int nop_period_; + int nop_num_; +}; + +class DynamicCacheAntagonistWorker : public SyntheticWorker +{ +public: + ~DynamicCacheAntagonistWorker() { delete buf_; } + + // Creates a cache antagonist worker. + static DynamicCacheAntagonistWorker *Create(std::size_t size, int period, + int nop_num); + + // Perform n cache accesses. + void Work(uint64_t n); + +private: + DynamicCacheAntagonistWorker(char *buf, std::size_t size, int period, + int nop_num) + : buf_(buf), size_(size), period_(period), nop_num_(nop_num) {} + + char *buf_; + std::size_t size_; + int period_; + int nop_num_; + int cnt_; +}; + +// Parses a string to generate one of the above fake workers. 
+SyntheticWorker *SyntheticWorkerFactory(std::string s); diff --git a/repos/mml/src/app/thread_test/target.mk b/repos/mml/src/app/thread_test/target.mk index 79ffb18ea9..55f1259a48 100644 --- a/repos/mml/src/app/thread_test/target.mk +++ b/repos/mml/src/app/thread_test/target.mk @@ -1,4 +1,4 @@ TARGET = thread_test SRC_CC = thread_test.cc -LIBS += base stdcxx +LIBS += base libc stdcxx CXXFLAGS += -Wno-error diff --git a/repos/mml/src/app/thread_test/thread_test.cc b/repos/mml/src/app/thread_test/thread_test.cc index 44b536721f..4b9853b3c0 100644 --- a/repos/mml/src/app/thread_test/thread_test.cc +++ b/repos/mml/src/app/thread_test/thread_test.cc @@ -1,9 +1,8 @@ #include #include +#include #include -#include -#include -#include +#include namespace Thread_test { class Tester; @@ -16,13 +15,13 @@ class Thread_test::Test_thread : public Thread { private: Env &_env; - uint16_t _id; + Genode::uint16_t _id; Timer::Connection _timer{_env}; public: List_element _list_element{this}; - Test_thread(Env &env, uint16_t id, Location const &location) + Test_thread(Env &env, Genode::uint16_t id, Location const &location) : Thread(env, Name("test_", location.xpos(), "x", location.ypos()), 4 * 4096, location, Weight(), env.cpu()), _env(env), _id(id) @@ -32,12 +31,13 @@ class Thread_test::Test_thread : public Thread { while(true) { Genode::log("Pong from thread ", _id); - auto start = _timer.elapsed_ms(); + auto start = Genode::Trace::timestamp(); // auto start = std::chrono::steady_clock::now (); - _timer.msleep(_id * 1000); - auto end = _timer.elapsed_ms(); + _timer.usleep(_id * 10); + auto end = Genode::Trace::timestamp(); // auto end = std::chrono::steady_clock::now(); - Genode::log("Thread ", _id, " woke up afer", (end-start), " ms."); + Genode::log("Thread ", _id, " woke up afer", (end-start)/2000, " us."); + Genode::log("My affinities are ", this->affinity().xpos(), "x", this->affinity().ypos(), " node: ", _env.topo().node_affinity_of(this->affinity()).id(), " native region: ", 
_env.topo().node_affinity_of(this->affinity()).native_id()); } } }; @@ -56,6 +56,7 @@ public: { Affinity::Space space = env.cpu().affinity_space(); + Genode::log("Having ", env.topo().node_count(), " NUMA regions."); Genode::log("Size of Affinity space is ", space.total()); Genode::log("-----------------------------"); for (unsigned i = 1; i < space.total(); i++) @@ -68,11 +69,15 @@ public: for (unsigned i = 1; i < space.total(); i++) { Affinity::Location location = env.cpu().affinity_space().location_of_index(i); - Test_thread *thread = new (_heap) Test_thread(env, (uint16_t)i, location); + Test_thread *thread = new (_heap) Test_thread(env, (Genode::uint16_t)i, location); thread->start(); _threads.insert(&thread->_list_element); } + + for (auto thread = _threads.first(); !thread; thread->next()) { + thread->object()->join(); + } /* Test, whether unique_ptrs work */ //auto unique_thread = std::unique_ptr(new (_heap) Test_thread(env, 255, env.cpu().affinity_space().location_of_index(0))); //unique_thread->start(); diff --git a/repos/mml/src/app/volatile_cell/target.mk b/repos/mml/src/app/volatile_cell/target.mk new file mode 100644 index 0000000000..cf18e8f9f4 --- /dev/null +++ b/repos/mml/src/app/volatile_cell/target.mk @@ -0,0 +1,3 @@ +TARGET = volatile_cell +SRC_CC = volatile_cell.cc loop.c +LIBS += base \ No newline at end of file diff --git a/repos/mml/src/app/volatile_cell/volatile_cell.cc b/repos/mml/src/app/volatile_cell/volatile_cell.cc new file mode 100644 index 0000000000..17b1200183 --- /dev/null +++ b/repos/mml/src/app/volatile_cell/volatile_cell.cc @@ -0,0 +1,155 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include "loop.h" +namespace Hoitaja_test { + class Volatile_cell; + struct Worker; +} + +#define LOOPS 100000 +#define ALLOC + +static Nova::mword_t channel; + +void short_loop(unsigned long k) +{ + for (unsigned long i = 0; i < k; i++) { + asm volatile("nop"); + asm volatile("nop"); + asm volatile("nop"); + 
asm volatile("nop"); + asm volatile("nop"); + asm volatile("nop"); + asm volatile("nop"); + asm volatile("nop"); + asm volatile("nop"); + asm volatile("nop"); + } +} + +struct Channel { + unsigned long yield_flag : 1, + op : 2, + tnum : 61; + unsigned long delta_alloc; + unsigned long delta_activate; + unsigned long delta_setflag; + unsigned long delta_findborrower; + unsigned long delta_block; + unsigned long delta_enter; + unsigned long delta_return; +}; + + +struct Hoitaja_test::Worker : public Genode::Thread +{ + Genode::uint16_t _id; + + void entry() override + { + Nova::mword_t channel_id = 0; + Nova::cpu_id(channel_id); + //unsigned long volatile *my_channel = &reinterpret_cast(channel)[channel_id]; + Channel *channels = reinterpret_cast(channel); + Channel *my_channel_struct = &channels[channel_id]; + + //Genode::log("Started worker ", _id, " on CPU ", channel_id); + Nova::yield(); + while (true) { + while (!(my_channel_struct->yield_flag)) + short_loop(770); + //Genode::log("Returning core ", channel_id); + my_channel_struct->delta_enter = Genode::Trace::timestamp(); + Nova::yield(false); + } + } + + Worker(Genode::Env &env, Genode::uint16_t id, Location const &location) + : Thread(env, Name("test_", location.xpos(), "x", location.ypos()), 4 * 4096, location, Weight(), env.cpu()), _id(id) + { } +}; + +class Hoitaja_test::Volatile_cell +{ + private: + Genode::Env &_env; + Timer::Connection _timer{_env}; + + public: + Volatile_cell(Genode::Env &env) : _env(env) + { + Genode::log("My affinity space is ", _env.cpu().affinity_space()); + + /* Pseuda-MxTasking Initialization */ + Nova::uint8_t rc = 0; + Genode::Ram_dataspace_capability ds = env.ram().alloc(4096); + channel = env.rm().attach(ds); + + if ((rc = Nova::mxinit(0, 0, channel))) { + Genode::error("Failed to init MxTasking: ", rc); + } + + Genode::Heap _heap{env.ram(), env.rm()}; + + Nova::mword_t my_cpu = 0; + Nova::cpu_id(my_cpu); + + Genode::log("Main thread on CPU ", my_cpu); + + unsigned 
cores_total = env.topo().global_affinity_space().total(); + + + for (Genode::uint16_t cpu = 1; cpu < cores_total; cpu++) + { + if (cpu == cores_total - my_cpu) + continue; + //Genode::log("Created worker for CPU ", cpu); + Worker *worker = new (_heap) Worker(env, cpu, env.topo().global_affinity_space().location_of_index(cpu)); + worker->start(); + } + Nova::mword_t mask = 0; + Nova::core_allocation(mask, true); + + _timer.msleep(my_cpu); + + Genode::log("Initial allocation: ", mask); + + for (; ;) { +#ifdef ALLOC + Nova::mword_t allocation = 0; + Nova::mword_t remainder = 0; + if (Nova::alloc_cores(32, allocation, remainder) != Nova::NOVA_OK) + { + //Genode::error("Failed to allocate cores"); + } + /*if (allocation != mask) + { + Genode::log("Allocated cmap = ",allocation); + }*/ + //_timer.usleep(2000); + while ((allocation != (mask | 0x1)) && (allocation != mask) ){ + Nova::core_allocation(allocation); + //Genode::log("Core allocation = ", allocation, " mask = ", mask); + __builtin_ia32_pause(); + } + + loop(500); +#endif + + //_timer.msleep(20); + } + while (true) + ; + // Genode::log("My time has come. Exiting ... cmap = ", allocation); + _timer.msleep(1000UL * 1000); + _env.parent().exit(0); + } +}; + +void Component::construct(Genode::Env &env) { static Hoitaja_test::Volatile_cell cell(env); } \ No newline at end of file diff --git a/repos/mml/src/app/yield_bench/main.cc b/repos/mml/src/app/yield_bench/main.cc new file mode 100644 index 0000000000..0d3ce13c99 --- /dev/null +++ b/repos/mml/src/app/yield_bench/main.cc @@ -0,0 +1,346 @@ +/* + * \brief Test for yielding resources + * \author Norman Feske + * \date 2013-10-05 + * + * This test exercises the protocol between a parent and child, which is used + * by the parent to regain resources from a child subsystem. + * + * The program acts in either one of two roles, the parent or the child. The + * role is determined by reading a config argument. 
+ * + * The child periodically allocates chunks of RAM until its RAM quota is + * depleted. Once it observes a yield request from the parent, however, it + * cooperatively releases as much resources as requested by the parent. + * + * The parent wait a while to give the child the chance to allocate RAM. It + * then sends a yield request and waits for a response. When getting the + * response, it validates whether the child complied to the request or not. + */ + +/* + * Copyright (C) 2013-2017 Genode Labs GmbH + * + * This file is part of the Genode OS framework, which is distributed + * under the terms of the GNU Affero General Public License version 3. + */ + +/* Genode includes */ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace Test { + class Child; + class Parent; + using namespace Genode; +} + + +/**************** + ** Child role ** + ****************/ + +/** + * The child eats more and more RAM. However, when receiving a yield request, + * it releases the requested amount of resources. 
+ */ +class Test::Child +{ + private: + + struct Ram_chunk : List::Element + { + Env &env; + + size_t const size; + + Ram_dataspace_capability ds_cap; + + Ram_chunk(Env &env, size_t size) + : + env(env),size(size), ds_cap(env.ram().alloc(size)) + { } + + ~Ram_chunk() { env.ram().free(ds_cap); } + }; + + Env &_env; + Heap _heap { _env.ram(), _env.rm() }; + bool const _expand; + List _ram_chunks { }; + Timer::Connection _timer { _env }; + Signal_handler _yield_handler; + uint64_t const _period_ms; + + void _handle_yield(); + + + + public: + + Child(Env &, Xml_node); + void main(); +}; + + +void Test::Child::_handle_yield() +{ + /* request yield request arguments */ + //Genode::Parent::Resource_args const args = _env.parent().yield_request(); + _env.parent().yield_response(); +/* + log("yield request: ", args.string()); + + size_t const requested_ram_quota = + Arg_string::find_arg(args.string(), "ram_quota").ulong_value(0); + + log("got request to free ", requested_ram_quota, " MB of RAM"); + + size_t const requested_cpu_quota = + Arg_string::find_arg(args.string(), "cpu_quota").ulong_value(0); + + log("released ", requested_cpu_quota, " portions of cpu_quota"); + + size_t const requested_gpu_quota = + Arg_string::find_arg(args.string(), "gpus").ulong_value(0); + + log("got request to release ", requested_gpu_quota, " gpus");*/ + + + +} + + +Test::Child::Child(Env &env, Xml_node config) +: + _env(env), + _expand(config.attribute_value("expand", false)), + _yield_handler(_env.ep(), *this, &Child::_handle_yield), + _period_ms(config.attribute_value("period_ms", (uint64_t)500)) +{ + /* register yield signal handler */ + _env.parent().yield_sigh(_yield_handler); +} + + +/***************** + ** Parent role ** + *****************/ + +/** + * The parent grants resource requests as long as it has free resources. + * Once in a while, it politely requests the child to yield resources. 
+ */ +class Test::Parent +{ + private: + + Env &_env; + + Timer::Connection _timer { _env }; + + void _print_status() + { + log("quota: ", _child.pd().ram_quota().value / 1024, " KiB " + "used: ", _child.pd().used_ram().value / 1024, " KiB"); + } + + size_t _used_ram_prior_yield = 0; + + /* perform the test three times */ + unsigned _cnt = 5000; + + unsigned long _start = 0; + + unsigned long _end = 0; + unsigned long _sent = 0; + + enum State { WAIT, YIELD_REQUESTED, YIELD_GOT_RESPONSE }; + State _state = WAIT; + + void _schedule_one_second_timeout() + { + //log("wait ", _wait_cnt, "/", _wait_secs); + _timer.trigger_once(10000); + } + + void _init() + { + _state = WAIT; + _schedule_one_second_timeout(); + } + + void _request_yield() + { + /* remember quantum of resources used by the child */ + _used_ram_prior_yield = _child.pd().used_ram().value; + + //log("request yield (ram prior yield: ", _used_ram_prior_yield); + + /* issue yield request */ + Genode::Parent::Resource_args yield_args("ram_quota=5M,cpu_quota=10,gpus=1"); + + _start = Genode::Trace::timestamp(); + _child.yield(yield_args); + _sent = Genode::Trace::timestamp(); + + _state = YIELD_REQUESTED; + } + + void _handle_timeout() + { + //_print_status(); + _request_yield(); + _schedule_one_second_timeout(); + } + + void _yield_response() + { + _end = Genode::Trace::timestamp(); + log("{\"yield-rtt\": ", (_end-_start)/2000, ", \"yield-request\": ", (_sent-_start)/2000, ",\"yield-response\":", (_end-_sent)/2000,"}"); + + _state = YIELD_GOT_RESPONSE; + + //_print_status(); + + if (_cnt-- > 0) { + _init(); + } else { + log("--- test-resource_yield finished ---"); + _env.parent().exit(0); + } + } + + Signal_handler _timeout_handler { + _env.ep(), *this, &Parent::_handle_timeout }; + + struct Policy : public Genode::Child_policy + { + Env &_env; + + Parent &_parent; + + Static_parent_services + _parent_services { _env }; + + Cap_quota const _cap_quota { 50 }; + Ram_quota const _ram_quota { 10*1024*1024 }; + 
Binary_name const _binary_name { "benchmark_resource_yield" }; + + /* + * Config ROM service + */ + + struct Config_producer : Dynamic_rom_session::Content_producer + { + void produce_content(char *dst, Genode::size_t dst_len) override + { + Xml_generator xml(dst, dst_len, "config", [&] () { + xml.attribute("child", "yes"); }); + } + } _config_producer { }; + + Dynamic_rom_session _config_session { _env.ep().rpc_ep(), + ref_pd(), _env.rm(), + _config_producer }; + + typedef Genode::Local_service Config_service; + + Config_service::Single_session_factory _config_factory { _config_session }; + Config_service _config_service { _config_factory }; + + void yield_response() override + { + _parent._yield_response(); + } + + Policy(Parent &parent, Env &env) : _env(env), _parent(parent) { } + + Name name() const override { return "child"; } + + Binary_name binary_name() const override { return _binary_name; } + + Pd_session &ref_pd() override { return _env.pd(); } + Pd_session_capability ref_pd_cap() const override { return _env.pd_session_cap(); } + + void init(Pd_session &pd, Pd_session_capability pd_cap) override + { + pd.ref_account(ref_pd_cap()); + ref_pd().transfer_quota(pd_cap, _cap_quota); + ref_pd().transfer_quota(pd_cap, _ram_quota); + } + + Route resolve_session_request(Service::Name const &service_name, + Session_label const &label, + Session::Diag const diag) override + { + auto route = [&] (Service &service) { + return Route { .service = service, + .label = label, + .diag = diag }; }; + + if (service_name == "ROM" && label == "child -> config") + return route(_config_service); + + Service *service_ptr = nullptr; + _parent_services.for_each([&] (Service &s) { + if (!service_ptr && service_name == s.name()) + service_ptr = &s; }); + + if (!service_ptr) + throw Service_denied(); + + return route(*service_ptr); + } + }; + + Policy _policy { *this, _env }; + + Genode::Child _child { _env.rm(), _env.ep().rpc_ep(), _policy }; + + public: + + class Insufficient_yield 
{ }; + + /** + * Constructor + */ + Parent(Env &env) : _env(env) + { + _timer.sigh(_timeout_handler); + _init(); + } +}; + + +/*************** + ** Component ** + ***************/ + +void Component::construct(Genode::Env &env) +{ + using namespace Genode; + + /* + * Read value '' attribute to decide whether to perform + * the child or the parent role. + */ + static Attached_rom_dataspace config(env, "config"); + bool const is_child = config.xml().attribute_value("child", false); + + if (is_child) { + log("--- test-resource_yield child role started ---"); + static Test::Child child(env, config.xml()); + } else { + log("--- test-resource_yield parent role started ---"); + static Test::Parent parent(env); + } +} diff --git a/repos/mml/src/app/yield_bench/target.mk b/repos/mml/src/app/yield_bench/target.mk new file mode 100644 index 0000000000..74d1d88926 --- /dev/null +++ b/repos/mml/src/app/yield_bench/target.mk @@ -0,0 +1,3 @@ +TARGET = benchmark_resource_yield +SRC_CC = main.cc +LIBS = base diff --git a/repos/mml/src/lib/mxip/include/stdlib.h b/repos/mml/src/lib/mxip/include/stdlib.h new file mode 100644 index 0000000000..2641481f45 --- /dev/null +++ b/repos/mml/src/lib/mxip/include/stdlib.h @@ -0,0 +1,15 @@ +#ifndef _LWIP__INCLUDE__STDLIB_H_ +#define _LWIP__INCLUDE__STDLIB_H_ + +/** + * Simple atoi for LwIP purposes + */ +static inline int atoi(char const *s) +{ + int n = 0; + while ('0' <= *s && *s <= '9') + n = 10*n - (*s++ - '0'); + return n; +} + +#endif diff --git a/repos/mml/src/lib/mxip/include/string.h b/repos/mml/src/lib/mxip/include/string.h new file mode 100644 index 0000000000..0d4b83330f --- /dev/null +++ b/repos/mml/src/lib/mxip/include/string.h @@ -0,0 +1,34 @@ +/* + * \brief Memory manipulation utilities + * \author Emery Hemingway + * \date 2017-08-21 + */ + +/* + * Copyright (C) 2017 Genode Labs GmbH + * + * This file is part of the Genode OS framework, which is distributed + * under the terms of the GNU Affero General Public License version 3. 
+ */ + +#ifndef __LWIP__INCLUDE__STRING_H__ +#define __LWIP__INCLUDE__STRING_H__ + +#ifdef __cplusplus +extern "C" { +#endif + +void *memcpy(void *dst, const void *src, size_t len); +void *memset(void *b, int c, size_t len); + +size_t strlen(const char *s); + +int memcmp(const void *b1, const void *b2, size_t len); +int strcmp(const char *s1, const char *s2); +int strncmp(const char *s1, const char *s2, size_t len); + +#ifdef __cplusplus +} +#endif + +#endif /* __LWIP__INCLUDE__STRING_H__ */ \ No newline at end of file diff --git a/repos/mml/src/lib/mxip/memmove.patch b/repos/mml/src/lib/mxip/memmove.patch new file mode 100644 index 0000000000..681805f15d --- /dev/null +++ b/repos/mml/src/lib/mxip/memmove.patch @@ -0,0 +1,12 @@ +diff --git a/src/core/def.c b/src/core/def.c +index 9da36fee..58edce6f 100644 +--- a/src/core/def.c ++++ b/src/core/def.c +@@ -235,6 +235,6 @@ lwip_itoa(char *result, size_t bufsize, int number) + return; + } + /* move from temporary buffer to output buffer (sign is not moved) */ +- memmove(res, tmp, (size_t)((result + bufsize) - tmp)); ++ MEMMOVE(res, tmp, (size_t)((result + bufsize) - tmp)); + } + #endif diff --git a/repos/mml/src/lib/mxip/platform/mxnic_netif.cc b/repos/mml/src/lib/mxip/platform/mxnic_netif.cc new file mode 100644 index 0000000000..b70363e46d --- /dev/null +++ b/repos/mml/src/lib/mxip/platform/mxnic_netif.cc @@ -0,0 +1,80 @@ +#include "mxnic_netif.h" +#include + +void Lwip::Nic_netif::handle_rx_packets() +{ + + Nic::Session::Rx::Sink *rx = _nic.rx(); + + bool progress = false; + + while (rx->packet_avail() && rx->ready_to_ack()) { + + try + { + Nic::Packet_descriptor packet = rx->try_get_packet(); + progress = true; + + Nic_netif_pbuf *nic_pbuf = new (this->_pbuf_alloc) Nic_netif_pbuf(*this, packet); + + if (!nic_pbuf) { + Genode::warning("Could not allocate pbuf "); + return; + } + pbuf* p = pbuf_alloced_custom( + PBUF_RAW, + packet.size(), + PBUF_REF, + &nic_pbuf->p, + rx->packet_content(packet), + packet.size()); + 
LINK_STATS_INC(link.recv); + + if (!p) { + Genode::warning("Initialization of pbuf failed."); + return; + } + + Lwip::Receive_task *task = new (_handler_allocator->allocate(0, 64, sizeof(Lwip::Receive_task))) Lwip::Receive_task(p, _netif, *this, nic_pbuf); + if (task == nullptr) + { + Genode::warning("Could not allocate task object."); + return; + } + task->annotate(static_cast(0)); + mx::tasking::runtime::spawn(*task); + + if (progress) + wake_up_nic_server(); + } + catch (Genode::Exception) + { + Genode::warning("Got signal without actual packet in queue"); + } + } +} + +void Lwip::Nic_netif::handle_tx_ready() +{ + Lwip::Tx_ready_task *task = new (_handler_allocator->allocate(0, 64, sizeof(Lwip::Tx_ready_task))) Lwip::Tx_ready_task(_nic, *this); + if (task == nullptr) + { + Genode::warning("Could not allocate tx_ready task object."); + return; + } + task->annotate(static_cast(0)); + mx::tasking::runtime::spawn(*task); + + +} + +void Lwip::Nic_netif::handle_link_state() +{ + Lwip::Link_state_task *task = new (_handler_allocator->allocate(0, 64, sizeof(Lwip::Link_state_task))) Lwip::Link_state_task(_nic, _netif, *this, _dhcp); // mx::tasking::runtime::new_task(0, _nic, _netif, _dhcp); + if (task == nullptr) { + Genode::warning("Could not allocate link state task object."); + return; + } + task->annotate(static_cast(0)); + mx::tasking::runtime::spawn(*task); +} \ No newline at end of file diff --git a/repos/mml/src/lib/mxip/platform/printf.cc b/repos/mml/src/lib/mxip/platform/printf.cc new file mode 100644 index 0000000000..38d93d694b --- /dev/null +++ b/repos/mml/src/lib/mxip/platform/printf.cc @@ -0,0 +1,36 @@ +/* + * \brief Print function for debugging functionality of lwIP. + * \author Stefan Kalkowski + * \date 2009-10-26 + */ + +/* + * Copyright (C) 2009-2017 Genode Labs GmbH + * + * This file is part of the Genode OS framework, which is distributed + * under the terms of the GNU Affero General Public License version 3. 
+ */ + +/* Genode includes */ +#include +#include + +extern "C" { + +/* LwIP includes */ +#include + + /* Simply map to Genode's printf implementation */ + void lwip_printf(const char *format, ...) + { + va_list list; + va_start(list, format); + + char buf[128] { }; + Genode::String_console(buf, sizeof(buf)).vprintf(format, list); + Genode::log(Genode::Cstring(buf)); + + va_end(list); + } + +} diff --git a/repos/mml/src/lib/mxip/platform/rand.cc b/repos/mml/src/lib/mxip/platform/rand.cc new file mode 100644 index 0000000000..0472c619cf --- /dev/null +++ b/repos/mml/src/lib/mxip/platform/rand.cc @@ -0,0 +1,30 @@ +/* + * \brief Simple random number generator for lwIP + * \author Emery Hemingway + * \date 2016-07-30 + */ + +// *Really* minimal PCG32 code / (c) 2014 M.E. O'Neill / pcg-random.org +// Licensed under Apache License 2.0 (NO WARRANTY, etc. see website) + +/* Genode includes */ +#include +#include + +extern "C" +genode_uint32_t genode_rand() +{ + using namespace Genode; + + static uint64_t const inc = Trace::timestamp()|1; + static uint64_t state = Trace::timestamp(); + uint64_t oldstate = state; + + // Advance internal state + state = oldstate * 6364136223846793005ULL + inc; + + // Calculate output function (XSH RR), uses old state for max ILP + uint32_t xorshifted = (uint32_t)(((oldstate >> 18u) ^ oldstate) >> 27u); + uint32_t rot = (uint32_t)(oldstate >> 59u); + return (xorshifted >> rot) | (xorshifted << ((-rot) & 31)); +} diff --git a/repos/mml/src/lib/mxip/platform/sys_arch.cc b/repos/mml/src/lib/mxip/platform/sys_arch.cc new file mode 100644 index 0000000000..26125d990f --- /dev/null +++ b/repos/mml/src/lib/mxip/platform/sys_arch.cc @@ -0,0 +1,145 @@ +/* + * \brief lwIP platform support + * \author Stefan Kalkowski + * \author Emery Hemingway + * \date 2016-12-01 + */ + +/* + * Copyright (C) 2017 Genode Labs GmbH + * + * This file is part of the Genode OS framework, which is distributed + * under the terms of the GNU Affero General Public License 
version 3. + */ + +/* Genode includes */ +#include +#include +#include +#include + +#include + +/* MxTasking includes */ +#include +#include +#include + +extern "C" { +/* LwIP includes */ +#include +#include +#include + +/* our abridged copy of string.h */ +#include +} + +namespace Mxip { + + static mx::memory::dynamic::Allocator *_heap; + class Timeout_task : public mx::tasking::TaskInterface + { + public: + Timeout_task() {} + + mx::tasking::TaskResult execute(std::uint16_t, std::uint16_t) override + { + //GENODE_LOG_TSC_NAMED(1, "sys_check_timeouts"); + sys_check_timeouts(); + _heap->free(static_cast(this)); + return mx::tasking::TaskResult::make_null(); + } + }; + + struct Mx_timer + { + void check_timeouts(Genode::Duration) + { + Timeout_task *task = new (_heap->allocate(0, 64, sizeof(Timeout_task))) Timeout_task(); // mx::tasking::runtime::new_task(0); + if (task == nullptr) { + Genode::error("Failed to allocate timeout task"); + return; + } + task->annotate(static_cast(0)); + mx::tasking::runtime::spawn(*task); + } + + ::Timer::Connection &timer; + + Timer::Periodic_timeout timeout{ + timer, *this, &Mx_timer::check_timeouts, Genode::Microseconds{250 * 1000}}; + + Mx_timer(::Timer::Connection &timer) : timer(timer) {} + }; + + static Mx_timer *sys_timer_ptr; + + void mxip_init(mx::memory::dynamic::Allocator &heap, ::Timer::Connection &timer) + { + _heap = &heap; + + static Mx_timer sys_timer(timer); + sys_timer_ptr = &sys_timer; + + lwip_init(); + } + +} + + +extern "C" { + + void lwip_platform_assert(char const* msg, char const *file, int line) + { + Genode::error("Assertion \"", msg, "\" ", file, ":", line); + Genode::sleep_forever(); + } + + void genode_free(void *ptr) + { + Mxip::_heap->free(ptr); + } + + void *genode_malloc(unsigned long size) + { + return Mxip::_heap->allocate(0, 64, size); + } + + void *genode_calloc(unsigned long number, unsigned long size) + { + size *= number; + + void * const ptr = genode_malloc(size); + if (ptr) + 
Genode::memset(ptr, 0x00, size); + + return ptr; + } + + u32_t sys_now(void) { + /* TODO: Use actual CPU frequency */ + //return (u32_t)Mxip::sys_timer_ptr->timer.curr_time().trunc_to_plain_ms().value; + return __builtin_ia32_rdtsc() / 2000000; + } + + void genode_memcpy(void *dst, const void *src, size_t len) + { + std::memcpy(dst, src, len); } + + void *genode_memmove(void *dst, const void *src, size_t len) { + return std::memmove(dst, src, len); } + + int memcmp(const void *b1, const void *b2, ::size_t len) { + return std::memcmp(b1, b2, len); } + + int strcmp(const char *s1, const char *s2) + { + size_t len = std::min(Genode::strlen(s1), Genode::strlen(s2)); + return std::strncmp(s1, s2, len); + } + + int strncmp(const char *s1, const char *s2, size_t len) { + return std::strncmp(s1, s2, len); } + +} diff --git a/repos/os/include/os/packet_stream.h b/repos/os/include/os/packet_stream.h index 20bbcdec20..5c9a0cccff 100644 --- a/repos/os/include/os/packet_stream.h +++ b/repos/os/include/os/packet_stream.h @@ -84,7 +84,10 @@ namespace Genode { class Packet_descriptor; - template class Packet_descriptor_queue; + template + class Mutex_free_packet_descriptor_queue; + template + class Packet_descriptor_queue; template class Packet_descriptor_transmitter; template class Packet_descriptor_receiver; @@ -148,6 +151,129 @@ class Genode::Packet_descriptor Genode::size_t size() const { return _size; } }; +template +class Genode::Mutex_free_packet_descriptor_queue +{ + private: + + struct cell { + Genode::uint64_t idx; + PACKET_DESCRIPTOR content; + }; + struct { + struct cell _queue[QUEUE_SIZE]; + }; + Genode::uint32_t _length{0}; + + alignas(64) Genode::uint64_t _head{0U}; + alignas(64) Genode::uint64_t _tail{0U}; + + public: + typedef PACKET_DESCRIPTOR Packet_descriptor; + + enum Role { PRODUCER, CONSUMER }; + + Mutex_free_packet_descriptor_queue(Role role) + { + Genode::log("Creating packet queue"); + if (role == PRODUCER) + { + Genode::memset(_queue, 0, sizeof(_queue)); 
+ for (auto i = 0U; i < QUEUE_SIZE; ++i) + { + _queue[i].idx = i; + } + } + } + + bool add(Packet_descriptor packet) + { + auto pos = __atomic_load_n(&_head, __ATOMIC_RELAXED); + Genode::uint64_t slot = 0; + for (;;) { + Genode::log("Searching for free slot"); + slot = pos % QUEUE_SIZE; + const auto idx = __atomic_load_n(&_queue[slot].idx, __ATOMIC_ACQUIRE); + const auto diff = Genode::int64_t(idx) - Genode::int64_t(pos); + if (diff == 0) { + if (__atomic_compare_exchange_n(&_head, &pos, pos+1, true, __ATOMIC_RELAXED, __ATOMIC_RELAXED)) + break; + } else if (diff < 0) + return false; + else + pos = __atomic_load_n(&_head, __ATOMIC_RELAXED); + } + _queue[slot].content = packet; + __atomic_store_n(&_queue[slot].idx, pos + 1, __ATOMIC_RELEASE); + return true; + } + + bool try_get(Packet_descriptor &packet) + { + auto pos = __atomic_load_n(&_tail, __ATOMIC_RELAXED); + Genode::uint64_t slot = 0; + for (;;) { + slot = pos % QUEUE_SIZE; + const auto idx = __atomic_load_n(&_queue[slot].idx, __ATOMIC_ACQUIRE); + const auto diff = Genode::int64_t(idx) - Genode::int64_t(pos + 1); + if (diff == 0) { + if (__atomic_compare_exchange_n(&_tail, &pos, pos+1, true, __ATOMIC_RELAXED, __ATOMIC_RELAXED)) + break; + } else if (diff < 0) + return false; + else + pos = __atomic_load_n(&_tail, __ATOMIC_RELAXED); + } + + packet = _queue[slot].content; + __atomic_store_n(&_queue[slot].idx, pos + QUEUE_SIZE, __ATOMIC_RELEASE); + return true; + } + + Packet_descriptor get() + { + Packet_descriptor pkt; + while (!try_get(pkt)) + Genode::log("trying to get packet"); + return pkt; + } + + Packet_descriptor peek() + { + return _queue[__atomic_load_n(&_tail, __ATOMIC_ACQUIRE) % QUEUE_SIZE].content; + } + + bool empty() { + return __atomic_load_n(&_head, __ATOMIC_RELAXED) == __atomic_load_n(&_tail, __ATOMIC_RELAXED); + } + + bool full() { return (_head + 1)%QUEUE_SIZE == _tail; } + + bool single_element() { + return (_tail + 1) % QUEUE_SIZE == _head; + } + + /* + bool single_slot_free() { + auto pos 
= __atomic_load_n(&_head, __ATOMIC_RELAXED); + const auto idx = __atomic_load_n(&_queue[pos % QUEUE_SIZE].idx, __ATOMIC_ACQUIRE); + const auto diff = Genode::int64_t(idx) - Genode::int64_t(pos); + Genode::log("Got diff free slots: ", diff); + if (diff == 0) + return true; + else + return false; + }*/ + + bool single_slot_free() { return (_head + 2)%QUEUE_SIZE == _tail; } + + unsigned slots_free() { + Genode::log("Checking free slots"); + return static_cast(((_tail > _head) ? _tail - _head + : QUEUE_SIZE - _head + _tail) - + 1); + } +}; /** * Ring buffer shared between source and sink, containing packet descriptors @@ -327,7 +453,11 @@ class Genode::Packet_descriptor_transmitter bool try_tx(typename TX_QUEUE::Packet_descriptor packet) { - Genode::Mutex::Guard mutex_guard(_tx_queue_mutex); + { + //GENODE_LOG_TSC_NAMED(1, "try_tx: _tx_queue_mutex lock"); + _tx_queue_mutex.acquire(); + } + //Genode::Mutex::Guard mutex_guard(_tx_queue_mutex); if (_tx_queue->full()) return false; @@ -337,12 +467,20 @@ class Genode::Packet_descriptor_transmitter if (_tx_queue->single_element()) _tx_wakeup_needed = true; + { + //GENODE_LOG_TSC_NAMED(1, "try_tx: …tx_queue_mutex release"); + _tx_queue_mutex.release(); + } return true; } bool tx_wakeup() { - Genode::Mutex::Guard mutex_guard(_tx_queue_mutex); + { + //GENODE_LOG_TSC_NAMED(1, "tx_wakeup: acquire"); + _tx_queue_mutex.acquire(); + } + //Genode::Mutex::Guard mutex_guard(_tx_queue_mutex); bool signal_submitted = false; @@ -352,6 +490,10 @@ class Genode::Packet_descriptor_transmitter } _tx_wakeup_needed = false; + { + //GENODE_LOG_TSC_NAMED(1, "tx_wakeup: release"); + _tx_queue_mutex.release(); + } return signal_submitted; } diff --git a/repos/os/src/lib/sandbox/alias.h b/repos/os/include/sandbox/alias.h similarity index 97% rename from repos/os/src/lib/sandbox/alias.h rename to repos/os/include/sandbox/alias.h index 5446dc2e73..335f764a19 100644 --- a/repos/os/src/lib/sandbox/alias.h +++ b/repos/os/include/sandbox/alias.h @@ -15,7 
+15,7 @@ #define _LIB__SANDBOX__ALIAS_H_ /* local includes */ -#include +#include namespace Sandbox { struct Alias; } diff --git a/repos/os/src/lib/sandbox/child.h b/repos/os/include/sandbox/child.h similarity index 88% rename from repos/os/src/lib/sandbox/child.h rename to repos/os/include/sandbox/child.h index 9755453a6f..8a7d7303be 100644 --- a/repos/os/src/lib/sandbox/child.h +++ b/repos/os/include/sandbox/child.h @@ -23,21 +23,59 @@ #include /* local includes */ -#include -#include -#include -#include -#include -#include -#include +#include +#include +#include +#include +#include +#include +#include namespace Sandbox { class Child; } class Sandbox::Child : Child_policy, Routed_service::Wakeup { public: + /** + * Resources assigned to the child + */ + struct Resources + { + long prio_levels_log2; + long priority; + Affinity affinity; + Ram_quota assigned_ram_quota; + Cap_quota assigned_cap_quota; + Cpu_quota assigned_cpu_quota; - using Version = String<80>; + Ram_quota effective_ram_quota() const + { + return Genode::Child::effective_quota(assigned_ram_quota); + } + + Cap_quota effective_cap_quota() const + { + /* capabilities consumed by 'Genode::Child' */ + Cap_quota const effective = + Genode::Child::effective_quota(assigned_cap_quota); + + /* capabilities additionally consumed by init */ + enum { + STATIC_COSTS = 1 /* possible heap backing-store + allocation for session object */ + + 1 /* buffered XML start node */ + + 2 /* dynamic ROM for config */ + + 2 /* dynamic ROM for session requester */ + }; + + if (effective.value < STATIC_COSTS) + return Cap_quota{0}; + + return Cap_quota{effective.value - STATIC_COSTS}; + } + }; + + typedef String<80> Version; /** * Exception types @@ -62,42 +100,18 @@ class Sandbox::Child : Child_policy, Routed_service::Wakeup virtual QUOTA resource_limit(QUOTA const &) const = 0; }; - using Ram_limit_accessor = Resource_limit_accessor; - using Cap_limit_accessor = Resource_limit_accessor; - using Cpu_limit_accessor = 
Resource_limit_accessor; + typedef Resource_limit_accessor Ram_limit_accessor; + typedef Resource_limit_accessor Cap_limit_accessor; + typedef Resource_limit_accessor Cpu_limit_accessor; struct Cpu_quota_transfer : Interface { - virtual void transfer_cpu_quota(Capability, Pd_session &, - Capability, Cpu_quota) = 0; + virtual void transfer_cpu_quota(Cpu_session_capability, Cpu_quota) = 0; }; enum class Sample_state_result { CHANGED, UNCHANGED }; - /* - * Helper for passing lambda functions as 'Pd_intrinsics::Fn' - */ - - using Pd_intrinsics = Genode::Sandbox::Pd_intrinsics; - - template - static void with_pd_intrinsics(Pd_intrinsics &pd_intrinsics, - Capability cap, PD_SESSION &pd, - FN const &fn) - { - struct Impl : Pd_intrinsics::Fn - { - using Intrinsics = Pd_intrinsics::Intrinsics; - - FN const &_fn; - Impl(FN const &fn) : _fn(fn) { } - void call(Intrinsics &intrinsics) const override { _fn(intrinsics); } - }; - - pd_intrinsics.with_intrinsics(cap, pd, Impl { fn }); - } - - private: + protected: friend class Child_registry; @@ -133,7 +147,8 @@ class Sandbox::Child : Child_policy, Routed_service::Wakeup * The child is no longer referenced by config model and can * safely be destructed. 
*/ - ABANDONED + ABANDONED, + }; State _state = State::INITIAL; @@ -193,7 +208,7 @@ class Sandbox::Child : Child_policy, Routed_service::Wakeup throw Missing_name_attribute(); } - using Name = String<64>; + typedef String<64> Name; Name const _unique_name { _name_from_xml(_start_node->xml()) }; static Binary_name _binary_from_xml(Xml_node start_node, @@ -226,48 +241,11 @@ class Sandbox::Child : Child_policy, Routed_service::Wakeup return _heartbeat_enabled && (_state == State::ALIVE); } - /** - * Resources assigned to the child - */ - struct Resources - { - long prio_levels_log2; - long priority; - Affinity affinity; - Ram_quota assigned_ram_quota; - Cap_quota assigned_cap_quota; - Cpu_quota assigned_cpu_quota; - - Ram_quota effective_ram_quota() const - { - return Genode::Child::effective_quota(assigned_ram_quota); - } - - Cap_quota effective_cap_quota() const - { - /* capabilities consumed by 'Genode::Child' */ - Cap_quota const effective = - Genode::Child::effective_quota(assigned_cap_quota); - - /* capabilities additionally consumed by init */ - enum { - STATIC_COSTS = 1 /* possible heap backing-store - allocation for session object */ - + 1 /* buffered XML start node */ - + 2 /* dynamic ROM for config */ - + 2 /* dynamic ROM for session requester */ - }; - - if (effective.value < STATIC_COSTS) - return Cap_quota{0}; - - return Cap_quota{effective.value - STATIC_COSTS}; - } - }; static Resources _resources_from_start_node(Xml_node start_node, Prio_levels prio_levels, Affinity::Space const &affinity_space, + Affinity::Location const &location, Cap_quota default_cap_quota) { unsigned cpu_percent = 0; @@ -277,7 +255,7 @@ class Sandbox::Child : Child_policy, Routed_service::Wakeup start_node.for_each_sub_node("resource", [&] (Xml_node rsc) { - using Name = String<8>; + typedef String<8> Name; Name const name = rsc.attribute_value("name", Name()); if (name == "RAM") @@ -293,7 +271,8 @@ class Sandbox::Child : Child_policy, Routed_service::Wakeup return Resources { 
log2(prio_levels.value), priority_from_xml(start_node, prio_levels), Affinity(affinity_space, - affinity_location_from_xml(affinity_space, start_node)), + (location.xpos() == -1 ? affinity_location_from_xml(affinity_space, start_node) : location)), + //affinity_location_from_xml(affinity_space, start_node)), Ram_quota { ram_bytes }, Cap_quota { caps }, Cpu_quota { cpu_percent } }; @@ -304,16 +283,6 @@ class Sandbox::Child : Child_policy, Routed_service::Wakeup Ram_quota _configured_ram_quota() const; Cap_quota _configured_cap_quota() const; - Pd_intrinsics &_pd_intrinsics; - - template - void _with_pd_intrinsics(FN const &fn) - { - with_pd_intrinsics(_pd_intrinsics, _child.pd_session_cap(), _child.pd(), fn); - } - - Capability _ref_pd_cap { }; /* defined by 'init' */ - using Local_service = Genode::Sandbox::Local_service_base; Registry &_parent_services; @@ -322,12 +291,12 @@ class Sandbox::Child : Child_policy, Routed_service::Wakeup struct Inline_config_rom_service : Abandonable, Dynamic_rom_session::Content_producer { - using Service = Genode::Local_service; + typedef Genode::Local_service Service; Child &_child; Dynamic_rom_session _session { _child._env.ep().rpc_ep(), - _child._env.ram(), _child._env.rm(), + _child.ref_pd(), _child._env.rm(), *this }; Service::Single_session_factory _factory { _session }; @@ -589,10 +558,10 @@ class Sandbox::Child : Child_policy, Routed_service::Wakeup Cpu_quota_transfer &cpu_quota_transfer, Prio_levels prio_levels, Affinity::Space const &affinity_space, + Affinity::Location const &location, Registry &parent_services, Registry &child_services, - Registry &local_services, - Pd_intrinsics &pd_intrinsics); + Registry &local_services); virtual ~Child(); @@ -607,7 +576,7 @@ class Sandbox::Child : Child_policy, Routed_service::Wakeup Cap_quota cap_quota() const { return _resources.assigned_cap_quota; } Cpu_quota cpu_quota() const { return _effective_cpu_quota; } - void try_start() + virtual void try_start() { if (_state == 
State::INITIAL) { _child.initiate_env_pd_session(); @@ -709,23 +678,15 @@ class Sandbox::Child : Child_policy, Routed_service::Wakeup Sample_state_result sample_state(); + /**************************** ** Child-policy interface ** ****************************/ Child_policy::Name name() const override { return _unique_name; } - Pd_session &ref_pd() override - { - Pd_session *_ref_pd_ptr = nullptr; - _with_pd_intrinsics([&] (Pd_intrinsics::Intrinsics &intrinsics) { - _ref_pd_ptr = &intrinsics.ref_pd; }); - return *_ref_pd_ptr; - } - - Pd_session_capability ref_pd_cap() const override { return _ref_pd_cap; } - - Ram_allocator &session_md_ram() override { return _env.ram(); } + Pd_session &ref_pd() override { return _env.pd(); } + Pd_session_capability ref_pd_cap() const override { return _env.pd_session_cap(); } void init(Pd_session &, Pd_session_capability) override; void init(Cpu_session &, Cpu_session_capability) override; @@ -759,7 +720,7 @@ class Sandbox::Child : Child_policy, Routed_service::Wakeup _child.close_all_sessions(); - _report_update_trigger.trigger_immediate_report_update(); + _report_update_trigger.trigger_report_update(); /* * Print a message as the exit is not handled otherwise. 
There are @@ -776,17 +737,6 @@ class Sandbox::Child : Child_policy, Routed_service::Wakeup bool initiate_env_sessions() const override { return false; } - void _with_address_space(Pd_session &, With_address_space_fn const &fn) override - { - _with_pd_intrinsics([&] (Pd_intrinsics::Intrinsics &intrinsics) { - fn.call(intrinsics.address_space); }); - } - - void start_initial_thread(Capability cap, addr_t ip) override - { - _pd_intrinsics.start_initial_thread(cap, ip); - } - void yield_response() override { apply_downgrade(); diff --git a/repos/os/src/lib/sandbox/child_registry.h b/repos/os/include/sandbox/child_registry.h similarity index 95% rename from repos/os/src/lib/sandbox/child_registry.h rename to repos/os/include/sandbox/child_registry.h index b13e7527fa..7fdb834357 100644 --- a/repos/os/src/lib/sandbox/child_registry.h +++ b/repos/os/include/sandbox/child_registry.h @@ -15,10 +15,10 @@ #define _LIB__SANDBOX__CHILD_REGISTRY_H_ /* local includes */ -#include -#include -#include -#include +#include +#include +#include +#include namespace Sandbox { struct Child_registry; } diff --git a/repos/os/src/lib/sandbox/config_model.h b/repos/os/include/sandbox/config_model.h similarity index 99% rename from repos/os/src/lib/sandbox/config_model.h rename to repos/os/include/sandbox/config_model.h index 0d32a9018c..756dbb2c71 100644 --- a/repos/os/src/lib/sandbox/config_model.h +++ b/repos/os/include/sandbox/config_model.h @@ -18,7 +18,7 @@ #include /* local includes */ -#include +#include namespace Sandbox { diff --git a/repos/os/src/lib/sandbox/heartbeat.h b/repos/os/include/sandbox/heartbeat.h similarity index 96% rename from repos/os/src/lib/sandbox/heartbeat.h rename to repos/os/include/sandbox/heartbeat.h index d62679a83f..417b29b62a 100644 --- a/repos/os/src/lib/sandbox/heartbeat.h +++ b/repos/os/include/sandbox/heartbeat.h @@ -15,8 +15,8 @@ #define _LIB__SANDBOX__HEARTBEAT_H_ /* local includes */ -#include -#include +#include +#include #include namespace Sandbox { 
class Heartbeat; } diff --git a/repos/os/include/sandbox/library.h b/repos/os/include/sandbox/library.h new file mode 100644 index 0000000000..3efdf7483c --- /dev/null +++ b/repos/os/include/sandbox/library.h @@ -0,0 +1,264 @@ + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#pragma once +namespace Sandbox { + class Library; +} + +namespace Genode { + class Sandbox; +} + +struct Sandbox::Library : ::Sandbox::State_reporter::Producer, + ::Sandbox::Child::Default_route_accessor, + ::Sandbox::Child::Default_caps_accessor, + ::Sandbox::Child::Ram_limit_accessor, + ::Sandbox::Child::Cap_limit_accessor, + ::Sandbox::Child::Cpu_limit_accessor, + ::Sandbox::Child::Cpu_quota_transfer, + ::Sandbox::Start_model::Factory, + ::Sandbox::Parent_provides_model::Factory +{ + using Routed_service = ::Sandbox::Routed_service; + using Parent_service = ::Sandbox::Parent_service; + using Local_service = ::Genode::Sandbox::Local_service_base; + using Report_detail = ::Sandbox::Report_detail; + using Child_registry = ::Sandbox::Child_registry; + using Verbose = ::Sandbox::Verbose; + using State_reporter = ::Sandbox::State_reporter; + using Heartbeat = ::Sandbox::Heartbeat; + using Server = ::Sandbox::Server; + using Alias = ::Sandbox::Alias; + using Child = ::Sandbox::Child; + using Prio_levels = ::Sandbox::Prio_levels; + using Ram_info = ::Sandbox::Ram_info; + using Cap_info = ::Sandbox::Cap_info; + using Cpu_quota = ::Sandbox::Cpu_quota; + using Config_model = ::Sandbox::Config_model; + using Start_model = ::Sandbox::Start_model; + using Preservation = ::Sandbox::Preservation; + +public: + Env &_env; + Heap &_heap; + + Registry _parent_services { }; + Registry _child_services { }; + Registry &_local_services; + Child_registry _children { }; + + /* + * Global parameters obtained from config + */ + Reconstructible _verbose { }; + Config_model::Version _version { }; + Constructible _default_route { }; + Cap_quota 
_default_caps { 0 }; + Prio_levels _prio_levels { }; + Constructible _affinity_space { }; + Preservation _preservation { }; + + Affinity::Space _effective_affinity_space() const + { + return _affinity_space.constructed() ? *_affinity_space + : Affinity::Space { 1, 1 }; + } + + State_reporter _state_reporter; + + Heartbeat _heartbeat { _env, _children, _state_reporter }; + + /* + * Internal representation of the XML configuration + */ + Config_model _config_model { }; + + /* + * Variables for tracking the side effects of updating the config model + */ + bool _server_appeared_or_disappeared = false; + bool _state_report_outdated = false; + + unsigned _child_cnt = 0; + + Cpu_quota _avail_cpu { .percent = 100 }; + Cpu_quota _transferred_cpu { .percent = 0 }; + + Ram_quota _avail_ram() const + { + Ram_quota avail_ram = _env.pd().avail_ram(); + + if (_preservation.ram.value > avail_ram.value) { + error("RAM preservation exceeds available memory"); + return Ram_quota { 0 }; + } + + /* deduce preserved quota from available quota */ + return Ram_quota { avail_ram.value - _preservation.ram.value }; + } + + Cap_quota _avail_caps() const + { + Cap_quota avail_caps { _env.pd().avail_caps().value }; + + if (_preservation.caps.value > avail_caps.value) { + error("Capability preservation exceeds available capabilities"); + return Cap_quota { 0 }; + } + + /* deduce preserved quota from available quota */ + return Cap_quota { avail_caps.value - _preservation.caps.value }; + } + + /** + * Child::Ram_limit_accessor interface + */ + Ram_quota resource_limit(Ram_quota const &) const override + { + return _avail_ram(); + } + + /** + * Child::Cap_limit_accessor interface + */ + Cap_quota resource_limit(Cap_quota const &) const override { return _avail_caps(); } + + /** + * Child::Cpu_limit_accessor interface + */ + Cpu_quota resource_limit(Cpu_quota const &) const override { return _avail_cpu; } + + /** + * Child::Cpu_quota_transfer interface + */ + void 
transfer_cpu_quota(Cpu_session_capability cap, Cpu_quota quota) override + { + Cpu_quota const remaining { 100 - min(100u, _transferred_cpu.percent) }; + + /* prevent division by zero in 'quota_lim_upscale' */ + if (remaining.percent == 0) + return; + + size_t const fraction = + Cpu_session::quota_lim_upscale(quota.percent, remaining.percent); + + _env.cpu().transfer_quota(cap, fraction); + + _transferred_cpu.percent += quota.percent; + } + + /** + * State_reporter::Producer interface + */ + void produce_state_report(Xml_generator &xml, Report_detail const &detail) const override + { + if (detail.init_ram()) + xml.node("ram", [&] () { Ram_info::from_pd(_env.pd()).generate(xml); }); + + if (detail.init_caps()) + xml.node("caps", [&] () { Cap_info::from_pd(_env.pd()).generate(xml); }); + + if (detail.children()) + _children.report_state(xml, detail); + } + + /** + * State_reporter::Producer interface + */ + Child::Sample_state_result sample_children_state() override + { + return _children.sample_state(); + } + + /** + * Default_route_accessor interface + */ + Xml_node default_route() override + { + return _default_route.constructed() ? 
_default_route->xml() + : Xml_node(""); + } + + /** + * Default_caps_accessor interface + */ + Cap_quota default_caps() override { return _default_caps; } + + void _update_aliases_from_config(Xml_node const &); + void _update_parent_services_from_config(Xml_node const &); + void _update_children_config(Xml_node const &); + void _destroy_abandoned_parent_services(); + virtual void _destroy_abandoned_children(); + + Server _server { _env, _heap, _child_services, _state_reporter }; + + /** + * Sandbox::Start_model::Factory + */ + virtual Child &create_child(Xml_node const &) override; + + /** + * Sandbox::Start_model::Factory + */ + virtual void update_child(Child &, Xml_node const &) override; + + /** + * Sandbox::Start_model::Factory + */ + Alias &create_alias(Child_policy::Name const &name) override + { + Alias &alias = *new (_heap) Alias(name); + _children.insert_alias(&alias); + return alias; + } + + /** + * Sandbox::Start_model::Factory + */ + void destroy_alias(Alias &alias) override + { + _children.remove_alias(&alias); + destroy(_heap, &alias); + } + + /** + * Sandbox::Start_model::Factory + */ + bool ready_to_create_child(Start_model::Name const &, + Start_model::Version const &) const override; + + /** + * Sandbox::Parent_provides_model::Factory + */ + Parent_service &create_parent_service(Service::Name const &name) override + { + return *new (_heap) Parent_service(_parent_services, _env, name); + } + + Library(Env &env, Heap &heap, Registry &local_services, + Genode::Sandbox::State_handler &state_handler) + : + _env(env), _heap(heap), _local_services(local_services), + _state_reporter(_env, *this, state_handler) + { } + + virtual void apply_config(Xml_node const &); + + virtual void generate_state_report(Xml_generator &xml) const + { + _state_reporter.generate(xml); + } +}; \ No newline at end of file diff --git a/repos/os/src/lib/sandbox/name_registry.h b/repos/os/include/sandbox/name_registry.h similarity index 96% rename from 
repos/os/src/lib/sandbox/name_registry.h rename to repos/os/include/sandbox/name_registry.h index 26eafb4b13..b172fff882 100644 --- a/repos/os/src/lib/sandbox/name_registry.h +++ b/repos/os/include/sandbox/name_registry.h @@ -18,7 +18,7 @@ #include /* local includes */ -#include +#include namespace Sandbox { struct Name_registry; } diff --git a/repos/os/src/lib/sandbox/report.h b/repos/os/include/sandbox/report.h similarity index 98% rename from repos/os/src/lib/sandbox/report.h rename to repos/os/include/sandbox/report.h index 7531f2a7ee..6b04a6eaee 100644 --- a/repos/os/src/lib/sandbox/report.h +++ b/repos/os/include/sandbox/report.h @@ -19,7 +19,7 @@ #include /* local includes */ -#include +#include namespace Sandbox { struct Report_update_trigger; diff --git a/repos/os/src/lib/sandbox/route_model.h b/repos/os/include/sandbox/route_model.h similarity index 99% rename from repos/os/src/lib/sandbox/route_model.h rename to repos/os/include/sandbox/route_model.h index e5f1f578c8..b0b387201b 100644 --- a/repos/os/src/lib/sandbox/route_model.h +++ b/repos/os/include/sandbox/route_model.h @@ -15,7 +15,7 @@ #define _ROUTE_MODEL_H_ /* local includes */ -#include +#include namespace Sandbox { diff --git a/repos/os/include/sandbox/sandbox.h b/repos/os/include/sandbox/sandbox.h index eb25be9814..b47bea0f4f 100644 --- a/repos/os/include/sandbox/sandbox.h +++ b/repos/os/include/sandbox/sandbox.h @@ -21,7 +21,7 @@ #include namespace Genode { class Sandbox; } - +namespace Sandbox { class Library; } class Genode::Sandbox : Noncopyable { @@ -79,9 +79,7 @@ class Genode::Sandbox : Noncopyable Heap _heap; - class Library; - - Library &_library; + ::Sandbox::Library &_library; Registry _local_services { }; @@ -98,7 +96,9 @@ class Genode::Sandbox : Noncopyable */ Sandbox(Env &, State_handler &, Pd_intrinsics &); - void apply_config(Xml_node const &); + virtual ~Sandbox() = default; + + virtual void apply_config(Xml_node const &); /** * Generate state report as configured by the config 
node diff --git a/repos/os/src/lib/sandbox/server.h b/repos/os/include/sandbox/server.h similarity index 95% rename from repos/os/src/lib/sandbox/server.h rename to repos/os/include/sandbox/server.h index ce231ce28f..c6d4e9ac61 100644 --- a/repos/os/src/lib/sandbox/server.h +++ b/repos/os/include/sandbox/server.h @@ -19,10 +19,10 @@ #include /* local includes */ -#include -#include -#include -#include +#include +#include +#include +#include namespace Sandbox { class Server; } diff --git a/repos/os/src/lib/sandbox/service.h b/repos/os/include/sandbox/service.h similarity index 100% rename from repos/os/src/lib/sandbox/service.h rename to repos/os/include/sandbox/service.h diff --git a/repos/os/src/lib/sandbox/state_reporter.h b/repos/os/include/sandbox/state_reporter.h similarity index 98% rename from repos/os/src/lib/sandbox/state_reporter.h rename to repos/os/include/sandbox/state_reporter.h index e911b632c7..1519178eb2 100644 --- a/repos/os/src/lib/sandbox/state_reporter.h +++ b/repos/os/include/sandbox/state_reporter.h @@ -19,8 +19,8 @@ #include /* local includes */ -#include "report.h" -#include "child.h" +#include +#include namespace Sandbox { class State_reporter; } diff --git a/repos/os/src/lib/sandbox/types.h b/repos/os/include/sandbox/types.h similarity index 100% rename from repos/os/src/lib/sandbox/types.h rename to repos/os/include/sandbox/types.h diff --git a/repos/os/src/lib/sandbox/utils.h b/repos/os/include/sandbox/utils.h similarity index 98% rename from repos/os/src/lib/sandbox/utils.h rename to repos/os/include/sandbox/utils.h index 9962f7e1a1..73ab1fa284 100644 --- a/repos/os/src/lib/sandbox/utils.h +++ b/repos/os/include/sandbox/utils.h @@ -205,6 +205,10 @@ namespace Sandbox { return priority; } + inline bool is_brick_from_xml(Xml_node start_node) + { + return start_node.attribute_value("brick", false); + } inline Affinity::Location affinity_location_from_xml(Affinity::Space const &space, Xml_node start_node) diff --git 
a/repos/os/src/lib/sandbox/verbose.h b/repos/os/include/sandbox/verbose.h similarity index 96% rename from repos/os/src/lib/sandbox/verbose.h rename to repos/os/include/sandbox/verbose.h index ab3e6050a6..65ca297ccf 100644 --- a/repos/os/src/lib/sandbox/verbose.h +++ b/repos/os/include/sandbox/verbose.h @@ -19,7 +19,7 @@ #include /* local includes */ -#include +#include namespace Sandbox { struct Verbose; } diff --git a/repos/os/include/suoritin/client.h b/repos/os/include/suoritin/client.h new file mode 100644 index 0000000000..8761887266 --- /dev/null +++ b/repos/os/include/suoritin/client.h @@ -0,0 +1,49 @@ +/* + * \brief Suoritin - Task-based CPU Client Interface + * \author Michael Müller + * \date 2023-07-12 + */ + +/* + * Copyright (C) 2010-2020 Genode Labs GmbH + * Copyright (C) 2023 Michael Müller, Osnabrück University + * + * This file is part of EalánOS, which is distributed + * under the terms of the GNU Affero General Public License version 3. + */ +#pragma once + +#include +#include +#include + +struct Tukija::Suoritin::Client : Genode::Rpc_client +{ + explicit Client(Genode::Capability session) + : Rpc_client(session) { } + + void create_channel(Tukija::Suoritin::Worker const &worker) override + { + call(worker); + } + + void register_worker(Genode::Thread::Name const &name, Genode::Thread_capability cap) override + { + call(name, cap); + } + + Genode::Dataspace_capability worker_if() override + { + return call(); + } + + Genode::Dataspace_capability channel_if() override + { + return call(); + } + + Genode::Dataspace_capability event_channel() override + { + return call(); + } +}; \ No newline at end of file diff --git a/repos/os/include/suoritin/component.h b/repos/os/include/suoritin/component.h new file mode 100644 index 0000000000..2712c1bab2 --- /dev/null +++ b/repos/os/include/suoritin/component.h @@ -0,0 +1,227 @@ +/* + * \brief Suoritin - Task-based CPU session component and root component + * \author Michael Müller + * \date 2023-10-10 + */ 
+ +/* + * Copyright (C) 2010-2020 Genode Labs GmbH + * Copyright (C) 2023 Michael Müller, Osnabrück University + * + * This file is part of EalánOS, which is distributed + * under the terms of the GNU Affero General Public License version 3. + */ +#pragma once +/* Genode includes */ +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +namespace Tukija { + namespace Suoritin { + class Session_component; + template class Allocator; + class Root_component; + } +} + +template +class Tukija::Suoritin::Allocator : public Genode::Allocator +{ + using size_t = Genode::size_t; + +private: + size_t _interface_size; + T* _pos; + T* _interface; + + Allocator(const Tukija::Suoritin::Allocator &) {} + Allocator &operator=(const Allocator&) {} + + public : + + Allocator(T *interface, size_t interface_size) : _interface_size(interface_size), _pos(interface), _interface(interface) + { + } + + Alloc_result try_alloc(size_t) override + { + T *pos = _pos; + Genode::log("Trying to allocate one interface at ", pos); + if (pos >= static_cast(_interface) + _interface_size) + return Alloc_result(Genode::Ram_allocator::Alloc_error::OUT_OF_RAM); + + _pos++; + return Alloc_result(static_cast(pos)); + } + + void free(void *, size_t) override + { } + + size_t overhead(size_t) const override { return 0; } + + bool need_size_for_free() const override { return false; } + + T *interface() { return _interface; } +}; + +class Tukija::Suoritin::Session_component : public Genode::Session_object +{ + private: + Genode::Affinity _affinity; + Genode::Env &_env; + + Genode::Attached_ram_dataspace _workers_if; + Genode::Attached_ram_dataspace _channels_if; + Genode::Attached_ram_dataspace _event_channel; + + Allocator _worker_allocator; + Allocator _channel_allocator; + + + unsigned long no_channels{0}; + unsigned long no_workers{0}; + + template + void construct(FUNC const &fn, Allocator &alloc, Args ...args) { + T* object = nullptr; + + try { + try { + object = 
new (alloc) T(args...); + fn(object); + } catch (Genode::Allocator::Out_of_memory) { + Genode::error("Out of RAM on registering worker."); + throw; + } + } catch (...) { + if (object) + destroy(alloc, object); + Genode::error("Exception caught registering worker"); + throw; + } + } + + + public: + Session_component( Genode::Rpc_entrypoint &session_ep, + Resources const &resources, + Label const &label, + Diag const &diag, + Genode::Env &env, + Genode::Affinity &affinity) + : + Genode::Session_object(session_ep, resources, label, diag), + _affinity(affinity.space().total() ? affinity : Genode::Affinity(Genode::Affinity::Space(1,1), Genode::Affinity::Location(0,0,1,1))), + _env(env), + _workers_if(env.ram(), env.rm(), sizeof(Worker)*affinity.space().total()), + _channels_if(env.ram(), env.rm(), sizeof(Channel)*affinity.space().total()), + _event_channel(env.ram(), env.rm(), sizeof(Event_channel)), + _worker_allocator(_workers_if.local_addr(), _affinity.space().total()*sizeof(Worker)), + _channel_allocator(_channels_if.local_addr(), _affinity.space().total()*sizeof(Channel)) + { + } + + void create_channel(Worker const &worker) override + { + try { + construct([&](Channel *) {}, _channel_allocator, worker); + } + catch (...) + { + Genode::error("Faild to create channel"); + } + no_channels++; + } + void register_worker(Genode::Thread::Name const &name, Genode::Thread_capability cap) override { + try { + construct([&](Worker *w) + { w->_id = (w - workers_if()); }, + _worker_allocator, cap, name); + } + catch (...) 
+ { + Genode::error("Failed to register worker"); + } + } + + Genode::Dataspace_capability worker_if() override + { + return _workers_if.cap(); + } + + Genode::Dataspace_capability channel_if() override + { + return _channels_if.cap(); + } + + Genode::Dataspace_capability event_channel() override + { + return _event_channel.cap(); + } + + /***** + * Internal interface for use by Hoitaja + *****/ + + inline Worker *workers_if() { + return _workers_if.local_addr(); + } + inline Channel *channels_if() { + return _channels_if.local_addr(); + } + + inline Worker &worker(unsigned long id) { + return workers_if()[id]; + } + + void send_request(Genode::Parent::Resource_args &args) + { + Event_channel *evtchn = _event_channel.local_addr(); + evtchn->parent_args = args; + + evtchn->parent_flag = true; + } + + unsigned long channels() { return no_channels; } +}; + +class Tukija::Suoritin::Root_component +: public Genode::Root_component +{ + private: + Genode::Registry> sessions{}; + Genode::Env &_env; + + public: + Session_component *_create_session(const char *args) override + { + Genode::log("Creating new TASKING session"); + return new(md_alloc()) Genode::Registered(sessions, + *this->ep(), + Genode::session_resources_from_args(args), + Genode::session_label_from_args(args), + Genode::session_diag_from_args(args), + _env, + Genode::Affinity(_env.cpu().affinity_space(), Genode::Affinity::Location(0,0,_env.cpu().affinity_space().width(), _env.cpu().affinity_space().height()))); + } + + /* Interal interface for Hoitaja */ + template + void for_each(FN const &fn) { + sessions.for_each(fn); + } + + Root_component(Genode::Env &env, Genode::Allocator &alloc) + : Genode::Root_component(env.ep(), alloc), _env(env) + { + Genode::log("Sta:ted TASKING service"); + } +}; \ No newline at end of file diff --git a/repos/os/include/suoritin/connection.h b/repos/os/include/suoritin/connection.h new file mode 100644 index 0000000000..1fb6513529 --- /dev/null +++ 
b/repos/os/include/suoritin/connection.h @@ -0,0 +1,56 @@ +/* + * \brief Suoritin - Task-based CPU Connection + * \author Michael Müller + * \date 2023-07-12 + */ + +/* + * Copyright (C) 2010-2020 Genode Labs GmbH + * Copyright (C) 2023 Michael Müller, Osnabrück University + * + * This file is part of EalánOS, which is distributed + * under the terms of the GNU Affero General Public License version 3. + */ +#pragma once + +#include +#include + +namespace Tukija { + namespace Suoritin { + struct Connection; + } +} + +struct Tukija::Suoritin::Connection : Genode::Connection, Tukija::Suoritin::Client +{ + enum + { + RAM_QUOTA = 32UL /* in kilobytes */ + }; + + Connection(Genode::Env &env, const char *label="", Genode::Affinity const &affinity = Genode::Affinity()) + : Genode::Connection(env, session(env.parent(), affinity, "ram_quota=%uK, cap_quota=%u, label=\"%s\"", RAM_QUOTA, CAP_QUOTA, label)), Tukija::Suoritin::Client(cap()) { + Genode::log("Connecting to TASKING service ..."); + } + + void create_channel(Tukija::Suoritin::Worker const &worker) override { + Tukija::Suoritin::Client::create_channel(worker); + } + + void register_worker(Genode::Thread::Name const &name, Genode::Thread_capability cap) override { + Tukija::Suoritin::Client::register_worker(name, cap); + } + + Genode::Dataspace_capability worker_if() override { + return Tukija::Suoritin::Client::worker_if(); + } + + Genode::Dataspace_capability channel_if() override { + return Tukija::Suoritin::Client::channel_if(); + } + + Genode::Dataspace_capability event_channel() override { + return Tukija::Suoritin::Client::event_channel(); + } +}; \ No newline at end of file diff --git a/repos/os/include/suoritin/session.h b/repos/os/include/suoritin/session.h new file mode 100644 index 0000000000..5e3b957fa4 --- /dev/null +++ b/repos/os/include/suoritin/session.h @@ -0,0 +1,111 @@ +/* + * \brief Suoritin - Task-based CPU Service + * \author Michael Müller + * \date 2023-07-12 + */ + +/* + * Copyright (C) 
2010-2020 Genode Labs GmbH + * Copyright (C) 2023 Michael Müller, Osnabrück University + * + * This file is part of EalánOS, which is distributed + * under the terms of the GNU Affero General Public License version 3. + */ +#pragma once + +#include +#include +#include +#include + +namespace Tukija { + namespace Suoritin { + struct Session; + struct Client; + class Capability; + + struct Channel; + struct Worker; + struct Event_channel; + } +} + + +struct Tukija::Suoritin::Worker : Genode::Interface +{ + Genode::Thread_capability _cap; + Genode::Thread::Name _name; + unsigned long _id; + + inline Genode::Thread_capability cap() { return _cap; } + inline Genode::Thread::Name name() { return _name; } + inline unsigned long id() { return _id; } + + Worker(Genode::Thread_capability cap, Genode::Thread::Name &name) + : + _cap(cap), _name(name), _id(0) {} +}; + +struct Tukija::Suoritin::Channel : Genode::Interface +{ + typedef unsigned long Length; + typedef unsigned long Occupancy; + + unsigned long _id{0}; + alignas(64) volatile unsigned long _length{0}; + alignas(64) volatile unsigned long _occupancy{0}; + alignas(64) unsigned long _worker{0}; + + void length(Length increment) { __atomic_add_fetch(&_length, increment, __ATOMIC_RELAXED); } + void occupancy(Occupancy new_occupancy) { __atomic_store_n(&_occupancy, new_occupancy, __ATOMIC_RELEASE); } + inline Length length() { return __atomic_load_n(&_length, __ATOMIC_ACQUIRE); } + inline Occupancy occupancy() { return __atomic_load_n(&_occupancy, __ATOMIC_ACQUIRE); } + + Channel(Worker &worker) : _worker(worker.id()) { + log("My worker is ", worker.name(), " at ", _worker); + } +}; + +struct Tukija::Suoritin::Event_channel : Genode::Interface +{ + enum + { + PARENT_REQUEST, + PARENT_RESPONSE, + CHILD_REQUEST, + CHILD_RESPONSE + }; + alignas(64) volatile bool parent_flag; + alignas(64) volatile bool child_flag; + alignas(64) Genode::Parent::Resource_args parent_args; + alignas(64) Genode::Parent::Resource_args child_args; 
+}; + +struct Tukija::Suoritin::Session : Genode::Session +{ + static const char *service_name() { return "TASKING"; } + + enum + { + CAP_QUOTA = 6 + }; + + virtual ~Session() { } + + /******************************** + ** Suoritin session interface ** + ********************************/ + virtual void create_channel(Worker const &worker) = 0; + virtual void register_worker(Genode::Thread::Name const &name, Genode::Thread_capability cap) = 0; + virtual Genode::Dataspace_capability worker_if() = 0; + virtual Genode::Dataspace_capability channel_if() = 0; + virtual Genode::Dataspace_capability event_channel() = 0; + + GENODE_RPC(Rpc_create_channel, void, create_channel, Worker const&); + GENODE_RPC(Rpc_register_worker, void, register_worker, Genode::Thread::Name const&, Genode::Thread_capability); + GENODE_RPC(Rpc_suoritin_worker_if, Genode::Dataspace_capability, worker_if); + GENODE_RPC(Rpc_suoritin_channel_if, Genode::Dataspace_capability, channel_if); + GENODE_RPC(Rpc_suoritin_event_if, Genode::Dataspace_capability, event_channel); + + GENODE_RPC_INTERFACE(Rpc_create_channel, Rpc_register_worker, Rpc_suoritin_worker_if, Rpc_suoritin_channel_if, Rpc_suoritin_event_if); +}; \ No newline at end of file diff --git a/repos/os/lib/mk/sandbox.mk b/repos/os/lib/mk/sandbox.mk index 9e30fd1828..0ebc64faed 100644 --- a/repos/os/lib/mk/sandbox.mk +++ b/repos/os/lib/mk/sandbox.mk @@ -1,5 +1,5 @@ SRC_CC = library.cc child.cc server.cc config_model.cc -INC_DIR += $(REP_DIR)/src/lib/sandbox +INC_DIR += $(REP_DIR)/include/sandbox LIBS += base SHARED_LIB = yes diff --git a/repos/os/run/ahci_block.run b/repos/os/run/ahci_block.run index be331b6525..9692baeb20 100644 --- a/repos/os/run/ahci_block.run +++ b/repos/os/run/ahci_block.run @@ -113,7 +113,7 @@ install_config { - + diff --git a/repos/os/run/ping_nic_router.run b/repos/os/run/ping_nic_router.run index cfad0e4b00..e09f0d901a 100644 --- a/repos/os/run/ping_nic_router.run +++ b/repos/os/run/ping_nic_router.run @@ -33,6 +33,7 @@ 
append config { + @@ -44,8 +45,8 @@ append config { - - + + @@ -56,7 +57,7 @@ append config { - + @@ -94,7 +95,7 @@ append config { - + diff --git a/repos/os/src/app/top/main.cc b/repos/os/src/app/top/main.cc index ab8e46281b..43c0883fa6 100644 --- a/repos/os/src/app/top/main.cc +++ b/repos/os/src/app/top/main.cc @@ -68,7 +68,7 @@ struct Trace_subject_registry return nullptr; } - enum { MAX_CPUS_X = 16, MAX_CPUS_Y = 4, MAX_ELEMENTS_PER_CPU = 6}; + enum { MAX_CPUS_X = 64, MAX_CPUS_Y = 1, MAX_ELEMENTS_PER_CPU = 6}; /* accumulated execution time on all CPUs */ unsigned long long total_first [MAX_CPUS_X][MAX_CPUS_Y]; diff --git a/repos/os/src/hoitaja/cell.h b/repos/os/src/hoitaja/cell.h new file mode 100644 index 0000000000..fddef126b0 --- /dev/null +++ b/repos/os/src/hoitaja/cell.h @@ -0,0 +1,115 @@ +#pragma once +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +namespace Hoitaja { + class Cell; +} + +class Hoitaja::Cell : public ::Sandbox::Child +{ + private: + State_handler &_state_handler; + long _priority{0}; + bool _is_brick{false}; + + public: + friend class Habitat; + + Cell(Genode::Env &env, + Genode::Allocator &alloc, + ::Sandbox::Verbose const &verbose, + ::Sandbox::Child::Id id, + ::Sandbox::Report_update_trigger &report_update_trigger, + Genode::Xml_node start_node, + ::Sandbox::Child::Default_route_accessor &default_route_accessor, + ::Sandbox::Child::Default_caps_accessor &default_caps_accessor, + ::Sandbox::Name_registry &name_registry, + ::Sandbox::Child::Ram_limit_accessor &ram_limit_accessor, + ::Sandbox::Child::Cap_limit_accessor &cap_limit_accessor, + ::Sandbox::Child::Cpu_limit_accessor &cpu_limit_accessor, + ::Sandbox::Child::Cpu_quota_transfer &cpu_quota_transfer, + ::Sandbox::Prio_levels prio_levels, + Genode::Affinity::Space const &affinity_space, + Genode::Affinity::Location const &location, + 
Genode::Registry<::Sandbox::Parent_service> &parent_services, + Genode::Registry<::Sandbox::Routed_service> &child_services, + Genode::Registry<::Sandbox::Child::Local_service> &local_services, + State_handler &state_handler) + : ::Sandbox::Child(env, alloc, verbose, id, report_update_trigger, start_node, default_route_accessor, default_caps_accessor, name_registry, ram_limit_accessor, cap_limit_accessor, cpu_limit_accessor, cpu_quota_transfer, prio_levels, affinity_space, location, parent_services, child_services, local_services), _state_handler(state_handler) + { + _priority = ::Sandbox::priority_from_xml(start_node, prio_levels); + _priority = (_priority == 0) ? 1 : _priority; + _is_brick = ::Sandbox::is_brick_from_xml(start_node); + + if (_is_brick) { + Genode::log("Creating new brick at Hoitaja <", name(), "> at ", location, " in ", affinity_space); + } else { + Genode::log("Creating new cell at Hoitaja <", name(), "> at ", location, " in ", affinity_space); + } + } + + virtual ~Cell() { }; + + struct Resources &resources() { return _resources; } + + void update_affinity(Genode::Affinity affinity) { + //Genode::log("Updating affinity to ", affinity.location(), " in space ", affinity.space()); + _resources.affinity = affinity; + //Genode::log("Moving CPU session ", _env.cpu_session_cap()); + _env.cpu().move(affinity.location()); + if (_child.active()) { + _child.cpu().move(affinity.location()); + // TODO: Change topology representation + _child.topo().reconstruct(affinity); + } + } + + void create_at_tukija() + { + Genode::log("Creating new cell <", name(), "> at Tukija at ", _resources.affinity.location()); + _child.pd().create_cell(_priority, _resources.affinity.location()); + } + + void exit(int exit_value) override + { + ::Sandbox::Child::exit(exit_value); + _state_handler.handle_habitat_state(*this); + } + + void shrink_cores(Genode::Affinity::Location &cores) { + if (_child.active()) + _child.pd().update_cell(cores); + } + + void 
grow_cores(Genode::Affinity::Location &cores) { + if (_child.active()) + _child.pd().update_cell(cores); + } + + void try_start() override + { + ::Sandbox::Child::try_start(); + while (!(_child.active())) + __builtin_ia32_pause(); + create_at_tukija(); + } + + bool is_brick() { return _is_brick; } +}; diff --git a/repos/os/src/hoitaja/cell_controller.h b/repos/os/src/hoitaja/cell_controller.h new file mode 100644 index 0000000000..f4d0a92c9b --- /dev/null +++ b/repos/os/src/hoitaja/cell_controller.h @@ -0,0 +1,47 @@ +/* + * \brief Hoitaja — Cell Controller + * \author Michael Müller, Norman Feske (Init) + * \date 2023-04-20 + */ + +/* + * Copyright (C) 2010-2017 Genode Labs GmbH + * Copyright (C) 2023 Michael Müller, Osnabrück University + * + * This file is part of EalánOS, which is distributed + * under the terms of the GNU Affero General Public License version 3. + */ + +#pragma once +#include + +namespace Hoitaja +{ + class Cell_controller; +} + +class Hoitaja::Cell_controller +{ + public: + void create_cell(); + void destroy_cell(); + + /** + * @brief Determine which cells shall be shrinked down + * + * @return Sandbox::Child* List of cells to shrink + */ + Sandbox::Child *cells_to_shrink(); + /** + * @brief Determine which cell shall be grown up + * + * @return Sandbox::Child* List of cells to grow + */ + Sandbox::Child *cells_to_grow(); + + /** + * @brief Regather performance metrics for next adaptation cycle + * + */ + void update_metrics(); +}; \ No newline at end of file diff --git a/repos/os/src/hoitaja/config.xsd b/repos/os/src/hoitaja/config.xsd new file mode 100644 index 0000000000..6c5a664a89 --- /dev/null +++ b/repos/os/src/hoitaja/config.xsd @@ -0,0 +1,215 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/repos/os/src/hoitaja/core_allocator.h b/repos/os/src/hoitaja/core_allocator.h new file mode 100644 index 0000000000..ce0d2ea3f9 --- /dev/null +++ b/repos/os/src/hoitaja/core_allocator.h @@ -0,0 +1,127 @@ +/* + * \brief Hoitaja — Core Allocator + * \author Michael Müller, Norman Feske (Init) + * \date 2023-04-20 + */ + +/* + * Copyright (C) 2010-2017 Genode Labs GmbH + * Copyright (C) 2023 Michael Müller, Osnabrück University + * + * This file is part of EalánOS, which is distributed + * under the terms of the GNU Affero General Public License version 3. + */ + +#pragma once +/* Genode includes */ +#include +#include +#include +#include +#include + +#include + +/** Hoitaja includes **/ +#include "load_controller.h" +#include "cell_controller.h" +#include + +namespace Hoitaja +{ + class Core_allocator; +} + +class Hoitaja::Core_allocator +{ + private: + Genode::Affinity::Space &_affinity_space; + + ::Sandbox::Prio_levels &_prio_levels; + + double _resource_coeff; // Coefficient used for calculating resource shares + + unsigned int _cores_for_cells; // Number of cores available to cells. This is the total number of cores in the habitat minus the cores occupied by bricks. 
+ + public: + inline unsigned int _calculate_resource_share(long priority) { + double ref_share = static_cast(_cores_for_cells) / _resource_coeff; + return static_cast((1.0 / static_cast(priority)) * ref_share); + } + + Core_allocator(Genode::Affinity::Space &affinity_space, ::Sandbox::Prio_levels prio_levels) : _affinity_space(affinity_space), _prio_levels(prio_levels), _resource_coeff(0.0), _cores_for_cells(_affinity_space.total()) + { + Genode::log("Created core allocator for ", affinity_space.total(), " cores and ", prio_levels.value, " priorities."); + Nova::create_habitat(0, affinity_space.total()); + } + + unsigned int cores_available() { + return _cores_for_cells; + } + + Genode::Affinity::Location allocate_cores_for_cell(Genode::Xml_node const &start_node) + { + if (::Sandbox::is_brick_from_xml(start_node)) { + Genode::Affinity::Location brick = ::Sandbox::affinity_location_from_xml(_affinity_space, start_node); + _cores_for_cells -= brick.width(); + return brick; + } + + // Calculate affinity from global affinity space and priority + long priority = ::Sandbox::priority_from_xml(start_node, _prio_levels); + priority = (priority == 0) ? 1 : priority; + _resource_coeff += (1.0/static_cast(priority)); // treat priority 0 same as 1, to avoid division by zero here + + unsigned int cores_share = _calculate_resource_share(priority); + + + return Genode::Affinity::Location( _cores_for_cells-cores_share, 0, cores_share, 1 ); /* always use the core_share last cores, for now */ + } + + void free_cores_from_cell(Cell &cell) + { + /* Remove cell's coefficient from the global resource coefficient. + * This is necessary in order to be able to redistribute the freed resources correctly. We do not trigger the redistribution itself here, because the child has not been fully destroyed yet, thus its resources might still be occupied at this point. 
*/ + _resource_coeff -= 1.0 / static_cast(cell.resources().priority); + } + + /** + * @brief Update core allocations for cells reported by Cell controller + * + */ + void update(Hoitaja::Cell &cell, int *xpos, int *lower_limit) { + if (cell.abandoned()) + return; + Cell::Resources resources = cell.resources(); + long priority = (resources.priority == 0)? 1 : resources.priority; + + unsigned int cores_share = _calculate_resource_share(priority); + unsigned int cores_to_reclaim = resources.affinity.location().width() * resources.affinity.location().height() - cores_share; + + cores_to_reclaim = (static_cast(cores_to_reclaim) < 0) ? 0 : cores_to_reclaim; + + if (*xpos - static_cast(cores_share) <= *lower_limit) { + cores_share-= *lower_limit; // Save one core for Hoitaja + } + + Genode::Affinity::Location location(*xpos - cores_share, resources.affinity.location().ypos(), cores_share, resources.affinity.location().height()); + + if (resources.affinity.location() != location) { // Only update, if location has actually changed + cell.update_affinity(Genode::Affinity(resources.affinity.space(), location)); + } + + if (location.width() > resources.affinity.location().width()) { + cell.grow_cores(location); + } + + *xpos = location.xpos(); + // TODO: Update affinity of existing sessions for cell + // TODO: Send yield request to cell + + if (cores_to_reclaim > 0) { + log("Need to reclaim ", cores_to_reclaim, " cores from ", cell.name()); + cell.shrink_cores(location); + } + } + +}; \ No newline at end of file diff --git a/repos/os/src/hoitaja/habitat.cc b/repos/os/src/hoitaja/habitat.cc new file mode 100644 index 0000000000..43084d7d40 --- /dev/null +++ b/repos/os/src/hoitaja/habitat.cc @@ -0,0 +1,136 @@ +#include "habitat.h" +#include + +::Sandbox::Child &Hoitaja::Habitat::create_child(Genode::Xml_node const &start_node) +{ + if (_affinity_space.constructed() && !_core_allocator.constructed()) + _core_allocator.construct(*_affinity_space, _prio_levels); + + 
Genode::Affinity::Location allocation = _core_allocator->allocate_cores_for_cell(start_node); + + + + if (allocation.width() < 1) { + Genode::error("failed to create child ", start_node.attribute_value("name", Child_policy::Name()), ": not enough CPU cores left."); + throw ::Sandbox::Start_model::Factory::Creation_failed(); + } + + // Allocate `cores_share` cores from the Core Allocator and set the childs affinity accordingly + // TODO: Implement core allocation + + try { + Hoitaja::Cell &child = *new (_heap) + Hoitaja::Cell(_env, _heap, *_verbose, + Child::Id { ++_child_cnt }, _state_reporter, + start_node, *this, *this, _children, *this, *this, *this, *this, + _prio_levels, _env.topo().global_affinity_space(), allocation, + _parent_services, _child_services, _local_services, _habitat_handler); + _children.insert(static_cast<::Sandbox::Child *>(&child)); + + maintain_cells(); + + _avail_cpu.percent -= min(_avail_cpu.percent, child.cpu_quota().percent); + + if (start_node.has_sub_node("provides")) + _server_appeared_or_disappeared = true; + + _state_report_outdated = true; + + return static_cast<::Sandbox::Child&>(child); + } + catch (Rom_connection::Rom_connection_failed) { + /* + * The binary does not exist. An error message is printed + * by the Rom_connection constructor. 
+ */ + } + catch (Out_of_ram) { + warning("memory exhausted during child creation"); } + catch (Out_of_caps) { + warning("local capabilities exhausted during child creation"); } + catch (Child::Missing_name_attribute) { + warning("skipped startup of nameless child"); } + catch (Region_map::Region_conflict) { + warning("failed to attach dataspace to local address space " + "during child construction"); } + catch (Region_map::Invalid_dataspace) { + warning("attempt to attach invalid dataspace to local address space " + "during child construction"); } + catch (Service_denied) { + warning("failed to create session during child construction"); } + + throw ::Sandbox::Start_model::Factory::Creation_failed(); + +} + +void Hoitaja::Habitat::_destroy_abandoned_children() +{ + _children.for_each_child([&] (Child &child) { + + if (!child.abandoned()) + return; + + /* make the child's services unavailable */ + child.destroy_services(); + child.close_all_sessions(); + _state_report_outdated = true; + + /* destroy child once all environment sessions are gone */ + if (child.env_sessions_closed()) { + _core_allocator->free_cores_from_cell(static_cast(child)); + _children.remove(&child); + + Cpu_quota const child_cpu_quota = child.cpu_quota(); + + destroy(_heap, &child); + + /* replenish available CPU quota */ + _avail_cpu.percent += child_cpu_quota.percent; + _transferred_cpu.percent -= min(_transferred_cpu.percent, + child_cpu_quota.percent); + } + }); + + /* We might have formerly occupied resources again now, so redistribute them */ + //maintain_cells(); +} + +void Hoitaja::Habitat::maintain_cells() +{ + int xpos = _affinity_space->total(); + int lower_limit = _affinity_space->total() - _core_allocator->cores_available(); + _children.for_each_child([&](Child &child) + { + //log(child.name(), " ram: ", child.ram_quota()); + Cell &cell = static_cast(child); + if (!(cell.is_brick())) + _core_allocator->update(cell, &xpos, &lower_limit); + }); + 
/*suoritin.for_each([&](Tukija::Suoritin::Session_component &client) + { Genode::log("Cell ", client.label(), "\n------------"); + for (unsigned long channel_id = 0; channel_id < client.channels(); channel_id++) + { + Tukija::Suoritin::Channel &channel = client.channels_if()[channel_id]; + + Genode::log("\t", "Channel ", channel_id, ": length=", channel.length(), " worker=", client.worker(channel._worker).name(), ",", client.worker(channel._worker).cap() ); + if (channel.length() > 0xFFFF) { + Genode::Parent::Resource_args grant_args("cpu_quota=10"); + client.send_request(grant_args); + } + } + });*/ +} + +void Hoitaja::Habitat::update(Cell &cell) +{ + if (cell._exited) { + if (cell._exit_value != 0) + Genode::error(cell.name(), " exited with exit status ", cell._exit_value); + + _children.remove(static_cast(&cell)); + _core_allocator->free_cores_from_cell(cell); + + /* Update resource allocations, as there are new resources available */ + maintain_cells(); + } +} \ No newline at end of file diff --git a/repos/os/src/hoitaja/habitat.h b/repos/os/src/hoitaja/habitat.h new file mode 100644 index 0000000000..49ff8a7eee --- /dev/null +++ b/repos/os/src/hoitaja/habitat.h @@ -0,0 +1,73 @@ +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include + +/* Hoitaja includes */ +#include +#include +#include + +#pragma once +namespace Hoitaja { + class Habitat; + using namespace Genode; +} +struct Hoitaja::Habitat : public Sandbox::Library +{ + public: + + friend class Genode::Sandbox::Local_service_base; + + State_handler &_habitat_handler; + + Heap _heap; + Sliced_heap suoritin_heap; + + + Genode::Constructible _core_allocator; + + + Registry + _local_services{}; + + void apply_config(Xml_node const &config) override { + log("Hoitaja is applying new config."); + + Sandbox::Library::apply_config(config); + } + + void generate_state_report(Xml_generator &xml) const override { + log("Generating new state report for 
Hoitaja."); + Sandbox::Library::generate_state_report(xml); + } + + void maintain_cells(); + + /** + * @brief Update cell's resource allocations + * + * @param cell whose resource allocations needs updating + */ + void update(Cell &cell); + + Habitat(Env &env, State_handler &habitat_handler, Genode::Sandbox::State_handler &handler) + : Sandbox::Library(env, _heap, _local_services, handler), _habitat_handler(habitat_handler), _heap(env.ram(), env.rm()), + suoritin_heap(env.ram(), env.rm()), _core_allocator() + { + } + + Sandbox::Child &create_child(Xml_node const &) override; + + void _destroy_abandoned_children() override; +}; \ No newline at end of file diff --git a/repos/os/src/hoitaja/hyperthread_controller.h b/repos/os/src/hoitaja/hyperthread_controller.h new file mode 100644 index 0000000000..b6393fb363 --- /dev/null +++ b/repos/os/src/hoitaja/hyperthread_controller.h @@ -0,0 +1,2 @@ + +#pragma once \ No newline at end of file diff --git a/repos/os/src/hoitaja/load_controller.h b/repos/os/src/hoitaja/load_controller.h new file mode 100644 index 0000000000..09cb7a62fa --- /dev/null +++ b/repos/os/src/hoitaja/load_controller.h @@ -0,0 +1,28 @@ +/* + * \brief Hoitaja — Load Controller + * \author Michael Müller, Norman Feske (Init) + * \date 2023-04-20 + */ + +/* + * Copyright (C) 2010-2017 Genode Labs GmbH + * Copyright (C) 2023 Michael Müller, Osnabrück University + * + * This file is part of EalánOS, which is distributed + * under the terms of the GNU Affero General Public License version 3. 
+ */ +#pragma once + +#include +#include + +namespace Hoitaja { + class Load_controller; +} + +class Hoitaja::Load_controller +{ + public: + unsigned short *cpu_loads(); + Genode::Affinity::Location *idle_cores(); +}; \ No newline at end of file diff --git a/repos/os/src/hoitaja/main.cc b/repos/os/src/hoitaja/main.cc new file mode 100644 index 0000000000..077c64c2b7 --- /dev/null +++ b/repos/os/src/hoitaja/main.cc @@ -0,0 +1,157 @@ +/* + * \brief Hoitaja — Cell Management Component based on Init + * \author Michael Müller, Norman Feske (Init) + * \date 2023-04-20 + */ + +/* + * Copyright (C) 2010-2017 Genode Labs GmbH + * Copyright (C) 2023 Michael Müller, Osnabrück University + * + * This file is part of EalánOS, which is distributed + * under the terms of the GNU Affero General Public License version 3. + */ + +/* Genode includes */ +#include +#include +#include +#include +#include + +/** Hoitaja components **/ +/* Filtering components */ +#include "load_controller.h" +#include "cell_controller.h" +#include "hyperthread_controller.h" +#include "memory_controller.h" +#include "numa_controller.h" +/* Core Allocator */ +#include "core_allocator.h" +/* State Handler */ +#include "state_handler.h" + +namespace Hoitaja { + + using namespace Genode; + + struct Main; +} + +struct Hoitaja::Main : Genode::Sandbox::State_handler, Hoitaja::State_handler +{ + + + Env &_env; + + Habitat _sandbox { _env, *this, *this }; + Timer::Connection _timer{_env}; + + Attached_rom_dataspace _config { _env, "config" }; + + void _handle_resource_avail() { } + + Signal_handler

_resource_avail_handler { + _env.ep(), *this, &Main::_handle_resource_avail }; + + Constructible _reporter { }; + + size_t _report_buffer_size = 0; + + void _handle_config() + { + try { + _config.update(); + + Xml_node const config = _config.xml(); + + bool reporter_enabled = false; + config.with_optional_sub_node("report", [&] (Xml_node report) { + + reporter_enabled = true; + + /* (re-)construct reporter whenever the buffer size is changed */ + Number_of_bytes const buffer_size = + report.attribute_value("buffer", Number_of_bytes(4096)); + + if (buffer_size != _report_buffer_size || !_reporter.constructed()) { + _report_buffer_size = buffer_size; + _reporter.construct(_env, "state", "state", _report_buffer_size); + } + }); + + if (_reporter.constructed()) + _reporter->enabled(reporter_enabled); + + _sandbox.apply_config(config); + } catch (Genode::Quota_guard::Limit_exceeded&) + { + Genode::error("Caps exceeded while handling configuration change."); + } + } + + Signal_handler
_config_handler { + _env.ep(), *this, &Main::_handle_config }; + + void handle_timeout(Genode::Duration) + { + //Genode::log("Hoitaja woke up after ", (now - last) / 2000, " us"); + + _sandbox.maintain_cells(); + + _timeout.schedule(Genode::Microseconds{20}); + } + + Timer::One_shot_timeout
_timeout{_timer, *this, &Main::handle_timeout}; + + /** + * Sandbox::State_handler interface + */ + void handle_sandbox_state() override + { + Genode::log("Habitat state changed"); + /* + try { + Reporter::Xml_generator xml(*_reporter, [&] () { + _sandbox.generate_state_report(xml); }); + } + catch (Xml_generator::Buffer_exceeded) { + + error("state report exceeds maximum size"); + + try to reflect the error condition as state report + try { + Reporter::Xml_generator xml(*_reporter, [&] () { + xml.attribute("error", "report buffer exceeded"); }); + } + catch (...) { } + }*/ + } + + void handle_habitat_state(Cell &cell) override + { + Genode::log("Habitat changed"); + try { + _sandbox.update(cell); + } catch (Genode::Quota_guard::Limit_exceeded) { + Genode::log("CAP quota exceeded in state handler"); + _env.parent().exit(1); + } + } + + Main(Env &env) : _env(env) + { + _config.sigh(_config_handler); + + /* prevent init to block for resource upgrades (never satisfied by core) */ + //_env.parent().resource_avail_sigh(_resource_avail_handler); + _handle_config(); + + Genode::log("Affinity space: ", env.cpu().affinity_space()); + + _timeout.schedule(Genode::Microseconds{20}); + } +}; + +void Component::construct(Genode::Env &env) { static Hoitaja::Main main(env); } + diff --git a/repos/os/src/hoitaja/memory_controller.h b/repos/os/src/hoitaja/memory_controller.h new file mode 100644 index 0000000000..b6393fb363 --- /dev/null +++ b/repos/os/src/hoitaja/memory_controller.h @@ -0,0 +1,2 @@ + +#pragma once \ No newline at end of file diff --git a/repos/os/src/hoitaja/numa_controller.h b/repos/os/src/hoitaja/numa_controller.h new file mode 100644 index 0000000000..b6393fb363 --- /dev/null +++ b/repos/os/src/hoitaja/numa_controller.h @@ -0,0 +1,2 @@ + +#pragma once \ No newline at end of file diff --git a/repos/os/src/hoitaja/state_handler.h b/repos/os/src/hoitaja/state_handler.h new file mode 100644 index 0000000000..e25a24d8b9 --- /dev/null +++ 
b/repos/os/src/hoitaja/state_handler.h @@ -0,0 +1,13 @@ +#pragma once + +#include + +namespace Hoitaja { + struct State_handler; + class Cell; +} + +struct Hoitaja::State_handler : Genode::Interface +{ + virtual void handle_habitat_state(Cell &cell) = 0; +}; \ No newline at end of file diff --git a/repos/os/src/hoitaja/target.mk b/repos/os/src/hoitaja/target.mk new file mode 100644 index 0000000000..8c566888ac --- /dev/null +++ b/repos/os/src/hoitaja/target.mk @@ -0,0 +1,11 @@ +TARGET = hoitaja +SRC_CC = main.cc habitat.cc +LIBS = base +INC_DIR += $(PRG_DIR) + +CONFIG_XSD = config.xsd + +# statically link sandbox library to avoid dependency from sandbox.lib.so +SRC_CC += library.cc child.cc server.cc config_model.cc +INC_DIR += $(REP_DIR)/src/lib/sandbox +vpath %.cc $(REP_DIR)/src/lib/sandbox diff --git a/repos/os/src/init/main.cc b/repos/os/src/init/main.cc index 4f13681880..54d1b84a70 100644 --- a/repos/os/src/init/main.cc +++ b/repos/os/src/init/main.cc @@ -25,11 +25,11 @@ namespace Init { } -struct Init::Main : Sandbox::State_handler +struct Init::Main : Genode::Sandbox::State_handler { Env &_env; - Sandbox _sandbox { _env, *this }; + Genode::Sandbox _sandbox { _env, *this }; Attached_rom_dataspace _config { _env, "config" }; diff --git a/repos/os/src/lib/sandbox/child.cc b/repos/os/src/lib/sandbox/child.cc index 666c902b0e..04e752640e 100644 --- a/repos/os/src/lib/sandbox/child.cc +++ b/repos/os/src/lib/sandbox/child.cc @@ -15,7 +15,7 @@ #include /* local includes */ -#include +#include void Sandbox::Child::destroy_services() @@ -693,6 +693,7 @@ Genode::Affinity Sandbox::Child::filter_session_affinity(Affinity const &session Affinity::Space const &session_space = session_affinity.space(); Affinity::Location const &session_location = session_affinity.location(); + /* scale resolution of resulting space */ Affinity::Space space(child_space.multiply(session_space)); Affinity::Location child_session(child_location.xpos(), child_location.ypos(), @@ -750,6 +751,7 @@ 
Sandbox::Child::Child(Env &env, Cpu_quota_transfer &cpu_quota_transfer, Prio_levels prio_levels, Affinity::Space const &affinity_space, + Affinity::Location const &location, Registry &parent_services, Registry &child_services, Registry &local_services, @@ -767,7 +769,7 @@ Sandbox::Child::Child(Env &env, _cpu_quota_transfer(cpu_quota_transfer), _name_registry(name_registry), _heartbeat_enabled(start_node.has_sub_node("heartbeat")), - _resources(_resources_from_start_node(start_node, prio_levels, affinity_space, + _resources(_resources_from_start_node(start_node, prio_levels, affinity_space, location, default_caps_accessor.default_caps())), _pd_intrinsics(pd_intrinsics), _parent_services(parent_services), diff --git a/repos/os/src/lib/sandbox/config_model.cc b/repos/os/src/lib/sandbox/config_model.cc index 57ef127cce..8ec3290b28 100644 --- a/repos/os/src/lib/sandbox/config_model.cc +++ b/repos/os/src/lib/sandbox/config_model.cc @@ -11,7 +11,7 @@ * under the terms of the GNU Affero General Public License version 3. 
*/ -#include +#include using namespace Sandbox; diff --git a/repos/os/src/lib/sandbox/library.cc b/repos/os/src/lib/sandbox/library.cc index 9bad5ee66b..56451d7ec9 100644 --- a/repos/os/src/lib/sandbox/library.cc +++ b/repos/os/src/lib/sandbox/library.cc @@ -16,290 +16,11 @@ #include /* local includes */ -#include -#include -#include -#include -#include - -struct Genode::Sandbox::Library : ::Sandbox::State_reporter::Producer, - ::Sandbox::Child::Default_route_accessor, - ::Sandbox::Child::Default_caps_accessor, - ::Sandbox::Child::Ram_limit_accessor, - ::Sandbox::Child::Cap_limit_accessor, - ::Sandbox::Child::Cpu_limit_accessor, - ::Sandbox::Child::Cpu_quota_transfer, - ::Sandbox::Start_model::Factory, - ::Sandbox::Parent_provides_model::Factory -{ - using Routed_service = ::Sandbox::Routed_service; - using Parent_service = ::Sandbox::Parent_service; - using Local_service = ::Genode::Sandbox::Local_service_base; - using Report_detail = ::Sandbox::Report_detail; - using Child_registry = ::Sandbox::Child_registry; - using Verbose = ::Sandbox::Verbose; - using State_reporter = ::Sandbox::State_reporter; - using Heartbeat = ::Sandbox::Heartbeat; - using Server = ::Sandbox::Server; - using Alias = ::Sandbox::Alias; - using Child = ::Sandbox::Child; - using Prio_levels = ::Sandbox::Prio_levels; - using Ram_info = ::Sandbox::Ram_info; - using Cap_info = ::Sandbox::Cap_info; - using Cpu_quota = ::Sandbox::Cpu_quota; - using Config_model = ::Sandbox::Config_model; - using Start_model = ::Sandbox::Start_model; - using Preservation = ::Sandbox::Preservation; - - Env &_env; - Heap &_heap; - - Pd_intrinsics &_pd_intrinsics; - - Registry _parent_services { }; - Registry _child_services { }; - Registry &_local_services; - Child_registry _children { }; - - /* - * Global parameters obtained from config - */ - Reconstructible _verbose { }; - Config_model::Version _version { }; - Constructible _default_route { }; - Cap_quota _default_caps { 0 }; - Prio_levels _prio_levels { }; - 
Constructible _affinity_space { }; - Preservation _preservation { }; - - Affinity::Space _effective_affinity_space() const - { - return _affinity_space.constructed() ? *_affinity_space - : Affinity::Space { 1, 1 }; - } - - State_reporter _state_reporter; - - Heartbeat _heartbeat { _env, _children, _state_reporter }; - - /* - * Internal representation of the XML configuration - */ - Config_model _config_model { }; - - /* - * Variables for tracking the side effects of updating the config model - */ - bool _server_appeared_or_disappeared = false; - bool _state_report_outdated = false; - - unsigned _child_cnt = 0; - - Cpu_quota _avail_cpu { .percent = 100 }; - Cpu_quota _transferred_cpu { .percent = 0 }; - - Ram_quota _avail_ram() const - { - Ram_quota avail_ram = _env.pd().avail_ram(); - - if (_preservation.ram.value > avail_ram.value) { - error("RAM preservation exceeds available memory"); - return Ram_quota { 0 }; - } - - /* deduce preserved quota from available quota */ - return Ram_quota { avail_ram.value - _preservation.ram.value }; - } - - Cap_quota _avail_caps() const - { - Cap_quota avail_caps { _env.pd().avail_caps().value }; - - if (_preservation.caps.value > avail_caps.value) { - error("Capability preservation exceeds available capabilities"); - return Cap_quota { 0 }; - } - - /* deduce preserved quota from available quota */ - return Cap_quota { avail_caps.value - _preservation.caps.value }; - } - - /** - * Child::Ram_limit_accessor interface - */ - Ram_quota resource_limit(Ram_quota const &) const override - { - return _avail_ram(); - } - - /** - * Child::Cap_limit_accessor interface - */ - Cap_quota resource_limit(Cap_quota const &) const override { return _avail_caps(); } - - /** - * Child::Cpu_limit_accessor interface - */ - Cpu_quota resource_limit(Cpu_quota const &) const override { return _avail_cpu; } - - /** - * Child::Cpu_quota_transfer interface - */ - void transfer_cpu_quota(Capability pd_cap, Pd_session &pd, - Capability cpu, Cpu_quota quota) 
override - { - Cpu_quota const remaining { 100 - min(100u, _transferred_cpu.percent) }; - - /* prevent division by zero in 'quota_lim_upscale' */ - if (remaining.percent == 0) - return; - - size_t const fraction = - Cpu_session::quota_lim_upscale(quota.percent, remaining.percent); - - Child::with_pd_intrinsics(_pd_intrinsics, pd_cap, pd, [&] (auto &intrinsics) { - intrinsics.ref_cpu.transfer_quota(cpu, fraction); }); - - _transferred_cpu.percent += quota.percent; - } - - /** - * State_reporter::Producer interface - */ - void produce_state_report(Xml_generator &xml, Report_detail const &detail) const override - { - if (detail.init_ram()) - xml.node("ram", [&] () { Ram_info::from_pd(_env.pd()).generate(xml); }); - - if (detail.init_caps()) - xml.node("caps", [&] () { Cap_info::from_pd(_env.pd()).generate(xml); }); - - if (detail.children()) - _children.report_state(xml, detail); - } - - /** - * State_reporter::Producer interface - */ - Child::Sample_state_result sample_children_state() override - { - return _children.sample_state(); - } - - /** - * Default_route_accessor interface - */ - Xml_node default_route() override - { - return _default_route.constructed() ? 
_default_route->xml() - : Xml_node(""); - } - - /** - * Default_caps_accessor interface - */ - Cap_quota default_caps() override { return _default_caps; } - - void _update_aliases_from_config(Xml_node const &); - void _update_parent_services_from_config(Xml_node const &); - void _update_children_config(Xml_node const &); - void _destroy_abandoned_parent_services(); - void _destroy_abandoned_children(); - - Server _server { _env, _heap, _child_services, _state_reporter }; - - /** - * Sandbox::Start_model::Factory - */ - Child &create_child(Xml_node const &) override; - - /** - * Sandbox::Start_model::Factory - */ - void update_child(Child &, Xml_node const &) override; - - /** - * Sandbox::Start_model::Factory - */ - Alias &create_alias(Child_policy::Name const &name) override - { - Alias &alias = *new (_heap) Alias(name); - _children.insert_alias(&alias); - return alias; - } - - /** - * Sandbox::Start_model::Factory - */ - void destroy_alias(Alias &alias) override - { - _children.remove_alias(&alias); - destroy(_heap, &alias); - } - - /** - * Sandbox::Start_model::Factory - */ - bool ready_to_create_child(Start_model::Name const &, - Start_model::Version const &) const override; - - /** - * Sandbox::Parent_provides_model::Factory - */ - Parent_service &create_parent_service(Service::Name const &name) override - { - return *new (_heap) Parent_service(_parent_services, _env, name); - } - - /** - * Default way of using the 'Env::pd' as the child's 'ref_pd' and accessing - * the child's address space via RPC. 
- */ - struct Default_pd_intrinsics : Pd_intrinsics - { - Env &_env; - - void with_intrinsics(Capability, Pd_session &pd, Fn const &fn) override - { - Region_map_client region_map(pd.address_space()); - - Intrinsics intrinsics { _env.pd(), _env.pd_session_cap(), - _env.cpu(), _env.cpu_session_cap(), region_map }; - fn.call(intrinsics); - } - - void start_initial_thread(Capability cap, addr_t ip) override - { - Cpu_thread_client(cap).start(ip, 0); - } - - Default_pd_intrinsics(Env &env) : _env(env) { } - - } _default_pd_intrinsics { _env }; - - Library(Env &env, Heap &heap, Registry &local_services, - State_handler &state_handler, Pd_intrinsics &pd_intrinsics) - : - _env(env), _heap(heap), _pd_intrinsics(pd_intrinsics), - _local_services(local_services), _state_reporter(_env, *this, state_handler) - { } - - Library(Env &env, Heap &heap, Registry &local_services, - State_handler &state_handler) - : - Library(env, heap, local_services, state_handler, _default_pd_intrinsics) - { } - - void apply_config(Xml_node const &); - - void generate_state_report(Xml_generator &xml) const - { - _state_reporter.generate(xml); - } -}; +#include -void Genode::Sandbox::Library::_destroy_abandoned_parent_services() + +void Sandbox::Library::_destroy_abandoned_parent_services() { _parent_services.for_each([&] (Parent_service &service) { if (service.abandoned()) @@ -307,7 +28,7 @@ void Genode::Sandbox::Library::_destroy_abandoned_parent_services() } -void Genode::Sandbox::Library::_destroy_abandoned_children() +void Sandbox::Library::_destroy_abandoned_children() { _children.for_each_child([&] (Child &child) { @@ -336,7 +57,7 @@ void Genode::Sandbox::Library::_destroy_abandoned_children() } -bool Genode::Sandbox::Library::ready_to_create_child(Start_model::Name const &name, +bool Sandbox::Library::ready_to_create_child(Start_model::Name const &name, Start_model::Version const &version) const { bool exists = false; @@ -364,7 +85,7 @@ bool 
Genode::Sandbox::Library::ready_to_create_child(Start_model::Name const } -::Sandbox::Child &Genode::Sandbox::Library::create_child(Xml_node const &start_node) +::Sandbox::Child &Sandbox::Library::create_child(Xml_node const &start_node) { if (!_affinity_space.constructed() && start_node.has_sub_node("affinity")) warning("affinity-space configuration missing, " @@ -412,7 +133,7 @@ bool Genode::Sandbox::Library::ready_to_create_child(Start_model::Name const } -void Genode::Sandbox::Library::update_child(Child &child, Xml_node const &start) +void Sandbox::Library::update_child(Child &child, Xml_node const &start) { if (child.abandoned()) return; @@ -429,7 +150,7 @@ void Genode::Sandbox::Library::update_child(Child &child, Xml_node const &start) } -void Genode::Sandbox::Library::apply_config(Xml_node const &config) +void Sandbox::Library::apply_config(Xml_node const &config) { _server_appeared_or_disappeared = false; _state_report_outdated = false; @@ -671,5 +392,5 @@ Genode::Sandbox::Sandbox(Env &env, State_handler &state_handler, Pd_intrinsics & Genode::Sandbox::Sandbox(Env &env, State_handler &state_handler) : _heap(env.ram(), env.rm()), - _library(*new (_heap) Library(env, _heap, _local_services, state_handler)) + _library(*new (_heap) ::Sandbox::Library(env, _heap, _local_services, state_handler)) { } diff --git a/repos/os/src/lib/sandbox/server.cc b/repos/os/src/lib/sandbox/server.cc index c557d237be..7a1335067b 100644 --- a/repos/os/src/lib/sandbox/server.cc +++ b/repos/os/src/lib/sandbox/server.cc @@ -16,7 +16,7 @@ #include /* local includes */ -#include "server.h" +#include /****************************** diff --git a/repos/os/src/server/nic_router/nic_client.cc b/repos/os/src/server/nic_router/nic_client.cc index 11ee622894..56653764a9 100644 --- a/repos/os/src/server/nic_router/nic_client.cc +++ b/repos/os/src/server/nic_router/nic_client.cc @@ -135,9 +135,9 @@ Net::Nic_client_interface::Nic_client_interface(Env &env, Nic::Connection { env, this, BUF_SIZE, 
BUF_SIZE, label.string() }, _session_link_state_handler { env.ep(), *this, &Nic_client_interface::_handle_session_link_state }, - _interface { env.ep(), timer, mac_address(), alloc, + _interface ( env.ep(), timer, mac_address(), alloc, Mac_address(), config, interfaces, *rx(), *tx(), - *this } + *this ) { /* install packet stream signal handlers */ rx_channel()->sigh_packet_avail(_interface.pkt_stream_signal_handler()); diff --git a/repos/os/src/server/nic_router/session_env.h b/repos/os/src/server/nic_router/session_env.h index e83c800521..b81341cfbf 100644 --- a/repos/os/src/server/nic_router/session_env.h +++ b/repos/os/src/server/nic_router/session_env.h @@ -133,6 +133,11 @@ class Genode::Session_env : public Ram_allocator, return result; } + Alloc_result try_alloc(size_t size, Numa_id, Cache cache) override + { + // TODO: Actually perform allocation from node numa_id here + return try_alloc(size, cache); + } void free(Ram_dataspace_capability ds) override { diff --git a/repos/os/src/server/nic_router/target.mk b/repos/os/src/server/nic_router/target.mk index 3c41f52089..b1d512893f 100644 --- a/repos/os/src/server/nic_router/target.mk +++ b/repos/os/src/server/nic_router/target.mk @@ -32,4 +32,7 @@ INC_DIR += $(PRG_DIR) CONFIG_XSD = config.xsd +CC_OLEVEL += -O3 +CC_CXX_WARN_STRICT = + CC_CXX_WARN_STRICT_CONVERSION = diff --git a/repos/os/src/server/nic_router/uplink_session_root.cc b/repos/os/src/server/nic_router/uplink_session_root.cc index 746c3b5fc2..a6f815a13c 100644 --- a/repos/os/src/server/nic_router/uplink_session_root.cc +++ b/repos/os/src/server/nic_router/uplink_session_root.cc @@ -94,7 +94,7 @@ Net::Uplink_session_component::Uplink_session_component(Session_env _interface { _session_env.ep(), timer, mac, _alloc, Mac_address(), config, interfaces, *_tx.sink(), *_rx.source(), _interface_policy }, - _ram_ds { ram_ds } + _ram_ds ( ram_ds ) { _interface.attach_to_domain(); diff --git a/repos/os/src/test/resource_yield/main.cc 
b/repos/os/src/test/resource_yield/main.cc index 65cd46ec08..2ea903ce1e 100644 --- a/repos/os/src/test/resource_yield/main.cc +++ b/repos/os/src/test/resource_yield/main.cc @@ -134,6 +134,16 @@ void Test::Child::_handle_yield() size_t const requested_ram_quota = Arg_string::find_arg(args.string(), "ram_quota").ulong_value(0); + size_t const requested_cpu_quota = + Arg_string::find_arg(args.string(), "cpu_quota").ulong_value(0); + + log("released ", requested_cpu_quota, " portions of cpu_quota"); + + size_t const requested_gpu_quota = + Arg_string::find_arg(args.string(), "gpus").ulong_value(0); + + log("got request to release ", requested_gpu_quota, " gpus"); + /* free chunks of RAM to comply with the request */ size_t released_quota = 0; while (released_quota < requested_ram_quota) { @@ -208,6 +218,10 @@ class Test::Parent unsigned _wait_cnt = 0; + unsigned long _start = 0; + + unsigned long _end = 0; + enum State { WAIT, YIELD_REQUESTED, YIELD_GOT_RESPONSE }; State _state = WAIT; @@ -232,7 +246,9 @@ class Test::Parent log("request yield (ram prior yield: ", _used_ram_prior_yield); /* issue yield request */ - Genode::Parent::Resource_args yield_args("ram_quota=5M"); + Genode::Parent::Resource_args yield_args("ram_quota=5M,cpu_quota=10,gpus=1"); + + _start = _timer.elapsed_us(); _child.yield(yield_args); _state = YIELD_REQUESTED; @@ -251,7 +267,9 @@ class Test::Parent void _yield_response() { - log("got yield response"); + _end = _timer.elapsed_us(); + log("got yield response after ", (_end-_start), "us"); + _state = YIELD_GOT_RESPONSE; _print_status(); @@ -281,7 +299,7 @@ class Test::Parent Parent &_parent; Static_parent_services + Log_session, Timer::Session, Topo_session> _parent_services { _env }; Cap_quota const _cap_quota { 50 }; diff --git a/repos/ports/run/tool_chain_auto.run b/repos/ports/run/tool_chain_auto.run index 1bdd2e1d6a..044e6461de 100644 --- a/repos/ports/run/tool_chain_auto.run +++ b/repos/ports/run/tool_chain_auto.run @@ -245,7 +245,7 @@ 
install_config { build_boot_image [list {*}[build_artifacts] bash_profile genode.tar $binutils.tar $gcc.tar] -append qemu_args " -nographic -m 800 " +append qemu_args " -nographic " # wait until Noux started run_genode_until {.*\[init -> vfs\] creating build directory\.\.\..*\n} $boot_timeout diff --git a/tool/builddir/build.mk b/tool/builddir/build.mk index 302b38daaa..2e27f9167e 100644 --- a/tool/builddir/build.mk +++ b/tool/builddir/build.mk @@ -64,7 +64,7 @@ INSTALL_DIR := $(CURDIR)/bin export BASE_DIR ?= ../base export REPOSITORIES ?= $(BASE_DIR:%base=%base-linux) $(BASE_DIR) export VERBOSE ?= @ -export VERBOSE_DIR ?= --no-print-directory +export VERBOSE_DIR ?= --print-directory export VERBOSE_MK ?= @ export LIB_CACHE_DIR ?= $(BUILD_BASE_DIR)/var/libcache export LIB_PROGRESS_LOG ?= $(BUILD_BASE_DIR)/progress.log diff --git a/tool/depot/mk/common.inc b/tool/depot/mk/common.inc index 29606ee4d8..4385bdbc93 100644 --- a/tool/depot/mk/common.inc +++ b/tool/depot/mk/common.inc @@ -9,7 +9,7 @@ VERBOSE ?= @ ECHO := echo -e HASHSUM := sha1sum -MAKEFLAGS += --no-print-directory +MAKEFLAGS += --print-directory BRIGHT_COL ?= \x1b[01;33m DARK_COL ?= \x1b[00;33m diff --git a/tool/ports/prepare_port b/tool/ports/prepare_port index d95f49d9b0..88e403bc7e 100755 --- a/tool/ports/prepare_port +++ b/tool/ports/prepare_port @@ -7,7 +7,7 @@ # ifndef VERBOSE -MAKEFLAGS += --no-print-directory +MAKEFLAGS += --print-directory endif export GENODE_DIR := $(realpath $(dir $(MAKEFILE_LIST))/../..)