From e7473337cbe083ccb62550c48f04437701858836 Mon Sep 17 00:00:00 2001 From: Michael Mueller Date: Thu, 6 Feb 2025 16:22:46 +0100 Subject: [PATCH] Integrate Tukija as a separate kernel rather than modifying base-nova. This may improve upgradability in the future. --- repos/base-tukija/README | 5 + repos/base-tukija/etc/specs.conf | 1 + .../include/spec/32bit/nova/syscalls.h | 483 ++++++++ .../include/spec/64bit/tukija/syscalls.h | 420 +++++++ repos/base-tukija/include/tukija/cap_map.h | 132 +++ .../include/tukija/capability_space.h | 47 + .../include/tukija/native_thread.h | 61 + .../include/tukija/receive_window.h | 245 ++++ repos/base-tukija/include/tukija/stdint.h | 28 + .../include/tukija/syscall-generic.h | 916 +++++++++++++++ repos/base-tukija/include/tukija/util.h | 97 ++ .../include/tukija_native_cpu/client.h | 33 + .../tukija_native_cpu/tukija_native_cpu.h | 43 + .../include/tukija_native_pd/client.h | 40 + .../tukija_native_pd/tukija_native_pd.h | 50 + .../base-tukija/lib/mk/base-tukija-common.mk | 21 + repos/base-tukija/lib/mk/base-tukija.mk | 12 + repos/base-tukija/lib/mk/core-tukija.inc | 80 ++ .../lib/mk/spec/x86_32/core-nova.mk | 5 + .../base-tukija/lib/mk/spec/x86_32/ld-nova.mk | 3 + .../lib/mk/spec/x86_32/startup-nova.mk | 1 + .../lib/mk/spec/x86_64/core-tukija.mk | 5 + .../lib/mk/spec/x86_64/ld-tukija.mk | 3 + .../lib/mk/spec/x86_64/startup-tukija.mk | 1 + repos/base-tukija/mk/spec/nova_x86_32.mk | 4 + repos/base-tukija/mk/spec/tukija_x86_64.mk | 4 + repos/base-tukija/patches/README | 2 + .../patches/allow_ioapic_access.patch | 14 + .../patches/allow_iommu_access.patch | 14 + repos/base-tukija/patches/syscall_msi.patch | 13 + repos/base-tukija/ports/tukija.hash | 1 + repos/base-tukija/ports/tukija.port | 11 + .../recipes/api/base-tukija/content.mk | 17 + .../base-tukija/recipes/api/base-tukija/hash | 1 + .../recipes/src/base-tukija/README | 9 + .../recipes/src/base-tukija/content.mk | 21 + .../base-tukija/recipes/src/base-tukija/hash | 1 + 
.../recipes/src/base-tukija/used_apis | 2 + repos/base-tukija/run/nova.run | 46 + repos/base-tukija/src/core/bios_data_area.cc | 19 + repos/base-tukija/src/core/core-bss.ld | 6 + repos/base-tukija/src/core/core_log_out.cc | 30 + repos/base-tukija/src/core/core_region_map.cc | 98 ++ .../src/core/core_rpc_cap_alloc.cc | 50 + .../src/core/include/imprint_badge.h | 33 + .../base-tukija/src/core/include/ipc_pager.h | 134 +++ .../base-tukija/src/core/include/irq_object.h | 55 + .../base-tukija/src/core/include/map_local.h | 58 + .../src/core/include/native_cpu_component.h | 47 + .../src/core/include/native_pd_component.h | 49 + .../base-tukija/src/core/include/nova_util.h | 296 +++++ repos/base-tukija/src/core/include/pager.h | 409 +++++++ repos/base-tukija/src/core/include/platform.h | 145 +++ .../src/core/include/platform_pd.h | 88 ++ .../src/core/include/platform_thread.h | 215 ++++ .../src/core/include/rpc_cap_factory.h | 71 ++ .../src/core/include/signal_broker.h | 139 +++ .../core/include/signal_source_component.h | 63 + .../src/core/include/spec/x86_32/nova_msr.h | 26 + .../src/core/include/spec/x86_64/nova_msr.h | 55 + repos/base-tukija/src/core/include/util.h | 61 + .../src/core/include/vm_session_component.h | 185 +++ .../src/core/io_mem_session_support.cc | 29 + repos/base-tukija/src/core/ipc_pager.cc | 81 ++ .../src/core/irq_session_component.cc | 278 +++++ .../src/core/native_cpu_component.cc | 46 + .../src/core/native_pd_component.cc | 53 + repos/base-tukija/src/core/pager.cc | 1016 +++++++++++++++++ .../src/core/pd_session_support.cc | 240 ++++ repos/base-tukija/src/core/platform.cc | 1007 ++++++++++++++++ repos/base-tukija/src/core/platform_pd.cc | 79 ++ .../base-tukija/src/core/platform_services.cc | 36 + repos/base-tukija/src/core/platform_thread.cc | 377 ++++++ .../src/core/ram_dataspace_support.cc | 104 ++ repos/base-tukija/src/core/rpc_cap_factory.cc | 98 ++ .../base-tukija/src/core/spec/x86_32/pager.cc | 62 + 
.../base-tukija/src/core/spec/x86_64/pager.cc | 81 ++ repos/base-tukija/src/core/thread_start.cc | 150 +++ repos/base-tukija/src/core/tukija/target.mk | 4 + .../src/core/vm_session_component.cc | 414 +++++++ .../src/include/base/internal/ipc.h | 125 ++ .../src/include/base/internal/lock_helper.h | 65 ++ .../src/include/base/internal/native_thread.h | 14 + .../src/include/base/internal/native_utcb.h | 46 + .../src/include/base/internal/parent_cap.h | 37 + .../include/base/internal/raw_write_string.h | 19 + .../src/include/base/internal/spin_lock.h | 105 ++ .../nova_native_vcpu/nova_native_vcpu.h | 29 + .../src/include/signal_source/client.h | 104 ++ .../signal_source/nova_signal_source.h | 30 + repos/base-tukija/src/kernel/tukija/target.mk | 62 + repos/base-tukija/src/lib/base/cap_map.cc | 191 ++++ repos/base-tukija/src/lib/base/capability.cc | 77 ++ repos/base-tukija/src/lib/base/ipc.cc | 206 ++++ .../src/lib/base/region_map_client.cc | 49 + .../base-tukija/src/lib/base/rpc_cap_alloc.cc | 74 ++ .../src/lib/base/rpc_entrypoint.cc | 254 +++++ .../src/lib/base/signal_transmitter.cc | 50 + repos/base-tukija/src/lib/base/sleep.cc | 37 + repos/base-tukija/src/lib/base/stack.cc | 120 ++ .../base-tukija/src/lib/base/thread_start.cc | 231 ++++ repos/base-tukija/src/lib/base/vm.cc | 776 +++++++++++++ repos/base-tukija/src/test/nova/ipc.cc | 56 + repos/base-tukija/src/test/nova/main.cc | 763 +++++++++++++ repos/base-tukija/src/test/nova/server.h | 110 ++ repos/base-tukija/src/test/nova/target.mk | 3 + repos/base-tukija/src/timer/nova/main.cc | 415 +++++++ repos/base-tukija/src/timer/nova/target.mk | 6 + tool/run/boot_dir/tukija | 160 +++ 109 files changed, 13593 insertions(+) create mode 100644 repos/base-tukija/README create mode 100644 repos/base-tukija/etc/specs.conf create mode 100644 repos/base-tukija/include/spec/32bit/nova/syscalls.h create mode 100644 repos/base-tukija/include/spec/64bit/tukija/syscalls.h create mode 100644 repos/base-tukija/include/tukija/cap_map.h 
create mode 100644 repos/base-tukija/include/tukija/capability_space.h create mode 100644 repos/base-tukija/include/tukija/native_thread.h create mode 100644 repos/base-tukija/include/tukija/receive_window.h create mode 100644 repos/base-tukija/include/tukija/stdint.h create mode 100644 repos/base-tukija/include/tukija/syscall-generic.h create mode 100644 repos/base-tukija/include/tukija/util.h create mode 100644 repos/base-tukija/include/tukija_native_cpu/client.h create mode 100644 repos/base-tukija/include/tukija_native_cpu/tukija_native_cpu.h create mode 100644 repos/base-tukija/include/tukija_native_pd/client.h create mode 100644 repos/base-tukija/include/tukija_native_pd/tukija_native_pd.h create mode 100644 repos/base-tukija/lib/mk/base-tukija-common.mk create mode 100644 repos/base-tukija/lib/mk/base-tukija.mk create mode 100644 repos/base-tukija/lib/mk/core-tukija.inc create mode 100644 repos/base-tukija/lib/mk/spec/x86_32/core-nova.mk create mode 100644 repos/base-tukija/lib/mk/spec/x86_32/ld-nova.mk create mode 100644 repos/base-tukija/lib/mk/spec/x86_32/startup-nova.mk create mode 100644 repos/base-tukija/lib/mk/spec/x86_64/core-tukija.mk create mode 100644 repos/base-tukija/lib/mk/spec/x86_64/ld-tukija.mk create mode 100644 repos/base-tukija/lib/mk/spec/x86_64/startup-tukija.mk create mode 100644 repos/base-tukija/mk/spec/nova_x86_32.mk create mode 100644 repos/base-tukija/mk/spec/tukija_x86_64.mk create mode 100644 repos/base-tukija/patches/README create mode 100644 repos/base-tukija/patches/allow_ioapic_access.patch create mode 100644 repos/base-tukija/patches/allow_iommu_access.patch create mode 100644 repos/base-tukija/patches/syscall_msi.patch create mode 100644 repos/base-tukija/ports/tukija.hash create mode 100644 repos/base-tukija/ports/tukija.port create mode 100644 repos/base-tukija/recipes/api/base-tukija/content.mk create mode 100644 repos/base-tukija/recipes/api/base-tukija/hash create mode 100644 
repos/base-tukija/recipes/src/base-tukija/README create mode 100644 repos/base-tukija/recipes/src/base-tukija/content.mk create mode 100644 repos/base-tukija/recipes/src/base-tukija/hash create mode 100644 repos/base-tukija/recipes/src/base-tukija/used_apis create mode 100644 repos/base-tukija/run/nova.run create mode 100644 repos/base-tukija/src/core/bios_data_area.cc create mode 100644 repos/base-tukija/src/core/core-bss.ld create mode 100644 repos/base-tukija/src/core/core_log_out.cc create mode 100644 repos/base-tukija/src/core/core_region_map.cc create mode 100644 repos/base-tukija/src/core/core_rpc_cap_alloc.cc create mode 100644 repos/base-tukija/src/core/include/imprint_badge.h create mode 100644 repos/base-tukija/src/core/include/ipc_pager.h create mode 100644 repos/base-tukija/src/core/include/irq_object.h create mode 100644 repos/base-tukija/src/core/include/map_local.h create mode 100644 repos/base-tukija/src/core/include/native_cpu_component.h create mode 100644 repos/base-tukija/src/core/include/native_pd_component.h create mode 100644 repos/base-tukija/src/core/include/nova_util.h create mode 100644 repos/base-tukija/src/core/include/pager.h create mode 100644 repos/base-tukija/src/core/include/platform.h create mode 100644 repos/base-tukija/src/core/include/platform_pd.h create mode 100644 repos/base-tukija/src/core/include/platform_thread.h create mode 100644 repos/base-tukija/src/core/include/rpc_cap_factory.h create mode 100644 repos/base-tukija/src/core/include/signal_broker.h create mode 100644 repos/base-tukija/src/core/include/signal_source_component.h create mode 100644 repos/base-tukija/src/core/include/spec/x86_32/nova_msr.h create mode 100644 repos/base-tukija/src/core/include/spec/x86_64/nova_msr.h create mode 100644 repos/base-tukija/src/core/include/util.h create mode 100644 repos/base-tukija/src/core/include/vm_session_component.h create mode 100644 repos/base-tukija/src/core/io_mem_session_support.cc create mode 100644 
repos/base-tukija/src/core/ipc_pager.cc create mode 100644 repos/base-tukija/src/core/irq_session_component.cc create mode 100644 repos/base-tukija/src/core/native_cpu_component.cc create mode 100644 repos/base-tukija/src/core/native_pd_component.cc create mode 100644 repos/base-tukija/src/core/pager.cc create mode 100644 repos/base-tukija/src/core/pd_session_support.cc create mode 100644 repos/base-tukija/src/core/platform.cc create mode 100644 repos/base-tukija/src/core/platform_pd.cc create mode 100644 repos/base-tukija/src/core/platform_services.cc create mode 100644 repos/base-tukija/src/core/platform_thread.cc create mode 100644 repos/base-tukija/src/core/ram_dataspace_support.cc create mode 100644 repos/base-tukija/src/core/rpc_cap_factory.cc create mode 100644 repos/base-tukija/src/core/spec/x86_32/pager.cc create mode 100644 repos/base-tukija/src/core/spec/x86_64/pager.cc create mode 100644 repos/base-tukija/src/core/thread_start.cc create mode 100644 repos/base-tukija/src/core/tukija/target.mk create mode 100644 repos/base-tukija/src/core/vm_session_component.cc create mode 100644 repos/base-tukija/src/include/base/internal/ipc.h create mode 100644 repos/base-tukija/src/include/base/internal/lock_helper.h create mode 100644 repos/base-tukija/src/include/base/internal/native_thread.h create mode 100644 repos/base-tukija/src/include/base/internal/native_utcb.h create mode 100644 repos/base-tukija/src/include/base/internal/parent_cap.h create mode 100644 repos/base-tukija/src/include/base/internal/raw_write_string.h create mode 100644 repos/base-tukija/src/include/base/internal/spin_lock.h create mode 100644 repos/base-tukija/src/include/nova_native_vcpu/nova_native_vcpu.h create mode 100644 repos/base-tukija/src/include/signal_source/client.h create mode 100644 repos/base-tukija/src/include/signal_source/nova_signal_source.h create mode 100644 repos/base-tukija/src/kernel/tukija/target.mk create mode 100644 repos/base-tukija/src/lib/base/cap_map.cc create 
mode 100644 repos/base-tukija/src/lib/base/capability.cc create mode 100644 repos/base-tukija/src/lib/base/ipc.cc create mode 100644 repos/base-tukija/src/lib/base/region_map_client.cc create mode 100644 repos/base-tukija/src/lib/base/rpc_cap_alloc.cc create mode 100644 repos/base-tukija/src/lib/base/rpc_entrypoint.cc create mode 100644 repos/base-tukija/src/lib/base/signal_transmitter.cc create mode 100644 repos/base-tukija/src/lib/base/sleep.cc create mode 100644 repos/base-tukija/src/lib/base/stack.cc create mode 100644 repos/base-tukija/src/lib/base/thread_start.cc create mode 100644 repos/base-tukija/src/lib/base/vm.cc create mode 100644 repos/base-tukija/src/test/nova/ipc.cc create mode 100644 repos/base-tukija/src/test/nova/main.cc create mode 100644 repos/base-tukija/src/test/nova/server.h create mode 100644 repos/base-tukija/src/test/nova/target.mk create mode 100644 repos/base-tukija/src/timer/nova/main.cc create mode 100644 repos/base-tukija/src/timer/nova/target.mk create mode 100644 tool/run/boot_dir/tukija diff --git a/repos/base-tukija/README b/repos/base-tukija/README new file mode 100644 index 0000000000..938a359142 --- /dev/null +++ b/repos/base-tukija/README @@ -0,0 +1,5 @@ +This repository contains the port of Genode to the NOVA microhypervisor. + +For more information on this kernel, please refer to the official website. + +:[http://hypervisor.org]: Official website for the NOVA microhypervisor. 
diff --git a/repos/base-tukija/etc/specs.conf b/repos/base-tukija/etc/specs.conf new file mode 100644 index 0000000000..8ffffc6f7b --- /dev/null +++ b/repos/base-tukija/etc/specs.conf @@ -0,0 +1 @@ +SPECS += tukija diff --git a/repos/base-tukija/include/spec/32bit/nova/syscalls.h b/repos/base-tukija/include/spec/32bit/nova/syscalls.h new file mode 100644 index 0000000000..22de9e655f --- /dev/null +++ b/repos/base-tukija/include/spec/32bit/nova/syscalls.h @@ -0,0 +1,483 @@ +/* + * \brief Syscall bindings for the NOVA microhypervisor + * \author Norman Feske + * \author Sebastian Sumpf + * \date 2009-12-27 + */ + +/* + * Copyright (c) 2009 Genode Labs + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following + * conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef _INCLUDE__SPEC__32BIT__NOVA__SYSCALLS_H_ +#define _INCLUDE__SPEC__32BIT__NOVA__SYSCALLS_H_ + +#include +#include + +#define ALWAYS_INLINE __attribute__((always_inline)) + +namespace Nova { + + ALWAYS_INLINE + inline unsigned eax(Syscall s, uint8_t flags, unsigned sel) + { + return sel << 8 | (flags & 0xf) << 4 | s; + } + + + ALWAYS_INLINE + inline uint8_t syscall_0(Syscall s, uint8_t flags, unsigned sel = 0) + { + mword_t status = eax(s, flags, sel); + + asm volatile (" mov %%esp, %%ecx;" + " call 0f;" + "0:" + " addl $(1f-0b), (%%esp);" + " mov (%%esp), %%edx;" + " sysenter;" + "1:" + : "+a" (status) + : + : "ecx", "edx", "memory"); + return (uint8_t)status; + } + + + ALWAYS_INLINE + inline uint8_t syscall_1(Syscall s, uint8_t flags, mword_t sel, mword_t p1, + mword_t * p2 = 0) + { + mword_t status = eax(s, flags, sel); + + asm volatile (" mov %%esp, %%ecx;" + " call 0f;" + "0:" + " addl $(1f-0b), (%%esp);" + " mov (%%esp), %%edx;" + " sysenter;" + "1:" + : "+a" (status), "+D" (p1) + : + : "ecx", "edx", "memory"); + if (p2) *p2 = p1; + return (uint8_t)status; + } + + + ALWAYS_INLINE + inline uint8_t syscall_2(Syscall s, uint8_t flags, unsigned sel, mword_t p1, mword_t p2) + { + mword_t status = eax(s, flags, sel); + + asm volatile (" mov %%esp, %%ecx;" + " call 0f;" + "0:" + " addl $(1f-0b), (%%esp);" + " mov (%%esp), %%edx;" + " sysenter;" + "1:" + : "+a" (status) + : "D" (p1), "S" (p2) + : "ecx", "edx"); + return (uint8_t)status; + } + + + ALWAYS_INLINE + inline uint8_t syscall_3(Syscall s, uint8_t flags, unsigned sel, + mword_t p1, mword_t p2, mword_t p3) + { + mword_t status = eax(s, flags, sel); + + asm volatile (" push %%ebx;" + " mov %%edx, %%ebx;" + " mov %%esp, %%ecx;" + " call 0f;" + "0:" + " addl $(1f-0b), (%%esp);" + " mov (%%esp), %%edx;" + " sysenter;" + "1:" + " pop %%ebx;" + : "+a" (status), "+d" (p3) + : "D" (p1), "S" (p2) + : "ecx"); + return (uint8_t)status; + } + + + ALWAYS_INLINE + inline uint8_t syscall_4(Syscall s, uint8_t 
flags, unsigned sel, + mword_t p1, mword_t p2, mword_t p3, mword_t p4) + { + mword_t status = eax(s, flags, sel); + + asm volatile (" push %%ebp;" + " push %%ebx;" + + " mov %%ecx, %%ebx;" + " mov %%esp, %%ecx;" + " mov %%edx, %%ebp;" + + " call 0f;" + "0:" + " addl $(1f-0b), (%%esp);" + " mov (%%esp), %%edx;" + "sysenter;" + "1:" + + " pop %%ebx;" + " pop %%ebp;" + : "+a" (status), "+c" (p3), "+d" (p4) + : "D" (p1), "S" (p2) + : "memory"); + return (uint8_t)status; + } + + ALWAYS_INLINE + inline uint8_t syscall_5(Syscall s, uint8_t flags, mword_t sel, + mword_t &p1, mword_t &p2, mword_t p3 = ~0UL) + { + mword_t status = eax(s, flags, sel); + + asm volatile (" push %%ebx;" + + " mov %%ecx, %%ebx;" + " mov %%esp, %%ecx;" + + " call 0f;" + "0:" + " addl $(1f-0b), (%%esp);" + " mov (%%esp), %%edx;" + "sysenter;" + "1:" + + " pop %%ebx;" + : "+a" (status), "+D" (p1), "+S" (p2), "+c" (p3) + : + : "edx", "memory"); + return (uint8_t)status; + } + + + ALWAYS_INLINE + inline uint8_t syscall_6(Syscall s, uint8_t flags, unsigned sel, + mword_t &p1, mword_t &p2, mword_t &p3, + mword_t &p4) + { + mword_t status = eax(s, flags, sel); + + asm volatile (" push %%ebp;" + " push %%ebx;" + + " mov %%ecx, %%ebx;" + " mov %%esp, %%ecx;" + " mov %%edx, %%ebp;" + + " call 0f;" + "0:" + " addl $(1f-0b), (%%esp);" + " mov (%%esp), %%edx;" + "sysenter;" + "1:" + + " mov %%ebp, %%edx;" + " mov %%ebx, %%ecx;" + " pop %%ebx;" + " pop %%ebp;" + : "+a" (status), "+D" (p1), "+S" (p2), "+c" (p3), + "+d" (p4) + : + : "memory"); + return (uint8_t)status; + } + + + ALWAYS_INLINE + inline uint8_t call(unsigned pt) + { + return syscall_1(NOVA_CALL, 0, pt, 0); + } + + + ALWAYS_INLINE + __attribute__((noreturn)) + inline void reply(void *next_sp, unsigned sm = 0) + { + mword_t reg = eax(NOVA_REPLY, 0, sm); + + asm volatile ("sysenter;" + : + : "a" (reg), "c" (next_sp) + : "memory"); + __builtin_unreachable(); + } + + + ALWAYS_INLINE + inline uint8_t create_pd(unsigned pd0, unsigned pd, Crd crd, + 
unsigned short lower_limit, unsigned upper_limit) + { + return syscall_3(NOVA_CREATE_PD, 0, pd0, pd, crd.value(), + upper_limit << 16 | lower_limit); + } + + + /** + * Create an EC. + * + * \param ec two selectors - ec && ec + 1 + * First selector must be unused and second selector is + * either unused or must be a valid portal selector. + * The thread will call this portal if the PD it runs in runs + * out of kernel memory. + * \param pd selector of PD the EC will created in + * \param cpu CPU number the EC will run on + * \param utcb PD local address where the UTCB of the EC will be appear + * \param esp initial stack address + * \param evt base selector for all exception portals of the EC + * \param global if true - thread requires a SC to be runnable + * if false - thread is runnable solely if it receives a IPC + * (worker thread) + */ + ALWAYS_INLINE + inline uint8_t create_ec(mword_t ec, mword_t pd, mword_t cpu, mword_t utcb, + mword_t esp, mword_t evt, bool global = false) + { + return syscall_4(NOVA_CREATE_EC, global, ec, pd, + (cpu & 0xfff) | (utcb & ~0xfff), + esp, evt); + } + + + ALWAYS_INLINE + inline uint8_t util_time(Syscall const syscall, mword_t const cap, + uint8_t const op, unsigned long long &time) + { + mword_t time_h = 0, time_l = 0; + uint8_t res = syscall_5(syscall, op, cap, time_h, time_l); + time = (uint64_t(time_h) << 32ULL) | uint64_t(time_l); + return res; + } + + + ALWAYS_INLINE + inline uint8_t sc_ec_time(mword_t const cap_sc, mword_t const cap_ec, + unsigned long long &time_sc, + unsigned long long &time_ec) + { + mword_t time_h_sc = cap_ec, time_l_sc = 0; + mword_t time_h_ec = 0, time_l_ec = 0; + uint8_t res = syscall_6(NOVA_SC_CTRL, Sc_op::SC_EC_TIME, cap_sc, + time_h_sc, time_l_sc, time_h_ec, + time_l_ec); + time_sc = (uint64_t(time_h_sc) << 32ULL) | uint64_t(time_l_sc); + time_ec = (uint64_t(time_h_ec) << 32ULL) | uint64_t(time_l_ec); + return res; + } + + + ALWAYS_INLINE + inline uint8_t ec_ctrl(Ec_op op, mword_t ec = ~0UL, 
mword_t para = ~0UL, + Crd crd = 0) + { + if (op == EC_TIME) + return NOVA_INV_HYPERCALL; + + return syscall_2(NOVA_EC_CTRL, op, ec, para, crd.value()); + } + + + ALWAYS_INLINE + inline uint8_t ec_time(mword_t const ec, unsigned long long &time) + { + return util_time(NOVA_EC_CTRL, ec, Ec_op::EC_TIME, time); + } + + + ALWAYS_INLINE + inline uint8_t create_sc(unsigned sc, unsigned pd, unsigned ec, Qpd qpd) + { + return syscall_3(NOVA_CREATE_SC, 0, sc, pd, ec, qpd.value()); + } + + + ALWAYS_INLINE + inline uint8_t pt_ctrl(mword_t pt, mword_t pt_id) + { + return syscall_1(NOVA_PT_CTRL, 0, pt, pt_id); + } + + + ALWAYS_INLINE + inline uint8_t create_pt(unsigned pt, unsigned pd, unsigned ec, Mtd mtd, + mword_t eip, bool id_equal_pt = true) + { + uint8_t res = syscall_4(NOVA_CREATE_PT, 0, pt, pd, ec, mtd.value(), eip); + + if (!id_equal_pt || res != NOVA_OK) + return res; + + return pt_ctrl(pt, pt); + } + + + ALWAYS_INLINE + inline uint8_t create_sm(unsigned sm, unsigned pd, mword_t cnt) + { + return syscall_3(NOVA_CREATE_SM, 0, sm, pd, cnt, 0); + } + + + ALWAYS_INLINE + inline uint8_t create_si(mword_t si, mword_t pd, mword_t value, mword_t sm) + { + return syscall_3(NOVA_CREATE_SM, 0, si, pd, value, sm); + } + + + /** + * Revoke memory, capabilities or i/o ports from a PD + * + * \param crd describes region and type of resource + * \param self also revoke from source PD iif self == true + * \param remote if true the 'pd' parameter below is used, otherwise + * current PD is used as source PD + * \param pd selector describing remote PD + * \param sm SM selector which gets an up() by the kernel if the + * memory of the current revoke invocation gets freed up + * (end of RCU period) + * \param kim keep_in_mdb - if set to true the kernel will make the + * resource inaccessible for solely for the specified pd. + * All already beforehand delegated resources will not be + * changed, e.g. revoked. 
All rights of the local resource + * will be removed (independent of what is specified by crd). + */ + ALWAYS_INLINE + inline uint8_t revoke(Crd crd, bool self = true, bool remote = false, + mword_t pd = 0, mword_t sm = 0, bool kim = false) + { + uint8_t flags = self ? 0x1 : 0; + + if (remote) + flags |= 0x2; + if (kim) + flags |= 0x4; + + mword_t value_crd = crd.value(); + return syscall_5(NOVA_REVOKE, flags, sm, value_crd, pd); + } + + + /* + * Shortcut for revoke, where solely the local cap should be revoked and + * not all subsequent delegations of the local cap. + */ + ALWAYS_INLINE + inline uint8_t drop(Crd crd) { + return revoke(crd, true, false, 0, 0, true); } + + + ALWAYS_INLINE + inline uint8_t lookup(Crd &crd) + { + mword_t crd_r; + uint8_t res = syscall_1(NOVA_MISC, 0, 0, crd.value(), &crd_r); + crd = Crd(crd_r); + return res; + } + + + ALWAYS_INLINE + inline uint8_t delegate(mword_t pd_snd, mword_t pd_dst, Crd crd_dst) + { + return syscall_2(NOVA_MISC, 1, pd_snd, crd_dst.value(), pd_dst); + } + + + ALWAYS_INLINE + inline uint8_t acpi_suspend(mword_t sm_auth_acpi, mword_t sleep_state_a, + mword_t sleep_state_b) + { + return syscall_2(NOVA_MISC, 2, sm_auth_acpi, sleep_state_a, sleep_state_b); + } + + + ALWAYS_INLINE + inline uint8_t sm_ctrl(unsigned sm, Sem_op op, unsigned long long timeout = 0) + { + return syscall_2(NOVA_SM_CTRL, op, sm, (mword_t)(timeout >> 32), (mword_t)timeout); + } + + + ALWAYS_INLINE + inline uint8_t si_ctrl(mword_t sm, Sem_op op, mword_t &value, mword_t &cnt) + { + return syscall_5(NOVA_SM_CTRL, op, sm, value, cnt); + } + + + ALWAYS_INLINE + inline uint8_t pd_ctrl(mword_t pd_src, Pd_op op, mword_t pd_dst, + mword_t transfer) + { + return syscall_5(NOVA_PD_CTRL, op, pd_src, pd_dst, transfer); + } + + + ALWAYS_INLINE + inline uint8_t pd_ctrl_debug(mword_t pd, mword_t &limit, mword_t &usage) + { + return syscall_5(NOVA_PD_CTRL, Pd_op::PD_DEBUG, pd, limit, usage); + } + + + ALWAYS_INLINE + inline uint8_t assign_pci(mword_t pd, 
mword_t mem, mword_t rid) + { + return syscall_2(NOVA_ASSIGN_PCI, 0, pd, mem, rid); + } + + + ALWAYS_INLINE + inline uint8_t assign_gsi(mword_t sm, mword_t dev, mword_t cpu, + mword_t &msi_addr, mword_t &msi_data, + mword_t si = ~0UL, Gsi_flags flags = Gsi_flags()) + { + msi_addr = dev; + msi_data = cpu; + + return syscall_5(NOVA_ASSIGN_GSI, flags.value(), sm, msi_addr, msi_data, si); + } + + + ALWAYS_INLINE + inline uint8_t sc_ctrl(unsigned const sc, unsigned long long &time, uint8_t op = 0) + { + return util_time(NOVA_SC_CTRL, sc, op, time); + } +} +#endif /* _INCLUDE__SPEC__32BIT__NOVA__SYSCALLS_H_ */ diff --git a/repos/base-tukija/include/spec/64bit/tukija/syscalls.h b/repos/base-tukija/include/spec/64bit/tukija/syscalls.h new file mode 100644 index 0000000000..e1ea3d6316 --- /dev/null +++ b/repos/base-tukija/include/spec/64bit/tukija/syscalls.h @@ -0,0 +1,420 @@ +/* + * \brief Syscall bindings for the NOVA microhypervisor x86_64 + * \author Norman Feske + * \author Sebastian Sumpf + * \author Alexander Boettcher + * \author Benjamin Lamowski + * \date 2012-06-06 + */ + +/* + * Copyright (c) 2012-2023 Genode Labs + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following + * conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#ifndef _INCLUDE__SPEC__64BIT__NOVA__SYSCALLS_H_ +#define _INCLUDE__SPEC__64BIT__NOVA__SYSCALLS_H_ + +#include +#include + +#define ALWAYS_INLINE __attribute__((always_inline)) + +namespace Tukija { + + ALWAYS_INLINE + inline mword_t rdi(Syscall s, uint8_t flags, mword_t sel) + { + return sel << 9 | (flags & 0xf) << 5 | s; + } + + + ALWAYS_INLINE + inline uint8_t syscall_0(Syscall s, uint8_t flags, mword_t sel = 0) + { + mword_t status = rdi(s, flags, sel); + + asm volatile ("syscall" + : "+D" (status) + : + : "rcx", "r11", "memory"); + return (uint8_t)status; + } + + + ALWAYS_INLINE + inline uint8_t syscall_1(Syscall s, uint8_t flags, mword_t sel, mword_t p1, + mword_t * p2 = 0) + { + mword_t status = rdi(s, flags, sel); + + asm volatile ("syscall" + : "+D" (status), "+S" (p1) + : + : "rcx", "r11", "memory"); + if (p2) *p2 = p1; + return (uint8_t)status; + } + + + ALWAYS_INLINE + inline uint8_t syscall_2(Syscall s, uint8_t flags, mword_t sel, mword_t p1, + mword_t p2) + { + mword_t status = rdi(s, flags, sel); + + asm volatile ("syscall" + : "+D" (status) + : "S" (p1), "d" (p2) + : "rcx", "r11", "memory"); + return (uint8_t)status; + } + + + ALWAYS_INLINE + inline uint8_t syscall_3(Syscall s, uint8_t flags, unsigned sel, + mword_t p1, mword_t p2, mword_t p3) + { + mword_t status = rdi(s, flags, sel); + + asm volatile ("syscall" + : "+D" (status) + : "S" (p1), "d" (p2), "a" (p3) + : "rcx", "r11", "memory"); + return (uint8_t)status; + } + + + ALWAYS_INLINE + inline uint8_t syscall_4(Syscall s, uint8_t flags, mword_t sel, + mword_t p1, mword_t p2, mword_t p3, mword_t p4) + { + mword_t status = rdi(s, flags, sel); + register mword_t r8 asm ("r8") = p4; + + asm volatile ("syscall;" + : "+D" 
(status) + : "S" (p1), "d" (p2), "a" (p3), "r" (r8) + : "rcx", "r11", "memory"); + return (uint8_t)status; + } + + + ALWAYS_INLINE + inline uint8_t syscall_5(Syscall s, uint8_t flags, mword_t sel, + mword_t &p1, mword_t &p2, mword_t p3 = ~0UL) + { + mword_t status = rdi(s, flags, sel); + + asm volatile ("syscall" + : "+D" (status), "+S"(p1), "+d"(p2) + : "a" (p3) + : "rcx", "r11", "memory"); + return (uint8_t)status; + } + + ALWAYS_INLINE + inline uint8_t syscall_6(Syscall s, uint8_t flags, mword_t sel, + mword_t &p1, mword_t &p2, mword_t &p3, + mword_t &p4) + { + mword_t status = rdi(s, flags, sel); + + register mword_t r8 asm ("r8") = p4; + + asm volatile ("syscall" + : "+D" (status), "+S"(p1), "+d"(p2), "+a"(p3), "+r"(r8) + : + : "rcx", "r11", "memory"); + + p4 = r8; + + return (uint8_t)status; + } + + ALWAYS_INLINE + inline uint8_t call(mword_t pt) + { + return syscall_1(NOVA_CALL, 0, pt, 0); + } + + + ALWAYS_INLINE + __attribute__((noreturn)) + inline void reply(void *next_sp, unsigned long sm = 0) + { + mword_t syscall = rdi(NOVA_REPLY, 0, sm); + + asm volatile ("mov %1, %%rsp;" + "syscall;" + : + : "D" (syscall), "ir" (next_sp) + : "memory"); + __builtin_unreachable(); + } + + + ALWAYS_INLINE + inline uint8_t create_pd(mword_t pd0, mword_t pd, Crd crd, + unsigned lower_limit, unsigned long upper_limit) + { + return syscall_3(NOVA_CREATE_PD, 0, (unsigned)pd0, pd, crd.value(), + upper_limit << 32 | lower_limit); + } + + + /** + * Create an EC. + * + * \param ec two selectors - ec && ec + 1 + * First selector must be unused and second selector is + * either unused or must be a valid portal selector. + * The thread will call this portal if the PD it runs in runs + * out of kernel memory. 
+ * \param pd selector of PD the EC will created in + * \param cpu CPU number the EC will run on + * \param utcb PD local address where the UTCB of the EC will be appear + * \param esp initial stack address + * \param evt base selector for all exception portals of the EC + * \param global if true - thread requires a SC to be runnable + * if false - thread is runnable solely if it receives a IPC + * (worker thread) + */ + ALWAYS_INLINE + inline uint8_t create_ec(mword_t ec, mword_t pd, mword_t cpu, mword_t utcb, + mword_t esp, mword_t evt, bool global = false) + { + return syscall_4(NOVA_CREATE_EC, global, ec, pd, + (cpu & 0xfff) | (utcb & ~0xfff), + esp, evt); + } + + + ALWAYS_INLINE + inline uint8_t util_time(Syscall const syscall, mword_t const cap, + uint8_t const op, unsigned long long &time) + { + mword_t time_h = 0, time_l = 0; + uint8_t res = syscall_5(syscall, op, cap, time_h, time_l); + time = (time_h << 32ULL) | (time_l & 0xFFFFFFFFULL); + return res; + } + + + ALWAYS_INLINE + inline uint8_t sc_ec_time(mword_t const cap_sc, mword_t const cap_ec, + unsigned long long &time_sc, + unsigned long long &time_ec) + { + mword_t time_h_sc = cap_ec, time_l_sc = 0; + mword_t time_h_ec = 0, time_l_ec = 0; + uint8_t res = syscall_6(NOVA_SC_CTRL, Sc_op::SC_EC_TIME, cap_sc, + time_h_sc, time_l_sc, time_h_ec, + time_l_ec); + time_sc = (time_h_sc << 32ULL) | (time_l_sc & 0xFFFFFFFFULL); + time_ec = (time_h_ec << 32ULL) | (time_l_ec & 0xFFFFFFFFULL); + return res; + } + + + ALWAYS_INLINE + inline uint8_t ec_ctrl(Ec_op op, mword_t ec = ~0UL, mword_t para = ~0UL, + Crd crd = 0) + { + if (op == EC_TIME) + return NOVA_INV_HYPERCALL; + + return syscall_2(NOVA_EC_CTRL, op, ec, para, crd.value()); + } + + + ALWAYS_INLINE + inline uint8_t ec_time(mword_t const ec, unsigned long long &time) + { + return util_time(NOVA_EC_CTRL, ec, Ec_op::EC_TIME, time); + } + + + ALWAYS_INLINE + inline uint8_t create_sc(mword_t sc, mword_t pd, mword_t ec, Qpd qpd) + { + return 
syscall_3(NOVA_CREATE_SC, 0, (unsigned)sc, pd, ec, qpd.value()); + } + + + ALWAYS_INLINE + inline uint8_t pt_ctrl(mword_t pt, mword_t pt_id) + { + return syscall_1(NOVA_PT_CTRL, 0, pt, pt_id); + } + + + ALWAYS_INLINE + inline uint8_t create_pt(mword_t pt, mword_t pd, mword_t ec, Mtd mtd, + mword_t rip, bool id_equal_pt = true) + { + uint8_t res = syscall_4(NOVA_CREATE_PT, 0, pt, pd, ec, mtd.value(), rip); + + if (!id_equal_pt || res != NOVA_OK) + return res; + + return pt_ctrl(pt, pt); + } + + + ALWAYS_INLINE + inline uint8_t create_sm(mword_t sm, mword_t pd, mword_t cnt) + { + return syscall_3(NOVA_CREATE_SM, 0, (unsigned)sm, pd, cnt, 0); + } + + + ALWAYS_INLINE + inline uint8_t create_si(mword_t si, mword_t pd, mword_t value, mword_t sm) + { + return syscall_3(NOVA_CREATE_SM, 0, (unsigned)si, pd, value, sm); + } + + + /** + * Revoke memory, capabilities or i/o ports from a PD + + + \param crd describes region and type of resource + * \param self also revoke from source PD iff self == true + * \param remote if true the 'pd' parameter below is used, otherwise + * current PD is used as source PD + * \param pd selector describing remote PD + * \param sm SM selector which gets an up() by the kernel if the + * memory of the current revoke invocation gets freed up + * (end of RCU period) + * \param kim keep_in_mdb - if set to true the kernel will make the + * resource inaccessible solely inside the specified pd. + * All already beforehand delegated resources will not be + * changed, e.g. revoked. All rights of the local resource + * will be removed (independent of what is specified by crd). + */ + ALWAYS_INLINE + inline uint8_t revoke(Crd crd, bool self = true, bool remote = false, + mword_t pd = 0, mword_t sm = 0, bool kim = false) + { + uint8_t flags = self ? 
0x1 : 0; + + if (remote) + flags |= 0x2; + + if (kim) + flags |= 0x4; + + mword_t value_crd = crd.value(); + return syscall_5(NOVA_REVOKE, flags, sm, value_crd, pd); + } + + + /* + * Shortcut for revoke, where solely the local cap should be revoked and + * not all subsequent delegations of the local cap. + */ + ALWAYS_INLINE + inline uint8_t drop(Crd crd) { + return revoke(crd, true, false, 0, 0, true); } + + + ALWAYS_INLINE + inline uint8_t lookup(Crd &crd) + { + mword_t crd_r; + uint8_t res = syscall_1(NOVA_MISC, 0, 0, crd.value(), &crd_r); + crd = Crd(crd_r); + return res; + } + + + ALWAYS_INLINE + inline uint8_t delegate(mword_t pd_snd, mword_t pd_dst, Crd crd_dst) + { + return syscall_2(NOVA_MISC, 1, pd_snd, crd_dst.value(), pd_dst); + } + + + ALWAYS_INLINE + inline uint8_t acpi_suspend(mword_t sm_auth_acpi, mword_t sleep_state_a, + mword_t sleep_state_b) + { + return syscall_2(NOVA_MISC, 2, sm_auth_acpi, sleep_state_a, sleep_state_b); + } + + ALWAYS_INLINE + inline uint8_t sm_ctrl(mword_t sm, Sem_op op, unsigned long long timeout = 0) + { + return syscall_2(NOVA_SM_CTRL, op, sm, timeout >> 32, + timeout & 0xFFFFFFFFULL); + } + + + ALWAYS_INLINE + inline uint8_t si_ctrl(mword_t sm, Sem_op op, mword_t &value, mword_t &cnt) + { + return syscall_5(NOVA_SM_CTRL, op, sm, value, cnt); + } + + + ALWAYS_INLINE + inline uint8_t sc_ctrl(mword_t const sc, unsigned long long &time, + Sc_op const op) + { + return util_time(NOVA_SC_CTRL, sc, op, time); + } + + + ALWAYS_INLINE + inline uint8_t pd_ctrl(mword_t pd_src, Pd_op op, mword_t pd_dst, mword_t transfer) + { + return syscall_5(NOVA_PD_CTRL, op, pd_src, pd_dst, transfer); + } + + + ALWAYS_INLINE + inline uint8_t pd_ctrl_debug(mword_t pd, mword_t &limit, mword_t &usage) + { + return syscall_5(NOVA_PD_CTRL, Pd_op::PD_DEBUG, pd, limit, usage); + } + + + ALWAYS_INLINE + inline uint8_t assign_pci(mword_t pd, mword_t mem, mword_t rid) + { + return syscall_2(NOVA_ASSIGN_PCI, 0, pd, mem, rid); + } + + + ALWAYS_INLINE + inline 
uint8_t assign_gsi(mword_t sm, mword_t dev, mword_t cpu, + mword_t &msi_addr, mword_t &msi_data, + mword_t si = ~0UL, Gsi_flags flags = Gsi_flags()) + { + msi_addr = dev; + msi_data = cpu; + return syscall_5(NOVA_ASSIGN_GSI, flags.value(), sm, msi_addr, msi_data, si); + } +} +#endif /* _INCLUDE__SPEC__64BIT__NOVA__SYSCALLS_H_ */ diff --git a/repos/base-tukija/include/tukija/cap_map.h b/repos/base-tukija/include/tukija/cap_map.h new file mode 100644 index 0000000000..4ee78bf29e --- /dev/null +++ b/repos/base-tukija/include/tukija/cap_map.h @@ -0,0 +1,132 @@ +/* + * \brief Mapping of Genode's capability names to capabilities selectors. + * \author Alexander Boettcher + * \date 2013-08-26 + * + * This header is public to allow user-level VMMs to manually allocate windows + * of consecutive selectors (for virtualization event portals) in the + * component's capability space. + */ + +/* + * Copyright (C) 2013-2017 Genode Labs GmbH + * + * This file is part of the Genode OS framework, which is distributed + * under the terms of the GNU Affero General Public License version 3. 
+ */ + +#ifndef _INCLUDE__NOVA__CAP_MAP_H_ +#define _INCLUDE__NOVA__CAP_MAP_H_ + +/* Genode includes */ +#include + +#include + +#include +#include + +namespace Genode { + + class Cap_range : public Avl_node { + + private: + + Mutex _mutex { }; + addr_t _base = 0; + addr_t _last = 0; + + enum { + HEADER = sizeof(_base) + sizeof(_mutex) + sizeof(_last), + CAP_RANGE_SIZE = 4096, + WORDS = (CAP_RANGE_SIZE - HEADER - sizeof(Avl_node)) / sizeof(addr_t), + }; + + uint16_t _cap_array[WORDS * sizeof(addr_t) / 2]; + + bool _match(addr_t id) { + return _base <= id && id < _base + elements(); }; + + public: + + Cap_range(addr_t base) : _base(base) + { + static_assert(sizeof(*this) == CAP_RANGE_SIZE, + "Cap_range misconfigured"); + + for (unsigned i = 0; i < elements(); i++) + _cap_array[i] = 0; + } + + addr_t base() const { return _base; } + unsigned elements() const { return sizeof(_cap_array) / sizeof(_cap_array[0]); } + + Cap_range *find_by_id(addr_t); + + void inc(unsigned id); + void dec(unsigned id, bool revoke = true, unsigned num_log2 = 0); + + addr_t alloc(size_t const num_log2); + + /************************ + ** Avl node interface ** + ************************/ + + bool higher(Cap_range *n) { return n->_base > _base; } + + }; + + + class Cap_index + { + private: + + Cap_range * _range; + addr_t _local_name; + + public: + + Cap_index(Cap_range *range, addr_t local_name) + : _range(range), _local_name(local_name) {} + + bool valid() const { return _range; } + + inline void inc() + { + if (_range) + _range->inc((unsigned)(_local_name - _range->base())); + } + + inline void dec() + { + if (_range) + _range->dec((unsigned)(_local_name - _range->base())); + } + }; + + + class Capability_map : private Noncopyable + { + private: + + Avl_tree _tree { }; + + public: + + Cap_index find(addr_t local_sel); + + void insert(Cap_range &range) { _tree.insert(&range); } + + addr_t insert(size_t num_log_2 = 0, addr_t cap = ~0UL); + + void remove(addr_t sel, uint8_t num_log_2 = 0, 
bool revoke = true); + }; + + + /** + * Get the global Capability_map of the process. + */ + Capability_map &cap_map(); +} + +#endif /* _INCLUDE__NOVA__CAP_MAP_H_ */ diff --git a/repos/base-tukija/include/tukija/capability_space.h b/repos/base-tukija/include/tukija/capability_space.h new file mode 100644 index 0000000000..c84cc8a321 --- /dev/null +++ b/repos/base-tukija/include/tukija/capability_space.h @@ -0,0 +1,47 @@ +/* + * \brief Capability helper + * \author Norman Feske + * \date 2016-06-27 + */ + +/* + * Copyright (C) 2016-2017 Genode Labs GmbH + * + * This file is part of the Genode OS framework, which is distributed + * under the terms of the GNU Affero General Public License version 3. + */ + +#ifndef _INCLUDE__NOVA__CAPABILITY_SPACE_H_ +#define _INCLUDE__NOVA__CAPABILITY_SPACE_H_ + +/* Genode includes */ +#include + +/* NOVA includes */ +#include + +namespace Genode { namespace Capability_space { + + static constexpr unsigned long INVALID_INDEX = ~0UL; + + using Ipc_cap_data = Tukija::Crd; + + static inline Tukija::Crd crd(Native_capability const &cap) + { + /* + * We store the 'Nova::Crd' value in place of the 'Data' pointer. + */ + addr_t value = (addr_t)cap.data(); + Tukija::Crd crd = *(Tukija::Crd *)&value; + return crd; + } + + static inline Native_capability import(addr_t sel, unsigned rights = 0x1f) + { + Tukija::Obj_crd const crd = (sel == INVALID_INDEX) + ? 
Tukija::Obj_crd() : Tukija::Obj_crd(sel, 0, rights); + return Native_capability((Native_capability::Data *)crd.value()); + } +} } + +#endif /* _INCLUDE__NOVA__CAPABILITY_SPACE_H_ */ diff --git a/repos/base-tukija/include/tukija/native_thread.h b/repos/base-tukija/include/tukija/native_thread.h new file mode 100644 index 0000000000..fb846523e4 --- /dev/null +++ b/repos/base-tukija/include/tukija/native_thread.h @@ -0,0 +1,61 @@ +/* + * \brief Kernel-specific thread meta data + * \author Norman Feske + * \date 2016-03-11 + * + * On most platforms, the 'Genode::Native_thread' type is private to the + * base framework. However, on NOVA, we make the type publicly available to + * expose the low-level thread-specific capability selectors to user-level + * virtual-machine monitors (Seoul or VirtualBox). + */ + +/* + * Copyright (C) 2016-2017 Genode Labs GmbH + * + * This file is part of the Genode OS framework, which is distributed + * under the terms of the GNU Affero General Public License version 3. + */ + +#ifndef _INCLUDE__NOVA__NATIVE_THREAD_H_ +#define _INCLUDE__NOVA__NATIVE_THREAD_H_ + +#include +#include + +namespace Genode { struct Native_thread; } + +struct Genode::Native_thread +{ + static constexpr unsigned long INVALID_INDEX = ~0UL; + + addr_t ec_sel { 0 }; /* selector for execution context */ + addr_t exc_pt_sel { 0 }; /* base of event portal window */ + addr_t initial_ip { 0 }; /* initial IP of local thread */ + + /* receive window for capability selectors received at the server side */ + Receive_window server_rcv_window { }; + + /* + * Designated selector to populate with the result of an IPC call + * + * By default, the client-side receive window for delegated selectors + * is automatically allocated within the component's selector space. + * However, in special cases such as during the initialization of a + * user-level VMM (ports/include/vmm/vcpu_dispatcher.h), the targeted + * selector is defined manually. 
The 'client_rcv_sel' provides the + * hook for such a manual allocation. If it contains a valid selector + * value, the value is used as the basis of the receive window of an + * 'ipc_call'. + */ + addr_t client_rcv_sel = INVALID_INDEX; + + void reset_client_rcv_sel() { client_rcv_sel = INVALID_INDEX; } + + Native_capability pager_cap { }; + + Native_thread() : ec_sel(INVALID_INDEX), + exc_pt_sel(INVALID_INDEX), + initial_ip(0) { } +}; + +#endif /* _INCLUDE__NOVA__NATIVE_THREAD_H_ */ diff --git a/repos/base-tukija/include/tukija/receive_window.h b/repos/base-tukija/include/tukija/receive_window.h new file mode 100644 index 0000000000..51882c9af6 --- /dev/null +++ b/repos/base-tukija/include/tukija/receive_window.h @@ -0,0 +1,245 @@ +/* + * \brief Receive window for capability selectors + * \author Alexander Boettcher + * \author Norman Feske + * \date 2016-03-22 + */ + +/* + * Copyright (C) 2016-2017 Genode Labs GmbH + * + * This file is part of the Genode OS framework, which is distributed + * under the terms of the GNU Affero General Public License version 3. + */ + +#ifndef _INCLUDE__NOVA__RECEIVE_WINDOW_H_ +#define _INCLUDE__NOVA__RECEIVE_WINDOW_H_ + +/* Genode includes */ +#include +#include + +/* NOVA includes */ +#include +#include + +namespace Genode { struct Receive_window; } + + +struct Genode::Receive_window +{ + public: + + enum { + MAX_CAP_ARGS_LOG2 = 2, + MAX_CAP_ARGS = 1UL << MAX_CAP_ARGS_LOG2 + }; + + static_assert(MAX_CAP_ARGS == (size_t)Msgbuf_base::MAX_CAPS_PER_MSG, + "Inconsistency between Receive_window and Msgbuf_base"); + + private: + + /** + * Base of portal receive window + */ + addr_t _rcv_pt_base = 0; + + struct { + addr_t sel = 0; + bool del = 0; + } _rcv_pt_sel[MAX_CAP_ARGS]; + + /** + * Normally the received capabilities start from the beginning of + * the receive window (_rcv_pt_base), densely packed ascending. + * However, a receiver may send invalid caps, which will cause + * capability-selector gaps in the receiver window. 
Or a + * misbehaving sender may even intentionally place a cap at the end + * of the receive window. The position of a cap within the receive + * window is fundamentally important to correctly maintain the + * component-local capability-selector reference count. + * + * Additionally, the position is also required to decide whether a + * kernel capability must be revoked during the receive window + * cleanup/re-usage. '_rcv_pt_cap_free' is used to track this + * information in order to free up and revoke selectors + * (message-buffer cleanup). + * + * Meanings of the enums: + * - FREE_INVALID - invalid cap selector, no cap_map entry + * - FREE_SEL - valid cap selector, invalid kernel capability + * - UNUSED_CAP - valid selector and cap, not read/used yet + * - USED_CAP - valid sel and cap, read/used by stream operator + */ + enum { FREE_INVALID, FREE_SEL, UNUSED_CAP, USED_CAP } + _rcv_pt_cap_free [MAX_CAP_ARGS]; + + /** + * Read counter for unmarshalling portal capability + * selectors + */ + unsigned short _rcv_pt_sel_cnt = 0; + unsigned short _rcv_pt_sel_max = 0; + unsigned short _rcv_wnd_log2 = 0; + + /** + * Reset portal-capability receive window + */ + void _rcv_reset() + { + if (!rcv_invalid()) { rcv_cleanup(false); } + + _rcv_pt_sel_cnt = 0; + _rcv_pt_sel_max = 0; + _rcv_pt_base = INVALID_INDEX; + } + + public: + + static constexpr unsigned long INVALID_INDEX = ~0UL; + + Receive_window() + : + _rcv_pt_base(INVALID_INDEX), _rcv_wnd_log2(MAX_CAP_ARGS_LOG2) + { + _rcv_reset(); + } + + ~Receive_window() + { + _rcv_reset(); + } + + /** + * Set log2 number of capabilities to be received during reply of + * a IPC call. 
+ */ + void rcv_wnd(unsigned short const caps_log2) + { + if (caps_log2 > MAX_CAP_ARGS_LOG2) + nova_die(); + + _rcv_wnd_log2 = caps_log2; + } + + /** + * Return received portal-capability selector + */ + void rcv_pt_sel(Native_capability &cap); + + /** + * Return true if receive window must be re-initialized + */ + bool rcv_invalid() const; + + unsigned num_received_caps() const { return _rcv_pt_sel_max; } + + /** + * Return true if receive window must be re-initialized + * + * After reading portal selectors from the message + * buffer using 'rcv_pt_sel()', we assume that the IDC + * call populated the current receive window with one + * or more portal capabilities. + * To enable the reception of portal capability + * selectors for the next IDC, we need a fresh receive + * window. + * + * \param keep 'true' - Try to keep receive window if + * it's clean. + * 'false' - Free caps of receive window + * because object is freed + * afterwards. + * + * \result 'true' - receive window must be re-initialized + * 'false' - portal selectors has been kept + */ + bool rcv_cleanup(bool keep, unsigned short const new_max = MAX_CAP_ARGS); + + /** + * Initialize receive window for portal capability + * selectors + * + * \param utcb - UTCB of designated receiver + * thread + * \param rcv_window - If specified - receive exactly + * one capability at the specified + * index of rcv_window + * + * Depending on the 'rcv_invalid', 'rcv_cleanup(true)' + * state of the message buffer and the specified + * rcv_window parameter, this function allocates a + * fresh receive window and clears 'rcv_invalid'. + */ + bool prepare_rcv_window(Tukija::Utcb &utcb, + addr_t rcv_window = INVALID_INDEX); + + /** + * Post IPC processing. + * + * Remember where and which caps have been received + * respectively have been translated. + * The information is required to correctly free + * cap indexes and to revoke unused received caps. 
+ * + * \param utcb UTCB of designated receiver thread + */ + void post_ipc(Tukija::Utcb &utcb, addr_t const rcv_window = INVALID_INDEX) + { + using namespace Tukija; + + unsigned const rcv_items = (utcb.items >> 16) & 0xffffu; + + _rcv_pt_sel_max = 0; + _rcv_pt_sel_cnt = 0; + + unsigned short const max = 1U << utcb.crd_rcv.order(); + if (max > MAX_CAP_ARGS) + nova_die(); + + for (unsigned short i = 0; i < MAX_CAP_ARGS; i++) + _rcv_pt_cap_free [i] = (i >= max) ? FREE_INVALID : FREE_SEL; + + for (unsigned i = 0; i < rcv_items; i++) { + Tukija::Utcb::Item * item = utcb.get_item(i); + if (!item) + break; + + Tukija::Crd cap(item->crd); + + /* track which items we got mapped */ + if (!cap.is_null() && item->is_del()) { + /* should never happen */ + if (cap.base() < _rcv_pt_base || + (cap.base() >= _rcv_pt_base + max)) + nova_die(); + _rcv_pt_cap_free [cap.base() - _rcv_pt_base] = UNUSED_CAP; + } + + if (_rcv_pt_sel_max >= max) continue; + + /* track the order of mapped and translated items */ + if (cap.is_null()) { + _rcv_pt_sel[_rcv_pt_sel_max].sel = INVALID_INDEX; + _rcv_pt_sel[_rcv_pt_sel_max++].del = false; + } else { + _rcv_pt_sel[_rcv_pt_sel_max].sel = cap.base(); + _rcv_pt_sel[_rcv_pt_sel_max++].del = item->is_del(); + } + } + + /* + * If a specific rcv_window has been specified, + * (see prepare_rcv_window) then the caller wants to take care + * of freeing the selector. Make the _rcv_pt_base invalid + * so that it is not cleaned up twice. 
+ */ + if (rcv_window != INVALID_INDEX) + _rcv_pt_base = INVALID_INDEX; + + utcb.crd_rcv = 0; + } +}; + +#endif /* _INCLUDE__NOVA__RECEIVE_WINDOW_H_ */ diff --git a/repos/base-tukija/include/tukija/stdint.h b/repos/base-tukija/include/tukija/stdint.h new file mode 100644 index 0000000000..acd9d977af --- /dev/null +++ b/repos/base-tukija/include/tukija/stdint.h @@ -0,0 +1,28 @@ +/* + * \brief Integer type definitions used by NOVA syscall bindings + * \author Norman Feske + * \date 2010-01-15 + */ + +/* + * Copyright (C) 2010-2017 Genode Labs GmbH + * + * This file is part of the Genode OS framework, which is distributed + * under the terms of the GNU Affero General Public License version 3. + */ + +#ifndef _INCLUDE__NOVA__STDINT_H_ +#define _INCLUDE__NOVA__STDINT_H_ + +#include + +namespace Tukija { + + typedef unsigned long mword_t; + typedef unsigned char uint8_t; + typedef Genode::uint16_t uint16_t; + typedef Genode::uint32_t uint32_t; + typedef Genode::uint64_t uint64_t; +} + +#endif /* _INCLUDE__NOVA__STDINT_H_ */ diff --git a/repos/base-tukija/include/tukija/syscall-generic.h b/repos/base-tukija/include/tukija/syscall-generic.h new file mode 100644 index 0000000000..57976fda54 --- /dev/null +++ b/repos/base-tukija/include/tukija/syscall-generic.h @@ -0,0 +1,916 @@ +/* + * \brief Syscall bindings for the NOVA microhypervisor + * \author Norman Feske + * \author Sebastian Sumpf + * \author Alexander Boettcher + * \author Benjamin Lamowski + * \author Michael Müller + * \date 2009-12-27 + */ + +/* + * Copyright (c) 2009-2023 Genode Labs + * Copyright (c) 2025 Michael Müller, Osnabrück University + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit 
persons to whom the + * Software is furnished to do so, subject to the following + * conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#ifndef _INCLUDE__NOVA__SYSCALL_GENERIC_H_ +#define _INCLUDE__NOVA__SYSCALL_GENERIC_H_ + +#include + +namespace Tukija { + + enum { + PAGE_SIZE_LOG2 = 12, + PAGE_SIZE_BYTE = 1 << PAGE_SIZE_LOG2, + PAGE_MASK_ = ~(PAGE_SIZE_BYTE - 1) + }; + + /** + * NOVA system-call IDs + */ + enum Syscall { + NOVA_CALL = 0x0, + NOVA_REPLY = 0x1, + NOVA_CREATE_PD = 0x2, + NOVA_CREATE_EC = 0x3, + NOVA_CREATE_SC = 0x4, + NOVA_CREATE_PT = 0x5, + NOVA_CREATE_SM = 0x6, + NOVA_REVOKE = 0x7, + NOVA_MISC = 0x8, /* lookup, delegate, acpi_suspend */ + NOVA_EC_CTRL = 0x9, + NOVA_SC_CTRL = 0xa, + NOVA_PT_CTRL = 0xb, + NOVA_SM_CTRL = 0xc, + NOVA_ASSIGN_PCI = 0xd, + NOVA_ASSIGN_GSI = 0xe, + NOVA_PD_CTRL = 0xf, + }; + + /** + * NOVA status codes returned by system-calls + */ + enum Status + { + NOVA_OK = 0, + NOVA_TIMEOUT = 1, + NOVA_IPC_ABORT = 2, + NOVA_INV_HYPERCALL = 3, + NOVA_INV_SELECTOR = 4, + NOVA_INV_PARAMETER = 5, + NOVA_INV_FEATURE = 6, + NOVA_INV_CPU = 7, + NOVA_INVD_DEVICE_ID = 8, + NOVA_PD_OOM = 9, + }; + + /** + * Hypervisor information page + */ + struct Hip + { + struct Mem_desc + { + enum Type { + EFI_SYSTEM_TABLE = -7, + HYPERVISOR_LOG = -6, + FRAMEBUFFER = -5, + ACPI_XSDT = -4, + ACPI_RSDT = -3, + MULTIBOOT_MODULE = -2, + MICROHYPERVISOR = -1, + 
AVAILABLE_MEMORY = 1, + RESERVED_MEMORY = 2, + ACPI_RECLAIM_MEMORY = 3, + ACPI_NVS_MEMORY = 4 + }; + + uint64_t const addr; + uint64_t const size; + Type const type; + uint32_t const aux; + }; + + uint32_t const signature; /* magic value 0x41564f4e */ + uint16_t const hip_checksum; + uint16_t const hip_length; + uint16_t const cpu_desc_offset; + uint16_t const cpu_desc_size; + uint16_t const mem_desc_offset; + uint16_t const mem_desc_size; + uint32_t const feature_flags; + uint32_t const api_version; + uint32_t const sel; /* number of cap selectors */ + uint32_t const sel_exc; /* number of cap selectors for exceptions */ + uint32_t const sel_vm; /* number of cap selectors for VM handling */ + uint32_t const sel_gsi; /* number of global system interrupts */ + uint32_t const page_sizes; /* supported page sizes */ + uint32_t const utcb_sizes; /* supported utcb sizes */ + uint32_t const tsc_freq; /* time-stamp counter frequency in kHz */ + uint32_t const bus_freq; /* bus frequency in kHz */ + + bool has_feature_iommu() const { return feature_flags & (1 << 0); } + bool has_feature_vmx() const { return feature_flags & (1 << 1); } + bool has_feature_svm() const { return feature_flags & (1 << 2); } + + struct Cpu_desc { + uint8_t flags; + uint8_t thread; + uint8_t core; + uint8_t package; + uint8_t acpi_id; + uint8_t family; + uint8_t model; + uint8_t stepping:4; + uint8_t platform:3; + uint8_t reserved:1; + uint32_t patch; + + bool p_core() const { return flags & 0x2; } + bool e_core() const { return flags & 0x4; } + } __attribute__((packed)); + + unsigned cpu_max() const { + return (mem_desc_offset - cpu_desc_offset) / cpu_desc_size; } + + unsigned cpus() const { + unsigned cpu_num = 0; + + for (unsigned i = 0; i < cpu_max(); i++) + if (is_cpu_enabled(i)) + cpu_num++; + + return cpu_num; + } + + Cpu_desc const * cpu_desc_of_cpu(unsigned i) const { + if (i >= cpu_max()) + return nullptr; + + unsigned long desc_addr = reinterpret_cast(this) + + cpu_desc_offset + i * 
cpu_desc_size; + return reinterpret_cast(desc_addr); + } + + bool is_cpu_enabled(unsigned i) const { + Cpu_desc const * const desc = cpu_desc_of_cpu(i); + return desc ? desc->flags & 0x1 : false; + } + + /** + * Resort CPU ids such, that + * - the boot CPU id is ever logical CPU id 0 + * - SMT threads of one CPU have logical CPU ids close together + * - P-Core has a smaller logical CPU id than E-Core CPUs + * + * Returns true, if re-mapping succeeded otherwise false. + * + * In case of failure, map_cpus will contain a 1:1 fallback mapping + * without any sorting as mentioned above. + */ + bool remap_cpu_ids(uint16_t *map_cpus, unsigned const max_cpus, + unsigned const boot_cpu) const + { + unsigned const num_cpus = cpus(); + bool too_many_cpus = false; + unsigned cpu_i = 0; + + /* fallback lambda in case re-ordering fails */ + auto remap_failure = [&] { + for (uint16_t i = 0; i < max_cpus; i++) { map_cpus[i] = i; } + return false; + }; + + /* assign boot cpu ever the virtual cpu id 0 */ + Cpu_desc const * const boot = cpu_desc_of_cpu(boot_cpu); + if (!boot) + return remap_failure(); + + map_cpus[cpu_i++] = (uint8_t)boot_cpu; + if (cpu_i >= num_cpus) + return true; + if (cpu_i >= max_cpus) + return remap_failure(); + + /* assign cores + SMT threads first and skip E-cores */ + bool done = for_all_cpus([&](auto const &cpu, auto const kernel_cpu_id) { + if (kernel_cpu_id == boot_cpu) + return false; + + /* handle normal or P-core */ + if (cpu.e_core()) + return false; + + map_cpus[cpu_i++] = (uint8_t)kernel_cpu_id; + + too_many_cpus = !!(cpu_i >= max_cpus); + + return (cpu_i >= num_cpus || too_many_cpus); + }); + + if (done) + return too_many_cpus ? 
remap_failure() : true; + + /* assign remaining E-cores */ + done = for_all_cpus([&](auto &cpu, auto &kernel_cpu_id) { + if (kernel_cpu_id == boot_cpu) + return false; + + /* handle solely E-core */ + if (!cpu.e_core()) + return false; + + map_cpus[cpu_i++] = (uint16_t)kernel_cpu_id; + + too_many_cpus = !!(cpu_i >= max_cpus); + + return (cpu_i >= num_cpus || too_many_cpus); + }); + + return too_many_cpus ? remap_failure() : done; + } + + /** + * Iterate over all CPUs in a _ever_ _consistent_ order. + */ + bool for_all_cpus(auto const &fn) const + { + for (uint16_t package = 0; package <= 255; package++) { + for (uint16_t core = 0; core <= 255; core++) { + for (uint16_t thread = 0; thread <= 255; thread++) { + for (unsigned i = 0; i < cpu_max(); i++) { + if (!is_cpu_enabled(i)) + continue; + + auto const cpu = cpu_desc_of_cpu(i); + if (!cpu) + continue; + + if (!(cpu->package == package && cpu->core == core && + cpu->thread == thread)) + continue; + + bool done = fn(*cpu, i); + if (done) + return done; + } + } + } + } + return false; + } + + void for_each_enabled_cpu(auto const &fn) const + { + for (unsigned i = 0; i < cpu_max(); i++) { + Cpu_desc const * cpu = cpu_desc_of_cpu(i); + if (!is_cpu_enabled(i)) continue; + if (!cpu) return; + fn(*cpu, i); + } + } + + } __attribute__((packed)); + + + /** + * Semaphore operations + */ + enum Sem_op { SEMAPHORE_UP = 0U, SEMAPHORE_DOWN = 1U, SEMAPHORE_DOWNZERO = 0x3U }; + + /** + * Ec operations + */ + enum Ec_op { + EC_RECALL = 0U, + EC_YIELD = 1U, + EC_DONATE_SC = 2U, + EC_RESCHEDULE = 3U, + EC_MIGRATE = 4U, + EC_TIME = 5U, + EC_GET_VCPU_STATE = 6U, + EC_SET_VCPU_STATE = 7U, + EC_MSR_ACCESS = 8U + }; + + enum Sc_op { + SC_TIME_IDLE = 0, + SC_TIME_CROSS = 1, + SC_TIME_KILLED = 2, + SC_EC_TIME = 3, + }; + + /** + * Pd operations + */ + enum Pd_op { TRANSFER_QUOTA = 0U, PD_DEBUG = 2U }; + + /** + * Hpc operations + * + */ + enum Hpc_op + { + HPC_SETUP = 9U, + HPC_START = 10U, + HPC_STOP = 11U, + HPC_RESET = 12U, + HPC_READ = 
13U, + }; + + /** + * Cell operations + */ + enum Cell_op + { + SHRINK = 0, + GROW = 1, + }; + + class Gsi_flags + { + private: + + uint8_t _value { 0 }; + + public: + + enum Mode { HIGH, LOW, EDGE }; + + Gsi_flags() { } + + Gsi_flags(Mode m) + { + switch (m) { + case HIGH: _value = 0b110; break; /* level-high */ + case LOW: _value = 0b111; break; /* level-low */ + case EDGE: _value = 0b100; break; /* edge-triggered */ + } + } + + uint8_t value() const { return _value; } + }; + + + class Descriptor + { + protected: + + mword_t _value { 0 }; + + /** + * Assign bitfield to descriptor + */ + template + void _assign(mword_t new_bits) + { + _value &= ~(MASK << SHIFT); + _value |= (new_bits & MASK) << SHIFT; + } + + /** + * Query bitfield from descriptor + */ + template + mword_t _query() const { return (_value >> SHIFT) & MASK; } + + public: + + mword_t value() const { return _value; } + + } __attribute__((packed)); + + + /** + * Message-transfer descriptor + */ + class Mtd + { + private: + + mword_t const _value; + + public: + + enum { + ACDB = 1U << 0, /* eax, ecx, edx, ebx */ + EBSD = 1U << 1, /* ebp, esi, edi */ + ESP = 1U << 2, + EIP = 1U << 3, + EFL = 1U << 4, /* eflags */ + ESDS = 1U << 5, + FSGS = 1U << 6, + CSSS = 1U << 7, + TR = 1U << 8, + LDTR = 1U << 9, + GDTR = 1U << 10, + IDTR = 1U << 11, + CR = 1U << 12, + DR = 1U << 13, /* DR7 */ + SYS = 1U << 14, /* Sysenter MSRs CS, ESP, EIP */ + QUAL = 1U << 15, /* exit qualification */ + CTRL = 1U << 16, /* execution controls */ + INJ = 1U << 17, /* injection info */ + STA = 1U << 18, /* interruptibility state */ + TSC = 1U << 19, /* time-stamp counter */ + EFER = 1U << 20, /* EFER MSR */ + PDPTE = 1U << 21, /* PDPTE0 .. PDPTE3 */ + R8_R15 = 1U << 22, /* R8 .. 
R15 */ + SYSCALL_SWAPGS = 1U << 23, /* SYSCALL and SWAPGS MSRs */ + TPR = 1U << 24, /* TPR and TPR threshold */ + TSC_AUX = 1U << 25, /* IA32_TSC_AUX used by rdtscp */ + XSAVE = 1U << 26, /* XCR and XSS used with XSAVE */ + FPU = 1U << 31, /* FPU state */ + + IRQ = EFL | STA | INJ | TSC, + ALL = (0x000fffff & ~CTRL) | EFER | R8_R15 | SYSCALL_SWAPGS | TPR, + }; + + Mtd(mword_t value) : _value(value) { } + + mword_t value() const { return _value; } + }; + + + class Crd : public Descriptor + { + protected: + + /** + * Bitfield holding the descriptor type + */ + enum { + TYPE_MASK = 0x3, TYPE_SHIFT = 0, + BASE_SHIFT = 12, RIGHTS_MASK = 0x1f, + ORDER_MASK = 0x1f, ORDER_SHIFT = 7, + BASE_MASK = (~0UL) >> BASE_SHIFT, + RIGHTS_SHIFT= 2 + }; + + /** + * Capability-range-descriptor types + */ + enum { + NULL_CRD_TYPE = 0, + MEM_CRD_TYPE = 1, + IO_CRD_TYPE = 2, + OBJ_CRD_TYPE = 3, + RIGHTS_ALL = 0x1f, + }; + + void _base(mword_t base) + { _assign(base); } + + void _order(mword_t order) + { _assign(order); } + + public: + + Crd(mword_t base, mword_t order) { + _value = 0; _base(base), _order(order); } + + Crd(mword_t value) { _value = value; } + + mword_t hotspot(mword_t sel_hotspot) const + { + if ((value() & TYPE_MASK) == MEM_CRD_TYPE) + return sel_hotspot & PAGE_MASK_; + + return sel_hotspot << 12; + } + + mword_t addr() const { return base() << BASE_SHIFT; } + mword_t base() const { return _query(); } + mword_t order() const { return _query(); } + bool is_null() const { return (_value & TYPE_MASK) == NULL_CRD_TYPE; } + uint8_t type() const { return (uint8_t)_query(); } + uint8_t rights() const { return (uint8_t)_query(); } + } __attribute__((packed)); + + + class Rights + { + private: + + bool const _readable, _writeable, _executable; + + public: + + Rights(bool readable, bool writeable, bool executable) + : _readable(readable), _writeable(writeable), + _executable(executable) { } + + Rights() : _readable(false), _writeable(false), _executable(false) {} + + bool readable() 
const { return _readable; } + bool writeable() const { return _writeable; } + bool executable() const { return _executable; } + }; + + + /** + * Memory-capability-range descriptor + */ + class Mem_crd : public Crd + { + private: + + enum { + EXEC_MASK = 0x1, EXEC_SHIFT = 4, + WRITE_MASK = 0x1, WRITE_SHIFT = 3, + READ_MASK = 0x1, READ_SHIFT = 2 + }; + + void _rights(Rights r) + { + _assign(r.executable()); + _assign(r.writeable()); + _assign(r.readable()); + } + + public: + + Mem_crd(mword_t base, mword_t order, Rights rights = Rights()) + : Crd(base, order) + { + _rights(rights); + _assign(MEM_CRD_TYPE); + } + + Rights rights() const + { + return Rights(_query(), + _query(), + _query()); + } + }; + + + /** + * I/O-capability-range descriptor + */ + class Io_crd : public Crd + { + public: + + Io_crd(mword_t base, mword_t order) + : Crd(base, order) + { + _assign(IO_CRD_TYPE); + _assign(RIGHTS_ALL); + } + }; + + + class Obj_crd : public Crd + { + public: + + enum { + RIGHT_EC_RECALL = 0x1U, + RIGHT_PT_CALL = 0x2U, + RIGHT_PT_CTRL = 0x1U, + RIGHT_PT_XCPU = 0x10U, + RIGHT_SM_UP = 0x1U, + RIGHT_SM_DOWN = 0x2U + }; + + Obj_crd() : Crd(0, 0) + { + _assign(NULL_CRD_TYPE); + } + + Obj_crd(mword_t base, mword_t order, + mword_t rights = RIGHTS_ALL) + : Crd(base, order) + { + _assign(OBJ_CRD_TYPE); + _assign(rights); + } + }; + + + /** + * Quantum-priority descriptor + */ + class Qpd : public Descriptor + { + private: + + enum { + PRIORITY_MASK = 0xff, PRIORITY_SHIFT = 0, + QUANTUM_SHIFT = 12, + QUANTUM_MASK = (~0UL) >> QUANTUM_SHIFT + }; + + void _quantum(mword_t quantum) + { _assign(quantum); } + + void _priority(mword_t priority) + { _assign(priority); } + + public: + + enum { DEFAULT_QUANTUM = 10000, DEFAULT_PRIORITY = 64 }; + + Qpd(mword_t quantum = DEFAULT_QUANTUM, + mword_t priority = DEFAULT_PRIORITY) + { + _value = 0; + _quantum(quantum), _priority(priority); + } + + mword_t quantum() const { return _query(); } + mword_t priority() const { return _query(); } + }; + + 
+ /** + * User-level thread-control block + */ + struct Utcb + { + /** + * Return physical size of UTCB in bytes + */ + static constexpr mword_t size() { return 4096; } + + /** + * Number of untyped items uses lowest 16 bit, number of typed items + * uses bit 16-31, bit 32+ are ignored on 64bit + */ + mword_t items; + Crd crd_xlt; /* receive capability-range descriptor for translation */ + Crd crd_rcv; /* receive capability-range descriptor for delegation */ + mword_t tls; + + /** + * Data area + * + * The UTCB entries following the header hold message payload (normal + * IDC operations) or architectural state (exception handling). + */ + union { + + /* exception state */ + struct { + mword_t mtd, instr_len, ip, flags; + unsigned intr_state, actv_state, inj_info, inj_error; + mword_t ax, cx, dx, bx; + mword_t sp, bp, si, di; +#ifdef __x86_64__ + mword_t r8, r9, r10, r11, r12, r13, r14, r15; +#endif + unsigned long long qual[2]; /* exit qualification */ + unsigned ctrl[2]; + mword_t cr0, cr2, cr3, cr4; + unsigned long long xcr0, xss; + mword_t pdpte[4]; +#ifdef __x86_64__ + mword_t cr8, efer; + unsigned long long star; + unsigned long long lstar; + unsigned long long cstar; + unsigned long long fmask; + unsigned long long kernel_gs_base; + unsigned tpr; + unsigned tpr_threshold; +#endif + mword_t dr7, sysenter_cs, sysenter_sp, sysenter_ip; + + struct { + unsigned short sel, ar; + unsigned limit; + mword_t base; +#ifndef __x86_64__ + mword_t reserved; +#endif + } es, cs, ss, ds, fs, gs, ldtr, tr; + struct { + unsigned reserved0; + unsigned limit; + mword_t base; +#ifndef __x86_64__ + mword_t reserved1; +#endif + } gdtr, idtr; + unsigned long long tsc_val, tsc_off, tsc_aux; + unsigned long long exit_reason; + uint8_t fpu[2560]; + } __attribute__((packed)); + mword_t mr[(4096 - 4 * sizeof(mword_t)) / sizeof(mword_t)]; + }; + + /* message payload */ + mword_t * msg() { return mr; } + + struct Item { + mword_t crd; + mword_t hotspot; + bool is_del() const { return 
hotspot & 0x1; } + }; + +#ifdef __x86_64__ + uint64_t read_r8() const { return r8; } + uint64_t read_r9() const { return r9; } + uint64_t read_r10() const { return r10; } + uint64_t read_r11() const { return r11; } + uint64_t read_r12() const { return r12; } + uint64_t read_r13() const { return r13; } + uint64_t read_r14() const { return r14; } + uint64_t read_r15() const { return r15; } + mword_t read_efer() const { return efer; } + uint64_t read_star() const { return star; } + uint64_t read_lstar() const { return lstar; } + uint64_t read_cstar() const { return cstar; } + uint64_t read_fmask() const { return fmask; } + uint64_t read_kernel_gs_base() const { return kernel_gs_base; } + uint32_t read_tpr() const { return tpr; } + uint32_t read_tpr_threshold() const { return tpr_threshold; } + + void write_r8 (uint64_t value) { r8 = value; } + void write_r9 (uint64_t value) { r9 = value; } + void write_r10 (uint64_t value) { r10 = value; } + void write_r11 (uint64_t value) { r11 = value; } + void write_r12 (uint64_t value) { r12 = value; } + void write_r13 (uint64_t value) { r13 = value; } + void write_r14 (uint64_t value) { r14 = value; } + void write_r15 (uint64_t value) { r15 = value; } + void write_efer (mword_t value) { efer = value; } + void write_star (uint64_t value) { star = value; } + void write_lstar (uint64_t value) { lstar = value; } + void write_cstar (uint64_t value) { cstar = value; } + void write_fmask (uint64_t value) { fmask = value; } + void write_kernel_gs_base (uint64_t value) { kernel_gs_base = value; } + void write_tpr (uint32_t value) { tpr = value; } + void write_tpr_threshold (uint32_t value) { tpr_threshold = value; } +#else + uint64_t read_r8() const { return 0; } + uint64_t read_r9() const { return 0; } + uint64_t read_r10() const { return 0; } + uint64_t read_r11() const { return 0; } + uint64_t read_r12() const { return 0; } + uint64_t read_r13() const { return 0; } + uint64_t read_r14() const { return 0; } + uint64_t read_r15() const { 
return 0; } + mword_t read_efer() const { return 0; } + uint64_t read_star() const { return 0; } + uint64_t read_lstar() const { return 0; } + uint64_t read_cstar() const { return 0; } + uint64_t read_fmask() const { return 0; } + uint64_t read_kernel_gs_base() const { return 0; } + uint32_t read_tpr() const { return 0; } + uint32_t read_tpr_threshold() const { return 0; } + + void write_r8 (uint64_t) { } + void write_r9 (uint64_t) { } + void write_r10 (uint64_t) { } + void write_r11 (uint64_t) { } + void write_r12 (uint64_t) { } + void write_r13 (uint64_t) { } + void write_r14 (uint64_t) { } + void write_r15 (uint64_t) { } + void write_efer (mword_t) { } + void write_star (uint64_t) { } + void write_lstar (uint64_t) { } + void write_cstar (uint64_t) { } + void write_fmask (uint64_t) { } + void write_kernel_gs_base (uint64_t) { } + void write_tpr (uint32_t) { } + void write_tpr_threshold (uint32_t) { } +#endif + + /** + * Set number of untyped message words + * + * Calling this function has the side effect of removing all typed + * message items from the message buffer. 
+ */ + void set_msg_word(mword_t const num) { items = num; } + + /** + * Return current number of message word in UTCB + */ + unsigned msg_words() { return items & 0xffffU; } + + /** + * Return current number of message items on UTCB + */ + unsigned msg_items() { return (unsigned)(items >> 16); } + + /** + * Append message-transfer item to message buffer + * + * \param exception true to append the item to an exception reply + */ + __attribute__((warn_unused_result)) + bool append_item(Crd crd, mword_t sel_hotspot, + bool kern_pd = false, + bool update_guest_pt = false, + bool translate_map = false, + bool dma_mem = false, + bool write_combined = false) + { + /* transfer items start at the end of the UTCB */ + items += 1 << 16; + Item *item = reinterpret_cast(this); + item += (PAGE_SIZE_BYTE / sizeof(struct Item)) - msg_items(); + + /* check that there is enough space left on UTCB */ + if (msg() + msg_words() >= reinterpret_cast(item)) { + items -= 1 << 16; + return false; + } + + /* map from hypervisor or current pd */ + unsigned h = kern_pd ? (1 << 11) : 0; + + /* map write-combined */ + unsigned wc = write_combined ? (1 << 10) : 0; + + /* update guest page table */ + unsigned g = update_guest_pt ? (1 << 9) : 0; + + /* mark memory dma able */ + unsigned d = dma_mem ? (1 << 8) : 0; + + /* set type of delegation, either 'map' or 'translate and map' */ + unsigned m = translate_map ? 
2 : 1; + + item->hotspot = crd.hotspot(sel_hotspot) | g | h | wc | d | m; + item->crd = crd.value(); + + return true; + } + + /** + * Return typed item at position i in UTCB + * + * \param i position of item requested, starts with 0 + */ + Item * get_item(const unsigned i) { + if (i > (PAGE_SIZE_BYTE / sizeof(struct Item))) return 0; + Item * item = reinterpret_cast(this) + (PAGE_SIZE_BYTE / sizeof(struct Item)) - i - 1; + if (reinterpret_cast(item) < this->msg()) return 0; + return item; + } + + mword_t mtd_value() const { return static_cast(mtd).value(); } + + /** + * Return fault address and type of page-fault message + */ + mword_t pf_addr() const { return (mword_t)qual[1]; } + uint8_t pf_type() const { return (uint8_t)qual[0]; } + }; + + static_assert(sizeof(Utcb) == 4096, "Unexpected size of UTCB"); + + /** + * Size of event-specific portal window mapped at PD creation time + */ + enum { + NUM_INITIAL_PT_LOG2 = 5, + NUM_INITIAL_PT = 1UL << NUM_INITIAL_PT_LOG2, + NUM_INITIAL_PT_RESERVED = 2 * NUM_INITIAL_PT, + NUM_INITIAL_VCPU_PT_LOG2 = 8, + NUM_INITIAL_VCPU_PT = 1UL << NUM_INITIAL_VCPU_PT_LOG2, + }; + + /** + * Event-specific capability selectors + */ + enum { + PT_SEL_PAGE_FAULT = 0xe, + PT_SEL_PARENT = 0x1a, /* convention on Genode */ + EC_SEL_THREAD = 0x1c, /* convention on Genode */ + PT_SEL_STARTUP = 0x1e, + SM_SEL_SIGNAL = 0x1e, /* alias of PT_SEL_STARTUP */ + PT_SEL_RECALL = 0x1f, + SM_SEL_EC = 0x1d, /* convention on Genode */ + }; + +} +#endif /* _INCLUDE__NOVA__SYSCALL_GENERIC_H_ */ diff --git a/repos/base-tukija/include/tukija/util.h b/repos/base-tukija/include/tukija/util.h new file mode 100644 index 0000000000..22f62d07cc --- /dev/null +++ b/repos/base-tukija/include/tukija/util.h @@ -0,0 +1,97 @@ +/* + * \brief Helper code used by core as base framework + * \author Alexander Boettcher + * \date 2012-08-08 + */ + +/* + * Copyright (C) 2012-2017 Genode Labs GmbH + * + * This file is part of the Genode OS framework, which is distributed + * under the 
terms of the GNU Affero General Public License version 3. + */ + +#ifndef _INCLUDE__NOVA__UTIL_H_ +#define _INCLUDE__NOVA__UTIL_H_ + +#include +#include + +__attribute__((always_inline)) +inline void nova_die() +{ + asm volatile ("ud2a"); +} + + +inline void request_event_portal(Genode::addr_t const cap, + Genode::addr_t const sel, Genode::addr_t event) +{ + Genode::Thread * myself = Genode::Thread::myself(); + Tukija::Utcb *utcb = reinterpret_cast(myself->utcb()); + + /* save original receive window */ + Tukija::Crd orig_crd = utcb->crd_rcv; + + /* request event-handler portal */ + utcb->crd_rcv = Tukija::Obj_crd(sel, 0); + utcb->msg()[0] = event; + utcb->set_msg_word(1); + + Genode::uint8_t res = Tukija::call(cap); + + /* restore original receive window */ + utcb->crd_rcv = orig_crd; + + if (res) + Genode::error("request of event (", Genode::Hex(event), ") ", + "capability selector failed (res=", res, ")"); +} + + +inline void request_native_ec_cap(Genode::addr_t const cap, + Genode::addr_t const sel) +{ + request_event_portal(cap, sel , ~0UL); +} + + +inline void request_signal_sm_cap(Genode::addr_t const cap, + Genode::addr_t const sel) +{ + request_event_portal(cap, sel, ~0UL - 1); +} + + +inline void translate_remote_pager(Genode::addr_t const cap, + Genode::addr_t const sel) +{ + Genode::Thread * myself = Genode::Thread::myself(); + Tukija::Utcb *utcb = reinterpret_cast(myself->utcb()); + + /* save original receive window */ + Tukija::Crd orig_crd = utcb->crd_rcv; + + utcb->crd_rcv = Tukija::Obj_crd(); + + Genode::uint8_t res = Tukija::NOVA_OK; + enum { + TRANSLATE = true, THIS_PD = false, NON_GUEST = false, HOTSPOT = 0 + }; + + /* translate one item */ + utcb->msg()[0] = 0xaffe; + utcb->set_msg_word(1); + + Tukija::Obj_crd obj_crd(sel, 0); + if (utcb->append_item(obj_crd, HOTSPOT, THIS_PD, NON_GUEST, TRANSLATE)) + /* trigger the translation */ + res = Tukija::call(cap); + + /* restore original receive window */ + utcb->crd_rcv = orig_crd; + + if (res != 
Tukija::NOVA_OK) + Genode::error("setting exception portals for vCPU failed res=", res); +} +#endif /* _INCLUDE__NOVA__UTIL_H_ */ diff --git a/repos/base-tukija/include/tukija_native_cpu/client.h b/repos/base-tukija/include/tukija_native_cpu/client.h new file mode 100644 index 0000000000..d958f11a89 --- /dev/null +++ b/repos/base-tukija/include/tukija_native_cpu/client.h @@ -0,0 +1,33 @@ +/* + * \brief Client-side NOVA-specific CPU session interface + * \author Norman Feske + * \date 2016-04-21 + */ + +/* + * Copyright (C) 2016-2017 Genode Labs GmbH + * + * This file is part of the Genode OS framework, which is distributed + * under the terms of the GNU Affero General Public License version 3. + */ + +#ifndef _INCLUDE__NOVA_NATIVE_CPU__CLIENT_H_ +#define _INCLUDE__NOVA_NATIVE_CPU__CLIENT_H_ + +#include +#include + +namespace Genode { struct Tukija_native_cpu_client; } + + +struct Genode::Tukija_native_cpu_client : Rpc_client +{ + explicit Tukija_native_cpu_client(Capability cap) + : Rpc_client(cap) { } + + void thread_type(Thread_capability thread_cap, Thread_type thread_type, + Exception_base exception_base) override { + call(thread_cap, thread_type, exception_base); } +}; + +#endif /* _INCLUDE__NOVA_NATIVE_CPU__CLIENT_H_ */ diff --git a/repos/base-tukija/include/tukija_native_cpu/tukija_native_cpu.h b/repos/base-tukija/include/tukija_native_cpu/tukija_native_cpu.h new file mode 100644 index 0000000000..6134b8f714 --- /dev/null +++ b/repos/base-tukija/include/tukija_native_cpu/tukija_native_cpu.h @@ -0,0 +1,43 @@ +/* + * \brief NOVA-specific part of the CPU session interface + * \author Norman Feske + * \date 2016-04-21 + */ + +/* + * Copyright (C) 2016-2017 Genode Labs GmbH + * + * This file is part of the Genode OS framework, which is distributed + * under the terms of the GNU Affero General Public License version 3. 
+ */ + +#ifndef _INCLUDE__NOVA_NATIVE_CPU__NOVA_NATIVE_CPU_H_ +#define _INCLUDE__NOVA_NATIVE_CPU__NOVA_NATIVE_CPU_H_ + +#include +#include + + +struct Genode::Cpu_session::Native_cpu : Interface +{ + enum Thread_type { GLOBAL, LOCAL, VCPU }; + + /* + * Exception base of thread in caller protection domain - not in core! + */ + struct Exception_base { addr_t exception_base; }; + + + virtual void thread_type(Thread_capability, Thread_type, Exception_base) = 0; + + + /********************* + ** RPC declaration ** + *********************/ + + GENODE_RPC(Rpc_thread_type, void, thread_type, Thread_capability, + Thread_type, Exception_base ); + GENODE_RPC_INTERFACE(Rpc_thread_type); +}; + +#endif /* _INCLUDE__NOVA_NATIVE_CPU__NOVA_NATIVE_CPU_H_ */ diff --git a/repos/base-tukija/include/tukija_native_pd/client.h b/repos/base-tukija/include/tukija_native_pd/client.h new file mode 100644 index 0000000000..6f55bd31b8 --- /dev/null +++ b/repos/base-tukija/include/tukija_native_pd/client.h @@ -0,0 +1,40 @@ +/* + * \brief Client-side stub for the NOVA-specific PD session interface + * \author Norman Feske + * \date 2016-01-19 + */ + +/* + * Copyright (C) 2016-2017 Genode Labs GmbH + * + * This file is part of the Genode OS framework, which is distributed + * under the terms of the GNU Affero General Public License version 3. 
+ */ + +#ifndef _INCLUDE__NOVA_NATIVE_PD__CLIENT_H_ +#define _INCLUDE__NOVA_NATIVE_PD__CLIENT_H_ + +#include +#include + +namespace Genode { struct Tukija_native_pd_client; } + + +struct Genode::Tukija_native_pd_client : Rpc_client +{ + explicit Tukija_native_pd_client(Capability cap) + : Rpc_client(static_cap_cast(cap)) { } + + Native_capability alloc_rpc_cap(Native_capability ep, + addr_t entry, addr_t mtd) override + { + return call(ep, entry, mtd); + } + + void imprint_rpc_cap(Native_capability cap, unsigned long badge) override + { + call(cap, badge); + } +}; + +#endif /* _INCLUDE__NOVA_NATIVE_PD__CLIENT_H_ */ diff --git a/repos/base-tukija/include/tukija_native_pd/tukija_native_pd.h b/repos/base-tukija/include/tukija_native_pd/tukija_native_pd.h new file mode 100644 index 0000000000..e006bf0e45 --- /dev/null +++ b/repos/base-tukija/include/tukija_native_pd/tukija_native_pd.h @@ -0,0 +1,50 @@ +/* + * \brief NOVA-specific part of the PD session interface + * \author Norman Feske + * \date 2016-01-19 + */ + +/* + * Copyright (C) 2016-2017 Genode Labs GmbH + * + * This file is part of the Genode OS framework, which is distributed + * under the terms of the GNU Affero General Public License version 3. 
+ */ + +#ifndef _INCLUDE__NOVA_NATIVE_PD__NOVA_NATIVE_PD_H_ +#define _INCLUDE__NOVA_NATIVE_PD__NOVA_NATIVE_PD_H_ + +#include +#include + +struct Genode::Pd_session::Native_pd : Interface +{ + /** + * Allocate RPC object capability + * + * \param ep entry point that will use this capability + * \param entry server-side instruction pointer of the RPC handler + * \param mtd NOVA message transfer descriptor + * + * \throw Out_of_ram + * \throw Out_of_caps + * + * \return new RPC object capability + */ + virtual Native_capability alloc_rpc_cap(Native_capability ep, + addr_t entry, addr_t mtd) = 0; + + /** + * Imprint badge into the portal of the specified RPC capability + */ + virtual void imprint_rpc_cap(Native_capability cap, unsigned long badge) = 0; + + GENODE_RPC_THROW(Rpc_alloc_rpc_cap, Native_capability, alloc_rpc_cap, + GENODE_TYPE_LIST(Out_of_ram, Out_of_caps), + Native_capability, addr_t, addr_t); + GENODE_RPC(Rpc_imprint_rpc_cap, void, imprint_rpc_cap, + Native_capability, unsigned long); + GENODE_RPC_INTERFACE(Rpc_alloc_rpc_cap, Rpc_imprint_rpc_cap); +}; + +#endif /* _INCLUDE__NOVA_NATIVE_PD__NOVA_NATIVE_PD_H_ */ diff --git a/repos/base-tukija/lib/mk/base-tukija-common.mk b/repos/base-tukija/lib/mk/base-tukija-common.mk new file mode 100644 index 0000000000..1b8a8c9238 --- /dev/null +++ b/repos/base-tukija/lib/mk/base-tukija-common.mk @@ -0,0 +1,21 @@ +# +# \brief Portions of base library shared by core and non-core processes +# \author Norman Feske +# \date 2013-02-14 +# + +include $(BASE_DIR)/lib/mk/base-common.inc + +LIBS += startup-tukija + +SRC_CC += thread.cc thread_myself.cc +SRC_CC += stack.cc +SRC_CC += stack_area_addr.cc +SRC_CC += cap_map.cc +SRC_CC += capability.cc +SRC_CC += signal_transmitter.cc + +# +# Prevent the compiler from deleting null pointer checks related to 'this == 0' +# +CC_OPT += -fno-delete-null-pointer-checks diff --git a/repos/base-tukija/lib/mk/base-tukija.mk b/repos/base-tukija/lib/mk/base-tukija.mk new file mode 100644 index 
0000000000..639e82c6a6 --- /dev/null +++ b/repos/base-tukija/lib/mk/base-tukija.mk @@ -0,0 +1,12 @@ +include $(BASE_DIR)/lib/mk/base.inc + +LIBS += base-tukija-common cxx timeout +SRC_CC += thread_start.cc +SRC_CC += cache.cc +SRC_CC += signal.cc +SRC_CC += capability_slab.cc + +# +# Prevent the compiler from deleting null pointer checks related to 'this == 0' +# +CC_OPT += -fno-delete-null-pointer-checks diff --git a/repos/base-tukija/lib/mk/core-tukija.inc b/repos/base-tukija/lib/mk/core-tukija.inc new file mode 100644 index 0000000000..5c56cf8fdf --- /dev/null +++ b/repos/base-tukija/lib/mk/core-tukija.inc @@ -0,0 +1,80 @@ +LIBS = base-tukija-common cxx + +GEN_CORE_DIR = $(BASE_DIR)/src/core + +SRC_CC += stack_area.cc \ + core_mem_alloc.cc \ + core_log.cc \ + core_log_out.cc \ + core_region_map.cc \ + core_rpc_cap_alloc.cc \ + cpu_session_component.cc \ + cpu_session_support.cc \ + cpu_thread_component.cc \ + dataspace_component.cc \ + default_log.cc \ + dump_alloc.cc \ + io_mem_session_component.cc \ + io_mem_session_support.cc \ + io_port_session_component.cc \ + io_port_session_support.cc \ + ipc_pager.cc \ + irq_session_component.cc \ + main.cc \ + pager.cc \ + native_cpu_component.cc \ + native_pd_component.cc \ + pd_session_support.cc \ + rpc_cap_factory.cc \ + ram_dataspace_factory.cc \ + platform.cc \ + platform_rom_modules.cc \ + platform_pd.cc \ + platform_services.cc \ + platform_thread.cc \ + pd_session_component.cc \ + ram_dataspace_support.cc \ + region_map_component.cc \ + rom_session_component.cc \ + thread_start.cc \ + bios_data_area.cc \ + trace_session_component.cc \ + signal_transmitter_noinit.cc \ + signal_receiver.cc \ + vm_session_component.cc \ + vm_session_common.cc \ + heartbeat.cc + +INC_DIR += $(REP_DIR)/src/core/include \ + $(REP_DIR)/src/include \ + $(BASE_DIR)/src/include \ + $(GEN_CORE_DIR)/include + +include $(GEN_CORE_DIR)/version.inc + +vpath main.cc $(GEN_CORE_DIR) +vpath pd_session_component.cc $(GEN_CORE_DIR) +vpath 
rom_session_component.cc $(GEN_CORE_DIR) +vpath core_log.cc $(GEN_CORE_DIR) +vpath cpu_session_component.cc $(GEN_CORE_DIR) +vpath cpu_session_support.cc $(GEN_CORE_DIR) +vpath cpu_thread_component.cc $(GEN_CORE_DIR) +vpath pd_upgrade_ram_quota.cc $(GEN_CORE_DIR) +vpath region_map_component.cc $(GEN_CORE_DIR) +vpath trace_session_component.cc $(GEN_CORE_DIR) +vpath signal_transmitter_noinit.cc $(GEN_CORE_DIR) +vpath signal_receiver.cc $(GEN_CORE_DIR) +vpath io_port_session_component.cc $(GEN_CORE_DIR)/spec/x86 +vpath io_port_session_support.cc $(GEN_CORE_DIR)/spec/x86 +vpath io_mem_session_component.cc $(GEN_CORE_DIR) +vpath io_mem_session_support.cc $(GEN_CORE_DIR) +vpath ram_dataspace_factory.cc $(GEN_CORE_DIR) +vpath dataspace_component.cc $(GEN_CORE_DIR) +vpath core_mem_alloc.cc $(GEN_CORE_DIR) +vpath default_log.cc $(GEN_CORE_DIR) +vpath dump_alloc.cc $(GEN_CORE_DIR) +vpath platform_rom_modules.cc $(GEN_CORE_DIR) +vpath stack_area.cc $(GEN_CORE_DIR) +vpath heartbeat.cc $(GEN_CORE_DIR) +vpath vm_session_common.cc $(GEN_CORE_DIR) +vpath %.cc $(REP_DIR)/src/core diff --git a/repos/base-tukija/lib/mk/spec/x86_32/core-nova.mk b/repos/base-tukija/lib/mk/spec/x86_32/core-nova.mk new file mode 100644 index 0000000000..bd9c978dd7 --- /dev/null +++ b/repos/base-tukija/lib/mk/spec/x86_32/core-nova.mk @@ -0,0 +1,5 @@ +SRC_CC += spec/x86_32/pager.cc + +INC_DIR += $(REP_DIR)/src/core/include/spec/x86_32 + +include $(REP_DIR)/lib/mk/core-tukija.inc diff --git a/repos/base-tukija/lib/mk/spec/x86_32/ld-nova.mk b/repos/base-tukija/lib/mk/spec/x86_32/ld-nova.mk new file mode 100644 index 0000000000..5e150a24b7 --- /dev/null +++ b/repos/base-tukija/lib/mk/spec/x86_32/ld-nova.mk @@ -0,0 +1,3 @@ +BASE_LIBS += base-tukija-common base-tukija + +include $(BASE_DIR)/lib/mk/spec/x86_32/ld-platform.inc diff --git a/repos/base-tukija/lib/mk/spec/x86_32/startup-nova.mk b/repos/base-tukija/lib/mk/spec/x86_32/startup-nova.mk new file mode 100644 index 0000000000..03c229de2e --- /dev/null +++ 
b/repos/base-tukija/lib/mk/spec/x86_32/startup-nova.mk @@ -0,0 +1 @@ +include $(BASE_DIR)/lib/mk/spec/x86_32/startup.inc diff --git a/repos/base-tukija/lib/mk/spec/x86_64/core-tukija.mk b/repos/base-tukija/lib/mk/spec/x86_64/core-tukija.mk new file mode 100644 index 0000000000..50e0639a58 --- /dev/null +++ b/repos/base-tukija/lib/mk/spec/x86_64/core-tukija.mk @@ -0,0 +1,5 @@ +SRC_CC += spec/x86_64/pager.cc + +INC_DIR += $(REP_DIR)/src/core/include/spec/x86_64 + +include $(REP_DIR)/lib/mk/core-tukija.inc diff --git a/repos/base-tukija/lib/mk/spec/x86_64/ld-tukija.mk b/repos/base-tukija/lib/mk/spec/x86_64/ld-tukija.mk new file mode 100644 index 0000000000..fd15195ca0 --- /dev/null +++ b/repos/base-tukija/lib/mk/spec/x86_64/ld-tukija.mk @@ -0,0 +1,3 @@ +BASE_LIBS += base-tukija-common base-tukija + +include $(BASE_DIR)/lib/mk/spec/x86_64/ld-platform.inc diff --git a/repos/base-tukija/lib/mk/spec/x86_64/startup-tukija.mk b/repos/base-tukija/lib/mk/spec/x86_64/startup-tukija.mk new file mode 100644 index 0000000000..94807e2150 --- /dev/null +++ b/repos/base-tukija/lib/mk/spec/x86_64/startup-tukija.mk @@ -0,0 +1 @@ +include $(BASE_DIR)/lib/mk/spec/x86_64/startup.inc diff --git a/repos/base-tukija/mk/spec/nova_x86_32.mk b/repos/base-tukija/mk/spec/nova_x86_32.mk new file mode 100644 index 0000000000..17b2b520b5 --- /dev/null +++ b/repos/base-tukija/mk/spec/nova_x86_32.mk @@ -0,0 +1,4 @@ +# \deprecated + +SPECS += x86_32 +include $(call select_from_repositories,mk/spec/x86_32.mk) diff --git a/repos/base-tukija/mk/spec/tukija_x86_64.mk b/repos/base-tukija/mk/spec/tukija_x86_64.mk new file mode 100644 index 0000000000..4aa51fa4ee --- /dev/null +++ b/repos/base-tukija/mk/spec/tukija_x86_64.mk @@ -0,0 +1,4 @@ +# \deprecated + +SPECS += x86_64 +include $(call select_from_repositories,mk/spec/x86_64.mk) diff --git a/repos/base-tukija/patches/README b/repos/base-tukija/patches/README new file mode 100644 index 0000000000..468bfa5eff --- /dev/null +++ 
b/repos/base-tukija/patches/README @@ -0,0 +1,2 @@ +This directory contains patches for the Genode adjusted version of the NOVA +microhypervisor @ github required for using this kernel with Genode. diff --git a/repos/base-tukija/patches/allow_ioapic_access.patch b/repos/base-tukija/patches/allow_ioapic_access.patch new file mode 100644 index 0000000000..888fe1705a --- /dev/null +++ b/repos/base-tukija/patches/allow_ioapic_access.patch @@ -0,0 +1,14 @@ +diff --git a/src/ioapic.cpp b/src/ioapic.cpp +index d3c7991..d357cf3 100644 +--- a/src/ioapic.cpp ++++ b/src/ioapic.cpp +@@ -29,7 +29,8 @@ Ioapic *Ioapic::list; + + Ioapic::Ioapic (Paddr p, unsigned i, unsigned g) : List (list), reg_base ((hwdev_addr -= PAGE_SIZE) | (p & PAGE_MASK)), gsi_base (g), id (i), rid (0) + { +- Pd::kern.Space_mem::delreg (Pd::kern.quota, Pd::kern.mdb_cache, p & ~PAGE_MASK); ++// XXX allow Genode platform driver to access device and take over control ++// Pd::kern.Space_mem::delreg (Pd::kern.quota, Pd::kern.mdb_cache, p & ~PAGE_MASK); + Pd::kern.Space_mem::insert (Pd::kern.quota, reg_base, 0, Hpt::HPT_NX | Hpt::HPT_G | Hpt::HPT_UC | Hpt::HPT_W | Hpt::HPT_P, p & ~PAGE_MASK); + + trace (TRACE_APIC, "APIC:%#lx ID:%#x VER:%#x IRT:%#x PRQ:%u GSI:%u", diff --git a/repos/base-tukija/patches/allow_iommu_access.patch b/repos/base-tukija/patches/allow_iommu_access.patch new file mode 100644 index 0000000000..a569a96d8e --- /dev/null +++ b/repos/base-tukija/patches/allow_iommu_access.patch @@ -0,0 +1,14 @@ +diff --git a/src/iommu_intel.cpp b/src/iommu_intel.cpp +index 1441466..c07224e 100644 +--- a/src/iommu_intel.cpp ++++ b/src/iommu_intel.cpp +@@ -36,7 +36,8 @@ uint32 Dmar::gcmd = GCMD_TE; + + Dmar::Dmar (Paddr p) : List (list), reg_base ((hwdev_addr -= PAGE_SIZE) | (p & PAGE_MASK)), invq (static_cast(Buddy::allocator.alloc (ord, Pd::kern.quota, Buddy::FILL_0))), invq_idx (0) + { +- Pd::kern.Space_mem::delreg (Pd::kern.quota, Pd::kern.mdb_cache, p & ~PAGE_MASK); ++// XXX allow Genode platform driver 
to access device and take over control ++// Pd::kern.Space_mem::delreg (Pd::kern.quota, Pd::kern.mdb_cache, p & ~PAGE_MASK); + Pd::kern.Space_mem::insert (Pd::kern.quota, reg_base, 0, Hpt::HPT_NX | Hpt::HPT_G | Hpt::HPT_UC | Hpt::HPT_W | Hpt::HPT_P, p & ~PAGE_MASK); + + cap = read(REG_CAP); diff --git a/repos/base-tukija/patches/syscall_msi.patch b/repos/base-tukija/patches/syscall_msi.patch new file mode 100644 index 0000000000..b112ab71ed --- /dev/null +++ b/repos/base-tukija/patches/syscall_msi.patch @@ -0,0 +1,13 @@ +diff --git a/src/syscall.cpp b/src/syscall.cpp +index 838bfee..5619293 100644 +--- a/src/syscall.cpp ++++ b/src/syscall.cpp +@@ -1205,7 +1205,7 @@ void Ec::sys_assign_gsi() + sys_finish(); + } + +- if (EXPECT_FALSE (!Gsi::gsi_table[gsi].ioapic && (!Pd::current->Space_mem::lookup (r->dev(), phys) || ((rid = Pci::phys_to_rid (phys)) == ~0U && (rid = Hpet::phys_to_rid (phys)) == ~0U)))) { ++ if (EXPECT_FALSE (!Gsi::gsi_table[gsi].ioapic && r->dev() && (!Pd::current->Space_mem::lookup (r->dev(), phys) || ((rid = Pci::phys_to_rid (phys)) == ~0U && (rid = Hpet::phys_to_rid (phys)) == ~0U)))) { + trace (TRACE_ERROR, "%s: Non-DEV CAP (%#lx)", __func__, r->dev()); + sys_finish(); + } diff --git a/repos/base-tukija/ports/tukija.hash b/repos/base-tukija/ports/tukija.hash new file mode 100644 index 0000000000..77049ec01e --- /dev/null +++ b/repos/base-tukija/ports/tukija.hash @@ -0,0 +1 @@ +4755efcdeaf8f37dd554a85a6dafa3ba3bd537e5 diff --git a/repos/base-tukija/ports/tukija.port b/repos/base-tukija/ports/tukija.port new file mode 100644 index 0000000000..9c2e68ba0c --- /dev/null +++ b/repos/base-tukija/ports/tukija.port @@ -0,0 +1,11 @@ +LICENSE := GPLv2 +VERSION := git +DOWNLOADS := tukija.git + +# tukija-staging branch +URL(tukija) := git@github.com:mmueller41/tukija.git +REV(tukija) := tukija-staging +DIR(tukija) := src/kernel/tukija + +PATCHES := $(sort $(wildcard $(REP_DIR)/patches/*.patch)) +PATCH_OPT := -p1 -d ${DIR(tukija)} diff --git 
a/repos/base-tukija/recipes/api/base-tukija/content.mk b/repos/base-tukija/recipes/api/base-tukija/content.mk new file mode 100644 index 0000000000..d4dd61f09a --- /dev/null +++ b/repos/base-tukija/recipes/api/base-tukija/content.mk @@ -0,0 +1,17 @@ +FROM_BASE_TUKIJA := etc include + +# base-tukija.lib.a depends on timeout.lib.a, which includes base/internal/globals.h +FROM_BASE := lib/mk/timeout.mk src/lib/timeout \ + src/include/base/internal/globals.h + +content: $(FROM_BASE_TUKIJA) $(FROM_BASE) LICENSE + +$(FROM_BASE_TUKIJA): + $(mirror_from_rep_dir) + +$(FROM_BASE): + mkdir -p $(dir $@) + cp -r $(GENODE_DIR)/repos/base/$@ $@ + +LICENSE: + cp $(GENODE_DIR)/LICENSE $@ diff --git a/repos/base-tukija/recipes/api/base-tukija/hash b/repos/base-tukija/recipes/api/base-tukija/hash new file mode 100644 index 0000000000..9c9994f2f2 --- /dev/null +++ b/repos/base-tukija/recipes/api/base-tukija/hash @@ -0,0 +1 @@ +2024-10-07 d1a751a3b41d145c3a97b3431ae1f006050fee10 diff --git a/repos/base-tukija/recipes/src/base-tukija/README b/repos/base-tukija/recipes/src/base-tukija/README new file mode 100644 index 0000000000..47edc49b6f --- /dev/null +++ b/repos/base-tukija/recipes/src/base-tukija/README @@ -0,0 +1,9 @@ +This archive contains the Tukija-specific part of Genode. + +It also contains the source code of the Tukija kernel in the src/kernel/tukija/ +directory. + +This Tukija version is maintained at 'https://github.com/mmueller41/tukija.git'. + +Please note that Tukija has a license distinct from Genode. Tukija's license can +be found at 'src/kernel/tukija/LICENSE'. 
diff --git a/repos/base-tukija/recipes/src/base-tukija/content.mk b/repos/base-tukija/recipes/src/base-tukija/content.mk new file mode 100644 index 0000000000..d1e6ca1172 --- /dev/null +++ b/repos/base-tukija/recipes/src/base-tukija/content.mk @@ -0,0 +1,21 @@ +include $(GENODE_DIR)/repos/base/recipes/src/base_content.inc + +content: README +README: + cp $(REP_DIR)/recipes/src/base-tukija/README $@ + +content: src/kernel/tukija +src/kernel: + $(mirror_from_rep_dir) + +KERNEL_PORT_DIR := $(call port_dir,$(REP_DIR)/ports/tukija) + +src/kernel/tukija: src/kernel + cp -r $(KERNEL_PORT_DIR)/src/kernel/tukija/* $@ + +content: + for spec in x86_32 x86_64; do \ + mv lib/mk/spec/$$spec/ld-tukija.mk lib/mk/spec/$$spec/ld.mk; \ + done; + sed -i "s/tukija_timer/timer/" src/timer/tukija/target.mk + diff --git a/repos/base-tukija/recipes/src/base-tukija/hash b/repos/base-tukija/recipes/src/base-tukija/hash new file mode 100644 index 0000000000..02d5a1981a --- /dev/null +++ b/repos/base-tukija/recipes/src/base-tukija/hash @@ -0,0 +1 @@ +2024-12-10 bb446406fbb1173c3f243fe323d5cad8423ff958 diff --git a/repos/base-tukija/recipes/src/base-tukija/used_apis b/repos/base-tukija/recipes/src/base-tukija/used_apis new file mode 100644 index 0000000000..64b6f64e41 --- /dev/null +++ b/repos/base-tukija/recipes/src/base-tukija/used_apis @@ -0,0 +1,2 @@ +base-tukija +base diff --git a/repos/base-tukija/run/nova.run b/repos/base-tukija/run/nova.run new file mode 100644 index 0000000000..a304f23f49 --- /dev/null +++ b/repos/base-tukija/run/nova.run @@ -0,0 +1,46 @@ +build { core init lib/ld test/nova } + +set check_pat 1 +if {[have_include power_on/qemu]} { + set check_pat 0 +} + +if {[get_cmd_switch --autopilot] && [have_spec x86_32]} { + # Disable test for our outdated nightly test machine for 32bit + set check_pat 0 +} + +create_boot_directory + +set config { + + + + + + + + + + + + + } + +append config " + " + +append config { + + +} + +install_config $config + +build_boot_image 
[build_artifacts] + +append qemu_args "-nographic -smp 2" + +run_genode_until {Test finished} 240 + +puts "\nTest succeeded" diff --git a/repos/base-tukija/src/core/bios_data_area.cc b/repos/base-tukija/src/core/bios_data_area.cc new file mode 100644 index 0000000000..b9454010af --- /dev/null +++ b/repos/base-tukija/src/core/bios_data_area.cc @@ -0,0 +1,19 @@ +/* + * \brief Structure of the Bios Data Area after preparation through Bender + * \author Martin Stein + * \date 2015-07-10 + */ + +/* + * Copyright (C) 2015-2017 Genode Labs GmbH + * + * This file is part of the Genode OS framework, which is distributed + * under the terms of the GNU Affero General Public License version 3. + */ + +/* core includes */ +#include + +using namespace Genode; + +addr_t Bios_data_area::_mmio_base_virt() { return 0x1000; } diff --git a/repos/base-tukija/src/core/core-bss.ld b/repos/base-tukija/src/core/core-bss.ld new file mode 100644 index 0000000000..994c3a21ad --- /dev/null +++ b/repos/base-tukija/src/core/core-bss.ld @@ -0,0 +1,6 @@ +SECTIONS +{ + .data : { + *(.bss .bss.* .gnu.linkonce.b.* COMMON) + } : rw +} diff --git a/repos/base-tukija/src/core/core_log_out.cc b/repos/base-tukija/src/core/core_log_out.cc new file mode 100644 index 0000000000..7596abbebe --- /dev/null +++ b/repos/base-tukija/src/core/core_log_out.cc @@ -0,0 +1,30 @@ +/* + * \brief Kernel-specific core's 'log' backend + * \author Stefan Kalkowski + * \date 2016-10-10 + */ + +/* + * Copyright (C) 2016-2017 Genode Labs GmbH + * + * This file is part of the Genode OS framework, which is distributed + * under the terms of the GNU Affero General Public License version 3. 
+ */ + +/* core includes */ +#include + +/* Genode includes */ +#include +#include + +void Core::Core_log::out(char const c) +{ + enum { CLOCK = 0, BAUDRATE = 115200 }; + + static X86_uart uart(Bios_data_area::singleton()->serial_port(), + CLOCK, BAUDRATE); + if (c == '\n') + uart.put_char('\r'); + uart.put_char(c); +} diff --git a/repos/base-tukija/src/core/core_region_map.cc b/repos/base-tukija/src/core/core_region_map.cc new file mode 100644 index 0000000000..0d9ce1ae65 --- /dev/null +++ b/repos/base-tukija/src/core/core_region_map.cc @@ -0,0 +1,98 @@ +/* + * \brief Core-local region map + * \author Norman Feske + * \date 2009-10-02 + */ + +/* + * Copyright (C) 2009-2017 Genode Labs GmbH + * + * This file is part of the Genode OS framework, which is distributed + * under the terms of the GNU Affero General Public License version 3. + */ + +/* core includes */ +#include +#include +#include +#include + +/* NOVA includes */ +#include + +using namespace Core; + + +/** + * Map dataspace core-locally + */ +static inline void * alloc_region(Dataspace_component &ds, const size_t size) +{ + /* + * Allocate range in core's virtual address space + * + * Start with trying to use natural alignment. If this does not work, + * successively weaken the alignment constraint until we hit the page size. 
+ */ + void *virt_addr = 0; + size_t align_log2 = log2(ds.size()); + for (; align_log2 >= get_page_size_log2(); align_log2--) { + + platform().region_alloc().alloc_aligned(size, (unsigned)align_log2).with_result( + [&] (void *ptr) { virt_addr = ptr; }, + [&] (Allocator::Alloc_error) { }); + + if (virt_addr) + break; + } + + return virt_addr; +} + +Region_map::Attach_result +Core_region_map::attach(Dataspace_capability ds_cap, Attr const &attr) +{ + return _ep.apply(ds_cap, [&] (Dataspace_component * const ds_ptr) -> Attach_result { + + if (!ds_ptr) + return Attach_error::INVALID_DATASPACE; + + Dataspace_component &ds = *ds_ptr; + + /* attach attributes 'use_at' and 'offset' not supported within core */ + if (attr.use_at || attr.offset) + return Attach_error::REGION_CONFLICT; + + const size_t page_rounded_size = align_addr(ds.size(), get_page_size_log2()); + + /* allocate the virtual region contiguous for the dataspace */ + void * virt_ptr = alloc_region(ds, page_rounded_size); + if (!virt_ptr) + return Attach_error::OUT_OF_RAM; + + /* map it */ + Tukija::Utcb &utcb = *reinterpret_cast(Thread::myself()->utcb()); + const Tukija::Rights rights(true, attr.writeable && ds.writeable(), attr.executable); + + if (map_local(platform_specific().core_pd_sel(), utcb, + ds.phys_addr(), reinterpret_cast(virt_ptr), + page_rounded_size >> get_page_size_log2(), rights, true)) { + platform().region_alloc().free(virt_ptr, page_rounded_size); + + return Attach_error::OUT_OF_RAM; + } + + return Range { .start = addr_t(virt_ptr), .num_bytes = page_rounded_size }; + }); +} + + +void Core_region_map::detach(addr_t core_local_addr) +{ + size_t size = platform_specific().region_alloc_size_at((void *)core_local_addr); + + unmap_local(*reinterpret_cast(Thread::myself()->utcb()), + core_local_addr, size >> get_page_size_log2()); + + platform().region_alloc().free((void *)core_local_addr); +} diff --git a/repos/base-tukija/src/core/core_rpc_cap_alloc.cc 
b/repos/base-tukija/src/core/core_rpc_cap_alloc.cc new file mode 100644 index 0000000000..a4b1c37613 --- /dev/null +++ b/repos/base-tukija/src/core/core_rpc_cap_alloc.cc @@ -0,0 +1,50 @@ +/* + * \brief Core-specific back end of the RPC entrypoint + * \author Norman Feske + * \date 2016-01-19 + */ + +/* + * Copyright (C) 2016-2017 Genode Labs GmbH + * + * This file is part of the Genode OS framework, which is distributed + * under the terms of the GNU Affero General Public License version 3. + */ + +/* Genode includes */ +#include + +/* core includes */ +#include +#include +#include + +/* base-internal includes */ +#include + +using namespace Genode; + + +void Genode::init_rpc_cap_alloc(Parent &) { } + + +static Core::Rpc_cap_factory &rpc_cap_factory() +{ + static Core::Rpc_cap_factory inst(Core::platform().core_mem_alloc()); + return inst; +} + + +Native_capability Rpc_entrypoint::_alloc_rpc_cap(Pd_session &, Native_capability ep, + addr_t entry) +{ + Native_capability cap = rpc_cap_factory().alloc(ep, entry, 0); + imprint_badge(cap.local_name(), cap.local_name()); + return cap; +} + + +void Rpc_entrypoint::_free_rpc_cap(Pd_session &, Native_capability cap) +{ + rpc_cap_factory().free(cap); +} diff --git a/repos/base-tukija/src/core/include/imprint_badge.h b/repos/base-tukija/src/core/include/imprint_badge.h new file mode 100644 index 0000000000..07a4a0ab22 --- /dev/null +++ b/repos/base-tukija/src/core/include/imprint_badge.h @@ -0,0 +1,33 @@ +/* + * \brief Utility to imprint a badge into a NOVA portal + * \author Norman Feske + * \date 2016-03-03 + */ + +/* + * Copyright (C) 2016-2017 Genode Labs GmbH + * + * This file is part of the Genode OS framework, which is distributed + * under the terms of the GNU Affero General Public License version 3. 
+ */ + +#ifndef _CORE__INCLUDE__IMPRINT_BADGE_H_ +#define _CORE__INCLUDE__IMPRINT_BADGE_H_ + +/* NOVA includes */ +#include + +static inline bool imprint_badge(unsigned long pt_sel, unsigned long badge) +{ + using namespace Tukija; + + /* assign badge to portal */ + if (pt_ctrl(pt_sel, badge) != NOVA_OK) + return false; + + /* disable PT_CTRL permission to prevent subsequent imprint attempts */ + revoke(Obj_crd(pt_sel, 0, Obj_crd::RIGHT_PT_CTRL)); + return true; +} + +#endif /* _CORE__INCLUDE__IMPRINT_BADGE_H_ */ diff --git a/repos/base-tukija/src/core/include/ipc_pager.h b/repos/base-tukija/src/core/include/ipc_pager.h new file mode 100644 index 0000000000..105e6a642b --- /dev/null +++ b/repos/base-tukija/src/core/include/ipc_pager.h @@ -0,0 +1,134 @@ +/* + * \brief Low-level page-fault handling + * \author Norman Feske + * \date 2009-10-02 + */ + +/* + * Copyright (C) 2009-2017 Genode Labs GmbH + * + * This file is part of the Genode OS framework, which is distributed + * under the terms of the GNU Affero General Public License version 3. 
+ */ + +#ifndef _CORE__INCLUDE__IPC_PAGER_H_ +#define _CORE__INCLUDE__IPC_PAGER_H_ + +/* Genode includes */ +#include +#include +#include + +/* core includes */ +#include + +/* NOVA includes */ +#include + +namespace Core { class Ipc_pager; } + + +namespace Core { enum { PAGE_SIZE_LOG2 = 12 }; } + + +static inline Tukija::Rights nova_map_rights(Core::Mapping const &mapping) +{ + return Tukija::Rights(true, mapping.writeable, mapping.executable); +} + + +static inline Tukija::Mem_crd nova_src_crd(Core::Mapping const &mapping) +{ + return Tukija::Mem_crd(mapping.src_addr >> Core::PAGE_SIZE_LOG2, + mapping.size_log2 - Core::PAGE_SIZE_LOG2, + nova_map_rights(mapping)); +} + + +static inline Tukija::Mem_crd nova_dst_crd(Core::Mapping const &mapping) +{ + return Tukija::Mem_crd(mapping.dst_addr >> Core::PAGE_SIZE_LOG2, + mapping.size_log2 - Core::PAGE_SIZE_LOG2, + nova_map_rights(mapping)); +} + + +class Core::Ipc_pager +{ + private: + + addr_t _pd_dst; + addr_t _pd_core; + addr_t _fault_ip; + addr_t _fault_addr; + addr_t _sp; + uint8_t _fault_type; + uint8_t _syscall_res; + uint8_t _normal_ipc; + + public: + + Ipc_pager (Tukija::Utcb &, addr_t pd_dst, addr_t pd_core); + + /* + * Intel manual: 6.15 EXCEPTION AND INTERRUPT REFERENCE + * Interrupt 14—Page-Fault Exception (#PF) + */ + enum { + ERR_I = 1 << 4, + ERR_R = 1 << 3, + ERR_U = 1 << 2, + ERR_W = 1 << 1, + ERR_P = 1 << 0, + }; + + /** + * Answer current page fault + */ + void reply_and_wait_for_fault(addr_t sm = 0UL); + + /** + * Request instruction pointer of current fault + */ + addr_t fault_ip() { return _fault_ip; } + + /** + * Request page-fault address of current fault + */ + addr_t fault_addr() { return _fault_addr; } + + /** + * Set page-fault reply parameters + */ + void set_reply_mapping(Mapping m); + + /** + * Return true if fault was a write fault + */ + bool write_fault() const { return _fault_type & ERR_W; } + + /** + * Return true if fault was a non-executable fault + */ + bool exec_fault() const { + 
return _fault_type & ERR_P && _fault_type & ERR_I; } + + /** + * Return result of delegate syscall + */ + uint8_t syscall_result() const { return _syscall_res; } + + /** + * Return low level fault type info + * Intel manual: 6.15 EXCEPTION AND INTERRUPT REFERENCE + * Interrupt 14—Page-Fault Exception (#PF) + */ + addr_t fault_type() { return _fault_type; } + + /** + * Return stack pointer address valid during page-fault + */ + addr_t sp() { return _sp; } +}; + +#endif /* _CORE__INCLUDE__IPC_PAGER_H_ */ diff --git a/repos/base-tukija/src/core/include/irq_object.h b/repos/base-tukija/src/core/include/irq_object.h new file mode 100644 index 0000000000..80fcece2bf --- /dev/null +++ b/repos/base-tukija/src/core/include/irq_object.h @@ -0,0 +1,55 @@ +/* + * \brief Tukija-specific instance of the IRQ object + * \author Alexander Boettcher + */ + +/* + * Copyright (C) 2015-2017 Genode Labs GmbH + * + * This file is part of the Genode OS framework, which is distributed + * under the terms of the GNU Affero General Public License version 3. 
+ */ + +#ifndef _CORE__INCLUDE__IRQ_OBJECT_H_ +#define _CORE__INCLUDE__IRQ_OBJECT_H_ + +#include /* Gsi_flags */ + +/* core includes */ +#include + +namespace Core { class Irq_object; class Irq_args; } + + +class Core::Irq_object +{ + private: + + Signal_context_capability _sigh_cap { }; + + addr_t _kernel_caps; + addr_t _msi_addr; + addr_t _msi_data; + addr_t _device_phys = 0; /* PCI config extended address */ + + Tukija::Gsi_flags _gsi_flags { }; + + enum { KERNEL_CAP_COUNT_LOG2 = 0 }; + + addr_t irq_sel() const { return _kernel_caps; } + + public: + + Irq_object(); + ~Irq_object(); + + addr_t msi_address() const { return _msi_addr; } + addr_t msi_value() const { return _msi_data; } + + void sigh(Signal_context_capability cap); + void ack_irq(); + + void start(unsigned irq, addr_t, Irq_args const &); +}; + +#endif /* _CORE__INCLUDE__IRQ_OBJECT_H_ */ diff --git a/repos/base-tukija/src/core/include/map_local.h b/repos/base-tukija/src/core/include/map_local.h new file mode 100644 index 0000000000..3fafee214e --- /dev/null +++ b/repos/base-tukija/src/core/include/map_local.h @@ -0,0 +1,58 @@ +/* + * \brief Core-local mapping + * \author Norman Feske + * \date 2010-02-15 + */ + +/* + * Copyright (C) 2010-2017 Genode Labs GmbH + * + * This file is part of the Genode OS framework, which is distributed + * under the terms of the GNU Affero General Public License version 3. + */ + +#ifndef _CORE__INCLUDE__MAP_LOCAL_H_ +#define _CORE__INCLUDE__MAP_LOCAL_H_ + +/* Genode includes */ +#include + +/* core includes */ +#include + +namespace Core { + + /** + * Map pages locally within core + * + * On NOVA, address-space mappings from core to core originate always from + * the physical address space. 
+ * + * \param from_phys physical source address + * \param to_virt core-local destination address + * \param num_pages number of pages to map + * + * \return true on success + */ + inline bool map_local(addr_t from_phys, addr_t to_virt, size_t num_pages, + bool read = true, bool write = true, bool exec = true) + { + return (::map_local(platform_specific().core_pd_sel(), + *(Tukija::Utcb *)Thread::myself()->utcb(), + from_phys, to_virt, num_pages, + Tukija::Rights(read, write, exec), true) == 0); + } + + /** + * Unmap pages locally within core + * + * \param virt core-local address + * \param num_pages number of pages to unmap + */ + inline void unmap_local(addr_t virt, size_t num_pages) + { + ::unmap_local(*(Tukija::Utcb *)Thread::myself()->utcb(), virt, num_pages); + } +} + +#endif /* _CORE__INCLUDE__MAP_LOCAL_H_ */ diff --git a/repos/base-tukija/src/core/include/native_cpu_component.h b/repos/base-tukija/src/core/include/native_cpu_component.h new file mode 100644 index 0000000000..5381da6cae --- /dev/null +++ b/repos/base-tukija/src/core/include/native_cpu_component.h @@ -0,0 +1,47 @@ +/* + * \brief Kernel-specific part of the CPU-session interface + * \author Norman Feske + * \date 2016-04-21 + */ + +/* + * Copyright (C) 2016-2017 Genode Labs GmbH + * + * This file is part of the Genode OS framework, which is distributed + * under the terms of the GNU Affero General Public License version 3. 
+ */ + +#ifndef _CORE__INCLUDE__NATIVE_CPU_COMPONENT_H_ +#define _CORE__INCLUDE__NATIVE_CPU_COMPONENT_H_ + +/* Genode includes */ +#include +#include + +/* core includes */ +#include + +namespace Core { + + class Cpu_session_component; + class Native_cpu_component; +} + + +class Core::Native_cpu_component : public Rpc_object +{ + private: + + Cpu_session_component &_cpu_session; + Rpc_entrypoint &_thread_ep; + + public: + + Native_cpu_component(Cpu_session_component &, char const *); + ~Native_cpu_component(); + + void thread_type(Thread_capability, Thread_type, Exception_base) override; +}; + +#endif /* _CORE__INCLUDE__NATIVE_CPU_COMPONENT_H_ */ diff --git a/repos/base-tukija/src/core/include/native_pd_component.h b/repos/base-tukija/src/core/include/native_pd_component.h new file mode 100644 index 0000000000..43e2e4c42b --- /dev/null +++ b/repos/base-tukija/src/core/include/native_pd_component.h @@ -0,0 +1,49 @@ +/* + * \brief Kernel-specific part of the PD-session interface + * \author Norman Feske + * \date 2016-01-19 + */ + +/* + * Copyright (C) 2016-2017 Genode Labs GmbH + * + * This file is part of the Genode OS framework, which is distributed + * under the terms of the GNU Affero General Public License version 3. 
+ */ + +#ifndef _CORE__INCLUDE__NATIVE_PD_COMPONENT_H_ +#define _CORE__INCLUDE__NATIVE_PD_COMPONENT_H_ + +/* Genode includes */ +#include + +/* core includes */ +#include + +namespace Core { + + class Pd_session_component; + class Native_pd_component; +} + + +class Core::Native_pd_component : public Rpc_object +{ + private: + + Pd_session_component &_pd_session; + + public: + + Native_pd_component(Pd_session_component &pd, char const *args); + + ~Native_pd_component(); + + /** + * Native_pd interface + */ + Native_capability alloc_rpc_cap(Native_capability, addr_t, addr_t) override; + void imprint_rpc_cap(Native_capability, unsigned long) override; +}; + +#endif /* _CORE__INCLUDE__NATIVE_PD_COMPONENT_H_ */ diff --git a/repos/base-tukija/src/core/include/nova_util.h b/repos/base-tukija/src/core/include/nova_util.h new file mode 100644 index 0000000000..b70d72755b --- /dev/null +++ b/repos/base-tukija/src/core/include/nova_util.h @@ -0,0 +1,296 @@ +/* + * \brief NOVA-specific convenience functions + * \author Norman Feske + * \author Sebastian Sumpf + * \author Alexander Boettcher + * \date 2010-01-19 + */ + +/* + * Copyright (C) 2010-2017 Genode Labs GmbH + * + * This file is part of the Genode OS framework, which is distributed + * under the terms of the GNU Affero General Public License version 3. + */ + +#ifndef _CORE__INCLUDE__NOVA_UTIL_H_ +#define _CORE__INCLUDE__NOVA_UTIL_H_ + +/* NOVA includes */ +#include + +/* core includes */ +#include +#include + + +/** + * Return boot CPU number. It is required if threads in core should be placed + * on the same CPU as the main thread. + */ +inline Genode::addr_t boot_cpu() +{ + /** + * Initial value of ax and di register, saved by the crt0 startup code + * and SOLELY VALID in 'core' !!! + * + * For x86_32 - __initial_ax contains the number of the boot CPU. + * For x86_64 - __initial_di contains the number of the boot CPU. 
+ */ + extern Genode::addr_t __initial_ax; + extern Genode::addr_t __initial_di; + + return (sizeof(void *) > 4) ? __initial_di : __initial_ax; +} + +/** + * Establish a mapping + * + * \param utcb UTCB of the calling EC + * \param src_crd capability range descriptor of source + * resource to map locally + * \param dst_crd capability range descriptor of mapping + * target + * \param kern_pd Whether to map the items from the kernel or from core + * \param dma_mem Whether the memory is usable for DMA or not + */ +static int map_local(Genode::addr_t const pd, Tukija::Utcb &utcb, + Tukija::Crd const src_crd, Tukija::Crd const dst_crd, + bool const kern_pd = false, bool const dma_mem = false, + bool const write_combined = false) +{ + /* asynchronously map capabilities */ + utcb.set_msg_word(0); + + /* ignore return value as one item always fits into the utcb */ + bool const ok = utcb.append_item(src_crd, 0, kern_pd, false, false, + dma_mem, write_combined); + (void)ok; + + Tukija::uint8_t res = Tukija::delegate(pd, pd, dst_crd); + if (res != Tukija::NOVA_OK) { + + using Hex = Genode::Hex; + error("map_local failed ", + Hex(src_crd.addr()), ":", Hex(src_crd.order()), ":", Hex(src_crd.type()), "->", + Hex(dst_crd.addr()), ":", Hex(dst_crd.order()), ":", Hex(dst_crd.type()), " - ", + "result=", Hex(res), " " + "msg=", Hex(utcb.msg_items()), ":", + Hex(utcb.msg_words()), ":", + Hex(utcb.msg()[0]), " !!! " + "utcb=", &utcb, " " + "kern=", kern_pd); + return res > 0 ? 
res : -1; + } + /* clear receive window */ + utcb.crd_rcv = 0; + + return 0; +} + + +static inline int unmap_local(Tukija::Crd crd, bool self = true) { + return Tukija::revoke(crd, self); } + +inline int map_local_phys_to_virt(Tukija::Utcb &utcb, Tukija::Crd const src, + Tukija::Crd const dst, Genode::addr_t const pd) +{ + return map_local(pd, utcb, src, dst, true); +} + +inline int map_local_one_to_one(Tukija::Utcb &utcb, Tukija::Crd const crd, + Genode::addr_t const pd) +{ + return map_local(pd, utcb, crd, crd, true); +} + + +/** + * Find least significant set bit in value + */ +inline unsigned char +lsb_bit(unsigned long const &value, unsigned char const shift = 0) +{ + unsigned long const scan = value >> shift; + if (scan == 0) return 0; + + unsigned char pos = (unsigned char)__builtin_ctzl(scan); + unsigned char res = shift ? pos + shift : pos; + return res; +} + +/** + * Remap pages in the local address space + * + * \param utcb UTCB of the main thread + * \param from_start physical source address + * \param to_start local virtual destination address + * \param num_pages number of pages to map + */ +inline int map_local(Genode::addr_t const pd, Tukija::Utcb &utcb, + Genode::addr_t from_start, Genode::addr_t to_start, + Genode::size_t num_pages, + Tukija::Rights const &permission, + bool kern_pd = false, bool dma_mem = false, + bool write_combined = false) +{ + using namespace Tukija; + using namespace Genode; + + size_t const size = num_pages << get_page_size_log2(); + + addr_t const from_end = from_start + size; + addr_t const to_end = to_start + size; + + for (addr_t offset = 0; offset < size; ) { + + addr_t const from_curr = from_start + offset; + addr_t const to_curr = to_start + offset; + + /* + * The common alignment corresponds to the number of least significant + * zero bits in both addresses. 
+ */ + addr_t const common_bits = from_curr | to_curr; + + /* find least set bit in common bits */ + size_t order = lsb_bit(common_bits, get_page_size_log2()); + + /* look if flexpage fits into both 'from' and 'to' address range */ + if ((from_end - from_curr) < (1UL << order)) + order = log2(from_end - from_curr); + + if ((to_end - to_curr) < (1UL << order)) + order = log2(to_end - to_curr); + + if (order >= sizeof(void *)*8) + return 1; + + int const res = map_local(pd, utcb, + Mem_crd((from_curr >> 12), order - get_page_size_log2(), permission), + Mem_crd((to_curr >> 12), order - get_page_size_log2(), permission), + kern_pd, dma_mem, write_combined); + if (res) return res; + + /* advance offset by current flexpage size */ + offset += (1UL << order); + } + return 0; +} + + +/** + * Unmap pages from the local address space + * + * \param utcb UTCB of the main thread + * \param start local virtual address + * \param num_pages number of pages to unmap + * \param self unmap from this pd or solely from other pds + * \param self map from this pd or solely from other pds + * \param rights rights to be revoked, default: all rwx + */ +inline void unmap_local(Tukija::Utcb &, Genode::addr_t start, + Genode::size_t num_pages, + bool const self = true, + Tukija::Rights const rwx = Tukija::Rights(true, true, true)) +{ + using namespace Tukija; + using namespace Genode; + + Genode::addr_t base = start >> get_page_size_log2(); + + if (start & (get_page_size() - 1)) { + error("unmap failed - unaligned address specified"); + return; + } + + while (num_pages) { + unsigned char const base_bit = lsb_bit(base); + unsigned char const order_bit = (unsigned char)min(log2(num_pages), 31U); + unsigned char const order = min(order_bit, base_bit); + + Mem_crd const crd(base, order, rwx); + + unmap_local(crd, self); + + num_pages -= 1UL << order; + base += 1UL << order; + } +} + + +inline Tukija::uint8_t syscall_retry(Core::Pager_object &pager, auto const &fn) +{ + Tukija::uint8_t res; + do { 
+ res = fn(); + } while (res == Tukija::NOVA_PD_OOM && Tukija::NOVA_OK == pager.handle_oom()); + + return res; +} + +inline Tukija::uint8_t async_map(Core::Pager_object &pager, + Genode::addr_t const source_pd, + Genode::addr_t const target_pd, + Tukija::Obj_crd const &source_initial_caps, + Tukija::Obj_crd const &target_initial_caps, + Tukija::Utcb &utcb) +{ + /* asynchronously map capabilities */ + utcb.set_msg_word(0); + + /* ignore return value as one item always fits into the utcb */ + bool const ok = utcb.append_item(source_initial_caps, 0); + (void)ok; + + return syscall_retry(pager, + [&] { + return Tukija::delegate(source_pd, target_pd, target_initial_caps); + }); +} + +inline Tukija::uint8_t map_vcpu_portals(Core::Pager_object &pager, + Genode::addr_t const source_exc_base, + Genode::addr_t const target_exc_base, + Tukija::Utcb &utcb, + Genode::addr_t const source_pd) +{ + using Tukija::Obj_crd; + using Tukija::NUM_INITIAL_VCPU_PT_LOG2; + + Obj_crd const source_initial_caps(source_exc_base, NUM_INITIAL_VCPU_PT_LOG2); + Obj_crd const target_initial_caps(target_exc_base, NUM_INITIAL_VCPU_PT_LOG2); + + return async_map(pager, source_pd, pager.pd_sel(), + source_initial_caps, target_initial_caps, utcb); +} + +inline Tukija::uint8_t map_pagefault_portal(Core::Pager_object &pager, + Genode::addr_t const source_exc_base, + Genode::addr_t const target_exc_base, + Genode::addr_t const target_pd, + Tukija::Utcb &utcb) +{ + using Tukija::Obj_crd; + using Tukija::PT_SEL_PAGE_FAULT; + + Genode::addr_t const source_pd = Core::platform_specific().core_pd_sel(); + + Obj_crd const source_initial_caps(source_exc_base + PT_SEL_PAGE_FAULT, 0); + Obj_crd const target_initial_caps(target_exc_base + PT_SEL_PAGE_FAULT, 0); + + return async_map(pager, source_pd, target_pd, + source_initial_caps, target_initial_caps, utcb); +} + +inline Tukija::Hip const &kernel_hip() +{ + /** + * Initial value of esp register, saved by the crt0 startup code. 
+ * This value contains the address of the hypervisor information page. + */ + extern Genode::addr_t __initial_sp; + return *reinterpret_cast(__initial_sp); +} + +#endif /* _CORE__INCLUDE__NOVA_UTIL_H_ */ diff --git a/repos/base-tukija/src/core/include/pager.h b/repos/base-tukija/src/core/include/pager.h new file mode 100644 index 0000000000..a26e2fc4c2 --- /dev/null +++ b/repos/base-tukija/src/core/include/pager.h @@ -0,0 +1,409 @@ +/* + * \brief Paging-server framework + * \author Norman Feske + * \date 2006-04-28 + */ + +/* + * Copyright (C) 2006-2020 Genode Labs GmbH + * + * This file is part of the Genode OS framework, which is distributed + * under the terms of the GNU Affero General Public License version 3. + */ + +#ifndef _CORE__INCLUDE__PAGER_H_ +#define _CORE__INCLUDE__PAGER_H_ + +/* Genode includes */ +#include +#include +#include +#include +#include +#include + +/* NOVA includes */ +#include + +/* core includes */ +#include +#include + +namespace Core { + + class Pager_entrypoint; + class Pager_object; + class Exception_handlers; + + using Pager_capability = Capability; + + extern void init_page_fault_handling(Rpc_entrypoint &); +} + + +class Core::Exception_handlers +{ + private: + + template + __attribute__((regparm(1))) static void _handler(Pager_object &); + + public: + + Exception_handlers(Pager_object &); + + template + void register_handler(Pager_object &, Tukija::Mtd, + void (__attribute__((regparm(1)))*)(Pager_object &) = nullptr); +}; + + +class Core::Pager_object : public Object_pool::Entry +{ + private: + + unsigned long _badge; /* used for debugging */ + + /** + * User-level signal handler registered for this pager object via + * 'Cpu_session::exception_handler()'. 
+ */ + Signal_context_capability _exception_sigh { }; + + /** + * selectors for + * - cleanup portal + * - semaphore used by caller used to notify paused state + * - semaphore used to block during page fault handling or pausing + */ + addr_t _selectors; + + addr_t _initial_esp = 0; + addr_t _initial_eip = 0; + addr_t _client_exc_pt_sel; + + Mutex _state_lock { }; + + struct + { + struct Thread_state thread; + addr_t sel_client_ec; + enum { + BLOCKED = 0x1U, + DEAD = 0x2U, + SINGLESTEP = 0x4U, + SIGNAL_SM = 0x8U, + SUBMIT_SIGNAL = 0x20U, + BLOCKED_PAUSE_SM = 0x40U, + MIGRATE = 0x80U + }; + uint8_t _status; + bool modified; + + /* convenience function to access pause/recall state */ + inline bool blocked() { return _status & BLOCKED;} + inline void block() { _status |= BLOCKED; } + inline void unblock() { _status &= (uint8_t)(~BLOCKED); } + inline bool blocked_pause_sm() { return _status & BLOCKED_PAUSE_SM;} + inline void block_pause_sm() { _status |= (uint8_t)BLOCKED_PAUSE_SM; } + inline void unblock_pause_sm() { _status &= (uint8_t)(~BLOCKED_PAUSE_SM); } + + inline void mark_dead() { _status |= DEAD; } + inline bool is_dead() { return _status & DEAD; } + + inline bool singlestep() { return _status & SINGLESTEP; } + + inline void mark_signal_sm() { _status |= SIGNAL_SM; } + inline bool has_signal_sm() { return _status & SIGNAL_SM; } + + inline bool to_submit() { return _status & SUBMIT_SIGNAL; } + inline void submit_signal() { _status |= SUBMIT_SIGNAL; } + inline void reset_submit() { _status &= (uint8_t)(~SUBMIT_SIGNAL); } + + bool migrate() const { return _status & MIGRATE; } + void reset_migrate() { _status &= (uint8_t)(~MIGRATE); } + void request_migrate() { _status |= MIGRATE; } + } _state { }; + + Cpu_session_capability _cpu_session_cap; + Thread_capability _thread_cap; + Affinity::Location _location; + Affinity::Location _next_location { }; + Exception_handlers _exceptions; + + addr_t _pd_target; + + void _copy_state_from_utcb(Tukija::Utcb const &utcb); + 
void _copy_state_to_utcb(Tukija::Utcb &utcb) const; + + uint8_t _unsynchronized_client_recall(bool get_state_and_block); + + addr_t sel_pt_cleanup() const { return _selectors; } + addr_t sel_sm_block_pause() const { return _selectors + 1; } + addr_t sel_sm_block_oom() const { return _selectors + 2; } + addr_t sel_oom_portal() const { return _selectors + 3; } + + __attribute__((regparm(1))) + static void _page_fault_handler(Pager_object &); + + __attribute__((regparm(1))) + static void _startup_handler(Pager_object &); + + __attribute__((regparm(1))) + static void _invoke_handler(Pager_object &); + + __attribute__((regparm(1))) + static void _recall_handler(Pager_object &); + + __attribute__((regparm(3))) + static void _oom_handler(addr_t, addr_t, addr_t); + + void _construct_pager(); + bool _migrate_thread(); + + public: + + Pager_object(Cpu_session_capability cpu_session_cap, + Thread_capability thread_cap, + unsigned long badge, Affinity::Location location, + Session_label const &, + Cpu_session::Name const &); + + virtual ~Pager_object(); + + unsigned long badge() const { return _badge; } + void reset_badge() + { + Mutex::Guard guard(_state_lock); + _badge = 0; + } + + const char * client_thread() const; + const char * client_pd() const; + + enum class Pager_result { STOP, CONTINUE }; + + virtual Pager_result pager(Ipc_pager &ps) = 0; + + /** + * Assign user-level exception handler for the pager object + */ + void exception_handler(Signal_context_capability sigh) + { + _exception_sigh = sigh; + } + + Affinity::Location location() const { return _location; } + + void migrate(Affinity::Location); + + /** + * Assign PD selector to PD + */ + void assign_pd(addr_t pd_sel) { _pd_target = pd_sel; } + addr_t pd_sel() const { return _pd_target; } + + void exception(uint8_t exit_id); + + /** + * Return base of initial portal window + */ + addr_t exc_pt_sel_client() { return _client_exc_pt_sel; } + + /** + * Set initial stack pointer used by the startup handler + */ + 
addr_t initial_esp() { return _initial_esp; } + void initial_esp(addr_t esp) { _initial_esp = esp; } + + /** + * Set initial instruction pointer used by the startup handler + */ + void initial_eip(addr_t eip) { _initial_eip = eip; } + + /** + * Continue execution of pager object + */ + void wake_up(); + + /** + * Notify exception handler about the occurrence of an exception + */ + bool submit_exception_signal() + { + if (!_exception_sigh.valid()) return false; + + _state.reset_submit(); + + Signal_transmitter transmitter(_exception_sigh); + transmitter.submit(); + + return true; + } + + /** + * Copy thread state of recalled thread. + */ + bool copy_thread_state(Thread_state * state_dst) + { + Mutex::Guard _state_lock_guard(_state_lock); + + if (!state_dst || !_state.blocked()) + return false; + + *state_dst = _state.thread; + + return true; + } + + /* + * Copy thread state to recalled thread. + */ + bool copy_thread_state(Thread_state state_src) + { + Mutex::Guard _state_lock_guard(_state_lock); + + if (!_state.blocked()) + return false; + + _state.thread = state_src; + _state.modified = true; + + return true; + } + + uint8_t client_recall(bool get_state_and_block); + void client_set_ec(addr_t ec) { _state.sel_client_ec = ec; } + + inline void single_step(bool on) + { + _state_lock.acquire(); + + if (_state.is_dead() || !_state.blocked() || + (on && (_state._status & _state.SINGLESTEP)) || + (!on && !(_state._status & _state.SINGLESTEP))) { + _state_lock.release(); + return; + } + + if (on) + _state._status |= _state.SINGLESTEP; + else + _state._status &= (uint8_t)(~_state.SINGLESTEP); + + _state_lock.release(); + + /* force client in exit and thereby apply single_step change */ + client_recall(false); + } + + /** + * Return CPU session that was used to created the thread + */ + Cpu_session_capability cpu_session_cap() const { return _cpu_session_cap; } + + /** + * Return thread capability + * + * This function enables the destructor of the thread's + * 
address-space region map to kill the thread. + */ + Thread_capability thread_cap() const { return _thread_cap; } + + /** + * Note in the thread state that an unresolved page + * fault occurred. + */ + void unresolved_page_fault_occurred() + { + _state.thread.state = Thread_state::State::PAGE_FAULT; + } + + /** + * Make sure nobody is in the handler anymore by doing an IPC to a + * local cap pointing to same serving thread (if not running in the + * context of the serving thread). When the call returns + * we know that nobody is handled by this object anymore, because + * all remotely available portals had been revoked beforehand. + */ + void cleanup_call(); + + /** + * Portal called by thread that causes an out of memory in kernel. + */ + addr_t create_oom_portal(); + + enum Policy { + STOP = 1, + UPGRADE_CORE_TO_DST = 2, + UPGRADE_PREFER_SRC_TO_DST = 3, + }; + + enum Oom { + SEND = 1, REPLY = 2, SELF = 4, + SRC_CORE_PD = ~0UL, SRC_PD_UNKNOWN = 0, + NO_NOTIFICATION = 0 + }; + + /** + * Implements policy on how to react on out of memory in kernel. + * + * Used solely inside core. On Genode core creates all the out + * of memory portals per EC. If the PD of an EC runs out of kernel + * memory it causes an OOM portal traversal, which is handled + * by the pager object of the causing thread. + * + * \param pd_sel PD selector from where to transfer kernel memory + * resources. The PD of this pager_object is the + * target PD. 
+ * \param pd debug feature - string of PD (transfer_from) + * \param thread debug feature - string of EC (transfer_from) + */ + uint8_t handle_oom(addr_t pd_sel = SRC_CORE_PD, + const char * pd = "core", + const char * thread = "unknown", + Policy = Policy::UPGRADE_CORE_TO_DST); + static uint8_t handle_oom(addr_t pd_from, addr_t pd_to, + char const * src_pd, + char const * src_thread, + Policy policy, + addr_t sm_notify = NO_NOTIFICATION, + char const * dst_pd = "unknown", + char const * dst_thread = "unknown"); + + void print(Output &out) const; +}; + + +/** + * Paging entry point + * + * A paging entry point can hold only one activation. So, paging is + * strictly serialized for one entry point. + */ +class Core::Pager_entrypoint : public Object_pool +{ + public: + + /** + * Constructor + * + * \param cap_factory factory for creating capabilities + * for the pager objects managed by this + * entry point + */ + Pager_entrypoint(Rpc_cap_factory &cap_factory); + + /** + * Associate Pager_object with the entry point + */ + Pager_capability manage(Pager_object &) { + return Pager_capability(); } + + /** + * Dissolve Pager_object from entry point + */ + void dissolve(Pager_object &obj); +}; + +#endif /* _CORE__INCLUDE__PAGER_H_ */ diff --git a/repos/base-tukija/src/core/include/platform.h b/repos/base-tukija/src/core/include/platform.h new file mode 100644 index 0000000000..7674a6ec68 --- /dev/null +++ b/repos/base-tukija/src/core/include/platform.h @@ -0,0 +1,145 @@ +/* + * \brief Platform interface + * \author Norman Feske + * \author Alexander Boettcher + * \date 2009-10-02 + */ + +/* + * Copyright (C) 2009-2022 Genode Labs GmbH + * + * This file is part of the Genode OS framework, which is distributed + * under the terms of the GNU Affero General Public License version 3. 
+ */ + +#ifndef _CORE__INCLUDE__PLATFORM_H_ +#define _CORE__INCLUDE__PLATFORM_H_ + +/* core includes */ +#include +#include +#include + +namespace Core { class Platform; } + + +class Core::Platform : public Platform_generic +{ + public: + + enum { MAX_SUPPORTED_CPUS = 64}; + + private: + + Core_mem_allocator _core_mem_alloc { }; /* core-accessible memory */ + Phys_allocator _io_mem_alloc; /* MMIO allocator */ + Phys_allocator _io_port_alloc; /* I/O port allocator */ + Phys_allocator _irq_alloc; /* IRQ allocator */ + Rom_fs _rom_fs { }; /* ROM file system */ + unsigned _gsi_base_sel { 0 }; /* cap selector of 1st IRQ */ + unsigned _core_pd_sel { 0 }; /* cap selector of root PD */ + addr_t _core_phys_start { 0 }; + + /** + * Virtual address range usable by non-core processes + */ + const addr_t _vm_base; + size_t _vm_size; + + /* available CPUs */ + Affinity::Space _cpus; + + /* map of virtual cpu ids in Genode to kernel cpu ids */ + uint16_t map_cpu_ids[MAX_SUPPORTED_CPUS]; + + addr_t _map_pages(addr_t phys_page, addr_t pages, + bool guard_page = false); + + size_t _max_caps = 0; + + void _init_rom_modules(); + + addr_t _rom_module_phys(addr_t virt); + + public: + + /** + * Constructor + */ + Platform(); + + + /******************************** + ** Generic platform interface ** + ********************************/ + + Range_allocator &ram_alloc() override { return _core_mem_alloc.phys_alloc(); } + Range_allocator &io_mem_alloc() override { return _io_mem_alloc; } + Range_allocator &io_port_alloc() override { return _io_port_alloc; } + Range_allocator &irq_alloc() override { return _irq_alloc; } + Range_allocator ®ion_alloc() override { return _core_mem_alloc.virt_alloc(); } + Range_allocator &core_mem_alloc() override { return _core_mem_alloc; } + addr_t vm_start() const override { return _vm_base; } + size_t vm_size() const override { return _vm_size; } + Rom_fs &rom_fs() override { return _rom_fs; } + size_t max_caps() const override { return _max_caps; } + void 
wait_for_exit() override; + + bool supports_direct_unmap() const override { return true; } + + Address_space &core_pd() { ASSERT_NEVER_CALLED; } + + Affinity::Space affinity_space() const override { return _cpus; } + + + /******************* + ** NOVA specific ** + *******************/ + + /** + * Return capability selector of first global system interrupt + */ + int gsi_base_sel() const { return _gsi_base_sel; } + + /** + * Determine size of a core local mapping required for a + * core_rm_session detach(). + */ + size_t region_alloc_size_at(void * addr) + { + using Size_at_error = Allocator_avl::Size_at_error; + + return (_core_mem_alloc.virt_alloc())()->size_at(addr).convert( + [ ] (size_t s) { return s; }, + [ ] (Size_at_error) { return 0U; }); + } + + /** + * Return kernel CPU ID for given Genode CPU + */ + unsigned pager_index(Affinity::Location location) const; + unsigned kernel_cpu_id(Affinity::Location location) const; + + Affinity::Location sanitize(Affinity::Location location) { + return Affinity::Location(location.xpos() % _cpus.width(), + location.ypos() % _cpus.height(), + location.width(), location.height()); + } + + /** + * PD kernel capability selector of core + */ + unsigned core_pd_sel() const { return _core_pd_sel; } + + void for_each_location(auto const &fn) + { + for (unsigned x = 0; x < _cpus.width(); x++) { + for (unsigned y = 0; y < _cpus.height(); y++) { + Affinity::Location location(x, y, 1, 1); + fn(location); + } + } + } +}; + +#endif /* _CORE__INCLUDE__PLATFORM_H_ */ diff --git a/repos/base-tukija/src/core/include/platform_pd.h b/repos/base-tukija/src/core/include/platform_pd.h new file mode 100644 index 0000000000..cb4ee59cc7 --- /dev/null +++ b/repos/base-tukija/src/core/include/platform_pd.h @@ -0,0 +1,88 @@ +/* + * \brief Protection-domain facility + * \author Norman Feske + * \date 2009-10-02 + */ + +/* + * Copyright (C) 2009-2017 Genode Labs GmbH + * + * This file is part of the Genode OS framework, which is distributed + * under 
the terms of the GNU Affero General Public License version 3. + */ + +#ifndef _CORE__INCLUDE__PLATFORM_PD_H_ +#define _CORE__INCLUDE__PLATFORM_PD_H_ + +#include +#include +#include + +namespace Core { + + class Platform_thread; + class Platform_pd; +} + + +class Core::Platform_pd : public Address_space +{ + private: + + Native_capability _parent { }; + addr_t const _pd_sel; + const char * _label; + + /* + * Noncopyable + */ + Platform_pd(Platform_pd const &); + Platform_pd &operator = (Platform_pd const &); + + public: + + bool has_any_threads = false; + + /** + * Constructors + */ + Platform_pd(Allocator &md_alloc, char const *, + signed pd_id = -1, bool create = true); + + /** + * Destructor + */ + ~Platform_pd(); + + /** + * Assign parent interface to protection domain + */ + void assign_parent(Native_capability parent); + + /** + * Return portal capability selector for parent interface + */ + addr_t parent_pt_sel() { return _parent.local_name(); } + + /** + * Capability selector of this task. 
+ * + * \return PD selector + */ + addr_t pd_sel() const { return _pd_sel; } + + /** + * Label of this protection domain + * + * \return name of this protection domain + */ + const char * name() const { return _label; } + + /***************************** + ** Address-space interface ** + *****************************/ + + void flush(addr_t, size_t, Core_local_addr) override; +}; + +#endif /* _CORE__INCLUDE__PLATFORM_PD_H_ */ diff --git a/repos/base-tukija/src/core/include/platform_thread.h b/repos/base-tukija/src/core/include/platform_thread.h new file mode 100644 index 0000000000..b2b00ab0bd --- /dev/null +++ b/repos/base-tukija/src/core/include/platform_thread.h @@ -0,0 +1,215 @@ +/* + * \brief Thread facility + * \author Norman Feske + * \author Alexander Boettcher + * \date 2009-10-02 + */ + +/* + * Copyright (C) 2009-2017 Genode Labs GmbH + * + * This file is part of the Genode OS framework, which is distributed + * under the terms of the GNU Affero General Public License version 3. 
+ */ + +#ifndef _CORE__INCLUDE__PLATFORM_THREAD_H_ +#define _CORE__INCLUDE__PLATFORM_THREAD_H_ + +/* Genode includes */ +#include +#include +#include +#include + +/* base-internal includes */ +#include + +/* core includes */ +#include +#include + +namespace Core { + + class Platform_pd; + class Platform_thread; +} + + +class Core::Platform_thread +{ + private: + + Platform_pd &_pd; + Pager_object *_pager; + addr_t _id_base; + addr_t _sel_exc_base; + Affinity::Location _location; + + enum { + MAIN_THREAD = 0x1U, + VCPU = 0x2U, + WORKER = 0x4U, + SC_CREATED = 0x8U, + REMOTE_PD = 0x10U, + }; + uint8_t _features; + uint8_t _priority; + + Stack::Name _name; + + addr_t _sel_ec() const { return _id_base; } + addr_t _sel_pt_oom() const { return _id_base + 1; } + addr_t _sel_sc() const { return _id_base + 2; } + + /* convenience function to access _feature variable */ + inline bool main_thread() const { return _features & MAIN_THREAD; } + inline bool vcpu() const { return _features & VCPU; } + inline bool worker() const { return _features & WORKER; } + inline bool sc_created() const { return _features & SC_CREATED; } + inline bool remote_pd() const { return _features & REMOTE_PD; } + + /* + * Noncopyable + */ + Platform_thread(Platform_thread const &); + Platform_thread &operator = (Platform_thread const &); + + /** + * Create OOM portal and delegate it + */ + bool _create_and_map_oom_portal(Tukija::Utcb &); + + public: + + /* mark as vcpu in remote pd if it is a vcpu */ + addr_t remote_vcpu() { + if (!vcpu()) + return Native_thread::INVALID_INDEX; + + _features |= Platform_thread::REMOTE_PD; + return _sel_exc_base; + } + + /** + * Constructor + */ + Platform_thread(Platform_pd &, Rpc_entrypoint &, Ram_allocator &, Region_map &, + size_t quota, char const *name, unsigned priority, + Affinity::Location affinity, addr_t utcb); + + /** + * Destructor + */ + ~Platform_thread(); + + /** + * Return true if thread creation succeeded + */ + bool valid() const { return true; } + + 
/** + * Start thread + * + * \param ip instruction pointer to start at + * \param sp stack pointer to use + */ + void start(void *ip, void *sp); + + /** + * Pause this thread + */ + void pause(); + + /** + * Enable/disable single stepping + */ + void single_step(bool); + + /** + * Resume this thread + */ + void resume(); + + /** + * Override thread state with 's' + */ + void state(Thread_state s); + + /** + * Read thread state + */ + Thread_state state(); + + /************************ + ** Accessor functions ** + ************************/ + + /** + * Set thread type and exception portal base + */ + void thread_type(Cpu_session::Native_cpu::Thread_type thread_type, + Cpu_session::Native_cpu::Exception_base exception_base); + + /** + * Set pager + */ + void pager(Pager_object &pager); + + /** + * Return pager object + */ + Pager_object &pager() + { + if (_pager) + return *_pager; + + ASSERT_NEVER_CALLED; + } + + /** + * Return identification of thread when faulting + */ + unsigned long pager_object_badge() { return (unsigned long)this; } + + /** + * Set the executing CPU for this thread + */ + void affinity(Affinity::Location location); + + /** + * Pager_object starts migration preparation and calls for + * finalization of the migration. + * The method delegates the new exception portals to + * the protection domain and set the new acknowledged location. 
+ */ + void prepare_migration(); + void finalize_migration(Affinity::Location const location) { + _location = location; } + + /** + * Get the executing CPU for this thread + */ + Affinity::Location affinity() const { return _location; } + + /** + * Get thread name + */ + const char *name() const { return _name.string(); } + + /** + * Get pd name + */ + const char *pd_name() const; + + /** + * Set CPU quota of the thread to 'quota' + */ + void quota(size_t const) { /* not supported*/ } + + /** + * Return execution time consumed by the thread + */ + Trace::Execution_time execution_time() const; +}; + +#endif /* _CORE__INCLUDE__PLATFORM_THREAD_H_ */ diff --git a/repos/base-tukija/src/core/include/rpc_cap_factory.h b/repos/base-tukija/src/core/include/rpc_cap_factory.h new file mode 100644 index 0000000000..78b37267cd --- /dev/null +++ b/repos/base-tukija/src/core/include/rpc_cap_factory.h @@ -0,0 +1,71 @@ +/* + * \brief RPC capability factory + * \author Norman Feske + * \date 2016-01-19 + */ + +/* + * Copyright (C) 2016-2017 Genode Labs GmbH + * + * This file is part of the Genode OS framework, which is distributed + * under the terms of the GNU Affero General Public License version 3. + */ + +#ifndef _CORE__INCLUDE__RPC_CAP_FACTORY_H_ +#define _CORE__INCLUDE__RPC_CAP_FACTORY_H_ + +/* Genode includes */ +#include +#include +#include +#include + +/* core includes */ +#include + +namespace Core { class Rpc_cap_factory; } + + +class Core::Rpc_cap_factory +{ + private: + + struct Cap_object : List::Element + { + addr_t _cap_sel; + + Cap_object(addr_t cap_sel) : _cap_sel(cap_sel) {} + }; + + enum { SBS = 960*sizeof(long) }; + uint8_t _initial_sb[SBS]; + + Tslab _slab; + List _list { }; + Mutex _mutex { }; + + public: + + Rpc_cap_factory(Allocator &md_alloc); + + ~Rpc_cap_factory(); + + /** + * Allocate RPC capability + * + * \throw Allocator::Out_of_memory + * + * This function is invoked via Native_pd::alloc_rpc_cap. 
+ */ + Native_capability alloc(Native_capability ep, addr_t entry, addr_t mtd); + + Native_capability alloc(Native_capability) + { + warning("unexpected call to non-implemented Rpc_cap_factory::alloc"); + return Native_capability(); + } + + void free(Native_capability cap); +}; + +#endif /* _CORE__INCLUDE__RPC_CAP_FACTORY_H_ */ diff --git a/repos/base-tukija/src/core/include/signal_broker.h b/repos/base-tukija/src/core/include/signal_broker.h new file mode 100644 index 0000000000..1edbc303ce --- /dev/null +++ b/repos/base-tukija/src/core/include/signal_broker.h @@ -0,0 +1,139 @@ +/* + * \brief NOVA-specific signal-delivery mechanism + * \author Norman Feske + * \date 2016-01-04 + */ + +/* + * Copyright (C) 2016-2017 Genode Labs GmbH + * + * This file is part of the Genode OS framework, which is distributed + * under the terms of the GNU Affero General Public License version 3. + */ + +#ifndef _CORE__INCLUDE__SIGNAL_BROKER_H_ +#define _CORE__INCLUDE__SIGNAL_BROKER_H_ + +/* Genode includes */ +#include + +/* NOVA includes */ +#include + +/* core includes */ +#include +#include +#include + +namespace Core { class Signal_broker; } + + +class Core::Signal_broker +{ + private: + + Allocator &_md_alloc; + Rpc_entrypoint &_source_ep; + Object_pool _obj_pool { }; + Rpc_entrypoint &_context_ep; + Signal_source_component _source; + Capability _source_cap; + Signal_context_slab _context_slab { _md_alloc }; + + public: + + class Invalid_signal_source : public Exception { }; + + Signal_broker(Allocator &md_alloc, + Rpc_entrypoint &source_ep, + Rpc_entrypoint &context_ep) + : + _md_alloc(md_alloc), + _source_ep(source_ep), + _context_ep(context_ep), + _source(&_context_ep), + _source_cap(_source_ep.manage(&_source)) + { } + + ~Signal_broker() + { + /* remove source from entrypoint */ + _source_ep.dissolve(&_source); + + /* free all signal contexts */ + while (Signal_context_component *r = _context_slab.any_signal_context()) + free_context(reinterpret_cap_cast(r->cap())); + } + + 
Capability alloc_signal_source() { return _source_cap; } + + void free_signal_source(Capability) { } + + /* + * \throw Allocator::Out_of_memory + */ + Signal_context_capability + alloc_context(Capability, unsigned long imprint) + { + /* + * XXX For now, we ignore the signal-source argument as we + * create only a single receiver for each PD. + */ + + Native_capability sm = _source.blocking_semaphore(); + + if (!sm.valid()) { + warning("signal receiver sm is not valid"); + for (;;); + return Signal_context_capability(); + } + + Native_capability si = Capability_space::import(cap_map().insert()); + Signal_context_capability cap = reinterpret_cap_cast(si); + + uint8_t res = Tukija::create_si(cap.local_name(), + platform_specific().core_pd_sel(), + imprint, sm.local_name()); + if (res != Tukija::NOVA_OK) { + warning("creating signal failed - error ", res); + return Signal_context_capability(); + } + + /* the _contexts_slab may throw Allocator::Out_of_memory */ + _obj_pool.insert(new (&_context_slab) Signal_context_component(cap)); + + /* return unique capability for the signal context */ + return cap; + } + + void free_context(Signal_context_capability context_cap) + { + Signal_context_component *context; + auto lambda = [&] (Signal_context_component *c) { + context = c; + if (context) _obj_pool.remove(context); + }; + _obj_pool.apply(context_cap, lambda); + + if (!context) { + warning(this, " - specified signal-context capability has wrong type ", + Hex(context_cap.local_name())); + return; + } + destroy(&_context_slab, context); + + Tukija::revoke(Tukija::Obj_crd(context_cap.local_name(), 0)); + cap_map().remove(context_cap.local_name(), 0); + } + + void submit(Signal_context_capability, unsigned) + { + /* + * On NOVA, signals are submitted directly to the kernel, not + * by using core as a proxy. 
+ */ + ASSERT_NEVER_CALLED; + } +}; + +#endif /* _CORE__INCLUDE__SIGNAL_BROKER_H_ */ diff --git a/repos/base-tukija/src/core/include/signal_source_component.h b/repos/base-tukija/src/core/include/signal_source_component.h new file mode 100644 index 0000000000..65f9042241 --- /dev/null +++ b/repos/base-tukija/src/core/include/signal_source_component.h @@ -0,0 +1,63 @@ +/* + * \brief Signal-delivery mechanism + * \author Norman Feske + * \date 2009-08-05 + */ + +/* + * Copyright (C) 2009-2017 Genode Labs GmbH + * + * This file is part of the Genode OS framework, which is distributed + * under the terms of the GNU Affero General Public License version 3. + */ + +#ifndef _CORE__INCLUDE__SIGNAL_SOURCE_COMPONENT_H_ +#define _CORE__INCLUDE__SIGNAL_SOURCE_COMPONENT_H_ + +/* Genode includes */ +#include + +/* core includes */ +#include +#include + +namespace Core { + + class Signal_context_component; + class Signal_source_component; +} + + +struct Core::Signal_context_component : Object_pool::Entry +{ + Signal_context_component(Signal_context_capability cap) + : Object_pool::Entry(cap) { } + + Signal_source_component *source() { ASSERT_NEVER_CALLED; } +}; + + +class Core::Signal_source_component : public Rpc_object +{ + private: + + Native_capability _blocking_semaphore { }; + + public: + + Signal_source_component(Rpc_entrypoint *) { } + + void register_semaphore(Native_capability const &cap) + { + _blocking_semaphore = cap; + } + + Native_capability blocking_semaphore() const { return _blocking_semaphore; } + + Signal wait_for_signal() override { /* unused on NOVA */ return Signal(0, 0); } + + void submit(Signal_context_component *, unsigned long) { /* unused on NOVA */ } +}; + +#endif /* _CORE__INCLUDE__SIGNAL_SOURCE_COMPONENT_H_ */ diff --git a/repos/base-tukija/src/core/include/spec/x86_32/nova_msr.h b/repos/base-tukija/src/core/include/spec/x86_32/nova_msr.h new file mode 100644 index 0000000000..35e7768230 --- /dev/null +++ 
b/repos/base-tukija/src/core/include/spec/x86_32/nova_msr.h @@ -0,0 +1,26 @@ +/* + * \brief Guarded MSR access on NOVA + * \author Alexander Boettcher + * \date 2023-10-03 + */ + +/* + * Copyright (C) 2023 Genode Labs GmbH + * + * This file is part of the Genode OS framework, which is distributed + * under the terms of the GNU Affero General Public License version 3. + */ + +#ifndef _CORE__INCLUDE__SPEC_X86_32__NOVA_MSR_H_ +#define _CORE__INCLUDE__SPEC_X86_32__NOVA_MSR_H_ + +#include + +static Genode::Pd_session::Managing_system_state msr_access(Genode::Pd_session::Managing_system_state const &, + Nova::Utcb &, + Genode::addr_t const) +{ + return { }; /* not supported for now on x86_32 */ +} + +#endif /* _CORE__INCLUDE__SPEC_X86_32__NOVA_MSR_H_ */ diff --git a/repos/base-tukija/src/core/include/spec/x86_64/nova_msr.h b/repos/base-tukija/src/core/include/spec/x86_64/nova_msr.h new file mode 100644 index 0000000000..075a1396a7 --- /dev/null +++ b/repos/base-tukija/src/core/include/spec/x86_64/nova_msr.h @@ -0,0 +1,55 @@ +/* + * \brief Guarded MSR access on NOVA + * \author Alexander Boettcher + * \date 2023-10-03 + */ + +/* + * Copyright (C) 2023 Genode Labs GmbH + * + * This file is part of the Genode OS framework, which is distributed + * under the terms of the GNU Affero General Public License version 3. 
+ */ + +#ifndef _CORE__INCLUDE__SPEC_X86_64__NOVA_MSR_H_ +#define _CORE__INCLUDE__SPEC_X86_64__NOVA_MSR_H_ + +#include + +static Genode::Pd_session::Managing_system_state msr_access(Genode::Pd_session::Managing_system_state const &state, + Tukija::Utcb &utcb, + Genode::addr_t const msr_cap) +{ + Genode::Pd_session::Managing_system_state result { }; + + utcb.set_msg_word(state.ip); /* count */ + utcb.msg()[0] = state.r8; + utcb.msg()[1] = state.r9; + utcb.msg()[2] = state.r10; + utcb.msg()[3] = state.r11; + utcb.msg()[4] = state.r12; + utcb.msg()[5] = state.r13; + utcb.msg()[6] = state.r14; + utcb.msg()[7] = state.r15; + + auto const res = Tukija::ec_ctrl(Tukija::Ec_op::EC_MSR_ACCESS, msr_cap); + + result.trapno = (res == Tukija::NOVA_OK) ? 1 : 0; + + if (res != Tukija::NOVA_OK) + return result; + + result.ip = utcb.msg_words(); /* bitmap about valid returned words */ + result.r8 = utcb.msg()[0]; + result.r9 = utcb.msg()[1]; + result.r10 = utcb.msg()[2]; + result.r11 = utcb.msg()[3]; + result.r12 = utcb.msg()[4]; + result.r13 = utcb.msg()[5]; + result.r14 = utcb.msg()[6]; + result.r15 = utcb.msg()[7]; + + return result; +} + +#endif /* _CORE__INCLUDE__SPEC_X86_64__NOVA_MSR_H_ */ diff --git a/repos/base-tukija/src/core/include/util.h b/repos/base-tukija/src/core/include/util.h new file mode 100644 index 0000000000..a0a5cd49b1 --- /dev/null +++ b/repos/base-tukija/src/core/include/util.h @@ -0,0 +1,61 @@ +/* + * \brief Core-internal utilities + * \author Norman Feske + * \date 2009-10-02 + */ + +/* + * Copyright (C) 2009-2017 Genode Labs GmbH + * + * This file is part of the Genode OS framework, which is distributed + * under the terms of the GNU Affero General Public License version 3. 
+ */ + +#ifndef _CORE__INCLUDE__UTIL_H_ +#define _CORE__INCLUDE__UTIL_H_ + +/* Genode includes */ +#include + +/* base-internal includes */ +#include +#include + +namespace Core { + + constexpr size_t get_super_page_size_log2() { return 22; } + constexpr size_t get_super_page_size() { return 1 << get_super_page_size_log2(); } + + template + inline T trunc_page(T addr) { return addr & _align_mask(size_t(get_page_size_log2())); } + + template + inline T round_page(T addr) { return trunc_page(addr + get_page_size() - 1); } + + inline addr_t map_src_addr(addr_t /* core_local */, addr_t phys) { return phys; } + + inline Log2 kernel_constrained_map_size(Log2 size) + { + /* Tukija::Mem_crd order has 5 bits available and is in 4K page units */ + return { min(size.log2, uint8_t((1 << 5) - 1 + 12)) }; + } + + inline unsigned scale_priority(unsigned const prio, char const * name) + { + using Tukija::Qpd; + unsigned priority = Cpu_session::scale_priority(Qpd::DEFAULT_PRIORITY, + prio); + if (priority == 0) { + warning("priority of thread '", name, "' below minimum - boost to 1"); + priority = 1; + } + if (priority > Tukija::Qpd::DEFAULT_PRIORITY) { + warning("priority of thread '", name, "' above maximum - limit to ", + (unsigned)Qpd::DEFAULT_PRIORITY); + priority = Qpd::DEFAULT_PRIORITY; + } + return priority; + } +} + +#endif /* _CORE__INCLUDE__UTIL_H_ */ diff --git a/repos/base-tukija/src/core/include/vm_session_component.h b/repos/base-tukija/src/core/include/vm_session_component.h new file mode 100644 index 0000000000..c2eb55a54a --- /dev/null +++ b/repos/base-tukija/src/core/include/vm_session_component.h @@ -0,0 +1,185 @@ +/* + * \brief Core-specific instance of the VM session interface + * \author Alexander Boettcher + * \author Christian Helmuth + * \date 2018-08-26 + */ + +/* + * Copyright (C) 2018-2021 Genode Labs GmbH + * + * This file is part of the Genode OS framework, which is distributed + * under the terms of the GNU Affero General Public License version 3. 
+ */ + +#ifndef _CORE__VM_SESSION_COMPONENT_H_ +#define _CORE__VM_SESSION_COMPONENT_H_ + +/* Genode includes */ +#include +#include +#include +#include +#include + +/* core includes */ +#include +#include + +namespace Core { class Vm_session_component; } + + +class Core::Vm_session_component +: + private Ram_quota_guard, + private Cap_quota_guard, + public Rpc_object, + private Region_map_detach +{ + private: + + using Con_ram_allocator = Constrained_ram_allocator; + using Avl_region = Allocator_avl_tpl; + + class Vcpu : public Rpc_object, + public Trace::Source::Info_accessor + { + public: + + struct Creation_failed { }; + + private: + + Rpc_entrypoint &_ep; + Constrained_ram_allocator &_ram_alloc; + Cap_quota_guard &_cap_alloc; + Trace::Source_registry &_trace_sources; + addr_t _sel_sm_ec_sc; + bool _alive { false }; + unsigned const _id; + Affinity::Location const _location; + unsigned const _priority; + Session_label const &_label; + addr_t const _pd_sel; + + struct Trace_control_slot + { + unsigned index = 0; + Trace::Control_area &_trace_control_area; + + Trace_control_slot(Trace::Control_area &trace_control_area) + : _trace_control_area(trace_control_area) + { + if (!_trace_control_area.alloc(index)) + throw Out_of_ram(); + } + + ~Trace_control_slot() + { + _trace_control_area.free(index); + } + + Trace::Control &control() + { + return *_trace_control_area.at(index); + } + }; + + Trace_control_slot _trace_control_slot; + + Trace::Source _trace_source { *this, _trace_control_slot.control() }; + + public: + + Vcpu(Rpc_entrypoint &, + Constrained_ram_allocator &ram_alloc, + Cap_quota_guard &cap_alloc, + unsigned id, + unsigned kernel_id, + Affinity::Location, + unsigned priority, + Session_label const &, + addr_t pd_sel, + addr_t core_pd_sel, + addr_t vmm_pd_sel, + Trace::Control_area &, + Trace::Source_registry &); + + ~Vcpu(); + + addr_t sm_sel() const { return _sel_sm_ec_sc + 0; } + addr_t ec_sel() const { return _sel_sm_ec_sc + 1; } + addr_t sc_sel() const 
{ return _sel_sm_ec_sc + 2; } + + /******************************* + ** Native_vcpu RPC interface ** + *******************************/ + + Capability state(); + void startup(); + void exit_handler(unsigned, Signal_context_capability); + + /******************************************** + ** Trace::Source::Info_accessor interface ** + ********************************************/ + + Trace::Source::Info trace_source_info() const override; + }; + + Rpc_entrypoint &_ep; + Trace::Control_area _trace_control_area; + Trace::Source_registry &_trace_sources; + Con_ram_allocator _constrained_md_ram_alloc; + Sliced_heap _heap; + Avl_region _map { &_heap }; + addr_t _pd_sel { 0 }; + unsigned _next_vcpu_id { 0 }; + unsigned _priority; + Session_label const _session_label; + + Registry> _vcpus { }; + + /* helpers for vm_session_common.cc */ + void _attach_vm_memory(Dataspace_component &, addr_t, Attach_attr); + void _detach_vm_memory(addr_t, size_t); + void _with_region(addr_t, auto const &); + + protected: + + Ram_quota_guard &_ram_quota_guard() { return *this; } + Cap_quota_guard &_cap_quota_guard() { return *this; } + + public: + + using Ram_quota_guard::upgrade; + using Cap_quota_guard::upgrade; + + Vm_session_component(Rpc_entrypoint &, Resources, Label const &, + Diag, Ram_allocator &ram, Region_map &, unsigned, + Trace::Source_registry &); + ~Vm_session_component(); + + + /********************************* + ** Region_map_detach interface ** + *********************************/ + + /* used on destruction of attached dataspaces */ + void detach_at(addr_t) override; + void unmap_region(addr_t, size_t) override; + void reserve_and_flush(addr_t) override; + + + /************************** + ** Vm session interface ** + **************************/ + + Capability create_vcpu(Thread_capability) override; + void attach_pic(addr_t) override { /* unused on NOVA */ } + + void attach(Dataspace_capability, addr_t, Attach_attr) override; + void detach(addr_t, size_t) override; + +}; + 
+#endif /* _CORE__VM_SESSION_COMPONENT_H_ */ diff --git a/repos/base-tukija/src/core/io_mem_session_support.cc b/repos/base-tukija/src/core/io_mem_session_support.cc new file mode 100644 index 0000000000..d249d0368e --- /dev/null +++ b/repos/base-tukija/src/core/io_mem_session_support.cc @@ -0,0 +1,29 @@ +/* + * \brief Implementation of the IO_MEM session interface + * \author Norman Feske + * \author Sebastian Sumpf + * \date 2009-03-29 + * + */ + +/* + * Copyright (C) 2009-2017 Genode Labs GmbH + * + * This file is part of the Genode OS framework, which is distributed + * under the terms of the GNU Affero General Public License version 3. + */ + +/* core includes */ +#include +#include +#include + +using namespace Core; + + +void Io_mem_session_component::_unmap_local(addr_t, size_t, addr_t) { } + +Io_mem_session_component::Map_local_result Io_mem_session_component::_map_local(addr_t, size_t) +{ + return { .core_local_addr = 0, .success = true }; +} diff --git a/repos/base-tukija/src/core/ipc_pager.cc b/repos/base-tukija/src/core/ipc_pager.cc new file mode 100644 index 0000000000..7ebc5f8b4d --- /dev/null +++ b/repos/base-tukija/src/core/ipc_pager.cc @@ -0,0 +1,81 @@ +/* + * \brief Low-level page-fault handling + * \author Norman Feske + * \date 2010-01-25 + */ + +/* + * Copyright (C) 2010-2017 Genode Labs GmbH + * + * This file is part of the Genode OS framework, which is distributed + * under the terms of the GNU Affero General Public License version 3. 
+ */ + +/* Genode includes */ +#include + +/* core includes */ +#include + +/* NOVA includes */ +#include + +using namespace Core; + + +void Mapping::prepare_map_operation() const { } + + +Ipc_pager::Ipc_pager(Tukija::Utcb &utcb, addr_t pd_dst, addr_t pd_core) +: + _pd_dst(pd_dst), + _pd_core(pd_core), + _fault_ip(utcb.ip), + _fault_addr(utcb.pf_addr()), + _sp(utcb.sp), + _fault_type(utcb.pf_type()), + _syscall_res(Tukija::NOVA_OK), + _normal_ipc((((addr_t)&utcb.qual[2] - (addr_t)utcb.msg()) / sizeof(addr_t)) + == utcb.msg_words()) +{ + + /* + * When this function is called from the page-fault handler EC, a page + * fault already occurred. So we never wait but immediately read the + * page-fault information from our UTCB. + */ +} + + +void Ipc_pager::set_reply_mapping(Mapping const mapping) +{ + Tukija::Utcb &utcb = *(Tukija::Utcb *)Thread::myself()->utcb(); + + utcb.set_msg_word(0); + + bool res = utcb.append_item(nova_src_crd(mapping), 0, true, false, false, + mapping.dma_buffer, mapping.write_combined); + + /* one item always fits in the UTCB */ + (void)res; + + /* asynchronously map memory */ + _syscall_res = Tukija::delegate(_pd_core, _pd_dst, nova_dst_crd(mapping)); +} + + +void Ipc_pager::reply_and_wait_for_fault(addr_t sm) +{ + Thread &myself = *Thread::myself(); + Tukija::Utcb &utcb = *reinterpret_cast(myself.utcb()); + + utcb.mtd = 0; + + /* + * If it was a normal IPC and the mapping failed, caller may re-try. + * Otherwise nothing left to be delegated - done asynchronously beforehand. + */ + utcb.set_msg_word((_normal_ipc && _syscall_res != Tukija::NOVA_OK) ? 
1 : 0); + + Tukija::reply(myself.stack_top(), sm); +} diff --git a/repos/base-tukija/src/core/irq_session_component.cc b/repos/base-tukija/src/core/irq_session_component.cc new file mode 100644 index 0000000000..02a9b82779 --- /dev/null +++ b/repos/base-tukija/src/core/irq_session_component.cc @@ -0,0 +1,278 @@ +/* + * \brief Implementation of IRQ session component + * \author Norman Feske + * \author Sebastian Sumpf + * \author Alexander Boettcher + * \date 2009-10-02 + */ + +/* + * Copyright (C) 2009-2017 Genode Labs GmbH + * + * This file is part of the Genode OS framework, which is distributed + * under the terms of the GNU Affero General Public License version 3. + */ + +/* core includes */ +#include +#include +#include + +/* NOVA includes */ +#include +#include + +using namespace Core; + + +static bool irq_ctrl(addr_t irq_sel, addr_t &msi_addr, addr_t &msi_data, + addr_t sig_sel, Tukija::Gsi_flags flags, addr_t virt_addr) +{ + /* assign IRQ to CPU && request msi data to be used by driver */ + uint8_t res = Tukija::assign_gsi(irq_sel, virt_addr, boot_cpu(), + msi_addr, msi_data, sig_sel, flags); + + if (res != Tukija::NOVA_OK) + error("setting up MSI failed - error ", res); + + /* nova syscall interface specifies msi addr/data to be 32bit */ + msi_addr = msi_addr & ~0U; + msi_data = msi_data & ~0U; + + return res == Tukija::NOVA_OK; +} + + +static bool associate_gsi(addr_t irq_sel, Signal_context_capability sig_cap, + Tukija::Gsi_flags gsi_flags) +{ + addr_t dummy1 = 0, dummy2 = 0; + + return irq_ctrl(irq_sel, dummy1, dummy2, sig_cap.local_name(), gsi_flags, 0); +} + + +static void deassociate(addr_t irq_sel) +{ + addr_t dummy1 = 0, dummy2 = 0; + + if (!irq_ctrl(irq_sel, dummy1, dummy2, irq_sel, Tukija::Gsi_flags(), 0)) + warning("Irq could not be de-associated"); +} + + +static bool associate_msi(addr_t irq_sel, addr_t phys_mem, addr_t &msi_addr, + addr_t &msi_data, Signal_context_capability sig_cap) +{ + if (!phys_mem) + return irq_ctrl(irq_sel, msi_addr, 
msi_data, sig_cap.local_name(), Tukija::Gsi_flags(), 0); + + return platform().region_alloc().alloc_aligned(4096, 12).convert( + + [&] (void *virt_ptr) { + + addr_t const virt_addr = reinterpret_cast(virt_ptr); + + using Tukija::Rights; + using Tukija::Utcb; + + Tukija::Mem_crd phys_crd(phys_mem >> 12, 0, Rights(true, false, false)); + Tukija::Mem_crd virt_crd(virt_addr >> 12, 0, Rights(true, false, false)); + + Utcb &utcb = *reinterpret_cast(Thread::myself()->utcb()); + + if (map_local_phys_to_virt(utcb, phys_crd, virt_crd, platform_specific().core_pd_sel())) { + platform().region_alloc().free(virt_ptr, 4096); + return false; + } + + /* try to assign MSI to device */ + bool res = irq_ctrl(irq_sel, msi_addr, msi_data, sig_cap.local_name(), Tukija::Gsi_flags(), virt_addr); + + unmap_local(Tukija::Mem_crd(virt_addr >> 12, 0, Rights(true, true, true))); + platform().region_alloc().free(virt_ptr, 4096); + + return res; + }, + [&] (Range_allocator::Alloc_error) { + return false; + }); +} + + +void Irq_object::sigh(Signal_context_capability cap) +{ + if (!_sigh_cap.valid() && !cap.valid()) + return; + + if (_sigh_cap.valid() && _sigh_cap == cap) { + /* avoid useless overhead, e.g. 
with IOMMUs enabled */ + return; + } + + if ((_sigh_cap.valid() && !cap.valid())) { + deassociate(irq_sel()); + _sigh_cap = Signal_context_capability(); + return; + } + + /* associate GSI or MSI to device belonging to device_phys */ + bool ok = false; + if (_device_phys || (_msi_addr && _msi_data)) + ok = associate_msi(irq_sel(), _device_phys, _msi_addr, _msi_data, cap); + else + ok = associate_gsi(irq_sel(), cap, _gsi_flags); + + if (!ok) { + deassociate(irq_sel()); + _sigh_cap = Signal_context_capability(); + return; + } + + _sigh_cap = cap; +} + + +void Irq_object::ack_irq() +{ + if (Tukija::NOVA_OK != Tukija::sm_ctrl(irq_sel(), Tukija::SEMAPHORE_DOWN)) + error("Unmasking irq of selector ", irq_sel(), " failed"); +} + + +void Irq_object::start(unsigned irq, addr_t const device_phys, Irq_args const &irq_args) +{ + /* map IRQ SM cap from kernel to core at irq_sel selector */ + using Tukija::Obj_crd; + + Obj_crd src(platform_specific().gsi_base_sel() + irq, 0); + Obj_crd dst(irq_sel(), 0); + enum { MAP_FROM_KERNEL_TO_CORE = true }; + + int ret = map_local(platform_specific().core_pd_sel(), + *(Tukija::Utcb *)Thread::myself()->utcb(), + src, dst, MAP_FROM_KERNEL_TO_CORE); + if (ret) { + error("getting IRQ from kernel failed - ", irq); + throw Service_denied(); + } + + /* initialize GSI IRQ flags */ + auto gsi_flags = [] (Irq_args const &args) { + if (args.trigger() == Irq_session::TRIGGER_UNCHANGED + || args.polarity() == Irq_session::POLARITY_UNCHANGED) + return Tukija::Gsi_flags(); + + if (args.trigger() == Irq_session::TRIGGER_EDGE) + return Tukija::Gsi_flags(Tukija::Gsi_flags::EDGE); + + if (args.polarity() == Irq_session::POLARITY_HIGH) + return Tukija::Gsi_flags(Tukija::Gsi_flags::HIGH); + else + return Tukija::Gsi_flags(Tukija::Gsi_flags::LOW); + }; + + _gsi_flags = gsi_flags(irq_args); + + /* associate GSI or MSI to device belonging to device_phys */ + bool ok = false; + if (irq_args.type() == Irq_session::TYPE_LEGACY) + ok = associate_gsi(irq_sel(), 
_sigh_cap, _gsi_flags); + else + ok = associate_msi(irq_sel(), device_phys, _msi_addr, _msi_data, _sigh_cap); + + if (!ok) + throw Service_denied(); + + _device_phys = device_phys; +} + + +Irq_object::Irq_object() +: + _kernel_caps(cap_map().insert(KERNEL_CAP_COUNT_LOG2)), + _msi_addr(0UL), _msi_data(0UL) +{ } + + +Irq_object::~Irq_object() +{ + if (_sigh_cap.valid()) + deassociate(irq_sel()); + + /* revoke IRQ SM */ + Tukija::revoke(Tukija::Obj_crd(_kernel_caps, KERNEL_CAP_COUNT_LOG2)); + enum { NO_REVOKE_REQUIRED = false }; + cap_map().remove(_kernel_caps, KERNEL_CAP_COUNT_LOG2, NO_REVOKE_REQUIRED); +} + + +/*************************** + ** IRQ session component ** + ***************************/ + + +Irq_session_component::Irq_session_component(Range_allocator &irq_alloc, + const char *args) +: + _irq_number(~0U), _irq_alloc(irq_alloc), _irq_object() +{ + Irq_args const irq_args(args); + + long irq_number = irq_args.irq_number(); + if (irq_args.type() != Irq_session::TYPE_LEGACY) { + + if ((unsigned long)irq_number >= kernel_hip().sel_gsi) + throw Service_denied(); + + irq_number = kernel_hip().sel_gsi - 1 - irq_number; + /* XXX last GSI number unknown - assume 40 GSIs (depends on IO-APIC) */ + if (irq_number < 40) + throw Service_denied(); + } + + if (irq_alloc.alloc_addr(1, irq_number).failed()) { + error("unavailable IRQ ", irq_number, " requested"); + throw Service_denied(); + } + + _irq_number = (unsigned)irq_number; + + long device_phys = Arg_string::find_arg(args, "device_config_phys").long_value(0); + _irq_object.start(_irq_number, device_phys, irq_args); +} + + +Irq_session_component::~Irq_session_component() +{ + if (_irq_number == ~0U) + return; + + addr_t free_irq = _irq_number; + _irq_alloc.free((void *)free_irq); +} + + +void Irq_session_component::ack_irq() +{ + _irq_object.ack_irq(); +} + + +void Irq_session_component::sigh(Signal_context_capability cap) +{ + _irq_object.sigh(cap); +} + + +Irq_session::Info Irq_session_component::info() +{ + if 
(!_irq_object.msi_address() || !_irq_object.msi_value()) + return { .type = Info::Type::INVALID, .address = 0, .value = 0 }; + + return { + .type = Info::Type::MSI, + .address = _irq_object.msi_address(), + .value = _irq_object.msi_value() + }; +} diff --git a/repos/base-tukija/src/core/native_cpu_component.cc b/repos/base-tukija/src/core/native_cpu_component.cc new file mode 100644 index 0000000000..a809117365 --- /dev/null +++ b/repos/base-tukija/src/core/native_cpu_component.cc @@ -0,0 +1,46 @@ +/* + * \brief Core implementation of the CPU session interface extension + * \author Norman Feske + * \date 2016-04-21 + */ + +/* + * Copyright (C) 2016-2017 Genode Labs GmbH + * + * This file is part of the Genode OS framework, which is distributed + * under the terms of the GNU Affero General Public License version 3. + */ + +/* core includes */ +#include +#include + +using namespace Core; + + +void Native_cpu_component::thread_type(Thread_capability thread_cap, + Thread_type thread_type, + Exception_base exception_base) +{ + auto lambda = [&] (Cpu_thread_component *thread) { + if (!thread) + return; + + thread->platform_thread().thread_type(thread_type, exception_base); + }; + + _thread_ep.apply(thread_cap, lambda); +} + +Native_cpu_component::Native_cpu_component(Cpu_session_component &cpu_session, char const *) +: + _cpu_session(cpu_session), _thread_ep(_cpu_session._thread_ep) +{ + _thread_ep.manage(this); +} + + +Core::Native_cpu_component::~Native_cpu_component() +{ + _thread_ep.dissolve(this); +} diff --git a/repos/base-tukija/src/core/native_pd_component.cc b/repos/base-tukija/src/core/native_pd_component.cc new file mode 100644 index 0000000000..79eb442af8 --- /dev/null +++ b/repos/base-tukija/src/core/native_pd_component.cc @@ -0,0 +1,53 @@ +/* + * \brief Kernel-specific part of the PD-session interface + * \author Norman Feske + * \date 2016-01-19 + */ + +/* + * Copyright (C) 2016-2017 Genode Labs GmbH + * + * This file is part of the Genode OS framework, 
which is distributed + * under the terms of the GNU Affero General Public License version 3. + */ + +/* core includes */ +#include +#include +#include + +using namespace Core; + + +Native_capability Native_pd_component::alloc_rpc_cap(Native_capability ep, + addr_t entry, addr_t mtd) +{ + try { + _pd_session._consume_cap(Pd_session_component::RPC_CAP); + return _pd_session._rpc_cap_factory.alloc(ep, entry, mtd); } + + catch (Allocator::Out_of_memory) { throw Out_of_ram(); } +} + + +void Native_pd_component::imprint_rpc_cap(Native_capability cap, unsigned long badge) +{ + if (cap.valid()) + imprint_badge(cap.local_name(), badge); +} + + +Native_pd_component::Native_pd_component(Pd_session_component &pd, char const *) +: + _pd_session(pd) +{ + _pd_session._ep.manage(this); +} + + +Native_pd_component::~Native_pd_component() +{ + _pd_session._ep.dissolve(this); +} + + diff --git a/repos/base-tukija/src/core/pager.cc b/repos/base-tukija/src/core/pager.cc new file mode 100644 index 0000000000..e1ce660fdd --- /dev/null +++ b/repos/base-tukija/src/core/pager.cc @@ -0,0 +1,1016 @@ +/* + * \brief Pager framework + * \author Norman Feske + * \author Sebastian Sumpf + * \author Alexander Boettcher + * \date 2010-01-25 + */ + +/* + * Copyright (C) 2010-2020 Genode Labs GmbH + * + * This file is part of the Genode OS framework, which is distributed + * under the terms of the GNU Affero General Public License version 3. 
+ */ + +/* Genode includes */ +#include + +/* base-internal includes */ +#include + +/* core includes */ +#include +#include +#include +#include +#include + +/* NOVA includes */ +#include +#include /* map_local */ +#include + +static bool verbose_oom = false; + +using namespace Core; +using namespace Tukija; + + +static Rpc_entrypoint *_core_ep_ptr; + +void Core::init_page_fault_handling(Rpc_entrypoint &ep) { _core_ep_ptr = &ep; } + + +/** + * Pager threads - one thread per CPU + */ +struct Pager_thread: public Thread +{ + Pager_thread(Affinity::Location location) + : Thread(Cpu_session::Weight::DEFAULT_WEIGHT, "pager", 2 * 4096, location) + { + /* creates local EC */ + Thread::start(); + + reinterpret_cast(Thread::utcb())->crd_xlt = Obj_crd(0, ~0UL); + } + + void entry() override { } +}; + +enum { PAGER_CPUS = Core::Platform::MAX_SUPPORTED_CPUS }; +static Constructible pager_threads[PAGER_CPUS]; + +static void with_pager_thread(Affinity::Location location, + Core::Platform &platform, auto const &fn) +{ + unsigned const pager_index = platform.pager_index(location); + unsigned const kernel_cpu_id = platform.kernel_cpu_id(location); + + if (kernel_hip().is_cpu_enabled(kernel_cpu_id) && + pager_index < PAGER_CPUS && pager_threads[pager_index].constructed()) { + + fn(*pager_threads[pager_index]); + return; + } + + warning("invalid CPU parameter used in pager object: ", + pager_index, "->", kernel_cpu_id, " location=", + location.xpos(), "x", location.ypos(), " ", + location.width(), "x", location.height()); +} + + +/** + * Utility for the formatted output of page-fault information + */ +struct Page_fault_info +{ + char const * const pd; + char const * const thread; + unsigned const cpu; + addr_t const ip, addr, sp; + uint8_t const pf_type; + + Page_fault_info(char const *pd, char const *thread, unsigned cpu, + addr_t ip, addr_t addr, addr_t sp, unsigned type) + : + pd(pd), thread(thread), cpu(cpu), ip(ip), addr(addr), + sp(sp), pf_type((uint8_t)type) + { } + + void 
print(Genode::Output &out) const + { + Genode::print(out, "pd='", pd, "' " + "thread='", thread, "' " + "cpu=", cpu, " " + "ip=", Hex(ip), " " + "address=", Hex(addr), " " + "stack pointer=", Hex(sp), " " + "qualifiers=", Hex(pf_type), " ", + pf_type & Ipc_pager::ERR_I ? "I" : "i", + pf_type & Ipc_pager::ERR_R ? "R" : "r", + pf_type & Ipc_pager::ERR_U ? "U" : "u", + pf_type & Ipc_pager::ERR_W ? "W" : "w", + pf_type & Ipc_pager::ERR_P ? "P" : "p"); + } +}; + + +void Pager_object::_page_fault_handler(Pager_object &obj) +{ + Thread &myself = *Thread::myself(); + Utcb &utcb = *reinterpret_cast(myself.utcb()); + + Ipc_pager ipc_pager(utcb, obj.pd_sel(), platform_specific().core_pd_sel()); + + /* potential request to ask for EC cap or signal SM cap */ + if (utcb.msg_words() == 1) + _invoke_handler(obj); + + /* + * obj.pager() (pager thread) may issue a signal to the remote region + * handler thread which may respond via wake_up() (ep thread) before + * we are done here - we have to lock the whole page lookup procedure + */ + obj._state_lock.acquire(); + + obj._state.thread.cpu.ip = ipc_pager.fault_ip(); + obj._state.thread.cpu.sp = 0; + obj._state.thread.cpu.trapno = PT_SEL_PAGE_FAULT; + + obj._state.block(); + obj._state.block_pause_sm(); + + /* lookup fault address and decide what to do */ + unsigned error = (obj.pager(ipc_pager) == Pager_object::Pager_result::STOP); + + /* don't open receive window for pager threads */ + if (utcb.crd_rcv.value()) + nova_die(); + + if (!error && ipc_pager.syscall_result() != Tukija::NOVA_OK) { + /* something went wrong - by default don't answer the page fault */ + error = 4; + + /* dst pd has not enough kernel quota ? 
- try to recover */ + if (ipc_pager.syscall_result() == Tukija::NOVA_PD_OOM) { + uint8_t res = obj.handle_oom(); + if (res == Tukija::NOVA_PD_OOM) { + obj._state.unblock_pause_sm(); + obj._state.unblock(); + obj._state_lock.release(); + + /* block until revoke is due */ + ipc_pager.reply_and_wait_for_fault(obj.sel_sm_block_oom()); + } else if (res == Tukija::NOVA_OK) + /* succeeded to recover - continue normally */ + error = 0; + } + } + + /* good case - found a valid region which is mappable */ + if (!error) { + obj._state.unblock_pause_sm(); + obj._state.unblock(); + obj._state_lock.release(); + ipc_pager.reply_and_wait_for_fault(); + } + + char const * const client_thread = obj.client_thread(); + char const * const client_pd = obj.client_pd(); + + unsigned const cpu_id = platform_specific().pager_index(myself.affinity()); + + Page_fault_info const fault_info(client_pd, client_thread, cpu_id, + ipc_pager.fault_ip(), + ipc_pager.fault_addr(), + ipc_pager.sp(), + (uint8_t)ipc_pager.fault_type()); + obj._state_lock.release(); + + /* block the faulting thread until region manager is done */ + ipc_pager.reply_and_wait_for_fault(obj.sel_sm_block_pause()); +} + + +void Pager_object::exception(uint8_t exit_id) +{ + Thread &myself = *Thread::myself(); + Utcb &utcb = *reinterpret_cast(myself.utcb()); + + if (exit_id > PT_SEL_PARENT) + nova_die(); + + addr_t const fault_ip = utcb.ip; + addr_t const fault_sp = utcb.sp; + addr_t const fault_bp = utcb.bp; + + uint8_t res = 0xFF; + addr_t mtd = 0; + + _state_lock.acquire(); + + /* remember exception type for Cpu_session::state() calls */ + _state.thread.cpu.trapno = exit_id; + + if (_exception_sigh.valid()) { + _state.submit_signal(); + res = _unsynchronized_client_recall(true); + } + + if (res != NOVA_OK) { + /* nobody handles this exception - so thread will be stopped finally */ + _state.mark_dead(); + + unsigned const cpu_id = platform_specific().pager_index(myself.affinity()); + + warning("unresolvable exception ", exit_id, 
", " + "pd '", client_pd(), "', " + "thread '", client_thread(), "', " + "cpu ", cpu_id, ", " + "ip=", Hex(fault_ip), " " + "sp=", Hex(fault_sp), " " + "bp=", Hex(fault_bp), " ", + res == 0xFF ? "no signal handler" + : (res == NOVA_OK ? "" : "recall failed")); + + Tukija::revoke(Obj_crd(exc_pt_sel_client(), NUM_INITIAL_PT_LOG2)); + + enum { TRAP_BREAKPOINT = 3 }; + + if (exit_id == TRAP_BREAKPOINT) { + utcb.ip = fault_ip - 1; + mtd = Mtd::EIP; + } + } + + _state_lock.release(); + + utcb.set_msg_word(0); + utcb.mtd = mtd; + + reply(myself.stack_top()); +} + + +bool Pager_object::_migrate_thread() +{ + bool const valid_migrate = (_state.migrate() && _badge); + if (!valid_migrate) + return false; + + _state.reset_migrate(); + + try { + /* revoke all exception portals pointing to current pager */ + Platform_thread &thread = *reinterpret_cast(_badge); + + Tukija::revoke(Obj_crd(_selectors, 2)); + + /* revoke all exception portals selectors */ + Tukija::revoke(Obj_crd(exc_pt_sel_client()+0x00, 4)); + Tukija::revoke(Obj_crd(exc_pt_sel_client()+0x10, 3)); + Tukija::revoke(Obj_crd(exc_pt_sel_client()+0x18, 1)); + Tukija::revoke(Obj_crd(exc_pt_sel_client()+0x1f, 0)); + + /* re-create portals bound to pager on new target CPU */ + _location = _next_location; + _exceptions = Exception_handlers(*this); + _construct_pager(); + + /* map all exception portals to thread pd */ + thread.prepare_migration(); + + /* syscall to migrate */ + unsigned const migrate_to = platform_specific().kernel_cpu_id(_location); + uint8_t res = syscall_retry(*this, [&] { + return ec_ctrl(EC_MIGRATE, _state.sel_client_ec, migrate_to, + Obj_crd(EC_SEL_THREAD, 0, Obj_crd::RIGHT_EC_RECALL)); + }); + + if (res == Tukija::NOVA_OK) + thread.finalize_migration(_location); + + return true; + } catch (...) 
{ + return false; + } +} + + +void Pager_object::_recall_handler(Pager_object &obj) +{ + Thread &myself = *Thread::myself(); + Utcb &utcb = *reinterpret_cast(myself.utcb()); + + /* acquire mutex */ + obj._state_lock.acquire(); + + /* check for migration */ + if (obj._migrate_thread()) { + /* release mutex */ + obj._state_lock.release(); + + utcb.set_msg_word(0); + utcb.mtd = 0; + reply(myself.stack_top()); + } + + if (obj._state.modified) { + obj._copy_state_to_utcb(utcb); + obj._state.modified = false; + } else + utcb.mtd = 0; + + /* switch on/off single step */ + bool singlestep_state = obj._state.thread.cpu.eflags & 0x100UL; + if (obj._state.singlestep() && !singlestep_state) { + utcb.flags |= 0x100UL; + utcb.mtd |= Mtd::EFL; + } else if (!obj._state.singlestep() && singlestep_state) { + utcb.flags &= ~0x100UL; + utcb.mtd |= Mtd::EFL; + } + + /* deliver signal if it was requested */ + if (obj._state.to_submit()) + obj.submit_exception_signal(); + + /* block until Cpu_session()::resume() respectively wake_up() call */ + + unsigned long sm = 0; + + if (obj._state.blocked()) { + sm = obj.sel_sm_block_pause(); + obj._state.block_pause_sm(); + } + + obj._state_lock.release(); + + utcb.set_msg_word(0); + reply(myself.stack_top(), sm); +} + + +void Pager_object::_startup_handler(Pager_object &obj) +{ + Thread &myself = *Thread::myself(); + Utcb &utcb = *reinterpret_cast(myself.utcb()); + + utcb.ip = obj._initial_eip; + utcb.sp = obj._initial_esp; + utcb.mtd = Mtd::EIP | Mtd::ESP; + + if (obj._state.singlestep()) { + utcb.flags = 0x100UL; + utcb.mtd |= Mtd::EFL; + } + + obj._state.unblock(); + + utcb.set_msg_word(0); + + reply(myself.stack_top()); +} + + +void Pager_object::_invoke_handler(Pager_object &obj) +{ + Thread &myself = *Thread::myself(); + Utcb &utcb = *reinterpret_cast(myself.utcb()); + + /* receive window must be closed - otherwise implementation bug */ + if (utcb.crd_rcv.value()) + nova_die(); + + /* if protocol is violated ignore request */ + if 
(utcb.msg_words() != 1) { + utcb.mtd = 0; + utcb.set_msg_word(0); + reply(myself.stack_top()); + } + + addr_t const event = utcb.msg()[0]; + + /* check for translated pager portals - required for vCPU in remote PDs */ + if (utcb.msg_items() == 1 && utcb.msg_words() == 1 && event == 0xaffe) { + + Tukija::Utcb::Item const &item = *utcb.get_item(0); + Tukija::Crd const cap(item.crd); + + /* valid item which got translated ? */ + if (!cap.is_null() && !item.is_del() && _core_ep_ptr) { + _core_ep_ptr->apply(cap.base(), + [&] (Cpu_thread_component *source) { + if (!source) + return; + + Platform_thread &p = source->platform_thread(); + addr_t const sel_exc_base = p.remote_vcpu(); + if (sel_exc_base == Native_thread::INVALID_INDEX) + return; + + /* delegate VM-exit portals */ + map_vcpu_portals(p.pager(), sel_exc_base, sel_exc_base, + utcb, obj.pd_sel()); + + /* delegate portal to contact pager */ + map_pagefault_portal(obj, p.pager().exc_pt_sel_client(), + sel_exc_base, obj.pd_sel(), utcb); + }); + } + + utcb.mtd = 0; + utcb.set_msg_word(0); + reply(myself.stack_top()); + } + + utcb.mtd = 0; + utcb.set_msg_word(0); + + /* native ec cap requested */ + if (event == ~0UL) { + /** + * Return native EC cap with specific rights mask set. + * If the cap is mapped the kernel will demote the + * rights of the EC as specified by the rights mask. + * + * The cap is supposed to be returned to clients, + * which they have to use as argument to identify + * the thread to which they want attach portals. + * + * The demotion by the kernel during the map operation + * takes care that the EC cap itself contains + * no usable rights for the clients. + */ + bool res = utcb.append_item(Obj_crd(obj._state.sel_client_ec, 0, + Obj_crd::RIGHT_EC_RECALL), 0); + (void)res; + } + + /* semaphore for signaling thread is requested, reuse PT_SEL_STARTUP. 
*/ + if (event == ~0UL - 1) { + /* create semaphore only once */ + if (!obj._state.has_signal_sm()) { + + revoke(Obj_crd(obj.exc_pt_sel_client() + PT_SEL_STARTUP, 0)); + + bool res = Tukija::create_sm(obj.exc_pt_sel_client() + PT_SEL_STARTUP, + platform_specific().core_pd_sel(), 0); + if (res != Tukija::NOVA_OK) + reply(myself.stack_top()); + + obj._state.mark_signal_sm(); + } + + bool res = utcb.append_item(Obj_crd(obj.exc_pt_sel_client() + + PT_SEL_STARTUP, 0), 0); + (void)res; + } + + reply(myself.stack_top()); +} + + +void Pager_object::wake_up() +{ + Mutex::Guard _state_lock_guard(_state_lock); + + if (!_state.blocked()) + return; + + _state.thread.state = Thread_state::State::VALID; + + _state.unblock(); + + if (_state.blocked_pause_sm()) { + + uint8_t res = sm_ctrl(sel_sm_block_pause(), SEMAPHORE_UP); + + if (res == NOVA_OK) + _state.unblock_pause_sm(); + else + warning("canceling blocked client failed (thread sm)"); + } +} + + +uint8_t Pager_object::client_recall(bool get_state_and_block) +{ + Mutex::Guard _state_lock_guard(_state_lock); + return _unsynchronized_client_recall(get_state_and_block); +} + + +uint8_t Pager_object::_unsynchronized_client_recall(bool get_state_and_block) +{ + enum { STATE_REQUESTED = 1UL, STATE_INVALID = ~0UL }; + + uint8_t res = ec_ctrl(EC_RECALL, _state.sel_client_ec, + get_state_and_block ? STATE_REQUESTED : STATE_INVALID); + + if (res != NOVA_OK) + return res; + + if (get_state_and_block) { + Utcb &utcb = *reinterpret_cast(Thread::myself()->utcb()); + _copy_state_from_utcb(utcb); + _state.block(); + } + + return res; +} + + +void Pager_object::cleanup_call() +{ + /* revoke ec and sc cap of client before the sm cap */ + if (_state.sel_client_ec != Native_thread::INVALID_INDEX) + revoke(Obj_crd(_state.sel_client_ec, 2)); + + /* revoke all portals handling the client. 
*/ + revoke(Obj_crd(exc_pt_sel_client(), NUM_INITIAL_PT_LOG2)); + + Utcb &utcb = *reinterpret_cast(Thread::myself()->utcb()); + utcb.set_msg_word(0); + utcb.mtd = 0; + if (uint8_t res = call(sel_pt_cleanup())) + error(&utcb, " - cleanup call to pager failed res=", res); +} + + +void Pager_object::print(Output &out) const +{ + Platform_thread const * const faulter = reinterpret_cast(_badge); + Genode::print(out, "pager_object: pd='", + faulter ? faulter->pd_name() : "unknown", "' thread='", + faulter ? faulter->name() : "unknown", "'"); +} + + +static uint8_t create_portal(addr_t pt, addr_t pd, addr_t ec, Mtd mtd, + addr_t eip, Pager_object * oom_handler) +{ + uint8_t res = syscall_retry(*oom_handler, + [&] { return create_pt(pt, pd, ec, mtd, eip); }); + + if (res != NOVA_OK) + return res; + + addr_t const badge_localname = reinterpret_cast(oom_handler); + + res = pt_ctrl(pt, badge_localname); + if (res == NOVA_OK) + revoke(Obj_crd(pt, 0, Obj_crd::RIGHT_PT_CTRL)); + else + revoke(Obj_crd(pt, 0)); + + return res; +} + + +/************************ + ** Exception handlers ** + ************************/ + +template +void Exception_handlers::register_handler(Pager_object &obj, Mtd mtd, + void (* __attribute__((regparm(1))) func)(Pager_object &)) +{ + uint8_t res = !Tukija::NOVA_OK; + with_pager_thread(obj.location(), platform_specific(), [&] (Pager_thread &pager_thread) { + addr_t const ec_sel = pager_thread.native_thread().ec_sel; + + /* compiler generates instance of exception entry if not specified */ + addr_t entry = func ? 
(addr_t)func : (addr_t)(&_handler); + res = create_portal(obj.exc_pt_sel_client() + EV, + platform_specific().core_pd_sel(), ec_sel, mtd, entry, &obj); + }); + + if (res != Tukija::NOVA_OK) + error("failed to register exception handler"); +} + + +template +void Exception_handlers::_handler(Pager_object &obj) +{ + obj.exception(EV); +} + + +Exception_handlers::Exception_handlers(Pager_object &obj) +{ + Mtd const mtd (Mtd::EBSD | Mtd::ESP | Mtd::EIP); + + register_handler<0>(obj, mtd); + register_handler<1>(obj, mtd); + register_handler<2>(obj, mtd); + register_handler<3>(obj, mtd); + register_handler<4>(obj, mtd); + register_handler<5>(obj, mtd); + register_handler<6>(obj, mtd); + register_handler<7>(obj, mtd); + register_handler<8>(obj, mtd); + register_handler<9>(obj, mtd); + register_handler<10>(obj, mtd); + register_handler<11>(obj, mtd); + register_handler<12>(obj, mtd); + register_handler<13>(obj, mtd); + + register_handler<15>(obj, mtd); + register_handler<16>(obj, mtd); + register_handler<17>(obj, mtd); + register_handler<18>(obj, mtd); + register_handler<19>(obj, mtd); + register_handler<20>(obj, mtd); + register_handler<21>(obj, mtd); + register_handler<22>(obj, mtd); + register_handler<23>(obj, mtd); + register_handler<24>(obj, mtd); + register_handler<25>(obj, mtd); +} + + +/****************** + ** Pager object ** + ******************/ + +void Pager_object::_construct_pager() +{ + /* create portal for page-fault handler - 14 */ + _exceptions.register_handler<14>(*this, Mtd::QUAL | Mtd::ESP | Mtd::EIP, + _page_fault_handler); + + /* create portal for recall handler */ + Mtd const mtd_recall(Mtd::ESP | Mtd::EIP | Mtd::ACDB | Mtd::EFL | + Mtd::EBSD | Mtd::FSGS); + _exceptions.register_handler(*this, mtd_recall, + _recall_handler); + + addr_t const pd_sel = platform_specific().core_pd_sel(); + + uint8_t res = !Tukija::NOVA_OK; + + with_pager_thread(_location, platform_specific(), [&] (Pager_thread &pager_thread) { + + addr_t const ec_sel = 
pager_thread.native_thread().ec_sel; + + /* create portal for final cleanup call used during destruction */ + res = create_portal(sel_pt_cleanup(), pd_sel, ec_sel, Mtd(0), + reinterpret_cast(_invoke_handler), + this); + }); + if (res != Tukija::NOVA_OK) { + error("could not create pager cleanup portal, error=", res); + return; + } + + /* semaphore used to block paged thread during recall */ + res = Tukija::create_sm(sel_sm_block_pause(), pd_sel, 0); + if (res != Tukija::NOVA_OK) { + error("failed to initialize sel_sm_block_pause, error=", res); + return; + } + + /* semaphore used to block paged thread during OOM memory revoke */ + res = Tukija::create_sm(sel_sm_block_oom(), pd_sel, 0); + if (res != Tukija::NOVA_OK) { + error("failed to initialize sel_sm_block_oom, error=", res); + return; + } +} + + +Pager_object::Pager_object(Cpu_session_capability cpu_session_cap, + Thread_capability thread_cap, unsigned long badge, + Affinity::Location location, Session_label const &, + Cpu_session::Name const &) +: + _badge(badge), + _selectors(cap_map().insert(2)), + _client_exc_pt_sel(cap_map().insert(NUM_INITIAL_PT_LOG2)), + _cpu_session_cap(cpu_session_cap), _thread_cap(thread_cap), + _location(location), + _exceptions(*this), + _pd_target(Native_thread::INVALID_INDEX) +{ + _state._status = 0; + _state.modified = false; + _state.sel_client_ec = Native_thread::INVALID_INDEX; + _state.block(); + + if (Native_thread::INVALID_INDEX == _selectors || + Native_thread::INVALID_INDEX == _client_exc_pt_sel) { + error("failed to complete construction of pager object"); + return; + } + + _construct_pager(); + + /* create portal for startup handler */ + Mtd const mtd_startup(Mtd::ESP | Mtd::EIP); + _exceptions.register_handler(*this, mtd_startup, + _startup_handler); + + /* + * Create semaphore required for Genode locking. It can be later on + * requested by the thread the same way as all exception portals. 
+ */ + addr_t const pd_sel = platform_specific().core_pd_sel(); + uint8_t const res = Tukija::create_sm(exc_pt_sel_client() + SM_SEL_EC, + pd_sel, 0); + if (res != Tukija::NOVA_OK) + error("failed to create locking semaphore for pager object"); +} + + +void Pager_object::migrate(Affinity::Location location) +{ + Mutex::Guard _state_lock_guard(_state_lock); + + if (_state.blocked()) + return; + + if (location.xpos() == _location.xpos() && + location.ypos() == _location.ypos()) + return; + + /* initiate migration by recall handler */ + bool const just_recall = false; + uint8_t const res = _unsynchronized_client_recall(just_recall); + if (res == Tukija::NOVA_OK) { + _next_location = location; + + _state.request_migrate(); + } +} + + +Pager_object::~Pager_object() +{ + /* revoke portal used for the cleanup call and sm cap for blocking state */ + revoke(Obj_crd(_selectors, 2)); + cap_map().remove(_selectors, 2, false); + cap_map().remove(exc_pt_sel_client(), NUM_INITIAL_PT_LOG2, false); +} + + +uint8_t Pager_object::handle_oom(addr_t transfer_from, + char const * src_pd, char const * src_thread, + enum Pager_object::Policy policy) +{ + return handle_oom(transfer_from, pd_sel(), src_pd, src_thread, policy, + sel_sm_block_oom(), client_pd(), client_thread()); +} + +uint8_t Pager_object::handle_oom(addr_t pd_from, addr_t pd_to, + char const * src_pd, char const * src_thread, + Policy policy, addr_t sm_notify, + char const * dst_pd, char const * dst_thread) +{ + addr_t const core_pd_sel = platform_specific().core_pd_sel(); + + enum { QUOTA_TRANSFER_PAGES = 2 }; + + if (pd_from == SRC_CORE_PD) + pd_from = core_pd_sel; + + /* request current kernel quota usage of target pd */ + addr_t limit_before = 0, usage_before = 0; + Tukija::pd_ctrl_debug(pd_to, limit_before, usage_before); + + if (verbose_oom) { + addr_t limit_source = 0, usage_source = 0; + /* request current kernel quota usage of source pd */ + Tukija::pd_ctrl_debug(pd_from, limit_source, usage_source); + + log("oom - 
'", dst_pd, "':'", dst_thread, "' " + "(", usage_before, "/", limit_before, ") - " + "transfer ", (long)QUOTA_TRANSFER_PAGES, " pages " + "from '", src_pd, "':'", src_thread, "' " + "(", usage_source, "/", limit_source, ")"); + } + + uint8_t res = Tukija::NOVA_PD_OOM; + + if (pd_from != pd_to) { + /* upgrade quota */ + uint8_t res = Tukija::pd_ctrl(pd_from, Pd_op::TRANSFER_QUOTA, + pd_to, QUOTA_TRANSFER_PAGES); + if (res == Tukija::NOVA_OK) + return res; + } + + /* retry upgrade using core quota if policy permits */ + if (policy == UPGRADE_PREFER_SRC_TO_DST) { + if (pd_from != core_pd_sel) { + res = Tukija::pd_ctrl(core_pd_sel, Pd_op::TRANSFER_QUOTA, + pd_to, QUOTA_TRANSFER_PAGES); + if (res == Tukija::NOVA_OK) + return res; + } + } + + warning("kernel memory quota upgrade failed - trigger memory free up for " + "causing '", dst_pd, "':'", dst_thread, "' - " + "donator is '", src_pd, "':'", src_thread, "', " + "policy=", (int)policy); + + /* if nothing helps try to revoke memory */ + enum { REMOTE_REVOKE = true, PD_SELF = true }; + Mem_crd crd_all(0, ~0U, Rights(true, true, true)); + Tukija::revoke(crd_all, PD_SELF, REMOTE_REVOKE, pd_to, sm_notify); + + /* re-request current kernel quota usage of target pd */ + addr_t limit_after = 0, usage_after = 0; + Tukija::pd_ctrl_debug(pd_to, limit_after, usage_after); + /* if we could free up memory we continue */ + if (usage_after < usage_before) + return Tukija::NOVA_OK; + + /* + * There is still the chance that memory gets freed up, but one has to + * wait until RCU period is over. If we are in the pager code, we can + * instruct the kernel to block the faulting client thread during the reply + * syscall. If we are in a normal (non-pagefault) RPC service call, + * we can't block. The caller of this function can decide based on + * the return value what to do and whether blocking is ok. 
+ */ + return Tukija::NOVA_PD_OOM; +} + + +void Pager_object::_oom_handler(addr_t pager_dst, addr_t pager_src, + addr_t reason) +{ + if (sizeof(void *) == 4) { + /* On 32 bit edx and ecx as second and third regparm parameter is not + * available. It is used by the kernel internally to store ip/sp. + */ + asm volatile ("" : "=D" (pager_src)); + asm volatile ("" : "=S" (reason)); + } + + Thread &myself = *Thread::myself(); + Utcb &utcb = *reinterpret_cast(myself.utcb()); + Pager_object &obj_dst = *reinterpret_cast(pager_dst); + Pager_object &obj_src = *reinterpret_cast(pager_src); + + /* Policy used if the Process of the paged thread runs out of memory */ + enum Policy policy = Policy::UPGRADE_CORE_TO_DST; + + + /* check assertions - cases that should not happen on Genode@Tukija */ + enum { NO_OOM_PT = 0UL }; + + /* all relevant (user) threads should have a OOM PT */ + bool assert = pager_dst == NO_OOM_PT; + + /* + * This pager thread does solely reply to IPC calls - it should never + * cause OOM during the sending phase of a IPC. + */ + assert |= ((reason & (SELF | SEND)) == (SELF | SEND)); + + /* + * This pager thread should never send words (untyped items) - it just + * answers page faults by typed items (memory mappings). 
+ */ + assert |= utcb.msg_words(); + + if (assert) { + error("unknown OOM case - stop core pager thread"); + utcb.set_msg_word(0); + reply(myself.stack_top(), myself.native_thread().exc_pt_sel + Tukija::SM_SEL_EC); + } + + /* be strict in case of the -strict- STOP policy - stop causing thread */ + if (policy == STOP) { + error("PD has insufficient kernel memory left - stop thread"); + utcb.set_msg_word(0); + reply(myself.stack_top(), obj_dst.sel_sm_block_pause()); + } + + char const * src_pd = "core"; + char const * src_thread = "pager"; + + addr_t transfer_from = SRC_CORE_PD; + + switch (pager_src) { + case SRC_PD_UNKNOWN: + /* should not happen on Genode - we create and know every PD in core */ + error("Unknown PD has insufficient kernel memory left - stop thread"); + utcb.set_msg_word(0); + reply(myself.stack_top(), myself.native_thread().exc_pt_sel + Tukija::SM_SEL_EC); + + case SRC_CORE_PD: + /* core PD -> other PD, which has insufficient kernel resources */ + + if (!(reason & SELF)) { + /* case that src thread != this thread in core */ + src_thread = "unknown"; + utcb.set_msg_word(0); + } + + transfer_from = platform_specific().core_pd_sel(); + break; + default: + /* non core PD -> non core PD */ + utcb.set_msg_word(0); + + if (pager_src == pager_dst || policy == UPGRADE_CORE_TO_DST) + transfer_from = platform_specific().core_pd_sel(); + else { + /* delegation of items between different PDs */ + src_pd = obj_src.client_pd(); + src_thread = obj_src.client_thread(); + transfer_from = obj_src.pd_sel(); + } + } + + uint8_t res = obj_dst.handle_oom(transfer_from, src_pd, src_thread, policy); + if (res == Tukija::NOVA_OK) + /* handling succeeded - continue with original IPC */ + reply(myself.stack_top()); + + /* transfer nothing */ + utcb.set_msg_word(0); + + if (res != Tukija::NOVA_PD_OOM) + error("upgrading kernel memory failed, policy ", (int)policy, ", " + "error ", (int)res, " - stop thread finally"); + + /* else: caller will get blocked until RCU period is 
over */ + + /* block caller in semaphore */ + reply(myself.stack_top(), obj_dst.sel_sm_block_oom()); +} + + +addr_t Pager_object::create_oom_portal() +{ + uint8_t res = !Tukija::NOVA_OK; + + with_pager_thread(_location, platform_specific(), + [&] (Pager_thread &thread) { + addr_t const core_pd_sel = platform_specific().core_pd_sel(); + addr_t const ec_sel = thread.native_thread().ec_sel; + res = create_portal(sel_oom_portal(), core_pd_sel, ec_sel, Mtd(0), + reinterpret_cast(_oom_handler), + this); + }); + + if (res == Tukija::NOVA_OK) + return sel_oom_portal(); + + error("creating portal for out of memory notification failed"); + return 0; +} + + +const char * Pager_object::client_thread() const +{ + Platform_thread * client = reinterpret_cast(_badge); + return client ? client->name() : "unknown"; +} + + +const char * Pager_object::client_pd() const +{ + Platform_thread * client = reinterpret_cast(_badge); + return client ? client->pd_name() : "unknown"; +} + +/********************** + ** Pager entrypoint ** + **********************/ + +Pager_entrypoint::Pager_entrypoint(Rpc_cap_factory &) +{ + /* detect enabled CPUs and create per CPU a pager thread */ + platform_specific().for_each_location([&](Affinity::Location &location) { + unsigned const pager_index = platform_specific().pager_index(location); + unsigned const kernel_cpu_id = platform_specific().kernel_cpu_id(location); + + if (!kernel_hip().is_cpu_enabled(kernel_cpu_id)) + return; + + /* sanity check for pager threads */ + if (pager_index >= PAGER_CPUS) { + error("too many CPUs for pager"); + return; + } + + pager_threads[pager_index].construct(location); + }); +} + + +void Pager_entrypoint::dissolve(Pager_object &obj) +{ + /* take care that no faults are in-flight */ + obj.cleanup_call(); +} diff --git a/repos/base-tukija/src/core/pd_session_support.cc b/repos/base-tukija/src/core/pd_session_support.cc new file mode 100644 index 0000000000..59895b264a --- /dev/null +++ 
b/repos/base-tukija/src/core/pd_session_support.cc @@ -0,0 +1,240 @@ +/* + * \brief Extension of core implementation of the PD session interface + * \author Alexander Boettcher + * \date 2013-01-11 + */ + +/* + * Copyright (C) 2013-2017 Genode Labs GmbH + * + * This file is part of the Genode OS framework, which is distributed + * under the terms of the GNU Affero General Public License version 3. + */ + +/* core includes */ +#include +#include + +#include /* kernel_hip */ +#include + +using namespace Core; + +inline Tukija::uint8_t retry_syscall(addr_t pd_sel, auto const &fn) +{ + Tukija::uint8_t res; + do { + res = fn(); + } while (res == Tukija::NOVA_PD_OOM && + Tukija::NOVA_OK == Pager_object::handle_oom(Pager_object::SRC_CORE_PD, + pd_sel, + "core", "ep", + Pager_object::Policy::UPGRADE_CORE_TO_DST)); + + return res; +} + +bool Pd_session_component::assign_pci(addr_t pci_config_memory, uint16_t bdf) +{ + return retry_syscall(_pd->pd_sel(), [&] { + return Tukija::assign_pci(_pd->pd_sel(), pci_config_memory, bdf); + }) == Tukija::NOVA_OK; +} + + +Pd_session::Map_result Pd_session_component::map(Pd_session::Virt_range const virt_range) +{ + Platform_pd &target_pd = *_pd; + Tukija::Utcb &utcb = *reinterpret_cast(Thread::myself()->utcb()); + addr_t const pd_core = platform_specific().core_pd_sel(); + addr_t const pd_dst = target_pd.pd_sel(); + + auto map_memory = [&] (Mapping const &mapping) + { + /* asynchronously map memory */ + uint8_t err = retry_syscall(_pd->pd_sel(), [&] { + utcb.set_msg_word(0); + + bool res = utcb.append_item(nova_src_crd(mapping), 0, true, false, + false, + mapping.dma_buffer, + mapping.write_combined); + + /* one item ever fits on the UTCB */ + (void)res; + + return Tukija::delegate(pd_core, pd_dst, nova_dst_crd(mapping)); + }); + + if (err != Tukija::NOVA_OK) { + error("could not eagerly map memory ", + Hex_range(mapping.dst_addr, 1UL << mapping.size_log2) , " " + "error=", err); + } + }; + + addr_t virt = virt_range.start; + size_t size 
= virt_range.num_bytes; + try { + while (size) { + + Fault const artificial_fault { + .hotspot = { virt }, + .access = Access::READ, + .rwx = Rwx::rwx(), + .bounds = { .start = 0, .end = ~0UL }, + }; + + _address_space.with_mapping_for_fault(artificial_fault, + [&] (Mapping const &mapping) + { + map_memory(mapping); + + size_t const mapped_bytes = 1 << mapping.size_log2; + + virt += mapped_bytes; + size = size < mapped_bytes ? 0 : size - mapped_bytes; + }, + + [&] (Region_map_component &, Fault const &) { /* don't reflect */ } + ); + } + } + catch (Out_of_ram) { return Map_result::OUT_OF_RAM; } + catch (Out_of_caps) { return Map_result::OUT_OF_CAPS; } + catch (...) { + error(__func__, " failed ", Hex(virt), "+", Hex(size)); + } + return Map_result::OK; +} + + +using State = Genode::Pd_session::Managing_system_state; + + +class System_control_component : public Genode::Rpc_object +{ + public: + + State system_control(State const &) override; +}; + + +class System_control_impl : public Core::System_control +{ + private: + + System_control_component objects [Core::Platform::MAX_SUPPORTED_CPUS] { }; + + auto with_location(auto const &location, auto const &fn) + { + unsigned const index = platform_specific().pager_index(location); + + if (index < Core::Platform::MAX_SUPPORTED_CPUS) + return fn (objects[index]); + + return Capability { }; + } + + auto with_location(auto const &location, auto const &fn) const + { + unsigned const index = platform_specific().pager_index(location); + + if (index < Core::Platform::MAX_SUPPORTED_CPUS) + return fn (objects[index]); + + return Capability { }; + } + + public: + + Capability control_cap(Affinity::Location const) const override; + + void manage(Rpc_entrypoint &ep, Affinity::Location const &location) + { + with_location(location, [&](auto &object) { + ep.manage(&object); + return object.cap(); + }); + } + +}; + + +static System_control_impl &system_instance() +{ + static System_control_impl system_control { }; + return 
system_control; +} + + +System_control & Core::init_system_control(Allocator &alloc, Rpc_entrypoint &) +{ + enum { ENTRYPOINT_STACK_SIZE = 20 * 1024 }; + + platform_specific().for_each_location([&](Affinity::Location const &location) { + + unsigned const kernel_cpu_id = platform_specific().kernel_cpu_id(location); + + if (!kernel_hip().is_cpu_enabled(kernel_cpu_id)) + return; + + auto ep = new (alloc) Rpc_entrypoint (nullptr, ENTRYPOINT_STACK_SIZE, + "system_control", location); + + system_instance().manage(*ep, location); + }); + + return system_instance(); +}; + + +Capability System_control_impl::control_cap(Affinity::Location const location) const +{ + return with_location(location, [&](auto &object) { + return object.cap(); + }); +} + + +static State acpi_suspend(State const &request) +{ + State respond { .trapno = 0 }; + + /* + * The trapno/ip/sp registers used below are just convention to transfer + * the intended sleep state S0 ... S5. The values are read out by an + * ACPI AML component and are of type TYP_SLPx as described in the + * ACPI specification, e.g. TYP_SLPa and TYP_SLPb. The values differ + * between different PC systems/boards. + * + * \note trapno/ip/sp registers are chosen because they exist in + * Managing_system_state for x86_32 and x86_64. 
+ */ + uint8_t const sleep_type_a = uint8_t(request.ip); + uint8_t const sleep_type_b = uint8_t(request.sp); + + auto const cap_suspend = platform_specific().core_pd_sel() + 3; + auto const result = Tukija::acpi_suspend(cap_suspend, sleep_type_a, + sleep_type_b); + + if (result == Tukija::NOVA_OK) + respond.trapno = 1 /* success, which means we resumed already */; + + return respond; +} + + +State System_control_component::system_control(State const &request) +{ + if (request.trapno == State::ACPI_SUSPEND_REQUEST) + return acpi_suspend(request); + + if (request.trapno == State::MSR_ACCESS) { + auto const msr_cap = platform_specific().core_pd_sel() + 4; + Tukija::Utcb &utcb = *reinterpret_cast(Thread::myself()->utcb()); + return msr_access(request, utcb, msr_cap); + } + + return State(); +} diff --git a/repos/base-tukija/src/core/platform.cc b/repos/base-tukija/src/core/platform.cc new file mode 100644 index 0000000000..55cc358b12 --- /dev/null +++ b/repos/base-tukija/src/core/platform.cc @@ -0,0 +1,1007 @@ +/* + * \brief Platform interface implementation + * \author Norman Feske + * \author Sebastian Sumpf + * \author Alexander Boettcher + * \date 2009-10-02 + */ + +/* + * Copyright (C) 2009-2022 Genode Labs GmbH + * + * This file is part of the Genode OS framework, which is distributed + * under the terms of the GNU Affero General Public License version 3. + */ + +/* Genode includes */ +#include +#include +#include +#include +#include +#include +#include +#include + +/* core includes */ +#include +#include +#include +#include +#include +#include + +/* base-internal includes */ +#include +#include +#include + +/* NOVA includes */ +#include +#include + +using namespace Core; +using namespace Tukija; + + +enum { verbose_boot_info = true }; + +Native_utcb *main_thread_utcb(); + + +/** + * Initial value of esp register, saved by the crt0 startup code. + * This value contains the address of the hypervisor information page. 
+ */ +extern addr_t __initial_sp; + + +/** + * Pointer to the UTCB of the main thread + */ +static Utcb *__main_thread_utcb; + + +/** + * Virtual address range consumed by core's program image + */ +extern unsigned _prog_img_beg, _prog_img_end; + + +/** + * Map preserved physical pages core-exclusive + * + * This function uses the virtual-memory region allocator to find a region + * fitting the desired mapping. Other allocators are left alone. + */ +addr_t Core::Platform::_map_pages(addr_t const phys_addr, addr_t const pages, + bool guard_page) +{ + addr_t const size = pages << get_page_size_log2(); + + /* try to reserve contiguous virtual area */ + return region_alloc().alloc_aligned(size + (guard_page ? get_page_size() : 0), + get_page_size_log2()).convert( + [&] (void *core_local_ptr) { + + addr_t const core_local_addr = reinterpret_cast(core_local_ptr); + + int res = map_local(_core_pd_sel, *__main_thread_utcb, phys_addr, + core_local_addr, pages, + Tukija::Rights(true, true, false), true); + + return res ? 0 : core_local_addr; + }, + + [&] (Allocator::Alloc_error) { + return 0UL; }); +} + + +/***************************** + ** Core page-fault handler ** + *****************************/ + + +enum { CORE_PAGER_UTCB_ADDR = 0xbff02000 }; + + +/** + * IDC handler for the page-fault portal + */ +static void page_fault_handler() +{ + Utcb *utcb = (Utcb *)CORE_PAGER_UTCB_ADDR; + + addr_t const pf_addr = utcb->pf_addr(); + addr_t const pf_ip = utcb->ip; + addr_t const pf_sp = utcb->sp; + uint8_t const pf_type = utcb->pf_type(); + + error("\nPAGE-FAULT IN CORE addr=", Hex(pf_addr), " ip=", Hex(pf_ip), + " (", (pf_type & Ipc_pager::ERR_W) ? "write" : "read", ")"); + + log("\nstack pointer ", Hex(pf_sp), ", qualifiers ", Hex(pf_type), " ", + pf_type & Ipc_pager::ERR_I ? "I" : "i", + pf_type & Ipc_pager::ERR_R ? "R" : "r", + pf_type & Ipc_pager::ERR_U ? "U" : "u", + pf_type & Ipc_pager::ERR_W ? "W" : "w", + pf_type & Ipc_pager::ERR_P ? 
"P" : "p"); + + if ((stack_area_virtual_base() <= pf_sp) && + (pf_sp < stack_area_virtual_base() + + stack_area_virtual_size())) + { + addr_t utcb_addr_f = pf_sp / stack_virtual_size(); + utcb_addr_f *= stack_virtual_size(); + utcb_addr_f += stack_virtual_size(); + utcb_addr_f -= 4096; + + Tukija::Utcb * utcb_fault = reinterpret_cast(utcb_addr_f); + unsigned last_items = utcb_fault->msg_items(); + + log("faulter utcb ", utcb_fault, ", last message item count ", last_items); + + for (unsigned i = 0; i < last_items; i++) { + Tukija::Utcb::Item * item = utcb_fault->get_item(i); + if (!item) + break; + + Tukija::Crd crd(item->crd); + if (crd.is_null()) + continue; + + log(i, " - " + "type=", Hex(crd.type()), " " + "rights=", Hex(crd.rights()), " " + "region=", Hex(crd.addr()), "+", Hex(1UL << (12 + crd.order())), " " + "hotspot=", Hex(crd.hotspot(item->hotspot)), + "(", Hex(item->hotspot), ")" + " - ", item->is_del() ? "delegated" : "translated"); + } + } + + /* dump stack trace */ + struct Core_img + { + addr_t _beg = 0; + addr_t _end = 0; + addr_t *_ip = nullptr; + + Core_img(addr_t sp) + { + extern addr_t _dtors_end; + _beg = (addr_t)&_prog_img_beg; + _end = (addr_t)&_dtors_end; + + _ip = (addr_t *)sp; + for (;!ip_valid(); _ip++) {} + } + + addr_t *ip() { return _ip; } + void next_ip() { _ip = ((addr_t *)*(_ip - 1)) + 1;} + bool ip_valid() { return (*_ip >= _beg) && (*_ip < _end); } + }; + + int count = 1; + log(" #", count++, " ", Hex(pf_sp, Hex::PREFIX, Hex::PAD), " ", + Hex(pf_ip, Hex::PREFIX, Hex::PAD)); + + Core_img dump(pf_sp); + while (dump.ip_valid()) { + log(" #", count++, " ", Hex((addr_t)dump.ip(), Hex::PREFIX, Hex::PAD), + " ", Hex(*dump.ip(), Hex::PREFIX, Hex::PAD)); + dump.next_ip(); + } + + sleep_forever(); +} + + +static addr_t core_pager_stack_top() +{ + enum { STACK_SIZE = 4*1024 }; + static char stack[STACK_SIZE]; + return (addr_t)&stack[STACK_SIZE - sizeof(addr_t)]; +} + + +/** + * Startup handler for core threads + */ +static void 
startup_handler() +{ + Utcb *utcb = (Utcb *)CORE_PAGER_UTCB_ADDR; + + /* initial IP is on stack */ + utcb->ip = *reinterpret_cast(utcb->sp); + utcb->mtd = Mtd::EIP | Mtd::ESP; + utcb->set_msg_word(0); + + reply((void*)core_pager_stack_top()); +} + + +static addr_t init_core_page_fault_handler(addr_t const core_pd_sel) +{ + /* create fault handler EC for core main thread */ + enum { + GLOBAL = false, + EXC_BASE = 0 + }; + + addr_t ec_sel = cap_map().insert(1); + + uint8_t ret = create_ec(ec_sel, core_pd_sel, boot_cpu(), + CORE_PAGER_UTCB_ADDR, core_pager_stack_top(), + EXC_BASE, GLOBAL); + if (ret) + log(__func__, ": create_ec returned ", ret); + + /* set up page-fault portal */ + create_pt(PT_SEL_PAGE_FAULT, core_pd_sel, ec_sel, + Mtd(Mtd::QUAL | Mtd::ESP | Mtd::EIP), + (addr_t)page_fault_handler); + revoke(Obj_crd(PT_SEL_PAGE_FAULT, 0, Obj_crd::RIGHT_PT_CTRL)); + + /* startup portal for global core threads */ + create_pt(PT_SEL_STARTUP, core_pd_sel, ec_sel, + Mtd(Mtd::EIP | Mtd::ESP), + (addr_t)startup_handler); + revoke(Obj_crd(PT_SEL_STARTUP, 0, Obj_crd::RIGHT_PT_CTRL)); + + return ec_sel; +} + + +static bool cpuid_invariant_tsc() +{ + unsigned long cpuid = 0x80000007, edx = 0; +#ifdef __x86_64__ + asm volatile ("cpuid" : "+a" (cpuid), "=d" (edx) : : "rbx", "rcx"); +#else + asm volatile ("push %%ebx \n" + "cpuid \n" + "pop %%ebx" : "+a" (cpuid), "=d" (edx) : : "ecx"); +#endif + return edx & 0x100; +} + +/* boot framebuffer resolution */ +struct Resolution : Register<64> +{ + struct Bpp : Bitfield<0, 8> { }; + struct Type : Bitfield<8, 8> { enum { VGA_TEXT = 2 }; }; + struct Height : Bitfield<16, 24> { }; + struct Width : Bitfield<40, 24> { }; +}; + +static Affinity::Space setup_affinity_space(Hip const &hip) +{ + unsigned cpus = 0; + unsigned ids_thread = 0; + Bit_array<1 << (sizeof(Hip::Cpu_desc::thread) * 8)> threads; + + hip.for_each_enabled_cpu([&](Hip::Cpu_desc const &cpu, unsigned) { + cpus ++; + if (threads.get(cpu.thread, 1)) return; + + 
threads.set(cpu.thread, 1); + ids_thread ++; + }); + + if (ids_thread && ((cpus % ids_thread) == 0)) + return Affinity::Space(cpus / ids_thread, ids_thread); + + /* mixture of system with cores with and without hyperthreads ? */ + return Affinity::Space(cpus, 1); +} + +/************** + ** Platform ** + **************/ + +Core::Platform::Platform() +: + _io_mem_alloc(&core_mem_alloc()), _io_port_alloc(&core_mem_alloc()), + _irq_alloc(&core_mem_alloc()), + _vm_base(0x1000), _vm_size(0), _cpus(Affinity::Space(1,1)) +{ + bool warn_reorder = false; + bool error_overlap = false; + + Hip const &hip = *(Hip *)__initial_sp; + + /* determine number of available CPUs */ + _cpus = setup_affinity_space(hip); + + /* register UTCB of main thread */ + __main_thread_utcb = (Utcb *)(__initial_sp - get_page_size()); + + /* set core pd selector */ + _core_pd_sel = hip.sel_exc; + + /* create lock used by capability allocator */ + Tukija::create_sm(Tukija::SM_SEL_EC, core_pd_sel(), 0); + + /* locally map the whole I/O port range */ + enum { ORDER_64K = 16 }; + map_local_one_to_one(*__main_thread_utcb, Io_crd(0, ORDER_64K), _core_pd_sel); + /* map BDA region, console reads IO ports at BDA_VIRT_ADDR + 0x400 */ + enum { BDA_PHY = 0x0U, BDA_VIRT = 0x1U, BDA_VIRT_ADDR = 0x1000U }; + map_local_phys_to_virt(*__main_thread_utcb, + Mem_crd(BDA_PHY, 0, Rights(true, false, false)), + Mem_crd(BDA_VIRT, 0, Rights(true, false, false)), + _core_pd_sel); + + + /* + * Now that we can access the I/O ports for comport 0, printf works... + */ + + + /* + * remap main utcb to default utcb address + * we do this that early, because Core_mem_allocator uses + * the main_thread_utcb very early to establish mappings + */ + if (map_local(_core_pd_sel, *__main_thread_utcb, (addr_t)__main_thread_utcb, + (addr_t)main_thread_utcb(), 1, Rights(true, true, false))) { + error("could not remap utcb of main thread"); + } + + /* + * Mark successful boot of hypervisor for automatic tests. 
This must be + * done before core_log is initialized to prevent unexpected-reboot + * detection. + */ + log("\nHypervisor ", String((char const *)&hip.signature), + " (API v", hip.api_version, ")"); + + /* init genode cpu ids based on kernel cpu ids (used for syscalls) */ + warn_reorder = !hip.remap_cpu_ids(map_cpu_ids, + sizeof(map_cpu_ids) / sizeof(map_cpu_ids[0]), + (unsigned)boot_cpu()); + + /* configure virtual address spaces */ +#ifdef __x86_64__ + _vm_size = 0x7fffc0000000UL - _vm_base; +#else + _vm_size = 0xc0000000UL - _vm_base; +#endif + + /* set up page fault handler for core - for debugging */ + addr_t const ec_core_exc_sel = init_core_page_fault_handler(core_pd_sel()); + + /* initialize core allocators */ + size_t const num_mem_desc = (hip.hip_length - hip.mem_desc_offset) + / hip.mem_desc_size; + + addr_t mem_desc_base = ((addr_t)&hip + hip.mem_desc_offset); + + /* define core's virtual address space */ + addr_t virt_beg = _vm_base; + addr_t virt_end = _vm_size; + _core_mem_alloc.virt_alloc().add_range(virt_beg, virt_end - virt_beg); + + /* exclude core image from core's virtual address allocator */ + addr_t const core_virt_beg = trunc_page((addr_t)&_prog_img_beg); + addr_t const core_virt_end = round_page((addr_t)&_prog_img_end); + addr_t const binaries_beg = trunc_page((addr_t)&_boot_modules_binaries_begin); + addr_t const binaries_end = round_page((addr_t)&_boot_modules_binaries_end); + + size_t const core_size = binaries_beg - core_virt_beg; + region_alloc().remove_range(core_virt_beg, core_size); + + /* ROM modules are un-used by core - de-detach region */ + addr_t const binaries_size = binaries_end - binaries_beg; + unmap_local(*__main_thread_utcb, binaries_beg, binaries_size >> 12); + + /* preserve Bios Data Area (BDA) in core's virtual address space */ + region_alloc().remove_range(BDA_VIRT_ADDR, 0x1000); + + /* preserve stack area in core's virtual address space */ + region_alloc().remove_range(stack_area_virtual_base(), + 
stack_area_virtual_size()); + + /* exclude utcb of core pager thread + empty guard pages before and after */ + region_alloc().remove_range(CORE_PAGER_UTCB_ADDR - get_page_size(), + get_page_size() * 3); + + /* exclude utcb of main thread and hip + empty guard pages before and after */ + region_alloc().remove_range((addr_t)__main_thread_utcb - get_page_size(), + get_page_size() * 4); + + /* sanity checks */ + addr_t check [] = { + reinterpret_cast(__main_thread_utcb), CORE_PAGER_UTCB_ADDR, + BDA_VIRT_ADDR + }; + + for (unsigned i = 0; i < sizeof(check) / sizeof(check[0]); i++) { + if (stack_area_virtual_base() <= check[i] && + check[i] < stack_area_virtual_base() + stack_area_virtual_size()) + { + error("overlapping area - ", + Hex_range(stack_area_virtual_base(), + stack_area_virtual_size()), " vs ", + Hex(check[i])); + + error_overlap = true; + } + } + + /* initialize core's physical-memory and I/O memory allocator */ + _io_mem_alloc.add_range(0, ~0xfffUL); + Hip::Mem_desc *mem_desc = (Hip::Mem_desc *)mem_desc_base; + + Hip::Mem_desc *boot_fb = nullptr; + + bool efi_boot = false; + size_t kernel_memory = 0; + + /* + * All "available" ram must be added to our physical allocator before all + * non "available" regions that overlaps with ram get removed. 
+ */ + for (unsigned i = 0; i < num_mem_desc; i++, mem_desc++) { + /* 32/64bit EFI image handle pointer - see multiboot spec 2 */ + if (mem_desc->type == 20 || mem_desc->type == 19) + efi_boot = true; + + if (mem_desc->type == Hip::Mem_desc::FRAMEBUFFER) + boot_fb = mem_desc; + if (mem_desc->type == Hip::Mem_desc::MICROHYPERVISOR) + kernel_memory += (size_t)mem_desc->size; + + if (mem_desc->type != Hip::Mem_desc::AVAILABLE_MEMORY) continue; + + if (!mem_desc->size) continue; + + /* skip regions above 4G on 32 bit, no op on 64 bit */ + if (mem_desc->addr > ~0UL) continue; + + uint64_t base = round_page(mem_desc->addr); + uint64_t size; + /* truncate size if base+size larger then natural 32/64 bit boundary */ + if (mem_desc->addr >= ~0ULL - mem_desc->size + 1) + size = trunc_page(~0ULL - mem_desc->addr + 1); + else + size = trunc_page(mem_desc->addr + mem_desc->size) - base; + + _io_mem_alloc.remove_range((addr_t)base, (size_t)size); + ram_alloc().add_range((addr_t)base, (size_t)size); + } + + addr_t hyp_log = 0; + size_t hyp_log_size = 0; + + /* + * Exclude all non-available memory from physical allocator AFTER all + * available RAM was added - otherwise the non-available memory gets not + * properly removed from the physical allocator + */ + mem_desc = (Hip::Mem_desc *)mem_desc_base; + for (unsigned i = 0; i < num_mem_desc; i++, mem_desc++) { + if (mem_desc->type == Hip::Mem_desc::AVAILABLE_MEMORY) continue; + + if (mem_desc->type == Hip::Mem_desc::HYPERVISOR_LOG) { + hyp_log = (addr_t)mem_desc->addr; + hyp_log_size = (size_t)mem_desc->size; + } + + uint64_t base = trunc_page(mem_desc->addr); + uint64_t size = mem_desc->size; + + /* remove framebuffer from available memory */ + if (mem_desc->type == Hip::Mem_desc::FRAMEBUFFER) { + uint32_t const height = (uint32_t)Resolution::Height::get(mem_desc->size); + uint32_t const pitch = mem_desc->aux; + /* calculate size of framebuffer */ + size = pitch * height; + } + + /* skip regions above 4G on 32 bit, no op on 64 bit 
*/ + if (mem_desc->addr > ~0UL) continue; + + /* truncate size if base+size larger than natural 32/64 bit boundary */ + if (mem_desc->addr + size < mem_desc->addr) + size = 0UL - base; + else + size = round_page(mem_desc->addr + size) - base; + + if (!size) + continue; + + /* make acpi regions as io_mem available to platform driver */ + if (mem_desc->type == Hip::Mem_desc::ACPI_RECLAIM_MEMORY || + mem_desc->type == Hip::Mem_desc::ACPI_NVS_MEMORY) + _io_mem_alloc.add_range((addr_t)base, (size_t)size); + + ram_alloc().remove_range((addr_t)base, (size_t)size); + } + + /* needed as I/O memory by the VESA driver */ + _io_mem_alloc.add_range(0, 0x1000); + ram_alloc().remove_range(0, 0x1000); + + /* exclude pages holding multi-boot command lines from core allocators */ + mem_desc = (Hip::Mem_desc *)mem_desc_base; + addr_t prev_cmd_line_page = ~0, curr_cmd_line_page = 0; + for (unsigned i = 0; i < num_mem_desc; i++, mem_desc++) { + if (mem_desc->type != Hip::Mem_desc::MULTIBOOT_MODULE) continue; + if (!mem_desc->aux) continue; + + curr_cmd_line_page = mem_desc->aux >> get_page_size_log2(); + if (curr_cmd_line_page == prev_cmd_line_page) continue; + + ram_alloc().remove_range(curr_cmd_line_page << get_page_size_log2(), + get_page_size() * 2); + prev_cmd_line_page = curr_cmd_line_page; + } + + /* sanity checks that regions don't overlap - could be bootloader issue */ + mem_desc = (Hip::Mem_desc *)mem_desc_base; + for (unsigned i = 0; i < num_mem_desc; i++, mem_desc++) { + + if (mem_desc->type == Hip::Mem_desc::AVAILABLE_MEMORY) continue; + if (mem_desc->type == Hip::Mem_desc::ACPI_RSDT) continue; + if (mem_desc->type == Hip::Mem_desc::ACPI_XSDT) continue; + if (mem_desc->type == Hip::Mem_desc::FRAMEBUFFER) continue; + if (mem_desc->type == Hip::Mem_desc::EFI_SYSTEM_TABLE) continue; + + Hip::Mem_desc * mem_d = (Hip::Mem_desc *)mem_desc_base; + for (unsigned j = 0; j < num_mem_desc; j++, mem_d++) { + if (mem_d->type == Hip::Mem_desc::AVAILABLE_MEMORY) continue; + if
(mem_d->type == Hip::Mem_desc::ACPI_RSDT) continue; + if (mem_d->type == Hip::Mem_desc::ACPI_XSDT) continue; + if (mem_d->type == Hip::Mem_desc::FRAMEBUFFER) continue; + if (mem_d->type == Hip::Mem_desc::EFI_SYSTEM_TABLE) continue; + if (mem_d == mem_desc) continue; + + /* if regions are disjoint all is fine */ + if ((mem_d->addr + mem_d->size <= mem_desc->addr) || + (mem_d->addr >= mem_desc->addr + mem_desc->size)) + continue; + + error("region overlap ", + Hex_range((addr_t)mem_desc->addr, (size_t)mem_desc->size), " " + "(", (int)mem_desc->type, ") with ", + Hex_range((addr_t)mem_d->addr, (size_t)mem_d->size), " " + "(", (int)mem_d->type, ")"); + + error_overlap = true; + } + } + + /* + * From now on, it is safe to use the core allocators... + */ + + uint64_t efi_sys_tab_phy = 0UL; + uint64_t rsdt = 0UL; + uint64_t xsdt = 0UL; + + mem_desc = (Hip::Mem_desc *)mem_desc_base; + for (unsigned i = 0; i < num_mem_desc; i++, mem_desc++) { + if (mem_desc->type == Hip::Mem_desc::EFI_SYSTEM_TABLE) efi_sys_tab_phy = mem_desc->addr; + if (mem_desc->type == Hip::Mem_desc::ACPI_RSDT) rsdt = mem_desc->addr; + if (mem_desc->type == Hip::Mem_desc::ACPI_XSDT) xsdt = mem_desc->addr; + if (mem_desc->type != Hip::Mem_desc::MULTIBOOT_MODULE) continue; + if (!mem_desc->addr || !mem_desc->size) continue; + + /* assume core's ELF image has one-page header */ + _core_phys_start = (addr_t)trunc_page(mem_desc->addr + get_page_size()); + } + + _init_rom_modules(); + + auto export_pages_as_rom_module = [&] (auto rom_name, size_t pages, auto content_fn) + { + size_t const bytes = pages << get_page_size_log2(); + ram_alloc().alloc_aligned(bytes, get_page_size_log2()).with_result( + + [&] (void *phys_ptr) { + + addr_t const phys_addr = reinterpret_cast(phys_ptr); + char * const core_local_ptr = (char *)_map_pages(phys_addr, pages); + + if (!core_local_ptr) { + warning("failed to export ", rom_name, " as ROM module"); + ram_alloc().free(phys_ptr, bytes); + return; + } + + memset(core_local_ptr,
0, bytes); + content_fn(core_local_ptr, bytes); + + new (core_mem_alloc()) + Rom_module(_rom_fs, rom_name, phys_addr, bytes); + + /* leave the ROM backing store mapped within core */ + }, + + [&] (Range_allocator::Alloc_error) { + warning("failed to allocate physical memory for exporting ", + rom_name, " as ROM module"); }); + }; + + export_pages_as_rom_module("platform_info", 1 + (MAX_SUPPORTED_CPUS / 32), + [&] (char * const ptr, size_t const size) { + Xml_generator xml(ptr, size, "platform_info", [&] + { + xml.node("kernel", [&] { + xml.attribute("name", "tukija"); + xml.attribute("acpi", true); + xml.attribute("msi" , true); + xml.attribute("iommu", hip.has_feature_iommu()); + }); + if (efi_sys_tab_phy) { + xml.node("efi-system-table", [&] { + xml.attribute("address", String<32>(Hex(efi_sys_tab_phy))); + }); + } + xml.node("acpi", [&] { + + xml.attribute("revision", 2); /* XXX */ + + if (rsdt) + xml.attribute("rsdt", String<32>(Hex(rsdt))); + + if (xsdt) + xml.attribute("xsdt", String<32>(Hex(xsdt))); + }); + xml.node("affinity-space", [&] { + xml.attribute("width", _cpus.width()); + xml.attribute("height", _cpus.height()); + }); + xml.node("boot", [&] { + if (!boot_fb) + return; + + if (!efi_boot && (Resolution::Type::get(boot_fb->size) != Resolution::Type::VGA_TEXT)) + return; + + xml.node("framebuffer", [&] { + xml.attribute("phys", String<32>(Hex(boot_fb->addr))); + xml.attribute("width", Resolution::Width::get(boot_fb->size)); + xml.attribute("height", Resolution::Height::get(boot_fb->size)); + xml.attribute("bpp", Resolution::Bpp::get(boot_fb->size)); + xml.attribute("type", Resolution::Type::get(boot_fb->size)); + xml.attribute("pitch", boot_fb->aux); + }); + }); + xml.node("hardware", [&] { + xml.node("features", [&] { + xml.attribute("svm", hip.has_feature_svm()); + xml.attribute("vmx", hip.has_feature_vmx()); + }); + xml.node("tsc", [&] { + xml.attribute("invariant", cpuid_invariant_tsc()); + xml.attribute("freq_khz" , hip.tsc_freq); + }); + 
xml.node("cpus", [&] { + for_each_location([&](Affinity::Location &location) { + unsigned const kernel_cpu_id = Platform::kernel_cpu_id(location); + auto const cpu_ptr = hip.cpu_desc_of_cpu(kernel_cpu_id); + + if (!cpu_ptr) + return; + + auto const &cpu = *cpu_ptr; + + xml.node("cpu", [&] { + xml.attribute("xpos", location.xpos()); + xml.attribute("ypos", location.ypos()); + xml.attribute("id", kernel_cpu_id); + xml.attribute("package", cpu.package); + xml.attribute("core", cpu.core); + xml.attribute("thread", cpu.thread); + xml.attribute("family", String<5>(Hex(cpu.family))); + xml.attribute("model", String<5>(Hex(cpu.model))); + xml.attribute("stepping", String<5>(Hex(cpu.stepping))); + xml.attribute("platform", String<5>(Hex(cpu.platform))); + xml.attribute("patch", String<12>(Hex(cpu.patch))); + if (cpu.p_core()) xml.attribute("cpu_type", "P"); + if (cpu.e_core()) xml.attribute("cpu_type", "E"); + }); + }); + }); + }); + }); + } + ); + + export_pages_as_rom_module("core_log", 4, + [&] (char * const ptr, size_t const size) { + init_core_log( Core_log_range { (addr_t)ptr, size } ); + }); + + /* export hypervisor log memory */ + if (hyp_log && hyp_log_size) + new (core_mem_alloc()) + Rom_module(_rom_fs, "kernel_log", hyp_log, hyp_log_size); + + /* show all warnings/errors after init_core_log setup core_log */ + if (warn_reorder) + warning("re-ordering of CPU ids for SMT and P/E cores failed"); + if (hip.api_version != 10) + error("running on a unsupported kernel API version ", hip.api_version); + if (binaries_end != core_virt_end) + error("mismatch in address layout of binaries with core"); + if (error_overlap) + error("memory overlap issues detected"); + if (hip.sel_exc + 3 > NUM_INITIAL_PT_RESERVED) + error("configuration error (NUM_INITIAL_PT_RESERVED)"); + + /* map idle SCs */ + auto const log2cpu = log2(hip.cpu_max()); + auto const sc_idle_base = cap_map().insert(log2cpu + 1); + + if (map_local(_core_pd_sel, *__main_thread_utcb, Obj_crd(0, log2cpu), + 
Obj_crd(sc_idle_base, log2cpu), true)) + error("idle SC information unavailable"); + + + if (verbose_boot_info) { + if (hip.has_feature_iommu()) + log("Hypervisor features IOMMU"); + if (hip.has_feature_vmx()) + log("Hypervisor features VMX"); + if (hip.has_feature_svm()) + log("Hypervisor features SVM"); + log("Hypervisor reports ", _cpus.width(), "x", _cpus.height(), " " + "CPU", _cpus.total() > 1 ? "s" : " "); + if (!cpuid_invariant_tsc()) + warning("CPU has no invariant TSC."); + + log("mapping: affinity space -> kernel cpu id - package:core:thread"); + + for_each_location([&](Affinity::Location &location) { + unsigned const kernel_cpu_id = Platform::kernel_cpu_id(location); + Hip::Cpu_desc const * cpu = hip.cpu_desc_of_cpu(kernel_cpu_id); + + Genode::String<16> text ("failure"); + if (cpu) + text = Genode::String<16>(cpu->package, ":", + cpu->core, ":", cpu->thread, + cpu->e_core() ? " E" : + cpu->p_core() ? " P" : ""); + + log(" remap (", location.xpos(), "x", location.ypos(),") -> ", + kernel_cpu_id, " - ", text, + boot_cpu() == kernel_cpu_id ? 
" boot cpu" : ""); + }); + } + + /* I/O port allocator (only meaningful for x86) */ + _io_port_alloc.add_range(0, 0x10000); + + /* IRQ allocator */ + _irq_alloc.add_range(0, hip.sel_gsi); + _gsi_base_sel = (hip.mem_desc_offset - hip.cpu_desc_offset) / hip.cpu_desc_size; + + log(_rom_fs); + + log(Number_of_bytes(kernel_memory), " kernel memory"); log(""); + + /* add capability selector ranges to map */ + unsigned const first_index = 0x2000; + unsigned index = first_index; + for (unsigned i = 0; i < 32; i++) + { + void * phys_ptr = nullptr; + + ram_alloc().alloc_aligned(get_page_size(), get_page_size_log2()).with_result( + [&] (void *ptr) { phys_ptr = ptr; }, + [&] (Range_allocator::Alloc_error) { /* covered by nullptr test below */ }); + + if (phys_ptr == nullptr) + break; + + addr_t phys_addr = reinterpret_cast(phys_ptr); + addr_t core_local_addr = _map_pages(phys_addr, 1); + + if (!core_local_addr) { + ram_alloc().free(phys_ptr); + break; + } + + Cap_range &range = *reinterpret_cast(core_local_addr); + construct_at(&range, index); + + cap_map().insert(range); + + index = (unsigned)(range.base() + range.elements()); + } + _max_caps = index - first_index; + + /* add idle ECs to trace sources */ + for_each_location([&](Affinity::Location &location) { + unsigned const kernel_cpu_id = Platform::kernel_cpu_id(location); + if (!hip.cpu_desc_of_cpu(kernel_cpu_id)) return; + + struct Trace_source : public Trace::Source::Info_accessor, + private Trace::Control, + private Trace::Source + { + Affinity::Location const affinity; + unsigned const sc_sel; + Genode::String<8> const name; + + /** + * Trace::Source::Info_accessor interface + */ + Info trace_source_info() const override + { + uint64_t sc_time = 0; + uint64_t ec_time = 0; + uint8_t res = 0; + + if (name == "killed") { + res = Tukija::sc_ec_time(sc_sel, sc_sel, + sc_time, ec_time); + } else { + auto syscall_op = (name == "cross") + ? 
Sc_op::SC_TIME_CROSS + : Sc_op::SC_TIME_IDLE; + + res = Tukija::sc_ctrl(sc_sel, sc_time, + syscall_op); + + if (syscall_op == Sc_op::SC_TIME_IDLE) + ec_time = sc_time; + } + + if (res != Tukija::NOVA_OK) + warning("sc_ctrl on ", name, " failed" + ", res=", res); + + return { Session_label("kernel"), Trace::Thread_name(name), + Trace::Execution_time(ec_time, sc_time), affinity }; + } + + Trace_source(Trace::Source_registry ®istry, + Affinity::Location const affinity, + unsigned const sc_sel, + char const * type_name) + : + Trace::Control(), + Trace::Source(*this, *this), affinity(affinity), + sc_sel(sc_sel), name(type_name) + { + registry.insert(this); + } + }; + + new (core_mem_alloc()) Trace_source(Trace::sources(), location, + (unsigned)(sc_idle_base + kernel_cpu_id), + "idle"); + + new (core_mem_alloc()) Trace_source(Trace::sources(), location, + (unsigned)(sc_idle_base + kernel_cpu_id), + "cross"); + + new (core_mem_alloc()) Trace_source(Trace::sources(), location, + (unsigned)(sc_idle_base + kernel_cpu_id), + "killed"); + }); + + /* add exception handler EC for core and EC root thread to trace sources */ + struct Core_trace_source : public Trace::Source::Info_accessor, + private Trace::Control, + private Trace::Source + { + Affinity::Location const location; + addr_t const ec_sc_sel; + Genode::String<8> const name; + + /** + * Trace::Source::Info_accessor interface + */ + Info trace_source_info() const override + { + uint64_t ec_time = 0; + uint64_t sc_time = 0; + + if (name == "root") { + uint8_t res = Tukija::sc_ec_time(ec_sc_sel + 1, + ec_sc_sel, + sc_time, + ec_time); + if (res != Tukija::NOVA_OK) + warning("sc_ec_time for root failed " + "res=", res); + } else { + uint8_t res = Tukija::ec_time(ec_sc_sel, ec_time); + if (res != Tukija::NOVA_OK) + warning("ec_time for", name, " thread " + "failed res=", res); + } + + return { Session_label("core"), name, + Trace::Execution_time(ec_time, sc_time), location }; + } + + Core_trace_source(Trace::Source_registry 
®istry, + Affinity::Location loc, addr_t sel, + char const *name) + : + Trace::Control(), + Trace::Source(*this, *this), location(loc), ec_sc_sel(sel), + name(name) + { + registry.insert(this); + } + }; + + new (core_mem_alloc()) + Core_trace_source(Trace::sources(), + Affinity::Location(0, 0, _cpus.width(), 1), + ec_core_exc_sel, "core_fault"); + + new (core_mem_alloc()) + Core_trace_source(Trace::sources(), + Affinity::Location(0, 0, _cpus.width(), 1), + hip.sel_exc + 1, "root"); +} + + +addr_t Core::Platform::_rom_module_phys(addr_t virt) +{ + return virt - (addr_t)&_prog_img_beg + _core_phys_start; +} + + +unsigned Core::Platform::kernel_cpu_id(Affinity::Location location) const +{ + unsigned const cpu_id = pager_index(location); + + if (cpu_id >= sizeof(map_cpu_ids) / sizeof(map_cpu_ids[0])) { + error("invalid genode cpu id ", cpu_id); + return ~0U; + } + + return map_cpu_ids[cpu_id]; +} + + +unsigned Core::Platform::pager_index(Affinity::Location location) const +{ + return (location.xpos() * _cpus.height() + location.ypos()) + % (_cpus.width() * _cpus.height()); +} + + +/**************************************** + ** Support for core memory management ** + ****************************************/ + +bool Mapped_mem_allocator::_map_local(addr_t virt_addr, addr_t phys_addr, size_t size) +{ + /* platform_specific()->core_pd_sel() deadlocks if called from platform constructor */ + Hip const &hip = *(Hip const *)__initial_sp; + addr_t const core_pd_sel = hip.sel_exc; + + map_local(core_pd_sel, + *(Utcb *)Thread::myself()->utcb(), phys_addr, + virt_addr, size / get_page_size(), + Rights(true, true, false), true); + return true; +} + + +bool Mapped_mem_allocator::_unmap_local(addr_t virt_addr, addr_t, size_t size) +{ + unmap_local(*(Utcb *)Thread::myself()->utcb(), + virt_addr, size / get_page_size()); + return true; +} + + +/******************************** + ** Generic platform interface ** + ********************************/ + +void 
Core::Platform::wait_for_exit() { sleep_forever(); } + diff --git a/repos/base-tukija/src/core/platform_pd.cc b/repos/base-tukija/src/core/platform_pd.cc new file mode 100644 index 0000000000..9274193500 --- /dev/null +++ b/repos/base-tukija/src/core/platform_pd.cc @@ -0,0 +1,79 @@ +/* + * \brief Protection-domain facility + * \author Norman Feske + * \date 2009-10-02 + */ + +/* + * Copyright (C) 2009-2017 Genode Labs GmbH + * + * This file is part of the Genode OS framework, which is distributed + * under the terms of the GNU Affero General Public License version 3. + */ + +/* Genode includes */ +#include + +/* core includes */ +#include +#include + +using namespace Core; + + +void Platform_pd::assign_parent(Native_capability parent) +{ + if (!_parent.valid() && parent.valid()) + _parent = parent; +} + + +Platform_pd::Platform_pd(Allocator &, char const *label, signed, bool) +: + _pd_sel(cap_map().insert()), _label(label) +{ + if (_pd_sel == Native_thread::INVALID_INDEX) { + error("platform pd creation failed "); + return; + } + + /* create task */ + enum { KEEP_FREE_PAGES_NOT_AVAILABLE_FOR_UPGRADE = 2, UPPER_LIMIT_PAGES = 32 }; + uint8_t res = Tukija::create_pd(_pd_sel, platform_specific().core_pd_sel(), + Tukija::Obj_crd(), + KEEP_FREE_PAGES_NOT_AVAILABLE_FOR_UPGRADE, + UPPER_LIMIT_PAGES); + + if (res != Tukija::NOVA_OK) + error("create_pd returned ", res); +} + + +Platform_pd::~Platform_pd() +{ + if (_pd_sel == Native_thread::INVALID_INDEX) + return; + + /* Revoke and free cap, pd is gone */ + Tukija::revoke(Tukija::Obj_crd(_pd_sel, 0)); + cap_map().remove(_pd_sel, 0, false); +} + + +void Platform_pd::flush(addr_t remote_virt, size_t size, Core_local_addr) +{ + Tukija::Rights const revoke_rwx(true, true, true); + + Flexpage_iterator flex(remote_virt, size, remote_virt, size, 0); + Flexpage page = flex.page(); + + if (pd_sel() == Native_thread::INVALID_INDEX) + return; + + while (page.valid()) { + Tukija::Mem_crd mem(page.addr >> 12, page.log2_order - 12, 
revoke_rwx); + Tukija::revoke(mem, true, true, pd_sel()); + + page = flex.page(); + } +} diff --git a/repos/base-tukija/src/core/platform_services.cc b/repos/base-tukija/src/core/platform_services.cc new file mode 100644 index 0000000000..cc53dc48c0 --- /dev/null +++ b/repos/base-tukija/src/core/platform_services.cc @@ -0,0 +1,36 @@ +/* + * \brief Platform specific services for NOVA + * \author Alexander Boettcher + * \date 2018-08-26 + */ + +/* + * Copyright (C) 2018 Genode Labs GmbH + * + * This file is part of the Genode OS framework, which is distributed + * under the terms of the GNU Affero General Public License version 3. + */ + +/* core includes */ +#include +#include +#include + +/* + * Add x86 specific services + */ +void Core::platform_add_local_services(Rpc_entrypoint &ep, + Sliced_heap &heap, + Registry &services, + Trace::Source_registry &trace_sources, + Ram_allocator &core_ram, + Region_map &core_rm, + Range_allocator &io_port_ranges) +{ + static Vm_root vm_root(ep, heap, core_ram, core_rm, trace_sources); + static Core_service vm(services, vm_root); + + static Io_port_root io_root(io_port_ranges, heap); + + static Core_service io_port(services, io_root); +} diff --git a/repos/base-tukija/src/core/platform_thread.cc b/repos/base-tukija/src/core/platform_thread.cc new file mode 100644 index 0000000000..6473928b3a --- /dev/null +++ b/repos/base-tukija/src/core/platform_thread.cc @@ -0,0 +1,377 @@ +/* + * \brief Thread facility + * \author Norman Feske + * \author Sebastian Sumpf + * \author Alexander Boettcher + * \date 2009-10-02 + */ + +/* + * Copyright (C) 2009-2017 Genode Labs GmbH + * + * This file is part of the Genode OS framework, which is distributed + * under the terms of the GNU Affero General Public License version 3. 
+ */ + +/* core includes */ +#include +#include +#include +#include +#include +#include + +/* base-internal includes */ +#include + +/* NOVA includes */ +#include +#include + +using namespace Core; + + +static uint8_t map_thread_portals(Pager_object &pager, + addr_t const target_exc_base, + Tukija::Utcb &utcb) +{ + using Tukija::Obj_crd; + using Tukija::NUM_INITIAL_PT_LOG2; + + addr_t const source_pd = platform_specific().core_pd_sel(); + addr_t const source_exc_base = pager.exc_pt_sel_client(); + addr_t const target_pd = pager.pd_sel(); + + /* xxx better map portals with solely pt_call and sm separately ? xxx */ + addr_t const rights = Obj_crd::RIGHT_EC_RECALL | + Obj_crd::RIGHT_PT_CTRL | Obj_crd::RIGHT_PT_CALL | Obj_crd::RIGHT_PT_XCPU | + Obj_crd::RIGHT_SM_UP | Obj_crd::RIGHT_SM_DOWN; + + Obj_crd const source_initial_caps(source_exc_base, NUM_INITIAL_PT_LOG2, + rights); + Obj_crd const target_initial_caps(target_exc_base, NUM_INITIAL_PT_LOG2, + rights); + + return async_map(pager, source_pd, target_pd, + source_initial_caps, target_initial_caps, utcb); +} + + +/********************* + ** Platform thread ** + *********************/ + + +void Platform_thread::affinity(Affinity::Location location) +{ + if (!_pager) + return; + + if (worker() || vcpu() || !sc_created()) + return; + + _pager->migrate(platform_specific().sanitize(location)); +} + + +bool Platform_thread::_create_and_map_oom_portal(Tukija::Utcb &utcb) +{ + addr_t const pt_oom = pager().create_oom_portal(); + if (!pt_oom) + return false; + + addr_t const source_pd = platform_specific().core_pd_sel(); + return !map_local(source_pd, utcb, Tukija::Obj_crd(pt_oom, 0), + Tukija::Obj_crd(_sel_pt_oom(), 0)); +} + + +void Platform_thread::prepare_migration() +{ + using Tukija::Utcb; + Utcb &utcb = *reinterpret_cast(Thread::myself()->utcb()); + + /* map exception portals to target pd */ + map_thread_portals(pager(), _sel_exc_base, utcb); + /* re-create pt_oom portal */ + _create_and_map_oom_portal(utcb); +} + + 
+void Platform_thread::start(void *ip, void *sp) +{ + using namespace Tukija; + + if (!_pager) { + error("pager undefined"); + return; + } + + Pager_object &pager = *_pager; + + if (main_thread() && !vcpu() && (_pd.parent_pt_sel() == Native_thread::INVALID_INDEX)) { + error("protection domain undefined"); + return; + } + + Utcb &utcb = *reinterpret_cast(Thread::myself()->utcb()); + unsigned const kernel_cpu_id = platform_specific().kernel_cpu_id(_location); + addr_t const source_pd = platform_specific().core_pd_sel(); + + if (!_create_and_map_oom_portal(utcb)) { + error("setup of out-of-memory notification portal - failed"); + return; + } + + if (!main_thread()) { + addr_t const initial_sp = reinterpret_cast(sp); + addr_t const utcb_addr = vcpu() ? 0 : round_page(initial_sp); + + if (_sel_exc_base == Native_thread::INVALID_INDEX) { + error("exception base not specified"); + return; + } + + uint8_t res = syscall_retry(pager, + [&] { + return create_ec(_sel_ec(), _pd.pd_sel(), kernel_cpu_id, + utcb_addr, initial_sp, _sel_exc_base, + !worker()); + }); + + if (res != Tukija::NOVA_OK) { + error("creation of new thread failed ", res); + return; + } + + if (!vcpu()) + res = map_thread_portals(pager, _sel_exc_base, utcb); + + if (res != NOVA_OK) { + revoke(Obj_crd(_sel_ec(), 0)); + error("creation of new thread/vcpu failed ", res); + return; + } + + if (worker()) { + /* local/worker threads do not require a startup portal */ + revoke(Obj_crd(pager.exc_pt_sel_client() + PT_SEL_STARTUP, 0)); + } + + pager.initial_eip((addr_t)ip); + pager.initial_esp(initial_sp); + pager.client_set_ec(_sel_ec()); + return; + } + + if (!vcpu() && _sel_exc_base != Native_thread::INVALID_INDEX) { + error("thread already started"); + return; + } + + addr_t pd_utcb = 0; + + if (!vcpu()) { + _sel_exc_base = 0; + + pd_utcb = stack_area_virtual_base() + stack_virtual_size() - get_page_size(); + + addr_t remap_src[] = { _pd.parent_pt_sel() }; + addr_t remap_dst[] = { PT_SEL_PARENT }; + + /* remap 
exception portals for first thread */ + for (unsigned i = 0; i < sizeof(remap_dst)/sizeof(remap_dst[0]); i++) { + if (map_local(source_pd, utcb, + Obj_crd(remap_src[i], 0), + Obj_crd(pager.exc_pt_sel_client() + remap_dst[i], 0))) + return; + } + } + + /* create first thread in task */ + enum { THREAD_GLOBAL = true }; + uint8_t res = create_ec(_sel_ec(), _pd.pd_sel(), kernel_cpu_id, + pd_utcb, 0, _sel_exc_base, + THREAD_GLOBAL); + if (res != NOVA_OK) { + error("create_ec returned ", res); + return; + } + + pager.client_set_ec(_sel_ec()); + pager.initial_eip((addr_t)ip); + pager.initial_esp((addr_t)sp); + + if (vcpu()) + _features |= REMOTE_PD; + else + res = map_thread_portals(pager, 0, utcb); + + if (res == NOVA_OK) { + res = syscall_retry(pager, + [&] { + /* let the thread run */ + return create_sc(_sel_sc(), _pd.pd_sel(), _sel_ec(), + Qpd(Qpd::DEFAULT_QUANTUM, _priority)); + }); + } + + if (res != NOVA_OK) { + pager.client_set_ec(Native_thread::INVALID_INDEX); + pager.initial_eip(0); + pager.initial_esp(0); + + error("create_sc returned ", res); + + /* cap_selector free for _sel_ec is done in de-constructor */ + revoke(Obj_crd(_sel_ec(), 0)); + return; + } + + _features |= SC_CREATED; +} + + +void Platform_thread::pause() +{ + if (!_pager) + return; + + _pager->client_recall(true); +} + + +void Platform_thread::resume() +{ + using namespace Tukija; + + if (worker() || sc_created()) { + if (_pager) + _pager->wake_up(); + return; + } + + if (!_pager) { + error("pager undefined - resuming thread failed"); + return; + } + + uint8_t res = syscall_retry(*_pager, + [&] { + return create_sc(_sel_sc(), _pd.pd_sel(), _sel_ec(), + Qpd(Qpd::DEFAULT_QUANTUM, _priority)); + }); + + if (res == NOVA_OK) + _features |= SC_CREATED; + else + error("create_sc failed ", res); +} + + +Thread_state Platform_thread::state() +{ + Thread_state s { }; + if (_pager && _pager->copy_thread_state(&s)) + return s; + + return { .state = Thread_state::State::UNAVAILABLE, .cpu = { } }; +} + + 
+void Platform_thread::state(Thread_state s) +{ + if (_pager && _pager->copy_thread_state(s)) + /* the new state is transferred to the kernel by the recall handler */ + _pager->client_recall(false); +} + + +void Platform_thread::single_step(bool on) +{ + if (!_pager) return; + + _pager->single_step(on); +} + + +const char * Platform_thread::pd_name() const { return _pd.name(); } + + +Trace::Execution_time Platform_thread::execution_time() const +{ + uint64_t sc_time = 0; + uint64_t ec_time = 0; + + if (!sc_created()) { + /* time executed by EC (on whatever SC) */ + uint8_t res = Tukija::ec_time(_sel_ec(), ec_time); + if (res != Tukija::NOVA_OK) + warning("ec_time failed res=", res); + return { ec_time, sc_time, Tukija::Qpd::DEFAULT_QUANTUM, _priority }; + } + + uint8_t res = Tukija::sc_ec_time(_sel_sc(), _sel_ec(), sc_time, ec_time); + if (res != Tukija::NOVA_OK) + warning("sc_ctrl failed res=", res); + + return { ec_time, sc_time, Tukija::Qpd::DEFAULT_QUANTUM, _priority }; +} + + +void Platform_thread::pager(Pager_object &pager) +{ + _pager = &pager; + _pager->assign_pd(_pd.pd_sel()); +} + + +void Platform_thread::thread_type(Cpu_session::Native_cpu::Thread_type thread_type, + Cpu_session::Native_cpu::Exception_base exception_base) +{ + /* you can do it only once */ + if (_sel_exc_base != Native_thread::INVALID_INDEX) + return; + + if (!main_thread() || (thread_type == Cpu_session::Native_cpu::Thread_type::VCPU)) + _sel_exc_base = exception_base.exception_base; + + if (thread_type == Cpu_session::Native_cpu::Thread_type::LOCAL) + _features |= WORKER; + else if (thread_type == Cpu_session::Native_cpu::Thread_type::VCPU) + _features |= VCPU; +} + + +Platform_thread::Platform_thread(Platform_pd &pd, Rpc_entrypoint &, Ram_allocator &, + Region_map &, size_t, const char *name, + unsigned prio, Affinity::Location affinity, addr_t) +: + _pd(pd), _pager(0), _id_base(cap_map().insert(2)), + _sel_exc_base(Native_thread::INVALID_INDEX), + 
_location(platform_specific().sanitize(affinity)), + _features(0), + _priority((uint8_t)(scale_priority(prio, name))), + _name(name) +{ + if (!pd.has_any_threads) + _features |= MAIN_THREAD; + + pd.has_any_threads = true; +} + + +Platform_thread::~Platform_thread() +{ + if (_pager) { + /* reset pager and badge used for debug output */ + _pager->reset_badge(); + _pager = 0; + } + + using namespace Tukija; + + /* free ec and sc caps */ + revoke(Obj_crd(_id_base, 2)); + cap_map().remove(_id_base, 2, false); +} diff --git a/repos/base-tukija/src/core/ram_dataspace_support.cc b/repos/base-tukija/src/core/ram_dataspace_support.cc new file mode 100644 index 0000000000..d0df663a6b --- /dev/null +++ b/repos/base-tukija/src/core/ram_dataspace_support.cc @@ -0,0 +1,104 @@ +/* + * \brief Export RAM dataspace as shared memory object + * \author Norman Feske + * \date 2009-10-02 + */ + +/* + * Copyright (C) 2009-2017 Genode Labs GmbH + * + * This file is part of the Genode OS framework, which is distributed + * under the terms of the GNU Affero General Public License version 3. + */ + +/* Genode includes */ +#include + +/* core includes */ +#include +#include +#include +#include + +/* NOVA includes */ +#include + +using namespace Core; + + +void Ram_dataspace_factory::_revoke_ram_ds(Dataspace_component &) { } + + +static inline void * alloc_region(Dataspace_component &ds, const size_t size) +{ + /* + * Allocate range in core's virtual address space + * + * Start with trying to use natural alignment. If this does not work, + * successively weaken the alignment constraint until we hit the page size. 
+ */ + void *virt_addr = 0; + size_t align_log2 = log2(ds.size()); + for (; align_log2 >= get_page_size_log2(); align_log2--) { + + platform().region_alloc().alloc_aligned(size, (unsigned)align_log2).with_result( + [&] (void *ptr) { virt_addr = ptr; }, + [&] (Range_allocator::Alloc_error) { /* try next iteration */ } + ); + if (virt_addr) + return virt_addr; + } + + error("alloc_region of size ", size, " unexpectedly failed"); + return nullptr; +} + + +void Ram_dataspace_factory::_clear_ds(Dataspace_component &ds) +{ + size_t const page_rounded_size = align_addr(ds.size(), get_page_size_log2()); + + size_t memset_count = page_rounded_size / 4; + addr_t memset_ptr = ds.core_local_addr(); + + if ((memset_count * 4 == page_rounded_size) && !(memset_ptr & 0x3)) + asm volatile ("rep stosl" : "+D" (memset_ptr), "+c" (memset_count) + : "a" (0) : "memory"); + else + memset(reinterpret_cast(memset_ptr), 0, page_rounded_size); + + /* we don't keep any core-local mapping */ + unmap_local(*reinterpret_cast(Thread::myself()->utcb()), + ds.core_local_addr(), + page_rounded_size >> get_page_size_log2()); + + platform().region_alloc().free((void*)ds.core_local_addr(), + page_rounded_size); + + ds.assign_core_local_addr(nullptr); +} + + +void Ram_dataspace_factory::_export_ram_ds(Dataspace_component &ds) { + + size_t page_rounded_size = align_addr(ds.size(), get_page_size_log2()); + + /* allocate the virtual region contiguous for the dataspace */ + void * const virt_ptr = alloc_region(ds, page_rounded_size); + if (!virt_ptr) + throw Core_virtual_memory_exhausted(); + + /* map it writeable for _clear_ds */ + Tukija::Utcb &utcb = *reinterpret_cast(Thread::myself()->utcb()); + const Tukija::Rights rights_rw(true, true, false); + + if (map_local(platform_specific().core_pd_sel(), utcb, ds.phys_addr(), + reinterpret_cast(virt_ptr), + page_rounded_size >> get_page_size_log2(), rights_rw, true)) { + platform().region_alloc().free(virt_ptr, page_rounded_size); + throw 
Core_virtual_memory_exhausted(); + } + + /* assign virtual address to the dataspace to be used by clear_ds */ + ds.assign_core_local_addr(virt_ptr); +} diff --git a/repos/base-tukija/src/core/rpc_cap_factory.cc b/repos/base-tukija/src/core/rpc_cap_factory.cc new file mode 100644 index 0000000000..089245170b --- /dev/null +++ b/repos/base-tukija/src/core/rpc_cap_factory.cc @@ -0,0 +1,98 @@ +/* + * \brief RPC capability factory + * \author Norman Feske + * \date 2016-01-19 + */ + +/* + * Copyright (C) 2016-2017 Genode Labs GmbH + * + * This file is part of the Genode OS framework, which is distributed + * under the terms of the GNU Affero General Public License version 3. + */ + +/* core includes */ +#include +#include + +/* NOVA includes */ +#include + +using namespace Core; + + +Native_capability Rpc_cap_factory::alloc(Native_capability ep, addr_t entry, addr_t mtd) +{ + addr_t const pt_sel = cap_map().insert(); + addr_t const pd_sel = platform_specific().core_pd_sel(); + addr_t const ec_sel = ep.local_name(); + + using namespace Tukija; + + Mutex::Guard guard(_mutex); + + /* create cap object */ + Cap_object * pt_cap = new (&_slab) Cap_object(pt_sel); + if (!pt_cap) + return Native_capability(); + + _list.insert(pt_cap); + + /* create portal */ + uint8_t const res = create_pt(pt_sel, pd_sel, ec_sel, Mtd(mtd), entry); + if (res == NOVA_OK) + return Capability_space::import(pt_sel); + + error("cap alloc - " + "cap=", Hex(ec_sel), ":", Hex(ep.local_name()), " " + "entry=", Hex(entry), " " + "mtd=", Hex(mtd), " " + "xpt=", Hex(pt_sel), " " + "res=", res); + + _list.remove(pt_cap); + destroy(&_slab, pt_cap); + + /* cleanup unused selectors */ + cap_map().remove(pt_sel, 0, false); + + return Native_capability(); +} + + +void Rpc_cap_factory::free(Native_capability cap) +{ + if (!cap.valid()) return; + + Mutex::Guard guard(_mutex); + + for (Cap_object *obj = _list.first(); obj ; obj = obj->next()) { + if (cap.local_name() == (long)obj->_cap_sel) { + 
Tukija::revoke(Tukija::Obj_crd(obj->_cap_sel, 0)); + cap_map().remove(obj->_cap_sel, 0, false); + + _list.remove(obj); + destroy(&_slab, obj); + return; + } + } + warning("attempt to free invalid cap object"); +} + + +Rpc_cap_factory::Rpc_cap_factory(Allocator &md_alloc) +: _slab(md_alloc, _initial_sb) { } + + +Rpc_cap_factory::~Rpc_cap_factory() +{ + Mutex::Guard guard(_mutex); + + for (Cap_object *obj; (obj = _list.first()); ) { + Tukija::revoke(Tukija::Obj_crd(obj->_cap_sel, 0)); + cap_map().remove(obj->_cap_sel, 0, false); + + _list.remove(obj); + destroy(&_slab, obj); + } +} diff --git a/repos/base-tukija/src/core/spec/x86_32/pager.cc b/repos/base-tukija/src/core/spec/x86_32/pager.cc new file mode 100644 index 0000000000..176969fdfd --- /dev/null +++ b/repos/base-tukija/src/core/spec/x86_32/pager.cc @@ -0,0 +1,62 @@ +/* + * \brief Copy thread state - x86_32 + * \author Alexander Boettcher + * \date 2012-08-23 + */ + +/* + * Copyright (C) 2012-2017 Genode Labs GmbH + * + * This file is part of the Genode OS framework, which is distributed + * under the terms of the GNU Affero General Public License version 3. + */ + +/* core includes */ +#include + +/* NOVA includes */ +#include + +using namespace Core; + +void Pager_object::_copy_state_from_utcb(Nova::Utcb const &utcb) +{ + _state.thread.cpu.eax = utcb.ax; + _state.thread.cpu.ecx = utcb.cx; + _state.thread.cpu.edx = utcb.dx; + _state.thread.cpu.ebx = utcb.bx; + + _state.thread.cpu.ebp = utcb.bp; + _state.thread.cpu.esi = utcb.si; + _state.thread.cpu.edi = utcb.di; + + _state.thread.cpu.sp = utcb.sp; + _state.thread.cpu.ip = utcb.ip; + _state.thread.cpu.eflags = utcb.flags; + + _state.thread.state = utcb.qual[0] ? 
Thread_state::State::EXCEPTION + : Thread_state::State::VALID; +} + + +void Pager_object::_copy_state_to_utcb(Nova::Utcb &utcb) const +{ + utcb.ax = _state.thread.cpu.eax; + utcb.cx = _state.thread.cpu.ecx; + utcb.dx = _state.thread.cpu.edx; + utcb.bx = _state.thread.cpu.ebx; + + utcb.bp = _state.thread.cpu.ebp; + utcb.si = _state.thread.cpu.esi; + utcb.di = _state.thread.cpu.edi; + + utcb.sp = _state.thread.cpu.sp; + utcb.ip = _state.thread.cpu.ip; + utcb.flags = _state.thread.cpu.eflags; + + utcb.mtd = Nova::Mtd::ACDB | + Nova::Mtd::EBSD | + Nova::Mtd::ESP | + Nova::Mtd::EIP | + Nova::Mtd::EFL; +} diff --git a/repos/base-tukija/src/core/spec/x86_64/pager.cc b/repos/base-tukija/src/core/spec/x86_64/pager.cc new file mode 100644 index 0000000000..b253e39794 --- /dev/null +++ b/repos/base-tukija/src/core/spec/x86_64/pager.cc @@ -0,0 +1,81 @@ +/* + * \brief Copy thread state - x86_64 + * \author Alexander Boettcher + * \date 2012-08-23 + */ + +/* + * Copyright (C) 2012-2017 Genode Labs GmbH + * + * This file is part of the Genode OS framework, which is distributed + * under the terms of the GNU Affero General Public License version 3. 
+ */ + +/* core includes */ +#include + +/* NOVA includes */ +#include + +using namespace Core; + +void Pager_object::_copy_state_from_utcb(Tukija::Utcb const &utcb) +{ + _state.thread.cpu.rax = utcb.ax; + _state.thread.cpu.rcx = utcb.cx; + _state.thread.cpu.rdx = utcb.dx; + _state.thread.cpu.rbx = utcb.bx; + + _state.thread.cpu.rbp = utcb.bp; + _state.thread.cpu.rsi = utcb.si; + _state.thread.cpu.rdi = utcb.di; + + _state.thread.cpu.r8 = utcb.r8; + _state.thread.cpu.r9 = utcb.r9; + _state.thread.cpu.r10 = utcb.r10; + _state.thread.cpu.r11 = utcb.r11; + _state.thread.cpu.r12 = utcb.r12; + _state.thread.cpu.r13 = utcb.r13; + _state.thread.cpu.r14 = utcb.r14; + _state.thread.cpu.r15 = utcb.r15; + + _state.thread.cpu.sp = utcb.sp; + _state.thread.cpu.ip = utcb.ip; + _state.thread.cpu.eflags = utcb.flags; + + _state.thread.state = utcb.qual[0] ? Thread_state::State::EXCEPTION + : Thread_state::State::VALID; +} + + +void Pager_object::_copy_state_to_utcb(Tukija::Utcb &utcb) const +{ + utcb.ax = _state.thread.cpu.rax; + utcb.cx = _state.thread.cpu.rcx; + utcb.dx = _state.thread.cpu.rdx; + utcb.bx = _state.thread.cpu.rbx; + + utcb.bp = _state.thread.cpu.rbp; + utcb.si = _state.thread.cpu.rsi; + utcb.di = _state.thread.cpu.rdi; + + utcb.r8 = _state.thread.cpu.r8; + utcb.r9 = _state.thread.cpu.r9; + utcb.r10 = _state.thread.cpu.r10; + utcb.r11 = _state.thread.cpu.r11; + utcb.r12 = _state.thread.cpu.r12; + utcb.r13 = _state.thread.cpu.r13; + utcb.r14 = _state.thread.cpu.r14; + utcb.r15 = _state.thread.cpu.r15; + + utcb.sp = _state.thread.cpu.sp; + utcb.ip = _state.thread.cpu.ip; + utcb.flags = _state.thread.cpu.eflags; + + utcb.mtd = Tukija::Mtd::ACDB | + Tukija::Mtd::EBSD | + Tukija::Mtd::R8_R15 | + Tukija::Mtd::EIP | + Tukija::Mtd::ESP | + Tukija::Mtd::EFL; +} diff --git a/repos/base-tukija/src/core/thread_start.cc b/repos/base-tukija/src/core/thread_start.cc new file mode 100644 index 0000000000..5898bbe6a0 --- /dev/null +++ b/repos/base-tukija/src/core/thread_start.cc @@ 
/*
 * \brief  NOVA-specific implementation of the Thread API for core
 * \author Norman Feske
 * \author Sebastian Sumpf
 * \author Alexander Boettcher
 * \date   2010-01-19
 */

/*
 * Copyright (C) 2010-2017 Genode Labs GmbH
 *
 * This file is part of the Genode OS framework, which is distributed
 * under the terms of the GNU Affero General Public License version 3.
 */

/*
 * NOTE(review): the original include directives were lost during extraction —
 * restore them from the upstream base-nova thread_start.cc before building.
 */

using namespace Core;


void Thread::_init_platform_thread(size_t, Type type)
{
	/*
	 * This function is called for constructing server activations and pager
	 * objects. It allocates capability selectors for the thread's execution
	 * context and a synchronization-helper semaphore needed for 'Lock'.
	 */
	using namespace Tukija;

	if (type == MAIN)
	{
		/* set EC selector according to NOVA spec */
		native_thread().ec_sel = platform_specific().core_pd_sel() + 1;

		/*
		 * Exception base of first thread in core is 0. We have to set
		 * it here so that Thread code finds the semaphore of the
		 * main thread.
		 */
		native_thread().exc_pt_sel = 0;

		return;
	}
	/* one selector for the EC, a power-of-two range for the exception portals */
	native_thread().ec_sel     = cap_map().insert(1);
	native_thread().exc_pt_sel = cap_map().insert(NUM_INITIAL_PT_LOG2);

	/* create running semaphore required for locking */
	addr_t rs_sel = native_thread().exc_pt_sel + SM_SEL_EC;
	uint8_t res = create_sm(rs_sel, platform_specific().core_pd_sel(), 0);
	if (res != NOVA_OK)
		error("Thread::_init_platform_thread: create_sm returned ", res);
}


void Thread::_deinit_platform_thread()
{
	/* remove kernel objects from core's local cap space ... */
	unmap_local(Tukija::Obj_crd(native_thread().ec_sel, 1));
	unmap_local(Tukija::Obj_crd(native_thread().exc_pt_sel, Tukija::NUM_INITIAL_PT_LOG2));

	/* ... and release the corresponding selector ranges */
	cap_map().remove(native_thread().ec_sel, 1, false);
	cap_map().remove(native_thread().exc_pt_sel, Tukija::NUM_INITIAL_PT_LOG2, false);

	/* revoke utcb */
	Tukija::Rights rwx(true, true, true);
	/* NOTE(review): cast target reconstructed — original text was garbled */
	addr_t utcb = reinterpret_cast<addr_t>(&_stack->utcb());
	Tukija::revoke(Tukija::Mem_crd(utcb >> 12, 0, rwx));
}


Thread::Start_result Thread::start()
{
	/*
	 * On NOVA, core almost never starts regular threads. This simply creates a
	 * local EC
	 */
	using namespace Tukija;

	addr_t sp = _stack->top();
	/* NOTE(review): cast target reconstructed — original text was garbled */
	Utcb &utcb = *reinterpret_cast<Utcb *>(&_stack->utcb());

	/* create local EC */
	enum { LOCAL_THREAD = false };
	unsigned const kernel_cpu_id = platform_specific().kernel_cpu_id(_affinity);
	uint8_t res = create_ec(native_thread().ec_sel,
	                        platform_specific().core_pd_sel(), kernel_cpu_id,
	                        (mword_t)&utcb, sp, native_thread().exc_pt_sel, LOCAL_THREAD);
	if (res != NOVA_OK) {
		error("Thread::start: create_ec returned ", res);
		return Start_result::DENIED;
	}

	/* default: we don't accept any mappings or translations */
	utcb.crd_rcv = Obj_crd();
	utcb.crd_xlt = Obj_crd();

	/* install the page-fault portal of this thread's exception-portal range */
	if (map_local(platform_specific().core_pd_sel(),
	              *reinterpret_cast<Utcb *>(Thread::myself()->utcb()),
	              Obj_crd(PT_SEL_PAGE_FAULT, 0),
	              Obj_crd(native_thread().exc_pt_sel + PT_SEL_PAGE_FAULT, 0))) {
		error("Thread::start: failed to create page-fault portal");
		return Start_result::DENIED;
	}

	/* local type providing execution-time accounting for this core thread */
	struct Core_trace_source : public  Core::Trace::Source::Info_accessor,
	                           private Core::Trace::Control,
	                           private Core::Trace::Source
	{
		Thread &thread;

		/**
		 * Trace::Source::Info_accessor interface
		 */
		Info trace_source_info() const override
		{
			uint64_t ec_time = 0;

			uint8_t res = Tukija::ec_time(thread.native_thread().ec_sel, ec_time);
			if (res != Tukija::NOVA_OK)
				warning("ec_time for core thread failed res=", res);

			return { Session_label("core"), thread.name(),
			         Trace::Execution_time(ec_time, 0), thread._affinity };
		}

		Core_trace_source(Core::Trace::Source_registry &registry, Thread &t)
		:
			Core::Trace::Control(),
			Core::Trace::Source(*this, *this), thread(t)
		{
			registry.insert(this);
		}
	};

	/* allocated from core's heap, intentionally never freed (lives as long as core) */
	new (platform().core_mem_alloc())
		Core_trace_source(Core::Trace::sources(), *this);

	return Start_result::OK;
}
/*
 * \brief  Core-specific instance of the VM session interface
 * \author Alexander Boettcher
 * \author Christian Helmuth
 * \date   2018-08-26
 */

/*
 * Copyright (C) 2018-2021 Genode Labs GmbH
 *
 * This file is part of the Genode OS framework, which is distributed
 * under the terms of the GNU Affero General Public License version 3.
 */

/*
 * NOTE(review): the original include directives were lost during extraction —
 * restore them from the upstream base-nova vm_session_component.cc before
 * building. Template arguments marked below were likewise reconstructed.
 */

using namespace Core;


/* each vCPU consumes a power-of-two range of 4 capability selectors (sm/ec/sc) */
enum { CAP_RANGE_LOG2 = 2, CAP_RANGE = 1 << CAP_RANGE_LOG2 };

/* sentinel for "no selector" */
static addr_t invalid_sel() { return ~0UL; }

/*
 * Delegate the capability range 'src' from core's PD into 'dst_pd' at 'dst'
 * without blocking on the target (asynchronous map via core's UTCB).
 */
static Tukija::uint8_t map_async_caps(Tukija::Obj_crd const src,
                                      Tukija::Obj_crd const dst,
                                      addr_t const dst_pd)
{
	using Tukija::Utcb;

	Utcb &utcb = *reinterpret_cast<Utcb *>(Thread::myself()->utcb());
	addr_t const src_pd = platform_specific().core_pd_sel();

	utcb.set_msg_word(0);
	/* ignore return value as one item always fits into the utcb */
	bool const ok = utcb.append_item(src, 0);
	(void)ok;

	/* asynchronously map capabilities */
	return Tukija::delegate(src_pd, dst_pd, dst);
}


/* transfer kernel memory quota from core to 'pd_target' on kernel OOM */
static Tukija::uint8_t kernel_quota_upgrade(addr_t const pd_target)
{
	return Pager_object::handle_oom(Pager_object::SRC_CORE_PD, pd_target,
	                                "core", "ep",
	                                Pager_object::Policy::UPGRADE_CORE_TO_DST);
}


/*
 * Retry 'fn' as long as it fails with NOVA_PD_OOM and the quota upgrade
 * for 'pd_target' succeeds; returns the final syscall result.
 */
static uint8_t _with_kernel_quota_upgrade(addr_t const pd_target,
                                          auto const &fn)
{
	uint8_t res;
	do {
		res = fn();
	} while (res == Tukija::NOVA_PD_OOM &&
	         Tukija::NOVA_OK == kernel_quota_upgrade(pd_target));
	return res;
}


/********************************
 ** Vm_session_component::Vcpu **
 ********************************/

/* report scheduling-context and execution-context time of this vCPU */
Core::Trace::Source::Info Vm_session_component::Vcpu::trace_source_info() const
{
	uint64_t ec_time = 0;
	uint64_t sc_time = 0;

	uint8_t res = Tukija::sc_ec_time(sc_sel(), ec_sel(), sc_time, ec_time);
	if (res != Tukija::NOVA_OK)
		warning("vCPU sc_ec_time failed res=", res);

	return { _label, String<5>("vCPU"),
	         Trace::Execution_time(ec_time, sc_time,
	                               Tukija::Qpd::DEFAULT_QUANTUM, _priority),
	         _location };
}


void Vm_session_component::Vcpu::startup()
{
	/* initialize SC on first call - do nothing on subsequent calls */
	if (_alive) return;

	uint8_t res = _with_kernel_quota_upgrade(_pd_sel, [&] {
		return Tukija::create_sc(sc_sel(), _pd_sel, ec_sel(),
		                         Tukija::Qpd(Tukija::Qpd::DEFAULT_QUANTUM, _priority));
	});

	if (res == Tukija::NOVA_OK)
		_alive = true;
	else
		error("create_sc=", res);
}


/* bind the signal context 'cap' as handler for VM-exit reason 'exit' */
void Vm_session_component::Vcpu::exit_handler(unsigned const exit,
                                              Signal_context_capability const cap)
{
	if (!cap.valid())
		return;

	if (exit >= Tukija::NUM_INITIAL_VCPU_PT)
		return;

	/* map handler into vCPU-specific range of VM protection domain */
	addr_t const pt = Tukija::NUM_INITIAL_VCPU_PT * _id + exit;

	uint8_t res = _with_kernel_quota_upgrade(_pd_sel, [&] {
		Tukija::Obj_crd const src(cap.local_name(), 0);
		Tukija::Obj_crd const dst(pt, 0);

		return map_async_caps(src, dst, _pd_sel);
	});

	if (res != Tukija::NOVA_OK)
		error("map pt ", res, " failed");
}


Vm_session_component::Vcpu::Vcpu(Rpc_entrypoint &ep,
                                 Constrained_ram_allocator &ram_alloc,
                                 Cap_quota_guard &cap_alloc,
                                 unsigned const id,
                                 unsigned const kernel_id,
                                 Affinity::Location const location,
                                 unsigned const priority,
                                 Session_label const &label,
                                 addr_t const pd_sel,
                                 addr_t const core_pd_sel,
                                 addr_t const vmm_pd_sel,
                                 Trace::Control_area &trace_control_area,
                                 Trace::Source_registry &trace_sources)
:
	_ep(ep),
	_ram_alloc(ram_alloc),
	_cap_alloc(cap_alloc),
	_trace_sources(trace_sources),
	_sel_sm_ec_sc(invalid_sel()),
	_id(id),
	_location(location),
	_priority(priority),
	_label(label),
	_pd_sel(pd_sel),
	_trace_control_slot(trace_control_area)
{
	/* account caps required to setup vCPU */
	Cap_quota_guard::Reservation caps(_cap_alloc, Cap_quota{CAP_RANGE});

	/* now try to allocate cap indexes */
	_sel_sm_ec_sc = cap_map().insert(CAP_RANGE_LOG2);
	if (_sel_sm_ec_sc == invalid_sel()) {
		error("out of caps in core");
		throw Creation_failed();
	}

	/* setup resources */
	uint8_t res = _with_kernel_quota_upgrade(_pd_sel, [&] {
		return Tukija::create_sm(sm_sel(), core_pd_sel, 0);
	});

	if (res != Tukija::NOVA_OK) {
		cap_map().remove(_sel_sm_ec_sc, CAP_RANGE_LOG2);
		error("create_sm = ", res);
		throw Creation_failed();
	}

	/* each vCPU owns its own range of exit portals in the VM's PD */
	addr_t const event_base = (1U << Tukija::NUM_INITIAL_VCPU_PT_LOG2) * id;
	enum { THREAD_GLOBAL = true, NO_UTCB = 0, NO_STACK = 0 };
	res = _with_kernel_quota_upgrade(_pd_sel, [&] {
		return Tukija::create_ec(ec_sel(), _pd_sel, kernel_id,
		                         NO_UTCB, NO_STACK, event_base, THREAD_GLOBAL);
	});

	if (res != Tukija::NOVA_OK) {
		cap_map().remove(_sel_sm_ec_sc, CAP_RANGE_LOG2);
		error("create_ec = ", res);
		throw Creation_failed();
	}

	/* target selector range inside the VMM's PD, by convention */
	addr_t const dst_sm_ec_sel = Tukija::NUM_INITIAL_PT_RESERVED + _id*CAP_RANGE;

	res = _with_kernel_quota_upgrade(vmm_pd_sel, [&] {
		using namespace Tukija;

		enum { CAP_LOG2_COUNT = 1 };
		int permission = Obj_crd::RIGHT_EC_RECALL | Obj_crd::RIGHT_SM_UP |
		                 Obj_crd::RIGHT_SM_DOWN;
		Obj_crd const src(sm_sel(), CAP_LOG2_COUNT, permission);
		Obj_crd const dst(dst_sm_ec_sel, CAP_LOG2_COUNT);

		return map_async_caps(src, dst, vmm_pd_sel);
	});

	if (res != Tukija::NOVA_OK) {
		cap_map().remove(_sel_sm_ec_sc, CAP_RANGE_LOG2);
		error("map sm ", res, " ", _id);
		throw Creation_failed();
	}

	_ep.manage(this);

	_trace_sources.insert(&_trace_source);

	/* all kernel objects exist — commit the cap reservation */
	caps.acknowledge();
}


Vm_session_component::Vcpu::~Vcpu()
{
	_ep.dissolve(this);

	_trace_sources.remove(&_trace_source);

	if (_sel_sm_ec_sc != invalid_sel()) {
		_cap_alloc.replenish(Cap_quota{CAP_RANGE});
		cap_map().remove(_sel_sm_ec_sc, CAP_RANGE_LOG2);
	}
}

/**************************
 ** Vm_session_component **
 **************************/

/* map the dataspace 'dsc' page-wise into the guest-physical address space */
void Vm_session_component::_attach_vm_memory(Dataspace_component &dsc,
                                             addr_t const guest_phys,
                                             Attach_attr const attribute)
{
	using Tukija::Utcb;
	Utcb & utcb = *reinterpret_cast<Utcb *>(Thread::myself()->utcb());
	addr_t const src_pd = platform_specific().core_pd_sel();

	Flexpage_iterator flex(dsc.phys_addr() + attribute.offset, attribute.size,
	                       guest_phys, attribute.size, guest_phys);

	Flexpage page = flex.page();
	while (page.valid()) {
		Tukija::Rights const map_rights (true,
		                                 dsc.writeable() && attribute.writeable,
		                                 attribute.executable);
		Tukija::Mem_crd const mem(page.addr >> 12, page.log2_order - 12,
		                          map_rights);

		utcb.set_msg_word(0);
		/* ignore return value as one item always fits into the utcb */
		bool const ok = utcb.append_item(mem, 0, true, true);
		(void)ok;

		/* receive window in destination pd */
		Tukija::Mem_crd crd_mem(page.hotspot >> 12, page.log2_order - 12,
		                        map_rights);

		/* asynchronously map memory */
		uint8_t res = _with_kernel_quota_upgrade(_pd_sel, [&] {
			return Tukija::delegate(src_pd, _pd_sel, crd_mem); });

		if (res != Tukija::NOVA_OK)
			error("could not map VM memory ", res);

		page = flex.page();
	}
}

/* revoke all mappings of the guest-physical range [guest_phys, guest_phys+size) */
void Vm_session_component::_detach_vm_memory(addr_t guest_phys, size_t size)
{
	Tukija::Rights const revoke_rwx(true, true, true);

	Flexpage_iterator flex(guest_phys, size, guest_phys, size, 0);
	Flexpage page = flex.page();

	while (page.valid()) {
		Tukija::Mem_crd mem(page.addr >> 12, page.log2_order - 12, revoke_rwx);
		Tukija::revoke(mem, true, true, _pd_sel);

		page = flex.page();
	}
}


/* NOTE(review): return-type template argument reconstructed from upstream */
Capability<Vm_session::Native_vcpu> Vm_session_component::create_vcpu(Thread_capability cap)
{
	if (!cap.valid()) return { };

	/* lookup vmm pd and cpu location of handler thread in VMM */
	addr_t kernel_cpu_id = 0;
	Affinity::Location vcpu_location;

	auto lambda = [&] (Cpu_thread_component *ptr) {
		if (!ptr)
			return invalid_sel();

		Cpu_thread_component &thread = *ptr;

		vcpu_location = thread.platform_thread().affinity();
		kernel_cpu_id = platform_specific().kernel_cpu_id(thread.platform_thread().affinity());

		return thread.platform_thread().pager().pd_sel();
	};
	addr_t const vmm_pd_sel = _ep.apply(cap, lambda);

	/* if VMM pd lookup failed then deny to create vCPU */
	if (!vmm_pd_sel || vmm_pd_sel == invalid_sel())
		return { };

	/* XXX this is a quite limited ID allocator... */
	unsigned const vcpu_id = _next_vcpu_id;

	try {
		Vcpu &vcpu =
			*new (_heap) Registered<Vcpu>(_vcpus,
			                              _ep,
			                              _constrained_md_ram_alloc,
			                              _cap_quota_guard(),
			                              vcpu_id,
			                              (unsigned)kernel_cpu_id,
			                              vcpu_location,
			                              _priority,
			                              _session_label,
			                              _pd_sel,
			                              platform_specific().core_pd_sel(),
			                              vmm_pd_sel,
			                              _trace_control_area,
			                              _trace_sources);
		++_next_vcpu_id;
		return vcpu.cap();

	} catch (Vcpu::Creation_failed&) {
		return { };
	}
}


Vm_session_component::Vm_session_component(Rpc_entrypoint &ep,
                                           Resources resources,
                                           Label const &label,
                                           Diag,
                                           Ram_allocator &ram,
                                           Region_map &local_rm,
                                           unsigned const priority,
                                           Trace::Source_registry &trace_sources)
:
	Ram_quota_guard(resources.ram_quota),
	Cap_quota_guard(resources.cap_quota),
	_ep(ep),
	_trace_control_area(ram, local_rm), _trace_sources(trace_sources),
	_constrained_md_ram_alloc(ram, _ram_quota_guard(), _cap_quota_guard()),
	_heap(_constrained_md_ram_alloc, local_rm),
	_priority(scale_priority(priority, "VM session")),
	_session_label(label)
{
	/* one cap for the VM's protection domain */
	_cap_quota_guard().withdraw(Cap_quota{1});

	_pd_sel = cap_map().insert();
	if (!_pd_sel || _pd_sel == invalid_sel())
		throw Service_denied();

	addr_t const core_pd = platform_specific().core_pd_sel();
	enum { KEEP_FREE_PAGES_NOT_AVAILABLE_FOR_UPGRADE = 2, UPPER_LIMIT_PAGES = 32 };
	uint8_t res = Tukija::create_pd(_pd_sel, core_pd, Tukija::Obj_crd(),
	                                KEEP_FREE_PAGES_NOT_AVAILABLE_FOR_UPGRADE,
	                                UPPER_LIMIT_PAGES);
	if (res != Tukija::NOVA_OK) {
		error("create_pd = ", res);
		cap_map().remove(_pd_sel, 0, true);
		throw Service_denied();
	}

	/*
	 * Configure managed VM area. The two ranges work around the size
	 * limitation to ULONG_MAX.
	 */
	_map.add_range(0, 0UL - 0x1000);
	_map.add_range(0UL - 0x1000, 0x1000);
}


Vm_session_component::~Vm_session_component()
{
	_vcpus.for_each([&] (Vcpu &vcpu) {
		destroy(_heap, &vcpu); });

	/* detach all regions */
	while (true) {
		addr_t out_addr = 0;

		if (!_map.any_block_addr(&out_addr))
			break;

		detach_at(out_addr);
	}

	if (_pd_sel && _pd_sel != invalid_sel())
		cap_map().remove(_pd_sel, 0, true);
}
+ */ +static inline Tukija::mword_t copy_utcb_to_msgbuf(Tukija::Utcb &utcb, + Genode::Receive_window &rcv_window, + Genode::Msgbuf_base &rcv_msg) +{ + using namespace Genode; + using namespace Tukija; + + size_t num_msg_words = utcb.msg_words(); + + /* + * Handle the reception of a malformed message. This should never happen + * because the utcb.msg_words is checked by the caller of this function. + */ + if (num_msg_words < 1) + return 0; + + /* the UTCB contains the protocol word followed by the message data */ + mword_t const protocol_word = utcb.msg()[0]; + size_t num_data_words = num_msg_words - 1; + + if (num_data_words*sizeof(mword_t) > rcv_msg.capacity()) { + error("receive message buffer too small msg " + "size=", num_data_words*sizeof(mword_t), " " + "buf size=", rcv_msg.capacity()); + num_data_words = rcv_msg.capacity()/sizeof(mword_t); + } + + /* read message payload into destination message buffer */ + mword_t *src = (mword_t *)(void *)(&utcb.msg()[1]); + mword_t *dst = (mword_t *)rcv_msg.data(); + for (unsigned i = 0; i < num_data_words; i++) + *dst++ = *src++; + + /* extract caps from UTCB */ + for (unsigned i = 0; i < rcv_window.num_received_caps(); i++) { + Native_capability cap; + rcv_window.rcv_pt_sel(cap); + rcv_msg.insert(cap); + } + + return protocol_word; +} + + +/** + * Copy message payload to UTCB message registers + */ +static inline bool copy_msgbuf_to_utcb(Tukija::Utcb &utcb, + Genode::Msgbuf_base const &snd_msg, + Tukija::mword_t protocol_value) +{ + using namespace Genode; + using namespace Tukija; + + /* look up address and size of message payload */ + mword_t *msg_buf = (mword_t *)snd_msg.data(); + + /* size of message payload in machine words */ + size_t const num_data_words = snd_msg.data_size()/sizeof(mword_t); + + /* account for protocol value in front of the message */ + size_t num_msg_words = 1 + num_data_words; + + enum { NUM_MSG_REGS = 256 }; + if (num_msg_words > NUM_MSG_REGS) { + error("message does not fit into UTCB message 
registers"); + num_msg_words = NUM_MSG_REGS; + } + + utcb.msg()[0] = protocol_value; + + /* store message into UTCB message registers */ + mword_t *src = (mword_t *)&msg_buf[0]; + mword_t *dst = (mword_t *)(void *)&utcb.msg()[1]; + for (unsigned i = 0; i < num_data_words; i++) + *dst++ = *src++; + + utcb.set_msg_word((unsigned)num_msg_words); + + /* append portal capability selectors */ + for (unsigned i = 0; i < snd_msg.used_caps(); i++) { + + Native_capability const &cap = snd_msg.cap(i); + Tukija::Crd const crd = Capability_space::crd(cap); + + if (crd.base() == ~0UL) continue; + + if (!utcb.append_item(crd, i, false, false, true)) + return false; + } + + return true; +} + + +#endif /* _INCLUDE__BASE__INTERNAL__IPC_H_ */ diff --git a/repos/base-tukija/src/include/base/internal/lock_helper.h b/repos/base-tukija/src/include/base/internal/lock_helper.h new file mode 100644 index 0000000000..937729c3bc --- /dev/null +++ b/repos/base-tukija/src/include/base/internal/lock_helper.h @@ -0,0 +1,65 @@ +/* + * \brief Helper functions for the Lock implementation + * \author Norman Feske + * \author Alexander Boettcher + * \date 2009-10-02 + * + * For documentation about the interface, please revisit the 'base-pistachio' + * implementation. + */ + +/* + * Copyright (C) 2009-2017 Genode Labs GmbH + * + * This file is part of the Genode OS framework, which is distributed + * under the terms of the GNU Affero General Public License version 3. + */ + +#ifndef _INCLUDE__BASE__INTERNAL__LOCK_HELPER_H_ +#define _INCLUDE__BASE__INTERNAL__LOCK_HELPER_H_ + +/* Genode includes */ +#include +#include + +/* base-internal includes */ +#include + +/* NOVA includes */ +#include +#include + + +extern int main_thread_running_semaphore(); + + +static inline bool thread_check_stopped_and_restart(Genode::Thread *thread_base) +{ + Genode::addr_t sem = thread_base ? 
+ thread_base->native_thread().exc_pt_sel + Tukija::SM_SEL_EC : + main_thread_running_semaphore(); + + Tukija::sm_ctrl(sem, Tukija::SEMAPHORE_UP); + return true; +} + + +static inline void thread_switch_to(Genode::Thread *) { } + + +static inline void thread_stop_myself(Genode::Thread *myself) +{ + using namespace Genode; + using namespace Tukija; + + addr_t sem; + if (myself) + sem = myself->native_thread().exc_pt_sel + SM_SEL_EC; + else + sem = main_thread_running_semaphore(); + + if (sm_ctrl(sem, SEMAPHORE_DOWNZERO)) + nova_die(); +} + +#endif /* _INCLUDE__BASE__INTERNAL__LOCK_HELPER_H_ */ diff --git a/repos/base-tukija/src/include/base/internal/native_thread.h b/repos/base-tukija/src/include/base/internal/native_thread.h new file mode 100644 index 0000000000..8ce3a62584 --- /dev/null +++ b/repos/base-tukija/src/include/base/internal/native_thread.h @@ -0,0 +1,14 @@ +/* + * \brief Kernel-specific thread meta data + * \author Norman Feske + * \date 2016-03-11 + */ + +/* + * Copyright (C) 2016-2017 Genode Labs GmbH + * + * This file is part of the Genode OS framework, which is distributed + * under the terms of the GNU Affero General Public License version 3. + */ + +#include diff --git a/repos/base-tukija/src/include/base/internal/native_utcb.h b/repos/base-tukija/src/include/base/internal/native_utcb.h new file mode 100644 index 0000000000..2f309b65ef --- /dev/null +++ b/repos/base-tukija/src/include/base/internal/native_utcb.h @@ -0,0 +1,46 @@ +/* + * \brief UTCB definition + * \author Norman Feske + * \date 2016-03-08 + */ + +/* + * Copyright (C) 2016-2017 Genode Labs GmbH + * + * This file is part of the Genode OS framework, which is distributed + * under the terms of the GNU Affero General Public License version 3. 
#ifndef _INCLUDE__BASE__INTERNAL__NATIVE_UTCB_H_
#define _INCLUDE__BASE__INTERNAL__NATIVE_UTCB_H_

/* NOTE(review): original include directive lost in extraction — restore from upstream */

namespace Genode { struct Native_utcb; }

/*
 * Placeholder covering the kernel-provided UTCB page within a thread's
 * stack area.
 */
class Genode::Native_utcb
{
	private:

		/**
		 * Size of the NOVA-specific user-level thread-control
		 * block
		 */
		enum { UTCB_SIZE = 4096 };

		/**
		 * User-level thread control block
		 *
		 * The UTCB is one 4K page, shared between the kernel
		 * and the user process. It is not backed by a
		 * dataspace but provided by the kernel.
		 */
		addr_t _utcb[UTCB_SIZE/sizeof(addr_t)];

	public:

		Native_utcb() { }
};

#endif /* _INCLUDE__BASE__INTERNAL__NATIVE_UTCB_H_ */


#ifndef _INCLUDE__BASE__INTERNAL__PARENT_CAP_H_
#define _INCLUDE__BASE__INTERNAL__PARENT_CAP_H_

/* NOTE(review): original include directives lost in extraction — restore from upstream */

namespace Genode {

	/*
	 * Obtain the parent capability: by convention, the parent portal is
	 * capability selector 'PT_SEL_PARENT' supplied when the PD is created.
	 *
	 * NOTE(review): cast template argument reconstructed — original text
	 * was garbled.
	 */
	static inline Parent_capability parent_cap()
	{
		return reinterpret_cap_cast<Parent>(
			Capability_space::import(Tukija::PT_SEL_PARENT));
	}
}

#endif /* _INCLUDE__BASE__INTERNAL__PARENT_CAP_H_ */


#ifndef _INCLUDE__BASE__INTERNAL__RAW_WRITE_STRING_H_
#define _INCLUDE__BASE__INTERNAL__RAW_WRITE_STRING_H_

/* kernel-specific raw-output back end — intentionally a no-op on this kernel */
namespace Genode { void raw_write_string(char const *) { } }

#endif /* _INCLUDE__BASE__INTERNAL__RAW_WRITE_STRING_H_ */
#ifndef _INCLUDE__BASE__INTERNAL__SPIN_LOCK_H_
#define _INCLUDE__BASE__INTERNAL__SPIN_LOCK_H_

/* NOTE(review): original include directives lost in extraction — restore from upstream */

/*
 * User-land spin lock with time donation: the lock word encodes the holder's
 * EC selector plus a help counter so that a contending thread can donate its
 * scheduling context to the holder.
 */
enum State {
	SPINLOCK_LOCKED = 0, SPINLOCK_UNLOCKED = 1, SPINLOCK_CONTENDED = 2,
};

/* low 12 bits of the lock word: lock/contended flags plus the help counter */
enum { RESERVED_BITS = 12, COUNTER_MASK = 0xFFC };

/* NOTE(review): template header reconstructed — original text was garbled */
template <typename T>
static inline void spinlock_lock(volatile T *lock_variable)
{
	using Genode::cmpxchg;

	Genode::Thread * myself = Genode::Thread::myself();
	T const tid = (T)(myself ? myself->native_thread().ec_sel
	                         : (Genode::addr_t)Tukija::EC_SEL_THREAD);

	unsigned help_counter = 0;

	/* sanity check that ec_sel fits into the lock_variable */
	if (tid >= (1 << (sizeof(*lock_variable) * 8 - RESERVED_BITS)))
		nova_die();

	if (myself) {
		/* NOTE(review): cast target reconstructed — original text was garbled */
		Tukija::Utcb * utcb = reinterpret_cast<Tukija::Utcb *>(myself->utcb());
		help_counter = utcb->tls & COUNTER_MASK;
	}

	/* try to get lock */
	do {
		T raw = *lock_variable;

		if (raw != SPINLOCK_UNLOCKED) {
			if (!(raw & SPINLOCK_CONTENDED))
				/* if it fails - just re-read and retry */
				if (!Genode::cmpxchg(lock_variable, raw, raw | SPINLOCK_CONTENDED))
					continue;

			/*
			 * Donate remaining time slice to help the spinlock holder to
			 * pass the critical section.
			 */
			unsigned long const ec = raw >> RESERVED_BITS;
			unsigned long const tls = raw & COUNTER_MASK;
			Tukija::ec_ctrl(Tukija::EC_DONATE_SC, ec, tls);
			continue;
		}
	} while (!cmpxchg(lock_variable, (T)SPINLOCK_UNLOCKED,
	                  (tid << RESERVED_BITS) | help_counter | SPINLOCK_LOCKED));
}


/* NOTE(review): template header reconstructed — original text was garbled */
template <typename T>
static inline void spinlock_unlock(volatile T *lock_variable)
{
	using Tukija::Utcb;

	Genode::Thread * myself = Genode::Thread::myself();
	/* NOTE(review): cast target reconstructed — original text was garbled */
	Utcb * utcb = myself ? reinterpret_cast<Utcb *>(myself->utcb()) : 0;

	/* unlock */
	T old;
	do {
		old = *lock_variable;
	} while (!Genode::cmpxchg(lock_variable, old, (T)SPINLOCK_UNLOCKED));

	/* de-flag time donation help request and set new counter */
	if (utcb) {
		utcb->tls = (((utcb->tls & COUNTER_MASK) + 4) % 4096) & COUNTER_MASK;
		/* take care that compiler generates code that writes tls to memory */
		Genode::memory_barrier();
	}

	/*
	 * If anybody donated time, request kernel for a re-schedule in order that
	 * the helper can get its time donation (SC) back.
	 */
	if (old & SPINLOCK_CONTENDED)
		Tukija::ec_ctrl(Tukija::EC_RESCHEDULE);
}

#endif /* _INCLUDE__BASE__INTERNAL__SPIN_LOCK_H_ */
#ifndef _INCLUDE__NOVA_NATIVE_VCPU__NOVA_NATIVE_VCPU_H_
#define _INCLUDE__NOVA_NATIVE_VCPU__NOVA_NATIVE_VCPU_H_

/* NOTE(review): original include directives lost in extraction — restore from upstream */

/*
 * Kernel-specific vCPU RPC interface: 'startup' activates the vCPU's
 * scheduling context, 'exit_handler' binds a signal context to one
 * VM-exit reason.
 */
struct Genode::Vm_session::Native_vcpu : Interface
{
	GENODE_RPC(Rpc_startup, void, startup);
	GENODE_RPC(Rpc_exit_handler, void, exit_handler, unsigned, Signal_context_capability);

	GENODE_RPC_INTERFACE(Rpc_startup, Rpc_exit_handler);
};

#endif /* _INCLUDE__NOVA_NATIVE_VCPU__NOVA_NATIVE_VCPU_H_ */
#ifndef _INCLUDE__SIGNAL_SOURCE__CLIENT_H_
#define _INCLUDE__SIGNAL_SOURCE__CLIENT_H_

/*
 * NOTE(review): original include directives lost in extraction — restore from
 * upstream. Template arguments marked below were likewise reconstructed.
 *
 * On this kernel, the signal source server does not provide a blocking
 * 'wait_for_signal' function because the kernel does not support out-of-order
 * IPC replies. Instead, a shared semaphore lets the client block until a
 * signal is present at the server.
 */
namespace Genode {

	class Signal_source_client : public Rpc_client<Nova_signal_source>
	{
		private:

			/**
			 * Capability referring to a NOVA semaphore
			 */
			Native_capability _sem { };

		public:

			/**
			 * Constructor
			 *
			 * Maps the shared semaphore selector and registers it at the
			 * signal-source server.
			 */
			Signal_source_client(Cpu_session &, Capability<Signal_source> cap)
			/* NOTE(review): base-class and cast arguments reconstructed */
			: Rpc_client<Nova_signal_source>(static_cap_cast<Nova_signal_source>(cap))
			{
				/* request mapping of semaphore capability selector */
				Thread * myself = Thread::myself();
				auto const &exc_base = myself->native_thread().exc_pt_sel;
				request_signal_sm_cap(exc_base + Tukija::PT_SEL_PAGE_FAULT,
				                      exc_base + Tukija::SM_SEL_SIGNAL);
				_sem = Capability_space::import(exc_base + Tukija::SM_SEL_SIGNAL);
				/* NOTE(review): RPC-function template argument reconstructed */
				call<Rpc_register_semaphore>(_sem);
			}

			~Signal_source_client()
			{
				/* withdraw the semaphore mapping from this PD */
				Tukija::revoke(Tukija::Obj_crd(_sem.local_name(), 0));
			}


			/*****************************
			 ** Signal source interface **
			 *****************************/

			Signal wait_for_signal() override
			{
				using namespace Tukija;

				mword_t imprint, count;
				do {

					/*
					 * We set an invalid imprint (0) to detect a spurious
					 * unblock. In this case, NOVA does not block
					 * SEMAPHORE_DOWN nor touch our input values if the
					 * deblocking (chained) semaphore was dequeued before we
					 * intend to block.
					 */
					imprint = 0;
					count = 0;

					/* block on semaphore until signal context was submitted */
					if (uint8_t res = si_ctrl(_sem.local_name(), SEMAPHORE_DOWN,
					                          imprint, count))
						warning("signal reception failed - error ", res);

				} while (imprint == 0);

				return Signal(imprint, (int)count);
			}
	};
}

#endif /* _INCLUDE__SIGNAL_SOURCE__CLIENT_H_ */


#ifndef _INCLUDE__SIGNAL_SOURCE__NOVA_SIGNAL_SOURCE_H_
#define _INCLUDE__SIGNAL_SOURCE__NOVA_SIGNAL_SOURCE_H_

namespace Genode { struct Nova_signal_source; }

/*
 * Kernel-specific signal-source RPC interface: extends the generic signal
 * source with the registration of the client's blocking semaphore.
 */
struct Genode::Nova_signal_source : Signal_source
{
	GENODE_RPC(Rpc_register_semaphore, void, register_semaphore,
	           Native_capability const &);

	GENODE_RPC_INTERFACE_INHERIT(Signal_source, Rpc_register_semaphore);
};

#endif /* _INCLUDE__SIGNAL_SOURCE__NOVA_SIGNAL_SOURCE_H_ */
$(wildcard $(NOVA_SRC_DIR)/src/*.S))) +INC_DIR = $(NOVA_SRC_DIR)/include +override CC_OLEVEL := -Os +CC_WARN = -Wall -Wextra -Waggregate-return -Wcast-align -Wcast-qual \ + -Wconversion -Wdisabled-optimization -Wformat=2 \ + -Wmissing-format-attribute -Wmissing-noreturn -Wpacked \ + -Wpointer-arith -Wredundant-decls -Wshadow -Wwrite-strings \ + -Wctor-dtor-privacy -Wno-non-virtual-dtor \ + -Wold-style-cast -Woverloaded-virtual -Wsign-promo \ + -Wlogical-op -Wstrict-null-sentinel \ + -Wstrict-overflow=5 -Wvolatile-register-var + +# XXX fix the warnings and remove this option +CC_WARN += -Wno-error=implicit-fallthrough + +CC_OPT += -pipe \ + -fdata-sections -fomit-frame-pointer -freg-struct-return \ + -freorder-blocks -funit-at-a-time -fno-exceptions -fno-rtti \ + -fno-stack-protector -fvisibility-inlines-hidden \ + -fno-asynchronous-unwind-tables -std=gnu++0x -mgeneral-regs-only +# kernel memory: 28M minimum dynamic or 10 pro mill of the system memory +CC_OPT += -DCONFIG_MEMORY_DYN_MIN=0x1c00000 \ + -DCONFIG_MEMORY_DYN_PER_MILL=10 +CC_OPT_PIC := +ifeq ($(filter-out $(SPECS),32bit),) +override CC_MARCH = -m32 +CC_OPT += -mpreferred-stack-boundary=2 -mregparm=3 +else +ifeq ($(filter-out $(SPECS),64bit),) +override CC_MARCH = -m64 +CC_OPT += -mpreferred-stack-boundary=4 -mcmodel=kernel -mno-red-zone +else +$(error Unsupported environment) +endif +endif + +# disable -Wsuggest-override +CC_CXX_WARN_STRICT = -Wextra -Weffc++ -Werror + +git_version = $(shell cd $(NOVA_SRC_DIR) && (git rev-parse HEAD 2>/dev/null || echo 0) | cut -c1-7) +CXX_LINK_OPT = -Wl,--gc-sections -Wl,--warn-common -Wl,-static -Wl,-n -Wl,--defsym=GIT_VER=0x$(call git_version) \ + -Wl,--no-warn-rwx-segments +LD_TEXT_ADDR = # 0xc000000000 - when setting this 64bit compile fails because of relocation issues!! 
+LD_SCRIPT_STATIC = hypervisor.o + +$(TARGET): hypervisor.o + +hypervisor.o: $(NOVA_SRC_DIR)/src/hypervisor.ld target.mk + $(VERBOSE)$(CC) $(INCLUDES) -DCONFIG_MEMORY_BOOT=8M -MP -MMD -pipe $(CC_MARCH) -xc -E -P $< -o $@ + +clean cleanall: + $(VERBOSE)rm -rf $(NOVA_BUILD_DIR) + +vpath %.cpp $(NOVA_SRC_DIR)/src +vpath %.S $(NOVA_SRC_DIR)/src diff --git a/repos/base-tukija/src/lib/base/cap_map.cc b/repos/base-tukija/src/lib/base/cap_map.cc new file mode 100644 index 0000000000..25f43f7476 --- /dev/null +++ b/repos/base-tukija/src/lib/base/cap_map.cc @@ -0,0 +1,191 @@ +/* + * \brief Mapping of Genode's capability names to kernel capabilities. + * \author Alexander Boettcher + * \date 2013-08-26 + */ + +/* + * Copyright (C) 2013-2017 Genode Labs GmbH + * + * This file is part of the Genode OS framework, which is distributed + * under the terms of the GNU Affero General Public License version 3. + */ + +/* Genode includes */ +#include + +/* NOVA includes */ +#include +#include + +using namespace Genode; + + +Capability_map &Genode::cap_map() +{ + static Genode::Capability_map map; + return map; +} + + +/*********************** + ** Cap_index class ** + ***********************/ + + +Cap_range *Cap_range::find_by_id(addr_t id) +{ + if (_match(id)) return this; + + Cap_range *obj = this->child(id > _base); + return obj ? 
obj->find_by_id(id) : 0; +} + + +void Cap_range::inc(unsigned id) +{ + bool failure = false; + { + Mutex::Guard guard(_mutex); + + if (_cap_array[id] + 1 == 0) + failure = true; + else + _cap_array[id]++; + } + + if (failure) + error("cap reference counting error - reference overflow of cap=", _base + id); +} + + +void Cap_range::dec(unsigned const id_start, bool revoke, unsigned num_log_2) +{ + bool failure = false; + { + unsigned const end = min(id_start + (1U << num_log_2), elements()); + + Mutex::Guard guard(_mutex); + + for (unsigned id = id_start; id < end; id++) { + if (_cap_array[id] == 0) { + failure = true; + continue; + } + + if (revoke && _cap_array[id] == 1) + Tukija::drop(Tukija::Obj_crd(_base + id, 0)); + + _cap_array[id]--; + } + } + + if (failure) + error("cap reference counting error - one counter of cap ", + "range ", _base + id_start, "+", 1 << num_log_2, " " + "has been already zero"); +} + + +addr_t Cap_range::alloc(size_t const num_log2) +{ + addr_t const step = 1UL << num_log2; + + { + Mutex::Guard guard(_mutex); + + unsigned max = elements(); + addr_t last = _last; + + do { + + /* align i to num_log2 */ + unsigned i = (unsigned)(((_base + last + step - 1) & ~(step - 1)) - _base); + unsigned j; + for (; i + step < max; i += (unsigned)step) { + for (j = 0; j < step; j++) + if (_cap_array[i+j]) + break; + if (j < step) + continue; + + for (j = 0; j < step; j++) + _cap_array[i+j] = 1; + + _last = i; + return _base + i; + } + + max = (unsigned)last; + last = 0; + + } while (max); + } + + Cap_range *child = this->child(LEFT); + if (child) { + addr_t res = child->alloc(num_log2); + if (res != ~0UL) + return res; + } + child = this->child(RIGHT); + if (child) { + addr_t res = child->alloc(num_log2); + return res; + } + + return ~0UL; +} + + +/**************************** + ** Capability_map class ** + ****************************/ + + +Cap_index Capability_map::find(Genode::addr_t id) { + return Cap_index(_tree.first() ? 
/* tail of Capability_map::find() — head lies in the preceding chunk */
 _tree.first()->find_by_id(id) : 0, id); }


/**
 * Register 2^num_log_2 selectors in the map
 *
 * \param sel  base selector, or ~0UL to allocate a fresh block instead
 *
 * For a given 'sel', the reference counter of each covered selector is
 * incremented; for ~0UL, a free block is allocated from the tree.
 *
 * \return base selector, or ~0UL if the selector is unknown or none is free
 */
addr_t Capability_map::insert(size_t const num_log_2, addr_t const sel)
{
	if (sel == ~0UL)
		return _tree.first() ? _tree.first()->alloc(num_log_2) : ~0UL;

	Cap_range * range = _tree.first() ? _tree.first()->find_by_id(sel) : 0;
	if (!range)
		return ~0UL;

	for (unsigned i = 0; i < 1UL << num_log_2; i++)
		range->inc((unsigned)(sel + i - range->base()));

	return sel;
}


/**
 * Unregister 2^num_log_2 selectors, optionally revoking kernel caps
 *
 * The requested span may extend past the Cap_range containing 'sel'; the
 * remainder is split into power-of-two pieces and removed recursively.
 */
void Capability_map::remove(Genode::addr_t const sel, uint8_t num_log_2,
                            bool revoke)
{
	Cap_range * range = _tree.first() ? _tree.first()->find_by_id(sel) : 0;
	if (!range)
		return;

	range->dec((unsigned)(sel - range->base()), revoke, num_log_2);

	Genode::addr_t last_sel  = sel + (1UL << num_log_2);
	Genode::addr_t last_range = range->base() + range->elements();

	while (last_sel > last_range) {
		uint8_t left_log2 = (uint8_t)log2(last_sel - last_range);

		/* guard against a shift beyond the word width (should not happen) */
		if (left_log2 >= sizeof(last_range)*8) {
			error("cap remove error");
			return;
		}

		remove(last_range, left_log2, revoke);

		last_range += 1UL << left_log2;
	}
}
diff --git a/repos/base-tukija/src/lib/base/capability.cc b/repos/base-tukija/src/lib/base/capability.cc
new file mode 100644
index 0000000000..9cf4b8df6e
--- /dev/null
+++ b/repos/base-tukija/src/lib/base/capability.cc
/*
 * \brief  Capability lifetime management
 * \author Norman Feske
 * \date   2015-05-06
 */

/*
 * Copyright (C) 2015-2017 Genode Labs GmbH
 *
 * This file is part of the Genode OS framework, which is distributed
 * under the terms of the GNU Affero General Public License version 3.
+ */ + +/* base-internal includes */ +#include + +/* NOVA includes */ +#include +#include + +using namespace Genode; + + +Native_capability::Native_capability() { } + + +void Native_capability::_inc() +{ + if (!valid()) return; + + Cap_index idx(cap_map().find(local_name())); + idx.inc(); +} + + +void Native_capability::_dec() +{ + if (!valid()) return; + + Cap_index idx(cap_map().find(local_name())); + idx.dec(); +} + + +long Native_capability::local_name() const +{ + if (valid()) + return Capability_space::crd(*this).base(); + else + return Capability_space::INVALID_INDEX; +} + + +bool Native_capability::valid() const +{ + return _data != nullptr; +} + + +Native_capability::Raw Native_capability::raw() const +{ + return { 0, 0, 0, 0 }; +} + + +void Native_capability::print(Genode::Output &out) const +{ + using Genode::print; + + print(out, "cap<"); + if (_data) { + print(out, local_name()); + } else { + print(out, "invalid"); + } + print(out, ">"); +} diff --git a/repos/base-tukija/src/lib/base/ipc.cc b/repos/base-tukija/src/lib/base/ipc.cc new file mode 100644 index 0000000000..2b65bc3a2a --- /dev/null +++ b/repos/base-tukija/src/lib/base/ipc.cc @@ -0,0 +1,206 @@ +/* + * \brief Implementation of the IPC API for NOVA + * \author Norman Feske + * \date 2009-10-02 + */ + +/* + * Copyright (C) 2009-2017 Genode Labs GmbH + * + * This file is part of the Genode OS framework, which is distributed + * under the terms of the GNU Affero General Public License version 3. 
+ */ + +/* Genode includes */ +#include +#include +#include + +/* base-internal includes */ +#include + +/* NOVA includes */ +#include + +using namespace Genode; + + +/**************** + ** IPC client ** + ****************/ + +Rpc_exception_code Genode::ipc_call(Native_capability dst, + Msgbuf_base &snd_msg, Msgbuf_base &rcv_msg, + size_t rcv_caps) +{ + Receive_window rcv_window; + rcv_msg.reset(); + + /* update receive window for capability selectors if needed */ + if (rcv_caps != ~0UL) { + + /* calculate max order of caps to be received during reply */ + unsigned short log2_max = 0; + if (rcv_caps) { + log2_max = (uint16_t)log2(rcv_caps); + + /* if this happens, the call is bogus and invalid */ + if ((log2_max >= sizeof(rcv_caps) * 8)) + throw Ipc_error(); + + if ((1UL << log2_max) < rcv_caps) + log2_max ++; + } + + rcv_window.rcv_wnd(log2_max); + } + + Thread * const myself = Thread::myself(); + Tukija::Utcb &utcb = *(Tukija::Utcb *)myself->utcb(); + + /* the protocol value is unused as the badge is delivered by the kernel */ + if (!copy_msgbuf_to_utcb(utcb, snd_msg, 0)) { + error("could not setup IPC"); + throw Ipc_error(); + } + + /* + * Determine manually defined selector for receiving the call result. + * See the comment in 'base-nova/include/nova/native_thread.h'. + */ + addr_t const manual_rcv_sel = myself ? myself->native_thread().client_rcv_sel + : Receive_window::INVALID_INDEX; + + /* if we can't setup receive window, die in order to recognize the issue */ + if (!rcv_window.prepare_rcv_window(utcb, manual_rcv_sel)) + /* printf doesn't work here since for IPC also rcv_prepare* is used */ + nova_die(); + + /* establish the mapping via a portal traversal */ + uint8_t res = Tukija::call(dst.local_name()); + + if (res != Tukija::NOVA_OK) + /* If an error occurred, reset word&item count (not done by kernel). 
	 */
		utcb.set_msg_word(0);

	/* track potentially received caps and invalidate unused caps slots */
	rcv_window.post_ipc(utcb, manual_rcv_sel);

	if (res != Tukija::NOVA_OK)
		return Rpc_exception_code(Rpc_exception_code::INVALID_OBJECT);

	/* handle malformed reply from a server */
	if (utcb.msg_words() < 1)
		return Rpc_exception_code(Rpc_exception_code::INVALID_OBJECT);

	return Rpc_exception_code((int)copy_utcb_to_msgbuf(utcb, rcv_window, rcv_msg));
}


/********************
 ** Receive_window **
 ********************/

/**
 * Hand out the next capability selector received during the last IPC
 *
 * Produces an invalid capability once all received selectors are consumed.
 */
void Receive_window::rcv_pt_sel(Native_capability &cap)
{
	if (_rcv_pt_sel_cnt >= _rcv_pt_sel_max) {
		cap = Native_capability();
		return;
	}

	/* return only received or translated caps */
	cap = Capability_space::import(_rcv_pt_sel[_rcv_pt_sel_cnt++].sel);
}


/**
 * True if no selector base has been assigned to the receive window yet
 */
bool Receive_window::rcv_invalid() const
{
	return _rcv_pt_base == Capability_space::INVALID_INDEX;
}


/**
 * Clean up the receive window after an IPC
 *
 * \param keep     try to retain the current selector block for reuse
 * \param new_max  number of selectors required by the next receive window
 *
 * \return true  if the caller must allocate a fresh selector block,
 *         false if the existing block could be kept
 */
bool Receive_window::rcv_cleanup(bool keep, unsigned short const new_max)
{
	/* mark mapped capabilities ('del' flag set) as used to prevent freeing */
	bool reinit = false;
	for (unsigned i = 0; i < _rcv_pt_sel_cnt; i++) {
		if (!_rcv_pt_sel[i].del)
			continue;

		/* should never happen */
		if (_rcv_pt_sel[i].sel < _rcv_pt_base ||
		    (_rcv_pt_sel[i].sel >= _rcv_pt_base + MAX_CAP_ARGS))
			nova_die();

		_rcv_pt_cap_free [_rcv_pt_sel[i].sel - _rcv_pt_base] = USED_CAP;

		reinit = true;
	}

	/* if old receive window was smaller, we need to re-init */
	for (unsigned i = 0; !reinit && i < new_max; i++)
		if (_rcv_pt_cap_free[i] == FREE_INVALID)
			reinit = true;

	_rcv_pt_sel_cnt = 0;
	_rcv_pt_sel_max = 0;

	/* we can keep the cap selectors if none was used */
	if (keep && !reinit) {
		for (unsigned i = 0; i < MAX_CAP_ARGS; i++) {
			/* revoke received caps which are unused */
			if (_rcv_pt_cap_free[i] == UNUSED_CAP)
				Tukija::revoke(Tukija::Obj_crd(_rcv_pt_base + i, 0), true);

			/* free rest of indexes if new_max is smaller than last window */
			if
(i >= new_max && _rcv_pt_cap_free[i] == FREE_SEL) + cap_map().remove(_rcv_pt_base + i, 0, false); + } + + return false; + } + + /* decrease ref count if valid selector */ + for (unsigned i = 0; i < MAX_CAP_ARGS; i++) { + if (_rcv_pt_cap_free[i] == FREE_INVALID) + continue; + cap_map().remove(_rcv_pt_base + i, 0, _rcv_pt_cap_free[i] != FREE_SEL); + } + + return true; +} + + +bool Receive_window::prepare_rcv_window(Tukija::Utcb &utcb, addr_t rcv_window) +{ + /* open maximal translate window */ + utcb.crd_xlt = Tukija::Obj_crd(0, ~0UL); + + /* use receive window if specified */ + if (rcv_window != INVALID_INDEX) { + /* cleanup if receive window already used */ + if (!rcv_invalid()) rcv_cleanup(false); + + _rcv_pt_base = rcv_window; + + /* open receive window */ + utcb.crd_rcv = Tukija::Obj_crd(_rcv_pt_base, _rcv_wnd_log2); + return true; + } + + /* allocate receive window if necessary, otherwise use old one */ + if (rcv_invalid() || rcv_cleanup(true, 1U << _rcv_wnd_log2)) + { + _rcv_pt_base = cap_map().insert(_rcv_wnd_log2); + + if (_rcv_pt_base == INVALID_INDEX) { + /* no mappings can be received */ + utcb.crd_rcv = Tukija::Obj_crd(); + return false; + } + } + + /* open receive window */ + utcb.crd_rcv = Tukija::Obj_crd(_rcv_pt_base, _rcv_wnd_log2); + return true; +} diff --git a/repos/base-tukija/src/lib/base/region_map_client.cc b/repos/base-tukija/src/lib/base/region_map_client.cc new file mode 100644 index 0000000000..404c805f7f --- /dev/null +++ b/repos/base-tukija/src/lib/base/region_map_client.cc @@ -0,0 +1,49 @@ +/* + * \brief Client-side region map stub + * \author Norman Feske + * \author Alexander Boettcher + * \date 2016-01-22 + */ + +/* + * Copyright (C) 2006-2017 Genode Labs GmbH + * + * This file is part of the Genode OS framework, which is distributed + * under the terms of the GNU Affero General Public License version 3. 
+ */ + +#include + +using namespace Genode; + + +Region_map_client::Region_map_client(Capability session) +: Rpc_client(session) { } + + +Region_map::Attach_result +Region_map_client::attach(Dataspace_capability ds, Attr const &attr) +{ + return call(ds, attr); +} + + +void Region_map_client::detach(addr_t at) { + call(at); } + + +void Region_map_client::fault_handler(Signal_context_capability cap) { + call(cap); } + + +Region_map::Fault Region_map_client::fault() { return call(); } + + +Dataspace_capability Region_map_client::dataspace() +{ + if (!_rm_ds_cap.valid()) + _rm_ds_cap = call(); + + return _rm_ds_cap; +} + diff --git a/repos/base-tukija/src/lib/base/rpc_cap_alloc.cc b/repos/base-tukija/src/lib/base/rpc_cap_alloc.cc new file mode 100644 index 0000000000..2e20bef09e --- /dev/null +++ b/repos/base-tukija/src/lib/base/rpc_cap_alloc.cc @@ -0,0 +1,74 @@ +/* + * \brief Core-specific back end of the RPC entrypoint + * \author Norman Feske + * \date 2016-01-19 + */ + +/* + * Copyright (C) 2016-2017 Genode Labs GmbH + * + * This file is part of the Genode OS framework, which is distributed + * under the terms of the GNU Affero General Public License version 3. 
+ */ + +/* Genode includes */ +#include +#include +#include +#include + +/* base-internal includes */ +#include + +/* NOVA-specific part of the PD session interface */ +#include + +using namespace Genode; + + +static Parent *_parent_ptr; +static Parent &_parent() +{ + if (_parent_ptr) + return *_parent_ptr; + + error("missing call of init_rpc_cap_alloc"); + for (;;); +} + + +void Genode::init_rpc_cap_alloc(Parent &parent) { _parent_ptr = &parent; } + + +Native_capability Rpc_entrypoint::_alloc_rpc_cap(Pd_session &pd, Native_capability ep, + addr_t entry) +{ + if (!_native_pd_cap.valid()) + _native_pd_cap = pd.native_pd(); + + Tukija_native_pd_client native_pd(_native_pd_cap); + + for (;;) { + + Ram_quota ram_upgrade { 0 }; + Cap_quota cap_upgrade { 0 }; + + try { + Untyped_capability new_obj_cap = native_pd.alloc_rpc_cap(ep, entry, 0); + native_pd.imprint_rpc_cap(new_obj_cap, new_obj_cap.local_name()); + return new_obj_cap; + } + catch (Out_of_ram) { ram_upgrade = Ram_quota { 2*1024*sizeof(long) }; } + catch (Out_of_caps) { cap_upgrade = Cap_quota { 4 }; } + + _parent().upgrade(Parent::Env::pd(), + String<100>("ram_quota=", ram_upgrade, ", " + "cap_quota=", cap_upgrade).string()); + } +} + + +void Rpc_entrypoint::_free_rpc_cap(Pd_session &pd, Native_capability cap) +{ + return pd.free_rpc_cap(cap); +} diff --git a/repos/base-tukija/src/lib/base/rpc_entrypoint.cc b/repos/base-tukija/src/lib/base/rpc_entrypoint.cc new file mode 100644 index 0000000000..c48b4b2100 --- /dev/null +++ b/repos/base-tukija/src/lib/base/rpc_entrypoint.cc @@ -0,0 +1,254 @@ +/* + * \brief NOVA-specific support code for the server-side RPC API + * \author Norman Feske + * \author Sebastian Sumpf + * \author Alexander Boettcher + * \date 2010-01-13 + */ + +/* + * Copyright (C) 2010-2017 Genode Labs GmbH + * + * This file is part of the Genode OS framework, which is distributed + * under the terms of the GNU Affero General Public License version 3. 
 */

/* Genode includes */
/* NOTE(review): the include targets below were lost in extraction —
   restore them from base-nova's rpc_entrypoint.cc before building */
#include
#include

/* base-internal includes */
#include
#include

/* NOVA includes */
#include
#include

using namespace Genode;


/***********************
 ** Server entrypoint **
 ***********************/

/**
 * Associate an RPC object with a freshly allocated object capability
 *
 * The capability is bound to this entrypoint's execution context (or to
 * the thread capability while the EC selector is not yet valid) and the
 * object is inserted into the entrypoint's object pool.
 *
 * \return the managed object capability, or an invalid capability if the
 *         object was already managed or the allocation failed
 */
Untyped_capability Rpc_entrypoint::_manage(Rpc_object_base *obj)
{
	using namespace Tukija;

	/* don't manage RPC object twice */
	if (obj->cap().valid()) {
		warning("attempt to manage RPC object twice");
		return obj->cap();
	}

	Untyped_capability ec_cap;

	/* _ec_sel is invalid until thread gets started */
	if (native_thread().ec_sel != Native_thread::INVALID_INDEX)
		ec_cap = Capability_space::import(native_thread().ec_sel);
	else
		ec_cap = Thread::cap();

	Untyped_capability obj_cap = _alloc_rpc_cap(_pd_session, ec_cap,
	                                            (addr_t)&_activation_entry);
	if (!obj_cap.valid())
		return obj_cap;

	/* add server object to object pool */
	obj->cap(obj_cap);
	insert(obj);

	/* return object capability managed by entrypoint thread */
	return obj_cap;
}

/* head of cleanup_call() — the closing of this comment and the function
   body continue in the next chunk */
static void cleanup_call(Rpc_object_base *obj, Tukija::Utcb * ep_utcb,
                         Native_capability &cap)
{

	/* effectively invalidate the capability used before */
	obj->cap(Untyped_capability());

	/*
	 * The activation may execute a blocking operation in a dispatch function.
	 * Before resolving the corresponding object, we need to ensure that it is
	 * no longer used by an activation. Therefore, we need to cancel a
	 * potentially blocking operation and let the activation leave the context
	 * of the object.
+ */ + using namespace Tukija; + + Utcb *utcb = reinterpret_cast(Thread::myself()->utcb()); + /* don't call ourself */ + if (utcb == ep_utcb) + return; + + /* make a IPC to ensure that cap() identifier is not used anymore */ + utcb->msg()[0] = 0xdead; + utcb->set_msg_word(1); + if (uint8_t res = call(cap.local_name())) + error(utcb, " - could not clean up entry point of thread ", ep_utcb, " - res ", res); +} + +void Rpc_entrypoint::_dissolve(Rpc_object_base *obj) +{ + /* don't dissolve RPC object twice */ + if (!obj || !obj->cap().valid()) + return; + + /* de-announce object from cap_session */ + _free_rpc_cap(_pd_session, obj->cap()); + + /* avoid any incoming IPC */ + Tukija::revoke(Tukija::Obj_crd(obj->cap().local_name(), 0), true); + + /* make sure nobody is able to find this object */ + remove(obj); + + cleanup_call(obj, reinterpret_cast(this->utcb()), _cap); +} + +static void reply(Tukija::Utcb &utcb, Rpc_exception_code exc, Msgbuf_base &snd_msg) +{ + copy_msgbuf_to_utcb(utcb, snd_msg, exc.value); + + Tukija::reply(Thread::myself()->stack_top()); +} + + +void Rpc_entrypoint::_activation_entry() +{ + /* retrieve portal id from eax/rdi */ +#ifdef __x86_64__ + addr_t id_pt; asm volatile ("" : "=D" (id_pt)); +#else + addr_t id_pt; asm volatile ("" : "=a" (id_pt)); +#endif + + Rpc_entrypoint &ep = *static_cast(Thread::myself()); + Tukija::Utcb &utcb = *(Tukija::Utcb *)Thread::myself()->utcb(); + + Receive_window &rcv_window = ep.native_thread().server_rcv_window; + rcv_window.post_ipc(utcb); + + /* handle ill-formed message */ + if (utcb.msg_words() < 2) { + ep._rcv_buf.word(0) = ~0UL; /* invalid opcode */ + } else { + copy_utcb_to_msgbuf(utcb, rcv_window, ep._rcv_buf); + } + + Ipc_unmarshaller unmarshaller(ep._rcv_buf); + + Rpc_opcode opcode(0); + unmarshaller.extract(opcode); + + /* default return value */ + Rpc_exception_code exc = Rpc_exception_code(Rpc_exception_code::INVALID_OBJECT); + + /* in case of a portal cleanup call we are done here - just reply */ + 
if (ep._cap.local_name() == (long)id_pt) { + if (!rcv_window.prepare_rcv_window(utcb)) + warning("out of capability selectors for handling server requests"); + + ep._rcv_buf.reset(); + reply(utcb, exc, ep._snd_buf); + } + + /* atomically lookup and lock referenced object */ + auto lambda = [&] (Rpc_object_base *obj) + { + if (!obj) { + error("could not look up server object, return from call id_pt=", id_pt); + return; + } + + /* dispatch request */ + ep._snd_buf.reset(); + exc = obj->dispatch(opcode, unmarshaller, ep._snd_buf); + }; + ep.apply(id_pt, lambda); + + if (!rcv_window.prepare_rcv_window(*(Tukija::Utcb *)ep.utcb())) + warning("out of capability selectors for handling server requests"); + + ep._rcv_buf.reset(); + reply(utcb, exc, ep._snd_buf); +} + + +void Rpc_entrypoint::entry() +{ + /* + * Thread entry is not used for activations on NOVA + */ +} + + +void Rpc_entrypoint::_block_until_cap_valid() { } + + +bool Rpc_entrypoint::is_myself() const +{ + return (Thread::myself() == this); +} + + +Rpc_entrypoint::Rpc_entrypoint(Pd_session *pd_session, size_t stack_size, + const char *name, Affinity::Location location) +: + Thread(Cpu_session::Weight::DEFAULT_WEIGHT, name, stack_size, location), + _pd_session(*pd_session) +{ + /* set magic value evaluated by thread_nova.cc to start a local thread */ + if (native_thread().ec_sel == Native_thread::INVALID_INDEX) { + native_thread().ec_sel = Native_thread::INVALID_INDEX - 1; + native_thread().initial_ip = (addr_t)&_activation_entry; + } + + /* required to create a 'local' EC */ + Thread::start(); + + /* create cleanup portal */ + _cap = _alloc_rpc_cap(_pd_session, + Capability_space::import(native_thread().ec_sel), + (addr_t)_activation_entry); + if (!_cap.valid()) { + error("failed to allocate RPC cap for new entrypoint"); + return; + } + + Receive_window &rcv_window = Thread::native_thread().server_rcv_window; + + /* prepare portal receive window of new thread */ + if (!rcv_window.prepare_rcv_window(*(Tukija::Utcb 
*)&_stack->utcb())) + error("failed to prepare receive window for RPC entrypoint"); +} + + +Rpc_entrypoint::~Rpc_entrypoint() +{ + using Pool = Object_pool; + + Pool::remove_all([&] (Rpc_object_base *obj) { + warning("object pool not empty in ", __func__); + + /* don't dissolve RPC object twice */ + if (!obj || !obj->cap().valid()) + return; + + /* de-announce object from cap_session */ + _free_rpc_cap(_pd_session, obj->cap()); + + /* avoid any incoming IPC */ + Tukija::revoke(Tukija::Obj_crd(obj->cap().local_name(), 0), true); + + cleanup_call(obj, reinterpret_cast(this->utcb()), _cap); + }); + + if (!_cap.valid()) + return; + + _free_rpc_cap(_pd_session, _cap); +} diff --git a/repos/base-tukija/src/lib/base/signal_transmitter.cc b/repos/base-tukija/src/lib/base/signal_transmitter.cc new file mode 100644 index 0000000000..beb49e3236 --- /dev/null +++ b/repos/base-tukija/src/lib/base/signal_transmitter.cc @@ -0,0 +1,50 @@ +/* + * \brief NOVA specific implementation of the signaling framework + * \author Alexander Boettcher + * \date 2015-03-16 + */ + +/* + * Copyright (C) 2015-2017 Genode Labs GmbH + * + * This file is part of the Genode OS framework, which is distributed + * under the terms of the GNU Affero General Public License version 3. 
+ */ + +/* Genode includes */ +#include +#include +#include + +/* base-internal includes */ +#include + +/* NOVA includes */ +#include + +using namespace Genode; + + +void Genode::init_signal_transmitter(Env &) { } + + +void Signal_transmitter::submit(unsigned cnt) +{ + { + Trace::Signal_submit trace_event(cnt); + } + + if (!_context.valid()) + return; + + using namespace Tukija; + + uint8_t res = NOVA_OK; + for (unsigned i = 0; res == NOVA_OK && i < cnt; i++) + res = sm_ctrl(_context.local_name(), SEMAPHORE_UP); + + if (res == NOVA_OK) + return; + + _context = Signal_context_capability(); +} diff --git a/repos/base-tukija/src/lib/base/sleep.cc b/repos/base-tukija/src/lib/base/sleep.cc new file mode 100644 index 0000000000..091aaa275a --- /dev/null +++ b/repos/base-tukija/src/lib/base/sleep.cc @@ -0,0 +1,37 @@ +/* + * \brief Lay back and relax + * \author Norman Feske + * \author Christian Helmuth + * \date 2006-07-19 + */ + +/* + * Copyright (C) 2006-2017 Genode Labs GmbH + * + * This file is part of the Genode OS framework, which is distributed + * under the terms of the GNU Affero General Public License version 3. + */ + +/* Genode includes */ +#include + +/* base-internal includes */ +#include + +/* NOVA includes */ +#include +#include + +void Genode::sleep_forever() +{ + using namespace Tukija; + + Thread *myself = Thread::myself(); + addr_t sem = myself ? 
(addr_t)SM_SEL_EC + myself->native_thread().exc_pt_sel + : (addr_t)SM_SEL_EC; + + while (1) { + if (Tukija::sm_ctrl(sem, SEMAPHORE_DOWNZERO)) + nova_die(); + } +} diff --git a/repos/base-tukija/src/lib/base/stack.cc b/repos/base-tukija/src/lib/base/stack.cc new file mode 100644 index 0000000000..e1cfb1770f --- /dev/null +++ b/repos/base-tukija/src/lib/base/stack.cc @@ -0,0 +1,120 @@ +/* + * \brief Stack-specific part of the thread library + * \author Norman Feske + * \author Alexander Boettcher + * \author Martin Stein + * \date 2010-01-19 + * + * This part of the thread library is required by the IPC framework + * also if no threads are used. + */ + +/* + * Copyright (C) 2010-2017 Genode Labs GmbH + * + * This file is part of the Genode OS framework, which is distributed + * under the terms of the GNU Affero General Public License version 3. + */ + +/* Genode includes */ +#include +#include +#include + +/* base-internal includes */ +#include +#include +#include +#include + +/* NOVA includes */ +#include + +using namespace Genode; + +extern addr_t __initial_sp; + + +/******************* + ** local helpers ** + *******************/ + +Native_utcb * main_thread_utcb() +{ + using namespace Genode; + return reinterpret_cast( + stack_area_virtual_base() + stack_virtual_size() - Tukija::PAGE_SIZE_BYTE); +} + + +addr_t main_thread_running_semaphore() { return Tukija::SM_SEL_EC; } + + +class Initial_cap_range : public Cap_range +{ + private: + + enum { CAP_RANGE_START = 4096 }; + + public: + + Initial_cap_range() : Cap_range(CAP_RANGE_START) { } +}; + + +Initial_cap_range &initial_cap_range() +{ + static Initial_cap_range s; + return s; +} + + +/***************************** + ** Startup library support ** + *****************************/ + +void Genode::prepare_init_main_thread() +{ + cap_map().insert(initial_cap_range()); + + /* for Core we can't perform the following code so early */ + if (!__initial_sp) { + + enum { CAP_RANGES = 32 }; + + unsigned index = 
(unsigned)(initial_cap_range().base() + + initial_cap_range().elements()); + + static char local[CAP_RANGES][sizeof(Cap_range)]; + + for (unsigned i = 0; i < CAP_RANGES; i++) { + + Cap_range &range = *reinterpret_cast(local[i]); + construct_at(&range, index); + + cap_map().insert(range); + + index = (unsigned)(range.base() + range.elements()); + } + } +} + + +/************ + ** Thread ** + ************/ + +/* prevent the compiler from optimizing out the 'this' pointer check */ +__attribute__((optimize("-fno-delete-null-pointer-checks"))) +Native_utcb *Thread::utcb() +{ + /* + * If 'utcb' is called on the object returned by 'myself', + * the 'this' pointer may be NULL (if the calling thread is + * the main thread). Therefore we allow this special case + * here. + */ + if (this == 0) return main_thread_utcb(); + + return &_stack->utcb(); +} diff --git a/repos/base-tukija/src/lib/base/thread_start.cc b/repos/base-tukija/src/lib/base/thread_start.cc new file mode 100644 index 0000000000..9b2dbd7f79 --- /dev/null +++ b/repos/base-tukija/src/lib/base/thread_start.cc @@ -0,0 +1,231 @@ +/* + * \brief NOVA-specific implementation of the Thread API + * \author Norman Feske + * \author Sebastian Sumpf + * \author Alexander Boettcher + * \date 2010-01-19 + */ + +/* + * Copyright (C) 2010-2017 Genode Labs GmbH + * + * This file is part of the Genode OS framework, which is distributed + * under the terms of the GNU Affero General Public License version 3. 
+ */ + +/* Genode includes */ +#include +#include +#include +#include +#include +#include +#include +#include + +/* base-internal includes */ +#include +#include + +/* NOVA includes */ +#include +#include +#include +#include + +using namespace Genode; + + +static Capability pd_session_cap(Capability pd_cap = { }) +{ + static Capability cap = pd_cap; /* defined once by 'init_thread_start' */ + return cap; +} + + +static Thread_capability main_thread_cap(Thread_capability main_cap = { }) +{ + static Thread_capability cap = main_cap; + return cap; +} + + +/** + * Entry point entered by new threads + */ +void Thread::_thread_start() +{ + using namespace Genode; + + /* catch any exception at this point and try to print an error message */ + try { + Thread::myself()->entry(); + } catch (...) { + try { + error("Thread '", Thread::myself()->name(), "' " + "died because of an uncaught exception"); + } catch (...) { + /* die in a noisy way */ + nova_die(); + } + + throw; + } + + Thread::myself()->_join.wakeup(); + + /* sleep silently */ + Genode::sleep_forever(); +} + + +/***************** + ** Thread base ** + *****************/ + +void Thread::_init_platform_thread(size_t weight, Type type) +{ + using namespace Tukija; + + /* + * Allocate capability selectors for the thread's execution context, + * running semaphore and exception handler portals. + */ + native_thread().ec_sel = Native_thread::INVALID_INDEX; + + /* for main threads the member initialization differs */ + if (type == MAIN) { + _thread_cap = main_thread_cap(); + + native_thread().exc_pt_sel = 0; + native_thread().ec_sel = Tukija::EC_SEL_THREAD; + + request_native_ec_cap(PT_SEL_PAGE_FAULT, native_thread().ec_sel); + return; + } + + /* + * Revoke possible left-over UTCB of a previously destroyed thread + * which used this context location. 
+ * + * This cannot be done in '_deinit_platform_thread()', because a + * self-destructing thread needs its UTCB to call + * 'Cpu_session::kill_thread()' and is not able to revoke the UTCB + * afterwards. + */ + Rights rwx(true, true, true); + addr_t utcb = reinterpret_cast(&_stack->utcb()); + revoke(Mem_crd(utcb >> 12, 0, rwx)); + + native_thread().exc_pt_sel = cap_map().insert(NUM_INITIAL_PT_LOG2); + if (native_thread().exc_pt_sel == Native_thread::INVALID_INDEX) { + error("failed allocate exception-portal selector for new thread"); + return; + } + + _init_cpu_session_and_trace_control(); + + /* create thread at core */ + _cpu_session->create_thread(pd_session_cap(), name(), + _affinity, Weight(weight)).with_result( + [&] (Thread_capability cap) { _thread_cap = cap; }, + [&] (Cpu_session::Create_thread_error) { + error("failed to create new thread for local PD"); }); +} + + +void Thread::_deinit_platform_thread() +{ + using namespace Tukija; + + if (native_thread().ec_sel != Native_thread::INVALID_INDEX) { + revoke(Obj_crd(native_thread().ec_sel, 0)); + } + + /* de-announce thread */ + _thread_cap.with_result( + [&] (Thread_capability cap) { _cpu_session->kill_thread(cap); }, + [&] (Cpu_session::Create_thread_error) { }); + + cap_map().remove(native_thread().exc_pt_sel, NUM_INITIAL_PT_LOG2); +} + + +Thread::Start_result Thread::start() +{ + if (native_thread().ec_sel < Native_thread::INVALID_INDEX - 1) { + error("Thread::start failed due to invalid exception portal selector"); + return Start_result::DENIED; + } + + if (_thread_cap.failed()) + return Start_result::DENIED; + + /* + * Default: create global thread - ec.sel == INVALID_INDEX + * create local thread - ec.sel == INVALID_INDEX - 1 + */ + bool global = native_thread().ec_sel == Native_thread::INVALID_INDEX; + + using namespace Genode; + + /* create EC at core */ + + try { + Cpu_session::Native_cpu::Thread_type thread_type; + + if (global) + thread_type = Cpu_session::Native_cpu::Thread_type::GLOBAL; + 
else + thread_type = Cpu_session::Native_cpu::Thread_type::LOCAL; + + Cpu_session::Native_cpu::Exception_base exception_base { native_thread().exc_pt_sel }; + + Tukija_native_cpu_client native_cpu(_cpu_session->native_cpu()); + native_cpu.thread_type(cap(), thread_type, exception_base); + } catch (...) { + error("Thread::start failed to set thread type"); + return Start_result::DENIED; + } + + /* local thread have no start instruction pointer - set via portal entry */ + addr_t thread_ip = global ? reinterpret_cast(_thread_start) : native_thread().initial_ip; + + Cpu_thread_client cpu_thread(cap()); + cpu_thread.start(thread_ip, _stack->top()); + + /* request native EC thread cap */ + native_thread().ec_sel = native_thread().exc_pt_sel + Tukija::EC_SEL_THREAD; + + /* + * Requested ec cap that is used for recall and + * creation of portals (Native_pd::alloc_rpc_cap). + */ + request_native_ec_cap(native_thread().exc_pt_sel + Tukija::PT_SEL_PAGE_FAULT, + native_thread().ec_sel); + + using namespace Tukija; + + /* default: we don't accept any mappings or translations */ + Utcb * utcb_obj = reinterpret_cast(utcb()); + utcb_obj->crd_rcv = Obj_crd(); + utcb_obj->crd_xlt = Obj_crd(); + + if (global) + /* request creation of SC to let thread run*/ + cpu_thread.resume(); + + return Start_result::OK; +} + + +void Genode::init_thread_start(Capability pd_cap) +{ + pd_session_cap(pd_cap); +} + + +void Genode::init_thread_bootstrap(Cpu_session &, Thread_capability main_cap) +{ + main_thread_cap(main_cap); +} diff --git a/repos/base-tukija/src/lib/base/vm.cc b/repos/base-tukija/src/lib/base/vm.cc new file mode 100644 index 0000000000..17e249af5e --- /dev/null +++ b/repos/base-tukija/src/lib/base/vm.cc @@ -0,0 +1,776 @@ +/* + * \brief NOVA-specific VM-connection implementation + * \author Alexander Boettcher + * \author Christian Helmuth + * \author Benjamin Lamowski + * \date 2018-08-27 + */ + +/* + * Copyright (C) 2018-2023 Genode Labs GmbH + * + * This file is part of the Genode 
OS framework, which is distributed + * under the terms of the GNU Affero General Public License version 3. + */ + +/* Genode includes */ +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +/* Tukija includes */ +#include +#include +#include +#include +#include + +using namespace Genode; + +using Exit_config = Vm_connection::Exit_config; + + +/****************************** + ** NOVA vCPU implementation ** + ******************************/ + +struct Nova_vcpu : Rpc_client, Noncopyable +{ + private: + + enum { VM_EXIT_STARTUP = 0xfe, VM_EXIT_RECALL = 0xff }; + + using Vcpu_space = Id_space; + + static Vcpu_space &_vcpu_space() + { + static Vcpu_space instance; + return instance; + } + + Vcpu_space::Element _id_elem; + + struct Vcpu_id_space_exhausted : Exception { }; + + Signal_dispatcher_base &_obj; + Allocator &_alloc; + void *_ep_handler { nullptr }; + void *_dispatching { nullptr }; + bool _resume { false }; + bool _last_resume { true }; + + Vcpu_state _vcpu_state __attribute__((aligned(0x10))) { }; + + inline void _read_nova_state(Tukija::Utcb &); + + inline void _write_nova_state(Tukija::Utcb &); + + addr_t _sm_sel() const { + return Tukija::NUM_INITIAL_PT_RESERVED + _id_elem.id().value * 4; } + + addr_t _ec_sel() const { return _sm_sel() + 1; } + + /** + * NOVA badge with 16-bit exit reason and 16-bit artificial vCPU ID + */ + struct Badge + { + uint32_t _value; + + Badge(unsigned long value) + : _value((uint32_t)value) { } + + Badge(uint16_t vcpu_id, uint16_t exit_reason) + : _value((uint32_t)(vcpu_id << 16) | (exit_reason & 0xffffu)) { } + + uint16_t exit_reason() const { return (uint16_t)( _value & 0xffff); } + uint16_t vcpu_id() const { return (uint16_t)((_value >> 16) & 0xffff); } + uint32_t value() const { return _value; } + }; + + void _handle_exit(Tukija::Utcb &); + + __attribute__((regparm(1))) static void _exit_entry(addr_t badge); + + Tukija::Mtd _portal_mtd(unsigned exit, Exit_config const &config) + { + 
/* TODO define and implement omissions */ + (void)exit; + (void)config; + + Genode::addr_t mtd = 0; + + mtd |= Tukija::Mtd::ACDB; + mtd |= Tukija::Mtd::EBSD; + mtd |= Tukija::Mtd::EFL; + mtd |= Tukija::Mtd::ESP; + mtd |= Tukija::Mtd::EIP; + mtd |= Tukija::Mtd::DR; + mtd |= Tukija::Mtd::R8_R15; + mtd |= Tukija::Mtd::CR; + mtd |= Tukija::Mtd::CSSS; + mtd |= Tukija::Mtd::ESDS; + mtd |= Tukija::Mtd::FSGS; + mtd |= Tukija::Mtd::TR; + mtd |= Tukija::Mtd::LDTR; + mtd |= Tukija::Mtd::GDTR; + mtd |= Tukija::Mtd::IDTR; + mtd |= Tukija::Mtd::SYS; + mtd |= Tukija::Mtd::CTRL; + mtd |= Tukija::Mtd::INJ; + mtd |= Tukija::Mtd::STA; + mtd |= Tukija::Mtd::TSC; + mtd |= Tukija::Mtd::TSC_AUX; + mtd |= Tukija::Mtd::EFER; + mtd |= Tukija::Mtd::PDPTE; + mtd |= Tukija::Mtd::SYSCALL_SWAPGS; + mtd |= Tukija::Mtd::TPR; + mtd |= Tukija::Mtd::QUAL; + mtd |= Tukija::Mtd::XSAVE; + mtd |= Tukija::Mtd::FPU; + + return Tukija::Mtd(mtd); + } + + static Capability _create_vcpu(Vm_connection &, Vcpu_handler_base &); + + static Signal_context_capability _create_exit_handler(Pd_session &pd, + Vcpu_handler_base &handler, + uint16_t vcpu_id, + uint16_t exit_reason, + Tukija::Mtd mtd); + /* + * Noncopyable + */ + Nova_vcpu(Nova_vcpu const &) = delete; + Nova_vcpu &operator = (Nova_vcpu const &) = delete; + + public: + + Nova_vcpu(Env &env, Vm_connection &vm, Allocator &alloc, + Vcpu_handler_base &handler, Exit_config const &exit_config); + + void startup() + { + call(); + } + + void with_state(auto const &fn); +}; + + +void Nova_vcpu::_read_nova_state(Tukija::Utcb &utcb) +{ + using Segment = Genode::Vcpu_state::Segment; + using Range = Genode::Vcpu_state::Range; + + _vcpu_state.discharge(); + _vcpu_state.exit_reason = static_cast(utcb.exit_reason); + + if (utcb.mtd & Tukija::Mtd::FPU) { + _vcpu_state.fpu.charge([&] (Vcpu_state::Fpu::State &fpu) { + auto const fpu_size = unsigned(min(_vcpu_state.fpu.size(), + sizeof(utcb.fpu))); + memcpy(&fpu, utcb.fpu, fpu_size); + return fpu_size; + }); + } + + if 
(utcb.mtd & Tukija::Mtd::ACDB) { + _vcpu_state.ax.charge(utcb.ax); + _vcpu_state.cx.charge(utcb.cx); + _vcpu_state.dx.charge(utcb.dx); + _vcpu_state.bx.charge(utcb.bx); + } + + if (utcb.mtd & Tukija::Mtd::EBSD) { + _vcpu_state.di.charge(utcb.di); + _vcpu_state.si.charge(utcb.si); + _vcpu_state.bp.charge(utcb.bp); + } + + if (utcb.mtd & Tukija::Mtd::EFL) _vcpu_state.flags.charge(utcb.flags); + if (utcb.mtd & Tukija::Mtd::ESP) _vcpu_state.sp.charge(utcb.sp); + if (utcb.mtd & Tukija::Mtd::DR) _vcpu_state.dr7.charge(utcb.dr7); + + if (utcb.mtd & Tukija::Mtd::EIP) { + _vcpu_state.ip.charge(utcb.ip); + _vcpu_state.ip_len.charge(utcb.instr_len); + } + + if (utcb.mtd & Tukija::Mtd::R8_R15) { + _vcpu_state. r8.charge(utcb.read_r8()); + _vcpu_state. r9.charge(utcb.read_r9()); + _vcpu_state.r10.charge(utcb.read_r10()); + _vcpu_state.r11.charge(utcb.read_r11()); + _vcpu_state.r12.charge(utcb.read_r12()); + _vcpu_state.r13.charge(utcb.read_r13()); + _vcpu_state.r14.charge(utcb.read_r14()); + _vcpu_state.r15.charge(utcb.read_r15()); + } + + if (utcb.mtd & Tukija::Mtd::CR) { + _vcpu_state.cr0.charge(utcb.cr0); + _vcpu_state.cr2.charge(utcb.cr2); + _vcpu_state.cr3.charge(utcb.cr3); + _vcpu_state.cr4.charge(utcb.cr4); + } + if (utcb.mtd & Tukija::Mtd::CSSS) { + _vcpu_state.cs.charge(Segment { .sel = utcb.cs.sel, + .ar = utcb.cs.ar, + .limit = utcb.cs.limit, + .base = utcb.cs.base }); + _vcpu_state.ss.charge(Segment { .sel = utcb.ss.sel, + .ar = utcb.ss.ar, + .limit = utcb.ss.limit, + .base = utcb.ss.base }); + } + + if (utcb.mtd & Tukija::Mtd::ESDS) { + _vcpu_state.es.charge(Segment { .sel = utcb.es.sel, + .ar = utcb.es.ar, + .limit = utcb.es.limit, + .base = utcb.es.base }); + _vcpu_state.ds.charge(Segment { .sel = utcb.ds.sel, + .ar = utcb.ds.ar, + .limit = utcb.ds.limit, + .base = utcb.ds.base }); + } + + if (utcb.mtd & Tukija::Mtd::FSGS) { + _vcpu_state.fs.charge(Segment { .sel = utcb.fs.sel, + .ar = utcb.fs.ar, + .limit = utcb.fs.limit, + .base = utcb.fs.base }); + 
_vcpu_state.gs.charge(Segment { .sel = utcb.gs.sel, + .ar = utcb.gs.ar, + .limit = utcb.gs.limit, + .base = utcb.gs.base }); + } + + if (utcb.mtd & Tukija::Mtd::TR) { + _vcpu_state.tr.charge(Segment { .sel = utcb.tr.sel, + .ar = utcb.tr.ar, + .limit = utcb.tr.limit, + .base = utcb.tr.base }); + } + + if (utcb.mtd & Tukija::Mtd::LDTR) { + _vcpu_state.ldtr.charge(Segment { .sel = utcb.ldtr.sel, + .ar = utcb.ldtr.ar, + .limit = utcb.ldtr.limit, + .base = utcb.ldtr.base }); + } + + if (utcb.mtd & Tukija::Mtd::GDTR) { + _vcpu_state.gdtr.charge(Range { .limit = utcb.gdtr.limit, + .base = utcb.gdtr.base }); + } + + if (utcb.mtd & Tukija::Mtd::IDTR) { + _vcpu_state.idtr.charge(Range { .limit = utcb.idtr.limit, + .base = utcb.idtr.base }); + } + + if (utcb.mtd & Tukija::Mtd::SYS) { + _vcpu_state.sysenter_cs.charge(utcb.sysenter_cs); + _vcpu_state.sysenter_sp.charge(utcb.sysenter_sp); + _vcpu_state.sysenter_ip.charge(utcb.sysenter_ip); + } + + if (utcb.mtd & Tukija::Mtd::QUAL) { + _vcpu_state.qual_primary.charge(utcb.qual[0]); + _vcpu_state.qual_secondary.charge(utcb.qual[1]); + } + + if (utcb.mtd & Tukija::Mtd::CTRL) { + _vcpu_state.ctrl_primary.charge(utcb.ctrl[0]); + _vcpu_state.ctrl_secondary.charge(utcb.ctrl[1]); + } + + if (utcb.mtd & Tukija::Mtd::INJ) { + _vcpu_state.inj_info.charge(utcb.inj_info); + _vcpu_state.inj_error.charge(utcb.inj_error); + } + + if (utcb.mtd & Tukija::Mtd::STA) { + _vcpu_state.intr_state.charge(utcb.intr_state); + _vcpu_state.actv_state.charge(utcb.actv_state); + } + + if (utcb.mtd & Tukija::Mtd::TSC) { + _vcpu_state.tsc.charge(utcb.tsc_val); + _vcpu_state.tsc_offset.charge(utcb.tsc_off); + } + + if (utcb.mtd & Tukija::Mtd::TSC_AUX) { + _vcpu_state.tsc_aux.charge(utcb.tsc_aux); + } + + if (utcb.mtd & Tukija::Mtd::EFER) { + _vcpu_state.efer.charge(utcb.read_efer()); + } + + if (utcb.mtd & Tukija::Mtd::PDPTE) { + _vcpu_state.pdpte_0.charge(utcb.pdpte[0]); + _vcpu_state.pdpte_1.charge(utcb.pdpte[1]); + _vcpu_state.pdpte_2.charge(utcb.pdpte[2]); + 
_vcpu_state.pdpte_3.charge(utcb.pdpte[3]); + } + + if (utcb.mtd & Tukija::Mtd::SYSCALL_SWAPGS) { + _vcpu_state.star.charge(utcb.read_star()); + _vcpu_state.lstar.charge(utcb.read_lstar()); + _vcpu_state.cstar.charge(utcb.read_cstar()); + _vcpu_state.fmask.charge(utcb.read_fmask()); + _vcpu_state.kernel_gs_base.charge(utcb.read_kernel_gs_base()); + } + + if (utcb.mtd & Tukija::Mtd::TPR) { + _vcpu_state.tpr.charge(utcb.read_tpr()); + _vcpu_state.tpr_threshold.charge(utcb.read_tpr_threshold()); + } + + if (utcb.mtd & Tukija::Mtd::XSAVE) { + _vcpu_state.xcr0.charge(utcb.xcr0); + _vcpu_state.xss.charge(utcb.xss); + } +} + + +void Nova_vcpu::_write_nova_state(Tukija::Utcb &utcb) +{ + utcb.items = 0; + utcb.mtd = 0; + + if (_vcpu_state.ax.charged() || _vcpu_state.cx.charged() || + _vcpu_state.dx.charged() || _vcpu_state.bx.charged()) { + utcb.mtd |= Tukija::Mtd::ACDB; + utcb.ax = _vcpu_state.ax.value(); + utcb.cx = _vcpu_state.cx.value(); + utcb.dx = _vcpu_state.dx.value(); + utcb.bx = _vcpu_state.bx.value(); + } + + if (_vcpu_state.bp.charged() || _vcpu_state.di.charged() || _vcpu_state.si.charged()) { + utcb.mtd |= Tukija::Mtd::EBSD; + utcb.di = _vcpu_state.di.value(); + utcb.si = _vcpu_state.si.value(); + utcb.bp = _vcpu_state.bp.value(); + } + + if (_vcpu_state.flags.charged()) { + utcb.mtd |= Tukija::Mtd::EFL; + utcb.flags = _vcpu_state.flags.value(); + } + + if (_vcpu_state.sp.charged()) { + utcb.mtd |= Tukija::Mtd::ESP; + utcb.sp = _vcpu_state.sp.value(); + } + + if (_vcpu_state.ip.charged()) { + utcb.mtd |= Tukija::Mtd::EIP; + utcb.ip = _vcpu_state.ip.value(); + utcb.instr_len = _vcpu_state.ip_len.value(); + } + + if (_vcpu_state.dr7.charged()) { + utcb.mtd |= Tukija::Mtd::DR; + utcb.dr7 = _vcpu_state.dr7.value(); + } + + if (_vcpu_state.r8 .charged() || _vcpu_state.r9 .charged() || + _vcpu_state.r10.charged() || _vcpu_state.r11.charged() || + _vcpu_state.r12.charged() || _vcpu_state.r13.charged() || + _vcpu_state.r14.charged() || _vcpu_state.r15.charged()) { + + 
utcb.mtd |= Tukija::Mtd::R8_R15; + utcb.write_r8 (_vcpu_state.r8.value()); + utcb.write_r9 (_vcpu_state.r9.value()); + utcb.write_r10(_vcpu_state.r10.value()); + utcb.write_r11(_vcpu_state.r11.value()); + utcb.write_r12(_vcpu_state.r12.value()); + utcb.write_r13(_vcpu_state.r13.value()); + utcb.write_r14(_vcpu_state.r14.value()); + utcb.write_r15(_vcpu_state.r15.value()); + } + + if (_vcpu_state.cr0.charged() || _vcpu_state.cr2.charged() || + _vcpu_state.cr3.charged() || _vcpu_state.cr4.charged()) { + utcb.mtd |= Tukija::Mtd::CR; + utcb.cr0 = _vcpu_state.cr0.value(); + utcb.cr2 = _vcpu_state.cr2.value(); + utcb.cr3 = _vcpu_state.cr3.value(); + utcb.cr4 = _vcpu_state.cr4.value(); + } + + if (_vcpu_state.cs.charged() || _vcpu_state.ss.charged()) { + utcb.mtd |= Tukija::Mtd::CSSS; + utcb.cs.sel = _vcpu_state.cs.value().sel; + utcb.cs.ar = _vcpu_state.cs.value().ar; + utcb.cs.limit = _vcpu_state.cs.value().limit; + utcb.cs.base = _vcpu_state.cs.value().base; + + utcb.ss.sel = _vcpu_state.ss.value().sel; + utcb.ss.ar = _vcpu_state.ss.value().ar; + utcb.ss.limit = _vcpu_state.ss.value().limit; + utcb.ss.base = _vcpu_state.ss.value().base; + } + + if (_vcpu_state.es.charged() || _vcpu_state.ds.charged()) { + utcb.mtd |= Tukija::Mtd::ESDS; + utcb.es.sel = _vcpu_state.es.value().sel; + utcb.es.ar = _vcpu_state.es.value().ar; + utcb.es.limit = _vcpu_state.es.value().limit; + utcb.es.base = _vcpu_state.es.value().base; + + utcb.ds.sel = _vcpu_state.ds.value().sel; + utcb.ds.ar = _vcpu_state.ds.value().ar; + utcb.ds.limit = _vcpu_state.ds.value().limit; + utcb.ds.base = _vcpu_state.ds.value().base; + } + + if (_vcpu_state.fs.charged() || _vcpu_state.gs.charged()) { + utcb.mtd |= Tukija::Mtd::FSGS; + utcb.fs.sel = _vcpu_state.fs.value().sel; + utcb.fs.ar = _vcpu_state.fs.value().ar; + utcb.fs.limit = _vcpu_state.fs.value().limit; + utcb.fs.base = _vcpu_state.fs.value().base; + + utcb.gs.sel = _vcpu_state.gs.value().sel; + utcb.gs.ar = _vcpu_state.gs.value().ar; + utcb.gs.limit 
= _vcpu_state.gs.value().limit; + utcb.gs.base = _vcpu_state.gs.value().base; + } + + if (_vcpu_state.tr.charged()) { + utcb.mtd |= Tukija::Mtd::TR; + utcb.tr.sel = _vcpu_state.tr.value().sel; + utcb.tr.ar = _vcpu_state.tr.value().ar; + utcb.tr.limit = _vcpu_state.tr.value().limit; + utcb.tr.base = _vcpu_state.tr.value().base; + } + + if (_vcpu_state.ldtr.charged()) { + utcb.mtd |= Tukija::Mtd::LDTR; + utcb.ldtr.sel = _vcpu_state.ldtr.value().sel; + utcb.ldtr.ar = _vcpu_state.ldtr.value().ar; + utcb.ldtr.limit = _vcpu_state.ldtr.value().limit; + utcb.ldtr.base = _vcpu_state.ldtr.value().base; + } + + if (_vcpu_state.gdtr.charged()) { + utcb.mtd |= Tukija::Mtd::GDTR; + utcb.gdtr.limit = _vcpu_state.gdtr.value().limit; + utcb.gdtr.base = _vcpu_state.gdtr.value().base; + } + + if (_vcpu_state.idtr.charged()) { + utcb.mtd |= Tukija::Mtd::IDTR; + utcb.idtr.limit = _vcpu_state.idtr.value().limit; + utcb.idtr.base = _vcpu_state.idtr.value().base; + } + + if (_vcpu_state.sysenter_cs.charged() || _vcpu_state.sysenter_sp.charged() || + _vcpu_state.sysenter_ip.charged()) { + utcb.mtd |= Tukija::Mtd::SYS; + utcb.sysenter_cs = _vcpu_state.sysenter_cs.value(); + utcb.sysenter_sp = _vcpu_state.sysenter_sp.value(); + utcb.sysenter_ip = _vcpu_state.sysenter_ip.value(); + } + + if (_vcpu_state.ctrl_primary.charged() || _vcpu_state.ctrl_secondary.charged()) { + utcb.mtd |= Tukija::Mtd::CTRL; + utcb.ctrl[0] = _vcpu_state.ctrl_primary.value(); + utcb.ctrl[1] = _vcpu_state.ctrl_secondary.value(); + } + + if (_vcpu_state.inj_info.charged() || _vcpu_state.inj_error.charged()) { + utcb.mtd |= Tukija::Mtd::INJ; + utcb.inj_info = _vcpu_state.inj_info.value(); + utcb.inj_error = _vcpu_state.inj_error.value(); + } + + if (_vcpu_state.intr_state.charged() || _vcpu_state.actv_state.charged()) { + utcb.mtd |= Tukija::Mtd::STA; + utcb.intr_state = _vcpu_state.intr_state.value(); + utcb.actv_state = _vcpu_state.actv_state.value(); + } + + if (_vcpu_state.tsc.charged() || 
_vcpu_state.tsc_offset.charged()) { + utcb.mtd |= Tukija::Mtd::TSC; + utcb.tsc_val = _vcpu_state.tsc.value(); + utcb.tsc_off = _vcpu_state.tsc_offset.value(); + } + + if (_vcpu_state.tsc_aux.charged()) { + utcb.mtd |= Tukija::Mtd::TSC_AUX; + utcb.tsc_aux = _vcpu_state.tsc_aux.value(); + } + + if (_vcpu_state.efer.charged()) { + utcb.mtd |= Tukija::Mtd::EFER; + utcb.write_efer(_vcpu_state.efer.value()); + } + + if (_vcpu_state.pdpte_0.charged() || _vcpu_state.pdpte_1.charged() || + _vcpu_state.pdpte_2.charged() || _vcpu_state.pdpte_3.charged()) { + + utcb.mtd |= Tukija::Mtd::PDPTE; + utcb.pdpte[0] = (Tukija::mword_t)_vcpu_state.pdpte_0.value(); + utcb.pdpte[1] = (Tukija::mword_t)_vcpu_state.pdpte_1.value(); + utcb.pdpte[2] = (Tukija::mword_t)_vcpu_state.pdpte_2.value(); + utcb.pdpte[3] = (Tukija::mword_t)_vcpu_state.pdpte_3.value(); + } + + if (_vcpu_state.star.charged() || _vcpu_state.lstar.charged() || + _vcpu_state.cstar.charged() || _vcpu_state.fmask.charged() || + _vcpu_state.kernel_gs_base.charged()) { + + utcb.mtd |= Tukija::Mtd::SYSCALL_SWAPGS; + utcb.write_star (_vcpu_state.star.value()); + utcb.write_lstar(_vcpu_state.lstar.value()); + utcb.write_cstar(_vcpu_state.cstar.value()); + utcb.write_fmask(_vcpu_state.fmask.value()); + utcb.write_kernel_gs_base(_vcpu_state.kernel_gs_base.value()); + } + + if (_vcpu_state.tpr.charged() || _vcpu_state.tpr_threshold.charged()) { + utcb.mtd |= Tukija::Mtd::TPR; + utcb.write_tpr(_vcpu_state.tpr.value()); + utcb.write_tpr_threshold(_vcpu_state.tpr_threshold.value()); + } + + if (_vcpu_state.xcr0.charged() || _vcpu_state.xss.charged()) { + utcb.xcr0 = _vcpu_state.xcr0.value(); + utcb.xss = _vcpu_state.xss.value(); + + utcb.mtd |= Tukija::Mtd::XSAVE; + } + + if (_vcpu_state.fpu.charged()) { + utcb.mtd |= Tukija::Mtd::FPU; + _vcpu_state.fpu.with_state([&] (Vcpu_state::Fpu::State const &fpu) { + memcpy(utcb.fpu, &fpu, sizeof(fpu)); + }); + } +} + + +/* + * Do not touch the UTCB before _read_nova_state() and after + * 
_write_nova_state(), particularly not by logging diagnostics. + */ +void Nova_vcpu::_handle_exit(Tukija::Utcb &utcb) +{ + if (utcb.exit_reason == VM_EXIT_RECALL) { + /* + * A recall exit is only requested from an asynchronous Signal to the + * vCPU Handler. In that case, VM_EXIT_RECALL has already been processed + * asynchronously by getting and setting the state via system calls and + * the regular exit does not need to be processed. + */ + utcb.mtd = 0; + utcb.items = 0; + return; + } + + _read_nova_state(utcb); + + try { + _dispatching = Thread::myself(); + /* call dispatch handler */ + _obj.dispatch(1); + _dispatching = nullptr; + } catch (...) { + _dispatching = nullptr; + throw; + } + + /* reply to NOVA and transfer vCPU state */ + _write_nova_state(utcb); +} + + +void Nova_vcpu::with_state(auto const &fn) +{ + Thread *myself = Thread::myself(); + bool remote = (_dispatching != myself); + Tukija::Utcb &utcb = *reinterpret_cast(myself->utcb()); + + if (remote) { + if (Thread::myself() != _ep_handler) { + error("vCPU state requested outside of vcpu_handler EP"); + sleep_forever(); + }; + + Exit_config config { }; + Tukija::Mtd mtd = _portal_mtd(0, config); + + uint8_t res = Tukija::ec_ctrl(Tukija::EC_GET_VCPU_STATE, _ec_sel(), mtd.value()); + + if (res != Tukija::NOVA_OK) { + error("Getting vCPU state failed with: ", res); + sleep_forever(); + }; + + _read_nova_state(utcb); + } + + _resume = fn(_vcpu_state); + + if (remote) { + _write_nova_state(utcb); + /* + * A recall is needed + * a) when the vCPU should be stopped or + * b) when the vCPU should be resumed from a stopped state. + */ + bool recall = !(_resume && _last_resume); + + uint8_t res = Tukija::ec_ctrl(Tukija::EC_SET_VCPU_STATE, _ec_sel(), recall); + + if (res != Tukija::NOVA_OK) { + error("Setting vCPU state failed with: ", res); + sleep_forever(); + }; + + /* + * Resume the vCPU and indicate to the next exit if state + * needs to be synced or not. 
+ */ + if (_resume) + Tukija::sm_ctrl(_sm_sel(), Tukija::SEMAPHORE_UP); + } +} + + +static void nova_reply(Thread &myself, Tukija::Utcb &utcb, auto &&... args) +{ + Receive_window &rcv_window = myself.native_thread().server_rcv_window; + + /* reset receive window to values expected by RPC server code */ + rcv_window.prepare_rcv_window(utcb); + + Tukija::reply(myself.stack_top(), args...); +} + + +void Nova_vcpu::_exit_entry(addr_t badge) +{ + Thread &myself = *Thread::myself(); + Tukija::Utcb &utcb = *reinterpret_cast(myself.utcb()); + + Vcpu_space::Id const vcpu_id { Badge(badge).vcpu_id() }; + + _vcpu_space().apply(vcpu_id, + [&] (Nova_vcpu &vcpu) { + vcpu._handle_exit(utcb); + + vcpu._last_resume = vcpu._resume; + if (vcpu._resume) { + nova_reply(myself, utcb); + } else { + nova_reply(myself, utcb, vcpu._sm_sel()); + } + }, + [&] /* missing */ { + /* somebody called us directly ? ... ignore/deny */ + utcb.items = 0; + utcb.mtd = 0; + nova_reply(myself, utcb); + }); +} + + +Signal_context_capability Nova_vcpu::_create_exit_handler(Pd_session &pd, + Vcpu_handler_base &handler, + uint16_t vcpu_id, + uint16_t exit_reason, + Tukija::Mtd mtd) +{ + Thread *tep = reinterpret_cast(&handler.rpc_ep()); + + Native_capability thread_cap = Capability_space::import(tep->native_thread().ec_sel); + + Tukija_native_pd_client native_pd { pd.native_pd() }; + + Native_capability vm_exit_cap = + native_pd.alloc_rpc_cap(thread_cap, (addr_t)Nova_vcpu::_exit_entry, mtd.value()); + + Badge const badge { vcpu_id, exit_reason }; + native_pd.imprint_rpc_cap(vm_exit_cap, badge.value()); + + return reinterpret_cap_cast(vm_exit_cap); +} + + +Capability Nova_vcpu::_create_vcpu(Vm_connection &vm, + Vcpu_handler_base &handler) +{ + Thread &tep { *reinterpret_cast(&handler.rpc_ep()) }; + + return vm.create_vcpu(tep.cap()); +} + + +Nova_vcpu::Nova_vcpu(Env &env, Vm_connection &vm, Allocator &alloc, + Vcpu_handler_base &handler, Exit_config const &exit_config) +: + Rpc_client(_create_vcpu(vm, 
handler)), + _id_elem(*this, _vcpu_space()), _obj(handler), _alloc(alloc) +{ + /* + * XXX can be alleviated by managing ID values with Bit_allocator + * that allocates lowest free index in dynamic scenarios + */ + if (_id_elem.id().value > 0xffff) + throw Vcpu_id_space_exhausted(); + + _ep_handler = reinterpret_cast(&handler.rpc_ep()); + + uint16_t const vcpu_id = (uint16_t)_id_elem.id().value; + + Signal_context_capability dontcare_exit = + _create_exit_handler(env.pd(), handler, vcpu_id, 0x100, Tukija::Mtd(Tukija::Mtd::EIP)); + + for (unsigned i = 0; i < Tukija::NUM_INITIAL_VCPU_PT; ++i) { + Signal_context_capability signal_exit; + + Tukija::Mtd mtd = _portal_mtd(i, exit_config); + if (mtd.value()) { + signal_exit = _create_exit_handler(env.pd(), handler, vcpu_id, (uint16_t)i, mtd); + } else { + signal_exit = dontcare_exit; + } + + call(i, signal_exit); + } +} + + +/************** + ** vCPU API ** + **************/ + +void Vm_connection::Vcpu::_with_state(With_state::Ft const &fn) +{ + static_cast(_native_vcpu).with_state(fn); +} + + +Vm_connection::Vcpu::Vcpu(Vm_connection &vm, Allocator &alloc, + Vcpu_handler_base &handler, Exit_config const &exit_config) +: + _native_vcpu(*new (alloc) Nova_vcpu(vm._env, vm, alloc, handler, exit_config)) +{ + static_cast(_native_vcpu).startup(); +} diff --git a/repos/base-tukija/src/test/nova/ipc.cc b/repos/base-tukija/src/test/nova/ipc.cc new file mode 100644 index 0000000000..caddb39b8a --- /dev/null +++ b/repos/base-tukija/src/test/nova/ipc.cc @@ -0,0 +1,56 @@ +/* + * \brief Helper classes to make raw Nova IPC calls which can't be expressed + * via the Genode base RPC abstractions + * \author Alexander Boettcher + * + */ + +/* + * Copyright (C) 2016-2017 Genode Labs GmbH + * + * This file is part of the Genode OS framework, which is distributed + * under the terms of the GNU Affero General Public License version 3. 
+ */ + +/* Genode includes */ +#include + +/* test specific includes */ +#include "server.h" + +using namespace Test; + +long Test::cap_void_manual(Genode::Native_capability dst, + Genode::Native_capability arg1, + Genode::addr_t &local_reply) +{ + if (!arg1.valid()) + return Genode::Rpc_exception_code::INVALID_OBJECT; + + Genode::Thread * myself = Genode::Thread::myself(); + Nova::Utcb *utcb = reinterpret_cast(myself->utcb()); + + /* save original receive window */ + Nova::Crd orig_crd = utcb->crd_rcv; + + /* don't open receive window */ + utcb->crd_rcv = Nova::Obj_crd(); + /* not used on base-nova */ + utcb->msg()[0] = 0; + /* method number of RPC interface to be called on server side */ + utcb->msg()[1] = 0; + utcb->set_msg_word(2); + + Nova::Crd crd = Genode::Capability_space::crd(arg1); + if (!utcb->append_item(crd, 0, false, false, false)) + return Genode::Rpc_exception_code::INVALID_OBJECT; + + Genode::uint8_t res = Nova::call(dst.local_name()); + + /* restore original receive window */ + utcb->crd_rcv = orig_crd; + + local_reply = utcb->msg()[1]; + return (res == (Genode::uint8_t)Nova::NOVA_OK && utcb->msg_words() == 3 && utcb->msg()[2]) + ? utcb->msg()[0] : (long)Genode::Rpc_exception_code::INVALID_OBJECT; +} diff --git a/repos/base-tukija/src/test/nova/main.cc b/repos/base-tukija/src/test/nova/main.cc new file mode 100644 index 0000000000..060b5ce4c3 --- /dev/null +++ b/repos/base-tukija/src/test/nova/main.cc @@ -0,0 +1,763 @@ +/* + * \brief Some platform tests for the base-nova + * \author Alexander Boettcher + * \date 2015-01-02 + * + */ + +/* + * Copyright (C) 2015-2020 Genode Labs GmbH + * + * This file is part of the Genode OS framework, which is distributed + * under the terms of the GNU Affero General Public License version 3. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include + +#include +#include + +#include "server.h" + +static unsigned failed = 0; + +static unsigned check_pat = 1; + +using namespace Genode; + +void test_translate(Genode::Env &env) +{ + enum { STACK_SIZE = 4096 }; + static Rpc_entrypoint ep(&env.pd(), STACK_SIZE, "rpc_ep_translate", + Affinity::Location()); + + Test::Component component; + Test::Capability session_cap = ep.manage(&component); + Test::Client client(session_cap); + + Genode::addr_t local_name = Native_thread::INVALID_INDEX; + + long rpc = Test::cap_void_manual(session_cap, session_cap, local_name); + if (rpc != Genode::Rpc_exception_code::SUCCESS || + local_name == (addr_t)session_cap.local_name() || + local_name == (addr_t)Native_thread::INVALID_INDEX) + { + failed ++; + error(__func__, ": ipc call failed ", Hex(rpc)); + ep.dissolve(&component); + return; + } + + Genode::Native_capability copy1 = Capability_space::import(local_name); + + rpc = Test::cap_void_manual(session_cap, copy1, local_name); + if (rpc != Genode::Rpc_exception_code::SUCCESS || + local_name == (addr_t)copy1.local_name() || + local_name == (addr_t)Native_thread::INVALID_INDEX) + { + failed ++; + error(__func__, ": ipc call failed ", Hex(rpc)); + ep.dissolve(&component); + return; + } + + Genode::Native_capability copy2 = Capability_space::import(local_name); + + log("delegation session_cap->copy1->copy2 ", + session_cap, "->", copy1, "->", copy2); + + /* sanity checks translate which must work */ + Genode::Native_capability got_cap = client.cap_cap(copy2.local_name()); + if (got_cap.local_name() != copy1.local_name()) { + failed ++; + error(__LINE__, ":", __func__, " translate failed"); + ep.dissolve(&component); + return; + } + + got_cap = client.cap_cap(copy1.local_name()); + if (got_cap.local_name() != session_cap.local_name()) { + failed ++; + error(__LINE__, ":", __func__, " translate failed"); + 
ep.dissolve(&component); + return; + } + + got_cap = client.cap_cap(session_cap.local_name()); + if (got_cap.local_name() != session_cap.local_name()) { + failed ++; + error(__LINE__, ":", __func__, " translate failed"); + ep.dissolve(&component); + return; + } + + /** + * Test special revoke by make the intermediate cap (copy1) inaccessible + * and check that translate of copy2 get the right results. + */ + + Nova::Obj_crd crd_ses(copy1.local_name(), 0); + enum { SELF = true, LOCAL_REVOKE = false, LOCAL_PD = 0, NO_BLOCKING = 0, KEEP_IN_MDB = true }; + Nova::revoke(crd_ses, SELF, LOCAL_REVOKE, LOCAL_PD, NO_BLOCKING, KEEP_IN_MDB); + + crd_ses = Nova::Obj_crd(copy1.local_name(), 0); + Genode::uint8_t res = Nova::lookup(crd_ses); + if (res != Nova::NOVA_OK || !crd_ses.is_null()) { + failed ++; + error(__LINE__, " - lookup call failed err=", Hex(res)); + ep.dissolve(&component); + return; + } + + /* copy1 should be skipped and session_cap is the valid response */ + got_cap = client.cap_cap(copy2.local_name()); + if (got_cap.local_name() != session_cap.local_name()) { + failed ++; + error(__LINE__, ":", __func__, " translate failed"); + ep.dissolve(&component); + return; + } + + ep.dissolve(&component); +} + +void test_revoke(Genode::Env &env) +{ + enum { STACK_SIZE = 4096 }; + static Rpc_entrypoint ep(&env.pd(), STACK_SIZE, "rpc_ep_revoke", + Affinity::Location()); + + Test::Component component; + Test::Capability session_cap = ep.manage(&component); + Test::Client client(session_cap); + + Genode::addr_t local_name = Native_thread::INVALID_INDEX; + + long rpc = Test::cap_void_manual(session_cap, session_cap, local_name); + if (rpc != Genode::Rpc_exception_code::SUCCESS || + local_name == (addr_t)session_cap.local_name() || + local_name == (addr_t)Native_thread::INVALID_INDEX) + { + failed ++; + error("test_revoke ipc call failed ", Hex(rpc)); + ep.dissolve(&component); + return; + } + + Genode::Native_capability copy_session_cap = Capability_space::import(local_name); 
+ + local_name = Native_thread::INVALID_INDEX; + rpc = Test::cap_void_manual(copy_session_cap, copy_session_cap, local_name); + if (rpc != Genode::Rpc_exception_code::SUCCESS || + local_name == (addr_t)copy_session_cap.local_name() || + local_name == (addr_t)Native_thread::INVALID_INDEX || + local_name == (addr_t)session_cap.local_name()) + { + failed ++; + error("test_revoke ipc call failed ", Hex(rpc)); + ep.dissolve(&component); + return; + } + + Nova::Obj_crd crd_dst(local_name, 0); + Genode::uint8_t res = Nova::lookup(crd_dst); + if (res != Nova::NOVA_OK || crd_dst.base() != local_name || crd_dst.type() != 3 || + crd_dst.order() != 0) { + failed ++; + error(__LINE__, " - lookup call failed ", Hex(res)); + ep.dissolve(&component); + return; + } + + Nova::Obj_crd crd_ses(copy_session_cap.local_name(), 0); + res = Nova::lookup(crd_ses); + if (res != Nova::NOVA_OK || crd_ses.base() != (addr_t)copy_session_cap.local_name() + || crd_ses.type() != 3 || crd_ses.order() != 0) { + failed ++; + error(__LINE__, " - lookup call failed err=", Hex(res), " is_null=", crd_ses.is_null()); + ep.dissolve(&component); + return; + } + + res = Nova::lookup(crd_dst); + if (res != Nova::NOVA_OK || crd_dst.base() != local_name || crd_dst.type() != 3 || + crd_dst.order() != 0) { + failed ++; + error(__LINE__, " - lookup call failed err=", Hex(res), " is_null=", crd_dst.is_null()); + ep.dissolve(&component); + return; + } + + crd_ses = Nova::Obj_crd(copy_session_cap.local_name(), 0); + enum { SELF = true, LOCAL_REVOKE = false, LOCAL_PD = 0, NO_BLOCKING = 0, KEEP_IN_MDB = true }; + Nova::revoke(crd_ses, SELF, LOCAL_REVOKE, LOCAL_PD, NO_BLOCKING, KEEP_IN_MDB); + + crd_ses = Nova::Obj_crd(copy_session_cap.local_name(), 0); + res = Nova::lookup(crd_ses); + if (res != Nova::NOVA_OK || !crd_ses.is_null()) { + failed ++; + error(__LINE__, " - lookup call failed err=", Hex(res)); + ep.dissolve(&component); + return; + } + + res = Nova::lookup(crd_dst); + if (res != Nova::NOVA_OK || 
crd_dst.base() != local_name || crd_dst.type() != 3 || + crd_dst.order() != 0) { + failed ++; + error(__LINE__, " - lookup call failed err=", Hex(res), " is_null=", crd_dst.is_null()); + ep.dissolve(&component); + return; + } + + /* + * Request some other capability and place it on very same selector + * as used before by copy_session_cap + */ + Genode::Thread * myself = Genode::Thread::myself(); + request_native_ec_cap(myself->native_thread().exc_pt_sel + Nova::PT_SEL_PAGE_FAULT, + copy_session_cap.local_name()); + + /* check whether the requested cap before is valid and placed well */ + crd_ses = Nova::Obj_crd(copy_session_cap.local_name(), 0); + res = Nova::lookup(crd_ses); + if (res != Nova::NOVA_OK || crd_ses.base() != (addr_t)copy_session_cap.local_name() || + crd_ses.type() != 3 || crd_ses.order() != 0) { + failed ++; + error(__LINE__, " - lookup call failed err=", Hex(res), " is_null=", crd_ses.is_null()); + ep.dissolve(&component); + return; + } + + /* revoke it */ + Nova::revoke(crd_ses, SELF, LOCAL_REVOKE, LOCAL_PD, NO_BLOCKING); + + /* the delegated cap to the client should still be there */ + res = Nova::lookup(crd_dst); + if (res != Nova::NOVA_OK || crd_dst.base() != local_name || crd_dst.type() != 3 || + crd_dst.order() != 0) { + failed ++; + error(__LINE__, " - lookup call failed err=", Hex(res), " is_null=", crd_dst.is_null()); + ep.dissolve(&component); + return; + } + + /* kill the original session capability */ + ep.dissolve(&component); + /* manually: cap.free_rpc_cap(session_cap); */ + + /* the delegated cap to the client should be now invalid */ + res = Nova::lookup(crd_dst); + if (res != Nova::NOVA_OK || !crd_dst.is_null()) { + failed ++; + error(__LINE__, " - lookup call failed err=", Hex(res), " is_null=", crd_dst.is_null()); + return; + } +} + +static void portal_entry() +{ + Genode::Thread &myself = *Genode::Thread::myself(); + Nova::Utcb &utcb = *reinterpret_cast(myself.utcb()); + + Nova::Crd const snd_crd(utcb.msg()[0]); + + enum { + 
HOTSPOT = 0, USER_PD = false, HOST_PGT = false, SOLELY_MAP = false, + NO_DMA = false, EVILLY_DONT_WRITE_COMBINE = false + }; + + utcb.set_msg_word(0); + bool ok = utcb.append_item(snd_crd, HOTSPOT, USER_PD, HOST_PGT, + SOLELY_MAP, NO_DMA, EVILLY_DONT_WRITE_COMBINE); + (void)ok; + + Nova::reply(myself.stack_top()); +} + +void test_pat(Genode::Env &env) +{ + Genode::Thread &myself = *Genode::Thread::myself(); + Nova::Utcb &utcb = *reinterpret_cast<Nova::Utcb *>(myself.utcb()); + + /* read out the TSC frequency once */ + Attached_rom_dataspace const platform_info (env, "platform_info"); + Xml_node const hardware = platform_info.xml().sub_node("hardware"); + uint64_t const tsc_freq = hardware.sub_node("tsc").attribute_value("freq_khz", 1ULL); + + enum { DS_ORDER = 12, PAGE_4K = 12, DS_SIZE = 1ul << (DS_ORDER + PAGE_4K) }; + + Genode::Rm_connection rm(env); + Genode::Region_map_client rm_unused(rm.create(DS_SIZE)); + + Attached_dataspace ds_wc { env.rm(), env.ram().alloc (DS_SIZE, WRITE_COMBINED) }; + Attached_dataspace ds { env.rm(), env.ram().alloc (DS_SIZE) }; + Attached_dataspace remap { env.rm(), rm_unused.dataspace() }; + + auto const memory = addr_t(ds .local_addr<void>()); + auto const memory_wc = addr_t(ds_wc.local_addr<void>()); + auto const memory_remap = addr_t(remap.local_addr<void>()); + + static Rpc_entrypoint ep(&env.pd(), 4096 /* STACK */, "rpc_ep_pat", + Affinity::Location()); + + /* trigger mapping of whole area */ + for (auto offset = 0; offset < DS_SIZE; offset += (1u << PAGE_4K)) { + touch_read_write(reinterpret_cast<unsigned char *>(memory_wc + offset)); + touch_read_write(reinterpret_cast<unsigned char *>( memory + offset)); + } + + /* + * Establish memory mapping with evilly wrong mapping attributes + */ + Nova_native_pd_client native_pd { env.pd().native_pd() }; + Thread * thread = reinterpret_cast<Thread *>(&ep); + Native_capability const thread_cap + = Capability_space::import(thread->native_thread().ec_sel); + + Untyped_capability const pt = + native_pd.alloc_rpc_cap(thread_cap, (addr_t)portal_entry, 0 /* MTD */); + 
+ Nova::Rights const all(true, true, true); + Nova::Mem_crd const rcv_crd(memory_remap >> PAGE_4K, DS_ORDER, all); + Nova::Mem_crd const snd_crd(memory_wc >> PAGE_4K, DS_ORDER, all); + Nova::Crd const old_crd = utcb.crd_rcv; + + utcb.crd_rcv = rcv_crd; + utcb.set_msg_word(1); + utcb.msg()[0] = snd_crd.value(); + + uint8_t const res = Nova::call(pt.local_name()); + utcb.crd_rcv = old_crd; + + if (res != Nova::NOVA_OK) { + Genode::error("establishing memory failed ", res); + failed++; + } + + /* sanity check - touch re-mapped area */ + for (auto offset = 0; offset < DS_SIZE; offset += (1 << PAGE_4K)) + touch_read_write(reinterpret_cast(memory_remap + offset)); + + /* + * measure time to write to the memory + */ + memset(reinterpret_cast(memory), 0, DS_SIZE); + Trace::Timestamp normal_start = Trace::timestamp(); + memset(reinterpret_cast(memory), 0, DS_SIZE); + Trace::Timestamp normal_end = Trace::timestamp(); + + memset(reinterpret_cast(memory_wc), 0, DS_SIZE); + Trace::Timestamp map_start = Trace::timestamp(); + memset(reinterpret_cast(memory_wc), 0, DS_SIZE); + Trace::Timestamp map_end = Trace::timestamp(); + + memset(reinterpret_cast(memory_remap), 0, DS_SIZE); + Trace::Timestamp remap_start = Trace::timestamp(); + memset(reinterpret_cast(memory_remap), 0, DS_SIZE); + Trace::Timestamp remap_end = Trace::timestamp(); + + auto normal_run = normal_end - normal_start; + auto map_run = map_end - map_start; + auto remap_run = remap_end - remap_start; + + auto diff_run = map_run > remap_run ? 
map_run - remap_run : remap_run - map_run; + + log("memory non writecombined ", normal_run * 1000 / tsc_freq, " us"); + log("memory writecombined ", map_run * 1000 / tsc_freq, " us"); + log("memory writecombined remapped ", remap_run * 1000 / tsc_freq, " us"); + log("variance writecombined tests ", diff_run * 1000 / tsc_freq, " us"); + + if (check_pat && diff_run * 10 / tsc_freq) { + failed ++; + + error("PAT test considered failed - time difference above 100us"); + } + + Nova::revoke(Nova::Mem_crd(memory_remap >> PAGE_4K, DS_ORDER, all)); +} + +void test_server_oom(Genode::Env &env) +{ + using namespace Genode; + + enum { STACK_SIZE = 4096 }; + + static Rpc_entrypoint ep(&env.pd(), STACK_SIZE, "rpc_ep_oom", + Affinity::Location()); + + Test::Component component; + Test::Capability session_cap = ep.manage(&component); + Test::Client client(session_cap); + + /* case that during reply we get oom */ + for (unsigned i = 0; i < 20000; i++) { + Genode::Native_capability got_cap = client.void_cap(); + + if (!got_cap.valid()) { + error(i, " cap id ", Hex(got_cap.local_name()), " invalid"); + failed ++; + break; + } + + /* be evil and keep this cap by manually incrementing the ref count */ + Cap_index idx(cap_map().find(got_cap.local_name())); + idx.inc(); + + if (i % 5000 == 4999) + log("received ", i, ". cap"); + } + + /* XXX this code does does no longer work since the removal of 'solely_map' */ +#if 0 + + /* case that during send we get oom */ + for (unsigned i = 0; i < 20000; i++) { + /* be evil and switch translation off - server ever uses a new selector */ + Genode::Native_capability send_cap = session_cap; + send_cap.solely_map(); + + if (!client.cap_void(send_cap)) { + error("sending ", i, ". cap failed"); + failed ++; + break; + } + + if (i % 5000 == 4999) + log("sent ", i, ". 
cap"); + } +#endif + + ep.dissolve(&component); +} + +class Pager : private Genode::Thread { + + private: + + Native_capability _call_to_map { }; + Attached_ram_dataspace _ds; + static addr_t _ds_mem; + + void entry() override { } + + static void page_fault() + { + Thread * myself = Thread::myself(); + Nova::Utcb * utcb = reinterpret_cast(myself->utcb()); + + if (utcb->msg_words() != 1) { + Genode::error("unexpected"); + while (1) { } + } + + Genode::addr_t map_from = utcb->msg()[0]; +// Genode::error("pager: got map request ", Genode::Hex(map_from)); + + utcb->set_msg_word(0); + utcb->mtd = 0; + + Nova::Mem_crd crd_map(map_from >> 12, 0, Nova::Rights(true, true, true)); + bool res = utcb->append_item(crd_map, 0); + (void)res; + + Nova::reply(myself->stack_top()); + } + + public: + + Pager(Genode::Env &env, Location location) + : + Thread(env, "pager", 0x1000, location, Weight(), env.cpu()), + _ds(env.ram(), env.rm(), 4096) + { + _ds_mem = addr_t(_ds.local_addr()); + + touch_read(reinterpret_cast(_ds_mem)); + + /* request creation of a 'local' EC */ + Thread::native_thread().ec_sel = Native_thread::INVALID_INDEX - 1; + Thread::start(); + + Genode::warning("pager: created"); + + Native_capability thread_cap = + Capability_space::import(Thread::native_thread().ec_sel); + + Genode::Nova_native_pd_client native_pd(env.pd().native_pd()); + Nova::Mtd mtd (Nova::Mtd::QUAL | Nova::Mtd::EIP | Nova::Mtd::ESP); + Genode::addr_t entry = reinterpret_cast(page_fault); + + _call_to_map = native_pd.alloc_rpc_cap(thread_cap, entry, + mtd.value()); + } + + Native_capability call_to_map() { return _call_to_map; } + addr_t mem_st() { return _ds_mem; } +}; + +addr_t Pager::_ds_mem; + +class Cause_mapping : public Genode::Thread { + + private: + + Native_capability _call_to_map { }; + Rm_connection _rm; + Region_map_client _sub_rm; + Attached_dataspace _mem_ds; + addr_t _mem_nd = addr_t(_mem_ds.local_addr()); + addr_t _mem_st; + Nova::Rights const _mapping_rwx = {true, true, true}; + + 
public: + + unsigned volatile called = 0; + + Cause_mapping(Genode::Env &env, Native_capability call_to_map, + Genode::addr_t mem_st, Location location) + : + Thread(env, "mapper", 0x1000, location, Weight(), env.cpu()), + _call_to_map(call_to_map), + _rm(env), + _sub_rm(_rm.create(0x2000)), + _mem_ds(env.rm(), _sub_rm.dataspace()), + _mem_st(mem_st) + { } + + void entry() override + { + log("mapper: hello"); + + Nova::Utcb * nova_utcb = reinterpret_cast(utcb()); + + while (true) { + called = called + 1; +// log("mapper: request mapping ", Hex(_mem_nd), " ", called); + + Nova::Crd old = nova_utcb->crd_rcv; + +// touch_read((unsigned char *)_mem_st); + + nova_utcb->msg()[0] = _mem_st; + nova_utcb->set_msg_word(1); + nova_utcb->crd_rcv = Nova::Mem_crd(_mem_nd >> 12, 0, + _mapping_rwx); + Nova::call(_call_to_map.local_name()); + //touch_read((unsigned char *)_mem_nd); + + nova_utcb->msg()[0] = _mem_nd; + nova_utcb->set_msg_word(1); + nova_utcb->crd_rcv = Nova::Mem_crd((_mem_nd + 0x1000) >> 12, 0, + _mapping_rwx); + Nova::call(_call_to_map.local_name()); +// touch_read((unsigned char *)_mem_nd + 0x1000); + + nova_utcb->crd_rcv = old; + } + } + + void revoke_remote() + { + Nova::revoke(Nova::Mem_crd(_mem_nd >> 12, 0, _mapping_rwx), true); + } +}; + +void test_delegate_revoke_smp(Genode::Env &env) +{ + Affinity::Space cpus = env.cpu().affinity_space(); + Genode::log("detected ", cpus.width(), "x", cpus.height(), " " + "CPU", cpus.total() > 1 ? "s." 
: "."); + + Pager pager(env, cpus.location_of_index(1)); + Cause_mapping mapper(env, pager.call_to_map(), pager.mem_st(), + cpus.location_of_index(1)); + mapper.start(); + + for (unsigned i = 0; i < 2000; i++) { + mapper.revoke_remote(); + if (i % 1000 == 0) + Genode::log("main ", i, " ", mapper.called); + } +} + +class Greedy : public Genode::Thread { + + private: + + Genode::Env &_env; + + public: + + Greedy(Genode::Env &env) + : + Thread(env, "greedy", 0x1000), + _env(env) + { } + + void entry() override + { + log("starting"); + + enum { SUB_RM_SIZE = 1280 * 1024 * 1024 }; + + Genode::Ram_dataspace_capability ds = _env.ram().alloc(4096); + + Nova::Rights const mapping_rwx(true, true, true); + + log("cause mappings"); + + for (unsigned i = 0; i < SUB_RM_SIZE / 4096; i++) { + + addr_t const map_to = _env.rm().attach(ds, { }).convert( + [&] (Region_map::Range r) { return r.start; }, + [&] (Region_map::Attach_error) { + error("Greedy: failed to attach RAM dataspace"); + return 0UL; + } + ); + + /* check that we really got the mapping */ + touch_read(reinterpret_cast(map_to)); + + /* print status information in interval of 32M */ + if (i % 8192 == 0) { + log(Hex(i * 4096)); + /* trigger some work to see quota in kernel decreasing */ +// Nova::Rights rwx(true, true, true); +// Nova::revoke(Nova::Mem_crd((map_to - 32 * 1024 * 1024) >> 12, 12, rwx)); + } + } + log("still alive - done"); + } +}; + + +void check(uint8_t res, auto &&... 
args) +{ + String<128> msg(args...); + + if (res == Nova::NOVA_OK) { + error("res=", res, " ", msg, " - TEST FAILED"); + failed++; + } + else + log("res=", res, " ", msg); +} + + +struct Main +{ + Genode::Env &env; + Genode::Heap heap { env.ram(), env.rm() }; + + Main(Env &env); +}; + + +Main::Main(Env &env) : env(env) +{ + log("testing base-nova platform"); + + { + Attached_rom_dataspace config(env, "config"); + if (!config.xml().has_attribute("check_pat")) { + Genode::error("no check_pat attribute found"); + env.parent().exit(-__LINE__); + return; + } + check_pat = config.xml().attribute_value("check_pat", check_pat); + } + + Thread * myself = Thread::myself(); + if (!myself) { + env.parent().exit(-__LINE__); + return; + } + + /* upgrade available capability indices for this process */ + addr_t index = 512 * 1024; + static char local[128][sizeof(Cap_range)]; + + for (addr_t i = 0; i < sizeof(local) / sizeof (local[0]); i++) { + Cap_range &range = *construct_at(local[i], index); + + cap_map().insert(range); + + index = range.base() + range.elements(); + }; + + addr_t sel_pd = cap_map().insert(); + addr_t sel_ec = myself->native_thread().ec_sel; + addr_t sel_cap = cap_map().insert(); + addr_t handler = 0UL; + uint8_t res = 0; + + Nova::Mtd mtd(Nova::Mtd::ALL); + + if (sel_cap == ~0UL || sel_ec == ~0UL || sel_cap == ~0UL) { + env.parent().exit(-__LINE__); + return; + } + + /* negative syscall tests - they should not succeed */ + res = Nova::create_pt(sel_cap, sel_pd, sel_ec, mtd, handler); + check(res, "create_pt"); + + res = Nova::create_sm(sel_cap, sel_pd, 0); + check(res, "create_sm"); + + /* changing the badge of one of the portal must fail */ + for (unsigned i = 0; i < (1U << Nova::NUM_INITIAL_PT_LOG2); i++) { + addr_t sel_exc = myself->native_thread().exc_pt_sel + i; + res = Nova::pt_ctrl(sel_exc, 0xbadbad); + check(res, "pt_ctrl ", i); + } + + /* test PAT kernel feature */ + test_pat(env); + + /* test special revoke */ + test_revoke(env); + + /* test 
translate together with special revoke */ + test_translate(env); + + /* test SMP delegate/revoke - skip it on Qemu which takes too long */ + if (check_pat) + test_delegate_revoke_smp(env); + + /** + * Test to provoke out of memory during capability transfer of + * server/client. + * + * Set in hypervisor.ld the memory to a low value of about 1M to let + * trigger the test. + */ + test_server_oom(env); + + /* Test to provoke out of memory in kernel during interaction with core */ + static Greedy core_pagefault_oom(env); + core_pagefault_oom.start(); + core_pagefault_oom.join(); + + if (!failed) + log("Test finished"); + + env.parent().exit(-__LINE__); +} + + +void Component::construct(Genode::Env &env) { static Main main(env); } diff --git a/repos/base-tukija/src/test/nova/server.h b/repos/base-tukija/src/test/nova/server.h new file mode 100644 index 0000000000..e17c29a932 --- /dev/null +++ b/repos/base-tukija/src/test/nova/server.h @@ -0,0 +1,110 @@ +/* + * \brief Dummy server interface + * \author Alexander Boettcher + */ + +/* + * Copyright (C) 2013-2017 Genode Labs GmbH + * + * This file is part of the Genode OS framework, which is distributed + * under the terms of the GNU Affero General Public License version 3. 
+ */ + +#pragma once + +/* Genode includes */ +#include <base/log.h> +#include <base/rpc_server.h> +#include <base/rpc_client.h> +#include <session/session.h> + +/* NOVA includes */ +#include <nova/capability_space.h> +#include <nova/cap_map.h> + +namespace Test { + struct Session; + struct Client; + struct Component; + + long cap_void_manual(Genode::Native_capability dst, + Genode::Native_capability arg1, + Genode::addr_t &local_name); +} + +/** + * Test session interface definition + */ +struct Test::Session : Genode::Session +{ + static const char *service_name() { return "TEST"; } + + enum { CAP_QUOTA = 2 }; + + GENODE_RPC(Rpc_cap_void, bool, cap_void, Genode::Native_capability, + Genode::addr_t &); + GENODE_RPC(Rpc_void_cap, Genode::Native_capability, + void_cap); + GENODE_RPC(Rpc_cap_cap, Genode::Native_capability, cap_cap, + Genode::addr_t); + GENODE_RPC_INTERFACE(Rpc_cap_void, Rpc_void_cap, Rpc_cap_cap); +}; + +struct Test::Client : Genode::Rpc_client<Session> +{ + Client(Genode::Capability<Session> cap) : Rpc_client(cap) { } + + bool cap_void(Genode::Native_capability cap, Genode::addr_t &local_name) { + return call<Rpc_cap_void>(cap, local_name); } + + Genode::Native_capability void_cap() { + return call<Rpc_void_cap>(); } + + Genode::Native_capability cap_cap(Genode::addr_t cap) { + return call<Rpc_cap_cap>(cap); } +}; + +struct Test::Component : Genode::Rpc_object<Session, Component> +{ + /* Test to transfer an object capability during send */ + bool cap_void(Genode::Native_capability, Genode::addr_t &); + /* Test to transfer an object capability during reply */ + Genode::Native_capability void_cap(); + /* Test to transfer a specific object capability during reply */ + Genode::Native_capability cap_cap(Genode::addr_t); +}; + +namespace Test { using Capability = Genode::Capability<Session>; } + +/** + * Session implementation + */ +inline bool Test::Component::cap_void(Genode::Native_capability got_cap, + Genode::addr_t &local_name) +{ + local_name = got_cap.local_name(); + + if (!got_cap.valid()) + return false; + + /* be evil and keep this cap by manually incrementing the ref count */ + Genode::Cap_index idx(Genode::cap_map().find(got_cap.local_name())); 
+ idx.inc(); + + return true; +} + +inline Genode::Native_capability Test::Component::void_cap() { + Genode::Native_capability send_cap = cap(); + + /* XXX this code does does no longer work since the removal of 'solely_map' */ +#if 0 + /* be evil and switch translation off - client ever uses a new selector */ + send_cap.solely_map(); +#endif + + return send_cap; +} + +inline Genode::Native_capability Test::Component::cap_cap(Genode::addr_t cap) { + return Genode::Capability_space::import(cap); } diff --git a/repos/base-tukija/src/test/nova/target.mk b/repos/base-tukija/src/test/nova/target.mk new file mode 100644 index 0000000000..ca628489a6 --- /dev/null +++ b/repos/base-tukija/src/test/nova/target.mk @@ -0,0 +1,3 @@ +TARGET = test-nova +SRC_CC = main.cc ipc.cc +LIBS = base-tukija diff --git a/repos/base-tukija/src/timer/nova/main.cc b/repos/base-tukija/src/timer/nova/main.cc new file mode 100644 index 0000000000..712d498d0f --- /dev/null +++ b/repos/base-tukija/src/timer/nova/main.cc @@ -0,0 +1,415 @@ +/* + * \brief Timer driver for NOVA + * \author Norman Feske + * \date 2024-03-07 + */ + +/* + * Copyright (C) 2024 Genode Labs GmbH + * + * This file is part of the Genode OS framework, which is distributed + * under the terms of the GNU Affero General Public License version 3. 
+ */ + +/* Genode includes */ +#include +#include +#include +#include +#include +#include + +/* base-internal includes */ +#include + +/* NOVA includes */ +#include + +namespace Timer { + + using namespace Genode; + + struct Tsc { uint64_t tsc; }; + struct Tsc_rate; + struct Clock; + struct Device; + struct Alarm; + struct Root; + struct Session_component; + struct Main; + + using Alarms = Alarm_registry; +} + + +struct Timer::Clock +{ + uint64_t us; + + static constexpr uint64_t MASK = uint64_t(-1); + + uint64_t value() const { return us; } + + void print(Output &out) const { Genode::print(out, us); } +}; + + +struct Timer::Tsc_rate +{ + unsigned long khz; + + static Tsc_rate from_xml(Xml_node const &node) + { + unsigned long khz = 0; + node.with_optional_sub_node("hardware", [&] (Xml_node const &hardware) { + hardware.with_optional_sub_node("tsc", [&] (Xml_node const &tsc) { + khz = tsc.attribute_value("freq_khz", 0UL); }); }); + return { khz }; + } + + Tsc tsc_from_clock(Clock clock) const + { + return { .tsc = (clock.us*khz)/1000 }; + } + + Clock clock_from_tsc(Tsc tsc) const + { + return { .us = (khz > 0) ? 
(tsc.tsc*1000)/khz : 0 }; + } +}; + + +class Timer::Device +{ + public: + + struct Wakeup_dispatcher : Interface + { + virtual void dispatch_device_wakeup() = 0; + }; + + struct Deadline : Clock { }; + + static constexpr Deadline infinite_deadline { uint64_t(-1) }; + + private: + + Tsc_rate const _tsc_rate; + + struct Waiter : Thread + { + struct Sel /* NOVA kernel-capability selector */ + { + addr_t value; + + static Sel init_signal_sem(Thread &thread) + { + auto const exc_base = thread.native_thread().exc_pt_sel; + + request_signal_sm_cap(exc_base + Tukija::PT_SEL_PAGE_FAULT, + exc_base + Tukija::SM_SEL_SIGNAL); + + return { exc_base + Tukija::SM_SEL_SIGNAL }; + } + + auto down(Tsc deadline) + { + return Tukija::sm_ctrl(value, Tukija::SEMAPHORE_DOWN, deadline.tsc); + } + + auto up() + { + return Tukija::sm_ctrl(value, Tukija::SEMAPHORE_UP); + } + }; + + Wakeup_dispatcher &_dispatcher; + + Sel _wakeup_sem { }; /* must be initialize by waiter thread */ + + Mutex _mutex { }; /* protect '_deadline' */ + Tsc _deadline { uint64_t(-1) }; + + Waiter(Env &env, Wakeup_dispatcher &dispatcher) + : + Thread(env, "waiter", 8*1024*sizeof(addr_t)), _dispatcher(dispatcher) + { + start(); + } + + void entry() override + { + _wakeup_sem = Sel::init_signal_sem(*this); + + for (;;) { + + auto deadline_tsc = [&] + { + Mutex::Guard guard(_mutex); + return _deadline; + }; + + /* + * Block until timeout fires or it gets canceled. + * When triggered (not canceled by 'update_deadline'), + * call 'dispatch_device_wakeup'. 
+ */ + if (_wakeup_sem.down(deadline_tsc()) == Tukija::NOVA_TIMEOUT) + _dispatcher.dispatch_device_wakeup(); + } + } + + void update_deadline(Tsc const deadline) + { + Mutex::Guard guard(_mutex); + + bool const sooner_than_scheduled = (deadline.tsc < _deadline.tsc); + + _deadline = deadline; + + if (sooner_than_scheduled) + if (_wakeup_sem.up() != Tukija::NOVA_OK) + error("unable to cancel already scheduled timeout"); + } + } _waiter; + + public: + + Device(Env &env, Tsc_rate tsc_rate, Wakeup_dispatcher &dispatcher) + : _tsc_rate(tsc_rate), _waiter(env, dispatcher) { } + + Clock now() const + { + return _tsc_rate.clock_from_tsc( Tsc { Trace::timestamp() }); + } + + void update_deadline(Deadline deadline) + { + _waiter.update_deadline(_tsc_rate.tsc_from_clock(deadline)); + } +}; + + +struct Timer::Alarm : Alarms::Element +{ + Session_component &session; + + Alarm(Alarms &alarms, Session_component &session, Clock time) + : + Alarms::Element(alarms, *this, time), session(session) + { } + + void print(Output &out) const; +}; + + +static Timer::Device::Deadline next_deadline(Timer::Alarms &alarms) +{ + using namespace Timer; + + return alarms.soonest(Clock { 0 }).convert( + [&] (Clock soonest) -> Device::Deadline { + + /* scan alarms for a cluster nearby the soonest */ + uint64_t const MAX_DELAY_US = 250; + Device::Deadline result { soonest.us }; + alarms.for_each_in_range(soonest, Clock { soonest.us + MAX_DELAY_US }, + [&] (Alarm const &alarm) { + result.us = max(result.us, alarm.time.us); }); + + return result; + }, + [&] (Alarms::None) { return Device::infinite_deadline; }); +} + + +struct Timer::Session_component : Session_object +{ + Alarms &_alarms; + Mutex &_alarms_mutex; + Device &_device; + + Signal_context_capability _sigh { }; + + Clock const _creation_time = _device.now(); + + uint64_t _local_now_us() const { return _device.now().us - _creation_time.us; } + + struct Period { uint64_t us; }; + + Constructible _period { }; + Constructible _alarm { }; + + 
Session_component(Env &env, + Resources const &resources, + Label const &label, + Diag const &diag, + Alarms &alarms, + Mutex &alarms_mutex, + Device &device) + : + Session_object(env.ep(), resources, label, diag), + _alarms(alarms), _alarms_mutex(alarms_mutex), _device(device) + { } + + ~Session_component() + { + Mutex::Guard guard(_alarms_mutex); + + _alarm.destruct(); + } + + /** + * Called by Device::Wakeup_dispatcher with '_alarms_mutex' taken + */ + void handle_wakeup() + { + if (_sigh.valid()) + Signal_transmitter(_sigh).submit(); + + if (_period.constructed()) { + Clock const next = _alarm.constructed() + ? Clock { _alarm->time.us + _period->us } + : Clock { _device.now().us + _period->us }; + + _alarm.construct(_alarms, *this, next); + + } else /* response of 'trigger_once' */ { + _alarm.destruct(); + } + } + + /****************************** + ** Timer::Session interface ** + ******************************/ + + void trigger_once(uint64_t rel_us) override + { + Mutex::Guard guard(_alarms_mutex); + + _period.destruct(); + _alarm.destruct(); + + Clock const now = _device.now(); + + rel_us = max(rel_us, 250u); + _alarm.construct(_alarms, *this, Clock { now.us + rel_us }); + + _device.update_deadline(next_deadline(_alarms)); + } + + void trigger_periodic(uint64_t period_us) override + { + Mutex::Guard guard(_alarms_mutex); + + _period.destruct(); + _alarm.destruct(); + + if (period_us) { + period_us = max(period_us, 1000u); + _period.construct(period_us); + handle_wakeup(); + } + + _device.update_deadline(next_deadline(_alarms)); + } + + void sigh(Signal_context_capability sigh) override { _sigh = sigh; } + + uint64_t elapsed_ms() const override { return _local_now_us()/1000; } + uint64_t elapsed_us() const override { return _local_now_us(); } + + void msleep(uint64_t) override { } + void usleep(uint64_t) override { } +}; + + +struct Timer::Root : public Root_component +{ + private: + + Env &_env; + Alarms &_alarms; + Mutex &_alarms_mutex; + Device &_device; + 
+ protected: + + Session_component *_create_session(const char *args) override + { + return new (md_alloc()) + Session_component(_env, + session_resources_from_args(args), + session_label_from_args(args), + session_diag_from_args(args), + _alarms, _alarms_mutex, _device); + } + + void _upgrade_session(Session_component *s, const char *args) override + { + s->upgrade(ram_quota_from_args(args)); + s->upgrade(cap_quota_from_args(args)); + } + + void _destroy_session(Session_component *session) override + { + Genode::destroy(md_alloc(), session); + } + + public: + + Root(Env &env, Allocator &md_alloc, + Alarms &alarms, Mutex &alarms_mutex, Device &device) + : + Root_component(&env.ep().rpc_ep(), &md_alloc), + _env(env), _alarms(alarms), _alarms_mutex(alarms_mutex), _device(device) + { } +}; + + +void Timer::Alarm::print(Output &out) const { Genode::print(out, session.label()); } + + +struct Timer::Main : Device::Wakeup_dispatcher +{ + Env &_env; + + Attached_rom_dataspace _platform_info { _env, "platform_info" }; + + Tsc_rate const _tsc_rate = Tsc_rate::from_xml(_platform_info.xml()); + + Device _device { _env, _tsc_rate, *this }; + + Mutex _alarms_mutex { }; + Alarms _alarms { }; + + Sliced_heap _sliced_heap { _env.ram(), _env.rm() }; + + Root _root { _env, _sliced_heap, _alarms, _alarms_mutex, _device }; + + /** + * Device::Wakeup_dispatcher + */ + void dispatch_device_wakeup() override + { + Mutex::Guard guard(_alarms_mutex); + + /* handle and remove pending alarms */ + while (_alarms.with_any_in_range({ 0 }, _device.now(), [&] (Alarm &alarm) { + alarm.session.handle_wakeup(); })); + + /* schedule next wakeup */ + _device.update_deadline(next_deadline(_alarms)); + } + + Main(Genode::Env &env) : _env(env) + { + if (_tsc_rate.khz == 0) + warning("could not obtain TSC calibration from platform_info ROM"); + + _env.parent().announce(_env.ep().manage(_root)); + } +}; + + +void Component::construct(Genode::Env &env) { static Timer::Main inst(env); } diff --git 
a/repos/base-tukija/src/timer/nova/target.mk b/repos/base-tukija/src/timer/nova/target.mk new file mode 100644 index 0000000000..38ed03eb76 --- /dev/null +++ b/repos/base-tukija/src/timer/nova/target.mk @@ -0,0 +1,6 @@ +TARGET = tukija_timer +INC_DIR += $(PRG_DIR) +SRC_CC += main.cc +LIBS += base + +REP_INC_DIR += src/include diff --git a/tool/run/boot_dir/tukija b/tool/run/boot_dir/tukija new file mode 100644 index 0000000000..3a1aaf5948 --- /dev/null +++ b/tool/run/boot_dir/tukija @@ -0,0 +1,160 @@ +source [genode_dir]/tool/run/bender.inc + +proc binary_name_ld_lib_so { } { return "ld-tukija.lib.so" } +proc binary_name_core_a { } { return "core-tukija.a" } +proc binary_name_timer { } { return "tukija_timer" } + +proc kernel_files { } { return hypervisor } +proc kernel_output { } { return "serial" } + +proc boot_output { } { return "serial" } + +proc grub_menuentry { } { return "'Genode on NOVA'" } + +proc run_boot_string { } { + return "\nHypervisor NOVA " +} + +proc apply_microcode { } { return false } + +proc core_link_address { } { return "0x100000" } + + +proc core_ld_opts { } { + set ret { -Wl,-T } + lappend ret "-Wl,[genode_dir]/repos/base/src/ld/genode.ld" + lappend ret "-Wl,[genode_dir]/repos/base-tukija/src/core/core-bss.ld" + return $ret +} + +## +# Populate directory with binaries on NOVA +# +proc run_boot_dir {binaries} { + + # + # Build kernel-specific targets if needed + # + # If the run scripts imports the base-tukija binary archive, [run_dir] is + # already populated, so we can skip the build. 
+ # + set kernel_arg "" + set core_arg "" + set ld_arg "" + if {![file exists [run_dir]/genode/hypervisor]} { set kernel_arg kernel/tukija } + if {![file exists [run_dir]/genode/core-tukija.a]} { set core_arg core/tukija } + if {![file exists [run_dir]/genode/ld.lib.so]} { set ld_arg lib/ld-tukija } + + set targets "$kernel_arg $core_arg $ld_arg" + if {[llength $targets]} { build $targets } + + if {$kernel_arg != ""} { copy_file bin/hypervisor [run_dir]/genode/hypervisor } + if {$core_arg != ""} { copy_file bin/core-tukija.a [run_dir]/genode/core-tukija.a } + if {$ld_arg != ""} { copy_file bin/ld-tukija.lib.so [run_dir]/genode/ld.lib.so } + + # + # Collect contents of the boot image + # + build_core_image $binaries + + exec mkdir -p [run_dir]/boot + copy_file [run_dir]/genode/hypervisor [run_dir]/boot/hypervisor + + remove_genode_dir + + exec mv [run_dir]/image.elf [run_dir]/boot/image.elf + + # + # Setup bender options, e.g. serial and applying Intel HWP mode + # + set options_bender "[boot_output] [bender_intel_hwp_mode_option]" + + if {[apply_microcode]} { + exec cp bin/micro.code [run_dir]/boot/ + append options_bender " microcode" + } + + if {[have_include "image/iso"] || [have_include "image/disk"] || [have_include image/uefi]} { + # + # Compress Genode image, to be uncompressed by GRUB + # + exec gzip -n [run_dir]/boot/image.elf + + if {[have_include "image/disk"]} { + install_disk_bootloader_to_run_dir + } + + if {[have_include "image/iso"]} { + install_iso_bootloader_to_run_dir + } + + if {[have_include image/uefi]} { + install_uefi_bootloader_to_run_dir + append options_bender " serial_fallback" + } + + # + # Generate GRUB2 config file + # + set fh [create_header_grub2_config] + + puts $fh "menuentry [grub_menuentry] {" + puts $fh " insmod multiboot2" + puts $fh " insmod gzio" + puts $fh " multiboot2 /boot/bender $options_bender" + if {[apply_microcode]} { + puts $fh " module2 /boot/micro.code micro.code" + } + puts $fh " module2 /boot/hypervisor 
hypervisor iommu_intel iommu_amd [kernel_output]" + puts $fh " module2 /boot/image.elf.gz image.elf" + puts $fh "}" + close $fh + } + + # + # Build image + # + run_image + + if {[have_include "load/tftp"]} { + # + # Install PXE bootloader pulsar + # + install_pxe_bootloader_to_run_dir + + # + # Generate pulsar config file + # + set fh [open "[run_dir]/config-52-54-00-12-34-56" "WRONLY CREAT TRUNC"] + puts $fh " exec /boot/bender $options_bender" + if {[apply_microcode]} { + puts $fh " load /boot/micro.code micro.code" + } + puts $fh " load /boot/hypervisor iommu_intel iommu_amd [kernel_output]" + puts $fh " load /boot/image.elf" + close $fh + + generate_tftp_config + } + + if {[have_include "load/ipxe"]} { + create_ipxe_config + update_ipxe_boot_dir + create_symlink_for_iso + } +} + + +## +# Base source archive within depot +# +proc base_src { } { + + if {[have_spec x86]} { return base-tukija } + + global specs + + puts stderr "Test requires base-tukija kernel archive, which is missing for this build configuration" + puts stderr " SPECS=\"$specs\"" + exit 0 +}