diff --git a/repos/base-hw/src/core/kernel/thread.cc b/repos/base-hw/src/core/kernel/thread.cc
index 5d17a3a559..a5f3b37593 100644
--- a/repos/base-hw/src/core/kernel/thread.cc
+++ b/repos/base-hw/src/core/kernel/thread.cc
@@ -329,6 +329,23 @@ void Thread::_call_start_thread()
 	/* join protection domain */
 	thread._pd = (Pd *) user_arg_3();
 	thread._ipc_init(*(Native_utcb *)user_arg_4(), *this);
+
+	/*
+	 * Sanity check core threads!
+	 *
+	 * Currently, the model assumes that there is only one core
+	 * entrypoint, which serves requests, and which can destroy
+	 * threads and pds. If this changes, we have to inform all
+	 * cpus about pd destructions to remove their page-tables
+	 * from the hardware in case that a core-thread running with
+	 * that same pd is currently active. Therefore, warn if the
+	 * semantic changes, and additional core threads are started
+	 * across cpu cores.
+	 */
+	if (thread._pd == &_core_pd && cpu.id() != _cpu_pool.primary_cpu().id())
+		Genode::raw("Error: do not start core threads"
+		            " on CPU cores different than boot cpu");
+
 	thread._become_active();
 }
 
@@ -447,6 +464,18 @@ void Thread::_call_delete_thread()
 }
 
 
+void Thread::_call_delete_pd()
+{
+	Genode::Kernel_object<Pd> & pd =
+		*(Genode::Kernel_object<Pd>*)user_arg_1();
+
+	if (_cpu->active(pd->mmu_regs))
+		_cpu->switch_to(_core_pd.mmu_regs);
+
+	_call_delete<Pd>();
+}
+
+
 void Thread::_call_await_request_msg()
 {
 	if (_ipc_node.can_await_request()) {
@@ -822,7 +851,7 @@ void Thread::_call()
 		          *(Genode::Platform_pd *) user_arg_3(),
 		          _addr_space_id_alloc);
 		return;
-	case call_id_delete_pd():           _call_delete<Pd>(); return;
+	case call_id_delete_pd():           _call_delete_pd(); return;
 	case call_id_new_signal_receiver(): _call_new<Signal_receiver>(); return;
 	case call_id_new_signal_context():
 		_call_new<Signal_context>(*(Signal_receiver*) user_arg_2(), user_arg_3());
diff --git a/repos/base-hw/src/core/kernel/thread.h b/repos/base-hw/src/core/kernel/thread.h
index 30f774163d..93e45c3303 100644
--- a/repos/base-hw/src/core/kernel/thread.h
+++ b/repos/base-hw/src/core/kernel/thread.h
@@ -234,6 +234,7 @@ class Kernel::Thread : private Kernel::Object, public Cpu_job, private Timeout
 		void _call_restart_thread();
 		void _call_yield_thread();
 		void _call_delete_thread();
+		void _call_delete_pd();
 		void _call_await_request_msg();
 		void _call_send_request_msg();
 		void _call_send_reply_msg();
diff --git a/repos/base-hw/src/core/spec/arm/cpu.cc b/repos/base-hw/src/core/spec/arm/cpu.cc
index f0a7fa4ea8..7c8c722e31 100644
--- a/repos/base-hw/src/core/spec/arm/cpu.cc
+++ b/repos/base-hw/src/core/spec/arm/cpu.cc
@@ -87,24 +87,25 @@ void Arm_cpu::mmu_fault_status(Fsr::access_t fsr, Thread_fault & fault)
 }
 
 
-void Arm_cpu::switch_to(Arm_cpu::Context&, Arm_cpu::Mmu_context & o)
+bool Arm_cpu::active(Arm_cpu::Mmu_context & ctx)
 {
-	if (o.cidr == 0) return;
+	return (Cidr::read() == ctx.cidr);
+}
 
-	Cidr::access_t cidr = Cidr::read();
-	if (cidr != o.cidr) {
-		/**
-		 * First switch to global mappings only to prevent
-		 * that wrong branch predicts result due to ASID
-		 * and Page-Table not being in sync (see ARM RM B 3.10.4)
-		 */
-		Cidr::write(0);
-		Cpu::synchronization_barrier();
-		Ttbr0::write(o.ttbr0);
-		Cpu::synchronization_barrier();
-		Cidr::write(o.cidr);
-		Cpu::synchronization_barrier();
-	}
+
+void Arm_cpu::switch_to(Arm_cpu::Mmu_context & ctx)
+{
+	/**
+	 * First switch to global mappings only to prevent
+	 * that wrong branch predicts result due to ASID
+	 * and Page-Table not being in sync (see ARM RM B 3.10.4)
+	 */
+	Cidr::write(0);
+	Cpu::synchronization_barrier();
+	Ttbr0::write(ctx.ttbr0);
+	Cpu::synchronization_barrier();
+	Cidr::write(ctx.cidr);
+	Cpu::synchronization_barrier();
 }
diff --git a/repos/base-hw/src/core/spec/arm/cpu_support.h b/repos/base-hw/src/core/spec/arm/cpu_support.h
index bba89ce42a..33d63848be 100644
--- a/repos/base-hw/src/core/spec/arm/cpu_support.h
+++ b/repos/base-hw/src/core/spec/arm/cpu_support.h
@@ -104,7 +104,8 @@ struct Genode::Arm_cpu : public Hw::Arm_cpu
 			else Tlbiall::write(0);
 		}
 
-		void switch_to(Context&, Mmu_context & o);
+		bool active(Mmu_context &);
+		void switch_to(Mmu_context &);
 
 		static void mmu_fault(Context & c, Kernel::Thread_fault & fault);
 		static void mmu_fault_status(Fsr::access_t fsr,
diff --git a/repos/base-hw/src/core/spec/arm/kernel/thread.cc b/repos/base-hw/src/core/spec/arm/kernel/thread.cc
index efc67e510a..838a730c79 100644
--- a/repos/base-hw/src/core/spec/arm/kernel/thread.cc
+++ b/repos/base-hw/src/core/spec/arm/kernel/thread.cc
@@ -67,7 +67,8 @@ void Kernel::Thread::Tlb_invalidation::execute() { };
 
 void Thread::proceed(Cpu & cpu)
 {
-	cpu.switch_to(*regs, pd().mmu_regs);
+	if (!cpu.active(pd().mmu_regs) && type() != CORE)
+		cpu.switch_to(pd().mmu_regs);
 
 	regs->cpu_exception = cpu.stack_start();
 	kernel_to_user_context_switch((static_cast<Cpu::Context*>(&*regs)),
diff --git a/repos/base-hw/src/core/spec/arm_v8/cpu.cc b/repos/base-hw/src/core/spec/arm_v8/cpu.cc
index a06e142f1b..617b7fd70b 100644
--- a/repos/base-hw/src/core/spec/arm_v8/cpu.cc
+++ b/repos/base-hw/src/core/spec/arm_v8/cpu.cc
@@ -26,12 +26,15 @@ Genode::Cpu::Context::Context(bool privileged)
 }
 
 
-void Genode::Cpu::switch_to(Context&, Mmu_context & mmu_context)
+bool Genode::Cpu::active(Mmu_context & mmu_context)
 {
-	if (mmu_context.id() == 0) return;
+	return (mmu_context.id() == Ttbr::Asid::get(Ttbr0_el1::read()));
+}
 
-	if (mmu_context.id() != Ttbr::Asid::get(Ttbr0_el1::read()))
-		Ttbr0_el1::write(mmu_context.ttbr);
+
+void Genode::Cpu::switch_to(Mmu_context & mmu_context)
+{
+	Ttbr0_el1::write(mmu_context.ttbr);
 }
diff --git a/repos/base-hw/src/core/spec/arm_v8/cpu.h b/repos/base-hw/src/core/spec/arm_v8/cpu.h
index a9ca159ea3..b9ff4dbde6 100644
--- a/repos/base-hw/src/core/spec/arm_v8/cpu.h
+++ b/repos/base-hw/src/core/spec/arm_v8/cpu.h
@@ -99,7 +99,8 @@ struct Genode::Cpu : Hw::Arm_64_cpu
 				return Ttbr::Asid::get(ttbr) & 0xffff;
 			}
 		};
 
-		void switch_to(Context&, Mmu_context &);
+		bool active(Mmu_context &);
+		void switch_to(Mmu_context &);
 
 		static void mmu_fault(Context &, Kernel::Thread_fault &);
diff --git a/repos/base-hw/src/core/spec/arm_v8/kernel/thread.cc b/repos/base-hw/src/core/spec/arm_v8/kernel/thread.cc
index e3bb8df148..b724f84e5d 100644
--- a/repos/base-hw/src/core/spec/arm_v8/kernel/thread.cc
+++ b/repos/base-hw/src/core/spec/arm_v8/kernel/thread.cc
@@ -108,7 +108,9 @@ bool Kernel::Pd::invalidate_tlb(Cpu &, addr_t addr, size_t size)
 
 void Thread::proceed(Cpu & cpu)
 {
-	cpu.switch_to(*regs, pd().mmu_regs);
+	if (!cpu.active(pd().mmu_regs) && type() != CORE)
+		cpu.switch_to(pd().mmu_regs);
+
 	kernel_to_user_context_switch((static_cast<Cpu::Context*>(&*regs)),
 	                              (void*)cpu.stack_start());
 }
diff --git a/repos/base-hw/src/core/spec/cortex_a15/cpu.h b/repos/base-hw/src/core/spec/cortex_a15/cpu.h
index 3a1ad078cf..ca1a447ec2 100644
--- a/repos/base-hw/src/core/spec/cortex_a15/cpu.h
+++ b/repos/base-hw/src/core/spec/cortex_a15/cpu.h
@@ -115,11 +115,14 @@ class Genode::Cpu : public Arm_v7_cpu
 		 */
 		static unsigned executing_id() { return Mpidr::Aff_0::get(Mpidr::read()); }
 
-
-		void switch_to(Context &, Mmu_context & mmu_context)
+		bool active(Mmu_context & mmu_context)
 		{
-			if (mmu_context.id() && (Ttbr0_64bit::read() != mmu_context.ttbr0))
-				Ttbr0_64bit::write(mmu_context.ttbr0);
+			return (Ttbr0_64bit::read() == mmu_context.ttbr0);
+		}
+
+		void switch_to(Mmu_context & mmu_context)
+		{
+			Ttbr0_64bit::write(mmu_context.ttbr0);
 		}
 };
diff --git a/repos/base-hw/src/core/spec/riscv/cpu.cc b/repos/base-hw/src/core/spec/riscv/cpu.cc
index e182afe444..e2e77f2aa5 100644
--- a/repos/base-hw/src/core/spec/riscv/cpu.cc
+++ b/repos/base-hw/src/core/spec/riscv/cpu.cc
@@ -53,22 +53,16 @@ Mmu_context::~Mmu_context()
 }
 
 
+bool Genode::Cpu::active(Mmu_context & context)
+{
+	return Satp::read() == context.satp;
+}
+
+
 void Genode::Cpu::switch_to(Mmu_context & context)
 {
-	/*
-	 * The sstatus register defines to which privilege level
-	 * the machin returns when doing an exception return
-	 */
-	bool user = Satp::Asid::get(context.satp);
-	Sstatus::access_t v = Sstatus::read();
-	Sstatus::Spp::set(v, user ? 0 : 1);
-	Sstatus::write(v);
-
-	/* change the translation table when necessary */
-	if (user) {
-		Satp::write(context.satp);
-		sfence();
-	}
+	Satp::write(context.satp);
+	sfence();
 }
diff --git a/repos/base-hw/src/core/spec/riscv/cpu.h b/repos/base-hw/src/core/spec/riscv/cpu.h
index 86820b3504..d719abcb1c 100644
--- a/repos/base-hw/src/core/spec/riscv/cpu.h
+++ b/repos/base-hw/src/core/spec/riscv/cpu.h
@@ -97,6 +97,7 @@ class Genode::Cpu : public Hw::Riscv_cpu
 
 		static void invalidate_tlb_by_pid(unsigned const /* pid */) { sfence(); }
 
+		bool active(Mmu_context & context);
 		void switch_to(Mmu_context & context);
 
 		static void mmu_fault(Context & c, Kernel::Thread_fault & f);
diff --git a/repos/base-hw/src/core/spec/riscv/kernel/thread.cc b/repos/base-hw/src/core/spec/riscv/kernel/thread.cc
index 15f568af26..10814fde15 100644
--- a/repos/base-hw/src/core/spec/riscv/kernel/thread.cc
+++ b/repos/base-hw/src/core/spec/riscv/kernel/thread.cc
@@ -100,7 +100,16 @@ void Kernel::Thread::_call_cache_invalidate_data_region() { }
 
 void Kernel::Thread::proceed(Cpu & cpu)
 {
-	cpu.switch_to(_pd->mmu_regs);
+	/*
+	 * The sstatus register defines to which privilege level
+	 * the machine returns when doing an exception return
+	 */
+	Cpu::Sstatus::access_t v = Cpu::Sstatus::read();
+	Cpu::Sstatus::Spp::set(v, (type() == USER) ? 0 : 1);
+	Cpu::Sstatus::write(v);
+
+	if (!cpu.active(pd().mmu_regs) && type() != CORE)
+		cpu.switch_to(_pd->mmu_regs);
 
 	asm volatile("csrw sscratch, %1 \n"
 	             "mv x31, %0       \n"
diff --git a/repos/base-hw/src/core/spec/x86_64/cpu.cc b/repos/base-hw/src/core/spec/x86_64/cpu.cc
index 5ef6bc2cff..689e4f9e3d 100644
--- a/repos/base-hw/src/core/spec/x86_64/cpu.cc
+++ b/repos/base-hw/src/core/spec/x86_64/cpu.cc
@@ -111,11 +111,20 @@ extern void const * const kernel_stack;
 extern Genode::size_t const kernel_stack_size;
 
 
-void Genode::Cpu::switch_to(Context & context, Mmu_context &mmu_context)
+bool Genode::Cpu::active(Mmu_context &mmu_context)
 {
-	if ((context.cs != 0x8) && (mmu_context.cr3 != Cr3::read()))
-		Cr3::write(mmu_context.cr3);
+	return (mmu_context.cr3 == Cr3::read());
+}
+
+
+void Genode::Cpu::switch_to(Mmu_context &mmu_context)
+{
+	Cr3::write(mmu_context.cr3);
+}
+
 
+void Genode::Cpu::switch_to(Context & context)
+{
 	tss.ist[0] = (addr_t)&context + sizeof(Genode::Cpu_state);
 
 	addr_t const stack_base = reinterpret_cast<addr_t>(&kernel_stack);
diff --git a/repos/base-hw/src/core/spec/x86_64/cpu.h b/repos/base-hw/src/core/spec/x86_64/cpu.h
index 35073dcc93..781fa38373 100644
--- a/repos/base-hw/src/core/spec/x86_64/cpu.h
+++ b/repos/base-hw/src/core/spec/x86_64/cpu.h
@@ -126,7 +126,10 @@ class Genode::Cpu : public Hw::X86_64_cpu
 		 *
 		 * \param context  next CPU context
 		 */
-		void switch_to(Context & context, Mmu_context &mmu_context);
+		void switch_to(Context & context);
+
+		bool active(Mmu_context &mmu_context);
+		void switch_to(Mmu_context &mmu_context);
 
 		static void mmu_fault(Context & regs, Kernel::Thread_fault & fault);
diff --git a/repos/base-hw/src/core/spec/x86_64/kernel/thread.cc b/repos/base-hw/src/core/spec/x86_64/kernel/thread.cc
index 9df97877b1..df3332a4de 100644
--- a/repos/base-hw/src/core/spec/x86_64/kernel/thread.cc
+++ b/repos/base-hw/src/core/spec/x86_64/kernel/thread.cc
@@ -43,7 +43,10 @@ void Kernel::Thread::_call_cache_invalidate_data_region() { }
 
 void Kernel::Thread::proceed(Cpu & cpu)
 {
-	cpu.switch_to(*regs, pd().mmu_regs);
+	if (!cpu.active(pd().mmu_regs) && type() != CORE)
+		cpu.switch_to(pd().mmu_regs);
+
+	cpu.switch_to(*regs);
 
 	asm volatile("fxrstor (%1)    \n"
 	             "mov  %0, %%rsp  \n"