diff --git a/repos/base-foc/lib/mk/spec/x86/base-foc-common.mk b/repos/base-foc/lib/mk/spec/x86/base-foc-common.mk
index c5c10663d4..f3a625c676 100644
--- a/repos/base-foc/lib/mk/spec/x86/base-foc-common.mk
+++ b/repos/base-foc/lib/mk/spec/x86/base-foc-common.mk
@@ -1,9 +1 @@
-#
-# \brief Portions of base library shared by core and non-core processes
-# \author Norman Feske
-# \date 2013-02-14
-#
-
-vpath vm_session.cc $(REP_DIR)/src/lib/base/x86
-
include $(REP_DIR)/lib/mk/base-foc-common.inc
diff --git a/repos/base-foc/lib/mk/spec/x86/base-foc.mk b/repos/base-foc/lib/mk/spec/x86/base-foc.mk
index 820c2c2707..e0e42da9cc 100644
--- a/repos/base-foc/lib/mk/spec/x86/base-foc.mk
+++ b/repos/base-foc/lib/mk/spec/x86/base-foc.mk
@@ -1,3 +1,5 @@
LIBS += timeout
+vpath vm.cc $(REP_DIR)/src/lib/base/x86
+
include $(REP_DIR)/lib/mk/base-foc.inc
diff --git a/repos/base-foc/src/core/include/platform_thread.h b/repos/base-foc/src/core/include/platform_thread.h
index 322b0e60a2..55e5b10afe 100644
--- a/repos/base-foc/src/core/include/platform_thread.h
+++ b/repos/base-foc/src/core/include/platform_thread.h
@@ -158,7 +158,8 @@ class Genode::Platform_thread : Interface
/**
* Make thread to vCPU
*/
- Foc::l4_cap_idx_t setup_vcpu(unsigned, Cap_mapping const &, Cap_mapping &);
+ Foc::l4_cap_idx_t setup_vcpu(unsigned, Cap_mapping const &,
+ Cap_mapping &, Region_map::Local_addr &);
/************************
diff --git a/repos/base-foc/src/core/include/vm_session_component.h b/repos/base-foc/src/core/include/vm_session_component.h
index d07fa75b2e..848f728102 100644
--- a/repos/base-foc/src/core/include/vm_session_component.h
+++ b/repos/base-foc/src/core/include/vm_session_component.h
@@ -5,7 +5,7 @@
*/
/*
- * Copyright (C) 2018 Genode Labs GmbH
+ * Copyright (C) 2018-2021 Genode Labs GmbH
*
* This file is part of the Genode OS framework, which is distributed
* under the terms of the GNU Affero General Public License version 3.
@@ -15,8 +15,10 @@
#define _CORE__VM_SESSION_COMPONENT_H_
/* Genode includes */
+#include
#include
#include
+#include
#include
/* core includes */
@@ -24,30 +26,46 @@
#include
#include
#include
+#include
-namespace Genode { class Vm_session_component; struct Vcpu; }
+namespace Genode
+{
+ class Vm_session_component;
+ struct Vcpu;
+
+ enum { MAX_VCPU_IDS = (Platform::VCPU_VIRT_EXT_END -
+ Platform::VCPU_VIRT_EXT_START) / L4_PAGESIZE };
+ typedef Bit_allocator<MAX_VCPU_IDS> Vcpu_id_allocator;
+}
-struct Genode::Vcpu : List<Vcpu>::Element
+struct Genode::Vcpu : Rpc_object<Vm_session::Native_vcpu, Vcpu>
{
private:
- Constrained_ram_allocator &_ram_alloc;
- Cap_quota_guard &_cap_alloc;
- Ram_dataspace_capability _ds_cap { };
- Vm_session::Vcpu_id const _id;
- Cap_mapping _recall { true };
+ Rpc_entrypoint &_ep;
+ Constrained_ram_allocator &_ram_alloc;
+ Cap_quota_guard &_cap_alloc;
+ Vcpu_id_allocator &_vcpu_ids;
+ Cap_mapping _recall { true };
+ Foc::l4_cap_idx_t _task_index_client { };
+ Region_map::Local_addr _foc_vcpu_state { };
public:
- Vcpu(Constrained_ram_allocator &ram_alloc,
- Cap_quota_guard &cap_alloc, Vm_session::Vcpu_id const id);
+ Vcpu(Rpc_entrypoint &, Constrained_ram_allocator &, Cap_quota_guard &,
+ Platform_thread &, Cap_mapping &, Vcpu_id_allocator &);
~Vcpu();
- bool match(Vm_session::Vcpu_id const id) const { return id.id == _id.id; }
- Dataspace_capability ds_cap() { return _ds_cap; }
Cap_mapping &recall_cap() { return _recall; }
+
+ /*******************************
+ ** Native_vcpu RPC interface **
+ *******************************/
+
+ Foc::l4_cap_idx_t task_index() const { return _task_index_client; }
+ Region_map::Local_addr foc_vcpu_state() const { return _foc_vcpu_state; }
};
@@ -60,17 +78,19 @@ class Genode::Vm_session_component
{
private:
- typedef Constrained_ram_allocator Con_ram_allocator;
+ typedef Constrained_ram_allocator Con_ram_allocator;
typedef Allocator_avl_tpl<Rm_region> Avl_region;
Rpc_entrypoint &_ep;
Con_ram_allocator _constrained_md_ram_alloc;
Sliced_heap _heap;
- Avl_region _map { &_heap };
- List<Vcpu> _vcpus { };
+ Avl_region _map { &_heap };
Cap_mapping _task_vcpu { true };
- unsigned _id_alloc { 0 };
+ Vcpu_id_allocator _vcpu_ids { };
+ Registry<Registered<Vcpu>> _vcpus { };
+
+ /* helpers for vm_session_common.cc */
void _attach_vm_memory(Dataspace_component &, addr_t, Attach_attr);
void _detach_vm_memory(addr_t, size_t);
@@ -94,25 +114,20 @@ class Genode::Vm_session_component
** Region_map_detach interface **
*********************************/
- void detach(Region_map::Local_addr) override;
- void unmap_region(addr_t, size_t) override;
+ /* used on destruction of attached dataspaces */
+ void detach(Region_map::Local_addr) override; /* vm_session_common.cc */
+ void unmap_region(addr_t, size_t) override; /* vm_session_common.cc */
/**************************
** Vm session interface **
**************************/
- Dataspace_capability _cpu_state(Vcpu_id);
+ Capability<Vm_session::Native_vcpu> create_vcpu(Thread_capability);
+ void attach_pic(addr_t) override { /* unused on Fiasco.OC */ }
- void _exception_handler(Signal_context_capability, Vcpu_id) { }
- void _run(Vcpu_id) { }
- void _pause(Vcpu_id) { }
- void attach(Dataspace_capability, addr_t, Attach_attr) override;
- void attach_pic(addr_t) override { }
- void detach(addr_t, size_t) override;
- Vcpu_id _create_vcpu(Thread_capability);
- Capability<Native_vcpu> _native_vcpu(Vcpu_id) {
- return Capability<Native_vcpu>(); }
+ void attach(Dataspace_capability, addr_t, Attach_attr) override; /* vm_session_common.cc */
+ void detach(addr_t, size_t) override; /* vm_session_common.cc */
};
#endif /* _CORE__VM_SESSION_COMPONENT_H_ */
diff --git a/repos/base-foc/src/core/platform_thread.cc b/repos/base-foc/src/core/platform_thread.cc
index 6d795732b5..f6eef12cfb 100644
--- a/repos/base-foc/src/core/platform_thread.cc
+++ b/repos/base-foc/src/core/platform_thread.cc
@@ -349,8 +349,9 @@ Platform_thread::~Platform_thread()
}
Foc::l4_cap_idx_t Platform_thread::setup_vcpu(unsigned const vcpu_id,
- Cap_mapping const &task_vcpu,
- Cap_mapping &vcpu_irq)
+ Cap_mapping const &task_vcpu,
+ Cap_mapping &vcpu_irq,
+ Region_map::Local_addr &vcpu_state)
{
if (!_platform_pd)
return Foc::L4_INVALID_CAP;
@@ -358,8 +359,11 @@ Foc::l4_cap_idx_t Platform_thread::setup_vcpu(unsigned const vcpu_id,
if (vcpu_id >= (Platform::VCPU_VIRT_EXT_END - Platform::VCPU_VIRT_EXT_START) / L4_PAGESIZE)
return Foc::L4_INVALID_CAP;
- addr_t const vcpu_addr = Platform::VCPU_VIRT_EXT_START + L4_PAGESIZE*vcpu_id;
- l4_fpage_t const vm_page = l4_fpage( vcpu_addr, L4_PAGESHIFT, L4_FPAGE_RW);
+ /* vCPU state attached by kernel syscall to client PD directly */
+ vcpu_state = Region_map::Local_addr(Platform::VCPU_VIRT_EXT_START +
+ L4_PAGESIZE * vcpu_id);
+
+ l4_fpage_t const vm_page = l4_fpage(vcpu_state, L4_PAGESHIFT, L4_FPAGE_RW);
l4_msgtag_t msg = l4_task_add_ku_mem(_platform_pd->native_task().data()->kcap(), vm_page);
if (l4_error(msg)) {
@@ -367,7 +371,7 @@ Foc::l4_cap_idx_t Platform_thread::setup_vcpu(unsigned const vcpu_id,
return Foc::L4_INVALID_CAP;
}
- msg = l4_thread_vcpu_control_ext(_thread.local.data()->kcap(), vcpu_addr);
+ msg = l4_thread_vcpu_control_ext(_thread.local.data()->kcap(), vcpu_state);
if (l4_error(msg)) {
error("vcpu_control_exit failed ", l4_error(msg));
return Foc::L4_INVALID_CAP;
diff --git a/repos/base-foc/src/core/vm_session_component.cc b/repos/base-foc/src/core/vm_session_component.cc
index c6ee8ca350..73df144d85 100644
--- a/repos/base-foc/src/core/vm_session_component.cc
+++ b/repos/base-foc/src/core/vm_session_component.cc
@@ -5,7 +5,7 @@
*/
/*
- * Copyright (C) 2018 Genode Labs GmbH
+ * Copyright (C) 2018-2021 Genode Labs GmbH
*
* This file is part of the Genode OS framework, which is distributed
* under the terms of the GNU Affero General Public License version 3.
@@ -26,6 +26,67 @@
using namespace Genode;
+struct Vcpu_creation_error : Exception { };
+
+
+Vcpu::Vcpu(Rpc_entrypoint &ep,
+ Constrained_ram_allocator &ram_alloc,
+ Cap_quota_guard &cap_alloc,
+ Platform_thread &thread,
+ Cap_mapping &task_cap,
+ Vcpu_id_allocator &vcpu_alloc)
+:
+ _ep(ep),
+ _ram_alloc(ram_alloc),
+ _cap_alloc(cap_alloc),
+ _vcpu_ids(vcpu_alloc)
+{
+ Foc::l4_msgtag_t msg = l4_factory_create_irq(Foc::L4_BASE_FACTORY_CAP,
+ _recall.local.data()->kcap());
+ if (l4_error(msg)) {
+ Genode::error("vcpu irq creation failed", l4_error(msg));
+ throw Vcpu_creation_error();
+ }
+
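+ /*
+ * Reserve a vCPU id and turn the thread into a vCPU; on failure the id
+ * is released and the recall IRQ is detached again.
+ */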
+ try {
+ unsigned const vcpu_id = _vcpu_ids.alloc();
+ _task_index_client = thread.setup_vcpu(vcpu_id, task_cap, recall_cap(),
+ _foc_vcpu_state);
+ if (_task_index_client == Foc::L4_INVALID_CAP) {
+ vcpu_alloc.free(vcpu_id);
+ if (l4_error(Foc::l4_irq_detach(_recall.local.data()->kcap())))
+ error("cannot detach IRQ");
+ throw Vcpu_creation_error();
+ }
+ } catch (Vcpu_id_allocator::Out_of_indices) {
+ throw Vcpu_creation_error();
+ }
+
+ _ep.manage(this);
+}
+
+
+Vcpu::~Vcpu()
+{
+ _ep.dissolve(this);
+
+ if (_task_index_client != Foc::L4_INVALID_CAP) {
+ if (l4_error(Foc::l4_irq_detach(_recall.local.data()->kcap())))
+ error("cannot detach IRQ");
+ }
+
+ if (_foc_vcpu_state) {
+ unsigned const vcpu_id = ((addr_t)_foc_vcpu_state -
+ Platform::VCPU_VIRT_EXT_START) / L4_PAGESIZE;
+ _vcpu_ids.free(vcpu_id);
+ }
+}
+
+
+/**************************
+ ** Vm_session_component **
+ **************************/
+
Vm_session_component::Vm_session_component(Rpc_entrypoint &ep,
Resources resources,
Label const &,
@@ -41,7 +102,7 @@ Vm_session_component::Vm_session_component(Rpc_entrypoint &ep,
_constrained_md_ram_alloc(ram, _ram_quota_guard(), _cap_quota_guard()),
_heap(_constrained_md_ram_alloc, local_rm)
{
- _cap_quota_guard().withdraw(Cap_quota{1});
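+ /* the reserved capability is acknowledged only after successful construction (see below) */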
+ Cap_quota_guard::Reservation caps(_cap_quota_guard(), Cap_quota{1});
using namespace Foc;
l4_msgtag_t msg = l4_factory_create_vm(L4_BASE_FACTORY_CAP,
@@ -54,15 +115,15 @@ Vm_session_component::Vm_session_component(Rpc_entrypoint &ep,
/* configure managed VM area */
_map.add_range(0, 0UL - 0x1000);
_map.add_range(0UL - 0x1000, 0x1000);
+
+ caps.acknowledge();
}
Vm_session_component::~Vm_session_component()
{
- for (;Vcpu * vcpu = _vcpus.first();) {
- _vcpus.remove(vcpu);
- destroy(_heap, vcpu);
- }
+ _vcpus.for_each([&] (Vcpu &vcpu) {
+ destroy(_heap, &vcpu); });
/* detach all regions */
while (true) {
@@ -76,97 +137,34 @@ Vm_session_component::~Vm_session_component()
}
-Vcpu::Vcpu(Constrained_ram_allocator &ram_alloc,
- Cap_quota_guard &cap_alloc,
- Vm_session::Vcpu_id const id)
-:
- _ram_alloc(ram_alloc),
- _cap_alloc(cap_alloc),
- _id(id)
+Capability<Vm_session::Native_vcpu> Vm_session_component::create_vcpu(Thread_capability cap)
{
- try {
- /* create ds for vCPU state */
- _ds_cap = _ram_alloc.alloc(0x1000, Cache_attribute::CACHED);
- } catch (...) {
- throw;
- }
-
- Foc::l4_msgtag_t msg = l4_factory_create_irq(Foc::L4_BASE_FACTORY_CAP,
- _recall.local.data()->kcap());
- if (l4_error(msg)) {
- _ram_alloc.free(_ds_cap);
- Genode::error("vcpu irq creation failed", l4_error(msg));
- throw 1;
- }
-}
-
-Vcpu::~Vcpu()
-{
- if (_ds_cap.valid())
- _ram_alloc.free(_ds_cap);
-}
-
-Vm_session::Vcpu_id Vm_session_component::_create_vcpu(Thread_capability cap)
-{
- Vcpu_id ret;
-
if (!cap.valid())
- return ret;
+ return { };
- auto lambda = [&] (Cpu_thread_component *thread) {
+ /* allocate vCPU object */
+ Vcpu * vcpu = nullptr;
+
+ _ep.apply(cap, [&] (Cpu_thread_component *thread) {
if (!thread)
return;
- /* allocate vCPU object */
- Vcpu * vcpu = nullptr;
try {
- vcpu = new (_heap) Vcpu(_constrained_md_ram_alloc,
- _cap_quota_guard(),
- Vcpu_id {_id_alloc});
-
- Foc::l4_cap_idx_t task =
- thread->platform_thread().setup_vcpu(_id_alloc, _task_vcpu, vcpu->recall_cap());
-
- if (task == Foc::L4_INVALID_CAP)
- throw 0;
-
- _ep.apply(vcpu->ds_cap(), [&] (Dataspace_component *ds) {
- if (!ds)
- throw 1;
- /* tell client where to find task cap */
- *reinterpret_cast<Foc::l4_cap_idx_t *>(ds->phys_addr()) = task;
- });
- } catch (int) {
- if (vcpu)
- destroy(_heap, vcpu);
-
+ vcpu = new (_heap) Registered<Vcpu>(_vcpus,
+ _ep,
+ _constrained_md_ram_alloc,
+ _cap_quota_guard(),
+ thread->platform_thread(),
+ _task_vcpu,
+ _vcpu_ids);
+ } catch (Vcpu_creation_error) {
return;
- } catch (...) {
- if (vcpu)
- destroy(_heap, vcpu);
-
- throw;
}
+ });
- _vcpus.insert(vcpu);
- ret.id = _id_alloc++;
- };
-
- _ep.apply(cap, lambda);
- return ret;
+ return vcpu ? vcpu->cap() : Capability<Vm_session::Native_vcpu> {};
}
-Dataspace_capability Vm_session_component::_cpu_state(Vcpu_id const vcpu_id)
-{
- for (Vcpu *vcpu = _vcpus.first(); vcpu; vcpu = vcpu->next()) {
- if (!vcpu->match(vcpu_id))
- continue;
-
- return vcpu->ds_cap();
- }
-
- return Dataspace_capability();
-}
void Vm_session_component::_attach_vm_memory(Dataspace_component &dsc,
addr_t const guest_phys,
@@ -202,6 +200,7 @@ void Vm_session_component::_attach_vm_memory(Dataspace_component &dsc,
}
}
+
void Vm_session_component::_detach_vm_memory(addr_t guest_phys, size_t size)
{
Flexpage_iterator flex(guest_phys, size, guest_phys, size, 0);
diff --git a/repos/base-foc/src/include/foc_native_vcpu/foc_native_vcpu.h b/repos/base-foc/src/include/foc_native_vcpu/foc_native_vcpu.h
new file mode 100644
index 0000000000..4a3b9bd303
--- /dev/null
+++ b/repos/base-foc/src/include/foc_native_vcpu/foc_native_vcpu.h
@@ -0,0 +1,31 @@
+/*
+ * \brief Fiasco.OC vCPU RPC interface
+ * \author Christian Helmuth
+ * \author Alexander Böttcher
+ * \date 2021-01-19
+ */
+
+/*
+ * Copyright (C) 2021 Genode Labs GmbH
+ *
+ * This file is part of the Genode OS framework, which is distributed
+ * under the terms of the GNU Affero General Public License version 3.
+ */
+
+#ifndef _INCLUDE__FOC_NATIVE_VCPU__FOC_NATIVE_VCPU_H_
+#define _INCLUDE__FOC_NATIVE_VCPU__FOC_NATIVE_VCPU_H_
+
+#include
+#include
+
+#include <foc/syscall.h>
+
+struct Genode::Vm_session::Native_vcpu : Interface
+{
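+ /*
+ * 'foc_vcpu_state' returns the client-local address of the kernel-provided
+ * vCPU state page, 'task_index' the capability selector of the vCPU task.
+ */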
+ GENODE_RPC(Rpc_foc_vcpu_state, Region_map::Local_addr, foc_vcpu_state);
+ GENODE_RPC(Rpc_task_index, Foc::l4_cap_idx_t, task_index);
+
+ GENODE_RPC_INTERFACE(Rpc_task_index, Rpc_foc_vcpu_state);
+};
+
+#endif /* _INCLUDE__FOC_NATIVE_VCPU__FOC_NATIVE_VCPU_H_ */
diff --git a/repos/base-foc/src/lib/base/x86/vm_session.cc b/repos/base-foc/src/lib/base/x86/vm.cc
similarity index 67%
rename from repos/base-foc/src/lib/base/x86/vm_session.cc
rename to repos/base-foc/src/lib/base/x86/vm.cc
index 769c12866d..22a40a4859 100644
--- a/repos/base-foc/src/lib/base/x86/vm_session.cc
+++ b/repos/base-foc/src/lib/base/x86/vm.cc
@@ -5,7 +5,7 @@
*/
/*
- * Copyright (C) 2018 Genode Labs GmbH
+ * Copyright (C) 2018-2021 Genode Labs GmbH
*
* This file is part of the Genode OS framework, which is distributed
* under the terms of the GNU Affero General Public License version 3.
@@ -16,9 +16,12 @@
#include
#include
#include
-#include
-#include
+#include
#include
+#include
+#include
+
+#include
/* Fiasco.OC includes */
#include
@@ -33,6 +36,8 @@ namespace Foc {
using namespace Genode;
+using Exit_config = Vm_connection::Exit_config;
+
enum Virt { VMX, SVM, UNKNOWN };
@@ -48,13 +53,42 @@ static uint32_t svm_features()
static bool svm_np() { return svm_features() & (1U << 0); }
-struct Vcpu;
+/***********************************
+ ** Fiasco.OC vCPU implementation **
+ ***********************************/
+
+struct Foc_vcpu;
+
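+/*
+ * Client of the core-provided Native_vcpu RPC interface, one per vCPU
+ */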
+struct Foc_native_vcpu_rpc : Rpc_client<Vm_session::Native_vcpu>, Noncopyable
+{
+ private:
+
+ Capability<Vm_session::Native_vcpu> _create_vcpu(Vm_connection &vm,
+ Thread_capability &cap)
+ {
+ return vm.with_upgrade([&] () {
+ return vm.call<Vm_session::Rpc_create_vcpu>(cap); });
+ }
+
+ public:
+
+ Foc_vcpu &vcpu;
+
+ Foc_native_vcpu_rpc(Vm_connection &vm, Thread_capability &cap,
+ Foc_vcpu &vcpu)
+ :
+ Rpc_client<Vm_session::Native_vcpu>(_create_vcpu(vm, cap)),
+ vcpu(vcpu)
+ { }
+
+ Foc::l4_cap_idx_t task_index() { return call<Rpc_task_index>(); }
+
+ Foc::l4_vcpu_state_t * foc_vcpu_state() {
+ return static_cast<Foc::l4_vcpu_state_t *>(call<Rpc_foc_vcpu_state>()); }
+};
-static Registry<Registered<Vcpu> > vcpus;
-
-
-struct Vcpu : Thread
+struct Foc_vcpu : Thread, Noncopyable
{
private:
@@ -204,20 +238,19 @@ struct Vcpu : Thread
addr_t const vmcs_cr4_mask { CR4_VMX };
addr_t const vmcs_cr4_set { CR4_VMX };
- Signal_context_capability _signal;
+ Vcpu_handler_base &_vcpu_handler;
+ Blockade _startup { };
Semaphore _wake_up { 0 };
- Semaphore &_handler_ready;
- Allocator &_alloc;
- Vm_session_client::Vcpu_id _id { Vm_session_client::Vcpu_id::INVALID };
- addr_t _state { 0 };
- addr_t _task { 0 };
- enum Virt const _vm_type;
uint64_t _tsc_offset { 0 };
+ enum Virt const _vm_type;
bool _show_error_unsupported_pdpte { true };
bool _show_error_unsupported_tpr { true };
- uint8_t _fpu_ep[512] __attribute__((aligned(0x10)));
- uint8_t _fpu_vcpu[512] __attribute__((aligned(0x10)));
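+ /*
+ * The Vcpu_state now lives in the vCPU thread object instead of a
+ * dataspace provided by core.
+ */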
+ Vcpu_state _vcpu_state __attribute__((aligned(0x10))) { };
+ Vcpu_state::Fpu::State _fpu_ep __attribute__((aligned(0x10))) { };
+ Vcpu_state::Fpu::State _fpu_vcpu __attribute__((aligned(0x10))) { };
+
+ Constructible<Foc_native_vcpu_rpc> _rpc { };
enum {
VMEXIT_STARTUP = 0xfe,
@@ -238,6 +271,10 @@ struct Vcpu : Thread
void entry() override
{
+ /* trigger that thread is up */
+ _startup.wakeup();
+
+ /* wait until vcpu is assigned to us */
_wake_up.down();
{
@@ -250,9 +287,8 @@ struct Vcpu : Thread
_state_request = NONE;
}
- /* reserved ranged for state of vCPUs - see platform.cc */
- addr_t const vcpu_addr = 0x1000 + 0x1000 * _id.id;
- Foc::l4_vcpu_state_t * const vcpu = reinterpret_cast<Foc::l4_vcpu_state_t *>(vcpu_addr);
+ Foc::l4_vcpu_state_t * const vcpu = _rpc->foc_vcpu_state();
+ addr_t const vcpu_addr = reinterpret_cast<addr_t>(vcpu);
if (!l4_vcpu_check_version(vcpu))
error("vCPU version mismatch kernel vs user-land - ",
@@ -264,10 +300,10 @@ struct Vcpu : Thread
void * vmcs = reinterpret_cast<void *>(vcpu_addr + L4_VCPU_OFFSET_EXT_STATE);
/* set vm page table */
- vcpu->user_task = _task;
+ vcpu->user_task = _rpc->task_index();
- Vm_state &state = *reinterpret_cast<Vm_state *>(_state);
- state = Vm_state {};
+ Vcpu_state &state = _vcpu_state;
+ state.discharge();
/* initial startup VM exit to get valid VM state */
if (_vm_type == Virt::VMX)
@@ -277,16 +313,16 @@ struct Vcpu : Thread
_read_amd_state(state, vmcb, vcpu);
state.exit_reason = VMEXIT_STARTUP;
- Signal_transmitter(_signal).submit();
+ Signal_transmitter(_vcpu_handler.signal_cap()).submit();
- _handler_ready.down();
+ _vcpu_handler.ready_semaphore().down();
_wake_up.down();
/*
* Fiasco.OC peculiarities
*/
if (_vm_type == Virt::SVM)
- state.efer.value(state.efer.value() | AMD_SVM_ENABLE);
+ state.efer.charge(state.efer.value() | AMD_SVM_ENABLE);
if (_vm_type == Virt::SVM) {
vmcb->control_area.intercept_instruction0 = vmcb_ctrl0;
@@ -305,7 +341,7 @@ struct Vcpu : Thread
Foc::l4_vm_vmx_write(vmcs, Vmcs::CR0_MASK, vmcs_cr0_mask);
Foc::l4_vm_vmx_write(vmcs, Vmcs::CR4_MASK, vmcs_cr4_mask);
Foc::l4_vm_vmx_write(vmcs, Vmcs::CR4_SHADOW, 0);
- state.cr4.value(vmcs_cr4_set);
+ state.cr4.charge(vmcs_cr4_set);
enum {
EXIT_SAVE_EFER = 1U << 20,
@@ -348,29 +384,29 @@ struct Vcpu : Thread
_write_intel_state(state, vmcs, vcpu);
/* save FPU state of this thread and restore state of vCPU */
- asm volatile ("fxsave %0" : "=m" (*_fpu_ep));
- if (state.fpu.valid()) {
- state.fpu.value([&] (uint8_t *fpu, size_t const) {
- asm volatile ("fxrstor %0" : : "m" (*fpu) : "memory");
+ asm volatile ("fxsave %0" : "=m" (_fpu_ep));
+ if (state.fpu.charged()) {
+ state.fpu.charge([&] (Vcpu_state::Fpu::State &fpu) {
+ asm volatile ("fxrstor %0" : : "m" (fpu) : "memory");
});
} else
- asm volatile ("fxrstor %0" : : "m" (*_fpu_vcpu) : "memory");
+ asm volatile ("fxrstor %0" : : "m" (_fpu_vcpu) : "memory");
/* tell Fiasco.OC to run the vCPU */
l4_msgtag_t tag = l4_thread_vcpu_resume_start();
tag = l4_thread_vcpu_resume_commit(L4_INVALID_CAP, tag);
/* save FPU state of vCPU and restore state of this thread */
- state.fpu.value([&] (uint8_t *fpu, size_t const) {
- asm volatile ("fxsave %0" : "=m" (*fpu) :: "memory");
- asm volatile ("fxsave %0" : "=m" (*_fpu_vcpu) :: "memory");
+ state.fpu.charge([&] (Vcpu_state::Fpu::State &fpu) {
+ asm volatile ("fxsave %0" : "=m" (fpu) :: "memory");
+ asm volatile ("fxsave %0" : "=m" (_fpu_vcpu) :: "memory");
});
- asm volatile ("fxrstor %0" : : "m" (*_fpu_ep) : "memory");
+ asm volatile ("fxrstor %0" : : "m" (_fpu_ep) : "memory");
/* got VM exit or interrupted by asynchronous signal */
uint64_t reason = 0;
- state = Vm_state {};
+ state.discharge();
if (_vm_type == Virt::SVM) {
reason = vmcb->control_area.exitcode;
@@ -425,13 +461,13 @@ struct Vcpu : Thread
}
/* notify VM handler */
- Signal_transmitter(_signal).submit();
+ Signal_transmitter(_vcpu_handler.signal_cap()).submit();
/*
* Wait until VM handler is really really done,
* otherwise we lose state.
*/
- _handler_ready.down();
+ _vcpu_handler.ready_semaphore().down();
}
}
@@ -447,54 +483,54 @@ struct Vcpu : Thread
uint16_t _convert_ar_16(addr_t value) {
return ((value & 0x1f000) >> 4) | (value & 0xff); }
- void _read_intel_state(Vm_state &state, void *vmcs,
+ void _read_intel_state(Vcpu_state &state, void *vmcs,
Foc::l4_vcpu_state_t *vcpu)
{
- state.ax.value(vcpu->r.ax);
- state.cx.value(vcpu->r.cx);
- state.dx.value(vcpu->r.dx);
- state.bx.value(vcpu->r.bx);
+ state.ax.charge(vcpu->r.ax);
+ state.cx.charge(vcpu->r.cx);
+ state.dx.charge(vcpu->r.dx);
+ state.bx.charge(vcpu->r.bx);
- state.bp.value(vcpu->r.bp);
- state.di.value(vcpu->r.di);
- state.si.value(vcpu->r.si);
+ state.bp.charge(vcpu->r.bp);
+ state.di.charge(vcpu->r.di);
+ state.si.charge(vcpu->r.si);
- state.flags.value(Foc::l4_vm_vmx_read(vmcs, Vmcs::FLAGS));
+ state.flags.charge(Foc::l4_vm_vmx_read(vmcs, Vmcs::FLAGS));
- state.sp.value(Foc::l4_vm_vmx_read(vmcs, Vmcs::SP));
+ state.sp.charge(Foc::l4_vm_vmx_read(vmcs, Vmcs::SP));
- state.ip.value(Foc::l4_vm_vmx_read(vmcs, Vmcs::IP));
- state.ip_len.value(Foc::l4_vm_vmx_read(vmcs, Vmcs::INST_LEN));
+ state.ip.charge(Foc::l4_vm_vmx_read(vmcs, Vmcs::IP));
+ state.ip_len.charge(Foc::l4_vm_vmx_read(vmcs, Vmcs::INST_LEN));
- state.dr7.value(Foc::l4_vm_vmx_read(vmcs, Vmcs::DR7));
+ state.dr7.charge(Foc::l4_vm_vmx_read(vmcs, Vmcs::DR7));
#ifdef __x86_64__
- state.r8.value(vcpu->r.r8);
- state.r9.value(vcpu->r.r9);
- state.r10.value(vcpu->r.r10);
- state.r11.value(vcpu->r.r11);
- state.r12.value(vcpu->r.r12);
- state.r13.value(vcpu->r.r13);
- state.r14.value(vcpu->r.r14);
- state.r15.value(vcpu->r.r15);
+ state.r8.charge(vcpu->r.r8);
+ state.r9.charge(vcpu->r.r9);
+ state.r10.charge(vcpu->r.r10);
+ state.r11.charge(vcpu->r.r11);
+ state.r12.charge(vcpu->r.r12);
+ state.r13.charge(vcpu->r.r13);
+ state.r14.charge(vcpu->r.r14);
+ state.r15.charge(vcpu->r.r15);
#endif
{
addr_t const cr0 = Foc::l4_vm_vmx_read(vmcs, Vmcs::CR0);
addr_t const cr0_shadow = Foc::l4_vm_vmx_read(vmcs, Vmcs::CR0_SHADOW);
- state.cr0.value((cr0 & ~vmcs_cr0_mask) | (cr0_shadow & vmcs_cr0_mask));
+ state.cr0.charge((cr0 & ~vmcs_cr0_mask) | (cr0_shadow & vmcs_cr0_mask));
if (state.cr0.value() != cr0_shadow)
Foc::l4_vm_vmx_write(vmcs, Vmcs::CR0_SHADOW, state.cr0.value());
}
unsigned const cr2 = Foc::l4_vm_vmx_get_cr2_index(vmcs);
- state.cr2.value(Foc::l4_vm_vmx_read(vmcs, cr2));
- state.cr3.value(Foc::l4_vm_vmx_read(vmcs, Vmcs::CR3));
+ state.cr2.charge(Foc::l4_vm_vmx_read(vmcs, cr2));
+ state.cr3.charge(Foc::l4_vm_vmx_read(vmcs, Vmcs::CR3));
{
addr_t const cr4 = Foc::l4_vm_vmx_read(vmcs, Vmcs::CR4);
addr_t const cr4_shadow = Foc::l4_vm_vmx_read(vmcs, Vmcs::CR4_SHADOW);
- state.cr4.value((cr4 & ~vmcs_cr4_mask) | (cr4_shadow & vmcs_cr4_mask));
+ state.cr4.charge((cr4 & ~vmcs_cr4_mask) | (cr4_shadow & vmcs_cr4_mask));
if (state.cr4.value() != cr4_shadow)
Foc::l4_vm_vmx_write(vmcs, Vmcs::CR4_SHADOW,
state.cr4.value());
@@ -504,8 +540,8 @@ struct Vcpu : Thread
using Foc::l4_vm_vmx_read_16;
using Foc::l4_vm_vmx_read_32;
using Foc::l4_vm_vmx_read_nat;
- typedef Vm_state::Segment Segment;
- typedef Vm_state::Range Range;
+ typedef Vcpu_state::Segment Segment;
+ typedef Vcpu_state::Range Range;
{
Segment cs { l4_vm_vmx_read_16(vmcs, Vmcs::CS_SEL),
@@ -513,7 +549,7 @@ struct Vcpu : Thread
l4_vm_vmx_read_32(vmcs, Vmcs::CS_LIMIT),
l4_vm_vmx_read_nat(vmcs, Vmcs::CS_BASE) };
- state.cs.value(cs);
+ state.cs.charge(cs);
}
{
@@ -522,7 +558,7 @@ struct Vcpu : Thread
l4_vm_vmx_read_32(vmcs, Vmcs::SS_LIMIT),
l4_vm_vmx_read_nat(vmcs, Vmcs::SS_BASE) };
- state.ss.value(ss);
+ state.ss.charge(ss);
}
{
@@ -531,7 +567,7 @@ struct Vcpu : Thread
l4_vm_vmx_read_32(vmcs, Vmcs::ES_LIMIT),
l4_vm_vmx_read_nat(vmcs, Vmcs::ES_BASE) };
- state.es.value(es);
+ state.es.charge(es);
}
{
@@ -540,7 +576,7 @@ struct Vcpu : Thread
l4_vm_vmx_read_32(vmcs, Vmcs::DS_LIMIT),
l4_vm_vmx_read_nat(vmcs, Vmcs::DS_BASE) };
- state.ds.value(ds);
+ state.ds.charge(ds);
}
{
@@ -549,7 +585,7 @@ struct Vcpu : Thread
l4_vm_vmx_read_32(vmcs, Vmcs::FS_LIMIT),
l4_vm_vmx_read_nat(vmcs, Vmcs::FS_BASE) };
- state.fs.value(fs);
+ state.fs.charge(fs);
}
{
@@ -558,7 +594,7 @@ struct Vcpu : Thread
l4_vm_vmx_read_32(vmcs, Vmcs::GS_LIMIT),
l4_vm_vmx_read_nat(vmcs, Vmcs::GS_BASE) };
- state.gs.value(gs);
+ state.gs.charge(gs);
}
{
@@ -567,7 +603,7 @@ struct Vcpu : Thread
l4_vm_vmx_read_32(vmcs, Vmcs::TR_LIMIT),
l4_vm_vmx_read_nat(vmcs, Vmcs::TR_BASE) };
- state.tr.value(tr);
+ state.tr.charge(tr);
}
{
@@ -576,48 +612,48 @@ struct Vcpu : Thread
l4_vm_vmx_read_32(vmcs, Vmcs::LDTR_LIMIT),
l4_vm_vmx_read_nat(vmcs, Vmcs::LDTR_BASE) };
- state.ldtr.value(ldtr);
+ state.ldtr.charge(ldtr);
}
- state.gdtr.value(Range{l4_vm_vmx_read_nat(vmcs, Vmcs::GDTR_BASE),
- l4_vm_vmx_read_32(vmcs, Vmcs::GDTR_LIMIT)});
+ state.gdtr.charge(Range{.limit = l4_vm_vmx_read_32(vmcs, Vmcs::GDTR_LIMIT),
+ .base = l4_vm_vmx_read_nat(vmcs, Vmcs::GDTR_BASE)});
- state.idtr.value(Range{l4_vm_vmx_read_nat(vmcs, Vmcs::IDTR_BASE),
- l4_vm_vmx_read_32(vmcs, Vmcs::IDTR_LIMIT)});
+ state.idtr.charge(Range{.limit = l4_vm_vmx_read_32(vmcs, Vmcs::IDTR_LIMIT),
+ .base = l4_vm_vmx_read_nat(vmcs, Vmcs::IDTR_BASE)});
- state.sysenter_cs.value(l4_vm_vmx_read(vmcs, Vmcs::SYSENTER_CS));
- state.sysenter_sp.value(l4_vm_vmx_read(vmcs, Vmcs::SYSENTER_SP));
- state.sysenter_ip.value(l4_vm_vmx_read(vmcs, Vmcs::SYSENTER_IP));
+ state.sysenter_cs.charge(l4_vm_vmx_read(vmcs, Vmcs::SYSENTER_CS));
+ state.sysenter_sp.charge(l4_vm_vmx_read(vmcs, Vmcs::SYSENTER_SP));
+ state.sysenter_ip.charge(l4_vm_vmx_read(vmcs, Vmcs::SYSENTER_IP));
- state.qual_primary.value(l4_vm_vmx_read(vmcs, Vmcs::EXIT_QUAL));
- state.qual_secondary.value(l4_vm_vmx_read(vmcs, Vmcs::GUEST_PHYS));
+ state.qual_primary.charge(l4_vm_vmx_read(vmcs, Vmcs::EXIT_QUAL));
+ state.qual_secondary.charge(l4_vm_vmx_read(vmcs, Vmcs::GUEST_PHYS));
- state.ctrl_primary.value(l4_vm_vmx_read(vmcs, Vmcs::CTRL_0));
- state.ctrl_secondary.value(l4_vm_vmx_read(vmcs, Vmcs::CTRL_1));
+ state.ctrl_primary.charge(l4_vm_vmx_read(vmcs, Vmcs::CTRL_0));
+ state.ctrl_secondary.charge(l4_vm_vmx_read(vmcs, Vmcs::CTRL_1));
if (state.exit_reason == INTEL_EXIT_INVALID ||
state.exit_reason == VMEXIT_PAUSED)
{
- state.inj_info.value(l4_vm_vmx_read(vmcs, Vmcs::INTR_INFO));
- state.inj_error.value(l4_vm_vmx_read(vmcs, Vmcs::INTR_ERROR));
+ state.inj_info.charge(l4_vm_vmx_read(vmcs, Vmcs::INTR_INFO));
+ state.inj_error.charge(l4_vm_vmx_read(vmcs, Vmcs::INTR_ERROR));
} else {
- state.inj_info.value(l4_vm_vmx_read(vmcs, Vmcs::IDT_INFO));
- state.inj_error.value(l4_vm_vmx_read(vmcs, Vmcs::IDT_ERROR));
+ state.inj_info.charge(l4_vm_vmx_read(vmcs, Vmcs::IDT_INFO));
+ state.inj_error.charge(l4_vm_vmx_read(vmcs, Vmcs::IDT_ERROR));
}
- state.intr_state.value(l4_vm_vmx_read(vmcs, Vmcs::STATE_INTR));
- state.actv_state.value(l4_vm_vmx_read(vmcs, Vmcs::STATE_ACTV));
+ state.intr_state.charge(l4_vm_vmx_read(vmcs, Vmcs::STATE_INTR));
+ state.actv_state.charge(l4_vm_vmx_read(vmcs, Vmcs::STATE_ACTV));
- state.tsc.value(Trace::timestamp());
- state.tsc_offset.value(_tsc_offset);
+ state.tsc.charge(Trace::timestamp());
+ state.tsc_offset.charge(_tsc_offset);
- state.efer.value(l4_vm_vmx_read(vmcs, Vmcs::EFER));
+ state.efer.charge(l4_vm_vmx_read(vmcs, Vmcs::EFER));
- state.star.value(l4_vm_vmx_read(vmcs, Vmcs::MSR_STAR));
- state.lstar.value(l4_vm_vmx_read(vmcs, Vmcs::MSR_LSTAR));
- state.cstar.value(l4_vm_vmx_read(vmcs, Vmcs::MSR_CSTAR));
- state.fmask.value(l4_vm_vmx_read(vmcs, Vmcs::MSR_FMASK));
- state.kernel_gs_base.value(l4_vm_vmx_read(vmcs, Vmcs::KERNEL_GS_BASE));
+ state.star.charge(l4_vm_vmx_read(vmcs, Vmcs::MSR_STAR));
+ state.lstar.charge(l4_vm_vmx_read(vmcs, Vmcs::MSR_LSTAR));
+ state.cstar.charge(l4_vm_vmx_read(vmcs, Vmcs::MSR_CSTAR));
+ state.fmask.charge(l4_vm_vmx_read(vmcs, Vmcs::MSR_FMASK));
+ state.kernel_gs_base.charge(l4_vm_vmx_read(vmcs, Vmcs::KERNEL_GS_BASE));
/* XXX missing */
#if 0
@@ -626,109 +662,109 @@ struct Vcpu : Thread
#endif
}
- void _read_amd_state(Vm_state &state, Foc::l4_vm_svm_vmcb_t *vmcb,
+ void _read_amd_state(Vcpu_state &state, Foc::l4_vm_svm_vmcb_t *vmcb,
Foc::l4_vcpu_state_t * const vcpu)
{
- state.ax.value(vmcb->state_save_area.rax);
- state.cx.value(vcpu->r.cx);
- state.dx.value(vcpu->r.dx);
- state.bx.value(vcpu->r.bx);
+ state.ax.charge(vmcb->state_save_area.rax);
+ state.cx.charge(vcpu->r.cx);
+ state.dx.charge(vcpu->r.dx);
+ state.bx.charge(vcpu->r.bx);
- state.di.value(vcpu->r.di);
- state.si.value(vcpu->r.si);
- state.bp.value(vcpu->r.bp);
+ state.di.charge(vcpu->r.di);
+ state.si.charge(vcpu->r.si);
+ state.bp.charge(vcpu->r.bp);
- state.flags.value(vmcb->state_save_area.rflags);
+ state.flags.charge(vmcb->state_save_area.rflags);
- state.sp.value(vmcb->state_save_area.rsp);
+ state.sp.charge(vmcb->state_save_area.rsp);
- state.ip.value(vmcb->state_save_area.rip);
- state.ip_len.value(0); /* unsupported on AMD */
+ state.ip.charge(vmcb->state_save_area.rip);
+ state.ip_len.charge(0); /* unsupported on AMD */
- state.dr7.value(vmcb->state_save_area.dr7);
+ state.dr7.charge(vmcb->state_save_area.dr7);
#ifdef __x86_64__
- state.r8.value(vcpu->r.r8);
- state.r9.value(vcpu->r.r9);
- state.r10.value(vcpu->r.r10);
- state.r11.value(vcpu->r.r11);
- state.r12.value(vcpu->r.r12);
- state.r13.value(vcpu->r.r13);
- state.r14.value(vcpu->r.r14);
- state.r15.value(vcpu->r.r15);
+ state.r8.charge(vcpu->r.r8);
+ state.r9.charge(vcpu->r.r9);
+ state.r10.charge(vcpu->r.r10);
+ state.r11.charge(vcpu->r.r11);
+ state.r12.charge(vcpu->r.r12);
+ state.r13.charge(vcpu->r.r13);
+ state.r14.charge(vcpu->r.r14);
+ state.r15.charge(vcpu->r.r15);
#endif
{
addr_t const cr0 = vmcb->state_save_area.cr0;
- state.cr0.value((cr0 & ~vmcb_cr0_mask) | (vmcb_cr0_shadow & vmcb_cr0_mask));
+ state.cr0.charge((cr0 & ~vmcb_cr0_mask) | (vmcb_cr0_shadow & vmcb_cr0_mask));
if (state.cr0.value() != vmcb_cr0_shadow)
vmcb_cr0_shadow = state.cr0.value();
}
- state.cr2.value(vmcb->state_save_area.cr2);
- state.cr3.value(vmcb->state_save_area.cr3);
+ state.cr2.charge(vmcb->state_save_area.cr2);
+ state.cr3.charge(vmcb->state_save_area.cr3);
{
addr_t const cr4 = vmcb->state_save_area.cr4;
- state.cr4.value((cr4 & ~vmcb_cr4_mask) | (vmcb_cr4_shadow & vmcb_cr4_mask));
+ state.cr4.charge((cr4 & ~vmcb_cr4_mask) | (vmcb_cr4_shadow & vmcb_cr4_mask));
if (state.cr4.value() != vmcb_cr4_shadow)
vmcb_cr4_shadow = state.cr4.value();
}
- typedef Vm_state::Segment Segment;
+ typedef Vcpu_state::Segment Segment;
- state.cs.value(Segment{vmcb->state_save_area.cs.selector,
+ state.cs.charge(Segment{vmcb->state_save_area.cs.selector,
vmcb->state_save_area.cs.attrib,
vmcb->state_save_area.cs.limit,
(addr_t)vmcb->state_save_area.cs.base});
- state.ss.value(Segment{vmcb->state_save_area.ss.selector,
+ state.ss.charge(Segment{vmcb->state_save_area.ss.selector,
vmcb->state_save_area.ss.attrib,
vmcb->state_save_area.ss.limit,
(addr_t)vmcb->state_save_area.ss.base});
- state.es.value(Segment{vmcb->state_save_area.es.selector,
+ state.es.charge(Segment{vmcb->state_save_area.es.selector,
vmcb->state_save_area.es.attrib,
vmcb->state_save_area.es.limit,
(addr_t)vmcb->state_save_area.es.base});
- state.ds.value(Segment{vmcb->state_save_area.ds.selector,
+ state.ds.charge(Segment{vmcb->state_save_area.ds.selector,
vmcb->state_save_area.ds.attrib,
vmcb->state_save_area.ds.limit,
(addr_t)vmcb->state_save_area.ds.base});
- state.fs.value(Segment{vmcb->state_save_area.fs.selector,
+ state.fs.charge(Segment{vmcb->state_save_area.fs.selector,
vmcb->state_save_area.fs.attrib,
vmcb->state_save_area.fs.limit,
(addr_t)vmcb->state_save_area.fs.base});
- state.gs.value(Segment{vmcb->state_save_area.gs.selector,
+ state.gs.charge(Segment{vmcb->state_save_area.gs.selector,
vmcb->state_save_area.gs.attrib,
vmcb->state_save_area.gs.limit,
(addr_t)vmcb->state_save_area.gs.base});
- state.tr.value(Segment{vmcb->state_save_area.tr.selector,
+ state.tr.charge(Segment{vmcb->state_save_area.tr.selector,
vmcb->state_save_area.tr.attrib,
vmcb->state_save_area.tr.limit,
(addr_t)vmcb->state_save_area.tr.base});
- state.ldtr.value(Segment{vmcb->state_save_area.ldtr.selector,
+ state.ldtr.charge(Segment{vmcb->state_save_area.ldtr.selector,
vmcb->state_save_area.ldtr.attrib,
vmcb->state_save_area.ldtr.limit,
(addr_t)vmcb->state_save_area.ldtr.base});
- typedef Vm_state::Range Range;
+ typedef Vcpu_state::Range Range;
- state.gdtr.value(Range{(addr_t)vmcb->state_save_area.gdtr.base,
- vmcb->state_save_area.gdtr.limit});
+ state.gdtr.charge(Range{.limit = vmcb->state_save_area.gdtr.limit,
+ .base = (addr_t)vmcb->state_save_area.gdtr.base });
- state.idtr.value(Range{(addr_t)vmcb->state_save_area.idtr.base,
- vmcb->state_save_area.idtr.limit});
+ state.idtr.charge(Range{.limit = vmcb->state_save_area.idtr.limit,
+ .base = (addr_t)vmcb->state_save_area.idtr.base });
- state.sysenter_cs.value(vmcb->state_save_area.sysenter_cs);
- state.sysenter_sp.value(vmcb->state_save_area.sysenter_esp);
- state.sysenter_ip.value(vmcb->state_save_area.sysenter_eip);
+ state.sysenter_cs.charge(vmcb->state_save_area.sysenter_cs);
+ state.sysenter_sp.charge(vmcb->state_save_area.sysenter_esp);
+ state.sysenter_ip.charge(vmcb->state_save_area.sysenter_eip);
- state.qual_primary.value(vmcb->control_area.exitinfo1);
- state.qual_secondary.value(vmcb->control_area.exitinfo2);
+ state.qual_primary.charge(vmcb->control_area.exitinfo1);
+ state.qual_secondary.charge(vmcb->control_area.exitinfo2);
uint32_t inj_info = 0;
uint32_t inj_error = 0;
@@ -741,56 +777,56 @@ struct Vcpu : Thread
inj_info = vmcb->control_area.exitintinfo;
inj_error = vmcb->control_area.exitintinfo >> 32;
}
- state.inj_info.value(inj_info);
- state.inj_error.value(inj_error);
+ state.inj_info.charge(inj_info);
+ state.inj_error.charge(inj_error);
- state.intr_state.value(vmcb->control_area.interrupt_shadow);
- state.actv_state.value(0);
+ state.intr_state.charge(vmcb->control_area.interrupt_shadow);
+ state.actv_state.charge(0);
- state.tsc.value(Trace::timestamp());
- state.tsc_offset.value(_tsc_offset);
+ state.tsc.charge(Trace::timestamp());
+ state.tsc_offset.charge(_tsc_offset);
- state.efer.value(vmcb->state_save_area.efer);
+ state.efer.charge(vmcb->state_save_area.efer);
- if (state.pdpte_0.valid() || state.pdpte_1.valid() ||
- state.pdpte_2.valid() || state.pdpte_3.valid()) {
+ if (state.pdpte_0.charged() || state.pdpte_1.charged() ||
+ state.pdpte_2.charged() || state.pdpte_3.charged()) {
error("pdpte not implemented");
}
- if (state.star.valid() || state.lstar.valid() || state.cstar.valid() ||
- state.fmask.valid() || state.kernel_gs_base.valid()) {
+ if (state.star.charged() || state.lstar.charged() || state.cstar.charged() ||
+ state.fmask.charged() || state.kernel_gs_base.charged()) {
error("star, fstar, fmask, kernel_gs_base not implemented");
}
- if (state.tpr.valid() || state.tpr_threshold.valid()) {
+ if (state.tpr.charged() || state.tpr_threshold.charged()) {
error("tpr not implemented");
}
}
- void _write_intel_state(Vm_state &state, void *vmcs,
+ void _write_intel_state(Vcpu_state &state, void *vmcs,
Foc::l4_vcpu_state_t *vcpu)
{
using Foc::l4_vm_vmx_write;
- if (state.ax.valid() || state.cx.valid() || state.dx.valid() ||
- state.bx.valid()) {
+ if (state.ax.charged() || state.cx.charged() || state.dx.charged() ||
+ state.bx.charged()) {
vcpu->r.ax = state.ax.value();
vcpu->r.cx = state.cx.value();
vcpu->r.dx = state.dx.value();
vcpu->r.bx = state.bx.value();
}
- if (state.bp.valid() || state.di.valid() || state.si.valid()) {
+ if (state.bp.charged() || state.di.charged() || state.si.charged()) {
vcpu->r.bp = state.bp.value();
vcpu->r.di = state.di.value();
vcpu->r.si = state.si.value();
}
- if (state.r8.valid() || state.r9.valid() || state.r10.valid() ||
- state.r11.valid() || state.r12.valid() || state.r13.valid() ||
- state.r14.valid() || state.r15.valid()) {
+ if (state.r8.charged() || state.r9.charged() || state.r10.charged() ||
+ state.r11.charged() || state.r12.charged() || state.r13.charged() ||
+ state.r14.charged() || state.r15.charged()) {
#ifdef __x86_64__
vcpu->r.r8 = state.r8.value();
vcpu->r.r9 = state.r9.value();
@@ -803,38 +839,38 @@ struct Vcpu : Thread
#endif
}
- if (state.tsc_offset.valid()) {
+ if (state.tsc_offset.charged()) {
_tsc_offset += state.tsc_offset.value();
l4_vm_vmx_write(vmcs, Vmcs::TSC_OFF_LO, _tsc_offset & 0xffffffffu);
l4_vm_vmx_write(vmcs, Vmcs::TSC_OFF_HI, (_tsc_offset >> 32) & 0xffffffffu);
}
- if (state.star.valid())
+ if (state.star.charged())
l4_vm_vmx_write(vmcs, Vmcs::MSR_STAR, state.star.value());
- if (state.lstar.valid())
+ if (state.lstar.charged())
l4_vm_vmx_write(vmcs, Vmcs::MSR_LSTAR, state.lstar.value());
- if (state.cstar.valid())
+ if (state.cstar.charged())
l4_vm_vmx_write(vmcs, Vmcs::MSR_CSTAR, state.cstar.value());
- if (state.fmask.valid())
+ if (state.fmask.charged())
l4_vm_vmx_write(vmcs, Vmcs::MSR_FMASK, state.fmask.value());
- if (state.kernel_gs_base.valid())
+ if (state.kernel_gs_base.charged())
l4_vm_vmx_write(vmcs, Vmcs::KERNEL_GS_BASE, state.kernel_gs_base.value());
- if (state.tpr.valid() || state.tpr_threshold.valid()) {
+ if (state.tpr.charged() || state.tpr_threshold.charged()) {
if (_show_error_unsupported_tpr) {
_show_error_unsupported_tpr = false;
error("TPR & TPR_THRESHOLD not supported on Fiasco.OC");
}
}
- if (state.dr7.valid())
+ if (state.dr7.charged())
l4_vm_vmx_write(vmcs, Vmcs::DR7, state.dr7.value());
- if (state.cr0.valid()) {
+ if (state.cr0.charged()) {
l4_vm_vmx_write(vmcs, Vmcs::CR0, vmcs_cr0_set | (~vmcs_cr0_mask & state.cr0.value()));
l4_vm_vmx_write(vmcs, Vmcs::CR0_SHADOW, state.cr0.value());
@@ -843,22 +879,22 @@ struct Vcpu : Thread
#endif
}
- if (state.cr2.valid()) {
+ if (state.cr2.charged()) {
unsigned const cr2 = Foc::l4_vm_vmx_get_cr2_index(vmcs);
l4_vm_vmx_write(vmcs, cr2, state.cr2.value());
}
- if (state.cr3.valid())
+ if (state.cr3.charged())
l4_vm_vmx_write(vmcs, Vmcs::CR3, state.cr3.value());
- if (state.cr4.valid()) {
+ if (state.cr4.charged()) {
l4_vm_vmx_write(vmcs, Vmcs::CR4,
vmcs_cr4_set | (~vmcs_cr4_mask & state.cr4.value()));
l4_vm_vmx_write(vmcs, Vmcs::CR4_SHADOW, state.cr4.value());
}
- if (state.inj_info.valid() || state.inj_error.valid()) {
- addr_t ctrl_0 = state.ctrl_primary.valid() ?
+ if (state.inj_info.charged() || state.inj_error.charged()) {
+ addr_t ctrl_0 = state.ctrl_primary.charged() ?
state.ctrl_primary.value() :
Foc::l4_vm_vmx_read(vmcs, Vmcs::CTRL_0);
@@ -873,7 +909,7 @@ struct Vcpu : Thread
else
ctrl_0 &= ~Vmcs::IRQ_WINDOW;
- state.ctrl_primary.value(ctrl_0);
+ state.ctrl_primary.charge(ctrl_0);
l4_vm_vmx_write(vmcs, Vmcs::INTR_INFO,
state.inj_info.value() & ~0x3000);
@@ -881,105 +917,105 @@ struct Vcpu : Thread
state.inj_error.value());
}
- if (state.flags.valid())
+ if (state.flags.charged())
l4_vm_vmx_write(vmcs, Vmcs::FLAGS, state.flags.value());
- if (state.sp.valid())
+ if (state.sp.charged())
l4_vm_vmx_write(vmcs, Vmcs::SP, state.sp.value());
- if (state.ip.valid())
+ if (state.ip.charged())
l4_vm_vmx_write(vmcs, Vmcs::IP, state.ip.value());
- if (state.ip_len.valid())
+ if (state.ip_len.charged())
l4_vm_vmx_write(vmcs, Vmcs::ENTRY_INST_LEN, state.ip_len.value());
- if (state.efer.valid())
+ if (state.efer.charged())
l4_vm_vmx_write(vmcs, Vmcs::EFER, state.efer.value());
- if (state.ctrl_primary.valid())
+ if (state.ctrl_primary.charged())
l4_vm_vmx_write(vmcs, Vmcs::CTRL_0,
_vmcs_ctrl0 | state.ctrl_primary.value());
- if (state.ctrl_secondary.valid())
+ if (state.ctrl_secondary.charged())
l4_vm_vmx_write(vmcs, Vmcs::CTRL_1,
state.ctrl_secondary.value());
- if (state.intr_state.valid())
+ if (state.intr_state.charged())
l4_vm_vmx_write(vmcs, Vmcs::STATE_INTR,
state.intr_state.value());
- if (state.actv_state.valid())
+ if (state.actv_state.charged())
l4_vm_vmx_write(vmcs, Vmcs::STATE_ACTV,
state.actv_state.value());
- if (state.cs.valid()) {
+ if (state.cs.charged()) {
l4_vm_vmx_write(vmcs, Vmcs::CS_SEL, state.cs.value().sel);
l4_vm_vmx_write(vmcs, Vmcs::CS_AR, _convert_ar(state.cs.value().ar));
l4_vm_vmx_write(vmcs, Vmcs::CS_LIMIT, state.cs.value().limit);
l4_vm_vmx_write(vmcs, Vmcs::CS_BASE, state.cs.value().base);
}
- if (state.ss.valid()) {
+ if (state.ss.charged()) {
l4_vm_vmx_write(vmcs, Vmcs::SS_SEL, state.ss.value().sel);
l4_vm_vmx_write(vmcs, Vmcs::SS_AR, _convert_ar(state.ss.value().ar));
l4_vm_vmx_write(vmcs, Vmcs::SS_LIMIT, state.ss.value().limit);
l4_vm_vmx_write(vmcs, Vmcs::SS_BASE, state.ss.value().base);
}
- if (state.es.valid()) {
+ if (state.es.charged()) {
l4_vm_vmx_write(vmcs, Vmcs::ES_SEL, state.es.value().sel);
l4_vm_vmx_write(vmcs, Vmcs::ES_AR, _convert_ar(state.es.value().ar));
l4_vm_vmx_write(vmcs, Vmcs::ES_LIMIT, state.es.value().limit);
l4_vm_vmx_write(vmcs, Vmcs::ES_BASE, state.es.value().base);
}
- if (state.ds.valid()) {
+ if (state.ds.charged()) {
l4_vm_vmx_write(vmcs, Vmcs::DS_SEL, state.ds.value().sel);
l4_vm_vmx_write(vmcs, Vmcs::DS_AR, _convert_ar(state.ds.value().ar));
l4_vm_vmx_write(vmcs, Vmcs::DS_LIMIT, state.ds.value().limit);
l4_vm_vmx_write(vmcs, Vmcs::DS_BASE, state.ds.value().base);
}
- if (state.fs.valid()) {
+ if (state.fs.charged()) {
l4_vm_vmx_write(vmcs, Vmcs::FS_SEL, state.fs.value().sel);
l4_vm_vmx_write(vmcs, Vmcs::FS_AR, _convert_ar(state.fs.value().ar));
l4_vm_vmx_write(vmcs, Vmcs::FS_LIMIT, state.fs.value().limit);
l4_vm_vmx_write(vmcs, Vmcs::FS_BASE, state.fs.value().base);
}
- if (state.gs.valid()) {
+ if (state.gs.charged()) {
l4_vm_vmx_write(vmcs, Vmcs::GS_SEL, state.gs.value().sel);
l4_vm_vmx_write(vmcs, Vmcs::GS_AR, _convert_ar(state.gs.value().ar));
l4_vm_vmx_write(vmcs, Vmcs::GS_LIMIT, state.gs.value().limit);
l4_vm_vmx_write(vmcs, Vmcs::GS_BASE, state.gs.value().base);
}
- if (state.tr.valid()) {
+ if (state.tr.charged()) {
l4_vm_vmx_write(vmcs, Vmcs::TR_SEL, state.tr.value().sel);
l4_vm_vmx_write(vmcs, Vmcs::TR_AR, _convert_ar(state.tr.value().ar));
l4_vm_vmx_write(vmcs, Vmcs::TR_LIMIT, state.tr.value().limit);
l4_vm_vmx_write(vmcs, Vmcs::TR_BASE, state.tr.value().base);
}
- if (state.ldtr.valid()) {
+ if (state.ldtr.charged()) {
l4_vm_vmx_write(vmcs, Vmcs::LDTR_SEL, state.ldtr.value().sel);
l4_vm_vmx_write(vmcs, Vmcs::LDTR_AR, _convert_ar(state.ldtr.value().ar));
l4_vm_vmx_write(vmcs, Vmcs::LDTR_LIMIT, state.ldtr.value().limit);
l4_vm_vmx_write(vmcs, Vmcs::LDTR_BASE, state.ldtr.value().base);
}
- if (state.idtr.valid()) {
+ if (state.idtr.charged()) {
l4_vm_vmx_write(vmcs, Vmcs::IDTR_BASE, state.idtr.value().base);
l4_vm_vmx_write(vmcs, Vmcs::IDTR_LIMIT, state.idtr.value().limit);
}
- if (state.gdtr.valid()) {
+ if (state.gdtr.charged()) {
l4_vm_vmx_write(vmcs, Vmcs::GDTR_BASE, state.gdtr.value().base);
l4_vm_vmx_write(vmcs, Vmcs::GDTR_LIMIT, state.gdtr.value().limit);
}
- if (state.pdpte_0.valid() || state.pdpte_1.valid() ||
- state.pdpte_2.valid() || state.pdpte_3.valid())
+ if (state.pdpte_0.charged() || state.pdpte_1.charged() ||
+ state.pdpte_2.charged() || state.pdpte_3.charged())
{
if (_show_error_unsupported_pdpte) {
_show_error_unsupported_pdpte = false;
@@ -987,22 +1023,22 @@ struct Vcpu : Thread
}
}
- if (state.sysenter_cs.valid())
+ if (state.sysenter_cs.charged())
l4_vm_vmx_write(vmcs, Vmcs::SYSENTER_CS,
state.sysenter_cs.value());
- if (state.sysenter_sp.valid())
+ if (state.sysenter_sp.charged())
l4_vm_vmx_write(vmcs, Vmcs::SYSENTER_SP,
state.sysenter_sp.value());
- if (state.sysenter_ip.valid())
+ if (state.sysenter_ip.charged())
l4_vm_vmx_write(vmcs, Vmcs::SYSENTER_IP,
state.sysenter_ip.value());
}
- void _write_amd_state(Vm_state &state, Foc::l4_vm_svm_vmcb_t *vmcb,
+ void _write_amd_state(Vcpu_state &state, Foc::l4_vm_svm_vmcb_t *vmcb,
Foc::l4_vcpu_state_t *vcpu)
{
- if (state.ax.valid() || state.cx.valid() || state.dx.valid() ||
- state.bx.valid()) {
+ if (state.ax.charged() || state.cx.charged() || state.dx.charged() ||
+ state.bx.charged()) {
vmcb->state_save_area.rax = state.ax.value();
vcpu->r.ax = state.ax.value();
@@ -1011,15 +1047,15 @@ struct Vcpu : Thread
vcpu->r.bx = state.bx.value();
}
- if (state.bp.valid() || state.di.valid() || state.si.valid()) {
+ if (state.bp.charged() || state.di.charged() || state.si.charged()) {
vcpu->r.bp = state.bp.value();
vcpu->r.di = state.di.value();
vcpu->r.si = state.si.value();
}
- if (state.r8.valid() || state.r9.valid() || state.r10.valid() ||
- state.r11.valid() || state.r12.valid() || state.r13.valid() ||
- state.r14.valid() || state.r15.valid()) {
+ if (state.r8.charged() || state.r9.charged() || state.r10.charged() ||
+ state.r11.charged() || state.r12.charged() || state.r13.charged() ||
+ state.r14.charged() || state.r15.charged()) {
#ifdef __x86_64__
vcpu->r.r8 = state.r8.value();
vcpu->r.r9 = state.r9.value();
@@ -1032,7 +1068,7 @@ struct Vcpu : Thread
#endif
}
- if (state.tsc_offset.valid()) {
+ if (state.tsc_offset.charged()) {
_tsc_offset += state.tsc_offset.value();
vmcb->control_area.tsc_offset = _tsc_offset;
}
@@ -1041,17 +1077,17 @@ struct Vcpu : Thread
state.fmask.value() || state.kernel_gs_base.value())
error(__LINE__, " not implemented");
- if (state.tpr.valid() || state.tpr_threshold.valid()) {
+ if (state.tpr.charged() || state.tpr_threshold.charged()) {
if (_show_error_unsupported_tpr) {
_show_error_unsupported_tpr = false;
error("TPR & TPR_THRESHOLD not supported on Fiasco.OC");
}
}
- if (state.dr7.valid())
+ if (state.dr7.charged())
vmcb->state_save_area.dr7 = state.dr7.value();
- if (state.cr0.valid()) {
+ if (state.cr0.charged()) {
vmcb->state_save_area.cr0 = vmcb_cr0_set | (~vmcb_cr0_mask & state.cr0.value());
vmcb_cr0_shadow = state.cr0.value();
#if 0
@@ -1059,26 +1095,26 @@ struct Vcpu : Thread
#endif
}
- if (state.cr2.valid())
+ if (state.cr2.charged())
vmcb->state_save_area.cr2 = state.cr2.value();
- if (state.cr3.valid())
+ if (state.cr3.charged())
vmcb->state_save_area.cr3 = state.cr3.value();
- if (state.cr4.valid()) {
+ if (state.cr4.charged()) {
vmcb->state_save_area.cr4 = vmcb_cr4_set | (~vmcb_cr4_mask & state.cr4.value());
vmcb_cr4_shadow = state.cr4.value();
}
- if (state.ctrl_primary.valid())
+ if (state.ctrl_primary.charged())
vmcb->control_area.intercept_instruction0 = vmcb_ctrl0 |
state.ctrl_primary.value();
- if (state.ctrl_secondary.valid())
+ if (state.ctrl_secondary.charged())
vmcb->control_area.intercept_instruction1 = vmcb_ctrl1 |
state.ctrl_secondary.value();
- if (state.inj_info.valid()) {
+ if (state.inj_info.charged()) {
if (state.inj_info.value() & 0x1000) {
vmcb->control_area.interrupt_ctl |= (1ul << 8 | 1ul << 20);
vmcb->control_area.intercept_instruction0 |= Vmcb::CTRL0_VINTR;
@@ -1090,97 +1126,97 @@ struct Vcpu : Thread
vmcb->control_area.eventinj |= ~0x3000U & state.inj_info.value();
}
- if (state.inj_error.valid()) {
+ if (state.inj_error.charged()) {
vmcb->control_area.eventinj &= ((1ULL << 32) - 1);
uint64_t value = (0ULL + state.inj_error.value()) << 32;
vmcb->control_area.eventinj |= value;
}
- if (state.flags.valid())
+ if (state.flags.charged())
vmcb->state_save_area.rflags = state.flags.value();
- if (state.sp.valid())
+ if (state.sp.charged())
vmcb->state_save_area.rsp = state.sp.value();
- if (state.ip.valid())
+ if (state.ip.charged())
vmcb->state_save_area.rip = state.ip.value();
- if (state.efer.valid())
+ if (state.efer.charged())
vmcb->state_save_area.efer = state.efer.value() | AMD_SVM_ENABLE;
- if (state.intr_state.valid())
+ if (state.intr_state.charged())
vmcb->control_area.interrupt_shadow = state.intr_state.value();
- /* state.actv_state.valid() - not required for AMD */
+ /* state.actv_state.charged() - not required for AMD */
- if (state.cs.valid()) {
+ if (state.cs.charged()) {
vmcb->state_save_area.cs.selector = state.cs.value().sel;
vmcb->state_save_area.cs.attrib = state.cs.value().ar;
vmcb->state_save_area.cs.limit = state.cs.value().limit;
vmcb->state_save_area.cs.base = state.cs.value().base;
}
- if (state.ss.valid()) {
+ if (state.ss.charged()) {
vmcb->state_save_area.ss.selector = state.ss.value().sel;
vmcb->state_save_area.ss.attrib = state.ss.value().ar;
vmcb->state_save_area.ss.limit = state.ss.value().limit;
vmcb->state_save_area.ss.base = state.ss.value().base;
}
- if (state.es.valid()) {
+ if (state.es.charged()) {
vmcb->state_save_area.es.selector = state.es.value().sel;
vmcb->state_save_area.es.attrib = state.es.value().ar;
vmcb->state_save_area.es.limit = state.es.value().limit;
vmcb->state_save_area.es.base = state.es.value().base;
}
- if (state.ds.valid()) {
+ if (state.ds.charged()) {
vmcb->state_save_area.ds.selector = state.ds.value().sel;
vmcb->state_save_area.ds.attrib = state.ds.value().ar;
vmcb->state_save_area.ds.limit = state.ds.value().limit;
vmcb->state_save_area.ds.base = state.ds.value().base;
}
- if (state.fs.valid()) {
+ if (state.fs.charged()) {
vmcb->state_save_area.fs.selector = state.fs.value().sel;
vmcb->state_save_area.fs.attrib = state.fs.value().ar;
vmcb->state_save_area.fs.limit = state.fs.value().limit;
vmcb->state_save_area.fs.base = state.fs.value().base;
}
- if (state.gs.valid()) {
+ if (state.gs.charged()) {
vmcb->state_save_area.gs.selector = state.gs.value().sel;
vmcb->state_save_area.gs.attrib = state.gs.value().ar;
vmcb->state_save_area.gs.limit = state.gs.value().limit;
vmcb->state_save_area.gs.base = state.gs.value().base;
}
- if (state.tr.valid()) {
+ if (state.tr.charged()) {
vmcb->state_save_area.tr.selector = state.tr.value().sel;
vmcb->state_save_area.tr.attrib = state.tr.value().ar;
vmcb->state_save_area.tr.limit = state.tr.value().limit;
vmcb->state_save_area.tr.base = state.tr.value().base;
}
- if (state.ldtr.valid()) {
+ if (state.ldtr.charged()) {
vmcb->state_save_area.ldtr.selector = state.ldtr.value().sel;
vmcb->state_save_area.ldtr.attrib = state.ldtr.value().ar;
vmcb->state_save_area.ldtr.limit = state.ldtr.value().limit;
vmcb->state_save_area.ldtr.base = state.ldtr.value().base;
}
- if (state.idtr.valid()) {
+ if (state.idtr.charged()) {
vmcb->state_save_area.idtr.base = state.idtr.value().base;
vmcb->state_save_area.idtr.limit = state.idtr.value().limit;
}
- if (state.gdtr.valid()) {
+ if (state.gdtr.charged()) {
vmcb->state_save_area.gdtr.base = state.gdtr.value().base;
vmcb->state_save_area.gdtr.limit = state.gdtr.value().limit;
}
- if (state.pdpte_0.valid() || state.pdpte_1.valid() ||
- state.pdpte_2.valid() || state.pdpte_3.valid())
+ if (state.pdpte_0.charged() || state.pdpte_1.charged() ||
+ state.pdpte_2.charged() || state.pdpte_3.charged())
{
if (_show_error_unsupported_pdpte) {
_show_error_unsupported_pdpte = false;
@@ -1188,38 +1224,42 @@ struct Vcpu : Thread
}
}
- if (state.sysenter_cs.valid())
+ if (state.sysenter_cs.charged())
vmcb->state_save_area.sysenter_cs = state.sysenter_cs.value();
- if (state.sysenter_sp.valid())
+ if (state.sysenter_sp.charged())
vmcb->state_save_area.sysenter_esp = state.sysenter_sp.value();
- if (state.sysenter_ip.valid())
+ if (state.sysenter_ip.charged())
vmcb->state_save_area.sysenter_eip = state.sysenter_ip.value();
}
+ Affinity::Location _location(Vcpu_handler_base &handler) const
+ {
+ Thread * ep = reinterpret_cast<Thread *>(&handler.rpc_ep());
+ return ep->affinity();
+ }
+
public:
- Vcpu(Env &env, Signal_context_capability &cap,
- Semaphore &handler_ready, enum Virt type,
- Allocator &alloc, Affinity::Location location)
+ Foc_vcpu(Env &env, Vm_connection &vm, Vcpu_handler_base &handler,
+ enum Virt type)
:
- Thread(env, "vcpu_thread", STACK_SIZE, location, Weight(), env.cpu()),
- _signal(cap), _handler_ready(handler_ready), _alloc(alloc),
+ Thread(env, "vcpu_thread", STACK_SIZE, _location(handler),
+ Weight(), env.cpu()),
+ _vcpu_handler(handler),
_vm_type(type)
- { }
-
- Allocator &allocator() const { return _alloc; }
-
- bool match(Vm_session_client::Vcpu_id id) { return id.id == _id.id; }
-
- Vm_session_client::Vcpu_id id() const { return _id; }
-
- void id(Vm_session_client::Vcpu_id id) { _id = id; }
-
- void assign_ds_state(Region_map &rm, Dataspace_capability cap)
{
- _state = rm.attach(cap);
- _task = *reinterpret_cast<addr_t *>(_state);
- *reinterpret_cast<addr_t *>(_state) = 0UL;
+ Thread::start();
+
+ /* wait until thread is alive, e.g. Thread::cap() is valid */
+ _startup.block();
+
+ try {
+ _rpc.construct(vm, this->cap(), *this);
+ } catch (...) {
+ terminate();
+ join();
+ throw;
+ }
}
void resume()
@@ -1258,6 +1298,9 @@ struct Vcpu : Thread
_state_request = TERMINATE;
_wake_up.up();
}
+
+ Vcpu_state &state() { return _vcpu_state; }
+ Foc_native_vcpu_rpc *rpc() { return &*_rpc; }
};
@@ -1278,82 +1321,17 @@ static enum Virt virt_type(Env &env)
}
-Vm_session_client::Vcpu_id
-Vm_session_client::create_vcpu(Allocator &alloc, Env &env,
- Vm_handler_base &handler)
-{
- enum Virt vm_type = virt_type(env);
- if (vm_type == Virt::UNKNOWN) {
- error("unsupported hardware virtualisation");
- return Vm_session::Vcpu_id();
- }
+/**************
+ ** vCPU API **
+ **************/
- Thread * ep = reinterpret_cast<Thread *>(&handler._rpc_ep);
- Affinity::Location location = ep->affinity();
-
- /* create thread that switches modes between thread/cpu */
- Vcpu * vcpu = new (alloc) Registered<Vcpu>(vcpus, env, handler._cap,
- handler._done, vm_type,
- alloc, location);
-
- try {
- /* now it gets actually valid - vcpu->cap() becomes valid */
- vcpu->start();
-
- /* instruct core to let it become a vCPU */
- vcpu->id(call<Rpc_create_vcpu>(vcpu->cap()));
-
- call<Rpc_exception_handler>(handler._cap, vcpu->id());
-
- vcpu->assign_ds_state(env.rm(), call<Rpc_cpu_state>(vcpu->id()));
- } catch (...) {
- vcpu->terminate();
- vcpu->join();
-
- destroy(alloc, vcpu);
- throw;
- }
- return vcpu->id();
-}
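+/*
+ * The generic Vm_connection::Vcpu front end forwards run/pause/state to the
+ * Foc_vcpu thread reachable via the Foc_native_vcpu_rpc client.
+ */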
+void Vm_connection::Vcpu::run() { static_cast<Foc_native_vcpu_rpc &>(_native_vcpu).vcpu.resume(); }
+void Vm_connection::Vcpu::pause() { static_cast<Foc_native_vcpu_rpc &>(_native_vcpu).vcpu.pause(); }
+Vcpu_state & Vm_connection::Vcpu::state() { return static_cast<Foc_native_vcpu_rpc &>(_native_vcpu).vcpu.state(); }
-void Vm_session_client::run(Vcpu_id vcpu_id)
-{
- vcpus.for_each([&] (Vcpu &vcpu) {
- if (vcpu.match(vcpu_id))
- vcpu.resume();
- });
-}
-
-
-void Vm_session_client::pause(Vm_session_client::Vcpu_id vcpu_id)
-{
- vcpus.for_each([&] (Vcpu &vcpu) {
- if (!vcpu.match(vcpu_id))
- return;
-
- vcpu.pause();
- });
-}
-
-
-Dataspace_capability Vm_session_client::cpu_state(Vcpu_id vcpu_id)
-{
- Dataspace_capability cap;
-
- vcpus.for_each([&] (Vcpu &vcpu) {
- if (vcpu.match(vcpu_id))
- cap = call<Rpc_cpu_state>(vcpu_id);
- });
-
- return cap;
-}
-
-
-Vm_session::~Vm_session()
-{
- vcpus.for_each([&] (Vcpu &vc) {
- Allocator &alloc = vc.allocator();
- destroy(alloc, &vc);
- });
-}
+Vm_connection::Vcpu::Vcpu(Vm_connection &vm, Allocator &alloc,
+ Vcpu_handler_base &handler, Exit_config const &)
+:
+ _native_vcpu(*((new (alloc) Foc_vcpu(vm._env, vm, handler, virt_type(vm._env)))->rpc()))
+{ }
diff --git a/repos/base-hw/recipes/src/base-hw_content.inc b/repos/base-hw/recipes/src/base-hw_content.inc
index b817a10597..37edea1a83 100644
--- a/repos/base-hw/recipes/src/base-hw_content.inc
+++ b/repos/base-hw/recipes/src/base-hw_content.inc
@@ -129,6 +129,7 @@ CONTENT += $(addprefix src/timer/,$(SRC_TIMER)) \
$(addprefix src/core/,$(SRC_CORE)) \
src/lib/hw src/lib/ld src/lib/cxx \
src/include/base/internal src/include/pager \
+ src/include/hw_native_vcpu \
include/drivers/uart
# remove duplicates
diff --git a/repos/base-hw/src/core/spec/arm/virtualization/vm_session_component.cc b/repos/base-hw/src/core/spec/arm/virtualization/vm_session_component.cc
index 03c6a76993..61f2e9ccd9 100644
--- a/repos/base-hw/src/core/spec/arm/virtualization/vm_session_component.cc
+++ b/repos/base-hw/src/core/spec/arm/virtualization/vm_session_component.cc
@@ -137,7 +137,10 @@ Vm_session_component::~Vm_session_component()
/* free region in allocator */
for (unsigned i = 0; i < _vcpu_id_alloc; i++) {
- Vcpu & vcpu = _vcpus[i];
+ if (!_vcpus[i].constructed())
+ continue;
+
+ Vcpu & vcpu = *_vcpus[i];
if (vcpu.ds_cap.valid()) {
_region_map.detach(vcpu.ds_addr);
_constrained_md_ram_alloc.free(vcpu.ds_cap);
diff --git a/repos/base-hw/src/core/spec/arm_v7/trustzone/vm_session_component.cc b/repos/base-hw/src/core/spec/arm_v7/trustzone/vm_session_component.cc
index f3a80a7735..eb2e462221 100644
--- a/repos/base-hw/src/core/spec/arm_v7/trustzone/vm_session_component.cc
+++ b/repos/base-hw/src/core/spec/arm_v7/trustzone/vm_session_component.cc
@@ -91,7 +91,10 @@ Vm_session_component::~Vm_session_component()
/* free region in allocator */
for (unsigned i = 0; i < _vcpu_id_alloc; i++) {
- Vcpu & vcpu = _vcpus[i];
+ if (!_vcpus[i].constructed())
+ continue;
+
+ Vcpu & vcpu = *_vcpus[i];
if (vcpu.ds_cap.valid()) {
_region_map.detach(vcpu.ds_addr);
_constrained_md_ram_alloc.free(vcpu.ds_cap);
diff --git a/repos/base-hw/src/core/vm_session_component.cc b/repos/base-hw/src/core/vm_session_component.cc
index 0f51ee5a67..bc72bd29ce 100644
--- a/repos/base-hw/src/core/vm_session_component.cc
+++ b/repos/base-hw/src/core/vm_session_component.cc
@@ -28,82 +28,55 @@ size_t Vm_session_component::_ds_size() {
return align_addr(sizeof(Board::Vm_state), get_page_size_log2()); }
-addr_t Vm_session_component::_alloc_ds()
+void Vm_session_component::Vcpu::exception_handler(Signal_context_capability handler)
{
- addr_t addr;
- if (platform().ram_alloc().alloc_aligned(_ds_size(), (void**)&addr,
- get_page_size_log2()).error())
- throw Insufficient_ram_quota();
- return addr;
-}
-
-
-void Vm_session_component::_run(Vcpu_id) { }
-
-
-void Vm_session_component::_pause(Vcpu_id) { }
-
-
-Capability Vm_session_component::_native_vcpu(Vcpu_id id)
-{
- if (!_valid_id(id)) { return Capability(); }
- return reinterpret_cap_cast(_vcpus[id.id].kobj.cap());
-}
-
-
-void Vm_session_component::_exception_handler(Signal_context_capability handler,
- Vcpu_id id)
-{
- if (!_valid_id(id)) {
- Genode::warning("invalid vcpu id ", id.id);
+ if (!handler.valid()) {
+ Genode::warning("invalid signal");
return;
}
- Vcpu & vcpu = _vcpus[id.id];
- if (vcpu.kobj.constructed()) {
+ if (kobj.constructed()) {
Genode::warning("Cannot register vcpu handler twice");
return;
}
- unsigned const cpu = vcpu.location.valid() ? vcpu.location.xpos() : 0;
+ unsigned const cpu = location.valid() ? location.xpos() : 0;
- if (!vcpu.kobj.create(cpu, vcpu.ds_addr, Capability_space::capid(handler), _id))
+ if (!kobj.create(cpu, ds_addr, Capability_space::capid(handler), id))
Genode::warning("Cannot instantiate vm kernel object, ",
"invalid signal context?");
}
-Vm_session::Vcpu_id Vm_session_component::_create_vcpu(Thread_capability tcap)
+Capability<Vm_session::Native_vcpu> Vm_session_component::create_vcpu(Thread_capability const tcap)
{
- using namespace Genode;
-
- if (_vcpu_id_alloc == Board::VCPU_MAX) return Vcpu_id{Vcpu_id::INVALID};
+ if (_vcpu_id_alloc == Board::VCPU_MAX) return { };
Affinity::Location vcpu_location;
- auto lambda = [&] (Cpu_thread_component *ptr) {
+ _ep.apply(tcap, [&] (Cpu_thread_component *ptr) {
if (!ptr) return;
vcpu_location = ptr->platform_thread().affinity();
- };
- _ep.apply(tcap, lambda);
+ });
+
+ if (_vcpus[_vcpu_id_alloc].constructed())
+ return { };
+
+ _vcpus[_vcpu_id_alloc].construct(_id, _ep);
+ Vcpu & vcpu = *_vcpus[_vcpu_id_alloc];
- Vcpu & vcpu = _vcpus[_vcpu_id_alloc];
- vcpu.ds_cap = _constrained_md_ram_alloc.alloc(_ds_size(),
- Cache_attribute::UNCACHED);
try {
+ vcpu.ds_cap = _constrained_md_ram_alloc.alloc(_ds_size(),
+ Cache_attribute::UNCACHED);
vcpu.ds_addr = _region_map.attach(vcpu.ds_cap);
} catch (...) {
- _constrained_md_ram_alloc.free(vcpu.ds_cap);
+ if (vcpu.ds_cap.valid())
+ _constrained_md_ram_alloc.free(vcpu.ds_cap);
+ _vcpus[_vcpu_id_alloc].destruct();
throw;
}
vcpu.location = vcpu_location;
- return Vcpu_id { _vcpu_id_alloc++ };
-}
-
-Genode::Dataspace_capability
-Vm_session_component::_cpu_state(Vm_session::Vcpu_id id)
-{
- return (_valid_id(id)) ? _vcpus[id.id].ds_cap
- : Genode::Ram_dataspace_capability();
+ _vcpu_id_alloc ++;
+ return vcpu.cap();
}
diff --git a/repos/base-hw/src/core/vm_session_component.h b/repos/base-hw/src/core/vm_session_component.h
index 6a1c7c9325..31c8c95792 100644
--- a/repos/base-hw/src/core/vm_session_component.h
+++ b/repos/base-hw/src/core/vm_session_component.h
@@ -21,6 +21,8 @@
#include
#include
+#include
+
/* Core includes */
#include
#include
@@ -49,13 +51,36 @@ class Genode::Vm_session_component
Vm_session_component(Vm_session_component const &);
Vm_session_component &operator = (Vm_session_component const &);
- struct Vcpu
+ struct Vcpu : public Rpc_object<Vm_session::Native_vcpu, Vcpu>
{
+ Kernel::Vm::Identity &id;
+ Rpc_entrypoint &ep;
Ram_dataspace_capability ds_cap { };
Region_map::Local_addr ds_addr { nullptr };
Kernel_object<Kernel::Vm> kobj {};
Affinity::Location location {};
- } _vcpus[Board::VCPU_MAX];
+
+ Vcpu(Kernel::Vm::Identity &id, Rpc_entrypoint &ep) : id(id), ep(ep)
+ {
+ ep.manage(this);
+ }
+
+ ~Vcpu()
+ {
+ ep.dissolve(this);
+ }
+
+ /*******************************
+ ** Native_vcpu RPC interface **
+ *******************************/
+
+ Capability<Dataspace> state() const { return ds_cap; }
+ Native_capability native_vcpu() { return kobj.cap(); }
+
+ void exception_handler(Signal_context_capability);
+ };
+
+ Constructible<Vcpu> _vcpus[Board::VCPU_MAX];
Rpc_entrypoint &_ep;
Constrained_ram_allocator _constrained_md_ram_alloc;
@@ -68,10 +93,11 @@ class Genode::Vm_session_component
unsigned _vcpu_id_alloc { 0 };
static size_t _ds_size();
- bool _valid_id(Vcpu_id id) { return id.id < Board::VCPU_MAX; }
- addr_t _alloc_ds();
+
void * _alloc_table();
void _attach(addr_t phys_addr, addr_t vm_addr, size_t size);
+
+ /* helpers for vm_session_common.cc */
void _attach_vm_memory(Dataspace_component &, addr_t,
Attach_attr);
void _detach_vm_memory(addr_t, size_t);
@@ -107,13 +133,7 @@ class Genode::Vm_session_component
void attach_pic(addr_t) override;
void detach(addr_t, size_t) override;
- Dataspace_capability _cpu_state(Vcpu_id);
- Vcpu_id _create_vcpu(Thread_capability);
- void _exception_handler(Signal_context_capability,
- Vcpu_id);
- void _run(Vcpu_id);
- void _pause(Vcpu_id);
- Capability _native_vcpu(Vcpu_id);
+ Capability<Vm_session::Native_vcpu> create_vcpu(Thread_capability);
};
#endif /* _CORE__VM_SESSION_COMPONENT_H_ */
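
The new per-vCPU Rpc_object ties RPC visibility to object lifetime: manage() in the constructor, dissolve() in the destructor, so a vCPU capability can never outlive the object serving it. A standalone analogue of this register-in-constructor / deregister-in-destructor idiom (illustrative names only):

  #include <algorithm>
  #include <iostream>
  #include <vector>

  struct Object;

  struct Entrypoint
  {
      std::vector<Object *> objects;

      void manage(Object *o)   { objects.push_back(o); }
      void dissolve(Object *o) { objects.erase(std::remove(objects.begin(),
                                                           objects.end(), o),
                                               objects.end()); }
  };

  struct Object
  {
      Entrypoint &ep;

      Object(Entrypoint &e) : ep(e) { ep.manage(this); }    /* register on construction */
      ~Object()                     { ep.dissolve(this); }  /* deregister on destruction */
  };

  int main()
  {
      Entrypoint ep;
      {
          Object o(ep);
          std::cout << "managed: " << ep.objects.size() << '\n';   /* prints 1 */
      }
      std::cout << "managed: " << ep.objects.size() << '\n';       /* prints 0 */
  }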
diff --git a/repos/base-hw/src/include/hw_native_vcpu/hw_native_vcpu.h b/repos/base-hw/src/include/hw_native_vcpu/hw_native_vcpu.h
new file mode 100644
index 0000000000..eb8b2b2e27
--- /dev/null
+++ b/repos/base-hw/src/include/hw_native_vcpu/hw_native_vcpu.h
@@ -0,0 +1,29 @@
+ /*
+ * \brief hw vCPU RPC interface
+ * \author Christian Helmuth
+ * \date 2021-01-19
+ */
+
+/*
+ * Copyright (C) 2021 Genode Labs GmbH
+ *
+ * This file is part of the Genode OS framework, which is distributed
+ * under the terms of the GNU Affero General Public License version 3.
+ */
+
+#ifndef _INCLUDE__HW_NATIVE_VCPU__HW_NATIVE_VCPU_H_
+#define _INCLUDE__HW_NATIVE_VCPU__HW_NATIVE_VCPU_H_
+
+#include
+#include
+
+struct Genode::Vm_session::Native_vcpu : Interface
+{
+ GENODE_RPC(Rpc_state, Capability<Dataspace>, state);
+ GENODE_RPC(Rpc_native_vcpu, Native_capability, native_vcpu);
+ GENODE_RPC(Rpc_exception_handler, void, exception_handler, Signal_context_capability);
+
+ GENODE_RPC_INTERFACE(Rpc_state, Rpc_native_vcpu, Rpc_exception_handler);
+};
+
+#endif /* _INCLUDE__HW_NATIVE_VCPU__HW_NATIVE_VCPU_H_ */
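
For readers unfamiliar with the RPC macros: the three GENODE_RPC declarations above describe an interface whose plain-C++ shape is roughly the following sketch. The placeholder types stand in for the Genode capability types and are not part of the framework:

  struct Dataspace_cap { };         /* stands in for Capability<Dataspace> */
  struct Native_cap { };            /* stands in for Native_capability */
  struct Signal_context_cap { };    /* stands in for Signal_context_capability */

  struct Native_vcpu_shape
  {
      virtual ~Native_vcpu_shape() { }

      virtual Dataspace_cap state()                               = 0;  /* Rpc_state */
      virtual Native_cap    native_vcpu()                         = 0;  /* Rpc_native_vcpu */
      virtual void          exception_handler(Signal_context_cap) = 0;  /* Rpc_exception_handler */
  };

The client side invokes these operations via call<Rpc_state>() and friends, as the new base-hw vm.cc below shows.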
diff --git a/repos/base-hw/src/lib/base/vm.cc b/repos/base-hw/src/lib/base/vm.cc
new file mode 100644
index 0000000000..009efdf158
--- /dev/null
+++ b/repos/base-hw/src/lib/base/vm.cc
@@ -0,0 +1,91 @@
+/*
+ * \brief Client-side VM session interface
+ * \author Alexander Boettcher
+ * \date 2018-08-27
+ */
+
+/*
+ * Copyright (C) 2018-2021 Genode Labs GmbH
+ *
+ * This file is part of the Genode OS framework, which is distributed
+ * under the terms of the GNU Affero General Public License version 3.
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+
+#include
+
+using namespace Genode;
+
+using Exit_config = Vm_connection::Exit_config;
+
+
+/****************************
+ ** hw vCPU implementation **
+ ****************************/
+
+struct Hw_vcpu : Rpc_client<Vm_session::Native_vcpu>, Noncopyable
+{
+ private:
+
+ Attached_dataspace _state;
+ Native_capability _kernel_vcpu { };
+
+ Capability<Vm_session::Native_vcpu> _create_vcpu(Vm_connection &, Vcpu_handler_base &);
+
+ public:
+
+ Hw_vcpu(Env &, Vm_connection &, Vcpu_handler_base &);
+
+ void run() {
+ Kernel::run_vm(Capability_space::capid(_kernel_vcpu)); }
+
+ void pause() {
+ Kernel::pause_vm(Capability_space::capid(_kernel_vcpu)); }
+
+ Vcpu_state & state() { return *_state.local_addr<Vcpu_state>(); }
+};
+
+
+Hw_vcpu::Hw_vcpu(Env &env, Vm_connection &vm, Vcpu_handler_base &handler)
+:
+ Rpc_client(_create_vcpu(vm, handler)),
+ _state(env.rm(), vm.with_upgrade([&] () { return call<Rpc_state>(); }))
+{
+ call<Rpc_exception_handler>(handler.signal_cap());
+ _kernel_vcpu = call<Rpc_native_vcpu>();
+}
+
+
+Capability<Vm_session::Native_vcpu> Hw_vcpu::_create_vcpu(Vm_connection &vm,
+ Vcpu_handler_base &handler)
+{
+ Thread &tep { *reinterpret_cast<Thread *>(&handler.rpc_ep()) };
+
+ return vm.with_upgrade([&] () {
+ return vm.call<Vm_session::Rpc_create_vcpu>(tep.cap()); });
+}
+
+
+/**************
+ ** vCPU API **
+ **************/
+
+void Vm_connection::Vcpu::run() { static_cast<Hw_vcpu &>(_native_vcpu).run(); }
+void Vm_connection::Vcpu::pause() { static_cast<Hw_vcpu &>(_native_vcpu).pause(); }
+Vcpu_state & Vm_connection::Vcpu::state() { return static_cast<Hw_vcpu &>(_native_vcpu).state(); }
+
+
+Vm_connection::Vcpu::Vcpu(Vm_connection &vm, Allocator &alloc,
+ Vcpu_handler_base &handler, Exit_config const &)
+:
+ _native_vcpu(*new (alloc) Hw_vcpu(vm._env, vm, handler))
+{ }
diff --git a/repos/base-hw/src/lib/base/vm_session.cc b/repos/base-hw/src/lib/base/vm_session.cc
deleted file mode 100644
index 8f07fb5625..0000000000
--- a/repos/base-hw/src/lib/base/vm_session.cc
+++ /dev/null
@@ -1,76 +0,0 @@
-/*
- * \brief Client-side VM session interface
- * \author Alexander Boettcher
- * \date 2018-08-27
- */
-
-/*
- * Copyright (C) 2018 Genode Labs GmbH
- *
- * This file is part of the Genode OS framework, which is distributed
- * under the terms of the GNU Affero General Public License version 3.
- */
-
-#include
-#include
-#include
-#include
-#include
-#include
-
-using namespace Genode;
-
-struct Vcpu;
-
-static Genode::Registry> vcpus;
-
-
-struct Vcpu
-{
- Vm_session_client::Vcpu_id const id;
- Capability const cap;
-
- Vcpu(Vm_session::Vcpu_id id, Capability cap)
- : id(id), cap(cap) { }
-
- virtual ~Vcpu() { }
-};
-
-
-Vm_session::Vcpu_id
-Vm_session_client::create_vcpu(Allocator & alloc, Env &, Vm_handler_base & handler)
-{
- Vcpu_id const id =
- call(reinterpret_cast(&handler._rpc_ep)->cap());
- call(handler._cap, id);
- Vcpu * vcpu = new (alloc) Registered (vcpus, id, call(id));
- return vcpu->id;
-}
-
-
-void Vm_session_client::run(Vcpu_id const vcpu_id)
-{
- vcpus.for_each([&] (Vcpu & vcpu) {
- if (vcpu.id.id != vcpu_id.id) { return; }
- Kernel::run_vm(Capability_space::capid(vcpu.cap));
- });
-}
-
-
-void Vm_session_client::pause(Vcpu_id const vcpu_id)
-{
- vcpus.for_each([&] (Vcpu & vcpu) {
- if (vcpu.id.id != vcpu_id.id) { return; }
- Kernel::pause_vm(Capability_space::capid(vcpu.cap));
- });
-}
-
-
-Dataspace_capability Vm_session_client::cpu_state(Vcpu_id const vcpu_id)
-{
- return call(vcpu_id);
-}
-
-
-Vm_session::~Vm_session()
-{ }
diff --git a/repos/base-nova/include/nova/syscall-generic.h b/repos/base-nova/include/nova/syscall-generic.h
index ac0ea8b8cd..7718edd256 100644
--- a/repos/base-nova/include/nova/syscall-generic.h
+++ b/repos/base-nova/include/nova/syscall-generic.h
@@ -756,7 +756,8 @@ namespace Nova {
NUM_INITIAL_PT_LOG2 = 5,
NUM_INITIAL_PT = 1UL << NUM_INITIAL_PT_LOG2,
NUM_INITIAL_PT_RESERVED = 2 * NUM_INITIAL_PT,
- NUM_INITIAL_VCPU_PT_LOG2 = 8,
+ NUM_INITIAL_VCPU_PT_LOG2 = 8,
+ NUM_INITIAL_VCPU_PT = 1UL << NUM_INITIAL_VCPU_PT_LOG2,
};
/**
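
The new NUM_INITIAL_VCPU_PT constant spells out the window size that was previously only implicit in the LOG2 value: each vCPU owns a contiguous window of 256 exit portals, and core places exit portal 'exit' of vCPU 'id' at NUM_INITIAL_VCPU_PT * id + exit (see Vcpu::exit_handler and the create_ec event base below). A quick standalone check of that layout:

  #include <cassert>

  constexpr unsigned long NUM_INITIAL_VCPU_PT_LOG2 = 8;
  constexpr unsigned long NUM_INITIAL_VCPU_PT      = 1UL << NUM_INITIAL_VCPU_PT_LOG2;

  static_assert(NUM_INITIAL_VCPU_PT == 256, "one 256-portal window per vCPU");

  /* exit portal 'exit' of vCPU 'id' lives at NUM_INITIAL_VCPU_PT * id + exit */
  unsigned long portal(unsigned long id, unsigned long exit)
  {
      return NUM_INITIAL_VCPU_PT * id + exit;
  }

  int main()
  {
      assert(portal(0, NUM_INITIAL_VCPU_PT - 1) + 1 == portal(1, 0));   /* windows abut */
  }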
diff --git a/repos/base-nova/src/core/include/vm_session_component.h b/repos/base-nova/src/core/include/vm_session_component.h
index 1598935de7..363e6a41c2 100644
--- a/repos/base-nova/src/core/include/vm_session_component.h
+++ b/repos/base-nova/src/core/include/vm_session_component.h
@@ -1,11 +1,12 @@
/*
* \brief Core-specific instance of the VM session interface
* \author Alexander Boettcher
+ * \author Christian Helmuth
* \date 2018-08-26
*/
/*
- * Copyright (C) 2018 Genode Labs GmbH
+ * Copyright (C) 2018-2021 Genode Labs GmbH
*
* This file is part of the Genode OS framework, which is distributed
* under the terms of the GNU Affero General Public License version 3.
@@ -19,6 +20,7 @@
#include
#include
#include
+#include
namespace Genode { class Vm_session_component; }
@@ -34,63 +36,26 @@ class Genode::Vm_session_component
typedef Constrained_ram_allocator Con_ram_allocator;
typedef Allocator_avl_tpl Avl_region;
- class Vcpu : private List::Element,
- public Trace::Source::Info_accessor
+ class Vcpu : public Rpc_object<Vm_session::Native_vcpu, Vcpu>,
+ public Trace::Source::Info_accessor
{
-
- friend class List;
- friend class Vm_session_component;
-
public:
- enum State { INIT, ALIVE };
+ struct Creation_failed { };
private:
+ Rpc_entrypoint &_ep;
Constrained_ram_allocator &_ram_alloc;
Cap_quota_guard &_cap_alloc;
Trace::Source_registry &_trace_sources;
- Ram_dataspace_capability _ds_cap { };
addr_t _sel_sm_ec_sc;
- addr_t _vm_pt_cnt { 0 };
- Vcpu_id const _id;
- State _state { INIT };
+ bool _alive { false };
+ unsigned const _id;
Affinity::Location const _location;
+ unsigned const _priority;
Session_label const &_label;
-
- public:
-
- Vcpu(Constrained_ram_allocator &ram_alloc,
- Cap_quota_guard &cap_alloc,
- Vcpu_id const id, Affinity::Location const,
- Session_label const &,
- Trace::Control_area &,
- Trace::Source_registry &);
-
- ~Vcpu();
-
- addr_t sm_sel() const { return _sel_sm_ec_sc + 0; }
- addr_t ec_sel() const { return _sel_sm_ec_sc + 1; }
- addr_t sc_sel() const { return _sel_sm_ec_sc + 2; }
-
- addr_t new_pt_id();
-
- Vcpu_id id() { return _id; }
- bool match(Vcpu_id const id) const { return id.id == _id.id; }
- Ram_dataspace_capability ds_cap() const { return _ds_cap; }
-
- bool init() const { return _state == State::INIT; }
- void alive() { _state = ALIVE; };
-
- static addr_t invalid() { return ~0UL; }
-
- /********************************************
- ** Trace::Source::Info_accessor interface **
- ********************************************/
-
- Trace::Source::Info trace_source_info() const override;
-
- private:
+ addr_t const _pd_sel;
struct Trace_control_slot
{
@@ -118,6 +83,42 @@ class Genode::Vm_session_component
Trace_control_slot _trace_control_slot;
Trace::Source _trace_source { *this, _trace_control_slot.control() };
+
+ public:
+
+ Vcpu(Rpc_entrypoint &,
+ Constrained_ram_allocator &ram_alloc,
+ Cap_quota_guard &cap_alloc,
+ unsigned id,
+ unsigned kernel_id,
+ Affinity::Location,
+ unsigned priority,
+ Session_label const &,
+ addr_t pd_sel,
+ addr_t core_pd_sel,
+ addr_t vmm_pd_sel,
+ Trace::Control_area &,
+ Trace::Source_registry &);
+
+ ~Vcpu();
+
+ addr_t sm_sel() const { return _sel_sm_ec_sc + 0; }
+ addr_t ec_sel() const { return _sel_sm_ec_sc + 1; }
+ addr_t sc_sel() const { return _sel_sm_ec_sc + 2; }
+
+ /*******************************
+ ** Native_vcpu RPC interface **
+ *******************************/
+
+ Capability<Dataspace> state();
+ void startup();
+ void exit_handler(unsigned, Signal_context_capability);
+
+ /********************************************
+ ** Trace::Source::Info_accessor interface **
+ ********************************************/
+
+ Trace::Source::Info trace_source_info() const override;
};
Rpc_entrypoint &_ep;
@@ -127,20 +128,13 @@ class Genode::Vm_session_component
Sliced_heap _heap;
Avl_region _map { &_heap };
addr_t _pd_sel { 0 };
- unsigned _id_alloc { 0 };
+ unsigned _next_vcpu_id { 0 };
unsigned _priority;
Session_label const _session_label;
- List _vcpus { };
-
- Vcpu * _lookup(Vcpu_id const vcpu_id)
- {
- for (Vcpu * vcpu = _vcpus.first(); vcpu; vcpu = vcpu->next())
- if (vcpu->match(vcpu_id)) return vcpu;
-
- return nullptr;
- }
+ Registry<Registered<Vcpu>> _vcpus { };
+ /* helpers for vm_session_common.cc */
void _attach_vm_memory(Dataspace_component &, addr_t, Attach_attr);
void _detach_vm_memory(addr_t, size_t);
@@ -163,24 +157,20 @@ class Genode::Vm_session_component
** Region_map_detach interface **
*********************************/
- void detach(Region_map::Local_addr) override;
- void unmap_region(addr_t, size_t) override;
+ /* used on destruction of attached dataspaces */
+ void detach(Region_map::Local_addr) override; /* vm_session_common.cc */
+ void unmap_region(addr_t, size_t) override; /* vm_session_common.cc */
/**************************
** Vm session interface **
**************************/
- Dataspace_capability _cpu_state(Vcpu_id);
+ Capability<Vm_session::Native_vcpu> create_vcpu(Thread_capability);
+ void attach_pic(addr_t) override { /* unused on NOVA */ }
+
+ void attach(Dataspace_capability, addr_t, Attach_attr) override; /* vm_session_common.cc */
+ void detach(addr_t, size_t) override; /* vm_session_common.cc */
- void _exception_handler(Signal_context_capability, Vcpu_id);
- void _run(Vcpu_id);
- void _pause(Vcpu_id) { }
- void attach(Dataspace_capability, addr_t, Attach_attr) override;
- void attach_pic(addr_t) override {}
- void detach(addr_t, size_t) override;
- Vcpu_id _create_vcpu(Thread_capability);
- Capability _native_vcpu(Vcpu_id) {
- return Capability(); }
};
#endif /* _CORE__VM_SESSION_COMPONENT_H_ */
diff --git a/repos/base-nova/src/core/vm_session_component.cc b/repos/base-nova/src/core/vm_session_component.cc
index f7206773fe..0d30067ee7 100644
--- a/repos/base-nova/src/core/vm_session_component.cc
+++ b/repos/base-nova/src/core/vm_session_component.cc
@@ -1,11 +1,12 @@
/*
* \brief Core-specific instance of the VM session interface
* \author Alexander Boettcher
+ * \author Christian Helmuth
* \date 2018-08-26
*/
/*
- * Copyright (C) 2018 Genode Labs GmbH
+ * Copyright (C) 2018-2021 Genode Labs GmbH
*
* This file is part of the Genode OS framework, which is distributed
* under the terms of the GNU Affero General Public License version 3.
@@ -13,7 +14,7 @@
/* Base includes */
#include
-#include
+#include
#include
#include
@@ -31,78 +32,60 @@
/* NOVA includes */
#include
-using Genode::addr_t;
-using Genode::Vm_session_component;
-using Vcpu_id = Genode::Vm_session::Vcpu_id;
+using namespace Genode;
+
enum { CAP_RANGE_LOG2 = 2, CAP_RANGE = 1 << CAP_RANGE_LOG2 };
-Vm_session_component::Vcpu::Vcpu(Constrained_ram_allocator &ram_alloc,
- Cap_quota_guard &cap_alloc,
- Vcpu_id const id,
- Affinity::Location const location,
- Session_label const &label,
- Trace::Control_area &trace_control_area,
- Trace::Source_registry &trace_sources)
-:
- _ram_alloc(ram_alloc),
- _cap_alloc(cap_alloc),
- _trace_sources(trace_sources),
- _sel_sm_ec_sc(invalid()),
- _id(id),
- _location(location),
- _label(label),
- _trace_control_slot(trace_control_area)
+static addr_t invalid_sel() { return ~0UL; }
+
+static Nova::uint8_t map_async_caps(Nova::Obj_crd const src,
+ Nova::Obj_crd const dst,
+ addr_t const dst_pd)
{
- /* account caps required to setup vCPU */
- _cap_alloc.withdraw(Cap_quota{CAP_RANGE});
+ using Nova::Utcb;
- /* now try to allocate cap indexes */
- _sel_sm_ec_sc = cap_map().insert(CAP_RANGE_LOG2);
- if (_sel_sm_ec_sc == invalid()) {
- error("out of caps in core");
- _cap_alloc.replenish(Cap_quota{CAP_RANGE});
- return;
- }
+ Utcb &utcb = *reinterpret_cast<Utcb *>(Thread::myself()->utcb());
+ addr_t const src_pd = platform_specific().core_pd_sel();
- try {
- /* create ds for vCPU state */
- _ds_cap = _ram_alloc.alloc(align_addr(sizeof(Genode::Vm_state), 12),
- Cache_attribute::CACHED);
- } catch (...) {
- _cap_alloc.replenish(Cap_quota{CAP_RANGE});
- cap_map().remove(_sel_sm_ec_sc, CAP_RANGE_LOG2);
- throw;
- }
+ utcb.set_msg_word(0);
+ /* ignore return value as one item always fits into the utcb */
+ bool const ok = utcb.append_item(src, 0);
+ (void)ok;
- _trace_sources.insert(&_trace_source);
+ /* asynchronously map capabilities */
+ return Nova::delegate(src_pd, dst_pd, dst);
}
-Vm_session_component::Vcpu::~Vcpu()
+
+static Nova::uint8_t kernel_quota_upgrade(addr_t const pd_target)
{
- _trace_sources.remove(&_trace_source);
-
- if (_ds_cap.valid())
- _ram_alloc.free(_ds_cap);
-
- if (_sel_sm_ec_sc != invalid()) {
- _cap_alloc.replenish(Cap_quota{CAP_RANGE});
- cap_map().remove(_sel_sm_ec_sc, CAP_RANGE_LOG2);
- }
+ return Pager_object::handle_oom(Pager_object::SRC_CORE_PD, pd_target,
+ "core", "ep",
+ Pager_object::Policy::UPGRADE_CORE_TO_DST);
}
-addr_t Vm_session_component::Vcpu::new_pt_id()
-{
- enum { MAX_VM_EXITS = (1U << Nova::NUM_INITIAL_VCPU_PT_LOG2) };
- if (_vm_pt_cnt >= MAX_VM_EXITS)
- return invalid();
- return MAX_VM_EXITS * _id.id + _vm_pt_cnt ++;
+template <typename FUNC>
+static uint8_t _with_kernel_quota_upgrade(addr_t const pd_target,
+ FUNC const &func)
+{
+ uint8_t res;
+ do {
+ res = func();
+ } while (res == Nova::NOVA_PD_OOM &&
+ Nova::NOVA_OK == kernel_quota_upgrade(pd_target));
+ return res;
}
-Genode::Trace::Source::Info Vm_session_component::Vcpu::trace_source_info() const
+
+/********************************
+ ** Vm_session_component::Vcpu **
+ ********************************/
+
+Trace::Source::Info Vm_session_component::Vcpu::trace_source_info() const
{
- Genode::uint64_t sc_time = 0;
+ uint64_t sc_time = 0;
uint8_t res = Nova::sc_ctrl(sc_sel(), sc_time);
if (res != Nova::NOVA_OK)
@@ -114,176 +97,34 @@ Genode::Trace::Source::Info Vm_session_component::Vcpu::trace_source_info() cons
}
-static Nova::uint8_t map_async_caps(Nova::Obj_crd const src,
- Nova::Obj_crd const dst,
- addr_t const dst_pd)
+void Vm_session_component::Vcpu::startup()
{
- using Nova::Utcb;
- using Genode::Thread;
-
- Utcb &utcb = *reinterpret_cast(Thread::myself()->utcb());
- addr_t const src_pd = Genode::platform_specific().core_pd_sel();
-
- utcb.set_msg_word(0);
- /* ignore return value as one item always fits into the utcb */
- bool const ok = utcb.append_item(src, 0);
- (void)ok;
-
- /* asynchronously map capabilities */
- return Nova::delegate(src_pd, dst_pd, dst);
-}
-
-static Nova::uint8_t kernel_quota_upgrade(addr_t const pd_target)
-{
- using Genode::Pager_object;
-
- return Pager_object::handle_oom(Pager_object::SRC_CORE_PD, pd_target,
- "core", "ep",
- Pager_object::Policy::UPGRADE_CORE_TO_DST);
-}
-
-template
-static Genode::uint8_t _with_kernel_quota_upgrade(addr_t const pd_target,
- FUNC const &func)
-{
- Genode::uint8_t res;
- do {
- res = func();
- } while (res == Nova::NOVA_PD_OOM &&
- Nova::NOVA_OK == kernel_quota_upgrade(pd_target));
- return res;
-}
-
-Vcpu_id Vm_session_component::_create_vcpu(Thread_capability cap)
-{
- Vcpu_id ret;
- if (!cap.valid()) return ret;
-
- /* lookup vmm pd and cpu location of handler thread in VMM */
- addr_t kernel_cpu_id = 0;
- Affinity::Location vcpu_location;
-
- auto lambda = [&] (Cpu_thread_component *ptr) {
- if (!ptr)
- return Vcpu::invalid();
-
- Cpu_thread_component &thread = *ptr;
-
- vcpu_location = thread.platform_thread().affinity();
- kernel_cpu_id = platform_specific().kernel_cpu_id(thread.platform_thread().affinity());
-
- return thread.platform_thread().pager().pd_sel();
- };
- addr_t const vmm_pd_sel = _ep.apply(cap, lambda);
-
- /* if VMM pd lookup failed then deny to create vCPU */
- if (!vmm_pd_sel || vmm_pd_sel == Vcpu::invalid())
- return ret;
-
- /* allocate vCPU object */
- Vcpu &vcpu = *new (_heap) Vcpu(_constrained_md_ram_alloc,
- _cap_quota_guard(),
- Vcpu_id {_id_alloc},
- vcpu_location,
- _session_label,
- _trace_control_area,
- _trace_sources);
-
- /* we ran out of caps in core */
- if (!vcpu.ds_cap().valid())
- return ret;
-
- /* core PD selector */
- addr_t const core_pd = platform_specific().core_pd_sel();
-
- /* setup vCPU resources */
- uint8_t res = _with_kernel_quota_upgrade(_pd_sel, [&] {
- return Nova::create_sm(vcpu.sm_sel(), core_pd, 0);
- });
-
- if (res != Nova::NOVA_OK) {
- error("create_sm = ", res);
- destroy(_heap, &vcpu);
- return ret;
- }
-
- addr_t const event_base = (1U << Nova::NUM_INITIAL_VCPU_PT_LOG2) * _id_alloc;
- enum { THREAD_GLOBAL = true, NO_UTCB = 0, NO_STACK = 0 };
- res = _with_kernel_quota_upgrade(_pd_sel, [&] {
- return Nova::create_ec(vcpu.ec_sel(), _pd_sel, kernel_cpu_id,
- NO_UTCB, NO_STACK, event_base, THREAD_GLOBAL);
- });
-
- if (res != Nova::NOVA_OK) {
- error("create_ec = ", res);
- destroy(_heap, &vcpu);
- return ret;
- }
-
- addr_t const dst_sm_ec_sel = Nova::NUM_INITIAL_PT_RESERVED
- + _id_alloc * CAP_RANGE;
-
- res = _with_kernel_quota_upgrade(vmm_pd_sel, [&] {
- using namespace Nova;
-
- enum { CAP_LOG2_COUNT = 1 };
- int permission = Obj_crd::RIGHT_EC_RECALL | Obj_crd::RIGHT_SM_UP |
- Obj_crd::RIGHT_SM_DOWN;
- Obj_crd const src(vcpu.sm_sel(), CAP_LOG2_COUNT, permission);
- Obj_crd const dst(dst_sm_ec_sel, CAP_LOG2_COUNT);
-
- return map_async_caps(src, dst, vmm_pd_sel);
- });
-
- if (res != Nova::NOVA_OK)
- {
- error("map sm ", res, " ", _id_alloc);
- destroy(_heap, &vcpu);
- return ret;
- }
-
- _vcpus.insert(&vcpu);
- _id_alloc++;
- return vcpu.id();
-}
-
-void Vm_session_component::_run(Vcpu_id const vcpu_id)
-{
- Vcpu * ptr = _lookup(vcpu_id);
- if (!ptr)
- return;
-
- Vcpu &vcpu = *ptr;
-
- if (!vcpu.init())
- return;
+ /* initialize SC on first call - do nothing on subsequent calls */
+ if (_alive) return;
uint8_t res = _with_kernel_quota_upgrade(_pd_sel, [&] {
- return Nova::create_sc(vcpu.sc_sel(), _pd_sel, vcpu.ec_sel(),
+ return Nova::create_sc(sc_sel(), _pd_sel, ec_sel(),
Nova::Qpd(Nova::Qpd::DEFAULT_QUANTUM, _priority));
});
if (res == Nova::NOVA_OK)
- vcpu.alive();
+ _alive = true;
else
error("create_sc=", res);
}
-void Vm_session_component::_exception_handler(Signal_context_capability const cap,
- Vcpu_id const vcpu_id)
+
+void Vm_session_component::Vcpu::exit_handler(unsigned const exit,
+ Signal_context_capability const cap)
{
if (!cap.valid())
return;
- Vcpu * ptr = _lookup(vcpu_id);
- if (!ptr)
+ if (exit >= Nova::NUM_INITIAL_VCPU_PT)
return;
- Vcpu &vcpu = *ptr;
-
- addr_t const pt = vcpu.new_pt_id();
- if (pt == Vcpu::invalid())
- return;
+ /* map handler into vCPU-specific range of VM protection domain */
+ addr_t const pt = Nova::NUM_INITIAL_VCPU_PT * _id + exit;
uint8_t res = _with_kernel_quota_upgrade(_pd_sel, [&] {
Nova::Obj_crd const src(cap.local_name(), 0);
@@ -296,77 +137,111 @@ void Vm_session_component::_exception_handler(Signal_context_capability const ca
error("map pt ", res, " failed");
}
-Genode::Dataspace_capability Vm_session_component::_cpu_state(Vcpu_id const vcpu_id)
-{
- Vcpu * ptr = _lookup(vcpu_id);
- if (!ptr)
- return Dataspace_capability();
- Vcpu &vcpu = *ptr;
- return vcpu.ds_cap();
-}
-
-Vm_session_component::Vm_session_component(Rpc_entrypoint &ep,
- Resources resources,
- Label const &label,
- Diag,
- Ram_allocator &ram,
- Region_map &local_rm,
- unsigned const priority,
- Trace::Source_registry &trace_sources)
+Vm_session_component::Vcpu::Vcpu(Rpc_entrypoint &ep,
+ Constrained_ram_allocator &ram_alloc,
+ Cap_quota_guard &cap_alloc,
+ unsigned const id,
+ unsigned const kernel_id,
+ Affinity::Location const location,
+ unsigned const priority,
+ Session_label const &label,
+ addr_t const pd_sel,
+ addr_t const core_pd_sel,
+ addr_t const vmm_pd_sel,
+ Trace::Control_area &trace_control_area,
+ Trace::Source_registry &trace_sources)
:
- Ram_quota_guard(resources.ram_quota),
- Cap_quota_guard(resources.cap_quota),
_ep(ep),
- _trace_control_area(ram, local_rm), _trace_sources(trace_sources),
- _constrained_md_ram_alloc(ram, _ram_quota_guard(), _cap_quota_guard()),
- _heap(_constrained_md_ram_alloc, local_rm),
- _priority(scale_priority(priority, "VM session")),
- _session_label(label)
+ _ram_alloc(ram_alloc),
+ _cap_alloc(cap_alloc),
+ _trace_sources(trace_sources),
+ _sel_sm_ec_sc(invalid_sel()),
+ _id(id),
+ _location(location),
+ _priority(priority),
+ _label(label),
+ _pd_sel(pd_sel),
+ _trace_control_slot(trace_control_area)
{
- _cap_quota_guard().withdraw(Cap_quota{1});
+ /* account caps required to setup vCPU */
+ Cap_quota_guard::Reservation caps(_cap_alloc, Cap_quota{CAP_RANGE});
- _pd_sel = cap_map().insert();
- if (!_pd_sel || _pd_sel == Vcpu::invalid())
- throw Service_denied();
+ /* now try to allocate cap indexes */
+ _sel_sm_ec_sc = cap_map().insert(CAP_RANGE_LOG2);
+ if (_sel_sm_ec_sc == invalid_sel()) {
+ error("out of caps in core");
+ throw Creation_failed();
+ }
+
+ /* setup resources */
+ uint8_t res = _with_kernel_quota_upgrade(_pd_sel, [&] {
+ return Nova::create_sm(sm_sel(), core_pd_sel, 0);
+ });
- addr_t const core_pd = platform_specific().core_pd_sel();
- enum { KEEP_FREE_PAGES_NOT_AVAILABLE_FOR_UPGRADE = 2, UPPER_LIMIT_PAGES = 32 };
- uint8_t res = Nova::create_pd(_pd_sel, core_pd, Nova::Obj_crd(),
- KEEP_FREE_PAGES_NOT_AVAILABLE_FOR_UPGRADE,
- UPPER_LIMIT_PAGES);
if (res != Nova::NOVA_OK) {
- error("create_pd = ", res);
- cap_map().remove(_pd_sel, 0, true);
- throw Service_denied();
+ cap_map().remove(_sel_sm_ec_sc, CAP_RANGE_LOG2);
+ error("create_sm = ", res);
+ throw Creation_failed();
}
- /* configure managed VM area */
- _map.add_range(0, 0UL - 0x1000);
- _map.add_range(0UL - 0x1000, 0x1000);
+ addr_t const event_base = (1U << Nova::NUM_INITIAL_VCPU_PT_LOG2) * id;
+ enum { THREAD_GLOBAL = true, NO_UTCB = 0, NO_STACK = 0 };
+ res = _with_kernel_quota_upgrade(_pd_sel, [&] {
+ return Nova::create_ec(ec_sel(), _pd_sel, kernel_id,
+ NO_UTCB, NO_STACK, event_base, THREAD_GLOBAL);
+ });
+
+ if (res != Nova::NOVA_OK) {
+ cap_map().remove(_sel_sm_ec_sc, CAP_RANGE_LOG2);
+ error("create_ec = ", res);
+ throw Creation_failed();
+ }
+
+ addr_t const dst_sm_ec_sel = Nova::NUM_INITIAL_PT_RESERVED + _id*CAP_RANGE;
+
+ res = _with_kernel_quota_upgrade(vmm_pd_sel, [&] {
+ using namespace Nova;
+
+ enum { CAP_LOG2_COUNT = 1 };
+ int permission = Obj_crd::RIGHT_EC_RECALL | Obj_crd::RIGHT_SM_UP |
+ Obj_crd::RIGHT_SM_DOWN;
+ Obj_crd const src(sm_sel(), CAP_LOG2_COUNT, permission);
+ Obj_crd const dst(dst_sm_ec_sel, CAP_LOG2_COUNT);
+
+ return map_async_caps(src, dst, vmm_pd_sel);
+ });
+
+ if (res != Nova::NOVA_OK) {
+ cap_map().remove(_sel_sm_ec_sc, CAP_RANGE_LOG2);
+ error("map sm ", res, " ", _id);
+ throw Creation_failed();
+ }
+
+ _ep.manage(this);
+
+ _trace_sources.insert(&_trace_source);
+
+ caps.acknowledge();
}
-Vm_session_component::~Vm_session_component()
+
+Vm_session_component::Vcpu::~Vcpu()
{
- for (;Vcpu * vcpu = _vcpus.first();) {
- _vcpus.remove(vcpu);
- destroy(_heap, vcpu);
+ _ep.dissolve(this);
+
+ _trace_sources.remove(&_trace_source);
+
+ if (_sel_sm_ec_sc != invalid_sel()) {
+ _cap_alloc.replenish(Cap_quota{CAP_RANGE});
+ cap_map().remove(_sel_sm_ec_sc, CAP_RANGE_LOG2);
}
-
- /* detach all regions */
- while (true) {
- addr_t out_addr = 0;
-
- if (!_map.any_block_addr(&out_addr))
- break;
-
- detach(out_addr);
- }
-
- if (_pd_sel && _pd_sel != Vcpu::invalid())
- cap_map().remove(_pd_sel, 0, true);
}
+/**************************
+ ** Vm_session_component **
+ **************************/
+
void Vm_session_component::_attach_vm_memory(Dataspace_component &dsc,
addr_t const guest_phys,
Attach_attr const attribute)
@@ -420,3 +295,120 @@ void Vm_session_component::_detach_vm_memory(addr_t guest_phys, size_t size)
page = flex.page();
}
}
+
+
+Capability<Vm_session::Native_vcpu> Vm_session_component::create_vcpu(Thread_capability cap)
+{
+ if (!cap.valid()) return { };
+
+ /* lookup vmm pd and cpu location of handler thread in VMM */
+ addr_t kernel_cpu_id = 0;
+ Affinity::Location vcpu_location;
+
+ auto lambda = [&] (Cpu_thread_component *ptr) {
+ if (!ptr)
+ return invalid_sel();
+
+ Cpu_thread_component &thread = *ptr;
+
+ vcpu_location = thread.platform_thread().affinity();
+ kernel_cpu_id = platform_specific().kernel_cpu_id(thread.platform_thread().affinity());
+
+ return thread.platform_thread().pager().pd_sel();
+ };
+ addr_t const vmm_pd_sel = _ep.apply(cap, lambda);
+
+ /* if VMM pd lookup failed then deny to create vCPU */
+ if (!vmm_pd_sel || vmm_pd_sel == invalid_sel())
+ return { };
+
+ /* XXX this is a quite limited ID allocator... */
+ unsigned const vcpu_id = _next_vcpu_id;
+
+ try {
+ Vcpu &vcpu =
+ *new (_heap) Registered<Vcpu>(_vcpus,
+ _ep,
+ _constrained_md_ram_alloc,
+ _cap_quota_guard(),
+ vcpu_id,
+ kernel_cpu_id,
+ vcpu_location,
+ _priority,
+ _session_label,
+ _pd_sel,
+ platform_specific().core_pd_sel(),
+ vmm_pd_sel,
+ _trace_control_area,
+ _trace_sources);
+ ++_next_vcpu_id;
+ return vcpu.cap();
+
+ } catch (Vcpu::Creation_failed&) {
+ return { };
+ }
+}
+
+
+Vm_session_component::Vm_session_component(Rpc_entrypoint &ep,
+ Resources resources,
+ Label const &label,
+ Diag,
+ Ram_allocator &ram,
+ Region_map &local_rm,
+ unsigned const priority,
+ Trace::Source_registry &trace_sources)
+:
+ Ram_quota_guard(resources.ram_quota),
+ Cap_quota_guard(resources.cap_quota),
+ _ep(ep),
+ _trace_control_area(ram, local_rm), _trace_sources(trace_sources),
+ _constrained_md_ram_alloc(ram, _ram_quota_guard(), _cap_quota_guard()),
+ _heap(_constrained_md_ram_alloc, local_rm),
+ _priority(scale_priority(priority, "VM session")),
+ _session_label(label)
+{
+ _cap_quota_guard().withdraw(Cap_quota{1});
+
+ _pd_sel = cap_map().insert();
+ if (!_pd_sel || _pd_sel == invalid_sel())
+ throw Service_denied();
+
+ addr_t const core_pd = platform_specific().core_pd_sel();
+ enum { KEEP_FREE_PAGES_NOT_AVAILABLE_FOR_UPGRADE = 2, UPPER_LIMIT_PAGES = 32 };
+ uint8_t res = Nova::create_pd(_pd_sel, core_pd, Nova::Obj_crd(),
+ KEEP_FREE_PAGES_NOT_AVAILABLE_FOR_UPGRADE,
+ UPPER_LIMIT_PAGES);
+ if (res != Nova::NOVA_OK) {
+ error("create_pd = ", res);
+ cap_map().remove(_pd_sel, 0, true);
+ throw Service_denied();
+ }
+
+ /*
+ * Configure managed VM area. The two ranges work around the size
+ * limitation to ULONG_MAX.
+ */
+ _map.add_range(0, 0UL - 0x1000);
+ _map.add_range(0UL - 0x1000, 0x1000);
+}
+
+
+Vm_session_component::~Vm_session_component()
+{
+ _vcpus.for_each([&] (Vcpu &vcpu) {
+ destroy(_heap, &vcpu); });
+
+ /* detach all regions */
+ while (true) {
+ addr_t out_addr = 0;
+
+ if (!_map.any_block_addr(&out_addr))
+ break;
+
+ detach(out_addr);
+ }
+
+ if (_pd_sel && _pd_sel != invalid_sel())
+ cap_map().remove(_pd_sel, 0, true);
+}
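
Every kernel operation in this file funnels through _with_kernel_quota_upgrade(), which retries the operation whenever the kernel reports NOVA_PD_OOM and the kernel quota of the target PD could be upgraded. Reduced to a standalone sketch with illustrative names, the control flow is:

  #include <cstdint>
  #include <iostream>

  enum class Result : std::uint8_t { OK, OOM, ERROR };

  /* retry 'func' as long as it reports exhaustion and 'upgrade' succeeds */
  template <typename FUNC, typename UPGRADE>
  Result with_quota_upgrade(FUNC const &func, UPGRADE const &upgrade)
  {
      Result res;
      do { res = func(); } while (res == Result::OOM && upgrade());
      return res;
  }

  int main()
  {
      int budget = 0;

      auto op      = [&] { return budget >= 2 ? Result::OK : Result::OOM; };
      auto upgrade = [&] { ++budget; return true; };   /* grant one unit per upgrade */

      std::cout << (with_quota_upgrade(op, upgrade) == Result::OK ? "ok" : "failed") << '\n';
  }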
diff --git a/repos/base-nova/src/include/nova_native_vcpu/nova_native_vcpu.h b/repos/base-nova/src/include/nova_native_vcpu/nova_native_vcpu.h
new file mode 100644
index 0000000000..038c763832
--- /dev/null
+++ b/repos/base-nova/src/include/nova_native_vcpu/nova_native_vcpu.h
@@ -0,0 +1,29 @@
+ /*
+ * \brief NOVA vCPU RPC interface
+ * \author Christian Helmuth
+ * \author Alexander Böttcher
+ * \date 2021-01-19
+ */
+
+/*
+ * Copyright (C) 2021 Genode Labs GmbH
+ *
+ * This file is part of the Genode OS framework, which is distributed
+ * under the terms of the GNU Affero General Public License version 3.
+ */
+
+#ifndef _INCLUDE__NOVA_NATIVE_VCPU__NOVA_NATIVE_VCPU_H_
+#define _INCLUDE__NOVA_NATIVE_VCPU__NOVA_NATIVE_VCPU_H_
+
+#include
+#include
+
+struct Genode::Vm_session::Native_vcpu : Interface
+{
+ GENODE_RPC(Rpc_startup, void, startup);
+ GENODE_RPC(Rpc_exit_handler, void, exit_handler, unsigned, Signal_context_capability);
+
+ GENODE_RPC_INTERFACE(Rpc_startup, Rpc_exit_handler);
+};
+
+#endif /* _INCLUDE__NOVA_NATIVE_VCPU__NOVA_NATIVE_VCPU_H_ */
diff --git a/repos/base-nova/src/lib/base/vm.cc b/repos/base-nova/src/lib/base/vm.cc
new file mode 100644
index 0000000000..567695c051
--- /dev/null
+++ b/repos/base-nova/src/lib/base/vm.cc
@@ -0,0 +1,796 @@
+/*
+ * \brief NOVA-specific VM-connection implementation
+ * \author Alexander Boettcher
+ * \author Christian Helmuth
+ * \date 2018-08-27
+ */
+
+/*
+ * Copyright (C) 2018-2021 Genode Labs GmbH
+ *
+ * This file is part of the Genode OS framework, which is distributed
+ * under the terms of the GNU Affero General Public License version 3.
+ */
+
+/* Genode includes */
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+
+/* Nova includes */
+#include
+#include
+#include
+#include
+#include
+
+using namespace Genode;
+
+using Exit_config = Vm_connection::Exit_config;
+
+
+/******************************
+ ** NOVA vCPU implementation **
+ ******************************/
+
+struct Nova_vcpu : Rpc_client<Vm_session::Native_vcpu>, Noncopyable
+{
+ private:
+
+ typedef Id_space<Nova_vcpu> Vcpu_space;
+
+ static Vcpu_space &_vcpu_space()
+ {
+ static Vcpu_space instance;
+ return instance;
+ }
+
+ Vcpu_space::Element _id_elem;
+
+ struct Vcpu_id_space_exhausted : Exception { };
+
+ Signal_dispatcher_base &_obj;
+ Allocator &_alloc;
+ void *_ep_handler { nullptr };
+ void *_dispatching { nullptr };
+ bool _block { true };
+ bool _use_guest_fpu { false };
+
+ Vcpu_state _vcpu_state __attribute__((aligned(0x10))) { };
+
+ uint8_t _fpu_ep[512] __attribute__((aligned(0x10)));
+
+ enum Remote_state_requested {
+ NONE = 0,
+ PAUSE = 1,
+ RUN = 2
+ } _remote { NONE };
+
+ inline void _read_nova_state(Nova::Utcb &utcb, unsigned exit_reason);
+
+ inline void _write_nova_state(Nova::Utcb &utcb);
+
+ addr_t _sm_sel() const {
+ return Nova::NUM_INITIAL_PT_RESERVED + _id_elem.id().value * 4; }
+
+ addr_t _ec_sel() const { return _sm_sel() + 1; }
+
+ /**
+ * NOVA badge with 16-bit exit reason and 16-bit artificial vCPU ID
+ */
+ struct Badge
+ {
+ uint32_t _value;
+
+ Badge(unsigned long value)
+ : _value((uint32_t)value) { }
+
+ Badge(uint16_t vcpu_id, uint16_t exit_reason)
+ : _value((uint32_t)(vcpu_id << 16) | exit_reason) { }
+
+ uint16_t exit_reason() const { return (uint16_t)( _value & 0xffff); }
+ uint16_t vcpu_id() const { return (uint16_t)((_value >> 16) & 0xffff); }
+ uint32_t value() const { return _value; }
+ };
+
+ bool _handle_exit(Nova::Utcb &utcb, uint16_t exit_reason);
+
+ __attribute__((regparm(1))) static void _exit_entry(addr_t badge);
+
+ Nova::Mtd _portal_mtd(unsigned exit, Exit_config const &config)
+ {
+ /* TODO define and implement omissions */
+ (void)exit;
+ (void)config;
+
+ return Nova::Mtd(Nova::Mtd::ALL);
+
+ Genode::addr_t mtd = 0;
+
+ mtd |= Nova::Mtd::ACDB;
+ mtd |= Nova::Mtd::EBSD;
+ mtd |= Nova::Mtd::EFL;
+ mtd |= Nova::Mtd::ESP;
+ mtd |= Nova::Mtd::EIP;
+ mtd |= Nova::Mtd::DR;
+ mtd |= Nova::Mtd::R8_R15;
+ mtd |= Nova::Mtd::CR;
+ mtd |= Nova::Mtd::CSSS;
+ mtd |= Nova::Mtd::ESDS;
+ mtd |= Nova::Mtd::FSGS;
+ mtd |= Nova::Mtd::TR;
+ mtd |= Nova::Mtd::LDTR;
+ mtd |= Nova::Mtd::GDTR;
+ mtd |= Nova::Mtd::IDTR;
+ mtd |= Nova::Mtd::SYS;
+ mtd |= Nova::Mtd::CTRL;
+ mtd |= Nova::Mtd::INJ;
+ mtd |= Nova::Mtd::STA;
+ mtd |= Nova::Mtd::TSC;
+ mtd |= Nova::Mtd::EFER;
+ mtd |= Nova::Mtd::PDPTE;
+ mtd |= Nova::Mtd::SYSCALL_SWAPGS;
+ mtd |= Nova::Mtd::TPR;
+ mtd |= Nova::Mtd::QUAL;
+
+ _use_guest_fpu = true;
+ mtd |= Nova::Mtd::FPU;
+
+ return Nova::Mtd(mtd);
+ }
+
+ static Capability<Vm_session::Native_vcpu> _create_vcpu(Vm_connection &, Vcpu_handler_base &);
+
+ static Signal_context_capability _create_exit_handler(Pd_session &pd,
+ Vcpu_handler_base &handler,
+ uint16_t vcpu_id,
+ uint16_t exit_reason,
+ Nova::Mtd mtd);
+
+ /*
+ * Noncopyable
+ */
+ Nova_vcpu(Nova_vcpu const &) = delete;
+ Nova_vcpu &operator = (Nova_vcpu const &) = delete;
+
+ public:
+
+ Nova_vcpu(Env &env, Vm_connection &vm, Allocator &alloc,
+ Vcpu_handler_base &handler, Exit_config const &exit_config);
+
+ void run();
+
+ void pause();
+
+ Vcpu_state & state() { return _vcpu_state; }
+};
+
+
+void Nova_vcpu::_read_nova_state(Nova::Utcb &utcb, unsigned exit_reason)
+{
+ typedef Genode::Vcpu_state::Segment Segment;
+ typedef Genode::Vcpu_state::Range Range;
+
+ state().discharge();
+ state().exit_reason = exit_reason;
+
+ if (utcb.mtd & Nova::Mtd::FPU) {
+ state().fpu.charge([] (Vcpu_state::Fpu::State &fpu) {
+ asm volatile ("fxsave %0" : "=m" (fpu) :: "memory");
+ });
+ asm volatile ("fxrstor %0" : : "m" (*_fpu_ep) : "memory");
+ }
+
+ if (utcb.mtd & Nova::Mtd::ACDB) {
+ state().ax.charge(utcb.ax);
+ state().cx.charge(utcb.cx);
+ state().dx.charge(utcb.dx);
+ state().bx.charge(utcb.bx);
+ }
+
+ if (utcb.mtd & Nova::Mtd::EBSD) {
+ state().di.charge(utcb.di);
+ state().si.charge(utcb.si);
+ state().bp.charge(utcb.bp);
+ }
+
+ if (utcb.mtd & Nova::Mtd::EFL) state().flags.charge(utcb.flags);
+ if (utcb.mtd & Nova::Mtd::ESP) state().sp.charge(utcb.sp);
+ if (utcb.mtd & Nova::Mtd::DR) state().dr7.charge(utcb.dr7);
+
+ if (utcb.mtd & Nova::Mtd::EIP) {
+ state().ip.charge(utcb.ip);
+ state().ip_len.charge(utcb.instr_len);
+ }
+
+ if (utcb.mtd & Nova::Mtd::R8_R15) {
+ state(). r8.charge(utcb.read_r8());
+ state(). r9.charge(utcb.read_r9());
+ state().r10.charge(utcb.read_r10());
+ state().r11.charge(utcb.read_r11());
+ state().r12.charge(utcb.read_r12());
+ state().r13.charge(utcb.read_r13());
+ state().r14.charge(utcb.read_r14());
+ state().r15.charge(utcb.read_r15());
+ }
+
+ if (utcb.mtd & Nova::Mtd::CR) {
+ state().cr0.charge(utcb.cr0);
+ state().cr2.charge(utcb.cr2);
+ state().cr3.charge(utcb.cr3);
+ state().cr4.charge(utcb.cr4);
+ }
+ if (utcb.mtd & Nova::Mtd::CSSS) {
+ state().cs.charge(Segment { .sel = utcb.cs.sel,
+ .ar = utcb.cs.ar,
+ .limit = utcb.cs.limit,
+ .base = utcb.cs.base });
+ state().ss.charge(Segment { .sel = utcb.ss.sel,
+ .ar = utcb.ss.ar,
+ .limit = utcb.ss.limit,
+ .base = utcb.ss.base });
+ }
+
+ if (utcb.mtd & Nova::Mtd::ESDS) {
+ state().es.charge(Segment { .sel = utcb.es.sel,
+ .ar = utcb.es.ar,
+ .limit = utcb.es.limit,
+ .base = utcb.es.base });
+ state().ds.charge(Segment { .sel = utcb.ds.sel,
+ .ar = utcb.ds.ar,
+ .limit = utcb.ds.limit,
+ .base = utcb.ds.base });
+ }
+
+ if (utcb.mtd & Nova::Mtd::FSGS) {
+ state().fs.charge(Segment { .sel = utcb.fs.sel,
+ .ar = utcb.fs.ar,
+ .limit = utcb.fs.limit,
+ .base = utcb.fs.base });
+ state().gs.charge(Segment { .sel = utcb.gs.sel,
+ .ar = utcb.gs.ar,
+ .limit = utcb.gs.limit,
+ .base = utcb.gs.base });
+ }
+
+ if (utcb.mtd & Nova::Mtd::TR) {
+ state().tr.charge(Segment { .sel = utcb.tr.sel,
+ .ar = utcb.tr.ar,
+ .limit = utcb.tr.limit,
+ .base = utcb.tr.base });
+ }
+
+ if (utcb.mtd & Nova::Mtd::LDTR) {
+ state().ldtr.charge(Segment { .sel = utcb.ldtr.sel,
+ .ar = utcb.ldtr.ar,
+ .limit = utcb.ldtr.limit,
+ .base = utcb.ldtr.base });
+ }
+
+ if (utcb.mtd & Nova::Mtd::GDTR) {
+ state().gdtr.charge(Range { .limit = utcb.gdtr.limit,
+ .base = utcb.gdtr.base });
+ }
+
+ if (utcb.mtd & Nova::Mtd::IDTR) {
+ state().idtr.charge(Range { .limit = utcb.idtr.limit,
+ .base = utcb.idtr.base });
+ }
+
+ if (utcb.mtd & Nova::Mtd::SYS) {
+ state().sysenter_cs.charge(utcb.sysenter_cs);
+ state().sysenter_sp.charge(utcb.sysenter_sp);
+ state().sysenter_ip.charge(utcb.sysenter_ip);
+ }
+
+ if (utcb.mtd & Nova::Mtd::QUAL) {
+ state().qual_primary.charge(utcb.qual[0]);
+ state().qual_secondary.charge(utcb.qual[1]);
+ }
+
+ if (utcb.mtd & Nova::Mtd::CTRL) {
+ state().ctrl_primary.charge(utcb.ctrl[0]);
+ state().ctrl_secondary.charge(utcb.ctrl[1]);
+ }
+
+ if (utcb.mtd & Nova::Mtd::INJ) {
+ state().inj_info.charge(utcb.inj_info);
+ state().inj_error.charge(utcb.inj_error);
+ }
+
+ if (utcb.mtd & Nova::Mtd::STA) {
+ state().intr_state.charge(utcb.intr_state);
+ state().actv_state.charge(utcb.actv_state);
+ }
+
+ if (utcb.mtd & Nova::Mtd::TSC) {
+ state().tsc.charge(utcb.tsc_val);
+ state().tsc_offset.charge(utcb.tsc_off);
+ }
+
+ if (utcb.mtd & Nova::Mtd::EFER) {
+ state().efer.charge(utcb.read_efer());
+ }
+
+ if (utcb.mtd & Nova::Mtd::PDPTE) {
+ state().pdpte_0.charge(utcb.pdpte[0]);
+ state().pdpte_1.charge(utcb.pdpte[1]);
+ state().pdpte_2.charge(utcb.pdpte[2]);
+ state().pdpte_3.charge(utcb.pdpte[3]);
+ }
+
+ if (utcb.mtd & Nova::Mtd::SYSCALL_SWAPGS) {
+ state().star.charge(utcb.read_star());
+ state().lstar.charge(utcb.read_lstar());
+ state().cstar.charge(utcb.read_cstar());
+ state().fmask.charge(utcb.read_fmask());
+ state().kernel_gs_base.charge(utcb.read_kernel_gs_base());
+ }
+
+ if (utcb.mtd & Nova::Mtd::TPR) {
+ state().tpr.charge(utcb.read_tpr());
+ state().tpr_threshold.charge(utcb.read_tpr_threshold());
+ }
+}
+
+
+void Nova_vcpu::_write_nova_state(Nova::Utcb &utcb)
+{
+ utcb.items = 0;
+ utcb.mtd = 0;
+
+ if (state().ax.charged() || state().cx.charged() ||
+ state().dx.charged() || state().bx.charged()) {
+ utcb.mtd |= Nova::Mtd::ACDB;
+ utcb.ax = state().ax.value();
+ utcb.cx = state().cx.value();
+ utcb.dx = state().dx.value();
+ utcb.bx = state().bx.value();
+ }
+
+ if (state().bp.charged() || state().di.charged() || state().si.charged()) {
+ utcb.mtd |= Nova::Mtd::EBSD;
+ utcb.di = state().di.value();
+ utcb.si = state().si.value();
+ utcb.bp = state().bp.value();
+ }
+
+ if (state().flags.charged()) {
+ utcb.mtd |= Nova::Mtd::EFL;
+ utcb.flags = state().flags.value();
+ }
+
+ if (state().sp.charged()) {
+ utcb.mtd |= Nova::Mtd::ESP;
+ utcb.sp = state().sp.value();
+ }
+
+ if (state().ip.charged()) {
+ utcb.mtd |= Nova::Mtd::EIP;
+ utcb.ip = state().ip.value();
+ utcb.instr_len = state().ip_len.value();
+ }
+
+ if (state().dr7.charged()) {
+ utcb.mtd |= Nova::Mtd::DR;
+ utcb.dr7 = state().dr7.value();
+ }
+
+ if (state().r8.charged() || state().r9.charged() ||
+ state().r10.charged() || state().r11.charged() ||
+ state().r12.charged() || state().r13.charged() ||
+ state().r14.charged() || state().r15.charged()) {
+
+ utcb.mtd |= Nova::Mtd::R8_R15;
+ utcb.write_r8 (state().r8.value());
+ utcb.write_r9 (state().r9.value());
+ utcb.write_r10(state().r10.value());
+ utcb.write_r11(state().r11.value());
+ utcb.write_r12(state().r12.value());
+ utcb.write_r13(state().r13.value());
+ utcb.write_r14(state().r14.value());
+ utcb.write_r15(state().r15.value());
+ }
+
+ if (state().cr0.charged() || state().cr2.charged() ||
+ state().cr3.charged() || state().cr4.charged()) {
+ utcb.mtd |= Nova::Mtd::CR;
+ utcb.cr0 = state().cr0.value();
+ utcb.cr2 = state().cr2.value();
+ utcb.cr3 = state().cr3.value();
+ utcb.cr4 = state().cr4.value();
+ }
+
+ if (state().cs.charged() || state().ss.charged()) {
+ utcb.mtd |= Nova::Mtd::CSSS;
+ utcb.cs.sel = state().cs.value().sel;
+ utcb.cs.ar = state().cs.value().ar;
+ utcb.cs.limit = state().cs.value().limit;
+ utcb.cs.base = state().cs.value().base;
+
+ utcb.ss.sel = state().ss.value().sel;
+ utcb.ss.ar = state().ss.value().ar;
+ utcb.ss.limit = state().ss.value().limit;
+ utcb.ss.base = state().ss.value().base;
+ }
+
+ if (state().es.charged() || state().ds.charged()) {
+ utcb.mtd |= Nova::Mtd::ESDS;
+ utcb.es.sel = state().es.value().sel;
+ utcb.es.ar = state().es.value().ar;
+ utcb.es.limit = state().es.value().limit;
+ utcb.es.base = state().es.value().base;
+
+ utcb.ds.sel = state().ds.value().sel;
+ utcb.ds.ar = state().ds.value().ar;
+ utcb.ds.limit = state().ds.value().limit;
+ utcb.ds.base = state().ds.value().base;
+ }
+
+ if (state().fs.charged() || state().gs.charged()) {
+ utcb.mtd |= Nova::Mtd::FSGS;
+ utcb.fs.sel = state().fs.value().sel;
+ utcb.fs.ar = state().fs.value().ar;
+ utcb.fs.limit = state().fs.value().limit;
+ utcb.fs.base = state().fs.value().base;
+
+ utcb.gs.sel = state().gs.value().sel;
+ utcb.gs.ar = state().gs.value().ar;
+ utcb.gs.limit = state().gs.value().limit;
+ utcb.gs.base = state().gs.value().base;
+ }
+
+ if (state().tr.charged()) {
+ utcb.mtd |= Nova::Mtd::TR;
+ utcb.tr.sel = state().tr.value().sel;
+ utcb.tr.ar = state().tr.value().ar;
+ utcb.tr.limit = state().tr.value().limit;
+ utcb.tr.base = state().tr.value().base;
+ }
+
+ if (state().ldtr.charged()) {
+ utcb.mtd |= Nova::Mtd::LDTR;
+ utcb.ldtr.sel = state().ldtr.value().sel;
+ utcb.ldtr.ar = state().ldtr.value().ar;
+ utcb.ldtr.limit = state().ldtr.value().limit;
+ utcb.ldtr.base = state().ldtr.value().base;
+ }
+
+ if (state().gdtr.charged()) {
+ utcb.mtd |= Nova::Mtd::GDTR;
+ utcb.gdtr.limit = state().gdtr.value().limit;
+ utcb.gdtr.base = state().gdtr.value().base;
+ }
+
+ if (state().idtr.charged()) {
+ utcb.mtd |= Nova::Mtd::IDTR;
+ utcb.idtr.limit = state().idtr.value().limit;
+ utcb.idtr.base = state().idtr.value().base;
+ }
+
+ if (state().sysenter_cs.charged() || state().sysenter_sp.charged() ||
+ state().sysenter_ip.charged()) {
+ utcb.mtd |= Nova::Mtd::SYS;
+ utcb.sysenter_cs = state().sysenter_cs.value();
+ utcb.sysenter_sp = state().sysenter_sp.value();
+ utcb.sysenter_ip = state().sysenter_ip.value();
+ }
+
+ if (state().ctrl_primary.charged() || state().ctrl_secondary.charged()) {
+ utcb.mtd |= Nova::Mtd::CTRL;
+ utcb.ctrl[0] = state().ctrl_primary.value();
+ utcb.ctrl[1] = state().ctrl_secondary.value();
+ }
+
+ if (state().inj_info.charged() || state().inj_error.charged()) {
+ utcb.mtd |= Nova::Mtd::INJ;
+ utcb.inj_info = state().inj_info.value();
+ utcb.inj_error = state().inj_error.value();
+ }
+
+ if (state().intr_state.charged() || state().actv_state.charged()) {
+ utcb.mtd |= Nova::Mtd::STA;
+ utcb.intr_state = state().intr_state.value();
+ utcb.actv_state = state().actv_state.value();
+ }
+
+ if (state().tsc.charged() || state().tsc_offset.charged()) {
+ utcb.mtd |= Nova::Mtd::TSC;
+ utcb.tsc_val = state().tsc.value();
+ utcb.tsc_off = state().tsc_offset.value();
+ }
+
+ if (state().efer.charged()) {
+ utcb.mtd |= Nova::Mtd::EFER;
+ utcb.write_efer(state().efer.value());
+ }
+
+ if (state().pdpte_0.charged() || state().pdpte_1.charged() ||
+ state().pdpte_2.charged() || state().pdpte_3.charged()) {
+
+ utcb.mtd |= Nova::Mtd::PDPTE;
+ utcb.pdpte[0] = state().pdpte_0.value();
+ utcb.pdpte[1] = state().pdpte_1.value();
+ utcb.pdpte[2] = state().pdpte_2.value();
+ utcb.pdpte[3] = state().pdpte_3.value();
+ }
+
+ if (state().star.charged() || state().lstar.charged() ||
+ state().cstar.charged() || state().fmask.charged() ||
+ state().kernel_gs_base.charged()) {
+
+ utcb.mtd |= Nova::Mtd::SYSCALL_SWAPGS;
+ utcb.write_star(state().star.value());
+ utcb.write_lstar(state().lstar.value());
+ utcb.write_cstar(state().cstar.value());
+ utcb.write_fmask(state().fmask.value());
+ utcb.write_kernel_gs_base(state().kernel_gs_base.value());
+ }
+
+ if (state().tpr.charged() || state().tpr_threshold.charged()) {
+ utcb.mtd |= Nova::Mtd::TPR;
+ utcb.write_tpr(state().tpr.value());
+ utcb.write_tpr_threshold(state().tpr_threshold.value());
+ }
+
+ if (_use_guest_fpu || state().fpu.charged()) {
+ asm volatile ("fxsave %0" : "=m" (*_fpu_ep) :: "memory");
+ }
+
+ if (state().fpu.charged()) {
+ state().fpu.with_state([] (Vcpu_state::Fpu::State const &fpu) {
+ asm volatile ("fxrstor %0" : : "m" (fpu) : "memory");
+ });
+ }
+}
+
+
+void Nova_vcpu::run()
+{
+ if (!_ep_handler) {
+ /* not started yet - trigger startup of native vCPU */
+ call<Rpc_startup>();
+ return;
+ }
+
+ Thread * const current = Thread::myself();
+
+ if (_dispatching == current) {
+ _block = false;
+ return;
+ }
+
+ if ((_ep_handler == current) && !_block)
+ return;
+
+ if (_ep_handler != current)
+ _remote = RUN;
+
+ Nova::ec_ctrl(Nova::EC_RECALL, _ec_sel());
+ Nova::sm_ctrl(_sm_sel(), Nova::SEMAPHORE_UP);
+}
+
+
+/*
+ * Do not touch the UTCB before _read_nova_state() and after
+ * _write_nova_state(), particularly not by logging diagnostics.
+ */
+bool Nova_vcpu::_handle_exit(Nova::Utcb &utcb, uint16_t exit_reason)
+{
+ /* reset blocking state */
+ bool const previous_blocked = _block;
+ _block = true;
+
+ /* NOVA specific exit reasons */
+ enum { VM_EXIT_STARTUP = 0xfe, VM_EXIT_RECALL = 0xff };
+
+ if (exit_reason == VM_EXIT_STARTUP)
+ _ep_handler = Thread::myself();
+
+ /* transform state from NOVA to Genode */
+ if (exit_reason != VM_EXIT_RECALL || !previous_blocked)
+ _read_nova_state(utcb, exit_reason);
+
+ if (exit_reason == VM_EXIT_RECALL) {
+ if (previous_blocked)
+ state().exit_reason = exit_reason;
+
+ /* consume potential multiple sem ups */
+ Nova::sm_ctrl(_sm_sel(), Nova::SEMAPHORE_UP);
+ Nova::sm_ctrl(_sm_sel(), Nova::SEMAPHORE_DOWNZERO);
+
+ if (_remote == PAUSE) {
+ _remote = NONE;
+ } else {
+ if (_remote == RUN) {
+ _remote = NONE;
+ if (!previous_blocked) {
+ /* still running - reply without state transfer */
+ _block = false;
+ utcb.items = 0;
+ utcb.mtd = 0;
+ return false;
+ }
+ }
+
+ if (previous_blocked) {
+ /* resume vCPU - with vCPU state update */
+ _block = false;
+ _write_nova_state(utcb);
+ return false;
+ }
+ }
+ }
+
+ try {
+ _dispatching = Thread::myself();
+ /* call dispatch handler */
+ _obj.dispatch(1);
+ _dispatching = nullptr;
+ } catch (...) {
+ _dispatching = nullptr;
+ throw;
+ }
+
+ if (_block) {
+ /* block vCPU in kernel - no vCPU state update */
+ utcb.items = 0;
+ utcb.mtd = 0;
+ return true;
+ }
+
+ /* reply to NOVA and transfer vCPU state */
+ _write_nova_state(utcb);
+ return false;
+}
+
+
+void Nova_vcpu::_exit_entry(addr_t badge)
+{
+ Thread &myself = *Thread::myself();
+ Nova::Utcb &utcb = *reinterpret_cast<Nova::Utcb *>(myself.utcb());
+
+ uint16_t const exit_reason { Badge(badge).exit_reason() };
+ Vcpu_space::Id const vcpu_id { Badge(badge).vcpu_id() };
+
+ try {
+ _vcpu_space().apply(vcpu_id, [&] (Nova_vcpu &vcpu)
+ {
+ bool const block = vcpu._handle_exit(utcb, exit_reason);
+
+ if (block) {
+ Nova::reply(myself.stack_top(), vcpu._sm_sel());
+ } else {
+ Nova::reply(myself.stack_top());
+ }
+ });
+
+ } catch (Vcpu_space::Unknown_id &) {
+
+ /* somebody called us directly ? ... ignore/deny */
+ utcb.items = 0;
+ utcb.mtd = 0;
+ Nova::reply(myself.stack_top());
+ }
+}
+
+
+void Nova_vcpu::pause()
+{
+ Thread * const current = Thread::myself();
+
+ if (_dispatching == current) {
+ /* current thread is already dispatching */
+ if (_block)
+ /* issue pause exit next time - fall through */
+ _block = false;
+ else {
+ _block = true;
+ return;
+ }
+ }
+
+ if ((_ep_handler == current) && _block) {
+ _remote = PAUSE;
+ /* already blocked */
+ }
+
+ if (_ep_handler != current)
+ _remote = PAUSE;
+
+ if (!_ep_handler) {
+ /* not started yet - let startup handler issue the recall */
+ return;
+ }
+
+ Nova::ec_ctrl(Nova::EC_RECALL, _ec_sel());
+ Nova::sm_ctrl(_sm_sel(), Nova::SEMAPHORE_UP);
+}
+
+
+Signal_context_capability Nova_vcpu::_create_exit_handler(Pd_session &pd,
+ Vcpu_handler_base &handler,
+ uint16_t vcpu_id,
+ uint16_t exit_reason,
+ Nova::Mtd mtd)
+{
+ Thread *tep = reinterpret_cast<Thread *>(&handler.rpc_ep());
+
+ Native_capability thread_cap = Capability_space::import(tep->native_thread().ec_sel);
+
+ Nova_native_pd_client native_pd { pd.native_pd() };
+
+ Native_capability vm_exit_cap =
+ native_pd.alloc_rpc_cap(thread_cap, (addr_t)Nova_vcpu::_exit_entry, mtd.value());
+
+ Badge const badge { vcpu_id, exit_reason };
+ native_pd.imprint_rpc_cap(vm_exit_cap, badge.value());
+
+ return reinterpret_cap_cast<Signal_context>(vm_exit_cap);
+}
+
+
+Capability<Vm_session::Native_vcpu> Nova_vcpu::_create_vcpu(Vm_connection &vm,
+ Vcpu_handler_base &handler)
+{
+ Thread &tep { *reinterpret_cast<Thread *>(&handler.rpc_ep()) };
+
+ return vm.with_upgrade([&] () {
+ return vm.call<Vm_session::Rpc_create_vcpu>(tep.cap()); });
+}
+
+
+Nova_vcpu::Nova_vcpu(Env &env, Vm_connection &vm, Allocator &alloc,
+ Vcpu_handler_base &handler, Exit_config const &exit_config)
+:
+ Rpc_client(_create_vcpu(vm, handler)),
+ _id_elem(*this, _vcpu_space()), _obj(handler), _alloc(alloc)
+{
+ /*
+ * XXX can be alleviated by managing ID values with Bit_allocator
+ * that allocates lowest free index in dynamic scenarios
+ */
+ if (_id_elem.id().value > 0xffff)
+ throw Vcpu_id_space_exhausted();
+
+ uint16_t const vcpu_id = (uint16_t)_id_elem.id().value;
+
+ Signal_context_capability dontcare_exit =
+ _create_exit_handler(env.pd(), handler, vcpu_id, 0x100, Nova::Mtd(Nova::Mtd::EIP));
+
+ for (unsigned i = 0; i < Nova::NUM_INITIAL_VCPU_PT; ++i) {
+ Signal_context_capability signal_exit;
+
+ Nova::Mtd mtd = _portal_mtd(i, exit_config);
+ if (mtd.value()) {
+ signal_exit = _create_exit_handler(env.pd(), handler, vcpu_id, i, mtd);
+ } else {
+ signal_exit = dontcare_exit;
+ }
+
+ call<Rpc_exit_handler>(i, signal_exit);
+ }
+}
+
+
+/**************
+ ** vCPU API **
+ **************/
+
+void Vm_connection::Vcpu::run() { static_cast<Nova_vcpu &>(_native_vcpu).run(); }
+void Vm_connection::Vcpu::pause() { static_cast<Nova_vcpu &>(_native_vcpu).pause(); }
+Vcpu_state & Vm_connection::Vcpu::state() { return static_cast<Nova_vcpu &>(_native_vcpu).state(); }
+
+
+Vm_connection::Vcpu::Vcpu(Vm_connection &vm, Allocator &alloc,
+ Vcpu_handler_base &handler, Exit_config const &exit_config)
+:
+ _native_vcpu(*new (alloc) Nova_vcpu(vm._env, vm, alloc, handler, exit_config))
+{ }
+
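
The badge attached to each exit portal packs the artificial vCPU ID into the upper and the exit reason into the lower 16 bits of one 32-bit value, so _exit_entry() can recover both from a single badge word. A standalone check of that packing (mirroring Nova_vcpu::Badge above):

  #include <cassert>
  #include <cstdint>

  struct Badge
  {
      std::uint32_t _value;

      Badge(std::uint16_t vcpu_id, std::uint16_t exit_reason)
      : _value((std::uint32_t(vcpu_id) << 16) | exit_reason) { }

      std::uint16_t exit_reason() const { return std::uint16_t(_value & 0xffff); }
      std::uint16_t vcpu_id()     const { return std::uint16_t((_value >> 16) & 0xffff); }
  };

  int main()
  {
      Badge const badge(7, 0x21);          /* vCPU 7, exit reason 0x21 */

      assert(badge.vcpu_id()     == 7);
      assert(badge.exit_reason() == 0x21);
  }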
diff --git a/repos/base-nova/src/lib/base/vm_session.cc b/repos/base-nova/src/lib/base/vm_session.cc
deleted file mode 100644
index d9031d175f..0000000000
--- a/repos/base-nova/src/lib/base/vm_session.cc
+++ /dev/null
@@ -1,791 +0,0 @@
-/*
- * \brief Client-side VM session interface
- * \author Alexander Boettcher
- * \date 2018-08-27
- */
-
-/*
- * Copyright (C) 2018 Genode Labs GmbH
- *
- * This file is part of the Genode OS framework, which is distributed
- * under the terms of the GNU Affero General Public License version 3.
- */
-
-#include
-#include
-#include
-#include
-#include
-
-#include
-
-#include
-#include
-#include
-#include
-
-using namespace Genode;
-
-struct Vcpu;
-
-static Genode::Registry > vcpus;
-
-struct Vcpu {
-
- private:
-
- Signal_dispatcher_base &_obj;
- Allocator &_alloc;
- Vm_session_client::Vcpu_id _id;
- addr_t _state { 0 };
- void *_ep_handler { nullptr };
- void *_dispatching { nullptr };
- bool _block { true };
- bool _use_guest_fpu { false };
-
- uint8_t _fpu_ep[512] __attribute__((aligned(0x10)));
-
- enum Remote_state_requested {
- NONE = 0,
- PAUSE = 1,
- RUN = 2
- } _remote { NONE };
-
- void _read_nova_state(Nova::Utcb &utcb, Vm_state &state,
- unsigned exit_reason)
- {
- typedef Genode::Vm_state::Segment Segment;
- typedef Genode::Vm_state::Range Range;
-
- state = Vm_state {};
- state.exit_reason = exit_reason;
-
- if (utcb.mtd & Nova::Mtd::FPU) {
- state.fpu.value([&] (uint8_t *fpu, size_t const) {
- asm volatile ("fxsave %0" : "=m" (*fpu) :: "memory");
- });
- asm volatile ("fxrstor %0" : : "m" (*_fpu_ep) : "memory");
- }
-
-
- if (utcb.mtd & Nova::Mtd::ACDB) {
- state.ax.value(utcb.ax);
- state.cx.value(utcb.cx);
- state.dx.value(utcb.dx);
- state.bx.value(utcb.bx);
- }
-
- if (utcb.mtd & Nova::Mtd::EBSD) {
- state.di.value(utcb.di);
- state.si.value(utcb.si);
- state.bp.value(utcb.bp);
- }
-
- if (utcb.mtd & Nova::Mtd::EFL) state.flags.value(utcb.flags);
- if (utcb.mtd & Nova::Mtd::ESP) state.sp.value(utcb.sp);
- if (utcb.mtd & Nova::Mtd::DR) state.dr7.value(utcb.dr7);
-
- if (utcb.mtd & Nova::Mtd::EIP) {
- state.ip.value(utcb.ip);
- state.ip_len.value(utcb.instr_len);
- }
-
- if (utcb.mtd & Nova::Mtd::R8_R15) {
- state. r8.value(utcb.read_r8());
- state. r9.value(utcb.read_r9());
- state.r10.value(utcb.read_r10());
- state.r11.value(utcb.read_r11());
- state.r12.value(utcb.read_r12());
- state.r13.value(utcb.read_r13());
- state.r14.value(utcb.read_r14());
- state.r15.value(utcb.read_r15());
- }
-
- if (utcb.mtd & Nova::Mtd::CR) {
- state.cr0.value(utcb.cr0);
- state.cr2.value(utcb.cr2);
- state.cr3.value(utcb.cr3);
- state.cr4.value(utcb.cr4);
- }
- if (utcb.mtd & Nova::Mtd::CSSS) {
- state.cs.value(Segment{utcb.cs.sel, utcb.cs.ar, utcb.cs.limit,
- utcb.cs.base});
- state.ss.value(Segment{utcb.ss.sel, utcb.ss.ar, utcb.ss.limit,
- utcb.ss.base});
- }
-
- if (utcb.mtd & Nova::Mtd::ESDS) {
- state.es.value(Segment{utcb.es.sel, utcb.es.ar, utcb.es.limit,
- utcb.es.base});
- state.ds.value(Segment{utcb.ds.sel, utcb.ds.ar, utcb.ds.limit,
- utcb.ds.base});
- }
-
- if (utcb.mtd & Nova::Mtd::FSGS) {
- state.fs.value(Segment{utcb.fs.sel, utcb.fs.ar, utcb.fs.limit,
- utcb.fs.base});
- state.gs.value(Segment{utcb.gs.sel, utcb.gs.ar, utcb.gs.limit,
- utcb.gs.base});
- }
-
- if (utcb.mtd & Nova::Mtd::TR) {
- state.tr.value(Segment{utcb.tr.sel, utcb.tr.ar, utcb.tr.limit,
- utcb.tr.base});
- }
-
- if (utcb.mtd & Nova::Mtd::LDTR) {
- state.ldtr.value(Segment{utcb.ldtr.sel, utcb.ldtr.ar,
- utcb.ldtr.limit, utcb.ldtr.base});
- }
-
- if (utcb.mtd & Nova::Mtd::GDTR) {
- state.gdtr.value(Range{utcb.gdtr.base, utcb.gdtr.limit});
- }
-
- if (utcb.mtd & Nova::Mtd::IDTR) {
- state.idtr.value(Range{utcb.idtr.base, utcb.idtr.limit});
- }
-
- if (utcb.mtd & Nova::Mtd::SYS) {
- state.sysenter_cs.value(utcb.sysenter_cs);
- state.sysenter_sp.value(utcb.sysenter_sp);
- state.sysenter_ip.value(utcb.sysenter_ip);
- }
-
- if (utcb.mtd & Nova::Mtd::QUAL) {
- state.qual_primary.value(utcb.qual[0]);
- state.qual_secondary.value(utcb.qual[1]);
- }
-
- if (utcb.mtd & Nova::Mtd::CTRL) {
- state.ctrl_primary.value(utcb.ctrl[0]);
- state.ctrl_secondary.value(utcb.ctrl[1]);
- }
-
- if (utcb.mtd & Nova::Mtd::INJ) {
- state.inj_info.value(utcb.inj_info);
- state.inj_error.value(utcb.inj_error);
- }
-
- if (utcb.mtd & Nova::Mtd::STA) {
- state.intr_state.value(utcb.intr_state);
- state.actv_state.value(utcb.actv_state);
- }
-
- if (utcb.mtd & Nova::Mtd::TSC) {
- state.tsc.value(utcb.tsc_val);
- state.tsc_offset.value(utcb.tsc_off);
- }
-
- if (utcb.mtd & Nova::Mtd::EFER) {
- state.efer.value(utcb.read_efer());
- }
-
- if (utcb.mtd & Nova::Mtd::PDPTE) {
- state.pdpte_0.value(utcb.pdpte[0]);
- state.pdpte_1.value(utcb.pdpte[1]);
- state.pdpte_2.value(utcb.pdpte[2]);
- state.pdpte_3.value(utcb.pdpte[3]);
- }
-
- if (utcb.mtd & Nova::Mtd::SYSCALL_SWAPGS) {
- state.star.value(utcb.read_star());
- state.lstar.value(utcb.read_lstar());
- state.cstar.value(utcb.read_cstar());
- state.fmask.value(utcb.read_fmask());
- state.kernel_gs_base.value(utcb.read_kernel_gs_base());
- }
-
- if (utcb.mtd & Nova::Mtd::TPR) {
- state.tpr.value(utcb.read_tpr());
- state.tpr_threshold.value(utcb.read_tpr_threshold());
- }
-
- }
-
- void _write_nova_state(Nova::Utcb &utcb, Vm_state &state)
- {
- utcb.items = 0;
- utcb.mtd = 0;
-
- if (state.ax.valid() || state.cx.valid() ||
- state.dx.valid() || state.bx.valid()) {
- utcb.mtd |= Nova::Mtd::ACDB;
- utcb.ax = state.ax.value();
- utcb.cx = state.cx.value();
- utcb.dx = state.dx.value();
- utcb.bx = state.bx.value();
- }
-
- if (state.bp.valid() || state.di.valid() || state.si.valid()) {
- utcb.mtd |= Nova::Mtd::EBSD;
- utcb.di = state.di.value();
- utcb.si = state.si.value();
- utcb.bp = state.bp.value();
- }
-
- if (state.flags.valid()) {
- utcb.mtd |= Nova::Mtd::EFL;
- utcb.flags = state.flags.value();
- }
-
- if (state.sp.valid()) {
- utcb.mtd |= Nova::Mtd::ESP;
- utcb.sp = state.sp.value();
- }
-
- if (state.ip.valid()) {
- utcb.mtd |= Nova::Mtd::EIP;
- utcb.ip = state.ip.value();
- utcb.instr_len = state.ip_len.value();
- }
-
- if (state.dr7.valid()) {
- utcb.mtd |= Nova::Mtd::DR;
- utcb.dr7 = state.dr7.value();
- }
-
- if (state.r8.valid() || state.r9.valid() ||
- state.r10.valid() || state.r11.valid() ||
- state.r12.valid() || state.r13.valid() ||
- state.r14.valid() || state.r15.valid()) {
-
- utcb.mtd |= Nova::Mtd::R8_R15;
- utcb.write_r8 (state.r8.value());
- utcb.write_r9 (state.r9.value());
- utcb.write_r10(state.r10.value());
- utcb.write_r11(state.r11.value());
- utcb.write_r12(state.r12.value());
- utcb.write_r13(state.r13.value());
- utcb.write_r14(state.r14.value());
- utcb.write_r15(state.r15.value());
- }
-
- if (state.cr0.valid() || state.cr2.valid() || state.cr3.valid() ||
- state.cr4.valid()) {
- utcb.mtd |= Nova::Mtd::CR;
- utcb.cr0 = state.cr0.value();
- utcb.cr2 = state.cr2.value();
- utcb.cr3 = state.cr3.value();
- utcb.cr4 = state.cr4.value();
- }
-
- if (state.cs.valid() || state.ss.valid()) {
- utcb.mtd |= Nova::Mtd::CSSS;
- utcb.cs.sel = state.cs.value().sel;
- utcb.cs.ar = state.cs.value().ar;
- utcb.cs.limit = state.cs.value().limit;
- utcb.cs.base = state.cs.value().base;
-
- utcb.ss.sel = state.ss.value().sel;
- utcb.ss.ar = state.ss.value().ar;
- utcb.ss.limit = state.ss.value().limit;
- utcb.ss.base = state.ss.value().base;
- }
-
- if (state.es.valid() || state.ds.valid()) {
- utcb.mtd |= Nova::Mtd::ESDS;
- utcb.es.sel = state.es.value().sel;
- utcb.es.ar = state.es.value().ar;
- utcb.es.limit = state.es.value().limit;
- utcb.es.base = state.es.value().base;
-
- utcb.ds.sel = state.ds.value().sel;
- utcb.ds.ar = state.ds.value().ar;
- utcb.ds.limit = state.ds.value().limit;
- utcb.ds.base = state.ds.value().base;
- }
-
- if (state.fs.valid() || state.gs.valid()) {
- utcb.mtd |= Nova::Mtd::FSGS;
- utcb.fs.sel = state.fs.value().sel;
- utcb.fs.ar = state.fs.value().ar;
- utcb.fs.limit = state.fs.value().limit;
- utcb.fs.base = state.fs.value().base;
-
- utcb.gs.sel = state.gs.value().sel;
- utcb.gs.ar = state.gs.value().ar;
- utcb.gs.limit = state.gs.value().limit;
- utcb.gs.base = state.gs.value().base;
- }
-
- if (state.tr.valid()) {
- utcb.mtd |= Nova::Mtd::TR;
- utcb.tr.sel = state.tr.value().sel;
- utcb.tr.ar = state.tr.value().ar;
- utcb.tr.limit = state.tr.value().limit;
- utcb.tr.base = state.tr.value().base;
- }
-
- if (state.ldtr.valid()) {
- utcb.mtd |= Nova::Mtd::LDTR;
- utcb.ldtr.sel = state.ldtr.value().sel;
- utcb.ldtr.ar = state.ldtr.value().ar;
- utcb.ldtr.limit = state.ldtr.value().limit;
- utcb.ldtr.base = state.ldtr.value().base;
- }
-
- if (state.gdtr.valid()) {
- utcb.mtd |= Nova::Mtd::GDTR;
- utcb.gdtr.limit = state.gdtr.value().limit;
- utcb.gdtr.base = state.gdtr.value().base;
- }
-
- if (state.idtr.valid()) {
- utcb.mtd |= Nova::Mtd::IDTR;
- utcb.idtr.limit = state.idtr.value().limit;
- utcb.idtr.base = state.idtr.value().base;
- }
-
- if (state.sysenter_cs.valid() || state.sysenter_sp.valid() ||
- state.sysenter_ip.valid()) {
- utcb.mtd |= Nova::Mtd::SYS;
- utcb.sysenter_cs = state.sysenter_cs.value();
- utcb.sysenter_sp = state.sysenter_sp.value();
- utcb.sysenter_ip = state.sysenter_ip.value();
- }
-
- if (state.ctrl_primary.valid() || state.ctrl_secondary.valid()) {
- utcb.mtd |= Nova::Mtd::CTRL;
- utcb.ctrl[0] = state.ctrl_primary.value();
- utcb.ctrl[1] = state.ctrl_secondary.value();
- }
-
- if (state.inj_info.valid() || state.inj_error.valid()) {
- utcb.mtd |= Nova::Mtd::INJ;
- utcb.inj_info = state.inj_info.value();
- utcb.inj_error = state.inj_error.value();
- }
-
- if (state.intr_state.valid() || state.actv_state.valid()) {
- utcb.mtd |= Nova::Mtd::STA;
- utcb.intr_state = state.intr_state.value();
- utcb.actv_state = state.actv_state.value();
- }
-
- if (state.tsc.valid() || state.tsc_offset.valid()) {
- utcb.mtd |= Nova::Mtd::TSC;
- utcb.tsc_val = state.tsc.value();
- utcb.tsc_off = state.tsc_offset.value();
- }
-
- if (state.efer.valid()) {
- utcb.mtd |= Nova::Mtd::EFER;
- utcb.write_efer(state.efer.value());
- }
-
- if (state.pdpte_0.valid() || state.pdpte_1.valid() ||
- state.pdpte_2.valid() || state.pdpte_3.valid()) {
-
- utcb.mtd |= Nova::Mtd::PDPTE;
- utcb.pdpte[0] = state.pdpte_0.value();
- utcb.pdpte[1] = state.pdpte_1.value();
- utcb.pdpte[2] = state.pdpte_2.value();
- utcb.pdpte[3] = state.pdpte_3.value();
- }
-
- if (state.star.valid() || state.lstar.valid() ||
- state.cstar.valid() || state.fmask.valid() ||
- state.kernel_gs_base.valid()) {
-
- utcb.mtd |= Nova::Mtd::SYSCALL_SWAPGS;
- utcb.write_star(state.star.value());
- utcb.write_lstar(state.lstar.value());
- utcb.write_cstar(state.cstar.value());
- utcb.write_fmask(state.fmask.value());
- utcb.write_kernel_gs_base(state.kernel_gs_base.value());
- }
-
- if (state.tpr.valid() || state.tpr_threshold.valid()) {
- utcb.mtd |= Nova::Mtd::TPR;
- utcb.write_tpr(state.tpr.value());
- utcb.write_tpr_threshold(state.tpr_threshold.value());
- }
-
-
- if (_use_guest_fpu || state.fpu.valid())
- asm volatile ("fxsave %0" : "=m" (*_fpu_ep) :: "memory");
-
- if (state.fpu.valid()) {
- state.fpu.value([&] (uint8_t *fpu, size_t const) {
- asm volatile ("fxrstor %0" : : "m" (*fpu) : "memory");
- });
- }
- }
-
- void _dispatch()
- {
- try {
- _dispatching = Thread::myself();
- /* call dispatch handler */
- _obj.dispatch(1);
- _dispatching = nullptr;
- } catch (...) {
- _dispatching = nullptr;
- throw;
- }
- }
-
- addr_t _sm_sel() const {
- return Nova::NUM_INITIAL_PT_RESERVED + _id.id * 4; }
-
- addr_t _ec_sel() const { return _sm_sel() + 1; }
-
- Vcpu(const Vcpu&);
- Vcpu &operator = (Vcpu const &);
-
- public:
-
- Vcpu(Vm_handler_base &o, Vm_session::Vcpu_id const id,
- Allocator &alloc)
- : _obj(o), _alloc(alloc), _id(id) { }
-
- virtual ~Vcpu() { }
-
- Allocator &allocator() { return _alloc; }
-
- addr_t badge(uint16_t exit) const {
- return ((0UL + _id.id) << (sizeof(exit) * 8)) | exit; }
-
- Vm_session_client::Vcpu_id id() const { return _id; }
-
- __attribute__((regparm(1))) static void exit_entry(addr_t o)
- {
- Thread &myself = *Thread::myself();
- Nova::Utcb &utcb = *reinterpret_cast<Nova::Utcb *>(myself.utcb());
-
- uint16_t const exit_reason = o;
- unsigned const vcpu_id = o >> (sizeof(exit_reason) * 8);
- Vcpu * vcpu = nullptr;
-
- vcpus.for_each([&] (Vcpu &vc) {
- if (vcpu_id == vc._id.id)
- vcpu = &vc;
- });
-
- if (!vcpu) {
- /* somebody called us directly ? ... ignore/deny */
- utcb.items = 0;
- utcb.mtd = 0;
- Nova::reply(myself.stack_top());
- }
-
- /* reset blocking state */
- bool const previous_blocked = vcpu->_block;
- vcpu->_block = true;
-
- /* NOVA specific exit reasons */
- enum { VM_EXIT_STARTUP = 0xfe, VM_EXIT_RECALL = 0xff };
-
- if (exit_reason == VM_EXIT_STARTUP)
- vcpu->_ep_handler = &myself;
-
- Vm_state &state = *reinterpret_cast<Vm_state *>(vcpu->_state);
- /* transform state from NOVA to Genode */
- if (exit_reason != VM_EXIT_RECALL || !previous_blocked)
- vcpu->_read_nova_state(utcb, state, exit_reason);
-
- if (exit_reason == VM_EXIT_RECALL) {
- if (previous_blocked)
- state.exit_reason = exit_reason;
-
- /* consume potential multiple sem ups */
- Nova::sm_ctrl(vcpu->_sm_sel(), Nova::SEMAPHORE_UP);
- Nova::sm_ctrl(vcpu->_sm_sel(), Nova::SEMAPHORE_DOWNZERO);
-
- if (vcpu->_remote == PAUSE) {
- vcpu->_remote = NONE;
- } else {
- if (vcpu->_remote == RUN) {
- vcpu->_remote = NONE;
- if (!previous_blocked) {
- /* still running - reply without state transfer */
- vcpu->_block = false;
- utcb.items = 0;
- utcb.mtd = 0;
- Nova::reply(myself.stack_top());
- }
- }
-
- if (previous_blocked) {
- /* resume vCPU - with vCPU state update */
- vcpu->_block = false;
- vcpu->_write_nova_state(utcb, state);
- Nova::reply(myself.stack_top());
- }
- }
- }
-
- vcpu->_dispatch();
-
- if (vcpu->_block) {
- /* block vCPU in kernel - no vCPU state update */
- utcb.items = 0;
- utcb.mtd = 0;
- Nova::reply(myself.stack_top(), vcpu->_sm_sel());
- }
-
- /* reply to NOVA and transfer vCPU state */
- vcpu->_write_nova_state(utcb, state);
- Nova::reply(myself.stack_top());
- }
-
- bool resume()
- {
- if (!_ep_handler) {
- /* not started yet */
- return true;
- }
-
- Thread * const current = Thread::myself();
-
- if (_dispatching == current) {
- _block = false;
- return false;
- }
-
- if ((_ep_handler == current) && !_block)
- return false;
-
- if (_ep_handler != current)
- _remote = RUN;
-
- Nova::ec_ctrl(Nova::EC_RECALL, _ec_sel());
- Nova::sm_ctrl(_sm_sel(), Nova::SEMAPHORE_UP);
-
- return false;
- }
-
- void pause()
- {
- Thread * const current = Thread::myself();
-
- if (_dispatching == current) {
- /* current thread is already dispatching */
- if (_block)
- /* issue pause exit next time - fall through */
- _block = false;
- else {
- _block = true;
- return;
- }
- }
-
- if ((_ep_handler == current) && _block) {
- _remote = PAUSE;
- /* already blocked */
- }
-
- if (_ep_handler != current)
- _remote = PAUSE;
-
- if (!_ep_handler) {
- /* not started yet - let startup handler issue the recall */
- return;
- }
-
- Nova::ec_ctrl(Nova::EC_RECALL, _ec_sel());
- Nova::sm_ctrl(_sm_sel(), Nova::SEMAPHORE_UP);
- }
-
- void assign_ds_state(Region_map &rm, Dataspace_capability cap) {
- _state = rm.attach(cap); }
-
- Nova::Mtd portal_mtd(unsigned exit, Vm_handler_base &handler)
- {
- Vm_state &state = *reinterpret_cast<Vm_state *>(_state);
- state = Vm_state {};
-
- if (!handler.config_vm_event(state, exit))
- return Nova::Mtd(Nova::Mtd::ALL);
-
- Genode::addr_t mtd = 0;
-
- if (state.ax.valid() || state.cx.valid() ||
- state.dx.valid() || state.bx.valid())
- mtd |= Nova::Mtd::ACDB;
-
- if (state.bp.valid() || state.di.valid() || state.si.valid())
- mtd |= Nova::Mtd::EBSD;
-
- if (state.flags.valid())
- mtd |= Nova::Mtd::EFL;
-
- if (state.sp.valid())
- mtd |= Nova::Mtd::ESP;
-
- if (state.ip.valid())
- mtd |= Nova::Mtd::EIP;
-
- if (state.dr7.valid())
- mtd |= Nova::Mtd::DR;
-
- if (state.r8.valid() || state.r9.valid() || state.r10.valid() ||
- state.r11.valid() || state.r12.valid() || state.r13.valid() ||
- state.r14.valid() || state.r15.valid())
- mtd |= Nova::Mtd::R8_R15;
-
- if (state.cr0.valid() || state.cr2.valid() || state.cr3.valid() ||
- state.cr4.valid())
- mtd |= Nova::Mtd::CR;
-
- if (state.cs.valid() || state.ss.valid())
- mtd |= Nova::Mtd::CSSS;
-
- if (state.es.valid() || state.ds.valid())
- mtd |= Nova::Mtd::ESDS;
-
- if (state.fs.valid() || state.gs.valid())
- mtd |= Nova::Mtd::FSGS;
-
- if (state.tr.valid())
- mtd |= Nova::Mtd::TR;
-
- if (state.ldtr.valid())
- mtd |= Nova::Mtd::LDTR;
-
- if (state.gdtr.valid())
- mtd |= Nova::Mtd::GDTR;
-
- if (state.idtr.valid())
- mtd |= Nova::Mtd::IDTR;
-
- if (state.sysenter_cs.valid() || state.sysenter_sp.valid() ||
- state.sysenter_ip.valid())
- mtd |= Nova::Mtd::SYS;
-
- if (state.ctrl_primary.valid() || state.ctrl_secondary.valid())
- mtd |= Nova::Mtd::CTRL;
-
- if (state.inj_info.valid() || state.inj_error.valid())
- mtd |= Nova::Mtd::INJ;
-
- if (state.intr_state.valid() || state.actv_state.valid())
- mtd |= Nova::Mtd::STA;
-
- if (state.tsc.valid() || state.tsc_offset.valid())
- mtd |= Nova::Mtd::TSC;
-
- if (state.efer.valid())
- mtd |= Nova::Mtd::EFER;
-
- if (state.pdpte_0.valid() || state.pdpte_1.valid() ||
- state.pdpte_2.valid() || state.pdpte_3.valid())
- mtd |= Nova::Mtd::PDPTE;
-
- if (state.star.valid() || state.lstar.valid() ||
- state.cstar.valid() || state.fmask.valid() ||
- state.kernel_gs_base.valid())
- mtd |= Nova::Mtd::SYSCALL_SWAPGS;
-
- if (state.tpr.valid() || state.tpr_threshold.valid())
- mtd |= Nova::Mtd::TPR;
-
- if (state.qual_primary.valid() || state.qual_secondary.valid())
- mtd |= Nova::Mtd::QUAL;
-
- if (state.fpu.valid()) {
- _use_guest_fpu = true;
- mtd |= Nova::Mtd::FPU;
- }
-
- state = Vm_state {};
-
- return Nova::Mtd(mtd);
- }
-};
-
-Signal_context_capability
-static create_exit_handler(Pd_session &pd, Rpc_entrypoint &ep,
- Vcpu &vcpu, unsigned exit_reason,
- Nova::Mtd &mtd)
-{
- Thread * tep = reinterpret_cast(&ep);
-
- Native_capability thread_cap = Capability_space::import(tep->native_thread().ec_sel);
-
- Nova_native_pd_client native_pd { pd.native_pd() };
- Native_capability vm_exit_cap = native_pd.alloc_rpc_cap(thread_cap,
- (addr_t)Vcpu::exit_entry,
- mtd.value());
- native_pd.imprint_rpc_cap(vm_exit_cap, vcpu.badge(exit_reason));
-
- return reinterpret_cap_cast(vm_exit_cap);
-}
-
-Vm_session_client::Vcpu_id
-Vm_session_client::create_vcpu(Allocator &alloc, Env &env,
- Vm_handler_base &handler)
-{
- Thread * ep = reinterpret_cast<Thread *>(&handler._rpc_ep);
-
- Vcpu * vcpu = new (alloc) Registered (vcpus, handler,
- call(ep->cap()),
- alloc);
- vcpu->assign_ds_state(env.rm(), call(vcpu->id()));
-
- Signal_context_capability dontcare_exit;
-
- enum { MAX_VM_EXITS = (1U << Nova::NUM_INITIAL_VCPU_PT_LOG2) };
- for (unsigned i = 0; i < MAX_VM_EXITS; i++) {
- Signal_context_capability signal_exit;
-
- Nova::Mtd mtd = vcpu->portal_mtd(i, handler);
- if (mtd.value()) {
- signal_exit = create_exit_handler(env.pd(), handler._rpc_ep,
- *vcpu, i, mtd);
- } else {
- if (!dontcare_exit.valid()) {
- Nova::Mtd mtd_ip(Nova::Mtd::EIP);
- dontcare_exit = create_exit_handler(env.pd(), handler._rpc_ep,
- *vcpu, 0x100, mtd_ip);
- }
- signal_exit = dontcare_exit;
- }
-
- call(signal_exit, vcpu->id());
- }
-
- return vcpu->id();
-}
-
-void Vm_session_client::run(Vm_session_client::Vcpu_id vcpu_id)
-{
- vcpus.for_each([&] (Vcpu &vcpu) {
- if (vcpu.id().id != vcpu_id.id)
- return;
-
- if (vcpu.resume())
- call(vcpu.id());
- });
-}
-
-void Vm_session_client::pause(Vm_session_client::Vcpu_id vcpu_id)
-{
- vcpus.for_each([&] (Vcpu &vcpu) {
- if (vcpu.id().id != vcpu_id.id)
- return;
-
- vcpu.pause();
- });
-}
-
-Dataspace_capability Vm_session_client::cpu_state(Vcpu_id vcpu_id)
-{
- Dataspace_capability cap;
-
- vcpus.for_each([&] (Vcpu &vcpu) {
- if (vcpu.id().id == vcpu_id.id)
- cap = call(vcpu_id);
- });
-
- return cap;
-}
-
-Vm_session::~Vm_session()
-{
- vcpus.for_each([&] (Vcpu &vc) {
- Allocator &alloc = vc.allocator();
- destroy(alloc, &vc);
- });
-}
diff --git a/repos/base-sel4/lib/mk/spec/x86/base-sel4-common.mk b/repos/base-sel4/lib/mk/spec/x86/base-sel4-common.mk
index 432f4d73aa..b7c19fce6b 100644
--- a/repos/base-sel4/lib/mk/spec/x86/base-sel4-common.mk
+++ b/repos/base-sel4/lib/mk/spec/x86/base-sel4-common.mk
@@ -4,6 +4,4 @@
# \date 2013-02-14
#
-vpath vm_session.cc $(REP_DIR)/src/lib/base/x86
-
include $(REP_DIR)/lib/mk/base-sel4-common.inc
diff --git a/repos/base-sel4/lib/mk/spec/x86/base-sel4.mk b/repos/base-sel4/lib/mk/spec/x86/base-sel4.mk
index caae7797e1..60271d93f7 100644
--- a/repos/base-sel4/lib/mk/spec/x86/base-sel4.mk
+++ b/repos/base-sel4/lib/mk/spec/x86/base-sel4.mk
@@ -1,3 +1,5 @@
LIBS += timeout
+vpath vm.cc $(REP_DIR)/src/lib/base/x86
+
include $(REP_DIR)/lib/mk/base-sel4.inc
diff --git a/repos/base-sel4/src/core/include/vm_session_component.h b/repos/base-sel4/src/core/include/vm_session_component.h
index 618da08afb..77a54bb14f 100644
--- a/repos/base-sel4/src/core/include/vm_session_component.h
+++ b/repos/base-sel4/src/core/include/vm_session_component.h
@@ -5,7 +5,7 @@
*/
/*
- * Copyright (C) 2018 Genode Labs GmbH
+ * Copyright (C) 2018-2021 Genode Labs GmbH
*
* This file is part of the Genode OS framework, which is distributed
* under the terms of the GNU Affero General Public License version 3.
@@ -19,6 +19,7 @@
#include
#include
+#include
namespace Genode { class Vm_session_component; }
@@ -31,27 +32,31 @@ class Genode::Vm_session_component
{
private:
- class Vcpu : public Genode::List<Vcpu>::Element
+ class Vcpu : public Rpc_object<Vm_session::Native_vcpu, Vcpu>
{
private:
- Constrained_ram_allocator &_ram_alloc;
- Ram_dataspace_capability _ds_cap;
- Cap_sel _notification { 0 };
- Vm_session::Vcpu_id _vcpu_id;
+ Rpc_entrypoint &_ep;
+ Constrained_ram_allocator &_ram_alloc;
+ Ram_dataspace_capability const _ds_cap;
+ Cap_sel _notification { 0 };
void _free_up();
public:
- Vcpu(Constrained_ram_allocator &, Cap_quota_guard &, Vcpu_id,
- seL4_Untyped);
- ~Vcpu() { _free_up(); }
+ Vcpu(Rpc_entrypoint &, Constrained_ram_allocator &,
+ Cap_quota_guard &, seL4_Untyped);
+
+ ~Vcpu();
- Dataspace_capability ds_cap() const { return _ds_cap; }
- bool match(Vcpu_id id) const { return id.id == _vcpu_id.id; }
- void signal() const { seL4_Signal(_notification.value()); }
Cap_sel notification_cap() const { return _notification; }
+
+ /*******************************
+ ** Native_vcpu RPC interface **
+ *******************************/
+
+ Capability state() const { return _ds_cap; }
};
typedef Allocator_avl_tpl Avl_region;
@@ -60,8 +65,6 @@ class Genode::Vm_session_component
Constrained_ram_allocator _constrained_md_ram_alloc;
Heap _heap;
Avl_region _map { &_heap };
- List _vcpus { };
- unsigned _id_alloc { 0 };
unsigned _pd_id { 0 };
Cap_sel _vm_page_table;
Page_table_registry _page_table_registry { _heap };
@@ -75,14 +78,9 @@ class Genode::Vm_session_component
seL4_Untyped _service;
} _notifications { 0, 0 };
- Vcpu * _lookup(Vcpu_id const vcpu_id)
- {
- for (Vcpu * vcpu = _vcpus.first(); vcpu; vcpu = vcpu->next())
- if (vcpu->match(vcpu_id)) return vcpu;
-
- return nullptr;
- }
+ Registry<Registered<Vcpu>> _vcpus { };
+ /* helpers for vm_session_common.cc */
void _attach_vm_memory(Dataspace_component &, addr_t, Attach_attr);
void _detach_vm_memory(addr_t, size_t);
@@ -105,24 +103,19 @@ class Genode::Vm_session_component
** Region_map_detach interface **
*********************************/
- void detach(Region_map::Local_addr) override;
- void unmap_region(addr_t, size_t) override;
+ /* used on destruction of attached dataspaces */
+ void detach(Region_map::Local_addr) override; /* vm_session_common.cc */
+ void unmap_region(addr_t, size_t) override; /* vm_session_common.cc */
/**************************
** Vm session interface **
**************************/
- Dataspace_capability _cpu_state(Vcpu_id);
+ Capability create_vcpu(Thread_capability);
+ void attach_pic(addr_t) override { /* unused on seL4 */ }
- void _exception_handler(Signal_context_capability, Vcpu_id) {}
- void _run(Vcpu_id) {}
- void _pause(Vcpu_id);
- void attach(Dataspace_capability, addr_t, Attach_attr) override;
- void attach_pic(addr_t) override {}
- void detach(addr_t, size_t) override;
- Vcpu_id _create_vcpu(Thread_capability);
- Capability _native_vcpu(Vcpu_id) {
- return Capability(); }
+ void attach(Dataspace_capability, addr_t, Attach_attr) override; /* vm_session_common.cc */
+ void detach(addr_t, size_t) override; /* vm_session_common.cc */
};
#endif /* _CORE__VM_SESSION_COMPONENT_H_ */
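The session-level bookkeeping above replaces the hand-rolled vCPU list and ID counter with the Registered/Registry pattern, which couples registration to object lifetime. A generic sketch of that pattern under illustrative names (Item, Demo):

  #include <base/heap.h>
  #include <base/registry.h>
  #include <util/interface.h>

  using namespace Genode;

  struct Item : Interface
  {
  	unsigned const value;
  	Item(unsigned value) : value(value) { }
  };

  struct Demo
  {
  	Heap &heap;

  	/* each element enrolls itself on construction, retires on destruction */
  	Registry<Registered<Item>> items { };

  	Demo(Heap &heap) : heap(heap)
  	{
  		/* first constructor argument is the registry, the rest goes to Item */
  		new (heap) Registered<Item>(items, 1);
  		new (heap) Registered<Item>(items, 2);
  	}

  	~Demo()
  	{
  		/* destroying an element automatically removes it from the registry */
  		items.for_each([&] (Registered<Item> &item) { destroy(heap, &item); });
  	}
  };
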
diff --git a/repos/base-sel4/src/core/spec/x86/vm_session_component.cc b/repos/base-sel4/src/core/spec/x86/vm_session_component.cc
index ee9b907f47..4498b71bdc 100644
--- a/repos/base-sel4/src/core/spec/x86/vm_session_component.cc
+++ b/repos/base-sel4/src/core/spec/x86/vm_session_component.cc
@@ -5,7 +5,7 @@
*/
/*
- * Copyright (C) 2018 Genode Labs GmbH
+ * Copyright (C) 2018-2021 Genode Labs GmbH
*
* This file is part of the Genode OS framework, which is distributed
* under the terms of the GNU Affero General Public License version 3.
@@ -13,7 +13,7 @@
/* base includes */
#include
-#include
+#include
/* core includes */
#include
@@ -25,6 +25,11 @@
using namespace Genode;
+
+/********************************
+ ** Vm_session_component::Vcpu **
+ ********************************/
+
void Vm_session_component::Vcpu::_free_up()
{
if (_ds_cap.valid())
@@ -40,15 +45,16 @@ void Vm_session_component::Vcpu::_free_up()
}
}
-Vm_session_component::Vcpu::Vcpu(Constrained_ram_allocator &ram_alloc,
- Cap_quota_guard &cap_alloc,
- Vcpu_id const vcpu_id,
- seL4_Untyped const service)
+
+Vm_session_component::Vcpu::Vcpu(Rpc_entrypoint &ep,
+ Constrained_ram_allocator &ram_alloc,
+ Cap_quota_guard &cap_alloc,
+ seL4_Untyped const service)
:
+ _ep(ep),
_ram_alloc(ram_alloc),
- _ds_cap (_ram_alloc.alloc(align_addr(sizeof(Genode::Vm_state), 12),
- Cache_attribute::CACHED)),
- _vcpu_id(vcpu_id)
+ _ds_cap (_ram_alloc.alloc(align_addr(sizeof(Genode::Vcpu_state), 12),
+ Cache_attribute::CACHED))
{
try {
/* notification cap */
@@ -59,6 +65,8 @@ Vm_session_component::Vcpu::Vcpu(Constrained_ram_allocator &ram_alloc,
platform_specific().core_cnode().sel(),
_notification);
+ _ep.manage(this);
+
caps.acknowledge();
} catch (...) {
_free_up();
@@ -66,6 +74,18 @@ Vm_session_component::Vcpu::Vcpu(Constrained_ram_allocator &ram_alloc,
}
}
+
+Vm_session_component::Vcpu::~Vcpu()
+{
+ _ep.dissolve(this);
+ _free_up();
+}
+
+
+/**************************
+ ** Vm_session_component **
+ **************************/
+
Vm_session_component::Vm_session_component(Rpc_entrypoint &ep,
Resources resources,
Label const &,
@@ -153,12 +173,11 @@ try
throw;
}
+
Vm_session_component::~Vm_session_component()
{
- for (;Vcpu * vcpu = _vcpus.first();) {
- _vcpus.remove(vcpu);
- destroy(_heap, vcpu);
- }
+ _vcpus.for_each([&] (Vcpu &vcpu) {
+ destroy(_heap, &vcpu); });
/* detach all regions */
while (true) {
@@ -177,28 +196,27 @@ Vm_session_component::~Vm_session_component()
Platform_pd::pd_id_alloc().free(_pd_id);
}
-Vm_session::Vcpu_id Vm_session_component::_create_vcpu(Thread_capability cap)
-{
- Vcpu_id ret;
+Capability Vm_session_component::create_vcpu(Thread_capability const cap)
+{
if (!cap.valid())
- return ret;
+ return { };
+
+ Vcpu * vcpu = nullptr;
auto lambda = [&] (Cpu_thread_component *thread) {
if (!thread)
return;
- /* allocate vCPU object */
- Vcpu * vcpu = nullptr;
-
/* code to revert partial allocations in case of Out_of_ram/_quota */
auto free_up = [&] () { if (vcpu) destroy(_heap, vcpu); };
try {
- vcpu = new (_heap) Vcpu(_constrained_md_ram_alloc,
- _cap_quota_guard(),
- Vcpu_id{_id_alloc},
- _notifications._service);
+ vcpu = new (_heap) Registered<Vcpu>(_vcpus,
+ _ep,
+ _constrained_md_ram_alloc,
+ _cap_quota_guard(),
+ _notifications._service);
Platform_thread &pthread = thread->platform_thread();
pthread.setup_vcpu(_vm_page_table, vcpu->notification_cap());
@@ -218,32 +236,13 @@ Vm_session::Vcpu_id Vm_session_component::_create_vcpu(Thread_capability cap)
free_up();
return;
}
-
- _vcpus.insert(vcpu);
- ret.id = _id_alloc++;
};
_ep.apply(cap, lambda);
- return ret;
+
+ return vcpu ? vcpu->cap() : Capability {};
}
-Dataspace_capability Vm_session_component::_cpu_state(Vcpu_id const vcpu_id)
-{
- Vcpu * vcpu = _lookup(vcpu_id);
- if (!vcpu)
- return Dataspace_capability();
-
- return vcpu->ds_cap();
-}
-
-void Vm_session_component::_pause(Vcpu_id const vcpu_id)
-{
- Vcpu * vcpu = _lookup(vcpu_id);
- if (!vcpu)
- return;
-
- vcpu->signal();
-}
void Vm_session_component::_attach_vm_memory(Dataspace_component &dsc,
addr_t const guest_phys,
@@ -308,6 +307,7 @@ void Vm_session_component::_attach_vm_memory(Dataspace_component &dsc,
}
}
+
void Vm_session_component::_detach_vm_memory(addr_t guest_phys, size_t size)
{
Flexpage_iterator flex(guest_phys, size, guest_phys, size, 0);
diff --git a/repos/base-sel4/src/include/sel4_native_vcpu/sel4_native_vcpu.h b/repos/base-sel4/src/include/sel4_native_vcpu/sel4_native_vcpu.h
new file mode 100644
index 0000000000..ee596f6335
--- /dev/null
+++ b/repos/base-sel4/src/include/sel4_native_vcpu/sel4_native_vcpu.h
@@ -0,0 +1,26 @@
+ /*
+ * \brief seL4 vCPU RPC interface
+ * \author Christian Helmuth
+ * \author Alexander Böttcher
+ * \date 2021-01-19
+ */
+
+/*
+ * Copyright (C) 2021 Genode Labs GmbH
+ *
+ * This file is part of the Genode OS framework, which is distributed
+ * under the terms of the GNU Affero General Public License version 3.
+ */
+
+#ifndef _INCLUDE__SEL4_NATIVE_VCPU__SEL4_NATIVE_VCPU_H_
+#define _INCLUDE__SEL4_NATIVE_VCPU__SEL4_NATIVE_VCPU_H_
+
+#include
+#include
+
+struct Genode::Vm_session::Native_vcpu : Interface
+{
+ GENODE_RPC_INTERFACE();
+};
+
+#endif /* _INCLUDE__SEL4_NATIVE_VCPU__SEL4_NATIVE_VCPU_H_ */
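A kernel-specific Native_vcpu interface is typically populated with GENODE_RPC declarations. The sketch below uses a hypothetical Demo namespace; its single state() accessor merely mirrors the Vcpu class shown earlier in this patch and is not the exact seL4 declaration.

  #include <base/rpc.h>
  #include <dataspace/capability.h>
  #include <util/interface.h>

  namespace Demo { struct Native_vcpu; }

  /* hypothetical stand-in for a kernel-specific vCPU RPC interface */
  struct Demo::Native_vcpu : Genode::Interface
  {
  	/* hand out the dataspace carrying this vCPU's Vcpu_state */
  	GENODE_RPC(Rpc_state, Genode::Dataspace_capability, state);

  	GENODE_RPC_INTERFACE(Rpc_state);
  };
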
diff --git a/repos/base-sel4/src/lib/base/x86/vm_session.cc b/repos/base-sel4/src/lib/base/x86/vm.cc
similarity index 65%
rename from repos/base-sel4/src/lib/base/x86/vm_session.cc
rename to repos/base-sel4/src/lib/base/x86/vm.cc
index 2a1f8ca265..59fa79ab94 100644
--- a/repos/base-sel4/src/lib/base/x86/vm_session.cc
+++ b/repos/base-sel4/src/lib/base/x86/vm.cc
@@ -5,48 +5,80 @@
*/
/*
- * Copyright (C) 2018 Genode Labs GmbH
+ * Copyright (C) 2018-2021 Genode Labs GmbH
*
* This file is part of the Genode OS framework, which is distributed
* under the terms of the GNU Affero General Public License version 3.
*/
+/* Genode includes */
#include
+#include
#include
#include
-#include
-#include
+#include
+#include
+
+#include
#include
#include
#include
+#include
+/* seL4 includes */
+#include
#include
#include
-#include
using namespace Genode;
-struct Vcpu;
+using Exit_config = Vm_connection::Exit_config;
-static Genode::Registry<Genode::Registered<Vcpu> > vcpus;
+struct Sel4_vcpu;
-struct Vcpu : Genode::Thread
+struct Sel4_native_rpc : Rpc_client<Vm_session::Native_vcpu>, Noncopyable
{
private:
- Signal_context_capability &_signal;
+ Capability _create_vcpu(Vm_connection &vm,
+ Thread_capability &cap)
+ {
+ return vm.with_upgrade([&] () {
+ return vm.call(cap); });
+ }
+
+ public:
+
+ Sel4_vcpu &vcpu;
+
+ Sel4_native_rpc(Vm_connection &vm, Thread_capability &cap,
+ Sel4_vcpu &vcpu)
+ :
+ Rpc_client(_create_vcpu(vm, cap)),
+ vcpu(vcpu)
+ { }
+};
+
+/******************************
+ ** seL4 vCPU implementation **
+ ******************************/
+
+struct Sel4_vcpu : Genode::Thread, Noncopyable
+{
+ private:
+
+ Vcpu_handler_base &_vcpu_handler;
+ Vcpu_state _state __attribute__((aligned(0x10))) { };
Semaphore _wake_up { 0 };
- Semaphore &_handler_ready;
- Allocator &_alloc;
Blockade _startup { };
- Vm_session_client::Vcpu_id _id {};
- addr_t _state { 0 };
addr_t _recall { 0 };
uint64_t _tsc_offset { 0 };
+ Constructible<Sel4_native_rpc> _rpc { };
+
bool _show_error_unsupported_r { true };
bool _show_error_unsupported_tpr { true };
bool _show_error_unsupported_star { true };
@@ -101,8 +133,8 @@ struct Vcpu : Genode::Thread
/* get selector to call back a vCPU into VMM */
_recall = _stack->utcb().lock_sel();
- Vm_state &state = *reinterpret_cast<Vm_state *>(_state);
- state = Vm_state {};
+ Vcpu_state &state = this->state();
+ state.discharge();
/* wait for first user resume() */
_wake_up.down();
@@ -116,9 +148,9 @@ struct Vcpu : Genode::Thread
state.exit_reason = VMEXIT_STARTUP;
_read_sel4_state(service, state);
- Genode::Signal_transmitter(_signal).submit();
+ Genode::Signal_transmitter(_vcpu_handler.signal_cap()).submit();
- _handler_ready.down();
+ _vcpu_handler.ready_semaphore().down();
_wake_up.down();
State local_state { NONE };
@@ -154,20 +186,20 @@ struct Vcpu : Genode::Thread
if (local_state == PAUSE) {
- state = Vm_state {};
+ state.discharge();
state.exit_reason = VMEXIT_RECALL;
_read_sel4_state(service, state);
/* notify VM handler */
- Genode::Signal_transmitter(_signal).submit();
+ Genode::Signal_transmitter(_vcpu_handler.signal_cap()).submit();
/*
* Wait until VM handler is really really done,
* otherwise we lose state.
*/
- _handler_ready.down();
+ _vcpu_handler.ready_semaphore().down();
continue;
}
@@ -185,7 +217,7 @@ struct Vcpu : Genode::Thread
seL4_Word badge = 0;
seL4_Word res = seL4_VMEnter(&badge);
- state = Vm_state {};
+ state.discharge();
if (res != SEL4_VMENTER_RESULT_FAULT)
state.exit_reason = VMEXIT_RECALL;
@@ -203,13 +235,13 @@ struct Vcpu : Genode::Thread
}
/* notify VM handler */
- Genode::Signal_transmitter(_signal).submit();
+ Genode::Signal_transmitter(_vcpu_handler.signal_cap()).submit();
/*
* Wait until VM handler is really really done,
* otherwise we lose state.
*/
- _handler_ready.down();
+ _vcpu_handler.ready_semaphore().down();
}
}
@@ -330,20 +362,20 @@ struct Vcpu : Genode::Thread
uint16_t _convert_ar_16(addr_t value) {
return ((value & 0x1f000) >> 4) | (value & 0xff); }
- void _write_sel4_state(seL4_X86_VCPU const service, Vm_state &state)
+ void _write_sel4_state(seL4_X86_VCPU const service, Vcpu_state &state)
{
- if (state.ax.valid()) _recent_gpr.eax = state.ax.value();
- if (state.bx.valid()) _recent_gpr.ebx = state.bx.value();
- if (state.cx.valid()) _recent_gpr.ecx = state.cx.value();
- if (state.dx.valid()) _recent_gpr.edx = state.dx.value();
- if (state.si.valid()) _recent_gpr.esi = state.si.value();
- if (state.di.valid()) _recent_gpr.edi = state.di.value();
- if (state.bp.valid()) _recent_gpr.ebp = state.bp.value();
+ if (state.ax.charged()) _recent_gpr.eax = state.ax.value();
+ if (state.bx.charged()) _recent_gpr.ebx = state.bx.value();
+ if (state.cx.charged()) _recent_gpr.ecx = state.cx.value();
+ if (state.dx.charged()) _recent_gpr.edx = state.dx.value();
+ if (state.si.charged()) _recent_gpr.esi = state.si.value();
+ if (state.di.charged()) _recent_gpr.edi = state.di.value();
+ if (state.bp.charged()) _recent_gpr.ebp = state.bp.value();
- if (state.ax.valid() || state.cx.valid() ||
- state.dx.valid() || state.bx.valid() ||
- state.bp.valid() || state.di.valid() ||
- state.si.valid())
+ if (state.ax.charged() || state.cx.charged() ||
+ state.dx.charged() || state.bx.charged() ||
+ state.bp.charged() || state.di.charged() ||
+ state.si.charged())
{
seL4_Error res = seL4_X86_VCPU_WriteRegisters(service,
&_recent_gpr);
@@ -352,10 +384,10 @@ struct Vcpu : Genode::Thread
(int)res);
}
- if (state.r8.valid() || state.r9.valid() ||
- state.r10.valid() || state.r11.valid() ||
- state.r12.valid() || state.r13.valid() ||
- state.r14.valid() || state.r15.valid())
+ if (state.r8.charged() || state.r9.charged() ||
+ state.r10.charged() || state.r11.charged() ||
+ state.r12.charged() || state.r13.charged() ||
+ state.r14.charged() || state.r15.charged())
{
if (_show_error_unsupported_r)
{
@@ -364,7 +396,7 @@ struct Vcpu : Genode::Thread
}
}
- if (state.tsc.valid() || state.tsc_offset.valid())
+ if (state.tsc.charged() || state.tsc_offset.charged())
{
_tsc_offset += state.tsc_offset.value();
/* not supported by seL4 */
@@ -374,8 +406,8 @@ struct Vcpu : Genode::Thread
#endif
}
- if (state.star.valid() || state.lstar.valid() || state.cstar.valid() ||
- state.fmask.valid() || state.kernel_gs_base.valid())
+ if (state.star.charged() || state.lstar.charged() || state.cstar.charged() ||
+ state.fmask.charged() || state.kernel_gs_base.charged())
{
if (_show_error_unsupported_star) {
_show_error_unsupported_star = false;
@@ -383,7 +415,7 @@ struct Vcpu : Genode::Thread
}
}
- if (state.tpr.valid() || state.tpr_threshold.valid())
+ if (state.tpr.charged() || state.tpr_threshold.charged())
{
if (_show_error_unsupported_tpr)
{
@@ -392,26 +424,26 @@ struct Vcpu : Genode::Thread
}
}
- if (state.dr7.valid())
+ if (state.dr7.charged())
_write_vmcs(service, Vmcs::DR7, state.dr7.value());
- if (state.cr0.valid()) {
+ if (state.cr0.charged()) {
_write_vmcs(service, Vmcs::CR0, cr0_set | (~cr0_mask & state.cr0.value()));
_write_vmcs(service, Vmcs::CR0_SHADOW, state.cr0.value());
}
- /* not supported on seL4 - state.cr2.valid() */
+ /* not supported on seL4 - state.cr2.charged() */
- if (state.cr3.valid())
+ if (state.cr3.charged())
_write_vmcs(service, Vmcs::CR3, state.cr3.value());
- if (state.cr4.valid()) {
+ if (state.cr4.charged()) {
_write_vmcs(service, Vmcs::CR4, cr4_set | (~cr4_mask & state.cr4.value()));
_write_vmcs(service, Vmcs::CR4_SHADOW, state.cr4.value());
}
- if (state.inj_info.valid()) {
- addr_t ctrl_0 = state.ctrl_primary.valid() ?
+ if (state.inj_info.charged()) {
+ addr_t ctrl_0 = state.ctrl_primary.charged() ?
state.ctrl_primary.value() :
_read_vmcs(service, Vmcs::CTRL_0);
@@ -423,123 +455,123 @@ struct Vcpu : Genode::Thread
else
ctrl_0 &= ~Vmcs::IRQ_WINDOW;
- state.ctrl_primary.value(ctrl_0);
+ state.ctrl_primary.charge(ctrl_0);
}
- if (state.inj_error.valid())
+ if (state.inj_error.charged())
_write_vmcs(service, Vmcs::INTR_ERROR, state.inj_error.value());
- if (state.flags.valid())
+ if (state.flags.charged())
_write_vmcs(service, Vmcs::RFLAGS, state.flags.value());
- if (state.sp.valid())
+ if (state.sp.charged())
_write_vmcs(service, Vmcs::RSP, state.sp.value());
- if (state.ip.valid())
+ if (state.ip.charged())
_write_vmcs(service, Vmcs::RIP, state.ip.value());
- if (state.ip_len.valid())
+ if (state.ip_len.charged())
_write_vmcs(service, Vmcs::ENTRY_INST_LEN, state.ip_len.value());
- if (state.efer.valid())
+ if (state.efer.charged())
_write_vmcs(service, Vmcs::EFER, state.efer.value());
- /* state.ctrl_primary.valid() update on vmenter - see above */
+ /* state.ctrl_primary.charged() update on vmenter - see above */
- if (state.ctrl_secondary.valid())
+ if (state.ctrl_secondary.charged())
_write_vmcs(service, Vmcs::CTRL_1, state.ctrl_secondary.value());
- if (state.intr_state.valid())
+ if (state.intr_state.charged())
_write_vmcs(service, Vmcs::STATE_INTR, state.intr_state.value());
- if (state.actv_state.valid())
+ if (state.actv_state.charged())
_write_vmcs(service, Vmcs::STATE_ACTV, state.actv_state.value());
- if (state.cs.valid()) {
+ if (state.cs.charged()) {
_write_vmcs(service, Vmcs::CS_SEL, state.cs.value().sel);
_write_vmcs(service, Vmcs::CS_LIMIT, state.cs.value().limit);
_write_vmcs(service, Vmcs::CS_AR, _convert_ar(state.cs.value().ar));
_write_vmcs(service, Vmcs::CS_BASE, state.cs.value().base);
}
- if (state.ss.valid()) {
+ if (state.ss.charged()) {
_write_vmcs(service, Vmcs::SS_SEL, state.ss.value().sel);
_write_vmcs(service, Vmcs::SS_LIMIT, state.ss.value().limit);
_write_vmcs(service, Vmcs::SS_AR, _convert_ar(state.ss.value().ar));
_write_vmcs(service, Vmcs::SS_BASE, state.ss.value().base);
}
- if (state.es.valid()) {
+ if (state.es.charged()) {
_write_vmcs(service, Vmcs::ES_SEL, state.es.value().sel);
_write_vmcs(service, Vmcs::ES_LIMIT, state.es.value().limit);
_write_vmcs(service, Vmcs::ES_AR, _convert_ar(state.es.value().ar));
_write_vmcs(service, Vmcs::ES_BASE, state.es.value().base);
}
- if (state.ds.valid()) {
+ if (state.ds.charged()) {
_write_vmcs(service, Vmcs::DS_SEL, state.ds.value().sel);
_write_vmcs(service, Vmcs::DS_LIMIT, state.ds.value().limit);
_write_vmcs(service, Vmcs::DS_AR, _convert_ar(state.ds.value().ar));
_write_vmcs(service, Vmcs::DS_BASE, state.ds.value().base);
}
- if (state.fs.valid()) {
+ if (state.fs.charged()) {
_write_vmcs(service, Vmcs::FS_SEL, state.fs.value().sel);
_write_vmcs(service, Vmcs::FS_LIMIT, state.fs.value().limit);
_write_vmcs(service, Vmcs::FS_AR, _convert_ar(state.fs.value().ar));
_write_vmcs(service, Vmcs::FS_BASE, state.fs.value().base);
}
- if (state.gs.valid()) {
+ if (state.gs.charged()) {
_write_vmcs(service, Vmcs::GS_SEL, state.gs.value().sel);
_write_vmcs(service, Vmcs::GS_LIMIT, state.gs.value().limit);
_write_vmcs(service, Vmcs::GS_AR, _convert_ar(state.gs.value().ar));
_write_vmcs(service, Vmcs::GS_BASE, state.gs.value().base);
}
- if (state.tr.valid()) {
+ if (state.tr.charged()) {
_write_vmcs(service, Vmcs::TR_SEL, state.tr.value().sel);
_write_vmcs(service, Vmcs::TR_LIMIT, state.tr.value().limit);
_write_vmcs(service, Vmcs::TR_AR, _convert_ar(state.tr.value().ar));
_write_vmcs(service, Vmcs::TR_BASE, state.tr.value().base);
}
- if (state.ldtr.valid()) {
+ if (state.ldtr.charged()) {
_write_vmcs(service, Vmcs::LDTR_SEL, state.ldtr.value().sel);
_write_vmcs(service, Vmcs::LDTR_LIMIT, state.ldtr.value().limit);
_write_vmcs(service, Vmcs::LDTR_AR, _convert_ar(state.ldtr.value().ar));
_write_vmcs(service, Vmcs::LDTR_BASE, state.ldtr.value().base);
}
- if (state.idtr.valid()) {
+ if (state.idtr.charged()) {
_write_vmcs(service, Vmcs::IDTR_BASE, state.idtr.value().base);
_write_vmcs(service, Vmcs::IDTR_LIMIT, state.idtr.value().limit);
}
- if (state.gdtr.valid()) {
+ if (state.gdtr.charged()) {
_write_vmcs(service, Vmcs::GDTR_BASE, state.gdtr.value().base);
_write_vmcs(service, Vmcs::GDTR_LIMIT, state.gdtr.value().limit);
}
- if (state.pdpte_0.valid())
+ if (state.pdpte_0.charged())
_write_vmcs(service, Vmcs::PDPTE_0, state.pdpte_0.value());
- if (state.pdpte_1.valid())
+ if (state.pdpte_1.charged())
_write_vmcs(service, Vmcs::PDPTE_1, state.pdpte_1.value());
- if (state.pdpte_2.valid())
+ if (state.pdpte_2.charged())
_write_vmcs(service, Vmcs::PDPTE_2, state.pdpte_2.value());
- if (state.pdpte_3.valid())
+ if (state.pdpte_3.charged())
_write_vmcs(service, Vmcs::PDPTE_3, state.pdpte_3.value());
- if (state.sysenter_cs.valid())
+ if (state.sysenter_cs.charged())
_write_vmcs(service, Vmcs::SYSENTER_CS, state.sysenter_cs.value());
- if (state.sysenter_sp.valid())
+ if (state.sysenter_sp.charged())
_write_vmcs(service, Vmcs::SYSENTER_SP, state.sysenter_sp.value());
- if (state.sysenter_ip.valid())
+ if (state.sysenter_ip.charged())
_write_vmcs(service, Vmcs::SYSENTER_IP, state.sysenter_ip.value());
}
@@ -568,26 +600,26 @@ struct Vcpu : Genode::Thread
uint32_t _read_vmcs_32(seL4_X86_VCPU const service, enum Vmcs const field) {
return _read_vmcsX(service, field); }
- void _read_sel4_state(seL4_X86_VCPU const service, Vm_state &state)
+ void _read_sel4_state(seL4_X86_VCPU const service, Vcpu_state &state)
{
- state.ip.value(seL4_GetMR(SEL4_VMENTER_CALL_EIP_MR));
- state.ctrl_primary.value(seL4_GetMR(SEL4_VMENTER_CALL_CONTROL_PPC_MR));
+ state.ip.charge(seL4_GetMR(SEL4_VMENTER_CALL_EIP_MR));
+ state.ctrl_primary.charge(seL4_GetMR(SEL4_VMENTER_CALL_CONTROL_PPC_MR));
- state.ip_len.value(seL4_GetMR(SEL4_VMENTER_FAULT_INSTRUCTION_LEN_MR));
- state.qual_primary.value(seL4_GetMR(SEL4_VMENTER_FAULT_QUALIFICATION_MR));
- state.qual_secondary.value(seL4_GetMR(SEL4_VMENTER_FAULT_GUEST_PHYSICAL_MR));
+ state.ip_len.charge(seL4_GetMR(SEL4_VMENTER_FAULT_INSTRUCTION_LEN_MR));
+ state.qual_primary.charge(seL4_GetMR(SEL4_VMENTER_FAULT_QUALIFICATION_MR));
+ state.qual_secondary.charge(seL4_GetMR(SEL4_VMENTER_FAULT_GUEST_PHYSICAL_MR));
- state.flags.value(seL4_GetMR(SEL4_VMENTER_FAULT_RFLAGS_MR));
- state.intr_state.value(seL4_GetMR(SEL4_VMENTER_FAULT_GUEST_INT_MR));
- state.cr3.value(seL4_GetMR(SEL4_VMENTER_FAULT_CR3_MR));
+ state.flags.charge(seL4_GetMR(SEL4_VMENTER_FAULT_RFLAGS_MR));
+ state.intr_state.charge(seL4_GetMR(SEL4_VMENTER_FAULT_GUEST_INT_MR));
+ state.cr3.charge(seL4_GetMR(SEL4_VMENTER_FAULT_CR3_MR));
- state.ax.value(seL4_GetMR(SEL4_VMENTER_FAULT_EAX));
- state.bx.value(seL4_GetMR(SEL4_VMENTER_FAULT_EBX));
- state.cx.value(seL4_GetMR(SEL4_VMENTER_FAULT_ECX));
- state.dx.value(seL4_GetMR(SEL4_VMENTER_FAULT_EDX));
- state.si.value(seL4_GetMR(SEL4_VMENTER_FAULT_ESI));
- state.di.value(seL4_GetMR(SEL4_VMENTER_FAULT_EDI));
- state.bp.value(seL4_GetMR(SEL4_VMENTER_FAULT_EBP));
+ state.ax.charge(seL4_GetMR(SEL4_VMENTER_FAULT_EAX));
+ state.bx.charge(seL4_GetMR(SEL4_VMENTER_FAULT_EBX));
+ state.cx.charge(seL4_GetMR(SEL4_VMENTER_FAULT_ECX));
+ state.dx.charge(seL4_GetMR(SEL4_VMENTER_FAULT_EDX));
+ state.si.charge(seL4_GetMR(SEL4_VMENTER_FAULT_ESI));
+ state.di.charge(seL4_GetMR(SEL4_VMENTER_FAULT_EDI));
+ state.bp.charge(seL4_GetMR(SEL4_VMENTER_FAULT_EBP));
_recent_gpr.eax = state.ax.value();
_recent_gpr.ebx = state.bx.value();
@@ -597,144 +629,141 @@ struct Vcpu : Genode::Thread
_recent_gpr.edi = state.di.value();
_recent_gpr.ebp = state.bp.value();
- state.sp.value(_read_vmcs(service, Vmcs::RSP));
- state.dr7.value(_read_vmcs(service, Vmcs::DR7));
+ state.sp.charge(_read_vmcs(service, Vmcs::RSP));
+ state.dr7.charge(_read_vmcs(service, Vmcs::DR7));
/* r8 - r15 not supported on seL4 */
{
addr_t const cr0 = _read_vmcs(service, Vmcs::CR0);
addr_t const cr0_shadow = _read_vmcs(service, Vmcs::CR0_SHADOW);
- state.cr0.value((cr0 & ~cr0_mask) | (cr0_shadow & cr0_mask));
+ state.cr0.charge((cr0 & ~cr0_mask) | (cr0_shadow & cr0_mask));
if (state.cr0.value() != cr0_shadow)
_write_vmcs(service, Vmcs::CR0_SHADOW, state.cr0.value());
}
/* cr2 not supported on seL4 */
- state.cr2.value(state.cr2.value());
+ state.cr2.charge(state.cr2.value());
{
addr_t const cr4 = _read_vmcs(service, Vmcs::CR4);
addr_t const cr4_shadow = _read_vmcs(service, Vmcs::CR4_SHADOW);
- state.cr4.value((cr4 & ~cr4_mask) | (cr4_shadow & cr4_mask));
+ state.cr4.charge((cr4 & ~cr4_mask) | (cr4_shadow & cr4_mask));
if (state.cr4.value() != cr4_shadow)
_write_vmcs(service, Vmcs::CR4_SHADOW, state.cr4.value());
}
- typedef Genode::Vm_state::Segment Segment;
- typedef Genode::Vm_state::Range Range;
+ typedef Genode::Vcpu_state::Segment Segment;
+ typedef Genode::Vcpu_state::Range Range;
- state.cs.value(Segment{_read_vmcs_16(service, Vmcs::CS_SEL),
+ state.cs.charge(Segment{_read_vmcs_16(service, Vmcs::CS_SEL),
_convert_ar_16(_read_vmcs(service, Vmcs::CS_AR)),
_read_vmcs_32(service, Vmcs::CS_LIMIT),
_read_vmcs(service, Vmcs::CS_BASE)});
- state.ss.value(Segment{_read_vmcs_16(service, Vmcs::SS_SEL),
+ state.ss.charge(Segment{_read_vmcs_16(service, Vmcs::SS_SEL),
_convert_ar_16(_read_vmcs(service, Vmcs::SS_AR)),
_read_vmcs_32(service, Vmcs::SS_LIMIT),
_read_vmcs(service, Vmcs::SS_BASE)});
- state.es.value(Segment{_read_vmcs_16(service, Vmcs::ES_SEL),
+ state.es.charge(Segment{_read_vmcs_16(service, Vmcs::ES_SEL),
_convert_ar_16(_read_vmcs(service, Vmcs::ES_AR)),
_read_vmcs_32(service, Vmcs::ES_LIMIT),
_read_vmcs(service, Vmcs::ES_BASE)});
- state.ds.value(Segment{_read_vmcs_16(service, Vmcs::DS_SEL),
+ state.ds.charge(Segment{_read_vmcs_16(service, Vmcs::DS_SEL),
_convert_ar_16(_read_vmcs(service, Vmcs::DS_AR)),
_read_vmcs_32(service, Vmcs::DS_LIMIT),
_read_vmcs(service, Vmcs::DS_BASE)});
- state.fs.value(Segment{_read_vmcs_16(service, Vmcs::FS_SEL),
+ state.fs.charge(Segment{_read_vmcs_16(service, Vmcs::FS_SEL),
_convert_ar_16(_read_vmcs(service, Vmcs::FS_AR)),
_read_vmcs_32(service, Vmcs::FS_LIMIT),
_read_vmcs(service, Vmcs::FS_BASE)});
- state.gs.value(Segment{_read_vmcs_16(service, Vmcs::GS_SEL),
+ state.gs.charge(Segment{_read_vmcs_16(service, Vmcs::GS_SEL),
_convert_ar_16(_read_vmcs(service, Vmcs::GS_AR)),
_read_vmcs_32(service, Vmcs::GS_LIMIT),
_read_vmcs(service, Vmcs::GS_BASE)});
- state.tr.value(Segment{_read_vmcs_16(service, Vmcs::TR_SEL),
+ state.tr.charge(Segment{_read_vmcs_16(service, Vmcs::TR_SEL),
_convert_ar_16(_read_vmcs(service, Vmcs::TR_AR)),
_read_vmcs_32(service, Vmcs::TR_LIMIT),
_read_vmcs(service, Vmcs::TR_BASE)});
- state.ldtr.value(Segment{_read_vmcs_16(service, Vmcs::LDTR_SEL),
+ state.ldtr.charge(Segment{_read_vmcs_16(service, Vmcs::LDTR_SEL),
_convert_ar_16(_read_vmcs(service, Vmcs::LDTR_AR)),
_read_vmcs_32(service, Vmcs::LDTR_LIMIT),
_read_vmcs(service, Vmcs::LDTR_BASE)});
- state.idtr.value(Range{_read_vmcs(service, Vmcs::IDTR_BASE),
- _read_vmcs_32(service, Vmcs::IDTR_LIMIT)});
+ state.idtr.charge(Range{ .limit = _read_vmcs_32(service, Vmcs::IDTR_LIMIT),
+ .base = _read_vmcs(service, Vmcs::IDTR_BASE) });
- state.gdtr.value(Range{_read_vmcs(service, Vmcs::GDTR_BASE),
- _read_vmcs_32(service, Vmcs::GDTR_LIMIT)});
+ state.gdtr.charge(Range{ .limit = _read_vmcs_32(service, Vmcs::GDTR_LIMIT),
+ .base = _read_vmcs(service, Vmcs::GDTR_BASE) });
- state.sysenter_cs.value(_read_vmcs(service, Vmcs::SYSENTER_CS));
- state.sysenter_sp.value(_read_vmcs(service, Vmcs::SYSENTER_SP));
- state.sysenter_ip.value(_read_vmcs(service, Vmcs::SYSENTER_IP));
+ state.sysenter_cs.charge(_read_vmcs(service, Vmcs::SYSENTER_CS));
+ state.sysenter_sp.charge(_read_vmcs(service, Vmcs::SYSENTER_SP));
+ state.sysenter_ip.charge(_read_vmcs(service, Vmcs::SYSENTER_IP));
/* no support by seL4 to read this value */
- state.ctrl_secondary.value(state.ctrl_secondary.value());
- //state.ctrl_secondary.value(_read_vmcs(service, Vmcs::CTRL_1));
+ state.ctrl_secondary.charge(state.ctrl_secondary.value());
+ //state.ctrl_secondary.charge(_read_vmcs(service, Vmcs::CTRL_1));
if (state.exit_reason == VMEXIT_INVALID ||
state.exit_reason == VMEXIT_RECALL)
{
- state.inj_info.value(_read_vmcs(service, Vmcs::INTR_INFO));
- state.inj_error.value(_read_vmcs(service, Vmcs::INTR_ERROR));
+ state.inj_info.charge(_read_vmcs(service, Vmcs::INTR_INFO));
+ state.inj_error.charge(_read_vmcs(service, Vmcs::INTR_ERROR));
} else {
- state.inj_info.value(_read_vmcs(service, Vmcs::IDT_INFO));
- state.inj_error.value(_read_vmcs(service, Vmcs::IDT_ERROR));
+ state.inj_info.charge(_read_vmcs(service, Vmcs::IDT_INFO));
+ state.inj_error.charge(_read_vmcs(service, Vmcs::IDT_ERROR));
}
- state.intr_state.value(_read_vmcs(service, Vmcs::STATE_INTR));
- state.actv_state.value(_read_vmcs(service, Vmcs::STATE_ACTV));
+ state.intr_state.charge(_read_vmcs(service, Vmcs::STATE_INTR));
+ state.actv_state.charge(_read_vmcs(service, Vmcs::STATE_ACTV));
- state.pdpte_0.value(_read_vmcs(service, Vmcs::PDPTE_0));
- state.pdpte_1.value(_read_vmcs(service, Vmcs::PDPTE_1));
- state.pdpte_2.value(_read_vmcs(service, Vmcs::PDPTE_2));
- state.pdpte_3.value(_read_vmcs(service, Vmcs::PDPTE_3));
+ state.pdpte_0.charge(_read_vmcs(service, Vmcs::PDPTE_0));
+ state.pdpte_1.charge(_read_vmcs(service, Vmcs::PDPTE_1));
+ state.pdpte_2.charge(_read_vmcs(service, Vmcs::PDPTE_2));
+ state.pdpte_3.charge(_read_vmcs(service, Vmcs::PDPTE_3));
/* tsc and tsc_offset not supported by seL4 */
- state.tsc.value(Trace::timestamp());
- state.tsc_offset.value(_tsc_offset);
+ state.tsc.charge(Trace::timestamp());
+ state.tsc_offset.charge(_tsc_offset);
- state.efer.value(_read_vmcs(service, Vmcs::EFER));
+ state.efer.charge(_read_vmcs(service, Vmcs::EFER));
/* XXX star, lstar, cstar, fmask, kernel_gs_base not supported by seL4 */
/* XXX tpr and tpr_threshold not supported by seL4 */
}
- public:
-
- Vcpu(Genode::Env &env, Genode::Signal_context_capability &cap,
- Semaphore &handler_ready, Allocator &alloc,
- Affinity::Location &location)
- :
- Thread(env, "vcpu_thread", STACK_SIZE, location, Weight(), env.cpu()),
- _signal(cap),
- _handler_ready(handler_ready), _alloc(alloc)
- { }
-
- Allocator &allocator() { return _alloc; }
-
- void start() override {
- Thread::start();
- _startup.block();
+ Affinity::Location _location(Vcpu_handler_base &handler) const
+ {
+ Thread * ep = reinterpret_cast<Thread *>(&handler.rpc_ep());
+ return ep->affinity();
}
- Genode::Vm_session_client::Vcpu_id id() const { return _id; }
- void id(Genode::Vm_session_client::Vcpu_id id) { _id = id; }
+ public:
- void assign_ds_state(Region_map &rm, Dataspace_capability cap) {
- _state = rm.attach(cap); }
-
- void initial_resume()
+ Sel4_vcpu(Env &env, Vm_connection &vm,
+ Vcpu_handler_base &handler, Exit_config const &)
+ :
+ Thread(env, "vcpu_thread", STACK_SIZE, _location(handler),
+ Weight(), env.cpu()),
+ _vcpu_handler(handler)
{
+ Thread::start();
+
+ /* wait until thread is alive, e.g. Thread::cap() is valid */
+ _startup.block();
+
+ _rpc.construct(vm, this->cap(), *this);
+
+ /* signal about finished vCPU assignment */
_wake_up.up();
}
@@ -762,72 +791,22 @@ struct Vcpu : Genode::Thread
_wake_up.up();
}
+
+ Vcpu_state & state() { return _state; }
+ Sel4_native_rpc * rpc() { return &*_rpc; }
};
-Genode::Vm_session_client::Vcpu_id
-Genode::Vm_session_client::create_vcpu(Allocator &alloc, Env &env,
- Vm_handler_base &handler)
-{
- Thread * ep = reinterpret_cast<Thread *>(&handler._rpc_ep);
- Affinity::Location location = ep->affinity();
+/**************
+ ** vCPU API **
+ **************/
- /* create thread that switches modes between thread/cpu */
- Vcpu * vcpu = new (alloc) Genode::Registered<Vcpu> (vcpus, env,
- handler._cap,
- handler._done,
- alloc,
- location);
+void Vm_connection::Vcpu::run() { static_cast<Sel4_native_rpc &>(_native_vcpu).vcpu.resume(); }
+void Vm_connection::Vcpu::pause() { static_cast<Sel4_native_rpc &>(_native_vcpu).vcpu.pause(); }
+Vcpu_state & Vm_connection::Vcpu::state() { return static_cast<Sel4_native_rpc &>(_native_vcpu).vcpu.state(); }
- try {
- /* now it gets actually valid - vcpu->cap() becomes valid */
- vcpu->start();
- /* instruct core to let it become a vCPU */
- vcpu->id(call(vcpu->cap()));
- call(handler._cap, vcpu->id());
-
- vcpu->assign_ds_state(env.rm(), call(vcpu->id()));
- } catch (...) {
- destroy(alloc, vcpu);
- throw;
- }
-
- vcpu->initial_resume();
-
- return vcpu->id();
-}
-
-void Genode::Vm_session_client::run(Genode::Vm_session_client::Vcpu_id id)
-{
- vcpus.for_each([&] (Vcpu &vcpu) {
- if (vcpu.id().id == id.id)
- vcpu.resume();
- });
-}
-
-void Vm_session_client::pause(Vm_session_client::Vcpu_id vcpu_id)
-{
- vcpus.for_each([&] (Vcpu &vcpu) {
- if (vcpu.id().id != vcpu_id.id)
- return;
-
- vcpu.pause();
- });
-}
-
-Genode::Dataspace_capability Genode::Vm_session_client::cpu_state(Vcpu_id vcpu_id)
-{
- Dataspace_capability cap;
-
- cap = call(vcpu_id);
-
- return cap;
-}
-
-Vm_session::~Vm_session()
-{
- vcpus.for_each([&] (Vcpu &vc) {
- Allocator &alloc = vc.allocator();
- destroy(alloc, &vc);
- });
-}
+Vm_connection::Vcpu::Vcpu(Vm_connection &vm, Allocator &alloc,
+ Vcpu_handler_base &handler, Exit_config const &exit_config)
+:
+ _native_vcpu(*((new (alloc) Sel4_vcpu(vm._env, vm, handler, exit_config))->rpc()))
+{ }
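The seL4 back end above already follows the charge/discharge protocol of the new Vcpu_state introduced in the header below: the kernel side charges registers on VM exit, while the VMM discharges the state and charges only the registers it wants written back on VM entry. A condensed, hypothetical exit handler:

  #include <base/log.h>
  #include <util/string.h>
  #include <cpu/vcpu_state.h>

  /* hypothetical helper illustrating the Vcpu_state accessors from this patch */
  static void handle_exit(Genode::Vcpu_state &state)
  {
  	using Genode::Hex;

  	/* registers charged by the back end on VM exit */
  	if (state.ip.charged())
  		Genode::log("guest stopped at ip=", Hex(state.ip.value()));

  	/* drop all charges, register values remain readable */
  	state.discharge();

  	/* charge only what shall be updated on the next VM entry */
  	state.ip.charge(state.ip.value() + state.ip_len.value());
  	state.ax.charge(0);
  }
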
diff --git a/repos/base/include/spec/x86/cpu/vcpu_state.h b/repos/base/include/spec/x86/cpu/vcpu_state.h
new file mode 100644
index 0000000000..992ec347c8
--- /dev/null
+++ b/repos/base/include/spec/x86/cpu/vcpu_state.h
@@ -0,0 +1,231 @@
+/*
+ * \brief Virtual CPU context for x86
+ * \author Alexander Boettcher
+ * \author Christian Helmuth
+ * \date 2018-10-09
+ */
+
+/*
+ * Copyright (C) 2018-2021 Genode Labs GmbH
+ *
+ * This file is part of the Genode OS framework, which is distributed
+ * under the terms of the GNU Affero General Public License version 3.
+ */
+
+#ifndef _INCLUDE__SPEC__X86__CPU__VM_STATE_H_
+#define _INCLUDE__SPEC__X86__CPU__VM_STATE_H_
+
+#include
+#include
+
+namespace Genode { struct Vcpu_state; }
+
+
+/*
+ * The state of one virtual CPU (vCPU) as available via the VM session for x86
+ *
+ * The state object is designed for bidirectional transfer of register state,
+ * which means it reflects vCPU state on VM exits but also supports loading
+ * updated register state on VM entry. Therefore, each register contains not
+ * only the actual register value but also a 'charged' state.
+ *
+ * The hypervisor charges registers as requested by the VMM on VM exit with the
+ * current virtual CPU state. The VMM for its part charges registers it intends
+ * to update with new values before VM entry (e.g., after I/O emulation). Both
+ * parties are required to 'discharge()' the vCPU state explicitly if registers
+ * charged by the other party should not be considered on return. The common
+ * case is to discharge all registers, charge some updates and transfer
+ * execution to the other party.
+ */
+class Genode::Vcpu_state
+{
+ private:
+
+ Vcpu_state & operator = (Vcpu_state const &) = default;
+
+ Vcpu_state(Vcpu_state const&) = delete;
+
+ public:
+
+ template <typename T>
+ class Register : Noncopyable
+ {
+ private:
+
+ friend class Vcpu_state;
+
+ T _value { };
+ bool _charged { false };
+
+ /*
+ * Trick used by Vcpu_state::discharge() to discharge all
+ * registers at once. Note, the register value is kept intact.
+ */
+ Register & operator = (Register const &)
+ {
+ _charged = false;
+
+ return *this;
+ }
+
+ public:
+
+ bool charged() const { return _charged; }
+
+ T value() const { return _value; }
+
+ void charge(T const &value)
+ {
+ _charged = true;
+ _value = value;
+ }
+ };
+
+ struct Range
+ {
+ uint32_t limit;
+ addr_t base;
+ };
+
+ struct Segment
+ {
+ uint16_t sel, ar;
+ uint32_t limit;
+ addr_t base;
+ };
+
+ Register ax;
+ Register cx;
+ Register dx;
+ Register bx;
+
+ Register bp;
+ Register si;
+ Register di;
+
+ Register sp;
+ Register ip;
+ Register ip_len;
+ Register flags;
+
+ Register es;
+ Register ds;
+ Register fs;
+ Register gs;
+ Register cs;
+ Register ss;
+ Register tr;
+ Register ldtr;
+
+ Register gdtr;
+ Register idtr;
+
+ Register cr0;
+ Register cr2;
+ Register cr3;
+ Register cr4;
+
+ Register dr7;
+
+ Register sysenter_ip;
+ Register sysenter_sp;
+ Register sysenter_cs;
+
+ Register qual_primary;
+ Register qual_secondary;
+
+ Register ctrl_primary;
+ Register