Mirror of https://github.com/mmueller41/genode.git (synced 2026-01-22 13:02:56 +01:00)

Compare commits: 118 commits
Commits (SHA1):

9c51d3aefc 5551f96c2d 220d159b7c f38762f9f0 bdb1b222cd ce1a137ddb 3e736fc888 304b61613b
a91c4bc8ae 9320380efd 84a79c2168 a8f6fc42cc 8b9d93259f 6e9b070758 8ff310b40d 4710b24e99
918cac3cca 835408a828 5b44b54065 99286e0c5c ad0f2d3933 f76aaa0abf 668ea3f253 d015297925
0191b42e51 68e4ef34d3 4af23e023f a921845e36 06fd884ef4 2b66139f49 8bb247da0e 8acd0741d4
a7aaad6dae 1dbdf5bd96 7d5338a393 bce0fbdc4f fcaffab7d5 8c0ecf9ac9 57662d5c8c ea036537c5
6ba44cbe70 1e7cd10657 0b42ee3da2 4afed37ffd bfcf897893 fc7bdd97e0 a0c5ad77c9 28a142821b
48b042564d f3eb97bf1c d0d08c68aa f94d7c40d1 0fdb9c7a4c 604a5f1f8e 0f565ba253 836bd76106
256c509550 c33e8cae4a 29b00817ed a68cc9d6ee e6da335de9 213fe79900 3b32c3f785 23b527ba85
a1856ca6d9 b8f6e86fa3 544057fea1 f98359cbe6 1c3c8ca98f 481a26d286 480bb08429 db3b242acb
9399b07d0c ed008edef8 baa130db17 5a4de94aa8 5d6d54c066 f8f8ea229a 2644b7d5aa f0340b12a3
1ff4093b40 e5b58e1eb6 0c1f727871 e6b09edaca 7d405d8f6a 16f5ad55c8 142ef47861 6b7fae0643
64a2307c08 d0416903dc d1c240c6c5 48cbe050f7 55f07a89c6 33ff8591f0 45fef3c8be 834bebf3e5
d931e6a56e ff525b743f 7d9db940e2 08ebbf001e 2888391eec 95faab73fa 9c4a683b33 db81856dac
c4a73980da 14ee3c0d36 c4d4e12f7e 6c0f4e232c a649cd8633 71314a9ca3 29c00310ad d4073612cb
de772a6fc1 f07003f2b8 ff9d123000 b5449df554 b255eb14fe f9d28eb8e0
@@ -61,9 +61,8 @@ class Core::Platform_thread : Interface

/**
* Constructor
*/
Platform_thread(Platform_pd &pd, Rpc_entrypoint &, Ram_allocator &,
Region_map &, size_t, const char *name, unsigned,
Affinity::Location, addr_t)
Platform_thread(Platform_pd &pd, size_t, const char *name,
unsigned, Affinity::Location, addr_t)
: _name(name), _pd(pd) { }

/**

@@ -38,11 +38,8 @@ static inline bool can_use_super_page(addr_t, size_t)
}

Io_mem_session_component::Map_local_result Io_mem_session_component::_map_local(addr_t const phys_base,
size_t const size_in)
addr_t Io_mem_session_component::_map_local(addr_t phys_base, size_t size)
{
size_t const size = size_in;

auto map_io_region = [] (addr_t phys_base, addr_t local_base, size_t size)
{
using namespace Fiasco;

@@ -94,16 +91,14 @@ Io_mem_session_component::Map_local_result Io_mem_session_component::_map_local(
size_t align = (size >= get_super_page_size()) ? get_super_page_size_log2()
: get_page_size_log2();

return platform().region_alloc().alloc_aligned(size, align).convert<Map_local_result>(
return platform().region_alloc().alloc_aligned(size, align).convert<addr_t>(

[&] (void *ptr) {
addr_t const core_local_base = (addr_t)ptr;
map_io_region(phys_base, core_local_base, size);
return Map_local_result { .core_local_addr = core_local_base, .success = true };
},
return core_local_base; },

[&] (Range_allocator::Alloc_error) {
[&] (Range_allocator::Alloc_error) -> addr_t {
error("core-local mapping of memory-mapped I/O range failed");
return Map_local_result();
});
return 0; });
}
@@ -103,6 +103,3 @@ Untyped_capability Pager_entrypoint::_pager_object_cap(unsigned long badge)
{
return Capability_space::import(native_thread().l4id, Rpc_obj_key(badge));
}

void Core::init_page_fault_handling(Rpc_entrypoint &) { }

@@ -20,6 +20,7 @@

/* core includes */
#include <platform.h>
#include <core_env.h>

using namespace Core;

@@ -26,7 +26,7 @@ namespace Genode { struct Foc_thread_state; }
struct Genode::Foc_thread_state : Thread_state
{
Foc::l4_cap_idx_t kcap { Foc::L4_INVALID_CAP }; /* thread's gate cap in its PD */
uint32_t id { }; /* ID of gate capability */
uint16_t id { }; /* ID of gate capability */
addr_t utcb { }; /* thread's UTCB in its PD */
};

@@ -30,15 +30,17 @@ class Core::Cap_id_allocator
{
public:

using id_t = unsigned;
using id_t = uint16_t;

enum { ID_MASK = 0xffff };

private:

enum {
CAP_ID_OFFSET = 1 << 2,
CAP_ID_MASK = CAP_ID_OFFSET - 1,
CAP_ID_RANGE = 1u << 28,
ID_MASK = CAP_ID_RANGE - 1,
CAP_ID_RANGE = ~0UL,
CAP_ID_MASK = ~3UL,
CAP_ID_NUM_MAX = CAP_ID_MASK >> 2,
CAP_ID_OFFSET = 1 << 2
};

Synced_range_allocator<Allocator_avl> _id_alloc;
@@ -75,8 +75,8 @@ class Core::Platform_thread : Interface
/**
* Constructor for non-core threads
*/
Platform_thread(Platform_pd &, Rpc_entrypoint &, Ram_allocator &, Region_map &,
size_t, const char *name, unsigned priority, Affinity::Location, addr_t);
Platform_thread(Platform_pd &, size_t, const char *name, unsigned priority,
Affinity::Location, addr_t);

/**
* Constructor for core main-thread

@@ -125,7 +125,7 @@ class Core::Vm_session_component
** Vm session interface **
**************************/

Capability<Native_vcpu> create_vcpu(Thread_capability) override;
Capability<Native_vcpu> create_vcpu(Thread_capability);
void attach_pic(addr_t) override { /* unused on Fiasco.OC */ }

void attach(Dataspace_capability, addr_t, Attach_attr) override; /* vm_session_common.cc */

@@ -6,7 +6,7 @@
*/

/*
* Copyright (C) 2006-2024 Genode Labs GmbH
* Copyright (C) 2006-2017 Genode Labs GmbH
*
* This file is part of the Genode OS framework, which is distributed
* under the terms of the GNU Affero General Public License version 3.
@@ -21,37 +21,31 @@
using namespace Core;

void Io_mem_session_component::_unmap_local(addr_t base, size_t size, addr_t)
void Io_mem_session_component::_unmap_local(addr_t base, size_t, addr_t)
{
if (!base)
return;

unmap_local(base, size >> 12);
platform().region_alloc().free(reinterpret_cast<void *>(base));
}

Io_mem_session_component::Map_local_result Io_mem_session_component::_map_local(addr_t const base,
size_t const size)
addr_t Io_mem_session_component::_map_local(addr_t base, size_t size)
{
/* align large I/O dataspaces on a super-page boundary within core */
size_t alignment = (size >= get_super_page_size()) ? get_super_page_size_log2()
: get_page_size_log2();

/* find appropriate region and map it locally */
return platform().region_alloc().alloc_aligned(size, (unsigned)alignment).convert<Map_local_result>(
/* find appropriate region for mapping */
return platform().region_alloc().alloc_aligned(size, (unsigned)alignment).convert<addr_t>(

[&] (void *local_base) {
if (!map_local_io(base, (addr_t)local_base, size >> get_page_size_log2())) {
error("map_local_io failed ", Hex_range(base, size));
error("map_local_io failed");
platform().region_alloc().free(local_base, base);
return Map_local_result();
return 0UL;
}
return Map_local_result { .core_local_addr = addr_t(local_base),
.success = true };
return (addr_t)local_base;
},

[&] (Range_allocator::Alloc_error) {
error("allocation of virtual memory for local I/O mapping failed");
return Map_local_result(); });
return 0UL; });
}

@@ -153,6 +153,3 @@ Pager_capability Pager_entrypoint::manage(Pager_object &obj)
},
[&] (Cpu_session::Create_thread_error) { return Pager_capability(); });
}

void Core::init_page_fault_handling(Rpc_entrypoint &) { }

@@ -18,7 +18,6 @@
#include <dataspace/capability.h>
#include <trace/source_registry.h>
#include <util/misc_math.h>
#include <util/mmio.h>
#include <util/xml_generator.h>

/* base-internal includes */
@@ -343,76 +342,6 @@ void Core::Platform::_setup_irq_alloc()
}

struct Acpi_rsdp : public Genode::Mmio<32>
{
using Mmio<32>::Mmio;

struct Signature : Register< 0, 64> { };
struct Revision : Register<15, 8> { };
struct Rsdt : Register<16, 32> { };
struct Length : Register<20, 32> { };
struct Xsdt : Register<24, 64> { };

bool valid() const
{
const char sign[] = "RSD PTR ";
return read<Signature>() == *(Genode::uint64_t *)sign;
}

} __attribute__((packed));

static void add_acpi_rsdp(auto &region_alloc, auto &xml)
{
using namespace Foc;
using Foc::L4::Kip::Mem_desc;

l4_kernel_info_t const &kip = sigma0_map_kip();
Mem_desc const * const desc = Mem_desc::first(&kip);

if (!desc)
return;

for (unsigned i = 0; i < Mem_desc::count(&kip); ++i) {
if (desc[i].type() != Mem_desc::Mem_type::Info ||
desc[i].sub_type() != Mem_desc::Info_sub_type::Info_acpi_rsdp)
continue;

auto offset = desc[i].start() & 0xffful;
auto pages = align_addr(offset + desc[i].size(), 12) >> 12;

region_alloc.alloc_aligned(pages * 4096, 12).with_result([&] (void *core_local_ptr) {

if (!map_local_io(desc[i].start(), (addr_t)core_local_ptr, pages))
return;

Byte_range_ptr const ptr((char *)(addr_t(core_local_ptr) + offset),
pages * 4096 - offset);
auto const rsdp = Acpi_rsdp(ptr);

if (!rsdp.valid())
return;

xml.node("acpi", [&] {
xml.attribute("revision", rsdp.read<Acpi_rsdp::Revision>());
if (rsdp.read<Acpi_rsdp::Rsdt>())
xml.attribute("rsdt", String<32>(Hex(rsdp.read<Acpi_rsdp::Rsdt>())));
if (rsdp.read<Acpi_rsdp::Xsdt>())
xml.attribute("xsdt", String<32>(Hex(rsdp.read<Acpi_rsdp::Xsdt>())));
});

unmap_local(addr_t(core_local_ptr), pages);
region_alloc.free(core_local_ptr);

pages = 0;
}, [&] (Range_allocator::Alloc_error) { });

if (!pages)
return;
}
}

void Core::Platform::_setup_basics()
{
using namespace Foc;

@@ -483,10 +412,6 @@ void Core::Platform::_setup_basics()

/* image is accessible by core */
add_region(Region(img_start, img_end), _core_address_ranges());

/* requested as I/O memory by the VESA driver and ACPI (rsdp search) */
_io_mem_alloc.add_range (0, 0x2000);
ram_alloc() .remove_range(0, 0x2000);
}
@@ -592,10 +517,7 @@ Core::Platform::Platform()

xml.node("affinity-space", [&] {
xml.attribute("width", affinity_space().width());
xml.attribute("height", affinity_space().height());
});

add_acpi_rsdp(region_alloc(), xml);
xml.attribute("height", affinity_space().height()); });
});
}
);

@@ -18,6 +18,7 @@
/* core includes */
#include <platform_thread.h>
#include <platform.h>
#include <core_env.h>

/* Fiasco.OC includes */
#include <foc/syscall.h>

@@ -209,7 +210,7 @@ Foc_thread_state Platform_thread::state()
s = _pager_obj->state.state;

s.kcap = _gate.remote;
s.id = Cap_index::id_t(_gate.local.local_name());
s.id = (uint16_t)_gate.local.local_name();
s.utcb = _utcb;

return s;

@@ -277,8 +278,7 @@ void Platform_thread::_finalize_construction()
}

Platform_thread::Platform_thread(Platform_pd &pd, Rpc_entrypoint &, Ram_allocator &,
Region_map &, size_t, const char *name, unsigned prio,
Platform_thread::Platform_thread(Platform_pd &pd, size_t, const char *name, unsigned prio,
Affinity::Location location, addr_t)
:
_name(name),

@@ -38,7 +38,7 @@ using namespace Core;

Cap_index_allocator &Genode::cap_idx_alloc()
{
static Cap_index_allocator_tpl<Core_cap_index, 128 * 1024> alloc;
static Cap_index_allocator_tpl<Core_cap_index,10*1024> alloc;
return alloc;
}
@@ -190,7 +190,7 @@ Cap_id_allocator::Cap_id_allocator(Allocator &alloc)
:
_id_alloc(&alloc)
{
_id_alloc.add_range(CAP_ID_OFFSET, unsigned(CAP_ID_RANGE) - unsigned(CAP_ID_OFFSET));
_id_alloc.add_range(CAP_ID_OFFSET, CAP_ID_RANGE);
}

@@ -213,7 +213,7 @@ void Cap_id_allocator::free(id_t id)
Mutex::Guard lock_guard(_mutex);

if (id < CAP_ID_RANGE)
_id_alloc.free((void*)(addr_t(id & CAP_ID_MASK)), CAP_ID_OFFSET);
_id_alloc.free((void*)(id & CAP_ID_MASK), CAP_ID_OFFSET);
}

@@ -12,6 +12,7 @@
*/

/* core includes */
#include <core_env.h>
#include <platform_services.h>
#include <vm_root.h>
#include <io_port_root.h>

@@ -23,15 +24,15 @@ void Core::platform_add_local_services(Rpc_entrypoint &ep,
Sliced_heap &heap,
Registry<Service> &services,
Trace::Source_registry &trace_sources,
Ram_allocator &core_ram,
Region_map &core_rm,
Range_allocator &io_port_ranges)
Ram_allocator &)
{
static Vm_root vm_root(ep, heap, core_ram, core_rm, trace_sources);
static Vm_root vm_root(ep, heap, core_env().ram_allocator(),
core_env().local_rm(), trace_sources);

static Core_service<Vm_session_component> vm(services, vm_root);

static Io_port_root io_root(io_port_ranges, heap);
static Io_port_root io_root(*core_env().pd_session(),
platform().io_port_alloc(), heap);

static Core_service<Io_port_session_component> io_port(services, io_root);
}

@@ -22,6 +22,7 @@

/* core includes */
#include <platform.h>
#include <core_env.h>

/* Fiasco.OC includes */
#include <foc/syscall.h>
@@ -30,13 +30,12 @@ class Genode::Native_capability::Data : public Avl_node<Data>
|
||||
{
|
||||
public:
|
||||
|
||||
using id_t = unsigned;
|
||||
|
||||
constexpr static id_t INVALID_ID = ~0u;
|
||||
using id_t = uint16_t;
|
||||
|
||||
private:
|
||||
|
||||
constexpr static id_t UNUSED = 0;
|
||||
constexpr static uint16_t INVALID_ID = ~0;
|
||||
constexpr static uint16_t UNUSED = 0;
|
||||
|
||||
uint8_t _ref_cnt; /* reference counter */
|
||||
id_t _id; /* global capability id */
|
||||
@@ -47,8 +46,8 @@ class Genode::Native_capability::Data : public Avl_node<Data>
|
||||
|
||||
bool valid() const { return _id != INVALID_ID; }
|
||||
bool used() const { return _id != UNUSED; }
|
||||
id_t id() const { return _id; }
|
||||
void id(id_t id) { _id = id; }
|
||||
uint16_t id() const { return _id; }
|
||||
void id(uint16_t id) { _id = id; }
|
||||
uint8_t inc();
|
||||
uint8_t dec();
|
||||
addr_t kcap() const;
|
||||
|
||||
@@ -3,11 +3,11 @@
|
||||
* \author Stefan Kalkowski
|
||||
* \date 2010-12-06
|
||||
*
|
||||
* This is a Fiasco.OC-specific addition to the process environment.
|
||||
* This is a Fiasco.OC-specific addition to the process enviroment.
|
||||
*/
|
||||
|
||||
/*
|
||||
* Copyright (C) 2010-2025 Genode Labs GmbH
|
||||
* Copyright (C) 2010-2017 Genode Labs GmbH
|
||||
*
|
||||
* This file is part of the Genode OS framework, which is distributed
|
||||
* under the terms of the GNU Affero General Public License version 3.
|
||||
@@ -59,7 +59,7 @@ static volatile int _cap_index_spinlock = SPINLOCK_UNLOCKED;
|
||||
bool Cap_index::higher(Cap_index *n) { return n->_id > _id; }
|
||||
|
||||
|
||||
Cap_index* Cap_index::find_by_id(id_t id)
|
||||
Cap_index* Cap_index::find_by_id(uint16_t id)
|
||||
{
|
||||
if (_id == id) return this;
|
||||
|
||||
@@ -116,8 +116,8 @@ Cap_index* Capability_map::insert(Cap_index::id_t id)
|
||||
{
|
||||
Spin_lock::Guard guard(_lock);
|
||||
|
||||
if (_tree.first() && _tree.first()->find_by_id(id))
|
||||
return { };
|
||||
ASSERT(!_tree.first() || !_tree.first()->find_by_id(id),
|
||||
"Double insertion in cap_map()!");
|
||||
|
||||
Cap_index * const i = cap_idx_alloc().alloc_range(1);
|
||||
if (i) {
|
||||
@@ -184,16 +184,9 @@ Cap_index* Capability_map::insert_map(Cap_index::id_t id, addr_t kcap)
|
||||
_tree.insert(i);
|
||||
|
||||
/* map the given cap to our registry entry */
|
||||
auto const msg = l4_task_map(L4_BASE_TASK_CAP, L4_BASE_TASK_CAP,
|
||||
l4_obj_fpage(kcap, 0, L4_FPAGE_RWX),
|
||||
i->kcap() | L4_ITEM_MAP | L4_MAP_ITEM_GRANT);
|
||||
|
||||
if (l4_error(msg)) {
|
||||
_tree.remove(i);
|
||||
cap_idx_alloc().free(i, 1);
|
||||
return 0;
|
||||
}
|
||||
|
||||
l4_task_map(L4_BASE_TASK_CAP, L4_BASE_TASK_CAP,
|
||||
l4_obj_fpage(kcap, 0, L4_FPAGE_RWX),
|
||||
i->kcap() | L4_ITEM_MAP | L4_MAP_ITEM_GRANT);
|
||||
return i;
|
||||
}
|
||||
|
||||
|
||||
@@ -55,6 +55,9 @@ static inline bool ipc_error(l4_msgtag_t tag, bool print)
|
||||
}
|
||||
|
||||
|
||||
static constexpr Cap_index::id_t INVALID_BADGE = 0xffff;
|
||||
|
||||
|
||||
/**
|
||||
* Representation of a capability during UTCB marshalling/unmarshalling
|
||||
*/
|
||||
@@ -113,7 +116,7 @@ static int extract_msg_from_utcb(l4_msgtag_t tag,
|
||||
|
||||
Cap_index::id_t const badge = (Cap_index::id_t)(*msg_words++);
|
||||
|
||||
if (badge == Cap_index::INVALID_ID)
|
||||
if (badge == INVALID_BADGE)
|
||||
continue;
|
||||
|
||||
/* received a delegated capability */
|
||||
@@ -224,7 +227,7 @@ static l4_msgtag_t copy_msgbuf_to_utcb(Msgbuf_base &snd_msg,
|
||||
for (unsigned i = 0; i < num_caps; i++) {
|
||||
|
||||
/* store badge as normal message word */
|
||||
*msg_words++ = caps[i].valid ? caps[i].badge : Cap_index::INVALID_ID;
|
||||
*msg_words++ = caps[i].valid ? caps[i].badge : INVALID_BADGE;
|
||||
|
||||
/* setup flexpage for valid capability to delegate */
|
||||
if (caps[i].valid) {
|
||||
|
||||
@@ -42,6 +42,7 @@ namespace Foc {
|
||||
using namespace Genode;
|
||||
|
||||
using Exit_config = Vm_connection::Exit_config;
|
||||
using Call_with_state = Vm_connection::Call_with_state;
|
||||
|
||||
|
||||
enum Virt { VMX, SVM, UNKNOWN };
|
||||
@@ -71,7 +72,8 @@ struct Foc_native_vcpu_rpc : Rpc_client<Vm_session::Native_vcpu>, Noncopyable
|
||||
Capability<Vm_session::Native_vcpu> _create_vcpu(Vm_connection &vm,
|
||||
Thread_capability &cap)
|
||||
{
|
||||
return vm.create_vcpu(cap);
|
||||
return vm.with_upgrade([&] {
|
||||
return vm.call<Vm_session::Rpc_create_vcpu>(cap); });
|
||||
}
|
||||
|
||||
public:
|
||||
@@ -1340,7 +1342,7 @@ struct Foc_vcpu : Thread, Noncopyable
|
||||
_wake_up.up();
|
||||
}
|
||||
|
||||
void with_state(auto const &fn)
|
||||
void with_state(Call_with_state &cw)
|
||||
{
|
||||
if (!_dispatching) {
|
||||
if (Thread::myself() != _ep_handler) {
|
||||
@@ -1373,7 +1375,7 @@ struct Foc_vcpu : Thread, Noncopyable
|
||||
_state_ready.down();
|
||||
}
|
||||
|
||||
if (fn(_vcpu_state)
|
||||
if (cw.call_with_state(_vcpu_state)
|
||||
|| _extra_dispatch_up)
|
||||
resume();
|
||||
|
||||
@@ -1415,10 +1417,7 @@ static enum Virt virt_type(Env &env)
|
||||
** vCPU API **
|
||||
**************/
|
||||
|
||||
void Vm_connection::Vcpu::_with_state(With_state::Ft const &fn)
|
||||
{
|
||||
static_cast<Foc_native_vcpu_rpc &>(_native_vcpu).vcpu.with_state(fn);
|
||||
}
|
||||
void Vm_connection::Vcpu::_with_state(Call_with_state &cw) { static_cast<Foc_native_vcpu_rpc &>(_native_vcpu).vcpu.with_state(cw); }
|
||||
|
||||
|
||||
Vm_connection::Vcpu::Vcpu(Vm_connection &vm, Allocator &alloc,
|
||||
|
||||
@@ -382,10 +382,13 @@ namespace Kernel {
|
||||
* Halt processing of a signal context synchronously
|
||||
*
|
||||
* \param context capability ID of the targeted signal context
|
||||
*
|
||||
* \retval 0 suceeded
|
||||
* \retval -1 failed
|
||||
*/
|
||||
inline void kill_signal_context(capid_t const context)
|
||||
inline int kill_signal_context(capid_t const context)
|
||||
{
|
||||
call(call_id_kill_signal_context(), context);
|
||||
return (int)call(call_id_kill_signal_context(), context);
|
||||
}
|
||||
|
||||
/**
|
||||
|
||||
@@ -46,6 +46,7 @@ SRC_CC += ram_dataspace_factory.cc
|
||||
SRC_CC += signal_transmitter_noinit.cc
|
||||
SRC_CC += thread_start.cc
|
||||
SRC_CC += env.cc
|
||||
SRC_CC += region_map_support.cc
|
||||
SRC_CC += pager.cc
|
||||
SRC_CC += _main.cc
|
||||
SRC_CC += kernel/cpu.cc
|
||||
@@ -54,14 +55,13 @@ SRC_CC += kernel/ipc_node.cc
|
||||
SRC_CC += kernel/irq.cc
|
||||
SRC_CC += kernel/main.cc
|
||||
SRC_CC += kernel/object.cc
|
||||
SRC_CC += kernel/signal.cc
|
||||
SRC_CC += kernel/signal_receiver.cc
|
||||
SRC_CC += kernel/thread.cc
|
||||
SRC_CC += kernel/timer.cc
|
||||
SRC_CC += capability.cc
|
||||
SRC_CC += stack_area_addr.cc
|
||||
SRC_CC += heartbeat.cc
|
||||
|
||||
BOARD ?= unknown
|
||||
CC_OPT_platform += -DBOARD_NAME="\"$(BOARD)\""
|
||||
|
||||
# provide Genode version information
|
||||
|
||||
@@ -22,9 +22,12 @@ SRC_CC += kernel/vm_thread_on.cc
|
||||
SRC_CC += spec/x86_64/virtualization/kernel/vm.cc
|
||||
SRC_CC += spec/x86_64/virtualization/kernel/svm.cc
|
||||
SRC_CC += spec/x86_64/virtualization/kernel/vmx.cc
|
||||
SRC_CC += spec/x86_64/virtualization/vm_session_component.cc
|
||||
SRC_CC += vm_session_common.cc
|
||||
SRC_CC += vm_session_component.cc
|
||||
SRC_CC += kernel/lock.cc
|
||||
SRC_CC += spec/x86_64/pic.cc
|
||||
SRC_CC += spec/x86_64/timer.cc
|
||||
SRC_CC += spec/x86_64/pit.cc
|
||||
SRC_CC += spec/x86_64/kernel/thread_exception.cc
|
||||
SRC_CC += spec/x86_64/platform_support.cc
|
||||
SRC_CC += spec/x86_64/virtualization/platform_services.cc
|
||||
|
||||
@@ -200,7 +200,6 @@ generalize_target_names: $(CONTENT)
|
||||
# supplement BOARD definition that normally comes form the build dir
|
||||
sed -i "s/\?= unknown/:= $(BOARD)/" src/core/hw/target.mk
|
||||
sed -i "s/\?= unknown/:= $(BOARD)/" src/bootstrap/hw/target.mk
|
||||
sed -i "s/\?= unknown/:= $(BOARD)/" lib/mk/core-hw.inc
|
||||
# discharge targets when building for mismatching architecture
|
||||
sed -i "1aREQUIRES := $(ARCH)" src/core/hw/target.mk
|
||||
sed -i "1aREQUIRES := $(ARCH)" src/bootstrap/hw/target.mk
|
||||
|
||||
@@ -16,6 +16,7 @@
|
||||
|
||||
/* base includes */
|
||||
#include <base/internal/globals.h>
|
||||
#include <base/internal/unmanaged_singleton.h>
|
||||
|
||||
using namespace Genode;
|
||||
|
||||
@@ -25,23 +26,13 @@ size_t bootstrap_stack_size = STACK_SIZE;
|
||||
uint8_t bootstrap_stack[Board::NR_OF_CPUS][STACK_SIZE]
|
||||
__attribute__((aligned(get_page_size())));
|
||||
|
||||
|
||||
Bootstrap::Platform & Bootstrap::platform()
|
||||
{
|
||||
/*
|
||||
* Don't use static local variable because cmpxchg cannot be executed
|
||||
* w/o MMU on ARMv6.
|
||||
*/
|
||||
static long _obj[(sizeof(Bootstrap::Platform)+sizeof(long))/sizeof(long)];
|
||||
static Bootstrap::Platform *ptr;
|
||||
if (!ptr)
|
||||
ptr = construct_at<Bootstrap::Platform>(_obj);
|
||||
|
||||
return *ptr;
|
||||
}
|
||||
Bootstrap::Platform & Bootstrap::platform() {
|
||||
return *unmanaged_singleton<Bootstrap::Platform>(); }
|
||||
|
||||
|
||||
extern "C" void init() __attribute__ ((noreturn));
|
||||
|
||||
|
||||
extern "C" void init()
|
||||
{
|
||||
Bootstrap::Platform & p = Bootstrap::platform();
|
||||
|
||||
@@ -20,6 +20,7 @@
|
||||
#include <base/internal/globals.h>
|
||||
#include <base/internal/output.h>
|
||||
#include <base/internal/raw_write_string.h>
|
||||
#include <base/internal/unmanaged_singleton.h>
|
||||
|
||||
#include <board.h>
|
||||
|
||||
@@ -54,11 +55,7 @@ struct Buffer
|
||||
};
|
||||
|
||||
|
||||
Genode::Log &Genode::Log::log()
|
||||
{
|
||||
static Buffer buffer { };
|
||||
return buffer.log;
|
||||
}
|
||||
Genode::Log &Genode::Log::log() { return unmanaged_singleton<Buffer>()->log; }
|
||||
|
||||
|
||||
void Genode::raw_write_string(char const *str) { log(str); }
|
||||
|
||||
@@ -27,7 +27,6 @@ namespace Bootstrap {
|
||||
|
||||
using Genode::addr_t;
|
||||
using Genode::size_t;
|
||||
using Genode::uint32_t;
|
||||
using Boot_info = Hw::Boot_info<::Board::Boot_info>;
|
||||
using Hw::Mmio_space;
|
||||
using Hw::Mapping;
|
||||
|
||||
@@ -18,12 +18,10 @@
|
||||
#include <platform.h>
|
||||
#include <multiboot.h>
|
||||
#include <multiboot2.h>
|
||||
#include <port_io.h>
|
||||
|
||||
#include <hw/memory_consts.h>
|
||||
#include <hw/spec/x86_64/acpi.h>
|
||||
#include <hw/spec/x86_64/apic.h>
|
||||
#include <hw/spec/x86_64/x86_64.h>
|
||||
|
||||
using namespace Genode;
|
||||
|
||||
@@ -68,108 +66,6 @@ static Hw::Acpi_rsdp search_rsdp(addr_t area, addr_t area_size)
|
||||
}
|
||||
|
||||
|
||||
static uint32_t calibrate_tsc_frequency(addr_t fadt_addr)
|
||||
{
|
||||
uint32_t const default_freq = 2'400'000;
|
||||
|
||||
if (!fadt_addr) {
|
||||
warning("FADT not found, returning fixed TSC frequency of ", default_freq, "kHz");
|
||||
return default_freq;
|
||||
}
|
||||
|
||||
uint32_t const sleep_ms = 10;
|
||||
|
||||
Hw::Acpi_fadt fadt(reinterpret_cast<Hw::Acpi_generic *>(fadt_addr));
|
||||
|
||||
uint32_t const freq = fadt.calibrate_freq_khz(sleep_ms, []() { return Hw::Tsc::rdtsc(); });
|
||||
|
||||
if (!freq) {
|
||||
warning("Unable to calibrate TSC, returning fixed TSC frequency of ", default_freq, "kHz");
|
||||
return default_freq;
|
||||
}
|
||||
|
||||
return freq;
|
||||
}
|
||||
|
||||
|
||||
static Hw::Local_apic::Calibration calibrate_lapic_frequency(addr_t fadt_addr)
|
||||
{
|
||||
uint32_t const default_freq = TIMER_MIN_TICKS_PER_MS;
|
||||
|
||||
if (!fadt_addr) {
|
||||
warning("FADT not found, setting minimum Local APIC frequency of ", default_freq, "kHz");
|
||||
return { default_freq, 1 };
|
||||
}
|
||||
|
||||
uint32_t const sleep_ms = 10;
|
||||
|
||||
Hw::Acpi_fadt fadt(reinterpret_cast<Hw::Acpi_generic *>(fadt_addr));
|
||||
|
||||
Hw::Local_apic lapic(Hw::Cpu_memory_map::lapic_phys_base());
|
||||
|
||||
auto const result =
|
||||
lapic.calibrate_divider([&] {
|
||||
return fadt.calibrate_freq_khz(sleep_ms, [&] {
|
||||
return lapic.read<Hw::Local_apic::Tmr_current>(); }, true); });
|
||||
|
||||
if (!result.freq_khz) {
|
||||
warning("FADT not found, setting minimum Local APIC frequency of ", default_freq, "kHz");
|
||||
return { default_freq, 1 };
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
|
||||
static void disable_pit()
|
||||
{
|
||||
using Hw::outb;
|
||||
|
||||
enum {
|
||||
/* PIT constants */
|
||||
PIT_CH0_DATA = 0x40,
|
||||
PIT_MODE = 0x43,
|
||||
};
|
||||
|
||||
/*
|
||||
* Disable PIT timer channel. This is necessary since BIOS sets up
|
||||
* channel 0 to fire periodically.
|
||||
*/
|
||||
outb(PIT_MODE, 0x30);
|
||||
outb(PIT_CH0_DATA, 0);
|
||||
outb(PIT_CH0_DATA, 0);
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* Enable dispatch serializing lfence instruction on AMD processors
|
||||
*
|
||||
* See Software techniques for managing speculation on AMD processors
|
||||
* Revision 5.09.23
|
||||
* Mitigation G-2
|
||||
*/
|
||||
static void amd_enable_serializing_lfence()
|
||||
{
|
||||
using Cpu = Hw::X86_64_cpu;
|
||||
|
||||
if (Hw::Vendor::get_vendor_id() != Hw::Vendor::Vendor_id::AMD)
|
||||
return;
|
||||
|
||||
unsigned const family = Hw::Vendor::get_family();
|
||||
|
||||
/*
|
||||
* In family 0Fh and 11h, lfence is always dispatch serializing and
|
||||
* "AMD plans support for this MSR and access to this bit for all future
|
||||
* processors." from family 14h on.
|
||||
*/
|
||||
if ((family == 0x10) || (family == 0x12) || (family >= 0x14)) {
|
||||
Cpu::Amd_lfence::access_t amd_lfence = Cpu::Amd_lfence::read();
|
||||
Cpu::Amd_lfence::Enable_dispatch_serializing::set(amd_lfence);
|
||||
Cpu::Amd_lfence::write(amd_lfence);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
Bootstrap::Platform::Board::Board()
|
||||
:
|
||||
core_mmio(Memory_region { 0, 0x1000 },
|
||||
@@ -354,21 +250,6 @@ Bootstrap::Platform::Board::Board()
|
||||
cpus = !cpus ? 1 : max_cpus;
|
||||
}
|
||||
|
||||
/*
|
||||
* Enable serializing lfence on supported AMD processors
|
||||
*
|
||||
* For APs this will be set up later, but we need it already to obtain
|
||||
* the most acurate results when calibrating the TSC frequency.
|
||||
*/
|
||||
amd_enable_serializing_lfence();
|
||||
|
||||
auto r = calibrate_lapic_frequency(info.acpi_fadt);
|
||||
info.lapic_freq_khz = r.freq_khz;
|
||||
info.lapic_div = r.div;
|
||||
info.tsc_freq_khz = calibrate_tsc_frequency(info.acpi_fadt);
|
||||
|
||||
disable_pit();
|
||||
|
||||
/* copy 16 bit boot code for AP CPUs and for ACPI resume */
|
||||
addr_t ap_code_size = (addr_t)&_start - (addr_t)&_ap;
|
||||
memcpy((void *)AP_BOOT_CODE_PAGE, &_ap, ap_code_size);
|
||||
@@ -438,12 +319,9 @@ unsigned Bootstrap::Platform::enable_mmu()
|
||||
if (board.cpus <= 1)
|
||||
return (unsigned)cpu_id;
|
||||
|
||||
if (!Cpu::IA32_apic_base::Bsp::get(lapic_msr)) {
|
||||
if (!Cpu::IA32_apic_base::Bsp::get(lapic_msr))
|
||||
/* AP - done */
|
||||
/* enable serializing lfence on supported AMD processors. */
|
||||
amd_enable_serializing_lfence();
|
||||
return (unsigned)cpu_id;
|
||||
}
|
||||
|
||||
/* BSP - we're primary CPU - wake now all other CPUs */
|
||||
|
||||
|
||||
@@ -21,7 +21,7 @@
|
||||
|
||||
/* base-hw core includes */
|
||||
#include <spec/x86_64/pic.h>
|
||||
#include <spec/x86_64/timer.h>
|
||||
#include <spec/x86_64/pit.h>
|
||||
#include <spec/x86_64/cpu.h>
|
||||
|
||||
namespace Board {
|
||||
|
||||
@@ -1,275 +0,0 @@
|
||||
/*
|
||||
* \brief Guest memory abstraction
|
||||
* \author Stefan Kalkowski
|
||||
* \author Benjamin Lamowski
|
||||
* \date 2024-11-25
|
||||
*/
|
||||
|
||||
/*
|
||||
* Copyright (C) 2015-2024 Genode Labs GmbH
|
||||
*
|
||||
* This file is part of the Genode OS framework, which is distributed
|
||||
* under the terms of the GNU Affero General Public License version 3.
|
||||
*/
|
||||
|
||||
#ifndef _CORE__GUEST_MEMORY_H_
|
||||
#define _CORE__GUEST_MEMORY_H_
|
||||
|
||||
/* base includes */
|
||||
#include <base/allocator.h>
|
||||
#include <base/allocator_avl.h>
|
||||
#include <vm_session/vm_session.h>
|
||||
#include <dataspace/capability.h>
|
||||
|
||||
/* core includes */
|
||||
#include <dataspace_component.h>
|
||||
#include <region_map_component.h>
|
||||
|
||||
namespace Core { class Guest_memory; }
|
||||
|
||||
using namespace Core;
|
||||
|
||||
|
||||
class Core::Guest_memory
|
||||
{
|
||||
private:
|
||||
|
||||
using Avl_region = Allocator_avl_tpl<Rm_region>;
|
||||
|
||||
using Attach_attr = Genode::Vm_session::Attach_attr;
|
||||
|
||||
Sliced_heap _sliced_heap;
|
||||
Avl_region _map { &_sliced_heap };
|
||||
|
||||
uint8_t _remaining_print_count { 10 };
|
||||
|
||||
void _with_region(addr_t const addr, auto const &fn)
|
||||
{
|
||||
Rm_region *region = _map.metadata((void *)addr);
|
||||
if (region)
|
||||
fn(*region);
|
||||
else
|
||||
if (_remaining_print_count) {
|
||||
error(__PRETTY_FUNCTION__, " unknown region");
|
||||
_remaining_print_count--;
|
||||
}
|
||||
}
|
||||
|
||||
public:
|
||||
|
||||
enum class Attach_result {
|
||||
OK,
|
||||
INVALID_DS,
|
||||
OUT_OF_RAM,
|
||||
OUT_OF_CAPS,
|
||||
REGION_CONFLICT,
|
||||
};
|
||||
|
||||
|
||||
Attach_result attach(Region_map_detach &rm_detach,
|
||||
Dataspace_component &dsc,
|
||||
addr_t const guest_phys,
|
||||
Attach_attr attr,
|
||||
auto const &map_fn)
|
||||
{
|
||||
/*
|
||||
* unsupported - deny otherwise arbitrary physical
|
||||
* memory can be mapped to a VM
|
||||
*/
|
||||
if (dsc.managed())
|
||||
return Attach_result::INVALID_DS;
|
||||
|
||||
if (guest_phys & 0xffful || attr.offset & 0xffful ||
|
||||
attr.size & 0xffful)
|
||||
return Attach_result::INVALID_DS;
|
||||
|
||||
if (!attr.size) {
|
||||
attr.size = dsc.size();
|
||||
|
||||
if (attr.offset < attr.size)
|
||||
attr.size -= attr.offset;
|
||||
}
|
||||
|
||||
if (attr.size > dsc.size())
|
||||
attr.size = dsc.size();
|
||||
|
||||
if (attr.offset >= dsc.size() ||
|
||||
attr.offset > dsc.size() - attr.size)
|
||||
return Attach_result::INVALID_DS;
|
||||
|
||||
using Alloc_error = Range_allocator::Alloc_error;
|
||||
|
||||
Attach_result const retval = _map.alloc_addr(attr.size, guest_phys).convert<Attach_result>(
|
||||
|
||||
[&] (void *) {
|
||||
|
||||
Rm_region::Attr const region_attr
|
||||
{
|
||||
.base = guest_phys,
|
||||
.size = attr.size,
|
||||
.write = dsc.writeable() && attr.writeable,
|
||||
.exec = attr.executable,
|
||||
.off = attr.offset,
|
||||
.dma = false,
|
||||
};
|
||||
|
||||
/* store attachment info in meta data */
|
||||
try {
|
||||
_map.construct_metadata((void *)guest_phys,
|
||||
dsc, rm_detach, region_attr);
|
||||
|
||||
} catch (Allocator_avl_tpl<Rm_region>::Assign_metadata_failed) {
|
||||
if (_remaining_print_count) {
|
||||
error("failed to store attachment info");
|
||||
_remaining_print_count--;
|
||||
}
|
||||
return Attach_result::INVALID_DS;
|
||||
}
|
||||
|
||||
Rm_region ®ion = *_map.metadata((void *)guest_phys);
|
||||
|
||||
/* inform dataspace about attachment */
|
||||
dsc.attached_to(region);
|
||||
|
||||
return Attach_result::OK;
|
||||
},
|
||||
|
||||
[&] (Alloc_error error) {
|
||||
|
||||
switch (error) {
|
||||
|
||||
case Alloc_error::OUT_OF_RAM:
|
||||
return Attach_result::OUT_OF_RAM;
|
||||
case Alloc_error::OUT_OF_CAPS:
|
||||
return Attach_result::OUT_OF_CAPS;
|
||||
case Alloc_error::DENIED:
|
||||
{
|
||||
/*
|
||||
* Handle attach after partial detach
|
||||
*/
|
||||
Rm_region *region_ptr = _map.metadata((void *)guest_phys);
|
||||
if (!region_ptr)
|
||||
return Attach_result::REGION_CONFLICT;
|
||||
|
||||
Rm_region ®ion = *region_ptr;
|
||||
|
||||
bool conflict = false;
|
||||
region.with_dataspace([&] (Dataspace_component &dataspace) {
|
||||
(void)dataspace;
|
||||
if (!(dsc.cap() == dataspace.cap()))
|
||||
conflict = true;
|
||||
});
|
||||
if (conflict)
|
||||
return Attach_result::REGION_CONFLICT;
|
||||
|
||||
if (guest_phys < region.base() ||
|
||||
guest_phys > region.base() + region.size() - 1)
|
||||
return Attach_result::REGION_CONFLICT;
|
||||
}
|
||||
|
||||
};
|
||||
|
||||
return Attach_result::OK;
|
||||
}
|
||||
);
|
||||
|
||||
if (retval == Attach_result::OK) {
|
||||
addr_t phys_addr = dsc.phys_addr() + attr.offset;
|
||||
size_t size = attr.size;
|
||||
|
||||
map_fn(guest_phys, phys_addr, size);
|
||||
}
|
||||
|
||||
return retval;
|
||||
}
|
||||
|
||||
|
||||
void detach(addr_t guest_phys,
|
||||
size_t size,
|
||||
auto const &unmap_fn)
|
||||
{
|
||||
if (!size || (guest_phys & 0xffful) || (size & 0xffful)) {
|
||||
if (_remaining_print_count) {
|
||||
warning("vm_session: skipping invalid memory detach addr=",
|
||||
(void *)guest_phys, " size=", (void *)size);
|
||||
_remaining_print_count--;
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
addr_t const guest_phys_end = guest_phys + (size - 1);
|
||||
addr_t addr = guest_phys;
|
||||
do {
|
||||
Rm_region *region = _map.metadata((void *)addr);
|
||||
|
||||
/* walk region holes page-by-page */
|
||||
size_t iteration_size = 0x1000;
|
||||
|
||||
if (region) {
|
||||
iteration_size = region->size();
|
||||
detach_at(region->base(), unmap_fn);
|
||||
}
|
||||
|
||||
if (addr >= guest_phys_end - (iteration_size - 1))
|
||||
break;
|
||||
|
||||
addr += iteration_size;
|
||||
} while (true);
|
||||
}
|
||||
|
||||
|
||||
Guest_memory(Constrained_ram_allocator &constrained_md_ram_alloc,
|
||||
Region_map ®ion_map)
|
||||
:
|
||||
_sliced_heap(constrained_md_ram_alloc, region_map)
|
||||
{
|
||||
/* configure managed VM area */
|
||||
_map.add_range(0UL, ~0UL);
|
||||
}
|
||||
|
||||
~Guest_memory()
|
||||
{
|
||||
/* detach all regions */
|
||||
while (true) {
|
||||
addr_t out_addr = 0;
|
||||
|
||||
if (!_map.any_block_addr(&out_addr))
|
||||
break;
|
||||
|
||||
detach_at(out_addr, [](addr_t, size_t) { });
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void detach_at(addr_t addr,
|
||||
auto const &unmap_fn)
|
||||
{
|
||||
_with_region(addr, [&] (Rm_region ®ion) {
|
||||
|
||||
if (!region.reserved())
|
||||
reserve_and_flush(addr, unmap_fn);
|
||||
|
||||
/* free the reserved region */
|
||||
_map.free(reinterpret_cast<void *>(region.base()));
|
||||
});
|
||||
}
|
||||
|
||||
|
||||
void reserve_and_flush(addr_t addr,
|
||||
auto const &unmap_fn)
|
||||
{
|
||||
_with_region(addr, [&] (Rm_region ®ion) {
|
||||
|
||||
/* inform dataspace */
|
||||
region.with_dataspace([&] (Dataspace_component &dataspace) {
|
||||
dataspace.detached_from(region);
|
||||
});
|
||||
|
||||
region.mark_as_reserved();
|
||||
|
||||
unmap_fn(region.base(), region.size());
|
||||
});
|
||||
}
|
||||
};
|
||||
|
||||
#endif /* _CORE__GUEST_MEMORY_H_ */
|
||||
@@ -21,7 +21,5 @@ using namespace Core;
|
||||
void Io_mem_session_component::_unmap_local(addr_t, size_t, addr_t) { }
|
||||
|
||||
|
||||
Io_mem_session_component::Map_local_result Io_mem_session_component::_map_local(addr_t const base, size_t)
|
||||
{
|
||||
return { .core_local_addr = base, .success = true };
|
||||
}
|
||||
addr_t Io_mem_session_component::_map_local(addr_t base, size_t) { return base; }
|
||||
|
||||
|
||||
@@ -18,7 +18,7 @@
|
||||
/* core includes */
|
||||
#include <kernel/irq.h>
|
||||
#include <irq_root.h>
|
||||
#include <platform.h>
|
||||
#include <core_env.h>
|
||||
|
||||
/* base-internal includes */
|
||||
#include <base/internal/capability_space.h>
|
||||
|
||||
@@ -66,7 +66,6 @@ namespace Kernel {
|
||||
constexpr Call_arg call_id_set_cpu_state() { return 125; }
|
||||
constexpr Call_arg call_id_exception_state() { return 126; }
|
||||
constexpr Call_arg call_id_single_step() { return 127; }
|
||||
constexpr Call_arg call_id_ack_pager_signal() { return 128; }
|
||||
|
||||
/**
|
||||
* Invalidate TLB entries for the `pd` in region `addr`, `sz`
|
||||
@@ -138,9 +137,10 @@ namespace Kernel {
|
||||
* \retval 0 suceeded
|
||||
* \retval !=0 failed
|
||||
*/
|
||||
inline int start_thread(Thread & thread, Pd & pd, Native_utcb & utcb)
|
||||
inline int start_thread(Thread & thread, unsigned const cpu_id,
|
||||
Pd & pd, Native_utcb & utcb)
|
||||
{
|
||||
return (int)call(call_id_start_thread(), (Call_arg)&thread,
|
||||
return (int)call(call_id_start_thread(), (Call_arg)&thread, cpu_id,
|
||||
(Call_arg)&pd, (Call_arg)&utcb);
|
||||
}
|
||||
|
||||
@@ -148,16 +148,13 @@ namespace Kernel {
|
||||
/**
|
||||
* Set or unset the handler of an event that can be triggered by a thread
|
||||
*
|
||||
* \param thread reference to thread kernel object
|
||||
* \param pager reference to pager kernel object
|
||||
* \param thread pointer to thread kernel object
|
||||
* \param signal_context_id capability id of the page-fault handler
|
||||
*/
|
||||
inline void thread_pager(Thread &thread,
|
||||
Thread &pager,
|
||||
inline void thread_pager(Thread & thread,
|
||||
capid_t const signal_context_id)
|
||||
{
|
||||
call(call_id_thread_pager(), (Call_arg)&thread, (Call_arg)&pager,
|
||||
signal_context_id);
|
||||
call(call_id_thread_pager(), (Call_arg)&thread, signal_context_id);
|
||||
}
|
||||
|
||||
|
||||
@@ -206,18 +203,6 @@ namespace Kernel {
|
||||
{
|
||||
call(call_id_single_step(), (Call_arg)&thread, (Call_arg)&on);
|
||||
}
|
||||
|
||||
/**
|
||||
* Acknowledge a signal transmitted to a pager
|
||||
*
|
||||
* \param context signal context to acknowledge
|
||||
* \param thread reference to faulting thread kernel object
|
||||
* \param resolved whether fault got resolved
|
||||
*/
|
||||
inline void ack_pager_signal(capid_t const context, Thread &thread, bool resolved)
|
||||
{
|
||||
call(call_id_ack_pager_signal(), context, (Call_arg)&thread, resolved);
|
||||
}
|
||||
}
|
||||
|
||||
#endif /* _CORE__KERNEL__CORE_INTERFACE_H_ */
|
||||
|
||||
@@ -27,35 +27,35 @@
|
||||
using namespace Kernel;
|
||||
|
||||
|
||||
/*****************
|
||||
** Cpu_context **
|
||||
*****************/
|
||||
/*************
|
||||
** Cpu_job **
|
||||
*************/
|
||||
|
||||
void Cpu_context::_activate() { _cpu().schedule(*this); }
|
||||
void Cpu_job::_activate_own_share() { _cpu->schedule(this); }
|
||||
|
||||
|
||||
void Cpu_context::_deactivate()
|
||||
void Cpu_job::_deactivate_own_share()
|
||||
{
|
||||
assert(_cpu().id() == Cpu::executing_id());
|
||||
_cpu().scheduler().unready(*this);
|
||||
assert(_cpu->id() == Cpu::executing_id());
|
||||
_cpu->scheduler().unready(*this);
|
||||
}
|
||||
|
||||
|
||||
void Cpu_context::_yield()
|
||||
void Cpu_job::_yield()
|
||||
{
|
||||
assert(_cpu().id() == Cpu::executing_id());
|
||||
_cpu().scheduler().yield();
|
||||
assert(_cpu->id() == Cpu::executing_id());
|
||||
_cpu->scheduler().yield();
|
||||
}
|
||||
|
||||
|
||||
void Cpu_context::_interrupt(Irq::Pool &user_irq_pool)
|
||||
void Cpu_job::_interrupt(Irq::Pool &user_irq_pool, unsigned const /* cpu_id */)
|
||||
{
|
||||
/* let the IRQ controller take a pending IRQ for handling, if any */
|
||||
unsigned irq_id;
|
||||
if (_cpu().pic().take_request(irq_id))
|
||||
if (_cpu->pic().take_request(irq_id))
|
||||
|
||||
/* let the CPU of this context handle the IRQ if it is a CPU-local one */
|
||||
if (!_cpu().handle_if_cpu_local_interrupt(irq_id)) {
|
||||
/* let the CPU of this job handle the IRQ if it is a CPU-local one */
|
||||
if (!_cpu->handle_if_cpu_local_interrupt(irq_id)) {
|
||||
|
||||
/* it isn't a CPU-local IRQ, so, it must be a user IRQ */
|
||||
User_irq * irq = User_irq::object(user_irq_pool, irq_id);
|
||||
@@ -64,37 +64,38 @@ void Cpu_context::_interrupt(Irq::Pool &user_irq_pool)
|
||||
}
|
||||
|
||||
/* let the IRQ controller finish the currently taken IRQ */
|
||||
_cpu().pic().finish_request();
|
||||
_cpu->pic().finish_request();
|
||||
}
|
||||
|
||||
|
||||
void Cpu_context::affinity(Cpu &cpu)
|
||||
void Cpu_job::affinity(Cpu &cpu)
|
||||
{
|
||||
_cpu().scheduler().remove(*this);
|
||||
_cpu_ptr = &cpu;
|
||||
_cpu().scheduler().insert(*this);
|
||||
_cpu = &cpu;
|
||||
_cpu->scheduler().insert(*this);
|
||||
}
|
||||
|
||||
|
||||
void Cpu_context::quota(unsigned const q)
|
||||
void Cpu_job::quota(unsigned const q)
|
||||
{
|
||||
_cpu().scheduler().quota(*this, q);
|
||||
if (_cpu)
|
||||
_cpu->scheduler().quota(*this, q);
|
||||
else
|
||||
Context::quota(q);
|
||||
}
|
||||
|
||||
|
||||
Cpu_context::Cpu_context(Cpu &cpu,
|
||||
Priority const priority,
|
||||
unsigned const quota)
|
||||
Cpu_job::Cpu_job(Priority const p, unsigned const q)
|
||||
:
|
||||
Context(priority, quota), _cpu_ptr(&cpu)
|
||||
{
|
||||
_cpu().scheduler().insert(*this);
|
||||
}
|
||||
Context(p, q), _cpu(0)
|
||||
{ }
|
||||
|
||||
|
||||
Cpu_context::~Cpu_context()
|
||||
Cpu_job::~Cpu_job()
|
||||
{
|
||||
_cpu().scheduler().remove(*this);
|
||||
if (!_cpu)
|
||||
return;
|
||||
|
||||
_cpu->scheduler().remove(*this);
|
||||
}
|
||||
|
||||
|
||||
@@ -111,17 +112,19 @@ Cpu::Idle_thread::Idle_thread(Board::Address_space_id_allocator &addr_space_id_a
|
||||
Cpu &cpu,
|
||||
Pd &core_pd)
|
||||
:
|
||||
Thread { addr_space_id_alloc, user_irq_pool, cpu_pool, cpu,
|
||||
core_pd, Priority::min(), 0, "idle", Thread::IDLE }
|
||||
Thread { addr_space_id_alloc, user_irq_pool, cpu_pool, core_pd,
|
||||
Priority::min(), 0, "idle", Thread::IDLE }
|
||||
{
|
||||
regs->ip = (addr_t)&idle_thread_main;
|
||||
|
||||
affinity(cpu);
|
||||
Thread::_pd = &core_pd;
|
||||
}
|
||||
|
||||
|
||||
void Cpu::schedule(Context &context)
|
||||
void Cpu::schedule(Job * const job)
|
||||
{
|
||||
_scheduler.ready(static_cast<Scheduler::Context&>(context));
|
||||
_scheduler.ready(job->context());
|
||||
if (_id != executing_id() && _scheduler.need_to_schedule())
|
||||
trigger_ip_interrupt();
|
||||
}
|
||||
@@ -139,34 +142,33 @@ bool Cpu::handle_if_cpu_local_interrupt(unsigned const irq_id)
|
||||
}
|
||||
|
||||
|
||||
Cpu::Context & Cpu::handle_exception_and_schedule()
|
||||
Cpu_job & Cpu::schedule()
|
||||
{
|
||||
Context &context = current_context();
|
||||
context.exception();
|
||||
/* update scheduler */
|
||||
Job & old_job = scheduled_job();
|
||||
old_job.exception(*this);
|
||||
|
||||
if (_state == SUSPEND || _state == HALT)
|
||||
return _halt_job;
|
||||
|
||||
/* update schedule if necessary */
|
||||
if (_scheduler.need_to_schedule()) {
|
||||
_timer.process_timeouts();
|
||||
_scheduler.update(_timer.time());
|
||||
time_t t = _scheduler.current_time_left();
|
||||
_timer.set_timeout(&_timeout, t);
|
||||
time_t duration = _timer.schedule_timeout();
|
||||
context.update_execution_time(duration);
|
||||
old_job.update_execution_time(duration);
|
||||
}
|
||||
|
||||
/* return current context */
|
||||
return current_context();
|
||||
/* return new job */
|
||||
return scheduled_job();
|
||||
}
|
||||
|
||||
|
||||
addr_t Cpu::stack_start()
|
||||
{
|
||||
return Abi::stack_align(Hw::Mm::cpu_local_memory().base +
|
||||
(Hw::Mm::CPU_LOCAL_MEMORY_SLOT_SIZE*_id)
|
||||
+ Hw::Mm::KERNEL_STACK_SIZE);
|
||||
(1024*1024*_id) + (64*1024));
|
||||
}
|
||||
|
||||
|
||||
|
||||
@@ -39,12 +39,10 @@ namespace Kernel {
|
||||
class Kernel::Cpu : public Core::Cpu, private Irq::Pool,
|
||||
public Genode::List<Cpu>::Element
|
||||
{
|
||||
public:
|
||||
|
||||
using Context = Cpu_context;
|
||||
|
||||
private:
|
||||
|
||||
using Job = Cpu_job;
|
||||
|
||||
/**
|
||||
* Inter-processor-interrupt object of the cpu
|
||||
*/
|
||||
@@ -85,14 +83,16 @@ class Kernel::Cpu : public Core::Cpu, private Irq::Pool,
|
||||
Pd &core_pd);
|
||||
};
|
||||
|
||||
struct Halt_job : Cpu_context
|
||||
struct Halt_job : Job
|
||||
{
|
||||
Halt_job(Cpu &cpu)
|
||||
: Cpu_context(cpu, 0, 0) { }
|
||||
Halt_job() : Job (0, 0) { }
|
||||
|
||||
void exception() override { }
|
||||
void proceed() override;
|
||||
} _halt_job { *this };
|
||||
void exception(Kernel::Cpu &) override { }
|
||||
|
||||
void proceed(Kernel::Cpu &) override;
|
||||
|
||||
Kernel::Cpu_job* helping_destination() override { return this; }
|
||||
} _halt_job { };
|
||||
|
||||
enum State { RUN, HALT, SUSPEND };
|
||||
|
||||
@@ -143,14 +143,14 @@ class Kernel::Cpu : public Core::Cpu, private Irq::Pool,
|
||||
bool handle_if_cpu_local_interrupt(unsigned const irq_id);
|
||||
|
||||
/**
|
||||
* Schedule 'context' at this CPU
|
||||
* Schedule 'job' at this CPU
|
||||
*/
|
||||
void schedule(Context& context);
|
||||
void schedule(Job * const job);
|
||||
|
||||
/**
|
||||
* Return the context that should be executed next
|
||||
* Return the job that should be executed at next
|
||||
*/
|
||||
Context& handle_exception_and_schedule();
|
||||
Cpu_job& schedule();
|
||||
|
||||
Board::Pic & pic() { return _pic; }
|
||||
Timer & timer() { return _timer; }
|
||||
@@ -158,10 +158,10 @@ class Kernel::Cpu : public Core::Cpu, private Irq::Pool,
|
||||
addr_t stack_start();
|
||||
|
||||
/**
|
||||
* Returns the currently scheduled context
|
||||
* Returns the currently active job
|
||||
*/
|
||||
Context & current_context() {
|
||||
return static_cast<Context&>(_scheduler.current().helping_destination()); }
|
||||
Job & scheduled_job() {
|
||||
return *static_cast<Job *>(&_scheduler.current())->helping_destination(); }
|
||||
|
||||
unsigned id() const { return _id; }
|
||||
Scheduler &scheduler() { return _scheduler; }
|
||||
|
||||
@@ -22,39 +22,46 @@
|
||||
namespace Kernel {
|
||||
|
||||
class Cpu;
|
||||
class Cpu_context;
|
||||
|
||||
/**
|
||||
* Context of a job (thread, VM, idle) that shall be executed by a CPU
|
||||
*/
|
||||
class Cpu_job;
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Context (thread, vcpu) that shall be executed by a CPU
|
||||
*/
|
||||
class Kernel::Cpu_context : private Scheduler::Context
|
||||
class Kernel::Cpu_job : private Scheduler::Context
|
||||
{
|
||||
private:
|
||||
|
||||
friend class Cpu;
|
||||
friend class Cpu; /* static_cast from 'Scheduler::Context' to 'Cpu_job' */
|
||||
|
||||
time_t _execution_time { 0 };
|
||||
Cpu *_cpu_ptr;
|
||||
|
||||
/*
|
||||
* Noncopyable
|
||||
*/
|
||||
Cpu_context(Cpu_context const &);
|
||||
Cpu_context &operator = (Cpu_context const &);
|
||||
Cpu_job(Cpu_job const &);
|
||||
Cpu_job &operator = (Cpu_job const &);
|
||||
|
||||
protected:
|
||||
|
||||
Cpu &_cpu() const { return *_cpu_ptr; }
|
||||
Cpu * _cpu;
|
||||
|
||||
/**
|
||||
* Handle interrupt exception
|
||||
* Handle interrupt exception that occured during execution on CPU 'id'
|
||||
*/
|
||||
void _interrupt(Irq::Pool &user_irq_pool);
|
||||
void _interrupt(Irq::Pool &user_irq_pool, unsigned const id);
|
||||
|
||||
void _activate();
|
||||
void _deactivate();
|
||||
/**
|
||||
* Activate our own CPU-share
|
||||
*/
|
||||
void _activate_own_share();
|
||||
|
||||
/**
|
||||
* Deactivate our own CPU-share
|
||||
*/
|
||||
void _deactivate_own_share();
|
||||
|
||||
/**
|
||||
* Yield the currently scheduled CPU share of this context
|
||||
@@ -62,37 +69,55 @@ class Kernel::Cpu_context : private Scheduler::Context
|
||||
void _yield();
|
||||
|
||||
/**
|
||||
* Return possibility to help context 'j' scheduling-wise
|
||||
* Return wether we are allowed to help job 'j' with our CPU-share
|
||||
*/
|
||||
bool _helping_possible(Cpu_context const &j) const {
|
||||
return j._cpu_ptr == _cpu_ptr; }
|
||||
|
||||
void _help(Cpu_context &context) { Context::help(context); }
|
||||
|
||||
using Context::ready;
|
||||
using Context::helping_finished;
|
||||
bool _helping_possible(Cpu_job const &j) const { return j._cpu == _cpu; }
|
||||
|
||||
public:
|
||||
|
||||
using Context = Scheduler::Context;
|
||||
using Priority = Scheduler::Priority;
|
||||
|
||||
Cpu_context(Cpu &cpu,
|
||||
Priority const priority,
|
||||
unsigned const quota);
|
||||
|
||||
virtual ~Cpu_context();
|
||||
/**
|
||||
* Handle exception that occured during execution on CPU 'id'
|
||||
*/
|
||||
virtual void exception(Cpu & cpu) = 0;
|
||||
|
||||
/**
|
||||
* Link context to CPU 'cpu'
|
||||
* Continue execution on CPU 'id'
|
||||
*/
|
||||
virtual void proceed(Cpu & cpu) = 0;
|
||||
|
||||
/**
|
||||
* Return which job currently uses our CPU-share
|
||||
*/
|
||||
virtual Cpu_job * helping_destination() = 0;
|
||||
|
||||
/**
|
||||
* Construct a job with scheduling priority 'p' and time quota 'q'
|
||||
*/
|
||||
Cpu_job(Priority const p, unsigned const q);
|
||||
|
||||
/**
|
||||
* Destructor
|
||||
*/
|
||||
virtual ~Cpu_job();
|
||||
|
||||
/**
|
||||
* Link job to CPU 'cpu'
|
||||
*/
|
||||
void affinity(Cpu &cpu);
|
||||
|
||||
/**
|
||||
* Set CPU quota of the context to 'q'
|
||||
* Set CPU quota of the job to 'q'
|
||||
*/
|
||||
void quota(unsigned const q);
|
||||
|
||||
/**
|
||||
* Return wether our CPU-share is currently active
|
||||
*/
|
||||
bool own_share_active() { return Context::ready(); }
|
||||
|
||||
/**
|
||||
* Update total execution time
|
||||
*/
|
||||
@@ -103,15 +128,14 @@ class Kernel::Cpu_context : private Scheduler::Context
|
||||
*/
|
||||
time_t execution_time() const { return _execution_time; }
|
||||
|
||||
/**
|
||||
* Handle exception that occured during execution of this context
|
||||
*/
|
||||
virtual void exception() = 0;
|
||||
|
||||
/**
|
||||
* Continue execution of this context
|
||||
*/
|
||||
virtual void proceed() = 0;
|
||||
/***************
|
||||
** Accessors **
|
||||
***************/
|
||||
|
||||
void cpu(Cpu &cpu) { _cpu = &cpu; }
|
||||
|
||||
Context &context() { return *this; }
|
||||
};
|
||||
|
||||
#endif /* _CORE__KERNEL__CPU_CONTEXT_H_ */
|
||||
|
||||
@@ -11,8 +11,8 @@
|
||||
* under the terms of the GNU Affero General Public License version 3.
|
||||
*/
|
||||
|
||||
#ifndef _CORE__KERNEL__INTER_PROCESSOR_WORK_H_
|
||||
#define _CORE__KERNEL__INTER_PROCESSOR_WORK_H_
|
||||
#ifndef _CORE__KERNEL__SMP_H_
|
||||
#define _CORE__KERNEL__SMP_H_
|
||||
|
||||
#include <util/interface.h>
|
||||
|
||||
@@ -32,11 +32,11 @@ class Kernel::Inter_processor_work : Genode::Interface
|
||||
{
|
||||
public:
|
||||
|
||||
virtual void execute(Cpu & cpu) = 0;
|
||||
virtual void execute(Cpu &) = 0;
|
||||
|
||||
protected:
|
||||
|
||||
Genode::List_element<Inter_processor_work> _le { this };
|
||||
};
|
||||
|
||||
#endif /* _CORE__KERNEL__INTER_PROCESSOR_WORK_H_ */
|
||||
#endif /* _CORE__KERNEL__SMP_H_ */
|
||||
|
||||
@@ -57,13 +57,19 @@ void Ipc_node::_cancel_send()
|
||||
}
|
||||
|
||||
|
||||
bool Ipc_node::_helping() const
|
||||
{
|
||||
return _out.state == Out::SEND_HELPING && _out.node;
|
||||
}
|
||||
|
||||
|
||||
bool Ipc_node::ready_to_send() const
|
||||
{
|
||||
return _out.state == Out::READY && !_in.waiting();
|
||||
}
|
||||
|
||||
|
||||
void Ipc_node::send(Ipc_node &node)
|
||||
void Ipc_node::send(Ipc_node &node, bool help)
|
||||
{
|
||||
node._in.queue.enqueue(_queue_item);
|
||||
|
||||
@@ -72,7 +78,13 @@ void Ipc_node::send(Ipc_node &node)
|
||||
node._thread.ipc_await_request_succeeded();
|
||||
}
|
||||
_out.node = &node;
|
||||
_out.state = Out::SEND;
|
||||
_out.state = help ? Out::SEND_HELPING : Out::SEND;
|
||||
}
|
||||
|
||||
|
||||
Thread &Ipc_node::helping_destination()
|
||||
{
|
||||
return _helping() ? _out.node->helping_destination() : _thread;
|
||||
}
|
||||
|
||||
|
||||
|
||||
@@ -50,14 +50,14 @@ class Kernel::Ipc_node
|
||||
|
||||
struct Out
|
||||
{
|
||||
enum State { READY, SEND, DESTRUCT };
|
||||
enum State { READY, SEND, SEND_HELPING, DESTRUCT };
|
||||
|
||||
State state { READY };
|
||||
Ipc_node *node { nullptr };
|
||||
|
||||
bool sending() const
|
||||
{
|
||||
return state == SEND;
|
||||
return state == SEND_HELPING || state == SEND;
|
||||
}
|
||||
};
|
||||
|
||||
@@ -76,6 +76,11 @@ class Kernel::Ipc_node
|
||||
*/
|
||||
void _cancel_send();
|
||||
|
||||
/**
|
||||
* Return wether this IPC node is helping another one
|
||||
*/
|
||||
bool _helping() const;
|
||||
|
||||
/**
|
||||
* Noncopyable
|
||||
*/
|
||||
@@ -97,8 +102,28 @@ class Kernel::Ipc_node
|
||||
* Send a message and wait for the according reply
|
||||
*
|
||||
* \param node targeted IPC node
|
||||
* \param help wether the request implies a helping relationship
|
||||
*/
|
||||
void send(Ipc_node &node);
|
||||
void send(Ipc_node &node, bool help);
|
||||
|
||||
/**
|
||||
* Return final destination of the helping-chain
|
||||
* this IPC node is part of, or its own thread otherwise
|
||||
*/
|
||||
Thread &helping_destination();
|
||||
|
||||
/**
|
||||
* Call 'fn' of type 'void (Ipc_node *)' for each helper
|
||||
*/
|
||||
void for_each_helper(auto const &fn)
|
||||
{
|
||||
_in.queue.for_each([fn] (Queue_item &item) {
|
||||
Ipc_node &node { item.object() };
|
||||
|
||||
if (node._helping())
|
||||
fn(node._thread);
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Return whether this IPC node is ready to wait for messages
|
||||
|
||||
@@ -20,7 +20,7 @@
|
||||
#include <util/avl_tree.h>
|
||||
|
||||
/* core includes */
|
||||
#include <kernel/signal.h>
|
||||
#include <kernel/signal_receiver.h>
|
||||
|
||||
namespace Board {
|
||||
|
||||
@@ -161,7 +161,9 @@ class Kernel::User_irq : public Kernel::Irq
|
||||
*/
|
||||
void occurred() override
|
||||
{
|
||||
_context.submit(1);
|
||||
if (_context.can_submit(1)) {
|
||||
_context.submit(1);
|
||||
}
|
||||
disable();
|
||||
}
|
||||
|
||||
|
||||
@@ -63,16 +63,16 @@ Kernel::Main *Kernel::Main::_instance;
|
||||
|
||||
void Kernel::Main::_handle_kernel_entry()
|
||||
{
|
||||
Cpu::Context * context;
|
||||
Cpu &cpu = _cpu_pool.cpu(Cpu::executing_id());
|
||||
Cpu_job * new_job;
|
||||
|
||||
{
|
||||
Lock::Guard guard(_data_lock);
|
||||
|
||||
context =
|
||||
&_cpu_pool.cpu(Cpu::executing_id()).handle_exception_and_schedule();
|
||||
new_job = &cpu.schedule();
|
||||
}
|
||||
|
||||
context->proceed();
|
||||
new_job->proceed(cpu);
|
||||
}
|
||||
|
||||
@@ -19,38 +19,6 @@
using namespace Kernel;


void Scheduler::Context::help(Scheduler::Context &c)
{
	_destination = &c;
	c._helper_list.insert(&_helper_le);
}


void Scheduler::Context::helping_finished()
{
	if (!_destination)
		return;

	_destination->_helper_list.remove(&_helper_le);
	_destination = nullptr;
}


Scheduler::Context& Scheduler::Context::helping_destination()
{
	return (_destination) ? _destination->helping_destination() : *this;
}


Scheduler::Context::~Context()
{
	helping_finished();

	for (Context::List_element *h = _helper_list.first(); h; h = h->next())
		h->object()->helping_finished();
}
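On the scheduler side, the Context methods above keep the helping relation as plain list bookkeeping: help() links a context to its destination and enrolls it in the destination's helper list, helping_finished() undoes that link, and the destructor severs the relation in both directions. A compact stand-alone sketch of the same bookkeeping, using std::vector instead of the kernel's intrusive lists (an assumption for illustration only):

#include <algorithm>
#include <cstdio>
#include <vector>

struct Context
{
	Context               *destination { nullptr };
	std::vector<Context *>  helpers     { };

	void help(Context &c)
	{
		destination = &c;
		c.helpers.push_back(this);
	}

	void helping_finished()
	{
		if (!destination)
			return;

		auto &list = destination->helpers;
		list.erase(std::remove(list.begin(), list.end(), this), list.end());
		destination = nullptr;
	}

	Context &helping_destination() {
		return destination ? destination->helping_destination() : *this; }

	~Context()
	{
		helping_finished();                  /* stop helping our destination  */
		while (!helpers.empty())             /* detach every remaining helper */
			helpers.back()->helping_finished();
	}
};

int main()
{
	Context worker, server;
	worker.help(server);
	std::printf("%zu helper(s)\n", server.helpers.size());   /* prints 1 */
}	/* destructors detach the links in both directions */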

void Scheduler::_consumed(unsigned const time)
{
	if (_super_period_left > time) {
@@ -181,10 +149,7 @@ void Scheduler::update(time_t time)

void Scheduler::ready(Context &c)
{
	assert(&c != &_idle);

	if (c.ready())
		return;
	assert(!c.ready() && &c != &_idle);

	c._ready = true;

@@ -205,33 +170,23 @@ void Scheduler::ready(Context &c)
		_slack_list.insert_head(&c._slack_le);

	if (!keep_current && _state == UP_TO_DATE) _state = OUT_OF_DATE;

	for (Context::List_element *helper = c._helper_list.first();
	     helper; helper = helper->next())
		if (!helper->object()->ready()) ready(*helper->object());
}


void Scheduler::unready(Context &c)
{
	assert(&c != &_idle);

	if (!c.ready())
		return;
	assert(c.ready() && &c != &_idle);

	if (&c == _current && _state == UP_TO_DATE) _state = OUT_OF_DATE;

	c._ready = false;
	_slack_list.remove(&c._slack_le);

	if (c._quota) {
		_rpl[c._priority].remove(&c._priotized_le);
		_upl[c._priority].insert_tail(&c._priotized_le);
	}
	if (!c._quota)
		return;

	for (Context::List_element *helper = c._helper_list.first();
	     helper; helper = helper->next())
		if (helper->object()->ready()) unready(*helper->object());
	_rpl[c._priority].remove(&c._priotized_le);
	_upl[c._priority].insert_tail(&c._priotized_le);
}
|
||||
|
||||
|
||||
@@ -65,7 +65,6 @@ class Kernel::Scheduler
|
||||
friend class Scheduler_test::Context;
|
||||
|
||||
using List_element = Genode::List_element<Context>;
|
||||
using List = Genode::List<List_element>;
|
||||
|
||||
unsigned _priority;
|
||||
unsigned _quota;
|
||||
@@ -75,20 +74,10 @@ class Kernel::Scheduler
|
||||
List_element _slack_le { this };
|
||||
unsigned _slack_time_left { 0 };
|
||||
|
||||
List_element _helper_le { this };
|
||||
List _helper_list {};
|
||||
Context *_destination { nullptr };
|
||||
|
||||
bool _ready { false };
|
||||
|
||||
void _reset() { _priotized_time_left = _quota; }
|
||||
|
||||
/**
|
||||
* Noncopyable
|
||||
*/
|
||||
Context(const Context&) = delete;
|
||||
Context& operator=(const Context&) = delete;
|
||||
|
||||
public:
|
||||
|
||||
Context(Priority const priority,
|
||||
@@ -96,14 +85,9 @@ class Kernel::Scheduler
|
||||
:
|
||||
_priority(priority.value),
|
||||
_quota(quota) { }
|
||||
~Context();
|
||||
|
||||
bool ready() const { return _ready; }
|
||||
void quota(unsigned const q) { _quota = q; }
|
||||
|
||||
void help(Context &c);
|
||||
void helping_finished();
|
||||
Context& helping_destination();
|
||||
};
|
||||
|
||||
private:
|
||||
|
||||
@@ -1,19 +1,18 @@
|
||||
/*
|
||||
* \brief Kernel backend for asynchronous inter-process communication
|
||||
* \author Martin Stein
|
||||
* \author Stefan Kalkowski
|
||||
* \date 2012-11-30
|
||||
*/
|
||||
|
||||
/*
|
||||
* Copyright (C) 2012-2025 Genode Labs GmbH
|
||||
* Copyright (C) 2012-2019 Genode Labs GmbH
|
||||
*
|
||||
* This file is part of the Genode OS framework, which is distributed
|
||||
* under the terms of the GNU Affero General Public License version 3.
|
||||
*/
|
||||
|
||||
/* core includes */
|
||||
#include <kernel/signal.h>
|
||||
#include <kernel/signal_receiver.h>
|
||||
#include <kernel/thread.h>
|
||||
|
||||
using namespace Kernel;
|
||||
@@ -27,7 +26,7 @@ void Signal_handler::cancel_waiting()
|
||||
{
|
||||
if (_receiver) {
|
||||
_receiver->_handler_cancelled(*this);
|
||||
_receiver = nullptr;
|
||||
_receiver = 0;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -72,20 +71,28 @@ void Signal_context::_deliverable()
|
||||
void Signal_context::_delivered()
|
||||
{
|
||||
_submits = 0;
|
||||
_ack = false;
|
||||
_ack = 0;
|
||||
}
|
||||
|
||||
|
||||
void Signal_context::_killer_cancelled() { _killer = nullptr; }
|
||||
void Signal_context::_killer_cancelled() { _killer = 0; }
|
||||
|
||||
|
||||
bool Signal_context::can_submit(unsigned const n) const
|
||||
{
|
||||
if (_killed || _submits >= (unsigned)~0 - n)
|
||||
return false;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
|
||||
void Signal_context::submit(unsigned const n)
|
||||
{
|
||||
if (_killed)
|
||||
if (_killed || _submits >= (unsigned)~0 - n)
|
||||
return;
|
||||
|
||||
if (_submits < ((unsigned)~0 - n))
|
||||
_submits += n;
|
||||
_submits += n;
|
||||
|
||||
if (_ack)
|
||||
_deliverable();
|
||||
@@ -98,19 +105,32 @@ void Signal_context::ack()
|
||||
return;
|
||||
|
||||
if (!_killed) {
|
||||
_ack = true;
|
||||
_ack = 1;
|
||||
_deliverable();
|
||||
return;
|
||||
}
|
||||
|
||||
if (_killer) {
|
||||
_killer->_context = nullptr;
|
||||
_killer->_context = 0;
|
||||
_killer->_thread.signal_context_kill_done();
|
||||
_killer = nullptr;
|
||||
_killer = 0;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
bool Signal_context::can_kill() const
|
||||
{
|
||||
/* check if in a kill operation or already killed */
|
||||
if (_killed) {
|
||||
if (_ack)
|
||||
return true;
|
||||
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
|
||||
void Signal_context::kill(Signal_context_killer &k)
|
||||
{
|
||||
/* check if in a kill operation or already killed */
|
||||
@@ -119,13 +139,13 @@ void Signal_context::kill(Signal_context_killer &k)
|
||||
|
||||
/* kill directly if there is no unacknowledged delivery */
|
||||
if (_ack) {
|
||||
_killed = true;
|
||||
_killed = 1;
|
||||
return;
|
||||
}
|
||||
|
||||
/* wait for delivery acknowledgement */
|
||||
_killer = &k;
|
||||
_killed = true;
|
||||
_killed = 1;
|
||||
_killer->_context = this;
|
||||
_killer->_thread.signal_context_kill_pending();
|
||||
}
|
||||
@@ -211,17 +231,24 @@ void Signal_receiver::_add_context(Signal_context &c) {
|
||||
_contexts.enqueue(c._contexts_fe); }
|
||||
|
||||
|
||||
|
||||
bool Signal_receiver::add_handler(Signal_handler &h)
|
||||
bool Signal_receiver::can_add_handler(Signal_handler const &h) const
|
||||
{
|
||||
if (h._receiver)
|
||||
return false;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
|
||||
void Signal_receiver::add_handler(Signal_handler &h)
|
||||
{
|
||||
if (h._receiver)
|
||||
return;
|
||||
|
||||
_handlers.enqueue(h._handlers_fe);
|
||||
h._receiver = this;
|
||||
h._thread.signal_wait_for_signal();
|
||||
_listen();
|
||||
return true;
|
||||
}
|
||||
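Several interfaces in this change move from a single bool-returning operation to a can_xxx()/xxx() pair: can_submit()/submit(), can_kill()/kill(), and can_add_handler()/add_handler() above. The caller first asks whether the operation can succeed and only then commits it, so the mutating call itself no longer reports failure. A minimal sketch of the caller-side pattern (simplified types, not Genode code):

#include <cstdio>
#include <queue>

struct Handler { bool registered = false; };

struct Receiver
{
	std::queue<Handler *> handlers;

	bool can_add_handler(Handler const &h) const { return !h.registered; }

	void add_handler(Handler &h)
	{
		if (h.registered)
			return;                 /* precondition re-checked defensively */

		handlers.push(&h);
		h.registered = true;
	}
};

int main()
{
	Receiver r;
	Handler  h;

	if (!r.can_add_handler(h)) {    /* check first, as in _call_await_signal() */
		std::puts("failed to register handler at signal receiver");
		return -1;
	}
	r.add_handler(h);               /* then commit */
}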
|
||||
|
||||
@@ -1,19 +1,18 @@
|
||||
/*
|
||||
* \brief Kernel backend for asynchronous inter-process communication
|
||||
* \author Martin Stein
|
||||
* \author Stefan Kalkowski
|
||||
* \date 2012-11-30
|
||||
*/
|
||||
|
||||
/*
|
||||
* Copyright (C) 2012-2025 Genode Labs GmbH
|
||||
* Copyright (C) 2012-2017 Genode Labs GmbH
|
||||
*
|
||||
* This file is part of the Genode OS framework, which is distributed
|
||||
* under the terms of the GNU Affero General Public License version 3.
|
||||
*/
|
||||
|
||||
#ifndef _CORE__KERNEL__SIGNAL_H_
|
||||
#define _CORE__KERNEL__SIGNAL_H_
|
||||
#ifndef _CORE__KERNEL__SIGNAL_RECEIVER_H_
|
||||
#define _CORE__KERNEL__SIGNAL_RECEIVER_H_
|
||||
|
||||
/* Genode includes */
|
||||
#include <base/signal.h>
|
||||
@@ -166,7 +165,11 @@ class Kernel::Signal_context
|
||||
* Submit the signal
|
||||
*
|
||||
* \param n number of submits
|
||||
*
|
||||
* \retval 0 succeeded
|
||||
* \retval -1 failed
|
||||
*/
|
||||
bool can_submit(unsigned const n) const;
|
||||
void submit(unsigned const n);
|
||||
|
||||
/**
|
||||
@@ -177,8 +180,12 @@ class Kernel::Signal_context
|
||||
/**
|
||||
* Destruct context or prepare to do it as soon as delivery is done
|
||||
*
|
||||
* \param k object that shall receive progress reports
|
||||
* \param killer object that shall receive progress reports
|
||||
*
|
||||
* \retval 0 succeeded
|
||||
* \retval -1 failed
|
||||
*/
|
||||
bool can_kill() const;
|
||||
void kill(Signal_context_killer &k);
|
||||
|
||||
/**
|
||||
@@ -263,7 +270,8 @@ class Kernel::Signal_receiver
|
||||
* \retval 0 succeeded
|
||||
* \retval -1 failed
|
||||
*/
|
||||
bool add_handler(Signal_handler &h);
|
||||
bool can_add_handler(Signal_handler const &h) const;
|
||||
void add_handler(Signal_handler &h);
|
||||
|
||||
/**
|
||||
* Syscall to create a signal receiver
|
||||
@@ -169,7 +169,7 @@ Thread::Destroy::Destroy(Thread & caller, Core::Kernel_object<Thread> & to_delet
|
||||
:
|
||||
caller(caller), thread_to_destroy(to_delete)
|
||||
{
|
||||
thread_to_destroy->_cpu().work_list().insert(&_le);
|
||||
thread_to_destroy->_cpu->work_list().insert(&_le);
|
||||
caller._become_inactive(AWAITS_RESTART);
|
||||
}
|
||||
|
||||
@@ -177,7 +177,7 @@ Thread::Destroy::Destroy(Thread & caller, Core::Kernel_object<Thread> & to_delet
|
||||
void
|
||||
Thread::Destroy::execute(Cpu &)
|
||||
{
|
||||
thread_to_destroy->_cpu().work_list().remove(&_le);
|
||||
thread_to_destroy->_cpu->work_list().remove(&_le);
|
||||
thread_to_destroy.destruct();
|
||||
caller._restart();
|
||||
}
|
||||
@@ -239,8 +239,7 @@ void Thread::ipc_send_request_succeeded()
|
||||
assert(_state == AWAITS_IPC);
|
||||
user_arg_0(0);
|
||||
_state = ACTIVE;
|
||||
_activate();
|
||||
helping_finished();
|
||||
if (!Cpu_job::own_share_active()) { _activate_used_shares(); }
|
||||
}
|
||||
|
||||
|
||||
@@ -249,8 +248,7 @@ void Thread::ipc_send_request_failed()
|
||||
assert(_state == AWAITS_IPC);
|
||||
user_arg_0(-1);
|
||||
_state = ACTIVE;
|
||||
_activate();
|
||||
helping_finished();
|
||||
if (!Cpu_job::own_share_active()) { _activate_used_shares(); }
|
||||
}
|
||||
|
||||
|
||||
@@ -270,16 +268,32 @@ void Thread::ipc_await_request_failed()
|
||||
}
|
||||
|
||||
|
||||
void Thread::_deactivate_used_shares()
|
||||
{
|
||||
Cpu_job::_deactivate_own_share();
|
||||
_ipc_node.for_each_helper([&] (Thread &thread) {
|
||||
thread._deactivate_used_shares(); });
|
||||
}
|
||||
|
||||
|
||||
void Thread::_activate_used_shares()
|
||||
{
|
||||
Cpu_job::_activate_own_share();
|
||||
_ipc_node.for_each_helper([&] (Thread &thread) {
|
||||
thread._activate_used_shares(); });
|
||||
}
|
||||
|
||||
|
||||
void Thread::_become_active()
|
||||
{
|
||||
if (_state != ACTIVE && !_paused) Cpu_context::_activate();
|
||||
if (_state != ACTIVE && !_paused) { _activate_used_shares(); }
|
||||
_state = ACTIVE;
|
||||
}
|
||||
|
||||
|
||||
void Thread::_become_inactive(State const s)
|
||||
{
|
||||
if (_state == ACTIVE && !_paused) Cpu_context::_deactivate();
|
||||
if (_state == ACTIVE && !_paused) { _deactivate_used_shares(); }
|
||||
_state = s;
|
||||
}
|
||||
|
||||
@@ -287,13 +301,17 @@ void Thread::_become_inactive(State const s)
|
||||
void Thread::_die() { _become_inactive(DEAD); }
|
||||
|
||||
|
||||
Cpu_job * Thread::helping_destination() {
|
||||
return &_ipc_node.helping_destination(); }
|
||||
|
||||
|
||||
size_t Thread::_core_to_kernel_quota(size_t const quota) const
|
||||
{
|
||||
using Genode::Cpu_session;
|
||||
|
||||
/* we assert at timer construction that cpu_quota_us in ticks fits size_t */
|
||||
size_t const ticks = (size_t)
|
||||
_cpu().timer().us_to_ticks(Kernel::cpu_quota_us);
|
||||
_cpu->timer().us_to_ticks(Kernel::cpu_quota_us);
|
||||
return Cpu_session::quota_lim_downscale(quota, ticks);
|
||||
}
|
||||
|
||||
@@ -301,20 +319,24 @@ size_t Thread::_core_to_kernel_quota(size_t const quota) const
|
||||
void Thread::_call_thread_quota()
|
||||
{
|
||||
Thread * const thread = (Thread *)user_arg_1();
|
||||
thread->Cpu_context::quota((unsigned)(_core_to_kernel_quota(user_arg_2())));
|
||||
thread->Cpu_job::quota((unsigned)(_core_to_kernel_quota(user_arg_2())));
|
||||
}
|
||||
|
||||
|
||||
void Thread::_call_start_thread()
|
||||
{
|
||||
/* lookup CPU */
|
||||
Cpu & cpu = _cpu_pool.cpu((unsigned)user_arg_2());
|
||||
user_arg_0(0);
|
||||
Thread &thread = *(Thread*)user_arg_1();
|
||||
|
||||
assert(thread._state == AWAITS_START);
|
||||
|
||||
thread.affinity(cpu);
|
||||
|
||||
/* join protection domain */
|
||||
thread._pd = (Pd *) user_arg_2();
|
||||
switch (thread._ipc_init(*(Native_utcb *)user_arg_3(), *this)) {
|
||||
thread._pd = (Pd *) user_arg_3();
|
||||
switch (thread._ipc_init(*(Native_utcb *)user_arg_4(), *this)) {
|
||||
case Ipc_alloc_result::OK:
|
||||
break;
|
||||
case Ipc_alloc_result::EXHAUSTED:
|
||||
@@ -334,8 +356,7 @@ void Thread::_call_start_thread()
|
||||
* semantic changes, and additional core threads are started
|
||||
* across cpu cores.
|
||||
*/
|
||||
if (thread._pd == &_core_pd &&
|
||||
thread._cpu().id() != _cpu_pool.primary_cpu().id())
|
||||
if (thread._pd == &_core_pd && cpu.id() != _cpu_pool.primary_cpu().id())
|
||||
Genode::raw("Error: do not start core threads"
|
||||
" on CPU cores different than boot cpu");
|
||||
|
||||
@@ -346,8 +367,8 @@ void Thread::_call_start_thread()
|
||||
void Thread::_call_pause_thread()
|
||||
{
|
||||
Thread &thread = *reinterpret_cast<Thread*>(user_arg_1());
|
||||
if (thread._state == ACTIVE && !thread._paused)
|
||||
thread._deactivate();
|
||||
if (thread._state == ACTIVE && !thread._paused) {
|
||||
thread._deactivate_used_shares(); }
|
||||
|
||||
thread._paused = true;
|
||||
}
|
||||
@@ -356,8 +377,8 @@ void Thread::_call_pause_thread()
|
||||
void Thread::_call_resume_thread()
|
||||
{
|
||||
Thread &thread = *reinterpret_cast<Thread*>(user_arg_1());
|
||||
if (thread._state == ACTIVE && thread._paused)
|
||||
thread._activate();
|
||||
if (thread._state == ACTIVE && thread._paused) {
|
||||
thread._activate_used_shares(); }
|
||||
|
||||
thread._paused = false;
|
||||
}
|
||||
@@ -385,7 +406,6 @@ void Thread::_call_restart_thread()
|
||||
_die();
|
||||
return;
|
||||
}
|
||||
|
||||
user_arg_0(thread._restart());
|
||||
}
|
||||
|
||||
@@ -393,10 +413,7 @@ void Thread::_call_restart_thread()
|
||||
bool Thread::_restart()
|
||||
{
|
||||
assert(_state == ACTIVE || _state == AWAITS_RESTART);
|
||||
|
||||
if (_state == ACTIVE && _exception_state == NO_EXCEPTION)
|
||||
return false;
|
||||
|
||||
if (_state != AWAITS_RESTART) { return false; }
|
||||
_exception_state = NO_EXCEPTION;
|
||||
_become_active();
|
||||
return true;
|
||||
@@ -434,7 +451,7 @@ void Thread::_cancel_blocking()
|
||||
|
||||
void Thread::_call_yield_thread()
|
||||
{
|
||||
Cpu_context::_yield();
|
||||
Cpu_job::_yield();
|
||||
}
|
||||
|
||||
|
||||
@@ -444,11 +461,12 @@ void Thread::_call_delete_thread()
|
||||
*(Core::Kernel_object<Thread>*)user_arg_1();
|
||||
|
	/**
	 * Delete a thread immediately if it is assigned to this cpu,
	 * or the assigned cpu did not schedule it.
	 * Delete a thread immediately if it has no cpu assigned yet,
	 * or it is assigned to this cpu, or the assigned cpu did not schedule it.
	 */
|
||||
&to_delete->_cpu().current_context() != &*to_delete) {
|
||||
if (!to_delete->_cpu ||
|
||||
(to_delete->_cpu->id() == Cpu::executing_id() ||
|
||||
&to_delete->_cpu->scheduled_job() != &*to_delete)) {
|
||||
_call_delete<Thread>();
|
||||
return;
|
||||
}
|
||||
@@ -457,7 +475,7 @@ void Thread::_call_delete_thread()
|
||||
* Construct a cross-cpu work item and send an IPI
|
||||
*/
|
||||
_destroy.construct(*this, to_delete);
|
||||
to_delete->_cpu().trigger_ip_interrupt();
|
||||
to_delete->_cpu->trigger_ip_interrupt();
|
||||
}
|
||||
|
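The remote branch of _call_delete_thread() above defers the destruction to the CPU that owns the thread: a work item is constructed, hooked into that CPU's work list, and an inter-processor interrupt is triggered so the owning CPU performs the deletion itself. A simplified stand-alone model of that handshake (names and types are assumptions, not the kernel's):

#include <cstdio>
#include <functional>
#include <vector>

struct Cpu
{
	unsigned const id;
	std::vector<std::function<void()>> work_list;

	void trigger_ip_interrupt() { std::printf("IPI -> cpu %u\n", id); }

	/* executed by the remote CPU when it handles the IPI */
	void handle_ipi()
	{
		for (auto &work : work_list) work();
		work_list.clear();
	}
};

void delete_thread(Cpu &executing, Cpu &owning, char const *victim)
{
	if (owning.id == executing.id) {         /* local case: delete directly */
		std::printf("delete %s locally\n", victim);
		return;
	}
	owning.work_list.push_back([victim] {    /* remote case: defer via work item */
		std::printf("delete %s on owning cpu\n", victim); });
	owning.trigger_ip_interrupt();
}

int main()
{
	Cpu cpu0 { 0 }, cpu1 { 1 };
	delete_thread(cpu0, cpu1, "worker");
	cpu1.handle_ipi();
}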
||||
|
||||
@@ -466,8 +484,8 @@ void Thread::_call_delete_pd()
|
||||
Core::Kernel_object<Pd> & pd =
|
||||
*(Core::Kernel_object<Pd>*)user_arg_1();
|
||||
|
||||
if (_cpu().active(pd->mmu_regs))
|
||||
_cpu().switch_to(_core_pd.mmu_regs);
|
||||
if (_cpu->active(pd->mmu_regs))
|
||||
_cpu->switch_to(_core_pd.mmu_regs);
|
||||
|
||||
_call_delete<Pd>();
|
||||
}
|
||||
@@ -499,7 +517,7 @@ void Thread::_call_await_request_msg()
|
||||
|
||||
void Thread::_call_timeout()
|
||||
{
|
||||
Timer & t = _cpu().timer();
|
||||
Timer & t = _cpu->timer();
|
||||
_timeout_sigid = (Kernel::capid_t)user_arg_2();
|
||||
t.set_timeout(this, t.us_to_ticks(user_arg_1()));
|
||||
}
|
||||
@@ -507,13 +525,13 @@ void Thread::_call_timeout()
|
||||
|
||||
void Thread::_call_timeout_max_us()
|
||||
{
|
||||
user_ret_time(_cpu().timer().timeout_max_us());
|
||||
user_ret_time(_cpu->timer().timeout_max_us());
|
||||
}
|
||||
|
||||
|
||||
void Thread::_call_time()
|
||||
{
|
||||
Timer & t = _cpu().timer();
|
||||
Timer & t = _cpu->timer();
|
||||
user_ret_time(t.ticks_to_us(t.time()));
|
||||
}
|
||||
|
||||
@@ -522,8 +540,11 @@ void Thread::timeout_triggered()
|
||||
{
|
||||
Signal_context * const c =
|
||||
pd().cap_tree().find<Signal_context>(_timeout_sigid);
|
||||
if (c) c->submit(1);
|
||||
else Genode::warning(*this, ": failed to submit timeout signal");
|
||||
if (!c || !c->can_submit(1)) {
|
||||
Genode::raw(*this, ": failed to submit timeout signal");
|
||||
return;
|
||||
}
|
||||
c->submit(1);
|
||||
}
|
||||
|
||||
|
||||
@@ -537,7 +558,7 @@ void Thread::_call_send_request_msg()
|
||||
_become_inactive(DEAD);
|
||||
return;
|
||||
}
|
||||
bool const help = Cpu_context::_helping_possible(*dst);
|
||||
bool const help = Cpu_job::_helping_possible(*dst);
|
||||
oir = oir->find(dst->pd());
|
||||
|
||||
if (!_ipc_node.ready_to_send()) {
|
||||
@@ -551,12 +572,11 @@ void Thread::_call_send_request_msg()
|
||||
return;
|
||||
}
|
||||
_ipc_capid = oir ? oir->capid() : cap_id_invalid();
|
||||
_ipc_node.send(dst->_ipc_node);
|
||||
_ipc_node.send(dst->_ipc_node, help);
|
||||
}
|
||||
|
||||
_state = AWAITS_IPC;
|
||||
if (help) Cpu_context::_help(*dst);
|
||||
if (!help || !dst->ready()) _deactivate();
|
||||
if (!help || !dst->own_share_active()) { _deactivate_used_shares(); }
|
||||
}
|
||||
|
||||
|
||||
@@ -573,9 +593,7 @@ void Thread::_call_pager()
|
||||
{
|
||||
/* override event route */
|
||||
Thread &thread = *(Thread *)user_arg_1();
|
||||
Thread &pager = *(Thread *)user_arg_2();
|
||||
Signal_context &sc = *pd().cap_tree().find<Signal_context>((Kernel::capid_t)user_arg_3());
|
||||
thread._fault_context.construct(pager, sc);
|
||||
thread._pager = pd().cap_tree().find<Signal_context>((Kernel::capid_t)user_arg_2());
|
||||
}
|
||||
|
||||
|
||||
@@ -599,11 +617,12 @@ void Thread::_call_await_signal()
|
||||
return;
|
||||
}
|
||||
/* register handler at the receiver */
|
||||
if (!r->add_handler(_signal_handler)) {
|
||||
if (!r->can_add_handler(_signal_handler)) {
|
||||
Genode::raw("failed to register handler at signal receiver");
|
||||
user_arg_0(-1);
|
||||
return;
|
||||
}
|
||||
r->add_handler(_signal_handler);
|
||||
user_arg_0(0);
|
||||
}
|
||||
|
||||
@@ -620,10 +639,11 @@ void Thread::_call_pending_signal()
|
||||
}
|
||||
|
||||
/* register handler at the receiver */
|
||||
if (!r->add_handler(_signal_handler)) {
|
||||
if (!r->can_add_handler(_signal_handler)) {
|
||||
user_arg_0(-1);
|
||||
return;
|
||||
}
|
||||
r->add_handler(_signal_handler);
|
||||
|
||||
if (_state == AWAITS_SIGNAL) {
|
||||
_cancel_blocking();
|
||||
@@ -658,7 +678,20 @@ void Thread::_call_submit_signal()
|
||||
{
|
||||
/* lookup signal context */
|
||||
Signal_context * const c = pd().cap_tree().find<Signal_context>((Kernel::capid_t)user_arg_1());
|
||||
if(c) c->submit((unsigned)user_arg_2());
|
||||
if(!c) {
|
||||
/* cannot submit unknown signal context */
|
||||
user_arg_0(-1);
|
||||
return;
|
||||
}
|
||||
|
||||
/* trigger signal context */
|
||||
if (!c->can_submit((unsigned)user_arg_2())) {
|
||||
Genode::raw("failed to submit signal context");
|
||||
user_arg_0(-1);
|
||||
return;
|
||||
}
|
||||
c->submit((unsigned)user_arg_2());
|
||||
user_arg_0(0);
|
||||
}
|
||||
|
||||
|
||||
@@ -666,8 +699,13 @@ void Thread::_call_ack_signal()
|
||||
{
|
||||
/* lookup signal context */
|
||||
Signal_context * const c = pd().cap_tree().find<Signal_context>((Kernel::capid_t)user_arg_1());
|
||||
if (c) c->ack();
|
||||
else Genode::warning(*this, ": cannot ack unknown signal context");
|
||||
if (!c) {
|
||||
Genode::raw(*this, ": cannot ack unknown signal context");
|
||||
return;
|
||||
}
|
||||
|
||||
/* acknowledge */
|
||||
c->ack();
|
||||
}
|
||||
|
||||
|
||||
@@ -675,8 +713,19 @@ void Thread::_call_kill_signal_context()
|
||||
{
|
||||
/* lookup signal context */
|
||||
Signal_context * const c = pd().cap_tree().find<Signal_context>((Kernel::capid_t)user_arg_1());
|
||||
if (c) c->kill(_signal_context_killer);
|
||||
else Genode::warning(*this, ": cannot kill unknown signal context");
|
||||
if (!c) {
|
||||
Genode::raw(*this, ": cannot kill unknown signal context");
|
||||
user_arg_0(-1);
|
||||
return;
|
||||
}
|
||||
|
||||
/* kill signal context */
|
||||
if (!c->can_kill()) {
|
||||
Genode::raw("failed to kill signal context");
|
||||
user_arg_0(-1);
|
||||
return;
|
||||
}
|
||||
c->kill(_signal_context_killer);
|
||||
}
|
||||
|
||||
|
||||
@@ -695,7 +744,7 @@ void Thread::_call_new_irq()
|
||||
(Genode::Irq_session::Polarity) (user_arg_3() & 0b11);
|
||||
|
||||
_call_new<User_irq>((unsigned)user_arg_2(), trigger, polarity, *c,
|
||||
_cpu().pic(), _user_irq_pool);
|
||||
_cpu->pic(), _user_irq_pool);
|
||||
}
|
||||
|
||||
|
||||
@@ -796,25 +845,6 @@ void Thread::_call_single_step() {
|
||||
}
|
||||
|
||||
|
||||
void Thread::_call_ack_pager_signal()
|
||||
{
|
||||
Signal_context * const c = pd().cap_tree().find<Signal_context>((Kernel::capid_t)user_arg_1());
|
||||
if (!c)
|
||||
Genode::raw(*this, ": cannot ack unknown signal context");
|
||||
else
|
||||
c->ack();
|
||||
|
||||
Thread &thread = *(Thread*)user_arg_2();
|
||||
thread.helping_finished();
|
||||
|
||||
bool resolved = user_arg_3() ||
|
||||
thread._exception_state == NO_EXCEPTION;
|
||||
if (resolved) thread._restart();
|
||||
else thread._become_inactive(AWAITS_RESTART);
|
||||
}
|
||||
|
||||
|
||||
|
||||
void Thread::_call()
|
||||
{
|
||||
/* switch over unrestricted kernel calls */
|
||||
@@ -856,15 +886,13 @@ void Thread::_call()
|
||||
switch (call_id) {
|
||||
case call_id_new_thread():
|
||||
_call_new<Thread>(_addr_space_id_alloc, _user_irq_pool, _cpu_pool,
|
||||
_cpu_pool.cpu((unsigned)user_arg_2()),
|
||||
_core_pd, (unsigned) user_arg_3(),
|
||||
(unsigned) _core_to_kernel_quota(user_arg_4()),
|
||||
(char const *) user_arg_5(), USER);
|
||||
_core_pd, (unsigned) user_arg_2(),
|
||||
(unsigned) _core_to_kernel_quota(user_arg_3()),
|
||||
(char const *) user_arg_4(), USER);
|
||||
return;
|
||||
case call_id_new_core_thread():
|
||||
_call_new<Thread>(_addr_space_id_alloc, _user_irq_pool, _cpu_pool,
|
||||
_cpu_pool.cpu((unsigned)user_arg_2()),
|
||||
_core_pd, (char const *) user_arg_3());
|
||||
_core_pd, (char const *) user_arg_2());
|
||||
return;
|
||||
case call_id_thread_quota(): _call_thread_quota(); return;
|
||||
case call_id_delete_thread(): _call_delete_thread(); return;
|
||||
@@ -897,7 +925,6 @@ void Thread::_call()
|
||||
case call_id_set_cpu_state(): _call_set_cpu_state(); return;
|
||||
case call_id_exception_state(): _call_exception_state(); return;
|
||||
case call_id_single_step(): _call_single_step(); return;
|
||||
case call_id_ack_pager_signal(): _call_ack_pager_signal(); return;
|
||||
default:
|
||||
Genode::raw(*this, ": unknown kernel call");
|
||||
_die();
|
||||
@@ -906,37 +933,18 @@ void Thread::_call()
|
||||
}
|
||||
|
||||
|
||||
void Thread::_signal_to_pager()
|
||||
{
|
||||
if (!_fault_context.constructed()) {
|
||||
Genode::warning(*this, " could not send signal to pager");
|
||||
_die();
|
||||
return;
|
||||
}
|
||||
|
||||
/* first signal to pager to wake it up */
|
||||
_fault_context->sc.submit(1);
|
||||
|
||||
/* only help pager thread if runnable and scheduler allows it */
|
||||
bool const help = Cpu_context::_helping_possible(_fault_context->pager)
|
||||
&& (_fault_context->pager._state == ACTIVE);
|
||||
if (help) Cpu_context::_help(_fault_context->pager);
|
||||
else _become_inactive(AWAITS_RESTART);
|
||||
}
|
||||
|
||||
|
||||
void Thread::_mmu_exception()
|
||||
{
|
||||
using namespace Genode;
|
||||
using Genode::log;
|
||||
|
||||
_become_inactive(AWAITS_RESTART);
|
||||
_exception_state = MMU_FAULT;
|
||||
Cpu::mmu_fault(*regs, _fault);
|
||||
_fault.ip = regs->ip;
|
||||
|
||||
if (_fault.type == Thread_fault::UNKNOWN) {
|
||||
Genode::warning(*this, " raised unhandled MMU fault ", _fault);
|
||||
_die();
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -951,16 +959,17 @@ void Thread::_mmu_exception()
|
||||
Hw::Mm::core_stack_area().size };
|
||||
regs->for_each_return_address(stack, [&] (void **p) {
|
||||
log(*p); });
|
||||
_die();
|
||||
return;
|
||||
}
|
||||
|
||||
_signal_to_pager();
|
||||
if (_pager && _pager->can_submit(1)) {
|
||||
_pager->submit(1);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void Thread::_exception()
|
||||
{
|
||||
_become_inactive(AWAITS_RESTART);
|
||||
_exception_state = EXCEPTION;
|
||||
|
||||
if (_type != USER) {
|
||||
@@ -968,14 +977,18 @@ void Thread::_exception()
|
||||
_die();
|
||||
}
|
||||
|
||||
_signal_to_pager();
|
||||
if (_pager && _pager->can_submit(1)) {
|
||||
_pager->submit(1);
|
||||
} else {
|
||||
Genode::raw(*this, " could not send signal to pager on exception");
|
||||
_die();
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
Thread::Thread(Board::Address_space_id_allocator &addr_space_id_alloc,
|
||||
Irq::Pool &user_irq_pool,
|
||||
Cpu_pool &cpu_pool,
|
||||
Cpu &cpu,
|
||||
Pd &core_pd,
|
||||
unsigned const priority,
|
||||
unsigned const quota,
|
||||
@@ -983,7 +996,7 @@ Thread::Thread(Board::Address_space_id_allocator &addr_space_id_alloc,
|
||||
Type type)
|
||||
:
|
||||
Kernel::Object { *this },
|
||||
Cpu_context { cpu, priority, quota },
|
||||
Cpu_job { priority, quota },
|
||||
_addr_space_id_alloc { addr_space_id_alloc },
|
||||
_user_irq_pool { user_irq_pool },
|
||||
_cpu_pool { cpu_pool },
|
||||
@@ -1020,8 +1033,8 @@ Core_main_thread(Board::Address_space_id_allocator &addr_space_id_alloc,
|
||||
Cpu_pool &cpu_pool,
|
||||
Pd &core_pd)
|
||||
:
|
||||
Core_object<Thread>(core_pd, addr_space_id_alloc, user_irq_pool, cpu_pool,
|
||||
cpu_pool.primary_cpu(), core_pd, "core")
|
||||
Core_object<Thread>(
|
||||
core_pd, addr_space_id_alloc, user_irq_pool, cpu_pool, core_pd, "core")
|
||||
{
|
||||
using namespace Core;
|
||||
|
||||
@@ -1037,6 +1050,7 @@ Core_main_thread(Board::Address_space_id_allocator &addr_space_id_alloc,
|
||||
regs->sp = (addr_t)&__initial_stack_base[0] + DEFAULT_STACK_SIZE;
|
||||
regs->ip = (addr_t)&_core_start;
|
||||
|
||||
affinity(_cpu_pool.primary_cpu());
|
||||
_utcb = &_utcb_instance;
|
||||
Thread::_pd = &core_pd;
|
||||
_become_active();
|
||||
|
||||
@@ -20,7 +20,7 @@
|
||||
/* base-hw core includes */
|
||||
#include <kernel/cpu_context.h>
|
||||
#include <kernel/inter_processor_work.h>
|
||||
#include <kernel/signal.h>
|
||||
#include <kernel/signal_receiver.h>
|
||||
#include <kernel/ipc_node.h>
|
||||
#include <object.h>
|
||||
#include <kernel/interface.h>
|
||||
@@ -53,7 +53,7 @@ struct Kernel::Thread_fault
|
||||
/**
|
||||
* Kernel back-end for userland execution-contexts
|
||||
*/
|
||||
class Kernel::Thread : private Kernel::Object, public Cpu_context, private Timeout
|
||||
class Kernel::Thread : private Kernel::Object, public Cpu_job, private Timeout
|
||||
{
|
||||
public:
|
||||
|
||||
@@ -173,15 +173,7 @@ class Kernel::Thread : private Kernel::Object, public Cpu_context, private Timeo
|
||||
size_t _ipc_rcv_caps { 0 };
|
||||
Genode::Native_utcb *_utcb { nullptr };
|
||||
Pd *_pd { nullptr };
|
||||
|
||||
struct Fault_context
|
||||
{
|
||||
Thread &pager;
|
||||
Signal_context ≻
|
||||
};
|
||||
|
||||
Genode::Constructible<Fault_context> _fault_context {};
|
||||
|
||||
Signal_context *_pager { nullptr };
|
||||
Thread_fault _fault { };
|
||||
State _state;
|
||||
Signal_handler _signal_handler { *this };
|
||||
@@ -224,16 +216,21 @@ class Kernel::Thread : private Kernel::Object, public Cpu_context, private Timeo
|
||||
*/
|
||||
void _become_inactive(State const s);
|
||||
|
||||
/**
|
||||
* Activate our CPU-share and those of our helpers
|
||||
*/
|
||||
void _activate_used_shares();
|
||||
|
||||
/**
|
||||
* Deactivate our CPU-share and those of our helpers
|
||||
*/
|
||||
void _deactivate_used_shares();
|
||||
|
||||
/**
|
||||
* Suspend unrecoverably from execution
|
||||
*/
|
||||
void _die();
|
||||
|
||||
/**
|
||||
* In case of fault, signal to pager, and help or block
|
||||
*/
|
||||
void _signal_to_pager();
|
||||
|
||||
/**
|
||||
* Handle an exception thrown by the memory management unit
|
||||
*/
|
||||
@@ -309,7 +306,6 @@ class Kernel::Thread : private Kernel::Object, public Cpu_context, private Timeo
|
||||
void _call_set_cpu_state();
|
||||
void _call_exception_state();
|
||||
void _call_single_step();
|
||||
void _call_ack_pager_signal();
|
||||
|
||||
template <typename T>
|
||||
void _call_new(auto &&... args)
|
||||
@@ -349,7 +345,6 @@ class Kernel::Thread : private Kernel::Object, public Cpu_context, private Timeo
|
||||
Thread(Board::Address_space_id_allocator &addr_space_id_alloc,
|
||||
Irq::Pool &user_irq_pool,
|
||||
Cpu_pool &cpu_pool,
|
||||
Cpu &cpu,
|
||||
Pd &core_pd,
|
||||
unsigned const priority,
|
||||
unsigned const quota,
|
||||
@@ -364,12 +359,11 @@ class Kernel::Thread : private Kernel::Object, public Cpu_context, private Timeo
|
||||
Thread(Board::Address_space_id_allocator &addr_space_id_alloc,
|
||||
Irq::Pool &user_irq_pool,
|
||||
Cpu_pool &cpu_pool,
|
||||
Cpu &cpu,
|
||||
Pd &core_pd,
|
||||
char const *const label)
|
||||
:
|
||||
Thread(addr_space_id_alloc, user_irq_pool, cpu_pool, cpu,
|
||||
core_pd, Scheduler::Priority::min(), 0, label, CORE)
|
||||
Thread(addr_space_id_alloc, user_irq_pool, cpu_pool, core_pd,
|
||||
Scheduler::Priority::min(), 0, label, CORE)
|
||||
{ }
|
||||
|
||||
~Thread();
|
||||
@@ -406,14 +400,13 @@ class Kernel::Thread : private Kernel::Object, public Cpu_context, private Timeo
|
||||
* \retval capability id of the new kernel object
|
||||
*/
|
||||
static capid_t syscall_create(Core::Kernel_object<Thread> &t,
|
||||
unsigned const cpu_id,
|
||||
unsigned const priority,
|
||||
size_t const quota,
|
||||
char const * const label)
|
||||
{
|
||||
return (capid_t)call(call_id_new_thread(), (Call_arg)&t,
|
||||
(Call_arg)cpu_id, (Call_arg)priority,
|
||||
(Call_arg)quota, (Call_arg)label);
|
||||
(Call_arg)priority, (Call_arg)quota,
|
||||
(Call_arg)label);
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -425,11 +418,10 @@ class Kernel::Thread : private Kernel::Object, public Cpu_context, private Timeo
|
||||
* \retval capability id of the new kernel object
|
||||
*/
|
||||
static capid_t syscall_create(Core::Kernel_object<Thread> &t,
|
||||
unsigned const cpu_id,
|
||||
char const * const label)
|
||||
{
|
||||
return (capid_t)call(call_id_new_core_thread(), (Call_arg)&t,
|
||||
(Call_arg)cpu_id, (Call_arg)label);
|
||||
(Call_arg)label);
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -466,12 +458,13 @@ class Kernel::Thread : private Kernel::Object, public Cpu_context, private Timeo
|
||||
void signal_receive_signal(void * const base, size_t const size);
|
||||
|
||||
|
||||
/*****************
|
||||
** Cpu_context **
|
||||
*****************/
|
||||
/*************
|
||||
** Cpu_job **
|
||||
*************/
|
||||
|
||||
void exception() override;
|
||||
void proceed() override;
|
||||
void exception(Cpu & cpu) override;
|
||||
void proceed(Cpu & cpu) override;
|
||||
Cpu_job * helping_destination() override;
|
||||
|
||||
|
||||
/*************
|
||||
|
||||
@@ -18,7 +18,7 @@
|
||||
/* core includes */
|
||||
#include <kernel/cpu_context.h>
|
||||
#include <kernel/pd.h>
|
||||
#include <kernel/signal.h>
|
||||
#include <kernel/signal_receiver.h>
|
||||
|
||||
#include <board.h>
|
||||
|
||||
@@ -31,7 +31,7 @@ namespace Kernel {
|
||||
}
|
||||
|
||||
|
||||
class Kernel::Vm : private Kernel::Object, public Cpu_context
|
||||
class Kernel::Vm : private Kernel::Object, public Cpu_job
|
||||
{
|
||||
public:
|
||||
|
||||
@@ -66,7 +66,7 @@ class Kernel::Vm : private Kernel::Object, public Cpu_context
|
||||
void _pause_vcpu()
|
||||
{
|
||||
if (_scheduled != INACTIVE)
|
||||
Cpu_context::_deactivate();
|
||||
Cpu_job::_deactivate_own_share();
|
||||
|
||||
_scheduled = INACTIVE;
|
||||
}
|
||||
@@ -135,7 +135,7 @@ class Kernel::Vm : private Kernel::Object, public Cpu_context
|
||||
void run()
|
||||
{
|
||||
_sync_from_vmm();
|
||||
if (_scheduled != ACTIVE) Cpu_context::_activate();
|
||||
if (_scheduled != ACTIVE) Cpu_job::_activate_own_share();
|
||||
_scheduled = ACTIVE;
|
||||
}
|
||||
|
||||
@@ -146,12 +146,13 @@ class Kernel::Vm : private Kernel::Object, public Cpu_context
|
||||
}
|
||||
|
||||
|
||||
/*****************
|
||||
** Cpu_context **
|
||||
*****************/
|
||||
/*************
|
||||
** Cpu_job **
|
||||
*************/
|
||||
|
||||
void exception() override;
|
||||
void proceed() override;
|
||||
void exception(Cpu & cpu) override;
|
||||
void proceed(Cpu & cpu) override;
|
||||
Cpu_job * helping_destination() override { return this; }
|
||||
};
|
||||
|
||||
#endif /* _CORE__KERNEL__VM_H_ */
|
||||
|
||||
@@ -19,30 +19,9 @@
|
||||
|
||||
/* base-internal includes */
|
||||
#include <base/internal/capability_space.h>
|
||||
#include <base/internal/native_thread.h>
|
||||
|
||||
using namespace Core;
|
||||
|
||||
static unsigned _nr_of_cpus = 0;
|
||||
static void *_pager_thread_memory = nullptr;
|
||||
|
||||
|
||||
void Core::init_pager_thread_per_cpu_memory(unsigned const cpus, void * mem)
|
||||
{
|
||||
_nr_of_cpus = cpus;
|
||||
_pager_thread_memory = mem;
|
||||
}
|
||||
|
||||
|
||||
void Core::init_page_fault_handling(Rpc_entrypoint &) { }
|
||||
|
||||
|
||||
/*************
|
||||
** Mapping **
|
||||
*************/
|
||||
|
||||
void Mapping::prepare_map_operation() const { }
|
||||
|
||||
|
||||
/***************
|
||||
** Ipc_pager **
|
||||
@@ -72,11 +51,13 @@ void Pager_object::wake_up()
|
||||
}
|
||||
|
||||
|
||||
void Pager_object::start_paging(Kernel_object<Kernel::Signal_receiver> &receiver,
|
||||
Platform_thread &pager_thread)
|
||||
void Pager_object::start_paging(Kernel_object<Kernel::Signal_receiver> & receiver)
|
||||
{
|
||||
using Object = Kernel_object<Kernel::Signal_context>;
|
||||
using Entry = Object_pool<Pager_object>::Entry;
|
||||
|
||||
create(*receiver, (unsigned long)this);
|
||||
_pager_thread = &pager_thread;
|
||||
Entry::cap(Object::_cap);
|
||||
}
|
||||
|
||||
|
||||
@@ -94,11 +75,11 @@ void Pager_object::print(Output &out) const
|
||||
|
||||
Pager_object::Pager_object(Cpu_session_capability cpu_session_cap,
|
||||
Thread_capability thread_cap, addr_t const badge,
|
||||
Affinity::Location location, Session_label const &,
|
||||
Affinity::Location, Session_label const &,
|
||||
Cpu_session::Name const &)
|
||||
:
|
||||
_badge(badge), _location(location),
|
||||
_cpu_session_cap(cpu_session_cap), _thread_cap(thread_cap)
|
||||
Object_pool<Pager_object>::Entry(Kernel_object<Kernel::Signal_context>::_cap),
|
||||
_badge(badge), _cpu_session_cap(cpu_session_cap), _thread_cap(thread_cap)
|
||||
{ }
|
||||
|
||||
|
||||
@@ -106,115 +87,27 @@ Pager_object::Pager_object(Cpu_session_capability cpu_session_cap,
|
||||
** Pager_entrypoint **
|
||||
**********************/
|
||||
|
||||
void Pager_entrypoint::Thread::entry()
|
||||
void Pager_entrypoint::dissolve(Pager_object &o)
|
||||
{
|
||||
while (1) {
|
||||
|
||||
/* receive fault */
|
||||
if (Kernel::await_signal(Capability_space::capid(_kobj.cap())))
|
||||
continue;
|
||||
|
||||
Pager_object *po = *(Pager_object**)Thread::myself()->utcb()->data();
|
||||
if (!po)
|
||||
continue;
|
||||
|
||||
Untyped_capability cap = po->cap();
|
||||
|
||||
/* fetch fault data */
|
||||
Platform_thread * const pt = (Platform_thread *)po->badge();
|
||||
if (!pt) {
|
||||
warning("failed to get platform thread of faulter");
|
||||
Kernel::ack_signal(Capability_space::capid(cap));
|
||||
continue;
|
||||
}
|
||||
|
||||
if (pt->exception_state() ==
|
||||
Kernel::Thread::Exception_state::EXCEPTION) {
|
||||
if (!po->submit_exception_signal())
|
||||
warning("unresolvable exception: "
|
||||
"pd='", pt->pd().label(), "', "
|
||||
"thread='", pt->label(), "', "
|
||||
"ip=", Hex(pt->state().cpu.ip));
|
||||
pt->fault_resolved(cap, false);
|
||||
continue;
|
||||
}
|
||||
|
||||
_fault = pt->fault_info();
|
||||
|
||||
/* try to resolve fault directly via local region managers */
|
||||
if (po->pager(*this) == Pager_object::Pager_result::STOP) {
|
||||
pt->fault_resolved(cap, false);
|
||||
continue;
|
||||
}
|
||||
|
||||
/* apply mapping that was determined by the local region managers */
|
||||
{
|
||||
Locked_ptr<Address_space> locked_ptr(pt->address_space());
|
||||
if (!locked_ptr.valid()) {
|
||||
pt->fault_resolved(cap, false);
|
||||
continue;
|
||||
}
|
||||
|
||||
Hw::Address_space * as = static_cast<Hw::Address_space*>(&*locked_ptr);
|
||||
|
||||
Cache cacheable = Genode::CACHED;
|
||||
if (!_mapping.cached)
|
||||
cacheable = Genode::UNCACHED;
|
||||
if (_mapping.write_combined)
|
||||
cacheable = Genode::WRITE_COMBINED;
|
||||
|
||||
Hw::Page_flags const flags {
|
||||
.writeable = _mapping.writeable ? Hw::RW : Hw::RO,
|
||||
.executable = _mapping.executable ? Hw::EXEC : Hw::NO_EXEC,
|
||||
.privileged = Hw::USER,
|
||||
.global = Hw::NO_GLOBAL,
|
||||
.type = _mapping.io_mem ? Hw::DEVICE : Hw::RAM,
|
||||
.cacheable = cacheable
|
||||
};
|
||||
|
||||
as->insert_translation(_mapping.dst_addr, _mapping.src_addr,
|
||||
1UL << _mapping.size_log2, flags);
|
||||
}
|
||||
|
||||
pt->fault_resolved(cap, true);
|
||||
}
|
||||
Kernel::kill_signal_context(Capability_space::capid(o.cap()));
|
||||
remove(&o);
|
||||
}
|
||||
|
||||
|
||||
Pager_entrypoint::Thread::Thread(Affinity::Location cpu)
|
||||
Pager_entrypoint::Pager_entrypoint(Rpc_cap_factory &)
|
||||
:
|
||||
Genode::Thread(Weight::DEFAULT_WEIGHT, "pager_ep", PAGER_EP_STACK_SIZE, cpu),
|
||||
Thread(Weight::DEFAULT_WEIGHT, "pager_ep", PAGER_EP_STACK_SIZE,
|
||||
Type::NORMAL),
|
||||
|
||||
_kobj(_kobj.CALLED_FROM_CORE)
|
||||
{
|
||||
start();
|
||||
}
|
||||
|
||||
|
||||
void Pager_entrypoint::dissolve(Pager_object &o)
|
||||
{
|
||||
Kernel::kill_signal_context(Capability_space::capid(o.cap()));
|
||||
}
|
||||
|
||||
|
||||
Pager_capability Pager_entrypoint::manage(Pager_object &o)
|
||||
{
|
||||
unsigned const cpu = o.location().xpos();
|
||||
if (cpu >= _cpus) {
|
||||
error("Invalid location of pager object ", cpu);
|
||||
} else {
|
||||
o.start_paging(_threads[cpu]._kobj,
|
||||
*_threads[cpu].native_thread().platform_thread);
|
||||
}
|
||||
|
||||
o.start_paging(_kobj);
|
||||
insert(&o);
|
||||
return reinterpret_cap_cast<Pager_object>(o.cap());
|
||||
}
|
||||
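manage() above routes each pager object to the pager thread of the CPU given by its affinity location and rejects out-of-range locations. A tiny sketch of that per-CPU dispatch (array size, names, and types are illustrative assumptions):

#include <cstdio>

struct Pager_thread { unsigned cpu; };

struct Pager_object { unsigned xpos; };

struct Entrypoint
{
	static constexpr unsigned cpus = 4;

	Pager_thread threads[cpus] { {0}, {1}, {2}, {3} };

	Pager_thread *manage(Pager_object &o)
	{
		if (o.xpos >= cpus) {
			std::printf("invalid location of pager object %u\n", o.xpos);
			return nullptr;
		}
		return &threads[o.xpos];   /* object served by the pager thread of its CPU */
	}
};

int main()
{
	Entrypoint   ep;
	Pager_object obj { 2 };

	if (Pager_thread *t = ep.manage(obj))
		std::printf("object handled by pager thread on cpu %u\n", t->cpu);
}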
|
||||
|
||||
Pager_entrypoint::Pager_entrypoint(Rpc_cap_factory &)
|
||||
:
|
||||
_cpus(_nr_of_cpus),
|
||||
_threads((Thread*)_pager_thread_memory)
|
||||
{
|
||||
for (unsigned i = 0; i < _cpus; i++)
|
||||
construct_at<Thread>((void*)&_threads[i], Affinity::Location(i, 0));
|
||||
}
|
||||
|
||||
@@ -17,11 +17,12 @@
|
||||
/* Genode includes */
|
||||
#include <base/session_label.h>
|
||||
#include <base/thread.h>
|
||||
#include <base/object_pool.h>
|
||||
#include <base/signal.h>
|
||||
#include <pager/capability.h>
|
||||
|
||||
/* core includes */
|
||||
#include <kernel/signal.h>
|
||||
#include <kernel/signal_receiver.h>
|
||||
#include <hw/mapping.h>
|
||||
#include <mapping.h>
|
||||
#include <object.h>
|
||||
@@ -29,9 +30,6 @@
|
||||
|
||||
namespace Core {
|
||||
|
||||
class Platform;
|
||||
class Platform_thread;
|
||||
|
||||
/**
|
||||
* Interface used by generic region_map code
|
||||
*/
|
||||
@@ -55,10 +53,6 @@ namespace Core {
|
||||
using Pager_capability = Capability<Pager_object>;
|
||||
|
||||
enum { PAGER_EP_STACK_SIZE = sizeof(addr_t) * 2048 };
|
||||
|
||||
extern void init_page_fault_handling(Rpc_entrypoint &);
|
||||
|
||||
void init_pager_thread_per_cpu_memory(unsigned const cpus, void * mem);
|
||||
}
|
||||
|
||||
|
||||
@@ -99,17 +93,17 @@ class Core::Ipc_pager
|
||||
};
|
||||
|
||||
|
||||
class Core::Pager_object : private Kernel_object<Kernel::Signal_context>
|
||||
class Core::Pager_object : private Object_pool<Pager_object>::Entry,
|
||||
private Kernel_object<Kernel::Signal_context>
|
||||
{
|
||||
friend class Pager_entrypoint;
|
||||
friend class Object_pool<Pager_object>;
|
||||
|
||||
private:
|
||||
|
||||
unsigned long const _badge;
|
||||
Affinity::Location _location;
|
||||
Cpu_session_capability _cpu_session_cap;
|
||||
Thread_capability _thread_cap;
|
||||
Platform_thread *_pager_thread { nullptr };
|
||||
|
||||
/**
|
||||
* User-level signal handler registered for this pager object via
|
||||
@@ -117,12 +111,6 @@ class Core::Pager_object : private Kernel_object<Kernel::Signal_context>
|
||||
*/
|
||||
Signal_context_capability _exception_sigh { };
|
||||
|
||||
/*
|
||||
* Noncopyable
|
||||
*/
|
||||
Pager_object(const Pager_object&) = delete;
|
||||
Pager_object& operator=(const Pager_object&) = delete;
|
||||
|
||||
public:
|
||||
|
||||
/**
|
||||
@@ -135,15 +123,11 @@ class Core::Pager_object : private Kernel_object<Kernel::Signal_context>
|
||||
Affinity::Location, Session_label const&,
|
||||
Cpu_session::Name const&);
|
||||
|
||||
virtual ~Pager_object() {}
|
||||
|
||||
/**
|
||||
* User identification of pager object
|
||||
*/
|
||||
unsigned long badge() const { return _badge; }
|
||||
|
||||
Affinity::Location location() { return _location; }
|
||||
|
||||
/**
|
||||
* Resume faulter
|
||||
*/
|
||||
@@ -174,8 +158,7 @@ class Core::Pager_object : private Kernel_object<Kernel::Signal_context>
|
||||
*
|
||||
* \param receiver signal receiver that receives the page faults
|
||||
*/
|
||||
void start_paging(Kernel_object<Kernel::Signal_receiver> &receiver,
|
||||
Platform_thread &pager_thread);
|
||||
void start_paging(Kernel_object<Kernel::Signal_receiver> & receiver);
|
||||
|
||||
/**
|
||||
* Called when a page-fault finally could not be resolved
|
||||
@@ -184,11 +167,6 @@ class Core::Pager_object : private Kernel_object<Kernel::Signal_context>
|
||||
|
||||
void print(Output &out) const;
|
||||
|
||||
void with_pager(auto const &fn)
|
||||
{
|
||||
if (_pager_thread) fn(*_pager_thread);
|
||||
}
|
||||
|
||||
|
||||
/******************
|
||||
** Pure virtual **
|
||||
@@ -214,44 +192,24 @@ class Core::Pager_object : private Kernel_object<Kernel::Signal_context>
|
||||
Cpu_session_capability cpu_session_cap() const { return _cpu_session_cap; }
|
||||
Thread_capability thread_cap() const { return _thread_cap; }
|
||||
|
||||
Untyped_capability cap() {
|
||||
return Kernel_object<Kernel::Signal_context>::_cap; }
|
||||
using Object_pool<Pager_object>::Entry::cap;
|
||||
};
|
||||
|
||||
|
||||
class Core::Pager_entrypoint
|
||||
class Core::Pager_entrypoint : public Object_pool<Pager_object>,
|
||||
public Thread,
|
||||
private Ipc_pager
|
||||
{
|
||||
private:
|
||||
|
||||
friend class Platform;
|
||||
|
||||
class Thread : public Genode::Thread,
|
||||
private Ipc_pager
|
||||
{
|
||||
private:
|
||||
|
||||
friend class Pager_entrypoint;
|
||||
|
||||
Kernel_object<Kernel::Signal_receiver> _kobj;
|
||||
|
||||
public:
|
||||
|
||||
explicit Thread(Affinity::Location);
|
||||
|
||||
|
||||
/**********************
|
||||
** Thread interface **
|
||||
**********************/
|
||||
|
||||
void entry() override;
|
||||
};
|
||||
|
||||
unsigned const _cpus;
|
||||
Thread *_threads;
|
||||
Kernel_object<Kernel::Signal_receiver> _kobj;
|
||||
|
||||
public:
|
||||
|
||||
explicit Pager_entrypoint(Rpc_cap_factory &);
|
||||
/**
|
||||
* Constructor
|
||||
*/
|
||||
Pager_entrypoint(Rpc_cap_factory &);
|
||||
|
||||
/**
|
||||
* Associate pager object 'obj' with entry point
|
||||
@@ -262,6 +220,13 @@ class Core::Pager_entrypoint
|
||||
* Dissolve pager object 'obj' from entry point
|
||||
*/
|
||||
void dissolve(Pager_object &obj);
|
||||
|
||||
|
||||
/**********************
|
||||
** Thread interface **
|
||||
**********************/
|
||||
|
||||
void entry() override;
|
||||
};
|
||||
|
||||
#endif /* _CORE__PAGER_H_ */
|
||||
|
||||
@@ -1,79 +0,0 @@
|
||||
/*
|
||||
* \brief Allocate an object with a physical address
|
||||
* \author Norman Feske
|
||||
* \author Benjamin Lamowski
|
||||
* \date 2024-12-02
|
||||
*/
|
||||
|
||||
/*
|
||||
* Copyright (C) 2024 Genode Labs GmbH
|
||||
*
|
||||
* This file is part of the Genode OS framework, which is distributed
|
||||
* under the terms of the GNU Affero General Public License version 3.
|
||||
*/
|
||||
|
||||
#ifndef _CORE__PHYS_ALLOCATED_H_
|
||||
#define _CORE__PHYS_ALLOCATED_H_
|
||||
|
||||
/* base includes */
|
||||
#include <base/allocator.h>
|
||||
#include <base/attached_ram_dataspace.h>
|
||||
#include <util/noncopyable.h>
|
||||
|
||||
/* core-local includes */
|
||||
#include <types.h>
|
||||
|
||||
namespace Core {
|
||||
template <typename T>
|
||||
class Phys_allocated;
|
||||
}
|
||||
|
||||
using namespace Core;
|
||||
|
||||
|
||||
template <typename T>
|
||||
class Core::Phys_allocated : Genode::Noncopyable
|
||||
{
|
||||
private:
|
||||
|
||||
Rpc_entrypoint &_ep;
|
||||
Ram_allocator &_ram;
|
||||
Region_map &_rm;
|
||||
|
||||
Attached_ram_dataspace _ds { _ram, _rm, sizeof(T) };
|
||||
public:
|
||||
|
||||
T &obj = *_ds.local_addr<T>();
|
||||
|
||||
Phys_allocated(Rpc_entrypoint &ep,
|
||||
Ram_allocator &ram,
|
||||
Region_map &rm)
|
||||
:
|
||||
_ep(ep), _ram(ram), _rm(rm)
|
||||
{
|
||||
construct_at<T>(&obj);
|
||||
}
|
||||
|
||||
Phys_allocated(Rpc_entrypoint &ep,
|
||||
Ram_allocator &ram,
|
||||
Region_map &rm,
|
||||
auto const &construct_fn)
|
||||
:
|
||||
_ep(ep), _ram(ram), _rm(rm)
|
||||
{
|
||||
construct_fn(*this, &obj);
|
||||
}
|
||||
|
||||
~Phys_allocated() { obj.~T(); }
|
||||
|
||||
addr_t phys_addr() {
|
||||
addr_t phys_addr { };
|
||||
_ep.apply(_ds.cap(), [&](Dataspace_component *dsc) {
|
||||
phys_addr = dsc->phys_addr();
|
||||
});
|
||||
|
||||
return phys_addr;
|
||||
}
|
||||
};
|
||||
|
||||
#endif /* _CORE__PHYS_ALLOCATED_H_ */
|
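The removed phys_allocated.h above wraps an object into a RAM dataspace, placement-constructs it there, exposes the dataspace's physical address, and destructs the object in place. A stand-alone C++ analogue of that ownership pattern (plain heap storage stands in for the dataspace; this is an illustration, not the Genode API):

#include <cstdio>
#include <new>

template <typename T>
class Backed
{
	private:

		void *_store { ::operator new(sizeof(T)) };   /* stand-in for the RAM dataspace */

	public:

		T &obj = *new (_store) T();                   /* placement construction */

		~Backed()
		{
			obj.~T();                                 /* destruct in place       */
			::operator delete(_store);
		}

		void *addr() const { return _store; }         /* analogue of phys_addr() */
};

struct Payload { int value = 42; };

int main()
{
	Backed<Payload> backed { };
	std::printf("%d @ %p\n", backed.obj.value, backed.addr());
}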
||||
@@ -19,7 +19,6 @@
|
||||
|
||||
/* base-hw core includes */
|
||||
#include <map_local.h>
|
||||
#include <pager.h>
|
||||
#include <platform.h>
|
||||
#include <platform_pd.h>
|
||||
#include <kernel/main.h>
|
||||
@@ -32,6 +31,7 @@
|
||||
/* base internal includes */
|
||||
#include <base/internal/crt0.h>
|
||||
#include <base/internal/stack_area.h>
|
||||
#include <base/internal/unmanaged_singleton.h>
|
||||
|
||||
/* base includes */
|
||||
#include <trace/source_registry.h>
|
||||
@@ -60,9 +60,8 @@ Hw::Page_table::Allocator & Platform::core_page_table_allocator()
|
||||
using Allocator = Hw::Page_table::Allocator;
|
||||
using Array = Allocator::Array<Hw::Page_table::CORE_TRANS_TABLE_COUNT>;
|
||||
addr_t virt_addr = Hw::Mm::core_page_tables().base + sizeof(Hw::Page_table);
|
||||
|
||||
static Array::Allocator alloc { _boot_info().table_allocator, virt_addr };
|
||||
return alloc;
|
||||
return *unmanaged_singleton<Array::Allocator>(_boot_info().table_allocator,
|
||||
virt_addr);
|
||||
}
|
||||
|
||||
|
||||
@@ -71,7 +70,6 @@ addr_t Platform::core_main_thread_phys_utcb()
|
||||
return core_phys_addr(_boot_info().core_main_thread_utcb);
|
||||
}
|
||||
|
||||
|
||||
void Platform::_init_io_mem_alloc()
|
||||
{
|
||||
/* add entire address space minus the RAM memory regions */
|
||||
@@ -83,9 +81,8 @@ void Platform::_init_io_mem_alloc()
|
||||
|
||||
Hw::Memory_region_array const & Platform::_core_virt_regions()
|
||||
{
|
||||
static Hw::Memory_region_array array {
|
||||
Hw::Memory_region(stack_area_virtual_base(), stack_area_virtual_size()) };
|
||||
return array;
|
||||
return *unmanaged_singleton<Hw::Memory_region_array>(
|
||||
Hw::Memory_region(stack_area_virtual_base(), stack_area_virtual_size()));
|
||||
}
|
||||
|
||||
|
||||
@@ -254,10 +251,6 @@ Platform::Platform()
|
||||
);
|
||||
}
|
||||
|
||||
unsigned const cpus = _boot_info().cpus;
|
||||
size_t size = cpus * sizeof(Pager_entrypoint::Thread);
|
||||
init_pager_thread_per_cpu_memory(cpus, _core_mem_alloc.alloc(size));
|
||||
|
||||
class Idle_thread_trace_source : public Trace::Source::Info_accessor,
|
||||
private Trace::Control,
|
||||
private Trace::Source
|
||||
|
||||
@@ -15,6 +15,7 @@
|
||||
/* core includes */
|
||||
#include <platform_thread.h>
|
||||
#include <platform_pd.h>
|
||||
#include <core_env.h>
|
||||
#include <rm_session_component.h>
|
||||
#include <map_local.h>
|
||||
|
||||
@@ -29,19 +30,48 @@
|
||||
using namespace Core;
|
||||
|
||||
|
||||
addr_t Platform_thread::Utcb::_attach(Region_map &core_rm)
|
||||
Ram_dataspace_capability Platform_thread::Utcb::_allocate_utcb(bool core_thread)
|
||||
{
|
||||
Region_map::Attr attr { };
|
||||
attr.writeable = true;
|
||||
return core_rm.attach(_ds, attr).convert<addr_t>(
|
||||
[&] (Region_map::Range range) { return range.start; },
|
||||
[&] (Region_map::Attach_error) {
|
||||
error("failed to attach UTCB of new thread within core");
|
||||
return 0ul; });
|
||||
Ram_dataspace_capability ds;
|
||||
|
||||
if (core_thread)
|
||||
return ds;
|
||||
|
||||
try {
|
||||
ds = core_env().pd_session()->alloc(sizeof(Native_utcb), CACHED);
|
||||
} catch (...) {
|
||||
error("failed to allocate UTCB");
|
||||
throw Out_of_ram();
|
||||
}
|
||||
|
||||
return ds;
|
||||
}
|
||||
|
||||
|
||||
static addr_t _alloc_core_local_utcb(addr_t core_addr)
|
||||
addr_t Platform_thread::Utcb::_core_local_address(addr_t utcb_addr,
|
||||
bool core_thread)
|
||||
{
|
||||
if (core_thread)
|
||||
return utcb_addr;
|
||||
|
||||
addr_t ret = 0;
|
||||
|
||||
Region_map::Attr attr { };
|
||||
attr.writeable = true;
|
||||
core_env().rm_session()->attach(_ds, attr).with_result(
|
||||
[&] (Region_map::Range range) {
|
||||
ret = range.start; },
|
||||
[&] (Region_map::Attach_error) {
|
||||
error("failed to attach UTCB of new thread within core"); });
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
||||
Platform_thread::Utcb::Utcb(addr_t pd_addr, bool core_thread)
|
||||
:
|
||||
_ds(_allocate_utcb(core_thread)),
|
||||
_core_addr(_core_local_address(pd_addr, core_thread))
|
||||
{
|
||||
/*
|
||||
* All non-core threads use the typical dataspace/rm_session
|
||||
@@ -50,25 +80,27 @@ static addr_t _alloc_core_local_utcb(addr_t core_addr)
|
||||
* physical and virtual memory allocators to create/attach its
|
||||
* UTCBs. Therefore, we've to allocate and map those here.
|
||||
*/
|
||||
return platform().ram_alloc().try_alloc(sizeof(Native_utcb)).convert<addr_t>(
|
||||
if (core_thread) {
|
||||
platform().ram_alloc().try_alloc(sizeof(Native_utcb)).with_result(
|
||||
|
||||
[&] (void *utcb_phys) {
|
||||
map_local((addr_t)utcb_phys, core_addr,
|
||||
sizeof(Native_utcb) / get_page_size());
|
||||
return addr_t(utcb_phys);
|
||||
},
|
||||
[&] (Range_allocator::Alloc_error) {
|
||||
error("failed to allocate UTCB for core/kernel thread!");
|
||||
return 0ul;
|
||||
});
|
||||
[&] (void *utcb_phys) {
|
||||
map_local((addr_t)utcb_phys, _core_addr,
|
||||
sizeof(Native_utcb) / get_page_size());
|
||||
},
|
||||
[&] (Range_allocator::Alloc_error) {
|
||||
error("failed to allocate UTCB for core/kernel thread!");
|
||||
throw Out_of_ram();
|
||||
}
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
Platform_thread::Utcb::Utcb(addr_t core_addr)
|
||||
:
|
||||
core_addr(core_addr),
|
||||
phys_addr(_alloc_core_local_utcb(core_addr))
|
||||
{ }
|
||||
Platform_thread::Utcb::~Utcb()
|
||||
{
|
||||
/* detach UTCB from core/kernel */
|
||||
core_env().rm_session()->detach((addr_t)_core_addr);
|
||||
}
|
||||
|
||||
|
||||
void Platform_thread::_init() { }
|
||||
@@ -90,33 +122,28 @@ Platform_thread::Platform_thread(Label const &label, Native_utcb &utcb)
|
||||
_label(label),
|
||||
_pd(_kernel_main_get_core_platform_pd()),
|
||||
_pager(nullptr),
|
||||
_utcb((addr_t)&utcb),
|
||||
_utcb((addr_t)&utcb, true),
|
||||
_main_thread(false),
|
||||
_location(Affinity::Location()),
|
||||
_kobj(_kobj.CALLED_FROM_CORE, _location.xpos(), _label.string())
|
||||
{ }
|
||||
_kobj(_kobj.CALLED_FROM_CORE, _label.string()) { }
|
||||
|
||||
|
||||
Platform_thread::Platform_thread(Platform_pd &pd,
|
||||
Rpc_entrypoint &ep,
|
||||
Ram_allocator &ram,
|
||||
Region_map &core_rm,
|
||||
size_t const quota,
|
||||
Label const &label,
|
||||
unsigned const virt_prio,
|
||||
Affinity::Location const location,
|
||||
addr_t /* utcb */)
|
||||
addr_t const utcb)
|
||||
:
|
||||
_label(label),
|
||||
_pd(pd),
|
||||
_pager(nullptr),
|
||||
_utcb(ep, ram, core_rm),
|
||||
_utcb(utcb, false),
|
||||
_priority(_scale_priority(virt_prio)),
|
||||
_quota((unsigned)quota),
|
||||
_main_thread(!pd.has_any_thread),
|
||||
_location(location),
|
||||
_kobj(_kobj.CALLED_FROM_CORE, _location.xpos(),
|
||||
_priority, _quota, _label.string())
|
||||
_kobj(_kobj.CALLED_FROM_CORE, _priority, _quota, _label.string())
|
||||
{
|
||||
_address_space = pd.weak_ptr();
|
||||
pd.has_any_thread = true;
|
||||
@@ -138,6 +165,9 @@ Platform_thread::~Platform_thread()
|
||||
locked_ptr->flush(user_utcb_main_thread(), sizeof(Native_utcb),
|
||||
Address_space::Core_local_addr{0});
|
||||
}
|
||||
|
||||
/* free UTCB */
|
||||
core_env().pd_session()->free(_utcb._ds);
|
||||
}
|
||||
|
||||
|
||||
@@ -155,23 +185,35 @@ void Platform_thread::start(void * const ip, void * const sp)
|
||||
/* attach UTCB in case of a main thread */
|
||||
if (_main_thread) {
|
||||
|
||||
Locked_ptr<Address_space> locked_ptr(_address_space);
|
||||
if (!locked_ptr.valid()) {
|
||||
error("unable to start thread in invalid address space");
|
||||
return;
|
||||
/* lookup dataspace component for physical address */
|
||||
auto lambda = [&] (Dataspace_component *dsc) {
|
||||
if (!dsc) return -1;
|
||||
|
||||
/* lock the address space */
|
||||
Locked_ptr<Address_space> locked_ptr(_address_space);
|
||||
if (!locked_ptr.valid()) {
|
||||
error("invalid RM client");
|
||||
return -1;
|
||||
};
|
||||
Hw::Address_space * as = static_cast<Hw::Address_space*>(&*locked_ptr);
|
||||
if (!as->insert_translation(user_utcb_main_thread(), dsc->phys_addr(),
|
||||
sizeof(Native_utcb), Hw::PAGE_FLAGS_UTCB)) {
|
||||
error("failed to attach UTCB");
|
||||
return -1;
|
||||
}
|
||||
return 0;
|
||||
};
|
||||
Hw::Address_space * as = static_cast<Hw::Address_space*>(&*locked_ptr);
|
||||
if (!as->insert_translation(user_utcb_main_thread(), _utcb.phys_addr,
|
||||
sizeof(Native_utcb), Hw::PAGE_FLAGS_UTCB)) {
|
||||
error("failed to attach UTCB");
|
||||
if (core_env().entrypoint().apply(_utcb._ds, lambda))
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
/* initialize thread registers */
|
||||
_kobj->regs->ip = reinterpret_cast<addr_t>(ip);
|
||||
_kobj->regs->sp = reinterpret_cast<addr_t>(sp);
|
||||
|
||||
/* start executing new thread */
|
||||
unsigned const cpu = _location.xpos();
|
||||
|
||||
Native_utcb &utcb = *Thread::myself()->utcb();
|
||||
|
||||
/* reset capability counter */
|
||||
@@ -181,20 +223,16 @@ void Platform_thread::start(void * const ip, void * const sp)
|
||||
utcb.cap_add(Capability_space::capid(_pd.parent()));
|
||||
utcb.cap_add(Capability_space::capid(_utcb._ds));
|
||||
}
|
||||
|
||||
Kernel::start_thread(*_kobj, _pd.kernel_pd(),
|
||||
*(Native_utcb*)_utcb.core_addr);
|
||||
Kernel::start_thread(*_kobj, cpu, _pd.kernel_pd(), *(Native_utcb*)_utcb._core_addr);
|
||||
}
|
||||
|
||||
|
||||
void Platform_thread::pager(Pager_object &po)
|
||||
void Platform_thread::pager(Pager_object &pager)
|
||||
{
|
||||
using namespace Kernel;
|
||||
|
||||
po.with_pager([&] (Platform_thread &pt) {
|
||||
thread_pager(*_kobj, *pt._kobj,
|
||||
Capability_space::capid(po.cap())); });
|
||||
_pager = &po;
|
||||
thread_pager(*_kobj, Capability_space::capid(pager.cap()));
|
||||
_pager = &pager;
|
||||
}
|
||||
|
||||
|
||||
@@ -240,9 +278,3 @@ void Platform_thread::restart()
|
||||
{
|
||||
Kernel::restart_thread(Capability_space::capid(_kobj.cap()));
|
||||
}
|
||||
|
||||
|
||||
void Platform_thread::fault_resolved(Untyped_capability cap, bool resolved)
|
||||
{
|
||||
Kernel::ack_pager_signal(Capability_space::capid(cap), *_kobj, resolved);
|
||||
}
|
||||
|
||||
@@ -19,7 +19,6 @@
|
||||
#include <base/ram_allocator.h>
|
||||
#include <base/thread.h>
|
||||
#include <base/trace/types.h>
|
||||
#include <base/rpc_server.h>
|
||||
|
||||
/* base-internal includes */
|
||||
#include <base/internal/native_utcb.h>
|
||||
@@ -27,7 +26,6 @@
|
||||
/* core includes */
|
||||
#include <address_space.h>
|
||||
#include <object.h>
|
||||
#include <dataspace_component.h>
|
||||
|
||||
/* kernel includes */
|
||||
#include <kernel/core_interface.h>
|
||||
@@ -57,59 +55,17 @@ class Core::Platform_thread : Noncopyable
|
||||
|
||||
using Label = String<32>;
|
||||
|
||||
struct Utcb : Noncopyable
|
||||
struct Utcb
|
||||
{
|
||||
struct {
|
||||
Ram_allocator *_ram_ptr = nullptr;
|
||||
Region_map *_core_rm_ptr = nullptr;
|
||||
};
|
||||
|
||||
Ram_dataspace_capability _ds { }; /* UTCB ds of non-core threads */
|
||||
|
||||
addr_t const core_addr; /* UTCB address within core/kernel */
|
||||
addr_t const phys_addr;
|
||||
addr_t const _core_addr; /* UTCB address within core/kernel */
|
||||
|
||||
/*
|
||||
* \throw Out_of_ram
|
||||
* \throw Out_of_caps
|
||||
*/
|
||||
Ram_dataspace_capability _allocate(Ram_allocator &ram)
|
||||
{
|
||||
return ram.alloc(sizeof(Native_utcb), CACHED);
|
||||
}
|
||||
Ram_dataspace_capability _allocate_utcb(bool core_thread);
|
||||
addr_t _core_local_address(addr_t utcb_addr, bool core_thread);
|
||||
|
||||
addr_t _attach(Region_map &);
|
||||
|
||||
static addr_t _ds_phys(Rpc_entrypoint &ep, Dataspace_capability ds)
|
||||
{
|
||||
return ep.apply(ds, [&] (Dataspace_component *dsc) {
|
||||
return dsc ? dsc->phys_addr() : 0; });
|
||||
}
|
||||
|
||||
/**
|
||||
* Constructor used for core-local threads
|
||||
*/
|
||||
Utcb(addr_t core_addr);
|
||||
|
||||
/**
|
||||
* Constructor used for threads outside of core
|
||||
*/
|
||||
Utcb(Rpc_entrypoint &ep, Ram_allocator &ram, Region_map &core_rm)
|
||||
:
|
||||
_core_rm_ptr(&core_rm),
|
||||
_ds(_allocate(ram)),
|
||||
core_addr(_attach(core_rm)),
|
||||
phys_addr(_ds_phys(ep, _ds))
|
||||
{ }
|
||||
|
||||
~Utcb()
|
||||
{
|
||||
if (_core_rm_ptr)
|
||||
_core_rm_ptr->detach(core_addr);
|
||||
|
||||
if (_ram_ptr && _ds.valid())
|
||||
_ram_ptr->free(_ds);
|
||||
}
|
||||
Utcb(addr_t pd_addr, bool core_thread);
|
||||
~Utcb();
|
||||
};
|
||||
|
||||
Label const _label;
|
||||
@@ -170,8 +126,7 @@ class Core::Platform_thread : Noncopyable
|
||||
* \param virt_prio unscaled processor-scheduling priority
|
||||
* \param utcb core local pointer to userland stack
|
||||
*/
|
||||
Platform_thread(Platform_pd &, Rpc_entrypoint &, Ram_allocator &,
|
||||
Region_map &, size_t const quota, Label const &label,
|
||||
Platform_thread(Platform_pd &, size_t const quota, Label const &label,
|
||||
unsigned const virt_prio, Affinity::Location,
|
||||
addr_t const utcb);
|
||||
|
||||
@@ -216,8 +171,6 @@ class Core::Platform_thread : Noncopyable
|
||||
|
||||
void restart();
|
||||
|
||||
void fault_resolved(Untyped_capability, bool);
|
||||
|
||||
/**
|
||||
* Pause this thread
|
||||
*/
|
||||
|
||||
repos/base-hw/src/core/region_map_support.cc (new file, 94 lines)
@@ -0,0 +1,94 @@
|
||||
/*
|
||||
* \brief RM- and pager implementations specific for base-hw and core
|
||||
* \author Martin Stein
|
||||
* \author Stefan Kalkowski
|
||||
* \date 2012-02-12
|
||||
*/
|
||||
|
||||
/*
|
||||
* Copyright (C) 2012-2017 Genode Labs GmbH
|
||||
*
|
||||
* This file is part of the Genode OS framework, which is distributed
|
||||
* under the terms of the GNU Affero General Public License version 3.
|
||||
*/
|
||||
|
||||
/* base-hw core includes */
|
||||
#include <pager.h>
|
||||
#include <platform_pd.h>
|
||||
#include <platform_thread.h>
|
||||
|
||||
using namespace Core;
|
||||
|
||||
|
||||
void Pager_entrypoint::entry()
|
||||
{
|
||||
Untyped_capability cap;
|
||||
|
||||
while (1) {
|
||||
|
||||
if (cap.valid()) Kernel::ack_signal(Capability_space::capid(cap));
|
||||
|
||||
/* receive fault */
|
||||
if (Kernel::await_signal(Capability_space::capid(_kobj.cap()))) continue;
|
||||
|
||||
Pager_object *po = *(Pager_object**)Thread::myself()->utcb()->data();
|
||||
cap = po->cap();
|
||||
|
||||
if (!po) continue;
|
||||
|
||||
/* fetch fault data */
|
||||
Platform_thread * const pt = (Platform_thread *)po->badge();
|
||||
if (!pt) {
|
||||
warning("failed to get platform thread of faulter");
|
||||
continue;
|
||||
}
|
||||
|
||||
if (pt->exception_state() ==
|
||||
Kernel::Thread::Exception_state::EXCEPTION) {
|
||||
if (!po->submit_exception_signal())
|
||||
warning("unresolvable exception: "
|
||||
"pd='", pt->pd().label(), "', "
|
||||
"thread='", pt->label(), "', "
|
||||
"ip=", Hex(pt->state().cpu.ip));
|
||||
continue;
|
||||
}
|
||||
|
||||
_fault = pt->fault_info();
|
||||
|
||||
/* try to resolve fault directly via local region managers */
|
||||
if (po->pager(*this) == Pager_object::Pager_result::STOP)
|
||||
continue;
|
||||
|
||||
/* apply mapping that was determined by the local region managers */
|
||||
{
|
||||
Locked_ptr<Address_space> locked_ptr(pt->address_space());
|
||||
if (!locked_ptr.valid()) continue;
|
||||
|
||||
Hw::Address_space * as = static_cast<Hw::Address_space*>(&*locked_ptr);
|
||||
|
||||
Cache cacheable = Genode::CACHED;
|
||||
if (!_mapping.cached)
|
||||
cacheable = Genode::UNCACHED;
|
||||
if (_mapping.write_combined)
|
||||
cacheable = Genode::WRITE_COMBINED;
|
||||
|
||||
Hw::Page_flags const flags {
|
||||
.writeable = _mapping.writeable ? Hw::RW : Hw::RO,
|
||||
.executable = _mapping.executable ? Hw::EXEC : Hw::NO_EXEC,
|
||||
.privileged = Hw::USER,
|
||||
.global = Hw::NO_GLOBAL,
|
||||
.type = _mapping.io_mem ? Hw::DEVICE : Hw::RAM,
|
||||
.cacheable = cacheable
|
||||
};
|
||||
|
||||
as->insert_translation(_mapping.dst_addr, _mapping.src_addr,
|
||||
1UL << _mapping.size_log2, flags);
|
||||
}
|
||||
|
||||
/* let pager object go back to no-fault state */
|
||||
po->wake_up();
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void Mapping::prepare_map_operation() const { }
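
The pager loop above derives hardware page flags from the generic mapping attributes before installing the translation into the faulting address space. Purely as an illustration (self-contained placeholder types, not the real Core/Hw definitions), the flag derivation performed in that block boils down to the following sketch:

#include <cstdio>

namespace Sketch {

	enum class Cacheability { CACHED, UNCACHED, WRITE_COMBINED };

	struct Mapping_attr    { bool cached, write_combined, writeable, executable, io_mem; };
	struct Page_flags_attr { bool writeable; bool executable; bool device; Cacheability cacheable; };

	/* mirrors the decision chain in the pager entry loop above */
	Page_flags_attr page_flags_from_mapping(Mapping_attr const m)
	{
		Cacheability c = Cacheability::CACHED;
		if (!m.cached)        c = Cacheability::UNCACHED;
		if (m.write_combined) c = Cacheability::WRITE_COMBINED;

		return Page_flags_attr { m.writeable, m.executable, m.io_mem, c };
	}
}

int main()
{
	/* example: uncached, writeable I/O memory mapping */
	Sketch::Mapping_attr const uncached_io { false, false, true, false, true };
	auto const flags = Sketch::page_flags_from_mapping(uncached_io);
	std::printf("writeable=%d device=%d cacheable=%d\n",
	            flags.writeable, flags.device, (int)flags.cacheable);
}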
|
||||
@@ -19,7 +19,7 @@
|
||||
|
||||
/* core includes */
|
||||
#include <object.h>
|
||||
#include <kernel/signal.h>
|
||||
#include <kernel/signal_receiver.h>
|
||||
#include <assertion.h>
|
||||
|
||||
namespace Core {
|
||||
|
||||
@@ -23,35 +23,32 @@
|
||||
|
||||
using namespace Kernel;
|
||||
|
||||
extern "C" void kernel_to_user_context_switch(Core::Cpu::Context*,
|
||||
Core::Cpu::Fpu_context*);
|
||||
extern "C" void kernel_to_user_context_switch(Cpu::Context*, Cpu::Fpu_context*);
|
||||
|
||||
|
||||
void Thread::_call_suspend() { }
|
||||
|
||||
|
||||
void Thread::exception()
|
||||
void Thread::exception(Cpu & cpu)
|
||||
{
|
||||
using Ctx = Core::Cpu::Context;
|
||||
|
||||
switch (regs->cpu_exception) {
|
||||
case Ctx::SUPERVISOR_CALL:
|
||||
case Cpu::Context::SUPERVISOR_CALL:
|
||||
_call();
|
||||
return;
|
||||
case Ctx::PREFETCH_ABORT:
|
||||
case Ctx::DATA_ABORT:
|
||||
case Cpu::Context::PREFETCH_ABORT:
|
||||
case Cpu::Context::DATA_ABORT:
|
||||
_mmu_exception();
|
||||
return;
|
||||
case Ctx::INTERRUPT_REQUEST:
|
||||
case Ctx::FAST_INTERRUPT_REQUEST:
|
||||
_interrupt(_user_irq_pool);
|
||||
case Cpu::Context::INTERRUPT_REQUEST:
|
||||
case Cpu::Context::FAST_INTERRUPT_REQUEST:
|
||||
_interrupt(_user_irq_pool, cpu.id());
|
||||
return;
|
||||
case Ctx::UNDEFINED_INSTRUCTION:
|
||||
case Cpu::Context::UNDEFINED_INSTRUCTION:
|
||||
Genode::raw(*this, ": undefined instruction at ip=",
|
||||
Genode::Hex(regs->ip));
|
||||
_die();
|
||||
return;
|
||||
case Ctx::RESET:
|
||||
case Cpu::Context::RESET:
|
||||
return;
|
||||
default:
|
||||
Genode::raw(*this, ": triggered an unknown exception ",
|
||||
@@ -74,17 +71,17 @@ void Kernel::Thread::Tlb_invalidation::execute(Cpu &) { }
|
||||
void Thread::Flush_and_stop_cpu::execute(Cpu &) { }
|
||||
|
||||
|
||||
void Cpu::Halt_job::proceed() { }
|
||||
void Cpu::Halt_job::proceed(Kernel::Cpu &) { }
|
||||
|
||||
|
||||
void Thread::proceed()
|
||||
void Thread::proceed(Cpu & cpu)
|
||||
{
|
||||
if (!_cpu().active(pd().mmu_regs) && type() != CORE)
|
||||
_cpu().switch_to(pd().mmu_regs);
|
||||
if (!cpu.active(pd().mmu_regs) && type() != CORE)
|
||||
cpu.switch_to(pd().mmu_regs);
|
||||
|
||||
regs->cpu_exception = _cpu().stack_start();
|
||||
kernel_to_user_context_switch((static_cast<Core::Cpu::Context*>(&*regs)),
|
||||
(static_cast<Core::Cpu::Fpu_context*>(&*regs)));
|
||||
regs->cpu_exception = cpu.stack_start();
|
||||
kernel_to_user_context_switch((static_cast<Cpu::Context*>(&*regs)),
|
||||
(static_cast<Cpu::Fpu_context*>(&*regs)));
|
||||
}
|
||||
|
||||
|
||||
|
||||
@@ -16,11 +16,12 @@
|
||||
|
||||
/* core includes */
|
||||
#include <platform.h>
|
||||
#include <platform_pd.h>
|
||||
#include <platform_services.h>
|
||||
#include <core_env.h>
|
||||
#include <core_service.h>
|
||||
#include <map_local.h>
|
||||
#include <vm_root.h>
|
||||
#include <platform.h>
|
||||
|
||||
using namespace Core;
|
||||
|
||||
@@ -31,13 +32,11 @@ extern addr_t hypervisor_exception_vector;
|
||||
/*
|
||||
* Add ARM virtualization specific vm service
|
||||
*/
|
||||
void Core::platform_add_local_services(Rpc_entrypoint &ep,
|
||||
Sliced_heap &sh,
|
||||
Registry<Service> &services,
|
||||
Trace::Source_registry &trace_sources,
|
||||
Ram_allocator &core_ram,
|
||||
Region_map &core_rm,
|
||||
Range_allocator &)
|
||||
void Core::platform_add_local_services(Rpc_entrypoint &ep,
|
||||
Sliced_heap &sh,
|
||||
Registry<Service> &services,
|
||||
Core::Trace::Source_registry &trace_sources,
|
||||
Ram_allocator &)
|
||||
{
|
||||
map_local(Platform::core_phys_addr((addr_t)&hypervisor_exception_vector),
|
||||
Hw::Mm::hypervisor_exception_vector().base,
|
||||
@@ -52,7 +51,8 @@ void Core::platform_add_local_services(Rpc_entrypoint &ep,
|
||||
Hw::Mm::hypervisor_stack().size / get_page_size(),
|
||||
Hw::PAGE_FLAGS_KERN_DATA);
|
||||
|
||||
static Vm_root vm_root(ep, sh, core_ram, core_rm, trace_sources);
|
||||
static Vm_root vm_root(ep, sh, core_env().ram_allocator(),
|
||||
core_env().local_rm(), trace_sources);
|
||||
static Core_service<Vm_session_component> vm_service(services, vm_root);
|
||||
},
|
||||
[&] (Range_allocator::Alloc_error) {
|
||||
|
||||
@@ -14,11 +14,15 @@
|
||||
/* Genode includes */
|
||||
#include <util/construct_at.h>
|
||||
|
||||
/* base internal includes */
|
||||
#include <base/internal/unmanaged_singleton.h>
|
||||
|
||||
/* core includes */
|
||||
#include <kernel/core_interface.h>
|
||||
#include <vm_session_component.h>
|
||||
#include <platform.h>
|
||||
#include <cpu_thread_component.h>
|
||||
#include <core_env.h>
|
||||
|
||||
using namespace Core;
|
||||
|
||||
@@ -83,14 +87,29 @@ void * Vm_session_component::_alloc_table()
|
||||
}
|
||||
|
||||
|
||||
using Vmid_allocator = Bit_allocator<256>;
|
||||
|
||||
static Vmid_allocator &alloc()
|
||||
{
|
||||
static Vmid_allocator * allocator = nullptr;
|
||||
if (!allocator) {
|
||||
allocator = unmanaged_singleton<Vmid_allocator>();
|
||||
|
||||
/* reserve VM ID 0 for the hypervisor */
|
||||
addr_t id = allocator->alloc();
|
||||
assert (id == 0);
|
||||
}
|
||||
return *allocator;
|
||||
}
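
The singleton above hands out guest VM IDs from a 256-entry bit allocator and permanently reserves ID 0 so that no guest can collide with the hypervisor. As an illustration of that reservation idiom only (using std::bitset instead of Genode's Bit_allocator, which is the type actually used above), a minimal sketch:

#include <bitset>
#include <cassert>
#include <cstdio>
#include <optional>

struct Vmid_pool {

	std::bitset<256> used { };

	Vmid_pool() { used.set(0); }        /* reserve VM ID 0 for the hypervisor */

	std::optional<unsigned> alloc()
	{
		for (unsigned id = 0; id < used.size(); id++)
			if (!used.test(id)) { used.set(id); return id; }
		return std::nullopt;            /* all 256 IDs in use */
	}

	void free(unsigned id) { assert(id != 0); used.reset(id); }
};

int main()
{
	Vmid_pool pool;
	auto const id = pool.alloc();       /* first guest gets ID 1, never 0 */
	std::printf("allocated VM ID %u\n", id.value());
	pool.free(id.value());
}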
|
||||
|
||||
|
||||
Genode::addr_t Vm_session_component::_alloc_vcpu_data(Genode::addr_t ds_addr)
|
||||
{
|
||||
return ds_addr;
|
||||
}
|
||||
|
||||
|
||||
Vm_session_component::Vm_session_component(Vmid_allocator & vmid_alloc,
|
||||
Rpc_entrypoint &ds_ep,
|
||||
Vm_session_component::Vm_session_component(Rpc_entrypoint &ds_ep,
|
||||
Resources resources,
|
||||
Label const &,
|
||||
Diag,
|
||||
@@ -108,8 +127,7 @@ Vm_session_component::Vm_session_component(Vmid_allocator & vmid_alloc,
|
||||
_table(*construct_at<Board::Vm_page_table>(_alloc_table())),
|
||||
_table_array(*(new (cma()) Board::Vm_page_table_array([] (void * virt) {
|
||||
return (addr_t)cma().phys_addr(virt);}))),
|
||||
_vmid_alloc(vmid_alloc),
|
||||
_id({(unsigned)_vmid_alloc.alloc(), cma().phys_addr(&_table)})
|
||||
_id({(unsigned)alloc().alloc(), cma().phys_addr(&_table)})
|
||||
{
|
||||
/* configure managed VM area */
|
||||
_map.add_range(0, 0UL - 0x1000);
|
||||
@@ -144,5 +162,5 @@ Vm_session_component::~Vm_session_component()
|
||||
/* free guest-to-host page tables */
|
||||
destroy(platform().core_mem_alloc(), &_table);
|
||||
destroy(platform().core_mem_alloc(), &_table_array);
|
||||
_vmid_alloc.free(_id.id);
|
||||
alloc().free(_id.id);
|
||||
}
|
||||
|
||||
@@ -28,13 +28,14 @@ Vm::Vm(Irq::Pool & user_irq_pool,
|
||||
Identity & id)
|
||||
:
|
||||
Kernel::Object { *this },
|
||||
Cpu_context(cpu, Scheduler::Priority::min(), 0),
|
||||
Cpu_job(Scheduler::Priority::min(), 0),
|
||||
_user_irq_pool(user_irq_pool),
|
||||
_state(data),
|
||||
_context(context),
|
||||
_id(id),
|
||||
_vcpu_context(cpu)
|
||||
{
|
||||
affinity(cpu);
|
||||
/* once constructed, exit with a startup exception */
|
||||
pause();
|
||||
_state.cpu_exception = Genode::VCPU_EXCEPTION_STARTUP;
|
||||
@@ -45,12 +46,12 @@ Vm::Vm(Irq::Pool & user_irq_pool,
|
||||
Vm::~Vm() {}
|
||||
|
||||
|
||||
void Vm::exception()
|
||||
void Vm::exception(Cpu & cpu)
|
||||
{
|
||||
switch(_state.cpu_exception) {
|
||||
case Genode::Cpu_state::INTERRUPT_REQUEST: [[fallthrough]];
|
||||
case Genode::Cpu_state::FAST_INTERRUPT_REQUEST:
|
||||
_interrupt(_user_irq_pool);
|
||||
_interrupt(_user_irq_pool, cpu.id());
|
||||
return;
|
||||
case Genode::Cpu_state::DATA_ABORT:
|
||||
_state.dfar = Cpu::Dfar::read();
|
||||
@@ -68,19 +69,19 @@ bool secure_irq(unsigned const i);
|
||||
extern "C" void monitor_mode_enter_normal_world(Genode::Vcpu_state&, void*);
|
||||
|
||||
|
||||
void Vm::proceed()
|
||||
void Vm::proceed(Cpu & cpu)
|
||||
{
|
||||
unsigned const irq = _state.irq_injection;
|
||||
if (irq) {
|
||||
if (_cpu().pic().secure(irq)) {
|
||||
if (cpu.pic().secure(irq)) {
|
||||
Genode::raw("Refuse to inject secure IRQ into VM");
|
||||
} else {
|
||||
_cpu().pic().trigger(irq);
|
||||
cpu.pic().trigger(irq);
|
||||
_state.irq_injection = 0;
|
||||
}
|
||||
}
|
||||
|
||||
monitor_mode_enter_normal_world(_state, (void*) _cpu().stack_start());
|
||||
monitor_mode_enter_normal_world(_state, (void*) cpu.stack_start());
|
||||
}
|
||||
|
||||
|
||||
|
||||
@@ -17,6 +17,7 @@
|
||||
/* core includes */
|
||||
#include <platform.h>
|
||||
#include <platform_services.h>
|
||||
#include <core_env.h>
|
||||
#include <core_service.h>
|
||||
#include <vm_root.h>
|
||||
#include <map_local.h>
|
||||
@@ -28,13 +29,11 @@ extern int monitor_mode_exception_vector;
|
||||
/*
|
||||
* Add TrustZone specific vm service
|
||||
*/
|
||||
void Core::platform_add_local_services(Rpc_entrypoint &ep,
|
||||
Sliced_heap &sliced_heap,
|
||||
Registry<Service> &services,
|
||||
Trace::Source_registry &trace_sources,
|
||||
Ram_allocator &core_ram,
|
||||
Region_map &core_rm,
|
||||
Range_allocator &)
|
||||
void Core::platform_add_local_services(Rpc_entrypoint &ep,
|
||||
Sliced_heap &sliced_heap,
|
||||
Registry<Service> &local_services,
|
||||
Core::Trace::Source_registry &trace_sources,
|
||||
Ram_allocator &)
|
||||
{
|
||||
static addr_t const phys_base =
|
||||
Platform::core_phys_addr((addr_t)&monitor_mode_exception_vector);
|
||||
@@ -42,7 +41,8 @@ void Core::platform_add_local_services(Rpc_entrypoint &ep,
|
||||
map_local(phys_base, Hw::Mm::system_exception_vector().base, 1,
|
||||
Hw::PAGE_FLAGS_KERN_TEXT);
|
||||
|
||||
static Vm_root vm_root(ep, sliced_heap, core_ram, core_rm, trace_sources);
|
||||
static Vm_root vm_root(ep, sliced_heap, core_env().ram_allocator(),
|
||||
core_env().local_rm(), trace_sources);
|
||||
|
||||
static Core_service<Vm_session_component> vm_service(services, vm_root);
|
||||
static Core_service<Vm_session_component> vm_service(local_services, vm_root);
|
||||
}
|
||||
|
||||
@@ -58,7 +58,7 @@ Genode::addr_t Vm_session_component::_alloc_vcpu_data(Genode::addr_t ds_addr)
|
||||
}
|
||||
|
||||
|
||||
Vm_session_component::Vm_session_component(Vmid_allocator &vmids, Rpc_entrypoint &ep,
|
||||
Vm_session_component::Vm_session_component(Rpc_entrypoint &ep,
|
||||
Resources resources,
|
||||
Label const &,
|
||||
Diag,
|
||||
@@ -74,7 +74,6 @@ Vm_session_component::Vm_session_component(Vmid_allocator &vmids, Rpc_entrypoint
|
||||
_region_map(region_map),
|
||||
_table(*construct_at<Board::Vm_page_table>(_alloc_table())),
|
||||
_table_array(dummy_array()),
|
||||
_vmid_alloc(vmids),
|
||||
_id({id_alloc++, nullptr})
|
||||
{
|
||||
if (_id.id) {
|
||||
|
||||
@@ -101,7 +101,7 @@ void Board::Vcpu_context::Vm_irq::handle(Vm & vm, unsigned irq) {
|
||||
|
||||
void Board::Vcpu_context::Vm_irq::occurred()
|
||||
{
|
||||
Vm *vm = dynamic_cast<Vm*>(&_cpu.current_context());
|
||||
Vm *vm = dynamic_cast<Vm*>(&_cpu.scheduled_job());
|
||||
if (!vm) Genode::raw("VM interrupt while VM is not running!");
|
||||
else handle(*vm, _irq_nr);
|
||||
}
|
||||
@@ -140,13 +140,14 @@ Kernel::Vm::Vm(Irq::Pool & user_irq_pool,
|
||||
Identity & id)
|
||||
:
|
||||
Kernel::Object { *this },
|
||||
Cpu_context(cpu, Scheduler::Priority::min(), 0),
|
||||
Cpu_job(Scheduler::Priority::min(), 0),
|
||||
_user_irq_pool(user_irq_pool),
|
||||
_state(data),
|
||||
_context(context),
|
||||
_id(id),
|
||||
_vcpu_context(cpu)
|
||||
{
|
||||
affinity(cpu);
|
||||
/* once constructed, exit with a startup exception */
|
||||
pause();
|
||||
_state.cpu_exception = Genode::VCPU_EXCEPTION_STARTUP;
|
||||
@@ -163,29 +164,29 @@ Kernel::Vm::~Vm()
|
||||
}
|
||||
|
||||
|
||||
void Kernel::Vm::exception()
|
||||
void Kernel::Vm::exception(Cpu & cpu)
|
||||
{
|
||||
switch(_state.cpu_exception) {
|
||||
case Genode::Cpu_state::INTERRUPT_REQUEST:
|
||||
case Genode::Cpu_state::FAST_INTERRUPT_REQUEST:
|
||||
_interrupt(_user_irq_pool);
|
||||
_interrupt(_user_irq_pool, cpu.id());
|
||||
break;
|
||||
default:
|
||||
pause();
|
||||
_context.submit(1);
|
||||
}
|
||||
|
||||
if (_cpu().pic().ack_virtual_irq(_vcpu_context.pic))
|
||||
if (cpu.pic().ack_virtual_irq(_vcpu_context.pic))
|
||||
inject_irq(Board::VT_MAINTAINANCE_IRQ);
|
||||
_vcpu_context.vtimer_irq.disable();
|
||||
}
|
||||
|
||||
|
||||
void Kernel::Vm::proceed()
|
||||
void Kernel::Vm::proceed(Cpu & cpu)
|
||||
{
|
||||
if (_state.timer.irq) _vcpu_context.vtimer_irq.enable();
|
||||
|
||||
_cpu().pic().insert_virtual_irq(_vcpu_context.pic, _state.irqs.virtual_irq);
|
||||
cpu.pic().insert_virtual_irq(_vcpu_context.pic, _state.irqs.virtual_irq);
|
||||
|
||||
/*
|
||||
* the following values have to be enforced by the hypervisor
|
||||
@@ -201,7 +202,7 @@ void Kernel::Vm::proceed()
|
||||
_state.esr_el2 = Cpu::Hstr::init();
|
||||
_state.hpfar_el2 = Cpu::Hcr::init();
|
||||
|
||||
Hypervisor::switch_world(_state, host_context(_cpu()));
|
||||
Hypervisor::switch_world(_state, host_context(cpu));
|
||||
}
|
||||
|
||||
|
||||
|
||||
@@ -27,7 +27,7 @@ using namespace Kernel;
|
||||
void Thread::_call_suspend() { }
|
||||
|
||||
|
||||
void Thread::exception()
|
||||
void Thread::exception(Cpu & cpu)
|
||||
{
|
||||
switch (regs->exception_type) {
|
||||
case Cpu::RESET: return;
|
||||
@@ -35,7 +35,7 @@ void Thread::exception()
|
||||
case Cpu::IRQ_LEVEL_EL1: [[fallthrough]];
|
||||
case Cpu::FIQ_LEVEL_EL0: [[fallthrough]];
|
||||
case Cpu::FIQ_LEVEL_EL1:
|
||||
_interrupt(_user_irq_pool);
|
||||
_interrupt(_user_irq_pool, cpu.id());
|
||||
return;
|
||||
case Cpu::SYNC_LEVEL_EL0: [[fallthrough]];
|
||||
case Cpu::SYNC_LEVEL_EL1:
|
||||
@@ -94,51 +94,51 @@ void Kernel::Thread::Tlb_invalidation::execute(Cpu &) { }
|
||||
void Thread::Flush_and_stop_cpu::execute(Cpu &) { }
|
||||
|
||||
|
||||
void Cpu::Halt_job::proceed() { }
|
||||
void Cpu::Halt_job::proceed(Kernel::Cpu &) { }
|
||||
|
||||
|
||||
bool Kernel::Pd::invalidate_tlb(Cpu & cpu, addr_t addr, size_t size)
|
||||
{
|
||||
using namespace Genode;
|
||||
bool Kernel::Pd::invalidate_tlb(Cpu & cpu, addr_t addr, size_t size)
|
||||
{
|
||||
using namespace Genode;
|
||||
|
||||
/* only apply to the active cpu */
|
||||
if (cpu.id() != Cpu::executing_id())
|
||||
return false;
|
||||
|
||||
/**
|
||||
* The kernel part of the address space is mapped as global
|
||||
* therefore we have to invalidate it differently
|
||||
*/
|
||||
if (addr >= Hw::Mm::supervisor_exception_vector().base) {
|
||||
for (addr_t end = addr+size; addr < end; addr += get_page_size())
|
||||
asm volatile ("tlbi vaae1is, %0" :: "r" (addr >> 12));
|
||||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
* Too big mappings will result in long running invalidation loops,
|
||||
* just invalidate the whole tlb for the ASID then.
|
||||
*/
|
||||
if (size > 8 * get_page_size()) {
|
||||
asm volatile ("tlbi aside1is, %0"
|
||||
:: "r" ((uint64_t)mmu_regs.id() << 48));
|
||||
return false;
|
||||
}
|
||||
/* only apply to the active cpu */
|
||||
if (cpu.id() != Cpu::executing_id())
|
||||
return false;
|
||||
|
||||
/**
|
||||
* The kernel part of the address space is mapped as global
|
||||
* therefore we have to invalidate it differently
|
||||
*/
|
||||
if (addr >= Hw::Mm::supervisor_exception_vector().base) {
|
||||
for (addr_t end = addr+size; addr < end; addr += get_page_size())
|
||||
asm volatile ("tlbi vae1is, %0"
|
||||
:: "r" (addr >> 12 | (uint64_t)mmu_regs.id() << 48));
|
||||
asm volatile ("tlbi vaae1is, %0" :: "r" (addr >> 12));
|
||||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
* Too big mappings will result in long running invalidation loops,
|
||||
* just invalidate the whole tlb for the ASID then.
|
||||
*/
|
||||
if (size > 8 * get_page_size()) {
|
||||
asm volatile ("tlbi aside1is, %0"
|
||||
:: "r" ((uint64_t)mmu_regs.id() << 48));
|
||||
return false;
|
||||
}
|
||||
|
||||
void Thread::proceed()
|
||||
{
|
||||
if (!_cpu().active(pd().mmu_regs) && type() != CORE)
|
||||
_cpu().switch_to(pd().mmu_regs);
|
||||
for (addr_t end = addr+size; addr < end; addr += get_page_size())
|
||||
asm volatile ("tlbi vae1is, %0"
|
||||
:: "r" (addr >> 12 | (uint64_t)mmu_regs.id() << 48));
|
||||
return false;
|
||||
}
|
||||
|
||||
kernel_to_user_context_switch((static_cast<Core::Cpu::Context*>(&*regs)),
|
||||
(void*)_cpu().stack_start());
|
||||
|
||||
void Thread::proceed(Cpu & cpu)
|
||||
{
|
||||
if (!cpu.active(pd().mmu_regs) && type() != CORE)
|
||||
cpu.switch_to(pd().mmu_regs);
|
||||
|
||||
kernel_to_user_context_switch((static_cast<Cpu::Context*>(&*regs)),
|
||||
(void*)cpu.stack_start());
|
||||
}
|
||||
|
||||
|
||||
|
||||
@@ -76,7 +76,7 @@ void Board::Vcpu_context::Vm_irq::handle(Vm & vm, unsigned irq) {
|
||||
|
||||
void Board::Vcpu_context::Vm_irq::occurred()
|
||||
{
|
||||
Vm *vm = dynamic_cast<Vm*>(&_cpu.current_context());
|
||||
Vm *vm = dynamic_cast<Vm*>(&_cpu.scheduled_job());
|
||||
if (!vm) Genode::raw("VM interrupt while VM is not running!");
|
||||
else handle(*vm, _irq_nr);
|
||||
}
|
||||
@@ -115,13 +115,15 @@ Vm::Vm(Irq::Pool & user_irq_pool,
|
||||
Identity & id)
|
||||
:
|
||||
Kernel::Object { *this },
|
||||
Cpu_context(cpu, Scheduler::Priority::min(), 0),
|
||||
Cpu_job(Scheduler::Priority::min(), 0),
|
||||
_user_irq_pool(user_irq_pool),
|
||||
_state(data),
|
||||
_context(context),
|
||||
_id(id),
|
||||
_vcpu_context(cpu)
|
||||
{
|
||||
affinity(cpu);
|
||||
|
||||
_state.id_aa64isar0_el1 = Cpu::Id_aa64isar0_el1::read();
|
||||
_state.id_aa64isar1_el1 = Cpu::Id_aa64isar1_el1::read();
|
||||
_state.id_aa64mmfr0_el1 = Cpu::Id_aa64mmfr0_el1::read();
|
||||
@@ -165,14 +167,14 @@ Vm::~Vm()
|
||||
}
|
||||
|
||||
|
||||
void Vm::exception()
|
||||
void Vm::exception(Cpu & cpu)
|
||||
{
|
||||
switch (_state.exception_type) {
|
||||
case Cpu::IRQ_LEVEL_EL0: [[fallthrough]];
|
||||
case Cpu::IRQ_LEVEL_EL1: [[fallthrough]];
|
||||
case Cpu::FIQ_LEVEL_EL0: [[fallthrough]];
|
||||
case Cpu::FIQ_LEVEL_EL1:
|
||||
_interrupt(_user_irq_pool);
|
||||
_interrupt(_user_irq_pool, cpu.id());
|
||||
break;
|
||||
case Cpu::SYNC_LEVEL_EL0: [[fallthrough]];
|
||||
case Cpu::SYNC_LEVEL_EL1: [[fallthrough]];
|
||||
@@ -186,17 +188,17 @@ void Vm::exception()
|
||||
" not implemented!");
|
||||
};
|
||||
|
||||
if (_cpu().pic().ack_virtual_irq(_vcpu_context.pic))
|
||||
if (cpu.pic().ack_virtual_irq(_vcpu_context.pic))
|
||||
inject_irq(Board::VT_MAINTAINANCE_IRQ);
|
||||
_vcpu_context.vtimer_irq.disable();
|
||||
}
|
||||
|
||||
|
||||
void Vm::proceed()
|
||||
void Vm::proceed(Cpu & cpu)
|
||||
{
|
||||
if (_state.timer.irq) _vcpu_context.vtimer_irq.enable();
|
||||
|
||||
_cpu().pic().insert_virtual_irq(_vcpu_context.pic, _state.irqs.virtual_irq);
|
||||
cpu.pic().insert_virtual_irq(_vcpu_context.pic, _state.irqs.virtual_irq);
|
||||
|
||||
/*
|
||||
* the following values have to be enforced by the hypervisor
|
||||
@@ -206,7 +208,7 @@ void Vm::proceed()
|
||||
Cpu::Vttbr_el2::Asid::set(vttbr_el2, _id.id);
|
||||
addr_t guest = Hw::Mm::el2_addr(&_state);
|
||||
addr_t pic = Hw::Mm::el2_addr(&_vcpu_context.pic);
|
||||
addr_t host = Hw::Mm::el2_addr(&host_context(_cpu()));
|
||||
addr_t host = Hw::Mm::el2_addr(&host_context(cpu));
|
||||
|
||||
Hypervisor::switch_world(guest, host, pic, vttbr_el2);
|
||||
}
|
||||
|
||||
@@ -49,10 +49,6 @@ using namespace Kernel;
|
||||
CALL_4_FILL_ARG_REGS \
|
||||
register Call_arg arg_4_reg asm("a4") = arg_4;
|
||||
|
||||
#define CALL_6_FILL_ARG_REGS \
|
||||
CALL_5_FILL_ARG_REGS \
|
||||
register Call_arg arg_5_reg asm("a5") = arg_5;
|
||||
|
||||
extern Genode::addr_t _kernel_entry;
|
||||
|
||||
/*
|
||||
@@ -79,7 +75,6 @@ extern Genode::addr_t _kernel_entry;
|
||||
#define CALL_3_SWI CALL_2_SWI, "r" (arg_2_reg)
|
||||
#define CALL_4_SWI CALL_3_SWI, "r" (arg_3_reg)
|
||||
#define CALL_5_SWI CALL_4_SWI, "r" (arg_4_reg)
|
||||
#define CALL_6_SWI CALL_5_SWI, "r" (arg_5_reg)
|
||||
|
||||
|
||||
/******************
|
||||
@@ -142,16 +137,3 @@ Call_ret Kernel::call(Call_arg arg_0,
|
||||
asm volatile(CALL_5_SWI : "ra");
|
||||
return arg_0_reg;
|
||||
}
|
||||
|
||||
|
||||
Call_ret Kernel::call(Call_arg arg_0,
|
||||
Call_arg arg_1,
|
||||
Call_arg arg_2,
|
||||
Call_arg arg_3,
|
||||
Call_arg arg_4,
|
||||
Call_arg arg_5)
|
||||
{
|
||||
CALL_6_FILL_ARG_REGS
|
||||
asm volatile(CALL_6_SWI : "ra");
|
||||
return arg_0_reg;
|
||||
}
|
||||
|
||||
@@ -25,21 +25,21 @@ void Thread::Tlb_invalidation::execute(Cpu &) { }
|
||||
void Thread::Flush_and_stop_cpu::execute(Cpu &) { }
|
||||
|
||||
|
||||
void Cpu::Halt_job::proceed() { }
|
||||
void Cpu::Halt_job::proceed(Kernel::Cpu &) { }
|
||||
|
||||
|
||||
void Thread::exception()
|
||||
void Thread::exception(Cpu & cpu)
|
||||
{
|
||||
using Context = Core::Cpu::Context;
|
||||
using Stval = Core::Cpu::Stval;
|
||||
|
||||
if (regs->is_irq()) {
|
||||
/* cpu-local timer interrupt */
|
||||
if (regs->irq() == _cpu().timer().interrupt_id()) {
|
||||
_cpu().handle_if_cpu_local_interrupt(_cpu().timer().interrupt_id());
|
||||
if (regs->irq() == cpu.timer().interrupt_id()) {
|
||||
cpu.handle_if_cpu_local_interrupt(cpu.timer().interrupt_id());
|
||||
} else {
|
||||
/* interrupt controller */
|
||||
_interrupt(_user_irq_pool);
|
||||
_interrupt(_user_irq_pool, 0);
|
||||
}
|
||||
return;
|
||||
}
|
||||
@@ -113,7 +113,7 @@ void Kernel::Thread::_call_cache_line_size()
|
||||
}
|
||||
|
||||
|
||||
void Kernel::Thread::proceed()
|
||||
void Kernel::Thread::proceed(Cpu & cpu)
|
||||
{
|
||||
/*
|
||||
* The sstatus register defines to which privilege level
|
||||
@@ -123,8 +123,8 @@ void Kernel::Thread::proceed()
|
||||
Cpu::Sstatus::Spp::set(v, (type() == USER) ? 0 : 1);
|
||||
Cpu::Sstatus::write(v);
|
||||
|
||||
if (!_cpu().active(pd().mmu_regs) && type() != CORE)
|
||||
_cpu().switch_to(_pd->mmu_regs);
|
||||
if (!cpu.active(pd().mmu_regs) && type() != CORE)
|
||||
cpu.switch_to(_pd->mmu_regs);
|
||||
|
||||
asm volatile("csrw sscratch, %1 \n"
|
||||
"mv x31, %0 \n"
|
||||
|
||||
@@ -55,9 +55,9 @@ void Kernel::Thread::Flush_and_stop_cpu::execute(Cpu &cpu)
|
||||
}
|
||||
|
||||
|
||||
void Kernel::Cpu::Halt_job::Halt_job::proceed()
|
||||
void Kernel::Cpu::Halt_job::Halt_job::proceed(Kernel::Cpu &cpu)
|
||||
{
|
||||
switch (_cpu().state()) {
|
||||
switch (cpu.state()) {
|
||||
case HALT:
|
||||
while (true) {
|
||||
asm volatile ("hlt"); }
|
||||
@@ -83,7 +83,7 @@ void Kernel::Cpu::Halt_job::Halt_job::proceed()
|
||||
/* adhere to ACPI specification */
|
||||
asm volatile ("wbinvd" : : : "memory");
|
||||
|
||||
fadt.suspend(_cpu().suspend.typ_a, _cpu().suspend.typ_b);
|
||||
fadt.suspend(cpu.suspend.typ_a, cpu.suspend.typ_b);
|
||||
|
||||
Genode::raw("kernel: unexpected resume");
|
||||
});
|
||||
@@ -143,7 +143,7 @@ void Kernel::Thread::_call_suspend()
|
||||
/* single core CPU case */
|
||||
if (cpu_count == 1) {
|
||||
/* current CPU triggers final ACPI suspend outside kernel lock */
|
||||
_cpu().next_state_suspend();
|
||||
_cpu->next_state_suspend();
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -176,12 +176,12 @@ void Kernel::Thread::_call_cache_line_size()
|
||||
}
|
||||
|
||||
|
||||
void Kernel::Thread::proceed()
|
||||
void Kernel::Thread::proceed(Cpu & cpu)
|
||||
{
|
||||
if (!_cpu().active(pd().mmu_regs) && type() != CORE)
|
||||
_cpu().switch_to(pd().mmu_regs);
|
||||
if (!cpu.active(pd().mmu_regs) && type() != CORE)
|
||||
cpu.switch_to(pd().mmu_regs);
|
||||
|
||||
_cpu().switch_to(*regs);
|
||||
cpu.switch_to(*regs);
|
||||
|
||||
asm volatile("fxrstor (%1) \n"
|
||||
"mov %0, %%rsp \n"
|
||||
|
||||
@@ -20,7 +20,7 @@
|
||||
using namespace Kernel;
|
||||
|
||||
|
||||
void Thread::exception()
|
||||
void Thread::exception(Cpu & cpu)
|
||||
{
|
||||
using Genode::Cpu_state;
|
||||
|
||||
@@ -45,7 +45,7 @@ void Thread::exception()
|
||||
|
||||
if (regs->trapno >= Cpu_state::INTERRUPTS_START &&
|
||||
regs->trapno <= Cpu_state::INTERRUPTS_END) {
|
||||
_interrupt(_user_irq_pool);
|
||||
_interrupt(_user_irq_pool, cpu.id());
|
||||
return;
|
||||
}
|
||||
|
||||
|
||||
@@ -47,8 +47,6 @@ Local_interrupt_controller(Global_interrupt_controller &global_irq_ctrl)
|
||||
|
||||
void Local_interrupt_controller::init()
|
||||
{
|
||||
using Hw::outb;
|
||||
|
||||
/* Start initialization sequence in cascade mode */
|
||||
outb(PIC_CMD_MASTER, 0x11);
|
||||
outb(PIC_CMD_SLAVE, 0x11);
|
||||
|
||||
@@ -15,6 +15,9 @@
|
||||
|
||||
#include <hw/spec/x86_64/x86_64.h>
|
||||
|
||||
/* Genode includes */
|
||||
#include <drivers/timer/util.h>
|
||||
|
||||
/* core includes */
|
||||
#include <kernel/timer.h>
|
||||
#include <platform.h>
|
||||
@@ -22,9 +25,37 @@
|
||||
using namespace Core;
|
||||
using namespace Kernel;
|
||||
|
||||
|
||||
uint32_t Board::Timer::pit_calc_timer_freq(void)
{
uint32_t t_start, t_end;

/* set channel gate high and disable speaker */
outb(PIT_CH2_GATE, (uint8_t)((inb(0x61) & ~0x02) | 0x01));

/* set timer counter (mode 0, binary count) */
outb(PIT_MODE, 0xb0);
outb(PIT_CH2_DATA, PIT_SLEEP_TICS & 0xff);
outb(PIT_CH2_DATA, PIT_SLEEP_TICS >> 8);

write<Tmr_initial>(~0U);

t_start = read<Tmr_current>();
while ((inb(PIT_CH2_GATE) & 0x20) == 0)
{
asm volatile("pause" : : : "memory");
}
t_end = read<Tmr_current>();

write<Tmr_initial>(0);

return (t_start - t_end) / PIT_SLEEP_MS;
}
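
To make the calibration above concrete: the PIT input clock runs at 1193182 Hz, so PIT_SLEEP_TICS = (1193182 / 1000) * 50 = 59650 ticks, which channel 2 counts down in roughly 50 ms. The LAPIC timer is started at ~0U, sampled before and after that window, and (t_start - t_end) / PIT_SLEEP_MS then yields LAPIC ticks per millisecond. A tiny self-contained check of that arithmetic (the t_end value is a made-up example corresponding to an assumed 25 MHz LAPIC timer clock):

#include <cstdint>
#include <cstdio>

int main()
{
	constexpr std::uint32_t pit_tick_rate = 1193182u;  /* PIT input clock in Hz */
	constexpr std::uint32_t sleep_ms      = 50;
	constexpr std::uint32_t sleep_tics    = (pit_tick_rate / 1000) * sleep_ms;  /* 59650 */

	/* hypothetical LAPIC counter readings around the ~50 ms PIT window */
	std::uint32_t const t_start = ~0u;
	std::uint32_t const t_end   = t_start - 1250000u;  /* assumes a 25 MHz LAPIC timer clock */

	std::printf("sleep_tics=%u ticks_per_ms=%u\n",
	            sleep_tics, (t_start - t_end) / sleep_ms);  /* prints 59650 and 25000 */
}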
|
||||
|
||||
|
||||
Board::Timer::Timer(unsigned)
|
||||
:
|
||||
Local_apic(Platform::mmio_to_virt(Hw::Cpu_memory_map::lapic_phys_base()))
|
||||
Mmio({(char *)Platform::mmio_to_virt(Hw::Cpu_memory_map::lapic_phys_base()), Mmio::SIZE})
|
||||
{
|
||||
init();
|
||||
}
|
||||
@@ -44,10 +75,28 @@ void Board::Timer::init()
return;
}

Platform::apply_with_boot_info([&](auto const &boot_info) {
ticks_per_ms = boot_info.plat_info.lapic_freq_khz;
divider = boot_info.plat_info.lapic_div;
});
/* calibrate LAPIC frequency to fulfill our requirements */
for (Divide_configuration::access_t div = Divide_configuration::Divide_value::MAX;
div && ticks_per_ms < TIMER_MIN_TICKS_PER_MS; div--)
{
if (!div){
raw("Failed to calibrate timer frequency");
throw Calibration_failed();
}
write<Divide_configuration::Divide_value>((uint8_t)div);

/* Calculate timer frequency */
ticks_per_ms = pit_calc_timer_freq();
divider = div;
}

/**
* Disable PIT timer channel. This is necessary since BIOS sets up
* channel 0 to fire periodically.
*/
outb(Board::Timer::PIT_MODE, 0x30);
outb(Board::Timer::PIT_CH0_DATA, 0);
outb(Board::Timer::PIT_CH0_DATA, 0);
}

|
||||
|
||||
repos/base-hw/src/core/spec/x86_64/pit.h (new file, 80 lines)
@@ -0,0 +1,80 @@
|
||||
/*
|
||||
* \brief Timer driver for core
|
||||
* \author Adrian-Ken Rueegsegger
|
||||
* \author Reto Buerki
|
||||
* \date 2015-02-06
|
||||
*/
|
||||
|
||||
/*
|
||||
* Copyright (C) 2015-2017 Genode Labs GmbH
|
||||
*
|
||||
* This file is part of the Genode OS framework, which is distributed
|
||||
* under the terms of the GNU Affero General Public License version 3.
|
||||
*/
|
||||
|
||||
#ifndef _SRC__CORE__SPEC__ARM__PIT_H_
|
||||
#define _SRC__CORE__SPEC__ARM__PIT_H_
|
||||
|
||||
/* Genode includes */
|
||||
#include <util/mmio.h>
|
||||
#include <base/stdint.h>
|
||||
|
||||
/* core includes */
|
||||
#include <port_io.h>
|
||||
|
||||
namespace Board { class Timer; }
|
||||
|
||||
|
||||
/**
|
||||
* LAPIC-based timer driver for core
|
||||
*/
|
||||
struct Board::Timer: Genode::Mmio<Hw::Cpu_memory_map::LAPIC_SIZE>
|
||||
{
|
||||
enum {
|
||||
/* PIT constants */
|
||||
PIT_TICK_RATE = 1193182ul,
|
||||
PIT_SLEEP_MS = 50,
|
||||
PIT_SLEEP_TICS = (PIT_TICK_RATE / 1000) * PIT_SLEEP_MS,
|
||||
PIT_CH0_DATA = 0x40,
|
||||
PIT_CH2_DATA = 0x42,
|
||||
PIT_CH2_GATE = 0x61,
|
||||
PIT_MODE = 0x43,
|
||||
};
|
||||
|
||||
/* Timer registers */
|
||||
struct Tmr_lvt : Register<0x320, 32>
|
||||
{
|
||||
struct Vector : Bitfield<0, 8> { };
|
||||
struct Delivery : Bitfield<8, 3> { };
|
||||
struct Mask : Bitfield<16, 1> { };
|
||||
struct Timer_mode : Bitfield<17, 2> { };
|
||||
};
|
||||
|
||||
struct Tmr_initial : Register <0x380, 32> { };
|
||||
struct Tmr_current : Register <0x390, 32> { };
|
||||
|
||||
struct Divide_configuration : Register <0x03e0, 32>
|
||||
{
|
||||
struct Divide_value_0_2 : Bitfield<0, 2> { };
|
||||
struct Divide_value_2_1 : Bitfield<3, 1> { };
|
||||
struct Divide_value :
|
||||
Genode::Bitset_2<Divide_value_0_2, Divide_value_2_1>
|
||||
{
|
||||
enum { MAX = 6 };
|
||||
};
|
||||
};
|
||||
|
||||
struct Calibration_failed : Genode::Exception { };
|
||||
|
||||
Divide_configuration::access_t divider = 0;
|
||||
Genode::uint32_t ticks_per_ms = 0;
|
||||
|
||||
/* Measure LAPIC timer frequency using PIT channel 2 */
|
||||
Genode::uint32_t pit_calc_timer_freq(void);
|
||||
|
||||
Timer(unsigned);
|
||||
|
||||
void init();
|
||||
};
|
||||
|
||||
#endif /* _SRC__CORE__SPEC__ARM__PIT_H_ */
|
||||
@@ -59,8 +59,8 @@ void Platform::_init_additional_platform_info(Xml_generator &xml)
|
||||
xml.attribute("vmx", Hw::Virtualization_support::has_vmx());
|
||||
});
|
||||
xml.node("tsc", [&] {
|
||||
xml.attribute("invariant", Hw::Tsc::invariant_tsc());
|
||||
xml.attribute("freq_khz", _boot_info().plat_info.tsc_freq_khz);
|
||||
xml.attribute("invariant", Hw::Lapic::invariant_tsc());
|
||||
xml.attribute("freq_khz", Hw::Lapic::tsc_freq());
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
@@ -11,15 +11,13 @@
|
||||
* under the terms of the GNU Affero General Public License version 3.
|
||||
*/
|
||||
|
||||
#ifndef _INCLUDE__SPEC__X86_64__PORT_IO_H_
|
||||
#define _INCLUDE__SPEC__X86_64__PORT_IO_H_
|
||||
#ifndef _CORE__SPEC__X86_64__PORT_IO_H_
|
||||
#define _CORE__SPEC__X86_64__PORT_IO_H_
|
||||
|
||||
#include <base/fixed_stdint.h>
|
||||
/* core includes */
|
||||
#include <types.h>
|
||||
|
||||
namespace Hw {
|
||||
|
||||
using Genode::uint8_t;
|
||||
using Genode::uint16_t;
|
||||
namespace Core {
|
||||
|
||||
/**
|
||||
* Read byte from I/O port
|
||||
@@ -40,4 +38,4 @@ namespace Hw {
|
||||
}
|
||||
}
|
||||
|
||||
#endif /* _INCLUDE__SPEC__X86_64__PORT_IO_H_ */
|
||||
#endif /* _CORE__SPEC__X86_64__PORT_IO_H_ */
|
||||
@@ -1,40 +0,0 @@
|
||||
/*
|
||||
* \brief Timer driver for core
|
||||
* \author Adrian-Ken Rueegsegger
|
||||
* \author Reto Buerki
|
||||
* \date 2015-02-06
|
||||
*/
|
||||
|
||||
/*
|
||||
* Copyright (C) 2015-2017 Genode Labs GmbH
|
||||
*
|
||||
* This file is part of the Genode OS framework, which is distributed
|
||||
* under the terms of the GNU Affero General Public License version 3.
|
||||
*/
|
||||
|
||||
#ifndef _SRC__CORE__SPEC__ARM__PIT_H_
|
||||
#define _SRC__CORE__SPEC__ARM__PIT_H_
|
||||
|
||||
/* Genode includes */
|
||||
#include <base/stdint.h>
|
||||
|
||||
/* hw includes */
|
||||
#include <hw/spec/x86_64/apic.h>
|
||||
|
||||
namespace Board { class Timer; }
|
||||
|
||||
|
||||
/**
|
||||
* LAPIC-based timer driver for core
|
||||
*/
|
||||
struct Board::Timer: public Hw::Local_apic
|
||||
{
|
||||
Divide_configuration::access_t divider = 0;
|
||||
Genode::uint32_t ticks_per_ms = 0;
|
||||
|
||||
Timer(unsigned);
|
||||
|
||||
void init();
|
||||
};
|
||||
|
||||
#endif /* _SRC__CORE__SPEC__ARM__PIT_H_ */
|
||||
@@ -1,128 +0,0 @@
|
||||
/*
|
||||
* \brief Vm_session vCPU
|
||||
* \author Stefan Kalkowski
|
||||
* \author Benjamin Lamowski
|
||||
* \date 2024-11-26
|
||||
*/
|
||||
|
||||
/*
|
||||
* Copyright (C) 2015-2024 Genode Labs GmbH
|
||||
*
|
||||
* This file is part of the Genode OS framework, which is distributed
|
||||
* under the terms of the GNU Affero General Public License version 3.
|
||||
*/
|
||||
|
||||
#ifndef _CORE__VCPU_H_
|
||||
#define _CORE__VCPU_H_
|
||||
|
||||
/* base includes */
|
||||
#include <base/attached_dataspace.h>
|
||||
#include <vm_session/vm_session.h>
|
||||
|
||||
/* base-hw includes */
|
||||
#include <hw_native_vcpu/hw_native_vcpu.h>
|
||||
#include <kernel/vm.h>
|
||||
|
||||
/* core includes */
|
||||
#include <phys_allocated.h>
|
||||
#include <region_map_component.h>
|
||||
|
||||
namespace Core { struct Vcpu; }
|
||||
|
||||
|
||||
class Core::Vcpu : public Rpc_object<Vm_session::Native_vcpu, Vcpu>
|
||||
{
|
||||
private:
|
||||
struct Data_pages {
|
||||
uint8_t _[Vcpu_data::size()];
|
||||
};
|
||||
|
||||
Kernel::Vm::Identity &_id;
|
||||
Rpc_entrypoint &_ep;
|
||||
Vcpu_data _vcpu_data { };
|
||||
Kernel_object<Kernel::Vm> _kobj { };
|
||||
Constrained_ram_allocator &_ram;
|
||||
Ram_dataspace_capability _ds_cap { };
|
||||
Region_map &_region_map;
|
||||
Affinity::Location _location;
|
||||
Phys_allocated<Data_pages> _vcpu_data_pages;
|
||||
|
||||
constexpr size_t vcpu_state_size()
|
||||
{
|
||||
return align_addr(sizeof(Board::Vcpu_state),
|
||||
get_page_size_log2());
|
||||
}
|
||||
|
||||
public:
|
||||
|
||||
Vcpu(Kernel::Vm::Identity &id,
|
||||
Rpc_entrypoint &ep,
|
||||
Constrained_ram_allocator &constrained_ram_alloc,
|
||||
Region_map ®ion_map,
|
||||
Affinity::Location location)
|
||||
:
|
||||
_id(id),
|
||||
_ep(ep),
|
||||
_ram(constrained_ram_alloc),
|
||||
_ds_cap( {_ram.alloc(vcpu_state_size(), Cache::UNCACHED)} ),
|
||||
_region_map(region_map),
|
||||
_location(location),
|
||||
_vcpu_data_pages(ep, constrained_ram_alloc, region_map)
|
||||
{
|
||||
Region_map::Attr attr { };
|
||||
attr.writeable = true;
|
||||
_vcpu_data.vcpu_state = _region_map.attach(_ds_cap, attr).convert<Vcpu_state *>(
|
||||
[&] (Region_map::Range range) { return (Vcpu_state *)range.start; },
|
||||
[&] (Region_map::Attach_error) -> Vcpu_state * {
|
||||
error("failed to attach VCPU data within core");
|
||||
return nullptr;
|
||||
});
|
||||
|
||||
if (!_vcpu_data.vcpu_state) {
|
||||
_ram.free(_ds_cap);
|
||||
|
||||
throw Attached_dataspace::Region_conflict();
|
||||
}
|
||||
|
||||
_vcpu_data.virt_area = &_vcpu_data_pages.obj;
|
||||
_vcpu_data.phys_addr = _vcpu_data_pages.phys_addr();
|
||||
|
||||
ep.manage(this);
|
||||
}
|
||||
|
||||
~Vcpu()
|
||||
{
|
||||
_region_map.detach((addr_t)_vcpu_data.vcpu_state);
|
||||
_ram.free(_ds_cap);
|
||||
_ep.dissolve(this);
|
||||
}
|
||||
|
||||
/*******************************
|
||||
** Native_vcpu RPC interface **
|
||||
*******************************/
|
||||
|
||||
Capability<Dataspace> state() const { return _ds_cap; }
|
||||
Native_capability native_vcpu() { return _kobj.cap(); }
|
||||
|
||||
void exception_handler(Signal_context_capability handler)
|
||||
{
|
||||
using Genode::warning;
|
||||
if (!handler.valid()) {
|
||||
warning("invalid signal");
|
||||
return;
|
||||
}
|
||||
|
||||
if (_kobj.constructed()) {
|
||||
warning("Cannot register vcpu handler twice");
|
||||
return;
|
||||
}
|
||||
|
||||
unsigned const cpu = _location.xpos();
|
||||
|
||||
if (!_kobj.create(cpu, (void *)&_vcpu_data,
|
||||
Capability_space::capid(handler), _id))
|
||||
warning("Cannot instantiate vm kernel object, invalid signal context?");
|
||||
}
|
||||
};
|
||||
|
||||
#endif /* _CORE__VCPU_H_ */
|
||||
@@ -22,6 +22,7 @@
|
||||
#include <cpu.h>
|
||||
#include <cpu/vcpu_state_virtualization.h>
|
||||
#include <hw/spec/x86_64/x86_64.h>
|
||||
#include <spec/x86_64/virtualization/vm_page_table.h>
|
||||
#include <spec/x86_64/virtualization/svm.h>
|
||||
#include <spec/x86_64/virtualization/vmx.h>
|
||||
|
||||
@@ -33,6 +34,10 @@ namespace Board {
|
||||
using Vcpu_data = Genode::Vcpu_data;
|
||||
using Vcpu_state = Genode::Vcpu_state;
|
||||
|
||||
enum {
|
||||
VCPU_MAX = 16
|
||||
};
|
||||
|
||||
enum Platform_exitcodes : uint64_t {
|
||||
EXIT_NPF = 0xfc,
|
||||
EXIT_INIT = 0xfd,
|
||||
|
||||
@@ -267,7 +267,7 @@ void Vmcb::write_vcpu_state(Vcpu_state &state)
|
||||
/* Guest activity state (actv) not used by SVM */
|
||||
state.actv_state.set_charged();
|
||||
|
||||
state.tsc.charge(Hw::Tsc::rdtsc());
|
||||
state.tsc.charge(Hw::Lapic::rdtsc());
|
||||
state.tsc_offset.charge(v.read<Vmcb_buf::Tsc_offset>());
|
||||
|
||||
state.efer.charge(v.read<Vmcb_buf::Efer>());
|
||||
|
||||
@@ -41,12 +41,15 @@ Vm::Vm(Irq::Pool & user_irq_pool,
|
||||
Identity & id)
|
||||
:
|
||||
Kernel::Object { *this },
|
||||
Cpu_context(cpu, Scheduler::Priority::min(), 0),
|
||||
Cpu_job(Scheduler::Priority::min(), 0),
|
||||
_user_irq_pool(user_irq_pool),
|
||||
_state(*data.vcpu_state),
|
||||
_context(context),
|
||||
_id(id),
|
||||
_vcpu_context(id.id, data) { }
|
||||
_vcpu_context(id.id, data)
|
||||
{
|
||||
affinity(cpu);
|
||||
}
|
||||
|
||||
|
||||
Vm::~Vm()
|
||||
@@ -54,10 +57,10 @@ Vm::~Vm()
|
||||
}
|
||||
|
||||
|
||||
void Vm::proceed()
|
||||
void Vm::proceed(Cpu & cpu)
|
||||
{
|
||||
using namespace Board;
|
||||
_cpu().switch_to(*_vcpu_context.regs);
|
||||
cpu.switch_to(*_vcpu_context.regs);
|
||||
|
||||
if (_vcpu_context.exit_reason == EXIT_INIT) {
|
||||
_vcpu_context.regs->trapno = TRAP_VMSKIP;
|
||||
@@ -80,7 +83,7 @@ void Vm::proceed()
|
||||
}
|
||||
|
||||
|
||||
void Vm::exception()
|
||||
void Vm::exception(Cpu & cpu)
|
||||
{
|
||||
using namespace Board;
|
||||
|
||||
@@ -118,18 +121,18 @@ void Vm::exception()
|
||||
* it needs to handle an exit.
|
||||
*/
|
||||
if (_vcpu_context.exit_reason == EXIT_PAUSED)
|
||||
_interrupt(_user_irq_pool);
|
||||
_interrupt(_user_irq_pool, cpu.id());
|
||||
else
|
||||
pause = true;
|
||||
break;
|
||||
case Cpu_state::INTERRUPTS_START ... Cpu_state::INTERRUPTS_END:
|
||||
_interrupt(_user_irq_pool);
|
||||
_interrupt(_user_irq_pool, cpu.id());
|
||||
break;
|
||||
case TRAP_VMSKIP:
|
||||
/* vCPU is running for the first time */
|
||||
_vcpu_context.initialize(_cpu(),
|
||||
_vcpu_context.initialize(cpu,
|
||||
reinterpret_cast<addr_t>(_id.table));
|
||||
_vcpu_context.tsc_aux_host = _cpu().id();
|
||||
_vcpu_context.tsc_aux_host = cpu.id();
|
||||
/*
|
||||
* We set the artificial startup exit code, stop the
|
||||
* vCPU thread and ask the VMM to handle it.
|
||||
@@ -253,7 +256,7 @@ void Board::Vcpu_context::write_vcpu_state(Vcpu_state &state)
|
||||
state.r14.charge(regs->r14);
|
||||
state.r15.charge(regs->r15);
|
||||
|
||||
state.tsc.charge(Hw::Tsc::rdtsc());
|
||||
state.tsc.charge(Hw::Lapic::rdtsc());
|
||||
|
||||
tsc_aux_guest = Cpu::Ia32_tsc_aux::read();
|
||||
state.tsc_aux.charge(tsc_aux_guest);
|
||||
|
||||
@@ -599,7 +599,7 @@ void Vmcs::write_vcpu_state(Genode::Vcpu_state &state)
|
||||
state.actv_state.charge(
|
||||
static_cast<uint32_t>(read(E_GUEST_ACTIVITY_STATE)));
|
||||
|
||||
state.tsc.charge(Hw::Tsc::rdtsc());
|
||||
state.tsc.charge(Hw::Lapic::rdtsc());
|
||||
state.tsc_offset.charge(read(E_TSC_OFFSET));
|
||||
|
||||
state.efer.charge(read(E_GUEST_IA32_EFER));
|
||||
|
||||
@@ -16,6 +16,7 @@
|
||||
#include <base/service.h>
|
||||
|
||||
/* core includes */
|
||||
#include <core_env.h>
|
||||
#include <platform.h>
|
||||
#include <platform_services.h>
|
||||
#include <vm_root.h>
|
||||
@@ -29,15 +30,16 @@ void Core::platform_add_local_services(Rpc_entrypoint &ep,
|
||||
Sliced_heap &sliced_heap,
|
||||
Registry<Service> &local_services,
|
||||
Trace::Source_registry &trace_sources,
|
||||
Ram_allocator &core_ram,
|
||||
Region_map &core_rm,
|
||||
Range_allocator &io_port_ranges)
|
||||
Ram_allocator &)
|
||||
{
|
||||
static Io_port_root io_port_root(io_port_ranges, sliced_heap);
|
||||
static Io_port_root io_port_root(*core_env().pd_session(),
|
||||
platform().io_port_alloc(), sliced_heap);
|
||||
|
||||
static Vm_root vm_root(ep, sliced_heap, core_ram, core_rm, trace_sources);
|
||||
static Vm_root vm_root(ep, sliced_heap, core_env().ram_allocator(),
|
||||
core_env().local_rm(), trace_sources);
|
||||
|
||||
static Core_service<Session_object<Vm_session>> vm_service(local_services, vm_root);
|
||||
static Core_service<Vm_session_component> vm_service(local_services, vm_root);
|
||||
|
||||
static Core_service<Io_port_session_component> io_port_ls(local_services, io_port_root);
|
||||
static Core_service<Io_port_session_component>
|
||||
io_port_ls(local_services, io_port_root);
|
||||
}
|
||||
|
||||
@@ -1,234 +0,0 @@
|
||||
/*
|
||||
* \brief SVM VM session component for 'base-hw'
|
||||
* \author Stefan Kalkowski
|
||||
* \author Benjamin Lamowski
|
||||
* \date 2024-09-20
|
||||
*/
|
||||
|
||||
/*
|
||||
* Copyright (C) 2015-2024 Genode Labs GmbH
|
||||
*
|
||||
* This file is part of the Genode OS framework, which is distributed
|
||||
* under the terms of the GNU Affero General Public License version 3.
|
||||
*/
|
||||
|
||||
#ifndef _CORE__SVM_VM_SESSION_COMPONENT_H_
|
||||
#define _CORE__SVM_VM_SESSION_COMPONENT_H_
|
||||
|
||||
/* base includes */
|
||||
#include <base/allocator.h>
|
||||
#include <base/session_object.h>
|
||||
#include <base/registry.h>
|
||||
#include <vm_session/vm_session.h>
|
||||
#include <dataspace/capability.h>
|
||||
|
||||
/* base-hw includes */
|
||||
#include <spec/x86_64/virtualization/hpt.h>
|
||||
|
||||
/* core includes */
|
||||
#include <cpu_thread_component.h>
|
||||
#include <region_map_component.h>
|
||||
#include <kernel/vm.h>
|
||||
#include <trace/source_registry.h>
|
||||
|
||||
#include <vcpu.h>
|
||||
#include <vmid_allocator.h>
|
||||
#include <guest_memory.h>
|
||||
#include <phys_allocated.h>
|
||||
|
||||
|
||||
namespace Core { class Svm_session_component; }
|
||||
|
||||
|
||||
class Core::Svm_session_component
|
||||
:
|
||||
public Session_object<Vm_session>
|
||||
{
|
||||
private:
|
||||
|
||||
using Vm_page_table = Hw::Hpt;
|
||||
|
||||
using Vm_page_table_array =
|
||||
Vm_page_table::Allocator::Array<Kernel::DEFAULT_TRANSLATION_TABLE_MAX>;
|
||||
|
||||
|
||||
/*
|
||||
* Noncopyable
|
||||
*/
|
||||
Svm_session_component(Svm_session_component const &);
|
||||
Svm_session_component &operator = (Svm_session_component const &);
|
||||
|
||||
struct Detach : Region_map_detach
|
||||
{
|
||||
Svm_session_component &_session;
|
||||
|
||||
Detach(Svm_session_component &session) : _session(session)
|
||||
{ }
|
||||
|
||||
void detach_at(addr_t at) override
|
||||
{
|
||||
_session._detach_at(at);
|
||||
}
|
||||
|
||||
void reserve_and_flush(addr_t at) override
|
||||
{
|
||||
_session._reserve_and_flush(at);
|
||||
}
|
||||
|
||||
void unmap_region(addr_t base, size_t size) override
|
||||
{
|
||||
Genode::error(__func__, " unimplemented ", base, " ", size);
|
||||
}
|
||||
} _detach { *this };
|
||||
|
||||
Registry<Registered<Vcpu>> _vcpus { };
|
||||
|
||||
Rpc_entrypoint &_ep;
|
||||
Constrained_ram_allocator _constrained_ram_alloc;
|
||||
Region_map &_region_map;
|
||||
Heap _heap;
|
||||
Phys_allocated<Vm_page_table> _table;
|
||||
Phys_allocated<Vm_page_table_array> _table_array;
|
||||
Guest_memory _memory;
|
||||
Vmid_allocator &_vmid_alloc;
|
||||
Kernel::Vm::Identity _id;
|
||||
uint8_t _remaining_print_count { 10 };
|
||||
|
||||
void _detach_at(addr_t addr)
|
||||
{
|
||||
_memory.detach_at(addr,
|
||||
[&](addr_t vm_addr, size_t size) {
|
||||
_table.obj.remove_translation(vm_addr, size, _table_array.obj.alloc()); });
|
||||
}
|
||||
|
||||
void _reserve_and_flush(addr_t addr)
|
||||
{
|
||||
_memory.reserve_and_flush(addr, [&](addr_t vm_addr, size_t size) {
|
||||
_table.obj.remove_translation(vm_addr, size, _table_array.obj.alloc()); });
|
||||
}
|
||||
|
||||
public:
|
||||
|
||||
Svm_session_component(Vmid_allocator & vmid_alloc,
|
||||
Rpc_entrypoint &ds_ep,
|
||||
Resources resources,
|
||||
Label const &label,
|
||||
Diag diag,
|
||||
Ram_allocator &ram_alloc,
|
||||
Region_map ®ion_map,
|
||||
Trace::Source_registry &)
|
||||
:
|
||||
Session_object(ds_ep, resources, label, diag),
|
||||
_ep(ds_ep),
|
||||
_constrained_ram_alloc(ram_alloc, _ram_quota_guard(), _cap_quota_guard()),
|
||||
_region_map(region_map),
|
||||
_heap(_constrained_ram_alloc, region_map),
|
||||
_table(_ep, _constrained_ram_alloc, _region_map),
|
||||
_table_array(_ep, _constrained_ram_alloc, _region_map,
|
||||
[] (Phys_allocated<Vm_page_table_array> &table_array, auto *obj_ptr) {
|
||||
construct_at<Vm_page_table_array>(obj_ptr, [&] (void *virt) {
|
||||
return table_array.phys_addr() + ((addr_t) obj_ptr - (addr_t)virt);
|
||||
});
|
||||
}),
|
||||
_memory(_constrained_ram_alloc, region_map),
|
||||
_vmid_alloc(vmid_alloc),
|
||||
_id({(unsigned)_vmid_alloc.alloc(), (void *)_table.phys_addr()})
|
||||
{ }
|
||||
|
||||
~Svm_session_component()
|
||||
{
|
||||
_vcpus.for_each([&] (Registered<Vcpu> &vcpu) {
|
||||
destroy(_heap, &vcpu); });
|
||||
|
||||
_vmid_alloc.free(_id.id);
|
||||
}
|
||||
|
||||
|
||||
/**************************
|
||||
** Vm session interface **
|
||||
**************************/
|
||||
|
||||
void attach(Dataspace_capability cap, addr_t guest_phys, Attach_attr attr) override
|
||||
{
|
||||
bool out_of_tables = false;
|
||||
bool invalid_mapping = false;
|
||||
|
||||
auto const &map_fn = [&](addr_t vm_addr, addr_t phys_addr, size_t size) {
|
||||
Page_flags const pflags { RW, EXEC, USER, NO_GLOBAL, RAM, CACHED };
|
||||
|
||||
try {
|
||||
_table.obj.insert_translation(vm_addr, phys_addr, size, pflags, _table_array.obj.alloc());
|
||||
} catch(Hw::Out_of_tables &) {
|
||||
if (_remaining_print_count) {
|
||||
Genode::error("Translation table needs too much RAM");
|
||||
_remaining_print_count--;
|
||||
}
|
||||
out_of_tables = true;
|
||||
} catch(...) {
|
||||
if (_remaining_print_count) {
|
||||
Genode::error("Invalid mapping ", Genode::Hex(phys_addr), " -> ",
|
||||
Genode::Hex(vm_addr), " (", size, ")");
|
||||
}
|
||||
invalid_mapping = true;
|
||||
}
|
||||
};
|
||||
|
||||
if (!cap.valid())
|
||||
throw Invalid_dataspace();
|
||||
|
||||
/* check dataspace validity */
|
||||
_ep.apply(cap, [&] (Dataspace_component *ptr) {
|
||||
if (!ptr)
|
||||
throw Invalid_dataspace();
|
||||
|
||||
Dataspace_component &dsc = *ptr;
|
||||
|
||||
Guest_memory::Attach_result result =
|
||||
_memory.attach(_detach, dsc, guest_phys, attr, map_fn);
|
||||
|
||||
if (out_of_tables)
|
||||
throw Out_of_ram();
|
||||
|
||||
if (invalid_mapping)
|
||||
throw Invalid_dataspace();
|
||||
|
||||
switch (result) {
|
||||
case Guest_memory::Attach_result::OK : break;
|
||||
case Guest_memory::Attach_result::INVALID_DS : throw Invalid_dataspace(); break;
|
||||
case Guest_memory::Attach_result::OUT_OF_RAM : throw Out_of_ram(); break;
|
||||
case Guest_memory::Attach_result::OUT_OF_CAPS : throw Out_of_caps(); break;
|
||||
case Guest_memory::Attach_result::REGION_CONFLICT: throw Region_conflict(); break;
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
void attach_pic(addr_t) override
|
||||
{ }
|
||||
|
||||
void detach(addr_t guest_phys, size_t size) override
|
||||
{
|
||||
_memory.detach(guest_phys, size, [&](addr_t vm_addr, size_t size) {
|
||||
_table.obj.remove_translation(vm_addr, size, _table_array.obj.alloc()); });
|
||||
}
|
||||
|
||||
Capability<Native_vcpu> create_vcpu(Thread_capability tcap) override
|
||||
{
|
||||
Affinity::Location vcpu_location;
|
||||
_ep.apply(tcap, [&] (Cpu_thread_component *ptr) {
|
||||
if (!ptr) return;
|
||||
vcpu_location = ptr->platform_thread().affinity();
|
||||
});
|
||||
|
||||
Vcpu &vcpu = *new (_heap)
|
||||
Registered<Vcpu>(_vcpus,
|
||||
_id,
|
||||
_ep,
|
||||
_constrained_ram_alloc,
|
||||
_region_map,
|
||||
vcpu_location);
|
||||
|
||||
return vcpu.cap();
|
||||
}
|
||||
};
|
||||
|
||||
#endif /* _CORE__SVM_VM_SESSION_COMPONENT_H_ */
|
||||
@@ -0,0 +1,106 @@
|
||||
/*
|
||||
* \brief VM page table abstraction between VMX and SVM for x86
|
||||
* \author Benjamin Lamowski
|
||||
* \date 2024-04-23
|
||||
*/
|
||||
|
||||
/*
|
||||
* Copyright (C) 2024 Genode Labs GmbH
|
||||
*
|
||||
* This file is part of the Genode OS framework, which is distributed
|
||||
* under the terms of the GNU Affero General Public License version 3.
|
||||
*/
|
||||
|
||||
#ifndef _CORE__SPEC__PC__VIRTUALIZATION__VM_PAGE_TABLE_H_
|
||||
#define _CORE__SPEC__PC__VIRTUALIZATION__VM_PAGE_TABLE_H_
|
||||
|
||||
#include <base/log.h>
|
||||
#include <util/construct_at.h>
|
||||
#include <spec/x86_64/virtualization/ept.h>
|
||||
#include <spec/x86_64/virtualization/hpt.h>
|
||||
|
||||
namespace Board {
|
||||
using namespace Genode;
|
||||
|
||||
struct Vm_page_table
|
||||
{
|
||||
/* Both Ept and Hpt need to actually use this allocator */
|
||||
using Allocator = Genode::Page_table_allocator<1UL << SIZE_LOG2_4KB>;
|
||||
|
||||
template <class T, class U>
|
||||
struct is_same {
|
||||
static const bool value = false;
|
||||
};
|
||||
|
||||
template <class T>
|
||||
struct is_same <T, T> {
|
||||
static const bool value = true;
|
||||
};
|
||||
|
||||
static_assert(is_same<Allocator, Hw::Ept::Allocator>::value,
|
||||
"Ept uses different allocator");
|
||||
static_assert(is_same<Allocator, Hw::Hpt::Allocator>::value,
|
||||
"Hpt uses different allocator");
|
||||
|
||||
static constexpr size_t ALIGNM_LOG2 = Hw::SIZE_LOG2_4KB;
|
||||
|
||||
enum Virt_type {
|
||||
VIRT_TYPE_NONE,
|
||||
VIRT_TYPE_VMX,
|
||||
VIRT_TYPE_SVM
|
||||
};
|
||||
|
||||
union {
|
||||
Hw::Ept ept;
|
||||
Hw::Hpt hpt;
|
||||
};
|
||||
|
||||
void insert_translation(addr_t vo,
|
||||
addr_t pa,
|
||||
size_t size,
|
||||
Page_flags const & flags,
|
||||
Allocator & alloc)
|
||||
{
|
||||
if (virt_type() == VIRT_TYPE_VMX)
|
||||
ept.insert_translation(vo, pa, size, flags, alloc);
|
||||
else if (virt_type() == VIRT_TYPE_SVM)
|
||||
hpt.insert_translation(vo, pa, size, flags, alloc);
|
||||
}
|
||||
|
||||
void remove_translation(addr_t vo, size_t size, Allocator & alloc)
|
||||
{
|
||||
if (virt_type() == VIRT_TYPE_VMX)
|
||||
ept.remove_translation(vo, size, alloc);
|
||||
else if (virt_type() == VIRT_TYPE_SVM)
|
||||
hpt.remove_translation(vo, size, alloc);
|
||||
}
|
||||
|
||||
static Virt_type virt_type() {
|
||||
static Virt_type virt_type { VIRT_TYPE_NONE };
|
||||
|
||||
if (virt_type == VIRT_TYPE_NONE) {
|
||||
if (Hw::Virtualization_support::has_vmx())
|
||||
virt_type = VIRT_TYPE_VMX;
|
||||
else if (Hw::Virtualization_support::has_svm())
|
||||
virt_type = VIRT_TYPE_SVM;
|
||||
else
|
||||
error("Failed to detect Virtualization technology");
|
||||
}
|
||||
|
||||
return virt_type;
|
||||
}
|
||||
|
||||
Vm_page_table()
|
||||
{
|
||||
if (virt_type() == VIRT_TYPE_VMX)
|
||||
Genode::construct_at<Hw::Ept>(this);
|
||||
else if (virt_type() == VIRT_TYPE_SVM)
|
||||
Genode::construct_at<Hw::Hpt>(this);
|
||||
}
|
||||
};
|
||||
|
||||
using Vm_page_table_array =
|
||||
Vm_page_table::Allocator::Array<Kernel::DEFAULT_TRANSLATION_TABLE_MAX>;
|
||||
};
|
||||
|
||||
#endif /* _CORE__SPEC__PC__VIRTUALIZATION__VM_PAGE_TABLE_H_ */
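
The Vm_page_table above probes once, at first use, whether the host offers VMX or SVM and then forwards every operation to the matching second-stage table placed in the same storage, so the calling session code stays identical either way. A hedged sketch of that dispatch pattern with placeholder table types (not the real Hw::Ept/Hw::Hpt classes):

#include <cstdio>
#include <new>

namespace Sketch {

	struct Ept { void map(unsigned long va, unsigned long pa) { std::printf("EPT map %lx -> %lx\n", va, pa); } };
	struct Hpt { void map(unsigned long va, unsigned long pa) { std::printf("HPT map %lx -> %lx\n", va, pa); } };

	/* same idea as Board::Vm_page_table: one storage area, concrete type chosen at runtime */
	struct Vm_page_table {

		enum Virt_type { VMX, SVM };

		Virt_type const type;

		union { Ept ept; Hpt hpt; };

		Vm_page_table(Virt_type t) : type(t)
		{
			if (type == VMX) new (&ept) Ept();
			else             new (&hpt) Hpt();
		}

		void map(unsigned long va, unsigned long pa)
		{
			if (type == VMX) ept.map(va, pa);
			else             hpt.map(va, pa);
		}
	};
}

int main()
{
	/* in core this choice would come from probing the CPU's virtualization features */
	Sketch::Vm_page_table table { Sketch::Vm_page_table::VMX };
	table.map(0x1000, 0x2000);
}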
|
||||
@@ -0,0 +1,196 @@
/*
 * \brief  VM session component for 'base-hw'
 * \author Stefan Kalkowski
 * \author Benjamin Lamowski
 * \date   2015-02-17
 */

/*
 * Copyright (C) 2015-2024 Genode Labs GmbH
 *
 * This file is part of the Genode OS framework, which is distributed
 * under the terms of the GNU Affero General Public License version 3.
 */

/* Genode includes */
#include <util/construct_at.h>

/* base internal includes */
#include <base/internal/unmanaged_singleton.h>

/* core includes */
#include <kernel/core_interface.h>
#include <vm_session_component.h>
#include <platform.h>
#include <cpu_thread_component.h>
#include <core_env.h>

using namespace Core;


static Core_mem_allocator & cma() {
	return static_cast<Core_mem_allocator&>(platform().core_mem_alloc()); }


void Vm_session_component::_attach(addr_t phys_addr, addr_t vm_addr, size_t size)
{
	using namespace Hw;

	Page_flags pflags { RW, EXEC, USER, NO_GLOBAL, RAM, CACHED };

	try {
		_table.insert_translation(vm_addr, phys_addr, size, pflags,
		                          _table_array.alloc());
		return;
	} catch(Hw::Out_of_tables &) {
		Genode::error("Translation table needs too much RAM");
	} catch(...) {
		Genode::error("Invalid mapping ", Genode::Hex(phys_addr), " -> ",
		              Genode::Hex(vm_addr), " (", size, ")");
	}
}


void Vm_session_component::_attach_vm_memory(Dataspace_component &dsc,
                                             addr_t const vm_addr,
                                             Attach_attr const attribute)
{
	_attach(dsc.phys_addr() + attribute.offset, vm_addr, attribute.size);
}


void Vm_session_component::attach_pic(addr_t )
{ }


void Vm_session_component::_detach_vm_memory(addr_t vm_addr, size_t size)
{
	_table.remove_translation(vm_addr, size, _table_array.alloc());
}


void * Vm_session_component::_alloc_table()
{
	/* get some aligned space for the translation table */
	return cma().alloc_aligned(sizeof(Board::Vm_page_table),
	                           Board::Vm_page_table::ALIGNM_LOG2).convert<void *>(
		[&] (void *table_ptr) {
			return table_ptr; },

		[&] (Range_allocator::Alloc_error) -> void * {
			/* XXX handle individual error conditions */
			error("failed to allocate kernel object");
			throw Insufficient_ram_quota(); }
	);
}


using Vmid_allocator = Genode::Bit_allocator<256>;

static Vmid_allocator &alloc()
{
	static Vmid_allocator * allocator = nullptr;
	if (!allocator) {
		allocator = unmanaged_singleton<Vmid_allocator>();

		/* reserve VM ID 0 for the hypervisor */
		addr_t id = allocator->alloc();
		assert (id == 0);
	}
	return *allocator;
}


Genode::addr_t Vm_session_component::_alloc_vcpu_data(Genode::addr_t ds_addr)
{
	/*
	 * XXX these allocations currently leak memory on VM Session
	 * destruction. This cannot be easily fixed because the
	 * Core Mem Allocator does not implement free().
	 *
	 * Normally we would use constrained_md_ram_alloc to make the allocation,
	 * but to get the physical address of the pages in virt_area, we need
	 * to use the Core Mem Allocator.
	 */

	Vcpu_data * vcpu_data = (Vcpu_data *) cma()
	                        .try_alloc(sizeof(Board::Vcpu_data))
	                        .convert<void *>(
		[&](void *ptr) { return ptr; },
		[&](Range_allocator::Alloc_error) -> void * {
			/* XXX handle individual error conditions */
			error("failed to allocate kernel object");
			throw Insufficient_ram_quota();
		});

	vcpu_data->virt_area = cma()
	                       .alloc_aligned(Vcpu_data::size(), 12)
	                       .convert<void *>(
		[&](void *ptr) { return ptr; },
		[&](Range_allocator::Alloc_error) -> void * {
			/* XXX handle individual error conditions */
			error("failed to allocate kernel object");
			throw Insufficient_ram_quota();
		});

	vcpu_data->vcpu_state = (Vcpu_state *) ds_addr;
	vcpu_data->phys_addr  = (addr_t)cma().phys_addr(vcpu_data->virt_area);

	return (Genode::addr_t) vcpu_data;
}


Vm_session_component::Vm_session_component(Rpc_entrypoint &ds_ep,
                                           Resources resources,
                                           Label const &,
                                           Diag,
                                           Ram_allocator &ram_alloc,
                                           Region_map &region_map,
                                           unsigned,
                                           Trace::Source_registry &)
:
	Ram_quota_guard(resources.ram_quota),
	Cap_quota_guard(resources.cap_quota),
	_ep(ds_ep),
	_constrained_md_ram_alloc(ram_alloc, _ram_quota_guard(), _cap_quota_guard()),
	_sliced_heap(_constrained_md_ram_alloc, region_map),
	_region_map(region_map),
	_table(*construct_at<Board::Vm_page_table>(_alloc_table())),
	_table_array(*(new (cma()) Board::Vm_page_table_array([] (void * virt) {
	                           return (addr_t)cma().phys_addr(virt);}))),
	_id({(unsigned)alloc().alloc(), cma().phys_addr(&_table)})
{
	/* configure managed VM area */
	_map.add_range(0UL, ~0UL);
}


Vm_session_component::~Vm_session_component()
{
	/* detach all regions */
	while (true) {
		addr_t out_addr = 0;

		if (!_map.any_block_addr(&out_addr))
			break;

		detach_at(out_addr);
	}

	/* free region in allocator */
	for (unsigned i = 0; i < _vcpu_id_alloc; i++) {
		if (!_vcpus[i].constructed())
			continue;

		Vcpu & vcpu = *_vcpus[i];
		if (vcpu.ds_cap.valid()) {
			_region_map.detach(vcpu.ds_addr);
			_constrained_md_ram_alloc.free(vcpu.ds_cap);
		}
	}

	/* free guest-to-host page tables */
	destroy(platform().core_mem_alloc(), &_table);
	destroy(platform().core_mem_alloc(), &_table_array);
	alloc().free(_id.id);
}
@@ -1,234 +0,0 @@
|
||||
/*
|
||||
* \brief VMX VM session component for 'base-hw'
|
||||
* \author Stefan Kalkowski
|
||||
* \author Benjamin Lamowski
|
||||
* \date 2024-09-20
|
||||
*/
|
||||
|
||||
/*
|
||||
* Copyright (C) 2015-2024 Genode Labs GmbH
|
||||
*
|
||||
* This file is part of the Genode OS framework, which is distributed
|
||||
* under the terms of the GNU Affero General Public License version 3.
|
||||
*/
|
||||
|
||||
#ifndef _CORE__VMX_VM_SESSION_COMPONENT_H_
|
||||
#define _CORE__VMX_VM_SESSION_COMPONENT_H_
|
||||
|
||||
/* base includes */
|
||||
#include <base/allocator.h>
|
||||
#include <base/session_object.h>
|
||||
#include <base/registry.h>
|
||||
#include <vm_session/vm_session.h>
|
||||
#include <dataspace/capability.h>
|
||||
|
||||
/* base-hw includes */
|
||||
#include <spec/x86_64/virtualization/ept.h>
|
||||
|
||||
/* core includes */
|
||||
#include <cpu_thread_component.h>
|
||||
#include <region_map_component.h>
|
||||
#include <kernel/vm.h>
|
||||
#include <trace/source_registry.h>
|
||||
|
||||
#include <vcpu.h>
|
||||
#include <vmid_allocator.h>
|
||||
#include <guest_memory.h>
|
||||
#include <phys_allocated.h>
|
||||
|
||||
|
||||
namespace Core { class Vmx_session_component; }
|
||||
|
||||
|
||||
class Core::Vmx_session_component
|
||||
:
|
||||
public Session_object<Vm_session>
|
||||
{
|
||||
private:
|
||||
|
||||
using Vm_page_table = Hw::Ept;
|
||||
|
||||
using Vm_page_table_array =
|
||||
Vm_page_table::Allocator::Array<Kernel::DEFAULT_TRANSLATION_TABLE_MAX>;
|
||||
|
||||
|
||||
/*
|
||||
* Noncopyable
|
||||
*/
|
||||
Vmx_session_component(Vmx_session_component const &);
|
||||
Vmx_session_component &operator = (Vmx_session_component const &);
|
||||
|
||||
struct Detach : Region_map_detach
|
||||
{
|
||||
Vmx_session_component &_session;
|
||||
|
||||
Detach(Vmx_session_component &session) : _session(session)
|
||||
{ }
|
||||
|
||||
void detach_at(addr_t at) override
|
||||
{
|
||||
_session._detach_at(at);
|
||||
}
|
||||
|
||||
void reserve_and_flush(addr_t at) override
|
||||
{
|
||||
_session._reserve_and_flush(at);
|
||||
}
|
||||
|
||||
void unmap_region(addr_t base, size_t size) override
|
||||
{
|
||||
Genode::error(__func__, " unimplemented ", base, " ", size);
|
||||
}
|
||||
} _detach { *this };
|
||||
|
||||
Registry<Registered<Vcpu>> _vcpus { };
|
||||
|
||||
Rpc_entrypoint &_ep;
|
||||
Constrained_ram_allocator _constrained_ram_alloc;
|
||||
Region_map &_region_map;
|
||||
Heap _heap;
|
||||
Phys_allocated<Vm_page_table> _table;
|
||||
Phys_allocated<Vm_page_table_array> _table_array;
|
||||
Guest_memory _memory;
|
||||
Vmid_allocator &_vmid_alloc;
|
||||
Kernel::Vm::Identity _id;
|
||||
uint8_t _remaining_print_count { 10 };
|
||||
|
||||
void _detach_at(addr_t addr)
|
||||
{
|
||||
_memory.detach_at(addr,
|
||||
[&](addr_t vm_addr, size_t size) {
|
||||
_table.obj.remove_translation(vm_addr, size, _table_array.obj.alloc()); });
|
||||
}
|
||||
|
||||
void _reserve_and_flush(addr_t addr)
|
||||
{
|
||||
_memory.reserve_and_flush(addr, [&](addr_t vm_addr, size_t size) {
|
||||
_table.obj.remove_translation(vm_addr, size, _table_array.obj.alloc()); });
|
||||
}
|
||||
|
||||
public:
|
||||
|
||||
Vmx_session_component(Vmid_allocator & vmid_alloc,
|
||||
Rpc_entrypoint &ds_ep,
|
||||
Resources resources,
|
||||
Label const &label,
|
||||
Diag diag,
|
||||
Ram_allocator &ram_alloc,
|
||||
Region_map ®ion_map,
|
||||
Trace::Source_registry &)
|
||||
:
|
||||
Session_object(ds_ep, resources, label, diag),
|
||||
_ep(ds_ep),
|
||||
_constrained_ram_alloc(ram_alloc, _ram_quota_guard(), _cap_quota_guard()),
|
||||
_region_map(region_map),
|
||||
_heap(_constrained_ram_alloc, region_map),
|
||||
_table(_ep, _constrained_ram_alloc, _region_map),
|
||||
_table_array(_ep, _constrained_ram_alloc, _region_map,
|
||||
[] (Phys_allocated<Vm_page_table_array> &table_array, auto *obj_ptr) {
|
||||
construct_at<Vm_page_table_array>(obj_ptr, [&] (void *virt) {
|
||||
return table_array.phys_addr() + ((addr_t) obj_ptr - (addr_t)virt);
|
||||
});
|
||||
}),
|
||||
_memory(_constrained_ram_alloc, region_map),
|
||||
_vmid_alloc(vmid_alloc),
|
||||
_id({(unsigned)_vmid_alloc.alloc(), (void *)_table.phys_addr()})
|
||||
{ }
|
||||
|
||||
~Vmx_session_component()
|
||||
{
|
||||
_vcpus.for_each([&] (Registered<Vcpu> &vcpu) {
|
||||
destroy(_heap, &vcpu); });
|
||||
|
||||
_vmid_alloc.free(_id.id);
|
||||
}
|
||||
|
||||
|
||||
/**************************
|
||||
** Vm session interface **
|
||||
**************************/
|
||||
|
||||
void attach(Dataspace_capability cap, addr_t guest_phys, Attach_attr attr) override
|
||||
{
|
||||
bool out_of_tables = false;
|
||||
bool invalid_mapping = false;
|
||||
|
||||
auto const &map_fn = [&](addr_t vm_addr, addr_t phys_addr, size_t size) {
|
||||
Page_flags const pflags { RW, EXEC, USER, NO_GLOBAL, RAM, CACHED };
|
||||
|
||||
try {
|
||||
_table.obj.insert_translation(vm_addr, phys_addr, size, pflags, _table_array.obj.alloc());
|
||||
} catch(Hw::Out_of_tables &) {
|
||||
if (_remaining_print_count) {
|
||||
Genode::error("Translation table needs too much RAM");
|
||||
_remaining_print_count--;
|
||||
}
|
||||
out_of_tables = true;
|
||||
} catch(...) {
|
||||
if (_remaining_print_count) {
|
||||
Genode::error("Invalid mapping ", Genode::Hex(phys_addr), " -> ",
|
||||
Genode::Hex(vm_addr), " (", size, ")");
|
||||
}
|
||||
invalid_mapping = true;
|
||||
}
|
||||
};
|
||||
|
||||
if (!cap.valid())
|
||||
throw Invalid_dataspace();
|
||||
|
||||
/* check dataspace validity */
|
||||
_ep.apply(cap, [&] (Dataspace_component *ptr) {
|
||||
if (!ptr)
|
||||
throw Invalid_dataspace();
|
||||
|
||||
Dataspace_component &dsc = *ptr;
|
||||
|
||||
Guest_memory::Attach_result result =
|
||||
_memory.attach(_detach, dsc, guest_phys, attr, map_fn);
|
||||
|
||||
if (out_of_tables)
|
||||
throw Out_of_ram();
|
||||
|
||||
if (invalid_mapping)
|
||||
throw Invalid_dataspace();
|
||||
|
||||
switch (result) {
|
||||
case Guest_memory::Attach_result::OK : break;
|
||||
case Guest_memory::Attach_result::INVALID_DS : throw Invalid_dataspace(); break;
|
||||
case Guest_memory::Attach_result::OUT_OF_RAM : throw Out_of_ram(); break;
|
||||
case Guest_memory::Attach_result::OUT_OF_CAPS : throw Out_of_caps(); break;
|
||||
case Guest_memory::Attach_result::REGION_CONFLICT: throw Region_conflict(); break;
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
void attach_pic(addr_t) override
|
||||
{ }
|
||||
|
||||
void detach(addr_t guest_phys, size_t size) override
|
||||
{
|
||||
_memory.detach(guest_phys, size, [&](addr_t vm_addr, size_t size) {
|
||||
_table.obj.remove_translation(vm_addr, size, _table_array.obj.alloc()); });
|
||||
}
|
||||
|
||||
Capability<Native_vcpu> create_vcpu(Thread_capability tcap) override
|
||||
{
|
||||
Affinity::Location vcpu_location;
|
||||
_ep.apply(tcap, [&] (Cpu_thread_component *ptr) {
|
||||
if (!ptr) return;
|
||||
vcpu_location = ptr->platform_thread().affinity();
|
||||
});
|
||||
|
||||
Vcpu &vcpu = *new (_heap)
|
||||
Registered<Vcpu>(_vcpus,
|
||||
_id,
|
||||
_ep,
|
||||
_constrained_ram_alloc,
|
||||
_region_map,
|
||||
vcpu_location);
|
||||
|
||||
return vcpu.cap();
|
||||
}
|
||||
};
|
||||
|
||||
#endif /* _CORE__VMX_VM_SESSION_COMPONENT_H_ */
|
||||
@@ -1,99 +0,0 @@
|
||||
/*
|
||||
* \brief x86_64 specific Vm root interface
|
||||
* \author Stefan Kalkowski
|
||||
* \author Benjamin Lamowski
|
||||
* \date 2012-10-08
|
||||
*/
|
||||
|
||||
/*
|
||||
* Copyright (C) 2012-2024 Genode Labs GmbH
|
||||
*
|
||||
* This file is part of the Genode OS framework, which is distributed
|
||||
* under the terms of the GNU Affero General Public License version 3.
|
||||
*/
|
||||
|
||||
#ifndef _CORE__INCLUDE__VM_ROOT_H_
|
||||
#define _CORE__INCLUDE__VM_ROOT_H_
|
||||
|
||||
/* Genode includes */
|
||||
#include <root/component.h>
|
||||
|
||||
/* Hw includes */
|
||||
#include <hw/spec/x86_64/x86_64.h>
|
||||
|
||||
/* core includes */
|
||||
#include <virtualization/vmx_session_component.h>
|
||||
#include <virtualization/svm_session_component.h>
|
||||
|
||||
#include <vmid_allocator.h>
|
||||
|
||||
namespace Core { class Vm_root; }
|
||||
|
||||
|
||||
class Core::Vm_root : public Root_component<Session_object<Vm_session>>
|
||||
{
|
||||
private:
|
||||
|
||||
Ram_allocator &_ram_allocator;
|
||||
Region_map &_local_rm;
|
||||
Trace::Source_registry &_trace_sources;
|
||||
Vmid_allocator _vmid_alloc { };
|
||||
|
||||
protected:
|
||||
|
||||
Session_object<Vm_session> *_create_session(const char *args) override
|
||||
{
|
||||
Session::Resources resources = session_resources_from_args(args);
|
||||
|
||||
if (Hw::Virtualization_support::has_svm())
|
||||
return new (md_alloc())
|
||||
Svm_session_component(_vmid_alloc,
|
||||
*ep(),
|
||||
resources,
|
||||
session_label_from_args(args),
|
||||
session_diag_from_args(args),
|
||||
_ram_allocator, _local_rm,
|
||||
_trace_sources);
|
||||
|
||||
if (Hw::Virtualization_support::has_vmx())
|
||||
return new (md_alloc())
|
||||
Vmx_session_component(_vmid_alloc,
|
||||
*ep(),
|
||||
session_resources_from_args(args),
|
||||
session_label_from_args(args),
|
||||
session_diag_from_args(args),
|
||||
_ram_allocator, _local_rm,
|
||||
_trace_sources);
|
||||
|
||||
Genode::error( "No virtualization support detected.");
|
||||
throw Core::Service_denied();
|
||||
}
|
||||
|
||||
void _upgrade_session(Session_object<Vm_session> *vm, const char *args) override
|
||||
{
|
||||
vm->upgrade(ram_quota_from_args(args));
|
||||
vm->upgrade(cap_quota_from_args(args));
|
||||
}
|
||||
|
||||
public:
|
||||
|
||||
/**
|
||||
* Constructor
|
||||
*
|
||||
* \param session_ep entrypoint managing vm_session components
|
||||
* \param md_alloc meta-data allocator to be used by root component
|
||||
*/
|
||||
Vm_root(Rpc_entrypoint &session_ep,
|
||||
Allocator &md_alloc,
|
||||
Ram_allocator &ram_alloc,
|
||||
Region_map &local_rm,
|
||||
Trace::Source_registry &trace_sources)
|
||||
:
|
||||
Root_component<Session_object<Vm_session>>(&session_ep, &md_alloc),
|
||||
_ram_allocator(ram_alloc),
|
||||
_local_rm(local_rm),
|
||||
_trace_sources(trace_sources)
|
||||
{ }
|
||||
};
|
||||
|
||||
#endif /* _CORE__INCLUDE__VM_ROOT_H_ */
|
||||
@@ -1,87 +0,0 @@
|
||||
/*
|
||||
* \brief base-hw specific Vm root interface
|
||||
* \author Stefan Kalkowski
|
||||
* \author Benjamin Lamowski
|
||||
* \date 2012-10-08
|
||||
*/
|
||||
|
||||
/*
|
||||
* Copyright (C) 2012-2024 Genode Labs GmbH
|
||||
*
|
||||
* This file is part of the Genode OS framework, which is distributed
|
||||
* under the terms of the GNU Affero General Public License version 3.
|
||||
*/
|
||||
|
||||
#ifndef _CORE__INCLUDE__VM_ROOT_H_
|
||||
#define _CORE__INCLUDE__VM_ROOT_H_
|
||||
|
||||
/* Genode includes */
|
||||
#include <root/component.h>
|
||||
|
||||
/* core includes */
|
||||
#include <vm_session_component.h>
|
||||
|
||||
#include <vmid_allocator.h>
|
||||
|
||||
namespace Core { class Vm_root; }
|
||||
|
||||
class Core::Vm_root : public Root_component<Vm_session_component>
|
||||
{
|
||||
private:
|
||||
|
||||
Ram_allocator &_ram_allocator;
|
||||
Region_map &_local_rm;
|
||||
Trace::Source_registry &_trace_sources;
|
||||
Vmid_allocator _vmid_alloc { };
|
||||
|
||||
protected:
|
||||
|
||||
Vm_session_component *_create_session(const char *args) override
|
||||
{
|
||||
unsigned priority = 0;
|
||||
Arg a = Arg_string::find_arg(args, "priority");
|
||||
if (a.valid()) {
|
||||
priority = (unsigned)a.ulong_value(0);
|
||||
|
||||
/* clamp priority value to valid range */
|
||||
priority = min((unsigned)Cpu_session::PRIORITY_LIMIT - 1, priority);
|
||||
}
|
||||
|
||||
return new (md_alloc())
|
||||
Vm_session_component(_vmid_alloc,
|
||||
*ep(),
|
||||
session_resources_from_args(args),
|
||||
session_label_from_args(args),
|
||||
session_diag_from_args(args),
|
||||
_ram_allocator, _local_rm, priority,
|
||||
_trace_sources);
|
||||
}
|
||||
|
||||
void _upgrade_session(Vm_session_component *vm, const char *args) override
|
||||
{
|
||||
vm->upgrade(ram_quota_from_args(args));
|
||||
vm->upgrade(cap_quota_from_args(args));
|
||||
}
|
||||
|
||||
public:
|
||||
|
||||
/**
|
||||
* Constructor
|
||||
*
|
||||
* \param session_ep entrypoint managing vm_session components
|
||||
* \param md_alloc meta-data allocator to be used by root component
|
||||
*/
|
||||
Vm_root(Rpc_entrypoint &session_ep,
|
||||
Allocator &md_alloc,
|
||||
Ram_allocator &ram_alloc,
|
||||
Region_map &local_rm,
|
||||
Trace::Source_registry &trace_sources)
|
||||
:
|
||||
Root_component<Vm_session_component>(&session_ep, &md_alloc),
|
||||
_ram_allocator(ram_alloc),
|
||||
_local_rm(local_rm),
|
||||
_trace_sources(trace_sources)
|
||||
{ }
|
||||
};
|
||||
|
||||
#endif /* _CORE__INCLUDE__VM_ROOT_H_ */
|
||||
@@ -19,6 +19,7 @@
|
||||
#include <vm_session_component.h>
|
||||
#include <platform.h>
|
||||
#include <cpu_thread_component.h>
|
||||
#include <core_env.h>
|
||||
|
||||
using namespace Core;
|
||||
|
||||
|
||||
@@ -30,9 +30,6 @@
|
||||
#include <kernel/vm.h>
|
||||
#include <trace/source_registry.h>
|
||||
|
||||
#include <vmid_allocator.h>
|
||||
|
||||
|
||||
namespace Core { class Vm_session_component; }
|
||||
|
||||
|
||||
@@ -91,7 +88,6 @@ class Core::Vm_session_component
|
||||
Region_map &_region_map;
|
||||
Board::Vm_page_table &_table;
|
||||
Board::Vm_page_table_array &_table_array;
|
||||
Vmid_allocator &_vmid_alloc;
|
||||
Kernel::Vm::Identity _id;
|
||||
unsigned _vcpu_id_alloc { 0 };
|
||||
|
||||
@@ -117,9 +113,8 @@ class Core::Vm_session_component
|
||||
using Cap_quota_guard::upgrade;
|
||||
using Rpc_object<Vm_session, Vm_session_component>::cap;
|
||||
|
||||
Vm_session_component(Vmid_allocator &, Rpc_entrypoint &,
|
||||
Resources, Label const &, Diag,
|
||||
Ram_allocator &ram, Region_map &, unsigned,
|
||||
Vm_session_component(Rpc_entrypoint &, Resources, Label const &,
|
||||
Diag, Ram_allocator &ram, Region_map &, unsigned,
|
||||
Trace::Source_registry &);
|
||||
~Vm_session_component();
|
||||
|
||||
@@ -141,7 +136,7 @@ class Core::Vm_session_component
|
||||
void attach_pic(addr_t) override;
|
||||
void detach(addr_t, size_t) override;
|
||||
|
||||
Capability<Native_vcpu> create_vcpu(Thread_capability) override;
|
||||
Capability<Native_vcpu> create_vcpu(Thread_capability);
|
||||
};
|
||||
|
||||
#endif /* _CORE__VM_SESSION_COMPONENT_H_ */
|
||||
|
||||
@@ -1,33 +0,0 @@
|
||||
/*
|
||||
* \brief VM ID allocator
|
||||
* \author Stefan Kalkowski
|
||||
* \author Benjamin Lamowski
|
||||
* \date 2024-11-21
|
||||
*/
|
||||
|
||||
/*
|
||||
* Copyright (C) 2015-2024 Genode Labs GmbH
|
||||
*
|
||||
* This file is part of the Genode OS framework, which is distributed
|
||||
* under the terms of the GNU Affero General Public License version 3.
|
||||
*/
|
||||
|
||||
#ifndef _CORE__VMID_ALLOCATOR_H_
|
||||
#define _CORE__VMID_ALLOCATOR_H_
|
||||
|
||||
#include <util/bit_allocator.h>
|
||||
|
||||
namespace Core { struct Vmid_allocator; }
|
||||
|
||||
struct Core::Vmid_allocator
|
||||
: Genode::Bit_allocator<256>
|
||||
{
|
||||
Vmid_allocator()
|
||||
{
|
||||
/* reserve VM ID 0 for the hypervisor */
|
||||
addr_t id = alloc();
|
||||
assert (id == 0);
|
||||
}
|
||||
};
|
||||
|
||||
#endif /* _CORE__VMID_ALLOCATOR_H_ */
|
||||
@@ -94,8 +94,6 @@ struct Hw::Acpi_fadt : Genode::Mmio<276>
|
||||
struct Smi_cmd : Register<0x30, 32> { };
|
||||
struct Acpi_enable : Register<0x34, 8> { };
|
||||
|
||||
struct Pm_tmr_len : Register< 91, 8> { };
|
||||
|
||||
struct Pm1a_cnt_blk : Register < 64, 32> {
|
||||
struct Slp_typ : Bitfield < 10, 3> { };
|
||||
struct Slp_ena : Bitfield < 13, 1> { };
|
||||
@@ -125,13 +123,6 @@ struct Hw::Acpi_fadt : Genode::Mmio<276>
|
||||
};
|
||||
struct Pm1b_cnt_blk_ext_addr : Register < 184 + 4, 64> { };
|
||||
|
||||
struct X_pm_tmr_blk : Register < 208, 32> {
|
||||
struct Addressspace : Bitfield < 0, 8> { };
|
||||
struct Width : Bitfield < 8, 8> { };
|
||||
};
|
||||
|
||||
struct X_pm_tmr_blk_addr : Register < 208 + 4, 64> { };
|
||||
|
||||
struct Gpe0_blk_ext : Register < 220, 32> {
|
||||
struct Addressspace : Bitfield < 0, 8> { };
|
||||
struct Width : Bitfield < 8, 8> { };
|
||||
@@ -241,45 +232,6 @@ struct Hw::Acpi_fadt : Genode::Mmio<276>
|
||||
return pm1_a | pm1_b;
|
||||
}
|
||||
|
||||
/* see ACPI spec version 6.5 4.8.3.3. Power Management Timer (PM_TMR) */
|
||||
uint32_t read_pm_tmr()
|
||||
{
|
||||
if (read<Pm_tmr_len>() != 4)
|
||||
return 0;
|
||||
|
||||
addr_t const tmr_addr = read<X_pm_tmr_blk_addr>();
|
||||
|
||||
if (!tmr_addr)
|
||||
return 0;
|
||||
|
||||
uint8_t const tmr_addr_type =
|
||||
read<Hw::Acpi_fadt::X_pm_tmr_blk::Addressspace>();
|
||||
|
||||
/* I/O port address, most likely */
|
||||
if (tmr_addr_type == 1) return inl((uint16_t)tmr_addr);
|
||||
|
||||
/* System Memory space address */
|
||||
if (tmr_addr_type == 0) return *(uint32_t *)tmr_addr;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
uint32_t calibrate_freq_khz(uint32_t sleep_ms, auto get_value_fn, bool reverse = false)
|
||||
{
|
||||
unsigned const acpi_timer_freq = 3'579'545;
|
||||
|
||||
uint32_t const initial = read_pm_tmr();
|
||||
|
||||
if (!initial) return 0;
|
||||
|
||||
uint64_t const t1 = get_value_fn();
|
||||
while ((read_pm_tmr() - initial) < (acpi_timer_freq * sleep_ms / 1000))
|
||||
asm volatile ("pause":::"memory");
|
||||
uint64_t const t2 = get_value_fn();
|
||||
|
||||
return (uint32_t)((reverse ? (t1 - t2) : (t2 - t1)) / sleep_ms);
|
||||
}
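/*
 * Illustration (not part of the original patch): the ACPI PM timer ticks at a
 * fixed 3.579545 MHz, so calibrate_freq_khz() above simply counts how many
 * ticks of the measured clock pass while the PM timer advances by a known
 * amount. A minimal sketch of the arithmetic, assuming a 10 ms window and a
 * hypothetical 2.4 GHz TSC as the measured clock:
 *
 *   uint32_t const window   = 3'579'545 * 10 / 1000;  // ~35795 PM-timer ticks = 10 ms
 *   uint64_t const delta    = t2 - t1;                // e.g. 24'000'000 measured ticks
 *   uint32_t const freq_khz = uint32_t(delta / 10);   // 2'400'000 kHz, i.e. 2.4 GHz
 */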
|
||||
|
||||
void write_cnt_blk(unsigned value_a, unsigned value_b)
|
||||
{
|
||||
_write<Pm1_cnt_len, Pm1a_cnt_blk, Pm1a_cnt_blk_ext::Width,
|
||||
|
||||
@@ -18,9 +18,6 @@ namespace Hw { class Local_apic; }
|
||||
|
||||
#include <hw/spec/x86_64/x86_64.h>
|
||||
|
||||
/* Genode includes */
|
||||
#include <drivers/timer/util.h>
|
||||
|
||||
struct Hw::Local_apic : Genode::Mmio<Hw::Cpu_memory_map::LAPIC_SIZE>
|
||||
{
|
||||
struct Id : Register<0x020, 32> { };
|
||||
@@ -61,57 +58,6 @@ struct Hw::Local_apic : Genode::Mmio<Hw::Cpu_memory_map::LAPIC_SIZE>
|
||||
struct Destination : Bitfield<24, 8> { };
|
||||
};
|
||||
|
||||
/* Timer registers */
|
||||
struct Tmr_lvt : Register<0x320, 32>
|
||||
{
|
||||
struct Vector : Bitfield<0, 8> { };
|
||||
struct Delivery : Bitfield<8, 3> { };
|
||||
struct Mask : Bitfield<16, 1> { };
|
||||
struct Timer_mode : Bitfield<17, 2> { };
|
||||
};
|
||||
|
||||
struct Tmr_initial : Register <0x380, 32> { };
|
||||
struct Tmr_current : Register <0x390, 32> { };
|
||||
|
||||
struct Divide_configuration : Register <0x03e0, 32>
|
||||
{
|
||||
struct Divide_value_0_2 : Bitfield<0, 2> { };
|
||||
struct Divide_value_2_1 : Bitfield<3, 1> { };
|
||||
struct Divide_value :
|
||||
Genode::Bitset_2<Divide_value_0_2, Divide_value_2_1>
|
||||
{
|
||||
enum { MAX = 6 };
|
||||
};
|
||||
};
|
||||
|
||||
struct Calibration { uint32_t freq_khz; uint32_t div; };
|
||||
|
||||
Calibration calibrate_divider(auto calibration_fn)
|
||||
{
|
||||
Calibration result { };
|
||||
|
||||
/* calibrate LAPIC frequency to fulfill our requirements */
|
||||
for (Divide_configuration::access_t div = Divide_configuration::Divide_value::MAX;
|
||||
div && result.freq_khz < TIMER_MIN_TICKS_PER_MS; div--) {
|
||||
|
||||
if (!div) {
|
||||
raw("Failed to calibrate Local APIC frequency");
|
||||
return { 0, 1 };
|
||||
}
|
||||
write<Divide_configuration::Divide_value>((uint8_t)div);
|
||||
|
||||
write<Tmr_initial>(~0U);
|
||||
|
||||
/* Calculate timer frequency */
|
||||
result.freq_khz = calibration_fn();
|
||||
result.div = div;
|
||||
|
||||
write<Tmr_initial>(0);
|
||||
}
|
||||
|
||||
return result;
|
||||
}
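/*
 * Illustration (not part of the original patch): the returned freq_khz is the
 * number of LAPIC-timer ticks per millisecond at the selected divider, so a
 * timeout could be programmed roughly as sketched below (hypothetical values):
 *
 *   Calibration const cal = calibrate_divider(fn);           // e.g. { 25'000, 4 }
 *   write<Divide_configuration::Divide_value>((uint8_t)cal.div);
 *   write<Tmr_initial>(cal.freq_khz * timeout_ms);           // 10 ms -> 250'000 ticks
 */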
|
||||
|
||||
Local_apic(addr_t const addr) : Mmio({(char*)addr, Mmio::SIZE}) {}
|
||||
};
|
||||
|
||||
|
||||
@@ -118,12 +118,6 @@ struct Hw::X86_64_cpu
|
||||
/* AMD host save physical address */
|
||||
X86_64_MSR_REGISTER(Amd_vm_hsavepa, 0xC0010117);
|
||||
|
||||
|
||||
/* Non-architectural MSR used to make lfence serializing */
|
||||
X86_64_MSR_REGISTER(Amd_lfence, 0xC0011029,
|
||||
struct Enable_dispatch_serializing : Bitfield<1, 1> { }; /* Enable lfence dispatch serializing */
|
||||
)
|
||||
|
||||
X86_64_MSR_REGISTER(Platform_id, 0x17,
|
||||
struct Bus_ratio : Bitfield<8, 5> { }; /* Bus ratio on Core 2, see SDM 19.7.3 */
|
||||
);
|
||||
|
||||
@@ -40,13 +40,10 @@ struct Hw::Pc_board::Serial : Genode::X86_uart
|
||||
|
||||
struct Hw::Pc_board::Boot_info
|
||||
{
|
||||
Acpi_rsdp acpi_rsdp { };
|
||||
Framebuffer framebuffer { };
|
||||
Genode::addr_t efi_system_table { 0 };
|
||||
Genode::addr_t acpi_fadt { 0 };
|
||||
Genode::uint32_t tsc_freq_khz { 0 };
|
||||
Genode::uint32_t lapic_freq_khz { 0 };
|
||||
Genode::uint32_t lapic_div { 0 };
|
||||
Acpi_rsdp acpi_rsdp { };
|
||||
Framebuffer framebuffer { };
|
||||
Genode::addr_t efi_system_table { 0 };
|
||||
Genode::addr_t acpi_fadt { 0 };
|
||||
|
||||
Boot_info() {}
|
||||
Boot_info(Acpi_rsdp const &acpi_rsdp,
|
||||
|
||||
@@ -22,8 +22,8 @@
|
||||
namespace Hw {
|
||||
struct Cpu_memory_map;
|
||||
struct Virtualization_support;
|
||||
class Vendor;
|
||||
struct Tsc;
|
||||
class Vendor;
|
||||
class Lapic;
|
||||
}
|
||||
|
||||
|
||||
@@ -107,34 +107,172 @@ public:
|
||||
};
|
||||
|
||||
|
||||
struct Hw::Tsc
|
||||
class Hw::Lapic
|
||||
{
|
||||
/*
|
||||
* Provide serialized access to the Timestamp Counter
|
||||
*
|
||||
* See #5430 for more information.
|
||||
*/
|
||||
static Genode::uint64_t rdtsc()
|
||||
{
|
||||
Genode::uint32_t low, high;
|
||||
asm volatile(
|
||||
"lfence;"
|
||||
"rdtsc;"
|
||||
"lfence;"
|
||||
: "=a"(low), "=d"(high)
|
||||
:
|
||||
: "memory"
|
||||
);
|
||||
return (Genode::uint64_t)(high) << 32 | low;
|
||||
}
|
||||
|
||||
static bool invariant_tsc()
|
||||
private:
|
||||
static bool _has_tsc_dl()
|
||||
{
|
||||
using Cpu = Hw::X86_64_cpu;
|
||||
|
||||
Cpu::Cpuid_80000007_eax::access_t eax = Cpu::Cpuid_80000007_eax::read();
|
||||
return Cpu::Cpuid_80000007_eax::Invariant_tsc::get(eax);
|
||||
Cpu::Cpuid_1_ecx::access_t ecx = Cpu::Cpuid_1_ecx::read();
|
||||
return (bool)Cpu::Cpuid_1_ecx::Tsc_deadline::get(ecx);
|
||||
}
|
||||
|
||||
/*
|
||||
* Adapted from Christian Prochaska's and Alexander Boettcher's
|
||||
* implementation for Nova.
|
||||
*
|
||||
* For details, see Vol. 3B of the Intel SDM (September 2023):
|
||||
* 20.7.3 Determining the Processor Base Frequency
|
||||
*/
|
||||
static unsigned _read_tsc_freq()
|
||||
{
|
||||
using Cpu = Hw::X86_64_cpu;
|
||||
|
||||
if (Vendor::get_vendor_id() != Vendor::INTEL)
|
||||
return 0;
|
||||
|
||||
unsigned const model = Vendor::get_model();
|
||||
unsigned const family = Vendor::get_family();
|
||||
|
||||
enum
|
||||
{
|
||||
Cpu_id_clock = 0x15,
|
||||
Cpu_id_base_freq = 0x16
|
||||
};
|
||||
|
||||
Cpu::Cpuid_0_eax::access_t eax_0 = Cpu::Cpuid_0_eax::read();
|
||||
|
||||
/*
|
||||
* If CPUID leaf 15 is available, return the frequency reported there.
|
||||
*/
|
||||
if (eax_0 >= Cpu_id_clock) {
|
||||
Cpu::Cpuid_15_eax::access_t eax_15 = Cpu::Cpuid_15_eax::read();
|
||||
Cpu::Cpuid_15_ebx::access_t ebx_15 = Cpu::Cpuid_15_ebx::read();
|
||||
Cpu::Cpuid_15_ecx::access_t ecx_15 = Cpu::Cpuid_15_ecx::read();
|
||||
|
||||
if (eax_15 && ebx_15) {
|
||||
if (ecx_15)
|
||||
return static_cast<unsigned>(
|
||||
((Genode::uint64_t)(ecx_15) * ebx_15) / eax_15 / 1000
|
||||
);
|
||||
|
||||
if (family == 6) {
|
||||
if (model == 0x5c) /* Goldmont */
|
||||
return static_cast<unsigned>((19200ull * ebx_15) / eax_15);
|
||||
if (model == 0x55) /* Xeon */
|
||||
return static_cast<unsigned>((25000ull * ebx_15) / eax_15);
|
||||
}
|
||||
|
||||
if (family >= 6)
|
||||
return static_cast<unsigned>((24000ull * ebx_15) / eax_15);
|
||||
}
|
||||
}
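/*
 * Illustration (not part of the original patch): CPUID leaf 0x15 reports the
 * TSC/crystal-clock ratio as ebx/eax and, on newer parts, the crystal
 * frequency in ecx (Hz). For a hypothetical part reporting ecx = 38'400'000,
 * ebx = 150, eax = 2, the branch above yields
 *
 *   38'400'000 Hz * 150 / 2 = 2'880'000'000 Hz  ->  / 1000 = 2'880'000 kHz
 */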
|
||||
|
||||
|
||||
/*
|
||||
* Specific methods for family 6 models
|
||||
*/
|
||||
if (family == 6) {
|
||||
unsigned freq_tsc = 0U;
|
||||
|
||||
if (model == 0x2a ||
|
||||
model == 0x2d || /* Sandy Bridge */
|
||||
model >= 0x3a) /* Ivy Bridge and later */
|
||||
{
|
||||
Cpu::Platform_info::access_t platform_info = Cpu::Platform_info::read();
|
||||
Genode::uint64_t ratio = Cpu::Platform_info::Ratio::get(platform_info);
|
||||
freq_tsc = static_cast<unsigned>(ratio * 100000);
|
||||
} else if (model == 0x1a ||
|
||||
model == 0x1e ||
|
||||
model == 0x1f ||
|
||||
model == 0x2e || /* Nehalem */
|
||||
model == 0x25 ||
|
||||
model == 0x2c ||
|
||||
model == 0x2f) /* Xeon Westmere */
|
||||
{
|
||||
Cpu::Platform_info::access_t platform_info = Cpu::Platform_info::read();
|
||||
Genode::uint64_t ratio = Cpu::Platform_info::Ratio::get(platform_info);
|
||||
freq_tsc = static_cast<unsigned>(ratio * 133330);
|
||||
} else if (model == 0x17 || model == 0xf) { /* Core 2 */
|
||||
Cpu::Fsb_freq::access_t fsb_freq = Cpu::Fsb_freq::read();
|
||||
Genode::uint64_t freq_bus = Cpu::Fsb_freq::Speed::get(fsb_freq);
|
||||
|
||||
switch (freq_bus) {
|
||||
case 0b101: freq_bus = 100000; break;
|
||||
case 0b001: freq_bus = 133330; break;
|
||||
case 0b011: freq_bus = 166670; break;
|
||||
case 0b010: freq_bus = 200000; break;
|
||||
case 0b000: freq_bus = 266670; break;
|
||||
case 0b100: freq_bus = 333330; break;
|
||||
case 0b110: freq_bus = 400000; break;
|
||||
default: freq_bus = 0; break;
|
||||
}
|
||||
|
||||
Cpu::Platform_id::access_t platform_id = Cpu::Platform_id::read();
|
||||
Genode::uint64_t ratio = Cpu::Platform_id::Bus_ratio::get(platform_id);
|
||||
|
||||
freq_tsc = static_cast<unsigned>(freq_bus * ratio);
|
||||
}
|
||||
|
||||
if (!freq_tsc)
|
||||
Genode::warning("TSC: family 6 Intel platform info reports bus frequency of 0");
|
||||
else
|
||||
return freq_tsc;
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* Finally, using Processor Frequency Information for a rough estimate
|
||||
*/
|
||||
if (eax_0 >= Cpu_id_base_freq) {
|
||||
Cpu::Cpuid_16_eax::access_t base_mhz = Cpu::Cpuid_16_eax::read();
|
||||
|
||||
if (base_mhz) {
|
||||
Genode::warning("TSC: using processor base frequency: ", base_mhz, " MHz");
|
||||
return base_mhz * 1000;
|
||||
} else {
|
||||
Genode::warning("TSC: CPUID reported processor base frequency of 0");
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static unsigned _measure_tsc_freq()
|
||||
{
|
||||
const unsigned Tsc_fixed_value = 2400;
|
||||
|
||||
Genode::warning("TSC: calibration not yet implemented, using fixed value of ", Tsc_fixed_value, " MHz");
|
||||
/* TODO: implement TSC calibration on AMD */
|
||||
return Tsc_fixed_value * 1000;
|
||||
}
|
||||
|
||||
public:
|
||||
static Genode::uint64_t rdtsc()
|
||||
{
|
||||
Genode::uint32_t low, high;
|
||||
asm volatile("rdtsc" : "=a"(low), "=d"(high));
|
||||
return (Genode::uint64_t)(high) << 32 | low;
|
||||
}
|
||||
|
||||
static bool invariant_tsc()
|
||||
{
|
||||
using Cpu = Hw::X86_64_cpu;
|
||||
|
||||
Cpu::Cpuid_80000007_eax::access_t eax =
|
||||
Cpu::Cpuid_80000007_eax::read();
|
||||
return Cpu::Cpuid_80000007_eax::Invariant_tsc::get(eax);
|
||||
}
|
||||
|
||||
static unsigned tsc_freq()
|
||||
{
|
||||
unsigned freq = _read_tsc_freq();
|
||||
if (freq)
|
||||
return freq;
|
||||
else
|
||||
return _measure_tsc_freq();
|
||||
}
|
||||
};
|
||||
|
||||
struct Hw::Virtualization_support
|
||||
|
||||
@@ -21,7 +21,6 @@ namespace Hw {
|
||||
|
||||
using Genode::addr_t;
|
||||
using Genode::size_t;
|
||||
using Genode::uint32_t;
|
||||
using Genode::get_page_size;
|
||||
using Genode::get_page_size_log2;
|
||||
|
||||
|
||||
@@ -27,6 +27,7 @@
|
||||
using namespace Genode;
|
||||
|
||||
using Exit_config = Vm_connection::Exit_config;
|
||||
using Call_with_state = Vm_connection::Call_with_state;
|
||||
|
||||
|
||||
/****************************
|
||||
@@ -55,7 +56,8 @@ struct Hw_vcpu : Rpc_client<Vm_session::Native_vcpu>, Noncopyable
|
||||
|
||||
Hw_vcpu(Env &, Vm_connection &, Vcpu_handler_base &);
|
||||
|
||||
void with_state(auto const &);
|
||||
|
||||
void with_state(Call_with_state &);
|
||||
};
|
||||
|
||||
|
||||
@@ -70,7 +72,7 @@ Hw_vcpu::Hw_vcpu(Env &env, Vm_connection &vm, Vcpu_handler_base &handler)
|
||||
}
|
||||
|
||||
|
||||
void Hw_vcpu::with_state(auto const &fn)
|
||||
void Hw_vcpu::with_state(Call_with_state &cw)
|
||||
{
|
||||
if (Thread::myself() != _ep_handler) {
|
||||
error("vCPU state requested outside of vcpu_handler EP");
|
||||
@@ -78,7 +80,7 @@ void Hw_vcpu::with_state(auto const &fn)
|
||||
}
|
||||
Kernel::pause_vm(Capability_space::capid(_kernel_vcpu));
|
||||
|
||||
if (fn(_local_state()))
|
||||
if (cw.call_with_state(_local_state()))
|
||||
Kernel::run_vm(Capability_space::capid(_kernel_vcpu));
|
||||
}
|
||||
|
||||
@@ -88,7 +90,8 @@ Capability<Vm_session::Native_vcpu> Hw_vcpu::_create_vcpu(Vm_connection &vm,
|
||||
{
|
||||
Thread &tep { *reinterpret_cast<Thread *>(&handler.rpc_ep()) };
|
||||
|
||||
return vm.create_vcpu(tep.cap());
|
||||
return vm.with_upgrade([&] {
|
||||
return vm.call<Vm_session::Rpc_create_vcpu>(tep.cap()); });
|
||||
}
|
||||
|
||||
|
||||
@@ -96,10 +99,7 @@ Capability<Vm_session::Native_vcpu> Hw_vcpu::_create_vcpu(Vm_connection &vm,
|
||||
** vCPU API **
|
||||
**************/
|
||||
|
||||
void Vm_connection::Vcpu::_with_state(With_state::Ft const &fn)
|
||||
{
|
||||
static_cast<Hw_vcpu &>(_native_vcpu).with_state(fn);
|
||||
}
|
||||
void Vm_connection::Vcpu::_with_state(Call_with_state &cw) { static_cast<Hw_vcpu &>(_native_vcpu).with_state(cw); }
|
||||
|
||||
|
||||
Vm_connection::Vcpu::Vcpu(Vm_connection &vm, Allocator &alloc,
|
||||
|
||||
@@ -29,6 +29,7 @@
|
||||
using namespace Genode;
|
||||
|
||||
using Exit_config = Vm_connection::Exit_config;
|
||||
using Call_with_state = Vm_connection::Call_with_state;
|
||||
|
||||
|
||||
/****************************
|
||||
@@ -57,7 +58,7 @@ struct Hw_vcpu : Rpc_client<Vm_session::Native_vcpu>, Noncopyable
|
||||
|
||||
Hw_vcpu(Env &, Vm_connection &, Vcpu_handler_base &);
|
||||
|
||||
void with_state(auto const &fn);
|
||||
void with_state(Call_with_state &);
|
||||
|
||||
};
|
||||
|
||||
@@ -83,7 +84,7 @@ void Hw_vcpu::_run()
|
||||
}
|
||||
|
||||
|
||||
void Hw_vcpu::with_state(auto const &fn)
|
||||
void Hw_vcpu::with_state(Call_with_state &cw)
|
||||
{
|
||||
if (Thread::myself() != _ep_handler) {
|
||||
error("vCPU state requested outside of vcpu_handler EP");
|
||||
@@ -91,7 +92,7 @@ void Hw_vcpu::with_state(auto const &fn)
|
||||
}
|
||||
Kernel::pause_vm(Capability_space::capid(_kernel_vcpu));
|
||||
|
||||
if (fn(_local_state()))
|
||||
if(cw.call_with_state(_local_state()))
|
||||
_run();
|
||||
}
|
||||
|
||||
@@ -101,7 +102,8 @@ Capability<Vm_session::Native_vcpu> Hw_vcpu::_create_vcpu(Vm_connection &vm,
|
||||
{
|
||||
Thread &tep { *reinterpret_cast<Thread *>(&handler.rpc_ep()) };
|
||||
|
||||
return vm.create_vcpu(tep.cap());
|
||||
return vm.with_upgrade([&] {
|
||||
return vm.call<Vm_session::Rpc_create_vcpu>(tep.cap()); });
|
||||
}
|
||||
|
||||
|
||||
@@ -109,10 +111,7 @@ Capability<Vm_session::Native_vcpu> Hw_vcpu::_create_vcpu(Vm_connection &vm,
|
||||
** vCPU API **
|
||||
**************/
|
||||
|
||||
void Vm_connection::Vcpu::_with_state(With_state::Ft const &fn)
|
||||
{
|
||||
static_cast<Hw_vcpu &>(_native_vcpu).with_state(fn);
|
||||
}
|
||||
void Vm_connection::Vcpu::_with_state(Call_with_state &cw) { static_cast<Hw_vcpu &>(_native_vcpu).with_state(cw); }
|
||||
|
||||
|
||||
Vm_connection::Vcpu::Vcpu(Vm_connection &vm, Allocator &alloc,
|
||||
|
||||
@@ -25,7 +25,3 @@ HOST_INC_DIR += /usr/include/aarch64-linux-gnu
|
||||
#
|
||||
CC_OPT += -D_GNU_SOURCE
|
||||
|
||||
#
|
||||
# Enable definition of __STDC_HOSTED__
|
||||
#
|
||||
CC_OPT_FREESTANDING =
|
||||
|
||||
@@ -25,9 +25,7 @@ namespace Core { class Core_region_map; }
|
||||
|
||||
struct Core::Core_region_map : Region_map_mmap
|
||||
{
|
||||
static void init(Rpc_entrypoint &);
|
||||
|
||||
Core_region_map(Rpc_entrypoint &ep) : Region_map_mmap(false) { init(ep); }
|
||||
Core_region_map(Rpc_entrypoint &) : Region_map_mmap(false) { }
|
||||
};
|
||||
|
||||
#endif /* _CORE__INCLUDE__CORE_REGION_MAP_H_ */
|
||||
|
||||
@@ -29,8 +29,6 @@ namespace Core {
|
||||
struct Pager_entrypoint;
|
||||
|
||||
using Pager_capability = Capability<Pager_object>;
|
||||
|
||||
extern void init_page_fault_handling(Rpc_entrypoint &);
|
||||
}
|
||||
|
||||
|
||||
|
||||