base: introduce Allocator::try_alloc
This patch changes the 'Allocator' interface to use 'Attempt' return values instead of exceptions for propagating errors. To largely uphold compatibility with components that use the original exception-based interface - in particular use cases where an 'Allocator' is passed to the 'new' operator - the traditional 'alloc' is still supported, but it exists merely as a wrapper around the new 'try_alloc'. Issue #4324
Commit dc39a8db62 (parent 9591e6caee), committed by Christian Helmuth
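For orientation, the compatibility wrapper described in the commit message might look roughly as follows. This is a hypothetical sketch, not code from this commit; in particular, the mapping of 'Alloc_error' values to exceptions is an assumption based on the error and exception names that appear in the hunks below.

	/* hypothetical sketch of the exception-based 'alloc' wrapper
	 * around 'try_alloc' (exception mapping is an assumption) */
	void *alloc(size_t size)
	{
		return try_alloc(size).convert<void *>(
			[&] (void *ptr) { return ptr; },
			[&] (Alloc_error e) -> void * {
				switch (e) {
				case Alloc_error::OUT_OF_RAM:  throw Out_of_ram();
				case Alloc_error::OUT_OF_CAPS: throw Out_of_caps();
				case Alloc_error::DENIED:      break;
				}
				throw Out_of_memory();  /* assumed exception type */
			});
	}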
@@ -30,19 +30,16 @@ extern unsigned _bss_end;
 void * Platform::Ram_allocator::alloc_aligned(size_t size, unsigned align)
 {
 	using namespace Genode;
 	using namespace Hw;
 
-	void * ret;
-	assert(Base::alloc_aligned(round_page(size), &ret,
-	                           max(align, get_page_size_log2())).ok());
-	return ret;
-}
+	return Base::alloc_aligned(Hw::round_page(size),
+	                           max(align, get_page_size_log2())).convert<void *>(
 
+		[&] (void *ptr) { return ptr; },
 
-bool Platform::Ram_allocator::alloc(size_t size, void **out_addr)
-{
-	*out_addr = alloc_aligned(size, 0);
-	return true;
+		[&] (Ram_allocator::Alloc_error e) -> void *
+		{
+			error("bootstrap RAM allocation failed, error=", e);
+			assert(false);
+		});
 }
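The hunk above uses the first of two 'Attempt' idioms that recur throughout this patch: 'convert' for value-producing paths and 'with_result' for side-effect paths. A condensed sketch of both, assuming 'Alloc_result' is an 'Attempt<void *, Alloc_error>' as introduced by this commit ('consume' is a placeholder, not part of the patch):

	static void consume(void *);  /* placeholder for illustration */

	/* value-producing path: 'convert' unifies both outcomes into one type */
	static void *alloc_or_null(Genode::Allocator &alloc, Genode::size_t size)
	{
		return alloc.try_alloc(size).convert<void *>(
			[&] (void *ptr)                                { return ptr; },
			[&] (Genode::Allocator::Alloc_error) -> void * { return nullptr; });
	}

	/* side-effect path: 'with_result' dispatches to one of two handlers */
	static void alloc_and_consume(Genode::Allocator &alloc, Genode::size_t size)
	{
		alloc.try_alloc(size).with_result(
			[&] (void *ptr)                        { consume(ptr); },
			[&] (Genode::Allocator::Alloc_error e) {
				Genode::error("allocation failed: ", e); });
	}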
@@ -53,8 +53,13 @@ class Bootstrap::Platform
 		};
 
-		class Ram_allocator : public Genode::Allocator_avl_base
+		class Ram_allocator : private Genode::Allocator_avl_base
 		{
+			/*
+			 * 'Ram_allocator' is derived from 'Allocator_avl_base' to access
+			 * the protected 'slab_block_size'.
+			 */
+
 			private:
 
 				using Base = Genode::Allocator_avl_base;
@@ -73,8 +78,7 @@ class Bootstrap::Platform
 				{ }
 
 				void * alloc_aligned(size_t size, unsigned align);
-				bool alloc(size_t size, void **out_addr) override;
-				void * alloc(size_t size) { return Allocator::alloc(size); }
+				void * alloc(size_t size) { return alloc_aligned(size, 0); }
 
 				void add(Memory_region const &);
 				void remove(Memory_region const &);
@@ -29,12 +29,15 @@ Core_region_map::attach(Dataspace_capability ds_cap, size_t size,
                         off_t offset, bool use_local_addr,
                         Region_map::Local_addr, bool, bool writeable)
 {
-	auto lambda = [&] (Dataspace_component *ds) -> Local_addr {
-		if (!ds)
+	return _ep.apply(ds_cap, [&] (Dataspace_component *ds_ptr) -> Local_addr {
+
+		if (!ds_ptr)
 			throw Invalid_dataspace();
 
+		Dataspace_component &ds = *ds_ptr;
+
 		if (size == 0)
-			size = ds->size();
+			size = ds.size();
 
 		size_t page_rounded_size = (size + get_page_size() - 1) & get_page_mask();
 
@@ -48,10 +51,13 @@ Core_region_map::attach(Dataspace_capability ds_cap, size_t size,
 			return nullptr;
 		}
 
+		unsigned const align = get_page_size_log2();
+
 		/* allocate range in core's virtual address space */
-		void *virt_addr;
-		if (!platform().region_alloc().alloc_aligned(page_rounded_size, &virt_addr,
-		                                             get_page_size_log2()).ok()) {
+		Allocator::Alloc_result virt =
+			platform().region_alloc().alloc_aligned(page_rounded_size, align);
+
+		if (virt.failed()) {
 			error("could not allocate virtual address range in core of size ",
 			      page_rounded_size);
 			return nullptr;
@@ -61,16 +67,23 @@ Core_region_map::attach(Dataspace_capability ds_cap, size_t size,
 
 		/* map the dataspace's physical pages to corresponding virtual addresses */
 		unsigned num_pages = page_rounded_size >> get_page_size_log2();
-		Page_flags const flags { (writeable && ds->writable()) ? RW : RO,
+		Page_flags const flags { (writeable && ds.writable()) ? RW : RO,
 		                         NO_EXEC, KERN, GLOBAL,
-		                         ds->io_mem() ? DEVICE : RAM,
-		                         ds->cacheability() };
-		if (!map_local(ds->phys_addr(), (addr_t)virt_addr, num_pages, flags))
-			return nullptr;
+		                         ds.io_mem() ? DEVICE : RAM,
+		                         ds.cacheability() };
 
-		return virt_addr;
-	};
-	return _ep.apply(ds_cap, lambda);
+		return virt.convert<Local_addr>(
+
+			[&] (void *virt_addr) -> void * {
+				if (map_local(ds.phys_addr(), (addr_t)virt_addr, num_pages, flags))
+					return virt_addr;
+
+				platform().region_alloc().free(virt_addr, page_rounded_size);
+				return nullptr; },
+
+			[&] (Allocator::Alloc_error) {
+				return nullptr; });
+	});
 }
@@ -57,8 +57,8 @@ class Genode::Cpu_thread_allocator : public Allocator
 			** Allocator interface **
 			*************************/
 
-			bool alloc(size_t size, void **out_addr) override {
-				return _alloc.alloc(size, out_addr); }
+			Alloc_result try_alloc(size_t size) override {
+				return _alloc.alloc(size); }
 
 			void free(void *addr, size_t size) override {
 				_alloc.free(addr, size); }
@@ -78,7 +78,7 @@ Irq_session_component::Irq_session_component(Range_allocator &irq_alloc,
 	}
 
 	/* allocate interrupt */
-	if (_irq_alloc.alloc_addr(1, _irq_number).error()) {
+	if (_irq_alloc.alloc_addr(1, _irq_number).failed()) {
 		error("unavailable interrupt ", _irq_number, " requested");
 		throw Service_denied();
 	}
@@ -35,10 +35,41 @@ using namespace Kernel;
 
 void Thread::_ipc_alloc_recv_caps(unsigned cap_count)
 {
-	Genode::Allocator &slab = pd().platform_pd().capability_slab();
+	using Allocator = Genode::Allocator;
+
+	Allocator &slab = pd().platform_pd().capability_slab();
 	for (unsigned i = 0; i < cap_count; i++) {
-		if (_obj_id_ref_ptr[i] == nullptr)
-			_obj_id_ref_ptr[i] = slab.alloc(sizeof(Object_identity_reference));
+		if (_obj_id_ref_ptr[i] != nullptr)
+			continue;
+
+		slab.try_alloc(sizeof(Object_identity_reference)).with_result(
+
+			[&] (void *ptr) {
+				_obj_id_ref_ptr[i] = ptr; },
+
+			[&] (Allocator::Alloc_error e) {
+
+				switch (e) {
+				case Allocator::Alloc_error::DENIED:
+
+					/*
+					 * Slab is exhausted, reflect condition to the client.
+					 */
+					throw Genode::Out_of_ram();
+
+				case Allocator::Alloc_error::OUT_OF_CAPS:
+				case Allocator::Alloc_error::OUT_OF_RAM:
+
+					/*
+					 * These conditions cannot happen because the slab
+					 * does not try to grow automatically. It is
+					 * explicitly expanded by the client as response to
+					 * the 'Out_of_ram' condition above.
+					 */
+					Genode::raw("unexpected recv_caps allocation failure");
+				}
+			}
+		);
 	}
 	_ipc_rcv_caps = cap_count;
 }
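The error policy above is worth spelling out: 'DENIED' signals an exhausted slab and is reflected to the client as 'Out_of_ram', which prompts the client to enlarge the slab explicitly, whereas 'OUT_OF_RAM' and 'OUT_OF_CAPS' cannot occur here because the capability slab never grows on its own (compare the 'upgrade_slab' hunk further below).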
@@ -112,28 +112,46 @@ void Platform::_init_platform_info()
 {
 	unsigned const pages    = 1;
 	size_t const rom_size   = pages << get_page_size_log2();
-	void *phys_ptr          = nullptr;
-	void *virt_ptr          = nullptr;
 	const char *rom_name    = "platform_info";
 
-	if (!ram_alloc().alloc(get_page_size(), &phys_ptr)) {
-		error("could not setup platform_info ROM - ram allocation error");
-		return;
-	}
+	struct Guard
+	{
+		Range_allocator &phys_alloc;
+		Range_allocator &virt_alloc;
 
-	if (!region_alloc().alloc(rom_size, &virt_ptr)) {
-		error("could not setup platform_info ROM - region allocation error");
-		ram_alloc().free(phys_ptr);
-		return;
-	}
+		struct {
+			void *phys_ptr = nullptr;
+			void *virt_ptr = nullptr;
+		};
 
-	addr_t const phys_addr = reinterpret_cast<addr_t>(phys_ptr);
-	addr_t const virt_addr = reinterpret_cast<addr_t>(virt_ptr);
+		Guard(Range_allocator &phys_alloc, Range_allocator &virt_alloc)
+		: phys_alloc(phys_alloc), virt_alloc(virt_alloc) { }
+
+		~Guard()
+		{
+			if (phys_ptr) phys_alloc.free(phys_ptr);
+			if (virt_ptr) virt_alloc.free(virt_ptr);
+		}
+	} guard { ram_alloc(), region_alloc() };
+
+	ram_alloc().try_alloc(get_page_size()).with_result(
+		[&] (void *ptr) { guard.phys_ptr = ptr; },
+		[&] (Allocator::Alloc_error) {
+			error("could not setup platform_info ROM - RAM allocation error"); });
+
+	region_alloc().try_alloc(rom_size).with_result(
+		[&] (void *ptr) { guard.virt_ptr = ptr; },
+		[&] (Allocator::Alloc_error) {
+			error("could not setup platform_info ROM - region allocation error"); });
+
+	if (!guard.phys_ptr || !guard.virt_ptr)
+		return;
+
+	addr_t const phys_addr = reinterpret_cast<addr_t>(guard.phys_ptr);
+	addr_t const virt_addr = reinterpret_cast<addr_t>(guard.virt_ptr);
 
 	if (!map_local(phys_addr, virt_addr, pages, Hw::PAGE_FLAGS_KERN_DATA)) {
 		error("could not setup platform_info ROM - map error");
-		region_alloc().free(virt_ptr);
-		ram_alloc().free(phys_ptr);
 		return;
 	}
@@ -156,10 +174,11 @@ void Platform::_init_platform_info()
 		return;
 	}
 
-	region_alloc().free(virt_ptr);
-
 	_rom_fs.insert(
 		new (core_mem_alloc()) Rom_module(phys_addr, rom_size, rom_name));
+
+	/* keep phys allocation but let guard revert virt allocation */
+	guard.phys_ptr = nullptr;
 }
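The 'Guard' above replaces the hand-written cleanup of the removed code: every error path simply returns, and the guard's destructor frees whatever allocations are still recorded. On success, clearing 'guard.phys_ptr' hands ownership of the physical allocation to the inserted 'Rom_module' while 'virt_ptr' stays set, so the scratch virtual range is still reverted.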
@@ -203,25 +222,32 @@ Platform::Platform()
 
 	/* core log as ROM module */
 	{
-		void * core_local_ptr = nullptr;
-		void * phys_ptr       = nullptr;
 		unsigned const pages  = 1;
 		size_t const log_size = pages << get_page_size_log2();
+		unsigned const align  = get_page_size_log2();
 
-		ram_alloc().alloc_aligned(log_size, &phys_ptr, get_page_size_log2());
-		addr_t const phys_addr = reinterpret_cast<addr_t>(phys_ptr);
+		ram_alloc().alloc_aligned(log_size, align).with_result(
 
-		/* let one page free after the log buffer */
-		region_alloc().alloc_aligned(log_size, &core_local_ptr, get_page_size_log2());
-		addr_t const core_local_addr = reinterpret_cast<addr_t>(core_local_ptr);
+			[&] (void *phys) {
+				addr_t const phys_addr = reinterpret_cast<addr_t>(phys);
 
-		map_local(phys_addr, core_local_addr, pages);
-		memset(core_local_ptr, 0, log_size);
+				/* let one page free after the log buffer */
+				region_alloc().alloc_aligned(log_size, align).with_result(
 
-		_rom_fs.insert(new (core_mem_alloc()) Rom_module(phys_addr, log_size,
-		                                                 "core_log"));
+					[&] (void *ptr) {
 
-		init_core_log(Core_log_range { core_local_addr, log_size } );
+						map_local(phys_addr, (addr_t)ptr, pages);
+						memset(ptr, 0, log_size);
+
+						_rom_fs.insert(new (core_mem_alloc())
+						               Rom_module(phys_addr, log_size, "core_log"));
+
+						init_core_log(Core_log_range { (addr_t)ptr, log_size } );
+					},
+					[&] (Range_allocator::Alloc_error) { /* ignored */ }
+				);
+			},
+			[&] (Range_allocator::Alloc_error) { }
+		);
 	}
 
 	class Idle_thread_trace_source : public Trace::Source::Info_accessor,
@@ -37,11 +37,16 @@ Core_mem_allocator &Hw::Address_space::_cma()
 
 void *Hw::Address_space::_table_alloc()
 {
-	void * ret = nullptr;
-	if (!_cma().alloc_aligned(sizeof(Page_table), (void**)&ret,
-	                          Page_table::ALIGNM_LOG2).ok())
-		throw Insufficient_ram_quota();
-	return ret;
+	unsigned const align = Page_table::ALIGNM_LOG2;
+
+	return _cma().alloc_aligned(sizeof(Page_table), align).convert<void *>(
+
+		[&] (void *ptr) {
+			return ptr; },
+
+		[&] (Range_allocator::Alloc_error) -> void * {
+			/* XXX distinguish error conditions */
+			throw Insufficient_ram_quota(); });
 }
@@ -134,10 +139,15 @@ Cap_space::Cap_space() : _slab(nullptr, &_initial_sb) { }
 
 void Cap_space::upgrade_slab(Allocator &alloc)
 {
-	void * block = nullptr;
-	if (!alloc.alloc(SLAB_SIZE, &block))
-		throw Out_of_ram();
-	_slab.insert_sb(block);
+	alloc.try_alloc(SLAB_SIZE).with_result(
+
+		[&] (void *ptr) {
+			_slab.insert_sb(ptr); },
+
+		[&] (Allocator::Alloc_error) {
+			/* XXX distinguish error conditions */
+			throw Out_of_ram();
+		});
 }
@@ -71,13 +71,18 @@ Platform_thread::Platform_thread(Label const &label, Native_utcb &utcb)
 	_kobj(_kobj.CALLED_FROM_CORE, _label.string())
 {
 	/* create UTCB for a core thread */
-	void *utcb_phys;
-	if (!platform().ram_alloc().alloc(sizeof(Native_utcb), &utcb_phys)) {
-		error("failed to allocate UTCB");
-		throw Out_of_ram();
-	}
-	map_local((addr_t)utcb_phys, (addr_t)_utcb_core_addr,
-	          sizeof(Native_utcb) / get_page_size());
+	platform().ram_alloc().try_alloc(sizeof(Native_utcb)).with_result(
+
+		[&] (void *utcb_phys) {
+			map_local((addr_t)utcb_phys, (addr_t)_utcb_core_addr,
+			          sizeof(Native_utcb) / get_page_size());
+		},
+		[&] (Range_allocator::Alloc_error) {
+			error("failed to allocate UTCB");
+			/* XXX distinguish error conditions */
+			throw Out_of_ram();
+		}
+	);
 }
@@ -33,30 +33,40 @@ void Ram_dataspace_factory::_clear_ds (Dataspace_component &ds)
 {
 	size_t page_rounded_size = (ds.size() + get_page_size() - 1) & get_page_mask();
 
+	struct Guard
+	{
+		Range_allocator &virt_alloc;
+		struct { void *virt_ptr = nullptr; };
+
+		Guard(Range_allocator &virt_alloc) : virt_alloc(virt_alloc) { }
+
+		~Guard() { if (virt_ptr) virt_alloc.free(virt_ptr); }
+
+	} guard(platform().region_alloc());
+
 	/* allocate range in core's virtual address space */
-	void *virt_addr;
-	if (!platform().region_alloc().alloc(page_rounded_size, &virt_addr)) {
-		error("could not allocate virtual address range in core of size ",
-		      page_rounded_size);
+	platform().region_alloc().try_alloc(page_rounded_size).with_result(
+		[&] (void *ptr) { guard.virt_ptr = ptr; },
+		[&] (Range_allocator::Alloc_error e) {
+			error("could not allocate virtual address range in core of size ",
+			      page_rounded_size, ", error=", e); });
+
+	if (!guard.virt_ptr)
 		return;
-	}
 
 	/* map the dataspace's physical pages to corresponding virtual addresses */
 	size_t num_pages = page_rounded_size >> get_page_size_log2();
-	if (!map_local(ds.phys_addr(), (addr_t)virt_addr, num_pages)) {
+	if (!map_local(ds.phys_addr(), (addr_t)guard.virt_ptr, num_pages)) {
 		error("core-local memory mapping failed");
 		return;
 	}
 
 	/* dependent on the architecture, cache maintenance might be necessary */
-	Cpu::clear_memory_region((addr_t)virt_addr, page_rounded_size,
+	Cpu::clear_memory_region((addr_t)guard.virt_ptr, page_rounded_size,
 	                         ds.cacheability() != CACHED);
 
 	/* unmap dataspace from core */
-	if (!unmap_local((addr_t)virt_addr, num_pages))
-		error("could not unmap core-local address range at ", virt_addr);
-
-	/* free core's virtual address space */
-	platform().region_alloc().free(virt_addr, page_rounded_size);
+	if (!unmap_local((addr_t)guard.virt_ptr, num_pages))
+		error("could not unmap core-local address range at ", guard.virt_ptr);
 }
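Note that the old trailing 'free' of the virtual range is gone: with the guard owning 'virt_ptr', the range is now released on every exit path, including the early error returns that previously leaked it.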
@@ -86,22 +86,28 @@ class Genode::Rpc_cap_factory
 		{
 			Mutex::Guard guard(_mutex);
 
-			/* allocate kernel object */
-			Kobject * obj;
-			if (!_slab.alloc(sizeof(Kobject), (void**)&obj))
-				throw Allocator::Out_of_memory();
-			construct_at<Kobject>(obj, ep);
+			return _slab.try_alloc(sizeof(Kobject)).convert<Native_capability>(
 
-			if (!obj->cap.valid()) {
-				raw("Invalid entrypoint ", (addr_t)Capability_space::capid(ep),
-				    " for allocating a capability!");
-				destroy(&_slab, obj);
-				return Native_capability();
-			}
+				[&] (void *ptr) {
 
-			/* store it in the list and return result */
-			_list.insert(obj);
-			return obj->cap;
+					/* create kernel object */
+					Kobject &obj = *construct_at<Kobject>(ptr, ep);
+
+					if (!obj.cap.valid()) {
+						raw("Invalid entrypoint ", (addr_t)Capability_space::capid(ep),
+						    " for allocating a capability!");
+						destroy(&_slab, &obj);
+						return Native_capability();
+					}
+
+					/* store it in the list and return result */
+					_list.insert(&obj);
+					return obj.cap;
+				},
+				[&] (Allocator::Alloc_error) -> Native_capability {
+					/* XXX distinguish error conditions */
+					throw Allocator::Out_of_memory();
+				});
 		}
 
 		void free(Native_capability cap)
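One C++ detail in this hunk: the error lambda carries an explicit '-> Native_capability' return type because its body only throws. Without the annotation, its return type would be deduced as 'void', which 'convert<Native_capability>' could not unify with the success branch.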
@@ -43,16 +43,20 @@ void Genode::platform_add_local_services(Rpc_entrypoint &ep,
 	          Hw::Mm::hypervisor_exception_vector().size / get_page_size(),
 	          Hw::PAGE_FLAGS_KERN_TEXT);
 
-	void * stack = nullptr;
-	assert(platform().ram_alloc().alloc_aligned(Hw::Mm::hypervisor_stack().size,
-	                                            (void**)&stack,
-	                                            get_page_size_log2()).ok());
-	map_local((addr_t)stack,
-	          Hw::Mm::hypervisor_stack().base,
-	          Hw::Mm::hypervisor_stack().size / get_page_size(),
-	          Hw::PAGE_FLAGS_KERN_DATA);
+	platform().ram_alloc().alloc_aligned(Hw::Mm::hypervisor_stack().size,
+	                                     get_page_size_log2()).with_result(
+		[&] (void *stack) {
+			map_local((addr_t)stack,
+			          Hw::Mm::hypervisor_stack().base,
+			          Hw::Mm::hypervisor_stack().size / get_page_size(),
+			          Hw::PAGE_FLAGS_KERN_DATA);
 
-	static Vm_root vm_root(ep, sh, core_env().ram_allocator(),
-	                       core_env().local_rm(), trace_sources);
-	static Core_service<Vm_session_component> vm_service(services, vm_root);
+			static Vm_root vm_root(ep, sh, core_env().ram_allocator(),
+			                       core_env().local_rm(), trace_sources);
+			static Core_service<Vm_session_component> vm_service(services, vm_root);
+		},
+		[&] (Range_allocator::Alloc_error) {
+			warning("failed to allocate hypervisor stack for VM service");
+		}
+	);
 }
@@ -73,14 +73,17 @@ void Vm_session_component::_detach_vm_memory(addr_t vm_addr, size_t size)
 
 void * Vm_session_component::_alloc_table()
 {
-	void * table;
 	/* get some aligned space for the translation table */
-	if (!cma().alloc_aligned(sizeof(Board::Vm_page_table), (void**)&table,
-	                         Board::Vm_page_table::ALIGNM_LOG2).ok()) {
-		error("failed to allocate kernel object");
-		throw Insufficient_ram_quota();
-	}
-	return table;
+	return cma().alloc_aligned(sizeof(Board::Vm_page_table),
+	                           Board::Vm_page_table::ALIGNM_LOG2).convert<void *>(
+
+		[&] (void *table_ptr) {
+			return table_ptr; },
+
+		[&] (Range_allocator::Alloc_error) -> void * {
+			/* XXX handle individual error conditions */
+			error("failed to allocate kernel object");
+			throw Insufficient_ram_quota(); }
+	);
 }