mirror of
https://github.com/mmueller41/genode.git
synced 2026-01-21 12:32:56 +01:00
base: introduce Allocator::try_alloc
This patch changes the 'Allocator' interface to the use of 'Attempt' return values instead of using exceptions for propagating errors. To largely uphold compatibility with components using the original exception-based interface - in particular use cases where an 'Allocator' is passed to the 'new' operator - the traditional 'alloc' is still supported. But it exists merely as a wrapper around the new 'try_alloc'. Issue #4324
This commit is contained in:
committed by
Christian Helmuth
parent
9591e6caee
commit
dc39a8db62
@@ -24,10 +24,8 @@ Core_region_map::attach(Dataspace_capability ds_cap, size_t size,
|
||||
off_t offset, bool use_local_addr,
|
||||
Region_map::Local_addr, bool, bool)
|
||||
{
|
||||
using namespace Okl4;
|
||||
return _ep.apply(ds_cap, [&] (Dataspace_component *ds) -> void * {
|
||||
|
||||
auto lambda = [&] (Dataspace_component *ds) -> void *
|
||||
{
|
||||
if (!ds)
|
||||
throw Invalid_dataspace();
|
||||
|
||||
@@ -48,21 +46,25 @@ Core_region_map::attach(Dataspace_capability ds_cap, size_t size,
|
||||
}
|
||||
|
||||
/* allocate range in core's virtual address space */
|
||||
void *virt_addr;
|
||||
if (!platform().region_alloc().alloc(page_rounded_size, &virt_addr)) {
|
||||
error("could not allocate virtual address range in core of size ",
|
||||
page_rounded_size);
|
||||
return nullptr;
|
||||
}
|
||||
Range_allocator &virt_alloc = platform().region_alloc();
|
||||
return virt_alloc.try_alloc(page_rounded_size).convert<void *>(
|
||||
|
||||
/* map the dataspace's physical pages to corresponding virtual addresses */
|
||||
unsigned num_pages = page_rounded_size >> get_page_size_log2();
|
||||
if (!map_local(ds->phys_addr(), (addr_t)virt_addr, num_pages))
|
||||
return nullptr;
|
||||
return virt_addr;
|
||||
};
|
||||
[&] (void *virt_addr) -> void * {
|
||||
|
||||
return _ep.apply(ds_cap, lambda);
|
||||
/* map the dataspace's physical pages to virtual memory */
|
||||
unsigned num_pages = page_rounded_size >> get_page_size_log2();
|
||||
if (!map_local(ds->phys_addr(), (addr_t)virt_addr, num_pages))
|
||||
return nullptr;
|
||||
|
||||
return virt_addr;
|
||||
},
|
||||
|
||||
[&] (Range_allocator::Alloc_error) -> void * {
|
||||
error("could not allocate virtual address range in core of size ",
|
||||
page_rounded_size);
|
||||
return nullptr;
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
|
||||
|
||||
@@ -138,7 +138,7 @@ Irq_session_component::Irq_session_component(Range_allocator &irq_alloc,
|
||||
if (msi)
|
||||
throw Service_denied();
|
||||
|
||||
if (irq_alloc.alloc_addr(1, _irq_number).error()) {
|
||||
if (irq_alloc.alloc_addr(1, _irq_number).failed()) {
|
||||
error("unavailable IRQ ", Hex(_irq_number), " requested");
|
||||
throw Service_denied();
|
||||
}
|
||||
|
||||
@@ -189,52 +189,66 @@ Platform::Platform()
|
||||
|
||||
/* core log as ROM module */
|
||||
{
|
||||
void * core_local_ptr = nullptr;
|
||||
void * phys_ptr = nullptr;
|
||||
unsigned const pages = 1;
|
||||
size_t const log_size = pages << get_page_size_log2();
|
||||
unsigned const align = get_page_size_log2();
|
||||
|
||||
ram_alloc().alloc_aligned(log_size, &phys_ptr, get_page_size_log2());
|
||||
addr_t const phys_addr = reinterpret_cast<addr_t>(phys_ptr);
|
||||
ram_alloc().alloc_aligned(log_size, align).with_result(
|
||||
|
||||
/* let one page free after the log buffer */
|
||||
region_alloc().alloc_aligned(log_size, &core_local_ptr, get_page_size_log2());
|
||||
addr_t const core_local_addr = reinterpret_cast<addr_t>(core_local_ptr);
|
||||
[&] (void *phys) {
|
||||
addr_t const phys_addr = reinterpret_cast<addr_t>(phys);
|
||||
|
||||
map_local(phys_addr, core_local_addr, pages);
|
||||
memset(core_local_ptr, 0, log_size);
|
||||
region_alloc().alloc_aligned(log_size, align). with_result(
|
||||
|
||||
_rom_fs.insert(new (core_mem_alloc()) Rom_module(phys_addr, log_size,
|
||||
"core_log"));
|
||||
[&] (void *ptr) {
|
||||
|
||||
init_core_log(Core_log_range { core_local_addr, log_size } );
|
||||
map_local(phys_addr, (addr_t)ptr, pages);
|
||||
memset(ptr, 0, log_size);
|
||||
|
||||
_rom_fs.insert(new (core_mem_alloc())
|
||||
Rom_module(phys_addr, log_size, "core_log"));
|
||||
|
||||
init_core_log(Core_log_range { (addr_t)ptr, log_size } );
|
||||
},
|
||||
[&] (Range_allocator::Alloc_error) { }
|
||||
);
|
||||
},
|
||||
[&] (Range_allocator::Alloc_error) { }
|
||||
);
|
||||
}
|
||||
|
||||
/* export platform specific infos */
|
||||
{
|
||||
void * core_local_ptr = nullptr;
|
||||
void * phys_ptr = nullptr;
|
||||
unsigned const pages = 1;
|
||||
size_t const size = pages << get_page_size_log2();
|
||||
|
||||
if (ram_alloc().alloc_aligned(size, &phys_ptr, get_page_size_log2()).ok()) {
|
||||
addr_t const phys_addr = reinterpret_cast<addr_t>(phys_ptr);
|
||||
ram_alloc().alloc_aligned(size, get_page_size_log2()).with_result(
|
||||
|
||||
/* let one page free after the log buffer */
|
||||
region_alloc().alloc_aligned(size, &core_local_ptr, get_page_size_log2());
|
||||
addr_t const core_local_addr = reinterpret_cast<addr_t>(core_local_ptr);
|
||||
[&] (void *phys_ptr) {
|
||||
addr_t const phys_addr = reinterpret_cast<addr_t>(phys_ptr);
|
||||
|
||||
if (map_local(phys_addr, core_local_addr, pages)) {
|
||||
/* let one page free after the log buffer */
|
||||
region_alloc().alloc_aligned(size, get_page_size_log2()).with_result(
|
||||
|
||||
Genode::Xml_generator xml(reinterpret_cast<char *>(core_local_addr),
|
||||
size, "platform_info", [&] () {
|
||||
xml.node("kernel", [&] () { xml.attribute("name", "okl4"); });
|
||||
});
|
||||
[&] (void *core_local_ptr) {
|
||||
addr_t const core_local_addr = reinterpret_cast<addr_t>(core_local_ptr);
|
||||
|
||||
_rom_fs.insert(new (core_mem_alloc()) Rom_module(phys_addr, size,
|
||||
"platform_info"));
|
||||
}
|
||||
}
|
||||
if (map_local(phys_addr, core_local_addr, pages)) {
|
||||
|
||||
Xml_generator xml(reinterpret_cast<char *>(core_local_addr),
|
||||
size, "platform_info", [&] () {
|
||||
xml.node("kernel", [&] () { xml.attribute("name", "okl4"); });
|
||||
});
|
||||
|
||||
_rom_fs.insert(new (core_mem_alloc()) Rom_module(phys_addr, size,
|
||||
"platform_info"));
|
||||
}
|
||||
},
|
||||
[&] (Range_allocator::Alloc_error) { }
|
||||
);
|
||||
},
|
||||
[&] (Range_allocator::Alloc_error) { }
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -38,31 +38,41 @@ void Ram_dataspace_factory::_clear_ds (Dataspace_component &ds)
|
||||
{
|
||||
size_t page_rounded_size = (ds.size() + get_page_size() - 1) & get_page_mask();
|
||||
|
||||
struct Guard
|
||||
{
|
||||
Range_allocator &virt_alloc;
|
||||
struct { void *virt_ptr = nullptr; };
|
||||
|
||||
Guard(Range_allocator &virt_alloc) : virt_alloc(virt_alloc) { }
|
||||
|
||||
~Guard() { if (virt_ptr) virt_alloc.free(virt_ptr); }
|
||||
|
||||
} guard(platform().region_alloc());
|
||||
|
||||
/* allocate range in core's virtual address space */
|
||||
void *virt_addr;
|
||||
if (!platform().region_alloc().alloc(page_rounded_size, &virt_addr)) {
|
||||
error("could not allocate virtual address range in core of size ",
|
||||
page_rounded_size);
|
||||
platform().region_alloc().try_alloc(page_rounded_size).with_result(
|
||||
[&] (void *ptr) { guard.virt_ptr = ptr; },
|
||||
[&] (Range_allocator::Alloc_error e) {
|
||||
error("could not allocate virtual address range in core of size ",
|
||||
page_rounded_size, ", error=", e); });
|
||||
|
||||
if (!guard.virt_ptr)
|
||||
return;
|
||||
}
|
||||
|
||||
/* map the dataspace's physical pages to corresponding virtual addresses */
|
||||
size_t num_pages = page_rounded_size >> get_page_size_log2();
|
||||
if (!map_local(ds.phys_addr(), (addr_t)virt_addr, num_pages)) {
|
||||
error("core-local memory mapping failed, error=", Okl4::L4_ErrorCode());
|
||||
if (!map_local(ds.phys_addr(), (addr_t)guard.virt_ptr, num_pages)) {
|
||||
error("core-local memory mapping failed");
|
||||
return;
|
||||
}
|
||||
|
||||
/* clear dataspace */
|
||||
size_t num_longwords = page_rounded_size/sizeof(long);
|
||||
for (long *dst = (long *)virt_addr; num_longwords--;)
|
||||
for (long *dst = (long *)guard.virt_ptr; num_longwords--;)
|
||||
*dst++ = 0;
|
||||
|
||||
/* unmap dataspace from core */
|
||||
if (!unmap_local((addr_t)virt_addr, num_pages))
|
||||
error("could not unmap core-local address range at ", virt_addr, ", "
|
||||
if (!unmap_local((addr_t)guard.virt_ptr, num_pages))
|
||||
error("could not unmap core-local address range at ", guard.virt_ptr, ", "
|
||||
"error=", Okl4::L4_ErrorCode());
|
||||
|
||||
/* free core's virtual address space */
|
||||
platform().region_alloc().free(virt_addr, page_rounded_size);
|
||||
}
|
||||
|
||||
Reference in New Issue
Block a user