base: introduce Allocator::try_alloc

This patch changes the 'Allocator' interface to use 'Attempt'
return values instead of exceptions for propagating errors.

To largely uphold compatibility with components using the original
exception-based interface - in particular use cases where an 'Allocator'
is passed to the 'new' operator - the traditional 'alloc' is still
supported. But it exists merely as a wrapper around the new
'try_alloc'.

Issue #4324
Norman Feske
2021-11-10 12:01:32 +01:00
committed by Christian Helmuth
parent 9591e6caee
commit dc39a8db62
102 changed files with 2128 additions and 1710 deletions
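
As a quick orientation - an illustrative sketch only, not part of the patch - a
caller of the reworked interface inspects the 'Attempt'-based 'Alloc_result'
via 'with_result' or 'convert', while the unchanged 'alloc' still throws:

#include <base/allocator.h>
#include <base/log.h>

/* hypothetical caller-side sketch of the reworked interface */
static void example(Genode::Allocator &alloc)
{
	using namespace Genode;

	/* new style: errors are returned, no exception escapes 'try_alloc' */
	alloc.try_alloc(4096).with_result(
		[&] (void *ptr) {
			/* use the block ... */
			alloc.free(ptr, 4096); },
		[&] (Allocator::Alloc_error e) {
			/* 'Alloc_error' is printable via the 'print' overload added by this patch */
			error("allocation failed: ", e); });

	/* traditional style: 'alloc' wraps 'try_alloc' and throws on error */
	try {
		void *ptr = alloc.alloc(4096);
		alloc.free(ptr, 4096);
	}
	catch (Out_of_ram)        { /* ... */ }
	catch (Out_of_caps)       { /* ... */ }
	catch (Allocator::Denied) { /* ... */ }
}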

View File

@@ -18,6 +18,7 @@
#include <base/stdint.h>
#include <base/exception.h>
#include <base/quota_guard.h>
#include <base/ram_allocator.h>
namespace Genode {
@@ -61,7 +62,14 @@ struct Genode::Allocator : Deallocator
/**
* Exception type
*/
typedef Out_of_ram Out_of_memory;
using Out_of_memory = Out_of_ram;
using Denied = Ram_allocator::Denied;
/**
* Return type of 'try_alloc'
*/
using Alloc_error = Ram_allocator::Alloc_error;
using Alloc_result = Attempt<void *, Alloc_error>;
/**
* Destructor
@@ -74,32 +82,8 @@ struct Genode::Allocator : Deallocator
* \param size block size to allocate
* \param out_addr resulting pointer to the new block,
* undefined in the error case
*
* \throw Out_of_ram
* \throw Out_of_caps
*
* \return true on success
*/
virtual bool alloc(size_t size, void **out_addr) = 0;
/**
* Allocate typed block
*
* This template allocates a typed block returned as a pointer to
* a non-void type. By providing this method, we prevent the
* compiler from warning us about "dereferencing type-punned
* pointer will break strict-aliasing rules".
*
* \throw Out_of_ram
* \throw Out_of_caps
*/
template <typename T> bool alloc(size_t size, T **out_addr)
{
void *addr = 0;
bool ret = alloc(size, &addr);
*out_addr = (T *)addr;
return ret;
}
virtual Alloc_result try_alloc(size_t size) = 0;
/**
* Return total amount of backing store consumed by the allocator
@@ -111,6 +95,19 @@ struct Genode::Allocator : Deallocator
*/
virtual size_t overhead(size_t size) const = 0;
/**
* Raise exception according to the 'error' value
*/
static void throw_alloc_error(Alloc_error error) __attribute__((noreturn))
{
switch (error) {
case Alloc_error::OUT_OF_RAM: throw Out_of_ram();
case Alloc_error::OUT_OF_CAPS: throw Out_of_caps();
case Alloc_error::DENIED: break;
}
throw Denied();
}
/**
* Allocate block and signal error as an exception
*
@@ -118,16 +115,16 @@ struct Genode::Allocator : Deallocator
*
* \throw Out_of_ram
* \throw Out_of_caps
* \throw Denied
*
* \return pointer to the new block
*/
void *alloc(size_t size)
{
void *result = 0;
if (!alloc(size, &result))
throw Out_of_memory();
return result;
return try_alloc(size).convert<void *>(
[&] (void *ptr) { return ptr; },
[&] (Alloc_error error) -> void * {
throw_alloc_error(error); });
}
};
@@ -139,32 +136,21 @@ struct Genode::Range_allocator : Allocator
*/
virtual ~Range_allocator() { }
/**
* Return type of range-management operations
*/
struct Range_ok { };
using Range_result = Attempt<Range_ok, Alloc_error>;
/**
* Add free address range to allocator
*/
virtual int add_range(addr_t base, size_t size) = 0;
virtual Range_result add_range(addr_t base, size_t size) = 0;
/**
* Remove address range from allocator
*/
virtual int remove_range(addr_t base, size_t size) = 0;
/**
* Return value of allocation functons
*
* 'OK' on success, or
* 'OUT_OF_METADATA' if meta-data allocation failed, or
* 'RANGE_CONFLICT' if no fitting address range is found
*/
struct Alloc_return
{
enum Value { OK = 0, OUT_OF_METADATA = -1, RANGE_CONFLICT = -2 };
Value const value;
Alloc_return(Value value) : value(value) { }
bool ok() const { return value == OK; }
bool error() const { return !ok(); }
};
virtual Range_result remove_range(addr_t base, size_t size) = 0;
struct Range { addr_t start, end; };
@@ -172,21 +158,18 @@ struct Genode::Range_allocator : Allocator
* Allocate block
*
* \param size size of new block
* \param out_addr start address of new block,
* undefined in the error case
* \param align alignment of new block specified
* as the power of two
* \param range address-range constraint for the allocation
*/
virtual Alloc_return alloc_aligned(size_t size, void **out_addr,
unsigned align, Range range) = 0;
virtual Alloc_result alloc_aligned(size_t size, unsigned align, Range range) = 0;
/**
* Allocate block without constraining the address range
*/
Alloc_return alloc_aligned(size_t size, void **out_addr, unsigned align)
Alloc_result alloc_aligned(size_t size, unsigned align)
{
return alloc_aligned(size, out_addr, align, Range { .start = 0, .end = ~0UL });
return alloc_aligned(size, align, Range { .start = 0, .end = ~0UL });
}
/**
@@ -194,12 +177,8 @@ struct Genode::Range_allocator : Allocator
*
* \param size size of new block
* \param addr desired address of block
*
* \return 'ALLOC_OK' on success, or
* 'OUT_OF_METADATA' if meta-data allocation failed, or
* 'RANGE_CONFLICT' if specified range is occupied
*/
virtual Alloc_return alloc_addr(size_t size, addr_t addr) = 0;
virtual Alloc_result alloc_addr(size_t size, addr_t addr) = 0;
/**
* Free a previously allocated block
@@ -326,4 +305,32 @@ void Genode::destroy(DEALLOC && dealloc, T *obj)
operator delete (obj, dealloc);
}
namespace Genode {
void static inline print(Output &out, Allocator::Alloc_error error)
{
using Error = Allocator::Alloc_error;
auto name = [] (Error error)
{
switch (error) {
case Error::OUT_OF_RAM: return "OUT_OF_RAM";
case Error::OUT_OF_CAPS: return "OUT_OF_CAPS";
case Error::DENIED: return "DENIED";
}
return "<unknown>";
};
Genode::print(out, name(error));
}
void static inline print(Output &out, Allocator::Alloc_result result)
{
result.with_result(
[&] (void *ptr) { Genode::print(out, ptr); },
[&] (auto error) { Genode::print(out, error); });
}
}
#endif /* _INCLUDE__BASE__ALLOCATOR_H_ */
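
To illustrate the corresponding 'Range_allocator' changes (again a hypothetical
sketch with made-up address values), 'add_range'/'remove_range' now return
'Range_result' and the allocation methods return 'Alloc_result' instead of the
former 'Alloc_return' codes:

#include <base/allocator.h>
#include <base/log.h>

/* hypothetical sketch of driving the reworked Range_allocator interface */
static void range_example(Genode::Range_allocator &ra)
{
	using namespace Genode;

	if (ra.add_range(0x1000, 0x10000).failed())
		warning("add_range failed");

	/* 4 KiB block, aligned to 2^12, anywhere within the managed ranges */
	ra.alloc_aligned(4096, 12).with_result(
		[&] (void *ptr)                { ra.free(ptr, 4096); },
		[&] (Allocator::Alloc_error e) { error("alloc_aligned failed: ", e); });

	/* fixed-address allocation, range conflicts are reported as 'DENIED' */
	if (ra.alloc_addr(4096, 0x2000).failed())
		log("address 0x2000 not available");
}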

View File

@@ -163,25 +163,29 @@ class Genode::Allocator_avl_base : public Range_allocator
private:
Avl_tree<Block> _addr_tree { }; /* blocks sorted by base address */
Allocator *_md_alloc { nullptr }; /* meta-data allocator */
Avl_tree<Block> _addr_tree { }; /* blocks sorted by base address */
Allocator &_md_alloc; /* meta-data allocator */
size_t _md_entry_size { 0 }; /* size of block meta-data entry */
struct Two_blocks { Block *b1_ptr, *b2_ptr; };
using Alloc_md_result = Attempt<Block *, Alloc_error>;
using Alloc_md_two_result = Attempt<Two_blocks, Alloc_error>;
/**
* Alloc meta-data block
*/
Block *_alloc_block_metadata();
Alloc_md_result _alloc_block_metadata();
/**
* Alloc two meta-data blocks in a transactional way
*/
bool _alloc_two_blocks_metadata(Block **dst1, Block **dst2);
Alloc_md_two_result _alloc_two_blocks_metadata();
/**
* Create new block
*/
int _add_block(Block *block_metadata,
addr_t base, size_t size, bool used);
void _add_block(Block &block_metadata, addr_t base, size_t size, bool used);
Block *_find_any_used_block(Block *sub_tree);
Block *_find_any_unused_block(Block *sub_tree);
@@ -189,7 +193,7 @@ class Genode::Allocator_avl_base : public Range_allocator
/**
* Destroy block
*/
void _destroy_block(Block *b);
void _destroy_block(Block &b);
/**
* Cut specified area from block
@@ -197,8 +201,13 @@ class Genode::Allocator_avl_base : public Range_allocator
* The original block gets replaced by (up to) two smaller blocks
* with remaining space.
*/
void _cut_from_block(Block *b, addr_t cut_addr, size_t cut_size,
Block *dst1, Block *dst2);
void _cut_from_block(Block &b, addr_t cut_addr, size_t cut_size, Two_blocks);
template <typename ANY_BLOCK_FN>
void _revert_block_ranges(ANY_BLOCK_FN const &);
template <typename SEARCH_FN>
Alloc_result _allocate(size_t, unsigned, Range, SEARCH_FN const &);
protected:
@@ -234,7 +243,7 @@ class Genode::Allocator_avl_base : public Range_allocator
* we can attach custom information to block meta data.
*/
Allocator_avl_base(Allocator *md_alloc, size_t md_entry_size) :
_md_alloc(md_alloc), _md_entry_size(md_entry_size) { }
_md_alloc(*md_alloc), _md_entry_size(md_entry_size) { }
~Allocator_avl_base() { _revert_allocations_and_ranges(); }
@@ -258,10 +267,10 @@ class Genode::Allocator_avl_base : public Range_allocator
** Range allocator interface **
*******************************/
int add_range(addr_t base, size_t size) override;
int remove_range(addr_t base, size_t size) override;
Alloc_return alloc_aligned(size_t, void **, unsigned, Range) override;
Alloc_return alloc_addr(size_t size, addr_t addr) override;
Range_result add_range(addr_t base, size_t size) override;
Range_result remove_range(addr_t base, size_t size) override;
Alloc_result alloc_aligned(size_t, unsigned, Range) override;
Alloc_result alloc_addr(size_t size, addr_t addr) override;
void free(void *addr) override;
size_t avail() const override;
bool valid_addr(addr_t addr) const override;
@@ -273,10 +282,9 @@ class Genode::Allocator_avl_base : public Range_allocator
** Allocator interface **
*************************/
bool alloc(size_t size, void **out_addr) override
Alloc_result try_alloc(size_t size) override
{
return (Allocator_avl_base::alloc_aligned(
size, out_addr, log2(sizeof(addr_t))).ok());
return Allocator_avl_base::alloc_aligned(size, log2(sizeof(addr_t)));
}
void free(void *addr, size_t) override { free(addr); }
@@ -385,7 +393,7 @@ class Genode::Allocator_avl_tpl : public Allocator_avl_base
return b && b->used() ? b : 0;
}
int add_range(addr_t base, size_t size) override
Range_result add_range(addr_t base, size_t size) override
{
/*
* We disable the slab block allocation while
@@ -395,9 +403,9 @@ class Genode::Allocator_avl_tpl : public Allocator_avl_base
*/
Allocator *md_bs = _metadata.backing_store();
_metadata.backing_store(0);
int ret = Allocator_avl_base::add_range(base, size);
Range_result result = Allocator_avl_base::add_range(base, size);
_metadata.backing_store(md_bs);
return ret;
return result;
}
/**

View File

@@ -96,32 +96,26 @@ class Genode::Heap : public Allocator
size_t _quota_used { 0 };
size_t _chunk_size { 0 };
using Alloc_ds_result = Attempt<Dataspace *, Alloc_error>;
/**
* Allocate a new dataspace of the specified size
*
* \param size number of bytes to allocate
* \param enforce_separate_metadata if true, the new dataspace
* will not contain any meta data
* \throw Region_map::Invalid_dataspace,
* Region_map::Region_conflict
* \return 0 on success or negative error code
*/
Heap::Dataspace *_allocate_dataspace(size_t size, bool enforce_separate_metadata);
Alloc_ds_result _allocate_dataspace(size_t size, bool enforce_separate_metadata);
/**
* Try to allocate block at our local allocator
*
* \return true on success
*
* This method is a utility used by '_unsynchronized_alloc' to
* avoid code duplication.
*/
bool _try_local_alloc(size_t size, void **out_addr);
Alloc_result _try_local_alloc(size_t size);
/**
* Unsynchronized implementation of 'alloc'
* Unsynchronized implementation of 'try_alloc'
*/
bool _unsynchronized_alloc(size_t size, void **out_addr);
Alloc_result _unsynchronized_alloc(size_t size);
public:
@@ -167,11 +161,11 @@ class Genode::Heap : public Allocator
** Allocator interface **
*************************/
bool alloc(size_t, void **) override;
void free(void *, size_t) override;
size_t consumed() const override { return _quota_used; }
size_t overhead(size_t size) const override { return _alloc->overhead(size); }
bool need_size_for_free() const override { return false; }
Alloc_result try_alloc(size_t) override;
void free(void *, size_t) override;
size_t consumed() const override { return _quota_used; }
size_t overhead(size_t size) const override { return _alloc->overhead(size); }
bool need_size_for_free() const override { return false; }
};
@@ -222,11 +216,11 @@ class Genode::Sliced_heap : public Allocator
** Allocator interface **
*************************/
bool alloc(size_t, void **) override;
void free(void *, size_t) override;
size_t consumed() const override { return _consumed; }
size_t overhead(size_t size) const override;
bool need_size_for_free() const override { return false; }
Alloc_result try_alloc(size_t) override;
void free(void *, size_t) override;
size_t consumed() const override { return _consumed; }
size_t overhead(size_t size) const override;
bool need_size_for_free() const override { return false; }
};
#endif /* _INCLUDE__BASE__HEAP_H_ */
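
A component-level sketch of the adapted 'Heap' (hypothetical code, assuming the
usual construction from the component's 'Env'): allocation failures can now be
handled without exceptions, while passing the heap to the 'new' operator keeps
the exception behavior:

#include <base/env.h>
#include <base/heap.h>
#include <base/log.h>

/* hypothetical sketch, names chosen for illustration only */
static void heap_example(Genode::Env &env)
{
	using namespace Genode;

	Heap heap { env.ram(), env.rm() };

	heap.try_alloc(256).with_result(
		[&] (void *ptr)                { heap.free(ptr, 256); },
		[&] (Allocator::Alloc_error e) { warning("heap allocation failed: ", e); });

	/* exception-based path, e.g., when the heap backs the 'new' operator */
	struct Dummy { int value = 0; };
	try {
		Dummy *dummy = new (heap) Dummy();
		destroy(heap, dummy);
	}
	catch (Out_of_ram)        { /* ... */ }
	catch (Out_of_caps)       { /* ... */ }
	catch (Allocator::Denied) { /* ... */ }
}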

View File

@@ -44,10 +44,12 @@ class Genode::Slab : public Allocator
Allocator *_backing_store;
using New_slab_block_result = Attempt<Block *, Alloc_error>;
/**
* Allocate and initialize new slab block
*/
Block *_new_slab_block();
New_slab_block_result _new_slab_block();
/*****************************
@@ -58,11 +60,17 @@ class Genode::Slab : public Allocator
/**
* Insert block into slab block ring
*
* \noapi
*/
void _insert_sb(Block *);
struct Expand_ok { };
using Expand_result = Attempt<Expand_ok, Alloc_error>;
/**
* Expand slab by one block
*/
Expand_result _expand();
/**
* Release slab block
*/
@@ -88,6 +96,10 @@ class Genode::Slab : public Allocator
* block that is used for the first couple of allocations,
* especially for the allocation of the second slab
* block.
*
* \throw Out_of_ram
* \throw Out_of_caps
* \throw Allocator::Denied failed to obtain initial slab block
*/
Slab(size_t slab_size, size_t block_size, void *initial_sb,
Allocator *backing_store = 0);
@@ -154,7 +166,7 @@ class Genode::Slab : public Allocator
* The 'size' parameter is ignored as only slab entries with
* preconfigured slab-entry size are allocated.
*/
bool alloc(size_t size, void **addr) override;
Alloc_result try_alloc(size_t size) override;
void free(void *addr, size_t) override { _free(addr); }
size_t consumed() const override;
size_t overhead(size_t) const override { return _block_size/_entries_per_block; }

View File

@@ -56,8 +56,8 @@ class Genode::Synced_allocator : public Allocator
** Allocator interface **
*************************/
bool alloc(size_t size, void **out_addr) override {
return _synced_object()->alloc(size, out_addr); }
Alloc_result try_alloc(size_t size) override {
return _synced_object()->try_alloc(size); }
void free(void *addr, size_t size) override {
_synced_object()->free(addr, size); }

View File

@@ -64,7 +64,7 @@ _ZN6Genode10Ipc_serverC2Ev T
_ZN6Genode10Ipc_serverD1Ev T
_ZN6Genode10Ipc_serverD2Ev T
_ZN6Genode11Sliced_heap4freeEPvm T
_ZN6Genode11Sliced_heap5allocEmPPv T
_ZN6Genode11Sliced_heap9try_allocEm T
_ZN6Genode11Sliced_heapC1ERNS_13Ram_allocatorERNS_10Region_mapE T
_ZN6Genode11Sliced_heapC2ERNS_13Ram_allocatorERNS_10Region_mapE T
_ZN6Genode11Sliced_heapD0Ev T
@@ -165,23 +165,15 @@ _ZN6Genode17Timeout_schedulerC2ERNS_11Time_sourceENS_12MicrosecondsE T
_ZN6Genode17Timeout_schedulerD0Ev T
_ZN6Genode17Timeout_schedulerD1Ev T
_ZN6Genode17Timeout_schedulerD2Ev T
_ZN6Genode18Allocator_avl_base10_add_blockEPNS0_5BlockEmmb T
_ZN6Genode18Allocator_avl_base10_add_blockERNS0_5BlockEmmb T
_ZN6Genode18Allocator_avl_base10alloc_addrEmm T
_ZN6Genode18Allocator_avl_base12remove_rangeEmm T
_ZN6Genode18Allocator_avl_base13alloc_alignedEmPPvjNS_15Range_allocator5RangeE T
_ZN6Genode18Allocator_avl_base14_destroy_blockEPNS0_5BlockE T
_ZN6Genode18Allocator_avl_base13alloc_alignedEmjNS_15Range_allocator5RangeE T
_ZN6Genode18Allocator_avl_base14any_block_addrEPm T
_ZN6Genode18Allocator_avl_base15_cut_from_blockEPNS0_5BlockEmmS2_S2_ T
_ZN6Genode18Allocator_avl_base20_find_any_used_blockEPNS0_5BlockE T
_ZN6Genode18Allocator_avl_base21_alloc_block_metadataEv T
_ZN6Genode18Allocator_avl_base21_revert_unused_rangesEv T
_ZN6Genode18Allocator_avl_base22_find_any_unused_blockEPNS0_5BlockE T
_ZN6Genode18Allocator_avl_base26_alloc_two_blocks_metadataEPPNS0_5BlockES3_ T
_ZN6Genode18Allocator_avl_base30_revert_allocations_and_rangesEv T
_ZN6Genode18Allocator_avl_base4freeEPv T
_ZN6Genode18Allocator_avl_base5Block13find_best_fitEmjmm T
_ZN6Genode18Allocator_avl_base5Block15find_by_addressEmmb T
_ZN6Genode18Allocator_avl_base5Block16avail_in_subtreeEv T
_ZN6Genode18Allocator_avl_base5Block9recomputeEv T
_ZN6Genode18Allocator_avl_base9add_rangeEmm T
_ZN6Genode18Signal_transmitter6submitEj T
@@ -201,8 +193,10 @@ _ZN6Genode3Raw7_outputEv T
_ZN6Genode3Raw8_acquireEv T
_ZN6Genode3Raw8_releaseEv T
_ZN6Genode4Heap11quota_limitEm T
_ZN6Genode4Heap14Dataspace_poolD1Ev T
_ZN6Genode4Heap14Dataspace_poolD2Ev T
_ZN6Genode4Heap4freeEPvm T
_ZN6Genode4Heap5allocEmPPv T
_ZN6Genode4Heap9try_allocEm T
_ZN6Genode4HeapC1EPNS_13Ram_allocatorEPNS_10Region_mapEmPvm T
_ZN6Genode4HeapC2EPNS_13Ram_allocatorEPNS_10Region_mapEmPvm T
_ZN6Genode4HeapD0Ev T
@@ -213,13 +207,9 @@ _ZN6Genode4Lock6unlockEv T
_ZN6Genode4LockC1ENS0_5StateE T
_ZN6Genode4Slab13any_used_elemEv T
_ZN6Genode4Slab17free_empty_blocksEv T
_ZN6Genode4Slab5Block11_slab_entryEi T
_ZN6Genode4Slab5Block14any_used_entryEv T
_ZN6Genode4Slab5Block5allocEv T
_ZN6Genode4Slab5Block9inc_availERNS0_5EntryE T
_ZN6Genode4Slab5_freeEPv T
_ZN6Genode4Slab5allocEmPPv T
_ZN6Genode4Slab9insert_sbEPv T
_ZN6Genode4Slab9try_allocEm T
_ZN6Genode4SlabC1EmmPvPNS_9AllocatorE T
_ZN6Genode4SlabC2EmmPvPNS_9AllocatorE T
_ZN6Genode4SlabD0Ev T

View File

@@ -33,42 +33,46 @@ void * Mapped_avl_allocator::map_addr(void * addr)
}
Range_allocator::Alloc_return
Mapped_mem_allocator::alloc_aligned(size_t size, void **out_addr,
unsigned align, Range range)
Range_allocator::Alloc_result
Mapped_mem_allocator::alloc_aligned(size_t size, unsigned align, Range range)
{
size_t page_rounded_size = align_addr(size, get_page_size_log2());
void *phys_addr = 0;
align = max((size_t)align, get_page_size_log2());
/* allocate physical pages */
Alloc_return ret1 = _phys_alloc->alloc_aligned(page_rounded_size,
&phys_addr, align, range);
if (!ret1.ok()) {
error("Could not allocate physical memory region of size ",
page_rounded_size);
return ret1;
}
return _phys_alloc->alloc_aligned(page_rounded_size, align, range)
.convert<Alloc_result>(
/* allocate range in core's virtual address space */
Alloc_return ret2 = _virt_alloc->alloc_aligned(page_rounded_size,
out_addr, align);
if (!ret2.ok()) {
error("Could not allocate virtual address range in core of size ",
page_rounded_size);
[&] (void *phys_addr) -> Alloc_result {
/* revert physical allocation */
_phys_alloc->free(phys_addr);
return ret2;
}
/* allocate range in core's virtual address space */
return _virt_alloc->alloc_aligned(page_rounded_size, align)
.convert<Alloc_result>(
_phys_alloc->metadata(phys_addr, { *out_addr });
_virt_alloc->metadata(*out_addr, { phys_addr });
[&] (void *virt_addr) {
/* make physical page accessible at the designated virtual address */
_map_local((addr_t)*out_addr, (addr_t)phys_addr, page_rounded_size);
_phys_alloc->metadata(phys_addr, { virt_addr });
_virt_alloc->metadata(virt_addr, { phys_addr });
return Alloc_return::OK;
/* make physical page accessible at the designated virtual address */
_map_local((addr_t)virt_addr, (addr_t)phys_addr, page_rounded_size);
return virt_addr;
},
[&] (Alloc_error e) {
error("Could not allocate virtual address range in core of size ",
page_rounded_size, " (error ", (int)e, ")");
/* revert physical allocation */
_phys_alloc->free(phys_addr);
return e;
});
},
[&] (Alloc_error e) {
error("Could not allocate physical memory region of size ",
page_rounded_size, " (error ", (int)e, ")");
return e;
});
}

View File

@@ -47,7 +47,7 @@ class Genode::Constrained_core_ram : public Allocator
" in core !");
}
bool alloc(size_t const size, void **ptr) override
Alloc_result try_alloc(size_t const size) override
{
size_t const page_aligned_size = align_addr(size, 12);
@@ -56,15 +56,16 @@ class Genode::Constrained_core_ram : public Allocator
/* on some kernels we require a cap, on some not XXX */
Cap_quota_guard::Reservation caps(_cap_guard, Cap_quota{1});
if (!_core_mem.alloc(page_aligned_size, ptr))
return false;
return _core_mem.try_alloc(page_aligned_size).convert<Alloc_result>(
ram.acknowledge();
caps.acknowledge();
[&] (void *ptr) {
ram.acknowledge();
caps.acknowledge();
core_mem_allocated += page_aligned_size;
return ptr; },
core_mem_allocated += page_aligned_size;
return true;
[&] (Alloc_error error) {
return error; });
}
void free(void *ptr, size_t const size) override

View File

@@ -163,11 +163,10 @@ class Genode::Mapped_mem_allocator : public Genode::Core_mem_translator
** Range allocator interface **
*******************************/
int add_range(addr_t, size_t) override { return -1; }
int remove_range(addr_t, size_t) override { return -1; }
Alloc_return alloc_aligned(size_t, void **, unsigned, Range) override;
Alloc_return alloc_addr(size_t, addr_t) override {
return Alloc_return::RANGE_CONFLICT; }
Range_result add_range(addr_t, size_t) override { return Alloc_error::DENIED; }
Range_result remove_range(addr_t, size_t) override { return Alloc_error::DENIED; }
Alloc_result alloc_aligned(size_t, unsigned, Range) override;
Alloc_result alloc_addr(size_t, addr_t) override { return Alloc_error::DENIED; }
void free(void *) override;
size_t avail() const override { return _phys_alloc->avail(); }
bool valid_addr(addr_t addr) const override {
@@ -180,8 +179,8 @@ class Genode::Mapped_mem_allocator : public Genode::Core_mem_translator
** Allocator interface **
*************************/
bool alloc(size_t size, void **out_addr) override {
return alloc_aligned(size, out_addr, log2(sizeof(addr_t))).ok(); }
Alloc_result try_alloc(size_t size) override {
return alloc_aligned(size, log2(sizeof(addr_t))); }
void free(void *addr, size_t) override;
size_t consumed() const override { return _phys_alloc->consumed(); }
size_t overhead(size_t size) const override {
@@ -276,16 +275,14 @@ class Genode::Core_mem_allocator : public Genode::Core_mem_translator
** Range allocator interface **
*******************************/
int add_range(addr_t, size_t) override { return -1; }
int remove_range(addr_t, size_t) override { return -1; }
Alloc_return alloc_addr(size_t, addr_t) override {
return Alloc_return::RANGE_CONFLICT; }
Range_result add_range(addr_t, size_t) override { return Alloc_error::DENIED; }
Range_result remove_range(addr_t, size_t) override { return Alloc_error::DENIED; }
Alloc_result alloc_addr(size_t, addr_t) override { return Alloc_error::DENIED; }
Alloc_return alloc_aligned(size_t size, void **out_addr,
unsigned align, Range range) override
Alloc_result alloc_aligned(size_t size, unsigned align, Range range) override
{
Mutex::Guard lock_guard(_mutex);
return _mem_alloc.alloc_aligned(size, out_addr, align, range);
return _mem_alloc.alloc_aligned(size, align, range);
}
void free(void *addr) override
@@ -305,8 +302,10 @@ class Genode::Core_mem_allocator : public Genode::Core_mem_translator
** Allocator interface **
*************************/
bool alloc(size_t size, void **out_addr) override {
return alloc_aligned(size, out_addr, log2(sizeof(addr_t))).ok(); }
Alloc_result try_alloc(size_t size) override
{
return alloc_aligned(size, log2(sizeof(addr_t)));
}
void free(void *addr, size_t size) override
{

View File

@@ -40,7 +40,7 @@ class Genode::Synced_range_allocator : public Range_allocator
friend class Mapped_mem_allocator;
Mutex _default_mutex { };
Mutex &_mutex;
Mutex &_mutex { _default_mutex };
ALLOC _alloc;
Synced_interface<ALLOC, Mutex> _synced_object;
@@ -54,8 +54,7 @@ class Genode::Synced_range_allocator : public Range_allocator
template <typename... ARGS>
Synced_range_allocator(ARGS &&... args)
: _mutex(_default_mutex), _alloc(args...),
_synced_object(_mutex, &_alloc) { }
: _alloc(args...), _synced_object(_mutex, &_alloc) { }
Guard operator () () { return _synced_object(); }
Guard operator () () const { return _synced_object(); }
@@ -67,8 +66,8 @@ class Genode::Synced_range_allocator : public Range_allocator
** Allocator interface **
*************************/
bool alloc(size_t size, void **out_addr) override {
return _synced_object()->alloc(size, out_addr); }
Alloc_result try_alloc(size_t size) override {
return _synced_object()->try_alloc(size); }
void free(void *addr, size_t size) override {
_synced_object()->free(addr, size); }
@@ -87,17 +86,16 @@ class Genode::Synced_range_allocator : public Range_allocator
** Range-allocator interface **
*******************************/
int add_range(addr_t base, size_t size) override {
Range_result add_range(addr_t base, size_t size) override {
return _synced_object()->add_range(base, size); }
int remove_range(addr_t base, size_t size) override {
Range_result remove_range(addr_t base, size_t size) override {
return _synced_object()->remove_range(base, size); }
Alloc_return alloc_aligned(size_t size, void **out_addr,
unsigned align, Range range) override {
return _synced_object()->alloc_aligned(size, out_addr, align, range); }
Alloc_result alloc_aligned(size_t size, unsigned align, Range range) override {
return _synced_object()->alloc_aligned(size, align, range); }
Alloc_return alloc_addr(size_t size, addr_t addr) override {
Alloc_result alloc_addr(size_t size, addr_t addr) override {
return _synced_object()->alloc_addr(size, addr); }
void free(void *addr) override {

View File

@@ -41,24 +41,16 @@ Io_mem_session_component::_prepare_io_mem(const char *args,
_cacheable = WRITE_COMBINED;
/* check for RAM collision */
int ret;
if ((ret = ram_alloc.remove_range(base, size))) {
if (ram_alloc.remove_range(base, size).failed()) {
error("I/O memory ", Hex_range<addr_t>(base, size), " "
"used by RAM allocator (", ret, ")");
"used by RAM allocator");
return Dataspace_attr();
}
/* allocate region */
switch (_io_mem_alloc.alloc_addr(req_size, req_base).value) {
case Range_allocator::Alloc_return::RANGE_CONFLICT:
if (_io_mem_alloc.alloc_addr(req_size, req_base).failed()) {
error("I/O memory ", Hex_range<addr_t>(req_base, req_size), " not available");
return Dataspace_attr();
case Range_allocator::Alloc_return::OUT_OF_METADATA:
error("I/O memory allocator ran out of meta data");
return Dataspace_attr();
case Range_allocator::Alloc_return::OK: break;
}
/* request local mapping */

View File

@@ -38,8 +38,7 @@ Ram_dataspace_factory::try_alloc(size_t ds_size, Cache cache)
* If this does not work, we subsequently weaken the alignment constraint
* until the allocation succeeds.
*/
void *ds_addr = nullptr;
bool alloc_succeeded = false;
Range_allocator::Alloc_result allocated_range = Allocator::Alloc_error::DENIED;
/*
* If no physical constraint exists, try to allocate physical memory at
@@ -53,63 +52,57 @@ Ram_dataspace_factory::try_alloc(size_t ds_size, Cache cache)
Phys_range const range { .start = high_start, .end = _phys_range.end };
for (size_t align_log2 = log2(ds_size); align_log2 >= 12; align_log2--) {
if (_phys_alloc.alloc_aligned(ds_size, &ds_addr, align_log2, range).ok()) {
alloc_succeeded = true;
allocated_range = _phys_alloc.alloc_aligned(ds_size, align_log2, range);
if (allocated_range.ok())
break;
}
}
}
/* apply constraints, or retry if larger memory allocation failed */
if (!alloc_succeeded) {
if (!allocated_range.ok()) {
for (size_t align_log2 = log2(ds_size); align_log2 >= 12; align_log2--) {
if (_phys_alloc.alloc_aligned(ds_size, &ds_addr, align_log2,
_phys_range).ok()) {
alloc_succeeded = true;
allocated_range = _phys_alloc.alloc_aligned(ds_size, align_log2, _phys_range);
if (allocated_range.ok())
break;
}
}
}
/*
* Helper to release the allocated physical memory whenever we leave the
* scope via an exception.
*/
class Phys_alloc_guard
{
private:
/*
* Noncopyable
*/
Phys_alloc_guard(Phys_alloc_guard const &);
Phys_alloc_guard &operator = (Phys_alloc_guard const &);
public:
Range_allocator &phys_alloc;
void * const ds_addr;
bool ack = false;
Phys_alloc_guard(Range_allocator &phys_alloc, void *ds_addr)
: phys_alloc(phys_alloc), ds_addr(ds_addr) { }
~Phys_alloc_guard() { if (!ack) phys_alloc.free(ds_addr); }
} phys_alloc_guard(_phys_alloc, ds_addr);
/*
* Normally, init's quota equals the size of physical memory and this quota
* is distributed among the processes. As we check the quota before
* allocating, the allocation should always succeed in theory. However,
* fragmentation could cause a failing allocation.
*/
if (!alloc_succeeded) {
if (allocated_range.failed()) {
error("out of physical memory while allocating ", ds_size, " bytes ",
"in range [", Hex(_phys_range.start), "-", Hex(_phys_range.end), "]");
return Alloc_error::OUT_OF_RAM;
return allocated_range.convert<Ram_allocator::Alloc_result>(
[&] (void *) { return Alloc_error::DENIED; },
[&] (Alloc_error error) { return error; });
}
/*
* Helper to release the allocated physical memory whenever we leave the
* scope via an exception.
*/
struct Phys_alloc_guard
{
Range_allocator &phys_alloc;
struct { void * ds_addr = nullptr; };
bool keep = false;
Phys_alloc_guard(Range_allocator &phys_alloc)
: phys_alloc(phys_alloc) { }
~Phys_alloc_guard() { if (!keep && ds_addr) phys_alloc.free(ds_addr); }
} phys_alloc_guard(_phys_alloc);
allocated_range.with_result(
[&] (void *ptr) { phys_alloc_guard.ds_addr = ptr; },
[&] (Alloc_error) { /* already checked above */ });
/*
* For non-cached RAM dataspaces, we mark the dataspace as write
* combined and expect the pager to evaluate this dataspace property
@@ -118,7 +111,8 @@ Ram_dataspace_factory::try_alloc(size_t ds_size, Cache cache)
Dataspace_component *ds_ptr = nullptr;
try {
ds_ptr = new (_ds_slab)
Dataspace_component(ds_size, (addr_t)ds_addr, cache, true, this);
Dataspace_component(ds_size, (addr_t)phys_alloc_guard.ds_addr,
cache, true, this);
}
catch (Out_of_ram) { return Alloc_error::OUT_OF_RAM; }
catch (Out_of_caps) { return Alloc_error::OUT_OF_CAPS; }
@@ -145,7 +139,7 @@ Ram_dataspace_factory::try_alloc(size_t ds_size, Cache cache)
Dataspace_capability ds_cap = _ep.manage(&ds);
phys_alloc_guard.ack = true;
phys_alloc_guard.keep = true;
return static_cap_cast<Ram_dataspace>(ds_cap);
}

View File

@@ -365,8 +365,14 @@ Region_map_component::attach(Dataspace_capability ds_cap, size_t size,
throw Region_conflict();
auto lambda = [&] (Dataspace_component *dsc) {
using Alloc_error = Range_allocator::Alloc_error;
/* check dataspace validity */
if (!dsc) throw Invalid_dataspace();
if (!dsc)
throw Invalid_dataspace();
unsigned const min_align_log2 = get_page_size_log2();
size_t const off = offset;
if (off >= dsc->size())
@@ -376,27 +382,25 @@ Region_map_component::attach(Dataspace_capability ds_cap, size_t size,
size = dsc->size() - offset;
/* work with page granularity */
size = align_addr(size, get_page_size_log2());
size = align_addr(size, min_align_log2);
/* deny creation of regions larger then the actual dataspace */
if (dsc->size() < size + offset)
throw Region_conflict();
/* allocate region for attachment */
void *attach_at = 0;
void *attach_at = nullptr;
if (use_local_addr) {
switch (_map.alloc_addr(size, local_addr).value) {
case Range_allocator::Alloc_return::OUT_OF_METADATA:
throw Out_of_ram();
case Range_allocator::Alloc_return::RANGE_CONFLICT:
throw Region_conflict();
case Range_allocator::Alloc_return::OK:
attach_at = local_addr;
break;
}
_map.alloc_addr(size, local_addr).with_result(
[&] (void *ptr) { attach_at = ptr; },
[&] (Range_allocator::Alloc_error error) {
switch (error) {
case Alloc_error::OUT_OF_RAM: throw Out_of_ram();
case Alloc_error::OUT_OF_CAPS: throw Out_of_caps();
case Alloc_error::DENIED: break;
}
throw Region_conflict();
});
} else {
/*
@@ -406,9 +410,10 @@ Region_map_component::attach(Dataspace_capability ds_cap, size_t size,
*/
size_t align_log2 = log2(size);
if (align_log2 >= sizeof(void *)*8)
align_log2 = get_page_size_log2();
align_log2 = min_align_log2;
for (; align_log2 >= get_page_size_log2(); align_log2--) {
bool done = false;
for (; !done && (align_log2 >= min_align_log2); align_log2--) {
/*
* Don't use an alignment higher than the alignment of the backing
@@ -419,21 +424,23 @@ Region_map_component::attach(Dataspace_capability ds_cap, size_t size,
continue;
/* try allocating the align region */
Range_allocator::Alloc_return alloc_return =
_map.alloc_aligned(size, &attach_at, align_log2);
_map.alloc_aligned(size, align_log2).with_result(
typedef Range_allocator::Alloc_return Alloc_return;
switch (alloc_return.value) {
case Alloc_return::OK: break; /* switch */
case Alloc_return::OUT_OF_METADATA: throw Out_of_ram();
case Alloc_return::RANGE_CONFLICT: continue; /* for loop */
}
break; /* for loop */
[&] (void *ptr) {
attach_at = ptr;
done = true; },
[&] (Range_allocator::Alloc_error error) {
switch (error) {
case Alloc_error::OUT_OF_RAM: throw Out_of_ram();
case Alloc_error::OUT_OF_CAPS: throw Out_of_caps();
case Alloc_error::DENIED: break; /* no fit */
}
/* try smaller alignment in next iteration... */
});
}
if (align_log2 < get_page_size_log2())
if (!done)
throw Region_conflict();
}

View File

@@ -35,18 +35,23 @@ Io_port_session_component::Io_port_session_component(Range_allocator &io_port_al
unsigned size = Arg_string::find_arg(args, "io_port_size").ulong_value(0);
/* allocate region (also checks out-of-bounds regions) */
switch (io_port_alloc.alloc_addr(size, base).value) {
io_port_alloc.alloc_addr(size, base).with_error(
[&] (Allocator::Alloc_error e) {
case Range_allocator::Alloc_return::RANGE_CONFLICT:
error("I/O port ", Hex_range<uint16_t>(base, size), " not available");
throw Service_denied();
switch (e) {
case Range_allocator::Alloc_error::DENIED:
error("I/O port ", Hex_range<uint16_t>(base, size), " not available");
throw Service_denied();
case Range_allocator::Alloc_return::OUT_OF_METADATA:
error("I/O port allocator ran out of meta data");
throw Service_denied();
case Range_allocator::Alloc_error::OUT_OF_RAM:
error("I/O port allocator ran out of RAM");
throw Service_denied();
case Range_allocator::Alloc_return::OK: break;
}
case Range_allocator::Alloc_error::OUT_OF_CAPS:
error("I/O port allocator ran out of caps");
throw Service_denied();
}
});
/* store information */
_base = base;

View File

@@ -71,29 +71,37 @@ class Stack_area_region_map : public Region_map
{
/* allocate physical memory */
size = round_page(size);
void *phys_base = nullptr;
Range_allocator &ra = platform_specific().ram_alloc();
if (ra.alloc_aligned(size, &phys_base,
get_page_size_log2()).error()) {
error("could not allocate backing store for new stack");
return (addr_t)0;
}
Dataspace_component &ds = *new (&_ds_slab)
Dataspace_component(size, 0, (addr_t)phys_base, CACHED, true, 0);
Range_allocator &phys = platform_specific().ram_alloc();
addr_t const core_local_addr = stack_area_virtual_base() + (addr_t)local_addr;
return phys.alloc_aligned(size, get_page_size_log2()).convert<Local_addr>(
if (!map_local(ds.phys_addr(), core_local_addr,
ds.size() >> get_page_size_log2())) {
error("could not map phys ", Hex(ds.phys_addr()),
" at local ", Hex(core_local_addr));
return (addr_t)0;
}
[&] (void *phys_ptr) {
ds.assign_core_local_addr((void*)core_local_addr);
addr_t const phys_base = (addr_t)phys_ptr;
return local_addr;
Dataspace_component &ds = *new (&_ds_slab)
Dataspace_component(size, 0, (addr_t)phys_base, CACHED, true, 0);
addr_t const core_local_addr = stack_area_virtual_base()
+ (addr_t)local_addr;
if (!map_local(ds.phys_addr(), core_local_addr,
ds.size() >> get_page_size_log2())) {
error("could not map phys ", Hex(ds.phys_addr()),
" at local ", Hex(core_local_addr));
phys.free(phys_ptr);
return Local_addr { (addr_t)0 };
}
ds.assign_core_local_addr((void*)core_local_addr);
return local_addr;
},
[&] (Range_allocator::Alloc_error) {
error("could not allocate backing store for new stack");
return (addr_t)0; });
}
void detach(Local_addr local_addr) override

View File

@@ -63,48 +63,57 @@ void Vm_session_component::attach(Dataspace_capability const cap,
attribute.offset > dsc.size() - attribute.size)
throw Invalid_dataspace();
switch (_map.alloc_addr(attribute.size, guest_phys).value) {
case Range_allocator::Alloc_return::OUT_OF_METADATA:
throw Out_of_ram();
case Range_allocator::Alloc_return::RANGE_CONFLICT:
{
Rm_region *region_ptr = _map.metadata((void *)guest_phys);
if (!region_ptr)
throw Region_conflict();
using Alloc_error = Range_allocator::Alloc_error;
Rm_region &region = *region_ptr;
_map.alloc_addr(attribute.size, guest_phys).with_result(
if (!(cap == region.dataspace().cap()))
throw Region_conflict();
if (guest_phys < region.base() ||
guest_phys > region.base() + region.size() - 1)
throw Region_conflict();
[&] (void *) {
/* re-attach all */
break;
}
case Range_allocator::Alloc_return::OK:
{
/* store attachment info in meta data */
try {
_map.construct_metadata((void *)guest_phys,
guest_phys, attribute.size,
dsc.writable() && attribute.writeable,
dsc, attribute.offset, *this,
attribute.executable);
} catch (Allocator_avl_tpl<Rm_region>::Assign_metadata_failed) {
error("failed to store attachment info");
throw Invalid_dataspace();
/* store attachment info in meta data */
try {
_map.construct_metadata((void *)guest_phys,
guest_phys, attribute.size,
dsc.writable() && attribute.writeable,
dsc, attribute.offset, *this,
attribute.executable);
} catch (Allocator_avl_tpl<Rm_region>::Assign_metadata_failed) {
error("failed to store attachment info");
throw Invalid_dataspace();
}
Rm_region &region = *_map.metadata((void *)guest_phys);
/* inform dataspace about attachment */
dsc.attached_to(region);
},
[&] (Alloc_error error) {
switch (error) {
case Alloc_error::OUT_OF_RAM: throw Out_of_ram();
case Alloc_error::OUT_OF_CAPS: throw Out_of_caps();
case Alloc_error::DENIED:
{
/*
* Handle attach after partial detach
*/
Rm_region *region_ptr = _map.metadata((void *)guest_phys);
if (!region_ptr)
throw Region_conflict();
Rm_region &region = *region_ptr;
if (!(cap == region.dataspace().cap()))
throw Region_conflict();
if (guest_phys < region.base() ||
guest_phys > region.base() + region.size() - 1)
throw Region_conflict();
}
};
}
Rm_region &region = *_map.metadata((void *)guest_phys);
/* inform dataspace about attachment */
dsc.attached_to(region);
break;
}
};
);
/* kernel specific code to attach memory to guest */
_attach_vm_memory(dsc, guest_phys, attribute);

View File

@@ -95,110 +95,106 @@ void Allocator_avl_base::Block::recompute()
** Allocator_avl implementation **
**********************************/
Allocator_avl_base::Block *Allocator_avl_base::_alloc_block_metadata()
Allocator_avl_base::Alloc_md_result Allocator_avl_base::_alloc_block_metadata()
{
void *b = nullptr;
if (_md_alloc->alloc(sizeof(Block), &b))
return construct_at<Block>(b, 0, 0, 0);
return nullptr;
return _md_alloc.try_alloc(sizeof(Block)).convert<Alloc_md_result>(
[&] (void *ptr) {
return construct_at<Block>(ptr, 0, 0, 0); },
[&] (Alloc_error error) {
return error; });
}
bool Allocator_avl_base::_alloc_two_blocks_metadata(Block **dst1, Block **dst2)
Allocator_avl_base::Alloc_md_two_result
Allocator_avl_base::_alloc_two_blocks_metadata()
{
Block * const b1 = _alloc_block_metadata();
Block * b2 = nullptr;
return _alloc_block_metadata().convert<Alloc_md_two_result>(
try {
b2 = _alloc_block_metadata();
} catch (...) {
if (b1) _md_alloc->free(b1, sizeof(Block));
throw;
}
[&] (Block *b1_ptr) {
return _alloc_block_metadata().convert<Alloc_md_two_result>(
if (b1 && b2) {
*dst1 = b1;
*dst2 = b2;
return true;
}
[&] (Block *b2_ptr) {
return Two_blocks { b1_ptr, b2_ptr }; },
*dst1 = *dst2 = nullptr;
if (b2) _md_alloc->free(b2, sizeof(Block));
if (b1) _md_alloc->free(b1, sizeof(Block));
return false;
[&] (Alloc_error error) {
_md_alloc.free(b1_ptr, sizeof(Block));
return error; });
},
[&] (Alloc_error error) {
return error; });
}
int Allocator_avl_base::_add_block(Block *block_metadata,
void Allocator_avl_base::_add_block(Block &block_metadata,
addr_t base, size_t size, bool used)
{
if (!block_metadata)
return -1;
/* call constructor for new block */
construct_at<Block>(block_metadata, base, size, used);
construct_at<Block>(&block_metadata, base, size, used);
/* insert block into avl tree */
_addr_tree.insert(block_metadata);
return 0;
_addr_tree.insert(&block_metadata);
}
void Allocator_avl_base::_destroy_block(Block *b)
void Allocator_avl_base::_destroy_block(Block &b)
{
if (!b) return;
/* remove block from both avl trees */
_addr_tree.remove(b);
_md_alloc->free(b, _md_entry_size);
_addr_tree.remove(&b);
_md_alloc.free(&b, _md_entry_size);
}
void Allocator_avl_base::_cut_from_block(Block *b, addr_t addr, size_t size,
Block *dst1, Block *dst2)
void Allocator_avl_base::_cut_from_block(Block &b, addr_t addr, size_t size, Two_blocks blocks)
{
size_t const padding = addr > b->addr() ? addr - b->addr() : 0;
size_t const b_size = b->size() > padding ? b->size() - padding : 0;
size_t const padding = addr > b.addr() ? addr - b.addr() : 0;
size_t const b_size = b.size() > padding ? b.size() - padding : 0;
size_t remaining = b_size > size ? b_size - size : 0;
/* case that a block contains the whole addressable range */
if (!b->addr() && !b->size())
remaining = b->size() - size - padding;
if (!b.addr() && !b.size())
remaining = b.size() - size - padding;
addr_t orig_addr = b->addr();
addr_t orig_addr = b.addr();
_destroy_block(b);
/* create free block containing the alignment padding */
if (padding > 0)
_add_block(dst1, orig_addr, padding, Block::FREE);
_add_block(*blocks.b1_ptr, orig_addr, padding, Block::FREE);
else
_md_alloc->free(dst1, sizeof(Block));
_md_alloc.free(blocks.b1_ptr, sizeof(Block));
/* create free block for remaining space of original block */
if (remaining > 0)
_add_block(dst2, addr + size, remaining, Block::FREE);
_add_block(*blocks.b2_ptr, addr + size, remaining, Block::FREE);
else
_md_alloc->free(dst2, sizeof(Block));
_md_alloc.free(blocks.b2_ptr, sizeof(Block));
}
template <typename FN>
void Allocator_avl_base::_revert_block_ranges(FN const &any_block_fn)
{
for (bool loop = true; loop; ) {
Block *block_ptr = any_block_fn();
if (!block_ptr)
break;
remove_range(block_ptr->addr(), block_ptr->size()).with_error(
[&] (Alloc_error error) {
if (error == Alloc_error::DENIED) /* conflict */
_destroy_block(*block_ptr);
else
loop = false; /* give up on OUT_OF_RAM or OUT_OF_CAPS */
});
}
}
void Allocator_avl_base::_revert_unused_ranges()
{
do {
Block * const block = _find_any_unused_block(_addr_tree.first());
if (!block)
break;
int const error = remove_range(block->addr(), block->size());
if (error && block == _find_any_unused_block(_addr_tree.first()))
/* if the invocation fails, release the block to break endless loop */
_destroy_block(block);
} while (true);
_revert_block_ranges([&] () {
return _find_any_unused_block(_addr_tree.first()); });
}
@@ -220,159 +216,161 @@ void Allocator_avl_base::_revert_allocations_and_ranges()
" at allocator destruction time");
/* destroy all remaining blocks */
while (Block *block = _addr_tree.first()) {
if (remove_range(block->addr(), block->size())) {
/* if the invocation fails, release the block to break endless loop */
_destroy_block(block);
}
}
_revert_block_ranges([&] () { return _addr_tree.first(); });
}
int Allocator_avl_base::add_range(addr_t new_addr, size_t new_size)
Allocator_avl_base::Range_result Allocator_avl_base::add_range(addr_t new_addr, size_t new_size)
{
Block *b;
/* sanity check for insane users ;-) */
if (!new_size) return -2;
if (!new_size)
return Alloc_error::DENIED;
/* check for conflicts with existing blocks */
if (_find_by_address(new_addr, new_size, true))
return -3;
return Alloc_error::DENIED;
Block *new_block = _alloc_block_metadata();
if (!new_block) return -4;
return _alloc_block_metadata().convert<Range_result>(
/* merge with predecessor */
if (new_addr != 0 && (b = _find_by_address(new_addr - 1)) && !b->used()) {
[&] (Block *new_block_ptr) {
new_size += b->size();
new_addr = b->addr();
/* merge with predecessor */
Block *b = nullptr;
if (new_addr != 0 && (b = _find_by_address(new_addr - 1)) && !b->used()) {
_destroy_block(b);
}
new_size += b->size();
new_addr = b->addr();
_destroy_block(*b);
}
/* merge with successor */
if ((b = _find_by_address(new_addr + new_size)) && !b->used()) {
/* merge with successor */
if ((b = _find_by_address(new_addr + new_size)) && !b->used()) {
new_size += b->size();
new_size += b->size();
_destroy_block(*b);
}
_destroy_block(b);
}
/* create new block that spans over all merged blocks */
_add_block(*new_block_ptr, new_addr, new_size, Block::FREE);
/* create new block that spans over all merged blocks */
return _add_block(new_block, new_addr, new_size, Block::FREE);
return Range_ok();
},
[&] (Alloc_error error) {
return error; });
}
int Allocator_avl_base::remove_range(addr_t base, size_t size)
Allocator_avl_base::Range_result Allocator_avl_base::remove_range(addr_t base, size_t size)
{
/* sanity check for insane users ;-) */
if (!size) return -1;
Range_result result = Alloc_error::DENIED;
Block *dst1, *dst2;
if (!_alloc_two_blocks_metadata(&dst1, &dst2))
return -2;
if (!size)
return result;
/* FIXME removing ranges from allocators with used blocks is not safe! */
while (1) {
for (bool done = false; !done; ) {
/* find block overlapping the specified range */
Block *b = _addr_tree.first();
b = b ? b->find_by_address(base, size, 1) : 0;
_alloc_two_blocks_metadata().with_result(
[&] (Two_blocks blocks) {
/*
* If there are no overlappings with any existing blocks (b == 0), we
* are done. If however, the overlapping block is in use, we have a
* problem. In both cases, return.
*/
if (!b || !b->avail()) {
_md_alloc->free(dst1, sizeof(Block));
_md_alloc->free(dst2, sizeof(Block));
return !b ? 0 : -3;
}
/* find block overlapping the specified range */
Block *b = _addr_tree.first();
b = b ? b->find_by_address(base, size, 1) : 0;
/* cut intersecting address range */
addr_t intersect_beg = max(base, b->addr());
size_t intersect_end = min(base + size - 1, b->addr() + b->size() - 1);
/*
* If there are no overlappings with any existing blocks (b == 0), we
* are done. If however, the overlapping block is in use, we have a
* problem. Stop iterating in both cases.
*/
if (!b || !b->avail()) {
_md_alloc.free(blocks.b1_ptr, sizeof(Block));
_md_alloc.free(blocks.b2_ptr, sizeof(Block));
_cut_from_block(b, intersect_beg, intersect_end - intersect_beg + 1, dst1, dst2);
if (!_alloc_two_blocks_metadata(&dst1, &dst2))
return -4;
};
if (b == 0)
result = Range_ok();
done = true;
return;
}
/* cut intersecting address range */
addr_t intersect_beg = max(base, b->addr());
size_t intersect_end = min(base + size - 1, b->addr() + b->size() - 1);
_cut_from_block(*b, intersect_beg, intersect_end - intersect_beg + 1, blocks);
},
[&] (Alloc_error error) {
result = error;
done = true;
});
}
return result;
}
Range_allocator::Alloc_return
Allocator_avl_base::alloc_aligned(size_t size, void **out_addr, unsigned align,
Range range)
template <typename SEARCH_FN>
Allocator::Alloc_result
Allocator_avl_base::_allocate(size_t const size, unsigned align, Range range,
SEARCH_FN const &search_fn)
{
Block *dst1, *dst2;
if (!_alloc_two_blocks_metadata(&dst1, &dst2))
return Alloc_return(Alloc_return::OUT_OF_METADATA);
return _alloc_two_blocks_metadata().convert<Alloc_result>(
/* find best fitting block */
Block *b = _addr_tree.first();
b = b ? b->find_best_fit(size, align, range) : 0;
[&] (Two_blocks two_blocks) -> Alloc_result {
if (!b) {
_md_alloc->free(dst1, sizeof(Block));
_md_alloc->free(dst2, sizeof(Block));
return Alloc_return(Alloc_return::RANGE_CONFLICT);
}
/* find block according to the policy implemented by 'search_fn' */
Block *b_ptr = _addr_tree.first();
b_ptr = b_ptr ? search_fn(*b_ptr) : 0;
/* calculate address of new (aligned) block */
addr_t new_addr = align_addr(max(b->addr(), range.start), align);
if (!b_ptr) {
/* range conflict */
_md_alloc.free(two_blocks.b1_ptr, sizeof(Block));
_md_alloc.free(two_blocks.b2_ptr, sizeof(Block));
return Alloc_error::DENIED;
}
Block &b = *b_ptr;
/* remove new block from containing block */
_cut_from_block(b, new_addr, size, dst1, dst2);
/* calculate address of new (aligned) block */
addr_t const new_addr = align_addr(max(b.addr(), range.start), align);
/* create allocated block */
Block *new_block = _alloc_block_metadata();
if (!new_block) {
_md_alloc->free(new_block, sizeof(Block));
return Alloc_return(Alloc_return::OUT_OF_METADATA);
}
_add_block(new_block, new_addr, size, Block::USED);
/* remove new block from containing block, consume two_blocks */
_cut_from_block(b, new_addr, size, two_blocks);
*out_addr = reinterpret_cast<void *>(new_addr);
return Alloc_return(Alloc_return::OK);
/* create allocated block */
return _alloc_block_metadata().convert<Alloc_result>(
[&] (Block *new_block_ptr) {
_add_block(*new_block_ptr, new_addr, size, Block::USED);
return reinterpret_cast<void *>(new_addr); },
[&] (Alloc_error error) {
return error; });
},
[&] (Alloc_error error) {
return error; });
}
Range_allocator::Alloc_return Allocator_avl_base::alloc_addr(size_t size, addr_t addr)
Allocator::Alloc_result
Allocator_avl_base::alloc_aligned(size_t size, unsigned align, Range range)
{
/* sanity check */
return _allocate(size, align, range, [&] (Block &first) {
return first.find_best_fit(size, align, range); });
}
Range_allocator::Alloc_result Allocator_avl_base::alloc_addr(size_t size, addr_t addr)
{
/* check for integer overflow */
if (addr + size - 1 < addr)
return Alloc_error::DENIED;
/* check for range conflict */
if (!_sum_in_range(addr, size))
return Alloc_return(Alloc_return::RANGE_CONFLICT);
return Alloc_error::DENIED;
Block *dst1, *dst2;
if (!_alloc_two_blocks_metadata(&dst1, &dst2))
return Alloc_return(Alloc_return::OUT_OF_METADATA);
Range const range { .start = addr, .end = addr + size - 1 };
unsigned const align_any = 0;
/* find block at specified address */
Block *b = _addr_tree.first();
b = b ? b->find_by_address(addr, size) : 0;
/* skip if there's no block or block is used */
if (!b || b->used()) {
_md_alloc->free(dst1, sizeof(Block));
_md_alloc->free(dst2, sizeof(Block));
return Alloc_return(Alloc_return::RANGE_CONFLICT);
}
/* remove new block from containing block */
_cut_from_block(b, addr, size, dst1, dst2);
/* create allocated block */
Block *new_block = _alloc_block_metadata();
if (!new_block) {
_md_alloc->free(new_block, sizeof(Block));
return Alloc_return(Alloc_return::OUT_OF_METADATA);
}
_add_block(new_block, addr, size, Block::USED);
return Alloc_return(Alloc_return::OK);
return _allocate(size, align_any, range, [&] (Block &first) {
return first.find_by_address(addr, size); });
}
@@ -383,14 +381,14 @@ void Allocator_avl_base::free(void *addr)
if (!b || !(b->used())) return;
addr_t new_addr = b->addr();
size_t new_size = b->size();
addr_t const new_addr = b->addr();
size_t const new_size = b->size();
if (new_addr != (addr_t)addr)
error(__PRETTY_FUNCTION__, ": given address (", addr, ") "
"is not the block start address (", (void *)new_addr, ")");
_destroy_block(b);
_destroy_block(*b);
add_range(new_addr, new_size);
}

View File

@@ -77,117 +77,129 @@ int Heap::quota_limit(size_t new_quota_limit)
}
Heap::Dataspace *Heap::_allocate_dataspace(size_t size, bool enforce_separate_metadata)
Heap::Alloc_ds_result
Heap::_allocate_dataspace(size_t size, bool enforce_separate_metadata)
{
Ram_dataspace_capability new_ds_cap;
void *ds_addr = 0;
void *ds_meta_data_addr = 0;
Heap::Dataspace *ds = 0;
using Result = Alloc_ds_result;
/* make new ram dataspace available at our local address space */
try {
new_ds_cap = _ds_pool.ram_alloc->alloc(size);
try { ds_addr = _ds_pool.region_map->attach(new_ds_cap); }
catch (Out_of_ram) {
_ds_pool.ram_alloc->free(new_ds_cap);
return nullptr;
}
catch (Out_of_caps) {
_ds_pool.ram_alloc->free(new_ds_cap);
throw;
}
catch (Region_map::Invalid_dataspace) {
warning("heap: attempt to attach invalid dataspace");
_ds_pool.ram_alloc->free(new_ds_cap);
return nullptr;
}
catch (Region_map::Region_conflict) {
warning("heap: region conflict while allocating dataspace");
_ds_pool.ram_alloc->free(new_ds_cap);
return nullptr;
}
}
catch (Out_of_ram) { return nullptr; }
return _ds_pool.ram_alloc->try_alloc(size).convert<Result>(
if (enforce_separate_metadata) {
[&] (Ram_dataspace_capability ds_cap) -> Result {
/* allocate the Dataspace structure */
if (!_unsynchronized_alloc(sizeof(Heap::Dataspace), &ds_meta_data_addr)) {
warning("could not allocate dataspace meta data");
return 0;
}
struct Alloc_guard
{
Ram_allocator &ram;
Ram_dataspace_capability ds;
bool keep = false;
} else {
Alloc_guard(Ram_allocator &ram, Ram_dataspace_capability ds)
: ram(ram), ds(ds) { }
/* add new local address range to our local allocator */
_alloc->add_range((addr_t)ds_addr, size);
~Alloc_guard() { if (!keep) ram.free(ds); }
/* allocate the Dataspace structure */
if (_alloc->alloc_aligned(sizeof(Heap::Dataspace), &ds_meta_data_addr, log2(16)).error()) {
warning("could not allocate dataspace meta data - this should never happen");
return 0;
}
}
} alloc_guard(*_ds_pool.ram_alloc, ds_cap);
ds = construct_at<Dataspace>(ds_meta_data_addr, new_ds_cap, ds_addr, size);
struct Attach_guard
{
Region_map &rm;
struct { void *ptr = nullptr; };
bool keep = false;
_ds_pool.insert(ds);
Attach_guard(Region_map &rm) : rm(rm) { }
return ds;
~Attach_guard() { if (!keep && ptr) rm.detach(ptr); }
} attach_guard(*_ds_pool.region_map);
try {
attach_guard.ptr = _ds_pool.region_map->attach(ds_cap);
}
catch (Out_of_ram) { return Alloc_error::OUT_OF_RAM; }
catch (Out_of_caps) { return Alloc_error::OUT_OF_CAPS; }
catch (Region_map::Invalid_dataspace) { return Alloc_error::DENIED; }
catch (Region_map::Region_conflict) { return Alloc_error::DENIED; }
Alloc_result metadata = Alloc_error::DENIED;
/* allocate the 'Dataspace' structure */
if (enforce_separate_metadata) {
metadata = _unsynchronized_alloc(sizeof(Heap::Dataspace));
} else {
/* add new local address range to our local allocator */
_alloc->add_range((addr_t)attach_guard.ptr, size).with_result(
[&] (Range_allocator::Range_ok) {
metadata = _alloc->alloc_aligned(sizeof(Heap::Dataspace), log2(16)); },
[&] (Alloc_error error) {
metadata = error; });
}
return metadata.convert<Result>(
[&] (void *md_ptr) -> Result {
Dataspace &ds = *construct_at<Dataspace>(md_ptr, ds_cap,
attach_guard.ptr, size);
_ds_pool.insert(&ds);
alloc_guard.keep = attach_guard.keep = true;
return &ds;
},
[&] (Alloc_error error) {
return error; });
},
[&] (Alloc_error error) {
return error; });
}
bool Heap::_try_local_alloc(size_t size, void **out_addr)
Allocator::Alloc_result Heap::_try_local_alloc(size_t size)
{
if (_alloc->alloc_aligned(size, out_addr, log2(16)).error())
return false;
return _alloc->alloc_aligned(size, log2(16)).convert<Alloc_result>(
_quota_used += size;
return true;
[&] (void *ptr) {
_quota_used += size;
return ptr; },
[&] (Alloc_error error) {
return error; });
}
bool Heap::_unsynchronized_alloc(size_t size, void **out_addr)
Allocator::Alloc_result Heap::_unsynchronized_alloc(size_t size)
{
size_t dataspace_size;
if (size >= BIG_ALLOCATION_THRESHOLD) {
/*
* big allocation
*
* in this case, we allocate one dataspace without any meta data in it
* In this case, we allocate one dataspace without any meta data in it
* and return its local address without going through the allocator.
*/
/* align to 4K page */
dataspace_size = align_addr(size, 12);
size_t const dataspace_size = align_addr(size, 12);
Heap::Dataspace *ds = _allocate_dataspace(dataspace_size, true);
return _allocate_dataspace(dataspace_size, true).convert<Alloc_result>(
if (!ds) {
warning("could not allocate dataspace");
return false;
}
[&] (Dataspace *ds_ptr) {
_quota_used += ds_ptr->size;
return ds_ptr->local_addr; },
_quota_used += ds->size;
*out_addr = ds->local_addr;
return true;
[&] (Alloc_error error) {
return error; });
}
/* try allocation at our local allocator */
if (_try_local_alloc(size, out_addr))
return true;
{
Alloc_result result = _try_local_alloc(size);
if (result.ok())
return result;
}
/*
* Calculate block size of needed backing store. The block must hold the
* requested 'size' and we add some space for meta data
* ('Dataspace' structures, AVL-node slab blocks).
* Finally, we align the size to a 4K page.
*/
dataspace_size = size + Allocator_avl::slab_block_size() + sizeof(Heap::Dataspace);
size_t dataspace_size = size
+ Allocator_avl::slab_block_size()
+ sizeof(Heap::Dataspace);
/* align to 4K page */
dataspace_size = align_addr(dataspace_size, 12);
/*
* '_chunk_size' is a multiple of 4K, so 'dataspace_size' becomes
@@ -195,29 +207,34 @@ bool Heap::_unsynchronized_alloc(size_t size, void **out_addr)
*/
size_t const request_size = _chunk_size * sizeof(umword_t);
-    if ((dataspace_size < request_size) &&
-        _allocate_dataspace(request_size, false)) {
-        /*
-         * Exponentially increase chunk size with each allocated chunk until
-         * we hit 'MAX_CHUNK_SIZE'.
-         */
-        _chunk_size = min(2*_chunk_size, (size_t)MAX_CHUNK_SIZE);
+    Alloc_ds_result result = Alloc_error::DENIED;
+    if (dataspace_size < request_size) {
+        result = _allocate_dataspace(request_size, false);
+        if (result.ok()) {
+            /*
+             * Exponentially increase chunk size with each allocated chunk until
+             * we hit 'MAX_CHUNK_SIZE'.
+             */
+            _chunk_size = min(2*_chunk_size, (size_t)MAX_CHUNK_SIZE);
+        }
     } else {
-        /* align to 4K page */
-        dataspace_size = align_addr(dataspace_size, 12);
-        if (!_allocate_dataspace(dataspace_size, false))
-            return false;
+        result = _allocate_dataspace(dataspace_size, false);
     }
+    if (result.failed())
+        return result.convert<Alloc_result>(
+            [&] (Dataspace *) { return Alloc_error::DENIED; },
+            [&] (Alloc_error error) { return error; });
     /* allocate originally requested block */
-    return _try_local_alloc(size, out_addr);
+    return _try_local_alloc(size);
 }
-bool Heap::alloc(size_t size, void **out_addr)
+Allocator::Alloc_result Heap::try_alloc(size_t size)
 {
     if (size == 0)
         error("attempt to allocate zero-size block from heap");
@@ -227,9 +244,9 @@ bool Heap::alloc(size_t size, void **out_addr)
     /* check requested allocation against quota limit */
     if (size + _quota_used > _quota_limit)
-        return false;
+        return Alloc_error::DENIED;
-    return _unsynchronized_alloc(size, out_addr);
+    return _unsynchronized_alloc(size);
 }
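
To illustrate the caller's perspective on the reworked 'Heap' interface, here is a hedged usage sketch. The surrounding component code and the chosen sizes are hypothetical; only 'try_alloc', the legacy 'alloc' wrapper, 'free', and 'Alloc_error' are taken from this patch:

/*
 * Sketch of client-side usage of the new interface. Component setup and the
 * concrete heap instance are assumed context, not part of this patch.
 */
#include <base/heap.h>
#include <base/log.h>

static void allocation_example(Genode::Heap &heap)
{
	using namespace Genode;

	/* exception-free path */
	heap.try_alloc(4096).with_result(

		[&] (void *ptr) {
			log("allocated 4096 bytes");
			heap.free(ptr, 4096);
		},
		[&] (Allocator::Alloc_error e) {
			switch (e) {
			case Allocator::Alloc_error::OUT_OF_RAM:  warning("out of RAM");  break;
			case Allocator::Alloc_error::OUT_OF_CAPS: warning("out of caps"); break;
			case Allocator::Alloc_error::DENIED:      warning("denied");      break;
			}
		});

	/* the traditional 'alloc' still exists as a throwing wrapper */
	try {
		void * const ptr = heap.alloc(4096);
		heap.free(ptr, 4096);
	}
	catch (Out_of_ram)  { warning("out of RAM");  }
	catch (Out_of_caps) { warning("out of caps"); }
	catch (...)         { warning("allocation denied"); }
}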
View File
@@ -223,10 +223,13 @@ Slab::Slab(size_t slab_size, size_t block_size, void *initial_sb,
{
/* if no initial slab block was specified, try to get one */
if (!_curr_sb && _backing_store)
-        _curr_sb = _new_slab_block();
+        _new_slab_block().with_result(
+            [&] (Block *sb) { _curr_sb = sb; },
+            [&] (Alloc_error error) {
+                Allocator::throw_alloc_error(error); });
     if (!_curr_sb)
-        throw Out_of_memory();
+        throw Allocator::Denied();
/* init first slab block */
construct_at<Block>(_curr_sb, *this);
@@ -253,13 +256,19 @@ Slab::~Slab()
}
-Slab::Block *Slab::_new_slab_block()
+Slab::New_slab_block_result Slab::_new_slab_block()
 {
-    void *sb = nullptr;
-    if (!_backing_store || !_backing_store->alloc(_block_size, &sb))
-        return nullptr;
-    return construct_at<Block>(sb, *this);
+    using Result = New_slab_block_result;
+    if (!_backing_store)
+        return Alloc_error::DENIED;
+    Slab &this_slab = *this;
+    return _backing_store->try_alloc(_block_size).convert<Result>(
+        [&] (void *sb) {
+            return construct_at<Block>(sb, this_slab); },
+        [&] (Alloc_error error) {
+            return error; });
}
@@ -313,19 +322,51 @@ void Slab::_insert_sb(Block *sb)
}
+Slab::Expand_result Slab::_expand()
+{
+    if (!_backing_store || _nested)
+        return Expand_ok();
+    /* allocate new block for slab */
+    _nested = true;
+    /* reset '_nested' when leaving the scope */
+    struct Nested_guard {
+        bool &_nested;
+        Nested_guard(bool &nested) : _nested(nested) { }
+        ~Nested_guard() { _nested = false; }
+    } guard(_nested);
+    return _new_slab_block().convert<Expand_result>(
+        [&] (Block *sb_ptr) {
+            /*
+             * The new block has the maximum number of available slots.
+             * Hence, we can insert it at the beginning of the sorted block
+             * list.
+             */
+            _insert_sb(sb_ptr);
+            return Expand_ok(); },
+        [&] (Alloc_error error) {
+            return error; });
+}
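
The 'Nested_guard' above replaces the manual '_nested = false' resets of the old code with a destructor, so the flag is restored on every exit path. The same idiom in a standalone form (illustration only, all names here are made up):

/*
 * Standalone sketch of the scope-guard idiom used by 'Nested_guard' above:
 * the destructor restores the flag on every exit path, including early
 * returns and exceptions.
 */
struct Flag_guard
{
	bool &flag;

	Flag_guard(bool &f) : flag(f) { flag = true; }

	~Flag_guard() { flag = false; }
};

static bool in_progress = false;

static int guarded_step()
{
	if (in_progress)
		return -1;             /* re-entered, refuse nested invocation */

	Flag_guard guard(in_progress);

	/* ... work that may return early or call back into 'guarded_step' ... */

	return 0;                  /* 'in_progress' is reset by ~Flag_guard */
}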
void Slab::insert_sb(void *ptr)
{
_insert_sb(construct_at<Block>(ptr, *this));
}
-bool Slab::alloc(size_t size, void **out_addr)
+Allocator::Alloc_result Slab::try_alloc(size_t size)
 {
     /* too large for us ? */
     if (size > _slab_size) {
         error("requested size ", size, " is larger then slab size ",
               _slab_size);
-        return false;
+        return Alloc_error::DENIED;
     }
/*
@@ -336,29 +377,12 @@ bool Slab::alloc(size_t size, void **out_addr)
* new slab block early enough - that is if there are only three free slab
* entries left.
*/
-    if (_backing_store && (_total_avail <= 3) && !_nested) {
-        /* allocate new block for slab */
-        _nested = true;
-        try {
-            Block * const sb = _new_slab_block();
-            _nested = false;
-            if (!sb) return false;
-            /*
-             * The new block has the maximum number of available slots and
-             * so we can insert it at the beginning of the sorted block
-             * list.
-             */
-            _insert_sb(sb);
-        }
-        catch (...) {
-            _nested = false;
-            throw;
-        }
+    if (_total_avail <= 3) {
+        Expand_result expand_result = _expand();
+        if (expand_result.failed())
+            return expand_result.convert<Alloc_result>(
+                [&] (Expand_ok) { return Alloc_error::DENIED; },
+                [&] (Alloc_error error) { return error; });
     }
/* skip completely occupied slab blocks, detect cycles */
@@ -367,13 +391,13 @@ bool Slab::alloc(size_t size, void **out_addr)
if (_curr_sb->next == orig_curr_sb)
break;
-    *out_addr = _curr_sb->alloc();
-    if (*out_addr == nullptr)
-        return false;
+    void *ptr = _curr_sb->alloc();
+    if (!ptr)
+        return Alloc_error::DENIED;
     _total_avail--;
-    return true;
+    return ptr;
}
View File
@@ -38,56 +38,64 @@ Sliced_heap::~Sliced_heap()
}
-bool Sliced_heap::alloc(size_t size, void **out_addr)
+Allocator::Alloc_result Sliced_heap::try_alloc(size_t size)
 {
     /* allocation includes space for block meta data and is page-aligned */
     size = align_addr(size + sizeof(Block), 12);
-    Ram_dataspace_capability ds_cap;
-    Block *block = nullptr;
-    _ram_alloc.try_alloc(size).with_result(
-        [&] (Ram_dataspace_capability cap) { ds_cap = cap; },
-        [&] (Ram_allocator::Alloc_error error) {
-            switch (error) {
-            case Ram_allocator::Alloc_error::OUT_OF_CAPS: throw Out_of_caps();
-            case Ram_allocator::Alloc_error::OUT_OF_RAM:  break;
-            case Ram_allocator::Alloc_error::DENIED:      break;
-            }
-        });
-    if (!ds_cap.valid())
-        return false;
-    /* serialize access to block list */
-    Mutex::Guard guard(_mutex);
-    try {
-        block = _region_map.attach(ds_cap);
-    }
-    catch (Region_map::Region_conflict) {
-        error("sliced_heap: region conflict while attaching dataspace");
-        _ram_alloc.free(ds_cap);
-        return false;
-    }
-    catch (Region_map::Invalid_dataspace) {
-        error("sliced_heap: attempt to attach invalid dataspace");
-        _ram_alloc.free(ds_cap);
-        return false;
-    }
-    catch (Out_of_ram) {
-        return false;
-    }
-    construct_at<Block>(block, ds_cap, size);
-    _consumed += size;
-    _blocks.insert(block);
-    /* skip meta data prepended to the payload portion of the block */
-    *out_addr = block + 1;
-    return true;
+    return _ram_alloc.try_alloc(size).convert<Alloc_result>(
+        [&] (Ram_dataspace_capability ds_cap) -> Alloc_result {
+            struct Alloc_guard
+            {
+                Ram_allocator &ram;
+                Ram_dataspace_capability ds;
+                bool keep = false;
+                Alloc_guard(Ram_allocator &ram, Ram_dataspace_capability ds)
+                : ram(ram), ds(ds) { }
+                ~Alloc_guard() { if (!keep) ram.free(ds); }
+            } alloc_guard(_ram_alloc, ds_cap);
+            struct Attach_guard
+            {
+                Region_map &rm;
+                struct { void *ptr = nullptr; };
+                bool keep = false;
+                Attach_guard(Region_map &rm) : rm(rm) { }
+                ~Attach_guard() { if (!keep && ptr) rm.detach(ptr); }
+            } attach_guard(_region_map);
+            try {
+                attach_guard.ptr = _region_map.attach(ds_cap);
+            }
+            catch (Out_of_ram)                    { return Alloc_error::OUT_OF_RAM; }
+            catch (Out_of_caps)                   { return Alloc_error::OUT_OF_CAPS; }
+            catch (Region_map::Invalid_dataspace) { return Alloc_error::DENIED; }
+            catch (Region_map::Region_conflict)   { return Alloc_error::DENIED; }
+            Block * const block = construct_at<Block>(attach_guard.ptr, ds_cap, size);
+            /* serialize access to block list */
+            Mutex::Guard guard(_mutex);
+            _consumed += size;
+            _blocks.insert(block);
+            alloc_guard.keep = attach_guard.keep = true;
+            /* skip meta data prepended to the payload portion of the block */
+            void *ptr = block + 1;
+            return ptr;
+        },
+        [&] (Alloc_error error) {
+            return error; });
}
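
The 'Alloc_guard'/'Attach_guard' pair turns the multi-step allocation (RAM allocation, then attachment, then bookkeeping) into an all-or-nothing operation: every early return rolls back what was already acquired, and only the final 'keep = true' commits. A standalone sketch of this commit/rollback idiom (plain C++, not Genode code, names made up for illustration):

/*
 * Standalone sketch of the commit/rollback idiom used above: each acquired
 * resource is owned by a guard that releases it in its destructor unless
 * 'keep' is set after the whole operation has succeeded.
 */
#include <cstdlib>

struct Buffer_guard
{
	void *ptr;
	bool  keep = false;

	Buffer_guard(std::size_t size) : ptr(std::malloc(size)) { }

	~Buffer_guard() { if (!keep) std::free(ptr); }
};

/* hypothetical second step that may fail */
static bool register_buffer(void *) { return true; }

static void *acquire_buffer(std::size_t size)
{
	Buffer_guard guard(size);

	if (!guard.ptr)
		return nullptr;              /* nothing acquired, nothing to undo */

	if (!register_buffer(guard.ptr))
		return nullptr;              /* ~Buffer_guard rolls back the malloc */

	guard.keep = true;               /* commit: ownership passes to the caller */
	return guard.ptr;
}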
View File
@@ -171,16 +171,18 @@ extern "C" void *__emutls_get_address(void *obj)
     /* the heap allocates 16-byte aligned */
     if ((16 % emutls_object->align) != 0)
-        Genode::warning(__func__, ": cannot ensure alignment of ",
-                        emutls_object->align, " bytes");
+        warning(__func__, ": cannot ensure alignment of ",
+                emutls_object->align, " bytes");
     void *address = nullptr;
-    if (!cxx_heap().alloc(emutls_object->size, &address)) {
-        Genode::error(__func__,
-                      ": could not allocate thread-local variable instance");
+    cxx_heap().try_alloc(emutls_object->size).with_result(
+        [&] (void *ptr) { address = ptr; },
+        [&] (Allocator::Alloc_error e) {
+            error(__func__,
+                  ": could not allocate thread-local variable, error ", (int)e); });
+    if (!address)
         return nullptr;
-    }
     if (emutls_object->templ)
         memcpy(address, emutls_object->templ, emutls_object->size);
View File
@@ -76,9 +76,15 @@ extern "C" void *malloc(size_t size)
* the size information when freeing the block.
*/
unsigned long real_size = size + sizeof(Block_header);
-    void *addr = 0;
-    if (!cxx_heap().alloc(real_size, &addr))
-        return 0;
+    void *addr = nullptr;
+    cxx_heap().try_alloc(real_size).with_result(
+        [&] (void *ptr) { addr = ptr; },
+        [&] (Allocator::Alloc_error error) {
+            Genode::error(__func__,
+                          ": cxx_heap allocation failed with error ", (int)error); });
+    if (!addr)
+        return nullptr;
*(Block_header *)addr = real_size;
return (Block_header *)addr + 1;
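
For orientation only (the corresponding 'free' is not part of this excerpt): the 'Block_header' written in front of the returned pointer is what later allows the deallocation path to recover the block size, roughly along these lines (a sketch, not the verbatim implementation):

/*
 * Sketch only: step back over the 'Block_header' prepended by 'malloc' above
 * and hand the whole block, including its recorded size, back to
 * 'cxx_heap()'. The actual implementation is not shown in this excerpt.
 */
extern "C" void free(void *ptr)
{
	if (!ptr)
		return;

	Block_header * const header = (Block_header *)ptr - 1;  /* undo the '+ 1' of malloc */
	cxx_heap().free(header, *header);                       /* size was stored in the header */
}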
View File
@@ -75,15 +75,14 @@ class Linker::Region_map
     /**
      * Allocate region anywhere within the region map
      *
+     * XXX propagate OUT_OF_RAM, OUT_OF_CAPS
      */
     addr_t alloc_region(size_t size)
     {
-        addr_t result = 0;
-        if (_range.alloc_aligned(size, (void **)&result,
-                                 get_page_size_log2()).error())
-            throw Region_conflict();
-        return result;
+        return _range.alloc_aligned(size, get_page_size_log2()).convert<addr_t>(
+            [&] (void *ptr) { return (addr_t)ptr; },
+            [&] (Allocator::Alloc_error) -> addr_t { throw Region_conflict(); });
     }
/**
@@ -91,7 +90,7 @@ class Linker::Region_map
*/
void alloc_region_at(size_t size, addr_t vaddr)
{
-        if (_range.alloc_addr(size, vaddr).error())
+        if (_range.alloc_addr(size, vaddr).failed())
throw Region_conflict();
}
View File
@@ -56,13 +56,13 @@ struct Allocator : Genode::Allocator
bool need_size_for_free() const override {
return a.need_size_for_free(); }
-    bool alloc(Genode::size_t size, void **p) override
+    Alloc_result try_alloc(Genode::size_t size) override
     {
-        *p = a.alloc(size);
+        Alloc_result const result = a.try_alloc(size);
         log("Allocator::alloc()");
-        return *p != 0;
+        return result;
}
void free(void *p, Genode::size_t size) override