mirror of https://github.com/mmueller41/genode.git

core: New heap allocator for NUMA-regional allocations.
repos/base/include/base/regional_heap.h (new file, 230 lines)

@@ -0,0 +1,230 @@
/*
 * \brief  NUMA-regional heap partition
 * \author Norman Feske
 * \date   2006-05-15
 */

/*
 * Copyright (C) 2022 Michael Müller
 * Copyright (C) 2006-2017 Genode Labs GmbH
 *
 * This file is part of the Genode OS framework, which is distributed
 * under the terms of the GNU Affero General Public License version 3.
 */

#ifndef _INCLUDE__BASE__REGIONAL_HEAP_H_
#define _INCLUDE__BASE__REGIONAL_HEAP_H_

#include <util/list.h>
#include <util/reconstructible.h>
#include <base/ram_allocator.h>
#include <region_map/region_map.h>
#include <base/allocator_avl.h>
#include <base/mutex.h>
#include <topo_session/node.h>

namespace Genode {

    class Regional_heap;
    class Sliced_regional_heap;
}


/**
 * Heap that uses dataspaces as backing store, with all physical memory
 * served from a single NUMA region
 *
 * The heap class provides an allocator that uses a list of dataspaces of a
 * RAM allocator as backing store. One dataspace may be used for holding
 * multiple blocks.
 */
class Genode::Regional_heap : public Allocator
{
    private:

        class Dataspace : public List<Dataspace>::Element
        {
            private:

                /*
                 * Noncopyable
                 */
                Dataspace(Dataspace const &);
                Dataspace &operator = (Dataspace const &);

            public:

                Ram_dataspace_capability cap;
                void                    *local_addr;
                size_t                   size;

                Dataspace(Ram_dataspace_capability c, void *local_addr, size_t size)
                : cap(c), local_addr(local_addr), size(size) { }
        };

        /*
         * This structure exists only to make sure that the dataspaces are
         * destroyed after the AVL allocator.
         */
        class Dataspace_pool : public List<Dataspace>
        {
            private:

                /*
                 * Noncopyable
                 */
                Dataspace_pool(Dataspace_pool const &);
                Dataspace_pool &operator = (Dataspace_pool const &);

            public:

                Ram_allocator *ram_alloc;   /* backing store */
                Region_map    *region_map;

                Dataspace_pool(Ram_allocator *ram, Region_map *rm)
                : ram_alloc(ram), region_map(rm) { }

                ~Dataspace_pool();

                void remove_and_free(Dataspace &);

                void reassign_resources(Ram_allocator *ram, Region_map *rm) {
                    ram_alloc = ram, region_map = rm; }
        };

        Mutex mutable                  _mutex { };
        Reconstructible<Allocator_avl> _alloc;             /* local allocator */
        Dataspace_pool                 _ds_pool;           /* list of dataspaces */
        size_t                         _quota_limit { 0 };
        size_t                         _quota_used  { 0 };
        size_t                         _chunk_size  { 0 };
        Ram_allocator::Numa_id         _numa_id;           /* ID of NUMA region to serve allocations from */

        using Alloc_ds_result = Attempt<Dataspace *, Alloc_error>;

        /**
         * Allocate a new dataspace of the specified size
         *
         * \param size                       number of bytes to allocate
         * \param enforce_separate_metadata  if true, the new dataspace
         *                                   will not contain any meta data
         */
        Alloc_ds_result _allocate_dataspace(size_t size, bool enforce_separate_metadata);

        /**
         * Try to allocate block at our local allocator
         */
        Alloc_result _try_local_alloc(size_t size);

        /**
         * Unsynchronized implementation of 'try_alloc'
         */
        Alloc_result _unsynchronized_alloc(size_t size);

    public:

        enum { UNLIMITED = ~0 };

        Regional_heap(Ram_allocator *ram_allocator,
                      Region_map    *region_map,
                      Topology::Numa_region &region,
                      size_t quota_limit = UNLIMITED,
                      void  *static_addr = 0,
                      size_t static_size = 0);

        Regional_heap(Ram_allocator &ram, Region_map &rm,
                      Topology::Numa_region &region)
        : Regional_heap(&ram, &rm, region) { }

        ~Regional_heap();

        /**
         * Reconfigure quota limit
         *
         * \return  negative error code if the new quota limit is lower than
         *          the currently used quota
         */
        int quota_limit(size_t new_quota_limit);

        /**
         * Re-assign RAM allocator and region map
         */
        void reassign_resources(Ram_allocator *ram, Region_map *rm) {
            _ds_pool.reassign_resources(ram, rm); }

        /**
         * Call 'fn' with the start and size of each backing-store region
         */
        template <typename FN>
        void for_each_region(FN const &fn) const
        {
            Mutex::Guard guard(_mutex);
            for (Dataspace const *ds = _ds_pool.first(); ds; ds = ds->next())
                fn(ds->local_addr, ds->size);
        }


        /*************************
         ** Allocator interface **
         *************************/

        Alloc_result try_alloc(size_t) override;
        void         free(void *, size_t) override;
        size_t       consumed() const override { return _quota_used; }
        size_t       overhead(size_t size) const override { return _alloc->overhead(size); }
        bool         need_size_for_free() const override { return false; }
};


/**
 * Heap that allocates each block at a separate dataspace
 */
class Genode::Sliced_regional_heap : public Allocator
{
    private:

        /**
         * Meta-data header placed in front of each allocated block
         */
        struct Block : List<Block>::Element
        {
            Ram_dataspace_capability const ds;
            size_t                   const size;

            Block(Ram_dataspace_capability ds, size_t size) : ds(ds), size(size)
            { }
        };

        Ram_allocator &_ram_alloc;      /* RAM allocator for backing store */
        Region_map    &_region_map;     /* region map of the address space */
        size_t         _consumed = 0;   /* number of allocated bytes */
        List<Block>    _blocks { };     /* list of allocated blocks */
        Mutex          _mutex  { };     /* serialize allocations */

    public:

        /**
         * Return size in bytes of the header prepended to each allocated block
         */
        static constexpr size_t meta_data_size() { return sizeof(Block); }

        /**
         * Constructor
         */
        Sliced_regional_heap(Ram_allocator &ram_alloc, Region_map &region_map);

        /**
         * Destructor
         */
        ~Sliced_regional_heap();


        /*************************
         ** Allocator interface **
         *************************/

        Alloc_result try_alloc(size_t) override;
        void         free(void *, size_t) override;
        size_t       consumed() const override { return _consumed; }
        size_t       overhead(size_t size) const override;
        bool         need_size_for_free() const override { return false; }
};

#endif /* _INCLUDE__BASE__REGIONAL_HEAP_H_ */
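
For orientation, here is a minimal usage sketch of the class declared above. It is not part of the commit: how a 'Topology::Numa_region' is obtained from the topo session is left open (the 'region' parameter below stands in for it), and only interfaces visible in this header plus the standard 'Env' accessors are used.

#include <base/component.h>
#include <base/log.h>
#include <base/regional_heap.h>

using namespace Genode;

/* 'region' must come from the topo session; its lookup is omitted here */
static void use_regional_heap(Env &env, Topology::Numa_region &region)
{
    /* heap whose backing-store dataspaces are served from 'region' */
    Regional_heap heap(env.ram(), env.rm(), region);

    /* 'Attempt'-based allocation, as declared in the Allocator interface */
    heap.try_alloc(4096).with_result(
        [&] (void *ptr) {
            /* ... use the NUMA-local block ... */
            heap.free(ptr, 4096);
        },
        [&] (Alloc_error) {
            error("NUMA-regional allocation failed"); });
}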

repos/base/src/lib/base/regional_heap.cc (new file, 346 lines)

@@ -0,0 +1,346 @@
/*
 * \brief  Implementation of the NUMA-regional Genode heap partition
 * \author Norman Feske
 * \date   2006-05-17
 */

/*
 * Copyright (C) 2006-2017 Genode Labs GmbH
 *
 * This file is part of the Genode OS framework, which is distributed
 * under the terms of the GNU Affero General Public License version 3.
 */

#include <util/construct_at.h>
#include <base/env.h>
#include <base/log.h>
#include <base/regional_heap.h>

using namespace Genode;


namespace {

    enum {
        MIN_CHUNK_SIZE =   4*1024,  /* in machine words */
        MAX_CHUNK_SIZE = 256*1024,
        /*
         * Allocation sizes >= this value are considered as big
         * allocations, which get their own dataspace. In contrast
         * to smaller allocations, this memory is released to
         * the RAM session when 'free()' is called.
         */
        BIG_ALLOCATION_THRESHOLD = 64*1024  /* in bytes */
    };
}


void Regional_heap::Dataspace_pool::remove_and_free(Dataspace &ds)
{
    /*
     * Read the dataspace capability and modify the list before detaching
     * the possible backing store of the 'Dataspace' - we rely on LIFO list
     * manipulation here!
     */

    Ram_dataspace_capability ds_cap = ds.cap;
    void *ds_local_addr             = ds.local_addr;

    remove(&ds);

    /*
     * Call the 'Dataspace' destructor to properly release the RAM dataspace
     * capabilities. Note that we don't free the 'Dataspace' object at the
     * local allocator because this is already done by the 'Regional_heap'
     * destructor prior to executing the 'Dataspace_pool' destructor.
     */
    ds.~Dataspace();

    region_map->detach(ds_local_addr);
    ram_alloc->free(ds_cap);
}


Regional_heap::Dataspace_pool::~Dataspace_pool()
{
    /* free all ram_dataspaces */
    for (Dataspace *ds; (ds = first()); )
        remove_and_free(*ds);
}


int Regional_heap::quota_limit(size_t new_quota_limit)
{
    if (new_quota_limit < _quota_used) return -1;
    _quota_limit = new_quota_limit;
    return 0;
}


Regional_heap::Alloc_ds_result
Regional_heap::_allocate_dataspace(size_t size, bool enforce_separate_metadata)
{
    using Result = Alloc_ds_result;

    return _ds_pool.ram_alloc->try_alloc(size, _numa_id).convert<Result>(

        [&] (Ram_dataspace_capability ds_cap) -> Result {

            struct Alloc_guard
            {
                Ram_allocator &ram;
                Ram_dataspace_capability ds;
                bool keep = false;

                Alloc_guard(Ram_allocator &ram, Ram_dataspace_capability ds)
                : ram(ram), ds(ds) { }

                ~Alloc_guard() { if (!keep) ram.free(ds); }

            } alloc_guard(*_ds_pool.ram_alloc, ds_cap);

            struct Attach_guard
            {
                Region_map &rm;
                struct { void *ptr = nullptr; };
                bool keep = false;

                Attach_guard(Region_map &rm) : rm(rm) { }

                ~Attach_guard() { if (!keep && ptr) rm.detach(ptr); }

            } attach_guard(*_ds_pool.region_map);

            try {
                attach_guard.ptr = _ds_pool.region_map->attach(ds_cap);
            }
            catch (Out_of_ram)                    { return Alloc_error::OUT_OF_RAM; }
            catch (Out_of_caps)                   { return Alloc_error::OUT_OF_CAPS; }
            catch (Region_map::Invalid_dataspace) { return Alloc_error::DENIED; }
            catch (Region_map::Region_conflict)   { return Alloc_error::DENIED; }

            Alloc_result metadata = Alloc_error::DENIED;

            /* allocate the 'Dataspace' structure */
            if (enforce_separate_metadata) {
                metadata = _unsynchronized_alloc(sizeof(Regional_heap::Dataspace));

            } else {

                /* add new local address range to our local allocator */
                _alloc->add_range((addr_t)attach_guard.ptr, size).with_result(
                    [&] (Range_allocator::Range_ok) {
                        metadata = _alloc->alloc_aligned(sizeof(Regional_heap::Dataspace), log2(16U)); },
                    [&] (Alloc_error error) {
                        metadata = error; });
            }

            return metadata.convert<Result>(
                [&] (void *md_ptr) -> Result {
                    Dataspace &ds = *construct_at<Dataspace>(md_ptr, ds_cap,
                                                             attach_guard.ptr, size);
                    _ds_pool.insert(&ds);
                    alloc_guard.keep = attach_guard.keep = true;
                    return &ds;
                },
                [&] (Alloc_error error) {
                    return error; });
        },
        [&] (Alloc_error error) {
            return error; });
}


Allocator::Alloc_result Regional_heap::_try_local_alloc(size_t size)
{
    return _alloc->alloc_aligned(size, log2(16U)).convert<Alloc_result>(

        [&] (void *ptr) {
            _quota_used += size;
            return ptr; },

        [&] (Alloc_error error) {
            return error; });
}


Allocator::Alloc_result Regional_heap::_unsynchronized_alloc(size_t size)
{
    if (size >= BIG_ALLOCATION_THRESHOLD) {

        /*
         * big allocation
         *
         * In this case, we allocate one dataspace without any meta data in it
         * and return its local address without going through the allocator.
         */

        /* align to 4K page */
        size_t const dataspace_size = align_addr(size, 12);

        return _allocate_dataspace(dataspace_size, true).convert<Alloc_result>(

            [&] (Dataspace *ds_ptr) {
                _quota_used += ds_ptr->size;
                return ds_ptr->local_addr; },

            [&] (Alloc_error error) {
                return error; });
    }

    /* try allocation at our local allocator */
    {
        Alloc_result result = _try_local_alloc(size);
        if (result.ok())
            return result;
    }

    size_t dataspace_size = size
                          + Allocator_avl::slab_block_size()
                          + sizeof(Regional_heap::Dataspace);
    /* align to 4K page */
    dataspace_size = align_addr(dataspace_size, 12);

    /*
     * '_chunk_size' is a multiple of 4K machine words, so 'request_size'
     * is 4K-aligned, too.
     */
    size_t const request_size = _chunk_size * sizeof(umword_t);

    Alloc_ds_result result = Alloc_error::DENIED;

    if (dataspace_size < request_size) {

        result = _allocate_dataspace(request_size, false);
        if (result.ok()) {

            /*
             * Exponentially increase chunk size with each allocated chunk
             * until we hit 'MAX_CHUNK_SIZE'.
             */
            _chunk_size = min(2*_chunk_size, (size_t)MAX_CHUNK_SIZE);
        }
    } else {
        result = _allocate_dataspace(dataspace_size, false);
    }

    if (result.failed())
        return result.convert<Alloc_result>(
            [&] (Dataspace *)       { return Alloc_error::DENIED; },
            [&] (Alloc_error error) { return error; });

    /* allocate originally requested block */
    return _try_local_alloc(size);
}


Allocator::Alloc_result Regional_heap::try_alloc(size_t size)
{
    if (size == 0)
        error("attempt to allocate zero-size block from heap");

    /* serialize access of heap functions */
    Mutex::Guard guard(_mutex);

    /* check requested allocation against quota limit */
    if (size + _quota_used > _quota_limit)
        return Alloc_error::DENIED;

    return _unsynchronized_alloc(size);
}


void Regional_heap::free(void *addr, size_t)
{
    /* serialize access of heap functions */
    Mutex::Guard guard(_mutex);

    using Size_at_error = Allocator_avl::Size_at_error;

    Allocator_avl::Size_at_result size_at_result = _alloc->size_at(addr);

    if (size_at_result.ok()) {
        /* forward request to our local allocator */
        size_at_result.with_result(
            [&] (size_t size) {
                _alloc->free(addr, size);
                _quota_used -= size;
            },
            [&] (Size_at_error) { });

        return;
    }

    if (size_at_result == Size_at_error::MISMATCHING_ADDR) {
        /* address was found in local allocator but is not a block start address */
        error("heap could not free memory block: given address ", addr,
              " is not a block start address");
        return;
    }

    /*
     * Block could not be found in the local allocator. So it is either a big
     * allocation or an invalid address.
     */

    Regional_heap::Dataspace *ds = nullptr;
    for (ds = _ds_pool.first(); ds; ds = ds->next())
        if (((addr_t)addr >= (addr_t)ds->local_addr) &&
            ((addr_t)addr <= (addr_t)ds->local_addr + ds->size - 1))
            break;

    if (!ds) {
        warning("heap could not free memory block: invalid address");
        return;
    }

    _quota_used -= ds->size;

    _ds_pool.remove_and_free(*ds);
    _alloc->free(ds);
}


Regional_heap::Regional_heap(Ram_allocator *ram_alloc,
                             Region_map    *region_map,
                             Topology::Numa_region &region,
                             size_t quota_limit,
                             void  *static_addr,
                             size_t static_size)
:
    _alloc(nullptr),
    _ds_pool(ram_alloc, region_map),
    _quota_limit(quota_limit), _quota_used(0),
    _chunk_size(MIN_CHUNK_SIZE),
    _numa_id(region.native_id())
{
    if (static_addr)
        _alloc->add_range((addr_t)static_addr, static_size);
}


Regional_heap::~Regional_heap()
{
    /*
     * Revert allocations of heap-internal 'Dataspace' objects. Otherwise, the
     * subsequent destruction of the 'Allocator_avl' would detect those blocks
     * as dangling allocations.
     *
     * Since no new allocations can occur at destruction time of the
     * 'Regional_heap', it is safe to release the 'Dataspace' objects at the
     * allocator yet still access them afterwards during the destruction of
     * the 'Allocator_avl'.
     */
    for (Regional_heap::Dataspace *ds = _ds_pool.first(); ds; ds = ds->next())
        _alloc->free(ds, sizeof(Dataspace));

    /*
     * Destruct 'Allocator_avl' before destructing the dataspace pool. This
     * order is important because some dataspaces of the dataspace pool are
     * used as backing store for the allocator's meta data. If we destroyed
     * the object pool before the allocator, the subsequent attempt to
     * destruct the allocator would access no-longer-present backing store.
     */
    _alloc.destruct();
}
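
As a worked example of the sizing logic in '_unsynchronized_alloc', the stand-alone sketch below (not part of the commit) replays the chunk-growth arithmetic for a few request sizes. The metadata overhead is a placeholder value because 'Allocator_avl::slab_block_size()' and 'sizeof(Dataspace)' are defined elsewhere in the framework; the constants match the anonymous enum above, and a 64-bit machine word is assumed.

// Stand-alone model of the backing-store sizing in '_unsynchronized_alloc'.
#include <cstdio>
#include <cstddef>
#include <algorithm>
#include <initializer_list>

enum : std::size_t {
    MIN_CHUNK_SIZE =   4*1024,          /* in machine words */
    MAX_CHUNK_SIZE = 256*1024,          /* in machine words */
    BIG_ALLOCATION_THRESHOLD = 64*1024  /* in bytes */
};

/* mirrors 'align_addr(v, 12)': round up to the next 4K page */
static std::size_t align_4k(std::size_t v) { return (v + 0xfffu) & ~std::size_t(0xfffu); }

int main()
{
    std::size_t chunk_size = MIN_CHUNK_SIZE;   /* mirrors '_chunk_size' */
    std::size_t const md_overhead = 4096;      /* placeholder for slab block + Dataspace */

    for (std::size_t request : { 256u, 1024u, 4096u, 70000u }) {

        /* big allocations bypass the local allocator entirely */
        if (request >= BIG_ALLOCATION_THRESHOLD) {
            std::printf("%zu bytes -> own dataspace of %zu bytes\n",
                        request, align_4k(request));
            continue;
        }

        std::size_t const ds_size      = align_4k(request + md_overhead);
        std::size_t const request_size = chunk_size * sizeof(unsigned long);

        if (ds_size < request_size) {
            std::printf("%zu bytes -> chunk of %zu bytes\n", request, request_size);
            /* exponential growth, capped at MAX_CHUNK_SIZE (in the heap,
               only after the dataspace allocation succeeded) */
            chunk_size = std::min<std::size_t>(2*chunk_size, MAX_CHUNK_SIZE);
        } else {
            std::printf("%zu bytes -> dataspace of %zu bytes\n", request, ds_size);
        }
    }
}

Starting from a 4096-word minimum chunk (32 KiB on 64-bit), each successful chunk allocation doubles the next request, so a component that keeps allocating small blocks pays for progressively fewer dataspace round trips while staying within its NUMA region.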