// Copyright 2016 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef REGION_ALLOC_REGION_ALLOC_H_
#define REGION_ALLOC_REGION_ALLOC_H_
#include <stdbool.h>
#include <stddef.h>
#include <zircon/compiler.h>
#include <zircon/types.h>
#include <memory>
#include <utility>
#include <fbl/auto_lock.h>
#include <fbl/intrusive_single_list.h>
#include <fbl/intrusive_wavl_tree.h>
#include <fbl/mutex.h>
#include <fbl/ref_counted.h>
#include <fbl/ref_ptr.h>
#include <fbl/slab_allocator.h>
// RegionAllocator
//
// == Overview ==
// A RegionAllocator is a utility class designed to help with the bookkeeping
// involved in managing the allocation/partitioning of a 64-bit space into
// non-overlapping "Regions". In addition to the RegionAllocator, there are two
// other classes involved in the use of a RegionAllocator:
// RegionAllocator::Region and RegionAllocator::RegionPool.
//
// A Region consists of an unsigned 64-bit base address and an unsigned 64-bit
// size. A Region is considered valid iff its size is non-zero, and it does not
// wrap its 64-bit space.
//
// See the "Memory Allocation" section for a discussion of the RegionPool.
//
// RegionAllocator users can create an allocator and then add any number of
// non-overlapping Regions to its pool of regions available for allocation.
// They may then request that regions be allocated from the pool either by
// requesting that a region be allocated with a particular size/alignment, or
// by asking for a specific base/size. The RegionAllocator will manage all of
// the bookkeeping involved in breaking available regions into smaller chunks,
// tracking allocated regions, and re-merging regions when they are returned to
// the allocator.
//
// == Memory Allocation ==
// RegionAllocators require dynamically allocated memory in order to store the
// bookkeeping required for managing available regions. In order to control
// heap fragmentation and the frequency of heap interaction, a RegionPool object
// may be used to allocate bookkeeping overhead in larger slabs which are carved up
// and placed on a free list to be used by a RegionAllocator. RegionPools are
// created with a defined slab size as well as a maximum memory limit. The pool
// will initially allocate a single slab, but will attempt to grow any time
// bookkeeping is needed but the free list is empty and the allocation of
// another slab would not push the allocator over its maximum memory limit.
//
// RegionPools are ref-counted objects that may be shared by multiple
// RegionAllocators. This allows sub-systems which use multiple allocators to
// impose system-wide limits on bookkeeping overhead. If a RegionPool is to be
// used, it must be assigned to the RegionAllocator before any regions
// can be added or allocated, and the pool may not be re-assigned while the
// allocator is using any bookkeeping from the pool.
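//
// For example, a minimal sketch of two allocators sharing one pool, capping
// their combined bookkeeping at 64KB (the names and the limit here are
// illustrative, not part of the API):
//
//   auto pool = RegionAllocator::RegionPool::Create(64 << 10);
//   RegionAllocator mmio_alloc(pool);
//   RegionAllocator bus_alloc(pool);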
//
// == APIs and Object lifecycle management ==
// The API makes use of fbl managed pointer types in order to simplify lifecycle
// management. RegionPools are managed with fbl::RefPtr while Regions are handed
// out via std::unique_ptr. RegionAllocators themselves impose no lifecycle
// restrictions and may be heap allocated, stack allocated, or embedded directly
// in objects as the user sees fit. It is an error to allow a RegionAllocator
// to destruct while there are allocations in flight.
//
// == Dependencies ==
// The RegionAllocator depends only on malloc/free and fbl. new/delete
// implementations are provided internally; no global new/delete behavior needs
// to be defined by the user.
//
// == Thread Safety ==
// RegionAllocator and RegionPools use fbl::Mutex objects to provide thread
// safety in multi-threaded environments. As such, RegionAllocators are not
// currently suitable for use in code which may run at IRQ context, or which
// must never block.
//
// Each RegionAllocator has its own mutex allowing for concurrent access across
// multiple allocators, even when the allocators share the same RegionPool.
// RegionPools also hold their own mutex, which may be acquired by an allocator
// while that allocator's mutex is held.
//
// == Simple Usage Example ==
//
// /* Create a pool and assign it to a stack allocated allocator, limiting the
// * bookkeeping memory to 32KB. The pool carves bookkeeping out of fixed 4KB
// * slabs allocated on demand, so heap interaction ceases once the 32KB limit
// * has been reached.
// */
// RegionAllocator alloc(RegionAllocator::RegionPool::Create(32 << 10));
//
// /* Add regions to the pool which can be allocated from */
// alloc.AddRegion({ .base = 0xC0000000, .size = 0x40000000 }); // [3GB, 4GB)
// alloc.AddRegion({ .base = 0x4000000000, .size = 0x40000000 }); // [256GB, 257GB)
//
// /* Grab some specific regions out of the available regions.
// * Note: auto here will expand to RegionAllocator::Region::UPtr. Feel free
// * to add your own using statement to lessen the C++ naming pain.
// */
// auto r1 = alloc.GetRegion({ 0xC0100000, 0x100000 }); // [3GB + 1MB, 3GB + 2MB)
// auto r2 = alloc.GetRegion({ 0x4000100000, 0x100000 }); // [256GB + 1MB, 256GB + 2MB)
//
// /* Grab some pointer aligned regions of various sizes */
// auto r3 = alloc.GetRegion(1024);
// auto r4 = alloc.GetRegion(75);
// auto r5 = alloc.GetRegion(80000);
//
// /* Grab some page aligned regions of various sizes */
// auto r6 = alloc.GetRegion(1024, 4 << 10);
// auto r7 = alloc.GetRegion(75, 4 << 10);
// auto r8 = alloc.GetRegion(80000, 4 << 10);
//
// /* Print some stuff about some of the allocations.
// * Note: PRIx64 comes from <inttypes.h>.
// */
// ZX_DEBUG_ASSERT(r3 != nullptr);
// printf("R3 base %" PRIx64 " size %" PRIx64 "\n", r3->base, r3->size);
//
// ZX_DEBUG_ASSERT(r8 != nullptr);
// printf("R8 base %" PRIx64 " size %" PRIx64 "\n", r8->base, r8->size);
//
// /* No need to clean up. Regions will automatically be returned to the
// * allocator as they go out of scope. Then the allocator will return all of
// * its available regions to the pool when it goes out of scope. Finally, the
// * pool will free all of its memory when the allocator releases its reference
// * to the pool.
// */
//
struct ralloc_region_t {
uint64_t base;
uint64_t size;
};
class RegionAllocator {
private:
// Tag types used by the Region class to exist in multiple trees
// simultaneously.
struct SortByBaseTag {};
struct SortBySizeTag {};
public:
static constexpr uint64_t kRegionPoolSlabSize = (4u << 10);
// An enum which selects which set of regions to test against when testing for
// intersection or containment. See TestRegionIntersects and
// TestRegionContainedBy for usage.
enum class TestRegionSet {
Allocated, // The set of currently allocated regions.
Available, // The set of currently available regions.
};
// Enums which act as strongly typed bools and are used to control the
// behavior of AddRegion and SubtractRegion.
enum class AllowOverlap {
No,
Yes,
};
enum class AllowIncomplete {
No,
Yes,
};
class Region;
using RegionSlabTraits =
fbl::ManualDeleteSlabAllocatorTraits<Region*, kRegionPoolSlabSize, fbl::Mutex,
fbl::SlabAllocatorOptions::AllowManualDeleteOperator>;
class Region
: public ralloc_region_t,
public fbl::SlabAllocated<RegionSlabTraits>,
public fbl::ContainableBaseClasses<fbl::TaggedWAVLTreeContainable<Region*, SortByBaseTag>,
fbl::TaggedWAVLTreeContainable<Region*, SortBySizeTag>> {
private:
struct RegionDeleter;
public:
using UPtr = std::unique_ptr<const Region, RegionDeleter>;
private:
using KeyTraitsSortByBase = fbl::DefaultKeyedObjectTraits<uint64_t, Region>;
struct KeyTraitsSortBySize {
static const ralloc_region_t& GetKey(const Region& r) { return r; }
static bool LessThan(const ralloc_region_t& k1, const ralloc_region_t& k2) {
return (k1.size < k2.size) || ((k1.size == k2.size) && (k1.base < k2.base));
}
static bool EqualTo(const ralloc_region_t& k1, const ralloc_region_t& k2) {
return (k1.size == k2.size) && (k1.base == k2.base);
}
};
// When a user's unique_ptr<> reference to this region goes out of
// scope, don't actually delete the region. Instead, recycle it back
// into the set of available regions. The memory for the bookkeeping
// will eventually be deleted when it merges with another available
// region, or when the allocator finally shuts down.
struct RegionDeleter {
void operator()(const Region* region) const noexcept {
// Note: The external std::unique_ptrs that we hand out to our
// users are deliberately limited to being const Region*s. For
// our users, regions are read only constructs; they check them
// out from the allocator and can read the bookkeeping values,
// but not change them.
//
// When the region is finally returned to our pool via the
// deleter, the std::unique_ptr will hand it to the deleter
// class as a const pointer (it has no choice). We need to cast
// that const away here. When the region re-joins the pool, its
// bookkeeping information (which is logically owned by the
// pool) needs to be mutable in order to merge with other
// regions in the pool.
ZX_DEBUG_ASSERT(region->owner_ != nullptr);
Region* r = const_cast<Region*>(region);
r->owner_->ReleaseRegion(r);
}
};
using WAVLTreeSortByBase =
fbl::TaggedWAVLTree<uint64_t, Region*, SortByBaseTag, KeyTraitsSortByBase>;
using WAVLTreeSortBySize =
fbl::TaggedWAVLTree<ralloc_region_t, Region*, SortBySizeTag, KeyTraitsSortBySize>;
// Used by SortByBase key traits
uint64_t GetKey() const { return base; }
// So many friends! I'm the most popular class in the build!!
friend class RegionAllocator;
friend class RegionPool;
friend KeyTraitsSortByBase;
friend struct KeyTraitsSortBySize;
friend class fbl::SlabAllocator<RegionSlabTraits>;
// Regions can only be either placement new'ed by the RegionPool slab
// allocator, or allocated directly from the heap by the RegionAllocator if
// no RegionPool has been configured. They cannot be copied, assigned, or
// deleted. Externally, they should only be handled by their unique_ptr<>s.
explicit Region(RegionAllocator* owner) : owner_(owner) {}
~Region() = default;
DISALLOW_COPY_ASSIGN_AND_MOVE(Region);
RegionAllocator* owner_;
};
class RegionPool : public fbl::RefCounted<RegionPool>,
public fbl::SlabAllocator<RegionSlabTraits> {
public:
using RefPtr = fbl::RefPtr<RegionPool>;
static constexpr size_t SLAB_SIZE = RegionSlabTraits::SLAB_SIZE;
static RefPtr Create(size_t max_memory);
private:
// Only our RefPtr's are allowed to destroy us.
friend fbl::RefPtr<RegionPool>;
// Attempt to allocate at least one slab up front when we are created.
explicit RegionPool(size_t num_slabs) : fbl::SlabAllocator<RegionSlabTraits>(num_slabs, true) {}
~RegionPool() {}
// No one may copy, assign or move us.
DISALLOW_COPY_ASSIGN_AND_MOVE(RegionPool);
};
RegionAllocator() {}
explicit RegionAllocator(const RegionPool::RefPtr& region_pool) : region_pool_(region_pool) {}
explicit RegionAllocator(RegionPool::RefPtr&& region_pool)
: region_pool_(std::move(region_pool)) {}
RegionAllocator(const RegionAllocator& c) = delete;
RegionAllocator& operator=(const RegionAllocator& c) = delete;
~RegionAllocator();
// Set the RegionPool this RegionAllocator will obtain bookkeeping structures from.
//
// Possible return values
// ++ ZX_ERR_BAD_STATE : The RegionAllocator already has bookkeeping
// allocations. All allocated regions must be returned and Reset() must be
// called before the pool may be changed.
zx_status_t SetRegionPool(const RegionPool::RefPtr& region_pool) __TA_EXCLUDES(alloc_lock_);
zx_status_t SetRegionPool(RegionPool::RefPtr&& region_pool) __TA_EXCLUDES(alloc_lock_) {
RegionPool::RefPtr ref(std::move(region_pool));
return SetRegionPool(ref);
}
bool HasRegionPool() const __TA_EXCLUDES(alloc_lock_) {
fbl::AutoLock alloc_lock(&alloc_lock_);
return (region_pool_ != nullptr);
}
// Reset the allocator. Releases all available regions, but has no effect on
// currently allocated regions.
void Reset() __TA_EXCLUDES(alloc_lock_);
// Add a region to the set of allocatable regions.
//
// If allow_overlap is AllowOverlap::No, the added region may not overlap with
// any previously added region and will be rejected if it does. If
// allow_overlap is AllowOverlap::Yes, the added region will be union'ed with
// existing available regions, provided it does not intersect any currently
// allocated region.
//
// Possible return values
// ++ ZX_ERR_NO_MEMORY : not enough bookkeeping memory available in our
// assigned region pool to add the region.
// ++ ZX_ERR_INVALID_ARGS : One of the following conditions applies.
// ++++ The region is invalid (wraps the space, or size is zero)
// ++++ The region being added intersects one or more currently
// allocated regions.
// ++++ The region being added intersects one or more of the currently
// available regions, and allow_overlap is AllowOverlap::No.
zx_status_t AddRegion(const ralloc_region_t& region,
AllowOverlap allow_overlap = AllowOverlap::No) __TA_EXCLUDES(alloc_lock_);
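// For example, a sketch of AddRegion's overlap handling, given some
// RegionAllocator |alloc| (illustrative values; real code should check each
// returned status):
//
//   alloc.AddRegion({ .base = 0x1000, .size = 0x1000 });  // adds [0x1000, 0x2000)
//   alloc.AddRegion({ .base = 0x1800, .size = 0x1000 });  // fails; overlaps
//   alloc.AddRegion({ .base = 0x1800, .size = 0x1000 },
//                   RegionAllocator::AllowOverlap::Yes);  // unions to [0x1000, 0x2800)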
// Subtract a region from the set of allocatable regions.
//
// If allow_incomplete is AllowIncomplete::No, the subtracted region must exist
// entirely within the set of available regions. If allow_incomplete is
// AllowIncomplete::Yes, the subtracted region will remove any portion of any
// available region it intersects with.
//
// Regardless of the value of the allow_incomplete flag, it is illegal to
// attempt to subtract a region which intersects any currently allocated
// region.
//
// Possible return values
// ++ ZX_ERR_NO_MEMORY : not enough bookkeeping memory available in our
// assigned region pool to subtract the region.
// ++ ZX_ERR_INVALID_ARGS : One of the following conditions applies.
// ++++ The region is invalid (wraps the space, or size is zero)
// ++++ The region being subtracted intersects one or more currently
// allocated regions.
// ++++ The region being subtracted intersects portions of the space which
// are absent from both the allocated and available sets, and
// allow_incomplete is AllowIncomplete::No.
zx_status_t SubtractRegion(const ralloc_region_t& region,
AllowIncomplete allow_incomplete = AllowIncomplete::No)
__TA_EXCLUDES(alloc_lock_);
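// For example, a sketch which uses SubtractRegion to punch a hole in an
// available region, given some RegionAllocator |alloc| (illustrative values):
//
//   alloc.AddRegion({ .base = 0x0, .size = 0x10000 });         // [0x0, 0x10000)
//   alloc.SubtractRegion({ .base = 0x4000, .size = 0x1000 });  // leaves [0x0, 0x4000)
//                                                              // and [0x5000, 0x10000)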
// Get a region out of the set of currently available regions which has a
// specified size and alignment. Note: the alignment must be a power of
// two. Pass 1 if alignment does not matter.
//
// Possible return values
// ++ ZX_ERR_NO_MEMORY : not enough bookkeeping memory available in our
// assigned region pool to perform the allocation.
// ++ ZX_ERR_INVALID_ARGS : size is zero, or alignment is not a power of two.
// ++ ZX_ERR_NOT_FOUND : No suitable region could be found in the set of
// currently available regions which can satisfy the request.
zx_status_t GetRegion(uint64_t size, uint64_t alignment, Region::UPtr& out_region)
__TA_EXCLUDES(alloc_lock_);
// Get a region with a specific location and size out of the set of
// currently available regions.
//
// Possible return values
// ++ ZX_ERR_NO_MEMORY : not enough bookkeeping memory available in our
// assigned region pool to perform the allocation.
// ++ ZX_ERR_INVALID_ARGS : The size of the requested region is zero.
// ++ ZX_ERR_NOT_FOUND : No suitable region could be found in the set of
// currently available regions which can satisfy the request.
zx_status_t GetRegion(const ralloc_region_t& requested_region, Region::UPtr& out_region)
__TA_EXCLUDES(alloc_lock_);
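// For example, a sketch of the status-returning GetRegion forms, given some
// RegionAllocator |alloc| (illustrative values; PRIx64 comes from <inttypes.h>):
//
//   RegionAllocator::Region::UPtr region;
//   zx_status_t status = alloc.GetRegion(4096, 4096, region);  // 4KB, 4KB aligned
//   if (status == ZX_OK) {
//     printf("got [%" PRIx64 ", %" PRIx64 ")\n", region->base, region->base + region->size);
//   }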
// Helper which defaults the alignment of a size/alignment based allocation
// to pointer-aligned.
zx_status_t GetRegion(uint64_t size, Region::UPtr& out_region) __TA_EXCLUDES(alloc_lock_) {
return GetRegion(size, sizeof(void*), out_region);
}
// Helper versions of the GetRegion methods for those who don't care
// about the specific reason for failure (nullptr will be returned on
// failure).
Region::UPtr GetRegion(uint64_t size, uint64_t alignment) __TA_EXCLUDES(alloc_lock_) {
Region::UPtr ret;
GetRegion(size, alignment, ret);
return ret;
}
Region::UPtr GetRegion(uint64_t size) __TA_EXCLUDES(alloc_lock_) {
Region::UPtr ret;
GetRegion(size, ret);
return ret;
}
Region::UPtr GetRegion(const ralloc_region_t& requested_region) __TA_EXCLUDES(alloc_lock_) {
Region::UPtr ret;
GetRegion(requested_region, ret);
return ret;
}
// Returns true if |region| intersects any of the regions in either the set of
// currently allocated regions, or currently available regions.
//
// region : The region to test
// which : Either TestRegionSet::Allocated or TestRegionSet::Available to
// test against the set of currently allocated or currently available
// regions.
bool TestRegionIntersects(const ralloc_region_t& region, TestRegionSet which) const {
fbl::AutoLock alloc_lock(&alloc_lock_);
return IntersectsLocked(
(which == TestRegionSet::Allocated) ? allocated_regions_by_base_ : avail_regions_by_base_,
region);
}
// Returns true if |region| is completely contained by any of the regions in
// either the set of currently allocated regions, or currently available
// regions.
//
// region : The region to test
// which : Either TestRegionSet::Allocated or TestRegionSet::Available to
// test against the set of currently allocated or currently available
// regions.
bool TestRegionContainedBy(const ralloc_region_t& region, TestRegionSet which) const {
fbl::AutoLock alloc_lock(&alloc_lock_);
return ContainedByLocked(
(which == TestRegionSet::Allocated) ? allocated_regions_by_base_ : avail_regions_by_base_,
region);
}
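// For example, a sketch which probes the allocated set, given some
// RegionAllocator |alloc| (illustrative values):
//
//   ralloc_region_t probe = { .base = 0x1000, .size = 0x100 };
//   if (alloc.TestRegionIntersects(probe, RegionAllocator::TestRegionSet::Allocated)) {
//     /* some portion of [0x1000, 0x1100) is currently allocated */
//   }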
size_t AllocatedRegionCount() const __TA_EXCLUDES(alloc_lock_) {
fbl::AutoLock alloc_lock(&alloc_lock_);
return allocated_regions_by_base_.size();
}
size_t AvailableRegionCount() const __TA_EXCLUDES(alloc_lock_) {
fbl::AutoLock alloc_lock(&alloc_lock_);
return avail_regions_by_base_.size();
}
// Walk the allocated regions and call the user provided callback for each
// entry. Stop when out of entries or the callback returns false.
//
// *** It is absolutely required that the user callback not call into any other
// RegionAllocator public APIs, and it should almost certainly not acquire any
// locks of any kind. This method holds the allocation lock while invoking the
// user provided callback, so it cannot protect against the deadlocks and lock
// inversions which such calls could produce.
template <typename WalkCallback>
void WalkAllocatedRegions(WalkCallback&& cb) const __TA_EXCLUDES(alloc_lock_) {
fbl::AutoLock alloc_lock(&alloc_lock_);
for (const auto& region : allocated_regions_by_base_) {
if (!std::forward<WalkCallback>(cb)(&region)) {
break;
}
}
}
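// For example, a sketch which logs each allocated region, given some
// RegionAllocator |alloc|. Remember that the callback must not call back into
// the allocator (PRIx64 comes from <inttypes.h>):
//
//   alloc.WalkAllocatedRegions([](const ralloc_region_t* r) -> bool {
//     printf("allocated [%" PRIx64 ", %" PRIx64 ")\n", r->base, r->base + r->size);
//     return true;  // keep walking
//   });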
// Walk the available regions and call the user provided callback for each
// entry. Stop when out of entries or the callback returns false.
//
// *** It is absolutely required that the user callback not call into any other
// RegionAllocator public APIs, and it should almost certainly not acquire any
// locks of any kind. This method holds the allocation lock while invoking the
// user provided callback, so it cannot protect against the deadlocks and lock
// inversions which such calls could produce.
template <typename WalkCallback>
void WalkAvailableRegions(WalkCallback&& cb) const __TA_EXCLUDES(alloc_lock_) {
fbl::AutoLock alloc_lock(&alloc_lock_);
for (const auto& region : avail_regions_by_base_) {
// The forward permits use of cb's operator() const && if cb has that.
if (!std::forward<WalkCallback>(cb)(&region)) {
break;
}
}
}
private:
zx_status_t AddSubtractSanityCheckLocked(const ralloc_region_t& region)
__TA_REQUIRES(alloc_lock_);
void ReleaseRegion(Region* region) __TA_EXCLUDES(alloc_lock_);
void AddRegionToAvailLocked(Region* region, AllowOverlap allow_overlap = AllowOverlap::No)
__TA_REQUIRES(alloc_lock_);
zx_status_t AllocFromAvailLocked(Region::WAVLTreeSortBySize::iterator source,
Region::UPtr& out_region, uint64_t base, uint64_t size)
__TA_REQUIRES(alloc_lock_);
// Returns true if any of the regions in |tree| intersect |region|.
bool IntersectsLocked(const Region::WAVLTreeSortByBase& tree, const ralloc_region_t& region) const
__TA_REQUIRES(alloc_lock_);
// Returns true if any of the regions in |tree| completely contain |region|.
bool ContainedByLocked(const Region::WAVLTreeSortByBase& tree,
const ralloc_region_t& region) const __TA_REQUIRES(alloc_lock_);
// Create a region by allocating it from the current RegionPool, or from the
// heap if we have no assigned region pool.
Region* CreateRegion() __TA_REQUIRES(alloc_lock_);
// Destroy a region by either returning it to the current RegionPool, or to
// the heap if we have no assigned region pool.
void DestroyRegion(Region* region) __TA_REQUIRES(alloc_lock_);
/* Locking notes:
*
* alloc_lock_ protects all of the bookkeeping members of the
* RegionAllocator. This includes the allocated index, the available
* indices (by base and by size) and the region pool.
*
* The alloc_lock_ may be held while calling into a RegionAllocator's
* assigned RegionPool, but code from the RegionPool will never call into
* the RegionAllocator.
*/
mutable fbl::Mutex alloc_lock_;
Region::WAVLTreeSortByBase allocated_regions_by_base_ __TA_GUARDED(alloc_lock_);
Region::WAVLTreeSortByBase avail_regions_by_base_ __TA_GUARDED(alloc_lock_);
Region::WAVLTreeSortBySize avail_regions_by_size_ __TA_GUARDED(alloc_lock_);
RegionPool::RefPtr region_pool_ __TA_GUARDED(alloc_lock_);
};
#endif // REGION_ALLOC_REGION_ALLOC_H_