From 0dc4c1d429c50c69bd136bfec2fca8a5f759a01d Mon Sep 17 00:00:00 2001
From: Yuekai Jia
Date: Thu, 11 Jul 2024 03:57:25 +0800
Subject: [PATCH] Initial commit

---
 .github/workflows/ci.yml |  55 ++++++++++++
 .gitignore               |   4 +
 Cargo.toml               |  37 ++++++++
 benches/collections.rs   | 101 ++++++++++++++++++++++
 benches/utils/mod.rs     |  25 ++++++
 src/bitmap.rs            |  95 +++++++++++++++++++++
 src/buddy.rs             |  58 +++++++++++++
 src/lib.rs               | 180 +++++++++++++++++++++++++++++++++++++++
 src/slab.rs              |  68 +++++++++++++++
 src/tlsf.rs              |  77 +++++++++++++++++
 tests/allocator.rs       | 143 +++++++++++++++++++++++++++++++
 11 files changed, 843 insertions(+)
 create mode 100644 .github/workflows/ci.yml
 create mode 100644 .gitignore
 create mode 100644 Cargo.toml
 create mode 100644 benches/collections.rs
 create mode 100644 benches/utils/mod.rs
 create mode 100644 src/bitmap.rs
 create mode 100644 src/buddy.rs
 create mode 100644 src/lib.rs
 create mode 100644 src/slab.rs
 create mode 100644 src/tlsf.rs
 create mode 100644 tests/allocator.rs

diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
new file mode 100644
index 0000000..531ddd1
--- /dev/null
+++ b/.github/workflows/ci.yml
@@ -0,0 +1,55 @@
+name: CI
+
+on: [push, pull_request]
+
+jobs:
+  ci:
+    runs-on: ubuntu-latest
+    strategy:
+      fail-fast: false
+      matrix:
+        rust-toolchain: [nightly]
+        targets: [x86_64-unknown-linux-gnu, x86_64-unknown-none, riscv64gc-unknown-none-elf, aarch64-unknown-none-softfloat]
+    steps:
+    - uses: actions/checkout@v4
+    - uses: dtolnay/rust-toolchain@nightly
+      with:
+        toolchain: ${{ matrix.rust-toolchain }}
+        components: rust-src, clippy, rustfmt
+        targets: ${{ matrix.targets }}
+    - name: Check rust version
+      run: rustc --version --verbose
+    - name: Check code format
+      run: cargo fmt --all -- --check
+    - name: Clippy
+      run: cargo clippy --target ${{ matrix.targets }} --all-features -- -A clippy::new_without_default
+    - name: Build
+      run: cargo build --target ${{ matrix.targets }} --all-features
+    - name: Unit test
+      if: ${{ matrix.targets == 'x86_64-unknown-linux-gnu' }}
+      run: cargo test --target ${{ matrix.targets }} -- --nocapture
+
+  doc:
+    runs-on: ubuntu-latest
+    strategy:
+      fail-fast: false
+    permissions:
+      contents: write
+    env:
+      default-branch: ${{ format('refs/heads/{0}', github.event.repository.default_branch) }}
+      RUSTDOCFLAGS: -D rustdoc::broken_intra_doc_links -D missing-docs
+    steps:
+    - uses: actions/checkout@v4
+    - uses: dtolnay/rust-toolchain@nightly
+    - name: Build docs
+      continue-on-error: ${{ github.ref != env.default-branch && github.event_name != 'pull_request' }}
+      run: |
+        cargo doc --no-deps --all-features
+        printf '<meta http-equiv="refresh" content="0;url=%s/index.html">' $(cargo tree | head -1 | cut -d' ' -f1) > target/doc/index.html
+    - name: Deploy to Github Pages
+      if: ${{ github.ref == env.default-branch }}
+      uses: JamesIves/github-pages-deploy-action@v4
+      with:
+        single-commit: true
+        branch: gh-pages
+        folder: target/doc
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..ff78c42
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,4 @@
+/target
+/.vscode
+.DS_Store
+Cargo.lock
diff --git a/Cargo.toml b/Cargo.toml
new file mode 100644
index 0000000..19e2871
--- /dev/null
+++ b/Cargo.toml
@@ -0,0 +1,37 @@
+[package]
+name = "allocator"
+version = "0.1.0"
+edition = "2021"
+authors = ["Yuekai Jia"]
+description = "Various allocator algorithms in a unified interface"
+license = "GPL-3.0-or-later OR Apache-2.0 OR MulanPSL-2.0"
+homepage = "https://github.com/arceos-org/arceos"
+repository = "https://github.com/arceos-org/allocator"
+documentation = "https://arceos-org.github.io/allocator"
+
+[features]
+default = []
+full = ["bitmap", "tlsf", "slab", "buddy", "allocator_api"]
+
+bitmap = ["dep:bitmap-allocator"]
+
+tlsf = ["dep:rlsf"]
+slab = ["dep:slab_allocator"]
+buddy = ["dep:buddy_system_allocator"]
+
+allocator_api = []
+
+[dependencies]
+rlsf = { version = "0.2", optional = true }
+buddy_system_allocator = { version = "0.10", default-features = false, optional = true }
+slab_allocator = { git = "https://github.com/arceos-org/slab_allocator.git", tag = "v0.3.1", optional = true }
+bitmap-allocator = { git = "https://github.com/rcore-os/bitmap-allocator.git", rev = "88e871a", optional = true }
+
+[dev-dependencies]
+allocator = { path = ".", features = ["full"] }
+rand = { version = "0.8", features = ["small_rng"] }
+criterion = { version = "0.5", features = ["html_reports"] }
+
+[[bench]]
+name = "collections"
+harness = false
diff --git a/benches/collections.rs b/benches/collections.rs
new file mode 100644
index 0000000..c31fd84
--- /dev/null
+++ b/benches/collections.rs
@@ -0,0 +1,101 @@
+#![feature(allocator_api)]
+#![feature(btreemap_alloc)]
+
+mod utils;
+
+use std::alloc::Allocator;
+use std::collections::BTreeMap;
+use std::io::Write;
+
+use allocator::{AllocatorRc, BuddyByteAllocator, SlabByteAllocator, TlsfByteAllocator};
+use criterion::{black_box, criterion_group, criterion_main, Criterion};
+use rand::{rngs::SmallRng, seq::SliceRandom, RngCore, SeedableRng};
+
+use self::utils::MemoryPool;
+
+const POOL_SIZE: usize = 1024 * 1024 * 128;
+
+fn vec_push(n: usize, alloc: &(impl Allocator + Clone)) {
+    let mut v: Vec<u32> = Vec::new_in(alloc.clone());
+    for _ in 0..n {
+        v.push(0xdead_beef);
+    }
+    drop(v);
+}
+
+fn vec_rand_free(n: usize, blk_size: usize, alloc: &(impl Allocator + Clone)) {
+    let mut v = Vec::new_in(alloc.clone());
+    for _ in 0..n {
+        let block = Vec::<u64>::with_capacity_in(blk_size, alloc.clone());
+        v.push(block);
+    }
+
+    let mut rng = SmallRng::seed_from_u64(0xdead_beef);
+    let mut index = Vec::with_capacity_in(n, alloc.clone());
+    for i in 0..n {
+        index.push(i);
+    }
+    index.shuffle(&mut rng);
+
+    for i in index {
+        v[i] = Vec::new_in(alloc.clone());
+    }
+    drop(v);
+}
+
+fn btree_map(n: usize, alloc: &(impl Allocator + Clone)) {
+    let mut rng = SmallRng::seed_from_u64(0xdead_beef);
+    let mut m = BTreeMap::new_in(alloc.clone());
+    for _ in 0..n {
+        if rng.next_u32() % 5 == 0 && !m.is_empty() {
+            m.pop_first();
+        } else {
+            let value = rng.next_u32();
+            let mut key = Vec::new_in(alloc.clone());
+            write!(&mut key, "key_{value}").unwrap();
+            m.insert(key, value);
+        }
+    }
+    m.clear();
+    drop(m);
+}
+
+fn bench(c: &mut Criterion, alloc_name: &str, alloc: impl Allocator + Clone) {
+    let mut g = c.benchmark_group(alloc_name);
+    g.bench_function("vec_push_3M", |b| {
+        b.iter(|| vec_push(black_box(3_000_000), &alloc));
+    });
+    g.sample_size(10);
+    g.bench_function("vec_rand_free_25K_64", |b| {
+        b.iter(|| vec_rand_free(black_box(25_000), black_box(64), &alloc));
+    });
+    g.bench_function("vec_rand_free_7500_520", |b| {
+        b.iter(|| vec_rand_free(black_box(7_500), black_box(520), &alloc));
+    });
+    g.bench_function("btree_map_50K", |b| {
+        b.iter(|| btree_map(black_box(50_000), &alloc));
+    });
+}
+
+fn criterion_benchmark(c: &mut Criterion) {
+    let mut pool = MemoryPool::new(POOL_SIZE);
+    bench(c, "system", std::alloc::System);
+    bench(
+        c,
+        "tlsf",
+        AllocatorRc::new(TlsfByteAllocator::new(), pool.as_slice()),
+    );
+    bench(
+        c,
+        "slab",
+        AllocatorRc::new(SlabByteAllocator::new(), pool.as_slice()),
+    );
+    bench(
+        c,
+        "buddy",
+        AllocatorRc::new(BuddyByteAllocator::new(), pool.as_slice()),
+    );
+}
+
+criterion_group!(benches, criterion_benchmark);
+criterion_main!(benches);
diff --git a/benches/utils/mod.rs b/benches/utils/mod.rs
new file mode 100644
index 0000000..995e153
--- /dev/null
+++ b/benches/utils/mod.rs
@@ -0,0 +1,25 @@
+use std::alloc::Layout;
+use std::ptr::NonNull;
+
+pub struct MemoryPool {
+    ptr: NonNull<u8>,
+    layout: Layout,
+}
+
+impl MemoryPool {
+    pub fn new(size: usize) -> Self {
+        let layout = Layout::from_size_align(size, 4096).unwrap();
+        let ptr = NonNull::new(unsafe { std::alloc::alloc_zeroed(layout) }).unwrap();
+        Self { ptr, layout }
+    }
+
+    pub fn as_slice(&mut self) -> &mut [u8] {
+        unsafe { core::slice::from_raw_parts_mut(self.ptr.as_ptr(), self.layout.size()) }
+    }
+}
+
+impl Drop for MemoryPool {
+    fn drop(&mut self) {
+        unsafe { std::alloc::dealloc(self.ptr.as_ptr(), self.layout) };
+    }
+}
diff --git a/src/bitmap.rs b/src/bitmap.rs
new file mode 100644
index 0000000..86018c7
--- /dev/null
+++ b/src/bitmap.rs
@@ -0,0 +1,95 @@
+//! Bitmap allocation in page-granularity.
+//!
+//! TODO: adaptive size
+
+use bitmap_allocator::BitAlloc;
+
+use crate::{AllocError, AllocResult, BaseAllocator, PageAllocator};
+
+// Support max 1M * 4096 = 4GB memory.
+type BitAllocUsed = bitmap_allocator::BitAlloc1M;
+
+/// A page-granularity memory allocator based on the [bitmap_allocator].
+///
+/// It internally uses a bitmap, where each bit indicates whether a page has
+/// been allocated.
+///
+/// The `PAGE_SIZE` must be a power of two.
+///
+/// [bitmap_allocator]: https://github.com/rcore-os/bitmap-allocator
+pub struct BitmapPageAllocator<const PAGE_SIZE: usize> {
+    base: usize,
+    total_pages: usize,
+    used_pages: usize,
+    inner: BitAllocUsed,
+}
+
+impl<const PAGE_SIZE: usize> BitmapPageAllocator<PAGE_SIZE> {
+    /// Creates a new empty `BitmapPageAllocator`.
+    pub const fn new() -> Self {
+        Self {
+            base: 0,
+            total_pages: 0,
+            used_pages: 0,
+            inner: BitAllocUsed::DEFAULT,
+        }
+    }
+}
+
+impl<const PAGE_SIZE: usize> BaseAllocator for BitmapPageAllocator<PAGE_SIZE> {
+    fn init(&mut self, start: usize, size: usize) {
+        assert!(PAGE_SIZE.is_power_of_two());
+        let end = super::align_down(start + size, PAGE_SIZE);
+        let start = super::align_up(start, PAGE_SIZE);
+        self.base = start;
+        self.total_pages = (end - start) / PAGE_SIZE;
+        self.inner.insert(0..self.total_pages);
+    }
+
+    fn add_memory(&mut self, _start: usize, _size: usize) -> AllocResult {
+        Err(AllocError::NoMemory) // unsupported
+    }
+}
+
+impl<const PAGE_SIZE: usize> PageAllocator for BitmapPageAllocator<PAGE_SIZE> {
+    const PAGE_SIZE: usize = PAGE_SIZE;
+
+    fn alloc_pages(&mut self, num_pages: usize, align_pow2: usize) -> AllocResult<usize> {
+        if align_pow2 % PAGE_SIZE != 0 {
+            return Err(AllocError::InvalidParam);
+        }
+        let align_pow2 = align_pow2 / PAGE_SIZE;
+        if !align_pow2.is_power_of_two() {
+            return Err(AllocError::InvalidParam);
+        }
+        let align_log2 = align_pow2.trailing_zeros() as usize;
+        match num_pages.cmp(&1) {
+            core::cmp::Ordering::Equal => self.inner.alloc().map(|idx| idx * PAGE_SIZE + self.base),
+            core::cmp::Ordering::Greater => self
+                .inner
+                .alloc_contiguous(num_pages, align_log2)
+                .map(|idx| idx * PAGE_SIZE + self.base),
+            _ => return Err(AllocError::InvalidParam),
+        }
+        .ok_or(AllocError::NoMemory)
+        .inspect(|_| self.used_pages += num_pages)
+    }
+
+    fn dealloc_pages(&mut self, pos: usize, num_pages: usize) {
+        // TODO: do not decrease `used_pages` if deallocation fails
+        self.used_pages -= num_pages;
+        self.inner.dealloc((pos - self.base) / PAGE_SIZE)
+    }
+
+    fn total_pages(&self) -> usize {
+        self.total_pages
+    }
+
+    fn used_pages(&self) -> usize {
+        self.used_pages
+    }
+
+    fn available_pages(&self) -> usize {
+        self.total_pages - self.used_pages
+    }
+}
diff --git a/src/buddy.rs b/src/buddy.rs
new file mode 100644
index 0000000..872f540
--- /dev/null
+++ b/src/buddy.rs
@@ -0,0 +1,58 @@
+//! Buddy memory allocation.
+//!
+//! TODO: more efficient
+
+use buddy_system_allocator::Heap;
+use core::alloc::Layout;
+use core::ptr::NonNull;
+
+use crate::{AllocError, AllocResult, BaseAllocator, ByteAllocator};
+
+/// A byte-granularity memory allocator based on the [buddy_system_allocator].
+///
+/// [buddy_system_allocator]: https://docs.rs/buddy_system_allocator/latest/buddy_system_allocator/
+pub struct BuddyByteAllocator {
+    inner: Heap<32>,
+}
+
+impl BuddyByteAllocator {
+    /// Creates a new empty `BuddyByteAllocator`.
+    pub const fn new() -> Self {
+        Self {
+            inner: Heap::<32>::new(),
+        }
+    }
+}
+
+impl BaseAllocator for BuddyByteAllocator {
+    fn init(&mut self, start: usize, size: usize) {
+        unsafe { self.inner.init(start, size) };
+    }
+
+    fn add_memory(&mut self, start: usize, size: usize) -> AllocResult {
+        unsafe { self.inner.add_to_heap(start, start + size) };
+        Ok(())
+    }
+}
+
+impl ByteAllocator for BuddyByteAllocator {
+    fn alloc(&mut self, layout: Layout) -> AllocResult<NonNull<u8>> {
+        self.inner.alloc(layout).map_err(|_| AllocError::NoMemory)
+    }
+
+    fn dealloc(&mut self, pos: NonNull<u8>, layout: Layout) {
+        self.inner.dealloc(pos, layout)
+    }
+
+    fn total_bytes(&self) -> usize {
+        self.inner.stats_total_bytes()
+    }
+
+    fn used_bytes(&self) -> usize {
+        self.inner.stats_alloc_actual()
+    }
+
+    fn available_bytes(&self) -> usize {
+        self.inner.stats_total_bytes() - self.inner.stats_alloc_actual()
+    }
+}
diff --git a/src/lib.rs b/src/lib.rs
new file mode 100644
index 0000000..7a79161
--- /dev/null
+++ b/src/lib.rs
@@ -0,0 +1,180 @@
+//! Various allocator algorithms in a unified interface.
+//!
+//! There are three types of allocators:
+//!
+//! - [`ByteAllocator`]: Byte-granularity memory allocator. (e.g.,
+//!   [`BuddyByteAllocator`], [`SlabByteAllocator`])
+//! - [`PageAllocator`]: Page-granularity memory allocator. (e.g.,
+//!   [`BitmapPageAllocator`])
+//! - [`IdAllocator`]: Used to allocate unique IDs.
+
+#![no_std]
+#![cfg_attr(feature = "allocator_api", feature(allocator_api))]
+
+#[cfg(feature = "bitmap")]
+mod bitmap;
+#[cfg(feature = "bitmap")]
+pub use bitmap::BitmapPageAllocator;
+
+#[cfg(feature = "buddy")]
+mod buddy;
+#[cfg(feature = "buddy")]
+pub use buddy::BuddyByteAllocator;
+
+#[cfg(feature = "slab")]
+mod slab;
+#[cfg(feature = "slab")]
+pub use slab::SlabByteAllocator;
+
+#[cfg(feature = "tlsf")]
+mod tlsf;
+#[cfg(feature = "tlsf")]
+pub use tlsf::TlsfByteAllocator;
+
+use core::alloc::Layout;
+use core::ptr::NonNull;
+
+/// The error type used for allocation.
+#[derive(Debug)]
+pub enum AllocError {
+    /// Invalid `size` or `align_pow2`. (e.g. unaligned)
+    InvalidParam,
+    /// Memory added by `add_memory` overlapped with existing memory.
+    MemoryOverlap,
+    /// Not enough memory to allocate.
+    NoMemory,
+    /// Deallocate an unallocated memory region.
+    NotAllocated,
+}
+
+/// A [`Result`] type with [`AllocError`] as the error type.
+pub type AllocResult<T = ()> = Result<T, AllocError>;
+
+/// The base allocator inherited by other allocators.
+pub trait BaseAllocator {
+    /// Initialize the allocator with a free memory region.
+    fn init(&mut self, start: usize, size: usize);
+
+    /// Add a free memory region to the allocator.
+    fn add_memory(&mut self, start: usize, size: usize) -> AllocResult;
+}
+
+/// Byte-granularity allocator.
+pub trait ByteAllocator: BaseAllocator {
+    /// Allocate memory with the given size (in bytes) and alignment.
+    fn alloc(&mut self, layout: Layout) -> AllocResult<NonNull<u8>>;
+
+    /// Deallocate memory at the given position, size, and alignment.
+    fn dealloc(&mut self, pos: NonNull<u8>, layout: Layout);
+
+    /// Returns total memory size in bytes.
+    fn total_bytes(&self) -> usize;
+
+    /// Returns allocated memory size in bytes.
+    fn used_bytes(&self) -> usize;
+
+    /// Returns available memory size in bytes.
+    fn available_bytes(&self) -> usize;
+}
+
+/// Page-granularity allocator.
+pub trait PageAllocator: BaseAllocator {
+    /// The size of a memory page.
+    const PAGE_SIZE: usize;
+
+    /// Allocate contiguous memory pages with given count and alignment.
+    fn alloc_pages(&mut self, num_pages: usize, align_pow2: usize) -> AllocResult<usize>;
+
+    /// Deallocate contiguous memory pages with given position and count.
+    fn dealloc_pages(&mut self, pos: usize, num_pages: usize);
+
+    /// Returns the total number of memory pages.
+    fn total_pages(&self) -> usize;
+
+    /// Returns the number of allocated memory pages.
+    fn used_pages(&self) -> usize;
+
+    /// Returns the number of available memory pages.
+    fn available_pages(&self) -> usize;
+}
+
+/// Used to allocate unique IDs (e.g., thread ID).
+pub trait IdAllocator: BaseAllocator {
+    /// Allocate contiguous IDs with given count and alignment.
+    fn alloc_id(&mut self, count: usize, align_pow2: usize) -> AllocResult<usize>;
+
+    /// Deallocate contiguous IDs with given position and count.
+    fn dealloc_id(&mut self, start_id: usize, count: usize);
+
+    /// Whether the given `id` was allocated.
+    fn is_allocated(&self, id: usize) -> bool;
+
+    /// Mark the given `id` as allocated so that it cannot be allocated again.
+    fn alloc_fixed_id(&mut self, id: usize) -> AllocResult;
+
+    /// Returns the maximum number of supported IDs.
+    fn size(&self) -> usize;
+
+    /// Returns the number of allocated IDs.
+    fn used(&self) -> usize;
+
+    /// Returns the number of available IDs.
+    fn available(&self) -> usize;
+}
+
+#[inline]
+const fn align_down(pos: usize, align: usize) -> usize {
+    pos & !(align - 1)
+}
+
+#[inline]
+const fn align_up(pos: usize, align: usize) -> usize {
+    (pos + align - 1) & !(align - 1)
+}
+
+#[cfg(feature = "allocator_api")]
+mod allocator_api {
+    extern crate alloc;
+
+    use super::ByteAllocator;
+    use alloc::rc::Rc;
+    use core::alloc::{AllocError, Allocator, Layout};
+    use core::cell::RefCell;
+    use core::ptr::NonNull;
+
+    /// A byte-allocator wrapped in [`Rc`] that implements [`core::alloc::Allocator`].
+    pub struct AllocatorRc<A: ByteAllocator>(Rc<RefCell<A>>);
+
+    impl<A: ByteAllocator> AllocatorRc<A> {
+        /// Creates a new allocator with the given memory pool.
+        pub fn new(mut inner: A, pool: &mut [u8]) -> Self {
+            inner.init(pool.as_mut_ptr() as usize, pool.len());
+            Self(Rc::new(RefCell::new(inner)))
+        }
+    }
+
+    unsafe impl<A: ByteAllocator> Allocator for AllocatorRc<A> {
+        fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
+            match layout.size() {
+                0 => Ok(NonNull::slice_from_raw_parts(NonNull::dangling(), 0)),
+                size => {
+                    let raw_addr = self.0.borrow_mut().alloc(layout).map_err(|_| AllocError)?;
+                    Ok(NonNull::slice_from_raw_parts(raw_addr, size))
+                }
+            }
+        }
+
+        unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
+            self.0.borrow_mut().dealloc(ptr, layout)
+        }
+    }
+
+    impl<A: ByteAllocator> Clone for AllocatorRc<A> {
+        fn clone(&self) -> Self {
+            Self(self.0.clone())
+        }
+    }
+}
+
+#[cfg(feature = "allocator_api")]
+pub use allocator_api::AllocatorRc;
diff --git a/src/slab.rs b/src/slab.rs
new file mode 100644
index 0000000..17cb164
--- /dev/null
+++ b/src/slab.rs
@@ -0,0 +1,68 @@
+//! Slab memory allocation.
+//!
+//! TODO: comments
+
+use super::{AllocError, AllocResult, BaseAllocator, ByteAllocator};
+use core::alloc::Layout;
+use core::ptr::NonNull;
+use slab_allocator::Heap;
+
+/// A byte-granularity memory allocator based on the [slab allocator].
+///
+/// [slab allocator]: ../slab_allocator/index.html
+pub struct SlabByteAllocator {
+    inner: Option<Heap>,
+}
+
+impl SlabByteAllocator {
+    /// Creates a new empty `SlabByteAllocator`.
+    pub const fn new() -> Self {
+        Self { inner: None }
+    }
+
+    fn inner_mut(&mut self) -> &mut Heap {
+        self.inner.as_mut().unwrap()
+    }
+
+    fn inner(&self) -> &Heap {
+        self.inner.as_ref().unwrap()
+    }
+}
+
+impl BaseAllocator for SlabByteAllocator {
+    fn init(&mut self, start: usize, size: usize) {
+        self.inner = unsafe { Some(Heap::new(start, size)) };
+    }
+
+    fn add_memory(&mut self, start: usize, size: usize) -> AllocResult {
+        unsafe {
+            self.inner_mut().add_memory(start, size);
+        }
+        Ok(())
+    }
+}
+
+impl ByteAllocator for SlabByteAllocator {
+    fn alloc(&mut self, layout: Layout) -> AllocResult<NonNull<u8>> {
+        self.inner_mut()
+            .allocate(layout)
+            .map(|addr| unsafe { NonNull::new_unchecked(addr as *mut u8) })
+            .map_err(|_| AllocError::NoMemory)
+    }
+
+    fn dealloc(&mut self, pos: NonNull<u8>, layout: Layout) {
+        unsafe { self.inner_mut().deallocate(pos.as_ptr() as usize, layout) }
+    }
+
+    fn total_bytes(&self) -> usize {
+        self.inner().total_bytes()
+    }
+
+    fn used_bytes(&self) -> usize {
+        self.inner().used_bytes()
+    }
+
+    fn available_bytes(&self) -> usize {
+        self.inner().available_bytes()
+    }
+}
diff --git a/src/tlsf.rs b/src/tlsf.rs
new file mode 100644
index 0000000..3d2cd99
--- /dev/null
+++ b/src/tlsf.rs
@@ -0,0 +1,77 @@
+//! The TLSF (Two-Level Segregated Fit) dynamic memory allocation algorithm.
+//!
+//! This module wraps the implementation provided by the [rlsf] crate.
+
+use super::{AllocError, AllocResult, BaseAllocator, ByteAllocator};
+use core::alloc::Layout;
+use core::ptr::NonNull;
+use rlsf::Tlsf;
+
+/// A TLSF (Two-Level Segregated Fit) memory allocator.
+///
+/// It's just a wrapper structure of [`rlsf::Tlsf`], with `FLLEN` and `SLLEN`
+/// fixed to 28 and 32.
+pub struct TlsfByteAllocator {
+    inner: Tlsf<'static, u32, u32, 28, 32>, // max pool size: 32 * 2^28 = 8G
+    total_bytes: usize,
+    used_bytes: usize,
+}
+
+impl TlsfByteAllocator {
+    /// Creates a new empty [`TlsfByteAllocator`].
+    pub const fn new() -> Self {
+        Self {
+            inner: Tlsf::new(),
+            total_bytes: 0,
+            used_bytes: 0,
+        }
+    }
+}
+
+impl BaseAllocator for TlsfByteAllocator {
+    fn init(&mut self, start: usize, size: usize) {
+        unsafe {
+            let pool = core::slice::from_raw_parts_mut(start as *mut u8, size);
+            self.inner
+                .insert_free_block_ptr(NonNull::new(pool).unwrap())
+                .unwrap();
+        }
+        self.total_bytes = size;
+    }
+
+    fn add_memory(&mut self, start: usize, size: usize) -> AllocResult {
+        unsafe {
+            let pool = core::slice::from_raw_parts_mut(start as *mut u8, size);
+            self.inner
+                .insert_free_block_ptr(NonNull::new(pool).unwrap())
+                .ok_or(AllocError::InvalidParam)?;
+        }
+        self.total_bytes += size;
+        Ok(())
+    }
+}
+
+impl ByteAllocator for TlsfByteAllocator {
+    fn alloc(&mut self, layout: Layout) -> AllocResult<NonNull<u8>> {
+        let ptr = self.inner.allocate(layout).ok_or(AllocError::NoMemory)?;
+        self.used_bytes += layout.size();
+        Ok(ptr)
+    }
+
+    fn dealloc(&mut self, pos: NonNull<u8>, layout: Layout) {
+        unsafe { self.inner.deallocate(pos, layout.align()) }
+        self.used_bytes -= layout.size();
+    }
+
+    fn total_bytes(&self) -> usize {
+        self.total_bytes
+    }
+
+    fn used_bytes(&self) -> usize {
+        self.used_bytes
+    }
+
+    fn available_bytes(&self) -> usize {
+        self.total_bytes - self.used_bytes
+    }
+}
diff --git a/tests/allocator.rs b/tests/allocator.rs
new file mode 100644
index 0000000..58962b5
--- /dev/null
+++ b/tests/allocator.rs
@@ -0,0 +1,143 @@
+#![feature(btreemap_alloc)]
+#![feature(allocator_api)]
+
+use std::alloc::{Allocator, Layout};
+use std::collections::BTreeMap;
+use std::io::Write;
+
+use allocator::{AllocatorRc, BuddyByteAllocator, SlabByteAllocator, TlsfByteAllocator};
+use rand::{prelude::SliceRandom, Rng};
+
+const POOL_SIZE: usize = 1024 * 1024 * 128;
+
+fn test_vec(n: usize, alloc: &(impl Allocator + Clone)) {
+    let mut v = Vec::with_capacity_in(n, alloc.clone());
+    for _ in 0..n {
+        v.push(rand::random::<u32>());
+    }
+    v.sort();
+    for i in 0..n - 1 {
+        assert!(v[i] <= v[i + 1]);
+    }
+}
+
+fn test_vec2(n: usize, blk_size: usize, alloc: &(impl Allocator + Clone)) {
+    let mut v = Vec::new_in(alloc.clone());
+    for _ in 0..n {
+        let block = Vec::<u64>::with_capacity_in(blk_size, alloc.clone());
+        v.push(block);
+    }
+
+    let mut index = Vec::with_capacity_in(n, alloc.clone());
+    for i in 0..n {
+        index.push(i);
+    }
+    index.shuffle(&mut rand::thread_rng());
+
+    for i in index {
+        v[i] = Vec::new_in(alloc.clone())
+    }
+}
+
+fn test_btree_map(n: usize, alloc: &(impl Allocator + Clone)) {
+    let mut m = BTreeMap::new_in(alloc.clone());
+    for _ in 0..n {
+        if rand::random::<u32>() % 5 == 0 && !m.is_empty() {
+            m.pop_first();
+        } else {
+            let value = rand::random::<u32>();
+            let mut key = Vec::new_in(alloc.clone());
+            write!(&mut key, "key_{value}").unwrap();
+            m.insert(key, value);
+        }
+    }
+    for (k, v) in m.iter() {
+        let key = std::str::from_utf8(k)
+            .unwrap()
+            .strip_prefix("key_")
+            .unwrap();
+        assert_eq!(key.parse::<u32>().unwrap(), *v);
+    }
+}
+
+pub fn test_alignment(n: usize, alloc: &(impl Allocator + Clone)) {
+    let mut rng = rand::thread_rng();
+    let mut blocks = vec![];
+    for _ in 0..n {
+        if rng.gen_ratio(2, 3) || blocks.len() == 0 {
+            // insert a block
+            let size =
+                ((1 << rng.gen_range(0..16)) as f32 * rng.gen_range(1.0..2.0)).round() as usize;
+            let align = 1 << rng.gen_range(0..8);
+            let layout = Layout::from_size_align(size, align).unwrap();
+            let ptr = alloc.allocate(layout).unwrap();
+            blocks.push((ptr, layout));
+        } else {
+            // delete a block
+            let idx = rng.gen_range(0..blocks.len());
+            let blk = blocks.swap_remove(idx);
+            unsafe { alloc.deallocate(blk.0.cast(), blk.1) };
+        }
+    }
+    for blk in blocks {
+        unsafe { alloc.deallocate(blk.0.cast(), blk.1) };
+    }
+}
+
+fn run_test(f: impl FnOnce(&mut [u8])) {
+    let layout = Layout::from_size_align(POOL_SIZE, 4096).unwrap();
+    let ptr = unsafe { std::alloc::alloc_zeroed(layout) };
+    let pool = unsafe { core::slice::from_raw_parts_mut(ptr, POOL_SIZE) };
+
+    f(pool);
+
+    unsafe { std::alloc::dealloc(ptr, layout) };
+}
+
+#[test]
+fn system_alloc() {
+    run_test(|_pool| {
+        let alloc = std::alloc::System;
+        test_alignment(50, &alloc);
+        test_vec(3_000_000, &alloc);
+        test_vec2(30_000, 64, &alloc);
+        test_vec2(7_500, 520, &alloc);
+        test_btree_map(50_000, &alloc);
+    })
+}
+
+#[test]
+fn buddy_alloc() {
+    run_test(|pool| {
+        let alloc = AllocatorRc::new(BuddyByteAllocator::new(), pool);
+        test_alignment(50, &alloc);
+        test_vec(3_000_000, &alloc);
+        test_vec2(30_000, 64, &alloc);
+        test_vec2(7_500, 520, &alloc);
+        test_btree_map(50_000, &alloc);
+    })
+}
+
+#[test]
+fn slab_alloc() {
+    run_test(|pool| {
+        let alloc = AllocatorRc::new(SlabByteAllocator::new(), pool);
+        test_alignment(50, &alloc);
+        test_vec(3_000_000, &alloc);
+        test_vec2(30_000, 64, &alloc);
+        test_vec2(7_500, 520, &alloc);
+        test_btree_map(50_000, &alloc);
+    })
+}
+
+#[test]
+fn tlsf_alloc() {
+    run_test(|pool| {
+        let alloc = AllocatorRc::new(TlsfByteAllocator::new(), pool);
+        test_alignment(50, &alloc);
+        test_vec(3_000_000, &alloc);
+        test_vec2(30_000, 64, &alloc);
+        test_vec2(7_500, 520, &alloc);
+        test_btree_map(50_000, &alloc);
+    })
+}
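
For reference, a minimal usage sketch of the unified interface introduced by this patch (not itself part of the diff). It drives `TlsfByteAllocator` directly through the `BaseAllocator` and `ByteAllocator` traits, without the optional `allocator_api` wrapper, and assumes a downstream crate depending on `allocator` with the `tlsf` feature enabled. The 1 MiB pool size and the standalone `main` are illustrative choices; the pool setup mirrors the tests' `run_test` helper.

```rust
use core::alloc::Layout;
use core::ptr::NonNull;

use allocator::{BaseAllocator, ByteAllocator, TlsfByteAllocator};

// Illustrative pool size; the tests above use a 128 MiB pool.
const POOL_SIZE: usize = 1024 * 1024;

fn main() {
    // Obtain a page-aligned, zeroed pool from the system allocator,
    // as the tests' `run_test` helper does.
    let pool_layout = Layout::from_size_align(POOL_SIZE, 4096).unwrap();
    let pool_ptr = unsafe { std::alloc::alloc_zeroed(pool_layout) };

    // Hand the pool to the allocator through the `BaseAllocator` trait...
    let mut balloc = TlsfByteAllocator::new();
    balloc.init(pool_ptr as usize, POOL_SIZE);

    // ...then allocate and free through the `ByteAllocator` trait.
    let layout = Layout::from_size_align(256, 16).unwrap();
    let ptr: NonNull<u8> = balloc.alloc(layout).expect("allocation failed");
    assert_eq!(balloc.used_bytes(), 256); // TlsfByteAllocator tracks `layout.size()`
    balloc.dealloc(ptr, layout);
    assert_eq!(balloc.used_bytes(), 0);

    unsafe { std::alloc::dealloc(pool_ptr, pool_layout) };
}
```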