remove deallocation from Allocator trait
pzittlau committed Jan 24, 2025
1 parent eb8918c commit aede2de
Showing 12 changed files with 17 additions and 393 deletions.
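This commit drops `deallocate` from the `Allocator` trait; per the benchmark comment further down, frees are now buffered and applied during sync rather than issued directly against a live allocator. Below is a minimal sketch of the trait surface implied by the calls visible in this diff (`new`, `allocate`, and `mark` with `Action::{Allocate, Deallocate}`). It is an illustration under assumptions: the value of `SEGMENT_SIZE_BYTES`, the bitmap element type, and whether `mark` and `Action` belong to the trait or to the concrete allocators are guesses, not facts taken from the repository.

```rust
// Hypothetical, simplified view of the post-commit allocator interface.
const SEGMENT_SIZE_BYTES: usize = 8192; // assumed value, for illustration only

enum Action {
    Allocate,
    Deallocate,
}

trait Allocator {
    /// Rebuild the allocator state from a segment's on-disk bitmap.
    fn new(bitmap: [u8; SEGMENT_SIZE_BYTES]) -> Self
    where
        Self: Sized;

    /// Try to reserve `size` blocks, returning the chosen offset on success.
    fn allocate(&mut self, size: u32) -> Option<u32>;

    /// Flip bitmap bits for a range decided elsewhere; with `deallocate` gone,
    /// frees are expected to reach the allocator this way, batched at sync time.
    fn mark(&mut self, offset: u32, size: u32, action: Action);
}
```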
65 changes: 0 additions & 65 deletions betree/benches/allocator.rs
@@ -17,14 +17,6 @@ enum SizeDistribution {
// Define a trait for our benchmark struct to allow for trait objects
trait GenericAllocatorBenchmark {
fn benchmark_name(&self) -> &'static str;
fn bench_allocator(
&self,
b: &mut Bencher,
dist: SizeDistribution,
alloc_ratio: f64,
min_size: usize,
max_size: usize,
);

fn bench_allocator_with_sync(
&self,
@@ -56,17 +48,6 @@ impl<A: Allocator + 'static> GenericAllocatorBenchmark for AllocatorBenchmark<A>
self.benchmark_name
}

fn bench_allocator(
&self,
b: &mut Bencher,
dist: SizeDistribution,
alloc_ratio: f64,
min_size: usize,
max_size: usize,
) {
bench_allocator::<A>(b, dist, alloc_ratio, min_size, max_size)
}

fn bench_allocator_with_sync(
&self,
b: &mut Bencher,
@@ -80,42 +61,6 @@ impl<A: Allocator + 'static> GenericAllocatorBenchmark for AllocatorBenchmark<A>
}
}

fn bench_allocator<A: Allocator>(
b: &mut Bencher,
dist: SizeDistribution,
alloc_ratio: f64,
min_size: usize,
max_size: usize,
) {
let mut allocator = A::new([0; SEGMENT_SIZE_BYTES]);
let mut allocated = Vec::new();

let mut rng = StdRng::seed_from_u64(42);
let mut sample_size = || -> u32 {
match &dist {
SizeDistribution::Uniform(u) => return black_box(u.sample(&mut rng)) as u32,
SizeDistribution::Zipfian(z) => {
return (black_box(z.sample(&mut rng)) as usize).clamp(min_size, max_size) as u32
}
}
};

b.iter(|| {
if rand::random::<f64>() < alloc_ratio {
// Allocation path
let size = sample_size();
if let Some(offset) = black_box(allocator.allocate(size)) {
allocated.push((offset, size));
}
} else if !allocated.is_empty() {
// Deallocation path
let idx = rand::random::<usize>() % allocated.len();
let (offset, size) = allocated.swap_remove(idx);
black_box(allocator.deallocate(offset, size));
}
});
}

// In Haura, allocators are not continuously active in memory. Instead, they are loaded from disk
// when needed. This benchmark simulates this behavior by creating a new allocator instance for each
// iteration. Also deallocations are buffered and applied during sync operations, not immediately to
Expand Down Expand Up @@ -203,16 +148,6 @@ pub fn criterion_benchmark(c: &mut Criterion) {
Box::new(AllocatorBenchmark::<SegmentAllocator>::new("segment")),
];

for (dist_name, dist) in distributions.clone() {
let mut group = c.benchmark_group(dist_name);
for allocator_bench in &allocator_benchmarks {
group.bench_function(allocator_bench.benchmark_name(), |b| {
allocator_bench.bench_allocator(b, dist.clone(), 0.8, min_size, max_size)
});
}
group.finish();
}

let alloc_dealloc_ratios = [
(100, 50),
(500, 250),
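The comment above from benches/allocator.rs describes the model this commit moves to: an allocator is rebuilt from its on-disk bitmap when needed, allocations take effect immediately, and deallocations are only buffered until a sync applies them in a batch. A hedged sketch of that flow follows, reusing the assumed trait and constants from the sketch near the top; `run_sync_round`, the fixed request size, and the strict alternation of allocations and frees are invented for illustration and do not mirror the real `bench_allocator_with_sync`.

```rust
// Sketch of one "load, work, sync" round under the buffered-deallocation model.
fn run_sync_round<A: Allocator>(ops_per_sync: usize) {
    // Fresh allocator per round, as if its bitmap had just been read from disk.
    let mut allocator = A::new([0; SEGMENT_SIZE_BYTES]);
    let mut live: Vec<(u32, u32)> = Vec::new();
    let mut pending_frees: Vec<(u32, u32)> = Vec::new();

    for i in 0..ops_per_sync {
        if i % 2 == 0 {
            // Allocation path: reserved in the in-memory bitmap right away.
            if let Some(offset) = allocator.allocate(64) {
                live.push((offset, 64));
            }
        } else if let Some((offset, size)) = live.pop() {
            // Deallocation path: only remembered, not applied yet.
            pending_frees.push((offset, size));
        }
    }

    // "Sync": replay the buffered frees against the bitmap in one batch.
    for (offset, size) in pending_frees.drain(..) {
        allocator.mark(offset, size, Action::Deallocate);
    }
    // A concrete allocator type would be plugged in at the call site,
    // e.g. run_sync_round::<FirstFitScan>(1_000);
}
```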
36 changes: 0 additions & 36 deletions betree/src/allocator/best_fit_list.rs
@@ -102,42 +102,6 @@ impl Allocator for BestFitList {

false
}

/// Deallocates the allocated block.
fn deallocate(&mut self, offset: u32, size: u32) {
if offset + size > SEGMENT_SIZE as u32 {
return;
}
self.mark(offset, size, Action::Deallocate);

let dealloc_end = offset + size;
let new_segment = (offset, size);
let mut insert_index = self.free_segments.len();

for i in 0..self.free_segments.len() {
let (seg_offset, seg_size) = self.free_segments[i];
let seg_end = seg_offset + seg_size;

if seg_end == offset {
// Merge with the preceding segment
self.free_segments[i].1 += size;
if i + 1 < self.free_segments.len() && self.free_segments[i + 1].0 == dealloc_end {
self.free_segments[i].1 += self.free_segments[i + 1].1;
self.free_segments.remove(i + 1);
}
return;
} else if dealloc_end == seg_offset {
// Merge with the following segment
self.free_segments[i].0 = offset;
self.free_segments[i].1 += size;
return;
} else if seg_offset > offset {
insert_index = i;
break;
}
}
self.free_segments.insert(insert_index, new_segment);
}
}

impl BestFitList {
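The `deallocate` removed from best_fit_list.rs above (and its near-twin removed from first_fit_list.rs below) kept `free_segments` sorted by offset and coalesced a freed range with any touching neighbours before falling back to a plain insert. A self-contained sketch of that coalescing step is shown here as a hypothetical stand-alone helper on a `Vec<(u32, u32)>` free list; it mirrors the deleted logic but is not Haura code.

```rust
/// Insert a freed `(offset, size)` range into a free list sorted by offset,
/// merging it with adjacent free segments where they touch.
fn insert_free_segment(free_segments: &mut Vec<(u32, u32)>, offset: u32, size: u32) {
    let dealloc_end = offset + size;
    let mut insert_index = free_segments.len(); // default: append at the end

    for i in 0..free_segments.len() {
        let (seg_offset, seg_size) = free_segments[i];
        let seg_end = seg_offset + seg_size;

        if seg_end == offset {
            // Grow the preceding segment...
            free_segments[i].1 += size;
            // ...and absorb the following one if the gap is now closed.
            if i + 1 < free_segments.len() && free_segments[i + 1].0 == dealloc_end {
                free_segments[i].1 += free_segments[i + 1].1;
                free_segments.remove(i + 1);
            }
            return;
        } else if dealloc_end == seg_offset {
            // Extend the following segment downwards to cover the freed range.
            free_segments[i] = (offset, seg_size + size);
            return;
        } else if seg_offset > offset {
            // Passed the freed range without touching anything: insert here.
            insert_index = i;
            break;
        }
    }
    free_segments.insert(insert_index, (offset, size));
}

fn main() {
    let mut free = vec![(0, 4), (10, 4)];
    insert_free_segment(&mut free, 4, 6); // bridges the two existing segments
    assert_eq!(free, vec![(0, 14)]);
}
```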
8 changes: 0 additions & 8 deletions betree/src/allocator/best_fit_scan.rs
@@ -98,12 +98,4 @@ impl Allocator for BestFitScan {
self.mark(offset, size, Action::Allocate);
true
}

/// Deallocates the allocated block.
fn deallocate(&mut self, offset: u32, size: u32) {
if offset + size > SEGMENT_SIZE as u32 {
return;
}
self.mark(offset, size, Action::Deallocate);
}
}
4 changes: 0 additions & 4 deletions betree/src/allocator/first_fit_fsm.rs
@@ -92,10 +92,6 @@ impl Allocator for FirstFitFSM {
// hard to implement efficiently
todo!()
}

fn deallocate(&mut self, _offset: u32, _size: u32) {
unimplemented!("Not needed right now");
}
}

impl FirstFitFSM {
41 changes: 0 additions & 41 deletions betree/src/allocator/first_fit_list.rs
@@ -102,47 +102,6 @@ impl Allocator for FirstFitList {

false // No suitable free segment found in free_segments list
}

/// Deallocates the allocated block.
fn deallocate(&mut self, offset: u32, size: u32) {
if offset + size > SEGMENT_SIZE as u32 {
return;
}
self.mark(offset, size, Action::Deallocate);

let dealloc_end = offset + size;
let new_segment = (offset, size);
let mut insert_index = self.free_segments.len(); // Default to append

// Find the correct insertion index to keep free_segments sorted and check for merging
for i in 0..self.free_segments.len() {
let (seg_offset, seg_size) = self.free_segments[i];
let seg_end = seg_offset + seg_size;

if seg_end == offset {
// Merge with the preceding segment
self.free_segments[i].1 += size;
// Check if we also need to merge with the following segment
if i + 1 < self.free_segments.len() && self.free_segments[i + 1].0 == dealloc_end {
self.free_segments[i].1 += self.free_segments[i + 1].1;
// TODO: use swap remove
self.free_segments.remove(i + 1); // Remove the merged following segment
}
return;
} else if dealloc_end == seg_offset {
// Merge with the following segment
self.free_segments[i].0 = offset;
self.free_segments[i].1 += size;
return;
} else if seg_offset > offset {
// Insertion point found (segments are sorted by offset)
insert_index = i;
break;
}
}
// If no merge is possible, insert the new segment at the determined index
self.free_segments.insert(insert_index, new_segment);
}
}

impl FirstFitList {
8 changes: 0 additions & 8 deletions betree/src/allocator/first_fit_scan.rs
@@ -65,12 +65,4 @@ impl Allocator for FirstFitScan {
self.mark(offset, size, Action::Allocate);
true
}

/// Deallocates the allocated block.
fn deallocate(&mut self, offset: u32, size: u32) {
if offset + size > SEGMENT_SIZE as u32 {
return;
}
self.mark(offset, size, Action::Deallocate);
}
}
