diff --git a/arca/src/serde.rs b/arca/src/serde.rs
index 23257df..a1ef5d3 100644
--- a/arca/src/serde.rs
+++ b/arca/src/serde.rs
@@ -333,7 +333,7 @@ impl<'de, R: Runtime> Visitor<'de> for TableVisitor<R> {
     where
         A: serde::de::MapAccess<'de>,
     {
-        let (first_key, first_value): (alloc::string::String, usize) =
+        let (first_key, first_value): (&str, usize) =
             map.next_entry()?.expect("at least one element needed");
         assert_eq!(first_key, "len");
         let mut table = Table::new(first_value);
diff --git a/common/src/buddy.rs b/common/src/buddy.rs
index 0acd595..cec5b17 100644
--- a/common/src/buddy.rs
+++ b/common/src/buddy.rs
@@ -413,6 +413,13 @@ impl AllocatorInner {
                 size: 1 << size_log2,
             });
         }
+        // Check if index is within valid range for this level
+        if index >= self.size_of_level_bits(size_log2) {
+            return Err(AllocationError::InvalidReservation {
+                index,
+                size: 1 << size_log2,
+            });
+        }
         self.with_level(base, size_log2, |level: &mut AllocatorLevel<'_>| {
             if level.reserve(index) {
                 Ok(index)
@@ -553,12 +560,15 @@ impl BuddyAllocatorImpl {
         // prevent physical zero page from being allocated
         assert_eq!(temp.to_offset(temp.reserve_raw(0, 4096)), 0);

-        // reserve kernel pages
+        // reserve kernel pages (only if within range)
         let mut pages = alloc::vec![];
         for i in 0..8 {
-            let p = temp.reserve_raw(0x100000 * (i + 1), 0x100000);
-            assert!(!p.is_null());
-            pages.push(p);
+            let addr = 0x100000 * (i + 1);
+            if addr + 0x100000 <= size {
+                let p = temp.reserve_raw(addr, 0x100000);
+                assert!(!p.is_null());
+                pages.push(p);
+            }
         }
         let new_inner = AllocatorInner::new_in(slice, &temp);
@@ -681,6 +691,7 @@ impl BuddyAllocatorImpl {
         for (i, item) in ptrs.iter_mut().enumerate() {
             let result = self.allocate_raw_unchecked(size);
             if result.is_null() {
+                self.inner.unlock();
                 return Some(i);
             }
             *item = result;
@@ -696,6 +707,7 @@ impl BuddyAllocatorImpl {
         for (i, item) in ptrs.iter_mut().enumerate() {
             let result = self.allocate_raw_unchecked(size);
             if result.is_null() {
+                self.inner.unlock();
                 return i;
             }
             *item = result;
@@ -1139,163 +1151,4 @@ unsafe impl Allocator for BuddyAllocator {
 }

 #[cfg(test)]
-mod tests {
-    extern crate test;
-
-    use super::*;
-    use test::Bencher;
-
-    #[test]
-    fn test_bitref() {
-        let mut word = 10;
-
-        let mut r0 = BitRef::new(&mut word, 0);
-        r0.set();
-
-        let mut r1 = BitRef::new(&mut word, 1);
-        r1.clear();
-
-        let mut r2 = BitRef::new(&mut word, 2);
-        r2.write(false);
-
-        let mut r3 = BitRef::new(&mut word, 3);
-        r3.write(true);
-
-        assert_eq!(word, 9);
-    }
-
-    #[test]
-    fn test_bitslice() {
-        let mut words = [0; 2];
-        let mut slice = BitSlice::new(128, &mut words);
-        let mut r0 = slice.bit(0);
-        r0.set();
-
-        let mut r1 = slice.bit(1);
-        r1.set();
-
-        let mut r127 = slice.bit(127);
-        r127.set();
-
-        assert_eq!(words[0], 3);
-        assert_eq!(
-            words[127 / (core::mem::size_of::<u64>() * 8)],
-            1 << (127 % (core::mem::size_of::<u64>() * 8))
-        );
-    }
-
-    #[test]
-    fn test_buddy_allocator() {
-        let allocator = BuddyAllocatorImpl::new(0x10000000);
-
-        let test = Box::new_in(10, allocator.clone());
-        assert_eq!(*test, 10);
-
-        let mut v = Vec::new_in(allocator.clone());
-        for i in 0..10000 {
-            v.push(i);
-        }
-    }
-
-    #[bench]
-    fn bench_allocate_free(b: &mut Bencher) {
-        let allocator = BuddyAllocatorImpl::new(0x100000000);
-        b.iter(|| {
-            let x: Box<[MaybeUninit<u8>], BuddyAllocatorImpl> =
-                Box::new_uninit_slice_in(128, allocator.clone());
-            core::mem::drop(x);
-        });
-    }
-
-    #[bench]
-    fn bench_allocate_free_no_cache(b: &mut Bencher) {
-        let allocator = BuddyAllocatorImpl::new(0x100000000);
-        allocator.set_caching(false);
-        b.iter(|| {
-            let x: Box<[MaybeUninit<u8>], BuddyAllocatorImpl> =
-                Box::new_uninit_slice_in(128, allocator.clone());
-            core::mem::drop(x);
-        });
-    }
-
-    #[bench]
-    fn bench_contended_allocate_free(b: &mut Bencher) {
-        let allocator = BuddyAllocatorImpl::new(0x100000000);
-        let f = || {
-            let x: Box<[MaybeUninit<u8>], BuddyAllocatorImpl> =
-                Box::new_uninit_slice_in(128, allocator.clone());
-            core::mem::drop(x);
-        };
-        use core::sync::atomic::AtomicBool;
-        use std::sync::Arc;
-        std::thread::scope(|s| {
-            let flag = Arc::new(AtomicBool::new(true));
-            for _ in 0..16 {
-                let flag = flag.clone();
-                s.spawn(move || {
-                    while flag.load(Ordering::SeqCst) {
-                        f();
-                    }
-                });
-            }
-            b.iter(f);
-            flag.store(false, Ordering::SeqCst);
-        });
-    }
-
-    #[bench]
-    #[ignore]
-    fn bench_contended_allocate_free_no_cache(b: &mut Bencher) {
-        let allocator = BuddyAllocatorImpl::new(0x100000000);
-        allocator.set_caching(false);
-        let f = || {
-            let x: Box<[MaybeUninit<u8>], BuddyAllocatorImpl> =
-                Box::new_uninit_slice_in(128, allocator.clone());
-            core::mem::drop(x);
-        };
-        use core::sync::atomic::AtomicBool;
-        use std::sync::Arc;
-        std::thread::scope(|s| {
-            let flag = Arc::new(AtomicBool::new(true));
-            for _ in 0..16 {
-                let flag = flag.clone();
-                s.spawn(move || {
-                    while flag.load(Ordering::SeqCst) {
-                        f();
-                    }
-                });
-            }
-            b.iter(f);
-            flag.store(false, Ordering::SeqCst);
-        });
-    }
-
-    #[test]
-    fn stress_test() {
-        use std::hash::{BuildHasher, Hasher, RandomState};
-        let allocator = BuddyAllocatorImpl::new(0x10000000);
-        allocator.set_caching(false);
-        let mut v = vec![];
-        let random = |limit: usize| {
-            let x: u64 = RandomState::new().build_hasher().finish();
-            x as usize % limit
-        };
-        for _ in 0..100000 {
-            let used_before = allocator.used_size();
-            let remaining = allocator.total_size() - used_before;
-            let size = random(core::cmp::min(1 << 21, remaining / 2));
-            let alloc =
-                Box::<[u8], BuddyAllocatorImpl>::new_uninit_slice_in(size, allocator.clone());
-            let used_after = allocator.used_size();
-            assert!(used_after >= used_before + size);
-            if !v.is_empty() && size % 3 == 0 {
-                let number = random(v.len());
-                for _ in 0..number {
-                    let index = random(v.len());
-                    v.remove(index);
-                }
-            }
-            v.push(alloc);
-        }
-    }
-}
+mod tests;
diff --git a/common/src/buddy/tests.rs b/common/src/buddy/tests.rs
new file mode 100644
index 0000000..3b579e9
--- /dev/null
+++ b/common/src/buddy/tests.rs
@@ -0,0 +1,951 @@
+extern crate test;
+
+use super::*;
+#[test]
+// Setting/clearing individual bits in u64 words to check that the bit manipulation works
+fn test_bitref() {
+    let mut word = 10;
+
+    let mut r0 = BitRef::new(&mut word, 0);
+    r0.set();
+
+    let mut r1 = BitRef::new(&mut word, 1);
+    r1.clear();
+
+    let mut r2 = BitRef::new(&mut word, 2);
+    r2.write(false);
+
+    let mut r3 = BitRef::new(&mut word, 3);
+    r3.write(true);
+
+    assert_eq!(word, 9);
+}
+
+#[test]
+// Setting/clearing individual bits in a BitSlice to check that the bit manipulation works
+fn test_bitslice() {
+    let mut words = [0; 2];
+    let mut slice = BitSlice::new(128, &mut words);
+    let mut r0 = slice.bit(0);
+    r0.set();
+
+    let mut r1 = slice.bit(1);
+    r1.set();
+
+    let mut r127 = slice.bit(127);
+    r127.set();
+
+    assert_eq!(words[0], 3);
+    assert_eq!(
+        words[127 / (core::mem::size_of::<u64>() * 8)],
+        1 << (127 % (core::mem::size_of::<u64>() * 8))
+    );
+}
+
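+#[test]
+// Sketch: a set bit should be clearable again through a fresh BitRef; assumes
+// slice.bit() hands back the same BitRef type that test_bitref exercises above.
+fn test_bitslice_clear_roundtrip() {
+    let mut words = [0; 2];
+    let mut slice = BitSlice::new(128, &mut words);
+    let mut r64 = slice.bit(64);
+    r64.set();
+    let mut r64 = slice.bit(64);
+    r64.clear();
+    assert_eq!(words, [0; 2]);
+}
+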
+#[test]
+// Basic setup + continuous element pushing to check that the allocator grows and adjusts properly
+fn test_buddy_allocator() {
+    let allocator = BuddyAllocatorImpl::new(0x10000000);
+
+    let test = Box::new_in(10, allocator.clone());
+    assert_eq!(*test, 10);
+
+    let mut v = Vec::new_in(allocator.clone());
+    for i in 0..10000 {
+        v.push(i);
+    }
+}
+
+#[test]
+// Verifies that minimum-size allocations from a small arena do not panic
+// Potential issue: reserve_unchecked does not validate that the requested index is within the number of blocks at that level
+fn test_too_small_allocation() {
+    let allocator = BuddyAllocatorImpl::new(1 << 20);
+    let size = BuddyAllocatorImpl::MIN_ALLOCATION;
+    let _used_before = allocator.used_size();
+    let _ptr = allocator.allocate_raw(size);
+}
+
+#[test]
+// Verifying allocate_raw adds size to used_size, and free_raw subtracts it back, returning usage to the original amount.
+fn test_allocate_raw_and_used_size() {
+    let allocator = BuddyAllocatorImpl::new(1 << 24);
+    let size = BuddyAllocatorImpl::MIN_ALLOCATION;
+    let used_before = allocator.used_size();
+    let ptr = allocator.allocate_raw(size);
+    assert!(!ptr.is_null());
+    assert_eq!(allocator.used_size(), used_before + size);
+    allocator.free_raw(ptr, size);
+    assert_eq!(allocator.used_size(), used_before);
+}
+
+#[test]
+// Verifying allocate_many_raw adds the allocated sizes to used_size, and free_many_raw subtracts them back, returning usage to the original amount.
+fn test_allocate_many_and_free_many() {
+    use std::collections::BTreeSet;
+
+    let allocator = BuddyAllocatorImpl::new(1 << 24);
+    let size = BuddyAllocatorImpl::MIN_ALLOCATION;
+    let used_before = allocator.used_size();
+    let mut ptrs = [core::ptr::null_mut(); 4];
+    let count = allocator.allocate_many_raw(size, &mut ptrs);
+    assert_eq!(count, ptrs.len());
+    assert!(ptrs.iter().all(|ptr| !ptr.is_null()));
+
+    let unique: BTreeSet<usize> = ptrs.iter().map(|ptr| *ptr as usize).collect();
+    assert_eq!(unique.len(), ptrs.len());
+
+    allocator.free_many_raw(size, &ptrs);
+    assert_eq!(allocator.used_size(), used_before);
+}
+
+#[test]
+// Verifying that to_offset and from_offset roundtrip correctly
+fn test_offset_roundtrip() {
+    let allocator = BuddyAllocatorImpl::new(1 << 24);
+    let size = BuddyAllocatorImpl::MIN_ALLOCATION;
+    let ptr = allocator.allocate_raw(size);
+    assert!(!ptr.is_null());
+
+    let offset = allocator.to_offset(ptr);
+    let roundtrip = allocator.from_offset::<u8>(offset);
+    assert_eq!(roundtrip as usize, ptr as usize);
+
+    allocator.free_raw(ptr, size);
+}
+
+#[test]
+// Verifying that reserving at zero returns a null pointer and does not add to used_size
+fn test_reserve_raw_at_zero() {
+    let allocator = BuddyAllocatorImpl::new(1 << 24);
+    let size = BuddyAllocatorImpl::MIN_ALLOCATION;
+    let used_before = allocator.used_size();
+    let ptr = allocator.reserve_raw(0, size);
+    assert!(ptr.is_null());
+    assert_eq!(allocator.used_size(), used_before);
+}
+
+#[test]
+// Verifying that allocating too large returns a null pointer and does not add to used_size
+fn test_allocate_raw_too_large_returns_null() {
+    let allocator = BuddyAllocatorImpl::new(1 << 24);
+    let used_before = allocator.used_size();
+    let ptr = allocator.allocate_raw(allocator.total_size() * 2);
+    assert!(ptr.is_null());
+    assert_eq!(allocator.used_size(), used_before);
+}
+
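+#[test]
+// Sketch: repeated allocate/free cycles at one size should leave used_size at its
+// baseline every iteration; assumes free_raw fully returns blocks as the tests above do.
+fn test_allocate_free_cycles_restore_used_size() {
+    let allocator = BuddyAllocatorImpl::new(1 << 24);
+    let size = BuddyAllocatorImpl::MIN_ALLOCATION;
+    let baseline = allocator.used_size();
+    for _ in 0..100 {
+        let ptr = allocator.allocate_raw(size);
+        assert!(!ptr.is_null());
+        allocator.free_raw(ptr, size);
+        assert_eq!(allocator.used_size(), baseline);
+    }
+}
+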
+#[test]
+// Verifying that refcnt is zero on allocate
+fn test_refcnt_zero_on_allocate() {
+    use core::sync::atomic::Ordering;
+
+    let allocator = BuddyAllocatorImpl::new(1 << 24);
+    let size = BuddyAllocatorImpl::MIN_ALLOCATION;
+    let ptr = allocator.allocate_raw(size);
+    assert!(!ptr.is_null());
+
+    let refcnt = allocator.refcnt(ptr);
+    assert!(!refcnt.is_null());
+    let value = unsafe { (*refcnt).load(Ordering::SeqCst) };
+    assert_eq!(value, 0);
+
+    allocator.free_raw(ptr, size);
+}
+
+#[test]
+// Stress testing the allocator with random allocations and frees
+fn stress_test() {
+    use std::hash::{BuildHasher, Hasher, RandomState};
+    let allocator = BuddyAllocatorImpl::new(0x10000000);
+    allocator.set_caching(false);
+    let mut v = vec![];
+    let random = |limit: usize| {
+        let x: u64 = RandomState::new().build_hasher().finish();
+        x as usize % limit
+    };
+    for _ in 0..100000 {
+        let used_before = allocator.used_size();
+        let remaining = allocator.total_size() - used_before;
+        let size = random(core::cmp::min(1 << 21, remaining / 2));
+        let alloc = Box::<[u8], BuddyAllocatorImpl>::new_uninit_slice_in(size, allocator.clone());
+        let used_after = allocator.used_size();
+        assert!(used_after >= used_before + size);
+        if !v.is_empty() && size % 3 == 0 {
+            let number = random(v.len());
+            for _ in 0..number {
+                let index = random(v.len());
+                v.remove(index);
+            }
+        }
+        v.push(alloc);
+    }
+}
+
+#[test]
+// Test splitting large blocks into smaller ones
+fn test_block_splitting() {
+    let allocator = BuddyAllocatorImpl::new(1 << 24);
+    let small_size = BuddyAllocatorImpl::MIN_ALLOCATION;
+    let large_size = small_size * 4;
+
+    // Allocate and free a large block
+    let large_ptr = allocator.allocate_raw(large_size);
+    assert!(!large_ptr.is_null());
+    allocator.free_raw(large_ptr, large_size);
+
+    // Now allocate multiple small blocks - should split the large one
+    let mut small_ptrs = vec![];
+    for _ in 0..4 {
+        let ptr = allocator.allocate_raw(small_size);
+        assert!(!ptr.is_null());
+        small_ptrs.push(ptr);
+    }
+
+    // Clean up
+    for ptr in small_ptrs {
+        allocator.free_raw(ptr, small_size);
+    }
+}
+
+#[test]
+// Test reserving a specific, known-free address, then double-reserving it
+fn test_reserve_specific_addresses() {
+    let allocator = BuddyAllocatorImpl::new(1 << 24);
+    let size = BuddyAllocatorImpl::MIN_ALLOCATION;
+
+    // Get a definitely-available block
+    let p = allocator.allocate_raw(size);
+    assert!(!p.is_null());
+    let address = allocator.to_offset(p);
+    allocator.free_raw(p, size);
+
+    // Now we should be able to reserve that exact address
+    let ptr1 = allocator.reserve_raw(address, size);
+    assert!(!ptr1.is_null());
+    assert_eq!(allocator.to_offset(ptr1), address);
+
+    // Reserving again should fail
+    let ptr2 = allocator.reserve_raw(address, size);
+    assert!(ptr2.is_null());
+
+    allocator.free_raw(ptr1, size);
+}
+
+#[test]
+// Test reserving overlapping regions
+fn test_reserve_overlapping_regions() {
+    let allocator = BuddyAllocatorImpl::new(1 << 24);
+    let size = BuddyAllocatorImpl::MIN_ALLOCATION;
+
+    // Reserve a block
+    let ptr1 = allocator.reserve_raw(size * 5, size);
+    assert!(!ptr1.is_null());
+
+    // Try to reserve a larger block that would overlap
+    let ptr2 = allocator.reserve_raw(size * 4, size * 4);
+    assert!(ptr2.is_null()); // Should fail because it overlaps with ptr1
+
+    allocator.free_raw(ptr1, size);
+}
+
+#[test]
+// Test allocating all available memory
+fn test_exhaust_memory() {
+    let allocator = BuddyAllocatorImpl::new(1 << 24);
+    let size = BuddyAllocatorImpl::MIN_ALLOCATION;
+    let mut ptrs = vec![];
+
+    // Allocate until we can't anymore
+    loop {
+        let ptr = allocator.allocate_raw(size);
+        if ptr.is_null() {
+            break;
+        }
+        ptrs.push(ptr);
+    }
+
+    // Verify we actually allocated something
+    assert!(!ptrs.is_empty());
+
+    // Try one more allocation - should fail
+    let ptr = allocator.allocate_raw(size);
+    assert!(ptr.is_null());
+
+    // Free everything
+    for ptr in ptrs {
+        allocator.free_raw(ptr, size);
+    }
+
+    // Should be able to allocate again
+    let ptr = allocator.allocate_raw(size);
+    assert!(!ptr.is_null());
+    allocator.free_raw(ptr, size);
+}
+
+#[test]
+// Test mixed allocation sizes
+fn test_mixed_allocation_sizes() {
+    let allocator = BuddyAllocatorImpl::new(1 << 24);
+
+    let small = BuddyAllocatorImpl::MIN_ALLOCATION;
+    let medium = small * 4;
+    let large = small * 16;
+
+    let ptr1 = allocator.allocate_raw(small);
+    let ptr2 = allocator.allocate_raw(large);
+    let ptr3 = allocator.allocate_raw(medium);
+    let ptr4 = allocator.allocate_raw(small);
+
+    assert!(!ptr1.is_null());
+    assert!(!ptr2.is_null());
+    assert!(!ptr3.is_null());
+    assert!(!ptr4.is_null());
+
+    // Verify they're all different
+    let ptrs = [ptr1, ptr2, ptr3, ptr4];
+    for i in 0..ptrs.len() {
+        for j in (i + 1)..ptrs.len() {
+            assert_ne!(ptrs[i], ptrs[j]);
+        }
+    }
+
+    allocator.free_raw(ptr2, large);
+    allocator.free_raw(ptr1, small);
+    allocator.free_raw(ptr4, small);
+    allocator.free_raw(ptr3, medium);
+}
+
+#[test]
+// Test freeing in different order than allocation
+fn test_free_reverse_order() {
+    let allocator = BuddyAllocatorImpl::new(1 << 24);
+    let size = BuddyAllocatorImpl::MIN_ALLOCATION;
+
+    let mut ptrs = vec![];
+    for _ in 0..10 {
+        let ptr = allocator.allocate_raw(size);
+        assert!(!ptr.is_null());
+        ptrs.push(ptr);
+    }
+
+    let used_peak = allocator.used_size();
+
+    // Free in reverse order
+    for ptr in ptrs.iter().rev() {
+        allocator.free_raw(*ptr, size);
+    }
+
+    assert!(allocator.used_size() < used_peak);
+}
+
+#[test]
+// Test allocation size rounding: requests round up to a power of two (and to at least MIN_ALLOCATION), so odd sizes must still allocate and free cleanly
+fn allocation_rounds_up_to_pow2_and_min() {
+    let a = BuddyAllocatorImpl::new(1 << 24);
+
+    // Request sizes that aren't powers of 2
+    let ptr1 = a.allocate_raw(5000); // Should round to 8192
+    let ptr2 = a.allocate_raw(1000); // Should round to 4096
+    let ptr3 = a.allocate_raw(10000); // Should round to 16384
+
+    assert!(!ptr1.is_null());
+    assert!(!ptr2.is_null());
+    assert!(!ptr3.is_null());
+
+    a.free_raw(ptr1, 5000);
+    a.free_raw(ptr2, 1000);
+    a.free_raw(ptr3, 10000);
+
+    let used0 = a.used_size();
+    let p = a.allocate_raw(5000); // rounds to 8192 (and >= 4096)
+    assert!(!p.is_null());
+    assert_eq!(a.used_size(), used0 + 8192);
+
+    a.free_raw(p, 5000); // free uses same rounding path
+    assert_eq!(a.used_size(), used0);
+}
+
+#[test]
+// Confirm there is no overlap between levels
+fn test_offset_calculation() {
+    let allocator = BuddyAllocatorImpl::new(1 << 24);
+
+    let mut ranges = vec![];
+    for level in allocator.inner.meta.level_range.clone() {
+        let offset = allocator.inner.offset_of_level_words(level);
+        let size = allocator.inner.size_of_level_words(level);
+        ranges.push((offset, offset + size, level));
+    }
+    ranges.sort_by_key(|(start, _, _)| *start);
+
+    for w in ranges.windows(2) {
+        let (_s1, e1, l1) = w[0];
+        let (s2, _e2, l2) = w[1];
+        assert!(e1 <= s2, "overlap between level {} and level {}", l1, l2);
+    }
+}
+
+#[test]
+// Test bitmap boundaries
+fn test_bitmap_boundaries() {
+    let allocator = BuddyAllocatorImpl::new(1 << 24);
+
+    for level in allocator.inner.meta.level_range.clone() {
+        let bits = allocator.inner.size_of_level_bits(level);
+        let words = allocator.inner.size_of_level_words(level);
+
+        // Verify words is enough to hold bits
+        assert!(
+            words * 64 >= bits,
+            "Level {} needs {} bits but only has {} words ({} bits)",
+            level,
+            bits,
+            words,
+            words * 64
+        );
+    }
+}
+
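+#[test]
+// Sketch: to_offset/from_offset should round-trip at every size tier, not just
+// MIN_ALLOCATION; assumes the same offset semantics as test_offset_roundtrip.
+fn test_offset_roundtrip_across_sizes() {
+    let a = BuddyAllocatorImpl::new(1 << 24);
+    for power in 12..18 {
+        let size = 1usize << power;
+        let p = a.allocate_raw(size);
+        assert!(!p.is_null());
+        let off = a.to_offset(p);
+        assert_eq!(a.from_offset::<u8>(off) as usize, p as usize);
+        a.free_raw(p, size);
+    }
+}
+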
+#[test]
+// Test try_allocate_many_raw where everything should succeed easily
+fn test_try_allocate_many_no_contention() {
+    let allocator = BuddyAllocatorImpl::new(1 << 24);
+    let size = BuddyAllocatorImpl::MIN_ALLOCATION;
+
+    let mut ptrs = [core::ptr::null_mut(); 10];
+    let result = allocator.try_allocate_many_raw(size, &mut ptrs);
+
+    assert_eq!(result, Some(10));
+    assert!(ptrs.iter().all(|p| !p.is_null()));
+
+    allocator.free_many_raw(size, &ptrs);
+}
+
+#[test]
+// Requests more pointers than there is space available: bulk allocation must stop cleanly
+// when space runs out, partial success is allowed, and the reported successes must be valid. Currently hanging
+fn test_allocate_many_partial_success() {
+    let allocator = BuddyAllocatorImpl::new(1 << 24);
+    let size = BuddyAllocatorImpl::MIN_ALLOCATION;
+
+    // Request more blocks than available
+    let mut ptrs = [core::ptr::null_mut(); 10000];
+    let count = allocator.allocate_many_raw(size, &mut ptrs);
+
+    // Should have allocated some but not all
+    assert!(count > 0);
+    assert!(count < ptrs.len());
+
+    // All allocated pointers should be non-null
+    for i in 0..count {
+        assert!(!ptrs[i].is_null());
+    }
+
+    // Remaining should be null
+    for i in count..ptrs.len() {
+        assert!(ptrs[i].is_null());
+    }
+
+    // Clean up
+    allocator.free_many_raw(size, &ptrs[0..count]);
+}
+
+#[test]
+// Test that refcnt works for different allocation addresses
+fn test_refcnt_different_addresses() {
+    use core::sync::atomic::Ordering;
+
+    let allocator = BuddyAllocatorImpl::new(1 << 24);
+    let size = BuddyAllocatorImpl::MIN_ALLOCATION;
+
+    let ptr1 = allocator.allocate_raw(size);
+    let ptr2 = allocator.allocate_raw(size);
+
+    let refcnt1 = allocator.refcnt(ptr1);
+    let refcnt2 = allocator.refcnt(ptr2);
+
+    // Should be different refcnt locations
+    assert_ne!(refcnt1, refcnt2);
+
+    // Both should be 0
+    assert_eq!(unsafe { (*refcnt1).load(Ordering::SeqCst) }, 0);
+    assert_eq!(unsafe { (*refcnt2).load(Ordering::SeqCst) }, 0);
+
+    allocator.free_raw(ptr1, size);
+    allocator.free_raw(ptr2, size);
+}
+
+#[test]
+// Test null pointer refcnt
+fn test_refcnt_null_pointer() {
+    let allocator = BuddyAllocatorImpl::new(1 << 24);
+    let refcnt = allocator.refcnt(core::ptr::null::<u8>());
+    assert!(refcnt.is_null());
+}
+
+#[test]
+// Test usage calculation
+fn test_usage_calculation() {
+    let allocator = BuddyAllocatorImpl::new(1 << 24);
+    let size = BuddyAllocatorImpl::MIN_ALLOCATION;
+
+    let initial_usage = allocator.usage();
+
+    let ptr = allocator.allocate_raw(size);
+    let usage_after = allocator.usage();
+
+    assert!(usage_after > initial_usage);
+    assert!(usage_after <= 1.0);
+    assert!(usage_after >= 0.0);
+
+    allocator.free_raw(ptr, size);
+}
+
+#[test]
+// Test request counting
+fn test_request_counting() {
+    let allocator = BuddyAllocatorImpl::new(1 << 24);
+    let size = BuddyAllocatorImpl::MIN_ALLOCATION;
+
+    let mut before = [0; 64];
+    let mut after = [0; 64];
+
+    allocator.requests(&mut before);
+
+    let ptr = allocator.allocate_raw(size);
+    allocator.free_raw(ptr, size);
+
+    allocator.requests(&mut after);
+
+    // Should have incremented request count for the size level
+    let level = size.next_power_of_two().ilog2() as usize;
+    assert!(after[level] > before[level]);
+}
+
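+#[test]
+// Sketch: usage() should rise with each allocation and return to its baseline once
+// everything is freed; assumes usage() is derived from used_size()/total_size().
+fn test_usage_tracks_allocations() {
+    let allocator = BuddyAllocatorImpl::new(1 << 24);
+    let size = BuddyAllocatorImpl::MIN_ALLOCATION;
+    let baseline = allocator.usage();
+    let mut ptrs = vec![];
+    let mut last = baseline;
+    for _ in 0..8 {
+        let ptr = allocator.allocate_raw(size);
+        assert!(!ptr.is_null());
+        let now = allocator.usage();
+        assert!(now > last);
+        last = now;
+        ptrs.push(ptr);
+    }
+    for ptr in ptrs {
+        allocator.free_raw(ptr, size);
+    }
+    assert_eq!(allocator.usage(), baseline);
+}
+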
+#[test]
+// Test that allocations are aligned to their (power-of-two) size within the arena
+fn test_alignment_requirements() {
+    let allocator = BuddyAllocatorImpl::new(1 << 24);
+    let base = allocator.base() as usize;
+
+    for power in 12..20 {
+        let size = 1 << power;
+        let ptr = allocator.allocate_raw(size);
+        assert!(!ptr.is_null());
+
+        let addr = ptr as usize;
+        assert_eq!(
+            (addr - base) % size,
+            0,
+            "Allocation of size {} not aligned within arena",
+            size
+        );
+
+        allocator.free_raw(ptr, size);
+    }
+}
+
+#[test]
+// Test clone and drop behavior
+fn test_clone_and_drop() {
+    let allocator = BuddyAllocatorImpl::new(1 << 24);
+
+    let ptr1 = allocator.allocate_raw(4096);
+    assert!(!ptr1.is_null());
+
+    {
+        let clone = allocator.clone();
+        let ptr2 = clone.allocate_raw(4096);
+        assert!(!ptr2.is_null());
+        clone.free_raw(ptr2, 4096);
+        // clone drops here
+    }
+
+    // Original should still work
+    let ptr3 = allocator.allocate_raw(4096);
+    assert!(!ptr3.is_null());
+
+    allocator.free_raw(ptr1, 4096);
+    allocator.free_raw(ptr3, 4096);
+}
+
+#[test]
+// Allocate one block, compute its buddy address, and verify that reserving the buddy returns that address (if it's free).
+fn buddy_address_math_matches_reserve() {
+    let a = BuddyAllocatorImpl::new(1 << 24);
+    let base = a.base() as usize;
+
+    for power in 12..18 {
+        let size = 1usize << power;
+        let p = a.allocate_raw(size);
+        assert!(!p.is_null());
+
+        let off = a.to_offset(p);
+        let idx = off / size;
+        let buddy_idx = idx ^ 1;
+        let buddy_off = buddy_idx * size;
+
+        // If the buddy is free, reserve_raw must return exactly that address.
+        let b = a.reserve_raw(buddy_off, size);
+        if !b.is_null() {
+            assert_eq!(a.to_offset(b), buddy_off);
+            a.free_raw(b, size);
+        }
+
+        a.free_raw(p, size);
+
+        // (optional) base-relative alignment property
+        assert_eq!(((p as usize) - base) % size, 0);
+    }
+}
+
+#[test]
+// 'Create' a known free block at a known offset by allocating a large block, then freeing it, then reserving/allocating inside it.
+fn split_large_block_into_smaller_blocks() {
+    let a = BuddyAllocatorImpl::new(1 << 24);
+
+    let big = 1usize << 16; // 64KiB
+    let small = 1usize << 12; // 4KiB
+    let factor = big / small;
+
+    let p = a.allocate_raw(big);
+    assert!(!p.is_null());
+    let off = a.to_offset(p);
+    a.free_raw(p, big);
+
+    // Now reserve all 4KiB blocks inside that 64KiB region.
+    let mut blocks = Vec::new();
+    for i in 0..factor {
+        let q = a.reserve_raw(off + i * small, small);
+        assert!(!q.is_null(), "failed to reserve sub-block {}", i);
+        blocks.push(q);
+    }
+
+    // Free them back
+    for q in blocks {
+        a.free_raw(q, small);
+    }
+}
+
+#[test]
+// Reserve two buddy halves, free them, verify you can reserve the parent block at the exact parent address.
+fn coalesce_two_buddies_into_parent() {
+    let a = BuddyAllocatorImpl::new(1 << 24);
+
+    let parent = 1usize << 14; // 16KiB
+    let child = 1usize << 13; // 8KiB
+
+    // Create a known free parent block at a known offset.
+    let p = a.allocate_raw(parent);
+    assert!(!p.is_null());
+    let off = a.to_offset(p);
+    a.free_raw(p, parent);
+
+    // Reserve both children (buddies).
+    let c0 = a.reserve_raw(off, child);
+    let c1 = a.reserve_raw(off + child, child);
+    assert!(!c0.is_null() && !c1.is_null());
+
+    // Free both; this should coalesce into the parent.
+    a.free_raw(c0, child);
+    a.free_raw(c1, child);
+
+    // Now reserving the parent at 'off' should succeed.
+    let p2 = a.reserve_raw(off, parent);
+    assert!(
+        !p2.is_null(),
+        "parent block did not reappear after coalescing"
+    );
+    assert_eq!(a.to_offset(p2), off);
+
+    a.free_raw(p2, parent);
+}
+
+#[test]
+// Hold one child, free the other, ensure parent reservation fails at that exact parent address.
+fn no_coalesce_if_only_one_buddy_free() {
+    let a = BuddyAllocatorImpl::new(1 << 24);
+
+    let parent = 1usize << 14; // 16KiB
+    let child = 1usize << 13; // 8KiB
+
+    let p = a.allocate_raw(parent);
+    assert!(!p.is_null());
+    let off = a.to_offset(p);
+    a.free_raw(p, parent);
+
+    let c0 = a.reserve_raw(off, child);
+    let c1 = a.reserve_raw(off + child, child);
+    assert!(!c0.is_null() && !c1.is_null());
+
+    // Free only one child
+    a.free_raw(c0, child);
+
+    // Parent must NOT be reservable while the other buddy is still held.
+    let parent_try = a.reserve_raw(off, parent);
+    assert!(
+        parent_try.is_null(),
+        "parent became available with one buddy still reserved"
+    );
+
+    // Cleanup
+    a.free_raw(c1, child);
+
+    // Now parent should be available (coalesced)
+    let parent_ok = a.reserve_raw(off, parent);
+    assert!(!parent_ok.is_null());
+    a.free_raw(parent_ok, parent);
+}
+
+#[test]
+// Free child1 then child0; ensure parent becomes available.
+fn coalesce_is_order_independent() {
+    let a = BuddyAllocatorImpl::new(1 << 24);
+    let parent = 1usize << 15; // 32KiB
+    let child = 1usize << 14; // 16KiB
+
+    let p = a.allocate_raw(parent);
+    assert!(!p.is_null());
+    let off = a.to_offset(p);
+    a.free_raw(p, parent);
+
+    let c0 = a.reserve_raw(off, child);
+    let c1 = a.reserve_raw(off + child, child);
+    assert!(!c0.is_null() && !c1.is_null());
+
+    a.free_raw(c1, child);
+    a.free_raw(c0, child);
+
+    let p2 = a.reserve_raw(off, parent);
+    assert!(!p2.is_null());
+    a.free_raw(p2, parent);
+}
+
+#[test]
+// Free 4 children → coalesce to 2 parents → coalesce to 1 grandparent.
+fn multi_level_coalesce_cascades() {
+    let a = BuddyAllocatorImpl::new(1 << 24);
+
+    let grand = 1usize << 15; // 32KiB
+    let child = 1usize << 13; // 8KiB
+    let n = grand / child; // 4
+
+    let p = a.allocate_raw(grand);
+    assert!(!p.is_null());
+    let off = a.to_offset(p);
+    a.free_raw(p, grand);
+
+    let mut kids = Vec::new();
+    for i in 0..n {
+        let k = a.reserve_raw(off + i * child, child);
+        assert!(!k.is_null());
+        kids.push(k);
+    }
+
+    // Free all kids -> should coalesce up to grand
+    for k in kids {
+        a.free_raw(k, child);
+    }
+
+    let g = a.reserve_raw(off, grand);
+    assert!(
+        !g.is_null(),
+        "expected full cascade coalesce to grand block"
+    );
+    a.free_raw(g, grand);
+}
+
+#[test]
+// Reserving a block that lies entirely beyond the end of the arena must fail and return null.
+// Currently failing; needs to be fixed?
+fn reserve_out_of_range_returns_null() {
+    let a = BuddyAllocatorImpl::new(1 << 24);
+    let size = 1usize << 12;
+
+    // definitely beyond arena
+    let ptr = a.reserve_raw(a.len() + size, size);
+    assert!(ptr.is_null());
+}
+
+#[test]
+// Testing allocate_many_raw where partial failure does not poison the lock
+// Currently hanging because partial-failure handling is not working; re-test once that is fixed
+fn allocate_many_partial_failure_does_not_poison_lock() {
+    let a = BuddyAllocatorImpl::new(1 << 24);
+    let size = BuddyAllocatorImpl::MIN_ALLOCATION;
+
+    let mut ptrs = [core::ptr::null_mut(); 10000];
+    let n = a.allocate_many_raw(size, &mut ptrs);
+
+    assert!(n > 0);
+    assert!(n < ptrs.len());
+
+    a.free_many_raw(size, &ptrs[..n]);
+
+    // If the lock leaked, this would hang.
+    let p = a.allocate_raw(size);
+    assert!(!p.is_null());
+    a.free_raw(p, size);
+}
+
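+#[test]
+// Sketch: while a block is reserved, same-size allocations must never alias its
+// offset; assumes reserve_raw marks the block used exactly like allocate_raw does.
+fn test_allocations_avoid_reserved_region() {
+    let a = BuddyAllocatorImpl::new(1 << 24);
+    let size = BuddyAllocatorImpl::MIN_ALLOCATION;
+
+    // Carve out a known free block, then hold a reservation on it.
+    let p = a.allocate_raw(size);
+    assert!(!p.is_null());
+    let off = a.to_offset(p);
+    a.free_raw(p, size);
+    let held = a.reserve_raw(off, size);
+    assert!(!held.is_null());
+
+    let mut got = vec![];
+    for _ in 0..64 {
+        let q = a.allocate_raw(size);
+        assert!(!q.is_null());
+        assert_ne!(a.to_offset(q), off);
+        got.push(q);
+    }
+    for q in got {
+        a.free_raw(q, size);
+    }
+    a.free_raw(held, size);
+}
+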
+#[test]
+// Ensures try_* returns None when the lock is held.
+fn try_allocate_many_returns_none_when_locked() {
+    let a = BuddyAllocatorImpl::new(1 << 24);
+    let size = BuddyAllocatorImpl::MIN_ALLOCATION;
+
+    // Manually lock allocator and ensure try_* fails.
+    unsafe {
+        a.inner.lock();
+    }
+    let mut ptrs = [core::ptr::null_mut(); 4];
+    let r = a.try_allocate_many_raw(size, &mut ptrs);
+    assert_eq!(r, None);
+    unsafe {
+        a.inner.unlock();
+    }
+
+    // Now it should work
+    let r2 = a.try_allocate_many_raw(size, &mut ptrs);
+    assert_eq!(r2, Some(4));
+    a.free_many_raw(size, &ptrs);
+}
+
+#[test]
+// Interleaved patterns: A,B,C,D where (A,B) and (C,D) are buddy pairs.
+// Freeing B and C alone should NOT make either parent available;
+// freeing A then enables AB coalesce; freeing D then enables CD coalesce; then both parents can coalesce further.
+fn interleaved_buddy_pairs_coalesce_independently_then_merge() {
+    let a = BuddyAllocatorImpl::new(1 << 24);
+
+    let grand = 1usize << 15; // 32KiB
+    let parent = 1usize << 14; // 16KiB
+    let child = 1usize << 13; // 8KiB
+
+    // Known free 32KiB region
+    let g = a.allocate_raw(grand);
+    assert!(!g.is_null());
+    let off = a.to_offset(g);
+    a.free_raw(g, grand);
+
+    // Reserve A,B,C,D as 8KiB blocks at offsets 0,1,2,3 within the 32KiB region
+    let a0 = a.reserve_raw(off + 0 * child, child); // A
+    let b0 = a.reserve_raw(off + 1 * child, child); // B (buddy of A)
+    let c0 = a.reserve_raw(off + 2 * child, child); // C
+    let d0 = a.reserve_raw(off + 3 * child, child); // D (buddy of C)
+    assert!(!a0.is_null() && !b0.is_null() && !c0.is_null() && !d0.is_null());
+
+    // Free B and C only -> neither 16KiB parent should be reservable yet.
+    a.free_raw(b0, child);
+    a.free_raw(c0, child);
+
+    assert!(
+        a.reserve_raw(off + 0 * parent, parent).is_null(),
+        "AB parent should not exist yet"
+    );
+    assert!(
+        a.reserve_raw(off + 1 * parent, parent).is_null(),
+        "CD parent should not exist yet"
+    );
+
+    // Free A -> AB should coalesce to first 16KiB parent at off
+    a.free_raw(a0, child);
+    let p0 = a.reserve_raw(off + 0 * parent, parent);
+    assert!(!p0.is_null(), "AB should coalesce to 16KiB");
+    a.free_raw(p0, parent);
+
+    // Free D -> CD should coalesce to second 16KiB parent at off + 16KiB
+    a.free_raw(d0, child);
+    let p1 = a.reserve_raw(off + 1 * parent, parent);
+    assert!(!p1.is_null(), "CD should coalesce to 16KiB");
+    a.free_raw(p1, parent);
+
+    // Now both 16KiB parents are free -> should coalesce into 32KiB grandparent at off
+    let g2 = a.reserve_raw(off, grand);
+    assert!(
+        !g2.is_null(),
+        "two free 16KiB parents should coalesce to 32KiB"
+    );
+    a.free_raw(g2, grand);
+}
+
+#[test]
+// Fragmentation scenario: partial coalescing with an obstacle.
+// If one leaf remains reserved, upper levels must not fully coalesce; once the obstacle is freed, full coalesce should happen.
+fn fragmentation_blocks_full_coalesce_until_obstacle_removed() {
+    let a = BuddyAllocatorImpl::new(1 << 24);
+
+    let big = 1usize << 16; // 64KiB region we control
+    let leaf = 1usize << 12; // 4KiB
+    let n = big / leaf; // 16 leaves
+
+    // Known free 64KiB region
+    let p = a.allocate_raw(big);
+    assert!(!p.is_null());
+    let off = a.to_offset(p);
+    a.free_raw(p, big);
+
+    // Reserve all leaves, keep one as "obstacle", free the rest.
+    let mut leaves = Vec::new();
+    for i in 0..n {
+        let q = a.reserve_raw(off + i * leaf, leaf);
+        assert!(!q.is_null());
+        leaves.push(q);
+    }
+
+    let obstacle = leaves[7]; // arbitrary leaf to hold
+    for q in leaves.iter() {
+        if *q == obstacle {
+            continue;
+        }
+        a.free_raw(*q, leaf);
+    }
+
+    // With one 4KiB still reserved, the full 64KiB block must NOT be available.
+    assert!(
+        a.reserve_raw(off, big).is_null(),
+        "should not fully coalesce with an obstacle leaf reserved"
+    );
+
+    // Now free the obstacle leaf -> full coalesce should become possible.
+    a.free_raw(obstacle, leaf);
+    let big2 = a.reserve_raw(off, big);
+    assert!(
+        !big2.is_null(),
+        "after removing obstacle, should fully coalesce back to 64KiB"
+    );
+    a.free_raw(big2, big);
+}
+
+#[test]
+// Reserved blocks shouldn't participate in coalescing:
+// if one buddy is permanently reserved (held), the parent must not become available.
+fn reserved_block_prevents_coalescing() {
+    let a = BuddyAllocatorImpl::new(1 << 24);
+
+    let parent = 1usize << 14; // 16KiB
+    let child = 1usize << 13; // 8KiB
+
+    // Known free parent region
+    let p = a.allocate_raw(parent);
+    assert!(!p.is_null());
+    let off = a.to_offset(p);
+    a.free_raw(p, parent);
+
+    // Reserve both children, but "reserve" one as a held block (simulate reservation that shouldn't coalesce).
+    let held = a.reserve_raw(off, child);
+    let other = a.reserve_raw(off + child, child);
+    assert!(!held.is_null() && !other.is_null());
+
+    // Free only the other -> parent must not appear
+    a.free_raw(other, child);
+    assert!(
+        a.reserve_raw(off, parent).is_null(),
+        "parent should not coalesce while one child is held/reserved"
+    );
+
+    // Once held is freed too, parent should become available
+    a.free_raw(held, child);
+    let p2 = a.reserve_raw(off, parent);
+    assert!(!p2.is_null());
+    a.free_raw(p2, parent);
+}
diff --git a/common/src/lib.rs b/common/src/lib.rs
index 20d7811..78ea10b 100644
--- a/common/src/lib.rs
+++ b/common/src/lib.rs
@@ -1,4 +1,5 @@
 #![cfg_attr(not(feature = "std"), no_std)]
+#![allow(stable_features, unused_features)]
 #![feature(allocator_api)]
 #![feature(fn_traits)]
 #![cfg_attr(feature = "std", feature(layout_for_ptr))]
diff --git a/kernel/Cargo.toml b/kernel/Cargo.toml
index 3ad4cff..9a757d1 100644
--- a/kernel/Cargo.toml
+++ b/kernel/Cargo.toml
@@ -36,7 +36,8 @@ talc = "4.4.3"
 spin = "0.10.0"
 async-lock = { version = "3.4.1", default-features = false }
 postcard = "1.1.3"
-serde = { version = "1.0.228", default-features = false }
+serde = { version = "1.0", default-features = false, features = ["alloc", "derive"] }
+
 [build-dependencies]
 anyhow = "1.0.86"
diff --git a/kernel/src/lib.rs b/kernel/src/lib.rs
index b5831ed..48aba55 100644
--- a/kernel/src/lib.rs
+++ b/kernel/src/lib.rs
@@ -1,7 +1,10 @@
 #![no_main]
 #![no_std]
+#![allow(stable_features, unused_features)]
+#![feature(cfg_version)]
 #![feature(allocator_api)]
-#![feature(widening_mul)]
+#![cfg_attr(not(version("1.96")), feature(bigint_helper_methods))]
+#![cfg_attr(version("1.96"), feature(widening_mul))]
 #![feature(box_as_ptr)]
 #![feature(negative_impls)]
 #![feature(never_type)]
diff --git a/kernel/src/tests/test_serde.rs b/kernel/src/tests/test_serde.rs
index b9e038a..102de14 100644
--- a/kernel/src/tests/test_serde.rs
+++ b/kernel/src/tests/test_serde.rs
@@ -1,112 +1,125 @@
-use crate::prelude::*;
-extern crate alloc;
+// Serialization round-trip tests using postcard.
+// Runs with: cargo test -p kernel --target=x86_64-unknown-none

-#[test]
-fn test_serde_null() {
-    let null = Value::Null(Null::new());
-    let bytes_vec = postcard::to_allocvec(&null).unwrap();
-    let deserialized_null: Value = postcard::from_bytes(&bytes_vec).unwrap();
-    assert_eq!(deserialized_null, null);
-}
+#[cfg(test)]
+mod tests {
+    extern crate alloc;

-#[test]
-fn test_serde_word() {
-    let word = Value::Word(1.into());
-    let bytes_vec = postcard::to_allocvec(&word).unwrap();
-    let deserialized_word: Value = postcard::from_bytes(&bytes_vec).unwrap();
-    assert_eq!(deserialized_word, word);
-}
+    use crate::prelude::*;

-#[test]
-fn test_serde_blob() {
-    let blob = Value::Blob("hello, world!".into());
-    let bytes_vec = postcard::to_allocvec(&blob).unwrap();
-    let deserialized_blob: Value = postcard::from_bytes(&bytes_vec).unwrap();
-    assert_eq!(deserialized_blob, blob);
-}
+    /// Verifies Null serializes and deserializes back to an equal value.
+    #[test]
+    fn test_serde_null() {
+        let null = Value::Null(Null::new());
+        let bytes_vec = postcard::to_allocvec(&null).unwrap();
+        let deserialized: Value = postcard::from_bytes(&bytes_vec).unwrap();
+        assert_eq!(deserialized, null);
+    }

-#[test]
-fn test_serde_tuple() {
-    let tuple = Value::Tuple((1, 2, 3).into());
-    let bytes_vec = postcard::to_allocvec(&tuple).unwrap();
-    let deserialized_tuple: Value = postcard::from_bytes(&bytes_vec).unwrap();
-    assert_eq!(deserialized_tuple, tuple);
-}
+    /// Verifies Word serializes and deserializes back to an equal value.
+    #[test]
+    fn test_serde_word() {
+        let word = Value::Word(1.into());
+        let bytes_vec = postcard::to_allocvec(&word).unwrap();
+        let deserialized: Value = postcard::from_bytes(&bytes_vec).unwrap();
+        assert_eq!(deserialized, word);
+    }

-#[test]
-fn test_serde_page() {
-    let page = Value::Page(Page::new(1));
-    let bytes_vec = postcard::to_allocvec(&page).unwrap();
-    let deserialized_page: Value = postcard::from_bytes(&bytes_vec).unwrap();
-    assert_eq!(deserialized_page, page);
-}
+    /// Verifies Blob serializes and deserializes back to an equal value.
+    #[test]
+    fn test_serde_blob() {
+        let blob = Value::Blob("hello, world!".into());
+        let bytes_vec = postcard::to_allocvec(&blob).unwrap();
+        let deserialized: Value = postcard::from_bytes(&bytes_vec).unwrap();
+        assert_eq!(deserialized, blob);
+    }

-#[test]
-fn test_serde_table() {
-    let table = Value::Table(Table::new(1));
-    let bytes_vec = postcard::to_allocvec(&table).unwrap();
-    let deserialized_table: Value = postcard::from_bytes(&bytes_vec).unwrap();
-    assert_eq!(deserialized_table, table);
-}
+    /// Verifies Tuple serializes and deserializes back to an equal value.
+    #[test]
+    fn test_serde_tuple() {
+        let tuple = Value::Tuple((1, 2, 3).into());
+        let bytes_vec = postcard::to_allocvec(&tuple).unwrap();
+        let deserialized: Value = postcard::from_bytes(&bytes_vec).unwrap();
+        assert_eq!(deserialized, tuple);
+    }

-// #[test]
-// fn test_serde_function() {
-//     let arca = Arca::new();
-//     let inner_func: arca::Function = Function::from(arca);
-//     let func = Value::Function(inner_func);
-//     let bytes_vec = postcard::to_allocvec(&func).unwrap();
-//     let deserialized_func: Value = postcard::from_bytes(&bytes_vec).unwrap();
-//     assert_eq!(deserialized_func, func);
-// }
+    /// Verifies Page serializes and deserializes back to an equal value.
+    #[test]
+    fn test_serde_page() {
+        let page = Value::Page(Page::new(1));
+        let bytes_vec = postcard::to_allocvec(&page).unwrap();
+        let deserialized: Value = postcard::from_bytes(&bytes_vec).unwrap();
+        assert_eq!(deserialized, page);
+    }

-#[test]
-fn test_serde_ropage() {
-    let ropage = Entry::ROPage(Page::new(1));
-    let bytes_vec = postcard::to_allocvec(&ropage).unwrap();
-    let deserialized_ropage: Entry = postcard::from_bytes(&bytes_vec).unwrap();
-    assert_eq!(deserialized_ropage, ropage);
-}
+    /// Verifies Table serializes and deserializes back to an equal value.
+    #[test]
+    fn test_serde_table() {
+        let table = Value::Table(Table::new(1));
+        let bytes_vec = postcard::to_allocvec(&table).unwrap();
+        let deserialized: Value = postcard::from_bytes(&bytes_vec).unwrap();
+        assert_eq!(deserialized, table);
+    }

-#[test]
-fn test_serde_rwpage() {
-    let rwpage = Entry::RWPage(Page::new(1));
-    let bytes_vec = postcard::to_allocvec(&rwpage).unwrap();
-    let deserialized_rwpage: Entry = postcard::from_bytes(&bytes_vec).unwrap();
-    assert_eq!(deserialized_rwpage, rwpage);
-}
+    /// Verifies a read-only page Entry round-trips through serde.
+    #[test]
+    fn test_serde_ropage() {
+        let ropage = Entry::ROPage(Page::new(1));
+        let bytes_vec = postcard::to_allocvec(&ropage).unwrap();
+        let deserialized: Entry = postcard::from_bytes(&bytes_vec).unwrap();
+        assert_eq!(deserialized, ropage);
+    }

-#[test]
-fn test_serde_rotable() {
-    let rotable = Entry::ROTable(Table::new(1));
-    let bytes_vec = postcard::to_allocvec(&rotable).unwrap();
-    let deserialized_rotable: Entry = postcard::from_bytes(&bytes_vec).unwrap();
-    assert_eq!(deserialized_rotable, rotable);
-}
+    /// Verifies a read-write page Entry round-trips through serde.
+    #[test]
+    fn test_serde_rwpage() {
+        let rwpage = Entry::RWPage(Page::new(1));
+        let bytes_vec = postcard::to_allocvec(&rwpage).unwrap();
+        let deserialized: Entry = postcard::from_bytes(&bytes_vec).unwrap();
+        assert_eq!(deserialized, rwpage);
+    }

-#[test]
-fn test_serde_rwtable() {
-    let rwtable = Entry::RWTable(Table::new(1));
-    let bytes_vec = postcard::to_allocvec(&rwtable).unwrap();
-    let deserialized_rwtable: Entry = postcard::from_bytes(&bytes_vec).unwrap();
-    assert_eq!(deserialized_rwtable, rwtable);
-}
+    /// Verifies a read-only table Entry round-trips through serde.
+    #[test]
+    fn test_serde_rotable() {
+        let rotable = Entry::ROTable(Table::new(1));
+        let bytes_vec = postcard::to_allocvec(&rotable).unwrap();
+        let deserialized: Entry = postcard::from_bytes(&bytes_vec).unwrap();
+        assert_eq!(deserialized, rotable);
+    }

-#[test]
-fn test_value_error() {
-    let unknown_variant = [7, 0];
-    let deserialized: Result<Value, postcard::Error> = postcard::from_bytes(&unknown_variant);
-    let deserialized_error = deserialized.expect_err("should have been err");
-    let error =
-        serde::de::Error::unknown_variant("7", &["Null", "Word", "Blob", "Tuple", "Page", "Table"]);
-    assert_eq!(deserialized_error, error);
-}
+    /// Verifies a read-write table Entry round-trips through serde.
+    #[test]
+    fn test_serde_rwtable() {
+        let rwtable = Entry::RWTable(Table::new(1));
+        let bytes_vec = postcard::to_allocvec(&rwtable).unwrap();
+        let deserialized: Entry = postcard::from_bytes(&bytes_vec).unwrap();
+        assert_eq!(deserialized, rwtable);
+    }
+
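+    /// Sketch: a Tuple nesting a Blob and a Word should round-trip through postcard
+    /// just like the flat cases above (assumes Value's serde impls recurse structurally).
+    #[test]
+    fn test_serde_nested_tuple() {
+        let nested = Value::Tuple((Value::Blob("inner".into()), Value::Word(7.into())).into());
+        let bytes_vec = postcard::to_allocvec(&nested).unwrap();
+        let deserialized: Value = postcard::from_bytes(&bytes_vec).unwrap();
+        assert_eq!(deserialized, nested);
+    }
+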
+    /// Ensures deserializing an unknown Value variant produces the expected error.
+    #[test]
+    fn test_value_unknown_variant_error() {
+        let unknown_variant = [7, 0];
+        let deserialized: Result<Value, postcard::Error> = postcard::from_bytes(&unknown_variant);
+        let deserialized_error = deserialized.expect_err("should have been err");
+        let error = serde::de::Error::unknown_variant(
+            "7",
+            &["Null", "Word", "Blob", "Tuple", "Page", "Table"],
+        );
+        assert_eq!(deserialized_error, error);
+    }

-#[test]
-fn test_entry_error() {
-    let unknown_variant = [5, 0];
-    let deserialized: Result<Entry, postcard::Error> = postcard::from_bytes(&unknown_variant);
-    let deserialized_error = deserialized.expect_err("should have been err");
-    let error =
-        serde::de::Error::unknown_variant("5", &["Null", "ROPage", "RWPage", "ROTable", "RWTable"]);
-    assert_eq!(deserialized_error, error);
+    /// Ensures deserializing an unknown Entry variant produces the expected error.
+    #[test]
+    fn test_entry_unknown_variant_error() {
+        let unknown_variant = [5, 0];
+        let deserialized: Result<Entry, postcard::Error> = postcard::from_bytes(&unknown_variant);
+        let deserialized_error = deserialized.expect_err("should have been err");
+        let error = serde::de::Error::unknown_variant(
+            "5",
+            &["Null", "ROPage", "RWPage", "ROTable", "RWTable"],
+        );
+        assert_eq!(deserialized_error, error);
+    }
 }
diff --git a/kernel/src/types/blob.rs b/kernel/src/types/blob.rs
index 5b7780e..d29c502 100644
--- a/kernel/src/types/blob.rs
+++ b/kernel/src/types/blob.rs
@@ -87,3 +87,40 @@ impl From<&str> for Blob {
         Blob::from(value.to_string())
     }
 }
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    /// Verifies len() and into_inner() return correct content.
+    #[test]
+    fn test_len_and_into_inner() {
+        let blob = Blob::new(b"hello".to_vec());
+        assert_eq!(blob.len(), 5);
+        assert_eq!(&*blob.into_inner(), b"hello");
+    }
+
+    /// Verifies DerefMut allows in-place byte mutation.
+    #[test]
+    fn test_mutation() {
+        let mut blob = Blob::new(b"hello".to_vec());
+        blob[0] = b'j';
+        assert_eq!(&*blob.into_inner(), b"jello");
+    }
+
+    /// Ensures invalid UTF-8 bytes are preserved as raw data.
+    #[test]
+    fn test_invalid_utf8_preserved() {
+        let bytes = vec![0xffu8, 0xfeu8, 0xfdu8];
+        let blob = Blob::new(bytes.clone());
+        assert_eq!(&*blob.into_inner(), &bytes[..]);
+    }
+
+    /// Verifies From<&str> constructs a blob with matching content.
+    #[test]
+    fn test_from_str() {
+        let blob = Blob::from("test");
+        assert_eq!(blob.len(), 4);
+        assert_eq!(&*blob, b"test");
+    }
+}
diff --git a/kernel/src/types/function.rs b/kernel/src/types/function.rs
index 29d539c..94c1aa2 100644
--- a/kernel/src/types/function.rs
+++ b/kernel/src/types/function.rs
@@ -166,3 +166,51 @@ impl Function {
         }
     }
 }
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    /// Verifies symbolic function parsing and read round-trip.
+    #[test]
+    fn test_symbolic_parse_and_read() {
+        let args = Tuple::from((1u64, "two"));
+        let value = Value::Tuple(Tuple::from((
+            Blob::from("Symbolic"),
+            Value::Word(Word::new(5)),
+            Value::Tuple(args),
+        )));
+        let func = Function::new(value.clone()).expect("symbolic parse failed");
+        assert!(!func.is_arcane());
+        assert_eq!(func.read(), value);
+    }
+
+    /// Ensures unrecognized function tags are rejected.
+    #[test]
+    fn test_invalid_tag_rejected() {
+        let value = Value::Tuple(Tuple::from((Blob::from("Other"), Value::Null(Null::new()))));
+        assert!(Function::new(value).is_none());
+    }
+
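+    /// Sketch: a symbolic function with an empty argument tuple should also parse and
+    /// read back unchanged (assumes the same layout as test_symbolic_parse_and_read).
+    #[test]
+    fn test_symbolic_empty_args_roundtrip() {
+        let value = Value::Tuple(Tuple::from((
+            Blob::from("Symbolic"),
+            Value::Word(Word::new(0)),
+            Value::Tuple(Tuple::new(0)),
+        )));
+        let func = Function::new(value.clone()).expect("symbolic parse failed");
+        assert!(!func.is_arcane());
+        assert_eq!(func.read(), value);
+    }
+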
+    /// Verifies arcane function parsing accepts a valid register/memory layout.
+    #[test]
+    fn test_arcane_parse_valid_layout() {
+        let mut registers = Tuple::new(18);
+        for i in 0..18 {
+            registers.set(i, Value::Null(Null::new()));
+        }
+        let mut data = Tuple::new(4);
+        data.set(0, Value::Tuple(registers));
+        data.set(1, Value::Table(Table::new(1)));
+        data.set(2, Value::Tuple(Tuple::new(0)));
+        data.set(3, Value::Tuple(Tuple::new(0)));
+
+        let value = Value::Tuple(Tuple::from((
+            Blob::from("Arcane"),
+            Value::Tuple(data),
+            Value::Tuple(Tuple::new(0)),
+        )));
+        let func = Function::new(value).expect("arcane parse failed");
+        assert!(func.is_arcane());
+    }
+}
diff --git a/kernel/src/types/null.rs b/kernel/src/types/null.rs
index 79cfa92..ed84126 100644
--- a/kernel/src/types/null.rs
+++ b/kernel/src/types/null.rs
@@ -12,3 +12,14 @@ impl Default for Null {
         Self::new()
     }
 }
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    /// Ensures Null::new() and Null::default() produce identical values.
+    #[test]
+    fn test_new_equals_default() {
+        assert_eq!(Null::new(), Null::default());
+    }
+}
diff --git a/kernel/src/types/page.rs b/kernel/src/types/page.rs
index a666c7e..1b77b73 100644
--- a/kernel/src/types/page.rs
+++ b/kernel/src/types/page.rs
@@ -120,3 +120,38 @@ impl DerefMut for Page {
     }
 }
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    /// Verifies page size selection at tier boundaries (4KB, 2MB, 1GB).
+    #[test]
+    fn test_size_tiers() {
+        let small = Page::new(1);
+        assert_eq!(small.size(), 1 << 12);
+
+        let mid = Page::new((1 << 12) + 1);
+        assert_eq!(mid.size(), 1 << 21);
+
+        let large = Page::new((1 << 21) + 1);
+        assert_eq!(large.size(), 1 << 30);
+    }
+
+    /// Verifies DerefMut write and Deref read on page bytes.
+    #[test]
+    fn test_write_and_read_back() {
+        let mut page = Page::new(1);
+        page[0] = 7;
+        assert_eq!(page[0], 7);
+    }
+
+    /// Ensures shared() preserves written content.
+    #[test]
+    fn test_shared_preserves_content() {
+        let mut page = Page::new(1);
+        page[0] = 42;
+        let shared = page.shared();
+        assert_eq!(shared[0], 42);
+    }
+}
diff --git a/kernel/src/types/table.rs b/kernel/src/types/table.rs
index 6a4015b..b30f8b2 100644
--- a/kernel/src/types/table.rs
+++ b/kernel/src/types/table.rs
@@ -206,3 +206,36 @@ impl TryFrom for CowPage {
         }
     }
 }
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    /// Verifies table size selection at tier boundaries (2MB, 1GB).
+    #[test]
+    fn test_size_tiers() {
+        let small = Table::new(1);
+        assert_eq!(small.size(), 1 << 21);
+
+        let large = Table::new((1 << 21) + 1);
+        assert_eq!(large.size(), 1 << 30);
+    }
+
+    /// Ensures empty table slots return the correct default Null entry.
+    #[test]
+    fn test_get_returns_default_null() {
+        let table = Table::new(1);
+        let entry = table.get(10);
+        assert_eq!(entry, arca::Entry::Null(1 << 12));
+    }
+
+    /// Verifies set replaces the default entry and get retrieves it back.
+    #[test]
+    fn test_set_and_get_roundtrip() {
+        let mut table = Table::new(1);
+        let entry = arca::Entry::RWPage(arca::Page::from_inner(Page::new(1)));
+        let old = table.set(0, entry.clone()).unwrap();
+        assert_eq!(old, arca::Entry::Null(1 << 12));
+        assert_eq!(table.get(0), entry);
+    }
+}
diff --git a/kernel/src/types/tuple.rs b/kernel/src/types/tuple.rs
index 8be3307..3257371 100644
--- a/kernel/src/types/tuple.rs
+++ b/kernel/src/types/tuple.rs
@@ -42,3 +42,28 @@ impl FromIterator<Value> for Tuple {
         Tuple::new(v)
     }
 }
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
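+    /// Sketch: collecting an empty iterator should yield an empty tuple (assumes the
+    /// FromIterator impl above simply collects into the backing vector).
+    #[test]
+    fn test_from_empty_iter() {
+        let tuple: Tuple = alloc::vec::Vec::<Value>::new().into_iter().collect();
+        assert_eq!(tuple.len(), 0);
+    }
+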
+    /// Verifies new_with_len creates a tuple filled with Null values.
+    #[test]
+    fn test_new_with_len_defaults_to_null() {
+        let tuple = Tuple::new_with_len(2);
+        assert_eq!(tuple.len(), 2);
+        assert!(matches!(tuple[0], Value::Null(_)));
+        assert!(matches!(tuple[1], Value::Null(_)));
+    }
+
+    /// Verifies FromIterator collects values into a correctly sized tuple.
+    #[test]
+    fn test_from_iter() {
+        let values: alloc::vec::Vec<Value> =
+            alloc::vec![Value::Word(1u64.into()), Value::Blob("x".into())];
+        let tuple: Tuple = values.clone().into_iter().collect();
+        assert_eq!(tuple.len(), 2);
+        assert_eq!(tuple[0], values[0]);
+        assert_eq!(tuple[1], values[1]);
+    }
+}
diff --git a/kernel/src/types/value.rs b/kernel/src/types/value.rs
index b4f9f3e..7fbd2c4 100644
--- a/kernel/src/types/value.rs
+++ b/kernel/src/types/value.rs
@@ -62,3 +62,33 @@ macro_rules! impl_value_from {

 foreach_type_item! {impl_tryfrom_value}
 foreach_type_item! {impl_value_from}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    /// Verifies Word -> Value -> Word conversion round-trips correctly.
+    #[test]
+    fn test_word_roundtrip() {
+        let word = Word::new(99);
+        let value: Value = word.clone().into();
+        let roundtrip = Word::try_from(value).unwrap();
+        assert_eq!(roundtrip, word);
+    }
+
+    /// Verifies Blob -> Value -> Blob conversion round-trips correctly.
+    #[test]
+    fn test_blob_roundtrip() {
+        let blob = Blob::new(b"data".to_vec());
+        let value: Value = blob.clone().into();
+        let roundtrip = Blob::try_from(value).unwrap();
+        assert_eq!(roundtrip, blob);
+    }
+
+    /// Ensures TryFrom fails when converting to the wrong variant type.
+    #[test]
+    fn test_mismatched_conversion_fails() {
+        let value: Value = Word::new(1).into();
+        assert!(Blob::try_from(value).is_err());
+    }
+}
diff --git a/kernel/src/types/word.rs b/kernel/src/types/word.rs
index 749877f..291d4b5 100644
--- a/kernel/src/types/word.rs
+++ b/kernel/src/types/word.rs
@@ -36,3 +36,31 @@ impl AsMut<u64> for Word {
         &mut self.value
     }
 }
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    /// Verifies Word::read returns the value passed to new.
+    #[test]
+    fn test_read() {
+        let word = Word::new(123);
+        assert_eq!(word.read(), 123);
+    }
+
+    /// Verifies From and Into round-trip correctly.
+    #[test]
+    fn test_from_u64_roundtrip() {
+        let word = Word::from(0xdeadbeef_u64);
+        assert_eq!(u64::from(word), 0xdeadbeef);
+    }
+
+    /// Verifies AsRef and AsMut provide access to the inner value.
+    #[test]
+    fn test_as_ref_as_mut() {
+        let mut word = Word::new(42);
+        assert_eq!(*word.as_ref(), 42);
+        *word.as_mut() = 99;
+        assert_eq!(word.read(), 99);
+    }
+}
diff --git a/modules/arca-musl b/modules/arca-musl
index a83796a..a88bc69 160000
--- a/modules/arca-musl
+++ b/modules/arca-musl
@@ -1 +1 @@
-Subproject commit a83796a98c009ea92b9dc47a526b24b818b325b7
+Subproject commit a88bc6999eb736d93a0aab0afe07c99e4e1ec559
diff --git a/vmm/src/lib.rs b/vmm/src/lib.rs
index e0413c8..040802e 100644
--- a/vmm/src/lib.rs
+++ b/vmm/src/lib.rs
@@ -1,3 +1,4 @@
+#![allow(stable_features, unused_features)]
 #![feature(allocator_api)]
 #![feature(ptr_metadata)]
 #![feature(str_from_raw_parts)]