1 // Copyright 2022, The Android Open Source Project
2 //
3 // Licensed under the Apache License, Version 2.0 (the "License");
4 // you may not use this file except in compliance with the License.
5 // You may obtain a copy of the License at
6 //
7 //     http://www.apache.org/licenses/LICENSE-2.0
8 //
9 // Unless required by applicable law or agreed to in writing, software
10 // distributed under the License is distributed on an "AS IS" BASIS,
11 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 // See the License for the specific language governing permissions and
13 // limitations under the License.
14 
15 //! Heap implementation.
16 
17 use alloc::alloc::alloc;
18 use alloc::alloc::Layout;
19 use alloc::boxed::Box;
20 
21 use core::alloc::GlobalAlloc as _;
22 use core::ffi::c_void;
23 use core::mem;
24 use core::num::NonZeroUsize;
25 use core::ptr;
26 use core::ptr::NonNull;
27 
28 use buddy_system_allocator::LockedHeap;
29 
/// Configures the size of the global allocator.
///
/// Defines the backing array for the heap in the invoking crate and exports it
/// under the symbol name "HEAP", which the `extern "Rust"` declaration in this
/// module resolves against at link time.
#[macro_export]
macro_rules! configure_heap {
    ($len:expr) => {
        // Backing storage for the heap, owned by the crate that invokes the macro.
        static mut __HEAP_ARRAY: [u8; $len] = [0; $len];
        // Exported under the well-known name "HEAP" so this module's extern
        // declaration binds to it.
        #[export_name = "HEAP"]
        // SAFETY: HEAP will only be accessed once as mut, from init().
        static mut __HEAP: &'static mut [u8] = unsafe { &mut __HEAP_ARRAY };
    };
}
40 
extern "Rust" {
    /// Slice used by the global allocator, configured using configure_heap!().
    ///
    /// Resolved at link time against the `__HEAP` static that configure_heap!()
    /// exports under the name "HEAP"; linking fails if the macro was never invoked.
    static mut HEAP: &'static mut [u8];
}
45 
/// The global allocator: a spin-locked buddy-system heap (the `32` const
/// parameter is the allocator's order — see the `buddy_system_allocator` crate
/// docs). It manages the HEAP slice after init() hands it over.
#[global_allocator]
static HEAP_ALLOCATOR: LockedHeap<32> = LockedHeap::<32>::new();
48 
49 /// Initialize the global allocator.
50 ///
51 /// # Safety
52 ///
53 /// Must be called no more than once.
init()54 pub(crate) unsafe fn init() {
55     // SAFETY: Nothing else accesses this memory, and we hand it over to the heap to manage and
56     // never touch it again. The heap is locked, so there cannot be any races.
57     let (start, size) = unsafe { (HEAP.as_mut_ptr() as usize, HEAP.len()) };
58 
59     let mut heap = HEAP_ALLOCATOR.lock();
60     // SAFETY: We are supplying a valid memory range, and we only do this once.
61     unsafe { heap.init(start, size) };
62 }
63 
/// Allocate an aligned but uninitialized slice of heap.
///
/// Returns `None` if `size` is zero, if `size`/`align` do not form a valid
/// layout, or if the underlying allocation fails.
pub fn aligned_boxed_slice(size: usize, align: usize) -> Option<Box<[u8]>> {
    // GlobalAlloc forbids zero-sized allocations; bail out early on size == 0.
    let size = NonZeroUsize::new(size)?.get();
    let layout = Layout::from_size_align(size, align).ok()?;
    // SAFETY: We verify that `size` and the returned `ptr` are non-null.
    let ptr = unsafe { alloc(layout) };
    let ptr = NonNull::new(ptr)?.as_ptr();
    let slice_ptr = ptr::slice_from_raw_parts_mut(ptr, size);

    // SAFETY: The memory was allocated using the proper layout by our global_allocator.
    // NOTE(review): when this Box is dropped it deallocates with the default
    // `[u8]` layout (alignment 1), not the `align`-aligned layout used above —
    // confirm the global allocator tolerates this layout mismatch on dealloc.
    Some(unsafe { Box::from_raw(slice_ptr) })
}
76 
77 #[no_mangle]
malloc(size: usize) -> *mut c_void78 unsafe extern "C" fn malloc(size: usize) -> *mut c_void {
79     allocate(size, false).map_or(ptr::null_mut(), |p| p.cast::<c_void>().as_ptr())
80 }
81 
82 #[no_mangle]
calloc(nmemb: usize, size: usize) -> *mut c_void83 unsafe extern "C" fn calloc(nmemb: usize, size: usize) -> *mut c_void {
84     let Some(size) = nmemb.checked_mul(size) else { return ptr::null_mut() };
85     allocate(size, true).map_or(ptr::null_mut(), |p| p.cast::<c_void>().as_ptr())
86 }
87 
#[no_mangle]
/// Fortified `memset` (`_FORTIFY_SOURCE` helper): fills `len` bytes at `dest`
/// with `val`, panicking if `len` exceeds the compiler-known destination size
/// `destlen`. Returns `dest`, like `memset`.
unsafe extern "C" fn __memset_chk(
    dest: *mut c_void,
    val: u8,
    len: usize,
    destlen: usize,
) -> *mut c_void {
    // Detect the overflow the fortified call exists to catch.
    assert!(len <= destlen, "memset buffer overflow detected");
    // SAFETY: `dest` is valid for writes of `len` bytes.
    unsafe { ptr::write_bytes(dest.cast::<u8>(), val, len) };
    dest
}
102 
#[no_mangle]
/// SAFETY: ptr must be null or point to a currently-allocated block returned by allocate (either
/// directly or via malloc or calloc). Note that this function is called directly from C, so we have
/// to trust that the C code is doing the right thing; there are checks below which will catch some
/// errors.
unsafe extern "C" fn free(ptr: *mut c_void) {
    // free(NULL) is a no-op, per the C standard.
    let Some(ptr) = NonNull::new(ptr) else { return };
    // SAFETY: The contents of the HEAP slice may change, but the address range never does.
    let heap_range = unsafe { HEAP.as_ptr_range() };
    assert!(
        heap_range.contains(&(ptr.as_ptr() as *const u8)),
        "free() called on a pointer that is not part of the HEAP: {ptr:?}"
    );
    // SAFETY: ptr is non-null and was allocated by allocate, which prepends a correctly aligned
    // usize.
    let (ptr, size) = unsafe {
        // Step back over the size header allocate() wrote directly in front of
        // the user pointer; the stored value is the full layout size.
        let ptr = ptr.cast::<usize>().as_ptr().offset(-1);
        (ptr, *ptr)
    };
    // A zero header means corruption or a pointer we never handed out; abort.
    let size = NonZeroUsize::new(size).unwrap();
    let layout = malloc_layout(size).unwrap();
    // SAFETY: If our precondition is satisfied, then this is a valid currently-allocated block.
    unsafe { HEAP_ALLOCATOR.dealloc(ptr as *mut u8, layout) }
}
127 
128 /// Allocate a block of memory suitable to return from `malloc()` etc. Returns a valid pointer
129 /// to a suitable aligned region of size bytes, optionally zeroed (and otherwise uninitialized), or
130 /// None if size is 0 or allocation fails. The block can be freed by passing the returned pointer to
131 /// `free()`.
allocate(size: usize, zeroed: bool) -> Option<NonNull<usize>>132 fn allocate(size: usize, zeroed: bool) -> Option<NonNull<usize>> {
133     let size = NonZeroUsize::new(size)?.checked_add(mem::size_of::<usize>())?;
134     let layout = malloc_layout(size)?;
135     // SAFETY: layout is known to have non-zero size.
136     let ptr = unsafe {
137         if zeroed {
138             HEAP_ALLOCATOR.alloc_zeroed(layout)
139         } else {
140             HEAP_ALLOCATOR.alloc(layout)
141         }
142     };
143     let ptr = NonNull::new(ptr)?.cast::<usize>().as_ptr();
144     // SAFETY: ptr points to a newly allocated block of memory which is properly aligned
145     // for a usize and is big enough to hold a usize as well as the requested number of
146     // bytes.
147     unsafe {
148         *ptr = size.get();
149         NonNull::new(ptr.offset(1))
150     }
151 }
152 
/// Returns the [`Layout`] used for malloc-style allocations of `size` bytes.
///
/// Returns `None` if the size/alignment pair is rejected by [`Layout`].
fn malloc_layout(size: NonZeroUsize) -> Option<Layout> {
    // We want at least 8 byte alignment, and we need to be able to store a usize.
    const ALIGN: usize = const_max_size(mem::size_of::<u64>(), mem::size_of::<usize>());
    Layout::from_size_align(size.get(), ALIGN).ok()
}

/// Compile-time maximum of two sizes.
const fn const_max_size(a: usize, b: usize) -> usize {
    if a < b {
        b
    } else {
        a
    }
}
166