1 /*
2 * Copyright (C) 2021 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 // Some functions extracted from std include unsafe blocks inside an already
18 // unsafe function. Rather than changing these functions, we allow this
19 // behavior.
20 #![allow(unused_unsafe)]
21
22 use alloc::collections::TryReserveError;
23 use core::cmp;
24 use core::intrinsics;
25 use core::ptr::{self, NonNull};
26
27 #[doc(inline)]
28 pub use alloc::alloc::*;
29
30 #[doc(inline)]
31 pub use alloc::vec::Vec;
32
33 /// A value-to-value conversion that may fallibly allocate. The opposite of
34 /// [`TryAllocFrom`].
35 ///
36 /// See [`core::convert::Into`] for details. This trait is equivalent, with the
37 /// exception that it will attempt to allocate fallibly and return `Err` if it
38 /// cannot.
39 ///
40 /// We can't use [`core::convert::TryInto`] here, as that trait is default
41 /// implemented for any [`core::convert::Into`] implementation and we need to
42 /// explicitly require fallible allocation.
pub trait TryAllocInto<T: Sized> {
    /// Performs the conversion, returning `Err(AllocError)` if the required
    /// allocation cannot be satisfied.
    fn try_alloc_into(self) -> Result<T, AllocError>;
}
47
// Blanket implementation: any `U: TryAllocFrom<T>` automatically gives
// `T: TryAllocInto<U>`, mirroring the std `From`/`Into` relationship.
impl<T, U> TryAllocInto<U> for T
where
    U: TryAllocFrom<T>,
{
    fn try_alloc_into(self) -> Result<U, AllocError> {
        // Delegate directly to the `TryAllocFrom` implementation.
        U::try_alloc_from(self)
    }
}
56
57 /// A value-to-value conversion that may fallibly allocate. The opposite of
58 /// [`TryAllocInto`].
59 ///
60 /// See [`core::convert::From`] for details. This trait is equivalent, with the
61 /// exception that it will attempt to allocate fallibly and return `Err` if it
62 /// cannot.
63 ///
64 /// We can't use [`core::convert::TryFrom`] here, as that trait is default
65 /// implemented for any [`core::convert::Into`] implementation and we need to
66 /// explicitly require fallible allocation.
pub trait TryAllocFrom<T>: Sized {
    /// Performs the conversion, returning `Err(AllocError)` if the required
    /// allocation cannot be satisfied.
    fn try_alloc_from(value: T) -> Result<Self, AllocError>;
}
71
72 impl TryAllocFrom<&str> for Vec<u8> {
try_alloc_from(s: &str) -> Result<Self, AllocError>73 fn try_alloc_from(s: &str) -> Result<Self, AllocError> {
74 let mut vec = Vec::new();
75 vec.try_reserve_exact(s.len()).or(Err(AllocError))?;
76 vec.extend_from_slice(s.as_bytes());
77 Ok(vec)
78 }
79 }
80
81 impl TryAllocFrom<&[u8]> for Vec<u8> {
try_alloc_from(s: &[u8]) -> Result<Self, AllocError>82 fn try_alloc_from(s: &[u8]) -> Result<Self, AllocError> {
83 let mut vec = Vec::new();
84 vec.try_reserve_exact(s.len()).or(Err(AllocError))?;
85 vec.extend_from_slice(s);
86 Ok(vec)
87 }
88 }
89
90 /// Temporary trait to implement the future fallible API for [`Vec`].
91 // This should be removed when https://github.com/rust-lang/rust/pull/91559 or a
92 // similar change is available.
pub trait FallibleVec<T>: Sized {
    /// Tries to append `value` to the end of the vector, returning Err if it
    /// cannot allocate space for the expanded vector. On Err the vector is
    /// left unchanged.
    fn try_push(&mut self, value: T) -> Result<(), TryReserveError>;

    /// Tries to construct a new, empty `Vec<T>` with the specified capacity,
    /// returning Err if the backing storage cannot be allocated.
    fn try_with_capacity(capacity: usize) -> Result<Self, TryReserveError>;
}
101
102 impl<T> FallibleVec<T> for Vec<T> {
try_push(&mut self, value: T) -> Result<(), TryReserveError>103 fn try_push(&mut self, value: T) -> Result<(), TryReserveError> {
104 self.try_reserve(self.len() + 1)?;
105 self.push(value);
106 Ok(())
107 }
108
try_with_capacity(capacity: usize) -> Result<Self, TryReserveError>109 fn try_with_capacity(capacity: usize) -> Result<Self, TryReserveError> {
110 let mut v = Vec::new();
111 v.try_reserve(capacity)?;
112 Ok(v)
113 }
114 }
115
116 /*
117 * We provide the implementation of std::alloc::System here so that we don't
118 * need to maintain a separate allocator implementation.
119 *
120 * The rest of this file is derived from a combination of src/alloc.rs and
121 * src/sys/unix/alloc.rs in the Rust standard library, used under the Apache
122 * License, Version 2.0. The following is the original copyright information
123 * from the Rust project:
124 *
125 * Copyrights in the Rust project are retained by their contributors. No
126 * copyright assignment is required to contribute to the Rust project.
127 *
128 * Some files include explicit copyright notices and/or license notices.
129 * For full authorship information, see the version control history or
130 * https://thanks.rust-lang.org
131 *
132 * Except as otherwise noted (below and/or in individual files), Rust is
133 * licensed under the Apache License, Version 2.0 <LICENSE-APACHE> or
134 * <http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
135 * <LICENSE-MIT> or <http://opensource.org/licenses/MIT>, at your option.
136 */
137 /// The default memory allocator provided by Trusty.
138 ///
139 /// This allocator is extracted from the Rust std library for Unix and adapted
140 /// for use in Trusty. Internally it currently uses `malloc` from the musl libc.
141 ///
142 /// This type implements the `GlobalAlloc` trait and Rust programs by default
143 /// work as if they had this definition:
144 ///
145 /// ```rust
146 /// use std::alloc::System;
147 ///
148 /// #[global_allocator]
149 /// static A: System = System;
150 ///
151 /// fn main() {
152 /// let a = Box::new(4); // Allocates from the system allocator.
153 /// println!("{}", a);
154 /// }
155 /// ```
156 ///
157 /// You can also define your own wrapper around `System` if you'd like, such as
158 /// keeping track of the number of all bytes allocated:
159 ///
160 /// ```rust
161 /// use std::alloc::{System, GlobalAlloc, Layout};
162 /// use std::sync::atomic::{AtomicUsize, Ordering::SeqCst};
163 ///
164 /// struct Counter;
165 ///
166 /// static ALLOCATED: AtomicUsize = AtomicUsize::new(0);
167 ///
168 /// unsafe impl GlobalAlloc for Counter {
169 /// unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
170 /// let ret = System.alloc(layout);
171 /// if !ret.is_null() {
172 /// ALLOCATED.fetch_add(layout.size(), SeqCst);
173 /// }
174 /// return ret
175 /// }
176 ///
177 /// unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
178 /// System.dealloc(ptr, layout);
179 /// ALLOCATED.fetch_sub(layout.size(), SeqCst);
180 /// }
181 /// }
182 ///
183 /// #[global_allocator]
184 /// static A: Counter = Counter;
185 ///
186 /// fn main() {
187 /// println!("allocated bytes before main: {}", ALLOCATED.load(SeqCst));
188 /// }
189 /// ```
190 ///
191 /// It can also be used directly to allocate memory independently of whatever
192 /// global allocator has been selected for a Rust program. For example if a Rust
/// program opts in to using jemalloc as the global allocator, `System` will
/// still allocate memory using `malloc`.
#[derive(Debug, Default, Copy, Clone)]
pub struct System;

// Register `System` as the global allocator for all Rust code in this binary.
#[global_allocator]
static A: System = System;
200
impl System {
    /// Shared implementation for `Allocator::allocate` and
    /// `Allocator::allocate_zeroed`: allocates `layout.size()` bytes (zeroed
    /// when `zeroed` is true) and returns them as a `NonNull` byte slice, or
    /// `AllocError` on failure.
    #[inline]
    fn alloc_impl(&self, layout: Layout, zeroed: bool) -> Result<NonNull<[u8]>, AllocError> {
        match layout.size() {
            // Zero-sized requests never touch the heap; a well-aligned
            // dangling pointer satisfies the `Allocator` contract.
            0 => Ok(NonNull::slice_from_raw_parts(layout.dangling(), 0)),
            // SAFETY: `layout` is non-zero in size,
            size => unsafe {
                let raw_ptr = if zeroed {
                    GlobalAlloc::alloc_zeroed(self, layout)
                } else {
                    GlobalAlloc::alloc(self, layout)
                };
                // A null return from the underlying allocator means failure.
                let ptr = NonNull::new(raw_ptr).ok_or(AllocError)?;
                Ok(NonNull::slice_from_raw_parts(ptr, size))
            },
        }
    }

    /// Shared implementation for `Allocator::grow` and
    /// `Allocator::grow_zeroed`: resizes the block at `ptr` from
    /// `old_layout` to the larger `new_layout`, zeroing the newly added tail
    /// when `zeroed` is true.
    ///
    /// # Safety
    /// Same as `Allocator::grow`: `ptr` must denote a block currently
    /// allocated by this allocator with `old_layout`, and
    /// `new_layout.size()` must be greater than or equal to
    /// `old_layout.size()`.
    #[inline]
    unsafe fn grow_impl(
        &self,
        ptr: NonNull<u8>,
        old_layout: Layout,
        new_layout: Layout,
        zeroed: bool,
    ) -> Result<NonNull<[u8]>, AllocError> {
        debug_assert!(
            new_layout.size() >= old_layout.size(),
            "`new_layout.size()` must be greater than or equal to `old_layout.size()`"
        );

        match old_layout.size() {
            // Nothing was actually allocated before (see `alloc_impl`), so
            // growing is just a fresh allocation.
            0 => self.alloc_impl(new_layout, zeroed),

            // SAFETY: `new_size` is non-zero as `new_size` is greater than or equal to `old_size`
            // as required by safety conditions and the `old_size == 0` case was handled in the
            // previous match arm. Other conditions must be upheld by the caller
            old_size if old_layout.align() == new_layout.align() => unsafe {
                let new_size = new_layout.size();

                // `realloc` probably checks for `new_size >= old_layout.size()` or something similar.
                intrinsics::assume(new_size >= old_layout.size());

                let raw_ptr = GlobalAlloc::realloc(self, ptr.as_ptr(), old_layout, new_size);
                let ptr = NonNull::new(raw_ptr).ok_or(AllocError)?;
                if zeroed {
                    // `realloc` does not zero the grown tail; do it here.
                    raw_ptr.add(old_size).write_bytes(0, new_size - old_size);
                }
                Ok(NonNull::slice_from_raw_parts(ptr, new_size))
            },

            // SAFETY: because `new_layout.size()` must be greater than or equal to `old_size`,
            // both the old and new memory allocation are valid for reads and writes for `old_size`
            // bytes. Also, because the old allocation wasn't yet deallocated, it cannot overlap
            // `new_ptr`. Thus, the call to `copy_nonoverlapping` is safe. The safety contract
            // for `dealloc` must be upheld by the caller.
            old_size => unsafe {
                // Alignment changed, so `realloc` cannot be used: allocate a
                // new block, copy the old contents, and free the old block.
                let new_ptr = self.alloc_impl(new_layout, zeroed)?;
                ptr::copy_nonoverlapping(ptr.as_ptr(), new_ptr.as_mut_ptr(), old_size);
                Allocator::deallocate(&self, ptr, old_layout);
                Ok(new_ptr)
            },
        }
    }
}
267
268 // The Allocator impl checks the layout size to be non-zero and forwards to the GlobalAlloc impl,
269 // which is in `std::sys::*::alloc`.
unsafe impl Allocator for System {
    #[inline]
    fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
        self.alloc_impl(layout, false)
    }

    #[inline]
    fn allocate_zeroed(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
        self.alloc_impl(layout, true)
    }

    #[inline]
    unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
        // Zero-sized "allocations" are dangling pointers from `alloc_impl`
        // and were never handed to the underlying allocator, so there is
        // nothing to free for them.
        if layout.size() != 0 {
            // SAFETY: `layout` is non-zero in size,
            // other conditions must be upheld by the caller
            unsafe { GlobalAlloc::dealloc(self, ptr.as_ptr(), layout) }
        }
    }

    #[inline]
    unsafe fn grow(
        &self,
        ptr: NonNull<u8>,
        old_layout: Layout,
        new_layout: Layout,
    ) -> Result<NonNull<[u8]>, AllocError> {
        // SAFETY: all conditions must be upheld by the caller
        unsafe { self.grow_impl(ptr, old_layout, new_layout, false) }
    }

    #[inline]
    unsafe fn grow_zeroed(
        &self,
        ptr: NonNull<u8>,
        old_layout: Layout,
        new_layout: Layout,
    ) -> Result<NonNull<[u8]>, AllocError> {
        // SAFETY: all conditions must be upheld by the caller
        unsafe { self.grow_impl(ptr, old_layout, new_layout, true) }
    }

    #[inline]
    unsafe fn shrink(
        &self,
        ptr: NonNull<u8>,
        old_layout: Layout,
        new_layout: Layout,
    ) -> Result<NonNull<[u8]>, AllocError> {
        debug_assert!(
            new_layout.size() <= old_layout.size(),
            "`new_layout.size()` must be smaller than or equal to `old_layout.size()`"
        );

        match new_layout.size() {
            // Shrinking to zero bytes frees the block and returns a dangling
            // (but well-aligned) empty slice, matching `alloc_impl`.
            // SAFETY: conditions must be upheld by the caller
            0 => unsafe {
                Allocator::deallocate(&self, ptr, old_layout);
                Ok(NonNull::slice_from_raw_parts(new_layout.dangling(), 0))
            },

            // SAFETY: `new_size` is non-zero. Other conditions must be upheld by the caller
            new_size if old_layout.align() == new_layout.align() => unsafe {
                // `realloc` probably checks for `new_size <= old_layout.size()` or something similar.
                intrinsics::assume(new_size <= old_layout.size());

                let raw_ptr = GlobalAlloc::realloc(self, ptr.as_ptr(), old_layout, new_size);
                let ptr = NonNull::new(raw_ptr).ok_or(AllocError)?;
                Ok(NonNull::slice_from_raw_parts(ptr, new_size))
            },

            // SAFETY: because `new_size` must be smaller than or equal to `old_layout.size()`,
            // both the old and new memory allocation are valid for reads and writes for `new_size`
            // bytes. Also, because the old allocation wasn't yet deallocated, it cannot overlap
            // `new_ptr`. Thus, the call to `copy_nonoverlapping` is safe. The safety contract
            // for `dealloc` must be upheld by the caller.
            new_size => unsafe {
                // Alignment changed, so `realloc` cannot be used: allocate a
                // smaller block, copy the prefix that fits, free the old one.
                let new_ptr = Allocator::allocate(&self, new_layout)?;
                ptr::copy_nonoverlapping(ptr.as_ptr(), new_ptr.as_mut_ptr(), new_size);
                Allocator::deallocate(&self, ptr, old_layout);
                Ok(new_ptr)
            },
        }
    }
}
355
// Minimum alignment that plain `malloc` is assumed to guarantee on each
// architecture, mirroring the per-target values in the Rust standard
// library's unix allocator. Requests with larger alignment go through
// `memalign` instead.
// NOTE(review): assumed to match the `max_align_t` guarantee of the musl
// libc used by Trusty on these targets — confirm against that libc.
#[cfg(any(target_arch = "x86", target_arch = "arm"))]
const MIN_ALIGN: usize = 8;
#[cfg(any(target_arch = "x86_64", target_arch = "aarch64"))]
const MIN_ALIGN: usize = 16;
360
realloc_fallback( alloc: &System, ptr: *mut u8, old_layout: Layout, new_size: usize, ) -> *mut u8361 unsafe fn realloc_fallback(
362 alloc: &System,
363 ptr: *mut u8,
364 old_layout: Layout,
365 new_size: usize,
366 ) -> *mut u8 {
367 // Docs for GlobalAlloc::realloc require this to be valid:
368 let new_layout = Layout::from_size_align_unchecked(new_size, old_layout.align());
369
370 let new_ptr = GlobalAlloc::alloc(alloc, new_layout);
371 if !new_ptr.is_null() {
372 let size = cmp::min(old_layout.size(), new_size);
373 ptr::copy_nonoverlapping(ptr, new_ptr, size);
374 GlobalAlloc::dealloc(alloc, ptr, old_layout);
375 }
376 new_ptr
377 }
378
unsafe impl GlobalAlloc for System {
    #[inline]
    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
        // jemalloc provides alignment less than MIN_ALIGN for small allocations.
        // So only rely on MIN_ALIGN if size >= align.
        // Also see <https://github.com/rust-lang/rust/issues/45955> and
        // <https://github.com/rust-lang/rust/issues/62251#issuecomment-507580914>.
        if layout.align() <= MIN_ALIGN && layout.align() <= layout.size() {
            libc::malloc(layout.size()) as *mut u8
        } else {
            // NOTE(review): this macOS guard is a leftover from the std
            // implementation this file was derived from and is dead code on
            // Trusty targets; kept to minimize divergence from upstream.
            #[cfg(target_os = "macos")]
            {
                if layout.align() > (1 << 31) {
                    return ptr::null_mut();
                }
            }
            // Over-aligned request: let `memalign` honor the alignment.
            libc::memalign(layout.align(), layout.size()) as *mut u8
        }
    }

    #[inline]
    unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 {
        // See the comment above in `alloc` for why this check looks the way it does.
        if layout.align() <= MIN_ALIGN && layout.align() <= layout.size() {
            // `calloc` zeroes the memory for us.
            libc::calloc(layout.size(), 1) as *mut u8
        } else {
            // Over-aligned request: allocate via `alloc` (memalign path) and
            // zero the block manually.
            let ptr = self.alloc(layout);
            if !ptr.is_null() {
                ptr::write_bytes(ptr, 0, layout.size());
            }
            ptr
        }
    }

    #[inline]
    unsafe fn dealloc(&self, ptr: *mut u8, _layout: Layout) {
        // libc's `free` does not need the layout.
        libc::free(ptr as *mut libc::c_void)
    }

    #[inline]
    unsafe fn realloc(&self, ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 {
        // `realloc` only guarantees MIN_ALIGN alignment (and only when the
        // new size is at least the alignment — see `alloc` above), so fall
        // back to alloc+copy+free for over-aligned layouts.
        if layout.align() <= MIN_ALIGN && layout.align() <= new_size {
            libc::realloc(ptr as *mut libc::c_void, new_size) as *mut u8
        } else {
            realloc_fallback(self, ptr, layout, new_size)
        }
    }
}
427