/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// Header page:
//
// For the minimum allocation size (8 bytes), the bitmap can track used
// allocations for up to 4032*8*8 = 258048 bytes, which is 256KiB minus the
// header page.

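// (Layout note, from the code below: each Chunk is a kChunkSize mapping
// aligned to kChunkSize. The Chunk header and its free bitmap occupy the start
// of the mapping, and the remainder is divided into equal-sized slots for a
// single bucket size.)
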
#include <assert.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include <sys/cdefs.h>
#include <sys/mman.h>
#include <sys/prctl.h>

#include <atomic>
#include <cmath>
#include <cstddef>
#include <cstdint>
#include <memory>
#include <mutex>

#include "android-base/macros.h"

#include "Allocator.h"
#include "LinkedList.h"

namespace android {

// runtime interfaces used:
// abort
// assert - fprintf + mmap
// mmap
// munmap
// prctl

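// Compile-time helpers. const_log2 rounds down, e.g. const_log2(8) == 3 and
// const_log2(15) == 3; round_up_to_multiple(100, 16) == 112.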
constexpr size_t const_log2(size_t n, size_t p = 0) {
  return (n <= 1) ? p : const_log2(n / 2, p + 1);
}

constexpr unsigned int round_up_to_multiple(unsigned int x, unsigned int y) {
  return y * ((x + y - 1) / y);
}

static const size_t kPageSize = getpagesize();
static constexpr size_t kChunkSize = 256 * 1024;
static constexpr size_t kMaxBucketAllocationSize = kChunkSize / 4;
static constexpr size_t kMinBucketAllocationSize = 8;
static constexpr unsigned int kNumBuckets =
    const_log2(kMaxBucketAllocationSize) - const_log2(kMinBucketAllocationSize) + 1;
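// With the constants above this works out to 14 buckets, covering allocation
// sizes of 8, 16, ..., up to kMaxBucketAllocationSize = 64KiB
// (const_log2(65536) - const_log2(8) + 1 == 16 - 3 + 1 == 14).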

std::atomic<int> heap_count;

class Chunk;

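// HeapImpl does the real work: for each bucket it keeps one list of chunks
// that still have free slots and one list of full chunks, plus a singly-linked
// list of large mmap allocations that bypass the buckets entirely.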
class HeapImpl {
 public:
  HeapImpl();
  ~HeapImpl();
  void* operator new(std::size_t count) noexcept;
  void operator delete(void* ptr);

  void* Alloc(size_t size);
  void Free(void* ptr);
  bool Empty();

  void MoveToFullList(Chunk* chunk, int bucket_);
  void MoveToFreeList(Chunk* chunk, int bucket_);

 private:
  DISALLOW_COPY_AND_ASSIGN(HeapImpl);

  LinkedList<Chunk*> free_chunks_[kNumBuckets];
  LinkedList<Chunk*> full_chunks_[kNumBuckets];

  void MoveToList(Chunk* chunk, LinkedList<Chunk*>* head);
  void* MapAlloc(size_t size);
  void MapFree(void* ptr);
  void* AllocLocked(size_t size);
  void FreeLocked(void* ptr);

  struct MapAllocation {
    void* ptr;
    size_t size;
    MapAllocation* next;
  };
  MapAllocation* map_allocation_list_;
  std::mutex m_;
};

// Integer log 2, rounds down
static inline unsigned int log2(size_t n) {
  return 8 * sizeof(unsigned long long) - __builtin_clzll(n) - 1;
}

static inline unsigned int size_to_bucket(size_t size) {
  if (size < kMinBucketAllocationSize) return kMinBucketAllocationSize;
  return log2(size - 1) + 1 - const_log2(kMinBucketAllocationSize);
}

static inline size_t bucket_to_size(unsigned int bucket) {
  return kMinBucketAllocationSize << bucket;
}
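// e.g. size_to_bucket(8) == 0 (the 8-byte bucket), size_to_bucket(9) == 1 and
// bucket_to_size(1) == 16, so a 9-byte request is served from the 16-byte bucket.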

static void* MapAligned(size_t size, size_t align) {
  const int prot = PROT_READ | PROT_WRITE;
  const int flags = MAP_ANONYMOUS | MAP_PRIVATE;

  size = (size + kPageSize - 1) & ~(kPageSize - 1);

  // Over-allocate enough to align
  size_t map_size = size + align - kPageSize;
  if (map_size < size) {
    return nullptr;
  }

  void* ptr = mmap(NULL, map_size, prot, flags, -1, 0);
  if (ptr == MAP_FAILED) {
    return nullptr;
  }

  size_t aligned_size = map_size;
  void* aligned_ptr = ptr;

  std::align(align, size, aligned_ptr, aligned_size);

  // Trim beginning
  if (aligned_ptr != ptr) {
    ptrdiff_t extra = reinterpret_cast<uintptr_t>(aligned_ptr) - reinterpret_cast<uintptr_t>(ptr);
    munmap(ptr, extra);
    map_size -= extra;
    ptr = aligned_ptr;
  }

  // Trim end
  if (map_size != size) {
    assert(map_size > size);
    assert(ptr != NULL);
    munmap(reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(ptr) + size), map_size - size);
  }

#if defined(PR_SET_VMA)
  prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, reinterpret_cast<uintptr_t>(ptr), size,
        "leak_detector_malloc");
#endif

  return ptr;
}

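// A Chunk is a kChunkSize-aligned region that serves allocations of a single
// bucket size. Free slots are tracked in free_bitmap_ (a set bit means the slot
// is free), and the slots themselves start at data_, immediately after the header.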
class Chunk {
 public:
  static void* operator new(std::size_t count) noexcept;
  static void operator delete(void* ptr);
  Chunk(HeapImpl* heap, int bucket);
  ~Chunk() {}

  void* Alloc();
  void Free(void* ptr);
  bool Empty();

  // Rounds an interior pointer down to the Chunk that contains it.
  static Chunk* ptr_to_chunk(void* ptr) {
    return reinterpret_cast<Chunk*>(reinterpret_cast<uintptr_t>(ptr) & ~(kChunkSize - 1));
  }
  // Pointers handed out by a Chunk are never kChunkSize-aligned (the header
  // occupies the start of the chunk), while large MapAligned allocations are,
  // so alignment distinguishes bucket allocations from map allocations.
  static bool is_chunk(void* ptr) {
    return (reinterpret_cast<uintptr_t>(ptr) & (kChunkSize - 1)) != 0;
  }

  unsigned int free_count() { return free_count_; }
  HeapImpl* heap() { return heap_; }
  LinkedList<Chunk*> node_;  // linked list sorted by minimum free count

 private:
  DISALLOW_COPY_AND_ASSIGN(Chunk);
  HeapImpl* heap_;
  unsigned int bucket_;
  unsigned int allocation_size_;    // size of allocations in chunk, min 8 bytes
  unsigned int max_allocations_;    // maximum number of allocations in the chunk
  unsigned int first_free_bitmap_;  // index into bitmap for first non-full entry
  unsigned int free_count_;         // number of available allocations

  // bitmap of free allocations.
  uint32_t free_bitmap_[kChunkSize / kMinBucketAllocationSize / 32];

  // Allocation slots begin here, after the header.
  std::max_align_t data_[0];

  unsigned int ptr_to_n(void* ptr) {
    ptrdiff_t offset = reinterpret_cast<uintptr_t>(ptr) - reinterpret_cast<uintptr_t>(data_);
    return offset / allocation_size_;
  }
  void* n_to_ptr(unsigned int n) {
    return reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(data_) + n * allocation_size_);
  }
};

// Override new operator on chunk to use mmap to allocate kChunkSize
void* Chunk::operator new(std::size_t count __attribute__((unused))) noexcept {
  assert(count == sizeof(Chunk));
  void* mem = MapAligned(kChunkSize, kChunkSize);
  if (!mem) {
    abort();  // throw std::bad_alloc;
  }

  return mem;
}

// Override delete operator on chunk to unmap the kChunkSize allocation
void Chunk::operator delete(void* ptr) {
  assert(reinterpret_cast<Chunk*>(ptr) == ptr_to_chunk(ptr));
  munmap(ptr, kChunkSize);
}

Chunk::Chunk(HeapImpl* heap, int bucket)
    : node_(this),
      heap_(heap),
      bucket_(bucket),
      allocation_size_(bucket_to_size(bucket)),
      first_free_bitmap_(0) {
  const size_t usable_chunk_size =
          kChunkSize - round_up_to_multiple(sizeof(Chunk), sizeof(std::max_align_t));
  max_allocations_ = usable_chunk_size / allocation_size_;
  free_count_ = max_allocations_;
  memset(free_bitmap_, 0xff, sizeof(free_bitmap_));
}

bool Chunk::Empty() {
  return free_count_ == max_allocations_;
}

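// Claims the first free slot found by scanning the free bitmap, starting from
// first_free_bitmap_. When the last slot is taken the chunk moves to the heap's
// full list so it is no longer considered for allocation.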
void* Chunk::Alloc() {
  assert(free_count_ > 0);

  unsigned int i = first_free_bitmap_;
  while (free_bitmap_[i] == 0) i++;
  assert(i < arraysize(free_bitmap_));
  unsigned int bit = __builtin_ffs(free_bitmap_[i]) - 1;
  assert(free_bitmap_[i] & (1U << bit));
  free_bitmap_[i] &= ~(1U << bit);
  unsigned int n = i * 32 + bit;
  assert(n < max_allocations_);

  free_count_--;
  if (free_count_ == 0) {
    heap_->MoveToFullList(this, bucket_);
  }

  return n_to_ptr(n);
}

void Chunk::Free(void* ptr) {
  assert(is_chunk(ptr));
  assert(ptr_to_chunk(ptr) == this);

  unsigned int n = ptr_to_n(ptr);
  unsigned int i = n / 32;
  unsigned int bit = n % 32;

  assert(i < arraysize(free_bitmap_));
  assert(!(free_bitmap_[i] & (1U << bit)));
  free_bitmap_[i] |= 1U << bit;
  free_count_++;

  if (i < first_free_bitmap_) {
    first_free_bitmap_ = i;
  }

  if (free_count_ == 1) {
    heap_->MoveToFreeList(this, bucket_);
  } else {
    // TODO(ccross): move down free list if necessary
  }
}

// Override new operator on HeapImpl to use mmap to allocate a page
void* HeapImpl::operator new(std::size_t count __attribute__((unused))) noexcept {
  assert(count == sizeof(HeapImpl));
  void* mem = MapAligned(kPageSize, kPageSize);
  if (!mem) {
    abort();  // throw std::bad_alloc;
  }

  heap_count++;
  return mem;
}

void HeapImpl::operator delete(void* ptr) {
  munmap(ptr, kPageSize);
}

HeapImpl::HeapImpl() : free_chunks_(), full_chunks_(), map_allocation_list_(NULL) {}

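// A heap is considered empty when every chunk on every bucket list has all of
// its slots free; outstanding map allocations are not checked here.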
bool HeapImpl::Empty() {
  for (unsigned int i = 0; i < kNumBuckets; i++) {
    for (LinkedList<Chunk*>* it = free_chunks_[i].next(); it->data() != NULL; it = it->next()) {
      if (!it->data()->Empty()) {
        return false;
      }
    }
    for (LinkedList<Chunk*>* it = full_chunks_[i].next(); it->data() != NULL; it = it->next()) {
      if (!it->data()->Empty()) {
        return false;
      }
    }
  }

  return true;
}

HeapImpl::~HeapImpl() {
  for (unsigned int i = 0; i < kNumBuckets; i++) {
    while (!free_chunks_[i].empty()) {
      Chunk* chunk = free_chunks_[i].next()->data();
      chunk->node_.remove();
      delete chunk;
    }
    while (!full_chunks_[i].empty()) {
      Chunk* chunk = full_chunks_[i].next()->data();
      chunk->node_.remove();
      delete chunk;
    }
  }
}

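// Requests larger than kMaxBucketAllocationSize (64KiB) go straight to
// MapAlloc; everything else is rounded up to its bucket size and served from
// that bucket's first chunk with a free slot, creating a new Chunk if needed.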
void* HeapImpl::Alloc(size_t size) {
  std::lock_guard<std::mutex> lk(m_);
  return AllocLocked(size);
}

void* HeapImpl::AllocLocked(size_t size) {
  if (size > kMaxBucketAllocationSize) {
    return MapAlloc(size);
  }
  int bucket = size_to_bucket(size);
  if (free_chunks_[bucket].empty()) {
    Chunk* chunk = new Chunk(this, bucket);
    free_chunks_[bucket].insert(chunk->node_);
  }
  return free_chunks_[bucket].next()->data()->Alloc();
}

void HeapImpl::Free(void* ptr) {
  std::lock_guard<std::mutex> lk(m_);
  FreeLocked(ptr);
}

void HeapImpl::FreeLocked(void* ptr) {
  if (!Chunk::is_chunk(ptr)) {
    HeapImpl::MapFree(ptr);
  } else {
    Chunk* chunk = Chunk::ptr_to_chunk(ptr);
    assert(chunk->heap() == this);
    chunk->Free(ptr);
  }
}

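// Large allocations are tracked on map_allocation_list_; the MapAllocation
// bookkeeping node itself is a small allocation taken from the bucketed heap,
// while the user's memory comes from a dedicated kChunkSize-aligned mapping.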
void* HeapImpl::MapAlloc(size_t size) {
  size = (size + kPageSize - 1) & ~(kPageSize - 1);

  MapAllocation* allocation = reinterpret_cast<MapAllocation*>(AllocLocked(sizeof(MapAllocation)));
  void* ptr = MapAligned(size, kChunkSize);
  if (!ptr) {
    FreeLocked(allocation);
    abort();  // throw std::bad_alloc;
  }
  allocation->ptr = ptr;
  allocation->size = size;
  allocation->next = map_allocation_list_;
  map_allocation_list_ = allocation;

  return ptr;
}

void HeapImpl::MapFree(void* ptr) {
  MapAllocation** allocation = &map_allocation_list_;
  while (*allocation && (*allocation)->ptr != ptr) allocation = &(*allocation)->next;

  assert(*allocation != nullptr);

  munmap((*allocation)->ptr, (*allocation)->size);

  // Unlink the bookkeeping node before returning it to the heap so its next
  // pointer is not read after the node has been freed.
  MapAllocation* next = (*allocation)->next;
  FreeLocked(*allocation);
  *allocation = next;
}

void HeapImpl::MoveToFreeList(Chunk* chunk, int bucket) {
  MoveToList(chunk, &free_chunks_[bucket]);
}

void HeapImpl::MoveToFullList(Chunk* chunk, int bucket) {
  MoveToList(chunk, &full_chunks_[bucket]);
}

void HeapImpl::MoveToList(Chunk* chunk, LinkedList<Chunk*>* head) {
  // Remove from old list
  chunk->node_.remove();

  LinkedList<Chunk*>* node = head;
  // Insert into new list, sorted by lowest free count
  while (node->next() != head && node->data() != nullptr &&
         node->data()->free_count() < chunk->free_count())
    node = node->next();

  node->insert(chunk->node_);
}

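// Heap is the thin public wrapper around HeapImpl declared in Allocator.h.
// A minimal usage sketch, using only the interfaces defined in this file:
//
//   Heap heap;
//   void* p = heap.allocate(32);   // falls into the 32-byte bucket
//   heap.deallocate(p);            // returned to the owning chunk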
Heap::Heap() {
  // HeapImpl overloads the operator new in order to mmap itself instead of
  // allocating with new.
  // Can't use a shared_ptr to store the result because shared_ptr needs to
  // allocate, and Allocator<T> is still being constructed.
  impl_ = new HeapImpl();
  owns_impl_ = true;
}

Heap::~Heap() {
  if (owns_impl_) {
    delete impl_;
  }
}

void* Heap::allocate(size_t size) {
  return impl_->Alloc(size);
}

void Heap::deallocate(void* ptr) {
  impl_->Free(ptr);
}

void Heap::deallocate(HeapImpl* impl, void* ptr) {
  impl->Free(ptr);
}

bool Heap::empty() {
  return impl_->Empty();
}

}  // namespace android