/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "dlmalloc_space-inl.h"

#include <sys/mman.h>

#include "base/logging.h"  // For VLOG.
#include "base/time_utils.h"
#include "base/utils.h"
#include "gc/accounting/card_table.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/heap.h"
#include "jit/jit.h"
#include "jit/jit_code_cache.h"
#include "memory_tool_malloc_space-inl.h"
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
#include "runtime.h"
#include "scoped_thread_state_change-inl.h"
#include "thread.h"
#include "thread_list.h"

namespace art HIDDEN {
namespace gc {
namespace space {

static constexpr bool kPrefetchDuringDlMallocFreeList = true;

// Callback for mspace_inspect_all that will madvise(2) unused pages back to
// the kernel.
void DlmallocMadviseCallback(void* start, void* end, size_t used_bytes, void* arg) {
  // Is this chunk in use?
  if (used_bytes != 0) {
    return;
  }
  // Do we have any whole pages to give back?
  start = reinterpret_cast<void*>(art::RoundUp(reinterpret_cast<uintptr_t>(start), art::gPageSize));
  end = reinterpret_cast<void*>(art::RoundDown(reinterpret_cast<uintptr_t>(end), art::gPageSize));
  if (end > start) {
    size_t length = reinterpret_cast<uint8_t*>(end) - reinterpret_cast<uint8_t*>(start);
    int rc = madvise(start, length, MADV_DONTNEED);
    if (UNLIKELY(rc != 0)) {
      errno = rc;
      PLOG(FATAL) << "madvise failed during heap trimming";
    }
    size_t* reclaimed = reinterpret_cast<size_t*>(arg);
    *reclaimed += length;
  }
}

// Callback for mspace_inspect_all that will count the number of bytes
// allocated.
void DlmallocBytesAllocatedCallback([[maybe_unused]] void* start,
                                    [[maybe_unused]] void* end,
                                    size_t used_bytes,
                                    void* arg) {
  if (used_bytes == 0) {
    return;
  }
  size_t* bytes_allocated = reinterpret_cast<size_t*>(arg);
  *bytes_allocated += used_bytes + sizeof(size_t);
}

// Callback for mspace_inspect_all that will count the number of objects
// allocated.
void DlmallocObjectsAllocatedCallback([[maybe_unused]] void* start,
                                      [[maybe_unused]] void* end,
                                      size_t used_bytes,
                                      void* arg) {
  if (used_bytes == 0) {
    return;
  }
  size_t* objects_allocated = reinterpret_cast<size_t*>(arg);
  ++(*objects_allocated);
}

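// The bounds, bitmaps and backing MemMap are managed by the MallocSpace base class; this
// subclass only records the dlmalloc mspace that services its allocations.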
DlMallocSpace::DlMallocSpace(MemMap&& mem_map,
                             size_t initial_size,
                             const std::string& name,
                             void* mspace,
                             uint8_t* begin,
                             uint8_t* end,
                             uint8_t* limit,
                             size_t growth_limit,
                             bool can_move_objects,
                             size_t starting_size)
    : MallocSpace(name,
                  std::move(mem_map),
                  begin,
                  end,
                  limit,
                  growth_limit,
                  /* create_bitmaps= */ true,
                  can_move_objects,
                  starting_size,
                  initial_size),
      mspace_(mspace) {
  CHECK(mspace != nullptr);
}

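// Builds a DlMallocSpace on top of an already reserved MemMap: creates the mspace over the
// first starting_size bytes, protects the remainder until morecore grows into it, and wraps the
// space for the memory tool when one is active.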
DlMallocSpace* DlMallocSpace::CreateFromMemMap(MemMap&& mem_map,
                                               const std::string& name,
                                               size_t starting_size,
                                               size_t initial_size,
                                               size_t growth_limit,
                                               size_t capacity,
                                               bool can_move_objects) {
  DCHECK(mem_map.IsValid());
  void* mspace = CreateMspace(mem_map.Begin(), starting_size, initial_size);
  if (mspace == nullptr) {
    LOG(ERROR) << "Failed to initialize mspace for alloc space (" << name << ")";
    return nullptr;
  }

  // Protect memory beyond the starting size. morecore will add r/w permissions when necessary.
  uint8_t* end = mem_map.Begin() + starting_size;
  if (capacity - starting_size > 0) {
    CheckedCall(mprotect, name.c_str(), end, capacity - starting_size, PROT_NONE);
  }

  // Everything is set so record in immutable structure and leave
  uint8_t* begin = mem_map.Begin();
  if (Runtime::Current()->IsRunningOnMemoryTool()) {
    return new MemoryToolMallocSpace<DlMallocSpace, kDefaultMemoryToolRedZoneBytes, true, false>(
        std::move(mem_map),
        initial_size,
        name,
        mspace,
        begin,
        end,
        begin + capacity,
        growth_limit,
        can_move_objects,
        starting_size);
  } else {
    return new DlMallocSpace(std::move(mem_map),
                             initial_size,
                             name,
                             mspace,
                             begin,
                             end,
                             begin + capacity,
                             growth_limit,
                             can_move_objects,
                             starting_size);
  }
}

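// Reserves a fresh MemMap of the requested capacity and delegates construction of the space to
// CreateFromMemMap.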
DlMallocSpace* DlMallocSpace::Create(const std::string& name,
                                     size_t initial_size,
                                     size_t growth_limit,
                                     size_t capacity,
                                     bool can_move_objects) {
  uint64_t start_time = 0;
  if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
    start_time = NanoTime();
    LOG(INFO) << "DlMallocSpace::Create entering " << name
              << " initial_size=" << PrettySize(initial_size)
              << " growth_limit=" << PrettySize(growth_limit)
              << " capacity=" << PrettySize(capacity);
  }

  // Memory we promise to dlmalloc before it asks for morecore.
  // Note: making this value large means that large allocations are unlikely to succeed as dlmalloc
  // will ask for this memory from sys_alloc which will fail as the footprint (this value plus the
  // size of the large allocation) will be greater than the footprint limit.
  size_t starting_size = gPageSize;
  MemMap mem_map = CreateMemMap(name, starting_size, &initial_size, &growth_limit, &capacity);
  if (!mem_map.IsValid()) {
    LOG(ERROR) << "Failed to create mem map for alloc space (" << name << ") of size "
               << PrettySize(capacity);
    return nullptr;
  }
  DlMallocSpace* space = CreateFromMemMap(std::move(mem_map),
                                          name,
                                          starting_size,
                                          initial_size,
                                          growth_limit,
                                          capacity,
                                          can_move_objects);
  // We start out with only the initial size possibly containing objects.
  if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
    LOG(INFO) << "DlMallocSpace::Create exiting (" << PrettyDuration(NanoTime() - start_time)
              << " ) " << *space;
  }
  return space;
}

void* DlMallocSpace::CreateMspace(void* begin, size_t morecore_start, size_t initial_size) {
  // clear errno to allow PLOG on error
  errno = 0;
  // create mspace using our backing storage starting at begin and with a footprint of
  // morecore_start. Don't use an internal dlmalloc lock (as we already hold heap lock). When
  // morecore_start bytes of memory are exhausted, morecore will be called.
  void* msp = create_mspace_with_base(begin, morecore_start, 0 /*locked*/);
  if (msp != nullptr) {
    // Do not allow morecore requests to succeed beyond the initial size of the heap
    mspace_set_footprint_limit(msp, initial_size);
  } else {
    PLOG(ERROR) << "create_mspace_with_base failed";
  }
  return msp;
}

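// Allocation slow path: temporarily raise the footprint limit to the full capacity, retry the
// allocation, then shrink the limit back down to the actual footprint.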
mirror::Object* DlMallocSpace::AllocWithGrowth(Thread* self, size_t num_bytes,
                                               size_t* bytes_allocated, size_t* usable_size,
                                               size_t* bytes_tl_bulk_allocated) {
  mirror::Object* result;
  {
    MutexLock mu(self, lock_);
    // Grow as much as possible within the space.
    size_t max_allowed = Capacity();
    mspace_set_footprint_limit(mspace_, max_allowed);
    // Try the allocation.
    result = AllocWithoutGrowthLocked(self, num_bytes, bytes_allocated, usable_size,
                                      bytes_tl_bulk_allocated);
    // Shrink back down as small as possible.
    size_t footprint = mspace_footprint(mspace_);
    mspace_set_footprint_limit(mspace_, footprint);
  }
  if (result != nullptr) {
    // Zero freshly allocated memory, done while not holding the space's lock.
    memset(result, 0, num_bytes);
    // Check that the result is contained in the space.
    CHECK_IMPLIES(kDebugSpaces, Contains(result));
  }
  return result;
}

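// Recreates a space of the matching concrete type (plain or memory-tool wrapped) over the given
// mem map and allocator.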
MallocSpace* DlMallocSpace::CreateInstance(MemMap&& mem_map,
                                           const std::string& name,
                                           void* allocator,
                                           uint8_t* begin,
                                           uint8_t* end,
                                           uint8_t* limit,
                                           size_t growth_limit,
                                           bool can_move_objects) {
  if (Runtime::Current()->IsRunningOnMemoryTool()) {
    return new MemoryToolMallocSpace<DlMallocSpace, kDefaultMemoryToolRedZoneBytes, true, false>(
        std::move(mem_map),
        initial_size_,
        name,
        allocator,
        begin,
        end,
        limit,
        growth_limit,
        can_move_objects,
        starting_size_);
  } else {
    return new DlMallocSpace(std::move(mem_map),
                             initial_size_,
                             name,
                             allocator,
                             begin,
                             end,
                             limit,
                             growth_limit,
                             can_move_objects,
                             starting_size_);
  }
}

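// Frees a single object and returns the number of bytes released.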
size_t DlMallocSpace::Free(Thread* self, mirror::Object* ptr) {
  MutexLock mu(self, lock_);
  if (kDebugSpaces) {
    CHECK(ptr != nullptr);
    CHECK(Contains(ptr)) << "Free (" << ptr << ") not in bounds of heap " << *this;
  }
  const size_t bytes_freed = AllocationSizeNonvirtual(ptr, nullptr);
  if (kRecentFreeCount > 0) {
    RegisterRecentFree(ptr);
  }
  mspace_free(mspace_, ptr);
  return bytes_freed;
}

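// Bulk-frees a list of objects and returns the total number of bytes released.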
size_t DlMallocSpace::FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) {
  DCHECK(ptrs != nullptr);

  // Don't need the lock to calculate the size of the freed pointers.
  size_t bytes_freed = 0;
  for (size_t i = 0; i < num_ptrs; i++) {
    mirror::Object* ptr = ptrs[i];
    const size_t look_ahead = 8;
    if (kPrefetchDuringDlMallocFreeList && i + look_ahead < num_ptrs) {
      // The head of chunk for the allocation is sizeof(size_t) behind the allocation.
      __builtin_prefetch(reinterpret_cast<char*>(ptrs[i + look_ahead]) - sizeof(size_t));
    }
    bytes_freed += AllocationSizeNonvirtual(ptr, nullptr);
  }

  if (kRecentFreeCount > 0) {
    MutexLock mu(self, lock_);
    for (size_t i = 0; i < num_ptrs; i++) {
      RegisterRecentFree(ptrs[i]);
    }
  }

  if (kDebugSpaces) {
    size_t num_broken_ptrs = 0;
    for (size_t i = 0; i < num_ptrs; i++) {
      if (!Contains(ptrs[i])) {
        num_broken_ptrs++;
        LOG(ERROR) << "FreeList[" << i << "] (" << ptrs[i] << ") not in bounds of heap " << *this;
      } else {
        size_t size = mspace_usable_size(ptrs[i]);
        memset(ptrs[i], 0xEF, size);
      }
    }
    CHECK_EQ(num_broken_ptrs, 0u);
  }

  {
    MutexLock mu(self, lock_);
    mspace_bulk_free(mspace_, reinterpret_cast<void**>(ptrs), num_ptrs);
    return bytes_freed;
  }
}

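// Releases unused memory back to the kernel and returns the number of bytes reclaimed.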
size_t DlMallocSpace::Trim() {
  MutexLock mu(Thread::Current(), lock_);
  // Trim to release memory at the end of the space.
  mspace_trim(mspace_, 0);
  // Visit space looking for page-sized holes to advise the kernel we don't need.
  size_t reclaimed = 0;
  mspace_inspect_all(mspace_, DlmallocMadviseCallback, &reclaimed);
  return reclaimed;
}

void DlMallocSpace::Walk(void(*callback)(void *start, void *end, size_t num_bytes, void* callback_arg),
                         void* arg) {
  MutexLock mu(Thread::Current(), lock_);
  mspace_inspect_all(mspace_, callback, arg);
  callback(nullptr, nullptr, 0, arg);  // Indicate end of a space.
}

size_t DlMallocSpace::GetFootprint() {
  MutexLock mu(Thread::Current(), lock_);
  return mspace_footprint(mspace_);
}

size_t DlMallocSpace::GetFootprintLimit() {
  MutexLock mu(Thread::Current(), lock_);
  return mspace_footprint_limit(mspace_);
}

void DlMallocSpace::SetFootprintLimit(size_t new_size) {
  MutexLock mu(Thread::Current(), lock_);
  VLOG(heap) << "DlMallocSpace::SetFootprintLimit " << PrettySize(new_size);
  // Compare against the actual footprint, rather than the Size(), because the heap may not have
  // grown all the way to the allowed size yet.
  size_t current_space_size = mspace_footprint(mspace_);
  if (new_size < current_space_size) {
    // Don't let the space grow any more.
    new_size = current_space_size;
  }
  mspace_set_footprint_limit(mspace_, new_size);
}

uint64_t DlMallocSpace::GetBytesAllocated() {
  MutexLock mu(Thread::Current(), lock_);
  size_t bytes_allocated = 0;
  mspace_inspect_all(mspace_, DlmallocBytesAllocatedCallback, &bytes_allocated);
  return bytes_allocated;
}

uint64_t DlMallocSpace::GetObjectsAllocated() {
  MutexLock mu(Thread::Current(), lock_);
  size_t objects_allocated = 0;
  mspace_inspect_all(mspace_, DlmallocObjectsAllocatedCallback, &objects_allocated);
  return objects_allocated;
}

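// Resets the space to its initial state: returns its pages to the kernel, clears both bitmaps
// and rebuilds the mspace at the starting size.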
void DlMallocSpace::Clear() {
  size_t footprint_limit = GetFootprintLimit();
  madvise(GetMemMap()->Begin(), GetMemMap()->Size(), MADV_DONTNEED);
  live_bitmap_.Clear();
  mark_bitmap_.Clear();
  SetEnd(Begin() + starting_size_);
  mspace_ = CreateMspace(mem_map_.Begin(), starting_size_, initial_size_);
  SetFootprintLimit(footprint_limit);
}

#ifndef NDEBUG
void DlMallocSpace::CheckMoreCoreForPrecondition() {
  lock_.AssertHeld(Thread::Current());
}
#endif

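// Accumulated by MSpaceChunkCallback while walking the mspace: the largest free chunk seen and
// the total number of bytes in use.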
struct MspaceCbArgs {
  size_t max_contiguous;
  size_t used;
};

static void MSpaceChunkCallback(void* start, void* end, size_t used_bytes, void* arg) {
  size_t chunk_size = reinterpret_cast<uint8_t*>(end) - reinterpret_cast<uint8_t*>(start);
  MspaceCbArgs* mspace_cb_args = reinterpret_cast<MspaceCbArgs*>(arg);
  mspace_cb_args->used += used_bytes;
  if (used_bytes < chunk_size) {
    size_t chunk_free_bytes = chunk_size - used_bytes;
    size_t& max_contiguous_allocation = mspace_cb_args->max_contiguous;
    max_contiguous_allocation = std::max(max_contiguous_allocation, chunk_free_bytes);
  }
}

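// Reports whether a failed allocation of failed_alloc_bytes can be attributed to fragmentation:
// walks the space to find the largest free chunk and logs a message when it is smaller than the
// request.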
bool DlMallocSpace::LogFragmentationAllocFailure(std::ostream& os,
                                                 size_t failed_alloc_bytes) {
  Thread* const self = Thread::Current();
  MspaceCbArgs mspace_cb_args = {0, 0};
  // To allow the Walk/InspectAll() to exclusively-lock the mutator
  // lock, temporarily release the shared access to the mutator
  // lock here by transitioning to the suspended state.
  Locks::mutator_lock_->AssertSharedHeld(self);
  ScopedThreadSuspension sts(self, ThreadState::kSuspended);
  Walk(MSpaceChunkCallback, &mspace_cb_args);
  if (failed_alloc_bytes > mspace_cb_args.max_contiguous) {
    os << "; failed due to malloc_space fragmentation (largest possible contiguous allocation "
       << mspace_cb_args.max_contiguous << " bytes, space in use " << mspace_cb_args.used
       << " bytes, capacity = " << Capacity() << ")";
    return true;
  }
  return false;
}

}  // namespace space

namespace allocator {

// Implement the dlmalloc morecore callback.
void* ArtDlMallocMoreCore(void* mspace, intptr_t increment) REQUIRES_SHARED(Locks::mutator_lock_) {
  Runtime* runtime = Runtime::Current();
  Heap* heap = runtime->GetHeap();
  ::art::gc::space::DlMallocSpace* dlmalloc_space = heap->GetDlMallocSpace();
  // Support for multiple DlMalloc provided by a slow path.
  if (UNLIKELY(dlmalloc_space == nullptr || dlmalloc_space->GetMspace() != mspace)) {
    if (LIKELY(runtime->GetJitCodeCache() != nullptr)) {
      jit::JitCodeCache* code_cache = runtime->GetJitCodeCache();
      if (code_cache->OwnsSpace(mspace)) {
        return code_cache->MoreCore(mspace, increment);
      }
    }
    dlmalloc_space = nullptr;
    for (space::ContinuousSpace* space : heap->GetContinuousSpaces()) {
      if (space->IsDlMallocSpace()) {
        ::art::gc::space::DlMallocSpace* cur_dlmalloc_space = space->AsDlMallocSpace();
        if (cur_dlmalloc_space->GetMspace() == mspace) {
          dlmalloc_space = cur_dlmalloc_space;
          break;
        }
      }
    }
    CHECK(dlmalloc_space != nullptr) << "Couldn't find DlMallocSpace with mspace=" << mspace;
  }
  return dlmalloc_space->MoreCore(increment);
}

}  // namespace allocator

}  // namespace gc
}  // namespace art