/*
 * Copyright (C) 2018 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "mem_map_arena_pool.h"

#include <sys/mman.h>

#include <algorithm>
#include <cstddef>
#include <iomanip>
#include <numeric>

#include <android-base/logging.h>

#include "base/arena_allocator-inl.h"
#include "base/mem_map.h"
#include "base/systrace.h"
#include "runtime_globals.h"

namespace art HIDDEN {

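// An Arena backed by its own anonymous memory mapping. Release() returns the
// pages to the OS while keeping the mapping reserved; the destructor unmaps it.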
class MemMapArena final : public Arena {
 public:
  MemMapArena(size_t size, bool low_4gb, const char* name);
  virtual ~MemMapArena();
  void Release() override;

 private:
  static MemMap Allocate(size_t size, bool low_4gb, const char* name);

  MemMap map_;
};

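// Maps the backing memory eagerly and checks that it satisfies the
// ArenaAllocator alignment requirement.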
MemMapArena::MemMapArena(size_t size, bool low_4gb, const char* name)
    : map_(Allocate(size, low_4gb, name)) {
  memory_ = map_.Begin();
  static_assert(ArenaAllocator::kArenaAlignment <= kMinPageSize,
                "Arena should not need stronger alignment than kMinPageSize.");
  DCHECK_ALIGNED(memory_, ArenaAllocator::kArenaAlignment);
  size_ = map_.Size();
}

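// Maps a page-aligned anonymous region, retrying a few times on failure
// (see b/278665389) and aborting once the retries are exhausted.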
MemMap MemMapArena::Allocate(size_t size, bool low_4gb, const char* name) {
  // Round up to a full page as that's the smallest unit of allocation for mmap()
  // and we want to be able to use all memory that we actually allocate.
  size = RoundUp(size, gPageSize);
  std::string error_msg;
  // TODO(b/278665389): remove this retry logic if the root cause is found.
  constexpr int MAX_RETRY_CNT = 3;
  int retry_cnt = 0;
  while (true) {
    MemMap map = MemMap::MapAnonymous(name, size, PROT_READ | PROT_WRITE, low_4gb, &error_msg);
    if (map.IsValid()) {
      if (retry_cnt > 0) {
        LOG(WARNING) << "Succeeded after retry (cnt=" << retry_cnt << ")";
      }
      return map;
    }
    if (retry_cnt == MAX_RETRY_CNT) {
      CHECK(map.IsValid()) << error_msg << " (retried " << retry_cnt << " times)";
    }
    retry_cnt++;
    LOG(ERROR) << error_msg << "; retrying (cnt=" << retry_cnt << ")";
  }
}

MemMapArena::~MemMapArena() {
  // The implicit destruction of map_ unmaps the arena's memory.
}

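// Returns the arena's dirty pages to the OS (zeroing them) while keeping the
// mapping reserved, so the arena can be reused without remapping.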
void MemMapArena::Release() {
  if (bytes_allocated_ > 0) {
    map_.MadviseDontNeedAndZero();
    bytes_allocated_ = 0;
  }
}

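// Initialize the MemMap subsystem before the pool creates any mappings.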
MemMapArenaPool::MemMapArenaPool(bool low_4gb, const char* name)
    : low_4gb_(low_4gb),
      name_(name),
      free_arenas_(nullptr) {
  MemMap::Init();
}

MemMapArenaPool::~MemMapArenaPool() {
  ReclaimMemory();
}

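// Deletes every arena on the free list. Callers are responsible for
// synchronization; the destructor calls this without taking lock_.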
void MemMapArenaPool::ReclaimMemory() {
  while (free_arenas_ != nullptr) {
    Arena* arena = free_arenas_;
    free_arenas_ = free_arenas_->next_;
    delete arena;
  }
}

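// Thread-safe variant of ReclaimMemory().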
void MemMapArenaPool::LockReclaimMemory() {
  std::lock_guard<std::mutex> lock(lock_);
  ReclaimMemory();
}

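// Reuses the arena at the head of the free list if it is large enough;
// otherwise maps a fresh MemMapArena of the requested size.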
Arena* MemMapArenaPool::AllocArena(size_t size) {
  Arena* ret = nullptr;
  {
    std::lock_guard<std::mutex> lock(lock_);
    if (free_arenas_ != nullptr && LIKELY(free_arenas_->Size() >= size)) {
      ret = free_arenas_;
      free_arenas_ = free_arenas_->next_;
    }
  }
  if (ret == nullptr) {
    ret = new MemMapArena(size, low_4gb_, name_);
  }
  ret->Reset();
  return ret;
}

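// Madvises the pages of every arena on the free list back to the OS.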
void MemMapArenaPool::TrimMaps() {
  ScopedTrace trace(__PRETTY_FUNCTION__);
  std::lock_guard<std::mutex> lock(lock_);
  for (Arena* arena = free_arenas_; arena != nullptr; arena = arena->next_) {
    arena->Release();
  }
}

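// Sums the bytes allocated in the arenas currently on the free list.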
size_t MemMapArenaPool::GetBytesAllocated() const {
  size_t total = 0;
  std::lock_guard<std::mutex> lock(lock_);
  for (Arena* arena = free_arenas_; arena != nullptr; arena = arena->next_) {
    total += arena->GetBytesAllocated();
  }
  return total;
}

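// Returns a chain of arenas to the pool. Under a memory tool the freed
// memory is first marked undefined; with precise tracking enabled the
// arenas are destroyed instead of being reused.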
void MemMapArenaPool::FreeArenaChain(Arena* first) {
  if (kRunningOnMemoryTool) {
    for (Arena* arena = first; arena != nullptr; arena = arena->next_) {
      MEMORY_TOOL_MAKE_UNDEFINED(arena->memory_, arena->bytes_allocated_);
    }
  }

  if (arena_allocator::kArenaAllocatorPreciseTracking) {
    // Do not reuse arenas when tracking.
    while (first != nullptr) {
      Arena* next = first->next_;
      delete first;
      first = next;
    }
    return;
  }

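  // Splice the whole chain onto the front of the free list for reuse.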
  if (first != nullptr) {
    Arena* last = first;
    while (last->next_ != nullptr) {
      last = last->next_;
    }
    std::lock_guard<std::mutex> lock(lock_);
    last->next_ = free_arenas_;
    free_arenas_ = first;
  }
}

}  // namespace art