/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "swap_space.h"

#include <sys/mman.h>

#include <algorithm>
#include <numeric>

#include "base/bit_utils.h"
#include "base/macros.h"
#include "base/mem_map.h"
#include "base/mutex.h"
#include "thread-current-inl.h"

namespace art {

// The chunk size by which the swap file is increased and mapped.
static constexpr size_t kMinimumMapSize = 16 * MB;

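// If true, verify the consistency of the two free maps around each Free() (slow; debug only).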
static constexpr bool kCheckFreeMaps = false;

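// Log all free chunks, grouped by chunk size (debugging aid).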
template <typename FreeBySizeSet>
static void DumpFreeMap(const FreeBySizeSet& free_by_size) {
  size_t last_size = static_cast<size_t>(-1);
  for (const auto& entry : free_by_size) {
    if (last_size != entry.size) {
      last_size = entry.size;
      LOG(INFO) << "Size " << last_size;
    }
    LOG(INFO) << "  0x" << std::hex << entry.free_by_start_entry->Start()
        << " size=" << std::dec << entry.free_by_start_entry->size;
  }
}

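// Remove a free chunk from both free maps.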
void SwapSpace::RemoveChunk(FreeBySizeSet::const_iterator free_by_size_pos) {
  auto free_by_start_pos = free_by_size_pos->free_by_start_entry;
  free_by_size_.erase(free_by_size_pos);
  free_by_start_.erase(free_by_start_pos);
}

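// Insert a non-empty free chunk into both free maps, keeping them consistent.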
inline void SwapSpace::InsertChunk(const SpaceChunk& chunk) {
  DCHECK_NE(chunk.size, 0u);
  auto insert_result = free_by_start_.insert(chunk);
  DCHECK(insert_result.second);
  free_by_size_.emplace(chunk.size, insert_result.first);
}

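// The swap space takes ownership of the file descriptor and closes it in the destructor.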
SwapSpace::SwapSpace(int fd, size_t initial_size)
    : fd_(fd),
      size_(0),
      lock_("SwapSpace lock", static_cast<LockLevel>(LockLevel::kDefaultMutexLevel - 1)) {
  // Assume that the file is unlinked.

  InsertChunk(NewFileChunk(initial_size));
}
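
// Example usage (a minimal sketch: obtaining the backing file via memfd_create()
// is an assumption here; any unlinked read/write file descriptor works):
//
//   int fd = memfd_create("art-swap", 0);
//   SwapSpace swap(fd, /*initial_size=*/ 1 * MB);
//   void* p = swap.Alloc(1024);  // Sizes are rounded up to a multiple of 8.
//   swap.Free(p, 1024);          // Must be called with the allocation's size.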

SwapSpace::~SwapSpace() {
  // Unmap all mmapped chunks. Nothing should be allocated anymore at
  // this point, so there should be only full size chunks in free_by_start_.
  for (const SpaceChunk& chunk : free_by_start_) {
    if (munmap(chunk.ptr, chunk.size) != 0) {
      PLOG(ERROR) << "Failed to unmap swap space chunk at "
          << static_cast<const void*>(chunk.ptr) << " size=" << chunk.size;
    }
  }
  // All arenas are backed by the same file. Just close the descriptor.
  close(fd_);
}

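// Debug helper: check that both free maps describe the same set of chunks and
// return the total number of free bytes.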
template <typename FreeByStartSet, typename FreeBySizeSet>
static size_t CollectFree(const FreeByStartSet& free_by_start, const FreeBySizeSet& free_by_size) {
  if (free_by_start.size() != free_by_size.size()) {
    LOG(FATAL) << "Size: " << free_by_start.size() << " vs " << free_by_size.size();
  }

  // Calculate over free_by_size.
  size_t sum1 = 0;
  for (const auto& entry : free_by_size) {
    sum1 += entry.free_by_start_entry->size;
  }

  // Calculate over free_by_start.
  size_t sum2 = 0;
  for (const auto& entry : free_by_start) {
    sum2 += entry.size;
  }

  if (sum1 != sum2) {
    LOG(FATAL) << "Sum: " << sum1 << " vs " << sum2;
  }
  return sum1;
}

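// Allocate `size` bytes (rounded up to a multiple of 8) from the swap file,
// growing the file when no free chunk is large enough.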
void* SwapSpace::Alloc(size_t size) {
  MutexLock lock(Thread::Current(), lock_);
  size = RoundUp(size, 8U);

  // Check the free list for something that fits.
  // TODO: Smarter implementation. Global biggest chunk, ...
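  // Note: if free_by_start_ is empty, so is free_by_size_, and we must not build a
  // lookup key from free_by_start_.begin(). Otherwise, using begin() (the chunk with
  // the lowest start address) as the tie-breaker makes lower_bound() return the first
  // entry whose size is at least `size`, i.e. the best-fitting free chunk.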
  auto it = free_by_start_.empty()
      ? free_by_size_.end()
      : free_by_size_.lower_bound(FreeBySizeEntry { size, free_by_start_.begin() });
  if (it != free_by_size_.end()) {
    SpaceChunk old_chunk = *it->free_by_start_entry;
    if (old_chunk.size == size) {
      RemoveChunk(it);
    } else {
      // Avoid deallocating and allocating the std::set<> nodes.
      // This would be much simpler if we could use replace() from Boost.Bimap.

      // The free_by_start_ map contains disjoint intervals ordered by `ptr`.
      // Shrinking the interval does not affect the ordering.
      it->free_by_start_entry->ptr += size;
      it->free_by_start_entry->size -= size;

      auto node = free_by_size_.extract(it);
      node.value().size -= size;
      free_by_size_.insert(std::move(node));
    }
    return old_chunk.ptr;
  } else {
    // No free chunk is big enough; we need to grow the file.
    SpaceChunk new_chunk = NewFileChunk(size);
    if (new_chunk.size != size) {
      // Insert the remainder.
      SpaceChunk remainder = { new_chunk.ptr + size, new_chunk.size - size };
      InsertChunk(remainder);
    }
    return new_chunk.ptr;
  }
}

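// Extend the swap file by at least `min_size` bytes (but no less than kMinimumMapSize,
// page-aligned) and mmap the new region as a single chunk.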
SwapSpace::SpaceChunk SwapSpace::NewFileChunk(size_t min_size) {
#if !defined(__APPLE__)
  const size_t page_size = MemMap::GetPageSize();
  size_t next_part = std::max(RoundUp(min_size, page_size), RoundUp(kMinimumMapSize, page_size));
  int result = TEMP_FAILURE_RETRY(ftruncate64(fd_, size_ + next_part));
  if (result != 0) {
    PLOG(FATAL) << "Unable to increase swap file.";
  }
  uint8_t* ptr = reinterpret_cast<uint8_t*>(
      mmap(nullptr, next_part, PROT_READ | PROT_WRITE, MAP_SHARED, fd_, size_));
  if (ptr == MAP_FAILED) {
    LOG(ERROR) << "Unable to mmap new swap file chunk.";
    LOG(ERROR) << "Current size: " << size_ << " requested: " << next_part << "/" << min_size;
    LOG(ERROR) << "Free list:";
    DumpFreeMap(free_by_size_);
    LOG(ERROR) << "In free list: " << CollectFree(free_by_start_, free_by_size_);
    PLOG(FATAL) << "Unable to mmap new swap file chunk.";
  }
  size_ += next_part;
  SpaceChunk new_chunk = {ptr, next_part};
  return new_chunk;
#else
  UNUSED(min_size, kMinimumMapSize);
  LOG(FATAL) << "No swap file support on the Mac.";
  UNREACHABLE();
#endif
}

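// Return a block to the free list, merging it with its immediate free neighbors.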
// TODO: Full coalescing.
void SwapSpace::Free(void* ptr, size_t size) {
  MutexLock lock(Thread::Current(), lock_);
  size = RoundUp(size, 8U);

  size_t free_before = 0;
  if (kCheckFreeMaps) {
    free_before = CollectFree(free_by_start_, free_by_size_);
  }

  SpaceChunk chunk = { reinterpret_cast<uint8_t*>(ptr), size };
  auto it = free_by_start_.lower_bound(chunk);
  if (it != free_by_start_.begin()) {
    auto prev = it;
    --prev;
    CHECK_LE(prev->End(), chunk.Start());
    if (prev->End() == chunk.Start()) {
      // Merge *prev with this chunk.
      chunk.size += prev->size;
      chunk.ptr -= prev->size;
      auto erase_pos = free_by_size_.find(FreeBySizeEntry { prev->size, prev });
      DCHECK(erase_pos != free_by_size_.end());
      RemoveChunk(erase_pos);
      // "prev" is invalidated but "it" remains valid.
    }
  }
  if (it != free_by_start_.end()) {
    CHECK_LE(chunk.End(), it->Start());
    if (chunk.End() == it->Start()) {
      // Merge *it with this chunk.
      chunk.size += it->size;
      auto erase_pos = free_by_size_.find(FreeBySizeEntry { it->size, it });
      DCHECK(erase_pos != free_by_size_.end());
      RemoveChunk(erase_pos);
      // "it" is invalidated but we don't need it anymore.
    }
  }
  InsertChunk(chunk);

  if (kCheckFreeMaps) {
    size_t free_after = CollectFree(free_by_start_, free_by_size_);

    if (free_after != free_before + size) {
      DumpFreeMap(free_by_size_);
      CHECK_EQ(free_after, free_before + size)
          << "Should be " << size << " difference from " << free_before;
    }
  }
}

}  // namespace art