// Copyright (C) 2019 The Android Open Source Project
// Copyright (C) 2019 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
15 #include "HostAddressSpace.h"
16
17 #include "aemu/base/SubAllocator.h"
18 #include "aemu/base/synchronization/Lock.h"
19 #include "host-common/address_space_device.h"
20 #include "host-common/address_space_device.hpp"
21
22 #include <unordered_map>
23 #include <vector>
24
25 #include <inttypes.h>
26
27 #define HASD_DEBUG 0
28
29 #if HASD_DEBUG
30 #define HASD_LOG(fmt,...) printf("%s:%d " fmt "\n", __func__, __LINE__, ##__VA_ARGS__);
31 #else
32 #define HASD_LOG(fmt,...)
33 #endif

using android::base::AutoLock;
using android::base::Lock;
using android::base::SubAllocator;
using android::emulation::AddressSpaceDevicePingInfo;

namespace android {

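// HostAddressSpaceDevice::Impl is the host-side, in-process simulation of the
// goldfish address space device. It tracks one Entry per open handle (a ping
// info struct plus the blocks that handle has claimed or allocated) along with
// a map of shared host regions, and hands out "physical" offsets from a
// 16 GiB SubAllocator arena with 4 KiB alignment. Offsets are turned into
// fake guest physical addresses by adding kPciStart.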
class HostAddressSpaceDevice::Impl {
public:
    Impl() : mControlOps(get_address_space_device_control_ops()),
             mPhysicalOffsetAllocator(0, 16ULL * 1024ULL * 1048576ULL, 4096) { }

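    // Drops all state: closes every open handle, forgets all shared regions,
    // resets the physical offset allocator, and clears the underlying control
    // ops device state.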
    void clear() {
        std::vector<uint32_t> handlesToClose;
        for (auto it : mEntries) {
            handlesToClose.push_back(it.first);
        }
        for (auto handle : handlesToClose) {
            close(handle);
        }
        mSharedRegions.clear();
        mPhysicalOffsetAllocator.freeAll();
        mControlOps->clear();
    }

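    // Opens a new handle: asks the control ops to generate a handle id,
    // allocates its ping info block, and registers that block's address with
    // the device.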
    uint32_t open() {
        uint32_t handle = mControlOps->gen_handle();

        AutoLock lock(mLock);
        auto& entry = mEntries[handle];

        entry.pingInfo = new AddressSpaceDevicePingInfo;

        lock.unlock();

        mControlOps->tell_ping_info(handle, (uint64_t)(uintptr_t)entry.pingInfo);
        return handle;
    }

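    // Closes a handle: destroys it on the device side, then frees its ping
    // info and removes the entry.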
    void close(uint32_t handle) {
        mControlOps->destroy_handle(handle);

        AutoLock lock(mLock);
        auto& entry = mEntries[handle];
        delete entry.pingInfo;
        mEntries.erase(handle);
    }

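    // Allocates a block of |size| bytes for |handle| out of the physical
    // offset arena. Returns the offset and writes the corresponding fake
    // guest physical address to |physAddr|.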
    uint64_t allocBlock(uint32_t handle, size_t size, uint64_t* physAddr) {
        AutoLock lock(mLock);
        return allocBlockLocked(handle, size, physAddr);
    }

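    // Frees a block previously returned by allocBlock, given its offset.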
    void freeBlock(uint32_t handle, uint64_t off) {
        // mirror hw/pci/goldfish_address_space.c:
        // first run deallocation callbacks, then update the state
        mControlOps->run_deallocation_callbacks(kPciStart + off);

        AutoLock lock(mLock);
        freeBlockLocked(handle, off);
    }

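    // Records the host virtual address backing the block at |off| for |handle|.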
    void setHostAddr(uint32_t handle, size_t off, void* hva) {
        AutoLock lock(mLock);
        auto& entry = mEntries[handle];
        auto& mem = entry.blocks[off];
        mem.hva = hva;
    }

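    // Records the host virtual address for whichever block or shared region
    // starts exactly at |physAddr| (searched across all handles).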
    void setHostAddrByPhysAddr(uint64_t physAddr, void* hva) {
        if (!physAddr) return;
        const uint64_t off = physAddr - kPciStart;

        AutoLock lock(mLock);
        for (auto &it : mEntries) {
            for (auto &it2 : it.second.blocks) {
                if (it2.first == off) {
                    it2.second.hva = hva;
                }
            }
        }

        for (auto &it : mSharedRegions) {
            if (it.first == off) {
                it.second.hva = hva;
            }
        }
    }

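    // Inverse of setHostAddrByPhysAddr: clears the recorded host virtual
    // address for whatever starts at |physAddr|.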
    void unsetHostAddrByPhysAddr(uint64_t physAddr) {
        if (!physAddr) return;
        const uint64_t off = physAddr - kPciStart;

        AutoLock lock(mLock);
        for (auto &it : mEntries) {
            for (auto &it2 : it.second.blocks) {
                if (it2.first == off) {
                    it2.second.hva = nullptr;
                }
            }
        }

        for (auto &it : mSharedRegions) {
            if (it.first == off) {
                it.second.hva = nullptr;
            }
        }
    }

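    // Translates a fake guest physical address back to a host pointer. Ping
    // info structs are matched by their own address; blocks and shared
    // regions are matched by range, returning the hva plus the offset into
    // the block.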
    void* getHostAddr(uint64_t physAddr) {
        HASD_LOG("get hva of 0x%llx", (unsigned long long)physAddr);

        if (!physAddr) return nullptr;
        const uint64_t off = physAddr - kPciStart;

        AutoLock lock(mLock);
        HASD_LOG("get hva of off 0x%llx", (unsigned long long)off);
        void* res = nullptr;

        // First check ping infos
        for (const auto &it : mEntries) {
            if ((uint64_t)(uintptr_t)it.second.pingInfo == physAddr) return it.second.pingInfo;
        }

        for (const auto &it : mEntries) {
            for (const auto &it2 : it.second.blocks) {
                if (blockContainsOffset(it2.first, it2.second, off)) {
                    HASD_LOG("entry [0x%llx 0x%llx] contains. hva: %p",
                             (unsigned long long)it2.first,
                             (unsigned long long)(it2.first + it2.second.size),
                             it2.second.hva);
                    res = ((char*)it2.second.hva) +
                          offsetIntoBlock(it2.first, it2.second, off);
                }
            }
        }

        for (auto &it : mSharedRegions) {
            if (blockContainsOffset(it.first, it.second, off)) {
                HASD_LOG("shared region [0x%llx 0x%llx] contains. hva: %p",
                         (unsigned long long)it.first,
                         (unsigned long long)(it.first + it.second.size),
                         it.second.hva);
                res = ((char*)it.second.hva) +
                      offsetIntoBlock(it.first, it.second, off);
            }
        }

        return res;
    }

    static uint64_t offsetToPhysAddr(uint64_t offset) {
        return kPciStart + offset;
    }

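    // Pings the device for |handle|: copies the caller's ping info into the
    // entry's registered ping info block, invokes the device ping, then
    // copies the (possibly updated) ping info back out.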
    void ping(uint32_t handle, AddressSpaceDevicePingInfo* pingInfo) {
        AutoLock lock(mLock);
        auto& entry = mEntries[handle];
        memcpy(entry.pingInfo, pingInfo, sizeof(AddressSpaceDevicePingInfo));

        lock.unlock();

        mControlOps->ping(handle);

        lock.lock();
        memcpy(pingInfo, entry.pingInfo, sizeof(AddressSpaceDevicePingInfo));
    }

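    // Claims [off, off + size) for |handle|. Fails if the handle already owns
    // a block at that offset or if no existing shared host region fully
    // encloses the requested range.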
    int claimShared(uint32_t handle, uint64_t off, uint64_t size) {
        auto& entry = mEntries[handle];

        if (entry.blocks.find(off) != entry.blocks.end()) {
            fprintf(stderr, "%s: failed, entry already owns offset 0x%llx\n", __func__,
                    (unsigned long long)off);
            return -EINVAL;
        }

        if (!enclosingSharedRegionExists(mSharedRegions, off, size)) {
            fprintf(stderr, "%s: failed, no shared region enclosing [0x%llx 0x%llx]\n", __func__,
                    (unsigned long long)off,
                    (unsigned long long)(off + size));
            return -EINVAL;
        }

        auto& entryBlock = entry.blocks[off];
        entryBlock.size = size;
        return 0;
    }

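    // Releases a previously claimed shared block at |off| for |handle|.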
    int unclaimShared(uint32_t handle, uint64_t off) {
        auto& entry = mEntries[handle];

        if (entry.blocks.find(off) == entry.blocks.end()) {
            fprintf(stderr, "%s: failed, entry does not own offset 0x%llx\n", __func__,
                    (unsigned long long)off);
            return -EINVAL;
        }

        if (!enclosingSharedRegionExists(mSharedRegions, off, entry.blocks[off].size)) {
            fprintf(stderr, "%s: failed, no shared region enclosing [0x%llx 0x%llx]\n", __func__,
                    (unsigned long long)off,
                    (unsigned long long)(off + entry.blocks[off].size));
            return -EINVAL;
        }

        entry.blocks.erase(off);

        return 0;
    }

    void saveSnapshot(base::Stream* stream) {
        emulation::goldfish_address_space_memory_state_save(stream);
    }

    void loadSnapshot(base::Stream* stream) {
        emulation::goldfish_address_space_memory_state_load(stream);
    }

    // Simulated host interface
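    //
    // The functions below stand in for what the hardware/VMM side would do:
    // they carve page-aligned shared regions out of the same physical offset
    // arena and record them in mSharedRegions. They are exposed to the
    // address space device through the AddressSpaceHwFuncs table registered
    // in HostAddressSpaceDevice::initialize().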
    int allocSharedHostRegion(uint64_t page_aligned_size, uint64_t* offset) {
        if (!offset) return -EINVAL;

        AutoLock lock(mLock);

        return allocSharedHostRegionLocked(page_aligned_size, offset);
    }

    int allocSharedHostRegionLocked(uint64_t page_aligned_size, uint64_t* offset) {
        if (!offset) return -EINVAL;

        uint64_t off = (uint64_t)(uintptr_t)mPhysicalOffsetAllocator.alloc(page_aligned_size);
        auto& block = mSharedRegions[off];
        block.size = page_aligned_size;
        (void)block;
        *offset = off;

        HASD_LOG("new shared region: [0x%llx 0x%llx]",
                 (unsigned long long)off,
                 (unsigned long long)(off + page_aligned_size));
        return 0;
    }

    int allocSharedHostRegionFixedLocked(uint64_t page_aligned_size, uint64_t offset) {
        mPhysicalOffsetAllocator.allocFixed(page_aligned_size, offset);
        auto& block = mSharedRegions[offset];
        block.size = page_aligned_size;
        (void)block;

        HASD_LOG("new shared region: [0x%llx 0x%llx]",
                 (unsigned long long)offset,
                 (unsigned long long)(offset + page_aligned_size));
        return 0;
    }

    int freeSharedHostRegion(uint64_t offset) {
        AutoLock lock(mLock);
        return freeSharedHostRegionLocked(offset);
    }

    int freeSharedHostRegionLocked(uint64_t offset) {
        if (mSharedRegions.find(offset) == mSharedRegions.end()) {
            fprintf(stderr, "%s: could not free shared region, offset 0x%llx is not a start\n", __func__,
                    (unsigned long long)offset);
            return -EINVAL;
        }

        HASD_LOG("free shared region @ 0x%llx",
                 (unsigned long long)offset);

        mSharedRegions.erase(offset);
        mPhysicalOffsetAllocator.free((void*)(uintptr_t)offset);

        return 0;
    }

    static uint64_t getPhysAddrStart() {
        return kPciStart;
    }

private:
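    // Bookkeeping for a single block or shared region: its size and, once
    // known, the host virtual address backing it.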
    struct BlockMemory {
        size_t size = 0;
        void* hva = nullptr;
    };

    using MemoryMap = std::unordered_map<uint64_t, BlockMemory>;

    uint64_t allocBlockLocked(uint32_t handle, size_t size, uint64_t* physAddr) {
        uint64_t off = (uint64_t)(uintptr_t)mPhysicalOffsetAllocator.alloc(size);
        auto& entry = mEntries[handle];
        auto& block = entry.blocks[off];
        block.size = size;
        (void)block;
        *physAddr = kPciStart + off;
        return off;
    }

    void freeBlockLocked(uint32_t handle, uint64_t off) {
        auto& entry = mEntries[handle];
        entry.blocks.erase(off);
        mPhysicalOffsetAllocator.free((void*)(uintptr_t)off);
    }

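    // Range helpers: a block starting at |offset| covers device offsets in
    // [offset, offset + size).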
    bool blockContainsOffset(
            uint64_t offset,
            const BlockMemory& block,
            uint64_t physAddr) const {
        return offset <= physAddr &&
               offset + block.size > physAddr;
    }

    uint64_t offsetIntoBlock(
            uint64_t offset,
            const BlockMemory& block,
            uint64_t physAddr) const {
        if (!blockContainsOffset(offset, block, physAddr)) {
            fprintf(stderr, "%s: block at [0x%" PRIx64 " 0x%" PRIx64 "] does not contain 0x%" PRIx64 "!\n", __func__,
                    offset,
                    offset + block.size,
                    physAddr);
            abort();
        }
        return physAddr - offset;
    }

    bool enclosingSharedRegionExists(
            const MemoryMap& memoryMap, uint64_t offset, uint64_t size) const {
        for (const auto& it : memoryMap) {
            if (it.first <= offset &&
                it.first + it.second.size >= offset + size)
                return true;
        }
        return false;
    }

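    // Base of the fake guest physical address window; allocator offsets are
    // added to this to form the addresses handed back to callers.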
    static const uint64_t kPciStart = 0x0101010100000000;

    Lock mLock;
    address_space_device_control_ops* mControlOps = nullptr;
    android::base::SubAllocator mPhysicalOffsetAllocator;

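    // Per-handle state: the registered ping info block plus the blocks this
    // handle has allocated or claimed, keyed by offset.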
    struct Entry {
        AddressSpaceDevicePingInfo* pingInfo = nullptr;
        MemoryMap blocks;
    };

    std::unordered_map<uint32_t, Entry> mEntries;
    MemoryMap mSharedRegions;
};

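// Process-wide singleton; HostAddressSpaceDevice::get() also lazily registers
// the hw funcs table on first use via initialize().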
static HostAddressSpaceDevice* sHostAddressSpace() {
    static HostAddressSpaceDevice* h = new HostAddressSpaceDevice;
    return h;
}

HostAddressSpaceDevice::HostAddressSpaceDevice() :
    mImpl(new HostAddressSpaceDevice::Impl()) { }

// static
HostAddressSpaceDevice* HostAddressSpaceDevice::get() {
    auto res = sHostAddressSpace();
    res->initialize();
    return res;
}

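// The public HostAddressSpaceDevice API below forwards to the Impl above.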
uint32_t HostAddressSpaceDevice::open() {
    return mImpl->open();
}

void HostAddressSpaceDevice::close(uint32_t handle) {
    mImpl->close(handle);
}

uint64_t HostAddressSpaceDevice::allocBlock(uint32_t handle, size_t size, uint64_t* physAddr) {
    return mImpl->allocBlock(handle, size, physAddr);
}

void HostAddressSpaceDevice::freeBlock(uint32_t handle, uint64_t off) {
    return mImpl->freeBlock(handle, off);
}

void HostAddressSpaceDevice::setHostAddr(uint32_t handle, size_t off, void* hva) {
    return mImpl->setHostAddr(handle, off, hva);
}

void HostAddressSpaceDevice::setHostAddrByPhysAddr(uint64_t physAddr, void* hva) {
    mImpl->setHostAddrByPhysAddr(physAddr, hva);
}

void HostAddressSpaceDevice::unsetHostAddrByPhysAddr(uint64_t physAddr) {
    mImpl->unsetHostAddrByPhysAddr(physAddr);
}

void* HostAddressSpaceDevice::getHostAddr(uint64_t physAddr) {
    return mImpl->getHostAddr(physAddr);
}

uint64_t HostAddressSpaceDevice::offsetToPhysAddr(uint64_t offset) const {
    return mImpl->offsetToPhysAddr(offset);
}

void HostAddressSpaceDevice::ping(uint32_t handle, AddressSpaceDevicePingInfo* pingInfo) {
    mImpl->ping(handle, pingInfo);
}

int HostAddressSpaceDevice::claimShared(uint32_t handle, uint64_t off, uint64_t size) {
    return mImpl->claimShared(handle, off, size);
}

int HostAddressSpaceDevice::unclaimShared(uint32_t handle, uint64_t off) {
    return mImpl->unclaimShared(handle, off);
}

void HostAddressSpaceDevice::saveSnapshot(base::Stream* stream) {
    mImpl->saveSnapshot(stream);
}

void HostAddressSpaceDevice::loadSnapshot(base::Stream* stream) {
    mImpl->loadSnapshot(stream);
}

// static
HostAddressSpaceDevice::Impl* HostAddressSpaceDevice::getImpl() {
    return HostAddressSpaceDevice::get()->mImpl.get();
}

int HostAddressSpaceDevice::allocSharedHostRegion(uint64_t page_aligned_size, uint64_t* offset) {
    return HostAddressSpaceDevice::getImpl()->allocSharedHostRegion(page_aligned_size, offset);
}

int HostAddressSpaceDevice::freeSharedHostRegion(uint64_t offset) {
    return HostAddressSpaceDevice::getImpl()->freeSharedHostRegion(offset);
}

int HostAddressSpaceDevice::allocSharedHostRegionLocked(uint64_t page_aligned_size, uint64_t* offset) {
    return HostAddressSpaceDevice::getImpl()->allocSharedHostRegionLocked(page_aligned_size, offset);
}

int HostAddressSpaceDevice::freeSharedHostRegionLocked(uint64_t offset) {
    return HostAddressSpaceDevice::getImpl()->freeSharedHostRegionLocked(offset);
}

uint64_t HostAddressSpaceDevice::getPhysAddrStart() {
    return HostAddressSpaceDevice::getImpl()->getPhysAddrStart();
}

uint32_t HostAddressSpaceDevice::getGuestPageSize() {
    return 4096;
}

int HostAddressSpaceDevice::allocSharedHostRegionFixedLocked(uint64_t page_aligned_size, uint64_t offset) {
    return HostAddressSpaceDevice::getImpl()->allocSharedHostRegionFixedLocked(page_aligned_size, offset);
}

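// Table of host-side hooks handed to the address space device. The same
// getPhysAddrStart is supplied for two consecutive slots (presumably the
// unlocked and locked variants), since it only returns a constant.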
static const AddressSpaceHwFuncs sAddressSpaceHwFuncs = {
    &HostAddressSpaceDevice::allocSharedHostRegion,
    &HostAddressSpaceDevice::freeSharedHostRegion,
    &HostAddressSpaceDevice::allocSharedHostRegionLocked,
    &HostAddressSpaceDevice::freeSharedHostRegionLocked,
    &HostAddressSpaceDevice::getPhysAddrStart,
    &HostAddressSpaceDevice::getPhysAddrStart,
    &HostAddressSpaceDevice::getGuestPageSize,
    &HostAddressSpaceDevice::allocSharedHostRegionFixedLocked,
};

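// Registers the hw funcs table with the address space device exactly once.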
void HostAddressSpaceDevice::initialize() {
    if (mInitialized) return;
    address_space_set_hw_funcs(&sAddressSpaceHwFuncs);
    mInitialized = true;
}

void HostAddressSpaceDevice::clear() {
    mImpl->clear();
}

} // namespace android