/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <errno.h>
#include <fcntl.h>
#include <stdint.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/ptrace.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <unistd.h>

#include <algorithm>
#include <memory>
#include <mutex>
#include <optional>
#include <string>

#include <android-base/unique_fd.h>

#include <unwindstack/Log.h>
#include <unwindstack/Memory.h>

#include "MemoryBuffer.h"
#include "MemoryCache.h"
#include "MemoryFileAtOffset.h"
#include "MemoryLocal.h"
#include "MemoryLocalUnsafe.h"
#include "MemoryOffline.h"
#include "MemoryOfflineBuffer.h"
#include "MemoryRange.h"
#include "MemoryRemote.h"

namespace unwindstack {

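// Reads up to `len` bytes from `remote_src` in the process `pid` into `dst`
// using process_vm_readv(). Returns the number of bytes actually read, which
// can be less than `len` if part of the remote range is unmapped.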
static size_t ProcessVmRead(pid_t pid, uint64_t remote_src, void* dst, size_t len) {

  // Split up the remote read across page boundaries.
  // From the manpage:
  //   A partial read/write may result if one of the remote_iov elements points to an invalid
  //   memory region in the remote process.
  //
  //   Partial transfers apply at the granularity of iovec elements.  These system calls won't
  //   perform a partial transfer that splits a single iovec element.
  constexpr size_t kMaxIovecs = 64;
  struct iovec src_iovs[kMaxIovecs];

  uint64_t cur = remote_src;
  size_t total_read = 0;
  while (len > 0) {
    struct iovec dst_iov = {
        .iov_base = &reinterpret_cast<uint8_t*>(dst)[total_read], .iov_len = len,
    };

    size_t iovecs_used = 0;
    while (len > 0) {
      if (iovecs_used == kMaxIovecs) {
        break;
      }

      // struct iovec uses void* for iov_base.
      if (cur >= UINTPTR_MAX) {
        errno = EFAULT;
        return total_read;
      }

      src_iovs[iovecs_used].iov_base = reinterpret_cast<void*>(cur);

      uintptr_t misalignment = cur & (getpagesize() - 1);
      size_t iov_len = getpagesize() - misalignment;
      iov_len = std::min(iov_len, len);

      len -= iov_len;
      if (__builtin_add_overflow(cur, iov_len, &cur)) {
        errno = EFAULT;
        return total_read;
      }

      src_iovs[iovecs_used].iov_len = iov_len;
      ++iovecs_used;
    }

    ssize_t rc = process_vm_readv(pid, &dst_iov, 1, src_iovs, iovecs_used, 0);
    if (rc == -1) {
      return total_read;
    }
    total_read += rc;
  }
  return total_read;
}

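// Reads one word at `addr` in the target process with PTRACE_PEEKTEXT. Returns
// false if the read faults. The caller is expected to have the target already
// ptrace-attached and stopped.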
static bool PtraceReadLong(pid_t pid, uint64_t addr, long* value) {
  // ptrace() returns -1 and sets errno when the operation fails.
  // To disambiguate -1 from a valid result, we clear errno beforehand.
  errno = 0;
  *value = ptrace(PTRACE_PEEKTEXT, pid, reinterpret_cast<void*>(addr), nullptr);
  if (*value == -1 && errno) {
    return false;
  }
  return true;
}

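// Fallback read path built on PtraceReadLong(): copies `bytes` bytes starting
// at `addr` one word at a time, handling an unaligned prefix and suffix.
// Returns the number of bytes successfully copied.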
static size_t PtraceRead(pid_t pid, uint64_t addr, void* dst, size_t bytes) {
  // Make sure that there is no overflow.
  uint64_t max_size;
  if (__builtin_add_overflow(addr, bytes, &max_size)) {
    return 0;
  }

  size_t bytes_read = 0;
  long data;
  size_t align_bytes = addr & (sizeof(long) - 1);
  if (align_bytes != 0) {
    if (!PtraceReadLong(pid, addr & ~(sizeof(long) - 1), &data)) {
      return 0;
    }
    size_t copy_bytes = std::min(sizeof(long) - align_bytes, bytes);
    memcpy(dst, reinterpret_cast<uint8_t*>(&data) + align_bytes, copy_bytes);
    addr += copy_bytes;
    dst = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(dst) + copy_bytes);
    bytes -= copy_bytes;
    bytes_read += copy_bytes;
  }

  for (size_t i = 0; i < bytes / sizeof(long); i++) {
    if (!PtraceReadLong(pid, addr, &data)) {
      return bytes_read;
    }
    memcpy(dst, &data, sizeof(long));
    dst = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(dst) + sizeof(long));
    addr += sizeof(long);
    bytes_read += sizeof(long);
  }

  size_t left_over = bytes & (sizeof(long) - 1);
  if (left_over) {
    if (!PtraceReadLong(pid, addr, &data)) {
      return bytes_read;
    }
    memcpy(dst, &data, left_over);
    bytes_read += left_over;
  }
  return bytes_read;
}

bool Memory::ReadFully(uint64_t addr, void* dst, size_t size) {
  size_t rc = Read(addr, dst, size);
  return rc == size;
}

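// Reads a null-terminated string starting at `addr`, examining at most
// `max_read` bytes. Returns false if no terminator is found or the memory
// cannot be read.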
bool Memory::ReadString(uint64_t addr, std::string* dst, size_t max_read) {
  char buffer[256];  // Large enough for 99% of symbol names.
  size_t size = 0;   // Number of bytes which were read into the buffer.
  for (size_t offset = 0; offset < max_read; offset += size) {
    // Look for the null-terminator first, so we can allocate a string of the exact size.
    // If we know the end of the valid memory range, do the reads in larger blocks.
    size_t read = std::min(sizeof(buffer), max_read - offset);
    size = Read(addr + offset, buffer, read);
    if (size == 0) {
      return false;  // We have not found the end of the string yet and we cannot read more data.
    }
    size_t length = strnlen(buffer, size);  // Index of the null-terminator.
    if (length < size) {
      // We found the null-terminator. Allocate the string and set its content.
      if (offset == 0) {
        // We did just a single read, so the buffer already contains the whole string.
        dst->assign(buffer, length);
        return true;
      } else {
        // The buffer contains only the last block. Read the whole string again.
        dst->assign(offset + length, '\0');
        return ReadFully(addr, dst->data(), dst->size());
      }
    }
  }
  return false;
}

std::shared_ptr<Memory> Memory::CreateFileMemory(const std::string& path, uint64_t offset,
                                                 uint64_t size) {
  auto memory = std::make_shared<MemoryFileAtOffset>();

  if (memory->Init(path, offset, size)) {
    return memory;
  }

  return nullptr;
}

std::shared_ptr<Memory> Memory::CreateProcessMemoryLocalUnsafe() {
  return std::shared_ptr<Memory>(new MemoryLocalUnsafe());
}

std::shared_ptr<Memory> Memory::CreateProcessMemory(pid_t pid) {
  if (pid == getpid()) {
    return std::shared_ptr<Memory>(new MemoryLocal());
  }
  return std::shared_ptr<Memory>(new MemoryRemote(pid));
}

std::shared_ptr<Memory> Memory::CreateProcessMemoryCached(pid_t pid) {
  if (pid == getpid()) {
    return std::shared_ptr<Memory>(new MemoryCache(new MemoryLocal()));
  }
  return std::shared_ptr<Memory>(new MemoryCache(new MemoryRemote(pid)));
}

std::shared_ptr<Memory> Memory::CreateProcessMemoryThreadCached(pid_t pid) {
  if (pid == getpid()) {
    return std::shared_ptr<Memory>(new MemoryThreadCache(new MemoryLocal()));
  }
  return std::shared_ptr<Memory>(new MemoryThreadCache(new MemoryRemote(pid)));
}

std::shared_ptr<Memory> Memory::CreateOfflineMemory(const uint8_t* data, uint64_t start,
                                                    uint64_t end) {
  return std::shared_ptr<Memory>(new MemoryOfflineBuffer(data, start, end));
}

size_t MemoryBuffer::Read(uint64_t addr, void* dst, size_t size) {
  if (addr < offset_) {
    return 0;
  }
  addr -= offset_;
  size_t raw_size = raw_.size();
  if (addr >= raw_size) {
    return 0;
  }

  size_t bytes_left = raw_size - static_cast<size_t>(addr);
  size_t actual_len = std::min(bytes_left, size);
  memcpy(dst, &raw_[addr], actual_len);
  return actual_len;
}

uint8_t* MemoryBuffer::GetPtr(size_t addr) {
  if (addr < offset_) {
    return nullptr;
  }
  addr -= offset_;
  if (addr < raw_.size()) {
    return &raw_[addr];
  }
  return nullptr;
}

MemoryFileAtOffset::~MemoryFileAtOffset() {
  Clear();
}

void MemoryFileAtOffset::Clear() {
  if (data_) {
    munmap(&data_[-offset_], size_ + offset_);
    data_ = nullptr;
  }
}

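// Maps the file read-only starting at the page that contains `offset`. The
// sub-page remainder is kept in offset_ so that data_ points exactly at byte
// `offset` of the file.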
bool MemoryFileAtOffset::Init(const std::string& file, uint64_t offset, uint64_t size) {
  // Clear out any previous data if it exists.
  Clear();

  android::base::unique_fd fd(TEMP_FAILURE_RETRY(open(file.c_str(), O_RDONLY | O_CLOEXEC)));
  if (fd == -1) {
    return false;
  }
  struct stat buf;
  if (fstat(fd, &buf) == -1) {
    return false;
  }
  if (offset >= static_cast<uint64_t>(buf.st_size)) {
    return false;
  }

  offset_ = offset & (getpagesize() - 1);
  uint64_t aligned_offset = offset & ~(getpagesize() - 1);
  if (aligned_offset > static_cast<uint64_t>(buf.st_size) ||
      offset > static_cast<uint64_t>(buf.st_size)) {
    return false;
  }

  size_ = buf.st_size - aligned_offset;
  uint64_t max_size;
  if (!__builtin_add_overflow(size, offset_, &max_size) && max_size < size_) {
    // Truncate the mapped size.
    size_ = max_size;
  }
  void* map = mmap(nullptr, size_, PROT_READ, MAP_PRIVATE, fd, aligned_offset);
  if (map == MAP_FAILED) {
    return false;
  }

  data_ = &reinterpret_cast<uint8_t*>(map)[offset_];
  size_ -= offset_;

  return true;
}

size_t MemoryFileAtOffset::Read(uint64_t addr, void* dst, size_t size) {
  if (addr >= size_) {
    return 0;
  }

  size_t bytes_left = size_ - static_cast<size_t>(addr);
  const unsigned char* actual_base = static_cast<const unsigned char*>(data_) + addr;
  size_t actual_len = std::min(bytes_left, size);

  memcpy(dst, actual_base, actual_len);
  return actual_len;
}

size_t MemoryRemote::Read(uint64_t addr, void* dst, size_t size) {
#if !defined(__LP64__)
  // Cannot read an address greater than 32 bits in a 32 bit context.
  if (addr > UINT32_MAX) {
    return 0;
  }
#endif

  size_t (*read_func)(pid_t, uint64_t, void*, size_t) =
      reinterpret_cast<size_t (*)(pid_t, uint64_t, void*, size_t)>(read_redirect_func_.load());
  if (read_func != nullptr) {
    return read_func(pid_, addr, dst, size);
  } else {
    // Prefer process_vm_readv and try it first. If it doesn't work, fall back
    // to the ptrace-based read. Whichever one returns at least some data is
    // set as the permanent function to use.
    // This assumes that if process_vm_readv works once, it will continue
    // to work.
    size_t bytes = ProcessVmRead(pid_, addr, dst, size);
    if (bytes > 0) {
      read_redirect_func_ = reinterpret_cast<uintptr_t>(ProcessVmRead);
      return bytes;
    }
    bytes = PtraceRead(pid_, addr, dst, size);
    if (bytes > 0) {
      read_redirect_func_ = reinterpret_cast<uintptr_t>(PtraceRead);
    }
    return bytes;
  }
}

size_t MemoryLocal::Read(uint64_t addr, void* dst, size_t size) {
  return ProcessVmRead(getpid(), addr, dst, size);
}

MemoryRange::MemoryRange(const std::shared_ptr<Memory>& memory, uint64_t begin, uint64_t length,
                         uint64_t offset)
    : memory_(memory), begin_(begin), length_(length), offset_(offset) {}

size_t MemoryRange::Read(uint64_t addr, void* dst, size_t size) {
  if (addr < offset_) {
    return 0;
  }

  uint64_t read_offset = addr - offset_;
  if (read_offset >= length_) {
    return 0;
  }

  uint64_t read_length = std::min(static_cast<uint64_t>(size), length_ - read_offset);
  uint64_t read_addr;
  if (__builtin_add_overflow(read_offset, begin_, &read_addr)) {
    return 0;
  }

  return memory_->Read(read_addr, dst, read_length);
}

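// Takes ownership of `memory` and indexes it by the end address of its range
// (offset + length). Returns false, and deletes `memory`, if an entry with the
// same end address already exists.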
bool MemoryRanges::Insert(MemoryRange* memory) {
  uint64_t last_addr;
  if (__builtin_add_overflow(memory->offset(), memory->length(), &last_addr)) {
    // This should never happen in the real world. However, it is possible
    // that an offset in a mapped-in segment could be crafted such that
    // this value overflows. In that case, clamp the value to the max uint64
    // value.
    last_addr = UINT64_MAX;
  }
  auto entry = maps_.try_emplace(last_addr, memory);
  if (entry.second) {
    return true;
  }
  delete memory;
  return false;
}

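// maps_ is keyed by each range's exclusive end address, so upper_bound(addr)
// finds the first range ending above addr; that range's own Read() returns 0
// if addr lies below its start.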
size_t MemoryRanges::Read(uint64_t addr, void* dst, size_t size) {
  auto entry = maps_.upper_bound(addr);
  if (entry != maps_.end()) {
    return entry->second->Read(addr, dst, size);
  }
  return 0;
}

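// An offline memory file begins with a uint64_t holding the start address of
// the captured range; the remaining bytes are the memory contents themselves.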
bool MemoryOffline::Init(const std::string& file, uint64_t offset) {
  auto memory_file = std::make_shared<MemoryFileAtOffset>();
  if (!memory_file->Init(file, offset)) {
    return false;
  }

  // The first uint64_t value is the start of memory.
  uint64_t start;
  if (!memory_file->ReadFully(0, &start, sizeof(start))) {
    return false;
  }

  uint64_t size = memory_file->Size();
  if (__builtin_sub_overflow(size, sizeof(start), &size)) {
    return false;
  }

  memory_ = std::make_unique<MemoryRange>(memory_file, sizeof(start), size, start);
  return true;
}

bool MemoryOffline::Init(const std::string& file, uint64_t offset, uint64_t start, uint64_t size) {
  auto memory_file = std::make_shared<MemoryFileAtOffset>();
  if (!memory_file->Init(file, offset)) {
    return false;
  }

  memory_ = std::make_unique<MemoryRange>(memory_file, 0, size, start);
  return true;
}

size_t MemoryOffline::Read(uint64_t addr, void* dst, size_t size) {
  if (!memory_) {
    return 0;
  }

  return memory_->Read(addr, dst, size);
}

MemoryOfflineBuffer::MemoryOfflineBuffer(const uint8_t* data, uint64_t start, uint64_t end)
    : data_(data), start_(start), end_(end) {}

void MemoryOfflineBuffer::Reset(const uint8_t* data, uint64_t start, uint64_t end) {
  data_ = data;
  start_ = start;
  end_ = end;
}

size_t MemoryOfflineBuffer::Read(uint64_t addr, void* dst, size_t size) {
  if (addr < start_ || addr >= end_) {
    return 0;
  }

  size_t read_length = std::min(size, static_cast<size_t>(end_ - addr));
  memcpy(dst, &data_[addr - start_], read_length);
  return read_length;
}

MemoryOfflineParts::~MemoryOfflineParts() {
  for (auto memory : memories_) {
    delete memory;
  }
}

size_t MemoryOfflineParts::Read(uint64_t addr, void* dst, size_t size) {
  if (memories_.empty()) {
    return 0;
  }

  // Try the read on each memory object; there is no support for a read that
  // spans multiple memory objects.
  for (MemoryOffline* memory : memories_) {
    size_t bytes = memory->Read(addr, dst, size);
    if (bytes != 0) {
      return bytes;
    }
  }
  return 0;
}

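// Reads through a page-granularity cache: entries are keyed by
// addr >> kCacheBits and each holds kCacheSize bytes read from impl_. Per the
// logic below, a read is allowed to spill over into at most one extra cached
// page.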
size_t MemoryCacheBase::InternalCachedRead(uint64_t addr, void* dst, size_t size,
                                           CacheDataType* cache) {
  uint64_t addr_page = addr >> kCacheBits;
  auto entry = cache->find(addr_page);
  uint8_t* cache_dst;
  if (entry != cache->end()) {
    cache_dst = entry->second;
  } else {
    cache_dst = (*cache)[addr_page];
    if (!impl_->ReadFully(addr_page << kCacheBits, cache_dst, kCacheSize)) {
      // Erase the entry.
      cache->erase(addr_page);
      return impl_->Read(addr, dst, size);
    }
  }
  size_t max_read = ((addr_page + 1) << kCacheBits) - addr;
  if (size <= max_read) {
    memcpy(dst, &cache_dst[addr & kCacheMask], size);
    return size;
  }

  // The read crossed into another cached entry. Since a read can only cross
  // into one extra cached page, duplicate the code rather than looping.
  memcpy(dst, &cache_dst[addr & kCacheMask], max_read);
  dst = &reinterpret_cast<uint8_t*>(dst)[max_read];
  addr_page++;

  entry = cache->find(addr_page);
  if (entry != cache->end()) {
    cache_dst = entry->second;
  } else {
    cache_dst = (*cache)[addr_page];
    if (!impl_->ReadFully(addr_page << kCacheBits, cache_dst, kCacheSize)) {
      // Erase the entry.
      cache->erase(addr_page);
      return impl_->Read(addr_page << kCacheBits, dst, size - max_read) + max_read;
    }
  }
  memcpy(dst, cache_dst, size - max_read);
  return size;
}

void MemoryCache::Clear() {
  std::lock_guard<std::mutex> lock(cache_lock_);
  cache_.clear();
}

size_t MemoryCache::CachedRead(uint64_t addr, void* dst, size_t size) {
  // Use a single lock since this object is not designed to perform well when
  // multiple threads read through it at the same time.
  std::lock_guard<std::mutex> lock(cache_lock_);

  return InternalCachedRead(addr, dst, size, &cache_);
}

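// Like MemoryCache, but keeps a separate cache per thread via a pthread key
// instead of sharing one cache behind a lock. If the key cannot be created,
// caching is disabled and reads go straight to the underlying memory object.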
MemoryThreadCache::MemoryThreadCache(Memory* memory) : MemoryCacheBase(memory) {
  thread_cache_ = std::make_optional<pthread_key_t>();
  if (pthread_key_create(&*thread_cache_, [](void* memory) {
        CacheDataType* cache = reinterpret_cast<CacheDataType*>(memory);
        delete cache;
      }) != 0) {
    Log::AsyncSafe("Failed to create pthread key.");
    thread_cache_.reset();
  }
}

MemoryThreadCache::~MemoryThreadCache() {
  if (thread_cache_) {
    CacheDataType* cache = reinterpret_cast<CacheDataType*>(pthread_getspecific(*thread_cache_));
    delete cache;
    pthread_key_delete(*thread_cache_);
  }
}

size_t MemoryThreadCache::CachedRead(uint64_t addr, void* dst, size_t size) {
  if (!thread_cache_) {
    return impl_->Read(addr, dst, size);
  }

  CacheDataType* cache = reinterpret_cast<CacheDataType*>(pthread_getspecific(*thread_cache_));
  if (cache == nullptr) {
    cache = new CacheDataType;
    pthread_setspecific(*thread_cache_, cache);
  }

  return InternalCachedRead(addr, dst, size, cache);
}

void MemoryThreadCache::Clear() {
  if (!thread_cache_) {
    return;
  }

  CacheDataType* cache = reinterpret_cast<CacheDataType*>(pthread_getspecific(*thread_cache_));
  if (cache != nullptr) {
    delete cache;
    pthread_setspecific(*thread_cache_, nullptr);
  }
}

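// Reads directly from the current address space with a raw memcpy. The address
// is not validated, so reading an unmapped address will fault; hence "Unsafe".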
size_t MemoryLocalUnsafe::Read(uint64_t addr, void* dst, size_t size) {
  void* raw_ptr = reinterpret_cast<void*>(addr);
  memcpy(dst, raw_ptr, size);
  return size;
}

}  // namespace unwindstack