/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <errno.h>
#include <signal.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <unistd.h>

#include <vector>

#include <android-base/file.h>
#include <android-base/test_utils.h>
#include <gtest/gtest.h>

#include "MemoryRemote.h"

#include "ForkTest.h"
#include "MemoryFake.h"
#include "PidUtils.h"
#include "TestUtils.h"

namespace unwindstack {

using MemoryRemoteTest = ForkTest;

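// Verify a simple read of a buffer that exists in the forked remote process.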
TEST_F(MemoryRemoteTest, read) {
  std::vector<uint8_t> src(1024);
  memset(src.data(), 0x4c, 1024);

  ASSERT_NO_FATAL_FAILURE(Fork());

  MemoryRemote remote(pid_);

  std::vector<uint8_t> dst(1024);
  ASSERT_TRUE(remote.ReadFully(reinterpret_cast<uint64_t>(src.data()), dst.data(), 1024));
  for (size_t i = 0; i < 1024; i++) {
    ASSERT_EQ(0x4cU, dst[i]) << "Failed at byte " << i;
  }
}

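// Verify a large read that spans many pages of the remote process.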
TEST_F(MemoryRemoteTest, read_large) {
  static constexpr size_t kTotalPages = 245;
  std::vector<uint8_t> src(kTotalPages * getpagesize());
  for (size_t i = 0; i < kTotalPages; i++) {
    memset(&src[i * getpagesize()], i, getpagesize());
  }

  ASSERT_NO_FATAL_FAILURE(Fork());

  MemoryRemote remote(pid_);

  std::vector<uint8_t> dst(kTotalPages * getpagesize());
  ASSERT_TRUE(remote.ReadFully(reinterpret_cast<uint64_t>(src.data()), dst.data(), src.size()));
  for (size_t i = 0; i < kTotalPages * getpagesize(); i++) {
    ASSERT_EQ(i / getpagesize(), dst[i]) << "Failed at byte " << i;
  }
}

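// Verify partial reads: a read that crosses into a PROT_NONE page or runs off
// the end of a map returns the bytes that could be read rather than failing.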
TEST_F(MemoryRemoteTest, read_partial) {
  char* mapping = static_cast<char*>(
      mmap(nullptr, 4 * getpagesize(), PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0));
  ASSERT_NE(MAP_FAILED, mapping);
  memset(mapping, 0x4c, 4 * getpagesize());
  ASSERT_EQ(0, mprotect(mapping + getpagesize(), getpagesize(), PROT_NONE));
  ASSERT_EQ(0, munmap(mapping + 3 * getpagesize(), getpagesize()));

  ASSERT_NO_FATAL_FAILURE(Fork());

  // Unmap from our process.
  ASSERT_EQ(0, munmap(mapping, 3 * getpagesize()));

  MemoryRemote remote(pid_);

  std::vector<uint8_t> dst(4096);
  size_t bytes =
      remote.Read(reinterpret_cast<uint64_t>(mapping + getpagesize() - 1024), dst.data(), 4096);
  // Some read methods can read PROT_NONE maps, allow that.
  ASSERT_LE(1024U, bytes);
  for (size_t i = 0; i < bytes; i++) {
    ASSERT_EQ(0x4cU, dst[i]) << "Failed at byte " << i;
  }

  // Now verify that reading stops at the end of a map.
  bytes =
      remote.Read(reinterpret_cast<uint64_t>(mapping + 3 * getpagesize() - 1024), dst.data(), 4096);
  ASSERT_EQ(1024U, bytes);
  for (size_t i = 0; i < bytes; i++) {
    ASSERT_EQ(0x4cU, dst[i]) << "Failed at byte " << i;
  }
}

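// Verify that reads which touch an unmapped page fail, while reads that end
// exactly at the page boundary still succeed.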
TEST_F(MemoryRemoteTest, read_fail) {
  int pagesize = getpagesize();
  void* src = mmap(nullptr, pagesize * 2, PROT_READ | PROT_WRITE, MAP_ANON | MAP_PRIVATE, -1, 0);
  ASSERT_NE(MAP_FAILED, src);
  memset(src, 0x4c, pagesize * 2);
  // Put a hole right after the first page.
  ASSERT_EQ(0, munmap(reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(src) + pagesize),
                      pagesize));

  ASSERT_NO_FATAL_FAILURE(Fork());

  MemoryRemote remote(pid_);

  std::vector<uint8_t> dst(pagesize);
  ASSERT_TRUE(remote.ReadFully(reinterpret_cast<uint64_t>(src), dst.data(), pagesize));
  for (size_t i = 0; i < 1024; i++) {
    ASSERT_EQ(0x4cU, dst[i]) << "Failed at byte " << i;
  }

  ASSERT_FALSE(remote.ReadFully(reinterpret_cast<uint64_t>(src) + pagesize, dst.data(), 1));
  ASSERT_TRUE(remote.ReadFully(reinterpret_cast<uint64_t>(src) + pagesize - 1, dst.data(), 1));
  ASSERT_FALSE(remote.ReadFully(reinterpret_cast<uint64_t>(src) + pagesize - 4, dst.data(), 8));

  // Check that the overflow condition is caught properly.
  ASSERT_FALSE(remote.ReadFully(UINT64_MAX - 100, dst.data(), 200));

  ASSERT_EQ(0, munmap(src, pagesize));
}

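// Verify that a read whose address plus size would overflow a uint64_t fails.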
TEST_F(MemoryRemoteTest, read_overflow) {
  ASSERT_NO_FATAL_FAILURE(Fork());

  MemoryRemote remote(pid_);

  // Check that the overflow condition is caught properly.
  std::vector<uint8_t> dst(200);
  ASSERT_FALSE(remote.ReadFully(UINT64_MAX - 100, dst.data(), 200));
}

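// Verify that reads of address zero fail.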
TEST_F(MemoryRemoteTest, read_illegal) {
  ASSERT_NO_FATAL_FAILURE(Fork());

  MemoryRemote remote(pid_);

  std::vector<uint8_t> dst(100);
  ASSERT_FALSE(remote.ReadFully(0, dst.data(), 1));
  ASSERT_FALSE(remote.ReadFully(0, dst.data(), 100));
}

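// Verify a read across a PROT_NONE hole in the middle of a mapping: the data
// before the hole is returned and any bytes not reported as read are left
// untouched.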
TEST_F(MemoryRemoteTest, read_mprotect_hole) {
  size_t page_size = getpagesize();
  void* mapping =
      mmap(nullptr, 3 * page_size, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
  ASSERT_NE(MAP_FAILED, mapping);
  memset(mapping, 0xFF, 3 * page_size);
  ASSERT_EQ(0, mprotect(static_cast<char*>(mapping) + page_size, page_size, PROT_NONE));

  ASSERT_NO_FATAL_FAILURE(Fork());

  ASSERT_EQ(0, munmap(mapping, 3 * page_size));

  MemoryRemote remote(pid_);
  std::vector<uint8_t> dst(page_size * 4, 0xCC);
  size_t read_size = remote.Read(reinterpret_cast<uint64_t>(mapping), dst.data(), page_size * 3);
  // Some read methods can read PROT_NONE maps, allow that.
  ASSERT_LE(page_size, read_size);
  for (size_t i = 0; i < read_size; ++i) {
    ASSERT_EQ(0xFF, dst[i]);
  }
  for (size_t i = read_size; i < dst.size(); ++i) {
    ASSERT_EQ(0xCC, dst[i]);
  }
}

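// Verify a read across an unmapped hole in the middle of a mapping: only the
// first page is returned and any bytes not reported as read are left untouched.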
TEST_F(MemoryRemoteTest, read_munmap_hole) {
  size_t page_size = getpagesize();
  void* mapping =
      mmap(nullptr, 3 * page_size, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
  ASSERT_NE(MAP_FAILED, mapping);
  memset(mapping, 0xFF, 3 * page_size);
  ASSERT_EQ(0, munmap(static_cast<char*>(mapping) + page_size, page_size));

  ASSERT_NO_FATAL_FAILURE(Fork());

  ASSERT_EQ(0, munmap(mapping, page_size));
  ASSERT_EQ(0, munmap(static_cast<char*>(mapping) + 2 * page_size, page_size));

  MemoryRemote remote(pid_);
  std::vector<uint8_t> dst(page_size * 4, 0xCC);
  size_t read_size = remote.Read(reinterpret_cast<uint64_t>(mapping), dst.data(), page_size * 3);
  ASSERT_EQ(page_size, read_size);
  for (size_t i = 0; i < read_size; ++i) {
    ASSERT_EQ(0xFF, dst[i]);
  }
  for (size_t i = read_size; i < dst.size(); ++i) {
    ASSERT_EQ(0xCC, dst[i]);
  }
}

// Verify that the MemoryRemote object chooses the proper memory read
// function, either process_vm_readv or ptrace.
TEST_F(MemoryRemoteTest, read_choose_correctly) {
  size_t page_size = getpagesize();
  void* mapping =
      mmap(nullptr, 2 * page_size, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
  ASSERT_NE(MAP_FAILED, mapping);
  memset(mapping, 0xFC, 2 * page_size);
  ASSERT_EQ(0, mprotect(static_cast<char*>(mapping), page_size, PROT_NONE));

  ASSERT_NO_FATAL_FAILURE(Fork());

  ASSERT_EQ(0, munmap(mapping, 2 * page_size));

  // We know that process_vm_readv of an mprotect'd PROT_NONE region will fail.
  // Read from the PROT_NONE area first to force the choice of ptrace.
  MemoryRemote remote_ptrace(pid_);
  uint32_t value;
  size_t bytes = remote_ptrace.Read(reinterpret_cast<uint64_t>(mapping), &value, sizeof(value));
  ASSERT_EQ(sizeof(value), bytes);
  ASSERT_EQ(0xfcfcfcfcU, value);
  bytes = remote_ptrace.Read(reinterpret_cast<uint64_t>(mapping) + page_size, &value, sizeof(value));
  ASSERT_EQ(sizeof(value), bytes);
  ASSERT_EQ(0xfcfcfcfcU, value);
  bytes = remote_ptrace.Read(reinterpret_cast<uint64_t>(mapping), &value, sizeof(value));
  ASSERT_EQ(sizeof(value), bytes);
  ASSERT_EQ(0xfcfcfcfcU, value);

  // Now verify that choosing process_vm_readv results in failing reads of
  // the PROT_NONE part of the map. Read from a valid map first, which
  // should prefer process_vm_readv, and keep that as the read function.
  MemoryRemote remote_readv(pid_);
  bytes = remote_readv.Read(reinterpret_cast<uint64_t>(mapping) + page_size, &value, sizeof(value));
  ASSERT_EQ(sizeof(value), bytes);
  ASSERT_EQ(0xfcfcfcfcU, value);
  bytes = remote_readv.Read(reinterpret_cast<uint64_t>(mapping), &value, sizeof(value));
  ASSERT_EQ(0U, bytes);
  bytes = remote_readv.Read(reinterpret_cast<uint64_t>(mapping) + page_size, &value, sizeof(value));
  ASSERT_EQ(sizeof(value), bytes);
  ASSERT_EQ(0xfcfcfcfcU, value);
}

}  // namespace unwindstack