1 /**
2 * Copyright (C) 2019 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
#define _GNU_SOURCE
#include <sys/types.h>
#include <sys/wait.h>
#include <sys/mman.h>
#include <dlfcn.h>
#include <signal.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include "memutils.h"
26
/* Quarantine list of freed-but-not-released allocations; entries here have
 * their pages made inaccessible so a use-after-free faults. Only populated
 * when CHECK_USE_AFTER_FREE_WITH_WINDOW_SIZE is defined. */
map_struct_t s_free_list[MAX_ENTRIES] = {};
/* Bookkeeping for every live allocation handed out by the overloaded
 * allocator entry points in this file. */
map_struct_t s_mem_map[MAX_ENTRIES] = {};
/* Cleanup run at process exit (and from the SIGSEGV handler): re-enable
 * access to every guard-protected region so teardown does not fault, and,
 * when the use-after-free quarantine is enabled, release everything still
 * parked on s_free_list back to the real allocator. */
void exit_handler(void) {
    size_t page_size = getpagesize();
    /* Unprotect all still-live tracked allocations. */
    for (int i = 0; i < s_mem_map_index; i++) {
        if (NULL != s_mem_map[i].start_ptr) {
            ENABLE_MEM_ACCESS(s_mem_map[i].start_ptr,
                              (s_mem_map[i].num_pages * page_size));
        }
    }
#ifdef CHECK_USE_AFTER_FREE_WITH_WINDOW_SIZE
    /* Drain the quarantine: unprotect, release, and clear each slot. */
    for (int i = 0; i < MAX_ENTRIES; i++) {
        if (NULL != s_free_list[i].start_ptr) {
            ENABLE_MEM_ACCESS(s_free_list[i].start_ptr,
                              (s_free_list[i].num_pages * page_size));
            real_free(s_free_list[i].start_ptr);
            memset(&s_free_list[i], 0, sizeof(map_struct_t));
        }
    }
#endif /* CHECK_USE_AFTER_FREE_WITH_WINDOW_SIZE */
}
48
/* SIGSEGV trampoline: undo all page protections first (so whatever runs
 * next can inspect memory without cascading faults), then chain to the
 * previously installed handler saved in old_sa.
 * NOTE(review): this assumes the previous handler was registered with
 * SA_SIGINFO; if the prior disposition was SIG_DFL or SIG_IGN,
 * old_sa.sa_sigaction is not a callable handler -- TODO confirm. */
void sigsegv_handler(int signum, siginfo_t *info, void* context) {
    exit_handler();
    (*old_sa.sa_sigaction)(signum, info, context);
}
53
sighandler_init(void)54 void sighandler_init(void) {
55 sigemptyset(&new_sa.sa_mask);
56 new_sa.sa_flags = SA_SIGINFO;
57 new_sa.sa_sigaction = sigsegv_handler;
58 sigaction(SIGSEGV, &new_sa, &old_sa);
59 }
60
/* Lazy one-time bootstrap, called from every overloaded entry point.
 * Resolves the next (real) allocator implementations in link order via
 * RTLD_NEXT, clears the allocation map, and installs the SIGSEGV handler
 * plus the atexit cleanup. On any dlsym failure it returns early WITHOUT
 * setting s_memutils_initialized, so the next allocator call retries. */
void memutils_init(void) {
    real_memalign = dlsym(RTLD_NEXT, "memalign");
    if (NULL == real_memalign) {
        return;
    }
#ifndef DISABLE_MALLOC_OVERLOADING
    real_calloc = dlsym(RTLD_NEXT, "calloc");
    if (NULL == real_calloc) {
        return;
    }
    real_malloc = dlsym(RTLD_NEXT, "malloc");
    if (NULL == real_malloc) {
        return;
    }
    real_realloc = dlsym(RTLD_NEXT, "realloc");
    if (NULL == real_realloc) {
        return;
    }
#endif /* DISABLE_MALLOC_OVERLOADING */
    real_free = dlsym(RTLD_NEXT, "free");
    if (NULL == real_free) {
        return;
    }
    memset(&s_mem_map, 0, MAX_ENTRIES * sizeof(map_struct_t));
    sighandler_init();
    atexit(exit_handler);
    /* Only mark initialized once every hook is resolved. */
    s_memutils_initialized = 1;
}
89
memalign(size_t alignment,size_t size)90 void *memalign(size_t alignment, size_t size) {
91 if (s_memutils_initialized == 0) {
92 memutils_init();
93 }
94 #ifdef ENABLE_SELECTIVE_OVERLOADING
95 if ((enable_selective_overload & ENABLE_MEMALIGN_CHECK) != ENABLE_MEMALIGN_CHECK) {
96 return real_memalign(alignment, size);
97 }
98 #endif /* ENABLE_SELECTIVE_OVERLOADING */
99 char* start_ptr;
100 char* mem_ptr;
101 size_t total_size;
102 size_t aligned_size = size;
103 size_t num_pages;
104 size_t page_size = getpagesize();
105
106 if (s_mem_map_index == MAX_ENTRIES) {
107 return real_memalign(alignment, size);
108 }
109
110 if (alignment > page_size) {
111 return real_memalign(alignment, size);
112 }
113
114 if ((0 == page_size) || (0 == alignment) || (0 == size)) {
115 return real_memalign(alignment, size);
116 }
117 #ifdef CHECK_OVERFLOW
118 /* User specified alignment is not respected and is overridden by
119 * MINIMUM_ALIGNMENT. This is required to catch OOB read when read offset
120 * is less than user specified alignment. "MINIMUM_ALIGNMENT" helps to
121 * avoid bus errors due to non-aligned memory. */
122 if (0 != (size % MINIMUM_ALIGNMENT)) {
123 aligned_size = size + (MINIMUM_ALIGNMENT - (size % MINIMUM_ALIGNMENT));
124 }
125 #endif
126
127 if (0 != (aligned_size % page_size)) {
128 num_pages = (aligned_size / page_size) + 2;
129 } else {
130 num_pages = (aligned_size / page_size) + 1;
131 }
132
133 total_size = (num_pages * page_size);
134 start_ptr = (char *) real_memalign(page_size, total_size);
135 #ifdef CHECK_OVERFLOW
136 mem_ptr = (char *) start_ptr + ((num_pages - 1) * page_size) - aligned_size;
137 DISABLE_MEM_ACCESS((start_ptr + ((num_pages - 1) * page_size)), page_size);
138 #endif /* CHECK_OVERFLOW */
139 #ifdef CHECK_UNDERFLOW
140 mem_ptr = (char *) start_ptr + page_size;
141 DISABLE_MEM_ACCESS(start_ptr, page_size);
142 #endif /* CHECK_UNDERFLOW */
143 s_mem_map[s_mem_map_index].start_ptr = start_ptr;
144 s_mem_map[s_mem_map_index].mem_ptr = mem_ptr;
145 s_mem_map[s_mem_map_index].num_pages = num_pages;
146 s_mem_map[s_mem_map_index].mem_size = size;
147 s_mem_map_index++;
148 memset(mem_ptr, INITIAL_VAL, size);
149 return mem_ptr;
150 }
151
152 #ifndef DISABLE_MALLOC_OVERLOADING
malloc(size_t size)153 void *malloc(size_t size) {
154 if (s_memutils_initialized == 0) {
155 memutils_init();
156 }
157 #ifdef ENABLE_SELECTIVE_OVERLOADING
158 if ((enable_selective_overload & ENABLE_MALLOC_CHECK) != ENABLE_MALLOC_CHECK) {
159 return real_malloc(size);
160 }
161 #endif /* ENABLE_SELECTIVE_OVERLOADING */
162 return memalign(MINIMUM_ALIGNMENT, size);
163 }
164
calloc(size_t nitems,size_t size)165 void *calloc(size_t nitems, size_t size) {
166 if (s_memutils_initialized == 0) {
167 memutils_init();
168 }
169 #ifdef ENABLE_SELECTIVE_OVERLOADING
170 if ((enable_selective_overload & ENABLE_CALLOC_CHECK) != ENABLE_CALLOC_CHECK) {
171 return real_calloc(nitems, size);
172 }
173 #endif /* ENABLE_SELECTIVE_OVERLOADING */
174 void *ptr = memalign(sizeof(size_t), (nitems * size));
175 if (ptr)
176 memset(ptr, 0, (nitems * size));
177 return ptr;
178 }
179
/* Overloaded realloc: for pointers we are tracking, emulate realloc as
 * malloc + copy + free so the replacement block also gets guard-page
 * protection; untracked pointers (including NULL) defer to real_realloc,
 * which handles the NULL-means-malloc case itself. */
void *realloc(void *ptr, size_t size) {
    if (s_memutils_initialized == 0) {
        memutils_init();
    }
#ifdef ENABLE_SELECTIVE_OVERLOADING
    if ((enable_selective_overload & ENABLE_REALLOC_CHECK) != ENABLE_REALLOC_CHECK) {
        return real_realloc(ptr, size);
    }
#endif /* ENABLE_SELECTIVE_OVERLOADING */
    if (ptr != NULL) {
        int i = 0;
        for (i = 0; i < s_mem_map_index; i++) {
            if (ptr == s_mem_map[i].mem_ptr) {
                /* Allocate the new block first so failure leaves the
                 * original allocation intact (standard realloc contract). */
                void* temp = malloc(size);
                if (temp == NULL) {
                    return NULL;
                }
                /* Copy min(old size, new size) bytes. */
                if (s_mem_map[i].mem_size > size) {
                    memcpy(temp, ptr, size);
                } else {
                    memcpy(temp, ptr, s_mem_map[i].mem_size);
                }
                /* Release the old tracked block only after the copy. */
                free(s_mem_map[i].mem_ptr);
                return temp;
            }
        }
    }
    return real_realloc(ptr, size);
}
209 #endif /* DISABLE_MALLOC_OVERLOADING */
210
free(void * ptr)211 void free(void *ptr) {
212 if (s_memutils_initialized == 0) {
213 memutils_init();
214 }
215 #ifdef ENABLE_SELECTIVE_OVERLOADING
216 if ((enable_selective_overload & ENABLE_FREE_CHECK) != ENABLE_FREE_CHECK) {
217 return real_free(ptr);
218 }
219 #endif /* ENABLE_SELECTIVE_OVERLOADING */
220 if (ptr != NULL) {
221 int i = 0;
222 size_t page_size = getpagesize();
223 for (i = 0; i < s_mem_map_index; i++) {
224 if (ptr == s_mem_map[i].mem_ptr) {
225 #ifdef CHECK_USE_AFTER_FREE_WITH_WINDOW_SIZE
226 s_free_list[s_free_write_index].start_ptr =
227 s_mem_map[i].start_ptr;
228 s_free_list[s_free_write_index].mem_ptr = s_mem_map[i].mem_ptr;
229 s_free_list[s_free_write_index].num_pages =
230 s_mem_map[i].num_pages;
231 s_free_list[s_free_write_index].mem_size = s_mem_map[i].mem_size;
232 s_free_write_index++;
233 s_free_list_size += s_mem_map[i].mem_size;
234 DISABLE_MEM_ACCESS(s_mem_map[i].start_ptr,
235 (s_mem_map[i].num_pages * page_size));
236 memset(&s_mem_map[i], 0, sizeof(map_struct_t));
237 while (s_free_list_size > CHECK_USE_AFTER_FREE_WITH_WINDOW_SIZE) {
238 ENABLE_MEM_ACCESS(
239 s_free_list[s_free_read_index].start_ptr,
240 (s_free_list[s_free_read_index].num_pages * page_size));
241 real_free(s_free_list[s_free_read_index].start_ptr);
242 s_free_list_size -= s_free_list[s_free_read_index].mem_size;
243 memset(&s_free_list[s_free_read_index], 0,
244 sizeof(map_struct_t));
245 s_free_read_index++;
246 if ((s_free_read_index == MAX_ENTRIES)
247 || (s_free_read_index >= s_free_write_index)) {
248 break;
249 }
250 }
251 return;
252 #else
253 ENABLE_MEM_ACCESS(s_mem_map[i].start_ptr,
254 (s_mem_map[i].num_pages * page_size));
255 real_free(s_mem_map[i].start_ptr);
256 memset(&s_mem_map[i], 0, sizeof(map_struct_t));
257 return;
258 #endif /* CHECK_USE_AFTER_FREE_WITH_WINDOW_SIZE */
259 }
260 }
261 }
262 real_free(ptr);
263 return;
264 }
265