/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "fault_handler.h"

#include <sys/ucontext.h>

#include "arch/instruction_set.h"
#include "art_method.h"
#include "base/hex_dump.h"
#include "base/logging.h"  // For VLOG.
#include "base/macros.h"
#include "base/pointer_size.h"
#include "oat/oat_quick_method_header.h"
#include "runtime_globals.h"
#include "thread-current-inl.h"

#if defined(__APPLE__)
#define ucontext __darwin_ucontext

#if defined(__x86_64__)
// 64 bit mac build.
#define CTX_ESP uc_mcontext->__ss.__rsp
#define CTX_EIP uc_mcontext->__ss.__rip
#define CTX_EAX uc_mcontext->__ss.__rax
#define CTX_METHOD uc_mcontext->__ss.__rdi
#define CTX_RDI uc_mcontext->__ss.__rdi
#define CTX_JMP_BUF uc_mcontext->__ss.__rdi
#else
// 32 bit mac build.
#define CTX_ESP uc_mcontext->__ss.__esp
#define CTX_EIP uc_mcontext->__ss.__eip
#define CTX_EAX uc_mcontext->__ss.__eax
#define CTX_METHOD uc_mcontext->__ss.__eax
#define CTX_JMP_BUF uc_mcontext->__ss.__eax
#endif

#elif defined(__x86_64__)
// 64 bit linux build.
#define CTX_ESP uc_mcontext.gregs[REG_RSP]
#define CTX_EIP uc_mcontext.gregs[REG_RIP]
#define CTX_EAX uc_mcontext.gregs[REG_RAX]
#define CTX_METHOD uc_mcontext.gregs[REG_RDI]
#define CTX_RDI uc_mcontext.gregs[REG_RDI]
#define CTX_JMP_BUF uc_mcontext.gregs[REG_RDI]
#else
// 32 bit linux build.
#define CTX_ESP uc_mcontext.gregs[REG_ESP]
#define CTX_EIP uc_mcontext.gregs[REG_EIP]
#define CTX_EAX uc_mcontext.gregs[REG_EAX]
#define CTX_METHOD uc_mcontext.gregs[REG_EAX]
#define CTX_JMP_BUF uc_mcontext.gregs[REG_EAX]
#endif

//
// X86 (and X86_64) specific fault handler functions.
//
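// The CTX_ESP and CTX_EIP macros defined above map the platform-specific
// ucontext_t layout to common names, so the handlers below can read and write
// the saved stack pointer and program counter (uc->CTX_ESP and uc->CTX_EIP)
// uniformly on every supported target.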

namespace art HIDDEN {

extern "C" void art_quick_throw_null_pointer_exception_from_signal();
extern "C" void art_quick_throw_stack_overflow();
extern "C" void art_quick_test_suspend();

// Get the size of an instruction in bytes.
// Return 0 if the instruction is not handled.
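// Examples of sequences this decoder handles (bytes in hex):
//   85 00        test eax, [eax]          -> 2 (opcode + modrm)
//   8B 45 08     mov eax, [ebp + 8]       -> 3 (opcode + modrm + disp8)
//   48 8B 07     mov rax, [rdi] (x86-64)  -> 3 (REX.W prefix + opcode + modrm)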
static uint32_t GetInstructionSize(const uint8_t* pc, size_t bytes) {
#define FETCH_OR_SKIP_BYTE(assignment)  \
  do {                                  \
    if (bytes == 0u) {                  \
      return 0u;                        \
    }                                   \
    (assignment);                       \
    ++pc;                               \
    --bytes;                            \
  } while (0)
#define FETCH_BYTE(var) FETCH_OR_SKIP_BYTE((var) = *pc)
#define SKIP_BYTE() FETCH_OR_SKIP_BYTE((void)0)

#if defined(__x86_64)
  const bool x86_64 = true;
#else
  const bool x86_64 = false;
#endif

  const uint8_t* startpc = pc;

  uint8_t opcode;
  FETCH_BYTE(opcode);
  uint8_t modrm;
  bool has_modrm = false;
  bool two_byte = false;
  uint32_t displacement_size = 0;
  uint32_t immediate_size = 0;
  bool operand_size_prefix = false;

  // Prefixes.
  while (true) {
    bool prefix_present = false;
    switch (opcode) {
      // Group 3
      case 0x66:
        operand_size_prefix = true;
        FALLTHROUGH_INTENDED;

      // Group 1
      case 0xf0:
      case 0xf2:
      case 0xf3:

      // Group 2
      case 0x2e:
      case 0x36:
      case 0x3e:
      case 0x26:
      case 0x64:
      case 0x65:

      // Group 4
      case 0x67:
        FETCH_BYTE(opcode);
        prefix_present = true;
        break;
    }
    if (!prefix_present) {
      break;
    }
  }

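  // On x86-64, a single REX prefix byte (0x40-0x4f) may follow the legacy
  // prefixes and precede the opcode.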
  if (x86_64 && opcode >= 0x40 && opcode <= 0x4f) {
    FETCH_BYTE(opcode);
  }

  if (opcode == 0x0f) {
    // Two byte opcode
    two_byte = true;
    FETCH_BYTE(opcode);
  }

  bool unhandled_instruction = false;

  if (two_byte) {
    switch (opcode) {
      case 0x10:        // vmovsd/ss
      case 0x11:        // vmovsd/ss
      case 0xb6:        // movzx
      case 0xb7:
      case 0xbe:        // movsx
      case 0xbf:
        FETCH_BYTE(modrm);
        has_modrm = true;
        break;
      default:
        unhandled_instruction = true;
        break;
    }
  } else {
    switch (opcode) {
      case 0x88:        // mov byte
      case 0x89:        // mov
      case 0x8b:
      case 0x38:        // cmp with memory.
      case 0x39:
      case 0x3a:
      case 0x3b:
      case 0x3c:
      case 0x3d:
      case 0x85:        // test.
        FETCH_BYTE(modrm);
        has_modrm = true;
        break;

      case 0x80:        // group 1, byte immediate.
      case 0x83:
      case 0xc6:
        FETCH_BYTE(modrm);
        has_modrm = true;
        immediate_size = 1;
        break;

      case 0x81:        // group 1, word immediate.
      case 0xc7:        // mov
        FETCH_BYTE(modrm);
        has_modrm = true;
        immediate_size = operand_size_prefix ? 2 : 4;
        break;

      case 0xf6:
      case 0xf7:
        FETCH_BYTE(modrm);
        has_modrm = true;
        switch ((modrm >> 3) & 7) {  // Extract "reg/opcode" from "modr/m".
          case 0:  // test
            immediate_size = (opcode == 0xf6) ? 1 : (operand_size_prefix ? 2 : 4);
            break;
          case 2:  // not
          case 3:  // neg
          case 4:  // mul
          case 5:  // imul
          case 6:  // div
          case 7:  // idiv
            break;
          default:
            unhandled_instruction = true;
            break;
        }
        break;

      default:
        unhandled_instruction = true;
        break;
    }
  }

  if (unhandled_instruction) {
    VLOG(signals) << "Unhandled x86 instruction with opcode " << static_cast<int>(opcode);
    return 0;
  }

  if (has_modrm) {
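    // ModRM layout: mod (bits 7-6), reg/opcode (bits 5-3), r/m (bits 2-0).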
    uint8_t mod = (modrm >> 6) & 3U /* 0b11 */;

    // Check for SIB.
    if (mod != 3U /* 0b11 */ && (modrm & 7U /* 0b111 */) == 4) {
      SKIP_BYTE();  // SIB
    }

    switch (mod) {
      case 0U /* 0b00 */: break;
      case 1U /* 0b01 */: displacement_size = 1; break;
      case 2U /* 0b10 */: displacement_size = 4; break;
      case 3U /* 0b11 */:
        break;
    }
  }

  // Skip displacement and immediate.
  pc += displacement_size + immediate_size;

  VLOG(signals) << "x86 instruction length calculated as " << (pc - startpc);
  return pc - startpc;

#undef SKIP_BYTE
#undef FETCH_BYTE
#undef FETCH_OR_SKIP_BYTE
}

uintptr_t FaultManager::GetFaultPc([[maybe_unused]] siginfo_t* siginfo, void* context) {
  ucontext_t* uc = reinterpret_cast<ucontext_t*>(context);
  if (uc->CTX_ESP == 0) {
    VLOG(signals) << "Missing SP";
    return 0u;
  }
  return uc->CTX_EIP;
}

uintptr_t FaultManager::GetFaultSp(void* context) {
  ucontext_t* uc = reinterpret_cast<ucontext_t*>(context);
  return uc->CTX_ESP;
}

bool NullPointerHandler::Action(int, siginfo_t* sig, void* context) {
  uintptr_t fault_address = reinterpret_cast<uintptr_t>(sig->si_addr);
  if (!IsValidFaultAddress(fault_address)) {
    return false;
  }

  ucontext_t* uc = reinterpret_cast<ucontext_t*>(context);
  ArtMethod** sp = reinterpret_cast<ArtMethod**>(uc->CTX_ESP);
  ArtMethod* method = *sp;
  if (!IsValidMethod(method)) {
    return false;
  }

  // For null checks in compiled code we insert a stack map that is immediately
  // after the load/store instruction that might cause the fault and we need to
  // pass the return PC to the handler. For null checks in Nterp, we similarly
  // need the return PC to recognize that this was a null check in Nterp, so
  // that the handler can get the needed data from the Nterp frame.

  // Note: Allowing nested faults if `IsValidMethod()` returned a false positive.
  // Note: The `ArtMethod::GetOatQuickMethodHeader()` can acquire locks, which is
  // essentially unsafe in a signal handler, but we allow that here just like in
  // `NullPointerHandler::IsValidReturnPc()`. For more details see comments there.
  uintptr_t pc = uc->CTX_EIP;
  const OatQuickMethodHeader* method_header = method->GetOatQuickMethodHeader(pc);
  if (method_header == nullptr) {
    VLOG(signals) << "No method header.";
    return false;
  }
  const uint8_t* pc_ptr = reinterpret_cast<const uint8_t*>(pc);
  size_t offset = pc_ptr - method_header->GetCode();
  size_t code_size = method_header->GetCodeSize();
  CHECK_LT(offset, code_size);
  size_t max_instr_size = code_size - offset;
  uint32_t instr_size = GetInstructionSize(pc_ptr, max_instr_size);
  if (instr_size == 0u) {
    // Unknown instruction (can't really happen) or not enough bytes until end of method code.
    return false;
  }

  uintptr_t return_pc = pc + instr_size;
  if (!IsValidReturnPc(sp, return_pc)) {
    return false;
  }

  // Push the return PC and fault address onto the stack.
  uintptr_t* next_sp = reinterpret_cast<uintptr_t*>(sp) - 2;
  next_sp[1] = return_pc;
  next_sp[0] = fault_address;
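  // From the new stack top: next_sp[0] holds the fault address for the
  // entrypoint and next_sp[1] holds the return PC that lets the stack walker
  // find the stack map for the faulting instruction.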
  uc->CTX_ESP = reinterpret_cast<uintptr_t>(next_sp);

  // Arrange for the signal handler to return to the NPE entrypoint.
  uc->CTX_EIP = reinterpret_cast<uintptr_t>(
      art_quick_throw_null_pointer_exception_from_signal);
  VLOG(signals) << "Generating null pointer exception";
  return true;
}

// A suspend check is done using the following instruction sequence:
// (x86)
// 0xf720f1df:         648B058C000000      mov     eax, fs:[0x8c]  ; suspend_trigger
// .. some intervening instructions.
// 0xf720f1e6:                   8500      test    eax, [eax]
// (x86_64)
// 0x7f579de45d9e: 65488B0425A8000000      movq    rax, gs:[0xa8]  ; suspend_trigger
// .. some intervening instructions.
// 0x7f579de45da7:               8500      test    eax, [eax]

// The offset from fs (x86) or gs (x86_64) is Thread::ThreadSuspendTriggerOffset().
// To recognize a suspend check, we examine the instructions around the
// faulting PC.
bool SuspensionHandler::Action(int, siginfo_t*, void* context) {
  // These are the instructions to check for.  The first one is the mov eax, fs:[xxx]
  // where xxx is the offset of the suspend trigger.
  uint32_t trigger = Thread::ThreadSuspendTriggerOffset<kRuntimePointerSize>().Int32Value();

  VLOG(signals) << "Checking for suspension point";
#if defined(__x86_64__)
  uint8_t checkinst1[] = {0x65, 0x48, 0x8b, 0x04, 0x25, static_cast<uint8_t>(trigger & 0xff),
      static_cast<uint8_t>((trigger >> 8) & 0xff), 0, 0};
#else
  uint8_t checkinst1[] = {0x64, 0x8b, 0x05, static_cast<uint8_t>(trigger & 0xff),
      static_cast<uint8_t>((trigger >> 8) & 0xff), 0, 0};
#endif
  uint8_t checkinst2[] = {0x85, 0x00};
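  // Byte-level view of the patterns: checkinst1 is the segment-prefixed
  // absolute-address load of the suspend trigger into (r)ax (0x65 gs prefix +
  // 0x48 REX.W + 0x8b mov + 0x04 0x25 modrm/SIB for a disp32 address on
  // x86_64; 0x64 fs prefix + 0x8b mov + 0x05 modrm for a disp32 address on
  // x86), with the 32-bit trigger offset as the trailing displacement.
  // checkinst2 (0x85 0x00) is the two-byte encoding of "test eax, [eax]".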

  ucontext_t* uc = reinterpret_cast<ucontext_t*>(context);
  uint8_t* pc = reinterpret_cast<uint8_t*>(uc->CTX_EIP);
  uint8_t* sp = reinterpret_cast<uint8_t*>(uc->CTX_ESP);

  if (pc[0] != checkinst2[0] || pc[1] != checkinst2[1]) {
    // Second instruction is not correct (test eax,[eax]).
    VLOG(signals) << "Not a suspension point";
    return false;
  }

  // The first instruction can be a little further up the stream due to load
  // hoisting in the compiler.
  uint8_t* limit = pc - 100;   // Compiler will hoist at most ~20 instructions, well within 100 bytes.
  uint8_t* ptr = pc - sizeof(checkinst1);
  bool found = false;
  while (ptr > limit) {
    if (memcmp(ptr, checkinst1, sizeof(checkinst1)) == 0) {
      found = true;
      break;
    }
    ptr -= 1;
  }

  if (found) {
    VLOG(signals) << "suspend check match";

    // We need to arrange for the signal handler to return to the test-suspend
    // entrypoint (art_quick_test_suspend).  The return address must be the
    // address of the next instruction (this instruction + 2) and is pushed
    // onto the stack at the top of the current frame.

    // Push the return address onto the stack.
    uintptr_t retaddr = reinterpret_cast<uintptr_t>(pc + 2);
    uintptr_t* next_sp = reinterpret_cast<uintptr_t*>(sp - sizeof(uintptr_t));
    *next_sp = retaddr;
    uc->CTX_ESP = reinterpret_cast<uintptr_t>(next_sp);

    uc->CTX_EIP = reinterpret_cast<uintptr_t>(art_quick_test_suspend);

    // Now remove the suspend trigger that caused this fault.
    Thread::Current()->RemoveSuspendTrigger();
    VLOG(signals) << "removed suspend trigger invoking test suspend";
    return true;
  }
  VLOG(signals) << "Not a suspend check match, first instruction mismatch";
  return false;
}

// The stack overflow check is done using the following instruction:
// test eax, [esp - xxx]
// where 'xxx' is the size of the overflow area.
//
// This is done before any frame is established in the method.  The return
// address for the previous method is on the stack at ESP.
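//
// For illustration only (the real size comes from GetStackOverflowReservedBytes()
// below): if the reserved region were 8 KiB, the method prologue would contain
//   test eax, [esp - 0x2000]
// and a thread with less than 8 KiB of usable stack left would fault at exactly
// sp - 0x2000, which is the address this handler checks for.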

bool StackOverflowHandler::Action(int, siginfo_t* info, void* context) {
  ucontext_t* uc = reinterpret_cast<ucontext_t*>(context);
  uintptr_t sp = static_cast<uintptr_t>(uc->CTX_ESP);

  uintptr_t fault_addr = reinterpret_cast<uintptr_t>(info->si_addr);
  VLOG(signals) << "fault_addr: " << std::hex << fault_addr;
  VLOG(signals) << "checking for stack overflow, sp: " << std::hex << sp <<
    ", fault_addr: " << fault_addr;

#if defined(__x86_64__)
  uintptr_t overflow_addr = sp - GetStackOverflowReservedBytes(InstructionSet::kX86_64);
#else
  uintptr_t overflow_addr = sp - GetStackOverflowReservedBytes(InstructionSet::kX86);
#endif

  // Check that the fault address is the value expected for a stack overflow.
  if (fault_addr != overflow_addr) {
    VLOG(signals) << "Not a stack overflow";
    return false;
  }

  VLOG(signals) << "Stack overflow found";

  // Since the compiler puts the implicit overflow check before the callee save
  // instructions, the SP is already pointing to the previous frame.

  // Now arrange for the signal handler to return to art_quick_throw_stack_overflow.
  uc->CTX_EIP = reinterpret_cast<uintptr_t>(art_quick_throw_stack_overflow);

  return true;
}
}  // namespace art