/*
 * Copyright (C) 2017 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <stdint.h>

#include <algorithm>
#include <optional>
#include <tuple>
#include <utility>
#include <vector>

#include <unwindstack/DwarfError.h>
#include <unwindstack/DwarfLocation.h>
#include <unwindstack/DwarfMemory.h>
#include <unwindstack/DwarfSection.h>
#include <unwindstack/DwarfStructs.h>
#include <unwindstack/Elf.h>
#include <unwindstack/Log.h>
#include <unwindstack/Memory.h>
#include <unwindstack/Regs.h>

#include "DwarfCfa.h"
#include "DwarfDebugFrame.h"
#include "DwarfEhFrame.h"
#include "DwarfEncoding.h"
#include "DwarfOp.h"
#include "RegsInfo.h"

namespace unwindstack {

DwarfSection::DwarfSection(std::shared_ptr<Memory>& memory) : memory_(memory) {}

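// Given a pc, find the FDE/CIE that covers it, compute the location rules for
// that pc (cached in loc_regs_ keyed by pc_end), then evaluate the rules to
// recover the caller's register state.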
bool DwarfSection::Step(uint64_t pc, Regs* regs, Memory* process_memory, bool* finished,
                        bool* is_signal_frame) {
  // Look up the pc in the cache.
  auto it = loc_regs_.upper_bound(pc);
  if (it == loc_regs_.end() || pc < it->second.pc_start) {
    last_error_.code = DWARF_ERROR_NONE;
    const DwarfFde* fde = GetFdeFromPc(pc);
    if (fde == nullptr || fde->cie == nullptr) {
      last_error_.code = DWARF_ERROR_ILLEGAL_STATE;
      return false;
    }

    // Now get the location information for this pc.
    DwarfLocations loc_regs;
    if (!GetCfaLocationInfo(pc, fde, &loc_regs, regs->Arch())) {
      return false;
    }
    loc_regs.cie = fde->cie;

    // Store it in the cache.
    it = loc_regs_.emplace(loc_regs.pc_end, std::move(loc_regs)).first;
  }

  *is_signal_frame = it->second.cie->is_signal_frame;

  // Now eval the actual registers.
  return Eval(it->second.cie, process_memory, it->second, regs, finished);
}

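// Return the CIE at the given offset, filling in and caching the entry in
// cie_entries_ on first access. Returns nullptr if the data is invalid.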
template <typename AddressType>
const DwarfCie* DwarfSectionImpl<AddressType>::GetCieFromOffset(uint64_t offset) {
  auto cie_entry = cie_entries_.find(offset);
  if (cie_entry != cie_entries_.end()) {
    return &cie_entry->second;
  }
  DwarfCie* cie = &cie_entries_[offset];
  memory_.set_data_offset(entries_offset_);
  memory_.set_cur_offset(offset);
  if (!FillInCieHeader(cie) || !FillInCie(cie)) {
    // Erase the cached entry.
    cie_entries_.erase(offset);
    return nullptr;
  }
  return cie;
}

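// Read the CIE length and id fields, handling both the 32 bit and 64 bit
// DWARF formats, and record where the CFA instructions end.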
template <typename AddressType>
bool DwarfSectionImpl<AddressType>::FillInCieHeader(DwarfCie* cie) {
  cie->lsda_encoding = DW_EH_PE_omit;
  uint32_t length32;
  if (!memory_.ReadBytes(&length32, sizeof(length32))) {
    last_error_.code = DWARF_ERROR_MEMORY_INVALID;
    last_error_.address = memory_.cur_offset();
    return false;
  }
  if (length32 == static_cast<uint32_t>(-1)) {
    // 64 bit Cie
    uint64_t length64;
    if (!memory_.ReadBytes(&length64, sizeof(length64))) {
      last_error_.code = DWARF_ERROR_MEMORY_INVALID;
      last_error_.address = memory_.cur_offset();
      return false;
    }

    cie->cfa_instructions_end = memory_.cur_offset() + length64;
    // TODO(b/192012848): This is wrong. We need to propagate pointer size here.
    cie->fde_address_encoding = DW_EH_PE_udata8;

    uint64_t cie_id;
    if (!memory_.ReadBytes(&cie_id, sizeof(cie_id))) {
      last_error_.code = DWARF_ERROR_MEMORY_INVALID;
      last_error_.address = memory_.cur_offset();
      return false;
    }
    if (cie_id != cie64_value_) {
      // This is not a Cie, something has gone horribly wrong.
      last_error_.code = DWARF_ERROR_ILLEGAL_VALUE;
      return false;
    }
  } else {
    // 32 bit Cie
    cie->cfa_instructions_end = memory_.cur_offset() + length32;
    // TODO(b/192012848): This is wrong. We need to propagate pointer size here.
    cie->fde_address_encoding = DW_EH_PE_udata4;

    uint32_t cie_id;
    if (!memory_.ReadBytes(&cie_id, sizeof(cie_id))) {
      last_error_.code = DWARF_ERROR_MEMORY_INVALID;
      last_error_.address = memory_.cur_offset();
      return false;
    }
    if (cie_id != cie32_value_) {
      // This is not a Cie, something has gone horribly wrong.
      last_error_.code = DWARF_ERROR_ILLEGAL_VALUE;
      return false;
    }
  }
  return true;
}

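// Parse the body of a CIE: version, augmentation string, alignment factors,
// return address register, and any augmentation data ('z', 'L', 'P', 'R', 'S').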
template <typename AddressType>
bool DwarfSectionImpl<AddressType>::FillInCie(DwarfCie* cie) {
  if (!memory_.ReadBytes(&cie->version, sizeof(cie->version))) {
    last_error_.code = DWARF_ERROR_MEMORY_INVALID;
    last_error_.address = memory_.cur_offset();
    return false;
  }

  if (cie->version != 1 && cie->version != 3 && cie->version != 4 && cie->version != 5) {
    // Unrecognized version.
    last_error_.code = DWARF_ERROR_UNSUPPORTED_VERSION;
    return false;
  }

  // Read the augmentation string.
  char aug_value;
  do {
    if (!memory_.ReadBytes(&aug_value, 1)) {
      last_error_.code = DWARF_ERROR_MEMORY_INVALID;
      last_error_.address = memory_.cur_offset();
      return false;
    }
    cie->augmentation_string.push_back(aug_value);
  } while (aug_value != '\0');

  if (cie->version == 4 || cie->version == 5) {
    char address_size;
    if (!memory_.ReadBytes(&address_size, 1)) {
      last_error_.code = DWARF_ERROR_MEMORY_INVALID;
      last_error_.address = memory_.cur_offset();
      return false;
    }
    cie->fde_address_encoding = address_size == 8 ? DW_EH_PE_udata8 : DW_EH_PE_udata4;

    // Segment Size
    if (!memory_.ReadBytes(&cie->segment_size, 1)) {
      last_error_.code = DWARF_ERROR_MEMORY_INVALID;
      last_error_.address = memory_.cur_offset();
      return false;
    }
  }

  // Code Alignment Factor
  if (!memory_.ReadULEB128(&cie->code_alignment_factor)) {
    last_error_.code = DWARF_ERROR_MEMORY_INVALID;
    last_error_.address = memory_.cur_offset();
    return false;
  }

  // Data Alignment Factor
  if (!memory_.ReadSLEB128(&cie->data_alignment_factor)) {
    last_error_.code = DWARF_ERROR_MEMORY_INVALID;
    last_error_.address = memory_.cur_offset();
    return false;
  }

  if (cie->version == 1) {
    // Return Address is a single byte.
    uint8_t return_address_register;
    if (!memory_.ReadBytes(&return_address_register, 1)) {
      last_error_.code = DWARF_ERROR_MEMORY_INVALID;
      last_error_.address = memory_.cur_offset();
      return false;
    }
    cie->return_address_register = return_address_register;
  } else if (!memory_.ReadULEB128(&cie->return_address_register)) {
    last_error_.code = DWARF_ERROR_MEMORY_INVALID;
    last_error_.address = memory_.cur_offset();
    return false;
  }

  if (cie->augmentation_string[0] != 'z') {
    cie->cfa_instructions_offset = memory_.cur_offset();
    return true;
  }

  uint64_t aug_length;
  if (!memory_.ReadULEB128(&aug_length)) {
    last_error_.code = DWARF_ERROR_MEMORY_INVALID;
    last_error_.address = memory_.cur_offset();
    return false;
  }
  cie->cfa_instructions_offset = memory_.cur_offset() + aug_length;

  for (size_t i = 1; i < cie->augmentation_string.size(); i++) {
    switch (cie->augmentation_string[i]) {
      case 'L':
        if (!memory_.ReadBytes(&cie->lsda_encoding, 1)) {
          last_error_.code = DWARF_ERROR_MEMORY_INVALID;
          last_error_.address = memory_.cur_offset();
          return false;
        }
        break;
      case 'P': {
        uint8_t encoding;
        if (!memory_.ReadBytes(&encoding, 1)) {
          last_error_.code = DWARF_ERROR_MEMORY_INVALID;
          last_error_.address = memory_.cur_offset();
          return false;
        }
        memory_.set_pc_offset(pc_offset_);
        if (!memory_.ReadEncodedValue<AddressType>(encoding, &cie->personality_handler)) {
          last_error_.code = DWARF_ERROR_MEMORY_INVALID;
          last_error_.address = memory_.cur_offset();
          return false;
        }
      } break;
      case 'R':
        if (!memory_.ReadBytes(&cie->fde_address_encoding, 1)) {
          last_error_.code = DWARF_ERROR_MEMORY_INVALID;
          last_error_.address = memory_.cur_offset();
          return false;
        }
        break;
      case 'S':
        cie->is_signal_frame = true;
        break;
    }
  }
  return true;
}

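// Return the FDE at the given offset, filling in and caching the entry in
// fde_entries_ on first access. Returns nullptr if the data is invalid.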
template <typename AddressType>
const DwarfFde* DwarfSectionImpl<AddressType>::GetFdeFromOffset(uint64_t offset) {
  auto fde_entry = fde_entries_.find(offset);
  if (fde_entry != fde_entries_.end()) {
    return &fde_entry->second;
  }
  DwarfFde* fde = &fde_entries_[offset];
  memory_.set_data_offset(entries_offset_);
  memory_.set_cur_offset(offset);
  if (!FillInFdeHeader(fde) || !FillInFde(fde)) {
    fde_entries_.erase(offset);
    return nullptr;
  }
  return fde;
}

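// Read the FDE length and CIE pointer fields, handling both the 32 bit and
// 64 bit DWARF formats, and record where the CFA instructions end.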
template <typename AddressType>
bool DwarfSectionImpl<AddressType>::FillInFdeHeader(DwarfFde* fde) {
  uint32_t length32;
  if (!memory_.ReadBytes(&length32, sizeof(length32))) {
    last_error_.code = DWARF_ERROR_MEMORY_INVALID;
    last_error_.address = memory_.cur_offset();
    return false;
  }

  if (length32 == static_cast<uint32_t>(-1)) {
    // 64 bit Fde.
    uint64_t length64;
    if (!memory_.ReadBytes(&length64, sizeof(length64))) {
      last_error_.code = DWARF_ERROR_MEMORY_INVALID;
      last_error_.address = memory_.cur_offset();
      return false;
    }
    fde->cfa_instructions_end = memory_.cur_offset() + length64;

    uint64_t value64;
    if (!memory_.ReadBytes(&value64, sizeof(value64))) {
      last_error_.code = DWARF_ERROR_MEMORY_INVALID;
      last_error_.address = memory_.cur_offset();
      return false;
    }
    if (value64 == cie64_value_) {
      // This is a Cie, which means something has gone wrong.
      last_error_.code = DWARF_ERROR_ILLEGAL_VALUE;
      return false;
    }

    // Get the Cie pointer, which is necessary to properly read the rest of
    // the Fde information.
    fde->cie_offset = GetCieOffsetFromFde64(value64);
  } else {
    // 32 bit Fde.
    fde->cfa_instructions_end = memory_.cur_offset() + length32;

    uint32_t value32;
    if (!memory_.ReadBytes(&value32, sizeof(value32))) {
      last_error_.code = DWARF_ERROR_MEMORY_INVALID;
      last_error_.address = memory_.cur_offset();
      return false;
    }
    if (value32 == cie32_value_) {
      // This is a Cie, which means something has gone wrong.
      last_error_.code = DWARF_ERROR_ILLEGAL_VALUE;
      return false;
    }

    // Get the Cie pointer, which is necessary to properly read the rest of
    // the Fde information.
    fde->cie_offset = GetCieOffsetFromFde32(value32);
  }
  return true;
}

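// Parse the body of an FDE: the pc range it covers, any augmentation data
// (including the lsda address), and the offset of its CFA instructions.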
template <typename AddressType>
bool DwarfSectionImpl<AddressType>::FillInFde(DwarfFde* fde) {
  uint64_t cur_offset = memory_.cur_offset();

  const DwarfCie* cie = GetCieFromOffset(fde->cie_offset);
  if (cie == nullptr) {
    return false;
  }
  fde->cie = cie;

  if (cie->segment_size != 0) {
    // Skip over the segment selector for now.
    cur_offset += cie->segment_size;
  }
  memory_.set_cur_offset(cur_offset);

  // The load bias only applies to the start.
  memory_.set_pc_offset(section_bias_);
  bool valid = memory_.ReadEncodedValue<AddressType>(cie->fde_address_encoding, &fde->pc_start);
  fde->pc_start = AdjustPcFromFde(fde->pc_start);

  memory_.set_pc_offset(0);
  if (!valid || !memory_.ReadEncodedValue<AddressType>(cie->fde_address_encoding, &fde->pc_end)) {
    last_error_.code = DWARF_ERROR_MEMORY_INVALID;
    last_error_.address = memory_.cur_offset();
    return false;
  }
  fde->pc_end += fde->pc_start;

  if (cie->augmentation_string.size() > 0 && cie->augmentation_string[0] == 'z') {
    // Augmentation Size
    uint64_t aug_length;
    if (!memory_.ReadULEB128(&aug_length)) {
      last_error_.code = DWARF_ERROR_MEMORY_INVALID;
      last_error_.address = memory_.cur_offset();
      return false;
    }
    uint64_t cur_offset = memory_.cur_offset();

    memory_.set_pc_offset(pc_offset_);
    if (!memory_.ReadEncodedValue<AddressType>(cie->lsda_encoding, &fde->lsda_address)) {
      last_error_.code = DWARF_ERROR_MEMORY_INVALID;
      last_error_.address = memory_.cur_offset();
      return false;
    }

    // Set our position to after all of the augmentation data.
    memory_.set_cur_offset(cur_offset + aug_length);
  }
  fde->cfa_instructions_offset = memory_.cur_offset();

  return true;
}

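// Evaluate a DWARF expression location and return the value at the top of the
// resulting stack. Expressions that evaluate to a register number are not
// supported.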
template <typename AddressType>
bool DwarfSectionImpl<AddressType>::EvalExpression(const DwarfLocation& loc, Memory* regular_memory,
                                                   AddressType* value,
                                                   RegsInfo<AddressType>* regs_info,
                                                   bool* is_dex_pc) {
  DwarfOp<AddressType> op(&memory_, regular_memory);
  op.set_regs_info(regs_info);

  // Need to evaluate the op data.
  uint64_t end = loc.values[1];
  uint64_t start = end - loc.values[0];
  if (!op.Eval(start, end)) {
    last_error_ = op.last_error();
    return false;
  }
  if (op.StackSize() == 0) {
    last_error_.code = DWARF_ERROR_ILLEGAL_STATE;
    return false;
  }
  // We don't support an expression that evaluates to a register number.
  if (op.is_register()) {
    last_error_.code = DWARF_ERROR_NOT_IMPLEMENTED;
    return false;
  }
  *value = op.StackAt(0);
  if (is_dex_pc != nullptr && op.dex_pc_set()) {
    *is_dex_pc = true;
  }
  return true;
}

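// State shared between Eval and EvalRegister while evaluating a single frame.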
template <typename AddressType>
struct EvalInfo {
  const DwarfLocations* loc_regs;
  const DwarfCie* cie;
  Memory* regular_memory;
  AddressType cfa;
  bool return_address_undefined = false;
  RegsInfo<AddressType> regs_info;
};

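// Compute the value of a single register from its location rule, relative to
// the already computed cfa and the current register values in regs_info.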
template <typename AddressType>
bool DwarfSectionImpl<AddressType>::EvalRegister(const DwarfLocation* loc, uint32_t reg,
                                                 AddressType* reg_ptr, void* info) {
  EvalInfo<AddressType>* eval_info = reinterpret_cast<EvalInfo<AddressType>*>(info);
  Memory* regular_memory = eval_info->regular_memory;
  switch (loc->type) {
    case DWARF_LOCATION_OFFSET:
      if (!regular_memory->ReadFully(eval_info->cfa + loc->values[0], reg_ptr, sizeof(AddressType))) {
        last_error_.code = DWARF_ERROR_MEMORY_INVALID;
        last_error_.address = eval_info->cfa + loc->values[0];
        return false;
      }
      break;
    case DWARF_LOCATION_VAL_OFFSET:
      *reg_ptr = eval_info->cfa + loc->values[0];
      break;
    case DWARF_LOCATION_REGISTER: {
      uint16_t cur_reg = eval_info->regs_info.regs->Convert(loc->values[0]);
      if (cur_reg >= eval_info->regs_info.Total()) {
        last_error_.code = DWARF_ERROR_ILLEGAL_VALUE;
        return false;
      }
      *reg_ptr = eval_info->regs_info.Get(cur_reg) + loc->values[1];
      break;
    }
    case DWARF_LOCATION_EXPRESSION:
    case DWARF_LOCATION_VAL_EXPRESSION: {
      AddressType value;
      bool is_dex_pc = false;
      if (!EvalExpression(*loc, regular_memory, &value, &eval_info->regs_info, &is_dex_pc)) {
        return false;
      }
      if (loc->type == DWARF_LOCATION_EXPRESSION) {
        if (!regular_memory->ReadFully(value, reg_ptr, sizeof(AddressType))) {
          last_error_.code = DWARF_ERROR_MEMORY_INVALID;
          last_error_.address = value;
          return false;
        }
      } else {
        *reg_ptr = value;
        if (is_dex_pc) {
          eval_info->regs_info.regs->set_dex_pc(value);
        }
      }
      break;
    }
    case DWARF_LOCATION_UNDEFINED:
      if (reg == eval_info->cie->return_address_register) {
        eval_info->return_address_undefined = true;
      }
      break;
    case DWARF_LOCATION_PSEUDO_REGISTER:
      last_error_.code = DWARF_ERROR_ILLEGAL_VALUE;
      return false;
    default:
      break;
  }

  return true;
}

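// Apply the location rules in loc_regs to the current register state to
// recover the caller's registers, pc, and sp (the cfa).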
template <typename AddressType>
bool DwarfSectionImpl<AddressType>::Eval(const DwarfCie* cie, Memory* regular_memory,
                                         const DwarfLocations& loc_regs, Regs* regs,
                                         bool* finished) {
  RegsImpl<AddressType>* cur_regs = reinterpret_cast<RegsImpl<AddressType>*>(regs);
  if (cie->return_address_register >= cur_regs->total_regs()) {
    last_error_.code = DWARF_ERROR_ILLEGAL_VALUE;
    return false;
  }

  // Get the cfa value.
  auto cfa_entry = loc_regs.find(CFA_REG);
  if (cfa_entry == loc_regs.end()) {
    last_error_.code = DWARF_ERROR_CFA_NOT_DEFINED;
    return false;
  }

  // Always set the dex pc to zero when evaluating.
  cur_regs->set_dex_pc(0);

  // Reset necessary pseudo registers before evaluation.
  // This is needed for ARM64, for example.
  regs->ResetPseudoRegisters();

  EvalInfo<AddressType> eval_info{.loc_regs = &loc_regs,
                                  .cie = cie,
                                  .regular_memory = regular_memory,
                                  .regs_info = RegsInfo<AddressType>(cur_regs)};
  const DwarfLocation* loc = &cfa_entry->second;
  // Only a few location types are valid for the cfa.
  switch (loc->type) {
    case DWARF_LOCATION_REGISTER:
      if (loc->values[0] >= cur_regs->total_regs()) {
        last_error_.code = DWARF_ERROR_ILLEGAL_VALUE;
        return false;
      }
      eval_info.cfa = (*cur_regs)[loc->values[0]];
      eval_info.cfa += loc->values[1];
      break;
    case DWARF_LOCATION_VAL_EXPRESSION: {
      AddressType value;
      if (!EvalExpression(*loc, regular_memory, &value, &eval_info.regs_info, nullptr)) {
        return false;
      }
      // There is only one type of valid expression for CFA evaluation.
      eval_info.cfa = value;
      break;
    }
    default:
      last_error_.code = DWARF_ERROR_ILLEGAL_VALUE;
      return false;
  }

  for (const auto& entry : loc_regs) {
    uint32_t reg = entry.first;
    // Already handled the CFA register.
    if (reg == CFA_REG) continue;

    AddressType* reg_ptr;
    if (reg >= cur_regs->total_regs()) {
      if (entry.second.type != DWARF_LOCATION_PSEUDO_REGISTER) {
        // Skip this unknown register.
        continue;
      }
      if (!eval_info.regs_info.regs->SetPseudoRegister(reg, entry.second.values[0])) {
        last_error_.code = DWARF_ERROR_ILLEGAL_VALUE;
        return false;
      }
    } else {
      reg_ptr = eval_info.regs_info.Save(reg);
      if (!EvalRegister(&entry.second, reg, reg_ptr, &eval_info)) {
        return false;
      }
    }
  }

  // Find the return address location.
  if (eval_info.return_address_undefined) {
    cur_regs->set_pc(0);
  } else {
    cur_regs->set_pc((*cur_regs)[cie->return_address_register]);
  }

  // If the pc was set to zero, consider this the final frame. Exception: if
  // this is the sigreturn frame, then we want to try to recover the real PC
  // using the return address (from LR or the stack), so keep going.
  *finished = cur_regs->pc() == 0 && !cie->is_signal_frame;

  cur_regs->set_sp(eval_info.cfa);

  return true;
}

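// Run the CFA instructions of the CIE (cached per CIE in cie_loc_regs_) and
// then of the FDE to produce the location rules that apply at the given pc.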
template <typename AddressType>
bool DwarfSectionImpl<AddressType>::GetCfaLocationInfo(uint64_t pc, const DwarfFde* fde,
                                                       DwarfLocations* loc_regs, ArchEnum arch) {
  DwarfCfa<AddressType> cfa(&memory_, fde, arch);

  // Look for the cached copy of the cie data.
  auto reg_entry = cie_loc_regs_.find(fde->cie_offset);
  if (reg_entry == cie_loc_regs_.end()) {
    if (!cfa.GetLocationInfo(pc, fde->cie->cfa_instructions_offset, fde->cie->cfa_instructions_end,
                             loc_regs)) {
      last_error_ = cfa.last_error();
      return false;
    }
    cie_loc_regs_[fde->cie_offset] = *loc_regs;
  }
  cfa.set_cie_loc_regs(&cie_loc_regs_[fde->cie_offset]);
  if (!cfa.GetLocationInfo(pc, fde->cfa_instructions_offset, fde->cfa_instructions_end, loc_regs)) {
    last_error_ = cfa.last_error();
    return false;
  }
  return true;
}

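// Log the CFA instructions of the FDE's CIE followed by those of the FDE itself.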
template <typename AddressType>
bool DwarfSectionImpl<AddressType>::Log(uint8_t indent, uint64_t pc, const DwarfFde* fde,
                                        ArchEnum arch) {
  DwarfCfa<AddressType> cfa(&memory_, fde, arch);

  // Always print the cie information.
  const DwarfCie* cie = fde->cie;
  if (!cfa.Log(indent, pc, cie->cfa_instructions_offset, cie->cfa_instructions_end)) {
    last_error_ = cfa.last_error();
    return false;
  }
  if (!cfa.Log(indent, pc, fde->cfa_instructions_offset, fde->cfa_instructions_end)) {
    last_error_ = cfa.last_error();
    return false;
  }
  return true;
}

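// Record the section bounds and bias from the section info and reset the
// section memory offsets. Compressed sections are not supported.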
template <typename AddressType>
bool DwarfSectionImpl<AddressType>::Init(const SectionInfo& info) {
  if (info.flags & SHF_COMPRESSED) {
    return false;
  }
  section_bias_ = info.bias;
  entries_offset_ = info.offset;
  entries_end_ = info.offset + info.size;

  memory_.clear_func_offset();
  memory_.clear_text_offset();
  memory_.set_cur_offset(info.offset);
  pc_offset_ = info.offset;

  return true;
}

// Read a CIE or FDE entry at the given offset, and set the offset to the following entry.
// The 'fde' argument is set only if we have seen an FDE entry.
template <typename AddressType>
bool DwarfSectionImpl<AddressType>::GetNextCieOrFde(uint64_t& next_entries_offset,
                                                    std::optional<DwarfFde>& fde_entry) {
  const uint64_t start_offset = next_entries_offset;

  memory_.set_data_offset(entries_offset_);
  memory_.set_cur_offset(next_entries_offset);
  uint32_t value32;
  if (!memory_.ReadBytes(&value32, sizeof(value32))) {
    last_error_.code = DWARF_ERROR_MEMORY_INVALID;
    last_error_.address = memory_.cur_offset();
    return false;
  }

  uint64_t cie_offset;
  uint8_t cie_fde_encoding;
  bool entry_is_cie = false;
  if (value32 == static_cast<uint32_t>(-1)) {
    // 64 bit entry.
    uint64_t value64;
    if (!memory_.ReadBytes(&value64, sizeof(value64))) {
      last_error_.code = DWARF_ERROR_MEMORY_INVALID;
      last_error_.address = memory_.cur_offset();
      return false;
    }

    next_entries_offset = memory_.cur_offset() + value64;
    // Read the Cie Id of a Cie or the pointer of the Fde.
    if (!memory_.ReadBytes(&value64, sizeof(value64))) {
      last_error_.code = DWARF_ERROR_MEMORY_INVALID;
      last_error_.address = memory_.cur_offset();
      return false;
    }

    if (value64 == cie64_value_) {
      entry_is_cie = true;
      cie_fde_encoding = DW_EH_PE_udata8;
    } else {
      cie_offset = GetCieOffsetFromFde64(value64);
    }
  } else {
    next_entries_offset = memory_.cur_offset() + value32;

    // 32 bit Cie
    if (!memory_.ReadBytes(&value32, sizeof(value32))) {
      last_error_.code = DWARF_ERROR_MEMORY_INVALID;
      last_error_.address = memory_.cur_offset();
      return false;
    }

    if (value32 == cie32_value_) {
      entry_is_cie = true;
      cie_fde_encoding = DW_EH_PE_udata4;
    } else {
      cie_offset = GetCieOffsetFromFde32(value32);
    }
  }

  if (entry_is_cie) {
    auto entry = cie_entries_.find(start_offset);
    if (entry == cie_entries_.end()) {
      DwarfCie* cie = &cie_entries_[start_offset];
      cie->lsda_encoding = DW_EH_PE_omit;
      cie->cfa_instructions_end = next_entries_offset;
      cie->fde_address_encoding = cie_fde_encoding;

      if (!FillInCie(cie)) {
        cie_entries_.erase(start_offset);
        return false;
      }
    }
    fde_entry.reset();
  } else {
    fde_entry = DwarfFde{};
    fde_entry->cfa_instructions_end = next_entries_offset;
    fde_entry->cie_offset = cie_offset;
    if (!FillInFde(&*fde_entry)) {
      return false;
    }
  }
  return true;
}

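// Return pointers to all of the FDEs in the section, building the index of
// FDE offsets on first use.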
template <typename AddressType>
void DwarfSectionImpl<AddressType>::GetFdes(std::vector<const DwarfFde*>* fdes) {
  if (fde_index_.empty()) {
    BuildFdeIndex();
  }
  for (auto& it : fde_index_) {
    fdes->push_back(GetFdeFromOffset(it.second));
  }
}

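// Find the FDE that covers the given pc using the binary search table, or
// return nullptr if no FDE covers it.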
template <typename AddressType>
const DwarfFde* DwarfSectionImpl<AddressType>::GetFdeFromPc(uint64_t pc) {
  // Ensure that the binary search table is initialized.
  if (fde_index_.empty()) {
    BuildFdeIndex();
  }

  // Find the FDE offset in the binary search table.
  auto comp = [](uint64_t pc, auto& entry) { return pc < entry.first; };
  auto it = std::upper_bound(fde_index_.begin(), fde_index_.end(), pc, comp);
  if (it == fde_index_.end()) {
    return nullptr;
  }

  // Load the full FDE entry based on the offset.
  const DwarfFde* fde = GetFdeFromOffset(/*fde_offset=*/it->second);
  return fde != nullptr && fde->pc_start <= pc ? fde : nullptr;
}

// Create binary search table to make FDE lookups fast (sorted by pc_end).
// We store only the FDE offset rather than the full entry to save memory.
//
// If there are overlapping entries, it inserts additional entries to ensure
// that one of the overlapping entries is found (it is undefined which one).
template <typename AddressType>
void DwarfSectionImpl<AddressType>::BuildFdeIndex() {
  struct FdeInfo {
    uint64_t pc_start, pc_end, fde_offset;
  };
  std::vector<FdeInfo> fdes;
  for (uint64_t offset = entries_offset_; offset < entries_end_;) {
    const uint64_t initial_offset = offset;
    std::optional<DwarfFde> fde;
    if (!GetNextCieOrFde(offset, fde)) {
      break;
    }
    if (fde.has_value() && /* defensive check */ (fde->pc_start < fde->pc_end)) {
      fdes.push_back({fde->pc_start, fde->pc_end, initial_offset});
    }
    if (offset <= initial_offset) {
      break;  // Jump back. Simply consider the processing done in this case.
    }
  }
  std::sort(fdes.begin(), fdes.end(), [](const FdeInfo& a, const FdeInfo& b) {
    return std::tie(a.pc_end, a.fde_offset) < std::tie(b.pc_end, b.fde_offset);
  });

  // If there are overlapping entries, ensure that we can always find one of them.
  // For example, for entries: [300, 350) [400, 450) [100, 550) [600, 650)
  // We add the following: [100, 300) [100, 400)
  // Which ensures that the [100, 550) entry can be found in its whole range.
  if (!fdes.empty()) {
    FdeInfo filling = fdes.back();  // Entry with the minimal pc_start seen so far.
    for (ssize_t i = fdes.size() - 1; i >= 0; i--) {  // Iterate backwards.
      uint64_t prev_pc_end = (i > 0) ? fdes[i - 1].pc_end : 0;
      // If there is a gap between entries and the filling reaches the gap, fill it.
      if (prev_pc_end < fdes[i].pc_start && filling.pc_start < fdes[i].pc_start) {
        fdes.push_back({filling.pc_start, fdes[i].pc_start, filling.fde_offset});
      }
      if (fdes[i].pc_start < filling.pc_start) {
        filling = fdes[i];
      }
    }
  }

  // Copy data to the final binary search table (pc_end, fde_offset) and sort it.
  fde_index_.reserve(fdes.size());
  for (const FdeInfo& it : fdes) {
    fde_index_.emplace_back(it.pc_end, it.fde_offset);
  }
  if (!std::is_sorted(fde_index_.begin(), fde_index_.end())) {
    std::sort(fde_index_.begin(), fde_index_.end());
  }
}

// Explicitly instantiate DwarfSectionImpl
template class DwarfSectionImpl<uint32_t>;
template class DwarfSectionImpl<uint64_t>;

// Explicitly instantiate DwarfDebugFrame
template class DwarfDebugFrame<uint32_t>;
template class DwarfDebugFrame<uint64_t>;

// Explicitly instantiate DwarfEhFrame
template class DwarfEhFrame<uint32_t>;
template class DwarfEhFrame<uint64_t>;

}  // namespace unwindstack