/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "optimizing_compiler.h"

#include <fstream>
#include <memory>
#include <sstream>

#include <stdint.h>

#include "art_method-inl.h"
#include "base/arena_allocator.h"
#include "base/arena_containers.h"
#include "base/dumpable.h"
#include "base/logging.h"
#include "base/macros.h"
#include "base/mutex.h"
#include "base/scoped_arena_allocator.h"
#include "base/systrace.h"
#include "base/timing_logger.h"
#include "builder.h"
#include "code_generator.h"
#include "compiler.h"
#include "debug/elf_debug_writer.h"
#include "debug/method_debug_info.h"
#include "dex/dex_file_types.h"
#include "driver/compiled_code_storage.h"
#include "driver/compiler_options.h"
#include "driver/dex_compilation_unit.h"
#include "graph_checker.h"
#include "graph_visualizer.h"
#include "inliner.h"
#include "jit/debugger_interface.h"
#include "jit/jit.h"
#include "jit/jit_code_cache.h"
#include "jit/jit_logger.h"
#include "jni/quick/jni_compiler.h"
#include "linker/linker_patch.h"
#include "nodes.h"
#include "oat/oat_quick_method_header.h"
#include "optimizing/write_barrier_elimination.h"
#include "prepare_for_register_allocation.h"
#include "profiling_info_builder.h"
#include "reference_type_propagation.h"
#include "register_allocator_linear_scan.h"
#include "select_generator.h"
#include "ssa_builder.h"
#include "ssa_liveness_analysis.h"
#include "ssa_phi_elimination.h"
#include "stack_map_stream.h"
#include "utils/assembler.h"

namespace art HIDDEN {

static constexpr size_t kArenaAllocatorMemoryReportThreshold = 8 * MB;

static constexpr const char* kPassNameSeparator = "$";

/**
 * Filter to apply to the visualizer. Methods whose name contains the filter
 * will be dumped.
 */
static constexpr const char kStringFilter[] = "";

class PassScope;

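// Observes the pass pipeline for one method: optionally logs per-pass timings,
// streams the graph to the C1visualizer .cfg output before and after each pass,
// and (in debug builds) validates the graph with GraphChecker after each pass.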
class PassObserver : public ValueObject {
 public:
  PassObserver(HGraph* graph,
               CodeGenerator* codegen,
               std::ostream* visualizer_output,
               const CompilerOptions& compiler_options)
      : graph_(graph),
        last_seen_graph_size_(0),
        cached_method_name_(),
        timing_logger_enabled_(compiler_options.GetDumpPassTimings()),
        timing_logger_(timing_logger_enabled_ ? GetMethodName() : "", true, true),
        disasm_info_(graph->GetAllocator()),
        visualizer_oss_(),
        visualizer_output_(visualizer_output),
        visualizer_enabled_(!compiler_options.GetDumpCfgFileName().empty()),
        visualizer_(&visualizer_oss_, graph, codegen),
        codegen_(codegen),
        graph_in_bad_state_(false) {
    if (timing_logger_enabled_ || visualizer_enabled_) {
      if (!IsVerboseMethod(compiler_options, GetMethodName())) {
        timing_logger_enabled_ = visualizer_enabled_ = false;
      }
      if (visualizer_enabled_) {
        visualizer_.PrintHeader(GetMethodName());
        codegen->SetDisassemblyInformation(&disasm_info_);
      }
    }
  }

  ~PassObserver() {
    if (timing_logger_enabled_) {
      LOG(INFO) << "TIMINGS " << GetMethodName();
      LOG(INFO) << Dumpable<TimingLogger>(timing_logger_);
    }
    if (visualizer_enabled_) {
      FlushVisualizer();
    }
    DCHECK(visualizer_oss_.str().empty());
  }

  void DumpDisassembly() {
    if (visualizer_enabled_) {
      visualizer_.DumpGraphWithDisassembly();
      FlushVisualizer();
    }
  }

  void SetGraphInBadState() { graph_in_bad_state_ = true; }

  const char* GetMethodName() {
    // PrettyMethod() is expensive, so we delay calling it until we actually have to.
    if (cached_method_name_.empty()) {
      cached_method_name_ = graph_->GetDexFile().PrettyMethod(graph_->GetMethodIdx());
    }
    return cached_method_name_.c_str();
  }

 private:
  void StartPass(const char* pass_name) {
    VLOG(compiler) << "Starting pass: " << pass_name;
    // Dump graph first, then start timer.
    if (visualizer_enabled_) {
      visualizer_.DumpGraph(pass_name, /* is_after_pass= */ false, graph_in_bad_state_);
      FlushVisualizer();
    }
    if (timing_logger_enabled_) {
      timing_logger_.StartTiming(pass_name);
    }
  }

  void FlushVisualizer() {
    *visualizer_output_ << visualizer_oss_.str();
    visualizer_output_->flush();
    visualizer_oss_.str("");
    visualizer_oss_.clear();
  }

  void EndPass(const char* pass_name, bool pass_change) {
    // Pause timer first, then dump graph.
    if (timing_logger_enabled_) {
      timing_logger_.EndTiming();
    }
    if (visualizer_enabled_) {
      visualizer_.DumpGraph(pass_name, /* is_after_pass= */ true, graph_in_bad_state_);
      FlushVisualizer();
    }

    // Validate the HGraph if running in debug mode.
    if (kIsDebugBuild) {
      if (!graph_in_bad_state_) {
        GraphChecker checker(graph_, codegen_);
        last_seen_graph_size_ = checker.Run(pass_change, last_seen_graph_size_);
        if (!checker.IsValid()) {
          std::ostringstream stream;
          graph_->Dump(stream, codegen_);
          LOG(FATAL_WITHOUT_ABORT) << "Error after " << pass_name << "(" << graph_->PrettyMethod()
                                   << "): " << stream.str();
          LOG(FATAL) << "(" << pass_name << "): " << Dumpable<GraphChecker>(checker);
        }
      }
    }
  }

  static bool IsVerboseMethod(const CompilerOptions& compiler_options, const char* method_name) {
    // Test an exact match to --verbose-methods. If verbose-methods is set, this overrides an
    // empty kStringFilter matching all methods.
    if (compiler_options.HasVerboseMethods()) {
      return compiler_options.IsVerboseMethod(method_name);
    }

    // Test the kStringFilter sub-string. constexpr helper variable to silence unreachable-code
    // warning when the string is empty.
    constexpr bool kStringFilterEmpty = arraysize(kStringFilter) <= 1;
    if (kStringFilterEmpty || strstr(method_name, kStringFilter) != nullptr) {
      return true;
    }

    return false;
  }

  HGraph* const graph_;
  size_t last_seen_graph_size_;

  std::string cached_method_name_;

  bool timing_logger_enabled_;
  TimingLogger timing_logger_;

  DisassemblyInformation disasm_info_;

  std::ostringstream visualizer_oss_;
  std::ostream* visualizer_output_;
  bool visualizer_enabled_;
  HGraphVisualizer visualizer_;
  CodeGenerator* codegen_;

  // Flag to be set by the compiler if the pass failed and the graph is not
  // expected to validate.
  bool graph_in_bad_state_;

  friend PassScope;

  DISALLOW_COPY_AND_ASSIGN(PassObserver);
};

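// RAII helper that brackets a single pass: the constructor calls StartPass() and
// the destructor calls EndPass(). A pass is assumed to have changed the graph
// unless SetPassNotChanged() is called before the scope exits.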
class PassScope : public ValueObject {
 public:
  PassScope(const char *pass_name, PassObserver* pass_observer)
      : pass_name_(pass_name),
        pass_change_(true),  // assume change
        pass_observer_(pass_observer) {
    pass_observer_->StartPass(pass_name_);
  }

  void SetPassNotChanged() {
    pass_change_ = false;
  }

  ~PassScope() {
    pass_observer_->EndPass(pass_name_, pass_change_);
  }

 private:
  const char* const pass_name_;
  bool pass_change_;
  PassObserver* const pass_observer_;
};

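// The optimizing compiler proper: implements the Compiler interface for both
// AOT compilation (Compile, JniCompile) and JIT compilation (JitCompile).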
class OptimizingCompiler final : public Compiler {
 public:
  explicit OptimizingCompiler(const CompilerOptions& compiler_options,
                              CompiledCodeStorage* storage);
  ~OptimizingCompiler() override;

  bool CanCompileMethod(uint32_t method_idx, const DexFile& dex_file) const override;

  CompiledMethod* Compile(const dex::CodeItem* code_item,
                          uint32_t access_flags,
                          InvokeType invoke_type,
                          uint16_t class_def_idx,
                          uint32_t method_idx,
                          Handle<mirror::ClassLoader> class_loader,
                          const DexFile& dex_file,
                          Handle<mirror::DexCache> dex_cache) const override;

  CompiledMethod* JniCompile(uint32_t access_flags,
                             uint32_t method_idx,
                             const DexFile& dex_file,
                             Handle<mirror::DexCache> dex_cache) const override;

  uintptr_t GetEntryPointOf(ArtMethod* method) const override
      REQUIRES_SHARED(Locks::mutator_lock_) {
    return reinterpret_cast<uintptr_t>(method->GetEntryPointFromQuickCompiledCodePtrSize(
        InstructionSetPointerSize(GetCompilerOptions().GetInstructionSet())));
  }

  bool JitCompile(Thread* self,
                  jit::JitCodeCache* code_cache,
                  jit::JitMemoryRegion* region,
                  ArtMethod* method,
                  CompilationKind compilation_kind,
                  jit::JitLogger* jit_logger)
      override
      REQUIRES_SHARED(Locks::mutator_lock_);

 private:
  bool RunOptimizations(HGraph* graph,
                        CodeGenerator* codegen,
                        const DexCompilationUnit& dex_compilation_unit,
                        PassObserver* pass_observer,
                        const OptimizationDef definitions[],
                        size_t length) const {
    // Convert definitions to optimization passes.
    ArenaVector<HOptimization*> optimizations = ConstructOptimizations(
        definitions,
        length,
        graph->GetAllocator(),
        graph,
        compilation_stats_.get(),
        codegen,
        dex_compilation_unit);
    DCHECK_EQ(length, optimizations.size());
    // Run the optimization passes one by one. Any "depends_on" pass refers back to
    // the most recent occurrence of that pass, skipped or executed.
    std::bitset<static_cast<size_t>(OptimizationPass::kLast) + 1u> pass_changes;
    pass_changes[static_cast<size_t>(OptimizationPass::kNone)] = true;
    bool change = false;
    for (size_t i = 0; i < length; ++i) {
      if (pass_changes[static_cast<size_t>(definitions[i].depends_on)]) {
        // Execute the pass and record whether it changed anything.
        PassScope scope(optimizations[i]->GetPassName(), pass_observer);
        bool pass_change = optimizations[i]->Run();
        pass_changes[static_cast<size_t>(definitions[i].pass)] = pass_change;
        if (pass_change) {
          change = true;
        } else {
          scope.SetPassNotChanged();
        }
      } else {
        // Skip the pass and record that nothing changed.
        pass_changes[static_cast<size_t>(definitions[i].pass)] = false;
      }
    }
    return change;
  }

  template <size_t length> bool RunOptimizations(
      HGraph* graph,
      CodeGenerator* codegen,
      const DexCompilationUnit& dex_compilation_unit,
      PassObserver* pass_observer,
      const OptimizationDef (&definitions)[length]) const {
    return RunOptimizations(
        graph, codegen, dex_compilation_unit, pass_observer, definitions, length);
  }

  void RunOptimizations(HGraph* graph,
                        CodeGenerator* codegen,
                        const DexCompilationUnit& dex_compilation_unit,
                        PassObserver* pass_observer) const;

  // Create a 'CompiledMethod' for an optimized graph.
  CompiledMethod* Emit(ArenaAllocator* allocator,
                       CodeGenerator* codegen,
                       bool is_intrinsic,
                       const dex::CodeItem* item) const;

  // Try to compile a method and return the code generator used for
  // compiling it.
  // This method:
  // 1) Builds the graph. Returns null if it failed to build it.
  // 2) Transforms the graph to SSA. Returns null if it failed.
  // 3) Runs optimizations on the graph, including the register allocator.
  CodeGenerator* TryCompile(ArenaAllocator* allocator,
                            ArenaStack* arena_stack,
                            const DexCompilationUnit& dex_compilation_unit,
                            ArtMethod* method,
                            CompilationKind compilation_kind,
                            VariableSizedHandleScope* handles) const;

  CodeGenerator* TryCompileIntrinsic(ArenaAllocator* allocator,
                                     ArenaStack* arena_stack,
                                     const DexCompilationUnit& dex_compilation_unit,
                                     ArtMethod* method,
                                     VariableSizedHandleScope* handles) const;

  bool RunArchOptimizations(HGraph* graph,
                            CodeGenerator* codegen,
                            const DexCompilationUnit& dex_compilation_unit,
                            PassObserver* pass_observer) const;

  bool RunRequiredPasses(HGraph* graph,
                         CodeGenerator* codegen,
                         const DexCompilationUnit& dex_compilation_unit,
                         PassObserver* pass_observer) const;

  std::vector<uint8_t> GenerateJitDebugInfo(const debug::MethodDebugInfo& method_debug_info);

  // This must be called before any other function that dumps data to the .cfg file.
  void DumpInstructionSetFeaturesToCfg() const;

  std::unique_ptr<OptimizingCompilerStats> compilation_stats_;

  std::unique_ptr<std::ostream> visualizer_output_;

  DISALLOW_COPY_AND_ASSIGN(OptimizingCompiler);
};

static const int kMaximumCompilationTimeBeforeWarning = 100;  /* ms */

OptimizingCompiler::OptimizingCompiler(const CompilerOptions& compiler_options,
                                       CompiledCodeStorage* storage)
    : Compiler(compiler_options, storage, kMaximumCompilationTimeBeforeWarning) {
  // Enable C1visualizer output.
  const std::string& cfg_file_name = compiler_options.GetDumpCfgFileName();
  if (!cfg_file_name.empty()) {
    std::ios_base::openmode cfg_file_mode =
        compiler_options.GetDumpCfgAppend() ? std::ofstream::app : std::ofstream::out;
    visualizer_output_.reset(new std::ofstream(cfg_file_name, cfg_file_mode));
    DumpInstructionSetFeaturesToCfg();
  }
  if (compiler_options.GetDumpStats()) {
    compilation_stats_.reset(new OptimizingCompilerStats());
  }
}

OptimizingCompiler::~OptimizingCompiler() {
  if (compilation_stats_.get() != nullptr) {
    compilation_stats_->Log();
  }
}

void OptimizingCompiler::DumpInstructionSetFeaturesToCfg() const {
  const CompilerOptions& compiler_options = GetCompilerOptions();
  const InstructionSetFeatures* features = compiler_options.GetInstructionSetFeatures();
  std::string isa_string =
      std::string("isa:") + GetInstructionSetString(features->GetInstructionSet());
  std::string features_string = "isa_features:" + features->GetFeatureString();
  std::string read_barrier_type = "none";
  if (compiler_options.EmitReadBarrier()) {
    if (art::kUseBakerReadBarrier)
      read_barrier_type = "baker";
    else if (art::kUseTableLookupReadBarrier)
      read_barrier_type = "tablelookup";
  }
  std::string read_barrier_string = ART_FORMAT("read_barrier_type:{}", read_barrier_type);
  // It is assumed that visualizer_output_ is empty when calling this function, hence the fake
  // compilation block containing the ISA features will be printed at the beginning of the .cfg
  // file.
  *visualizer_output_ << HGraphVisualizer::InsertMetaDataAsCompilationBlock(
      isa_string + ' ' + features_string + ' ' + read_barrier_string);
}

bool OptimizingCompiler::CanCompileMethod([[maybe_unused]] uint32_t method_idx,
                                          [[maybe_unused]] const DexFile& dex_file) const {
  return true;
}

static bool IsInstructionSetSupported(InstructionSet instruction_set) {
  return instruction_set == InstructionSet::kArm ||
         instruction_set == InstructionSet::kArm64 ||
         instruction_set == InstructionSet::kThumb2 ||
         instruction_set == InstructionSet::kRiscv64 ||
         instruction_set == InstructionSet::kX86 ||
         instruction_set == InstructionSet::kX86_64;
}

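// Runs only the passes required for correctness on the target ISA (the
// @CriticalNative ABI fixups on ARM and RISC-V, PC-relative fixups on x86).
// Used when the full optimization pipeline is skipped, e.g. for baseline
// compilation with branch profiling.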
bool OptimizingCompiler::RunRequiredPasses(HGraph* graph,
                                           CodeGenerator* codegen,
                                           const DexCompilationUnit& dex_compilation_unit,
                                           PassObserver* pass_observer) const {
  switch (codegen->GetCompilerOptions().GetInstructionSet()) {
#if defined(ART_ENABLE_CODEGEN_arm)
    case InstructionSet::kThumb2:
    case InstructionSet::kArm: {
      OptimizationDef arm_optimizations[] = {
          OptDef(OptimizationPass::kCriticalNativeAbiFixupArm),
      };
      return RunOptimizations(graph,
                              codegen,
                              dex_compilation_unit,
                              pass_observer,
                              arm_optimizations);
    }
#endif
#if defined(ART_ENABLE_CODEGEN_riscv64)
    case InstructionSet::kRiscv64: {
      OptimizationDef riscv64_optimizations[] = {
          OptDef(OptimizationPass::kCriticalNativeAbiFixupRiscv64),
      };
      return RunOptimizations(graph,
                              codegen,
                              dex_compilation_unit,
                              pass_observer,
                              riscv64_optimizations);
    }
#endif
#ifdef ART_ENABLE_CODEGEN_x86
    case InstructionSet::kX86: {
      OptimizationDef x86_optimizations[] = {
          OptDef(OptimizationPass::kPcRelativeFixupsX86),
      };
      return RunOptimizations(graph,
                              codegen,
                              dex_compilation_unit,
                              pass_observer,
                              x86_optimizations);
    }
#endif
    default:
      UNUSED(graph);
      UNUSED(codegen);
      UNUSED(dex_compilation_unit);
      UNUSED(pass_observer);
      return false;
  }
}

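// Runs the instruction-set-specific optimizations: the arch simplifier, a GVN
// pass after the arch-specific simplification, plus scheduling or memory
// operand generation where the target supports them.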
bool OptimizingCompiler::RunArchOptimizations(HGraph* graph,
                                              CodeGenerator* codegen,
                                              const DexCompilationUnit& dex_compilation_unit,
                                              PassObserver* pass_observer) const {
  switch (codegen->GetCompilerOptions().GetInstructionSet()) {
#if defined(ART_ENABLE_CODEGEN_arm)
    case InstructionSet::kThumb2:
    case InstructionSet::kArm: {
      OptimizationDef arm_optimizations[] = {
          OptDef(OptimizationPass::kInstructionSimplifierArm),
          OptDef(OptimizationPass::kSideEffectsAnalysis),
          OptDef(OptimizationPass::kGlobalValueNumbering, "GVN$after_arch"),
          OptDef(OptimizationPass::kCriticalNativeAbiFixupArm),
          OptDef(OptimizationPass::kScheduling)
      };
      return RunOptimizations(graph,
                              codegen,
                              dex_compilation_unit,
                              pass_observer,
                              arm_optimizations);
    }
#endif
#ifdef ART_ENABLE_CODEGEN_arm64
    case InstructionSet::kArm64: {
      OptimizationDef arm64_optimizations[] = {
          OptDef(OptimizationPass::kInstructionSimplifierArm64),
          OptDef(OptimizationPass::kSideEffectsAnalysis),
          OptDef(OptimizationPass::kGlobalValueNumbering, "GVN$after_arch"),
          OptDef(OptimizationPass::kScheduling)
      };
      return RunOptimizations(graph,
                              codegen,
                              dex_compilation_unit,
                              pass_observer,
                              arm64_optimizations);
    }
#endif
#if defined(ART_ENABLE_CODEGEN_riscv64)
    case InstructionSet::kRiscv64: {
      OptimizationDef riscv64_optimizations[] = {
          OptDef(OptimizationPass::kInstructionSimplifierRiscv64),
          OptDef(OptimizationPass::kSideEffectsAnalysis),
          OptDef(OptimizationPass::kGlobalValueNumbering, "GVN$after_arch"),
          OptDef(OptimizationPass::kCriticalNativeAbiFixupRiscv64)
      };
      return RunOptimizations(graph,
                              codegen,
                              dex_compilation_unit,
                              pass_observer,
                              riscv64_optimizations);
    }
#endif
#ifdef ART_ENABLE_CODEGEN_x86
    case InstructionSet::kX86: {
      OptimizationDef x86_optimizations[] = {
          OptDef(OptimizationPass::kInstructionSimplifierX86),
          OptDef(OptimizationPass::kSideEffectsAnalysis),
          OptDef(OptimizationPass::kGlobalValueNumbering, "GVN$after_arch"),
          OptDef(OptimizationPass::kPcRelativeFixupsX86),
          OptDef(OptimizationPass::kX86MemoryOperandGeneration)
      };
      return RunOptimizations(graph,
                              codegen,
                              dex_compilation_unit,
                              pass_observer,
                              x86_optimizations);
    }
#endif
#ifdef ART_ENABLE_CODEGEN_x86_64
    case InstructionSet::kX86_64: {
      OptimizationDef x86_64_optimizations[] = {
          OptDef(OptimizationPass::kInstructionSimplifierX86_64),
          OptDef(OptimizationPass::kSideEffectsAnalysis),
          OptDef(OptimizationPass::kGlobalValueNumbering, "GVN$after_arch"),
          OptDef(OptimizationPass::kX86MemoryOperandGeneration)
      };
      return RunOptimizations(graph,
                              codegen,
                              dex_compilation_unit,
                              pass_observer,
                              x86_64_optimizations);
    }
#endif
    default:
      UNUSED(graph);
      UNUSED(dex_compilation_unit);
      UNUSED(pass_observer);
      return false;
  }
}

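// Register allocation proceeds in three steps: prepare the graph for register
// allocation, run SSA liveness analysis, then run the register allocator on
// the liveness data.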
NO_INLINE  // Avoid increasing caller's frame size by large stack-allocated objects.
static void AllocateRegisters(HGraph* graph,
                              CodeGenerator* codegen,
                              PassObserver* pass_observer,
                              OptimizingCompilerStats* stats) {
  {
    PassScope scope(PrepareForRegisterAllocation::kPrepareForRegisterAllocationPassName,
                    pass_observer);
    PrepareForRegisterAllocation(graph, codegen->GetCompilerOptions(), stats).Run();
  }
  // Use local allocator shared by SSA liveness analysis and register allocator.
  // (Register allocator creates new objects in the liveness data.)
  ScopedArenaAllocator local_allocator(graph->GetArenaStack());
  SsaLivenessAnalysis liveness(graph, codegen, &local_allocator);
  {
    PassScope scope(SsaLivenessAnalysis::kLivenessPassName, pass_observer);
    liveness.Analyze();
  }
  {
    PassScope scope(RegisterAllocator::kRegisterAllocatorPassName, pass_observer);
    std::unique_ptr<RegisterAllocator> register_allocator =
        RegisterAllocator::Create(&local_allocator, codegen, liveness);
    register_allocator->AllocateRegisters();
  }
}

// Strip pass name suffix to get optimization name.
static std::string ConvertPassNameToOptimizationName(const std::string& pass_name) {
  size_t pos = pass_name.find(kPassNameSeparator);
  return pos == std::string::npos ? pass_name : pass_name.substr(0, pos);
}

void OptimizingCompiler::RunOptimizations(HGraph* graph,
                                          CodeGenerator* codegen,
                                          const DexCompilationUnit& dex_compilation_unit,
                                          PassObserver* pass_observer) const {
  const std::vector<std::string>* pass_names = GetCompilerOptions().GetPassesToRun();
  if (pass_names != nullptr) {
    // If passes were defined on command-line, build the optimization
    // passes and run these instead of the built-in optimizations.
    // TODO: a way to define depends_on via command-line?
    const size_t length = pass_names->size();
    std::vector<OptimizationDef> optimizations;
    for (const std::string& pass_name : *pass_names) {
      std::string opt_name = ConvertPassNameToOptimizationName(pass_name);
      optimizations.push_back(OptDef(OptimizationPassByName(opt_name), pass_name.c_str()));
    }
    RunOptimizations(graph,
                     codegen,
                     dex_compilation_unit,
                     pass_observer,
                     optimizations.data(),
                     length);
    return;
  }

  OptimizationDef optimizations[] = {
      // Initial optimizations.
      OptDef(OptimizationPass::kConstantFolding),
      OptDef(OptimizationPass::kInstructionSimplifier),
      OptDef(OptimizationPass::kDeadCodeElimination,
             "dead_code_elimination$initial"),
      // Inlining.
      OptDef(OptimizationPass::kInliner),
      // Simplification (if inlining occurred, or if we analyzed the invoke as "always throwing").
      OptDef(OptimizationPass::kConstantFolding,
             "constant_folding$after_inlining",
             OptimizationPass::kInliner),
      OptDef(OptimizationPass::kInstructionSimplifier,
             "instruction_simplifier$after_inlining",
             OptimizationPass::kInliner),
      OptDef(OptimizationPass::kDeadCodeElimination,
             "dead_code_elimination$after_inlining",
             OptimizationPass::kInliner),
      // GVN.
      OptDef(OptimizationPass::kSideEffectsAnalysis,
             "side_effects$before_gvn"),
      OptDef(OptimizationPass::kGlobalValueNumbering),
      // Simplification (TODO: only if GVN occurred).
      OptDef(OptimizationPass::kSelectGenerator),
      OptDef(OptimizationPass::kConstantFolding,
             "constant_folding$after_gvn"),
      OptDef(OptimizationPass::kInstructionSimplifier,
             "instruction_simplifier$after_gvn"),
      OptDef(OptimizationPass::kDeadCodeElimination,
             "dead_code_elimination$after_gvn"),
      // High-level optimizations.
      OptDef(OptimizationPass::kSideEffectsAnalysis,
             "side_effects$before_licm"),
      OptDef(OptimizationPass::kInvariantCodeMotion),
      OptDef(OptimizationPass::kInductionVarAnalysis),
      OptDef(OptimizationPass::kBoundsCheckElimination),
      OptDef(OptimizationPass::kLoopOptimization),
      // Simplification.
      OptDef(OptimizationPass::kConstantFolding,
             "constant_folding$after_loop_opt"),
      OptDef(OptimizationPass::kAggressiveInstructionSimplifier,
             "instruction_simplifier$after_loop_opt"),
      OptDef(OptimizationPass::kDeadCodeElimination,
             "dead_code_elimination$after_loop_opt"),
      // Other high-level optimizations.
      OptDef(OptimizationPass::kLoadStoreElimination),
      OptDef(OptimizationPass::kCHAGuardOptimization),
      OptDef(OptimizationPass::kCodeSinking),
      // Simplification.
      OptDef(OptimizationPass::kConstantFolding,
             "constant_folding$before_codegen"),
      // The codegen has a few assumptions that only the instruction simplifier
      // can satisfy. For example, the code generator does not expect to see a
      // HTypeConversion from a type to the same type.
      OptDef(OptimizationPass::kAggressiveInstructionSimplifier,
             "instruction_simplifier$before_codegen"),
      // Simplification may result in dead code that should be removed prior to
      // code generation.
      OptDef(OptimizationPass::kDeadCodeElimination,
             "dead_code_elimination$before_codegen"),
      // Eliminate constructor fences after code sinking to avoid
      // complicated sinking logic to split a fence with many inputs.
      OptDef(OptimizationPass::kConstructorFenceRedundancyElimination)
  };
  RunOptimizations(graph,
                   codegen,
                   dex_compilation_unit,
                   pass_observer,
                   optimizations);

  RunArchOptimizations(graph, codegen, dex_compilation_unit, pass_observer);
}

static ArenaVector<linker::LinkerPatch> EmitAndSortLinkerPatches(CodeGenerator* codegen) {
  ArenaVector<linker::LinkerPatch> linker_patches(codegen->GetGraph()->GetAllocator()->Adapter());
  codegen->EmitLinkerPatches(&linker_patches);

  // Sort patches by literal offset. Required for .oat_patches encoding.
  std::sort(linker_patches.begin(), linker_patches.end(),
            [](const linker::LinkerPatch& lhs, const linker::LinkerPatch& rhs) {
              return lhs.LiteralOffset() < rhs.LiteralOffset();
            });

  return linker_patches;
}

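// Emits the compiled code together with its stack maps, CFI and linker patches,
// and makes sure any thunk code required by the patches is available in the
// CompiledCodeStorage.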
CompiledMethod* OptimizingCompiler::Emit(ArenaAllocator* allocator,
                                         CodeGenerator* codegen,
                                         bool is_intrinsic,
                                         const dex::CodeItem* code_item_for_osr_check) const {
  ArenaVector<linker::LinkerPatch> linker_patches = EmitAndSortLinkerPatches(codegen);
  ScopedArenaVector<uint8_t> stack_map = codegen->BuildStackMaps(code_item_for_osr_check);

  CompiledCodeStorage* storage = GetCompiledCodeStorage();
  CompiledMethod* compiled_method = storage->CreateCompiledMethod(
      codegen->GetInstructionSet(),
      codegen->GetCode(),
      ArrayRef<const uint8_t>(stack_map),
      ArrayRef<const uint8_t>(*codegen->GetAssembler()->cfi().data()),
      ArrayRef<const linker::LinkerPatch>(linker_patches),
      is_intrinsic);

  for (const linker::LinkerPatch& patch : linker_patches) {
    if (codegen->NeedsThunkCode(patch) && storage->GetThunkCode(patch).empty()) {
      ArenaVector<uint8_t> code(allocator->Adapter());
      std::string debug_name;
      codegen->EmitThunkCode(patch, &code, &debug_name);
      storage->SetThunkCode(patch, ArrayRef<const uint8_t>(code), debug_name);
    }
  }

  return compiled_method;
}

CodeGenerator* OptimizingCompiler::TryCompile(ArenaAllocator* allocator,
                                              ArenaStack* arena_stack,
                                              const DexCompilationUnit& dex_compilation_unit,
                                              ArtMethod* method,
                                              CompilationKind compilation_kind,
                                              VariableSizedHandleScope* handles) const {
  MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kAttemptBytecodeCompilation);
  const CompilerOptions& compiler_options = GetCompilerOptions();
  InstructionSet instruction_set = compiler_options.GetInstructionSet();
  const DexFile& dex_file = *dex_compilation_unit.GetDexFile();
  uint32_t method_idx = dex_compilation_unit.GetDexMethodIndex();
  const dex::CodeItem* code_item = dex_compilation_unit.GetCodeItem();

  // Always use the Thumb-2 assembler: some runtime functionality
  // (like implicit stack overflow checks) assumes Thumb-2.
  DCHECK_NE(instruction_set, InstructionSet::kArm);

  // Do not attempt to compile on architectures we do not support.
  if (!IsInstructionSetSupported(instruction_set)) {
    MaybeRecordStat(compilation_stats_.get(),
                    MethodCompilationStat::kNotCompiledUnsupportedIsa);
    return nullptr;
  }

  if (Compiler::IsPathologicalCase(*code_item, method_idx, dex_file)) {
    SCOPED_TRACE << "Not compiling because of pathological case";
    MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kNotCompiledPathological);
    return nullptr;
  }

  // Implementation of the space filter: do not compile a code item whose size in
  // code units is bigger than 128.
  static constexpr size_t kSpaceFilterOptimizingThreshold = 128;
  if ((compiler_options.GetCompilerFilter() == CompilerFilter::kSpace)
      && (CodeItemInstructionAccessor(dex_file, code_item).InsnsSizeInCodeUnits() >
          kSpaceFilterOptimizingThreshold)) {
    SCOPED_TRACE << "Not compiling because of space filter";
    MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kNotCompiledSpaceFilter);
    return nullptr;
  }

  CodeItemDebugInfoAccessor code_item_accessor(dex_file, code_item, method_idx);

  bool dead_reference_safe;
  // For AOT compilation, we may not get a method, for example if its class is erroneous,
  // possibly due to an unavailable superclass. JIT should always have a method.
  DCHECK(Runtime::Current()->IsAotCompiler() || method != nullptr);
  if (method != nullptr) {
    const dex::ClassDef* containing_class;
    {
      ScopedObjectAccess soa(Thread::Current());
      containing_class = &method->GetClassDef();
    }
    // MethodContainsRSensitiveAccess is currently slow, but HasDeadReferenceSafeAnnotation()
    // is currently rarely true.
    dead_reference_safe =
        annotations::HasDeadReferenceSafeAnnotation(dex_file, *containing_class)
        && !annotations::MethodContainsRSensitiveAccess(dex_file, *containing_class, method_idx);
  } else {
    // If we could not resolve the class, conservatively assume it's dead-reference unsafe.
    dead_reference_safe = false;
  }

  HGraph* graph = new (allocator) HGraph(
      allocator,
      arena_stack,
      handles,
      dex_file,
      method_idx,
      compiler_options.GetInstructionSet(),
      kInvalidInvokeType,
      dead_reference_safe,
      compiler_options.GetDebuggable(),
      compilation_kind);

  if (method != nullptr) {
    graph->SetArtMethod(method);
  }

  jit::Jit* jit = Runtime::Current()->GetJit();
  if (jit != nullptr) {
    ProfilingInfo* info = jit->GetCodeCache()->GetProfilingInfo(method, Thread::Current());
    graph->SetProfilingInfo(info);
  }

  std::unique_ptr<CodeGenerator> codegen(
      CodeGenerator::Create(graph,
                            compiler_options,
                            compilation_stats_.get()));
  if (codegen.get() == nullptr) {
    MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kNotCompiledNoCodegen);
    return nullptr;
  }
  codegen->GetAssembler()->cfi().SetEnabled(compiler_options.GenerateAnyDebugInfo());

  PassObserver pass_observer(graph,
                             codegen.get(),
                             visualizer_output_.get(),
                             compiler_options);

  {
    VLOG(compiler) << "Building " << pass_observer.GetMethodName();
    PassScope scope(HGraphBuilder::kBuilderPassName, &pass_observer);
    HGraphBuilder builder(graph,
                          code_item_accessor,
                          &dex_compilation_unit,
                          &dex_compilation_unit,
                          codegen.get(),
                          compilation_stats_.get());
    GraphAnalysisResult result = builder.BuildGraph();
    if (result != kAnalysisSuccess) {
      SCOPED_TRACE << "Not compiling because of " << result;
      switch (result) {
        case kAnalysisSkipped: {
          MaybeRecordStat(compilation_stats_.get(),
                          MethodCompilationStat::kNotCompiledSkipped);
          break;
        }
        case kAnalysisInvalidBytecode: {
          MaybeRecordStat(compilation_stats_.get(),
                          MethodCompilationStat::kNotCompiledInvalidBytecode);
          break;
        }
        case kAnalysisFailThrowCatchLoop: {
          MaybeRecordStat(compilation_stats_.get(),
                          MethodCompilationStat::kNotCompiledThrowCatchLoop);
          break;
        }
        case kAnalysisFailAmbiguousArrayOp: {
          MaybeRecordStat(compilation_stats_.get(),
                          MethodCompilationStat::kNotCompiledAmbiguousArrayOp);
          break;
        }
        case kAnalysisFailIrreducibleLoopAndStringInit: {
          MaybeRecordStat(compilation_stats_.get(),
                          MethodCompilationStat::kNotCompiledIrreducibleLoopAndStringInit);
          break;
        }
        case kAnalysisFailPhiEquivalentInOsr: {
          MaybeRecordStat(compilation_stats_.get(),
                          MethodCompilationStat::kNotCompiledPhiEquivalentInOsr);
          break;
        }
        case kAnalysisSuccess:
          LOG(FATAL) << "Unreachable";
          UNREACHABLE();
      }
      pass_observer.SetGraphInBadState();
      return nullptr;
    }
  }

  if (compilation_kind == CompilationKind::kBaseline && compiler_options.ProfileBranches()) {
    graph->SetUsefulOptimizing();
    // Branch profiling currently doesn't support running optimizations.
    RunRequiredPasses(graph, codegen.get(), dex_compilation_unit, &pass_observer);
  } else {
    RunOptimizations(graph, codegen.get(), dex_compilation_unit, &pass_observer);
    PassScope scope(WriteBarrierElimination::kWBEPassName, &pass_observer);
    WriteBarrierElimination(graph, compilation_stats_.get()).Run();
  }

  // If we are compiling baseline and we haven't created a profiling info for
  // this method already, do it now.
  if (jit != nullptr &&
      compilation_kind == CompilationKind::kBaseline &&
      graph->IsUsefulOptimizing() &&
      graph->GetProfilingInfo() == nullptr) {
    ProfilingInfoBuilder(
        graph, codegen->GetCompilerOptions(), codegen.get(), compilation_stats_.get()).Run();
    // We expect a profiling info to be created and attached to the graph.
    // However, we may have run out of memory trying to create it, so in this
    // case just abort the compilation.
    if (graph->GetProfilingInfo() == nullptr) {
      SCOPED_TRACE << "Not compiling because of out of memory";
      MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kJitOutOfMemoryForCommit);
      return nullptr;
    }
  }

  AllocateRegisters(graph,
                    codegen.get(),
                    &pass_observer,
                    compilation_stats_.get());

  if (UNLIKELY(codegen->GetFrameSize() > codegen->GetMaximumFrameSize())) {
    SCOPED_TRACE << "Not compiling because of stack frame too large";
    LOG(WARNING) << "Stack frame size is " << codegen->GetFrameSize()
                 << " which is larger than the maximum of " << codegen->GetMaximumFrameSize()
                 << " bytes. Method: " << graph->PrettyMethod();
    MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kNotCompiledFrameTooBig);
    return nullptr;
  }

  codegen->Compile();
  pass_observer.DumpDisassembly();

  MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kCompiledBytecode);
  return codegen.release();
}

CodeGenerator* OptimizingCompiler::TryCompileIntrinsic(
    ArenaAllocator* allocator,
    ArenaStack* arena_stack,
    const DexCompilationUnit& dex_compilation_unit,
    ArtMethod* method,
    VariableSizedHandleScope* handles) const {
  MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kAttemptIntrinsicCompilation);
  const CompilerOptions& compiler_options = GetCompilerOptions();
  InstructionSet instruction_set = compiler_options.GetInstructionSet();
  const DexFile& dex_file = *dex_compilation_unit.GetDexFile();
  uint32_t method_idx = dex_compilation_unit.GetDexMethodIndex();

  // Always use the Thumb-2 assembler: some runtime functionality
  // (like implicit stack overflow checks) assumes Thumb-2.
  DCHECK_NE(instruction_set, InstructionSet::kArm);

  // Do not attempt to compile on architectures we do not support.
  if (!IsInstructionSetSupported(instruction_set)) {
    return nullptr;
  }

  HGraph* graph = new (allocator) HGraph(
      allocator,
      arena_stack,
      handles,
      dex_file,
      method_idx,
      compiler_options.GetInstructionSet(),
      kInvalidInvokeType,
      /* dead_reference_safe= */ true,  // Intrinsics don't affect dead reference safety.
      compiler_options.GetDebuggable(),
      CompilationKind::kOptimized);

  DCHECK(Runtime::Current()->IsAotCompiler());
  DCHECK(method != nullptr);
  graph->SetArtMethod(method);

  std::unique_ptr<CodeGenerator> codegen(
      CodeGenerator::Create(graph,
                            compiler_options,
                            compilation_stats_.get()));
  if (codegen.get() == nullptr) {
    return nullptr;
  }
  codegen->GetAssembler()->cfi().SetEnabled(compiler_options.GenerateAnyDebugInfo());

  PassObserver pass_observer(graph,
                             codegen.get(),
                             visualizer_output_.get(),
                             compiler_options);

  {
    VLOG(compiler) << "Building intrinsic graph " << pass_observer.GetMethodName();
    PassScope scope(HGraphBuilder::kBuilderPassName, &pass_observer);
    HGraphBuilder builder(graph,
                          CodeItemDebugInfoAccessor(),  // Null code item.
                          &dex_compilation_unit,
                          &dex_compilation_unit,
                          codegen.get(),
                          compilation_stats_.get());
    builder.BuildIntrinsicGraph(method);
  }

  OptimizationDef optimizations[] = {
      // The codegen has a few assumptions that only the instruction simplifier
      // can satisfy.
      OptDef(OptimizationPass::kInstructionSimplifier),
  };
  RunOptimizations(graph,
                   codegen.get(),
                   dex_compilation_unit,
                   &pass_observer,
                   optimizations);

  RunArchOptimizations(graph, codegen.get(), dex_compilation_unit, &pass_observer);
  {
    PassScope scope(WriteBarrierElimination::kWBEPassName, &pass_observer);
    WriteBarrierElimination(graph, compilation_stats_.get()).Run();
  }

  AllocateRegisters(graph,
                    codegen.get(),
                    &pass_observer,
                    compilation_stats_.get());
  if (!codegen->IsLeafMethod()) {
    VLOG(compiler) << "Intrinsic method is not leaf: " << method->GetIntrinsic()
                   << " " << graph->PrettyMethod();
    return nullptr;
  }

  CHECK_LE(codegen->GetFrameSize(), codegen->GetMaximumFrameSize());
  codegen->Compile();
  pass_observer.DumpDisassembly();

  VLOG(compiler) << "Compiled intrinsic: " << method->GetIntrinsic()
                 << " " << graph->PrettyMethod();
  MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kCompiledIntrinsic);
  return codegen.release();
}

CompiledMethod* OptimizingCompiler::Compile(const dex::CodeItem* code_item,
                                            uint32_t access_flags,
                                            InvokeType invoke_type,
                                            uint16_t class_def_idx,
                                            uint32_t method_idx,
                                            Handle<mirror::ClassLoader> jclass_loader,
                                            const DexFile& dex_file,
                                            Handle<mirror::DexCache> dex_cache) const {
  const CompilerOptions& compiler_options = GetCompilerOptions();
  DCHECK(compiler_options.IsAotCompiler());
  CompiledMethod* compiled_method = nullptr;
  Runtime* runtime = Runtime::Current();
  DCHECK(runtime->IsAotCompiler());
  ArenaAllocator allocator(runtime->GetArenaPool());
  ArenaStack arena_stack(runtime->GetArenaPool());
  std::unique_ptr<CodeGenerator> codegen;
  bool compiled_intrinsic = false;
  {
    ScopedObjectAccess soa(Thread::Current());
    ArtMethod* method =
        runtime->GetClassLinker()->ResolveMethod<ClassLinker::ResolveMode::kCheckICCEAndIAE>(
            method_idx, dex_cache, jclass_loader, /*referrer=*/ nullptr, invoke_type);
    DCHECK_EQ(method == nullptr, soa.Self()->IsExceptionPending());
    soa.Self()->ClearException();  // Suppress exception if any.
    VariableSizedHandleScope handles(soa.Self());
    Handle<mirror::Class> compiling_class =
        handles.NewHandle(method != nullptr ? method->GetDeclaringClass() : nullptr);
    DexCompilationUnit dex_compilation_unit(
        jclass_loader,
        runtime->GetClassLinker(),
        dex_file,
        code_item,
        class_def_idx,
        method_idx,
        access_flags,
        /*verified_method=*/ nullptr,  // Not needed by the Optimizing compiler.
        dex_cache,
        compiling_class);
    // All signature polymorphic methods are native.
    DCHECK(method == nullptr || !method->IsSignaturePolymorphic());
    // Go to native so that we don't block GC during compilation.
    ScopedThreadSuspension sts(soa.Self(), ThreadState::kNative);
    // Try to compile a fully intrinsified implementation.
    if (method != nullptr && UNLIKELY(method->IsIntrinsic())) {
      DCHECK(compiler_options.IsBootImage());
      codegen.reset(
          TryCompileIntrinsic(&allocator,
                              &arena_stack,
                              dex_compilation_unit,
                              method,
                              &handles));
      if (codegen != nullptr) {
        compiled_intrinsic = true;
      }
    }
    if (codegen == nullptr) {
      codegen.reset(
          TryCompile(&allocator,
                     &arena_stack,
                     dex_compilation_unit,
                     method,
                     compiler_options.IsBaseline()
                         ? CompilationKind::kBaseline
                         : CompilationKind::kOptimized,
                     &handles));
    }
  }
  if (codegen.get() != nullptr) {
    compiled_method = Emit(&allocator,
                           codegen.get(),
                           compiled_intrinsic,
                           compiled_intrinsic ? nullptr : code_item);

    if (kArenaAllocatorCountAllocations) {
      codegen.reset();  // Release codegen's ScopedArenaAllocator for memory accounting.
      size_t total_allocated = allocator.BytesAllocated() + arena_stack.PeakBytesAllocated();
      if (total_allocated > kArenaAllocatorMemoryReportThreshold) {
        MemStats mem_stats(allocator.GetMemStats());
        MemStats peak_stats(arena_stack.GetPeakStats());
        LOG(INFO) << "Used " << total_allocated << " bytes of arena memory for compiling "
                  << dex_file.PrettyMethod(method_idx)
                  << "\n" << Dumpable<MemStats>(mem_stats)
                  << "\n" << Dumpable<MemStats>(peak_stats);
      }
    }
  }

  if (kIsDebugBuild &&
      compiler_options.CompileArtTest() &&
      IsInstructionSetSupported(compiler_options.GetInstructionSet())) {
    // For testing purposes, we put a special marker on method names
    // that should be compiled with this compiler (when the
    // instruction set is supported). This makes sure we're not
    // regressing.
    std::string method_name = dex_file.PrettyMethod(method_idx);
    bool shouldCompile = method_name.find("$opt$") != std::string::npos;
    DCHECK_IMPLIES(compiled_method == nullptr, !shouldCompile) << "Didn't compile " << method_name;
  }

  return compiled_method;
}

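// Creates a minimal stack map for a JNI stub: a single method entry carrying the
// frame size and spill masks, but no dex registers or inline info.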
static ScopedArenaVector<uint8_t> CreateJniStackMap(ScopedArenaAllocator* allocator,
                                                    const JniCompiledMethod& jni_compiled_method,
                                                    size_t code_size,
                                                    bool debuggable) {
  // StackMapStream is quite large, so allocate it using the ScopedArenaAllocator
  // to stay clear of the frame size limit.
  std::unique_ptr<StackMapStream> stack_map_stream(
      new (allocator) StackMapStream(allocator, jni_compiled_method.GetInstructionSet()));
  stack_map_stream->BeginMethod(jni_compiled_method.GetFrameSize(),
                                jni_compiled_method.GetCoreSpillMask(),
                                jni_compiled_method.GetFpSpillMask(),
                                /* num_dex_registers= */ 0,
                                /* baseline= */ false,
                                debuggable);
  stack_map_stream->EndMethod(code_size);
  return stack_map_stream->Encode();
}

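// Compiles a JNI stub for the given method. When compiling the boot image, an
// intrinsic may instead get a fully intrinsified implementation.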
CompiledMethod* OptimizingCompiler::JniCompile(uint32_t access_flags,
                                               uint32_t method_idx,
                                               const DexFile& dex_file,
                                               Handle<mirror::DexCache> dex_cache) const {
  Runtime* runtime = Runtime::Current();
  ArenaAllocator allocator(runtime->GetArenaPool());
  ArenaStack arena_stack(runtime->GetArenaPool());

  const CompilerOptions& compiler_options = GetCompilerOptions();
  if (compiler_options.IsBootImage()) {
    ScopedObjectAccess soa(Thread::Current());
    ArtMethod* method = runtime->GetClassLinker()->LookupResolvedMethod(
        method_idx, dex_cache.Get(), /*class_loader=*/ nullptr);
    // Try to compile a fully intrinsified implementation. Do not try to do this for
    // signature polymorphic methods as the InstructionBuilder cannot handle them;
    // and it would be useless as they always have a slow path for type conversions.
    if (method != nullptr && UNLIKELY(method->IsIntrinsic()) && !method->IsSignaturePolymorphic()) {
      VariableSizedHandleScope handles(soa.Self());
      ScopedNullHandle<mirror::ClassLoader> class_loader;  // null means boot class path loader.
      Handle<mirror::Class> compiling_class = handles.NewHandle(method->GetDeclaringClass());
      DexCompilationUnit dex_compilation_unit(
          class_loader,
          runtime->GetClassLinker(),
          dex_file,
          /*code_item=*/ nullptr,
          /*class_def_idx=*/ DexFile::kDexNoIndex16,
          method_idx,
          access_flags,
          /*verified_method=*/ nullptr,
          dex_cache,
          compiling_class);
      // Go to native so that we don't block GC during compilation.
      ScopedThreadSuspension sts(soa.Self(), ThreadState::kNative);
      std::unique_ptr<CodeGenerator> codegen(
          TryCompileIntrinsic(&allocator,
                              &arena_stack,
                              dex_compilation_unit,
                              method,
                              &handles));
      if (codegen != nullptr) {
        return Emit(&allocator,
                    codegen.get(),
                    /*is_intrinsic=*/ true,
                    /*item=*/ nullptr);
      }
    }
  }

  JniCompiledMethod jni_compiled_method = ArtQuickJniCompileMethod(
      compiler_options, dex_file.GetMethodShortyView(method_idx), access_flags, &allocator);
  MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kCompiledNativeStub);

  ScopedArenaAllocator stack_map_allocator(&arena_stack);  // Will hold the stack map.
  ScopedArenaVector<uint8_t> stack_map =
      CreateJniStackMap(&stack_map_allocator,
                        jni_compiled_method,
                        jni_compiled_method.GetCode().size(),
                        compiler_options.GetDebuggable() && compiler_options.IsJitCompiler());
  return GetCompiledCodeStorage()->CreateCompiledMethod(
      jni_compiled_method.GetInstructionSet(),
      jni_compiled_method.GetCode(),
      ArrayRef<const uint8_t>(stack_map),
      jni_compiled_method.GetCfi(),
      /*patches=*/ ArrayRef<const linker::LinkerPatch>(),
      /*is_intrinsic=*/ false);
}

Compiler* CreateOptimizingCompiler(const CompilerOptions& compiler_options,
                                   CompiledCodeStorage* storage) {
  return new OptimizingCompiler(compiler_options, storage);
}

bool EncodeArtMethodInInlineInfo([[maybe_unused]] ArtMethod* method) {
  // Note: the runtime is null only for unit testing.
  return Runtime::Current() == nullptr || !Runtime::Current()->IsAotCompiler();
}

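// JIT entry point. Native methods are handled by compiling a JNI stub; Java
// methods go through TryCompile. In both cases the code, stack map and optional
// debug info are reserved in and then committed to the JIT code cache.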
bool OptimizingCompiler::JitCompile(Thread* self,
                                    jit::JitCodeCache* code_cache,
                                    jit::JitMemoryRegion* region,
                                    ArtMethod* method,
                                    CompilationKind compilation_kind,
                                    jit::JitLogger* jit_logger) {
  const CompilerOptions& compiler_options = GetCompilerOptions();
  DCHECK(compiler_options.IsJitCompiler());
  DCHECK_EQ(compiler_options.IsJitCompilerForSharedCode(), code_cache->IsSharedRegion(*region));
  StackHandleScope<3> hs(self);
  Handle<mirror::ClassLoader> class_loader(hs.NewHandle(
      method->GetDeclaringClass()->GetClassLoader()));
  Handle<mirror::DexCache> dex_cache(hs.NewHandle(method->GetDexCache()));
  DCHECK(method->IsCompilable());

  const DexFile* dex_file = method->GetDexFile();
  const uint16_t class_def_idx = method->GetClassDefIndex();
  const dex::CodeItem* code_item = method->GetCodeItem();
  const uint32_t method_idx = method->GetDexMethodIndex();
  const uint32_t access_flags = method->GetAccessFlags();

  Runtime* runtime = Runtime::Current();
  ArenaAllocator allocator(runtime->GetJitArenaPool());

  if (UNLIKELY(method->IsNative())) {
    // Use GenericJniTrampoline for critical native methods in debuggable runtimes. We don't
    // support calling method entry / exit hooks for critical native methods yet.
    // TODO(mythria): Add support for calling method entry / exit hooks in JITed stubs for critical
    // native methods too.
    if (compiler_options.GetDebuggable() && method->IsCriticalNative()) {
      DCHECK(compiler_options.IsJitCompiler());
      return false;
    }
    // Java debuggable runtimes should set compiler options to debuggable, so that we either
    // generate method entry / exit hooks or skip JITing. For critical native methods we don't
    // generate method entry / exit hooks so we shouldn't JIT them in debuggable runtimes.
    DCHECK_IMPLIES(method->IsCriticalNative(), !runtime->IsJavaDebuggable());

    JniCompiledMethod jni_compiled_method = ArtQuickJniCompileMethod(
        compiler_options, dex_file->GetMethodShortyView(method_idx), access_flags, &allocator);
    std::vector<Handle<mirror::Object>> roots;
    ArenaSet<ArtMethod*, std::less<ArtMethod*>> cha_single_implementation_list(
        allocator.Adapter(kArenaAllocCHA));
    ArenaStack arena_stack(runtime->GetJitArenaPool());
    // StackMapStream is large and it does not fit into this frame, so we need a helper method.
    ScopedArenaAllocator stack_map_allocator(&arena_stack);  // Will hold the stack map.
    ScopedArenaVector<uint8_t> stack_map =
        CreateJniStackMap(&stack_map_allocator,
                          jni_compiled_method,
                          jni_compiled_method.GetCode().size(),
                          compiler_options.GetDebuggable() && compiler_options.IsJitCompiler());

    ArrayRef<const uint8_t> reserved_code;
    ArrayRef<const uint8_t> reserved_data;
    if (!code_cache->Reserve(self,
                             region,
                             jni_compiled_method.GetCode().size(),
                             stack_map.size(),
                             /* number_of_roots= */ 0,
                             method,
                             /*out*/ &reserved_code,
                             /*out*/ &reserved_data)) {
      MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kJitOutOfMemoryForCommit);
      return false;
    }
    const uint8_t* code = reserved_code.data() + OatQuickMethodHeader::InstructionAlignedSize();

    // Add debug info after we know the code location but before we update entry-point.
    std::vector<uint8_t> debug_info;
    if (compiler_options.GenerateAnyDebugInfo()) {
      debug::MethodDebugInfo info = {};
      // Simpleperf relies on art_jni_trampoline to detect jni methods.
      info.custom_name = "art_jni_trampoline";
      info.dex_file = dex_file;
      info.class_def_index = class_def_idx;
      info.dex_method_index = method_idx;
      info.access_flags = access_flags;
      info.code_item = code_item;
      info.isa = jni_compiled_method.GetInstructionSet();
      info.deduped = false;
      info.is_native_debuggable = compiler_options.GetNativeDebuggable();
      info.is_optimized = true;
      info.is_code_address_text_relative = false;
      info.code_address = reinterpret_cast<uintptr_t>(code);
      info.code_size = jni_compiled_method.GetCode().size();
      info.frame_size_in_bytes = jni_compiled_method.GetFrameSize();
      info.code_info = nullptr;
      info.cfi = jni_compiled_method.GetCfi();
      debug_info = GenerateJitDebugInfo(info);
    }

    if (!code_cache->Commit(self,
                            region,
                            method,
                            reserved_code,
                            jni_compiled_method.GetCode(),
                            reserved_data,
                            roots,
                            ArrayRef<const uint8_t>(stack_map),
                            debug_info,
                            /* is_full_debug_info= */ compiler_options.GetGenerateDebugInfo(),
                            compilation_kind,
                            cha_single_implementation_list)) {
      code_cache->Free(self, region, reserved_code.data(), reserved_data.data());
      return false;
    }

    Runtime::Current()->GetJit()->AddMemoryUsage(method, allocator.BytesUsed());
    if (jit_logger != nullptr) {
      jit_logger->WriteLog(code, jni_compiled_method.GetCode().size(), method);
    }
    return true;
  }

  ArenaStack arena_stack(runtime->GetJitArenaPool());
  VariableSizedHandleScope handles(self);

  std::unique_ptr<CodeGenerator> codegen;
  {
    Handle<mirror::Class> compiling_class = handles.NewHandle(method->GetDeclaringClass());
    DexCompilationUnit dex_compilation_unit(
        class_loader,
        runtime->GetClassLinker(),
        *dex_file,
        code_item,
        class_def_idx,
        method_idx,
        access_flags,
        /*verified_method=*/ nullptr,
        dex_cache,
        compiling_class);

    // Go to native so that we don't block GC during compilation.
    ScopedThreadSuspension sts(self, ThreadState::kNative);
    codegen.reset(
        TryCompile(&allocator,
                   &arena_stack,
                   dex_compilation_unit,
                   method,
                   compilation_kind,
                   &handles));
    if (codegen.get() == nullptr) {
      return false;
    }
  }

  ScopedArenaVector<uint8_t> stack_map = codegen->BuildStackMaps(code_item);

  ArrayRef<const uint8_t> reserved_code;
  ArrayRef<const uint8_t> reserved_data;
  if (!code_cache->Reserve(self,
                           region,
                           codegen->GetAssembler()->CodeSize(),
                           stack_map.size(),
                           /*number_of_roots=*/ codegen->GetNumberOfJitRoots(),
                           method,
                           /*out*/ &reserved_code,
                           /*out*/ &reserved_data)) {
    MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kJitOutOfMemoryForCommit);
    return false;
  }
  const uint8_t* code = reserved_code.data() + OatQuickMethodHeader::InstructionAlignedSize();
  const uint8_t* roots_data = reserved_data.data();

  std::vector<Handle<mirror::Object>> roots;
  codegen->EmitJitRoots(const_cast<uint8_t*>(codegen->GetAssembler()->CodeBufferBaseAddress()),
                        roots_data,
                        &roots);
  // The root Handle<>s filled by the codegen reference entries in the VariableSizedHandleScope.
  DCHECK(std::all_of(roots.begin(),
                     roots.end(),
                     [&handles](Handle<mirror::Object> root){
                       return handles.Contains(root.GetReference());
                     }));

  // Add debug info after we know the code location but before we update entry-point.
  std::vector<uint8_t> debug_info;
  if (compiler_options.GenerateAnyDebugInfo()) {
    debug::MethodDebugInfo info = {};
    DCHECK(info.custom_name.empty());
    info.dex_file = dex_file;
    info.class_def_index = class_def_idx;
    info.dex_method_index = method_idx;
    info.access_flags = access_flags;
    info.code_item = code_item;
    info.isa = codegen->GetInstructionSet();
    info.deduped = false;
    info.is_native_debuggable = compiler_options.GetNativeDebuggable();
    info.is_optimized = true;
    info.is_code_address_text_relative = false;
    info.code_address = reinterpret_cast<uintptr_t>(code);
    info.code_size = codegen->GetAssembler()->CodeSize();
    info.frame_size_in_bytes = codegen->GetFrameSize();
    info.code_info = stack_map.size() == 0 ? nullptr : stack_map.data();
    info.cfi = ArrayRef<const uint8_t>(*codegen->GetAssembler()->cfi().data());
    debug_info = GenerateJitDebugInfo(info);
  }

  if (compilation_kind == CompilationKind::kBaseline &&
      !codegen->GetGraph()->IsUsefulOptimizing()) {
    compilation_kind = CompilationKind::kOptimized;
  }

  if (!code_cache->Commit(self,
                          region,
                          method,
                          reserved_code,
                          codegen->GetCode(),
                          reserved_data,
                          roots,
                          ArrayRef<const uint8_t>(stack_map),
                          debug_info,
                          /* is_full_debug_info= */ compiler_options.GetGenerateDebugInfo(),
                          compilation_kind,
                          codegen->GetGraph()->GetCHASingleImplementationList())) {
    CHECK_EQ(CodeInfo::HasShouldDeoptimizeFlag(stack_map.data()),
             codegen->GetGraph()->HasShouldDeoptimizeFlag());
    code_cache->Free(self, region, reserved_code.data(), reserved_data.data());
    return false;
  }

  Runtime::Current()->GetJit()->AddMemoryUsage(method, allocator.BytesUsed());
  if (jit_logger != nullptr) {
    jit_logger->WriteLog(code, codegen->GetAssembler()->CodeSize(), method);
  }

  if (kArenaAllocatorCountAllocations) {
    codegen.reset();  // Release codegen's ScopedArenaAllocator for memory accounting.
    size_t total_allocated = allocator.BytesAllocated() + arena_stack.PeakBytesAllocated();
    if (total_allocated > kArenaAllocatorMemoryReportThreshold) {
      MemStats mem_stats(allocator.GetMemStats());
      MemStats peak_stats(arena_stack.GetPeakStats());
      LOG(INFO) << "Used " << total_allocated << " bytes of arena memory for compiling "
                << dex_file->PrettyMethod(method_idx)
                << "\n" << Dumpable<MemStats>(mem_stats)
                << "\n" << Dumpable<MemStats>(peak_stats);
    }
  }

  return true;
}

std::vector<uint8_t> OptimizingCompiler::GenerateJitDebugInfo(const debug::MethodDebugInfo& info) {
  const CompilerOptions& compiler_options = GetCompilerOptions();
  if (compiler_options.GenerateAnyDebugInfo()) {
    // If both flags are passed, generate full debug info.
    const bool mini_debug_info = !compiler_options.GetGenerateDebugInfo();

    // Create entry for the single method that we just compiled.
    InstructionSet isa = compiler_options.GetInstructionSet();
    const InstructionSetFeatures* features = compiler_options.GetInstructionSetFeatures();
    return debug::MakeElfFileForJIT(isa, features, mini_debug_info, info);
  }
  return std::vector<uint8_t>();
}

}  // namespace art