/art/compiler/trampolines/ |
D | trampoline_compiler.cc |
    43  #define __ assembler.
    53  #define ___ assembler.GetVIXLAssembler()->
    61  ArmVIXLAssembler assembler(allocator);   in CreateTrampoline() local
    65  vixl::aarch32::UseScratchRegisterScope temps(assembler.GetVIXLAssembler());   in CreateTrampoline()
    96  Arm64Assembler assembler(allocator);   in CreateTrampoline() local
    131  Riscv64Assembler assembler(allocator);   in CreateTrampoline() local
    132  ScratchRegisterScope srs(&assembler);   in CreateTrampoline()
    162  X86Assembler assembler(allocator);   in CreateTrampoline() local
    183  x86_64::X86_64Assembler assembler(allocator);   in CreateTrampoline() local
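The `#define __ assembler.` shorthand seen above is how ART's hand-written emit code stays terse: every subsequent `__ Insn(...)` line expands into a call on the local assembler object, so the C++ reads almost like the assembly it emits. Below is a minimal, self-contained sketch of that pattern; `MockAssembler`, its mnemonics, and the register numbers are invented for illustration and are not ART's API.

```cpp
#include <cstdint>
#include <cstdio>
#include <vector>

// Stand-in for an architecture-specific assembler; only the shape matters here.
class MockAssembler {
 public:
  void Ldr(int rt, int rn, int32_t offset) {
    std::printf("ldr x%d, [x%d, #%d]\n", rt, rn, offset);
    buffer_.push_back(0u);  // A real assembler would append the encoded instruction.
  }
  void Br(int rn) {
    std::printf("br x%d\n", rn);
    buffer_.push_back(0u);
  }
  size_t CodeSize() const { return buffer_.size() * 4u; }

 private:
  std::vector<uint32_t> buffer_;
};

// The shorthand: "__" hides the receiver so emit code reads like assembly.
#define __ assembler.

void CreateTrampolineSketch(MockAssembler& assembler, int32_t entrypoint_offset) {
  __ Ldr(/*rt=*/16, /*rn=*/0, entrypoint_offset);  // Load the target entrypoint.
  __ Br(/*rn=*/16);                                // Tail-call it.
}

#undef __

int main() {
  MockAssembler assembler;
  CreateTrampolineSketch(assembler, 48);
  std::printf("emitted %zu bytes\n", assembler.CodeSize());
  return 0;
}
```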
|
/art/compiler/optimizing/ |
D | intrinsics_arm_vixl.cc |
    42  #define __ assembler->GetVIXLAssembler()->
    87  ArmVIXLAssembler* assembler = arm_codegen->GetAssembler();   in EmitNativeCode() local
    109  assembler->MaybeUnpoisonHeapReference(tmp);   in EmitNativeCode()
    134  assembler->MaybePoisonHeapReference(tmp);   in EmitNativeCode()
    178  static void MoveFPToInt(LocationSummary* locations, bool is64bit, ArmVIXLAssembler* assembler) {   in MoveFPToInt() argument
    188  static void MoveIntToFP(LocationSummary* locations, bool is64bit, ArmVIXLAssembler* assembler) {   in MoveIntToFP() argument
    259  ArmVIXLAssembler* assembler = codegen->GetAssembler();   in GenNumberOfLeadingZeros() local
    304  ArmVIXLAssembler* assembler = codegen->GetAssembler();   in GenNumberOfTrailingZeros() local
    350  ArmVIXLAssembler* assembler = GetAssembler();   in VisitMathSqrt() local
    362  ArmVIXLAssembler* assembler = GetAssembler();   in VisitMathRint() local
    [all …]
|
D | intrinsics_riscv64.cc |
    31  #define __ assembler->
    42  Riscv64Assembler* assembler = codegen->GetAssembler();   in EmitNativeCode() local
    176  Riscv64Assembler* assembler = GetAssembler();   in VisitDoubleDoubleToRawLongBits() local
    186  Riscv64Assembler* assembler = GetAssembler();   in VisitDoubleLongBitsToDouble() local
    196  Riscv64Assembler* assembler = GetAssembler();   in VisitFloatFloatToRawIntBits() local
    206  Riscv64Assembler* assembler = GetAssembler();   in VisitFloatIntBitsToFloat() local
    216  Riscv64Assembler* assembler = GetAssembler();   in VisitDoubleIsInfinite() local
    229  Riscv64Assembler* assembler = GetAssembler();   in VisitFloatIsInfinite() local
    254  Riscv64Assembler* assembler = GetAssembler();   in VisitMemoryPeekByte() local
    263  Riscv64Assembler* assembler = GetAssembler();   in VisitMemoryPeekIntNative() local
    [all …]
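The VisitDoubleDoubleToRawLongBits / VisitFloatIntBitsToFloat intrinsics listed above lower Java's raw bit reinterpretation to a single FP-to-GPR (or GPR-to-FP) register move. The contract they must preserve can be stated directly in portable C++; the sketch below shows that contract, not the RISC-V lowering, and the helper names are chosen for illustration.

```cpp
#include <cstdint>
#include <cstdio>
#include <cstring>

// Portable equivalent of a raw register move: reinterpret the bits, no conversion.
template <typename To, typename From>
To BitCast(From from) {
  static_assert(sizeof(To) == sizeof(From), "sizes must match");
  To to;
  std::memcpy(&to, &from, sizeof(To));
  return to;
}

int64_t DoubleToRawLongBits(double value) { return BitCast<int64_t>(value); }
double LongBitsToDouble(int64_t bits) { return BitCast<double>(bits); }
int32_t FloatToRawIntBits(float value) { return BitCast<int32_t>(value); }
float IntBitsToFloat(int32_t bits) { return BitCast<float>(bits); }

int main() {
  std::printf("%llx\n",
              static_cast<unsigned long long>(DoubleToRawLongBits(1.0)));  // 3ff0000000000000
  std::printf("%g\n", LongBitsToDouble(0x4000000000000000LL));             // 2
  std::printf("%x\n", static_cast<unsigned>(FloatToRawIntBits(-0.0f)));    // 80000000
  return 0;
}
```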
|
D | intrinsics_x86.cc |
    72  #define __ assembler->
    74  static void GenArrayAddress(X86Assembler* assembler,   in GenArrayAddress() argument
    99  X86Assembler* assembler = x86_codegen->GetAssembler();   in EmitNativeCode() local
    119  GenArrayAddress(assembler, src_stop_addr, src_curr_addr, length, type, /*data_offset=*/ 0u);   in EmitNativeCode()
    171  static void MoveFPToInt(LocationSummary* locations, bool is64bit, X86Assembler* assembler) {   in MoveFPToInt() argument
    186  static void MoveIntToFP(LocationSummary* locations, bool is64bit, X86Assembler* assembler) {   in MoveIntToFP() argument
    253  X86Assembler* assembler) {   in GenReverseBytes() argument
    292  X86Assembler* assembler = GetAssembler();   in VisitLongReverseBytes() local
    338  static void GenSSE41FPToFPIntrinsic(HInvoke* invoke, X86Assembler* assembler, int round_mode) {   in GenSSE41FPToFPIntrinsic() argument
    399  X86Assembler* assembler = GetAssembler();   in VisitMathRoundFloat() local
    [all …]
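GenReverseBytes / VisitLongReverseBytes above lower Integer.reverseBytes and Long.reverseBytes to a byte-swap instruction. The semantics being implemented is a plain reversal of byte order; the sketch below expresses that semantics in portable C++ as a reference, not as the x86 instruction selection.

```cpp
#include <cstdint>
#include <cstdio>

// Reverse the byte order of a 32-bit value, i.e. what a single bswap does.
uint32_t ReverseBytes32(uint32_t x) {
  return ((x & 0x000000FFu) << 24) |
         ((x & 0x0000FF00u) << 8)  |
         ((x & 0x00FF0000u) >> 8)  |
         ((x & 0xFF000000u) >> 24);
}

// 64-bit variant: swap the two halves and reverse each half.
uint64_t ReverseBytes64(uint64_t x) {
  return (static_cast<uint64_t>(ReverseBytes32(static_cast<uint32_t>(x))) << 32) |
         ReverseBytes32(static_cast<uint32_t>(x >> 32));
}

int main() {
  std::printf("%08x\n", ReverseBytes32(0x12345678u));  // 78563412
  std::printf("%016llx\n",
              static_cast<unsigned long long>(
                  ReverseBytes64(0x0102030405060708ull)));  // 0807060504030201
  return 0;
}
```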
|
D | intrinsics_x86_64.cc |
    68  #define __ assembler->
    70  static void GenArrayAddress(X86_64Assembler* assembler,   in GenArrayAddress() argument
    96  X86_64Assembler* assembler = x86_64_codegen->GetAssembler();   in EmitNativeCode() local
    115  GenArrayAddress(assembler, src_stop_addr, src_curr_addr, length, type, /*data_offset=*/ 0u);   in EmitNativeCode()
    158  static void MoveFPToInt(LocationSummary* locations, bool is64bit, X86_64Assembler* assembler) {   in MoveFPToInt() argument
    164  static void MoveIntToFP(LocationSummary* locations, bool is64bit, X86_64Assembler* assembler) {   in MoveIntToFP() argument
    232  X86_64Assembler* assembler = codegen->GetAssembler();   in GenIsInfinite() local
    320  static void GenSSE41FPToFPIntrinsic(HInvoke* invoke, X86_64Assembler* assembler, int round_mode) {   in GenSSE41FPToFPIntrinsic() argument
    381  X86_64Assembler* assembler = GetAssembler();   in VisitMathRoundFloat() local
    422  X86_64Assembler* assembler = GetAssembler();   in VisitMathRoundDouble() local
    [all …]
|
D | jit_patches_arm64.h |
    49  JitPatchesARM64(Arm64Assembler* assembler, ArenaAllocator* allocator) :   in JitPatchesARM64() argument
    50  assembler_(assembler),   in JitPatchesARM64()
|
D | intrinsics_utils.h |
    58  TAssembler* assembler = down_cast<TAssembler*>(codegen->GetAssembler());   in EmitNativeCode() local
    59  assembler->Bind(this->GetEntryLabel());   in EmitNativeCode()
    92  assembler->Jump(this->GetExitLabel());   in EmitNativeCode()
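The hits in intrinsics_utils.h show the generic slow-path shape: the shared slow-path object only sees the base Assembler, so EmitNativeCode down-casts to the concrete assembler type, binds its entry label (the target of the fast path's rare branch), and finally jumps to its exit label to resume straight-line code. A minimal sketch of that bind/jump structure follows; the Label and FakeAssembler types are invented stand-ins, not ART classes.

```cpp
#include <cstdio>
#include <string>

// Stand-ins for ART's Label and architecture-specific assembler types.
struct Label {
  std::string name;
};

class FakeAssembler {
 public:
  void Bind(Label* label) { std::printf("%s:\n", label->name.c_str()); }
  void Jump(Label* label) { std::printf("  b %s\n", label->name.c_str()); }
  void EmitSlowCall() { std::printf("  bl runtime_helper\n"); }
};

// The slow path owns an entry label (branched to from the fast path) and an
// exit label (branched back to once the out-of-line work is done).
template <typename TAssembler>
class SlowPathSketch {
 public:
  void EmitNativeCode(TAssembler* assembler) {
    assembler->Bind(&entry_);   // Fast path branches here in the rare case.
    assembler->EmitSlowCall();  // Do the expensive work out of line.
    assembler->Jump(&exit_);    // Resume the fast path.
  }
  Label* GetEntryLabel() { return &entry_; }
  Label* GetExitLabel() { return &exit_; }

 private:
  Label entry_{"slow_path_entry"};
  Label exit_{"slow_path_exit"};
};

int main() {
  FakeAssembler assembler;
  SlowPathSketch<FakeAssembler> slow_path;
  slow_path.EmitNativeCode(&assembler);
  return 0;
}
```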
|
D | code_generator_arm64.cc |
    5442  Arm64Assembler assembler(GetGraph()->GetAllocator());   in EmitThunkCode() local
    5449  assembler.JumpTo(ManagedRegister(arm64::X0), offset, ManagedRegister(arm64::IP0));   in EmitThunkCode()
    5457  assembler.JumpTo(ManagedRegister(arm64::TR), offset, ManagedRegister(arm64::IP0));   in EmitThunkCode()
    5465  CompileBakerReadBarrierThunk(assembler, patch.GetBakerCustomValue1(), debug_name);   in EmitThunkCode()
    5474  assembler.FinalizeCode();   in EmitThunkCode()
    5475  code->resize(assembler.CodeSize());   in EmitThunkCode()
    5477  assembler.CopyInstructions(code_region);   in EmitThunkCode()
    7224  #define __ assembler.GetVIXLAssembler()->
    7226  static void EmitGrayCheckAndFastPath(arm64::Arm64Assembler& assembler,   in EmitGrayCheckAndFastPath() argument
    7265  static void LoadReadBarrierMarkIntrospectionEntrypoint(arm64::Arm64Assembler& assembler,   in LoadReadBarrierMarkIntrospectionEntrypoint() argument
    [all …]
|
D | code_generator_arm_vixl.cc |
    122  EmitAdrCode(ArmVIXLMacroAssembler* assembler, vixl32::Register rd, vixl32::Label* label)   in EmitAdrCode() argument
    123  : assembler_(assembler), rd_(rd), label_(label) {   in EmitAdrCode()
    124  DCHECK(!assembler->AllowMacroInstructions());  // In ExactAssemblyScope.   in EmitAdrCode()
    125  adr_location_ = assembler->GetCursorOffset();   in EmitAdrCode()
    126  assembler->adr(EncodingSize(Wide), rd, label);   in EmitAdrCode()
    6179  ArmVIXLAssembler* assembler = codegen->GetAssembler();   in CanEncode32BitConstantAsImmediate() local
    6180  if (assembler->ShifterOperandCanHold(opcode, value, flags_update)) {   in CanEncode32BitConstantAsImmediate()
    6197  if (assembler->ShifterOperandCanHold(neg_opcode, neg_value, flags_update)) {   in CanEncode32BitConstantAsImmediate()
    9959  arm::ArmVIXLAssembler assembler(GetGraph()->GetAllocator());   in EmitThunkCode() local
    9965  assembler.LoadFromOffset(arm::kLoadWord, vixl32::pc, vixl32::r0, offset.Int32Value());   in EmitThunkCode()
    [all …]
|
D | intrinsics_arm64.cc |
    1144  Arm64Assembler* assembler = codegen->GetAssembler();   in EmitLoadExclusive() local
    1145  MacroAssembler* masm = assembler->GetVIXLAssembler();   in EmitLoadExclusive()
    1185  assembler->MaybeUnpoisonHeapReference(old_value);   in EmitLoadExclusive()
    1198  Arm64Assembler* assembler = codegen->GetAssembler();   in EmitStoreExclusive() local
    1199  MacroAssembler* masm = assembler->GetVIXLAssembler();   in EmitStoreExclusive()
    1201  assembler->MaybePoisonHeapReference(new_value);   in EmitStoreExclusive()
    1235  assembler->MaybeUnpoisonHeapReference(new_value);   in EmitStoreExclusive()
    1261  Arm64Assembler* assembler = codegen->GetAssembler();   in GenerateCompareAndSet() local
    1262  MacroAssembler* masm = assembler->GetVIXLAssembler();   in GenerateCompareAndSet()
    1358  Arm64Assembler* assembler = arm64_codegen->GetAssembler();   in EmitNativeCode() local
    [all …]
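EmitLoadExclusive / EmitStoreExclusive / GenerateCompareAndSet above build a load-exclusive / store-exclusive retry loop for the compare-and-set intrinsics. The observable behaviour is that of a strong compare-and-exchange; the sketch below states that contract with std::atomic as a semantic equivalent, not as the ARM64 instruction sequence ART emits.

```cpp
#include <atomic>
#include <cstdint>
#include <cstdio>

// What the LDXR/STXR loop implements: atomically replace *addr with new_value
// if it currently holds expected, and report whether the swap happened.
bool CompareAndSet(std::atomic<int32_t>* addr, int32_t expected, int32_t new_value) {
  // compare_exchange_strong retries internally on LL/SC architectures, just as
  // a hand-rolled loop branches back to the load-exclusive on a failed store.
  return addr->compare_exchange_strong(expected, new_value);
}

int main() {
  std::atomic<int32_t> field{41};
  std::printf("%d\n", CompareAndSet(&field, 41, 42));  // 1: swapped
  std::printf("%d\n", CompareAndSet(&field, 41, 43));  // 0: value is now 42
  std::printf("%d\n", field.load());                   // 42
  return 0;
}
```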
|
D | optimizing_cfi_test_expected.inc | 142 // as with the old assembler.
|
D | code_generator_x86_64.cc |
    8406  X86_64Assembler* assembler = codegen_->GetAssembler();   in CreateJumpTable() local
    8409  const int32_t offset_in_constant_table = assembler->ConstantAreaSize();   in CreateJumpTable()
    8413  const int32_t current_table_offset = assembler->CodeSize() + offset_in_constant_table;   in CreateJumpTable()
    8425  assembler->AppendInt32(offset_to_block);   in CreateJumpTable()
    8435  X86_64Assembler* assembler = GetAssembler();   in Finalize() local
    8436  if (!assembler->IsConstantAreaEmpty() || !fixups_to_jump_tables_.empty()) {   in Finalize()
    8438  assembler->Align(4, 0);   in Finalize()
    8439  constant_area_start_ = assembler->CodeSize();   in Finalize()
    8447  assembler->AddConstantArea();   in Finalize()
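CreateJumpTable and Finalize above align the buffer, record where the constant area begins, and store each jump-table entry as an int32 offset from the table to its target block. The sketch below models only that bookkeeping with a toy byte buffer; the buffer type, the example offsets, and the exact ordering of steps are illustrative and simplified relative to the real X86_64Assembler flow.

```cpp
#include <cstdint>
#include <cstdio>
#include <cstring>
#include <vector>

// A toy code buffer exposing just what the jump-table code relies on:
// the current size (used as an offset) and raw appends.
class CodeBuffer {
 public:
  size_t CodeSize() const { return bytes_.size(); }
  void AppendByte(uint8_t value) { bytes_.push_back(value); }
  void Align(size_t alignment, uint8_t fill) {
    while (bytes_.size() % alignment != 0u) bytes_.push_back(fill);
  }
  void AppendInt32(int32_t value) {
    uint8_t raw[sizeof(value)];
    std::memcpy(raw, &value, sizeof(value));
    bytes_.insert(bytes_.end(), raw, raw + sizeof(value));
  }

 private:
  std::vector<uint8_t> bytes_;
};

int main() {
  CodeBuffer buffer;
  for (int i = 0; i < 10; ++i) buffer.AppendByte(0x90);  // 10 bytes of "code", deliberately misaligned.

  // Finalize-style step: pad to 4-byte alignment, then record where the constant area starts.
  buffer.Align(4, 0);
  const size_t constant_area_start = buffer.CodeSize();

  // CreateJumpTable-style step: each entry is the signed distance from the
  // table itself to the start of a target basic block.
  const size_t block_code_offsets[] = {2, 6, 9};
  const size_t current_table_offset = buffer.CodeSize();
  for (size_t block_offset : block_code_offsets) {
    const int32_t offset_to_block =
        static_cast<int32_t>(block_offset) - static_cast<int32_t>(current_table_offset);
    buffer.AppendInt32(offset_to_block);
  }

  std::printf("constant area starts at offset %zu; emitted %zu table bytes\n",
              constant_area_start, buffer.CodeSize() - constant_area_start);
  return 0;
}
```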
|
D | code_generator_arm_vixl.h | 990 void CompileBakerReadBarrierThunk(ArmVIXLAssembler& assembler,
|
D | code_generator_arm64.h | 1107 void CompileBakerReadBarrierThunk(Arm64Assembler& assembler,
|
D | code_generator_x86.cc |
    9089  X86Assembler* assembler = codegen_->GetAssembler();   in CreateJumpTable() local
    9092  const int32_t offset_in_constant_table = assembler->ConstantAreaSize();   in CreateJumpTable()
    9109  assembler->AppendInt32(offset_to_block);   in CreateJumpTable()
    9119  X86Assembler* assembler = GetAssembler();   in Finalize() local
    9121  if (!assembler->IsConstantAreaEmpty() || !fixups_to_jump_tables_.empty()) {   in Finalize()
    9124  assembler->Align(4, 0);   in Finalize()
    9125  constant_area_start_ = assembler->CodeSize();   in Finalize()
    9133  assembler->AddConstantArea();   in Finalize()
|
D | code_generator_riscv64.cc |
    798  Riscv64Assembler* assembler = down_cast<CodeGeneratorRISCV64*>(codegen_)->GetAssembler();   in FpBinOp() local
    800  (assembler->*opS)(rd, rs1, rs2);   in FpBinOp()
    803  (assembler->*opD)(rd, rs1, rs2);   in FpBinOp()
    857  Riscv64Assembler* assembler = down_cast<CodeGeneratorRISCV64*>(codegen_)->GetAssembler();   in FpUnOp() local
    859  (assembler->*opS)(rd, rs1);   in FpUnOp()
    862  (assembler->*opD)(rd, rs1);   in FpUnOp()
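FpBinOp / FpUnOp above avoid duplicating the float and double code paths by taking two pointers to Riscv64Assembler member functions (one single-precision emitter, one double-precision) and invoking the chosen one with `(assembler->*op)(...)`. A minimal sketch of that pointer-to-member dispatch, using an invented assembler type and mnemonics:

```cpp
#include <cstdio>

// Invented stand-in with the same shape as the single/double-precision emitters.
class FakeFpAssembler {
 public:
  void FAddS(int rd, int rs1, int rs2) { std::printf("fadd.s f%d, f%d, f%d\n", rd, rs1, rs2); }
  void FAddD(int rd, int rs1, int rs2) { std::printf("fadd.d f%d, f%d, f%d\n", rd, rs1, rs2); }
  void FSubS(int rd, int rs1, int rs2) { std::printf("fsub.s f%d, f%d, f%d\n", rd, rs1, rs2); }
  void FSubD(int rd, int rs1, int rs2) { std::printf("fsub.d f%d, f%d, f%d\n", rd, rs1, rs2); }
};

using FpBinOpFn = void (FakeFpAssembler::*)(int, int, int);

// One helper covers both precisions: the caller passes both candidate emitters
// and a flag, and the helper picks one via ->* dispatch.
void FpBinOp(FakeFpAssembler* assembler, bool is_double,
             FpBinOpFn opS, FpBinOpFn opD,
             int rd, int rs1, int rs2) {
  FpBinOpFn op = is_double ? opD : opS;
  (assembler->*op)(rd, rs1, rs2);
}

int main() {
  FakeFpAssembler assembler;
  FpBinOp(&assembler, /*is_double=*/false, &FakeFpAssembler::FAddS, &FakeFpAssembler::FAddD, 0, 1, 2);
  FpBinOp(&assembler, /*is_double=*/true,  &FakeFpAssembler::FSubS, &FakeFpAssembler::FSubD, 3, 4, 5);
  return 0;
}
```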
|
/art/compiler/utils/x86_64/ |
D | assembler_x86_64_test.cc |
    654  std::string shll_fn(AssemblerX86_64Test::Base* assembler_test, x86_64::X86_64Assembler* assembler) {   in shll_fn() argument
    659  assembler->shll(reg, shifter);   in shll_fn()
    675  std::string shlq_fn(AssemblerX86_64Test::Base* assembler_test, x86_64::X86_64Assembler* assembler) {   in shlq_fn() argument
    680  assembler->shlq(reg, shifter);   in shlq_fn()
    696  std::string shrl_fn(AssemblerX86_64Test::Base* assembler_test, x86_64::X86_64Assembler* assembler) {   in shrl_fn() argument
    701  assembler->shrl(reg, shifter);   in shrl_fn()
    716  std::string shrq_fn(AssemblerX86_64Test::Base* assembler_test, x86_64::X86_64Assembler* assembler) {   in shrq_fn() argument
    721  assembler->shrq(reg, shifter);   in shrq_fn()
    736  std::string sarl_fn(AssemblerX86_64Test::Base* assembler_test, x86_64::X86_64Assembler* assembler) {   in sarl_fn() argument
    741  assembler->sarl(reg, shifter);   in sarl_fn()
    [all …]
|
D | jni_macro_assembler_x86_64.cc |
    144  static void DecreaseFrameSizeImpl(size_t adjust, X86_64Assembler* assembler) {   in DecreaseFrameSizeImpl() argument
    147  assembler->addq(CpuRegister(RSP), Immediate(adjust));   in DecreaseFrameSizeImpl()
    148  assembler->cfi().AdjustCFAOffset(-adjust);   in DecreaseFrameSizeImpl()
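DecreaseFrameSizeImpl pairs the `addq rsp, adjust` that pops the frame with `cfi().AdjustCFAOffset(-adjust)` so the unwind information keeps describing the frame correctly (the x86 variant further down does the same with `addl`). The sketch below models only that invariant with a toy CFI tracker; the classes and the entry offset of 8 are illustrative assumptions, not ART's DWARF machinery.

```cpp
#include <cassert>
#include <cstddef>
#include <cstdio>

// Tracks the distance from the stack pointer to the canonical frame address,
// the quantity the unwind info must keep in sync with real rsp adjustments.
class CfiState {
 public:
  void AdjustCFAOffset(ptrdiff_t delta) { cfa_offset_ += delta; }
  ptrdiff_t cfa_offset() const { return cfa_offset_; }

 private:
  ptrdiff_t cfa_offset_ = 8;  // Assumed: return address on the stack at entry.
};

class ToyAssembler {
 public:
  // addq/subq rsp stand-ins: only the CFA bookkeeping is modeled here.
  void AddqRsp(size_t imm) { std::printf("addq rsp, %zu\n", imm); }
  void SubqRsp(size_t imm) { std::printf("subq rsp, %zu\n", imm); }
  CfiState& cfi() { return cfi_; }

 private:
  CfiState cfi_;
};

void IncreaseFrameSize(ToyAssembler* assembler, size_t adjust) {
  assembler->SubqRsp(adjust);
  assembler->cfi().AdjustCFAOffset(static_cast<ptrdiff_t>(adjust));
}

void DecreaseFrameSize(ToyAssembler* assembler, size_t adjust) {
  assembler->AddqRsp(adjust);
  assembler->cfi().AdjustCFAOffset(-static_cast<ptrdiff_t>(adjust));
}

int main() {
  ToyAssembler assembler;
  IncreaseFrameSize(&assembler, 64);
  DecreaseFrameSize(&assembler, 64);
  // After a matching increase/decrease the CFA offset is back at its entry value.
  assert(assembler.cfi().cfa_offset() == 8);
  std::printf("cfa offset: %td\n", assembler.cfi().cfa_offset());
  return 0;
}
```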
|
/art/compiler/utils/ |
D | assembler_thumb_test.cc |
    45  ArmVIXLAssemblerTest() : pool(), allocator(&pool), assembler(&allocator) { }   in ArmVIXLAssemblerTest()
    75  #define __ assembler.
    89  #define __ assembler.
    93  ArmVIXLJNIMacroAssembler assembler;   member in art::arm::ArmVIXLAssemblerTest
    257  #define __ assembler.asm_.
    279  vixl::aarch32::UseScratchRegisterScope temps(assembler.asm_.GetVIXLAssembler());   in TEST_F()
    312  vixl::aarch32::UseScratchRegisterScope temps(assembler.asm_.GetVIXLAssembler());   in TEST_F()
|
/art/test/538-checker-embed-constants/ |
D | info.txt | 1 Test embedding of constants in assembler instructions.
|
/art/compiler/utils/x86/ |
D | jni_macro_assembler_x86.cc |
    115  static void DecreaseFrameSizeImpl(X86Assembler* assembler, size_t adjust) {   in DecreaseFrameSizeImpl() argument
    118  assembler->addl(ESP, Immediate(adjust));   in DecreaseFrameSizeImpl()
    119  assembler->cfi().AdjustCFAOffset(-adjust);   in DecreaseFrameSizeImpl()
|
D | assembler_x86_test.cc |
    495  std::string rorl_fn(AssemblerX86Test::Base* assembler_test, x86::X86Assembler* assembler) {   in rorl_fn() argument
    500  assembler->rorl(reg, shifter);   in rorl_fn()
    515  std::string roll_fn(AssemblerX86Test::Base* assembler_test, x86::X86Assembler* assembler) {   in roll_fn() argument
    520  assembler->roll(reg, shifter);   in roll_fn()
|
/art/compiler/utils/riscv64/ |
D | assembler_riscv64.h |
    2744  ScopedExtensionsOverride(Riscv64Assembler* assembler, Riscv64ExtensionMask enabled_extensions)   in ScopedExtensionsOverride() argument
    2745  : assembler_(assembler),   in ScopedExtensionsOverride()
    2746  old_enabled_extensions_(assembler->enabled_extensions_) {   in ScopedExtensionsOverride()
    2747  assembler->enabled_extensions_ = enabled_extensions;   in ScopedExtensionsOverride()
    2755  static Riscv64ExtensionMask GetEnabledExtensions(Riscv64Assembler* assembler) {   in GetEnabledExtensions() argument
    2756  return assembler->enabled_extensions_;   in GetEnabledExtensions()
    2767  explicit ScopedExtensionsRestriction(Riscv64Assembler* assembler)   in ScopedExtensionsRestriction() argument
    2768  : ScopedExtensionsOverride(assembler, GetEnabledExtensions(assembler) & kMask) {}   in ScopedExtensionsRestriction()
    2774  explicit ScopedExtensionsInclusion(Riscv64Assembler* assembler)   in ScopedExtensionsInclusion() argument
    2775  : ScopedExtensionsOverride(assembler, GetEnabledExtensions(assembler) | kMask) {}   in ScopedExtensionsInclusion()
    [all …]
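ScopedExtensionsOverride is an RAII guard: its constructor saves the assembler's enabled-extension bitmask and installs a new one (the Restriction/Inclusion subclasses derive it via & or | with a mask), and the saved value is restored when the guard leaves scope. A self-contained sketch of the same save-and-restore pattern; the types, extension names, and mask values are invented for illustration.

```cpp
#include <cstdint>
#include <cstdio>

using ExtensionMask = uint32_t;

// Invented extension bits, one per optional instruction-set extension.
constexpr ExtensionMask kExtA = 1u << 0;
constexpr ExtensionMask kExtC = 1u << 1;  // e.g. compressed instructions
constexpr ExtensionMask kExtV = 1u << 2;

class ToyAssembler {
 public:
  explicit ToyAssembler(ExtensionMask enabled) : enabled_extensions_(enabled) {}
  bool IsEnabled(ExtensionMask ext) const { return (enabled_extensions_ & ext) != 0u; }

 private:
  friend class ScopedExtensionsOverride;
  ExtensionMask enabled_extensions_;
};

// RAII: the constructor saves and overrides the mask, the destructor restores it.
class ScopedExtensionsOverride {
 public:
  ScopedExtensionsOverride(ToyAssembler* assembler, ExtensionMask enabled)
      : assembler_(assembler),
        old_enabled_extensions_(assembler->enabled_extensions_) {
    assembler->enabled_extensions_ = enabled;
  }
  ~ScopedExtensionsOverride() {
    assembler_->enabled_extensions_ = old_enabled_extensions_;
  }

 private:
  ToyAssembler* const assembler_;
  const ExtensionMask old_enabled_extensions_;
};

int main() {
  ToyAssembler assembler(kExtA | kExtC | kExtV);
  {
    // Temporarily pretend the compressed extension is unavailable.
    ScopedExtensionsOverride no_c(&assembler, (kExtA | kExtC | kExtV) & ~kExtC);
    std::printf("C enabled inside scope: %d\n", assembler.IsEnabled(kExtC));  // 0
  }
  std::printf("C enabled after scope:  %d\n", assembler.IsEnabled(kExtC));    // 1
  return 0;
}
```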
|
/art/test/ |
D | README.md | 14 [Jasmin](http://jasmin.sourceforge.net/) assembler or the
|
/art/compiler/ |
D | Android.bp | 197 "utils/assembler.cc",
|