/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "arch/arm64/instruction_set_features_arm64.h"
#include "assembler_arm64.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "heap_poisoning.h"
#include "offsets.h"
#include "thread.h"

using namespace vixl::aarch64;  // NOLINT(build/namespaces)

namespace art HIDDEN {
namespace arm64 {

#ifdef ___
#error "ARM64 Assembler macro already defined."
#else
#define ___ vixl_masm_.
#endif
// Sets vixl::CPUFeatures according to ART instruction set features.
static void SetVIXLCPUFeaturesFromART(vixl::aarch64::MacroAssembler* vixl_masm_,
                                      const Arm64InstructionSetFeatures* art_features) {
  // Retrieve already initialized default features of vixl.
  vixl::CPUFeatures* features = vixl_masm_->GetCPUFeatures();

  DCHECK(features->Has(vixl::CPUFeatures::kFP));
  DCHECK(features->Has(vixl::CPUFeatures::kNEON));
  DCHECK(art_features != nullptr);
  if (art_features->HasCRC()) {
    features->Combine(vixl::CPUFeatures::kCRC32);
  }
  if (art_features->HasDotProd()) {
    features->Combine(vixl::CPUFeatures::kDotProduct);
  }
  if (art_features->HasFP16()) {
    features->Combine(vixl::CPUFeatures::kFPHalf);
    features->Combine(vixl::CPUFeatures::kNEONHalf);
  }
  if (art_features->HasLSE()) {
    features->Combine(vixl::CPUFeatures::kAtomics);
  }
  if (art_features->HasSVE()) {
    features->Combine(vixl::CPUFeatures::kSVE);
  }
}

Arm64Assembler::Arm64Assembler(ArenaAllocator* allocator,
                               const Arm64InstructionSetFeatures* art_features)
    : Assembler(allocator) {
  if (art_features != nullptr) {
    SetVIXLCPUFeaturesFromART(&vixl_masm_, art_features);
  }
}

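// Finalize code generation: delegates to VIXL's FinalizeCode(), which completes
// the code buffer (e.g. emitting any pending literal pools); no further
// instructions should be emitted afterwards.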
void Arm64Assembler::FinalizeCode() {
  ___ FinalizeCode();
}

size_t Arm64Assembler::CodeSize() const {
  return vixl_masm_.GetSizeOfCodeGenerated();
}

const uint8_t* Arm64Assembler::CodeBufferBaseAddress() const {
  return vixl_masm_.GetBuffer().GetStartAddress<const uint8_t*>();
}

void Arm64Assembler::CopyInstructions(const MemoryRegion& region) {
  // Copy the instructions from the buffer.
  MemoryRegion from(vixl_masm_.GetBuffer()->GetStartAddress<void*>(), CodeSize());
  region.CopyFrom(0, from);
}

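// Load the 64-bit value at [base + offs] into dst as a raw pointer, i.e. used
// exactly as loaded, with no heap reference poisoning applied.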
void Arm64Assembler::LoadRawPtr(ManagedRegister m_dst, ManagedRegister m_base, Offset offs) {
  Arm64ManagedRegister dst = m_dst.AsArm64();
  Arm64ManagedRegister base = m_base.AsArm64();
  CHECK(dst.IsXRegister() && base.IsXRegister());
  // Remove dst and base from the temp list - higher level API uses IP1, IP0.
  UseScratchRegisterScope temps(&vixl_masm_);
  temps.Exclude(reg_x(dst.AsXRegister()), reg_x(base.AsXRegister()));
  ___ Ldr(reg_x(dst.AsXRegister()), MEM_OP(reg_x(base.AsXRegister()), offs.Int32Value()));
}

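// Jump to the code pointer stored at [base + offs]: load it into the scratch
// register and branch to it (BR, no link).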
void Arm64Assembler::JumpTo(ManagedRegister m_base, Offset offs, ManagedRegister m_scratch) {
  Arm64ManagedRegister base = m_base.AsArm64();
  Arm64ManagedRegister scratch = m_scratch.AsArm64();
  CHECK(base.IsXRegister()) << base;
  CHECK(scratch.IsXRegister()) << scratch;
  // Remove base and scratch from the temp list - higher level API uses IP1, IP0.
  UseScratchRegisterScope temps(&vixl_masm_);
  temps.Exclude(reg_x(base.AsXRegister()), reg_x(scratch.AsXRegister()));
  ___ Ldr(reg_x(scratch.AsXRegister()), MEM_OP(reg_x(base.AsXRegister()), offs.Int32Value()));
  ___ Br(reg_x(scratch.AsXRegister()));
}

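// Store the given registers to the stack starting at `offset` from the stack
// pointer, using STP pairs where possible, and record a CFI offset for each
// spilled register.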
void Arm64Assembler::SpillRegisters(CPURegList registers, int offset) {
  int size = registers.GetRegisterSizeInBytes();
  const Register sp = vixl_masm_.StackPointer();
  // Since we are operating on register pairs, we would like the offset aligned
  // to double the register size. However, when the number of registers is even,
  // a leading single store would leave an odd count and force an extra trailing
  // store, so we only realign when the count is odd.
  if (!IsAlignedParam(offset, 2 * size) && registers.GetCount() % 2 != 0) {
    const CPURegister& dst0 = registers.PopLowestIndex();
    ___ Str(dst0, MemOperand(sp, offset));
    cfi_.RelOffset(DWARFReg(dst0), offset);
    offset += size;
  }
  while (registers.GetCount() >= 2) {
    const CPURegister& dst0 = registers.PopLowestIndex();
    const CPURegister& dst1 = registers.PopLowestIndex();
    ___ Stp(dst0, dst1, MemOperand(sp, offset));
    cfi_.RelOffset(DWARFReg(dst0), offset);
    cfi_.RelOffset(DWARFReg(dst1), offset + size);
    offset += 2 * size;
  }
  if (!registers.IsEmpty()) {
    const CPURegister& dst0 = registers.PopLowestIndex();
    ___ Str(dst0, MemOperand(sp, offset));
    cfi_.RelOffset(DWARFReg(dst0), offset);
  }
  DCHECK(registers.IsEmpty());
}

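// Reload the given registers from the stack starting at `offset` from the
// stack pointer, mirroring SpillRegisters, and restore their CFI state.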
void Arm64Assembler::UnspillRegisters(CPURegList registers, int offset) {
  int size = registers.GetRegisterSizeInBytes();
  const Register sp = vixl_masm_.StackPointer();
  // Be consistent with the logic for spilling registers.
  if (!IsAlignedParam(offset, 2 * size) && registers.GetCount() % 2 != 0) {
    const CPURegister& dst0 = registers.PopLowestIndex();
    ___ Ldr(dst0, MemOperand(sp, offset));
    cfi_.Restore(DWARFReg(dst0));
    offset += size;
  }
  while (registers.GetCount() >= 2) {
    const CPURegister& dst0 = registers.PopLowestIndex();
    const CPURegister& dst1 = registers.PopLowestIndex();
    ___ Ldp(dst0, dst1, MemOperand(sp, offset));
    cfi_.Restore(DWARFReg(dst0));
    cfi_.Restore(DWARFReg(dst1));
    offset += 2 * size;
  }
  if (!registers.IsEmpty()) {
    const CPURegister& dst0 = registers.PopLowestIndex();
    ___ Ldr(dst0, MemOperand(sp, offset));
    cfi_.Restore(DWARFReg(dst0));
  }
  DCHECK(registers.IsEmpty());
}

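// Heap reference poisoning: when enabled, 32-bit heap references are kept in
// "poisoned" form. Poisoning here is arithmetic negation of the reference, so
// the operation is its own inverse and unpoisoning emits the same instruction.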
void Arm64Assembler::PoisonHeapReference(Register reg) {
  DCHECK(reg.IsW());
  // reg = -reg.
  ___ Neg(reg, Operand(reg));
}

void Arm64Assembler::UnpoisonHeapReference(Register reg) {
  DCHECK(reg.IsW());
  // reg = -reg.
  ___ Neg(reg, Operand(reg));
}

void Arm64Assembler::MaybePoisonHeapReference(Register reg) {
  if (kPoisonHeapReferences) {
    PoisonHeapReference(reg);
  }
}

void Arm64Assembler::MaybeUnpoisonHeapReference(Register reg) {
  if (kPoisonHeapReferences) {
    UnpoisonHeapReference(reg);
  }
}

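// Emit a check that the Marking Register (MR) matches the current thread's
// is_gc_marking flag; on a mismatch, trap with BRK using `code` as the
// immediate so the failure point can be identified.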
void Arm64Assembler::GenerateMarkingRegisterCheck(Register temp, int code) {
  DCHECK(kReserveMarkingRegister);

  vixl::aarch64::Register mr = reg_x(MR);  // Marking Register.
  vixl::aarch64::Register tr = reg_x(TR);  // Thread Register.
  vixl::aarch64::Label mr_is_ok;

  // temp = self.tls32_.is_gc_marking
  ___ Ldr(temp, MemOperand(tr, Thread::IsGcMarkingOffset<kArm64PointerSize>().Int32Value()));
  // Check that mr == self.tls32_.is_gc_marking.
  ___ Cmp(mr.W(), temp);
  ___ B(eq, &mr_is_ok);
  ___ Brk(code);
  ___ Bind(&mr_is_ok);
}

#undef ___

}  // namespace arm64
}  // namespace art