/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "trampoline_compiler.h"

#include "base/arena_allocator.h"
#include "base/malloc_arena_pool.h"
#include "jni/jni_env_ext.h"

#ifdef ART_ENABLE_CODEGEN_arm
#include "utils/arm/assembler_arm_vixl.h"
#endif

#ifdef ART_ENABLE_CODEGEN_arm64
#include "utils/arm64/assembler_arm64.h"
#endif

#ifdef ART_ENABLE_CODEGEN_riscv64
#include "utils/riscv64/assembler_riscv64.h"
#endif

#ifdef ART_ENABLE_CODEGEN_x86
#include "utils/x86/assembler_x86.h"
#endif

#ifdef ART_ENABLE_CODEGEN_x86_64
#include "utils/x86_64/assembler_x86_64.h"
#endif

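// In each per-architecture CreateTrampoline() below, `__` expands to the local
// architecture-specific assembler instance; the ARM code additionally defines
// `___` for direct access to the underlying VIXL assembler.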
#define __ assembler.

namespace art HIDDEN {

#ifdef ART_ENABLE_CODEGEN_arm
namespace arm {

#ifdef ___
#error "ARM Assembler macro already defined."
#else
#define ___ assembler.GetVIXLAssembler()->
#endif

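// Emits a Thumb-2 trampoline that tail-jumps through the entry point stored at
// |offset| in the Thread object. For kJniAbi the Thread* is first recovered
// from the JNIEnv* passed in R0.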
static std::unique_ptr<const std::vector<uint8_t>> CreateTrampoline(
    ArenaAllocator* allocator, EntryPointCallingConvention abi, ThreadOffset32 offset) {
  using vixl::aarch32::MemOperand;
  using vixl::aarch32::pc;
  using vixl::aarch32::r0;
  ArmVIXLAssembler assembler(allocator);

  switch (abi) {
    case kJniAbi: {  // Load via Thread* held in JNIEnv* in first argument (R0).
      vixl::aarch32::UseScratchRegisterScope temps(assembler.GetVIXLAssembler());
      const vixl::aarch32::Register temp_reg = temps.Acquire();

      // VIXL will use the destination as a scratch register if
      // the offset is not encodable as an immediate operand.
      ___ Ldr(temp_reg, MemOperand(r0, JNIEnvExt::SelfOffset(kArmPointerSize).Int32Value()));
      ___ Ldr(pc, MemOperand(temp_reg, offset.Int32Value()));
      break;
    }
    case kQuickAbi:  // TR holds Thread*.
      ___ Ldr(pc, MemOperand(tr, offset.Int32Value()));
  }

  __ FinalizeCode();
  size_t cs = __ CodeSize();
  std::unique_ptr<std::vector<uint8_t>> entry_stub(new std::vector<uint8_t>(cs));
  MemoryRegion code(entry_stub->data(), entry_stub->size());
  __ CopyInstructions(code);

  return std::move(entry_stub);
}

#undef ___

}  // namespace arm
#endif  // ART_ENABLE_CODEGEN_arm

#ifdef ART_ENABLE_CODEGEN_arm64
namespace arm64 {
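// Emits an ARM64 trampoline that loads the entry point at |offset| from the
// Thread object and branches to it, using IP0/IP1 as scratch so the argument
// registers stay intact. For kJniAbi the Thread* is loaded from the JNIEnv*
// passed in X0.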
static std::unique_ptr<const std::vector<uint8_t>> CreateTrampoline(
    ArenaAllocator* allocator, EntryPointCallingConvention abi, ThreadOffset64 offset) {
  Arm64Assembler assembler(allocator);

  switch (abi) {
    case kJniAbi:  // Load via Thread* held in JNIEnv* in first argument (X0).
      __ LoadRawPtr(Arm64ManagedRegister::FromXRegister(IP1),
                    Arm64ManagedRegister::FromXRegister(X0),
                    Offset(JNIEnvExt::SelfOffset(kArm64PointerSize).Int32Value()));

      __ JumpTo(Arm64ManagedRegister::FromXRegister(IP1), Offset(offset.Int32Value()),
                Arm64ManagedRegister::FromXRegister(IP0));

      break;
    case kQuickAbi:  // TR holds Thread*.
      __ JumpTo(Arm64ManagedRegister::FromXRegister(TR), Offset(offset.Int32Value()),
                Arm64ManagedRegister::FromXRegister(IP0));

      break;
  }

  __ FinalizeCode();
  size_t cs = __ CodeSize();
  std::unique_ptr<std::vector<uint8_t>> entry_stub(new std::vector<uint8_t>(cs));
  MemoryRegion code(entry_stub->data(), entry_stub->size());
  __ CopyInstructions(code);

  return std::move(entry_stub);
}
}  // namespace arm64
#endif  // ART_ENABLE_CODEGEN_arm64

#ifdef ART_ENABLE_CODEGEN_riscv64
namespace riscv64 {
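// Emits a RISC-V trampoline: the entry point at |offset| is loaded into a
// scratch register and reached via JR, leaving the argument registers
// untouched. For kJniAbi the Thread* is loaded from the JNIEnv* passed in A0.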
static std::unique_ptr<const std::vector<uint8_t>> CreateTrampoline(ArenaAllocator* allocator,
                                                                    EntryPointCallingConvention abi,
                                                                    ThreadOffset64 offset) {
  Riscv64Assembler assembler(allocator);
  ScratchRegisterScope srs(&assembler);
  XRegister tmp = srs.AllocateXRegister();

  switch (abi) {
    case kJniAbi:  // Load via Thread* held in JNIEnv* in first argument (A0).
      __ Loadd(tmp, A0, JNIEnvExt::SelfOffset(kRiscv64PointerSize).Int32Value());
      __ Loadd(tmp, tmp, offset.Int32Value());
      __ Jr(tmp);
      break;
    case kQuickAbi:  // TR holds Thread*.
      __ Loadd(tmp, TR, offset.Int32Value());
      __ Jr(tmp);
      break;
  }

  __ FinalizeCode();
  size_t cs = __ CodeSize();
  std::unique_ptr<std::vector<uint8_t>> entry_stub(new std::vector<uint8_t>(cs));
  MemoryRegion code(entry_stub->data(), entry_stub->size());
  __ CopyInstructions(code);

  return std::move(entry_stub);
}
}  // namespace riscv64
#endif  // ART_ENABLE_CODEGEN_riscv64

#ifdef ART_ENABLE_CODEGEN_x86
namespace x86 {
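// On x86 the Thread* is reachable through the fs segment register, so the
// trampoline is a single segment-relative indirect jump through the entry
// point at |offset|; the trailing int3 traps if control ever falls past it.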
static std::unique_ptr<const std::vector<uint8_t>> CreateTrampoline(ArenaAllocator* allocator,
                                                                    ThreadOffset32 offset) {
  X86Assembler assembler(allocator);

  // All x86 trampolines call via the Thread* held in fs.
  __ fs()->jmp(Address::Absolute(offset));
  __ int3();

  __ FinalizeCode();
  size_t cs = __ CodeSize();
  std::unique_ptr<std::vector<uint8_t>> entry_stub(new std::vector<uint8_t>(cs));
  MemoryRegion code(entry_stub->data(), entry_stub->size());
  __ CopyInstructions(code);

  return std::move(entry_stub);
}
}  // namespace x86
#endif  // ART_ENABLE_CODEGEN_x86

#ifdef ART_ENABLE_CODEGEN_x86_64
namespace x86_64 {
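// x86-64 variant of the x86 trampoline above; the Thread* lives in the gs
// segment register instead of fs.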
static std::unique_ptr<const std::vector<uint8_t>> CreateTrampoline(ArenaAllocator* allocator,
                                                                    ThreadOffset64 offset) {
  x86_64::X86_64Assembler assembler(allocator);

  // All x86-64 trampolines call via the Thread* held in gs.
  __ gs()->jmp(x86_64::Address::Absolute(offset, true));
  __ int3();

  __ FinalizeCode();
  size_t cs = __ CodeSize();
  std::unique_ptr<std::vector<uint8_t>> entry_stub(new std::vector<uint8_t>(cs));
  MemoryRegion code(entry_stub->data(), entry_stub->size());
  __ CopyInstructions(code);

  return std::move(entry_stub);
}
}  // namespace x86_64
#endif  // ART_ENABLE_CODEGEN_x86_64

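// Generates a trampoline for a 64-bit instruction set by dispatching to the
// matching architecture-specific generator; the assembler's arena is local to
// this call and only the finished machine code is returned.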
std::unique_ptr<const std::vector<uint8_t>> CreateTrampoline64(InstructionSet isa,
                                                               EntryPointCallingConvention abi,
                                                               ThreadOffset64 offset) {
  MallocArenaPool pool;
  ArenaAllocator allocator(&pool);
  switch (isa) {
#ifdef ART_ENABLE_CODEGEN_arm64
    case InstructionSet::kArm64:
      return arm64::CreateTrampoline(&allocator, abi, offset);
#endif
#ifdef ART_ENABLE_CODEGEN_riscv64
    case InstructionSet::kRiscv64:
      return riscv64::CreateTrampoline(&allocator, abi, offset);
#endif
#ifdef ART_ENABLE_CODEGEN_x86_64
    case InstructionSet::kX86_64:
      return x86_64::CreateTrampoline(&allocator, offset);
#endif
    default:
      UNUSED(abi);
      UNUSED(offset);
      LOG(FATAL) << "Unexpected InstructionSet: " << isa;
      UNREACHABLE();
  }
}

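// 32-bit counterpart of CreateTrampoline64(); the x86 generator ignores |abi|
// because every x86 trampoline reaches the Thread* through fs.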
std::unique_ptr<const std::vector<uint8_t>> CreateTrampoline32(InstructionSet isa,
                                                               EntryPointCallingConvention abi,
                                                               ThreadOffset32 offset) {
  MallocArenaPool pool;
  ArenaAllocator allocator(&pool);
  switch (isa) {
#ifdef ART_ENABLE_CODEGEN_arm
    case InstructionSet::kArm:
    case InstructionSet::kThumb2:
      return arm::CreateTrampoline(&allocator, abi, offset);
#endif
#ifdef ART_ENABLE_CODEGEN_x86
    case InstructionSet::kX86:
      UNUSED(abi);
      return x86::CreateTrampoline(&allocator, offset);
#endif
    default:
      UNUSED(abi);
      UNUSED(offset);
      LOG(FATAL) << "Unexpected InstructionSet: " << isa;
      UNREACHABLE();
  }
}

}  // namespace art