/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "calling_convention_arm.h"

#include <android-base/logging.h>

#include "arch/arm/jni_frame_arm.h"
#include "arch/instruction_set.h"
#include "base/macros.h"
#include "utils/arm/managed_register_arm.h"

namespace art HIDDEN {
namespace arm {

//
// JNI calling convention constants.
//

// List of parameters passed via registers for JNI.
// JNI uses soft-float, so there is only a GPR list.
static constexpr Register kJniArgumentRegisters[] = {
    R0, R1, R2, R3
};

static_assert(kJniArgumentRegisterCount == arraysize(kJniArgumentRegisters));

//
// Managed calling convention constants.
//

// Used by hard float. (General purpose registers.)
static constexpr ManagedRegister kHFCoreArgumentRegisters[] = {
    ArmManagedRegister::FromCoreRegister(R0),
    ArmManagedRegister::FromCoreRegister(R1),
    ArmManagedRegister::FromCoreRegister(R2),
    ArmManagedRegister::FromCoreRegister(R3),
};
static constexpr size_t kHFCoreArgumentRegistersCount = arraysize(kHFCoreArgumentRegisters);

// (VFP single-precision registers.)
static constexpr SRegister kHFSArgumentRegisters[] = {
    S0, S1, S2, S3, S4, S5, S6, S7, S8, S9, S10, S11, S12, S13, S14, S15
};
static constexpr size_t kHFSArgumentRegistersCount = arraysize(kHFSArgumentRegisters);

// (VFP double-precision registers.)
static constexpr DRegister kHFDArgumentRegisters[] = {
    D0, D1, D2, D3, D4, D5, D6, D7
};
static constexpr size_t kHFDArgumentRegistersCount = arraysize(kHFDArgumentRegisters);

static_assert(kHFDArgumentRegistersCount * 2 == kHFSArgumentRegistersCount,
67     "ks d argument registers mismatch");

//
// Shared managed+JNI calling convention constants.
//

static constexpr ManagedRegister kCalleeSaveRegisters[] = {
    // Core registers.
    ArmManagedRegister::FromCoreRegister(R5),
    ArmManagedRegister::FromCoreRegister(R6),
    ArmManagedRegister::FromCoreRegister(R7),
    ArmManagedRegister::FromCoreRegister(R8),
    ArmManagedRegister::FromCoreRegister(R10),
    ArmManagedRegister::FromCoreRegister(R11),
    ArmManagedRegister::FromCoreRegister(LR),
    // Hard float registers.
    ArmManagedRegister::FromSRegister(S16),
    ArmManagedRegister::FromSRegister(S17),
    ArmManagedRegister::FromSRegister(S18),
    ArmManagedRegister::FromSRegister(S19),
    ArmManagedRegister::FromSRegister(S20),
    ArmManagedRegister::FromSRegister(S21),
    ArmManagedRegister::FromSRegister(S22),
    ArmManagedRegister::FromSRegister(S23),
    ArmManagedRegister::FromSRegister(S24),
    ArmManagedRegister::FromSRegister(S25),
    ArmManagedRegister::FromSRegister(S26),
    ArmManagedRegister::FromSRegister(S27),
    ArmManagedRegister::FromSRegister(S28),
    ArmManagedRegister::FromSRegister(S29),
    ArmManagedRegister::FromSRegister(S30),
    ArmManagedRegister::FromSRegister(S31)
};

template <size_t size>
static constexpr uint32_t CalculateCoreCalleeSpillMask(
    const ManagedRegister (&callee_saves)[size]) {
  // LR is a special callee save which is not reported by CalleeSaveRegisters().
  uint32_t result = 0u;
  for (auto&& r : callee_saves) {
    if (r.AsArm().IsCoreRegister()) {
      result |= (1u << r.AsArm().AsCoreRegister());
    }
  }
  return result;
}

template <size_t size>
static constexpr uint32_t CalculateFpCalleeSpillMask(const ManagedRegister (&callee_saves)[size]) {
  uint32_t result = 0u;
  for (auto&& r : callee_saves) {
    if (r.AsArm().IsSRegister()) {
      result |= (1u << r.AsArm().AsSRegister());
    }
  }
  return result;
}

static constexpr uint32_t kCoreCalleeSpillMask = CalculateCoreCalleeSpillMask(kCalleeSaveRegisters);
static constexpr uint32_t kFpCalleeSpillMask = CalculateFpCalleeSpillMask(kCalleeSaveRegisters);
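// For the list above these evaluate to kCoreCalleeSpillMask == 0x4de0 (r5-r8, r10, r11
// and lr) and kFpCalleeSpillMask == 0xffff0000 (s16-s31), assuming the usual ART ARM
// register numbering with LR == 14.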

static constexpr ManagedRegister kAapcsCalleeSaveRegisters[] = {
    // Core registers.
    ArmManagedRegister::FromCoreRegister(R4),
    ArmManagedRegister::FromCoreRegister(R5),
    ArmManagedRegister::FromCoreRegister(R6),
    ArmManagedRegister::FromCoreRegister(R7),
    ArmManagedRegister::FromCoreRegister(R8),
    ArmManagedRegister::FromCoreRegister(R9),  // The platform register is callee-save on Android.
    ArmManagedRegister::FromCoreRegister(R10),
    ArmManagedRegister::FromCoreRegister(R11),
    ArmManagedRegister::FromCoreRegister(LR),
    // Hard float registers.
    ArmManagedRegister::FromSRegister(S16),
    ArmManagedRegister::FromSRegister(S17),
    ArmManagedRegister::FromSRegister(S18),
    ArmManagedRegister::FromSRegister(S19),
    ArmManagedRegister::FromSRegister(S20),
    ArmManagedRegister::FromSRegister(S21),
    ArmManagedRegister::FromSRegister(S22),
    ArmManagedRegister::FromSRegister(S23),
    ArmManagedRegister::FromSRegister(S24),
    ArmManagedRegister::FromSRegister(S25),
    ArmManagedRegister::FromSRegister(S26),
    ArmManagedRegister::FromSRegister(S27),
    ArmManagedRegister::FromSRegister(S28),
    ArmManagedRegister::FromSRegister(S29),
    ArmManagedRegister::FromSRegister(S30),
    ArmManagedRegister::FromSRegister(S31)
};

static constexpr uint32_t kAapcsCoreCalleeSpillMask =
    CalculateCoreCalleeSpillMask(kAapcsCalleeSaveRegisters);
static constexpr uint32_t kAapcsFpCalleeSpillMask =
    CalculateFpCalleeSpillMask(kAapcsCalleeSaveRegisters);

// Calling convention

ManagedRegister ArmManagedRuntimeCallingConvention::ReturnRegister() const {
  switch (GetShorty()[0]) {
    case 'V':
      return ArmManagedRegister::NoRegister();
    case 'D':
      return ArmManagedRegister::FromDRegister(D0);
    case 'F':
      return ArmManagedRegister::FromSRegister(S0);
    case 'J':
      return ArmManagedRegister::FromRegisterPair(R0_R1);
    default:
      return ArmManagedRegister::FromCoreRegister(R0);
  }
}

ManagedRegister ArmJniCallingConvention::ReturnRegister() const {
  switch (GetShorty()[0]) {
    case 'V':
      return ArmManagedRegister::NoRegister();
    case 'D':
    case 'J':
      return ArmManagedRegister::FromRegisterPair(R0_R1);
    default:
      return ArmManagedRegister::FromCoreRegister(R0);
  }
}

ManagedRegister ArmJniCallingConvention::IntReturnRegister() const {
  return ArmManagedRegister::FromCoreRegister(R0);
}

// Managed runtime calling convention

ManagedRegister ArmManagedRuntimeCallingConvention::MethodRegister() {
  return ArmManagedRegister::FromCoreRegister(R0);
}

ManagedRegister ArmManagedRuntimeCallingConvention::ArgumentRegisterForMethodExitHook() {
  return ArmManagedRegister::FromCoreRegister(R2);
}

void ArmManagedRuntimeCallingConvention::ResetIterator(FrameOffset displacement) {
  ManagedRuntimeCallingConvention::ResetIterator(displacement);
  gpr_index_ = 1u;  // Skip r0 for ArtMethod*.
  float_index_ = 0u;
  double_index_ = 0u;
}

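// A sketch of the lazy FP/GPR bookkeeping done by Next() below, for a method with
// parameter list (float, double, float, long) (ArtMethod* occupies r0):
//   float  -> s0    (float_index_ 0 -> 1, double_index_ 0 -> 1; leaves a hole at s1)
//   double -> d1    (i.e. s2/s3; double_index_ 1 -> 2, float_index_ stays 1)
//   float  -> s1    (back-fills the hole; float_index_ jumps to double_index_ * 2 == 4)
//   long   -> r2/r3 (gpr_index_ 1 is rounded up to the even register pair)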
void ArmManagedRuntimeCallingConvention::Next() {
  if (IsCurrentParamAFloatOrDouble()) {
    if (float_index_ % 2 == 0) {
      // The register for the current float is the same as the first register for double.
      DCHECK_EQ(float_index_, double_index_ * 2u);
    } else {
      // There is a hole for an extra float before the space reserved for the next double.
      DCHECK_LT(float_index_, double_index_ * 2u);
    }
    if (IsCurrentParamADouble()) {
      double_index_ += 1u;
      if (float_index_ % 2 == 0) {
        float_index_ = double_index_ * 2u;
      }
    } else {
      if (float_index_ % 2 == 0) {
        float_index_ += 1u;
        double_index_ += 1u;  // Leaves space for one more float before the next double.
      } else {
        float_index_ = double_index_ * 2u;
      }
    }
  } else {  // Not a float/double.
    if (IsCurrentParamALong()) {
      // Note that the alignment to even register is done lazily.
      gpr_index_ = RoundUp(gpr_index_, 2u) + 2u;
    } else {
      gpr_index_ += 1u;
    }
  }
  ManagedRuntimeCallingConvention::Next();
}

bool ArmManagedRuntimeCallingConvention::IsCurrentParamInRegister() {
  if (IsCurrentParamAFloatOrDouble()) {
    if (IsCurrentParamADouble()) {
      return double_index_ < kHFDArgumentRegistersCount;
    } else {
      return float_index_ < kHFSArgumentRegistersCount;
    }
  } else {
    if (IsCurrentParamALong()) {
      // Round up to even register and do not split a long between the last register and the stack.
      return RoundUp(gpr_index_, 2u) + 1u < kHFCoreArgumentRegistersCount;
    } else {
      return gpr_index_ < kHFCoreArgumentRegistersCount;
    }
  }
}

bool ArmManagedRuntimeCallingConvention::IsCurrentParamOnStack() {
  return !IsCurrentParamInRegister();
}

ManagedRegister ArmManagedRuntimeCallingConvention::CurrentParamRegister() {
  DCHECK(IsCurrentParamInRegister());
  if (IsCurrentParamAFloatOrDouble()) {
    if (IsCurrentParamADouble()) {
      return ArmManagedRegister::FromDRegister(kHFDArgumentRegisters[double_index_]);
    } else {
      return ArmManagedRegister::FromSRegister(kHFSArgumentRegisters[float_index_]);
    }
  } else {
    if (IsCurrentParamALong()) {
      // Currently the only register pair for a long parameter is r2-r3.
      // Note that the alignment to even register is done lazily.
      CHECK_EQ(RoundUp(gpr_index_, 2u), 2u);
      return ArmManagedRegister::FromRegisterPair(R2_R3);
    } else {
      return kHFCoreArgumentRegisters[gpr_index_];
    }
  }
}

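// A small worked example for CurrentParamStackOffset() below: with kFramePointerSize == 4,
// the argument in vreg slot 2 (itr_slots_ == 2) lives at displacement + 4 (Method*) + 8.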
FrameOffset ArmManagedRuntimeCallingConvention::CurrentParamStackOffset() {
  return FrameOffset(displacement_.Int32Value() +        // displacement
                     kFramePointerSize +                 // Method*
                     (itr_slots_ * kFramePointerSize));  // offset into in args
}

// JNI calling convention

ArmJniCallingConvention::ArmJniCallingConvention(bool is_static,
                                                 bool is_synchronized,
                                                 bool is_fast_native,
                                                 bool is_critical_native,
                                                 std::string_view shorty)
    : JniCallingConvention(is_static,
                           is_synchronized,
                           is_fast_native,
                           is_critical_native,
                           shorty,
                           kArmPointerSize) {
  // AAPCS 4.1 specifies fundamental alignments for each type. Most of our stack arguments are
  // 4-byte aligned, but longs and doubles must be 8-byte aligned. Add padding to maintain the
  // 8-byte alignment invariant.
  //
  // Compute padding to ensure longs and doubles are not split in AAPCS.
  size_t shift = 0;

  size_t cur_arg, cur_reg;
  if (LIKELY(HasExtraArgumentsForJni())) {
    // Ignore the 'this' jobject or jclass for static methods and the JNIEnv.
    // We start at the aligned register r2.
    //
    // Ignore the first 2 parameters because they are guaranteed to be aligned.
    cur_arg = NumImplicitArgs();  // skip the "this" arg.
    cur_reg = 2;  // skip {r0=JNIEnv, r1=jobject} / {r0=JNIEnv, r1=jclass} parameters (start at r2).
  } else {
    // Check every parameter.
    cur_arg = 0;
    cur_reg = 0;
  }

  // TODO: Maybe should just use IsCurrentParamALongOrDouble instead to be cleaner?
  // (this just seems like an unnecessary micro-optimization).

  // Shift across a logical register mapping that looks like:
  //
  //   | r0 | r1 | r2 | r3 | SP | SP+4| SP+8 | SP+12 | ... | SP+n | SP+n+4 |
  //
  //   (where SP is some arbitrary stack pointer that our 0th stack arg would go into).
  //
  // Any time there would normally be a long/double in an odd logical register,
  // we have to push out the rest of the mappings by 4 bytes to maintain an 8-byte alignment.
  //
  // This works for both physical register pairs {r0, r1}, {r2, r3} and for when
  // the value is on the stack.
  //
  // For example:
  // (a) long would normally go into r1, but we shift it into r2
  //  | INT | (PAD) | LONG      |
  //  | r0  |  r1   |  r2  | r3 |
  //
  // (b) long would normally go into r3, but we shift it into SP
  //  | INT | INT | INT | (PAD) | LONG     |
  //  | r0  |  r1 |  r2 |  r3   | SP+4 SP+8|
  //
  // where INT is any <=4 byte arg, and LONG is any 8-byte arg.
  for (; cur_arg < NumArgs(); cur_arg++) {
    if (IsParamALongOrDouble(cur_arg)) {
      if ((cur_reg & 1) != 0) {  // check that it's in a logical contiguous register pair
        shift += 4;
        cur_reg++;  // additional bump to ensure alignment
      }
      cur_reg += 2;  // bump the iterator twice for every long argument
    } else {
      cur_reg++;  // bump the iterator for every non-long argument
    }
  }

  if (cur_reg <= kJniArgumentRegisterCount) {
    // As a special case, when the shifting (or lack of it) leaves no arguments on the
    // stack, there is no stack padding at all.
    //
    // For example with @CriticalNative and:
    // (int, long) -> shifts the long but doesn't need to pad the stack
    //
    //          shift
    //           \/
    //  | INT | (PAD) | LONG      | (EMPTY) ...
    //  | r0  |  r1   |  r2  | r3 |   SP    ...
    //                                /\
    //                          no stack padding
    padding_ = 0;
  } else {
    padding_ = shift;
  }

  // TODO: Add new JNI tests for the @CriticalNative edge cases introduced by this shifting:
  // (a) Using r0,r1 pair = f(long,...)
  // (b) Shifting r1 long into r2,r3 pair = f(int, long, int, ...);
  // (c) Shifting but not introducing a stack padding = f(int, long);
}

uint32_t ArmJniCallingConvention::CoreSpillMask() const {
  // Compute spill mask to agree with callee saves initialized in the constructor.
  return is_critical_native_ ? 0u : kCoreCalleeSpillMask;
}

uint32_t ArmJniCallingConvention::FpSpillMask() const {
  return is_critical_native_ ? 0u : kFpCalleeSpillMask;
}

ArrayRef<const ManagedRegister> ArmJniCallingConvention::CalleeSaveScratchRegisters() const {
  DCHECK(!IsCriticalNative());
  // Use R5-R8, R10-R11 from managed callee saves.
  constexpr size_t kStart = 0u;
  constexpr size_t kLength = 6u;
  static_assert(kCalleeSaveRegisters[kStart].Equals(ArmManagedRegister::FromCoreRegister(R5)));
  static_assert(kCalleeSaveRegisters[kStart + kLength - 1u].Equals(
                    ArmManagedRegister::FromCoreRegister(R11)));
  static_assert((kCoreCalleeSpillMask & (1u << R9)) == 0u);  // Does not contain thread register R9.
  static_assert((kCoreCalleeSpillMask & ~kAapcsCoreCalleeSpillMask) == 0u);
  return ArrayRef<const ManagedRegister>(kCalleeSaveRegisters).SubArray(kStart, kLength);
}

ArrayRef<const ManagedRegister> ArmJniCallingConvention::ArgumentScratchRegisters() const {
  DCHECK(!IsCriticalNative());
  ArrayRef<const ManagedRegister> scratch_regs(kHFCoreArgumentRegisters);
  // Exclude return registers (R0-R1) even if unused. Using the same scratch registers helps
  // make more JNI stubs identical for better reuse, such as deduplicating them in oat files.
  static_assert(kHFCoreArgumentRegisters[0].Equals(ArmManagedRegister::FromCoreRegister(R0)));
  static_assert(kHFCoreArgumentRegisters[1].Equals(ArmManagedRegister::FromCoreRegister(R1)));
  scratch_regs = scratch_regs.SubArray(/*pos=*/ 2u);
  DCHECK(std::none_of(scratch_regs.begin(),
                      scratch_regs.end(),
                      [return_reg = ReturnRegister().AsArm()](ManagedRegister reg) {
                        return return_reg.Overlaps(reg.AsArm());
                      }));
  return scratch_regs;
}

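// A quick sanity check for FrameSize() below: with the 23 callee saves above (7 core +
// 16 FP) and assuming kStackAlignment == 16, a non-critical frame is 4 (Method*) +
// 23 * 4 == 96 bytes, which is already 16-byte aligned.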
size_t ArmJniCallingConvention::FrameSize() const {
  if (UNLIKELY(is_critical_native_)) {
    CHECK(!SpillsMethod());
    CHECK(!HasLocalReferenceSegmentState());
    return 0u;  // There is no managed frame for @CriticalNative.
  }

  // Method*, callee save area size, local reference segment state.
  DCHECK(SpillsMethod());
  const size_t method_ptr_size = static_cast<size_t>(kArmPointerSize);
  const size_t callee_save_area_size = CalleeSaveRegisters().size() * kFramePointerSize;
  size_t total_size = method_ptr_size + callee_save_area_size;

  DCHECK(HasLocalReferenceSegmentState());
  // Cookie is saved in one of the spilled registers.

  return RoundUp(total_size, kStackAlignment);
}

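// Worked example for OutFrameSize() below: for @CriticalNative int f(int, long),
// all_args == 3 (two args, the 8-byte long counted twice), no argument reaches the
// stack and padding_ == 0 (the constructor shifted the long into r2/r3), so the
// result is 0 and the stub can tail-call (see UseTailCall() at the end of this file).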
size_t ArmJniCallingConvention::OutFrameSize() const {
  // Count param args, including JNIEnv* and jclass*; count 8-byte args twice.
  size_t all_args = NumberOfExtraArgumentsForJni() + NumArgs() + NumLongOrDoubleArgs();
  // Account for arguments passed through r0-r3. (No FP args, AAPCS32 is soft-float.)
  size_t stack_args = all_args - std::min(kJniArgumentRegisterCount, all_args);
  // The size of outgoing arguments.
  size_t size = stack_args * kFramePointerSize + padding_;

  // @CriticalNative can use tail call as all managed callee saves are preserved by AAPCS.
  static_assert((kCoreCalleeSpillMask & ~kAapcsCoreCalleeSpillMask) == 0u);
  static_assert((kFpCalleeSpillMask & ~kAapcsFpCalleeSpillMask) == 0u);

  // For @CriticalNative, we can make a tail call if there are no stack args and the
  // return type is not an FP type (otherwise we need to move the result to FP register).
  DCHECK(!RequiresSmallResultTypeExtension());
  if (is_critical_native_ && (size != 0u || GetShorty()[0] == 'F' || GetShorty()[0] == 'D')) {
    size += kFramePointerSize;  // We need to spill LR with the args.
  }
  size_t out_args_size = RoundUp(size, kAapcsStackAlignment);
  if (UNLIKELY(IsCriticalNative())) {
    DCHECK_EQ(out_args_size, GetCriticalNativeStubFrameSize(GetShorty()));
  }
  return out_args_size;
}

ArrayRef<const ManagedRegister> ArmJniCallingConvention::CalleeSaveRegisters() const {
  if (UNLIKELY(IsCriticalNative())) {
    if (UseTailCall()) {
      return ArrayRef<const ManagedRegister>();  // Do not spill anything.
    } else {
      // Spill LR with out args.
      static_assert((kCoreCalleeSpillMask >> LR) == 1u);  // Contains LR as the highest bit.
      constexpr size_t lr_index = POPCOUNT(kCoreCalleeSpillMask) - 1u;
      static_assert(kCalleeSaveRegisters[lr_index].Equals(
                        ArmManagedRegister::FromCoreRegister(LR)));
      return ArrayRef<const ManagedRegister>(kCalleeSaveRegisters).SubArray(
          /*pos=*/ lr_index, /*length=*/ 1u);
    }
  } else {
    return ArrayRef<const ManagedRegister>(kCalleeSaveRegisters);
  }
}
// The JniCallingConvention ABI follows AAPCS, where longs and doubles must start at
// even register numbers and even stack slots.
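// For example, for f(int, long) the JNIEnv*, the jobject/jclass and the int take
// slots 0-2; the long would start at the odd slot 3, so Next() below skips one slot
// and the long occupies slots 4 and 5, i.e. the first two stack slots.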
void ArmJniCallingConvention::Next() {
  // Update the iterator by usual JNI rules.
  JniCallingConvention::Next();

  if (LIKELY(HasNext())) {  // Avoid CHECK failure for IsCurrentParam
    // Ensure slot is 8-byte aligned for longs/doubles (AAPCS).
    if (IsCurrentParamALongOrDouble() && ((itr_slots_ & 0x1u) != 0)) {
      // itr_slots_ needs to be an even number, according to AAPCS.
      itr_slots_++;
    }
  }
}

bool ArmJniCallingConvention::IsCurrentParamInRegister() {
  return itr_slots_ < kJniArgumentRegisterCount;
}

bool ArmJniCallingConvention::IsCurrentParamOnStack() {
  return !IsCurrentParamInRegister();
}

ManagedRegister ArmJniCallingConvention::CurrentParamRegister() {
  CHECK_LT(itr_slots_, kJniArgumentRegisterCount);
  if (IsCurrentParamALongOrDouble()) {
    // AAPCS 5.1.1 requires 64-bit values to be in a consecutive register pair:
    // "A double-word sized type is passed in two consecutive registers (e.g., r0 and r1, or r2 and
    // r3). The content of the registers is as if the value had been loaded from memory
    // representation with a single LDM instruction."
    if (itr_slots_ == 0u) {
      return ArmManagedRegister::FromRegisterPair(R0_R1);
    } else if (itr_slots_ == 2u) {
      return ArmManagedRegister::FromRegisterPair(R2_R3);
    } else {
      // The register can either be R0 (+R1) or R2 (+R3). Cannot be other values.
      LOG(FATAL) << "Invalid iterator register position for a long/double " << itr_args_;
      UNREACHABLE();
    }
  } else {
    // All other types can fit into one register.
    return ArmManagedRegister::FromCoreRegister(kJniArgumentRegisters[itr_slots_]);
  }
}

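// For CurrentParamStackOffset() below: e.g. if displacement_ equals OutFrameSize(),
// the first stack argument (itr_slots_ == kJniArgumentRegisterCount) lands at offset 0.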
FrameOffset ArmJniCallingConvention::CurrentParamStackOffset() {
  CHECK_GE(itr_slots_, kJniArgumentRegisterCount);
  size_t offset =
      displacement_.Int32Value()
          - OutFrameSize()
          + ((itr_slots_ - kJniArgumentRegisterCount) * kFramePointerSize);
  CHECK_LT(offset, OutFrameSize());
  return FrameOffset(offset);
}

// R4 is neither a managed callee-save nor an argument register. It is suitable for use as
// the locking argument for synchronized methods and the hidden argument for @CriticalNative
// methods. (It is native callee-save, but the value coming from managed code can be
// clobbered.)
static void AssertR4IsNeitherCalleeSaveNorArgumentRegister() {
  // TODO: Change to static_assert; std::none_of should be constexpr since C++20.
  DCHECK(std::none_of(kCalleeSaveRegisters,
                      kCalleeSaveRegisters + std::size(kCalleeSaveRegisters),
                      [](ManagedRegister callee_save) constexpr {
                        return callee_save.Equals(ArmManagedRegister::FromCoreRegister(R4));
                      }));
  DCHECK(std::none_of(kJniArgumentRegisters,
                      kJniArgumentRegisters + std::size(kJniArgumentRegisters),
                      [](Register arg) { return arg == R4; }));
}

ManagedRegister ArmJniCallingConvention::LockingArgumentRegister() const {
  DCHECK(!IsFastNative());
  DCHECK(!IsCriticalNative());
  DCHECK(IsSynchronized());
  AssertR4IsNeitherCalleeSaveNorArgumentRegister();
  return ArmManagedRegister::FromCoreRegister(R4);
}

ManagedRegister ArmJniCallingConvention::HiddenArgumentRegister() const {
  CHECK(IsCriticalNative());
  AssertR4IsNeitherCalleeSaveNorArgumentRegister();
  return ArmManagedRegister::FromCoreRegister(R4);
}

// Whether to use tail call (used only for @CriticalNative).
bool ArmJniCallingConvention::UseTailCall() const {
  CHECK(IsCriticalNative());
  return OutFrameSize() == 0u;
}

}  // namespace arm
}  // namespace art