/art/test/623-checker-loop-regressions/src/
  D  Main.java
      828  int temp0 = a[i] * b[i];             in testDotProdAndDotProdExtraMul0() local
      829  int temp1 = (byte)(temp0) * a[i];    in testDotProdAndDotProdExtraMul0()
      831  s1 += temp0;                         in testDotProdAndDotProdExtraMul0()
      846  int temp0 = a[i] * b[i];             in testDotProdAndDotProdExtraMul1() local
      847  int temp1 = (byte)(temp0) * a[i];    in testDotProdAndDotProdExtraMul1()
      848  s0 += temp0;                         in testDotProdAndDotProdExtraMul1()
      880  int temp0 = Math.abs(x[i] - y[i]);   in testSADAndSADExtraAbs0() local
      881  int temp1 = Math.abs(temp0 - y[i]);  in testSADAndSADExtraAbs0()
      883  s1 += temp0;                         in testSADAndSADExtraAbs0()
      914  int temp0 = Math.abs(x[i] - y[i]);   in testSADAndSADExtraAbs1() local
      [all …]
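The Main.java hits above come from checker tests for reductions that the ART loop
optimizer can vectorize: a dot-product over a[i] * b[i] and a SAD (sum of absolute
differences) over |x[i] - y[i]|, each with an extra multiply or Math.abs feeding the
side value temp1. A minimal standalone sketch of the two idioms (class, method, and
array names below are illustrative, not the actual test code):

    // Illustrative sketch of the reduction idioms referenced above; not the test code.
    public class ReductionIdioms {
        // Dot-product reduction: accumulate a[i] * b[i] into a scalar.
        static int dotProd(byte[] a, byte[] b) {
            int s = 0;
            for (int i = 0; i < Math.min(a.length, b.length); i++) {
                s += a[i] * b[i];
            }
            return s;
        }

        // SAD reduction: accumulate |x[i] - y[i]| into a scalar.
        static int sad(int[] x, int[] y) {
            int s = 0;
            for (int i = 0; i < Math.min(x.length, y.length); i++) {
                s += Math.abs(x[i] - y[i]);
            }
            return s;
        }

        public static void main(String[] args) {
            byte[] a = {1, 2, 3, 4};
            byte[] b = {4, 3, 2, 1};
            int[] x = {10, 20, 30};
            int[] y = {13, 17, 35};
            System.out.println(dotProd(a, b));  // 1*4 + 2*3 + 3*2 + 4*1 = 20
            System.out.println(sad(x, y));      // 3 + 3 + 5 = 11
        }
    }

Judging by the method names, the extra temp1 computation in each test checks that an
additional use of the partial result (temp0) does not defeat the idiom recognition.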
/art/runtime/arch/riscv64/
  D  quick_entrypoints_riscv64.S
      1769  slowPathLabel, class, count, temp0, temp1, temp2
      1780  ld \temp0, THREAD_LOCAL_POS_OFFSET(xSELF)  // Check tlab for space, note that
      1787  sub \temp2, \temp2, \temp0
      1795  mv a0, \temp0
      1796  add \temp0, \temp0, \temp1
      1797  sd \temp0, THREAD_LOCAL_POS_OFFSET(xSELF)  // Store new thread_local_pos.
      1832  .macro COMPUTE_ARRAY_SIZE_UNKNOWN class, count, temp0, temp1, temp2
      1834  lwu \temp0, MIRROR_CLASS_COMPONENT_TYPE_OFFSET(\class)  // Load component type
      1835  UNPOISON_HEAP_REF \temp0
      1836  lwu \temp0, MIRROR_CLASS_OBJECT_PRIMITIVE_TYPE_OFFSET(\temp0)
      [all …]
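These riscv64 entrypoint hits are the thread-local allocation buffer (TLAB) fast path
for array allocation: temp0 holds the current thread_local_pos, the remaining TLAB
space is checked, and on success the old position becomes the new object while the
bumped position is stored back; otherwise control branches to slowPathLabel. A hedged
Java sketch of that bump-pointer check (field and method names below are stand-ins
for the assembly-level thread offsets, not a real ART API):

    // Illustrative bump-pointer TLAB check mirroring the assembly above.
    final class TlabSketch {
        long threadLocalPos;  // stands in for THREAD_LOCAL_POS_OFFSET(xSELF)
        long threadLocalEnd;  // stands in for the TLAB end pointer

        // Returns the address of the newly reserved object, or 0 for "take the slow path".
        long tryAllocate(long byteSize) {
            long pos = threadLocalPos;              // ld temp0, THREAD_LOCAL_POS_OFFSET(xSELF)
            long remaining = threadLocalEnd - pos;  // sub temp2, temp2, temp0
            if (byteSize > remaining) {
                return 0;                           // not enough TLAB space left
            }
            threadLocalPos = pos + byteSize;        // add temp0, temp0, temp1; sd temp0, ...
            return pos;                             // mv a0, temp0: old pos is the new object
        }
    }

The COMPUTE_ARRAY_SIZE_UNKNOWN hits feed this check: the macro loads the array class's
component type and its primitive-type field to derive the element size before the
allocation size is computed.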
/art/compiler/optimizing/
  D  intrinsics_riscv64.cc
      2851  XRegister temp0 = locations->GetTemp(0).AsRegister<XRegister>();  in VisitStringCompareTo() local
      2889  __ Srliw(temp0, temp3, 1u);  in VisitStringCompareTo()
      2893  __ Loadwu(temp0, str, count_offset);  in VisitStringCompareTo()
      2897  __ Subw(out, temp0, temp1);  in VisitStringCompareTo()
      2900  __ Minu(temp0, temp0, temp1);  in VisitStringCompareTo()
      2902  __ Beqz(temp0, &end);  in VisitStringCompareTo()
      2914  __ Sll(temp0, temp0, temp3);  in VisitStringCompareTo()
      2936  __ Addi(temp0, temp0, (mirror::kUseStringCompression) ? -8 : -4);  in VisitStringCompareTo()
      2937  __ Bgtz(temp0, &loop);  in VisitStringCompareTo()
      2949  __ Ble(temp0, temp1, &end);  in VisitStringCompareTo()
      [all …]
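In these VisitStringCompareTo hits, temp0 first holds a string length taken from the
count field and then, after Minu(temp0, temp0, temp1), the number of characters left
to compare in the main loop; out starts out as the length difference from
Subw(out, temp0, temp1). The contract being implemented is java.lang.String.compareTo.
A sketch of that contract over plain char arrays (this shows the semantics, not the
generated code):

    // String.compareTo semantics: first differing char decides, otherwise length difference.
    static int compareToSketch(char[] lhs, char[] rhs) {
        int lengthDiff = lhs.length - rhs.length;          // Subw(out, temp0, temp1)
        int minLength = Math.min(lhs.length, rhs.length);  // Minu(temp0, temp0, temp1)
        for (int i = 0; i < minLength; i++) {
            if (lhs[i] != rhs[i]) {
                return lhs[i] - rhs[i];                    // first mismatch decides the order
            }
        }
        return lengthDiff;                                 // equal prefix: compare by length
    }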
  D  intrinsics_arm_vixl.cc
      567  const vixl32::Register temp0 = RegisterFrom(locations->GetTemp(0));  in VisitStringCompareTo() local
      602  __ Lsr(temp0, temp3, 1u);  in VisitStringCompareTo()
      606  __ Ldr(temp0, MemOperand(str, count_offset));  in VisitStringCompareTo()
      610  __ Subs(out, temp0, temp1);  in VisitStringCompareTo()
      619  __ mov(gt, temp0, temp1);  in VisitStringCompareTo()
      625  __ CompareAndBranchIfZero(temp0, &end, mirror::kUseStringCompression);  in VisitStringCompareTo()
      641  __ add(ne, temp0, temp0, temp0);  in VisitStringCompareTo()
      664  const vixl32::Register temp0 = RegisterFrom(locations->GetTemp(0));  in GenerateStringCompareToLoop() local
      705  __ Subs(temp0, temp0, (mirror::kUseStringCompression ? 8 : 4));  in GenerateStringCompareToLoop()
      711  __ Subs(temp0, temp0, 4);  // 4 bytes previously compared.  in GenerateStringCompareToLoop()
      [all …]
  D  intrinsics_arm64.cc
      1858  Register temp0 = WRegisterFrom(locations->GetTemp(0));  in VisitStringCompareTo() local
      1896  __ Lsr(temp0, temp3, 1u);  in VisitStringCompareTo()
      1900  __ Ldr(temp0, HeapOperand(str, count_offset));  in VisitStringCompareTo()
      1904  __ Subs(out, temp0, temp1);  in VisitStringCompareTo()
      1906  __ Csel(temp0, temp1, temp0, ge);  in VisitStringCompareTo()
      1908  __ Cbz(temp0, &end);  in VisitStringCompareTo()
      1923  __ Lsl(temp0, temp0, temp3);  in VisitStringCompareTo()
      1947  __ Subs(temp0, temp0, (mirror::kUseStringCompression) ? 8 : 4);  in VisitStringCompareTo()
      1967  __ Cmp(temp0, Operand(temp1.W(), LSR, (mirror::kUseStringCompression) ? 3 : 4));  in VisitStringCompareTo()
      2009  __ Lsl(temp0, temp0, 1u);  in VisitStringCompareTo()
      [all …]
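The riscv64, arm, and arm64 intrinsics above all guard parts of the sequence with
mirror::kUseStringCompression: the count field then packs the character count together
with a compression bit (which is why the shift-right-by-one hits recover the length),
and strings whose compression state differs are compared as Latin-1 bytes against
UTF-16 chars. A rough sketch of that mixed comparison, assuming one Latin-1 byte per
compressed character (the helper below is illustrative, not ART's code):

    // Mixed compressed/uncompressed comparison sketch: Latin-1 bytes vs. UTF-16 chars.
    static int compareMixedSketch(byte[] compressedLhs, char[] utf16Rhs) {
        int lengthDiff = compressedLhs.length - utf16Rhs.length;
        int minLength = Math.min(compressedLhs.length, utf16Rhs.length);
        for (int i = 0; i < minLength; i++) {
            char c = (char) (compressedLhs[i] & 0xFF);  // widen the Latin-1 byte
            if (c != utf16Rhs[i]) {
                return c - utf16Rhs[i];
            }
        }
        return lengthDiff;
    }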