/*
 * Copyright (C) 2023 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef BERBERIS_LITE_TRANSLATOR_RISCV64_TO_X86_64_INLINE_INTRINSIC_H_
#define BERBERIS_LITE_TRANSLATOR_RISCV64_TO_X86_64_INLINE_INTRINSIC_H_

#include <cstdint>
#include <optional>
#include <tuple>
#include <type_traits>

#include "berberis/assembler/x86_64.h"
#include "berberis/base/checks.h"
#include "berberis/base/dependent_false.h"
#include "berberis/guest_state/guest_state.h"
#include "berberis/intrinsics/guest_cpu_flags.h"
#include "berberis/intrinsics/intrinsics_process_bindings.h"
#include "berberis/intrinsics/macro_assembler.h"
#include "berberis/runtime_primitives/platform.h"

namespace berberis::inline_intrinsic {

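// Tries to emit the intrinsic kFunction inline, directly into the code being
// generated, instead of issuing a runtime call to it. Returns true on success;
// on failure the caller is expected to fall back to a regular intrinsic call.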
template <auto kFunction,
          typename RegAlloc,
          typename SIMDRegAlloc,
          typename AssemblerResType,
          typename... AssemblerArgType>
bool TryInlineIntrinsic(MacroAssembler<x86_64::Assembler>& as,
                        RegAlloc&& reg_alloc,
                        SIMDRegAlloc&& simd_reg_alloc,
                        AssemblerResType result,
                        AssemblerArgType... args);

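// Fast path for floating-point intrinsics: when the guest requests the dynamic
// rounding mode (FPFlags::DYN), host rounding already matches guest rounding,
// so the cheaper ...HostRounding variant of the intrinsic can be substituted.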
template <auto kFunc>
class InlineIntrinsic {
 public:
  template <typename RegAlloc, typename SIMDRegAlloc, typename ResType, typename... ArgType>
  static bool TryInlineWithHostRounding(MacroAssembler<x86_64::Assembler>& as,
                                        RegAlloc&& reg_alloc,
                                        SIMDRegAlloc&& simd_reg_alloc,
                                        ResType result,
                                        ArgType... args) {
    std::tuple args_tuple = std::make_tuple(args...);
    if constexpr (IsTagEq<&intrinsics::FMul<intrinsics::Float64>>) {
      auto [rm, frm, src1, src2] = args_tuple;
      if (rm != FPFlags::DYN) {
        return false;
      }
      return TryInlineIntrinsic<&intrinsics::FMulHostRounding<intrinsics::Float64>>(
          as, reg_alloc, simd_reg_alloc, result, src1, src2);
    } else if constexpr (IsTagEq<&intrinsics::FMul<intrinsics::Float32>>) {
      auto [rm, frm, src1, src2] = args_tuple;
      if (rm != FPFlags::DYN) {
        return false;
      }
      return TryInlineIntrinsic<&intrinsics::FMulHostRounding<intrinsics::Float32>>(
          as, reg_alloc, simd_reg_alloc, result, src1, src2);
    } else if constexpr (IsTagEq<&intrinsics::FAdd<intrinsics::Float64>>) {
      auto [rm, frm, src1, src2] = args_tuple;
      if (rm != FPFlags::DYN) {
        return false;
      }
      return TryInlineIntrinsic<&intrinsics::FAddHostRounding<intrinsics::Float64>>(
          as, reg_alloc, simd_reg_alloc, result, src1, src2);
    } else if constexpr (IsTagEq<&intrinsics::FAdd<intrinsics::Float32>>) {
      auto [rm, frm, src1, src2] = args_tuple;
      if (rm != FPFlags::DYN) {
        return false;
      }
      return TryInlineIntrinsic<&intrinsics::FAddHostRounding<intrinsics::Float32>>(
          as, reg_alloc, simd_reg_alloc, result, src1, src2);
    } else if constexpr (IsTagEq<&intrinsics::FSub<intrinsics::Float64>>) {
      auto [rm, frm, src1, src2] = args_tuple;
      if (rm != FPFlags::DYN) {
        return false;
      }
      return TryInlineIntrinsic<&intrinsics::FSubHostRounding<intrinsics::Float64>>(
          as, reg_alloc, simd_reg_alloc, result, src1, src2);
    } else if constexpr (IsTagEq<&intrinsics::FSub<intrinsics::Float32>>) {
      auto [rm, frm, src1, src2] = args_tuple;
      if (rm != FPFlags::DYN) {
        return false;
      }
      return TryInlineIntrinsic<&intrinsics::FSubHostRounding<intrinsics::Float32>>(
          as, reg_alloc, simd_reg_alloc, result, src1, src2);
    } else if constexpr (IsTagEq<&intrinsics::FDiv<intrinsics::Float64>>) {
      auto [rm, frm, src1, src2] = args_tuple;
      if (rm != FPFlags::DYN) {
        return false;
      }
      return TryInlineIntrinsic<&intrinsics::FDivHostRounding<intrinsics::Float64>>(
          as, reg_alloc, simd_reg_alloc, result, src1, src2);
    } else if constexpr (IsTagEq<&intrinsics::FDiv<intrinsics::Float32>>) {
      auto [rm, frm, src1, src2] = args_tuple;
      if (rm != FPFlags::DYN) {
        return false;
      }
      return TryInlineIntrinsic<&intrinsics::FDivHostRounding<intrinsics::Float32>>(
          as, reg_alloc, simd_reg_alloc, result, src1, src2);
    } else if constexpr (IsTagEq<&intrinsics::FCvtFloatToInteger<int64_t, intrinsics::Float64>>) {
      auto [rm, frm, src] = args_tuple;
      if (rm != FPFlags::DYN) {
        return false;
      }
      return TryInlineIntrinsic<
          &intrinsics::FCvtFloatToIntegerHostRounding<int64_t, intrinsics::Float64>>(
          as, reg_alloc, simd_reg_alloc, result, src);
    } else if constexpr (IsTagEq<&intrinsics::FCvtFloatToInteger<int64_t, intrinsics::Float32>>) {
      auto [rm, frm, src] = args_tuple;
      if (rm != FPFlags::DYN) {
        return false;
      }
      return TryInlineIntrinsic<
          &intrinsics::FCvtFloatToIntegerHostRounding<int64_t, intrinsics::Float32>>(
          as, reg_alloc, simd_reg_alloc, result, src);
    } else if constexpr (IsTagEq<&intrinsics::FCvtFloatToInteger<int32_t, intrinsics::Float64>>) {
      auto [rm, frm, src] = args_tuple;
      if (rm != FPFlags::DYN) {
        return false;
      }
      return TryInlineIntrinsic<
          &intrinsics::FCvtFloatToIntegerHostRounding<int32_t, intrinsics::Float64>>(
          as, reg_alloc, simd_reg_alloc, result, src);
    } else if constexpr (IsTagEq<&intrinsics::FCvtFloatToInteger<int32_t, intrinsics::Float32>>) {
      auto [rm, frm, src] = args_tuple;
      if (rm != FPFlags::DYN) {
        return false;
      }
      return TryInlineIntrinsic<
          &intrinsics::FCvtFloatToIntegerHostRounding<int32_t, intrinsics::Float32>>(
          as, reg_alloc, simd_reg_alloc, result, src);
    }
    return false;
  }

 private:
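  // FunctionCompareTag turns a function pointer into a distinct type, so two
  // intrinsics can be compared for identity at compile time: the tags are the
  // same type iff the wrapped function pointers are the same.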
  template <auto kFunction>
  class FunctionCompareTag;

  template <auto kOtherFunction>
  static constexpr bool IsTagEq =
      std::is_same_v<FunctionCompareTag<kFunc>, FunctionCompareTag<kOtherFunction>>;
};

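// Moves a value of the given format between registers, picking the appropriate
// instruction: plain Mov for integer formats, Vmov/Vmovs when the host has
// AVX, and Mov/Movs otherwise. The two overloads are selected via SFINAE,
// depending on whether the assembler provides Mov<format> or Movs<format> for
// the given operand types.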
template <typename format, typename DestType, typename SrcType>
auto Mov(MacroAssembler<x86_64::Assembler>& as, DestType dest, SrcType src)
    -> decltype(std::declval<MacroAssembler<x86_64::Assembler>>()
                    .Mov<format>(std::declval<DestType>(), std::declval<SrcType>())) {
  if constexpr (std::is_integral_v<format>) {
    return as.template Mov<format>(dest, src);
  } else if (host_platform::kHasAVX) {
    return as.template Vmov<format>(dest, src);
  } else {
    return as.template Mov<format>(dest, src);
  }
}

template <typename format, typename DestType, typename SrcType>
auto Mov(MacroAssembler<x86_64::Assembler>& as, DestType dest, SrcType src)
    -> decltype(std::declval<MacroAssembler<x86_64::Assembler>>()
                    .Movs<format>(std::declval<DestType>(), std::declval<SrcType>())) {
  if (host_platform::kHasAVX) {
    if constexpr (std::is_same_v<DestType, MacroAssembler<x86_64::Assembler>::XMMRegister> &&
                  std::is_same_v<SrcType, MacroAssembler<x86_64::Assembler>::XMMRegister>) {
      return as.template Vmovs<format>(dest, dest, src);
    } else {
      return as.template Vmovs<format>(dest, src);
    }
  } else {
    return as.template Movs<format>(dest, src);
  }
}

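// Inlines an intrinsic by walking its machine-generated bindings:
// ProcessBindings invokes operator()(AsmCallInfo) for each binding of
// kFunction; that callback checks the host CPU against the binding's CPUID
// restriction, materializes every argument via MakeTuplefromBindings /
// ProcessArgInput, emits the bound macro-instruction, and moves the result
// into place. Conversion to bool reports whether any binding was used.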
template <auto kFunction,
          typename RegAlloc,
          typename SIMDRegAlloc,
          typename AssemblerResType,
          typename... AssemblerArgType>
class TryBindingBasedInlineIntrinsic {
  template <auto kFunctionForFriend,
            typename RegAllocForFriend,
            typename SIMDRegAllocForFriend,
            typename AssemblerResTypeForFriend,
            typename... AssemblerArgTypeForFriend>
  friend bool TryInlineIntrinsic(MacroAssembler<x86_64::Assembler>& as,
                                 RegAllocForFriend&& reg_alloc,
                                 SIMDRegAllocForFriend&& simd_reg_alloc,
                                 AssemblerResTypeForFriend result,
                                 AssemblerArgTypeForFriend... args);
  template <auto kFunc,
            typename Assembler_common_x86,
            typename Assembler_x86_64,
            typename MacroAssembler,
            typename Result,
            typename Callback,
            typename... Args>
  friend Result intrinsics::bindings::ProcessBindings(Callback callback,
                                                      Result def_result,
                                                      Args&&... args);
  template <
      auto kIntrinsicTemplateName,
      auto kMacroInstructionTemplateName,
      auto kMnemo,
      typename GetOpcode,
      intrinsics::bindings::CPUIDRestriction kCPUIDRestrictionTemplateValue,
      intrinsics::bindings::PreciseNanOperationsHandling kPreciseNanOperationsHandlingTemplateValue,
      bool kSideEffectsTemplateValue,
      typename... Types>
  friend class intrinsics::bindings::AsmCallInfo;

  TryBindingBasedInlineIntrinsic() = delete;
  TryBindingBasedInlineIntrinsic(const TryBindingBasedInlineIntrinsic&) = delete;
  TryBindingBasedInlineIntrinsic(TryBindingBasedInlineIntrinsic&&) = default;
  TryBindingBasedInlineIntrinsic& operator=(const TryBindingBasedInlineIntrinsic&) = delete;
  TryBindingBasedInlineIntrinsic& operator=(TryBindingBasedInlineIntrinsic&&) = default;

  TryBindingBasedInlineIntrinsic(MacroAssembler<x86_64::Assembler>& as,
                                 RegAlloc& reg_alloc,
                                 SIMDRegAlloc& simd_reg_alloc,
                                 AssemblerResType result,
                                 AssemblerArgType... args)
      : as_(as),
        reg_alloc_(reg_alloc),
        simd_reg_alloc_(simd_reg_alloc),
        result_{result},
        input_args_(std::tuple{args...}),
        success_(
            intrinsics::bindings::ProcessBindings<kFunction,
                                                  AssemblerX86<x86_64::Assembler>,
                                                  x86_64::Assembler,
                                                  std::tuple<MacroAssembler<x86_64::Assembler>>,
                                                  bool,
                                                  TryBindingBasedInlineIntrinsic&>(*this, false)) {}
  operator bool() { return success_; }

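  // ProcessBindings callback: rejects bindings whose CPUID restriction the
  // host does not satisfy, emits the bound macro-instruction, and copies the
  // result from an implicit or temporary register into the destination.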
  template <typename AsmCallInfo>
  std::optional<bool> /*ProcessBindingsClient*/ operator()(AsmCallInfo asm_call_info) {
    static_assert(std::is_same_v<decltype(kFunction), typename AsmCallInfo::IntrinsicType>);
    static_assert(AsmCallInfo::kPreciseNanOperationsHandling ==
                  intrinsics::bindings::kNoNansOperation);
    if constexpr (AsmCallInfo::kCPUIDRestriction == intrinsics::bindings::kHasAVX) {
      if (!host_platform::kHasAVX) {
        return false;
      }
    } else if constexpr (AsmCallInfo::kCPUIDRestriction == intrinsics::bindings::kHasBMI) {
      if (!host_platform::kHasBMI) {
        return false;
      }
    } else if constexpr (AsmCallInfo::kCPUIDRestriction == intrinsics::bindings::kHasLZCNT) {
      if (!host_platform::kHasLZCNT) {
        return false;
      }
    } else if constexpr (AsmCallInfo::kCPUIDRestriction == intrinsics::bindings::kHasPOPCNT) {
      if (!host_platform::kHasPOPCNT) {
        return false;
      }
    } else if constexpr (AsmCallInfo::kCPUIDRestriction ==
                         intrinsics::bindings::kNoCPUIDRestriction) {
      // No restrictions. Do nothing.
    } else {
      static_assert(kDependentValueFalse<AsmCallInfo::kCPUIDRestriction>);
    }
    std::apply(
        AsmCallInfo::kMacroInstruction,
        std::tuple_cat(std::tuple<MacroAssembler<x86_64::Assembler>&>{as_},
                       AsmCallInfo::template MakeTuplefromBindings<TryBindingBasedInlineIntrinsic&>(
                           *this, asm_call_info)));
    if constexpr (std::tuple_size_v<typename AsmCallInfo::OutputArguments> == 0) {
      // No return value. Do nothing.
    } else if constexpr (std::tuple_size_v<typename AsmCallInfo::OutputArguments> == 1) {
      using ReturnType = std::tuple_element_t<0, typename AsmCallInfo::OutputArguments>;
      if constexpr (std::is_integral_v<ReturnType>) {
        if (result_reg_ != x86_64::Assembler::no_register) {
          Mov<ReturnType>(as_, result_, result_reg_);
          CHECK_EQ(result_xmm_reg_.num, x86_64::Assembler::no_xmm_register.num);
        } else if (result_xmm_reg_ != x86_64::Assembler::no_xmm_register) {
          Mov<typename TypeTraits<ReturnType>::Float>(as_, result_, result_xmm_reg_);
          CHECK_EQ(result_reg_.num, x86_64::Assembler::no_register.num);
        }
      } else {
        CHECK_EQ(result_reg_.num, x86_64::Assembler::no_register.num);
        CHECK_EQ(result_xmm_reg_.num, x86_64::Assembler::no_xmm_register.num);
      }
      if constexpr (std::is_integral_v<ReturnType> && sizeof(ReturnType) < sizeof(std::int32_t)) {
        // Don't handle these types just yet. We are not sure how to expand them and there
        // are no examples.
        static_assert(kDependentTypeFalse<ReturnType>);
      }
      if constexpr (std::is_same_v<ReturnType, int32_t> || std::is_same_v<ReturnType, uint32_t>) {
        // Expand 32-bit values as signed, even if the actual result is processed as unsigned!
        as_.Expand<int64_t, std::make_signed_t<ReturnType>>(result_, result_);
      } else if constexpr (std::is_integral_v<ReturnType> &&
                           sizeof(ReturnType) == sizeof(std::int64_t)) {
        // Do nothing, we have already produced an expanded value.
      } else if constexpr (std::is_same_v<ReturnType, intrinsics::Float32> ||
                           std::is_same_v<ReturnType, intrinsics::Float64>) {
        // Do nothing, NaN boxing is handled by the semantics player.
      } else {
        static_assert(kDependentTypeFalse<ReturnType>);
      }
    } else {
      static_assert(kDependentTypeFalse<typename AsmCallInfo::OutputArguments>);
    }
    return {true};
  }

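  // MakeTuplefromBindings callback: routes each argument to the matching
  // register allocator, i.e. the SIMD allocator for the 'x' (XMM) register
  // class and the general-purpose allocator for everything else.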
  template <typename ArgBinding, typename AsmCallInfo>
  auto /*MakeTuplefromBindingsClient*/ operator()(ArgTraits<ArgBinding>, AsmCallInfo) {
    static constexpr const auto& arg_info = ArgTraits<ArgBinding>::arg_info;
    if constexpr (arg_info.arg_type == ArgInfo::IMM_ARG) {
      return ProcessArgInput<ArgBinding, AsmCallInfo>(reg_alloc_);
    } else {
      using RegisterClass = typename ArgTraits<ArgBinding>::RegisterClass;
      if constexpr (RegisterClass::kAsRegister == 'x') {
        return ProcessArgInput<ArgBinding, AsmCallInfo>(simd_reg_alloc_);
      } else {
        return ProcessArgInput<ArgBinding, AsmCallInfo>(reg_alloc_);
      }
    }
  }

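  // Materializes one argument according to its binding kind (IMM_ARG, IN_ARG,
  // IN_OUT_ARG, IN_TMP_ARG, ...): allocates a register or scratch slot where
  // needed, emits the moves that load inputs into it, records implicit result
  // registers, and returns the tuple fragment passed to the macro-instruction.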
  template <typename ArgBinding, typename AsmCallInfo, typename RegAllocForArg>
  auto ProcessArgInput(RegAllocForArg&& reg_alloc) {
    static constexpr const auto& arg_info = ArgTraits<ArgBinding>::arg_info;
    if constexpr (arg_info.arg_type == ArgInfo::IMM_ARG) {
      return std::tuple{std::get<arg_info.from>(input_args_)};
    } else {
      using RegisterClass = typename ArgTraits<ArgBinding>::RegisterClass;
      using Usage = typename ArgTraits<ArgBinding>::Usage;
      if constexpr (arg_info.arg_type == ArgInfo::IN_ARG) {
        using Type = std::tuple_element_t<arg_info.from, typename AsmCallInfo::InputArguments>;
        if constexpr (RegisterClass::kAsRegister == 'x' && std::is_integral_v<Type>) {
          auto reg = reg_alloc();
          Mov<typename TypeTraits<int64_t>::Float>(as_, reg, std::get<arg_info.from>(input_args_));
          return std::tuple{reg};
        } else {
          static_assert(std::is_same_v<Usage, intrinsics::bindings::Use>);
          static_assert(!RegisterClass::kIsImplicitReg);
          return std::tuple{std::get<arg_info.from>(input_args_)};
        }
      } else if constexpr (arg_info.arg_type == ArgInfo::IN_OUT_ARG) {
        using Type = std::tuple_element_t<arg_info.from, typename AsmCallInfo::InputArguments>;
        static_assert(std::is_same_v<Usage, intrinsics::bindings::UseDef>);
        static_assert(!RegisterClass::kIsImplicitReg);
        if constexpr (RegisterClass::kAsRegister == 'x' && std::is_integral_v<Type>) {
          static_assert(std::is_integral_v<
                        std::tuple_element_t<arg_info.to, typename AsmCallInfo::OutputArguments>>);
          CHECK_EQ(result_xmm_reg_.num, x86_64::Assembler::no_xmm_register.num);
          result_xmm_reg_ = reg_alloc();
          Mov<typename TypeTraits<int64_t>::Float>(
              as_, result_xmm_reg_, std::get<arg_info.from>(input_args_));
          return std::tuple{result_xmm_reg_};
        } else {
          Mov<std::tuple_element_t<arg_info.from, typename AsmCallInfo::InputArguments>>(
              as_, result_, std::get<arg_info.from>(input_args_));
          return std::tuple{result_};
        }
      } else if constexpr (arg_info.arg_type == ArgInfo::IN_TMP_ARG) {
        if constexpr (RegisterClass::kAsRegister == 'c') {
          Mov<std::tuple_element_t<arg_info.from, typename AsmCallInfo::InputArguments>>(
              as_, as_.rcx, std::get<arg_info.from>(input_args_));
          return std::tuple{};
        } else if constexpr (RegisterClass::kAsRegister == 'a') {
          Mov<std::tuple_element_t<arg_info.from, typename AsmCallInfo::InputArguments>>(
              as_, as_.rax, std::get<arg_info.from>(input_args_));
          return std::tuple{};
        } else {
          static_assert(std::is_same_v<Usage, intrinsics::bindings::UseDef>);
          static_assert(!RegisterClass::kIsImplicitReg);
          auto reg = reg_alloc();
          Mov<std::tuple_element_t<arg_info.from, typename AsmCallInfo::InputArguments>>(
              as_, reg, std::get<arg_info.from>(input_args_));
          return std::tuple{reg};
        }
      } else if constexpr (arg_info.arg_type == ArgInfo::IN_OUT_TMP_ARG) {
        using Type = std::tuple_element_t<arg_info.from, typename AsmCallInfo::InputArguments>;
        static_assert(std::is_same_v<Usage, intrinsics::bindings::UseDef>);
        static_assert(RegisterClass::kIsImplicitReg);
        if constexpr (RegisterClass::kAsRegister == 'a') {
          CHECK_EQ(result_reg_.num, x86_64::Assembler::no_register.num);
          Mov<Type>(as_, as_.rax, std::get<arg_info.from>(input_args_));
          result_reg_ = as_.rax;
          return std::tuple{};
        } else {
          static_assert(kDependentValueFalse<arg_info.arg_type>);
        }
      } else if constexpr (arg_info.arg_type == ArgInfo::OUT_ARG) {
        using Type = std::tuple_element_t<arg_info.to, typename AsmCallInfo::OutputArguments>;
        static_assert(std::is_same_v<Usage, intrinsics::bindings::Def> ||
                      std::is_same_v<Usage, intrinsics::bindings::DefEarlyClobber>);
        if constexpr (RegisterClass::kAsRegister == 'a') {
          CHECK_EQ(result_reg_.num, x86_64::Assembler::no_register.num);
          result_reg_ = as_.rax;
          return std::tuple{};
        } else if constexpr (RegisterClass::kAsRegister == 'c') {
          CHECK_EQ(result_reg_.num, x86_64::Assembler::no_register.num);
          result_reg_ = as_.rcx;
          return std::tuple{};
        } else {
          static_assert(!RegisterClass::kIsImplicitReg);
          if constexpr (RegisterClass::kAsRegister == 'x' && std::is_integral_v<Type>) {
            CHECK_EQ(result_xmm_reg_.num, x86_64::Assembler::no_xmm_register.num);
            result_xmm_reg_ = reg_alloc();
            return std::tuple{result_xmm_reg_};
          } else {
            return std::tuple{result_};
          }
        }
      } else if constexpr (arg_info.arg_type == ArgInfo::OUT_TMP_ARG) {
        if constexpr (RegisterClass::kAsRegister == 'd') {
          result_reg_ = as_.rdx;
          return std::tuple{};
        } else {
          static_assert(kDependentValueFalse<arg_info.arg_type>);
        }
      } else if constexpr (arg_info.arg_type == ArgInfo::TMP_ARG) {
        static_assert(std::is_same_v<Usage, intrinsics::bindings::Def> ||
                      std::is_same_v<Usage, intrinsics::bindings::DefEarlyClobber>);
        if constexpr (RegisterClass::kAsRegister == 'm') {
          if (scratch_arg_ >= config::kScratchAreaSize / config::kScratchAreaSlotSize) {
            FATAL("Only two scratch registers are supported for now");
          }
          return std::tuple{x86_64::Assembler::Operand{
              .base = as_.rbp,
              .disp = static_cast<int>(offsetof(ThreadState, intrinsics_scratch_area) +
                                       config::kScratchAreaSlotSize * scratch_arg_++)}};
        } else if constexpr (RegisterClass::kIsImplicitReg) {
          return std::tuple{};
        } else {
          return std::tuple{reg_alloc()};
        }
      } else {
        static_assert(kDependentValueFalse<arg_info.arg_type>);
      }
    }
  }

 private:
  MacroAssembler<x86_64::Assembler>& as_;
  RegAlloc& reg_alloc_;
  SIMDRegAlloc& simd_reg_alloc_;
  AssemblerResType result_;
  x86_64::Assembler::Register result_reg_ = x86_64::Assembler::no_register;
  x86_64::Assembler::XMMRegister result_xmm_reg_ = x86_64::Assembler::no_xmm_register;
  std::tuple<AssemblerArgType...> input_args_;
  uint32_t scratch_arg_ = 0;
  bool success_;
};

template <auto kFunction,
          typename RegAlloc,
          typename SIMDRegAlloc,
          typename AssemblerResType,
          typename... AssemblerArgType>
bool TryInlineIntrinsic(MacroAssembler<x86_64::Assembler>& as,
                        RegAlloc&& reg_alloc,
                        SIMDRegAlloc&& simd_reg_alloc,
                        AssemblerResType result,
                        AssemblerArgType... args) {
  if (InlineIntrinsic<kFunction>::TryInlineWithHostRounding(
          as, reg_alloc, simd_reg_alloc, result, args...)) {
    return true;
  }

  return TryBindingBasedInlineIntrinsic<kFunction,
                                        RegAlloc,
                                        SIMDRegAlloc,
                                        AssemblerResType,
                                        AssemblerArgType...>(
      as, reg_alloc, simd_reg_alloc, result, args...);
}
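
// A minimal usage sketch (hypothetical call site; `reg_alloc`, `simd_reg_alloc`
// and `res` stand in for whatever the lite translator actually supplies):
//
//   auto res = reg_alloc();
//   if (!TryInlineIntrinsic<&intrinsics::FAdd<intrinsics::Float64>>(
//           as, reg_alloc, simd_reg_alloc, res, rm, frm, src1, src2)) {
//     // Inlining failed: emit an out-of-line call to the intrinsic instead.
//   }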

}  // namespace berberis::inline_intrinsic

#endif  // BERBERIS_LITE_TRANSLATOR_RISCV64_TO_X86_64_INLINE_INTRINSIC_H_