1%% -*-Fundamental-*-
2
3%define-kinds canonical ndk hal_1.0 hal_1.1 hal_1.2 hal_1.3 aidl
4
5%kind ndk
6%define ANN ANEURALNETWORKS_
7%define Ann ANeuralNetworks
8%define DeclareOperation ANEURALNETWORKS_%{1} = %{2}
9%define DeclareOperation_1.2 ANEURALNETWORKS_%{1} = %{2}
10%define DeclareOperation_1.3 ANEURALNETWORKS_%{1} = %{2}
11%define DeclareOperation_fl6 ANEURALNETWORKS_%{1} = %{2}
12%define DeclareOperation_fl7 ANEURALNETWORKS_%{1} = %{2}
13%define FusedActivationFunc FuseCode
14%define DeclareFusedActivationFunc ANEURALNETWORKS_FUSED_%{1} = %{2}
15%define DeclareExecutionPreference ANEURALNETWORKS_PREFER_%{1} = %{2}
16%define DeclareDeviceType ANEURALNETWORKS_DEVICE_%{1} = %{2}
17%define OperandType OperandCode
18%define OperandTypeLinkPfx ANEURALNETWORKS_
19%define OperationTypeLinkPfx ANEURALNETWORKS_
20%define runtime_or_driver runtime
21%define NNAPILevel3 NNAPI feature level 3
22%define NNAPILevel4 NNAPI feature level 4
23%define NNAPILevel6 NNAPI feature level 6
24%define NNAPILevel7 NNAPI feature level 7
25%define BeforeNNAPILevel3For Before NNAPI feature level 3, for
26%define or_1.2 or {@link ANEURALNETWORKS_%{1}}
27%define NDK_if_specified  (if specified)
28%define otherOperandParameters other operand parameters
29%section AVAIL1
30     *
31     * Available since NNAPI feature level 1.
32%/section
33%section AVAIL1Short
34 *
35 * Available since NNAPI feature level 1.
36%/section
37%section AVAIL2
38     *
39     * Available since NNAPI feature level 2.
40%/section
41%section AVAIL3
42     *
43     * Available since NNAPI feature level 3.
44%/section
45%section AVAIL4
46     *
47     * Available since NNAPI feature level 4.
48%/section
49%section AVAIL6
50     *
51     * Available since NNAPI feature level 6.
52%/section
53%section AVAIL7
54     *
55     * Available since NNAPI feature level 7.
56%/section
57%section OutputState
58     *
59     * Important: As of NNAPI feature level 3, there is no way to get the output state tensors out
60     * and NNAPI does not maintain internal states. This operator does not support the usage pattern
61     * in which multiple cells are chained and state tensors are propagated.
62%/section
63%section PaddingCodeValues
64     *      {@link PaddingCode} values.
65%/section
66%/kind
67
68%kind aidl canonical hal*
69%define ANN
70%define Ann
71%define FusedActivationFunc FusedActivationFunc
72%define DeclareFusedActivationFunc %{1} = %{2}
73%define DeclareExecutionPreference %{1} = %{2}
74%define DeclareDeviceType %{1} = %{2}
75%define OperandType OperandType
76%define OperandTypeLinkPfx OperandType::
77%define OperationTypeLinkPfx OperationType::
78%define runtime_or_driver driver
79%define NNAPILevel3 HAL version 1.2
80%define NNAPILevel4 HAL version 1.3
81%define NNAPILevel6 NNAPI feature level 6
82%define NNAPILevel7 NNAPI feature level 7
83%define NDK_if_specified
84%define otherOperandParameters extraParams
85%section AVAIL1
86%/section
87%section AVAIL1Short
88%/section
89%section AVAIL2
90%/section
91%section AVAIL3
92%/section
93%section AVAIL4
94%/section
95%section AVAIL6
96%/section
97%section AVAIL7
98%/section
99%section PaddingCodeValues
100     *      following values: {0 (NONE), 1 (SAME), 2 (VALID)}.
101%/section
102%section OutputState
103%/section
104%/kind
105
106%kind hal_1.0 hal_1.1
107%define DeclareOperation %{1} = %{2}
108%define BeforeNNAPILevel3For For
109%define or_1.2
110%section NHWC_NCHW
111     * Supported tensor rank: 4, with "NHWC" (i.e., Num_samples, Height, Width,
112     * and Channels) data layout.
113%/section
114%section GenericZero
115%/section
116%section ZeroBatchesNNAPILevel3
117%/section
118%define DeclareOperation_1.2 @@@NOT_DEFINED@@@
119%define DeclareOperation_1.3 @@@NOT_DEFINED@@@
120%define DeclareOperation_fl6 @@@NOT_DEFINED@@@
121%define DeclareOperation_fl7 @@@NOT_DEFINED@@@
122%/kind
123
124%kind aidl canonical hal_1.2 hal_1.3
125%define BeforeNNAPILevel3For Before HAL version 1.2, for
126%define or_1.2 or {@link OperandType::%{1}}
127%/kind
128
129%kind hal_1.2
130%define DeclareOperation %{1} = @1.1::OperationType:%{1}
131%define DeclareOperation_1.2 %{1} = %{2}
132%define DeclareOperation_1.3 @@@NOT_DEFINED@@@
133%define DeclareOperation_fl6 @@@NOT_DEFINED@@@
134%define DeclareOperation_fl7 @@@NOT_DEFINED@@@
135%/kind
136
137%kind hal_1.3
138%define DeclareOperation %{1} = @1.2::OperationType:%{1}
139%define DeclareOperation_1.2 %{1} = @1.2::OperationType:%{1}
140%define DeclareOperation_1.3 %{1} = %{2}
141%define DeclareOperation_fl6 @@@NOT_DEFINED@@@
142%define DeclareOperation_fl7 @@@NOT_DEFINED@@@
143%/kind
144
145%kind aidl
146%define DeclareOperation %{1} = %{2}
147%define DeclareOperation_1.2 %{1} = %{2}
148%define DeclareOperation_1.3 %{1} = %{2}
149%define DeclareOperation_fl6 %{1} = %{2}
150%define DeclareOperation_fl7 %{1} = %{2}
151%define DeclareEnumValue %{1} = %{2}
152%define OperandLifeTime OperandLifeTime
153%define :: ::
154%define vec std::vector
155%define string std::string
156%define init_bool  = false
157%define init_float  = 0.0f
158%define init_int  = 0
159%define init_pod {}
160%define Dimensions Dimensions
161%define concat_or_skip_first %{2}
162%/kind
163
164%kind canonical
165%define DeclareOperation %{1} = %{2}
166%define DeclareOperation_1.2 %{1} = %{2}
167%define DeclareOperation_1.3 %{1} = %{2}
168%define DeclareOperation_fl6 %{1} = %{2}
169%define DeclareOperation_fl7 %{1} = %{2}
170%define DeclareEnumValue %{1} = %{2}
171%define OperandLifeTime Operand::LifeTime
172%define :: ::
173%define vec std::vector
174%define string std::string
175%define init_bool  = false
176%define init_float  = 0.0f
177%define init_int  = 0
178%define init_pod {}
179%define Dimensions Dimensions
180%define concat_or_skip_first %{2}
181%/kind
182
183%kind hal*
184%define DeclareEnumValue %{1}
185%define OperandLifeTime OperandLifeTime
186%define :: .
187%define vec vec
188%define string string
189%define init_bool
190%define init_float
191%define init_int
192%define init_pod
193%define Dimensions vec<uint32_t>
194%define concat_or_skip_first %{1}%{2}
195%/kind
196
197%kind ndk
198%define DeclareEnumValue @@@NOT_DEFINED@@@
199%define OperandLifeTime @@@NOT_DEFINED@@@
200%define :: @@@NOT_DEFINED@@@
201%define vec @@@NOT_DEFINED@@@
202%define string @@@NOT_DEFINED@@@
203%define init_bool @@@NOT_DEFINED@@@
204%define init_float @@@NOT_DEFINED@@@
205%define init_int @@@NOT_DEFINED@@@
206%define init_pod @@@NOT_DEFINED@@@
207%define Dimensions @@@NOT_DEFINED@@@
208%define concat_or_skip_first @@@NOT_DEFINED@@@
209%/kind
210
211%kind aidl canonical ndk hal_1.2 hal_1.3
212%section NHWC_NCHW
213     * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
214     * With the default data layout NHWC, the data is stored in the order of:
215     * [batch, height, width, channels]. Alternatively, the data layout could
216     * be NCHW, the data storage order of: [batch, channels, height, width].
217     * NCHW is supported since %{NNAPILevel3}.
218%/section
219%section GenericZero
220     * Since %{NNAPILevel3}, generic zero-sized input tensor is supported. Zero
221     * dimension is only compatible with 0 or 1. The size of the output
222     * dimension is zero if either of corresponding input dimension is zero.
223     *
224%/section
225%section ZeroBatchesNNAPILevel3
226     *      Since %{NNAPILevel3}, zero batches is supported for this tensor.
227%/section
228%/kind
229
230%kind aidl canonical ndk hal_1.3
231%define AndQuant8Signed
232%/kind
233%kind hal_1.0 hal_1.1 hal_1.2
234%define AndQuant8Signed
235%/kind
236
237%kind ndk hal_1.0 hal_1.1 hal_1.2
238%define model_or_subgraph model
239%define MODEL_or_SUBGRAPH MODEL
240%define the_model_or_a_subgraph the model
241%/kind
242
243%kind aidl canonical hal_1.3+
244%define model_or_subgraph subgraph
245%define MODEL_or_SUBGRAPH SUBGRAPH
246%define the_model_or_a_subgraph a subgraph
247%/kind
248
249%% Declaring enums that work across all kinds:
250%%
251%%     %{enum X underlying_hal_type} {
252%%         %{DeclareX ...},
253%%         ...
254%%     }%{ndk_enum_name X};
255%%
256%% Note that %{ndk_enum_name X} can be omitted for non-NDK enums because the
257%% macro definition is empty for all other kinds.
258%kind aidl
259%define enum enum %{1}
260%define ndk_enum_name
261%define DeclarePriority %{1} = %{2}
262%/kind
263%kind canonical
264%define enum enum class %{1}
265%define ndk_enum_name
266%define DeclarePriority %{1} = %{2}
267%/kind
268%kind ndk
269%define enum typedef enum
270%define ndk_enum_name  %{1}
271%define DeclarePriority ANEURALNETWORKS_PRIORITY_%{1} = %{3}
272%/kind
273%kind hal*
274%define enum enum %{1} : %{2}
275%define ndk_enum_name
276%define DeclarePriority %{1}
277%/kind
278
279%section OEMDeprecationAndOperandTypeRangeMaxComment
280
281    /*
282     * DEPRECATED. Since HAL version 1.2, extensions are the preferred
283     * alternative to OEM operation and data types.
284     *
285     * OEM specific scalar value.
286     * OEM                 = 10000,
287     */
288    /*
289     * DEPRECATED. Since HAL version 1.2, extensions are the preferred
290     * alternative to OEM operation and data types.
291     *
292     * A tensor of OEM specific values.
293     * TENSOR_OEM_BYTE     = 10001,
294     */
295    /* ADDING A NEW FUNDAMENTAL TYPE REQUIRES UPDATING THE VALUE OF
296     * OperandTypeRange::FUNDAMENTAL_MAX.
297     */
298    /* ADDING A NEW OEM TYPE REQUIRES UPDATING THE VALUE OF
299     * OperandTypeRange::OEM_MAX.
300     */
301%/section
302
303
304%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
305
306%% HAL OperandType for 1.0
307%% NDK OperandCode for API 27
308
309%section canonical_empty_line
310%kind canonical
311
312%/kind
313%/section
314
315%section Operand_1.0_Comment
316/**
317 * Operand types.
318 *
319 * The type of an operand in a model.
320 *
321 * Types prefaced with %{ANN}TENSOR_* must be used for tensor data (i.e., tensors
322 * with at least one dimension). Types not prefaced by %{ANN}TENSOR_* represent
323 * scalar values and must have no dimensions.
324 *
325 * Although we define many types, most operators accept just a few
326 * types. Most used are {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32},
327 * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM},
328 * and {@link %{OperandTypeLinkPfx}INT32}.
329%insert AVAIL1Short
330 */
331%/section
332
333%section Operand_1.0
334    /** A 32 bit floating point scalar value. */
335    %{ANN}FLOAT32 = 0,
336%insert canonical_empty_line
337    /** A signed 32 bit integer scalar value. */
338    %{ANN}INT32 = 1,
339%insert canonical_empty_line
340    /** An unsigned 32 bit integer scalar value. */
341    %{ANN}UINT32 = 2,
342%insert canonical_empty_line
343    /** A tensor of 32 bit floating point values. */
344    %{ANN}TENSOR_FLOAT32 = 3,
345%insert canonical_empty_line
346    /** A tensor of 32 bit integer values. */
347    %{ANN}TENSOR_INT32 = 4,
348%insert canonical_empty_line
349    /**
350     * A tensor of 8 bit unsigned integers that represent real numbers.
351     *
352     * Attached to this tensor are two numbers that can be used to convert the
353     * 8 bit integer to the real value and vice versa. These two numbers are:
354     * - scale: a 32 bit floating point value greater than zero.
355     * - zeroPoint: a 32 bit integer, in range [0, 255].
356     *
357     * The formula is:
358     *   real_value = (integer_value - zeroPoint) * scale.
359     */
360    %{ANN}TENSOR_QUANT8_ASYMM = 5,
361%/section
362
363%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
364
365%% HAL OperationType for 1.0
366%% NDK OperationCode for API 27
367
368%section Operation_1.0_Comment
369/**
370 * Operation types.
371 *
372 * The type of an operation in a model.
373%insert AVAIL1Short
374 */
375%/section
376
377%section Operation_1.0
378    /**
379     * Adds two tensors, element-wise.
380     *
381     * Takes two input tensors of identical {@link %{OperandType}} and compatible
382     * dimensions. The output is the sum of both input tensors, optionally
383     * modified by an activation function.
384     *
385     * Two dimensions are compatible when:
386     *     1. they are equal, or
387     *     2. one of them is 1
388     *
389     * The size of the output is the maximum size along each dimension of the
390     * input operands. It starts with the trailing dimensions, and works its
391     * way forward.
392     *
393     * Example:
394     *
395     *     input1.dimension = {4, 1, 2}
396     *     input2.dimension = {5, 4, 3, 1}
397     *     output.dimension = {5, 4, 3, 2}
398     *
399%insert GenericZero
400     * Supported tensor {@link %{OperandType}}:
401%kind aidl canonical ndk hal_1.2+
402     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16} (since %{NNAPILevel3})
403%/kind
404     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32}
405     * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM}
406%kind aidl canonical ndk hal_1.3+
407     * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{NNAPILevel4})
408     * * {@link %{OperandTypeLinkPfx}TENSOR_INT32} (since %{NNAPILevel4})
409%/kind
410     *
411     * Supported tensor rank: up to 4
412     *
413     * Inputs:
414     * * 0: A tensor.
415     * * 1: A tensor of the same {@link %{OperandType}}, and compatible dimensions
416     *      as input0.
417%kind aidl canonical ndk hal_1.3+
418     *      For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} and
419     *      {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} tensor,
420     *      the scales and zeroPoint can be different from input0 scale and zeroPoint.
421%else
422     *      For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} tensor,
423     *      the scales and zeroPoint can be different from input0 scale and zeroPoint.
424%/kind
425     * * 2: An {@link %{OperandTypeLinkPfx}INT32} scalar, and has to be one of the
426     *      {@link %{FusedActivationFunc}} values. Specifies the activation to
427     *      invoke on the result.
428%kind aidl canonical ndk hal_1.3+
429     *      For a {@link %{OperandTypeLinkPfx}TENSOR_INT32} tensor,
430     *      the {@link %{FusedActivationFunc}} must be "NONE".
431%/kind
432     *
433     * Outputs:
434     * * 0: The sum, a tensor of the same {@link %{OperandType}} as input0.
435%kind aidl canonical ndk hal_1.3+
436     *      For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} and
437     *      {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} tensor,
438     *      the scale and zeroPoint can be different from inputs' scale and zeroPoint.
439%else
440     *      For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} tensor,
441     *      the scale and zeroPoint can be different from inputs' scale and zeroPoint.
442%/kind
443%insert AVAIL1
444     */
445    %{DeclareOperation ADD 0},
446
447    /**
448     * Performs a 2-D average pooling operation.
449     *
450     * The output dimensions are functions of the filter dimensions, stride, and
451     * padding.
452     *
453     * The values in the output tensor are computed as:
454     *
455     *     output[b, i, j, channel] =
456     *         sum_{di, dj}(
457     *             input[b, strides[1] * i + di, strides[2] * j + dj, channel]
458     *         ) / sum(1)
459     *
460     * Supported tensor {@link %{OperandType}}:
461%kind aidl canonical ndk hal_1.2+
462     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16} (since %{NNAPILevel3})
463%/kind
464     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32}
465     * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM}
466%kind aidl canonical ndk hal_1.3+
467     * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{NNAPILevel4})
468%/kind
469     *
470%insert NHWC_NCHW
471     *
472     * Both explicit padding and implicit padding are supported.
473     *
474     * Inputs (explicit padding):
475     * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
476     *      the input.
477%insert ZeroBatchesNNAPILevel3
478     * * 1: An {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the padding on
479     *      the left, in the ‘width’ dimension.
480     * * 2: An {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the padding on
481     *      the right, in the ‘width’ dimension.
482     * * 3: An {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the padding on
483     *      the top, in the ‘height’ dimension.
484     * * 4: An {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the padding on
485     *      the bottom, in the ‘height’ dimension.
486     * * 5: An {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the stride when
487     *      walking through input in the ‘width’ dimension.
488     * * 6: An {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the stride when
489     *      walking through input in the ‘height’ dimension.
490     * * 7: An {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the filter
491     *      width.
492     * * 8: An {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the filter
493     *      height.
494     * * 9: An {@link %{OperandTypeLinkPfx}INT32} scalar, and has to be one of the
495     *      {@link %{FusedActivationFunc}} values. Specifies the activation to
496     *      invoke on the result.
497%kind aidl canonical ndk hal_1.2+
498     * * 10: An optional {@link %{OperandTypeLinkPfx}BOOL} scalar, default to false.
499     *       Set to true to specify NCHW data layout for input0 and output0.
500     *       Available since %{NNAPILevel3}.
501%/kind
502     *
503     * Inputs (implicit padding):
504     * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
505     *      the input.
506%insert ZeroBatchesNNAPILevel3
507     * * 1: An {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the implicit
508     *      padding scheme, has to be one of the
509%insert PaddingCodeValues
510     * * 2: An {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the stride when
511     *      walking through input in the ‘width’ dimension.
512     * * 3: An {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the stride when
513     *      walking through input in the ‘height’ dimension.
514     * * 4: An {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the filter
515     *      width.
516     * * 5: An {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the filter
517     *      height.
518     * * 6: An {@link %{OperandTypeLinkPfx}INT32} scalar, and has to be one of the
519     *      {@link %{FusedActivationFunc}} values. Specifies the activation to
520     *      invoke on the result.
521%kind aidl canonical ndk hal_1.2+
522     * * 7: An optional {@link %{OperandTypeLinkPfx}BOOL} scalar, default to false.
523     *      Set to true to specify NCHW data layout for input0 and output0.
524     *      Available since %{NNAPILevel3}.
525%/kind
526     *
527     * Outputs:
528     * * 0: The output 4-D tensor, of shape
529     *      [batches, out_height, out_width, depth].
530%kind aidl canonical ndk hal_1.3+
531     *      For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} and
532     *      {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} tensor,
533     *      the scale and zeroPoint must be the same as input0.
534%else
535     *      For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} tensor,
536     *      the scale and zeroPoint must be the same as input0.
537%/kind
538%insert AVAIL1
539     */
540    %{DeclareOperation AVERAGE_POOL_2D 1},
541
542    /**
543     * Concatenates the input tensors along the given dimension.
544     *
545     * The input tensors must have identical {@link %{OperandType}} and the same
546     * dimensions except the dimension along the concatenation axis.
547     *
548     * Supported tensor {@link %{OperandType}}:
549%kind aidl canonical ndk hal_1.2+
550     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16} (since %{NNAPILevel3})
551%/kind
552     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32}
553     * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM}
554%kind aidl canonical ndk hal_1.2+
555     *   (full support since %{NNAPILevel3}, see the input section)
556%/kind
557%kind aidl canonical ndk hal_1.3+
558     * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{NNAPILevel4})
559%/kind
560     *
561     * Supported tensor rank: up to 4
562     *
563     * Inputs:
564     * * 0 ~ n-1: The list of n input tensors, of shape
565     *            [D0, D1, ..., Daxis(i), ..., Dm].
566%kind aidl canonical ndk hal_1.2+
567     *            Before %{NNAPILevel3}, all input tensors of
568%else
569     *            All input tensors of
570%/kind
571     *            {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM}
572     *            must have the same scale and zeroPoint as the output tensor.
573%kind aidl canonical ndk hal_1.3+
574     *            Input tensors of
575     *            {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED}
576     *            are allowed to have different scale and zeroPoint.
577%/kind
578%kind aidl canonical ndk hal_1.2+
579     *            Since %{NNAPILevel3}, zero-sized tensors are supported.
580%/kind
581     * * n: An {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the
582     *      concatenation axis.
583     *
584     * Outputs:
585     * * 0: The output, a tensor of the same {@link %{OperandType}} as the input
586     *      tensors. The output shape is [D0, D1, ..., sum(Daxis(i)), ..., Dm].
587%kind aidl canonical ndk hal_1.2+
588     *      Since %{NNAPILevel3}, for a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} tensor,
589     *      the scale and zeroPoint values can be different from
590     *      input tensors. Before %{NNAPILevel3} they have to be the same as for the
591     *      input tensors.
592%else
593     *      For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} tensor, the scale and zeroPoint
594     *      values must be the same as the input tensors'.
595%/kind
596%kind aidl canonical ndk hal_1.3+
597     *      For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} tensor,
598     *      the scale and zeroPoint values can be different from input tensors.
599%/kind
600%insert AVAIL1
601     */
602    %{DeclareOperation CONCATENATION 2},
603
604    /**
605     * Performs a 2-D convolution operation.
606     *
607     * The CONV_2D op sweeps a 2-D filter that can mix channels together over a
608     * batch of images, applying the filter to each window of each image of the
609     * appropriate size.
610     *
611     * The output dimensions are functions of the filter dimensions, stride, and
612     * padding.
613     *
614     * The values in the output tensor are computed as:
615     *
616     *     output[b, i, j, channel] =
617     *         sum_{di, dj, k} (
618     *             input[b, strides[1] * i + di, strides[2] * j + dj, k] *
619     *             filter[channel, di, dj, k]
620     *         ) + bias[channel]
621     *
622     * Supported tensor {@link %{OperandType}} configurations:
623     * * 32 bit floating point:
624     * * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32} for input, filter, output, and bias.
625     *
626     * * Quantized:
627     * * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} for input, filter, and output.
628     * * * {@link %{OperandTypeLinkPfx}TENSOR_INT32} for bias (with scale set to
629     * * * input.scale * filter.scale).
630     *
631%kind aidl canonical ndk hal_1.2+
632     * Available since %{NNAPILevel3}:
633     * * 16 bit floating point:
634     * * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16} for input, filter, output, and bias.
635     *
636     * * Quantized with symmetric per channel quantization for the filter:
637     * * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} for input, and output.
638     * * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_SYMM_PER_CHANNEL} for filter.
639     * * * {@link %{OperandTypeLinkPfx}TENSOR_INT32} for bias (scale set to 0.0,
640     * * * each value scaling is separate and equal to input.scale * filter.scales[channel]).
641     *
642%/kind
643%kind aidl canonical ndk hal_1.3+
644     * Available since %{NNAPILevel4}:
645     * * Quantized signed (since %{NNAPILevel4}):
646     * * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} for input, filter, and output.
647     * * * {@link %{OperandTypeLinkPfx}TENSOR_INT32} for bias (with scale set to
648     * * * input.scale * filter.scale).
649     *
650     * * Quantized signed with filter symmetric per channel quantization
651     *   (since %{NNAPILevel4}):
652     * * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} for input, and output.
653     * * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_SYMM_PER_CHANNEL} for filter.
654     * * * {@link %{OperandTypeLinkPfx}TENSOR_INT32} for bias (scale set to 0.0,
655     * * * each value scaling is separate and equal to input.scale * filter.scales[channel]).
656     *
657%/kind
658%insert NHWC_NCHW
659     *
660     * Both explicit padding and implicit padding are supported.
661     *
662     * Inputs (explicit padding):
663     * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
664     *      specifying the input.
665%insert ZeroBatchesNNAPILevel3
666     * * 1: A 4-D tensor, of shape
667     *      [depth_out, filter_height, filter_width, depth_in], specifying the
668     *      filter.
669%kind aidl canonical ndk hal_1.2+
670     *      For tensor of type {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_SYMM_PER_CHANNEL}
671     *      the channel dimension (%{Ann}SymmPerChannelQuantParams::channelDim)
672     *      must be set to 0.
673%/kind
674     * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
675     *      tensor of type {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32}
676     *      %{or_1.2 TENSOR_FLOAT16} the bias must be of the same type.
677%kind aidl canonical ndk hal_1.3+
678     *      For filter tensor of {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM}
679     *      and {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED},
680%else
681     *      For filter tensor of {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM},
682%/kind
683     *      the bias should be of {@link %{OperandTypeLinkPfx}TENSOR_INT32}, with zeroPoint
684     *      of 0 and bias_scale == input_scale * filter_scale.
685%kind aidl canonical ndk hal_1.2+
686     *      For filter tensor of {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_SYMM_PER_CHANNEL},
687     *      the bias should be of {@link %{OperandTypeLinkPfx}TENSOR_INT32}, with zeroPoint of 0
688     *      and bias_scale of 0. The actual scale of each value 'i' is equal to
689     *      bias_scale[i] = input_scale * filter_scale[i].
690%/kind
691     * * 3: An {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the padding on
692     *      the left, in the ‘width’ dimension.
693     * * 4: An {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the padding on
694     *      the right, in the ‘width’ dimension.
695     * * 5: An {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the padding on
696     *      the top, in the ‘height’ dimension.
697     * * 6: An {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the padding on
698     *      the bottom, in the ‘height’ dimension.
699     * * 7: An {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the stride when
700     *      walking through input in the ‘width’ dimension.
701     * * 8: An {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the stride when
702     *      walking through input in the ‘height’ dimension.
703     * * 9: An {@link %{OperandTypeLinkPfx}INT32} scalar, and has to be one of the
704     *      {@link %{FusedActivationFunc}} values. Specifies the activation to
705     *      invoke on the result.
706%kind aidl canonical ndk hal_1.2+
707     * * 10: An optional {@link %{OperandTypeLinkPfx}BOOL} scalar, default to false.
708     *      Set to true to specify NCHW data layout for input0 and output0.
709     *      Available since %{NNAPILevel3}.
710     * * 11: An optional {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the dilation
711     *      factor for width. Defaults to 1. If set to k > 1, there will be k-1 skipped
712     *      cells between each filter element on width dimension. If this input is set,
713     *      input 12 (dilation factor for height) must be specified as well.
714     *      Available since %{NNAPILevel3}.
715     * * 12: An optional {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the dilation
716     *      factor for height. Defaults to 1. If set to k > 1, there will be k-1 skipped
717     *      cells between each filter element on height dimension. If this input is set,
718     *      input 11 (dilation factor for width) must be specified as well.
719     *      Available since %{NNAPILevel3}.
720%/kind
721     *
722     * Inputs (implicit padding):
723     * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
724     *      specifying the input.
725%insert ZeroBatchesNNAPILevel3
726     * * 1: A 4-D tensor, of shape
727     *      [depth_out, filter_height, filter_width, depth_in], specifying the
728     *      filter.
729%kind aidl canonical ndk hal_1.2+
730     *      For tensor of type {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_SYMM_PER_CHANNEL}
731     *      the channel dimension (%{Ann}SymmPerChannelQuantParams::channelDim)
732     *      must be set to 0.
733%/kind
734     * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
735     *      tensor of type {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32}
736     *      %{or_1.2 TENSOR_FLOAT16} the bias must be of the same
737     *      type.
738%kind aidl canonical ndk hal_1.3+
739     *      For filter tensor of {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM}
740     *      and {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED},
741%else
742     *      For filter tensor of {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM},
743%/kind
744     *      the bias should be of {@link %{OperandTypeLinkPfx}TENSOR_INT32}, with zeroPoint
745     *      of 0 and bias_scale == input_scale * filter_scale.
746%kind aidl canonical ndk hal_1.2+
747     *      For filter tensor of {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_SYMM_PER_CHANNEL},
748     *      the bias should be of {@link %{OperandTypeLinkPfx}TENSOR_INT32}, with zeroPoint of 0
749     *      and bias_scale of 0. The actual scale of each value 'i' is equal to
750     *      bias_scale[i] = input_scale * filter_scale[i].
751%/kind
752     * * 3: An {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the implicit
753     *      padding scheme, has to be one of the
754%insert PaddingCodeValues
755     * * 4: An {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the stride when
756     *      walking through input in the ‘width’ dimension.
757     * * 5: An {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the stride when
758     *      walking through input in the ‘height’ dimension.
759     * * 6: An {@link %{OperandTypeLinkPfx}INT32} scalar, and has to be one of the
760     *      {@link %{FusedActivationFunc}} values. Specifies the activation to
761     *      invoke on the result.
762%kind aidl canonical ndk hal_1.2+
763     * * 7: An optional {@link %{OperandTypeLinkPfx}BOOL} scalar, default to false.
764     *      Set to true to specify NCHW data layout for input0 and output0.
765     *      Available since %{NNAPILevel3}.
766     * * 8: An optional {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the dilation
767     *      factor for width. Defaults to 1. If set to k > 1, there will be k-1 skipped
768     *      cells between each filter element on width dimension. If this input is set,
769     *      input 9 (dilation factor for height) must be specified as well.
770     *      Available since %{NNAPILevel3}.
771     * * 9: An optional {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the dilation
772     *      factor for height. Defaults to 1. If set to k > 1, there will be k-1 skipped
773     *      cells between each filter element on height dimension. If this input is set,
774     *      input 8 (dilation factor for width) must be specified as well.
775     *      Available since %{NNAPILevel3}.
776%/kind
777     *
778     * Outputs:
779     * * 0: The output 4-D tensor, of shape
780     *      [batches, out_height, out_width, depth_out].
781     *      %{BeforeNNAPILevel3For} output tensor of
782     *      {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM}, the following condition must
783     *      be satisfied: output_scale > input_scale * filter_scale
784%insert AVAIL1
785     */
786    %{DeclareOperation CONV_2D 3},
787
788    /**
789     * Performs a depthwise 2-D convolution operation.
790     *
791     * Given an input tensor of shape [batches, height, width, depth_in] and a
792     * filter tensor of shape [1, filter_height, filter_width, depth_out]
793     * containing depth_out convolutional filters of depth 1, DEPTHWISE_CONV
794     * applies a different filter to each input channel (expanding from 1
795     * channel to channel_multiplier channels for each), then concatenates the
796     * results together.
797     *
798     * The output has depth_out = depth_in * depth_multiplier channels.
799     * The output dimensions are functions of the filter dimensions, stride, and
800     * padding.
801     *
802     * The values in the output tensor are computed as:
803     *
804     *     output[b, i, j, k * channel_multiplier + q] =
805     *         sum_{di, dj} (
806     *             input[b, strides[1] * i + di, strides[2] * j + dj, k] *
807     *             filter[1, di, dj, k * channel_multiplier + q]
808     *         ) + bias[k * channel_multiplier + q]
809     *
810     * Supported tensor {@link %{OperandType}} configurations:
811     * * 32 bit floating point:
812     * * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32} for input, filter, output, and bias.
813     *
814     * * Quantized:
815     * * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} for input, filter, and output.
816     * * * {@link %{OperandTypeLinkPfx}TENSOR_INT32} for bias (with scale set to
817     * * * input.scale * filter.scale).
818     *
819%kind aidl canonical ndk hal_1.2+
820     * Available since %{NNAPILevel3}:
821     * * 16 bit floating point:
822     * * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16} for input, filter, output, and bias.
823     *
824     * * Quantized with symmetric per channel quantization for the filter:
825     * * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} for input, and output.
826     * * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_SYMM_PER_CHANNEL} for filter.
827     * * * {@link %{OperandTypeLinkPfx}TENSOR_INT32} for bias (scale set to 0.0,
828     * * * each value scaling is separate and equal to input.scale * filter.scales[channel]).
829     *
830%/kind
831%kind aidl canonical ndk hal_1.3+
832     * Available since %{NNAPILevel4}:
833     * * Quantized signed (since %{NNAPILevel4}):
834     * * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} for input, filter, and output.
835     * * * {@link %{OperandTypeLinkPfx}TENSOR_INT32} for bias (with scale set to
836     * * * input.scale * filter.scale).
837     *
838     * * Quantized signed with filter symmetric per channel quantization
839     *   (since %{NNAPILevel4}):
840     * * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} for input, and output.
841     * * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_SYMM_PER_CHANNEL} for filter.
842     * * * {@link %{OperandTypeLinkPfx}TENSOR_INT32} for bias (scale set to 0.0,
843     * * * each value scaling is separate and equal to input.scale * filter.scales[channel]).
844     *
845%/kind
846%insert NHWC_NCHW
847     *
848     * Both explicit padding and implicit padding are supported.
849     *
850     * Inputs (explicit padding):
851     * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
852     *      specifying the input.
853     * * 1: A 4-D tensor, of shape [1, filter_height, filter_width, depth_out],
854     *      specifying the filter.
855%kind aidl canonical ndk hal_1.2+
856     *      For tensor of type {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_SYMM_PER_CHANNEL}
857     *      the channel dimension (%{Ann}SymmPerChannelQuantParams::channelDim)
858     *      must be set to 3.
859%/kind
860     * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
861     *      tensor of type {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32}
862     *      %{or_1.2 TENSOR_FLOAT16} the bias must be of the same type.
863%kind aidl canonical ndk hal_1.3+
864     *      For filter tensor of {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM}
865     *      and {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED},
866%else
867     *      For filter tensor of {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM},
868%/kind
869     *      the bias should be of {@link %{OperandTypeLinkPfx}TENSOR_INT32}, with zeroPoint
870     *      of 0 and bias_scale == input_scale * filter_scale.
871%kind aidl canonical ndk hal_1.2+
872     *      For filter tensor of {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_SYMM_PER_CHANNEL},
873     *      the bias should be of {@link %{OperandTypeLinkPfx}TENSOR_INT32}, with zeroPoint of 0
874     *      and bias_scale of 0. The actual scale of each value 'i' is equal to
875     *      bias_scale[i] = input_scale * filter_scale[i].
876%/kind
877     * * 3: An {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the padding on
878     *      the left, in the ‘width’ dimension.
879     * * 4: An {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the padding on
880     *      the right, in the ‘width’ dimension.
881     * * 5: An {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the padding on
882     *      the top, in the ‘height’ dimension.
883     * * 6: An {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the padding on
884     *      the bottom, in the ‘height’ dimension.
885     * * 7: An {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the stride when
886     *      walking through input in the ‘width’ dimension.
887     * * 8: An {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the stride when
888     *      walking through input in the ‘height’ dimension.
889     * * 9: An {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the depthwise
890     *      multiplier.
891     * * 10: An {@link %{OperandTypeLinkPfx}INT32} scalar, and has to be one of the
892     *       {@link %{FusedActivationFunc}} values. Specifies the activation to
893     *       invoke on the result.
894%kind aidl canonical ndk hal_1.2+
895     * * 11: An optional {@link %{OperandTypeLinkPfx}BOOL} scalar, default to false.
896     *       Set to true to specify NCHW data layout for input0 and output0.
897     *       Available since %{NNAPILevel3}.
898     * * 12: An optional {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the dilation
899     *      factor for width. Defaults to 1. If set to k > 1, there will be k-1 skipped
900     *      cells between each filter element on width dimension. If this input is set,
901     *      input 13 (dilation factor for height) must be specified as well.
902     *      Available since %{NNAPILevel3}.
903     * * 13: An optional {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the dilation
904     *      factor for height. Defaults to 1. If set to k > 1, there will be k-1 skipped
905     *      cells between each filter element on height dimension. If this input is set,
906     *      input 12 (dilation factor for width) must be specified as well.
907     *      Available since %{NNAPILevel3}.
908%/kind
909     *
910     * Inputs (implicit padding):
911     * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
912     *      specifying the input.
913     * * 1: A 4-D tensor, of shape [1, filter_height, filter_width, depth_out],
914     *      specifying the filter.
915     * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
916     *      tensor of type {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32}
917     *      %{or_1.2 TENSOR_FLOAT16} the bias must be of the same type.
918%kind aidl canonical ndk hal_1.3+
919     *      For filter tensor of {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM}
920     *      and {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED},
921%else
922     *      For filter tensor of {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM},
923%/kind
924     *      the bias should be of {@link %{OperandTypeLinkPfx}TENSOR_INT32}, with zeroPoint
925     *      of 0 and bias_scale == input_scale * filter_scale.
926%kind aidl canonical ndk hal_1.2+
927     *      For filter tensor of {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_SYMM_PER_CHANNEL},
928     *      the bias should be of {@link %{OperandTypeLinkPfx}TENSOR_INT32}, with zeroPoint of 0
929     *      and bias_scale of 0. The actual scale of each value 'i' is equal to
930     *      bias_scale[i] = input_scale * filter_scale[i].
931%/kind
932     * * 3: An {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the implicit
933     *      padding scheme, has to be one of the
934%insert PaddingCodeValues
935     * * 4: An {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the stride when
936     *      walking through input in the ‘width’ dimension.
937     * * 5: An {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the stride when
938     *      walking through input in the ‘height’ dimension.
939     * * 6: An {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the depthwise
940     *      multiplier.
941     * * 7: An {@link %{OperandTypeLinkPfx}INT32} scalar, and has to be one of the
942     *      {@link %{FusedActivationFunc}} values. Specifies the activation to
943     *      invoke on the result.
944%kind aidl canonical ndk hal_1.2+
945     * * 8: An optional {@link %{OperandTypeLinkPfx}BOOL} scalar, default to false.
946     *      Set to true to specify NCHW data layout for input0 and output0.
947     *      Available since %{NNAPILevel3}.
948     * * 9: An optional {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the dilation
949     *      factor for width. Defaults to 1. If set to k > 1, there will be k-1 skipped
950     *      cells between each filter element on width dimension. If this input is set,
951     *      input 10 (dilation factor for height) must be specified as well.
952     *      Available since %{NNAPILevel3}.
953     * * 10: An optional {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the dilation
954     *      factor for height. Defaults to 1. If set to k > 1, there will be k-1 skipped
955     *      cells between each filter element on height dimension. If this input is set,
956     *      input 9 (dilation factor for width) must be specified as well.
957     *      Available since %{NNAPILevel3}.
958%/kind
959     *
960     * Outputs:
961     * * 0: The output 4-D tensor, of shape
962     *      [batches, out_height, out_width, depth_out]. %{BeforeNNAPILevel3For}
963     *      output tensor of {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM},
964     *      the following condition must be satisfied:
965     *      output_scale > input_scale * filter_scale
966%insert AVAIL1
967     */
968    %{DeclareOperation DEPTHWISE_CONV_2D 4},
969
970    /**
971     * Rearranges data from depth into blocks of spatial data.
972     *
973     * More specifically, this op outputs a copy of the input tensor where
974     * values from the depth dimension are moved in spatial blocks to the height
975     * and width dimensions. The value block_size indicates the input block size
976     * and how the data is moved.
977     *
978     * Chunks of data of size block_size * block_size from depth are rearranged
979     * into non-overlapping blocks of size block_size x block_size.
980     *
     * The width of the output tensor is input_width * block_size, whereas the
     * height is input_height * block_size. The depth of the input tensor must
     * be divisible by block_size * block_size.
984     *
985     * Supported tensor {@link %{OperandType}}:
986%kind aidl canonical ndk hal_1.2+
987     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16} (since %{NNAPILevel3})
988%/kind
989     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32}
990     * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM}
991%kind aidl canonical ndk hal_1.3+
992     * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{NNAPILevel4})
993%/kind
994     *
995%insert NHWC_NCHW
996     *
997     * Inputs:
998     * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
999     *      specifying the input.
1000     * * 1: An {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the block_size.
1001     *      block_size must be >=1 and block_size * block_size must be a divisor
1002     *      of the input depth.
1003%kind aidl canonical ndk hal_1.2+
1004     * * 2: An optional {@link %{OperandTypeLinkPfx}BOOL} scalar, default to false.
1005     *      Set to true to specify NCHW data layout for input0 and output0.
1006     *      Available since %{NNAPILevel3}.
1007%/kind
1008     *
1009     * Outputs:
1010     * * 0: The output 4-D tensor, of shape [batch, height*block_size,
1011     *      width*block_size, depth/(block_size*block_size)].
1012%kind aidl canonical ndk hal_1.3+
1013     *      For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} and
1014     *      {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} tensor,
1015     *      the scale and zeroPoint must be the same as input0.
1016%else
1017     *      For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} tensor,
1018     *      the scale and zeroPoint must be the same as input0.
1019%/kind
1020%insert AVAIL1
1021     */
1022    %{DeclareOperation DEPTH_TO_SPACE 5},
1023
1024    /**
1025     * Dequantizes the input tensor.
1026     *
1027     * The formula is:
1028     *
1029     *     output = (input - zeroPoint) * scale.
1030     *
1031     * Supported input tensor {@link %{OperandType}}:
1032     * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM}
1033%kind aidl canonical ndk hal_1.2+
1034     * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_SYMM} (since %{NNAPILevel3})
1035     * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_SYMM_PER_CHANNEL} (since %{NNAPILevel3})
1036%/kind
1037%kind aidl canonical ndk hal_1.3+
1038     * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{NNAPILevel4})
1039%/kind
1040     *
1041     * Supported output tensor {@link %{OperandType}}:
1042%kind aidl canonical ndk hal_1.2+
1043     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16} (since %{NNAPILevel3})
1044%/kind
1045     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32}.
1046     *
1047     * Supported tensor rank: up to 4
1048     *
1049     * Inputs:
1050     * * 0: A tensor.
1051%kind aidl canonical ndk hal_1.2+
1052     *      Since %{NNAPILevel3}, this tensor may be zero-sized.
1053%/kind
1054     *
1055     * Outputs:
1056     * * 0: A tensor with the same shape as input0.
1057%insert AVAIL1
1058     */
1059    %{DeclareOperation DEQUANTIZE 6},
1060
1061    /**
1062     * Looks up sub-tensors in the input tensor.
1063     *
1064     * This operator takes for input a tensor of values (Values) and
1065     * a one-dimensional tensor of selection indices (Lookups).
1066     * The output tensor is the concatenation of sub-tensors of Values as
1067     * selected by Lookups.
1068     *
1069     * Think of Values as being sliced along its first dimension:
1070     * The entries in Lookups select which slices are concatenated together
1071     * to create the output tensor.
1072     *
1073     * For example, if Values has shape of [40, 200, 300] and
1074     * Lookups has shape of [3], all three values found in Lookups are
1075     * expected to be between 0 and 39. The resulting tensor must
1076     * have shape of [3, 200, 300].
1077     *
1078     * If a value in Lookups is out of bounds, the operation must fail
1079     * and an error must be reported.
1080     *
1081     * Supported value tensor {@link %{OperandType}}:
1082%kind aidl canonical ndk hal_1.3+
1083     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16} (since %{NNAPILevel4})
1084%/kind
1085     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32}
1086%kind aidl canonical ndk hal_1.2+
1087     * * {@link %{OperandTypeLinkPfx}TENSOR_INT32} (since %{NNAPILevel3})
1088     * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} (since %{NNAPILevel3})
1089%/kind
1090%kind aidl canonical ndk hal_1.3+
1091     * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{NNAPILevel4})
1092%/kind
1093     *
1094     * Supported value tensor rank: from 2
1095     *
1096     * Inputs:
1097     * * 0: Lookups. A 1-D tensor of {@link %{OperandTypeLinkPfx}TENSOR_INT32}.
1098     *      The values are indices into the first dimension of Values.
1099     * * 1: Values. An n-D tensor, where n >= 2, from which sub-tensors are
1100     *      extracted.
1101     *
1102     * Output:
1103     * * 0: A n-D tensor with the same rank and shape as the Values
1104     *      tensor, except for the first dimension which has the same size
1105     *      as Lookups' only dimension.
1106%kind aidl canonical ndk hal_1.3+
1107     *      For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} and
1108     *      {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} tensor,
1109%else
1110     *      For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} tensor,
1111%/kind
1112     *      the scale and zeroPoint must be the same as input1.
1113%insert AVAIL1
1114     */
1115    %{DeclareOperation EMBEDDING_LOOKUP 7},
1116
1117    /**
1118     * Computes element-wise floor() on the input tensor.
1119     *
1120     * Supported tensor {@link %{OperandType}}:
1121%kind aidl canonical ndk hal_1.2+
1122     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16} (since %{NNAPILevel3})
1123%/kind
1124     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32}
1125     *
1126     * Supported tensor rank: up to 4
1127     *
1128     * Inputs:
1129     * * 0: A tensor.
1130     *
1131     * Outputs:
1132     * * 0: The output tensor, of the same {@link %{OperandType}} and dimensions as
1133     *      the input tensor.
1134%insert AVAIL1
1135     */
1136    %{DeclareOperation FLOOR 8},
1137
1138    /**
1139     * Denotes a fully (densely) connected layer, which connects all elements
1140     * in the input tensor with each element in the output tensor.
1141     *
1142     * This layer implements the operation:
1143     *
1144     *     outputs = activation(inputs * weights’ + bias)
1145     *
1146     * Supported tensor {@link %{OperandType}}:
1147%kind aidl canonical ndk hal_1.2+
1148     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16} (since %{NNAPILevel3})
1149%/kind
1150     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32}
1151     * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM}
1152%kind aidl canonical ndk hal_1.3+
1153     * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{NNAPILevel4})
1154%/kind
1155     *
1156     * Supported tensor rank: up to 4.
1157     *
1158     * Inputs:
1159     * * 0: A tensor of at least rank 2, specifying the input. If rank is
1160     *      greater than 2, then it gets flattened to a 2-D Tensor. The
1161     *      (flattened) 2-D Tensor is reshaped (if necessary) to
1162     *      [batch_size, input_size], where "input_size" corresponds to the
1163     *      number of inputs to the layer, matching the second dimension of
1164     *      weights, and "batch_size" is calculated by dividing the number of
1165     *      elements by "input_size".
1166%kind aidl canonical ndk hal_1.2+
1167     *      Since %{NNAPILevel3}, zero batch_size is supported for this tensor.
1168%/kind
1169     * * 1: A 2-D tensor, specifying the weights, of shape
1170     *      [num_units, input_size], where "num_units" corresponds to the number
1171     *      of output nodes.
1172     * * 2: A 1-D tensor, of shape [num_units], specifying the bias. For input
1173     *      tensor of {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32}, the bias should
1174     *      also be of {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32}.
1175%kind aidl canonical ndk hal_1.3+
1176     *      For input tensor of {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM}
1177     *      and {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED},
1178%else
1179     *      For input tensor of {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM},
1180%/kind
1181     *      the bias should be of {@link %{OperandTypeLinkPfx}TENSOR_INT32},
1182     *      with zeroPoint of 0 and bias_scale == input_scale * filter_scale.
1183     * * 3: An {@link %{OperandTypeLinkPfx}INT32} scalar, and has to be one of the
1184     *      {@link %{FusedActivationFunc}} values. Specifies the activation to
1185     *      invoke on the result.
1186     *
1187     * Outputs:
1188     * * 0: The output tensor, of shape [batch_size, num_units]. %{BeforeNNAPILevel3For}
1189     *      output tensor of {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM}, the following
1190     *      condition must be satisfied: output_scale > input_scale * filter_scale.
1191%insert AVAIL1
1192     */
1193    %{DeclareOperation FULLY_CONNECTED 9},
1194
1195    /**
1196     * Looks up sub-tensors in the input tensor using a key-value map.
1197     *
1198     * This operator takes for input a tensor of values (Values),
1199     * a one-dimensional tensor of selection values (Lookups) and
1200     * a one-dimensional tensor that maps these values to Values
1201     * indexes. The output tensor is the concatenation of sub-tensors of
1202     * Values as selected by Lookups via Keys.
1203     *
1204     * Think of Values as being sliced along its outer-most dimension.
1205     * The output is a concatenation of selected slices, with one slice
1206     * for each entry of Lookups. The slice selected is the one at the
1207     * same index as the Maps entry that matches the value in Lookups.
1208     *
1209     * For a hit, the corresponding sub-tensor of Values is included
1210     * in the Output tensor. For a miss, the corresponding sub-tensor in
1211     * Output must have zero values.
1212     *
1213     * For example, if Values has shape of [40, 200, 300],
1214     * Keys should have a shape of [40]. If Lookups tensor has shape
1215     * of [3], three slices are being concatenated, so the resulting tensor
1216     * must have the shape of [3, 200, 300]. If the first entry in Lookups
1217     * has the value 123456, that value must be located in Keys tensor.
1218     * If the sixth entry of Keys contains 123456, the sixth slice of Values
1219     * must be selected. If no entry in Keys has 123456, a slice of zeroes
1220     * must be concatenated.
1221     *
1222     * Supported value tensor {@link %{OperandType}}:
1223     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32}
1224     * * {@link %{OperandTypeLinkPfx}TENSOR_INT32}
1225     * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM}
1226     *
1227     * Supported value tensor rank: from 2
1228     *
1229     * Inputs:
1230     * * 0: Lookups. A 1-D {@link %{OperandTypeLinkPfx}TENSOR_INT32} tensor with
1231     *      shape [ k ].
1232     * * 1: Keys. A 1-D {@link %{OperandTypeLinkPfx}TENSOR_INT32} tensor with shape
1233     *      [ n ]; Keys and Values pair represent a map, i.e., the ith element
1234     *      in Keys (Keys[i]) is the key to select the ith sub-tensor in Values
1235     *      (Values[i]), where 0 <= i <= n-1. Keys tensor *MUST* be sorted in
1236     *      ascending order.
1237     * * 2: Values. A tensor with shape of [ n, … ]; i.e., the first dimension
1238     *      must be n.
1239     *
1240     * Outputs:
1241     * * 0: Output. A tensor with shape [ k …].
1242     *      For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} tensor,
1243     *      the scale and zeroPoint must be the same as input2.
1244     * * 1: Hits. A boolean tensor with shape [ k ] indicates whether the lookup
1245     *      hits (True) or not (False).
1246     *      Stored as {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} with offset 0
1247     *      and scale 1.0f.
1248     *      A non-zero byte represents True, a hit. A zero indicates otherwise.
1249%insert AVAIL1
1250     */
1251    %{DeclareOperation HASHTABLE_LOOKUP 10},
1252
1253    /**
1254     * Applies L2 normalization along the axis dimension.
1255     *
1256     * The values in the output tensor are computed as:
1257     *
1258     *     output[batch, row, col, channel] =
1259     *         input[batch, row, col, channel] /
1260     *         sqrt(sum_{c} pow(input[batch, row, col, c], 2))
1261     *
1262%kind aidl canonical ndk hal_1.2+
1263     * By default the axis dimension is the last dimension of the input tensor.
1264     *
1265%/kind
1266     * Supported tensor {@link %{OperandType}}:
1267%kind aidl canonical ndk hal_1.2+
1268     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16} (since %{NNAPILevel3})
1269%/kind
1270     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32}
1271%kind aidl canonical ndk hal_1.2+
1272     * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} (since %{NNAPILevel3})
1273%/kind
1274%kind aidl canonical ndk hal_1.3+
1275     * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{NNAPILevel4})
1276%/kind
1277     *
1278%kind aidl canonical ndk hal_1.2+
1279     * Supported tensor rank: up to 4
1280     * Tensors with rank less than 4 are only supported since %{NNAPILevel3}.
1281%else
1282     * Supported tensor rank: 4, with "NHWC" data layout (i.e., Num_samples,
1283     * Height, Width, and Channels).
1284%/kind
1285     *
1286     * Inputs:
1287%kind aidl canonical ndk hal_1.2+
1288     * * 0: An n-D tensor, specifying the tensor to be normalized.
1289%else
1290     * * 0: A 4-D tensor, specifying the tensor to be normalized.
1291%/kind
1292%kind aidl canonical ndk hal_1.2+
1293     * * 1: An optional {@link %{OperandTypeLinkPfx}INT32} scalar, default to -1,
1294     *      specifying the dimension normalization would be performed on.
1295     *      Negative index is used to specify axis from the end (e.g. -1 for
1296     *      the last axis). Must be in the range [-n, n).
1297     *      Available since %{NNAPILevel3}.
1298%/kind
1299     *
1300     * Outputs:
1301     * * 0: A tensor of the same {@link %{OperandType}} and same shape as input0.
1302%kind aidl canonical ndk hal_1.2+
1303     *      For {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM},
1304     *      the scale must be 1.f / 128 and the zeroPoint must be 128.
1305%/kind
1306%kind aidl canonical ndk hal_1.3+
1307     *      For {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED},
1308     *      the scale must be 1.f / 128 and the zeroPoint must be 0.
1309     *
1310     *      NOTE: Before %{NNAPILevel4}, if the elements along an axis are all zeros,
1311     *      the result is undefined. Since %{NNAPILevel4}, if the elements along an axis
1312     *      are all zeros, the result is logical zero.
1313%/kind
1314%insert AVAIL1
1315     */
1316    %{DeclareOperation L2_NORMALIZATION 11},
1317
1318    /**
     * Performs a 2-D L2 pooling operation.
1320     *
1321     * The output dimensions are functions of the filter dimensions, stride, and
1322     * padding.
1323     *
1324     * The values in the output tensor are computed as:
1325     *
1326     *     output[b, i, j, c] =
1327     *         sqrt(sum_{di, dj} pow(input[b, strides[1] * i + di, strides[2] * j + dj, c], 2) /
1328     *              sum(1))
1329     *
1330     * Supported tensor {@link %{OperandType}}:
1331%kind aidl canonical ndk hal_1.2+
1332     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16} (since %{NNAPILevel3})
1333%/kind
1334     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32}
1335     *
1336%insert NHWC_NCHW
1337     *
1338     * Both explicit padding and implicit padding are supported.
1339     *
1340     * Inputs (explicit padding):
1341     * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
1342     *      the input.
1343%insert ZeroBatchesNNAPILevel3
     * * 1: An {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the padding on
     *      the left, in the ‘width’ dimension.
     * * 2: An {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the padding on
     *      the right, in the ‘width’ dimension.
     * * 3: An {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the padding on
     *      the top, in the ‘height’ dimension.
     * * 4: An {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the padding on
     *      the bottom, in the ‘height’ dimension.
     * * 5: An {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the stride when
     *      walking through input in the ‘width’ dimension.
     * * 6: An {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the stride when
     *      walking through input in the ‘height’ dimension.
1356     * * 7: An {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the filter
1357     *      width.
1358     * * 8: An {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the filter
1359     *      height.
1360     * * 9: An {@link %{OperandTypeLinkPfx}INT32} scalar, and has to be one of the
1361     *      {@link %{FusedActivationFunc}} values. Specifies the activation to
1362     *      invoke on the result.
1363%kind aidl canonical ndk hal_1.2+
1364     * * 10: An optional {@link %{OperandTypeLinkPfx}BOOL} scalar, default to false.
1365     *       Set to true to specify NCHW data layout for input0 and output0.
1366     *       Available since %{NNAPILevel3}.
1367%/kind
1368     *
1369     * Inputs (implicit padding):
1370     * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
1371     *      the input.
1372%insert ZeroBatchesNNAPILevel3
1373     * * 1: An {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the implicit
1374     *      padding scheme, has to be one of the
1375%insert PaddingCodeValues
     * * 2: An {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the stride when
     *      walking through input in the ‘width’ dimension.
     * * 3: An {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the stride when
     *      walking through input in the ‘height’ dimension.
1380     * * 4: An {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the filter
1381     *      width.
1382     * * 5: An {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the filter
1383     *      height.
1384     * * 6: An {@link %{OperandTypeLinkPfx}INT32} scalar, and has to be one of the
1385     *      {@link %{FusedActivationFunc}} values. Specifies the activation to
1386     *      invoke on the result.
1387%kind aidl canonical ndk hal_1.2+
1388     * * 7: An optional {@link %{OperandTypeLinkPfx}BOOL} scalar, default to false.
1389     *      Set to true to specify NCHW data layout for input0 and output0.
1390     *      Available since %{NNAPILevel3}.
1391%/kind
1392     *
1393     * Outputs:
1394     * * 0: The output 4-D tensor, of shape
1395     *      [batches, out_height, out_width, depth].
1396%insert AVAIL1
1397     */
1398    %{DeclareOperation L2_POOL_2D 12},
1399
1400    /**
1401     * Applies Local Response Normalization along the depth dimension.
1402     *
1403     * The 4-D input tensor is treated as a 3-D array of 1-D vectors (along the
1404     * last dimension), and each vector is normalized independently. Within a
1405     * given vector, each component is divided by the weighted, squared sum of
1406     * inputs within depth_radius.
1407     *
1408     * The output is calculated using this formula:
1409     *
1410     *     sqr_sum[a, b, c, d] = sum(
1411     *         pow(input[a, b, c, d - depth_radius : d + depth_radius + 1], 2))
1412     *     output = input / pow((bias + alpha * sqr_sum), beta)
1413     *
1414%kind aidl canonical ndk hal_1.2+
1415     * For input tensor with rank less than 4, independently normalizes each
1416     * 1-D slice along specified dimension.
1417     *
1418%/kind
1419     * Supported tensor {@link %{OperandType}}:
1420%kind aidl canonical ndk hal_1.2+
1421     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16} (since %{NNAPILevel3})
1422%/kind
1423     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32}
1424     *
1425%kind aidl canonical ndk hal_1.2+
1426     * Supported tensor rank: up to 4
1427     * Tensors with rank less than 4 are only supported since %{NNAPILevel3}.
1428%else
1429     * Supported tensor rank: 4, with "NHWC" data layout.
1430%/kind
1431     *
1432     * Inputs:
1433     * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
1434     *      the input.
1435     * * 1: An {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the radius of
1436     *      the normalization window.
1437     * * 2: A scalar, specifying the bias, must not be zero.
1438%kind aidl canonical ndk hal_1.2+
1439     *      For input tensor of {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16}, the bias
1440     *      value must be of {@link %{OperandTypeLinkPfx}FLOAT16}.
1441%/kind
1442     *      For input tensor of {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32}, the bias
1443     *      value must be of {@link %{OperandTypeLinkPfx}FLOAT32}.
1444     * * 3: A scalar, specifying the scale factor, alpha.
1445%kind aidl canonical ndk hal_1.2+
1446     *      For input tensor of {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16}, the
1447     *      alpha value must be of {@link %{OperandTypeLinkPfx}FLOAT16}.
1448%/kind
1449     *      For input tensor of {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32}, the
1450     *      alpha value must be of {@link %{OperandTypeLinkPfx}FLOAT32}.
1451     * * 4: A scalar, specifying the exponent, beta.
1452%kind aidl canonical ndk hal_1.2+
1453     *      For input tensor of {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16}, the beta
1454     *      value must be of {@link %{OperandTypeLinkPfx}FLOAT16}.
1455%/kind
1456     *      For input tensor of {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32}, the beta
1457     *      value must be of {@link %{OperandTypeLinkPfx}FLOAT32}.
1458%kind aidl canonical ndk hal_1.2+
1459     * * 5: An optional {@link %{OperandTypeLinkPfx}INT32} scalar, default to -1,
1460     *      specifying the dimension normalization would be performed on.
1461     *      Negative index is used to specify axis from the end (e.g. -1 for
1462     *      the last axis). Must be in the range [-n, n).
1463     *      Available since %{NNAPILevel3}.
1464%/kind
1465     *
1466     * Outputs:
1467     * * 0: The output tensor of same shape as input0.
1468%insert AVAIL1
1469     */
1470    %{DeclareOperation LOCAL_RESPONSE_NORMALIZATION 13},
1471
1472    /**
1473     * Computes sigmoid activation on the input tensor element-wise.
1474     *
1475     * The output is calculated using this formula:
1476     *
1477     *     output = 1 / (1 + exp(-input))
1478     *
1479     * Supported tensor {@link %{OperandType}}:
1480%kind aidl canonical ndk hal_1.2+
1481     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16} (since %{NNAPILevel3})
1482%/kind
1483     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32}
1484     * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM}
1485%kind aidl canonical ndk hal_1.3+
1486     * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{NNAPILevel4})
1487%/kind
1488     *
1489     * Supported tensor rank: up to 4.
1490     *
1491     * Inputs:
1492     * * 0: A tensor, specifying the input.
1493%kind aidl canonical ndk hal_1.2+
1494     *      Since %{NNAPILevel3}, this tensor may be zero-sized.
1495%/kind
1496     *
1497     * Outputs:
1498     * * 0: The output tensor of same shape as input0.
1499     *      For {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM},
1500     *      the scale must be 1.f / 256 and the zeroPoint must be 0.
1501%kind aidl canonical ndk hal_1.3+
1502     *      For {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED},
1503     *      the scale must be 1.f / 256 and the zeroPoint must be -128.
1504%/kind
1505%insert AVAIL1
1506     */
1507    %{DeclareOperation LOGISTIC 14},
1508
1509    /**
1510     * Projects an input to a bit vector via locality sensitive hashing.
1511     *
1512     * Supported input tensor {@link %{OperandType}}:
1513%kind aidl canonical ndk hal_1.2+
1514     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16} (since %{NNAPILevel3})
1515%/kind
1516     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32}
1517     * * {@link %{OperandTypeLinkPfx}TENSOR_INT32}
1518     * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM}
1519     *
1520     * Supported input tensor rank: from 1
1521     *
1522     * Inputs:
1523     * * 0: Hash functions. Dim.size == 2, DataType: Float.
1524     *      Tensor[0].Dim[0]: Number of hash functions.
1525     *      Tensor[0].Dim[1]: Number of projected output bits generated by each
1526     *      hash function.
1527     *      If the projection type is Sparse:
1528     *      Tensor[0].Dim[1] + ceil(log2(Tensor[0].Dim[0])) <= 32
1529     *
1530     * * 1: Input. Dim.size >= 1, no restriction on DataType.
1531     * * 2: Weight. Optional. Dim.size == 1, DataType: Float.
1532     *      If not set, each input element is considered to have the same weight
1533     *      of 1.0.
1534     *      Tensor[1].Dim[0] == Tensor[2].Dim[0]
1535     * * 3: Type:
1536     *        Sparse:
1537%kind aidl canonical ndk hal_1.2+
1538     *          Value LSHProjectionType_SPARSE(=3) (since %{NNAPILevel3}).
1539%else
1540     *          Value LSHProjectionType_SPARSE(=1).
1541%/kind
1542     *          Computed bit vector is considered to be sparse.
1543     *          Each output element is an int32 made up of multiple bits
1544     *          computed from hash functions.
1545     *
1546%kind aidl canonical ndk hal_1.2+
1547     *          NOTE: To avoid collisions across hash functions, an offset value
1548     *          of k * (1 << Tensor[0].Dim[1]) will be added to each signature,
1549     *          where k is the index of the hash function.
1550     *
1551     *          Value LSHProjectionType_SPARSE_DEPRECATED(=1).
1552     *          Legacy behavior that does not include the offset value.
1553     *
1554%/kind
1555     *        Dense:
1556     *          Value LSHProjectionType_DENSE(=2).
1557     *          Computed bit vector is considered to be dense. Each output
1558     *          element represents a bit and can take the value of either
1559     *          0 or 1.
1560     *
1561     * Outputs:
1562     * * 0: If the projection type is Sparse:
1563     *      Output.Dim == { Tensor[0].Dim[0] }
1564     *      A tensor of int32 that represents hash signatures.
1565     *
1566     *      If the projection type is Dense:
1567     *      Output.Dim == { Tensor[0].Dim[0] * Tensor[0].Dim[1] }
1568     *      A flattened tensor that represents projected bit vectors.
1569%insert AVAIL1
1570%kind aidl canonical ndk hal_1.2+
1571     * The offset value for sparse projections was added in %{NNAPILevel3}.
1572%/kind
1573     */
1574    %{DeclareOperation LSH_PROJECTION 15},
1575
1576    /**
1577     * Performs a single time step in a Long Short-Term Memory (LSTM) layer
1578     *
1579     * The LSTM operation is described by the following equations.
1580     *
1581     * \f{eqnarray*}{
1582     * i_t =& \sigma(W_{xi}x_t+W_{hi}h_{t-1}+W_{ci}C_{t-1}+b_i) & \\
1583     * f_t =& \sigma(W_{xf}x_t+W_{hf}h_{t-1}+W_{cf}C_{t-1}+b_f) & \\
1584     * C_t =& clip(f_t \odot C_{t-1} + i_t \odot
1585     *        g(W_{xc}x_t+W_{hc}h_{t-1}+b_c),\ t_{cell}) & \\
1586     * o_t =& \sigma(W_{xo}x_t+W_{ho}h_{t-1}+W_{co}C_t+b_o) & \\
1587     *      & & \\
1588     *      & clip(W_{proj}(o_t \odot g(C_t))+b_{proj},\ t_{proj})
1589     *      & if\ there\ is\ a\ projection; \\
1590     * h_t =& & \\
1591     *      & o_t \odot g(C_t) & otherwise. \\
1592     * \f}
1593     * Where:
1594     * * \f$x_t\f$ is the input,
1595     * * \f$i_t\f$ is the input gate,
1596     * * \f$f_t\f$ is the forget gate,
1597     * * \f$C_t\f$ is the cell state,
1598     * * \f$o_t\f$ is the output,
1599     * * \f$h_t\f$ is the output state,
1600     * * \f$\sigma\f$ is the logistic sigmoid function,
1601     * * \f$g\f$ is the cell input and cell output activation function, usually
1602     *   \f$tanh\f$,
1603     * * \f$W_{xi}\f$ is the input-to-input weight matrix,
1604     * * \f$W_{hi}\f$ is the recurrent to input weight matrix,
1605     * * \f$W_{ci}\f$ is the cell-to-input weight matrix,
1606     * * \f$b_i\f$ is the input gate bias,
1607     * * \f$W_{xf}\f$ is the input-to-forget weight matrix,
1608     * * \f$W_{hf}\f$ is the recurrent-to-forget weight matrix,
1609     * * \f$W_{cf}\f$ is the cell-to-forget weight matrix,
1610     * * \f$b_f\f$ is the forget gate bias,
1611     * * \f$W_{xc}\f$ is the input-to-cell weight matrix,
1612     * * \f$W_{hc}\f$ is the recurrent-to-cell weight matrix,
1613     * * \f$b_c\f$ is the cell bias,
1614     * * \f$W_{xo}\f$ is the input-to-output weight matrix,
1615     * * \f$W_{ho}\f$ is the recurrent-to-output weight matrix,
1616     * * \f$W_{co}\f$ is the cell-to-output weight matrix,
1617     * * \f$b_o\f$ is the output gate bias,
1618     * * \f$W_{proj}\f$ is the projection weight matrix,
1619     * * \f$b_{proj}\f$ is the projection bias,
1620     * * \f$t_{cell}\f$ is the threshold for clipping the cell state, and
1621     * * \f$t_{proj}\f$ is the threshold for clipping the projected output.
1622     * * \f$\odot\f$ is the
1623     *   <a href="https://en.wikipedia.org/wiki/Hadamard_product_(matrices)">
1624     *   Hadamard product</a> that takes two matrices and produces another
1625     *   matrix, each element of which is the product of the corresponding
1626     *   elements of the input matrices.
1627     *
1628%kind aidl canonical ndk hal_1.2+
1629     * Since %{NNAPILevel3} LSTM supports layer normalization.
1630     * In case layer normalization is used, the inputs to internal activation
1631     * functions (sigmoid and \f$g\f$) are normalized, rescaled and recentered
1632     * following an approach from section 3.1 from
1633     * https://arxiv.org/pdf/1607.06450.pdf
1634     *
1635%/kind
1636     * The operation has the following independently optional inputs:
1637     * * The cell-to-input weights (\f$W_{ci}\f$), cell-to-forget weights
1638     *   (\f$W_{cf}\f$) and cell-to-output weights (\f$W_{co}\f$) either all
1639     *   have values or neither of them have values (i.e., all set to null). If
1640     *   they have values, the peephole optimization is used.
1641     * * The input-to-input weights (\f$W_{xi}\f$), recurrent-to-input weights
1642     *   (\f$W_{hi}\f$) and input gate bias (\f$b_i\f$) either all have values,
1643     *   or none of them have values. If they have no values, coupling of input
1644     *   and forget gates (CIFG) is used, in which case the input gate
1645     *   (\f$i_t\f$) is calculated using the following equation instead.
1646     *   \f{eqnarray*}{
1647     *   i_t = 1 - f_t
1648     *   \f}
1649     *   In case peephole optimization is used and CIFG is not used
1650     *   cell-to-input (\f$W_{ci}\f$) weights must be present. Otherwise, the
1651     *   cell-to-input weights must have no value.
1652     * * The projection weights (\f$W_{proj}\f$) is required only for the
1653     *   recurrent projection layer, and should otherwise have no value.
1654     * * The projection bias (\f$b_{proj}\f$) may (but not required to) have a
1655     *   value if the recurrent projection layer exists, and should otherwise
1656     *   have no value.
1657%kind aidl canonical ndk hal_1.2+
1658     * * (%{NNAPILevel3} or later) The four layer normalization weights either all have
1659     *   values or none of them have values. Additionally, if CIFG is used,
1660     *   input layer normalization weights tensor is omitted and the other layer
1661     *   normalization weights either all have values or none of them have
1662     *   values. Layer normalization is used when the values of all the layer
1663     *   normalization weights are present.
1664%/kind
1665     *
1666     * References:
1667     *
1668     * The default non-peephole non-CIFG implementation is based on:
1669     * http://www.bioinf.jku.at/publications/older/2604.pdf
1670     * S. Hochreiter and J. Schmidhuber. "Long Short-Term Memory". Neural
1671     * Computation, 9(8):1735-1780, 1997.
1672     *
1673     * The peephole implementation and projection layer is based on:
1674     * https://research.google.com/pubs/archive/43905.pdf
1675     * Hasim Sak, Andrew Senior, and Francoise Beaufays. "Long short-term memory
1676     * recurrent neural network architectures for large scale acoustic
1677     * modeling." INTERSPEECH, 2014.
1678     * (However, the concept of peephole optimization was introduced in work
1679     * prior to this paper.)
1680     *
1681     * The coupling of input and forget gate (CIFG) is based on:
1682     * http://arxiv.org/pdf/1503.04069.pdf
1683     * Greff et al. "LSTM: A Search Space Odyssey"
1684     *
1685%kind aidl canonical ndk hal_1.2+
1686     * The layer normalization is based on:
1687     * https://arxiv.org/pdf/1607.06450.pdf
1688     * Jimmy Ba et al. "Layer Normalization"
1689     *
1690%/kind
1691     * Supported tensor {@link %{OperandType}}:
1692%kind aidl canonical ndk hal_1.2+
1693     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16} (since %{NNAPILevel3})
1694%/kind
1695     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32}
1696     *
1697     * All input and output tensors must be of the same type.
1698     *
1699     * Inputs:
1700     * * 0: The input (\f$x_t\f$).
1701     *      A 2-D tensor of shape [batch_size, input_size], where “batch_size”
1702     *      corresponds to the batching dimension, and “input_size” is the size
1703     *      of the input.
1704     * * 1: The input-to-input weights (\f$W_{xi}\f$). Optional.
1705     *      A 2-D tensor of shape [num_units, input_size], where “num_units”
1706     *      corresponds to the number of cell units.
1707     * * 2: The input-to-forget weights (\f$W_{xf}\f$).
1708     *      A 2-D tensor of shape [num_units, input_size].
1709     * * 3: The input-to-cell weights (\f$W_{xc}\f$).
1710     *      A 2-D tensor of shape [num_units, input_size].
1711     * * 4: The input-to-output weights (\f$W_{xo}\f$).
1712     *      A 2-D tensor of shape [num_units, input_size].
1713     * * 5: The recurrent-to-input weights (\f$W_{hi}\f$). Optional.
1714     *      A 2-D tensor of shape [num_units, output_size], where “output_size”
1715     *      corresponds to either the number of cell units (i.e., “num_units”),
1716     *      or the second dimension of the “projection_weights”, if defined.
1717     * * 6: The recurrent-to-forget weights (\f$W_{hf}\f$).
1718     *      A 2-D tensor of shape [num_units, output_size].
1719     * * 7: The recurrent-to-cell weights (\f$W_{hc}\f$).
1720     *      A 2-D tensor of shape [num_units, output_size].
1721     * * 8: The recurrent-to-output weights (\f$W_{ho}\f$).
1722     *      A 2-D tensor of shape [num_units, output_size].
1723     * * 9: The cell-to-input weights (\f$W_{ci}\f$). Optional.
1724     *      A 1-D tensor of shape [num_units].
1725     * * 10:The cell-to-forget weights (\f$W_{cf}\f$). Optional.
1726     *      A 1-D tensor of shape [num_units].
1727     * * 11:The cell-to-output weights (\f$W_{co}\f$). Optional.
1728     *      A 1-D tensor of shape [num_units].
1729     * * 12:The input gate bias (\f$b_i\f$). Optional.
1730     *      A 1-D tensor of shape [num_units].
1731     * * 13:The forget gate bias (\f$b_f\f$).
1732     *      A 1-D tensor of shape [num_units].
1733     * * 14:The cell bias (\f$b_c\f$).
1734     *      A 1-D tensor of shape [num_units].
1735     * * 15:The output gate bias (\f$b_o\f$).
1736     *      A 1-D tensor of shape [num_units].
1737     * * 16:The projection weights (\f$W_{proj}\f$). Optional.
1738     *      A 2-D tensor of shape [output_size, num_units].
1739     * * 17:The projection bias (\f$b_{proj}\f$). Optional.
1740     *      A 1-D tensor of shape [output_size].
1741     * * 18:The output state (in) (\f$h_{t-1}\f$).
1742     *      A 2-D tensor of shape [batch_size, output_size].
1743     * * 19:The cell state (in) (\f$C_{t-1}\f$).
1744     *      A 2-D tensor of shape [batch_size, num_units].
1745     * * 20:The activation function (\f$g\f$).
1746     *      A value indicating the activation function:
1747     *      <ul>
1748     *      <li>0: None;
1749     *      <li>1: Relu;
1750     *      <li>3: Relu6;
1751     *      <li>4: Tanh;
1752     *      <li>6: Sigmoid.
1753     *      </ul>
1754     * * 21:The clipping threshold (\f$t_{cell}\f$) for the cell state, such
1755     *      that values are bound within [-cell_clip, cell_clip]. If set to 0.0
1756     *      then clipping is disabled.
1757%kind aidl canonical ndk hal_1.2+
1758     *      Until %{NNAPILevel3} this scalar must be of type {@link
1759     *      %{OperandTypeLinkPfx}FLOAT32}. Since %{NNAPILevel3}, if all the input
1760     *      tensors have type {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32}, this
1761     *      scalar must be of the type {@link %{OperandTypeLinkPfx}FLOAT32},
1762     *      otherwise if all the input tensors have the type {@link
1763     *      %{OperandTypeLinkPfx}TENSOR_FLOAT16}, this scalar must be of type {@link
1764     *      %{OperandTypeLinkPfx}FLOAT16}.
1765%/kind
1766     * * 22:The clipping threshold (\f$t_{proj}\f$) for the output from the
1767     *      projection layer, such that values are bound within
1768     *      [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
1769%kind aidl canonical ndk hal_1.2+
1770     *      Until %{NNAPILevel3} this scalar must be of type {@link
1771     *      %{OperandTypeLinkPfx}FLOAT32}. Since %{NNAPILevel3}, if all the input
1772     *      tensors have type {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32}, this
1773     *      scalar must be of the type {@link %{OperandTypeLinkPfx}FLOAT32},
1774     *      otherwise if all the input tensors have the type {@link
1775     *      %{OperandTypeLinkPfx}TENSOR_FLOAT16}, this scalar must be of type {@link
1776     *      %{OperandTypeLinkPfx}FLOAT16}.
1777     * Since %{NNAPILevel3} there are additional inputs to this op:
1778     * * 23:The input layer normalization weights.
1779     *      A 1-D tensor of shape [num_units]. Used to rescale normalized inputs
1780     *      to activation at input gate.
1781     * * 24:The forget layer normalization weights.
1782     *      A 1-D tensor of shape [num_units]. Used to rescale normalized inputs
1783     *      to activation at forget gate.
1784     * * 25:The cell layer normalization weights.
1785     *      A 1-D tensor of shape [num_units]. Used to rescale normalized inputs
1786     *      to activation at cell gate.
1787     * * 26:The output layer normalization weights.
1788     *      A 1-D tensor of shape [num_units]. Used to rescale normalized inputs
1789     *      to activation at output gate.
1790%/kind
1791     *
1792     * Outputs:
1793     * * 0: The scratch buffer.
1794     *      A 2-D tensor of shape [batch_size, num_units * 3] with CIFG, or
1795     *      [batch_size, num_units * 4] without CIFG.
1796     * * 1: The output state (out) (\f$h_t\f$).
1797     *      A 2-D tensor of shape [batch_size, output_size].
1798     * * 2: The cell state (out) (\f$C_t\f$).
1799     *      A 2-D tensor of shape [batch_size, num_units].
1800     * * 3: The output (\f$o_t\f$).
1801     *      A 2-D tensor of shape [batch_size, output_size]. This is effectively
1802     *      the same as the current “output state (out)” value.
1803%insert AVAIL1
1804     */
1805    %{DeclareOperation LSTM 16},
1806
1807    /**
1808     * Performs a 2-D max pooling operation.
1809     *
1810     * The output dimensions are functions of the filter dimensions, stride, and
1811     * padding.
1812     *
1813     * The values in the output tensor are computed as:
1814     *
1815     *     output[b, i, j, channel] =
1816     *         max_{di, dj} (
1817     *             input[b, strides[1] * i + di, strides[2] * j + dj, channel]
1818     *         )
1819     *
1820     * Supported tensor {@link %{OperandType}}:
1821%kind aidl canonical ndk hal_1.2+
1822     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16} (since %{NNAPILevel3})
1823%/kind
1824     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32}
1825     * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM}
1826%kind aidl canonical ndk hal_1.3+
1827     * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{NNAPILevel4})
1828%/kind
1829     *
1830%insert NHWC_NCHW
1831     *
1832     * Both explicit padding and implicit padding are supported.
1833     *
1834     * Inputs (explicit padding):
1835     * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
1836     *      the input.
1837%insert ZeroBatchesNNAPILevel3
1838     * * 1: An {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the padding on
1839     *      the left, in the ‘width’ dimension.
1840     * * 2: An {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the padding on
1841     *      the right, in the ‘width’ dimension.
1842     * * 3: An {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the padding on
1843     *      the top, in the ‘height’ dimension.
1844     * * 4: An {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the padding on
1845     *      the bottom, in the ‘height’ dimension.
1846     * * 5: An {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the stride when
1847     *      walking through input in the ‘width’ dimension.
1848     * * 6: An {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the stride when
1849     *      walking through input in the ‘height’ dimension.
1850     * * 7: An {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the filter
1851     *      width.
1852     * * 8: An {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the filter
1853     *      height.
1854     * * 9: An {@link %{OperandTypeLinkPfx}INT32} scalar, and has to be one of the
1855     *      {@link %{FusedActivationFunc}} values. Specifies the activation to
1856     *      invoke on the result.
1857%kind aidl canonical ndk hal_1.2+
1858     * * 10: An optional {@link %{OperandTypeLinkPfx}BOOL} scalar, default to false.
1859     *       Set to true to specify NCHW data layout for input0 and output0.
1860     *       Available since %{NNAPILevel3}.
1861%/kind
1862     *
1863     * Inputs (implicit padding):
1864     * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
1865     *      the input.
1866%insert ZeroBatchesNNAPILevel3
1867     * * 1: An {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the implicit
1868     *      padding scheme, has to be one of the
1869%insert PaddingCodeValues
1870     * * 2: An {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the stride when
1871     *      walking through input in the ‘width’ dimension.
1872     * * 3: An {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the stride when
1873     *      walking through input in the ‘height’ dimension.
1874     * * 4: An {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the filter
1875     *      width.
1876     * * 5: An {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the filter
1877     *      height.
1878     * * 6: An {@link %{OperandTypeLinkPfx}INT32} scalar, and has to be one of the
1879     *      {@link %{FusedActivationFunc}} values. Specifies the activation to
1880     *      invoke on the result.
1881%kind aidl canonical ndk hal_1.2+
1882     * * 7: An optional {@link %{OperandTypeLinkPfx}BOOL} scalar, default to false.
1883     *      Set to true to specify NCHW data layout for input0 and output0.
1884     *      Available since %{NNAPILevel3}.
1885%/kind
1886     *
1887     * Outputs:
1888     * * 0: The output 4-D tensor, of shape
1889     *      [batches, out_height, out_width, depth].
1890%kind aidl canonical ndk hal_1.3+
1891     *      For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} and
1892     *      {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} tensor,
1893     *      the scale and zeroPoint must be the same as input0.
1894%else
1895     *      For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} tensor,
1896     *      the scale and zeroPoint must be the same as input0.
1897%/kind
1898%insert AVAIL1
1899     */
1900    %{DeclareOperation MAX_POOL_2D 17},
1901
1902    /**
1903     * Multiplies two tensors, element-wise.
1904     *
1905     * Takes two input tensors of identical {@link %{OperandType}} and compatible
1906     * dimensions. The output is the product of both input tensors, optionally
1907     * modified by an activation function.
1908     *
1909     * Two dimensions are compatible when:
1910     *     1. they are equal, or
1911     *     2. one of them is 1
1912     *
1913     * The size of the resulting output is the maximum size along each dimension
1914     * of the input operands. It starts with the trailing dimensions, and works
1915     * its way forward.
1916     *
1917%insert GenericZero
1918     * Supported tensor {@link %{OperandType}}:
1919%kind aidl canonical ndk hal_1.2+
1920     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16} (since %{NNAPILevel3})
1921%/kind
1922     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32}
1923     * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM}
1924%kind aidl canonical ndk hal_1.3+
1925     * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{NNAPILevel4})
1926     * * {@link %{OperandTypeLinkPfx}TENSOR_INT32} (since %{NNAPILevel4})
1927%/kind
1928     *
1929     * Supported tensor rank: up to 4
1930     *
1931     * Inputs:
1932     * * 0: A tensor.
1933     * * 1: A tensor of the same {@link %{OperandType}}, and compatible dimensions
1934     *      as input0.
1935     * * 2: An {@link %{OperandTypeLinkPfx}INT32} scalar, and has to be one of the
1936     *      {@link %{FusedActivationFunc}} values. Specifies the activation to
1937     *      invoke on the result.
1938%kind aidl canonical ndk hal_1.3+
1939     *      For a {@link %{OperandTypeLinkPfx}TENSOR_INT32} tensor,
1940     *      the {@link %{FusedActivationFunc}} must be "NONE".
1941%/kind
1942     *
1943     * Outputs:
1944     * * 0: The product, a tensor of the same {@link %{OperandType}} as input0.
1945%kind aidl canonical ndk hal_1.3+
1946     *      For output tensor of {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM}
1947     *      and {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED},
1948     *      the following condition must be satisfied:
1949     *      output_scale > input1_scale * input2_scale.
1950%else
1951     *      For output tensor of {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM},
1952     *      the following condition must be satisfied:
1953     *      output_scale > input1_scale * input2_scale.
1954%/kind
1955%insert AVAIL1
1956     */
1957    %{DeclareOperation MUL 18},
1958
1959    /**
1960     * Computes rectified linear activation on the input tensor element-wise.
1961     *
1962     * The output is calculated using this formula:
1963     *
1964     *     output = max(0, input)
1965     *
1966     * Supported tensor {@link %{OperandType}}:
1967%kind aidl canonical ndk hal_1.2+
1968     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16} (since %{NNAPILevel3})
1969%/kind
1970     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32}
1971     * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM}
1972%kind aidl canonical ndk hal_1.3+
1973     * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{NNAPILevel4})
1974%/kind
1975     *
1976     * Supported tensor rank: up to 4.
1977     *
1978     * Inputs:
1979     * * 0: A tensor, specifying the input.
1980%kind aidl canonical ndk hal_1.2+
1981     *      Since %{NNAPILevel3}, this tensor may be zero-sized.
1982%/kind
1983     *
1984     * Outputs:
1985     * * 0: The output tensor of same shape as input0.
1986%kind aidl canonical ndk hal_1.3+
1987     *      For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} and
1988     *      {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} tensor,
1989     *      the scale and zeroPoint must be the same as input0.
1990%else
1991     *      For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} tensor,
1992     *      the scale and zeroPoint must be the same as input0.
1993%/kind
1994%insert AVAIL1
1995     */
1996    %{DeclareOperation RELU 19},
1997
1998    /**
1999     * Computes rectified linear 1 activation on the input tensor element-wise.
2000     *
2001     * The output is calculated using this formula:
2002     *
2003     *     output = min(1.f, max(-1.f, input))
2004     *
2005     * Supported tensor {@link %{OperandType}}:
2006%kind aidl canonical ndk hal_1.2+
2007     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16} (since %{NNAPILevel3})
2008%/kind
2009     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32}
2010     * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM}
2011%kind aidl canonical ndk hal_1.3+
2012     * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{NNAPILevel4})
2013%/kind
2014     *
2015     * Supported tensor rank: up to 4.
2016     *
2017     * Inputs:
2018     * * 0: A tensor, specifying the input.
2019%kind aidl canonical ndk hal_1.2+
2020     *      Since %{NNAPILevel3}, this tensor may be zero-sized.
2021%/kind
2022     *
2023     * Outputs:
2024     * * 0: The output tensor of the same shape as input0.
2025%kind aidl canonical ndk hal_1.3+
2026     *      For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} and
2027     *      {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} tensor,
2028     *      the scale and zeroPoint must be the same as input0.
2029%else
2030     *      For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} tensor,
2031     *      the scale and zeroPoint must be the same as input0.
2032%/kind
2033%insert AVAIL1
2034     */
2035    %{DeclareOperation RELU1 20},
2036
2037    /**
2038     * Computes rectified linear 6 activation on the input tensor element-wise.
2039     *
2040     * The output is calculated using this formula:
2041     *
2042     *     output = min(6, max(0, input))
2043     *
2044     * Supported tensor {@link %{OperandType}}:
2045%kind aidl canonical ndk hal_1.2+
2046     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16} (since %{NNAPILevel3})
2047%/kind
2048     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32}
2049     * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM}
2050%kind aidl canonical ndk hal_1.3+
2051     * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{NNAPILevel4})
2052%/kind
2053     *
2054     * Supported tensor rank: up to 4.
2055     *
2056     * Inputs:
2057     * * 0: A tensor, specifying the input.
2058%kind aidl canonical ndk hal_1.2+
2059     *      Since %{NNAPILevel3}, this tensor may be zero-sized.
2060%/kind
2061     *
2062     * Outputs:
2063     * * 0: The output tensor of same shape as input0.
2064%kind aidl canonical ndk hal_1.3+
2065     *      For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} and
2066     *      {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} tensor,
2067     *      the scale and zeroPoint must be the same as input0.
2068%else
2069     *      For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} tensor,
2070     *      the scale and zeroPoint must be the same as input0.
2071%/kind
2072%insert AVAIL1
2073     */
2074    %{DeclareOperation RELU6 21},
2075
2076    /**
2077     * Reshapes a tensor.
2078     *
2079     * Given a tensor, this operation returns a tensor that has the same values
2080     * as the input tensor, but with a newly specified shape.
2081     *
2082     * Supported tensor {@link %{OperandType}}:
2083%kind aidl canonical ndk hal_1.2+
2084     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16} (since %{NNAPILevel3})
2085%/kind
2086     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32}
2087     * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM}
2088%kind aidl canonical ndk hal_1.3+
2089     * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{NNAPILevel4})
2090%/kind
2091%kind aidl canonical ndk
2092     * * {@link %{OperandTypeLinkPfx}TENSOR_INT32} (since %{NNAPILevel6})
2093%/kind
2094     *
2095     * Supported tensor rank: up to 4.
2096     *
2097     * Inputs:
2098     * * 0: A tensor, specifying the tensor to be reshaped.
2099     * * 1: A 1-D tensor of {@link %{OperandTypeLinkPfx}TENSOR_INT32}, defining the
2100     *      shape of the output tensor. The number of elements implied by shape
2101     *      must be the same as the number of elements in the input tensor.
2102     *
2103     *      If one component of shape is the special value -1, the size of that
2104     *      dimension is computed so that the total size remains constant. In
2105     *      particular, a shape of [-1] flattens into 1-D. At most one component
2106     *      of shape can be -1.
2107     *
2108     * Outputs:
2109     * * 0: The output tensor, of shape specified by the input shape.
2110%kind aidl canonical ndk hal_1.3+
2111     *      For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} and
2112     *      {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} tensor,
2113     *      the scale and zeroPoint must be the same as input0.
2114%else
2115     *      For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} tensor,
2116     *      the scale and zeroPoint must be the same as input0.
2117%/kind
2118%insert AVAIL1
2119     */
2120    %{DeclareOperation RESHAPE 22},
2121
2122    /**
2123     * Resizes images to given size using the bilinear interpretation.
2124     *
2125     * Resized images must be distorted if their output aspect ratio is not the
2126     * same as input aspect ratio. The corner pixels of output may not be the
2127     * same as corner pixels of input.
2128     *
2129     * Supported tensor {@link %{OperandType}}:
2130%kind aidl canonical ndk hal_1.2+
2131     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16} (since %{NNAPILevel3})
2132%/kind
2133     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32}
2134%kind aidl canonical ndk hal_1.2+
2135     * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} (since %{NNAPILevel3})
2136%/kind
2137%kind aidl canonical ndk hal_1.3+
2138     * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{NNAPILevel4})
2139%/kind
2140     *
2141%insert NHWC_NCHW
2142     *
2143%kind aidl canonical ndk hal_1.2+
2144     * Both resizing by shape and resizing by scale are supported.
2145     *
2146%/kind
2147     * Inputs (resizing by shape):
2148     * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
2149     *      the input.
2150%insert ZeroBatchesNNAPILevel3
2151     * * 1: An {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the output
2152     *      width of the output tensor.
2153     * * 2: An {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the output
2154     *      height of the output tensor.
2155%kind aidl canonical ndk hal_1.2+
2156     * * 3: An optional {@link %{OperandTypeLinkPfx}BOOL} scalar, default to false.
2157     *      Set to true to specify NCHW data layout for input0 and output0.
2158     *      Available since %{NNAPILevel3}.
2159%/kind
2160%kind aidl canonical ndk hal_1.3+
2161     * * 4: Align corners. An optional {@link %{OperandTypeLinkPfx}BOOL}
2162     *      scalar, default to false.  If True, the centers of the 4 corner
2163     *      pixels of the input and output tensors are aligned, preserving the
2164     *      values at the corner pixels.
2165     *      Available since %{NNAPILevel4}.
2166     * * 5: Half pixel centers. An optional {@link %{OperandTypeLinkPfx}BOOL}
2167     *      scalar, default to false. If True, the pixel centers are assumed to
2168     *      be at (0.5, 0.5). This is the default behavior of image.resize in
2169     *      TF 2.0. If this parameter is True, then align_corners parameter
2170     *      must be False.
2171     *      Available since %{NNAPILevel4}.
2172%/kind
2173%kind aidl canonical ndk hal_1.2+
2174     *
2175     * Inputs (resizing by scale, since %{NNAPILevel3}):
2176     * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
2177     *      the input. Zero batches is supported for this tensor.
2178     * * 1: A scalar, specifying width_scale, the scaling factor of the width
2179     *      dimension from the input tensor to the output tensor. The output
2180     *      width is calculated as new_width = floor(width * width_scale).
2181     *      The scalar must be of {@link %{OperandTypeLinkPfx}FLOAT16} if input0 is
2182     *      of {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16} and of
2183     *      {@link %{OperandTypeLinkPfx}FLOAT32} otherwise.
2184     * * 2: A scalar, specifying height_scale, the scaling factor of the height
2185     *      dimension from the input tensor to the output tensor. The output
2186     *      height is calculated as new_height = floor(height * height_scale).
2187     *      The scalar must be of {@link %{OperandTypeLinkPfx}FLOAT16} if input0 is
2188     *      of {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16} and of
2189     *      {@link %{OperandTypeLinkPfx}FLOAT32} otherwise.
2190     * * 3: An optional {@link %{OperandTypeLinkPfx}BOOL} scalar, default to false.
2191     *      Set to true to specify NCHW data layout for input0 and output0.
2192%/kind
2193%kind aidl canonical ndk hal_1.3+
2194     * * 4: Align corners. An optional {@link %{OperandTypeLinkPfx}BOOL}
2195     *      scalar, default to false.  If True, the centers of the 4 corner
2196     *      pixels of the input and output tensors are aligned, preserving the
2197     *      values at the corner pixels.
2198     *      Available since %{NNAPILevel4}.
2199     * * 5: Half pixel centers. An optional {@link %{OperandTypeLinkPfx}BOOL}
2200     *      scalar, default to false. If True, the pixel centers are assumed to
2201     *      be at (0.5, 0.5). This is the default behavior of image.resize in
2202     *      TF 2.0. If this parameter is True, then align_corners parameter
2203     *      must be False.
2204     *      Available since %{NNAPILevel4}.
2205%/kind
2206     *
2207     * Outputs:
2208     * * 0: The output 4-D tensor, of shape
2209     *      [batches, new_height, new_width, depth].
2210%kind aidl canonical ndk hal_1.3+
2211     *      For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} and
2212     *      {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} tensor,
2213     *      the scale and zeroPoint must be the same as input0.
2214%/kind
2215%kind canonical ndk hal_1.2
2216     *      For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} tensor,
2217     *      the scale and zeroPoint must be the same as input0.
2218%/kind
2219%insert AVAIL1
2220     */
2221    %{DeclareOperation RESIZE_BILINEAR 23},
2222
2223    /**
2224     * A basic recurrent neural network layer.
2225     *
2226     * This layer implements the operation:
2227     * outputs = state = activation(inputs * input_weights +
2228     *                              state * recurrent_weights + bias)
2229     *
2230     * Where:
2231     * * “input_weights” is a weight matrix that multiplies the inputs;
2232     * * “recurrent_weights” is a weight matrix that multiplies the current
2233     *    “state” which itself is the output from the previous time step
2234     *    computation;
2235     * * “bias” is a bias vector (added to each output vector in the batch);
2236     * * “activation” is the function passed as the “fused_activation_function”
2237     *   argument (if not “NONE”).
2238     *
2239     * Supported tensor {@link %{OperandType}}:
2240%kind aidl canonical ndk hal_1.2+
2241     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16} (since %{NNAPILevel3})
2242%/kind
2243     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32}
2244     *
2245     * The input tensors must all be the same type.
2246     *
2247     * Inputs:
2248     * * 0: input.
2249     *      A 2-D tensor of shape [batch_size, input_size], where “batch_size”
2250     *      corresponds to the batching dimension, and “input_size” is the size
2251     *      of the input.
2252     * * 1: weights.
2253     *      A 2-D tensor of shape [num_units, input_size], where “num_units”
2254     *      corresponds to the number of units.
2255     * * 2: recurrent_weights.
2256     *      A 2-D tensor of shape [num_units, num_units], with columns
2257     *      corresponding to the weights from each unit.
2258     * * 3: bias.
2259     *      A 1-D tensor of shape [num_units].
2260     * * 4: hidden state (in).
2261     *      A 2-D tensor of shape [batch_size, num_units].
2262     * * 5: fused_activation_function.
2263     *      An optional {@link %{FusedActivationFunc}} value indicating the
2264     *      activation function. If “NONE” is specified then it results in a
2265     *      linear activation.
2266     *
2267     * Outputs:
2268     * * 0: hidden state (out).
2269     *      A 2-D tensor of shape [batch_size, num_units].
2270     *
2271     * * 1: output.
2272     *      A 2-D tensor of shape [batch_size, num_units]. This is effectively
2273     *      the same as the current state value.
2274%insert AVAIL1
2275     */
2276    %{DeclareOperation RNN 24},
2277
2278    /**
2279     * Computes the softmax activation on the input tensor element-wise, per
2280     * batch, by normalizing the input vector so the maximum coefficient is
2281     * zero.
2282     *
2283     * The output is calculated using this formula:
2284     *
2285     *     output[batch, i] =
2286     *         exp((input[batch, i] - max(input[batch, :])) * beta) /
2287     *         sum_{k}{exp((input[batch, k] - max(input[batch, :])) * beta)}
2288     *
2289     * For input tensor with rank other than 2, the activation will be applied
2290     * independently on each 1-D slice along specified dimension.
2291     *
2292     * Supported tensor {@link %{OperandType}}:
2293%kind aidl canonical ndk hal_1.2+
2294     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16} (since %{NNAPILevel3})
2295%/kind
2296     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32}
2297     * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM}
2298%kind aidl canonical ndk hal_1.3+
2299     * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{NNAPILevel4})
2300%/kind
2301     *
2302%kind hal_1.0 hal_1.1
2303     * Supported tensor rank: 2 or 4.
2304%/kind
2305%kind aidl canonical ndk hal_1.2+
2306     * Supported tensor rank: up to 4.
2307     * Tensors with rank other than 2 or 4 are only supported since %{NNAPILevel3}.
2308%/kind
2309     *
2310     * Inputs:
2311     * * 0: A 2-D or 4-D tensor, specifying the tensor to be reshaped.
2312%kind aidl canonical ndk hal_1.2+
2313     *      Since %{NNAPILevel3}, this tensor may be zero-sized.
2314%/kind
2315%kind aidl canonical ndk hal_1.3+
2316     * * 1: A scalar, specifying the positive scaling factor for the exponent,
2317     *      beta. If input0 is of {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32},
2318     *      {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} or
2319     *      {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED}, the scalar
2320     *      must be of {@link %{OperandTypeLinkPfx}FLOAT32}.
2321%else
2322     * * 1: A scalar, specifying the positive scaling factor for the exponent,
2323     *      beta. If input0 is of {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32} or
2324     *      {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM}, the scalar must be of
2325     *      {@link %{OperandTypeLinkPfx}FLOAT32}.
2326%/kind
2327%kind aidl canonical ndk hal_1.2+
2328     *      If input0 is of {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16}, then the
2329     *      scalar must be of {@link %{OperandTypeLinkPfx}FLOAT16}.
2330%/kind
2331%kind aidl canonical ndk hal_1.2+
2332     * * 2: An optional {@link %{OperandTypeLinkPfx}INT32} scalar, default to -1,
2333     *      specifying the dimension the activation would be performed on.
2334     *      Negative index is used to specify axis from the end (e.g. -1 for
2335     *      the last axis). Must be in the range [-n, n).
2336     *      Available since %{NNAPILevel3}.
2337%/kind
2338     *
2339     * Outputs:
2340     * * 0: The output tensor of same shape as input0.
2341     *      For {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM},
2342     *      the scale must be 1.f / 256 and the zeroPoint must be 0.
2343%kind aidl canonical ndk hal_1.3+
2344     *      For {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED},
2345     *      the scale must be 1.f / 256 and the zeroPoint must be -128.
2346%/kind
2347%insert AVAIL1
2348     */
2349    %{DeclareOperation SOFTMAX 25},
2350
2351    /**
2352     * Rearranges blocks of spatial data, into depth.
2353     *
2354     * More specifically, this op outputs a copy of the input tensor where
2355     * values from the height and width dimensions are moved to the depth
2356     * dimension. The value block_size indicates the input block size and how
2357     * the data is moved.
2358     *
2359     * Chunks of data of size block_size * block_size from depth are rearranged
2360     * into non-overlapping blocks of size block_size x block_size.
2361     *
2362     * The depth of the output tensor is input_depth * block_size * block_size.
2363     * The input tensor's height and width must be divisible by block_size.
2364     *
2365     * Supported tensor {@link %{OperandType}}:
2366%kind aidl canonical ndk hal_1.2+
2367     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16} (since %{NNAPILevel3})
2368%/kind
2369     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32}
2370     * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM}
2371%kind aidl canonical ndk hal_1.3+
2372     * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{NNAPILevel4})
2373%/kind
2374     *
2375%insert NHWC_NCHW
2376     *
2377     * Inputs:
2378     * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
2379     *      specifying the input.
2380     * * 1: An {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the block_size.
2381     *      block_size must be >=1 and block_size must be a divisor of both the
2382     *      input height and width.
2383%kind aidl canonical ndk hal_1.2+
2384     * * 2: An optional {@link %{OperandTypeLinkPfx}BOOL} scalar, default to false.
2385     *      Set to true to specify NCHW data layout for input0 and output0.
2386     *      Available since %{NNAPILevel3}.
2387%/kind
2388     *
2389     * Outputs:
2390     * * 0: The output 4-D tensor, of shape [batches, height/block_size,
2391     *      width/block_size, depth_in*block_size*block_size].
2392%kind aidl canonical ndk hal_1.3+
2393     *      For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} and
2394     *      {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} tensor,
2395     *      the scale and zeroPoint must be the same as input0.
2396%else
2397     *      For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} tensor,
2398     *      the scale and zeroPoint must be the same as input0.
2399%/kind
2400%insert AVAIL1
2401     */
2402    %{DeclareOperation SPACE_TO_DEPTH 26},
2403
2404    /**
2405     * SVDF op is a kind of stateful layer derived from the notion that a
2406     * densely connected layer that's processing a sequence of input frames can
2407     * be approximated by using a singular value decomposition of each of its
2408     * nodes. The implementation is based on:
2409     *
2410     * https://research.google.com/pubs/archive/43813.pdf
2411     *
2412     * P. Nakkiran, R. Alvarez, R. Prabhavalkar, C. Parada.
2413     * “Compressing Deep Neural Networks using a Rank-Constrained Topology”.
2414     * INTERSPEECH, 2015.
2415     *
2416     * It processes the incoming input using a 2-stage filtering mechanism:
2417     * * stage 1 performs filtering on the "features" dimension, whose outputs
2418     *   get pushed into a memory of fixed-size memory_size.
2419     * * stage 2 performs filtering on the "time" dimension of the memory_size
2420     *   memoized outputs of stage 1.
2421     *
2422     * Specifically, for rank 1, this layer implements the operation:
2423     *
2424     *     memory = push(conv1d(inputs, weights_feature, feature_dim,
2425     *                          "%{ANN}PADDING_VALID"));
2426     *     outputs = activation(memory * weights_time + bias);
2427     *
2428     * Where:
2429     * * “weights_feature” is a weights matrix that processes the inputs (by
2430     *   convolving the input with every “feature filter”), and whose outputs
2431     *   get pushed, stacked in order, into the fixed-size “memory” (the oldest
2432     *   entry gets dropped);
2433     * * “weights_time” is a weights matrix that processes the “memory” (by a
2434     *   batched matrix multiplication on the num_units);
2435     * * “bias” is an optional bias vector (added to each output vector in the
2436     *   batch); and
2437     * * “activation” is the function passed as the “fused_activation_function”
2438     *   argument (if not “NONE”).
2439     *
2440     * Each rank adds a dimension to the weights matrices by means of stacking
2441     * the filters.
2442     *
2443     * Supported tensor {@link %{OperandType}}:
2444%kind aidl canonical ndk hal_1.2+
2445     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16} (since %{NNAPILevel3})
2446%/kind
2447     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32}
2448     *
2449     * All input tensors must be the same type.
2450     *
2451     * Inputs:
2452     * * 0: input.
2453     *      A 2-D tensor of shape [batch_size, input_size], where “batch_size”
2454     *      corresponds to the batching dimension, and “input_size” is the size
2455     *      of the input.
2456     * * 1: weights_feature.
2457     *      A 2-D tensor of shape [num_units, input_size], where “num_units”
2458     *      corresponds to the number of units.
2459     * * 2: weights_time.
2460     *      A 2-D tensor of shape [num_units, memory_size], where “memory_size”
2461     *      corresponds to the fixed-size of the memory.
2462     * * 3: bias.
2463     *      An optional 1-D tensor of shape [num_units].
2464     * * 4: state (in).
2465     *      A 2-D tensor of shape [batch_size, (memory_size - 1) * num_units * rank].
2466     * * 5: rank.
2467     *      The rank of the SVD approximation.
2468     * * 6: fused_activation_function.
2469     *      An optional {@link %{FusedActivationFunc}} value indicating the
2470     *      activation function. If “NONE” is specified then it results in a
2471     *      linear activation.
2472     *
2473     * Outputs:
2474     * * 0: state (out).
2475     *      A 2-D tensor of the same {@link %{OperandType}} as the inputs, with shape
2476     *      [batch_size, (memory_size - 1) * num_units * rank].
2477     * * 1: output.
2478     *      A 2-D tensor of the same {@link %{OperandType}} as the inputs, with shape
2479     *      [batch_size, num_units].
2480%insert AVAIL1
2481     */
2482    %{DeclareOperation SVDF 27},
2483
2484    /**
2485     * Computes hyperbolic tangent of input tensor element-wise.
2486     *
2487     * The output is calculated using this formula:
2488     *
2489     *     output = tanh(input)
2490     *
2491     * Supported tensor {@link %{OperandType}}:
2492%kind aidl canonical ndk hal_1.2+
2493     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16} (since %{NNAPILevel3})
2494%/kind
2495     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32}
2496%kind aidl canonical ndk hal_1.2+
2497     * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} (since %{NNAPILevel3})
2498%/kind
2499%kind aidl canonical ndk hal_1.3+
2500     * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{NNAPILevel4})
2501%/kind
2502     *
2503     * Supported tensor rank: up to 4.
2504     *
2505     * Inputs:
2506     * * 0: A tensor, specifying the input.
2507%kind aidl canonical ndk hal_1.2+
2508     *      Since %{NNAPILevel3}, this tensor may be zero-sized.
2509%/kind
2510     *
2511     * Outputs:
2512     * * 0: The output tensor of same shape as input0.
2513%kind aidl canonical ndk hal_1.2+
2514     *      For {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM},
2515     *      the scale must be 1.f / 128 and the zeroPoint must be 128.
2516%/kind
2517%kind aidl canonical ndk hal_1.3+
2518     *      For {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED},
2519     *      the scale must be 1.f / 128 and the zeroPoint must be 0.
2520%/kind
2521%insert AVAIL1
2522     */
2523    %{DeclareOperation TANH 28},
2524%/section
2525
2526%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
2527
2528%% HAL OperationType for 1.1
2529%% NDK OperationCode for API 28
2530
2531%section Operation_1.1
2532    /**
2533     * BatchToSpace for N-dimensional tensors.
2534     *
2535     * This operation reshapes the batch dimension (dimension 0) into M + 1
2536     * dimensions of shape block_shape + [batch], interleaves these blocks back
2537     * into the grid defined by the spatial dimensions [1, ..., M], to obtain a
2538     * result with the same rank as the input.
2539     *
2540     * This is the reverse of SpaceToBatch.
2541     *
2542     * Supported tensor {@link %{OperandType}}:
2543%kind aidl canonical ndk hal_1.2+
2544     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16} (since %{NNAPILevel3})
2545%/kind
2546     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32}
2547     * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM}
2548%kind aidl canonical ndk hal_1.3+
2549     * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{NNAPILevel4})
2550%/kind
2551     *
2552%insert NHWC_NCHW
2553     *
2554     * Inputs:
2555     * * 0: An n-D tensor, specifying the tensor to be reshaped
2556     * * 1: A 1-D Tensor of {@link %{OperandTypeLinkPfx}TENSOR_INT32}, the block
2557     *      sizes for each spatial dimension of the input tensor. All values
2558     *      must be >= 1.
2559%kind aidl canonical ndk hal_1.2+
2560     * * 2: An optional {@link %{OperandTypeLinkPfx}BOOL} scalar, default to false.
2561     *      Set to true to specify NCHW data layout for input0 and output0.
2562     *      Available since API level 29.
2563%/kind
2564     *
2565     * Outputs:
2566     * * 0: A tensor of the same {@link %{OperandType}} as input0.
2567%kind aidl canonical ndk hal_1.3+
2568     *      For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} and
2569     *      {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} tensor,
2570     *      the scale and zeroPoint must be the same as input0.
2571%else
2572     *      For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} tensor,
2573     *      the scale and zeroPoint must be the same as input0.
2574%/kind
2575%insert AVAIL2
2576     */
2577    %{DeclareOperation BATCH_TO_SPACE_ND 29},
2578
2579    /**
2580     * Element-wise division of two tensors.
2581     *
2582     * Takes two input tensors of identical {@link %{OperandType}} and compatible
2583     * dimensions. The output is the result of dividing the first input tensor
2584     * by the second, optionally modified by an activation function.
2585     *
2586%kind aidl canonical ndk hal_1.3+
2587     * For inputs of {@link %{OperandTypeLinkPfx}TENSOR_INT32}, performs
2588     * "floor division" ("//" in Python). For example,
2589     *     5 // 2 = 2
2590     *    -5 // 2 = -3
2591     *
2592%/kind
2593     * Two dimensions are compatible when:
2594     *     1. they are equal, or
2595     *     2. one of them is 1
2596     *
2597     * The size of the output is the maximum size along each dimension of the
2598     * input operands. It starts with the trailing dimensions, and works its way
2599     * forward.
2600     *
2601     * Example:
2602     *     input1.dimension =    {4, 1, 2}
2603     *     input2.dimension = {5, 4, 3, 1}
2604     *     output.dimension = {5, 4, 3, 2}
2605     *
2606%insert GenericZero
2607     * Supported tensor {@link %{OperandType}}:
2608%kind aidl canonical ndk hal_1.2+
2609     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16} (since %{NNAPILevel3})
2610%/kind
2611     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32}
2612%kind aidl canonical ndk hal_1.3+
2613     * * {@link %{OperandTypeLinkPfx}TENSOR_INT32} (since %{NNAPILevel4})
2614%/kind
2615     *
2616     * Supported tensor rank: up to 4
2617     *
2618     * Inputs:
2619     * * 0: An n-D tensor, specifying the first input.
2620     * * 1: A tensor of the same {@link %{OperandType}}, and compatible dimensions
2621     *      as input0.
2622     * * 2: An {@link %{OperandTypeLinkPfx}INT32} scalar, and has to be one of the
2623     *      {@link %{FusedActivationFunc}} values. Specifies the activation to
2624     *      invoke on the result.
2625%kind aidl canonical ndk hal_1.3+
2626     *      For a {@link %{OperandTypeLinkPfx}TENSOR_INT32} tensor,
2627     *      the {@link %{FusedActivationFunc}} must be "NONE".
2628%/kind
2629     *
2630     * Outputs:
2631     * * 0: A tensor of the same {@link %{OperandType}} as input0.
2632%insert AVAIL2
2633     */
2634    %{DeclareOperation DIV 30},
2635
2636    /**
2637     * Computes the mean of elements across dimensions of a tensor.
2638     *
2639     * Reduces the input tensor along the given dimensions to reduce. Unless
2640     * keep_dims is true, the rank of the tensor is reduced by 1 for each entry
2641     * in axis. If keep_dims is true, the reduced dimensions are retained with
2642     * length 1.
2643     *
2644     * Supported tensor {@link %{OperandType}}:
2645%kind aidl canonical ndk hal_1.2+
2646     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16} (since %{NNAPILevel3})
2647%/kind
2648     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32}
2649     * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM}
2650%kind aidl canonical ndk hal_1.3+
2651     * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{NNAPILevel4})
2652%/kind
2653     *
2654     * Supported tensor rank: up to 4
2655     *
2656     * Inputs:
2657     * * 0: A tensor, specifying the input.
2658     * * 1: A 1-D Tensor of {@link %{OperandTypeLinkPfx}TENSOR_INT32}. The dimensions
2659     *      to reduce. Must be in the range
2660     *      [-rank(input_tensor), rank(input_tensor)).
2661     *
2662     *      NOTE: When the operation was introduced, the documentation
2663     *      incorrectly stated that if dimensions were empty, the operation
2664     *      would reduce across all dimensions. This behavior was never
2665     *      implemented.
2666     *
2667     * * 2: An {@link %{OperandTypeLinkPfx}INT32} scalar, keep_dims. If positive,
2668     *      retains reduced dimensions with length 1.
2669     *
2670     * Outputs:
2671     * * 0: A tensor of the same {@link %{OperandType}} as input0.
2672%kind aidl canonical ndk hal_1.3+
2673     *      For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} and
2674     *      {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} tensor,
2675     *      the scale and zeroPoint must be the same as input0.
2676%else
2677     *      For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} tensor,
2678     *      the scale and zeroPoint must be the same as input0.
2679%/kind
2680     *      If all dimensions are reduced and keep_dims is false, the output
2681     *      shape is [1].
2682%insert AVAIL2
2683     */
2684    %{DeclareOperation MEAN 31},
2685
2686    /**
2687     * Pads a tensor.
2688     *
2689     * This operation pads a tensor according to the specified paddings.
2690     *
2691     * Supported tensor {@link %{OperandType}}:
2692%kind aidl canonical ndk hal_1.2+
2693     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16} (since %{NNAPILevel3})
2694%/kind
2695     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32}
2696     * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM}
2697%kind aidl canonical ndk hal_1.3+
2698     * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{NNAPILevel4})
2699%/kind
2700%kind aidl canonical ndk hal_1.2+
2701     *   (full support since %{NNAPILevel3}, see the output section)
2702%else
2703     *   (the pad value is undefined)
2704%/kind
2705     *
2706     * Supported tensor rank: up to 4
2707     *
2708     * Inputs:
2709     * * 0: An n-D tensor, specifying the tensor to be padded.
2710     * * 1: A 2-D Tensor of {@link %{OperandTypeLinkPfx}TENSOR_INT32}, the paddings
2711     *      for each spatial dimension of the input tensor. The shape of the
2712     *      tensor must be {rank(input0), 2}.
2713     *      padding[i, 0] specifies the number of elements to be padded in the
2714     *      front of dimension i.
2715     *      padding[i, 1] specifies the number of elements to be padded after the
2716     *      end of dimension i.
2717     *
2718     * Outputs:
2719     * * 0: A tensor of the same {@link %{OperandType}} as input0. The
2720     *      output tensor has the same rank as input0, and each
2721     *      dimension of the output tensor has the same size as the
2722     *      corresponding dimension of the input tensor plus the size
2723     *      of the padding:
2724     *          output0.dimension[i] =
2725     *              padding[i, 0] + input0.dimension[i] + padding[i, 1]
2726%kind aidl canonical ndk hal_1.3+
2727     *      For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} and
2728     *      {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} tensor,
2729     *      the scale and zeroPoint must be the same as input0.
2730%else
2731     *      For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} tensor,
2732     *      the scale and zeroPoint must be the same as input0.
2733%/kind
2734%kind aidl canonical ndk hal_1.2+
2735     *
2736     *      NOTE: Before %{NNAPILevel3}, the pad value for
2737     *      {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} is undefined.
2738     *      Since %{NNAPILevel3}, the pad value is always the logical zero.
2739%/kind
2740%insert AVAIL2
2741     */
2742    %{DeclareOperation PAD 32},
2743
2744    /**
2745     * SpaceToBatch for N-Dimensional tensors.
2746     *
2747     * This operation divides "spatial" dimensions [1, ..., M] of the input into
2748     * a grid of blocks of shape block_shape, and interleaves these blocks with
2749     * the "batch" dimension (0) such that in the output, the spatial dimensions
2750     * [1, ..., M] correspond to the position within the grid, and the batch
2751     * dimension combines both the position within a spatial block and the
2752     * original batch position. Prior to division into blocks, the spatial
2753     * dimensions of the input are optionally zero padded according to paddings.
2754     *
2755     * Supported tensor {@link %{OperandType}}:
2756%kind aidl canonical ndk hal_1.2+
2757     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16} (since %{NNAPILevel3})
2758%/kind
2759     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32}
2760     * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM}
2761%kind aidl canonical ndk hal_1.3+
2762     * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{NNAPILevel4})
2763%/kind
2764%kind aidl canonical ndk hal_1.2+
2765     *   (full support since %{NNAPILevel3}, see the output section)
2766%else
2767     *   (the pad value is undefined)
2768%/kind
2769     *
2770%insert NHWC_NCHW
2771     *
2772     * Inputs:
2773     * * 0: An n-D tensor, specifying the input.
2774     * * 1: A 1-D Tensor of {@link %{OperandTypeLinkPfx}TENSOR_INT32}, the block
2775     *      sizes for each spatial dimension of the input tensor. All values
2776     *      must be >= 1.
2777     * * 2: A 2-D Tensor of {@link %{OperandTypeLinkPfx}TENSOR_INT32}, the paddings
2778     *      for each spatial dimension of the input tensor. All values must be
2779     *      >= 0. The shape of the tensor must be {M, 2}, where M is the number
2780     *      of spatial dimensions.
2781     *      padding[i, 0] specifies the number of elements to be padded in the
2782     *      front of dimension i.
2783     *      padding[i, 1] specifies the number of elements to be padded after the
2784     *      end of dimension i.
2785%kind aidl canonical ndk hal_1.2+
2786     * * 3: An optional {@link %{OperandTypeLinkPfx}BOOL} scalar, default to false.
2787     *      Set to true to specify NCHW data layout for input0 and output0.
2788     *      Available since %{NNAPILevel3}.
2789%/kind
2790     *
2791     * Outputs:
2792     * * 0: A tensor of the same {@link %{OperandType}} as input0.
2793%kind aidl canonical ndk hal_1.3+
2794     *      For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} and
2795     *      {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} tensor,
2796     *      the scale and zeroPoint must be the same as input0.
2797%else
2798     *      For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} tensor,
2799     *      the scale and zeroPoint must be the same as input0.
2800%/kind
2801%kind aidl canonical ndk hal_1.2+
2802     *
2803     *      NOTE: Before %{NNAPILevel3}, the pad value for
2804     *      {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} is undefined.
2805     *      Since %{NNAPILevel3}, the pad value is always the logical zero.
2806%/kind
2807%insert AVAIL2
2808     */
2809    %{DeclareOperation SPACE_TO_BATCH_ND 33},
2810
2811    /**
2812     * Removes dimensions of size 1 from the shape of a tensor.
2813     *
2814     * Given a tensor input, this operation returns a tensor of the same
2815     * {@link %{OperandType}} with all dimensions of size 1 removed. If you don't
2816     * want to remove all size 1 dimensions, you can remove specific size 1
2817     * dimensions by specifying the axes (input1).
2818     *
2819     * Supported tensor {@link %{OperandType}}:
2820%kind aidl canonical ndk hal_1.2+
2821     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16} (since %{NNAPILevel3})
2822%/kind
2823     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32}
2824     * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM}
2825%kind aidl canonical ndk hal_1.3+
2826     * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{NNAPILevel4})
2827%/kind
2828     *
2829     * Supported tensor rank: up to 4
2830     *
2831     * Inputs:
2832     * * 0: An n-D tensor, the tensor to be squeezed.
2833     * * 1: An optional 1-D tensor of {@link %{OperandTypeLinkPfx}TENSOR_INT32}. The
2834     *      dimensions to squeeze. If specified, only squeezes the dimensions
2835     *      listed. Otherwise, squeezes all dimensions. The dimension index
2836     *      starts at 0. An error must be reported if squeezing a dimension that
2837     *      is not 1.
2838     *
2839     * Outputs:
2840     * * 0: A tensor of the same {@link %{OperandType}} as input0. Contains the
2841     *      same data as input, but has one or more dimensions of size 1
2842     *      removed.
2843%kind aidl canonical ndk hal_1.3+
2844     *      For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} and
2845     *      {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} tensor,
2846     *      the scale and zeroPoint must be the same as input0.
2847%else
2848     *      For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} tensor,
2849     *      the scale and zeroPoint must be the same as input0.
2850%/kind
2851     *      If all input dimensions are equal to 1 and are to be squeezed, the
2852     *      output shape is [1].
2853%insert AVAIL2
2854     */
2855    %{DeclareOperation SQUEEZE 34},
2856
2857    /**
2858     * Extracts a strided slice of a tensor.
2859     *
2860     * Roughly speaking, this op extracts a slice of size (end - begin) / stride
2861     * from the given input tensor. Starting at the location specified by begin
2862     * the slice continues by adding stride to the index until all dimensions
2863     * are not less than end. Note that a stride can be negative, which causes a
2864     * reverse slice.
2865     *
2866     * Supported tensor {@link %{OperandType}}:
2867%kind aidl canonical ndk hal_1.2+
2868     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16} (since %{NNAPILevel3})
2869%/kind
2870     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32}
2871     * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM}
2872%kind aidl canonical ndk hal_1.3+
2873     * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{NNAPILevel4})
2874%/kind
2875     *
2876     * Supported tensor rank: up to 4
2877     *
2878     * Inputs:
2879     * * 0: An n-D tensor, specifying the tensor to be sliced.
2880     * * 1: begin, a 1-D tensor of {@link %{OperandTypeLinkPfx}TENSOR_INT32}. The
2881     *      starts of the dimensions of the input tensor to be sliced. The
2882     *      length must be equal to rank(input0).
2883     * * 2: end, a 1-D tensor of {@link %{OperandTypeLinkPfx}TENSOR_INT32}. The
2884     *      ends of the dimensions of the input tensor to be sliced. The length
2885     *      must be equal to rank(input0).
2886     * * 3: strides, a 1-D tensor of {@link %{OperandTypeLinkPfx}TENSOR_INT32}. The
2887     *      strides of the dimensions of the input tensor to be sliced. The
2888     *      length must be equal to rank(input0). The entries must be non-zero.
2889     * * 4: begin_mask, an {@link %{OperandTypeLinkPfx}INT32} scalar. If the ith bit
2890     *      of begin_mask is set, begin[i] is ignored and the fullest possible
2891     *      range in that dimension is used instead.
2892     * * 5: end_mask, an {@link %{OperandTypeLinkPfx}INT32} scalar. If the ith bit of
2893     *      end_mask is set, end[i] is ignored and the fullest possible range in
2894     *      that dimension is used instead.
2895     * * 6: shrink_axis_mask, an {@link %{OperandTypeLinkPfx}INT32} scalar. If the
2896     *      ith bit of shrink_axis_mask is set, the ith dimension specification
2897     *      shrinks the dimensionality by 1, taking on the value at index
2898     *      begin[i]. In this case, the ith specification must define a
2899     *      slice of size 1, e.g. begin[i] = x, end[i] = x + 1.
2900     *
2901     * Outputs:
2902     * * 0: A tensor of the same {@link %{OperandType}} as input0 and rank (n - k),
2903     *      where k is the number of bits set in shrink_axis_mask.
2904%kind aidl canonical ndk hal_1.3+
2905     *      For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} and
2906     *      {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} tensor,
2907     *      the scale and zeroPoint must be the same as input0.
2908%else
2909     *      For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} tensor,
2910     *      the scale and zeroPoint must be the same as input0.
2911%/kind
2912     *      If shrink_axis_mask is true for all input dimensions, the output
2913     *      shape is [1].
2914%insert AVAIL2
2915     */
2916    %{DeclareOperation STRIDED_SLICE 35},
2917
2918    /**
2919     * Element-wise subtraction of two tensors.
2920     *
2921     * Takes two input tensors of identical {@link %{OperandType}} and compatible
2922     * dimensions. The output is the result of subtracting the second input
2923     * tensor from the first one, optionally modified by an activation function.
2924     *
2925     * Two dimensions are compatible when:
2926     *     1. they are equal, or
2927     *     2. one of them is 1
2928     *
2929     * The size of the output is the maximum size along each dimension of the
2930     * input operands. It starts with the trailing dimensions, and works its way
2931     * forward.
2932     *
2933     * Example:
2934     *     input1.dimension =    {4, 1, 2}
2935     *     input2.dimension = {5, 4, 3, 1}
2936     *     output.dimension = {5, 4, 3, 2}
2937     *
2938%insert GenericZero
2939     * Supported tensor {@link %{OperandType}}:
2940%kind aidl canonical ndk hal_1.2+
2941     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16} (since %{NNAPILevel3})
2942%/kind
2943     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32}
2944%kind aidl canonical ndk hal_1.2+
2945     * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} (since %{NNAPILevel3})
2946%/kind
2947%kind aidl canonical ndk hal_1.3+
2948     * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{NNAPILevel4})
2949     * * {@link %{OperandTypeLinkPfx}TENSOR_INT32} (since %{NNAPILevel4})
2950%/kind
2951     *
2952     * Supported tensor rank: up to 4
2953     *
2954     * Inputs:
2955     * * 0: An n-D tensor, specifying the first input.
2956     * * 1: A tensor of the same {@link %{OperandType}}, and compatible dimensions
2957     *      as input0.
2958     * * 2: An {@link %{OperandTypeLinkPfx}INT32} scalar, and has to be one of the
2959     *      {@link %{FusedActivationFunc}} values. Specifies the activation to
2960     *      invoke on the result.
2961%kind aidl canonical ndk hal_1.3+
2962     *      For a {@link %{OperandTypeLinkPfx}TENSOR_INT32} tensor,
2963     *      the {@link %{FusedActivationFunc}} must be "NONE".
2964%/kind
2965     *
2966     * Outputs:
2967     * * 0: A tensor of the same {@link %{OperandType}} as input0.
2968%kind hal_1.2
2969     *      For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} tensor,
2970     *      the scale and zeroPoint can be different from inputs' scale and zeroPoint.
2971%/kind
2972%kind aidl canonical ndk hal_1.3+
2973     *      For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} and
2974     *      {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} tensor,
2975     *      the scale and zeroPoint can be different from inputs' scale and zeroPoint.
2976%/kind
2977%insert AVAIL2
2978     */
2979    %{DeclareOperation SUB 36},
2980
2981    /**
2982     * Transposes the input tensor, permuting the dimensions according to the
2983     * perm tensor.
2984     *
2985     * The returned tensor's dimension i corresponds to the input dimension
2986     * perm[i]. If perm is not given, it is set to (n-1...0), where n is the
2987     * rank of the input tensor. Hence by default, this operation performs a
2988     * regular matrix transpose on 2-D input Tensors.
2989     *
2990     * Supported tensor {@link %{OperandType}}:
2991%kind aidl canonical ndk hal_1.2+
2992     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16} (since %{NNAPILevel3})
2993%/kind
2994     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32}
2995     * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM}
2996%kind aidl canonical ndk hal_1.3+
2997     * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{NNAPILevel4})
2998%/kind
2999     *
3000     * Supported tensor rank: up to 4
3001     *
3002     * Inputs:
3003     * * 0: An n-D tensor, specifying the tensor to be transposed.
3004%kind aidl canonical ndk hal_1.2+
3005     *      Since %{NNAPILevel3}, this tensor may be zero-sized.
3006%/kind
3007     * * 1: An optional 1-D Tensor of {@link %{OperandTypeLinkPfx}TENSOR_INT32},
3008     *      the permutation of the dimensions of the input tensor.
3009     *
3010     * Outputs:
3011     * * 0: A tensor of the same {@link %{OperandType}} as input0.
3012%kind aidl canonical ndk hal_1.3+
3013     *      For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} and
3014     *      {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} tensor,
3015     *      the scale and zeroPoint must be the same as input0.
3016%else
3017     *      For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} tensor,
3018     *      the scale and zeroPoint must be the same as input0.
3019%/kind
3020%insert AVAIL2
3021     */
3022    %{DeclareOperation TRANSPOSE 37},
3023%/section
3024
3025%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
3026
3027%% HAL OperandType for 1.2
3028%% NDK OperandCode for API 29
3029
3030%section Operand_1.2
3031    /**
3032     * An 8 bit boolean scalar value.
3033     *
3034     * Values of this operand type are either true or false. A zero value
3035     * represents false; any other value represents true.
3036%insert AVAIL3
3037     */
3038    %{ANN}BOOL = 6,
3039%insert canonical_empty_line
3040    /**
3041     * A tensor of 16 bit signed integers that represent real numbers.
3042     *
3043     * Attached to this tensor is a number representing real value scale that is
3044     * used to convert the 16 bit number to a real value in the following way:
3045     * realValue = integerValue * scale.
3046     *
3047     * scale is a 32 bit floating point value greater than zero.
3048%insert AVAIL3
3049     */
3050    %{ANN}TENSOR_QUANT16_SYMM = 7,
3051%insert canonical_empty_line
3052    /**
3053     * A tensor of IEEE 754 16 bit floating point values.
3054%insert AVAIL3
3055     */
3056    %{ANN}TENSOR_FLOAT16 = 8,
3057%insert canonical_empty_line
3058    /**
3059     * A tensor of 8 bit boolean values.
3060     *
3061     * Values of this operand type are either true or false. A zero value
3062     * represents false; any other value represents true.
3063%insert AVAIL3
3064     */
3065    %{ANN}TENSOR_BOOL8 = 9,
3066%insert canonical_empty_line
3067    /**
3068     * An IEEE 754 16 bit floating point scalar value.
3069%insert AVAIL3
3070     */
3071    %{ANN}FLOAT16 = 10,
3072%insert canonical_empty_line
3073    /**
3074     * A tensor of 8 bit signed integers that represent real numbers.
3075     *
3076     * This tensor is associated with additional fields that can
3077     * be used to convert the 8 bit signed integer to the real value and vice versa.
3078     * These fields are:
3079     * - channelDim: a 32 bit unsigned integer indicating channel dimension.
3080     * - scales: an array of positive 32 bit floating point values.
3081     * The size of the scales array must be equal to dimensions[channelDim].
3082     *
3083%kind ndk
3084     * {@link ANeuralNetworksModel_setOperandSymmPerChannelQuantParams} must be used
3085     * to set the parameters for an Operand of this type.
3086     *
3087%/kind
3088%kind aidl canonical hal_1.2+
3089     * {@link %{Ann}SymmPerChannelQuantParams} must hold the parameters for an Operand of this type.
3090%/kind
3091     * The channel dimension of this tensor must not be unknown (dimensions[channelDim] != 0).
3092     *
3093     * The formula is:
3094     * realValue[..., C, ...] =
3095     *     integerValue[..., C, ...] * scales[C]
3096     * where C is an index in the Channel dimension.
3097%insert AVAIL3
3098     */
3099    %{ANN}TENSOR_QUANT8_SYMM_PER_CHANNEL = 11,
3100%insert canonical_empty_line
3101    /**
3102     * A tensor of 16 bit unsigned integers that represent real numbers.
3103     *
3104     * Attached to this tensor are two numbers that can be used to convert the
3105     * 16 bit integer to the real value and vice versa. These two numbers are:
3106     * - scale: a 32 bit floating point value greater than zero.
3107     * - zeroPoint: a 32 bit integer, in range [0, 65535].
3108     *
3109     * The formula is:
3110     * real_value = (integer_value - zeroPoint) * scale.
3111%insert AVAIL3
3112     */
3113    %{ANN}TENSOR_QUANT16_ASYMM = 12,
3114%insert canonical_empty_line
3115    /**
3116     * A tensor of 8 bit signed integers that represent real numbers.
3117     *
3118     * Attached to this tensor is a number representing real value scale that is
3119     * used to convert the 8 bit number to a real value in the following way:
3120     * realValue = integerValue * scale.
3121     *
3122     * scale is a 32 bit floating point value greater than zero.
3123%insert AVAIL3
3124     */
3125    %{ANN}TENSOR_QUANT8_SYMM = 13,
3126%/section
3127
3128%section Operand_1.2_MAX
3129    FUNDAMENTAL_MAX = 13,
3130%/section
3131
3132%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
3133
3134%% HAL OperationType for 1.2
3135%% NDK OperationCode for API 29
3136
3137%section Operation_1.2
3138    /**
3139     * Computes the absolute value of a tensor, element-wise.
3140     *
3141     * Supported tensor {@link %{OperandType}}:
3142     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16}
3143     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32}
3144%kind aidl canonical ndk hal_1.3+
3145     * * {@link %{OperandTypeLinkPfx}TENSOR_INT32} (since %{NNAPILevel4})
3146%/kind
3147     *
3148     * Supported tensor rank: from 1.
3149     *
3150     * Inputs:
3151     * * 0: A tensor.
3152     *
3153     * Outputs:
3154     * * 0: The output tensor of same shape as input0.
3155%insert AVAIL3
3156     */
3157    %{DeclareOperation_1.2 ABS 38},
3158
3159    /**
3160     * Returns the index of the largest element along an axis.
3161     *
3162     * Supported tensor {@link %{OperandType}}:
3163     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16}
3164     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32}
3165     * * {@link %{OperandTypeLinkPfx}TENSOR_INT32}
3166     * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM}
3167%kind aidl canonical ndk hal_1.3+
3168     * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{NNAPILevel4})
3169%/kind
3170     *
3171     * Supported tensor rank: from 1
3172     *
3173     * Inputs:
3174     * * 0: An n-D tensor specifying the input. Must be non-empty.
3175     * * 1: An {@link %{OperandTypeLinkPfx}INT32} scalar specifying the axis to
3176     *      reduce across. Negative index is used to specify axis from the
3177     *      end (e.g. -1 for the last axis). Must be in the range [-n, n).
3178     *
3179     * Outputs:
3180     * * 0: An (n - 1)-D {@link %{OperandTypeLinkPfx}TENSOR_INT32} tensor.
3181     *      If input is 1-dimensional, the output shape is [1].
3182%insert AVAIL3
3183     */
3184    // There is no underscore in ARG_MAX to avoid name conflict with
3185    // the macro defined in libc/kernel/uapi/linux/limits.h.
3186    %{DeclareOperation_1.2 ARGMAX 39},
3187
3188    /**
3189     * Returns the index of the smallest element along an axis.
3190     *
3191     * Supported tensor {@link %{OperandType}}:
3192     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16}
3193     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32}
3194     * * {@link %{OperandTypeLinkPfx}TENSOR_INT32}
3195     * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM}
3196%kind aidl canonical ndk hal_1.3+
3197     * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{NNAPILevel4})
3198%/kind
3199     *
3200     * Supported tensor rank: from 1
3201     *
3202     * Inputs:
3203     * * 0: An n-D tensor specifying the input. Must be non-empty.
3204     * * 1: An {@link %{OperandTypeLinkPfx}INT32} scalar specifying the axis to
3205     *      reduce across. Negative index is used to specify axis from the
3206     *      end (e.g. -1 for the last axis). Must be in the range [-n, n).
3207     *
3208     * Outputs:
3209     * * 0: An (n - 1)-D {@link %{OperandTypeLinkPfx}TENSOR_INT32} tensor.
3210     *      If input is 1-dimensional, the output shape is [1].
3211%insert AVAIL3
3212     */
3213%kind aidl
3214    %{DeclareOperation_1.2 ARGMIN 40}, // See ARGMAX for naming discussion.
3215%else
3216    %{DeclareOperation_1.2 ARGMIN 40},  // See ARGMAX for naming discussion.
3217%/kind
3218
3219    /**
3220     * Transform axis-aligned bounding box proposals using bounding box deltas.
3221     *
3222     * Given the positions of bounding box proposals and the corresponding
3223     * bounding box deltas for each class, return the refined bounding box
3224     * regions. The resulting bounding boxes are clipped against the edges of
3225     * the image.
3226     *
3227     * Supported tensor {@link %{OperandType}}:
3228     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16}
3229     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32}
3230     * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT16_ASYMM}
3231     *
3232     * Inputs:
3233     * * 0: A 2-D Tensor of shape [num_rois, 4], specifying the locations of the
3234     *      bounding box proposals, each line with format [x1, y1, x2, y2].
3235     *      For tensor of type {@link %{OperandTypeLinkPfx}TENSOR_QUANT16_ASYMM},
3236     *      the zeroPoint must be 0 and the scale must be 0.125. Zero num_rois
3237     *      is supported for this tensor.
3238     * * 1: A 2-D Tensor of shape [num_rois, num_classes * 4], specifying the
3239     *      bounding box delta for each region of interest and each class. The
3240     *      bounding box deltas are organized in the following order
3241     *      [dx, dy, dw, dh], where dx and dy is the relative correction factor
3242     *      for the center position of the bounding box with respect to the width
3243     *      and height, dw and dh is the log-scale relative correction factor
3244     *      for the width and height. For input0 of type
3245     *      {@link %{OperandTypeLinkPfx}TENSOR_QUANT16_ASYMM}, this tensor should be
3246%kind aidl canonical ndk hal_1.3+
3247     *      of {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} or
3248     *      {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED}. Zero num_rois is
3249%else
3250     *      of {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM}. Zero num_rois is
3251%/kind
3252     *      supported for this tensor.
3253     * * 2: A 1-D {@link %{OperandTypeLinkPfx}TENSOR_INT32} tensor, of shape
3254     *      [num_rois], specifying the batch index of each box. Boxes with
3255     *      the same batch index are grouped together. Zero num_rois is
3256     *      supported for this tensor.
3257     * * 3: A 2-D Tensor of shape [batches, 2], specifying the information of
3258     *      each image in the batch, each line with format
3259     *      [image_height, image_width].
3260     *
3261     * Outputs:
3262     * * 0: A tensor of the same {@link %{OperandType}} as input0, with shape
3263     *      [num_rois, num_classes * 4], specifying the coordinates of each
3264     *      output bounding box for each class, with format [x1, y1, x2, y2].
3265     *      For type of {@link %{OperandTypeLinkPfx}TENSOR_QUANT16_ASYMM}, the
3266     *      scale must be 0.125 and the zero point must be 0.
3267%insert AVAIL3
3268     */
3269    %{DeclareOperation_1.2 AXIS_ALIGNED_BBOX_TRANSFORM 41},
3270
3271    /**
3272     * A recurrent neural network layer that applies an LSTM cell to a
3273     * sequence of inputs in forward and backward directions.
3274     *
3275     * The op supports cross-linking via an auxiliary input. Regular cell feeds
3276     * one input into the two RNN cells in the following way:
3277     *
3278     *       INPUT  (INPUT_REVERSED)
3279     *         |         |
3280     *    ---------------------
3281     *    | FW_LSTM   BW_LSTM |
3282     *    ---------------------
3283     *         |         |
3284     *      FW_OUT     BW_OUT
3285     *
3286     * An op with cross-linking takes two inputs and feeds them into the RNN
3287     * cells in the following way:
3288     *
3289     *       AUX_INPUT   (AUX_INPUT_REVERSED)
3290     *           |             |
3291     *     INPUT | (INPUT_R'D.)|
3292     *       |   |       |     |
3293     *    -----------------------
3294     *    |  \  /        \    / |
3295     *    | FW_LSTM     BW_LSTM |
3296     *    -----------------------
3297     *         |           |
3298     *      FW_OUT      BW_OUT
3299     *
3300     * The cross-linking mode is enabled iff auxiliary input and auxiliary
3301     * weights are present. While stacking this op on top of itself, this
3302     * allows to connect both forward and backward outputs from previous cell
3303     * to the next cell's input.
3304     *
3305%kind aidl canonical ndk hal_1.3+
3306     * Since %{NNAPILevel4}, parallel linking mode is supported. The mode is
3307     * enabled if auxiliary input is present but auxiliary weights are omitted.
3308     * In this case, the cell feeds inputs into the RNN in the following way:
3309     *
3310     *       INPUT (AUX_INPUT_REVERSED)
3311     *         |         |
3312     *    ---------------------
3313     *    | FW_LSTM   BW_LSTM |
3314     *    ---------------------
3315     *         |         |
3316     *      FW_OUT     BW_OUT
3317     *
3318     * While stacking this op on top of itself, this allows to connect both
3319     * forward and backward outputs from previous cell to the next cell's
3320     * corresponding inputs.
3321     *
3322%/kind
3323     * Supported tensor {@link %{OperandType}}:
3324     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16}
3325     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32}
3326     *
3327     * Supported tensor rank: 3, either time-major or batch-major.
3328     *
3329     * All input and output tensors must be of the same type.
3330     *
3331     * Inputs:
3332     * * 0: The input.
3333     *      A 3-D tensor of shape:
3334     *        If time-major: [max_time, batch_size, input_size]
3335     *        If batch-major: [batch_size, max_time, input_size]
3336     *      where "max_time" is the number of timesteps (sequence length),
3337     *      "batch_size" corresponds to the batching dimension, and
3338     *      "input_size" is the size of the input.
3339     * * 1: The forward input-to-input weights. Optional.
3340     *      A 2-D tensor of shape [fw_num_units, input_size], where “fw_num_units”
3341     *      corresponds to the number of forward cell units.
3342     * * 2: The forward input-to-forget weights.
3343     *      A 2-D tensor of shape [fw_num_units, input_size].
3344     * * 3: The forward input-to-cell weights.
3345     *      A 2-D tensor of shape [fw_num_units, input_size].
3346     * * 4: The forward input-to-output weights.
3347     *      A 2-D tensor of shape [fw_num_units, input_size].
3348     * * 5: The forward recurrent-to-input weights. Optional.
3349     *      A 2-D tensor of shape [fw_num_units, fw_output_size], where “fw_output_size”
3350     *      corresponds to either the number of cell units (i.e., fw_num_units),
3351     *      or the second dimension of the “fw_projection_weights”, if defined.
3352     * * 6: The forward recurrent-to-forget weights.
3353     *      A 2-D tensor of shape [fw_num_units, fw_output_size].
3354     * * 7: The forward recurrent-to-cell weights.
3355     *      A 2-D tensor of shape [fw_num_units, fw_output_size].
3356     * * 8: The forward recurrent-to-output weights.
3357     *      A 2-D tensor of shape [fw_num_units, fw_output_size].
3358     * * 9: The forward cell-to-input weights. Optional.
3359     *      A 1-D tensor of shape [fw_num_units].
3360     * * 10: The forward cell-to-forget weights. Optional.
3361     *       A 1-D tensor of shape [fw_num_units].
3362     * * 11: The forward cell-to-output weights. Optional.
3363     *       A 1-D tensor of shape [fw_num_units].
3364     * * 12: The forward input gate bias. Optional.
3365     *       A 1-D tensor of shape [fw_num_units].
3366     * * 13: The forward forget gate bias.
3367     *       A 1-D tensor of shape [fw_num_units].
3368     * * 14: The forward cell gate bias.
3369     *       A 1-D tensor of shape [fw_num_units].
3370     * * 15: The forward output gate bias.
3371     *       A 1-D tensor of shape [fw_num_units].
3372     * * 16: The forward projection weights. Optional.
3373     *       A 2-D tensor of shape [fw_output_size, fw_num_units].
3374     * * 17: The forward projection bias. Optional.
3375     *       A 1-D tensor of shape [fw_output_size].
3376     * * 18: The backward input-to-input weights. Optional.
3377     *       A 2-D tensor of shape [bw_num_units, input_size], where “bw_num_units”
3378     *       corresponds to the number of backward cell units.
3379     * * 19: The backward input-to-forget weights.
3380     *       A 2-D tensor of shape [bw_num_units, input_size].
3381     * * 20: The backward input-to-cell weights.
3382     *       A 2-D tensor of shape [bw_num_units, input_size].
3383     * * 21: The backward input-to-output weights.
3384     *       A 2-D tensor of shape [bw_num_units, input_size].
3385     * * 22: The backward recurrent-to-input weights. Optional.
3386     *       A 2-D tensor of shape [bw_num_units, bw_output_size], where “bw_output_size”
3387     *       corresponds to either the number of cell units (i.e., “bw_num_units”),
3388     *       or the second dimension of the “bw_projection_weights”, if defined.
3389     * * 23: The backward recurrent-to-forget weights.
3390     *       A 2-D tensor of shape [bw_num_units, bw_output_size].
3391     * * 24: The backward recurrent-to-cell weights.
3392     *       A 2-D tensor of shape [bw_num_units, bw_output_size].
3393     * * 25: The backward recurrent-to-output weights.
3394     *       A 2-D tensor of shape [bw_num_units, bw_output_size].
3395     * * 26: The backward cell-to-input weights. Optional.
3396     *       A 1-D tensor of shape [bw_num_units].
3397     * * 27: The backward cell-to-forget weights. Optional.
3398     *       A 1-D tensor of shape [bw_num_units].
3399     * * 28: The backward cell-to-output weights. Optional.
3400     *       A 1-D tensor of shape [bw_num_units].
3401     * * 29: The backward input gate bias. Optional.
3402     *       A 1-D tensor of shape [bw_num_units].
3403     * * 30: The backward forget gate bias.
3404     *       A 1-D tensor of shape [bw_num_units].
3405     * * 31: The backward cell gate bias.
3406     *       A 1-D tensor of shape [bw_num_units].
3407     * * 32: The backward output gate bias.
3408     *       A 1-D tensor of shape [bw_num_units].
3409     * * 33: The backward projection weights. Optional.
3410     *       A 2-D tensor of shape [bw_output_size, bw_num_units].
3411     * * 34: The backward projection bias. Optional.
3412     *       A 1-D tensor of shape [bw_output_size].
3413     * * 35: The forward input activation state.
3414     *       A 2-D tensor of shape [batch_size, bw_output_size].
3415     * * 36: The forward input cell state.
3416     *       A 2-D tensor of shape [batch_size, bw_num_units].
3417     * * 37: The backward input activation state.
3418     *       A 2-D tensor of shape [batch_size, bw_output_size].
3419     * * 38: The backward input cell state.
3420     *       A 2-D tensor of shape [batch_size, bw_num_units].
3421%kind aidl canonical ndk hal_1.3+
3422     * * 39: The auxiliary input. Optional.
3423     *       A 3-D tensor of shape [max_time, batch_size, aux_input_size],
3424     *       where “batch_size” corresponds to the batching dimension, and
3425     *       “aux_input_size” is the size of the auxiliary input. Optional. See
3426     *       the docs above for the usage modes explanation.
3427     * * 40: The forward auxiliary input-to-input weights.
3428     *       Optional. See the docs above for the usage modes explanation.
3429     *       A 2-D tensor of shape [fw_num_units, aux_input_size].
3430     * * 41: The forward auxiliary input-to-forget weights.
3431     *       Optional. See the docs above for the usage modes explanation.
3432     *       A 2-D tensor of shape [fw_num_units, aux_input_size].
3433     * * 42: The forward auxiliary input-to-cell weights.
3434     *       Optional. See the docs above for the usage modes explanation.
3435     *       A 2-D tensor of shape [fw_num_units, aux_input_size].
3436     * * 43: The forward auxiliary input-to-output weights.
3437     *       Optional. See the docs above for the usage modes explanation.
3438     *       A 2-D tensor of shape [fw_num_units, aux_input_size].
3439     * * 44: The backward auxiliary input-to-input weights.
3440     *       Optional. See the docs above for the usage modes explanation.
3441     *       A 2-D tensor of shape [bw_num_units, aux_input_size].
3442     * * 45: The backward auxiliary input-to-forget weights.
3443     *       Optional. See the docs above for the usage modes explanation.
3444     *       A 2-D tensor of shape [bw_num_units, aux_input_size].
3445     * * 46: The backward auxiliary input-to-cell weights.
3446     *       Optional. See the docs above for the usage modes explanation.
3447     *       A 2-D tensor of shape [bw_num_units, aux_input_size].
3448     * * 47: The backward auxiliary input-to-output weights.
3449     *       Optional. See the docs above for the usage modes explanation.
3450     *       A 2-D tensor of shape [bw_num_units, aux_input_size].
3451%else
3452     * * 39: The auxiliary input. Optional.
3453     *       A 3-D tensor of shape [max_time, batch_size, input_size], where “batch_size”
3454     *       corresponds to the batching dimension, and “input_size” is the size
3455     *       of the input.
3456     * * 40: The forward auxiliary input-to-input weights. Optional.
3457     *       A 2-D tensor of shape [fw_num_units, input_size].
3458     * * 41: The forward auxiliary input-to-forget weights. Optional.
3459     *       A 2-D tensor of shape [fw_num_units, input_size].
3460     * * 42: The forward auxiliary input-to-cell weights. Optional.
3461     *       A 2-D tensor of shape [fw_num_units, input_size].
3462     * * 43: The forward auxiliary input-to-output weights. Optional.
3463     *       A 2-D tensor of shape [fw_num_units, input_size].
3464     * * 44: The backward auxiliary input-to-input weights. Optional.
3465     *       A 2-D tensor of shape [bw_num_units, input_size].
3466     * * 45: The backward auxiliary input-to-forget weights. Optional.
3467     *       A 2-D tensor of shape [bw_num_units, input_size].
3468     * * 46: The backward auxiliary input-to-cell weights. Optional.
3469     *       A 2-D tensor of shape [bw_num_units, input_size].
3470     * * 47: The backward auxiliary input-to-output weights. Optional.
3471     *       A 2-D tensor of shape [bw_num_units, input_size].
3472%/kind
3473     * * 48: The activation function.
3474     *       A value indicating the activation function:
3475     *       <ul>
3476     *       <li>0: None;
3477     *       <li>1: Relu;
3478     *       <li>3: Relu6;
3479     *       <li>4: Tanh;
3480     *       <li>6: Sigmoid.
3481     *       </ul>
3482     * * 49: The clipping threshold for the cell state, such
3483     *       that values are bound within [-cell_clip, cell_clip]. If set to 0.0
3484     *       then clipping is disabled.
3485     *       If all the input tensors have type {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32},
3486     *       this scalar must be of the type {@link %{OperandTypeLinkPfx}FLOAT32},
3487     *       otherwise if all the input tensors have the type
3488     *       {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16}, this scalar must be
3489     *       of type {@link %{OperandTypeLinkPfx}FLOAT16}.
3490     * * 50: The clipping threshold for the output from the
3491     *       projection layer, such that values are bound within
3492     *       [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
3493     *       If all the input tensors have type {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32},
3494     *       this scalar must be of the type {@link %{OperandTypeLinkPfx}FLOAT32},
3495     *       otherwise if all the input tensors have the type
3496     *       {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16}, this scalar must be
3497     *       of type {@link %{OperandTypeLinkPfx}FLOAT16}.
3498     * * 51: merge_outputs
3499     *       An {@link %{OperandTypeLinkPfx}BOOL} scalar specifying if the outputs
3500     *       from forward and backward cells should be merged.
3501     * * 52: time_major
3502     *       An {@link %{OperandTypeLinkPfx}BOOL} scalar specifying the shape format
3503     *       of input and output tensors.
3504     * * 53: The forward input layer normalization weights. Optional.
3505     *       A 1-D tensor of shape [fw_num_units]. Used to rescale normalized inputs
3506     *       to activation at input gate.
3507     * * 54: The forward forget layer normalization weights. Optional.
3508     *       A 1-D tensor of shape [fw_num_units]. Used to rescale normalized inputs
3509     *       to activation at forget gate.
3510     * * 55: The forward cell layer normalization weights. Optional.
3511     *       A 1-D tensor of shape [fw_num_units]. Used to rescale normalized inputs
3512     *       to activation at cell gate.
3513     * * 56: The forward output layer normalization weights. Optional.
3514     *       A 1-D tensor of shape [fw_num_units]. Used to rescale normalized inputs
3515     *       to activation at output gate.
3516     * * 57: The backward input layer normalization weights. Optional.
3517     *       A 1-D tensor of shape [bw_num_units]. Used to rescale normalized inputs
3518     *       to activation at input gate.
3519     * * 58: The backward forget layer normalization weights. Optional.
3520     *       A 1-D tensor of shape [bw_num_units]. Used to rescale normalized inputs
3521     *       to activation at forget gate.
3522     * * 59: The backward cell layer normalization weights. Optional.
3523     *       A 1-D tensor of shape [bw_num_units]. Used to rescale normalized inputs
3524     *       to activation at cell gate.
3525     * * 60: The backward output layer normalization weights. Optional.
3526     *       A 1-D tensor of shape [bw_num_units]. Used to rescale normalized inputs
3527     *       to activation at output gate.
3528     *
3529     * Outputs:
3530     * * 0: The forward output.
3531     *      A 3-D tensor of shape:
3532     *        If time-major and not merge_outputs:
3533     *          [max_time, batch_size, fw_output_size]
3534     *        If time-major and merge_outputs:
3535     *          [max_time, batch_size, fw_output_size + bw_output_size]
3536     *        If batch-major and not merge_outputs:
3537     *          [batch_size, max_time, fw_output_size]
3538     *        If batch-major and merge_outputs:
3539     *          [batch_size, max_time, fw_output_size + bw_output_size]
3540     * * 1: The backward output.  Unused if merge_outputs is true.
3541     *      A 3-D tensor of shape:
3542     *        If time-major: [max_time, batch_size, bw_output_size]
3543     *        If batch-major: [batch_size, max_time, bw_output_size]
3544%kind aidl canonical ndk hal_1.3+
3545     * * 2: The forward activation state output.
3546     *      A 2-D tensor of shape [batch_size, fw_output_size] containing an
3547     *      activation state from the last time step in the sequence. This
3548     *      output is optional and can be omitted. If this output is present
3549     *      then outputs 3-5 must be present as well.
3550     *      Available since %{NNAPILevel4}.
3551     * * 3: The forward cell state output.
3552     *      A tensor of shape [batch_size, fw_cell_size] containing a cell state
3553     *      from the last time step in the sequence. This output is optional
3554     *      and can be omitted. If this output is present
3555     *      then outputs 2, 4, 5 must be present as well.
3556     *      Available since %{NNAPILevel4}.
3557     * * 4: The backward activation state output.
3558     *      A 2-D tensor of shape [batch_size, bw_output_size] containing an
3559     *      activation state from the last time step in the sequence. This
3560     *      output is optional and can be omitted. If this output is present
3561     *      then outputs 2, 3, 5 must be present as well.
3562     *      Available since %{NNAPILevel4}.
3563     * * 5: The backward cell state output.
3564     *      A tensor of shape [batch_size, bw_cell_size] containing a cell state
3565     *      from the last time step in the sequence. This output is optional
3566     *      and can be omitted. If this output is present
3567     *      then outputs 2-4 must be present as well.
3568     *      Available since %{NNAPILevel4}.
3569%/kind
3570%insert AVAIL3
3571%insert OutputState
3572     */
3573    %{DeclareOperation_1.2 BIDIRECTIONAL_SEQUENCE_LSTM 42},
3574
3575    /**
3576     * A recurrent neural network layer that applies a basic RNN cell to a
3577     * sequence of inputs in forward and backward directions.
3578     *
3579     * This Op unrolls the input along the sequence dimension, and implements
3580     * the following operation for each element in the sequence s =
3581     * 1...sequence_length:
3582     *   fw_outputs[s] = fw_state = activation(inputs[s] * fw_input_weights’ +
3583     *          fw_state * fw_recurrent_weights’ + fw_bias)
3584     *
3585     * And for each element in sequence t = sequence_length : 1
3586     *   bw_outputs[t] = bw_state = activation(inputs[t] * bw_input_weights’ +
3587     *          bw_state * bw_recurrent_weights’ + bw_bias)
3588     *
3589     * Where:
3590     * * “{fw,bw}_input_weights” is a weight matrix that multiplies the inputs;
3591     * * “{fw,bw}_recurrent_weights” is a weight matrix that multiplies the
3592     *    current “state” which itself is the output from the previous time step
3593     *    computation;
3594     * * “{fw,bw}_bias” is a bias vector (added to each output vector in the
3595     *    batch);
3596     * * “activation” is the function passed as the “fused_activation_function”
3597     *   argument (if not “NONE”).
3598     *
3599     * The op supports cross-linking via an auxiliary input. Regular cell feeds
3600     * one input into the two RNN cells in the following way:
3601     *
3602     *       INPUT  (INPUT_REVERSED)
3603     *         |         |
3604     *    ---------------------
3605     *    | FW_RNN     BW_RNN |
3606     *    ---------------------
3607     *         |         |
3608     *      FW_OUT     BW_OUT
3609     *
3610     * An op with cross-linking takes two inputs and feeds them into the RNN
3611     * cells in the following way:
3612     *
3613     *       AUX_INPUT   (AUX_INPUT_REVERSED)
3614     *           |             |
3615     *     INPUT | (INPUT_R'D.)|
3616     *       |   |       |     |
3617     *    -----------------------
3618     *    |  \  /        \    / |
3619     *    | FW_RNN       BW_RNN |
3620     *    -----------------------
3621     *         |           |
3622     *      FW_OUT      BW_OUT
3623     *
3624     * The cross-linking mode is enabled iff auxiliary input and auxiliary
3625     * weights are present. While stacking this op on top of itself, this
3626     * allows connecting both forward and backward outputs from the previous
3627     * cell to the next cell's input.
3628     *
3629%kind aidl canonical ndk hal_1.3+
3630     * Since %{NNAPILevel4} parallel linking mode is supported. The mode is
3631     * enabled if auxiliary input is present but auxiliary weights are omitted.
3632     * In this case, the cell feeds inputs into the RNN in the following way:
3633     *
3634     *       INPUT (AUX_INPUT_REVERSED)
3635     *         |         |
3636     *    ---------------------
3637     *    | FW_RNN     BW_RNN |
3638     *    ---------------------
3639     *         |         |
3640     *      FW_OUT     BW_OUT
3641     *
3642     * While stacking this op on top of itself, this allows connecting both
3643     * forward and backward outputs from the previous cell to the next cell's
3644     * corresponding inputs.
3645     *
3646%/kind
3647     * Supported tensor {@link %{OperandType}}:
3648     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16}
3649     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32}
3650     *
3651     * The input tensors must all be the same type.
3652     *
3653     * Inputs:
3654     * * 0: input.
3655     *      A 3-D tensor. The shape is defined by the input 13 (timeMajor). If
3656     *      it is set to true, then the input has a shape [maxTime, batchSize,
3657     *      inputSize], otherwise the input has a shape [batchSize, maxTime,
3658     *      inputSize].
3659     * * 1: fwWeights.
3660     *      A 2-D tensor of shape [fwNumUnits, inputSize].
3661     * * 2: fwRecurrentWeights.
3662     *      A 2-D tensor of shape [fwNumUnits, fwNumUnits].
3663     * * 3: fwBias.
3664     *      A 1-D tensor of shape [fwNumUnits].
3665     * * 4: fwHiddenState.
3666     *      A 2-D tensor of shape [batchSize, fwNumUnits]. Specifies a hidden
3667     *      state input for the first time step of the computation.
3668     * * 5: bwWeights.
3669     *      A 2-D tensor of shape [bwNumUnits, inputSize].
3670     * * 6: bwRecurrentWeights.
3671     *      A 2-D tensor of shape [bwNumUnits, bwNumUnits].
3672     * * 7: bwBias.
3673     *      A 1-D tensor of shape [bwNumUnits].
3674     * * 8: bwHiddenState
3675     *      A 2-D tensor of shape [batchSize, bwNumUnits]. Specifies a hidden
3676     *      state input for the first time step of the computation.
3677%kind aidl canonical ndk hal_1.3+
3678     * * 9: auxInput.
3679     *      A 3-D tensor. The shape is defined by the input 13 (timeMajor). If
3680     *      it is set to true, then the input has a shape [maxTime, batchSize,
3681     *      auxInputSize], otherwise the input has a shape [batchSize, maxTime,
3682     *      auxInputSize]. Can be omitted. See the docs above for the usage
3683     *      modes explanation.
3684     * * 10:fwAuxWeights.
3685     *      A 2-D tensor of shape [fwNumUnits, auxInputSize]. Can be omitted.
3686     *      See the docs above for the usage modes explanation.
3687     * * 11:bwAuxWeights.
3688     *      A 2-D tensor of shape [bwNumUnits, auxInputSize]. Can be omitted.
3689     *      See the docs above for the usage modes explanation.
3690%else
3691     * * 9: auxInput.
3692     *      A 3-D tensor. The shape is the same as of the input 0.
3693     * * 10:fwAuxWeights.
3694     *      A 2-D tensor of shape [fwNumUnits, inputSize].
3695     * * 11:bwAuxWeights.
3696     *      A 2-D tensor of shape [bwNumUnits, inputSize].
3697%/kind
3698     * * 12:fusedActivationFunction.
3699     *      A {@link %{FusedActivationFunc}} value indicating the activation function. If
3700     *      “NONE” is specified then it results in a linear activation.
3701     * * 13:timeMajor
3702     *      An {@link %{OperandTypeLinkPfx}BOOL} scalar specifying the shape format
3703     *      of input and output tensors.
3704     * * 14:mergeOutputs
3705     *      An {@link %{OperandTypeLinkPfx}BOOL} scalar specifying if the outputs
3706     *      from forward and backward cells are separate (if set to false) or
3707     *      concatenated (if set to true).
3708     * Outputs:
3709     * * 0: fwOutput.
3710     *      A 3-D tensor. The first two dimensions of the shape are defined by
3711     *      the input 13 (timeMajor) and the third dimension is defined by the
3712     *      input 14 (mergeOutputs). If timeMajor is set to true, then the first
3713     *      two dimensions are [maxTime, batchSize], otherwise they are set to
3714     *      [batchSize, maxTime]. If mergeOutputs is set to true, then the third
3715     *      dimension is equal to (fwNumUnits + bwNumUnits), otherwise it is set
3716     *      to fwNumUnits.
3717     * * 1: bwOutput.
3718     *      A 3-D tensor. If the input 14 (mergeOutputs) is set to true, then
3719     *      this tensor is not produced. The shape is defined by the input 13
3720     *      (timeMajor). If it is set to true, then the shape is set to
3721     *      [maxTime, batchSize, bwNumUnits], otherwise the shape is set to
3722     *      [batchSize, maxTime, bwNumUnits].
3723%kind aidl canonical ndk hal_1.3+
3724     * * 2: The forward hidden state output.
3725     *      A 2-D tensor of shape [batchSize, fwNumUnits] containing a hidden
3726     *      state from the last time step in the sequence. This output is
3727     *      optional and can be omitted. If this output is present then output
3728     *      3 must be present as well.
3729     *      Available since %{NNAPILevel4}.
3730     * * 3: The backward hidden state output.
3731     *      A 2-D tensor of shape [batchSize, bwNumUnits] containing a hidden
3732     *      state from the last time step in the sequence. This output is
3733     *      optional and can be omitted. If this output is present then output
3734     *      2 must be present as well.
3735     *      Available since %{NNAPILevel4}.
3736%/kind
3737%insert AVAIL3
3738%insert OutputState
3739     */
3740    %{DeclareOperation_1.2 BIDIRECTIONAL_SEQUENCE_RNN 43},
3741
3742    /**
3743     * Greedily selects a subset of bounding boxes in descending order of score.
3744     *
3745     * This op applies NMS algorithm to each class. In each loop of execution,
3746     * the box with maximum score gets selected and removed from the pending set.
3747     * The scores of the rest of boxes are lowered according to the
3748     * intersection-over-union (IOU) overlapping with the previously selected
3749     * boxes and a specified NMS kernel method. Any boxes with score less
3750     * than a threshold are removed from the pending set.
3751     *
3752     * Three NMS kernels are supported:
3753     * * Hard:     score_new = score_old * (1 if IoU < threshold else 0)
3754     * * Linear:   score_new = score_old * (1 if IoU < threshold else 1 - IoU)
3755     * * Gaussian: score_new = score_old * exp(- IoU^2 / sigma)
3756     *
3757     * Axis-aligned bounding boxes are represented by its upper-left corner
3758     * coordinate (x1,y1) and lower-right corner coordinate (x2,y2). A valid
3759     * bounding box should satisfy x1 <= x2 and y1 <= y2.
3760     *
3761     * Supported tensor {@link %{OperandType}}:
3762     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16}
3763     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32}
3764     * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM}
3765%kind aidl canonical ndk hal_1.3+
3766     * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{NNAPILevel4})
3767%/kind
3768     *
3769     * Inputs:
3770     * * 0: A 2-D Tensor of shape [num_rois, num_classes], specifying the score
3771     *      of each bounding box proposal. The boxes are grouped by batches in the
3772     *      first dimension. Zero num_rois is supported for this tensor.
3773     * * 1: A 2-D Tensor specifying the bounding boxes of shape
3774     *      [num_rois, num_classes * 4], organized in the order [x1, y1, x2, y2].
3775     *      The boxes are grouped by batches in the first dimension. The sequential
3776     *      order of the boxes corresponds with input0. For input0 of type
3777     *      {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM}, this tensor should be of
3778     *      {@link %{OperandTypeLinkPfx}TENSOR_QUANT16_ASYMM}, with zeroPoint of 0 and
3779     *      scale of 0.125.
3780%kind aidl canonical ndk hal_1.3+
3781     *      For input0 of type {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED},
3782     *      this tensor should be of {@link %{OperandTypeLinkPfx}TENSOR_QUANT16_ASYMM},
3783     *      with zeroPoint of -128 and scale of 0.125.
3784%/kind
3785     *      Zero num_rois is supported for this tensor.
3786     * * 2: A 1-D {@link %{OperandTypeLinkPfx}TENSOR_INT32} tensor, of shape
3787     *      [num_rois], specifying the batch index of each box. Boxes with
3788     *      the same batch index are grouped together.
3789     * * 3: An {@link %{OperandTypeLinkPfx}FLOAT32} scalar, score_threshold. Boxes
3790     *      with scores lower than the threshold are filtered before sending
3791     *      to the NMS algorithm.
3792     * * 4: An {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the maximum
3793     *      number of selected bounding boxes for each image. Set to a negative
3794     *      value for unlimited number of output bounding boxes.
3795     * * 5: An {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the NMS
3796     *      kernel method, options are 0:hard, 1:linear, 2:gaussian.
3797     * * 6: An {@link %{OperandTypeLinkPfx}FLOAT32} scalar, specifying the IoU
3798     *      threshold in hard and linear NMS kernel. This field is ignored if
3799     *      gaussian kernel is selected.
3800     * * 7: An {@link %{OperandTypeLinkPfx}FLOAT32} scalar, specifying the sigma in
3801     *      gaussian NMS kernel. This field is ignored if gaussian kernel is
3802     *      not selected.
3803     * * 8: An {@link %{OperandTypeLinkPfx}FLOAT32} scalar, nms_score_threshold.
3804     *      Boxes with scores lower than the threshold are dropped during the
3805     *      score updating phase in soft NMS.
3806     *
3807     * Outputs:
3808     * * 0: A 1-D Tensor of the same {@link %{OperandType}} as input0, with shape
3809     *      [num_output_rois], specifying the score of each output box. The boxes
3810     *      are grouped by batches, but the sequential order in each batch is not
3812%kind aidl canonical ndk hal_1.3+
3813     *      guaranteed. For type of {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM}
3814     *      or {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED},
3815%else
3816     *      guaranteed. For type of {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM}
3817%/kind
3818     *      the scale and zero point must be the same as input0.
3819     * * 1: A 2-D Tensor of the same {@link %{OperandType}} as input1, with shape
3820     *      [num_output_rois, 4], specifying the coordinates of each
3821     *      output bounding box with the same format as input1. The sequential
3822     *      order of the boxes corresponds with output0. For type of
3823     *      {@link %{OperandTypeLinkPfx}TENSOR_QUANT16_ASYMM}, the scale must be
3824     *      0.125 and the zero point must be 0.
3825     * * 2: A 1-D {@link %{OperandTypeLinkPfx}TENSOR_INT32} tensor, of shape
3826     *      [num_output_rois], specifying the class of each output box. The
3827     *      sequential order of the boxes corresponds with output0.
3828     * * 3: A 1-D {@link %{OperandTypeLinkPfx}TENSOR_INT32} tensor, of shape
3829     *      [num_output_rois], specifying the batch index of each box. Boxes
3830     *      with the same batch index are grouped together.
3831%insert AVAIL3
3832     */
3833    %{DeclareOperation_1.2 BOX_WITH_NMS_LIMIT 44},
3834
3835    /**
3836     * Casts a tensor to a type.
3837     *
3838     * This operation ignores the scale and zeroPoint of quantized tensors,
3839     * e.g. it treats a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} input
3840     * as a tensor of uint8 values.
3841     *
3842     * Supported tensor {@link %{OperandType}}:
3843     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16}
3844     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32}
3845     * * {@link %{OperandTypeLinkPfx}TENSOR_INT32}
3846     * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM}
3847%kind aidl canonical ndk hal_1.3+
3848     * Since %{NNAPILevel4}, casting tensors of the following
3849     * {@link %{OperandType}} to the same {@link %{OperandType}} is supported:
3850     * * {@link %{OperandTypeLinkPfx}TENSOR_BOOL8}
3851     * * {@link %{OperandTypeLinkPfx}TENSOR_INT32}
3852     * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT16_ASYMM}
3853     * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT16_SYMM}
3854     * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED}
3855     * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_SYMM}
3856%/kind
3857     *
3858     * Supported tensor rank: from 1
3859     *
3860     * Inputs:
3861     * * 0: A tensor.
3862     *
3863     * Outputs:
3864     * * 0: A tensor with the same shape as input0.
3865%insert AVAIL3
3866     */
3867    %{DeclareOperation_1.2 CAST 45},
3868
3869    /**
3870     * Shuffle the channels of the input tensor.
3871     *
3872     * Given an input tensor and an integer value of num_groups, CHANNEL_SHUFFLE
3873     * divides the channel dimension into num_groups groups, and reorganizes the
3874     * channels by grouping channels with the same index in each group.
3875     *
3876     * Along the channel dimension, the output is calculated using this formula:
3877     *
3878     *     output_channel[k * num_groups + g] = input_channel[g * group_size + k]
3879     *
3880     * where group_size = num_channels / num_groups
3881     *
3882     * The number of channels must be divisible by num_groups.
3883     *
3884     * Supported tensor {@link %{OperandType}}:
3885     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16}
3886     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32}
3887     * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM}
3888%kind aidl canonical ndk hal_1.3+
3889     * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{NNAPILevel4})
3890%/kind
3891     *
3892     * Supported tensor rank: up to 4
3893     *
3894     * Inputs:
3895     * * 0: An n-D tensor, specifying the tensor to be shuffled.
3896     * * 1: An {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the number of
3897     *      groups.
3898     * * 2: An {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the dimension
3899     *      channel shuffle would be performed on. Negative index is used to
3900     *      specify axis from the end (e.g. -1 for the last axis). Must be in
3901     *      the range [-n, n).
3902     *
3903     * Outputs:
3904     * * 0: A tensor of the same {@link %{OperandType}} and same shape as input0.
3905%kind aidl canonical ndk hal_1.3+
3906     *      For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} and
3907     *      {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} tensor,
3908     *      the scale and zeroPoint must be the same as input0.
3909%else
3910     *      For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} tensor,
3911     *      the scale and zeroPoint must be the same as input0.
3912%/kind
3913%insert AVAIL3
3914     */
3915    %{DeclareOperation_1.2 CHANNEL_SHUFFLE 46},
3916
3917    /**
3918     * Apply postprocessing steps to bounding box detections.
3919     *
3920     * Bounding box detections are generated by applying transformation on a set
3921     * of predefined anchors with the bounding box deltas from bounding box
3922     * regression. A final step of hard NMS is applied to limit the number of
3923     * returned boxes.
3924     *
3925     * Supported tensor {@link %{OperandType}}:
3926     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16}
3927     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32}
3928     *
3929     * Inputs:
3930     * * 0: A 3-D Tensor of shape [batches, num_anchors, num_classes], specifying
3931     *      the score of each anchor with each class. Class 0 for each
3932     *      [batches, num_anchors, 0] is background and will be ignored.
3933     * * 1: A 3-D Tensor of shape [batches, num_anchors, length_box_encoding], with
3934     *      the first four values in length_box_encoding specifying the bounding
3935     *      box deltas. The box deltas are encoded in the order of [dy, dx, dh, dw],
3936     *      where dy and dx is the linear-scale relative correction factor for the
3937     *      center position of the bounding box with respect to the width and height,
3938     *      dh and dw is the log-scale relative correction factor for the width and
3939     *      height. All the entries in length_box_encoding beyond the first four
3940     *      values are ignored in this operation.
3941     * * 2: A 2-D Tensor of shape [num_anchors, 4], specifying the shape of each
3942     *      predefined anchor, with format [ctr_y, ctr_x, h, w], where ctr_y and
3943     *      ctr_x are the center position of the box, and h and w are the height
3944     *      and the width.
3945     * * 3: An {@link %{OperandTypeLinkPfx}FLOAT32} scalar, specifying the scaling
3946     *      factor for dy in bounding box deltas.
3947     * * 4: An {@link %{OperandTypeLinkPfx}FLOAT32} scalar, specifying the scaling
3948     *      factor for dx in bounding box deltas.
3949     * * 5: An {@link %{OperandTypeLinkPfx}FLOAT32} scalar, specifying the scaling
3950     *      factor for dh in bounding box deltas.
3951     * * 6: An {@link %{OperandTypeLinkPfx}FLOAT32} scalar, specifying the scaling
3952     *      factor for dw in bounding box deltas.
3953     * * 7: An {@link %{OperandTypeLinkPfx}BOOL} scalar, set to true to use regular
3954     *      multi-class NMS algorithm that does NMS separately for each class,
3955     *      set to false for a faster algorithm that only does one single NMS
3956     *      using the highest class score.
3957     * * 8: An {@link %{OperandTypeLinkPfx}INT32} scalar, max_num_detections, specifying
3958     *      the maximum number of boxes for the output. Boxes with the lowest
3959     *      scores are discarded to meet the limit.
3960     * * 9: An {@link %{OperandTypeLinkPfx}INT32} scalar, only used when input7 is
3961     *      set to false, specifying the maximum number of classes per detection.
3962     * * 10: An {@link %{OperandTypeLinkPfx}INT32} scalar, only used when input7 is
3963     *       set to true, specifying the maximum number of detections when
3964     *       applying NMS algorithm for each single class.
3965     * * 11: A scalar, score_threshold. Boxes with scores lower than the
3966     *       threshold are filtered before sending to the NMS algorithm. The
3967     *       scalar must be of {@link %{OperandTypeLinkPfx}FLOAT16} if input0 is of
3968     *       {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16} and of
3969     *       {@link %{OperandTypeLinkPfx}FLOAT32} if input0 is of
3970     *       {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32}.
3971     * * 12: A scalar, specifying the IoU threshold for hard NMS. The scalar
3972     *       must be of {@link %{OperandTypeLinkPfx}FLOAT16} if input0 is of
3973     *       {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16} and of
3974     *       {@link %{OperandTypeLinkPfx}FLOAT32} if input0 is of
3975     *       {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32}.
3976     * * 13: An {@link %{OperandTypeLinkPfx}BOOL} scalar, set to true to include
3977     *       background class in the list of label map for the output, set
3978     *       to false to not include the background. When the background
3979     *       class is included, it has label 0 and the output classes start
3980     *       at 1 in the label map, otherwise, the output classes start at 0.
3981     *
3982     * Outputs:
3983     * * 0: A 2-D tensor of the same {@link %{OperandType}} as input0, with shape
3984     *      [batches, max_num_detections], specifying the score of each output
3985     *      detections.
3986     * * 1: A 3-D tensor of shape [batches, max_num_detections, 4], specifying the
3987     *      coordinates of each output bounding box, with format
3988     *      [y1, x1, y2, x2].
3989     * * 2: A 2-D {@link %{OperandTypeLinkPfx}TENSOR_INT32} tensor, of shape
3990     *      [batches, max_num_detections], specifying the class label for each
3991     *      output detection.
3992     * * 3: An 1-D {@link %{OperandTypeLinkPfx}TENSOR_INT32} tensor, of shape [batches],
3993     *      specifying the number of valid output detections for each batch.
3994%insert AVAIL3
3995     */
3996    %{DeclareOperation_1.2 DETECTION_POSTPROCESSING 47},
3997
3998    /**
3999     * For input tensors x and y, computes x == y elementwise.
4000     *
4001     * Supported tensor {@link %{OperandType}}:
4002     * * {@link %{OperandTypeLinkPfx}TENSOR_BOOL8}
4003     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16}
4004     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32}
4005     * * {@link %{OperandTypeLinkPfx}TENSOR_INT32}
4006     * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM}
4007%kind aidl canonical ndk hal_1.3+
4008     * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{NNAPILevel4})
4009%/kind
4010     *
4011     * Supported tensor rank: from 1
4012     *
4013     * This operation supports broadcasting.
4014     *
4015     * Inputs:
4016     * * 0: A tensor.
4017     * * 1: A tensor of the same {@link %{OperandType}} and dimensions compatible
4018     *      with input0.
4019     *
4020     * Outputs:
4021     * * 0: A tensor of {@link %{OperandTypeLinkPfx}TENSOR_BOOL8}.
4022%insert AVAIL3
4023     */
4024    %{DeclareOperation_1.2 EQUAL 48},
4025
4026    /**
4027     * Computes exponential of x element-wise.
4028     *
4029     * Supported tensor {@link %{OperandType}}:
4030     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16}
4031     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32}
4032     *
4033     * Supported tensor rank: from 1.
4034     *
4035     * Inputs:
4036     * * 0: A tensor.
4037     *
4038     * Outputs:
4039     * * 0: The output tensor of same shape as input0.
4040%insert AVAIL3
4041     */
4042    %{DeclareOperation_1.2 EXP 49},
4043
4044    /**
4045     * Inserts a dimension of 1 into a tensor's shape.
4046     *
4047     * Given a tensor input, this operation inserts a dimension of 1 at the
4048     * given dimension index of input's shape. The dimension index starts at
4049     * zero; if you specify a negative dimension index, it is counted backward
4050     * from the end.
4051     *
4052     * Supported tensor {@link %{OperandType}}:
4053     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16}
4054     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32}
4055     * * {@link %{OperandTypeLinkPfx}TENSOR_INT32}
4056     * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM}
4057%kind aidl canonical ndk hal_1.3+
4058     * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{NNAPILevel4})
4059%/kind
4060     *
4061     * Supported tensor rank: from 1
4062     *
4063     * Inputs:
4064     * * 0: An n-D tensor.
4065     * * 1: An {@link %{OperandTypeLinkPfx}INT32} scalar specifying the dimension
4066     *      index to expand. Must be in the range [-(n + 1), (n + 1)).
4067     *
4068     * Outputs:
4069     * * 0: An (n + 1)-D tensor with the same {@link %{OperandType}} and data as
4070     *      input0.
4071%kind aidl canonical ndk hal_1.3+
4072     *      For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} and
4073     *      {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} tensor,
4074%else
4075     *      For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} tensor,
4076%/kind
4077     *      the scale and zeroPoint must be the same as input0.
4078%insert AVAIL3
4079     */
4080    %{DeclareOperation_1.2 EXPAND_DIMS 50},
4081
4082    /**
4083     * Gathers values along an axis.
4084     *
4085     * Produces an output tensor with shape
4086     *     input0.dimension[:axis] + indices.dimension + input0.dimension[axis + 1:]
4087     * where:
4088     *     # Vector indices (output is rank(input0)).
4089     *     output[a_0, ..., a_n, i, b_0, ..., b_n] =
4090     *       input0[a_0, ..., a_n, indices[i], b_0, ..., b_n]
4091     *
4092     *     # Higher rank indices (output is rank(input0) + rank(indices) - 1).
4093     *     output[a_0, ..., a_n, i, ..., j, b_0, ... b_n] =
4094     *       input0[a_0, ..., a_n, indices[i, ..., j], b_0, ..., b_n]
4095     *
4096     * Supported tensor {@link %{OperandType}}:
4097     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16}
4098     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32}
4099     * * {@link %{OperandTypeLinkPfx}TENSOR_INT32}
4100     * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM}
4101%kind aidl canonical ndk hal_1.3+
4102     * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{NNAPILevel4})
4103%/kind
4104     *
4105     * Supported tensor rank: from 1
4106     *
4107     * Inputs:
4108     * * 0: An n-D tensor from which to gather values.
4109     * * 1: An {@link %{OperandTypeLinkPfx}INT32} scalar specifying the axis.
4110     *      Negative index is used to specify axis from the end
4111     *      (e.g. -1 for the last axis). Must be in the range [-n, n).
4112     * * 2: A k-D tensor {@link %{OperandTypeLinkPfx}TENSOR_INT32} of indices.
4113     *      The values must be in the bounds of the corresponding dimensions
4114     *      of input0.
4115     *
4116     * Outputs:
4117     * * 0: An (n + k - 1)-D tensor with the same {@link %{OperandType}} as input0.
4118%kind aidl canonical ndk hal_1.3+
4119     *      For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} and
4120     *      {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} tensor,
4121%else
4122     *      For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} tensor,
4123%/kind
4124     *      the scale and zeroPoint must be the same as input0.
4125%insert AVAIL3
4126     */
4127    %{DeclareOperation_1.2 GATHER 51},
4128
4129    /**
     * Generate axis-aligned bounding box proposals.
4131     *
4132     * Bounding box proposals are generated by applying transformation on a set
4133     * of predefined anchors with the bounding box deltas from bounding box
4134     * regression. A final step of hard NMS is applied to limit the number of
4135     * returned boxes.
4136     *
4137     * Axis-aligned bounding boxes are represented by its upper-left corner
4138     * coordinate (x1,y1) and lower-right corner coordinate (x2,y2). A valid
4139     * bounding box should satisfy x1 <= x2 and y1 <= y2.
4140     *
4141     * Supported tensor {@link %{OperandType}}:
4142     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16}
4143     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32}
4144     * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM}
4145%kind aidl canonical ndk hal_1.3+
4146     * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{NNAPILevel4})
4147%/kind
4148     *
4149     * Inputs:
4150     * * 0: A 4-D Tensor specifying the score of each anchor at each
4151     *      location. With "NHWC" data layout, the tensor shape is
4152     *      [batches, height, width, num_anchors]. With "NCHW" data layout,
4153     *      the tensor shape is [batches, num_anchors, height, width].
4154     * * 1: A 4-D Tensor specifying the bounding box deltas. With "NHWC" data
4155     *      layout, the tensor shape is [batches, height, width, num_anchors * 4].
4156     *      With "NCHW" data layout, the tensor shape is
4157     *      [batches, num_anchors * 4, height, width]. The box deltas are encoded
4158     *      in the order of [dx, dy, dw, dh], where dx and dy is the linear-scale
4159     *      relative correction factor for the center position of the bounding box
4160     *      with respect to the width and height, dw and dh is the log-scale
     *      relative correction factor for the width and height. The last
     *      dimension is the channel dimension.
4163     * * 2: A 2-D Tensor of shape [num_anchors, 4], specifying the shape of each
4164     *      predefined anchor, with format [x1, y1, x2, y2]. For input0 of type
4165%kind aidl canonical ndk hal_1.3+
4166     *      {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} or
4167     *      {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED}, this tensor should be of
4168%else
4169     *      {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM}, this tensor should be of
4170%/kind
4171     *      {@link %{OperandTypeLinkPfx}TENSOR_QUANT16_SYMM}, with scale of 0.125.
4172     * * 3: A 2-D Tensor of shape [batches, 2], specifying the size of
4173     *      each image in the batch, with format [image_height, image_width].
4174%kind aidl canonical ndk hal_1.3+
4175     *      For input0 of type {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} or
4176     *      {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED}, this
4177%else
4178     *      For input0 of type {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM}, this
4179%/kind
4180     *      tensor should be of {@link %{OperandTypeLinkPfx}TENSOR_QUANT16_SYMM}, with
4181     *      scale of 0.125.
4182     * * 4: An {@link %{OperandTypeLinkPfx}FLOAT32} scalar, specifying the ratio
4183     *      from the height of original image to the height of feature map.
4184     * * 5: An {@link %{OperandTypeLinkPfx}FLOAT32} scalar, specifying the ratio
4185     *      from the width of original image to the width of feature map.
4186     * * 6: An {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the maximum
4187     *      number of boxes before going into the hard NMS algorithm. Boxes
4188     *      with the lowest scores are discarded to meet the limit. Set to
4189     *      a non-positive value for unlimited number.
4190     * * 7: An {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the maximum
4191     *      number of boxes returning from the hard NMS algorithm. Boxes
4192     *      with the lowest scores are discarded to meet the limit. Set to
4193     *      a non-positive value for unlimited number.
4194     * * 8: An {@link %{OperandTypeLinkPfx}FLOAT32} scalar, specifying the IoU
4195     *      threshold for hard NMS.
4196     * * 9: An {@link %{OperandTypeLinkPfx}FLOAT32} scalar, min_size. Boxes with
4197     *      height or width lower than the absolute threshold are filtered out.
4198     * * 10: An {@link %{OperandTypeLinkPfx}BOOL} scalar, set to true to specify
4199     *       NCHW data layout for input0 and input1. Set to false for NHWC.
4200     *
4201     * Outputs:
4202     * * 0: A tensor of the same {@link %{OperandType}} as input0, of shape
4203     *      [num_output_rois], specifying the score of each output box.
4204     *      The boxes are grouped by batches, but the sequential order in
4205     *      each batch is not guaranteed. For type of
4206%kind aidl canonical ndk hal_1.3+
4207     *      {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} or
4208     *      {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED}, the scale and zero
4209%else
4210     *      {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM}, the scale and zero
4211%/kind
4212     *      point must be the same as input0.
4213     * * 1: A tensor of the same {@link %{OperandType}} as input3, of shape
4214     *      [num_output_rois, 4], specifying the coordinates of each output
4215     *      bounding box for each class, with format [x1, y1, x2, y2].
4216     *      The sequential order of the boxes corresponds with output0.
4217     *      For type of {@link %{OperandTypeLinkPfx}TENSOR_QUANT16_ASYMM}, the
4218     *      scale must be 0.125 and the zero point must be 0.
4219     * * 2: A 1-D {@link %{OperandTypeLinkPfx}TENSOR_INT32} tensor, of shape
4220     *      [num_output_rois], specifying the batch index of each box. Boxes
4221     *      with the same batch index are grouped together.
4222%insert AVAIL3
4223     */
4224    %{DeclareOperation_1.2 GENERATE_PROPOSALS 52},
4225
4226    /**
4227     * For input tensors x and y, computes x > y elementwise.
4228     *
4229     * Supported tensor {@link %{OperandType}}:
4230     * * {@link %{OperandTypeLinkPfx}TENSOR_BOOL8}
4231     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16}
4232     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32}
4233     * * {@link %{OperandTypeLinkPfx}TENSOR_INT32}
4234     * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM}
4235%kind aidl canonical ndk hal_1.3+
4236     * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{NNAPILevel4})
4237%/kind
4238     *
4239     * Supported tensor rank: from 1
4240     *
4241     * This operation supports broadcasting.
4242     *
4243     * Inputs:
4244     * * 0: A tensor.
4245     * * 1: A tensor of the same {@link %{OperandType}} and dimensions compatible
4246     *      with input0.
4247     *
4248     * Outputs:
4249     * * 0: A tensor of {@link %{OperandTypeLinkPfx}TENSOR_BOOL8}.
4250%insert AVAIL3
4251     */
    %{DeclareOperation_1.2 GREATER 53},

    /**
4254     * For input tensors x and y, computes x >= y elementwise.
4255     *
4256     * Supported tensor {@link %{OperandType}}:
4257     * * {@link %{OperandTypeLinkPfx}TENSOR_BOOL8}
4258     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16}
4259     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32}
4260     * * {@link %{OperandTypeLinkPfx}TENSOR_INT32}
4261     * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM}
4262%kind aidl canonical ndk hal_1.3+
4263     * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{NNAPILevel4})
4264%/kind
4265     *
4266     * Supported tensor rank: from 1
4267     *
4268     * This operation supports broadcasting.
4269     *
4270     * Inputs:
4271     * * 0: A tensor.
4272     * * 1: A tensor of the same {@link %{OperandType}} and dimensions compatible
4273     *      with input0.
4274     *
4275     * Outputs:
4276     * * 0: A tensor of {@link %{OperandTypeLinkPfx}TENSOR_BOOL8}.
4277%insert AVAIL3
4278     */
4279    %{DeclareOperation_1.2 GREATER_EQUAL 54},
4280
4281    /**
4282     * Performs a grouped 2-D convolution operation.
4283     *
4284     * Given an input tensor of shape [batches, height, width, depth_in] and a
4285     * filter tensor of shape [depth_out, filter_height, filter_width, depth_group]
4286     * containing depth_out convolutional filters of depth depth_group, GROUPED_CONV
4287     * applies a group of different filters to each input channel group, then
4288     * concatenates the results together.
4289     *
4290     * Specifically, the input channels are divided into num_groups groups, each with
4291     * depth depth_group, i.e. depth_in = num_groups * depth_group. The convolutional
4292     * filters are also divided into num_groups groups, i.e. depth_out is divisible
4293     * by num_groups. GROUPED_CONV applies each group of filters to the corresponding
     * input channel group, and the results are concatenated together.
4295     *
4296     * The output dimensions are functions of the filter dimensions, stride, and
4297     * padding.
4298     *
4299     * The values in the output tensor are computed as:
4300     *
4301     *     output[b, i, j, g * channel_multiplier + q] =
4302     *         sum_{di, dj, dk} (
4303     *             input[b, strides[1] * i + di, strides[2] * j + dj,
4304     *                   g * depth_group + dk] *
4305     *             filter[g * channel_multiplier + q, di, dj, dk]
4306     *         ) + bias[channel]
4307     *
4308     * where channel_multiplier = depth_out / num_groups
4309     *
4310     * Supported tensor {@link %{OperandType}} configurations:
4311     * * 16 bit floating point:
4312     * * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16} for input, filter, output, and bias.
4313     *
4314     * * 32 bit floating point:
4315     * * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32} for input, filter, output, and bias.
4316     *
4317     * * Quantized:
4318     * * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} for input, filter, and output.
4319     * * * {@link %{OperandTypeLinkPfx}TENSOR_INT32} for bias (with scale set to
4320     * * * input.scale * filter.scale).
4321%kind aidl canonical ndk hal_1.3+
4322     *
4323     * * Quantized signed (since %{NNAPILevel4}):
4324     * * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} for input, filter, and output.
4325     * * * {@link %{OperandTypeLinkPfx}TENSOR_INT32} for bias (with scale set to
4326     * * * input.scale * filter.scale).
4327%/kind
4328     *
4329     * * Quantized with symmetric per channel quantization for the filter:
4330     * * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} for input, and output.
4331     * * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_SYMM_PER_CHANNEL} for filter.
4332     * * * {@link %{OperandTypeLinkPfx}TENSOR_INT32} for bias (scale set to 0.0,
4333     * * * each value scaling is separate and equal to input.scale * filter.scales[channel]).
4334%kind aidl canonical ndk hal_1.3+
4335     *
4336     * * Quantized signed with filter symmetric per channel quantization
4337     *   (since %{NNAPILevel4}):
4338     * * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} for input, and output.
4339     * * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_SYMM_PER_CHANNEL} for filter.
4340     * * * {@link %{OperandTypeLinkPfx}TENSOR_INT32} for bias (scale set to 0.0,
4341     * * * each value scaling is separate and equal to input.scale * filter.scales[channel]).
4342%/kind
4343     *
4344     * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
4345     * With the default data layout NHWC, the data is stored in the order of:
4346     * [batch, height, width, channels]. Alternatively, the data layout could
4347     * be NCHW, the data storage order of: [batch, channels, height, width].
4348     *
4349     * Both explicit padding and implicit padding are supported.
4350     *
4351     * Inputs (explicit padding):
4352     * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
4353     *      specifying the input, where depth_in = num_groups * depth_group.
4354     * * 1: A 4-D tensor, of shape
4355     *      [depth_out, filter_height, filter_width, depth_group], specifying
4356     *      the filter, where depth_out must be divisible by num_groups.  For
4357     *      tensor of type {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_SYMM_PER_CHANNEL}
4358     *      the channel dimension (channelDim at
4359     *      {@link %{Ann}SymmPerChannelQuantParams}) must be set to 0.
4360     * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
4361     *      tensor of type {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32} or
4362     *      {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16}, the bias must be of the same type.
4363%kind aidl canonical ndk hal_1.3+
4364     *      For filter tensor of {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} and
4365     *      {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED}
4366%else
4367     *      For filter tensor of {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM},
4368%/kind
4369     *      the bias should be of {@link %{OperandTypeLinkPfx}TENSOR_INT32}, with zeroPoint
4370     *      of 0 and bias_scale == input_scale * filter_scale. For filter tensor
4371     *      of {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_SYMM_PER_CHANNEL}, the bias
4372     *      should be of {@link %{OperandTypeLinkPfx}TENSOR_INT32}, with zeroPoint of
4373     *      0 and bias_scale of 0. The actual scale of each value 'i' is equal to
4374     *      bias_scale[i] = input_scale * filter_scale[i].
     * * 3: An {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the padding on
     *      the left, in the width dimension.
     * * 4: An {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the padding on
     *      the right, in the width dimension.
     * * 5: An {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the padding on
     *      the top, in the height dimension.
     * * 6: An {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the padding on
     *      the bottom, in the height dimension.
     * * 7: An {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the stride when
     *      walking through input in the width dimension.
     * * 8: An {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the stride when
     *      walking through input in the height dimension.
4387     * * 9: An {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the number of
4388     *      groups.
4389     * * 10: An {@link %{OperandTypeLinkPfx}INT32} scalar, and has to be one of the
4390     *       {@link %{FusedActivationFunc}} values. Specifies the activation to
4391     *       invoke on the result.
4392     * * 11: An {@link %{OperandTypeLinkPfx}BOOL} scalar, set to true to specify
4393     *       NCHW data layout for input0 and output0. Set to false for NHWC.
4394     *
4395     * Inputs (implicit padding):
4396     * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
4397     *      specifying the input, where depth_in = num_groups * depth_group.
4398     * * 1: A 4-D tensor, of shape
4399     *      [depth_out, filter_height, filter_width, depth_group], specifying
4400     *      the filter, where depth_out must be divisible by num_groups.  For
4401     *      tensor of type {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_SYMM_PER_CHANNEL}
4402     *      the channel dimension (%{Ann}SymmPerChannelQuantParams::channelDim)
4403     *      must be set to 0.
4404     * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
4405     *      tensor of type {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32} or
     *      {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16}, the bias must be of the same type.
4408%kind aidl canonical ndk hal_1.3+
4409     *      For filter tensor of {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} and
4410     *      {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED}
4411%else
4412     *      For filter tensor of {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM},
4413%/kind
4414     *      the bias should be of {@link %{OperandTypeLinkPfx}TENSOR_INT32}, with zeroPoint
4415     *      of 0 and bias_scale == input_scale * filter_scale. For filter tensor
4416     *      of {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_SYMM_PER_CHANNEL}, the bias
4417     *      should be of {@link %{OperandTypeLinkPfx}TENSOR_INT32}, with zeroPoint of
4418     *      0 and bias_scale of 0. The actual scale of each value 'i' is equal to
4419     *      bias_scale[i] = input_scale * filter_scale[i].
4420     * * 3: An {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the implicit
4421     *      padding scheme, has to be one of the
4422%insert PaddingCodeValues
     * * 4: An {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the stride when
     *      walking through input in the width dimension.
     * * 5: An {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the stride when
     *      walking through input in the height dimension.
4427     * * 6: An {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the number of
4428     *      groups.
4429     * * 7: An {@link %{OperandTypeLinkPfx}INT32} scalar, and has to be one of the
4430     *      {@link %{FusedActivationFunc}} values. Specifies the activation to
4431     *      invoke on the result.
4432     * * 8: An {@link %{OperandTypeLinkPfx}BOOL} scalar, set to true to specify
4433     *      NCHW data layout for input0 and output0. Set to false for NHWC.
4434     *
4435     * Outputs:
4436     * * 0: The output 4-D tensor, of shape
4437     *      [batches, out_height, out_width, depth_out].
4438%kind aidl canonical ndk hal_1.3+
4439     *      For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} and
4440     *      {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} tensor,
4441     *      the scale and zeroPoint can be different from inputs' scale and zeroPoint.
4442%else
4443     *      For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} tensor,
4444     *      the scale and zeroPoint can be different from inputs' scale and zeroPoint.
4445%/kind
4446%insert AVAIL3
4447     */
4448    %{DeclareOperation_1.2 GROUPED_CONV_2D 55},
4449
4450    /**
4451     * Localize the maximum keypoints from heatmaps.
4452     *
4453     * This operation approximates the accurate maximum keypoint scores and
4454     * indices after bicubic upscaling by using Taylor expansion up to the
4455     * quadratic term.
4456     *
4457     * The bounding box is represented by its upper-left corner coordinate
4458     * (x1,y1) and lower-right corner coordinate (x2,y2) in the original image.
4459     * A valid bounding box should satisfy x1 <= x2 and y1 <= y2.
4460     *
4461     * Supported tensor {@link %{OperandType}}:
4462     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16}
4463     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32}
4464     * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM}
4465%kind aidl canonical ndk hal_1.3+
4466     * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{NNAPILevel4})
4467%/kind
4468     *
4469     * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
4470     * With the default data layout NHWC, the data is stored in the order of:
4471     * [batch, height, width, channels]. Alternatively, the data layout could
4472     * be NCHW, the data storage order of: [batch, channels, height, width].
4473     *
4474     * Inputs:
4475     * * 0: A 4-D Tensor of shape
4476     *      [num_boxes, heatmap_size, heatmap_size, num_keypoints],
4477     *      specifying the heatmaps, the height and width of heatmaps should
4478     *      be the same, and must be greater than or equal to 2.
4479     * * 1: A 2-D Tensor of shape [num_boxes, 4], specifying the bounding boxes,
4480     *      each with format [x1, y1, x2, y2]. For input0 of type
4481     *      {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM}, this tensor should
4482     *      be of {@link %{OperandTypeLinkPfx}TENSOR_QUANT16_ASYMM}, with zeroPoint
4483     *      of 0 and scale of 0.125.
4484%kind aidl canonical ndk hal_1.3+
4485     *      For input0 of type
4486     *      {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED}, this tensor
4487     *      should be of {@link %{OperandTypeLinkPfx}TENSOR_QUANT16_ASYMM}, with
4488     *      zeroPoint of -128 and scale of 0.125.
4489%/kind
4490     * * 2: An {@link %{OperandTypeLinkPfx}BOOL} scalar, set to true to specify
4491     *      NCHW data layout for input0. Set to false for NHWC.
4492     *
4493     * Outputs:
4494     * * 0: A tensor of the same {@link %{OperandType}} as input0, with shape
4495     *      [num_boxes, num_keypoints], specifying score of the keypoints.
4496%kind aidl canonical ndk hal_1.3+
4497     *      For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} or
4498     *      {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} tensor,
4499     *      the scale and zeroPoint can be different from input0 scale and zeroPoint.
4500%else
4501     *      For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} tensor,
4502     *      the scale and zeroPoint can be different from input0 scale and zeroPoint.
4503%/kind
4504     * * 1: A tensor of the same {@link %{OperandType}} as input1, with shape
4505     *      [num_boxes, num_keypoints, 2], specifying the location of
4506     *      the keypoints, the second dimension is organized as
4507     *      [keypoint_x, keypoint_y].
4508     *      For type of {@link %{OperandTypeLinkPfx}TENSOR_QUANT16_ASYMM}, the
4509     *      scale must be 0.125 and the zero point must be 0.
4510%insert AVAIL3
4511     */
4512    %{DeclareOperation_1.2 HEATMAP_MAX_KEYPOINT 56},
4513
4514    /**
4515     * Applies instance normalization to the input tensor.
4516     *
4517     * The values in the output tensor are computed as:
4518     *
4519     *     output[b, h, w, c] =
4520     *         (input[b, h, w, c] - mean[b, c]) * gamma /
4521     *         sqrt(var[b, c] + epsilon) + beta
4522     *
4523     * Where the mean and variance are computed across the spatial dimensions:
4524     *
4525     *     mean[b, c] =
4526     *         sum_{h, w}(input[b, h, w, c]) / sum(1)
4527     *
4528     *     var[b, c] =
4529     *         sum_{h, w}(pow(input[b, h, w, c] - mean[b, c], 2)) / sum(1)
4530     *
4531     * Supported tensor {@link %{OperandType}}:
4532     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16}
4533     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32}
4534     *
4535     * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
4536     * With the default data layout NHWC, the data is stored in the order of:
4537     * [batch, height, width, channels]. Alternatively, the data layout could
4538     * be NCHW, the data storage order of: [batch, channels, height, width].
4539     *
4540     * Inputs:
4541     * * 0: An n-D tensor, specifying the tensor to be normalized.
4542     * * 1: A scalar, specifying gamma, the scale applied to the normalized
4543     *      tensor. The scalar must be of {@link %{OperandTypeLinkPfx}FLOAT16} if
4544     *      input0 is of {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16} and of
4545     *      {@link %{OperandTypeLinkPfx}FLOAT32} if input0 is of
4546     *      {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32}.
4547     * * 2: A scalar, specifying beta, the offset applied to the normalized
4548     *      tensor. The scalar must be of {@link %{OperandTypeLinkPfx}FLOAT16} if
4549     *      input0 is of {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16} and of
4550     *      {@link %{OperandTypeLinkPfx}FLOAT32} if input0 is of
4551     *      {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32}.
4552     * * 3: A scalar, specifying epsilon, the small value added to variance to
4553     *      avoid dividing by zero. The scalar must be of {@link %{OperandTypeLinkPfx}FLOAT16} if
4554     *      input0 is of {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16} and of
4555     *      {@link %{OperandTypeLinkPfx}FLOAT32} if input0 is of
4556     *      {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32}.
4557     * * 4: An {@link %{OperandTypeLinkPfx}BOOL} scalar, set to true to specify
4558     *      NCHW data layout for input0 and output0. Set to false for NHWC.
4559     *
4560     * Outputs:
4561     * * 0: A tensor of the same {@link %{OperandType}} and same shape as input0.
4562%insert AVAIL3
4563     */
4564    %{DeclareOperation_1.2 INSTANCE_NORMALIZATION 57},
4565
4566    /**
4567     * For input tensors x and y, computes x < y elementwise.
4568     *
4569     * Supported tensor {@link %{OperandType}}:
4570     * * {@link %{OperandTypeLinkPfx}TENSOR_BOOL8}
4571     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16}
4572     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32}
4573     * * {@link %{OperandTypeLinkPfx}TENSOR_INT32}
4574     * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM}
4575%kind aidl canonical ndk hal_1.3+
4576     * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{NNAPILevel4})
4577%/kind
4578     *
4579     * Supported tensor rank: from 1
4580     *
4581     * This operation supports broadcasting.
4582     *
4583     * Inputs:
4584     * * 0: A tensor.
4585     * * 1: A tensor of the same {@link %{OperandType}} and dimensions compatible
4586     *      with input0.
4587     *
4588     * Outputs:
4589     * * 0: A tensor of {@link %{OperandTypeLinkPfx}TENSOR_BOOL8}.
4590%insert AVAIL3
4591     */
4592    %{DeclareOperation_1.2 LESS 58},
4593
4594    /**
4595     * For input tensors x and y, computes x <= y elementwise.
4596     *
4597     * Supported tensor {@link %{OperandType}}:
4598     * * {@link %{OperandTypeLinkPfx}TENSOR_BOOL8}
4599     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16}
4600     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32}
4601     * * {@link %{OperandTypeLinkPfx}TENSOR_INT32}
4602     * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM}
4603%kind aidl canonical ndk hal_1.3+
4604     * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{NNAPILevel4})
4605%/kind
4606     *
4607     * Supported tensor rank: from 1
4608     *
4609     * This operation supports broadcasting.
4610     *
4611     * Inputs:
4612     * * 0: A tensor.
4613     * * 1: A tensor of the same {@link %{OperandType}} and dimensions compatible
4614     *      with input0.
4615     *
4616     * Outputs:
4617     * * 0: A tensor of {@link %{OperandTypeLinkPfx}TENSOR_BOOL8}.
4618%insert AVAIL3
4619     */
4620    %{DeclareOperation_1.2 LESS_EQUAL 59},
4621
4622    /**
4623     * Computes natural logarithm of x element-wise.
4624     *
4625     * Supported tensor {@link %{OperandType}}:
4626     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16}
4627     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32}
4628     *
4629     * Supported tensor rank: from 1.
4630     *
4631     * Inputs:
4632     * * 0: A tensor.
4633     *
4634     * Outputs:
4635     * * 0: The output tensor of same shape as input0.
4636%insert AVAIL3
4637     */
4638    %{DeclareOperation_1.2 LOG 60},
4639
4640    /**
4641     * Returns the truth value of x AND y element-wise.
4642     *
4643     * Supported tensor {@link %{OperandType}}:
4644     * * {@link %{OperandTypeLinkPfx}TENSOR_BOOL8}
4645     *
4646     * Supported tensor rank: from 1
4647     *
4648     * This operation supports broadcasting.
4649     *
4650     * Inputs:
4651     * * 0: A tensor of {@link %{OperandTypeLinkPfx}TENSOR_BOOL8}.
4652     * * 1: A tensor of {@link %{OperandTypeLinkPfx}TENSOR_BOOL8} and dimensions
4653     *      compatible with input0.
4654     *
4655     * Outputs:
4656     * * 0: A tensor of {@link %{OperandTypeLinkPfx}TENSOR_BOOL8}.
4657%insert AVAIL3
4658     */
4659    %{DeclareOperation_1.2 LOGICAL_AND 61},
4660
4661    /**
4662     * Computes the truth value of NOT x element-wise.
4663     *
4664     * Supported tensor {@link %{OperandType}}:
4665     * * {@link %{OperandTypeLinkPfx}TENSOR_BOOL8}
4666     *
4667     * Supported tensor rank: from 1.
4668     *
4669     * Inputs:
4670     * * 0: A tensor.
4671     *
4672     * Outputs:
4673     * * 0: The output tensor of same shape as input0.
4674%insert AVAIL3
4675     */
4676    %{DeclareOperation_1.2 LOGICAL_NOT 62},
4677
4678    /**
4679     * Returns the truth value of x OR y element-wise.
4680     *
4681     * Supported tensor {@link %{OperandType}}:
4682     * * {@link %{OperandTypeLinkPfx}TENSOR_BOOL8}
4683     *
4684     * Supported tensor rank: from 1
4685     *
4686     * This operation supports broadcasting.
4687     *
4688     * Inputs:
4689     * * 0: A tensor of {@link %{OperandTypeLinkPfx}TENSOR_BOOL8}.
4690     * * 1: A tensor of {@link %{OperandTypeLinkPfx}TENSOR_BOOL8} and dimensions
4691     *      compatible with input0.
4692     *
4693     * Outputs:
4694     * * 0: A tensor of {@link %{OperandTypeLinkPfx}TENSOR_BOOL8}.
4695%insert AVAIL3
4696     */
4697    %{DeclareOperation_1.2 LOGICAL_OR 63},
4698
4699    /**
4700     * Computes the log softmax activations given logits.
4701     *
4702     * The output is calculated using this formula:
4703     *
4704     *     output = logits * beta - log(reduce_sum(exp(logits * beta), axis))
4705     *
4706     * Supported tensor {@link %{OperandType}}:
4707     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16}
4708     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32}
4709     *
4710     * Supported tensor rank: from 1.
4711     *
4712     * Inputs:
4713     * * 0: A tensor specifying the input logits.
4714     * * 1: A scalar, specifying the positive scaling factor for the exponent,
4715     *      beta.
4716     *      For input tensor of {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16}, the beta
4717     *      value must be of {@link %{OperandTypeLinkPfx}FLOAT16}.
4718     *      For input tensor of {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32}, the beta
4719     *      value must be of {@link %{OperandTypeLinkPfx}FLOAT32}.
4720     * * 2: An {@link %{OperandTypeLinkPfx}INT32} scalar specifying the axis to
4721     *      reduce across. Negative index is used to specify axis from the
4722     *      end (e.g. -1 for the last axis). Must be in the range [-n, n).
4723     *
4724     * Outputs:
4725     * * 0: The output tensor of the same {@link %{OperandType}} and shape as
4726     *      input0.
4727%insert AVAIL3
4728     */
4729    %{DeclareOperation_1.2 LOG_SOFTMAX 64},
4730
4731    /**
4732     * Returns the element-wise maximum of two tensors.
4733     *
4734     * Supported tensor {@link %{OperandType}}:
4735     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16}
4736     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32}
4737     * * {@link %{OperandTypeLinkPfx}TENSOR_INT32}
4738     * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM}
4739%kind aidl canonical ndk hal_1.3+
4740     * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{NNAPILevel4})
4741%/kind
4742     *
4743     * Supported tensor rank: from 1.
4744     *
4745     * Inputs:
4746     * * 0: A tensor.
4747     * * 1: A tensor of the same {@link %{OperandType}} and compatible dimensions
4748     *      with input0.
4749     *      For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} tensor,
4750     *      the scales and zeroPoint can be different from input0 scale and zeroPoint.
4751     *
4752     * Outputs:
4753     * * 0: A tensor of the same {@link %{OperandType}} as input0.
4754%kind aidl canonical ndk hal_1.3+
4755     *      For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} and
4756     *      {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} tensor,
4757     *      the scale and zeroPoint can be different from inputs' scale and zeroPoint.
4758%else
4759     *      For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} tensor,
4760     *      the scale and zeroPoint can be different from inputs' scale and zeroPoint.
4761%/kind
4762%insert AVAIL3
4763     */
4764    %{DeclareOperation_1.2 MAXIMUM 65},
4765
4766    /**
4767     * Returns the element-wise minimum of two tensors.
4768     *
4769     * Supported tensor {@link %{OperandType}}:
4770     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16}
4771     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32}
4772     * * {@link %{OperandTypeLinkPfx}TENSOR_INT32}
4773     * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM}
4774%kind aidl canonical ndk hal_1.3+
4775     * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{NNAPILevel4})
4776%/kind
4777     *
4778     * Supported tensor rank: from 1.
4779     *
4780     * Inputs:
4781     * * 0: A tensor.
4782     * * 1: A tensor of the same {@link %{OperandType}} and compatible dimensions
4783     *      with input0.
4784     *      For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} tensor,
4785     *      the scales and zeroPoint can be different from input0 scale and zeroPoint.
4786     *
4787     * Outputs:
4788     * * 0: A tensor of the same {@link %{OperandType}} as input0.
4789%kind aidl canonical ndk hal_1.3+
4790     *      For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} and
4791     *      {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} tensor,
4792     *      the scale and zeroPoint can be different from inputs' scale and zeroPoint.
4793%else
4794     *      For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} tensor,
4795     *      the scale and zeroPoint can be different from inputs' scale and zeroPoint.
4796%/kind
4797%insert AVAIL3
4798     */
4799    %{DeclareOperation_1.2 MINIMUM 66},
4800
4801    /**
4802     * Computes numerical negative value element-wise.
4803     *
4804     * Supported tensor {@link %{OperandType}}:
4805     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16}
4806     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32}
4807     * * {@link %{OperandTypeLinkPfx}TENSOR_INT32}
4808     *
4809     * Supported tensor rank: from 1.
4810     *
4811     * Inputs:
4812     * * 0: A tensor.
4813     *
4814     * Outputs:
4815     * * 0: The output tensor of same shape as input0.
4816%insert AVAIL3
4817     */
4818    %{DeclareOperation_1.2 NEG 67},
4819
4820    /**
4821     * For input tensors x and y, computes x != y elementwise.
4822     *
4823     * Supported tensor {@link %{OperandType}}:
4824     * * {@link %{OperandTypeLinkPfx}TENSOR_BOOL8}
4825     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16}
4826     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32}
4827     * * {@link %{OperandTypeLinkPfx}TENSOR_INT32}
4828     * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM}
4829%kind aidl canonical ndk hal_1.3+
4830     * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{NNAPILevel4})
4831%/kind
4832     *
4833     * Supported tensor rank: from 1
4834     *
4835     * This operation supports broadcasting.
4836     *
4837     * Inputs:
4838     * * 0: A tensor.
4839     * * 1: A tensor of the same {@link %{OperandType}} and dimensions compatible
4840     *      with input0.
4841     *
4842     * Outputs:
4843     * * 0: A tensor of {@link %{OperandTypeLinkPfx}TENSOR_BOOL8}.
4844%insert AVAIL3
4845     */
4846    %{DeclareOperation_1.2 NOT_EQUAL 68},
4847
4848    /**
4849     * Pads a tensor with the given constant value according to the specified
4850     * paddings.
4851     *
4852     * Supported tensor {@link %{OperandType}}:
4853     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16}
4854     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32}
4855     * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM}
4856%kind aidl canonical ndk hal_1.3+
4857     * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{NNAPILevel4})
4858%/kind
4859     *
4860     * Supported tensor rank: up to 4
4861     *
4862     * Inputs:
4863     * * 0: An n-D tensor, specifying the tensor to be padded.
4864     * * 1: A 2-D Tensor of {@link %{OperandTypeLinkPfx}TENSOR_INT32}, the paddings
4865     *      for each spatial dimension of the input tensor. The shape of the
4866     *      tensor must be {rank(input0), 2}.
4867     *      padding[i, 0] specifies the number of elements to be padded in the
4868     *      front of dimension i.
4869     *      padding[i, 1] specifies the number of elements to be padded after
4870     *      the end of dimension i.
4871     * * 2: A scalar specifying the value to use for padding input0.
4872     *      For input tensor of {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16}, the
4873     *      pad value must be of {@link %{OperandTypeLinkPfx}FLOAT16}.
4874     *      For input tensor of {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32}, the
4875     *      pad value must be of {@link %{OperandTypeLinkPfx}FLOAT32}.
4876%kind aidl canonical ndk hal_1.3+
4877     *      For input tensor of {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} and
4878     *      {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED},
4879%else
4880     *      For input tensor of {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM},
4881%/kind
4882     *      the pad value must be of {@link %{OperandTypeLinkPfx}INT32}. The
4883     *      scale and zeroPoint are assumed to be the same as in input0.
4884     *
4885     * Outputs:
4886     * * 0: A tensor of the same {@link %{OperandType}} as input0. The
4887     *      output tensor has the same rank as input0, and each
4888     *      dimension of the output tensor has the same size as the
4889     *      corresponding dimension of the input tensor plus the size
4890     *      of the padding:
4891     *          output0.dimension[i] =
4892     *              padding[i, 0] + input0.dimension[i] + padding[i, 1]
4893%kind aidl canonical ndk hal_1.3+
4894     *      For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} and
4895     *      {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} tensor,
4896     *      the scale and zeroPoint must be the same as input0.
4897%else
4898     *      For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} tensor,
4899     *      the scale and zeroPoint must be the same as input0.
4900%/kind
4901%insert AVAIL3
4902     */
4903    %{DeclareOperation_1.2 PAD_V2 69},
4904
4905    /**
4906     * Computes the power of one value to another.
4907     *
4908     * Given a tensor base and a tensor exponent, this operation computes
4909     * base^exponent elementwise.
4910     *
4911     * This operation supports broadcasting. The size of the output is the
4912     * maximum size along each dimension of the input operands. It starts with
4913     * the trailing dimensions, and works its way forward.
4914     *
4915     * For example:
4916     *     base.dimension     =    {4, 1, 2}
4917     *     exponent.dimension = {5, 4, 3, 1}
4918     *     output.dimension   = {5, 4, 3, 2}
4919     *
4920     * Supported tensor {@link %{OperandType}}:
4921     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16}
4922     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32}
4923     *
4924     * Supported tensor rank: from 1
4925     *
4926     * Inputs:
4927     * * 0: A tensor specifying the base.
4928     * * 1: A tensor specifying the exponent.
4929     *
4930     * Outputs:
4931     * * 0: An output tensor.
4932%insert AVAIL3
4933     */
4934    %{DeclareOperation_1.2 POW 70},
4935
4936    /**
4937     * Parametric Rectified Linear Unit.
4938     *
4939     * It follows: f(x) = alpha * x for x < 0, f(x) = x for x >= 0, where alpha
4940     * is a learned array with the same {@link %{OperandType}} and compatible
4941     * dimensions as input x.
4942     *
4943     * Two dimensions are compatible when:
4944     *     1. they are equal, or
4945     *     2. one of them is 1
4946     *
4947     * The size of the output is the maximum size along each dimension of the
4948     * input operands. It starts with the trailing dimensions, and works its way
4949     * forward.
4950     *
4951     * Example:
4952     *     input.dimension  =    {4, 1, 2}
4953     *     alpha.dimension  = {5, 4, 3, 1}
4954     *     output.dimension = {5, 4, 3, 2}
4955     *
4956     * Supported tensor {@link %{OperandType}}:
4957     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16}
4958     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32}
4959     * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM}
4960%kind aidl canonical ndk hal_1.3+
4961     * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{NNAPILevel4})
4962%/kind
4963     *
4964     * Supported tensor rank: from 1
4965     *
4966     * Inputs:
4967     * * 0: A tensor, specifying the input.
4968     * * 1: A tensor of the same {@link %{OperandType}}, and compatible dimensions
4969     *      as input0, specifying the alpha.
4970     *
4971     * Outputs:
4972     * * 0: A tensor of the same {@link %{OperandType}} as input0.
4973%kind aidl canonical ndk hal_1.3+
4974     *      For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} and
4975     *      {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} tensor,
4976     *      the scales and zeroPoint can be different from input0 scale and zeroPoint.
4977%else
4978     *      For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} tensor,
4979     *      the scales and zeroPoint can be different from input0 scale and zeroPoint.
4980%/kind
4981%insert AVAIL3
4982     */
4983    %{DeclareOperation_1.2 PRELU 71},
4984
4985    /**
4986     * Quantizes the input tensor.
4987     *
4988     * The formula for {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} output tensor is:
4989     *
4990     *     output = max(0, min(255, round(input / scale) + zeroPoint))
4991     *
4992%kind aidl canonical ndk hal_1.3+
4993     * The formula for {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} output
4994     * tensor is:
4995     *
4996     *     output = max(-128, min(127, round(input / scale) + zeroPoint))
4997     *
4998%/kind
4999     * Supported input tensor {@link %{OperandType}}:
5000     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16}
5001     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32}
5002     *
5003     * Supported output tensor {@link %{OperandType}}:
5004     * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM}
5005%kind aidl canonical ndk hal_1.3+
5006     * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{NNAPILevel4})
5007%/kind
5008     *
5009     * Supported tensor rank: from 1
5010     *
5011     * Inputs:
5012     * * 0: A tensor, may be zero-sized.
5013     *
5014     * Outputs:
5015     * * 0: The output tensor of same shape as input0, but with
5016%kind aidl canonical ndk hal_1.3+
5017     *      {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} or
5018     *      {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED}.
5019%else
5020     *      {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM}.
5021%/kind
5022%insert AVAIL3
5023     */
5024    %{DeclareOperation_1.2 QUANTIZE 72},
5025
5026    /**
5027     * A version of quantized LSTM, using 16 bit quantization for internal
5028     * state.
5029     *
5030     * There is no projection layer, so cell state size is equal to the output
5031     * size.
5032     *
5033     * Inputs:
5034     * * 0: A 2-D tensor of type {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM}
5035     *      and shape [numBatches, inputSize] specifying the input to the LSTM
5036     *      cell. Tensor is quantized with a fixed quantization range of
5037     *      [-1, 127/128] (scale = 1/128, zeroPoint = 128).
5038     * * 1: The input-to-input weights.
5039     *      A 2-D tensor of type {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM}
5040     *      and shape [outputSize, inputSize] specifying input-to-input part of
5041     *      weights for fully-connected layer inside the LSTM cell.
5042     *      Quantization zero point and scale must be the same across all the
5043     *      weights.
5044     * * 2: The input-to-forget weights.
5045     *      A 2-D tensor of type {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM}
5046     *      and shape [outputSize, inputSize] specifying input-to-forget part of
5047     *      weights for fully-connected layer inside the LSTM cell.
5048     *      Quantization zero point and scale must be the same across all the
5049     *      weights.
5050     * * 3: The input-to-cell weights.
5051     *      A 2-D tensor of type {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM}
5052     *      and shape [outputSize, inputSize] specifying input-to-cell part of
5053     *      weights for fully-connected layer inside the LSTM cell.
5054     *      Quantization zero point and scale must be the same across all the
5055     *      weights.
5056     * * 4: The input-to-output weights.
5057     *      A 2-D tensor of type {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM}
5058     *      and shape [outputSize, inputSize] specifying input-to-output part of
5059     *      weights for fully-connected layer inside the LSTM cell.
5060     *      Quantization zero point and scale must be the same across all the
5061     *      weights.
5062     * * 5: The recurrent-to-input weights.
5063     *      A 2-D tensor of type {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM}
5064     *      and shape [outputSize, outputSize] specifying recurrent-to-input part
5065     *      of weights for fully-connected layer inside the LSTM cell.
5066     *      Quantization zero point and scale must be the same across all the
5067     *      weights.
5068     * * 6: The recurrent-to-forget weights.
5069     *      A 2-D tensor of type {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM}
5070     *      and shape [outputSize, outputSize] specifying recurrent-to-forget
5071     *      part of weights for fully-connected layer inside the LSTM cell.
5072     *      Quantization zero point and scale must be the same across all the
5073     *      weights.
5074     * * 7: The recurrent-to-cell weights.
5075     *      A 2-D tensor of type {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM}
5076     *      and shape [outputSize, outputSize] specifying recurrent-to-cell part
5077     *      of weights for fully-connected layer inside the LSTM cell.
5078     *      Quantization zero point and scale must be the same across all the
5079     *      weights.
5080     * * 8: The recurrent-to-output weights.
5081     *      A 2-D tensor of type {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM}
5082     *      and shape [outputSize, outputSize] specifying recurrent-to-output
5083     *      part of weights for fully-connected layer inside the LSTM cell.
5084     *      Quantization zero point and scale must be the same across all the
5085     *      weights.
5086     * * 9: The input gate bias.
5087     *      A 1-D tensor of type {@link %{OperandTypeLinkPfx}TENSOR_INT32} and shape
5088     *      [outputSize] specifying the bias for the fully-connected layer
5089     *      inside the LSTM cell. Bias is quantized with scale being a product
5090     *      of input and weights scales and zeroPoint equal to 0.
5091     * * 10: The forget gate bias.
5092     *      A 1-D tensor of type {@link %{OperandTypeLinkPfx}TENSOR_INT32} and shape
5093     *      [outputSize] specifying the bias for the fully-connected layer
5094     *      inside the LSTM cell. Bias is quantized with scale being a product
5095     *      of input and weights scales and zeroPoint equal to 0.
5096     * * 11: The cell bias.
5097     *      A 1-D tensor of type {@link %{OperandTypeLinkPfx}TENSOR_INT32} and shape
5098     *      [outputSize] specifying the bias for the fully-connected layer
5099     *      inside the LSTM cell. Bias is quantized with scale being a product
5100     *      of input and weights scales and zeroPoint equal to 0.
5101     * * 12: The output gate bias.
5102     *      A 1-D tensor of type {@link %{OperandTypeLinkPfx}TENSOR_INT32} and shape
5103     *      [outputSize] specifying the bias for the fully-connected layer
5104     *      inside the LSTM cell. Bias is quantized with scale being a product
5105     *      of input and weights scales and zeroPoint equal to 0.
5106     * * 13: A 2-D tensor of type {@link %{OperandTypeLinkPfx}TENSOR_QUANT16_SYMM}
5107     *       and shape [numBatches, outputSize] specifying the cell state from the
5108     *       previous time step of the LSTM cell. It is quantized using a
5109     *       quantization range of [-2^4, 2^4 * 32767/32768] (scale = 2^4 /
5110     *       32768, zeroPoint = 0).
5111     * * 14: A 2-D tensor of type {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM}
5112     *       and shape [numBatches, outputSize] specifying the output of the LSTM
5113     *       cell from previous time-step. Tensor is quantized with a fixed
5114     *       quantization range of [-1, 127/128] (scale = 1/128, zeroPoint =
5115     *       128).
5116     *
5117     *
5118     * Outputs:
5119     * * 0: A 2-D tensor of type {@link %{OperandTypeLinkPfx}TENSOR_QUANT16_SYMM}
5120     *      and shape [numBatches, outputSize] which contains a cell state from
5121     *      the current time step. Tensor is quantized using a quantization
5122     *      range of [-2^4, 2^4 * 32767/32768] (scale = 2^4 / 32768, zeroPoint =
5123     *      0).
5124     * * 1: A 2-D tensor of type {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM}
5125     *      and shape [numBatches, outputSize] which contains the output value.
5126     *      Tensor is quantized with a fixed quantization range of [-1, 127/128]
5127     *      (scale = 1/128, zeroPoint = 128).
%insert AVAIL3
5128     */
5129    %{DeclareOperation_1.2 QUANTIZED_16BIT_LSTM 73},
5130
5131    /**
5132     * Draws samples from a multinomial distribution.
5133     *
5134     * Supported tensor {@link %{OperandType}}:
5135     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16}
5136     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32}
5137     *
5138     * Inputs:
5139     * * 0: A 2-D tensor with shape [batches, classes], specifying the
5140     *      unnormalized log-probabilities for all classes.
5141     * * 1: A scalar {@link %{OperandTypeLinkPfx}INT32}, specifying the number of
5142     *      independent samples to draw for each row slice.
5143     * * 2: A 1-D {@link %{OperandTypeLinkPfx}TENSOR_INT32} tensor with shape [2],
5144     *      specifying seeds used to initialize the random distribution. If both
5145     *      provided seeds are 0, both will be randomly generated.
5146     * Outputs:
5147     * * 0: A 2-D {@link %{OperandTypeLinkPfx}TENSOR_INT32} tensor with shape
5148     *      [batches, samples], containing the drawn samples.
5149%insert AVAIL3
5150     */
5151    %{DeclareOperation_1.2 RANDOM_MULTINOMIAL 74},
5152
5153    /**
5154     * Reduces a tensor by computing the "logical and" of elements along given
5155     * dimensions.
5156     *
5157     * If keep_dims is true, the reduced dimensions are
5158     * retained with length 1. Otherwise, the rank of the tensor is reduced by
5159     * 1 for each entry in dimensions.
5160     *
5161     * Supported tensor {@link %{OperandType}}:
5162     * * {@link %{OperandTypeLinkPfx}TENSOR_BOOL8}
5163     *
5164     * Supported tensor rank: up to 4
5165     *
5166     * Inputs:
5167     * * 0: An n-D tensor.
5168     * * 1: A 1-D tensor of {@link %{OperandTypeLinkPfx}TENSOR_INT32}. The dimensions
5169     *      to reduce. Dimension values must be in the range [-n, n).
5170     * * 2: An {@link %{OperandTypeLinkPfx}BOOL} scalar, keep_dims. If true,
5171     *      retains reduced dimensions with length 1.
5172     *
5173     * Outputs:
5174     * * 0: A tensor of the same {@link %{OperandType}} as input0.
5175     *      If all dimensions are reduced and keep_dims is false, the output
5176     *      shape is [1].
5177%insert AVAIL3
5178     */
5179    %{DeclareOperation_1.2 REDUCE_ALL 75},
5180
5181    /**
5182     * Reduces a tensor by computing the "logical or" of elements along given
5183     * dimensions.
5184     *
5185     * If keep_dims is true, the reduced dimensions are
5186     * retained with length 1. Otherwise, the rank of the tensor is reduced by
5187     * 1 for each entry in dimensions.
5188     *
5189     * Supported tensor {@link %{OperandType}}:
5190     * * {@link %{OperandTypeLinkPfx}TENSOR_BOOL8}
5191     *
5192     * Supported tensor rank: up to 4
5193     *
5194     * Inputs:
5195     * * 0: An n-D tensor.
5196     * * 1: A 1-D tensor of {@link %{OperandTypeLinkPfx}TENSOR_INT32}. The dimensions
5197     *      to reduce. Dimension values must be in the range [-n, n).
5198     * * 2: An {@link %{OperandTypeLinkPfx}BOOL} scalar, keep_dims. If true,
5199     *      retains reduced dimensions with length 1.
5200     *
5201     * Outputs:
5202     * * 0: A tensor of the same {@link %{OperandType}} as input0.
5203     *      If all dimensions are reduced and keep_dims is false, the output
5204     *      shape is [1].
5205%insert AVAIL3
5206     */
5207    %{DeclareOperation_1.2 REDUCE_ANY 76},
5208
5209    /**
5210     * Reduces a tensor by computing the maximum of elements along given
5211     * dimensions.
5212     *
5213     * If keep_dims is true, the reduced dimensions are
5214     * retained with length 1. Otherwise, the rank of the tensor is reduced by
5215     * 1 for each entry in dimensions.
5216     *
5217     * Supported tensor {@link %{OperandType}}:
5218     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16}
5219     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32}
5220     * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM}
5221%kind aidl canonical ndk hal_1.3+
5222     * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{NNAPILevel4})
5223%/kind
5224     *
5225     * Supported tensor rank: up to 4
5226     *
5227     * Inputs:
5228     * * 0: An n-D tensor.
5229     * * 1: A 1-D tensor of {@link %{OperandTypeLinkPfx}TENSOR_INT32}. The dimensions
5230     *      to reduce. Dimension values must be in the range [-n, n).
5231     * * 2: An {@link %{OperandTypeLinkPfx}BOOL} scalar, keep_dims. If true,
5232     *      retains reduced dimensions with length 1.
5233     *
5234     * Outputs:
5235     * * 0: A tensor of the same {@link %{OperandType}} as input0.
5236     *      If all dimensions are reduced and keep_dims is false, the output
5237     *      shape is [1].
5238%kind aidl canonical ndk hal_1.3+
5239     *      For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} and
5240     *      {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} tensor,
5241     *      the scale and zeroPoint must be the same as input0.
5242%else
5243     *      For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} tensor,
5244     *      the scale and zeroPoint must be the same as input0.
5245%/kind
5246%insert AVAIL3
5247     */
5248    %{DeclareOperation_1.2 REDUCE_MAX 77},
5249
5250    /**
5251     * Reduces a tensor by computing the minimum of elements along given
5252     * dimensions.
5253     *
5254     * If keep_dims is true, the reduced dimensions are
5255     * retained with length 1. Otherwise, the rank of the tensor is reduced by
5256     * 1 for each entry in dimensions.
5257     *
5258     * Supported tensor {@link %{OperandType}}:
5259     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16}
5260     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32}
5261     * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM}
5262%kind aidl canonical ndk hal_1.3+
5263     * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{NNAPILevel4})
5264%/kind
5265     *
5266     * Supported tensor rank: up to 4
5267     *
5268     * Inputs:
5269     * * 0: An n-D tensor.
5270     * * 1: A 1-D tensor of {@link %{OperandTypeLinkPfx}TENSOR_INT32}. The dimensions
5271     *      to reduce. Dimension values must be in the range [-n, n).
5272     * * 2: An {@link %{OperandTypeLinkPfx}BOOL} scalar, keep_dims. If true,
5273     *      retains reduced dimensions with length 1.
5274     *
5275     * Outputs:
5276     * * 0: A tensor of the same {@link %{OperandType}} as input0.
5277     *      If all dimensions are reduced and keep_dims is false, the output
5278     *      shape is [1].
5279%kind aidl canonical ndk hal_1.3+
5280     *      For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} and
5281     *      {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} tensor,
5282     *      the scale and zeroPoint must be the same as input0.
5283%else
5284     *      For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} tensor,
5285     *      the scale and zeroPoint must be the same as input0.
5286%/kind
5287%insert AVAIL3
5288     */
5289    %{DeclareOperation_1.2 REDUCE_MIN 78},
5290
5291    /**
5292     * Reduces a tensor by multiplying elements along given dimensions.
5293     *
5294     * If keep_dims is true, the reduced dimensions are
5295     * retained with length 1. Otherwise, the rank of the tensor is reduced by
5296     * 1 for each entry in dimensions.
5297     *
5298     * Supported tensor {@link %{OperandType}}:
5299     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16}
5300     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32}
5301     *
5302     * Supported tensor rank: up to 4
5303     *
5304     * Inputs:
5305     * * 0: An n-D tensor.
5306     * * 1: A 1-D tensor of {@link %{OperandTypeLinkPfx}TENSOR_INT32}. The dimensions
5307     *      to reduce. Dimension values must be in the range [-n, n).
5308     * * 2: An {@link %{OperandTypeLinkPfx}BOOL} scalar, keep_dims. If true,
5309     *      retains reduced dimensions with length 1.
5310     *
5311     * Outputs:
5312     * * 0: A tensor of the same {@link %{OperandType}} as input0.
5313     *      If all dimensions are reduced and keep_dims is false, the output
5314     *      shape is [1].
5315%insert AVAIL3
5316     */
5317    %{DeclareOperation_1.2 REDUCE_PROD 79},
5318
5319    /**
5320     * Reduces a tensor by summing elements along given dimensions.
5321     *
5322     * If keep_dims is true, the reduced dimensions are
5323     * retained with length 1. Otherwise, the rank of the tensor is reduced by
5324     * 1 for each entry in dimensions.
5325     *
5326     * Supported tensor {@link %{OperandType}}:
5327     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16}
5328     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32}
5329     *
5330     * Supported tensor rank: up to 4
5331     *
5332     * Inputs:
5333     * * 0: An n-D tensor.
5334     * * 1: A 1-D tensor of {@link %{OperandTypeLinkPfx}TENSOR_INT32}. The dimensions
5335     *      to reduce. Dimension values must be in the range [-n, n).
5336     * * 2: An {@link %{OperandTypeLinkPfx}BOOL} scalar, keep_dims. If true,
5337     *      retains reduced dimensions with length 1.
5338     *
5339     * Outputs:
5340     * * 0: A tensor of the same {@link %{OperandType}} as input0.
5341     *      If all dimensions are reduced and keep_dims is false, the output
5342     *      shape is [1].
5343%insert AVAIL3
5344     */
5345    %{DeclareOperation_1.2 REDUCE_SUM 80},
5346
5347    /**
5348     * Select and scale the feature map of each region of interest to a unified
5349     * output size by average pooling sampling points from bilinear interpolation.
5350     *
5351     * The region of interest is represented by its upper-left corner coordinate
5352     * (x1,y1) and lower-right corner coordinate (x2,y2) in the original image.
5353     * A spatial scaling factor is applied to map into feature map coordinate.
5354     * A valid region of interest should satisfy x1 <= x2 and y1 <= y2.
5355     *
5356     * No rounding is applied in this operation. The sampling points are uniformly
5357     * distributed in the pooling bin and their values are calculated by bilinear
5358     * interpolation.
5359     *
5360     * Supported tensor {@link %{OperandType}}:
5361     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16}
5362     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32}
5363     * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM}
5364%kind aidl canonical ndk hal_1.3+
5365     * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{NNAPILevel4})
5366%/kind
5367     *
5368     * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
5369     * With the default data layout NHWC, the data is stored in the order of:
5370     * [batch, height, width, channels]. Alternatively, the data layout could
5371     * be NCHW, the data storage order of: [batch, channels, height, width].
5372     *
5373     * Inputs:
5374     * * 0: A 4-D tensor, specifying the feature map.
5375     * * 1: A 2-D Tensor of shape [num_rois, 4], specifying the locations of
5376     *      the regions of interest, each line with format [x1, y1, x2, y2].
5377     *      For input0 of type {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM},
5378     *      this tensor should be of {@link %{OperandTypeLinkPfx}TENSOR_QUANT16_ASYMM},
5379     *      with zeroPoint of 0 and scale of 0.125. Zero num_rois is
5380     *      supported for this tensor.
5381     * * 2: An 1-D {@link %{OperandTypeLinkPfx}TENSOR_INT32} tensor, of shape
5382     *      [num_rois], specifying the batch index of each box. Boxes with
5383     *      the same batch index are grouped together. Zero num_rois is
5384     *      supported for this tensor.
5385     * * 3: An {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the output
5386     *      height of the output tensor.
5387     * * 4: An {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the output
5388     *      width of the output tensor.
5389     * * 5: An {@link %{OperandTypeLinkPfx}FLOAT32} scalar, specifying the ratio
5390     *      from the height of original image to the height of feature map.
5391     * * 6: An {@link %{OperandTypeLinkPfx}FLOAT32} scalar, specifying the ratio
5392     *      from the width of original image to the width of feature map.
5393     * * 7: An {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the number of
5394     *      sampling points in height dimension used to compute the output.
5395     *      Set to 0 for adaptive value of ceil(roi_height/out_height).
5396     * * 8: An {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the number of
5397     *      sampling points in width dimension used to compute the output.
5398     *      Set to 0 for adaptive value of ceil(roi_width/out_width).
5399     * * 9: An {@link %{OperandTypeLinkPfx}BOOL} scalar, set to true to specify
5400     *      NCHW data layout for input0 and output0. Set to false for NHWC.
5401     *
5402     * Outputs:
5403     * * 0: A tensor of the same {@link %{OperandType}} as input0. The output
5404     *      shape is [num_rois, out_height, out_width, depth].
5405%kind aidl canonical ndk hal_1.3+
5406     *      For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} and
5407     *      {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} tensor,
5408%else
5409     *      For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} tensor,
5410%/kind
5411     *      the scale and zeroPoint can be different from the input0 scale and zeroPoint.
5412%insert AVAIL3
5413     */
5414    %{DeclareOperation_1.2 ROI_ALIGN 81},
5415
5416    /**
5417     * Select and scale the feature map of each region of interest to a unified
5418     * output size by max-pooling.
5419     *
5420     * The region of interest is represented by its upper-left corner coordinate
5421     * (x1,y1) and lower-right corner coordinate (x2,y2) in the original image.
5422     * A spatial scaling factor is applied to map into feature map coordinate.
5423     * A valid region of interest should satisfy x1 <= x2 and y1 <= y2.
5424     *
5425     * Rounding is applied in this operation to ensure integer boundary for
5426     * regions of interest and pooling bins.
5427     *
5428     * Supported tensor {@link %{OperandType}}:
5429     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16}
5430     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32}
5431     * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM}
5432%kind aidl canonical ndk hal_1.3+
5433     * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{NNAPILevel4})
5434%/kind
5435     *
5436     * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
5437     * With the default data layout NHWC, the data is stored in the order of:
5438     * [batch, height, width, channels]. Alternatively, the data layout could
5439     * be NCHW, the data storage order of: [batch, channels, height, width].
5440     *
5441     * Inputs:
5442     * * 0: A 4-D tensor, specifying the feature map.
5443     * * 1: A 2-D Tensor of shape [num_rois, 4], specifying the locations of
5444     *      the regions of interest, each line with format [x1, y1, x2, y2].
5445%kind aidl canonical ndk hal_1.3+
5446     *      For input0 of type {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} and
5447     *      {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} tensor,
5448%else
5449     *      For input0 of type {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM},
5450%/kind
5451     *      this tensor should be of {@link %{OperandTypeLinkPfx}TENSOR_QUANT16_ASYMM},
5452     *      with zeroPoint of 0 and scale of 0.125.
5453     * * 2: An 1-D {@link %{OperandTypeLinkPfx}TENSOR_INT32} tensor, of shape
5454     *      [num_rois], specifying the batch index of each box. Boxes with
5455     *      the same batch index are grouped together.
5456     * * 3: An {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the output
5457     *      height of the output tensor.
5458     * * 4: An {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the output
5459     *      width of the output tensor.
5460     * * 5: An {@link %{OperandTypeLinkPfx}FLOAT32} scalar, specifying the ratio
5461     *      from the height of original image to the height of feature map.
5462     * * 6: An {@link %{OperandTypeLinkPfx}FLOAT32} scalar, specifying the ratio
5463     *      from the width of original image to the width of feature map.
5464     * * 7: An {@link %{OperandTypeLinkPfx}BOOL} scalar, set to true to specify
5465     *      NCHW data layout for input0 and output0. Set to false for NHWC.
5466     *
5467     * Outputs:
5468     * * 0: A tensor of the same {@link %{OperandType}} as input0. The output
5469     *      shape is [num_rois, out_height, out_width, depth].
5470%kind aidl canonical ndk hal_1.3+
5471     *      For input0 of type {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} and
5472     *      {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} tensor,
5473%else
5474     *      For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} tensor,
5475%/kind
5476     *      the scale and zeroPoint must be the same as input0.
5477%insert AVAIL3
5478     */
5479    %{DeclareOperation_1.2 ROI_POOLING 82},
5480
5481    /**
5482     * Computes reciprocal of square root of x element-wise.
5483     *
5484     * Supported tensor {@link %{OperandType}}:
5485     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16}
5486     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32}
5487%kind aidl canonical ndk
5488     * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} (since %{NNAPILevel7})
5489     * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{NNAPILevel7})
5490%/kind
5491     *
5492     * Supported tensor rank: from 1.
5493     *
5494     * Inputs:
5495     * * 0: A tensor.
5496     *
5497     * Outputs:
5498     * * 0: The output tensor of same shape as input0.
5499%kind aidl canonical ndk
5500     *      For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} and
5501     *      {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} tensor,
5502     *      the scale and zeroPoint can be different from inputs' scale and zeroPoint.
5503%/kind
5504%insert AVAIL3
5505     */
5506    %{DeclareOperation_1.2 RSQRT 83},
5507
5508    /**
5509     * Using a tensor of booleans c and input tensors x and y select values
5510     * elementwise from both input tensors:
5511     *
5512     * O[i] = C[i] ? x[i] : y[i].
5513     *
5514     * Supported tensor {@link %{OperandType}}:
5515     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16}
5516     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32}
5517     * * {@link %{OperandTypeLinkPfx}TENSOR_INT32}
5518     * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM}
5519%kind aidl canonical ndk hal_1.3+
5520     * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{NNAPILevel4})
5521%/kind
5522     *
5523     * Supported tensor rank: from 1
5524     *
5525     * Inputs:
5526     * * 0: A tensor of type {@link %{OperandTypeLinkPfx}TENSOR_BOOL8} acting as a
5527     *      mask that chooses, based on the value at each element, whether the
5528     *      corresponding element in the output should be taken from input1 (if
5529     *      true) or input2 (if false).
5530     * * 1: An input tensor of the same shape as input0.
5531%kind hal_1.2
5532     * * 2: An input tensor of the same shape and type as input1.
5533     *      For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} tensor,
5534     *      the scales and zeroPoint can be different from input1 scale and zeroPoint.
5535%else
5536     * * 2: An input tensor of the same shape and type as input1.
5537     *      For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM}
5538     *      and {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} tensor,
5539     *      the scales and zeroPoint can be different from input1 scale and zeroPoint.
5540%/kind
5541     *
5542     * Outputs:
5543     * * 0: A tensor of the same type and shape as input1 and input2.
5544     *      For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} tensor,
5545     *      the scale and zeroPoint can be different from inputs' scale and zeroPoint.
5546%insert AVAIL3
5547     */
5548    %{DeclareOperation_1.2 SELECT 84},
5549
5550    /**
5551     * Computes sin of x element-wise.
5552     *
5553     * Supported tensor {@link %{OperandType}}:
5554     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16}
5555     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32}
5556     *
5557     * Supported tensor rank: from 1.
5558     *
5559     * Inputs:
5560     * * 0: A tensor.
5561     *
5562     * Outputs:
5563     * * 0: The output tensor of same shape as input0.
5564%insert AVAIL3
5565     */
5566    %{DeclareOperation_1.2 SIN 85},
5567
5568    /**
5569     * Extracts a slice of specified size from the input tensor starting at a
5570     * specified location.
5571     *
5572     * The starting location is specified as a 1-D tensor containing offsets
5573     * for each dimension. The size is specified as a 1-D tensor containing
5574     * either size of a slice along corresponding dimension or -1. In the latter
5575     * case, all the remaining elements in dimension are included in the slice.
5576     *
5577     * A sum of begin offset and a size of a slice must not exceed size of a
5578     * corresponding dimension.
5579     *
5580     * Supported tensor {@link %{OperandType}}:
5581     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16}
5582     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32}
5583     * * {@link %{OperandTypeLinkPfx}TENSOR_INT32}
5584     * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM}
5585%kind aidl canonical ndk hal_1.3+
5586     * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{NNAPILevel4})
5587%/kind
5588     *
5589     * Supported tensor rank: from 1
5590     *
5591     * Inputs:
5592     * * 0: An n-D tensor to take slice from, may be zero-sized.
5593     * * 1: A 1-D tensor of type {@link %{OperandTypeLinkPfx}TENSOR_INT32} specifying
5594     *      the beginning indices of the slice in each dimension.
5595     * * 2: A 1-D tensor of type {@link %{OperandTypeLinkPfx}TENSOR_INT32} specifying
5596     *      the size of the slice in each dimension.
5597     *
5598     * Outputs:
5599     * * 0: An n-D tensor of the same type as the input containing the slice.
5600%kind aidl canonical ndk hal_1.3+
5601     *      For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} and
5602     *      {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} tensor,
5603%else
5604     *      For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} tensor,
5605%/kind
5606     *      its scale and zeroPoint has to be same as the input0 scale and zeroPoint.
5607%insert AVAIL3
5608     */
5609    %{DeclareOperation_1.2 SLICE 86},
5610
5611    /**
5612     * Splits a tensor along a given axis into num_splits subtensors.
5613     *
5614     * Supported tensor {@link %{OperandType}}:
5615     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16}
5616     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32}
5617     * * {@link %{OperandTypeLinkPfx}TENSOR_INT32}
5618     * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM}
5619%kind aidl canonical ndk hal_1.3+
5620     * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{NNAPILevel4})
5621%/kind
5622     *
5623     * Supported tensor rank: from 1
5624     *
5625     * Inputs:
5626     * * 0: An n-D tensor to split.
5627     * * 1: An {@link %{OperandTypeLinkPfx}INT32} scalar specifying the axis along
5628     *      which to split.
5629     * * 2: An {@link %{OperandTypeLinkPfx}INT32} scalar indicating the number of
5630     *      splits along given axis. Must evenly divide axis size.
5631     *
5632     * Outputs:
5633     * * 0 ~ (num_splits - 1): Resulting subtensors.
5634%kind aidl canonical ndk hal_1.3+
5635     *      For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} and
5636     *      {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} tensor,
5637     *      the scale and zeroPoint must be the same as input0.
5638%else
5639     *      For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} tensor,
5640     *      the scale and zeroPoint must be the same as input0.
5641%/kind
5642%insert AVAIL3
5643     */
5644    %{DeclareOperation_1.2 SPLIT 87},
5645
5646    /**
5647     * Computes square root of x element-wise.
5648     *
5649     * Supported tensor {@link %{OperandType}}:
5650     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16}
5651     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32}
5652     *
5653     * Supported tensor rank: from 1.
5654     *
5655     * Inputs:
5656     * * 0: A tensor.
5657     *
5658     * Outputs:
5659     * * 0: The output tensor of same shape as input0.
5660%insert AVAIL3
5661     */
5662    %{DeclareOperation_1.2 SQRT 88},
5663
5664    /**
5665     * Constructs a tensor by tiling a given tensor.
5666     *
5667     * This operation creates a new tensor by replicating `input` `multiples`
5668     * times. The output tensor's i-th dimension has `input.dims(i) * multiples[i]`
5669     * elements, and the values of `input` are replicated `multiples[i]` times
5670     * along the i-th dimension.
5671     * For example, tiling `[a b c d]` by `[2]` produces `[a b c d a b c d]`.
5672     *
5673     * Supported tensor {@link %{OperandType}}:
5674     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16}
5675     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32}
5676     * * {@link %{OperandTypeLinkPfx}TENSOR_INT32}
5677     * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM}
5678%kind aidl canonical ndk hal_1.3+
5679     * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{NNAPILevel4})
5680%/kind
5681     *
5682     * Supported tensor rank: from 1
5683     *
5684     * Inputs:
5685     * * 0: input, an n-D tensor specifying the input.
5686     * * 1: multiples, a 1-D tensor of {@link %{OperandTypeLinkPfx}TENSOR_INT32}.
5687     *      The length of multiples must be n.
5688     *
5689     * Outputs:
5690     * * 0: A tiled tensor of the same {@link %{OperandType}} and rank as `input`.
5691%kind aidl canonical ndk hal_1.3+
5692     *      For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} and
5693     *      {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} tensor,
5694     *      the scale and zeroPoint must be the same as input0.
5695%else
5696     *      For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} tensor,
5697     *      the scale and zeroPoint must be the same as input0.
5698%/kind
5699%insert AVAIL3
5700     */
5701    %{DeclareOperation_1.2 TILE 89},
5702
5703    /**
5704     * Finds values and indices of the k largest entries for the last dimension.
5705     *
5706     * Resulting values in each dimensions are sorted in descending order. If
5707     * two values are equal, the one with larger index appears first.
5708     *
5709     * Supported tensor {@link %{OperandType}}:
5710     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16}
5711     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32}
5712     * * {@link %{OperandTypeLinkPfx}TENSOR_INT32}
5713     * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM}
5714%kind aidl canonical ndk hal_1.3+
5715     * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{NNAPILevel4})
5716%/kind
5717     *
5718     * Supported tensor rank: from 1
5719     *
5720     * Inputs:
5721     * * 0: input, an n-D tensor specifying the input.
5722     * * 1: k, an {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the number of
5723     *      top elements to look for along the last dimension.
5724     *
5725     * Outputs:
5726     * * 0: An n-D tensor of the same type as the input, containing the k
5727     *      largest elements along each last dimensional slice.
5728%kind aidl canonical ndk hal_1.3+
5729     *      For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} and
5730     *      {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} tensor,
5731     *      the scale and zeroPoint must be the same as input0.
5732%else
5733     *      For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} tensor,
5734     *      the scale and zeroPoint must be the same as input0.
5735%/kind
5736     * * 1: An n-D tensor of type {@link %{OperandTypeLinkPfx}TENSOR_INT32}
5737     *      containing the indices of values within the last dimension of input.
5738%insert AVAIL3
5739     */
5740    %{DeclareOperation_1.2 TOPK_V2 90},
5741
5742    /**
5743     * Performs the transpose of 2-D convolution operation.
5744     *
5745     * This operation is sometimes called "deconvolution" after Deconvolutional
5746     * Networks, but is actually the transpose (gradient) of
5747     * {@link %{OperandTypeLinkPfx}CONV_2D} rather than an actual deconvolution.
5748     *
5749     * The output dimensions are functions of the filter dimensions, stride, and
5750     * padding.
5751     *
5752     * Supported tensor {@link %{OperandType}} configurations:
5753     * * 16 bit floating point:
5754     * * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16} for input, filter, output, and bias.
5755     *
5756     * * 32 bit floating point:
5757     * * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32} for input, filter, output, and bias.
5758     *
5759     * * Quantized:
5760     * * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} for input, filter, and output.
5761     * * * {@link %{OperandTypeLinkPfx}TENSOR_INT32} for bias (with scale set to
5762     * * * input.scale * filter.scale).
5763     *
5764     * * Quantized with symmetric per channel quantization for the filter:
5765     * * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} for input, and output.
5766     * * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_SYMM_PER_CHANNEL} for filter.
5767     * * * {@link %{OperandTypeLinkPfx}TENSOR_INT32} for bias (scale set to 0.0,
5768     * * * each value scaling is separate and equal to input.scale * filter.scales[channel]).
5769%kind aidl canonical ndk hal_1.3+
5770     *
5771     * Available since %{NNAPILevel4}:
5772     * * Quantized signed (since %{NNAPILevel4}):
5773     * * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} for input, filter, and output.
5774     * * * {@link %{OperandTypeLinkPfx}TENSOR_INT32} for bias (with scale set to
5775     * * * input.scale * filter.scale).
5776     *
5777     * * Quantized signed with filter symmetric per channel quantization
5778     *   (since %{NNAPILevel4}):
5779     * * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} for input, and output.
5780     * * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_SYMM_PER_CHANNEL} for filter.
5781     * * * {@link %{OperandTypeLinkPfx}TENSOR_INT32} for bias (scale set to 0.0,
5782     * * * each value scaling is separate and equal to input.scale * filter.scales[channel]).
5783%/kind
5784     *
5785     * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
5786     * With the default data layout NHWC, the data is stored in the order of:
5787     * [batch, height, width, channels]. Alternatively, the data layout could
5788     * be NCHW, the data storage order of: [batch, channels, height, width].
5789     *
5790     * Both explicit padding and implicit padding are supported.
5791     *
5792     * Inputs (explicit padding):
5793     * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
5794     *      specifying the input.
5795%kind ndk
5796     *      Since API level 29, zero batches is supported for this tensor.
5797%/kind
5798     * * 1: A 4-D tensor, of shape
5799     *      [depth_out, filter_height, filter_width, depth_in], specifying the
5800     *      filter. For tensor of type
5801     *      {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_SYMM_PER_CHANNEL} the channel
5802     *      dimension (%{Ann}SymmPerChannelQuantParams::channelDim) must be set to 0.
5803     * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
5804     *      tensor of type {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32} or
5805     *      {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16}, the bias must be of the
5806     *      same type.
5807%kind aidl canonical ndk hal_1.3+
5808     *      For filter tensor of {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM}
5809     *      and {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED},
5810%else
5811     *      For filter tensor of {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM},
5812%/kind
5813     *      the bias should be of {@link %{OperandTypeLinkPfx}TENSOR_INT32},
5814     *      with zeroPoint of 0 and bias_scale == input_scale * filter_scale.
5815     *      For filter tensor of {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_SYMM_PER_CHANNEL},
5816     *      the bias must be of {@link %{OperandTypeLinkPfx}TENSOR_INT32}, with zeroPoint of 0
5817     *      and bias_scale of 0. The actual scale of each value 'i' is equal to
5818     *      bias_scale[i] = input_scale * filter_scale[i].
5819     * * 3: An {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the padding on
5820     *      the left, in the ‘width’ dimension.
5821     * * 4: An {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the padding on
5822     *      the right, in the ‘width’ dimension.
5823     * * 5: An {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the padding on
5824     *      the top, in the ‘height’ dimension.
5825     * * 6: An {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the padding on
5826     *      the bottom, in the ‘height’ dimension.
5827     * * 7: An {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the stride when
5828     *      walking through input in the ‘width’ dimension.
5829     * * 8: An {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the stride when
5830     *      walking through input in the ‘height’ dimension.
5831     * * 9: An {@link %{OperandTypeLinkPfx}INT32} scalar, and has to be one of the
5832     *      {@link %{FusedActivationFunc}} values. Specifies the activation to
5833     *      invoke on the result.
5834     * * 10: An {@link %{OperandTypeLinkPfx}BOOL} scalar, set to true to specify
5835     *       NCHW data layout for input0 and output0. Set to false for NHWC.
5836     *
5837     * Inputs (implicit padding):
5838     * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
5839     *      specifying the input.
5840%kind ndk
5841     *      Since API level 29, zero batches is supported for this tensor.
5842%/kind
5843     * * 1: A 4-D tensor, of shape
5844     *      [depth_out, filter_height, filter_width, depth_in], specifying the
5845     *      filter. For tensor of type
5846     *      {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_SYMM_PER_CHANNEL} the channel
5847     *      dimension (%{Ann}SymmPerChannelQuantParams::channelDim) must be set to 0.
5848     * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
5849     *      tensor of type {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32} or
5850     *      {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16}, the bias should be of the
5851     *      same type.
5852%kind aidl canonical ndk hal_1.3+
5853     *      For filter tensor of {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM}
5854     *      and {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED},
5855%else
5856     *      For filter tensor of {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM},
5857%/kind
5858     *      the bias should be of {@link %{OperandTypeLinkPfx}TENSOR_INT32},
5859     *      with zeroPoint of 0 and bias_scale == input_scale * filter_scale.
5860     *      For filter tensor of {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_SYMM_PER_CHANNEL},
5861     *      the bias must be of {@link %{OperandTypeLinkPfx}TENSOR_INT32}, with zeroPoint of 0
5862     *      and bias_scale of 0. The actual scale of each value 'i' is equal to
5863     *      bias_scale[i] = input_scale * filter_scale[i].
5864     * * 3: An {@link %{OperandTypeLinkPfx}TENSOR_INT32} tensor, specifying the output
5865     *      tensor shape.
5866     * * 4: An {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the implicit
5867     *      padding scheme, has to be one of the
5868%insert PaddingCodeValues
5869     * * 5: An {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the stride when
5870     *      walking through input in the ‘width’ dimension.
5871     * * 6: An {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the stride when
5872     *      walking through input in the ‘height’ dimension.
5873     * * 7: An {@link %{OperandTypeLinkPfx}INT32} scalar, and has to be one of the
5874     *      {@link %{FusedActivationFunc}} values. Specifies the activation to
5875     *      invoke on the result.
5876     * * 8: An {@link %{OperandTypeLinkPfx}BOOL} scalar, set to true to specify
5877     *      NCHW data layout for input0 and output0. Set to false for NHWC.
5878     *
5879     * Outputs:
5880     * * 0: The output 4-D tensor, of shape
5881     *      [batches, out_height, out_width, depth_out].
5882%kind aidl canonical ndk hal_1.3+
5883     *      For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} and
5884     *      {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} tensor,
5885%else
5886     *      For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} tensor,
5887%/kind
5888     *      the scale and zeroPoint can be different from inputs' scale and zeroPoint.
5889%insert AVAIL3
5890     */
5891    %{DeclareOperation_1.2 TRANSPOSE_CONV_2D 91},
5892
5893    /**
5894     * A recurrent neural network specified by an LSTM cell.
5895     *
5896     * Performs (fully) dynamic unrolling of input.
5897     *
5898     * This Op unrolls the input along the time dimension, and implements the
5899     * following operation for each element in the sequence
5900     * s = 1...sequence_length:
5901     *   outputs[s] = projection(state = activation(LSTMOp(inputs[s])))
5902     *
5903     * Where LSTMOp is the LSTM op as in {@link %{OperandTypeLinkPfx}LSTM},
5904     * the "projection" is an optional projection layer from state and output
5905     * and the “activation” is the function passed as the
5906     * “fused_activation_function” argument (if not “NONE”).
5907     *
5908     * Supported tensor {@link %{OperandType}}:
5909     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16}
5910     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32}
5911     *
5912     * Supported tensor rank: 3, either time-major or batch-major.
5913     *
5914     * All input and output tensors must be of the same type.
5915     *
5916     * Inputs:
5917     * * 0: The input (\f$x_t\f$).
5918     *      A 3-D tensor of shape:
5919     *        If time-major: [max_time, batch_size, input_size]
5920     *        If batch-major: [batch_size, max_time, input_size]
5921     *      where “max_time” is the number of timesteps (sequence length),
5922     *      “batch_size” corresponds to the batching dimension, and
5923     *      “input_size” is the size of the input.
5924     * * 1: The input-to-input weights (\f$W_{xi}\f$). Optional.
5925     *      A 2-D tensor of shape [num_units, input_size], where “num_units”
5926     *      corresponds to the number of cell units.
5927     * * 2: The input-to-forget weights (\f$W_{xf}\f$).
5928     *      A 2-D tensor of shape [num_units, input_size].
5929     * * 3: The input-to-cell weights (\f$W_{xc}\f$).
5930     *      A 2-D tensor of shape [num_units, input_size].
5931     * * 4: The input-to-output weights (\f$W_{xo}\f$).
5932     *      A 2-D tensor of shape [num_units, input_size].
5933     * * 5: The recurrent-to-input weights (\f$W_{hi}\f$). Optional.
5934     *      A 2-D tensor of shape [num_units, output_size], where “output_size”
5935     *      corresponds to either the number of cell units (i.e., “num_units”),
5936     *      or the second dimension of the “projection_weights”, if defined.
5937     * * 6: The recurrent-to-forget weights (\f$W_{hf}\f$).
5938     *      A 2-D tensor of shape [num_units, output_size].
5939     * * 7: The recurrent-to-cell weights (\f$W_{hc}\f$).
5940     *      A 2-D tensor of shape [num_units, output_size].
5941     * * 8: The recurrent-to-output weights (\f$W_{ho}\f$).
5942     *      A 2-D tensor of shape [num_units, output_size].
5943     * * 9: The cell-to-input weights (\f$W_{ci}\f$). Optional.
5944     *      A 1-D tensor of shape [num_units].
5945     * * 10:The cell-to-forget weights (\f$W_{cf}\f$). Optional.
5946     *      A 1-D tensor of shape [num_units].
5947     * * 11:The cell-to-output weights (\f$W_{co}\f$). Optional.
5948     *      A 1-D tensor of shape [num_units].
5949     * * 12:The input gate bias (\f$b_i\f$). Optional.
5950     *      A 1-D tensor of shape [num_units].
5951     * * 13:The forget gate bias (\f$b_f\f$).
5952     *      A 1-D tensor of shape [num_units].
5953     * * 14:The cell bias (\f$b_c\f$).
5954     *      A 1-D tensor of shape [num_units].
5955     * * 15:The output gate bias (\f$b_o\f$).
5956     *      A 1-D tensor of shape [num_units].
5957     * * 16:The projection weights (\f$W_{proj}\f$). Optional.
5958     *      A 2-D tensor of shape [output_size, num_units].
5959     * * 17:The projection bias (\f$b_{proj}\f$). Optional.
5960     *      A 1-D tensor of shape [output_size].
5961     * * 18:The output state (in) (\f$h_{t-1}\f$).
5962     *      A 2-D tensor of shape [batch_size, output_size].
5963     * * 19:The cell state (in) (\f$C_{t-1}\f$).
5964     *      A 2-D tensor of shape [batch_size, num_units].
5965     * * 20:The activation function (\f$g\f$).
5966     *      A value indicating the activation function:
5967     *      <ul>
5968     *      <li>0: None;
5969     *      <li>1: Relu;
5970     *      <li>3: Relu6;
5971     *      <li>4: Tanh;
5972     *      <li>6: Sigmoid.
5973     *      </ul>
5974     * * 21:The clipping threshold (\f$t_{cell}\f$) for the cell state, such
5975     *      that values are bound within [-cell_clip, cell_clip]. If set to 0.0
5976     *      then clipping is disabled.
5977     * * 22:The clipping threshold (\f$t_{proj}\f$) for the output from the
5978     *      projection layer, such that values are bound within
5979     *      [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
5980     * * 23:Time-major if true, batch-major if false.
5981     * * 24:The input layer normalization weights. Optional.
5982     *      A 1-D tensor of shape [num_units]. Used to rescale normalized inputs
5983     *      to activation at input gate.
5984     * * 25:The forget layer normalization weights. Optional.
5985     *      A 1-D tensor of shape [num_units]. Used to rescale normalized inputs
5986     *      to activation at forget gate.
5987     * * 26:The cell layer normalization weights. Optional.
5988     *      A 1-D tensor of shape [num_units]. Used to rescale normalized inputs
5989     *      to activation at cell gate.
5990     * * 27:The output layer normalization weights. Optional.
5991     *      A 1-D tensor of shape [num_units]. Used to rescale normalized inputs
5992     *      to activation at output gate.
5993     *
5994     * Outputs:
5995     * * 0: The output (\f$o_t\f$).
5996     *      A 3-D tensor of shape:
5997     *        If time-major: [max_time, batch_size, output_size]
5998     *        If batch-major: [batch_size, max_time, output_size]
5999%kind aidl canonical ndk hal_1.3+
6000     * * 1: A tensor of shape [batch_size, output_size] containing a hidden
6001     *      state from the last time step in the sequence. This output is
6002     *      optional and can be omitted. If this output is present then
6003     *      output #2 must be present as well.
6004     *      Available since %{NNAPILevel4}.
6005     * * 2: A tensor of shape [batch_size, cell_size] containing a cell state
6006     *      from the last time step in the sequence. This output is optional
6007     *      and can be omitted.
6008     *      Available since %{NNAPILevel4}.
6009%/kind
6010%insert AVAIL3
6011%insert OutputState
6012     */
6013    %{DeclareOperation_1.2 UNIDIRECTIONAL_SEQUENCE_LSTM 92},
6014
6015    /**
6016     * A recurrent neural network layer that applies a basic RNN cell to a
6017     * sequence of inputs.
6018     *
6019     * This layer unrolls the input along the sequence dimension, and implements
6020     * the following operation
6021     * for each element in the sequence s = 1...sequence_length:
6022     *   outputs[s] = state = activation(inputs[s] * input_weights’ + state *
6023     *   recurrent_weights’ + bias)
6024     *
6025     * Where:
6026     * * “input_weights” is a weight matrix that multiplies the inputs;
6027     * * “recurrent_weights” is a weight matrix that multiplies the current
6028     *    “state” which itself is the output from the previous time step
6029     *    computation;
6030     * * “bias” is a bias vector (added to each output vector in the batch);
6031     * * “activation” is the function passed as the “fused_activation_function”
6032     *   argument (if not “NONE”).
6033     *
6034     * Supported tensor {@link %{OperandType}}:
6035     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16}
6036     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32}
6037     *
6038     * The input tensors must all be the same type.
6039     *
6040     * Inputs:
6041     * * 0: input.
6042     *      A 3-D tensor. The shape is defined by the input 6 (timeMajor). If
6043     *      it is set to 1, then the input has a shape [maxTime, batchSize,
6044     *      inputSize], otherwise the input has a shape [batchSize, maxTime,
6045     *      inputSize].
6046     * * 1: weights.
6047     *      A 2-D tensor of shape [numUnits, inputSize].
6048     * * 2: recurrent_weights.
6049     *      A 2-D tensor of shape [numUnits, numUnits].
6050     * * 3: bias.
6051     *      A 1-D tensor of shape [numUnits].
6052     * * 4: hidden state
6053     *      A 2-D tensor of shape [batchSize, numUnits]. Specifies a hidden
6054     *      state input for the first time step of the computation.
6055     * * 5: fusedActivationFunction.
6056     *      A {@link %{FusedActivationFunc}} value indicating the activation function. If
6057     *      “NONE” is specified then it results in a linear activation.
6058     * * 6: timeMajor
6059     *      An {@link %{OperandTypeLinkPfx}INT32} scalar specifying the shape format
6060     *      of input and output tensors. Must be set to either 0 or 1.
6061     * Outputs:
6062     * * 0: output.
6063     *      A 3-D tensor. The shape is defined by the input 6 (timeMajor). If
6064     *      it is set to 1, then the output has a shape [maxTime, batchSize,
6065     *      numUnits], otherwise the output has a shape [batchSize, maxTime,
6066     *      numUnits].
6067%kind ndk hal_1.3+
6068     * * 1: A tensor of shape [batchSize, numUnits] containing hidden state
6069     *      from the last time step in the sequence. This output is optional
6070     *      and can be omitted.
6071     *      Available since %{NNAPILevel4}.
6072%/kind
6073%insert AVAIL3
6074%insert OutputState
6075     */
6076    %{DeclareOperation_1.2 UNIDIRECTIONAL_SEQUENCE_RNN 93},
6077
6078    /**
6079     * Resizes images to given size using the nearest neighbor interpretation.
6080     *
6081     * Resized images must be distorted if their output aspect ratio is not the
6082     * same as input aspect ratio. The corner pixels of output may not be the
6083     * same as corner pixels of input.
6084     *
6085     * Supported tensor {@link %{OperandType}}:
6086     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16}
6087     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32}
6088     * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM}
6089%kind ndk hal_1.3+
6090     * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{NNAPILevel4})
6091%/kind
6092     *
6093     * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
6094     * With the default data layout NHWC, the data is stored in the order of:
6095     * [batch, height, width, channels]. Alternatively, the data layout could
6096     * be NCHW, the data storage order of: [batch, channels, height, width].
6097     *
6098     * Both resizing by shape and resizing by scale are supported.
6099     *
6100     * Inputs (resizing by shape):
6101     * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
6102     *      the input. Zero batches is supported for this tensor.
6103     * * 1: An {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the output
6104     *      width of the output tensor.
6105     * * 2: An {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the output
6106     *      height of the output tensor.
6107     * * 3: An {@link %{OperandTypeLinkPfx}BOOL} scalar, default to false.
6108     *      Set to true to specify NCHW data layout for input0 and output0.
6109%kind aidl canonical ndk hal_1.3+
6110     * * 4: Align corners. An optional {@link %{OperandTypeLinkPfx}BOOL}
6111     *      scalar, default to false.  If True, the centers of the 4 corner
6112     *      pixels of the input and output tensors are aligned, preserving the
6113     *      values at the corner pixels.
6114     *      Available since %{NNAPILevel4}.
6115     * * 5: Half pixel centers. An optional {@link %{OperandTypeLinkPfx}BOOL}
6116     *      scalar, default to false. If True, the pixel centers are assumed to
6117     *      be at (0.5, 0.5). This is the default behavior of image.resize in
6118     *      TF 2.0. If this parameter is True, then align_corners parameter
6119     *      must be False.
6120     *      Available since %{NNAPILevel4}.
6121%/kind
6122     *
6123     * Inputs (resizing by scale):
6124     * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
6125     *      the input. Zero batches is supported for this tensor.
6126     * * 1: A scalar, specifying width_scale, the scaling factor of the width
6127     *      dimension from the input tensor to the output tensor. The output
6128     *      width is calculated as new_width = floor(width * width_scale).
6129     *      The scalar must be of {@link %{OperandTypeLinkPfx}FLOAT16} if input0 is
6130     *      of {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16} and of
6131     *      {@link %{OperandTypeLinkPfx}FLOAT32} otherwise.
6132     * * 2: A scalar, specifying height_scale, the scaling factor of the height
6133     *      dimension from the input tensor to the output tensor. The output
6134     *      height is calculated as new_height = floor(height * height_scale).
6135     *      The scalar must be of {@link %{OperandTypeLinkPfx}FLOAT16} if input0 is
6136     *      of {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16} and of
6137     *      {@link %{OperandTypeLinkPfx}FLOAT32} otherwise.
6138     * * 3: An {@link %{OperandTypeLinkPfx}BOOL} scalar, default to false.
6139     *      Set to true to specify NCHW data layout for input0 and output0.
6140%kind aidl canonical ndk hal_1.3+
6141     * * 4: Align corners. An optional {@link %{OperandTypeLinkPfx}BOOL}
6142     *      scalar, default to false.  If True, the centers of the 4 corner
6143     *      pixels of the input and output tensors are aligned, preserving the
6144     *      values at the corner pixels.
6145     *      Available since %{NNAPILevel4}.
6146     * * 5: Half pixel centers. An optional {@link %{OperandTypeLinkPfx}BOOL}
6147     *      scalar, default to false. If True, the pixel centers are assumed to
6148     *      be at (0.5, 0.5). This is the default behavior of image.resize in
6149     *      TF 2.0. If this parameter is True, then align_corners parameter
6150     *      must be False.
6151     *      Available since %{NNAPILevel4}.
6152%/kind
6153     *
6154     * Outputs:
6155     * * 0: The output 4-D tensor, of shape
6156     *      [batches, new_height, new_width, depth].
6157%kind aidl canonical ndk hal_1.3+
6158     *      For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} and
6159     *      {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} tensor,
6160     *      the scale and zeroPoint must be the same as input0.
6161%else
6162     *      For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} tensor,
6163     *      the scale and zeroPoint must be the same as input0.
6164%/kind
6165%insert AVAIL3
6166     */
6167    %{DeclareOperation_1.2 RESIZE_NEAREST_NEIGHBOR 94},
6168%/section
6169
6170%section Operation_1.2_MAX
6171    FUNDAMENTAL_MAX = 94,
6172%/section
6173
6174%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
6175
6176%% HAL OperandType for 1.3
6177%% NDK OperandCode for API 30
6178
6179%section Operand_1.3
6180    /**
6181     * A tensor of 8 bit signed integers that represent real numbers.
6182     *
6183     * Attached to this tensor are two numbers that can be used to convert the
6184     * 8 bit integer to the real value and vice versa. These two numbers are:
6185     * - scale: a 32 bit floating point value greater than zero.
6186     * - zeroPoint: a 32 bit integer, in range [-128, 127].
6187     *
6188     * The formula is:
6189     * real_value = (integer_value - zeroPoint) * scale.
6190%insert AVAIL4
6191     */
6192    %{ANN}TENSOR_QUANT8_ASYMM_SIGNED = 14,
6193%insert canonical_empty_line
6194    /**
6195     * A reference to a %{model_or_subgraph}.
6196%kind ndk
6197     *
6198     * {@link ANeuralNetworksModel_setOperandValueFromModel} must be used to set
6199     * the value for an Operand of this type.
6200%/kind
6201%kind aidl canonical hal*
6202     *
6203     * Must have the lifetime {@link %{OperandLifeTime}::SUBGRAPH}.
6204%/kind
6205%insert AVAIL4
6206     */
6207    %{ANN}%{MODEL_or_SUBGRAPH} = 15,
6208%/section
6209
6210%section Operand_1.3_MAX
6211    FUNDAMENTAL_MAX = 15,
6212%/section
6213
6214%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
6215
6216%% HAL OperationType for 1.3
6217%% NDK OperationCode for API 30
6218
6219%section Operation_1.3
6220    /**
6221     * Quantized version of {@link %{OperationTypeLinkPfx}LSTM}.
6222     *
6223     * The input and the output use asymmetric quantized types, while the rest
6224     * use symmetric ones.
6225     *
6226     * Inputs:
6227     * * 0: The input to the LSTM cell.
6228     *      Type: {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED}
6229     *      Shape: [batchSize, inputSize]
6230     * * 1: The input-to-input weights. Optional.
6231     *      Type: {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_SYMM}
6232     *      Shape: [numUnits, inputSize]
6233     * * 2: The input-to-forget weights.
6234     *      Type: {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_SYMM}
6235     *      Shape: [numUnits, inputSize]
6236     * * 3: The input-to-cell weights.
6237     *      Type: {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_SYMM}
6238     *      Shape: [numUnits, inputSize]
6239     * * 4: The input-to-output weights.
6240     *      Type: {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_SYMM}
6241     *      Shape: [numUnits, inputSize]
6242     * * 5: The recurrent-to-input weights. Optional.
6243     *      Type: {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_SYMM}
6244     *      Shape: [numUnits, outputSize]
6245     * * 6: The recurrent-to-forget weights.
6246     *      Type: {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_SYMM}
6247     *      Shape: [numUnits, outputSize]
6248     * * 7: The recurrent-to-cell weights.
6249     *      Type: {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_SYMM}
6250     *      Shape: [numUnits, outputSize]
6251     * * 8: The recurrent-to-output weights.
6252     *      Type: {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_SYMM}
6253     *      Shape: [numUnits, outputSize]
6254     * * 9: The cell-to-input weights (for peephole). Optional.
6255     *      Type: {@link %{OperandTypeLinkPfx}TENSOR_QUANT16_SYMM}
6256     *      Shape: [numUnits]
6257     * * 10: The cell-to-forget weights (for peephole). Optional.
6258     *       Type: {@link %{OperandTypeLinkPfx}TENSOR_QUANT16_SYMM}
6259     *       Shape: [numUnits]
6260     * * 11: The cell-to-output weights (for peephole). Optional.
6261     *       Type: {@link %{OperandTypeLinkPfx}TENSOR_QUANT16_SYMM}
6262     *       Shape: [numUnits]
6263     * * 12: The input gate bias. Quantized with scale being the
6264     *       product of input and weights scales and zeroPoint equal to 0.
6265     *       Optional.
6266     *       Type: {@link %{OperandTypeLinkPfx}TENSOR_INT32}
6267     *       Shape: [numUnits]
6268     * * 13: The forget gate bias. Quantized with scale being the
6269     *       product of input and weights scales and zeroPoint equal to 0.
6270     *       Type: {@link %{OperandTypeLinkPfx}TENSOR_INT32}
6271     *       Shape: [numUnits]
6272     * * 14: The cell bias. Quantized with scale being the
6273     *       product of input and weights scales and zeroPoint equal to 0.
6274     *       Type: {@link %{OperandTypeLinkPfx}TENSOR_INT32}
6275     *       Shape: [numUnits]
6276     * * 15: The output gate bias. Quantized with scale being the
6277     *       product of input and weights scales and zeroPoint equal to 0.
6278     *       Type: {@link %{OperandTypeLinkPfx}TENSOR_INT32}
6279     *       Shape: [numUnits]
6280     * * 16: The projection weights. Optional.
6281     *       Type: {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_SYMM}
6282     *       Shape: [outputSize, numUnits]
6283     * * 17: The projection bias. Quantized with scale being the
6284     *       product of input and weights scales and zeroPoint equal to 0.
6285     *       Optional.
6286     *       Type: {@link %{OperandTypeLinkPfx}TENSOR_INT32}
6287     *       Shape: [outputSize]
6288     * * 18: The output from the previous time step.
6289     *       Type: {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED}
6290     *       Shape: [batchSize, outputSize]
6291     * * 19: The cell state from the previous time step.
6292     *       Type: {@link %{OperandTypeLinkPfx}TENSOR_QUANT16_SYMM}
6293     *       Shape: [batchSize, numUnits]
6294     * * 20: The input layer normalization weights. Used to rescale
6295     *       normalized inputs to activation at input gate. Optional.
6296     *       Type: {@link %{OperandTypeLinkPfx}TENSOR_QUANT16_SYMM}
6297     *       Shape: [numUnits]
6298     * * 21: The forget layer normalization weights. Used to
6299     *       rescale normalized inputs to activation at forget gate. Optional.
6300     *       Type: {@link %{OperandTypeLinkPfx}TENSOR_QUANT16_SYMM}
6301     *       Shape: [numUnits]
6302     * * 22: The cell layer normalization weights. Used to rescale
6303     *       normalized inputs to activation at cell gate. Optional.
6304     *       Type: {@link %{OperandTypeLinkPfx}TENSOR_QUANT16_SYMM}
6305     *       Shape: [numUnits]
6306     * * 23: The output layer normalization weights. Used to
6307     *       rescale normalized inputs to activation at output gate. Optional.
6308     *       Type: {@link %{OperandTypeLinkPfx}TENSOR_QUANT16_SYMM}
6309     *       Shape: [numUnits]
6310     * * 24: The cell clip. If provided the cell state is clipped
6311     *       by this value prior to the cell output activation. Optional.
6312     *       Type: {@link %{OperandTypeLinkPfx}FLOAT32}.
6313     * * 25: The projection clip. If provided and projection is enabled,
6314     *       this is used for clipping the projected values. Optional.
6315     *       Type: {@link %{OperandTypeLinkPfx}FLOAT32}.
6316     * * 26: The scale of the intermediate result of matmul,
6317     *       i.e. input to layer normalization, at input gate.
6318     *       Type: {@link %{OperandTypeLinkPfx}FLOAT32}.
6319     * * 27: The scale of the intermediate result of matmul,
6320     *       i.e. input to layer normalization, at forget gate.
6321     *       Type: {@link %{OperandTypeLinkPfx}FLOAT32}.
6322     * * 28: The scale of the intermediate result of matmul,
6323     *       i.e. input to layer normalization, at cell gate.
6324     *       Type: {@link %{OperandTypeLinkPfx}FLOAT32}.
6325     * * 29: The scale of the intermediate result of matmul,
6326     *       i.e. input to layer normalization, at output gate.
6327     *       Type: {@link %{OperandTypeLinkPfx}FLOAT32}.
6328     * * 30: The zero point of the hidden state, i.e. input to
6329     *       projection.
6330     *       Type: {@link %{OperandTypeLinkPfx}INT32}.
6331     * * 31: The scale of the hidden state, i.e. input to
6332     *       projection.
6333     *       Type: {@link %{OperandTypeLinkPfx}FLOAT32}.
6334     *
6335     * Outputs:
6336     * * 0: The output state (out).
6337     *      Type: {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED}
6338     *      Shape: [batchSize, outputSize]
6339     * * 1: The cell state (out).
6340     *      Type: {@link %{OperandTypeLinkPfx}TENSOR_QUANT16_SYMM}
6341     *      Shape: [batchSize, numUnits]
6342     * * 2: The output. This is effectively the same as the current
6343     *      "output state (out)" value.
6344     *      Type: {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED}
6345     *      Shape: [batchSize, outputSize]
6346%insert AVAIL4
6347     */
6348    %{DeclareOperation_1.3 QUANTIZED_LSTM 95},
6349
6350    /**
6351     * Executes one of the two referenced %{model_or_subgraph}s as determined by a boolean
6352     * value.
6353     *
6354     * The inputs and outputs of the two referenced %{model_or_subgraph}s must agree with the
6355     * signature of this operation. That is, if the operation has (3 + n) inputs
6356     * and m outputs, both %{model_or_subgraph}s must have n inputs and m outputs with the same
6357     * types, ranks%{NDK_if_specified}, dimensions%{NDK_if_specified}, scales,
6358     * zeroPoints, and %{otherOperandParameters} as the corresponding operation
6359     * inputs and outputs.
6360%kind aidl canonical hal*
6361     * All of the operands mentioned must have fully specified dimensions.
6362%/kind
6363     *
6364     * Inputs:
6365     * * 0: A value of type {@link %{OperandTypeLinkPfx}TENSOR_BOOL8} and shape [1]
6366     *      that determines which of the two referenced %{model_or_subgraph}s to execute.
6367     *      The operand must have fully specified dimensions.
6368     * * 1: A {@link %{OperandTypeLinkPfx}%{MODEL_or_SUBGRAPH}} reference to the %{model_or_subgraph} to be
6369     *      executed if the condition is true.
6370     * * 2: A {@link %{OperandTypeLinkPfx}%{MODEL_or_SUBGRAPH}} reference to the %{model_or_subgraph} to be
6371     *      executed if the condition is false.
6372     * * 3 ~ (n + 2): Inputs to be passed to the %{model_or_subgraph} selected for execution.
6373     *
6374     * Outputs:
6375     * * 0 ~ (m - 1): Outputs produced by the selected %{model_or_subgraph}.
6376%insert AVAIL4
6377     */
6378    %{DeclareOperation_1.3 IF 96},
6379
6380    /**
6381     * Executes the body %{model_or_subgraph} until the condition %{model_or_subgraph} outputs false.
6382     *
6383     * The inputs to this operation are the condition %{model_or_subgraph}, the body %{model_or_subgraph},
6384     * and operand values for the first iteration of the loop. The values are
6385     * implicitly split into three groups of input-output, state-only, and
6386     * input-only values, as described below.
6387     *
6388     * The outputs of this operation are the final values of input-output
6389     * operands.
6390     *
6391     * Both the condition and body %{model_or_subgraph} receive (m + k + n) inputs.
6392     * * The first m (m >= 1) inputs are input-output operands. For the first
6393     *   iteration, these are initialized from the corresponding inputs of the
6394     *   WHILE operation. In subsequent iterations, their values come from the
6395     *   corresponding outputs of the body %{model_or_subgraph} produced during the previous
6396     *   iteration.
6397     * * The next k (k >= 0) inputs are state-only operands. They are similar to
6398     *   the input-output operands, except that their values are no longer
6399     *   available after the loop terminates.
6400     * * The last n (n >= 0) inputs are input-only operands. Their values come
6401     *   from the corresponding inputs of the WHILE operation.
6402     *
6403     * The body %{model_or_subgraph} produces (m + k) outputs.
6404     * * The first m outputs are input-output operands. They become the outputs
6405     *   of the WHILE operation when a termination condition is reached.
6406     * * The last k outputs are state-only operands. Their values are no longer
6407     *   available after the loop terminates.
6408     *
6409     * The numbers m, k, and n are inferred by the %{runtime_or_driver} as follows:
6410     *     m = (WHILE operation output count)
6411     *     k = (body %{model_or_subgraph} output count) - m
6412     *     n = (body %{model_or_subgraph} input count) - m - k
6413     *
6414     * The pseudo-code below illustrates the flow of a WHILE operation with
6415     * inputs condition, body, initial_input_output, initial_state, input_only
6416     * (m = 1, k = 1, n = 1):
6417     *
6418     *     input_output = initial_input_output
6419     *     state = initial_state
6420     *     while condition(input_output, state, input_only):
6421     *         input_output, state = body(input_output, state, input_only)
6422     *     return input_output
6423     *
6424%kind ndk
6425     * To prevent infinite loops, there is an implicit execution timeout
6426     * associated with each loop ("loop timeout duration"). See {@link
6427     * ANeuralNetworksExecution_setLoopTimeout}.
6428     *
6429%/kind
6430     * Inputs:
6431     * * 0: A {@link %{OperandTypeLinkPfx}%{MODEL_or_SUBGRAPH}} reference to the condition
6432     *      %{model_or_subgraph}. The %{model_or_subgraph} must have (m + k + n) inputs with
6433     *      the same types, ranks%{NDK_if_specified}, dimensions%{NDK_if_specified},
6434     *      scales, zeroPoints, and %{otherOperandParameters} as the
6435     *      corresponding inputs of the WHILE operation and exactly one output
6436     *      of {@link %{OperandTypeLinkPfx}TENSOR_BOOL8} and shape [1].
6437%kind ndk
6438     *      The output operand must have fully specified dimensions.
6439%/kind
6440%kind aidl canonical hal*
6441     *      All of the operands mentioned must have fully specified dimensions.
6442%/kind
6443     * * 1: A {@link %{OperandTypeLinkPfx}%{MODEL_or_SUBGRAPH}} reference to the body %{model_or_subgraph}.
6444     *      The %{model_or_subgraph} must have (m + k + n) inputs and (m + k) outputs with
6445     *      the same types, ranks%{NDK_if_specified}, dimensions%{NDK_if_specified},
6446     *      scales, zeroPoints, and %{otherOperandParameters} as the
6447     *      corresponding inputs and outputs of the WHILE operation.
6448%kind aidl canonical hal*
6449     *      All of the operands mentioned must have fully specified dimensions.
6450%/kind
6451     * * (m inputs): Initial values for input-output operands.
6452     * * (k inputs): Initial values for state-only operands.
6453     * * (n inputs): Values for input-only operands.
6454     *
6455     * Outputs:
6456     * * 0 ~ (m - 1): Outputs produced by the loop.
6457%insert AVAIL4
6458     */
6459    %{DeclareOperation_1.3 WHILE 97},
6460
6461    /**
6462     * Computes exponential linear activation on the input tensor element-wise.
6463     *
6464     * The output is calculated using the following formula:
6465     *
6466     *     ELU(x) = max(0, x) + min(0, alpha * (exp(x) - 1))
6467     *
6468     * Supported tensor {@link %{OperandType}}:
6469     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16}
6470     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32}
6471     *
6472     * Supported tensor rank: from 1.
6473     *
6474     * Inputs:
6475     * * 0: A tensor, specifying the input. May be zero-sized.
6476     * * 1: A scalar, specifying the alpha parameter.
6477     *      For input tensor of {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16},
6478     *      the alpha value must be of {@link %{OperandTypeLinkPfx}FLOAT16}.
6479     *      For input tensor of {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32},
6480     *      the alpha value must be of {@link %{OperandTypeLinkPfx}FLOAT32}.
6481     *
6482     * Outputs:
6483     * * 0: The output tensor of same shape and type as input0.
6484%insert AVAIL4
6485     */
6486    %{DeclareOperation_1.3 ELU 98},
6487
6488    /**
6489     * Computes hard-swish activation on the input tensor element-wise.
6490     *
6491     * Hard swish activation is introduced in
6492     * https://arxiv.org/pdf/1905.02244.pdf
6493     *
6494     * The output is calculated using the following formula:
6495     *
6496     *     h-swish(x) = x * max(0, min(6, (x + 3))) / 6
6497     *
6498     * Supported tensor {@link %{OperandType}}:
6499     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16}
6500     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32}
6501     * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM}
6502     * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED}
6503     *
6504     * Supported tensor rank: from 1.
6505     *
6506     * Inputs:
6507     * * 0: A tensor, specifying the input. May be zero-sized.
6508     *
6509     * Outputs:
6510     * * 0: The output tensor of same shape and type as input0.
6511     *      Scale and zero point of this tensor may be different from the input
6512     *      tensor's parameters.
6513%insert AVAIL4
6514     */
6515    %{DeclareOperation_1.3 HARD_SWISH 99},
6516
6517    /**
6518     * Creates a tensor filled with a scalar value.
6519     *
6520     * Supported output tensor {@link %{OperandType}}:
6521     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16}
6522     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32}
6523     * * {@link %{OperandTypeLinkPfx}TENSOR_INT32}
6524     *
6525     * Supported tensor rank: from 1.
6526     *
6527     * Inputs:
6528     * * 0: A 1-D tensor, specifying the desired output tensor shape.
6529     * * 1: A scalar, specifying the value to fill the output tensors with.
6530     *      For output tensor of {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16},
6531     *      the scalar must be of {@link %{OperandTypeLinkPfx}FLOAT16}.
6532     *      For output tensor of {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32},
6533     *      the scalar must be of {@link %{OperandTypeLinkPfx}FLOAT32}.
6534     *      For output tensor of {@link %{OperandTypeLinkPfx}TENSOR_INT32},
6535     *      the scalar must be of {@link %{OperandTypeLinkPfx}INT32}.
6536     *
6537     * Outputs:
6538     * * 0: The output tensor.
6539%insert AVAIL4
6540     */
6541    %{DeclareOperation_1.3 FILL 100},
6542
6543    /**
6544     * Returns the rank of a tensor.
6545     *
6546     * The rank of a tensor is the number of dimensions in it. Also known as
6547     * "order", "degree", "ndims".
6548     *
6549     * Supported tensor {@link %{OperandType}}:
6550     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16}
6551     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32}
6552     * * {@link %{OperandTypeLinkPfx}TENSOR_INT32}
6553     * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM}
6554     * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT16_SYMM}
6555     * * {@link %{OperandTypeLinkPfx}TENSOR_BOOL8}
6556     * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_SYMM_PER_CHANNEL}
6557     * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT16_ASYMM}
6558     * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_SYMM}
6559     * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED}
6560     *
6561     * Supported tensor rank: from 1.
6562     *
6563     * Inputs:
6564     * * 0: The input tensor.
6565     *
6566     * Outputs:
6567     * * 0: A scalar of {@link %{OperandTypeLinkPfx}INT32}, specifying the rank
6568     *      of the input tensor.
6569%insert AVAIL4
6570     */
6571    %{DeclareOperation_1.3 RANK 101},
6572%/section
6573
6574%section Operation_1.3_MAX
6575    FUNDAMENTAL_MAX = 101,
6576%/section
6577
6578%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
6579
6580%% NDK OperationCode and HAL OperationType for Feature Level 6
6581
6582%section Operation_fl6
6583    /**
6584     * Performs multiplication of two tensors in batches.
6585     *
6586     * Multiplies all slices of two input tensors and arranges the individual
6587     * results in a single output tensor of the same batch size. Each pair of
6588     * slices in the same batch have identical {@link %{OperandType}}. Each
6589     * slice can optionally be adjointed (transpose and conjugate) before
6590     * multiplication.
6591     *
6592     * The two input tensors and the output tensor must be 2-D or higher and
6593     * have the same batch size.
6594     *
6595     * Supported tensor {@link %{OperandType}}:
6596     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16}
6597     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32}
6598     * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED}
6599     * * {@link %{OperandTypeLinkPfx}TENSOR_INT32}
6600     *
6601     * Supported tensor rank: at least 2 and up to 4
6602     *
6603     * Inputs:
6604     * * 0: A tensor with 2-D or higher shape [..., r_x, c_x].
6605     * * 1: A tensor with 2-D or higher shape [..., r_y, c_y]. It has the same
6606     *      {@link %{OperandType}} and batch size as input0.
6607     * * 2: An optional {@link %{OperandTypeLinkPfx}BOOL} scalar adj_x, default
6608     *      to false. Set to true to adjoint the slices of input0.
6609     * * 3: An optional {@link %{OperandTypeLinkPfx}BOOL} scalar adj_y, default
6610     *      to false. Set to true to adjoint the slices of input1.
6611     *
6612     * Outputs:
6613     * * 0: A tensor with 2-D or higher shape [..., r_o, c_o], where
6614     *      r_o = c_x if adj_x else r_x
6615     *      c_o = r_y if adj_y else c_y
6616%insert AVAIL6
6617     */
6618    %{DeclareOperation_fl6 BATCH_MATMUL 102},
6619
6620    /**
6621     * Packs N input tensors (N >= 1) of rank R into one output tensor of rank R+1.
6622     * The tensors are packed along a given axis.
6623     *
6624     * The input tensors must have identical {@link %{OperandType}} and dimensions.
6625     *
6626     * For example, suppose there are N input tensors of shape (A, B, C).
6627     * If axis is 0, the output tensor will have shape (N, A, B, C).
6628     * If axis is 1, the output tensor will have shape (A, N, B, C).
6629     *
6630     * All dimensions through the axis dimension determine the output tile count;
6631     * the remaining dimensions determine the tile shape.
6632     *
6633     * Return to the example of N input tensors of shape (A, B, C).
6634     * If axis is 0, there are N tiles in the output, each of shape (A, B, C).
6635     * If axis is 1, there are A*N tiles in the output, each of shape (B, C).
6636     *
6637     * The coordinates of a tile within the output tensor are (t[0],...,t[axis]).
6638     * The coordinates of a tile within an input tensor are (t[0],...,t[axis-1]).
6639     * (If axis is 0, an input tensor consists of a single tile.)
6640     * If we index input tensors starting with 0 (rather than by operand number),
6641     * then output_tile[t[0],...,t[axis]] = input_tile[t[axis]][t[0],...,t[axis-1]].
6642     * That is, all output tile coordinates except for the axis coordinate select
6643     * the corresponding location within some input tensor; and the axis coordinate
6644     * selects the input tensor.
6645     *
6646     * Supported tensor {@link %{OperandType}}:
6647     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16}
6648     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32}
6649     * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM}
6650     * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED}
6651     * * {@link %{OperandTypeLinkPfx}TENSOR_INT32}
6652     *
6653     * Supported input tensor rank: from 1
6654     *
6655     * Inputs:
6656     * * 0: A scalar of type {@link %{OperandTypeLinkPfx}INT32}, specifying
6657     *      the axis along which to pack.  The valid range is [0, R+1).
6658     * * 1 ~ N: Input tensors to be packed together.
6659     *          For {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} and
6660     *          {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} tensors,
6661     *          the scales and zeroPoint must be the same for all input tensors,
6662     *          and will be the same for the output tensor.
6663     *
6664     * Outputs:
6665     * * 0: The packed tensor.
6666%insert AVAIL6
6667     */
6668    %{DeclareOperation_fl6 PACK 103},
6669%/section
6670
6671%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
6672
6673%% NDK OperationCode and HAL OperationType for Feature Level 7
6674
6675%section Operation_fl7
6676    /**
6677     * Pads a tensor with mirrored values.
6678     *
6679     * This operator specifies one of two padding modes: REFLECT or SYMMETRIC.
6680     * In the case of REFLECT mode, the mirroring excludes the border element
6681     * on the padding side.
6682     * In the case of SYMMETRIC mode, the mirroring includes the border element
6683     * on the padding side.
6684     *
6685     * For example, if the input is the 1-D tensor `[1, 2, 3]` and the padding
6686     * is `[0, 2]` (i.e., pad no elements before the first (and only) dimension,
6687     * and two elements after the first (and only) dimension), then:
6688     *     - REFLECT mode produces the output `[1, 2, 3, 2, 1]`
6689     *     - SYMMETRIC mode produces the output `[1, 2, 3, 3, 2]`
6690     *
6691     * Supported tensor {@link %{OperandType}}:
6692     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16}
6693     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32}
6694     * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM}
6695     * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED}
6696     * * {@link %{OperandTypeLinkPfx}TENSOR_INT32}
6697     *
6698     * Supported tensor rank: from 1.
6699     *
6700     * Inputs:
6701     * * 0: An n-D tensor, specifying the tensor to be padded.
6702     * * 1: A 2-D tensor of {@link %{OperandTypeLinkPfx}TENSOR_INT32}, the paddings
6703     *      for each spatial dimension of the input tensor. The shape of the
6704     *      tensor must be {rank(input0), 2}.
6705     *      padding[i, 0] specifies the number of elements to be padded in the
6706     *      front of dimension i.
6707     *      padding[i, 1] specifies the number of elements to be padded after the
6708     *      end of dimension i.
6709     *      Each padding value must be nonnegative.
6710     *      In the case of REFLECT mode, each padding value must be less than the
6711     *      corresponding dimension.
6712     *      In the case of SYMMETRIC mode, each padding value must be less than or
6713     *      equal to the corresponding dimension.
6714     * * 2: An {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the mode.
6715     *      Options are 0:REFLECT and 1:SYMMETRIC.
6716     *
6717     * Outputs:
6718     * * 0: A tensor of the same {@link %{OperandType}} as input0. The
6719     *      output tensor has the same rank as input0, and each
6720     *      dimension of the output tensor has the same size as the
6721     *      corresponding dimension of the input tensor plus the size
6722     *      of the padding:
6723     *          output0.dimension[i] =
6724     *              padding[i, 0] + input0.dimension[i] + padding[i, 1]
6725     *      For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} and
6726     *      {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} tensor,
6727     *      the scale and zeroPoint must be the same as input0.
6728%insert AVAIL7
6729     */
6730    %{DeclareOperation_fl7 MIRROR_PAD 104},
6731
6732    /**
6733     * Reverses a specified dimension of a tensor.
6734     *
6735     * Supported tensor {@link %{OperandType}}:
6736     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16}
6737     * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32}
6738     * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM}
6739     * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED}
6740     * * {@link %{OperandTypeLinkPfx}TENSOR_INT32}
6741     *
6742     * Supported tensor rank: up to 8.
6743     *
6744     * Inputs:
6745     * * 0: Input tensor of rank n.
6746     * * 1: Axis tensor of type {@link %{OperandTypeLinkPfx}TENSOR_INT32} and shape [1],
6747     *      specifying which dimension of the input tensor is to be reversed. The dimension
6748     *      must be in the range [0, n).
6749     *
6750     * Outputs:
6751     * * 0: The reversed tensor of the same shape as the input tensor.
6752     *      For {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} and
6753     *      {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} tensors,
6754     *      the scales and zeroPoint must be the same as input0.
6755%insert AVAIL7
6756     */
6757    %{DeclareOperation_fl7 REVERSE 105},
6758%/section
6759
6760%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
6761
6762%% Misc HAL types
6763
6764%section OperandLifeTime
6765/**
6766 * How an operand is used.
6767 */
6768%kind canonical
6769enum class LifeTime {
6770%else
6771%{enum OperandLifeTime int32_t} {
6772%/kind
6773    /**
6774     * The operand is internal to the model. It's created by an operation and
6775     * consumed by other operations. It must be an output operand of
6776     * exactly one operation.
6777     */
6778    %{DeclareEnumValue TEMPORARY_VARIABLE 0},
6779
6780    /**
6781     * The operand is an input of %{the_model_or_a_subgraph}. It must not be an output
6782     * operand of any operation.
6783     *
6784     * An operand can't be both input and output of a %{model_or_subgraph}.
6785     */
6786%kind hal_1.0
6787    %{DeclareEnumValue MODEL_INPUT 1},
6788%else
6789    %{DeclareEnumValue SUBGRAPH_INPUT 1},
6790%/kind
6791
6792    /**
6793     * The operand is an output of %{the_model_or_a_subgraph}. It must be an output
6794     * operand of exactly one operation.
6795     *
6796     * An operand can't be both input and output of a %{model_or_subgraph}.
6797     */
6798%kind hal_1.0
6799    %{DeclareEnumValue MODEL_OUTPUT 2},
6800%else
6801    %{DeclareEnumValue SUBGRAPH_OUTPUT 2},
6802%/kind
6803
6804    /**
6805     * The operand is a constant found in Model%{::}operandValues. It must
6806     * not be an output operand of any operation.
6807     */
6808    %{DeclareEnumValue CONSTANT_COPY 3},
6809
6810    /**
6811     * The operand is a constant that was specified via a Memory
6812     * object. It must not be an output operand of any operation.
6813     */
6814    %{DeclareEnumValue CONSTANT_REFERENCE 4},
6815
6816    /**
6817     * The operand does not have a value. This is valid only for optional
6818     * arguments of operations.
6819     */
6820    %{DeclareEnumValue NO_VALUE 5},
6821%kind canonical hal_1.3+
6822
6823    /**
6824     * The operand is a reference to a subgraph. It must be an input to one
6825     * or more {@link OperationType::IF} or {@link OperationType::WHILE}
6826     * operations.
6827     */
6828    %{DeclareEnumValue SUBGRAPH 6},
6829%/kind
6830%kind canonical
6831
6832    /**
6833     * The operand is a constant found in a user buffer. It must not be an
6834     * output operand of any operation.
6835     */
6836    %{DeclareEnumValue POINTER 7},
6837%/kind
6838};
6839%/section
6840
6841%section DeviceStatus
6842/**
6843 * Status of a device.
6844 */
6845%{enum DeviceStatus int32_t} {
6846    %{DeclareEnumValue AVAILABLE 0},
6847    %{DeclareEnumValue BUSY 1},
6848    %{DeclareEnumValue OFFLINE 2},
6849    %{DeclareEnumValue UNKNOWN 3},
6850};
6851%/section
6852
6853%kind canonical
6854%define init_execTime  = kDefaultExecTime
6855%define init_powerUsage  = kDefaultPowerUsage
6856%else
6857%define init_execTime
6858%define init_powerUsage
6859%/kind
6860
6861%section PerformanceInfo
6862/**
6863 * Performance information for the reference workload.
6864 *
6865 * Used by a driver to report its performance characteristics.
6866 */
6867struct PerformanceInfo {
6868    /**
6869     * Ratio of the time taken by the driver to execute the
6870     * workload compared to the time the CPU would take for the
6871     * same workload. A lower number is better.
6872     */
6873    float execTime%{init_execTime};
6874
6875    /**
6876     * Ratio of the energy used by the driver compared to what
6877     * the CPU would use for doing the same workload. A lower number
6878     * is better.
6879     */
6880    float powerUsage%{init_powerUsage};
6881};
6882%/section
6883
6884%section OutputShape
6885/**
6886 * Describes the shape information of an output operand after execution.
6887 */
6888struct OutputShape {
6889    /**
6890     * Dimensions of the operand.
6891     */
6892    %{vec}<uint32_t> dimensions;
6893
6894    /**
6895     * Whether the provided buffer size is sufficient for the output.
6896     */
6897    bool isSufficient%{init_bool};
6898};
6899%/section
6900
6901%section MeasureTiming
6902/**
6903 * Specifies whether or not to measure timing information during execution.
6904 */
6905%{enum MeasureTiming int32_t} {
6906    NO = 0,
6907    YES = 1,
6908};
6909%/section
6910
6911%section ExecutionPreference
6912/**
6913 * Execution preferences.
6914%insert AVAIL1Short
6915 */
6916%{enum ExecutionPreference int32_t} {
6917    /**
6918     * Prefer executing in a way that minimizes battery drain.
6919     * This is desirable for compilations that will be executed often.
6920     */
6921    %{DeclareExecutionPreference LOW_POWER 0},
6922    /**
6923     * Prefer returning a single answer as fast as possible, even if this causes
6924     * more power consumption.
6925     */
6926    %{DeclareExecutionPreference FAST_SINGLE_ANSWER 1},
6927    /**
6928     * Prefer maximizing the throughput of successive frames, for example when
6929     * processing successive frames coming from the camera.
6930     */
6931    %{DeclareExecutionPreference SUSTAINED_SPEED 2},
6932%kind canonical
6933    DEFAULT = FAST_SINGLE_ANSWER,
6934%/kind
6935}%{ndk_enum_name PreferenceCode};
6936%/section
6937
6938%section DeviceType
6939/**
6940 * Device types.
6941 *
6942 * The type of NNAPI device.
6943 */
6944%{enum DeviceType int32_t} {
6945%kind hal*
6946    // Leaving 0 unused as it means unknown type in NDK NNAPI. There is no
6947    // HAL equivalent of unknown type and a 1.2 HAL implementation must belong
6948    // to one of the categories below.
6949%else
6950    /** The device type cannot be provided. */
6951    %{DeclareDeviceType UNKNOWN 0},
6952%/kind
6953    /** The device does not fall into any category below. */
6954    %{DeclareDeviceType OTHER 1},
6955    /** The device runs NNAPI models on single or multi-core CPU. */
6956    %{DeclareDeviceType CPU 2},
6957    /** The device can run NNAPI models and also accelerate graphics APIs such
6958     * as OpenGL ES and Vulkan. */
6959    %{DeclareDeviceType GPU 3},
6960    /** Dedicated accelerator for Machine Learning workloads. */
6961    %{DeclareDeviceType ACCELERATOR 4},
6962}%{ndk_enum_name DeviceTypeCode};
6963%/section
6964
6965%% NOTE: This is different from the NDK PriorityCode.
6966%section Priority
6967/**
6968%kind ndk
6969 * Relative execution priority.
6970 *
6971 * Available since NNAPI feature level 4.
6972%else
6973 * Priority given to a prepared model for execution.
6974%/kind
6975 */
6976%{enum Priority int32_t} {
6977    %{DeclarePriority LOW 0 90},
6978    %{DeclarePriority MEDIUM 1 100},
6979    %{DeclarePriority HIGH 2 110},
6980%kind canonical ndk
6981    %{DeclarePriority DEFAULT MEDIUM ANEURALNETWORKS_PRIORITY_MEDIUM},
6982%/kind
6983}%{ndk_enum_name PriorityCode};
6984%/section
6985
6986%kind canonical
6987%define OptionalDuration OptionalDuration
6988%else
6989%define OptionalDuration uint64_t
6990%/kind
6991
6992%section Timing
6993/**
6994%kind hal_1.2
6995
6996%/kind
6997 * Timing information measured during execution. Each time is a duration from
6998 * the beginning of some task to the end of that task, including time when that
6999 * task is not active (for example, preempted by some other task, or
7000 * waiting for some resource to become available).
7001 *
7002%kind hal*
7003 * Times are measured in microseconds.
7004 * When a time is not available, it must be reported as UINT64_MAX.
7005%else
7006 * Times are measured in nanoseconds.
7007%/kind
7008 */
7009struct Timing {
7010    /** Execution time on device (not driver, which runs on host processor). */
7011    %{OptionalDuration} timeOnDevice;
7012    /** Execution time in driver (including time on device). */
7013    %{OptionalDuration} timeInDriver;
7014};
7015%/section
7016
7017%section Capabilities_float_quant_performance
7018    /**
7019     * Driver performance when operating on float32 data.
7020     */
7021    PerformanceInfo float32Performance;
7022
7023    /**
7024     * Driver performance when operating on asymmetric 8-bit quantized data.
7025     */
7026    PerformanceInfo quantized8Performance;
7027%/section
7028
7029%kind canonical
7030%define OperandPerformanceTable OperandPerformanceTable
7031%else
7032%define OperandPerformanceTable vec<OperandPerformance>
7033%/kind
7034
7035%section Capabilities_relaxedPerformance
7036    /**
7037     * Driver performance when operating on float32 data but performing
7038     * calculations with range and/or precision as low as that of the IEEE
7039     * 754 16-bit floating-point format.
7040     */
7041%kind hal_1.1
7042    PerformanceInfo relaxedFloat32toFloat16Performance;
7043%else
7044    PerformanceInfo relaxedFloat32toFloat16PerformanceScalar;
7045    PerformanceInfo relaxedFloat32toFloat16PerformanceTensor;
7046%/kind
7047%/section
7048
7049%section Capabilities_operandPerformance
7050    /**
7051     * Performance by operand type. Must be sorted by OperandType.
7052%kind hal_1.2
7053     * If a particular OperandType is not present in operandPerformance,
7054     * its performance is treated as { .execTime = FLT_MAX, .powerUsage = FLT_MAX }.
7055%else
7056     *
7057     * If a particular {@link OperandType} is not present in operandPerformance,
7058     * its performance is treated as
7059     * { .execTime = FLT_MAX, .powerUsage = FLT_MAX }.
7060     *
7061     * Performance does not apply to {@link OperandType::SUBGRAPH}, and a driver
7062     * must not report operand performance for {@link OperandType::SUBGRAPH}.
7063%/kind
7064     */
7065    %{OperandPerformanceTable} operandPerformance;
7066%/section
7067
7068%section Capabilities_if_while_performance
7069    /**
7070     * Performance of an {@link OperationType::IF} operation is the sum of
7071     * {@link Capabilities::ifPerformance} and the mean of performance for the
7072     * two branch subgraphs, where performance for a subgraph is the sum of the
7073     * performance of all operations within the subgraph.
7074     */
7075    PerformanceInfo ifPerformance;
7076
7077    /**
7078     * Performance of a {@link OperationType::WHILE} operation is the sum of
7079     * {@link Capabilities::whilePerformance}, performance for the condition
7080     * subgraph and performance for the body subgraph, where performance for a
7081     * subgraph is the sum of the performance of all operations within the
7082     * subgraph.
7083     */
7084    PerformanceInfo whilePerformance;
7085%/section
7086
7087%section OperandPerformance
7088/**
7089 * Driver performance when operating on a particular data type.
7090 * In the case of float32 data, this is used when the calculations
7091 * are not relaxed.
7092 */
7093struct OperandPerformance {
7094    OperandType type%{init_pod};
7095    PerformanceInfo info;
7096};
7097%/section
7098
7099%section Capabilities
7100/**
7101 * The capabilities of a driver.
7102%kind hal_1.2
7103 *
7104 * Performance of an operation comes from the type of its first operand.
7105 * This represents performance for non extension operand types.
7106%/kind
7107%kind canonical hal_1.3+
7108 *
7109 * This represents performance of non-extension operations.
7110 *
7111 * Performance of an operation other than {@link OperationType::IF} and
7112 * {@link OperationType::WHILE} comes from the type of its first operand.
7113%/kind
7114 */
7115struct Capabilities {
7116%kind canonical
7117%insert-indented 4 PerformanceInfo
7118
7119%insert-indented 4 OperandPerformance
7120
7121    class OperandPerformanceTable {
7122       public:
7123        static Result<OperandPerformanceTable> create(
7124                std::vector<OperandPerformance> operandPerformances);
7125
7126        PerformanceInfo lookup(OperandType type) const;
7127        const std::vector<OperandPerformance>& asVector() const;
7128
7129       private:
7130        explicit OperandPerformanceTable(std::vector<OperandPerformance> operandPerformances);
7131        std::vector<OperandPerformance> mSorted;
7132    };
7133
7134%insert Capabilities_relaxedPerformance
7135
7136%insert Capabilities_operandPerformance
7137
7138%insert Capabilities_if_while_performance
7139%/kind
7140%kind hal_1.0
7141%insert Capabilities_float_quant_performance
7142%/kind
7143%kind hal_1.1
7144%insert Capabilities_float_quant_performance
7145
7146%insert Capabilities_relaxedPerformance
7147%/kind
7148%kind hal_1.2
7149%insert Capabilities_relaxedPerformance
7150
7151%insert-indented 4 OperandPerformance
7152
7153%insert Capabilities_operandPerformance
7154%/kind
7155%kind hal_1.3
7156%insert Capabilities_relaxedPerformance
7157
7158%insert-indented 4 OperandPerformance
7159
7160%insert Capabilities_operandPerformance
7161
7162%insert Capabilities_if_while_performance
7163%/kind
7164};
7165%/section
7166
7167%section DataLocation
7168/**
7169 * Describes the location of a data object.
7170 */
7171struct DataLocation {
7172%kind canonical
7173    /**
7174     * The address of the memory where the data is found.
7175     *
7176     * This field is only active when lifetime is POINTER.
7177     */
7178    std::variant<const void*, void*> pointer;
7179
7180%/kind
7181    /**
7182     * The index of the memory pool where this location is found.
7183     */
7184    uint32_t poolIndex%{init_int};
7185
7186    /**
7187     * Offset in bytes from the start of the pool.
7188     */
7189    uint32_t offset%{init_int};
7190
7191    /**
7192     * The length of the data in bytes.
7193     */
7194    uint32_t length%{init_int};
7195%kind canonical
7196
7197    /**
7198     * The end padding of the specified memory region in bytes.
7199     */
7200    uint32_t padding%{init_int};
7201%/kind
7202};
7203%/section
7204
7205%section Extension_name
7206    /**
7207     * The extension name.
7208     *
7209     * The name must consist of lowercase latin letters, numbers, periods, and
7210     * underscore signs. The name must contain at least one period.
7211     *
7212     * The name must start with the reverse domain name of the vendor.
7213     *
7214     * Example: com.google.test_extension
7215     */
7216    %{string} name;
7217%/section
7218
7219%section Extension
7220/**
7221 * Information about an extension.
7222 */
7223struct Extension {
7224%kind hal*
7225%insert Extension_name
7226
7227%/kind
7228    /**
7229     * Information about an extension operand type.
7230     */
7231    struct OperandTypeInformation {
7232        /**
7233         * The extension operand type.
7234         */
7235        uint16_t type%{init_int};
7236
7237        /**
7238         * Indicates whether the extension operand type represents a tensor or
7239         * a scalar.
7240         */
7241        bool isTensor%{init_bool};
7242
7243        /**
7244         * The byte size of the operand (if scalar) or of a single element (if
7245         * tensor).
7246         */
7247        uint32_t byteSize%{init_int};
7248    };
7249
7250%kind canonical
7251%insert Extension_name
7252
7253%/kind
7254    /**
7255     * Information about operand types defined by the extension.
7256     */
7257    %{vec}<OperandTypeInformation> operandTypes;
7258};
7259%/section
7260
7261%section Operation
7262/**
7263 * Describes one operation of the model's graph.
7264 */
7265struct Operation {
7266    /**
7267     * The operation type.
7268%kind hal_1.2+
7269     *
7270     * Besides the values listed in {@link OperationType}, any value above
7271     * {@link OperationTypeRange::BASE_MAX} is possible and should be interpreted
7272     * as an extension type according to {@link Model::extensionNameToPrefix}.
7273%/kind
7274     */
7275    OperationType type%{init_pod};
7276
7277    /**
7278     * Describes the table that contains the indexes of the inputs of the
7279     * operation. The offset is the index in the operandIndexes table.
7280     */
7281    %{vec}<uint32_t> inputs;
7282
7283    /**
7284     * Describes the table that contains the indexes of the outputs of the
7285     * operation. The offset is the index in the operandIndexes table.
7286     */
7287    %{vec}<uint32_t> outputs;
7288};
7289%/section
7290
7291%section FusedActivationFunc
7292/**
7293 * Fused activation function types.
7294%insert AVAIL1Short
7295 */
7296%kind canonical
7297enum class FusedActivationFunc : int32_t {
7298%else
7299%{enum FusedActivationFunc int32_t} {
7300%/kind
7301    /** NO fused activation function. */
7302    %{DeclareFusedActivationFunc NONE 0},
7303    /** Fused ReLU activation function. */
7304    %{DeclareFusedActivationFunc RELU 1},
7305    /** Fused ReLU1 activation function. */
7306    %{DeclareFusedActivationFunc RELU1 2},
7307    /** Fused ReLU6 activation function. */
7308    %{DeclareFusedActivationFunc RELU6 3},
7309}%{ndk_enum_name FuseCode};
7310%/section
7311
7312%section ExtraParams_Comment
7313/**
7314 * Additional parameters specific to a particular operand type.
7315 */
7316%/section
7317
7318%section ExtraParams_none_Comment
7319/**
7320 * No additional parameters.
7321 */
7322%/section
7323
7324%section ExtraParams_channelQuant_Comment
7325/**
7326 * Symmetric per-channel quantization parameters.
7327 *
7328 * Only applicable to operands of type %{ANN}TENSOR_QUANT8_SYMM_PER_CHANNEL.
7329 */
7330%/section
7331
7332%section ExtraParams_extension_Comment
7333/**
7334 * Extension operand parameters.
7335 *
7336 * The framework treats this as an opaque data blob.
7337 * The format is up to individual extensions.
7338 */
7339%/section
7340
7341%section SymmPerChannelQuantParams_Comment
7342/**
7343 * Parameters for %{ANN}TENSOR_QUANT8_SYMM_PER_CHANNEL operand.
7344 */
7345%/section
7346
7347%section SymmPerChannelQuantParams
7348%insert SymmPerChannelQuantParams_Comment
7349struct SymmPerChannelQuantParams {
7350    /** Array of scaling values for each channel. Each value must be greater than zero. */
7351    %{vec}<float> scales;
7352    /** Index of the channel dimension */
7353    uint32_t channelDim%{init_int};
7354};
7355%/section
7356
7357%kind canonical
7358%section location_pointer_is_null
7359     * - location.pointer is null.
7360%/section
7361%else
7362%section location_pointer_is_null
7363%/section
7364%/kind
7365
7366%% List item symbol
7367%kind hal*
7368%define li     .
7369%else
7370%define li     -
7371%/kind
7372
7373%section Operand
7374/**
7375 * Describes one operand of the model's graph.
7376 */
7377struct Operand {
7378%kind canonical
7379%insert-indented 4 OperandLifeTime
7380
7381%insert-indented 4 ExtraParams_none_Comment
7382    using NoParams = std::monostate;
7383
7384%insert-indented 4 SymmPerChannelQuantParams
7385
7386%insert-indented 4 ExtraParams_extension_Comment
7387    using ExtensionParams = std::vector<uint8_t>;
7388
7389%insert-indented 4 ExtraParams_Comment
7390    using ExtraParams = std::variant<NoParams, SymmPerChannelQuantParams, ExtensionParams>;
7391
7392%/kind
7393    /**
7394%kind canonical
7395     * The data type.
7396     *
7397     * Besides the values listed in {@link OperandType}, any value equal or over
7398     * (1 << kExtensionTypeBits) is possible and should be interpreted
7399     * as an extension type according to {@link Model::extensionNameToPrefix}.
7400%/kind
7401%kind hal_1.2+
7402     * The data type.
7403     *
7404     * Besides the values listed in {@link OperandType}, any value above
7405     * {@link OperandTypeRange::BASE_MAX} is possible and should be interpreted
7406     * as an extension type according to {@link Model::extensionNameToPrefix}.
7407%/kind
7408%kind hal_1.0
7409     * Data type of the operand.
7410%/kind
7411     */
7412    OperandType type%{init_pod};
7413
7414    /**
7415     * Dimensions of the operand.
7416     *
7417     * For a scalar operand, dimensions.size() must be 0.
7418%kind hal_1.0
7419     *
7420     * For a tensor operand, dimensions.size() must be at least 1;
7421     * however, any of the dimensions may be unspecified.
7422%/kind
7423     *
7424     * A tensor operand with all dimensions specified has "fully
7425     * specified" dimensions. Whenever possible (i.e., whenever the
7426     * dimensions are known at model construction time), a tensor
7427     * operand should have (but is not required to have) fully
7428     * specified dimensions, in order to enable the best possible
7429     * performance.
7430     *
7431     * If a tensor operand's dimensions are not fully specified, the
7432     * dimensions of the operand are deduced from the operand
7433     * dimensions and values of the operation for which that operand
7434%kind hal_1.0 hal_1.1 hal_1.2
7435     * is an output.
7436%else
7437     * is an output or from the corresponding {@link OperationType::IF} or
7438     * {@link OperationType::WHILE} operation input operand dimensions in the
7439     * case of referenced subgraph input operands.
7440%/kind
7441     *
7442     * In the following situations, a tensor operand's dimensions must
7443     * be fully specified:
7444     *
7445%kind canonical
7446     * %{li} The operand has lifetime CONSTANT_COPY, CONSTANT_REFERENCE, or
7447     *       POINTER.
7448%else
7449     * %{li} The operand has lifetime CONSTANT_COPY or
7450     *       CONSTANT_REFERENCE.
7451%/kind
7452     *
7453%kind hal_1.0
7454     * %{li} The operand has lifetime MODEL_INPUT or MODEL_OUTPUT. Fully
7455     *       specified dimensions must either be present in the
7456%/kind
7457%kind hal_1.2
7458     * %{li} The operand has lifetime MODEL_INPUT. Fully
7459     *       specified dimensions must either be present in the
7460%/kind
7461%kind canonical hal_1.3+
7462     * %{li} The operand has lifetime SUBGRAPH_INPUT and belongs to the main
7463     *       subgraph. Fully specified dimensions must either be present in the
7464%/kind
7465     *       Operand or they must be provided in the corresponding
7466     *       RequestArgument.
7467%kind hal_1.0
7468     *       EXCEPTION: If the input or output is optional and omitted
7469%else
7470     *       EXCEPTION: If the input is optional and omitted
7471%/kind
7472     *       (by setting the hasNoValue field of the corresponding
7473     *       RequestArgument to true) then it need not have fully
7474     *       specified dimensions.
7475     *
7476     * A tensor operand with some number of unspecified dimensions is
7477     * represented by setting each unspecified dimension to 0.
7478%kind canonical hal_1.2+
7479     *
7480     * A tensor operand with unspecified rank is represented by providing
7481     * an empty dimensions vector.
7482%/kind
7483     */
7484    %{Dimensions} dimensions;
7485%kind hal*
7486
7487    /**
7488     * The number of times this operand appears as an operation input.
7489     *
7490     * (For example, if this operand appears once in one operation's
7491     * input list, and three times in another operation's input list,
7492     * then numberOfConsumers = 4.)
7493     */
7494    uint32_t numberOfConsumers;
7495%/kind
7496
7497    /**
7498     * Quantized scale of the operand.
7499     *
7500%kind hal_1.0
7501     * Only applicable if the operand is of type TENSOR_QUANT8_ASYMM or
7502     * TENSOR_INT32.
7503%else
7504     * Must be 0 when not applicable to an operand type.
7505     *
7506     * See {@link OperandType}.
7507%/kind
7508     */
7509    float scale%{init_float};
7510
7511    /**
7512     * Quantized zero-point offset of the operand.
7513     *
7514%kind hal_1.0
7515     * Only applicable if the operand is of type TENSOR_QUANT8_ASYMM.
7516%else
7517     * Must be 0 when not applicable to an operand type.
7518     *
7519     * See {@link OperandType}.
7520%/kind
7521     */
7522    int32_t zeroPoint%{init_int};
7523
7524    /**
7525     * How the operand is used.
7526     */
7527    %{concat_or_skip_first Operand LifeTime} lifetime%{init_pod};
7528
7529    /**
7530     * Where to find the data for this operand.
7531%kind hal_1.0 hal_1.1 hal_1.2
7532     * If the lifetime is TEMPORARY_VARIABLE, MODEL_INPUT, MODEL_OUTPUT, or
7533     * NO_VALUE:
7534%else
7535     * If the lifetime is TEMPORARY_VARIABLE, SUBGRAPH_INPUT, SUBGRAPH_OUTPUT,
7536     * or NO_VALUE:
7537%/kind
7538     * - All the fields must be 0.
7539     * If the lifetime is CONSTANT_COPY:
7540%insert location_pointer_is_null
7541     * - location.poolIndex is 0.
7542     * - location.offset is the offset in bytes into Model%{::}operandValues.
7543     * - location.length is set.
7544%kind canonical
7545     * - location.padding is 0.
7546%/kind
7547     * If the lifetime is CONSTANT_REFERENCE:
7548%insert location_pointer_is_null
7549     * - location.poolIndex is set.
7550     * - location.offset is the offset in bytes into the specified pool.
7551     * - location.length is set.
7552%kind canonical
7553     * - location.padding is set.
7554%/kind
7555%kind canonical hal_1.3+
7556     * If the lifetime is SUBGRAPH:
7557%insert location_pointer_is_null
7558     * - location.poolIndex is 0.
7559     * - location.offset is the index of the referenced subgraph in
7560     *   {@link Model::referenced}.
7561     * - location.length is 0.
7562%/kind
7563%kind canonical
7564     * - location.padding is 0.
7565%/kind
7566%kind canonical
7567     * If the lifetime is POINTER:
7568     * - location.pointer is non-null.
7569     * - location.poolIndex is 0.
7570     * - location.offset is 0.
7571     * - location.length is set.
7572     * - location.padding is 0.
7573%/kind
7574     */
7575    DataLocation location;
7576%kind hal_1.2
7577
7578%insert-indented 4 ExtraParams_Comment
7579    safe_union ExtraParams {
7580%insert-indented 8 ExtraParams_none_Comment
7581        Monostate none;
7582
7583%insert-indented 8 ExtraParams_channelQuant_Comment
7584        SymmPerChannelQuantParams channelQuant;
7585
7586%insert-indented 8 ExtraParams_extension_Comment
7587        vec<uint8_t> extension;
7588    } extraParams;
7589%/kind
7590%kind canonical hal_1.3
7591
7592%insert-indented 4 ExtraParams_Comment
7593    %{concat_or_skip_first @1.2::Operand. ExtraParams} extraParams;
7594%/kind
7595};
7596%/section
7597
7598%kind canonical
7599%define OperandValues OperandValues
7600%define SharedMemory SharedMemory
7601%else
7602%define OperandValues vec<uint8_t>
7603%define SharedMemory memory
7604%/kind
7605
7606%section ExtensionNameAndPrefix
7607%kind canonical
7608/**
7609 * The mapping between extension names and prefixes of values such as operand and operation
7610 * types, as well as tokens in {@link TokenValuePair}.
7611 *
7612 * An operand or operation whose numeric type value is above {@link IDevice::OPERAND_TYPE_BASE_MAX}
7613 * or {@link IDevice::OPERATION_TYPE_BASE_MAX} respectively should be interpreted as an extension
7614 * operand/operation. The low kExtensionTypeBits bits of the value correspond to the type ID within
7615 * the extension and the high kExtensionPrefixBits bits encode the "prefix", which maps uniquely to
7616 * the extension name. The sign bit is always 0.
7617 *
7618 * For example, if a model contains an operation whose value is 0x7AAABBBB and
7619 * Model::extensionNameToPrefix contains an entry with prefix=0x7AAA and
7620 * name="vendor.test.test_extension", then the operation should be interpreted as the operation
7621 * 0xBBBB of the extension named vendor.test.test_extension.
7622 *
7623 * This is a one-to-one correspondence. That is, there must be at most one prefix corresponding to
7624 * each extension name and at most one extension name corresponding to each prefix.
7625 */
7626%/kind
7627%kind hal_1.2 hal_1.3
7628/**
7629 * A correspondence between an extension name and a prefix of operand and
7630 * operation type values.
7631 */
7632%/kind
7633struct ExtensionNameAndPrefix {
7634    /**
7635     * The extension name.
7636     *
7637     * See {@link Extension::name} for the format specification.
7638     */
7639    %{string} name;
7640
7641%kind canonical
7642    /**
7643     * The extension prefix. Only the lowest 15 bits are used, so the value must be less than 32768.
7644     */
7645%/kind
7646%kind hal_1.2 hal_1.3
7647    /**
7648     * The unique extension identifier within the model.
7649     *
7650     * See {@link Model::extensionNameToPrefix}.
7651     */
7652%/kind
7653    uint16_t prefix%{init_int};
7654};
7655%/section
7656
7657%section Model_1.0
7658    /**
7659     * A byte buffer containing operand data that were copied into the model.
7660     *
7661     * An operand's value must be located here if and only if Operand::lifetime
7662     * equals %{OperandLifeTime}::CONSTANT_COPY.
7663     */
7664    %{OperandValues} operandValues;
7665
7666    /**
7667     * A collection of shared memory pools containing operand values.
7668     *
7669     * An operand's value must be located here if and only if Operand::lifetime
7670     * equals %{OperandLifeTime}::CONSTANT_REFERENCE.
7671     */
7672    %{vec}<%{SharedMemory}> pools;
7673%/section
7674
7675%section Model_1.1
7676    /**
7677     * 'true' indicates TENSOR_FLOAT32 may be calculated with range and/or
7678     * precision as low as that of the IEEE 754 16-bit floating-point format.
7679     * 'false' indicates TENSOR_FLOAT32 must be calculated using at least the
7680     * range and precision of the IEEE 754 32-bit floating-point format.
7681     */
7682    bool relaxComputationFloat32toFloat16%{init_bool};
7683%/section
7684
7685%section Model_1.2
7686    /**
7687     * The mapping between extension names and prefixes of operand and
7688     * operation type values.
7689     *
7690%kind canonical
7691     * An operand or operation whose numeric type value is equal to or greater
7692     * than (1 << kExtensionTypeBits) should be interpreted
7693%/kind
7694%kind hal*
7695     * An operand or operation whose numeric type value is above
7696     * {@link OperandTypeRange::BASE_MAX} or
7697     * {@link OperationTypeRange::BASE_MAX} respectively should be interpreted
7698%/kind
7699     * as an extension operand or operation. The low
7700%kind hal_1.2
7701     * {@link Model::ExtensionTypeEncoding::LOW_BITS_TYPE} bits of the value
7702     * correspond to the type ID within the extension and the high
7703     * {@link Model::ExtensionTypeEncoding::HIGH_BITS_PREFIX} bits encode
7704%/kind
7705%kind hal_1.3
7706     * {@link @1.2::Model::ExtensionTypeEncoding::LOW_BITS_TYPE} bits of the
7707     * value correspond to the type ID within the extension and the high
7708     * {@link @1.2::Model::ExtensionTypeEncoding::HIGH_BITS_PREFIX} bits encode
7709%/kind
7710%kind canonical
7711     * {@link kExtensionTypeBits} bits of the value correspond to the type ID
7712     * within the extension and the high {@link kExtensionPrefixBits} bits encode
7713%/kind
7714     * the "prefix", which maps uniquely to the extension name.
7715     *
7716     * For example, if a model contains an operation whose value is
7717     * 0x7AAABBBB and extensionNameToPrefix contains an entry with
7718     * prefix=0x7AAA and name="vendor.test.test_extension", then
7719     * the operation should be interpreted as the operation 0xBBBB
7720     * of the extension named vendor.test.test_extension.
7721     *
7722     * This is a one-to-one correspondence. That is, there must be at most one
7723     * prefix corresponding to each extension name and at most one extension
7724     * name corresponding to each prefix.
7725     */
7726%kind hal_1.3
7727    %{vec}<@1.2::Model.ExtensionNameAndPrefix> extensionNameToPrefix;
7728%else
7729    %{vec}<ExtensionNameAndPrefix> extensionNameToPrefix;
7730%/kind
7731%/section
7732
7733%section Model_1.3_main_and_referenced_subgraphs
7734    /**
7735     * The top-level subgraph.
7736     */
7737    Subgraph main;
7738
7739    /**
7740     * Referenced subgraphs.
7741     *
7742     * Each subgraph is referenced by the main subgraph or at least one other
7743     * referenced subgraph.
7744     *
7745     * There must be no reference cycles.
7746     */
7747    %{vec}<Subgraph> referenced;
7748%/section
7749
7750%section Subgraph_fields
7751    /**
7752     * All operands included in the %{model_or_subgraph}.
7753     */
7754    %{vec}<Operand> operands;
7755
7756    /**
7757     * All operations included in the %{model_or_subgraph}.
7758     *
7759     * The operations are sorted into execution order. Every operand
7760     * with lifetime %{MODEL_or_SUBGRAPH}_OUTPUT or TEMPORARY_VARIABLE must be
7761     * written before it is read.
7762     */
7763    %{vec}<Operation> operations;
7764
7765    /**
7766     * Input indexes of the %{model_or_subgraph}. There must be at least one.
7767     *
7768     * Each value corresponds to the index of the operand in "operands".
7769     */
7770    %{vec}<uint32_t> inputIndexes;
7771
7772    /**
7773     * Output indexes of the %{model_or_subgraph}. There must be at least one.
7774     *
7775     * Each value corresponds to the index of the operand in "operands".
7776     */
7777    %{vec}<uint32_t> outputIndexes;
7778%/section
7779
7780%section Subgraph
7781/**
7782 * An excerpt of the execution graph.
7783 */
7784struct Subgraph {
7785%insert Subgraph_fields
7786};
7787%/section
7788
7789%section ExtensionTypeEncoding
7790/**
7791 * Numeric values of extension operand and operation types have the
7792 * following structure:
7793 * - 16 high bits represent the "prefix", which corresponds uniquely to the
7794 *   extension name.
7795 * - 16 low bits represent the type ID within the extension.
7796 */
7797%kind canonical
7798constexpr uint8_t kExtensionTypeBits = 16;
7799constexpr uint8_t kExtensionPrefixBits = 16;
7800constexpr uint32_t kTypeWithinExtensionMask = 0xFFFF;
7801%else
7802enum ExtensionTypeEncoding : uint8_t {
7803    HIGH_BITS_PREFIX = 16,
7804    LOW_BITS_TYPE = 16,
7805};
7806%/kind
7807%/section
7808
7809%section Model
7810/**
7811 * A Neural Network Model.
7812 *
7813 * This includes not only the execution graph, but also constant data such as
7814 * weights or scalars added at construction time. The only information that
7815%kind hal_1.0
7816 * might not be known is the shape of the input tensors.
7817%else
7818 * may not be known is the shape of the input tensors.
7819%/kind
7820 */
7821struct Model {
7822%kind canonical
7823%insert-indented 4 Subgraph
7824
7825    class OperandValues {
7826       public:
7827        OperandValues();
7828        OperandValues(const uint8_t* data, size_t length);
7829
7830        // Append a segment of memory (starting at `data` with `length` number of bytes) to the back
7831        // of `OperandValues`, adding padding as necessary so that the appended data is aligned.
7832        // Refer to `getAlignmentForLength` for more information on alignment (such as what the
7833        // current alignments are for different data lengths).
7834        DataLocation append(const uint8_t* data, size_t length);
7835
7836        const uint8_t* data() const;
7837        size_t size() const;
7838
7839       private:
7840        std::vector<uint8_t> mData;
7841    };
7842
7843%insert Model_1.3_main_and_referenced_subgraphs
7844
7845%insert Model_1.0
7846
7847%insert Model_1.1
7848
7849%insert Model_1.2
7850%/kind
7851%kind hal_1.0
7852%insert Subgraph_fields
7853
7854%insert Model_1.0
7855%/kind
7856%kind hal_1.1
7857%insert Subgraph_fields
7858
7859%insert Model_1.0
7860
7861%insert Model_1.1
7862%/kind
7863%kind hal_1.2
7864%insert Subgraph_fields
7865
7866%insert Model_1.0
7867
7868%insert Model_1.1
7869
7870%insert Model_1.2
7871
7872%insert-indented 4 ExtensionNameAndPrefix
7873
7874%insert-indented 4 ExtensionTypeEncoding
7875%/kind
7876%kind hal_1.3
7877%insert Model_1.3_main_and_referenced_subgraphs
7878
7879%insert Model_1.0
7880
7881%insert Model_1.1
7882
7883%insert Model_1.2
7884%/kind
7885};
7886%/section
7887
7888%section BufferDesc
7889/**
7890 * A buffer descriptor. Describes the properties of a buffer.
7891 */
7892struct BufferDesc {
7893    /**
7894     * Dimensions of the buffer. May have unknown dimensions or rank. A buffer with some number
7895     * of unspecified dimensions is represented by setting each unspecified dimension to 0. A
7896     * buffer with unspecified rank is represented by providing an empty dimensions vector.
7897     */
7898    %{Dimensions} dimensions;
7899};
7900%/section
7901
7902%section BufferRole
7903/**
7904 * Describes a role of an input or output to a prepared model.
7905 */
7906struct BufferRole {
7907    /**
7908     * The index of the IPreparedModel within the "preparedModel" argument passed to
7909     * IDevice::allocate.
7910     */
7911    uint32_t modelIndex%{init_int};
7912
7913    /**
7914     * The index of the input or output operand.
7915     */
7916    uint32_t ioIndex%{init_int};
7917
7918    /**
7919     * A floating-point value within the range (0.0, 1.0]. Describes how likely the
7920     * buffer is to be used in the specified role. This is provided as a hint to
7921     * optimize the case when multiple roles prefer different buffer locations or data
7922     * layouts.
7923     */
7924%kind canonical
7925    float probability%{init_float};
7926%else
7927    float frequency%{init_float};
7928%/kind
7929};
7930%/section
7931
7932%kind aidl
7933%define inputIndexes @@@NOT_DEFINED@@@
7934%define outputIndexes @@@NOT_DEFINED@@@
7935%/kind
7936%kind canonical
7937%define inputIndexes Model::main::inputIndexes
7938%define outputIndexes Model::main::outputIndexes
7939%/kind
7940%kind hal_1.3
7941%define inputIndexes Model.main.inputIndexes
7942%define outputIndexes Model.main.outputIndexes
7943%/kind
7944%kind hal_1.0 hal_1.1 hal_1.2
7945%define inputIndexes Model.inputIndexes
7946%define outputIndexes Model.outputIndexes
7947%/kind
7948%kind ndk
7949%define inputIndexes @@@NOT_DEFINED@@@
7950%define outputIndexes @@@NOT_DEFINED@@@
7951%/kind
7952
7953%kind canonical
7954%define inputs inputs
7955%define outputs outputs
7956%else
7957%define inputs input
7958%define outputs output
7959%/kind
7960
7961%section Request_inputs_and_outputs
7962    /**
7963     * Input data and information to be used in the execution of a prepared
7964     * model.
7965     *
7966     * The index of the input corresponds to the index in %{inputIndexes}.
7967     *   E.g., %{inputs}[i] corresponds to %{inputIndexes}[i].
7968     */
7969    %{vec}<%{concat_or_skip_first Request Argument}> inputs;
7970
7971    /**
7972     * Output data and information to be used in the execution of a prepared
7973     * model.
7974     *
7975     * The index of the output corresponds to the index in %{outputIndexes}.
7976     *   E.g., %{outputs}[i] corresponds to %{outputIndexes}[i].
7977     */
7978    %{vec}<%{concat_or_skip_first Request Argument}> outputs;
7979%/section
7980
7981%section Request_pools
7982    /**
7983%kind hal_1.0
7984     * A collection of shared memory pools containing operand data for both the
7985%else
7986     * A collection of memory pools containing operand data for both the
7987%/kind
7988     * inputs and the outputs to a model.
7989     */
7990%kind hal_1.0
7991    vec<memory> pools;
7992%else
7993    %{vec}<MemoryPool> pools;
7994%/kind
7995%/section
7996
7997%section Request_MemoryPool_Comment
7998/**
7999 * A memory pool.
8000 */
8001%/section
8002
8003%section RequestArgument
8004/**
8005 * Metadata information specifying the location of the input or output data and
8006 * any updates to the input or output operand.
8007 */
8008struct %{concat_or_skip_first Request Argument} {
8009%kind canonical
8010    enum class LifeTime {
8011        POOL = 0,
8012        NO_VALUE = 1,
8013        POINTER = 2,
8014    };
8015
8016%/kind
8017%kind hal_1.0
8018    /**
8019     * If true, the argument does not have a value. This can be used for
8020     * operations that take optional arguments. If true, the fields of location
8021     * are set to 0 and the dimensions vector is left empty.
8022     */
8023    bool hasNoValue;
8024%/kind
8025%kind canonical
8026    LifeTime lifetime%{init_pod};
8027%/kind
8028
8029    /**
8030     * The location within one of the memory pools passed in the Request.
8031     */
8032    DataLocation location;
8033
8034    /**
8035     * Updated dimension information.
8036     *
8037     * If dimensions.size() > 0, dimension information was provided
8038     * along with the argument. This can be the case for models that
8039     * accept inputs of varying size. This can't change the rank, just
8040     * the value of the dimensions that were unspecified in the
8041     * model. If dimensions.size() > 0, then all dimensions must be
8042     * specified here; and any dimension that was specified in the
8043     * model must have the same value here.
8044     *
8045     * If the dimensions in the model are not fully specified, then
8046     * they must be fully specified here, unless hasNoValue is set to
8047     * true. If the dimensions in the model are fully specified, then
8048     * either dimensions.size() may be 0, or the dimensions in the
8049     * model must be identical to the dimensions here.
8050     */
8051    %{Dimensions} dimensions;
8052};
8053%/section
8054
8055%section Request
8056/**
8057 * Inputs to be sent to and outputs to be retrieved from a prepared model.
8058 *
8059 * A Request serves two primary tasks:
8060 * 1) Provides the input and output data to be used when executing the model.
8061 * 2) Specifies any updates to the input operand metadata that were left
8062 *    unspecified at model preparation time.
8063 *
8064 * An output must not overlap with any other output, with an input, or
8065 * with an operand of lifetime CONSTANT_REFERENCE.
8066 */
8067struct Request {
8068%kind canonical
8069%insert-indented 4 RequestArgument
8070
8071    /**
8072     * Specifies a driver-managed buffer. It is the token corresponding to an
8073     * IBuffer returned from IDevice::allocate, and is specific to the IDevice
8074     * object.
8075     */
8076    enum class MemoryDomainToken : uint32_t {};
8077
8078%insert-indented 4 Request_MemoryPool_Comment
8079    using MemoryPool = std::variant<SharedMemory, MemoryDomainToken, SharedBuffer>;
8080
8081%/kind
8082%insert Request_inputs_and_outputs
8083%kind hal_1.3
8084
8085%insert-indented 4 Request_MemoryPool_Comment
8086    safe_union MemoryPool {
8087        /**
8088         * Specifies a client-managed shared memory pool.
8089         */
8090        memory hidlMemory;
8091
8092        /**
8093         * Specifies a driver-managed buffer. It is the token returned from IDevice::allocate,
8094         * and is specific to the IDevice object.
8095         */
8096        uint32_t token;
8097    };
8098%/kind
8099
8100%insert Request_pools
8101};
8102%/section
8103
8104%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
8105