%% template file for generating OperationTypes.h.
%% see README.md.
/*
 * Copyright (C) 2020 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ANDROID_PACKAGES_MODULES_NEURALNETWORKS_COMMON_TYPES_NNAPI_OPERATION_TYPES_H
#define ANDROID_PACKAGES_MODULES_NEURALNETWORKS_COMMON_TYPES_NNAPI_OPERATION_TYPES_H

namespace android::nn {

%insert Operation_1.0_Comment
enum class OperationType {
%insert Operation_1.0

%insert Operation_1.1

%insert Operation_1.2

%insert Operation_1.3

%insert Operation_fl6

%insert Operation_fl7

    /**
     * DEPRECATED. Since HAL version 1.2, extensions are the preferred
     * alternative to OEM operation and data types.
     *
     * This operation is OEM specific. It should only be used for OEM
     * applications.
     */
    OEM_OPERATION = 10000,

#ifdef NN_EXPERIMENTAL_FEATURE
    /**
     * Expands a representation of a sparse tensor to a dense tensor.
     *
     * To encode a conceptual n-dimensional dense tensor with dims [D0, ..., Dn-1], potentially with
     * a k-dimensional block (0 <= k <= n) with dims [Dn, ..., Dn+k-1], the format specifies:
     * * 1: In what order to traverse these dimensions. For example, to store a 2-D matrix in row
     *      major order, the traversal order would be [D0, D1], whereas to store it in column major
     *      order, the traversal order would be [D1, D0]. If the 2-D matrix has a 2-D inner block,
     *      the traversal order could be [D0, D1, D2, D3].
     * * 2: How each block dimension in [Dn, ..., Dn+k-1] maps to the original tensor dimension in
     *      [D0, ..., Dn-1].
     * * 3: In the traversal order defined above, the format (dense vs. sparse) and index metadata
     *      for each dimension. For a dense dimension, this is just the size of that dimension. For
     *      a sparse dimension, it's the same as the compressed index defined in the Compressed
     *      Sparse Row (CSR) format.
     *      (http://scipy-lectures.org/advanced/scipy_sparse/csr_matrix.html)
     *
     * The number of inputs to this operation is determined by the number of dimensions (including
     * the block dimensions) of the sparsity parameters. Currently, the only formats supported are
     * DENSE and SPARSE_CSR, but additional sparsity formats may be added in later versions of this
     * operation.
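     *
     * As a non-normative illustration, consider a conceptual 4 x 4 matrix stored row major with
     * no block (n = 2, k = 0) whose only non-zero values are 1 at (0,0), 2 at (1,2), 3 at (1,3)
     * and 5 at (3,0). Encoding D0 as DENSE and D1 as SPARSE_CSR, the data tensor holds
     * [1, 2, 3, 5], the traversal order is [D0, D1], the per-dimension formats are
     * [DENSE, SPARSE_CSR], the dimension sizes are [4, 4], and D1 is described by the CSR array
     * segments (row pointers) [0, 1, 3, 3, 4] and array indices (column indices) [0, 2, 3, 0].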
     *
     * Supported tensor {@link OperandType}:
     * * {@link OperandType::TENSOR_FLOAT16}
     * * {@link OperandType::TENSOR_FLOAT32}
     * * {@link OperandType::TENSOR_QUANT8_SYMM}
     * * {@link OperandType::TENSOR_QUANT8_ASYMM}
     * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED}
     * * {@link OperandType::TENSOR_BOOL8}
     * * {@link OperandType::TENSOR_INT32}
     * * {@link OperandType::TENSOR_QUANT16_SYMM}
     * * {@link OperandType::TENSOR_QUANT16_ASYMM}
     *
     * Reference:
     * * This implementation is a modification of the TACO format.
     *   http://tensor-compiler.org/kjolstad-oopsla17-tensor-compiler.pdf
     *
     * Inputs:
     * * 0: A 1-D tensor representing the compressed sparse tensor data of a conceptual
     *      n-dimensional tensor.
     * * 1: A 1-D {@link OperandType::TENSOR_INT32} tensor defining the traversal order for reading
     *      the non-zero blocks. For an n-dimensional tensor with dimensions [D0, D1, …, Dn-1]: if
     *      block sparse with a k-dimensional block (0 < k <= n), the traversal order has n+k
     *      elements. The first n elements are still a permutation of [D0, …, Dn-1]. The last k
     *      elements are a permutation of [Dn, …, Dn+k-1], defining how to traverse a block
     *      internally. If not block sparse, the traversal order is just a permutation of [D0, …,
     *      Dn-1].
     * * 2: An optional 1-D {@link OperandType::TENSOR_INT32} tensor defining the block map. For a
     *      block sparse n-dimensional tensor with a k-dimensional block (0 < k <= n), it stores how
     *      a block dimension [Dn, …, Dn+k-1] maps to the original tensor dimension in [D0, …,
     *      Dn-1]. For i, j where 0 <= i < j < k, blockMap[i] < blockMap[j]. If not block sparse,
     *      this is null.
     * * 3: A 1-D {@link OperandType::TENSOR_INT32} tensor with n+k elements defining the format of
     *      each dimension in the traversal order (listed above). The format is either DENSE (where
     *      DENSE = 0) or SPARSE_CSR (where SPARSE_CSR = 1). DENSE means that each coordinate in
     *      this dimension is stored implicitly. SPARSE_CSR means only the coordinates with non-zero
     *      elements are stored.
     * * 4: A 1-D {@link OperandType::TENSOR_INT32} tensor with n+k elements defining the size of
     *      each dimension or block. The product of all these sizes totals the number of elements in
     *      the dense tensor. The first n elements represent the sparse tensor’s shape, and the last
     *      k elements represent the block’s shape.
     * * 5 ~ (5 + 2 * (n+k)): Optional pairs of {@link OperandType::TENSOR_INT32} tensors, each pair
     *      specifying the sparse indices for one dimension. The first pair of arguments corresponds
     *      to D0, the second to D1, and so on until Dn+k-1. If the dimension is DENSE, both
     *      arguments in the pair are null and the dimension is implicitly specified by the
     *      corresponding element in Input 4. If the dimension is SPARSE_CSR, then we use the pair
     *      of array segments and array indices to encode that dimension:
     * * * +0: An optional list of n+k input 1-D {@link OperandType::TENSOR_INT32} tensors, defining
     *         the array segments. The array segments represent how to segment the indices array;
     *         each segment corresponds to one element in the previous dimension. Array segments are
     *         interspersed with array indices (listed below), so this input could be input (5, 5 +
     *         2, …, 5 + 2*(n+k-1)). For i, j where 0 <= i < j, arraySegments[i] <=
     *         arraySegments[j]. Used if the dimension is SPARSE_CSR, omitted if the dimension is
     *         DENSE.
     * * * +1: An optional list of n+k input 1-D {@link OperandType::TENSOR_INT32} tensors, defining
     *         the array indices. The array indices represent the index of the non-zero elements
     *         within this dimension (as those in the CSR matrix format, where the first array is
     *         row pointers and the second array is column indices). Array indices are interspersed
     *         with array segments (listed above), so this input could be input (6, 6 + 2, …, 6 +
     *         2*(n+k-1)). Used if the dimension is SPARSE_CSR, omitted if the dimension is DENSE.
     *
     * Outputs:
     * * 0: An n-D dense tensor. The output tensor has the same {@link OperandType} as input 0.
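     *
     * As a non-normative sketch, the 4 x 4 CSR example above would be passed as: input 0 =
     * [1, 2, 3, 5] (data), input 1 = [0, 1] (traversal order), input 2 = null (no block map),
     * input 3 = [DENSE, SPARSE_CSR] (dimension formats), input 4 = [4, 4] (dimension sizes),
     * inputs 5 and 6 = null (D0 is DENSE), input 7 = [0, 1, 3, 3, 4] (D1 array segments), and
     * input 8 = [0, 2, 3, 0] (D1 array indices); output 0 is then the dense 4 x 4 tensor.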
     */
    DENSIFY = 20000,
#endif  // NN_EXPERIMENTAL_FEATURE
};

}  // namespace android::nn

#endif  // ANDROID_PACKAGES_MODULES_NEURALNETWORKS_COMMON_TYPES_NNAPI_OPERATION_TYPES_H
