/*
 * Copyright (C) 2021 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "Operations"

#include "MirrorPad.h"

#include <algorithm>
#include <utility>

#include "OperationResolver.h"
#include "OperationsExecutionUtils.h"

#ifdef NN_INCLUDE_CPU_IMPLEMENTATION
#include <limits>
#include <vector>

#include "CpuOperationUtils.h"
#endif  // NN_INCLUDE_CPU_IMPLEMENTATION

namespace android {
namespace nn {
namespace mirror_pad_op {

#ifdef NN_INCLUDE_CPU_IMPLEMENTATION
bool prepare(IOperationExecutionContext* context) {
    // Input tensor must be of positive rank.
    const Shape inputShape = context->getInputShape(kInputTensor);
    const auto inputRank = getNumberOfDimensions(inputShape);
    NN_RET_CHECK_GT(inputRank, 0U);

    // Check mode value.
    const int32_t mode = context->getInputValue<int32_t>(kInputModeScalar);
    NN_RET_CHECK(mode == kModeReflect || mode == kModeSymmetric);
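    // For example, padding the 1-D input [1, 2, 3] by (2, 2) yields
    //   [3, 2, 1, 2, 3, 2, 1] in REFLECT mode (the edge is not repeated) and
    //   [2, 1, 1, 2, 3, 3, 2] in SYMMETRIC mode (the edge is repeated).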

    Shape outputShape = context->getOutputShape(kOutputTensor);
    NN_RET_CHECK(padPrepare(inputShape, context->getInputBuffer<int32_t>(kInputPaddingTensor),
                            context->getInputShape(kInputPaddingTensor), &outputShape));

    // Check padding values.
    // Note that the call to padPrepare() above verifies that the padding tensor
    // has the correct dimensions.
    {
        const int32_t* paddingValues = context->getInputBuffer<int32_t>(kInputPaddingTensor);
        for (uint32_t i = 0; i < inputRank; ++i) {
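            // Each padding amount may be at most the dimension size in SYMMETRIC
            // mode, but only the dimension size minus one in REFLECT mode,
            // because REFLECT does not repeat the edge element.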
            const int32_t paddingMax = getSizeOfDimension(inputShape, i) - (mode == kModeReflect);
            const int32_t beforePadding = *(paddingValues++);
            NN_RET_CHECK_GE(beforePadding, 0);
            NN_RET_CHECK_LE(beforePadding, paddingMax);
            const int32_t afterPadding = *(paddingValues++);
            NN_RET_CHECK_GE(afterPadding, 0);
            NN_RET_CHECK_LE(afterPadding, paddingMax);
        }
    }

    return context->setOutputShape(kOutputTensor, outputShape);
}

/*-- begin execution ------------------------------------------------------------------*/

// Based on
// http://cs/android/external/tensorflow/tensorflow/lite/kernels/mirror_pad.cc;l=163;rcl=84f01780a69b5900cfddf2b44d696f92e0aac331

// The structure of that code is largely preserved, for simplicity of comparison.

// The TFLite implementation is multithreaded.  The NNAPI implementation is not.

namespace {

// In adapting the TFLite implementation to NNAPI, we introduce conversions from
// conventional NNAPI types (typically uint32_t) to int, which is the
// conventional TFLite type.  This function checks that such a conversion is
// value-preserving.
template <typename T>
bool checkAsInt(T) {
    // Making the assertion expression dependent on the template type (via
    // incorporation of "&& sizeof(T)") ensures that the static_assert will
    // only be evaluated if this function body is instantiated (we expect only
    // the explicit specializations to be used).  Alternatively, we could omit
    // the body entirely, in which case an unexpected choice of T would result
    // in a link-time failure rather than a compile-time failure.
    static_assert(false && sizeof(T), "Unimplemented");
    return false;
}
template <>
bool checkAsInt(uint32_t val) {
    static_assert(sizeof(int) <= sizeof(uint32_t));
    NN_RET_CHECK_LE(val, uint32_t(std::numeric_limits<int>::max()))
            << kOperationName << " cannot represent value as int";
    return true;
}

// Wrapper for data computed by the eval function.
template <typename T>
struct EvalData {
    EvalData(const int* thePaddingTensor, Shape theInputTensorShape,
             std::vector<int> theOutputDimsNumElements, std::vector<int> theInputDimsNumElements,
             const T* theInputData, int theOffset, T* theOutputData, int theNumDims)
        : paddingTensor(thePaddingTensor),
          inputTensorShape(std::move(theInputTensorShape)),
          outputDimsNumElements(std::move(theOutputDimsNumElements)),
          inputDimsNumElements(std::move(theInputDimsNumElements)),
          inputData(theInputData),
          offset(theOffset),
          outputData(theOutputData),
          numDims(theNumDims) {}
    const int32_t* paddingTensor = nullptr;
    Shape inputTensorShape;
    // Per-dimension strides, in elements: the value at the last dimension is 1,
    // and the value at dimension i is the product of the sizes of dimensions
    // i+1 through the last.  E.g., for shape [2, 3, 4] the strides are [12, 4, 1].
    std::vector<int> outputDimsNumElements;
    std::vector<int> inputDimsNumElements;
    const T* inputData = nullptr;

    int offset = -1;
    T* outputData = nullptr;
    int numDims = 0;
};

// Helper function that obtains the left and right padding amounts for one
// dimension.  The padding tensor has shape [numDims, 2], laid out as
// {before_0, after_0, before_1, after_1, ...}.
void getPadding(const int32_t* paddingTensor, int dimension, int32_t* leftPad, int32_t* rightPad) {
    *leftPad = *(paddingTensor + dimension * 2);
    *rightPad = *(paddingTensor + dimension * 2 + 1);
}
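// E.g., with paddingTensor = {1, 2, 3, 4}, dimension 1 yields *leftPad == 3 and
// *rightPad == 4.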

// Given an index into a padded dimension and the left/right padding amounts,
// returns the corresponding index in the input array.
int getInputDimension(int paddedDimension, const int leftPad, const int /* rightPad */,
                      const int inputDimSize, const int offset) {
    if (paddedDimension < leftPad) {
        const int originalInd = leftPad + offset - 1;
        return originalInd - (std::min(paddedDimension, originalInd - offset));
    }
    paddedDimension -= leftPad;
    if (paddedDimension >= inputDimSize) {
        paddedDimension -= inputDimSize;
        const int originalInd = inputDimSize - (1 + offset);
        return originalInd - std::min(paddedDimension, originalInd);
    }
    return paddedDimension;
}
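// Worked example: with inputDimSize 4, leftPad 2, and rightPad 2, REFLECT mode
// (offset 1) maps padded indices 0..7 to input indices 2,1,0,1,2,3,2,1, so
// [a b c d] pads to [c b a b c d c b]; SYMMETRIC mode (offset 0) maps them to
// 1,0,0,1,2,3,3,2, producing [b a a b c d d c].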

// Given an index in the output array, returns the index of the corresponding
// value in the input array.
template <typename T>
int getFlatIndex(int index, const EvalData<T>& evalData) {
    int flatIndex = 0;
    for (int i = 0, nD = evalData.numDims; i < nD; ++i) {
        int32_t leftPad, rightPad;
        getPadding(evalData.paddingTensor, i, &leftPad, &rightPad);
        const int dimensionIndex = index / evalData.outputDimsNumElements[i];
        // The int(...) narrowing is safe: eval() has already run checkAsInt()
        // on every getSizeOfDimension() result.
        const int indexInInput = getInputDimension(
                dimensionIndex, leftPad, rightPad,
                int(getSizeOfDimension(evalData.inputTensorShape, i)), evalData.offset);
        flatIndex += indexInInput * evalData.inputDimsNumElements[i];
        index %= evalData.outputDimsNumElements[i];
    }
    return flatIndex;
}
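// In effect, the loop peels off each dimension's coordinate by dividing by the
// output stride, maps that coordinate back into the input with
// getInputDimension(), and recombines the mapped coordinates using the input
// strides.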

template <typename T>
void run(const EvalData<T>& evalData, const int outputSize) {
    // Single-threaded counterpart of TFLite's MirrorPadWorkerTask::Run().
    const auto* inputData = evalData.inputData;
    auto* outputData = evalData.outputData;
    for (int i = 0; i < outputSize; ++i) {
        outputData[i] = inputData[getFlatIndex(i, evalData)];
    }
}
}  // namespace

bool eval(IOperationExecutionContext* context) {
    const Shape inputShape = context->getInputShape(kInputTensor);
    const int32_t* padding = context->getInputBuffer<int32_t>(kInputPaddingTensor);
    const int32_t mode = context->getInputValue<int32_t>(kInputModeScalar);
    const Shape outputShape = context->getOutputShape(kOutputTensor);
    const auto tensorType = context->getInputType(kInputTensor);

    const uint32_t inputDims = getNumberOfDimensions(inputShape);
    NN_RET_CHECK(checkAsInt(inputDims));
    const int numDims = int(inputDims);

    // checkAsInt() the individual dimensions here so we do not need to do so
    // elsewhere.
    for (int i = 0; i < numDims; ++i) {
        const auto inputDim = getSizeOfDimension(inputShape, i);
        NN_RET_CHECK(checkAsInt(inputDim));
        const auto outputDim = getSizeOfDimension(outputShape, i);
        NN_RET_CHECK(checkAsInt(outputDim));
    }

    std::vector<int> outputDimsNumElements(inputDims, 1);
    std::vector<int> inputDimsNumElements(inputDims, 1);
    for (int i = numDims - 2; i >= 0; i--) {
        outputDimsNumElements[i] =
                outputDimsNumElements[i + 1] * int(getSizeOfDimension(outputShape, i + 1));
        inputDimsNumElements[i] =
                inputDimsNumElements[i + 1] * int(getSizeOfDimension(inputShape, i + 1));
    }

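    // REFLECT mirrors about the edge element and so skips it (offset 1);
    // SYMMETRIC repeats the edge element (offset 0).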
    const int32_t offset = mode != kModeReflect ? 0 : 1;

    const auto outputSize = getNumberOfElements(outputShape);

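    // Dispatch on the element type: each case instantiates EvalData and run()
    // for the C++ type corresponding to the OperandType.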
#define MIRROR_PAD_CASE(operandType, dataType)                                               \
    case OperandType::operandType: {                                                         \
        const EvalData evalData(padding, inputShape, std::move(outputDimsNumElements),       \
                                std::move(inputDimsNumElements),                             \
                                context->getInputBuffer<dataType>(kInputTensor), offset,     \
                                context->getOutputBuffer<dataType>(kOutputTensor), numDims); \
        NN_RET_CHECK(checkAsInt(outputSize));                                                \
        run(evalData, int(outputSize));                                                      \
        return true;                                                                         \
    }
    switch (tensorType) {
        MIRROR_PAD_CASE(TENSOR_FLOAT16, _Float16)
        MIRROR_PAD_CASE(TENSOR_FLOAT32, float)
        MIRROR_PAD_CASE(TENSOR_QUANT8_ASYMM, uint8_t)
        MIRROR_PAD_CASE(TENSOR_QUANT8_ASYMM_SIGNED, int8_t)
        MIRROR_PAD_CASE(TENSOR_INT32, int32_t)
        default:
            NN_RET_CHECK_FAIL() << "Unsupported tensor type for operation " << kOperationName;
    }
}

/*-- end execution --------------------------------------------------------------------*/

#endif  // NN_INCLUDE_CPU_IMPLEMENTATION

}  // namespace mirror_pad_op

NN_REGISTER_OPERATION_DEFAULT_VALIDATION(MIRROR_PAD, mirror_pad_op::prepare, mirror_pad_op::eval);

}  // namespace nn
}  // namespace android