/*
 * Copyright (C) 2021 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "Operations"

#include "Pack.h"

#include "OperationResolver.h"
#include "OperationsExecutionUtils.h"

#ifdef NN_INCLUDE_CPU_IMPLEMENTATION
#include <limits>
#include <vector>

#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wunused-parameter"
#pragma clang diagnostic ignored "-Wsign-compare"
#include <tensorflow/lite/kernels/internal/reference/reference_ops.h>
#pragma clang diagnostic pop

#include "CpuOperationUtils.h"
#endif  // NN_INCLUDE_CPU_IMPLEMENTATION

namespace android {
namespace nn {
namespace pack_op {

#ifdef NN_INCLUDE_CPU_IMPLEMENTATION
bool prepare(IOperationExecutionContext* context) {
    // All input tensors must have the same dimensions and be of rank 1 or higher.
    const Shape firstInputTensorShape = context->getInputShape(kInputFirstTensor);
    const uint32_t firstInputTensorRank = getNumberOfDimensions(firstInputTensorShape);
    NN_RET_CHECK_GE(firstInputTensorRank, 1U);
    for (uint32_t inputTensorNum = 1, inputTensorCount = context->getNumInputs() - 1;
         inputTensorNum < inputTensorCount; ++inputTensorNum) {
        NN_RET_CHECK(SameShape(firstInputTensorShape,
                               context->getInputShape(kInputFirstTensor + inputTensorNum)))
                << "Input tensor #" << inputTensorNum
                << " dimensions do not match input tensor #0 dimensions";
    }

    // Fetch the axis dimension value.
    const int32_t axisDimension = context->getInputValue<int32_t>(kInputAxisScalar);
    NN_RET_CHECK_GE(axisDimension, 0);
    NN_RET_CHECK_LT(uint32_t(axisDimension), firstInputTensorRank + 1);

    // TODO: http://b/78268320 validate that output shape is consistent with input rather than
    // blindly overwriting it. Output tensor is of rank 1 higher than input tensors.
    const uint32_t outputTensorRank = firstInputTensorRank + 1;
    // For the (j)th output dimension:
    // - If (j) is less than the axis dimension, the (j)th output dimension must match the (j)th
    //   input dimension.
    // - If (j) is the axis dimension, the (j)th output dimension must equal the number of input
    //   tensors.
    // - If (j) is greater than the axis dimension, the (j)th output dimension must match the
    //   (j-1)th input dimension.
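    // For example, packing three rank-2 inputs of dimensions [4, 6] with axis = 1 yields an
    // output of dimensions [4, 3, 6]: dimension 0 is copied from the inputs, dimension 1 holds
    // the input tensor count, and dimension 2 is the former input dimension 1.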
    Shape outputShape = context->getOutputShape(kOutputTensor);
    outputShape.dimensions.resize(outputTensorRank);
    for (int32_t j = 0; j < axisDimension; ++j) {
        outputShape.dimensions[j] = firstInputTensorShape.dimensions[j];
    }
    outputShape.dimensions[axisDimension] = context->getNumInputs() - 1;
    for (int32_t j = axisDimension + 1; j < int32_t(outputTensorRank); ++j) {
        outputShape.dimensions[j] = firstInputTensorShape.dimensions[j - 1];
    }
    return context->setOutputShape(kOutputTensor, outputShape);
}

bool packParams(IOperationExecutionContext* context, tflite::PackParams* params) {
    const int32_t axis = context->getInputValue<int32_t>(kInputAxisScalar);
    NN_RET_CHECK_LE(axis, std::numeric_limits<typeof(params->axis)>().max())
            << "axis value out of range";
    params->axis = axis;

    const uint32_t inputTensorCount = context->getNumInputs() - 1;
    NN_RET_CHECK_LE(inputTensorCount, std::numeric_limits<typeof(params->inputs_count)>().max())
            << "input count out of range";
    params->inputs_count = inputTensorCount;
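    // The range checks above guard against narrowing: tflite::PackParams stores axis and
    // inputs_count in types narrower than the NNAPI int32_t/uint32_t values (int8_t and
    // uint16_t in current TFLite headers), so out-of-range values would otherwise be
    // silently truncated.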

    // Note that the NNAPI PACK operation specification requires all input
    // tensors and the output tensor to have the same zeroPoint and scale.
    const Shape tensorShape = context->getInputShape(kInputFirstTensor);

    const std::vector<int32_t> paramsInputZeroPoint(inputTensorCount, tensorShape.offset);
    params->input_zeropoint = paramsInputZeroPoint.data();
    const std::vector<float> paramsInputScale(inputTensorCount, tensorShape.scale);
    params->input_scale = paramsInputScale.data();
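    // Note: paramsInputZeroPoint and paramsInputScale are locals, so the pointers stored in
    // *params do not outlive this function. This appears to be safe only because
    // reference_ops::Pack (unlike PackWithScaling) reads just axis and inputs_count; the
    // per-input zero points and scales are not dereferenced after packParams returns.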
    params->output_zeropoint = tensorShape.offset;
    params->output_scale = tensorShape.scale;

    return true;
}

template <typename T>
bool pack(IOperationExecutionContext* context) {
    tflite::PackParams params;
    NN_RET_CHECK(packParams(context, &params));

    const uint32_t inputTensorCount = context->getNumInputs() - 1;

    // Note that the NNAPI PACK operation specification requires all input
    // tensors to have the same dimensions.
    const tflite::RuntimeShape inputTensorShapes =
            convertShapeToTflshape(context->getInputShape(kInputFirstTensor));
    const std::vector<const tflite::RuntimeShape*> inputShapesPtrs(inputTensorCount,
                                                                   &inputTensorShapes);
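    // Because all inputs share the same dimensions, a single RuntimeShape instance is reused:
    // inputShapesPtrs holds inputTensorCount copies of the same pointer.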

    std::vector<const T*> inputData(inputTensorCount);
    for (uint32_t inputTensorNum = 0; inputTensorNum < inputTensorCount; ++inputTensorNum) {
        inputData[inputTensorNum] = context->getInputBuffer<T>(kInputFirstTensor + inputTensorNum);
    }

    tflite::reference_ops::Pack(params, inputShapesPtrs.data(), inputData.data(),
                                convertShapeToTflshape(context->getOutputShape(kOutputTensor)),
                                context->getOutputBuffer<T>(kOutputTensor));
    return true;
}

bool execute(IOperationExecutionContext* context) {
    switch (context->getInputType(kInputFirstTensor)) {
        case OperandType::TENSOR_FLOAT16:
            return pack<_Float16>(context);
        case OperandType::TENSOR_FLOAT32:
            return pack<float>(context);
        case OperandType::TENSOR_QUANT8_ASYMM:
            return pack<uint8_t>(context);
        case OperandType::TENSOR_QUANT8_ASYMM_SIGNED:
            return pack<int8_t>(context);
        case OperandType::TENSOR_INT32:
            return pack<int32_t>(context);
        default:
            NN_RET_CHECK_FAIL() << "Unsupported tensor type for operation " << kOperationName;
    }
}
#endif  // NN_INCLUDE_CPU_IMPLEMENTATION

}  // namespace pack_op

NN_REGISTER_OPERATION_DEFAULT_VALIDATION(PACK, pack_op::prepare, pack_op::execute);

}  // namespace nn
}  // namespace android