/*
 * Copyright (C) 2019 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

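// Fuzz-test operation signatures for the NNAPI 2-D pooling ops (AVERAGE_POOL_2D, L2_POOL_2D,
// and MAX_POOL_2D), covering both the explicit-padding and the implicit-padding variants.
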
#include "fuzzing/operation_signatures/OperationSignatureUtils.h"

namespace android {
namespace nn {
namespace fuzzing_test {

// For pooling ops with explicit padding.
static void poolingExplicitOpConstructor(TestOperandType, uint32_t rank, RandomOperation* op) {
    NN_FUZZER_CHECK(rank == 4);

    // Parameters
    int32_t paddingLeft = op->inputs[1]->value<int32_t>();
    int32_t paddingRight = op->inputs[2]->value<int32_t>();
    int32_t paddingTop = op->inputs[3]->value<int32_t>();
    int32_t paddingBottom = op->inputs[4]->value<int32_t>();
    int32_t strideWidth = op->inputs[5]->value<int32_t>();
    int32_t strideHeight = op->inputs[6]->value<int32_t>();
    auto filterWidth = op->inputs[7]->value<RandomVariable>();
    auto filterHeight = op->inputs[8]->value<RandomVariable>();
    bool useNchw = false;
    if (op->inputs.size() > 10) useNchw = op->inputs[10]->value<bool8>();
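    // Map the spatial and channel dimensions to their positions in the chosen layout:
    // NHWC (the default) is [batch, height, width, channel]; NCHW moves channel to index 1
    // and the spatial dimensions to indices 2 and 3.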
    int heightIndex = useNchw ? 2 : 1;
    int widthIndex = useNchw ? 3 : 2;
    int channelIndex = useNchw ? 1 : 3;

    // Input, [batch, height_in, width_in, channel]
    op->inputs[0]->dimensions = {RandomVariableType::FREE, RandomVariableType::FREE,
                                 RandomVariableType::FREE, RandomVariableType::FREE};

    // Output, [batch, height_out, width_out, channel]
    op->outputs[0]->dimensions.resize(4);

    // batch and channel
    op->outputs[0]->dimensions[0] = op->inputs[0]->dimensions[0];
    op->outputs[0]->dimensions[channelIndex] = op->inputs[0]->dimensions[channelIndex];

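    // Constrain the spatial output dimensions. For explicit padding the NNAPI convention is
    //   out = (in + paddingHead + paddingTail - filter) / stride + 1   (integer division),
    // e.g. in = 5, filter = 2, stride = 2, padding 1 + 1 gives out = (5 + 2 - 2) / 2 + 1 = 3.
    // explicitPadding() (OperationSignatureUtils.h) is expected to encode this relation on the
    // free RandomVariables.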
    // height
    explicitPadding(op->inputs[0]->dimensions[heightIndex], filterHeight, strideHeight,
                    /*dilation=*/1, paddingTop, paddingBottom,
                    &op->outputs[0]->dimensions[heightIndex]);

    // width
    explicitPadding(op->inputs[0]->dimensions[widthIndex], filterWidth, strideWidth, /*dilation=*/1,
                    paddingLeft, paddingRight, &op->outputs[0]->dimensions[widthIndex]);

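    // Quantized pooling ops must keep the input's scale and zero point, so the output simply
    // inherits the input's quantization parameters.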
    setSameQuantization(op->outputs[0], op->inputs[0]);
}

// For pooling ops with implicit padding.
static void poolingImplicitOpConstructor(TestOperandType, uint32_t rank, RandomOperation* op) {
    NN_FUZZER_CHECK(rank == 4);

    // Parameters
    int32_t paddingScheme = op->inputs[1]->value<int32_t>();
    int32_t strideWidth = op->inputs[2]->value<int32_t>();
    int32_t strideHeight = op->inputs[3]->value<int32_t>();
    auto filterWidth = op->inputs[4]->value<RandomVariable>();
    auto filterHeight = op->inputs[5]->value<RandomVariable>();
    bool useNchw = false;
    if (op->inputs.size() > 7) useNchw = op->inputs[7]->value<bool8>();
    int heightIndex = useNchw ? 2 : 1;
    int widthIndex = useNchw ? 3 : 2;
    int channelIndex = useNchw ? 1 : 3;

    // Input, [batch, height_in, width_in, channel]
    op->inputs[0]->dimensions = {RandomVariableType::FREE, RandomVariableType::FREE,
                                 RandomVariableType::FREE, RandomVariableType::FREE};

    // Output, [batch, height_out, width_out, channel]
    op->outputs[0]->dimensions.resize(4);

    // batch and channel
    op->outputs[0]->dimensions[0] = op->inputs[0]->dimensions[0];
    op->outputs[0]->dimensions[channelIndex] = op->inputs[0]->dimensions[channelIndex];

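    // paddingScheme follows the NNAPI convention: 1 = PADDING_SAME, 2 = PADDING_VALID.
    // implicitPadding() is expected to derive the output dimension accordingly, roughly
    // ceil(in / stride) for SAME and ceil((in - filter + 1) / stride) for VALID
    // (dilation is fixed to 1 here).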
    // height and width
    implicitPadding(op->inputs[0]->dimensions[heightIndex], filterHeight, strideHeight,
                    /*dilation=*/1, paddingScheme, &op->outputs[0]->dimensions[heightIndex]);
    implicitPadding(op->inputs[0]->dimensions[widthIndex], filterWidth, strideWidth,
                    /*dilation=*/1, paddingScheme, &op->outputs[0]->dimensions[widthIndex]);

    setSameQuantization(op->outputs[0], op->inputs[0]);
}

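// Operand order in the explicit-padding signature below mirrors the NNAPI pooling signature:
// input, padding_left, padding_right, padding_top, padding_bottom, stride_width, stride_height,
// filter_width, filter_height, fused activation code (0-3 = NONE/RELU/RELU1/RELU6).
// The implicit-padding signature replaces the four padding values with a single padding scheme.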
#define DEFINE_POOLING_SIGNATURE(op, ver, ...) \
    DEFINE_OPERATION_SIGNATURE(op##_explicit_##ver){ \
            .opType = TestOperationType::op, \
            .supportedDataTypes = {__VA_ARGS__}, \
            .supportedRanks = {4}, \
            .version = TestHalVersion::ver, \
            .inputs = \
                    { \
                            INPUT_DEFAULT, \
                            PARAMETER_RANGE(TestOperandType::INT32, 1, 3), \
                            PARAMETER_RANGE(TestOperandType::INT32, 1, 3), \
                            PARAMETER_RANGE(TestOperandType::INT32, 1, 3), \
                            PARAMETER_RANGE(TestOperandType::INT32, 1, 3), \
                            PARAMETER_RANGE(TestOperandType::INT32, 1, 3), \
                            PARAMETER_RANGE(TestOperandType::INT32, 1, 3), \
                            RANDOM_INT_RANGE(1, 4), \
                            RANDOM_INT_RANGE(1, 4), \
                            PARAMETER_CHOICE(TestOperandType::INT32, 0, 1, 2, 3), \
                    }, \
            .outputs = {OUTPUT_DEFAULT}, \
            .constructor = poolingExplicitOpConstructor}; \
    DEFINE_OPERATION_SIGNATURE(op##_implicit_##ver){ \
            .opType = TestOperationType::op, \
            .supportedDataTypes = {__VA_ARGS__}, \
            .supportedRanks = {4}, \
            .version = TestHalVersion::ver, \
            .inputs = \
                    { \
                            INPUT_DEFAULT, \
                            PARAMETER_CHOICE(TestOperandType::INT32, 1, 2), \
                            PARAMETER_RANGE(TestOperandType::INT32, 1, 3), \
                            PARAMETER_RANGE(TestOperandType::INT32, 1, 3), \
                            RANDOM_INT_RANGE(1, 4), \
                            RANDOM_INT_RANGE(1, 4), \
                            PARAMETER_CHOICE(TestOperandType::INT32, 0, 1, 2, 3), \
                    }, \
            .outputs = {OUTPUT_DEFAULT}, \
            .constructor = poolingImplicitOpConstructor};

DEFINE_POOLING_SIGNATURE(AVERAGE_POOL_2D, V1_0, TestOperandType::TENSOR_FLOAT32,
                         TestOperandType::TENSOR_QUANT8_ASYMM);
DEFINE_POOLING_SIGNATURE(L2_POOL_2D, V1_0, TestOperandType::TENSOR_FLOAT32);
DEFINE_POOLING_SIGNATURE(MAX_POOL_2D, V1_0, TestOperandType::TENSOR_FLOAT32,
                         TestOperandType::TENSOR_QUANT8_ASYMM);

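// HAL V1_2 added TENSOR_FLOAT16 support for these pooling ops.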
DEFINE_POOLING_SIGNATURE(AVERAGE_POOL_2D, V1_2, TestOperandType::TENSOR_FLOAT16);
DEFINE_POOLING_SIGNATURE(L2_POOL_2D, V1_2, TestOperandType::TENSOR_FLOAT16);
DEFINE_POOLING_SIGNATURE(MAX_POOL_2D, V1_2, TestOperandType::TENSOR_FLOAT16);

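// Same signatures plus the optional data-layout parameter available since HAL V1_2: a trailing
// BOOL that selects NCHW (true) or NHWC (false).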
#define DEFINE_POOLING_WITH_LAYOUT_SIGNATURE(op, ver, ...) \
    DEFINE_OPERATION_SIGNATURE(op##_explicit_layout_##ver){ \
            .opType = TestOperationType::op, \
            .supportedDataTypes = {__VA_ARGS__}, \
            .supportedRanks = {4}, \
            .version = TestHalVersion::ver, \
            .inputs = \
                    { \
                            INPUT_DEFAULT, \
                            PARAMETER_RANGE(TestOperandType::INT32, 1, 3), \
                            PARAMETER_RANGE(TestOperandType::INT32, 1, 3), \
                            PARAMETER_RANGE(TestOperandType::INT32, 1, 3), \
                            PARAMETER_RANGE(TestOperandType::INT32, 1, 3), \
                            PARAMETER_RANGE(TestOperandType::INT32, 1, 3), \
                            PARAMETER_RANGE(TestOperandType::INT32, 1, 3), \
                            RANDOM_INT_RANGE(1, 4), \
                            RANDOM_INT_RANGE(1, 4), \
                            PARAMETER_CHOICE(TestOperandType::INT32, 0, 1, 2, 3), \
                            PARAMETER_CHOICE(TestOperandType::BOOL, true, false), \
                    }, \
            .outputs = {OUTPUT_DEFAULT}, \
            .constructor = poolingExplicitOpConstructor}; \
    DEFINE_OPERATION_SIGNATURE(op##_implicit_layout_##ver){ \
            .opType = TestOperationType::op, \
            .supportedDataTypes = {__VA_ARGS__}, \
            .supportedRanks = {4}, \
            .version = TestHalVersion::ver, \
            .inputs = \
                    { \
                            INPUT_DEFAULT, \
                            PARAMETER_CHOICE(TestOperandType::INT32, 1, 2), \
                            PARAMETER_RANGE(TestOperandType::INT32, 1, 3), \
                            PARAMETER_RANGE(TestOperandType::INT32, 1, 3), \
                            RANDOM_INT_RANGE(1, 4), \
                            RANDOM_INT_RANGE(1, 4), \
                            PARAMETER_CHOICE(TestOperandType::INT32, 0, 1, 2, 3), \
                            PARAMETER_CHOICE(TestOperandType::BOOL, true, false), \
                    }, \
            .outputs = {OUTPUT_DEFAULT}, \
            .constructor = poolingImplicitOpConstructor};

DEFINE_POOLING_WITH_LAYOUT_SIGNATURE(AVERAGE_POOL_2D, V1_2, TestOperandType::TENSOR_FLOAT32,
                                     TestOperandType::TENSOR_FLOAT16,
                                     TestOperandType::TENSOR_QUANT8_ASYMM);
DEFINE_POOLING_WITH_LAYOUT_SIGNATURE(L2_POOL_2D, V1_2, TestOperandType::TENSOR_FLOAT32,
                                     TestOperandType::TENSOR_FLOAT16);
DEFINE_POOLING_WITH_LAYOUT_SIGNATURE(MAX_POOL_2D, V1_2, TestOperandType::TENSOR_FLOAT32,
                                     TestOperandType::TENSOR_FLOAT16,
                                     TestOperandType::TENSOR_QUANT8_ASYMM);
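// TENSOR_QUANT8_ASYMM_SIGNED was introduced in HAL V1_3; L2_POOL_2D only supports floating-point
// tensors, so it gets no V1_3 signature.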
DEFINE_POOLING_WITH_LAYOUT_SIGNATURE(AVERAGE_POOL_2D, V1_3,
                                     TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED);
DEFINE_POOLING_WITH_LAYOUT_SIGNATURE(MAX_POOL_2D, V1_3,
                                     TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED);

}  // namespace fuzzing_test
}  // namespace nn
}  // namespace android