1 /*
2 * Copyright (C) 2019 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #include <algorithm>
18 #include <vector>
19
20 #include "fuzzing/operation_signatures/OperationSignatureUtils.h"
21
22 namespace android {
23 namespace nn {
24 namespace fuzzing_test {
25
spaceToDepthConstructor(TestOperandType,uint32_t rank,RandomOperation * op)26 static void spaceToDepthConstructor(TestOperandType, uint32_t rank, RandomOperation* op) {
27 NN_FUZZER_CHECK(rank == 4);
28
29 bool useNchw = false;
30 if (op->inputs.size() > 2) useNchw = op->inputs[2]->value<bool8>();
31 int heightIndex = useNchw ? 2 : 1;
32 int widthIndex = useNchw ? 3 : 2;
33 int depthIndex = useNchw ? 1 : 3;
34
35 op->inputs[0]->dimensions = {RandomVariableType::FREE, RandomVariableType::FREE,
36 RandomVariableType::FREE, RandomVariableType::FREE};
37 int32_t blockSize = op->inputs[1]->value<int32_t>();
38 auto outHeight = op->inputs[0]->dimensions[heightIndex].exactDiv(blockSize);
39 auto outWidth = op->inputs[0]->dimensions[widthIndex].exactDiv(blockSize);
40 auto outDepth = op->inputs[0]->dimensions[depthIndex] * (blockSize * blockSize);
41
42 if (useNchw) {
43 op->outputs[0]->dimensions = {op->inputs[0]->dimensions[0], outDepth, outHeight, outWidth};
44 } else {
45 op->outputs[0]->dimensions = {op->inputs[0]->dimensions[0], outHeight, outWidth, outDepth};
46 }
47 setSameQuantization(op->outputs[0], op->inputs[0]);
48 }
49
// SPACE_TO_DEPTH signature without the optional layout parameter: input tensor
// plus an INT32 block size drawn from [1, 5]. `ver` is the HAL version; the
// variadic arguments list the supported tensor data types.
#define DEFINE_SPACE_TO_DEPTH_SIGNATURE(ver, ...)                                     \
    DEFINE_OPERATION_SIGNATURE(SPACE_TO_DEPTH_##ver){                                 \
            .opType = TestOperationType::SPACE_TO_DEPTH,                              \
            .supportedDataTypes = {__VA_ARGS__},                                      \
            .supportedRanks = {4},                                                    \
            .version = TestHalVersion::ver,                                           \
            .inputs = {INPUT_DEFAULT, PARAMETER_RANGE(TestOperandType::INT32, 1, 5)}, \
            .outputs = {OUTPUT_DEFAULT},                                              \
            .constructor = spaceToDepthConstructor};

DEFINE_SPACE_TO_DEPTH_SIGNATURE(V1_0, TestOperandType::TENSOR_FLOAT32,
                                TestOperandType::TENSOR_QUANT8_ASYMM);
DEFINE_SPACE_TO_DEPTH_SIGNATURE(V1_2, TestOperandType::TENSOR_FLOAT16);
DEFINE_SPACE_TO_DEPTH_SIGNATURE(V1_3, TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED);
64
// SPACE_TO_DEPTH signature with the trailing optional BOOL layout parameter
// (NCHW when true, NHWC when false), available since HAL 1.2.
#define DEFINE_SPACE_TO_DEPTH_WITH_LAYOUT_SIGNATURE(ver, ...)                        \
    DEFINE_OPERATION_SIGNATURE(SPACE_TO_DEPTH_layout_##ver){                         \
            .opType = TestOperationType::SPACE_TO_DEPTH,                             \
            .supportedDataTypes = {__VA_ARGS__},                                     \
            .supportedRanks = {4},                                                   \
            .version = TestHalVersion::ver,                                          \
            .inputs = {INPUT_DEFAULT, PARAMETER_RANGE(TestOperandType::INT32, 1, 5), \
                       PARAMETER_CHOICE(TestOperandType::BOOL, true, false)},        \
            .outputs = {OUTPUT_DEFAULT},                                             \
            .constructor = spaceToDepthConstructor};

DEFINE_SPACE_TO_DEPTH_WITH_LAYOUT_SIGNATURE(V1_2, TestOperandType::TENSOR_FLOAT32,
                                            TestOperandType::TENSOR_QUANT8_ASYMM,
                                            TestOperandType::TENSOR_FLOAT16);
DEFINE_SPACE_TO_DEPTH_WITH_LAYOUT_SIGNATURE(V1_3, TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED);
80
depthToSpaceConstructor(TestOperandType,uint32_t rank,RandomOperation * op)81 static void depthToSpaceConstructor(TestOperandType, uint32_t rank, RandomOperation* op) {
82 NN_FUZZER_CHECK(rank == 4);
83
84 bool useNchw = false;
85 if (op->inputs.size() > 2) useNchw = op->inputs[2]->value<bool8>();
86 int heightIndex = useNchw ? 2 : 1;
87 int widthIndex = useNchw ? 3 : 2;
88 int depthIndex = useNchw ? 1 : 3;
89
90 op->inputs[0]->dimensions = {RandomVariableType::FREE, RandomVariableType::FREE,
91 RandomVariableType::FREE, RandomVariableType::FREE};
92 int32_t blockSize = op->inputs[1]->value<int32_t>();
93 auto outHeight = op->inputs[0]->dimensions[heightIndex] * blockSize;
94 auto outWidth = op->inputs[0]->dimensions[widthIndex] * blockSize;
95 auto outDepth = op->inputs[0]->dimensions[depthIndex].exactDiv(blockSize * blockSize);
96
97 if (useNchw) {
98 op->outputs[0]->dimensions = {op->inputs[0]->dimensions[0], outDepth, outHeight, outWidth};
99 } else {
100 op->outputs[0]->dimensions = {op->inputs[0]->dimensions[0], outHeight, outWidth, outDepth};
101 }
102 setSameQuantization(op->outputs[0], op->inputs[0]);
103 }
104
105 #define DEFINE_DEPTH_TO_SPACE_SIGNATURE(ver, ...) \
106 DEFINE_OPERATION_SIGNATURE(DEPTH_TO_SPACE_##ver){ \
107 .opType = TestOperationType::DEPTH_TO_SPACE, \
108 .supportedDataTypes = {TestOperandType::TENSOR_FLOAT32, \
109 TestOperandType::TENSOR_QUANT8_ASYMM}, \
110 .supportedRanks = {4}, \
111 .version = TestHalVersion::ver, \
112 .inputs = {INPUT_DEFAULT, PARAMETER_RANGE(TestOperandType::INT32, 1, 3)}, \
113 .outputs = {OUTPUT_DEFAULT}, \
114 .constructor = depthToSpaceConstructor};
115
116 DEFINE_DEPTH_TO_SPACE_SIGNATURE(V1_0, TestOperandType::TENSOR_FLOAT32,
117 TestOperandType::TENSOR_QUANT8_ASYMM);
118 DEFINE_DEPTH_TO_SPACE_SIGNATURE(V1_2, TestOperandType::TENSOR_FLOAT16);
119 DEFINE_DEPTH_TO_SPACE_SIGNATURE(V1_3, TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED);
120
// DEPTH_TO_SPACE signature with the trailing optional BOOL layout parameter
// (NCHW when true, NHWC when false), available since HAL 1.2.
#define DEFINE_DEPTH_TO_SPACE_WITH_LAYOUT_SIGNATURE(ver, ...)                        \
    DEFINE_OPERATION_SIGNATURE(DEPTH_TO_SPACE_layout_##ver){                         \
            .opType = TestOperationType::DEPTH_TO_SPACE,                             \
            .supportedDataTypes = {__VA_ARGS__},                                     \
            .supportedRanks = {4},                                                   \
            .version = TestHalVersion::ver,                                          \
            .inputs = {INPUT_DEFAULT, PARAMETER_RANGE(TestOperandType::INT32, 1, 3), \
                       PARAMETER_CHOICE(TestOperandType::BOOL, true, false)},        \
            .outputs = {OUTPUT_DEFAULT},                                             \
            .constructor = depthToSpaceConstructor};

DEFINE_DEPTH_TO_SPACE_WITH_LAYOUT_SIGNATURE(V1_2, TestOperandType::TENSOR_FLOAT32,
                                            TestOperandType::TENSOR_QUANT8_ASYMM,
                                            TestOperandType::TENSOR_FLOAT16);
DEFINE_DEPTH_TO_SPACE_WITH_LAYOUT_SIGNATURE(V1_3, TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED);
136
reshapeConstructor(TestOperandType,uint32_t rank,RandomOperation * op)137 static void reshapeConstructor(TestOperandType, uint32_t rank, RandomOperation* op) {
138 setFreeDimensions(op->inputs[0], rank);
139 op->inputs[1]->dimensions = {rank};
140 op->inputs[1]->randomBuffer.resize(rank);
141 RandomVariable numInputElements = 1;
142 RandomVariable numOutputElements = 1;
143 for (uint32_t i = 0; i < rank; i++) {
144 op->inputs[1]->randomBuffer[i] = RandomVariableType::FREE;
145 numInputElements = numInputElements * op->inputs[0]->dimensions[i];
146 numOutputElements = numOutputElements * op->inputs[1]->randomBuffer[i];
147 }
148 numInputElements.setEqual(numOutputElements);
149 op->outputs[0]->dimensions = op->inputs[1]->randomBuffer;
150 setSameQuantization(op->outputs[0], op->inputs[0]);
151 }
152
153 #define DEFINE_RESHAPE_SIGNATURE(ver, ...) \
154 DEFINE_OPERATION_SIGNATURE(RESHAPE_##ver){ \
155 .opType = TestOperationType::RESHAPE, \
156 .supportedDataTypes = {TestOperandType::TENSOR_FLOAT32, \
157 TestOperandType::TENSOR_QUANT8_ASYMM}, \
158 .supportedRanks = {1, 2, 3, 4}, \
159 .version = TestHalVersion::ver, \
160 .inputs = {INPUT_DEFAULT, PARAMETER_NONE(TestOperandType::TENSOR_INT32)}, \
161 .outputs = {OUTPUT_DEFAULT}, \
162 .constructor = reshapeConstructor};
163
164 DEFINE_RESHAPE_SIGNATURE(V1_0, TestOperandType::TENSOR_FLOAT32,
165 TestOperandType::TENSOR_QUANT8_ASYMM);
166 DEFINE_RESHAPE_SIGNATURE(V1_2, TestOperandType::TENSOR_FLOAT16);
167 DEFINE_RESHAPE_SIGNATURE(V1_3, TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED);
168
batchToSpaceConstructor(TestOperandType,uint32_t rank,RandomOperation * op)169 static void batchToSpaceConstructor(TestOperandType, uint32_t rank, RandomOperation* op) {
170 NN_FUZZER_CHECK(rank == 4);
171
172 bool useNchw = false;
173 if (op->inputs.size() > 2) useNchw = op->inputs[2]->value<bool8>();
174 int heightIndex = useNchw ? 2 : 1;
175 int widthIndex = useNchw ? 3 : 2;
176
177 op->inputs[0]->dimensions = {RandomVariableType::FREE, RandomVariableType::FREE,
178 RandomVariableType::FREE, RandomVariableType::FREE};
179 int32_t blockHeight = op->inputs[1]->value<int32_t>(0);
180 int32_t blockWidth = op->inputs[1]->value<int32_t>(1);
181 auto outBatch = op->inputs[0]->dimensions[0].exactDiv(blockHeight * blockWidth);
182 auto outHeight = op->inputs[0]->dimensions[heightIndex] * blockHeight;
183 auto outWidth = op->inputs[0]->dimensions[widthIndex] * blockWidth;
184
185 if (useNchw) {
186 op->outputs[0]->dimensions = {outBatch, op->inputs[0]->dimensions[1], outHeight, outWidth};
187 } else {
188 op->outputs[0]->dimensions = {outBatch, outHeight, outWidth, op->inputs[0]->dimensions[3]};
189 }
190 setSameQuantization(op->outputs[0], op->inputs[0]);
191 }
192
// BATCH_TO_SPACE_ND signature without the layout parameter: input tensor plus a
// length-2 TENSOR_INT32 block-shape vector with values drawn from [1, 3].
#define DEFINE_BATCH_TO_SPACE_ND_SIGNATURE(ver, ...)                                     \
    DEFINE_OPERATION_SIGNATURE(BATCH_TO_SPACE_ND_##ver){                                 \
            .opType = TestOperationType::BATCH_TO_SPACE_ND,                              \
            .supportedDataTypes = {__VA_ARGS__},                                         \
            .supportedRanks = {4},                                                       \
            .version = TestHalVersion::ver,                                              \
            .inputs = {INPUT_DEFAULT, PARAMETER_VEC_RANGE(TestOperandType::TENSOR_INT32, \
                                                          /*len=*/2, /*range=*/1, 3)},   \
            .outputs = {OUTPUT_DEFAULT},                                                 \
            .constructor = batchToSpaceConstructor};

DEFINE_BATCH_TO_SPACE_ND_SIGNATURE(V1_1, TestOperandType::TENSOR_FLOAT32,
                                   TestOperandType::TENSOR_QUANT8_ASYMM);
DEFINE_BATCH_TO_SPACE_ND_SIGNATURE(V1_2, TestOperandType::TENSOR_FLOAT16);
DEFINE_BATCH_TO_SPACE_ND_SIGNATURE(V1_3, TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED);
208
// BATCH_TO_SPACE_ND signature with the trailing optional BOOL layout parameter
// (NCHW when true, NHWC when false), available since HAL 1.2.
#define DEFINE_BATCH_TO_SPACE_ND_WITH_LAYOUT_SIGNATURE(ver, ...)                              \
    DEFINE_OPERATION_SIGNATURE(BATCH_TO_SPACE_ND_layout_##ver){                               \
            .opType = TestOperationType::BATCH_TO_SPACE_ND,                                   \
            .supportedDataTypes = {__VA_ARGS__},                                              \
            .supportedRanks = {4},                                                            \
            .version = TestHalVersion::ver,                                                   \
            .inputs = {INPUT_DEFAULT,                                                         \
                       PARAMETER_VEC_RANGE(TestOperandType::TENSOR_INT32, /*len=*/2, /*range=*/1, \
                                           3),                                                \
                       PARAMETER_CHOICE(TestOperandType::BOOL, true, false)},                 \
            .outputs = {OUTPUT_DEFAULT},                                                      \
            .constructor = batchToSpaceConstructor};

DEFINE_BATCH_TO_SPACE_ND_WITH_LAYOUT_SIGNATURE(V1_2, TestOperandType::TENSOR_FLOAT32,
                                               TestOperandType::TENSOR_QUANT8_ASYMM,
                                               TestOperandType::TENSOR_FLOAT16);
DEFINE_BATCH_TO_SPACE_ND_WITH_LAYOUT_SIGNATURE(V1_3, TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED);
226
// Shape constructor for SPACE_TO_BATCH_ND. inputs[0] is the 4-D input tensor,
// inputs[1] the [blockHeight, blockWidth] block shape, inputs[2] the [2, 2]
// paddings tensor, and the optional inputs[3] the NCHW layout flag.
static void spaceToBatchConstructor(TestOperandType, uint32_t rank, RandomOperation* op) {
    NN_FUZZER_CHECK(rank == 4);

    // The layout flag is only present in the "_layout_" signatures below.
    bool useNchw = false;
    if (op->inputs.size() > 3) useNchw = op->inputs[3]->value<bool8>();
    int heightIndex = useNchw ? 2 : 1;
    int widthIndex = useNchw ? 3 : 2;

    op->inputs[0]->dimensions = {RandomVariableType::FREE, RandomVariableType::FREE,
                                 RandomVariableType::FREE, RandomVariableType::FREE};

    // Compute padded height and width. Paddings entries (0,1) are the
    // before/after padding for height, and (2,3) the before/after for width.
    auto paddedHeight = op->inputs[0]->dimensions[heightIndex] +
                        (op->inputs[2]->value<int32_t>(0) + op->inputs[2]->value<int32_t>(1));
    auto paddedWidth = op->inputs[0]->dimensions[widthIndex] +
                       (op->inputs[2]->value<int32_t>(2) + op->inputs[2]->value<int32_t>(3));

    // blockHeight/blockWidth must be a divisor of padded height/width
    int32_t blockHeight = op->inputs[1]->value<int32_t>(0);
    int32_t blockWidth = op->inputs[1]->value<int32_t>(1);
    auto outBatch = op->inputs[0]->dimensions[0] * (blockHeight * blockWidth);
    auto outHeight = paddedHeight.exactDiv(blockHeight);
    auto outWidth = paddedWidth.exactDiv(blockWidth);

    // The channel dimension is carried over unchanged.
    if (useNchw) {
        op->outputs[0]->dimensions = {outBatch, op->inputs[0]->dimensions[1], outHeight, outWidth};
    } else {
        op->outputs[0]->dimensions = {outBatch, outHeight, outWidth, op->inputs[0]->dimensions[3]};
    }
    setSameQuantization(op->outputs[0], op->inputs[0]);
}
258
// The paddings tensor in SPACE_TO_BATCH_ND: a [2, 2] constant tensor whose four
// entries (before/after padding for height and width) are drawn from [0, 10].
static const OperandSignature paddingTensor_SPACE_TO_BATCH_ND = {
        .type = RandomOperandType::CONST,
        .constructor = [](TestOperandType, uint32_t, RandomOperand* op) {
            op->dataType = TestOperandType::TENSOR_INT32;
            op->dimensions = {2, 2};
            op->resizeBuffer<int32_t>(4);
            // Fill all four padding amounts with random values in [0, 10].
            for (int i = 0; i < 4; i++) op->value<int32_t>(i) = getUniform<int32_t>(0, 10);
        }};
268
// SPACE_TO_BATCH_ND signature without the layout parameter: input tensor, a
// length-2 TENSOR_INT32 block-shape vector with values in [1, 5], and the
// constant [2, 2] paddings tensor defined above this macro in the file.
#define DEFINE_SPACE_TO_BATCH_SIGNATURE(ver, ...)                                             \
    DEFINE_OPERATION_SIGNATURE(SPACE_TO_BATCH_ND_##ver){                                      \
            .opType = TestOperationType::SPACE_TO_BATCH_ND,                                   \
            .supportedDataTypes = {__VA_ARGS__},                                              \
            .supportedRanks = {4},                                                            \
            .version = TestHalVersion::ver,                                                   \
            .inputs = {INPUT_DEFAULT,                                                         \
                       PARAMETER_VEC_RANGE(TestOperandType::TENSOR_INT32, /*len=*/2, /*range=*/1, \
                                           5),                                                \
                       paddingTensor_SPACE_TO_BATCH_ND},                                      \
            .outputs = {OUTPUT_DEFAULT},                                                      \
            .constructor = spaceToBatchConstructor};

DEFINE_SPACE_TO_BATCH_SIGNATURE(V1_1, TestOperandType::TENSOR_FLOAT32,
                                TestOperandType::TENSOR_QUANT8_ASYMM);
DEFINE_SPACE_TO_BATCH_SIGNATURE(V1_2, TestOperandType::TENSOR_FLOAT16);
DEFINE_SPACE_TO_BATCH_SIGNATURE(V1_3, TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED);
286
// SPACE_TO_BATCH_ND signature with the trailing optional BOOL layout parameter
// (NCHW when true, NHWC when false), available since HAL 1.2.
#define DEFINE_SPACE_TO_BATCH_WITH_LAYOUT_SIGNATURE(ver, ...)                                 \
    DEFINE_OPERATION_SIGNATURE(SPACE_TO_BATCH_ND_layout_##ver){                               \
            .opType = TestOperationType::SPACE_TO_BATCH_ND,                                   \
            .supportedDataTypes = {__VA_ARGS__},                                              \
            .supportedRanks = {4},                                                            \
            .version = TestHalVersion::ver,                                                   \
            .inputs = {INPUT_DEFAULT,                                                         \
                       PARAMETER_VEC_RANGE(TestOperandType::TENSOR_INT32, /*len=*/2, /*range=*/1, \
                                           5),                                                \
                       paddingTensor_SPACE_TO_BATCH_ND,                                       \
                       PARAMETER_CHOICE(TestOperandType::BOOL, true, false)},                 \
            .outputs = {OUTPUT_DEFAULT},                                                      \
            .constructor = spaceToBatchConstructor};

DEFINE_SPACE_TO_BATCH_WITH_LAYOUT_SIGNATURE(V1_2, TestOperandType::TENSOR_FLOAT32,
                                            TestOperandType::TENSOR_QUANT8_ASYMM,
                                            TestOperandType::TENSOR_FLOAT16);
DEFINE_SPACE_TO_BATCH_WITH_LAYOUT_SIGNATURE(V1_3, TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED);
305
padConstructor(TestOperandType,uint32_t rank,RandomOperation * op)306 static void padConstructor(TestOperandType, uint32_t rank, RandomOperation* op) {
307 setFreeDimensions(op->inputs[0], rank);
308 op->inputs[1]->dimensions = {rank, 2};
309 op->inputs[1]->resizeBuffer<int32_t>(rank * 2);
310 op->outputs[0]->dimensions.resize(rank);
311 for (uint32_t i = 0; i < rank; i++) {
312 int32_t left = getUniform<int32_t>(0, 5), right = getUniform<int32_t>(0, 5);
313 op->inputs[1]->value<int32_t>(i * 2) = left;
314 op->inputs[1]->value<int32_t>(i * 2 + 1) = right;
315 op->outputs[0]->dimensions[i] = op->inputs[0]->dimensions[i] + (left + right);
316 }
317 setSameQuantization(op->outputs[0], op->inputs[0]);
318 }
319
// The pad-value scalar of PAD_V2. Its operand type tracks the input tensor
// type: FLOAT32 tensors take a FLOAT32 scalar, FLOAT16 tensors a FLOAT16
// scalar, and quantized tensors an INT32 value covering the full storage range.
static const OperandSignature paddingScalar_PAD_V2 = {
        .type = RandomOperandType::CONST,
        .constructor = [](TestOperandType dataType, uint32_t, RandomOperand* op) {
            switch (dataType) {
                case TestOperandType::TENSOR_FLOAT32:
                    op->dataType = TestOperandType::FLOAT32;
                    op->setScalarValue<float>(getUniform<float>(-10.0f, 10.0f));
                    break;
                case TestOperandType::TENSOR_FLOAT16:
                    op->dataType = TestOperandType::FLOAT16;
                    op->setScalarValue<_Float16>(getUniform<_Float16>(-10.0f, 10.0f));
                    break;
                case TestOperandType::TENSOR_QUANT8_ASYMM:
                    op->dataType = TestOperandType::INT32;
                    // Full unsigned 8-bit quantized range.
                    op->setScalarValue<int32_t>(getUniform<int32_t>(0, 255));
                    break;
                case TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED:
                    op->dataType = TestOperandType::INT32;
                    // Full signed 8-bit quantized range.
                    op->setScalarValue<int32_t>(getUniform<int32_t>(-128, 127));
                    break;
                default:
                    NN_FUZZER_CHECK(false) << "Unsupported data type for PAD_V2";
            }
        }};
344
345 #define DEFINE_PAD_SIGNATURE(ver, ...) \
346 DEFINE_OPERATION_SIGNATURE(PAD_##ver){ \
347 .opType = TestOperationType::PAD, \
348 .supportedDataTypes = {TestOperandType::TENSOR_FLOAT32, \
349 TestOperandType::TENSOR_QUANT8_ASYMM}, \
350 .supportedRanks = {1, 2, 3, 4}, \
351 .version = TestHalVersion::ver, \
352 .inputs = {INPUT_DEFAULT, PARAMETER_NONE(TestOperandType::TENSOR_INT32)}, \
353 .outputs = {OUTPUT_DEFAULT}, \
354 .constructor = padConstructor};
355
356 DEFINE_PAD_SIGNATURE(V1_1, TestOperandType::TENSOR_FLOAT32, TestOperandType::TENSOR_QUANT8_ASYMM);
357 DEFINE_PAD_SIGNATURE(V1_2, TestOperandType::TENSOR_FLOAT16);
358 DEFINE_PAD_SIGNATURE(V1_3, TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED);
359
// PAD_V2 signature: like PAD but with an additional pad-value scalar whose type
// follows the input tensor type (see paddingScalar_PAD_V2).
#define DEFINE_PAD_V2_SIGNATURE(ver, ...)                                            \
    DEFINE_OPERATION_SIGNATURE(PAD_V2_##ver){                                        \
            .opType = TestOperationType::PAD_V2,                                     \
            .supportedDataTypes = {__VA_ARGS__},                                     \
            .supportedRanks = {1, 2, 3, 4},                                          \
            .version = TestHalVersion::ver,                                          \
            .inputs = {INPUT_DEFAULT, PARAMETER_NONE(TestOperandType::TENSOR_INT32), \
                       paddingScalar_PAD_V2},                                        \
            .outputs = {OUTPUT_DEFAULT},                                             \
            .constructor = padConstructor};

DEFINE_PAD_V2_SIGNATURE(V1_2, TestOperandType::TENSOR_FLOAT32, TestOperandType::TENSOR_QUANT8_ASYMM,
                        TestOperandType::TENSOR_FLOAT16);
DEFINE_PAD_V2_SIGNATURE(V1_3, TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED);
374
transposeConstructor(TestOperandType,uint32_t rank,RandomOperation * op)375 static void transposeConstructor(TestOperandType, uint32_t rank, RandomOperation* op) {
376 // Create the permutation value by randomly shuffling a sequential array.
377 std::vector<int32_t> permutation(rank);
378 std::iota(permutation.begin(), permutation.end(), 0);
379 randomShuffle(&permutation);
380 op->inputs[1]->resizeBuffer<int32_t>(rank);
381 std::copy(permutation.begin(), permutation.end(),
382 reinterpret_cast<int32_t*>(op->inputs[1]->buffer.data()));
383
384 setFreeDimensions(op->inputs[0], rank);
385 op->inputs[1]->dimensions = {rank};
386 op->outputs[0]->dimensions.resize(rank);
387 for (uint32_t i = 0; i < rank; i++) {
388 op->outputs[0]->dimensions[i] = op->inputs[0]->dimensions[permutation[i]];
389 }
390 setSameQuantization(op->outputs[0], op->inputs[0]);
391 }
392
// Shape constructor for TRANSPOSE with the permutation operand omitted: the
// dimensions are reversed, i.e. a plain 2-D matrix transpose here.
static void transposeOmittedConstructor(TestOperandType, uint32_t rank, RandomOperation* op) {
    NN_FUZZER_CHECK(rank == 2);
    op->inputs[0]->dimensions = {RandomVariableType::FREE, RandomVariableType::FREE};
    op->inputs[1]->dimensions = {2};
    op->outputs[0]->dimensions = {op->inputs[0]->dimensions[1], op->inputs[0]->dimensions[0]};
    setSameQuantization(op->outputs[0], op->inputs[0]);
}
400
// TRANSPOSE signatures: one with an explicit permutation tensor (ranks 1-4) and
// one with the permutation omitted (PARAMETER_NO_VALUE, rank 2 only).
#define DEFINE_TRANSPOSE_SIGNATURE(ver, ...)                                              \
    DEFINE_OPERATION_SIGNATURE(TRANSPOSE_##ver){                                          \
            .opType = TestOperationType::TRANSPOSE,                                       \
            .supportedDataTypes = {__VA_ARGS__},                                          \
            .supportedRanks = {1, 2, 3, 4},                                               \
            .version = TestHalVersion::ver,                                               \
            .inputs = {INPUT_DEFAULT, PARAMETER_NONE(TestOperandType::TENSOR_INT32)},     \
            .outputs = {OUTPUT_DEFAULT},                                                  \
            .constructor = transposeConstructor};                                         \
    DEFINE_OPERATION_SIGNATURE(TRANSPOSE_omitted_##ver){                                  \
            .opType = TestOperationType::TRANSPOSE,                                       \
            .supportedDataTypes = {__VA_ARGS__},                                          \
            .supportedRanks = {2},                                                        \
            .version = TestHalVersion::ver,                                               \
            .inputs = {INPUT_DEFAULT, PARAMETER_NO_VALUE(TestOperandType::TENSOR_INT32)}, \
            .outputs = {OUTPUT_DEFAULT},                                                  \
            .constructor = transposeOmittedConstructor};

DEFINE_TRANSPOSE_SIGNATURE(V1_1, TestOperandType::TENSOR_FLOAT32,
                           TestOperandType::TENSOR_QUANT8_ASYMM);
DEFINE_TRANSPOSE_SIGNATURE(V1_2, TestOperandType::TENSOR_FLOAT16);
DEFINE_TRANSPOSE_SIGNATURE(V1_3, TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED);
423
// Shape constructor for CHANNEL_SHUFFLE: same output shape as input (delegated
// to sameShapeOpConstructor); inputs[1] is the group count and inputs[2] the
// axis scalar.
static void channelShuffleConstructor(TestOperandType dataType, uint32_t rank,
                                      RandomOperation* op) {
    sameShapeOpConstructor(dataType, rank, op);
    // The number of groups must be a divisor of the target axis size.
    int32_t axis = getRandomAxis(rank);
    op->inputs[2]->setScalarValue<int32_t>(axis);
    int32_t numGroups = op->inputs[1]->value<int32_t>();
    // The chosen axis may be negative; normalize before indexing dimensions.
    axis = toPositiveAxis(axis, rank);
    (op->inputs[0]->dimensions[axis] % numGroups).setEqual(0);
}
434
// CHANNEL_SHUFFLE signature: input tensor, INT32 group count in [1, 5], and an
// INT32 axis scalar filled in by channelShuffleConstructor.
#define DEFINE_CHANNEL_SHUFFLE_SIGNATURE(ver, ...)                                   \
    DEFINE_OPERATION_SIGNATURE(CHANNEL_SHUFFLE_##ver){                               \
            .opType = TestOperationType::CHANNEL_SHUFFLE,                            \
            .supportedDataTypes = {__VA_ARGS__},                                     \
            .supportedRanks = {1, 2, 3, 4},                                          \
            .version = TestHalVersion::ver,                                          \
            .inputs = {INPUT_DEFAULT, PARAMETER_RANGE(TestOperandType::INT32, 1, 5), \
                       PARAMETER_NONE(TestOperandType::INT32)},                      \
            .outputs = {OUTPUT_DEFAULT},                                             \
            .constructor = channelShuffleConstructor};

DEFINE_CHANNEL_SHUFFLE_SIGNATURE(V1_2, TestOperandType::TENSOR_FLOAT32,
                                 TestOperandType::TENSOR_QUANT8_ASYMM,
                                 TestOperandType::TENSOR_FLOAT16);
DEFINE_CHANNEL_SHUFFLE_SIGNATURE(V1_3, TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED);
450
// Shape constructor for SQUEEZE with an explicit "axis" tensor: randomly picks
// axes (duplicates allowed), pins them to size 1 in the input, and copies the
// remaining free dims to the output in order.
static void squeezeConstructor(TestOperandType, uint32_t rank, RandomOperation* op) {
    // A boolean array indicating whether each dimension is selected to be squeezed.
    // NOTE(review): fixed size 4 assumes rank <= 4, which matches supportedRanks
    // of the SQUEEZE signatures in this file.
    bool squeeze[4] = {false, false, false, false};
    // The axis tensor length is random; repeated axes are harmless.
    uint32_t numAxis = getUniform<int32_t>(1, 10);
    op->inputs[1]->dimensions = {numAxis};
    op->inputs[1]->resizeBuffer<int32_t>(numAxis);
    for (uint32_t i = 0; i < numAxis; i++) {
        // Generate values for the "axis" tensor.
        int32_t dim = getUniform<int32_t>(0, rank - 1);
        op->inputs[1]->value<int32_t>(i) = dim;
        squeeze[dim] = true;
    }

    op->inputs[0]->dimensions.resize(rank);
    for (uint32_t i = 0; i < rank; i++) {
        if (squeeze[i]) {
            op->inputs[0]->dimensions[i] = 1;
        } else {
            op->inputs[0]->dimensions[i] = RandomVariableType::FREE;
            op->outputs[0]->dimensions.emplace_back(op->inputs[0]->dimensions[i]);
        }
    }
    setSameQuantization(op->outputs[0], op->inputs[0]);
}
475
// Shape constructor for SQUEEZE when the "axis" tensor is omitted: every
// dimension of size 1 gets squeezed, so non-squeezed dims are forced > 1.
static void squeezeOmittedConstructor(TestOperandType, uint32_t rank, RandomOperation* op) {
    // A boolean array indicating whether each dimension is selected to be squeezed.
    std::vector<bool> squeeze(rank, false);
    for (uint32_t i = 0; i < rank; i++) {
        squeeze[i] = getBernoulli(0.5f);
    }
    op->inputs[0]->dimensions.resize(rank);
    op->inputs[1]->dimensions = {0};
    for (uint32_t i = 0; i < rank; i++) {
        if (squeeze[i]) {
            op->inputs[0]->dimensions[i] = 1;
        } else {
            // Set the dimension to any value greater than 1 to prevent it from
            // getting squeezed.
            op->inputs[0]->dimensions[i] = RandomVariableType::FREE;
            op->inputs[0]->dimensions[i].setGreaterThan(1);
            op->outputs[0]->dimensions.emplace_back(op->inputs[0]->dimensions[i]);
        }
    }
    setSameQuantization(op->outputs[0], op->inputs[0]);
}
496
// SQUEEZE signatures: one with an explicit axis tensor and one with the axis
// operand omitted (PARAMETER_NO_VALUE).
#define DEFINE_SQUEEZE_SIGNATURE(ver, ...)                                                \
    DEFINE_OPERATION_SIGNATURE(SQUEEZE_##ver){                                            \
            .opType = TestOperationType::SQUEEZE,                                         \
            .supportedDataTypes = {__VA_ARGS__},                                          \
            .supportedRanks = {1, 2, 3, 4},                                               \
            .version = TestHalVersion::ver,                                               \
            .inputs = {INPUT_DEFAULT, PARAMETER_NONE(TestOperandType::TENSOR_INT32)},     \
            .outputs = {OUTPUT_DEFAULT},                                                  \
            .constructor = squeezeConstructor};                                           \
    DEFINE_OPERATION_SIGNATURE(SQUEEZE_omitted_##ver){                                    \
            .opType = TestOperationType::SQUEEZE,                                         \
            .supportedDataTypes = {__VA_ARGS__},                                          \
            .supportedRanks = {1, 2, 3, 4},                                               \
            .version = TestHalVersion::ver,                                               \
            .inputs = {INPUT_DEFAULT, PARAMETER_NO_VALUE(TestOperandType::TENSOR_INT32)}, \
            .outputs = {OUTPUT_DEFAULT},                                                  \
            .constructor = squeezeOmittedConstructor};

DEFINE_SQUEEZE_SIGNATURE(V1_1, TestOperandType::TENSOR_FLOAT32,
                         TestOperandType::TENSOR_QUANT8_ASYMM);
DEFINE_SQUEEZE_SIGNATURE(V1_2, TestOperandType::TENSOR_FLOAT16);
DEFINE_SQUEEZE_SIGNATURE(V1_3, TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED);
519
expandDimsConstructor(TestOperandType,uint32_t rank,RandomOperation * op)520 static void expandDimsConstructor(TestOperandType, uint32_t rank, RandomOperation* op) {
521 // Generate values for the "axis" tensor.
522 int32_t axis = getRandomAxis(rank + 1);
523 op->inputs[1]->setScalarValue<int32_t>(axis);
524 if (axis < 0) axis += static_cast<int32_t>(rank + 1);
525
526 setFreeDimensions(op->inputs[0], rank);
527 for (uint32_t i = 0; i < rank; i++) {
528 if (i == static_cast<uint32_t>(axis)) {
529 op->outputs[0]->dimensions.push_back(1);
530 }
531 op->outputs[0]->dimensions.push_back(op->inputs[0]->dimensions[i]);
532 }
533 if (rank == static_cast<uint32_t>(axis)) op->outputs[0]->dimensions.push_back(1);
534 setSameQuantization(op->outputs[0], op->inputs[0]);
535 }
536
// EXPAND_DIMS signature: input tensor plus an INT32 axis scalar filled in by
// expandDimsConstructor.
#define DEFINE_EXPAND_DIMS_SIGNATURE(ver, ...)                                 \
    DEFINE_OPERATION_SIGNATURE(EXPAND_DIMS_##ver){                             \
            .opType = TestOperationType::EXPAND_DIMS,                          \
            .supportedDataTypes = {__VA_ARGS__},                               \
            .supportedRanks = {1, 2, 3, 4, 5},                                 \
            .version = TestHalVersion::ver,                                    \
            .inputs = {INPUT_DEFAULT, PARAMETER_NONE(TestOperandType::INT32)}, \
            .outputs = {OUTPUT_DEFAULT},                                       \
            .constructor = expandDimsConstructor};

DEFINE_EXPAND_DIMS_SIGNATURE(V1_2, TestOperandType::TENSOR_FLOAT32, TestOperandType::TENSOR_FLOAT16,
                             TestOperandType::TENSOR_INT32, TestOperandType::TENSOR_QUANT8_ASYMM);
DEFINE_EXPAND_DIMS_SIGNATURE(V1_3, TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED);
550
tileConstructor(TestOperandType,uint32_t rank,RandomOperation * op)551 static void tileConstructor(TestOperandType, uint32_t rank, RandomOperation* op) {
552 setFreeDimensions(op->inputs[0], rank);
553 op->outputs[0]->dimensions.resize(rank);
554 op->inputs[1]->dimensions = {rank};
555 op->inputs[1]->resizeBuffer<int32_t>(rank);
556 for (uint32_t i = 0; i < rank; i++) {
557 int32_t multiple = getUniform<int32_t>(1, 5);
558 op->inputs[1]->value<int32_t>(i) = multiple;
559 op->outputs[0]->dimensions[i] = op->inputs[0]->dimensions[i] * multiple;
560 }
561 setSameQuantization(op->outputs[0], op->inputs[0]);
562 }
563
// TILE signature: input tensor plus a TENSOR_INT32 multiples vector filled in
// by tileConstructor.
#define DEFINE_TILE_SIGNATURE(ver, ...)                                               \
    DEFINE_OPERATION_SIGNATURE(TILE_##ver){                                           \
            .opType = TestOperationType::TILE,                                        \
            .supportedDataTypes = {__VA_ARGS__},                                      \
            .supportedRanks = {1, 2, 3, 4, 5},                                        \
            .version = TestHalVersion::ver,                                           \
            .inputs = {INPUT_DEFAULT, PARAMETER_NONE(TestOperandType::TENSOR_INT32)}, \
            .outputs = {OUTPUT_DEFAULT},                                              \
            .constructor = tileConstructor};

DEFINE_TILE_SIGNATURE(V1_2, TestOperandType::TENSOR_FLOAT32, TestOperandType::TENSOR_FLOAT16,
                      TestOperandType::TENSOR_INT32, TestOperandType::TENSOR_QUANT8_ASYMM);
DEFINE_TILE_SIGNATURE(V1_3, TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED);
577
// Shape constructor for FILL: inputs[0] is the shape tensor and inputs[1] the
// fill scalar. The shape tensor's random buffer is wired to the output
// dimensions so the generated shape values stay consistent with the output.
static void fillConstructor(TestOperandType, uint32_t rank, RandomOperation* op) {
    op->inputs[0]->dimensions = {rank};
    setFreeDimensions(op->outputs[0], rank);
    op->inputs[0]->randomBuffer = op->outputs[0]->dimensions;
}

DEFINE_OPERATION_SIGNATURE(FILL_V1_3){
        .opType = TestOperationType::FILL,
        .supportedDataTypes = {TestOperandType::TENSOR_FLOAT32, TestOperandType::TENSOR_FLOAT16,
                               TestOperandType::TENSOR_INT32},
        .supportedRanks = {1, 2, 3, 4, 5},
        .version = TestHalVersion::V1_3,
        .inputs = {PARAMETER_NONE(TestOperandType::TENSOR_INT32), INPUT_SCALAR},
        .outputs = {OUTPUT_DEFAULT},
        .constructor = fillConstructor};
593
// Shape constructor for RANK: only the input shape needs randomizing; the
// output is an INT32 scalar (see OUTPUT_TYPED below), so no output shape or
// quantization setup is required.
static void rankConstructor(TestOperandType, uint32_t rank, RandomOperation* op) {
    setFreeDimensions(op->inputs[0], rank);
}

DEFINE_OPERATION_SIGNATURE(RANK_V1_3){
        .opType = TestOperationType::RANK,
        .supportedDataTypes = {TestOperandType::TENSOR_FLOAT32, TestOperandType::TENSOR_FLOAT16,
                               TestOperandType::TENSOR_INT32, TestOperandType::TENSOR_QUANT8_ASYMM,
                               TestOperandType::TENSOR_BOOL8},
        .supportedRanks = {1, 2, 3, 4, 5},
        .version = TestHalVersion::V1_3,
        .inputs = {INPUT_DEFAULT},
        .outputs = {OUTPUT_TYPED(TestOperandType::INT32)},
        .constructor = rankConstructor};
608
609 } // namespace fuzzing_test
610 } // namespace nn
611 } // namespace android
612