1 /*
2 * Copyright (C) 2020 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #define LOG_TAG "neuralnetworks_hidl_hal_test"
18
19 #include <android-base/logging.h>
20 #include <gtest/gtest.h>
21
22 #include "1.3/Callbacks.h"
23 #include "1.3/Utils.h"
24 #include "GeneratedTestHarness.h"
25 #include "MemoryUtils.h"
26 #include "TestHarness.h"
27 #include "Utils.h"
28 #include "VtsHalNeuralnetworks.h"
29
30 namespace android::hardware::neuralnetworks::V1_3::vts::functional {
31
32 using namespace test_helper;
33 using implementation::ExecutionCallback;
34 using implementation::PreparedModelCallback;
35 using V1_0::RequestArgument;
36 using V1_1::ExecutionPreference;
37 using V1_2::Constant;
38 using V1_2::MeasureTiming;
39 using V1_2::OutputShape;
40 using V1_2::Timing;
41
42 namespace {
43
// All service instances to run the tests against.
const auto kNamedDeviceChoices = testing::ValuesIn(getNamedDevices());

// A 1.3 driver is likely to support at least one of the following operand types.
const std::vector<TestOperandType> kTestOperandTypeChoicesVector = {
        TestOperandType::TENSOR_FLOAT32,
        TestOperandType::TENSOR_FLOAT16,
        TestOperandType::TENSOR_QUANT8_ASYMM,
        TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED,
};
const auto kTestOperandTypeChoices = testing::ValuesIn(kTestOperandTypeChoicesVector);
54
isInChoices(TestOperandType type)55 bool isInChoices(TestOperandType type) {
56 return std::count(kTestOperandTypeChoicesVector.begin(), kTestOperandTypeChoicesVector.end(),
57 type) > 0;
58 }
59
isFloat(TestOperandType type)60 bool isFloat(TestOperandType type) {
61 CHECK(isInChoices(type));
62 return type == TestOperandType::TENSOR_FLOAT32 || type == TestOperandType::TENSOR_FLOAT16;
63 }
64
65 // Create dummy buffers for model constants as well as inputs and outputs.
66 // We only care about the size here because we will not check accuracy in validation tests.
createDummyData(TestModel * testModel)67 void createDummyData(TestModel* testModel) {
68 for (auto& operand : testModel->main.operands) {
69 if (operand.data != nullptr) continue;
70 switch (operand.lifetime) {
71 case TestOperandLifeTime::SUBGRAPH_INPUT:
72 case TestOperandLifeTime::SUBGRAPH_OUTPUT:
73 case TestOperandLifeTime::CONSTANT_COPY:
74 case TestOperandLifeTime::CONSTANT_REFERENCE: {
75 const uint32_t size = nn::nonExtensionOperandSizeOfData(
76 static_cast<OperandType>(operand.type), operand.dimensions);
77 operand.data = TestBuffer(size);
78 } break;
79 default:
80 break;
81 }
82 }
83 }
84
createInt32Scalar(int32_t value)85 TestOperand createInt32Scalar(int32_t value) {
86 return {
87 .type = TestOperandType::INT32,
88 .dimensions = {},
89 .numberOfConsumers = 1,
90 .scale = 0.0f,
91 .zeroPoint = 0,
92 .lifetime = TestOperandLifeTime::CONSTANT_COPY,
93 .data = TestBuffer::createFromVector<int32_t>({value}),
94 };
95 }
96
// Construct a test model with multiple CONV_2D operations with the given operand as inputs.
// The dimensions of the filters are chosen to ensure outputs has the same dimensions as inputs.
// We choose CONV_2D operation because it is commonly supported by most drivers.
TestModel createConvModel(const TestOperand& operand, uint32_t numOperations) {
    CHECK(isInChoices(operand.type));

    // 3x3 filter with as many output channels as input channels (operand.dimensions[3]),
    // so SAME padding with stride 1 preserves the input dimensions.
    TestOperand weight = {.type = operand.type,
                          .dimensions = {operand.dimensions[3], 3, 3, operand.dimensions[3]},
                          .numberOfConsumers = 1,
                          // Quantized types need a nonzero scale; float types use 0.
                          .scale = isFloat(operand.type) ? 0.0f : 1.0f,
                          .zeroPoint = 0,
                          .lifetime = TestOperandLifeTime::CONSTANT_COPY};

    // Bias is TENSOR_INT32 for quantized convs, with scale = inputScale * weightScale.
    TestOperand bias = {
            .type = isFloat(operand.type) ? operand.type : TestOperandType::TENSOR_INT32,
            .dimensions = {operand.dimensions[3]},
            .numberOfConsumers = 1,
            .scale = operand.scale * weight.scale,
            .zeroPoint = 0,
            .lifetime = TestOperandLifeTime::CONSTANT_COPY};

    TestOperand output = operand;
    output.numberOfConsumers = 0;
    output.lifetime = TestOperandLifeTime::SUBGRAPH_OUTPUT;

    // Operand template for one CONV_2D: input, weight, bias, 4 scalar params, output.
    const std::vector<TestOperand> operands = {
            operand,
            std::move(weight),
            std::move(bias),
            createInt32Scalar(1),  // same padding
            createInt32Scalar(1),  // width stride
            createInt32Scalar(1),  // height stride
            createInt32Scalar(0),  // activation = NONE
            std::move(output),
    };

    // Stamp out numOperations independent copies; each conv gets its own model
    // input (first operand of the group) and model output (last operand).
    TestModel model;
    for (uint32_t i = 0; i < numOperations; i++) {
        model.main.operands.insert(model.main.operands.end(), operands.begin(), operands.end());
        const uint32_t inputIndex = operands.size() * i;
        const uint32_t outputIndex = inputIndex + operands.size() - 1;
        std::vector<uint32_t> inputs(operands.size() - 1);
        std::iota(inputs.begin(), inputs.end(), inputIndex);
        model.main.operations.push_back({.type = TestOperationType::CONV_2D,
                                         .inputs = std::move(inputs),
                                         .outputs = {outputIndex}});
        model.main.inputIndexes.push_back(inputIndex);
        model.main.outputIndexes.push_back(outputIndex);
    }
    createDummyData(&model);
    return model;
}
149
// Construct a test model with a single ADD operation with the given operand as input0 and input1.
// This is to cover additional cases that the CONV_2D model does not support, e.g. arbitrary input
// operand rank, scalar input operand. We choose ADD operation because it is commonly supported by
// most drivers.
TestModel createSingleAddModel(const TestOperand& operand) {
    CHECK(isInChoices(operand.type));

    // Activation scalar is fed as a model input (SUBGRAPH_INPUT) rather than a
    // constant, so the model has three inputs in total.
    TestOperand act = {
            .type = TestOperandType::INT32,
            .dimensions = {},
            .numberOfConsumers = 1,
            .scale = 0.0f,
            .zeroPoint = 0,
            .lifetime = TestOperandLifeTime::SUBGRAPH_INPUT,
    };

    TestOperand output = operand;
    output.numberOfConsumers = 0;
    output.lifetime = TestOperandLifeTime::SUBGRAPH_OUTPUT;

    // Operands: 0 = input0, 1 = input1 (same operand), 2 = activation, 3 = output.
    TestModel model = {
            .main =
                    {
                            .operands =
                                    {
                                            operand,
                                            operand,
                                            std::move(act),
                                            output,
                                    },
                            .operations = {{.type = TestOperationType::ADD,
                                            .inputs = {0, 1, 2},
                                            .outputs = {3}}},
                            .inputIndexes = {0, 1, 2},
                            .outputIndexes = {3},
                    },
    };
    createDummyData(&model);
    return model;
}
190
// A dummy invalid IPreparedModel class for MemoryDomainAllocateTest.InvalidPreparedModel.
// Every method either reports GENERAL_FAILURE or returns without invoking its callback;
// IDevice::allocate is expected to reject roles referring to such a prepared model.
class InvalidPreparedModel : public IPreparedModel {
  public:
    Return<V1_0::ErrorStatus> execute(const V1_0::Request&,
                                      const sp<V1_0::IExecutionCallback>&) override {
        return V1_0::ErrorStatus::GENERAL_FAILURE;
    }
    Return<V1_0::ErrorStatus> execute_1_2(const V1_0::Request&, V1_2::MeasureTiming,
                                          const sp<V1_2::IExecutionCallback>&) override {
        return V1_0::ErrorStatus::GENERAL_FAILURE;
    }
    Return<V1_3::ErrorStatus> execute_1_3(const V1_3::Request&, V1_2::MeasureTiming,
                                          const V1_3::OptionalTimePoint&,
                                          const V1_3::OptionalTimeoutDuration&,
                                          const sp<V1_3::IExecutionCallback>&) override {
        return V1_3::ErrorStatus::GENERAL_FAILURE;
    }
    Return<void> executeSynchronously(const V1_0::Request&, V1_2::MeasureTiming,
                                      executeSynchronously_cb) override {
        return Void();
    }
    Return<void> executeSynchronously_1_3(const V1_3::Request&, V1_2::MeasureTiming,
                                          const V1_3::OptionalTimePoint&,
                                          const V1_3::OptionalTimeoutDuration&,
                                          executeSynchronously_1_3_cb) override {
        return Void();
    }
    Return<void> configureExecutionBurst(const sp<V1_2::IBurstCallback>&,
                                         const MQDescriptorSync<V1_2::FmqRequestDatum>&,
                                         const MQDescriptorSync<V1_2::FmqResultDatum>&,
                                         configureExecutionBurst_cb) override {
        return Void();
    }
    Return<void> executeFenced(const V1_3::Request&, const hidl_vec<hidl_handle>&,
                               V1_2::MeasureTiming, const V1_3::OptionalTimePoint&,
                               const V1_3::OptionalTimeoutDuration&,
                               const V1_3::OptionalTimeoutDuration&, executeFenced_cb) override {
        return Void();
    }
};
231
232 } // namespace
233
// Base fixture for all memory domain tests. Binds a device and a test operand type,
// and provides helpers to prepare small conv/add models on that device.
class MemoryDomainTestBase : public testing::Test {
  protected:
    MemoryDomainTestBase(sp<IDevice> device, TestOperandType type)
        : kDevice(std::move(device)),
          kTestOperandType(type),
          kTestOperand(kTestOperandMap.at(type)),
          kTestOperandDataSize(nn::nonExtensionOperandSizeOfData(static_cast<OperandType>(type),
                                                                 kTestOperand.dimensions)) {}

    // Verifies the device handle is valid and the service is responsive before each test.
    void SetUp() override {
        testing::Test::SetUp();
        ASSERT_NE(kDevice, nullptr);
        const bool deviceIsResponsive = kDevice->ping().isOk();
        ASSERT_TRUE(deviceIsResponsive);
    }

    // Prepares a model of |numOperations| CONV_2D ops on kDevice.
    // Returns nullptr if the driver does not support the model.
    sp<IPreparedModel> createConvPreparedModel(const TestOperand& testOperand,
                                               uint32_t numOperations = 1) {
        const TestModel testModel = createConvModel(testOperand, numOperations);
        const Model model = createModel(testModel);
        sp<IPreparedModel> preparedModel;
        createPreparedModel(kDevice, model, &preparedModel, /*reportSkipping=*/false);
        return preparedModel;
    }

    // Prepares a single-ADD model on kDevice. Returns nullptr if unsupported.
    sp<IPreparedModel> createAddPreparedModel(const TestOperand& testOperand) {
        const TestModel testModel = createSingleAddModel(testOperand);
        const Model model = createModel(testModel);
        sp<IPreparedModel> preparedModel;
        createPreparedModel(kDevice, model, &preparedModel, /*reportSkipping=*/false);
        return preparedModel;
    }

    // One canonical test operand per supported operand type (defined below).
    static const std::map<TestOperandType, TestOperand> kTestOperandMap;

    const sp<IDevice> kDevice;
    const TestOperandType kTestOperandType;
    // Reference into kTestOperandMap; safe because the map is a static with program lifetime.
    const TestOperand& kTestOperand;
    const uint32_t kTestOperandDataSize;
};
274
// Canonical {1, 32, 32, 8} SUBGRAPH_INPUT operand for each operand type under test.
// Quantized types use scale 0.5 so that ConflictScale can switch to a different scale (1.0).
const std::map<TestOperandType, TestOperand> MemoryDomainTestBase::kTestOperandMap = {
        {TestOperandType::TENSOR_FLOAT32,
         {
                 .type = TestOperandType::TENSOR_FLOAT32,
                 .dimensions = {1, 32, 32, 8},
                 .numberOfConsumers = 1,
                 .scale = 0.0f,
                 .zeroPoint = 0,
                 .lifetime = TestOperandLifeTime::SUBGRAPH_INPUT,
         }},
        {TestOperandType::TENSOR_FLOAT16,
         {
                 .type = TestOperandType::TENSOR_FLOAT16,
                 .dimensions = {1, 32, 32, 8},
                 .numberOfConsumers = 1,
                 .scale = 0.0f,
                 .zeroPoint = 0,
                 .lifetime = TestOperandLifeTime::SUBGRAPH_INPUT,
         }},
        {TestOperandType::TENSOR_QUANT8_ASYMM,
         {
                 .type = TestOperandType::TENSOR_QUANT8_ASYMM,
                 .dimensions = {1, 32, 32, 8},
                 .numberOfConsumers = 1,
                 .scale = 0.5f,
                 .zeroPoint = 0,
                 .lifetime = TestOperandLifeTime::SUBGRAPH_INPUT,
         }},
        {TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED,
         {
                 .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED,
                 .dimensions = {1, 32, 32, 8},
                 .numberOfConsumers = 1,
                 .scale = 0.5f,
                 .zeroPoint = 0,
                 .lifetime = TestOperandLifeTime::SUBGRAPH_INPUT,
         }},
};
313
using MemoryDomainAllocateTestParam = std::tuple<NamedDevice, TestOperandType>;

// Parameterized validation tests for IDevice::allocate, over (device, operand type).
class MemoryDomainAllocateTest : public MemoryDomainTestBase,
                                 public testing::WithParamInterface<MemoryDomainAllocateTestParam> {
  protected:
    MemoryDomainAllocateTest()
        : MemoryDomainTestBase(getData(std::get<NamedDevice>(GetParam())),
                               std::get<TestOperandType>(GetParam())) {}

    // Bundle of arguments forwarded to IDevice::allocate.
    struct AllocateTestArgs {
        hidl_vec<uint32_t> dimensions;
        hidl_vec<sp<IPreparedModel>> preparedModels;
        hidl_vec<BufferRole> inputRoles;
        hidl_vec<BufferRole> outputRoles;
    };

    // Validation test for IDevice::allocate. The driver is expected to fail with INVALID_ARGUMENT,
    // or GENERAL_FAILURE if memory domain is not supported.
    void validateAllocate(AllocateTestArgs args) {
        const auto ret = kDevice->allocate(
                {.dimensions = std::move(args.dimensions)}, std::move(args.preparedModels),
                std::move(args.inputRoles), std::move(args.outputRoles),
                [](ErrorStatus status, const sp<IBuffer>& buffer, uint32_t token) {
                    EXPECT_TRUE(status == ErrorStatus::INVALID_ARGUMENT ||
                                status == ErrorStatus::GENERAL_FAILURE);
                    EXPECT_EQ(buffer, nullptr);
                    EXPECT_EQ(token, 0);
                });
        ASSERT_TRUE(ret.isOk());
    }

    // Expects allocation to fail when the roles of |model1| and |model2| conflict,
    // in all three role combinations: input+input, input+output, output+output.
    void testConflictOperands(const sp<IPreparedModel>& model1, const sp<IPreparedModel>& model2) {
        validateAllocate({
                .preparedModels = {model1, model2},
                .inputRoles = {{.modelIndex = 0, .ioIndex = 0, .frequency = 1.0f},
                               {.modelIndex = 1, .ioIndex = 0, .frequency = 1.0f}},
        });
        validateAllocate({
                .preparedModels = {model1, model2},
                .inputRoles = {{.modelIndex = 0, .ioIndex = 0, .frequency = 1.0f}},
                .outputRoles = {{.modelIndex = 1, .ioIndex = 0, .frequency = 1.0f}},
        });
        validateAllocate({
                .preparedModels = {model1, model2},
                .outputRoles = {{.modelIndex = 0, .ioIndex = 0, .frequency = 1.0f},
                                {.modelIndex = 1, .ioIndex = 0, .frequency = 1.0f}},
        });
    }
};
362
// allocate() must be rejected when no input or output role is given.
TEST_P(MemoryDomainAllocateTest, EmptyRole) {
    // Test with empty prepared models and roles.
    validateAllocate({});

    auto preparedModel = createConvPreparedModel(kTestOperand);
    if (preparedModel == nullptr) return;

    // Test again with non-empty prepared models but empty roles.
    validateAllocate({
            .preparedModels = {preparedModel},
    });
}
375
// allocate() must be rejected when a role refers to a null prepared model.
TEST_P(MemoryDomainAllocateTest, NullptrPreparedModel) {
    // Test with nullptr prepared model as input role.
    validateAllocate({
            .preparedModels = {nullptr},
            .inputRoles = {{.modelIndex = 0, .ioIndex = 0, .frequency = 1.0f}},
    });

    // Test with nullptr prepared model as output role.
    validateAllocate({
            .preparedModels = {nullptr},
            .outputRoles = {{.modelIndex = 0, .ioIndex = 0, .frequency = 1.0f}},
    });
}
389
// allocate() must be rejected when a role refers to a prepared model that was
// not created by this driver (see InvalidPreparedModel above).
TEST_P(MemoryDomainAllocateTest, InvalidPreparedModel) {
    sp<InvalidPreparedModel> invalidPreparedModel = new InvalidPreparedModel();

    // Test with invalid prepared model as input role.
    validateAllocate({
            .preparedModels = {invalidPreparedModel},
            .inputRoles = {{.modelIndex = 0, .ioIndex = 0, .frequency = 1.0f}},
    });

    // Test with invalid prepared model as output role.
    validateAllocate({
            .preparedModels = {invalidPreparedModel},
            .outputRoles = {{.modelIndex = 0, .ioIndex = 0, .frequency = 1.0f}},
    });
}
405
// allocate() must be rejected when a role's modelIndex exceeds the number of
// prepared models supplied.
TEST_P(MemoryDomainAllocateTest, InvalidModelIndex) {
    auto preparedModel = createConvPreparedModel(kTestOperand);
    if (preparedModel == nullptr) return;

    // This should fail, because the model index is out of bound.
    validateAllocate({
            .preparedModels = {preparedModel},
            .inputRoles = {{.modelIndex = 1, .ioIndex = 0, .frequency = 1.0f}},
    });

    // This should fail, because the model index is out of bound.
    validateAllocate({
            .preparedModels = {preparedModel},
            .outputRoles = {{.modelIndex = 1, .ioIndex = 0, .frequency = 1.0f}},
    });
}
422
// allocate() must be rejected when a role's ioIndex exceeds the model's
// number of inputs/outputs (the conv model has exactly one of each).
TEST_P(MemoryDomainAllocateTest, InvalidIOIndex) {
    auto preparedModel = createConvPreparedModel(kTestOperand);
    if (preparedModel == nullptr) return;

    // This should fail, because the model only has one input.
    validateAllocate({
            .preparedModels = {preparedModel},
            .inputRoles = {{.modelIndex = 0, .ioIndex = 1, .frequency = 1.0f}},
    });

    // This should fail, because the model only has one output.
    validateAllocate({
            .preparedModels = {preparedModel},
            .outputRoles = {{.modelIndex = 0, .ioIndex = 1, .frequency = 1.0f}},
    });
}
439
// allocate() must be rejected for role frequencies outside (0.0, 1.0].
TEST_P(MemoryDomainAllocateTest, InvalidFrequency) {
    auto preparedModel = createConvPreparedModel(kTestOperand);
    if (preparedModel == nullptr) return;

    for (float invalidFreq : {10.0f, 0.0f, -0.5f}) {
        // Test with invalid frequency for input roles.
        validateAllocate({
                .preparedModels = {preparedModel},
                .inputRoles = {{.modelIndex = 0, .ioIndex = 0, .frequency = invalidFreq}},
        });
        // Test with invalid frequency for output roles.
        validateAllocate({
                .preparedModels = {preparedModel},
                .outputRoles = {{.modelIndex = 0, .ioIndex = 0, .frequency = invalidFreq}},
        });
    }
}
457
// allocate() must be rejected when the same (model, io) role appears twice,
// whether via the same model index or via duplicated prepared-model entries.
TEST_P(MemoryDomainAllocateTest, SameRoleSpecifiedTwice) {
    auto preparedModel = createConvPreparedModel(kTestOperand);
    if (preparedModel == nullptr) return;

    // Same role with same model index.
    validateAllocate({
            .preparedModels = {preparedModel},
            .inputRoles = {{.modelIndex = 0, .ioIndex = 0, .frequency = 1.0f},
                           {.modelIndex = 0, .ioIndex = 0, .frequency = 1.0f}},
    });
    validateAllocate({
            .preparedModels = {preparedModel},
            .outputRoles = {{.modelIndex = 0, .ioIndex = 0, .frequency = 1.0f},
                            {.modelIndex = 0, .ioIndex = 0, .frequency = 1.0f}},
    });

    // Different model indexes, but logically referring to the same role.
    validateAllocate({
            .preparedModels = {preparedModel, preparedModel},
            .inputRoles = {{.modelIndex = 0, .ioIndex = 0, .frequency = 1.0f},
                           {.modelIndex = 1, .ioIndex = 0, .frequency = 1.0f}},
    });
    validateAllocate({
            .preparedModels = {preparedModel, preparedModel},
            .outputRoles = {{.modelIndex = 0, .ioIndex = 0, .frequency = 1.0f},
                            {.modelIndex = 1, .ioIndex = 0, .frequency = 1.0f}},
    });
}
486
// allocate() must be rejected when two roles disagree on the operand type
// (each type is paired with a similar-but-different conflicting type).
TEST_P(MemoryDomainAllocateTest, ConflictOperandType) {
    const std::map<TestOperandType, TestOperandType> conflictTypeMap = {
            {TestOperandType::TENSOR_FLOAT32, TestOperandType::TENSOR_FLOAT16},
            {TestOperandType::TENSOR_FLOAT16, TestOperandType::TENSOR_FLOAT32},
            {TestOperandType::TENSOR_QUANT8_ASYMM, TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED},
            {TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, TestOperandType::TENSOR_QUANT8_ASYMM},
    };

    TestOperand conflictTestOperand = kTestOperand;
    const auto it = conflictTypeMap.find(kTestOperandType);
    ASSERT_FALSE(it == conflictTypeMap.end());
    conflictTestOperand.type = it->second;

    auto preparedModel = createConvPreparedModel(kTestOperand);
    auto conflictPreparedModel = createConvPreparedModel(conflictTestOperand);
    if (preparedModel == nullptr || conflictPreparedModel == nullptr) return;
    testConflictOperands(preparedModel, conflictPreparedModel);
}
505
// allocate() must be rejected when two quantized roles disagree on scale.
// Skipped for float types, where scale is not meaningful.
TEST_P(MemoryDomainAllocateTest, ConflictScale) {
    if (isFloat(kTestOperandType)) return;

    TestOperand conflictTestOperand = kTestOperand;
    ASSERT_NE(conflictTestOperand.scale, 1.0f);
    conflictTestOperand.scale = 1.0f;

    auto preparedModel = createConvPreparedModel(kTestOperand);
    auto conflictPreparedModel = createConvPreparedModel(conflictTestOperand);
    if (preparedModel == nullptr || conflictPreparedModel == nullptr) return;
    testConflictOperands(preparedModel, conflictPreparedModel);
}
518
// allocate() must be rejected when two quantized roles disagree on zero point.
// Skipped for float types, where zero point is not meaningful.
TEST_P(MemoryDomainAllocateTest, ConflictZeroPoint) {
    if (isFloat(kTestOperandType)) return;

    TestOperand conflictTestOperand = kTestOperand;
    ASSERT_NE(conflictTestOperand.zeroPoint, 10);
    conflictTestOperand.zeroPoint = 10;

    auto preparedModel = createConvPreparedModel(kTestOperand);
    auto conflictPreparedModel = createConvPreparedModel(conflictTestOperand);
    if (preparedModel == nullptr || conflictPreparedModel == nullptr) return;
    testConflictOperands(preparedModel, conflictPreparedModel);
}
531
// allocate() must be rejected when two roles disagree on operand rank.
// Uses the ADD model because it accepts arbitrary-rank inputs.
TEST_P(MemoryDomainAllocateTest, ConflictRankBetweenRoles) {
    TestOperand conflictTestOperand = kTestOperand;
    conflictTestOperand.dimensions.pop_back();

    auto preparedModel = createAddPreparedModel(kTestOperand);
    auto conflictPreparedModel = createAddPreparedModel(conflictTestOperand);
    if (preparedModel == nullptr || conflictPreparedModel == nullptr) return;
    testConflictOperands(preparedModel, conflictPreparedModel);
}
541
// allocate() must be rejected when two roles disagree on a dimension value.
TEST_P(MemoryDomainAllocateTest, ConflictDimensionsBetweenRoles) {
    TestOperand conflictTestOperand = kTestOperand;
    conflictTestOperand.dimensions[0] = 4;

    auto preparedModel = createConvPreparedModel(kTestOperand);
    auto conflictPreparedModel = createConvPreparedModel(conflictTestOperand);
    if (preparedModel == nullptr || conflictPreparedModel == nullptr) return;
    testConflictOperands(preparedModel, conflictPreparedModel);
}
551
// allocate() must be rejected when the descriptor's rank disagrees with the role's.
TEST_P(MemoryDomainAllocateTest, ConflictRankBetweenRoleAndDesc) {
    auto preparedModel = createConvPreparedModel(kTestOperand);
    if (preparedModel == nullptr) return;

    auto badDimensions = kTestOperand.dimensions;
    badDimensions.pop_back();

    validateAllocate({
            .dimensions = badDimensions,
            .preparedModels = {preparedModel},
            .inputRoles = {{.modelIndex = 0, .ioIndex = 0, .frequency = 1.0f}},
    });
    validateAllocate({
            .dimensions = badDimensions,
            .preparedModels = {preparedModel},
            .outputRoles = {{.modelIndex = 0, .ioIndex = 0, .frequency = 1.0f}},
    });
}
570
// allocate() must be rejected when the descriptor's dimensions disagree with the role's.
TEST_P(MemoryDomainAllocateTest, ConflictDimensionsBetweenRoleAndDesc) {
    auto preparedModel = createConvPreparedModel(kTestOperand);
    if (preparedModel == nullptr) return;

    auto badDimensions = kTestOperand.dimensions;
    badDimensions[0] = 4;

    validateAllocate({
            .dimensions = badDimensions,
            .preparedModels = {preparedModel},
            .inputRoles = {{.modelIndex = 0, .ioIndex = 0, .frequency = 1.0f}},
    });
    validateAllocate({
            .dimensions = badDimensions,
            .preparedModels = {preparedModel},
            .outputRoles = {{.modelIndex = 0, .ioIndex = 0, .frequency = 1.0f}},
    });
}
589
// allocate() must be rejected when a non-empty descriptor dimension is given for a
// scalar role (ioIndex 2 of the ADD model is the INT32 activation scalar input).
TEST_P(MemoryDomainAllocateTest, ConflictRankWithScalarRole) {
    auto preparedModel = createAddPreparedModel(kTestOperand);
    if (preparedModel == nullptr) return;

    // This should fail, because the target operand is a scalar but a non-empty dimension is
    // specified.
    validateAllocate({
            .dimensions = {1},
            .preparedModels = {preparedModel},
            .inputRoles = {{.modelIndex = 0, .ioIndex = 2, .frequency = 1.0f}},
    });
}
602
printMemoryDomainAllocateTest(const testing::TestParamInfo<MemoryDomainAllocateTestParam> & info)603 std::string printMemoryDomainAllocateTest(
604 const testing::TestParamInfo<MemoryDomainAllocateTestParam>& info) {
605 const auto& [namedDevice, operandType] = info.param;
606 const std::string type = toString(static_cast<OperandType>(operandType));
607 return gtestCompliantName(getName(namedDevice) + "_" + type);
608 }
609
// Instantiate the allocate validation suite over every (device, operand type) pair.
GTEST_ALLOW_UNINSTANTIATED_PARAMETERIZED_TEST(MemoryDomainAllocateTest);
INSTANTIATE_TEST_SUITE_P(TestMemoryDomain, MemoryDomainAllocateTest,
                         testing::Combine(kNamedDeviceChoices, kTestOperandTypeChoices),
                         printMemoryDomainAllocateTest);
614
// Base fixture for IBuffer::copyFrom/copyTo tests. Adds helpers to allocate a
// device buffer for a single prepared model and to exercise the copy APIs.
class MemoryDomainCopyTestBase : public MemoryDomainTestBase {
  protected:
    MemoryDomainCopyTestBase(sp<IDevice> device, TestOperandType type)
        : MemoryDomainTestBase(std::move(device), type) {}

    // Allocates device memory for roles of a single prepared model.
    // Returns {IBuffer, token} if success; returns {nullptr, 0} if not supported.
    std::pair<sp<IBuffer>, uint32_t> allocateBuffer(const sp<IPreparedModel>& preparedModel,
                                                    const std::vector<uint32_t>& inputIndexes,
                                                    const std::vector<uint32_t>& outputIndexes,
                                                    const std::vector<uint32_t>& dimensions) {
        if (preparedModel == nullptr) {
            return {nullptr, 0};
        }

        // Every role targets model index 0 with frequency 1.0.
        hidl_vec<BufferRole> inputRoles(inputIndexes.size()), outputRoles(outputIndexes.size());
        auto trans = [](uint32_t ind) -> BufferRole {
            return {.modelIndex = 0, .ioIndex = ind, .frequency = 1.0f};
        };
        std::transform(inputIndexes.begin(), inputIndexes.end(), inputRoles.begin(), trans);
        std::transform(outputIndexes.begin(), outputIndexes.end(), outputRoles.begin(), trans);

        sp<IBuffer> buffer;
        uint32_t token = 0;
        const auto ret = kDevice->allocate(
                {.dimensions = dimensions}, {preparedModel}, std::move(inputRoles),
                std::move(outputRoles),
                [&buffer, &token](ErrorStatus err, const sp<IBuffer>& buf, uint32_t tok) {
                    if (err == ErrorStatus::NONE) {
                        // Success: the driver must return a buffer and a nonzero token.
                        EXPECT_NE(buf, nullptr);
                        EXPECT_GT(tok, 0);
                        buffer = buf;
                        token = tok;
                    } else {
                        // Memory domain unsupported: only GENERAL_FAILURE is acceptable here.
                        EXPECT_EQ(err, ErrorStatus::GENERAL_FAILURE);
                        EXPECT_EQ(buf, nullptr);
                        EXPECT_EQ(tok, 0);
                    }
                });
        EXPECT_TRUE(ret.isOk());
        return {std::move(buffer), token};
    }

    // Overload with no descriptor dimensions (fully specified by the roles).
    std::pair<sp<IBuffer>, uint32_t> allocateBuffer(const sp<IPreparedModel>& preparedModel,
                                                    const std::vector<uint32_t>& inputIndexes,
                                                    const std::vector<uint32_t>& outputIndexes) {
        return allocateBuffer(preparedModel, inputIndexes, outputIndexes, {});
    }

    // Allocates ashmem of exactly |size| bytes and verifies the allocation size.
    hidl_memory allocateSharedMemory(uint32_t size) {
        hidl_memory memory = nn::allocateSharedMemory(size);
        EXPECT_EQ(memory.size(), size);
        return memory;
    }

    // Calls IBuffer::copyFrom and checks the returned status.
    void testCopyFrom(const sp<IBuffer>& buffer, const hidl_memory& memory,
                      const std::vector<uint32_t>& dimensions, ErrorStatus expectedStatus) {
        const auto ret = buffer->copyFrom(memory, dimensions);
        ASSERT_TRUE(ret.isOk());
        ASSERT_EQ(static_cast<ErrorStatus>(ret), expectedStatus);
    }

    // Calls IBuffer::copyTo and checks the returned status.
    void testCopyTo(const sp<IBuffer>& buffer, const hidl_memory& memory,
                    ErrorStatus expectedStatus) {
        const auto ret = buffer->copyTo(memory);
        ASSERT_TRUE(ret.isOk());
        ASSERT_EQ(static_cast<ErrorStatus>(ret), expectedStatus);
    }

    // Writes kTestOperand-sized shared memory into |buffer| so that subsequent
    // copyTo calls operate on initialized device memory.
    void initializeDeviceMemory(const sp<IBuffer>& buffer) {
        hidl_memory memory = nn::allocateSharedMemory(kTestOperandDataSize);
        ASSERT_EQ(memory.size(), kTestOperandDataSize);
        testCopyFrom(buffer, memory, kTestOperand.dimensions, ErrorStatus::NONE);
    }
};
690
using MemoryDomainCopyTestParam = std::tuple<NamedDevice, TestOperandType>;

// Parameterized fixture for IBuffer copy validation tests, over (device, operand type).
class MemoryDomainCopyTest : public MemoryDomainCopyTestBase,
                             public testing::WithParamInterface<MemoryDomainCopyTestParam> {
  protected:
    MemoryDomainCopyTest()
        : MemoryDomainCopyTestBase(getData(std::get<NamedDevice>(GetParam())),
                                   std::get<TestOperandType>(GetParam())) {}
};
699
// copyFrom() must fail with INVALID_ARGUMENT when the source memory size does
// not match the buffer's operand size.
TEST_P(MemoryDomainCopyTest, CopyFrom_InvalidMemorySize) {
    auto preparedModel = createConvPreparedModel(kTestOperand);
    auto [buffer, token] = allocateBuffer(preparedModel, {0}, {0});
    if (buffer == nullptr) return;

    uint32_t badMemorySize1 = kTestOperandDataSize / 2, badMemorySize2 = kTestOperandDataSize * 2;
    hidl_memory badMemory1 = allocateSharedMemory(badMemorySize1);
    hidl_memory badMemory2 = allocateSharedMemory(badMemorySize2);
    testCopyFrom(buffer, badMemory1, {}, ErrorStatus::INVALID_ARGUMENT);
    testCopyFrom(buffer, badMemory2, {}, ErrorStatus::INVALID_ARGUMENT);
}
711
// copyFrom() size/dimension validation when the buffer was allocated with a
// dynamic (zero) batch dimension: explicit full dimensions are required, and
// mismatched sizes or dimensions must be rejected.
TEST_P(MemoryDomainCopyTest, CopyFrom_InvalidMemorySize_DynamicShape) {
    TestOperand testOperand = kTestOperand;
    testOperand.dimensions[0] = 0;  // dynamic batch dimension
    auto preparedModel = createConvPreparedModel(testOperand);
    auto [buffer, token] = allocateBuffer(preparedModel, {0}, {0});
    if (buffer == nullptr) return;

    uint32_t badMemorySize1 = kTestOperandDataSize / 2, badMemorySize2 = kTestOperandDataSize * 2;
    hidl_memory badMemory1 = allocateSharedMemory(badMemorySize1);
    hidl_memory badMemory2 = allocateSharedMemory(badMemorySize2);
    hidl_memory goodMemory = allocateSharedMemory(kTestOperandDataSize);

    auto badDimensions = kTestOperand.dimensions;
    badDimensions[0] = 2;  // would require twice the memory size

    testCopyFrom(buffer, badMemory1, kTestOperand.dimensions, ErrorStatus::INVALID_ARGUMENT);
    testCopyFrom(buffer, badMemory2, kTestOperand.dimensions, ErrorStatus::INVALID_ARGUMENT);
    testCopyFrom(buffer, goodMemory, kTestOperand.dimensions, ErrorStatus::NONE);
    testCopyFrom(buffer, goodMemory, badDimensions, ErrorStatus::INVALID_ARGUMENT);
}
732
// copyFrom() must reject dimensions that disagree with the buffer's fully
// specified shape (wrong rank, wrong value, or zero), while accepting either
// empty dimensions or the exact shape.
TEST_P(MemoryDomainCopyTest, CopyFrom_InvalidDimensions) {
    auto preparedModel = createConvPreparedModel(kTestOperand);
    auto [buffer, token] = allocateBuffer(preparedModel, {0}, {0});
    if (buffer == nullptr) return;

    hidl_memory memory = allocateSharedMemory(kTestOperandDataSize);

    std::vector<uint32_t> badDimensions;
    badDimensions = kTestOperand.dimensions;
    badDimensions.pop_back();  // wrong rank
    testCopyFrom(buffer, memory, badDimensions, ErrorStatus::INVALID_ARGUMENT);

    badDimensions = kTestOperand.dimensions;
    badDimensions[0] = 2;  // wrong batch size
    testCopyFrom(buffer, memory, badDimensions, ErrorStatus::INVALID_ARGUMENT);

    badDimensions = kTestOperand.dimensions;
    badDimensions[0] = 0;  // zero dimension is not allowed here
    testCopyFrom(buffer, memory, badDimensions, ErrorStatus::INVALID_ARGUMENT);

    // Empty dimensions and the exact shape are both acceptable.
    testCopyFrom(buffer, memory, {}, ErrorStatus::NONE);
    testCopyFrom(buffer, memory, kTestOperand.dimensions, ErrorStatus::NONE);
}
756
// copyFrom() dimension validation for a buffer allocated with a dynamic batch
// dimension: the full shape must be supplied (empty dimensions rejected), and
// any mismatched or zero dimension must be rejected.
TEST_P(MemoryDomainCopyTest, CopyFrom_InvalidDimensions_DynamicShape) {
    TestOperand testOperand = kTestOperand;
    testOperand.dimensions[0] = 0;  // dynamic batch dimension
    auto preparedModel = createConvPreparedModel(testOperand);
    auto [buffer, token] = allocateBuffer(preparedModel, {0}, {0});
    if (buffer == nullptr) return;

    hidl_memory memory = allocateSharedMemory(kTestOperandDataSize);

    std::vector<uint32_t> badDimensions;
    badDimensions = kTestOperand.dimensions;
    badDimensions.pop_back();  // wrong rank
    testCopyFrom(buffer, memory, badDimensions, ErrorStatus::INVALID_ARGUMENT);

    badDimensions = kTestOperand.dimensions;
    badDimensions[0] = 2;
    badDimensions[3] = 4;  // mismatches the fixed channel dimension
    testCopyFrom(buffer, memory, badDimensions, ErrorStatus::INVALID_ARGUMENT);

    badDimensions = kTestOperand.dimensions;
    badDimensions[0] = 1;
    badDimensions[3] = 0;  // zero dimension is not allowed
    testCopyFrom(buffer, memory, badDimensions, ErrorStatus::INVALID_ARGUMENT);

    // With a dynamic dimension, empty dimensions are insufficient — the full
    // shape must be provided.
    testCopyFrom(buffer, memory, {}, ErrorStatus::INVALID_ARGUMENT);
    testCopyFrom(buffer, memory, kTestOperand.dimensions, ErrorStatus::NONE);
}
784
// Reading back a device buffer that has never been written must fail with
// GENERAL_FAILURE.
TEST_P(MemoryDomainCopyTest, CopyTo_UninitializedMemory) {
    auto preparedModel = createConvPreparedModel(kTestOperand);
    auto [buffer, token] = allocateBuffer(preparedModel, {0}, {0});
    if (buffer == nullptr) return;

    const hidl_memory destination = allocateSharedMemory(kTestOperandDataSize);
    testCopyTo(buffer, destination, ErrorStatus::GENERAL_FAILURE);
}
793
// copyTo must validate that the destination memory size matches the buffer;
// both under-sized and over-sized destinations are rejected.
TEST_P(MemoryDomainCopyTest, CopyTo_InvalidMemorySize) {
    auto preparedModel = createConvPreparedModel(kTestOperand);
    auto [buffer, token] = allocateBuffer(preparedModel, {0}, {0});
    if (buffer == nullptr) return;

    const hidl_memory tooSmall = allocateSharedMemory(kTestOperandDataSize / 2);
    const hidl_memory tooLarge = allocateSharedMemory(kTestOperandDataSize * 2);
    const hidl_memory exactFit = allocateSharedMemory(kTestOperandDataSize);

    initializeDeviceMemory(buffer);
    testCopyTo(buffer, tooSmall, ErrorStatus::INVALID_ARGUMENT);
    testCopyTo(buffer, tooLarge, ErrorStatus::INVALID_ARGUMENT);
    testCopyTo(buffer, exactFit, ErrorStatus::NONE);
}
809
// Same memory-size validation as CopyTo_InvalidMemorySize, but for a buffer
// allocated with a dynamic (zero) first dimension.
TEST_P(MemoryDomainCopyTest, CopyTo_InvalidMemorySize_DynamicShape) {
    TestOperand testOperand = kTestOperand;
    testOperand.dimensions[0] = 0;
    auto preparedModel = createConvPreparedModel(testOperand);
    auto [buffer, token] = allocateBuffer(preparedModel, {0}, {0});
    if (buffer == nullptr) return;

    const hidl_memory tooSmall = allocateSharedMemory(kTestOperandDataSize / 2);
    const hidl_memory tooLarge = allocateSharedMemory(kTestOperandDataSize * 2);
    const hidl_memory exactFit = allocateSharedMemory(kTestOperandDataSize);

    initializeDeviceMemory(buffer);
    testCopyTo(buffer, tooSmall, ErrorStatus::INVALID_ARGUMENT);
    testCopyTo(buffer, tooLarge, ErrorStatus::INVALID_ARGUMENT);
    testCopyTo(buffer, exactFit, ErrorStatus::NONE);
}
827
printMemoryDomainCopyTest(const testing::TestParamInfo<MemoryDomainCopyTestParam> & info)828 std::string printMemoryDomainCopyTest(
829 const testing::TestParamInfo<MemoryDomainCopyTestParam>& info) {
830 const auto& [namedDevice, operandType] = info.param;
831 const std::string type = toString(static_cast<OperandType>(operandType));
832 return gtestCompliantName(getName(namedDevice) + "_" + type);
833 }
834
// Instantiate the copy tests over every (device, operand type) combination.
// Allow the suite to end up with no instances (e.g. no devices registered)
// without triggering gtest's uninstantiated-suite error.
GTEST_ALLOW_UNINSTANTIATED_PARAMETERIZED_TEST(MemoryDomainCopyTest);
INSTANTIATE_TEST_SUITE_P(TestMemoryDomain, MemoryDomainCopyTest,
                         testing::Combine(kNamedDeviceChoices, kTestOperandTypeChoices),
                         printMemoryDomainCopyTest);
839
// Execution tests are parameterized over (device, operand type, executor) so
// each memory-domain scenario is exercised on every execution path.
using MemoryDomainExecutionTestParam = std::tuple<NamedDevice, TestOperandType, Executor>;
// Fixture that drives full executions against device memories allocated via
// the memory domain APIs, using the executor selected by the test parameter.
class MemoryDomainExecutionTest
    : public MemoryDomainCopyTestBase,
      public testing::WithParamInterface<MemoryDomainExecutionTestParam> {
  protected:
    MemoryDomainExecutionTest()
        : MemoryDomainCopyTestBase(getData(std::get<NamedDevice>(GetParam())),
                                   std::get<TestOperandType>(GetParam())) {}

    // Creates a request memory pool backed by freshly allocated shared memory
    // of `size` bytes.
    Request::MemoryPool createSharedMemoryPool(uint32_t size) {
        hidl_memory memory = allocateSharedMemory(size);
        Request::MemoryPool pool;
        pool.hidlMemory(memory);
        return pool;
    }

    // Creates a request memory pool that refers to a driver-managed buffer by
    // its allocation token.
    Request::MemoryPool createDeviceMemoryPool(uint32_t token) {
        Request::MemoryPool pool;
        pool.token(token);
        return pool;
    }

    // Runs `request` on `preparedModel` with the executor chosen by the test
    // parameter and checks that the reported status equals `expectedStatus`.
    void testExecution(const sp<IPreparedModel>& preparedModel, const Request& request,
                       ErrorStatus expectedStatus) {
        switch (kExecutor) {
            case Executor::ASYNC:
                EXPECT_EQ(executeAsync(preparedModel, request), expectedStatus);
                break;
            case Executor::SYNC:
                EXPECT_EQ(executeSync(preparedModel, request), expectedStatus);
                break;
            case Executor::FENCED:
                EXPECT_EQ(executeFenced(preparedModel, request), expectedStatus);
                break;
            default:
                ASSERT_TRUE(false);
        }
    }

    // Runs `request` via the asynchronous execute_1_3 path and returns the
    // final execution status reported through the callback.
    ErrorStatus executeAsync(const sp<IPreparedModel>& preparedModel, const Request& request) {
        ErrorStatus executionStatus;

        // launch execution; the empty {} arguments leave the optional deadline
        // and loop-timeout parameters unset
        sp<ExecutionCallback> executionCallback = new ExecutionCallback();
        const auto ret =
                preparedModel->execute_1_3(request, MeasureTiming::NO, {}, {}, executionCallback);
        EXPECT_TRUE(ret.isOk());
        executionStatus = static_cast<ErrorStatus>(ret);

        // retrieve execution status; a synchronous launch failure must be
        // mirrored by the callback status
        executionCallback->wait();
        if (executionStatus == ErrorStatus::NONE) {
            executionStatus = executionCallback->getStatus();
        } else {
            EXPECT_EQ(executionStatus, executionCallback->getStatus());
        }
        // MeasureTiming::NO was requested, so both timings must be unmeasured
        // (UINT64_MAX).
        const auto timing = executionCallback->getTiming();
        EXPECT_EQ(UINT64_MAX, timing.timeOnDevice);
        EXPECT_EQ(UINT64_MAX, timing.timeInDriver);
        // A failed execution must not report output shapes.
        if (executionStatus != ErrorStatus::NONE) {
            EXPECT_EQ(executionCallback->getOutputShapes().size(), 0);
        }
        return executionStatus;
    }

    // Runs `request` via executeSynchronously_1_3 and returns the status that
    // the driver reported through the result callback.
    ErrorStatus executeSync(const sp<IPreparedModel>& preparedModel, const Request& request) {
        ErrorStatus executionStatus;
        const auto ret = preparedModel->executeSynchronously_1_3(
                request, MeasureTiming::NO, {}, {},
                [&executionStatus](ErrorStatus error, const hidl_vec<OutputShape>& shapes,
                                   const Timing& time) {
                    executionStatus = error;
                    // MeasureTiming::NO: timings must be unmeasured.
                    EXPECT_EQ(UINT64_MAX, time.timeOnDevice);
                    EXPECT_EQ(UINT64_MAX, time.timeInDriver);
                    // A failed execution must not report output shapes.
                    if (executionStatus != ErrorStatus::NONE) {
                        EXPECT_EQ(shapes.size(), 0);
                    }
                });
        EXPECT_TRUE(ret.isOk());
        return executionStatus;
    }

    // Runs `request` via executeFenced, waits on the returned sync fence (if
    // any), then queries the fenced callback for the final status.
    ErrorStatus executeFenced(const sp<IPreparedModel>& preparedModel, const Request& request) {
        ErrorStatus executionStatus;
        hidl_handle syncFenceHandle;
        sp<IFencedExecutionCallback> fencedCallback;
        const auto callbackFunc = [&executionStatus, &syncFenceHandle, &fencedCallback](
                                          ErrorStatus error, const hidl_handle& handle,
                                          const sp<IFencedExecutionCallback>& callback) {
            executionStatus = error;
            syncFenceHandle = handle;
            fencedCallback = callback;
        };
        Return<void> ret = preparedModel->executeFenced(request, {}, MeasureTiming::NO, {}, {}, {},
                                                        callbackFunc);
        EXPECT_TRUE(ret.isOk());
        // On launch failure, neither a sync fence nor a callback may be
        // returned.
        if (executionStatus != ErrorStatus::NONE) {
            EXPECT_EQ(syncFenceHandle.getNativeHandle(), nullptr);
            EXPECT_EQ(fencedCallback, nullptr);
            return executionStatus;
        }
        // Wait for the execution to complete before querying its result.
        if (syncFenceHandle.getNativeHandle()) {
            waitForSyncFence(syncFenceHandle.getNativeHandle()->data[0]);
        }
        EXPECT_NE(fencedCallback, nullptr);
        ret = fencedCallback->getExecutionInfo(
                [&executionStatus](ErrorStatus error, Timing t, Timing) {
                    executionStatus = error;
                    // MeasureTiming::NO: timings must be unmeasured.
                    EXPECT_EQ(UINT64_MAX, t.timeOnDevice);
                    EXPECT_EQ(UINT64_MAX, t.timeInDriver);
                });
        EXPECT_TRUE(ret.isOk());
        return executionStatus;
    }

    // Execution path selected by the test parameter.
    const Executor kExecutor = std::get<Executor>(GetParam());
};
957
// Executions referencing a device memory token that does not correspond to an
// allocated buffer must be rejected, for both input and output roles.
TEST_P(MemoryDomainExecutionTest, InvalidToken) {
    auto preparedModel = createConvPreparedModel(kTestOperand);
    if (preparedModel == nullptr) return;

    Request::MemoryPool sharedPool = createSharedMemoryPool(kTestOperandDataSize);
    const Request::MemoryPool badDevicePools[] = {
            createDeviceMemoryPool(0),    // 0 is the invalid token.
            createDeviceMemoryPool(100),  // No buffer was allocated with this token.
    };
    RequestArgument sharedArg = {
            .location = {.poolIndex = 0, .offset = 0, .length = kTestOperandDataSize}};
    RequestArgument deviceArg = {.location = {.poolIndex = 1}};

    // Bad token used for an input.
    for (const auto& badDevicePool : badDevicePools) {
        testExecution(preparedModel,
                      {.inputs = {deviceArg},
                       .outputs = {sharedArg},
                       .pools = {sharedPool, badDevicePool}},
                      ErrorStatus::INVALID_ARGUMENT);
    }
    // Bad token used for an output.
    for (const auto& badDevicePool : badDevicePools) {
        testExecution(preparedModel,
                      {.inputs = {sharedArg},
                       .outputs = {deviceArg},
                       .pools = {sharedPool, badDevicePool}},
                      ErrorStatus::INVALID_ARGUMENT);
    }
}
990
// A device memory may only be used with the prepared model it was allocated
// for; using it with a different (otherwise valid) prepared model must fail.
TEST_P(MemoryDomainExecutionTest, InvalidPreparedModel) {
    auto preparedModel = createConvPreparedModel(kTestOperand);
    auto [buffer, token] = allocateBuffer(preparedModel, {0}, {0});
    if (buffer == nullptr) return;
    auto unrelatedModel = createConvPreparedModel(kTestOperand);
    if (unrelatedModel == nullptr) return;

    Request::MemoryPool sharedPool = createSharedMemoryPool(kTestOperandDataSize);
    Request::MemoryPool devicePool = createDeviceMemoryPool(token);
    RequestArgument sharedArg = {
            .location = {.poolIndex = 0, .offset = 0, .length = kTestOperandDataSize}};
    RequestArgument deviceArg = {.location = {.poolIndex = 1}};

    // The buffer was not allocated for unrelatedModel, so both uses must fail.
    initializeDeviceMemory(buffer);
    // Device memory as an input of the unrelated model.
    testExecution(unrelatedModel,
                  {.inputs = {deviceArg},
                   .outputs = {sharedArg},
                   .pools = {sharedPool, devicePool}},
                  ErrorStatus::INVALID_ARGUMENT);
    // Device memory as an output of the unrelated model.
    testExecution(unrelatedModel,
                  {.inputs = {sharedArg},
                   .outputs = {deviceArg},
                   .pools = {sharedPool, devicePool}},
                  ErrorStatus::INVALID_ARGUMENT);
}
1017
// A device memory is allocated for specific input/output indexes of the
// model; referencing it from a different index must be rejected.
TEST_P(MemoryDomainExecutionTest, InvalidIOIndex) {
    auto preparedModel = createConvPreparedModel(kTestOperand, 2);
    // The buffer is allocated for input 0 only.
    auto [buffer, token] = allocateBuffer(preparedModel, {0}, {});
    if (buffer == nullptr) return;

    Request::MemoryPool sharedPool1 = createSharedMemoryPool(kTestOperandDataSize);
    Request::MemoryPool sharedPool2 = createSharedMemoryPool(kTestOperandDataSize);
    Request::MemoryPool sharedPool3 = createSharedMemoryPool(kTestOperandDataSize);
    Request::MemoryPool devicePool = createDeviceMemoryPool(token);

    // Shared-memory arguments reference pools 0..2; the device memory is pool 3.
    const auto sharedArg = [this](uint32_t poolIndex) {
        return RequestArgument{
                .location = {.poolIndex = poolIndex, .offset = 0, .length = kTestOperandDataSize}};
    };
    RequestArgument deviceArg = {.location = {.poolIndex = 3}};

    initializeDeviceMemory(buffer);
    // Device memory used as input 1, but it was not allocated for input 1.
    testExecution(preparedModel,
                  {.inputs = {sharedArg(0), deviceArg},
                   .outputs = {sharedArg(1), sharedArg(2)},
                   .pools = {sharedPool1, sharedPool2, sharedPool3, devicePool}},
                  ErrorStatus::INVALID_ARGUMENT);

    // Device memory used as output 1, but it was not allocated for output 1.
    testExecution(preparedModel,
                  {.inputs = {sharedArg(0), sharedArg(1)},
                   .outputs = {sharedArg(2), deviceArg},
                   .pools = {sharedPool1, sharedPool2, sharedPool3, devicePool}},
                  ErrorStatus::INVALID_ARGUMENT);
}
1050
// A device memory carries the role it was allocated for; an input-allocated
// buffer used as an output (or vice versa) must be rejected.
TEST_P(MemoryDomainExecutionTest, InvalidIOType) {
    auto preparedModel = createConvPreparedModel(kTestOperand);
    auto [inputBuffer, inputToken] = allocateBuffer(preparedModel, {0}, {});
    auto [outputBuffer, outputToken] = allocateBuffer(preparedModel, {}, {0});
    if (inputBuffer == nullptr || outputBuffer == nullptr) return;

    Request::MemoryPool sharedPool = createSharedMemoryPool(kTestOperandDataSize);
    Request::MemoryPool devicePool = createDeviceMemoryPool(inputToken);
    RequestArgument sharedArg = {
            .location = {.poolIndex = 0, .offset = 0, .length = kTestOperandDataSize}};
    RequestArgument deviceArg = {.location = {.poolIndex = 1}};

    // Input-role buffer used as an output.
    testExecution(preparedModel,
                  {.inputs = {sharedArg},
                   .outputs = {deviceArg},
                   .pools = {sharedPool, devicePool}},
                  ErrorStatus::INVALID_ARGUMENT);

    // Output-role buffer used as an input. Initialize it first so the failure
    // can only come from the role mismatch, not from uninitialized memory.
    devicePool.token(outputToken);
    initializeDeviceMemory(outputBuffer);
    testExecution(preparedModel,
                  {.inputs = {deviceArg},
                   .outputs = {sharedArg},
                   .pools = {sharedPool, devicePool}},
                  ErrorStatus::INVALID_ARGUMENT);
}
1079
// Executing with an uninitialized device memory as an input must fail with
// GENERAL_FAILURE; once an execution has written it, it becomes usable.
TEST_P(MemoryDomainExecutionTest, UninitializedMemory) {
    auto preparedModel = createConvPreparedModel(kTestOperand);
    auto [buffer, token] = allocateBuffer(preparedModel, {0}, {0});
    if (buffer == nullptr) return;

    Request::MemoryPool sharedPool = createSharedMemoryPool(kTestOperandDataSize);
    Request::MemoryPool devicePool = createDeviceMemoryPool(token);
    RequestArgument sharedArg = {
            .location = {.poolIndex = 0, .offset = 0, .length = kTestOperandDataSize}};
    RequestArgument deviceArg = {.location = {.poolIndex = 1}};

    // Uninitialized device memory as input: must fail.
    testExecution(preparedModel,
                  {.inputs = {deviceArg},
                   .outputs = {sharedArg},
                   .pools = {sharedPool, devicePool}},
                  ErrorStatus::GENERAL_FAILURE);

    // Using the device memory as an output initializes it.
    testExecution(preparedModel,
                  {.inputs = {sharedArg},
                   .outputs = {deviceArg},
                   .pools = {sharedPool, devicePool}},
                  ErrorStatus::NONE);

    // The now-initialized device memory is valid as an input.
    testExecution(preparedModel,
                  {.inputs = {deviceArg},
                   .outputs = {sharedArg},
                   .pools = {sharedPool, devicePool}},
                  ErrorStatus::NONE);
}
1112
// One device memory may appear several times in a single request, but only in
// the input role: sharing it between input and output, or between two
// outputs, must be rejected.
TEST_P(MemoryDomainExecutionTest, SameRequestMultipleRoles) {
    auto preparedModel = createConvPreparedModel(kTestOperand, 2);
    auto [buffer, token] = allocateBuffer(preparedModel, {0, 1}, {0, 1});
    if (buffer == nullptr) return;

    Request::MemoryPool sharedPool1 = createSharedMemoryPool(kTestOperandDataSize);
    Request::MemoryPool sharedPool2 = createSharedMemoryPool(kTestOperandDataSize);
    Request::MemoryPool devicePool = createDeviceMemoryPool(token);
    RequestArgument sharedArg1 = {
            .location = {.poolIndex = 0, .offset = 0, .length = kTestOperandDataSize}};
    RequestArgument sharedArg2 = {
            .location = {.poolIndex = 1, .offset = 0, .length = kTestOperandDataSize}};
    RequestArgument deviceArg = {.location = {.poolIndex = 2}};

    // Same device memory as both an input and an output: invalid.
    initializeDeviceMemory(buffer);
    testExecution(preparedModel,
                  {.inputs = {deviceArg, sharedArg1},
                   .outputs = {deviceArg, sharedArg2},
                   .pools = {sharedPool1, sharedPool2, devicePool}},
                  ErrorStatus::INVALID_ARGUMENT);

    // Same device memory as two outputs: invalid.
    testExecution(preparedModel,
                  {.inputs = {sharedArg1, sharedArg2},
                   .outputs = {deviceArg, deviceArg},
                   .pools = {sharedPool1, sharedPool2, devicePool}},
                  ErrorStatus::INVALID_ARGUMENT);

    // Same device memory as two inputs: valid.
    initializeDeviceMemory(buffer);
    testExecution(preparedModel,
                  {.inputs = {deviceArg, deviceArg},
                   .outputs = {sharedArg1, sharedArg2},
                   .pools = {sharedPool1, sharedPool2, devicePool}},
                  ErrorStatus::NONE);
}
1150
// With a dynamic-shaped model and a device memory allocated with concrete
// dimensions, an execution that specifies incompatible dimensions on the
// device memory argument must be rejected.
TEST_P(MemoryDomainExecutionTest, InvalidDimensions) {
    // FENCED execution does not support dynamic shape.
    if (kExecutor == Executor::FENCED) return;

    TestOperand testOperand = kTestOperand;
    testOperand.dimensions[0] = 0;
    auto preparedModel = createConvPreparedModel(testOperand);
    auto [buffer, token] = allocateBuffer(preparedModel, {0}, {0}, kTestOperand.dimensions);
    if (buffer == nullptr) return;

    // Pick an incompatible first dimension and size the shared memory to match
    // it, so only the dimension mismatch can cause a failure.
    auto incompatibleDimensions = kTestOperand.dimensions;
    incompatibleDimensions[0] = 2;
    const uint32_t incompatibleDataSize = kTestOperandDataSize * 2;

    Request::MemoryPool sharedPool = createSharedMemoryPool(incompatibleDataSize);
    Request::MemoryPool devicePool = createDeviceMemoryPool(token);
    RequestArgument sharedArg = {
            .location = {.poolIndex = 0, .offset = 0, .length = incompatibleDataSize},
            .dimensions = incompatibleDimensions};
    RequestArgument deviceArg = {.location = {.poolIndex = 1}};
    RequestArgument deviceArgWithBadDimensions = {.location = {.poolIndex = 1},
                                                  .dimensions = incompatibleDimensions};

    initializeDeviceMemory(buffer);
    // Incompatible dimensions on the device memory input.
    testExecution(preparedModel,
                  {.inputs = {deviceArgWithBadDimensions},
                   .outputs = {sharedArg},
                   .pools = {sharedPool, devicePool}},
                  ErrorStatus::INVALID_ARGUMENT);

    // Incompatible dimensions on the device memory output.
    testExecution(preparedModel,
                  {.inputs = {sharedArg},
                   .outputs = {deviceArgWithBadDimensions},
                   .pools = {sharedPool, devicePool}},
                  ErrorStatus::INVALID_ARGUMENT);

    // Device memory output without explicit dimensions: the execution cannot
    // fit the requested (larger) output into the buffer.
    testExecution(preparedModel,
                  {.inputs = {sharedArg},
                   .outputs = {deviceArg},
                   .pools = {sharedPool, devicePool}},
                  ErrorStatus::GENERAL_FAILURE);
}
1194
// Exercise every execution path the fixture supports.
const auto kExecutorChoices = testing::Values(Executor::ASYNC, Executor::SYNC, Executor::FENCED);
1196
printMemoryDomainExecutionTest(const testing::TestParamInfo<MemoryDomainExecutionTestParam> & info)1197 std::string printMemoryDomainExecutionTest(
1198 const testing::TestParamInfo<MemoryDomainExecutionTestParam>& info) {
1199 const auto& [namedDevice, operandType, executor] = info.param;
1200 const std::string type = toString(static_cast<OperandType>(operandType));
1201 const std::string executorStr = toString(executor);
1202 return gtestCompliantName(getName(namedDevice) + "_" + type + "_" + executorStr);
1203 }
1204
// Instantiate the execution tests over every (device, operand type, executor)
// combination; allow the suite to end up with no instances without triggering
// gtest's uninstantiated-suite error.
GTEST_ALLOW_UNINSTANTIATED_PARAMETERIZED_TEST(MemoryDomainExecutionTest);
INSTANTIATE_TEST_SUITE_P(TestMemoryDomain, MemoryDomainExecutionTest,
                         testing::Combine(kNamedDeviceChoices, kTestOperandTypeChoices,
                                          kExecutorChoices),
                         printMemoryDomainExecutionTest);
1210
1211 } // namespace android::hardware::neuralnetworks::V1_3::vts::functional
1212