/*
 * Copyright (C) 2020 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <android-base/logging.h>
#include <android-base/properties.h>
#include <android-base/unique_fd.h>
#include <fcntl.h>
#include <ftw.h>
#include <gtest/gtest.h>
#include <libgen.h>
#include <stdlib.h>
#include <sys/stat.h>
#include <unistd.h>

#include <algorithm>
#include <array>
#include <cassert>
#include <cmath>
#include <fstream>
#include <iostream>
#include <map>
#include <memory>
#include <optional>
#include <random>
#include <set>
#include <string>
#include <thread>
#include <utility>
#include <vector>

#include "GeneratedTestUtils.h"
#include "SupportLibraryTestUtils.h"
#include "SupportLibraryWrapper.h"
#include "TmpDirectoryUtils.h"

// Systrace is not available from CTS tests due to platform layering
// constraints. We reuse the NNTEST_ONLY_PUBLIC_API flag, as that should also be
// the case for CTS (public APIs only).
#ifndef NNTEST_ONLY_PUBLIC_API
#include <Tracing.h>
#else
#define NNTRACE_FULL_RAW(...)
#define NNTRACE_APP(...)
#define NNTRACE_APP_SWITCH(...)
#endif

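// Environment variable that Qualcomm's DSP runtime consults when locating its libraries;
// pointing it at the Support Library directory lets such vendor drivers load. (The
// Qualcomm-specific purpose is inferred from the variable name, not confirmed here.)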
const char* kQCDspLoadPathEnv = "ADSP_LIBRARY_PATH";

extern std::string SUPPORT_LIBRARY_NAME;

namespace android::nn::generated_tests {
using namespace sl_wrapper;
using namespace test_helper;

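// Outcome of a compute attempt that uses device memories: SKIP when the driver cannot
// allocate the requested device memory, OK otherwise.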
enum ComputeWithDeviceMemoriesResult {
    SKIP,
    OK,
};

class GeneratedTests : public GeneratedTestBase {
   protected:
    void SetUp() override;
    void TearDown() override;

    bool shouldSkipTest();

    ANeuralNetworksMemory* createDeviceMemoryForInput(const Compilation& compilation,
                                                      uint32_t index);
    ANeuralNetworksMemory* createDeviceMemoryForOutput(const Compilation& compilation,
                                                       uint32_t index);
    ComputeWithDeviceMemoriesResult computeWithDeviceMemories(
            const Compilation& compilation, const TestModel& testModel, Execution* execution,
            Execution::ComputeMode computeMode, Result* result, std::vector<TestBuffer>* outputs);
    bool checkSupported(const Model& model, ANeuralNetworksDevice* device);
    std::optional<Compilation> compileModel(const Model& model, ANeuralNetworksDevice* device);
    void executeWithCompilation(const Compilation& compilation, const TestModel& testModel);
    void executeOnce(const Model& model, const TestModel& testModel);
    void executeMultithreadedOwnCompilation(const Model& model, const TestModel& testModel);
    void executeMultithreadedSharedCompilation(const Model& model, const TestModel& testModel);
    // Test driver for those generated from ml/nn/runtime/test/spec
    void execute(const TestModel& testModel);

    // VNDK version of the device under test.
    static int mVndkVersion;

    std::string mCacheDir;
    std::vector<uint8_t> mToken;
    bool mTestCompilationCaching = false;
    bool mTestDynamicOutputShape = false;
    bool mExpectFailure = false;
    bool mTestQuantizationCoupling = false;
    bool mTestDeviceMemory = false;
    Execution::ComputeMode mComputeMode = Execution::getComputeMode();

    std::unique_ptr<const NnApiSupportLibrary> mNnApi;
};

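// Assume the most recent version until SetUp() reads the real one from ro.vndk.version.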
int GeneratedTests::mVndkVersion = __ANDROID_API_FUTURE__;

// Tag for the dynamic output shape tests
class DynamicOutputShapeTest : public GeneratedTests {
   protected:
    DynamicOutputShapeTest() { mTestDynamicOutputShape = true; }
};

// Tag for the fenced execute tests
class FencedComputeTest : public GeneratedTests {};

// Tag for the generated validation tests
class GeneratedValidationTests : public GeneratedTests {
   protected:
    GeneratedValidationTests() { mExpectFailure = true; }
};

class QuantizationCouplingTest : public GeneratedTests {
   protected:
    QuantizationCouplingTest() { mTestQuantizationCoupling = true; }
};

class DeviceMemoryTest : public GeneratedTests {
   protected:
    DeviceMemoryTest() { mTestDeviceMemory = true; }
};

bool GeneratedTests::checkSupported(const Model& model, ANeuralNetworksDevice* device) {
    constexpr static int MAX_NUM_OPS = 256;
    // Pre-fill with true: the query below only writes entries for the model's operations, so
    // the untouched tail cannot turn the std::all_of check below false.
    std::array<bool, MAX_NUM_OPS> supportedOps;
    supportedOps.fill(true);
    EXPECT_EQ(mNnApi->getFL5()->ANeuralNetworksModel_getSupportedOperationsForDevices(
                      model.getHandle(), &device, /*numDevices=*/1, supportedOps.data()),
              ANEURALNETWORKS_NO_ERROR);
    const bool fullySupportedModel =
            std::all_of(supportedOps.begin(), supportedOps.end(), [](bool v) { return v; });
    return fullySupportedModel;
}

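// Opens (creating if necessary) each named cache file and returns the owning fds. Records a
// test failure and returns an empty vector if any open fails.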
static std::vector<base::unique_fd> createCacheFds(const std::vector<std::string>& files) {
    std::vector<base::unique_fd> fds;
    fds.reserve(files.size());
    for (const auto& file : files) {
        auto fd = base::unique_fd(open(file.c_str(), O_RDWR | O_CREAT, S_IRUSR | S_IWUSR));
        if (fd.get() == -1) {
            // FAIL() is only usable in functions returning void, so invoke it from an
            // immediately called void lambda before returning.
            [] { FAIL(); }();
            return {};
        }
        fds.push_back(std::move(fd));
    }
    return fds;
}

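// Compiles |model| for |device|. When compilation caching is under test, compiles twice with
// the same token so that a caching-capable driver exercises both the save and load paths.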
std::optional<Compilation> GeneratedTests::compileModel(const Model& model,
                                                        ANeuralNetworksDevice* device) {
    NNTRACE_APP(NNTRACE_PHASE_COMPILATION, "compileModel");

    if (mTestCompilationCaching) {
        // Compile the model twice with the same token, so that compilation caching will be
        // exercised if supported by the driver.
        // Invalid models are never passed to this branch.
        EXPECT_FALSE(mExpectFailure);

        std::string mode = ::android::base::GetProperty("debug.nn.slts.caching", "random");
        bool useSetCachingFromFds;
        if (mode == "path") {
            useSetCachingFromFds = false;
        } else if (mode == "fds") {
            useSetCachingFromFds = true;
        } else if (mode == "random") {
            // Derive the choice deterministically from the test name so reruns are stable.
            std::string testName = ::testing::UnitTest::GetInstance()->current_test_info()->name();
            std::seed_seq seq(testName.begin(), testName.end());
            std::mt19937 gen(seq);
            std::bernoulli_distribution d(0.5);
            useSetCachingFromFds = d(gen);
        } else {
            [&mode] {
                FAIL() << "System property debug.nn.slts.caching should be one of \"path\", "
                          "\"fds\", or \"random\"; got \""
                       << mode << "\"";
            }();
            return {};
        }
        SCOPED_TRACE("Use setCachingFromFds = " + std::to_string(useSetCachingFromFds) + " (" +
                     mode + ")");
        std::cout << "\nUse setCachingFromFds = " << std::boolalpha << useSetCachingFromFds << " ("
                  << mode << ")" << std::endl;

        std::vector<std::string> modelCacheFilenames, dataCacheFilenames;
        if (useSetCachingFromFds) {
            uint32_t numModelCacheFiles, numDataCacheFiles;
            EXPECT_EQ(mNnApi->getFL5()->SL_ANeuralNetworksDevice_getNumberOfCacheFilesNeeded(
                              device, &numModelCacheFiles, &numDataCacheFiles),
                      ANEURALNETWORKS_NO_ERROR);
            for (uint32_t i = 0; i < numModelCacheFiles; i++) {
                modelCacheFilenames.push_back({mCacheDir + "/model" + std::to_string(i)});
            }
            for (uint32_t i = 0; i < numDataCacheFiles; i++) {
                dataCacheFilenames.push_back({mCacheDir + "/data" + std::to_string(i)});
            }
        }

        auto resultCompilation1 = Compilation::createForDevice(mNnApi.get(), &model, device);
        EXPECT_EQ(resultCompilation1.first, Result::NO_ERROR);
        auto compilation1 = std::move(resultCompilation1.second);
        if (useSetCachingFromFds) {
            auto modelCacheFds = createCacheFds(modelCacheFilenames);
            auto dataCacheFds = createCacheFds(dataCacheFilenames);
            EXPECT_EQ(compilation1.setCachingFromFds(modelCacheFds, dataCacheFds, mToken),
                      Result::NO_ERROR);
        } else {
            EXPECT_EQ(compilation1.setCaching(mCacheDir, mToken), Result::NO_ERROR);
        }
        EXPECT_EQ(compilation1.finish(), Result::NO_ERROR);

        auto resultCompilation2 = Compilation::createForDevice(mNnApi.get(), &model, device);
        EXPECT_EQ(resultCompilation2.first, Result::NO_ERROR);
        auto compilation2 = std::move(resultCompilation2.second);
        if (useSetCachingFromFds) {
            auto modelCacheFds = createCacheFds(modelCacheFilenames);
            auto dataCacheFds = createCacheFds(dataCacheFilenames);
            EXPECT_EQ(compilation2.setCachingFromFds(modelCacheFds, dataCacheFds, mToken),
                      Result::NO_ERROR);
        } else {
            EXPECT_EQ(compilation2.setCaching(mCacheDir, mToken), Result::NO_ERROR);
        }
        EXPECT_EQ(compilation2.finish(), Result::NO_ERROR);

        // The second compilation is the one that may have been loaded from the cache.
        return compilation2;
    } else {
        auto resultCompilation = Compilation::createForDevice(mNnApi.get(), &model, device);
        EXPECT_EQ(resultCompilation.first, Result::NO_ERROR);
        auto compilation = std::move(resultCompilation.second);
        Result result = compilation.finish();

        // For a valid model, the compilation result must be NO_ERROR.
        // For an invalid model, the driver may fail at compilation or execution, so any result
        // code is permitted at this point.
        if (mExpectFailure && result != Result::NO_ERROR) return std::nullopt;
        EXPECT_EQ(result, Result::NO_ERROR);
        return compilation;
    }
}

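// Computes using pointer-based (caller-allocated) inputs and outputs set up by createRequest().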
void computeWithPtrs(const TestModel& testModel, Execution* execution,
                     Execution::ComputeMode computeMode, Result* result,
                     std::vector<TestBuffer>* outputs) {
    {
        NNTRACE_APP(NNTRACE_PHASE_INPUTS_AND_OUTPUTS, "computeWithPtrs example");
        createRequest(testModel, execution, outputs);
    }
    *result = execution->compute(computeMode);
}

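// Creates a driver-allocated memory object whose sole role is input |index| of |compilation|.
// Returns nullptr if the driver cannot provide such a memory, which callers treat as "skip".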
ANeuralNetworksMemory* GeneratedTests::createDeviceMemoryForInput(const Compilation& compilation,
                                                                  uint32_t index) {
    ANeuralNetworksMemoryDesc* desc = nullptr;
    EXPECT_EQ(mNnApi->getFL5()->ANeuralNetworksMemoryDesc_create(&desc), ANEURALNETWORKS_NO_ERROR);
    EXPECT_EQ(mNnApi->getFL5()->ANeuralNetworksMemoryDesc_addInputRole(
                      desc, compilation.getHandle(), index, 1.0f),
              ANEURALNETWORKS_NO_ERROR);
    EXPECT_EQ(mNnApi->getFL5()->ANeuralNetworksMemoryDesc_finish(desc), ANEURALNETWORKS_NO_ERROR);
    ANeuralNetworksMemory* memory = nullptr;
    mNnApi->getFL5()->ANeuralNetworksMemory_createFromDesc(desc, &memory);
    mNnApi->getFL5()->ANeuralNetworksMemoryDesc_free(desc);
    return memory;
}

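// Same as above, but the memory's sole role is output |index| of |compilation|.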
ANeuralNetworksMemory* GeneratedTests::createDeviceMemoryForOutput(const Compilation& compilation,
                                                                   uint32_t index) {
    ANeuralNetworksMemoryDesc* desc = nullptr;
    EXPECT_EQ(mNnApi->getFL5()->ANeuralNetworksMemoryDesc_create(&desc), ANEURALNETWORKS_NO_ERROR);
    EXPECT_EQ(mNnApi->getFL5()->ANeuralNetworksMemoryDesc_addOutputRole(
                      desc, compilation.getHandle(), index, 1.0f),
              ANEURALNETWORKS_NO_ERROR);
    EXPECT_EQ(mNnApi->getFL5()->ANeuralNetworksMemoryDesc_finish(desc), ANEURALNETWORKS_NO_ERROR);
    ANeuralNetworksMemory* memory = nullptr;
    mNnApi->getFL5()->ANeuralNetworksMemory_createFromDesc(desc, &memory);
    mNnApi->getFL5()->ANeuralNetworksMemoryDesc_free(desc);
    return memory;
}

// Returns SKIP, leaving |result| unset and |outputs| empty, if the driver cannot allocate the
// required device memories. Otherwise runs the computation and copies the outputs back out.
ComputeWithDeviceMemoriesResult GeneratedTests::computeWithDeviceMemories(
        const Compilation& compilation, const TestModel& testModel, Execution* execution,
        Execution::ComputeMode computeMode, Result* result, std::vector<TestBuffer>* outputs) {
    EXPECT_NE(execution, nullptr);
    EXPECT_NE(result, nullptr);
    EXPECT_NE(outputs, nullptr);
    outputs->clear();
    std::vector<Memory> inputMemories, outputMemories;

    {
        NNTRACE_APP(NNTRACE_PHASE_INPUTS_AND_OUTPUTS, "computeWithDeviceMemories example");
        // Model inputs.
        for (uint32_t i = 0; i < testModel.main.inputIndexes.size(); i++) {
            SCOPED_TRACE("Input index: " + std::to_string(i));
            const auto& operand = testModel.main.operands[testModel.main.inputIndexes[i]];
            // Omitted input.
            if (operand.data.size() == 0) {
                EXPECT_EQ(Result::NO_ERROR, execution->setInput(i, nullptr, 0));
                continue;
            }

            // Create device memory.
            ANeuralNetworksMemory* memory = createDeviceMemoryForInput(compilation, i);
            if (memory == nullptr) {
                return ComputeWithDeviceMemoriesResult::SKIP;
            }
            auto& wrapperMemory = inputMemories.emplace_back(Memory(mNnApi.get(), memory));

            // Copy data from TestBuffer to device memory.
            auto ashmem = TestAshmem::createFrom(mNnApi.get(), operand.data);
            EXPECT_NE(ashmem, nullptr);
            EXPECT_EQ(mNnApi->getFL5()->ANeuralNetworksMemory_copy(ashmem->get()->get(), memory),
                      ANEURALNETWORKS_NO_ERROR);
            EXPECT_EQ(Result::NO_ERROR, execution->setInputFromMemory(i, &wrapperMemory, 0, 0));
        }

        // Model outputs.
        for (uint32_t i = 0; i < testModel.main.outputIndexes.size(); i++) {
            SCOPED_TRACE("Output index: " + std::to_string(i));
            ANeuralNetworksMemory* memory = createDeviceMemoryForOutput(compilation, i);
            if (memory == nullptr) {
                return ComputeWithDeviceMemoriesResult::SKIP;
            }
            auto& wrapperMemory = outputMemories.emplace_back(Memory(mNnApi.get(), memory));
            EXPECT_EQ(Result::NO_ERROR, execution->setOutputFromMemory(i, &wrapperMemory, 0, 0));
        }
    }

    *result = execution->compute(computeMode);

    // Copy the results out of the device memories. Device memories may be opaque, so stage
    // each copy through ashmem.
    for (uint32_t i = 0; i < testModel.main.outputIndexes.size(); i++) {
        SCOPED_TRACE("Output index: " + std::to_string(i));
        const auto& operand = testModel.main.operands[testModel.main.outputIndexes[i]];
        const size_t bufferSize = operand.data.size();
        auto& output = outputs->emplace_back(bufferSize);

        auto ashmem = TestAshmem::createFrom(mNnApi.get(), output);
        EXPECT_NE(ashmem, nullptr);
        EXPECT_EQ(mNnApi->getFL5()->ANeuralNetworksMemory_copy(outputMemories[i].get(),
                                                               ashmem->get()->get()),
                  ANEURALNETWORKS_NO_ERROR);
        std::copy(ashmem->dataAs<uint8_t>(), ashmem->dataAs<uint8_t>() + bufferSize,
                  output.getMutable<uint8_t>());
    }
    return ComputeWithDeviceMemoriesResult::OK;
}

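// Runs one execution against |compilation| and verifies the outputs (or, for validation
// tests, that the execution fails as expected).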
void GeneratedTests::executeWithCompilation(const Compilation& compilation,
                                            const TestModel& testModel) {
    NNTRACE_APP(NNTRACE_PHASE_EXECUTION, "executeWithCompilation example");

    Execution execution(mNnApi.get(), &compilation);
    Result result;
    std::vector<TestBuffer> outputs;

    if (mTestDeviceMemory) {
        if (computeWithDeviceMemories(compilation, testModel, &execution, mComputeMode, &result,
                                      &outputs) == ComputeWithDeviceMemoriesResult::SKIP) {
            std::cout << "\nModel not supported by device memories. Skipping" << std::endl;
            return;
        }
    } else {
        computeWithPtrs(testModel, &execution, mComputeMode, &result, &outputs);
    }

    // Nothing to verify if the execution succeeded but produced no outputs.
    if (result == Result::NO_ERROR && outputs.empty()) {
        return;
    }

    {
        NNTRACE_APP(NNTRACE_PHASE_RESULTS, "executeWithCompilation example");
        if (mExpectFailure) {
            ASSERT_NE(result, Result::NO_ERROR);
            return;
        } else {
            ASSERT_EQ(result, Result::NO_ERROR);
        }

        // Check output dimensions.
        for (uint32_t i = 0; i < testModel.main.outputIndexes.size(); i++) {
            SCOPED_TRACE("Output index: " + std::to_string(i));
            const auto& output = testModel.main.operands[testModel.main.outputIndexes[i]];
            if (output.isIgnored) continue;
            std::vector<uint32_t> actualDimensions;
            ASSERT_EQ(Result::NO_ERROR, execution.getOutputOperandDimensions(i, &actualDimensions));
            ASSERT_EQ(output.dimensions, actualDimensions);
        }

        checkResults(testModel, outputs);
    }
}

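// Compiles and runs the test model once on every device that fully supports it.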
void GeneratedTests::executeOnce(const Model& model, const TestModel& testModel) {
    NNTRACE_APP(NNTRACE_PHASE_OVERALL, "executeOnce");
    uint32_t numDevices = 0;
    mNnApi->getFL5()->ANeuralNetworks_getDeviceCount(&numDevices);
    bool modelSupported = false;
    for (uint32_t i = 0; i < numDevices; ++i) {
        ANeuralNetworksDevice* device = nullptr;
        mNnApi->getFL5()->ANeuralNetworks_getDevice(i, &device);
        const char* deviceName = nullptr;
        mNnApi->getFL5()->ANeuralNetworksDevice_getName(device, &deviceName);
        SCOPED_TRACE("Device = " + std::string(deviceName));
        std::cout << "\nDevice = " << deviceName << std::endl;
        if (!checkSupported(model, device)) {
            std::cout << "\nModel not supported by device " << deviceName << ". Skipping"
                      << std::endl;
            continue;
        }
        modelSupported = true;
        std::cout << "\nModel supported" << std::endl;
        std::optional<Compilation> compilation = compileModel(model, device);
        // Early return if compilation fails. The compilation result code is
        // checked in compileModel.
        if (!compilation) return;
        executeWithCompilation(compilation.value(), testModel);
        std::cout << "\nExecution completed" << std::endl;
    }
    if (!modelSupported) {
        std::cout << "\nModel not supported by any device\n"
                  << "SKIPPED" << std::endl;
    }
}

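// Stress test: ten threads, each performing its own compilation and execution.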
void GeneratedTests::executeMultithreadedOwnCompilation(const Model& model,
                                                        const TestModel& testModel) {
    NNTRACE_APP(NNTRACE_PHASE_OVERALL, "executeMultithreadedOwnCompilation");
    SCOPED_TRACE("MultithreadedOwnCompilation");
    std::cout << "\nMultithreadedOwnCompilation" << std::endl;
    std::vector<std::thread> threads;
    for (int i = 0; i < 10; i++) {
        threads.push_back(std::thread([&]() { executeOnce(model, testModel); }));
    }
    std::for_each(threads.begin(), threads.end(), [](std::thread& t) { t.join(); });
}

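// Stress test: one compilation per device, shared by ten concurrently executing threads.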
void GeneratedTests::executeMultithreadedSharedCompilation(const Model& model,
                                                           const TestModel& testModel) {
    NNTRACE_APP(NNTRACE_PHASE_OVERALL, "executeMultithreadedSharedCompilation");
    SCOPED_TRACE("MultithreadedSharedCompilation");
    std::cout << "\nMultithreadedSharedCompilation" << std::endl;
    uint32_t numDevices = 0;
    mNnApi->getFL5()->ANeuralNetworks_getDeviceCount(&numDevices);
    bool modelSupported = false;
    for (uint32_t i = 0; i < numDevices; ++i) {
        ANeuralNetworksDevice* device = nullptr;
        mNnApi->getFL5()->ANeuralNetworks_getDevice(i, &device);
        const char* deviceName = nullptr;
        mNnApi->getFL5()->ANeuralNetworksDevice_getName(device, &deviceName);
        SCOPED_TRACE("Device = " + std::string(deviceName));
        std::cout << "\nDevice = " << deviceName << std::endl;
        if (!checkSupported(model, device)) {
            std::cout << "\nModel not supported by device " << deviceName << ". Skipping"
                      << std::endl;
            continue;
        }
        modelSupported = true;
        std::cout << "\nModel supported" << std::endl;
        std::optional<Compilation> compilation = compileModel(model, device);
        // Early return if compilation fails. The compilation result code is
        // checked in compileModel.
        if (!compilation) return;
        std::vector<std::thread> threads;
        for (int j = 0; j < 10; j++) {
            threads.push_back(
                    std::thread([&]() { executeWithCompilation(compilation.value(), testModel); }));
        }
        std::for_each(threads.begin(), threads.end(), [](std::thread& t) { t.join(); });
        std::cout << "\nExecution completed" << std::endl;
    }
    if (!modelSupported) {
        std::cout << "\nModel not supported by any device\n"
                  << "SKIPPED" << std::endl;
    }
}

// Test driver for those generated from ml/nn/runtime/test/spec
void GeneratedTests::execute(const TestModel& testModel) {
    NNTRACE_APP(NNTRACE_PHASE_OVERALL, "execute");
    GeneratedModel model(mNnApi.get());
    createModel(mNnApi.get(), testModel, mTestDynamicOutputShape, &model);
    if (testModel.expectFailure && !model.isValid()) {
        return;
    }
    ASSERT_EQ(model.finish(), Result::NO_ERROR);
    ASSERT_TRUE(model.isValid());
    auto executeInternal = [&testModel, &model, this]() {
        SCOPED_TRACE("TestCompilationCaching = " + std::to_string(mTestCompilationCaching));
        std::cout << "\nCompilationCaching = " << std::boolalpha << mTestCompilationCaching
                  << std::endl;
#ifndef NNTEST_MULTITHREADED
        executeOnce(model, testModel);
#else   // defined(NNTEST_MULTITHREADED)
        executeMultithreadedOwnCompilation(model, testModel);
        executeMultithreadedSharedCompilation(model, testModel);
#endif  // !defined(NNTEST_MULTITHREADED)
    };
    // Run once without compilation caching, then, for valid models, once more with it.
    mTestCompilationCaching = false;
    executeInternal();
    if (!mExpectFailure) {
        mTestCompilationCaching = true;
        executeInternal();
    }
}

bool GeneratedTests::shouldSkipTest() {
    // A map of {min VNDK version -> tests that should be skipped with earlier VNDK versions}.
    // The listed tests were added in a later release but exercise old APIs, so they should be
    // skipped if the device has a mixed build of system and vendor partitions.
    static const std::map<int, std::set<std::string>> kMapOfMinVndkVersionToTests = {
            {
                    __ANDROID_API_R__,
                    {
                            "add_broadcast_quant8_all_inputs_as_internal",
                    },
            },
    };
    for (const auto& [minVersion, names] : kMapOfMinVndkVersionToTests) {
        if (mVndkVersion < minVersion && names.count(kTestName) > 0) {
            return true;
        }
    }
    return false;
}

void GeneratedTests::SetUp() {
    // Note: bionic's dirname() accepts a const char* and returns a pointer into thread-local
    // storage, so passing c_str() here is safe on Android (an assumption about the libc in use).
    const char* libdir = dirname(SUPPORT_LIBRARY_NAME.c_str());
    setenv(kQCDspLoadPathEnv, libdir, 1);
    LOG(INFO) << "Overwrote system env variable " << kQCDspLoadPathEnv << " with " << libdir;
    mNnApi = loadNnApiSupportLibrary(SUPPORT_LIBRARY_NAME);

    GeneratedTestBase::SetUp();

    mVndkVersion = ::android::base::GetIntProperty("ro.vndk.version", __ANDROID_API_FUTURE__);
    if (shouldSkipTest()) {
        GTEST_SKIP();
        return;
    }

    // Create a fresh cache directory and a zero-filled caching token for this test.
    char cacheDirTemp[] = NN_TMP_DIR "/TestCompilationCachingXXXXXX";
    char* cacheDir = mkdtemp(cacheDirTemp);
    ASSERT_NE(cacheDir, nullptr);
    mCacheDir = cacheDir;
    mToken = std::vector<uint8_t>(ANEURALNETWORKS_BYTE_SIZE_OF_CACHE_TOKEN, 0);
}

void GeneratedTests::TearDown() {
    mNnApi.reset(nullptr);

    if (!::testing::Test::HasFailure()) {
        // TODO: Switch to std::filesystem::remove_all once libc++fs is made available in CTS.
        // Remove the cache directory specified by path recursively. FTW_DEPTH visits children
        // before their parent directory, so remove() always sees empty directories.
        auto callback = [](const char* child, const struct stat*, int, struct FTW*) {
            return remove(child);
        };
        nftw(mCacheDir.c_str(), callback, 128, FTW_DEPTH | FTW_MOUNT | FTW_PHYS);
    }
    GeneratedTestBase::TearDown();
}

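// When built with NNTEST_COMPUTE_MODE, the sync and burst compute modes get dedicated
// parameterized tests; otherwise a single Test runs under the default compute mode.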
#ifdef NNTEST_COMPUTE_MODE
TEST_P(GeneratedTests, Sync) {
    std::cout << "\nComputeMode = SYNC" << std::endl;
    mComputeMode = Execution::ComputeMode::SYNC;
    execute(testModel);
}

TEST_P(GeneratedTests, Burst) {
    std::cout << "\nComputeMode = BURST" << std::endl;
    mComputeMode = Execution::ComputeMode::BURST;
    execute(testModel);
}
#else
TEST_P(GeneratedTests, Test) {
    execute(testModel);
}
#endif

TEST_P(DynamicOutputShapeTest, Test) {
    execute(testModel);
}

TEST_P(GeneratedValidationTests, Test) {
    execute(testModel);
}

TEST_P(QuantizationCouplingTest, Test) {
    execute(convertQuant8AsymmOperandsToSigned(testModel));
}

TEST_P(DeviceMemoryTest, Test) {
    execute(testModel);
}

TEST_P(FencedComputeTest, Test) {
    mComputeMode = Execution::ComputeMode::FENCED;
    execute(testModel);
}

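// Instantiate each suite only for the generated models it can meaningfully exercise.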
INSTANTIATE_GENERATED_TEST(GeneratedTests,
                           [](const TestModel& testModel) { return !testModel.expectFailure; });

INSTANTIATE_GENERATED_TEST(DynamicOutputShapeTest, [](const TestModel& testModel) {
    return !testModel.expectFailure && !testModel.hasScalarOutputs();
});

INSTANTIATE_GENERATED_TEST(GeneratedValidationTests, [](const TestModel& testModel) {
    return testModel.expectFailure && !testModel.isInfiniteLoopTimeoutTest();
});

INSTANTIATE_GENERATED_TEST(QuantizationCouplingTest, [](const TestModel& testModel) {
    return !testModel.expectFailure && testModel.main.operations.size() == 1 &&
           testModel.referenced.size() == 0 && testModel.hasQuant8CoupledOperands();
});

INSTANTIATE_GENERATED_TEST(DeviceMemoryTest, [](const TestModel& testModel) {
    return !testModel.expectFailure &&
           std::all_of(testModel.main.outputIndexes.begin(), testModel.main.outputIndexes.end(),
                       [&testModel](uint32_t index) {
                           return testModel.main.operands[index].data.size() > 0;
                       });
});

INSTANTIATE_GENERATED_TEST(FencedComputeTest, [](const TestModel& testModel) {
    return !testModel.expectFailure &&
           std::all_of(testModel.main.outputIndexes.begin(), testModel.main.outputIndexes.end(),
                       [&testModel](uint32_t index) {
                           return testModel.main.operands[index].data.size() > 0;
                       });
});

}  // namespace android::nn::generated_tests