1 /*
2 * Copyright (C) 2021 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #define LOG_TAG "neuralnetworks_aidl_hal_test"
18
19 #include "VtsHalNeuralnetworks.h"
20
21 #include <android-base/logging.h>
22 #include <android/binder_auto_utils.h>
23 #include <android/binder_interface_utils.h>
24 #include <android/binder_manager.h>
25 #include <android/binder_status.h>
26 #include <gtest/gtest.h>
27 #include <memory>
28 #include <string>
29 #include <utility>
30
31 #include <TestHarness.h>
32 #include <nnapi/hal/aidl/Conversions.h>
33
34 #include "Callbacks.h"
35 #include "GeneratedTestHarness.h"
36 #include "Utils.h"
37
38 #ifdef __ANDROID__
39 #include <aidl/Vintf.h>
40 #else // __ANDROID__
41 #include <CanonicalDevice.h>
42 #include <nnapi/hal/aidl/Adapter.h>
43 #endif // __ANDROID__
44
45 namespace aidl::android::hardware::neuralnetworks::vts::functional {
46
47 using implementation::PreparedModelCallback;
48
49 // internal helper function
// Prepares |model| on |device| and stores the resulting handle in |*preparedModel|
// (nullptr on failure).
//
// If the driver reports that it does not fully support the model and preparation
// fails, the test is skipped (via GTEST_SKIP) when |reportSkipping| is true, or
// returns silently when it is false. |useConfig| selects the newer
// prepareModelWithConfig entry point instead of prepareModel.
//
// Note: this function uses ASSERT_* macros, so it must be invoked from a context
// where gtest fatal failures propagate correctly (e.g. via ASSERT_NO_FATAL_FAILURE).
void createPreparedModel(const std::shared_ptr<IDevice>& device, const Model& model,
                         std::shared_ptr<IPreparedModel>* preparedModel, bool reportSkipping,
                         bool useConfig) {
    ASSERT_NE(nullptr, preparedModel);
    *preparedModel = nullptr;

    // see if service can handle model
    std::vector<bool> supportedOperations;
    const auto supportedCallStatus = device->getSupportedOperations(model, &supportedOperations);
    ASSERT_TRUE(supportedCallStatus.isOk());
    ASSERT_NE(0ul, supportedOperations.size());
    const bool fullySupportsModel = std::all_of(
            supportedOperations.begin(), supportedOperations.end(), [](bool v) { return v; });

    // launch prepare model
    const std::shared_ptr<PreparedModelCallback> preparedModelCallback =
            ndk::SharedRefBase::make<PreparedModelCallback>();
    if (useConfig) {
        // PrepareModelConfig aggregate: preference, priority, deadline, model cache,
        // data cache, cache token, token value pairs, and compilation hints.
        const auto prepareLaunchStatus =
                device->prepareModelWithConfig(model,
                                               {ExecutionPreference::FAST_SINGLE_ANSWER,
                                                kDefaultPriority,
                                                kNoDeadline,
                                                {},
                                                {},
                                                kEmptyCacheTokenArray,
                                                {},
                                                {}},
                                               preparedModelCallback);
        ASSERT_TRUE(prepareLaunchStatus.isOk()) << prepareLaunchStatus.getDescription();
    } else {
        const auto prepareLaunchStatus = device->prepareModel(
                model, ExecutionPreference::FAST_SINGLE_ANSWER, kDefaultPriority, kNoDeadline, {},
                {}, kEmptyCacheToken, preparedModelCallback);
        ASSERT_TRUE(prepareLaunchStatus.isOk()) << prepareLaunchStatus.getDescription();
    }
    // retrieve prepared model: block until the asynchronous callback fires, then
    // read back the status and (possibly null) prepared model it captured.
    preparedModelCallback->wait();
    const ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
    *preparedModel = preparedModelCallback->getPreparedModel();

    // The getSupportedOperations call returns a list of operations that are guaranteed not to fail
    // if prepareModel is called, and 'fullySupportsModel' is true iff the entire model is
    // guaranteed. If a driver has any doubt that it can prepare an operation, it must return false.
    // So here, if a driver isn't sure if it can support an operation, but reports that it
    // successfully prepared the model, the test can continue.
    if (!fullySupportsModel && prepareReturnStatus != ErrorStatus::NONE) {
        // A failed preparation must not hand back a model object.
        ASSERT_EQ(nullptr, preparedModel->get());
        if (!reportSkipping) {
            return;
        }
        LOG(INFO) << "NN VTS: Early termination of test because vendor service cannot prepare "
                     "model that it does not support.";
        std::cout << "[ ] Early termination of test because vendor service cannot "
                     "prepare model that it does not support."
                  << std::endl;
        GTEST_SKIP();
    }

    // The driver claimed full support (or preparation succeeded), so preparation
    // must have produced a valid model with no error.
    ASSERT_EQ(ErrorStatus::NONE, prepareReturnStatus);
    ASSERT_NE(nullptr, preparedModel->get());
}
112
SetUp()113 void NeuralNetworksAidlTest::SetUp() {
114 testing::TestWithParam<NeuralNetworksAidlTestParam>::SetUp();
115 ASSERT_NE(kDevice, nullptr);
116 const bool deviceIsResponsive =
117 ndk::ScopedAStatus::fromStatus(AIBinder_ping(kDevice->asBinder().get())).isOk();
118 ASSERT_TRUE(deviceIsResponsive);
119 }
120
121 #ifdef __ANDROID__
makeNamedDevice(const std::string & name)122 static NamedDevice makeNamedDevice(const std::string& name) {
123 ndk::SpAIBinder binder(AServiceManager_waitForService(name.c_str()));
124 return {name, IDevice::fromBinder(binder)};
125 }
126
getNamedDevicesImpl()127 static std::vector<NamedDevice> getNamedDevicesImpl() {
128 // Retrieves the name of all service instances that implement IDevice,
129 // including any Lazy HAL instances.
130 const std::vector<std::string> names = ::android::getAidlHalInstanceNames(IDevice::descriptor);
131
132 // Get a handle to each device and pair it with its name.
133 std::vector<NamedDevice> namedDevices;
134 namedDevices.reserve(names.size());
135 std::transform(names.begin(), names.end(), std::back_inserter(namedDevices), makeNamedDevice);
136 return namedDevices;
137 }
138 #else // __ANDROID__
getNamedDevicesImpl()139 static std::vector<NamedDevice> getNamedDevicesImpl() {
140 const std::string name = "nnapi-sample";
141 auto device = std::make_shared<const ::android::nn::sample::Device>(name);
142 auto aidlDevice = adapter::adapt(device);
143 return {{name, aidlDevice}};
144 }
145 #endif // __ANDROID__
146
getNamedDevices()147 const std::vector<NamedDevice>& getNamedDevices() {
148 const static std::vector<NamedDevice> devices = getNamedDevicesImpl();
149 return devices;
150 }
151
printNeuralNetworksAidlTest(const testing::TestParamInfo<NeuralNetworksAidlTestParam> & info)152 std::string printNeuralNetworksAidlTest(
153 const testing::TestParamInfo<NeuralNetworksAidlTestParam>& info) {
154 return gtestCompliantName(getName(info.param));
155 }
156
// Instantiate the base device test suite over every discovered IDevice instance.
INSTANTIATE_DEVICE_TEST(NeuralNetworksAidlTest);

// Forward declaration from ValidateModel.cpp
void validateModel(const std::shared_ptr<IDevice>& device, const Model& model);
// Forward declaration from ValidateRequest.cpp
void validateRequest(const std::shared_ptr<IPreparedModel>& preparedModel, const Request& request);
// Forward declaration from ValidateRequest.cpp
void validateBurst(const std::shared_ptr<IPreparedModel>& preparedModel, const Request& request);
// Forward declaration from ValidateRequest.cpp
void validateRequestFailure(const std::shared_ptr<IPreparedModel>& preparedModel,
                            const Request& request);
168
validateEverything(const std::shared_ptr<IDevice> & device,const Model & model,const Request & request)169 void validateEverything(const std::shared_ptr<IDevice>& device, const Model& model,
170 const Request& request) {
171 validateModel(device, model);
172
173 // Create IPreparedModel.
174 std::shared_ptr<IPreparedModel> preparedModel;
175 createPreparedModel(device, model, &preparedModel);
176 if (preparedModel == nullptr) return;
177
178 validateRequest(preparedModel, request);
179 validateBurst(preparedModel, request);
180 // HIDL also had test that expected executeFenced to fail on received null fd (-1). This is not
181 // allowed in AIDL and will result in EX_TRANSACTION_FAILED.
182 }
183
validateFailure(const std::shared_ptr<IDevice> & device,const Model & model,const Request & request)184 void validateFailure(const std::shared_ptr<IDevice>& device, const Model& model,
185 const Request& request) {
186 // TODO: Should this always succeed?
187 // What if the invalid input is part of the model (i.e., a parameter).
188 validateModel(device, model);
189
190 // Create IPreparedModel.
191 std::shared_ptr<IPreparedModel> preparedModel;
192 createPreparedModel(device, model, &preparedModel);
193 if (preparedModel == nullptr) return;
194
195 validateRequestFailure(preparedModel, request);
196 }
197
// Dispatches each generated test model to the appropriate validation path.
TEST_P(ValidationTest, Test) {
    const Model model = createModel(kTestModel);
    ExecutionContext executionContext;
    const Request request = executionContext.createRequest(kTestModel);
    if (!kTestModel.expectFailure) {
        validateEverything(kDevice, model, request);
    } else {
        validateFailure(kDevice, model, request);
    }
}
208
__anon0ec80da00202(const std::string& testName) 209 INSTANTIATE_GENERATED_TEST(ValidationTest, [](const std::string& testName) {
210 // Skip validation for the "inputs_as_internal" and "all_tensors_as_inputs"
211 // generated tests.
212 return testName.find("inputs_as_internal") == std::string::npos &&
213 testName.find("all_tensors_as_inputs") == std::string::npos;
214 });
215
toString(Executor executor)216 std::string toString(Executor executor) {
217 switch (executor) {
218 case Executor::SYNC:
219 return "SYNC";
220 case Executor::BURST:
221 return "BURST";
222 case Executor::FENCED:
223 return "FENCED";
224 default:
225 CHECK(false);
226 }
227 }
228
229 } // namespace aidl::android::hardware::neuralnetworks::vts::functional
230