1 /*
2 * Copyright (C) 2018 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #define LOG_TAG "neuralnetworks_hidl_hal_test"
18
19 #include "VtsHalNeuralnetworks.h"
20
21 namespace android::hardware::neuralnetworks::V1_3::vts::functional {
22
23 using implementation::PreparedModelCallback;
24 using V1_0::DeviceStatus;
25 using V1_0::PerformanceInfo;
26 using V1_1::ExecutionPreference;
27 using V1_2::Constant;
28 using V1_2::DeviceType;
29 using V1_2::Extension;
30 using HidlToken = hidl_array<uint8_t, static_cast<uint32_t>(Constant::BYTE_SIZE_OF_CACHE_TOKEN)>;
31
// create device test
// Intentionally empty: the parameterized fixture already acquires the service
// handle (kDevice) during setup, so simply instantiating the fixture for each
// registered service instance verifies that the device can be created.
TEST_P(NeuralnetworksHidlTest, CreateDevice) {}
34
35 // status test
TEST_P(NeuralnetworksHidlTest,StatusTest)36 TEST_P(NeuralnetworksHidlTest, StatusTest) {
37 Return<DeviceStatus> status = kDevice->getStatus();
38 ASSERT_TRUE(status.isOk());
39 EXPECT_EQ(DeviceStatus::AVAILABLE, static_cast<DeviceStatus>(status));
40 }
41
42 // initialization
TEST_P(NeuralnetworksHidlTest,GetCapabilitiesTest)43 TEST_P(NeuralnetworksHidlTest, GetCapabilitiesTest) {
44 using OperandPerformance = Capabilities::OperandPerformance;
45 Return<void> ret = kDevice->getCapabilities_1_3([](ErrorStatus status,
46 const Capabilities& capabilities) {
47 EXPECT_EQ(ErrorStatus::NONE, status);
48
49 auto isPositive = [](const PerformanceInfo& perf) {
50 return perf.execTime > 0.0f && perf.powerUsage > 0.0f;
51 };
52
53 EXPECT_TRUE(isPositive(capabilities.relaxedFloat32toFloat16PerformanceScalar));
54 EXPECT_TRUE(isPositive(capabilities.relaxedFloat32toFloat16PerformanceTensor));
55 const auto& opPerf = capabilities.operandPerformance;
56 EXPECT_TRUE(std::all_of(
57 opPerf.begin(), opPerf.end(),
58 [isPositive](const OperandPerformance& a) { return isPositive(a.info); }));
59 EXPECT_TRUE(std::is_sorted(opPerf.begin(), opPerf.end(),
60 [](const OperandPerformance& a, const OperandPerformance& b) {
61 return a.type < b.type;
62 }));
63 EXPECT_TRUE(std::all_of(opPerf.begin(), opPerf.end(), [](const OperandPerformance& a) {
64 return a.type != OperandType::SUBGRAPH;
65 }));
66 EXPECT_TRUE(isPositive(capabilities.ifPerformance));
67 EXPECT_TRUE(isPositive(capabilities.whilePerformance));
68 });
69 EXPECT_TRUE(ret.isOk());
70 }
71
// detect cycle
// Builds a structurally invalid model whose operations form a dependency
// cycle (operand 3 depends on 4, and 4 depends on 3), then verifies that both
// getSupportedOperations_1_3() and prepareModel_1_3() reject it as invalid.
TEST_P(NeuralnetworksHidlTest, CycleTest) {
    // opnd0 = TENSOR_FLOAT32            // model input
    // opnd1 = TENSOR_FLOAT32            // model input
    // opnd2 = INT32                     // model input
    // opnd3 = ADD(opnd0, opnd4, opnd2)
    // opnd4 = ADD(opnd1, opnd3, opnd2)
    // opnd5 = ADD(opnd4, opnd0, opnd2)  // model output
    //
    //            +-----+
    //            |     |
    //            v     |
    // 3 = ADD(0, 4, 2) |
    // |                |
    // +----------+     |
    //            |     |
    //            v     |
    // 4 = ADD(1, 3, 2) |
    // |                |
    // +----------------+
    // |
    // |
    // +-------+
    //         |
    //         v
    // 5 = ADD(4, 0, 2)

    const std::vector<Operand> operands = {
            {
                    // operands[0]: first float input, consumed by operations 0 and 2
                    .type = OperandType::TENSOR_FLOAT32,
                    .dimensions = {1},
                    .numberOfConsumers = 2,
                    .scale = 0.0f,
                    .zeroPoint = 0,
                    .lifetime = OperandLifeTime::SUBGRAPH_INPUT,
                    .location = {.poolIndex = 0, .offset = 0, .length = 0},
            },
            {
                    // operands[1]: second float input, consumed by operation 1
                    .type = OperandType::TENSOR_FLOAT32,
                    .dimensions = {1},
                    .numberOfConsumers = 1,
                    .scale = 0.0f,
                    .zeroPoint = 0,
                    .lifetime = OperandLifeTime::SUBGRAPH_INPUT,
                    .location = {.poolIndex = 0, .offset = 0, .length = 0},
            },
            {
                    // operands[2]: scalar activation input shared by all three ADDs
                    .type = OperandType::INT32,
                    .dimensions = {},
                    .numberOfConsumers = 3,
                    .scale = 0.0f,
                    .zeroPoint = 0,
                    .lifetime = OperandLifeTime::SUBGRAPH_INPUT,
                    .location = {.poolIndex = 0, .offset = 0, .length = 0},
            },
            {
                    // operands[3]: temporary; part of the 3 <-> 4 cycle
                    .type = OperandType::TENSOR_FLOAT32,
                    .dimensions = {1},
                    .numberOfConsumers = 1,
                    .scale = 0.0f,
                    .zeroPoint = 0,
                    .lifetime = OperandLifeTime::TEMPORARY_VARIABLE,
                    .location = {.poolIndex = 0, .offset = 0, .length = 0},
            },
            {
                    // operands[4]: temporary; part of the 3 <-> 4 cycle
                    .type = OperandType::TENSOR_FLOAT32,
                    .dimensions = {1},
                    .numberOfConsumers = 2,
                    .scale = 0.0f,
                    .zeroPoint = 0,
                    .lifetime = OperandLifeTime::TEMPORARY_VARIABLE,
                    .location = {.poolIndex = 0, .offset = 0, .length = 0},
            },
            {
                    // operands[5]: model output
                    .type = OperandType::TENSOR_FLOAT32,
                    .dimensions = {1},
                    .numberOfConsumers = 0,
                    .scale = 0.0f,
                    .zeroPoint = 0,
                    .lifetime = OperandLifeTime::SUBGRAPH_OUTPUT,
                    .location = {.poolIndex = 0, .offset = 0, .length = 0},
            },
    };

    // Operation 0 reads operand 4 before it is produced by operation 1,
    // while operation 1 reads operand 3 produced by operation 0 -- a cycle.
    const std::vector<Operation> operations = {
            {.type = OperationType::ADD, .inputs = {0, 4, 2}, .outputs = {3}},
            {.type = OperationType::ADD, .inputs = {1, 3, 2}, .outputs = {4}},
            {.type = OperationType::ADD, .inputs = {4, 0, 2}, .outputs = {5}},
    };

    Subgraph subgraph = {
            .operands = operands,
            .operations = operations,
            .inputIndexes = {0, 1, 2},
            .outputIndexes = {5},
    };
    const Model model = {
            .main = std::move(subgraph),
            .referenced = {},
            .operandValues = {},
            .pools = {},
    };

    // ensure that getSupportedOperations_1_3() checks model validity
    ErrorStatus supportedOpsErrorStatus = ErrorStatus::GENERAL_FAILURE;
    Return<void> supportedOpsReturn = kDevice->getSupportedOperations_1_3(
            model, [&model, &supportedOpsErrorStatus](ErrorStatus status,
                                                      const hidl_vec<bool>& supported) {
                supportedOpsErrorStatus = status;
                if (status == ErrorStatus::NONE) {
                    ASSERT_EQ(supported.size(), model.main.operations.size());
                }
            });
    ASSERT_TRUE(supportedOpsReturn.isOk());
    // A cyclic model is invalid, so the service must report INVALID_ARGUMENT.
    ASSERT_EQ(supportedOpsErrorStatus, ErrorStatus::INVALID_ARGUMENT);

    // ensure that prepareModel_1_3() checks model validity
    sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback;
    Return<ErrorStatus> prepareLaunchReturn = kDevice->prepareModel_1_3(
            model, ExecutionPreference::FAST_SINGLE_ANSWER, Priority::MEDIUM, {},
            hidl_vec<hidl_handle>(), hidl_vec<hidl_handle>(), HidlToken(), preparedModelCallback);
    ASSERT_TRUE(prepareLaunchReturn.isOk());
    // Note that preparation can fail for reasons other than an
    // invalid model (invalid model should result in
    // INVALID_ARGUMENT) -- for example, perhaps not all
    // operations are supported, or perhaps the device hit some
    // kind of capacity limit.
    EXPECT_NE(prepareLaunchReturn, ErrorStatus::NONE);
    EXPECT_NE(preparedModelCallback->getStatus(), ErrorStatus::NONE);
    EXPECT_EQ(preparedModelCallback->getPreparedModel(), nullptr);
}
209
210 } // namespace android::hardware::neuralnetworks::V1_3::vts::functional
211