/*
 * Copyright (C) 2020 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "Callbacks.h"

#include "Conversions.h"
#include "PreparedModel.h"
#include "Utils.h"

#include <android/hardware/neuralnetworks/1.0/types.h>
#include <android/hardware/neuralnetworks/1.2/types.h>
#include <android/hardware/neuralnetworks/1.3/IExecutionCallback.h>
#include <android/hardware/neuralnetworks/1.3/IPreparedModelCallback.h>
#include <android/hardware/neuralnetworks/1.3/types.h>
#include <nnapi/IPreparedModel.h>
#include <nnapi/Result.h>
#include <nnapi/Types.h>
#include <nnapi/hal/1.0/Callbacks.h>
#include <nnapi/hal/1.0/Conversions.h>
#include <nnapi/hal/1.0/HandleError.h>
#include <nnapi/hal/1.0/PreparedModel.h>
#include <nnapi/hal/1.0/ProtectCallback.h>
#include <nnapi/hal/1.2/Callbacks.h>
#include <nnapi/hal/1.2/Conversions.h>
#include <nnapi/hal/1.2/PreparedModel.h>
#include <nnapi/hal/CommonUtils.h>
#include <nnapi/hal/TransferValue.h>

#include <utility>

// See hardware/interfaces/neuralnetworks/utils/README.md for more information on HIDL interface
// lifetimes across processes and for protecting asynchronous calls across HIDL.
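//
// Usage sketch (illustrative only; the real call sites live elsewhere in these NNAPI HAL utils):
// a caller creates one of the callback objects defined below, passes it to the corresponding
// asynchronous HIDL method (e.g. IDevice::prepareModel_1_3 or IPreparedModel::execute_1_3), and
// then calls get() to receive the canonical result once the driver has invoked notify*():
//
//     const auto cb = sp<PreparedModelCallback>::make();
//     // ... launch IDevice::prepareModel_1_3(..., cb) and check the launch status ...
//     const nn::GeneralResult<nn::SharedPreparedModel> result = cb->get();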

namespace android::hardware::neuralnetworks::V1_3::utils {
namespace {

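// Converts a V1_0 prepareModel result to the canonical form. The returned prepared model is
// downcast (via castFrom) to the newest interface it actually implements -- V1_3, then V1_2,
// then V1_0 -- so that the richest available version-specific wrapper is produced.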
nn::GeneralResult<nn::SharedPreparedModel> prepareModelCallback(
        V1_0::ErrorStatus status, const sp<V1_0::IPreparedModel>& preparedModel) {
    if (const auto dynamicPreparedModel =
                V1_3::IPreparedModel::castFrom(preparedModel).withDefault(nullptr)) {
        const auto currentVersionStatus = NN_TRY(convertFromNonCanonical(status));
        return V1_3::utils::prepareModelCallback(currentVersionStatus, dynamicPreparedModel);
    }
    if (const auto dynamicPreparedModel =
                V1_2::IPreparedModel::castFrom(preparedModel).withDefault(nullptr)) {
        return V1_2::utils::prepareModelCallback(status, dynamicPreparedModel);
    }
    return V1_0::utils::prepareModelCallback(status, preparedModel);
}

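// Converts HIDL output shapes and timing information to their canonical counterparts, failing if
// either conversion fails.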
nn::GeneralResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>
convertExecutionGeneralResultsHelper(const hidl_vec<V1_2::OutputShape>& outputShapes,
                                     const V1_2::Timing& timing) {
    return std::make_pair(NN_TRY(nn::convert(outputShapes)), NN_TRY(nn::convert(timing)));
}

}  // namespace

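// Handles the result of a getSupportedOperations* call: a failure status becomes a canonical
// error, otherwise the per-operation support flags are returned unchanged.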
nn::GeneralResult<std::vector<bool>> supportedOperationsCallback(
        ErrorStatus status, const hidl_vec<bool>& supportedOperations) {
    HANDLE_STATUS_HIDL(status) << "get supported operations failed with " << toString(status);
    return supportedOperations;
}

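// Handles the result of model preparation: a failure status becomes a canonical error, otherwise
// the V1_3 prepared model is wrapped in a canonical nn::IPreparedModel adapter.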
nn::GeneralResult<nn::SharedPreparedModel> prepareModelCallback(
        ErrorStatus status, const sp<IPreparedModel>& preparedModel) {
    HANDLE_STATUS_HIDL(status) << "model preparation failed with " << toString(status);
    return NN_TRY(PreparedModel::create(preparedModel, /*executeSynchronously=*/true));
}

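// Handles the result of an execution. OUTPUT_INSUFFICIENT_SIZE is special-cased so that the
// (possibly partial) output shapes are preserved alongside the error; any other failure status
// becomes a plain canonical error, and success yields the converted shapes and timing.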
nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> executionCallback(
        ErrorStatus status, const hidl_vec<V1_2::OutputShape>& outputShapes,
        const V1_2::Timing& timing) {
    if (status == ErrorStatus::OUTPUT_INSUFFICIENT_SIZE) {
        auto canonicalOutputShapes =
                nn::convert(outputShapes).value_or(std::vector<nn::OutputShape>{});
        return NN_ERROR(nn::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE, std::move(canonicalOutputShapes))
               << "execution failed with " << toString(status);
    }
    HANDLE_STATUS_HIDL(status) << "execution failed with " << toString(status);
    return convertExecutionGeneralResultsHelper(outputShapes, timing);
}

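// Each notify* overload converts the driver's result to canonical form and stores it in mData,
// making it available through get().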
Return<void> PreparedModelCallback::notify(V1_0::ErrorStatus status,
                                           const sp<V1_0::IPreparedModel>& preparedModel) {
    mData.put(prepareModelCallback(status, preparedModel));
    return Void();
}

Return<void> PreparedModelCallback::notify_1_2(V1_0::ErrorStatus status,
                                               const sp<V1_2::IPreparedModel>& preparedModel) {
    mData.put(prepareModelCallback(status, preparedModel));
    return Void();
}

Return<void> PreparedModelCallback::notify_1_3(ErrorStatus status,
                                               const sp<IPreparedModel>& preparedModel) {
    mData.put(prepareModelCallback(status, preparedModel));
    return Void();
}

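// Records a DEAD_OBJECT error, e.g. when the driver process dies before delivering a result
// (see nnapi/hal/1.0/ProtectCallback.h).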
void PreparedModelCallback::notifyAsDeadObject() {
    mData.put(NN_ERROR(nn::ErrorStatus::DEAD_OBJECT) << "Dead object");
}

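// Retrieves the result recorded by notify*() or notifyAsDeadObject(); the hand-off between
// threads is managed by the TransferValue member.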
PreparedModelCallback::Data PreparedModelCallback::get() {
    return mData.take();
}

// ExecutionCallback methods begin here

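// Each notify* overload dispatches to the executionCallback helper matching the HIDL version the
// driver used to report its result, and stores the canonical outcome in mData.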
Return<void> ExecutionCallback::notify(V1_0::ErrorStatus status) {
    mData.put(V1_0::utils::executionCallback(status));
    return Void();
}

Return<void> ExecutionCallback::notify_1_2(V1_0::ErrorStatus status,
                                           const hidl_vec<V1_2::OutputShape>& outputShapes,
                                           const V1_2::Timing& timing) {
    mData.put(V1_2::utils::executionCallback(status, outputShapes, timing));
    return Void();
}

Return<void> ExecutionCallback::notify_1_3(ErrorStatus status,
                                           const hidl_vec<V1_2::OutputShape>& outputShapes,
                                           const V1_2::Timing& timing) {
    mData.put(executionCallback(status, outputShapes, timing));
    return Void();
}

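// As with PreparedModelCallback: record a DEAD_OBJECT error if the driver dies prematurely, and
// expose the stored result through get().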
void ExecutionCallback::notifyAsDeadObject() {
    mData.put(NN_ERROR(nn::ErrorStatus::DEAD_OBJECT) << "Dead object");
}

ExecutionCallback::Data ExecutionCallback::get() {
    return mData.take();
}

}  // namespace android::hardware::neuralnetworks::V1_3::utils