/*
 * Copyright (C) 2018 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package android.hardware.neuralnetworks@1.2;

import @1.0::ErrorStatus;
import @1.1::ExecutionPreference;
import @1.1::IDevice;
import IPreparedModelCallback;

/**
 * This interface represents a device driver.
 */
interface IDevice extends @1.1::IDevice {
    /**
     * Get the version string of the driver implementation.
     *
     * The version string must be a unique token among the set of version strings of
     * drivers of a specific device. The token identifies the device driver's
     * implementation. The token must not be confused with the feature level which is solely
     * defined by the interface version. This API is opaque to the Android framework, but the
     * Android framework may use the information for debugging or to pass on to NNAPI applications.
     *
     * Application developers sometimes have specific requirements to ensure good user experiences,
     * and they need more information to make intelligent decisions when the Android framework cannot.
     * For example, combined with the device name and other information, the token can help
     * NNAPI applications filter devices based on their needs:
     *     - An application demands a certain level of performance, but a specific version of
     *       the driver cannot meet that requirement because of a performance regression.
     *       The application can disallow the driver based on the version provided.
     *     - An application has a minimum precision requirement, but certain versions of
     *       the driver cannot meet that requirement because of bugs or certain optimizations.
     *       The application can filter out versions of these drivers.
     *
     * @return status Error status returned from querying the version string. Must be:
     *     - NONE if the query was successful
     *     - DEVICE_UNAVAILABLE if driver is offline or busy
     *     - GENERAL_FAILURE if the query resulted in an
     *       unspecified error
     * @return version The version string of the device implementation.
     *     Must have nonzero length
     */
    getVersionString() generates (ErrorStatus status, string version);
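
    /*
     * Illustrative, non-normative sketch (not part of this interface): one way an
     * NNAPI application might act on the version string described above, which the
     * NDK exposes through ANeuralNetworksDevice_getVersion(). The device name
     * "example-dsp" and the rejected version "1.3.0" are hypothetical values chosen
     * only to show the filtering pattern.
     *
     *     #include <android/NeuralNetworks.h>
     *     #include <cstring>
     *
     *     // Returns true if this device is the hypothetical driver release that the
     *     // application wants to avoid (e.g. because of a known performance regression).
     *     bool shouldSkipDevice(const ANeuralNetworksDevice* device) {
     *         const char* name = nullptr;
     *         const char* version = nullptr;
     *         if (ANeuralNetworksDevice_getName(device, &name) != ANEURALNETWORKS_NO_ERROR ||
     *             ANeuralNetworksDevice_getVersion(device, &version) != ANEURALNETWORKS_NO_ERROR) {
     *             return true;  // Conservatively skip devices that cannot be queried.
     *         }
     *         return std::strcmp(name, "example-dsp") == 0 && std::strcmp(version, "1.3.0") == 0;
     *     }
     */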

    /**
     * Get the type of a given device.
     *
     * The device type can be used to help application developers to distribute
     * Machine Learning workloads and other workloads such as graphical rendering.
     * E.g., for an app which renders AR scenes based on real time object detection
     * results, the developer could choose an ACCELERATOR type device for ML
     * workloads, and reserve GPU for graphical rendering.
     *
     * @return status Error status returned from querying the device type. Must be:
     *                - NONE if the query was successful
     *                - DEVICE_UNAVAILABLE if driver is offline or busy
     *                - GENERAL_FAILURE if the query resulted in an
     *                  unspecified error
     * @return type The DeviceType of the device. Please note, this is not a
     *              bitfield of DeviceTypes. Each device must only be of a
     *              single DeviceType.
     */
    getType() generates (ErrorStatus status, DeviceType type);
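
    /*
     * Illustrative, non-normative sketch (not part of this interface): the
     * device-type based selection described above, expressed with the NDK device
     * enumeration API. The policy shown (prefer an ACCELERATOR device so the GPU
     * stays free for rendering) is an example, not a recommendation of this interface.
     *
     *     #include <android/NeuralNetworks.h>
     *
     *     // Returns the first ACCELERATOR-type device, or nullptr if none is available.
     *     ANeuralNetworksDevice* findAcceleratorDevice() {
     *         uint32_t count = 0;
     *         if (ANeuralNetworks_getDeviceCount(&count) != ANEURALNETWORKS_NO_ERROR) {
     *             return nullptr;
     *         }
     *         for (uint32_t i = 0; i < count; ++i) {
     *             ANeuralNetworksDevice* device = nullptr;
     *             int32_t type = ANEURALNETWORKS_DEVICE_UNKNOWN;
     *             if (ANeuralNetworks_getDevice(i, &device) == ANEURALNETWORKS_NO_ERROR &&
     *                 ANeuralNetworksDevice_getType(device, &type) == ANEURALNETWORKS_NO_ERROR &&
     *                 type == ANEURALNETWORKS_DEVICE_ACCELERATOR) {
     *                 return device;  // Devices are owned by the runtime; no release needed.
     *             }
     *         }
     *         return nullptr;
     *     }
     */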

    /**
     * Gets the capabilities of a driver.
     *
     * @return status Error status of the call, must be:
     *                - NONE if successful
     *                - DEVICE_UNAVAILABLE if driver is offline or busy
     *                - GENERAL_FAILURE if there is an unspecified error
     * @return capabilities Capabilities of the driver.
     */
    getCapabilities_1_2() generates (ErrorStatus status, Capabilities capabilities);

    /**
     * Gets information about extensions supported by the driver implementation.
     *
     * All extension operations and operands must be fully supported for the
     * extension to appear in the list of supported extensions.
     *
     * @return status Error status of the call, must be:
     *     - NONE if successful
     *     - DEVICE_UNAVAILABLE if driver is offline or busy
     *     - GENERAL_FAILURE if there is an unspecified error
     * @return extensions A list of supported extensions.
     */
    getSupportedExtensions()
        generates (ErrorStatus status, vec<Extension> extensions);

    /**
     * Gets the supported operations in a model.
     *
     * getSupportedOperations indicates which operations of a model are fully
     * supported by the vendor driver. If an operation may not be supported for
     * any reason, getSupportedOperations must return false for that operation.
     *
     * @param model A model whose operations--and their corresponding operands--
     *     are to be verified by the driver.
     * @return status Error status of the call, must be:
     *     - NONE if successful
     *     - DEVICE_UNAVAILABLE if driver is offline or busy
     *     - GENERAL_FAILURE if there is an unspecified error
     *     - INVALID_ARGUMENT if provided model is invalid
     * @return supportedOperations A list of supported operations, where true
     *     indicates the operation is supported and false indicates the
     *     operation is not supported. The index of "supported" corresponds with
     *     the index of the operation it is describing.
     */
    getSupportedOperations_1_2(Model model)
            generates (ErrorStatus status, vec<bool> supportedOperations);
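
    /*
     * Illustrative, non-normative sketch (not part of this interface): a driver-side
     * implementation of getSupportedOperations_1_2 using the generated HIDL C++
     * bindings. Device, validateModel() and isOperationSupported() are hypothetical
     * driver internals; the point of the sketch is that entry i of the returned
     * vector describes operation i of the model.
     *
     *     Return<void> Device::getSupportedOperations_1_2(const V1_2::Model& model,
     *                                                     getSupportedOperations_1_2_cb cb) {
     *         if (!validateModel(model)) {
     *             cb(V1_0::ErrorStatus::INVALID_ARGUMENT, hidl_vec<bool>{});
     *             return Void();
     *         }
     *         hidl_vec<bool> supported;
     *         supported.resize(model.operations.size());
     *         for (size_t i = 0; i < model.operations.size(); ++i) {
     *             supported[i] = isOperationSupported(model.operations[i], model);
     *         }
     *         cb(V1_0::ErrorStatus::NONE, supported);
     *         return Void();
     *     }
     */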

    /**
     * Gets the caching requirements of the driver implementation.
     *
     * There are two types of cache file descriptors provided to the driver: model cache
     * and data cache.
     *
     * The data cache is for caching constant data, possibly including preprocessed
     * and transformed tensor buffers. Any modification to the data cache should
     * have no worse effect than generating bad output values at execution time.
     *
     * The model cache is for caching security-sensitive data such as compiled
     * executable machine code in the device's native binary format. A modification
     * to the model cache may affect the driver's execution behavior, and a malicious
     * client could make use of this to execute beyond the granted permission. Thus,
     * the driver must always check whether the model cache is corrupted before
     * preparing the model from cache.
     *
     * getNumberOfCacheFilesNeeded returns how many of each type of cache files the driver
     * implementation needs to cache a single prepared model. Returning 0 for both types
     * indicates compilation caching is not supported by this driver. The driver may
     * still choose not to cache certain compiled models even if it reports that caching
     * is supported.
     *
     * If the device reports that caching is not supported, the user may avoid calling
     * IDevice::prepareModelFromCache or providing cache file descriptors to
     * IDevice::prepareModel_1_2.
     *
     * @return status Error status of the call, must be:
     *     - NONE if successful
     *     - DEVICE_UNAVAILABLE if driver is offline or busy
     *     - GENERAL_FAILURE if there is an unspecified error
     * @return numModelCache An unsigned integer indicating how many files for model cache
     *                       the driver needs to cache a single prepared model. It must
     *                       be less than or equal to Constant::MAX_NUMBER_OF_CACHE_FILES.
     * @return numDataCache An unsigned integer indicating how many files for data cache
     *                      the driver needs to cache a single prepared model. It must
     *                      be less than or equal to Constant::MAX_NUMBER_OF_CACHE_FILES.
     */
    getNumberOfCacheFilesNeeded()
            generates (ErrorStatus status, uint32_t numModelCache, uint32_t numDataCache);
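
    /*
     * Illustrative, non-normative sketch (not part of this interface): a driver-side
     * implementation of getNumberOfCacheFilesNeeded. The counts below are an
     * assumption about one hypothetical driver that stores a single compiled binary
     * and a single preprocessed-constants file; a driver without compilation caching
     * would report 0 for both.
     *
     *     Return<void> Device::getNumberOfCacheFilesNeeded(getNumberOfCacheFilesNeeded_cb cb) {
     *         // Both values must be <= Constant::MAX_NUMBER_OF_CACHE_FILES.
     *         constexpr uint32_t kNumModelCache = 1;  // compiled binary
     *         constexpr uint32_t kNumDataCache = 1;   // preprocessed constants
     *         cb(V1_0::ErrorStatus::NONE, kNumModelCache, kNumDataCache);
     *         return Void();
     *     }
     */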

    /**
     * Asynchronously creates a prepared model for execution and optionally saves it
     * into cache files.
     *
     * prepareModel is used to make any necessary transformations or alternative
     * representations to a model for execution, possibly including
     * transformations on the constant data, optimization on the model's graph,
     * or compilation into the device's native binary format. The model itself
     * is not changed.
     *
     * Optionally, caching information may be provided for the driver to save
     * the prepared model to cache files for faster model compilation time
     * when the same model preparation is requested in the future. There are
     * two types of cache file handles provided to the driver: model cache
     * and data cache. For more information on the two types of cache handles,
     * refer to getNumberOfCacheFilesNeeded.
     *
     * The file descriptors must be opened with read and write permission. A file may
     * have any size, and the corresponding file descriptor may have any offset. The
     * driver must truncate a file to zero size before writing to that file. The file
     * descriptors may be closed by the client once the asynchronous preparation has
     * finished. The driver must dup a file descriptor if it wants to get access to
     * the cache file later.
     *
     * The model is prepared asynchronously with respect to the caller. The
     * prepareModel function must verify that the inputs to the prepareModel function
     * related to preparing the model (as opposed to saving the prepared model to
     * cache) are correct. If there is an error, prepareModel must immediately invoke
     * the callback with the appropriate ErrorStatus value and nullptr for the
     * IPreparedModel, then return with the same ErrorStatus. If the inputs to the
     * prepareModel function that are related to preparing the model are valid and
     * there is no error, prepareModel must launch an asynchronous task
     * to prepare the model in the background, and immediately return from
     * prepareModel with ErrorStatus::NONE. If the asynchronous task fails to launch,
     * prepareModel must immediately invoke the callback with
     * ErrorStatus::GENERAL_FAILURE and nullptr for the IPreparedModel, then return
     * with ErrorStatus::GENERAL_FAILURE.
     *
     * When the asynchronous task has finished preparing the model, it must
     * immediately invoke the callback function provided as an input to
     * prepareModel. If the model was prepared successfully, the callback object
     * must be invoked with an error status of ErrorStatus::NONE and the
     * produced IPreparedModel object. If an error occurred preparing the model,
     * the callback object must be invoked with the appropriate ErrorStatus
     * value and nullptr for the IPreparedModel.
     *
     * Optionally, the driver may save the prepared model to cache during the
     * asynchronous preparation. Any error that occurs when saving to cache must
     * not affect the status of preparing the model. Even if the input arguments
     * related to the cache may be invalid, or the driver may fail to save to cache,
     * the prepareModel function must finish preparing the model. The driver
     * may choose not to save to cache even if the caching information is
     * provided and valid.
     *
     * The only information that may be unknown to the model at this stage is
     * the shape of the tensors, which may only be known at execution time. As
     * such, some driver services may return partially prepared models, where
     * the prepared model may only be finished when it is paired with a set of
     * inputs to the model. Note that the same prepared model object may be
     * used with different shapes of inputs on different (possibly concurrent)
     * executions.
     *
     * Multiple threads may call prepareModel on the same model concurrently.
     *
     * @param model The model to be prepared for execution.
     * @param preference Indicates the intended execution behavior of a prepared
     *     model.
     * @param modelCache A vector of handles with each entry holding exactly one
     *     cache file descriptor for the security-sensitive cache. The length of
     *     the vector must either be 0 indicating that caching information is not provided,
     *     or match the numModelCache returned from getNumberOfCacheFilesNeeded. The cache
     *     handles will be provided in the same order when retrieving the
     *     preparedModel from cache files with prepareModelFromCache.
     * @param dataCache A vector of handles with each entry holding exactly one
     *     cache file descriptor for the constants' cache. The length of
     *     the vector must either be 0 indicating that caching information is not provided,
     *     or match the numDataCache returned from getNumberOfCacheFilesNeeded. The cache
     *     handles will be provided in the same order when retrieving the
     *     preparedModel from cache files with prepareModelFromCache.
     * @param token A caching token of length Constant::BYTE_SIZE_OF_CACHE_TOKEN
     *     identifying the prepared model. The same token will be provided when retrieving
     *     the prepared model from the cache files with prepareModelFromCache.
     *     Tokens should be chosen to have a low rate of collision for a particular
     *     application. The driver cannot detect a collision; a collision will result
     *     in a failed execution or in a successful execution that produces incorrect
     *     output values. If both modelCache and dataCache are empty indicating that
     *     caching information is not provided, this token must be ignored.
     * @param callback A callback object used to return the error status of
     *     preparing the model for execution and the prepared model if
     *     successful, nullptr otherwise. The callback object's notify function
     *     must be called exactly once, even if the model could not be prepared.
     * @return status Error status of launching a task which prepares the model
     *     in the background; must be:
     *     - NONE if preparation task is successfully launched
     *     - DEVICE_UNAVAILABLE if driver is offline or busy
     *     - GENERAL_FAILURE if there is an unspecified error
     *     - INVALID_ARGUMENT if one of the input arguments related to preparing the
     *       model is invalid
     */
    prepareModel_1_2(Model model, ExecutionPreference preference,
                     vec<handle> modelCache, vec<handle> dataCache,
                     uint8_t[Constant:BYTE_SIZE_OF_CACHE_TOKEN] token,
                     IPreparedModelCallback callback)
          generates (ErrorStatus status);
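
    /*
     * Illustrative, non-normative sketch (not part of this interface): a driver-side
     * implementation of the asynchronous contract above, using the generated HIDL
     * C++ bindings (the 32-byte token corresponds to Constant::BYTE_SIZE_OF_CACHE_TOKEN).
     * Device, PreparedModel, validateModel() and validateExecutionPreference() are
     * hypothetical driver internals, <thread> and the generated 1.2 headers are
     * assumed to be included, and the optional saving to cache is omitted.
     *
     *     Return<V1_0::ErrorStatus> Device::prepareModel_1_2(
     *             const V1_2::Model& model, ExecutionPreference preference,
     *             const hidl_vec<hidl_handle>& modelCache,
     *             const hidl_vec<hidl_handle>& dataCache,
     *             const hidl_array<uint8_t, 32>& token,
     *             const sp<V1_2::IPreparedModelCallback>& callback) {
     *         if (callback == nullptr) {
     *             return V1_0::ErrorStatus::INVALID_ARGUMENT;
     *         }
     *         // Only arguments related to preparing the model are validated here;
     *         // invalid cache arguments must not change the reported status.
     *         if (!validateModel(model) || !validateExecutionPreference(preference)) {
     *             callback->notify_1_2(V1_0::ErrorStatus::INVALID_ARGUMENT, nullptr);
     *             return V1_0::ErrorStatus::INVALID_ARGUMENT;
     *         }
     *         sp<PreparedModel> prepared = new PreparedModel(model);
     *         std::thread([prepared, callback] {
     *             // compile() is a hypothetical helper that does the actual work.
     *             if (prepared->compile()) {
     *                 callback->notify_1_2(V1_0::ErrorStatus::NONE, prepared);
     *             } else {
     *                 callback->notify_1_2(V1_0::ErrorStatus::GENERAL_FAILURE, nullptr);
     *             }
     *         }).detach();
     *         return V1_0::ErrorStatus::NONE;
     *     }
     */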

    /**
     * Creates a prepared model from cache files for execution.
     *
     * prepareModelFromCache is used to retrieve a prepared model directly from
     * cache files to avoid slow model compilation time. There are
     * two types of cache file handles provided to the driver: model cache
     * and data cache. For more information on the two types of cache handles,
     * refer to getNumberOfCacheFilesNeeded.
     *
     * The file descriptors must be opened with read and write permission. A file may
     * have any size, and the corresponding file descriptor may have any offset. The
     * driver must truncate a file to zero size before writing to that file. The file
     * descriptors may be closed by the client once the asynchronous preparation has
     * finished. The driver must dup a file descriptor if it wants to get access to
     * the cache file later.
     *
     * The model is prepared asynchronously with respect to the caller. The
     * prepareModelFromCache function must verify that the inputs to the
     * prepareModelFromCache function are correct, and that the security-sensitive
     * cache has not been modified since it was last written by the driver.
     * If there is an error, or if compilation caching is not supported, or if the
     * security-sensitive cache has been modified, prepareModelFromCache must
     * immediately invoke the callback with the appropriate ErrorStatus value and
     * nullptr for the IPreparedModel, then return with the same ErrorStatus. If
     * the inputs to the prepareModelFromCache function are valid, the security-sensitive
     * cache is not modified, and there is no error, prepareModelFromCache must launch an
     * asynchronous task to prepare the model in the background, and immediately return
     * from prepareModelFromCache with ErrorStatus::NONE. If the asynchronous task
     * fails to launch, prepareModelFromCache must immediately invoke the callback
     * with ErrorStatus::GENERAL_FAILURE and nullptr for the IPreparedModel, then
     * return with ErrorStatus::GENERAL_FAILURE.
     *
     * When the asynchronous task has finished preparing the model, it must
     * immediately invoke the callback function provided as an input to
     * prepareModelFromCache. If the model was prepared successfully, the
     * callback object must be invoked with an error status of ErrorStatus::NONE
     * and the produced IPreparedModel object. If an error occurred preparing
     * the model, the callback object must be invoked with the appropriate
     * ErrorStatus value and nullptr for the IPreparedModel.
     *
     * The only information that may be unknown to the model at this stage is
     * the shape of the tensors, which may only be known at execution time. As
     * such, some driver services may return partially prepared models, where
     * the prepared model may only be finished when it is paired with a set of
     * inputs to the model. Note that the same prepared model object may be
     * used with different shapes of inputs on different (possibly concurrent)
     * executions.
     *
     * @param modelCache A vector of handles with each entry holding exactly one
     *     cache file descriptor for the security-sensitive cache. The length of
     *     the vector must match the numModelCache returned from getNumberOfCacheFilesNeeded.
     *     The cache handles will be provided in the same order as with prepareModel_1_2.
     * @param dataCache A vector of handles with each entry holding exactly one
     *     cache file descriptor for the constants' cache. The length of the vector
     *     must match the numDataCache returned from getNumberOfCacheFilesNeeded.
     *     The cache handles will be provided in the same order as with prepareModel_1_2.
     * @param token A caching token of length Constant::BYTE_SIZE_OF_CACHE_TOKEN
     *     identifying the prepared model. It is the same token provided when saving
     *     the cache files with prepareModel_1_2. Tokens should be chosen
     *     to have a low rate of collision for a particular application. The driver
     *     cannot detect a collision; a collision will result in a failed execution
     *     or in a successful execution that produces incorrect output values.
     * @param callback A callback object used to return the error status of
     *     preparing the model for execution and the prepared model if
     *     successful, nullptr otherwise. The callback object's notify function
     *     must be called exactly once, even if the model could not be prepared.
     * @return status Error status of launching a task which prepares the model
     *     in the background; must be:
     *     - NONE if preparation task is successfully launched
     *     - DEVICE_UNAVAILABLE if driver is offline or busy
     *     - GENERAL_FAILURE if caching is not supported or if there is an
     *       unspecified error
     *     - INVALID_ARGUMENT if one of the input arguments is invalid
     */
    prepareModelFromCache(vec<handle> modelCache, vec<handle> dataCache,
                          uint8_t[Constant:BYTE_SIZE_OF_CACHE_TOKEN] token,
                          IPreparedModelCallback callback)
            generates (ErrorStatus status);
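
    /*
     * Illustrative, non-normative sketch (not part of this interface): a driver-side
     * implementation of prepareModelFromCache that highlights the integrity check on
     * the security-sensitive model cache. verifyModelCacheIntegrity(), dupHandles()
     * (which dups the descriptors so they remain usable after this call returns, per
     * the contract above), PreparedModel::loadFromCache() and Device are all
     * hypothetical driver internals; a real driver might, for instance, store and
     * re-verify a cryptographic hash of the model cache contents.
     *
     *     Return<V1_0::ErrorStatus> Device::prepareModelFromCache(
     *             const hidl_vec<hidl_handle>& modelCache,
     *             const hidl_vec<hidl_handle>& dataCache,
     *             const hidl_array<uint8_t, 32>& token,
     *             const sp<V1_2::IPreparedModelCallback>& callback) {
     *         if (callback == nullptr) {
     *             return V1_0::ErrorStatus::INVALID_ARGUMENT;
     *         }
     *         if (!verifyModelCacheIntegrity(modelCache, token)) {
     *             // Also covers the "caching not supported" and "cache modified" cases.
     *             callback->notify_1_2(V1_0::ErrorStatus::GENERAL_FAILURE, nullptr);
     *             return V1_0::ErrorStatus::GENERAL_FAILURE;
     *         }
     *         auto modelFds = dupHandles(modelCache);
     *         auto dataFds = dupHandles(dataCache);
     *         std::thread([modelFds, dataFds, callback] {
     *             sp<PreparedModel> prepared = PreparedModel::loadFromCache(modelFds, dataFds);
     *             callback->notify_1_2(prepared == nullptr ? V1_0::ErrorStatus::GENERAL_FAILURE
     *                                                      : V1_0::ErrorStatus::NONE,
     *                                  prepared);
     *         }).detach();
     *         return V1_0::ErrorStatus::NONE;
     *     }
     */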
};