/*
 * Copyright (C) 2019 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package android.hardware.neuralnetworks@1.3;

import @1.1::ExecutionPreference;
import @1.2::Constant;
import @1.2::DeviceType;
import @1.2::Extension;
import @1.2::IDevice;
import BufferDesc;
import BufferRole;
import Capabilities;
import ErrorStatus;
import Model;
import OptionalTimePoint;
import Priority;
import IBuffer;
import IPreparedModel;
import IPreparedModelCallback;

/**
 * This interface represents a device driver.
 */
interface IDevice extends @1.2::IDevice {
    /**
     * Gets the capabilities of a driver.
     *
     * @return status Error status of the call, must be:
     *                - NONE if successful
     *                - DEVICE_UNAVAILABLE if driver is offline or busy
     *                - GENERAL_FAILURE if there is an unspecified error
     * @return capabilities Capabilities of the driver.
     */
    getCapabilities_1_3() generates (ErrorStatus status, Capabilities capabilities);

    /**
     * Gets the supported operations in a model.
     *
     * getSupportedOperations indicates which operations of the top-level
     * subgraph are fully supported by the vendor driver. If an operation may
     * not be supported for any reason, getSupportedOperations must return
     * false for that operation.
     *
     * The {@link OperationType::IF} and {@link OperationType::WHILE}
     * operations may only be fully supported if the vendor driver fully
     * supports all operations in the referenced subgraphs.
     *
     * @param model A model whose operations--and their corresponding operands--
     *     are to be verified by the driver.
     * @return status Error status of the call, must be:
     *     - NONE if successful
     *     - DEVICE_UNAVAILABLE if driver is offline or busy
     *     - GENERAL_FAILURE if there is an unspecified error
     *     - INVALID_ARGUMENT if provided model is invalid
     * @return supportedOperations A list of supported operations, where true
     *     indicates the operation is supported and false indicates the
     *     operation is not supported. The index of "supported" corresponds with
     *     the index of the operation it is describing.
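     *
     * As a rough illustration only (not part of this interface), a driver's
     * C++ implementation of this method might follow the sketch below, where
     * "Driver" and "canAccelerate" are hypothetical names:
     *
     *     Return<void> Driver::getSupportedOperations_1_3(
     *             const Model& model, getSupportedOperations_1_3_cb cb) {
     *         // One entry per operation of the main subgraph, in order.
     *         std::vector<bool> supported(model.main.operations.size());
     *         for (size_t i = 0; i < supported.size(); ++i) {
     *             supported[i] = canAccelerate(model.main.operations[i]);
     *         }
     *         cb(ErrorStatus::NONE, supported);
     *         return Void();
     *     }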
     */
    getSupportedOperations_1_3(Model model)
        generates (ErrorStatus status, vec<bool> supportedOperations);

    /**
     * Asynchronously creates a prepared model for execution and optionally
     * saves it into cache files.
     *
     * prepareModel is used to make any necessary transformations to, or
     * alternative representations of, a model for execution, possibly including
     * transformations on the constant data, optimization on the model's graph,
     * or compilation into the device's native binary format. The model itself
     * is not changed.
     *
     * Optionally, caching information may be provided for the driver to save
     * the prepared model to cache files for faster model compilation when
     * the same model preparation is requested in the future. There are two
     * types of cache file handles provided to the driver: model cache and data
     * cache. For more information on the two types of cache handles, refer to
     * getNumberOfCacheFilesNeeded.
     *
     * The file descriptors must be opened with read and write permission. A
     * file may have any size, and the corresponding file descriptor may have
     * any offset. The driver must truncate a file to zero size before writing
     * to that file. The file descriptors may be closed by the client once the
     * asynchronous preparation has finished. The driver must dup a file
     * descriptor if it wants to get access to the cache file later.
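     *
     * For example (illustrative only), a driver that wants to keep using the
     * first model cache file after prepareModel returns might dup its file
     * descriptor, where "keptFd" is a hypothetical variable:
     *
     *     const native_handle_t* h = modelCache[0].getNativeHandle();
     *     int keptFd = (h != nullptr && h->numFds > 0) ? dup(h->data[0]) : -1;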
     *
     * The model is prepared asynchronously with respect to the caller. The
     * prepareModel function must verify that the inputs to the prepareModel
     * function related to preparing the model (as opposed to saving the
     * prepared model to cache) are correct. If there is an error, prepareModel
     * must immediately invoke the callback with the appropriate ErrorStatus
     * value and nullptr for the IPreparedModel, then return with the same
     * ErrorStatus. If the inputs to the prepareModel function that are related
     * to preparing the model are valid and there is no error, prepareModel must
     * launch an asynchronous task to prepare the model in the background, and
     * immediately return from prepareModel with ErrorStatus::NONE. If the
     * asynchronous task fails to launch, prepareModel must immediately invoke
     * the callback with ErrorStatus::GENERAL_FAILURE and nullptr for the
     * IPreparedModel, then return with ErrorStatus::GENERAL_FAILURE.
     *
     * When the asynchronous task has finished preparing the model, it must
     * immediately invoke the callback function provided as an input to
     * prepareModel. If the model was prepared successfully, the callback object
     * must be invoked with an error status of ErrorStatus::NONE and the
     * produced IPreparedModel object. If an error occurred preparing the model,
     * the callback object must be invoked with the appropriate ErrorStatus
     * value and nullptr for the IPreparedModel.
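     *
     * For illustration only, a driver might implement this control flow along
     * the lines of the following C++ sketch; "Driver", "validate", and
     * "compileModel" are hypothetical names, and cache handling is omitted:
     *
     *     Return<ErrorStatus> Driver::prepareModel_1_3(
     *             const Model& model, ExecutionPreference preference,
     *             Priority priority, const OptionalTimePoint& deadline,
     *             const hidl_vec<hidl_handle>& modelCache,
     *             const hidl_vec<hidl_handle>& dataCache,
     *             const hidl_array<uint8_t, 32>& token,
     *             const sp<IPreparedModelCallback>& callback) {
     *         if (callback == nullptr) return ErrorStatus::INVALID_ARGUMENT;
     *         if (!validate(model, preference, priority, deadline)) {
     *             callback->notify_1_3(ErrorStatus::INVALID_ARGUMENT, nullptr);
     *             return ErrorStatus::INVALID_ARGUMENT;
     *         }
     *         // Prepare in the background and notify exactly once when done.
     *         std::thread([=] {
     *             auto [status, preparedModel] = compileModel(model);
     *             callback->notify_1_3(status, preparedModel);
     *         }).detach();
     *         return ErrorStatus::NONE;
     *     }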
     *
     * The model is prepared with a priority. This priority is relative to other
     * prepared models owned by the same client. Higher priority executions may
     * use more compute resources than lower priority executions, and may
     * preempt or starve lower priority executions.
     *
     * prepareModel_1_3 can be called with an optional deadline. If the model
     * is not able to be prepared before the provided deadline, the model
     * preparation may be aborted, and either {@link
     * ErrorStatus::MISSED_DEADLINE_TRANSIENT} or {@link
     * ErrorStatus::MISSED_DEADLINE_PERSISTENT} may be returned. The error due
     * to an abort must be sent the same way as other errors, described above.
     * The deadline is represented as nanoseconds since the epoch of the steady
     * clock (as if from std::chrono::steady_clock::time_point), but the service
     * may convert it to nanoseconds since boot time (as if from
     * clock_gettime(CLOCK_BOOTTIME, &ts) or
     * android::base::boot_clock::time_point) to account for time when the
     * system is suspended. This conversion can be done by finding the timeout
     * duration remaining compared to the steady_clock and adding it to the
     * current boot_clock time.
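     *
     * A minimal sketch of that conversion, assuming the deadline has already
     * been unpacked into a std::chrono::steady_clock::time_point named
     * "deadlineSteady" (a hypothetical local variable):
     *
     *     auto remaining = deadlineSteady - std::chrono::steady_clock::now();
     *     auto deadlineBoot = android::base::boot_clock::now() + remaining;
     *     // deadlineBoot expresses the same deadline on the boot clock, which
     *     // keeps counting while the system is suspended.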
     *
     * Optionally, the driver may save the prepared model to cache during the
     * asynchronous preparation. Any error that occurs when saving to cache must
     * not affect the status of preparing the model. Even if the input arguments
     * related to the cache are invalid, or the driver fails to save to cache,
     * the prepareModel function must finish preparing the model. The driver
     * may choose not to save to cache even if the caching information is
     * provided and valid.
     *
     * The only information that may be unknown to the model at this stage is
     * the shape of the tensors, which may only be known at execution time. As
     * such, some driver services may return partially prepared models, where
     * the prepared model may only be finished when it is paired with a set of
     * inputs to the model. Note that the same prepared model object may be used
     * with different shapes of inputs on different (possibly concurrent)
     * executions.
     *
     * Multiple threads may call prepareModel on the same model concurrently.
     *
     * @param model The model to be prepared for execution.
     * @param preference Indicates the intended execution behavior of a prepared
     *     model.
     * @param priority The priority of the prepared model relative to other
     *     prepared models owned by the client.
     * @param deadline The time by which the model is expected to be prepared.
     *     If the model cannot be prepared by the deadline, the preparation may
     *     be aborted.
     * @param modelCache A vector of handles with each entry holding exactly one
     *     cache file descriptor for the security-sensitive cache. The length of
     *     the vector must either be 0 indicating that caching information is
     *     not provided, or match the numModelCache returned from
     *     getNumberOfCacheFilesNeeded. The cache handles will be provided in
     *     the same order when retrieving the preparedModel from cache files
     *     with prepareModelFromCache_1_3.
     * @param dataCache A vector of handles with each entry holding exactly one
     *     cache file descriptor for the constants' cache. The length of the
     *     vector must either be 0 indicating that caching information is not
     *     provided, or match the numDataCache returned from
     *     getNumberOfCacheFilesNeeded. The cache handles will be provided in
     *     the same order when retrieving the preparedModel from cache files
     *     with prepareModelFromCache_1_3.
     * @param token A caching token of length Constant::BYTE_SIZE_OF_CACHE_TOKEN
     *     identifying the prepared model. The same token will be provided when
     *     retrieving the prepared model from the cache files with
     *     prepareModelFromCache_1_3. Tokens should be chosen to have a low rate of
     *     collision for a particular application. The driver cannot detect a
     *     collision; a collision will result in a failed execution or in a
     *     successful execution that produces incorrect output values. If both
     *     modelCache and dataCache are empty indicating that caching
     *     information is not provided, this token must be ignored.
     * @param callback A callback object used to return the error status of
     *     preparing the model for execution and the prepared model if
     *     successful, nullptr otherwise. The callback object's notify function
     *     must be called exactly once, even if the model could not be prepared.
     * @return status Error status of launching a task which prepares the model
     *     in the background; must be:
     *     - NONE if preparation task is successfully launched
     *     - DEVICE_UNAVAILABLE if driver is offline or busy
     *     - GENERAL_FAILURE if there is an unspecified error
     *     - INVALID_ARGUMENT if one of the input arguments related to preparing
     *       the model is invalid
     *     - MISSED_DEADLINE_* if the preparation is aborted because the model
     *       cannot be prepared by the deadline
     *     - RESOURCE_EXHAUSTED_* if the task was aborted by the driver
     */
    prepareModel_1_3(Model model, ExecutionPreference preference,
                     Priority priority, OptionalTimePoint deadline,
                     vec<handle> modelCache, vec<handle> dataCache,
                     uint8_t[Constant:BYTE_SIZE_OF_CACHE_TOKEN] token,
                     IPreparedModelCallback callback)
        generates (ErrorStatus status);

    /**
     * Creates a prepared model from cache files for execution.
     *
     * prepareModelFromCache_1_3 is used to retrieve a prepared model directly from
     * cache files to avoid slow model compilation. There are
     * two types of cache file handles provided to the driver: model cache
     * and data cache. For more information on the two types of cache handles,
     * refer to getNumberOfCacheFilesNeeded.
     *
     * The file descriptors must be opened with read and write permission. A file may
     * have any size, and the corresponding file descriptor may have any offset. The
     * driver must truncate a file to zero size before writing to that file. The file
     * descriptors may be closed by the client once the asynchronous preparation has
     * finished. The driver must dup a file descriptor if it wants to get access to
     * the cache file later.
     *
     * The model is prepared asynchronously with respect to the caller. The
     * prepareModelFromCache_1_3 function must verify that the inputs to the
     * prepareModelFromCache_1_3 function are correct, and that the security-sensitive
     * cache has not been modified since it was last written by the driver.
     * If there is an error, or if compilation caching is not supported, or if the
     * security-sensitive cache has been modified, prepareModelFromCache_1_3 must
     * immediately invoke the callback with the appropriate ErrorStatus value and
     * nullptr for the IPreparedModel, then return with the same ErrorStatus. If
     * the inputs to the prepareModelFromCache_1_3 function are valid, the security-sensitive
     * cache is not modified, and there is no error, prepareModelFromCache_1_3 must launch an
     * asynchronous task to prepare the model in the background, and immediately return
     * from prepareModelFromCache_1_3 with ErrorStatus::NONE. If the asynchronous task
     * fails to launch, prepareModelFromCache_1_3 must immediately invoke the callback
     * with ErrorStatus::GENERAL_FAILURE and nullptr for the IPreparedModel, then
     * return with ErrorStatus::GENERAL_FAILURE.
     *
     * When the asynchronous task has finished preparing the model, it must
     * immediately invoke the callback function provided as an input to
     * prepareModelFromCache_1_3. If the model was prepared successfully, the
     * callback object must be invoked with an error status of ErrorStatus::NONE
     * and the produced IPreparedModel object. If an error occurred preparing
     * the model, the callback object must be invoked with the appropriate
     * ErrorStatus value and nullptr for the IPreparedModel.
     *
     * prepareModelFromCache_1_3 can be called with an optional deadline. If the
     * model is not able to be prepared before the provided deadline, the model
     * preparation may be aborted, and either {@link
     * ErrorStatus::MISSED_DEADLINE_TRANSIENT}
     * or {@link ErrorStatus::MISSED_DEADLINE_PERSISTENT} may be returned. The
     * error due to an abort must be sent the same way as other errors,
     * described above. The deadline is represented as nanoseconds since the
     * epoch of the steady clock (as if from
     * std::chrono::steady_clock::time_point), but the service may convert it to
     * nanoseconds since boot time (as if from
     * clock_gettime(CLOCK_BOOTTIME, &ts) or
     * android::base::boot_clock::time_point) to account for time when the
     * system is suspended. This conversion can be done by finding the timeout
     * duration remaining compared to the steady_clock and adding it to the
     * current boot_clock time.
     *
     * The only information that may be unknown to the model at this stage is
     * the shape of the tensors, which may only be known at execution time. As
     * such, some driver services may return partially prepared models, where
     * the prepared model may only be finished when it is paired with a set of
     * inputs to the model. Note that the same prepared model object may be
     * used with different shapes of inputs on different (possibly concurrent)
     * executions.
     *
     * @param deadline The time by which the model is expected to be prepared.
     *     If the model cannot be prepared by the deadline, the preparation may
     *     be aborted.
     * @param modelCache A vector of handles with each entry holding exactly one
     *     cache file descriptor for the security-sensitive cache. The length of
     *     the vector must match the numModelCache returned from getNumberOfCacheFilesNeeded.
     *     The cache handles will be provided in the same order as with prepareModel_1_3.
     * @param dataCache A vector of handles with each entry holding exactly one
     *     cache file descriptor for the constants' cache. The length of the vector
     *     must match the numDataCache returned from getNumberOfCacheFilesNeeded.
     *     The cache handles will be provided in the same order as with prepareModel_1_3.
     * @param token A caching token of length Constant::BYTE_SIZE_OF_CACHE_TOKEN
     *     identifying the prepared model. It is the same token provided when saving
     *     the cache files with prepareModel_1_3. Tokens should be chosen
     *     to have a low rate of collision for a particular application. The driver
     *     cannot detect a collision; a collision will result in a failed execution
     *     or in a successful execution that produces incorrect output values.
     * @param callback A callback object used to return the error status of
     *     preparing the model for execution and the prepared model if
     *     successful, nullptr otherwise. The callback object's notify function
     *     must be called exactly once, even if the model could not be prepared.
     * @return status Error status of launching a task which prepares the model
     *     in the background; must be:
     *     - NONE if preparation task is successfully launched
     *     - DEVICE_UNAVAILABLE if driver is offline or busy
     *     - GENERAL_FAILURE if caching is not supported or if there is an
     *       unspecified error
     *     - INVALID_ARGUMENT if one of the input arguments is invalid
     *     - MISSED_DEADLINE_* if the preparation is aborted because the model
     *       cannot be prepared by the deadline
     *     - RESOURCE_EXHAUSTED_* if the task was aborted by the driver
     */
    prepareModelFromCache_1_3(OptionalTimePoint deadline,
                              vec<handle> modelCache, vec<handle> dataCache,
                              uint8_t[Constant:BYTE_SIZE_OF_CACHE_TOKEN] token,
                              IPreparedModelCallback callback)
            generates (ErrorStatus status);

    /**
     * Allocates a driver-managed buffer with the properties specified by the buffer descriptor
     * as well as the input and output roles.
     *
     * The allocate function must verify that its inputs are correct. If there is an error, or if a
     * certain role or property is not supported by the driver, the allocate
     * function must return with an appropriate ErrorStatus, a nullptr as the IBuffer, and 0 as the
     * buffer token. If the allocation is successful, this method must return with ErrorStatus::NONE
     * and the produced IBuffer with a positive token identifying the allocated buffer. A successful
     * allocation must accommodate all of the specified roles and buffer properties.
     *
     * The buffer is allocated to an uninitialized state. An uninitialized buffer may only be used
     * in ways that are specified by outputRoles. A buffer is initialized after it is used as an
     * output in a successful execution, or after a successful invocation of IBuffer::copyFrom on
     * the buffer. An initialized buffer may be used according to all roles specified in inputRoles
     * and outputRoles. A buffer will return to the uninitialized state if it is used as an output
     * in a failed execution, or after a failed invocation of IBuffer::copyFrom on the buffer.
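     *
     * The buffer lifecycle described above can be summarized as follows
     * (illustrative only, not part of this interface):
     *
     *     // uninitialized --(successful execution writing the buffer as an
     *     //                  output, or successful IBuffer::copyFrom)--> initialized
     *     // initialized   --(failed execution writing the buffer as an
     *     //                  output, or failed IBuffer::copyFrom)--> uninitialized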
     *
     * The dimensions of the buffer can be deduced from the buffer descriptor as well as the
     * dimensions of the corresponding model operands of the input and output roles. The dimensions
     * or rank of the buffer may be unknown at this stage. As such, some driver services may only
     * create a placeholder and defer the actual allocation until execution time. Note that the
     * same buffer may be used for different shapes of outputs on different executions. When the
     * buffer is used as an input, the input shape must be the same as the output shape from the
     * last execution using this buffer as an output.
     *
     * The driver must apply proper validation upon every usage of the buffer, and must fail the
     * execution immediately if the usage is illegal.
     *
     * @param desc A buffer descriptor specifying the properties of the buffer to allocate.
     * @param preparedModels A vector of IPreparedModel objects. Must only contain IPreparedModel
     *     objects from the same IDevice as this method is being invoked on.
     * @param inputRoles A vector of roles with each specifying an input to a prepared model.
     * @param outputRoles A vector of roles with each specifying an output to a prepared model.
     *     Each role specified in inputRoles and outputRoles must be unique. The corresponding
     *     model operands of the roles must have the same OperandType, scale, zero point, and
     *     ExtraParams. The dimensions of the operands and the dimensions specified in the buffer
     *     descriptor must be compatible with each other. Two dimensions are incompatible if there
     *     is at least one axis that is fully specified in both but has different values; see the
     *     sketch at the end of this comment.
     * @return status Error status of the buffer allocation. Must be:
     *     - NONE if successful
     *     - DEVICE_UNAVAILABLE if driver is offline or busy
     *     - GENERAL_FAILURE if a certain buffer property or a certain role is not supported,
     *       or if there is an unspecified error
     *     - INVALID_ARGUMENT if one of the input arguments is invalid
     * @return buffer The allocated IBuffer object. If the buffer was unable to be allocated
     *     due to an error, nullptr must be returned.
     * @return token A positive token identifying the allocated buffer. The same token will be
     *     provided when referencing the buffer as one of the memory pools in the request of an
     *     execution. The token must not collide with the tokens of other IBuffer objects that are
     *     currently alive in the same driver service. If the buffer was unable to be allocated
     *     due to an error, the token must be 0.
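     *
     * As an illustration of the dimension compatibility rule above (a hedged
     * sketch, not part of this interface), where a dimension value of 0 means
     * "unspecified":
     *
     *     // Returns true if the two dimension vectors are compatible, i.e. no
     *     // axis that is fully specified in both has different values.
     *     bool dimsCompatible(const std::vector<uint32_t>& a,
     *                         const std::vector<uint32_t>& b) {
     *         if (a.empty() || b.empty()) return true;   // rank unknown
     *         if (a.size() != b.size()) return false;    // both ranks known, differ
     *         for (size_t i = 0; i < a.size(); ++i) {
     *             if (a[i] != 0 && b[i] != 0 && a[i] != b[i]) return false;
     *         }
     *         return true;
     *     }
     *
     *     // Example: {3, 0, 4} and {3, 5, 4} are compatible; {3, 5, 4} and
     *     // {3, 6, 4} are not, because the second axis differs.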
     */
    allocate(BufferDesc desc, vec<IPreparedModel> preparedModels, vec<BufferRole> inputRoles,
             vec<BufferRole> outputRoles)
            generates (ErrorStatus status, IBuffer buffer, uint32_t token);
};