1/*
2 * Copyright (C) 2019 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 *      http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17package android.hardware.audio@6.0;
18
19import android.hardware.audio.common@6.0;
20
/** Return values used by methods of the audio HAL interfaces. */
enum Result : int32_t {
    /** The operation completed successfully. */
    OK,
    /** The object the method is called on has not been initialized. */
    NOT_INITIALIZED,
    /** One or more of the provided arguments is invalid. */
    INVALID_ARGUMENTS,
    /** The operation can not be performed in the current state. */
    INVALID_STATE,
    /**
     * Methods marked as "Optional method" must return this result value
     * if the operation is not supported by HAL.
     */
    NOT_SUPPORTED
};
32
/** Requested behavior when draining an output stream. */
@export(name="audio_drain_type_t", value_prefix="AUDIO_DRAIN_")
enum AudioDrain : int32_t {
    /** drain() returns when all data has been played. */
    ALL,
    /**
     * drain() returns a short time before all data from the current track has
     * been played to give time for gapless track switch.
     */
    EARLY_NOTIFY
};
43
/**
 * A substitute for POSIX timespec.
 */
struct TimeSpec {
    /** Seconds component. */
    uint64_t tvSec;
    /** Nanoseconds component. */
    uint64_t tvNSec;
};
51
/** A generic key-value pair used for passing parameters as strings. */
struct ParameterValue {
    /** Parameter name. */
    string key;
    /** Parameter value, serialized as a string. */
    string value;
};
56
/** Attribute flags for an mmap buffer, see MmapBufferInfo.flags. */
enum MmapBufferFlag : uint32_t {
    /** No attributes set. */
    NONE    = 0x0,
    /**
     * If the buffer can be securely shared to untrusted applications
     * through the AAudio exclusive mode.
     * Only set this flag if applications are restricted from accessing the
     * memory surrounding the audio data buffer by a kernel mechanism.
     * See Linux kernel's dma_buf.
     */
    APPLICATION_SHAREABLE    = 0x1,
};
68
/**
 * Mmap buffer descriptor returned by IStream.createMmapBuffer().
 * Used by streams opened in mmap mode.
 */
struct MmapBufferInfo {
    /** Mmap memory buffer backing the audio data. */
    memory  sharedMemory;
    /** Total buffer size in frames. */
    uint32_t bufferSizeFrames;
    /** Transfer size granularity in frames. */
    uint32_t burstSizeFrames;
    /** Attributes describing the buffer. */
    bitfield<MmapBufferFlag> flags;
};
83
/**
 * Mmap buffer read/write position returned by IStream.getMmapPosition().
 * Used by streams opened in mmap mode.
 */
struct MmapPosition {
    /** Timestamp in nanoseconds, on the CLOCK_MONOTONIC time base. */
    int64_t  timeNanoseconds;
    /** Increasing 32 bit frame count, reset when IStream.stop() is called. */
    int32_t  positionFrames;
};
92
/**
 * The message queue flags used to synchronize reads and writes from
 * message queues used by StreamIn and StreamOut.
 */
enum MessageQueueFlagBits : uint32_t {
    /** Signals that the queue contains data available for reading. */
    NOT_EMPTY = 1 << 0,
    /** Signals that the queue has space available for writing. */
    NOT_FULL = 1 << 1
};
101
/*
 * Microphone information
 */
106
/**
 * A 3D point used to represent position or orientation of a microphone.
 *
 * Position: Coordinates of the microphone's capsule, in meters, from the
 * bottom-left-back corner of the bounding box of android device in natural
 * orientation (PORTRAIT for phones, LANDSCAPE for tablets, tvs, etc).
 * The orientation must match that reported by the API Display.getRotation().
 *
 * Orientation: Normalized vector to signal the main orientation of the
 * microphone's capsule. Magnitude = sqrt(x^2 + y^2 + z^2) = 1
 */
struct AudioMicrophoneCoordinate {
    float x;
    float y;
    float z;
};
123
/**
 * Enum to identify the type of channel mapping for active microphones.
 * Used channels further identify if the microphone has any significant
 * processing (e.g. High Pass Filtering, dynamic compression).
 * Simple processing such as constant gain adjustment must be reported as
 * DIRECT.
 */
enum AudioMicrophoneChannelMapping : uint32_t {
    UNUSED      = 0, /* Channel not used. */
    DIRECT      = 1, /* Channel used and signal not processed. */
    PROCESSED   = 2, /* Channel used and signal has some processing. */
};
135
/**
 * Enum to identify locations of microphones in regards to the body of the
 * android device.
 */
enum AudioMicrophoneLocation : uint32_t {
    /** Location is not known. */
    UNKNOWN             = 0,
    /** Microphone is on the main body of the device. */
    MAINBODY            = 1,
    /** Microphone is on a movable part of the main body. */
    MAINBODY_MOVABLE    = 2,
    /** Microphone is on a peripheral attached to the device. */
    PERIPHERAL          = 3,
};
146
/**
 * Identifier to help group related microphones together,
 * e.g. microphone arrays should belong to the same group.
 */
typedef int32_t AudioMicrophoneGroup;
152
/**
 * Enum with standard polar patterns of microphones
 */
enum AudioMicrophoneDirectionality : uint32_t {
    /** Directionality is not known. */
    UNKNOWN         = 0,
    /** Omnidirectional: picks up sound uniformly from all directions. */
    OMNI            = 1,
    /** Figure-of-eight: picks up sound from the front and the back. */
    BI_DIRECTIONAL  = 2,
    /** Heart-shaped pattern, mostly frontal pickup. */
    CARDIOID        = 3,
    /** Narrower frontal pickup than cardioid, with some rear pickup. */
    HYPER_CARDIOID  = 4,
    /** Between cardioid and hyper-cardioid in frontal narrowness. */
    SUPER_CARDIOID  = 5,
};
164
/**
 * A (frequency, level) pair. Used to represent frequency response.
 */
struct AudioFrequencyResponsePoint {
    /** Frequency, in Hz. */
    float frequency;
    /** Response level at this frequency, in dB. */
    float level;
};
174
/**
 * Structure used by the HAL to describe microphone's characteristics.
 * Used by StreamIn and Device.
 */
struct MicrophoneInfo {
    /** Unique alphanumeric id for microphone. Guaranteed to be the same
     * even after rebooting.
     */
    string                                  deviceId;
    /**
     * Device specific information.
     */
    DeviceAddress                           deviceAddress;
    /** Each element of the vector must describe the channel with the same
     *  index.
     */
    vec<AudioMicrophoneChannelMapping>      channelMapping;
    /** Location of the microphone in regard to the body of the device. */
    AudioMicrophoneLocation                 location;
    /** Identifier to help group related microphones together,
     *  e.g. microphone arrays should belong to the same group.
     */
    AudioMicrophoneGroup                    group;
    /** Index of this microphone within the group.
     *  (group, index) must be unique within the same device.
     */
    uint32_t                                indexInTheGroup;
    /** Level in dBFS produced by a 1000 Hz tone at 94 dB SPL. */
    float                                   sensitivity;
    /** Level in dB of the maximum SPL supported at 1000 Hz. */
    float                                   maxSpl;
    /** Level in dB of the minimum SPL supported at 1000 Hz. */
    float                                   minSpl;
    /** Standard polar pattern of the microphone. */
    AudioMicrophoneDirectionality           directionality;
    /** Vector with ordered frequency responses (from low to high frequencies)
     *  with the frequency response of the microphone.
     *  Levels are in dB, relative to level at 1000 Hz.
     */
    vec<AudioFrequencyResponsePoint>        frequencyResponse;
    /** Position of the microphone's capsule in meters, from the
     *  bottom-left-back corner of the bounding box of device.
     */
    AudioMicrophoneCoordinate               position;
    /** Normalized point to signal the main orientation of the microphone's
     *  capsule. sqrt(x^2 + y^2 + z^2) = 1
     */
    AudioMicrophoneCoordinate               orientation;
};
224
/**
 * Constants used by the HAL to determine how to select microphones and process those inputs in
 * order to optimize for capture in the specified direction.
 *
 * MicrophoneDirection Constants are defined in MicrophoneDirection.java,
 * and the values here must be kept in sync with them.
 */
@export(name="audio_microphone_direction_t", value_prefix="MIC_DIRECTION_")
enum MicrophoneDirection : int32_t {
    /**
     * Don't do any directionality processing of the activated microphone(s).
     */
    UNSPECIFIED = 0,
    /**
     * Optimize capture for audio coming from the screen-side of the device.
     */
    FRONT = 1,
    /**
     * Optimize capture for audio coming from the side of the device opposite the screen.
     */
    BACK = 2,
    /**
     * Optimize capture for audio coming from an off-device microphone.
     */
    EXTERNAL = 3,
};
250

/**
 * Dual Mono handling is used when a stereo audio stream
 * contains separate audio content on the left and right channels.
 * Such information about the content of the stream may be found, for example,
 * in ITU T-REC-J.94-201610 A.6.2.3 Component descriptor.
 */
@export(name="audio_dual_mono_mode_t", value_prefix="AUDIO_DUAL_MONO_MODE_")
enum DualMonoMode : int32_t {
    // Need to be in sync with DUAL_MONO_MODE* constants in
    // frameworks/base/media/java/android/media/AudioTrack.java
    /**
     * Disable any Dual Mono presentation effect.
     */
    OFF = 0,
    /**
     * This mode indicates that a stereo stream should be presented
     * with the left and right audio channels blended together
     * and delivered to both channels.
     *
     * Behavior for non-stereo streams is implementation defined.
     * A suggested guideline is that the left-right stereo symmetric
     * channels are pairwise blended, the other channels such as center
     * are left alone.
     */
    LR = 1,
    /**
     * This mode indicates that a stereo stream should be presented
     * with the left audio channel replicated into the right audio channel.
     *
     * Behavior for non-stereo streams is implementation defined.
     * A suggested guideline is that all channels with left-right
     * stereo symmetry will have the left channel position replicated
     * into the right channel position. The center channels (with no
     * left/right symmetry) or unbalanced channels are left alone.
     */
    LL = 2,
    /**
     * This mode indicates that a stereo stream should be presented
     * with the right audio channel replicated into the left audio channel.
     *
     * Behavior for non-stereo streams is implementation defined.
     * A suggested guideline is that all channels with left-right
     * stereo symmetry will have the right channel position replicated
     * into the left channel position. The center channels (with no
     * left/right symmetry) or unbalanced channels are left alone.
     */
    RR = 3,
};
300
/**
 * Algorithms used for timestretching (preserving pitch while playing audio
 * content at different speed).
 */
@export(name="audio_timestretch_stretch_mode_t", value_prefix="AUDIO_TIMESTRETCH_STRETCH_")
enum TimestretchMode : int32_t {
    // Need to be in sync with AUDIO_STRETCH_MODE_* constants in
    // frameworks/base/media/java/android/media/PlaybackParams.java
    /** Default timestretch algorithm, chosen by the implementation. */
    DEFAULT = 0,
    /** Selects timestretch algorithm best suitable for voice (speech) content. */
    VOICE = 1,
};
313
/**
 * Behavior when the values for speed and / or pitch are out
 * of applicable range.
 */
@export(name="audio_timestretch_fallback_mode_t", value_prefix="AUDIO_TIMESTRETCH_FALLBACK_")
enum TimestretchFallbackMode : int32_t {
    // Need to be in sync with AUDIO_FALLBACK_MODE_* constants in
    // frameworks/base/media/java/android/media/PlaybackParams.java
    /** Play silence for parameter values that are out of range. */
    MUTE = 1,
    /** Return an error while trying to set the parameters. */
    FAIL = 2,
};
327
/**
 * Parameters determining playback behavior. They are used to speed up or
 * slow down playback and / or change the tonal frequency of the audio content
 * (pitch).
 */
struct PlaybackRate {
    /**
     * Speed factor (multiplier). Normal speed has the value of 1.0f.
     * Values less than 1.0f slow down playback, values greater than 1.0f
     * speed it up.
     */
    float speed;
    /**
     * Pitch factor (multiplier). Normal pitch has the value of 1.0f.
     * Setting pitch value to 1.0f together
     * with changing playback speed preserves the pitch, this is often
     * called "timestretching." Setting the pitch value equal to speed produces
     * the same effect as playing audio content at different sampling rate.
     */
    float pitch;
    /**
     * Selects the algorithm used for timestretching (preserving pitch while
     * playing audio at different speed).
     */
    TimestretchMode timestretchMode;
    /**
     * Selects the behavior when the specified values for speed and / or pitch
     * are out of applicable range.
     */
    TimestretchFallbackMode fallbackMode;
};
358