/*
 * Copyright (C) 2009 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package android.media.audio.cts;

import static com.google.common.truth.Truth.assertWithMessage;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertThrows;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;

import android.Manifest;
import android.app.ActivityManager;
import android.content.Context;
import android.content.pm.PackageManager;
import android.media.AudioDeviceInfo;
import android.media.AudioFormat;
import android.media.AudioManager;
import android.media.AudioRecord;
import android.media.AudioRecord.OnRecordPositionUpdateListener;
import android.media.AudioRecordingConfiguration;
import android.media.AudioSystem;
import android.media.AudioTimestamp;
import android.media.MediaFormat;
import android.media.MediaRecorder;
import android.media.MicrophoneDirection;
import android.media.MicrophoneInfo;
import android.media.cts.AudioHelper;
import android.media.cts.StreamUtils;
import android.media.metrics.LogSessionId;
import android.media.metrics.MediaMetricsManager;
import android.media.metrics.RecordingSession;
import android.os.Handler;
import android.os.Looper;
import android.os.Message;
import android.os.PersistableBundle;
import android.os.Process;
import android.os.SystemClock;
import android.platform.test.annotations.Presubmit;
import android.util.Log;

import androidx.test.InstrumentationRegistry;
import androidx.test.runner.AndroidJUnit4;

import com.android.compatibility.common.util.CddTest;
import com.android.compatibility.common.util.DeviceReportLog;
import com.android.compatibility.common.util.NonMainlineTest;
import com.android.compatibility.common.util.ResultType;
import com.android.compatibility.common.util.ResultUnit;
import com.android.compatibility.common.util.SystemUtil;

import com.google.common.collect.Range;

import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.ShortBuffer;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Executor;
import java.util.function.BiFunction;

@NonMainlineTest
@RunWith(AndroidJUnit4.class)
public class AudioRecordTest {
    private final static String TAG = "AudioRecordTest";
    private static final String REPORT_LOG_NAME = "CtsMediaAudioTestCases";
    private AudioRecord mAudioRecord;
    private AudioManager mAudioManager;
    private static final int SAMPLING_RATE_HZ = 44100;
    private boolean mIsOnMarkerReachedCalled;
    private boolean mIsOnPeriodicNotificationCalled;
    private boolean mIsHandleMessageCalled;
    private Looper mLooper;
    // For doTest
    private int mMarkerPeriodInFrames;
    private int mMarkerPosition;
    private Handler mHandler = new Handler(Looper.getMainLooper()) {
        @Override
        public void handleMessage(Message msg) {
            mIsHandleMessageCalled = true;
            super.handleMessage(msg);
        }
    };
    private static final int RECORD_DURATION_MS = 500;
    private static final int TEST_TIMING_TOLERANCE_MS = 70;

    @Before
    public void setUp() throws Exception {
        if (!hasMicrophone()) {
            return;
        }
        mAudioManager = InstrumentationRegistry.getInstrumentation()
                .getContext().getSystemService(AudioManager.class);
        /*
         * InstrumentationTestRunner.onStart() calls Looper.prepare(), which creates a looper
         * for the current thread. However, since we don't actually call loop() in the test,
         * any messages queued with that looper will never be consumed. Therefore, we must
         * create the instance in another thread, either without a looper, so the main looper is
         * used, or with an active looper.
         */
        Thread t = new Thread() {
            @Override
            public void run() {
                Looper.prepare();
                mLooper = Looper.myLooper();
                synchronized(this) {
                    mAudioRecord = new AudioRecord.Builder()
                            .setAudioFormat(new AudioFormat.Builder()
                                    .setSampleRate(SAMPLING_RATE_HZ)
                                    .setEncoding(AudioFormat.ENCODING_PCM_16BIT)
                                    .setChannelMask(AudioFormat.CHANNEL_IN_MONO).build())
                            .setAudioSource(MediaRecorder.AudioSource.DEFAULT)
                            .setBufferSizeInBytes(
                                    AudioRecord.getMinBufferSize(SAMPLING_RATE_HZ,
                                            AudioFormat.CHANNEL_IN_MONO,
                                            AudioFormat.ENCODING_PCM_16BIT) * 10)
                            .build();
                    this.notify();
                }
                Looper.loop();
            }
        };
        synchronized(t) {
            t.start(); // will block until we wait
            t.wait();
        }
        assertNotNull(mAudioRecord);
    }

    @After
    public void tearDown() throws Exception {
        if (hasMicrophone()) {
            mAudioRecord.release();
            mLooper.quit();
        }
    }

    private void reset() {
        mIsOnMarkerReachedCalled = false;
        mIsOnPeriodicNotificationCalled = false;
        mIsHandleMessageCalled = false;
    }

    @Test
    public void testAudioRecordProperties() throws Exception {
        if (!hasMicrophone()) {
            return;
        }
        assertEquals(AudioFormat.ENCODING_PCM_16BIT, mAudioRecord.getAudioFormat());
        assertEquals(MediaRecorder.AudioSource.DEFAULT, mAudioRecord.getAudioSource());
        assertEquals(1, mAudioRecord.getChannelCount());
        assertEquals(AudioFormat.CHANNEL_IN_MONO,
                mAudioRecord.getChannelConfiguration());
        assertEquals(AudioRecord.STATE_INITIALIZED, mAudioRecord.getState());
        assertEquals(SAMPLING_RATE_HZ, mAudioRecord.getSampleRate());
        assertEquals(AudioRecord.RECORDSTATE_STOPPED, mAudioRecord.getRecordingState());

        int bufferSize = AudioRecord.getMinBufferSize(SAMPLING_RATE_HZ,
                AudioFormat.CHANNEL_CONFIGURATION_DEFAULT, AudioFormat.ENCODING_PCM_16BIT);
        assertTrue(bufferSize > 0);
    }

    @Test
    public void testAudioRecordOP() throws Exception {
        if (!hasMicrophone()) {
            return;
        }
        final int SLEEP_TIME = 10;
        final int RECORD_TIME = 5000;
        assertEquals(AudioRecord.STATE_INITIALIZED, mAudioRecord.getState());

        int markerInFrames = mAudioRecord.getSampleRate() / 2;
        assertEquals(AudioRecord.SUCCESS,
                mAudioRecord.setNotificationMarkerPosition(markerInFrames));
        assertEquals(markerInFrames, mAudioRecord.getNotificationMarkerPosition());
        int periodInFrames = mAudioRecord.getSampleRate();
        assertEquals(AudioRecord.SUCCESS,
                mAudioRecord.setPositionNotificationPeriod(periodInFrames));
        assertEquals(periodInFrames, mAudioRecord.getPositionNotificationPeriod());
        OnRecordPositionUpdateListener listener = new OnRecordPositionUpdateListener() {

            public void onMarkerReached(AudioRecord recorder) {
                mIsOnMarkerReachedCalled = true;
            }

            public void onPeriodicNotification(AudioRecord recorder) {
                mIsOnPeriodicNotificationCalled = true;
            }
        };
        mAudioRecord.setRecordPositionUpdateListener(listener);

        // use byte array as buffer
        final int BUFFER_SIZE = 102400;
        byte[] byteData = new byte[BUFFER_SIZE];
        long time = System.currentTimeMillis();
        mAudioRecord.startRecording();
        assertEquals(AudioRecord.RECORDSTATE_RECORDING, mAudioRecord.getRecordingState());
        while (System.currentTimeMillis() - time < RECORD_TIME) {
            Thread.sleep(SLEEP_TIME);
            mAudioRecord.read(byteData, 0, BUFFER_SIZE);
        }
        mAudioRecord.stop();
        assertEquals(AudioRecord.RECORDSTATE_STOPPED, mAudioRecord.getRecordingState());
        assertTrue(mIsOnMarkerReachedCalled);
        assertTrue(mIsOnPeriodicNotificationCalled);
        reset();

        // use short array as buffer
        short[] shortData = new short[BUFFER_SIZE];
        time = System.currentTimeMillis();
        mAudioRecord.startRecording();
        assertEquals(AudioRecord.RECORDSTATE_RECORDING, mAudioRecord.getRecordingState());
        while (System.currentTimeMillis() - time < RECORD_TIME) {
            Thread.sleep(SLEEP_TIME);
            mAudioRecord.read(shortData, 0, BUFFER_SIZE);
        }
        mAudioRecord.stop();
        assertEquals(AudioRecord.RECORDSTATE_STOPPED, mAudioRecord.getRecordingState());
        assertTrue(mIsOnMarkerReachedCalled);
        assertTrue(mIsOnPeriodicNotificationCalled);
        reset();

        // use ByteBuffer as buffer
        ByteBuffer byteBuffer = ByteBuffer.allocateDirect(BUFFER_SIZE);
        time = System.currentTimeMillis();
        mAudioRecord.startRecording();
        assertEquals(AudioRecord.RECORDSTATE_RECORDING, mAudioRecord.getRecordingState());
        while (System.currentTimeMillis() - time < RECORD_TIME) {
            Thread.sleep(SLEEP_TIME);
            mAudioRecord.read(byteBuffer, BUFFER_SIZE);
        }
        mAudioRecord.stop();
        assertEquals(AudioRecord.RECORDSTATE_STOPPED, mAudioRecord.getRecordingState());
        assertTrue(mIsOnMarkerReachedCalled);
        assertTrue(mIsOnPeriodicNotificationCalled);
        reset();

        // use handler
        final Handler handler = new Handler(Looper.getMainLooper()) {
            @Override
            public void handleMessage(Message msg) {
                mIsHandleMessageCalled = true;
                super.handleMessage(msg);
            }
        };

        mAudioRecord.setRecordPositionUpdateListener(listener, handler);
        time = System.currentTimeMillis();
        mAudioRecord.startRecording();
        assertEquals(AudioRecord.RECORDSTATE_RECORDING, mAudioRecord.getRecordingState());
        while (System.currentTimeMillis() - time < RECORD_TIME) {
            Thread.sleep(SLEEP_TIME);
            mAudioRecord.read(byteData, 0, BUFFER_SIZE);
        }
        mAudioRecord.stop();
        assertEquals(AudioRecord.RECORDSTATE_STOPPED, mAudioRecord.getRecordingState());
        assertTrue(mIsOnMarkerReachedCalled);
        assertTrue(mIsOnPeriodicNotificationCalled);
        // The handler argument is only ever used for getting the associated Looper
        assertFalse(mIsHandleMessageCalled);

        mAudioRecord.release();
        assertEquals(AudioRecord.STATE_UNINITIALIZED, mAudioRecord.getState());
    }

    @Test
    public void testAudioRecordResamplerMono8Bit() throws Exception {
        doTest("resampler_mono_8bit", true /*localRecord*/, false /*customHandler*/,
                1 /*periodsPerSecond*/, 1 /*markerPeriodsPerSecond*/,
                false /*useByteBuffer*/, false /*blocking*/,
                false /*auditRecording*/, false /*isChannelIndex*/, 88200 /*TEST_SR*/,
                AudioFormat.CHANNEL_IN_MONO, AudioFormat.ENCODING_PCM_8BIT);
    }

    @Test
    public void testAudioRecordResamplerStereo8Bit() throws Exception {
        doTest("resampler_stereo_8bit", true /*localRecord*/, false /*customHandler*/,
                0 /*periodsPerSecond*/, 3 /*markerPeriodsPerSecond*/,
                true /*useByteBuffer*/, true /*blocking*/,
                false /*auditRecording*/, false /*isChannelIndex*/, 45000 /*TEST_SR*/,
                AudioFormat.CHANNEL_IN_STEREO, AudioFormat.ENCODING_PCM_8BIT);
    }

    @Presubmit
    @Test
    public void testAudioRecordLocalMono16BitShort() throws Exception {
        doTest("local_mono_16bit_short", true /*localRecord*/, false /*customHandler*/,
                30 /*periodsPerSecond*/, 2 /*markerPeriodsPerSecond*/,
                false /*useByteBuffer*/, true /*blocking*/,
                false /*auditRecording*/, false /*isChannelIndex*/, 8000 /*TEST_SR*/,
                AudioFormat.CHANNEL_IN_MONO, AudioFormat.ENCODING_PCM_16BIT, 500 /*TEST_TIME_MS*/);
    }

    @Test
    public void testAudioRecordLocalMono16Bit() throws Exception {
        doTest("local_mono_16bit", true /*localRecord*/, false /*customHandler*/,
                30 /*periodsPerSecond*/, 2 /*markerPeriodsPerSecond*/,
                false /*useByteBuffer*/, true /*blocking*/,
                false /*auditRecording*/, false /*isChannelIndex*/, 8000 /*TEST_SR*/,
                AudioFormat.CHANNEL_IN_MONO, AudioFormat.ENCODING_PCM_16BIT);
    }

    @Test
    public void testAudioRecordStereo16Bit() throws Exception {
        doTest("stereo_16bit", false /*localRecord*/, false /*customHandler*/,
                2 /*periodsPerSecond*/, 2 /*markerPeriodsPerSecond*/,
                false /*useByteBuffer*/, false /*blocking*/,
                false /*auditRecording*/, false /*isChannelIndex*/, 17000 /*TEST_SR*/,
                AudioFormat.CHANNEL_IN_STEREO, AudioFormat.ENCODING_PCM_16BIT);
    }

    @Test
    public void testAudioRecordMonoFloat() throws Exception {
        doTest("mono_float", false /*localRecord*/, true /*customHandler*/,
                30 /*periodsPerSecond*/, 2 /*markerPeriodsPerSecond*/,
                false /*useByteBuffer*/, true /*blocking*/,
                false /*auditRecording*/, false /*isChannelIndex*/, 32000 /*TEST_SR*/,
                AudioFormat.CHANNEL_IN_MONO, AudioFormat.ENCODING_PCM_FLOAT);
    }

    @Test
    public void testAudioRecordLocalNonblockingStereoFloat() throws Exception {
        doTest("local_nonblocking_stereo_float", true /*localRecord*/, true /*customHandler*/,
                2 /*periodsPerSecond*/, 0 /*markerPeriodsPerSecond*/,
                false /*useByteBuffer*/, false /*blocking*/,
                false /*auditRecording*/, false /*isChannelIndex*/, 48000 /*TEST_SR*/,
                AudioFormat.CHANNEL_IN_STEREO, AudioFormat.ENCODING_PCM_FLOAT);
    }

    // Audit modes work best with non-blocking mode
    @Test
    public void testAudioRecordAuditByteBufferResamplerStereoFloat() throws Exception {
        if (isLowRamDevice()) {
            return; // skip. FIXME: reenable when AF memory allocation is updated.
        }
        doTest("audit_byte_buffer_resampler_stereo_float",
                false /*localRecord*/, true /*customHandler*/,
                2 /*periodsPerSecond*/, 0 /*markerPeriodsPerSecond*/,
                true /*useByteBuffer*/, false /*blocking*/,
                true /*auditRecording*/, false /*isChannelIndex*/, 96000 /*TEST_SR*/,
                AudioFormat.CHANNEL_IN_STEREO, AudioFormat.ENCODING_PCM_FLOAT);
    }

    @Test
    public void testAudioRecordAuditChannelIndexMonoFloat() throws Exception {
        doTest("audit_channel_index_mono_float", true /*localRecord*/, true /*customHandler*/,
                2 /*periodsPerSecond*/, 0 /*markerPeriodsPerSecond*/,
                false /*useByteBuffer*/, false /*blocking*/,
                true /*auditRecording*/, true /*isChannelIndex*/, 47000 /*TEST_SR*/,
                (1 << 0) /* 1 channel */, AudioFormat.ENCODING_PCM_FLOAT);
    }

    // Audit buffers can run out of space with high sample rate,
    // so keep the channels and pcm encoding low
    @Test
    public void testAudioRecordAuditChannelIndex2() throws Exception {
        if (isLowRamDevice()) {
            return; // skip. FIXME: reenable when AF memory allocation is updated.
        }
        doTest("audit_channel_index_2", true /*localRecord*/, true /*customHandler*/,
                2 /*periodsPerSecond*/, 0 /*markerPeriodsPerSecond*/,
                false /*useByteBuffer*/, false /*blocking*/,
                true /*auditRecording*/, true /*isChannelIndex*/, 192000 /*TEST_SR*/,
                (1 << 0) | (1 << 2) /* 2 channels, gap in middle */,
                AudioFormat.ENCODING_PCM_8BIT);
    }

    // Audit buffers can run out of space with high numbers of channels,
    // so keep the sample rate low.
    @Test
    public void testAudioRecordAuditChannelIndex5() throws Exception {
        doTest("audit_channel_index_5", true /*localRecord*/, true /*customHandler*/,
                2 /*periodsPerSecond*/, 0 /*markerPeriodsPerSecond*/,
                false /*useByteBuffer*/, false /*blocking*/,
                true /*auditRecording*/, true /*isChannelIndex*/, 16000 /*TEST_SR*/,
                (1 << 0) | (1 << 1) | (1 << 2) | (1 << 3) | (1 << 4) /* 5 channels */,
                AudioFormat.ENCODING_PCM_16BIT);
    }

    // Audit buffers can run out of space with high numbers of channels,
    // so keep the sample rate low.
    // This tests the maximum reported Mixed PCM channel capability
    // for AudioRecord and AudioTrack.
    @Test
    public void testAudioRecordAuditChannelIndexMax() throws Exception {
        // We skip this test for isLowRamDevice(s).
        // Otherwise if the build reports a high PCM channel count capability,
        // we expect this CTS test to work at 16kHz.
        if (isLowRamDevice()) {
            return; // skip. FIXME: reenable when AF memory allocation is updated.
        }
        final int maxChannels = AudioSystem.OUT_CHANNEL_COUNT_MAX; // FCC_LIMIT
        doTest("audit_channel_index_max", true /*localRecord*/, true /*customHandler*/,
                2 /*periodsPerSecond*/, 0 /*markerPeriodsPerSecond*/,
                false /*useByteBuffer*/, false /*blocking*/,
                true /*auditRecording*/, true /*isChannelIndex*/, 16000 /*TEST_SR*/,
                (1 << maxChannels) - 1,
                AudioFormat.ENCODING_PCM_16BIT);
    }

    // Audit buffers can run out of space with high numbers of channels,
    // so keep the sample rate low.
    @Test
    public void testAudioRecordAuditChannelIndex3() throws Exception {
        doTest("audit_channel_index_3", true /*localRecord*/, true /*customHandler*/,
                2 /*periodsPerSecond*/, 0 /*markerPeriodsPerSecond*/,
                true /*useByteBuffer*/, false /*blocking*/,
                true /*auditRecording*/, true /*isChannelIndex*/, 16000 /*TEST_SR*/,
                (1 << 0) | (1 << 1) | (1 << 2) /* 3 channels */,
                AudioFormat.ENCODING_PCM_24BIT_PACKED);
    }

    // Audit buffers can run out of space with high numbers of channels,
    // so keep the sample rate low.
    @Test
    public void testAudioRecordAuditChannelIndex1() throws Exception {
        doTest("audit_channel_index_1", true /*localRecord*/, true /*customHandler*/,
                2 /*periodsPerSecond*/, 0 /*markerPeriodsPerSecond*/,
                true /*useByteBuffer*/, false /*blocking*/,
                true /*auditRecording*/, true /*isChannelIndex*/, 24000 /*TEST_SR*/,
                (1 << 0) /* 1 channel */,
                AudioFormat.ENCODING_PCM_32BIT);
    }

    // Test AudioRecord.Builder to verify the observed configuration of an AudioRecord built with
    // an empty Builder matches the documentation / expected values
    @Test
    public void testAudioRecordBuilderDefault() throws Exception {
        if (!hasMicrophone()) {
            return;
        }
        // constants for test
        final String TEST_NAME = "testAudioRecordBuilderDefault";
        // expected values below match the AudioRecord.Builder documentation
        final int expectedCapturePreset = MediaRecorder.AudioSource.DEFAULT;
        final int expectedChannel = AudioFormat.CHANNEL_IN_MONO;
        final int expectedEncoding = AudioFormat.ENCODING_PCM_16BIT;
        final int expectedState = AudioRecord.STATE_INITIALIZED;
        // use builder with default values
        final AudioRecord rec = new AudioRecord.Builder().build();
        // save results
        final int observedSource = rec.getAudioSource();
        final int observedChannel = rec.getChannelConfiguration();
        final int observedEncoding = rec.getAudioFormat();
        final int observedState = rec.getState();
        // release recorder before the test exits (either successfully or with an exception)
        rec.release();
        // compare results
        assertEquals(TEST_NAME + ": default capture preset", expectedCapturePreset, observedSource);
        assertEquals(TEST_NAME + ": default channel config", expectedChannel, observedChannel);
        assertEquals(TEST_NAME + ": default encoding", expectedEncoding, observedEncoding);
        assertEquals(TEST_NAME + ": state", expectedState, observedState);
    }

    // Test AudioRecord.Builder to verify the observed configuration of an AudioRecord built with
    // an incomplete AudioFormat matches the documentation / expected values
    @Test
    public void testAudioRecordBuilderPartialFormat() throws Exception {
        if (!hasMicrophone()) {
            return;
        }
        // constants for test
        final String TEST_NAME = "testAudioRecordBuilderPartialFormat";
        final int expectedRate = 16000;
        final int expectedState = AudioRecord.STATE_INITIALIZED;
        // expected values below match the AudioRecord.Builder documentation
        final int expectedChannel = AudioFormat.CHANNEL_IN_MONO;
        final int expectedEncoding = AudioFormat.ENCODING_PCM_16BIT;
        // use builder with a partial audio format
        final AudioRecord rec = new AudioRecord.Builder()
                .setAudioFormat(new AudioFormat.Builder().setSampleRate(expectedRate).build())
                .build();
        // save results
        final int observedRate = rec.getSampleRate();
        final int observedChannel = rec.getChannelConfiguration();
        final int observedEncoding = rec.getAudioFormat();
        final int observedState = rec.getState();
        // release recorder before the test exits (either successfully or with an exception)
        rec.release();
        // compare results
        assertEquals(TEST_NAME + ": configured rate", expectedRate, observedRate);
        assertEquals(TEST_NAME + ": default channel config", expectedChannel, observedChannel);
        assertEquals(TEST_NAME + ": default encoding", expectedEncoding, observedEncoding);
        assertEquals(TEST_NAME + ": state", expectedState, observedState);
    }

    // Test AudioRecord.Builder to verify the observed configuration of an AudioRecord matches
    // the parameters used in the builder
    @Test
    public void testAudioRecordBuilderParams() throws Exception {
        if (!hasMicrophone()) {
            return;
        }
        // constants for test
        final String TEST_NAME = "testAudioRecordBuilderParams";
        final int expectedRate = 8000;
        final int expectedChannel = AudioFormat.CHANNEL_IN_MONO;
        final int expectedChannelCount = 1;
        final int expectedEncoding = AudioFormat.ENCODING_PCM_16BIT;
        final int expectedSource = MediaRecorder.AudioSource.VOICE_COMMUNICATION;
        final int expectedState = AudioRecord.STATE_INITIALIZED;
        // use builder with expected parameters
        final AudioRecord rec = new AudioRecord.Builder()
                .setAudioFormat(new AudioFormat.Builder()
                        .setSampleRate(expectedRate)
                        .setChannelMask(expectedChannel)
                        .setEncoding(expectedEncoding)
                        .build())
                .setAudioSource(expectedSource)
                .build();
        // save results
        final int observedRate = rec.getSampleRate();
        final int observedChannel = rec.getChannelConfiguration();
        final int observedChannelCount = rec.getChannelCount();
        final int observedEncoding = rec.getAudioFormat();
        final int observedSource = rec.getAudioSource();
        final int observedState = rec.getState();
        // release recorder before the test exits (either successfully or with an exception)
        rec.release();
        // compare results
        assertEquals(TEST_NAME + ": configured rate", expectedRate, observedRate);
        assertEquals(TEST_NAME + ": configured channel config", expectedChannel, observedChannel);
        assertEquals(TEST_NAME + ": configured encoding", expectedEncoding, observedEncoding);
        assertEquals(TEST_NAME + ": implicit channel count", expectedChannelCount,
                observedChannelCount);
        assertEquals(TEST_NAME + ": configured source", expectedSource, observedSource);
        assertEquals(TEST_NAME + ": state", expectedState, observedState);
    }

    // Test AudioRecord.Builder.setRequestHotwordStream, and hotword capture
    @Test
    public void testAudioRecordBuilderHotword() throws Exception {
        if (!hasMicrophone()) {
            return;
        }
        // Verify typical behavior continues to work, and clearing works
        AudioRecord regularRecord = new AudioRecord.Builder()
                .setRequestHotwordStream(true)
                .setRequestHotwordStream(false)
                .build();

        assertEquals(regularRecord.getState(), AudioRecord.STATE_INITIALIZED);
        assertFalse(regularRecord.isHotwordStream());
        assertFalse(regularRecord.isHotwordLookbackStream());
        regularRecord.startRecording();
        regularRecord.read(ByteBuffer.allocateDirect(4096), 4096);
        regularRecord.stop();
        regularRecord.release();

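        // Same check for the lookback request flag: setting and then clearing it should
        // also leave an ordinary, non-hotword record (mirrors the block above).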
        regularRecord = new AudioRecord.Builder()
                .setRequestHotwordLookbackStream(true)
                .setRequestHotwordLookbackStream(false)
                .build();

        assertEquals(regularRecord.getState(), AudioRecord.STATE_INITIALIZED);
        assertFalse(regularRecord.isHotwordStream());
        assertFalse(regularRecord.isHotwordLookbackStream());
        regularRecord.startRecording();
        regularRecord.read(ByteBuffer.allocateDirect(4096), 4096);
        regularRecord.stop();
        regularRecord.release();

        // Should fail due to incompatible arguments
        assertThrows(UnsupportedOperationException.class,
                () -> new AudioRecord.Builder()
                        .setRequestHotwordStream(true)
                        .setRequestHotwordLookbackStream(true)
                        .build());

        // Should fail due to permission issues
        assertThrows(UnsupportedOperationException.class,
                () -> new AudioRecord.Builder()
                        .setRequestHotwordStream(true)
                        .build());
        assertThrows(UnsupportedOperationException.class,
                () -> new AudioRecord.Builder()
                        .setRequestHotwordLookbackStream(true)
                        .build());

        // Adopt permissions to access query APIs and test functionality
        InstrumentationRegistry.getInstrumentation()
                .getUiAutomation()
                .adoptShellPermissionIdentity(
                        Manifest.permission.CAPTURE_AUDIO_HOTWORD);

        for (final boolean lookbackOn : new boolean[] { false, true }) {
            AudioRecord audioRecord = null;
            if (!mAudioManager.isHotwordStreamSupported(lookbackOn)) {
                // Hardware does not support capturing hotword content
                continue;
            }
            try {
                AudioRecord.Builder builder = new AudioRecord.Builder();
                if (lookbackOn) {
                    builder.setRequestHotwordLookbackStream(true);
                } else {
                    builder.setRequestHotwordStream(true);
                }
                audioRecord = builder.build();
                if (lookbackOn) {
                    assertTrue(audioRecord.isHotwordLookbackStream());
                } else {
                    assertTrue(audioRecord.isHotwordStream());
                }
                audioRecord.startRecording();
                audioRecord.read(ByteBuffer.allocateDirect(4096), 4096);
                audioRecord.stop();
            } finally {
                if (audioRecord != null) {
                    audioRecord.release();
                }
            }
        }
        InstrumentationRegistry.getInstrumentation()
                .getUiAutomation()
                .dropShellPermissionIdentity();
    }

    // Test AudioRecord to ensure we can build after a failure.
    @Test
    public void testAudioRecordBufferSize() throws Exception {
        if (!hasMicrophone()) {
            return;
        }
        // constants for test
        final String TEST_NAME = "testAudioRecordBufferSize";

        // use builder with parameters that should fail
        final int superBigBufferSize = 1 << 28;
        try {
            final AudioRecord record = new AudioRecord.Builder()
                    .setBufferSizeInBytes(superBigBufferSize)
                    .build();
            record.release();
            fail(TEST_NAME + ": should throw exception on failure");
        } catch (UnsupportedOperationException e) {
            ;
        }

        // we should be able to create again with minimum buffer size
        final int verySmallBufferSize = 2 * 3 * 4; // frame size multiples
        final AudioRecord record2 = new AudioRecord.Builder()
                .setBufferSizeInBytes(verySmallBufferSize)
                .build();

        final int observedState2 = record2.getState();
        final int observedBufferSize2 = record2.getBufferSizeInFrames();
        record2.release();

        // succeeds for minimum buffer size
        assertEquals(TEST_NAME + ": state", AudioRecord.STATE_INITIALIZED, observedState2);
        // should force the minimum size buffer which is > 0
        assertTrue(TEST_NAME + ": buffer frame count", observedBufferSize2 > 0);
    }

    @Test
    public void testTimestamp() throws Exception {
        if (!hasMicrophone()) {
            return;
        }
        final String TEST_NAME = "testTimestamp";
        AudioRecord record = null;

        try {
            final int NANOS_PER_MILLISECOND = 1000000;
            final long RECORD_TIME_MS = 2000;
            final long RECORD_TIME_NS = RECORD_TIME_MS * NANOS_PER_MILLISECOND;
            final int RECORD_ENCODING = AudioFormat.ENCODING_PCM_16BIT; // fixed at this time.
            final int RECORD_CHANNEL_MASK = AudioFormat.CHANNEL_IN_STEREO;
            final int RECORD_SAMPLE_RATE = 23456; // requires resampling
            record = new AudioRecord.Builder()
                    .setAudioFormat(new AudioFormat.Builder()
                            .setSampleRate(RECORD_SAMPLE_RATE)
                            .setChannelMask(RECORD_CHANNEL_MASK)
                            .setEncoding(RECORD_ENCODING)
                            .build())
                    .build();

            // For our tests, we could set test duration by timed sleep or by # frames received.
            // Since we don't know *exactly* when AudioRecord actually begins recording,
            // we end the test by # frames read.
            final int numChannels =
                    AudioFormat.channelCountFromInChannelMask(RECORD_CHANNEL_MASK);
            final int bytesPerSample = AudioFormat.getBytesPerSample(RECORD_ENCODING);
            final int bytesPerFrame = numChannels * bytesPerSample;
            // careful about integer overflow in the formula below:
            final int targetFrames =
                    (int)((long)RECORD_TIME_MS * RECORD_SAMPLE_RATE / 1000);
            final int targetSamples = targetFrames * numChannels;
            final int BUFFER_FRAMES = 512;
            final int BUFFER_SAMPLES = BUFFER_FRAMES * numChannels;

            final int tries = 4;
            for (int i = 0; i < tries; ++i) {
                Log.d(TEST_NAME, "try " + i);
                final long trackStartTimeNs = System.nanoTime();
                final long trackStartTimeBootNs = android.os.SystemClock.elapsedRealtimeNanos();

                record.startRecording();

                final AudioTimestamp ts = new AudioTimestamp();
                int samplesRead = 0;
                // For 16 bit data, use shorts
                final short[] shortData = new short[BUFFER_SAMPLES];
                final AudioHelper.TimestampVerifier tsVerifier =
                        new AudioHelper.TimestampVerifier(TAG, RECORD_SAMPLE_RATE,
                                0 /* startFrames */, isProAudioDevice());

                while (samplesRead < targetSamples) {
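                    // The first pass reads a single frame (numChannels samples); as the
                    // analogous comment in doTest() notes, this sets the recording anchor
                    // position before the larger bulk reads begin.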
                    final int amount = samplesRead == 0 ? numChannels :
                            Math.min(BUFFER_SAMPLES, targetSamples - samplesRead);
                    final int ret = record.read(shortData, 0, amount);
                    assertWithMessage("read incorrect amount")
                            .that(ret)
                            .isEqualTo(amount);
                    // timestamps follow a different path than data, so it is conceivable
                    // that first data arrives before the first timestamp is ready.

                    if (record.getTimestamp(ts, AudioTimestamp.TIMEBASE_MONOTONIC)
                            == AudioRecord.SUCCESS) {
                        tsVerifier.add(ts);
                    }
                    samplesRead += ret;
                }
                record.stop();

                // stop is synchronous, but need not be in the future.
                final long SLEEP_AFTER_STOP_FOR_INACTIVITY_MS = 1000;
                Thread.sleep(SLEEP_AFTER_STOP_FOR_INACTIVITY_MS);

                AudioTimestamp stopTs = new AudioTimestamp();
                AudioTimestamp stopTsBoot = new AudioTimestamp();

                assertWithMessage("timestamp monotonic returns success")
                        .that(record.getTimestamp(stopTs, AudioTimestamp.TIMEBASE_MONOTONIC))
                        .isEqualTo(AudioRecord.SUCCESS);
                assertWithMessage("timestamp boottime returns success")
                        .that(record.getTimestamp(stopTsBoot, AudioTimestamp.TIMEBASE_BOOTTIME))
                        .isEqualTo(AudioRecord.SUCCESS);

                assertWithMessage("timestamp monotonic and boottime have same frame position")
                        .that(stopTs.framePosition)
                        .isEqualTo(stopTsBoot.framePosition);

                assertWithMessage("timestamp monotonic frame position is at least target frames")
                        .that(stopTs.framePosition)
                        .isAtLeast(targetFrames);
                assertWithMessage("timestamp monotonic elapsed time is at least record time")
                        .that(stopTs.nanoTime - trackStartTimeNs)
                        .isAtLeast(RECORD_TIME_NS);
                assertWithMessage("timestamp boottime elapsed time is at least record time")
                        .that(stopTsBoot.nanoTime - trackStartTimeBootNs)
                        .isAtLeast(RECORD_TIME_NS);

                tsVerifier.verifyAndLog(trackStartTimeNs, "test_timestamp" /* logName */);
            }
        } finally {
            if (record != null) {
                record.release();
                record = null;
            }
        }
    }

    @Test
    public void testRecordNoDataForIdleUids() throws Exception {
        // Removed in favor of audiorecordpermissiontests
    }

    @Test
    public void testRestrictedAudioSourcePermissions() throws Exception {
        // Make sure that the following audio sources cannot be used by apps that
        // don't have the CAPTURE_AUDIO_OUTPUT permissions:
        // - VOICE_CALL,
        // - VOICE_DOWNLINK
        // - VOICE_UPLINK
        // - REMOTE_SUBMIX
        // - ECHO_REFERENCE - 1997
        // - RADIO_TUNER - 1998
        // - HOTWORD - 1999
        // The attempt to build an AudioRecord with those sources should throw either
        // UnsupportedOperationException or IllegalArgumentException.
        final int[] restrictedAudioSources = new int[] {
            MediaRecorder.AudioSource.VOICE_CALL,
            MediaRecorder.AudioSource.VOICE_DOWNLINK,
            MediaRecorder.AudioSource.VOICE_UPLINK,
            MediaRecorder.AudioSource.REMOTE_SUBMIX,
            1997,
            1998,
            1999
        };

        for (int source : restrictedAudioSources) {
            // AudioRecord.Builder should fail when trying to use
            // one of the voice call audio sources.
            try {
                AudioRecord ar = new AudioRecord.Builder()
                        .setAudioSource(source)
                        .setAudioFormat(new AudioFormat.Builder()
                                .setEncoding(AudioFormat.ENCODING_PCM_16BIT)
                                .setSampleRate(8000)
                                .setChannelMask(AudioFormat.CHANNEL_IN_MONO)
                                .build())
                        .build();
                fail("testRestrictedAudioSourcePermissions: no exception thrown for source: "
                        + source);
            } catch (Exception e) {
                Log.i(TAG, "Exception: " + e);
                if (!UnsupportedOperationException.class.isInstance(e)
                        && !IllegalArgumentException.class.isInstance(e)) {
                    fail("testRestrictedAudioSourcePermissions: unexpected exception for source: "
                            + source + " Exception:" + e);
                }
            }
        }
    }

    @Test
    public void testMediaMetrics() throws Exception {
        if (!hasMicrophone()) {
            return;
        }

        AudioRecord record = null;
        try {
            final int RECORD_ENCODING = AudioFormat.ENCODING_PCM_16BIT;
            final int RECORD_CHANNEL_MASK = AudioFormat.CHANNEL_IN_MONO;
            final int RECORD_SAMPLE_RATE = 8000;
            final AudioFormat format = new AudioFormat.Builder()
                    .setSampleRate(RECORD_SAMPLE_RATE)
                    .setChannelMask(RECORD_CHANNEL_MASK)
                    .setEncoding(RECORD_ENCODING)
                    .build();

            // Setup a recorder
            record = new AudioRecord.Builder()
                    .setAudioSource(MediaRecorder.AudioSource.MIC)
                    .setAudioFormat(format)
                    .build();

            final PersistableBundle metrics = record.getMetrics();

            assertNotNull("null metrics", metrics);
            AudioHelper.assertMetricsKeyEquals(metrics, AudioRecord.MetricsConstants.ENCODING,
                    new String("AUDIO_FORMAT_PCM_16_BIT"));
            AudioHelper.assertMetricsKeyEquals(metrics, AudioRecord.MetricsConstants.SOURCE,
                    new String("AUDIO_SOURCE_MIC"));
            AudioHelper.assertMetricsKeyEquals(metrics, AudioRecord.MetricsConstants.SAMPLERATE,
                    new Integer(RECORD_SAMPLE_RATE));
            AudioHelper.assertMetricsKeyEquals(metrics, AudioRecord.MetricsConstants.CHANNELS,
                    new Integer(AudioFormat.channelCountFromInChannelMask(RECORD_CHANNEL_MASK)));

            // deprecated, value ignored.
            AudioHelper.assertMetricsKey(metrics, AudioRecord.MetricsConstants.LATENCY);

            // TestApi:
            AudioHelper.assertMetricsKeyEquals(metrics, AudioRecord.MetricsConstants.CHANNEL_MASK,
                    new Long(RECORD_CHANNEL_MASK));
            AudioHelper.assertMetricsKeyEquals(metrics, AudioRecord.MetricsConstants.FRAME_COUNT,
                    new Integer(record.getBufferSizeInFrames()));
            AudioHelper.assertMetricsKeyEquals(metrics, AudioRecord.MetricsConstants.DURATION_MS,
                    new Double(0.));
            AudioHelper.assertMetricsKeyEquals(metrics, AudioRecord.MetricsConstants.START_COUNT,
                    new Long(0));

            // TestApi: no particular value checking.
            AudioHelper.assertMetricsKey(metrics, AudioRecord.MetricsConstants.PORT_ID);
            AudioHelper.assertMetricsKey(metrics, AudioRecord.MetricsConstants.ATTRIBUTES);
        } finally {
            if (record != null) {
                record.release();
            }
        }
    }

    private void printMicrophoneInfo(MicrophoneInfo microphone) {
        Log.i(TAG, "deviceId:" + microphone.getDescription());
        Log.i(TAG, "portId:" + microphone.getId());
        Log.i(TAG, "type:" + microphone.getType());
        Log.i(TAG, "address:" + microphone.getAddress());
        Log.i(TAG, "deviceLocation:" + microphone.getLocation());
        Log.i(TAG, "deviceGroup:" + microphone.getGroup()
                + " index:" + microphone.getIndexInTheGroup());
        MicrophoneInfo.Coordinate3F position = microphone.getPosition();
        Log.i(TAG, "position:" + position.x + "," + position.y + "," + position.z);
        MicrophoneInfo.Coordinate3F orientation = microphone.getOrientation();
        Log.i(TAG, "orientation:" + orientation.x + "," + orientation.y + "," + orientation.z);
        Log.i(TAG, "frequencyResponse:" + microphone.getFrequencyResponse());
        Log.i(TAG, "channelMapping:" + microphone.getChannelMapping());
        Log.i(TAG, "sensitivity:" + microphone.getSensitivity());
        Log.i(TAG, "max spl:" + microphone.getMaxSpl());
        Log.i(TAG, "min spl:" + microphone.getMinSpl());
        Log.i(TAG, "directionality:" + microphone.getDirectionality());
        Log.i(TAG, "******");
    }

    @CddTest(requirement="5.4.1/C-1-4")
    @Test
    public void testGetActiveMicrophones() throws Exception {
        if (!hasMicrophone()) {
            return;
        }
        mAudioRecord.startRecording();
        try {
            Thread.sleep(1000);
        } catch (InterruptedException e) {
        }
        List<MicrophoneInfo> activeMicrophones = mAudioRecord.getActiveMicrophones();
        assertTrue(activeMicrophones.size() > 0);
        for (MicrophoneInfo activeMicrophone : activeMicrophones) {
            printMicrophoneInfo(activeMicrophone);
        }
    }

    private Executor mExec = new Executor() {
        @Override
        public void execute(Runnable command) {
            command.run();
        }
    };

    @Test
    public void testAudioRecordInfoCallback() throws Exception {
        if (!hasMicrophone()) {
            return;
        }
        AudioTestUtil.AudioRecordingCallbackUtil callback =
                new AudioTestUtil.AudioRecordingCallbackUtil(
                        mAudioRecord.getAudioSessionId(), MediaRecorder.AudioSource.DEFAULT);
        mAudioRecord.registerAudioRecordingCallback(mExec, callback);
        mAudioRecord.startRecording();
        assertEquals(AudioRecord.RECORDSTATE_RECORDING, mAudioRecord.getRecordingState());

        callback.await(TEST_TIMING_TOLERANCE_MS);
        assertTrue(callback.mCalled);
        assertTrue(callback.mConfigs.size() <= 1);
        if (callback.mConfigs.size() == 1) {
            checkRecordingConfig(callback.mConfigs.get(0));
        }

        Thread.sleep(RECORD_DURATION_MS);
        mAudioRecord.unregisterAudioRecordingCallback(callback);
    }

    @Test
    public void testGetActiveRecordingConfiguration() throws Exception {
        if (!hasMicrophone()) {
            return;
        }
        mAudioRecord.startRecording();
        assertEquals(AudioRecord.RECORDSTATE_RECORDING, mAudioRecord.getRecordingState());

        try {
            Thread.sleep(RECORD_DURATION_MS);
        } catch (InterruptedException e) {
        }

        AudioRecordingConfiguration config = mAudioRecord.getActiveRecordingConfiguration();
        checkRecordingConfig(config);

        mAudioRecord.release();
        // test no exception is thrown when querying immediately after release()
        // which is not a synchronous operation
        config = mAudioRecord.getActiveRecordingConfiguration();
        try {
            Thread.sleep(TEST_TIMING_TOLERANCE_MS);
        } catch (InterruptedException e) {
        }
        assertNull("Recording configuration not null after release",
                mAudioRecord.getActiveRecordingConfiguration());
    }

    private static void checkRecordingConfig(AudioRecordingConfiguration config) {
        assertNotNull(config);
        AudioFormat format = config.getClientFormat();
        assertEquals(AudioFormat.CHANNEL_IN_MONO, format.getChannelMask());
        assertEquals(AudioFormat.ENCODING_PCM_16BIT, format.getEncoding());
        assertEquals(SAMPLING_RATE_HZ, format.getSampleRate());
        assertEquals(MediaRecorder.AudioSource.MIC, config.getAudioSource());
        assertNotNull(config.getAudioDevice());
        assertNotNull(config.getClientEffects());
        assertNotNull(config.getEffects());
        // no requirement here, just testing the API
        config.isClientSilenced();
    }

    private AudioRecord createAudioRecord(
            int audioSource, int sampleRateInHz,
            int channelConfig, int audioFormat, int bufferSizeInBytes,
            boolean auditRecording, boolean isChannelIndex) {
        final AudioRecord record;
        if (auditRecording) {
            record = new AudioHelper.AudioRecordAudit(
                    audioSource, sampleRateInHz, channelConfig,
                    audioFormat, bufferSizeInBytes, isChannelIndex);
        } else if (isChannelIndex) {
            record = new AudioRecord.Builder()
                    .setAudioFormat(new AudioFormat.Builder()
                            .setChannelIndexMask(channelConfig)
                            .setEncoding(audioFormat)
                            .setSampleRate(sampleRateInHz)
                            .build())
                    .setBufferSizeInBytes(bufferSizeInBytes)
                    .build();
        } else {
            record = new AudioRecord(audioSource, sampleRateInHz, channelConfig,
                    audioFormat, bufferSizeInBytes);
        }

        // did we get the AudioRecord we expected?
        final AudioFormat format = record.getFormat();
        assertEquals(isChannelIndex ? channelConfig : AudioFormat.CHANNEL_INVALID,
                format.getChannelIndexMask());
        assertEquals(isChannelIndex ? AudioFormat.CHANNEL_INVALID : channelConfig,
                format.getChannelMask());
        assertEquals(audioFormat, format.getEncoding());
        assertEquals(sampleRateInHz, format.getSampleRate());
        final int frameSize =
                format.getChannelCount() * AudioFormat.getBytesPerSample(audioFormat);
        // our native frame count cannot be smaller than our minimum buffer size request.
        assertTrue(record.getBufferSizeInFrames() * frameSize >= bufferSizeInBytes);
        return record;
    }

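    // doTest() runs one record/read/stop cycle and verifies marker and period notifications
    // as well as timestamps. The flags select where the AudioRecord is created (localRecord),
    // whether a custom Handler receives the position callbacks (customHandler), the buffer type
    // used for reads (useByteBuffer), blocking vs. non-blocking reads, whether the recording is
    // audited, i.e. played back (auditRecording), and whether TEST_CONF is a channel-index mask
    // (isChannelIndex).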
    private void doTest(String reportName, boolean localRecord, boolean customHandler,
            int periodsPerSecond, int markerPeriodsPerSecond,
            boolean useByteBuffer, boolean blocking,
            final boolean auditRecording, final boolean isChannelIndex,
            final int TEST_SR, final int TEST_CONF, final int TEST_FORMAT) throws Exception {
        final int TEST_TIME_MS = auditRecording ? 10000 : 2000;
        doTest(reportName, localRecord, customHandler, periodsPerSecond, markerPeriodsPerSecond,
                useByteBuffer, blocking, auditRecording, isChannelIndex,
                TEST_SR, TEST_CONF, TEST_FORMAT, TEST_TIME_MS);
    }

    private void doTest(String reportName, boolean localRecord, boolean customHandler,
            int periodsPerSecond, int markerPeriodsPerSecond,
            boolean useByteBuffer, boolean blocking,
            final boolean auditRecording, final boolean isChannelIndex,
            final int TEST_SR, final int TEST_CONF, final int TEST_FORMAT, final int TEST_TIME_MS)
            throws Exception {
        if (!hasMicrophone()) {
            return;
        }
        // audit recording plays back recorded audio, so use longer test timing
        final int TEST_SOURCE = MediaRecorder.AudioSource.DEFAULT;
        mIsHandleMessageCalled = false;

        // For channelIndex use one frame in bytes for buffer size.
        // This is adjusted to the minimum buffer size by native code.
        final int bufferSizeInBytes = isChannelIndex ?
                (AudioFormat.getBytesPerSample(TEST_FORMAT)
                        * AudioFormat.channelCountFromInChannelMask(TEST_CONF)) :
                AudioRecord.getMinBufferSize(TEST_SR, TEST_CONF, TEST_FORMAT);
        assertWithMessage("getMinBufferSize() reports nonzero value")
                .that(bufferSizeInBytes)
                .isGreaterThan(0);

        final AudioRecord record;
        final AudioHelper.MakeSomethingAsynchronouslyAndLoop<AudioRecord> makeSomething;

        if (localRecord) {
            makeSomething = null;
            record = createAudioRecord(TEST_SOURCE, TEST_SR, TEST_CONF,
                    TEST_FORMAT, bufferSizeInBytes, auditRecording, isChannelIndex);
        } else {
            makeSomething =
                    new AudioHelper.MakeSomethingAsynchronouslyAndLoop<AudioRecord>(
                            new AudioHelper.MakesSomething<AudioRecord>() {
                                @Override
                                public AudioRecord makeSomething() {
                                    return createAudioRecord(TEST_SOURCE, TEST_SR, TEST_CONF,
                                            TEST_FORMAT, bufferSizeInBytes, auditRecording,
                                            isChannelIndex);
                                }
                            }
                    );
            // create AudioRecord on different thread's looper.
            record = makeSomething.make();
        }

        // AudioRecord creation may have silently failed, check state now
        assertWithMessage("getState() reports STATE_INITIALIZED")
                .that(record.getState())
                .isEqualTo(AudioRecord.STATE_INITIALIZED);

        final MockOnRecordPositionUpdateListener listener;
        if (customHandler) {
            listener = new MockOnRecordPositionUpdateListener(record, mHandler);
        } else {
            listener = new MockOnRecordPositionUpdateListener(record);
        }

        final int updatePeriodInFrames = (periodsPerSecond == 0)
                ? 0 : TEST_SR / periodsPerSecond;
        // After starting, there is no guarantee when the first frame of data is read.
        long firstSampleTime = 0;

        // blank final variables: all successful paths will initialize the times.
        // this must be declared here for visibility as they are set within the try block.
        final long endTime;
        final long startTime;
        final long stopRequestTime;
        final long stopTime;
        final long coldInputStartTime;

        try {
            if (markerPeriodsPerSecond != 0) {
                mMarkerPeriodInFrames = TEST_SR / markerPeriodsPerSecond;
                mMarkerPosition = mMarkerPeriodInFrames;
                assertWithMessage("setNotificationMarkerPosition() should succeed")
                        .that(record.setNotificationMarkerPosition(mMarkerPosition))
                        .isEqualTo(AudioRecord.SUCCESS);
            } else {
                mMarkerPeriodInFrames = 0;
            }

            assertEquals(AudioRecord.SUCCESS,
                    record.setPositionNotificationPeriod(updatePeriodInFrames));

            // at the start, there is no timestamp.
            AudioTimestamp startTs = new AudioTimestamp();
            assertWithMessage("getTimestamp without startRecording() is ERROR_INVALID_OPERATION")
                    .that(record.getTimestamp(startTs, AudioTimestamp.TIMEBASE_MONOTONIC))
                    .isEqualTo(AudioRecord.ERROR_INVALID_OPERATION);
            assertWithMessage("invalid getTimestamp doesn't affect nanoTime")
                    .that(startTs.nanoTime)
                    .isEqualTo(0);

            listener.start(TEST_SR);
            record.startRecording();
            assertWithMessage("getRecordingState() should report RECORDSTATE_RECORDING")
                    .that(record.getRecordingState())
                    .isEqualTo(AudioRecord.RECORDSTATE_RECORDING);
            startTime = System.currentTimeMillis();

            // For our tests, we could set test duration by timed sleep or by # frames received.
            // Since we don't know *exactly* when AudioRecord actually begins recording,
            // we end the test by # frames read.
            final int numChannels = AudioFormat.channelCountFromInChannelMask(TEST_CONF);
            final int bytesPerSample = AudioFormat.getBytesPerSample(TEST_FORMAT);
            final int bytesPerFrame = numChannels * bytesPerSample;
            // careful about integer overflow in the formula below:
            final int targetFrames = (int)((long)TEST_TIME_MS * TEST_SR / 1000);
            final int targetSamples = targetFrames * numChannels;
            final int BUFFER_FRAMES = 512;
            final int BUFFER_SAMPLES = BUFFER_FRAMES * numChannels;
            // TODO: verify behavior when buffer size is not a multiple of frame size.

            // For fine accuracy timestamp checks, we sample the timestamps
            // 1/6 and 5/6 of the way through recording to avoid the effects
            // of AudioRecord start and stop.
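            // running1Ts and running2Ts below are filled in from within the read loop
            // once samplesRead crosses each of these thresholds.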
            final int runningTimestampStart = targetSamples * 1 / 6;
            final int runningTimestampStop = targetSamples * 5 / 6;
            AudioTimestamp running1Ts = new AudioTimestamp();
            AudioTimestamp running2Ts = new AudioTimestamp();

            int samplesRead = 0;
            // abstract out the buffer type used with lambda.
            final byte[] byteData = new byte[BUFFER_SAMPLES];
            final short[] shortData = new short[BUFFER_SAMPLES];
            final float[] floatData = new float[BUFFER_SAMPLES];
            final ByteBuffer byteBuffer =
                    ByteBuffer.allocateDirect(BUFFER_SAMPLES * bytesPerSample);
            BiFunction<Integer, Boolean, Integer> reader = null;

            // depending on the options, create a lambda to read data.
            if (useByteBuffer) {
                reader = (samples, blockForData) -> {
                    final int amount = samples * bytesPerSample; // in bytes
                    // read always places data at the start of the byte buffer;
                    // position and limit are ignored. test this by setting
                    // position and limit to arbitrary values here.
                    final int lastPosition = 7;
                    final int lastLimit = 13;
                    byteBuffer.position(lastPosition);
                    byteBuffer.limit(lastLimit);
                    final int ret = blockForData ? record.read(byteBuffer, amount) :
                            record.read(byteBuffer, amount, AudioRecord.READ_NON_BLOCKING);
                    return ret / bytesPerSample;
                };
            } else {
                switch (TEST_FORMAT) {
                    case AudioFormat.ENCODING_PCM_8BIT:
                        reader = (samples, blockForData) -> {
                            return blockForData ? record.read(byteData, 0, samples) :
                                    record.read(byteData, 0, samples,
                                            AudioRecord.READ_NON_BLOCKING);
                        };
                        break;
                    case AudioFormat.ENCODING_PCM_16BIT:
                        reader = (samples, blockForData) -> {
                            return blockForData ? record.read(shortData, 0, samples) :
                                    record.read(shortData, 0, samples,
                                            AudioRecord.READ_NON_BLOCKING);
                        };
                        break;
                    case AudioFormat.ENCODING_PCM_FLOAT:
                        reader = (samples, blockForData) -> {
                            return record.read(floatData, 0, samples,
                                    blockForData ? AudioRecord.READ_BLOCKING
                                            : AudioRecord.READ_NON_BLOCKING);
                        };
                        break;
                }
            }

            while (samplesRead < targetSamples) {
                // the first time through, we read a single frame.
                // this sets the recording anchor position.
                final int amount = samplesRead == 0 ? numChannels :
                        Math.min(BUFFER_SAMPLES, targetSamples - samplesRead);
                final int ret = reader.apply(amount, blocking);
                if (blocking) {
                    assertWithMessage("blocking reads should return amount requested")
                            .that(amount).isEqualTo(ret);
                } else {
                    assertWithMessage("non-blocking reads should return amount in range: "
                            + "0 <= " + ret + " <= " + amount)
                            .that(ret)
                            .isIn(Range.closed(0, amount));
                }
                if (samplesRead == 0 && ret > 0) {
                    firstSampleTime = System.currentTimeMillis();
                }
                samplesRead += ret;
                if (startTs.nanoTime == 0 && ret > 0 &&
                        record.getTimestamp(startTs, AudioTimestamp.TIMEBASE_MONOTONIC)
                                == AudioRecord.SUCCESS) {
                    assertWithMessage("expecting valid timestamp with nonzero nanoTime")
                            .that(startTs.nanoTime)
                            .isGreaterThan(0);
                }
                if (samplesRead > runningTimestampStart
                        && running1Ts.nanoTime == 0 && ret > 0) {
                    record.getTimestamp(running1Ts, AudioTimestamp.TIMEBASE_MONOTONIC);
                }
                if (samplesRead > runningTimestampStop
                        && running2Ts.nanoTime == 0 && ret > 0) {
                    record.getTimestamp(running2Ts, AudioTimestamp.TIMEBASE_MONOTONIC);
                }
            }

            // We've read all the frames, now check the record timing.
            endTime = System.currentTimeMillis();

            coldInputStartTime = firstSampleTime - startTime;
            //Log.d(TAG, "first sample time " + coldInputStartTime
            //        + " test time " + (endTime - firstSampleTime));

            if (coldInputStartTime > 200) {
                Log.w(TAG, "cold input start time way too long "
                        + coldInputStartTime + " > 200ms");
            } else if (coldInputStartTime > 100) {
                Log.w(TAG, "cold input start time too long "
                        + coldInputStartTime + " > 100ms");
            }

            final int COLD_INPUT_START_TIME_LIMIT_MS = 5000;
            assertWithMessage("track must start within " + COLD_INPUT_START_TIME_LIMIT_MS
                    + " millis")
                    .that(coldInputStartTime)
                    .isLessThan(COLD_INPUT_START_TIME_LIMIT_MS);

            // Verify recording completes within 50 ms of expected test time (typical 20ms)
            final int RECORDING_TIME_TOLERANCE_MS = auditRecording ?
                    (isLowLatencyDevice() ? 1000 : 2000) : (isLowLatencyDevice() ? 50 : 400);
            assertWithMessage("recording must complete within " + RECORDING_TIME_TOLERANCE_MS
                    + " of expected test time")
                    .that((double) (endTime - firstSampleTime))
                    .isWithin(RECORDING_TIME_TOLERANCE_MS)
                    .of(TEST_TIME_MS);

            // Even though we've read all the frames we want, the events may not be sent to
            // the listeners (events are handled through a separate internal callback thread).
            // One must sleep to make sure the last event(s) come in.
            Thread.sleep(30);

            stopRequestTime = System.currentTimeMillis();
            record.stop();
            assertWithMessage("state should be RECORDSTATE_STOPPED after stop()")
                    .that(record.getRecordingState())
                    .isEqualTo(AudioRecord.RECORDSTATE_STOPPED);

            stopTime = System.currentTimeMillis();

            // stop listening - we should be done.
            // Caution M behavior and likely much earlier:
            // we assume no events can happen after stop(), but this may not
            // always be true as stop can take 100ms to complete (as it may disable
            // input recording on the hal); thus the event handler may be blocked with
            // valid events, issuing right after stop completes. Except for those events,
            // no other events should show up after stop.
            // This behavior may change in the future but we account for it here in testing.
            final long SLEEP_AFTER_STOP_FOR_EVENTS_MS = 30;
            Thread.sleep(SLEEP_AFTER_STOP_FOR_EVENTS_MS);
            listener.stop();

            // get stop timestamp
            // Note: the stop timestamp is collected *after* stop is called.
            AudioTimestamp stopTs = new AudioTimestamp();
            assertWithMessage("should successfully get timestamp after stop")
                    .that(record.getTimestamp(stopTs, AudioTimestamp.TIMEBASE_MONOTONIC))
                    .isEqualTo(AudioRecord.SUCCESS);
            AudioTimestamp stopTsBoot = new AudioTimestamp();
            assertWithMessage("should successfully get boottime timestamp after stop")
                    .that(record.getTimestamp(stopTsBoot, AudioTimestamp.TIMEBASE_BOOTTIME))
                    .isEqualTo(AudioRecord.SUCCESS);

            // printTimestamp("startTs", startTs);
            // printTimestamp("stopTs", stopTs);
            // printTimestamp("stopTsBoot", stopTsBoot);
            // Log.d(TAG, "time Monotonic " + System.nanoTime());
            // Log.d(TAG, "time Boottime " + SystemClock.elapsedRealtimeNanos());

            // stop should not reset timestamps
            assertWithMessage("stop timestamp position should be no less than frames read")
                    .that(stopTs.framePosition)
                    .isAtLeast(targetFrames);
            assertWithMessage("stop timestamp position should be same "
                    + "between monotonic and boot timestamps")
                    .that(stopTs.framePosition)
                    .isEqualTo(stopTsBoot.framePosition);
            assertWithMessage("stop timestamp nanoTime must be greater than 0")
                    .that(stopTs.nanoTime)
                    .isGreaterThan(0);

            // timestamps follow a different path than data, so it is conceivable
            // that first data arrives before the first timestamp is ready.
            assertWithMessage("start timestamp must have positive time")
                    .that(startTs.nanoTime)
                    .isGreaterThan(0);

            // we allow more timestamp inaccuracy for the entire recording run,
            // including start and stop.
            verifyContinuousTimestamps(startTs, stopTs, TEST_SR, true /* coarse */);

            // during the middle 2/3 of the run, we expect stable timestamps.
            verifyContinuousTimestamps(running1Ts, running2Ts, TEST_SR, false /* coarse */);

            // clean up
            if (makeSomething != null) {
                makeSomething.join();
            }

        } finally {
            listener.release();
            // we must release the record immediately as it is a system-wide
            // resource needed for other tests.
            record.release();
        }

        final int markerPeriods = markerPeriodsPerSecond * TEST_TIME_MS / 1000;
        final int updatePeriods = periodsPerSecond * TEST_TIME_MS / 1000;
        final int markerPeriodsMax =
                markerPeriodsPerSecond * (int)(stopTime - firstSampleTime) / 1000 + 1;
        final int updatePeriodsMax =
                periodsPerSecond * (int)(stopTime - firstSampleTime) / 1000 + 1;

        // collect statistics
        final ArrayList<Integer> markerList = listener.getMarkerList();
        final ArrayList<Integer> periodicList = listener.getPeriodicList();
        // verify count of markers and periodic notifications.
        // there could be an extra notification since we don't stop() immediately
        // but rather wait for potential events to come in.
1388 //Log.d(TAG, "markerPeriods " + markerPeriods + 1389 // " markerPeriodsReceived " + markerList.size()); 1390 //Log.d(TAG, "updatePeriods " + updatePeriods + 1391 // " updatePeriodsReceived " + periodicList.size()); 1392 if (isLowLatencyDevice()) { 1393 assertWithMessage(TAG + ": markerPeriods " + markerPeriods 1394 + " <= markerPeriodsReceived " + markerList.size()) 1395 .that(markerPeriods) 1396 .isAtMost(markerList.size()); 1397 assertWithMessage(TAG + ": markerPeriodsReceived " + markerList.size() 1398 + " <= markerPeriodsMax " + markerPeriodsMax) 1399 .that(markerList.size()) 1400 .isAtMost(markerPeriodsMax); 1401 1402 assertWithMessage(TAG + ": updatePeriods " + updatePeriods 1403 + " <= updatePeriodsReceived " + periodicList.size()) 1404 .that(updatePeriods) 1405 .isAtMost(periodicList.size()); 1406 assertWithMessage(TAG + ": updatePeriodsReceived " + periodicList.size() 1407 + " <= updatePeriodsMax " + updatePeriodsMax) 1408 .that(periodicList.size()) 1409 .isAtMost(updatePeriodsMax); 1410 } 1411 1412 // Since we don't have accurate positioning of the start time of the recorder, 1413 // and there is no record.getPosition(), we consider only differential timing 1414 // from the first marker or periodic event. 1415 final int toleranceInFrames = TEST_SR * 80 / 1000; // 80 ms 1416 final int testTimeInFrames = (int)((long)TEST_TIME_MS * TEST_SR / 1000); 1417 1418 AudioHelper.Statistics markerStat = new AudioHelper.Statistics(); 1419 for (int i = 1; i < markerList.size(); ++i) { 1420 final int expected = mMarkerPeriodInFrames * i; 1421 if (markerList.get(i) > testTimeInFrames) { 1422 break; // don't consider any notifications when we might be stopping. 1423 } 1424 final int actual = markerList.get(i) - markerList.get(0); 1425 //Log.d(TAG, "Marker: " + i + " expected(" + expected + ") actual(" + actual 1426 // + ") diff(" + (actual - expected) + ")" 1427 // + " tolerance " + toleranceInFrames); 1428 if (isLowLatencyDevice()) { 1429 assertWithMessage("marker period should match frame count") 1430 .that((double) actual) 1431 .isWithin(toleranceInFrames) 1432 .of(expected); 1433 } 1434 markerStat.add((double)(actual - expected) * 1000 / TEST_SR); 1435 } 1436 1437 AudioHelper.Statistics periodicStat = new AudioHelper.Statistics(); 1438 for (int i = 1; i < periodicList.size(); ++i) { 1439 final int expected = updatePeriodInFrames * i; 1440 if (periodicList.get(i) > testTimeInFrames) { 1441 break; // don't consider any notifications when we might be stopping. 
1442 } 1443 final int actual = periodicList.get(i) - periodicList.get(0); 1444 //Log.d(TAG, "Update: " + i + " expected(" + expected + ") actual(" + actual 1445 // + ") diff(" + (actual - expected) + ")" 1446 // + " tolerance " + toleranceInFrames); 1447 if (isLowLatencyDevice()) { 1448 assertWithMessage("position period check should match frame count") 1449 .that((double) actual) 1450 .isWithin(toleranceInFrames) 1451 .of(expected); 1452 } 1453 periodicStat.add((double)(actual - expected) * 1000 / TEST_SR); 1454 } 1455 1456 // report this 1457 DeviceReportLog log = new DeviceReportLog(REPORT_LOG_NAME, reportName); 1458 log.addValue("start_recording_lag", coldInputStartTime, ResultType.LOWER_BETTER, 1459 ResultUnit.MS); 1460 log.addValue("stop_execution_time", stopTime - stopRequestTime, ResultType.LOWER_BETTER, 1461 ResultUnit.MS); 1462 log.addValue("total_record_time_expected", TEST_TIME_MS, ResultType.NEUTRAL, ResultUnit.MS); 1463 log.addValue("total_record_time_actual", endTime - firstSampleTime, ResultType.NEUTRAL, 1464 ResultUnit.MS); 1465 log.addValue("total_markers_expected", markerPeriods, ResultType.NEUTRAL, ResultUnit.COUNT); 1466 log.addValue("total_markers_actual", markerList.size(), ResultType.NEUTRAL, 1467 ResultUnit.COUNT); 1468 log.addValue("total_periods_expected", updatePeriods, ResultType.NEUTRAL, ResultUnit.COUNT); 1469 log.addValue("total_periods_actual", periodicList.size(), ResultType.NEUTRAL, 1470 ResultUnit.COUNT); 1471 log.addValue("average_marker_diff", markerStat.getAvg(), ResultType.LOWER_BETTER, 1472 ResultUnit.MS); 1473 log.addValue("maximum_marker_abs_diff", markerStat.getMaxAbs(), ResultType.LOWER_BETTER, 1474 ResultUnit.MS); 1475 log.addValue("average_marker_abs_diff", markerStat.getAvgAbs(), ResultType.LOWER_BETTER, 1476 ResultUnit.MS); 1477 log.addValue("average_periodic_diff", periodicStat.getAvg(), ResultType.LOWER_BETTER, 1478 ResultUnit.MS); 1479 log.addValue("maximum_periodic_abs_diff", periodicStat.getMaxAbs(), ResultType.LOWER_BETTER, 1480 ResultUnit.MS); 1481 log.addValue("average_periodic_abs_diff", periodicStat.getAvgAbs(), ResultType.LOWER_BETTER, 1482 ResultUnit.MS); 1483 log.setSummary("unified_abs_diff", (periodicStat.getAvgAbs() + markerStat.getAvgAbs()) / 2, 1484 ResultType.LOWER_BETTER, ResultUnit.MS); 1485 log.submit(InstrumentationRegistry.getInstrumentation()); 1486 } 1487 1488 private class MockOnRecordPositionUpdateListener 1489 implements OnRecordPositionUpdateListener { MockOnRecordPositionUpdateListener(AudioRecord record)1490 public MockOnRecordPositionUpdateListener(AudioRecord record) { 1491 mAudioRecord = record; 1492 record.setRecordPositionUpdateListener(this); 1493 } 1494 MockOnRecordPositionUpdateListener(AudioRecord record, Handler handler)1495 public MockOnRecordPositionUpdateListener(AudioRecord record, Handler handler) { 1496 mAudioRecord = record; 1497 record.setRecordPositionUpdateListener(this, handler); 1498 } 1499 onMarkerReached(AudioRecord record)1500 public synchronized void onMarkerReached(AudioRecord record) { 1501 if (mIsTestActive) { 1502 int position = getPosition(); 1503 mOnMarkerReachedCalled.add(position); 1504 mMarkerPosition += mMarkerPeriodInFrames; 1505 assertWithMessage("setNotificationMarkerPosition() returns SUCCESS") 1506 .that(mAudioRecord.setNotificationMarkerPosition(mMarkerPosition)) 1507 .isEqualTo(AudioRecord.SUCCESS); 1508 } else { 1509 // see comment on stop() 1510 final long delta = System.currentTimeMillis() - mStopTime; 1511 Log.d(TAG, "onMarkerReached called " + delta + " ms after 
stop"); 1512 fail("onMarkerReached called when not active"); 1513 } 1514 } 1515 onPeriodicNotification(AudioRecord record)1516 public synchronized void onPeriodicNotification(AudioRecord record) { 1517 if (mIsTestActive) { 1518 int position = getPosition(); 1519 mOnPeriodicNotificationCalled.add(position); 1520 } else { 1521 // see comment on stop() 1522 final long delta = System.currentTimeMillis() - mStopTime; 1523 Log.d(TAG, "onPeriodicNotification called " + delta + " ms after stop"); 1524 fail("onPeriodicNotification called when not active"); 1525 } 1526 } 1527 start(int sampleRate)1528 public synchronized void start(int sampleRate) { 1529 mIsTestActive = true; 1530 mSampleRate = sampleRate; 1531 mStartTime = System.currentTimeMillis(); 1532 } 1533 stop()1534 public synchronized void stop() { 1535 // the listener should be stopped some time after AudioRecord is stopped 1536 // as some messages may not yet be posted. 1537 mIsTestActive = false; 1538 mStopTime = System.currentTimeMillis(); 1539 } 1540 getMarkerList()1541 public ArrayList<Integer> getMarkerList() { 1542 return mOnMarkerReachedCalled; 1543 } 1544 getPeriodicList()1545 public ArrayList<Integer> getPeriodicList() { 1546 return mOnPeriodicNotificationCalled; 1547 } 1548 release()1549 public synchronized void release() { 1550 stop(); 1551 mAudioRecord.setRecordPositionUpdateListener(null); 1552 mAudioRecord = null; 1553 } 1554 getPosition()1555 private int getPosition() { 1556 // we don't have mAudioRecord.getRecordPosition(); 1557 // so we fake this by timing. 1558 long delta = System.currentTimeMillis() - mStartTime; 1559 return (int)(delta * mSampleRate / 1000); 1560 } 1561 1562 private long mStartTime; 1563 private long mStopTime; 1564 private int mSampleRate; 1565 private boolean mIsTestActive = true; 1566 private AudioRecord mAudioRecord; 1567 private ArrayList<Integer> mOnMarkerReachedCalled = new ArrayList<Integer>(); 1568 private ArrayList<Integer> mOnPeriodicNotificationCalled = new ArrayList<Integer>(); 1569 } 1570 hasMicrophone()1571 private boolean hasMicrophone() { 1572 return getContext().getPackageManager().hasSystemFeature( 1573 PackageManager.FEATURE_MICROPHONE); 1574 } 1575 isLowRamDevice()1576 private boolean isLowRamDevice() { 1577 return ((ActivityManager) getContext().getSystemService(Context.ACTIVITY_SERVICE)) 1578 .isLowRamDevice(); 1579 } 1580 isLowLatencyDevice()1581 private boolean isLowLatencyDevice() { 1582 return getContext().getPackageManager().hasSystemFeature( 1583 PackageManager.FEATURE_AUDIO_LOW_LATENCY); 1584 } 1585 isProAudioDevice()1586 private boolean isProAudioDevice() { 1587 return getContext().getPackageManager().hasSystemFeature( 1588 PackageManager.FEATURE_AUDIO_PRO); 1589 } 1590 verifyContinuousTimestamps( AudioTimestamp startTs, AudioTimestamp stopTs, int sampleRate, boolean coarse)1591 private void verifyContinuousTimestamps( 1592 AudioTimestamp startTs, AudioTimestamp stopTs, int sampleRate, boolean coarse) 1593 throws Exception { 1594 final long timeDiff = stopTs.nanoTime - startTs.nanoTime; 1595 final long frameDiff = stopTs.framePosition - startTs.framePosition; 1596 final long NANOS_PER_SECOND = 1000000000; 1597 final long timeByFrames = frameDiff * NANOS_PER_SECOND / sampleRate; 1598 final double ratio = (double)timeDiff / timeByFrames; 1599 final double tolerance = (isLowLatencyDevice() ? 0.01 : 0.5) * (coarse ? 3. : 1.); 1600 1601 // Usually the ratio is accurate to one part per thousand or better. 
1602 // Log.d(TAG, "ratio=" + ratio + ", timeDiff=" + timeDiff + ", frameDiff=" + frameDiff + 1603 // ", timeByFrames=" + timeByFrames + ", sampleRate=" + sampleRate); 1604 assertWithMessage("Timestamp rate must match sample rate by ratio") 1605 .that(ratio) 1606 .isWithin(tolerance) 1607 .of(1.); 1608 } 1609 1610 // remove if AudioTimestamp has a better toString(). printTimestamp(String s, AudioTimestamp ats)1611 private void printTimestamp(String s, AudioTimestamp ats) { 1612 Log.d(TAG, s + ": pos: " + ats.framePosition + " time: " + ats.nanoTime); 1613 } 1614 readDataTimed(AudioRecord recorder, long durationMillis, ShortBuffer out)1615 private static void readDataTimed(AudioRecord recorder, long durationMillis, 1616 ShortBuffer out) throws IOException { 1617 final short[] buffer = new short[1024]; 1618 final long startTimeMillis = SystemClock.uptimeMillis(); 1619 final long stopTimeMillis = startTimeMillis + durationMillis; 1620 while (SystemClock.uptimeMillis() < stopTimeMillis) { 1621 final int readCount = recorder.read(buffer, 0, buffer.length); 1622 if (readCount <= 0) { 1623 return; 1624 } 1625 out.put(buffer, 0, readCount); 1626 } 1627 } 1628 isAudioSilent(ShortBuffer buffer)1629 private static boolean isAudioSilent(ShortBuffer buffer) { 1630 // Always need some bytes read 1631 assertWithMessage("Buffer should have some data") 1632 .that(buffer.position()) 1633 .isGreaterThan(0); 1634 1635 // It is possible that the transition from empty to non empty bytes 1636 // happened in the middle of the read data due to the async nature of 1637 // the system. Therefore, we look for the transitions from non-empty 1638 // to empty and from empty to non-empty values for robustness. 1639 int totalSilenceCount = 0; 1640 final int valueCount = buffer.position(); 1641 for (int i = valueCount - 1; i >= 0; i--) { 1642 final short value = buffer.get(i); 1643 if (value == 0) { 1644 totalSilenceCount++; 1645 } 1646 } 1647 return totalSilenceCount > valueCount / 2; 1648 } 1649 getContext()1650 private static Context getContext() { 1651 return InstrumentationRegistry.getInstrumentation().getTargetContext(); 1652 } 1653 1654 /* 1655 * Microphone Direction API tests 1656 */ 1657 @Test testSetPreferredMicrophoneDirection()1658 public void testSetPreferredMicrophoneDirection() { 1659 if (!hasMicrophone()) { 1660 return; 1661 } 1662 1663 try { 1664 boolean success = 1665 mAudioRecord.setPreferredMicrophoneDirection( 1666 MicrophoneDirection.MIC_DIRECTION_TOWARDS_USER); 1667 1668 // Can't actually test this as HAL may not have implemented it 1669 // Just verify that it doesn't crash or throw an exception 1670 // assertTrue(success); 1671 } catch (Exception ex) { 1672 Log.e(TAG, "testSetPreferredMicrophoneDirection() exception:" + ex); 1673 throw(ex); 1674 } 1675 return; 1676 } 1677 1678 @Test testSetPreferredMicrophoneFieldDimension()1679 public void testSetPreferredMicrophoneFieldDimension() { 1680 if (!hasMicrophone()) { 1681 return; 1682 } 1683 1684 try { 1685 boolean success = mAudioRecord.setPreferredMicrophoneFieldDimension(1.0f); 1686 1687 // Can't actually test this as HAL may not have implemented it 1688 // Just verify that it doesn't crash or throw an exception 1689 // assertTrue(success); 1690 } catch (Exception ex) { 1691 Log.e(TAG, "testSetPreferredMicrophoneFieldDimension() exception:" + ex); 1692 throw(ex); 1693 } 1694 return; 1695 } 1696 1697 /** 1698 * Test AudioRecord Builder error handling. 
1699 * 1700 * @throws Exception 1701 */ 1702 @Test testAudioRecordBuilderError()1703 public void testAudioRecordBuilderError() throws Exception { 1704 if (!hasMicrophone()) { 1705 return; 1706 } 1707 1708 final AudioRecord[] audioRecord = new AudioRecord[1]; // pointer to AudioRecord. 1709 final int BIGNUM = Integer.MAX_VALUE; // large value that should be invalid. 1710 final int INVALID_SESSION_ID = 1024; // can never occur (wrong type in 3 lsbs) 1711 final int INVALID_CHANNEL_MASK = -1; 1712 1713 try { 1714 // NOTE: 1715 // AudioFormat tested in AudioFormatTest#testAudioFormatBuilderError. 1716 1717 // We must be able to create the AudioRecord. 1718 audioRecord[0] = new AudioRecord.Builder().build(); 1719 audioRecord[0].release(); 1720 1721 // Out of bounds buffer size. A large size will fail in AudioRecord creation. 1722 assertThrows(UnsupportedOperationException.class, () -> { 1723 audioRecord[0] = new AudioRecord.Builder() 1724 .setBufferSizeInBytes(BIGNUM) 1725 .build(); 1726 }); 1727 1728 // 0 and negative buffer size throw IllegalArgumentException 1729 for (int bufferSize : new int[] {-BIGNUM, -1, 0}) { 1730 assertThrows(IllegalArgumentException.class, () -> { 1731 audioRecord[0] = new AudioRecord.Builder() 1732 .setBufferSizeInBytes(bufferSize) 1733 .build(); 1734 }); 1735 } 1736 1737 assertThrows(IllegalArgumentException.class, () -> { 1738 audioRecord[0] = new AudioRecord.Builder() 1739 .setAudioSource(BIGNUM) 1740 .build(); 1741 }); 1742 1743 assertThrows(IllegalArgumentException.class, () -> { 1744 audioRecord[0] = new AudioRecord.Builder() 1745 .setAudioSource(-2) 1746 .build(); 1747 }); 1748 1749 // Invalid session id that is positive. 1750 // (logcat error message vague) 1751 assertThrows(UnsupportedOperationException.class, () -> { 1752 audioRecord[0] = new AudioRecord.Builder() 1753 .setSessionId(INVALID_SESSION_ID) 1754 .build(); 1755 }); 1756 1757 // Specialty AudioRecord tests 1758 assertThrows(NullPointerException.class, () -> { 1759 audioRecord[0] = new AudioRecord.Builder() 1760 .setAudioPlaybackCaptureConfig(null) 1761 .build(); 1762 }); 1763 1764 assertThrows(NullPointerException.class, () -> { 1765 audioRecord[0] = new AudioRecord.Builder() 1766 .setContext(null) 1767 .build(); 1768 }); 1769 1770 // Bad audio encoding DRA expected unsupported. 1771 try { 1772 audioRecord[0] = new AudioRecord.Builder() 1773 .setAudioFormat(new AudioFormat.Builder() 1774 .setChannelMask(AudioFormat.CHANNEL_OUT_STEREO) 1775 .setEncoding(AudioFormat.ENCODING_DRA) 1776 .build()) 1777 .build(); 1778 // Don't throw an exception, maybe it is supported somehow, but warn. 1779 Log.w(TAG, "ENCODING_DRA is expected to be unsupported"); 1780 audioRecord[0].release(); 1781 audioRecord[0] = null; 1782 } catch (UnsupportedOperationException e) { 1783 ; // OK expected 1784 } 1785 1786 // Sample rate out of bounds. 1787 // System levels caught on AudioFormat. 1788 assertThrows(IllegalArgumentException.class, () -> { 1789 audioRecord[0] = new AudioRecord.Builder() 1790 .setAudioFormat(new AudioFormat.Builder() 1791 .setSampleRate(BIGNUM) 1792 .build()) 1793 .build(); 1794 }); 1795 1796 // Invalid channel mask 1797 // This is a UOE for AudioRecord vs IAE for AudioTrack. 
1798 assertThrows(UnsupportedOperationException.class, () -> { 1799 audioRecord[0] = new AudioRecord.Builder() 1800 .setAudioFormat(new AudioFormat.Builder() 1801 .setChannelMask(INVALID_CHANNEL_MASK) 1802 .build()) 1803 .build(); 1804 }); 1805 } finally { 1806 // Did we successfully complete for some reason but did not 1807 // release? 1808 if (audioRecord[0] != null) { 1809 audioRecord[0].release(); 1810 audioRecord[0] = null; 1811 } 1812 } 1813 } 1814 1815 @Test testPrivacySensitiveBuilder()1816 public void testPrivacySensitiveBuilder() throws Exception { 1817 if (!hasMicrophone()) { 1818 return; 1819 } 1820 1821 for (final boolean privacyOn : new boolean[] { false, true} ) { 1822 AudioRecord record = new AudioRecord.Builder() 1823 .setAudioFormat(new AudioFormat.Builder() 1824 .setEncoding(AudioFormat.ENCODING_PCM_16BIT) 1825 .setSampleRate(8000) 1826 .setChannelMask(AudioFormat.CHANNEL_IN_MONO) 1827 .build()) 1828 .setPrivacySensitive(privacyOn) 1829 .build(); 1830 assertWithMessage("Builder with privacyOn " + privacyOn + " is set correctly") 1831 .that(record.isPrivacySensitive()) 1832 .isEqualTo(privacyOn); 1833 record.release(); 1834 } 1835 } 1836 1837 @Test testPrivacySensitiveDefaults()1838 public void testPrivacySensitiveDefaults() throws Exception { 1839 if (!hasMicrophone()) { 1840 return; 1841 } 1842 1843 AudioRecord record = new AudioRecord.Builder() 1844 .setAudioSource(MediaRecorder.AudioSource.MIC) 1845 .setAudioFormat(new AudioFormat.Builder() 1846 .setEncoding(AudioFormat.ENCODING_PCM_16BIT) 1847 .setSampleRate(8000) 1848 .setChannelMask(AudioFormat.CHANNEL_IN_MONO) 1849 .build()) 1850 .build(); 1851 assertWithMessage("AudioSource.MIC should not be privacy sensitive") 1852 .that(record.isPrivacySensitive()).isFalse(); 1853 record.release(); 1854 1855 record = new AudioRecord.Builder() 1856 .setAudioSource(MediaRecorder.AudioSource.VOICE_COMMUNICATION) 1857 .setAudioFormat(new AudioFormat.Builder() 1858 .setEncoding(AudioFormat.ENCODING_PCM_16BIT) 1859 .setSampleRate(8000) 1860 .setChannelMask(AudioFormat.CHANNEL_IN_MONO) 1861 .build()) 1862 .build(); 1863 assertWithMessage("AudioSource.VOICE_COMMUNICATION should be privacy sensitive") 1864 .that(record.isPrivacySensitive()).isTrue(); 1865 record.release(); 1866 } 1867 1868 @Test testSetLogSessionId()1869 public void testSetLogSessionId() throws Exception { 1870 if (!hasMicrophone()) { 1871 return; 1872 } 1873 AudioRecord audioRecord = null; 1874 try { 1875 audioRecord = new AudioRecord.Builder() 1876 .setAudioFormat(new AudioFormat.Builder() 1877 .setEncoding(AudioFormat.ENCODING_PCM_16BIT) 1878 .setChannelMask(AudioFormat.CHANNEL_IN_MONO) 1879 .build()) 1880 .build(); 1881 audioRecord.setLogSessionId(LogSessionId.LOG_SESSION_ID_NONE); // should not throw. 1882 assertWithMessage("Can set LogSessionId.LOG_SESSION_ID_NONE") 1883 .that(audioRecord.getLogSessionId()) 1884 .isEqualTo(LogSessionId.LOG_SESSION_ID_NONE); 1885 1886 final MediaMetricsManager mediaMetricsManager = 1887 getContext().getSystemService(MediaMetricsManager.class); 1888 final RecordingSession recordingSession = 1889 mediaMetricsManager.createRecordingSession(); 1890 audioRecord.setLogSessionId(recordingSession.getSessionId()); 1891 assertWithMessage("Can set recordingSession sessionId") 1892 .that(audioRecord.getLogSessionId()) 1893 .isEqualTo(recordingSession.getSessionId()); 1894 1895 // record some data to generate a log entry. 
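// (roughly half a second of mono 16-bit samples, captured with the blocking read() call)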
1896 short data[] = new short[audioRecord.getSampleRate() / 2]; 1897 audioRecord.startRecording(); 1898 audioRecord.read(data, 0 /* offsetInShorts */, data.length); 1899 audioRecord.stop(); 1900 1901 // Also can check the mediametrics dumpsys to validate logs generated. 1902 } finally { 1903 if (audioRecord != null) { 1904 audioRecord.release(); 1905 } 1906 } 1907 } 1908 1909 @Test testCompressedCaptureAAC()1910 public void testCompressedCaptureAAC() throws Exception { 1911 final int ENCODING = AudioFormat.ENCODING_AAC_LC; 1912 final String MIMETYPE = MediaFormat.MIMETYPE_AUDIO_AAC; 1913 final int BUFFER_SIZE = 16000; 1914 if (!hasMicrophone()) { 1915 return; 1916 } 1917 AudioDeviceInfo[] devices = mAudioManager.getDevices(AudioManager.GET_DEVICES_INPUTS); 1918 // TODO test multiple supporting devices if available 1919 AudioDeviceInfo supportingDevice = null; 1920 for (AudioDeviceInfo device : devices) { 1921 for (int encoding : device.getEncodings()) { 1922 if (encoding == ENCODING) { 1923 supportingDevice = device; 1924 break; 1925 } 1926 } 1927 if (supportingDevice != null) break; 1928 } 1929 if (supportingDevice == null) { 1930 Log.i(TAG, "Compressed audio (AAC) not supported"); 1931 return; // Compressed Audio is not supported 1932 } 1933 Log.i(TAG, "Compressed audio (AAC) supported"); 1934 AudioRecord audioRecord = null; 1935 try { 1936 audioRecord = new AudioRecord.Builder() 1937 .setAudioFormat(new AudioFormat.Builder() 1938 .setEncoding(ENCODING) 1939 .setChannelMask(AudioFormat.CHANNEL_IN_MONO) 1940 .build()) 1941 .build(); 1942 audioRecord.setPreferredDevice(supportingDevice); 1943 class ByteBufferImpl extends StreamUtils.ByteBufferStream { 1944 @Override 1945 public ByteBuffer read() throws IOException { 1946 if (mCount < 1 /* only one buffer */) { 1947 ++mCount; 1948 return mByteBuffer; 1949 } 1950 return null; 1951 } 1952 public ByteBuffer mByteBuffer = ByteBuffer.allocateDirect(BUFFER_SIZE); 1953 private int mCount = 0; 1954 } 1955 1956 ByteBufferImpl byteBufferImpl = new ByteBufferImpl(); 1957 audioRecord.startRecording(); 1958 audioRecord.read(byteBufferImpl.mByteBuffer, BUFFER_SIZE); 1959 audioRecord.stop(); 1960 // Attempt to decode compressed data 1961 //sample rate/ch count not needed 1962 final MediaFormat format = MediaFormat.createAudioFormat(MIMETYPE, 0, 0); 1963 final StreamUtils.MediaCodecStream decodingStream 1964 = new StreamUtils.MediaCodecStream(byteBufferImpl, format, false); 1965 ByteBuffer decoded = decodingStream.read(); 1966 int totalDecoded = 0; 1967 while (decoded != null) { 1968 // TODO validate actual data 1969 totalDecoded += decoded.remaining(); 1970 decoded = decodingStream.read(); 1971 } 1972 Log.i(TAG, "Decoded size:" + String.valueOf(totalDecoded)); 1973 // TODO rethrow following exceptions on verification 1974 } catch (UnsupportedOperationException e) { 1975 Log.w(TAG, "Compressed AudioRecord unable to be built"); 1976 } catch (IllegalStateException e) { 1977 Log.w(TAG, "Compressed AudioRecord unable to be started"); 1978 } finally { 1979 if (audioRecord != null) { 1980 audioRecord.release(); 1981 } 1982 } 1983 } 1984 } 1985
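/*
 * Usage sketch (illustrative only, not part of the CTS test): the basic capture pattern
 * exercised throughout this file. The 48000 Hz sample rate and the buffer sizing below are
 * arbitrary example values, not requirements.
 *
 *   int minBuf = AudioRecord.getMinBufferSize(48000,
 *           AudioFormat.CHANNEL_IN_MONO, AudioFormat.ENCODING_PCM_16BIT);
 *   AudioRecord record = new AudioRecord.Builder()
 *           .setAudioFormat(new AudioFormat.Builder()
 *                   .setSampleRate(48000)
 *                   .setEncoding(AudioFormat.ENCODING_PCM_16BIT)
 *                   .setChannelMask(AudioFormat.CHANNEL_IN_MONO)
 *                   .build())
 *           .setBufferSizeInBytes(minBuf * 2)
 *           .build();
 *   short[] buffer = new short[1024];
 *   record.startRecording();
 *   int read = record.read(buffer, 0, buffer.length); // blocking read, returns samples read
 *   record.stop();
 *   record.release();
 */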