From 962c62475e31ccb5b1315bf646138652e273d0f5 Mon Sep 17 00:00:00 2001
From: "henrika@webrtc.org"
Date: Mon, 23 Feb 2015 11:54:05 +0000
Subject: [PATCH] Refactoring WebRTC Java/JNI audio track in C++ and Java.

This CL is part II in a major refactoring effort. See
https://webrtc-codereview.appspot.com/33969004 for part I.

- Removes unused code and old WEBRTC logging macros
- Now uses optimal sample rate and buffer size in Java AudioTrack (used hard-coded sample rate before)
- Makes code more in line with the implementation in Chrome
- Adds helper methods for JNI handling to improve readability
- Changes the threading model (high-prio audio thread now lives in Java-land and C++ only acts as a proxy)
- Simplifies the delay estimate
- Adds basic thread checks
- Removes all locks in C++ land
- Removes all locks in Java
- Improves construction/destruction
- Additional cleanup

Tested using AppRTCDemo and WebRTCDemo APKs on N6, N5, N7, Samsung Galaxy S4 and Samsung Galaxy S4 mini (which uses 44.1 kHz as native sample rate).

BUG=NONE
R=magjed@webrtc.org, perkj@webrtc.org

Review URL: https://webrtc-codereview.appspot.com/39169004

Cr-Commit-Position: refs/heads/master@{#8460}
git-svn-id: http://webrtc.googlecode.com/svn/trunk@8460 4adac7df-926f-26a2-2b94-8c16560cd09d
---
 talk/libjingle.gyp | 1 +
 .../org/webrtc/webrtcdemo/MediaEngine.java | 2 -
 .../src/org/webrtc/webrtcdemo/WebRTCDemo.java | 4 +-
 .../opensl_loopback/jni/opensl_runner.cc | 4 +-
 .../audio_device/android/audio_common.h | 3 +
 .../android/audio_device_template.h | 100 +-
 .../audio_device/android/audio_record_jni.cc | 26 +-
 .../audio_device/android/audio_record_jni.h | 15 +-
 .../audio_device/android/audio_track_jni.cc | 1523 +++--------------
 .../audio_device/android/audio_track_jni.h | 216 ++-
 .../webrtc/voiceengine/WebRtcAudioRecord.java | 154 +-
 .../webrtc/voiceengine/WebRtcAudioTrack.java | 456 +++--
 .../webrtc/voiceengine/WebRtcAudioUtils.java | 63 +
 .../audio_device/android/opensles_input.cc | 8 +-
 .../audio_device/android/opensles_input.h | 4 +-
 .../audio_device/android/opensles_output.cc | 8 +-
 .../audio_device/android/opensles_output.h | 3 +-
 17 files changed, 715 insertions(+), 1875 deletions(-)
 create mode 100644 webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioUtils.java

diff --git a/talk/libjingle.gyp b/talk/libjingle.gyp
index 9f7d4cb6a..c0179da4f 100755
--- a/talk/libjingle.gyp
+++ b/talk/libjingle.gyp
@@ -135,6 +135,7 @@
 '<(webrtc_modules_dir)/video_render/android/java/src/org/webrtc/videoengine/ViEAndroidGLES20.java',
 '<(webrtc_modules_dir)/video_render/android/java/src/org/webrtc/videoengine/ViERenderer.java',
 '<(webrtc_modules_dir)/video_render/android/java/src/org/webrtc/videoengine/ViESurfaceRenderer.java',
+ '<(webrtc_modules_dir)/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioUtils.java',
 '<(webrtc_modules_dir)/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioRecord.java',
 '<(webrtc_modules_dir)/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioTrack.java',
 ],
diff --git a/webrtc/examples/android/media_demo/src/org/webrtc/webrtcdemo/MediaEngine.java b/webrtc/examples/android/media_demo/src/org/webrtc/webrtcdemo/MediaEngine.java
index bed4b3096..5fe789621 100644
--- a/webrtc/examples/android/media_demo/src/org/webrtc/webrtcdemo/MediaEngine.java
+++ b/webrtc/examples/android/media_demo/src/org/webrtc/webrtcdemo/MediaEngine.java
@@ -174,8 +174,6 @@ public class MediaEngine implements VideoDecodeEncodeObserver {
 cameras[info.facing] = info;
 }
setDefaultCamera(); - check(voe.setSpeakerVolume(volumeLevel) == 0, - "Failed setSpeakerVolume"); check(voe.setAecmMode(VoiceEngine.AecmModes.SPEAKERPHONE, false) == 0, "VoE set Aecm speakerphone mode failed"); check(vie.setKeyFrameRequestMethod(videoChannel, diff --git a/webrtc/examples/android/media_demo/src/org/webrtc/webrtcdemo/WebRTCDemo.java b/webrtc/examples/android/media_demo/src/org/webrtc/webrtcdemo/WebRTCDemo.java index 3badf18c7..b447144ba 100644 --- a/webrtc/examples/android/media_demo/src/org/webrtc/webrtcdemo/WebRTCDemo.java +++ b/webrtc/examples/android/media_demo/src/org/webrtc/webrtcdemo/WebRTCDemo.java @@ -230,5 +230,5 @@ public class WebRTCDemo extends Activity implements MenuStateProvider { main.toggleStart(); handler.postDelayed(startOrStopCallback, getCallRestartPeriodicity()); } - }; -} \ No newline at end of file + }; +} diff --git a/webrtc/examples/android/opensl_loopback/jni/opensl_runner.cc b/webrtc/examples/android/opensl_loopback/jni/opensl_runner.cc index 81e4ec451..526df4dfd 100644 --- a/webrtc/examples/android/opensl_loopback/jni/opensl_runner.cc +++ b/webrtc/examples/android/opensl_loopback/jni/opensl_runner.cc @@ -29,8 +29,8 @@ template class OpenSlRunnerTemplate { public: OpenSlRunnerTemplate() - : output_(0), - input_() { + : output_(), + input_(&output_) { output_.AttachAudioBuffer(&audio_buffer_); if (output_.Init() != 0) { assert(false); diff --git a/webrtc/modules/audio_device/android/audio_common.h b/webrtc/modules/audio_device/android/audio_common.h index 4a5303f92..783933b86 100644 --- a/webrtc/modules/audio_device/android/audio_common.h +++ b/webrtc/modules/audio_device/android/audio_common.h @@ -18,6 +18,9 @@ enum { kBitsPerSample = 16, kNumChannels = 1, kDefaultBufSizeInSamples = kDefaultSampleRate * 10 / 1000, + // Number of bytes per audio frame. + // Example: 16-bit PCM in mono => 1*(16/8)=2 [bytes/frame] + kBytesPerFrame = kNumChannels * (kBitsPerSample / 8), }; class PlayoutDelayProvider { diff --git a/webrtc/modules/audio_device/android/audio_device_template.h b/webrtc/modules/audio_device/android/audio_device_template.h index 914a8e855..be5167e44 100644 --- a/webrtc/modules/audio_device/android/audio_device_template.h +++ b/webrtc/modules/audio_device/android/audio_device_template.h @@ -34,10 +34,10 @@ class AudioDeviceTemplate : public AudioDeviceGeneric { InputType::ClearAndroidAudioDeviceObjects(); } + // TODO(henrika): remove id explicit AudioDeviceTemplate(const int32_t id) - : output_(id), - // TODO(henrika): provide proper delay estimate using input_(&output_). - input_() { + : output_(), + input_(&output_) { } virtual ~AudioDeviceTemplate() { @@ -58,11 +58,11 @@ class AudioDeviceTemplate : public AudioDeviceGeneric { } bool Initialized() const { - return output_.Initialized(); + return true; } int16_t PlayoutDevices() { - return output_.PlayoutDevices(); + return 1; } int16_t RecordingDevices() { @@ -73,23 +73,28 @@ class AudioDeviceTemplate : public AudioDeviceGeneric { uint16_t index, char name[kAdmMaxDeviceNameSize], char guid[kAdmMaxGuidSize]) { - return output_.PlayoutDeviceName(index, name, guid); + FATAL() << "Should never be called"; + return -1; } int32_t RecordingDeviceName( uint16_t index, char name[kAdmMaxDeviceNameSize], char guid[kAdmMaxGuidSize]) { + FATAL() << "Should never be called"; return -1; } int32_t SetPlayoutDevice(uint16_t index) { - return output_.SetPlayoutDevice(index); + // OK to use but it has no effect currently since device selection is + // done using Andoid APIs instead. 
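  // (A concrete example of such an Android-side API, given here only as an
  // illustration and not as part of this change: an application would
  // typically route output itself via android.media.AudioManager, e.g.
  // setSpeakerphoneOn(true), rather than through this method.)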
+ return 0; } int32_t SetPlayoutDevice( AudioDeviceModule::WindowsDeviceType device) { - return output_.SetPlayoutDevice(device); + FATAL() << "Should never be called"; + return -1; } int32_t SetRecordingDevice(uint16_t index) { @@ -106,7 +111,8 @@ class AudioDeviceTemplate : public AudioDeviceGeneric { int32_t PlayoutIsAvailable( bool& available) { // NOLINT - return output_.PlayoutIsAvailable(available); + available = true; + return 0; } int32_t InitPlayout() { @@ -175,17 +181,16 @@ class AudioDeviceTemplate : public AudioDeviceGeneric { int32_t WaveOutVolume( uint16_t& volumeLeft, // NOLINT uint16_t& volumeRight) const { // NOLINT - WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, 0, - " API call not supported on this platform"); + FATAL() << "Should never be called"; return -1; } int32_t InitSpeaker() { - return output_.InitSpeaker(); + return 0; } bool SpeakerIsInitialized() const { - return output_.SpeakerIsInitialized(); + return true; } int32_t InitMicrophone() { @@ -198,31 +203,42 @@ class AudioDeviceTemplate : public AudioDeviceGeneric { int32_t SpeakerVolumeIsAvailable( bool& available) { // NOLINT - return output_.SpeakerVolumeIsAvailable(available); + available = false; + FATAL() << "Should never be called"; + return -1; } + // TODO(henrika): add support if/when needed. int32_t SetSpeakerVolume(uint32_t volume) { - return output_.SetSpeakerVolume(volume); + FATAL() << "Should never be called"; + return -1; } + // TODO(henrika): add support if/when needed. int32_t SpeakerVolume( uint32_t& volume) const { // NOLINT - return output_.SpeakerVolume(volume); + FATAL() << "Should never be called"; + return -1; } + // TODO(henrika): add support if/when needed. int32_t MaxSpeakerVolume( uint32_t& maxVolume) const { // NOLINT - return output_.MaxSpeakerVolume(maxVolume); + FATAL() << "Should never be called"; + return -1; } + // TODO(henrika): add support if/when needed. 
int32_t MinSpeakerVolume( uint32_t& minVolume) const { // NOLINT - return output_.MinSpeakerVolume(minVolume); + FATAL() << "Should never be called"; + return -1; } int32_t SpeakerVolumeStepSize( uint16_t& stepSize) const { // NOLINT - return output_.SpeakerVolumeStepSize(stepSize); + FATAL() << "Should never be called"; + return -1; } int32_t MicrophoneVolumeIsAvailable( @@ -263,16 +279,19 @@ class AudioDeviceTemplate : public AudioDeviceGeneric { int32_t SpeakerMuteIsAvailable( bool& available) { // NOLINT - return output_.SpeakerMuteIsAvailable(available); + FATAL() << "Should never be called"; + return -1; } int32_t SetSpeakerMute(bool enable) { - return output_.SetSpeakerMute(enable); + FATAL() << "Should never be called"; + return -1; } int32_t SpeakerMute( bool& enabled) const { // NOLINT - return output_.SpeakerMute(enabled); + FATAL() << "Should never be called"; + return -1; } int32_t MicrophoneMuteIsAvailable( @@ -311,16 +330,19 @@ class AudioDeviceTemplate : public AudioDeviceGeneric { int32_t StereoPlayoutIsAvailable( bool& available) { // NOLINT - return output_.StereoPlayoutIsAvailable(available); + available = false; + return 0; } int32_t SetStereoPlayout(bool enable) { - return output_.SetStereoPlayout(enable); + return -1; } int32_t StereoPlayout( bool& enabled) const { // NOLINT - return output_.StereoPlayout(enabled); + enabled = false; + FATAL() << "Should never be called"; + return -1; } int32_t StereoRecordingIsAvailable( @@ -342,13 +364,15 @@ class AudioDeviceTemplate : public AudioDeviceGeneric { int32_t SetPlayoutBuffer( const AudioDeviceModule::BufferType type, uint16_t sizeMS) { - return output_.SetPlayoutBuffer(type, sizeMS); + FATAL() << "Should never be called"; + return -1; } int32_t PlayoutBuffer( AudioDeviceModule::BufferType& type, uint16_t& sizeMS) const { // NOLINT - return output_.PlayoutBuffer(type, sizeMS); + FATAL() << "Should never be called"; + return -1; } int32_t PlayoutDelay( @@ -368,11 +392,11 @@ class AudioDeviceTemplate : public AudioDeviceGeneric { } bool PlayoutWarning() const { - return output_.PlayoutWarning(); + return false; } bool PlayoutError() const { - return output_.PlayoutError(); + return false; } bool RecordingWarning() const { @@ -383,13 +407,9 @@ class AudioDeviceTemplate : public AudioDeviceGeneric { return false; } - void ClearPlayoutWarning() { - return output_.ClearPlayoutWarning(); - } + void ClearPlayoutWarning() {} - void ClearPlayoutError() { - return output_.ClearPlayoutError(); - } + void ClearPlayoutError() {} void ClearRecordingWarning() {} @@ -401,18 +421,22 @@ class AudioDeviceTemplate : public AudioDeviceGeneric { input_.AttachAudioBuffer(audioBuffer); } + // TODO(henrika): remove int32_t SetPlayoutSampleRate( const uint32_t samplesPerSec) { - return output_.SetPlayoutSampleRate(samplesPerSec); + FATAL() << "Should never be called"; + return -1; } int32_t SetLoudspeakerStatus(bool enable) { - return output_.SetLoudspeakerStatus(enable); + FATAL() << "Should never be called"; + return -1; } int32_t GetLoudspeakerStatus( bool& enable) const { // NOLINT - return output_.GetLoudspeakerStatus(enable); + FATAL() << "Should never be called"; + return -1; } bool BuiltInAECIsAvailable() const { diff --git a/webrtc/modules/audio_device/android/audio_record_jni.cc b/webrtc/modules/audio_device/android/audio_record_jni.cc index 4946d8f12..035698656 100644 --- a/webrtc/modules/audio_device/android/audio_record_jni.cc +++ b/webrtc/modules/audio_device/android/audio_record_jni.cc @@ -25,10 +25,6 @@ namespace webrtc { -// 
Number of bytes per audio frame. -// Example: 16-bit PCM in mono => 1*(16/8)=2 [bytes/frame] -static const int kBytesPerFrame = kNumChannels * (kBitsPerSample / 8); - // We are unable to obtain exact measurements of the hardware delay on Android. // Instead, a lower bound (based on measurements) is used. // TODO(henrika): is it possible to improve this? @@ -59,6 +55,8 @@ void AudioRecordJni::SetAndroidAudioDeviceObjects(void* jvm, void* env, jni, "org/webrtc/voiceengine/WebRtcAudioRecord"); g_audio_record_class = reinterpret_cast( NewGlobalRef(jni, local_class)); + jni->DeleteLocalRef(local_class); + CHECK_EXCEPTION(jni); // Register native methods with the WebRtcAudioRecord class. These methods // are declared private native in WebRtcAudioRecord.java. @@ -86,15 +84,17 @@ void AudioRecordJni::ClearAndroidAudioDeviceObjects() { g_jvm = NULL; } -AudioRecordJni::AudioRecordJni() - : j_audio_record_(NULL), +AudioRecordJni::AudioRecordJni(PlayoutDelayProvider* delay_provider) + : delay_provider_(delay_provider), + j_audio_record_(NULL), direct_buffer_address_(NULL), direct_buffer_capacity_in_bytes_(0), frames_per_buffer_(0), initialized_(false), recording_(false), audio_device_buffer_(NULL), - sample_rate_hz_(0) { + sample_rate_hz_(0), + playout_delay_in_milliseconds_(0) { ALOGD("ctor%s", GetThreadInfo().c_str()); CHECK(HasDeviceObjects()); CreateJavaInstance(); @@ -197,7 +197,6 @@ int32_t AudioRecordJni::StopRecording() { initialized_ = false; recording_ = false; return 0; - } int32_t AudioRecordJni::RecordingDelay(uint16_t& delayMS) const { // NOLINT @@ -268,7 +267,7 @@ void AudioRecordJni::OnCacheDirectBufferAddress( void JNICALL AudioRecordJni::DataIsRecorded( JNIEnv* env, jobject obj, jint length, jlong nativeAudioRecord) { webrtc::AudioRecordJni* this_object = - reinterpret_cast (nativeAudioRecord ); + reinterpret_cast (nativeAudioRecord); this_object->OnDataIsRecorded(length); } @@ -276,10 +275,15 @@ void JNICALL AudioRecordJni::DataIsRecorded( // the thread is 'AudioRecordThread'. void AudioRecordJni::OnDataIsRecorded(int length) { DCHECK(thread_checker_java_.CalledOnValidThread()); + if (playout_delay_in_milliseconds_ == 0) { + playout_delay_in_milliseconds_ = delay_provider_->PlayoutDelayMs(); + ALOGD("cached playout delay: %d", playout_delay_in_milliseconds_); + } audio_device_buffer_->SetRecordedBuffer(direct_buffer_address_, frames_per_buffer_); - // TODO(henrika): improve playout delay estimate. - audio_device_buffer_->SetVQEData(0, kHardwareDelayInMilliseconds, 0); + audio_device_buffer_->SetVQEData(playout_delay_in_milliseconds_, + kHardwareDelayInMilliseconds, + 0 /* clockDrift */); audio_device_buffer_->DeliverRecordedData(); } diff --git a/webrtc/modules/audio_device/android/audio_record_jni.h b/webrtc/modules/audio_device/android/audio_record_jni.h index 3260104a9..109fe0442 100644 --- a/webrtc/modules/audio_device/android/audio_record_jni.h +++ b/webrtc/modules/audio_device/android/audio_record_jni.h @@ -41,7 +41,7 @@ class PlayoutDelayProvider; // CHECK that the calling thread is attached to a Java VM. // // All methods use AttachThreadScoped to attach to a Java VM if needed and then -// detach when method goes out of scope. We do so beacuse this class does not +// detach when method goes out of scope. We do so because this class does not // own the thread is is created and called on and other objects on the same // thread might put us in a detached state at any time. 
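//
// Construction sketch (based on how AudioDeviceTemplate and the opensl_runner
// example are wired up in this change; illustrative, not literal code from
// this header):
//
//   AudioTrackJni output;           // playout side, acts as PlayoutDelayProvider
//   AudioRecordJni input(&output);  // record side, reads the playout delay
//                                   // via output.PlayoutDelayMs()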
class AudioRecordJni { @@ -57,7 +57,7 @@ class AudioRecordJni { // existing global references and enables garbage collection. static void ClearAndroidAudioDeviceObjects(); - AudioRecordJni(); + AudioRecordJni(PlayoutDelayProvider* delay_provider); ~AudioRecordJni(); int32_t Init(); @@ -118,10 +118,11 @@ class AudioRecordJni { // thread in Java. Detached during construction of this object. rtc::ThreadChecker thread_checker_java_; - - // Should return the current playout delay. - // TODO(henrika): fix on Android. Reports zero today. - // PlayoutDelayProvider* delay_provider_; + // Returns the current playout delay. + // TODO(henrika): this value is currently fixed since initial tests have + // shown that the estimated delay varies very little over time. It might be + // possible to make improvements in this area. + PlayoutDelayProvider* delay_provider_; // The Java WebRtcAudioRecord instance. jobject j_audio_record_; @@ -151,6 +152,8 @@ class AudioRecordJni { // and audio configuration. int sample_rate_hz_; + // Contains a delay estimate from the playout side given by |delay_provider_|. + int playout_delay_in_milliseconds_; }; } // namespace webrtc diff --git a/webrtc/modules/audio_device/android/audio_track_jni.cc b/webrtc/modules/audio_device/android/audio_track_jni.cc index f27500cb3..2b7c5b847 100644 --- a/webrtc/modules/audio_device/android/audio_track_jni.cc +++ b/webrtc/modules/audio_device/android/audio_track_jni.cc @@ -8,1387 +8,284 @@ * be found in the AUTHORS file in the root of the source tree. */ -/* - * Android audio device implementation (JNI/AudioTrack usage) - */ - -// TODO(xians): Break out attach and detach current thread to JVM to -// separate functions. - #include "webrtc/modules/audio_device/android/audio_track_jni.h" #include -#include -#include -#include "webrtc/modules/audio_device/audio_device_config.h" -#include "webrtc/modules/audio_device/audio_device_utility.h" +#include "webrtc/base/arraysize.h" +#include "webrtc/base/checks.h" +#include "webrtc/modules/audio_device/android/audio_common.h" -#include "webrtc/system_wrappers/interface/event_wrapper.h" -#include "webrtc/system_wrappers/interface/thread_wrapper.h" -#include "webrtc/system_wrappers/interface/trace.h" +#define TAG "AudioTrackJni" +#define ALOGV(...) __android_log_print(ANDROID_LOG_VERBOSE, TAG, __VA_ARGS__) +#define ALOGD(...) __android_log_print(ANDROID_LOG_DEBUG, TAG, __VA_ARGS__) +#define ALOGE(...) __android_log_print(ANDROID_LOG_ERROR, TAG, __VA_ARGS__) +#define ALOGW(...) __android_log_print(ANDROID_LOG_WARN, TAG, __VA_ARGS__) +#define ALOGI(...) __android_log_print(ANDROID_LOG_INFO, TAG, __VA_ARGS__) namespace webrtc { -JavaVM* AudioTrackJni::globalJvm = NULL; -JNIEnv* AudioTrackJni::globalJNIEnv = NULL; -jobject AudioTrackJni::globalContext = NULL; -jclass AudioTrackJni::globalScClass = NULL; +static JavaVM* g_jvm = NULL; +static jobject g_context = NULL; +static jclass g_audio_track_class = NULL; -int32_t AudioTrackJni::SetAndroidAudioDeviceObjects(void* javaVM, void* env, - void* context) { - assert(env); - globalJvm = reinterpret_cast(javaVM); - globalJNIEnv = reinterpret_cast(env); - // Get java class type (note path to class packet). 
- jclass javaScClassLocal = globalJNIEnv->FindClass( - "org/webrtc/voiceengine/WebRtcAudioTrack"); - if (!javaScClassLocal) { - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, -1, - "%s: could not find java class", __FUNCTION__); - return -1; // exception thrown - } +void AudioTrackJni::SetAndroidAudioDeviceObjects(void* jvm, void* env, + void* context) { + ALOGD("SetAndroidAudioDeviceObjects%s", GetThreadInfo().c_str()); - // Create a global reference to the class (to tell JNI that we are - // referencing it after this function has returned). - globalScClass = reinterpret_cast ( - globalJNIEnv->NewGlobalRef(javaScClassLocal)); - if (!globalScClass) { - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, -1, - "%s: could not create reference", __FUNCTION__); - return -1; - } + CHECK(jvm); + CHECK(env); + CHECK(context); - globalContext = globalJNIEnv->NewGlobalRef( - reinterpret_cast(context)); - if (!globalContext) { - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, -1, - "%s: could not create context reference", __FUNCTION__); - return -1; - } + g_jvm = reinterpret_cast(jvm); + JNIEnv* jni = GetEnv(g_jvm); + CHECK(jni) << "AttachCurrentThread must be called on this tread"; - // Delete local class ref, we only use the global ref - globalJNIEnv->DeleteLocalRef(javaScClassLocal); - return 0; + g_context = NewGlobalRef(jni, reinterpret_cast(context)); + jclass local_class = FindClass( + jni, "org/webrtc/voiceengine/WebRtcAudioTrack"); + g_audio_track_class = reinterpret_cast( + NewGlobalRef(jni, local_class)); + jni->DeleteLocalRef(local_class); + CHECK_EXCEPTION(jni); + + // Register native methods with the WebRtcAudioTrack class. These methods + // are declared private native in WebRtcAudioTrack.java. + JNINativeMethod native_methods[] = { + {"nativeCacheDirectBufferAddress", "(Ljava/nio/ByteBuffer;J)V", + reinterpret_cast( + &webrtc::AudioTrackJni::CacheDirectBufferAddress)}, + {"nativeGetPlayoutData", "(IJ)V", + reinterpret_cast(&webrtc::AudioTrackJni::GetPlayoutData)}}; + jni->RegisterNatives(g_audio_track_class, + native_methods, arraysize(native_methods)); + CHECK_EXCEPTION(jni) << "Error during RegisterNatives"; } +// TODO(henrika): figure out if it is required to call this method? If so, +// ensure that is is always called as part of the destruction phase. 
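// For reference, the signatures registered above must match methods that are
// declared private native in WebRtcAudioTrack.java. A sketch of the expected
// Java-side declarations, derived from the JNI signatures (parameter names
// are illustrative):
//
//   private native void nativeCacheDirectBufferAddress(
//       ByteBuffer byteBuffer, long nativeAudioTrack);
//   private native void nativeGetPlayoutData(int bytes, long nativeAudioTrack);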
void AudioTrackJni::ClearAndroidAudioDeviceObjects() { - WEBRTC_TRACE(kTraceStateInfo, kTraceAudioDevice, -1, - "%s: env is NULL, assuming deinit", __FUNCTION__); - - globalJvm = NULL; - if (!globalJNIEnv) { - WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, -1, - "%s: saved env already NULL", __FUNCTION__); - return; - } - - globalJNIEnv->DeleteGlobalRef(globalContext); - globalContext = reinterpret_cast(NULL); - - globalJNIEnv->DeleteGlobalRef(globalScClass); - globalScClass = reinterpret_cast(NULL); - - globalJNIEnv = reinterpret_cast(NULL); + ALOGD("ClearAndroidAudioDeviceObjects%s", GetThreadInfo().c_str()); + JNIEnv* jni = GetEnv(g_jvm); + CHECK(jni) << "AttachCurrentThread must be called on this tread"; + jni->UnregisterNatives(g_audio_track_class); + CHECK_EXCEPTION(jni) << "Error during UnregisterNatives"; + DeleteGlobalRef(jni, g_audio_track_class); + g_audio_track_class = NULL; + DeleteGlobalRef(jni, g_context); + g_context = NULL; + g_jvm = NULL; } -AudioTrackJni::AudioTrackJni(const int32_t id) - : _javaVM(NULL), - _jniEnvPlay(NULL), - _javaScClass(0), - _javaScObj(0), - _javaPlayBuffer(0), - _javaDirectPlayBuffer(NULL), - _javaMidPlayAudio(0), - _ptrAudioBuffer(NULL), - _critSect(*CriticalSectionWrapper::CreateCriticalSection()), - _id(id), - _initialized(false), - _timeEventPlay(*EventWrapper::Create()), - _playStartStopEvent(*EventWrapper::Create()), - _ptrThreadPlay(NULL), - _playThreadID(0), - _playThreadIsInitialized(false), - _shutdownPlayThread(false), - _playoutDeviceIsSpecified(false), - _playing(false), - _playIsInitialized(false), - _speakerIsInitialized(false), - _startPlay(false), - _playWarning(0), - _playError(0), - _delayPlayout(0), - _samplingFreqOut((N_PLAY_SAMPLES_PER_SEC/1000)), - _maxSpeakerVolume(0) { +AudioTrackJni::AudioTrackJni() + : j_audio_track_(NULL), + direct_buffer_address_(NULL), + direct_buffer_capacity_in_bytes_(0), + frames_per_buffer_(0), + initialized_(false), + playing_(false), + audio_device_buffer_(NULL), + sample_rate_hz_(0), + delay_in_milliseconds_(0) { + ALOGD("ctor%s", GetThreadInfo().c_str()); + CHECK(HasDeviceObjects()); + CreateJavaInstance(); + // Detach from this thread since we want to use the checker to verify calls + // from the Java based audio thread. 
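  // (DetachFromThread() makes the checker re-bind to whichever thread issues
  // the next CalledOnValidThread() call; in this design that is the Java
  // audio thread that first invokes OnGetPlayoutData().)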
+ thread_checker_java_.DetachFromThread(); } AudioTrackJni::~AudioTrackJni() { - WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, _id, - "%s destroyed", __FUNCTION__); - + ALOGD("~dtor%s", GetThreadInfo().c_str()); + DCHECK(thread_checker_.CalledOnValidThread()); Terminate(); - - delete &_playStartStopEvent; - delete &_timeEventPlay; - delete &_critSect; + AttachThreadScoped ats(g_jvm); + JNIEnv* jni = ats.env(); + jni->DeleteGlobalRef(j_audio_track_); + j_audio_track_ = NULL; } int32_t AudioTrackJni::Init() { - CriticalSectionScoped lock(&_critSect); - if (_initialized) - { - return 0; - } - - _playWarning = 0; - _playError = 0; - - // Init Java member variables - // and set up JNI interface to - // AudioDeviceAndroid java class - if (InitJavaResources() != 0) - { - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, - "%s: Failed to init Java resources", __FUNCTION__); - return -1; - } - - // Check the sample rate to be used for playback and recording - // and the max playout volume - if (InitSampleRate() != 0) - { - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, - "%s: Failed to init samplerate", __FUNCTION__); - return -1; - } - - const char* threadName = "jni_audio_render_thread"; - _ptrThreadPlay = ThreadWrapper::CreateThread(PlayThreadFunc, this, - kRealtimePriority, threadName); - if (_ptrThreadPlay == NULL) - { - WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id, - " failed to create the play audio thread"); - return -1; - } - - unsigned int threadID = 0; - if (!_ptrThreadPlay->Start(threadID)) - { - WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id, - " failed to start the play audio thread"); - delete _ptrThreadPlay; - _ptrThreadPlay = NULL; - return -1; - } - _playThreadID = threadID; - - _initialized = true; - + ALOGD("Init%s", GetThreadInfo().c_str()); + DCHECK(thread_checker_.CalledOnValidThread()); return 0; } int32_t AudioTrackJni::Terminate() { - CriticalSectionScoped lock(&_critSect); - if (!_initialized) - { - return 0; - } - + ALOGD("Terminate%s", GetThreadInfo().c_str()); + DCHECK(thread_checker_.CalledOnValidThread()); StopPlayout(); - _shutdownPlayThread = true; - _timeEventPlay.Set(); // Release rec thread from waiting state - if (_ptrThreadPlay) - { - // First, the thread must detach itself from Java VM - _critSect.Leave(); - if (kEventSignaled != _playStartStopEvent.Wait(5000)) - { - WEBRTC_TRACE( - kTraceError, - kTraceAudioDevice, - _id, - "%s: Playout thread shutdown timed out, cannot " - "terminate thread", - __FUNCTION__); - // If we close thread anyway, the app will crash - return -1; - } - _critSect.Enter(); - - // Close down play thread - ThreadWrapper* tmpThread = _ptrThreadPlay; - _ptrThreadPlay = NULL; - _critSect.Leave(); - _timeEventPlay.Set(); - if (tmpThread->Stop()) - { - delete tmpThread; - _jniEnvPlay = NULL; - } - else - { - WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, - " failed to close down the play audio thread"); - } - _critSect.Enter(); - - _playThreadIsInitialized = false; - } - _speakerIsInitialized = false; - _playoutDeviceIsSpecified = false; - - // get the JNI env for this thread - JNIEnv *env; - bool isAttached = false; - - // get the JNI env for this thread - if (_javaVM->GetEnv((void**) &env, JNI_VERSION_1_4) != JNI_OK) - { - // try to attach the thread and get the env - // Attach this thread to JVM - jint res = _javaVM->AttachCurrentThread(&env, NULL); - if ((res < 0) || !env) - { - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, - "%s: Could not attach thread to JVM (%d, %p)", - __FUNCTION__, res, env); - return -1; - 
} - isAttached = true; - } - - // Make method IDs and buffer pointers unusable - _javaMidPlayAudio = 0; - _javaDirectPlayBuffer = NULL; - - // Delete the references to the java buffers, this allows the - // garbage collector to delete them - env->DeleteGlobalRef(_javaPlayBuffer); - _javaPlayBuffer = 0; - - // Delete the references to the java object and class, this allows the - // garbage collector to delete them - env->DeleteGlobalRef(_javaScObj); - _javaScObj = 0; - _javaScClass = 0; - - // Detach this thread if it was attached - if (isAttached) - { - if (_javaVM->DetachCurrentThread() < 0) - { - WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, - "%s: Could not detach thread from JVM", __FUNCTION__); - } - } - - _initialized = false; - return 0; } -int32_t AudioTrackJni::PlayoutDeviceName(uint16_t index, - char name[kAdmMaxDeviceNameSize], - char guid[kAdmMaxGuidSize]) { - if (0 != index) - { - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, - " Device index is out of range [0,0]"); - return -1; - } - - // Return empty string - memset(name, 0, kAdmMaxDeviceNameSize); - - if (guid) - { - memset(guid, 0, kAdmMaxGuidSize); - } - - return 0; -} - -int32_t AudioTrackJni::SetPlayoutDevice(uint16_t index) { - if (_playIsInitialized) - { - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, - " Playout already initialized"); - return -1; - } - - if (0 != index) - { - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, - " Device index is out of range [0,0]"); - return -1; - } - - // Do nothing but set a flag, this is to have consistent behavior - // with other platforms - _playoutDeviceIsSpecified = true; - - return 0; -} - -int32_t AudioTrackJni::SetPlayoutDevice( - AudioDeviceModule::WindowsDeviceType device) { - WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, - " API call not supported on this platform"); - return -1; -} - - -int32_t AudioTrackJni::PlayoutIsAvailable(bool& available) { // NOLINT - available = false; - - // Try to initialize the playout side - int32_t res = InitPlayout(); - - // Cancel effect of initialization - StopPlayout(); - - if (res != -1) - { - available = true; - } - - return res; -} - int32_t AudioTrackJni::InitPlayout() { - CriticalSectionScoped lock(&_critSect); - - if (!_initialized) - { - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, - " Not initialized"); - return -1; - } - - if (_playing) - { - WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, - " Playout already started"); - return -1; - } - - if (!_playoutDeviceIsSpecified) - { - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, - " Playout device is not specified"); - return -1; - } - - if (_playIsInitialized) - { - WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, - " Playout already initialized"); - return 0; - } - - // Initialize the speaker - if (InitSpeaker() == -1) - { - WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, - " InitSpeaker() failed"); - } - - // get the JNI env for this thread - JNIEnv *env; - bool isAttached = false; - - // get the JNI env for this thread - if (_javaVM->GetEnv((void**) &env, JNI_VERSION_1_4) != JNI_OK) - { - WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, - "attaching"); - - // try to attach the thread and get the env - // Attach this thread to JVM - jint res = _javaVM->AttachCurrentThread(&env, NULL); - if ((res < 0) || !env) - { - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, - " Could not attach thread to JVM (%d, %p)", res, env); - return -1; - } - isAttached = true; - } - - // get the method ID - jmethodID initPlaybackID = 
env->GetMethodID(_javaScClass, "InitPlayback", - "(I)I"); - - int samplingFreq = 44100; - if (_samplingFreqOut != 44) - { - samplingFreq = _samplingFreqOut * 1000; - } - - int retVal = -1; - - // Call java sc object method - jint res = env->CallIntMethod(_javaScObj, initPlaybackID, samplingFreq); - if (res < 0) - { - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, - "InitPlayback failed (%d)", res); - } - else - { - // Set the audio device buffer sampling rate - _ptrAudioBuffer->SetPlayoutSampleRate(_samplingFreqOut * 1000); - _playIsInitialized = true; - retVal = 0; - } - - // Detach this thread if it was attached - if (isAttached) - { - WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, - "detaching"); - if (_javaVM->DetachCurrentThread() < 0) - { - WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, - " Could not detach thread from JVM"); - } - } - - return retVal; + ALOGD("InitPlayout%s", GetThreadInfo().c_str()); + DCHECK(thread_checker_.CalledOnValidThread()); + DCHECK(!initialized_); + DCHECK(!playing_); + if (initialized_ || playing_) { + return -1; + } + AttachThreadScoped ats(g_jvm); + JNIEnv* jni = ats.env(); + jmethodID initPlayoutID = GetMethodID( + jni, g_audio_track_class, "InitPlayout", "(I)I"); + jint delay_in_milliseconds = jni->CallIntMethod( + j_audio_track_, initPlayoutID, sample_rate_hz_); + CHECK_EXCEPTION(jni); + if (delay_in_milliseconds < 0) { + ALOGE("InitPlayout failed!"); + return -1; + } + delay_in_milliseconds_ = delay_in_milliseconds; + ALOGD("delay_in_milliseconds: %d", delay_in_milliseconds); + initialized_ = true; + return 0; } int32_t AudioTrackJni::StartPlayout() { - CriticalSectionScoped lock(&_critSect); - - if (!_playIsInitialized) - { - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, - " Playout not initialized"); + ALOGD("StartPlayout%s", GetThreadInfo().c_str()); + DCHECK(thread_checker_.CalledOnValidThread()); + DCHECK(initialized_); + DCHECK(!playing_); + if (!initialized_ || playing_) { return -1; } - - if (_playing) - { - WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, - " Playout already started"); - return 0; - } - - // get the JNI env for this thread - JNIEnv *env; - bool isAttached = false; - - // get the JNI env for this thread - if (_javaVM->GetEnv((void**) &env, JNI_VERSION_1_4) != JNI_OK) - { - // try to attach the thread and get the env - // Attach this thread to JVM - jint res = _javaVM->AttachCurrentThread(&env, NULL); - if ((res < 0) || !env) - { - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, - " Could not attach thread to JVM (%d, %p)", res, env); - return -1; - } - isAttached = true; - } - - // get the method ID - jmethodID startPlaybackID = env->GetMethodID(_javaScClass, "StartPlayback", - "()I"); - - // Call java sc object method - jint res = env->CallIntMethod(_javaScObj, startPlaybackID); - if (res < 0) - { - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, - "StartPlayback failed (%d)", res); - return -1; - } - - _playWarning = 0; - _playError = 0; - - // Signal to playout thread that we want to start - _startPlay = true; - _timeEventPlay.Set(); // Release thread from waiting state - _critSect.Leave(); - // Wait for thread to init - if (kEventSignaled != _playStartStopEvent.Wait(5000)) - { - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, - " Timeout or error starting"); - } - _critSect.Enter(); - - // Detach this thread if it was attached - if (isAttached) - { - if (_javaVM->DetachCurrentThread() < 0) - { - WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, - " Could not detach thread from JVM"); - } - } - - 
return 0; + AttachThreadScoped ats(g_jvm); + JNIEnv* jni = ats.env(); + jmethodID startPlayoutID = GetMethodID( + jni, g_audio_track_class, "StartPlayout", "()Z"); + jboolean res = jni->CallBooleanMethod(j_audio_track_, startPlayoutID); + CHECK_EXCEPTION(jni); + if (!res) { + ALOGE("StartPlayout failed!"); + return -1; + } + playing_ = true; + return 0; } int32_t AudioTrackJni::StopPlayout() { - CriticalSectionScoped lock(&_critSect); - - if (!_playIsInitialized) - { - WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, - " Playout is not initialized"); + ALOGD("StopPlayout%s", GetThreadInfo().c_str()); + DCHECK(thread_checker_.CalledOnValidThread()); + if (!initialized_) { return 0; } - - // get the JNI env for this thread - JNIEnv *env; - bool isAttached = false; - - // get the JNI env for this thread - if (_javaVM->GetEnv((void**) &env, JNI_VERSION_1_4) != JNI_OK) - { - // try to attach the thread and get the env - // Attach this thread to JVM - jint res = _javaVM->AttachCurrentThread(&env, NULL); - if ((res < 0) || !env) - { - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, - " Could not attach thread to JVM (%d, %p)", res, env); - return -1; - } - isAttached = true; - } - - // get the method ID - jmethodID stopPlaybackID = env->GetMethodID(_javaScClass, "StopPlayback", - "()I"); - - // Call java sc object method - jint res = env->CallIntMethod(_javaScObj, stopPlaybackID); - if (res < 0) - { - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, - "StopPlayback failed (%d)", res); - } - - _playIsInitialized = false; - _playing = false; - _playWarning = 0; - _playError = 0; - - // Detach this thread if it was attached - if (isAttached) - { - if (_javaVM->DetachCurrentThread() < 0) - { - WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, - " Could not detach thread from JVM"); - } - } - - return 0; - -} - -int32_t AudioTrackJni::InitSpeaker() { - CriticalSectionScoped lock(&_critSect); - - if (_playing) - { - WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, - " Playout already started"); + AttachThreadScoped ats(g_jvm); + JNIEnv* jni = ats.env(); + jmethodID stopPlayoutID = GetMethodID( + jni, g_audio_track_class, "StopPlayout", "()Z"); + jboolean res = jni->CallBooleanMethod(j_audio_track_, stopPlayoutID); + CHECK_EXCEPTION(jni); + if (!res) { + ALOGE("StopPlayout failed!"); return -1; } - - if (!_playoutDeviceIsSpecified) - { - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, - " Playout device is not specified"); - return -1; - } - - // Nothing needs to be done here, we use a flag to have consistent - // behavior with other platforms - _speakerIsInitialized = true; - - return 0; -} - -int32_t AudioTrackJni::SpeakerVolumeIsAvailable(bool& available) { // NOLINT - available = true; // We assume we are always be able to set/get volume - return 0; -} - -int32_t AudioTrackJni::SetSpeakerVolume(uint32_t volume) { - if (!_speakerIsInitialized) - { - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, - " Speaker not initialized"); - return -1; - } - if (!globalContext) - { - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, - " Context is not set"); - return -1; - } - - // get the JNI env for this thread - JNIEnv *env; - bool isAttached = false; - - if (_javaVM->GetEnv((void**) &env, JNI_VERSION_1_4) != JNI_OK) - { - // try to attach the thread and get the env - // Attach this thread to JVM - jint res = _javaVM->AttachCurrentThread(&env, NULL); - if ((res < 0) || !env) - { - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, - " Could not attach thread to JVM (%d, %p)", res, env); - return -1; 
- } - isAttached = true; - } - - // get the method ID - jmethodID setPlayoutVolumeID = env->GetMethodID(_javaScClass, - "SetPlayoutVolume", "(I)I"); - - // call java sc object method - jint res = env->CallIntMethod(_javaScObj, setPlayoutVolumeID, - static_cast (volume)); - if (res < 0) - { - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, - "SetPlayoutVolume failed (%d)", res); - return -1; - } - - // Detach this thread if it was attached - if (isAttached) - { - if (_javaVM->DetachCurrentThread() < 0) - { - WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, - " Could not detach thread from JVM"); - } - } - - return 0; -} - -int32_t AudioTrackJni::SpeakerVolume(uint32_t& volume) const { // NOLINT - if (!_speakerIsInitialized) - { - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, - " Speaker not initialized"); - return -1; - } - if (!globalContext) - { - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, - " Context is not set"); - return -1; - } - - // get the JNI env for this thread - JNIEnv *env; - bool isAttached = false; - - if (_javaVM->GetEnv((void**) &env, JNI_VERSION_1_4) != JNI_OK) - { - // try to attach the thread and get the env - // Attach this thread to JVM - jint res = _javaVM->AttachCurrentThread(&env, NULL); - if ((res < 0) || !env) - { - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, - " Could not attach thread to JVM (%d, %p)", res, env); - return -1; - } - isAttached = true; - } - - // get the method ID - jmethodID getPlayoutVolumeID = env->GetMethodID(_javaScClass, - "GetPlayoutVolume", "()I"); - - // call java sc object method - jint level = env->CallIntMethod(_javaScObj, getPlayoutVolumeID); - if (level < 0) - { - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, - "GetPlayoutVolume failed (%d)", level); - return -1; - } - - // Detach this thread if it was attached - if (isAttached) - { - if (_javaVM->DetachCurrentThread() < 0) - { - WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, - " Could not detach thread from JVM"); - } - } - - volume = static_cast (level); - - return 0; -} - - -int32_t AudioTrackJni::MaxSpeakerVolume(uint32_t& maxVolume) const { // NOLINT - if (!_speakerIsInitialized) - { - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, - " Speaker not initialized"); - return -1; - } - - maxVolume = _maxSpeakerVolume; - - return 0; -} - -int32_t AudioTrackJni::MinSpeakerVolume(uint32_t& minVolume) const { // NOLINT - if (!_speakerIsInitialized) - { - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, - " Speaker not initialized"); - return -1; - } - minVolume = 0; - return 0; -} - -int32_t AudioTrackJni::SpeakerVolumeStepSize( - uint16_t& stepSize) const { // NOLINT - if (!_speakerIsInitialized) - { - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, - " Speaker not initialized"); - return -1; - } - - stepSize = 1; - - return 0; -} - -int32_t AudioTrackJni::SpeakerMuteIsAvailable(bool& available) { // NOLINT - available = false; // Speaker mute not supported on Android - return 0; -} - -int32_t AudioTrackJni::SetSpeakerMute(bool enable) { - WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, - " API call not supported on this platform"); - return -1; -} - -int32_t AudioTrackJni::SpeakerMute(bool& /*enabled*/) const { - - WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, - " API call not supported on this platform"); - return -1; -} - -int32_t AudioTrackJni::StereoPlayoutIsAvailable(bool& available) { // NOLINT - available = false; // Stereo playout not supported on Android - return 0; -} - -int32_t AudioTrackJni::SetStereoPlayout(bool enable) { - if 
(enable) - { - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, - " Enabling not available"); - return -1; - } - - return 0; -} - -int32_t AudioTrackJni::StereoPlayout(bool& enabled) const { // NOLINT - enabled = false; - return 0; -} - -int32_t AudioTrackJni::SetPlayoutBuffer( - const AudioDeviceModule::BufferType type, - uint16_t sizeMS) { - WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, - " API call not supported on this platform"); - return -1; -} - - -int32_t AudioTrackJni::PlayoutBuffer( - AudioDeviceModule::BufferType& type, // NOLINT - uint16_t& sizeMS) const { // NOLINT - type = AudioDeviceModule::kAdaptiveBufferSize; - sizeMS = _delayPlayout; // Set to current playout delay - - return 0; -} - -int32_t AudioTrackJni::PlayoutDelay(uint16_t& delayMS) const { // NOLINT - delayMS = _delayPlayout; + // If we don't detach here, we will hit a DCHECK in OnDataIsRecorded() next + // time StartRecording() is called since it will create a new Java thread. + thread_checker_java_.DetachFromThread(); + initialized_ = false; + playing_ = false; return 0; } +// TODO(henrika): possibly add stereo support. void AudioTrackJni::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) { - CriticalSectionScoped lock(&_critSect); - _ptrAudioBuffer = audioBuffer; - // inform the AudioBuffer about default settings for this implementation - _ptrAudioBuffer->SetPlayoutSampleRate(N_PLAY_SAMPLES_PER_SEC); - _ptrAudioBuffer->SetPlayoutChannels(N_PLAY_CHANNELS); + ALOGD("AttachAudioBuffer"); + DCHECK(thread_checker_.CalledOnValidThread()); + audio_device_buffer_ = audioBuffer; + sample_rate_hz_ = GetNativeSampleRate(); + ALOGD("SetPlayoutSampleRate(%d)", sample_rate_hz_); + audio_device_buffer_->SetPlayoutSampleRate(sample_rate_hz_); + audio_device_buffer_->SetPlayoutChannels(kNumChannels); } -int32_t AudioTrackJni::SetPlayoutSampleRate(const uint32_t samplesPerSec) { - if (samplesPerSec > 48000 || samplesPerSec < 8000) - { - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, - " Invalid sample rate"); - return -1; - } - - // set the playout sample rate to use - if (samplesPerSec == 44100) - { - _samplingFreqOut = 44; - } - else - { - _samplingFreqOut = samplesPerSec / 1000; - } - - // Update the AudioDeviceBuffer - _ptrAudioBuffer->SetPlayoutSampleRate(samplesPerSec); - +int32_t AudioTrackJni::PlayoutDelay(uint16_t& delayMS) const { + // No need for thread check or locking since we set |delay_in_milliseconds_| + // only once (on the creating thread) during initialization. + delayMS = delay_in_milliseconds_; return 0; } -bool AudioTrackJni::PlayoutWarning() const { - return (_playWarning > 0); +int AudioTrackJni::PlayoutDelayMs() { + // This method can be called from the Java based AudioRecordThread but we + // don't need locking since it is only set once (on the main thread) during + // initialization. 
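  // (In this change the only caller is AudioRecordJni::OnDataIsRecorded(),
  // which reads the value once on its first callback and caches it in
  // |playout_delay_in_milliseconds_|.)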
+ return delay_in_milliseconds_; } -bool AudioTrackJni::PlayoutError() const { - return (_playError > 0); +void JNICALL AudioTrackJni::CacheDirectBufferAddress( + JNIEnv* env, jobject obj, jobject byte_buffer, jlong nativeAudioTrack) { + webrtc::AudioTrackJni* this_object = + reinterpret_cast (nativeAudioTrack); + this_object->OnCacheDirectBufferAddress(env, byte_buffer); } -void AudioTrackJni::ClearPlayoutWarning() { - _playWarning = 0; +void AudioTrackJni::OnCacheDirectBufferAddress( + JNIEnv* env, jobject byte_buffer) { + ALOGD("OnCacheDirectBufferAddress"); + DCHECK(thread_checker_.CalledOnValidThread()); + direct_buffer_address_ = + env->GetDirectBufferAddress(byte_buffer); + jlong capacity = env->GetDirectBufferCapacity(byte_buffer); + ALOGD("direct buffer capacity: %lld", capacity); + direct_buffer_capacity_in_bytes_ = static_cast (capacity); + frames_per_buffer_ = direct_buffer_capacity_in_bytes_ / kBytesPerFrame; + ALOGD("frames_per_buffer: %d", frames_per_buffer_); } -void AudioTrackJni::ClearPlayoutError() { - _playError = 0; +void JNICALL AudioTrackJni::GetPlayoutData( + JNIEnv* env, jobject obj, jint length, jlong nativeAudioTrack) { + webrtc::AudioTrackJni* this_object = + reinterpret_cast (nativeAudioTrack); + this_object->OnGetPlayoutData(length); } -int32_t AudioTrackJni::SetLoudspeakerStatus(bool enable) { - if (!globalContext) - { - WEBRTC_TRACE(kTraceError, kTraceUtility, -1, - " Context is not set"); - return -1; - } - - // get the JNI env for this thread - JNIEnv *env; - bool isAttached = false; - - if (_javaVM->GetEnv((void**) &env, JNI_VERSION_1_4) != JNI_OK) - { - // try to attach the thread and get the env - // Attach this thread to JVM - jint res = _javaVM->AttachCurrentThread(&env, NULL); - - // Get the JNI env for this thread - if ((res < 0) || !env) - { - WEBRTC_TRACE(kTraceError, kTraceUtility, -1, - " Could not attach thread to JVM (%d, %p)", res, env); - return -1; - } - isAttached = true; - } - - // get the method ID - jmethodID setPlayoutSpeakerID = env->GetMethodID(_javaScClass, - "SetPlayoutSpeaker", - "(Z)I"); - - // call java sc object method - jint res = env->CallIntMethod(_javaScObj, setPlayoutSpeakerID, enable); - if (res < 0) - { - WEBRTC_TRACE(kTraceError, kTraceUtility, -1, - " SetPlayoutSpeaker failed (%d)", res); - return -1; - } - - _loudSpeakerOn = enable; - - // Detach this thread if it was attached - if (isAttached) - { - if (_javaVM->DetachCurrentThread() < 0) - { - WEBRTC_TRACE(kTraceWarning, kTraceUtility, -1, - " Could not detach thread from JVM"); - } - } - - return 0; +// This method is called on a high-priority thread from Java. The name of +// the thread is 'AudioRecordTrack'. +void AudioTrackJni::OnGetPlayoutData(int length) { + DCHECK(thread_checker_java_.CalledOnValidThread()); + // ALOGD("OnGetPlayoutData(length=%d, delay=%d)", length); + DCHECK_EQ(frames_per_buffer_, length / kBytesPerFrame); + // Pull decoded data (in 16-bit PCM format) from jitter buffer. + int samples = audio_device_buffer_->RequestPlayoutData(frames_per_buffer_); + DCHECK_EQ(samples, frames_per_buffer_); + // Copy decoded data into common byte buffer to ensure that it can be + // written to the Java based audio track. 
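  // (Sizing example, assuming the Java side requests 10 ms buffers at a
  // 48 kHz native rate: length is then 480 * kBytesPerFrame = 960 bytes and
  // frames_per_buffer_ is 480.)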
+ samples = audio_device_buffer_->GetPlayoutData(direct_buffer_address_); + DCHECK_EQ(length, kBytesPerFrame * samples); } -int32_t AudioTrackJni::GetLoudspeakerStatus(bool& enabled) const { // NOLINT - enabled = _loudSpeakerOn; - return 0; +bool AudioTrackJni::HasDeviceObjects() { + return (g_jvm && g_context && g_audio_track_class); } -int32_t AudioTrackJni::InitJavaResources() { - // todo: Check if we already have created the java object - _javaVM = globalJvm; - _javaScClass = globalScClass; - - // use the jvm that has been set - if (!_javaVM) - { - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, - "%s: Not a valid Java VM pointer", __FUNCTION__); - return -1; - } - - // get the JNI env for this thread - JNIEnv *env; - bool isAttached = false; - - // get the JNI env for this thread - if (_javaVM->GetEnv((void**) &env, JNI_VERSION_1_4) != JNI_OK) - { - // try to attach the thread and get the env - // Attach this thread to JVM - jint res = _javaVM->AttachCurrentThread(&env, NULL); - if ((res < 0) || !env) - { - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, - "%s: Could not attach thread to JVM (%d, %p)", - __FUNCTION__, res, env); - return -1; - } - isAttached = true; - } - - WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, - "get method id"); - - // get the method ID for the void(void) constructor - jmethodID cid = env->GetMethodID(_javaScClass, "", "()V"); - if (cid == NULL) - { - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, - "%s: could not get constructor ID", __FUNCTION__); - return -1; /* exception thrown */ - } - - WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, - "construct object", __FUNCTION__); - - // construct the object - jobject javaScObjLocal = env->NewObject(_javaScClass, cid); - if (!javaScObjLocal) - { - WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, - "%s: could not create Java sc object", __FUNCTION__); - return -1; - } - - // Create a reference to the object (to tell JNI that we are referencing it - // after this function has returned). - _javaScObj = env->NewGlobalRef(javaScObjLocal); - if (!_javaScObj) - { - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, - "%s: could not create Java sc object reference", - __FUNCTION__); - return -1; - } - - // Delete local object ref, we only use the global ref. - env->DeleteLocalRef(javaScObjLocal); - - ////////////////////// - // AUDIO MANAGEMENT - - // This is not mandatory functionality - if (globalContext) { - jfieldID context_id = env->GetFieldID(globalScClass, - "_context", - "Landroid/content/Context;"); - if (!context_id) { - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, - "%s: could not get _context id", __FUNCTION__); - return -1; - } - - env->SetObjectField(_javaScObj, context_id, globalContext); - jobject javaContext = env->GetObjectField(_javaScObj, context_id); - if (!javaContext) { - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, - "%s: could not set or get _context", __FUNCTION__); - return -1; - } - } - else { - WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, - "%s: did not set Context - some functionality is not " - "supported", - __FUNCTION__); - } - - ///////////// - // PLAYOUT - - // Get play buffer field ID. - jfieldID fidPlayBuffer = env->GetFieldID(_javaScClass, "_playBuffer", - "Ljava/nio/ByteBuffer;"); - if (!fidPlayBuffer) - { - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, - "%s: could not get play buffer fid", __FUNCTION__); - return -1; - } - - // Get play buffer object. 
- jobject javaPlayBufferLocal = - env->GetObjectField(_javaScObj, fidPlayBuffer); - if (!javaPlayBufferLocal) - { - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, - "%s: could not get play buffer", __FUNCTION__); - return -1; - } - - // Create a global reference to the object (to tell JNI that we are - // referencing it after this function has returned) - // NOTE: we are referencing it only through the direct buffer (see below). - _javaPlayBuffer = env->NewGlobalRef(javaPlayBufferLocal); - if (!_javaPlayBuffer) - { - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, - "%s: could not get play buffer reference", __FUNCTION__); - return -1; - } - - // Delete local object ref, we only use the global ref. - env->DeleteLocalRef(javaPlayBufferLocal); - - // Get direct buffer. - _javaDirectPlayBuffer = env->GetDirectBufferAddress(_javaPlayBuffer); - if (!_javaDirectPlayBuffer) - { - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, - "%s: could not get direct play buffer", __FUNCTION__); - return -1; - } - - // Get the play audio method ID. - _javaMidPlayAudio = env->GetMethodID(_javaScClass, "PlayAudio", "(I)I"); - if (!_javaMidPlayAudio) - { - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, - "%s: could not get play audio mid", __FUNCTION__); - return -1; - } - - // Detach this thread if it was attached. - if (isAttached) - { - if (_javaVM->DetachCurrentThread() < 0) - { - WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, - "%s: Could not detach thread from JVM", __FUNCTION__); - } - } - - return 0; - +void AudioTrackJni::CreateJavaInstance() { + ALOGD("CreateJavaInstance"); + AttachThreadScoped ats(g_jvm); + JNIEnv* jni = ats.env(); + jmethodID constructorID = GetMethodID( + jni, g_audio_track_class, "", "(Landroid/content/Context;J)V"); + j_audio_track_ = jni->NewObject(g_audio_track_class, + constructorID, + g_context, + reinterpret_cast(this)); + CHECK_EXCEPTION(jni) << "Error during NewObject"; + CHECK(j_audio_track_); + j_audio_track_ = jni->NewGlobalRef(j_audio_track_); + CHECK_EXCEPTION(jni) << "Error during NewGlobalRef"; + CHECK(j_audio_track_); } -int32_t AudioTrackJni::InitSampleRate() { - int samplingFreq = 44100; - jint res = 0; - - // get the JNI env for this thread - JNIEnv *env; - bool isAttached = false; - - // get the JNI env for this thread - if (_javaVM->GetEnv((void**) &env, JNI_VERSION_1_4) != JNI_OK) - { - // try to attach the thread and get the env - // Attach this thread to JVM - jint res = _javaVM->AttachCurrentThread(&env, NULL); - if ((res < 0) || !env) - { - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, - "%s: Could not attach thread to JVM (%d, %p)", - __FUNCTION__, res, env); - return -1; - } - isAttached = true; - } - - // get the method ID - jmethodID initPlaybackID = env->GetMethodID(_javaScClass, "InitPlayback", - "(I)I"); - - if (_samplingFreqOut > 0) - { - // read the configured sampling rate - samplingFreq = 44100; - if (_samplingFreqOut != 44) - { - samplingFreq = _samplingFreqOut * 1000; - } - WEBRTC_TRACE(kTraceStateInfo, kTraceAudioDevice, _id, - " Trying configured playback sampling rate %d", - samplingFreq); - } - else - { - // set the preferred sampling frequency - if (samplingFreq == 8000) - { - // try 16000 - samplingFreq = 16000; - } - // else use same as recording - } - - bool keepTrying = true; - while (keepTrying) - { - // call java sc object method - res = env->CallIntMethod(_javaScObj, initPlaybackID, samplingFreq); - if (res < 0) - { - switch (samplingFreq) - { - case 44100: - samplingFreq = 16000; - break; - case 16000: - 
samplingFreq = 8000; - break; - default: // error - WEBRTC_TRACE(kTraceError, - kTraceAudioDevice, _id, - "InitPlayback failed (%d)", res); - return -1; - } - } - else - { - keepTrying = false; - } - } - - // Store max playout volume - _maxSpeakerVolume = static_cast (res); - if (_maxSpeakerVolume < 1) - { - WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, - " Did not get valid max speaker volume value (%d)", - _maxSpeakerVolume); - } - - // set the playback sample rate to use - if (samplingFreq == 44100) - { - _samplingFreqOut = 44; - } - else - { - _samplingFreqOut = samplingFreq / 1000; - } - - WEBRTC_TRACE(kTraceStateInfo, kTraceAudioDevice, _id, - "Playback sample rate set to (%d)", _samplingFreqOut); - - // get the method ID - jmethodID stopPlaybackID = env->GetMethodID(_javaScClass, "StopPlayback", - "()I"); - - // Call java sc object method - res = env->CallIntMethod(_javaScObj, stopPlaybackID); - if (res < 0) - { - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, - "StopPlayback failed (%d)", res); - } - - // Detach this thread if it was attached - if (isAttached) - { - if (_javaVM->DetachCurrentThread() < 0) - { - WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, - "%s: Could not detach thread from JVM", __FUNCTION__); - } - } - - return 0; - -} - -bool AudioTrackJni::PlayThreadFunc(void* pThis) -{ - return (static_cast (pThis)->PlayThreadProcess()); -} - -bool AudioTrackJni::PlayThreadProcess() -{ - if (!_playThreadIsInitialized) - { - // Do once when thread is started - - // Attach this thread to JVM and get the JNI env for this thread - jint res = _javaVM->AttachCurrentThread(&_jniEnvPlay, NULL); - if ((res < 0) || !_jniEnvPlay) - { - WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, - _id, - "Could not attach playout thread to JVM (%d, %p)", - res, _jniEnvPlay); - return false; // Close down thread - } - - _playThreadIsInitialized = true; - } - - if (!_playing) - { - switch (_timeEventPlay.Wait(1000)) - { - case kEventSignaled: - WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, - _id, "Playout thread event signal"); - break; - case kEventError: - WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, - _id, "Playout thread event error"); - return true; - case kEventTimeout: - WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, - _id, "Playout thread event timeout"); - return true; - } - } - - Lock(); - - if (_startPlay) - { - WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, - "_startPlay true, performing initial actions"); - _startPlay = false; - _playing = true; - _playWarning = 0; - _playError = 0; - _playStartStopEvent.Set(); - WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, - "Sent signal"); - } - - if (_playing) - { - int8_t playBuffer[2 * 480]; // Max 10 ms @ 48 kHz / 16 bit - uint32_t samplesToPlay = _samplingFreqOut * 10; - - // ask for new PCM data to be played out using the AudioDeviceBuffer - // ensure that this callback is executed without taking the - // audio-thread lock - UnLock(); - uint32_t nSamples = - _ptrAudioBuffer->RequestPlayoutData(samplesToPlay); - Lock(); - - // Check again since play may have stopped during unlocked period - if (!_playing) - { - UnLock(); - return true; - } - - nSamples = _ptrAudioBuffer->GetPlayoutData(playBuffer); - if (nSamples != samplesToPlay) - { - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, - " invalid number of output samples(%d)", nSamples); - _playWarning = 1; - } - - // Copy data to our direct buffer (held by java sc object) - // todo: Give _javaDirectPlayBuffer directly to VoE? 
- memcpy(_javaDirectPlayBuffer, playBuffer, nSamples * 2); - - UnLock(); - - // Call java sc object method to process data in direct buffer - // Will block until data has been put in OS playout buffer - // (see java sc class) - jint res = _jniEnvPlay->CallIntMethod(_javaScObj, _javaMidPlayAudio, - 2 * nSamples); - if (res < 0) - { - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, - "PlayAudio failed (%d)", res); - _playWarning = 1; - } - else if (res > 0) - { - // we are not recording and have got a delay value from playback - _delayPlayout = res / _samplingFreqOut; - } - Lock(); - - } // _playing - - if (_shutdownPlayThread) - { - WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, - "Detaching thread from Java VM"); - - // Detach thread from Java VM - if (_javaVM->DetachCurrentThread() < 0) - { - WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, - _id, "Could not detach playout thread from JVM"); - _shutdownPlayThread = false; - // If we say OK (i.e. set event) and close thread anyway, - // app will crash - } - else - { - _jniEnvPlay = NULL; - _shutdownPlayThread = false; - _playStartStopEvent.Set(); // Signal to Terminate() that we are done - WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, - "Sent signal"); - } - } - - UnLock(); - return true; +int AudioTrackJni::GetNativeSampleRate() { + AttachThreadScoped ats(g_jvm); + JNIEnv* jni = ats.env(); + jmethodID getNativeSampleRate = GetMethodID( + jni, g_audio_track_class, "GetNativeSampleRate", "()I"); + jint sample_rate_hz = jni->CallIntMethod( + j_audio_track_, getNativeSampleRate); + CHECK_EXCEPTION(jni); + return sample_rate_hz; } } // namespace webrtc diff --git a/webrtc/modules/audio_device/android/audio_track_jni.h b/webrtc/modules/audio_device/android/audio_track_jni.h index 1871c7871..f89b99731 100644 --- a/webrtc/modules/audio_device/android/audio_track_jni.h +++ b/webrtc/modules/audio_device/android/audio_track_jni.h @@ -13,161 +13,139 @@ #include -#include "webrtc/system_wrappers/interface/critical_section_wrapper.h" +#include "webrtc/base/thread_checker.h" #include "webrtc/modules/audio_device/android/audio_common.h" #include "webrtc/modules/audio_device/include/audio_device_defines.h" #include "webrtc/modules/audio_device/audio_device_generic.h" +#include "webrtc/modules/utility/interface/helpers_android.h" namespace webrtc { -class EventWrapper; -class ThreadWrapper; - -const uint32_t N_PLAY_SAMPLES_PER_SEC = 16000; // Default is 16 kHz -const uint32_t N_PLAY_CHANNELS = 1; // default is mono playout - +// Implements 16-bit mono PCM audio output support for Android using the Java +// AudioTrack interface. Most of the work is done by its Java counterpart in +// WebRtcAudioTrack.java. This class is created and lives on a thread in +// C++-land, but decoded audio buffers are requested on a high-priority +// thread managed by the Java class. +// +// An instance must be created and destroyed on one and the same thread. +// All public methods must also be called on the same thread. A thread checker +// will DCHECK if any method is called on an invalid thread. +// It is possible to call the two static methods (SetAndroidAudioDeviceObjects +// and ClearAndroidAudioDeviceObjects) from a different thread but both will +// CHECK that the calling thread is attached to a Java VM. +// +// All methods use AttachThreadScoped to attach to a Java VM if needed and then +// detach when method goes out of scope. 
We do so because this class does not +// own the thread is is created and called on and other objects on the same +// thread might put us in a detached state at any time. class AudioTrackJni : public PlayoutDelayProvider { public: - static int32_t SetAndroidAudioDeviceObjects(void* javaVM, void* env, - void* context); + // Use the invocation API to allow the native application to use the JNI + // interface pointer to access VM features. + // |jvm| denotes the Java VM, |env| is a pointer to the JNI interface pointer + // and |context| corresponds to android.content.Context in Java. + // This method also sets a global jclass object, |g_audio_track_class| for + // the "org/webrtc/voiceengine/WebRtcAudioTrack"-class. + static void SetAndroidAudioDeviceObjects(void* jvm, void* env, void* context); + // Always call this method after the object has been destructed. It deletes + // existing global references and enables garbage collection. static void ClearAndroidAudioDeviceObjects(); - explicit AudioTrackJni(const int32_t id); - virtual ~AudioTrackJni(); - // Main initializaton and termination + AudioTrackJni(); + ~AudioTrackJni(); + int32_t Init(); int32_t Terminate(); - bool Initialized() const { return _initialized; } - // Device enumeration - int16_t PlayoutDevices() { return 1; } // There is one device only. - - int32_t PlayoutDeviceName(uint16_t index, - char name[kAdmMaxDeviceNameSize], - char guid[kAdmMaxGuidSize]); - - // Device selection - int32_t SetPlayoutDevice(uint16_t index); - int32_t SetPlayoutDevice( - AudioDeviceModule::WindowsDeviceType device); - - // Audio transport initialization - int32_t PlayoutIsAvailable(bool& available); // NOLINT int32_t InitPlayout(); - bool PlayoutIsInitialized() const { return _playIsInitialized; } + bool PlayoutIsInitialized() const { return initialized_; } - // Audio transport control int32_t StartPlayout(); int32_t StopPlayout(); - bool Playing() const { return _playing; } + bool Playing() const { return playing_; } - // Audio mixer initialization - int32_t InitSpeaker(); - bool SpeakerIsInitialized() const { return _speakerIsInitialized; } + int32_t PlayoutDelay(uint16_t& delayMS) const; - // Speaker volume controls - int32_t SpeakerVolumeIsAvailable(bool& available); // NOLINT - int32_t SetSpeakerVolume(uint32_t volume); - int32_t SpeakerVolume(uint32_t& volume) const; // NOLINT - int32_t MaxSpeakerVolume(uint32_t& maxVolume) const; // NOLINT - int32_t MinSpeakerVolume(uint32_t& minVolume) const; // NOLINT - int32_t SpeakerVolumeStepSize(uint16_t& stepSize) const; // NOLINT - - // Speaker mute control - int32_t SpeakerMuteIsAvailable(bool& available); // NOLINT - int32_t SetSpeakerMute(bool enable); - int32_t SpeakerMute(bool& enabled) const; // NOLINT - - - // Stereo support - int32_t StereoPlayoutIsAvailable(bool& available); // NOLINT - int32_t SetStereoPlayout(bool enable); - int32_t StereoPlayout(bool& enabled) const; // NOLINT - - // Delay information and control - int32_t SetPlayoutBuffer(const AudioDeviceModule::BufferType type, - uint16_t sizeMS); - int32_t PlayoutBuffer(AudioDeviceModule::BufferType& type, // NOLINT - uint16_t& sizeMS) const; - int32_t PlayoutDelay(uint16_t& delayMS) const; // NOLINT - - // Attach audio buffer void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer); - int32_t SetPlayoutSampleRate(const uint32_t samplesPerSec); - - // Error and warning information - bool PlayoutWarning() const; - bool PlayoutError() const; - void ClearPlayoutWarning(); - void ClearPlayoutError(); - - // Speaker audio routing - int32_t 
SetLoudspeakerStatus(bool enable);
-  int32_t GetLoudspeakerStatus(bool& enable) const; // NOLINT
-
 protected:
-  // TODO(henrika): improve this estimate.
-  virtual int PlayoutDelayMs() { return 0; }
+  // PlayoutDelayProvider implementation.
+  virtual int PlayoutDelayMs();
 private:
-  void Lock() EXCLUSIVE_LOCK_FUNCTION(_critSect) {
-    _critSect.Enter();
-  }
-  void UnLock() UNLOCK_FUNCTION(_critSect) {
-    _critSect.Leave();
-  }
+  // Called from Java side so we can cache the address of the Java-managed
+  // |byte_buffer| in |direct_buffer_address_|. The size of the buffer
+  // is also stored in |direct_buffer_capacity_in_bytes_|.
+  // This method will be called by the WebRtcAudioTrack constructor, i.e.,
+  // on the same thread that this object is created on.
+  static void JNICALL CacheDirectBufferAddress(
+      JNIEnv* env, jobject obj, jobject byte_buffer, jlong nativeAudioTrack);
+  void OnCacheDirectBufferAddress(JNIEnv* env, jobject byte_buffer);
-  int32_t InitJavaResources();
-  int32_t InitSampleRate();
+  // Called periodically by the Java based WebRtcAudioTrack object when
+  // playout has started. Each call indicates that |length| new bytes should
+  // be written to the memory area |direct_buffer_address_| for playout.
+  // This method is called on a high-priority thread from Java. The name of
+  // the thread is 'AudioTrackThread'.
+  static void JNICALL GetPlayoutData(
+      JNIEnv* env, jobject obj, jint length, jlong nativeAudioTrack);
+  void OnGetPlayoutData(int length);
-  static bool PlayThreadFunc(void*);
-  bool PlayThreadProcess();
+  // Returns true if SetAndroidAudioDeviceObjects() has been called
+  // successfully.
+  bool HasDeviceObjects();
-  // TODO(leozwang): Android holds only one JVM, all these jni handling
-  // will be consolidated into a single place to make it consistant and
-  // reliable. Chromium has a good example at base/android.
-  static JavaVM* globalJvm;
-  static JNIEnv* globalJNIEnv;
-  static jobject globalContext;
-  static jclass globalScClass;
+  // Called from the constructor. Defines the |j_audio_track_| member.
+  void CreateJavaInstance();
-  JavaVM* _javaVM; // denotes a Java VM
-  JNIEnv* _jniEnvPlay; // The JNI env for playout thread
-  jclass _javaScClass; // AudioDeviceAndroid class
-  jobject _javaScObj; // AudioDeviceAndroid object
-  jobject _javaPlayBuffer;
-  void* _javaDirectPlayBuffer; // Direct buffer pointer to play buffer
-  jmethodID _javaMidPlayAudio; // Method ID of play in AudioDeviceAndroid
+  // Returns the native, or optimal, sample rate reported by the audio output
+  // device.
+  int GetNativeSampleRate();
-  AudioDeviceBuffer* _ptrAudioBuffer;
-  CriticalSectionWrapper& _critSect;
-  int32_t _id;
-  bool _initialized;
+  // Stores thread ID in constructor.
+  // We can then use ThreadChecker::CalledOnValidThread() to ensure that
+  // other methods are called from the same thread.
+  rtc::ThreadChecker thread_checker_;
-  EventWrapper& _timeEventPlay;
-  EventWrapper& _playStartStopEvent;
-  ThreadWrapper* _ptrThreadPlay;
-  uint32_t _playThreadID;
-  bool _playThreadIsInitialized;
-  bool _shutdownPlayThread;
-  bool _playoutDeviceIsSpecified;
+  // Stores thread ID in first call to OnGetPlayoutData() from high-priority
+  // thread in Java. Detached during construction of this object.
+  rtc::ThreadChecker thread_checker_java_;
-  bool _playing;
-  bool _playIsInitialized;
-  bool _speakerIsInitialized;
+  // The Java WebRtcAudioTrack instance.
+  jobject j_audio_track_;
-  bool _startPlay;
+  // Cached copy of address to direct audio buffer owned by |j_audio_track_|.
+ void* direct_buffer_address_; - uint16_t _playWarning; - uint16_t _playError; + // Number of bytes in the direct audio buffer owned by |j_audio_track_|. + int direct_buffer_capacity_in_bytes_; - uint16_t _delayPlayout; + // Number of audio frames per audio buffer. Each audio frame corresponds to + // one sample of PCM mono data at 16 bits per sample. Hence, each audio + // frame contains 2 bytes (given that the Java layer only supports mono). + // Example: 480 for 48000 Hz or 441 for 44100 Hz. + int frames_per_buffer_; - uint16_t _samplingFreqOut; // Sampling frequency for Speaker - uint32_t _maxSpeakerVolume; // The maximum speaker volume value - bool _loudSpeakerOn; + bool initialized_; + bool playing_; + + // Raw pointer handle provided to us in AttachAudioBuffer(). Owned by the + // AudioDeviceModuleImpl class and called by AudioDeviceModuleImpl::Create(). + // The AudioDeviceBuffer is a member of the AudioDeviceModuleImpl instance + // and therefore outlives this object. + AudioDeviceBuffer* audio_device_buffer_; + + // Native sample rate set in AttachAudioBuffer() which uses JNI to ask the + // Java layer for the best possible sample rate for this particular device + // and audio configuration. + int sample_rate_hz_; + + // Estimated playout delay caused by buffering in the Java based audio track. + // We are using a fixed value here since measurements have shown that the + // variations are very small (~10ms) and it is not worth the extra complexity + // to update this estimate on a continuous basis. + int delay_in_milliseconds_; }; } // namespace webrtc diff --git a/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioRecord.java b/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioRecord.java index e73038424..4c40683c1 100644 --- a/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioRecord.java +++ b/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioRecord.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved. + * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved. * * Use of this source code is governed by a BSD-style license * that can be found in the LICENSE file in the root of the source @@ -11,12 +11,8 @@ package org.webrtc.voiceengine; import java.lang.System; -import java.lang.Thread; import java.nio.ByteBuffer; import java.util.concurrent.TimeUnit; -import java.util.Collections; -import java.util.HashSet; -import java.util.Set; import android.content.Context; import android.media.AudioFormat; @@ -36,9 +32,6 @@ class WebRtcAudioRecord { private static final String TAG = "WebRtcAudioRecord"; - // Use 44.1kHz as the default sampling rate. - private static final int SAMPLE_RATE_HZ = 44100; - // Mono recording is default. private static final int CHANNELS = 1; @@ -71,16 +64,6 @@ class WebRtcAudioRecord { private AcousticEchoCanceler aec = null; private boolean useBuiltInAEC = false; - private final Set threadIds = new HashSet(); - - private static boolean runningOnJellyBeanOrHigher() { - return Build.VERSION.SDK_INT >= Build.VERSION_CODES.JELLY_BEAN; - } - - private static boolean runningOnJellyBeanMR1OrHigher() { - return Build.VERSION.SDK_INT >= Build.VERSION_CODES.JELLY_BEAN_MR1; - } - /** * Audio thread which keeps calling ByteBuffer.read() waiting for audio * to be recorded. 
Feeds recorded data to the native counterpart as a @@ -97,16 +80,15 @@ class WebRtcAudioRecord { @Override public void run() { Process.setThreadPriority(Process.THREAD_PRIORITY_URGENT_AUDIO); - DoLog("AudioRecordThread" + getThreadInfo()); - AddThreadId(); + Logd("AudioRecordThread" + WebRtcAudioUtils.getThreadInfo()); try { audioRecord.startRecording(); } catch (IllegalStateException e) { - DoLogErr("AudioRecord.startRecording failed: " + e.getMessage()); + Loge("AudioRecord.startRecording failed: " + e.getMessage()); return; } - assertIsTrue(audioRecord.getRecordingState() + assertTrue(audioRecord.getRecordingState() == AudioRecord.RECORDSTATE_RECORDING); long lastTime = System.nanoTime(); @@ -115,7 +97,7 @@ class WebRtcAudioRecord { if (bytesRead == byteBuffer.capacity()) { nativeDataIsRecorded(bytesRead, nativeAudioRecord); } else { - DoLogErr("AudioRecord.read failed: " + bytesRead); + Loge("AudioRecord.read failed: " + bytesRead); if (bytesRead == AudioRecord.ERROR_INVALID_OPERATION) { keepAlive = false; } @@ -125,16 +107,15 @@ class WebRtcAudioRecord { long durationInMs = TimeUnit.NANOSECONDS.toMillis((nowTime - lastTime)); lastTime = nowTime; - DoLog("bytesRead[" + durationInMs + "] " + bytesRead); + Logd("bytesRead[" + durationInMs + "] " + bytesRead); } } try { audioRecord.stop(); } catch (IllegalStateException e) { - DoLogErr("AudioRecord.stop failed: " + e.getMessage()); + Loge("AudioRecord.stop failed: " + e.getMessage()); } - RemoveThreadId(); } public void joinThread() { @@ -150,43 +131,34 @@ class WebRtcAudioRecord { } WebRtcAudioRecord(Context context, long nativeAudioRecord) { - DoLog("ctor" + getThreadInfo()); + Logd("ctor" + WebRtcAudioUtils.getThreadInfo()); this.context = context; this.nativeAudioRecord = nativeAudioRecord; - audioManager = ((AudioManager) context.getSystemService( - Context.AUDIO_SERVICE)); + audioManager = (AudioManager) context.getSystemService( + Context.AUDIO_SERVICE); sampleRate = GetNativeSampleRate(); bytesPerBuffer = BYTES_PER_FRAME * (sampleRate / BUFFERS_PER_SECOND); framesPerBuffer = sampleRate / BUFFERS_PER_SECOND; byteBuffer = byteBuffer.allocateDirect(bytesPerBuffer); - DoLog("byteBuffer.capacity: " + byteBuffer.capacity()); + Logd("byteBuffer.capacity: " + byteBuffer.capacity()); // Rather than passing the ByteBuffer with every callback (requiring // the potentially expensive GetDirectBufferAddress) we simply have the // the native class cache the address to the memory once. nativeCacheDirectBufferAddress(byteBuffer, nativeAudioRecord); - AddThreadId(); + + if (DEBUG) { + WebRtcAudioUtils.logDeviceInfo(TAG); + } } - /** - * Returns the native or optimal input sample rate for this device's - * primary input stream. Unit is in Hz. - * Note that we actually query the output device but the same result is - * also valid for input. - */ private int GetNativeSampleRate() { - if (!runningOnJellyBeanMR1OrHigher()) { - return SAMPLE_RATE_HZ; - } - String sampleRateString = audioManager.getProperty( - AudioManager.PROPERTY_OUTPUT_SAMPLE_RATE); - return (sampleRateString == null) ? - SAMPLE_RATE_HZ : Integer.parseInt(sampleRateString); + return WebRtcAudioUtils.GetNativeSampleRate(audioManager); } public static boolean BuiltInAECIsAvailable() { // AcousticEchoCanceler was added in API level 16 (Jelly Bean). - if (!runningOnJellyBeanOrHigher()) { + if (!WebRtcAudioUtils.runningOnJellyBeanOrHigher()) { return false; } // TODO(henrika): add black-list based on device name. 
We could also @@ -196,10 +168,9 @@ class WebRtcAudioRecord { } private boolean EnableBuiltInAEC(boolean enable) { - DoLog("EnableBuiltInAEC(" + enable + ')'); - AddThreadId(); + Logd("EnableBuiltInAEC(" + enable + ')'); // AcousticEchoCanceler was added in API level 16 (Jelly Bean). - if (!runningOnJellyBeanOrHigher()) { + if (!WebRtcAudioUtils.runningOnJellyBeanOrHigher()) { return false; } // Store the AEC state. @@ -208,17 +179,16 @@ class WebRtcAudioRecord { if (aec != null) { int ret = aec.setEnabled(enable); if (ret != AudioEffect.SUCCESS) { - DoLogErr("AcousticEchoCanceler.setEnabled failed"); + Loge("AcousticEchoCanceler.setEnabled failed"); return false; } - DoLog("AcousticEchoCanceler.getEnabled: " + aec.getEnabled()); + Logd("AcousticEchoCanceler.getEnabled: " + aec.getEnabled()); } return true; } private int InitRecording(int sampleRate) { - DoLog("InitRecording(sampleRate=" + sampleRate + ")"); - AddThreadId(); + Logd("InitRecording(sampleRate=" + sampleRate + ")"); // Get the minimum buffer size required for the successful creation of // an AudioRecord object, in byte units. // Note that this size doesn't guarantee a smooth recording under load. @@ -227,19 +197,16 @@ class WebRtcAudioRecord { sampleRate, AudioFormat.CHANNEL_IN_MONO, AudioFormat.ENCODING_PCM_16BIT); - DoLog("AudioRecord.getMinBufferSize: " + minBufferSize); + Logd("AudioRecord.getMinBufferSize: " + minBufferSize); if (aec != null) { aec.release(); aec = null; } - if (audioRecord != null) { - audioRecord.release(); - audioRecord = null; - } + assertTrue(audioRecord == null); int bufferSizeInBytes = Math.max(byteBuffer.capacity(), minBufferSize); - DoLog("bufferSizeInBytes: " + bufferSizeInBytes); + Logd("bufferSizeInBytes: " + bufferSizeInBytes); try { audioRecord = new AudioRecord(AudioSource.VOICE_COMMUNICATION, sampleRate, @@ -248,105 +215,76 @@ class WebRtcAudioRecord { bufferSizeInBytes); } catch (IllegalArgumentException e) { - DoLog(e.getMessage()); + Logd(e.getMessage()); return -1; } - assertIsTrue(audioRecord.getState() == AudioRecord.STATE_INITIALIZED); + assertTrue(audioRecord.getState() == AudioRecord.STATE_INITIALIZED); - DoLog("AudioRecord " + + Logd("AudioRecord " + "session ID: " + audioRecord.getAudioSessionId() + ", " + "audio format: " + audioRecord.getAudioFormat() + ", " + "channels: " + audioRecord.getChannelCount() + ", " + "sample rate: " + audioRecord.getSampleRate()); - DoLog("AcousticEchoCanceler.isAvailable: " + BuiltInAECIsAvailable()); + Logd("AcousticEchoCanceler.isAvailable: " + BuiltInAECIsAvailable()); if (!BuiltInAECIsAvailable()) { return framesPerBuffer; } aec = AcousticEchoCanceler.create(audioRecord.getAudioSessionId()); if (aec == null) { - DoLogErr("AcousticEchoCanceler.create failed"); + Loge("AcousticEchoCanceler.create failed"); return -1; } int ret = aec.setEnabled(useBuiltInAEC); if (ret != AudioEffect.SUCCESS) { - DoLogErr("AcousticEchoCanceler.setEnabled failed"); + Loge("AcousticEchoCanceler.setEnabled failed"); return -1; } Descriptor descriptor = aec.getDescriptor(); - DoLog("AcousticEchoCanceler " + + Logd("AcousticEchoCanceler " + "name: " + descriptor.name + ", " + "implementor: " + descriptor.implementor + ", " + "uuid: " + descriptor.uuid); - DoLog("AcousticEchoCanceler.getEnabled: " + aec.getEnabled()); + Logd("AcousticEchoCanceler.getEnabled: " + aec.getEnabled()); return framesPerBuffer; } private boolean StartRecording() { - DoLog("StartRecording"); - AddThreadId(); - if (audioRecord == null) { - DoLogErr("start() called before init()"); - return 
false; - } - if (audioThread != null) { - DoLogErr("start() was already called"); - return false; - } + Logd("StartRecording"); + assertTrue(audioRecord != null); + assertTrue(audioThread == null); audioThread = new AudioRecordThread("AudioRecordJavaThread"); audioThread.start(); return true; } private boolean StopRecording() { - DoLog("StopRecording"); - AddThreadId(); - if (audioThread == null) { - DoLogErr("start() was never called, or stop() was already called"); - return false; - } + Logd("StopRecording"); + assertTrue(audioThread != null); audioThread.joinThread(); audioThread = null; if (aec != null) { aec.release(); aec = null; } - if (audioRecord != null) { - audioRecord.release(); - audioRecord = null; - } + audioRecord.release(); + audioRecord = null; return true; } - private void DoLog(String msg) { - Log.d(TAG, msg); - } - - private void DoLogErr(String msg) { - Log.e(TAG, msg); - } - - /** Helper method for building a string of thread information.*/ - private static String getThreadInfo() { - return "@[name=" + Thread.currentThread().getName() - + ", id=" + Thread.currentThread().getId() + "]"; - } - - /** Helper method which throws an exception when an assertion has failed. */ - private static void assertIsTrue(boolean condition) { + /** Helper method which throws an exception when an assertion has failed. */ + private static void assertTrue(boolean condition) { if (!condition) { throw new AssertionError("Expected condition to be true"); } } - private void AddThreadId() { - threadIds.add(Thread.currentThread().getId()); - DoLog("threadIds: " + threadIds + " (#threads=" + threadIds.size() + ")"); + private static void Logd(String msg) { + Log.d(TAG, msg); } - private void RemoveThreadId() { - threadIds.remove(Thread.currentThread().getId()); - DoLog("threadIds: " + threadIds + " (#threads=" + threadIds.size() + ")"); + private static void Loge(String msg) { + Log.e(TAG, msg); } private native void nativeCacheDirectBufferAddress( diff --git a/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioTrack.java b/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioTrack.java index 273d03f35..0a0678e75 100644 --- a/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioTrack.java +++ b/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioTrack.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved. + * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved. * * Use of this source code is governed by a BSD-style license * that can be found in the LICENSE file in the root of the source @@ -10,300 +10,234 @@ package org.webrtc.voiceengine; +import java.lang.Thread; import java.nio.ByteBuffer; -import java.util.concurrent.locks.ReentrantLock; import android.content.Context; import android.media.AudioFormat; import android.media.AudioManager; -import android.media.AudioRecord; import android.media.AudioTrack; +import android.os.Process; import android.util.Log; class WebRtcAudioTrack { - private AudioTrack _audioTrack = null; + private static final boolean DEBUG = false; - private Context _context; - private AudioManager _audioManager; + private static final String TAG = "WebRtcAudioTrack"; - private ByteBuffer _playBuffer; - private byte[] _tempBufPlay; + // Mono playout is default. + // TODO(henrika): add stereo support. 
+  private static final int CHANNELS = 1;
-    private final ReentrantLock _playLock = new ReentrantLock();
+  // Default audio data format is PCM 16 bit per sample.
+  // Guaranteed to be supported by all devices.
+  private static final int BITS_PER_SAMPLE = 16;
-    private boolean _doPlayInit = true;
-    private boolean _doRecInit = true;
-    private boolean _isRecording = false;
-    private boolean _isPlaying = false;
+  // Number of bytes per audio frame.
+  // Example: 16-bit PCM in mono => 1*(16/8)=2 [bytes/frame]
+  private static final int BYTES_PER_FRAME = CHANNELS * (BITS_PER_SAMPLE / 8);
-    private int _bufferedPlaySamples = 0;
-    private int _playPosition = 0;
+  // Requested size of each playout buffer provided to the client.
+  private static final int CALLBACK_BUFFER_SIZE_MS = 10;
-    WebRtcAudioTrack() {
+  // Average number of callbacks per second.
+  private static final int BUFFERS_PER_SECOND = 1000 / CALLBACK_BUFFER_SIZE_MS;
+
+  private ByteBuffer byteBuffer;
+  private final int sampleRate;
+
+  private final long nativeAudioTrack;
+  private final Context context;
+  private final AudioManager audioManager;
+
+  private AudioTrack audioTrack = null;
+  private AudioTrackThread audioThread = null;
+
+  /**
+   * Audio thread which keeps calling AudioTrack.write() to stream audio.
+   * Data is periodically acquired from the native WebRTC layer using the
+   * nativeGetPlayoutData callback function.
+   * This thread uses a Process.THREAD_PRIORITY_URGENT_AUDIO priority.
+   */
+  private class AudioTrackThread extends Thread {
+    private volatile boolean keepAlive = true;
+
+    public AudioTrackThread(String name) {
+      super(name);
+    }
+
+    @Override
+    public void run() {
+      Process.setThreadPriority(Process.THREAD_PRIORITY_URGENT_AUDIO);
+      Logd("AudioTrackThread" + WebRtcAudioUtils.getThreadInfo());
+
+      try {
+        // In MODE_STREAM mode we can optionally prime the output buffer by
+        // writing up to bufferSizeInBytes (from constructor) before starting.
+        // This priming will avoid an immediate underrun, but is not required.
+        // TODO(henrika): initial tests have shown that priming is not required.
+        audioTrack.play();
+        assertTrue(audioTrack.getPlayState() == AudioTrack.PLAYSTATE_PLAYING);
+      } catch (IllegalStateException e) {
+        Loge("AudioTrack.play failed: " + e.getMessage());
+        return;
+      }
+
+      // Fixed size in bytes of each 10ms block of audio data that we ask for
+      // using callbacks to the native WebRTC client.
+      final int sizeInBytes = byteBuffer.capacity();
+
+      while (keepAlive) {
+        // Get 10ms of PCM data from the native WebRTC client. Audio data is
+        // written into the common ByteBuffer using the address that was
+        // cached at construction.
+        nativeGetPlayoutData(sizeInBytes, nativeAudioTrack);
+        // Write data until all data has been written to the audio sink.
+        // Upon return, the buffer position will have been advanced to reflect
+        // the amount of data that was successfully written to the AudioTrack.
+        assertTrue(sizeInBytes <= byteBuffer.remaining());
+        int bytesWritten = audioTrack.write(byteBuffer,
+                                            sizeInBytes,
+                                            AudioTrack.WRITE_BLOCKING);
+        if (bytesWritten != sizeInBytes) {
+          Loge("AudioTrack.write failed: " + bytesWritten);
+          if (bytesWritten == AudioTrack.ERROR_INVALID_OPERATION) {
+            keepAlive = false;
+          }
+        }
+        // The byte buffer must be rewound since byteBuffer.position() is
+        // increased at each call to AudioTrack.write(). If we don't do this,
+        // next call to AudioTrack.write() will fail.
+ byteBuffer.rewind(); + + // TODO(henrika): it is possible to create a delay estimate here by + // counting number of written frames and subtracting the result from + // audioTrack.getPlaybackHeadPosition(). + } + + try { + audioTrack.stop(); + } catch (IllegalStateException e) { + Loge("AudioTrack.stop failed: " + e.getMessage()); + } + assertTrue(audioTrack.getPlayState() == AudioTrack.PLAYSTATE_STOPPED); + audioTrack.flush(); + } + + public void joinThread() { + keepAlive = false; + while (isAlive()) { try { - _playBuffer = ByteBuffer.allocateDirect(2 * 480); // Max 10 ms @ 48 - // kHz - } catch (Exception e) { - DoLog(e.getMessage()); + join(); + } catch (InterruptedException e) { + // Ignore. } - - _tempBufPlay = new byte[2 * 480]; + } } + } - @SuppressWarnings("unused") - private int InitPlayback(int sampleRate) { - // get the minimum buffer size that can be used - int minPlayBufSize = AudioTrack.getMinBufferSize( - sampleRate, - AudioFormat.CHANNEL_OUT_MONO, - AudioFormat.ENCODING_PCM_16BIT); + WebRtcAudioTrack(Context context, long nativeAudioTrack) { + Logd("ctor" + WebRtcAudioUtils.getThreadInfo()); + this.context = context; + this.nativeAudioTrack = nativeAudioTrack; + audioManager = (AudioManager) context.getSystemService( + Context.AUDIO_SERVICE); + sampleRate = GetNativeSampleRate(); + byteBuffer = byteBuffer.allocateDirect( + BYTES_PER_FRAME * (sampleRate / BUFFERS_PER_SECOND)); + Logd("byteBuffer.capacity: " + byteBuffer.capacity()); - // DoLog("min play buf size is " + minPlayBufSize); + // Rather than passing the ByteBuffer with every callback (requiring + // the potentially expensive GetDirectBufferAddress) we simply have the + // the native class cache the address to the memory once. + nativeCacheDirectBufferAddress(byteBuffer, nativeAudioTrack); - int playBufSize = minPlayBufSize; - if (playBufSize < 6000) { - playBufSize *= 2; - } - _bufferedPlaySamples = 0; - // DoLog("play buf size is " + playBufSize); - - // release the object - if (_audioTrack != null) { - _audioTrack.release(); - _audioTrack = null; - } - - try { - _audioTrack = new AudioTrack( - AudioManager.STREAM_VOICE_CALL, - sampleRate, - AudioFormat.CHANNEL_OUT_MONO, - AudioFormat.ENCODING_PCM_16BIT, - playBufSize, AudioTrack.MODE_STREAM); - } catch (Exception e) { - DoLog(e.getMessage()); - return -1; - } - - // check that the audioRecord is ready to be used - if (_audioTrack.getState() != AudioTrack.STATE_INITIALIZED) { - // DoLog("play not initialized " + sampleRate); - return -1; - } - - // DoLog("play sample rate set to " + sampleRate); - - if (_audioManager == null && _context != null) { - _audioManager = (AudioManager) - _context.getSystemService(Context.AUDIO_SERVICE); - } - - // Return max playout volume - if (_audioManager == null) { - // Don't know the max volume but still init is OK for playout, - // so we should not return error. 
- return 0; - } - return _audioManager.getStreamMaxVolume(AudioManager.STREAM_VOICE_CALL); + if (DEBUG) { + WebRtcAudioUtils.logDeviceInfo(TAG); } + } - @SuppressWarnings("unused") - private int StartPlayback() { - // start playout - try { - _audioTrack.play(); + private int GetNativeSampleRate() { + return WebRtcAudioUtils.GetNativeSampleRate(audioManager); + } - } catch (IllegalStateException e) { - e.printStackTrace(); - return -1; - } + private int InitPlayout(int sampleRate) { + Logd("InitPlayout(sampleRate=" + sampleRate + ")"); + // Get the minimum buffer size required for the successful creation of an + // AudioTrack object to be created in the MODE_STREAM mode. + // Note that this size doesn't guarantee a smooth playback under load. + // TODO(henrika): should we extend the buffer size to avoid glitches? + final int minBufferSizeInBytes = AudioTrack.getMinBufferSize( + sampleRate, + AudioFormat.CHANNEL_OUT_MONO, + AudioFormat.ENCODING_PCM_16BIT); + Logd("AudioTrack.getMinBufferSize: " + minBufferSizeInBytes); + assertTrue(audioTrack == null); - _isPlaying = true; - return 0; + // For the streaming mode, data must be written to the audio sink in + // chunks of size (given by byteBuffer.capacity()) less than or equal + // to the total buffer size |minBufferSizeInBytes|. + assertTrue(byteBuffer.capacity() < minBufferSizeInBytes); + try { + // Create an AudioTrack object and initialize its associated audio buffer. + // The size of this buffer determines how long an AudioTrack can play + // before running out of data. + audioTrack = new AudioTrack(AudioManager.STREAM_VOICE_CALL, + sampleRate, + AudioFormat.CHANNEL_OUT_MONO, + AudioFormat.ENCODING_PCM_16BIT, + minBufferSizeInBytes, + AudioTrack.MODE_STREAM); + } catch (IllegalArgumentException e) { + Logd(e.getMessage()); + return -1; } + assertTrue(audioTrack.getState() == AudioTrack.STATE_INITIALIZED); + assertTrue(audioTrack.getPlayState() == AudioTrack.PLAYSTATE_STOPPED); + assertTrue(audioTrack.getStreamType() == AudioManager.STREAM_VOICE_CALL); - @SuppressWarnings("unused") - private int StopPlayback() { - _playLock.lock(); - try { - // only stop if we are playing - if (_audioTrack.getPlayState() == AudioTrack.PLAYSTATE_PLAYING) { - // stop playout - try { - _audioTrack.stop(); - } catch (IllegalStateException e) { - e.printStackTrace(); - return -1; - } + // Return a delay estimate in milliseconds given the minimum buffer size. + return (1000 * (minBufferSizeInBytes / BYTES_PER_FRAME) / sampleRate); + } - // flush the buffers - _audioTrack.flush(); - } + private boolean StartPlayout() { + Logd("StartPlayout"); + assertTrue(audioTrack != null); + assertTrue(audioThread == null); + audioThread = new AudioTrackThread("AudioTrackJavaThread"); + audioThread.start(); + return true; + } - // release the object - _audioTrack.release(); - _audioTrack = null; - - } finally { - // Ensure we always unlock, both for success, exception or error - // return. 
- _doPlayInit = true; - _playLock.unlock(); - } - - _isPlaying = false; - return 0; + private boolean StopPlayout() { + Logd("StopPlayout"); + assertTrue(audioThread != null); + audioThread.joinThread(); + audioThread = null; + if (audioTrack != null) { + audioTrack.release(); + audioTrack = null; } + return true; + } - @SuppressWarnings("unused") - private int PlayAudio(int lengthInBytes) { - - _playLock.lock(); - try { - if (_audioTrack == null) { - return -2; // We have probably closed down while waiting for - // play lock - } - - // Set priority, only do once - if (_doPlayInit == true) { - try { - android.os.Process.setThreadPriority( - android.os.Process.THREAD_PRIORITY_URGENT_AUDIO); - } catch (Exception e) { - DoLog("Set play thread priority failed: " + e.getMessage()); - } - _doPlayInit = false; - } - - int written = 0; - _playBuffer.get(_tempBufPlay); - written = _audioTrack.write(_tempBufPlay, 0, lengthInBytes); - _playBuffer.rewind(); // Reset the position to start of buffer - - // DoLog("Wrote data to sndCard"); - - // increase by number of written samples - _bufferedPlaySamples += (written >> 1); - - // decrease by number of played samples - int pos = _audioTrack.getPlaybackHeadPosition(); - if (pos < _playPosition) { // wrap or reset by driver - _playPosition = 0; // reset - } - _bufferedPlaySamples -= (pos - _playPosition); - _playPosition = pos; - - if (written != lengthInBytes) { - // DoLog("Could not write all data to sc (written = " + written - // + ", length = " + lengthInBytes + ")"); - return -1; - } - - } finally { - // Ensure we always unlock, both for success, exception or error - // return. - _playLock.unlock(); - } - - return _bufferedPlaySamples; + /** Helper method which throws an exception when an assertion has failed. 
*/ + private static void assertTrue(boolean condition) { + if (!condition) { + throw new AssertionError("Expected condition to be true"); } + } - @SuppressWarnings("unused") - private int SetPlayoutSpeaker(boolean loudspeakerOn) { - // create audio manager if needed - if (_audioManager == null && _context != null) { - _audioManager = (AudioManager) - _context.getSystemService(Context.AUDIO_SERVICE); - } + private static void Logd(String msg) { + Log.d(TAG, msg); + } - if (_audioManager == null) { - DoLogErr("Could not change audio routing - no audio manager"); - return -1; - } + private static void Loge(String msg) { + Log.e(TAG, msg); + } - int apiLevel = android.os.Build.VERSION.SDK_INT; + private native void nativeCacheDirectBufferAddress( + ByteBuffer byteBuffer, long nativeAudioRecord); - if ((3 == apiLevel) || (4 == apiLevel)) { - // 1.5 and 1.6 devices - if (loudspeakerOn) { - // route audio to back speaker - _audioManager.setMode(AudioManager.MODE_NORMAL); - } else { - // route audio to earpiece - _audioManager.setMode(AudioManager.MODE_IN_CALL); - } - } else { - // 2.x devices - if ((android.os.Build.BRAND.equals("Samsung") || - android.os.Build.BRAND.equals("samsung")) && - ((5 == apiLevel) || (6 == apiLevel) || - (7 == apiLevel))) { - // Samsung 2.0, 2.0.1 and 2.1 devices - if (loudspeakerOn) { - // route audio to back speaker - _audioManager.setMode(AudioManager.MODE_IN_CALL); - _audioManager.setSpeakerphoneOn(loudspeakerOn); - } else { - // route audio to earpiece - _audioManager.setSpeakerphoneOn(loudspeakerOn); - _audioManager.setMode(AudioManager.MODE_NORMAL); - } - } else { - // Non-Samsung and Samsung 2.2 and up devices - _audioManager.setSpeakerphoneOn(loudspeakerOn); - } - } - - return 0; - } - - @SuppressWarnings("unused") - private int SetPlayoutVolume(int level) { - - // create audio manager if needed - if (_audioManager == null && _context != null) { - _audioManager = (AudioManager) - _context.getSystemService(Context.AUDIO_SERVICE); - } - - int retVal = -1; - - if (_audioManager != null) { - _audioManager.setStreamVolume(AudioManager.STREAM_VOICE_CALL, - level, 0); - retVal = 0; - } - - return retVal; - } - - @SuppressWarnings("unused") - private int GetPlayoutVolume() { - - // create audio manager if needed - if (_audioManager == null && _context != null) { - _audioManager = (AudioManager) - _context.getSystemService(Context.AUDIO_SERVICE); - } - - int level = -1; - - if (_audioManager != null) { - level = _audioManager.getStreamVolume( - AudioManager.STREAM_VOICE_CALL); - } - - return level; - } - - final String logTag = "WebRTC AD java"; - - private void DoLog(String msg) { - Log.d(logTag, msg); - } - - private void DoLogErr(String msg) { - Log.e(logTag, msg); - } + private native void nativeGetPlayoutData(int bytes, long nativeAudioRecord); } diff --git a/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioUtils.java b/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioUtils.java new file mode 100644 index 000000000..6821726ee --- /dev/null +++ b/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioUtils.java @@ -0,0 +1,63 @@ +/* + * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. 
All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +package org.webrtc.voiceengine; + +import java.lang.Thread; + +import android.media.AudioManager; +import android.os.Build; +import android.util.Log; + +public final class WebRtcAudioUtils { + // Use 44.1kHz as the default sampling rate. + private static final int SAMPLE_RATE_HZ = 44100; + + public static boolean runningOnJellyBeanOrHigher() { + return Build.VERSION.SDK_INT >= Build.VERSION_CODES.JELLY_BEAN; + } + + public static boolean runningOnJellyBeanMR1OrHigher() { + return Build.VERSION.SDK_INT >= Build.VERSION_CODES.JELLY_BEAN_MR1; + } + + /** Helper method for building a string of thread information.*/ + public static String getThreadInfo() { + return "@[name=" + Thread.currentThread().getName() + + ", id=" + Thread.currentThread().getId() + "]"; + } + + /** Information about the current build, taken from system properties. */ + public static void logDeviceInfo(String tag) { + Log.d(tag, "Android SDK: " + Build.VERSION.SDK_INT + ", " + + "Release: " + Build.VERSION.RELEASE + ", " + + "Brand: " + Build.BRAND + ", " + + "Device: " + Build.DEVICE + ", " + + "Id: " + Build.ID + ", " + + "Hardware: " + Build.HARDWARE + ", " + + "Manufacturer: " + Build.MANUFACTURER + ", " + + "Model: " + Build.MODEL + ", " + + "Product: " + Build.PRODUCT); + } + + /** + * Returns the native or optimal output sample rate for this device's + * primary output stream. Unit is in Hz. + */ + public static int GetNativeSampleRate(AudioManager audioManager) { + if (!WebRtcAudioUtils.runningOnJellyBeanMR1OrHigher()) { + return SAMPLE_RATE_HZ; + } + String sampleRateString = audioManager.getProperty( + AudioManager.PROPERTY_OUTPUT_SAMPLE_RATE); + return (sampleRateString == null) ? + SAMPLE_RATE_HZ : Integer.parseInt(sampleRateString); + } +} diff --git a/webrtc/modules/audio_device/android/opensles_input.cc b/webrtc/modules/audio_device/android/opensles_input.cc index e31b57f60..1cdaea15f 100644 --- a/webrtc/modules/audio_device/android/opensles_input.cc +++ b/webrtc/modules/audio_device/android/opensles_input.cc @@ -41,8 +41,9 @@ enum { namespace webrtc { -OpenSlesInput::OpenSlesInput() - : initialized_(false), +OpenSlesInput::OpenSlesInput(PlayoutDelayProvider* delay_provider) + : delay_provider_(delay_provider), + initialized_(false), mic_initialized_(false), rec_initialized_(false), crit_sect_(CriticalSectionWrapper::CreateCriticalSection()), @@ -527,8 +528,7 @@ bool OpenSlesInput::CbThreadImpl() { while (fifo_->size() > 0 && recording_) { int8_t* audio = fifo_->Pop(); audio_buffer_->SetRecordedBuffer(audio, buffer_size_samples()); - // TODO(henrika): improve the delay estimate. - audio_buffer_->SetVQEData(100, + audio_buffer_->SetVQEData(delay_provider_->PlayoutDelayMs(), recording_delay_, 0); audio_buffer_->DeliverRecordedData(); } diff --git a/webrtc/modules/audio_device/android/opensles_input.h b/webrtc/modules/audio_device/android/opensles_input.h index 05a1ef0f5..e2170e209 100644 --- a/webrtc/modules/audio_device/android/opensles_input.h +++ b/webrtc/modules/audio_device/android/opensles_input.h @@ -35,7 +35,7 @@ class ThreadWrapper; // to non-const methods require exclusive access to the object. class OpenSlesInput { public: - OpenSlesInput(); + OpenSlesInput(PlayoutDelayProvider* delay_provider); ~OpenSlesInput(); static int32_t SetAndroidAudioDeviceObjects(void* javaVM, @@ -174,6 +174,8 @@ class OpenSlesInput { // Thread-compatible. 
bool CbThreadImpl(); + PlayoutDelayProvider* delay_provider_; + // Java API handle AudioManagerJni audio_manager_; diff --git a/webrtc/modules/audio_device/android/opensles_output.cc b/webrtc/modules/audio_device/android/opensles_output.cc index 487e28404..192e5354e 100644 --- a/webrtc/modules/audio_device/android/opensles_output.cc +++ b/webrtc/modules/audio_device/android/opensles_output.cc @@ -25,8 +25,6 @@ do { \ SLresult err = (op); \ if (err != SL_RESULT_SUCCESS) { \ - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, id_, \ - "OpenSL error: %d", err); \ assert(false); \ return ret_val; \ } \ @@ -43,9 +41,8 @@ enum { namespace webrtc { -OpenSlesOutput::OpenSlesOutput(const int32_t id) - : id_(id), - initialized_(false), +OpenSlesOutput::OpenSlesOutput() + : initialized_(false), speaker_initialized_(false), play_initialized_(false), crit_sect_(CriticalSectionWrapper::CreateCriticalSection()), @@ -468,7 +465,6 @@ bool OpenSlesOutput::HandleUnderrun(int event_id, int event_msg) { if (event_id == kNoUnderrun) { return false; } - WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, id_, "Audio underrun"); assert(event_id == kUnderrun); assert(event_msg > 0); // Wait for all enqueued buffers to be flushed. diff --git a/webrtc/modules/audio_device/android/opensles_output.h b/webrtc/modules/audio_device/android/opensles_output.h index aa9b5bf12..9cb758d96 100644 --- a/webrtc/modules/audio_device/android/opensles_output.h +++ b/webrtc/modules/audio_device/android/opensles_output.h @@ -35,7 +35,7 @@ class ThreadWrapper; // to non-const methods require exclusive access to the object. class OpenSlesOutput : public PlayoutDelayProvider { public: - explicit OpenSlesOutput(const int32_t id); + explicit OpenSlesOutput(); virtual ~OpenSlesOutput(); static int32_t SetAndroidAudioDeviceObjects(void* javaVM, @@ -191,7 +191,6 @@ class OpenSlesOutput : public PlayoutDelayProvider { // Java API handle AudioManagerJni audio_manager_; - int id_; bool initialized_; bool speaker_initialized_; bool play_initialized_;
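For readers who have not used the Java AudioTrack streaming API that the new WebRtcAudioTrack.java relies on, the same MODE_STREAM pattern can be sketched in isolation: query the minimum buffer size, create the track, then feed it fixed 10 ms blocks from a direct ByteBuffer using blocking writes, rewinding the buffer between writes. Everything below is illustrative only; the class name AudioTrackSketch, the 48000 Hz rate, the use of STREAM_MUSIC and the zero-filled (silent) source buffer are assumptions made for the example and are not code from this CL, and the ByteBuffer overload of write() requires API level 21.

  import java.nio.ByteBuffer;

  import android.media.AudioFormat;
  import android.media.AudioManager;
  import android.media.AudioTrack;

  // Minimal sketch of the MODE_STREAM playout pattern used by WebRtcAudioTrack:
  // create the track with the minimum buffer size reported by the platform,
  // then write fixed-size 10 ms blocks from a direct ByteBuffer in blocking
  // mode, rewinding the buffer between writes. The buffer stays zeroed
  // (silence) here; the real class fills it via nativeGetPlayoutData().
  public final class AudioTrackSketch {
    private static final int SAMPLE_RATE = 48000;   // Assumed native rate.
    private static final int BYTES_PER_FRAME = 2;   // Mono, 16-bit PCM.
    private static final int FRAMES_PER_10_MS = SAMPLE_RATE / 100;

    public static void playOneSecondOfSilence() {
      final ByteBuffer buffer =
          ByteBuffer.allocateDirect(BYTES_PER_FRAME * FRAMES_PER_10_MS);
      final int minBufferSizeInBytes = AudioTrack.getMinBufferSize(
          SAMPLE_RATE, AudioFormat.CHANNEL_OUT_MONO,
          AudioFormat.ENCODING_PCM_16BIT);
      // Each write must fit inside the track's internal buffer, mirroring the
      // capacity check performed by InitPlayout() in the patch.
      if (minBufferSizeInBytes <= buffer.capacity()) {
        throw new IllegalStateException("10 ms block does not fit in buffer");
      }
      final AudioTrack track = new AudioTrack(
          AudioManager.STREAM_MUSIC,  // The CL uses STREAM_VOICE_CALL instead.
          SAMPLE_RATE,
          AudioFormat.CHANNEL_OUT_MONO,
          AudioFormat.ENCODING_PCM_16BIT,
          minBufferSizeInBytes,
          AudioTrack.MODE_STREAM);
      track.play();
      for (int i = 0; i < 100; ++i) {  // 100 x 10 ms = one second.
        // A real implementation refills |buffer| here, e.g. from a JNI callback.
        int written = track.write(
            buffer, buffer.capacity(), AudioTrack.WRITE_BLOCKING);  // API 21+.
        if (written != buffer.capacity()) {
          break;  // Short write or an error such as ERROR_INVALID_OPERATION.
        }
        // write() advances position(), so rewind before the buffer is reused.
        buffer.rewind();
      }
      track.stop();
      track.release();
    }
  }

Because the write is blocking, the loop is paced by the audio sink itself, which is what allows the high-priority thread that requests decoded audio to live entirely in the Java class while the C++ side only services OnGetPlayoutData().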
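The playout delay estimate returned by the new Java InitPlayout() is simply the minimum buffer size expressed in milliseconds: 1000 * (minBufferSizeInBytes / BYTES_PER_FRAME) / sampleRate. A worked example follows, using a hypothetical getMinBufferSize() result of 3840 bytes and an assumed 48000 Hz native rate; both numbers are made up for illustration and are not taken from this CL.

  // Reproduces the fixed delay estimate computed by InitPlayout() in
  // WebRtcAudioTrack.java, with assumed inputs.
  public final class PlayoutDelayEstimateExample {
    public static void main(String[] args) {
      final int bytesPerFrame = 1 * (16 / 8);  // Mono, 16-bit PCM => 2 bytes/frame.
      final int minBufferSizeInBytes = 3840;   // Hypothetical getMinBufferSize() result.
      final int sampleRate = 48000;            // Assumed native sample rate.
      // 3840 bytes => 1920 frames => 1920 / 48000 s => 40 ms of buffering.
      final int delayMs = 1000 * (minBufferSizeInBytes / bytesPerFrame) / sampleRate;
      System.out.println("Estimated playout delay: " + delayMs + " ms");  // Prints 40.
    }
  }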