Refactoring WebRTC Java/JNI audio track in C++ and Java.

This CL is part II in a major refactoring effort. See https://webrtc-codereview.appspot.com/33969004 for part I.

- Removes unused code and legacy WEBRTC_TRACE logging macros
- Uses the optimal sample rate and buffer size in the Java AudioTrack (a hard-coded sample rate was used before); see the first sketch after this list
- Brings the code more in line with the implementation in Chrome
- Adds helper methods for JNI handling to improve readability
- Changes the threading model (the high-priority audio thread now lives in Java and the C++ side only acts as a proxy); see the second sketch below
- Simplifies the delay estimate
- Adds basic thread checks
- Removes all locks on the C++ side
- Removes all locks on the Java side
- Improves construction/destruction
- Additional cleanup
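
For reference, the sample-rate/buffer-size change can be summarized by the following minimal Java sketch. It is illustrative only (the class and method names here are not the real ones); the actual code lives in WebRtcAudioUtils.java and WebRtcAudioTrack.java in this CL.

    import android.media.AudioFormat;
    import android.media.AudioManager;
    import android.media.AudioTrack;
    import android.os.Build;

    // Sketch: query the device's preferred output sample rate and derive the
    // minimum AudioTrack buffer size instead of hard-coding 44.1 kHz.
    final class SampleRateSketch {
      private static final int DEFAULT_SAMPLE_RATE_HZ = 44100;  // fallback, as in the CL

      static int nativeSampleRate(AudioManager audioManager) {
        if (Build.VERSION.SDK_INT < Build.VERSION_CODES.JELLY_BEAN_MR1) {
          // PROPERTY_OUTPUT_SAMPLE_RATE requires API level 17.
          return DEFAULT_SAMPLE_RATE_HZ;
        }
        String rate = audioManager.getProperty(
            AudioManager.PROPERTY_OUTPUT_SAMPLE_RATE);
        return (rate == null) ? DEFAULT_SAMPLE_RATE_HZ : Integer.parseInt(rate);
      }

      static int minPlayoutBufferSizeInBytes(int sampleRateHz) {
        // Minimum buffer for a mono, 16-bit PCM AudioTrack in MODE_STREAM.
        return AudioTrack.getMinBufferSize(sampleRateHz,
                                           AudioFormat.CHANNEL_OUT_MONO,
                                           AudioFormat.ENCODING_PCM_16BIT);
      }
    }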

Tested using the AppRTCDemo and WebRTCDemo APKs on N6, N5, N7, Samsung Galaxy S4 and
Samsung Galaxy S4 mini (which uses 44.1 kHz as its native sample rate).
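
The new threading model boils down to the hedged sketch below: a high-priority Java thread pulls decoded 10 ms buffers from native code via a JNI callback and writes them to the AudioTrack, while C++ only caches the direct buffer address and serves data on request. Names are illustrative and the native method is assumed to be registered by the C++ AudioTrackJni counterpart; the real implementation is WebRtcAudioTrack.java in this CL.

    import java.nio.ByteBuffer;
    import android.media.AudioTrack;
    import android.os.Process;

    // Sketch of the Java-side playout thread introduced by this CL.
    class PlayoutThreadSketch extends Thread {
      private volatile boolean keepAlive = true;
      private final AudioTrack audioTrack;  // created in MODE_STREAM
      private final ByteBuffer byteBuffer;  // direct buffer shared with C++
      private final long nativeAudioTrack;  // handle to the C++ proxy object

      PlayoutThreadSketch(AudioTrack track, ByteBuffer buffer, long nativePtr) {
        audioTrack = track;
        byteBuffer = buffer;
        nativeAudioTrack = nativePtr;
      }

      @Override
      public void run() {
        Process.setThreadPriority(Process.THREAD_PRIORITY_URGENT_AUDIO);
        audioTrack.play();
        final int sizeInBytes = byteBuffer.capacity();  // one 10 ms chunk
        while (keepAlive) {
          // Ask the C++ side to fill the shared direct buffer with 10 ms of PCM.
          nativeGetPlayoutData(sizeInBytes, nativeAudioTrack);
          audioTrack.write(byteBuffer, sizeInBytes, AudioTrack.WRITE_BLOCKING);
          // write() advances the buffer position; rewind before the next round.
          byteBuffer.rewind();
        }
        audioTrack.stop();
      }

      void stopThread() { keepAlive = false; }

      private native void nativeGetPlayoutData(int bytes, long nativeAudioTrack);
    }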

BUG=NONE
R=magjed@webrtc.org, perkj@webrtc.org

Review URL: https://webrtc-codereview.appspot.com/39169004

Cr-Commit-Position: refs/heads/master@{#8460}
git-svn-id: http://webrtc.googlecode.com/svn/trunk@8460 4adac7df-926f-26a2-2b94-8c16560cd09d
henrika@webrtc.org 2015-02-23 11:54:05 +00:00
parent 2ad3bb17a7
commit 962c62475e
17 changed files with 715 additions and 1875 deletions


@ -135,6 +135,7 @@
'<(webrtc_modules_dir)/video_render/android/java/src/org/webrtc/videoengine/ViEAndroidGLES20.java',
'<(webrtc_modules_dir)/video_render/android/java/src/org/webrtc/videoengine/ViERenderer.java',
'<(webrtc_modules_dir)/video_render/android/java/src/org/webrtc/videoengine/ViESurfaceRenderer.java',
'<(webrtc_modules_dir)/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioUtils.java',
'<(webrtc_modules_dir)/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioRecord.java',
'<(webrtc_modules_dir)/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioTrack.java',
],


@ -174,8 +174,6 @@ public class MediaEngine implements VideoDecodeEncodeObserver {
cameras[info.facing] = info;
}
setDefaultCamera();
check(voe.setSpeakerVolume(volumeLevel) == 0,
"Failed setSpeakerVolume");
check(voe.setAecmMode(VoiceEngine.AecmModes.SPEAKERPHONE, false) == 0,
"VoE set Aecm speakerphone mode failed");
check(vie.setKeyFrameRequestMethod(videoChannel,


@ -230,5 +230,5 @@ public class WebRTCDemo extends Activity implements MenuStateProvider {
main.toggleStart();
handler.postDelayed(startOrStopCallback, getCallRestartPeriodicity());
}
};
}
};
}


@ -29,8 +29,8 @@ template <class InputType, class OutputType>
class OpenSlRunnerTemplate {
public:
OpenSlRunnerTemplate()
: output_(0),
input_() {
: output_(),
input_(&output_) {
output_.AttachAudioBuffer(&audio_buffer_);
if (output_.Init() != 0) {
assert(false);


@ -18,6 +18,9 @@ enum {
kBitsPerSample = 16,
kNumChannels = 1,
kDefaultBufSizeInSamples = kDefaultSampleRate * 10 / 1000,
// Number of bytes per audio frame.
// Example: 16-bit PCM in mono => 1*(16/8)=2 [bytes/frame]
kBytesPerFrame = kNumChannels * (kBitsPerSample / 8),
};
class PlayoutDelayProvider {


@ -34,10 +34,10 @@ class AudioDeviceTemplate : public AudioDeviceGeneric {
InputType::ClearAndroidAudioDeviceObjects();
}
// TODO(henrika): remove id
explicit AudioDeviceTemplate(const int32_t id)
: output_(id),
// TODO(henrika): provide proper delay estimate using input_(&output_).
input_() {
: output_(),
input_(&output_) {
}
virtual ~AudioDeviceTemplate() {
@ -58,11 +58,11 @@ class AudioDeviceTemplate : public AudioDeviceGeneric {
}
bool Initialized() const {
return output_.Initialized();
return true;
}
int16_t PlayoutDevices() {
return output_.PlayoutDevices();
return 1;
}
int16_t RecordingDevices() {
@ -73,23 +73,28 @@ class AudioDeviceTemplate : public AudioDeviceGeneric {
uint16_t index,
char name[kAdmMaxDeviceNameSize],
char guid[kAdmMaxGuidSize]) {
return output_.PlayoutDeviceName(index, name, guid);
FATAL() << "Should never be called";
return -1;
}
int32_t RecordingDeviceName(
uint16_t index,
char name[kAdmMaxDeviceNameSize],
char guid[kAdmMaxGuidSize]) {
FATAL() << "Should never be called";
return -1;
}
int32_t SetPlayoutDevice(uint16_t index) {
return output_.SetPlayoutDevice(index);
// OK to use but it has no effect currently since device selection is
// done using Andoid APIs instead.
return 0;
}
int32_t SetPlayoutDevice(
AudioDeviceModule::WindowsDeviceType device) {
return output_.SetPlayoutDevice(device);
FATAL() << "Should never be called";
return -1;
}
int32_t SetRecordingDevice(uint16_t index) {
@ -106,7 +111,8 @@ class AudioDeviceTemplate : public AudioDeviceGeneric {
int32_t PlayoutIsAvailable(
bool& available) { // NOLINT
return output_.PlayoutIsAvailable(available);
available = true;
return 0;
}
int32_t InitPlayout() {
@ -175,17 +181,16 @@ class AudioDeviceTemplate : public AudioDeviceGeneric {
int32_t WaveOutVolume(
uint16_t& volumeLeft, // NOLINT
uint16_t& volumeRight) const { // NOLINT
WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, 0,
" API call not supported on this platform");
FATAL() << "Should never be called";
return -1;
}
int32_t InitSpeaker() {
return output_.InitSpeaker();
return 0;
}
bool SpeakerIsInitialized() const {
return output_.SpeakerIsInitialized();
return true;
}
int32_t InitMicrophone() {
@ -198,31 +203,42 @@ class AudioDeviceTemplate : public AudioDeviceGeneric {
int32_t SpeakerVolumeIsAvailable(
bool& available) { // NOLINT
return output_.SpeakerVolumeIsAvailable(available);
available = false;
FATAL() << "Should never be called";
return -1;
}
// TODO(henrika): add support if/when needed.
int32_t SetSpeakerVolume(uint32_t volume) {
return output_.SetSpeakerVolume(volume);
FATAL() << "Should never be called";
return -1;
}
// TODO(henrika): add support if/when needed.
int32_t SpeakerVolume(
uint32_t& volume) const { // NOLINT
return output_.SpeakerVolume(volume);
FATAL() << "Should never be called";
return -1;
}
// TODO(henrika): add support if/when needed.
int32_t MaxSpeakerVolume(
uint32_t& maxVolume) const { // NOLINT
return output_.MaxSpeakerVolume(maxVolume);
FATAL() << "Should never be called";
return -1;
}
// TODO(henrika): add support if/when needed.
int32_t MinSpeakerVolume(
uint32_t& minVolume) const { // NOLINT
return output_.MinSpeakerVolume(minVolume);
FATAL() << "Should never be called";
return -1;
}
int32_t SpeakerVolumeStepSize(
uint16_t& stepSize) const { // NOLINT
return output_.SpeakerVolumeStepSize(stepSize);
FATAL() << "Should never be called";
return -1;
}
int32_t MicrophoneVolumeIsAvailable(
@ -263,16 +279,19 @@ class AudioDeviceTemplate : public AudioDeviceGeneric {
int32_t SpeakerMuteIsAvailable(
bool& available) { // NOLINT
return output_.SpeakerMuteIsAvailable(available);
FATAL() << "Should never be called";
return -1;
}
int32_t SetSpeakerMute(bool enable) {
return output_.SetSpeakerMute(enable);
FATAL() << "Should never be called";
return -1;
}
int32_t SpeakerMute(
bool& enabled) const { // NOLINT
return output_.SpeakerMute(enabled);
FATAL() << "Should never be called";
return -1;
}
int32_t MicrophoneMuteIsAvailable(
@ -311,16 +330,19 @@ class AudioDeviceTemplate : public AudioDeviceGeneric {
int32_t StereoPlayoutIsAvailable(
bool& available) { // NOLINT
return output_.StereoPlayoutIsAvailable(available);
available = false;
return 0;
}
int32_t SetStereoPlayout(bool enable) {
return output_.SetStereoPlayout(enable);
return -1;
}
int32_t StereoPlayout(
bool& enabled) const { // NOLINT
return output_.StereoPlayout(enabled);
enabled = false;
FATAL() << "Should never be called";
return -1;
}
int32_t StereoRecordingIsAvailable(
@ -342,13 +364,15 @@ class AudioDeviceTemplate : public AudioDeviceGeneric {
int32_t SetPlayoutBuffer(
const AudioDeviceModule::BufferType type,
uint16_t sizeMS) {
return output_.SetPlayoutBuffer(type, sizeMS);
FATAL() << "Should never be called";
return -1;
}
int32_t PlayoutBuffer(
AudioDeviceModule::BufferType& type,
uint16_t& sizeMS) const { // NOLINT
return output_.PlayoutBuffer(type, sizeMS);
FATAL() << "Should never be called";
return -1;
}
int32_t PlayoutDelay(
@ -368,11 +392,11 @@ class AudioDeviceTemplate : public AudioDeviceGeneric {
}
bool PlayoutWarning() const {
return output_.PlayoutWarning();
return false;
}
bool PlayoutError() const {
return output_.PlayoutError();
return false;
}
bool RecordingWarning() const {
@ -383,13 +407,9 @@ class AudioDeviceTemplate : public AudioDeviceGeneric {
return false;
}
void ClearPlayoutWarning() {
return output_.ClearPlayoutWarning();
}
void ClearPlayoutWarning() {}
void ClearPlayoutError() {
return output_.ClearPlayoutError();
}
void ClearPlayoutError() {}
void ClearRecordingWarning() {}
@ -401,18 +421,22 @@ class AudioDeviceTemplate : public AudioDeviceGeneric {
input_.AttachAudioBuffer(audioBuffer);
}
// TODO(henrika): remove
int32_t SetPlayoutSampleRate(
const uint32_t samplesPerSec) {
return output_.SetPlayoutSampleRate(samplesPerSec);
FATAL() << "Should never be called";
return -1;
}
int32_t SetLoudspeakerStatus(bool enable) {
return output_.SetLoudspeakerStatus(enable);
FATAL() << "Should never be called";
return -1;
}
int32_t GetLoudspeakerStatus(
bool& enable) const { // NOLINT
return output_.GetLoudspeakerStatus(enable);
FATAL() << "Should never be called";
return -1;
}
bool BuiltInAECIsAvailable() const {


@ -25,10 +25,6 @@
namespace webrtc {
// Number of bytes per audio frame.
// Example: 16-bit PCM in mono => 1*(16/8)=2 [bytes/frame]
static const int kBytesPerFrame = kNumChannels * (kBitsPerSample / 8);
// We are unable to obtain exact measurements of the hardware delay on Android.
// Instead, a lower bound (based on measurements) is used.
// TODO(henrika): is it possible to improve this?
@ -59,6 +55,8 @@ void AudioRecordJni::SetAndroidAudioDeviceObjects(void* jvm, void* env,
jni, "org/webrtc/voiceengine/WebRtcAudioRecord");
g_audio_record_class = reinterpret_cast<jclass>(
NewGlobalRef(jni, local_class));
jni->DeleteLocalRef(local_class);
CHECK_EXCEPTION(jni);
// Register native methods with the WebRtcAudioRecord class. These methods
// are declared private native in WebRtcAudioRecord.java.
@ -86,15 +84,17 @@ void AudioRecordJni::ClearAndroidAudioDeviceObjects() {
g_jvm = NULL;
}
AudioRecordJni::AudioRecordJni()
: j_audio_record_(NULL),
AudioRecordJni::AudioRecordJni(PlayoutDelayProvider* delay_provider)
: delay_provider_(delay_provider),
j_audio_record_(NULL),
direct_buffer_address_(NULL),
direct_buffer_capacity_in_bytes_(0),
frames_per_buffer_(0),
initialized_(false),
recording_(false),
audio_device_buffer_(NULL),
sample_rate_hz_(0) {
sample_rate_hz_(0),
playout_delay_in_milliseconds_(0) {
ALOGD("ctor%s", GetThreadInfo().c_str());
CHECK(HasDeviceObjects());
CreateJavaInstance();
@ -197,7 +197,6 @@ int32_t AudioRecordJni::StopRecording() {
initialized_ = false;
recording_ = false;
return 0;
}
int32_t AudioRecordJni::RecordingDelay(uint16_t& delayMS) const { // NOLINT
@ -268,7 +267,7 @@ void AudioRecordJni::OnCacheDirectBufferAddress(
void JNICALL AudioRecordJni::DataIsRecorded(
JNIEnv* env, jobject obj, jint length, jlong nativeAudioRecord) {
webrtc::AudioRecordJni* this_object =
reinterpret_cast<webrtc::AudioRecordJni*> (nativeAudioRecord );
reinterpret_cast<webrtc::AudioRecordJni*> (nativeAudioRecord);
this_object->OnDataIsRecorded(length);
}
@ -276,10 +275,15 @@ void JNICALL AudioRecordJni::DataIsRecorded(
// the thread is 'AudioRecordThread'.
void AudioRecordJni::OnDataIsRecorded(int length) {
DCHECK(thread_checker_java_.CalledOnValidThread());
if (playout_delay_in_milliseconds_ == 0) {
playout_delay_in_milliseconds_ = delay_provider_->PlayoutDelayMs();
ALOGD("cached playout delay: %d", playout_delay_in_milliseconds_);
}
audio_device_buffer_->SetRecordedBuffer(direct_buffer_address_,
frames_per_buffer_);
// TODO(henrika): improve playout delay estimate.
audio_device_buffer_->SetVQEData(0, kHardwareDelayInMilliseconds, 0);
audio_device_buffer_->SetVQEData(playout_delay_in_milliseconds_,
kHardwareDelayInMilliseconds,
0 /* clockDrift */);
audio_device_buffer_->DeliverRecordedData();
}


@ -41,7 +41,7 @@ class PlayoutDelayProvider;
// CHECK that the calling thread is attached to a Java VM.
//
// All methods use AttachThreadScoped to attach to a Java VM if needed and then
// detach when method goes out of scope. We do so beacuse this class does not
// detach when method goes out of scope. We do so because this class does not
// own the thread is is created and called on and other objects on the same
// thread might put us in a detached state at any time.
class AudioRecordJni {
@ -57,7 +57,7 @@ class AudioRecordJni {
// existing global references and enables garbage collection.
static void ClearAndroidAudioDeviceObjects();
AudioRecordJni();
AudioRecordJni(PlayoutDelayProvider* delay_provider);
~AudioRecordJni();
int32_t Init();
@ -118,10 +118,11 @@ class AudioRecordJni {
// thread in Java. Detached during construction of this object.
rtc::ThreadChecker thread_checker_java_;
// Should return the current playout delay.
// TODO(henrika): fix on Android. Reports zero today.
// PlayoutDelayProvider* delay_provider_;
// Returns the current playout delay.
// TODO(henrika): this value is currently fixed since initial tests have
// shown that the estimated delay varies very little over time. It might be
// possible to make improvements in this area.
PlayoutDelayProvider* delay_provider_;
// The Java WebRtcAudioRecord instance.
jobject j_audio_record_;
@ -151,6 +152,8 @@ class AudioRecordJni {
// and audio configuration.
int sample_rate_hz_;
// Contains a delay estimate from the playout side given by |delay_provider_|.
int playout_delay_in_milliseconds_;
};
} // namespace webrtc

File diff suppressed because it is too large.


@ -13,161 +13,139 @@
#include <jni.h>
#include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
#include "webrtc/base/thread_checker.h"
#include "webrtc/modules/audio_device/android/audio_common.h"
#include "webrtc/modules/audio_device/include/audio_device_defines.h"
#include "webrtc/modules/audio_device/audio_device_generic.h"
#include "webrtc/modules/utility/interface/helpers_android.h"
namespace webrtc {
class EventWrapper;
class ThreadWrapper;
const uint32_t N_PLAY_SAMPLES_PER_SEC = 16000; // Default is 16 kHz
const uint32_t N_PLAY_CHANNELS = 1; // default is mono playout
// Implements 16-bit mono PCM audio output support for Android using the Java
// AudioTrack interface. Most of the work is done by its Java counterpart in
// WebRtcAudioTrack.java. This class is created and lives on a thread in
// C++-land, but decoded audio buffers are requested on a high-priority
// thread managed by the Java class.
//
// An instance must be created and destroyed on one and the same thread.
// All public methods must also be called on the same thread. A thread checker
// will DCHECK if any method is called on an invalid thread.
// It is possible to call the two static methods (SetAndroidAudioDeviceObjects
// and ClearAndroidAudioDeviceObjects) from a different thread but both will
// CHECK that the calling thread is attached to a Java VM.
//
// All methods use AttachThreadScoped to attach to a Java VM if needed and then
// detach when method goes out of scope. We do so because this class does not
// own the thread is is created and called on and other objects on the same
// thread might put us in a detached state at any time.
class AudioTrackJni : public PlayoutDelayProvider {
public:
static int32_t SetAndroidAudioDeviceObjects(void* javaVM, void* env,
void* context);
// Use the invocation API to allow the native application to use the JNI
// interface pointer to access VM features.
// |jvm| denotes the Java VM, |env| is a pointer to the JNI interface pointer
// and |context| corresponds to android.content.Context in Java.
// This method also sets a global jclass object, |g_audio_track_class| for
// the "org/webrtc/voiceengine/WebRtcAudioTrack"-class.
static void SetAndroidAudioDeviceObjects(void* jvm, void* env, void* context);
// Always call this method after the object has been destructed. It deletes
// existing global references and enables garbage collection.
static void ClearAndroidAudioDeviceObjects();
explicit AudioTrackJni(const int32_t id);
virtual ~AudioTrackJni();
// Main initializaton and termination
AudioTrackJni();
~AudioTrackJni();
int32_t Init();
int32_t Terminate();
bool Initialized() const { return _initialized; }
// Device enumeration
int16_t PlayoutDevices() { return 1; } // There is one device only.
int32_t PlayoutDeviceName(uint16_t index,
char name[kAdmMaxDeviceNameSize],
char guid[kAdmMaxGuidSize]);
// Device selection
int32_t SetPlayoutDevice(uint16_t index);
int32_t SetPlayoutDevice(
AudioDeviceModule::WindowsDeviceType device);
// Audio transport initialization
int32_t PlayoutIsAvailable(bool& available); // NOLINT
int32_t InitPlayout();
bool PlayoutIsInitialized() const { return _playIsInitialized; }
bool PlayoutIsInitialized() const { return initialized_; }
// Audio transport control
int32_t StartPlayout();
int32_t StopPlayout();
bool Playing() const { return _playing; }
bool Playing() const { return playing_; }
// Audio mixer initialization
int32_t InitSpeaker();
bool SpeakerIsInitialized() const { return _speakerIsInitialized; }
int32_t PlayoutDelay(uint16_t& delayMS) const;
// Speaker volume controls
int32_t SpeakerVolumeIsAvailable(bool& available); // NOLINT
int32_t SetSpeakerVolume(uint32_t volume);
int32_t SpeakerVolume(uint32_t& volume) const; // NOLINT
int32_t MaxSpeakerVolume(uint32_t& maxVolume) const; // NOLINT
int32_t MinSpeakerVolume(uint32_t& minVolume) const; // NOLINT
int32_t SpeakerVolumeStepSize(uint16_t& stepSize) const; // NOLINT
// Speaker mute control
int32_t SpeakerMuteIsAvailable(bool& available); // NOLINT
int32_t SetSpeakerMute(bool enable);
int32_t SpeakerMute(bool& enabled) const; // NOLINT
// Stereo support
int32_t StereoPlayoutIsAvailable(bool& available); // NOLINT
int32_t SetStereoPlayout(bool enable);
int32_t StereoPlayout(bool& enabled) const; // NOLINT
// Delay information and control
int32_t SetPlayoutBuffer(const AudioDeviceModule::BufferType type,
uint16_t sizeMS);
int32_t PlayoutBuffer(AudioDeviceModule::BufferType& type, // NOLINT
uint16_t& sizeMS) const;
int32_t PlayoutDelay(uint16_t& delayMS) const; // NOLINT
// Attach audio buffer
void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer);
int32_t SetPlayoutSampleRate(const uint32_t samplesPerSec);
// Error and warning information
bool PlayoutWarning() const;
bool PlayoutError() const;
void ClearPlayoutWarning();
void ClearPlayoutError();
// Speaker audio routing
int32_t SetLoudspeakerStatus(bool enable);
int32_t GetLoudspeakerStatus(bool& enable) const; // NOLINT
protected:
// TODO(henrika): improve this estimate.
virtual int PlayoutDelayMs() { return 0; }
// PlayoutDelayProvider implementation.
virtual int PlayoutDelayMs();
private:
void Lock() EXCLUSIVE_LOCK_FUNCTION(_critSect) {
_critSect.Enter();
}
void UnLock() UNLOCK_FUNCTION(_critSect) {
_critSect.Leave();
}
// Called from Java side so we can cache the address of the Java-manged
// |byte_buffer| in |direct_buffer_address_|. The size of the buffer
// is also stored in |direct_buffer_capacity_in_bytes_|.
// This method will be called by the WebRtcAudioTrack constructor, i.e.,
// on the same thread that this object is created on.
static void JNICALL CacheDirectBufferAddress(
JNIEnv* env, jobject obj, jobject byte_buffer, jlong nativeAudioTrack);
void OnCacheDirectBufferAddress(JNIEnv* env, jobject byte_buffer);
int32_t InitJavaResources();
int32_t InitSampleRate();
// Called periodically by the Java based WebRtcAudioTrack object when
// playout has started. Each call indicates that |length| new bytes should
// be written to the memory area |direct_buffer_address_| for playout.
// This method is called on a high-priority thread from Java. The name of
// the thread is 'AudioTrackThread'.
static void JNICALL GetPlayoutData(
JNIEnv* env, jobject obj, jint length, jlong nativeAudioTrack);
void OnGetPlayoutData(int length);
static bool PlayThreadFunc(void*);
bool PlayThreadProcess();
// Returns true if SetAndroidAudioDeviceObjects() has been called
// successfully.
bool HasDeviceObjects();
// TODO(leozwang): Android holds only one JVM, all these jni handling
// will be consolidated into a single place to make it consistant and
// reliable. Chromium has a good example at base/android.
static JavaVM* globalJvm;
static JNIEnv* globalJNIEnv;
static jobject globalContext;
static jclass globalScClass;
// Called from the constructor. Defines the |j_audio_track_| member.
void CreateJavaInstance();
JavaVM* _javaVM; // denotes a Java VM
JNIEnv* _jniEnvPlay; // The JNI env for playout thread
jclass _javaScClass; // AudioDeviceAndroid class
jobject _javaScObj; // AudioDeviceAndroid object
jobject _javaPlayBuffer;
void* _javaDirectPlayBuffer; // Direct buffer pointer to play buffer
jmethodID _javaMidPlayAudio; // Method ID of play in AudioDeviceAndroid
// Returns the native, or optimal, sample rate reported by the audio input
// device.
int GetNativeSampleRate();
AudioDeviceBuffer* _ptrAudioBuffer;
CriticalSectionWrapper& _critSect;
int32_t _id;
bool _initialized;
// Stores thread ID in constructor.
// We can then use ThreadChecker::CalledOnValidThread() to ensure that
// other methods are called from the same thread.
rtc::ThreadChecker thread_checker_;
EventWrapper& _timeEventPlay;
EventWrapper& _playStartStopEvent;
ThreadWrapper* _ptrThreadPlay;
uint32_t _playThreadID;
bool _playThreadIsInitialized;
bool _shutdownPlayThread;
bool _playoutDeviceIsSpecified;
// Stores thread ID in first call to OnGetPlayoutData() from high-priority
// thread in Java. Detached during construction of this object.
rtc::ThreadChecker thread_checker_java_;
bool _playing;
bool _playIsInitialized;
bool _speakerIsInitialized;
// The Java WebRtcAudioTrack instance.
jobject j_audio_track_;
bool _startPlay;
// Cached copy of address to direct audio buffer owned by |j_audio_track_|.
void* direct_buffer_address_;
uint16_t _playWarning;
uint16_t _playError;
// Number of bytes in the direct audio buffer owned by |j_audio_track_|.
int direct_buffer_capacity_in_bytes_;
uint16_t _delayPlayout;
// Number of audio frames per audio buffer. Each audio frame corresponds to
// one sample of PCM mono data at 16 bits per sample. Hence, each audio
// frame contains 2 bytes (given that the Java layer only supports mono).
// Example: 480 for 48000 Hz or 441 for 44100 Hz.
int frames_per_buffer_;
uint16_t _samplingFreqOut; // Sampling frequency for Speaker
uint32_t _maxSpeakerVolume; // The maximum speaker volume value
bool _loudSpeakerOn;
bool initialized_;
bool playing_;
// Raw pointer handle provided to us in AttachAudioBuffer(). Owned by the
// AudioDeviceModuleImpl class and called by AudioDeviceModuleImpl::Create().
// The AudioDeviceBuffer is a member of the AudioDeviceModuleImpl instance
// and therefore outlives this object.
AudioDeviceBuffer* audio_device_buffer_;
// Native sample rate set in AttachAudioBuffer() which uses JNI to ask the
// Java layer for the best possible sample rate for this particular device
// and audio configuration.
int sample_rate_hz_;
// Estimated playout delay caused by buffering in the Java based audio track.
// We are using a fixed value here since measurements have shown that the
// variations are very small (~10ms) and it is not worth the extra complexity
// to update this estimate on a continuous basis.
int delay_in_milliseconds_;
};
} // namespace webrtc


@ -1,5 +1,5 @@
/*
* Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
* Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
@ -11,12 +11,8 @@
package org.webrtc.voiceengine;
import java.lang.System;
import java.lang.Thread;
import java.nio.ByteBuffer;
import java.util.concurrent.TimeUnit;
import java.util.Collections;
import java.util.HashSet;
import java.util.Set;
import android.content.Context;
import android.media.AudioFormat;
@ -36,9 +32,6 @@ class WebRtcAudioRecord {
private static final String TAG = "WebRtcAudioRecord";
// Use 44.1kHz as the default sampling rate.
private static final int SAMPLE_RATE_HZ = 44100;
// Mono recording is default.
private static final int CHANNELS = 1;
@ -71,16 +64,6 @@ class WebRtcAudioRecord {
private AcousticEchoCanceler aec = null;
private boolean useBuiltInAEC = false;
private final Set<Long> threadIds = new HashSet<Long>();
private static boolean runningOnJellyBeanOrHigher() {
return Build.VERSION.SDK_INT >= Build.VERSION_CODES.JELLY_BEAN;
}
private static boolean runningOnJellyBeanMR1OrHigher() {
return Build.VERSION.SDK_INT >= Build.VERSION_CODES.JELLY_BEAN_MR1;
}
/**
* Audio thread which keeps calling ByteBuffer.read() waiting for audio
* to be recorded. Feeds recorded data to the native counterpart as a
@ -97,16 +80,15 @@ class WebRtcAudioRecord {
@Override
public void run() {
Process.setThreadPriority(Process.THREAD_PRIORITY_URGENT_AUDIO);
DoLog("AudioRecordThread" + getThreadInfo());
AddThreadId();
Logd("AudioRecordThread" + WebRtcAudioUtils.getThreadInfo());
try {
audioRecord.startRecording();
} catch (IllegalStateException e) {
DoLogErr("AudioRecord.startRecording failed: " + e.getMessage());
Loge("AudioRecord.startRecording failed: " + e.getMessage());
return;
}
assertIsTrue(audioRecord.getRecordingState()
assertTrue(audioRecord.getRecordingState()
== AudioRecord.RECORDSTATE_RECORDING);
long lastTime = System.nanoTime();
@ -115,7 +97,7 @@ class WebRtcAudioRecord {
if (bytesRead == byteBuffer.capacity()) {
nativeDataIsRecorded(bytesRead, nativeAudioRecord);
} else {
DoLogErr("AudioRecord.read failed: " + bytesRead);
Loge("AudioRecord.read failed: " + bytesRead);
if (bytesRead == AudioRecord.ERROR_INVALID_OPERATION) {
keepAlive = false;
}
@ -125,16 +107,15 @@ class WebRtcAudioRecord {
long durationInMs =
TimeUnit.NANOSECONDS.toMillis((nowTime - lastTime));
lastTime = nowTime;
DoLog("bytesRead[" + durationInMs + "] " + bytesRead);
Logd("bytesRead[" + durationInMs + "] " + bytesRead);
}
}
try {
audioRecord.stop();
} catch (IllegalStateException e) {
DoLogErr("AudioRecord.stop failed: " + e.getMessage());
Loge("AudioRecord.stop failed: " + e.getMessage());
}
RemoveThreadId();
}
public void joinThread() {
@ -150,43 +131,34 @@ class WebRtcAudioRecord {
}
WebRtcAudioRecord(Context context, long nativeAudioRecord) {
DoLog("ctor" + getThreadInfo());
Logd("ctor" + WebRtcAudioUtils.getThreadInfo());
this.context = context;
this.nativeAudioRecord = nativeAudioRecord;
audioManager = ((AudioManager) context.getSystemService(
Context.AUDIO_SERVICE));
audioManager = (AudioManager) context.getSystemService(
Context.AUDIO_SERVICE);
sampleRate = GetNativeSampleRate();
bytesPerBuffer = BYTES_PER_FRAME * (sampleRate / BUFFERS_PER_SECOND);
framesPerBuffer = sampleRate / BUFFERS_PER_SECOND;
byteBuffer = byteBuffer.allocateDirect(bytesPerBuffer);
DoLog("byteBuffer.capacity: " + byteBuffer.capacity());
Logd("byteBuffer.capacity: " + byteBuffer.capacity());
// Rather than passing the ByteBuffer with every callback (requiring
// the potentially expensive GetDirectBufferAddress) we simply have the
// the native class cache the address to the memory once.
nativeCacheDirectBufferAddress(byteBuffer, nativeAudioRecord);
AddThreadId();
if (DEBUG) {
WebRtcAudioUtils.logDeviceInfo(TAG);
}
}
/**
* Returns the native or optimal input sample rate for this device's
* primary input stream. Unit is in Hz.
* Note that we actually query the output device but the same result is
* also valid for input.
*/
private int GetNativeSampleRate() {
if (!runningOnJellyBeanMR1OrHigher()) {
return SAMPLE_RATE_HZ;
}
String sampleRateString = audioManager.getProperty(
AudioManager.PROPERTY_OUTPUT_SAMPLE_RATE);
return (sampleRateString == null) ?
SAMPLE_RATE_HZ : Integer.parseInt(sampleRateString);
return WebRtcAudioUtils.GetNativeSampleRate(audioManager);
}
public static boolean BuiltInAECIsAvailable() {
// AcousticEchoCanceler was added in API level 16 (Jelly Bean).
if (!runningOnJellyBeanOrHigher()) {
if (!WebRtcAudioUtils.runningOnJellyBeanOrHigher()) {
return false;
}
// TODO(henrika): add black-list based on device name. We could also
@ -196,10 +168,9 @@ class WebRtcAudioRecord {
}
private boolean EnableBuiltInAEC(boolean enable) {
DoLog("EnableBuiltInAEC(" + enable + ')');
AddThreadId();
Logd("EnableBuiltInAEC(" + enable + ')');
// AcousticEchoCanceler was added in API level 16 (Jelly Bean).
if (!runningOnJellyBeanOrHigher()) {
if (!WebRtcAudioUtils.runningOnJellyBeanOrHigher()) {
return false;
}
// Store the AEC state.
@ -208,17 +179,16 @@ class WebRtcAudioRecord {
if (aec != null) {
int ret = aec.setEnabled(enable);
if (ret != AudioEffect.SUCCESS) {
DoLogErr("AcousticEchoCanceler.setEnabled failed");
Loge("AcousticEchoCanceler.setEnabled failed");
return false;
}
DoLog("AcousticEchoCanceler.getEnabled: " + aec.getEnabled());
Logd("AcousticEchoCanceler.getEnabled: " + aec.getEnabled());
}
return true;
}
private int InitRecording(int sampleRate) {
DoLog("InitRecording(sampleRate=" + sampleRate + ")");
AddThreadId();
Logd("InitRecording(sampleRate=" + sampleRate + ")");
// Get the minimum buffer size required for the successful creation of
// an AudioRecord object, in byte units.
// Note that this size doesn't guarantee a smooth recording under load.
@ -227,19 +197,16 @@ class WebRtcAudioRecord {
sampleRate,
AudioFormat.CHANNEL_IN_MONO,
AudioFormat.ENCODING_PCM_16BIT);
DoLog("AudioRecord.getMinBufferSize: " + minBufferSize);
Logd("AudioRecord.getMinBufferSize: " + minBufferSize);
if (aec != null) {
aec.release();
aec = null;
}
if (audioRecord != null) {
audioRecord.release();
audioRecord = null;
}
assertTrue(audioRecord == null);
int bufferSizeInBytes = Math.max(byteBuffer.capacity(), minBufferSize);
DoLog("bufferSizeInBytes: " + bufferSizeInBytes);
Logd("bufferSizeInBytes: " + bufferSizeInBytes);
try {
audioRecord = new AudioRecord(AudioSource.VOICE_COMMUNICATION,
sampleRate,
@ -248,105 +215,76 @@ class WebRtcAudioRecord {
bufferSizeInBytes);
} catch (IllegalArgumentException e) {
DoLog(e.getMessage());
Logd(e.getMessage());
return -1;
}
assertIsTrue(audioRecord.getState() == AudioRecord.STATE_INITIALIZED);
assertTrue(audioRecord.getState() == AudioRecord.STATE_INITIALIZED);
DoLog("AudioRecord " +
Logd("AudioRecord " +
"session ID: " + audioRecord.getAudioSessionId() + ", " +
"audio format: " + audioRecord.getAudioFormat() + ", " +
"channels: " + audioRecord.getChannelCount() + ", " +
"sample rate: " + audioRecord.getSampleRate());
DoLog("AcousticEchoCanceler.isAvailable: " + BuiltInAECIsAvailable());
Logd("AcousticEchoCanceler.isAvailable: " + BuiltInAECIsAvailable());
if (!BuiltInAECIsAvailable()) {
return framesPerBuffer;
}
aec = AcousticEchoCanceler.create(audioRecord.getAudioSessionId());
if (aec == null) {
DoLogErr("AcousticEchoCanceler.create failed");
Loge("AcousticEchoCanceler.create failed");
return -1;
}
int ret = aec.setEnabled(useBuiltInAEC);
if (ret != AudioEffect.SUCCESS) {
DoLogErr("AcousticEchoCanceler.setEnabled failed");
Loge("AcousticEchoCanceler.setEnabled failed");
return -1;
}
Descriptor descriptor = aec.getDescriptor();
DoLog("AcousticEchoCanceler " +
Logd("AcousticEchoCanceler " +
"name: " + descriptor.name + ", " +
"implementor: " + descriptor.implementor + ", " +
"uuid: " + descriptor.uuid);
DoLog("AcousticEchoCanceler.getEnabled: " + aec.getEnabled());
Logd("AcousticEchoCanceler.getEnabled: " + aec.getEnabled());
return framesPerBuffer;
}
private boolean StartRecording() {
DoLog("StartRecording");
AddThreadId();
if (audioRecord == null) {
DoLogErr("start() called before init()");
return false;
}
if (audioThread != null) {
DoLogErr("start() was already called");
return false;
}
Logd("StartRecording");
assertTrue(audioRecord != null);
assertTrue(audioThread == null);
audioThread = new AudioRecordThread("AudioRecordJavaThread");
audioThread.start();
return true;
}
private boolean StopRecording() {
DoLog("StopRecording");
AddThreadId();
if (audioThread == null) {
DoLogErr("start() was never called, or stop() was already called");
return false;
}
Logd("StopRecording");
assertTrue(audioThread != null);
audioThread.joinThread();
audioThread = null;
if (aec != null) {
aec.release();
aec = null;
}
if (audioRecord != null) {
audioRecord.release();
audioRecord = null;
}
audioRecord.release();
audioRecord = null;
return true;
}
private void DoLog(String msg) {
Log.d(TAG, msg);
}
private void DoLogErr(String msg) {
Log.e(TAG, msg);
}
/** Helper method for building a string of thread information.*/
private static String getThreadInfo() {
return "@[name=" + Thread.currentThread().getName()
+ ", id=" + Thread.currentThread().getId() + "]";
}
/** Helper method which throws an exception when an assertion has failed. */
private static void assertIsTrue(boolean condition) {
/** Helper method which throws an exception when an assertion has failed. */
private static void assertTrue(boolean condition) {
if (!condition) {
throw new AssertionError("Expected condition to be true");
}
}
private void AddThreadId() {
threadIds.add(Thread.currentThread().getId());
DoLog("threadIds: " + threadIds + " (#threads=" + threadIds.size() + ")");
private static void Logd(String msg) {
Log.d(TAG, msg);
}
private void RemoveThreadId() {
threadIds.remove(Thread.currentThread().getId());
DoLog("threadIds: " + threadIds + " (#threads=" + threadIds.size() + ")");
private static void Loge(String msg) {
Log.e(TAG, msg);
}
private native void nativeCacheDirectBufferAddress(


@ -1,5 +1,5 @@
/*
* Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
* Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
@ -10,300 +10,234 @@
package org.webrtc.voiceengine;
import java.lang.Thread;
import java.nio.ByteBuffer;
import java.util.concurrent.locks.ReentrantLock;
import android.content.Context;
import android.media.AudioFormat;
import android.media.AudioManager;
import android.media.AudioRecord;
import android.media.AudioTrack;
import android.os.Process;
import android.util.Log;
class WebRtcAudioTrack {
private AudioTrack _audioTrack = null;
private static final boolean DEBUG = false;
private Context _context;
private AudioManager _audioManager;
private static final String TAG = "WebRtcAudioTrack";
private ByteBuffer _playBuffer;
private byte[] _tempBufPlay;
// Mono playout is default.
// TODO(henrika): add stereo support.
private static final int CHANNELS = 1;
private final ReentrantLock _playLock = new ReentrantLock();
// Default audio data format is PCM 16 bit per sample.
// Guaranteed to be supported by all devices.
private static final int BITS_PER_SAMPLE = 16;
private boolean _doPlayInit = true;
private boolean _doRecInit = true;
private boolean _isRecording = false;
private boolean _isPlaying = false;
// Number of bytes per audio frame.
// Example: 16-bit PCM in stereo => 2*(16/8)=4 [bytes/frame]
private static final int BYTES_PER_FRAME = CHANNELS * (BITS_PER_SAMPLE / 8);
private int _bufferedPlaySamples = 0;
private int _playPosition = 0;
// Requested size of each recorded buffer provided to the client.
private static final int CALLBACK_BUFFER_SIZE_MS = 10;
WebRtcAudioTrack() {
// Average number of callbacks per second.
private static final int BUFFERS_PER_SECOND = 1000 / CALLBACK_BUFFER_SIZE_MS;
private ByteBuffer byteBuffer;
private final int sampleRate;
private final long nativeAudioTrack;
private final Context context;
private final AudioManager audioManager;
private AudioTrack audioTrack = null;
private AudioTrackThread audioThread = null;
/**
* Audio thread which keeps calling AudioTrack.write() to stream audio.
* Data is periodically acquired from the native WebRTC layer using the
* nativeGetPlayoutData callback function.
* This thread uses a Process.THREAD_PRIORITY_URGENT_AUDIO priority.
*/
private class AudioTrackThread extends Thread {
private volatile boolean keepAlive = true;
public AudioTrackThread(String name) {
super(name);
}
@Override
public void run() {
Process.setThreadPriority(Process.THREAD_PRIORITY_URGENT_AUDIO);
Logd("AudioTrackThread" + WebRtcAudioUtils.getThreadInfo());
try {
// In MODE_STREAM mode we can optionally prime the output buffer by
// writing up to bufferSizeInBytes (from constructor) before starting.
// This priming will avoid an immediate underrun, but is not required.
// TODO(henrika): initial tests have shown that priming is not required.
audioTrack.play();
assertTrue(audioTrack.getPlayState() == AudioTrack.PLAYSTATE_PLAYING);
} catch (IllegalStateException e) {
Loge("AudioTrack.play failed: " + e.getMessage());
return;
}
// Fixed size in bytes of each 10ms block of audio data that we ask for
// using callbacks to the native WebRTC client.
final int sizeInBytes = byteBuffer.capacity();
while (keepAlive) {
// Get 10ms of PCM data from the native WebRTC client. Audio data is
// written into the common ByteBuffer using the address that was
// cached at construction.
nativeGetPlayoutData(sizeInBytes, nativeAudioTrack);
// Write data until all data has been written to the audio sink.
// Upon return, the buffer position will have been advanced to reflect
// the amount of data that was successfully written to the AudioTrack.
assertTrue(sizeInBytes <= byteBuffer.remaining());
int bytesWritten = audioTrack.write(byteBuffer,
sizeInBytes,
AudioTrack.WRITE_BLOCKING);
if (bytesWritten != sizeInBytes) {
Loge("AudioTrack.write failed: " + bytesWritten);
if (bytesWritten == AudioTrack.ERROR_INVALID_OPERATION) {
keepAlive = false;
}
}
// The byte buffer must be rewinded since byteBuffer.position() is
// increased at each call to AudioTrack.write(). If we don't do this,
// next call to AudioTrack.write() will fail.
byteBuffer.rewind();
// TODO(henrika): it is possible to create a delay estimate here by
// counting number of written frames and subtracting the result from
// audioTrack.getPlaybackHeadPosition().
}
try {
audioTrack.stop();
} catch (IllegalStateException e) {
Loge("AudioTrack.stop failed: " + e.getMessage());
}
assertTrue(audioTrack.getPlayState() == AudioTrack.PLAYSTATE_STOPPED);
audioTrack.flush();
}
public void joinThread() {
keepAlive = false;
while (isAlive()) {
try {
_playBuffer = ByteBuffer.allocateDirect(2 * 480); // Max 10 ms @ 48
// kHz
} catch (Exception e) {
DoLog(e.getMessage());
join();
} catch (InterruptedException e) {
// Ignore.
}
_tempBufPlay = new byte[2 * 480];
}
}
}
@SuppressWarnings("unused")
private int InitPlayback(int sampleRate) {
// get the minimum buffer size that can be used
int minPlayBufSize = AudioTrack.getMinBufferSize(
sampleRate,
AudioFormat.CHANNEL_OUT_MONO,
AudioFormat.ENCODING_PCM_16BIT);
WebRtcAudioTrack(Context context, long nativeAudioTrack) {
Logd("ctor" + WebRtcAudioUtils.getThreadInfo());
this.context = context;
this.nativeAudioTrack = nativeAudioTrack;
audioManager = (AudioManager) context.getSystemService(
Context.AUDIO_SERVICE);
sampleRate = GetNativeSampleRate();
byteBuffer = byteBuffer.allocateDirect(
BYTES_PER_FRAME * (sampleRate / BUFFERS_PER_SECOND));
Logd("byteBuffer.capacity: " + byteBuffer.capacity());
// DoLog("min play buf size is " + minPlayBufSize);
// Rather than passing the ByteBuffer with every callback (requiring
// the potentially expensive GetDirectBufferAddress) we simply have the
// the native class cache the address to the memory once.
nativeCacheDirectBufferAddress(byteBuffer, nativeAudioTrack);
int playBufSize = minPlayBufSize;
if (playBufSize < 6000) {
playBufSize *= 2;
}
_bufferedPlaySamples = 0;
// DoLog("play buf size is " + playBufSize);
// release the object
if (_audioTrack != null) {
_audioTrack.release();
_audioTrack = null;
}
try {
_audioTrack = new AudioTrack(
AudioManager.STREAM_VOICE_CALL,
sampleRate,
AudioFormat.CHANNEL_OUT_MONO,
AudioFormat.ENCODING_PCM_16BIT,
playBufSize, AudioTrack.MODE_STREAM);
} catch (Exception e) {
DoLog(e.getMessage());
return -1;
}
// check that the audioRecord is ready to be used
if (_audioTrack.getState() != AudioTrack.STATE_INITIALIZED) {
// DoLog("play not initialized " + sampleRate);
return -1;
}
// DoLog("play sample rate set to " + sampleRate);
if (_audioManager == null && _context != null) {
_audioManager = (AudioManager)
_context.getSystemService(Context.AUDIO_SERVICE);
}
// Return max playout volume
if (_audioManager == null) {
// Don't know the max volume but still init is OK for playout,
// so we should not return error.
return 0;
}
return _audioManager.getStreamMaxVolume(AudioManager.STREAM_VOICE_CALL);
if (DEBUG) {
WebRtcAudioUtils.logDeviceInfo(TAG);
}
}
@SuppressWarnings("unused")
private int StartPlayback() {
// start playout
try {
_audioTrack.play();
private int GetNativeSampleRate() {
return WebRtcAudioUtils.GetNativeSampleRate(audioManager);
}
} catch (IllegalStateException e) {
e.printStackTrace();
return -1;
}
private int InitPlayout(int sampleRate) {
Logd("InitPlayout(sampleRate=" + sampleRate + ")");
// Get the minimum buffer size required for the successful creation of an
// AudioTrack object to be created in the MODE_STREAM mode.
// Note that this size doesn't guarantee a smooth playback under load.
// TODO(henrika): should we extend the buffer size to avoid glitches?
final int minBufferSizeInBytes = AudioTrack.getMinBufferSize(
sampleRate,
AudioFormat.CHANNEL_OUT_MONO,
AudioFormat.ENCODING_PCM_16BIT);
Logd("AudioTrack.getMinBufferSize: " + minBufferSizeInBytes);
assertTrue(audioTrack == null);
_isPlaying = true;
return 0;
// For the streaming mode, data must be written to the audio sink in
// chunks of size (given by byteBuffer.capacity()) less than or equal
// to the total buffer size |minBufferSizeInBytes|.
assertTrue(byteBuffer.capacity() < minBufferSizeInBytes);
try {
// Create an AudioTrack object and initialize its associated audio buffer.
// The size of this buffer determines how long an AudioTrack can play
// before running out of data.
audioTrack = new AudioTrack(AudioManager.STREAM_VOICE_CALL,
sampleRate,
AudioFormat.CHANNEL_OUT_MONO,
AudioFormat.ENCODING_PCM_16BIT,
minBufferSizeInBytes,
AudioTrack.MODE_STREAM);
} catch (IllegalArgumentException e) {
Logd(e.getMessage());
return -1;
}
assertTrue(audioTrack.getState() == AudioTrack.STATE_INITIALIZED);
assertTrue(audioTrack.getPlayState() == AudioTrack.PLAYSTATE_STOPPED);
assertTrue(audioTrack.getStreamType() == AudioManager.STREAM_VOICE_CALL);
@SuppressWarnings("unused")
private int StopPlayback() {
_playLock.lock();
try {
// only stop if we are playing
if (_audioTrack.getPlayState() == AudioTrack.PLAYSTATE_PLAYING) {
// stop playout
try {
_audioTrack.stop();
} catch (IllegalStateException e) {
e.printStackTrace();
return -1;
}
// Return a delay estimate in milliseconds given the minimum buffer size.
return (1000 * (minBufferSizeInBytes / BYTES_PER_FRAME) / sampleRate);
}
// flush the buffers
_audioTrack.flush();
}
private boolean StartPlayout() {
Logd("StartPlayout");
assertTrue(audioTrack != null);
assertTrue(audioThread == null);
audioThread = new AudioTrackThread("AudioTrackJavaThread");
audioThread.start();
return true;
}
// release the object
_audioTrack.release();
_audioTrack = null;
} finally {
// Ensure we always unlock, both for success, exception or error
// return.
_doPlayInit = true;
_playLock.unlock();
}
_isPlaying = false;
return 0;
private boolean StopPlayout() {
Logd("StopPlayout");
assertTrue(audioThread != null);
audioThread.joinThread();
audioThread = null;
if (audioTrack != null) {
audioTrack.release();
audioTrack = null;
}
return true;
}
@SuppressWarnings("unused")
private int PlayAudio(int lengthInBytes) {
_playLock.lock();
try {
if (_audioTrack == null) {
return -2; // We have probably closed down while waiting for
// play lock
}
// Set priority, only do once
if (_doPlayInit == true) {
try {
android.os.Process.setThreadPriority(
android.os.Process.THREAD_PRIORITY_URGENT_AUDIO);
} catch (Exception e) {
DoLog("Set play thread priority failed: " + e.getMessage());
}
_doPlayInit = false;
}
int written = 0;
_playBuffer.get(_tempBufPlay);
written = _audioTrack.write(_tempBufPlay, 0, lengthInBytes);
_playBuffer.rewind(); // Reset the position to start of buffer
// DoLog("Wrote data to sndCard");
// increase by number of written samples
_bufferedPlaySamples += (written >> 1);
// decrease by number of played samples
int pos = _audioTrack.getPlaybackHeadPosition();
if (pos < _playPosition) { // wrap or reset by driver
_playPosition = 0; // reset
}
_bufferedPlaySamples -= (pos - _playPosition);
_playPosition = pos;
if (written != lengthInBytes) {
// DoLog("Could not write all data to sc (written = " + written
// + ", length = " + lengthInBytes + ")");
return -1;
}
} finally {
// Ensure we always unlock, both for success, exception or error
// return.
_playLock.unlock();
}
return _bufferedPlaySamples;
/** Helper method which throws an exception when an assertion has failed. */
private static void assertTrue(boolean condition) {
if (!condition) {
throw new AssertionError("Expected condition to be true");
}
}
@SuppressWarnings("unused")
private int SetPlayoutSpeaker(boolean loudspeakerOn) {
// create audio manager if needed
if (_audioManager == null && _context != null) {
_audioManager = (AudioManager)
_context.getSystemService(Context.AUDIO_SERVICE);
}
private static void Logd(String msg) {
Log.d(TAG, msg);
}
if (_audioManager == null) {
DoLogErr("Could not change audio routing - no audio manager");
return -1;
}
private static void Loge(String msg) {
Log.e(TAG, msg);
}
int apiLevel = android.os.Build.VERSION.SDK_INT;
private native void nativeCacheDirectBufferAddress(
ByteBuffer byteBuffer, long nativeAudioRecord);
if ((3 == apiLevel) || (4 == apiLevel)) {
// 1.5 and 1.6 devices
if (loudspeakerOn) {
// route audio to back speaker
_audioManager.setMode(AudioManager.MODE_NORMAL);
} else {
// route audio to earpiece
_audioManager.setMode(AudioManager.MODE_IN_CALL);
}
} else {
// 2.x devices
if ((android.os.Build.BRAND.equals("Samsung") ||
android.os.Build.BRAND.equals("samsung")) &&
((5 == apiLevel) || (6 == apiLevel) ||
(7 == apiLevel))) {
// Samsung 2.0, 2.0.1 and 2.1 devices
if (loudspeakerOn) {
// route audio to back speaker
_audioManager.setMode(AudioManager.MODE_IN_CALL);
_audioManager.setSpeakerphoneOn(loudspeakerOn);
} else {
// route audio to earpiece
_audioManager.setSpeakerphoneOn(loudspeakerOn);
_audioManager.setMode(AudioManager.MODE_NORMAL);
}
} else {
// Non-Samsung and Samsung 2.2 and up devices
_audioManager.setSpeakerphoneOn(loudspeakerOn);
}
}
return 0;
}
@SuppressWarnings("unused")
private int SetPlayoutVolume(int level) {
// create audio manager if needed
if (_audioManager == null && _context != null) {
_audioManager = (AudioManager)
_context.getSystemService(Context.AUDIO_SERVICE);
}
int retVal = -1;
if (_audioManager != null) {
_audioManager.setStreamVolume(AudioManager.STREAM_VOICE_CALL,
level, 0);
retVal = 0;
}
return retVal;
}
@SuppressWarnings("unused")
private int GetPlayoutVolume() {
// create audio manager if needed
if (_audioManager == null && _context != null) {
_audioManager = (AudioManager)
_context.getSystemService(Context.AUDIO_SERVICE);
}
int level = -1;
if (_audioManager != null) {
level = _audioManager.getStreamVolume(
AudioManager.STREAM_VOICE_CALL);
}
return level;
}
final String logTag = "WebRTC AD java";
private void DoLog(String msg) {
Log.d(logTag, msg);
}
private void DoLogErr(String msg) {
Log.e(logTag, msg);
}
private native void nativeGetPlayoutData(int bytes, long nativeAudioRecord);
}


@ -0,0 +1,63 @@
/*
* Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
package org.webrtc.voiceengine;
import java.lang.Thread;
import android.media.AudioManager;
import android.os.Build;
import android.util.Log;
public final class WebRtcAudioUtils {
// Use 44.1kHz as the default sampling rate.
private static final int SAMPLE_RATE_HZ = 44100;
public static boolean runningOnJellyBeanOrHigher() {
return Build.VERSION.SDK_INT >= Build.VERSION_CODES.JELLY_BEAN;
}
public static boolean runningOnJellyBeanMR1OrHigher() {
return Build.VERSION.SDK_INT >= Build.VERSION_CODES.JELLY_BEAN_MR1;
}
/** Helper method for building a string of thread information.*/
public static String getThreadInfo() {
return "@[name=" + Thread.currentThread().getName()
+ ", id=" + Thread.currentThread().getId() + "]";
}
/** Information about the current build, taken from system properties. */
public static void logDeviceInfo(String tag) {
Log.d(tag, "Android SDK: " + Build.VERSION.SDK_INT + ", "
+ "Release: " + Build.VERSION.RELEASE + ", "
+ "Brand: " + Build.BRAND + ", "
+ "Device: " + Build.DEVICE + ", "
+ "Id: " + Build.ID + ", "
+ "Hardware: " + Build.HARDWARE + ", "
+ "Manufacturer: " + Build.MANUFACTURER + ", "
+ "Model: " + Build.MODEL + ", "
+ "Product: " + Build.PRODUCT);
}
/**
* Returns the native or optimal output sample rate for this device's
* primary output stream. Unit is in Hz.
*/
public static int GetNativeSampleRate(AudioManager audioManager) {
if (!WebRtcAudioUtils.runningOnJellyBeanMR1OrHigher()) {
return SAMPLE_RATE_HZ;
}
String sampleRateString = audioManager.getProperty(
AudioManager.PROPERTY_OUTPUT_SAMPLE_RATE);
return (sampleRateString == null) ?
SAMPLE_RATE_HZ : Integer.parseInt(sampleRateString);
}
}


@ -41,8 +41,9 @@ enum {
namespace webrtc {
OpenSlesInput::OpenSlesInput()
: initialized_(false),
OpenSlesInput::OpenSlesInput(PlayoutDelayProvider* delay_provider)
: delay_provider_(delay_provider),
initialized_(false),
mic_initialized_(false),
rec_initialized_(false),
crit_sect_(CriticalSectionWrapper::CreateCriticalSection()),
@ -527,8 +528,7 @@ bool OpenSlesInput::CbThreadImpl() {
while (fifo_->size() > 0 && recording_) {
int8_t* audio = fifo_->Pop();
audio_buffer_->SetRecordedBuffer(audio, buffer_size_samples());
// TODO(henrika): improve the delay estimate.
audio_buffer_->SetVQEData(100,
audio_buffer_->SetVQEData(delay_provider_->PlayoutDelayMs(),
recording_delay_, 0);
audio_buffer_->DeliverRecordedData();
}


@ -35,7 +35,7 @@ class ThreadWrapper;
// to non-const methods require exclusive access to the object.
class OpenSlesInput {
public:
OpenSlesInput();
OpenSlesInput(PlayoutDelayProvider* delay_provider);
~OpenSlesInput();
static int32_t SetAndroidAudioDeviceObjects(void* javaVM,
@ -174,6 +174,8 @@ class OpenSlesInput {
// Thread-compatible.
bool CbThreadImpl();
PlayoutDelayProvider* delay_provider_;
// Java API handle
AudioManagerJni audio_manager_;


@ -25,8 +25,6 @@
do { \
SLresult err = (op); \
if (err != SL_RESULT_SUCCESS) { \
WEBRTC_TRACE(kTraceError, kTraceAudioDevice, id_, \
"OpenSL error: %d", err); \
assert(false); \
return ret_val; \
} \
@ -43,9 +41,8 @@ enum {
namespace webrtc {
OpenSlesOutput::OpenSlesOutput(const int32_t id)
: id_(id),
initialized_(false),
OpenSlesOutput::OpenSlesOutput()
: initialized_(false),
speaker_initialized_(false),
play_initialized_(false),
crit_sect_(CriticalSectionWrapper::CreateCriticalSection()),
@ -468,7 +465,6 @@ bool OpenSlesOutput::HandleUnderrun(int event_id, int event_msg) {
if (event_id == kNoUnderrun) {
return false;
}
WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, id_, "Audio underrun");
assert(event_id == kUnderrun);
assert(event_msg > 0);
// Wait for all enqueued buffers to be flushed.


@ -35,7 +35,7 @@ class ThreadWrapper;
// to non-const methods require exclusive access to the object.
class OpenSlesOutput : public PlayoutDelayProvider {
public:
explicit OpenSlesOutput(const int32_t id);
explicit OpenSlesOutput();
virtual ~OpenSlesOutput();
static int32_t SetAndroidAudioDeviceObjects(void* javaVM,
@ -191,7 +191,6 @@ class OpenSlesOutput : public PlayoutDelayProvider {
// Java API handle
AudioManagerJni audio_manager_;
int id_;
bool initialized_;
bool speaker_initialized_;
bool play_initialized_;