Adding playout volume control to WebRtcAudioTrack.java.

Also adds a framework for an AudioManager to be used by both sides (playout and recording).
This initial implementation only does very simple tasks, like setting up the correct audio
mode (needed for correct volume behavior). Note that this CL is mainly about modifying
the volume; the added AudioManager is only a placeholder for future work. I could have
implemented the same functionality in the WebRtcAudioTrack class, but I feel it is better
to move it into an AudioManager already at this stage.
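
As a rough illustration of what the Java-side setup amounts to (a minimal sketch only; the
class and method names below are placeholders, not the actual WebRtcAudioManager API):

import android.content.Context;
import android.media.AudioManager;

// Minimal sketch: save the current audio mode, switch to MODE_IN_COMMUNICATION
// (needed for predictable STREAM_VOICE_CALL volume behavior), and restore the
// saved mode on teardown.
class AudioModeHelper {
  private final AudioManager audioManager;
  private int savedMode = AudioManager.MODE_INVALID;

  AudioModeHelper(Context context) {
    audioManager =
        (AudioManager) context.getSystemService(Context.AUDIO_SERVICE);
  }

  public void init() {
    savedMode = audioManager.getMode();
    audioManager.setMode(AudioManager.MODE_IN_COMMUNICATION);
  }

  public void dispose() {
    audioManager.setMode(savedMode);
  }
}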

The AudioManager supports Init(), where the actual audio changes are done (setting the audio
mode etc.), but it can also be used as a simple "construct-and-store-audio-parameters" unit,
which is the case here. Hence, the AudioManager now serves as the central place for acquiring
audio parameters, which are then injected into the playout and recording sides. Previously,
both sides acquired their own parameters, which was more error-prone.
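
A minimal sketch of the "construct-and-store-audio-parameters" idea on the Java side (class
and method names are illustrative placeholders; the real WebRtcAudioManager also guards
getProperty() behind an API-level check):

import android.content.Context;
import android.media.AudioManager;

// Query the native sample rate once at construction and let both the playout
// and the recording side read the same cached values, instead of each side
// asking the device on its own.
class AudioParameterCache {
  private static final int DEFAULT_SAMPLE_RATE_HZ = 44100;

  private final int sampleRate;
  private final int channels = 1;  // Mono only for now.

  AudioParameterCache(Context context) {
    AudioManager am =
        (AudioManager) context.getSystemService(Context.AUDIO_SERVICE);
    String rate = am.getProperty(AudioManager.PROPERTY_OUTPUT_SAMPLE_RATE);
    sampleRate = (rate == null) ? DEFAULT_SAMPLE_RATE_HZ : Integer.parseInt(rate);
  }

  public int sampleRate() { return sampleRate; }
  public int channels() { return channels; }
}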

BUG=NONE
TEST=AudioDeviceTest
R=perkj@webrtc.org, phoglund@webrtc.org

Review URL: https://webrtc-codereview.appspot.com/45829004

Cr-Commit-Position: refs/heads/master@{#8875}
henrika 2015-03-27 10:56:23 +01:00
parent 8ed6a4bba4
commit 8324b525dc
20 changed files with 701 additions and 167 deletions

View File

@ -29,8 +29,8 @@ template <class InputType, class OutputType>
class OpenSlRunnerTemplate {
public:
OpenSlRunnerTemplate()
- : output_(),
- input_(&output_) {
: output_(NULL), // TODO(henrika): inject proper audio manager.
input_(&output_, NULL) {
output_.AttachAudioBuffer(&audio_buffer_);
if (output_.Init() != 0) {
assert(false);

View File

@ -89,6 +89,8 @@ source_set("audio_device") {
"android/audio_device_template.h",
"android/audio_device_utility_android.cc",
"android/audio_device_utility_android.h",
"android/audio_manager.cc",
"android/audio_manager.h",
"android/audio_manager_jni.cc",
"android/audio_manager_jni.h",
"android/audio_record_jni.cc",

View File

@ -15,12 +15,11 @@ namespace webrtc {
enum {
kDefaultSampleRate = 44100,
- kBitsPerSample = 16,
kNumChannels = 1,
kDefaultBufSizeInSamples = kDefaultSampleRate * 10 / 1000,
// Number of bytes per audio frame.
// Example: 16-bit PCM in mono => 1*(16/8)=2 [bytes/frame]
- kBytesPerFrame = kNumChannels * (kBitsPerSample / 8),
kBytesPerFrame = kNumChannels * (16 / 8),
};
class PlayoutDelayProvider {

View File

@ -12,6 +12,7 @@
#define WEBRTC_MODULES_AUDIO_DEVICE_ANDROID_AUDIO_DEVICE_TEMPLATE_H_
#include "webrtc/base/checks.h"
#include "webrtc/modules/audio_device/android/audio_manager.h"
#include "webrtc/modules/audio_device/audio_device_generic.h"
#include "webrtc/system_wrappers/interface/trace.h"
@ -24,6 +25,7 @@ class AudioDeviceTemplate : public AudioDeviceGeneric {
public:
static void SetAndroidAudioDeviceObjects(void* javaVM,
void* context) {
AudioManager::SetAndroidAudioDeviceObjects(javaVM, context);
OutputType::SetAndroidAudioDeviceObjects(javaVM, context);
InputType::SetAndroidAudioDeviceObjects(javaVM, context);
}
@ -31,12 +33,14 @@ class AudioDeviceTemplate : public AudioDeviceGeneric {
static void ClearAndroidAudioDeviceObjects() {
OutputType::ClearAndroidAudioDeviceObjects();
InputType::ClearAndroidAudioDeviceObjects();
AudioManager::ClearAndroidAudioDeviceObjects();
}
- // TODO(henrika): remove id
// TODO(henrika): remove id.
explicit AudioDeviceTemplate(const int32_t id)
- : output_(),
- input_(&output_) {
: audio_manager_(),
output_(&audio_manager_),
input_(&output_, &audio_manager_) {
}
virtual ~AudioDeviceTemplate() {
@ -49,11 +53,11 @@ class AudioDeviceTemplate : public AudioDeviceGeneric {
};
int32_t Init() override {
- return output_.Init() | input_.Init();
return audio_manager_.Init() | output_.Init() | input_.Init();
}
int32_t Terminate() override {
- return output_.Terminate() | input_.Terminate();
return output_.Terminate() | input_.Terminate() | audio_manager_.Close();
}
bool Initialized() const override {
@ -198,33 +202,23 @@ class AudioDeviceTemplate : public AudioDeviceGeneric {
}
int32_t SpeakerVolumeIsAvailable(bool& available) override {
- available = false;
- FATAL() << "Should never be called";
- return -1;
return output_.SpeakerVolumeIsAvailable(available);
}
- // TODO(henrika): add support if/when needed.
int32_t SetSpeakerVolume(uint32_t volume) override {
- FATAL() << "Should never be called";
- return -1;
return output_.SetSpeakerVolume(volume);
}
- // TODO(henrika): add support if/when needed.
int32_t SpeakerVolume(uint32_t& volume) const override {
- FATAL() << "Should never be called";
- return -1;
return output_.SpeakerVolume(volume);
}
- // TODO(henrika): add support if/when needed.
int32_t MaxSpeakerVolume(uint32_t& maxVolume) const override {
- FATAL() << "Should never be called";
- return -1;
return output_.MaxSpeakerVolume(maxVolume);
}
- // TODO(henrika): add support if/when needed.
int32_t MinSpeakerVolume(uint32_t& minVolume) const override {
- FATAL() << "Should never be called";
- return -1;
return output_.MinSpeakerVolume(minVolume);
}
int32_t SpeakerVolumeStepSize(uint16_t& stepSize) const override {
@ -418,6 +412,7 @@ class AudioDeviceTemplate : public AudioDeviceGeneric {
}
private:
AudioManager audio_manager_;
OutputType output_;
InputType input_;
};

View File

@ -69,7 +69,7 @@ static const int kBitsPerSample = 16;
static const int kBytesPerSample = kBitsPerSample / 8;
// Run the full-duplex test during this time (unit is in seconds).
// Note that first |kNumIgnoreFirstCallbacks| are ignored.
- static const int kFullDuplexTimeInSec = 10;
static const int kFullDuplexTimeInSec = 5;
// Wait for the callback sequence to stabilize by ignoring this amount of the
// initial callbacks (avoids initial FIFO access).
// Only used in the RunPlayoutAndRecordingInFullDuplex test.
@ -592,6 +592,12 @@ class AudioDeviceTest
return file_name;
}
void SetMaxPlayoutVolume() {
uint32_t max_volume;
EXPECT_EQ(0, audio_device()->MaxSpeakerVolume(&max_volume));
EXPECT_EQ(0, audio_device()->SetSpeakerVolume(max_volume));
}
void StartPlayout() {
EXPECT_FALSE(audio_device()->PlayoutIsInitialized());
EXPECT_FALSE(audio_device()->Playing());
@ -620,6 +626,24 @@ class AudioDeviceTest
EXPECT_FALSE(audio_device()->Recording());
}
int GetMaxSpeakerVolume() const {
uint32_t max_volume(0);
EXPECT_EQ(0, audio_device()->MaxSpeakerVolume(&max_volume));
return max_volume;
}
int GetMinSpeakerVolume() const {
uint32_t min_volume(0);
EXPECT_EQ(0, audio_device()->MinSpeakerVolume(&min_volume));
return min_volume;
}
int GetSpeakerVolume() const {
uint32_t volume(0);
EXPECT_EQ(0, audio_device()->SpeakerVolume(&volume));
return volume;
}
rtc::scoped_ptr<EventWrapper> test_is_done_;
scoped_refptr<AudioDeviceModule> audio_device_;
AudioParameters parameters_;
@ -634,7 +658,7 @@ TEST_P(AudioDeviceTest, AudioParameters) {
EXPECT_NE(0, playout_sample_rate());
PRINT("%splayout_sample_rate: %d\n", kTag, playout_sample_rate());
EXPECT_NE(0, recording_sample_rate());
PRINT("%splayout_sample_rate: %d\n", kTag, recording_sample_rate());
PRINT("%srecording_sample_rate: %d\n", kTag, recording_sample_rate());
EXPECT_NE(0, playout_channels());
PRINT("%splayout_channels: %d\n", kTag, playout_channels());
EXPECT_NE(0, recording_channels());
@ -659,6 +683,35 @@ TEST_P(AudioDeviceTest, BuiltInAECIsAvailable) {
kTag, audio_device()->BuiltInAECIsAvailable() ? "true" : "false");
}
TEST_P(AudioDeviceTest, SpeakerVolumeShouldBeAvailable) {
bool available;
EXPECT_EQ(0, audio_device()->SpeakerVolumeIsAvailable(&available));
EXPECT_TRUE(available);
}
TEST_P(AudioDeviceTest, MaxSpeakerVolumeIsPositive) {
EXPECT_GT(GetMaxSpeakerVolume(), 0);
}
TEST_P(AudioDeviceTest, MinSpeakerVolumeIsZero) {
EXPECT_EQ(GetMinSpeakerVolume(), 0);
}
TEST_P(AudioDeviceTest, DefaultSpeakerVolumeIsWithinMinMax) {
const int default_volume = GetSpeakerVolume();
EXPECT_GE(default_volume, GetMinSpeakerVolume());
EXPECT_LE(default_volume, GetMaxSpeakerVolume());
}
TEST_P(AudioDeviceTest, SetSpeakerVolumeActuallySetsVolume) {
const int default_volume = GetSpeakerVolume();
const int max_volume = GetMaxSpeakerVolume();
EXPECT_EQ(0, audio_device()->SetSpeakerVolume(max_volume));
int new_volume = GetSpeakerVolume();
EXPECT_EQ(new_volume, max_volume);
EXPECT_EQ(0, audio_device()->SetSpeakerVolume(default_volume));
}
// Tests that playout can be initiated, started and stopped.
TEST_P(AudioDeviceTest, StartStopPlayout) {
StartPlayout();
@ -752,6 +805,7 @@ TEST_P(AudioDeviceTest, RunPlayoutWithFileAsSource) {
mock.HandleCallbacks(test_is_done_.get(),
file_audio_stream.get(),
num_callbacks);
SetMaxPlayoutVolume();
EXPECT_EQ(0, audio_device()->RegisterAudioCallback(&mock));
StartPlayout();
test_is_done_->Wait(kTestTimeOutInMilliseconds);
@ -780,6 +834,7 @@ TEST_P(AudioDeviceTest, RunPlayoutAndRecordingInFullDuplex) {
mock.HandleCallbacks(test_is_done_.get(),
fifo_audio_stream.get(),
kFullDuplexTimeInSec * kNumCallbacksPerSecond);
SetMaxPlayoutVolume();
EXPECT_EQ(0, audio_device()->RegisterAudioCallback(&mock));
StartRecording();
StartPlayout();
@ -810,6 +865,7 @@ TEST_P(AudioDeviceTest, DISABLED_MeasureLoopbackLatency) {
latency_audio_stream.get(),
kMeasureLatencyTimeInSec * kNumCallbacksPerSecond);
EXPECT_EQ(0, audio_device()->RegisterAudioCallback(&mock));
SetMaxPlayoutVolume();
StartRecording();
StartPlayout();
test_is_done_->Wait(std::max(kTestTimeOutInMilliseconds,

View File

@ -0,0 +1,173 @@
/*
* Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "webrtc/modules/audio_device/android/audio_manager.h"
#include <android/log.h>
#include "webrtc/base/arraysize.h"
#include "webrtc/base/checks.h"
#include "webrtc/modules/audio_device/android/audio_common.h"
#include "webrtc/modules/utility/interface/helpers_android.h"
#define TAG "AudioManager"
#define ALOGV(...) __android_log_print(ANDROID_LOG_VERBOSE, TAG, __VA_ARGS__)
#define ALOGD(...) __android_log_print(ANDROID_LOG_DEBUG, TAG, __VA_ARGS__)
#define ALOGE(...) __android_log_print(ANDROID_LOG_ERROR, TAG, __VA_ARGS__)
#define ALOGW(...) __android_log_print(ANDROID_LOG_WARN, TAG, __VA_ARGS__)
#define ALOGI(...) __android_log_print(ANDROID_LOG_INFO, TAG, __VA_ARGS__)
namespace webrtc {
static JavaVM* g_jvm = NULL;
static jobject g_context = NULL;
static jclass g_audio_manager_class = NULL;
void AudioManager::SetAndroidAudioDeviceObjects(void* jvm, void* context) {
ALOGD("SetAndroidAudioDeviceObjects%s", GetThreadInfo().c_str());
CHECK(jvm);
CHECK(context);
g_jvm = reinterpret_cast<JavaVM*>(jvm);
JNIEnv* jni = GetEnv(g_jvm);
CHECK(jni) << "AttachCurrentThread must be called on this tread";
g_context = NewGlobalRef(jni, reinterpret_cast<jobject>(context));
jclass local_class = FindClass(
jni, "org/webrtc/voiceengine/WebRtcAudioManager");
g_audio_manager_class = reinterpret_cast<jclass>(
NewGlobalRef(jni, local_class));
CHECK_EXCEPTION(jni);
// Register native methods with the WebRtcAudioManager class. These methods
// are declared private native in WebRtcAudioManager.java.
JNINativeMethod native_methods[] = {
{"nativeCacheAudioParameters", "(IIJ)V",
reinterpret_cast<void*>(&webrtc::AudioManager::CacheAudioParameters)}};
jni->RegisterNatives(g_audio_manager_class,
native_methods, arraysize(native_methods));
CHECK_EXCEPTION(jni) << "Error during RegisterNatives";
}
void AudioManager::ClearAndroidAudioDeviceObjects() {
ALOGD("ClearAndroidAudioDeviceObjects%s", GetThreadInfo().c_str());
JNIEnv* jni = GetEnv(g_jvm);
CHECK(jni) << "AttachCurrentThread must be called on this tread";
jni->UnregisterNatives(g_audio_manager_class);
CHECK_EXCEPTION(jni) << "Error during UnregisterNatives";
DeleteGlobalRef(jni, g_audio_manager_class);
g_audio_manager_class = NULL;
DeleteGlobalRef(jni, g_context);
g_context = NULL;
g_jvm = NULL;
}
AudioManager::AudioManager()
: j_audio_manager_(NULL),
initialized_(false) {
ALOGD("ctor%s", GetThreadInfo().c_str());
CHECK(HasDeviceObjects());
CreateJavaInstance();
}
AudioManager::~AudioManager() {
ALOGD("~dtor%s", GetThreadInfo().c_str());
DCHECK(thread_checker_.CalledOnValidThread());
Close();
AttachThreadScoped ats(g_jvm);
JNIEnv* jni = ats.env();
jni->DeleteGlobalRef(j_audio_manager_);
j_audio_manager_ = NULL;
DCHECK(!initialized_);
}
bool AudioManager::Init() {
ALOGD("Init%s", GetThreadInfo().c_str());
DCHECK(thread_checker_.CalledOnValidThread());
DCHECK(!initialized_);
AttachThreadScoped ats(g_jvm);
JNIEnv* jni = ats.env();
jmethodID initID = GetMethodID(jni, g_audio_manager_class, "init", "()Z");
jboolean res = jni->CallBooleanMethod(j_audio_manager_, initID);
CHECK_EXCEPTION(jni);
if (!res) {
ALOGE("init failed!");
return false;
}
initialized_ = true;
return true;
}
bool AudioManager::Close() {
ALOGD("Close%s", GetThreadInfo().c_str());
DCHECK(thread_checker_.CalledOnValidThread());
if (!initialized_)
return true;
AttachThreadScoped ats(g_jvm);
JNIEnv* jni = ats.env();
jmethodID disposeID = GetMethodID(
jni, g_audio_manager_class, "dispose", "()V");
jni->CallVoidMethod(j_audio_manager_, disposeID);
CHECK_EXCEPTION(jni);
initialized_ = false;
return true;
}
void JNICALL AudioManager::CacheAudioParameters(JNIEnv* env, jobject obj,
jint sample_rate, jint channels, jlong nativeAudioManager) {
webrtc::AudioManager* this_object =
reinterpret_cast<webrtc::AudioManager*> (nativeAudioManager);
this_object->OnCacheAudioParameters(env, sample_rate, channels);
}
void AudioManager::OnCacheAudioParameters(
JNIEnv* env, jint sample_rate, jint channels) {
ALOGD("OnCacheAudioParameters%s", GetThreadInfo().c_str());
ALOGD("sample_rate: %d", sample_rate);
ALOGD("channels: %d", channels);
DCHECK(thread_checker_.CalledOnValidThread());
// TODO(henrika): add support for stereo output.
playout_parameters_.reset(sample_rate, channels);
record_parameters_.reset(sample_rate, channels);
}
AudioParameters AudioManager::GetPlayoutAudioParameters() const {
CHECK(playout_parameters_.is_valid());
return playout_parameters_;
}
AudioParameters AudioManager::GetRecordAudioParameters() const {
CHECK(record_parameters_.is_valid());
return record_parameters_;
}
bool AudioManager::HasDeviceObjects() {
return (g_jvm && g_context && g_audio_manager_class);
}
void AudioManager::CreateJavaInstance() {
ALOGD("CreateJavaInstance");
AttachThreadScoped ats(g_jvm);
JNIEnv* jni = ats.env();
jmethodID constructorID = GetMethodID(
jni, g_audio_manager_class, "<init>", "(Landroid/content/Context;J)V");
j_audio_manager_ = jni->NewObject(g_audio_manager_class,
constructorID,
g_context,
reinterpret_cast<intptr_t>(this));
CHECK_EXCEPTION(jni) << "Error during NewObject";
CHECK(j_audio_manager_);
j_audio_manager_ = jni->NewGlobalRef(j_audio_manager_);
CHECK_EXCEPTION(jni) << "Error during NewGlobalRef";
CHECK(j_audio_manager_);
}
} // namespace webrtc

View File

@ -0,0 +1,133 @@
/*
* Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_MODULES_AUDIO_DEVICE_ANDROID_AUDIO_MANAGER_H_
#define WEBRTC_MODULES_AUDIO_DEVICE_ANDROID_AUDIO_MANAGER_H_
#include <jni.h>
#include "webrtc/base/thread_checker.h"
#include "webrtc/modules/audio_device/android/audio_common.h"
#include "webrtc/modules/audio_device/include/audio_device_defines.h"
#include "webrtc/modules/audio_device/audio_device_generic.h"
#include "webrtc/modules/utility/interface/helpers_android.h"
namespace webrtc {
class AudioParameters {
public:
enum { kBitsPerSample = 16 };
AudioParameters()
: sample_rate_(0),
channels_(0),
frames_per_buffer_(0),
bits_per_sample_(kBitsPerSample) {}
AudioParameters(int sample_rate, int channels)
: sample_rate_(sample_rate),
channels_(channels),
frames_per_buffer_(sample_rate / 100),
bits_per_sample_(kBitsPerSample) {}
void reset(int sample_rate, int channels) {
sample_rate_ = sample_rate;
channels_ = channels;
// WebRTC uses a fixed buffer size equal to 10ms.
frames_per_buffer_ = (sample_rate / 100);
}
int sample_rate() const { return sample_rate_; }
int channels() const { return channels_; }
int frames_per_buffer() const { return frames_per_buffer_; }
bool is_valid() const {
return ((sample_rate_ > 0) && (channels_ > 0) && (frames_per_buffer_ > 0));
}
int GetBytesPerFrame() const { return channels_ * bits_per_sample_ / 8; }
int GetBytesPerBuffer() const {
return frames_per_buffer_ * GetBytesPerFrame();
}
private:
int sample_rate_;
int channels_;
int frames_per_buffer_;
const int bits_per_sample_;
};
// Implements support for functions in the WebRTC audio stack for Android that
// rely on the AudioManager in android.media. It also populates an
// AudioParameters structure with native audio parameters detected at
// construction. This class does not make any audio-related modifications
// unless Init() is called. Caching audio parameters makes no changes but only
// reads data from the Java side.
// TODO(henrika): expand this class when adding support for low-latency
// OpenSL ES. Currently, it only contains very basic functionality.
class AudioManager {
public:
// Use the invocation API to allow the native application to use the JNI
// interface pointer to access VM features. |jvm| denotes the Java VM and
// |context| corresponds to android.content.Context in Java.
// This method also sets a global jclass object, |g_audio_manager_class| for
// the "org/webrtc/voiceengine/WebRtcAudioManager"-class.
static void SetAndroidAudioDeviceObjects(void* jvm, void* context);
// Always call this method after the object has been destructed. It deletes
// existing global references and enables garbage collection.
static void ClearAndroidAudioDeviceObjects();
AudioManager();
~AudioManager();
// Initializes the audio manager (changes mode to MODE_IN_COMMUNICATION,
// request audio focus etc.).
// It is possible to use this class without calling Init() if the calling
// application prefers to set up the audio environment on its own instead.
bool Init();
// Revert any setting done by Init().
bool Close();
// Native audio parameters stored during construction.
AudioParameters GetPlayoutAudioParameters() const;
AudioParameters GetRecordAudioParameters() const;
bool initialized() const { return initialized_; }
private:
// Called from Java side so we can cache the native audio parameters.
// This method will be called by the WebRtcAudioManager constructor, i.e.
// on the same thread that this object is created on.
static void JNICALL CacheAudioParameters(JNIEnv* env, jobject obj,
jint sample_rate, jint channels, jlong nativeAudioManager);
void OnCacheAudioParameters(JNIEnv* env, jint sample_rate, jint channels);
// Returns true if SetAndroidAudioDeviceObjects() has been called
// successfully.
bool HasDeviceObjects();
// Called from the constructor. Defines the |j_audio_manager_| member.
void CreateJavaInstance();
// Stores thread ID in the constructor.
// We can then use ThreadChecker::CalledOnValidThread() to ensure that
// other methods are called from the same thread.
rtc::ThreadChecker thread_checker_;
// The Java WebRtcAudioManager instance.
jobject j_audio_manager_;
// Set to true by Init() and false by Close().
bool initialized_;
// Contains native parameters (e.g. sample rate, channel configuration).
// Set at construction in OnCacheAudioParameters() which is called from
// Java on the same thread as this object is created on.
AudioParameters playout_parameters_;
AudioParameters record_parameters_;
};
} // namespace webrtc
#endif // WEBRTC_MODULES_AUDIO_DEVICE_ANDROID_AUDIO_MANAGER_H_

View File

@ -82,8 +82,10 @@ void AudioRecordJni::ClearAndroidAudioDeviceObjects() {
g_jvm = NULL;
}
- AudioRecordJni::AudioRecordJni(PlayoutDelayProvider* delay_provider)
AudioRecordJni::AudioRecordJni(
PlayoutDelayProvider* delay_provider, AudioManager* audio_manager)
: delay_provider_(delay_provider),
audio_parameters_(audio_manager->GetRecordAudioParameters()),
j_audio_record_(NULL),
direct_buffer_address_(NULL),
direct_buffer_capacity_in_bytes_(0),
@ -91,9 +93,9 @@ AudioRecordJni::AudioRecordJni(PlayoutDelayProvider* delay_provider)
initialized_(false),
recording_(false),
audio_device_buffer_(NULL),
- sample_rate_hz_(0),
playout_delay_in_milliseconds_(0) {
ALOGD("ctor%s", GetThreadInfo().c_str());
DCHECK(audio_parameters_.is_valid());
CHECK(HasDeviceObjects());
CreateJavaInstance();
// Detach from this thread since we want to use the checker to verify calls
@ -135,9 +137,10 @@ int32_t AudioRecordJni::InitRecording() {
AttachThreadScoped ats(g_jvm);
JNIEnv* jni = ats.env();
jmethodID initRecordingID = GetMethodID(
jni, g_audio_record_class, "InitRecording", "(I)I");
jni, g_audio_record_class, "InitRecording", "(II)I");
jint frames_per_buffer = jni->CallIntMethod(
- j_audio_record_, initRecordingID, sample_rate_hz_);
j_audio_record_, initRecordingID, audio_parameters_.sample_rate(),
audio_parameters_.channels());
CHECK_EXCEPTION(jni);
if (frames_per_buffer < 0) {
ALOGE("InitRecording failed!");
@ -147,6 +150,7 @@ int32_t AudioRecordJni::InitRecording() {
ALOGD("frames_per_buffer: %d", frames_per_buffer_);
CHECK_EQ(direct_buffer_capacity_in_bytes_,
frames_per_buffer_ * kBytesPerFrame);
CHECK_EQ(frames_per_buffer_, audio_parameters_.frames_per_buffer());
initialized_ = true;
return 0;
}
@ -207,10 +211,12 @@ void AudioRecordJni::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) {
ALOGD("AttachAudioBuffer");
DCHECK(thread_checker_.CalledOnValidThread());
audio_device_buffer_ = audioBuffer;
- sample_rate_hz_ = GetNativeSampleRate();
- ALOGD("SetRecordingSampleRate(%d)", sample_rate_hz_);
- audio_device_buffer_->SetRecordingSampleRate(sample_rate_hz_);
- audio_device_buffer_->SetRecordingChannels(kNumChannels);
const int sample_rate_hz = audio_parameters_.sample_rate();
ALOGD("SetRecordingSampleRate(%d)", sample_rate_hz);
audio_device_buffer_->SetRecordingSampleRate(sample_rate_hz);
const int channels = audio_parameters_.channels();
ALOGD("SetRecordingChannels(%d)", channels);
audio_device_buffer_->SetRecordingChannels(channels);
}
bool AudioRecordJni::BuiltInAECIsAvailable() const {
@ -312,15 +318,4 @@ void AudioRecordJni::CreateJavaInstance() {
CHECK(j_audio_record_);
}
- int AudioRecordJni::GetNativeSampleRate() {
- AttachThreadScoped ats(g_jvm);
- JNIEnv* jni = ats.env();
- jmethodID getNativeSampleRate = GetMethodID(
- jni, g_audio_record_class, "GetNativeSampleRate", "()I");
- jint sample_rate_hz = jni->CallIntMethod(
- j_audio_record_, getNativeSampleRate);
- CHECK_EXCEPTION(jni);
- return sample_rate_hz;
- }
} // namespace webrtc

View File

@ -14,6 +14,7 @@
#include <jni.h>
#include "webrtc/base/thread_checker.h"
#include "webrtc/modules/audio_device/android/audio_manager.h"
#include "webrtc/modules/audio_device/include/audio_device_defines.h"
#include "webrtc/modules/audio_device/audio_device_generic.h"
#include "webrtc/modules/utility/interface/helpers_android.h"
@ -57,7 +58,8 @@ class AudioRecordJni {
// existing global references and enables garbage collection.
static void ClearAndroidAudioDeviceObjects();
- AudioRecordJni(PlayoutDelayProvider* delay_provider);
AudioRecordJni(
PlayoutDelayProvider* delay_provider, AudioManager* audio_manager);
~AudioRecordJni();
int32_t Init();
@ -104,10 +106,6 @@ class AudioRecordJni {
// Called from the constructor. Defines the |j_audio_record_| member.
void CreateJavaInstance();
- // Returns the native, or optimal, sample rate reported by the audio input
- // device.
- int GetNativeSampleRate();
// Stores thread ID in constructor.
// We can then use ThreadChecker::CalledOnValidThread() to ensure that
// other methods are called from the same thread.
@ -124,6 +122,10 @@ class AudioRecordJni {
// possible to make improvements in this area.
PlayoutDelayProvider* delay_provider_;
// Contains audio parameters provided to this class at construction by the
// AudioManager.
const AudioParameters audio_parameters_;
// The Java WebRtcAudioRecord instance.
jobject j_audio_record_;
@ -147,11 +149,6 @@ class AudioRecordJni {
// AudioDeviceModuleImpl class and called by AudioDeviceModuleImpl::Create().
AudioDeviceBuffer* audio_device_buffer_;
- // Native sample rate set in AttachAudioBuffer() which uses JNI to ask the
- // Java layer for the best possible sample rate for this particular device
- // and audio configuration.
- int sample_rate_hz_;
// Contains a delay estimate from the playout side given by |delay_provider_|.
int playout_delay_in_milliseconds_;
};

View File

@ -8,13 +8,13 @@
* be found in the AUTHORS file in the root of the source tree.
*/
#include "webrtc/modules/audio_device/android/audio_manager.h"
#include "webrtc/modules/audio_device/android/audio_track_jni.h"
#include <android/log.h>
#include "webrtc/base/arraysize.h"
#include "webrtc/base/checks.h"
#include "webrtc/modules/audio_device/android/audio_common.h"
#define TAG "AudioTrackJni"
#define ALOGV(...) __android_log_print(ANDROID_LOG_VERBOSE, TAG, __VA_ARGS__)
@ -75,17 +75,19 @@ void AudioTrackJni::ClearAndroidAudioDeviceObjects() {
g_jvm = NULL;
}
- AudioTrackJni::AudioTrackJni()
- : j_audio_track_(NULL),
// TODO(henrika): possibly extend usage of AudioManager and add it as member.
AudioTrackJni::AudioTrackJni(AudioManager* audio_manager)
: audio_parameters_(audio_manager->GetPlayoutAudioParameters()),
j_audio_track_(NULL),
direct_buffer_address_(NULL),
direct_buffer_capacity_in_bytes_(0),
frames_per_buffer_(0),
initialized_(false),
playing_(false),
audio_device_buffer_(NULL),
- sample_rate_hz_(0),
delay_in_milliseconds_(0) {
ALOGD("ctor%s", GetThreadInfo().c_str());
DCHECK(audio_parameters_.is_valid());
CHECK(HasDeviceObjects());
CreateJavaInstance();
// Detach from this thread since we want to use the checker to verify calls
@ -127,9 +129,10 @@ int32_t AudioTrackJni::InitPlayout() {
AttachThreadScoped ats(g_jvm);
JNIEnv* jni = ats.env();
jmethodID initPlayoutID = GetMethodID(
jni, g_audio_track_class, "InitPlayout", "(I)I");
jni, g_audio_track_class, "InitPlayout", "(II)I");
jint delay_in_milliseconds = jni->CallIntMethod(
- j_audio_track_, initPlayoutID, sample_rate_hz_);
j_audio_track_, initPlayoutID, audio_parameters_.sample_rate(),
audio_parameters_.channels());
CHECK_EXCEPTION(jni);
if (delay_in_milliseconds < 0) {
ALOGE("InitPlayout failed!");
@ -187,15 +190,68 @@ int32_t AudioTrackJni::StopPlayout() {
return 0;
}
int AudioTrackJni::SpeakerVolumeIsAvailable(bool& available) {
available = true;
return 0;
}
int AudioTrackJni::SetSpeakerVolume(uint32_t volume) {
ALOGD("SetSpeakerVolume(%d)%s", volume, GetThreadInfo().c_str());
DCHECK(thread_checker_.CalledOnValidThread());
AttachThreadScoped ats(g_jvm);
JNIEnv* jni = ats.env();
jmethodID setStreamVolume = GetMethodID(
jni, g_audio_track_class, "SetStreamVolume", "(I)Z");
jboolean res = jni->CallBooleanMethod(
j_audio_track_, setStreamVolume, volume);
CHECK_EXCEPTION(jni);
return res ? 0 : -1;
}
int AudioTrackJni::MaxSpeakerVolume(uint32_t& max_volume) const {
ALOGD("MaxSpeakerVolume%s", GetThreadInfo().c_str());
DCHECK(thread_checker_.CalledOnValidThread());
AttachThreadScoped ats(g_jvm);
JNIEnv* jni = ats.env();
jmethodID getStreamMaxVolume = GetMethodID(
jni, g_audio_track_class, "GetStreamMaxVolume", "()I");
jint max_vol = jni->CallIntMethod(j_audio_track_, getStreamMaxVolume);
CHECK_EXCEPTION(jni);
max_volume = max_vol;
return 0;
}
int AudioTrackJni::MinSpeakerVolume(uint32_t& min_volume) const {
ALOGD("MaxSpeakerVolume%s", GetThreadInfo().c_str());
DCHECK(thread_checker_.CalledOnValidThread());
min_volume = 0;
return 0;
}
int AudioTrackJni::SpeakerVolume(uint32_t& volume) const {
ALOGD("SpeakerVolume%s", GetThreadInfo().c_str());
DCHECK(thread_checker_.CalledOnValidThread());
AttachThreadScoped ats(g_jvm);
JNIEnv* jni = ats.env();
jmethodID getStreamVolume = GetMethodID(
jni, g_audio_track_class, "GetStreamVolume", "()I");
jint stream_volume = jni->CallIntMethod(j_audio_track_, getStreamVolume);
CHECK_EXCEPTION(jni);
volume = stream_volume;
return 0;
}
// TODO(henrika): possibly add stereo support.
void AudioTrackJni::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) {
ALOGD("AttachAudioBuffer");
ALOGD("AttachAudioBuffer%s", GetThreadInfo().c_str());
DCHECK(thread_checker_.CalledOnValidThread());
audio_device_buffer_ = audioBuffer;
- sample_rate_hz_ = GetNativeSampleRate();
- ALOGD("SetPlayoutSampleRate(%d)", sample_rate_hz_);
- audio_device_buffer_->SetPlayoutSampleRate(sample_rate_hz_);
- audio_device_buffer_->SetPlayoutChannels(kNumChannels);
const int sample_rate_hz = audio_parameters_.sample_rate();
ALOGD("SetPlayoutSampleRate(%d)", sample_rate_hz);
audio_device_buffer_->SetPlayoutSampleRate(sample_rate_hz);
const int channels = audio_parameters_.channels();
ALOGD("SetPlayoutChannels(%d)", channels);
audio_device_buffer_->SetPlayoutChannels(channels);
}
int32_t AudioTrackJni::PlayoutDelay(uint16_t& delayMS) const {
@ -282,15 +338,4 @@ void AudioTrackJni::CreateJavaInstance() {
CHECK(j_audio_track_);
}
- int AudioTrackJni::GetNativeSampleRate() {
- AttachThreadScoped ats(g_jvm);
- JNIEnv* jni = ats.env();
- jmethodID getNativeSampleRate = GetMethodID(
- jni, g_audio_track_class, "GetNativeSampleRate", "()I");
- jint sample_rate_hz = jni->CallIntMethod(
- j_audio_track_, getNativeSampleRate);
- CHECK_EXCEPTION(jni);
- return sample_rate_hz;
- }
} // namespace webrtc

View File

@ -15,6 +15,7 @@
#include "webrtc/base/thread_checker.h"
#include "webrtc/modules/audio_device/android/audio_common.h"
#include "webrtc/modules/audio_device/android/audio_manager.h"
#include "webrtc/modules/audio_device/include/audio_device_defines.h"
#include "webrtc/modules/audio_device/audio_device_generic.h"
#include "webrtc/modules/utility/interface/helpers_android.h"
@ -51,7 +52,7 @@ class AudioTrackJni : public PlayoutDelayProvider {
// existing global references and enables garbage collection.
static void ClearAndroidAudioDeviceObjects();
- AudioTrackJni();
AudioTrackJni(AudioManager* audio_manager);
~AudioTrackJni();
int32_t Init();
@ -64,8 +65,13 @@ class AudioTrackJni : public PlayoutDelayProvider {
int32_t StopPlayout();
bool Playing() const { return playing_; }
- int32_t PlayoutDelay(uint16_t& delayMS) const;
int SpeakerVolumeIsAvailable(bool& available);
int SetSpeakerVolume(uint32_t volume);
int SpeakerVolume(uint32_t& volume) const;
int MaxSpeakerVolume(uint32_t& max_volume) const;
int MinSpeakerVolume(uint32_t& min_volume) const;
int32_t PlayoutDelay(uint16_t& delayMS) const;
void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer);
protected:
@ -76,8 +82,7 @@ class AudioTrackJni : public PlayoutDelayProvider {
// Called from Java side so we can cache the address of the Java-managed
// |byte_buffer| in |direct_buffer_address_|. The size of the buffer
// is also stored in |direct_buffer_capacity_in_bytes_|.
- // This method will be called by the WebRtcAudioTrack constructor, i.e.,
- // on the same thread that this object is created on.
// Called on the same thread as the creating thread.
static void JNICALL CacheDirectBufferAddress(
JNIEnv* env, jobject obj, jobject byte_buffer, jlong nativeAudioTrack);
void OnCacheDirectBufferAddress(JNIEnv* env, jobject byte_buffer);
@ -98,10 +103,6 @@ class AudioTrackJni : public PlayoutDelayProvider {
// Called from the constructor. Defines the |j_audio_track_| member.
void CreateJavaInstance();
- // Returns the native, or optimal, sample rate reported by the audio input
- // device.
- int GetNativeSampleRate();
// Stores thread ID in constructor.
// We can then use ThreadChecker::CalledOnValidThread() to ensure that
// other methods are called from the same thread.
@ -111,6 +112,10 @@ class AudioTrackJni : public PlayoutDelayProvider {
// thread in Java. Detached during construction of this object.
rtc::ThreadChecker thread_checker_java_;
// Contains audio parameters provided to this class at construction by the
// AudioManager.
const AudioParameters audio_parameters_;
// The Java WebRtcAudioTrack instance.
jobject j_audio_track_;
@ -136,11 +141,6 @@ class AudioTrackJni : public PlayoutDelayProvider {
// and therefore outlives this object.
AudioDeviceBuffer* audio_device_buffer_;
- // Native sample rate set in AttachAudioBuffer() which uses JNI to ask the
- // Java layer for the best possible sample rate for this particular device
- // and audio configuration.
- int sample_rate_hz_;
// Estimated playout delay caused by buffering in the Java based audio track.
// We are using a fixed value here since measurements have shown that the
// variations are very small (~10ms) and it is not worth the extra complexity

View File

@ -0,0 +1,149 @@
/*
* Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
package org.webrtc.voiceengine;
import android.content.Context;
import android.content.pm.PackageManager;
import android.media.AudioManager;
import android.util.Log;
// WebRtcAudioManager handles tasks that use android.media.AudioManager.
// At construction, storeAudioParameters() is called and it retrieves
// fundamental audio parameters like native sample rate and number of channels.
// The result is then provided to the caller by nativeCacheAudioParameters().
// It is also possible to call init() to set up the audio environment for best
// possible "VoIP performance". All settings done in init() are reverted by
// dispose(). This class can also be used without calling init() if the user
// prefers to set up the audio environment separately. However, it is
// recommended to always use AudioManager.MODE_IN_COMMUNICATION.
// This class also adds support for output volume control of the
// STREAM_VOICE_CALL-type stream.
class WebRtcAudioManager {
private static final boolean DEBUG = false;
private static final String TAG = "WebRtcAudioManager";
// Use 44.1kHz as the default sampling rate.
private static final int SAMPLE_RATE_HZ = 44100;
// TODO(henrika): add stereo support for playout.
private static final int CHANNELS = 1;
private final long nativeAudioManager;
private final Context context;
private final AudioManager audioManager;
private boolean initialized = false;
private int nativeSampleRate;
private int nativeChannels;
private int savedAudioMode = AudioManager.MODE_INVALID;
private boolean savedIsSpeakerPhoneOn = false;
WebRtcAudioManager(Context context, long nativeAudioManager) {
Logd("ctor" + WebRtcAudioUtils.getThreadInfo());
this.context = context;
this.nativeAudioManager = nativeAudioManager;
audioManager = (AudioManager) context.getSystemService(
Context.AUDIO_SERVICE);
if (DEBUG) {
WebRtcAudioUtils.logDeviceInfo(TAG);
}
storeAudioParameters();
// TODO(henrika): add stereo support for playout side.
nativeCacheAudioParameters(
nativeSampleRate, nativeChannels, nativeAudioManager);
}
private boolean init() {
Logd("init" + WebRtcAudioUtils.getThreadInfo());
if (initialized) {
return true;
}
// Store current audio state so we can restore it when close() is called.
savedAudioMode = audioManager.getMode();
savedIsSpeakerPhoneOn = audioManager.isSpeakerphoneOn();
// Switch to COMMUNICATION mode for best possible VoIP performance.
audioManager.setMode(AudioManager.MODE_IN_COMMUNICATION);
if (DEBUG) {
Logd("savedAudioMode: " + savedAudioMode);
Logd("savedIsSpeakerPhoneOn: " + savedIsSpeakerPhoneOn);
Logd("hasEarpiece: " + hasEarpiece());
}
initialized = true;
return true;
}
private void dispose() {
Logd("dispose" + WebRtcAudioUtils.getThreadInfo());
if (!initialized) {
return;
}
// Restore previously stored audio states.
setSpeakerphoneOn(savedIsSpeakerPhoneOn);
audioManager.setMode(savedAudioMode);
audioManager.abandonAudioFocus(null);
}
private void storeAudioParameters() {
// Only mono is supported currently (in both directions).
// TODO(henrika): add support for stereo playout.
nativeChannels = CHANNELS;
// Get native sample rate and store it in |nativeSampleRate|.
// Most common rates are 44100 and 48000 Hz.
if (!WebRtcAudioUtils.runningOnJellyBeanMR1OrHigher()) {
nativeSampleRate = SAMPLE_RATE_HZ;
} else {
String sampleRateString = audioManager.getProperty(
AudioManager.PROPERTY_OUTPUT_SAMPLE_RATE);
nativeSampleRate = (sampleRateString == null) ?
SAMPLE_RATE_HZ : Integer.parseInt(sampleRateString);
}
Logd("nativeSampleRate: " + nativeSampleRate);
Logd("nativeChannels: " + nativeChannels);
}
/** Sets the speaker phone mode. */
private void setSpeakerphoneOn(boolean on) {
boolean wasOn = audioManager.isSpeakerphoneOn();
if (wasOn == on) {
return;
}
audioManager.setSpeakerphoneOn(on);
}
/** Gets the current earpiece state. */
private boolean hasEarpiece() {
return context.getPackageManager().hasSystemFeature(
PackageManager.FEATURE_TELEPHONY);
}
/** Helper method which throws an exception when an assertion has failed. */
private static void assertTrue(boolean condition) {
if (!condition) {
throw new AssertionError("Expected condition to be true");
}
}
private static void Logd(String msg) {
Log.d(TAG, msg);
}
private static void Loge(String msg) {
Log.e(TAG, msg);
}
private native void nativeCacheAudioParameters(
int sampleRate, int channels, long nativeAudioManager);
}

View File

@ -19,7 +19,6 @@ import android.media.AudioFormat;
import android.media.audiofx.AcousticEchoCanceler;
import android.media.audiofx.AudioEffect;
import android.media.audiofx.AudioEffect.Descriptor;
- import android.media.AudioManager;
import android.media.AudioRecord;
import android.media.MediaRecorder.AudioSource;
import android.os.Build;
@ -32,32 +31,21 @@ class WebRtcAudioRecord {
private static final String TAG = "WebRtcAudioRecord";
- // Mono recording is default.
- private static final int CHANNELS = 1;
// Default audio data format is PCM 16 bit per sample.
// Guaranteed to be supported by all devices.
private static final int BITS_PER_SAMPLE = 16;
- // Number of bytes per audio frame.
- // Example: 16-bit PCM in stereo => 2*(16/8)=4 [bytes/frame]
- private static final int BYTES_PER_FRAME = CHANNELS * (BITS_PER_SAMPLE / 8);
// Requested size of each recorded buffer provided to the client.
private static final int CALLBACK_BUFFER_SIZE_MS = 10;
// Average number of callbacks per second.
private static final int BUFFERS_PER_SECOND = 1000 / CALLBACK_BUFFER_SIZE_MS;
- private ByteBuffer byteBuffer;
- private final int bytesPerBuffer;
- private final int framesPerBuffer;
- private final int sampleRate;
private final long nativeAudioRecord;
- private final AudioManager audioManager;
private final Context context;
private ByteBuffer byteBuffer;
private AudioRecord audioRecord = null;
private AudioRecordThread audioThread = null;
@ -134,28 +122,11 @@ class WebRtcAudioRecord {
Logd("ctor" + WebRtcAudioUtils.getThreadInfo());
this.context = context;
this.nativeAudioRecord = nativeAudioRecord;
- audioManager = (AudioManager) context.getSystemService(
- Context.AUDIO_SERVICE);
- sampleRate = GetNativeSampleRate();
- bytesPerBuffer = BYTES_PER_FRAME * (sampleRate / BUFFERS_PER_SECOND);
- framesPerBuffer = sampleRate / BUFFERS_PER_SECOND;
- byteBuffer = byteBuffer.allocateDirect(bytesPerBuffer);
- Logd("byteBuffer.capacity: " + byteBuffer.capacity());
- // Rather than passing the ByteBuffer with every callback (requiring
- // the potentially expensive GetDirectBufferAddress) we simply have the
- // the native class cache the address to the memory once.
- nativeCacheDirectBufferAddress(byteBuffer, nativeAudioRecord);
if (DEBUG) {
WebRtcAudioUtils.logDeviceInfo(TAG);
}
}
- private int GetNativeSampleRate() {
- return WebRtcAudioUtils.GetNativeSampleRate(audioManager);
- }
public static boolean BuiltInAECIsAvailable() {
// AcousticEchoCanceler was added in API level 16 (Jelly Bean).
if (!WebRtcAudioUtils.runningOnJellyBeanOrHigher()) {
@ -187,8 +158,18 @@ class WebRtcAudioRecord {
return true;
}
- private int InitRecording(int sampleRate) {
- Logd("InitRecording(sampleRate=" + sampleRate + ")");
private int InitRecording(int sampleRate, int channels) {
Logd("InitRecording(sampleRate=" + sampleRate + ", channels=" +
channels + ")");
final int bytesPerFrame = channels * (BITS_PER_SAMPLE / 8);
final int framesPerBuffer = sampleRate / BUFFERS_PER_SECOND;
byteBuffer = byteBuffer.allocateDirect(bytesPerFrame * framesPerBuffer);
Logd("byteBuffer.capacity: " + byteBuffer.capacity());
// Rather than passing the ByteBuffer with every callback (requiring
// the potentially expensive GetDirectBufferAddress) we simply have the
// native class cache the address to the memory once.
nativeCacheDirectBufferAddress(byteBuffer, nativeAudioRecord);
// Get the minimum buffer size required for the successful creation of
// an AudioRecord object, in byte units.
// Note that this size doesn't guarantee a smooth recording under load.

View File

@ -25,31 +25,22 @@ class WebRtcAudioTrack {
private static final String TAG = "WebRtcAudioTrack";
- // Mono playout is default.
- // TODO(henrika): add stereo support.
- private static final int CHANNELS = 1;
// Default audio data format is PCM 16 bit per sample.
// Guaranteed to be supported by all devices.
private static final int BITS_PER_SAMPLE = 16;
- // Number of bytes per audio frame.
- // Example: 16-bit PCM in stereo => 2*(16/8)=4 [bytes/frame]
- private static final int BYTES_PER_FRAME = CHANNELS * (BITS_PER_SAMPLE / 8);
// Requested size of each recorded buffer provided to the client.
private static final int CALLBACK_BUFFER_SIZE_MS = 10;
// Average number of callbacks per second.
private static final int BUFFERS_PER_SECOND = 1000 / CALLBACK_BUFFER_SIZE_MS;
- private ByteBuffer byteBuffer;
- private final int sampleRate;
private final long nativeAudioTrack;
private final Context context;
private final long nativeAudioTrack;
private final AudioManager audioManager;
private ByteBuffer byteBuffer;
private AudioTrack audioTrack = null;
private AudioTrackThread audioThread = null;
@ -149,26 +140,23 @@ class WebRtcAudioTrack {
this.nativeAudioTrack = nativeAudioTrack;
audioManager = (AudioManager) context.getSystemService(
Context.AUDIO_SERVICE);
- sampleRate = GetNativeSampleRate();
if (DEBUG) {
WebRtcAudioUtils.logDeviceInfo(TAG);
}
}
private int InitPlayout(int sampleRate, int channels) {
Logd("InitPlayout(sampleRate=" + sampleRate + ", channels=" +
channels + ")");
final int bytesPerFrame = channels * (BITS_PER_SAMPLE / 8);
byteBuffer = byteBuffer.allocateDirect(
- BYTES_PER_FRAME * (sampleRate / BUFFERS_PER_SECOND));
bytesPerFrame * (sampleRate / BUFFERS_PER_SECOND));
Logd("byteBuffer.capacity: " + byteBuffer.capacity());
// Rather than passing the ByteBuffer with every callback (requiring
// the potentially expensive GetDirectBufferAddress) we simply have the
// native class cache the address to the memory once.
nativeCacheDirectBufferAddress(byteBuffer, nativeAudioTrack);
- if (DEBUG) {
- WebRtcAudioUtils.logDeviceInfo(TAG);
- }
- }
- private int GetNativeSampleRate() {
- return WebRtcAudioUtils.GetNativeSampleRate(audioManager);
- }
- private int InitPlayout(int sampleRate) {
- Logd("InitPlayout(sampleRate=" + sampleRate + ")");
// Get the minimum buffer size required for the successful creation of an
// AudioTrack object to be created in the MODE_STREAM mode.
// Note that this size doesn't guarantee a smooth playback under load.
@ -203,7 +191,9 @@ class WebRtcAudioTrack {
assertTrue(audioTrack.getStreamType() == AudioManager.STREAM_VOICE_CALL);
// Return a delay estimate in milliseconds given the minimum buffer size.
- return (1000 * (minBufferSizeInBytes / BYTES_PER_FRAME) / sampleRate);
// TODO(henrika): improve estimate and use real measurements of total
// latency instead. We can most likely ignore this value.
return (1000 * (minBufferSizeInBytes / bytesPerFrame) / sampleRate);
}
private boolean StartPlayout() {
@ -227,6 +217,32 @@ class WebRtcAudioTrack {
return true;
}
/** Get max possible volume index for a phone call audio stream. */
private int GetStreamMaxVolume() {
Logd("GetStreamMaxVolume");
assertTrue(audioManager != null);
return audioManager.getStreamMaxVolume(AudioManager.STREAM_VOICE_CALL);
}
/** Set current volume level for a phone call audio stream. */
private boolean SetStreamVolume(int volume) {
Logd("SetStreamVolume(" + volume + ")");
assertTrue(audioManager != null);
if (audioManager.isVolumeFixed()) {
Loge("The device implements a fixed volume policy.");
return false;
}
audioManager.setStreamVolume(AudioManager.STREAM_VOICE_CALL, volume, 0);
return true;
}
/** Get current volume level for a phone call audio stream. */
private int GetStreamVolume() {
Logd("GetStreamVolume");
assertTrue(audioManager != null);
return audioManager.getStreamVolume(AudioManager.STREAM_VOICE_CALL);
}
/** Helper method which throws an exception when an assertion has failed. */
private static void assertTrue(boolean condition) {
if (!condition) {

View File

@ -50,18 +50,4 @@ public final class WebRtcAudioUtils {
+ "Model: " + Build.MODEL + ", "
+ "Product: " + Build.PRODUCT);
}
- /**
- * Returns the native or optimal output sample rate for this device's
- * primary output stream. Unit is in Hz.
- */
- public static int GetNativeSampleRate(AudioManager audioManager) {
- if (!WebRtcAudioUtils.runningOnJellyBeanMR1OrHigher()) {
- return SAMPLE_RATE_HZ;
- }
- String sampleRateString = audioManager.getProperty(
- AudioManager.PROPERTY_OUTPUT_SAMPLE_RATE);
- return (sampleRateString == null) ?
- SAMPLE_RATE_HZ : Integer.parseInt(sampleRateString);
- }
}

View File

@ -41,7 +41,8 @@ enum {
namespace webrtc {
- OpenSlesInput::OpenSlesInput(PlayoutDelayProvider* delay_provider)
OpenSlesInput::OpenSlesInput(
PlayoutDelayProvider* delay_provider, AudioManager* audio_manager)
: delay_provider_(delay_provider),
initialized_(false),
mic_initialized_(false),

View File

@ -24,6 +24,7 @@
namespace webrtc {
class AudioDeviceBuffer;
class AudioManager;
class CriticalSectionWrapper;
class PlayoutDelayProvider;
class SingleRwFifo;
@ -35,7 +36,8 @@ class ThreadWrapper;
// to non-const methods require exclusive access to the object.
class OpenSlesInput {
public:
- OpenSlesInput(PlayoutDelayProvider* delay_provider);
OpenSlesInput(
PlayoutDelayProvider* delay_provider, AudioManager* audio_manager);
~OpenSlesInput();
static int32_t SetAndroidAudioDeviceObjects(void* javaVM,

View File

@ -41,7 +41,7 @@ enum {
namespace webrtc {
- OpenSlesOutput::OpenSlesOutput()
OpenSlesOutput::OpenSlesOutput(AudioManager* audio_manager)
: initialized_(false),
speaker_initialized_(false),
play_initialized_(false),

View File

@ -16,6 +16,7 @@
#include <SLES/OpenSLES_AndroidConfiguration.h>
#include "webrtc/base/scoped_ptr.h"
#include "webrtc/modules/audio_device/android/audio_manager.h"
#include "webrtc/modules/audio_device/android/audio_manager_jni.h"
#include "webrtc/modules/audio_device/android/low_latency_event.h"
#include "webrtc/modules/audio_device/android/audio_common.h"
@ -35,7 +36,8 @@ class ThreadWrapper;
// to non-const methods require exclusive access to the object.
class OpenSlesOutput : public PlayoutDelayProvider {
public:
- explicit OpenSlesOutput();
// TODO(henrika): use this new audio manager instead of old.
explicit OpenSlesOutput(AudioManager* audio_manager);
virtual ~OpenSlesOutput();
static int32_t SetAndroidAudioDeviceObjects(void* javaVM,

View File

@ -124,6 +124,8 @@
'android/audio_device_template.h',
'android/audio_device_utility_android.cc',
'android/audio_device_utility_android.h',
'android/audio_manager.cc',
'android/audio_manager.h',
'android/audio_manager_jni.cc',
'android/audio_manager_jni.h',
'android/audio_record_jni.cc',