Adds C++/JNI/Java unit test for audio device module on Android.

This CL adds support for unit tests of the AudioDeviceModule on Android using both Java and C++. The new framework uses ::testing::TestWithParam to support both Java-based and OpenSL ES-based audio. However, given existing issues in our OpenSL ES implementation, the list of test parameters contains only Java in this first version. OpenSL ES will be enabled as soon as that backend has been refactored.
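
For reference, a condensed sketch of the value-parameterized pattern the new framework uses (names are taken from the test file added below; the snippet itself is illustrative and not part of the diff):

    // Each TEST_P below runs once per audio layer listed here.
    static const AudioDeviceModule::AudioLayer kAudioLayers[] = {
        AudioDeviceModule::kAndroidJavaAudio};

    class AudioDeviceTest
        : public ::testing::TestWithParam<AudioDeviceModule::AudioLayer> {};

    TEST_P(AudioDeviceTest, ConstructDestruct) {
      // GetParam() selects the audio layer under test.
      scoped_refptr<AudioDeviceModule> adm(
          AudioDeviceModuleImpl::Create(0, GetParam()));
    }

    INSTANTIATE_TEST_CASE_P(AudioDeviceTest, AudioDeviceTest,
                            ::testing::ValuesIn(kAudioLayers));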

It also:

- Removes the redundant JNIEnv* argument from webrtc::VoiceEngine::SetAndroidObjects() (see the usage sketch after this list).
- Modifies usage of enable_android_opensl and the WEBRTC_ANDROID_OPENSLES define.
- Adds kAndroidJavaAudio and kAndroidOpenSLESAudio to the AudioLayer enumeration.
- Fixes some bugs discovered while running the new tests.
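
As a usage sketch (illustrative only; error handling omitted), an embedder now registers the Java VM and application context without a JNIEnv* and can request the Java-based ADM explicitly via the new enumerators:

    // The JNIEnv* argument is gone; the env is derived from the JavaVM.
    webrtc::VoiceEngine::SetAndroidObjects(jvm, context);

    // Select the Java-based audio layer added by this CL.
    webrtc::scoped_refptr<webrtc::AudioDeviceModule> adm(
        webrtc::AudioDeviceModuleImpl::Create(
            0, webrtc::AudioDeviceModule::kAndroidJavaAudio));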

BUG=NONE
R=phoglund@webrtc.org

Review URL: https://webrtc-codereview.appspot.com/40069004

Cr-Commit-Position: refs/heads/master@{#8651}
git-svn-id: http://webrtc.googlecode.com/svn/trunk@8651 4adac7df-926f-26a2-2b94-8c16560cd09d
Author: henrika@webrtc.org
Date: 2015-03-09 12:39:53 +00:00
Parent: 1b32bbe0a7
Commit: 474d1eb223
28 changed files with 729 additions and 169 deletions

View File

@@ -964,7 +964,7 @@ JOW(jboolean, PeerConnectionFactory_initializeAndroidGlobals)(
       failure |= AndroidVideoCapturerJni::SetAndroidObjects(jni, context);
     }
     if (initialize_audio)
-      failure |= webrtc::VoiceEngine::SetAndroidObjects(GetJVM(), jni, context);
+      failure |= webrtc::VoiceEngine::SetAndroidObjects(GetJVM(), context);
     factory_static_initialized = true;
   }
   if (initialize_video) {

View File

@@ -83,6 +83,7 @@
       },
       'dependencies': [
         '<(webrtc_root)/modules/modules.gyp:modules_unittests',
+        'audio_device_java',
       ],
       'includes': [
         '../../build/apk_test.gypi',
@@ -243,6 +244,17 @@
         '../../build/java.gypi',
       ],
     },
+    {
+      'target_name': 'audio_device_java',
+      'type': 'none',
+      'variables': {
+        'java_in_dir': '<(webrtc_root)/modules/audio_device/android/java',
+        'never_lint': 1,
+      },
+      'includes': [
+        '../../build/java.gypi',
+      ],
+    },
   ],
 }

View File

@@ -43,7 +43,7 @@ JOWW(void, NativeWebRtcContextRegistry_register)(
         "Failed to register android objects to video capture");
   CHECK(webrtc::SetRenderAndroidVM(g_vm) == 0,
         "Failed to register android objects to video render");
-  CHECK(webrtc::VoiceEngine::SetAndroidObjects(g_vm, jni, context) == 0,
+  CHECK(webrtc::VoiceEngine::SetAndroidObjects(g_vm, context) == 0,
         "Failed to register android objects to voice engine");
 }
@@ -54,7 +54,7 @@ JOWW(void, NativeWebRtcContextRegistry_unRegister)(
         "Failed to unregister android objects from video capture");
   CHECK(webrtc::SetRenderAndroidVM(NULL) == 0,
         "Failed to unregister android objects from video render");
-  CHECK(webrtc::VoiceEngine::SetAndroidObjects(NULL, NULL, NULL) == 0,
+  CHECK(webrtc::VoiceEngine::SetAndroidObjects(NULL, NULL) == 0,
         "Failed to unregister android objects from voice engine");
   webrtc_examples::ClearVieDeviceObjects();
   webrtc_examples::ClearVoeDeviceObjects();

View File

@@ -84,8 +84,8 @@ class OpenSlRunner
       jobject obj,
       jobject context) {
     assert(!g_runner);  // Should only be called once.
-    OpenSlesInput::SetAndroidAudioDeviceObjects(g_vm, env, context);
-    OpenSlesOutput::SetAndroidAudioDeviceObjects(g_vm, env, context);
+    OpenSlesInput::SetAndroidAudioDeviceObjects(g_vm, context);
+    OpenSlesOutput::SetAndroidAudioDeviceObjects(g_vm, context);
     g_runner = new OpenSlRunner();
   }

View File

@@ -23,10 +23,9 @@ template <class InputType, class OutputType>
 class AudioDeviceTemplate : public AudioDeviceGeneric {
  public:
   static void SetAndroidAudioDeviceObjects(void* javaVM,
-                                           void* env,
                                            void* context) {
-    OutputType::SetAndroidAudioDeviceObjects(javaVM, env, context);
-    InputType::SetAndroidAudioDeviceObjects(javaVM, env, context);
+    OutputType::SetAndroidAudioDeviceObjects(javaVM, context);
+    InputType::SetAndroidAudioDeviceObjects(javaVM, context);
   }

   static void ClearAndroidAudioDeviceObjects() {
@@ -44,35 +43,35 @@ class AudioDeviceTemplate : public AudioDeviceGeneric {
   }

   int32_t ActiveAudioLayer(
-      AudioDeviceModule::AudioLayer& audioLayer) const {  // NOLINT
+      AudioDeviceModule::AudioLayer& audioLayer) const override {
     audioLayer = AudioDeviceModule::kPlatformDefaultAudio;
     return 0;
-  }
+  };

-  int32_t Init() {
+  int32_t Init() override {
     return output_.Init() | input_.Init();
   }

-  int32_t Terminate() {
+  int32_t Terminate() override {
     return output_.Terminate() | input_.Terminate();
   }

-  bool Initialized() const {
+  bool Initialized() const override {
     return true;
   }

-  int16_t PlayoutDevices() {
+  int16_t PlayoutDevices() override {
     return 1;
   }

-  int16_t RecordingDevices() {
+  int16_t RecordingDevices() override {
     return 1;
   }

   int32_t PlayoutDeviceName(
       uint16_t index,
       char name[kAdmMaxDeviceNameSize],
-      char guid[kAdmMaxGuidSize]) {
+      char guid[kAdmMaxGuidSize]) override {
     FATAL() << "Should never be called";
     return -1;
   }
@@ -80,370 +79,341 @@ class AudioDeviceTemplate : public AudioDeviceGeneric {
   int32_t RecordingDeviceName(
       uint16_t index,
       char name[kAdmMaxDeviceNameSize],
-      char guid[kAdmMaxGuidSize]) {
+      char guid[kAdmMaxGuidSize]) override {
     FATAL() << "Should never be called";
     return -1;
   }

-  int32_t SetPlayoutDevice(uint16_t index) {
+  int32_t SetPlayoutDevice(uint16_t index) override {
     // OK to use but it has no effect currently since device selection is
     // done using Andoid APIs instead.
     return 0;
   }

   int32_t SetPlayoutDevice(
-      AudioDeviceModule::WindowsDeviceType device) {
+      AudioDeviceModule::WindowsDeviceType device) override {
     FATAL() << "Should never be called";
     return -1;
   }

-  int32_t SetRecordingDevice(uint16_t index) {
+  int32_t SetRecordingDevice(uint16_t index) override {
     // OK to use but it has no effect currently since device selection is
     // done using Andoid APIs instead.
     return 0;
   }

   int32_t SetRecordingDevice(
-      AudioDeviceModule::WindowsDeviceType device) {
+      AudioDeviceModule::WindowsDeviceType device) override {
     FATAL() << "Should never be called";
     return -1;
   }

-  int32_t PlayoutIsAvailable(
-      bool& available) {  // NOLINT
+  int32_t PlayoutIsAvailable(bool& available) override {
     available = true;
     return 0;
   }

-  int32_t InitPlayout() {
+  int32_t InitPlayout() override {
     return output_.InitPlayout();
   }

-  bool PlayoutIsInitialized() const {
+  bool PlayoutIsInitialized() const override {
     return output_.PlayoutIsInitialized();
   }

-  int32_t RecordingIsAvailable(
-      bool& available) {  // NOLINT
+  int32_t RecordingIsAvailable(bool& available) override {
     available = true;
     return 0;
   }

-  int32_t InitRecording() {
+  int32_t InitRecording() override {
     return input_.InitRecording();
   }

-  bool RecordingIsInitialized() const {
+  bool RecordingIsInitialized() const override {
     return input_.RecordingIsInitialized();
   }

-  int32_t StartPlayout() {
+  int32_t StartPlayout() override {
     return output_.StartPlayout();
   }

-  int32_t StopPlayout() {
+  int32_t StopPlayout() override {
     return output_.StopPlayout();
   }

-  bool Playing() const {
+  bool Playing() const override {
     return output_.Playing();
   }

-  int32_t StartRecording() {
+  int32_t StartRecording() override {
     return input_.StartRecording();
   }

-  int32_t StopRecording() {
+  int32_t StopRecording() override {
     return input_.StopRecording();
   }

-  bool Recording() const {
+  bool Recording() const override {
     return input_.Recording() ;
   }

-  int32_t SetAGC(bool enable) {
+  int32_t SetAGC(bool enable) override {
     if (enable) {
       FATAL() << "Should never be called";
     }
     return -1;
   }

-  bool AGC() const {
+  bool AGC() const override {
     return false;
   }

-  int32_t SetWaveOutVolume(uint16_t volumeLeft,
-                           uint16_t volumeRight) {
+  int32_t SetWaveOutVolume(
+      uint16_t volumeLeft, uint16_t volumeRight) override {
     FATAL() << "Should never be called";
     return -1;
   }

   int32_t WaveOutVolume(
-      uint16_t& volumeLeft,  // NOLINT
-      uint16_t& volumeRight) const {  // NOLINT
+      uint16_t& volumeLeft, uint16_t& volumeRight) const override {
     FATAL() << "Should never be called";
     return -1;
   }

-  int32_t InitSpeaker() {
+  int32_t InitSpeaker() override {
     return 0;
   }

-  bool SpeakerIsInitialized() const {
+  bool SpeakerIsInitialized() const override {
     return true;
   }

-  int32_t InitMicrophone() {
+  int32_t InitMicrophone() override {
     return 0;
   }

-  bool MicrophoneIsInitialized() const {
+  bool MicrophoneIsInitialized() const override {
     return true;
   }

-  int32_t SpeakerVolumeIsAvailable(
-      bool& available) {  // NOLINT
+  int32_t SpeakerVolumeIsAvailable(bool& available) override {
     available = false;
     FATAL() << "Should never be called";
     return -1;
   }

   // TODO(henrika): add support if/when needed.
-  int32_t SetSpeakerVolume(uint32_t volume) {
+  int32_t SetSpeakerVolume(uint32_t volume) override {
     FATAL() << "Should never be called";
     return -1;
   }

   // TODO(henrika): add support if/when needed.
-  int32_t SpeakerVolume(
-      uint32_t& volume) const {  // NOLINT
+  int32_t SpeakerVolume(uint32_t& volume) const override {
     FATAL() << "Should never be called";
     return -1;
   }

   // TODO(henrika): add support if/when needed.
-  int32_t MaxSpeakerVolume(
-      uint32_t& maxVolume) const {  // NOLINT
+  int32_t MaxSpeakerVolume(uint32_t& maxVolume) const override {
     FATAL() << "Should never be called";
     return -1;
   }

   // TODO(henrika): add support if/when needed.
-  int32_t MinSpeakerVolume(
-      uint32_t& minVolume) const {  // NOLINT
+  int32_t MinSpeakerVolume(uint32_t& minVolume) const override {
     FATAL() << "Should never be called";
     return -1;
   }

-  int32_t SpeakerVolumeStepSize(
-      uint16_t& stepSize) const {  // NOLINT
+  int32_t SpeakerVolumeStepSize(uint16_t& stepSize) const override {
     FATAL() << "Should never be called";
     return -1;
   }

-  int32_t MicrophoneVolumeIsAvailable(
-      bool& available) {  // NOLINT
+  int32_t MicrophoneVolumeIsAvailable(bool& available) override {
     available = false;
     FATAL() << "Should never be called";
     return -1;
   }

-  int32_t SetMicrophoneVolume(uint32_t volume) {
+  int32_t SetMicrophoneVolume(uint32_t volume) override {
     FATAL() << "Should never be called";
     return -1;
   }

-  int32_t MicrophoneVolume(
-      uint32_t& volume) const {  // NOLINT
+  int32_t MicrophoneVolume(uint32_t& volume) const override {
     FATAL() << "Should never be called";
     return -1;
   }

-  int32_t MaxMicrophoneVolume(
-      uint32_t& maxVolume) const {  // NOLINT
+  int32_t MaxMicrophoneVolume(uint32_t& maxVolume) const override {
     FATAL() << "Should never be called";
     return -1;
   }

-  int32_t MinMicrophoneVolume(
-      uint32_t& minVolume) const {  // NOLINT
+  int32_t MinMicrophoneVolume(uint32_t& minVolume) const override {
     FATAL() << "Should never be called";
     return -1;
   }

-  int32_t MicrophoneVolumeStepSize(
-      uint16_t& stepSize) const {  // NOLINT
+  int32_t MicrophoneVolumeStepSize(uint16_t& stepSize) const override {
     FATAL() << "Should never be called";
     return -1;
   }

-  int32_t SpeakerMuteIsAvailable(
-      bool& available) {  // NOLINT
+  int32_t SpeakerMuteIsAvailable(bool& available) override {
     FATAL() << "Should never be called";
     return -1;
   }

-  int32_t SetSpeakerMute(bool enable) {
+  int32_t SetSpeakerMute(bool enable) override {
     FATAL() << "Should never be called";
     return -1;
   }

-  int32_t SpeakerMute(
-      bool& enabled) const {  // NOLINT
+  int32_t SpeakerMute(bool& enabled) const override {
     FATAL() << "Should never be called";
     return -1;
   }

-  int32_t MicrophoneMuteIsAvailable(
-      bool& available) {  // NOLINT
+  int32_t MicrophoneMuteIsAvailable(bool& available) override {
     FATAL() << "Not implemented";
     return -1;
   }

-  int32_t SetMicrophoneMute(bool enable) {
+  int32_t SetMicrophoneMute(bool enable) override {
     FATAL() << "Not implemented";
     return -1;
   }

-  int32_t MicrophoneMute(
-      bool& enabled) const {  // NOLINT
+  int32_t MicrophoneMute(bool& enabled) const override {
     FATAL() << "Not implemented";
     return -1;
   }

-  int32_t MicrophoneBoostIsAvailable(
-      bool& available) {  // NOLINT
+  int32_t MicrophoneBoostIsAvailable(bool& available) override {
     FATAL() << "Should never be called";
     return -1;
   }

-  int32_t SetMicrophoneBoost(bool enable) {
+  int32_t SetMicrophoneBoost(bool enable) override {
     FATAL() << "Should never be called";
     return -1;
   }

-  int32_t MicrophoneBoost(
-      bool& enabled) const {  // NOLINT
+  int32_t MicrophoneBoost(bool& enabled) const override {
     FATAL() << "Should never be called";
     return -1;
   }

-  int32_t StereoPlayoutIsAvailable(
-      bool& available) {  // NOLINT
+  int32_t StereoPlayoutIsAvailable(bool& available) override {
     available = false;
     return 0;
   }

-  int32_t SetStereoPlayout(bool enable) {
+  // TODO(henrika): add support.
+  int32_t SetStereoPlayout(bool enable) override {
     return -1;
   }

-  int32_t StereoPlayout(
-      bool& enabled) const {  // NOLINT
+  // TODO(henrika): add support.
+  int32_t StereoPlayout(bool& enabled) const override {
     enabled = false;
     FATAL() << "Should never be called";
     return -1;
   }

-  int32_t StereoRecordingIsAvailable(
-      bool& available) {  // NOLINT
+  int32_t StereoRecordingIsAvailable(bool& available) override {
     available = false;
     return 0;
   }

-  int32_t SetStereoRecording(bool enable) {
+  int32_t SetStereoRecording(bool enable) override {
     return -1;
   }

-  int32_t StereoRecording(
-      bool& enabled) const {  // NOLINT
+  int32_t StereoRecording(bool& enabled) const override {
     enabled = false;
     return 0;
   }

   int32_t SetPlayoutBuffer(
-      const AudioDeviceModule::BufferType type,
-      uint16_t sizeMS) {
+      const AudioDeviceModule::BufferType type, uint16_t sizeMS) override {
     FATAL() << "Should never be called";
     return -1;
   }

   int32_t PlayoutBuffer(
-      AudioDeviceModule::BufferType& type,
-      uint16_t& sizeMS) const {  // NOLINT
+      AudioDeviceModule::BufferType& type, uint16_t& sizeMS) const override {
     FATAL() << "Should never be called";
     return -1;
   }

-  int32_t PlayoutDelay(
-      uint16_t& delayMS) const {  // NOLINT
+  int32_t PlayoutDelay(uint16_t& delayMS) const override {
     return output_.PlayoutDelay(delayMS);
   }

-  int32_t RecordingDelay(
-      uint16_t& delayMS) const {  // NOLINT
+  int32_t RecordingDelay(uint16_t& delayMS) const override {
     return input_.RecordingDelay(delayMS);
   }

-  int32_t CPULoad(
-      uint16_t& load) const {  // NOLINT
+  int32_t CPULoad(uint16_t& load) const override {
     FATAL() << "Should never be called";
     return -1;
   }

-  bool PlayoutWarning() const {
+  bool PlayoutWarning() const override {
     return false;
   }

-  bool PlayoutError() const {
+  bool PlayoutError() const override {
     return false;
   }

-  bool RecordingWarning() const {
+  bool RecordingWarning() const override {
     return false;
   }

-  bool RecordingError() const {
+  bool RecordingError() const override {
     return false;
   }

-  void ClearPlayoutWarning() {}
-  void ClearPlayoutError() {}
-  void ClearRecordingWarning() {}
-  void ClearRecordingError() {}
+  void ClearPlayoutWarning() override {}
+  void ClearPlayoutError() override {}
+  void ClearRecordingWarning() override {}
+  void ClearRecordingError() override {}

-  void AttachAudioBuffer(
-      AudioDeviceBuffer* audioBuffer) {
+  void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) override {
     output_.AttachAudioBuffer(audioBuffer);
     input_.AttachAudioBuffer(audioBuffer);
   }

   // TODO(henrika): remove
-  int32_t SetPlayoutSampleRate(
-      const uint32_t samplesPerSec) {
+  int32_t SetPlayoutSampleRate(const uint32_t samplesPerSec) override {
     FATAL() << "Should never be called";
     return -1;
   }

-  int32_t SetLoudspeakerStatus(bool enable) {
+  int32_t SetLoudspeakerStatus(bool enable) override {
     FATAL() << "Should never be called";
     return -1;
   }

-  int32_t GetLoudspeakerStatus(
-      bool& enable) const {  // NOLINT
+  int32_t GetLoudspeakerStatus(bool& enable) const override {
     FATAL() << "Should never be called";
     return -1;
   }

-  bool BuiltInAECIsAvailable() const {
+  bool BuiltInAECIsAvailable() const override {
     return input_.BuiltInAECIsAvailable();
   }

-  int32_t EnableBuiltInAEC(bool enable) {
+  int32_t EnableBuiltInAEC(bool enable) override {
     return input_.EnableBuiltInAEC(enable);
   }

View File

@@ -0,0 +1,478 @@
/*
* Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
#include "webrtc/base/scoped_ptr.h"
#include "webrtc/modules/audio_device/android/ensure_initialized.h"
#include "webrtc/modules/audio_device/audio_device_impl.h"
#include "webrtc/modules/audio_device/include/audio_device.h"
#include "webrtc/system_wrappers/interface/event_wrapper.h"
#include "webrtc/system_wrappers/interface/scoped_refptr.h"
#include "webrtc/system_wrappers/interface/sleep.h"
#include "webrtc/test/testsupport/fileutils.h"
using std::cout;
using std::endl;
using ::testing::_;
using ::testing::AtLeast;
using ::testing::Gt;
using ::testing::Invoke;
using ::testing::NiceMock;
using ::testing::NotNull;
using ::testing::Return;
using ::testing::TestWithParam;
// #define ENABLE_PRINTF
#ifdef ENABLE_PRINTF
#define PRINT(...) printf(__VA_ARGS__);
#else
#define PRINT(...) ((void)0)
#endif
namespace webrtc {
// Perform all tests for the different audio layers listed in this array.
// See the INSTANTIATE_TEST_CASE_P statement for details.
// TODO(henrika): the test framework supports both Java and OpenSL ES based
// audio backends but there are currently some issues (crashes) in the
// OpenSL ES implementation, hence it is not added to kAudioLayers yet.
static const AudioDeviceModule::AudioLayer kAudioLayers[] = {
AudioDeviceModule::kAndroidJavaAudio
/*, AudioDeviceModule::kAndroidOpenSLESAudio */};
// Number of callbacks (input or output) the test waits for before we set
// an event indicating that the test was OK.
static const int kNumCallbacks = 10;
// Max amount of time we wait for an event to be set while counting callbacks.
static const int kTestTimeOutInMilliseconds = 10 * 1000;
// Average number of audio callbacks per second assuming 10ms packet size.
static const int kNumCallbacksPerSecond = 100;
// Play out a test file during this time (unit is in seconds).
static const int kFilePlayTimeInSec = 2;
// Fixed value for the recording delay using Java based audio backend.
// TODO(henrika): harmonize with OpenSL ES and look for possible improvements.
static const uint32_t kFixedRecordingDelay = 100;
static const int kBitsPerSample = 16;
static const int kBytesPerSample = kBitsPerSample / 8;
enum TransportType {
kPlayout = 0x1,
kRecording = 0x2,
};
// Simple helper struct for device specific audio parameters.
struct AudioParameters {
int playout_frames_per_buffer() const {
return playout_sample_rate / 100; // WebRTC uses 10 ms as buffer size.
}
int recording_frames_per_buffer() const {
return recording_sample_rate / 100;
}
int playout_sample_rate;
int recording_sample_rate;
int playout_channels;
int recording_channels;
};
class MockAudioTransport : public AudioTransport {
public:
explicit MockAudioTransport(int type)
: type_(type),
play_count_(0),
rec_count_(0),
file_size_in_bytes_(0),
sample_rate_(0),
file_pos_(0) {}
// Read file with name |file_name| into |file_| array to ensure that we
// only read from memory during the test. Note that, we only support mono
// files currently.
bool LoadFile(const std::string& file_name, int sample_rate) {
file_size_in_bytes_ = test::GetFileSize(file_name);
sample_rate_ = sample_rate;
EXPECT_GE(file_size_in_callbacks(), num_callbacks_);
const int num_16bit_samples =
test::GetFileSize(file_name) / kBytesPerSample;
file_.reset(new int16_t[num_16bit_samples]);
FILE* audio_file = fopen(file_name.c_str(), "rb");
EXPECT_NE(audio_file, nullptr);
int num_samples_read = fread(
file_.get(), sizeof(int16_t), num_16bit_samples, audio_file);
EXPECT_EQ(num_samples_read, num_16bit_samples);
fclose(audio_file);
return true;
}
MOCK_METHOD10(RecordedDataIsAvailable,
int32_t(const void* audioSamples,
const uint32_t nSamples,
const uint8_t nBytesPerSample,
const uint8_t nChannels,
const uint32_t samplesPerSec,
const uint32_t totalDelayMS,
const int32_t clockDrift,
const uint32_t currentMicLevel,
const bool keyPressed,
uint32_t& newMicLevel));
MOCK_METHOD8(NeedMorePlayData,
int32_t(const uint32_t nSamples,
const uint8_t nBytesPerSample,
const uint8_t nChannels,
const uint32_t samplesPerSec,
void* audioSamples,
uint32_t& nSamplesOut,
int64_t* elapsed_time_ms,
int64_t* ntp_time_ms));
void HandleCallbacks(EventWrapper* test_is_done, int num_callbacks) {
test_is_done_ = test_is_done;
num_callbacks_ = num_callbacks;
if (play_mode()) {
ON_CALL(*this, NeedMorePlayData(_, _, _, _, _, _, _, _))
.WillByDefault(
Invoke(this, &MockAudioTransport::RealNeedMorePlayData));
}
if (rec_mode()) {
ON_CALL(*this, RecordedDataIsAvailable(_, _, _, _, _, _, _, _, _, _))
.WillByDefault(
Invoke(this, &MockAudioTransport::RealRecordedDataIsAvailable));
}
}
int32_t RealRecordedDataIsAvailable(const void* audioSamples,
const uint32_t nSamples,
const uint8_t nBytesPerSample,
const uint8_t nChannels,
const uint32_t samplesPerSec,
const uint32_t totalDelayMS,
const int32_t clockDrift,
const uint32_t currentMicLevel,
const bool keyPressed,
uint32_t& newMicLevel) {
EXPECT_TRUE(rec_mode());
rec_count_++;
if (ReceivedEnoughCallbacks())
test_is_done_->Set();
return 0;
}
int32_t RealNeedMorePlayData(const uint32_t nSamples,
const uint8_t nBytesPerSample,
const uint8_t nChannels,
const uint32_t samplesPerSec,
void* audioSamples,
uint32_t& nSamplesOut,
int64_t* elapsed_time_ms,
int64_t* ntp_time_ms) {
EXPECT_TRUE(play_mode());
nSamplesOut = nSamples;
if (file_mode()) {
// Read samples from file stored in memory (at construction) and copy
// |nSamples| (<=> 10ms) to the |audioSamples| byte buffer.
memcpy(audioSamples,
static_cast<int16_t*> (&file_[file_pos_]),
nSamples * nBytesPerSample);
file_pos_ += nSamples;
}
play_count_++;
if (ReceivedEnoughCallbacks())
test_is_done_->Set();
return 0;
}
bool ReceivedEnoughCallbacks() {
bool recording_done = false;
if (rec_mode())
recording_done = rec_count_ >= num_callbacks_;
else
recording_done = true;
bool playout_done = false;
if (play_mode())
playout_done = play_count_ >= num_callbacks_;
else
playout_done = true;
return recording_done && playout_done;
}
bool play_mode() const { return type_ & kPlayout; }
bool rec_mode() const { return type_ & kRecording; }
bool file_mode() const { return file_.get() != nullptr; }
int file_size_in_seconds() const {
return (file_size_in_bytes_ / (kBytesPerSample * sample_rate_));
}
int file_size_in_callbacks() const {
return file_size_in_seconds() * kNumCallbacksPerSecond;
}
private:
EventWrapper* test_is_done_;
int num_callbacks_;
int type_;
int play_count_;
int rec_count_;
int file_size_in_bytes_;
int sample_rate_;
rtc::scoped_ptr<int16_t[]> file_;
int file_pos_;
};
// AudioDeviceTest is a value-parameterized test.
class AudioDeviceTest
: public testing::TestWithParam<AudioDeviceModule::AudioLayer> {
protected:
AudioDeviceTest()
: test_is_done_(EventWrapper::Create()) {
// One-time initialization of JVM and application context. Ensures that we
// can do calls between C++ and Java. Initializes both Java and OpenSL ES
// implementations.
webrtc::audiodevicemodule::EnsureInitialized();
// Creates an audio device based on the test parameter. See
// INSTANTIATE_TEST_CASE_P() for details.
audio_device_ = CreateAudioDevice();
EXPECT_NE(audio_device_.get(), nullptr);
EXPECT_EQ(0, audio_device_->Init());
CacheAudioParameters();
}
virtual ~AudioDeviceTest() {
EXPECT_EQ(0, audio_device_->Terminate());
}
int playout_sample_rate() const {
return parameters_.playout_sample_rate;
}
int recording_sample_rate() const {
return parameters_.recording_sample_rate;
}
int playout_channels() const {
return parameters_.playout_channels;
}
int recording_channels() const {
return parameters_.playout_channels;
}
int playout_frames_per_buffer() const {
return parameters_.playout_frames_per_buffer();
}
int recording_frames_per_buffer() const {
return parameters_.recording_frames_per_buffer();
}
scoped_refptr<AudioDeviceModule> audio_device() const {
return audio_device_;
}
scoped_refptr<AudioDeviceModule> CreateAudioDevice() {
scoped_refptr<AudioDeviceModule> module(
AudioDeviceModuleImpl::Create(0, GetParam()));
return module;
}
void CacheAudioParameters() {
AudioDeviceBuffer* audio_buffer =
static_cast<AudioDeviceModuleImpl*> (
audio_device_.get())->GetAudioDeviceBuffer();
parameters_.playout_sample_rate = audio_buffer->PlayoutSampleRate();
parameters_.recording_sample_rate = audio_buffer->RecordingSampleRate();
parameters_.playout_channels = audio_buffer->PlayoutChannels();
parameters_.recording_channels = audio_buffer->RecordingChannels();
}
// Returns file name relative to the resource root given a sample rate.
std::string GetFileName(int sample_rate) {
EXPECT_TRUE(sample_rate == 48000 || sample_rate == 44100);
char fname[64];
snprintf(fname,
sizeof(fname),
"audio_device/audio_short%d",
sample_rate / 1000);
std::string file_name(webrtc::test::ResourcePath(fname, "pcm"));
EXPECT_TRUE(test::FileExists(file_name));
#ifdef ENABLE_PRINTF
PRINT("file name: %s\n", file_name.c_str());
const int bytes = test::GetFileSize(file_name);
PRINT("file size: %d [bytes]\n", bytes);
PRINT("file size: %d [samples]\n", bytes / kBytesPerSample);
const int seconds = bytes / (sample_rate * kBytesPerSample);
PRINT("file size: %d [secs]\n", seconds);
PRINT("file size: %d [callbacks]\n", seconds * kNumCallbacksPerSecond);
#endif
return file_name;
}
void StartPlayout() {
EXPECT_FALSE(audio_device()->PlayoutIsInitialized());
EXPECT_FALSE(audio_device()->Playing());
EXPECT_EQ(0, audio_device()->InitPlayout());
EXPECT_TRUE(audio_device()->PlayoutIsInitialized());
EXPECT_EQ(0, audio_device()->StartPlayout());
EXPECT_TRUE(audio_device()->Playing());
}
void StopPlayout() {
EXPECT_EQ(0, audio_device()->StopPlayout());
EXPECT_FALSE(audio_device()->Playing());
}
void StartRecording() {
EXPECT_FALSE(audio_device()->RecordingIsInitialized());
EXPECT_FALSE(audio_device()->Recording());
EXPECT_EQ(0, audio_device()->InitRecording());
EXPECT_TRUE(audio_device()->RecordingIsInitialized());
EXPECT_EQ(0, audio_device()->StartRecording());
EXPECT_TRUE(audio_device()->Recording());
}
void StopRecording() {
EXPECT_EQ(0, audio_device()->StopRecording());
EXPECT_FALSE(audio_device()->Recording());
}
rtc::scoped_ptr<EventWrapper> test_is_done_;
scoped_refptr<AudioDeviceModule> audio_device_;
AudioParameters parameters_;
};
TEST_P(AudioDeviceTest, ConstructDestruct) {
// Using the test fixture to create and destruct the audio device module.
}
// Create an audio device instance and print out the native audio parameters.
TEST_P(AudioDeviceTest, AudioParameters) {
EXPECT_NE(0, playout_sample_rate());
PRINT("playout_sample_rate: %d\n", playout_sample_rate());
EXPECT_NE(0, recording_sample_rate());
PRINT("playout_sample_rate: %d\n", recording_sample_rate());
EXPECT_NE(0, playout_channels());
PRINT("playout_channels: %d\n", playout_channels());
EXPECT_NE(0, recording_channels());
PRINT("recording_channels: %d\n", recording_channels());
}
TEST_P(AudioDeviceTest, InitTerminate) {
// Initialization is part of the test fixture.
EXPECT_TRUE(audio_device()->Initialized());
EXPECT_EQ(0, audio_device()->Terminate());
EXPECT_FALSE(audio_device()->Initialized());
}
TEST_P(AudioDeviceTest, Devices) {
// Device enumeration is not supported. Verify fixed values only.
EXPECT_EQ(1, audio_device()->PlayoutDevices());
EXPECT_EQ(1, audio_device()->RecordingDevices());
}
// Tests that playout can be initiated, started and stopped.
TEST_P(AudioDeviceTest, StartStopPlayout) {
StartPlayout();
StopPlayout();
}
// Tests that recording can be initiated, started and stopped.
TEST_P(AudioDeviceTest, StartStopRecording) {
StartRecording();
StopRecording();
}
// Start playout and verify that the native audio layer starts asking for real
// audio samples to play out using the NeedMorePlayData callback.
TEST_P(AudioDeviceTest, StartPlayoutVerifyCallbacks) {
MockAudioTransport mock(kPlayout);
mock.HandleCallbacks(test_is_done_.get(), kNumCallbacks);
EXPECT_CALL(mock, NeedMorePlayData(playout_frames_per_buffer(),
kBytesPerSample,
playout_channels(),
playout_sample_rate(),
NotNull(),
_, _, _))
.Times(AtLeast(kNumCallbacks));
EXPECT_EQ(0, audio_device()->RegisterAudioCallback(&mock));
StartPlayout();
test_is_done_->Wait(kTestTimeOutInMilliseconds);
StopPlayout();
}
// Start recording and verify that the native audio layer starts feeding real
// audio samples via the RecordedDataIsAvailable callback.
TEST_P(AudioDeviceTest, StartRecordingVerifyCallbacks) {
MockAudioTransport mock(kRecording);
mock.HandleCallbacks(test_is_done_.get(), kNumCallbacks);
EXPECT_CALL(mock, RecordedDataIsAvailable(NotNull(),
recording_frames_per_buffer(),
kBytesPerSample,
recording_channels(),
recording_sample_rate(),
kFixedRecordingDelay,
0,
0,
false,
_))
.Times(AtLeast(kNumCallbacks));
EXPECT_EQ(0, audio_device()->RegisterAudioCallback(&mock));
StartRecording();
test_is_done_->Wait(kTestTimeOutInMilliseconds);
StopRecording();
}
// Start playout and recording (full-duplex audio) and verify that audio is
// active in both directions.
TEST_P(AudioDeviceTest, StartPlayoutAndRecordingVerifyCallbacks) {
MockAudioTransport mock(kPlayout | kRecording);
mock.HandleCallbacks(test_is_done_.get(), kNumCallbacks);
EXPECT_CALL(mock, NeedMorePlayData(playout_frames_per_buffer(),
kBytesPerSample,
playout_channels(),
playout_sample_rate(),
NotNull(),
_, _, _))
.Times(AtLeast(kNumCallbacks));
EXPECT_CALL(mock, RecordedDataIsAvailable(NotNull(),
recording_frames_per_buffer(),
kBytesPerSample,
recording_channels(),
recording_sample_rate(),
Gt(kFixedRecordingDelay),
0,
0,
false,
_))
.Times(AtLeast(kNumCallbacks));
EXPECT_EQ(0, audio_device()->RegisterAudioCallback(&mock));
StartPlayout();
StartRecording();
test_is_done_->Wait(kTestTimeOutInMilliseconds);
StopRecording();
StopPlayout();
}
// Start playout and read audio from an external PCM file when the audio layer
// asks for data to play out. Real audio is played out in this test but it does
// not contain any explicit verification that the audio quality is perfect.
TEST_P(AudioDeviceTest, RunPlayoutWithFileAsSource) {
// TODO(henrika): extend test when mono output is supported.
EXPECT_EQ(1, playout_channels());
NiceMock<MockAudioTransport> mock(kPlayout);
std::string file_name = GetFileName(playout_sample_rate());
mock.LoadFile(file_name, playout_sample_rate());
mock.HandleCallbacks(test_is_done_.get(),
kFilePlayTimeInSec * kNumCallbacksPerSecond);
EXPECT_EQ(0, audio_device()->RegisterAudioCallback(&mock));
StartPlayout();
test_is_done_->Wait(kTestTimeOutInMilliseconds);
StopPlayout();
}
INSTANTIATE_TEST_CASE_P(AudioDeviceTest, AudioDeviceTest,
::testing::ValuesIn(kAudioLayers));
} // namespace webrtc

View File

@@ -10,11 +10,15 @@
 #include "webrtc/modules/audio_device/android/audio_manager_jni.h"

+#include <android/log.h>
 #include <assert.h>

 #include "webrtc/modules/utility/interface/helpers_android.h"
 #include "webrtc/system_wrappers/interface/trace.h"

+#define TAG "AudioManagerJni"
+#define ALOGD(...) __android_log_print(ANDROID_LOG_DEBUG, TAG, __VA_ARGS__)
+
 namespace webrtc {

 static JavaVM* g_jvm_ = NULL;
@@ -40,15 +44,15 @@ AudioManagerJni::AudioManagerJni()
   SetNativeFrameSize(env);
 }

-void AudioManagerJni::SetAndroidAudioDeviceObjects(void* jvm, void* env,
-                                                   void* context) {
+void AudioManagerJni::SetAndroidAudioDeviceObjects(void* jvm, void* context) {
+  ALOGD("SetAndroidAudioDeviceObjects%s", GetThreadInfo().c_str());
   assert(jvm);
-  assert(env);
   assert(context);

   // Store global Java VM variables to be accessed by API calls.
   g_jvm_ = reinterpret_cast<JavaVM*>(jvm);
-  g_jni_env_ = reinterpret_cast<JNIEnv*>(env);
+  g_jni_env_ = GetEnv(g_jvm_);
   g_context_ = g_jni_env_->NewGlobalRef(reinterpret_cast<jobject>(context));

   // FindClass must be made in this function since this function's contract
@@ -69,6 +73,7 @@ void AudioManagerJni::SetAndroidAudioDeviceObjects(void* jvm, void* env,
 }

 void AudioManagerJni::ClearAndroidAudioDeviceObjects() {
+  ALOGD("ClearAndroidAudioDeviceObjects%s", GetThreadInfo().c_str());
   g_jni_env_->DeleteGlobalRef(g_audio_manager_class_);
   g_audio_manager_class_ = NULL;

   g_jni_env_->DeleteGlobalRef(g_context_);

View File

@@ -34,8 +34,7 @@ class AudioManagerJni {
   // It has to be called for this class' APIs to be successful. Calling
   // ClearAndroidAudioDeviceObjects will prevent this class' APIs to be called
   // successfully if SetAndroidAudioDeviceObjects is not called after it.
-  static void SetAndroidAudioDeviceObjects(void* jvm, void* env,
-                                           void* context);
+  static void SetAndroidAudioDeviceObjects(void* jvm, void* context);
   // This function must be called when the AudioManagerJni class is no
   // longer needed. It frees up the global references acquired in
   // SetAndroidAudioDeviceObjects.

View File

@@ -34,12 +34,10 @@ static JavaVM* g_jvm = NULL;
 static jobject g_context = NULL;
 static jclass g_audio_record_class = NULL;

-void AudioRecordJni::SetAndroidAudioDeviceObjects(void* jvm, void* env,
-                                                  void* context) {
+void AudioRecordJni::SetAndroidAudioDeviceObjects(void* jvm, void* context) {
   ALOGD("SetAndroidAudioDeviceObjects%s", GetThreadInfo().c_str());
   CHECK(jvm);
-  CHECK(env);
   CHECK(context);

   g_jvm = reinterpret_cast<JavaVM*>(jvm);
@@ -178,7 +176,7 @@ int32_t AudioRecordJni::StartRecording() {
 int32_t AudioRecordJni::StopRecording() {
   ALOGD("StopRecording%s", GetThreadInfo().c_str());
   DCHECK(thread_checker_.CalledOnValidThread());
-  if (!initialized_) {
+  if (!initialized_ || !recording_) {
     return 0;
   }
   AttachThreadScoped ats(g_jvm);
@@ -275,6 +273,10 @@ void JNICALL AudioRecordJni::DataIsRecorded(
 // the thread is 'AudioRecordThread'.
 void AudioRecordJni::OnDataIsRecorded(int length) {
   DCHECK(thread_checker_java_.CalledOnValidThread());
+  if (!audio_device_buffer_) {
+    ALOGE("AttachAudioBuffer has not been called!");
+    return;
+  }
   if (playout_delay_in_milliseconds_ == 0) {
     playout_delay_in_milliseconds_ = delay_provider_->PlayoutDelayMs();
     ALOGD("cached playout delay: %d", playout_delay_in_milliseconds_);
@@ -284,7 +286,9 @@ void AudioRecordJni::OnDataIsRecorded(int length) {
   audio_device_buffer_->SetVQEData(playout_delay_in_milliseconds_,
                                    kHardwareDelayInMilliseconds,
                                    0 /* clockDrift */);
-  audio_device_buffer_->DeliverRecordedData();
+  if (audio_device_buffer_->DeliverRecordedData() == -1) {
+    ALOGE("AudioDeviceBuffer::DeliverRecordedData failed!");
+  }
 }

 bool AudioRecordJni::HasDeviceObjects() {

View File

@@ -48,11 +48,11 @@ class AudioRecordJni {
  public:
   // Use the invocation API to allow the native application to use the JNI
   // interface pointer to access VM features.
-  // |jvm| denotes the Java VM, |env| is a pointer to the JNI interface pointer
-  // and |context| corresponds to android.content.Context in Java.
+  // |jvm| denotes the Java VM and |context| corresponds to
+  // android.content.Context in Java.
   // This method also sets a global jclass object, |g_audio_record_class| for
   // the "org/webrtc/voiceengine/WebRtcAudioRecord"-class.
-  static void SetAndroidAudioDeviceObjects(void* jvm, void* env, void* context);
+  static void SetAndroidAudioDeviceObjects(void* jvm, void* context);
   // Always call this method after the object has been destructed. It deletes
   // existing global references and enables garbage collection.
   static void ClearAndroidAudioDeviceObjects();

View File

@@ -29,12 +29,10 @@ static JavaVM* g_jvm = NULL;
 static jobject g_context = NULL;
 static jclass g_audio_track_class = NULL;

-void AudioTrackJni::SetAndroidAudioDeviceObjects(void* jvm, void* env,
-                                                 void* context) {
+void AudioTrackJni::SetAndroidAudioDeviceObjects(void* jvm, void* context) {
   ALOGD("SetAndroidAudioDeviceObjects%s", GetThreadInfo().c_str());
   CHECK(jvm);
-  CHECK(env);
   CHECK(context);

   g_jvm = reinterpret_cast<JavaVM*>(jvm);
@@ -168,7 +166,7 @@ int32_t AudioTrackJni::StartPlayout() {
 int32_t AudioTrackJni::StopPlayout() {
   ALOGD("StopPlayout%s", GetThreadInfo().c_str());
   DCHECK(thread_checker_.CalledOnValidThread());
-  if (!initialized_) {
+  if (!initialized_ || !playing_) {
     return 0;
   }
   AttachThreadScoped ats(g_jvm);
@@ -245,10 +243,17 @@ void JNICALL AudioTrackJni::GetPlayoutData(
 // the thread is 'AudioRecordTrack'.
 void AudioTrackJni::OnGetPlayoutData(int length) {
   DCHECK(thread_checker_java_.CalledOnValidThread());
-  // ALOGD("OnGetPlayoutData(length=%d, delay=%d)", length);
   DCHECK_EQ(frames_per_buffer_, length / kBytesPerFrame);
+  if (!audio_device_buffer_) {
+    ALOGE("AttachAudioBuffer has not been called!");
+    return;
+  }
   // Pull decoded data (in 16-bit PCM format) from jitter buffer.
   int samples = audio_device_buffer_->RequestPlayoutData(frames_per_buffer_);
+  if (samples <= 0) {
+    ALOGE("AudioDeviceBuffer::RequestPlayoutData failed!");
+    return;
+  }
   DCHECK_EQ(samples, frames_per_buffer_);
   // Copy decoded data into common byte buffer to ensure that it can be
   // written to the Java based audio track.

View File

@@ -42,11 +42,11 @@ class AudioTrackJni : public PlayoutDelayProvider {
  public:
   // Use the invocation API to allow the native application to use the JNI
   // interface pointer to access VM features.
-  // |jvm| denotes the Java VM, |env| is a pointer to the JNI interface pointer
-  // and |context| corresponds to android.content.Context in Java.
+  // |jvm| denotes the Java VM and |context| corresponds to
+  // android.content.Context in Java.
   // This method also sets a global jclass object, |g_audio_track_class| for
   // the "org/webrtc/voiceengine/WebRtcAudioTrack"-class.
-  static void SetAndroidAudioDeviceObjects(void* jvm, void* env, void* context);
+  static void SetAndroidAudioDeviceObjects(void* jvm, void* context);
   // Always call this method after the object has been destructed. It deletes
   // existing global references and enables garbage collection.
   static void ClearAndroidAudioDeviceObjects();

View File

@@ -0,0 +1,51 @@
/*
* Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "webrtc/modules/audio_device/android/ensure_initialized.h"
#include <pthread.h>
#include "base/android/jni_android.h"
#include "webrtc/base/checks.h"
#include "webrtc/modules/audio_device/android/audio_device_template.h"
#include "webrtc/modules/audio_device/android/audio_record_jni.h"
#include "webrtc/modules/audio_device/android/audio_track_jni.h"
#include "webrtc/modules/audio_device/android/opensles_input.h"
#include "webrtc/modules/audio_device/android/opensles_output.h"
namespace webrtc {
namespace audiodevicemodule {
static pthread_once_t g_initialize_once = PTHREAD_ONCE_INIT;
void EnsureInitializedOnce() {
CHECK(::base::android::IsVMInitialized());
JNIEnv* jni = ::base::android::AttachCurrentThread();
JavaVM* jvm = NULL;
CHECK_EQ(0, jni->GetJavaVM(&jvm));
jobject context = ::base::android::GetApplicationContext();
// Provide JVM and context to Java and OpenSL ES implementations.
using AudioDeviceJava = AudioDeviceTemplate<AudioRecordJni, AudioTrackJni>;
AudioDeviceJava::SetAndroidAudioDeviceObjects(jvm, context);
// TODO(henrika): enable OpenSL ES when it has been refactored to avoid
// crashes.
// using AudioDeviceOpenSLES =
//     AudioDeviceTemplate<OpenSlesInput, OpenSlesOutput>;
// AudioDeviceOpenSLES::SetAndroidAudioDeviceObjects(jvm, context);
}
void EnsureInitialized() {
CHECK_EQ(0, pthread_once(&g_initialize_once, &EnsureInitializedOnce));
}
} // namespace audiodevicemodule
} // namespace webrtc

View File

@@ -0,0 +1,17 @@
/*
* Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
namespace webrtc {
namespace audiodevicemodule {
void EnsureInitialized();
} // namespace audiodevicemodule
} // namespace webrtc

View File

@@ -103,7 +103,7 @@ class WebRtcAudioTrack {
                                       AudioTrack.WRITE_BLOCKING);
       } else {
         bytesWritten = audioTrack.write(byteBuffer.array(),
-                                        0,
+                                        byteBuffer.arrayOffset(),
                                         sizeInBytes);
       }
       if (bytesWritten != sizeInBytes) {

View File

@@ -66,7 +66,6 @@ OpenSlesInput::~OpenSlesInput() {
 }

 int32_t OpenSlesInput::SetAndroidAudioDeviceObjects(void* javaVM,
-                                                    void* env,
                                                     void* context) {
   return 0;
 }

View File

@@ -39,7 +39,6 @@ class OpenSlesInput {
   ~OpenSlesInput();

   static int32_t SetAndroidAudioDeviceObjects(void* javaVM,
-                                              void* env,
                                               void* context);
   static void ClearAndroidAudioDeviceObjects();

View File

@@ -67,9 +67,8 @@ OpenSlesOutput::~OpenSlesOutput() {
 }

 int32_t OpenSlesOutput::SetAndroidAudioDeviceObjects(void* javaVM,
-                                                     void* env,
                                                      void* context) {
-  AudioManagerJni::SetAndroidAudioDeviceObjects(javaVM, env, context);
+  AudioManagerJni::SetAndroidAudioDeviceObjects(javaVM, context);
   return 0;
 }

View File

@@ -39,7 +39,6 @@ class OpenSlesOutput : public PlayoutDelayProvider {
   virtual ~OpenSlesOutput();

   static int32_t SetAndroidAudioDeviceObjects(void* javaVM,
-                                              void* env,
                                               void* context);
   static void ClearAndroidAudioDeviceObjects();

View File

@@ -270,7 +270,7 @@
         },
       ],
     }],
-    ['OS=="android" and enable_android_opensl==1', {
+    ['OS=="android"', {
       'targets': [
         {
          'target_name': 'audio_device_unittest',

View File

@@ -90,7 +90,6 @@ AudioDeviceModule* CreateAudioDeviceModule(
 AudioDeviceModule* AudioDeviceModuleImpl::Create(const int32_t id,
                                                  const AudioLayer audioLayer)
 {
-
     // Create the generic ref counted (platform independent) implementation.
     RefCountImpl<AudioDeviceModuleImpl>* audioDevice =
         new RefCountImpl<AudioDeviceModuleImpl>(id, audioLayer);
@@ -218,7 +217,7 @@ int32_t AudioDeviceModuleImpl::CreatePlatformSpecificObjects()
         ptrAudioDeviceUtility = new AudioDeviceUtilityDummy(Id());
     }
 #else
-    const AudioLayer audioLayer(PlatformAudioLayer());
+    AudioLayer audioLayer(PlatformAudioLayer());

     // Create the *Windows* implementation of the Audio Device
     //
@@ -273,22 +272,24 @@ int32_t AudioDeviceModuleImpl::CreatePlatformSpecificObjects()
     // Create the *Android OpenSLES* implementation of the Audio Device
     //
 #if defined(WEBRTC_ANDROID)
-    if (audioLayer == kPlatformDefaultAudio)
-    {
-        // AudioRecordJni provides hardware AEC and OpenSlesOutput low latency.
-#if defined(WEBRTC_ANDROID_OPENSLES)
-        ptrAudioDevice = new AudioDeviceTemplate<OpenSlesInput, OpenSlesOutput>(Id());
-        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
-                     "Android OpenSLES Audio APIs will be utilized");
-#else
-        ptrAudioDevice = new AudioDeviceTemplate<AudioRecordJni, AudioTrackJni>(Id());
-        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
-                     "Android JNI Audio APIs will be utilized");
+#ifdef WEBRTC_ANDROID_OPENSLES
+    // Force default audio layer to OpenSL ES if the special compiler flag
+    // (enable_android_opensl) has been set to one.
+    if (audioLayer == kPlatformDefaultAudio) {
+        audioLayer = kAndroidOpenSLESAudio;
+    }
 #endif
+    if (audioLayer == kPlatformDefaultAudio ||
+        audioLayer == kAndroidJavaAudio) {
+        ptrAudioDevice =
+            new AudioDeviceTemplate<AudioRecordJni, AudioTrackJni>(Id());
+    } else if (audioLayer == kAndroidOpenSLESAudio) {
+        // AudioRecordJni provides hardware AEC and OpenSlesOutput low latency.
+        ptrAudioDevice =
+            new AudioDeviceTemplate<OpenSlesInput, OpenSlesOutput>(Id());
     }
-    if (ptrAudioDevice != NULL)
-    {
+    if (ptrAudioDevice != NULL) {
         // Create the Android implementation of the Device Utility.
         ptrAudioDeviceUtility = new AudioDeviceUtilityAndroid(Id());
     }

View File

@@ -193,6 +193,10 @@ public:
 public:
     int32_t Id() {return _id;}

+    AudioDeviceBuffer* GetAudioDeviceBuffer() {
+        return &_audioDeviceBuffer;
+    }
+
 private:
     PlatformType Platform() const;
     AudioLayer PlatformAudioLayer() const;

View File

@@ -29,7 +29,9 @@ class AudioDeviceModule : public RefCountedModule {
     kWindowsCoreAudio = 2,
     kLinuxAlsaAudio = 3,
     kLinuxPulseAudio = 4,
-    kDummyAudio = 5
+    kAndroidJavaAudio = 5,
+    kAndroidOpenSLESAudio = 6,
+    kDummyAudio = 7
   };

   enum WindowsDeviceType {

View File

@@ -57,6 +57,7 @@
         'acm_receive_test',
         'acm_send_test',
         'audio_coding_module',
+        'audio_device',
        'audio_processing',
        'bitrate_controller',
        'CNG',
@@ -352,6 +353,18 @@
           'dependencies': [
             '<(DEPTH)/testing/android/native_test.gyp:native_test_native_code',
           ],
+          # Need to disable error due to the line in
+          # base/android/jni_android.h triggering it:
+          # const BASE_EXPORT jobject GetApplicationContext()
+          # error: type qualifiers ignored on function return type
+          'cflags': [
+            '-Wno-ignored-qualifiers',
+          ],
+          'sources': [
+            'audio_device/android/audio_device_unittest.cc',
+            'audio_device/android/ensure_initialized.cc',
+            'audio_device/android/ensure_initialized.h',
+          ],
         }],
       ],
       # Disable warnings to enable Win64 build, issue 1323.

View File

@@ -34,6 +34,9 @@
     '<(DEPTH)/resources/audio_coding/speech_mono_32_48kHz.pcm',
     '<(DEPTH)/resources/audio_coding/testfile32kHz.pcm',
     '<(DEPTH)/resources/audio_coding/teststereo32kHz.pcm',
+    '<(DEPTH)/resources/audio_device/audio_short16.pcm',
+    '<(DEPTH)/resources/audio_device/audio_short44.pcm',
+    '<(DEPTH)/resources/audio_device/audio_short48.pcm',
     '<(DEPTH)/resources/audio_processing/agc/agc_audio.pcm',
     '<(DEPTH)/resources/audio_processing/agc/agc_no_circular_buffer.dat',
     '<(DEPTH)/resources/audio_processing/agc/agc_pitch_gain.dat',

View File

@@ -27,7 +27,7 @@ int ViEAutoTestAndroid::RunAutotest(int testSelection, int subTestSelection,
   webrtc::SetRenderAndroidVM(javaVM);
 #ifndef WEBRTC_ANDROID_OPENSLES
   // voice engine calls into ADM directly
-  webrtc::VoiceEngine::SetAndroidObjects(javaVM, env, context);
+  webrtc::VoiceEngine::SetAndroidObjects(javaVM, context);
 #endif

   if (subTestSelection == 0) {

View File

@@ -86,7 +86,7 @@ public:
     static int SetTraceCallback(TraceCallback* callback);

 #if !defined(WEBRTC_CHROMIUM_BUILD)
-    static int SetAndroidObjects(void* javaVM, void* env, void* context);
+    static int SetAndroidObjects(void* javaVM, void* context);
 #endif

 protected:
View File

@@ -152,7 +152,7 @@ bool VoiceEngine::Delete(VoiceEngine*& voiceEngine)
 }

 #if !defined(WEBRTC_CHROMIUM_BUILD)
-int VoiceEngine::SetAndroidObjects(void* javaVM, void* env, void* context)
+int VoiceEngine::SetAndroidObjects(void* javaVM, void* context)
 {
 #ifdef WEBRTC_ANDROID
 #ifdef WEBRTC_ANDROID_OPENSLES
@@ -162,8 +162,8 @@ int VoiceEngine::SetAndroidObjects(void* javaVM, void* env, void* context)
     typedef AudioDeviceTemplate<AudioRecordJni, AudioTrackJni>
         AudioDeviceInstance;
 #endif
-    if (javaVM && env && context) {
-        AudioDeviceInstance::SetAndroidAudioDeviceObjects(javaVM, env, context);
+    if (javaVM && context) {
+        AudioDeviceInstance::SetAndroidAudioDeviceObjects(javaVM, context);
     } else {
         AudioDeviceInstance::ClearAndroidAudioDeviceObjects();
     }