Adds C++/JNI/Java unit test for audio device module on Android.
This CL adds support for unit tests of the AudioDeviceModule on Android using both Java and C++. The new framework uses ::testing::TestWithParam to support both Java-based audio and OpenSL ES-based audio. However, given existing issues in our OpenSL ES implementation, the list of test parameters only contains Java in this first version. OpenSL ES will be enabled as soon as the backend has been refactored.

It also:

- Removes the redundant JNIEnv* argument in webrtc::VoiceEngine::SetAndroidObjects().
- Modifies usage of enable_android_opensl and the WEBRTC_ANDROID_OPENSLES define.
- Adds kAndroidJavaAudio and kAndroidOpenSLESAudio to the AudioLayer enumerator.
- Fixes some bugs which were discovered when running the tests.

BUG=NONE
R=phoglund@webrtc.org

Review URL: https://webrtc-codereview.appspot.com/40069004

Cr-Commit-Position: refs/heads/master@{#8651}
git-svn-id: http://webrtc.googlecode.com/svn/trunk@8651 4adac7df-926f-26a2-2b94-8c16560cd09d
This commit is contained in: parent 1b32bbe0a7, commit 474d1eb223
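For readers unfamiliar with value-parameterized gtest, the skeleton below shows the pattern the new framework is built on. It is an illustrative sketch, not part of the CL (the real fixture, AudioDeviceTest, appears in full in audio_device_unittest.cc further down); the ExampleTest name and its trivial test body are hypothetical:

```cpp
#include "testing/gtest/include/gtest/gtest.h"
#include "webrtc/modules/audio_device/include/audio_device.h"

namespace webrtc {

// Each TEST_P below runs once per entry in this array. OpenSL ES stays
// commented out until the backend refactoring lands.
static const AudioDeviceModule::AudioLayer kAudioLayers[] = {
    AudioDeviceModule::kAndroidJavaAudio
    /*, AudioDeviceModule::kAndroidOpenSLESAudio */};

// A value-parameterized fixture: GetParam() yields the current audio layer.
class ExampleTest
    : public ::testing::TestWithParam<AudioDeviceModule::AudioLayer> {};

TEST_P(ExampleTest, LayerIsAnAndroidLayer) {
  EXPECT_TRUE(GetParam() == AudioDeviceModule::kAndroidJavaAudio ||
              GetParam() == AudioDeviceModule::kAndroidOpenSLESAudio);
}

// Instantiates every TEST_P above once per layer in kAudioLayers.
INSTANTIATE_TEST_CASE_P(ExampleTest, ExampleTest,
                        ::testing::ValuesIn(kAudioLayers));

}  // namespace webrtc
```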
@@ -964,7 +964,7 @@ JOW(jboolean, PeerConnectionFactory_initializeAndroidGlobals)(
     failure |= AndroidVideoCapturerJni::SetAndroidObjects(jni, context);
   }
   if (initialize_audio)
-    failure |= webrtc::VoiceEngine::SetAndroidObjects(GetJVM(), jni, context);
+    failure |= webrtc::VoiceEngine::SetAndroidObjects(GetJVM(), context);
   factory_static_initialized = true;
 }
 if (initialize_video) {
@@ -83,6 +83,7 @@
       },
       'dependencies': [
        '<(webrtc_root)/modules/modules.gyp:modules_unittests',
+        'audio_device_java',
       ],
       'includes': [
         '../../build/apk_test.gypi',
@@ -243,6 +244,17 @@
         '../../build/java.gypi',
       ],
     },
+    {
+      'target_name': 'audio_device_java',
+      'type': 'none',
+      'variables': {
+        'java_in_dir': '<(webrtc_root)/modules/audio_device/android/java',
+        'never_lint': 1,
+      },
+      'includes': [
+        '../../build/java.gypi',
+      ],
+    },
   ],
 }
@@ -43,7 +43,7 @@ JOWW(void, NativeWebRtcContextRegistry_register)(
         "Failed to register android objects to video capture");
   CHECK(webrtc::SetRenderAndroidVM(g_vm) == 0,
         "Failed to register android objects to video render");
-  CHECK(webrtc::VoiceEngine::SetAndroidObjects(g_vm, jni, context) == 0,
+  CHECK(webrtc::VoiceEngine::SetAndroidObjects(g_vm, context) == 0,
         "Failed to register android objects to voice engine");
 }
@@ -54,7 +54,7 @@ JOWW(void, NativeWebRtcContextRegistry_unRegister)(
         "Failed to unregister android objects from video capture");
   CHECK(webrtc::SetRenderAndroidVM(NULL) == 0,
         "Failed to unregister android objects from video render");
-  CHECK(webrtc::VoiceEngine::SetAndroidObjects(NULL, NULL, NULL) == 0,
+  CHECK(webrtc::VoiceEngine::SetAndroidObjects(NULL, NULL) == 0,
         "Failed to unregister android objects from voice engine");
   webrtc_examples::ClearVieDeviceObjects();
   webrtc_examples::ClearVoeDeviceObjects();
@@ -84,8 +84,8 @@ class OpenSlRunner
       jobject obj,
       jobject context) {
     assert(!g_runner);  // Should only be called once.
-    OpenSlesInput::SetAndroidAudioDeviceObjects(g_vm, env, context);
-    OpenSlesOutput::SetAndroidAudioDeviceObjects(g_vm, env, context);
+    OpenSlesInput::SetAndroidAudioDeviceObjects(g_vm, context);
+    OpenSlesOutput::SetAndroidAudioDeviceObjects(g_vm, context);
     g_runner = new OpenSlRunner();
   }
@@ -23,10 +23,9 @@ template <class InputType, class OutputType>
 class AudioDeviceTemplate : public AudioDeviceGeneric {
  public:
   static void SetAndroidAudioDeviceObjects(void* javaVM,
-                                           void* env,
                                            void* context) {
-    OutputType::SetAndroidAudioDeviceObjects(javaVM, env, context);
-    InputType::SetAndroidAudioDeviceObjects(javaVM, env, context);
+    OutputType::SetAndroidAudioDeviceObjects(javaVM, context);
+    InputType::SetAndroidAudioDeviceObjects(javaVM, context);
   }

   static void ClearAndroidAudioDeviceObjects() {
@@ -44,35 +43,35 @@ class AudioDeviceTemplate : public AudioDeviceGeneric {
   }

   int32_t ActiveAudioLayer(
-      AudioDeviceModule::AudioLayer& audioLayer) const {  // NOLINT
+      AudioDeviceModule::AudioLayer& audioLayer) const override {
     audioLayer = AudioDeviceModule::kPlatformDefaultAudio;
     return 0;
   }

-  int32_t Init() {
+  int32_t Init() override {
     return output_.Init() | input_.Init();
   }

-  int32_t Terminate() {
+  int32_t Terminate() override {
     return output_.Terminate() | input_.Terminate();
   }

-  bool Initialized() const {
+  bool Initialized() const override {
     return true;
   }

-  int16_t PlayoutDevices() {
+  int16_t PlayoutDevices() override {
     return 1;
   }

-  int16_t RecordingDevices() {
+  int16_t RecordingDevices() override {
     return 1;
   }

   int32_t PlayoutDeviceName(
       uint16_t index,
       char name[kAdmMaxDeviceNameSize],
-      char guid[kAdmMaxGuidSize]) {
+      char guid[kAdmMaxGuidSize]) override {
     FATAL() << "Should never be called";
     return -1;
   }
@@ -80,370 +79,341 @@ class AudioDeviceTemplate : public AudioDeviceGeneric {
   int32_t RecordingDeviceName(
       uint16_t index,
       char name[kAdmMaxDeviceNameSize],
-      char guid[kAdmMaxGuidSize]) {
+      char guid[kAdmMaxGuidSize]) override {
     FATAL() << "Should never be called";
     return -1;
   }

-  int32_t SetPlayoutDevice(uint16_t index) {
+  int32_t SetPlayoutDevice(uint16_t index) override {
     // OK to use but it has no effect currently since device selection is
     // done using Android APIs instead.
     return 0;
   }

   int32_t SetPlayoutDevice(
-      AudioDeviceModule::WindowsDeviceType device) {
+      AudioDeviceModule::WindowsDeviceType device) override {
     FATAL() << "Should never be called";
     return -1;
   }

-  int32_t SetRecordingDevice(uint16_t index) {
+  int32_t SetRecordingDevice(uint16_t index) override {
     // OK to use but it has no effect currently since device selection is
     // done using Android APIs instead.
     return 0;
   }

   int32_t SetRecordingDevice(
-      AudioDeviceModule::WindowsDeviceType device) {
+      AudioDeviceModule::WindowsDeviceType device) override {
     FATAL() << "Should never be called";
     return -1;
   }

-  int32_t PlayoutIsAvailable(
-      bool& available) {  // NOLINT
+  int32_t PlayoutIsAvailable(bool& available) override {
     available = true;
     return 0;
   }

-  int32_t InitPlayout() {
+  int32_t InitPlayout() override {
     return output_.InitPlayout();
   }

-  bool PlayoutIsInitialized() const {
+  bool PlayoutIsInitialized() const override {
     return output_.PlayoutIsInitialized();
   }

-  int32_t RecordingIsAvailable(
-      bool& available) {  // NOLINT
+  int32_t RecordingIsAvailable(bool& available) override {
     available = true;
     return 0;
   }

-  int32_t InitRecording() {
+  int32_t InitRecording() override {
     return input_.InitRecording();
   }

-  bool RecordingIsInitialized() const {
+  bool RecordingIsInitialized() const override {
     return input_.RecordingIsInitialized();
   }

-  int32_t StartPlayout() {
+  int32_t StartPlayout() override {
     return output_.StartPlayout();
   }

-  int32_t StopPlayout() {
+  int32_t StopPlayout() override {
     return output_.StopPlayout();
   }

-  bool Playing() const {
+  bool Playing() const override {
     return output_.Playing();
   }

-  int32_t StartRecording() {
+  int32_t StartRecording() override {
     return input_.StartRecording();
   }

-  int32_t StopRecording() {
+  int32_t StopRecording() override {
     return input_.StopRecording();
   }

-  bool Recording() const {
+  bool Recording() const override {
     return input_.Recording();
   }

-  int32_t SetAGC(bool enable) {
+  int32_t SetAGC(bool enable) override {
     if (enable) {
       FATAL() << "Should never be called";
     }
     return -1;
   }

-  bool AGC() const {
+  bool AGC() const override {
     return false;
   }

-  int32_t SetWaveOutVolume(uint16_t volumeLeft,
-                           uint16_t volumeRight) {
+  int32_t SetWaveOutVolume(
+      uint16_t volumeLeft, uint16_t volumeRight) override {
     FATAL() << "Should never be called";
     return -1;
   }

   int32_t WaveOutVolume(
-      uint16_t& volumeLeft,           // NOLINT
-      uint16_t& volumeRight) const {  // NOLINT
+      uint16_t& volumeLeft, uint16_t& volumeRight) const override {
     FATAL() << "Should never be called";
     return -1;
   }

-  int32_t InitSpeaker() {
+  int32_t InitSpeaker() override {
     return 0;
   }

-  bool SpeakerIsInitialized() const {
+  bool SpeakerIsInitialized() const override {
     return true;
   }

-  int32_t InitMicrophone() {
+  int32_t InitMicrophone() override {
     return 0;
   }

-  bool MicrophoneIsInitialized() const {
+  bool MicrophoneIsInitialized() const override {
     return true;
   }

-  int32_t SpeakerVolumeIsAvailable(
-      bool& available) {  // NOLINT
+  int32_t SpeakerVolumeIsAvailable(bool& available) override {
     available = false;
     FATAL() << "Should never be called";
     return -1;
   }

   // TODO(henrika): add support if/when needed.
-  int32_t SetSpeakerVolume(uint32_t volume) {
+  int32_t SetSpeakerVolume(uint32_t volume) override {
     FATAL() << "Should never be called";
     return -1;
   }

   // TODO(henrika): add support if/when needed.
-  int32_t SpeakerVolume(
-      uint32_t& volume) const {  // NOLINT
+  int32_t SpeakerVolume(uint32_t& volume) const override {
     FATAL() << "Should never be called";
     return -1;
   }

   // TODO(henrika): add support if/when needed.
-  int32_t MaxSpeakerVolume(
-      uint32_t& maxVolume) const {  // NOLINT
+  int32_t MaxSpeakerVolume(uint32_t& maxVolume) const override {
     FATAL() << "Should never be called";
     return -1;
   }

   // TODO(henrika): add support if/when needed.
-  int32_t MinSpeakerVolume(
-      uint32_t& minVolume) const {  // NOLINT
+  int32_t MinSpeakerVolume(uint32_t& minVolume) const override {
     FATAL() << "Should never be called";
     return -1;
   }

-  int32_t SpeakerVolumeStepSize(
-      uint16_t& stepSize) const {  // NOLINT
+  int32_t SpeakerVolumeStepSize(uint16_t& stepSize) const override {
     FATAL() << "Should never be called";
     return -1;
   }

-  int32_t MicrophoneVolumeIsAvailable(
-      bool& available) {  // NOLINT
+  int32_t MicrophoneVolumeIsAvailable(bool& available) override {
     available = false;
     FATAL() << "Should never be called";
     return -1;
   }

-  int32_t SetMicrophoneVolume(uint32_t volume) {
+  int32_t SetMicrophoneVolume(uint32_t volume) override {
     FATAL() << "Should never be called";
     return -1;
   }

-  int32_t MicrophoneVolume(
-      uint32_t& volume) const {  // NOLINT
+  int32_t MicrophoneVolume(uint32_t& volume) const override {
     FATAL() << "Should never be called";
     return -1;
   }

-  int32_t MaxMicrophoneVolume(
-      uint32_t& maxVolume) const {  // NOLINT
+  int32_t MaxMicrophoneVolume(uint32_t& maxVolume) const override {
     FATAL() << "Should never be called";
     return -1;
   }

-  int32_t MinMicrophoneVolume(
-      uint32_t& minVolume) const {  // NOLINT
+  int32_t MinMicrophoneVolume(uint32_t& minVolume) const override {
     FATAL() << "Should never be called";
     return -1;
   }

-  int32_t MicrophoneVolumeStepSize(
-      uint16_t& stepSize) const {  // NOLINT
+  int32_t MicrophoneVolumeStepSize(uint16_t& stepSize) const override {
     FATAL() << "Should never be called";
     return -1;
   }

-  int32_t SpeakerMuteIsAvailable(
-      bool& available) {  // NOLINT
+  int32_t SpeakerMuteIsAvailable(bool& available) override {
     FATAL() << "Should never be called";
     return -1;
   }

-  int32_t SetSpeakerMute(bool enable) {
+  int32_t SetSpeakerMute(bool enable) override {
     FATAL() << "Should never be called";
     return -1;
   }

-  int32_t SpeakerMute(
-      bool& enabled) const {  // NOLINT
+  int32_t SpeakerMute(bool& enabled) const override {
     FATAL() << "Should never be called";
     return -1;
   }

-  int32_t MicrophoneMuteIsAvailable(
-      bool& available) {  // NOLINT
+  int32_t MicrophoneMuteIsAvailable(bool& available) override {
     FATAL() << "Not implemented";
     return -1;
   }

-  int32_t SetMicrophoneMute(bool enable) {
+  int32_t SetMicrophoneMute(bool enable) override {
     FATAL() << "Not implemented";
     return -1;
   }

-  int32_t MicrophoneMute(
-      bool& enabled) const {  // NOLINT
+  int32_t MicrophoneMute(bool& enabled) const override {
     FATAL() << "Not implemented";
     return -1;
   }

-  int32_t MicrophoneBoostIsAvailable(
-      bool& available) {  // NOLINT
+  int32_t MicrophoneBoostIsAvailable(bool& available) override {
     FATAL() << "Should never be called";
     return -1;
   }

-  int32_t SetMicrophoneBoost(bool enable) {
+  int32_t SetMicrophoneBoost(bool enable) override {
     FATAL() << "Should never be called";
     return -1;
   }

-  int32_t MicrophoneBoost(
-      bool& enabled) const {  // NOLINT
+  int32_t MicrophoneBoost(bool& enabled) const override {
     FATAL() << "Should never be called";
     return -1;
   }

-  int32_t StereoPlayoutIsAvailable(
-      bool& available) {  // NOLINT
+  int32_t StereoPlayoutIsAvailable(bool& available) override {
     available = false;
     return 0;
   }

-  int32_t SetStereoPlayout(bool enable) {
+  // TODO(henrika): add support.
+  int32_t SetStereoPlayout(bool enable) override {
     return -1;
   }

-  int32_t StereoPlayout(
-      bool& enabled) const {  // NOLINT
+  // TODO(henrika): add support.
+  int32_t StereoPlayout(bool& enabled) const override {
     enabled = false;
     FATAL() << "Should never be called";
     return -1;
   }

-  int32_t StereoRecordingIsAvailable(
-      bool& available) {  // NOLINT
+  int32_t StereoRecordingIsAvailable(bool& available) override {
     available = false;
     return 0;
   }

-  int32_t SetStereoRecording(bool enable) {
+  int32_t SetStereoRecording(bool enable) override {
     return -1;
   }

-  int32_t StereoRecording(
-      bool& enabled) const {  // NOLINT
+  int32_t StereoRecording(bool& enabled) const override {
     enabled = false;
     return 0;
   }

   int32_t SetPlayoutBuffer(
-      const AudioDeviceModule::BufferType type,
-      uint16_t sizeMS) {
+      const AudioDeviceModule::BufferType type, uint16_t sizeMS) override {
     FATAL() << "Should never be called";
     return -1;
   }

   int32_t PlayoutBuffer(
-      AudioDeviceModule::BufferType& type,
-      uint16_t& sizeMS) const {  // NOLINT
+      AudioDeviceModule::BufferType& type, uint16_t& sizeMS) const override {
     FATAL() << "Should never be called";
     return -1;
   }

-  int32_t PlayoutDelay(
-      uint16_t& delayMS) const {  // NOLINT
+  int32_t PlayoutDelay(uint16_t& delayMS) const override {
     return output_.PlayoutDelay(delayMS);
   }

-  int32_t RecordingDelay(
-      uint16_t& delayMS) const {  // NOLINT
+  int32_t RecordingDelay(uint16_t& delayMS) const override {
     return input_.RecordingDelay(delayMS);
   }

-  int32_t CPULoad(
-      uint16_t& load) const {  // NOLINT
+  int32_t CPULoad(uint16_t& load) const override {
     FATAL() << "Should never be called";
     return -1;
   }

-  bool PlayoutWarning() const {
+  bool PlayoutWarning() const override {
     return false;
   }

-  bool PlayoutError() const {
+  bool PlayoutError() const override {
     return false;
   }

-  bool RecordingWarning() const {
+  bool RecordingWarning() const override {
     return false;
   }

-  bool RecordingError() const {
+  bool RecordingError() const override {
     return false;
   }

-  void ClearPlayoutWarning() {}
+  void ClearPlayoutWarning() override {}

-  void ClearPlayoutError() {}
+  void ClearPlayoutError() override {}

-  void ClearRecordingWarning() {}
+  void ClearRecordingWarning() override {}

-  void ClearRecordingError() {}
+  void ClearRecordingError() override {}

-  void AttachAudioBuffer(
-      AudioDeviceBuffer* audioBuffer) {
+  void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) override {
     output_.AttachAudioBuffer(audioBuffer);
     input_.AttachAudioBuffer(audioBuffer);
   }

   // TODO(henrika): remove
-  int32_t SetPlayoutSampleRate(
-      const uint32_t samplesPerSec) {
+  int32_t SetPlayoutSampleRate(const uint32_t samplesPerSec) override {
     FATAL() << "Should never be called";
     return -1;
   }

-  int32_t SetLoudspeakerStatus(bool enable) {
+  int32_t SetLoudspeakerStatus(bool enable) override {
     FATAL() << "Should never be called";
     return -1;
   }

-  int32_t GetLoudspeakerStatus(
-      bool& enable) const {  // NOLINT
+  int32_t GetLoudspeakerStatus(bool& enable) const override {
     FATAL() << "Should never be called";
     return -1;
   }

-  bool BuiltInAECIsAvailable() const {
+  bool BuiltInAECIsAvailable() const override {
     return input_.BuiltInAECIsAvailable();
   }

-  int32_t EnableBuiltInAEC(bool enable) {
+  int32_t EnableBuiltInAEC(bool enable) override {
     return input_.EnableBuiltInAEC(enable);
   }
webrtc/modules/audio_device/android/audio_device_unittest.cc (new file, 478 lines)
@@ -0,0 +1,478 @@
/*
 * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
#include "webrtc/base/scoped_ptr.h"
#include "webrtc/modules/audio_device/android/ensure_initialized.h"
#include "webrtc/modules/audio_device/audio_device_impl.h"
#include "webrtc/modules/audio_device/include/audio_device.h"
#include "webrtc/system_wrappers/interface/event_wrapper.h"
#include "webrtc/system_wrappers/interface/scoped_refptr.h"
#include "webrtc/system_wrappers/interface/sleep.h"
#include "webrtc/test/testsupport/fileutils.h"

using std::cout;
using std::endl;
using ::testing::_;
using ::testing::AtLeast;
using ::testing::Gt;
using ::testing::Invoke;
using ::testing::NiceMock;
using ::testing::NotNull;
using ::testing::Return;
using ::testing::TestWithParam;

// #define ENABLE_PRINTF
#ifdef ENABLE_PRINTF
#define PRINT(...) printf(__VA_ARGS__);
#else
#define PRINT(...) ((void)0)
#endif

namespace webrtc {

// Perform all tests for the different audio layers listed in this array.
// See the INSTANTIATE_TEST_CASE_P statement for details.
// TODO(henrika): the test framework supports both Java and OpenSL ES based
// audio backends but there are currently some issues (crashes) in the
// OpenSL ES implementation, hence it is not added to kAudioLayers yet.
static const AudioDeviceModule::AudioLayer kAudioLayers[] = {
    AudioDeviceModule::kAndroidJavaAudio
    /*, AudioDeviceModule::kAndroidOpenSLESAudio */};
// Number of callbacks (input or output) the test waits for before we set
// an event indicating that the test was OK.
static const int kNumCallbacks = 10;
// Max amount of time we wait for an event to be set while counting callbacks.
static const int kTestTimeOutInMilliseconds = 10 * 1000;
// Average number of audio callbacks per second assuming 10ms packet size.
static const int kNumCallbacksPerSecond = 100;
// Play out a test file during this time (unit is in seconds).
static const int kFilePlayTimeInSec = 2;
// Fixed value for the recording delay using Java based audio backend.
// TODO(henrika): harmonize with OpenSL ES and look for possible improvements.
static const uint32_t kFixedRecordingDelay = 100;
static const int kBitsPerSample = 16;
static const int kBytesPerSample = kBitsPerSample / 8;

enum TransportType {
  kPlayout = 0x1,
  kRecording = 0x2,
};

// Simple helper struct for device specific audio parameters.
struct AudioParameters {
  int playout_frames_per_buffer() const {
    return playout_sample_rate / 100;  // WebRTC uses 10 ms as buffer size.
  }
  int recording_frames_per_buffer() const {
    return recording_sample_rate / 100;
  }
  int playout_sample_rate;
  int recording_sample_rate;
  int playout_channels;
  int recording_channels;
};

class MockAudioTransport : public AudioTransport {
 public:
  explicit MockAudioTransport(int type)
      : type_(type),
        play_count_(0),
        rec_count_(0),
        file_size_in_bytes_(0),
        sample_rate_(0),
        file_pos_(0) {}

  // Read file with name |file_name| into |file_| array to ensure that we
  // only read from memory during the test. Note that, we only support mono
  // files currently.
  bool LoadFile(const std::string& file_name, int sample_rate) {
    file_size_in_bytes_ = test::GetFileSize(file_name);
    sample_rate_ = sample_rate;
    EXPECT_GE(file_size_in_callbacks(), num_callbacks_);
    const int num_16bit_samples =
        test::GetFileSize(file_name) / kBytesPerSample;
    file_.reset(new int16_t[num_16bit_samples]);
    FILE* audio_file = fopen(file_name.c_str(), "rb");
    EXPECT_NE(audio_file, nullptr);
    int num_samples_read = fread(
        file_.get(), sizeof(int16_t), num_16bit_samples, audio_file);
    EXPECT_EQ(num_samples_read, num_16bit_samples);
    fclose(audio_file);
    return true;
  }

  MOCK_METHOD10(RecordedDataIsAvailable,
                int32_t(const void* audioSamples,
                        const uint32_t nSamples,
                        const uint8_t nBytesPerSample,
                        const uint8_t nChannels,
                        const uint32_t samplesPerSec,
                        const uint32_t totalDelayMS,
                        const int32_t clockDrift,
                        const uint32_t currentMicLevel,
                        const bool keyPressed,
                        uint32_t& newMicLevel));
  MOCK_METHOD8(NeedMorePlayData,
               int32_t(const uint32_t nSamples,
                       const uint8_t nBytesPerSample,
                       const uint8_t nChannels,
                       const uint32_t samplesPerSec,
                       void* audioSamples,
                       uint32_t& nSamplesOut,
                       int64_t* elapsed_time_ms,
                       int64_t* ntp_time_ms));

  void HandleCallbacks(EventWrapper* test_is_done, int num_callbacks) {
    test_is_done_ = test_is_done;
    num_callbacks_ = num_callbacks;
    if (play_mode()) {
      ON_CALL(*this, NeedMorePlayData(_, _, _, _, _, _, _, _))
          .WillByDefault(
              Invoke(this, &MockAudioTransport::RealNeedMorePlayData));
    }
    if (rec_mode()) {
      ON_CALL(*this, RecordedDataIsAvailable(_, _, _, _, _, _, _, _, _, _))
          .WillByDefault(
              Invoke(this, &MockAudioTransport::RealRecordedDataIsAvailable));
    }
  }

  int32_t RealRecordedDataIsAvailable(const void* audioSamples,
                                      const uint32_t nSamples,
                                      const uint8_t nBytesPerSample,
                                      const uint8_t nChannels,
                                      const uint32_t samplesPerSec,
                                      const uint32_t totalDelayMS,
                                      const int32_t clockDrift,
                                      const uint32_t currentMicLevel,
                                      const bool keyPressed,
                                      uint32_t& newMicLevel) {
    EXPECT_TRUE(rec_mode());
    rec_count_++;
    if (ReceivedEnoughCallbacks())
      test_is_done_->Set();
    return 0;
  }

  int32_t RealNeedMorePlayData(const uint32_t nSamples,
                               const uint8_t nBytesPerSample,
                               const uint8_t nChannels,
                               const uint32_t samplesPerSec,
                               void* audioSamples,
                               uint32_t& nSamplesOut,
                               int64_t* elapsed_time_ms,
                               int64_t* ntp_time_ms) {
    EXPECT_TRUE(play_mode());
    nSamplesOut = nSamples;
    if (file_mode()) {
      // Read samples from file stored in memory (at construction) and copy
      // |nSamples| (<=> 10ms) to the |audioSamples| byte buffer.
      memcpy(audioSamples,
             static_cast<int16_t*>(&file_[file_pos_]),
             nSamples * nBytesPerSample);
      file_pos_ += nSamples;
    }
    play_count_++;
    if (ReceivedEnoughCallbacks())
      test_is_done_->Set();
    return 0;
  }

  bool ReceivedEnoughCallbacks() {
    bool recording_done = false;
    if (rec_mode())
      recording_done = rec_count_ >= num_callbacks_;
    else
      recording_done = true;

    bool playout_done = false;
    if (play_mode())
      playout_done = play_count_ >= num_callbacks_;
    else
      playout_done = true;

    return recording_done && playout_done;
  }

  bool play_mode() const { return type_ & kPlayout; }
  bool rec_mode() const { return type_ & kRecording; }
  bool file_mode() const { return file_.get() != nullptr; }
  int file_size_in_seconds() const {
    return (file_size_in_bytes_ / (kBytesPerSample * sample_rate_));
  }
  int file_size_in_callbacks() const {
    return file_size_in_seconds() * kNumCallbacksPerSecond;
  }

 private:
  EventWrapper* test_is_done_;
  int num_callbacks_;
  int type_;
  int play_count_;
  int rec_count_;
  int file_size_in_bytes_;
  int sample_rate_;
  rtc::scoped_ptr<int16_t[]> file_;
  int file_pos_;
};

// AudioDeviceTest is a value-parameterized test.
class AudioDeviceTest
    : public testing::TestWithParam<AudioDeviceModule::AudioLayer> {
 protected:
  AudioDeviceTest()
      : test_is_done_(EventWrapper::Create()) {
    // One-time initialization of JVM and application context. Ensures that we
    // can do calls between C++ and Java. Initializes both Java and OpenSL ES
    // implementations.
    webrtc::audiodevicemodule::EnsureInitialized();
    // Creates an audio device based on the test parameter. See
    // INSTANTIATE_TEST_CASE_P() for details.
    audio_device_ = CreateAudioDevice();
    EXPECT_NE(audio_device_.get(), nullptr);
    EXPECT_EQ(0, audio_device_->Init());
    CacheAudioParameters();
  }
  virtual ~AudioDeviceTest() {
    EXPECT_EQ(0, audio_device_->Terminate());
  }

  int playout_sample_rate() const {
    return parameters_.playout_sample_rate;
  }
  int recording_sample_rate() const {
    return parameters_.recording_sample_rate;
  }
  int playout_channels() const {
    return parameters_.playout_channels;
  }
  int recording_channels() const {
    return parameters_.recording_channels;
  }
  int playout_frames_per_buffer() const {
    return parameters_.playout_frames_per_buffer();
  }
  int recording_frames_per_buffer() const {
    return parameters_.recording_frames_per_buffer();
  }

  scoped_refptr<AudioDeviceModule> audio_device() const {
    return audio_device_;
  }

  scoped_refptr<AudioDeviceModule> CreateAudioDevice() {
    scoped_refptr<AudioDeviceModule> module(
        AudioDeviceModuleImpl::Create(0, GetParam()));
    return module;
  }

  void CacheAudioParameters() {
    AudioDeviceBuffer* audio_buffer =
        static_cast<AudioDeviceModuleImpl*>(
            audio_device_.get())->GetAudioDeviceBuffer();
    parameters_.playout_sample_rate = audio_buffer->PlayoutSampleRate();
    parameters_.recording_sample_rate = audio_buffer->RecordingSampleRate();
    parameters_.playout_channels = audio_buffer->PlayoutChannels();
    parameters_.recording_channels = audio_buffer->RecordingChannels();
  }

  // Returns file name relative to the resource root given a sample rate.
  std::string GetFileName(int sample_rate) {
    EXPECT_TRUE(sample_rate == 48000 || sample_rate == 44100);
    char fname[64];
    snprintf(fname,
             sizeof(fname),
             "audio_device/audio_short%d",
             sample_rate / 1000);
    std::string file_name(webrtc::test::ResourcePath(fname, "pcm"));
    EXPECT_TRUE(test::FileExists(file_name));
#ifdef ENABLE_PRINTF
    PRINT("file name: %s\n", file_name.c_str());
    const int bytes = test::GetFileSize(file_name);
    PRINT("file size: %d [bytes]\n", bytes);
    PRINT("file size: %d [samples]\n", bytes / kBytesPerSample);
    const int seconds = bytes / (sample_rate * kBytesPerSample);
    PRINT("file size: %d [secs]\n", seconds);
    PRINT("file size: %d [callbacks]\n", seconds * kNumCallbacksPerSecond);
#endif
    return file_name;
  }

  void StartPlayout() {
    EXPECT_FALSE(audio_device()->PlayoutIsInitialized());
    EXPECT_FALSE(audio_device()->Playing());
    EXPECT_EQ(0, audio_device()->InitPlayout());
    EXPECT_TRUE(audio_device()->PlayoutIsInitialized());
    EXPECT_EQ(0, audio_device()->StartPlayout());
    EXPECT_TRUE(audio_device()->Playing());
  }

  void StopPlayout() {
    EXPECT_EQ(0, audio_device()->StopPlayout());
    EXPECT_FALSE(audio_device()->Playing());
  }

  void StartRecording() {
    EXPECT_FALSE(audio_device()->RecordingIsInitialized());
    EXPECT_FALSE(audio_device()->Recording());
    EXPECT_EQ(0, audio_device()->InitRecording());
    EXPECT_TRUE(audio_device()->RecordingIsInitialized());
    EXPECT_EQ(0, audio_device()->StartRecording());
    EXPECT_TRUE(audio_device()->Recording());
  }

  void StopRecording() {
    EXPECT_EQ(0, audio_device()->StopRecording());
    EXPECT_FALSE(audio_device()->Recording());
  }

  rtc::scoped_ptr<EventWrapper> test_is_done_;
  scoped_refptr<AudioDeviceModule> audio_device_;
  AudioParameters parameters_;
};

TEST_P(AudioDeviceTest, ConstructDestruct) {
  // Using the test fixture to create and destruct the audio device module.
}

// Create an audio device instance and print out the native audio parameters.
TEST_P(AudioDeviceTest, AudioParameters) {
  EXPECT_NE(0, playout_sample_rate());
  PRINT("playout_sample_rate: %d\n", playout_sample_rate());
  EXPECT_NE(0, recording_sample_rate());
  PRINT("recording_sample_rate: %d\n", recording_sample_rate());
  EXPECT_NE(0, playout_channels());
  PRINT("playout_channels: %d\n", playout_channels());
  EXPECT_NE(0, recording_channels());
  PRINT("recording_channels: %d\n", recording_channels());
}

TEST_P(AudioDeviceTest, InitTerminate) {
  // Initialization is part of the test fixture.
  EXPECT_TRUE(audio_device()->Initialized());
  EXPECT_EQ(0, audio_device()->Terminate());
  EXPECT_FALSE(audio_device()->Initialized());
}

TEST_P(AudioDeviceTest, Devices) {
  // Device enumeration is not supported. Verify fixed values only.
  EXPECT_EQ(1, audio_device()->PlayoutDevices());
  EXPECT_EQ(1, audio_device()->RecordingDevices());
}

// Tests that playout can be initiated, started and stopped.
TEST_P(AudioDeviceTest, StartStopPlayout) {
  StartPlayout();
  StopPlayout();
}

// Tests that recording can be initiated, started and stopped.
TEST_P(AudioDeviceTest, StartStopRecording) {
  StartRecording();
  StopRecording();
}

// Start playout and verify that the native audio layer starts asking for real
// audio samples to play out using the NeedMorePlayData callback.
TEST_P(AudioDeviceTest, StartPlayoutVerifyCallbacks) {
  MockAudioTransport mock(kPlayout);
  mock.HandleCallbacks(test_is_done_.get(), kNumCallbacks);
  EXPECT_CALL(mock, NeedMorePlayData(playout_frames_per_buffer(),
                                     kBytesPerSample,
                                     playout_channels(),
                                     playout_sample_rate(),
                                     NotNull(),
                                     _, _, _))
      .Times(AtLeast(kNumCallbacks));
  EXPECT_EQ(0, audio_device()->RegisterAudioCallback(&mock));
  StartPlayout();
  test_is_done_->Wait(kTestTimeOutInMilliseconds);
  StopPlayout();
}

// Start recording and verify that the native audio layer starts feeding real
// audio samples via the RecordedDataIsAvailable callback.
TEST_P(AudioDeviceTest, StartRecordingVerifyCallbacks) {
  MockAudioTransport mock(kRecording);
  mock.HandleCallbacks(test_is_done_.get(), kNumCallbacks);
  EXPECT_CALL(mock, RecordedDataIsAvailable(NotNull(),
                                            recording_frames_per_buffer(),
                                            kBytesPerSample,
                                            recording_channels(),
                                            recording_sample_rate(),
                                            kFixedRecordingDelay,
                                            0,
                                            0,
                                            false,
                                            _))
      .Times(AtLeast(kNumCallbacks));

  EXPECT_EQ(0, audio_device()->RegisterAudioCallback(&mock));
  StartRecording();
  test_is_done_->Wait(kTestTimeOutInMilliseconds);
  StopRecording();
}

// Start playout and recording (full-duplex audio) and verify that audio is
// active in both directions.
TEST_P(AudioDeviceTest, StartPlayoutAndRecordingVerifyCallbacks) {
  MockAudioTransport mock(kPlayout | kRecording);
  mock.HandleCallbacks(test_is_done_.get(), kNumCallbacks);
  EXPECT_CALL(mock, NeedMorePlayData(playout_frames_per_buffer(),
                                     kBytesPerSample,
                                     playout_channels(),
                                     playout_sample_rate(),
                                     NotNull(),
                                     _, _, _))
      .Times(AtLeast(kNumCallbacks));
  EXPECT_CALL(mock, RecordedDataIsAvailable(NotNull(),
                                            recording_frames_per_buffer(),
                                            kBytesPerSample,
                                            recording_channels(),
                                            recording_sample_rate(),
                                            Gt(kFixedRecordingDelay),
                                            0,
                                            0,
                                            false,
                                            _))
      .Times(AtLeast(kNumCallbacks));
  EXPECT_EQ(0, audio_device()->RegisterAudioCallback(&mock));
  StartPlayout();
  StartRecording();
  test_is_done_->Wait(kTestTimeOutInMilliseconds);
  StopRecording();
  StopPlayout();
}

// Start playout and read audio from an external PCM file when the audio layer
// asks for data to play out. Real audio is played out in this test but it does
// not contain any explicit verification that the audio quality is perfect.
TEST_P(AudioDeviceTest, RunPlayoutWithFileAsSource) {
  // TODO(henrika): extend test when mono output is supported.
  EXPECT_EQ(1, playout_channels());
  NiceMock<MockAudioTransport> mock(kPlayout);
  std::string file_name = GetFileName(playout_sample_rate());
  mock.LoadFile(file_name, playout_sample_rate());
  mock.HandleCallbacks(test_is_done_.get(),
                       kFilePlayTimeInSec * kNumCallbacksPerSecond);
  EXPECT_EQ(0, audio_device()->RegisterAudioCallback(&mock));
  StartPlayout();
  test_is_done_->Wait(kTestTimeOutInMilliseconds);
  StopPlayout();
}

INSTANTIATE_TEST_CASE_P(AudioDeviceTest, AudioDeviceTest,
                        ::testing::ValuesIn(kAudioLayers));

}  // namespace webrtc
@@ -10,11 +10,15 @@

 #include "webrtc/modules/audio_device/android/audio_manager_jni.h"

+#include <android/log.h>
 #include <assert.h>

+#include "webrtc/modules/utility/interface/helpers_android.h"
 #include "webrtc/system_wrappers/interface/trace.h"

+#define TAG "AudioManagerJni"
+#define ALOGD(...) __android_log_print(ANDROID_LOG_DEBUG, TAG, __VA_ARGS__)

 namespace webrtc {

 static JavaVM* g_jvm_ = NULL;
@@ -40,15 +44,15 @@ AudioManagerJni::AudioManagerJni()
   SetNativeFrameSize(env);
 }

-void AudioManagerJni::SetAndroidAudioDeviceObjects(void* jvm, void* env,
-                                                   void* context) {
+void AudioManagerJni::SetAndroidAudioDeviceObjects(void* jvm, void* context) {
+  ALOGD("SetAndroidAudioDeviceObjects%s", GetThreadInfo().c_str());

   assert(jvm);
-  assert(env);
   assert(context);

   // Store global Java VM variables to be accessed by API calls.
   g_jvm_ = reinterpret_cast<JavaVM*>(jvm);
-  g_jni_env_ = reinterpret_cast<JNIEnv*>(env);
+  g_jni_env_ = GetEnv(g_jvm_);
   g_context_ = g_jni_env_->NewGlobalRef(reinterpret_cast<jobject>(context));

   // FindClass must be made in this function since this function's contract
@@ -69,6 +73,7 @@ void AudioManagerJni::SetAndroidAudioDeviceObjects(void* jvm, void* env,
 }

 void AudioManagerJni::ClearAndroidAudioDeviceObjects() {
+  ALOGD("ClearAndroidAudioDeviceObjects%s", GetThreadInfo().c_str());
   g_jni_env_->DeleteGlobalRef(g_audio_manager_class_);
   g_audio_manager_class_ = NULL;
   g_jni_env_->DeleteGlobalRef(g_context_);
@@ -34,8 +34,7 @@ class AudioManagerJni {
   // It has to be called for this class' APIs to be successful. Calling
   // ClearAndroidAudioDeviceObjects will prevent this class' APIs from being
   // called successfully if SetAndroidAudioDeviceObjects is not called after it.
-  static void SetAndroidAudioDeviceObjects(void* jvm, void* env,
-                                           void* context);
+  static void SetAndroidAudioDeviceObjects(void* jvm, void* context);
   // This function must be called when the AudioManagerJni class is no
   // longer needed. It frees up the global references acquired in
   // SetAndroidAudioDeviceObjects.
@@ -34,12 +34,10 @@ static JavaVM* g_jvm = NULL;
 static jobject g_context = NULL;
 static jclass g_audio_record_class = NULL;

-void AudioRecordJni::SetAndroidAudioDeviceObjects(void* jvm, void* env,
-                                                  void* context) {
+void AudioRecordJni::SetAndroidAudioDeviceObjects(void* jvm, void* context) {
   ALOGD("SetAndroidAudioDeviceObjects%s", GetThreadInfo().c_str());

   CHECK(jvm);
-  CHECK(env);
   CHECK(context);

   g_jvm = reinterpret_cast<JavaVM*>(jvm);
@@ -178,7 +176,7 @@ int32_t AudioRecordJni::StartRecording() {
 int32_t AudioRecordJni::StopRecording() {
   ALOGD("StopRecording%s", GetThreadInfo().c_str());
   DCHECK(thread_checker_.CalledOnValidThread());
-  if (!initialized_) {
+  if (!initialized_ || !recording_) {
     return 0;
   }
   AttachThreadScoped ats(g_jvm);
@@ -275,6 +273,10 @@ void JNICALL AudioRecordJni::DataIsRecorded(
 // the thread is 'AudioRecordThread'.
 void AudioRecordJni::OnDataIsRecorded(int length) {
   DCHECK(thread_checker_java_.CalledOnValidThread());
+  if (!audio_device_buffer_) {
+    ALOGE("AttachAudioBuffer has not been called!");
+    return;
+  }
   if (playout_delay_in_milliseconds_ == 0) {
     playout_delay_in_milliseconds_ = delay_provider_->PlayoutDelayMs();
     ALOGD("cached playout delay: %d", playout_delay_in_milliseconds_);
@@ -284,7 +286,9 @@ void AudioRecordJni::OnDataIsRecorded(int length) {
   audio_device_buffer_->SetVQEData(playout_delay_in_milliseconds_,
                                    kHardwareDelayInMilliseconds,
                                    0 /* clockDrift */);
-  audio_device_buffer_->DeliverRecordedData();
+  if (audio_device_buffer_->DeliverRecordedData() == -1) {
+    ALOGE("AudioDeviceBuffer::DeliverRecordedData failed!");
+  }
 }

 bool AudioRecordJni::HasDeviceObjects() {
@@ -48,11 +48,11 @@ class AudioRecordJni {
  public:
   // Use the invocation API to allow the native application to use the JNI
   // interface pointer to access VM features.
-  // |jvm| denotes the Java VM, |env| is a pointer to the JNI interface pointer
-  // and |context| corresponds to android.content.Context in Java.
+  // |jvm| denotes the Java VM and |context| corresponds to
+  // android.content.Context in Java.
   // This method also sets a global jclass object, |g_audio_record_class| for
   // the "org/webrtc/voiceengine/WebRtcAudioRecord"-class.
-  static void SetAndroidAudioDeviceObjects(void* jvm, void* env, void* context);
+  static void SetAndroidAudioDeviceObjects(void* jvm, void* context);
   // Always call this method after the object has been destructed. It deletes
   // existing global references and enables garbage collection.
   static void ClearAndroidAudioDeviceObjects();
@@ -29,12 +29,10 @@ static JavaVM* g_jvm = NULL;
 static jobject g_context = NULL;
 static jclass g_audio_track_class = NULL;

-void AudioTrackJni::SetAndroidAudioDeviceObjects(void* jvm, void* env,
-                                                 void* context) {
+void AudioTrackJni::SetAndroidAudioDeviceObjects(void* jvm, void* context) {
   ALOGD("SetAndroidAudioDeviceObjects%s", GetThreadInfo().c_str());

   CHECK(jvm);
-  CHECK(env);
   CHECK(context);

   g_jvm = reinterpret_cast<JavaVM*>(jvm);
@@ -168,7 +166,7 @@ int32_t AudioTrackJni::StartPlayout() {
 int32_t AudioTrackJni::StopPlayout() {
   ALOGD("StopPlayout%s", GetThreadInfo().c_str());
   DCHECK(thread_checker_.CalledOnValidThread());
-  if (!initialized_) {
+  if (!initialized_ || !playing_) {
     return 0;
   }
   AttachThreadScoped ats(g_jvm);
@@ -245,10 +243,17 @@ void JNICALL AudioTrackJni::GetPlayoutData(
 // the thread is 'AudioRecordTrack'.
 void AudioTrackJni::OnGetPlayoutData(int length) {
   DCHECK(thread_checker_java_.CalledOnValidThread());
-  // ALOGD("OnGetPlayoutData(length=%d, delay=%d)", length);
   DCHECK_EQ(frames_per_buffer_, length / kBytesPerFrame);
+  if (!audio_device_buffer_) {
+    ALOGE("AttachAudioBuffer has not been called!");
+    return;
+  }
   // Pull decoded data (in 16-bit PCM format) from jitter buffer.
   int samples = audio_device_buffer_->RequestPlayoutData(frames_per_buffer_);
+  if (samples <= 0) {
+    ALOGE("AudioDeviceBuffer::RequestPlayoutData failed!");
+    return;
+  }
   DCHECK_EQ(samples, frames_per_buffer_);
   // Copy decoded data into common byte buffer to ensure that it can be
   // written to the Java based audio track.
@@ -42,11 +42,11 @@ class AudioTrackJni : public PlayoutDelayProvider {
  public:
   // Use the invocation API to allow the native application to use the JNI
   // interface pointer to access VM features.
-  // |jvm| denotes the Java VM, |env| is a pointer to the JNI interface pointer
-  // and |context| corresponds to android.content.Context in Java.
+  // |jvm| denotes the Java VM and |context| corresponds to
+  // android.content.Context in Java.
   // This method also sets a global jclass object, |g_audio_track_class| for
   // the "org/webrtc/voiceengine/WebRtcAudioTrack"-class.
-  static void SetAndroidAudioDeviceObjects(void* jvm, void* env, void* context);
+  static void SetAndroidAudioDeviceObjects(void* jvm, void* context);
   // Always call this method after the object has been destructed. It deletes
   // existing global references and enables garbage collection.
   static void ClearAndroidAudioDeviceObjects();
webrtc/modules/audio_device/android/ensure_initialized.cc (new file, 51 lines)
@@ -0,0 +1,51 @@
/*
 * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include "webrtc/modules/audio_device/android/ensure_initialized.h"

#include <pthread.h>

#include "base/android/jni_android.h"
#include "webrtc/base/checks.h"
#include "webrtc/modules/audio_device/android/audio_device_template.h"
#include "webrtc/modules/audio_device/android/audio_record_jni.h"
#include "webrtc/modules/audio_device/android/audio_track_jni.h"
#include "webrtc/modules/audio_device/android/opensles_input.h"
#include "webrtc/modules/audio_device/android/opensles_output.h"

namespace webrtc {
namespace audiodevicemodule {

static pthread_once_t g_initialize_once = PTHREAD_ONCE_INIT;

void EnsureInitializedOnce() {
  CHECK(::base::android::IsVMInitialized());
  JNIEnv* jni = ::base::android::AttachCurrentThread();
  JavaVM* jvm = NULL;
  CHECK_EQ(0, jni->GetJavaVM(&jvm));
  jobject context = ::base::android::GetApplicationContext();

  // Provide JVM and context to Java and OpenSL ES implementations.
  using AudioDeviceJava = AudioDeviceTemplate<AudioRecordJni, AudioTrackJni>;
  AudioDeviceJava::SetAndroidAudioDeviceObjects(jvm, context);

  // TODO(henrika): enable OpenSL ES when it has been refactored to avoid
  // crashes.
  // using AudioDeviceOpenSLES =
  //     AudioDeviceTemplate<OpenSlesInput, OpenSlesOutput>;
  // AudioDeviceOpenSLES::SetAndroidAudioDeviceObjects(jvm, context);
}

void EnsureInitialized() {
  CHECK_EQ(0, pthread_once(&g_initialize_once, &EnsureInitializedOnce));
}

}  // namespace audiodevicemodule
}  // namespace webrtc
webrtc/modules/audio_device/android/ensure_initialized.h (new file, 17 lines)
@@ -0,0 +1,17 @@
/*
 * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

namespace webrtc {
namespace audiodevicemodule {

void EnsureInitialized();

}  // namespace audiodevicemodule
}  // namespace webrtc
@@ -103,7 +103,7 @@ class WebRtcAudioTrack {
                                         AudioTrack.WRITE_BLOCKING);
       } else {
         bytesWritten = audioTrack.write(byteBuffer.array(),
-                                        0,
+                                        byteBuffer.arrayOffset(),
                                         sizeInBytes);
       }
       if (bytesWritten != sizeInBytes) {
@@ -66,7 +66,6 @@ OpenSlesInput::~OpenSlesInput() {
 }

 int32_t OpenSlesInput::SetAndroidAudioDeviceObjects(void* javaVM,
-                                                    void* env,
                                                     void* context) {
   return 0;
 }
@@ -39,7 +39,6 @@ class OpenSlesInput {
   ~OpenSlesInput();

   static int32_t SetAndroidAudioDeviceObjects(void* javaVM,
-                                              void* env,
                                               void* context);
   static void ClearAndroidAudioDeviceObjects();
@@ -67,9 +67,8 @@ OpenSlesOutput::~OpenSlesOutput() {
 }

 int32_t OpenSlesOutput::SetAndroidAudioDeviceObjects(void* javaVM,
-                                                     void* env,
                                                      void* context) {
-  AudioManagerJni::SetAndroidAudioDeviceObjects(javaVM, env, context);
+  AudioManagerJni::SetAndroidAudioDeviceObjects(javaVM, context);
   return 0;
 }
@@ -39,7 +39,6 @@ class OpenSlesOutput : public PlayoutDelayProvider {
   virtual ~OpenSlesOutput();

   static int32_t SetAndroidAudioDeviceObjects(void* javaVM,
-                                              void* env,
                                               void* context);
   static void ClearAndroidAudioDeviceObjects();
@@ -270,7 +270,7 @@
         },
       ],
     }],
-    ['OS=="android" and enable_android_opensl==1', {
+    ['OS=="android"', {
      'targets': [
        {
          'target_name': 'audio_device_unittest',
@@ -90,7 +90,6 @@ AudioDeviceModule* CreateAudioDeviceModule(
 AudioDeviceModule* AudioDeviceModuleImpl::Create(const int32_t id,
                                                  const AudioLayer audioLayer)
 {
-
     // Create the generic ref counted (platform independent) implementation.
     RefCountImpl<AudioDeviceModuleImpl>* audioDevice =
         new RefCountImpl<AudioDeviceModuleImpl>(id, audioLayer);
@@ -218,7 +217,7 @@ int32_t AudioDeviceModuleImpl::CreatePlatformSpecificObjects()
         ptrAudioDeviceUtility = new AudioDeviceUtilityDummy(Id());
     }
 #else
-    const AudioLayer audioLayer(PlatformAudioLayer());
+    AudioLayer audioLayer(PlatformAudioLayer());

     // Create the *Windows* implementation of the Audio Device
     //
@@ -273,22 +272,24 @@ int32_t AudioDeviceModuleImpl::CreatePlatformSpecificObjects()
     // Create the *Android OpenSLES* implementation of the Audio Device
     //
 #if defined(WEBRTC_ANDROID)
-    if (audioLayer == kPlatformDefaultAudio)
-    {
-        // AudioRecordJni provides hardware AEC and OpenSlesOutput low latency.
-#if defined(WEBRTC_ANDROID_OPENSLES)
-        ptrAudioDevice = new AudioDeviceTemplate<OpenSlesInput, OpenSlesOutput>(Id());
-        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
-                     "Android OpenSLES Audio APIs will be utilized");
-#else
-        ptrAudioDevice = new AudioDeviceTemplate<AudioRecordJni, AudioTrackJni>(Id());
-        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
-                     "Android JNI Audio APIs will be utilized");
+#ifdef WEBRTC_ANDROID_OPENSLES
+    // Force default audio layer to OpenSL ES if the special compiler flag
+    // (enable_android_opensl) has been set to one.
+    if (audioLayer == kPlatformDefaultAudio) {
+      audioLayer = kAndroidOpenSLESAudio;
+    }
 #endif
+    if (audioLayer == kPlatformDefaultAudio ||
+        audioLayer == kAndroidJavaAudio) {
+      ptrAudioDevice =
+          new AudioDeviceTemplate<AudioRecordJni, AudioTrackJni>(Id());
+    } else if (audioLayer == kAndroidOpenSLESAudio) {
+      // AudioRecordJni provides hardware AEC and OpenSlesOutput low latency.
+      ptrAudioDevice =
+          new AudioDeviceTemplate<OpenSlesInput, OpenSlesOutput>(Id());
     }

-    if (ptrAudioDevice != NULL)
-    {
+    if (ptrAudioDevice != NULL) {
         // Create the Android implementation of the Device Utility.
         ptrAudioDeviceUtility = new AudioDeviceUtilityAndroid(Id());
     }
@@ -193,6 +193,10 @@ public:
 public:
     int32_t Id() {return _id;}

+    AudioDeviceBuffer* GetAudioDeviceBuffer() {
+        return &_audioDeviceBuffer;
+    }
+
 private:
     PlatformType Platform() const;
     AudioLayer PlatformAudioLayer() const;
@@ -29,7 +29,9 @@ class AudioDeviceModule : public RefCountedModule {
     kWindowsCoreAudio = 2,
     kLinuxAlsaAudio = 3,
     kLinuxPulseAudio = 4,
-    kDummyAudio = 5
+    kAndroidJavaAudio = 5,
+    kAndroidOpenSLESAudio = 6,
+    kDummyAudio = 7
   };

   enum WindowsDeviceType {
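With kAndroidJavaAudio and kAndroidOpenSLESAudio in the enum, a caller can pin a specific Android backend instead of relying on kPlatformDefaultAudio. A minimal sketch, mirroring how the unit test above creates its module (the id value 0 matches the test; error handling omitted):

```cpp
#include "webrtc/modules/audio_device/audio_device_impl.h"
#include "webrtc/modules/audio_device/include/audio_device.h"
#include "webrtc/system_wrappers/interface/scoped_refptr.h"

// Force the Java-based backend; pass kAndroidOpenSLESAudio instead once
// the OpenSL ES implementation has been refactored and enabled.
webrtc::scoped_refptr<webrtc::AudioDeviceModule> adm(
    webrtc::AudioDeviceModuleImpl::Create(
        0, webrtc::AudioDeviceModule::kAndroidJavaAudio));
```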
@@ -57,6 +57,7 @@
         'acm_receive_test',
         'acm_send_test',
         'audio_coding_module',
+        'audio_device',
         'audio_processing',
         'bitrate_controller',
         'CNG',
@@ -352,6 +353,18 @@
           'dependencies': [
             '<(DEPTH)/testing/android/native_test.gyp:native_test_native_code',
           ],
+          # Need to disable error due to the line in
+          # base/android/jni_android.h triggering it:
+          # const BASE_EXPORT jobject GetApplicationContext()
+          # error: type qualifiers ignored on function return type
+          'cflags': [
+            '-Wno-ignored-qualifiers',
+          ],
+          'sources': [
+            'audio_device/android/audio_device_unittest.cc',
+            'audio_device/android/ensure_initialized.cc',
+            'audio_device/android/ensure_initialized.h',
+          ],
         }],
       ],
       # Disable warnings to enable Win64 build, issue 1323.
@@ -34,6 +34,9 @@
     '<(DEPTH)/resources/audio_coding/speech_mono_32_48kHz.pcm',
     '<(DEPTH)/resources/audio_coding/testfile32kHz.pcm',
     '<(DEPTH)/resources/audio_coding/teststereo32kHz.pcm',
+    '<(DEPTH)/resources/audio_device/audio_short16.pcm',
+    '<(DEPTH)/resources/audio_device/audio_short44.pcm',
+    '<(DEPTH)/resources/audio_device/audio_short48.pcm',
     '<(DEPTH)/resources/audio_processing/agc/agc_audio.pcm',
     '<(DEPTH)/resources/audio_processing/agc/agc_no_circular_buffer.dat',
     '<(DEPTH)/resources/audio_processing/agc/agc_pitch_gain.dat',
@@ -27,7 +27,7 @@ int ViEAutoTestAndroid::RunAutotest(int testSelection, int subTestSelection,
   webrtc::SetRenderAndroidVM(javaVM);
 #ifndef WEBRTC_ANDROID_OPENSLES
   // voice engine calls into ADM directly
-  webrtc::VoiceEngine::SetAndroidObjects(javaVM, env, context);
+  webrtc::VoiceEngine::SetAndroidObjects(javaVM, context);
 #endif

   if (subTestSelection == 0) {
@@ -86,7 +86,7 @@ public:
     static int SetTraceCallback(TraceCallback* callback);

 #if !defined(WEBRTC_CHROMIUM_BUILD)
-    static int SetAndroidObjects(void* javaVM, void* env, void* context);
+    static int SetAndroidObjects(void* javaVM, void* context);
 #endif

 protected:
@@ -152,7 +152,7 @@ bool VoiceEngine::Delete(VoiceEngine*& voiceEngine)
 }

 #if !defined(WEBRTC_CHROMIUM_BUILD)
-int VoiceEngine::SetAndroidObjects(void* javaVM, void* env, void* context)
+int VoiceEngine::SetAndroidObjects(void* javaVM, void* context)
 {
 #ifdef WEBRTC_ANDROID
 #ifdef WEBRTC_ANDROID_OPENSLES
@@ -162,8 +162,8 @@ int VoiceEngine::SetAndroidObjects(void* javaVM, void* env, void* context)
   typedef AudioDeviceTemplate<AudioRecordJni, AudioTrackJni>
       AudioDeviceInstance;
 #endif
-  if (javaVM && env && context) {
-    AudioDeviceInstance::SetAndroidAudioDeviceObjects(javaVM, env, context);
+  if (javaVM && context) {
+    AudioDeviceInstance::SetAndroidAudioDeviceObjects(javaVM, context);
   } else {
     AudioDeviceInstance::ClearAndroidAudioDeviceObjects();
   }
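Taken together, the VoiceEngine signature change means native applications now pass two arguments instead of three at the call sites shown throughout this diff. A short summary sketch (jvm and context stand for whatever JavaVM* and android.content.Context the embedding application holds):

```cpp
// Register the Java VM and application context, e.g. at startup:
webrtc::VoiceEngine::SetAndroidObjects(jvm, context);

// Unregister and release the global references, e.g. at shutdown:
webrtc::VoiceEngine::SetAndroidObjects(NULL, NULL);
```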