OpenSL (not default): Enables low latency audio on Android.
BUG=1669
R=andrew@webrtc.org, fischman@webrtc.org, niklas.enbom@webrtc.org

Review URL: https://webrtc-codereview.appspot.com/2032004

git-svn-id: http://webrtc.googlecode.com/svn/trunk@4719 4adac7df-926f-26a2-2b94-8c16560cd09d
This commit is contained in:
  parent 641340944b
  commit 82f014aa0b
@@ -106,6 +106,7 @@
    # and include it here.
    'android_java_files': [
      '<(webrtc_modules_dir)/audio_device/android/java/src/org/webrtc/voiceengine/WebRTCAudioDevice.java',
      '<(webrtc_modules_dir)/audio_device/android/java/src/org/webrtc/voiceengine/AudioManagerAndroid.java',
      '<(webrtc_modules_dir)/video_capture/android/java/src/org/webrtc/videoengine/CaptureCapabilityAndroid.java',
      '<(webrtc_modules_dir)/video_capture/android/java/src/org/webrtc/videoengine/VideoCaptureAndroid.java',
      '<(webrtc_modules_dir)/video_capture/android/java/src/org/webrtc/videoengine/VideoCaptureDeviceInfoAndroid.java',
File diff suppressed because it is too large
webrtc/modules/audio_device/android/audio_device_opensles_android.h (modified)
@@ -8,97 +8,54 @@
 * be found in the AUTHORS file in the root of the source tree.
 */

#ifndef SRC_MODULES_AUDIO_DEVICE_ANDROID_AUDIO_DEVICE_OPENSLES_ANDROID_H_
#define SRC_MODULES_AUDIO_DEVICE_ANDROID_AUDIO_DEVICE_OPENSLES_ANDROID_H_

#include <jni.h>
#include <stdio.h>
#include <stdlib.h>

#include <SLES/OpenSLES.h>
#include <SLES/OpenSLES_Android.h>
#include <SLES/OpenSLES_AndroidConfiguration.h>

#include <queue>
#ifndef WEBRTC_MODULES_AUDIO_DEVICE_ANDROID_AUDIO_DEVICE_OPENSLES_ANDROID_H_
#define WEBRTC_MODULES_AUDIO_DEVICE_ANDROID_AUDIO_DEVICE_OPENSLES_ANDROID_H_

#include "webrtc/modules/audio_device/audio_device_generic.h"
#include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
#include "webrtc/modules/audio_device/android/opensles_input.h"
#include "webrtc/modules/audio_device/android/opensles_output.h"

namespace webrtc {

class EventWrapper;

const uint32_t N_MAX_INTERFACES = 3;
const uint32_t N_MAX_OUTPUT_DEVICES = 6;
const uint32_t N_MAX_INPUT_DEVICES = 3;

const uint32_t N_REC_SAMPLES_PER_SEC = 16000;  // Default fs
const uint32_t N_PLAY_SAMPLES_PER_SEC = 16000;  // Default fs

const uint32_t N_REC_CHANNELS = 1;
const uint32_t N_PLAY_CHANNELS = 1;

const uint32_t REC_BUF_SIZE_IN_SAMPLES = 480;
const uint32_t PLAY_BUF_SIZE_IN_SAMPLES = 480;

const uint32_t REC_MAX_TEMP_BUF_SIZE_PER_10ms =
    N_REC_CHANNELS * REC_BUF_SIZE_IN_SAMPLES * sizeof(int16_t);

const uint32_t PLAY_MAX_TEMP_BUF_SIZE_PER_10ms =
    N_PLAY_CHANNELS * PLAY_BUF_SIZE_IN_SAMPLES * sizeof(int16_t);

// Number of buffers in the playout queue
const uint16_t N_PLAY_QUEUE_BUFFERS = 8;
// Number of buffers in the recording queue
// TODO(xian): Reduce the number of buffers to improve the latency.
const uint16_t N_REC_QUEUE_BUFFERS = 8;
// Some values returned from getMinBufferSize
// (Nexus S playout 72ms, recording 64ms)
// (Galaxy 167ms, 44ms)
// (Nexus 7 72ms, 48ms)
// (Xoom 92ms, 40ms)

class ThreadWrapper;

class AudioDeviceAndroidOpenSLES: public AudioDeviceGeneric {
// Implements the interface of AudioDeviceGeneric. OpenSlesOutput and
// OpenSlesInput are the implementations.
class AudioDeviceAndroidOpenSLES : public AudioDeviceGeneric {
 public:
  explicit AudioDeviceAndroidOpenSLES(const int32_t id);
  ~AudioDeviceAndroidOpenSLES();
  virtual ~AudioDeviceAndroidOpenSLES();

  // Retrieve the currently utilized audio layer
  virtual int32_t
  ActiveAudioLayer(AudioDeviceModule::AudioLayer& audioLayer) const;  // NOLINT
  virtual int32_t ActiveAudioLayer(
      AudioDeviceModule::AudioLayer& audioLayer) const;  // NOLINT

  // Main initialization and termination
  virtual int32_t Init();
  virtual int32_t Terminate();
  virtual int32_t Terminate();
  virtual bool Initialized() const;

  // Device enumeration
  virtual int16_t PlayoutDevices();
  virtual int16_t RecordingDevices();
  virtual int32_t
  PlayoutDeviceName(uint16_t index,
                    char name[kAdmMaxDeviceNameSize],
                    char guid[kAdmMaxGuidSize]);
  virtual int32_t
  RecordingDeviceName(uint16_t index,
                      char name[kAdmMaxDeviceNameSize],
                      char guid[kAdmMaxGuidSize]);
  virtual int32_t PlayoutDeviceName(uint16_t index,
                                    char name[kAdmMaxDeviceNameSize],
                                    char guid[kAdmMaxGuidSize]);
  virtual int32_t RecordingDeviceName(uint16_t index,
                                      char name[kAdmMaxDeviceNameSize],
                                      char guid[kAdmMaxGuidSize]);

  // Device selection
  virtual int32_t SetPlayoutDevice(uint16_t index);
  virtual int32_t
  SetPlayoutDevice(AudioDeviceModule::WindowsDeviceType device);
  virtual int32_t SetPlayoutDevice(
      AudioDeviceModule::WindowsDeviceType device);
  virtual int32_t SetRecordingDevice(uint16_t index);
  virtual int32_t
  SetRecordingDevice(AudioDeviceModule::WindowsDeviceType device);
  virtual int32_t SetRecordingDevice(
      AudioDeviceModule::WindowsDeviceType device);

  // Audio transport initialization
  virtual int32_t PlayoutIsAvailable(bool& available);  // NOLINT
  virtual int32_t PlayoutIsAvailable(bool& available);
  virtual int32_t InitPlayout();
  virtual bool PlayoutIsInitialized() const;
  virtual int32_t RecordingIsAvailable(bool& available);  // NOLINT
  virtual int32_t RecordingIsAvailable(bool& available);
  virtual int32_t InitRecording();
  virtual bool RecordingIsInitialized() const;

@@ -115,80 +72,67 @@ class AudioDeviceAndroidOpenSLES: public AudioDeviceGeneric {
  virtual bool AGC() const;

  // Volume control based on the Windows Wave API (Windows only)
  virtual int32_t SetWaveOutVolume(uint16_t volumeLeft, uint16_t volumeRight);
  virtual int32_t SetWaveOutVolume(uint16_t volumeLeft,
                                   uint16_t volumeRight);
  virtual int32_t WaveOutVolume(
      uint16_t& volumeLeft,  // NOLINT
      uint16_t& volumeRight) const;  // NOLINT

  // Audio mixer initialization
  virtual int32_t SpeakerIsAvailable(bool& available);  // NOLINT
  virtual int32_t SpeakerIsAvailable(bool& available);
  virtual int32_t InitSpeaker();
  virtual bool SpeakerIsInitialized() const;
  virtual int32_t MicrophoneIsAvailable(
      bool& available);
  virtual int32_t MicrophoneIsAvailable(bool& available);
  virtual int32_t InitMicrophone();
  virtual bool MicrophoneIsInitialized() const;

  // Speaker volume controls
  virtual int32_t SpeakerVolumeIsAvailable(
      bool& available);  // NOLINT
  virtual int32_t SpeakerVolumeIsAvailable(bool& available);
  virtual int32_t SetSpeakerVolume(uint32_t volume);
  virtual int32_t SpeakerVolume(
      uint32_t& volume) const;  // NOLINT
  virtual int32_t MaxSpeakerVolume(
      uint32_t& maxVolume) const;  // NOLINT
  virtual int32_t MinSpeakerVolume(
      uint32_t& minVolume) const;  // NOLINT
  virtual int32_t SpeakerVolumeStepSize(
      uint16_t& stepSize) const;  // NOLINT
  virtual int32_t SpeakerVolume(uint32_t& volume) const;
  virtual int32_t MaxSpeakerVolume(uint32_t& maxVolume) const;
  virtual int32_t MinSpeakerVolume(uint32_t& minVolume) const;
  virtual int32_t SpeakerVolumeStepSize(uint16_t& stepSize) const;

  // Microphone volume controls
  virtual int32_t MicrophoneVolumeIsAvailable(
      bool& available);  // NOLINT
  virtual int32_t MicrophoneVolumeIsAvailable(bool& available);
  virtual int32_t SetMicrophoneVolume(uint32_t volume);
  virtual int32_t MicrophoneVolume(
      uint32_t& volume) const;  // NOLINT
  virtual int32_t MaxMicrophoneVolume(
      uint32_t& maxVolume) const;  // NOLINT
  virtual int32_t MinMicrophoneVolume(
      uint32_t& minVolume) const;  // NOLINT
  virtual int32_t
  MicrophoneVolumeStepSize(uint16_t& stepSize) const;  // NOLINT
  virtual int32_t MicrophoneVolume(uint32_t& volume) const;
  virtual int32_t MaxMicrophoneVolume(uint32_t& maxVolume) const;
  virtual int32_t MinMicrophoneVolume(uint32_t& minVolume) const;
  virtual int32_t MicrophoneVolumeStepSize(
      uint16_t& stepSize) const;

  // Speaker mute control
  virtual int32_t SpeakerMuteIsAvailable(bool& available);  // NOLINT
  virtual int32_t SpeakerMuteIsAvailable(bool& available);
  virtual int32_t SetSpeakerMute(bool enable);
  virtual int32_t SpeakerMute(bool& enabled) const;  // NOLINT
  virtual int32_t SpeakerMute(bool& enabled) const;

  // Microphone mute control
  virtual int32_t MicrophoneMuteIsAvailable(bool& available);  // NOLINT
  virtual int32_t MicrophoneMuteIsAvailable(bool& available);
  virtual int32_t SetMicrophoneMute(bool enable);
  virtual int32_t MicrophoneMute(bool& enabled) const;  // NOLINT
  virtual int32_t MicrophoneMute(bool& enabled) const;

  // Microphone boost control
  virtual int32_t MicrophoneBoostIsAvailable(bool& available);  // NOLINT
  virtual int32_t MicrophoneBoostIsAvailable(bool& available);
  virtual int32_t SetMicrophoneBoost(bool enable);
  virtual int32_t MicrophoneBoost(bool& enabled) const;  // NOLINT
  virtual int32_t MicrophoneBoost(bool& enabled) const;

  // Stereo support
  virtual int32_t StereoPlayoutIsAvailable(bool& available);  // NOLINT
  virtual int32_t StereoPlayoutIsAvailable(bool& available);
  virtual int32_t SetStereoPlayout(bool enable);
  virtual int32_t StereoPlayout(bool& enabled) const;  // NOLINT
  virtual int32_t StereoRecordingIsAvailable(bool& available);  // NOLINT
  virtual int32_t StereoPlayout(bool& enabled) const;
  virtual int32_t StereoRecordingIsAvailable(bool& available);
  virtual int32_t SetStereoRecording(bool enable);
  virtual int32_t StereoRecording(bool& enabled) const;  // NOLINT
  virtual int32_t StereoRecording(bool& enabled) const;

  // Delay information and control
  virtual int32_t
  SetPlayoutBuffer(const AudioDeviceModule::BufferType type,
                   uint16_t sizeMS);
  virtual int32_t PlayoutBuffer(
      AudioDeviceModule::BufferType& type,  // NOLINT
      uint16_t& sizeMS) const;
  virtual int32_t PlayoutDelay(
      uint16_t& delayMS) const;  // NOLINT
  virtual int32_t RecordingDelay(
      uint16_t& delayMS) const;  // NOLINT
  virtual int32_t SetPlayoutBuffer(const AudioDeviceModule::BufferType type,
                                   uint16_t sizeMS);
  virtual int32_t PlayoutBuffer(AudioDeviceModule::BufferType& type,
                                uint16_t& sizeMS) const;
  virtual int32_t PlayoutDelay(uint16_t& delayMS) const;
  virtual int32_t RecordingDelay(uint16_t& delayMS) const;

  // CPU load
  virtual int32_t CPULoad(uint16_t& load) const;  // NOLINT

@@ -208,109 +152,13 @@ class AudioDeviceAndroidOpenSLES: public AudioDeviceGeneric {

  // Speaker audio routing
  virtual int32_t SetLoudspeakerStatus(bool enable);
  virtual int32_t GetLoudspeakerStatus(bool& enable) const;  // NOLINT
  virtual int32_t GetLoudspeakerStatus(bool& enable) const;

 private:
  // Lock
  void Lock() {
    crit_sect_.Enter();
  };
  void UnLock() {
    crit_sect_.Leave();
  };

  static void PlayerSimpleBufferQueueCallback(
      SLAndroidSimpleBufferQueueItf queueItf,
      void *pContext);
  static void RecorderSimpleBufferQueueCallback(
      SLAndroidSimpleBufferQueueItf queueItf,
      void *pContext);
  void PlayerSimpleBufferQueueCallbackHandler(
      SLAndroidSimpleBufferQueueItf queueItf);
  void RecorderSimpleBufferQueueCallbackHandler(
      SLAndroidSimpleBufferQueueItf queueItf);
  void CheckErr(SLresult res);

  // Delay updates
  void UpdateRecordingDelay();
  void UpdatePlayoutDelay(uint32_t nSamplePlayed);

  // Init
  int32_t InitSampleRate();

  // Misc
  AudioDeviceBuffer* voe_audio_buffer_;
  CriticalSectionWrapper& crit_sect_;
  int32_t id_;

  // audio unit
  SLObjectItf sles_engine_;

  // playout device
  SLObjectItf sles_player_;
  SLEngineItf sles_engine_itf_;
  SLPlayItf sles_player_itf_;
  SLAndroidSimpleBufferQueueItf sles_player_sbq_itf_;
  SLObjectItf sles_output_mixer_;
  SLVolumeItf sles_speaker_volume_;

  // recording device
  SLObjectItf sles_recorder_;
  SLRecordItf sles_recorder_itf_;
  SLAndroidSimpleBufferQueueItf sles_recorder_sbq_itf_;
  SLDeviceVolumeItf sles_mic_volume_;
  uint32_t mic_dev_id_;

  uint32_t play_warning_, play_error_;
  uint32_t rec_warning_, rec_error_;

  // States
  bool is_recording_dev_specified_;
  bool is_playout_dev_specified_;
  bool is_initialized_;
  bool is_recording_;
  bool is_playing_;
  bool is_rec_initialized_;
  bool is_play_initialized_;
  bool is_mic_initialized_;
  bool is_speaker_initialized_;

  // Delay
  uint16_t playout_delay_;
  uint16_t recording_delay_;

  // AGC state
  bool agc_enabled_;

  // Threads
  ThreadWrapper* rec_thread_;
  uint32_t rec_thread_id_;
  static bool RecThreadFunc(void* context);
  bool RecThreadFuncImpl();
  EventWrapper& rec_timer_;

  uint32_t mic_sampling_rate_;
  uint32_t speaker_sampling_rate_;
  uint32_t max_speaker_vol_;
  uint32_t min_speaker_vol_;
  bool loundspeaker_on_;

  SLDataFormat_PCM player_pcm_;
  SLDataFormat_PCM record_pcm_;

  std::queue<int8_t*> rec_queue_;
  std::queue<int8_t*> rec_voe_audio_queue_;
  std::queue<int8_t*> rec_voe_ready_queue_;
  int8_t rec_buf_[N_REC_QUEUE_BUFFERS][
      N_REC_CHANNELS * sizeof(int16_t) * REC_BUF_SIZE_IN_SAMPLES];
  int8_t rec_voe_buf_[N_REC_QUEUE_BUFFERS][
      N_REC_CHANNELS * sizeof(int16_t) * REC_BUF_SIZE_IN_SAMPLES];

  std::queue<int8_t*> play_queue_;
  int8_t play_buf_[N_PLAY_QUEUE_BUFFERS][
      N_PLAY_CHANNELS * sizeof(int16_t) * PLAY_BUF_SIZE_IN_SAMPLES];
  OpenSlesOutput output_;
  OpenSlesInput input_;
};

}  // namespace webrtc

#endif  // SRC_MODULES_AUDIO_DEVICE_ANDROID_AUDIO_DEVICE_OPENSLES_ANDROID_H_
#endif  // WEBRTC_MODULES_AUDIO_DEVICE_ANDROID_AUDIO_DEVICE_OPENSLES_ANDROID_H_
webrtc/modules/audio_device/android/audio_manager_jni.cc (new file, 151 lines)
@@ -0,0 +1,151 @@
/*
 * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include "webrtc/modules/audio_device/android/audio_manager_jni.h"

#include <assert.h>

#include "webrtc/system_wrappers/interface/trace.h"

namespace {

class AttachThreadScoped {
 public:
  explicit AttachThreadScoped(JavaVM* jvm)
      : attached_(false), jvm_(jvm), env_(NULL) {
    jint ret_val = jvm->GetEnv(reinterpret_cast<void**>(&env_),
                               REQUIRED_JNI_VERSION);
    if (ret_val == JNI_EDETACHED) {
      // Attach the thread to the Java VM.
      ret_val = jvm_->AttachCurrentThread(&env_, NULL);
      attached_ = ret_val > 0;
      assert(attached_);
    }
  }
  ~AttachThreadScoped() {
    if (attached_ && (jvm_->DetachCurrentThread() < 0)) {
      assert(false);
    }
  }

  JNIEnv* env() { return env_; }

 private:
  bool attached_;
  JavaVM* jvm_;
  JNIEnv* env_;
};

}  // namespace

namespace webrtc {

static JavaVM* g_jvm_ = NULL;
static JNIEnv* g_jni_env_ = NULL;
static jobject g_context_ = NULL;
static jclass g_audio_manager_class_ = NULL;
static jobject g_audio_manager_ = NULL;

AudioManagerJni::AudioManagerJni()
    : low_latency_supported_(false),
      native_output_sample_rate_(0),
      native_buffer_size_(0) {
  if (!HasDeviceObjects()) {
    assert(false);
  }
  AttachThreadScoped ats(g_jvm_);
  JNIEnv* env = ats.env();
  assert(env && "Unsupported JNI version!");
  CreateInstance(env);
  // Pre-store device specific values.
  SetLowLatencySupported(env);
  SetNativeOutputSampleRate(env);
  SetNativeFrameSize(env);
}

void AudioManagerJni::SetAndroidAudioDeviceObjects(void* jvm, void* env,
                                                   void* context) {
  assert(jvm);
  assert(env);
  assert(context);

  // Store global Java VM variables to be accessed by API calls.
  g_jvm_ = reinterpret_cast<JavaVM*>(jvm);
  g_jni_env_ = reinterpret_cast<JNIEnv*>(env);
  g_context_ = g_jni_env_->NewGlobalRef(reinterpret_cast<jobject>(context));

  // FindClass must be made in this function since this function's contract
  // requires it to be called by a Java thread.
  // See
  // http://developer.android.com/training/articles/perf-jni.html#faq_FindClass
  // as to why this is necessary.
  // Get the AudioManagerAndroid class object.
  jclass javaAmClassLocal = g_jni_env_->FindClass(
      "org/webrtc/voiceengine/AudioManagerAndroid");
  assert(javaAmClassLocal);

  // Create a global reference such that the class object is not recycled by
  // the garbage collector.
  g_audio_manager_class_ = reinterpret_cast<jclass>(
      g_jni_env_->NewGlobalRef(javaAmClassLocal));
  assert(g_audio_manager_class_);
}

void AudioManagerJni::ClearAndroidAudioDeviceObjects() {
  g_jni_env_->DeleteGlobalRef(g_audio_manager_class_);
  g_audio_manager_class_ = NULL;
  g_jni_env_->DeleteGlobalRef(g_context_);
  g_context_ = NULL;
  g_jni_env_->DeleteGlobalRef(g_audio_manager_);
  g_audio_manager_ = NULL;
  g_jni_env_ = NULL;
  g_jvm_ = NULL;
}

void AudioManagerJni::SetLowLatencySupported(JNIEnv* env) {
  jmethodID id = LookUpMethodId(env, "isAudioLowLatencySupported", "()Z");
  low_latency_supported_ = env->CallBooleanMethod(g_audio_manager_, id);
}

void AudioManagerJni::SetNativeOutputSampleRate(JNIEnv* env) {
  jmethodID id = LookUpMethodId(env, "getNativeOutputSampleRate", "()I");
  native_output_sample_rate_ = env->CallIntMethod(g_audio_manager_, id);
}

void AudioManagerJni::SetNativeFrameSize(JNIEnv* env) {
  jmethodID id = LookUpMethodId(env,
                                "getAudioLowLatencyOutputFrameSize", "()I");
  native_buffer_size_ = env->CallIntMethod(g_audio_manager_, id);
}

bool AudioManagerJni::HasDeviceObjects() {
  return g_jvm_ && g_jni_env_ && g_context_ && g_audio_manager_class_;
}

jmethodID AudioManagerJni::LookUpMethodId(JNIEnv* env,
                                          const char* method_name,
                                          const char* method_signature) {
  jmethodID ret_val = env->GetMethodID(g_audio_manager_class_, method_name,
                                       method_signature);
  assert(ret_val);
  return ret_val;
}

void AudioManagerJni::CreateInstance(JNIEnv* env) {
  // Get the method ID for the constructor taking Context.
  jmethodID id = LookUpMethodId(env, "<init>", "(Landroid/content/Context;)V");
  g_audio_manager_ = env->NewObject(g_audio_manager_class_, id, g_context_);
  // Create a global reference so that the instance is accessible until no
  // longer needed.
  g_audio_manager_ = env->NewGlobalRef(g_audio_manager_);
  assert(g_audio_manager_);
}

}  // namespace webrtc
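Note (illustration, not part of this change): SetAndroidAudioDeviceObjects must run on a Java thread, and JNI_OnLoad qualifies. A minimal sketch of how a host application might wire this up; g_application_context is a hypothetical global that the application populates elsewhere:

#include <jni.h>

#include "webrtc/modules/audio_device/android/audio_manager_jni.h"

static jobject g_application_context = NULL;  // Hypothetical; set by the app.

jint JNI_OnLoad(JavaVM* jvm, void* reserved) {
  JNIEnv* env = NULL;
  if (jvm->GetEnv(reinterpret_cast<void**>(&env), JNI_VERSION_1_4) != JNI_OK)
    return -1;
  // Runs on a Java thread, so the FindClass call inside can resolve
  // org/webrtc/voiceengine/AudioManagerAndroid.
  webrtc::AudioManagerJni::SetAndroidAudioDeviceObjects(
      jvm, env, g_application_context);
  return JNI_VERSION_1_4;
}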
webrtc/modules/audio_device/android/audio_manager_jni.h (new file, 73 lines)
@@ -0,0 +1,73 @@
/*
 * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

// Android APIs used to access Java functionality needed to enable low latency
// audio.

#ifndef WEBRTC_MODULES_AUDIO_DEVICE_ANDROID_AUDIO_MANAGER_JNI_H_
#define WEBRTC_MODULES_AUDIO_DEVICE_ANDROID_AUDIO_MANAGER_JNI_H_

#include <jni.h>

namespace webrtc {

#define REQUIRED_JNI_VERSION JNI_VERSION_1_4

class AudioManagerJni {
 public:
  AudioManagerJni();
  ~AudioManagerJni() {}

  // SetAndroidAudioDeviceObjects must only be called once unless there has
  // been a subsequent call to ClearAndroidAudioDeviceObjects. For each call
  // to ClearAndroidAudioDeviceObjects, SetAndroidAudioDeviceObjects may be
  // called once more.
  // This function must be called by a Java thread, as calling it from a
  // thread created by the native application would prevent FindClass from
  // working. See
  // http://developer.android.com/training/articles/perf-jni.html#faq_FindClass
  // for more details.
  // It has to be called before this class' other APIs can succeed; after a
  // call to ClearAndroidAudioDeviceObjects, this class' APIs will fail until
  // SetAndroidAudioDeviceObjects is called again.
  static void SetAndroidAudioDeviceObjects(void* jvm, void* env,
                                           void* context);
  // This function must be called when the AudioManagerJni class is no
  // longer needed. It frees up the global references acquired in
  // SetAndroidAudioDeviceObjects.
  static void ClearAndroidAudioDeviceObjects();

  bool low_latency_supported() { return low_latency_supported_; }
  int native_output_sample_rate() { return native_output_sample_rate_; }
  int native_buffer_size() { return native_buffer_size_; }

 private:
  bool HasDeviceObjects();

  // The following functions assume that the calling thread has been attached
  // to the Java VM.
  void SetLowLatencySupported(JNIEnv* env);
  void SetNativeOutputSampleRate(JNIEnv* env);
  void SetNativeFrameSize(JNIEnv* env);

  jmethodID LookUpMethodId(JNIEnv* env, const char* method_name,
                           const char* method_signature);

  void CreateInstance(JNIEnv* env);

  // Whether or not low latency audio is supported, the native output sample
  // rate, and the audio buffer size do not change while the process runs, so
  // the values might as well be cached at initialization.
  bool low_latency_supported_;
  int native_output_sample_rate_;
  int native_buffer_size_;
};

}  // namespace webrtc

#endif  // WEBRTC_MODULES_AUDIO_DEVICE_ANDROID_AUDIO_MANAGER_JNI_H_
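A short usage sketch of the cached accessors (hypothetical caller, not from this change; the frames-to-bytes math assumes mono 16-bit PCM, where one sample is sizeof(int16_t) bytes):

#include <stdint.h>

#include "webrtc/modules/audio_device/android/audio_manager_jni.h"

// Returns a preferred playout buffer size in bytes, or -1 when the device
// does not advertise low latency audio. Illustration only.
int ChoosePlayoutBufferSizeBytes() {
  webrtc::AudioManagerJni audio_manager;  // Set...Objects must have run.
  if (!audio_manager.low_latency_supported())
    return -1;  // Fall back to the non-OpenSL audio path.
  return audio_manager.native_buffer_size() *
      static_cast<int>(sizeof(int16_t));
}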
webrtc/modules/audio_device/android/fine_audio_buffer.cc (new file, 88 lines)
@@ -0,0 +1,88 @@
/*
 * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include "webrtc/modules/audio_device/android/fine_audio_buffer.h"

#include <assert.h>
#include <memory.h>
#include <stdio.h>
#include <algorithm>

#include "webrtc/modules/audio_device/audio_device_buffer.h"

namespace webrtc {

FineAudioBuffer::FineAudioBuffer(AudioDeviceBuffer* device_buffer,
                                 int desired_frame_size_bytes,
                                 int sample_rate)
    : device_buffer_(device_buffer),
      desired_frame_size_bytes_(desired_frame_size_bytes),
      sample_rate_(sample_rate),
      samples_per_10_ms_(sample_rate_ * 10 / 1000),
      bytes_per_10_ms_(samples_per_10_ms_ * sizeof(int16_t)),
      cached_buffer_start_(0),
      cached_bytes_(0) {
  cache_buffer_.reset(new int8_t[bytes_per_10_ms_]);
}

FineAudioBuffer::~FineAudioBuffer() {
}

int FineAudioBuffer::RequiredBufferSizeBytes() {
  // It is possible that we store the desired frame size - 1 samples. Since new
  // audio frames are pulled in chunks of 10ms we will need a buffer that can
  // hold desired_frame_size - 1 + 10ms of data. We omit the - 1.
  return desired_frame_size_bytes_ + bytes_per_10_ms_;
}

void FineAudioBuffer::GetBufferData(int8_t* buffer) {
  if (desired_frame_size_bytes_ <= cached_bytes_) {
    memcpy(buffer, &cache_buffer_.get()[cached_buffer_start_],
           desired_frame_size_bytes_);
    cached_buffer_start_ += desired_frame_size_bytes_;
    cached_bytes_ -= desired_frame_size_bytes_;
    assert(cached_buffer_start_ + cached_bytes_ < bytes_per_10_ms_);
    return;
  }
  memcpy(buffer, &cache_buffer_.get()[cached_buffer_start_], cached_bytes_);
  // Push another n*10ms of audio to |buffer|. n > 1 if
  // |desired_frame_size_bytes_| is greater than 10ms of audio. Note that we
  // write the audio after the cached bytes copied earlier.
  int8_t* unwritten_buffer = &buffer[cached_bytes_];
  int bytes_left = desired_frame_size_bytes_ - cached_bytes_;
  // Ceiling of integer division: 1 + ((x - 1) / y)
  int number_of_requests = 1 + (bytes_left - 1) / (bytes_per_10_ms_);
  for (int i = 0; i < number_of_requests; ++i) {
    device_buffer_->RequestPlayoutData(samples_per_10_ms_);
    int num_out = device_buffer_->GetPlayoutData(unwritten_buffer);
    if (num_out != samples_per_10_ms_) {
      assert(num_out == 0);
      cached_bytes_ = 0;
      return;
    }
    unwritten_buffer += bytes_per_10_ms_;
    assert(bytes_left >= 0);
    bytes_left -= bytes_per_10_ms_;
  }
  assert(bytes_left <= 0);
  // Put the samples that were written to |buffer| but are not used in the
  // cache.
  int cache_location = desired_frame_size_bytes_;
  int8_t* cache_ptr = &buffer[cache_location];
  cached_bytes_ = number_of_requests * bytes_per_10_ms_ -
      (desired_frame_size_bytes_ - cached_bytes_);
  // If cached_bytes_ is larger than the cache buffer, uninitialized memory
  // will be read.
  assert(cached_bytes_ <= bytes_per_10_ms_);
  assert(-bytes_left == cached_bytes_);
  cached_buffer_start_ = 0;
  memcpy(cache_buffer_.get(), cache_ptr, cached_bytes_);
}

}  // namespace webrtc
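To make GetBufferData's bookkeeping concrete, a worked example under assumed values (44.1 kHz mono, a desired frame of 480 samples, empty cache); this mirrors the arithmetic above rather than adding anything to the CL:

#include <assert.h>
#include <stdint.h>

void FineBufferArithmeticExample() {
  const int kSamplesPer10Ms = 44100 * 10 / 1000;  // 441
  const int kBytesPer10Ms =
      kSamplesPer10Ms * static_cast<int>(sizeof(int16_t));  // 882
  const int kDesiredFrameBytes =
      480 * static_cast<int>(sizeof(int16_t));  // 960
  // Ceiling of integer division, exactly as in GetBufferData.
  int number_of_requests = 1 + (kDesiredFrameBytes - 1) / kBytesPer10Ms;
  assert(number_of_requests == 2);  // Two 10 ms pulls cover 960 bytes.
  int leftover = number_of_requests * kBytesPer10Ms - kDesiredFrameBytes;
  assert(leftover == 804);           // Cached for the next call.
  assert(leftover <= kBytesPer10Ms); // Fits in the 882-byte cache buffer.
}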
webrtc/modules/audio_device/android/fine_audio_buffer.h (new file, 66 lines)
@@ -0,0 +1,66 @@
/*
 * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#ifndef WEBRTC_MODULES_AUDIO_DEVICE_ANDROID_FINE_AUDIO_BUFFER_H_
#define WEBRTC_MODULES_AUDIO_DEVICE_ANDROID_FINE_AUDIO_BUFFER_H_

#include "webrtc/system_wrappers/interface/scoped_ptr.h"
#include "webrtc/typedefs.h"

namespace webrtc {

class AudioDeviceBuffer;

// FineAudioBuffer takes an AudioDeviceBuffer which delivers audio data in
// chunks corresponding to 10ms of data. It then allows for this data to be
// pulled in a finer or coarser granularity. That is, by interacting with this
// class instead of directly with the AudioDeviceBuffer, one can ask for any
// number of audio samples.
class FineAudioBuffer {
 public:
  // |device_buffer| is a buffer that provides 10ms of audio data.
  // |desired_frame_size_bytes| is the number of bytes of audio data
  // (not samples) |GetBufferData| should return on success.
  // |sample_rate| is the sample rate of the audio data. This is needed because
  // |device_buffer| delivers 10ms of data. Given the sample rate, the number
  // of samples can be calculated.
  FineAudioBuffer(AudioDeviceBuffer* device_buffer,
                  int desired_frame_size_bytes,
                  int sample_rate);
  ~FineAudioBuffer();

  // Returns the required size of |buffer| when calling GetBufferData. If the
  // buffer is smaller, memory trampling will happen.
  // |desired_frame_size_bytes| and |sample_rate| are as described in the
  // constructor.
  int RequiredBufferSizeBytes();

  // |buffer| must be of equal or greater size than what is returned by
  // RequiredBufferSize. This is to avoid unnecessary memcpy.
  void GetBufferData(int8_t* buffer);

 private:
  // Device buffer that provides 10ms chunks of data.
  AudioDeviceBuffer* device_buffer_;
  // Number of bytes delivered per GetBufferData.
  int desired_frame_size_bytes_;
  int sample_rate_;
  int samples_per_10_ms_;
  // Convenience parameter to avoid converting from samples.
  int bytes_per_10_ms_;

  // Storage for samples that have not yet been asked for.
  scoped_array<int8_t> cache_buffer_;
  int cached_buffer_start_;  // Location of first unread sample.
  int cached_bytes_;  // Number of bytes stored in cache.
};

}  // namespace webrtc

#endif  // WEBRTC_MODULES_AUDIO_DEVICE_ANDROID_FINE_AUDIO_BUFFER_H_
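A minimal usage sketch (caller and values assumed, not from this change); the pattern mirrors what an OpenSL playout callback would do once per output buffer:

#include "webrtc/modules/audio_device/android/fine_audio_buffer.h"
#include "webrtc/system_wrappers/interface/scoped_ptr.h"

void PullOneFrame(webrtc::AudioDeviceBuffer* adb) {
  const int kSampleRate = 44100;
  const int kFrameSizeBytes = 480 * sizeof(int16_t);  // 480 mono samples.
  webrtc::FineAudioBuffer fine_buffer(adb, kFrameSizeBytes, kSampleRate);
  // Sized for one frame plus up to 10 ms of slack that gets cached.
  webrtc::scoped_array<int8_t> buf(
      new int8_t[fine_buffer.RequiredBufferSizeBytes()]);
  fine_buffer.GetBufferData(buf.get());
  // The first kFrameSizeBytes of |buf| now hold playable audio.
}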
@@ -0,0 +1,106 @@
/*
 * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include "webrtc/modules/audio_device/android/fine_audio_buffer.h"

#include <limits.h>
#include <memory>

#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
#include "webrtc/modules/audio_device/mock_audio_device_buffer.h"
#include "webrtc/system_wrappers/interface/scoped_ptr.h"

using ::testing::_;
using ::testing::InSequence;
using ::testing::Return;

namespace webrtc {

// The fake audio data is 0,1,..SCHAR_MAX-1,0,1,... This is to make it easy
// to detect errors. This function verifies that the buffers contain such data.
// E.g. if there are two buffers of size 3, buffer 1 would contain 0,1,2 and
// buffer 2 would contain 3,4,5. Note that SCHAR_MAX is 127 so wrap-around
// will happen.
// |buffer| is the audio buffer to verify.
bool VerifyBuffer(const int8_t* buffer, int buffer_number, int size) {
  int start_value = (buffer_number * size) % SCHAR_MAX;
  for (int i = 0; i < size; ++i) {
    if (buffer[i] != (i + start_value) % SCHAR_MAX) {
      return false;
    }
  }
  return true;
}

// This function replaces GetPlayoutData when it's called (which is done
// implicitly when calling GetBufferData). It writes the sequence
// 0,1,..SCHAR_MAX-1,0,1,... to the buffer. Note that this is likely a buffer
// of a different size than the one VerifyBuffer verifies.
// |iteration| is the number of calls made to UpdateBuffer prior to this call.
// |samples_per_10_ms| is the number of samples that should be written to the
// buffer (|arg0|).
ACTION_P2(UpdateBuffer, iteration, samples_per_10_ms) {
  int8_t* buffer = static_cast<int8_t*>(arg0);
  int bytes_per_10_ms = samples_per_10_ms * static_cast<int>(sizeof(int16_t));
  int start_value = (iteration * bytes_per_10_ms) % SCHAR_MAX;
  for (int i = 0; i < bytes_per_10_ms; ++i) {
    buffer[i] = (i + start_value) % SCHAR_MAX;
  }
  return samples_per_10_ms;
}

void RunFineBufferTest(int sample_rate, int frame_size_in_samples) {
  const int kSamplesPer10Ms = sample_rate * 10 / 1000;
  const int kFrameSizeBytes = frame_size_in_samples *
      static_cast<int>(sizeof(int16_t));
  const int kNumberOfFrames = 5;
  // Ceiling of integer division: 1 + ((x - 1) / y)
  const int kNumberOfUpdateBufferCalls =
      1 + ((kNumberOfFrames * frame_size_in_samples - 1) / kSamplesPer10Ms);

  MockAudioDeviceBuffer audio_device_buffer;
  EXPECT_CALL(audio_device_buffer, RequestPlayoutData(_))
      .WillRepeatedly(Return(kSamplesPer10Ms));
  {
    InSequence s;
    for (int i = 0; i < kNumberOfUpdateBufferCalls; ++i) {
      EXPECT_CALL(audio_device_buffer, GetPlayoutData(_))
          .WillOnce(UpdateBuffer(i, kSamplesPer10Ms))
          .RetiresOnSaturation();
    }
  }
  FineAudioBuffer fine_buffer(&audio_device_buffer, kFrameSizeBytes,
                              sample_rate);

  scoped_array<int8_t> out_buffer;
  out_buffer.reset(
      new int8_t[fine_buffer.RequiredBufferSizeBytes()]);
  for (int i = 0; i < kNumberOfFrames; ++i) {
    fine_buffer.GetBufferData(out_buffer.get());
    EXPECT_TRUE(VerifyBuffer(out_buffer.get(), i, kFrameSizeBytes));
  }
}

TEST(FineBufferTest, BufferLessThan10ms) {
  const int kSampleRate = 44100;
  const int kSamplesPer10Ms = kSampleRate * 10 / 1000;
  const int kFrameSizeSamples = kSamplesPer10Ms - 50;
  RunFineBufferTest(kSampleRate, kFrameSizeSamples);
}

TEST(FineBufferTest, GreaterThan10ms) {
  const int kSampleRate = 44100;
  const int kSamplesPer10Ms = kSampleRate * 10 / 1000;
  const int kFrameSizeSamples = kSamplesPer10Ms + 50;
  RunFineBufferTest(kSampleRate, kFrameSizeSamples);
}

}  // namespace webrtc
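For a concrete instance of the fake-data convention shared by VerifyBuffer and UpdateBuffer (numbers assumed, illustration only): with buffers of size 3, buffer number 2 starts at (2 * 3) % 127 = 6, so its expected contents are 6, 7, 8:

#include <assert.h>
#include <limits.h>

void FakeDataExample() {
  const int kSize = 3;
  const int kBufferNumber = 2;
  int start_value = (kBufferNumber * kSize) % SCHAR_MAX;
  assert(start_value == 6);
  assert((kSize - 1 + start_value) % SCHAR_MAX == 8);  // Last expected byte.
}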
webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/AudioManagerAndroid.java (new file)
@@ -0,0 +1,72 @@
/*
 * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

// The functions in this file are called from native code. They can still be
// accessed even though they are declared private.

package org.webrtc.voiceengine;

import android.content.Context;
import android.content.pm.PackageManager;
import android.media.AudioManager;

class AudioManagerAndroid {
  // Most Google lead devices use 44.1 kHz as the default sampling rate; 44.1
  // kHz is also widely used on other Android devices.
  private static final int DEFAULT_SAMPLING_RATE = 44100;
  // Randomly picked frame size which is close to the value returned on an N4.
  // Return this default value when
  // getProperty(PROPERTY_OUTPUT_FRAMES_PER_BUFFER) fails.
  private static final int DEFAULT_FRAMES_PER_BUFFER = 256;

  private int mNativeOutputSampleRate;
  private boolean mAudioLowLatencySupported;
  private int mAudioLowLatencyOutputFrameSize;

  @SuppressWarnings("unused")
  private AudioManagerAndroid(Context context) {
    AudioManager audioManager = (AudioManager)
        context.getSystemService(Context.AUDIO_SERVICE);

    mNativeOutputSampleRate = DEFAULT_SAMPLING_RATE;
    if (android.os.Build.VERSION.SDK_INT >=
        android.os.Build.VERSION_CODES.JELLY_BEAN_MR1) {
      String sampleRateString = audioManager.getProperty(
          AudioManager.PROPERTY_OUTPUT_SAMPLE_RATE);
      if (sampleRateString != null) {
        mNativeOutputSampleRate = Integer.parseInt(sampleRateString);
      }
    }
    mAudioLowLatencySupported = context.getPackageManager().hasSystemFeature(
        PackageManager.FEATURE_AUDIO_LOW_LATENCY);
    mAudioLowLatencyOutputFrameSize = DEFAULT_FRAMES_PER_BUFFER;
    String framesPerBuffer = audioManager.getProperty(
        AudioManager.PROPERTY_OUTPUT_FRAMES_PER_BUFFER);
    if (framesPerBuffer != null) {
      mAudioLowLatencyOutputFrameSize = Integer.parseInt(framesPerBuffer);
    }
  }

  @SuppressWarnings("unused")
  private int getNativeOutputSampleRate() {
    return mNativeOutputSampleRate;
  }

  @SuppressWarnings("unused")
  private boolean isAudioLowLatencySupported() {
    return mAudioLowLatencySupported;
  }

  @SuppressWarnings("unused")
  private int getAudioLowLatencyOutputFrameSize() {
    return mAudioLowLatencyOutputFrameSize;
  }
}
webrtc/modules/audio_device/android/low_latency_event.h (new file, 65 lines)
@@ -0,0 +1,65 @@
/*
 * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#ifndef WEBRTC_MODULES_AUDIO_DEVICE_ANDROID_LOW_LATENCY_EVENT_H_
#define WEBRTC_MODULES_AUDIO_DEVICE_ANDROID_LOW_LATENCY_EVENT_H_

#include <errno.h>
#include <limits.h>
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <sys/types.h>

namespace webrtc {

// Implementation of an event for a single waiting thread and a single
// signaling thread. The event is sticky.
class LowLatencyEvent {
 public:
  LowLatencyEvent();
  ~LowLatencyEvent();

  // Readies the event. Must be called before signaling or waiting for event.
  // Returns true on success.
  bool Start();
  // Shuts down the event and releases threads calling WaitOnEvent. Once
  // stopped SignalEvent and WaitOnEvent will have no effect. Start can be
  // called to re-enable the event.
  // Returns true on success.
  bool Stop();

  // Releases the thread calling WaitOnEvent in a sticky fashion.
  void SignalEvent(int event_id, int event_msg);
  // Waits until SignalEvent or Stop is called.
  void WaitOnEvent(int* event_id, int* event_msg);

 private:
  typedef int Handle;
  static const Handle kInvalidHandle;
  static const int kReadHandle;
  static const int kWriteHandle;

  // Closes the handle. Returns true on success.
  static bool Close(Handle* handle);

  // SignalEvent and WaitOnEvent are actually writes to and reads from file
  // descriptors. Write is signal.
  void WriteFd(int message_id, int message);
  // Read is wait.
  void ReadFd(int* message_id, int* message);

  Handle handles_[2];
};

}  // namespace webrtc

#endif  // WEBRTC_MODULES_AUDIO_DEVICE_ANDROID_LOW_LATENCY_EVENT_H_
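A usage sketch of the intended signal/wait pattern (hypothetical caller, not part of this change; in the CL the producer is an OpenSL callback and the consumer a processing thread):

#include "webrtc/modules/audio_device/android/low_latency_event.h"

void EventUsageSketch(webrtc::LowLatencyEvent* event) {
  if (!event->Start())  // Once, before the worker thread runs.
    return;
  event->SignalEvent(1, 42);  // Producer side, e.g. an OpenSL callback.
  int event_id = 0;
  int event_msg = 0;
  event->WaitOnEvent(&event_id, &event_msg);  // Consumer side; blocks.
  // event_id == 1 and event_msg == 42 at this point.
  event->Stop();  // Releases any thread still blocked in WaitOnEvent.
}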
@@ -0,0 +1,97 @@
/*
 * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include "webrtc/modules/audio_device/android/low_latency_event.h"

#include <assert.h>

#define HANDLE_EINTR(x) ({ \
  typeof(x) eintr_wrapper_result; \
  do { \
    eintr_wrapper_result = (x); \
  } while (eintr_wrapper_result == -1 && errno == EINTR); \
  eintr_wrapper_result; \
})

namespace webrtc {

const LowLatencyEvent::Handle LowLatencyEvent::kInvalidHandle = -1;
const int LowLatencyEvent::kReadHandle = 0;
const int LowLatencyEvent::kWriteHandle = 1;

LowLatencyEvent::LowLatencyEvent() {
  handles_[kReadHandle] = kInvalidHandle;
  handles_[kWriteHandle] = kInvalidHandle;
}

LowLatencyEvent::~LowLatencyEvent() {
  Stop();
}

bool LowLatencyEvent::Start() {
  assert(handles_[kReadHandle] == kInvalidHandle);
  assert(handles_[kWriteHandle] == kInvalidHandle);

  return socketpair(AF_UNIX, SOCK_STREAM, 0, handles_) == 0;
}

bool LowLatencyEvent::Stop() {
  bool ret = Close(&handles_[kReadHandle]) && Close(&handles_[kWriteHandle]);
  handles_[kReadHandle] = kInvalidHandle;
  handles_[kWriteHandle] = kInvalidHandle;
  return ret;
}

void LowLatencyEvent::SignalEvent(int event_id, int event_msg) {
  WriteFd(event_id, event_msg);
}

void LowLatencyEvent::WaitOnEvent(int* event_id, int* event_msg) {
  ReadFd(event_id, event_msg);
}

bool LowLatencyEvent::Close(Handle* handle) {
  if (*handle == kInvalidHandle) {
    return false;
  }
  int retval = HANDLE_EINTR(close(*handle));
  *handle = kInvalidHandle;
  return retval == 0;
}

void LowLatencyEvent::WriteFd(int message_id, int message) {
  char buffer[sizeof(message_id) + sizeof(message)];
  size_t bytes = sizeof(buffer);
  memcpy(buffer, &message_id, sizeof(message_id));
  memcpy(&buffer[sizeof(message_id)], &message, sizeof(message));
  ssize_t bytes_written = HANDLE_EINTR(write(handles_[kWriteHandle], buffer,
                                             bytes));
  if (bytes_written != static_cast<ssize_t>(bytes)) {
    assert(false);
  }
}

void LowLatencyEvent::ReadFd(int* message_id, int* message) {
  char buffer[sizeof(message_id) + sizeof(message)];
  size_t bytes = sizeof(buffer);
  ssize_t bytes_read = HANDLE_EINTR(read(handles_[kReadHandle], buffer, bytes));
  if (bytes_read == 0) {
    *message_id = 0;
    *message = 0;
    return;
  } else if (bytes_read == static_cast<ssize_t>(bytes)) {
    memcpy(message_id, buffer, sizeof(*message_id));
    memcpy(message, &buffer[sizeof(*message_id)], sizeof(*message));
  } else {
    assert(false);
  }
}

}  // namespace webrtc
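The HANDLE_EINTR macro above relies on a GCC statement expression to retry a system call that was interrupted by a signal. Hand-expanded for a single close() call it is equivalent to the following (illustration only; the file itself uses the macro form):

#include <errno.h>
#include <unistd.h>

int CloseRetryingOnEintr(int fd) {
  int result;
  do {
    result = close(fd);  // Retry while interrupted by a signal.
  } while (result == -1 && errno == EINTR);
  return result;
}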
@@ -0,0 +1,96 @@
/*
 * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include "webrtc/modules/audio_device/android/low_latency_event.h"

#include "testing/gtest/include/gtest/gtest.h"
#include "webrtc/system_wrappers/interface/scoped_ptr.h"
#include "webrtc/system_wrappers/interface/sleep.h"
#include "webrtc/system_wrappers/interface/thread_wrapper.h"

namespace webrtc {

static const int kEventMsg = 1;

class LowLatencyEventTest : public testing::Test {
 public:
  LowLatencyEventTest()
      : process_thread_(ThreadWrapper::CreateThread(CbThread,
                                                    this,
                                                    kRealtimePriority,
                                                    "test_thread")),
        terminated_(false),
        iteration_count_(0),
        allowed_iterations_(0) {
    EXPECT_TRUE(event_.Start());
    Start();
  }
  ~LowLatencyEventTest() {
    EXPECT_GE(allowed_iterations_, 1);
    EXPECT_GE(iteration_count_, 1);
    Stop();
  }

  void AllowOneIteration() {
    ++allowed_iterations_;
    event_.SignalEvent(allowed_iterations_, kEventMsg);
  }

 private:
  void Start() {
    unsigned int thread_id = 0;
    EXPECT_TRUE(process_thread_->Start(thread_id));
  }
  void Stop() {
    terminated_ = true;
    event_.Stop();
    process_thread_->Stop();
  }

  static bool CbThread(void* context) {
    return reinterpret_cast<LowLatencyEventTest*>(context)->CbThreadImpl();
  }
  bool CbThreadImpl() {
    int allowed_iterations;
    int message;
    ++iteration_count_;
    event_.WaitOnEvent(&allowed_iterations, &message);
    EXPECT_EQ(iteration_count_, allowed_iterations);
    EXPECT_EQ(message, kEventMsg);
    return !terminated_;
  }

  LowLatencyEvent event_;

  scoped_ptr<ThreadWrapper> process_thread_;
  bool terminated_;
  int iteration_count_;
  int allowed_iterations_;
};

TEST_F(LowLatencyEventTest, TriggerEvent) {
  for (int i = 0; i < 3; ++i) {
    AllowOneIteration();
  }
}

// Events trigger in less than 3ms. Wait for 3 ms to ensure there are no
// spurious wakeups.
TEST_F(LowLatencyEventTest, NoTriggerEvent) {
  SleepMs(3);
  // If there were spurious wakeups either the wakeups would have triggered a
  // failure as we haven't allowed an iteration yet. Or the wakeup happened
  // to signal 0, 0 in which case the mismatch will be discovered when allowing
  // an iteration to happen.
  AllowOneIteration();
}

}  // namespace webrtc
webrtc/modules/audio_device/android/opensles_common.cc (new file, 37 lines)
@@ -0,0 +1,37 @@
/*
 * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include "webrtc/modules/audio_device/android/opensles_common.h"

#include <assert.h>

namespace webrtc_opensl {

SLDataFormat_PCM CreatePcmConfiguration(int sample_rate) {
  SLDataFormat_PCM configuration;
  configuration.formatType = SL_DATAFORMAT_PCM;
  configuration.numChannels = kNumChannels;
  // According to the OpenSL ES documentation in the NDK, samplesPerSec is
  // actually in units of milliHz, despite the misleading name. It further
  // recommends using constants. However, this would lead to a lot of
  // boilerplate code so it is not done here.
  configuration.samplesPerSec = sample_rate * 1000;
  configuration.bitsPerSample = SL_PCMSAMPLEFORMAT_FIXED_16;
  configuration.containerSize = SL_PCMSAMPLEFORMAT_FIXED_16;
  configuration.channelMask = SL_SPEAKER_FRONT_CENTER;
  if (2 == configuration.numChannels) {
    configuration.channelMask =
        SL_SPEAKER_FRONT_LEFT | SL_SPEAKER_FRONT_RIGHT;
  }
  configuration.endianness = SL_BYTEORDER_LITTLEENDIAN;
  return configuration;
}

}  // namespace webrtc_opensl
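As a concrete instance of the milliHz note in CreatePcmConfiguration: 44100 Hz becomes 44100000 milliHz, which equals the SL_SAMPLINGRATE_44_1 constant the NDK documentation recommends (worked example, illustration only):

#include <assert.h>

#include <SLES/OpenSLES.h>

void MilliHzExample() {
  const int sample_rate = 44100;  // Hz
  SLuint32 samples_per_sec = sample_rate * 1000;  // milliHz
  assert(samples_per_sec == SL_SAMPLINGRATE_44_1);  // Both are 44100000.
}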
webrtc/modules/audio_device/android/opensles_common.h (new file, 37 lines)
@@ -0,0 +1,37 @@
/*
 * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#ifndef WEBRTC_MODULES_AUDIO_DEVICE_ANDROID_OPENSLES_COMMON_H_
#define WEBRTC_MODULES_AUDIO_DEVICE_ANDROID_OPENSLES_COMMON_H_

#include <SLES/OpenSLES.h>

namespace webrtc_opensl {

enum {
  kDefaultSampleRate = 44100,
  kNumChannels = 1
};

class PlayoutDelayProvider {
 public:
  virtual int PlayoutDelayMs() = 0;

 protected:
  PlayoutDelayProvider() {}
  virtual ~PlayoutDelayProvider() {}
};

SLDataFormat_PCM CreatePcmConfiguration(int sample_rate);

}  // namespace webrtc_opensl

#endif  // WEBRTC_MODULES_AUDIO_DEVICE_ANDROID_OPENSLES_COMMON_H_
webrtc/modules/audio_device/android/opensles_input.cc (new file, 504 lines)
@ -0,0 +1,504 @@
|
||||
/*
|
||||
* Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#include "webrtc/modules/audio_device/android/opensles_input.h"
|
||||
|
||||
#include <assert.h>
|
||||
|
||||
#include "webrtc/modules/audio_device/android/single_rw_fifo.h"
|
||||
#include "webrtc/modules/audio_device/audio_device_buffer.h"
|
||||
#include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
|
||||
#include "webrtc/system_wrappers/interface/thread_wrapper.h"
|
||||
#include "webrtc/system_wrappers/interface/trace.h"
|
||||
|
||||
using webrtc_opensl::kDefaultSampleRate;
|
||||
using webrtc_opensl::kNumChannels;
|
||||
|
||||
#define VOID_RETURN
|
||||
#define OPENSL_RETURN_ON_FAILURE(op, ret_val) \
|
||||
do { \
|
||||
SLresult err = (op); \
|
||||
if (err != SL_RESULT_SUCCESS) { \
|
||||
WEBRTC_TRACE(kTraceError, kTraceAudioDevice, id_, \
|
||||
"OpenSL error: %d", err); \
|
||||
assert(false); \
|
||||
return ret_val; \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
static const SLEngineOption kOption[] = {
|
||||
{ SL_ENGINEOPTION_THREADSAFE, static_cast<SLuint32>(SL_BOOLEAN_TRUE) },
|
||||
};
|
||||
|
||||
enum {
|
||||
kNoOverrun,
|
||||
kOverrun,
|
||||
};
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
OpenSlesInput::OpenSlesInput(
|
||||
const int32_t id,
|
||||
webrtc_opensl::PlayoutDelayProvider* delay_provider)
|
||||
: id_(id),
|
||||
delay_provider_(delay_provider),
|
||||
initialized_(false),
|
||||
mic_initialized_(false),
|
||||
rec_initialized_(false),
|
||||
crit_sect_(CriticalSectionWrapper::CreateCriticalSection()),
|
||||
recording_(false),
|
||||
num_fifo_buffers_needed_(0),
|
||||
number_overruns_(0),
|
||||
sles_engine_(NULL),
|
||||
sles_engine_itf_(NULL),
|
||||
sles_recorder_(NULL),
|
||||
sles_recorder_itf_(NULL),
|
||||
sles_recorder_sbq_itf_(NULL),
|
||||
audio_buffer_(NULL),
|
||||
active_queue_(0),
|
||||
agc_enabled_(false),
|
||||
recording_delay_(0) {
|
||||
}
|
||||
|
||||
OpenSlesInput::~OpenSlesInput() {
|
||||
}
|
||||
|
||||
int32_t OpenSlesInput::Init() {
|
||||
assert(!initialized_);
|
||||
|
||||
// Set up OpenSL engine.
|
||||
OPENSL_RETURN_ON_FAILURE(slCreateEngine(&sles_engine_, 1, kOption, 0,
|
||||
NULL, NULL),
|
||||
-1);
|
||||
OPENSL_RETURN_ON_FAILURE((*sles_engine_)->Realize(sles_engine_,
|
||||
SL_BOOLEAN_FALSE),
|
||||
-1);
|
||||
OPENSL_RETURN_ON_FAILURE((*sles_engine_)->GetInterface(sles_engine_,
|
||||
SL_IID_ENGINE,
|
||||
&sles_engine_itf_),
|
||||
-1);
|
||||
|
||||
if (InitSampleRate() != 0) {
|
||||
return -1;
|
||||
}
|
||||
AllocateBuffers();
|
||||
initialized_ = true;
|
||||
return 0;
|
||||
}
|
||||
|
||||
int32_t OpenSlesInput::Terminate() {
|
||||
// It is assumed that the caller has stopped recording before terminating.
|
||||
assert(!recording_);
|
||||
(*sles_engine_)->Destroy(sles_engine_);
|
||||
initialized_ = false;
|
||||
mic_initialized_ = false;
|
||||
rec_initialized_ = false;
|
||||
return 0;
|
||||
}
|
||||
|
||||
int32_t OpenSlesInput::RecordingDeviceName(uint16_t index,
|
||||
char name[kAdmMaxDeviceNameSize],
|
||||
char guid[kAdmMaxGuidSize]) {
|
||||
assert(index == 0);
|
||||
// Empty strings.
|
||||
name[0] = '\0';
|
||||
guid[0] = '\0';
|
||||
return 0;
|
||||
}
|
||||
|
||||
int32_t OpenSlesInput::SetRecordingDevice(uint16_t index) {
|
||||
assert(index == 0);
|
||||
return 0;
|
||||
}
|
||||
|
||||
int32_t OpenSlesInput::RecordingIsAvailable(bool& available) { // NOLINT
|
||||
available = true;
|
||||
return 0;
|
||||
}
|
||||
|
||||
int32_t OpenSlesInput::InitRecording() {
|
||||
assert(initialized_);
|
||||
assert(!rec_initialized_);
|
||||
rec_initialized_ = true;
|
||||
return 0;
|
||||
}
|
||||
|
||||
int32_t OpenSlesInput::StartRecording() {
|
||||
assert(rec_initialized_);
|
||||
assert(!recording_);
|
||||
if (!CreateAudioRecorder()) {
|
||||
return -1;
|
||||
}
|
||||
// Setup to receive buffer queue event callbacks.
  OPENSL_RETURN_ON_FAILURE(
      (*sles_recorder_sbq_itf_)->RegisterCallback(
          sles_recorder_sbq_itf_,
          RecorderSimpleBufferQueueCallback,
          this),
      -1);

  if (!EnqueueAllBuffers()) {
    return -1;
  }

  {
    // To prevent the compiler from e.g. optimizing the code to
    // recording_ = StartCbThreads() which wouldn't have been thread safe.
    CriticalSectionScoped lock(crit_sect_.get());
    recording_ = true;
  }
  if (!StartCbThreads()) {
    recording_ = false;
    return -1;
  }
  return 0;
}

int32_t OpenSlesInput::StopRecording() {
  StopCbThreads();
  DestroyAudioRecorder();
  return 0;
}

int32_t OpenSlesInput::SetAGC(bool enable) {
  agc_enabled_ = enable;
  return 0;
}

int32_t OpenSlesInput::MicrophoneIsAvailable(bool& available) {  // NOLINT
  available = true;
  return 0;
}

int32_t OpenSlesInput::InitMicrophone() {
  assert(initialized_);
  assert(!recording_);
  mic_initialized_ = true;
  return 0;
}

int32_t OpenSlesInput::MicrophoneVolumeIsAvailable(bool& available) {  // NOLINT
  available = false;
  return 0;
}

int32_t OpenSlesInput::MinMicrophoneVolume(
    uint32_t& minVolume) const {  // NOLINT
  minVolume = 0;
  return 0;
}

int32_t OpenSlesInput::MicrophoneVolumeStepSize(
    uint16_t& stepSize) const {
  stepSize = 1;
  return 0;
}

int32_t OpenSlesInput::MicrophoneMuteIsAvailable(bool& available) {  // NOLINT
  available = false;  // Mic mute not supported on Android.
  return 0;
}

int32_t OpenSlesInput::MicrophoneBoostIsAvailable(bool& available) {  // NOLINT
  available = false;  // Mic boost not supported on Android.
  return 0;
}

int32_t OpenSlesInput::SetMicrophoneBoost(bool enable) {
  assert(false);
  return -1;  // Not supported.
}

int32_t OpenSlesInput::MicrophoneBoost(bool& enabled) const {  // NOLINT
  assert(false);
  return -1;  // Not supported.
}

int32_t OpenSlesInput::StereoRecordingIsAvailable(bool& available) {  // NOLINT
  available = false;  // Stereo recording not supported on Android.
  return 0;
}

int32_t OpenSlesInput::StereoRecording(bool& enabled) const {  // NOLINT
  enabled = false;
  return 0;
}

int32_t OpenSlesInput::RecordingDelay(uint16_t& delayMS) const {  // NOLINT
  delayMS = recording_delay_;
  return 0;
}

void OpenSlesInput::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) {
  audio_buffer_ = audioBuffer;
}

int32_t OpenSlesInput::InitSampleRate() {
  audio_buffer_->SetRecordingSampleRate(kDefaultSampleRate);
  audio_buffer_->SetRecordingChannels(kNumChannels);
  UpdateRecordingDelay();
  return 0;
}

void OpenSlesInput::UpdateRecordingDelay() {
  // TODO(hellner): Add accurate delay estimate.
  // On average half the current buffer will have been filled with audio.
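  // Worked example, assuming the defaults in opensles_input.h:
  // TotalBuffersUsed() = kNum10MsToBuffer + kNumOpenSlBuffers = 3 + 2 = 5
  // buffers of kDefaultBufSizeInSamples = 16000 * 10 / 1000 = 160 samples
  // each, so recording_delay_ = (5 - 0.5) * 160 / (16000 / 1000) = 45 ms.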
  int outstanding_samples =
      (TotalBuffersUsed() - 0.5) * kDefaultBufSizeInSamples;
  recording_delay_ = outstanding_samples / (kDefaultSampleRate / 1000);
}

void OpenSlesInput::CalculateNumFifoBuffersNeeded() {
  // Buffer size is 10ms of data.
  num_fifo_buffers_needed_ = kNum10MsToBuffer;
}

void OpenSlesInput::AllocateBuffers() {
  // Allocate FIFO to handle passing buffers between processing and OpenSL
  // threads.
  CalculateNumFifoBuffersNeeded();
  assert(num_fifo_buffers_needed_ > 0);
  fifo_.reset(new SingleRwFifo(num_fifo_buffers_needed_));

  // Allocate the memory area to be used.
  rec_buf_.reset(new scoped_array<int8_t>[TotalBuffersUsed()]);
  for (int i = 0; i < TotalBuffersUsed(); ++i) {
    rec_buf_[i].reset(new int8_t[kDefaultBufSizeInBytes]);
  }
}

int OpenSlesInput::TotalBuffersUsed() const {
  return num_fifo_buffers_needed_ + kNumOpenSlBuffers;
}

bool OpenSlesInput::EnqueueAllBuffers() {
  active_queue_ = 0;
  number_overruns_ = 0;
  for (int i = 0; i < kNumOpenSlBuffers; ++i) {
    memset(rec_buf_[i].get(), 0, kDefaultBufSizeInBytes);
    OPENSL_RETURN_ON_FAILURE(
        (*sles_recorder_sbq_itf_)->Enqueue(
            sles_recorder_sbq_itf_,
            reinterpret_cast<void*>(rec_buf_[i].get()),
            kDefaultBufSizeInBytes),
        false);
  }
  // In case of underrun the fifo will be at capacity. In case of first enqueue
  // no audio can have been returned yet meaning fifo must be empty. Any other
  // values are unexpected.
  assert(fifo_->size() == fifo_->capacity() ||
         fifo_->size() == 0);
  // OpenSL recording has been stopped. I.e. only this thread is touching
  // |fifo_|.
  while (fifo_->size() != 0) {
    // Clear the fifo.
    fifo_->Pop();
  }
  return true;
}

bool OpenSlesInput::CreateAudioRecorder() {
  if (!event_.Start()) {
    assert(false);
    return false;
  }
  SLDataLocator_IODevice micLocator = {
    SL_DATALOCATOR_IODEVICE, SL_IODEVICE_AUDIOINPUT,
    SL_DEFAULTDEVICEID_AUDIOINPUT, NULL };
  SLDataSource audio_source = { &micLocator, NULL };

  SLDataLocator_AndroidSimpleBufferQueue simple_buf_queue = {
    SL_DATALOCATOR_ANDROIDSIMPLEBUFFERQUEUE,
    static_cast<SLuint32>(TotalBuffersUsed())
  };
  SLDataFormat_PCM configuration =
      webrtc_opensl::CreatePcmConfiguration(kDefaultSampleRate);
  SLDataSink audio_sink = { &simple_buf_queue, &configuration };

  // Interfaces for recording Android audio data and for Android-specific
  // configuration are needed. Note the interfaces still need to be
  // initialized. This only tells OpenSL that the interfaces will be needed at
  // some point.
  const SLInterfaceID id[kNumInterfaces] = {
    SL_IID_ANDROIDSIMPLEBUFFERQUEUE, SL_IID_ANDROIDCONFIGURATION };
  const SLboolean req[kNumInterfaces] = {
    SL_BOOLEAN_TRUE, SL_BOOLEAN_TRUE };
  OPENSL_RETURN_ON_FAILURE(
      (*sles_engine_itf_)->CreateAudioRecorder(sles_engine_itf_,
                                               &sles_recorder_,
                                               &audio_source,
                                               &audio_sink,
                                               kNumInterfaces,
                                               id,
                                               req),
      false);

  // Realize the recorder in synchronous mode.
  OPENSL_RETURN_ON_FAILURE((*sles_recorder_)->Realize(sles_recorder_,
                                                      SL_BOOLEAN_FALSE),
                           false);
  OPENSL_RETURN_ON_FAILURE(
      (*sles_recorder_)->GetInterface(sles_recorder_, SL_IID_RECORD,
                                      static_cast<void*>(&sles_recorder_itf_)),
      false);
  OPENSL_RETURN_ON_FAILURE(
      (*sles_recorder_)->GetInterface(
          sles_recorder_,
          SL_IID_ANDROIDSIMPLEBUFFERQUEUE,
          static_cast<void*>(&sles_recorder_sbq_itf_)),
      false);
  return true;
}

void OpenSlesInput::DestroyAudioRecorder() {
  event_.Stop();
  if (sles_recorder_sbq_itf_) {
    // Release all buffers currently queued up.
    OPENSL_RETURN_ON_FAILURE(
        (*sles_recorder_sbq_itf_)->Clear(sles_recorder_sbq_itf_),
        VOID_RETURN);
    sles_recorder_sbq_itf_ = NULL;
  }
  sles_recorder_itf_ = NULL;

  if (sles_recorder_) {
    (*sles_recorder_)->Destroy(sles_recorder_);
    sles_recorder_ = NULL;
  }
}

bool OpenSlesInput::HandleOverrun(int event_id, int event_msg) {
  if (!recording_) {
    return false;
  }
  if (event_id == kNoOverrun) {
    return false;
  }
  WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, id_, "Audio overrun");
  assert(event_id == kOverrun);
  assert(event_msg > 0);
  // Wait for all enqueued buffers to be flushed.
  if (event_msg != kNumOpenSlBuffers) {
    return true;
  }
  // All buffers passed to OpenSL have been flushed. Restart the audio from
  // scratch.
  // No need to check sles_recorder_itf_ as recording_ would be false before it
  // is set to NULL.
  OPENSL_RETURN_ON_FAILURE(
      (*sles_recorder_itf_)->SetRecordState(sles_recorder_itf_,
                                            SL_RECORDSTATE_STOPPED),
      true);
  EnqueueAllBuffers();
  OPENSL_RETURN_ON_FAILURE(
      (*sles_recorder_itf_)->SetRecordState(sles_recorder_itf_,
                                            SL_RECORDSTATE_RECORDING),
      true);
  return true;
}

void OpenSlesInput::RecorderSimpleBufferQueueCallback(
    SLAndroidSimpleBufferQueueItf queue_itf,
    void* context) {
  OpenSlesInput* audio_device = reinterpret_cast<OpenSlesInput*>(context);
  audio_device->RecorderSimpleBufferQueueCallbackHandler(queue_itf);
}

void OpenSlesInput::RecorderSimpleBufferQueueCallbackHandler(
    SLAndroidSimpleBufferQueueItf queue_itf) {
  if (fifo_->size() >= fifo_->capacity() || number_overruns_ > 0) {
    ++number_overruns_;
    event_.SignalEvent(kOverrun, number_overruns_);
    return;
  }
  int8_t* audio = rec_buf_[active_queue_].get();
  // There is at least one spot available in the fifo.
  fifo_->Push(audio);
  active_queue_ = (active_queue_ + 1) % TotalBuffersUsed();
  event_.SignalEvent(kNoOverrun, 0);
  // active_queue_ is indexing the next buffer to record to. Since the current
  // buffer has been recorded it means that the buffer index
  // kNumOpenSlBuffers - 1 past |active_queue_| contains the next free buffer.
  // Since |fifo_| wasn't at capacity, at least one buffer is free to be used.
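  // For example, with kNumOpenSlBuffers = 2 and TotalBuffersUsed() = 5: when
  // buffer 0 completes, active_queue_ becomes 1 and the next free buffer is
  // (1 + 2 - 1) % 5 = 2, i.e. the one right after the buffer OpenSL is
  // currently recording into.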
  int next_free_buffer =
      (active_queue_ + kNumOpenSlBuffers - 1) % TotalBuffersUsed();
  OPENSL_RETURN_ON_FAILURE(
      (*sles_recorder_sbq_itf_)->Enqueue(
          sles_recorder_sbq_itf_,
          reinterpret_cast<void*>(rec_buf_[next_free_buffer].get()),
          kDefaultBufSizeInBytes),
      VOID_RETURN);
}

bool OpenSlesInput::StartCbThreads() {
  rec_thread_.reset(ThreadWrapper::CreateThread(CbThread,
                                                this,
                                                kRealtimePriority,
                                                "opensl_rec_thread"));
  assert(rec_thread_.get());
  unsigned int thread_id = 0;
  if (!rec_thread_->Start(thread_id)) {
    assert(false);
    return false;
  }
  OPENSL_RETURN_ON_FAILURE(
      (*sles_recorder_itf_)->SetRecordState(sles_recorder_itf_,
                                            SL_RECORDSTATE_RECORDING),
      false);
  return true;
}

void OpenSlesInput::StopCbThreads() {
  {
    CriticalSectionScoped lock(crit_sect_.get());
    recording_ = false;
  }
  if (sles_recorder_itf_) {
    OPENSL_RETURN_ON_FAILURE(
        (*sles_recorder_itf_)->SetRecordState(sles_recorder_itf_,
                                              SL_RECORDSTATE_STOPPED),
        VOID_RETURN);
  }
  if (rec_thread_.get() == NULL) {
    return;
  }
  event_.Stop();
  if (rec_thread_->Stop()) {
    rec_thread_.reset();
  } else {
    assert(false);
  }
}

bool OpenSlesInput::CbThread(void* context) {
  return reinterpret_cast<OpenSlesInput*>(context)->CbThreadImpl();
}

bool OpenSlesInput::CbThreadImpl() {
  int event_id;
  int event_msg;
  // event_ must not be waited on while a lock has been taken.
  event_.WaitOnEvent(&event_id, &event_msg);

  CriticalSectionScoped lock(crit_sect_.get());
  if (HandleOverrun(event_id, event_msg)) {
    return recording_;
  }
  // If |fifo_| has audio data, process it.
  while (fifo_->size() > 0 && recording_) {
    int8_t* audio = fifo_->Pop();
    audio_buffer_->SetRecordedBuffer(audio, kDefaultBufSizeInSamples);
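    // Hand the current playout and recording delays (in ms) to the buffer so
    // that downstream voice-quality enhancement, e.g. echo control, can align
    // the far-end and near-end streams.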
    audio_buffer_->SetVQEData(delay_provider_->PlayoutDelayMs(),
                              recording_delay_, 0);
    audio_buffer_->DeliverRecordedData();
  }
  return recording_;
}

}  // namespace webrtc
webrtc/modules/audio_device/android/opensles_input.h (new file, 214 lines)
@@ -0,0 +1,214 @@
/*
 *  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#ifndef WEBRTC_MODULES_AUDIO_DEVICE_ANDROID_OPENSLES_INPUT_H_
#define WEBRTC_MODULES_AUDIO_DEVICE_ANDROID_OPENSLES_INPUT_H_

#include <SLES/OpenSLES.h>
#include <SLES/OpenSLES_Android.h>
#include <SLES/OpenSLES_AndroidConfiguration.h>

#include "webrtc/modules/audio_device/android/low_latency_event.h"
#include "webrtc/modules/audio_device/android/opensles_common.h"
#include "webrtc/modules/audio_device/include/audio_device.h"
#include "webrtc/modules/audio_device/include/audio_device_defines.h"
#include "webrtc/system_wrappers/interface/scoped_ptr.h"

namespace webrtc {

class AudioDeviceBuffer;
class CriticalSectionWrapper;
class PlayoutDelayProvider;
class SingleRwFifo;
class ThreadWrapper;

// OpenSL implementation that facilitates capturing PCM data from an Android
// device's microphone.
// This class is thread-compatible: given an instance of this class, calls to
// non-const methods require exclusive access to the object.
class OpenSlesInput {
 public:
  OpenSlesInput(const int32_t id,
                webrtc_opensl::PlayoutDelayProvider* delay_provider);
  ~OpenSlesInput();

  // Main initialization and termination
  int32_t Init();
  int32_t Terminate();
  bool Initialized() const { return initialized_; }

  // Device enumeration
  int16_t RecordingDevices() { return 1; }
  int32_t RecordingDeviceName(uint16_t index,
                              char name[kAdmMaxDeviceNameSize],
                              char guid[kAdmMaxGuidSize]);

  // Device selection
  int32_t SetRecordingDevice(uint16_t index);
  int32_t SetRecordingDevice(
      AudioDeviceModule::WindowsDeviceType device) { return -1; }

  // Audio transport initialization
  int32_t RecordingIsAvailable(bool& available);  // NOLINT
  int32_t InitRecording();
  bool RecordingIsInitialized() const { return rec_initialized_; }

  // Audio transport control
  int32_t StartRecording();
  int32_t StopRecording();
  bool Recording() const { return recording_; }

  // Microphone Automatic Gain Control (AGC)
  int32_t SetAGC(bool enable);
  bool AGC() const { return agc_enabled_; }

  // Audio mixer initialization
  int32_t MicrophoneIsAvailable(bool& available);  // NOLINT
  int32_t InitMicrophone();
  bool MicrophoneIsInitialized() const { return mic_initialized_; }

  // Microphone volume controls
  int32_t MicrophoneVolumeIsAvailable(bool& available);  // NOLINT
  // TODO(leozwang): Add microphone volume control when OpenSL APIs
  // are available.
  int32_t SetMicrophoneVolume(uint32_t volume) { return 0; }
  int32_t MicrophoneVolume(uint32_t& volume) const { return -1; }  // NOLINT
  int32_t MaxMicrophoneVolume(
      uint32_t& maxVolume) const { return 0; }  // NOLINT
  int32_t MinMicrophoneVolume(uint32_t& minVolume) const;  // NOLINT
  int32_t MicrophoneVolumeStepSize(
      uint16_t& stepSize) const;  // NOLINT

  // Microphone mute control
  int32_t MicrophoneMuteIsAvailable(bool& available);  // NOLINT
  int32_t SetMicrophoneMute(bool enable) { return -1; }
  int32_t MicrophoneMute(bool& enabled) const { return -1; }  // NOLINT

  // Microphone boost control
  int32_t MicrophoneBoostIsAvailable(bool& available);  // NOLINT
  int32_t SetMicrophoneBoost(bool enable);
  int32_t MicrophoneBoost(bool& enabled) const;  // NOLINT

  // Stereo support
  int32_t StereoRecordingIsAvailable(bool& available);  // NOLINT
  int32_t SetStereoRecording(bool enable) { return -1; }
  int32_t StereoRecording(bool& enabled) const;  // NOLINT

  // Delay information and control
  int32_t RecordingDelay(uint16_t& delayMS) const;  // NOLINT

  bool RecordingWarning() const { return false; }
  bool RecordingError() const { return false; }
  void ClearRecordingWarning() {}
  void ClearRecordingError() {}

  // Attach audio buffer
  void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer);

 private:
  enum {
    kNumInterfaces = 2,
    kDefaultBufSizeInSamples = webrtc_opensl::kDefaultSampleRate * 10 / 1000,
    kDefaultBufSizeInBytes =
        webrtc_opensl::kNumChannels * kDefaultBufSizeInSamples *
        sizeof(int16_t),
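    // With the 16 kHz mono default in opensles_common.h this works out to
    // 160 samples and 320 bytes per 10 ms buffer.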
    // Keep as few OpenSL buffers as possible to avoid wasting memory. 2 is
    // minimum for playout. Keep 2 for recording as well.
    kNumOpenSlBuffers = 2,
    kNum10MsToBuffer = 3,
  };

  int32_t InitSampleRate();
  void UpdateRecordingDelay();
  void CalculateNumFifoBuffersNeeded();
  void AllocateBuffers();
  int TotalBuffersUsed() const;
  bool EnqueueAllBuffers();
  // This function also configures the audio recorder, e.g. which sample rate
  // to use, so it should be called when starting recording.
  bool CreateAudioRecorder();
  void DestroyAudioRecorder();

  // When overrun happens there will be more frames received from OpenSL than
  // the desired number of buffers. It is possible to expand the number of
  // buffers as you go but that would greatly increase the complexity of this
  // code. HandleOverrun gracefully handles the scenario by restarting
  // recording, throwing away all pending audio data. This will sound like a
  // click. This is also logged to identify these types of clicks.
  // This function returns true if there has been overrun. Further processing
  // of audio data should be avoided until this function returns false again.
  // The function needs to be protected by |crit_sect_|.
  bool HandleOverrun(int event_id, int event_msg);

  static void RecorderSimpleBufferQueueCallback(
      SLAndroidSimpleBufferQueueItf queueItf,
      void* pContext);
  // This function must not take any locks or do any heavy work. It is a
  // requirement for the OpenSL implementation to work as intended. The reason
  // for this is that taking locks exposes the OpenSL thread to the risk of
  // priority inversion.
  void RecorderSimpleBufferQueueCallbackHandler(
      SLAndroidSimpleBufferQueueItf queueItf);

  bool StartCbThreads();
  void StopCbThreads();
  static bool CbThread(void* context);
  // This function must be protected against data races with threads calling
  // this class' public functions. It is a requirement for this class to be
  // thread-compatible.
  bool CbThreadImpl();

  int id_;
  webrtc_opensl::PlayoutDelayProvider* delay_provider_;
  bool initialized_;
  bool mic_initialized_;
  bool rec_initialized_;

  // Members that are read/write accessed concurrently by the process thread
  // and threads calling public functions of this class.
  scoped_ptr<ThreadWrapper> rec_thread_;  // Processing thread
  scoped_ptr<CriticalSectionWrapper> crit_sect_;
  // This member controls the starting and stopping of recording audio to the
  // device.

  // Only one thread, T1, may push and only one thread, T2, may pull. T1 may or
  // may not be the same thread as T2. T2 is the process thread and T1 is the
  // OpenSL thread.
  scoped_ptr<SingleRwFifo> fifo_;
  int num_fifo_buffers_needed_;
  LowLatencyEvent event_;
  int number_overruns_;

  // OpenSL handles
  SLObjectItf sles_engine_;
  SLEngineItf sles_engine_itf_;
  SLObjectItf sles_recorder_;
  SLRecordItf sles_recorder_itf_;
  SLAndroidSimpleBufferQueueItf sles_recorder_sbq_itf_;

  // Audio buffers
  AudioDeviceBuffer* audio_buffer_;
  // Holds all allocated memory such that it is deallocated properly.
  scoped_array<scoped_array<int8_t> > rec_buf_;
  // Index in |rec_buf_| pointing to the audio buffer that will be ready the
  // next time RecorderSimpleBufferQueueCallbackHandler is invoked.
  // Ready means buffer contains audio data from the device.
  int active_queue_;

  // Audio settings
  bool agc_enabled_;

  // Audio status
  uint16_t recording_delay_;
};

}  // namespace webrtc

#endif  // WEBRTC_MODULES_AUDIO_DEVICE_ANDROID_OPENSLES_INPUT_H_
webrtc/modules/audio_device/android/opensles_output.cc (new file, 565 lines)
@@ -0,0 +1,565 @@
/*
 *  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include "webrtc/modules/audio_device/android/opensles_output.h"

#include <assert.h>

#include "webrtc/modules/audio_device/android/fine_audio_buffer.h"
#include "webrtc/modules/audio_device/android/single_rw_fifo.h"
#include "webrtc/modules/audio_device/audio_device_buffer.h"
#include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
#include "webrtc/system_wrappers/interface/thread_wrapper.h"
#include "webrtc/system_wrappers/interface/trace.h"

using webrtc_opensl::kDefaultSampleRate;
using webrtc_opensl::kNumChannels;

#define VOID_RETURN
#define OPENSL_RETURN_ON_FAILURE(op, ret_val)            \
  do {                                                   \
    SLresult err = (op);                                 \
    if (err != SL_RESULT_SUCCESS) {                      \
      WEBRTC_TRACE(kTraceError, kTraceAudioDevice, id_,  \
                   "OpenSL error: %d", err);             \
      assert(false);                                     \
      return ret_val;                                    \
    }                                                    \
  } while (0)

static const SLEngineOption kOption[] = {
  { SL_ENGINEOPTION_THREADSAFE, static_cast<SLuint32>(SL_BOOLEAN_TRUE) },
};

enum {
  kNoUnderrun,
  kUnderrun,
};

namespace webrtc {

OpenSlesOutput::OpenSlesOutput(const int32_t id)
    : id_(id),
      initialized_(false),
      speaker_initialized_(false),
      play_initialized_(false),
      crit_sect_(CriticalSectionWrapper::CreateCriticalSection()),
      playing_(false),
      num_fifo_buffers_needed_(0),
      number_underruns_(0),
      sles_engine_(NULL),
      sles_engine_itf_(NULL),
      sles_player_(NULL),
      sles_player_itf_(NULL),
      sles_player_sbq_itf_(NULL),
      sles_output_mixer_(NULL),
      audio_buffer_(NULL),
      active_queue_(0),
      speaker_sampling_rate_(kDefaultSampleRate),
      buffer_size_samples_(0),
      buffer_size_bytes_(0),
      playout_delay_(0) {
}

OpenSlesOutput::~OpenSlesOutput() {
}

int32_t OpenSlesOutput::Init() {
  assert(!initialized_);

  // Set up OpenSl engine.
  OPENSL_RETURN_ON_FAILURE(slCreateEngine(&sles_engine_, 1, kOption, 0,
                                          NULL, NULL),
                           -1);
  OPENSL_RETURN_ON_FAILURE((*sles_engine_)->Realize(sles_engine_,
                                                    SL_BOOLEAN_FALSE),
                           -1);
  OPENSL_RETURN_ON_FAILURE((*sles_engine_)->GetInterface(sles_engine_,
                                                         SL_IID_ENGINE,
                                                         &sles_engine_itf_),
                           -1);
  // Set up OpenSl output mix.
  OPENSL_RETURN_ON_FAILURE(
      (*sles_engine_itf_)->CreateOutputMix(sles_engine_itf_,
                                           &sles_output_mixer_,
                                           0,
                                           NULL,
                                           NULL),
      -1);
  OPENSL_RETURN_ON_FAILURE(
      (*sles_output_mixer_)->Realize(sles_output_mixer_,
                                     SL_BOOLEAN_FALSE),
      -1);

  if (!InitSampleRate()) {
    return -1;
  }
  AllocateBuffers();
  initialized_ = true;
  return 0;
}

int32_t OpenSlesOutput::Terminate() {
  // It is assumed that the caller has stopped playout before terminating.
  assert(!playing_);
  (*sles_engine_)->Destroy(sles_engine_);
  initialized_ = false;
  speaker_initialized_ = false;
  play_initialized_ = false;
  return 0;
}

int32_t OpenSlesOutput::PlayoutDeviceName(uint16_t index,
                                          char name[kAdmMaxDeviceNameSize],
                                          char guid[kAdmMaxGuidSize]) {
  assert(index == 0);
  // Empty strings.
  name[0] = '\0';
  guid[0] = '\0';
  return 0;
}

int32_t OpenSlesOutput::SetPlayoutDevice(uint16_t index) {
  assert(index == 0);
  return 0;
}

int32_t OpenSlesOutput::PlayoutIsAvailable(bool& available) {  // NOLINT
  available = true;
  return 0;
}

int32_t OpenSlesOutput::InitPlayout() {
  assert(initialized_);
  assert(!play_initialized_);
  play_initialized_ = true;
  return 0;
}

int32_t OpenSlesOutput::StartPlayout() {
  assert(play_initialized_);
  assert(!playing_);
  if (!CreateAudioPlayer()) {
    return -1;
  }

  // Register callback to receive enqueued buffers.
  OPENSL_RETURN_ON_FAILURE(
      (*sles_player_sbq_itf_)->RegisterCallback(sles_player_sbq_itf_,
                                                PlayerSimpleBufferQueueCallback,
                                                this),
      -1);
  if (!EnqueueAllBuffers()) {
    return -1;
  }

  {
    // To prevent the compiler from e.g. optimizing the code to
    // playing_ = StartCbThreads() which wouldn't have been thread safe.
    CriticalSectionScoped lock(crit_sect_.get());
    playing_ = true;
  }
  if (!StartCbThreads()) {
    playing_ = false;
    return -1;
  }
  return 0;
}

int32_t OpenSlesOutput::StopPlayout() {
  StopCbThreads();
  DestroyAudioPlayer();
  return 0;
}

int32_t OpenSlesOutput::SpeakerIsAvailable(bool& available) {  // NOLINT
  available = true;
  return 0;
}

int32_t OpenSlesOutput::InitSpeaker() {
  assert(!playing_);
  speaker_initialized_ = true;
  return 0;
}

int32_t OpenSlesOutput::SpeakerVolumeIsAvailable(bool& available) {  // NOLINT
  available = true;
  return 0;
}

int32_t OpenSlesOutput::SetSpeakerVolume(uint32_t volume) {
  assert(speaker_initialized_);
  assert(initialized_);
  // TODO(hellner): implement.
  return 0;
}

int32_t OpenSlesOutput::MaxSpeakerVolume(uint32_t& maxVolume) const {  // NOLINT
  assert(speaker_initialized_);
  assert(initialized_);
  // TODO(hellner): implement.
  maxVolume = 0;
  return 0;
}

int32_t OpenSlesOutput::MinSpeakerVolume(uint32_t& minVolume) const {  // NOLINT
  assert(speaker_initialized_);
  assert(initialized_);
  // TODO(hellner): implement.
  minVolume = 0;
  return 0;
}

int32_t OpenSlesOutput::SpeakerVolumeStepSize(
    uint16_t& stepSize) const {  // NOLINT
  assert(speaker_initialized_);
  stepSize = 1;
  return 0;
}

int32_t OpenSlesOutput::SpeakerMuteIsAvailable(bool& available) {  // NOLINT
  available = false;
  return 0;
}

int32_t OpenSlesOutput::StereoPlayoutIsAvailable(bool& available) {  // NOLINT
  available = false;
  return 0;
}

int32_t OpenSlesOutput::SetStereoPlayout(bool enable) {
  if (enable) {
    assert(false);
    return -1;
  }
  return 0;
}

int32_t OpenSlesOutput::StereoPlayout(bool& enabled) const {  // NOLINT
  enabled = kNumChannels == 2;
  return 0;
}

int32_t OpenSlesOutput::PlayoutBuffer(
    AudioDeviceModule::BufferType& type,  // NOLINT
    uint16_t& sizeMS) const {  // NOLINT
  type = AudioDeviceModule::kAdaptiveBufferSize;
  sizeMS = playout_delay_;
  return 0;
}

int32_t OpenSlesOutput::PlayoutDelay(uint16_t& delayMS) const {  // NOLINT
  delayMS = playout_delay_;
  return 0;
}

void OpenSlesOutput::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) {
  audio_buffer_ = audioBuffer;
}

int32_t OpenSlesOutput::SetLoudspeakerStatus(bool enable) {
  return 0;
}

int32_t OpenSlesOutput::GetLoudspeakerStatus(bool& enabled) const {  // NOLINT
  enabled = true;
  return 0;
}

int OpenSlesOutput::PlayoutDelayMs() {
  return playout_delay_;
}

bool OpenSlesOutput::InitSampleRate() {
  if (!SetLowLatency()) {
    speaker_sampling_rate_ = kDefaultSampleRate;
    // Default is to use 10ms buffers.
    buffer_size_samples_ = speaker_sampling_rate_ * 10 / 1000;
  }
  if (audio_buffer_->SetPlayoutSampleRate(speaker_sampling_rate_) < 0) {
    return false;
  }
  if (audio_buffer_->SetPlayoutChannels(kNumChannels) < 0) {
    return false;
  }
  UpdatePlayoutDelay();
  return true;
}

void OpenSlesOutput::UpdatePlayoutDelay() {
  // TODO(hellner): Add accurate delay estimate.
  // On average half the current buffer will have been played out.
  int outstanding_samples = (TotalBuffersUsed() - 0.5) * buffer_size_samples_;
  playout_delay_ = outstanding_samples / (speaker_sampling_rate_ / 1000);
}

bool OpenSlesOutput::SetLowLatency() {
  if (!audio_manager_.low_latency_supported()) {
    return false;
  }
  buffer_size_samples_ = audio_manager_.native_buffer_size();
  assert(buffer_size_samples_ > 0);
  speaker_sampling_rate_ = audio_manager_.native_output_sample_rate();
  assert(speaker_sampling_rate_ > 0);
  return true;
}

void OpenSlesOutput::CalculateNumFifoBuffersNeeded() {
  int number_of_bytes_needed =
      (speaker_sampling_rate_ * kNumChannels * sizeof(int16_t)) * 10 / 1000;

  // Ceiling of integer division: 1 + ((x - 1) / y)
  int buffers_per_10_ms =
      1 + ((number_of_bytes_needed - 1) / buffer_size_bytes_);
  // |num_fifo_buffers_needed_| is a multiple of 10ms of buffered up audio.
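  // Example, assuming a hypothetical device whose native low-latency output
  // is 44100 Hz mono with 441-sample (882-byte) buffers: 882 bytes are needed
  // per 10 ms, buffers_per_10_ms = 1, and 4 * 1 = 4 fifo buffers are kept.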
  num_fifo_buffers_needed_ = kNum10MsToBuffer * buffers_per_10_ms;
}

void OpenSlesOutput::AllocateBuffers() {
  // Allocate fine buffer to provide frames of the desired size.
  buffer_size_bytes_ = buffer_size_samples_ * kNumChannels * sizeof(int16_t);
  fine_buffer_.reset(new FineAudioBuffer(audio_buffer_, buffer_size_bytes_,
                                         speaker_sampling_rate_));

  // Allocate FIFO to handle passing buffers between processing and OpenSl
  // threads.
  CalculateNumFifoBuffersNeeded();  // Needs |buffer_size_bytes_| to be known.
  assert(num_fifo_buffers_needed_ > 0);
  fifo_.reset(new SingleRwFifo(num_fifo_buffers_needed_));

  // Allocate the memory area to be used.
  play_buf_.reset(new scoped_array<int8_t>[TotalBuffersUsed()]);
  int required_buffer_size = fine_buffer_->RequiredBufferSizeBytes();
  for (int i = 0; i < TotalBuffersUsed(); ++i) {
    play_buf_[i].reset(new int8_t[required_buffer_size]);
  }
}

int OpenSlesOutput::TotalBuffersUsed() const {
  return num_fifo_buffers_needed_ + kNumOpenSlBuffers;
}

bool OpenSlesOutput::EnqueueAllBuffers() {
  active_queue_ = 0;
  number_underruns_ = 0;
  for (int i = 0; i < kNumOpenSlBuffers; ++i) {
    memset(play_buf_[i].get(), 0, buffer_size_bytes_);
    OPENSL_RETURN_ON_FAILURE(
        (*sles_player_sbq_itf_)->Enqueue(
            sles_player_sbq_itf_,
            reinterpret_cast<void*>(play_buf_[i].get()),
            buffer_size_bytes_),
        false);
  }
  // OpenSL playing has been stopped. I.e. only this thread is touching
  // |fifo_|.
  while (fifo_->size() != 0) {
    // Underrun might have happened when pushing new buffers to the FIFO.
    fifo_->Pop();
  }
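  // Prefill the fifo with the remaining (zeroed) buffers so the OpenSL
  // callback has silent data to enqueue immediately after a restart, before
  // the processing thread has produced new frames.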
  for (int i = kNumOpenSlBuffers; i < TotalBuffersUsed(); ++i) {
    memset(play_buf_[i].get(), 0, buffer_size_bytes_);
    fifo_->Push(play_buf_[i].get());
  }
  return true;
}

bool OpenSlesOutput::CreateAudioPlayer() {
  if (!event_.Start()) {
    assert(false);
    return false;
  }
  SLDataLocator_AndroidSimpleBufferQueue simple_buf_queue = {
    SL_DATALOCATOR_ANDROIDSIMPLEBUFFERQUEUE,
    static_cast<SLuint32>(kNumOpenSlBuffers)
  };
  SLDataFormat_PCM configuration =
      webrtc_opensl::CreatePcmConfiguration(speaker_sampling_rate_);
  SLDataSource audio_source = { &simple_buf_queue, &configuration };

  SLDataLocator_OutputMix locator_outputmix;
  // Set up the data sink structure.
  locator_outputmix.locatorType = SL_DATALOCATOR_OUTPUTMIX;
  locator_outputmix.outputMix = sles_output_mixer_;
  SLDataSink audio_sink = { &locator_outputmix, NULL };

  // Interfaces for streaming audio data, setting volume and Android-specific
  // configuration are needed.
  // Note the interfaces still need to be initialized. This only tells OpenSl
  // that the interfaces will be needed at some point.
  SLInterfaceID ids[kNumInterfaces] = {
    SL_IID_BUFFERQUEUE, SL_IID_VOLUME, SL_IID_ANDROIDCONFIGURATION };
  SLboolean req[kNumInterfaces] = {
    SL_BOOLEAN_TRUE, SL_BOOLEAN_TRUE, SL_BOOLEAN_TRUE };
  OPENSL_RETURN_ON_FAILURE(
      (*sles_engine_itf_)->CreateAudioPlayer(sles_engine_itf_, &sles_player_,
                                             &audio_source, &audio_sink,
                                             kNumInterfaces, ids, req),
      false);
  // Realize the player in synchronous mode.
  OPENSL_RETURN_ON_FAILURE((*sles_player_)->Realize(sles_player_,
                                                    SL_BOOLEAN_FALSE),
                           false);
  OPENSL_RETURN_ON_FAILURE(
      (*sles_player_)->GetInterface(sles_player_, SL_IID_PLAY,
                                    &sles_player_itf_),
      false);
  OPENSL_RETURN_ON_FAILURE(
      (*sles_player_)->GetInterface(sles_player_, SL_IID_BUFFERQUEUE,
                                    &sles_player_sbq_itf_),
      false);
  return true;
}

void OpenSlesOutput::DestroyAudioPlayer() {
  SLAndroidSimpleBufferQueueItf sles_player_sbq_itf = sles_player_sbq_itf_;
  {
    CriticalSectionScoped lock(crit_sect_.get());
    sles_player_sbq_itf_ = NULL;
    sles_player_itf_ = NULL;
  }
  event_.Stop();
  if (sles_player_sbq_itf) {
    // Release all buffers currently queued up.
    OPENSL_RETURN_ON_FAILURE(
        (*sles_player_sbq_itf)->Clear(sles_player_sbq_itf),
        VOID_RETURN);
  }

  if (sles_player_) {
    (*sles_player_)->Destroy(sles_player_);
    sles_player_ = NULL;
  }

  if (sles_output_mixer_) {
    (*sles_output_mixer_)->Destroy(sles_output_mixer_);
    sles_output_mixer_ = NULL;
  }
}

bool OpenSlesOutput::HandleUnderrun(int event_id, int event_msg) {
  if (!playing_) {
    return false;
  }
  if (event_id == kNoUnderrun) {
    return false;
  }
  WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, id_, "Audio underrun");
  assert(event_id == kUnderrun);
  assert(event_msg > 0);
  // Wait for all enqueued buffers to be flushed.
  if (event_msg != kNumOpenSlBuffers) {
    return true;
  }
  // All buffers have been flushed. Restart the audio from scratch.
  // No need to check sles_player_itf_ as playing_ would be false before it is
  // set to NULL.
  OPENSL_RETURN_ON_FAILURE(
      (*sles_player_itf_)->SetPlayState(sles_player_itf_,
                                        SL_PLAYSTATE_STOPPED),
      true);
  EnqueueAllBuffers();
  OPENSL_RETURN_ON_FAILURE(
      (*sles_player_itf_)->SetPlayState(sles_player_itf_,
                                        SL_PLAYSTATE_PLAYING),
      true);
  return true;
}

void OpenSlesOutput::PlayerSimpleBufferQueueCallback(
    SLAndroidSimpleBufferQueueItf sles_player_sbq_itf,
    void* p_context) {
  OpenSlesOutput* audio_device = reinterpret_cast<OpenSlesOutput*>(p_context);
  audio_device->PlayerSimpleBufferQueueCallbackHandler(sles_player_sbq_itf);
}

void OpenSlesOutput::PlayerSimpleBufferQueueCallbackHandler(
    SLAndroidSimpleBufferQueueItf sles_player_sbq_itf) {
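  // Either the fifo is empty (an underrun is starting) or a previous underrun
  // is still being recovered from; in both cases just signal the processing
  // thread and let HandleUnderrun() restart playout.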
  if (fifo_->size() <= 0 || number_underruns_ > 0) {
    ++number_underruns_;
    event_.SignalEvent(kUnderrun, number_underruns_);
    return;
  }
  int8_t* audio = fifo_->Pop();
  if (audio) {
    OPENSL_RETURN_ON_FAILURE(
        (*sles_player_sbq_itf)->Enqueue(sles_player_sbq_itf,
                                        audio,
                                        buffer_size_bytes_),
        VOID_RETURN);
  }
  event_.SignalEvent(kNoUnderrun, 0);
}

bool OpenSlesOutput::StartCbThreads() {
  play_thread_.reset(ThreadWrapper::CreateThread(CbThread,
                                                 this,
                                                 kRealtimePriority,
                                                 "opensl_play_thread"));
  assert(play_thread_.get());
  OPENSL_RETURN_ON_FAILURE(
      (*sles_player_itf_)->SetPlayState(sles_player_itf_,
                                        SL_PLAYSTATE_PLAYING),
      false);

  unsigned int thread_id = 0;
  if (!play_thread_->Start(thread_id)) {
    assert(false);
    return false;
  }
  return true;
}

void OpenSlesOutput::StopCbThreads() {
  {
    CriticalSectionScoped lock(crit_sect_.get());
    playing_ = false;
  }
  if (sles_player_itf_) {
    OPENSL_RETURN_ON_FAILURE(
        (*sles_player_itf_)->SetPlayState(sles_player_itf_,
                                          SL_PLAYSTATE_STOPPED),
        VOID_RETURN);
  }
  if (play_thread_.get() == NULL) {
    return;
  }
  event_.Stop();
  if (play_thread_->Stop()) {
    play_thread_.reset();
  } else {
    assert(false);
  }
}

bool OpenSlesOutput::CbThread(void* context) {
  return reinterpret_cast<OpenSlesOutput*>(context)->CbThreadImpl();
}

bool OpenSlesOutput::CbThreadImpl() {
  assert(fine_buffer_.get() != NULL);
  int event_id;
  int event_msg;
  // event_ must not be waited on while a lock has been taken.
  event_.WaitOnEvent(&event_id, &event_msg);

  CriticalSectionScoped lock(crit_sect_.get());
  if (HandleUnderrun(event_id, event_msg)) {
    return playing_;
  }
  // If |fifo_| is not full, the next item in memory must be free.
  while (fifo_->size() < num_fifo_buffers_needed_ && playing_) {
    int8_t* audio = play_buf_[active_queue_].get();
    fine_buffer_->GetBufferData(audio);
    fifo_->Push(audio);
    active_queue_ = (active_queue_ + 1) % TotalBuffersUsed();
  }
  return playing_;
}

}  // namespace webrtc
webrtc/modules/audio_device/android/opensles_output.h (new file, 236 lines)
@@ -0,0 +1,236 @@
/*
 *  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#ifndef WEBRTC_MODULES_AUDIO_DEVICE_ANDROID_OPENSLES_OUTPUT_H_
#define WEBRTC_MODULES_AUDIO_DEVICE_ANDROID_OPENSLES_OUTPUT_H_

#include <SLES/OpenSLES.h>
#include <SLES/OpenSLES_Android.h>
#include <SLES/OpenSLES_AndroidConfiguration.h>

#include "webrtc/modules/audio_device/android/audio_manager_jni.h"
#include "webrtc/modules/audio_device/android/low_latency_event.h"
#include "webrtc/modules/audio_device/android/opensles_common.h"
#include "webrtc/modules/audio_device/include/audio_device_defines.h"
#include "webrtc/modules/audio_device/include/audio_device.h"
#include "webrtc/system_wrappers/interface/scoped_ptr.h"

namespace webrtc {

class AudioDeviceBuffer;
class CriticalSectionWrapper;
class FineAudioBuffer;
class SingleRwFifo;
class ThreadWrapper;

// OpenSL implementation that facilitates playing PCM data to an Android
// device.
// This class is thread-compatible: given an instance of this class, calls to
// non-const methods require exclusive access to the object.
class OpenSlesOutput : public webrtc_opensl::PlayoutDelayProvider {
 public:
  explicit OpenSlesOutput(const int32_t id);
  virtual ~OpenSlesOutput();

  // Main initialization and termination
  int32_t Init();
  int32_t Terminate();
  bool Initialized() const { return initialized_; }

  // Device enumeration
  int16_t PlayoutDevices() { return 1; }

  int32_t PlayoutDeviceName(uint16_t index,
                            char name[kAdmMaxDeviceNameSize],
                            char guid[kAdmMaxGuidSize]);

  // Device selection
  int32_t SetPlayoutDevice(uint16_t index);
  int32_t SetPlayoutDevice(
      AudioDeviceModule::WindowsDeviceType device) { return 0; }

  // Audio transport initialization
  int32_t PlayoutIsAvailable(bool& available);  // NOLINT
  int32_t InitPlayout();
  bool PlayoutIsInitialized() const { return play_initialized_; }

  // Audio transport control
  int32_t StartPlayout();
  int32_t StopPlayout();
  bool Playing() const { return playing_; }

  // Audio mixer initialization
  int32_t SpeakerIsAvailable(bool& available);  // NOLINT
  int32_t InitSpeaker();
  bool SpeakerIsInitialized() const { return speaker_initialized_; }

  // Speaker volume controls
  int32_t SpeakerVolumeIsAvailable(bool& available);  // NOLINT
  int32_t SetSpeakerVolume(uint32_t volume);
  int32_t SpeakerVolume(uint32_t& volume) const { return 0; }  // NOLINT
  int32_t MaxSpeakerVolume(uint32_t& maxVolume) const;  // NOLINT
  int32_t MinSpeakerVolume(uint32_t& minVolume) const;  // NOLINT
  int32_t SpeakerVolumeStepSize(uint16_t& stepSize) const;  // NOLINT

  // Speaker mute control
  int32_t SpeakerMuteIsAvailable(bool& available);  // NOLINT
  int32_t SetSpeakerMute(bool enable) { return -1; }
  int32_t SpeakerMute(bool& enabled) const { return -1; }  // NOLINT

  // Stereo support
  int32_t StereoPlayoutIsAvailable(bool& available);  // NOLINT
  int32_t SetStereoPlayout(bool enable);
  int32_t StereoPlayout(bool& enabled) const;  // NOLINT

  // Delay information and control
  int32_t SetPlayoutBuffer(const AudioDeviceModule::BufferType type,
                           uint16_t sizeMS) { return -1; }
  int32_t PlayoutBuffer(AudioDeviceModule::BufferType& type,  // NOLINT
                        uint16_t& sizeMS) const;
  int32_t PlayoutDelay(uint16_t& delayMS) const;  // NOLINT

  // Error and warning information
  bool PlayoutWarning() const { return false; }
  bool PlayoutError() const { return false; }
  void ClearPlayoutWarning() {}
  void ClearPlayoutError() {}

  // Attach audio buffer
  void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer);

  // Speaker audio routing
  int32_t SetLoudspeakerStatus(bool enable);
  int32_t GetLoudspeakerStatus(bool& enable) const;  // NOLINT

 protected:
  virtual int PlayoutDelayMs();

 private:
  enum {
    kNumInterfaces = 3,
    // TODO(xians): Reduce the number of buffers to improve the latency.
    //              Currently 30ms worth of buffers are needed due to audio
    //              pipeline processing jitter. Note: kNumOpenSlBuffers must
    //              not be changed.
    // According to the OpenSL ES documentation in the NDK:
    // The lower output latency path is used only if the application requests a
    // buffer count of 2 or more. Use the minimum number of buffers to keep
    // delay as low as possible.
    kNumOpenSlBuffers = 2,
    // NetEq delivers frames on a 10ms basis. This means that every 10ms there
    // will be a time consuming task. Keeping 10ms worth of buffers will ensure
    // that there is 10ms to perform the time consuming task without running
    // into underflow.
    // In addition to the 10ms that needs to be stored for NetEq processing
    // there will be jitter in the audio pipeline due to the acquisition of
    // locks.
    // Note: The buffers in the OpenSL queue do not count towards the 10ms of
    // frames needed since OpenSL needs to have them ready for playout.
    kNum10MsToBuffer = 4,
  };

  bool InitSampleRate();
  bool SetLowLatency();
  void UpdatePlayoutDelay();
  // It might be possible to dynamically add or remove buffers based on how
  // close to depletion the fifo is. Few buffers means low delay. Too few
  // buffers will cause underrun. Dynamically changing the number of buffers
  // will greatly increase code complexity.
  void CalculateNumFifoBuffersNeeded();
  void AllocateBuffers();
  int TotalBuffersUsed() const;
  bool EnqueueAllBuffers();
  // This function also configures the audio player, e.g. which sample rate to
  // use, so it should be called when starting playout.
  bool CreateAudioPlayer();
  void DestroyAudioPlayer();

  // When underrun happens there won't be a new frame ready for playout that
  // can be retrieved yet. Since the OpenSL thread must return ASAP there will
  // be one less queue available to OpenSL. This function handles this case
  // gracefully by restarting the audio, pushing silent frames to OpenSL for
  // playout. This will sound like a click. Underruns are also logged to
  // make it possible to identify these types of audio artifacts.
  // This function returns true if there has been underrun. Further processing
  // of audio data should be avoided until this function returns false again.
  // The function needs to be protected by |crit_sect_|.
  bool HandleUnderrun(int event_id, int event_msg);

  static void PlayerSimpleBufferQueueCallback(
      SLAndroidSimpleBufferQueueItf queueItf,
      void* pContext);
  // This function must not take any locks or do any heavy work. It is a
  // requirement for the OpenSL implementation to work as intended. The reason
  // for this is that taking locks exposes the OpenSL thread to the risk of
  // priority inversion.
  void PlayerSimpleBufferQueueCallbackHandler(
      SLAndroidSimpleBufferQueueItf queueItf);

  bool StartCbThreads();
  void StopCbThreads();
  static bool CbThread(void* context);
  // This function must be protected against data races with threads calling
  // this class' public functions. It is a requirement for this class to be
  // thread-compatible.
  bool CbThreadImpl();

  // Java API handle
  AudioManagerJni audio_manager_;

  int id_;
  bool initialized_;
  bool speaker_initialized_;
  bool play_initialized_;

  // Members that are read/write accessed concurrently by the process thread
  // and threads calling public functions of this class.
  scoped_ptr<ThreadWrapper> play_thread_;  // Processing thread
  scoped_ptr<CriticalSectionWrapper> crit_sect_;
  // This member controls the starting and stopping of playing audio to the
  // device.
  bool playing_;

  // Only one thread, T1, may push and only one thread, T2, may pull. T1 may or
  // may not be the same thread as T2. T1 is the process thread and T2 is the
  // OpenSL thread.
  scoped_ptr<SingleRwFifo> fifo_;
  int num_fifo_buffers_needed_;
  LowLatencyEvent event_;
  int number_underruns_;

  // OpenSL handles
  SLObjectItf sles_engine_;
  SLEngineItf sles_engine_itf_;
  SLObjectItf sles_player_;
  SLPlayItf sles_player_itf_;
  SLAndroidSimpleBufferQueueItf sles_player_sbq_itf_;
  SLObjectItf sles_output_mixer_;

  // Audio buffers
  AudioDeviceBuffer* audio_buffer_;
  scoped_ptr<FineAudioBuffer> fine_buffer_;
  scoped_array<scoped_array<int8_t> > play_buf_;
  // Index in |play_buf_| pointing to the audio buffer that will be ready the
  // next time PlayerSimpleBufferQueueCallbackHandler is invoked.
  // Ready means buffer is ready to be played out to device.
  int active_queue_;

  // Audio settings
  uint32_t speaker_sampling_rate_;
  int buffer_size_samples_;
  int buffer_size_bytes_;

  // Audio status
  uint16_t playout_delay_;
};

}  // namespace webrtc

#endif  // WEBRTC_MODULES_AUDIO_DEVICE_ANDROID_OPENSLES_OUTPUT_H_
webrtc/modules/audio_device/android/single_rw_fifo.cc (new file, 86 lines)
@@ -0,0 +1,86 @@
/*
 *  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include "webrtc/modules/audio_device/android/single_rw_fifo.h"

#include <assert.h>

#if !defined(__ARMEL__)
// ARM specific due to the implementation of MemoryBarrier.
#error trying to compile ARM code for non-ARM target
#endif

static int UpdatePos(int pos, int capacity) {
  return (pos + 1) % capacity;
}

namespace webrtc {

namespace subtle {

// From http://src.chromium.org/viewvc/chrome/trunk/src/base/atomicops_internals_arm_gcc.h
// Note that it is only the MemoryBarrier function that makes this class arm
// specific. Borrowing other MemoryBarrier implementations, this class could
// be extended to more platforms.
inline void MemoryBarrier() {
  // Note: This is a function call, which is also an implicit compiler
  // barrier.
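  // 0xffff0fa0 is the fixed address of the __kuser_memory_barrier helper that
  // the Linux kernel maps into every ARM process; calling it issues a full
  // memory barrier appropriate for the CPU the code is running on.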
  typedef void (*KernelMemoryBarrierFunc)();
  ((KernelMemoryBarrierFunc)0xffff0fa0)();
}

}  // namespace subtle

SingleRwFifo::SingleRwFifo(int capacity)
    : capacity_(capacity),
      size_(0),
      read_pos_(0),
      write_pos_(0) {
  queue_.reset(new int8_t*[capacity_]);
}

SingleRwFifo::~SingleRwFifo() {
}

void SingleRwFifo::Push(int8_t* mem) {
  assert(mem);

  // Ensure that there is space for the new data in the FIFO.
  // Note there is only one writer meaning that the other thread is guaranteed
  // only to decrease the size.
  const int free_slots = capacity() - size();
  if (free_slots <= 0) {
    // Size can be queried outside of the Push function. The caller is assumed
    // to ensure that Push will be successful before calling it.
    assert(false);
    return;
  }
  queue_[write_pos_] = mem;
  // Memory barrier ensures the new entry is visible in |queue_| before
  // |size_| is incremented, so the reader never sees an unwritten slot.
  subtle::MemoryBarrier();
  ++size_;
  write_pos_ = UpdatePos(write_pos_, capacity());
}

int8_t* SingleRwFifo::Pop() {
  int8_t* ret_val = NULL;
  if (size() <= 0) {
    // Size can be queried outside of the Pop function. The caller is assumed
    // to ensure that Pop will be successful before calling it.
    assert(false);
    return ret_val;
  }
  ret_val = queue_[read_pos_];
  // Memory barrier ensures the entry is read out of |queue_| before |size_|
  // is decremented, so the writer cannot reuse the slot too early.
  subtle::MemoryBarrier();
  --size_;
  read_pos_ = UpdatePos(read_pos_, capacity());
  return ret_val;
}

}  // namespace webrtc
webrtc/modules/audio_device/android/single_rw_fifo.h (new file, 49 lines)
@@ -0,0 +1,49 @@
/*
 *  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#ifndef WEBRTC_MODULES_AUDIO_DEVICE_ANDROID_SINGLE_RW_FIFO_H_
#define WEBRTC_MODULES_AUDIO_DEVICE_ANDROID_SINGLE_RW_FIFO_H_

#include "webrtc/system_wrappers/interface/atomic32.h"
#include "webrtc/system_wrappers/interface/scoped_ptr.h"
#include "webrtc/typedefs.h"

namespace webrtc {

// Implements a lock-free FIFO loosely based on
// http://src.chromium.org/viewvc/chrome/trunk/src/media/base/audio_fifo.cc
// Note that this class assumes there is one producer (writer) and one
// consumer (reader) thread.
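// Note also that the FIFO itself only asserts on misuse: callers are expected
// to check size() against capacity() before Push() and against 0 before
// Pop(), as the OpenSL input and output classes in this directory do.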
class SingleRwFifo {
 public:
  explicit SingleRwFifo(int capacity);
  ~SingleRwFifo();

  void Push(int8_t* mem);
  int8_t* Pop();

  void Clear();

  int size() { return size_.Value(); }
  int capacity() const { return capacity_; }

 private:
  scoped_array<int8_t*> queue_;
  int capacity_;

  Atomic32 size_;

  int read_pos_;
  int write_pos_;
};

}  // namespace webrtc

#endif  // WEBRTC_MODULES_AUDIO_DEVICE_ANDROID_SINGLE_RW_FIFO_H_
webrtc/modules/audio_device/android/single_rw_fifo_unittest.cc (new file, 126 lines)
@@ -0,0 +1,126 @@
/*
 *  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include "webrtc/modules/audio_device/android/single_rw_fifo.h"

#include <list>

#include "testing/gtest/include/gtest/gtest.h"
#include "webrtc/system_wrappers/interface/scoped_ptr.h"

namespace webrtc {

class SingleRwFifoTest : public testing::Test {
 public:
  enum {
    // Uninteresting as it does not affect the test.
    kBufferSize = 8,
    kCapacity = 6,
  };

  SingleRwFifoTest() : fifo_(kCapacity), pushed_(0), available_(0) {
  }
  virtual ~SingleRwFifoTest() {}

  void SetUp() {
    for (int8_t i = 0; i < kCapacity; ++i) {
      // Create memory area.
      buffer_[i].reset(new int8_t[kBufferSize]);
      // Set the first byte in the buffer to the order in which it was
      // created; this allows us to e.g. check that the buffers don't
      // re-arrange.
      buffer_[i][0] = i;
      // Queue used by test.
      memory_queue_.push_back(buffer_[i].get());
    }
    available_ = kCapacity;
    VerifySizes();
  }

  void Push(int number_of_buffers) {
    for (int8_t i = 0; i < number_of_buffers; ++i) {
      int8_t* data = memory_queue_.front();
      memory_queue_.pop_front();
      fifo_.Push(data);
      --available_;
      ++pushed_;
    }
    VerifySizes();
    VerifyOrdering();
  }
  void Pop(int number_of_buffers) {
    for (int8_t i = 0; i < number_of_buffers; ++i) {
      int8_t* data = fifo_.Pop();
      memory_queue_.push_back(data);
      ++available_;
      --pushed_;
    }
    VerifySizes();
    VerifyOrdering();
  }

  void VerifyOrdering() const {
    std::list<int8_t*>::const_iterator iter = memory_queue_.begin();
    if (iter == memory_queue_.end()) {
      return;
    }
    int8_t previous_index = DataToElementIndex(*iter);
    ++iter;
    for (; iter != memory_queue_.end(); ++iter) {
      int8_t current_index = DataToElementIndex(*iter);
      EXPECT_EQ(current_index, ++previous_index % kCapacity);
    }
  }

  void VerifySizes() {
    EXPECT_EQ(available_, static_cast<int>(memory_queue_.size()));
    EXPECT_EQ(pushed_, fifo_.size());
  }

  int8_t DataToElementIndex(int8_t* data) const {
    return data[0];
  }

 protected:
  SingleRwFifo fifo_;
  // Memory area for proper de-allocation.
  scoped_array<int8_t> buffer_[kCapacity];
  std::list<int8_t*> memory_queue_;

  int pushed_;
  int available_;

 private:
  DISALLOW_COPY_AND_ASSIGN(SingleRwFifoTest);
};

TEST_F(SingleRwFifoTest, Construct) {
  // All verifications are done in SetUp.
}

TEST_F(SingleRwFifoTest, Push) {
  Push(kCapacity);
}

TEST_F(SingleRwFifoTest, Pop) {
  // Push all available.
  Push(available_);

  // Test border cases:
  // At capacity
  Pop(1);
  Push(1);

  // At minimal capacity
  Pop(pushed_);
  Push(1);
  Pop(1);
}

}  // namespace webrtc
@ -125,6 +125,20 @@
          'sources': [
            'android/audio_device_opensles_android.cc',
            'android/audio_device_opensles_android.h',
            'android/audio_manager_jni.cc',
            'android/audio_manager_jni.h',
            'android/fine_audio_buffer.cc',
            'android/fine_audio_buffer.h',
            'android/low_latency_event_posix.cc',
            'android/low_latency_event.h',
            'android/opensles_common.cc',
            'android/opensles_common.h',
            'android/opensles_input.cc',
            'android/opensles_input.h',
            'android/opensles_output.cc',
            'android/opensles_output.h',
            'android/single_rw_fifo.cc',
            'android/single_rw_fifo.h',
          ],
        }, {
          'sources': [
@ -240,6 +254,30 @@
          ],
        }],
      ],
      'conditions': [
        ['OS=="android" and enable_android_opensl==1', {
          'targets': [
            {
              'target_name': 'audio_device_unittest',
              'type': 'executable',
              'dependencies': [
                'audio_device',
                'webrtc_utility',
                '<(DEPTH)/testing/gmock.gyp:gmock',
                '<(DEPTH)/testing/gtest.gyp:gtest',
                '<(webrtc_root)/system_wrappers/source/system_wrappers.gyp:system_wrappers',
                '<(webrtc_root)/test/test.gyp:test_support_main',
              ],
              'sources': [
                'android/fine_audio_buffer_unittest.cc',
                'android/low_latency_event_unittest.cc',
                'android/single_rw_fifo_unittest.cc',
                'mock/mock_audio_device_buffer.h',
              ],
            },
          ],
        }],
      ],
    }],  # include_tests
  ],
}
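Both new hunks hang off the same enable_android_opensl gyp variable that gates the OpenSL sources and the new audio_device_unittest target. A sketch of how a checkout would opt in at project-generation time — only the variable name comes from the condition above; the supplement-file mechanism and default value are assumptions:

# Assumed: a supplement .gypi picked up when gyp runs (or the equivalent
# GYP_DEFINES entry). Only 'enable_android_opensl' is taken from this diff.
{
  'variables': {
    'enable_android_opensl': 1,
  },
}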
@ -29,6 +29,9 @@ class MediaFile;
class AudioDeviceBuffer
{
public:
    AudioDeviceBuffer();
    virtual ~AudioDeviceBuffer();

    void SetId(uint32_t id);
    int32_t RegisterAudioCallback(AudioTransport* audioCallback);

@ -57,8 +60,8 @@ public:
    int32_t DeliverRecordedData();
    uint32_t NewMicLevel() const;

    int32_t RequestPlayoutData(uint32_t nSamples);
    int32_t GetPlayoutData(void* audioBuffer);
    virtual int32_t RequestPlayoutData(uint32_t nSamples);
    virtual int32_t GetPlayoutData(void* audioBuffer);

    int32_t StartInputFileRecording(
        const char fileName[kAdmMaxFileNameSize]);
@ -69,9 +72,6 @@ public:

    int32_t SetTypingStatus(bool typingStatus);

    AudioDeviceBuffer();
    ~AudioDeviceBuffer();

private:
    int32_t _id;
    CriticalSectionWrapper& _critSect;
30  webrtc/modules/audio_device/mock_audio_device_buffer.h  Normal file
@ -0,0 +1,30 @@
/*
 *  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#ifndef WEBRTC_MODULES_AUDIO_DEVICE_MOCK_AUDIO_DEVICE_BUFFER_H_
#define WEBRTC_MODULES_AUDIO_DEVICE_MOCK_AUDIO_DEVICE_BUFFER_H_

#include "testing/gmock/include/gmock/gmock.h"
#include "webrtc/modules/audio_device/audio_device_buffer.h"

namespace webrtc {

class MockAudioDeviceBuffer : public AudioDeviceBuffer {
 public:
  MockAudioDeviceBuffer() {}
  virtual ~MockAudioDeviceBuffer() {}

  MOCK_METHOD1(RequestPlayoutData, int32_t(uint32_t nSamples));
  MOCK_METHOD1(GetPlayoutData, int32_t(void* audioBuffer));
};

}  // namespace webrtc

#endif  // WEBRTC_MODULES_AUDIO_DEVICE_MOCK_AUDIO_DEVICE_BUFFER_H_
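This mock only works because the audio_device_buffer.h hunk above makes RequestPlayoutData() and GetPlayoutData() virtual. A sketch of how a test could drive it with gmock — the test name, sample count, and expectations are illustrative, not part of this commit:

#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
#include "webrtc/modules/audio_device/mock_audio_device_buffer.h"

using ::testing::_;
using ::testing::Return;

namespace webrtc {

TEST(MockAudioDeviceBufferSketch, InterceptsPlayoutCalls) {
  MockAudioDeviceBuffer audio_buffer;
  // Pretend 10 ms of 16 kHz mono audio (160 samples) is always available.
  EXPECT_CALL(audio_buffer, RequestPlayoutData(160))
      .WillRepeatedly(Return(160));
  EXPECT_CALL(audio_buffer, GetPlayoutData(_))
      .WillRepeatedly(Return(160));

  // Code under test receives the mock through the base-class pointer and
  // hits the mocked virtuals instead of the real buffer.
  AudioDeviceBuffer* generic = &audio_buffer;
  EXPECT_EQ(160, generic->RequestPlayoutData(160));
}

}  // namespace webrtc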
@ -11,7 +11,6 @@
|
||||
// Atomic, system independent 32-bit integer. Unless you know what you're
|
||||
// doing, use locks instead! :-)
|
||||
//
|
||||
// Note: uses full memory barriers.
|
||||
// Note: assumes 32-bit (or higher) system
|
||||
#ifndef WEBRTC_SYSTEM_WRAPPERS_INTERFACE_ATOMIC32_H_
|
||||
#define WEBRTC_SYSTEM_WRAPPERS_INTERFACE_ATOMIC32_H_
|
||||
@ -42,7 +41,9 @@ class Atomic32 {
|
||||
// Sets the value atomically to new_value if the value equals compare value.
|
||||
// The function returns true if the exchange happened.
|
||||
bool CompareExchange(int32_t new_value, int32_t compare_value);
|
||||
int32_t Value() const;
|
||||
int32_t Value() {
|
||||
return *this += 0;
|
||||
}
|
||||
|
||||
private:
|
||||
// Disable the + and - operator since it's unclear what these operations
|
||||
|
@ -46,8 +46,4 @@ bool Atomic32::CompareExchange(int32_t new_value, int32_t compare_value) {
|
||||
return OSAtomicCompareAndSwap32Barrier(compare_value, new_value, &value_);
|
||||
}
|
||||
|
||||
int32_t Atomic32::Value() const {
|
||||
return value_;
|
||||
}
|
||||
|
||||
} // namespace webrtc
|
||||
|
@ -50,8 +50,4 @@ bool Atomic32::CompareExchange(int32_t new_value, int32_t compare_value) {
|
||||
return __sync_bool_compare_and_swap(&value_, compare_value, new_value);
|
||||
}
|
||||
|
||||
int32_t Atomic32::Value() const {
|
||||
return value_;
|
||||
}
|
||||
|
||||
} // namespace webrtc
|
||||
|
@ -59,8 +59,4 @@ bool Atomic32::CompareExchange(int32_t new_value, int32_t compare_value) {
|
||||
return (old_value == compare_value);
|
||||
}
|
||||
|
||||
int32_t Atomic32::Value() const {
|
||||
return value_;
|
||||
}
|
||||
|
||||
} // namespace webrtc
|
||||
|
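The three platform implementations drop their plain `return value_;` definitions of Value(), and the header gains the inline `return *this += 0;` form: adding zero routes the read through the platform's atomic add, so it carries a full barrier instead of being an unsynchronized load. In C++11 terms the difference is roughly the following — a sketch for illustration only, since Atomic32 predates std::atomic:

#include <atomic>
#include <cstdint>

std::atomic<int32_t> value(0);

// Roughly the removed per-platform 'return value_;': a plain read with no
// ordering guarantee relative to writes on other threads.
int32_t PlainRead() {
  return value.load(std::memory_order_relaxed);
}

// Roughly the new 'return *this += 0;': adding zero is a read-modify-write,
// so the read pays for (and benefits from) a full barrier.
int32_t BarrierRead() {
  return value.fetch_add(0, std::memory_order_seq_cst);
}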
@ -1627,7 +1627,9 @@ JNIEXPORT jobjectArray JNICALL Java_org_webrtc_videoengineapp_ViEAndroidJavaAPI_
|
||||
codecToList.plname, codecToList.pltype,
|
||||
codecToList.plfreq, codecToList.pacsize,
|
||||
codecToList.channels, codecToList.rate);
|
||||
assert(written >= 0 && written < static_cast<int>(sizeof(info)));
|
||||
if (written < 0 || written >= static_cast<int>(sizeof(info))) {
|
||||
assert(false);
|
||||
}
|
||||
__android_log_print(ANDROID_LOG_DEBUG, WEBRTC_LOG_TAG,
|
||||
"VoiceEgnine Codec[%d] %s", i, info);
|
||||
env->SetObjectArrayElement(ret, i, env->NewStringUTF( info ));
|
||||
|
@ -8,9 +8,13 @@
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#if defined(WEBRTC_ANDROID) && !defined(WEBRTC_ANDROID_OPENSLES)
|
||||
#if defined(WEBRTC_ANDROID)
|
||||
#if defined(WEBRTC_ANDROID_OPENSLES)
|
||||
#include "webrtc/modules/audio_device/android/audio_manager_jni.h"
|
||||
#else
|
||||
#include "webrtc/modules/audio_device/android/audio_device_jni_android.h"
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#include "webrtc/system_wrappers/interface/trace.h"
|
||||
#include "webrtc/voice_engine/voice_engine_impl.h"
|
||||
@ -142,6 +146,7 @@ int VoiceEngine::SetAndroidObjects(void* javaVM, void* env, void* context)
|
||||
{
|
||||
#ifdef WEBRTC_ANDROID
|
||||
#ifdef WEBRTC_ANDROID_OPENSLES
|
||||
AudioManagerJni::SetAndroidAudioDeviceObjects(javaVM, env, context);
|
||||
return 0;
|
||||
#else
|
||||
return AudioDeviceAndroidJni::SetAndroidAudioDeviceObjects(
|
||||
|
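With WEBRTC_ANDROID_OPENSLES defined, SetAndroidObjects now routes to AudioManagerJni instead of the Java audio device; the call-site contract is unchanged. A sketch of how an embedder typically supplies the VM and context from JNI_OnLoad — the function signature is from the hunk above, while the include path, JNI version, and local names are assumptions:

#include <jni.h>

#include "webrtc/voice_engine/include/voe_base.h"

// Called by the Android runtime when the native library is loaded. Hand
// the JVM and application context to WebRTC before creating the
// VoiceEngine, so the ADM can query AudioManager for low-latency
// buffer sizes.
jint JNI_OnLoad(JavaVM* jvm, void* /*reserved*/) {
  JNIEnv* env = NULL;
  if (jvm->GetEnv(reinterpret_cast<void**>(&env), JNI_VERSION_1_6) != JNI_OK) {
    return -1;
  }
  // |context| would be the application's android.content.Context, passed
  // in separately by the app; NULL here only keeps the sketch short.
  void* context = NULL;
  webrtc::VoiceEngine::SetAndroidObjects(jvm, env, context);
  return JNI_VERSION_1_6;
}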