Refactoring WebRTC Java/JNI audio track in C++ and Java.
This CL is part II in a major refactoring effort. See
https://webrtc-codereview.appspot.com/33969004 for part I.

- Removes unused code and old WEBRTC logging macros
- Now uses optimal sample rate and buffer size in Java AudioTrack (used hard-coded sample rate before)
- Makes code more in line with the implementation in Chrome
- Adds helper methods for JNI handling to improve readability
- Changes the threading model (high-prio audio thread now lives in Java-land and C++ only works as proxy)
- Simplifies the delay estimate
- Adds basic thread checks
- Removes all locks in C++ land
- Removes all locks in Java
- Improves construction/destruction
- Additional cleanup

Tested using AppRTCDemo and WebRTCDemo APKs on N6, N5, N7, Samsung Galaxy S4 and
Samsung Galaxy S4 mini (which uses 44.1kHz as native sample rate).

BUG=NONE
R=magjed@webrtc.org, perkj@webrtc.org

Review URL: https://webrtc-codereview.appspot.com/39169004

Cr-Commit-Position: refs/heads/master@{#8460}
git-svn-id: http://webrtc.googlecode.com/svn/trunk@8460 4adac7df-926f-26a2-2b94-8c16560cd09d
This commit is contained in:
parent 2ad3bb17a7
commit 962c62475e
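Background sketch (not part of the change itself): one of the items above replaces the hard-coded 44.1 kHz rate with the sample rate and buffer size the device reports as optimal. The Java sketch below is illustrative only; the class and method names are made up, and it assumes API level 17+ for AudioManager.PROPERTY_OUTPUT_SAMPLE_RATE. It shows the kind of query the new Java code relies on; in the CL itself the equivalent query is provided by the new WebRtcAudioUtils helper shown in the diffs below.

    import android.content.Context;
    import android.media.AudioFormat;
    import android.media.AudioManager;
    import android.media.AudioTrack;
    import android.os.Build;

    class AudioTrackConfigSketch {
      // Fallback when the platform cannot report an optimal rate (pre-API 17).
      private static final int DEFAULT_SAMPLE_RATE_HZ = 44100;

      // Ask the OS for the sample rate it considers optimal for the primary
      // output stream instead of hard-coding 44100 Hz.
      static int getNativeOutputSampleRate(Context context) {
        AudioManager am =
            (AudioManager) context.getSystemService(Context.AUDIO_SERVICE);
        if (Build.VERSION.SDK_INT < Build.VERSION_CODES.JELLY_BEAN_MR1) {
          return DEFAULT_SAMPLE_RATE_HZ;
        }
        String rate = am.getProperty(AudioManager.PROPERTY_OUTPUT_SAMPLE_RATE);
        return (rate == null) ? DEFAULT_SAMPLE_RATE_HZ : Integer.parseInt(rate);
      }

      // Pick a playout buffer size that is at least the platform minimum for
      // 16-bit mono PCM at the chosen rate.
      static int getPlayoutBufferSizeInBytes(int sampleRateHz) {
        return AudioTrack.getMinBufferSize(sampleRateHz,
                                           AudioFormat.CHANNEL_OUT_MONO,
                                           AudioFormat.ENCODING_PCM_16BIT);
      }
    }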
@@ -135,6 +135,7 @@
   '<(webrtc_modules_dir)/video_render/android/java/src/org/webrtc/videoengine/ViEAndroidGLES20.java',
   '<(webrtc_modules_dir)/video_render/android/java/src/org/webrtc/videoengine/ViERenderer.java',
   '<(webrtc_modules_dir)/video_render/android/java/src/org/webrtc/videoengine/ViESurfaceRenderer.java',
+  '<(webrtc_modules_dir)/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioUtils.java',
   '<(webrtc_modules_dir)/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioRecord.java',
   '<(webrtc_modules_dir)/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioTrack.java',
 ],
@@ -174,8 +174,6 @@ public class MediaEngine implements VideoDecodeEncodeObserver {
       cameras[info.facing] = info;
     }
     setDefaultCamera();
-    check(voe.setSpeakerVolume(volumeLevel) == 0,
-        "Failed setSpeakerVolume");
     check(voe.setAecmMode(VoiceEngine.AecmModes.SPEAKERPHONE, false) == 0,
         "VoE set Aecm speakerphone mode failed");
     check(vie.setKeyFrameRequestMethod(videoChannel,
@@ -230,5 +230,5 @@ public class WebRTCDemo extends Activity implements MenuStateProvider {
         main.toggleStart();
         handler.postDelayed(startOrStopCallback, getCallRestartPeriodicity());
       }
     };
   }
@@ -29,8 +29,8 @@ template <class InputType, class OutputType>
 class OpenSlRunnerTemplate {
  public:
   OpenSlRunnerTemplate()
-      : output_(0),
-        input_() {
+      : output_(),
+        input_(&output_) {
     output_.AttachAudioBuffer(&audio_buffer_);
     if (output_.Init() != 0) {
       assert(false);
@@ -18,6 +18,9 @@ enum {
   kBitsPerSample = 16,
   kNumChannels = 1,
   kDefaultBufSizeInSamples = kDefaultSampleRate * 10 / 1000,
+  // Number of bytes per audio frame.
+  // Example: 16-bit PCM in mono => 1*(16/8)=2 [bytes/frame]
+  kBytesPerFrame = kNumChannels * (kBitsPerSample / 8),
 };

 class PlayoutDelayProvider {
@@ -34,10 +34,10 @@ class AudioDeviceTemplate : public AudioDeviceGeneric {
     InputType::ClearAndroidAudioDeviceObjects();
   }

+  // TODO(henrika): remove id
   explicit AudioDeviceTemplate(const int32_t id)
-      : output_(id),
-        // TODO(henrika): provide proper delay estimate using input_(&output_).
-        input_() {
+      : output_(),
+        input_(&output_) {
   }

   virtual ~AudioDeviceTemplate() {
@@ -58,11 +58,11 @@ class AudioDeviceTemplate : public AudioDeviceGeneric {
   }

   bool Initialized() const {
-    return output_.Initialized();
+    return true;
   }

   int16_t PlayoutDevices() {
-    return output_.PlayoutDevices();
+    return 1;
   }

   int16_t RecordingDevices() {
@@ -73,23 +73,28 @@ class AudioDeviceTemplate : public AudioDeviceGeneric {
       uint16_t index,
       char name[kAdmMaxDeviceNameSize],
       char guid[kAdmMaxGuidSize]) {
-    return output_.PlayoutDeviceName(index, name, guid);
+    FATAL() << "Should never be called";
+    return -1;
   }

   int32_t RecordingDeviceName(
       uint16_t index,
       char name[kAdmMaxDeviceNameSize],
       char guid[kAdmMaxGuidSize]) {
+    FATAL() << "Should never be called";
     return -1;
   }

   int32_t SetPlayoutDevice(uint16_t index) {
-    return output_.SetPlayoutDevice(index);
+    // OK to use but it has no effect currently since device selection is
+    // done using Andoid APIs instead.
+    return 0;
   }

   int32_t SetPlayoutDevice(
       AudioDeviceModule::WindowsDeviceType device) {
-    return output_.SetPlayoutDevice(device);
+    FATAL() << "Should never be called";
+    return -1;
   }

   int32_t SetRecordingDevice(uint16_t index) {
@@ -106,7 +111,8 @@ class AudioDeviceTemplate : public AudioDeviceGeneric {

   int32_t PlayoutIsAvailable(
       bool& available) {  // NOLINT
-    return output_.PlayoutIsAvailable(available);
+    available = true;
+    return 0;
   }

   int32_t InitPlayout() {
@@ -175,17 +181,16 @@ class AudioDeviceTemplate : public AudioDeviceGeneric {
   int32_t WaveOutVolume(
       uint16_t& volumeLeft,  // NOLINT
       uint16_t& volumeRight) const {  // NOLINT
-    WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, 0,
-                 " API call not supported on this platform");
+    FATAL() << "Should never be called";
     return -1;
   }

   int32_t InitSpeaker() {
-    return output_.InitSpeaker();
+    return 0;
   }

   bool SpeakerIsInitialized() const {
-    return output_.SpeakerIsInitialized();
+    return true;
   }

   int32_t InitMicrophone() {
@@ -198,31 +203,42 @@ class AudioDeviceTemplate : public AudioDeviceGeneric {

   int32_t SpeakerVolumeIsAvailable(
       bool& available) {  // NOLINT
-    return output_.SpeakerVolumeIsAvailable(available);
+    available = false;
+    FATAL() << "Should never be called";
+    return -1;
   }

+  // TODO(henrika): add support if/when needed.
   int32_t SetSpeakerVolume(uint32_t volume) {
-    return output_.SetSpeakerVolume(volume);
+    FATAL() << "Should never be called";
+    return -1;
   }

+  // TODO(henrika): add support if/when needed.
   int32_t SpeakerVolume(
       uint32_t& volume) const {  // NOLINT
-    return output_.SpeakerVolume(volume);
+    FATAL() << "Should never be called";
+    return -1;
   }

+  // TODO(henrika): add support if/when needed.
   int32_t MaxSpeakerVolume(
       uint32_t& maxVolume) const {  // NOLINT
-    return output_.MaxSpeakerVolume(maxVolume);
+    FATAL() << "Should never be called";
+    return -1;
   }

+  // TODO(henrika): add support if/when needed.
   int32_t MinSpeakerVolume(
       uint32_t& minVolume) const {  // NOLINT
-    return output_.MinSpeakerVolume(minVolume);
+    FATAL() << "Should never be called";
+    return -1;
   }

   int32_t SpeakerVolumeStepSize(
       uint16_t& stepSize) const {  // NOLINT
-    return output_.SpeakerVolumeStepSize(stepSize);
+    FATAL() << "Should never be called";
+    return -1;
   }

   int32_t MicrophoneVolumeIsAvailable(
@@ -263,16 +279,19 @@ class AudioDeviceTemplate : public AudioDeviceGeneric {

   int32_t SpeakerMuteIsAvailable(
       bool& available) {  // NOLINT
-    return output_.SpeakerMuteIsAvailable(available);
+    FATAL() << "Should never be called";
+    return -1;
   }

   int32_t SetSpeakerMute(bool enable) {
-    return output_.SetSpeakerMute(enable);
+    FATAL() << "Should never be called";
+    return -1;
   }

   int32_t SpeakerMute(
       bool& enabled) const {  // NOLINT
-    return output_.SpeakerMute(enabled);
+    FATAL() << "Should never be called";
+    return -1;
   }

   int32_t MicrophoneMuteIsAvailable(
@@ -311,16 +330,19 @@ class AudioDeviceTemplate : public AudioDeviceGeneric {

   int32_t StereoPlayoutIsAvailable(
       bool& available) {  // NOLINT
-    return output_.StereoPlayoutIsAvailable(available);
+    available = false;
+    return 0;
   }

   int32_t SetStereoPlayout(bool enable) {
-    return output_.SetStereoPlayout(enable);
+    return -1;
   }

   int32_t StereoPlayout(
       bool& enabled) const {  // NOLINT
-    return output_.StereoPlayout(enabled);
+    enabled = false;
+    FATAL() << "Should never be called";
+    return -1;
   }

   int32_t StereoRecordingIsAvailable(
@@ -342,13 +364,15 @@ class AudioDeviceTemplate : public AudioDeviceGeneric {
   int32_t SetPlayoutBuffer(
       const AudioDeviceModule::BufferType type,
       uint16_t sizeMS) {
-    return output_.SetPlayoutBuffer(type, sizeMS);
+    FATAL() << "Should never be called";
+    return -1;
   }

   int32_t PlayoutBuffer(
       AudioDeviceModule::BufferType& type,
       uint16_t& sizeMS) const {  // NOLINT
-    return output_.PlayoutBuffer(type, sizeMS);
+    FATAL() << "Should never be called";
+    return -1;
   }

   int32_t PlayoutDelay(
@@ -368,11 +392,11 @@ class AudioDeviceTemplate : public AudioDeviceGeneric {
   }

   bool PlayoutWarning() const {
-    return output_.PlayoutWarning();
+    return false;
   }

   bool PlayoutError() const {
-    return output_.PlayoutError();
+    return false;
   }

   bool RecordingWarning() const {
@@ -383,13 +407,9 @@ class AudioDeviceTemplate : public AudioDeviceGeneric {
     return false;
   }

-  void ClearPlayoutWarning() {
-    return output_.ClearPlayoutWarning();
-  }
+  void ClearPlayoutWarning() {}

-  void ClearPlayoutError() {
-    return output_.ClearPlayoutError();
-  }
+  void ClearPlayoutError() {}

   void ClearRecordingWarning() {}

@@ -401,18 +421,22 @@ class AudioDeviceTemplate : public AudioDeviceGeneric {
     input_.AttachAudioBuffer(audioBuffer);
   }

+  // TODO(henrika): remove
   int32_t SetPlayoutSampleRate(
       const uint32_t samplesPerSec) {
-    return output_.SetPlayoutSampleRate(samplesPerSec);
+    FATAL() << "Should never be called";
+    return -1;
   }

   int32_t SetLoudspeakerStatus(bool enable) {
-    return output_.SetLoudspeakerStatus(enable);
+    FATAL() << "Should never be called";
+    return -1;
   }

   int32_t GetLoudspeakerStatus(
       bool& enable) const {  // NOLINT
-    return output_.GetLoudspeakerStatus(enable);
+    FATAL() << "Should never be called";
+    return -1;
   }

   bool BuiltInAECIsAvailable() const {
|
@ -25,10 +25,6 @@
|
|||||||
|
|
||||||
namespace webrtc {
|
namespace webrtc {
|
||||||
|
|
||||||
// Number of bytes per audio frame.
|
|
||||||
// Example: 16-bit PCM in mono => 1*(16/8)=2 [bytes/frame]
|
|
||||||
static const int kBytesPerFrame = kNumChannels * (kBitsPerSample / 8);
|
|
||||||
|
|
||||||
// We are unable to obtain exact measurements of the hardware delay on Android.
|
// We are unable to obtain exact measurements of the hardware delay on Android.
|
||||||
// Instead, a lower bound (based on measurements) is used.
|
// Instead, a lower bound (based on measurements) is used.
|
||||||
// TODO(henrika): is it possible to improve this?
|
// TODO(henrika): is it possible to improve this?
|
||||||
@ -59,6 +55,8 @@ void AudioRecordJni::SetAndroidAudioDeviceObjects(void* jvm, void* env,
|
|||||||
jni, "org/webrtc/voiceengine/WebRtcAudioRecord");
|
jni, "org/webrtc/voiceengine/WebRtcAudioRecord");
|
||||||
g_audio_record_class = reinterpret_cast<jclass>(
|
g_audio_record_class = reinterpret_cast<jclass>(
|
||||||
NewGlobalRef(jni, local_class));
|
NewGlobalRef(jni, local_class));
|
||||||
|
jni->DeleteLocalRef(local_class);
|
||||||
|
CHECK_EXCEPTION(jni);
|
||||||
|
|
||||||
// Register native methods with the WebRtcAudioRecord class. These methods
|
// Register native methods with the WebRtcAudioRecord class. These methods
|
||||||
// are declared private native in WebRtcAudioRecord.java.
|
// are declared private native in WebRtcAudioRecord.java.
|
||||||
@ -86,15 +84,17 @@ void AudioRecordJni::ClearAndroidAudioDeviceObjects() {
|
|||||||
g_jvm = NULL;
|
g_jvm = NULL;
|
||||||
}
|
}
|
||||||
|
|
||||||
AudioRecordJni::AudioRecordJni()
|
AudioRecordJni::AudioRecordJni(PlayoutDelayProvider* delay_provider)
|
||||||
: j_audio_record_(NULL),
|
: delay_provider_(delay_provider),
|
||||||
|
j_audio_record_(NULL),
|
||||||
direct_buffer_address_(NULL),
|
direct_buffer_address_(NULL),
|
||||||
direct_buffer_capacity_in_bytes_(0),
|
direct_buffer_capacity_in_bytes_(0),
|
||||||
frames_per_buffer_(0),
|
frames_per_buffer_(0),
|
||||||
initialized_(false),
|
initialized_(false),
|
||||||
recording_(false),
|
recording_(false),
|
||||||
audio_device_buffer_(NULL),
|
audio_device_buffer_(NULL),
|
||||||
sample_rate_hz_(0) {
|
sample_rate_hz_(0),
|
||||||
|
playout_delay_in_milliseconds_(0) {
|
||||||
ALOGD("ctor%s", GetThreadInfo().c_str());
|
ALOGD("ctor%s", GetThreadInfo().c_str());
|
||||||
CHECK(HasDeviceObjects());
|
CHECK(HasDeviceObjects());
|
||||||
CreateJavaInstance();
|
CreateJavaInstance();
|
||||||
@ -197,7 +197,6 @@ int32_t AudioRecordJni::StopRecording() {
|
|||||||
initialized_ = false;
|
initialized_ = false;
|
||||||
recording_ = false;
|
recording_ = false;
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
int32_t AudioRecordJni::RecordingDelay(uint16_t& delayMS) const { // NOLINT
|
int32_t AudioRecordJni::RecordingDelay(uint16_t& delayMS) const { // NOLINT
|
||||||
@ -268,7 +267,7 @@ void AudioRecordJni::OnCacheDirectBufferAddress(
|
|||||||
void JNICALL AudioRecordJni::DataIsRecorded(
|
void JNICALL AudioRecordJni::DataIsRecorded(
|
||||||
JNIEnv* env, jobject obj, jint length, jlong nativeAudioRecord) {
|
JNIEnv* env, jobject obj, jint length, jlong nativeAudioRecord) {
|
||||||
webrtc::AudioRecordJni* this_object =
|
webrtc::AudioRecordJni* this_object =
|
||||||
reinterpret_cast<webrtc::AudioRecordJni*> (nativeAudioRecord );
|
reinterpret_cast<webrtc::AudioRecordJni*> (nativeAudioRecord);
|
||||||
this_object->OnDataIsRecorded(length);
|
this_object->OnDataIsRecorded(length);
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -276,10 +275,15 @@ void JNICALL AudioRecordJni::DataIsRecorded(
|
|||||||
// the thread is 'AudioRecordThread'.
|
// the thread is 'AudioRecordThread'.
|
||||||
void AudioRecordJni::OnDataIsRecorded(int length) {
|
void AudioRecordJni::OnDataIsRecorded(int length) {
|
||||||
DCHECK(thread_checker_java_.CalledOnValidThread());
|
DCHECK(thread_checker_java_.CalledOnValidThread());
|
||||||
|
if (playout_delay_in_milliseconds_ == 0) {
|
||||||
|
playout_delay_in_milliseconds_ = delay_provider_->PlayoutDelayMs();
|
||||||
|
ALOGD("cached playout delay: %d", playout_delay_in_milliseconds_);
|
||||||
|
}
|
||||||
audio_device_buffer_->SetRecordedBuffer(direct_buffer_address_,
|
audio_device_buffer_->SetRecordedBuffer(direct_buffer_address_,
|
||||||
frames_per_buffer_);
|
frames_per_buffer_);
|
||||||
// TODO(henrika): improve playout delay estimate.
|
audio_device_buffer_->SetVQEData(playout_delay_in_milliseconds_,
|
||||||
audio_device_buffer_->SetVQEData(0, kHardwareDelayInMilliseconds, 0);
|
kHardwareDelayInMilliseconds,
|
||||||
|
0 /* clockDrift */);
|
||||||
audio_device_buffer_->DeliverRecordedData();
|
audio_device_buffer_->DeliverRecordedData();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@@ -41,7 +41,7 @@ class PlayoutDelayProvider;
 // CHECK that the calling thread is attached to a Java VM.
 //
 // All methods use AttachThreadScoped to attach to a Java VM if needed and then
-// detach when method goes out of scope. We do so beacuse this class does not
+// detach when method goes out of scope. We do so because this class does not
 // own the thread is is created and called on and other objects on the same
 // thread might put us in a detached state at any time.
 class AudioRecordJni {
@@ -57,7 +57,7 @@ class AudioRecordJni {
   // existing global references and enables garbage collection.
   static void ClearAndroidAudioDeviceObjects();

-  AudioRecordJni();
+  AudioRecordJni(PlayoutDelayProvider* delay_provider);
   ~AudioRecordJni();

   int32_t Init();
@@ -118,10 +118,11 @@ class AudioRecordJni {
   // thread in Java. Detached during construction of this object.
   rtc::ThreadChecker thread_checker_java_;

-  // Should return the current playout delay.
-  // TODO(henrika): fix on Android. Reports zero today.
-  // PlayoutDelayProvider* delay_provider_;
+  // Returns the current playout delay.
+  // TODO(henrika): this value is currently fixed since initial tests have
+  // shown that the estimated delay varies very little over time. It might be
+  // possible to make improvements in this area.
+  PlayoutDelayProvider* delay_provider_;

   // The Java WebRtcAudioRecord instance.
   jobject j_audio_record_;
@@ -151,6 +152,8 @@ class AudioRecordJni {
   // and audio configuration.
   int sample_rate_hz_;

+  // Contains a delay estimate from the playout side given by |delay_provider_|.
+  int playout_delay_in_milliseconds_;
 };

 }  // namespace webrtc

(File diff suppressed because it is too large.)
@@ -13,161 +13,139 @@

 #include <jni.h>

-#include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
+#include "webrtc/base/thread_checker.h"
 #include "webrtc/modules/audio_device/android/audio_common.h"
 #include "webrtc/modules/audio_device/include/audio_device_defines.h"
 #include "webrtc/modules/audio_device/audio_device_generic.h"
+#include "webrtc/modules/utility/interface/helpers_android.h"

 namespace webrtc {

-class EventWrapper;
-class ThreadWrapper;
-
-const uint32_t N_PLAY_SAMPLES_PER_SEC = 16000; // Default is 16 kHz
-const uint32_t N_PLAY_CHANNELS = 1; // default is mono playout
-
+// Implements 16-bit mono PCM audio output support for Android using the Java
+// AudioTrack interface. Most of the work is done by its Java counterpart in
+// WebRtcAudioTrack.java. This class is created and lives on a thread in
+// C++-land, but decoded audio buffers are requested on a high-priority
+// thread managed by the Java class.
+//
+// An instance must be created and destroyed on one and the same thread.
+// All public methods must also be called on the same thread. A thread checker
+// will DCHECK if any method is called on an invalid thread.
+// It is possible to call the two static methods (SetAndroidAudioDeviceObjects
+// and ClearAndroidAudioDeviceObjects) from a different thread but both will
+// CHECK that the calling thread is attached to a Java VM.
+//
+// All methods use AttachThreadScoped to attach to a Java VM if needed and then
+// detach when method goes out of scope. We do so because this class does not
+// own the thread is is created and called on and other objects on the same
+// thread might put us in a detached state at any time.
 class AudioTrackJni : public PlayoutDelayProvider {
  public:
-  static int32_t SetAndroidAudioDeviceObjects(void* javaVM, void* env,
-                                              void* context);
+  // Use the invocation API to allow the native application to use the JNI
+  // interface pointer to access VM features.
+  // |jvm| denotes the Java VM, |env| is a pointer to the JNI interface pointer
+  // and |context| corresponds to android.content.Context in Java.
+  // This method also sets a global jclass object, |g_audio_track_class| for
+  // the "org/webrtc/voiceengine/WebRtcAudioTrack"-class.
+  static void SetAndroidAudioDeviceObjects(void* jvm, void* env, void* context);
+  // Always call this method after the object has been destructed. It deletes
+  // existing global references and enables garbage collection.
   static void ClearAndroidAudioDeviceObjects();
-  explicit AudioTrackJni(const int32_t id);
-  virtual ~AudioTrackJni();

-  // Main initializaton and termination
+  AudioTrackJni();
+  ~AudioTrackJni();

   int32_t Init();
   int32_t Terminate();
-  bool Initialized() const { return _initialized; }
-
-  // Device enumeration
-  int16_t PlayoutDevices() { return 1; }  // There is one device only.
-
-  int32_t PlayoutDeviceName(uint16_t index,
-                            char name[kAdmMaxDeviceNameSize],
-                            char guid[kAdmMaxGuidSize]);
-
-  // Device selection
-  int32_t SetPlayoutDevice(uint16_t index);
-  int32_t SetPlayoutDevice(
-      AudioDeviceModule::WindowsDeviceType device);
-
-  // Audio transport initialization
-  int32_t PlayoutIsAvailable(bool& available);  // NOLINT

   int32_t InitPlayout();
-  bool PlayoutIsInitialized() const { return _playIsInitialized; }
+  bool PlayoutIsInitialized() const { return initialized_; }

-  // Audio transport control
   int32_t StartPlayout();
   int32_t StopPlayout();
-  bool Playing() const { return _playing; }
+  bool Playing() const { return playing_; }

-  // Audio mixer initialization
-  int32_t InitSpeaker();
-  bool SpeakerIsInitialized() const { return _speakerIsInitialized; }
-
-  // Speaker volume controls
-  int32_t SpeakerVolumeIsAvailable(bool& available);  // NOLINT
-  int32_t SetSpeakerVolume(uint32_t volume);
-  int32_t SpeakerVolume(uint32_t& volume) const;  // NOLINT
-  int32_t MaxSpeakerVolume(uint32_t& maxVolume) const;  // NOLINT
-  int32_t MinSpeakerVolume(uint32_t& minVolume) const;  // NOLINT
-  int32_t SpeakerVolumeStepSize(uint16_t& stepSize) const;  // NOLINT
-
-  // Speaker mute control
-  int32_t SpeakerMuteIsAvailable(bool& available);  // NOLINT
-  int32_t SetSpeakerMute(bool enable);
-  int32_t SpeakerMute(bool& enabled) const;  // NOLINT
-
-  // Stereo support
-  int32_t StereoPlayoutIsAvailable(bool& available);  // NOLINT
-  int32_t SetStereoPlayout(bool enable);
-  int32_t StereoPlayout(bool& enabled) const;  // NOLINT
-
-  // Delay information and control
-  int32_t SetPlayoutBuffer(const AudioDeviceModule::BufferType type,
-                           uint16_t sizeMS);
-  int32_t PlayoutBuffer(AudioDeviceModule::BufferType& type,  // NOLINT
-                        uint16_t& sizeMS) const;
-  int32_t PlayoutDelay(uint16_t& delayMS) const;  // NOLINT
+  int32_t PlayoutDelay(uint16_t& delayMS) const;

-  // Attach audio buffer
   void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer);

-  int32_t SetPlayoutSampleRate(const uint32_t samplesPerSec);
-
-  // Error and warning information
-  bool PlayoutWarning() const;
-  bool PlayoutError() const;
-  void ClearPlayoutWarning();
-  void ClearPlayoutError();
-
-  // Speaker audio routing
-  int32_t SetLoudspeakerStatus(bool enable);
-  int32_t GetLoudspeakerStatus(bool& enable) const;  // NOLINT
-
  protected:
-  // TODO(henrika): improve this estimate.
-  virtual int PlayoutDelayMs() { return 0; }
+  // PlayoutDelayProvider implementation.
+  virtual int PlayoutDelayMs();

  private:
-  void Lock() EXCLUSIVE_LOCK_FUNCTION(_critSect) {
-    _critSect.Enter();
-  }
-  void UnLock() UNLOCK_FUNCTION(_critSect) {
-    _critSect.Leave();
-  }
+  // Called from Java side so we can cache the address of the Java-manged
+  // |byte_buffer| in |direct_buffer_address_|. The size of the buffer
+  // is also stored in |direct_buffer_capacity_in_bytes_|.
+  // This method will be called by the WebRtcAudioTrack constructor, i.e.,
+  // on the same thread that this object is created on.
+  static void JNICALL CacheDirectBufferAddress(
+      JNIEnv* env, jobject obj, jobject byte_buffer, jlong nativeAudioTrack);
+  void OnCacheDirectBufferAddress(JNIEnv* env, jobject byte_buffer);

-  int32_t InitJavaResources();
-  int32_t InitSampleRate();
+  // Called periodically by the Java based WebRtcAudioTrack object when
+  // playout has started. Each call indicates that |length| new bytes should
+  // be written to the memory area |direct_buffer_address_| for playout.
+  // This method is called on a high-priority thread from Java. The name of
+  // the thread is 'AudioTrackThread'.
+  static void JNICALL GetPlayoutData(
+      JNIEnv* env, jobject obj, jint length, jlong nativeAudioTrack);
+  void OnGetPlayoutData(int length);

-  static bool PlayThreadFunc(void*);
-  bool PlayThreadProcess();
+  // Returns true if SetAndroidAudioDeviceObjects() has been called
+  // successfully.
+  bool HasDeviceObjects();

-  // TODO(leozwang): Android holds only one JVM, all these jni handling
-  // will be consolidated into a single place to make it consistant and
-  // reliable. Chromium has a good example at base/android.
-  static JavaVM* globalJvm;
-  static JNIEnv* globalJNIEnv;
-  static jobject globalContext;
-  static jclass globalScClass;
+  // Called from the constructor. Defines the |j_audio_track_| member.
+  void CreateJavaInstance();

-  JavaVM* _javaVM;  // denotes a Java VM
-  JNIEnv* _jniEnvPlay;  // The JNI env for playout thread
-  jclass _javaScClass;  // AudioDeviceAndroid class
-  jobject _javaScObj;  // AudioDeviceAndroid object
-  jobject _javaPlayBuffer;
-  void* _javaDirectPlayBuffer;  // Direct buffer pointer to play buffer
-  jmethodID _javaMidPlayAudio;  // Method ID of play in AudioDeviceAndroid
+  // Returns the native, or optimal, sample rate reported by the audio input
+  // device.
+  int GetNativeSampleRate();

-  AudioDeviceBuffer* _ptrAudioBuffer;
-  CriticalSectionWrapper& _critSect;
-  int32_t _id;
-  bool _initialized;
+  // Stores thread ID in constructor.
+  // We can then use ThreadChecker::CalledOnValidThread() to ensure that
+  // other methods are called from the same thread.
+  rtc::ThreadChecker thread_checker_;

-  EventWrapper& _timeEventPlay;
-  EventWrapper& _playStartStopEvent;
-  ThreadWrapper* _ptrThreadPlay;
-  uint32_t _playThreadID;
-  bool _playThreadIsInitialized;
-  bool _shutdownPlayThread;
-  bool _playoutDeviceIsSpecified;
+  // Stores thread ID in first call to OnGetPlayoutData() from high-priority
+  // thread in Java. Detached during construction of this object.
+  rtc::ThreadChecker thread_checker_java_;

-  bool _playing;
-  bool _playIsInitialized;
-  bool _speakerIsInitialized;
+  // The Java WebRtcAudioTrack instance.
+  jobject j_audio_track_;

-  bool _startPlay;
+  // Cached copy of address to direct audio buffer owned by |j_audio_track_|.
+  void* direct_buffer_address_;

-  uint16_t _playWarning;
-  uint16_t _playError;
+  // Number of bytes in the direct audio buffer owned by |j_audio_track_|.
+  int direct_buffer_capacity_in_bytes_;

-  uint16_t _delayPlayout;
+  // Number of audio frames per audio buffer. Each audio frame corresponds to
+  // one sample of PCM mono data at 16 bits per sample. Hence, each audio
+  // frame contains 2 bytes (given that the Java layer only supports mono).
+  // Example: 480 for 48000 Hz or 441 for 44100 Hz.
+  int frames_per_buffer_;

-  uint16_t _samplingFreqOut;  // Sampling frequency for Speaker
-  uint32_t _maxSpeakerVolume;  // The maximum speaker volume value
-  bool _loudSpeakerOn;
+  bool initialized_;

+  bool playing_;
+
+  // Raw pointer handle provided to us in AttachAudioBuffer(). Owned by the
+  // AudioDeviceModuleImpl class and called by AudioDeviceModuleImpl::Create().
+  // The AudioDeviceBuffer is a member of the AudioDeviceModuleImpl instance
+  // and therefore outlives this object.
+  AudioDeviceBuffer* audio_device_buffer_;
+
+  // Native sample rate set in AttachAudioBuffer() which uses JNI to ask the
+  // Java layer for the best possible sample rate for this particular device
+  // and audio configuration.
+  int sample_rate_hz_;
+
+  // Estimated playout delay caused by buffering in the Java based audio track.
+  // We are using a fixed value here since measurements have shown that the
+  // variations are very small (~10ms) and it is not worth the extra complexity
+  // to update this estimate on a continuous basis.
+  int delay_in_milliseconds_;
 };

 }  // namespace webrtc
@@ -1,5 +1,5 @@
 /*
- *  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
@@ -11,12 +11,8 @@
 package org.webrtc.voiceengine;

 import java.lang.System;
-import java.lang.Thread;
 import java.nio.ByteBuffer;
 import java.util.concurrent.TimeUnit;
-import java.util.Collections;
-import java.util.HashSet;
-import java.util.Set;

 import android.content.Context;
 import android.media.AudioFormat;
@@ -36,9 +32,6 @@ class WebRtcAudioRecord {

   private static final String TAG = "WebRtcAudioRecord";

-  // Use 44.1kHz as the default sampling rate.
-  private static final int SAMPLE_RATE_HZ = 44100;
-
   // Mono recording is default.
   private static final int CHANNELS = 1;

@@ -71,16 +64,6 @@ class WebRtcAudioRecord {
   private AcousticEchoCanceler aec = null;
   private boolean useBuiltInAEC = false;

-  private final Set<Long> threadIds = new HashSet<Long>();
-
-  private static boolean runningOnJellyBeanOrHigher() {
-    return Build.VERSION.SDK_INT >= Build.VERSION_CODES.JELLY_BEAN;
-  }
-
-  private static boolean runningOnJellyBeanMR1OrHigher() {
-    return Build.VERSION.SDK_INT >= Build.VERSION_CODES.JELLY_BEAN_MR1;
-  }
-
   /**
    * Audio thread which keeps calling ByteBuffer.read() waiting for audio
    * to be recorded. Feeds recorded data to the native counterpart as a
@@ -97,16 +80,15 @@ class WebRtcAudioRecord {
     @Override
     public void run() {
       Process.setThreadPriority(Process.THREAD_PRIORITY_URGENT_AUDIO);
-      DoLog("AudioRecordThread" + getThreadInfo());
-      AddThreadId();
+      Logd("AudioRecordThread" + WebRtcAudioUtils.getThreadInfo());

       try {
         audioRecord.startRecording();
       } catch (IllegalStateException e) {
-        DoLogErr("AudioRecord.startRecording failed: " + e.getMessage());
+        Loge("AudioRecord.startRecording failed: " + e.getMessage());
         return;
       }
-      assertIsTrue(audioRecord.getRecordingState()
+      assertTrue(audioRecord.getRecordingState()
           == AudioRecord.RECORDSTATE_RECORDING);

       long lastTime = System.nanoTime();
@@ -115,7 +97,7 @@ class WebRtcAudioRecord {
         if (bytesRead == byteBuffer.capacity()) {
           nativeDataIsRecorded(bytesRead, nativeAudioRecord);
         } else {
-          DoLogErr("AudioRecord.read failed: " + bytesRead);
+          Loge("AudioRecord.read failed: " + bytesRead);
           if (bytesRead == AudioRecord.ERROR_INVALID_OPERATION) {
             keepAlive = false;
           }
@@ -125,16 +107,15 @@ class WebRtcAudioRecord {
           long durationInMs =
               TimeUnit.NANOSECONDS.toMillis((nowTime - lastTime));
           lastTime = nowTime;
-          DoLog("bytesRead[" + durationInMs + "] " + bytesRead);
+          Logd("bytesRead[" + durationInMs + "] " + bytesRead);
         }
       }

       try {
         audioRecord.stop();
       } catch (IllegalStateException e) {
-        DoLogErr("AudioRecord.stop failed: " + e.getMessage());
+        Loge("AudioRecord.stop failed: " + e.getMessage());
       }
-      RemoveThreadId();
     }

     public void joinThread() {
@@ -150,43 +131,34 @@ class WebRtcAudioRecord {
   }

   WebRtcAudioRecord(Context context, long nativeAudioRecord) {
-    DoLog("ctor" + getThreadInfo());
+    Logd("ctor" + WebRtcAudioUtils.getThreadInfo());
     this.context = context;
     this.nativeAudioRecord = nativeAudioRecord;
-    audioManager = ((AudioManager) context.getSystemService(
-        Context.AUDIO_SERVICE));
+    audioManager = (AudioManager) context.getSystemService(
+        Context.AUDIO_SERVICE);
     sampleRate = GetNativeSampleRate();
     bytesPerBuffer = BYTES_PER_FRAME * (sampleRate / BUFFERS_PER_SECOND);
     framesPerBuffer = sampleRate / BUFFERS_PER_SECOND;
     byteBuffer = byteBuffer.allocateDirect(bytesPerBuffer);
-    DoLog("byteBuffer.capacity: " + byteBuffer.capacity());
+    Logd("byteBuffer.capacity: " + byteBuffer.capacity());

     // Rather than passing the ByteBuffer with every callback (requiring
     // the potentially expensive GetDirectBufferAddress) we simply have the
     // the native class cache the address to the memory once.
     nativeCacheDirectBufferAddress(byteBuffer, nativeAudioRecord);
-    AddThreadId();
+    if (DEBUG) {
+      WebRtcAudioUtils.logDeviceInfo(TAG);
+    }
   }

-  /**
-   * Returns the native or optimal input sample rate for this device's
-   * primary input stream. Unit is in Hz.
-   * Note that we actually query the output device but the same result is
-   * also valid for input.
-   */
   private int GetNativeSampleRate() {
-    if (!runningOnJellyBeanMR1OrHigher()) {
-      return SAMPLE_RATE_HZ;
-    }
-    String sampleRateString = audioManager.getProperty(
-        AudioManager.PROPERTY_OUTPUT_SAMPLE_RATE);
-    return (sampleRateString == null) ?
-        SAMPLE_RATE_HZ : Integer.parseInt(sampleRateString);
+    return WebRtcAudioUtils.GetNativeSampleRate(audioManager);
   }

   public static boolean BuiltInAECIsAvailable() {
     // AcousticEchoCanceler was added in API level 16 (Jelly Bean).
-    if (!runningOnJellyBeanOrHigher()) {
+    if (!WebRtcAudioUtils.runningOnJellyBeanOrHigher()) {
       return false;
     }
     // TODO(henrika): add black-list based on device name. We could also
@@ -196,10 +168,9 @@ class WebRtcAudioRecord {
   }

   private boolean EnableBuiltInAEC(boolean enable) {
-    DoLog("EnableBuiltInAEC(" + enable + ')');
-    AddThreadId();
+    Logd("EnableBuiltInAEC(" + enable + ')');
     // AcousticEchoCanceler was added in API level 16 (Jelly Bean).
-    if (!runningOnJellyBeanOrHigher()) {
+    if (!WebRtcAudioUtils.runningOnJellyBeanOrHigher()) {
       return false;
     }
     // Store the AEC state.
@@ -208,17 +179,16 @@ class WebRtcAudioRecord {
     if (aec != null) {
       int ret = aec.setEnabled(enable);
       if (ret != AudioEffect.SUCCESS) {
-        DoLogErr("AcousticEchoCanceler.setEnabled failed");
+        Loge("AcousticEchoCanceler.setEnabled failed");
         return false;
       }
-      DoLog("AcousticEchoCanceler.getEnabled: " + aec.getEnabled());
+      Logd("AcousticEchoCanceler.getEnabled: " + aec.getEnabled());
     }
     return true;
   }

   private int InitRecording(int sampleRate) {
-    DoLog("InitRecording(sampleRate=" + sampleRate + ")");
-    AddThreadId();
+    Logd("InitRecording(sampleRate=" + sampleRate + ")");
     // Get the minimum buffer size required for the successful creation of
     // an AudioRecord object, in byte units.
     // Note that this size doesn't guarantee a smooth recording under load.
@@ -227,19 +197,16 @@ class WebRtcAudioRecord {
         sampleRate,
         AudioFormat.CHANNEL_IN_MONO,
         AudioFormat.ENCODING_PCM_16BIT);
-    DoLog("AudioRecord.getMinBufferSize: " + minBufferSize);
+    Logd("AudioRecord.getMinBufferSize: " + minBufferSize);

     if (aec != null) {
       aec.release();
       aec = null;
     }
-    if (audioRecord != null) {
-      audioRecord.release();
-      audioRecord = null;
-    }
+    assertTrue(audioRecord == null);

     int bufferSizeInBytes = Math.max(byteBuffer.capacity(), minBufferSize);
-    DoLog("bufferSizeInBytes: " + bufferSizeInBytes);
+    Logd("bufferSizeInBytes: " + bufferSizeInBytes);
     try {
       audioRecord = new AudioRecord(AudioSource.VOICE_COMMUNICATION,
                                     sampleRate,
@@ -248,105 +215,76 @@ class WebRtcAudioRecord {
                                     bufferSizeInBytes);

     } catch (IllegalArgumentException e) {
-      DoLog(e.getMessage());
+      Logd(e.getMessage());
       return -1;
     }
-    assertIsTrue(audioRecord.getState() == AudioRecord.STATE_INITIALIZED);
+    assertTrue(audioRecord.getState() == AudioRecord.STATE_INITIALIZED);

-    DoLog("AudioRecord " +
+    Logd("AudioRecord " +
         "session ID: " + audioRecord.getAudioSessionId() + ", " +
         "audio format: " + audioRecord.getAudioFormat() + ", " +
         "channels: " + audioRecord.getChannelCount() + ", " +
         "sample rate: " + audioRecord.getSampleRate());
-    DoLog("AcousticEchoCanceler.isAvailable: " + BuiltInAECIsAvailable());
+    Logd("AcousticEchoCanceler.isAvailable: " + BuiltInAECIsAvailable());
     if (!BuiltInAECIsAvailable()) {
       return framesPerBuffer;
     }

     aec = AcousticEchoCanceler.create(audioRecord.getAudioSessionId());
     if (aec == null) {
-      DoLogErr("AcousticEchoCanceler.create failed");
+      Loge("AcousticEchoCanceler.create failed");
       return -1;
     }
     int ret = aec.setEnabled(useBuiltInAEC);
     if (ret != AudioEffect.SUCCESS) {
-      DoLogErr("AcousticEchoCanceler.setEnabled failed");
+      Loge("AcousticEchoCanceler.setEnabled failed");
       return -1;
     }
     Descriptor descriptor = aec.getDescriptor();
-    DoLog("AcousticEchoCanceler " +
+    Logd("AcousticEchoCanceler " +
        "name: " + descriptor.name + ", " +
        "implementor: " + descriptor.implementor + ", " +
        "uuid: " + descriptor.uuid);
-    DoLog("AcousticEchoCanceler.getEnabled: " + aec.getEnabled());
+    Logd("AcousticEchoCanceler.getEnabled: " + aec.getEnabled());
     return framesPerBuffer;
   }

   private boolean StartRecording() {
-    DoLog("StartRecording");
-    AddThreadId();
-    if (audioRecord == null) {
-      DoLogErr("start() called before init()");
-      return false;
-    }
-    if (audioThread != null) {
-      DoLogErr("start() was already called");
-      return false;
-    }
+    Logd("StartRecording");
+    assertTrue(audioRecord != null);
+    assertTrue(audioThread == null);
     audioThread = new AudioRecordThread("AudioRecordJavaThread");
     audioThread.start();
     return true;
   }

   private boolean StopRecording() {
-    DoLog("StopRecording");
-    AddThreadId();
-    if (audioThread == null) {
-      DoLogErr("start() was never called, or stop() was already called");
-      return false;
-    }
+    Logd("StopRecording");
+    assertTrue(audioThread != null);
     audioThread.joinThread();
     audioThread = null;
     if (aec != null) {
       aec.release();
       aec = null;
     }
-    if (audioRecord != null) {
-      audioRecord.release();
-      audioRecord = null;
-    }
+    audioRecord.release();
+    audioRecord = null;
     return true;
   }

-  private void DoLog(String msg) {
-    Log.d(TAG, msg);
-  }
-
-  private void DoLogErr(String msg) {
-    Log.e(TAG, msg);
-  }
-
-  /** Helper method for building a string of thread information.*/
-  private static String getThreadInfo() {
-    return "@[name=" + Thread.currentThread().getName()
-        + ", id=" + Thread.currentThread().getId() + "]";
-  }
-
   /** Helper method which throws an exception when an assertion has failed. */
-  private static void assertIsTrue(boolean condition) {
+  private static void assertTrue(boolean condition) {
     if (!condition) {
       throw new AssertionError("Expected condition to be true");
     }
   }

-  private void AddThreadId() {
-    threadIds.add(Thread.currentThread().getId());
-    DoLog("threadIds: " + threadIds + " (#threads=" + threadIds.size() + ")");
-  }
-
-  private void RemoveThreadId() {
-    threadIds.remove(Thread.currentThread().getId());
-    DoLog("threadIds: " + threadIds + " (#threads=" + threadIds.size() + ")");
-  }
+  private static void Logd(String msg) {
+    Log.d(TAG, msg);
+  }
+
+  private static void Loge(String msg) {
+    Log.e(TAG, msg);
+  }

   private native void nativeCacheDirectBufferAddress(
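As an aside, the comment in the WebRtcAudioRecord constructor above describes the buffer-sharing pattern used on both the record and the playout side: Java allocates one direct ByteBuffer, hands it to native code exactly once (which caches the raw address), and later callbacks only exchange a byte count. The Java sketch below is illustrative only; the class name is hypothetical, while the two native method names mirror the ones declared in this CL.

    import java.nio.ByteBuffer;

    class DirectBufferSketch {
      private final ByteBuffer byteBuffer;
      private final long nativeHandle;

      DirectBufferSketch(long nativeHandle, int bytesPerBuffer) {
        this.nativeHandle = nativeHandle;
        // A direct buffer lets the native side read/write the same memory
        // without copying through JNI arrays.
        byteBuffer = ByteBuffer.allocateDirect(bytesPerBuffer);
        // Pass the buffer exactly once; native code caches its address with
        // GetDirectBufferAddress() and never needs the object again.
        nativeCacheDirectBufferAddress(byteBuffer, nativeHandle);
      }

      // Called for every 10 ms chunk; only the byte count crosses JNI.
      void onAudioChunkReady(int lengthInBytes) {
        nativeDataIsRecorded(lengthInBytes, nativeHandle);
      }

      private native void nativeCacheDirectBufferAddress(
          ByteBuffer byteBuffer, long nativeHandle);
      private native void nativeDataIsRecorded(int bytes, long nativeHandle);
    }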
@ -1,5 +1,5 @@
|
|||||||
/*
|
/*
|
||||||
* Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
|
* Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
|
||||||
*
|
*
|
||||||
* Use of this source code is governed by a BSD-style license
|
* Use of this source code is governed by a BSD-style license
|
||||||
* that can be found in the LICENSE file in the root of the source
|
* that can be found in the LICENSE file in the root of the source
|
||||||
@ -10,300 +10,234 @@
|
|||||||
|
|
||||||
package org.webrtc.voiceengine;
|
package org.webrtc.voiceengine;
|
||||||
|
|
||||||
|
import java.lang.Thread;
|
||||||
import java.nio.ByteBuffer;
|
import java.nio.ByteBuffer;
|
||||||
import java.util.concurrent.locks.ReentrantLock;
|
|
||||||
|
|
||||||
import android.content.Context;
|
import android.content.Context;
|
||||||
import android.media.AudioFormat;
|
import android.media.AudioFormat;
|
||||||
import android.media.AudioManager;
|
import android.media.AudioManager;
|
||||||
import android.media.AudioRecord;
|
|
||||||
import android.media.AudioTrack;
|
import android.media.AudioTrack;
|
||||||
|
import android.os.Process;
|
||||||
import android.util.Log;
|
import android.util.Log;
|
||||||
|
|
||||||
class WebRtcAudioTrack {
|
class WebRtcAudioTrack {
|
||||||
private AudioTrack _audioTrack = null;
|
private static final boolean DEBUG = false;
|
||||||
|
|
||||||
private Context _context;
|
private static final String TAG = "WebRtcAudioTrack";
|
||||||
private AudioManager _audioManager;
|
|
||||||
|
|
||||||
private ByteBuffer _playBuffer;
|
// Mono playout is default.
|
||||||
private byte[] _tempBufPlay;
|
// TODO(henrika): add stereo support.
|
||||||
|
private static final int CHANNELS = 1;
|
||||||
|
|
||||||
private final ReentrantLock _playLock = new ReentrantLock();
|
// Default audio data format is PCM 16 bit per sample.
|
||||||
|
// Guaranteed to be supported by all devices.
|
||||||
|
private static final int BITS_PER_SAMPLE = 16;
|
||||||
|
|
||||||
private boolean _doPlayInit = true;
|
// Number of bytes per audio frame.
|
||||||
private boolean _doRecInit = true;
|
// Example: 16-bit PCM in stereo => 2*(16/8)=4 [bytes/frame]
|
||||||
private boolean _isRecording = false;
|
private static final int BYTES_PER_FRAME = CHANNELS * (BITS_PER_SAMPLE / 8);
|
||||||
private boolean _isPlaying = false;
|
|
||||||
|
|
||||||
private int _bufferedPlaySamples = 0;
|
// Requested size of each recorded buffer provided to the client.
|
||||||
private int _playPosition = 0;
|
private static final int CALLBACK_BUFFER_SIZE_MS = 10;
|
||||||
|
|
||||||
WebRtcAudioTrack() {
|
// Average number of callbacks per second.
|
||||||
|
private static final int BUFFERS_PER_SECOND = 1000 / CALLBACK_BUFFER_SIZE_MS;
|
||||||
|
|
||||||
|
private ByteBuffer byteBuffer;
|
||||||
|
private final int sampleRate;
|
||||||
|
|
||||||
|
private final long nativeAudioTrack;
|
||||||
|
private final Context context;
|
||||||
|
private final AudioManager audioManager;
|
||||||
|
|
||||||
|
private AudioTrack audioTrack = null;
|
||||||
|
private AudioTrackThread audioThread = null;
|
||||||
|
|
||||||
-
-  WebRtcAudioTrack() {
-    try {
-      _playBuffer = ByteBuffer.allocateDirect(2 * 480);  // Max 10 ms @ 48
-                                                          // kHz
-    } catch (Exception e) {
-      DoLog(e.getMessage());
-    }
-    _tempBufPlay = new byte[2 * 480];
-  }
+
+  /**
+   * Audio thread which keeps calling AudioTrack.write() to stream audio.
+   * Data is periodically acquired from the native WebRTC layer using the
+   * nativeGetPlayoutData callback function.
+   * This thread uses a Process.THREAD_PRIORITY_URGENT_AUDIO priority.
+   */
+  private class AudioTrackThread extends Thread {
+    private volatile boolean keepAlive = true;
+
+    public AudioTrackThread(String name) {
+      super(name);
+    }
+
+    @Override
+    public void run() {
+      Process.setThreadPriority(Process.THREAD_PRIORITY_URGENT_AUDIO);
+      Logd("AudioTrackThread" + WebRtcAudioUtils.getThreadInfo());
+
+      try {
+        // In MODE_STREAM mode we can optionally prime the output buffer by
+        // writing up to bufferSizeInBytes (from constructor) before starting.
+        // This priming will avoid an immediate underrun, but is not required.
+        // TODO(henrika): initial tests have shown that priming is not required.
+        audioTrack.play();
+        assertTrue(audioTrack.getPlayState() == AudioTrack.PLAYSTATE_PLAYING);
+      } catch (IllegalStateException e) {
+        Loge("AudioTrack.play failed: " + e.getMessage());
+        return;
+      }
+
+      // Fixed size in bytes of each 10ms block of audio data that we ask for
+      // using callbacks to the native WebRTC client.
+      final int sizeInBytes = byteBuffer.capacity();
+
+      while (keepAlive) {
+        // Get 10ms of PCM data from the native WebRTC client. Audio data is
+        // written into the common ByteBuffer using the address that was
+        // cached at construction.
+        nativeGetPlayoutData(sizeInBytes, nativeAudioTrack);
+        // Write data until all data has been written to the audio sink.
+        // Upon return, the buffer position will have been advanced to reflect
+        // the amount of data that was successfully written to the AudioTrack.
+        assertTrue(sizeInBytes <= byteBuffer.remaining());
+        int bytesWritten = audioTrack.write(byteBuffer,
+                                            sizeInBytes,
+                                            AudioTrack.WRITE_BLOCKING);
+        if (bytesWritten != sizeInBytes) {
+          Loge("AudioTrack.write failed: " + bytesWritten);
+          if (bytesWritten == AudioTrack.ERROR_INVALID_OPERATION) {
+            keepAlive = false;
+          }
+        }
+        // The byte buffer must be rewinded since byteBuffer.position() is
+        // increased at each call to AudioTrack.write(). If we don't do this,
+        // next call to AudioTrack.write() will fail.
+        byteBuffer.rewind();
+
+        // TODO(henrika): it is possible to create a delay estimate here by
+        // counting number of written frames and subtracting the result from
+        // audioTrack.getPlaybackHeadPosition().
+      }
+
+      try {
+        audioTrack.stop();
+      } catch (IllegalStateException e) {
+        Loge("AudioTrack.stop failed: " + e.getMessage());
+      }
+      assertTrue(audioTrack.getPlayState() == AudioTrack.PLAYSTATE_STOPPED);
+      audioTrack.flush();
+    }
+
+    public void joinThread() {
+      keepAlive = false;
+      while (isAlive()) {
+        try {
+          join();
+        } catch (InterruptedException e) {
+          // Ignore.
+        }
+      }
+    }
+  }
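Editor's aside: the TODO in the write loop above notes that a playout delay estimate could be derived from AudioTrack.getPlaybackHeadPosition(); the removed PlayAudio() further down used the same bookkeeping. A rough sketch of that idea, under the assumption that all names here are illustrative and not part of the CL:

// Illustrative only: estimate how much audio has been handed to the track
// but not yet played out, by comparing written frames with the playback head.
class PlayoutDelayEstimator {
  private long framesWritten;

  // Call after every successful AudioTrack.write().
  void onBytesWritten(int bytesWritten, int bytesPerFrame) {
    framesWritten += bytesWritten / bytesPerFrame;
  }

  // Returns the estimated playout delay in milliseconds.
  int delayMs(android.media.AudioTrack track, int sampleRate) {
    long bufferedFrames = framesWritten - track.getPlaybackHeadPosition();
    if (bufferedFrames < 0) {
      bufferedFrames = 0;  // the head position is reset by the driver on restart
    }
    return (int) (bufferedFrames * 1000 / sampleRate);
  }
}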
-
-  @SuppressWarnings("unused")
-  private int InitPlayback(int sampleRate) {
-    // get the minimum buffer size that can be used
-    int minPlayBufSize = AudioTrack.getMinBufferSize(
-        sampleRate,
-        AudioFormat.CHANNEL_OUT_MONO,
-        AudioFormat.ENCODING_PCM_16BIT);
-    // DoLog("min play buf size is " + minPlayBufSize);
-
-    int playBufSize = minPlayBufSize;
-    if (playBufSize < 6000) {
-      playBufSize *= 2;
-    }
-    _bufferedPlaySamples = 0;
-    // DoLog("play buf size is " + playBufSize);
-
-    // release the object
-    if (_audioTrack != null) {
-      _audioTrack.release();
-      _audioTrack = null;
-    }
-
-    try {
-      _audioTrack = new AudioTrack(
-          AudioManager.STREAM_VOICE_CALL,
-          sampleRate,
-          AudioFormat.CHANNEL_OUT_MONO,
-          AudioFormat.ENCODING_PCM_16BIT,
-          playBufSize, AudioTrack.MODE_STREAM);
-    } catch (Exception e) {
-      DoLog(e.getMessage());
-      return -1;
-    }
-
-    // check that the audioRecord is ready to be used
-    if (_audioTrack.getState() != AudioTrack.STATE_INITIALIZED) {
-      // DoLog("play not initialized " + sampleRate);
-      return -1;
-    }
-
-    // DoLog("play sample rate set to " + sampleRate);
-
-    if (_audioManager == null && _context != null) {
-      _audioManager = (AudioManager)
-          _context.getSystemService(Context.AUDIO_SERVICE);
-    }
-
-    // Return max playout volume
-    if (_audioManager == null) {
-      // Don't know the max volume but still init is OK for playout,
-      // so we should not return error.
-      return 0;
-    }
-    return _audioManager.getStreamMaxVolume(AudioManager.STREAM_VOICE_CALL);
-  }
+
+  WebRtcAudioTrack(Context context, long nativeAudioTrack) {
+    Logd("ctor" + WebRtcAudioUtils.getThreadInfo());
+    this.context = context;
+    this.nativeAudioTrack = nativeAudioTrack;
+    audioManager = (AudioManager) context.getSystemService(
+        Context.AUDIO_SERVICE);
+    sampleRate = GetNativeSampleRate();
+    byteBuffer = byteBuffer.allocateDirect(
+        BYTES_PER_FRAME * (sampleRate / BUFFERS_PER_SECOND));
+    Logd("byteBuffer.capacity: " + byteBuffer.capacity());
+
+    // Rather than passing the ByteBuffer with every callback (requiring
+    // the potentially expensive GetDirectBufferAddress) we simply have the
+    // the native class cache the address to the memory once.
+    nativeCacheDirectBufferAddress(byteBuffer, nativeAudioTrack);
+
+    if (DEBUG) {
+      WebRtcAudioUtils.logDeviceInfo(TAG);
+    }
+  }
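Editor's aside: the comment in the new constructor relies on the buffer being a direct ByteBuffer, since only a direct buffer has a stable native address that the JNI side can cache once and reuse for every 10 ms callback. A minimal, self-contained demonstration of the distinction (illustrative only):

import java.nio.ByteBuffer;

public class DirectBufferDemo {
  public static void main(String[] args) {
    // A direct buffer is backed by native memory; a heap buffer is not.
    ByteBuffer direct = ByteBuffer.allocateDirect(960);
    ByteBuffer heap = ByteBuffer.allocate(960);
    System.out.println("direct.isDirect() = " + direct.isDirect());  // true
    System.out.println("heap.isDirect()   = " + heap.isDirect());    // false
  }
}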
-
-  @SuppressWarnings("unused")
-  private int StartPlayback() {
-    // start playout
-    try {
-      _audioTrack.play();
-    } catch (IllegalStateException e) {
-      e.printStackTrace();
-      return -1;
-    }
-
-    _isPlaying = true;
-    return 0;
-  }
+
+  private int GetNativeSampleRate() {
+    return WebRtcAudioUtils.GetNativeSampleRate(audioManager);
+  }
+
+  private int InitPlayout(int sampleRate) {
+    Logd("InitPlayout(sampleRate=" + sampleRate + ")");
+    // Get the minimum buffer size required for the successful creation of an
+    // AudioTrack object to be created in the MODE_STREAM mode.
+    // Note that this size doesn't guarantee a smooth playback under load.
+    // TODO(henrika): should we extend the buffer size to avoid glitches?
+    final int minBufferSizeInBytes = AudioTrack.getMinBufferSize(
+        sampleRate,
+        AudioFormat.CHANNEL_OUT_MONO,
+        AudioFormat.ENCODING_PCM_16BIT);
+    Logd("AudioTrack.getMinBufferSize: " + minBufferSizeInBytes);
+    assertTrue(audioTrack == null);
+
+    // For the streaming mode, data must be written to the audio sink in
+    // chunks of size (given by byteBuffer.capacity()) less than or equal
+    // to the total buffer size |minBufferSizeInBytes|.
+    assertTrue(byteBuffer.capacity() < minBufferSizeInBytes);
+    try {
+      // Create an AudioTrack object and initialize its associated audio buffer.
+      // The size of this buffer determines how long an AudioTrack can play
+      // before running out of data.
+      audioTrack = new AudioTrack(AudioManager.STREAM_VOICE_CALL,
+                                  sampleRate,
+                                  AudioFormat.CHANNEL_OUT_MONO,
+                                  AudioFormat.ENCODING_PCM_16BIT,
+                                  minBufferSizeInBytes,
+                                  AudioTrack.MODE_STREAM);
+    } catch (IllegalArgumentException e) {
+      Logd(e.getMessage());
+      return -1;
+    }
+    assertTrue(audioTrack.getState() == AudioTrack.STATE_INITIALIZED);
+    assertTrue(audioTrack.getPlayState() == AudioTrack.PLAYSTATE_STOPPED);
+    assertTrue(audioTrack.getStreamType() == AudioManager.STREAM_VOICE_CALL);
+
+    // Return a delay estimate in milliseconds given the minimum buffer size.
+    return (1000 * (minBufferSizeInBytes / BYTES_PER_FRAME) / sampleRate);
+  }
+
+  private boolean StartPlayout() {
+    Logd("StartPlayout");
+    assertTrue(audioTrack != null);
+    assertTrue(audioThread == null);
+    audioThread = new AudioTrackThread("AudioTrackJavaThread");
+    audioThread.start();
+    return true;
+  }
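Editor's aside: the value returned by InitPlayout() above is simply the minimum buffer size converted to milliseconds. A worked instance of that formula, with a hypothetical getMinBufferSize() result (illustrative only):

public class DelayFromMinBufferSize {
  public static void main(String[] args) {
    // Same expression as the return statement in InitPlayout() above.
    int bytesPerFrame = 2;             // 16-bit PCM, mono
    int sampleRate = 48000;            // assumed native rate
    int minBufferSizeInBytes = 7680;   // hypothetical value from getMinBufferSize()
    int delayMs = 1000 * (minBufferSizeInBytes / bytesPerFrame) / sampleRate;
    System.out.println("Initial delay estimate: " + delayMs + " ms");  // 80 ms
  }
}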
-
-  @SuppressWarnings("unused")
-  private int StopPlayback() {
-    _playLock.lock();
-    try {
-      // only stop if we are playing
-      if (_audioTrack.getPlayState() == AudioTrack.PLAYSTATE_PLAYING) {
-        // stop playout
-        try {
-          _audioTrack.stop();
-        } catch (IllegalStateException e) {
-          e.printStackTrace();
-          return -1;
-        }
-
-        // flush the buffers
-        _audioTrack.flush();
-      }
-
-      // release the object
-      _audioTrack.release();
-      _audioTrack = null;
-    } finally {
-      // Ensure we always unlock, both for success, exception or error
-      // return.
-      _doPlayInit = true;
-      _playLock.unlock();
-    }
-
-    _isPlaying = false;
-    return 0;
-  }
-
-  @SuppressWarnings("unused")
-  private int PlayAudio(int lengthInBytes) {
-    _playLock.lock();
-    try {
-      if (_audioTrack == null) {
-        return -2;  // We have probably closed down while waiting for
-                    // play lock
-      }
-
-      // Set priority, only do once
-      if (_doPlayInit == true) {
-        try {
-          android.os.Process.setThreadPriority(
-              android.os.Process.THREAD_PRIORITY_URGENT_AUDIO);
-        } catch (Exception e) {
-          DoLog("Set play thread priority failed: " + e.getMessage());
-        }
-        _doPlayInit = false;
-      }
-
-      int written = 0;
-      _playBuffer.get(_tempBufPlay);
-      written = _audioTrack.write(_tempBufPlay, 0, lengthInBytes);
-      _playBuffer.rewind();  // Reset the position to start of buffer
-
-      // DoLog("Wrote data to sndCard");
-
-      // increase by number of written samples
-      _bufferedPlaySamples += (written >> 1);
-
-      // decrease by number of played samples
-      int pos = _audioTrack.getPlaybackHeadPosition();
-      if (pos < _playPosition) {  // wrap or reset by driver
-        _playPosition = 0;  // reset
-      }
-      _bufferedPlaySamples -= (pos - _playPosition);
-      _playPosition = pos;
-
-      if (written != lengthInBytes) {
-        // DoLog("Could not write all data to sc (written = " + written
-        //       + ", length = " + lengthInBytes + ")");
-        return -1;
-      }
-
-    } finally {
-      // Ensure we always unlock, both for success, exception or error
-      // return.
-      _playLock.unlock();
-    }
-
-    return _bufferedPlaySamples;
-  }
+
+  private boolean StopPlayout() {
+    Logd("StopPlayout");
+    assertTrue(audioThread != null);
+    audioThread.joinThread();
+    audioThread = null;
+    if (audioTrack != null) {
+      audioTrack.release();
+      audioTrack = null;
+    }
+    return true;
+  }
+
+  /** Helper method which throws an exception when an assertion has failed. */
+  private static void assertTrue(boolean condition) {
+    if (!condition) {
+      throw new AssertionError("Expected condition to be true");
+    }
+  }
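Editor's aside: both the removed PlayAudio() and the new write loop rewind the shared buffer after each pass, because relative bulk reads and AudioTrack.write(ByteBuffer, ...) advance position(). A minimal demonstration of that behaviour (illustrative only):

import java.nio.ByteBuffer;

public class RewindDemo {
  public static void main(String[] args) {
    ByteBuffer buffer = ByteBuffer.allocateDirect(8);
    System.out.println(buffer.position() + "/" + buffer.remaining());  // 0/8
    byte[] chunk = new byte[8];
    buffer.get(chunk);  // relative bulk get, like AudioTrack.write(ByteBuffer, ...)
    System.out.println(buffer.position() + "/" + buffer.remaining());  // 8/0
    buffer.rewind();    // without this, remaining() stays 0 for the next pass
    System.out.println(buffer.position() + "/" + buffer.remaining());  // 0/8
  }
}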
-
-  @SuppressWarnings("unused")
-  private int SetPlayoutSpeaker(boolean loudspeakerOn) {
-    // create audio manager if needed
-    if (_audioManager == null && _context != null) {
-      _audioManager = (AudioManager)
-          _context.getSystemService(Context.AUDIO_SERVICE);
-    }
-
-    if (_audioManager == null) {
-      DoLogErr("Could not change audio routing - no audio manager");
-      return -1;
-    }
-
-    int apiLevel = android.os.Build.VERSION.SDK_INT;
-
-    if ((3 == apiLevel) || (4 == apiLevel)) {
-      // 1.5 and 1.6 devices
-      if (loudspeakerOn) {
-        // route audio to back speaker
-        _audioManager.setMode(AudioManager.MODE_NORMAL);
-      } else {
-        // route audio to earpiece
-        _audioManager.setMode(AudioManager.MODE_IN_CALL);
-      }
-    } else {
-      // 2.x devices
-      if ((android.os.Build.BRAND.equals("Samsung") ||
-           android.os.Build.BRAND.equals("samsung")) &&
-          ((5 == apiLevel) || (6 == apiLevel) ||
-           (7 == apiLevel))) {
-        // Samsung 2.0, 2.0.1 and 2.1 devices
-        if (loudspeakerOn) {
-          // route audio to back speaker
-          _audioManager.setMode(AudioManager.MODE_IN_CALL);
-          _audioManager.setSpeakerphoneOn(loudspeakerOn);
-        } else {
-          // route audio to earpiece
-          _audioManager.setSpeakerphoneOn(loudspeakerOn);
-          _audioManager.setMode(AudioManager.MODE_NORMAL);
-        }
-      } else {
-        // Non-Samsung and Samsung 2.2 and up devices
-        _audioManager.setSpeakerphoneOn(loudspeakerOn);
-      }
-    }
-
-    return 0;
-  }
-
-  @SuppressWarnings("unused")
-  private int SetPlayoutVolume(int level) {
-    // create audio manager if needed
-    if (_audioManager == null && _context != null) {
-      _audioManager = (AudioManager)
-          _context.getSystemService(Context.AUDIO_SERVICE);
-    }
-
-    int retVal = -1;
-
-    if (_audioManager != null) {
-      _audioManager.setStreamVolume(AudioManager.STREAM_VOICE_CALL,
-                                    level, 0);
-      retVal = 0;
-    }
-
-    return retVal;
-  }
-
-  @SuppressWarnings("unused")
-  private int GetPlayoutVolume() {
-    // create audio manager if needed
-    if (_audioManager == null && _context != null) {
-      _audioManager = (AudioManager)
-          _context.getSystemService(Context.AUDIO_SERVICE);
-    }
-
-    int level = -1;
-
-    if (_audioManager != null) {
-      level = _audioManager.getStreamVolume(
-          AudioManager.STREAM_VOICE_CALL);
-    }
-
-    return level;
-  }
-
-  final String logTag = "WebRTC AD java";
-
-  private void DoLog(String msg) {
-    Log.d(logTag, msg);
-  }
-
-  private void DoLogErr(String msg) {
-    Log.e(logTag, msg);
-  }
-}
+
+  private static void Logd(String msg) {
+    Log.d(TAG, msg);
+  }
+
+  private static void Loge(String msg) {
+    Log.e(TAG, msg);
+  }
+
+  private native void nativeCacheDirectBufferAddress(
+      ByteBuffer byteBuffer, long nativeAudioRecord);
+
+  private native void nativeGetPlayoutData(int bytes, long nativeAudioRecord);
+}
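Editor's aside: the removed SetPlayoutSpeaker() carried per-API-level and per-vendor routing workarounds for long-obsolete Android releases. This CL drops routing and volume handling from the track class; for reference only, equivalent loudspeaker routing on current devices reduces to toggling the speakerphone on the shared AudioManager, roughly as sketched below (not part of this change):

import android.content.Context;
import android.media.AudioManager;

// Illustrative only: modern loudspeaker routing for a VoIP-style session.
public final class SpeakerRouting {
  public static void setLoudspeaker(Context context, boolean on) {
    AudioManager audioManager =
        (AudioManager) context.getSystemService(Context.AUDIO_SERVICE);
    // MODE_IN_COMMUNICATION is the recommended mode for VoIP audio.
    audioManager.setMode(AudioManager.MODE_IN_COMMUNICATION);
    audioManager.setSpeakerphoneOn(on);
  }
}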
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+package org.webrtc.voiceengine;
+
+import java.lang.Thread;
+
+import android.media.AudioManager;
+import android.os.Build;
+import android.util.Log;
+
+public final class WebRtcAudioUtils {
+  // Use 44.1kHz as the default sampling rate.
+  private static final int SAMPLE_RATE_HZ = 44100;
+
+  public static boolean runningOnJellyBeanOrHigher() {
+    return Build.VERSION.SDK_INT >= Build.VERSION_CODES.JELLY_BEAN;
+  }
+
+  public static boolean runningOnJellyBeanMR1OrHigher() {
+    return Build.VERSION.SDK_INT >= Build.VERSION_CODES.JELLY_BEAN_MR1;
+  }
+
+  /** Helper method for building a string of thread information.*/
+  public static String getThreadInfo() {
+    return "@[name=" + Thread.currentThread().getName()
+        + ", id=" + Thread.currentThread().getId() + "]";
+  }
+
+  /** Information about the current build, taken from system properties. */
+  public static void logDeviceInfo(String tag) {
+    Log.d(tag, "Android SDK: " + Build.VERSION.SDK_INT + ", "
+        + "Release: " + Build.VERSION.RELEASE + ", "
+        + "Brand: " + Build.BRAND + ", "
+        + "Device: " + Build.DEVICE + ", "
+        + "Id: " + Build.ID + ", "
+        + "Hardware: " + Build.HARDWARE + ", "
+        + "Manufacturer: " + Build.MANUFACTURER + ", "
+        + "Model: " + Build.MODEL + ", "
+        + "Product: " + Build.PRODUCT);
+  }
+
+  /**
+   * Returns the native or optimal output sample rate for this device's
+   * primary output stream. Unit is in Hz.
+   */
+  public static int GetNativeSampleRate(AudioManager audioManager) {
+    if (!WebRtcAudioUtils.runningOnJellyBeanMR1OrHigher()) {
+      return SAMPLE_RATE_HZ;
+    }
+    String sampleRateString = audioManager.getProperty(
+        AudioManager.PROPERTY_OUTPUT_SAMPLE_RATE);
+    return (sampleRateString == null) ?
+        SAMPLE_RATE_HZ : Integer.parseInt(sampleRateString);
+  }
+}
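Editor's aside: the new helper above queries the device's preferred output sample rate via AudioManager.getProperty() (available from API 17). The same mechanism also exposes the preferred buffer size; a small usage sketch, illustrative only:

import android.media.AudioManager;

// Illustrative only: log the device's preferred output configuration.
public final class NativeAudioConfig {
  public static void log(AudioManager audioManager) {
    String rate = audioManager.getProperty(
        AudioManager.PROPERTY_OUTPUT_SAMPLE_RATE);
    String framesPerBuffer = audioManager.getProperty(
        AudioManager.PROPERTY_OUTPUT_FRAMES_PER_BUFFER);
    android.util.Log.d("NativeAudioConfig",
        "rate=" + rate + " Hz, framesPerBuffer=" + framesPerBuffer);
  }
}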
@@ -41,8 +41,9 @@ enum {
 
 namespace webrtc {
 
-OpenSlesInput::OpenSlesInput()
-    : initialized_(false),
+OpenSlesInput::OpenSlesInput(PlayoutDelayProvider* delay_provider)
+    : delay_provider_(delay_provider),
+      initialized_(false),
       mic_initialized_(false),
       rec_initialized_(false),
       crit_sect_(CriticalSectionWrapper::CreateCriticalSection()),
@@ -527,8 +528,7 @@ bool OpenSlesInput::CbThreadImpl() {
   while (fifo_->size() > 0 && recording_) {
     int8_t* audio = fifo_->Pop();
     audio_buffer_->SetRecordedBuffer(audio, buffer_size_samples());
-    // TODO(henrika): improve the delay estimate.
-    audio_buffer_->SetVQEData(100,
+    audio_buffer_->SetVQEData(delay_provider_->PlayoutDelayMs(),
                               recording_delay_, 0);
     audio_buffer_->DeliverRecordedData();
   }
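Editor's aside: the change above replaces a hard-coded 100 ms playout delay with a value obtained from the injected PlayoutDelayProvider, so that SetVQEData() receives a real playout delay alongside the recording delay for every 10 ms capture block. A rough sketch of that wiring, written in Java only to stay consistent with the other examples on this page (the real interface is the C++ PlayoutDelayProvider shown in the hunk):

// Illustrative analogue only; names and the fixed recording delay are assumptions.
interface PlayoutDelayProvider {
  int playoutDelayMs();
}

final class RecordedDataDelivery {
  private static final int RECORDING_DELAY_MS = 10;  // assumed fixed estimate
  private final PlayoutDelayProvider delayProvider;

  RecordedDataDelivery(PlayoutDelayProvider delayProvider) {
    this.delayProvider = delayProvider;
  }

  void deliver(byte[] tenMsBlock) {
    // Mirrors audio_buffer_->SetVQEData(playout, recording, clock_drift):
    // the echo canceller needs both halves of the round trip per block.
    int playoutDelayMs = delayProvider.playoutDelayMs();
    int clockDrift = 0;
    System.out.println("VQE data: playout=" + playoutDelayMs
        + " ms, recording=" + RECORDING_DELAY_MS
        + " ms, drift=" + clockDrift);
  }
}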
@@ -35,7 +35,7 @@ class ThreadWrapper;
 // to non-const methods require exclusive access to the object.
 class OpenSlesInput {
  public:
-  OpenSlesInput();
+  OpenSlesInput(PlayoutDelayProvider* delay_provider);
   ~OpenSlesInput();
 
   static int32_t SetAndroidAudioDeviceObjects(void* javaVM,
@@ -174,6 +174,8 @@ class OpenSlesInput {
   // Thread-compatible.
   bool CbThreadImpl();
 
+  PlayoutDelayProvider* delay_provider_;
+
   // Java API handle
   AudioManagerJni audio_manager_;
 
@@ -25,8 +25,6 @@
   do { \
     SLresult err = (op); \
     if (err != SL_RESULT_SUCCESS) { \
-      WEBRTC_TRACE(kTraceError, kTraceAudioDevice, id_, \
-                   "OpenSL error: %d", err); \
       assert(false); \
       return ret_val; \
     } \
@@ -43,9 +41,8 @@ enum {
 
 namespace webrtc {
 
-OpenSlesOutput::OpenSlesOutput(const int32_t id)
-    : id_(id),
-      initialized_(false),
+OpenSlesOutput::OpenSlesOutput()
+    : initialized_(false),
       speaker_initialized_(false),
       play_initialized_(false),
       crit_sect_(CriticalSectionWrapper::CreateCriticalSection()),
@@ -468,7 +465,6 @@ bool OpenSlesOutput::HandleUnderrun(int event_id, int event_msg) {
   if (event_id == kNoUnderrun) {
     return false;
   }
-  WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, id_, "Audio underrun");
   assert(event_id == kUnderrun);
   assert(event_msg > 0);
   // Wait for all enqueued buffers to be flushed.
@@ -35,7 +35,7 @@ class ThreadWrapper;
 // to non-const methods require exclusive access to the object.
 class OpenSlesOutput : public PlayoutDelayProvider {
  public:
-  explicit OpenSlesOutput(const int32_t id);
+  explicit OpenSlesOutput();
   virtual ~OpenSlesOutput();
 
   static int32_t SetAndroidAudioDeviceObjects(void* javaVM,
@@ -191,7 +191,6 @@ class OpenSlesOutput : public PlayoutDelayProvider {
   // Java API handle
   AudioManagerJni audio_manager_;
 
-  int id_;
   bool initialized_;
   bool speaker_initialized_;
   bool play_initialized_;