Refactoring WebRTC Java/JNI audio recording in C++ and Java.

This is a big refactoring of the existing C++/JNI/Java support for audio recording in native WebRTC:
- Removes unused code and old WEBRTC logging macros
- Now uses optimal sample rate and buffer size in Java AudioRecord (used hard-coded sample rate before)
- Makes code more in line with the implementation in Chrome
- Adds helper methods for JNI handling to improve readability
- Changes the threading model (high-prio audio thread now lives in Java-land and C++ only works as proxy)
- Adds basic thread checks
- Removes all locks in C++ land
- Removes all locks in Java
- Improves construction/destruction
- Additional cleanup

Tested using AppRTCDemo and WebRTCDemo APKs on N6, N5, N7, Samsung Galaxy S4 and Samsung Galaxy S4 mini (which uses 44.1kHz as native sample rate).

BUG=NONE
R=magjed@webrtc.org, perkj@webrtc.org, pthatcher@webrtc.org

Review URL: https://webrtc-codereview.appspot.com/33969004

Cr-Commit-Position: refs/heads/master@{#8325}
git-svn-id: http://webrtc.googlecode.com/svn/trunk@8325 4adac7df-926f-26a2-2b94-8c16560cd09d
Parent: c2d0473320
Commit: 62f6e75673
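Before the per-file hunks, a brief orientation on the change that carries the most logic: the Java layer now asks the platform for its native input sample rate and sizes its 10 ms direct buffer from it, instead of hard-coding the rate. The sketch below condenses that logic from the WebRtcAudioRecord.java hunks further down; it is a minimal standalone illustration, and the class and method names here are illustrative only, not part of the commit.

    // Minimal sketch of the new sample-rate/buffer-size logic (illustrative
    // names; the real code lives in WebRtcAudioRecord.java below). Each 10 ms
    // callback carries sampleRate / 100 mono 16-bit frames, i.e. 2 bytes/frame.
    import android.content.Context;
    import android.media.AudioManager;
    import android.os.Build;

    final class NativeSampleRateSketch {
      private static final int DEFAULT_SAMPLE_RATE_HZ = 44100;  // fallback
      private static final int CALLBACK_BUFFER_SIZE_MS = 10;
      private static final int BYTES_PER_FRAME = 2;  // mono, 16-bit PCM

      // Returns the sample rate reported by the platform (API 17+), else 44100.
      static int nativeSampleRate(Context context) {
        if (Build.VERSION.SDK_INT < Build.VERSION_CODES.JELLY_BEAN_MR1) {
          return DEFAULT_SAMPLE_RATE_HZ;
        }
        AudioManager am =
            (AudioManager) context.getSystemService(Context.AUDIO_SERVICE);
        String rate = am.getProperty(AudioManager.PROPERTY_OUTPUT_SAMPLE_RATE);
        return (rate == null) ? DEFAULT_SAMPLE_RATE_HZ : Integer.parseInt(rate);
      }

      // Size of the direct ByteBuffer shared with native code per callback.
      static int bytesPerBuffer(int sampleRateHz) {
        int buffersPerSecond = 1000 / CALLBACK_BUFFER_SIZE_MS;  // 100
        return BYTES_PER_FRAME * (sampleRateHz / buffersPerSecond);
      }
    }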
@@ -147,16 +147,16 @@ bool PeerConnectionFactory::Initialize() {
 
   cricket::DummyDeviceManager* device_manager(
       new cricket::DummyDeviceManager());
 
   // TODO: Need to make sure only one VoE is created inside
   // WebRtcMediaEngine.
-  cricket::MediaEngineInterface* media_engine(
-      cricket::WebRtcMediaEngineFactory::Create(default_adm_.get(),
-          NULL,  // No secondary adm.
-          video_encoder_factory_.get(),
-          video_decoder_factory_.get()));
+  cricket::MediaEngineInterface* media_engine =
+      worker_thread_->Invoke<cricket::MediaEngineInterface*>(rtc::Bind(
+          &PeerConnectionFactory::CreateMediaEngine_w, this));
 
   channel_manager_.reset(new cricket::ChannelManager(
       media_engine, device_manager, worker_thread_));
 
   channel_manager_->SetVideoRtxEnabled(true);
   if (!channel_manager_->Init()) {
     return false;

@@ -252,4 +252,11 @@ rtc::Thread* PeerConnectionFactory::worker_thread() {
   return worker_thread_;
 }
 
+cricket::MediaEngineInterface* PeerConnectionFactory::CreateMediaEngine_w() {
+  ASSERT(worker_thread_ == rtc::Thread::Current());
+  return cricket::WebRtcMediaEngineFactory::Create(
+      default_adm_.get(), NULL, video_encoder_factory_.get(),
+      video_decoder_factory_.get());
+}
+
 }  // namespace webrtc

@@ -90,6 +90,8 @@ class PeerConnectionFactory : public PeerConnectionFactoryInterface {
   virtual ~PeerConnectionFactory();
 
  private:
+  cricket::MediaEngineInterface* CreateMediaEngine_w();
+
   bool owns_ptrs_;
   bool wraps_current_thread_;
   rtc::Thread* signaling_thread_;

@@ -589,6 +589,7 @@ WebRtcVoiceEngine::~WebRtcVoiceEngine() {
 }
 
 bool WebRtcVoiceEngine::Init(rtc::Thread* worker_thread) {
+  ASSERT(worker_thread == rtc::Thread::Current());
   LOG(LS_INFO) << "WebRtcVoiceEngine::Init";
   bool res = InitInternal();
   if (res) {

@@ -149,6 +149,10 @@ ChannelManager::~ChannelManager() {
     // shutdown.
     ShutdownSrtp();
   }
+  // Always delete the media engine on the worker thread to match how it was
+  // created.
+  worker_thread_->Invoke<void>(Bind(
+      &ChannelManager::DeleteMediaEngine_w, this));
 }
 
 bool ChannelManager::SetVideoRtxEnabled(bool enable) {

@@ -215,17 +219,22 @@ bool ChannelManager::Init() {
   if (initialized_) {
     return false;
   }
 
   ASSERT(worker_thread_ != NULL);
-  if (worker_thread_) {
+  if (!worker_thread_) {
+    return false;
+  }
   if (worker_thread_ != rtc::Thread::Current()) {
     // Do not allow invoking calls to other threads on the worker thread.
     worker_thread_->Invoke<bool>(rtc::Bind(
         &rtc::Thread::SetAllowBlockingCalls, worker_thread_, false));
   }
 
-  if (media_engine_->Init(worker_thread_)) {
-    initialized_ = true;
+  initialized_ = worker_thread_->Invoke<bool>(Bind(
+      &ChannelManager::InitMediaEngine_w, this));
+  ASSERT(initialized_);
+  if (!initialized_) {
+    return false;
+  }
 
   // Now that we're initialized, apply any stored preferences. A preferred
   // device might have been unplugged. In this case, we fallback to the

@@ -283,21 +292,29 @@ bool ChannelManager::Init() {
     if (default_video_encoder_config_.max_codec.id != 0) {
       SetDefaultVideoEncoderConfig(default_video_encoder_config_);
     }
-  }
-  }
   return initialized_;
 }
 
+bool ChannelManager::InitMediaEngine_w() {
+  ASSERT(worker_thread_ == rtc::Thread::Current());
+  return (media_engine_->Init(worker_thread_));
+}
+
 void ChannelManager::Terminate() {
   ASSERT(initialized_);
   if (!initialized_) {
     return;
   }
   worker_thread_->Invoke<void>(Bind(&ChannelManager::Terminate_w, this));
-  media_engine_->Terminate();
   initialized_ = false;
 }
 
+void ChannelManager::DeleteMediaEngine_w() {
+  ASSERT(worker_thread_ == rtc::Thread::Current());
+  media_engine_.reset(NULL);
+}
+
 void ChannelManager::Terminate_w() {
   ASSERT(worker_thread_ == rtc::Thread::Current());
   // Need to destroy the voice/video channels

@@ -313,6 +330,7 @@ void ChannelManager::Terminate_w() {
   if (!SetCaptureDevice_w(NULL)) {
     LOG(LS_WARNING) << "failed to delete video capturer";
   }
+  media_engine_->Terminate();
 }
 
 VoiceChannel* ChannelManager::CreateVoiceChannel(

@@ -264,6 +264,8 @@ class ChannelManager : public rtc::MessageHandler,
                  DeviceManagerInterface* dm,
                  CaptureManager* cm,
                  rtc::Thread* worker_thread);
+  bool InitMediaEngine_w();
+  void DeleteMediaEngine_w();
   void Terminate_w();
   VoiceChannel* CreateVoiceChannel_w(
       BaseSession* session, const std::string& content_name, bool rtcp);

@@ -19,7 +19,7 @@ using icu::UnicodeString;
 jmethodID GetMethodID(JNIEnv* jni, jclass c, const std::string& name,
                       const char* signature) {
   jmethodID m = jni->GetMethodID(c, name.c_str(), signature);
-  CHECK_EXCEPTION(jni, "error during GetMethodID");
+  CHECK_JNI_EXCEPTION(jni, "error during GetMethodID");
   return m;
 }
 
@@ -37,11 +37,11 @@ jlong jlongFromPointer(void* ptr) {
 // Given a (UTF-16) jstring return a new UTF-8 native string.
 std::string JavaToStdString(JNIEnv* jni, const jstring& j_string) {
   const jchar* jchars = jni->GetStringChars(j_string, NULL);
-  CHECK_EXCEPTION(jni, "Error during GetStringChars");
+  CHECK_JNI_EXCEPTION(jni, "Error during GetStringChars");
   UnicodeString ustr(jchars, jni->GetStringLength(j_string));
-  CHECK_EXCEPTION(jni, "Error during GetStringLength");
+  CHECK_JNI_EXCEPTION(jni, "Error during GetStringLength");
   jni->ReleaseStringChars(j_string, jchars);
-  CHECK_EXCEPTION(jni, "Error during ReleaseStringChars");
+  CHECK_JNI_EXCEPTION(jni, "Error during ReleaseStringChars");
   std::string ret;
   return ustr.toUTF8String(ret);
 }

@@ -72,10 +72,10 @@ jclass ClassReferenceHolder::GetClass(const std::string& name) {
 
 void ClassReferenceHolder::LoadClass(JNIEnv* jni, const std::string& name) {
   jclass localRef = jni->FindClass(name.c_str());
-  CHECK_EXCEPTION(jni, "Could not load class");
+  CHECK_JNI_EXCEPTION(jni, "Could not load class");
   CHECK(localRef, name.c_str());
   jclass globalRef = reinterpret_cast<jclass>(jni->NewGlobalRef(localRef));
-  CHECK_EXCEPTION(jni, "error during NewGlobalRef");
+  CHECK_JNI_EXCEPTION(jni, "error during NewGlobalRef");
   CHECK(globalRef, name.c_str());
   bool inserted = classes_.insert(std::make_pair(name, globalRef)).second;
   CHECK(inserted, "Duplicate class name");

@@ -34,7 +34,7 @@
 
 // Abort the process if |jni| has a Java exception pending, emitting |msg| to
 // logcat.
-#define CHECK_EXCEPTION(jni, msg) \
+#define CHECK_JNI_EXCEPTION(jni, msg) \
   if (0) { \
   } else { \
     if (jni->ExceptionCheck()) { \

@@ -115,7 +115,7 @@ class VideoDecodeEncodeObserver : public webrtc::ViEDecoderObserver,
   jmethodID j_codec_ctor = GetMethodID(jni, j_codec_class, "<init>", "(J)V");
   jobject j_codec =
       jni->NewObject(j_codec_class, j_codec_ctor, jlongFromPointer(codec));
-  CHECK_EXCEPTION(jni, "error during NewObject");
+  CHECK_JNI_EXCEPTION(jni, "error during NewObject");
   jni->CallVoidMethod(j_observer_, incoming_codec_changed_, video_channel,
                       j_codec);
 }

@@ -456,7 +456,7 @@ JOWW(jobject, VideoEngine_getCodec)(JNIEnv* jni, jobject j_vie, jint index) {
   jmethodID j_codec_ctor = GetMethodID(jni, j_codec_class, "<init>", "(J)V");
   jobject j_codec =
       jni->NewObject(j_codec_class, j_codec_ctor, jlongFromPointer(codec));
-  CHECK_EXCEPTION(jni, "error during NewObject");
+  CHECK_JNI_EXCEPTION(jni, "error during NewObject");
   return j_codec;
 }
 
@@ -515,7 +515,7 @@ JOWW(jobject,
   jmethodID j_camera_ctor = GetMethodID(jni, j_camera_class, "<init>", "(J)V");
   jobject j_camera = jni->NewObject(j_camera_class, j_camera_ctor,
                                     jlongFromPointer(camera_info));
-  CHECK_EXCEPTION(jni, "error during NewObject");
+  CHECK_JNI_EXCEPTION(jni, "error during NewObject");
   return j_camera;
 }
 
@@ -610,7 +610,7 @@ JOWW(jobject, VideoEngine_getReceivedRtcpStatistics)(JNIEnv* jni, jobject j_vie,
       jni->NewObject(j_rtcp_statistics_class, j_rtcp_statistics_ctor,
                      fraction_lost, cumulative_lost, extended_max, jitter,
                      static_cast<int>(rtt_ms));
-  CHECK_EXCEPTION(jni, "error during NewObject");
+  CHECK_JNI_EXCEPTION(jni, "error during NewObject");
   return j_rtcp_statistics;
 }
 
@@ -327,7 +327,7 @@ JOWW(jobject, VoiceEngine_getCodec)(JNIEnv* jni, jobject j_voe, jint index) {
   jmethodID j_codec_ctor = GetMethodID(jni, j_codec_class, "<init>", "(J)V");
   jobject j_codec =
       jni->NewObject(j_codec_class, j_codec_ctor, jlongFromPointer(codec));
-  CHECK_EXCEPTION(jni, "error during NewObject");
+  CHECK_JNI_EXCEPTION(jni, "error during NewObject");
   return j_codec;
 }
 
@@ -178,6 +178,7 @@ source_set("audio_device") {
   }
 
   deps = [
+    "../../base:rtc_base_approved",
     "../../common_audio",
     "../../system_wrappers",
    "../utility",

@@ -15,6 +15,7 @@ namespace webrtc {
 
 enum {
   kDefaultSampleRate = 44100,
+  kBitsPerSample = 16,
   kNumChannels = 1,
   kDefaultBufSizeInSamples = kDefaultSampleRate * 10 / 1000,
 };

@@ -11,8 +11,8 @@
 #ifndef WEBRTC_MODULES_AUDIO_DEVICE_ANDROID_AUDIO_DEVICE_TEMPLATE_H_
 #define WEBRTC_MODULES_AUDIO_DEVICE_ANDROID_AUDIO_DEVICE_TEMPLATE_H_
 
+#include "webrtc/base/checks.h"
 #include "webrtc/modules/audio_device/audio_device_generic.h"
-
 #include "webrtc/system_wrappers/interface/trace.h"
 
 namespace webrtc {

@@ -22,13 +22,11 @@ namespace webrtc {
 template <class InputType, class OutputType>
 class AudioDeviceTemplate : public AudioDeviceGeneric {
  public:
-  static int32_t SetAndroidAudioDeviceObjects(void* javaVM,
-                                              void* env,
-                                              void* context) {
-    if (OutputType::SetAndroidAudioDeviceObjects(javaVM, env, context) == -1) {
-      return -1;
-    }
-    return InputType::SetAndroidAudioDeviceObjects(javaVM, env, context);
+  static void SetAndroidAudioDeviceObjects(void* javaVM,
+                                           void* env,
+                                           void* context) {
+    OutputType::SetAndroidAudioDeviceObjects(javaVM, env, context);
+    InputType::SetAndroidAudioDeviceObjects(javaVM, env, context);
   }
 
   static void ClearAndroidAudioDeviceObjects() {

@@ -38,7 +36,8 @@ class AudioDeviceTemplate : public AudioDeviceGeneric {
 
   explicit AudioDeviceTemplate(const int32_t id)
       : output_(id),
-        input_(id, &output_) {
+        // TODO(henrika): provide proper delay estimate using input_(&output_).
+        input_() {
   }
 
   virtual ~AudioDeviceTemplate() {

@@ -59,7 +58,7 @@ class AudioDeviceTemplate : public AudioDeviceGeneric {
   }
 
   bool Initialized() const {
-    return output_.Initialized() && input_.Initialized();
+    return output_.Initialized();
   }
 
   int16_t PlayoutDevices() {

@@ -67,7 +66,7 @@ class AudioDeviceTemplate : public AudioDeviceGeneric {
   }
 
   int16_t RecordingDevices() {
-    return input_.RecordingDevices();
+    return 1;
   }
 
   int32_t PlayoutDeviceName(

@@ -81,7 +80,7 @@ class AudioDeviceTemplate : public AudioDeviceGeneric {
       uint16_t index,
       char name[kAdmMaxDeviceNameSize],
       char guid[kAdmMaxGuidSize]) {
-    return input_.RecordingDeviceName(index, name, guid);
+    return -1;
   }
 
   int32_t SetPlayoutDevice(uint16_t index) {

@@ -94,12 +93,15 @@ class AudioDeviceTemplate : public AudioDeviceGeneric {
   }
 
   int32_t SetRecordingDevice(uint16_t index) {
-    return input_.SetRecordingDevice(index);
+    // OK to use but it has no effect currently since device selection is
+    // done using Andoid APIs instead.
+    return 0;
   }
 
   int32_t SetRecordingDevice(
       AudioDeviceModule::WindowsDeviceType device) {
-    return input_.SetRecordingDevice(device);
+    FATAL() << "Should never be called";
+    return -1;
   }
 
   int32_t PlayoutIsAvailable(

@@ -117,7 +119,8 @@ class AudioDeviceTemplate : public AudioDeviceGeneric {
 
   int32_t RecordingIsAvailable(
       bool& available) {  // NOLINT
-    return input_.RecordingIsAvailable(available);
+    available = true;
+    return 0;
   }
 
   int32_t InitRecording() {

@@ -153,17 +156,19 @@ class AudioDeviceTemplate : public AudioDeviceGeneric {
   }
 
   int32_t SetAGC(bool enable) {
-    return input_.SetAGC(enable);
+    if (enable) {
+      FATAL() << "Should never be called";
+    }
+    return -1;
   }
 
   bool AGC() const {
-    return input_.AGC();
+    return false;
   }
 
   int32_t SetWaveOutVolume(uint16_t volumeLeft,
                            uint16_t volumeRight) {
-    WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, 0,
-                 " API call not supported on this platform");
+    FATAL() << "Should never be called";
     return -1;
   }
 
@@ -184,11 +189,11 @@ class AudioDeviceTemplate : public AudioDeviceGeneric {
   }
 
   int32_t InitMicrophone() {
-    return input_.InitMicrophone();
+    return 0;
   }
 
   bool MicrophoneIsInitialized() const {
-    return input_.MicrophoneIsInitialized();
+    return true;
   }
 
   int32_t SpeakerVolumeIsAvailable(

@@ -222,31 +227,38 @@ class AudioDeviceTemplate : public AudioDeviceGeneric {
 
   int32_t MicrophoneVolumeIsAvailable(
       bool& available) {  // NOLINT
-    return input_.MicrophoneVolumeIsAvailable(available);
+    available = false;
+    FATAL() << "Should never be called";
+    return -1;
   }
 
   int32_t SetMicrophoneVolume(uint32_t volume) {
-    return input_.SetMicrophoneVolume(volume);
+    FATAL() << "Should never be called";
+    return -1;
   }
 
   int32_t MicrophoneVolume(
       uint32_t& volume) const {  // NOLINT
-    return input_.MicrophoneVolume(volume);
+    FATAL() << "Should never be called";
+    return -1;
   }
 
   int32_t MaxMicrophoneVolume(
       uint32_t& maxVolume) const {  // NOLINT
-    return input_.MaxMicrophoneVolume(maxVolume);
+    FATAL() << "Should never be called";
+    return -1;
   }
 
   int32_t MinMicrophoneVolume(
       uint32_t& minVolume) const {  // NOLINT
-    return input_.MinMicrophoneVolume(minVolume);
+    FATAL() << "Should never be called";
+    return -1;
   }
 
   int32_t MicrophoneVolumeStepSize(
       uint16_t& stepSize) const {  // NOLINT
-    return input_.MicrophoneVolumeStepSize(stepSize);
+    FATAL() << "Should never be called";
+    return -1;
   }
 
   int32_t SpeakerMuteIsAvailable(

@@ -265,30 +277,36 @@ class AudioDeviceTemplate : public AudioDeviceGeneric {
 
   int32_t MicrophoneMuteIsAvailable(
       bool& available) {  // NOLINT
-    return input_.MicrophoneMuteIsAvailable(available);
+    FATAL() << "Not implemented";
+    return -1;
   }
 
   int32_t SetMicrophoneMute(bool enable) {
-    return input_.SetMicrophoneMute(enable);
+    FATAL() << "Not implemented";
+    return -1;
   }
 
   int32_t MicrophoneMute(
       bool& enabled) const {  // NOLINT
-    return input_.MicrophoneMute(enabled);
+    FATAL() << "Not implemented";
+    return -1;
   }
 
   int32_t MicrophoneBoostIsAvailable(
       bool& available) {  // NOLINT
-    return input_.MicrophoneBoostIsAvailable(available);
+    FATAL() << "Should never be called";
+    return -1;
   }
 
   int32_t SetMicrophoneBoost(bool enable) {
-    return input_.SetMicrophoneBoost(enable);
+    FATAL() << "Should never be called";
+    return -1;
   }
 
   int32_t MicrophoneBoost(
       bool& enabled) const {  // NOLINT
-    return input_.MicrophoneBoost(enabled);
+    FATAL() << "Should never be called";
+    return -1;
   }
 
   int32_t StereoPlayoutIsAvailable(

@@ -307,16 +325,18 @@ class AudioDeviceTemplate : public AudioDeviceGeneric {
 
   int32_t StereoRecordingIsAvailable(
       bool& available) {  // NOLINT
-    return input_.StereoRecordingIsAvailable(available);
+    available = false;
+    return 0;
   }
 
   int32_t SetStereoRecording(bool enable) {
-    return input_.SetStereoRecording(enable);
+    return -1;
   }
 
   int32_t StereoRecording(
       bool& enabled) const {  // NOLINT
-    return input_.StereoRecording(enabled);
+    enabled = false;
+    return 0;
   }
 
   int32_t SetPlayoutBuffer(

@@ -343,8 +363,7 @@ class AudioDeviceTemplate : public AudioDeviceGeneric {
 
   int32_t CPULoad(
       uint16_t& load) const {  // NOLINT
-    WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, 0,
-                 " API call not supported on this platform");
+    FATAL() << "Should never be called";
     return -1;
   }
 
@@ -357,11 +376,11 @@ class AudioDeviceTemplate : public AudioDeviceGeneric {
   }
 
   bool RecordingWarning() const {
-    return input_.RecordingWarning();
+    return false;
   }
 
   bool RecordingError() const {
-    return input_.RecordingError();
+    return false;
   }
 
   void ClearPlayoutWarning() {

@@ -372,13 +391,9 @@ class AudioDeviceTemplate : public AudioDeviceGeneric {
     return output_.ClearPlayoutError();
   }
 
-  void ClearRecordingWarning() {
-    return input_.ClearRecordingWarning();
-  }
+  void ClearRecordingWarning() {}
 
-  void ClearRecordingError() {
-    return input_.ClearRecordingError();
-  }
+  void ClearRecordingError() {}
 
   void AttachAudioBuffer(
       AudioDeviceBuffer* audioBuffer) {

@@ -386,11 +401,6 @@ class AudioDeviceTemplate : public AudioDeviceGeneric {
     input_.AttachAudioBuffer(audioBuffer);
   }
 
-  int32_t SetRecordingSampleRate(
-      const uint32_t samplesPerSec) {
-    return input_.SetRecordingSampleRate(samplesPerSec);
-  }
-
   int32_t SetPlayoutSampleRate(
       const uint32_t samplesPerSec) {
     return output_.SetPlayoutSampleRate(samplesPerSec);

@@ -8,42 +8,16 @@
  * be found in the AUTHORS file in the root of the source tree.
  */
 
-/*
- * Android audio device utility implementation
- */
-
 #include "webrtc/modules/audio_device/android/audio_device_utility_android.h"
 
-#include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
-#include "webrtc/system_wrappers/interface/trace.h"
-
-namespace webrtc
-{
-
-AudioDeviceUtilityAndroid::AudioDeviceUtilityAndroid(const int32_t id) :
-    _critSect(*CriticalSectionWrapper::CreateCriticalSection()), _id(id)
-{
-    WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, id,
-                 "%s created", __FUNCTION__);
-}
-
-AudioDeviceUtilityAndroid::~AudioDeviceUtilityAndroid()
-{
-    WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, _id,
-                 "%s destroyed", __FUNCTION__);
-    {
-        CriticalSectionScoped lock(&_critSect);
-    }
-
-    delete &_critSect;
-}
-
-int32_t AudioDeviceUtilityAndroid::Init()
-{
-
-    WEBRTC_TRACE(kTraceStateInfo, kTraceAudioDevice, _id,
-                 " OS info: %s", "Android");
-
+namespace webrtc {
+
+AudioDeviceUtilityAndroid::AudioDeviceUtilityAndroid(const int32_t id) {}
+
+AudioDeviceUtilityAndroid::~AudioDeviceUtilityAndroid() {}
+
+int32_t AudioDeviceUtilityAndroid::Init() {
   return 0;
 }
 
@@ -15,24 +15,22 @@
 #ifndef WEBRTC_AUDIO_DEVICE_AUDIO_DEVICE_UTILITY_ANDROID_H
 #define WEBRTC_AUDIO_DEVICE_AUDIO_DEVICE_UTILITY_ANDROID_H
 
+#include <jni.h>
+
+#include "webrtc/base/checks.h"
 #include "webrtc/modules/audio_device/audio_device_utility.h"
 #include "webrtc/modules/audio_device/include/audio_device.h"
 
-namespace webrtc
-{
-class CriticalSectionWrapper;
+namespace webrtc {
 
-class AudioDeviceUtilityAndroid: public AudioDeviceUtility
-{
+// TODO(henrika): this utility class is not used but I would like to keep this
+// file for the other helper methods which are unique for Android.
+class AudioDeviceUtilityAndroid: public AudioDeviceUtility {
  public:
   AudioDeviceUtilityAndroid(const int32_t id);
   ~AudioDeviceUtilityAndroid();
 
   virtual int32_t Init();
-
- private:
-  CriticalSectionWrapper& _critSect;
-  int32_t _id;
 };
 
 }  // namespace webrtc
[File diff suppressed because it is too large]
@ -13,166 +13,143 @@
|
|||||||
|
|
||||||
#include <jni.h>
|
#include <jni.h>
|
||||||
|
|
||||||
#include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
|
#include "webrtc/base/thread_checker.h"
|
||||||
#include "webrtc/modules/audio_device/include/audio_device_defines.h"
|
#include "webrtc/modules/audio_device/include/audio_device_defines.h"
|
||||||
#include "webrtc/modules/audio_device/audio_device_generic.h"
|
#include "webrtc/modules/audio_device/audio_device_generic.h"
|
||||||
|
#include "webrtc/modules/utility/interface/helpers_android.h"
|
||||||
|
|
||||||
namespace webrtc {
|
namespace webrtc {
|
||||||
|
|
||||||
class EventWrapper;
|
|
||||||
class ThreadWrapper;
|
|
||||||
class PlayoutDelayProvider;
|
class PlayoutDelayProvider;
|
||||||
|
|
||||||
const uint32_t N_REC_SAMPLES_PER_SEC = 16000; // Default is 16 kHz
|
// Implements 16-bit mono PCM audio input support for Android using the Java
|
||||||
const uint32_t N_REC_CHANNELS = 1; // default is mono recording
|
// AudioRecord interface. Most of the work is done by its Java counterpart in
|
||||||
const uint32_t REC_BUF_SIZE_IN_SAMPLES = 480; // Handle max 10 ms @ 48 kHz
|
// WebRtcAudioRecord.java. This class is created and lives on a thread in
|
||||||
|
// C++-land, but recorded audio buffers are delivered on a high-priority
|
||||||
|
// thread managed by the Java class.
|
||||||
|
//
|
||||||
|
// The Java class makes use of AudioEffect features (mainly AEC) which are
|
||||||
|
// first available in Jelly Bean. If it is instantiated running against earlier
|
||||||
|
// SDKs, the AEC provided by the APM in WebRTC must be used and enabled
|
||||||
|
// separately instead.
|
||||||
|
//
|
||||||
|
// An instance must be created and destroyed on one and the same thread.
|
||||||
|
// All public methods must also be called on the same thread. A thread checker
|
||||||
|
// will DCHECK if any method is called on an invalid thread.
|
||||||
|
// It is possible to call the two static methods (SetAndroidAudioDeviceObjects
|
||||||
|
// and ClearAndroidAudioDeviceObjects) from a different thread but both will
|
||||||
|
// CHECK that the calling thread is attached to a Java VM.
|
||||||
|
//
|
||||||
|
// All methods use AttachThreadScoped to attach to a Java VM if needed and then
|
||||||
|
// detach when method goes out of scope. We do so beacuse this class does not
|
||||||
|
// own the thread is is created and called on and other objects on the same
|
||||||
|
// thread might put us in a detached state at any time.
|
||||||
class AudioRecordJni {
|
class AudioRecordJni {
|
||||||
public:
|
public:
|
||||||
static int32_t SetAndroidAudioDeviceObjects(void* javaVM, void* env,
|
// Use the invocation API to allow the native application to use the JNI
|
||||||
void* context);
|
// interface pointer to access VM features.
|
||||||
|
// |jvm| denotes the Java VM, |env| is a pointer to the JNI interface pointer
|
||||||
|
// and |context| corresponds to android.content.Context in Java.
|
||||||
|
// This method also sets a global jclass object, |g_audio_record_class| for
|
||||||
|
// the "org/webrtc/voiceengine/WebRtcAudioRecord"-class.
|
||||||
|
static void SetAndroidAudioDeviceObjects(void* jvm, void* env, void* context);
|
||||||
|
// Always call this method after the object has been destructed. It deletes
|
||||||
|
// existing global references and enables garbage collection.
|
||||||
static void ClearAndroidAudioDeviceObjects();
|
static void ClearAndroidAudioDeviceObjects();
|
||||||
|
|
||||||
AudioRecordJni(const int32_t id, PlayoutDelayProvider* delay_provider);
|
AudioRecordJni();
|
||||||
~AudioRecordJni();
|
~AudioRecordJni();
|
||||||
|
|
||||||
// Main initializaton and termination
|
|
||||||
int32_t Init();
|
int32_t Init();
|
||||||
int32_t Terminate();
|
int32_t Terminate();
|
||||||
bool Initialized() const { return _initialized; }
|
|
||||||
|
|
||||||
// Device enumeration
|
|
||||||
int16_t RecordingDevices() { return 1; } // There is one device only
|
|
||||||
int32_t RecordingDeviceName(uint16_t index,
|
|
||||||
char name[kAdmMaxDeviceNameSize],
|
|
||||||
char guid[kAdmMaxGuidSize]);
|
|
||||||
|
|
||||||
// Device selection
|
|
||||||
int32_t SetRecordingDevice(uint16_t index);
|
|
||||||
int32_t SetRecordingDevice(
|
|
||||||
AudioDeviceModule::WindowsDeviceType device);
|
|
||||||
|
|
||||||
// Audio transport initialization
|
|
||||||
int32_t RecordingIsAvailable(bool& available); // NOLINT
|
|
||||||
int32_t InitRecording();
|
int32_t InitRecording();
|
||||||
bool RecordingIsInitialized() const { return _recIsInitialized; }
|
bool RecordingIsInitialized() const { return initialized_; }
|
||||||
|
|
||||||
// Audio transport control
|
|
||||||
int32_t StartRecording();
|
int32_t StartRecording();
|
||||||
int32_t StopRecording ();
|
int32_t StopRecording ();
|
||||||
bool Recording() const { return _recording; }
|
bool Recording() const { return recording_; }
|
||||||
|
|
||||||
// Microphone Automatic Gain Control (AGC)
|
int32_t RecordingDelay(uint16_t& delayMS) const;
|
||||||
int32_t SetAGC(bool enable);
|
|
||||||
bool AGC() const { return _AGC; }
|
|
||||||
|
|
||||||
// Audio mixer initialization
|
|
||||||
int32_t InitMicrophone();
|
|
||||||
bool MicrophoneIsInitialized() const { return _micIsInitialized; }
|
|
||||||
|
|
||||||
// Microphone volume controls
|
|
||||||
int32_t MicrophoneVolumeIsAvailable(bool& available); // NOLINT
|
|
||||||
// TODO(leozwang): Add microphone volume control when OpenSL APIs
|
|
||||||
// are available.
|
|
||||||
int32_t SetMicrophoneVolume(uint32_t volume);
|
|
||||||
int32_t MicrophoneVolume(uint32_t& volume) const; // NOLINT
|
|
||||||
int32_t MaxMicrophoneVolume(uint32_t& maxVolume) const; // NOLINT
|
|
||||||
int32_t MinMicrophoneVolume(uint32_t& minVolume) const; // NOLINT
|
|
||||||
int32_t MicrophoneVolumeStepSize(
|
|
||||||
uint16_t& stepSize) const; // NOLINT
|
|
||||||
|
|
||||||
// Microphone mute control
|
|
||||||
int32_t MicrophoneMuteIsAvailable(bool& available); // NOLINT
|
|
||||||
int32_t SetMicrophoneMute(bool enable);
|
|
||||||
int32_t MicrophoneMute(bool& enabled) const; // NOLINT
|
|
||||||
|
|
||||||
// Microphone boost control
|
|
||||||
int32_t MicrophoneBoostIsAvailable(bool& available); // NOLINT
|
|
||||||
int32_t SetMicrophoneBoost(bool enable);
|
|
||||||
int32_t MicrophoneBoost(bool& enabled) const; // NOLINT
|
|
||||||
|
|
||||||
// Stereo support
|
|
||||||
int32_t StereoRecordingIsAvailable(bool& available); // NOLINT
|
|
||||||
int32_t SetStereoRecording(bool enable);
|
|
||||||
int32_t StereoRecording(bool& enabled) const; // NOLINT
|
|
||||||
|
|
||||||
// Delay information and control
|
|
||||||
int32_t RecordingDelay(uint16_t& delayMS) const; // NOLINT
|
|
||||||
|
|
||||||
bool RecordingWarning() const;
|
|
||||||
bool RecordingError() const;
|
|
||||||
void ClearRecordingWarning();
|
|
||||||
void ClearRecordingError();
|
|
||||||
|
|
||||||
// Attach audio buffer
|
|
||||||
void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer);
|
void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer);
|
||||||
|
|
||||||
int32_t SetRecordingSampleRate(const uint32_t samplesPerSec);
|
|
||||||
|
|
||||||
bool BuiltInAECIsAvailable() const;
|
bool BuiltInAECIsAvailable() const;
|
||||||
int32_t EnableBuiltInAEC(bool enable);
|
int32_t EnableBuiltInAEC(bool enable);
|
||||||
|
|
||||||
private:
|
private:
|
||||||
void Lock() EXCLUSIVE_LOCK_FUNCTION(_critSect) {
|
// Called from Java side so we can cache the address of the Java-manged
|
||||||
_critSect.Enter();
|
// |byte_buffer| in |direct_buffer_address_|. The size of the buffer
|
||||||
}
|
// is also stored in |direct_buffer_capacity_in_bytes_|.
|
||||||
void UnLock() UNLOCK_FUNCTION(_critSect) {
|
// This method will be called by the WebRtcAudioRecord constructor, i.e.,
|
||||||
_critSect.Leave();
|
// on the same thread that this object is created on.
|
||||||
}
|
static void JNICALL CacheDirectBufferAddress(
|
||||||
|
JNIEnv* env, jobject obj, jobject byte_buffer, jlong nativeAudioRecord);
|
||||||
|
void OnCacheDirectBufferAddress(JNIEnv* env, jobject byte_buffer);
|
||||||
|
|
||||||
int32_t InitJavaResources();
|
// Called periodically by the Java based WebRtcAudioRecord object when
|
||||||
int32_t InitSampleRate();
|
// recording has started. Each call indicates that there are |length| new
|
||||||
|
// bytes recorded in the memory area |direct_buffer_address_| and it is
|
||||||
|
// now time to send these to the consumer.
|
||||||
|
// This method is called on a high-priority thread from Java. The name of
|
||||||
|
// the thread is 'AudioRecordThread'.
|
||||||
|
static void JNICALL DataIsRecorded(
|
||||||
|
JNIEnv* env, jobject obj, jint length, jlong nativeAudioRecord);
|
||||||
|
void OnDataIsRecorded(int length);
|
||||||
|
|
||||||
static bool RecThreadFunc(void*);
|
// Returns true if SetAndroidAudioDeviceObjects() has been called
|
||||||
bool RecThreadProcess();
|
// successfully.
|
||||||
|
bool HasDeviceObjects();
|
||||||
|
|
||||||
// TODO(leozwang): Android holds only one JVM, all these jni handling
|
// Called from the constructor. Defines the |j_audio_record_| member.
|
||||||
// will be consolidated into a single place to make it consistant and
|
void CreateJavaInstance();
|
||||||
// reliable. Chromium has a good example at base/android.
|
|
||||||
static JavaVM* globalJvm;
|
|
||||||
static JNIEnv* globalJNIEnv;
|
|
||||||
static jobject globalContext;
|
|
||||||
static jclass globalScClass;
|
|
||||||
|
|
||||||
JavaVM* _javaVM; // denotes a Java VM
|
// Returns the native, or optimal, sample rate reported by the audio input
|
||||||
JNIEnv* _jniEnvRec; // The JNI env for recording thread
|
// device.
|
||||||
jclass _javaScClass; // AudioDeviceAndroid class
|
int GetNativeSampleRate();
|
||||||
jobject _javaScObj; // AudioDeviceAndroid object
|
|
||||||
jobject _javaRecBuffer;
|
|
||||||
void* _javaDirectRecBuffer; // Direct buffer pointer to rec buffer
|
|
||||||
jmethodID _javaMidRecAudio; // Method ID of rec in AudioDeviceAndroid
|
|
||||||
|
|
||||||
AudioDeviceBuffer* _ptrAudioBuffer;
|
// Stores thread ID in constructor.
|
||||||
CriticalSectionWrapper& _critSect;
|
// We can then use ThreadChecker::CalledOnValidThread() to ensure that
|
||||||
int32_t _id;
|
// other methods are called from the same thread.
|
||||||
PlayoutDelayProvider* _delay_provider;
|
// Currently only does DCHECK(thread_checker_.CalledOnValidThread()).
|
||||||
bool _initialized;
|
rtc::ThreadChecker thread_checker_;
|
||||||
|
|
||||||
EventWrapper& _timeEventRec;
|
// Stores thread ID in first call to OnDataIsRecorded() from high-priority
|
||||||
EventWrapper& _recStartStopEvent;
|
// thread in Java. Detached during construction of this object.
|
||||||
ThreadWrapper* _ptrThreadRec;
|
rtc::ThreadChecker thread_checker_java_;
|
||||||
uint32_t _recThreadID;
|
|
||||||
bool _recThreadIsInitialized;
|
|
||||||
bool _shutdownRecThread;
|
|
||||||
|
|
||||||
int8_t _recBuffer[2 * REC_BUF_SIZE_IN_SAMPLES];
|
|
||||||
bool _recordingDeviceIsSpecified;
|
|
||||||
|
|
||||||
bool _recording;
|
// Should return the current playout delay.
|
||||||
bool _recIsInitialized;
|
// TODO(henrika): fix on Android. Reports zero today.
|
||||||
bool _micIsInitialized;
|
// PlayoutDelayProvider* delay_provider_;
|
||||||
|
|
||||||
bool _startRec;
|
// The Java WebRtcAudioRecord instance.
|
||||||
|
jobject j_audio_record_;
|
||||||
|
|
||||||
uint16_t _recWarning;
|
// Cached copy of address to direct audio buffer owned by |j_audio_record_|.
|
||||||
uint16_t _recError;
|
void* direct_buffer_address_;
|
||||||
|
|
||||||
uint16_t _delayRecording;
|
// Number of bytes in the direct audio buffer owned by |j_audio_record_|.
|
||||||
|
int direct_buffer_capacity_in_bytes_;
|
||||||
|
|
||||||
bool _AGC;
|
// Number audio frames per audio buffer. Each audio frame corresponds to
|
||||||
|
// one sample of PCM mono data at 16 bits per sample. Hence, each audio
|
||||||
|
// frame contains 2 bytes (given that the Java layer only supports mono).
|
||||||
|
// Example: 480 for 48000 Hz or 441 for 44100 Hz.
|
||||||
|
int frames_per_buffer_;
|
||||||
|
|
||||||
uint16_t _samplingFreqIn; // Sampling frequency for Mic
|
bool initialized_;
|
||||||
int _recAudioSource;
|
|
||||||
|
bool recording_;
|
||||||
|
|
||||||
|
// Raw pointer handle provided to us in AttachAudioBuffer(). Owned by the
|
||||||
|
// AudioDeviceModuleImpl class and called by AudioDeviceModuleImpl::Create().
|
||||||
|
AudioDeviceBuffer* audio_device_buffer_;
|
||||||
|
|
||||||
|
// Native sample rate set in AttachAudioBuffer() which uses JNI to ask the
|
||||||
|
// Java layer for the best possible sample rate for this particular device
|
||||||
|
// and audio configuration.
|
||||||
|
int sample_rate_hz_;
|
||||||
|
|
||||||
};
|
};
|
||||||
|
|
||||||
|
@ -107,6 +107,7 @@ class AudioTrackJni : public PlayoutDelayProvider {
|
|||||||
int32_t GetLoudspeakerStatus(bool& enable) const; // NOLINT
|
int32_t GetLoudspeakerStatus(bool& enable) const; // NOLINT
|
||||||
|
|
||||||
protected:
|
protected:
|
||||||
|
// TODO(henrika): improve this estimate.
|
||||||
virtual int PlayoutDelayMs() { return 0; }
|
virtual int PlayoutDelayMs() { return 0; }
|
||||||
|
|
||||||
private:
|
private:
|
||||||
|
@ -10,8 +10,13 @@
|
|||||||
|
|
||||||
package org.webrtc.voiceengine;
|
package org.webrtc.voiceengine;
|
||||||
|
|
||||||
|
import java.lang.System;
|
||||||
|
import java.lang.Thread;
|
||||||
import java.nio.ByteBuffer;
|
import java.nio.ByteBuffer;
|
||||||
import java.util.concurrent.locks.ReentrantLock;
|
import java.util.concurrent.TimeUnit;
|
||||||
|
import java.util.Collections;
|
||||||
|
import java.util.HashSet;
|
||||||
|
import java.util.Set;
|
||||||
|
|
||||||
import android.content.Context;
|
import android.content.Context;
|
||||||
import android.media.AudioFormat;
|
import android.media.AudioFormat;
|
||||||
@ -22,39 +27,161 @@ import android.media.AudioManager;
|
|||||||
import android.media.AudioRecord;
|
import android.media.AudioRecord;
|
||||||
import android.media.MediaRecorder.AudioSource;
|
import android.media.MediaRecorder.AudioSource;
|
||||||
import android.os.Build;
|
import android.os.Build;
|
||||||
|
import android.os.Process;
|
||||||
|
import android.os.SystemClock;
|
||||||
import android.util.Log;
|
import android.util.Log;
|
||||||
|
|
||||||
class WebRtcAudioRecord {
|
class WebRtcAudioRecord {
|
||||||
private AudioRecord _audioRecord = null;
|
private static final boolean DEBUG = false;
|
||||||
|
|
||||||
private Context _context;
|
private static final String TAG = "WebRtcAudioRecord";
|
||||||
|
|
||||||
private ByteBuffer _recBuffer;
|
// Use 44.1kHz as the default sampling rate.
|
||||||
private byte[] _tempBufRec;
|
private static final int SAMPLE_RATE_HZ = 44100;
|
||||||
|
|
||||||
private final ReentrantLock _recLock = new ReentrantLock();
|
// Mono recording is default.
|
||||||
|
private static final int CHANNELS = 1;
|
||||||
|
|
||||||
private boolean _doRecInit = true;
|
// Default audio data format is PCM 16 bit per sample.
|
||||||
private boolean _isRecording = false;
|
// Guaranteed to be supported by all devices.
|
||||||
|
private static final int BITS_PER_SAMPLE = 16;
|
||||||
|
|
||||||
private int _bufferedRecSamples = 0;
|
// Number of bytes per audio frame.
|
||||||
|
// Example: 16-bit PCM in stereo => 2*(16/8)=4 [bytes/frame]
|
||||||
|
private static final int BYTES_PER_FRAME = CHANNELS * (BITS_PER_SAMPLE / 8);
|
||||||
|
|
||||||
private AcousticEchoCanceler _aec = null;
|
// Requested size of each recorded buffer provided to the client.
|
||||||
private boolean _useBuiltInAEC = false;
|
private static final int CALLBACK_BUFFER_SIZE_MS = 10;
|
||||||
|
|
||||||
|
// Average number of callbacks per second.
|
||||||
|
private static final int BUFFERS_PER_SECOND = 1000 / CALLBACK_BUFFER_SIZE_MS;
|
||||||
|
|
||||||
|
private ByteBuffer byteBuffer;
|
||||||
|
private final int bytesPerBuffer;
|
||||||
|
private final int framesPerBuffer;
|
||||||
|
private final int sampleRate;
|
||||||
|
|
||||||
|
private final long nativeAudioRecord;
|
||||||
|
private final AudioManager audioManager;
|
||||||
|
private final Context context;
|
||||||
|
|
||||||
|
private AudioRecord audioRecord = null;
|
||||||
|
private AudioRecordThread audioThread = null;
|
||||||
|
|
||||||
|
private AcousticEchoCanceler aec = null;
|
||||||
|
private boolean useBuiltInAEC = false;
|
||||||
|
|
||||||
|
private final Set<Long> threadIds = new HashSet<Long>();
|
||||||
|
|
||||||
private static boolean runningOnJellyBeanOrHigher() {
|
private static boolean runningOnJellyBeanOrHigher() {
|
||||||
return Build.VERSION.SDK_INT >= Build.VERSION_CODES.JELLY_BEAN;
|
return Build.VERSION.SDK_INT >= Build.VERSION_CODES.JELLY_BEAN;
|
||||||
}
|
}
|
||||||
|
|
||||||
WebRtcAudioRecord() {
|
private static boolean runningOnJellyBeanMR1OrHigher() {
|
||||||
try {
|
return Build.VERSION.SDK_INT >= Build.VERSION_CODES.JELLY_BEAN_MR1;
|
||||||
_recBuffer = ByteBuffer.allocateDirect(2 * 480); // Max 10 ms @ 48
|
|
||||||
// kHz
|
|
||||||
} catch (Exception e) {
|
|
||||||
DoLog(e.getMessage());
|
|
||||||
}
|
}
|
||||||
|
|
||||||
_tempBufRec = new byte[2 * 480];
|
/**
|
||||||
|
* Audio thread which keeps calling ByteBuffer.read() waiting for audio
|
||||||
|
* to be recorded. Feeds recorded data to the native counterpart as a
|
||||||
|
* periodic sequence of callbacks using DataIsRecorded().
|
||||||
|
* This thread uses a Process.THREAD_PRIORITY_URGENT_AUDIO priority.
|
||||||
|
*/
|
||||||
|
private class AudioRecordThread extends Thread {
|
||||||
|
private volatile boolean keepAlive = true;
|
||||||
|
|
||||||
|
public AudioRecordThread(String name) {
|
||||||
|
super(name);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public void run() {
|
||||||
|
Process.setThreadPriority(Process.THREAD_PRIORITY_URGENT_AUDIO);
|
||||||
|
DoLog("AudioRecordThread" + getThreadInfo());
|
||||||
|
AddThreadId();
|
||||||
|
|
||||||
|
try {
|
||||||
|
audioRecord.startRecording();
|
||||||
|
} catch (IllegalStateException e) {
|
||||||
|
DoLogErr("AudioRecord.startRecording failed: " + e.getMessage());
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
assertIsTrue(audioRecord.getRecordingState()
|
||||||
|
== AudioRecord.RECORDSTATE_RECORDING);
|
||||||
|
|
||||||
|
long lastTime = System.nanoTime();
|
||||||
|
while (keepAlive) {
|
||||||
|
int bytesRead = audioRecord.read(byteBuffer, byteBuffer.capacity());
|
||||||
|
if (bytesRead == byteBuffer.capacity()) {
|
||||||
|
nativeDataIsRecorded(bytesRead, nativeAudioRecord);
|
||||||
|
} else {
|
||||||
|
DoLogErr("AudioRecord.read failed: " + bytesRead);
|
||||||
|
if (bytesRead == AudioRecord.ERROR_INVALID_OPERATION) {
|
||||||
|
keepAlive = false;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if (DEBUG) {
|
||||||
|
long nowTime = System.nanoTime();
|
||||||
|
long durationInMs =
|
||||||
|
TimeUnit.NANOSECONDS.toMillis((nowTime - lastTime));
|
||||||
|
lastTime = nowTime;
|
||||||
|
DoLog("bytesRead[" + durationInMs + "] " + bytesRead);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
try {
|
||||||
|
audioRecord.stop();
|
||||||
|
} catch (IllegalStateException e) {
|
||||||
|
DoLogErr("AudioRecord.stop failed: " + e.getMessage());
|
||||||
|
}
|
||||||
|
RemoveThreadId();
|
||||||
|
}
|
||||||
|
|
||||||
|
public void joinThread() {
|
||||||
|
keepAlive = false;
|
||||||
|
while (isAlive()) {
|
||||||
|
try {
|
||||||
|
join();
|
||||||
|
} catch (InterruptedException e) {
|
||||||
|
// Ignore.
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
WebRtcAudioRecord(Context context, long nativeAudioRecord) {
|
||||||
|
DoLog("ctor" + getThreadInfo());
|
||||||
|
this.context = context;
|
||||||
|
this.nativeAudioRecord = nativeAudioRecord;
|
||||||
|
audioManager = ((AudioManager) context.getSystemService(
|
||||||
|
Context.AUDIO_SERVICE));
|
||||||
|
sampleRate = GetNativeSampleRate();
|
||||||
|
bytesPerBuffer = BYTES_PER_FRAME * (sampleRate / BUFFERS_PER_SECOND);
|
||||||
|
framesPerBuffer = sampleRate / BUFFERS_PER_SECOND;
|
||||||
|
byteBuffer = byteBuffer.allocateDirect(bytesPerBuffer);
|
||||||
|
DoLog("byteBuffer.capacity: " + byteBuffer.capacity());
|
||||||
|
|
||||||
|
// Rather than passing the ByteBuffer with every callback (requiring
|
||||||
|
// the potentially expensive GetDirectBufferAddress) we simply have the
|
||||||
|
// the native class cache the address to the memory once.
|
||||||
|
nativeCacheDirectBufferAddress(byteBuffer, nativeAudioRecord);
|
||||||
|
AddThreadId();
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Returns the native or optimal input sample rate for this device's
|
||||||
|
* primary input stream. Unit is in Hz.
|
||||||
|
* Note that we actually query the output device but the same result is
|
||||||
|
* also valid for input.
|
||||||
|
*/
|
||||||
|
private int GetNativeSampleRate() {
|
||||||
|
if (!runningOnJellyBeanMR1OrHigher()) {
|
||||||
|
return SAMPLE_RATE_HZ;
|
||||||
|
}
|
||||||
|
String sampleRateString = audioManager.getProperty(
|
||||||
|
AudioManager.PROPERTY_OUTPUT_SAMPLE_RATE);
|
||||||
|
return (sampleRateString == null) ?
|
||||||
|
SAMPLE_RATE_HZ : Integer.parseInt(sampleRateString);
|
||||||
}
|
}
|
||||||
|
|
||||||
public static boolean BuiltInAECIsAvailable() {
|
public static boolean BuiltInAECIsAvailable() {
|
||||||
@@ -68,212 +195,162 @@ class WebRtcAudioRecord {
     return AcousticEchoCanceler.isAvailable();
   }
 
-  private int EnableBuiltInAEC(boolean enable) {
+  private boolean EnableBuiltInAEC(boolean enable) {
     DoLog("EnableBuiltInAEC(" + enable + ')');
+    AddThreadId();
     // AcousticEchoCanceler was added in API level 16 (Jelly Bean).
     if (!runningOnJellyBeanOrHigher()) {
-      return -1;
+      return false;
     }
-    _useBuiltInAEC = enable;
+    // Store the AEC state.
+    useBuiltInAEC = enable;
 
     // Set AEC state if AEC has already been created.
-    if (_aec != null) {
-      int ret = _aec.setEnabled(enable);
+    if (aec != null) {
+      int ret = aec.setEnabled(enable);
       if (ret != AudioEffect.SUCCESS) {
         DoLogErr("AcousticEchoCanceler.setEnabled failed");
-        return -1;
+        return false;
       }
-      DoLog("AcousticEchoCanceler.getEnabled: " + _aec.getEnabled());
+      DoLog("AcousticEchoCanceler.getEnabled: " + aec.getEnabled());
     }
-    return 0;
+    return true;
   }
 
-  @SuppressWarnings("unused")
-  private int InitRecording(int audioSource, int sampleRate) {
-    DoLog("InitRecording");
-    audioSource = AudioSource.VOICE_COMMUNICATION;
-    // get the minimum buffer size that can be used
-    int minRecBufSize = AudioRecord.getMinBufferSize(
+  private int InitRecording(int sampleRate) {
+    DoLog("InitRecording(sampleRate=" + sampleRate + ")");
+    AddThreadId();
+    // Get the minimum buffer size required for the successful creation of
+    // an AudioRecord object, in byte units.
+    // Note that this size doesn't guarantee a smooth recording under load.
+    // TODO(henrika): Do we need to make this larger to avoid underruns?
+    int minBufferSize = AudioRecord.getMinBufferSize(
         sampleRate,
         AudioFormat.CHANNEL_IN_MONO,
         AudioFormat.ENCODING_PCM_16BIT);
+    DoLog("AudioRecord.getMinBufferSize: " + minBufferSize);
 
-    // DoLog("min rec buf size is " + minRecBufSize);
-
-    // double size to be more safe
-    int recBufSize = minRecBufSize * 2;
-    // On average half of the samples have been recorded/buffered and the
-    // recording interval is 1/100s.
-    _bufferedRecSamples = sampleRate / 200;
-    // DoLog("rough rec delay set to " + _bufferedRecSamples);
-
-    if (_aec != null) {
-      _aec.release();
-      _aec = null;
+    if (aec != null) {
+      aec.release();
+      aec = null;
     }
-
-    // release the object
-    if (_audioRecord != null) {
-      _audioRecord.release();
-      _audioRecord = null;
+    if (audioRecord != null) {
+      audioRecord.release();
+      audioRecord = null;
     }
 
+    int bufferSizeInBytes = Math.max(byteBuffer.capacity(), minBufferSize);
+    DoLog("bufferSizeInBytes: " + bufferSizeInBytes);
     try {
-      _audioRecord = new AudioRecord(
-          audioSource,
-          sampleRate,
-          AudioFormat.CHANNEL_IN_MONO,
-          AudioFormat.ENCODING_PCM_16BIT,
-          recBufSize);
-    } catch (Exception e) {
+      audioRecord = new AudioRecord(AudioSource.VOICE_COMMUNICATION,
+                                    sampleRate,
+                                    AudioFormat.CHANNEL_IN_MONO,
+                                    AudioFormat.ENCODING_PCM_16BIT,
+                                    bufferSizeInBytes);
+    } catch (IllegalArgumentException e) {
       DoLog(e.getMessage());
       return -1;
     }
+    assertIsTrue(audioRecord.getState() == AudioRecord.STATE_INITIALIZED);
 
-    // check that the audioRecord is ready to be used
-    if (_audioRecord.getState() != AudioRecord.STATE_INITIALIZED) {
-      // DoLog("rec not initialized " + sampleRate);
-      return -1;
-    }
-
-    // DoLog("rec sample rate set to " + sampleRate);
-
+    DoLog("AudioRecord " +
+        "session ID: " + audioRecord.getAudioSessionId() + ", " +
+        "audio format: " + audioRecord.getAudioFormat() + ", " +
+        "channels: " + audioRecord.getChannelCount() + ", " +
+        "sample rate: " + audioRecord.getSampleRate());
     DoLog("AcousticEchoCanceler.isAvailable: " + BuiltInAECIsAvailable());
     if (!BuiltInAECIsAvailable()) {
-      return _bufferedRecSamples;
+      return framesPerBuffer;
     }
 
-    _aec = AcousticEchoCanceler.create(_audioRecord.getAudioSessionId());
-    if (_aec == null) {
+    aec = AcousticEchoCanceler.create(audioRecord.getAudioSessionId());
+    if (aec == null) {
       DoLogErr("AcousticEchoCanceler.create failed");
       return -1;
     }
-    int ret = _aec.setEnabled(_useBuiltInAEC);
+    int ret = aec.setEnabled(useBuiltInAEC);
     if (ret != AudioEffect.SUCCESS) {
       DoLogErr("AcousticEchoCanceler.setEnabled failed");
       return -1;
     }
-    Descriptor descriptor = _aec.getDescriptor();
+    Descriptor descriptor = aec.getDescriptor();
     DoLog("AcousticEchoCanceler " +
         "name: " + descriptor.name + ", " +
         "implementor: " + descriptor.implementor + ", " +
         "uuid: " + descriptor.uuid);
-    DoLog("AcousticEchoCanceler.getEnabled: " + _aec.getEnabled());
-    return _bufferedRecSamples;
+    DoLog("AcousticEchoCanceler.getEnabled: " + aec.getEnabled());
+    return framesPerBuffer;
   }
 
-  @SuppressWarnings("unused")
-  private int StartRecording() {
+  private boolean StartRecording() {
     DoLog("StartRecording");
-    // start recording
-    try {
-      _audioRecord.startRecording();
-    } catch (IllegalStateException e) {
-      e.printStackTrace();
-      return -1;
+    AddThreadId();
+    if (audioRecord == null) {
+      DoLogErr("start() called before init()");
+      return false;
     }
-
-    _isRecording = true;
-    return 0;
+    if (audioThread != null) {
+      DoLogErr("start() was already called");
+      return false;
+    }
+    audioThread = new AudioRecordThread("AudioRecordJavaThread");
+    audioThread.start();
+    return true;
   }
 
-  @SuppressWarnings("unused")
-  private int StopRecording() {
+  private boolean StopRecording() {
     DoLog("StopRecording");
-    _recLock.lock();
-    try {
-      // only stop if we are recording
-      if (_audioRecord.getRecordingState() ==
-          AudioRecord.RECORDSTATE_RECORDING) {
-        // stop recording
-        try {
-          _audioRecord.stop();
-        } catch (IllegalStateException e) {
-          e.printStackTrace();
-          return -1;
-        }
-      }
-
-      // Release the AEC object.
-      if (_aec != null) {
-        _aec.release();
-        _aec = null;
-      }
-
-      // Release the AudioRecord object.
-      _audioRecord.release();
-      _audioRecord = null;
-    } finally {
-      // Ensure we always unlock, both for success, exception or error
-      // return.
-      _doRecInit = true;
-      _recLock.unlock();
+    AddThreadId();
+    if (audioThread == null) {
+      DoLogErr("start() was never called, or stop() was already called");
+      return false;
     }
-
-    _isRecording = false;
-    return 0;
-  }
-
-  @SuppressWarnings("unused")
-  private int RecordAudio(int lengthInBytes) {
-    _recLock.lock();
-    try {
-      if (_audioRecord == null) {
-        return -2; // We have probably closed down while waiting for rec
-                   // lock
-      }
-
-      // Set priority, only do once
-      if (_doRecInit == true) {
-        try {
-          android.os.Process.setThreadPriority(
-              android.os.Process.THREAD_PRIORITY_URGENT_AUDIO);
-        } catch (Exception e) {
-          DoLog("Set rec thread priority failed: " + e.getMessage());
-        }
-        _doRecInit = false;
-      }
-
-      int readBytes = 0;
-      _recBuffer.rewind(); // Reset the position to start of buffer
-      readBytes = _audioRecord.read(_tempBufRec, 0, lengthInBytes);
-      // DoLog("read " + readBytes + "from SC");
-      _recBuffer.put(_tempBufRec);
-
-      if (readBytes != lengthInBytes) {
-        // DoLog("Could not read all data from sc (read = " + readBytes
-        // + ", length = " + lengthInBytes + ")");
-        return -1;
-      }
-    } catch (Exception e) {
-      DoLogErr("RecordAudio try failed: " + e.getMessage());
-    } finally {
-      // Ensure we always unlock, both for success, exception or error
-      // return.
-      _recLock.unlock();
+    audioThread.joinThread();
+    audioThread = null;
+    if (aec != null) {
+      aec.release();
+      aec = null;
     }
-
-    return _bufferedRecSamples;
+    if (audioRecord != null) {
+      audioRecord.release();
+      audioRecord = null;
+    }
+    return true;
   }
 
-  final String logTag = "WebRtcAudioRecord-Java";
-
   private void DoLog(String msg) {
-    Log.d(logTag, msg);
+    Log.d(TAG, msg);
   }
 
   private void DoLogErr(String msg) {
-    Log.e(logTag, msg);
+    Log.e(TAG, msg);
   }
+
+  /** Helper method for building a string of thread information.*/
+  private static String getThreadInfo() {
+    return "@[name=" + Thread.currentThread().getName()
+        + ", id=" + Thread.currentThread().getId() + "]";
+  }
+
+  /** Helper method which throws an exception when an assertion has failed. */
+  private static void assertIsTrue(boolean condition) {
+    if (!condition) {
+      throw new AssertionError("Expected condition to be true");
+    }
+  }
+
+  private void AddThreadId() {
+    threadIds.add(Thread.currentThread().getId());
+    DoLog("threadIds: " + threadIds + " (#threads=" + threadIds.size() + ")");
+  }
+
+  private void RemoveThreadId() {
+    threadIds.remove(Thread.currentThread().getId());
+    DoLog("threadIds: " + threadIds + " (#threads=" + threadIds.size() + ")");
+  }
+
+  private native void nativeCacheDirectBufferAddress(
+      ByteBuffer byteBuffer, long nativeAudioRecord);
+  private native void nativeDataIsRecorded(int bytes, long nativeAudioRecord);
 }
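For orientation (not part of this hunk): StartRecording() above spins up an AudioRecordThread and StopRecording() joins it via joinThread(), but the thread class itself is not visible here. A minimal sketch of the shape such an inner class could take, inferred only from the calls in the diff (joinThread(), nativeDataIsRecorded(...), nativeCacheDirectBufferAddress(...)); the enclosing class's audioRecord, byteBuffer and nativeAudioRecord fields and the loop details are assumptions:

    private class AudioRecordThread extends Thread {
      private volatile boolean keepAlive = true;

      public AudioRecordThread(String name) {
        super(name);
      }

      @Override
      public void run() {
        // The high-priority audio thread now lives in Java-land; the C++
        // side only acts as a proxy (see commit message). No locks needed:
        // |keepAlive| is the only shared state and it is volatile.
        android.os.Process.setThreadPriority(
            android.os.Process.THREAD_PRIORITY_URGENT_AUDIO);
        audioRecord.startRecording();
        while (keepAlive) {
          // Fill the direct ByteBuffer whose address was cached natively
          // via nativeCacheDirectBufferAddress().
          int bytesRead = audioRecord.read(byteBuffer, byteBuffer.capacity());
          if (bytesRead == byteBuffer.capacity()) {
            nativeDataIsRecorded(bytesRead, nativeAudioRecord);
          }
        }
        audioRecord.stop();
      }

      // Stops the inner loop and blocks until the thread has terminated.
      public void joinThread() {
        keepAlive = false;
        while (isAlive()) {
          try {
            join();
          } catch (InterruptedException e) {
            // Retry until the thread has exited.
          }
        }
      }
    }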
@@ -13,6 +13,7 @@
     'type': 'static_library',
     'dependencies': [
       'webrtc_utility',
+      '<(webrtc_root)/base/base.gyp:rtc_base_approved',
       '<(webrtc_root)/common_audio/common_audio.gyp:common_audio',
       '<(webrtc_root)/system_wrappers/system_wrappers.gyp:system_wrappers',
     ],
@@ -12,9 +12,37 @@
 #define WEBRTC_MODULES_UTILITY_INTERFACE_HELPERS_ANDROID_H_
 
 #include <jni.h>
+#include <string>
+
+// Abort the process if |jni| has a Java exception pending.
+// TODO(henrika): merge with CHECK_JNI_EXCEPTION() in jni_helpers.h.
+#define CHECK_EXCEPTION(jni)    \
+  CHECK(!jni->ExceptionCheck()) \
+      << (jni->ExceptionDescribe(), jni->ExceptionClear(), "")
 
 namespace webrtc {
 
+// Return a |JNIEnv*| usable on this thread or NULL if this thread is detached.
+JNIEnv* GetEnv(JavaVM* jvm);
+
+// JNIEnv-helper methods that wrap the API which uses the JNI interface
+// pointer (JNIEnv*). They allow us to CHECK success and that no Java
+// exception is thrown while calling each method.
+jmethodID GetMethodID(
+    JNIEnv* jni, jclass c, const std::string& name, const char* signature);
+
+jclass FindClass(JNIEnv* jni, const std::string& name);
+
+jobject NewGlobalRef(JNIEnv* jni, jobject o);
+
+void DeleteGlobalRef(JNIEnv* jni, jobject o);
+
+// Return thread ID as a string.
+std::string GetThreadId();
+
+// Return thread ID as a string suitable for debug logging.
+std::string GetThreadInfo();
+
 // Attach thread to JVM if necessary and detach at scope end if originally
 // attached.
 class AttachThreadScoped {
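These declarations centralize JNI error handling: each wrapper CHECKs both that no Java exception is pending and that the result is non-NULL, so call sites stay linear. A hedged usage sketch; the function name, Java class path and method signature are assumptions chosen to match the Java hunk earlier in this diff:

    #include <jni.h>

    #include "webrtc/modules/utility/interface/helpers_android.h"

    void InitJavaBindings(JavaVM* jvm) {
      // GetEnv() returns NULL if this thread is not attached to the VM.
      JNIEnv* jni = webrtc::GetEnv(jvm);
      if (jni == NULL)
        return;
      // Both helpers abort (CHECK) on failure, so there are no per-call
      // error paths to write here.
      jclass audio_record_class = webrtc::FindClass(
          jni, "org/webrtc/voiceengine/WebRtcAudioRecord");
      jmethodID init_recording = webrtc::GetMethodID(
          jni, audio_record_class, "InitRecording", "(I)I");
      // ... cache |init_recording| for later CallIntMethod() use.
    }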
@@ -29,6 +57,23 @@ class AttachThreadScoped {
   JNIEnv* env_;
 };
 
+// Scoped holder for global Java refs.
+template <class T>  // T is jclass, jobject, jintArray, etc.
+class ScopedGlobalRef {
+ public:
+  ScopedGlobalRef(JNIEnv* jni, T obj)
+      : jni_(jni), obj_(static_cast<T>(NewGlobalRef(jni, obj))) {}
+  ~ScopedGlobalRef() {
+    DeleteGlobalRef(jni_, obj_);
+  }
+  T operator*() const {
+    return obj_;
+  }
+
+ private:
+  JNIEnv* jni_;
+  T obj_;
+};
+
 }  // namespace webrtc
 
 #endif  // WEBRTC_MODULES_UTILITY_INTERFACE_HELPERS_ANDROID_H_
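ScopedGlobalRef ties NewGlobalRef/DeleteGlobalRef to scope lifetime, which is part of what lets the refactored C++ drop manual reference bookkeeping. A short sketch of the intended pattern; the class path and constructor signature string are assumptions:

    #include <jni.h>

    #include "webrtc/modules/utility/interface/helpers_android.h"

    void HoldClassAcrossCalls(JNIEnv* jni) {
      // FindClass returns a local ref; ScopedGlobalRef promotes it to a
      // global ref on construction and deletes that global ref when the
      // scope exits.
      webrtc::ScopedGlobalRef<jclass> audio_record_class(
          jni,
          webrtc::FindClass(jni, "org/webrtc/voiceengine/WebRtcAudioRecord"));
      // operator*() exposes the underlying reference.
      jmethodID ctor = webrtc::GetMethodID(
          jni, *audio_record_class, "<init>", "(J)V");
      // ... use |ctor| while the global ref is alive.
    }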
@@ -8,27 +8,89 @@
  * be found in the AUTHORS file in the root of the source tree.
  */
 
+#include "webrtc/base/checks.h"
 #include "webrtc/modules/utility/interface/helpers_android.h"
 
+#include <android/log.h>
 #include <assert.h>
+#include <pthread.h>
 #include <stddef.h>
+#include <unistd.h>
+
+#define TAG "HelpersAndroid"
+#define ALOGD(...) __android_log_print(ANDROID_LOG_DEBUG, TAG, __VA_ARGS__)
 
 namespace webrtc {
 
+JNIEnv* GetEnv(JavaVM* jvm) {
+  void* env = NULL;
+  jint status = jvm->GetEnv(&env, JNI_VERSION_1_6);
+  CHECK(((env != NULL) && (status == JNI_OK)) ||
+        ((env == NULL) && (status == JNI_EDETACHED)))
+      << "Unexpected GetEnv return: " << status << ":" << env;
+  return reinterpret_cast<JNIEnv*>(env);
+}
+
+jmethodID GetMethodID(
+    JNIEnv* jni, jclass c, const std::string& name, const char* signature) {
+  jmethodID m = jni->GetMethodID(c, name.c_str(), signature);
+  CHECK_EXCEPTION(jni) << "Error during GetMethodID: " << name << ", "
+                       << signature;
+  CHECK(m) << name << ", " << signature;
+  return m;
+}
+
+jclass FindClass(JNIEnv* jni, const std::string& name) {
+  jclass c = jni->FindClass(name.c_str());
+  CHECK_EXCEPTION(jni) << "Error during FindClass: " << name;
+  CHECK(c) << name;
+  return c;
+}
+
+jobject NewGlobalRef(JNIEnv* jni, jobject o) {
+  jobject ret = jni->NewGlobalRef(o);
+  CHECK_EXCEPTION(jni) << "Error during NewGlobalRef";
+  CHECK(ret);
+  return ret;
+}
+
+void DeleteGlobalRef(JNIEnv* jni, jobject o) {
+  jni->DeleteGlobalRef(o);
+  CHECK_EXCEPTION(jni) << "Error during DeleteGlobalRef";
+}
+
+std::string GetThreadId() {
+  char buf[21];  // Big enough to hold a kuint64max plus terminating NULL.
+  int thread_id = gettid();
+  CHECK_LT(snprintf(buf, sizeof(buf), "%i", thread_id),
+           static_cast<int>(sizeof(buf)))
+      << "Thread id is bigger than uint64??";
+  return std::string(buf);
+}
+
+std::string GetThreadInfo() {
+  return "@[tid=" + GetThreadId() + "]";
+}
+
 AttachThreadScoped::AttachThreadScoped(JavaVM* jvm)
     : attached_(false), jvm_(jvm), env_(NULL) {
-  jint ret_val = jvm->GetEnv(reinterpret_cast<void**>(&env_), JNI_VERSION_1_4);
-  if (ret_val == JNI_EDETACHED) {
-    // Attach the thread to the Java VM.
-    ret_val = jvm_->AttachCurrentThread(&env_, NULL);
-    attached_ = ret_val == JNI_OK;
-    assert(attached_);
+  env_ = GetEnv(jvm);
+  if (!env_) {
+    // Adding debug log here so we can track down potential leaks and figure
+    // out why we sometimes see "Native thread exiting without having called
+    // DetachCurrentThread" in logcat outputs.
+    ALOGD("Attaching thread to JVM%s", GetThreadInfo().c_str());
+    jint res = jvm->AttachCurrentThread(&env_, NULL);
+    attached_ = (res == JNI_OK);
+    CHECK(attached_) << "AttachCurrentThread failed: " << res;
   }
 }
 
 AttachThreadScoped::~AttachThreadScoped() {
-  if (attached_ && (jvm_->DetachCurrentThread() < 0)) {
-    assert(false);
+  if (attached_) {
+    ALOGD("Detaching thread from JVM%s", GetThreadInfo().c_str());
+    jint res = jvm_->DetachCurrentThread();
+    CHECK(res == JNI_OK) << "DetachCurrentThread failed: " << res;
+    CHECK(!GetEnv(jvm_));
   }
 }
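AttachThreadScoped is the piece that lets arbitrary native threads call back into Java: the constructor attaches the thread to the VM if needed, and the destructor detaches only when this instance performed the attach. A usage sketch, assuming an env() accessor on the class (its body is elided from this diff) and reusing EnableBuiltInAEC from the Java hunk; the function name is hypothetical:

    #include <jni.h>

    #include "webrtc/modules/utility/interface/helpers_android.h"

    void EnableAecFromNativeThread(JavaVM* jvm, jobject audio_record) {
      webrtc::AttachThreadScoped ats(jvm);
      JNIEnv* jni = ats.env();
      jclass cls = jni->GetObjectClass(audio_record);
      // "(Z)Z" matches the new boolean EnableBuiltInAEC(boolean) above.
      jmethodID enable_aec =
          webrtc::GetMethodID(jni, cls, "EnableBuiltInAEC", "(Z)Z");
      jni->CallBooleanMethod(audio_record, enable_aec, JNI_TRUE);
      CHECK_EXCEPTION(jni) << "EnableBuiltInAEC failed";
    }  // |ats| detaches the thread here if it attached it.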