Refactoring WebRTC Java/JNI audio recording in C++ and Java.
This is a big refactoring of the existing C++/JNI/Java support for audio
recording in native WebRTC:

- Removes unused code and old WEBRTC logging macros
- Now uses optimal sample rate and buffer size in Java AudioRecord (used
  hard-coded sample rate before)
- Makes code more in line with the implementation in Chrome
- Adds helper methods for JNI handling to improve readability
- Changes the threading model (high-prio audio thread now lives in Java-land
  and C++ only works as proxy)
- Adds basic thread checks
- Removes all locks in C++ land
- Removes all locks in Java
- Improves construction/destruction
- Additional cleanup

Tested using AppRTCDemo and WebRTCDemo APKs on N6, N5, N7, Samsung Galaxy S4
and Samsung Galaxy S4 mini (which uses 44.1kHz as native sample rate).

BUG=NONE
R=magjed@webrtc.org, perkj@webrtc.org, pthatcher@webrtc.org

Review URL: https://webrtc-codereview.appspot.com/33969004

Cr-Commit-Position: refs/heads/master@{#8325}
git-svn-id: http://webrtc.googlecode.com/svn/trunk@8325 4adac7df-926f-26a2-2b94-8c16560cd09d
Parent: c2d0473320
Commit: 62f6e75673
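The heart of the change is the new threading model: recording now runs on a dedicated high-priority Java thread that reads from AudioRecord into a direct ByteBuffer whose address the C++ side caches once, so each 10 ms callback only passes a byte count across JNI and no locks are needed on either side. Below is a minimal sketch of that pattern, distilled from the WebRtcAudioRecord.java hunk later in this diff; the class name `RecordingSketch` and its constructor arguments are hypothetical, while the two native methods mirror the ones declared in the patch.

```java
import java.nio.ByteBuffer;

import android.media.AudioRecord;
import android.os.Process;

class RecordingSketch {
  private final AudioRecord audioRecord;
  private final ByteBuffer byteBuffer;  // Direct buffer shared with C++.
  private final long nativeHandle;      // Pointer to the C++ counterpart.
  private volatile boolean keepAlive = true;

  RecordingSketch(AudioRecord audioRecord, int bytesPerBuffer, long nativeHandle) {
    this.audioRecord = audioRecord;
    this.nativeHandle = nativeHandle;
    byteBuffer = ByteBuffer.allocateDirect(bytesPerBuffer);
    // Cache the buffer address on the native side once, instead of paying
    // for GetDirectBufferAddress on every callback.
    nativeCacheDirectBufferAddress(byteBuffer, nativeHandle);
  }

  void start() {
    new Thread("AudioRecordJavaThread") {
      @Override
      public void run() {
        Process.setThreadPriority(Process.THREAD_PRIORITY_URGENT_AUDIO);
        audioRecord.startRecording();
        while (keepAlive) {
          int bytesRead = audioRecord.read(byteBuffer, byteBuffer.capacity());
          if (bytesRead == byteBuffer.capacity()) {
            // C++ acts only as a proxy that forwards the recorded samples.
            nativeDataIsRecorded(bytesRead, nativeHandle);
          }
        }
        audioRecord.stop();
      }
    }.start();
  }

  void stop() {
    keepAlive = false;  // The loop exits and the thread winds down.
  }

  private native void nativeCacheDirectBufferAddress(
      ByteBuffer byteBuffer, long nativeHandle);
  private native void nativeDataIsRecorded(int bytes, long nativeHandle);
}
```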
@@ -147,16 +147,16 @@ bool PeerConnectionFactory::Initialize() {
   cricket::DummyDeviceManager* device_manager(
       new cricket::DummyDeviceManager());
-  // TODO: Need to make sure only one VoE is created inside
-  // WebRtcMediaEngine.
-  cricket::MediaEngineInterface* media_engine(
-      cricket::WebRtcMediaEngineFactory::Create(default_adm_.get(),
-                                                NULL,  // No secondary adm.
-                                                video_encoder_factory_.get(),
-                                                video_decoder_factory_.get()));
+  cricket::MediaEngineInterface* media_engine =
+      worker_thread_->Invoke<cricket::MediaEngineInterface*>(rtc::Bind(
+          &PeerConnectionFactory::CreateMediaEngine_w, this));

   channel_manager_.reset(new cricket::ChannelManager(
       media_engine, device_manager, worker_thread_));

   channel_manager_->SetVideoRtxEnabled(true);
   if (!channel_manager_->Init()) {
     return false;
@@ -252,4 +252,11 @@ rtc::Thread* PeerConnectionFactory::worker_thread() {
   return worker_thread_;
 }

+cricket::MediaEngineInterface* PeerConnectionFactory::CreateMediaEngine_w() {
+  ASSERT(worker_thread_ == rtc::Thread::Current());
+  return cricket::WebRtcMediaEngineFactory::Create(
+      default_adm_.get(), NULL, video_encoder_factory_.get(),
+      video_decoder_factory_.get());
+}
+
 }  // namespace webrtc
@@ -90,6 +90,8 @@ class PeerConnectionFactory : public PeerConnectionFactoryInterface {
   virtual ~PeerConnectionFactory();

  private:
+  cricket::MediaEngineInterface* CreateMediaEngine_w();
+
   bool owns_ptrs_;
   bool wraps_current_thread_;
   rtc::Thread* signaling_thread_;
@@ -589,6 +589,7 @@ WebRtcVoiceEngine::~WebRtcVoiceEngine() {
 }

 bool WebRtcVoiceEngine::Init(rtc::Thread* worker_thread) {
+  ASSERT(worker_thread == rtc::Thread::Current());
   LOG(LS_INFO) << "WebRtcVoiceEngine::Init";
   bool res = InitInternal();
   if (res) {
@@ -149,6 +149,10 @@ ChannelManager::~ChannelManager() {
     // shutdown.
     ShutdownSrtp();
   }
+  // Always delete the media engine on the worker thread to match how it was
+  // created.
+  worker_thread_->Invoke<void>(Bind(
+      &ChannelManager::DeleteMediaEngine_w, this));
 }

 bool ChannelManager::SetVideoRtxEnabled(bool enable) {
@@ -215,89 +219,102 @@ bool ChannelManager::Init() {
   if (initialized_) {
     return false;
   }

-  ASSERT(worker_thread_ != NULL);
-  if (worker_thread_) {
-    if (worker_thread_ != rtc::Thread::Current()) {
-      // Do not allow invoking calls to other threads on the worker thread.
-      worker_thread_->Invoke<bool>(rtc::Bind(
-          &rtc::Thread::SetAllowBlockingCalls, worker_thread_, false));
-    }
-
-    if (media_engine_->Init(worker_thread_)) {
-      initialized_ = true;
-
-      // Now that we're initialized, apply any stored preferences. A preferred
-      // device might have been unplugged. In this case, we fallback to the
-      // default device but keep the user preferences. The preferences are
-      // changed only when the Javascript FE changes them.
-      const std::string preferred_audio_in_device = audio_in_device_;
-      const std::string preferred_audio_out_device = audio_out_device_;
-      const std::string preferred_camera_device = camera_device_;
-      Device device;
-      if (!device_manager_->GetAudioInputDevice(audio_in_device_, &device)) {
-        LOG(LS_WARNING) << "The preferred microphone '" << audio_in_device_
-                        << "' is unavailable. Fall back to the default.";
-        audio_in_device_ = DeviceManagerInterface::kDefaultDeviceName;
-      }
-      if (!device_manager_->GetAudioOutputDevice(audio_out_device_, &device)) {
-        LOG(LS_WARNING) << "The preferred speaker '" << audio_out_device_
-                        << "' is unavailable. Fall back to the default.";
-        audio_out_device_ = DeviceManagerInterface::kDefaultDeviceName;
-      }
-      if (!device_manager_->GetVideoCaptureDevice(camera_device_, &device)) {
-        if (!camera_device_.empty()) {
-          LOG(LS_WARNING) << "The preferred camera '" << camera_device_
-                          << "' is unavailable. Fall back to the default.";
-        }
-        camera_device_ = DeviceManagerInterface::kDefaultDeviceName;
-      }
-
-      if (!SetAudioOptions(audio_in_device_, audio_out_device_,
-                           audio_options_, audio_delay_offset_)) {
-        LOG(LS_WARNING) << "Failed to SetAudioOptions with"
-                        << " microphone: " << audio_in_device_
-                        << " speaker: " << audio_out_device_
-                        << " options: " << audio_options_.ToString()
-                        << " delay: " << audio_delay_offset_;
-      }
-
-      // If audio_output_volume_ has been set via SetOutputVolume(), set the
-      // audio output volume of the engine.
-      if (kNotSetOutputVolume != audio_output_volume_ &&
-          !SetOutputVolume(audio_output_volume_)) {
-        LOG(LS_WARNING) << "Failed to SetOutputVolume to "
-                        << audio_output_volume_;
-      }
-      if (!SetCaptureDevice(camera_device_) && !camera_device_.empty()) {
-        LOG(LS_WARNING) << "Failed to SetCaptureDevice with camera: "
-                        << camera_device_;
-      }
-
-      // Restore the user preferences.
-      audio_in_device_ = preferred_audio_in_device;
-      audio_out_device_ = preferred_audio_out_device;
-      camera_device_ = preferred_camera_device;
-
-      // Now apply the default video codec that has been set earlier.
-      if (default_video_encoder_config_.max_codec.id != 0) {
-        SetDefaultVideoEncoderConfig(default_video_encoder_config_);
-      }
-    }
+  if (!worker_thread_) {
+    return false;
+  }
+  if (worker_thread_ != rtc::Thread::Current()) {
+    // Do not allow invoking calls to other threads on the worker thread.
+    worker_thread_->Invoke<bool>(rtc::Bind(
+        &rtc::Thread::SetAllowBlockingCalls, worker_thread_, false));
+  }
+
+  initialized_ = worker_thread_->Invoke<bool>(Bind(
+      &ChannelManager::InitMediaEngine_w, this));
+  ASSERT(initialized_);
+  if (!initialized_) {
+    return false;
+  }
+
+  // Now that we're initialized, apply any stored preferences. A preferred
+  // device might have been unplugged. In this case, we fallback to the
+  // default device but keep the user preferences. The preferences are
+  // changed only when the Javascript FE changes them.
+  const std::string preferred_audio_in_device = audio_in_device_;
+  const std::string preferred_audio_out_device = audio_out_device_;
+  const std::string preferred_camera_device = camera_device_;
+  Device device;
+  if (!device_manager_->GetAudioInputDevice(audio_in_device_, &device)) {
+    LOG(LS_WARNING) << "The preferred microphone '" << audio_in_device_
+                    << "' is unavailable. Fall back to the default.";
+    audio_in_device_ = DeviceManagerInterface::kDefaultDeviceName;
+  }
+  if (!device_manager_->GetAudioOutputDevice(audio_out_device_, &device)) {
+    LOG(LS_WARNING) << "The preferred speaker '" << audio_out_device_
+                    << "' is unavailable. Fall back to the default.";
+    audio_out_device_ = DeviceManagerInterface::kDefaultDeviceName;
+  }
+  if (!device_manager_->GetVideoCaptureDevice(camera_device_, &device)) {
+    if (!camera_device_.empty()) {
+      LOG(LS_WARNING) << "The preferred camera '" << camera_device_
+                      << "' is unavailable. Fall back to the default.";
+    }
+    camera_device_ = DeviceManagerInterface::kDefaultDeviceName;
+  }
+
+  if (!SetAudioOptions(audio_in_device_, audio_out_device_,
+                       audio_options_, audio_delay_offset_)) {
+    LOG(LS_WARNING) << "Failed to SetAudioOptions with"
+                    << " microphone: " << audio_in_device_
+                    << " speaker: " << audio_out_device_
+                    << " options: " << audio_options_.ToString()
+                    << " delay: " << audio_delay_offset_;
+  }
+
+  // If audio_output_volume_ has been set via SetOutputVolume(), set the
+  // audio output volume of the engine.
+  if (kNotSetOutputVolume != audio_output_volume_ &&
+      !SetOutputVolume(audio_output_volume_)) {
+    LOG(LS_WARNING) << "Failed to SetOutputVolume to "
+                    << audio_output_volume_;
+  }
+  if (!SetCaptureDevice(camera_device_) && !camera_device_.empty()) {
+    LOG(LS_WARNING) << "Failed to SetCaptureDevice with camera: "
+                    << camera_device_;
+  }
+
+  // Restore the user preferences.
+  audio_in_device_ = preferred_audio_in_device;
+  audio_out_device_ = preferred_audio_out_device;
+  camera_device_ = preferred_camera_device;
+
+  // Now apply the default video codec that has been set earlier.
+  if (default_video_encoder_config_.max_codec.id != 0) {
+    SetDefaultVideoEncoderConfig(default_video_encoder_config_);
+  }
+
   return initialized_;
 }

+bool ChannelManager::InitMediaEngine_w() {
+  ASSERT(worker_thread_ == rtc::Thread::Current());
+  return (media_engine_->Init(worker_thread_));
+}
+
 void ChannelManager::Terminate() {
   ASSERT(initialized_);
   if (!initialized_) {
     return;
   }
   worker_thread_->Invoke<void>(Bind(&ChannelManager::Terminate_w, this));
-  media_engine_->Terminate();
   initialized_ = false;
 }

+void ChannelManager::DeleteMediaEngine_w() {
+  ASSERT(worker_thread_ == rtc::Thread::Current());
+  media_engine_.reset(NULL);
+}
+
 void ChannelManager::Terminate_w() {
   ASSERT(worker_thread_ == rtc::Thread::Current());
   // Need to destroy the voice/video channels
@@ -313,6 +330,7 @@ void ChannelManager::Terminate_w() {
   if (!SetCaptureDevice_w(NULL)) {
     LOG(LS_WARNING) << "failed to delete video capturer";
   }
+  media_engine_->Terminate();
 }

 VoiceChannel* ChannelManager::CreateVoiceChannel(
@@ -264,6 +264,8 @@ class ChannelManager : public rtc::MessageHandler,
                  DeviceManagerInterface* dm,
                  CaptureManager* cm,
                  rtc::Thread* worker_thread);
+  bool InitMediaEngine_w();
+  void DeleteMediaEngine_w();
   void Terminate_w();
   VoiceChannel* CreateVoiceChannel_w(
       BaseSession* session, const std::string& content_name, bool rtcp);
@@ -19,7 +19,7 @@ using icu::UnicodeString;
 jmethodID GetMethodID(JNIEnv* jni, jclass c, const std::string& name,
                       const char* signature) {
   jmethodID m = jni->GetMethodID(c, name.c_str(), signature);
-  CHECK_EXCEPTION(jni, "error during GetMethodID");
+  CHECK_JNI_EXCEPTION(jni, "error during GetMethodID");
   return m;
 }
@@ -37,11 +37,11 @@ jlong jlongFromPointer(void* ptr) {
 // Given a (UTF-16) jstring return a new UTF-8 native string.
 std::string JavaToStdString(JNIEnv* jni, const jstring& j_string) {
   const jchar* jchars = jni->GetStringChars(j_string, NULL);
-  CHECK_EXCEPTION(jni, "Error during GetStringChars");
+  CHECK_JNI_EXCEPTION(jni, "Error during GetStringChars");
   UnicodeString ustr(jchars, jni->GetStringLength(j_string));
-  CHECK_EXCEPTION(jni, "Error during GetStringLength");
+  CHECK_JNI_EXCEPTION(jni, "Error during GetStringLength");
   jni->ReleaseStringChars(j_string, jchars);
-  CHECK_EXCEPTION(jni, "Error during ReleaseStringChars");
+  CHECK_JNI_EXCEPTION(jni, "Error during ReleaseStringChars");
   std::string ret;
   return ustr.toUTF8String(ret);
 }
@@ -72,10 +72,10 @@ jclass ClassReferenceHolder::GetClass(const std::string& name) {

 void ClassReferenceHolder::LoadClass(JNIEnv* jni, const std::string& name) {
   jclass localRef = jni->FindClass(name.c_str());
-  CHECK_EXCEPTION(jni, "Could not load class");
+  CHECK_JNI_EXCEPTION(jni, "Could not load class");
   CHECK(localRef, name.c_str());
   jclass globalRef = reinterpret_cast<jclass>(jni->NewGlobalRef(localRef));
-  CHECK_EXCEPTION(jni, "error during NewGlobalRef");
+  CHECK_JNI_EXCEPTION(jni, "error during NewGlobalRef");
   CHECK(globalRef, name.c_str());
   bool inserted = classes_.insert(std::make_pair(name, globalRef)).second;
   CHECK(inserted, "Duplicate class name");
@@ -34,7 +34,7 @@

 // Abort the process if |jni| has a Java exception pending, emitting |msg| to
 // logcat.
-#define CHECK_EXCEPTION(jni, msg) \
+#define CHECK_JNI_EXCEPTION(jni, msg) \
   if (0) { \
   } else { \
     if (jni->ExceptionCheck()) { \
@@ -115,7 +115,7 @@ class VideoDecodeEncodeObserver : public webrtc::ViEDecoderObserver,
   jmethodID j_codec_ctor = GetMethodID(jni, j_codec_class, "<init>", "(J)V");
   jobject j_codec =
       jni->NewObject(j_codec_class, j_codec_ctor, jlongFromPointer(codec));
-  CHECK_EXCEPTION(jni, "error during NewObject");
+  CHECK_JNI_EXCEPTION(jni, "error during NewObject");
   jni->CallVoidMethod(j_observer_, incoming_codec_changed_, video_channel,
                       j_codec);
 }
@@ -456,7 +456,7 @@ JOWW(jobject, VideoEngine_getCodec)(JNIEnv* jni, jobject j_vie, jint index) {
   jmethodID j_codec_ctor = GetMethodID(jni, j_codec_class, "<init>", "(J)V");
   jobject j_codec =
       jni->NewObject(j_codec_class, j_codec_ctor, jlongFromPointer(codec));
-  CHECK_EXCEPTION(jni, "error during NewObject");
+  CHECK_JNI_EXCEPTION(jni, "error during NewObject");
   return j_codec;
 }
@@ -515,7 +515,7 @@ JOWW(jobject,
   jmethodID j_camera_ctor = GetMethodID(jni, j_camera_class, "<init>", "(J)V");
   jobject j_camera = jni->NewObject(j_camera_class, j_camera_ctor,
                                     jlongFromPointer(camera_info));
-  CHECK_EXCEPTION(jni, "error during NewObject");
+  CHECK_JNI_EXCEPTION(jni, "error during NewObject");
   return j_camera;
 }
@@ -610,7 +610,7 @@ JOWW(jobject, VideoEngine_getReceivedRtcpStatistics)(JNIEnv* jni, jobject j_vie,
       jni->NewObject(j_rtcp_statistics_class, j_rtcp_statistics_ctor,
                      fraction_lost, cumulative_lost, extended_max, jitter,
                      static_cast<int>(rtt_ms));
-  CHECK_EXCEPTION(jni, "error during NewObject");
+  CHECK_JNI_EXCEPTION(jni, "error during NewObject");
   return j_rtcp_statistics;
 }
@@ -327,7 +327,7 @@ JOWW(jobject, VoiceEngine_getCodec)(JNIEnv* jni, jobject j_voe, jint index) {
   jmethodID j_codec_ctor = GetMethodID(jni, j_codec_class, "<init>", "(J)V");
   jobject j_codec =
       jni->NewObject(j_codec_class, j_codec_ctor, jlongFromPointer(codec));
-  CHECK_EXCEPTION(jni, "error during NewObject");
+  CHECK_JNI_EXCEPTION(jni, "error during NewObject");
   return j_codec;
 }
@@ -178,6 +178,7 @@ source_set("audio_device") {
   }

   deps = [
+    "../../base:rtc_base_approved",
     "../../common_audio",
     "../../system_wrappers",
     "../utility",
@@ -15,6 +15,7 @@ namespace webrtc {

 enum {
   kDefaultSampleRate = 44100,
   kBitsPerSample = 16,
   kNumChannels = 1,
+  kDefaultBufSizeInSamples = kDefaultSampleRate * 10 / 1000,
 };
@@ -11,8 +11,8 @@
 #ifndef WEBRTC_MODULES_AUDIO_DEVICE_ANDROID_AUDIO_DEVICE_TEMPLATE_H_
 #define WEBRTC_MODULES_AUDIO_DEVICE_ANDROID_AUDIO_DEVICE_TEMPLATE_H_

+#include "webrtc/base/checks.h"
 #include "webrtc/modules/audio_device/audio_device_generic.h"
-#include "webrtc/system_wrappers/interface/trace.h"

 namespace webrtc {
@@ -22,13 +22,11 @@ namespace webrtc {
 template <class InputType, class OutputType>
 class AudioDeviceTemplate : public AudioDeviceGeneric {
  public:
-  static int32_t SetAndroidAudioDeviceObjects(void* javaVM,
-                                              void* env,
-                                              void* context) {
-    if (OutputType::SetAndroidAudioDeviceObjects(javaVM, env, context) == -1) {
-      return -1;
-    }
-    return InputType::SetAndroidAudioDeviceObjects(javaVM, env, context);
+  static void SetAndroidAudioDeviceObjects(void* javaVM,
+                                           void* env,
+                                           void* context) {
+    OutputType::SetAndroidAudioDeviceObjects(javaVM, env, context);
+    InputType::SetAndroidAudioDeviceObjects(javaVM, env, context);
   }

   static void ClearAndroidAudioDeviceObjects() {
@@ -38,7 +36,8 @@ class AudioDeviceTemplate : public AudioDeviceGeneric {

   explicit AudioDeviceTemplate(const int32_t id)
       : output_(id),
-        input_(id, &output_) {
+        // TODO(henrika): provide proper delay estimate using input_(&output_).
+        input_() {
   }

   virtual ~AudioDeviceTemplate() {
@@ -59,7 +58,7 @@ class AudioDeviceTemplate : public AudioDeviceGeneric {
   }

   bool Initialized() const {
-    return output_.Initialized() && input_.Initialized();
+    return output_.Initialized();
   }

   int16_t PlayoutDevices() {
@@ -67,7 +66,7 @@ class AudioDeviceTemplate : public AudioDeviceGeneric {
   }

   int16_t RecordingDevices() {
-    return input_.RecordingDevices();
+    return 1;
   }

   int32_t PlayoutDeviceName(
@@ -81,7 +80,7 @@ class AudioDeviceTemplate : public AudioDeviceGeneric {
       uint16_t index,
       char name[kAdmMaxDeviceNameSize],
       char guid[kAdmMaxGuidSize]) {
-    return input_.RecordingDeviceName(index, name, guid);
+    return -1;
   }

   int32_t SetPlayoutDevice(uint16_t index) {
@@ -94,12 +93,15 @@ class AudioDeviceTemplate : public AudioDeviceGeneric {
   }

   int32_t SetRecordingDevice(uint16_t index) {
-    return input_.SetRecordingDevice(index);
+    // OK to use but it has no effect currently since device selection is
+    // done using Android APIs instead.
+    return 0;
   }

   int32_t SetRecordingDevice(
       AudioDeviceModule::WindowsDeviceType device) {
-    return input_.SetRecordingDevice(device);
+    FATAL() << "Should never be called";
+    return -1;
   }

   int32_t PlayoutIsAvailable(
@@ -117,7 +119,8 @@ class AudioDeviceTemplate : public AudioDeviceGeneric {

   int32_t RecordingIsAvailable(
       bool& available) {  // NOLINT
-    return input_.RecordingIsAvailable(available);
+    available = true;
+    return 0;
   }

   int32_t InitRecording() {
@@ -153,17 +156,19 @@ class AudioDeviceTemplate : public AudioDeviceGeneric {
   }

   int32_t SetAGC(bool enable) {
-    return input_.SetAGC(enable);
+    if (enable) {
+      FATAL() << "Should never be called";
+    }
+    return -1;
   }

   bool AGC() const {
-    return input_.AGC();
+    return false;
   }

   int32_t SetWaveOutVolume(uint16_t volumeLeft,
                            uint16_t volumeRight) {
-    WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, 0,
-                 "  API call not supported on this platform");
+    FATAL() << "Should never be called";
     return -1;
   }

@@ -184,11 +189,11 @@ class AudioDeviceTemplate : public AudioDeviceGeneric {
   }

   int32_t InitMicrophone() {
-    return input_.InitMicrophone();
+    return 0;
   }

   bool MicrophoneIsInitialized() const {
-    return input_.MicrophoneIsInitialized();
+    return true;
   }

   int32_t SpeakerVolumeIsAvailable(
@@ -222,31 +227,38 @@ class AudioDeviceTemplate : public AudioDeviceGeneric {

   int32_t MicrophoneVolumeIsAvailable(
       bool& available) {  // NOLINT
-    return input_.MicrophoneVolumeIsAvailable(available);
+    available = false;
+    FATAL() << "Should never be called";
+    return -1;
   }

   int32_t SetMicrophoneVolume(uint32_t volume) {
-    return input_.SetMicrophoneVolume(volume);
+    FATAL() << "Should never be called";
+    return -1;
   }

   int32_t MicrophoneVolume(
       uint32_t& volume) const {  // NOLINT
-    return input_.MicrophoneVolume(volume);
+    FATAL() << "Should never be called";
+    return -1;
   }

   int32_t MaxMicrophoneVolume(
       uint32_t& maxVolume) const {  // NOLINT
-    return input_.MaxMicrophoneVolume(maxVolume);
+    FATAL() << "Should never be called";
+    return -1;
   }

   int32_t MinMicrophoneVolume(
       uint32_t& minVolume) const {  // NOLINT
-    return input_.MinMicrophoneVolume(minVolume);
+    FATAL() << "Should never be called";
+    return -1;
   }

   int32_t MicrophoneVolumeStepSize(
       uint16_t& stepSize) const {  // NOLINT
-    return input_.MicrophoneVolumeStepSize(stepSize);
+    FATAL() << "Should never be called";
+    return -1;
   }

   int32_t SpeakerMuteIsAvailable(
@@ -265,30 +277,36 @@ class AudioDeviceTemplate : public AudioDeviceGeneric {

   int32_t MicrophoneMuteIsAvailable(
       bool& available) {  // NOLINT
-    return input_.MicrophoneMuteIsAvailable(available);
+    FATAL() << "Not implemented";
+    return -1;
   }

   int32_t SetMicrophoneMute(bool enable) {
-    return input_.SetMicrophoneMute(enable);
+    FATAL() << "Not implemented";
+    return -1;
   }

   int32_t MicrophoneMute(
       bool& enabled) const {  // NOLINT
-    return input_.MicrophoneMute(enabled);
+    FATAL() << "Not implemented";
+    return -1;
   }

   int32_t MicrophoneBoostIsAvailable(
       bool& available) {  // NOLINT
-    return input_.MicrophoneBoostIsAvailable(available);
+    FATAL() << "Should never be called";
+    return -1;
   }

   int32_t SetMicrophoneBoost(bool enable) {
-    return input_.SetMicrophoneBoost(enable);
+    FATAL() << "Should never be called";
+    return -1;
   }

   int32_t MicrophoneBoost(
       bool& enabled) const {  // NOLINT
-    return input_.MicrophoneBoost(enabled);
+    FATAL() << "Should never be called";
+    return -1;
   }

   int32_t StereoPlayoutIsAvailable(
@@ -307,16 +325,18 @@ class AudioDeviceTemplate : public AudioDeviceGeneric {

   int32_t StereoRecordingIsAvailable(
       bool& available) {  // NOLINT
-    return input_.StereoRecordingIsAvailable(available);
+    available = false;
+    return 0;
   }

   int32_t SetStereoRecording(bool enable) {
-    return input_.SetStereoRecording(enable);
+    return -1;
   }

   int32_t StereoRecording(
       bool& enabled) const {  // NOLINT
-    return input_.StereoRecording(enabled);
+    enabled = false;
+    return 0;
   }

   int32_t SetPlayoutBuffer(
@@ -343,8 +363,7 @@ class AudioDeviceTemplate : public AudioDeviceGeneric {

   int32_t CPULoad(
       uint16_t& load) const {  // NOLINT
-    WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, 0,
-                 "  API call not supported on this platform");
+    FATAL() << "Should never be called";
     return -1;
   }

@@ -357,11 +376,11 @@ class AudioDeviceTemplate : public AudioDeviceGeneric {
   }

   bool RecordingWarning() const {
-    return input_.RecordingWarning();
+    return false;
   }

   bool RecordingError() const {
-    return input_.RecordingError();
+    return false;
   }

   void ClearPlayoutWarning() {
@@ -372,13 +391,9 @@ class AudioDeviceTemplate : public AudioDeviceGeneric {
     return output_.ClearPlayoutError();
   }

-  void ClearRecordingWarning() {
-    return input_.ClearRecordingWarning();
-  }
+  void ClearRecordingWarning() {}

-  void ClearRecordingError() {
-    return input_.ClearRecordingError();
-  }
+  void ClearRecordingError() {}

   void AttachAudioBuffer(
       AudioDeviceBuffer* audioBuffer) {
@@ -386,11 +401,6 @@ class AudioDeviceTemplate : public AudioDeviceGeneric {
     input_.AttachAudioBuffer(audioBuffer);
   }

-  int32_t SetRecordingSampleRate(
-      const uint32_t samplesPerSec) {
-    return input_.SetRecordingSampleRate(samplesPerSec);
-  }
-
   int32_t SetPlayoutSampleRate(
       const uint32_t samplesPerSec) {
     return output_.SetPlayoutSampleRate(samplesPerSec);
@@ -8,43 +8,17 @@
  * be found in the AUTHORS file in the root of the source tree.
  */

-/*
- * Android audio device utility implementation
- */
-
 #include "webrtc/modules/audio_device/android/audio_device_utility_android.h"

-#include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
-#include "webrtc/system_wrappers/interface/trace.h"
+namespace webrtc {

-namespace webrtc
-{
+AudioDeviceUtilityAndroid::AudioDeviceUtilityAndroid(const int32_t id) {}

-AudioDeviceUtilityAndroid::AudioDeviceUtilityAndroid(const int32_t id) :
-    _critSect(*CriticalSectionWrapper::CreateCriticalSection()), _id(id)
-{
-    WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, id,
-                 "%s created", __FUNCTION__);
-}
+AudioDeviceUtilityAndroid::~AudioDeviceUtilityAndroid() {}

-AudioDeviceUtilityAndroid::~AudioDeviceUtilityAndroid()
-{
-    WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, _id,
-                 "%s destroyed", __FUNCTION__);
-    {
-        CriticalSectionScoped lock(&_critSect);
-    }
-
-    delete &_critSect;
-}
-
-int32_t AudioDeviceUtilityAndroid::Init()
-{
-
-    WEBRTC_TRACE(kTraceStateInfo, kTraceAudioDevice, _id,
-                 "  OS info: %s", "Android");
-
-    return 0;
+int32_t AudioDeviceUtilityAndroid::Init() {
+  return 0;
 }

 }  // namespace webrtc
@@ -15,24 +15,22 @@
 #ifndef WEBRTC_AUDIO_DEVICE_AUDIO_DEVICE_UTILITY_ANDROID_H
 #define WEBRTC_AUDIO_DEVICE_AUDIO_DEVICE_UTILITY_ANDROID_H

 #include <jni.h>

+#include "webrtc/base/checks.h"
 #include "webrtc/modules/audio_device/audio_device_utility.h"
 #include "webrtc/modules/audio_device/include/audio_device.h"

-namespace webrtc
-{
-class CriticalSectionWrapper;
+namespace webrtc {

-class AudioDeviceUtilityAndroid: public AudioDeviceUtility
-{
- public:
+// TODO(henrika): this utility class is not used but I would like to keep this
+// file for the other helper methods which are unique for Android.
+class AudioDeviceUtilityAndroid: public AudioDeviceUtility {
+ public:
   AudioDeviceUtilityAndroid(const int32_t id);
   ~AudioDeviceUtilityAndroid();

   virtual int32_t Init();
-
- private:
-  CriticalSectionWrapper& _critSect;
-  int32_t _id;
 };

 }  // namespace webrtc
(File diff suppressed because it is too large.)
@@ -13,166 +13,143 @@

 #include <jni.h>

-#include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
+#include "webrtc/base/thread_checker.h"
 #include "webrtc/modules/audio_device/include/audio_device_defines.h"
 #include "webrtc/modules/audio_device/audio_device_generic.h"
+#include "webrtc/modules/utility/interface/helpers_android.h"

 namespace webrtc {

-class EventWrapper;
-class ThreadWrapper;
 class PlayoutDelayProvider;

-const uint32_t N_REC_SAMPLES_PER_SEC = 16000;  // Default is 16 kHz
-const uint32_t N_REC_CHANNELS = 1;  // default is mono recording
-const uint32_t REC_BUF_SIZE_IN_SAMPLES = 480;  // Handle max 10 ms @ 48 kHz
-
+// Implements 16-bit mono PCM audio input support for Android using the Java
+// AudioRecord interface. Most of the work is done by its Java counterpart in
+// WebRtcAudioRecord.java. This class is created and lives on a thread in
+// C++-land, but recorded audio buffers are delivered on a high-priority
+// thread managed by the Java class.
+//
+// The Java class makes use of AudioEffect features (mainly AEC) which are
+// first available in Jelly Bean. If it is instantiated running against earlier
+// SDKs, the AEC provided by the APM in WebRTC must be used and enabled
+// separately instead.
+//
+// An instance must be created and destroyed on one and the same thread.
+// All public methods must also be called on the same thread. A thread checker
+// will DCHECK if any method is called on an invalid thread.
+// It is possible to call the two static methods (SetAndroidAudioDeviceObjects
+// and ClearAndroidAudioDeviceObjects) from a different thread but both will
+// CHECK that the calling thread is attached to a Java VM.
+//
+// All methods use AttachThreadScoped to attach to a Java VM if needed and then
+// detach when the method goes out of scope. We do so because this class does
+// not own the thread it is created and called on, and other objects on the
+// same thread might put us in a detached state at any time.
 class AudioRecordJni {
  public:
-  static int32_t SetAndroidAudioDeviceObjects(void* javaVM, void* env,
-                                              void* context);
+  // Use the invocation API to allow the native application to use the JNI
+  // interface pointer to access VM features.
+  // |jvm| denotes the Java VM, |env| is a pointer to the JNI interface pointer
+  // and |context| corresponds to android.content.Context in Java.
+  // This method also sets a global jclass object, |g_audio_record_class| for
+  // the "org/webrtc/voiceengine/WebRtcAudioRecord"-class.
+  static void SetAndroidAudioDeviceObjects(void* jvm, void* env, void* context);
+  // Always call this method after the object has been destructed. It deletes
+  // existing global references and enables garbage collection.
   static void ClearAndroidAudioDeviceObjects();

-  AudioRecordJni(const int32_t id, PlayoutDelayProvider* delay_provider);
+  AudioRecordJni();
   ~AudioRecordJni();

-  // Main initializaton and termination
   int32_t Init();
   int32_t Terminate();
-  bool Initialized() const { return _initialized; }
-
-  // Device enumeration
-  int16_t RecordingDevices() { return 1; }  // There is one device only
-  int32_t RecordingDeviceName(uint16_t index,
-                              char name[kAdmMaxDeviceNameSize],
-                              char guid[kAdmMaxGuidSize]);
-
-  // Device selection
-  int32_t SetRecordingDevice(uint16_t index);
-  int32_t SetRecordingDevice(
-      AudioDeviceModule::WindowsDeviceType device);

-  // Audio transport initialization
-  int32_t RecordingIsAvailable(bool& available);  // NOLINT
   int32_t InitRecording();
-  bool RecordingIsInitialized() const { return _recIsInitialized; }
+  bool RecordingIsInitialized() const { return initialized_; }

-  // Audio transport control
   int32_t StartRecording();
-  int32_t StopRecording();
-  bool Recording() const { return _recording; }
+  int32_t StopRecording();
+  bool Recording() const { return recording_; }

-  // Microphone Automatic Gain Control (AGC)
-  int32_t SetAGC(bool enable);
-  bool AGC() const { return _AGC; }
+  int32_t RecordingDelay(uint16_t& delayMS) const;

-  // Audio mixer initialization
-  int32_t InitMicrophone();
-  bool MicrophoneIsInitialized() const { return _micIsInitialized; }
-
-  // Microphone volume controls
-  int32_t MicrophoneVolumeIsAvailable(bool& available);  // NOLINT
+  // TODO(leozwang): Add microphone volume control when OpenSL APIs
+  // are available.
   int32_t SetMicrophoneVolume(uint32_t volume);
-  int32_t MicrophoneVolume(uint32_t& volume) const;  // NOLINT
   int32_t MaxMicrophoneVolume(uint32_t& maxVolume) const;  // NOLINT
   int32_t MinMicrophoneVolume(uint32_t& minVolume) const;  // NOLINT
-  int32_t MicrophoneVolumeStepSize(
-      uint16_t& stepSize) const;  // NOLINT
-
-  // Microphone mute control
-  int32_t MicrophoneMuteIsAvailable(bool& available);  // NOLINT
-  int32_t SetMicrophoneMute(bool enable);
-  int32_t MicrophoneMute(bool& enabled) const;  // NOLINT
-
-  // Microphone boost control
-  int32_t MicrophoneBoostIsAvailable(bool& available);  // NOLINT
-  int32_t SetMicrophoneBoost(bool enable);
-  int32_t MicrophoneBoost(bool& enabled) const;  // NOLINT
-
-  // Stereo support
-  int32_t StereoRecordingIsAvailable(bool& available);  // NOLINT
-  int32_t SetStereoRecording(bool enable);
-  int32_t StereoRecording(bool& enabled) const;  // NOLINT
-
-  // Delay information and control
-  int32_t RecordingDelay(uint16_t& delayMS) const;  // NOLINT
-
-  bool RecordingWarning() const;
-  bool RecordingError() const;
-  void ClearRecordingWarning();
-  void ClearRecordingError();

-  // Attach audio buffer
   void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer);

-  int32_t SetRecordingSampleRate(const uint32_t samplesPerSec);
-
   bool BuiltInAECIsAvailable() const;
   int32_t EnableBuiltInAEC(bool enable);

  private:
-  void Lock() EXCLUSIVE_LOCK_FUNCTION(_critSect) {
-    _critSect.Enter();
-  }
-  void UnLock() UNLOCK_FUNCTION(_critSect) {
-    _critSect.Leave();
-  }
+  // Called from Java side so we can cache the address of the Java-managed
+  // |byte_buffer| in |direct_buffer_address_|. The size of the buffer
+  // is also stored in |direct_buffer_capacity_in_bytes_|.
+  // This method will be called by the WebRtcAudioRecord constructor, i.e.,
+  // on the same thread that this object is created on.
+  static void JNICALL CacheDirectBufferAddress(
+      JNIEnv* env, jobject obj, jobject byte_buffer, jlong nativeAudioRecord);
+  void OnCacheDirectBufferAddress(JNIEnv* env, jobject byte_buffer);

-  int32_t InitJavaResources();
-  int32_t InitSampleRate();
+  // Called periodically by the Java based WebRtcAudioRecord object when
+  // recording has started. Each call indicates that there are |length| new
+  // bytes recorded in the memory area |direct_buffer_address_| and it is
+  // now time to send these to the consumer.
+  // This method is called on a high-priority thread from Java. The name of
+  // the thread is 'AudioRecordThread'.
+  static void JNICALL DataIsRecorded(
+      JNIEnv* env, jobject obj, jint length, jlong nativeAudioRecord);
+  void OnDataIsRecorded(int length);

-  static bool RecThreadFunc(void*);
-  bool RecThreadProcess();
+  // Returns true if SetAndroidAudioDeviceObjects() has been called
+  // successfully.
+  bool HasDeviceObjects();

-  // TODO(leozwang): Android holds only one JVM, all these jni handling
-  // will be consolidated into a single place to make it consistant and
-  // reliable. Chromium has a good example at base/android.
-  static JavaVM* globalJvm;
-  static JNIEnv* globalJNIEnv;
-  static jobject globalContext;
-  static jclass globalScClass;
+  // Called from the constructor. Defines the |j_audio_record_| member.
+  void CreateJavaInstance();

-  JavaVM* _javaVM;  // denotes a Java VM
-  JNIEnv* _jniEnvRec;  // The JNI env for recording thread
-  jclass _javaScClass;  // AudioDeviceAndroid class
-  jobject _javaScObj;  // AudioDeviceAndroid object
-  jobject _javaRecBuffer;
-  void* _javaDirectRecBuffer;  // Direct buffer pointer to rec buffer
-  jmethodID _javaMidRecAudio;  // Method ID of rec in AudioDeviceAndroid
+  // Returns the native, or optimal, sample rate reported by the audio input
+  // device.
+  int GetNativeSampleRate();

-  AudioDeviceBuffer* _ptrAudioBuffer;
-  CriticalSectionWrapper& _critSect;
-  int32_t _id;
-  PlayoutDelayProvider* _delay_provider;
-  bool _initialized;
+  // Stores thread ID in constructor.
+  // We can then use ThreadChecker::CalledOnValidThread() to ensure that
+  // other methods are called from the same thread.
+  // Currently only does DCHECK(thread_checker_.CalledOnValidThread()).
+  rtc::ThreadChecker thread_checker_;

-  EventWrapper& _timeEventRec;
-  EventWrapper& _recStartStopEvent;
-  ThreadWrapper* _ptrThreadRec;
-  uint32_t _recThreadID;
-  bool _recThreadIsInitialized;
-  bool _shutdownRecThread;
+  // Stores thread ID in first call to OnDataIsRecorded() from high-priority
+  // thread in Java. Detached during construction of this object.
+  rtc::ThreadChecker thread_checker_java_;

-  int8_t _recBuffer[2 * REC_BUF_SIZE_IN_SAMPLES];
-  bool _recordingDeviceIsSpecified;
-
-  bool _recording;
-  bool _recIsInitialized;
-  bool _micIsInitialized;
+  // Should return the current playout delay.
+  // TODO(henrika): fix on Android. Reports zero today.
+  // PlayoutDelayProvider* delay_provider_;

-  bool _startRec;
+  // The Java WebRtcAudioRecord instance.
+  jobject j_audio_record_;

-  uint16_t _recWarning;
-  uint16_t _recError;
+  // Cached copy of address to direct audio buffer owned by |j_audio_record_|.
+  void* direct_buffer_address_;

-  uint16_t _delayRecording;
+  // Number of bytes in the direct audio buffer owned by |j_audio_record_|.
+  int direct_buffer_capacity_in_bytes_;

-  bool _AGC;
+  // Number of audio frames per audio buffer. Each audio frame corresponds to
+  // one sample of PCM mono data at 16 bits per sample. Hence, each audio
+  // frame contains 2 bytes (given that the Java layer only supports mono).
+  // Example: 480 for 48000 Hz or 441 for 44100 Hz.
+  int frames_per_buffer_;

-  uint16_t _samplingFreqIn;  // Sampling frequency for Mic
-  int _recAudioSource;
+  bool initialized_;
+
+  bool recording_;
+
+  // Raw pointer handle provided to us in AttachAudioBuffer(). Owned by the
+  // AudioDeviceModuleImpl class and called by AudioDeviceModuleImpl::Create().
+  AudioDeviceBuffer* audio_device_buffer_;
+
+  // Native sample rate set in AttachAudioBuffer() which uses JNI to ask the
+  // Java layer for the best possible sample rate for this particular device
+  // and audio configuration.
+  int sample_rate_hz_;
 };

@@ -107,6 +107,7 @@ class AudioTrackJni : public PlayoutDelayProvider {
   int32_t GetLoudspeakerStatus(bool& enable) const;  // NOLINT

  protected:
+  // TODO(henrika): improve this estimate.
   virtual int PlayoutDelayMs() { return 0; }

  private:
@@ -10,8 +10,13 @@

 package org.webrtc.voiceengine;

+import java.lang.System;
+import java.lang.Thread;
 import java.nio.ByteBuffer;
-import java.util.concurrent.locks.ReentrantLock;
+import java.util.concurrent.TimeUnit;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.Set;

 import android.content.Context;
 import android.media.AudioFormat;
@ -22,258 +27,330 @@ import android.media.AudioManager;
|
||||
import android.media.AudioRecord;
|
||||
import android.media.MediaRecorder.AudioSource;
|
||||
import android.os.Build;
|
||||
import android.os.Process;
|
||||
import android.os.SystemClock;
|
||||
import android.util.Log;
|
||||
|
||||
class WebRtcAudioRecord {
|
||||
private AudioRecord _audioRecord = null;
|
||||
class WebRtcAudioRecord {
|
||||
private static final boolean DEBUG = false;
|
||||
|
||||
private Context _context;
|
||||
private static final String TAG = "WebRtcAudioRecord";
|
||||
|
||||
private ByteBuffer _recBuffer;
|
||||
private byte[] _tempBufRec;
|
||||
// Use 44.1kHz as the default sampling rate.
|
||||
private static final int SAMPLE_RATE_HZ = 44100;
|
||||
|
||||
private final ReentrantLock _recLock = new ReentrantLock();
|
||||
// Mono recording is default.
|
||||
private static final int CHANNELS = 1;
|
||||
|
||||
private boolean _doRecInit = true;
|
||||
private boolean _isRecording = false;
|
||||
// Default audio data format is PCM 16 bit per sample.
|
||||
// Guaranteed to be supported by all devices.
|
||||
private static final int BITS_PER_SAMPLE = 16;
|
||||
|
||||
private int _bufferedRecSamples = 0;
|
||||
// Number of bytes per audio frame.
|
||||
// Example: 16-bit PCM in stereo => 2*(16/8)=4 [bytes/frame]
|
||||
private static final int BYTES_PER_FRAME = CHANNELS * (BITS_PER_SAMPLE / 8);
|
||||
|
||||
private AcousticEchoCanceler _aec = null;
|
||||
private boolean _useBuiltInAEC = false;
|
||||
// Requested size of each recorded buffer provided to the client.
|
||||
private static final int CALLBACK_BUFFER_SIZE_MS = 10;
|
||||
|
||||
private static boolean runningOnJellyBeanOrHigher() {
|
||||
return Build.VERSION.SDK_INT >= Build.VERSION_CODES.JELLY_BEAN;
|
||||
// Average number of callbacks per second.
|
||||
private static final int BUFFERS_PER_SECOND = 1000 / CALLBACK_BUFFER_SIZE_MS;
|
||||
|
||||
private ByteBuffer byteBuffer;
|
||||
private final int bytesPerBuffer;
|
||||
private final int framesPerBuffer;
|
||||
private final int sampleRate;
|
||||
|
||||
private final long nativeAudioRecord;
|
||||
private final AudioManager audioManager;
|
||||
private final Context context;
|
||||
|
||||
private AudioRecord audioRecord = null;
|
||||
private AudioRecordThread audioThread = null;
|
||||
|
||||
private AcousticEchoCanceler aec = null;
|
||||
private boolean useBuiltInAEC = false;
|
||||
|
||||
private final Set<Long> threadIds = new HashSet<Long>();
|
||||
|
||||
private static boolean runningOnJellyBeanOrHigher() {
|
||||
return Build.VERSION.SDK_INT >= Build.VERSION_CODES.JELLY_BEAN;
|
||||
}
|
||||
|
||||
private static boolean runningOnJellyBeanMR1OrHigher() {
|
||||
return Build.VERSION.SDK_INT >= Build.VERSION_CODES.JELLY_BEAN_MR1;
|
||||
}
|
||||
|
||||
/**
|
||||
* Audio thread which keeps calling ByteBuffer.read() waiting for audio
|
||||
* to be recorded. Feeds recorded data to the native counterpart as a
|
||||
* periodic sequence of callbacks using DataIsRecorded().
|
||||
* This thread uses a Process.THREAD_PRIORITY_URGENT_AUDIO priority.
|
||||
*/
|
||||
private class AudioRecordThread extends Thread {
|
||||
private volatile boolean keepAlive = true;
|
||||
|
||||
public AudioRecordThread(String name) {
|
||||
super(name);
|
||||
}
|
||||
|
||||
WebRtcAudioRecord() {
|
||||
try {
|
||||
_recBuffer = ByteBuffer.allocateDirect(2 * 480); // Max 10 ms @ 48
|
||||
// kHz
|
||||
} catch (Exception e) {
|
||||
DoLog(e.getMessage());
|
||||
@Override
|
||||
public void run() {
|
||||
Process.setThreadPriority(Process.THREAD_PRIORITY_URGENT_AUDIO);
|
||||
DoLog("AudioRecordThread" + getThreadInfo());
|
||||
AddThreadId();
|
||||
|
||||
try {
|
||||
audioRecord.startRecording();
|
||||
} catch (IllegalStateException e) {
|
||||
DoLogErr("AudioRecord.startRecording failed: " + e.getMessage());
|
||||
return;
|
||||
}
|
||||
assertIsTrue(audioRecord.getRecordingState()
|
||||
== AudioRecord.RECORDSTATE_RECORDING);
|
||||
|
||||
long lastTime = System.nanoTime();
|
||||
while (keepAlive) {
|
||||
int bytesRead = audioRecord.read(byteBuffer, byteBuffer.capacity());
|
||||
if (bytesRead == byteBuffer.capacity()) {
|
||||
nativeDataIsRecorded(bytesRead, nativeAudioRecord);
|
||||
} else {
|
||||
DoLogErr("AudioRecord.read failed: " + bytesRead);
|
||||
if (bytesRead == AudioRecord.ERROR_INVALID_OPERATION) {
|
||||
keepAlive = false;
|
||||
}
|
||||
}
|
||||
if (DEBUG) {
|
||||
long nowTime = System.nanoTime();
|
||||
long durationInMs =
|
||||
TimeUnit.NANOSECONDS.toMillis((nowTime - lastTime));
|
||||
lastTime = nowTime;
|
||||
DoLog("bytesRead[" + durationInMs + "] " + bytesRead);
|
||||
}
|
||||
}
|
||||
|
||||
_tempBufRec = new byte[2 * 480];
|
||||
try {
|
||||
audioRecord.stop();
|
||||
} catch (IllegalStateException e) {
|
||||
DoLogErr("AudioRecord.stop failed: " + e.getMessage());
|
||||
}
|
||||
RemoveThreadId();
|
||||
}
|
||||
|
||||
public static boolean BuiltInAECIsAvailable() {
|
||||
// AcousticEchoCanceler was added in API level 16 (Jelly Bean).
|
||||
if (!runningOnJellyBeanOrHigher()) {
|
||||
public void joinThread() {
|
||||
keepAlive = false;
|
||||
while (isAlive()) {
|
||||
try {
|
||||
join();
|
||||
} catch (InterruptedException e) {
|
||||
// Ignore.
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
WebRtcAudioRecord(Context context, long nativeAudioRecord) {
|
||||
DoLog("ctor" + getThreadInfo());
|
||||
this.context = context;
|
||||
this.nativeAudioRecord = nativeAudioRecord;
|
||||
audioManager = ((AudioManager) context.getSystemService(
|
||||
Context.AUDIO_SERVICE));
|
||||
sampleRate = GetNativeSampleRate();
|
||||
bytesPerBuffer = BYTES_PER_FRAME * (sampleRate / BUFFERS_PER_SECOND);
|
||||
framesPerBuffer = sampleRate / BUFFERS_PER_SECOND;
|
||||
byteBuffer = byteBuffer.allocateDirect(bytesPerBuffer);
|
||||
DoLog("byteBuffer.capacity: " + byteBuffer.capacity());
|
||||
|
||||
// Rather than passing the ByteBuffer with every callback (requiring
|
||||
// the potentially expensive GetDirectBufferAddress) we simply have the
|
||||
// the native class cache the address to the memory once.
|
||||
nativeCacheDirectBufferAddress(byteBuffer, nativeAudioRecord);
|
||||
AddThreadId();
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the native or optimal input sample rate for this device's
|
||||
* primary input stream. Unit is in Hz.
|
||||
* Note that we actually query the output device but the same result is
|
||||
* also valid for input.
|
||||
*/
|
||||
private int GetNativeSampleRate() {
|
||||
if (!runningOnJellyBeanMR1OrHigher()) {
|
||||
return SAMPLE_RATE_HZ;
|
||||
}
|
||||
String sampleRateString = audioManager.getProperty(
|
||||
AudioManager.PROPERTY_OUTPUT_SAMPLE_RATE);
|
||||
return (sampleRateString == null) ?
|
||||
SAMPLE_RATE_HZ : Integer.parseInt(sampleRateString);
|
||||
}
|
||||
|
||||
public static boolean BuiltInAECIsAvailable() {
|
||||
// AcousticEchoCanceler was added in API level 16 (Jelly Bean).
|
||||
if (!runningOnJellyBeanOrHigher()) {
|
||||
return false;
|
||||
}
|
||||
// TODO(henrika): add black-list based on device name. We could also
|
||||
// use uuid to exclude devices but that would require a session ID from
|
||||
// an existing AudioRecord object.
|
||||
return AcousticEchoCanceler.isAvailable();
|
||||
}
|
||||
|
||||
private boolean EnableBuiltInAEC(boolean enable) {
|
||||
DoLog("EnableBuiltInAEC(" + enable + ')');
|
||||
AddThreadId();
|
||||
// AcousticEchoCanceler was added in API level 16 (Jelly Bean).
|
||||
if (!runningOnJellyBeanOrHigher()) {
|
||||
return false;
|
||||
}
|
||||
// Store the AEC state.
|
||||
useBuiltInAEC = enable;
|
||||
// Set AEC state if AEC has already been created.
|
||||
if (aec != null) {
|
||||
int ret = aec.setEnabled(enable);
|
||||
if (ret != AudioEffect.SUCCESS) {
|
||||
DoLogErr("AcousticEchoCanceler.setEnabled failed");
|
||||
return false;
|
||||
}
|
||||
// TODO(henrika): add black-list based on device name. We could also
|
||||
// use uuid to exclude devices but that would require a session ID from
|
||||
// an existing AudioRecord object.
|
||||
return AcousticEchoCanceler.isAvailable();
|
||||
DoLog("AcousticEchoCanceler.getEnabled: " + aec.getEnabled());
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
private int InitRecording(int sampleRate) {
|
||||
DoLog("InitRecording(sampleRate=" + sampleRate + ")");
|
||||
AddThreadId();
|
||||
// Get the minimum buffer size required for the successful creation of
|
||||
// an AudioRecord object, in byte units.
|
||||
// Note that this size doesn't guarantee a smooth recording under load.
|
||||
// TODO(henrika): Do we need to make this larger to avoid underruns?
|
||||
int minBufferSize = AudioRecord.getMinBufferSize(
|
||||
sampleRate,
|
||||
AudioFormat.CHANNEL_IN_MONO,
|
||||
AudioFormat.ENCODING_PCM_16BIT);
|
||||
DoLog("AudioRecord.getMinBufferSize: " + minBufferSize);
|
||||
|
||||
if (aec != null) {
|
||||
aec.release();
|
||||
aec = null;
|
||||
}
|
||||
if (audioRecord != null) {
|
||||
audioRecord.release();
|
||||
audioRecord = null;
|
||||
}
|
||||
|
||||
private int EnableBuiltInAEC(boolean enable) {
|
||||
DoLog("EnableBuiltInAEC(" + enable + ')');
|
||||
// AcousticEchoCanceler was added in API level 16 (Jelly Bean).
|
||||
if (!runningOnJellyBeanOrHigher()) {
|
||||
return -1;
|
||||
}
|
||||
int bufferSizeInBytes = Math.max(byteBuffer.capacity(), minBufferSize);
|
||||
DoLog("bufferSizeInBytes: " + bufferSizeInBytes);
|
||||
try {
|
||||
audioRecord = new AudioRecord(AudioSource.VOICE_COMMUNICATION,
|
||||
sampleRate,
|
||||
AudioFormat.CHANNEL_IN_MONO,
|
||||
AudioFormat.ENCODING_PCM_16BIT,
|
||||
bufferSizeInBytes);
|
||||
|
||||
_useBuiltInAEC = enable;
|
||||
} catch (IllegalArgumentException e) {
|
||||
DoLog(e.getMessage());
|
||||
return -1;
|
||||
}
|
||||
assertIsTrue(audioRecord.getState() == AudioRecord.STATE_INITIALIZED);
|
||||
|
||||
// Set AEC state if AEC has already been created.
|
||||
if (_aec != null) {
|
||||
int ret = _aec.setEnabled(enable);
|
||||
if (ret != AudioEffect.SUCCESS) {
|
||||
DoLogErr("AcousticEchoCanceler.setEnabled failed");
|
||||
return -1;
|
||||
}
|
||||
DoLog("AcousticEchoCanceler.getEnabled: " + _aec.getEnabled());
|
||||
}
|
||||
|
||||
return 0;
|
||||
DoLog("AudioRecord " +
|
||||
"session ID: " + audioRecord.getAudioSessionId() + ", " +
|
||||
"audio format: " + audioRecord.getAudioFormat() + ", " +
|
||||
"channels: " + audioRecord.getChannelCount() + ", " +
|
||||
"sample rate: " + audioRecord.getSampleRate());
|
||||
DoLog("AcousticEchoCanceler.isAvailable: " + BuiltInAECIsAvailable());
|
||||
if (!BuiltInAECIsAvailable()) {
|
||||
return framesPerBuffer;
|
||||
}
|
||||
|
||||
@SuppressWarnings("unused")
|
||||
private int InitRecording(int audioSource, int sampleRate) {
|
||||
DoLog("InitRecording");
|
||||
audioSource = AudioSource.VOICE_COMMUNICATION;
|
||||
// get the minimum buffer size that can be used
|
||||
int minRecBufSize = AudioRecord.getMinBufferSize(
|
||||
sampleRate,
|
||||
AudioFormat.CHANNEL_IN_MONO,
|
||||
AudioFormat.ENCODING_PCM_16BIT);
|
||||
|
||||
// DoLog("min rec buf size is " + minRecBufSize);
|
||||
|
||||
// double size to be more safe
|
||||
int recBufSize = minRecBufSize * 2;
|
||||
// On average half of the samples have been recorded/buffered and the
|
||||
// recording interval is 1/100s.
|
||||
_bufferedRecSamples = sampleRate / 200;
|
||||
// DoLog("rough rec delay set to " + _bufferedRecSamples);
|
||||
|
||||
if (_aec != null) {
|
||||
_aec.release();
|
||||
_aec = null;
|
||||
}
|
||||
|
||||
// release the object
|
||||
if (_audioRecord != null) {
|
||||
_audioRecord.release();
|
||||
_audioRecord = null;
|
||||
}
|
||||
|
||||
try {
|
||||
_audioRecord = new AudioRecord(
|
||||
audioSource,
|
||||
sampleRate,
|
||||
AudioFormat.CHANNEL_IN_MONO,
|
||||
AudioFormat.ENCODING_PCM_16BIT,
|
||||
recBufSize);
|
||||
|
||||
} catch (Exception e) {
|
||||
DoLog(e.getMessage());
|
||||
return -1;
|
||||
}
|
||||
|
||||
// check that the audioRecord is ready to be used
|
||||
if (_audioRecord.getState() != AudioRecord.STATE_INITIALIZED) {
|
||||
// DoLog("rec not initialized " + sampleRate);
|
||||
return -1;
|
||||
}
|
||||
|
||||
// DoLog("rec sample rate set to " + sampleRate);
|
||||
|
||||
DoLog("AcousticEchoCanceler.isAvailable: " + BuiltInAECIsAvailable());
|
||||
if (!BuiltInAECIsAvailable()) {
|
||||
return _bufferedRecSamples;
|
||||
}
|
||||
|
||||
_aec = AcousticEchoCanceler.create(_audioRecord.getAudioSessionId());
|
||||
if (_aec == null) {
|
||||
DoLogErr("AcousticEchoCanceler.create failed");
|
||||
return -1;
|
||||
}
|
||||
|
||||
int ret = _aec.setEnabled(_useBuiltInAEC);
|
||||
if (ret != AudioEffect.SUCCESS) {
|
||||
DoLogErr("AcousticEchoCanceler.setEnabled failed");
|
||||
return -1;
|
||||
}
|
||||
|
||||
Descriptor descriptor = _aec.getDescriptor();
|
||||
DoLog("AcousticEchoCanceler " +
|
||||
"name: " + descriptor.name + ", " +
|
||||
"implementor: " + descriptor.implementor + ", " +
|
||||
"uuid: " + descriptor.uuid);
|
||||
DoLog("AcousticEchoCanceler.getEnabled: " + _aec.getEnabled());
|
||||
|
||||
return _bufferedRecSamples;
|
||||
aec = AcousticEchoCanceler.create(audioRecord.getAudioSessionId());
|
||||
if (aec == null) {
|
||||
DoLogErr("AcousticEchoCanceler.create failed");
|
||||
return -1;
|
||||
}
|
||||
|
||||
@SuppressWarnings("unused")
|
||||
private int StartRecording() {
|
||||
DoLog("StartRecording");
|
||||
// start recording
|
||||
try {
|
||||
_audioRecord.startRecording();
|
||||
|
||||
} catch (IllegalStateException e) {
|
||||
e.printStackTrace();
|
||||
return -1;
|
||||
}
|
||||
|
||||
_isRecording = true;
|
||||
return 0;
|
||||
int ret = aec.setEnabled(useBuiltInAEC);
|
||||
if (ret != AudioEffect.SUCCESS) {
|
||||
DoLogErr("AcousticEchoCanceler.setEnabled failed");
|
||||
return -1;
|
||||
}
|
||||
Descriptor descriptor = aec.getDescriptor();
|
||||
DoLog("AcousticEchoCanceler " +
|
||||
"name: " + descriptor.name + ", " +
|
||||
"implementor: " + descriptor.implementor + ", " +
|
||||
"uuid: " + descriptor.uuid);
|
||||
DoLog("AcousticEchoCanceler.getEnabled: " + aec.getEnabled());
|
||||
return framesPerBuffer;
|
||||
}
|
||||
|
||||
@SuppressWarnings("unused")
|
||||
private int StopRecording() {
|
||||
DoLog("StopRecording");
|
||||
_recLock.lock();
|
||||
try {
|
||||
// only stop if we are recording
|
||||
if (_audioRecord.getRecordingState() ==
|
||||
AudioRecord.RECORDSTATE_RECORDING) {
|
||||
// stop recording
|
||||
try {
|
||||
_audioRecord.stop();
|
||||
} catch (IllegalStateException e) {
|
||||
e.printStackTrace();
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
// Release the AEC object.
|
||||
if (_aec != null) {
|
||||
_aec.release();
|
||||
_aec = null;
|
||||
}
|
||||
|
||||
// Release the AudioRecord object.
|
||||
_audioRecord.release();
|
||||
_audioRecord = null;
|
||||
|
||||
} finally {
|
||||
// Ensure we always unlock, both for success, exception or error
|
||||
// return.
|
||||
_doRecInit = true;
|
||||
_recLock.unlock();
|
||||
}
|
||||
|
||||
_isRecording = false;
|
||||
return 0;
|
||||
private boolean StartRecording() {
|
||||
DoLog("StartRecording");
|
||||
AddThreadId();
|
||||
if (audioRecord == null) {
|
||||
DoLogErr("start() called before init()");
|
||||
return false;
|
||||
}
|
||||
|
||||
@SuppressWarnings("unused")
private int RecordAudio(int lengthInBytes) {
  _recLock.lock();

  try {
    if (_audioRecord == null) {
      return -2; // We have probably closed down while waiting for rec
                 // lock
    }

    // Set priority, only do once
    if (_doRecInit == true) {
      try {
        android.os.Process.setThreadPriority(
            android.os.Process.THREAD_PRIORITY_URGENT_AUDIO);
      } catch (Exception e) {
        DoLog("Set rec thread priority failed: " + e.getMessage());
      }
      _doRecInit = false;
    }

    int readBytes = 0;
    _recBuffer.rewind(); // Reset the position to start of buffer
    readBytes = _audioRecord.read(_tempBufRec, 0, lengthInBytes);
    // DoLog("read " + readBytes + "from SC");
    _recBuffer.put(_tempBufRec);

    if (readBytes != lengthInBytes) {
      // DoLog("Could not read all data from sc (read = " + readBytes
      //     + ", length = " + lengthInBytes + ")");
      return -1;
    }

  } catch (Exception e) {
    DoLogErr("RecordAudio try failed: " + e.getMessage());

  } finally {
    // Ensure we always unlock, both for success, exception or error
    // return.
    _recLock.unlock();
  }

  return _bufferedRecSamples;
  if (audioThread != null) {
    DoLogErr("start() was already called");
    return false;
  }
  audioThread = new AudioRecordThread("AudioRecordJavaThread");
  audioThread.start();
  return true;
}

final String logTag = "WebRtcAudioRecord-Java";

private void DoLog(String msg) {
  Log.d(logTag, msg);
private boolean StopRecording() {
  DoLog("StopRecording");
  AddThreadId();
  if (audioThread == null) {
    DoLogErr("start() was never called, or stop() was already called");
    return false;
  }

private void DoLogErr(String msg) {
  Log.e(logTag, msg);
  audioThread.joinThread();
  audioThread = null;
  if (aec != null) {
    aec.release();
    aec = null;
  }
  if (audioRecord != null) {
    audioRecord.release();
    audioRecord = null;
  }
  return true;
}

private void DoLog(String msg) {
  Log.d(TAG, msg);
}

private void DoLogErr(String msg) {
  Log.e(TAG, msg);
}

/** Helper method for building a string of thread information. */
private static String getThreadInfo() {
  return "@[name=" + Thread.currentThread().getName()
      + ", id=" + Thread.currentThread().getId() + "]";
}

/** Helper method which throws an exception when an assertion has failed. */
private static void assertIsTrue(boolean condition) {
  if (!condition) {
    throw new AssertionError("Expected condition to be true");
  }
}

private void AddThreadId() {
  threadIds.add(Thread.currentThread().getId());
  DoLog("threadIds: " + threadIds + " (#threads=" + threadIds.size() + ")");
}

private void RemoveThreadId() {
  threadIds.remove(Thread.currentThread().getId());
  DoLog("threadIds: " + threadIds + " (#threads=" + threadIds.size() + ")");
}

private native void nativeCacheDirectBufferAddress(
    ByteBuffer byteBuffer, long nativeAudioRecord);

private native void nativeDataIsRecorded(int bytes, long nativeAudioRecord);
}
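The two native methods above form the audio-data handshake with C++: the direct ByteBuffer's address is cached once up front, and each recorded chunk is then announced with only a byte count. A minimal sketch of what the C++ receiver side of that handshake can look like follows; the exported JNI names and the AudioRecordJni type are illustrative assumptions, not the actual implementation in this change.

#include <jni.h>

// Hypothetical native peer of the Java class; illustrative only.
struct AudioRecordJni {
  void* direct_buffer_address_;
  void OnDataIsRecorded(int bytes) {
    // Consume |bytes| of audio from |direct_buffer_address_| here.
  }
};

extern "C" JNIEXPORT void JNICALL
Java_org_webrtc_voiceengine_WebRtcAudioRecord_nativeCacheDirectBufferAddress(
    JNIEnv* env, jobject, jobject byte_buffer, jlong native_audio_record) {
  AudioRecordJni* recorder =
      reinterpret_cast<AudioRecordJni*>(native_audio_record);
  // The address of a direct buffer is stable for the buffer's lifetime, so
  // caching it once avoids per-callback JNI lookups on the audio thread.
  recorder->direct_buffer_address_ = env->GetDirectBufferAddress(byte_buffer);
}

extern "C" JNIEXPORT void JNICALL
Java_org_webrtc_voiceengine_WebRtcAudioRecord_nativeDataIsRecorded(
    JNIEnv* env, jobject, jint bytes, jlong native_audio_record) {
  reinterpret_cast<AudioRecordJni*>(native_audio_record)
      ->OnDataIsRecorded(bytes);
}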
@ -13,6 +13,7 @@
  'type': 'static_library',
  'dependencies': [
    'webrtc_utility',
    '<(webrtc_root)/base/base.gyp:rtc_base_approved',
    '<(webrtc_root)/common_audio/common_audio.gyp:common_audio',
    '<(webrtc_root)/system_wrappers/system_wrappers.gyp:system_wrappers',
  ],

@ -12,9 +12,37 @@
#define WEBRTC_MODULES_UTILITY_INTERFACE_HELPERS_ANDROID_H_

#include <jni.h>
#include <string>

// Abort the process if |jni| has a Java exception pending.
// TODO(henrika): merge with CHECK_JNI_EXCEPTION() in jni_helpers.h.
#define CHECK_EXCEPTION(jni) \
  CHECK(!jni->ExceptionCheck()) \
      << (jni->ExceptionDescribe(), jni->ExceptionClear(), "")

namespace webrtc {

// Return a |JNIEnv*| usable on this thread or NULL if this thread is detached.
JNIEnv* GetEnv(JavaVM* jvm);

// JNIEnv-helper methods that wrap the API which uses the JNI interface
// pointer (JNIEnv*). They allow us to CHECK success and verify that no Java
// exception is thrown while calling the method.
jmethodID GetMethodID(
    JNIEnv* jni, jclass c, const std::string& name, const char* signature);

jclass FindClass(JNIEnv* jni, const std::string& name);

jobject NewGlobalRef(JNIEnv* jni, jobject o);

void DeleteGlobalRef(JNIEnv* jni, jobject o);

// Return thread ID as a string.
std::string GetThreadId();

// Return thread ID as string suitable for debug logging.
std::string GetThreadInfo();

// Attach thread to JVM if necessary and detach at scope end if originally
// attached.
class AttachThreadScoped {
@ -29,6 +57,23 @@ class AttachThreadScoped {
  JNIEnv* env_;
};

// Scoped holder for global Java refs.
template<class T>  // T is jclass, jobject, jintArray, etc.
class ScopedGlobalRef {
 public:
  ScopedGlobalRef(JNIEnv* jni, T obj)
      : jni_(jni), obj_(static_cast<T>(NewGlobalRef(jni, obj))) {}
  ~ScopedGlobalRef() {
    DeleteGlobalRef(jni_, obj_);
  }
  T operator*() const {
    return obj_;
  }
 private:
  JNIEnv* jni_;
  T obj_;
};

}  // namespace webrtc

#endif  // WEBRTC_MODULES_UTILITY_INTERFACE_HELPERS_ANDROID_H_
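A minimal usage sketch of the helpers declared above (the org/foo/Bar class and its bar() method are hypothetical, and |jvm| is assumed to be a cached JavaVM*):

// Assumes this thread is already attached; GetEnv() returns NULL otherwise.
void CallBarOnce(JavaVM* jvm, jobject j_instance) {
  JNIEnv* jni = webrtc::GetEnv(jvm);
  webrtc::ScopedGlobalRef<jclass> j_class(
      jni, webrtc::FindClass(jni, "org/foo/Bar"));
  jmethodID bar_id = webrtc::GetMethodID(jni, *j_class, "bar", "()V");
  jni->CallVoidMethod(j_instance, bar_id);
  CHECK_EXCEPTION(jni) << "Error during CallVoidMethod";
}  // ScopedGlobalRef releases the global class ref here.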
@ -8,27 +8,89 @@
 * be found in the AUTHORS file in the root of the source tree.
 */

#include "webrtc/base/checks.h"
#include "webrtc/modules/utility/interface/helpers_android.h"

#include <android/log.h>
#include <assert.h>
#include <pthread.h>
#include <stddef.h>
#include <unistd.h>

#define TAG "HelpersAndroid"
#define ALOGD(...) __android_log_print(ANDROID_LOG_DEBUG, TAG, __VA_ARGS__)

namespace webrtc {

JNIEnv* GetEnv(JavaVM* jvm) {
  void* env = NULL;
  jint status = jvm->GetEnv(&env, JNI_VERSION_1_6);
  CHECK(((env != NULL) && (status == JNI_OK)) ||
        ((env == NULL) && (status == JNI_EDETACHED)))
      << "Unexpected GetEnv return: " << status << ":" << env;
  return reinterpret_cast<JNIEnv*>(env);
}

jmethodID GetMethodID(
    JNIEnv* jni, jclass c, const std::string& name, const char* signature) {
  jmethodID m = jni->GetMethodID(c, name.c_str(), signature);
  CHECK_EXCEPTION(jni) << "Error during GetMethodID: " << name << ", "
                       << signature;
  CHECK(m) << name << ", " << signature;
  return m;
}

jclass FindClass(JNIEnv* jni, const std::string& name) {
  jclass c = jni->FindClass(name.c_str());
  CHECK_EXCEPTION(jni) << "Error during FindClass: " << name;
  CHECK(c) << name;
  return c;
}

jobject NewGlobalRef(JNIEnv* jni, jobject o) {
  jobject ret = jni->NewGlobalRef(o);
  CHECK_EXCEPTION(jni) << "Error during NewGlobalRef";
  CHECK(ret);
  return ret;
}

void DeleteGlobalRef(JNIEnv* jni, jobject o) {
  jni->DeleteGlobalRef(o);
  CHECK_EXCEPTION(jni) << "Error during DeleteGlobalRef";
}

std::string GetThreadId() {
  char buf[21];  // Big enough to hold a kuint64max plus terminating NULL.
  int thread_id = gettid();
  CHECK_LT(snprintf(buf, sizeof(buf), "%i", thread_id),
           static_cast<int>(sizeof(buf))) << "Thread id is bigger than uint64??";
  return std::string(buf);
}

std::string GetThreadInfo() {
  return "@[tid=" + GetThreadId() + "]";
}

AttachThreadScoped::AttachThreadScoped(JavaVM* jvm)
    : attached_(false), jvm_(jvm), env_(NULL) {
  jint ret_val = jvm->GetEnv(reinterpret_cast<void**>(&env_), JNI_VERSION_1_4);
  if (ret_val == JNI_EDETACHED) {
    // Attach the thread to the Java VM.
    ret_val = jvm_->AttachCurrentThread(&env_, NULL);
    attached_ = ret_val == JNI_OK;
    assert(attached_);
  env_ = GetEnv(jvm);
  if (!env_) {
    // Adding debug log here so we can track down potential leaks and figure
    // out why we sometimes see "Native thread exiting without having called
    // DetachCurrentThread" in logcat outputs.
    ALOGD("Attaching thread to JVM%s", GetThreadInfo().c_str());
    jint res = jvm->AttachCurrentThread(&env_, NULL);
    attached_ = (res == JNI_OK);
    CHECK(attached_) << "AttachCurrentThread failed: " << res;
  }
}

AttachThreadScoped::~AttachThreadScoped() {
  if (attached_ && (jvm_->DetachCurrentThread() < 0)) {
    assert(false);
  if (attached_) {
    ALOGD("Detaching thread from JVM%s", GetThreadInfo().c_str());
    jint res = jvm_->DetachCurrentThread();
    CHECK(res == JNI_OK) << "DetachCurrentThread failed: " << res;
    CHECK(!GetEnv(jvm_));
  }
}
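A minimal sketch of the intended attach/detach pattern on a native-only thread (the function and its j_callback/on_event_id arguments are hypothetical, and it assumes AttachThreadScoped exposes the JNIEnv* env() accessor elided in the hunk above):

void NativeThreadBody(JavaVM* jvm, jobject j_callback, jmethodID on_event_id) {
  // Attaches this thread to the JVM only if it is not already attached.
  webrtc::AttachThreadScoped ats(jvm);
  JNIEnv* jni = ats.env();
  jni->CallVoidMethod(j_callback, on_event_id);
  CHECK_EXCEPTION(jni) << "Error during CallVoidMethod";
}  // If |ats| did the attach, its destructor detaches the thread here.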