Refactoring of AudioTrackJni and AudioRecordJni using new JVM/JNI classes

BUG=NONE
TEST=./webrtc/build/android/test_runner.py gtest -s modules_unittests --gtest_filter=AudioDevice*
R=tommi@webrtc.org

Review URL: https://webrtc-codereview.appspot.com/51079004

Cr-Commit-Position: refs/heads/master@{#9271}
henrika 2015-05-25 10:11:27 +02:00
parent a26c4e5df6
commit ee369e4277
13 changed files with 224 additions and 347 deletions
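This CL replaces the per-class static SetAndroidAudioDeviceObjects()/ClearAndroidAudioDeviceObjects() plumbing with the shared JVM/JNI helper classes (JVM, JNIEnvironment, NativeRegistration, GlobalRef) declared in webrtc/modules/utility/interface/jvm_android.h. A minimal sketch of the new initialization flow, assuming a hypothetical native entry point (only JVM::Initialize() and environment() are taken from this CL):

void InitializeWebRtcAudio(JavaVM* jvm, jobject context) {
  // One-time registration of the VM and application context.
  webrtc::JVM::Initialize(jvm, context);
  // Consumers such as AudioRecordJni and AudioTrackJni then fetch their
  // own JNI environment on demand instead of relying on cached globals.
  rtc::scoped_ptr<webrtc::JNIEnvironment> env =
      webrtc::JVM::GetInstance()->environment();
  CHECK(env);
}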

View File

@ -32,17 +32,6 @@ namespace webrtc {
template <class InputType, class OutputType>
class AudioDeviceTemplate : public AudioDeviceGeneric {
public:
static void SetAndroidAudioDeviceObjects(void* javaVM,
void* context) {
OutputType::SetAndroidAudioDeviceObjects(javaVM, context);
InputType::SetAndroidAudioDeviceObjects(javaVM, context);
}
static void ClearAndroidAudioDeviceObjects() {
OutputType::ClearAndroidAudioDeviceObjects();
InputType::ClearAndroidAudioDeviceObjects();
}
AudioDeviceTemplate(AudioDeviceModule::AudioLayer audio_layer,
AudioManager* audio_manager)
: audio_layer_(audio_layer),

View File

@ -8,8 +8,12 @@
* be found in the AUTHORS file in the root of the source tree.
*/
#include <algorithm>
#include <limits>
#include <list>
#include <numeric>
#include <string>
#include <vector>
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
@ -117,11 +121,11 @@ class FileAudioStream : public AudioStreamInterface {
}
// AudioStreamInterface::Write() is not implemented.
virtual void Write(const void* source, int num_frames) override {}
void Write(const void* source, int num_frames) override {}
// Read samples from file stored in memory (at construction) and copy
// |num_frames| (<=> 10ms) to the |destination| byte buffer.
virtual void Read(void* destination, int num_frames) override {
void Read(void* destination, int num_frames) override {
memcpy(destination,
static_cast<int16_t*> (&file_[file_pos_]),
num_frames * sizeof(int16_t));
@ -169,7 +173,7 @@ class FifoAudioStream : public AudioStreamInterface {
// Allocate new memory, copy |num_frames| samples from |source| into memory
// and add pointer to the memory location to end of the list.
// Increases the size of the FIFO by one element.
virtual void Write(const void* source, int num_frames) override {
void Write(const void* source, int num_frames) override {
ASSERT_EQ(num_frames, frames_per_buffer_);
PRINTD("+");
if (write_count_++ < kNumIgnoreFirstCallbacks) {
@ -192,7 +196,7 @@ class FifoAudioStream : public AudioStreamInterface {
// Read pointer to data buffer from front of list, copy |num_frames| of stored
// data into |destination| and delete the utilized memory allocation.
// Decreases the size of the FIFO by one element.
virtual void Read(void* destination, int num_frames) override {
void Read(void* destination, int num_frames) override {
ASSERT_EQ(num_frames, frames_per_buffer_);
PRINTD("-");
rtc::CritScope lock(&lock_);
@ -255,7 +259,7 @@ class LatencyMeasuringAudioStream : public AudioStreamInterface {
}
// Insert periodic impulses in first two samples of |destination|.
virtual void Read(void* destination, int num_frames) override {
void Read(void* destination, int num_frames) override {
ASSERT_EQ(num_frames, frames_per_buffer_);
if (play_count_ == 0) {
PRINT("[");
@ -277,7 +281,7 @@ class LatencyMeasuringAudioStream : public AudioStreamInterface {
// Detect received impulses in |source|, derive time between transmission and
// detection and add the calculated delay to list of latencies.
virtual void Write(const void* source, int num_frames) override {
void Write(const void* source, int num_frames) override {
ASSERT_EQ(num_frames, frames_per_buffer_);
rec_count_++;
if (pulse_time_ == 0) {

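The latency test above works in two halves: Read() injects an impulse into the playout buffer and stamps the time, and Write() detects the impulse returning through the loopback path and records the elapsed time. A hedged sketch of that bookkeeping (member names follow the fixture; rtc::Time() is assumed to return a millisecond tick):

// Sketch only; the real fixture also aggregates and prints statistics.
void OnImpulseInjected() {           // Called from Read() for each impulse.
  pulse_time_ = rtc::Time();         // Remember the transmission time (ms).
}
void OnImpulseDetected() {           // Called from Write() on detection.
  int latency_ms = rtc::Time() - pulse_time_;
  latencies_.push_back(latency_ms);  // One round-trip measurement.
  pulse_time_ = 0;                   // Re-arm for the next impulse.
}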
View File

@ -173,12 +173,13 @@ class AudioManager {
// Also ensures that DetachCurrentThread() is called at destruction.
AttachCurrentThreadIfNeeded attach_thread_if_needed_;
// Wraps the JNI interface pointer and methods associated with it.
rtc::scoped_ptr<JNIEnvironment> j_environment_;
// TODO(henrika): add comments...
// Contains factory method for creating the Java object.
rtc::scoped_ptr<NativeRegistration> j_native_registration_;
// TODO(henrika): add comments...
// Wraps the Java specific parts of the AudioManager.
rtc::scoped_ptr<AudioManager::JavaAudioManager> j_audio_manager_;
AudioDeviceModule::AudioLayer audio_layer_;

View File

@ -25,73 +25,65 @@
namespace webrtc {
static JavaVM* g_jvm = NULL;
static jobject g_context = NULL;
static jclass g_audio_record_class = NULL;
void AudioRecordJni::SetAndroidAudioDeviceObjects(void* jvm, void* context) {
ALOGD("SetAndroidAudioDeviceObjects%s", GetThreadInfo().c_str());
CHECK(jvm);
CHECK(context);
g_jvm = reinterpret_cast<JavaVM*>(jvm);
JNIEnv* jni = GetEnv(g_jvm);
CHECK(jni) << "AttachCurrentThread must be called on this tread";
// Protect context from being deleted during garbage collection.
g_context = NewGlobalRef(jni, reinterpret_cast<jobject>(context));
// Load the locally-defined WebRtcAudioRecord class and create a new global
// reference to it.
jclass local_class = FindClass(
jni, "org/webrtc/voiceengine/WebRtcAudioRecord");
g_audio_record_class = reinterpret_cast<jclass>(
NewGlobalRef(jni, local_class));
jni->DeleteLocalRef(local_class);
CHECK_EXCEPTION(jni);
// Register native methods with the WebRtcAudioRecord class. These methods
// are declared private native in WebRtcAudioRecord.java.
JNINativeMethod native_methods[] = {
{"nativeCacheDirectBufferAddress", "(Ljava/nio/ByteBuffer;J)V",
reinterpret_cast<void*>(
&webrtc::AudioRecordJni::CacheDirectBufferAddress)},
{"nativeDataIsRecorded", "(IJ)V",
reinterpret_cast<void*>(&webrtc::AudioRecordJni::DataIsRecorded)}};
jni->RegisterNatives(g_audio_record_class,
native_methods, arraysize(native_methods));
CHECK_EXCEPTION(jni) << "Error during RegisterNatives";
// AudioRecordJni::JavaAudioRecord implementation.
AudioRecordJni::JavaAudioRecord::JavaAudioRecord(
NativeRegistration* native_reg, rtc::scoped_ptr<GlobalRef> audio_record)
: audio_record_(audio_record.Pass()),
init_recording_(native_reg->GetMethodId("InitRecording", "(II)I")),
start_recording_(native_reg->GetMethodId("StartRecording", "()Z")),
stop_recording_(native_reg->GetMethodId("StopRecording", "()Z")),
enable_built_in_aec_(native_reg->GetMethodId(
"EnableBuiltInAEC", "(Z)Z")) {
}
void AudioRecordJni::ClearAndroidAudioDeviceObjects() {
ALOGD("ClearAndroidAudioDeviceObjects%s", GetThreadInfo().c_str());
JNIEnv* jni = GetEnv(g_jvm);
CHECK(jni) << "AttachCurrentThread must be called on this tread";
jni->UnregisterNatives(g_audio_record_class);
CHECK_EXCEPTION(jni) << "Error during UnregisterNatives";
DeleteGlobalRef(jni, g_audio_record_class);
g_audio_record_class = NULL;
DeleteGlobalRef(jni, g_context);
g_context = NULL;
g_jvm = NULL;
AudioRecordJni::JavaAudioRecord::~JavaAudioRecord() {}
int AudioRecordJni::JavaAudioRecord::InitRecording(
int sample_rate, int channels) {
return audio_record_->CallIntMethod(init_recording_, sample_rate, channels);
}
bool AudioRecordJni::JavaAudioRecord::StartRecording() {
return audio_record_->CallBooleanMethod(start_recording_);
}
bool AudioRecordJni::JavaAudioRecord::StopRecording() {
return audio_record_->CallBooleanMethod(stop_recording_);
}
bool AudioRecordJni::JavaAudioRecord::EnableBuiltInAEC(bool enable) {
return audio_record_->CallBooleanMethod(enable_built_in_aec_, enable);
}
// AudioRecordJni implementation.
AudioRecordJni::AudioRecordJni(AudioManager* audio_manager)
: audio_manager_(audio_manager),
: j_environment_(JVM::GetInstance()->environment()),
audio_manager_(audio_manager),
audio_parameters_(audio_manager->GetRecordAudioParameters()),
total_delay_in_milliseconds_(0),
j_audio_record_(NULL),
direct_buffer_address_(NULL),
direct_buffer_address_(nullptr),
direct_buffer_capacity_in_bytes_(0),
frames_per_buffer_(0),
initialized_(false),
recording_(false),
audio_device_buffer_(NULL) {
audio_device_buffer_(nullptr) {
ALOGD("ctor%s", GetThreadInfo().c_str());
DCHECK(audio_parameters_.is_valid());
CHECK(HasDeviceObjects());
CreateJavaInstance();
CHECK(j_environment_);
JNINativeMethod native_methods[] = {
{"nativeCacheDirectBufferAddress", "(Ljava/nio/ByteBuffer;J)V",
reinterpret_cast<void*>(
&webrtc::AudioRecordJni::CacheDirectBufferAddress)},
{"nativeDataIsRecorded", "(IJ)V",
reinterpret_cast<void*>(&webrtc::AudioRecordJni::DataIsRecorded)}};
j_native_registration_ = j_environment_->RegisterNatives(
"org/webrtc/voiceengine/WebRtcAudioRecord",
native_methods, arraysize(native_methods));
j_audio_record_.reset(new JavaAudioRecord(
j_native_registration_.get(),
j_native_registration_->NewObject(
"<init>", "(Landroid/content/Context;J)V",
JVM::GetInstance()->context(), PointerTojlong(this))));
// Detach from this thread since we want to use the checker to verify calls
// from the Java based audio thread.
thread_checker_java_.DetachFromThread();
@ -101,10 +93,6 @@ AudioRecordJni::~AudioRecordJni() {
ALOGD("~dtor%s", GetThreadInfo().c_str());
DCHECK(thread_checker_.CalledOnValidThread());
Terminate();
AttachThreadScoped ats(g_jvm);
JNIEnv* jni = ats.env();
jni->DeleteGlobalRef(j_audio_record_);
j_audio_record_ = NULL;
}
int32_t AudioRecordJni::Init() {
@ -125,17 +113,8 @@ int32_t AudioRecordJni::InitRecording() {
DCHECK(thread_checker_.CalledOnValidThread());
DCHECK(!initialized_);
DCHECK(!recording_);
if (initialized_ || recording_) {
return -1;
}
AttachThreadScoped ats(g_jvm);
JNIEnv* jni = ats.env();
jmethodID initRecordingID = GetMethodID(
jni, g_audio_record_class, "InitRecording", "(II)I");
jint frames_per_buffer = jni->CallIntMethod(
j_audio_record_, initRecordingID, audio_parameters_.sample_rate(),
audio_parameters_.channels());
CHECK_EXCEPTION(jni);
int frames_per_buffer = j_audio_record_->InitRecording(
audio_parameters_.sample_rate(), audio_parameters_.channels());
if (frames_per_buffer < 0) {
ALOGE("InitRecording failed!");
return -1;
@ -154,16 +133,7 @@ int32_t AudioRecordJni::StartRecording() {
DCHECK(thread_checker_.CalledOnValidThread());
DCHECK(initialized_);
DCHECK(!recording_);
if (!initialized_ || recording_) {
return -1;
}
AttachThreadScoped ats(g_jvm);
JNIEnv* jni = ats.env();
jmethodID startRecordingID = GetMethodID(
jni, g_audio_record_class, "StartRecording", "()Z");
jboolean res = jni->CallBooleanMethod(j_audio_record_, startRecordingID);
CHECK_EXCEPTION(jni);
if (!res) {
if (!j_audio_record_->StartRecording()) {
ALOGE("StartRecording failed!");
return -1;
}
@ -177,13 +147,7 @@ int32_t AudioRecordJni::StopRecording() {
if (!initialized_ || !recording_) {
return 0;
}
AttachThreadScoped ats(g_jvm);
JNIEnv* jni = ats.env();
jmethodID stopRecordingID = GetMethodID(
jni, g_audio_record_class, "StopRecording", "()Z");
jboolean res = jni->CallBooleanMethod(j_audio_record_, stopRecordingID);
CHECK_EXCEPTION(jni);
if (!res) {
if (!j_audio_record_->StopRecording()) {
ALOGE("StopRecording failed!");
return -1;
}
@ -214,18 +178,7 @@ void AudioRecordJni::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) {
int32_t AudioRecordJni::EnableBuiltInAEC(bool enable) {
ALOGD("EnableBuiltInAEC%s", GetThreadInfo().c_str());
DCHECK(thread_checker_.CalledOnValidThread());
AttachThreadScoped ats(g_jvm);
JNIEnv* jni = ats.env();
jmethodID enableBuiltInAEC = GetMethodID(
jni, g_audio_record_class, "EnableBuiltInAEC", "(Z)Z");
jboolean res = jni->CallBooleanMethod(
j_audio_record_, enableBuiltInAEC, enable);
CHECK_EXCEPTION(jni);
if (!res) {
ALOGE("EnableBuiltInAEC failed!");
return -1;
}
return 0;
return j_audio_record_->EnableBuiltInAEC(enable) ? 0 : -1;
}
void JNICALL AudioRecordJni::CacheDirectBufferAddress(
@ -239,6 +192,7 @@ void AudioRecordJni::OnCacheDirectBufferAddress(
JNIEnv* env, jobject byte_buffer) {
ALOGD("OnCacheDirectBufferAddress");
DCHECK(thread_checker_.CalledOnValidThread());
DCHECK(!direct_buffer_address_);
direct_buffer_address_ =
env->GetDirectBufferAddress(byte_buffer);
jlong capacity = env->GetDirectBufferCapacity(byte_buffer);
@ -267,32 +221,11 @@ void AudioRecordJni::OnDataIsRecorded(int length) {
// |playDelayMs| parameter only. Components like the AEC only see the sum
// of |playDelayMs| and |recDelayMs|, hence the distribution does not matter.
audio_device_buffer_->SetVQEData(total_delay_in_milliseconds_,
0, // recDelayMs
0 ); // clockDrift
0, // recDelayMs
0); // clockDrift
if (audio_device_buffer_->DeliverRecordedData() == -1) {
ALOGE("AudioDeviceBuffer::DeliverRecordedData failed!");
}
}
bool AudioRecordJni::HasDeviceObjects() {
return (g_jvm && g_context && g_audio_record_class);
}
void AudioRecordJni::CreateJavaInstance() {
ALOGD("CreateJavaInstance");
AttachThreadScoped ats(g_jvm);
JNIEnv* jni = ats.env();
jmethodID constructorID = GetMethodID(
jni, g_audio_record_class, "<init>", "(Landroid/content/Context;J)V");
j_audio_record_ = jni->NewObject(g_audio_record_class,
constructorID,
g_context,
reinterpret_cast<intptr_t>(this));
CHECK_EXCEPTION(jni) << "Error during NewObject";
CHECK(j_audio_record_);
j_audio_record_ = jni->NewGlobalRef(j_audio_record_);
CHECK_EXCEPTION(jni) << "Error during NewGlobalRef";
CHECK(j_audio_record_);
}
} // namespace webrtc
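The JavaAudioRecord wrapper above captures the pattern this CL applies throughout: each jmethodID is looked up once at construction through NativeRegistration, and all subsequent calls go through the owned GlobalRef, so the per-call GetMethodID/CHECK_EXCEPTION boilerplate of the old code disappears. A condensed, hypothetical wrapper showing the shape (ExampleWrapper and DoIt are illustrative names, not part of this CL):

class ExampleWrapper {
 public:
  ExampleWrapper(NativeRegistration* native_reg,
                 rtc::scoped_ptr<GlobalRef> obj)
      : obj_(obj.Pass()),
        // One-time method lookup; nothing is resolved on the hot path.
        do_it_(native_reg->GetMethodId("DoIt", "()Z")) {}
  bool DoIt() { return obj_->CallBooleanMethod(do_it_); }
 private:
  rtc::scoped_ptr<GlobalRef> obj_;
  jmethodID do_it_;
};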

View File

@ -18,6 +18,7 @@
#include "webrtc/modules/audio_device/include/audio_device_defines.h"
#include "webrtc/modules/audio_device/audio_device_generic.h"
#include "webrtc/modules/utility/interface/helpers_android.h"
#include "webrtc/modules/utility/interface/jvm_android.h"
namespace webrtc {
@ -35,26 +36,31 @@ namespace webrtc {
// An instance must be created and destroyed on one and the same thread.
// All public methods must also be called on the same thread. A thread checker
// will DCHECK if any method is called on an invalid thread.
// It is possible to call the two static methods (SetAndroidAudioDeviceObjects
// and ClearAndroidAudioDeviceObjects) from a different thread but both will
// CHECK that the calling thread is attached to a Java VM.
//
// All methods use AttachThreadScoped to attach to a Java VM if needed and then
// detach when the method goes out of scope. We do so because this class does
// not own the thread it is created and called on, and other objects on the same
// thread might put us in a detached state at any time.
// This class uses AttachCurrentThreadIfNeeded to attach to a Java VM if needed
// and detach when the object goes out of scope. Additional thread checking
// guarantees that no other (possibly non-attached) thread is used.
class AudioRecordJni {
public:
// Use the invocation API to allow the native application to use the JNI
// interface pointer to access VM features.
// |jvm| denotes the Java VM and |context| corresponds to
// android.content.Context in Java.
// This method also sets a global jclass object, |g_audio_record_class| for
// the "org/webrtc/voiceengine/WebRtcAudioRecord"-class.
static void SetAndroidAudioDeviceObjects(void* jvm, void* context);
// Always call this method after the object has been destructed. It deletes
// existing global references and enables garbage collection.
static void ClearAndroidAudioDeviceObjects();
// Wraps the Java specific parts of the AudioRecordJni into one helper class.
class JavaAudioRecord {
public:
JavaAudioRecord(NativeRegistration* native_registration,
rtc::scoped_ptr<GlobalRef> audio_record);
~JavaAudioRecord();
int InitRecording(int sample_rate, int channels);
bool StartRecording();
bool StopRecording();
bool EnableBuiltInAEC(bool enable);
private:
rtc::scoped_ptr<GlobalRef> audio_record_;
jmethodID init_recording_;
jmethodID start_recording_;
jmethodID stop_recording_;
jmethodID enable_built_in_aec_;
};
explicit AudioRecordJni(AudioManager* audio_manager);
~AudioRecordJni();
@ -66,7 +72,7 @@ class AudioRecordJni {
bool RecordingIsInitialized() const { return initialized_; }
int32_t StartRecording();
int32_t StopRecording ();
int32_t StopRecording();
bool Recording() const { return recording_; }
void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer);
@ -93,23 +99,26 @@ class AudioRecordJni {
JNIEnv* env, jobject obj, jint length, jlong nativeAudioRecord);
void OnDataIsRecorded(int length);
// Returns true if SetAndroidAudioDeviceObjects() has been called
// successfully.
bool HasDeviceObjects();
// Called from the constructor. Defines the |j_audio_record_| member.
void CreateJavaInstance();
// Stores thread ID in constructor.
// We can then use ThreadChecker::CalledOnValidThread() to ensure that
// other methods are called from the same thread.
// Currently only does DCHECK(thread_checker_.CalledOnValidThread()).
rtc::ThreadChecker thread_checker_;
// Stores thread ID in first call to OnDataIsRecorded() from high-priority
// thread in Java. Detached during construction of this object.
rtc::ThreadChecker thread_checker_java_;
// Calls AttachCurrentThread() if this thread is not attached at construction.
// Also ensures that DetachCurrentThread() is called at destruction.
AttachCurrentThreadIfNeeded attach_thread_if_needed_;
// Wraps the JNI interface pointer and methods associated with it.
rtc::scoped_ptr<JNIEnvironment> j_environment_;
// Contains factory method for creating the Java object.
rtc::scoped_ptr<NativeRegistration> j_native_registration_;
// Wraps the Java specific parts of the AudioRecordJni class.
rtc::scoped_ptr<AudioRecordJni::JavaAudioRecord> j_audio_record_;
// Raw pointer to the audio manager.
const AudioManager* audio_manager_;
@ -122,9 +131,6 @@ class AudioRecordJni {
// possible values. See audio_common.h for details.
int total_delay_in_milliseconds_;
// The Java WebRtcAudioRecord instance.
jobject j_audio_record_;
// Cached copy of address to direct audio buffer owned by |j_audio_record_|.
void* direct_buffer_address_;
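The static callbacks declared earlier in this header (CacheDirectBufferAddress, DataIsRecorded) use the standard JNI trampoline pattern: the Java peer passes back, as a jlong, the native pointer it received at construction, and the static method forwards to the instance. A sketch consistent with the declarations above:

void JNICALL AudioRecordJni::DataIsRecorded(
    JNIEnv* env, jobject obj, jint length, jlong nativeAudioRecord) {
  // Recover the C++ object that created the Java peer and forward the call.
  webrtc::AudioRecordJni* this_object =
      reinterpret_cast<webrtc::AudioRecordJni*>(nativeAudioRecord);
  this_object->OnDataIsRecorded(length);
}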

View File

@ -25,70 +25,72 @@
namespace webrtc {
static JavaVM* g_jvm = NULL;
static jobject g_context = NULL;
static jclass g_audio_track_class = NULL;
void AudioTrackJni::SetAndroidAudioDeviceObjects(void* jvm, void* context) {
ALOGD("SetAndroidAudioDeviceObjects%s", GetThreadInfo().c_str());
CHECK(jvm);
CHECK(context);
g_jvm = reinterpret_cast<JavaVM*>(jvm);
JNIEnv* jni = GetEnv(g_jvm);
CHECK(jni) << "AttachCurrentThread must be called on this tread";
g_context = NewGlobalRef(jni, reinterpret_cast<jobject>(context));
jclass local_class = FindClass(
jni, "org/webrtc/voiceengine/WebRtcAudioTrack");
g_audio_track_class = reinterpret_cast<jclass>(
NewGlobalRef(jni, local_class));
jni->DeleteLocalRef(local_class);
CHECK_EXCEPTION(jni);
// Register native methods with the WebRtcAudioTrack class. These methods
// are declared private native in WebRtcAudioTrack.java.
JNINativeMethod native_methods[] = {
{"nativeCacheDirectBufferAddress", "(Ljava/nio/ByteBuffer;J)V",
reinterpret_cast<void*>(
&webrtc::AudioTrackJni::CacheDirectBufferAddress)},
{"nativeGetPlayoutData", "(IJ)V",
reinterpret_cast<void*>(&webrtc::AudioTrackJni::GetPlayoutData)}};
jni->RegisterNatives(g_audio_track_class,
native_methods, arraysize(native_methods));
CHECK_EXCEPTION(jni) << "Error during RegisterNatives";
// AudioTrackJni::JavaAudioTrack implementation.
AudioTrackJni::JavaAudioTrack::JavaAudioTrack(
NativeRegistration* native_reg, rtc::scoped_ptr<GlobalRef> audio_track)
: audio_track_(audio_track.Pass()),
init_playout_(native_reg->GetMethodId("InitPlayout", "(II)V")),
start_playout_(native_reg->GetMethodId("StartPlayout", "()Z")),
stop_playout_(native_reg->GetMethodId("StopPlayout", "()Z")),
set_stream_volume_(native_reg->GetMethodId("SetStreamVolume", "(I)Z")),
get_stream_max_volume_(native_reg->GetMethodId(
"GetStreamMaxVolume", "()I")),
get_stream_volume_(native_reg->GetMethodId("GetStreamVolume", "()I")) {
}
// TODO(henrika): figure out whether it is required to call this method. If so,
// ensure that it is always called as part of the destruction phase.
void AudioTrackJni::ClearAndroidAudioDeviceObjects() {
ALOGD("ClearAndroidAudioDeviceObjects%s", GetThreadInfo().c_str());
JNIEnv* jni = GetEnv(g_jvm);
CHECK(jni) << "AttachCurrentThread must be called on this tread";
jni->UnregisterNatives(g_audio_track_class);
CHECK_EXCEPTION(jni) << "Error during UnregisterNatives";
DeleteGlobalRef(jni, g_audio_track_class);
g_audio_track_class = NULL;
DeleteGlobalRef(jni, g_context);
g_context = NULL;
g_jvm = NULL;
AudioTrackJni::JavaAudioTrack::~JavaAudioTrack() {}
void AudioTrackJni::JavaAudioTrack::InitPlayout(int sample_rate, int channels) {
audio_track_->CallVoidMethod(init_playout_, sample_rate, channels);
}
bool AudioTrackJni::JavaAudioTrack::StartPlayout() {
return audio_track_->CallBooleanMethod(start_playout_);
}
bool AudioTrackJni::JavaAudioTrack::StopPlayout() {
return audio_track_->CallBooleanMethod(stop_playout_);
}
bool AudioTrackJni::JavaAudioTrack::SetStreamVolume(int volume) {
return audio_track_->CallBooleanMethod(set_stream_volume_, volume);
}
int AudioTrackJni::JavaAudioTrack::GetStreamMaxVolume() {
return audio_track_->CallIntMethod(get_stream_max_volume_);
}
int AudioTrackJni::JavaAudioTrack::GetStreamVolume() {
return audio_track_->CallIntMethod(get_stream_volume_);
}
// TODO(henrika): possibly extend usage of AudioManager and add it as a member.
AudioTrackJni::AudioTrackJni(AudioManager* audio_manager)
: audio_parameters_(audio_manager->GetPlayoutAudioParameters()),
j_audio_track_(NULL),
direct_buffer_address_(NULL),
: j_environment_(JVM::GetInstance()->environment()),
audio_parameters_(audio_manager->GetPlayoutAudioParameters()),
direct_buffer_address_(nullptr),
direct_buffer_capacity_in_bytes_(0),
frames_per_buffer_(0),
initialized_(false),
playing_(false),
audio_device_buffer_(NULL) {
audio_device_buffer_(nullptr) {
ALOGD("ctor%s", GetThreadInfo().c_str());
DCHECK(audio_parameters_.is_valid());
CHECK(HasDeviceObjects());
CreateJavaInstance();
CHECK(j_environment_);
JNINativeMethod native_methods[] = {
{"nativeCacheDirectBufferAddress", "(Ljava/nio/ByteBuffer;J)V",
reinterpret_cast<void*>(
&webrtc::AudioTrackJni::CacheDirectBufferAddress)},
{"nativeGetPlayoutData", "(IJ)V",
reinterpret_cast<void*>(&webrtc::AudioTrackJni::GetPlayoutData)}};
j_native_registration_ = j_environment_->RegisterNatives(
"org/webrtc/voiceengine/WebRtcAudioTrack",
native_methods, arraysize(native_methods));
j_audio_track_.reset(new JavaAudioTrack(
j_native_registration_.get(),
j_native_registration_->NewObject(
"<init>", "(Landroid/content/Context;J)V",
JVM::GetInstance()->context(), PointerTojlong(this))));
// Detach from this thread since we want to use the checker to verify calls
// from the Java based audio thread.
thread_checker_java_.DetachFromThread();
@ -98,10 +100,6 @@ AudioTrackJni::~AudioTrackJni() {
ALOGD("~dtor%s", GetThreadInfo().c_str());
DCHECK(thread_checker_.CalledOnValidThread());
Terminate();
AttachThreadScoped ats(g_jvm);
JNIEnv* jni = ats.env();
jni->DeleteGlobalRef(j_audio_track_);
j_audio_track_ = NULL;
}
int32_t AudioTrackJni::Init() {
@ -122,16 +120,8 @@ int32_t AudioTrackJni::InitPlayout() {
DCHECK(thread_checker_.CalledOnValidThread());
DCHECK(!initialized_);
DCHECK(!playing_);
if (initialized_ || playing_) {
return -1;
}
AttachThreadScoped ats(g_jvm);
JNIEnv* jni = ats.env();
jmethodID initPlayoutID = GetMethodID(
jni, g_audio_track_class, "InitPlayout", "(II)V");
jni->CallVoidMethod(j_audio_track_, initPlayoutID,
j_audio_track_->InitPlayout(
audio_parameters_.sample_rate(), audio_parameters_.channels());
CHECK_EXCEPTION(jni);
initialized_ = true;
return 0;
}
@ -141,16 +131,7 @@ int32_t AudioTrackJni::StartPlayout() {
DCHECK(thread_checker_.CalledOnValidThread());
DCHECK(initialized_);
DCHECK(!playing_);
if (!initialized_ || playing_) {
return -1;
}
AttachThreadScoped ats(g_jvm);
JNIEnv* jni = ats.env();
jmethodID startPlayoutID = GetMethodID(
jni, g_audio_track_class, "StartPlayout", "()Z");
jboolean res = jni->CallBooleanMethod(j_audio_track_, startPlayoutID);
CHECK_EXCEPTION(jni);
if (!res) {
if (!j_audio_track_->StartPlayout()) {
ALOGE("StartPlayout failed!");
return -1;
}
@ -164,13 +145,7 @@ int32_t AudioTrackJni::StopPlayout() {
if (!initialized_ || !playing_) {
return 0;
}
AttachThreadScoped ats(g_jvm);
JNIEnv* jni = ats.env();
jmethodID stopPlayoutID = GetMethodID(
jni, g_audio_track_class, "StopPlayout", "()Z");
jboolean res = jni->CallBooleanMethod(j_audio_track_, stopPlayoutID);
CHECK_EXCEPTION(jni);
if (!res) {
if (!j_audio_track_->StopPlayout()) {
ALOGE("StopPlayout failed!");
return -1;
}
@ -190,26 +165,13 @@ int AudioTrackJni::SpeakerVolumeIsAvailable(bool& available) {
int AudioTrackJni::SetSpeakerVolume(uint32_t volume) {
ALOGD("SetSpeakerVolume(%d)%s", volume, GetThreadInfo().c_str());
DCHECK(thread_checker_.CalledOnValidThread());
AttachThreadScoped ats(g_jvm);
JNIEnv* jni = ats.env();
jmethodID setStreamVolume = GetMethodID(
jni, g_audio_track_class, "SetStreamVolume", "(I)Z");
jboolean res = jni->CallBooleanMethod(
j_audio_track_, setStreamVolume, volume);
CHECK_EXCEPTION(jni);
return res ? 0 : -1;
return j_audio_track_->SetStreamVolume(volume) ? 0 : -1;
}
int AudioTrackJni::MaxSpeakerVolume(uint32_t& max_volume) const {
ALOGD("MaxSpeakerVolume%s", GetThreadInfo().c_str());
DCHECK(thread_checker_.CalledOnValidThread());
AttachThreadScoped ats(g_jvm);
JNIEnv* jni = ats.env();
jmethodID getStreamMaxVolume = GetMethodID(
jni, g_audio_track_class, "GetStreamMaxVolume", "()I");
jint max_vol = jni->CallIntMethod(j_audio_track_, getStreamMaxVolume);
CHECK_EXCEPTION(jni);
max_volume = max_vol;
max_volume = j_audio_track_->GetStreamMaxVolume();
return 0;
}
@ -223,13 +185,7 @@ int AudioTrackJni::MinSpeakerVolume(uint32_t& min_volume) const {
int AudioTrackJni::SpeakerVolume(uint32_t& volume) const {
ALOGD("SpeakerVolume%s", GetThreadInfo().c_str());
DCHECK(thread_checker_.CalledOnValidThread());
AttachThreadScoped ats(g_jvm);
JNIEnv* jni = ats.env();
jmethodID getStreamVolume = GetMethodID(
jni, g_audio_track_class, "GetStreamVolume", "()I");
jint stream_volume = jni->CallIntMethod(j_audio_track_, getStreamVolume);
CHECK_EXCEPTION(jni);
volume = stream_volume;
volume = j_audio_track_->GetStreamVolume();
return 0;
}
@ -295,25 +251,4 @@ void AudioTrackJni::OnGetPlayoutData(int length) {
DCHECK_EQ(length, kBytesPerFrame * samples);
}
bool AudioTrackJni::HasDeviceObjects() {
return (g_jvm && g_context && g_audio_track_class);
}
void AudioTrackJni::CreateJavaInstance() {
ALOGD("CreateJavaInstance");
AttachThreadScoped ats(g_jvm);
JNIEnv* jni = ats.env();
jmethodID constructorID = GetMethodID(
jni, g_audio_track_class, "<init>", "(Landroid/content/Context;J)V");
j_audio_track_ = jni->NewObject(g_audio_track_class,
constructorID,
g_context,
reinterpret_cast<intptr_t>(this));
CHECK_EXCEPTION(jni) << "Error during NewObject";
CHECK(j_audio_track_);
j_audio_track_ = jni->NewGlobalRef(j_audio_track_);
CHECK_EXCEPTION(jni) << "Error during NewGlobalRef";
CHECK(j_audio_track_);
}
} // namespace webrtc
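End to end, the playout call path after this CL looks roughly as follows (a usage sketch, not code from the CL; error handling elided and audio_manager assumed valid):

// Assumes webrtc::JVM::Initialize() has already been called.
AudioTrackJni audio_track(audio_manager);  // Registers natives and creates
                                           // the Java WebRtcAudioTrack peer.
audio_track.Init();
audio_track.InitPlayout();   // -> j_audio_track_->InitPlayout(rate, channels)
audio_track.StartPlayout();  // -> j_audio_track_->StartPlayout()
audio_track.StopPlayout();   // -> j_audio_track_->StopPlayout()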

View File

@ -19,6 +19,7 @@
#include "webrtc/modules/audio_device/include/audio_device_defines.h"
#include "webrtc/modules/audio_device/audio_device_generic.h"
#include "webrtc/modules/utility/interface/helpers_android.h"
#include "webrtc/modules/utility/interface/jvm_android.h"
namespace webrtc {
@ -31,28 +32,37 @@ namespace webrtc {
// An instance must be created and destroyed on one and the same thread.
// All public methods must also be called on the same thread. A thread checker
// will DCHECK if any method is called on an invalid thread.
// It is possible to call the two static methods (SetAndroidAudioDeviceObjects
// and ClearAndroidAudioDeviceObjects) from a different thread but both will
// CHECK that the calling thread is attached to a Java VM.
//
// All methods use AttachThreadScoped to attach to a Java VM if needed and then
// detach when the method goes out of scope. We do so because this class does
// not own the thread it is created and called on, and other objects on the same
// thread might put us in a detached state at any time.
// This class uses AttachCurrentThreadIfNeeded to attach to a Java VM if needed
// and detach when the object goes out of scope. Additional thread checking
// guarantees that no other (possibly non-attached) thread is used.
class AudioTrackJni {
public:
// Use the invocation API to allow the native application to use the JNI
// interface pointer to access VM features.
// |jvm| denotes the Java VM and |context| corresponds to
// android.content.Context in Java.
// This method also sets a global jclass object, |g_audio_track_class| for
// the "org/webrtc/voiceengine/WebRtcAudioTrack"-class.
static void SetAndroidAudioDeviceObjects(void* jvm, void* context);
// Always call this method after the object has been destructed. It deletes
// existing global references and enables garbage collection.
static void ClearAndroidAudioDeviceObjects();
// Wraps the Java specific parts of the AudioTrackJni into one helper class.
class JavaAudioTrack {
public:
JavaAudioTrack(NativeRegistration* native_registration,
rtc::scoped_ptr<GlobalRef> audio_track);
~JavaAudioTrack();
void InitPlayout(int sample_rate, int channels);
bool StartPlayout();
bool StopPlayout();
bool SetStreamVolume(int volume);
int GetStreamMaxVolume();
int GetStreamVolume();
private:
rtc::scoped_ptr<GlobalRef> audio_track_;
jmethodID init_playout_;
jmethodID start_playout_;
jmethodID stop_playout_;
jmethodID set_stream_volume_;
jmethodID get_stream_max_volume_;
jmethodID get_stream_volume_;
};
AudioTrackJni(AudioManager* audio_manager);
explicit AudioTrackJni(AudioManager* audio_manager);
~AudioTrackJni();
int32_t Init();
@ -91,29 +101,30 @@ class AudioTrackJni {
JNIEnv* env, jobject obj, jint length, jlong nativeAudioTrack);
void OnGetPlayoutData(int length);
// Returns true if SetAndroidAudioDeviceObjects() has been called
// successfully.
bool HasDeviceObjects();
// Called from the constructor. Defines the |j_audio_track_| member.
void CreateJavaInstance();
// Stores thread ID in constructor.
// We can then use ThreadChecker::CalledOnValidThread() to ensure that
// other methods are called from the same thread.
rtc::ThreadChecker thread_checker_;
// Stores thread ID in first call to OnGetPlayoutData() from high-priority
// thread in Java. Detached during construction of this object.
rtc::ThreadChecker thread_checker_java_;
// Calls AttachCurrentThread() if this thread is not attached at construction.
// Also ensures that DetachCurrentThread() is called at destruction.
AttachCurrentThreadIfNeeded attach_thread_if_needed_;
// Wraps the JNI interface pointer and methods associated with it.
rtc::scoped_ptr<JNIEnvironment> j_environment_;
// Contains factory method for creating the Java object.
rtc::scoped_ptr<NativeRegistration> j_native_registration_;
// Wraps the Java specific parts of the AudioTrackJni class.
rtc::scoped_ptr<AudioTrackJni::JavaAudioTrack> j_audio_track_;
// Contains audio parameters provided to this class at construction by the
// AudioManager.
const AudioParameters audio_parameters_;
// The Java WebRtcAudioTrack instance.
jobject j_audio_track_;
// Cached copy of address to direct audio buffer owned by |j_audio_track_|.
void* direct_buffer_address_;

View File

@ -33,12 +33,6 @@ void EnsureInitializedOnce() {
// Initialize the Java environment (currently only used by the audio manager).
webrtc::JVM::Initialize(jvm, context);
// TODO(henrika): remove this call when AudioRecordJni and AudioTrackJni
// are modified to use the same sort of Java initialization as the audio
// manager.
using AudioDeviceJava = AudioDeviceTemplate<AudioRecordJni, AudioTrackJni>;
AudioDeviceJava::SetAndroidAudioDeviceObjects(jvm, context);
}
void EnsureInitialized() {

View File

@ -147,7 +147,7 @@ class WebRtcAudioRecord {
}
final int bytesPerFrame = channels * (BITS_PER_SAMPLE / 8);
final int framesPerBuffer = sampleRate / BUFFERS_PER_SECOND;
byteBuffer = byteBuffer.allocateDirect(bytesPerFrame * framesPerBuffer);
byteBuffer = ByteBuffer.allocateDirect(bytesPerFrame * framesPerBuffer);
Logd("byteBuffer.capacity: " + byteBuffer.capacity());
// Rather than passing the ByteBuffer with every callback (requiring
// the potentially expensive GetDirectBufferAddress) we simply have the

View File

@ -21,7 +21,7 @@ import android.os.Process;
import android.util.Log;
class WebRtcAudioTrack {
private static final boolean DEBUG = false;
private static final boolean DEBUG = true;
private static final String TAG = "WebRtcAudioTrack";

View File

@ -45,6 +45,7 @@ class GlobalRef {
~GlobalRef();
jboolean CallBooleanMethod(jmethodID methodID, ...);
jint CallIntMethod(jmethodID methodID, ...);
void CallVoidMethod(jmethodID methodID, ...);
private:

View File

@ -27,8 +27,10 @@ struct {
const char* name;
jclass clazz;
} loaded_classes[] = {
{"org/webrtc/voiceengine/WebRtcAudioManager", nullptr},
{"org/webrtc/voiceengine/BuildInfo", nullptr},
{"org/webrtc/voiceengine/WebRtcAudioManager", nullptr},
{"org/webrtc/voiceengine/WebRtcAudioRecord", nullptr},
{"org/webrtc/voiceengine/WebRtcAudioTrack", nullptr},
};
// Android's FindClass() is trickier than usual because the app-specific
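The classes listed above are resolved once, up front, because FindClass() on a purely native thread only consults the system class loader and cannot see application classes. A sketch of the kind of loader that fills |loaded_classes| (assumed shape, using the FindClass/NewGlobalRef helpers seen elsewhere in this CL; not verbatim from it):

static void LoadClasses(JNIEnv* jni) {
  for (auto& c : loaded_classes) {
    // Resolve on a thread where the application class loader is visible,
    // then pin the class with a global reference for later use.
    jclass local_ref = FindClass(jni, c.name);
    CHECK_EXCEPTION(jni) << "Error during FindClass: " << c.name;
    c.clazz = reinterpret_cast<jclass>(NewGlobalRef(jni, local_ref));
    jni->DeleteLocalRef(local_ref);
  }
}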
@ -102,16 +104,27 @@ GlobalRef::~GlobalRef() {
jboolean GlobalRef::CallBooleanMethod(jmethodID methodID, ...) {
va_list args;
va_start(args, methodID);
jboolean res = jni_->CallBooleanMethod(j_object_, methodID, args);
jboolean res = jni_->CallBooleanMethodV(j_object_, methodID, args);
CHECK_EXCEPTION(jni_) << "Error during CallBooleanMethod";
va_end(args);
return res;
}
jint GlobalRef::CallIntMethod(jmethodID methodID, ...) {
va_list args;
va_start(args, methodID);
jint res = jni_->CallIntMethodV(j_object_, methodID, args);
CHECK_EXCEPTION(jni_) << "Error during CallIntMethod";
va_end(args);
return res;
}
void GlobalRef::CallVoidMethod(jmethodID methodID, ...) {
va_list args;
va_start(args, methodID);
jni_->CallVoidMethod(j_object_, methodID, args);
jni_->CallVoidMethodV(j_object_, methodID, args);
CHECK_EXCEPTION(jni_) << "Error during CallVoidMethod";
va_end(args);
}
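The switch above from CallBooleanMethod/CallIntMethod/CallVoidMethod to their V-suffixed counterparts fixes a genuine bug: the non-V JNI functions expect the variadic arguments themselves, so forwarding a va_list through them is undefined behavior, while the V-variants exist precisely to accept a va_list. A self-contained sketch of the pattern (helper name is illustrative):

#include <jni.h>
#include <stdarg.h>

// Forwards variadic arguments to a Java boolean method; note the V-variant.
jboolean CallBooleanHelper(JNIEnv* jni, jobject obj, jmethodID method, ...) {
  va_list args;
  va_start(args, method);
  jboolean res = jni->CallBooleanMethodV(obj, method, args);
  va_end(args);
  return res;
}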
// NativeRegistration implementation.

View File

@ -146,16 +146,6 @@ int VoiceEngine::SetAndroidObjects(void* javaVM, void* context) {
#ifdef WEBRTC_ANDROID
webrtc::JVM::Initialize(reinterpret_cast<JavaVM*>(javaVM),
reinterpret_cast<jobject>(context));
// The Android ADM implementation supports dynamic selection of the audio
// layer in both directions if a default audio layer is selected. Both
// Java-based audio backends are initialized here to ensure that the user
// can switch backend dynamically as well.
typedef AudioDeviceTemplate<AudioRecordJni, AudioTrackJni> AudioDevice;
if (javaVM && context) {
AudioDevice::SetAndroidAudioDeviceObjects(javaVM, context);
} else {
AudioDevice::ClearAndroidAudioDeviceObjects();
}
return 0;
#else
return -1;