Make protobuf use optional.

- Disable AudioProcessing's protobuf usage by default in the Chromium
  build; the standalone build is unaffected.
- Add a test for the AudioProcessing debug dumps.

TEST=audioproc_unittest

Review URL: http://webrtc-codereview.appspot.com/303003

git-svn-id: http://webrtc.googlecode.com/svn/trunk@1094 4adac7df-926f-26a2-2b94-8c16560cd09d
andrew@webrtc.org 2011-12-03 00:03:31 +00:00
parent 626fbfd4cd
commit 7bf2646e4d
6 changed files with 173 additions and 94 deletions
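The mechanical pattern behind the change: every debug-dump entry point in
audio_processing_impl.cc keeps its public signature, but compiles down to a
stub returning kUnsupportedFunctionError when protobuf is disabled. A
self-contained C++ sketch of that shape (error-code values are illustrative,
not the real AudioProcessing enum; plain stdio stands in for FileWrapper):

    #include <cstdio>

    // Defined via the gyp changes below when enable_protobuf==1.
    #define WEBRTC_AUDIOPROC_DEBUG_DUMP

    enum { kNoError = 0, kFileError = -1, kUnsupportedFunctionError = -2 };

    int StartDebugRecording(const char* filename) {
    #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
      // The real method opens the file through FileWrapper and writes an
      // INIT event before returning.
      std::FILE* file = std::fopen(filename, "wb");
      if (file == NULL) return kFileError;
      std::fclose(file);
      return kNoError;
    #else
      // Protobuf is compiled out, so the dump format is unavailable.
      return kUnsupportedFunctionError;
    #endif
    }

    int main() {
      return StartDebugRecording("debug.aec") == kNoError ? 0 : 1;
    }

This is exactly the split the new DebugDump unit test asserts on: kNoError in
dump-enabled builds, kUnsupportedFunctionError otherwise.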

View File

@@ -66,6 +66,9 @@
# Exclude internal video render module on Chromium build
'include_internal_video_render%': 0,
# Disable the use of protocol buffers in production code.
'enable_protobuf%': 0,
'webrtc_root%': '<(DEPTH)/third_party/webrtc',
}, {
# Settings for the standalone (not-in-Chromium) build.
@@ -77,6 +80,8 @@
'include_internal_video_render%': 1,
'enable_protobuf%': 1,
'webrtc_root%': '<(DEPTH)/src',
'conditions': [

View File

@@ -17,6 +17,9 @@
}, {
'defines': [ 'WEBRTC_APM_UNIT_TEST_FLOAT_PROFILE' ],
}],
['enable_protobuf==1', {
'defines': [ 'WEBRTC_AUDIOPROC_DEBUG_DUMP' ],
}],
],
'dependencies': [
'audio_processing',
@@ -41,26 +44,31 @@
},
'includes': [ '../../build/protoc.gypi', ],
},
{
'target_name': 'audioproc',
'type': 'executable',
'dependencies': [
'audio_processing',
'audioproc_debug_proto',
'<(webrtc_root)/system_wrappers/source/system_wrappers.gyp:system_wrappers',
'<(webrtc_root)/../testing/gtest.gyp:gtest',
],
'conditions': [
['enable_protobuf==1', {
'targets': [
{
'target_name': 'audioproc',
'type': 'executable',
'dependencies': [
'audio_processing',
'<(webrtc_root)/system_wrappers/source/system_wrappers.gyp:system_wrappers',
'<(webrtc_root)/../testing/gtest.gyp:gtest',
],
'sources': [ 'test/process_test.cc', ],
},
{
'target_name': 'unpack_aecdump',
'type': 'executable',
'dependencies': [
'audioproc_debug_proto',
'<(webrtc_root)/system_wrappers/source/system_wrappers.gyp:system_wrappers',
'<(webrtc_root)/../third_party/google-gflags/google-gflags.gyp:google-gflags',
],
'sources': [ 'test/unpack.cc', ],
},
],
'sources': [ 'test/process_test.cc', ],
},
{
'target_name': 'unpack_aecdump',
'type': 'executable',
'dependencies': [
'audioproc_debug_proto',
'<(webrtc_root)/system_wrappers/source/system_wrappers.gyp:system_wrappers',
'<(webrtc_root)/../third_party/google-gflags/google-gflags.gyp:google-gflags',
],
'sources': [ 'test/unpack.cc', ],
},
}],
],
}

View File

@@ -19,9 +19,12 @@
'dependencies': [ 'ns' ],
'defines': [ 'WEBRTC_NS_FLOAT' ],
}],
['enable_protobuf==1', {
'dependencies': [ 'audioproc_debug_proto' ],
'defines': [ 'WEBRTC_AUDIOPROC_DEBUG_DUMP' ],
}],
],
'dependencies': [
'audioproc_debug_proto',
'aec',
'aecm',
'agc',
@@ -65,18 +68,24 @@
'voice_detection_impl.h',
],
},
{
'target_name': 'audioproc_debug_proto',
'type': 'static_library',
'sources': [ 'debug.proto', ],
'variables': {
'proto_in_dir': '.',
# Workaround to protect against gyp's pathname relativization when this
# file is included by modules.gyp.
'proto_out_protected': 'webrtc/audio_processing',
'proto_out_dir': '<(proto_out_protected)',
},
'includes': [ '../../build/protoc.gypi', ],
},
],
'conditions': [
['enable_protobuf==1', {
'targets': [
{
'target_name': 'audioproc_debug_proto',
'type': 'static_library',
'sources': [ 'debug.proto', ],
'variables': {
'proto_in_dir': '.',
# Workaround to protect against gyp's pathname relativization when
# this file is included by modules.gyp.
'proto_out_protected': 'webrtc/audio_processing',
'proto_out_dir': '<(proto_out_protected)',
},
'includes': [ '../../build/protoc.gypi', ],
},
],
}],
],
}

View File

@@ -25,11 +25,15 @@
#include "processing_component.h"
#include "splitting_filter.h"
#include "voice_detection_impl.h"
#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
// Files generated at build-time by the protobuf compiler.
#ifdef WEBRTC_ANDROID
#include "external/webrtc/src/modules/audio_processing/debug.pb.h"
#else
#include "webrtc/audio_processing/debug.pb.h"
#endif
#endif // WEBRTC_AUDIOPROC_DEBUG_DUMP
namespace webrtc {
AudioProcessing* AudioProcessing::Create(int id) {
@@ -60,11 +64,13 @@ AudioProcessingImpl::AudioProcessingImpl(int id)
level_estimator_(NULL),
noise_suppression_(NULL),
voice_detection_(NULL),
debug_file_(FileWrapper::Create()),
event_msg_(new audioproc::Event()),
crit_(CriticalSectionWrapper::CreateCriticalSection()),
render_audio_(NULL),
capture_audio_(NULL),
#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
debug_file_(FileWrapper::Create()),
event_msg_(new audioproc::Event()),
#endif
sample_rate_hz_(kSampleRate16kHz),
split_sample_rate_hz_(kSampleRate16kHz),
samples_per_channel_(sample_rate_hz_ / 100),
@@ -104,14 +110,11 @@ AudioProcessingImpl::~AudioProcessingImpl() {
component_list_.pop_front();
}
#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
if (debug_file_->Open()) {
debug_file_->CloseFile();
}
delete debug_file_;
debug_file_ = NULL;
delete event_msg_;
event_msg_ = NULL;
#endif
delete crit_;
crit_ = NULL;
@@ -167,12 +170,14 @@ int AudioProcessingImpl::InitializeLocked() {
}
}
#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
if (debug_file_->Open()) {
int err = WriteInitMessage();
if (err != kNoError) {
return err;
}
}
#endif
return kNoError;
}
@@ -268,6 +273,7 @@ int AudioProcessingImpl::ProcessStream(AudioFrame* frame) {
return kBadDataLengthError;
}
#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
if (debug_file_->Open()) {
event_msg_->set_type(audioproc::Event::STREAM);
audioproc::Stream* msg = event_msg_->mutable_stream();
@@ -279,6 +285,7 @@ int AudioProcessingImpl::ProcessStream(AudioFrame* frame) {
msg->set_drift(echo_cancellation_->stream_drift_samples());
msg->set_level(gain_control_->stream_analog_level());
}
#endif
capture_audio_->DeinterleaveFrom(frame);
@@ -359,6 +366,7 @@ int AudioProcessingImpl::ProcessStream(AudioFrame* frame) {
capture_audio_->InterleaveTo(frame, data_changed);
#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
if (debug_file_->Open()) {
audioproc::Stream* msg = event_msg_->mutable_stream();
const size_t data_size = sizeof(int16_t) *
@@ -370,6 +378,7 @@ int AudioProcessingImpl::ProcessStream(AudioFrame* frame) {
return err;
}
}
#endif
was_stream_delay_set_ = false;
return kNoError;
@@ -395,6 +404,7 @@ int AudioProcessingImpl::AnalyzeReverseStream(AudioFrame* frame) {
return kBadDataLengthError;
}
#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
if (debug_file_->Open()) {
event_msg_->set_type(audioproc::Event::REVERSE_STREAM);
audioproc::ReverseStream* msg = event_msg_->mutable_reverse_stream();
@@ -407,6 +417,7 @@ int AudioProcessingImpl::AnalyzeReverseStream(AudioFrame* frame) {
return err;
}
}
#endif
render_audio_->DeinterleaveFrom(frame);
@@ -474,6 +485,7 @@ int AudioProcessingImpl::StartDebugRecording(
return kNullPointerError;
}
#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
// Stop any ongoing recording.
if (debug_file_->Open()) {
if (debug_file_->CloseFile() == -1) {
@@ -490,20 +502,26 @@ int AudioProcessingImpl::StartDebugRecording(
if (err != kNoError) {
return err;
}
return kNoError;
#else
return kUnsupportedFunctionError;
#endif // WEBRTC_AUDIOPROC_DEBUG_DUMP
}
int AudioProcessingImpl::StopDebugRecording() {
CriticalSectionScoped crit_scoped(*crit_);
#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
// We just return if recording hasn't started.
if (debug_file_->Open()) {
if (debug_file_->CloseFile() == -1) {
return kFileError;
}
}
return kNoError;
#else
return kUnsupportedFunctionError;
#endif // WEBRTC_AUDIOPROC_DEBUG_DUMP
}
EchoCancellation* AudioProcessingImpl::echo_cancellation() const {
@@ -601,6 +619,47 @@ WebRtc_Word32 AudioProcessingImpl::ChangeUniqueId(const WebRtc_Word32 id) {
return kNoError;
}
bool AudioProcessingImpl::stream_data_changed() const {
int enabled_count = 0;
std::list<ProcessingComponent*>::const_iterator it;
for (it = component_list_.begin(); it != component_list_.end(); it++) {
if ((*it)->is_component_enabled()) {
enabled_count++;
}
}
// Data is unchanged if no components are enabled, or if only level_estimator_
// or voice_detection_ is enabled.
if (enabled_count == 0) {
return false;
} else if (enabled_count == 1) {
if (level_estimator_->is_enabled() || voice_detection_->is_enabled()) {
return false;
}
} else if (enabled_count == 2) {
if (level_estimator_->is_enabled() && voice_detection_->is_enabled()) {
return false;
}
}
return true;
}
bool AudioProcessingImpl::synthesis_needed(bool stream_data_changed) const {
return (stream_data_changed && sample_rate_hz_ == kSampleRate32kHz);
}
bool AudioProcessingImpl::analysis_needed(bool stream_data_changed) const {
if (!stream_data_changed && !voice_detection_->is_enabled()) {
// Only level_estimator_ is enabled.
return false;
} else if (sample_rate_hz_ == kSampleRate32kHz) {
// Something besides level_estimator_ is enabled, and we have super-wb.
return true;
}
return false;
}
#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
int AudioProcessingImpl::WriteMessageToDebugFile() {
int32_t size = event_msg_->ByteSize();
if (size <= 0) {
@@ -644,44 +703,5 @@ int AudioProcessingImpl::WriteInitMessage() {
return kNoError;
}
bool AudioProcessingImpl::stream_data_changed() const {
int enabled_count = 0;
std::list<ProcessingComponent*>::const_iterator it;
for (it = component_list_.begin(); it != component_list_.end(); it++) {
if ((*it)->is_component_enabled()) {
enabled_count++;
}
}
// Data is unchanged if no components are enabled, or if only level_estimator_
// or voice_detection_ is enabled.
if (enabled_count == 0) {
return false;
} else if (enabled_count == 1) {
if (level_estimator_->is_enabled() || voice_detection_->is_enabled()) {
return false;
}
} else if (enabled_count == 2) {
if (level_estimator_->is_enabled() && voice_detection_->is_enabled()) {
return false;
}
}
return true;
}
bool AudioProcessingImpl::synthesis_needed(bool stream_data_changed) const {
return (stream_data_changed && sample_rate_hz_ == kSampleRate32kHz);
}
bool AudioProcessingImpl::analysis_needed(bool stream_data_changed) const {
if (!stream_data_changed && !voice_detection_->is_enabled()) {
// Only level_estimator_ is enabled.
return false;
} else if (sample_rate_hz_ == kSampleRate32kHz) {
// Something besides level_estimator_ is enabled, and we have super-wb.
return true;
}
return false;
}
#endif // WEBRTC_AUDIOPROC_DEBUG_DUMP
} // namespace webrtc

View File

@@ -11,15 +11,14 @@
#ifndef WEBRTC_MODULES_AUDIO_PROCESSING_MAIN_SOURCE_AUDIO_PROCESSING_IMPL_H_
#define WEBRTC_MODULES_AUDIO_PROCESSING_MAIN_SOURCE_AUDIO_PROCESSING_IMPL_H_
#include "audio_processing.h"
#include <list>
#include <string>
#include "audio_processing.h"
#include "scoped_ptr.h"
namespace webrtc {
namespace audioproc {
class Event;
} // audioproc
class AudioBuffer;
class CriticalSectionWrapper;
class EchoCancellationImpl;
@@ -32,6 +31,14 @@ class NoiseSuppressionImpl;
class ProcessingComponent;
class VoiceDetectionImpl;
#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
namespace audioproc {
class Event;
} // namespace audioproc
#endif
class AudioProcessingImpl : public AudioProcessing {
public:
enum {
@@ -79,8 +86,6 @@ class AudioProcessingImpl : public AudioProcessing {
virtual WebRtc_Word32 ChangeUniqueId(const WebRtc_Word32 id);
private:
int WriteMessageToDebugFile();
int WriteInitMessage();
bool stream_data_changed() const;
bool synthesis_needed(bool stream_data_changed) const;
bool analysis_needed(bool stream_data_changed) const;
@@ -96,14 +101,18 @@ class AudioProcessingImpl : public AudioProcessing {
VoiceDetectionImpl* voice_detection_;
std::list<ProcessingComponent*> component_list_;
FileWrapper* debug_file_;
audioproc::Event* event_msg_; // Protobuf message.
std::string event_str_; // Memory for protobuf serialization.
CriticalSectionWrapper* crit_;
AudioBuffer* render_audio_;
AudioBuffer* capture_audio_;
#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
// TODO(andrew): make this more graceful. Ideally we would split this stuff
// out into a separate class with an "enabled" and "disabled" implementation.
int WriteMessageToDebugFile();
int WriteInitMessage();
scoped_ptr<FileWrapper> debug_file_;
scoped_ptr<audioproc::Event> event_msg_; // Protobuf message.
std::string event_str_; // Memory for protobuf serialization.
#endif
int sample_rate_hz_;
int split_sample_rate_hz_;
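
The TODO above sketches the graceful alternative: split the dump machinery
into "enabled" and "disabled" implementations behind one interface, so the
#ifdefs collapse to a single construction site. A hypothetical shape (not
part of this commit; all names invented, error values illustrative):

    enum { kNoError = 0, kUnsupportedFunctionError = -2 };

    class DebugDumpWriter {
     public:
      virtual ~DebugDumpWriter() {}
      virtual int Start(const char* filename) = 0;
      virtual int Stop() = 0;
    };

    #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
    // Would own the FileWrapper and audioproc::Event and write real records;
    // the bodies here are placeholders.
    class ProtoDebugDumpWriter : public DebugDumpWriter {
     public:
      virtual int Start(const char* filename) { return kNoError; }
      virtual int Stop() { return kNoError; }
    };
    typedef ProtoDebugDumpWriter DefaultDebugDumpWriter;
    #else
    // Builds with no protobuf dependency at all.
    class NullDebugDumpWriter : public DebugDumpWriter {
     public:
      virtual int Start(const char*) { return kUnsupportedFunctionError; }
      virtual int Stop() { return kUnsupportedFunctionError; }
    };
    typedef NullDebugDumpWriter DefaultDebugDumpWriter;
    #endif

AudioProcessingImpl could then hold a scoped_ptr<DebugDumpWriter> and keep
its method bodies free of conditional compilation.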

View File

@@ -970,6 +970,34 @@ TEST_F(ApmTest, SplittingFilter) {
EXPECT_FALSE(FrameDataAreEqual(*frame_, frame_copy));
}
// TODO(andrew): expand test to verify output.
TEST_F(ApmTest, DebugDump) {
const std::string filename = webrtc::test::OutputPath() + "debug.aec";
EXPECT_EQ(apm_->kNullPointerError, apm_->StartDebugRecording(NULL));
#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
// Stopping without having started should be OK.
EXPECT_EQ(apm_->kNoError, apm_->StopDebugRecording());
EXPECT_EQ(apm_->kNoError, apm_->StartDebugRecording(filename.c_str()));
EXPECT_EQ(apm_->kNoError, apm_->AnalyzeReverseStream(revframe_));
EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
EXPECT_EQ(apm_->kNoError, apm_->StopDebugRecording());
// Verify the file has been written.
ASSERT_TRUE(fopen(filename.c_str(), "r") != NULL);
// Clean it up.
ASSERT_EQ(0, remove(filename.c_str()));
#else
EXPECT_EQ(apm_->kUnsupportedFunctionError,
apm_->StartDebugRecording(filename.c_str()));
EXPECT_EQ(apm_->kUnsupportedFunctionError, apm_->StopDebugRecording());
// Verify the file has NOT been written.
ASSERT_TRUE(fopen(filename.c_str(), "r") == NULL);
#endif // WEBRTC_AUDIOPROC_DEBUG_DUMP
}
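
On the "expand test to verify output" TODO: a hedged sketch of how the test
could read the dump back, assuming the record layout WriteMessageToDebugFile
produces (an int32 byte count followed by the serialized audioproc::Event).
ReadDumpEvent is a hypothetical helper, not an existing API:

    #include <cstdint>
    #include <cstdio>
    #include <string>

    #include "webrtc/audio_processing/debug.pb.h"  // Generated at build time.

    // Reads one size-prefixed Event record; returns false at EOF or on a
    // short or unparsable record.
    static bool ReadDumpEvent(std::FILE* file, webrtc::audioproc::Event* msg) {
      int32_t size = 0;
      if (std::fread(&size, sizeof(size), 1, file) != 1 || size <= 0) {
        return false;
      }
      std::string bytes(static_cast<size_t>(size), '\0');
      if (std::fread(&bytes[0], 1, bytes.size(), file) != bytes.size()) {
        return false;
      }
      return msg->ParseFromString(bytes);
    }

The test could then open filename, expect an INIT event first, and count one
REVERSE_STREAM and one STREAM record for the two frames pushed through above.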
TEST_F(ApmTest, Process) {
GOOGLE_PROTOBUF_VERIFY_VERSION;
webrtc::audioproc::OutputData output_data;