Remove the requirement to call set_sample_rate_hz and friends.

Instead, have ProcessStream transparently handle changes to the stream
audio parameters (sample rate and channels). This removes two locks
per 10 ms ProcessStream call taken by VoiceEngine (four total with the
audio level indicator).
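
For reference, a minimal sketch of the new calling pattern (hedged: error
handling omitted, and it relies only on the AudioProcessing and AudioFrame
members that appear in the diffs below):

    // A mid-stream change of rate or channel count no longer requires
    // set_sample_rate_hz()/set_num_channels(); ProcessStream() picks the
    // new parameters up from the frame and reinitializes internally.
    webrtc::AudioProcessing* apm = webrtc::AudioProcessing::Create(0);
    webrtc::AudioFrame frame;
    frame.num_channels_ = 1;
    frame.sample_rate_hz_ = 16000;
    frame.samples_per_channel_ = 160;  // 10 ms at 16 kHz.
    apm->ProcessStream(&frame);        // Processed as 16 kHz mono.

    // After e.g. a codec switch, simply pass the updated parameters.
    frame.num_channels_ = 2;
    frame.sample_rate_hz_ = 32000;
    frame.samples_per_channel_ = 320;  // 10 ms at 32 kHz.
    apm->ProcessStream(&frame);        // Triggers one internal re-init.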

Also, prepare for future improvements by having the splitting filter take
a length parameter. This will allow it to work at different sample
rates. Remove the useless splitting_filter wrapper; see the sketch below.
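
A hedged sketch of the new length-taking QMF interface (the 10 ms
super-wideband sizes are illustrative; the six-word filter states follow the
filter_state[0..5] accesses visible in WebRtcSpl_AllPassQMF below):

    int16_t in_data[320] = {0};  // 10 ms at 32 kHz, full band.
    int16_t low_band[160];       // Lower half-band, 10 ms at 16 kHz.
    int16_t high_band[160];      // Upper half-band.
    int16_t out_data[320];
    int32_t a_state1[6] = {0};
    int32_t a_state2[6] = {0};
    int32_t s_state1[6] = {0};
    int32_t s_state2[6] = {0};

    // in_data_length is the full-band length; each band receives half of it.
    WebRtcSpl_AnalysisQMF(in_data, 320, low_band, high_band, a_state1, a_state2);
    // band_length is the per-band length.
    WebRtcSpl_SynthesisQMF(low_band, high_band, 160, out_data, s_state1, s_state2);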

TESTED=voe_cmd_test with audio processing enabled and switching between
codecs; unit tests.

R=aluebs@webrtc.org, bjornv@webrtc.org, turaj@webrtc.org, xians@webrtc.org

Review URL: https://webrtc-codereview.appspot.com/3949004

git-svn-id: http://webrtc.googlecode.com/svn/trunk@5346 4adac7df-926f-26a2-2b94-8c16560cd09d
Author: andrew@webrtc.org
Date:   2014-01-07 17:45:09 +00:00
Parent: 39669c5c8f
Commit: 60730cfe3c

21 changed files with 308 additions and 466 deletions


@@ -996,12 +996,14 @@ void WebRtcSpl_UpsampleBy2(const int16_t* in, int16_t len,
  * END OF RESAMPLING FUNCTIONS
  ************************************************************/
 void WebRtcSpl_AnalysisQMF(const int16_t* in_data,
+                           int in_data_length,
                            int16_t* low_band,
                            int16_t* high_band,
                            int32_t* filter_state1,
                            int32_t* filter_state2);
 void WebRtcSpl_SynthesisQMF(const int16_t* low_band,
                             const int16_t* high_band,
+                            int band_length,
                             int16_t* out_data,
                             int32_t* filter_state1,
                             int32_t* filter_state2);


@@ -15,10 +15,12 @@
 #include "webrtc/common_audio/signal_processing/include/signal_processing_library.h"
 
-// Number of samples in a low/high-band frame.
+#include <assert.h>
+
+// Maximum number of samples in a low/high-band frame.
 enum
 {
-    kBandFrameLength = 160
+    kMaxBandFrameLength = 240  // 10 ms at 48 kHz.
 };
 
 // QMF filter coefficients in Q16.
@@ -116,34 +118,37 @@ void WebRtcSpl_AllPassQMF(int32_t* in_data, int16_t data_length,
     filter_state[5] = out_data[data_length - 1];  // y[N-1], becomes y[-1] next time
 }
 
-void WebRtcSpl_AnalysisQMF(const int16_t* in_data, int16_t* low_band,
-                           int16_t* high_band, int32_t* filter_state1,
-                           int32_t* filter_state2)
+void WebRtcSpl_AnalysisQMF(const int16_t* in_data, int in_data_length,
+                           int16_t* low_band, int16_t* high_band,
+                           int32_t* filter_state1, int32_t* filter_state2)
 {
     int16_t i;
     int16_t k;
     int32_t tmp;
-    int32_t half_in1[kBandFrameLength];
-    int32_t half_in2[kBandFrameLength];
-    int32_t filter1[kBandFrameLength];
-    int32_t filter2[kBandFrameLength];
+    int32_t half_in1[kMaxBandFrameLength];
+    int32_t half_in2[kMaxBandFrameLength];
+    int32_t filter1[kMaxBandFrameLength];
+    int32_t filter2[kMaxBandFrameLength];
+    const int band_length = in_data_length / 2;
+    assert(in_data_length % 2 == 0);
+    assert(band_length <= kMaxBandFrameLength);
 
     // Split even and odd samples. Also shift them to Q10.
-    for (i = 0, k = 0; i < kBandFrameLength; i++, k += 2)
+    for (i = 0, k = 0; i < band_length; i++, k += 2)
     {
         half_in2[i] = WEBRTC_SPL_LSHIFT_W32((int32_t)in_data[k], 10);
         half_in1[i] = WEBRTC_SPL_LSHIFT_W32((int32_t)in_data[k + 1], 10);
     }
 
     // All pass filter even and odd samples, independently.
-    WebRtcSpl_AllPassQMF(half_in1, kBandFrameLength, filter1, WebRtcSpl_kAllPassFilter1,
-                         filter_state1);
-    WebRtcSpl_AllPassQMF(half_in2, kBandFrameLength, filter2, WebRtcSpl_kAllPassFilter2,
-                         filter_state2);
+    WebRtcSpl_AllPassQMF(half_in1, band_length, filter1,
+                         WebRtcSpl_kAllPassFilter1, filter_state1);
+    WebRtcSpl_AllPassQMF(half_in2, band_length, filter2,
+                         WebRtcSpl_kAllPassFilter2, filter_state2);
 
     // Take the sum and difference of filtered version of odd and even
     // branches to get upper & lower band.
-    for (i = 0; i < kBandFrameLength; i++)
+    for (i = 0; i < band_length; i++)
     {
         tmp = filter1[i] + filter2[i] + 1024;
         tmp = WEBRTC_SPL_RSHIFT_W32(tmp, 11);
@@ -156,20 +161,21 @@ void WebRtcSpl_AnalysisQMF(const int16_t* in_data, int16_t* low_band,
 }
 
 void WebRtcSpl_SynthesisQMF(const int16_t* low_band, const int16_t* high_band,
-                            int16_t* out_data, int32_t* filter_state1,
-                            int32_t* filter_state2)
+                            int band_length, int16_t* out_data,
+                            int32_t* filter_state1, int32_t* filter_state2)
 {
     int32_t tmp;
-    int32_t half_in1[kBandFrameLength];
-    int32_t half_in2[kBandFrameLength];
-    int32_t filter1[kBandFrameLength];
-    int32_t filter2[kBandFrameLength];
+    int32_t half_in1[kMaxBandFrameLength];
+    int32_t half_in2[kMaxBandFrameLength];
+    int32_t filter1[kMaxBandFrameLength];
+    int32_t filter2[kMaxBandFrameLength];
     int16_t i;
     int16_t k;
+    assert(band_length <= kMaxBandFrameLength);
 
     // Obtain the sum and difference channels out of upper and lower-band channels.
     // Also shift to Q10 domain.
-    for (i = 0; i < kBandFrameLength; i++)
+    for (i = 0; i < band_length; i++)
     {
         tmp = (int32_t)low_band[i] + (int32_t)high_band[i];
         half_in1[i] = WEBRTC_SPL_LSHIFT_W32(tmp, 10);
@@ -178,15 +184,15 @@ void WebRtcSpl_SynthesisQMF(const int16_t* low_band, const int16_t* high_band,
     }
 
     // all-pass filter the sum and difference channels
-    WebRtcSpl_AllPassQMF(half_in1, kBandFrameLength, filter1, WebRtcSpl_kAllPassFilter2,
-                         filter_state1);
-    WebRtcSpl_AllPassQMF(half_in2, kBandFrameLength, filter2, WebRtcSpl_kAllPassFilter1,
-                         filter_state2);
+    WebRtcSpl_AllPassQMF(half_in1, band_length, filter1,
+                         WebRtcSpl_kAllPassFilter2, filter_state1);
+    WebRtcSpl_AllPassQMF(half_in2, band_length, filter2,
+                         WebRtcSpl_kAllPassFilter1, filter_state2);
 
     // The filtered signals are even and odd samples of the output. Combine
     // them. The signals are Q10 should shift them back to Q0 and take care of
     // saturation.
-    for (i = 0, k = 0; i < kBandFrameLength; i++)
+    for (i = 0, k = 0; i < band_length; i++)
     {
         tmp = WEBRTC_SPL_RSHIFT_W32(filter2[i] + 512, 10);
         out_data[k++] = WebRtcSpl_SatW32ToW16(tmp);


@@ -552,8 +552,8 @@ int16_t WebRtcIsac_Encode(ISACStruct* ISAC_main_inst,
   }
 
   if (instISAC->encoderSamplingRateKHz == kIsacSuperWideband) {
-    WebRtcSpl_AnalysisQMF(speech_in_ptr, speechInLB, speechInUB,
-                          instISAC->analysisFBState1,
+    WebRtcSpl_AnalysisQMF(speech_in_ptr, SWBFRAMESAMPLES_10ms, speechInLB,
+                          speechInUB, instISAC->analysisFBState1,
                           instISAC->analysisFBState2);
 
     /* Convert from fixed to floating point. */
@@ -1314,7 +1314,7 @@ static int16_t Decode(ISACStruct* ISAC_main_inst,
     speechIdx = 0;
     while (speechIdx < numSamplesLB) {
       WebRtcSpl_SynthesisQMF(&outFrameLB[speechIdx], &outFrameUB[speechIdx],
-                             &decoded[(speechIdx << 1)],
+                             FRAMESAMPLES_10ms, &decoded[(speechIdx << 1)],
                              instISAC->synthesisFBState1,
                              instISAC->synthesisFBState2);


@@ -165,10 +165,6 @@ bool AudioConferenceMixerImpl::Init()
     if(SetOutputFrequency(kDefaultFrequency) == -1)
         return false;
 
-    // Assume mono.
-    if (!SetNumLimiterChannels(1))
-        return false;
-
     if(_limiter->gain_control()->set_mode(GainControl::kFixedDigital) !=
         _limiter->kNoError)
         return false;
@@ -326,9 +322,6 @@ int32_t AudioConferenceMixerImpl::Process()
                      std::max(MaxNumChannels(additionalFramesList),
                               MaxNumChannels(rampOutList)));
 
-        if (!SetNumLimiterChannels(num_mixed_channels))
-            retval = -1;
-
         mixedAudio->UpdateFrame(-1, _timeStamp, NULL, 0, _outputFrequency,
                                 AudioFrame::kNormalSpeech,
                                 AudioFrame::kVadPassive, num_mixed_channels);
@@ -434,13 +427,6 @@ int32_t AudioConferenceMixerImpl::SetOutputFrequency(
     const Frequency frequency)
 {
     CriticalSectionScoped cs(_crit.get());
-    const int error = _limiter->set_sample_rate_hz(frequency);
-    if(error != _limiter->kNoError)
-    {
-        WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, _id,
-                     "Error from AudioProcessing: %d", error);
-        return -1;
-    }
 
     _outputFrequency = frequency;
     _sampleSize = (_outputFrequency*kProcessPeriodicityInMs) / 1000;
@@ -455,24 +441,6 @@ AudioConferenceMixerImpl::OutputFrequency() const
     return _outputFrequency;
 }
 
-bool AudioConferenceMixerImpl::SetNumLimiterChannels(int numChannels)
-{
-    if(_limiter->num_input_channels() != numChannels)
-    {
-        const int error = _limiter->set_num_channels(numChannels,
-                                                     numChannels);
-        if(error != _limiter->kNoError)
-        {
-            WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, _id,
-                         "Error from AudioProcessing: %d", error);
-            assert(false);
-            return false;
-        }
-    }
-    return true;
-}
-
 int32_t AudioConferenceMixerImpl::RegisterMixerStatusCallback(
     AudioMixerStatusReceiver& mixerStatusCallback,
     const uint32_t amountOf10MsBetweenCallbacks)


@@ -89,10 +89,6 @@ private:
     int32_t SetOutputFrequency(const Frequency frequency);
     Frequency OutputFrequency() const;
 
-    // Must be called whenever an audio frame indicates the number of channels
-    // has changed.
-    bool SetNumLimiterChannels(int numChannels);
-
     // Fills mixList with the AudioFrames pointers that should be used when
     // mixing. Fills mixParticipantList with ParticipantStatistics for the
     // participants who's AudioFrames are inside mixList.


@@ -67,8 +67,6 @@
         'level_estimator_impl.h',
         'noise_suppression_impl.cc',
         'noise_suppression_impl.h',
-        'splitting_filter.cc',
-        'splitting_filter.h',
         'processing_component.cc',
         'processing_component.h',
         'utility/delay_estimator.c',


@@ -12,6 +12,7 @@
 
 #include <assert.h>
 
+#include "webrtc/common_audio/signal_processing/include/signal_processing_library.h"
 #include "webrtc/modules/audio_processing/audio_buffer.h"
 #include "webrtc/modules/audio_processing/echo_cancellation_impl_wrapper.h"
 #include "webrtc/modules/audio_processing/echo_control_mobile_impl.h"
@@ -20,9 +21,9 @@
 #include "webrtc/modules/audio_processing/level_estimator_impl.h"
 #include "webrtc/modules/audio_processing/noise_suppression_impl.h"
 #include "webrtc/modules/audio_processing/processing_component.h"
-#include "webrtc/modules/audio_processing/splitting_filter.h"
 #include "webrtc/modules/audio_processing/voice_detection_impl.h"
 #include "webrtc/modules/interface/module_common_types.h"
+#include "webrtc/system_wrappers/interface/compile_assert.h"
 #include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
 #include "webrtc/system_wrappers/interface/file_wrapper.h"
 #include "webrtc/system_wrappers/interface/logging.h"
@@ -36,9 +37,23 @@
 #endif
 #endif  // WEBRTC_AUDIOPROC_DEBUG_DUMP
 
+static const int kChunkSizeMs = 10;
+
+#define RETURN_ON_ERR(expr)  \
+  do {                       \
+    int err = expr;          \
+    if (err != kNoError) {   \
+      return err;            \
+    }                        \
+  } while (0)
+
 namespace webrtc {
+
+// Throughout webrtc, it's assumed that success is represented by zero.
+COMPILE_ASSERT(AudioProcessing::kNoError == 0, no_error_must_be_zero);
+
 AudioProcessing* AudioProcessing::Create(int id) {
-  AudioProcessingImpl* apm = new AudioProcessingImpl(id);
+  AudioProcessingImpl* apm = new AudioProcessingImpl();
   if (apm->Initialize() != kNoError) {
     delete apm;
     apm = NULL;
@@ -50,9 +65,8 @@ AudioProcessing* AudioProcessing::Create(int id) {
 int32_t AudioProcessing::TimeUntilNextProcess() { return -1; }
 int32_t AudioProcessing::Process() { return -1; }
 
-AudioProcessingImpl::AudioProcessingImpl(int id)
-  : id_(id),
-    echo_cancellation_(NULL),
+AudioProcessingImpl::AudioProcessingImpl()
+  : echo_cancellation_(NULL),
     echo_control_mobile_(NULL),
     gain_control_(NULL),
     high_pass_filter_(NULL),
@@ -68,7 +82,7 @@ AudioProcessingImpl::AudioProcessingImpl(int id)
 #endif
     sample_rate_hz_(kSampleRate16kHz),
     split_sample_rate_hz_(kSampleRate16kHz),
-    samples_per_channel_(sample_rate_hz_ / 100),
+    samples_per_channel_(kChunkSizeMs * sample_rate_hz_ / 1000),
     stream_delay_ms_(0),
     delay_offset_ms_(0),
    was_stream_delay_set_(false),
@@ -157,8 +171,6 @@ int AudioProcessingImpl::InitializeLocked() {
   capture_audio_ = new AudioBuffer(num_input_channels_,
                                    samples_per_channel_);
 
-  was_stream_delay_set_ = false;
-
   // Initialize all components.
   std::list<ProcessingComponent*>::iterator it;
   for (it = component_list_.begin(); it != component_list_.end(); ++it) {
@@ -272,6 +284,49 @@ int AudioProcessingImpl::num_output_channels() const {
   return num_output_channels_;
 }
 
+int AudioProcessingImpl::MaybeInitializeLocked(int sample_rate_hz,
+    int num_input_channels, int num_output_channels, int num_reverse_channels) {
+  if (sample_rate_hz == sample_rate_hz_ &&
+      num_input_channels == num_input_channels_ &&
+      num_output_channels == num_output_channels_ &&
+      num_reverse_channels == num_reverse_channels_) {
+    return kNoError;
+  }
+  if (sample_rate_hz != kSampleRate8kHz &&
+      sample_rate_hz != kSampleRate16kHz &&
+      sample_rate_hz != kSampleRate32kHz) {
+    return kBadSampleRateError;
+  }
+  if (num_output_channels > num_input_channels) {
+    return kBadNumberChannelsError;
+  }
+  // Only mono and stereo supported currently.
+  if (num_input_channels > 2 || num_input_channels < 1 ||
+      num_output_channels > 2 || num_output_channels < 1 ||
+      num_reverse_channels > 2 || num_reverse_channels < 1) {
+    return kBadNumberChannelsError;
+  }
+  if (echo_control_mobile_->is_enabled() && sample_rate_hz > kSampleRate16kHz) {
+    LOG(LS_ERROR) << "AECM only supports 16 or 8 kHz sample rates";
+    return kUnsupportedComponentError;
+  }
+
+  sample_rate_hz_ = sample_rate_hz;
+  samples_per_channel_ = kChunkSizeMs * sample_rate_hz / 1000;
+  num_input_channels_ = num_input_channels;
+  num_output_channels_ = num_output_channels;
+  num_reverse_channels_ = num_reverse_channels;
+  if (sample_rate_hz_ == kSampleRate32kHz) {
+    split_sample_rate_hz_ = kSampleRate16kHz;
+  } else {
+    split_sample_rate_hz_ = sample_rate_hz_;
+  }
+
+  return InitializeLocked();
+}
+
 int AudioProcessingImpl::ProcessStream(AudioFrame* frame) {
   CriticalSectionScoped crit_scoped(crit_);
   int err = kNoError;
@@ -279,15 +334,10 @@ int AudioProcessingImpl::ProcessStream(AudioFrame* frame) {
   if (frame == NULL) {
     return kNullPointerError;
   }
 
-  if (frame->sample_rate_hz_ != sample_rate_hz_) {
-    return kBadSampleRateError;
-  }
-
-  if (frame->num_channels_ != num_input_channels_) {
-    return kBadNumberChannelsError;
-  }
+  // TODO(ajm): We now always set the output channels equal to the input
+  // channels here. Remove the ability to downmix entirely.
+  RETURN_ON_ERR(MaybeInitializeLocked(frame->sample_rate_hz_,
+      frame->num_channels_, frame->num_channels_, num_reverse_channels_));
 
   if (frame->samples_per_channel_ != samples_per_channel_) {
     return kBadDataLengthError;
   }
@@ -318,7 +368,8 @@ int AudioProcessingImpl::ProcessStream(AudioFrame* frame) {
   if (analysis_needed(data_processed)) {
     for (int i = 0; i < num_output_channels_; i++) {
       // Split into a low and high band.
-      SplittingFilterAnalysis(capture_audio_->data(i),
+      WebRtcSpl_AnalysisQMF(capture_audio_->data(i),
+                            capture_audio_->samples_per_channel(),
                             capture_audio_->low_pass_split_data(i),
                             capture_audio_->high_pass_split_data(i),
                             capture_audio_->analysis_filter_state1(i),
@@ -369,8 +420,9 @@ int AudioProcessingImpl::ProcessStream(AudioFrame* frame) {
   if (synthesis_needed(data_processed)) {
     for (int i = 0; i < num_output_channels_; i++) {
       // Recombine low and high bands.
-      SplittingFilterSynthesis(capture_audio_->low_pass_split_data(i),
+      WebRtcSpl_SynthesisQMF(capture_audio_->low_pass_split_data(i),
                              capture_audio_->high_pass_split_data(i),
+                             capture_audio_->samples_per_split_channel(),
                              capture_audio_->data(i),
                              capture_audio_->synthesis_filter_state1(i),
                              capture_audio_->synthesis_filter_state2(i));
@@ -403,25 +455,21 @@ int AudioProcessingImpl::ProcessStream(AudioFrame* frame) {
   return kNoError;
 }
 
+// TODO(ajm): Have AnalyzeReverseStream accept sample rates not matching the
+// primary stream and convert ourselves rather than having the user manage it.
+// We can be smarter and use the splitting filter when appropriate. Similarly,
+// perform downmixing here.
 int AudioProcessingImpl::AnalyzeReverseStream(AudioFrame* frame) {
   CriticalSectionScoped crit_scoped(crit_);
   int err = kNoError;
 
   if (frame == NULL) {
     return kNullPointerError;
   }
   if (frame->sample_rate_hz_ != sample_rate_hz_) {
     return kBadSampleRateError;
   }
-
-  if (frame->num_channels_ != num_reverse_channels_) {
-    return kBadNumberChannelsError;
-  }
-
-  if (frame->samples_per_channel_ != samples_per_channel_) {
-    return kBadDataLengthError;
-  }
+  RETURN_ON_ERR(MaybeInitializeLocked(sample_rate_hz_, num_input_channels_,
+      num_output_channels_, frame->num_channels_));
 
 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
   if (debug_file_->Open()) {
@@ -440,11 +488,11 @@ int AudioProcessingImpl::AnalyzeReverseStream(AudioFrame* frame) {
   render_audio_->DeinterleaveFrom(frame);
 
-  // TODO(ajm): turn the splitting filter into a component?
   if (sample_rate_hz_ == kSampleRate32kHz) {
     for (int i = 0; i < num_reverse_channels_; i++) {
       // Split into low and high band.
-      SplittingFilterAnalysis(render_audio_->data(i),
+      WebRtcSpl_AnalysisQMF(render_audio_->data(i),
+                            render_audio_->samples_per_channel(),
                             render_audio_->low_pass_split_data(i),
                             render_audio_->high_pass_split_data(i),
                             render_audio_->analysis_filter_state1(i),
@@ -614,9 +662,6 @@ VoiceDetection* AudioProcessingImpl::voice_detection() const {
 }
 
 int32_t AudioProcessingImpl::ChangeUniqueId(const int32_t id) {
-  CriticalSectionScoped crit_scoped(crit_);
-  id_ = id;
-
   return kNoError;
 }


@@ -47,7 +47,7 @@ class AudioProcessingImpl : public AudioProcessing {
     kSampleRate32kHz = 32000
   };
 
-  explicit AudioProcessingImpl(int id);
+  AudioProcessingImpl();
   virtual ~AudioProcessingImpl();
 
   CriticalSectionWrapper* crit() const;
@@ -57,7 +57,6 @@ class AudioProcessingImpl : public AudioProcessing {
 
   // AudioProcessing methods.
   virtual int Initialize() OVERRIDE;
-  virtual int InitializeLocked();
   virtual void SetExtraOptions(const Config& config) OVERRIDE;
   virtual int EnableExperimentalNs(bool enable) OVERRIDE;
   virtual bool experimental_ns_enabled() const OVERRIDE {
@@ -92,14 +91,17 @@ class AudioProcessingImpl : public AudioProcessing {
   // Module methods.
   virtual int32_t ChangeUniqueId(const int32_t id) OVERRIDE;
 
+ protected:
+  virtual int InitializeLocked();
+
  private:
+  int MaybeInitializeLocked(int sample_rate_hz, int num_input_channels,
+      int num_output_channels, int num_reverse_channels);
   bool is_data_processed() const;
   bool interleave_needed(bool is_data_processed) const;
   bool synthesis_needed(bool is_data_processed) const;
   bool analysis_needed(bool is_data_processed) const;
 
-  int id_;
   EchoCancellationImplWrapper* echo_cancellation_;
   EchoControlMobileImpl* echo_control_mobile_;
   GainControlImpl* gain_control_;


@@ -0,0 +1,70 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/audio_processing/audio_processing_impl.h"
+
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "webrtc/modules/audio_processing/test/test_utils.h"
+#include "webrtc/modules/interface/module_common_types.h"
+
+using ::testing::Invoke;
+using ::testing::Return;
+
+namespace webrtc {
+
+class MockInitialize : public AudioProcessingImpl {
+ public:
+  MOCK_METHOD0(InitializeLocked, int());
+
+  int RealInitializeLocked() { return AudioProcessingImpl::InitializeLocked(); }
+};
+
+TEST(AudioProcessingImplTest, AudioParameterChangeTriggersInit) {
+  MockInitialize mock;
+  ON_CALL(mock, InitializeLocked())
+      .WillByDefault(Invoke(&mock, &MockInitialize::RealInitializeLocked));
+
+  EXPECT_CALL(mock, InitializeLocked()).Times(1);
+  mock.Initialize();
+
+  AudioFrame frame;
+  // Call with the default parameters; there should be no init.
+  frame.num_channels_ = 1;
+  SetFrameSampleRate(&frame, 16000);
+  EXPECT_CALL(mock, InitializeLocked())
+      .Times(0);
+  EXPECT_EQ(kNoErr, mock.ProcessStream(&frame));
+  EXPECT_EQ(kNoErr, mock.AnalyzeReverseStream(&frame));
+
+  // New sample rate. (Only impacts ProcessStream).
+  SetFrameSampleRate(&frame, 32000);
+  EXPECT_CALL(mock, InitializeLocked())
+      .Times(1);
+  EXPECT_EQ(kNoErr, mock.ProcessStream(&frame));
+
+  // New number of channels.
+  frame.num_channels_ = 2;
+  EXPECT_CALL(mock, InitializeLocked())
+      .Times(2);
+  EXPECT_EQ(kNoErr, mock.ProcessStream(&frame));
+  // ProcessStream sets num_channels_ == num_output_channels.
+  frame.num_channels_ = 2;
+  EXPECT_EQ(kNoErr, mock.AnalyzeReverseStream(&frame));
+
+  // A new sample rate passed to AnalyzeReverseStream should be an error and
+  // not cause an init.
+  SetFrameSampleRate(&frame, 16000);
+  EXPECT_CALL(mock, InitializeLocked())
+      .Times(0);
+  EXPECT_EQ(mock.kBadSampleRateError, mock.AnalyzeReverseStream(&frame));
+}
+
+}  // namespace webrtc


@@ -336,8 +336,6 @@ int EchoCancellationImpl::Initialize() {
     return err;
   }
 
-  was_stream_drift_set_ = false;
-
   return apm_->kNoError;
 }


@@ -91,6 +91,7 @@ int GainControlImpl::AnalyzeCaptureAudio(AudioBuffer* audio) {
   int err = apm_->kNoError;
 
   if (mode_ == kAdaptiveAnalog) {
+    capture_levels_.assign(num_handles(), analog_capture_level_);
     for (int i = 0; i < num_handles(); i++) {
       Handle* my_handle = static_cast<Handle*>(handle(i));
       err = WebRtcAgc_AddMic(
@@ -114,7 +115,6 @@ int GainControlImpl::AnalyzeCaptureAudio(AudioBuffer* audio) {
           audio->low_pass_split_data(i),
           audio->high_pass_split_data(i),
           static_cast<int16_t>(audio->samples_per_split_channel()),
-          //capture_levels_[i],
           analog_capture_level_,
           &capture_level_out);
 
@@ -190,13 +190,6 @@ int GainControlImpl::set_stream_analog_level(int level) {
   if (level < minimum_capture_level_ || level > maximum_capture_level_) {
     return apm_->kBadParameterError;
   }
-
-  if (mode_ == kAdaptiveAnalog) {
-    if (level != analog_capture_level_) {
-      // The analog level has been changed; update our internal levels.
-      capture_levels_.assign(num_handles(), level);
-    }
-  }
   analog_capture_level_ = level;
 
   return apm_->kNoError;
@@ -309,11 +302,6 @@ int GainControlImpl::Initialize() {
     return err;
   }
 
-  analog_capture_level_ =
-      (maximum_capture_level_ - minimum_capture_level_) >> 1;
-  capture_levels_.assign(num_handles(), analog_capture_level_);
-  was_analog_level_set_ = false;
-
   return apm_->kNoError;
 }


@@ -89,11 +89,6 @@ struct DelayCorrection {
 //
 // Usage example, omitting error checking:
 // AudioProcessing* apm = AudioProcessing::Create(0);
-// apm->set_sample_rate_hz(32000); // Super-wideband processing.
-//
-// // Mono capture and stereo render.
-// apm->set_num_channels(1, 1);
-// apm->set_num_reverse_channels(2);
 //
 // apm->high_pass_filter()->Enable(true);
 //
@@ -145,11 +140,9 @@ class AudioProcessing : public Module {
   // Initializes internal states, while retaining all user settings. This
   // should be called before beginning to process a new audio stream. However,
   // it is not necessary to call before processing the first stream after
-  // creation.
-  //
-  // set_sample_rate_hz(), set_num_channels() and set_num_reverse_channels()
-  // will trigger a full initialization if the settings are changed from their
-  // existing values. Otherwise they are no-ops.
+  // creation. It is also not necessary to call if the audio parameters (sample
+  // rate and number of channels) have changed. Passing updated parameters
+  // directly to |ProcessStream()| and |AnalyzeReverseStream()| is permissible.
   virtual int Initialize() = 0;
 
   // Pass down additional options which don't have explicit setters. This
@@ -159,11 +152,15 @@ class AudioProcessing : public Module {
   virtual int EnableExperimentalNs(bool enable) = 0;
   virtual bool experimental_ns_enabled() const = 0;
 
+  // DEPRECATED: It is now possible to modify the sample rate directly in a call
+  // to |ProcessStream|.
   // Sets the sample |rate| in Hz for both the primary and reverse audio
   // streams. 8000, 16000 or 32000 Hz are permitted.
   virtual int set_sample_rate_hz(int rate) = 0;
   virtual int sample_rate_hz() const = 0;
 
+  // DEPRECATED: It is now possible to modify the number of channels directly in
+  // a call to |ProcessStream|.
   // Sets the number of channels for the primary audio stream. Input frames must
   // contain a number of channels given by |input_channels|, while output frames
   // will be returned with number of channels given by |output_channels|.
@@ -171,6 +168,8 @@ class AudioProcessing : public Module {
   virtual int num_input_channels() const = 0;
   virtual int num_output_channels() const = 0;
 
+  // DEPRECATED: It is now possible to modify the number of channels directly in
+  // a call to |AnalyzeReverseStream|.
   // Sets the number of channels for the reverse audio stream. Input frames must
   // contain a number of channels given by |channels|.
   virtual int set_num_reverse_channels(int channels) = 0;
@@ -184,8 +183,8 @@ class AudioProcessing : public Module {
   // with the stream_ tag which is needed should be called after processing.
   //
   // The |sample_rate_hz_|, |num_channels_|, and |samples_per_channel_|
-  // members of |frame| must be valid, and correspond to settings supplied
-  // to APM.
+  // members of |frame| must be valid. If changed from the previous call to this
+  // method, it will trigger an initialization.
   virtual int ProcessStream(AudioFrame* frame) = 0;
 
   // Analyzes a 10 ms |frame| of the reverse direction audio stream. The frame
@@ -199,7 +198,8 @@ class AudioProcessing : public Module {
   // chances are you don't need to use it.
   //
   // The |sample_rate_hz_|, |num_channels_|, and |samples_per_channel_|
-  // members of |frame| must be valid.
+  // members of |frame| must be valid. |sample_rate_hz_| must correspond to
+  // |sample_rate_hz()|.
   //
   // TODO(ajm): add const to input; requires an implementation fix.
   virtual int AnalyzeReverseStream(AudioFrame* frame) = 0;


@@ -1,33 +0,0 @@
-/*
- *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-#include "webrtc/common_audio/signal_processing/include/signal_processing_library.h"
-#include "webrtc/modules/audio_processing/splitting_filter.h"
-
-namespace webrtc {
-
-void SplittingFilterAnalysis(const int16_t* in_data,
-                             int16_t* low_band,
-                             int16_t* high_band,
-                             int32_t* filter_state1,
-                             int32_t* filter_state2)
-{
-    WebRtcSpl_AnalysisQMF(in_data, low_band, high_band, filter_state1, filter_state2);
-}
-
-void SplittingFilterSynthesis(const int16_t* low_band,
-                              const int16_t* high_band,
-                              int16_t* out_data,
-                              int32_t* filt_state1,
-                              int32_t* filt_state2)
-{
-    WebRtcSpl_SynthesisQMF(low_band, high_band, out_data, filt_state1, filt_state2);
-}
-
-}  // namespace webrtc


@@ -1,63 +0,0 @@
-/*
- *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-#ifndef WEBRTC_MODULES_AUDIO_PROCESSING_MAIN_SOURCE_SPLITTING_FILTER_H_
-#define WEBRTC_MODULES_AUDIO_PROCESSING_MAIN_SOURCE_SPLITTING_FILTER_H_
-
-#include "webrtc/common_audio/signal_processing/include/signal_processing_library.h"
-#include "webrtc/typedefs.h"
-
-namespace webrtc {
-
-/*
- * SplittingFilterbank_analysisQMF(...)
- *
- * Splits a super-wb signal into two subbands: 0-8 kHz and 8-16 kHz.
- *
- * Input:
- *    - in_data    : super-wb audio signal
- *
- * Input & Output:
- *    - filt_state1: Filter state for first all-pass filter
- *    - filt_state2: Filter state for second all-pass filter
- *
- * Output:
- *    - low_band   : The signal from the 0-4 kHz band
- *    - high_band  : The signal from the 4-8 kHz band
- */
-void SplittingFilterAnalysis(const int16_t* in_data,
-                             int16_t* low_band,
-                             int16_t* high_band,
-                             int32_t* filt_state1,
-                             int32_t* filt_state2);
-
-/*
- * SplittingFilterbank_synthesisQMF(...)
- *
- * Combines the two subbands (0-8 and 8-16 kHz) into a super-wb signal.
- *
- * Input:
- *    - low_band   : The signal with the 0-8 kHz band
- *    - high_band  : The signal with the 8-16 kHz band
- *
- * Input & Output:
- *    - filt_state1: Filter state for first all-pass filter
- *    - filt_state2: Filter state for second all-pass filter
- *
- * Output:
- *    - out_data   : super-wb speech signal
- */
-void SplittingFilterSynthesis(const int16_t* low_band,
-                              const int16_t* high_band,
-                              int16_t* out_data,
-                              int32_t* filt_state1,
-                              int32_t* filt_state2);
-
-}  // namespace webrtc
-
-#endif  // WEBRTC_MODULES_AUDIO_PROCESSING_MAIN_SOURCE_SPLITTING_FILTER_H_


@@ -15,6 +15,7 @@
 #include "webrtc/common_audio/signal_processing/include/signal_processing_library.h"
 #include "webrtc/modules/audio_processing/include/audio_processing.h"
+#include "webrtc/modules/audio_processing/test/test_utils.h"
 #include "webrtc/modules/interface/module_common_types.h"
 #include "webrtc/system_wrappers/interface/event_wrapper.h"
 #include "webrtc/system_wrappers/interface/scoped_ptr.h"
@@ -230,11 +231,10 @@ class ApmTest : public ::testing::Test {
   void EnableAllComponents();
   bool ReadFrame(FILE* file, AudioFrame* frame);
   void ProcessWithDefaultStreamParameters(AudioFrame* frame);
-  template <typename F>
-  void ChangeTriggersInit(F f, AudioProcessing* ap, int initial_value,
-                          int changed_value);
   void ProcessDelayVerificationTest(int delay_ms, int system_delay_ms,
                                     int delay_min, int delay_max);
+  void TestChangingChannels(int num_channels,
+                            AudioProcessing::Error expected_return);
 
   const std::string output_path_;
   const std::string ref_path_;
@@ -330,17 +330,8 @@ std::string ApmTest::OutputFilePath(std::string name,
 void ApmTest::Init(int sample_rate_hz, int num_reverse_channels,
                    int num_input_channels, int num_output_channels,
                    bool open_output_file) {
-  ASSERT_EQ(apm_->kNoError, apm_->Initialize());
-
-  // Handles error checking of the parameters as well. No need to repeat it.
-  ASSERT_EQ(apm_->kNoError, apm_->set_sample_rate_hz(sample_rate_hz));
-  ASSERT_EQ(apm_->kNoError, apm_->set_num_channels(num_input_channels,
-                                                   num_output_channels));
-  ASSERT_EQ(apm_->kNoError,
-            apm_->set_num_reverse_channels(num_reverse_channels));
-
   // We always use 10 ms frames.
-  const int samples_per_channel = sample_rate_hz / 100;
+  const int samples_per_channel = kChunkSizeMs * sample_rate_hz / 1000;
   frame_->samples_per_channel_ = samples_per_channel;
   frame_->num_channels_ = num_input_channels;
   frame_->sample_rate_hz_ = sample_rate_hz;
@@ -348,6 +339,12 @@ void ApmTest::Init(int sample_rate_hz, int num_reverse_channels,
   revframe_->num_channels_ = num_reverse_channels;
   revframe_->sample_rate_hz_ = sample_rate_hz;
 
+  // Make one process call to ensure the audio parameters are set. It might
+  // result in a stream error which we can safely ignore.
+  int err = apm_->ProcessStream(frame_);
+  ASSERT_TRUE(err == kNoErr || err == apm_->kStreamParameterNotSetError);
+  ASSERT_EQ(apm_->kNoError, apm_->Initialize());
+
   if (far_file_) {
     ASSERT_EQ(0, fclose(far_file_));
   }
@@ -378,7 +375,6 @@ void ApmTest::Init(int sample_rate_hz, int num_reverse_channels,
 
 void ApmTest::EnableAllComponents() {
 #if defined(WEBRTC_AUDIOPROC_FIXED_PROFILE)
-  EXPECT_EQ(apm_->kNoError, apm_->set_sample_rate_hz(16000));
   EXPECT_EQ(apm_->kNoError, apm_->echo_control_mobile()->Enable(true));
 
   EXPECT_EQ(apm_->kNoError,
@@ -442,50 +438,6 @@ void ApmTest::ProcessWithDefaultStreamParameters(AudioFrame* frame) {
   EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame));
 }
 
-template <typename F>
-void ApmTest::ChangeTriggersInit(F f, AudioProcessing* ap, int initial_value,
-                                 int changed_value) {
-  EnableAllComponents();
-  Init(16000, 2, 2, 2, false);
-  SetFrameTo(frame_, 1000);
-  AudioFrame frame_copy;
-  frame_copy.CopyFrom(*frame_);
-  ProcessWithDefaultStreamParameters(frame_);
-  // Verify the processing has actually changed the frame.
-  EXPECT_FALSE(FrameDataAreEqual(*frame_, frame_copy));
-
-  // Test that a change in value triggers an init.
-  f(apm_.get(), changed_value);
-  f(apm_.get(), initial_value);
-  ProcessWithDefaultStreamParameters(&frame_copy);
-  EXPECT_TRUE(FrameDataAreEqual(*frame_, frame_copy));
-
-  apm_->Initialize();
-  SetFrameTo(frame_, 1000);
-  AudioFrame initial_frame;
-  initial_frame.CopyFrom(*frame_);
-  ProcessWithDefaultStreamParameters(frame_);
-  ProcessWithDefaultStreamParameters(frame_);
-  // Verify the processing has actually changed the frame.
-  EXPECT_FALSE(FrameDataAreEqual(*frame_, initial_frame));
-
-  frame_copy.CopyFrom(initial_frame);
-  apm_->Initialize();
-  ProcessWithDefaultStreamParameters(&frame_copy);
-  // Verify an init here would result in different output.
-  apm_->Initialize();
-  ProcessWithDefaultStreamParameters(&frame_copy);
-  EXPECT_FALSE(FrameDataAreEqual(*frame_, frame_copy));
-
-  frame_copy.CopyFrom(initial_frame);
-  apm_->Initialize();
-  ProcessWithDefaultStreamParameters(&frame_copy);
-  // Test that the same value does not trigger an init.
-  f(apm_.get(), initial_value);
-  ProcessWithDefaultStreamParameters(&frame_copy);
-  EXPECT_TRUE(FrameDataAreEqual(*frame_, frame_copy));
-}
-
 void ApmTest::ProcessDelayVerificationTest(int delay_ms, int system_delay_ms,
                                            int delay_min, int delay_max) {
   // The |revframe_| and |frame_| should include the proper frame information,
@@ -579,7 +531,6 @@ TEST_F(ApmTest, StreamParameters) {
             apm_->ProcessStream(frame_));
 
   // -- Missing AGC level --
-  EXPECT_EQ(apm_->kNoError, apm_->Initialize());
   EXPECT_EQ(apm_->kNoError, apm_->gain_control()->Enable(true));
   EXPECT_EQ(apm_->kStreamParameterNotSetError, apm_->ProcessStream(frame_));
 
@@ -602,8 +553,8 @@ TEST_F(ApmTest, StreamParameters) {
             apm_->echo_cancellation()->enable_drift_compensation(false));
 
   // -- Missing delay --
-  EXPECT_EQ(apm_->kNoError, apm_->Initialize());
   EXPECT_EQ(apm_->kNoError, apm_->echo_cancellation()->Enable(true));
+  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
   EXPECT_EQ(apm_->kStreamParameterNotSetError, apm_->ProcessStream(frame_));
 
   // Resets after successful ProcessStream().
@@ -622,7 +573,6 @@ TEST_F(ApmTest, StreamParameters) {
   EXPECT_EQ(apm_->kNoError, apm_->gain_control()->Enable(false));
 
   // -- Missing drift --
-  EXPECT_EQ(apm_->kNoError, apm_->Initialize());
   EXPECT_EQ(apm_->kStreamParameterNotSetError, apm_->ProcessStream(frame_));
 
   // Resets after successful ProcessStream().
@@ -639,14 +589,12 @@ TEST_F(ApmTest, StreamParameters) {
   EXPECT_EQ(apm_->kStreamParameterNotSetError, apm_->ProcessStream(frame_));
 
   // -- No stream parameters --
-  EXPECT_EQ(apm_->kNoError, apm_->Initialize());
   EXPECT_EQ(apm_->kNoError,
             apm_->AnalyzeReverseStream(revframe_));
   EXPECT_EQ(apm_->kStreamParameterNotSetError,
             apm_->ProcessStream(frame_));
 
   // -- All there --
-  EXPECT_EQ(apm_->kNoError, apm_->Initialize());
   EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(100));
   apm_->echo_cancellation()->set_stream_drift_samples(0);
   EXPECT_EQ(apm_->kNoError,
@@ -678,65 +626,38 @@ TEST_F(ApmTest, DelayOffsetWithLimitsIsSetProperly) {
   EXPECT_EQ(50, apm_->stream_delay_ms());
 }
 
+void ApmTest::TestChangingChannels(int num_channels,
+                                   AudioProcessing::Error expected_return) {
+  frame_->num_channels_ = num_channels;
+  EXPECT_EQ(expected_return, apm_->ProcessStream(frame_));
+  EXPECT_EQ(expected_return, apm_->AnalyzeReverseStream(frame_));
+}
+
 TEST_F(ApmTest, Channels) {
-  // Testing number of invalid channels
-  EXPECT_EQ(apm_->kBadParameterError, apm_->set_num_channels(0, 1));
-  EXPECT_EQ(apm_->kBadParameterError, apm_->set_num_channels(1, 0));
-  EXPECT_EQ(apm_->kBadParameterError, apm_->set_num_channels(3, 1));
-  EXPECT_EQ(apm_->kBadParameterError, apm_->set_num_channels(1, 3));
-  EXPECT_EQ(apm_->kBadParameterError, apm_->set_num_reverse_channels(0));
-  EXPECT_EQ(apm_->kBadParameterError, apm_->set_num_reverse_channels(3));
-
-  // Testing number of valid channels
+  // Testing number of invalid channels.
+  TestChangingChannels(0, apm_->kBadNumberChannelsError);
+  TestChangingChannels(3, apm_->kBadNumberChannelsError);
+  // Testing number of valid channels.
   for (int i = 1; i < 3; i++) {
-    for (int j = 1; j < 3; j++) {
-      if (j > i) {
-        EXPECT_EQ(apm_->kBadParameterError, apm_->set_num_channels(i, j));
-      } else {
-        EXPECT_EQ(apm_->kNoError, apm_->set_num_channels(i, j));
-        EXPECT_EQ(j, apm_->num_output_channels());
-      }
-    }
+    TestChangingChannels(i, kNoErr);
    EXPECT_EQ(i, apm_->num_input_channels());
-    EXPECT_EQ(apm_->kNoError, apm_->set_num_reverse_channels(i));
    EXPECT_EQ(i, apm_->num_reverse_channels());
  }
 }
 
 TEST_F(ApmTest, SampleRates) {
   // Testing invalid sample rates
-  EXPECT_EQ(apm_->kBadParameterError, apm_->set_sample_rate_hz(10000));
+  SetFrameSampleRate(frame_, 10000);
+  EXPECT_EQ(apm_->kBadSampleRateError, apm_->ProcessStream(frame_));
   // Testing valid sample rates
   int fs[] = {8000, 16000, 32000};
   for (size_t i = 0; i < sizeof(fs) / sizeof(*fs); i++) {
-    EXPECT_EQ(apm_->kNoError, apm_->set_sample_rate_hz(fs[i]));
+    SetFrameSampleRate(frame_, fs[i]);
+    EXPECT_EQ(kNoErr, apm_->ProcessStream(frame_));
     EXPECT_EQ(fs[i], apm_->sample_rate_hz());
   }
 }
 
-void SetSampleRate(AudioProcessing* ap, int value) {
-  EXPECT_EQ(ap->kNoError, ap->set_sample_rate_hz(value));
-}
-
-void SetNumReverseChannels(AudioProcessing* ap, int value) {
-  EXPECT_EQ(ap->kNoError, ap->set_num_reverse_channels(value));
-}
-
-void SetNumOutputChannels(AudioProcessing* ap, int value) {
-  EXPECT_EQ(ap->kNoError, ap->set_num_channels(2, value));
-}
-
-TEST_F(ApmTest, SampleRateChangeTriggersInit) {
-  ChangeTriggersInit(SetSampleRate, apm_.get(), 16000, 8000);
-}
-
-TEST_F(ApmTest, ReverseChannelChangeTriggersInit) {
-  ChangeTriggersInit(SetNumReverseChannels, apm_.get(), 2, 1);
-}
-
-TEST_F(ApmTest, ChannelChangeTriggersInit) {
-  ChangeTriggersInit(SetNumOutputChannels, apm_.get(), 2, 1);
-}
-
 TEST_F(ApmTest, EchoCancellation) {
   EXPECT_EQ(apm_->kNoError,
             apm_->echo_cancellation()->enable_drift_compensation(true));
@@ -876,13 +797,16 @@ TEST_F(ApmTest, EchoCancellationReportsCorrectDelays) {
 
 TEST_F(ApmTest, EchoControlMobile) {
   // AECM won't use super-wideband.
-  EXPECT_EQ(apm_->kNoError, apm_->set_sample_rate_hz(32000));
+  SetFrameSampleRate(frame_, 32000);
+  EXPECT_EQ(kNoErr, apm_->ProcessStream(frame_));
   EXPECT_EQ(apm_->kBadSampleRateError,
             apm_->echo_control_mobile()->Enable(true));
-  EXPECT_EQ(apm_->kNoError, apm_->set_sample_rate_hz(16000));
+  SetFrameSampleRate(frame_, 16000);
+  EXPECT_EQ(kNoErr, apm_->ProcessStream(frame_));
   EXPECT_EQ(apm_->kNoError,
             apm_->echo_control_mobile()->Enable(true));
-  EXPECT_EQ(apm_->kUnsupportedComponentError, apm_->set_sample_rate_hz(32000));
+  SetFrameSampleRate(frame_, 32000);
+  EXPECT_EQ(apm_->kUnsupportedComponentError, apm_->ProcessStream(frame_));
 
   // Turn AECM on (and AEC off)
   Init(16000, 2, 2, 2, false);
@@ -1088,7 +1012,6 @@ TEST_F(ApmTest, LevelEstimator) {
   // Run this test in wideband; in super-wb, the splitting filter distorts the
   // audio enough to cause deviation from the expectation for small values.
-  EXPECT_EQ(apm_->kNoError, apm_->set_sample_rate_hz(16000));
   frame_->samples_per_channel_ = 160;
   frame_->num_channels_ = 2;
   frame_->sample_rate_hz_ = 16000;
@@ -1214,19 +1137,6 @@ TEST_F(ApmTest, VoiceDetection) {
   // TODO(bjornv): Add tests for streamed voice; stream_has_voice()
 }
 
-TEST_F(ApmTest, VerifyDownMixing) {
-  for (size_t i = 0; i < kSampleRatesSize; i++) {
-    Init(kSampleRates[i], 2, 2, 1, false);
-    SetFrameTo(frame_, 1000, 2000);
-    AudioFrame mono_frame;
-    mono_frame.samples_per_channel_ = frame_->samples_per_channel_;
-    mono_frame.num_channels_ = 1;
-    SetFrameTo(&mono_frame, 1500);
-    EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
-    EXPECT_TRUE(FrameDataAreEqual(*frame_, mono_frame));
-  }
-}
-
 TEST_F(ApmTest, AllProcessingDisabledByDefault) {
   EXPECT_FALSE(apm_->echo_cancellation()->is_enabled());
   EXPECT_FALSE(apm_->echo_control_mobile()->is_enabled());
@@ -1322,7 +1232,6 @@ TEST_F(ApmTest, SplittingFilter) {
   EXPECT_EQ(apm_->kNoError, apm_->voice_detection()->Enable(false));
 
   // 5. Not using super-wb.
-  EXPECT_EQ(apm_->kNoError, apm_->set_sample_rate_hz(16000));
   frame_->samples_per_channel_ = 160;
   frame_->num_channels_ = 2;
   frame_->sample_rate_hz_ = 16000;
@@ -1343,7 +1252,6 @@ TEST_F(ApmTest, SplittingFilter) {
   // Check the test is valid. We should have distortion from the filter
   // when AEC is enabled (which won't affect the audio).
-  EXPECT_EQ(apm_->kNoError, apm_->set_sample_rate_hz(32000));
   frame_->samples_per_channel_ = 320;
   frame_->num_channels_ = 2;
   frame_->sample_rate_hz_ = 32000;
@@ -1366,8 +1274,8 @@ TEST_F(ApmTest, DebugDump) {
   EXPECT_EQ(apm_->kNoError, apm_->StopDebugRecording());
 
   EXPECT_EQ(apm_->kNoError, apm_->StartDebugRecording(filename.c_str()));
-  EXPECT_EQ(apm_->kNoError, apm_->AnalyzeReverseStream(revframe_));
   EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
+  EXPECT_EQ(apm_->kNoError, apm_->AnalyzeReverseStream(revframe_));
   EXPECT_EQ(apm_->kNoError, apm_->StopDebugRecording());
 
   // Verify the file has been written.
@@ -1436,19 +1344,16 @@ TEST_F(ApmTest, DISABLED_ON_ANDROID(Process)) {
     // Write the desired tests to the protobuf reference file.
     for (size_t i = 0; i < kChannelsSize; i++) {
       for (size_t j = 0; j < kChannelsSize; j++) {
-        // We can't have more output than input channels.
-        for (size_t k = 0; k <= j; k++) {
        for (size_t l = 0; l < kProcessSampleRatesSize; l++) {
          webrtc::audioproc::Test* test = ref_data.add_test();
          test->set_num_reverse_channels(kChannels[i]);
          test->set_num_input_channels(kChannels[j]);
-          test->set_num_output_channels(kChannels[k]);
+          test->set_num_output_channels(kChannels[j]);
          test->set_sample_rate(kProcessSampleRates[l]);
        }
      }
    }
  }
-  }
 
   EnableAllComponents();
@@ -1456,6 +1361,11 @@ TEST_F(ApmTest, DISABLED_ON_ANDROID(Process)) {
     printf("Running test %d of %d...\n", i + 1, ref_data.test_size());
 
     webrtc::audioproc::Test* test = ref_data.mutable_test(i);
+    // TODO(ajm): We no longer allow different input and output channels. Skip
+    // these tests for now, but they should be removed from the set.
+    if (test->num_input_channels() != test->num_output_channels())
+      continue;
+
     Init(test->sample_rate(), test->num_reverse_channels(),
          test->num_input_channels(), test->num_output_channels(), true);


@@ -232,9 +232,6 @@ void void_main(int argc, char* argv[]) {
ASSERT_EQ(1, sscanf(argv[i], "%d", &sample_rate_hz)); ASSERT_EQ(1, sscanf(argv[i], "%d", &sample_rate_hz));
samples_per_channel = sample_rate_hz / 100; samples_per_channel = sample_rate_hz / 100;
ASSERT_EQ(apm->kNoError,
apm->set_sample_rate_hz(sample_rate_hz));
} else if (strcmp(argv[i], "-ch") == 0) { } else if (strcmp(argv[i], "-ch") == 0) {
i++; i++;
ASSERT_LT(i + 1, argc) << "Specify number of channels after -ch"; ASSERT_LT(i + 1, argc) << "Specify number of channels after -ch";
@@ -242,18 +239,11 @@ void void_main(int argc, char* argv[]) {
i++; i++;
ASSERT_EQ(1, sscanf(argv[i], "%d", &num_capture_output_channels)); ASSERT_EQ(1, sscanf(argv[i], "%d", &num_capture_output_channels));
ASSERT_EQ(apm->kNoError,
apm->set_num_channels(num_capture_input_channels,
num_capture_output_channels));
} else if (strcmp(argv[i], "-rch") == 0) { } else if (strcmp(argv[i], "-rch") == 0) {
i++; i++;
ASSERT_LT(i, argc) << "Specify number of channels after -rch"; ASSERT_LT(i, argc) << "Specify number of channels after -rch";
ASSERT_EQ(1, sscanf(argv[i], "%d", &num_render_channels)); ASSERT_EQ(1, sscanf(argv[i], "%d", &num_render_channels));
ASSERT_EQ(apm->kNoError,
apm->set_num_reverse_channels(num_render_channels));
} else if (strcmp(argv[i], "-aec") == 0) { } else if (strcmp(argv[i], "-aec") == 0) {
ASSERT_EQ(apm->kNoError, apm->echo_cancellation()->Enable(true)); ASSERT_EQ(apm->kNoError, apm->echo_cancellation()->Enable(true));
ASSERT_EQ(apm->kNoError, ASSERT_EQ(apm->kNoError,
@@ -637,9 +627,6 @@ void void_main(int argc, char* argv[]) {
const Init msg = event_msg.init(); const Init msg = event_msg.init();
ASSERT_TRUE(msg.has_sample_rate()); ASSERT_TRUE(msg.has_sample_rate());
ASSERT_EQ(apm->kNoError,
apm->set_sample_rate_hz(msg.sample_rate()));
ASSERT_TRUE(msg.has_device_sample_rate()); ASSERT_TRUE(msg.has_device_sample_rate());
ASSERT_EQ(apm->kNoError, ASSERT_EQ(apm->kNoError,
apm->echo_cancellation()->set_device_sample_rate_hz( apm->echo_cancellation()->set_device_sample_rate_hz(
@@ -647,13 +634,7 @@ void void_main(int argc, char* argv[]) {
ASSERT_TRUE(msg.has_num_input_channels()); ASSERT_TRUE(msg.has_num_input_channels());
ASSERT_TRUE(msg.has_num_output_channels()); ASSERT_TRUE(msg.has_num_output_channels());
ASSERT_EQ(apm->kNoError,
apm->set_num_channels(msg.num_input_channels(),
msg.num_output_channels()));
ASSERT_TRUE(msg.has_num_reverse_channels()); ASSERT_TRUE(msg.has_num_reverse_channels());
ASSERT_EQ(apm->kNoError,
apm->set_num_reverse_channels(msg.num_reverse_channels()));
samples_per_channel = msg.sample_rate() / 100; samples_per_channel = msg.sample_rate() / 100;
far_frame.sample_rate_hz_ = msg.sample_rate(); far_frame.sample_rate_hz_ = msg.sample_rate();
@@ -833,9 +814,6 @@ void void_main(int argc, char* argv[]) {
1, 1,
event_file)); event_file));
ASSERT_EQ(apm->kNoError,
apm->set_sample_rate_hz(sample_rate_hz));
ASSERT_EQ(apm->kNoError, ASSERT_EQ(apm->kNoError,
apm->echo_cancellation()->set_device_sample_rate_hz( apm->echo_cancellation()->set_device_sample_rate_hz(
device_sample_rate_hz)); device_sample_rate_hz));
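Note: with the explicit setters gone, replaying an Init event reduces to refreshing the frame descriptors; ProcessStream() picks up the new format on its next call. A condensed sketch of the surviving logic (near_frame is assumed to exist alongside far_frame in process_test.cc; field names as in AudioFrame):

  samples_per_channel = msg.sample_rate() / 100;
  far_frame.sample_rate_hz_ = msg.sample_rate();
  far_frame.samples_per_channel_ = samples_per_channel;
  far_frame.num_channels_ = msg.num_reverse_channels();
  near_frame.sample_rate_hz_ = msg.sample_rate();
  near_frame.samples_per_channel_ = samples_per_channel;
  near_frame.num_channels_ = msg.num_input_channels();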

View File

@@ -0,0 +1,21 @@
/*
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "webrtc/modules/audio_processing/include/audio_processing.h"
#include "webrtc/modules/interface/module_common_types.h"
static const int kChunkSizeMs = 10;
static const webrtc::AudioProcessing::Error kNoErr =
webrtc::AudioProcessing::kNoError;
static void SetFrameSampleRate(webrtc::AudioFrame* frame, int sample_rate_hz) {
frame->sample_rate_hz_ = sample_rate_hz;
frame->samples_per_channel_ = kChunkSizeMs * sample_rate_hz / 1000;
}
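A minimal usage sketch for the new helpers, assuming an initialized AudioProcessing* apm and a gtest context; the point is that no set_sample_rate_hz() call is needed between the two rate changes:

  webrtc::AudioFrame frame;
  frame.num_channels_ = 1;
  SetFrameSampleRate(&frame, 16000);  // 160 samples per channel.
  EXPECT_EQ(kNoErr, apm->ProcessStream(&frame));
  // Switch rates mid-stream; ProcessStream() reconfigures itself.
  SetFrameSampleRate(&frame, 32000);  // 320 samples per channel.
  EXPECT_EQ(kNoErr, apm->ProcessStream(&frame));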

View File

@@ -152,8 +152,10 @@
'audio_coding/neteq4/mock/mock_payload_splitter.h', 'audio_coding/neteq4/mock/mock_payload_splitter.h',
'audio_processing/aec/system_delay_unittest.cc', 'audio_processing/aec/system_delay_unittest.cc',
'audio_processing/aec/echo_cancellation_unittest.cc', 'audio_processing/aec/echo_cancellation_unittest.cc',
'audio_processing/audio_processing_impl_unittest.cc',
'audio_processing/echo_cancellation_impl_unittest.cc', 'audio_processing/echo_cancellation_impl_unittest.cc',
'audio_processing/test/audio_processing_unittest.cc', 'audio_processing/test/audio_processing_unittest.cc',
'audio_processing/test/test_utils.h',
'audio_processing/utility/delay_estimator_unittest.cc', 'audio_processing/utility/delay_estimator_unittest.cc',
'audio_processing/utility/ring_buffer_unittest.cc', 'audio_processing/utility/ring_buffer_unittest.cc',
'bitrate_controller/bitrate_controller_unittest.cc', 'bitrate_controller/bitrate_controller_unittest.cc',
@@ -167,9 +169,9 @@
'desktop_capture/screen_capturer_mock_objects.h', 'desktop_capture/screen_capturer_mock_objects.h',
'desktop_capture/screen_capturer_unittest.cc', 'desktop_capture/screen_capturer_unittest.cc',
'desktop_capture/window_capturer_unittest.cc', 'desktop_capture/window_capturer_unittest.cc',
"desktop_capture/win/cursor_unittest.cc", 'desktop_capture/win/cursor_unittest.cc',
"desktop_capture/win/cursor_unittest_resources.h", 'desktop_capture/win/cursor_unittest_resources.h',
"desktop_capture/win/cursor_unittest_resources.rc", 'desktop_capture/win/cursor_unittest_resources.rc',
'media_file/source/media_file_unittest.cc', 'media_file/source/media_file_unittest.cc',
'module_common_types_unittest.cc', 'module_common_types_unittest.cc',
'pacing/paced_sender_unittest.cc', 'pacing/paced_sender_unittest.cc',

View File

@@ -695,10 +695,12 @@ int32_t Channel::GetAudioFrame(int32_t id, AudioFrame& audioFrame)
// Store speech type for dead-or-alive detection // Store speech type for dead-or-alive detection
_outputSpeechType = audioFrame.speech_type_; _outputSpeechType = audioFrame.speech_type_;
// Perform far-end AudioProcessing module processing on the received signal if (_rxApmIsEnabled) {
if (_rxApmIsEnabled) int err = rx_audioproc_->ProcessStream(&audioFrame);
{ if (err) {
ApmProcessRx(audioFrame); LOG(LS_ERROR) << "ProcessStream() error: " << err;
assert(false);
}
} }
float output_gain = 1.0f; float output_gain = 1.0f;
@@ -4446,29 +4448,13 @@ Channel::PrepareEncodeAndSend(int mixingFrequency)
InsertInbandDtmfTone(); InsertInbandDtmfTone();
if (_includeAudioLevelIndication) if (_includeAudioLevelIndication) {
{
if (rtp_audioproc_->set_sample_rate_hz(_audioFrame.sample_rate_hz_) !=
AudioProcessing::kNoError)
{
WEBRTC_TRACE(kTraceWarning, kTraceVoice,
VoEId(_instanceId, _channelId),
"Error setting AudioProcessing sample rate");
return -1;
}
if (rtp_audioproc_->set_num_channels(_audioFrame.num_channels_,
_audioFrame.num_channels_) !=
AudioProcessing::kNoError)
{
WEBRTC_TRACE(kTraceWarning, kTraceVoice,
VoEId(_instanceId, _channelId),
"Error setting AudioProcessing channels");
return -1;
}
// Performs level analysis only; does not affect the signal. // Performs level analysis only; does not affect the signal.
rtp_audioproc_->ProcessStream(&_audioFrame); int err = rtp_audioproc_->ProcessStream(&_audioFrame);
if (err) {
LOG(LS_ERROR) << "ProcessStream() error: " << err;
assert(false);
}
} }
return 0; return 0;
@@ -5210,25 +5196,6 @@ Channel::RegisterReceiveCodecsToRTPModule()
} }
} }
int Channel::ApmProcessRx(AudioFrame& frame) {
// Register the (possibly new) frame parameters.
if (rx_audioproc_->set_sample_rate_hz(frame.sample_rate_hz_) != 0) {
assert(false);
LOG_FERR1(LS_ERROR, set_sample_rate_hz, frame.sample_rate_hz_);
}
if (rx_audioproc_->set_num_channels(frame.num_channels_,
frame.num_channels_) != 0) {
assert(false);
LOG_FERR2(LS_ERROR, set_num_channels, frame.num_channels_,
frame.num_channels_);
}
if (rx_audioproc_->ProcessStream(&frame) != 0) {
assert(false);
LOG_FERR0(LS_ERROR, ProcessStream);
}
return 0;
}
int Channel::SetSecondarySendCodec(const CodecInst& codec, int Channel::SetSecondarySendCodec(const CodecInst& codec,
int red_payload_type) { int red_payload_type) {
// Sanity check for payload type. // Sanity check for payload type.
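The deleted ApmProcessRx() shows the old contract: every caller re-registered the frame format before each 10 ms call, taking locks in the process. Internally, ProcessStream() can absorb a format change with a cheap per-call comparison instead; a sketch of the idea (names hypothetical, not the actual AudioProcessingImpl members):

  struct StreamFormat {
    int sample_rate_hz;
    int num_channels;
  };

  // Returns true if the cached format had to be updated, in which case the
  // caller reinitializes its processing state before continuing.
  static bool UpdateFormat(const webrtc::AudioFrame& frame, StreamFormat* fmt) {
    if (frame.sample_rate_hz_ == fmt->sample_rate_hz &&
        frame.num_channels_ == fmt->num_channels) {
      return false;  // Fast path: nothing changed, no reinitialization.
    }
    fmt->sample_rate_hz = frame.sample_rate_hz_;
    fmt->num_channels = frame.num_channels_;
    return true;
  }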

View File

@@ -443,7 +443,6 @@ private:
void UpdatePacketDelay(uint32_t timestamp, void UpdatePacketDelay(uint32_t timestamp,
uint16_t sequenceNumber); uint16_t sequenceNumber);
void RegisterReceiveCodecsToRTPModule(); void RegisterReceiveCodecsToRTPModule();
int ApmProcessRx(AudioFrame& audioFrame);
int SetRedPayloadType(int red_payload_type); int SetRedPayloadType(int red_payload_type);

View File

@@ -1317,18 +1317,6 @@ int32_t TransmitMixer::MixOrReplaceAudioWithFile(
void TransmitMixer::ProcessAudio(int delay_ms, int clock_drift, void TransmitMixer::ProcessAudio(int delay_ms, int clock_drift,
int current_mic_level) { int current_mic_level) {
if (audioproc_->set_num_channels(_audioFrame.num_channels_,
_audioFrame.num_channels_) != 0) {
assert(false);
LOG_FERR2(LS_ERROR, set_num_channels, _audioFrame.num_channels_,
_audioFrame.num_channels_);
}
if (audioproc_->set_sample_rate_hz(_audioFrame.sample_rate_hz_) != 0) {
assert(false);
LOG_FERR1(LS_ERROR, set_sample_rate_hz, _audioFrame.sample_rate_hz_);
}
if (audioproc_->set_stream_delay_ms(delay_ms) != 0) { if (audioproc_->set_stream_delay_ms(delay_ms) != 0) {
// A redundant warning is reported in AudioDevice, which we've throttled // A redundant warning is reported in AudioDevice, which we've throttled
// to avoid flooding the logs. Relegate this one to LS_VERBOSE to avoid // to avoid flooding the logs. Relegate this one to LS_VERBOSE to avoid
@@ -1338,8 +1326,8 @@ void TransmitMixer::ProcessAudio(int delay_ms, int clock_drift,
GainControl* agc = audioproc_->gain_control(); GainControl* agc = audioproc_->gain_control();
if (agc->set_stream_analog_level(current_mic_level) != 0) { if (agc->set_stream_analog_level(current_mic_level) != 0) {
assert(false);
LOG_FERR1(LS_ERROR, set_stream_analog_level, current_mic_level); LOG_FERR1(LS_ERROR, set_stream_analog_level, current_mic_level);
assert(false);
} }
EchoCancellation* aec = audioproc_->echo_cancellation(); EchoCancellation* aec = audioproc_->echo_cancellation();
@@ -1349,8 +1337,8 @@ void TransmitMixer::ProcessAudio(int delay_ms, int clock_drift,
int err = audioproc_->ProcessStream(&_audioFrame); int err = audioproc_->ProcessStream(&_audioFrame);
if (err != 0) { if (err != 0) {
assert(false);
LOG(LS_ERROR) << "ProcessStream() error: " << err; LOG(LS_ERROR) << "ProcessStream() error: " << err;
assert(false);
} }
CriticalSectionScoped cs(&_critSect); CriticalSectionScoped cs(&_critSect);
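After this change the per-10 ms capture path is just the stream-state setters plus one ProcessStream() call; a condensed recap of the code above (the aec drift call is assumed from the elided lines and may differ in detail):

  audioproc_->set_stream_delay_ms(delay_ms);
  audioproc_->gain_control()->set_stream_analog_level(current_mic_level);
  audioproc_->echo_cancellation()->set_stream_drift_samples(clock_drift);
  // ProcessStream() now adapts to _audioFrame's rate and channel count.
  int err = audioproc_->ProcessStream(&_audioFrame);
  if (err != 0) {
    LOG(LS_ERROR) << "ProcessStream() error: " << err;
    assert(false);
  }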