NetEq4: Make the algorithm buffer a member variable

The algorithm buffer is no longer allocated on every call to
GetAudioInternal(); it is created once, re-created only when the sample
rate or channel count changes, and cleared before each use. This reduces
the allocation count by more than 100,000 for
NetEqDecodingTest.TestBitExactness. (A minimal sketch of the pattern
follows the change summary below.)

BUG=1363
TEST=out/Release/modules_unittests --gtest_filter=NetEqDecodingTest.TestBitExactness
R=turaj@webrtc.org

Review URL: https://webrtc-codereview.appspot.com/2119004

git-svn-id: http://webrtc.googlecode.com/svn/trunk@4651 4adac7df-926f-26a2-2b94-8c16560cd09d
Author: henrik.lundin@webrtc.org
Date:   2013-09-02 07:59:30 +00:00
Parent: cadf9040cb
Commit: c487c6abb0
2 changed files with 74 additions and 84 deletions
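
For reference, the refactoring pattern is sketched below in a minimal,
hypothetical form (ScratchBuffer and Processor are illustrative stand-ins,
not the real AudioMultiVector/NetEqImpl API): a buffer that used to be
constructed locally on every call becomes a member that is cleared and
reused, so the per-call heap allocations disappear.

    #include <cstdint>
    #include <vector>

    // Hypothetical stand-in for AudioMultiVector: one sample vector per channel.
    class ScratchBuffer {
     public:
      explicit ScratchBuffer(size_t channels) : channels_(channels) {}
      // clear() keeps each vector's capacity, so refilling does not reallocate.
      void Clear() {
        for (auto& channel : channels_) channel.clear();
      }
      std::vector<int16_t>& operator[](size_t i) { return channels_[i]; }

     private:
      std::vector<std::vector<int16_t>> channels_;
    };

    class Processor {
     public:
      explicit Processor(size_t channels) : scratch_(channels) {}

      void GetAudio() {
        // Before: ScratchBuffer scratch(channels);  // fresh allocation per call.
        // After: reuse the member; only its contents are reset.
        scratch_.Clear();
        // ... fill |scratch_| and push it into the sync/output buffer ...
      }

     private:
      ScratchBuffer scratch_;  // Lives as long as the object; reused every call.
    };

In the actual change the member is a raw pointer that is deleted and
re-created when the sample rate or channel count changes (see
SetSampleRateAndChannels in the diff below).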

View File

@@ -70,6 +70,7 @@ NetEqImpl::NetEqImpl(int fs,
       payload_splitter_(payload_splitter),
       timestamp_scaler_(timestamp_scaler),
       vad_(new PostDecodeVad()),
+      algorithm_buffer_(NULL),
       sync_buffer_(NULL),
       expand_(NULL),
       comfort_noise_(NULL),
@@ -112,6 +113,7 @@ NetEqImpl::NetEqImpl(int fs,

 NetEqImpl::~NetEqImpl() {
   LOG(LS_INFO) << "Deleting NetEqImpl object.";
+  delete algorithm_buffer_;
   delete sync_buffer_;
   delete background_noise_;
   delete expand_;
@@ -620,58 +622,55 @@ int NetEqImpl::GetAudioInternal(size_t max_length, int16_t* output,
   vad_->Update(decoded_buffer_.get(), length, speech_type,
                sid_frame_available, fs_hz_);

-  AudioMultiVector<int16_t> algorithm_buffer(sync_buffer_->Channels());
+  algorithm_buffer_->Clear();
   switch (operation) {
     case kNormal: {
-      DoNormal(decoded_buffer_.get(), length, speech_type, play_dtmf,
-               &algorithm_buffer);
+      DoNormal(decoded_buffer_.get(), length, speech_type, play_dtmf);
       break;
     }
     case kMerge: {
-      DoMerge(decoded_buffer_.get(), length, speech_type, play_dtmf,
-              &algorithm_buffer);
+      DoMerge(decoded_buffer_.get(), length, speech_type, play_dtmf);
       break;
     }
     case kExpand: {
-      return_value = DoExpand(play_dtmf, &algorithm_buffer);
+      return_value = DoExpand(play_dtmf);
       break;
     }
     case kAccelerate: {
       return_value = DoAccelerate(decoded_buffer_.get(), length, speech_type,
-                                  play_dtmf, &algorithm_buffer);
+                                  play_dtmf);
       break;
     }
     case kPreemptiveExpand: {
       return_value = DoPreemptiveExpand(decoded_buffer_.get(), length,
-                                        speech_type, play_dtmf,
-                                        &algorithm_buffer);
+                                        speech_type, play_dtmf);
       break;
     }
     case kRfc3389Cng:
     case kRfc3389CngNoPacket: {
-      return_value = DoRfc3389Cng(&packet_list, play_dtmf, &algorithm_buffer);
+      return_value = DoRfc3389Cng(&packet_list, play_dtmf);
       break;
     }
     case kCodecInternalCng: {
       // This handles the case when there is no transmission and the decoder
       // should produce internal comfort noise.
       // TODO(hlundin): Write test for codec-internal CNG.
-      DoCodecInternalCng(&algorithm_buffer);
+      DoCodecInternalCng();
       break;
     }
     case kDtmf: {
       // TODO(hlundin): Write test for this.
-      return_value = DoDtmf(dtmf_event, &play_dtmf, &algorithm_buffer);
+      return_value = DoDtmf(dtmf_event, &play_dtmf);
       break;
     }
     case kAlternativePlc: {
       // TODO(hlundin): Write test for this.
-      DoAlternativePlc(false, &algorithm_buffer);
+      DoAlternativePlc(false);
       break;
     }
     case kAlternativePlcIncreaseTimestamp: {
       // TODO(hlundin): Write test for this.
-      DoAlternativePlc(true, &algorithm_buffer);
+      DoAlternativePlc(true);
       break;
     }
     case kAudioRepetitionIncreaseTimestamp: {
@@ -684,7 +683,7 @@ int NetEqImpl::GetAudioInternal(size_t max_length, int16_t* output,
       // TODO(hlundin): Write test for this.
       // Copy last |output_size_samples_| from |sync_buffer_| to
       // |algorithm_buffer|.
-      algorithm_buffer.PushBackFromIndex(
+      algorithm_buffer_->PushBackFromIndex(
           *sync_buffer_, sync_buffer_->Size() - output_size_samples_);
       expand_->Reset();
       break;
@@ -705,7 +704,7 @@ int NetEqImpl::GetAudioInternal(size_t max_length, int16_t* output,
   }

   // Copy from |algorithm_buffer| to |sync_buffer_|.
-  sync_buffer_->PushBack(algorithm_buffer);
+  sync_buffer_->PushBack(*algorithm_buffer_);

   // Extract data from |sync_buffer_| to |output|.
   int num_output_samples_per_channel = output_size_samples_;
@@ -720,7 +719,7 @@ int NetEqImpl::GetAudioInternal(size_t max_length, int16_t* output,
       num_output_samples_per_channel, output);
   *num_channels = sync_buffer_->Channels();
   LOG(LS_VERBOSE) << "Sync buffer (" << *num_channels << " channel(s)):" <<
-      " insert " << algorithm_buffer.Size() << " samples, extract " <<
+      " insert " << algorithm_buffer_->Size() << " samples, extract " <<
       samples_from_sync << " samples";
   if (samples_from_sync != output_size_samples_) {
     LOG_F(LS_ERROR) << "samples_from_sync != output_size_samples_";
@@ -1205,15 +1204,14 @@ int NetEqImpl::DecodeLoop(PacketList* packet_list, Operations* operation,
 }

 void NetEqImpl::DoNormal(const int16_t* decoded_buffer, size_t decoded_length,
-                         AudioDecoder::SpeechType speech_type, bool play_dtmf,
-                         AudioMultiVector<int16_t>* algorithm_buffer) {
+                         AudioDecoder::SpeechType speech_type, bool play_dtmf) {
   assert(decoder_database_.get());
   assert(background_noise_);
   assert(expand_);
   Normal normal(fs_hz_, decoder_database_.get(), *background_noise_, expand_);
   assert(mute_factor_array_.get());
   normal.Process(decoded_buffer, decoded_length, last_mode_,
-                 mute_factor_array_.get(), algorithm_buffer);
+                 mute_factor_array_.get(), algorithm_buffer_);
   if (decoded_length != 0) {
     last_mode_ = kModeNormal;
   }
@@ -1232,12 +1230,11 @@ void NetEqImpl::DoNormal(const int16_t* decoded_buffer, size_t decoded_length,
 }

 void NetEqImpl::DoMerge(int16_t* decoded_buffer, size_t decoded_length,
-                        AudioDecoder::SpeechType speech_type, bool play_dtmf,
-                        AudioMultiVector<int16_t>* algorithm_buffer) {
-  Merge merge(fs_hz_, algorithm_buffer->Channels(), expand_, sync_buffer_);
+                        AudioDecoder::SpeechType speech_type, bool play_dtmf) {
+  Merge merge(fs_hz_, algorithm_buffer_->Channels(), expand_, sync_buffer_);
   assert(mute_factor_array_.get());
   int new_length = merge.Process(decoded_buffer, decoded_length,
-                                 mute_factor_array_.get(), algorithm_buffer);
+                                 mute_factor_array_.get(), algorithm_buffer_);

   // Update in-call and post-call statistics.
   if (expand_->MuteFactor(0) == 0) {
@@ -1259,13 +1256,12 @@ void NetEqImpl::DoMerge(int16_t* decoded_buffer, size_t decoded_length,
   }
 }

-int NetEqImpl::DoExpand(bool play_dtmf,
-                        AudioMultiVector<int16_t>* algorithm_buffer) {
+int NetEqImpl::DoExpand(bool play_dtmf) {
   while ((sync_buffer_->FutureLength() - expand_->overlap_length()) <
       static_cast<size_t>(output_size_samples_)) {
-    algorithm_buffer->Clear();
-    int return_value = expand_->Process(algorithm_buffer);
-    int length = algorithm_buffer->Size();
+    algorithm_buffer_->Clear();
+    int return_value = expand_->Process(algorithm_buffer_);
+    int length = algorithm_buffer_->Size();

     // Update in-call and post-call statistics.
     if (expand_->MuteFactor(0) == 0) {
@@ -1282,8 +1278,8 @@ int NetEqImpl::DoExpand(bool play_dtmf,
       return return_value;
     }
-    sync_buffer_->PushBack(*algorithm_buffer);
-    algorithm_buffer->Clear();
+    sync_buffer_->PushBack(*algorithm_buffer_);
+    algorithm_buffer_->Clear();
   }

   if (!play_dtmf) {
     dtmf_tone_generator_->Reset();
@@ -1293,11 +1289,10 @@ int NetEqImpl::DoExpand(bool play_dtmf,

 int NetEqImpl::DoAccelerate(int16_t* decoded_buffer, size_t decoded_length,
                             AudioDecoder::SpeechType speech_type,
-                            bool play_dtmf,
-                            AudioMultiVector<int16_t>* algorithm_buffer) {
+                            bool play_dtmf) {
   const size_t required_samples = 240 * fs_mult_;  // Must have 30 ms.
   int borrowed_samples_per_channel = 0;
-  size_t num_channels = algorithm_buffer->Channels();
+  size_t num_channels = algorithm_buffer_->Channels();
   size_t decoded_length_per_channel = decoded_length / num_channels;
   if (decoded_length_per_channel < required_samples) {
     // Must move data from the |sync_buffer_| in order to get 30 ms.
@@ -1315,7 +1310,7 @@ int NetEqImpl::DoAccelerate(int16_t* decoded_buffer, size_t decoded_length,
   Accelerate accelerate(fs_hz_, num_channels, *background_noise_);
   Accelerate::ReturnCodes return_code = accelerate.Process(decoded_buffer,
                                                            decoded_length,
-                                                           algorithm_buffer,
+                                                           algorithm_buffer_,
                                                            &samples_removed);
   stats_.AcceleratedSamples(samples_removed);
   switch (return_code) {
@@ -1336,22 +1331,22 @@ int NetEqImpl::DoAccelerate(int16_t* decoded_buffer, size_t decoded_length,

   if (borrowed_samples_per_channel > 0) {
     // Copy borrowed samples back to the |sync_buffer_|.
-    int length = algorithm_buffer->Size();
+    int length = algorithm_buffer_->Size();
     if (length < borrowed_samples_per_channel) {
       // This destroys the beginning of the buffer, but will not cause any
       // problems.
-      sync_buffer_->ReplaceAtIndex(*algorithm_buffer,
+      sync_buffer_->ReplaceAtIndex(*algorithm_buffer_,
                                    sync_buffer_->Size() -
                                    borrowed_samples_per_channel);
       sync_buffer_->PushFrontZeros(borrowed_samples_per_channel - length);
-      algorithm_buffer->PopFront(length);
-      assert(algorithm_buffer->Empty());
+      algorithm_buffer_->PopFront(length);
+      assert(algorithm_buffer_->Empty());
     } else {
-      sync_buffer_->ReplaceAtIndex(*algorithm_buffer,
+      sync_buffer_->ReplaceAtIndex(*algorithm_buffer_,
                                    borrowed_samples_per_channel,
                                    sync_buffer_->Size() -
                                    borrowed_samples_per_channel);
-      algorithm_buffer->PopFront(borrowed_samples_per_channel);
+      algorithm_buffer_->PopFront(borrowed_samples_per_channel);
     }
   }

@@ -1369,10 +1364,9 @@ int NetEqImpl::DoAccelerate(int16_t* decoded_buffer, size_t decoded_length,
 int NetEqImpl::DoPreemptiveExpand(int16_t* decoded_buffer,
                                   size_t decoded_length,
                                   AudioDecoder::SpeechType speech_type,
-                                  bool play_dtmf,
-                                  AudioMultiVector<int16_t>* algorithm_buffer) {
+                                  bool play_dtmf) {
   const size_t required_samples = 240 * fs_mult_;  // Must have 30 ms.
-  size_t num_channels = algorithm_buffer->Channels();
+  size_t num_channels = algorithm_buffer_->Channels();
   int borrowed_samples_per_channel = 0;
   int old_borrowed_samples_per_channel = 0;
   size_t decoded_length_per_channel = decoded_length / num_channels;
@@ -1397,7 +1391,7 @@ int NetEqImpl::DoPreemptiveExpand(int16_t* decoded_buffer,
   PreemptiveExpand preemptive_expand(fs_hz_, num_channels, *background_noise_);
   PreemptiveExpand::ReturnCodes return_code = preemptive_expand.Process(
       decoded_buffer, decoded_length, old_borrowed_samples_per_channel,
-      algorithm_buffer, &samples_added);
+      algorithm_buffer_, &samples_added);
   stats_.PreemptiveExpandedSamples(samples_added);
   switch (return_code) {
     case PreemptiveExpand::kSuccess:
@@ -1418,9 +1412,9 @@ int NetEqImpl::DoPreemptiveExpand(int16_t* decoded_buffer,
   if (borrowed_samples_per_channel > 0) {
     // Copy borrowed samples back to the |sync_buffer_|.
     sync_buffer_->ReplaceAtIndex(
-        *algorithm_buffer, borrowed_samples_per_channel,
+        *algorithm_buffer_, borrowed_samples_per_channel,
         sync_buffer_->Size() - borrowed_samples_per_channel);
-    algorithm_buffer->PopFront(borrowed_samples_per_channel);
+    algorithm_buffer_->PopFront(borrowed_samples_per_channel);
   }

   // If last packet was decoded as an inband CNG, set mode to CNG instead.
@@ -1434,8 +1428,7 @@ int NetEqImpl::DoPreemptiveExpand(int16_t* decoded_buffer,
   return 0;
 }

-int NetEqImpl::DoRfc3389Cng(PacketList* packet_list, bool play_dtmf,
-                            AudioMultiVector<int16_t>* algorithm_buffer) {
+int NetEqImpl::DoRfc3389Cng(PacketList* packet_list, bool play_dtmf) {
   if (!packet_list->empty()) {
     // Must have exactly one SID frame at this point.
     assert(packet_list->size() == 1);
@@ -1470,12 +1463,12 @@ int NetEqImpl::DoRfc3389Cng(PacketList* packet_list, bool play_dtmf,
     if (comfort_noise_->UpdateParameters(packet) ==
         ComfortNoise::kInternalError) {
       LOG_FERR0(LS_WARNING, UpdateParameters);
-      algorithm_buffer->Zeros(output_size_samples_);
+      algorithm_buffer_->Zeros(output_size_samples_);
       return -comfort_noise_->internal_error_code();
     }
   }
   int cn_return = comfort_noise_->Generate(output_size_samples_,
-                                           algorithm_buffer);
+                                           algorithm_buffer_);
   expand_->Reset();
   last_mode_ = kModeRfc3389Cng;
   if (!play_dtmf) {
@@ -1492,8 +1485,7 @@ int NetEqImpl::DoRfc3389Cng(PacketList* packet_list, bool play_dtmf,
   return 0;
 }

-void NetEqImpl::DoCodecInternalCng(
-    AudioMultiVector<int16_t>* algorithm_buffer) {
+void NetEqImpl::DoCodecInternalCng() {
   int length = 0;
   // TODO(hlundin): Will probably need a longer buffer for multi-channel.
   int16_t decoded_buffer[kMaxFrameSize];
@@ -1506,13 +1498,12 @@ void NetEqImpl::DoCodecInternalCng(
   Normal normal(fs_hz_, decoder_database_.get(), *background_noise_, expand_);
   assert(mute_factor_array_.get());
   normal.Process(decoded_buffer, length, last_mode_, mute_factor_array_.get(),
-                 algorithm_buffer);
+                 algorithm_buffer_);
   last_mode_ = kModeCodecInternalCng;
   expand_->Reset();
 }

-int NetEqImpl::DoDtmf(const DtmfEvent& dtmf_event, bool* play_dtmf,
-                      AudioMultiVector<int16_t>* algorithm_buffer) {
+int NetEqImpl::DoDtmf(const DtmfEvent& dtmf_event, bool* play_dtmf) {
   // This block of the code and the block further down, handling |dtmf_switch|
   // are commented out. Otherwise playing out-of-band DTMF would fail in VoE
   // test, DtmfTest.ManualSuccessfullySendsOutOfBandTelephoneEvents. This is
@@ -1540,11 +1531,11 @@ int NetEqImpl::DoDtmf(const DtmfEvent& dtmf_event, bool* play_dtmf,
   if (dtmf_return_value == 0) {
     // Generate DTMF signal.
     dtmf_return_value = dtmf_tone_generator_->Generate(output_size_samples_,
-                                                       algorithm_buffer);
+                                                       algorithm_buffer_);
   }

   if (dtmf_return_value < 0) {
-    algorithm_buffer->Zeros(output_size_samples_);
+    algorithm_buffer_->Zeros(output_size_samples_);
     return dtmf_return_value;
   }

@@ -1562,10 +1553,10 @@ int NetEqImpl::DoDtmf(const DtmfEvent& dtmf_event, bool* play_dtmf,
   //    int required_length = sync_buffer_->FutureLength();
   //    assert(dtmf_tone_generator_->initialized());
   //    dtmf_return_value = dtmf_tone_generator_->Generate(required_length,
-  //                                                        algorithm_buffer);
-  //    assert((size_t) required_length == algorithm_buffer->Size());
+  //                                                        algorithm_buffer_);
+  //    assert((size_t) required_length == algorithm_buffer_->Size());
   //    if (dtmf_return_value < 0) {
-  //      algorithm_buffer->Zeros(output_size_samples_);
+  //      algorithm_buffer_->Zeros(output_size_samples_);
   //      return dtmf_return_value;
   //    }
   //
@@ -1573,13 +1564,13 @@ int NetEqImpl::DoDtmf(const DtmfEvent& dtmf_event, bool* play_dtmf,
   //    // data.
   //    // TODO(hlundin): It seems that this overwriting has gone lost.
   //    // Not adapted for multi-channel yet.
-  //    assert(algorithm_buffer->Channels() == 1);
-  //    if (algorithm_buffer->Channels() != 1) {
+  //    assert(algorithm_buffer_->Channels() == 1);
+  //    if (algorithm_buffer_->Channels() != 1) {
   //      LOG(LS_WARNING) << "DTMF not supported for more than one channel";
   //      return kStereoNotSupported;
   //    }
   //    // Shuffle the remaining data to the beginning of algorithm buffer.
-  //    algorithm_buffer->PopFront(sync_buffer_->FutureLength());
+  //    algorithm_buffer_->PopFront(sync_buffer_->FutureLength());
   //  }

   sync_buffer_->IncreaseEndTimestamp(output_size_samples_);
@@ -1591,8 +1582,7 @@ int NetEqImpl::DoDtmf(const DtmfEvent& dtmf_event, bool* play_dtmf,
   return 0;
 }

-void NetEqImpl::DoAlternativePlc(bool increase_timestamp,
-                                 AudioMultiVector<int16_t>* algorithm_buffer) {
+void NetEqImpl::DoAlternativePlc(bool increase_timestamp) {
   AudioDecoder* decoder = decoder_database_->GetActiveDecoder();
   int length;
   if (decoder && decoder->HasDecodePlc()) {
@@ -1601,14 +1591,14 @@ void NetEqImpl::DoAlternativePlc(bool increase_timestamp,
     int16_t decoded_buffer[kMaxFrameSize];
     length = decoder->DecodePlc(1, decoded_buffer);
     if (length > 0) {
-      algorithm_buffer->PushBackInterleaved(decoded_buffer, length);
+      algorithm_buffer_->PushBackInterleaved(decoded_buffer, length);
     } else {
       length = 0;
     }
   } else {
     // Do simple zero-stuffing.
     length = output_size_samples_;
-    algorithm_buffer->Zeros(length);
+    algorithm_buffer_->Zeros(length);
     // By not advancing the timestamp, NetEq inserts samples.
     stats_.AddZeros(length);
   }
@@ -1756,6 +1746,12 @@ void NetEqImpl::SetSampleRateAndChannels(int fs_hz, size_t channels) {
   assert(vad_.get());  // Cannot be NULL here.
   vad_->Init();

+  // Delete algorithm buffer and create a new one.
+  if (algorithm_buffer_) {
+    delete algorithm_buffer_;
+  }
+  algorithm_buffer_ = new AudioMultiVector<int16_t>(channels);
+
   // Delete sync buffer and create a new one.
   if (sync_buffer_) {
     delete sync_buffer_;

View File

@@ -222,49 +222,42 @@ class NetEqImpl : public webrtc::NetEq {

   // Sub-method which calls the Normal class to perform the normal operation.
   void DoNormal(const int16_t* decoded_buffer, size_t decoded_length,
-                AudioDecoder::SpeechType speech_type, bool play_dtmf,
-                AudioMultiVector<int16_t>* algorithm_buffer);
+                AudioDecoder::SpeechType speech_type, bool play_dtmf);

   // Sub-method which calls the Merge class to perform the merge operation.
   void DoMerge(int16_t* decoded_buffer, size_t decoded_length,
-               AudioDecoder::SpeechType speech_type, bool play_dtmf,
-               AudioMultiVector<int16_t>* algorithm_buffer);
+               AudioDecoder::SpeechType speech_type, bool play_dtmf);

   // Sub-method which calls the Expand class to perform the expand operation.
-  int DoExpand(bool play_dtmf, AudioMultiVector<int16_t>* algorithm_buffer);
+  int DoExpand(bool play_dtmf);

   // Sub-method which calls the Accelerate class to perform the accelerate
   // operation.
   int DoAccelerate(int16_t* decoded_buffer, size_t decoded_length,
-                   AudioDecoder::SpeechType speech_type, bool play_dtmf,
-                   AudioMultiVector<int16_t>* algorithm_buffer);
+                   AudioDecoder::SpeechType speech_type, bool play_dtmf);

   // Sub-method which calls the PreemptiveExpand class to perform the
   // preemtive expand operation.
   int DoPreemptiveExpand(int16_t* decoded_buffer, size_t decoded_length,
-                         AudioDecoder::SpeechType speech_type, bool play_dtmf,
-                         AudioMultiVector<int16_t>* algorithm_buffer);
+                         AudioDecoder::SpeechType speech_type, bool play_dtmf);

   // Sub-method which calls the ComfortNoise class to generate RFC 3389 comfort
   // noise. |packet_list| can either contain one SID frame to update the
   // noise parameters, or no payload at all, in which case the previously
   // received parameters are used.
-  int DoRfc3389Cng(PacketList* packet_list, bool play_dtmf,
-                   AudioMultiVector<int16_t>* algorithm_buffer);
+  int DoRfc3389Cng(PacketList* packet_list, bool play_dtmf);

   // Calls the audio decoder to generate codec-internal comfort noise when
   // no packet was received.
-  void DoCodecInternalCng(AudioMultiVector<int16_t>* algorithm_buffer);
+  void DoCodecInternalCng();

   // Calls the DtmfToneGenerator class to generate DTMF tones.
-  int DoDtmf(const DtmfEvent& dtmf_event, bool* play_dtmf,
-             AudioMultiVector<int16_t>* algorithm_buffer);
+  int DoDtmf(const DtmfEvent& dtmf_event, bool* play_dtmf);

   // Produces packet-loss concealment using alternative methods. If the codec
   // has an internal PLC, it is called to generate samples. Otherwise, the
   // method performs zero-stuffing.
-  void DoAlternativePlc(bool increase_timestamp,
-                        AudioMultiVector<int16_t>* algorithm_buffer);
+  void DoAlternativePlc(bool increase_timestamp);

   // Overdub DTMF on top of |output|.
   int DtmfOverdub(const DtmfEvent& dtmf_event, size_t num_channels,
@@ -296,6 +289,7 @@ class NetEqImpl : public webrtc::NetEq {
   scoped_ptr<TimestampScaler> timestamp_scaler_;
   scoped_ptr<DecisionLogic> decision_logic_;
   scoped_ptr<PostDecodeVad> vad_;
+  AudioMultiVector<int16_t>* algorithm_buffer_;
   SyncBuffer* sync_buffer_;
   Expand* expand_;
   RandomVector random_vector_;