diff --git a/webrtc/modules/audio_conference_mixer/source/audio_conference_mixer_impl.cc b/webrtc/modules/audio_conference_mixer/source/audio_conference_mixer_impl.cc
index fb5203746..3fedc487a 100644
--- a/webrtc/modules/audio_conference_mixer/source/audio_conference_mixer_impl.cc
+++ b/webrtc/modules/audio_conference_mixer/source/audio_conference_mixer_impl.cc
@@ -173,7 +173,7 @@ bool AudioConferenceMixerImpl::Init()
     if (!SetNumLimiterChannels(1))
         return false;
 
-    if(_limiter->gain_control()->set_mode(GainControl::kFixedDigital) != 
+    if(_limiter->gain_control()->set_mode(GainControl::kFixedDigital) !=
        _limiter->kNoError)
         return false;
 
@@ -1125,7 +1125,7 @@ WebRtc_Word32 AudioConferenceMixerImpl::MixFromList(
     {
         // No mixing required here; skip the saturation protection.
         AudioFrame* audioFrame = static_cast<AudioFrame*>(item->GetItem());
-        mixedAudio = *audioFrame;
+        mixedAudio.CopyFrom(*audioFrame);
         SetParticipantStatistics(&_scratchMixedParticipants[position],
                                  *audioFrame);
         return 0;
@@ -1173,7 +1173,7 @@ WebRtc_Word32 AudioConferenceMixerImpl::MixAnonomouslyFromList(
     {
         // No mixing required here; skip the saturation protection.
         AudioFrame* audioFrame = static_cast<AudioFrame*>(item->GetItem());
-        mixedAudio = *audioFrame;
+        mixedAudio.CopyFrom(*audioFrame);
         return 0;
     }
diff --git a/webrtc/modules/audio_processing/test/unit_test.cc b/webrtc/modules/audio_processing/test/unit_test.cc
index 51340e959..1baa48d67 100644
--- a/webrtc/modules/audio_processing/test/unit_test.cc
+++ b/webrtc/modules/audio_processing/test/unit_test.cc
@@ -451,7 +451,8 @@ void ApmTest::ChangeTriggersInit(F f, AudioProcessing* ap, int initial_value,
   EnableAllComponents();
   Init(16000, 2, 2, 2, false);
   SetFrameTo(frame_, 1000);
-  AudioFrame frame_copy = *frame_;
+  AudioFrame frame_copy;
+  frame_copy.CopyFrom(*frame_);
   ProcessWithDefaultStreamParameters(frame_);
   // Verify the processing has actually changed the frame.
   EXPECT_FALSE(FrameDataAreEqual(*frame_, frame_copy));
@@ -464,13 +465,14 @@ void ApmTest::ChangeTriggersInit(F f, AudioProcessing* ap, int initial_value,
 
   apm_->Initialize();
   SetFrameTo(frame_, 1000);
-  AudioFrame initial_frame = *frame_;
+  AudioFrame initial_frame;
+  initial_frame.CopyFrom(*frame_);
   ProcessWithDefaultStreamParameters(frame_);
   ProcessWithDefaultStreamParameters(frame_);
   // Verify the processing has actually changed the frame.
   EXPECT_FALSE(FrameDataAreEqual(*frame_, initial_frame));
 
-  frame_copy = initial_frame;
+  frame_copy.CopyFrom(initial_frame);
   apm_->Initialize();
   ProcessWithDefaultStreamParameters(&frame_copy);
   // Verify an init here would result in different output.
@@ -478,7 +480,7 @@ void ApmTest::ChangeTriggersInit(F f, AudioProcessing* ap, int initial_value,
   ProcessWithDefaultStreamParameters(&frame_copy);
   EXPECT_FALSE(FrameDataAreEqual(*frame_, frame_copy));
 
-  frame_copy = initial_frame;
+  frame_copy.CopyFrom(initial_frame);
   apm_->Initialize();
   ProcessWithDefaultStreamParameters(&frame_copy);
   // Test that the same value does not trigger an init.
@@ -1078,7 +1080,8 @@ TEST_F(ApmTest, NoProcessingWhenAllComponentsDisabled) {
   for (size_t i = 0; i < kSampleRatesSize; i++) {
     Init(kSampleRates[i], 2, 2, 2, false);
     SetFrameTo(frame_, 1000, 2000);
-    AudioFrame frame_copy = *frame_;
+    AudioFrame frame_copy;
+    frame_copy.CopyFrom(*frame_);
     for (int j = 0; j < 1000; j++) {
       EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
       EXPECT_TRUE(FrameDataAreEqual(*frame_, frame_copy));
@@ -1119,14 +1122,15 @@ TEST_F(ApmTest, SplittingFilter) {
   // Verify the filter is not active through undistorted audio when:
   // 1. No components are enabled...
   SetFrameTo(frame_, 1000);
-  AudioFrame frame_copy = *frame_;
+  AudioFrame frame_copy;
+  frame_copy.CopyFrom(*frame_);
   EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
   EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
   EXPECT_TRUE(FrameDataAreEqual(*frame_, frame_copy));
 
   // 2. Only the level estimator is enabled...
   SetFrameTo(frame_, 1000);
-  frame_copy = *frame_;
+  frame_copy.CopyFrom(*frame_);
   EXPECT_EQ(apm_->kNoError, apm_->level_estimator()->Enable(true));
   EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
   EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
@@ -1135,7 +1139,7 @@ TEST_F(ApmTest, SplittingFilter) {
 
   // 3. Only VAD is enabled...
   SetFrameTo(frame_, 1000);
-  frame_copy = *frame_;
+  frame_copy.CopyFrom(*frame_);
   EXPECT_EQ(apm_->kNoError, apm_->voice_detection()->Enable(true));
   EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
   EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
@@ -1144,7 +1148,7 @@ TEST_F(ApmTest, SplittingFilter) {
   // 4. Both VAD and the level estimator are enabled...
   SetFrameTo(frame_, 1000);
-  frame_copy = *frame_;
+  frame_copy.CopyFrom(*frame_);
   EXPECT_EQ(apm_->kNoError, apm_->level_estimator()->Enable(true));
   EXPECT_EQ(apm_->kNoError, apm_->voice_detection()->Enable(true));
   EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
@@ -1164,7 +1168,7 @@ TEST_F(ApmTest, SplittingFilter) {
   // behavior of the AEC. Think of something more robust.
   EXPECT_EQ(apm_->kNoError, apm_->echo_cancellation()->Enable(true));
   SetFrameTo(frame_, 1000);
-  frame_copy = *frame_;
+  frame_copy.CopyFrom(*frame_);
   EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(0));
   EXPECT_EQ(apm_->kNoError,
             apm_->echo_cancellation()->set_stream_drift_samples(0));
@@ -1182,7 +1186,7 @@ TEST_F(ApmTest, SplittingFilter) {
   frame_->num_channels_ = 2;
   frame_->sample_rate_hz_ = 32000;
   SetFrameTo(frame_, 1000);
-  frame_copy = *frame_;
+  frame_copy.CopyFrom(*frame_);
   EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(0));
   EXPECT_EQ(apm_->kNoError,
             apm_->echo_cancellation()->set_stream_drift_samples(0));
diff --git a/webrtc/modules/interface/module_common_types.h b/webrtc/modules/interface/module_common_types.h
index 4b0dc5a8d..a33e7efb0 100644
--- a/webrtc/modules/interface/module_common_types.h
+++ b/webrtc/modules/interface/module_common_types.h
@@ -11,8 +11,10 @@
 #ifndef MODULE_COMMON_TYPES_H
 #define MODULE_COMMON_TYPES_H
 
+#include <assert.h>
 #include <string.h> // memcpy
-#include <assert.h>
+
+#include <algorithm>
 
 #include "webrtc/common_types.h"
 #include "webrtc/system_wrappers/interface/constructor_magic.h"
@@ -724,7 +726,8 @@ VideoFrame::Free()
 class AudioFrame
 {
 public:
-    enum { kMaxDataSizeSamples = 3840 }; // stereo, 32 kHz, 60ms (2*32*60)
+    // Stereo, 32 kHz, 60 ms (2 * 32 * 60)
+    static const int kMaxDataSizeSamples = 3840;
 
     enum VADActivity
     {
@@ -744,7 +747,7 @@ public:
     AudioFrame();
     virtual ~AudioFrame();
 
-    int UpdateFrame(
+    void UpdateFrame(
         int id,
         uint32_t timestamp,
         const int16_t* data,
@@ -757,9 +760,10 @@ public:
 
     AudioFrame& Append(const AudioFrame& rhs);
 
+    void CopyFrom(const AudioFrame& src);
+
     void Mute();
 
-    AudioFrame& operator=(const AudioFrame& rhs);
     AudioFrame& operator>>=(const int rhs);
     AudioFrame& operator+=(const AudioFrame& rhs);
     AudioFrame& operator-=(const AudioFrame& rhs);
@@ -773,6 +777,9 @@ public:
     SpeechType speech_type_;
     VADActivity vad_activity_;
     uint32_t energy_;
+
+private:
+    DISALLOW_COPY_AND_ASSIGN(AudioFrame);
 };
 
 inline
@@ -796,7 +803,7 @@ AudioFrame::~AudioFrame()
 }
 
 inline
-int
+void
 AudioFrame::UpdateFrame(
     int id,
     uint32_t timestamp,
@@ -810,30 +817,43 @@ AudioFrame::UpdateFrame(
 {
     id_ = id;
     timestamp_ = timestamp;
+    samples_per_channel_ = samples_per_channel;
     sample_rate_hz_ = sample_rate_hz;
     speech_type_ = speech_type;
     vad_activity_ = vad_activity;
     num_channels_ = num_channels;
     energy_ = energy;
 
-    if((samples_per_channel > kMaxDataSizeSamples) ||
-        (num_channels > 2) || (num_channels < 1))
-    {
-        samples_per_channel_ = 0;
-        return -1;
-    }
-    samples_per_channel_ = samples_per_channel;
+    const int length = samples_per_channel * num_channels;
+    assert(length <= kMaxDataSizeSamples && length >= 0);
     if(data != NULL)
     {
-        memcpy(data_, data, sizeof(int16_t) *
-               samples_per_channel * num_channels_);
+        memcpy(data_, data, sizeof(int16_t) * length);
     }
     else
     {
-        memset(data_,0,sizeof(int16_t) *
-               samples_per_channel * num_channels_);
+        memset(data_, 0, sizeof(int16_t) * length);
     }
-    return 0;
+}
+
+inline void AudioFrame::CopyFrom(const AudioFrame& src)
+{
+    if(this == &src)
+    {
+        return;
+    }
+    id_ = src.id_;
+    timestamp_ = src.timestamp_;
+    samples_per_channel_ = src.samples_per_channel_;
+    sample_rate_hz_ = src.sample_rate_hz_;
+    speech_type_ = src.speech_type_;
+    vad_activity_ = src.vad_activity_;
+    num_channels_ = src.num_channels_;
+    energy_ = src.energy_;
+
+    const int length = samples_per_channel_ * num_channels_;
+    assert(length <= kMaxDataSizeSamples && length >= 0);
+    memcpy(data_, src.data_, sizeof(int16_t) * length);
 }
 
 inline
@@ -843,36 +863,6 @@ AudioFrame::Mute()
     memset(data_, 0, samples_per_channel_ * num_channels_ * sizeof(int16_t));
 }
 
-inline
-AudioFrame&
-AudioFrame::operator=(const AudioFrame& rhs)
-{
-    // Sanity Check
-    if((rhs.samples_per_channel_ > kMaxDataSizeSamples) ||
-        (rhs.num_channels_ > 2) ||
-        (rhs.num_channels_ < 1))
-    {
-        return *this;
-    }
-    if(this == &rhs)
-    {
-        return *this;
-    }
-    id_ = rhs.id_;
-    timestamp_ = rhs.timestamp_;
-    sample_rate_hz_ = rhs.sample_rate_hz_;
-    speech_type_ = rhs.speech_type_;
-    vad_activity_ = rhs.vad_activity_;
-    num_channels_ = rhs.num_channels_;
-    energy_ = rhs.energy_;
-
-    samples_per_channel_ = rhs.samples_per_channel_;
-    memcpy(data_, rhs.data_,
-           sizeof(int16_t) * rhs.samples_per_channel_ * num_channels_);
-
-    return *this;
-}
-
 inline
 AudioFrame&
 AudioFrame::operator>>=(const int rhs)
diff --git a/webrtc/modules/utility/source/audio_frame_operations_unittest.cc b/webrtc/modules/utility/source/audio_frame_operations_unittest.cc
index c11dce3ca..efdebf6dd 100644
--- a/webrtc/modules/utility/source/audio_frame_operations_unittest.cc
+++ b/webrtc/modules/utility/source/audio_frame_operations_unittest.cc
@@ -62,7 +62,8 @@ TEST_F(AudioFrameOperationsTest, MonoToStereoFailsWithBadParameters) {
 TEST_F(AudioFrameOperationsTest, MonoToStereoSucceeds) {
   frame_.num_channels_ = 1;
   SetFrameData(&frame_, 1);
-  AudioFrame temp_frame = frame_;
+  AudioFrame temp_frame;
+  temp_frame.CopyFrom(frame_);
   EXPECT_EQ(0, AudioFrameOperations::MonoToStereo(&frame_));
 
   AudioFrame stereo_frame;
@@ -86,7 +87,8 @@ TEST_F(AudioFrameOperationsTest, StereoToMonoFailsWithBadParameters) {
 
 TEST_F(AudioFrameOperationsTest, StereoToMonoSucceeds) {
   SetFrameData(&frame_, 4, 2);
-  AudioFrame temp_frame = frame_;
+  AudioFrame temp_frame;
+  temp_frame.CopyFrom(frame_);
   EXPECT_EQ(0, AudioFrameOperations::StereoToMono(&frame_));
 
   AudioFrame mono_frame;
@@ -131,7 +133,8 @@ TEST_F(AudioFrameOperationsTest, SwapStereoChannelsFailsOnMono) {
   // Set data to "stereo", despite it being a mono frame.
   SetFrameData(&frame_, 0, 1);
-  AudioFrame orig_frame = frame_;
+  AudioFrame orig_frame;
+  orig_frame.CopyFrom(frame_);
   AudioFrameOperations::SwapStereoChannels(&frame_);
   // Verify that no swap occurred.
   VerifyFramesAreEqual(orig_frame, frame_);
diff --git a/webrtc/modules/utility/source/coder.cc b/webrtc/modules/utility/source/coder.cc
index 31d528ddb..a610daa6b 100644
--- a/webrtc/modules/utility/source/coder.cc
+++ b/webrtc/modules/utility/source/coder.cc
@@ -92,7 +92,8 @@ WebRtc_Word32 AudioCoder::Encode(const AudioFrame& audio,
 {
     // Fake a timestamp in case audio doesn't contain a correct timestamp.
     // Make a local copy of the audio frame since audio is const
-    AudioFrame audioFrame = audio;
+    AudioFrame audioFrame;
+    audioFrame.CopyFrom(audio);
     audioFrame.timestamp_ = _encodeTimestamp;
     _encodeTimestamp += audioFrame.samples_per_channel_;
 
diff --git a/webrtc/voice_engine/channel.cc b/webrtc/voice_engine/channel.cc
index 38897fc0d..357eb0ffe 100644
--- a/webrtc/voice_engine/channel.cc
+++ b/webrtc/voice_engine/channel.cc
@@ -660,7 +660,7 @@ Channel::OnInitializeDecoder(
     receiveCodec.channels = channels;
     receiveCodec.rate = rate;
     strncpy(receiveCodec.plname, payloadName, RTP_PAYLOAD_NAME_SIZE - 1);
-    
+
     _audioCodingModule.Codec(payloadName, dummyCodec, frequency, channels);
     receiveCodec.pacsize = dummyCodec.pacsize;
 
@@ -3789,7 +3789,7 @@ int Channel::StartPlayingFileAsMicrophone(InStream* stream,
         _inputFilePlayerPtr = NULL;
         return -1;
     }
-    
+
     _inputFilePlayerPtr->RegisterModuleFileCallback(this);
     _inputFilePlaying = true;
 
@@ -4009,7 +4009,7 @@ int Channel::StartRecordingPlayout(OutStream* stream,
         _outputFileRecorderPtr = NULL;
         return -1;
     }
-    
+
     _outputFileRecorderPtr->RegisterModuleFileCallback(this);
     _outputFileRecording = true;
 
@@ -5794,7 +5794,7 @@ Channel::Demultiplex(const AudioFrame& audioFrame)
 {
     WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,_channelId),
                  "Channel::Demultiplex()");
-    _audioFrame = audioFrame;
+    _audioFrame.CopyFrom(audioFrame);
     _audioFrame.id_ = _channelId;
     return 0;
 }
@@ -6343,7 +6343,7 @@ Channel::InsertInbandDtmfTone()
             // account.
             _inbandDtmfGenerator.ResetTone();
         }
-        
+
         WebRtc_Word16 toneBuffer[320];
         WebRtc_UWord16 toneSamples(0);
         // Get 10ms tone segment and set time since last tone to zero
@@ -6356,19 +6356,19 @@ Channel::InsertInbandDtmfTone()
         }
 
         // Replace mixed audio with DTMF tone.
-        for (int sample = 0; 
+        for (int sample = 0;
             sample < _audioFrame.samples_per_channel_;
             sample++)
         {
-            for (int channel = 0; 
-                channel < _audioFrame.num_channels_; 
+            for (int channel = 0;
+                channel < _audioFrame.num_channels_;
                 channel++)
             {
-                _audioFrame.data_[sample * _audioFrame.num_channels_ + channel] = 
-                    toneBuffer[sample];
+                const int index = sample * _audioFrame.num_channels_ + channel;
+                _audioFrame.data_[index] = toneBuffer[sample];
             }
         }
-        
+
         assert(_audioFrame.samples_per_channel_ == toneSamples);
     } else {
diff --git a/webrtc/voice_engine/output_mixer.cc b/webrtc/voice_engine/output_mixer.cc
index daf0d4a30..36a9da734 100644
--- a/webrtc/voice_engine/output_mixer.cc
+++ b/webrtc/voice_engine/output_mixer.cc
@@ -32,7 +32,7 @@ OutputMixer::NewMixedAudio(const WebRtc_Word32 id,
     WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,-1),
                  "OutputMixer::NewMixedAudio(id=%d, size=%u)", id, size);
 
-    _audioFrame = generalAudioFrame;
+    _audioFrame.CopyFrom(generalAudioFrame);
     _audioFrame.id_ = id;
 }
 
@@ -135,7 +135,7 @@ OutputMixer::OutputMixer(const WebRtc_UWord32 instanceId) :
 {
     WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_instanceId,-1),
                  "OutputMixer::OutputMixer() - ctor");
-    
+
     if ((_mixerModule.RegisterMixedStreamCallback(*this) == -1) ||
         (_mixerModule.RegisterMixerStatusCallback(*this, 100) == -1))
     {
@@ -143,7 +143,7 @@ OutputMixer::OutputMixer(const WebRtc_UWord32 instanceId) :
                      "OutputMixer::OutputMixer() failed to register mixer"
                      "callbacks");
     }
-    
+
     _dtmfGenerator.Init();
 }
 
@@ -156,7 +156,7 @@ OutputMixer::Destroy(OutputMixer*& mixer)
         mixer = NULL;
     }
 }
-    
+
 OutputMixer::~OutputMixer()
 {
     WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_instanceId,-1),
@@ -191,7 +191,7 @@ OutputMixer::SetEngineInformation(voe::Statistics& engineStatistics)
     return 0;
 }
 
-WebRtc_Word32 
+WebRtc_Word32
 OutputMixer::SetAudioProcessingModule(
     AudioProcessing* audioProcessingModule)
 {
@@ -368,7 +368,7 @@ int OutputMixer::StartRecordingPlayout(const char* fileName,
     }
 
     CriticalSectionScoped cs(&_fileCritSect);
-    
+
     // Destroy the old instance
     if (_outputFileRecorderPtr)
     {
diff --git a/webrtc/voice_engine/output_mixer_internal.cc b/webrtc/voice_engine/output_mixer_internal.cc
index b78d8cd93..dfa7d95b8 100644
--- a/webrtc/voice_engine/output_mixer_internal.cc
+++ b/webrtc/voice_engine/output_mixer_internal.cc
@@ -39,7 +39,7 @@ int RemixAndResample(const AudioFrame& src_frame,
   if (resampler->ResetIfNeeded(src_frame.sample_rate_hz_,
                                dst_frame->sample_rate_hz_,
                                resampler_type) == -1) {
-    *dst_frame = src_frame;
+    dst_frame->CopyFrom(src_frame);
     WEBRTC_TRACE(kTraceError, kTraceVoice, -1, "%s ResetIfNeeded failed",
                  __FUNCTION__);
     return -1;
@@ -53,7 +53,7 @@ int RemixAndResample(const AudioFrame& src_frame,
                       out_length) == 0) {
     dst_frame->samples_per_channel_ = out_length / audio_ptr_num_channels;
   } else {
-    *dst_frame = src_frame;
+    dst_frame->CopyFrom(src_frame);
     WEBRTC_TRACE(kTraceError, kTraceVoice, -1, "%s resampling failed",
                  __FUNCTION__);
     return -1;
diff --git a/webrtc/voice_engine/output_mixer_unittest.cc b/webrtc/voice_engine/output_mixer_unittest.cc
index fe678a02a..dbcb25103 100644
--- a/webrtc/voice_engine/output_mixer_unittest.cc
+++ b/webrtc/voice_engine/output_mixer_unittest.cc
@@ -25,8 +25,8 @@ class OutputMixerTest : public ::testing::Test {
     src_frame_.sample_rate_hz_ = 16000;
     src_frame_.samples_per_channel_ = src_frame_.sample_rate_hz_ / 100;
     src_frame_.num_channels_ = 1;
-    dst_frame_ = src_frame_;
-    golden_frame_ = src_frame_;
+    dst_frame_.CopyFrom(src_frame_);
+    golden_frame_.CopyFrom(src_frame_);
   }
 
   void RunResampleTest(int src_channels, int src_sample_rate_hz,
diff --git a/webrtc/voice_engine/transmit_mixer.cc b/webrtc/voice_engine/transmit_mixer.cc
index 4dc18498b..a69940120 100644
--- a/webrtc/voice_engine/transmit_mixer.cc
+++ b/webrtc/voice_engine/transmit_mixer.cc
@@ -444,7 +444,8 @@ TransmitMixer::DemuxAndMix()
         }
         else if (channelPtr->Sending())
         {
             // load temporary audioframe with current (mixed) microphone signal
-            AudioFrame tmpAudioFrame = _audioFrame;
+            AudioFrame tmpAudioFrame;
+            tmpAudioFrame.CopyFrom(_audioFrame);
             channelPtr->Demultiplex(tmpAudioFrame);
             channelPtr->PrepareEncodeAndSend(_mixingFrequency);
diff --git a/webrtc/voice_engine/voe_file_impl.cc b/webrtc/voice_engine/voe_file_impl.cc
index 8f0061faa..869cd8640 100644
--- a/webrtc/voice_engine/voe_file_impl.cc
+++ b/webrtc/voice_engine/voe_file_impl.cc
@@ -704,17 +704,10 @@ int VoEFileImpl::ConvertPCMToWAV(const char* fileNameInUTF8,
             break;
         }
 
-        res=audioFrame.UpdateFrame(-1, 0, decodedData,
-                                   (WebRtc_UWord16)decLength,
-                                   frequency, AudioFrame::kNormalSpeech,
-                                   AudioFrame::kVadActive);
-        if(res)
-        {
-            WEBRTC_TRACE(kTraceError, kTraceVoice,
-                         VoEId(_shared->instance_id(), -1),
-                         "ConvertPCMToWAV failed during conversion (audio frame)");
-            break;
-        }
+        audioFrame.UpdateFrame(-1, 0, decodedData,
+                               (WebRtc_UWord16)decLength,
+                               frequency, AudioFrame::kNormalSpeech,
+                               AudioFrame::kVadActive);
 
         res=recObj.RecordAudioToFile(audioFrame);
         if(res)
@@ -794,18 +787,10 @@ int VoEFileImpl::ConvertPCMToWAV(InStream* streamIn, OutStream* streamOut)
             break;
         }
 
-        res=audioFrame.UpdateFrame(-1, 0, decodedData,
-                                   (WebRtc_UWord16)decLength, frequency,
-                                   AudioFrame::kNormalSpeech,
-                                   AudioFrame::kVadActive);
-        if(res)
-        {
-            WEBRTC_TRACE(kTraceError, kTraceVoice,
-                         VoEId(_shared->instance_id(), -1),
-                         "ConvertPCMToWAV failed during conversion "
-                         "(create audio frame)");
-            break;
-        }
+        audioFrame.UpdateFrame(-1, 0, decodedData,
+                               (WebRtc_UWord16)decLength, frequency,
+                               AudioFrame::kNormalSpeech,
+                               AudioFrame::kVadActive);
 
         res=recObj.RecordAudioToFile(audioFrame);
         if(res)
@@ -882,17 +867,10 @@ int VoEFileImpl::ConvertWAVToPCM(const char* fileNameInUTF8,
             break;
         }
 
-        res=audioFrame.UpdateFrame(-1, 0, decodedData,
-                                   (WebRtc_UWord16)decLength,
-                                   frequency, AudioFrame::kNormalSpeech,
-                                   AudioFrame::kVadActive);
-        if(res)
-        {
-            WEBRTC_TRACE(kTraceError, kTraceVoice,
-                         VoEId(_shared->instance_id(), -1),
-                         "ConvertWAVToPCM failed during conversion (audio frame)");
-            break;
-        }
+        audioFrame.UpdateFrame(-1, 0, decodedData,
+                               (WebRtc_UWord16)decLength,
+                               frequency, AudioFrame::kNormalSpeech,
+                               AudioFrame::kVadActive);
 
         res=recObj.RecordAudioToFile(audioFrame);
         if(res)
@@ -974,17 +952,10 @@ int VoEFileImpl::ConvertWAVToPCM(InStream* streamIn, OutStream* streamOut)
             break;
         }
 
-        res=audioFrame.UpdateFrame(-1, 0, decodedData,
-                                   (WebRtc_UWord16)decLength, frequency,
-                                   AudioFrame::kNormalSpeech,
-                                   AudioFrame::kVadActive);
-        if(res)
-        {
-            WEBRTC_TRACE(kTraceError, kTraceVoice,
-                         VoEId(_shared->instance_id(), -1),
-                         "ConvertWAVToPCM failed during conversion (audio frame)");
-            break;
-        }
+        audioFrame.UpdateFrame(-1, 0, decodedData,
+                               (WebRtc_UWord16)decLength, frequency,
+                               AudioFrame::kNormalSpeech,
+                               AudioFrame::kVadActive);
 
         res=recObj.RecordAudioToFile(audioFrame);
         if(res)
@@ -1059,18 +1030,10 @@ int VoEFileImpl::ConvertPCMToCompressed(const char* fileNameInUTF8,
             // This is an OK way to end
            break;
         }
-        res=audioFrame.UpdateFrame(-1, 0, decodedData,
-                                   (WebRtc_UWord16)decLength,
-                                   frequency, AudioFrame::kNormalSpeech,
-                                   AudioFrame::kVadActive);
-        if(res)
-        {
-            WEBRTC_TRACE(kTraceError, kTraceVoice,
-                         VoEId(_shared->instance_id(), -1),
-                         "ConvertPCMToCompressed failed during conversion "
-                         "(audio frame)");
-            break;
-        }
+        audioFrame.UpdateFrame(-1, 0, decodedData,
+                               (WebRtc_UWord16)decLength,
+                               frequency, AudioFrame::kNormalSpeech,
+                               AudioFrame::kVadActive);
 
         res=recObj.RecordAudioToFile(audioFrame);
         if(res)
@@ -1151,18 +1114,10 @@ int VoEFileImpl::ConvertPCMToCompressed(InStream* streamIn,
             // This is an OK way to end
             break;
         }
-        res=audioFrame.UpdateFrame(-1, 0, decodedData,
-                                   (WebRtc_UWord16)decLength,
-                                   frequency, AudioFrame::kNormalSpeech,
-                                   AudioFrame::kVadActive);
-        if(res)
-        {
-            WEBRTC_TRACE(kTraceError, kTraceVoice,
-                         VoEId(_shared->instance_id(), -1),
-                         "ConvertPCMToCompressed failed during conversion "
-                         "(audio frame)");
-            break;
-        }
+        audioFrame.UpdateFrame(-1, 0, decodedData,
+                               (WebRtc_UWord16)decLength,
+                               frequency, AudioFrame::kNormalSpeech,
+                               AudioFrame::kVadActive);
 
         res=recObj.RecordAudioToFile(audioFrame);
         if(res)
@@ -1241,19 +1196,11 @@ int VoEFileImpl::ConvertCompressedToPCM(const char* fileNameInUTF8,
             // This is an OK way to end
             break;
         }
-        res=audioFrame.UpdateFrame(-1, 0, decodedData,
-                                   (WebRtc_UWord16)decLength,
-                                   frequency,
-                                   AudioFrame::kNormalSpeech,
-                                   AudioFrame::kVadActive);
-        if(res)
-        {
-            WEBRTC_TRACE(kTraceError, kTraceVoice,
-                         VoEId(_shared->instance_id(), -1),
-                         "ConvertCompressedToPCM failed during conversion "
-                         "(create audio frame)");
-            break;
-        }
+        audioFrame.UpdateFrame(-1, 0, decodedData,
+                               (WebRtc_UWord16)decLength,
+                               frequency,
+                               AudioFrame::kNormalSpeech,
+                               AudioFrame::kVadActive);
 
         res=recObj.RecordAudioToFile(audioFrame);
         if(res)
@@ -1338,19 +1285,11 @@ int VoEFileImpl::ConvertCompressedToPCM(InStream* streamIn,
             // This is an OK way to end
             break;
         }
-        res=audioFrame.UpdateFrame(-1, 0, decodedData,
-                                   (WebRtc_UWord16)decLength,
-                                   frequency,
-                                   AudioFrame::kNormalSpeech,
-                                   AudioFrame::kVadActive);
-        if(res)
-        {
-            WEBRTC_TRACE(kTraceError, kTraceVoice,
-                         VoEId(_shared->instance_id(), -1),
-                         "ConvertCompressedToPCM failed during conversion "
-                         "(audio frame)");
-            break;
-        }
+        audioFrame.UpdateFrame(-1, 0, decodedData,
+                               (WebRtc_UWord16)decLength,
+                               frequency,
+                               AudioFrame::kNormalSpeech,
+                               AudioFrame::kVadActive);
 
         res=recObj.RecordAudioToFile(audioFrame);
         if(res)
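
Usage note (not part of the patch): the sketch below illustrates the caller-visible API change under the assumption that the patched module_common_types.h is on the include path and that AudioFrame lives in namespace webrtc, as elsewhere in this tree. The seven-argument UpdateFrame() call mirrors the ConvertPCMToWAV call sites above; the variable names and frame sizes are illustrative only.

    #include <assert.h>
    #include <stdint.h>

    #include "webrtc/modules/interface/module_common_types.h"

    int main() {
        webrtc::AudioFrame src;
        int16_t samples[160] = {0};  // 10 ms of mono audio at 16 kHz.

        // UpdateFrame() now returns void; an out-of-range length trips the
        // assert() inside it rather than returning -1 as before.
        src.UpdateFrame(-1, 0, samples, 160, 16000,
                        webrtc::AudioFrame::kNormalSpeech,
                        webrtc::AudioFrame::kVadActive);

        webrtc::AudioFrame copy;
        // copy = src;       // No longer compiles: DISALLOW_COPY_AND_ASSIGN.
        copy.CopyFrom(src);  // The deep copy must now be explicit.

        assert(copy.samples_per_channel_ == src.samples_per_channel_);
        return 0;
    }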