Rename AudioFrame members.
BUG=
TEST=trybots

Review URL: https://webrtc-codereview.appspot.com/542005

git-svn-id: http://webrtc.googlecode.com/svn/trunk@2164 4adac7df-926f-26a2-2b94-8c16560cd09d
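Summary of the rename, as applied throughout the diff below: _payloadData -> data_, _payloadDataLengthInSamples -> samples_per_channel_, _frequencyInHz -> sample_rate_hz_, _audioChannel -> num_channels_, _vadActivity -> vad_activity_, _speechType -> speech_type_, _timeStamp -> timestamp_, _id -> id_, _volume -> volume_, _energy -> energy_. A minimal sketch of caller code after the rename (hypothetical values; only members that appear in this diff are used):

    // Fill a 10 ms, 8 kHz mono frame using the renamed members.
    webrtc::AudioFrame frame;
    frame.sample_rate_hz_ = 8000;     // was _frequencyInHz
    frame.samples_per_channel_ = 80;  // was _payloadDataLengthInSamples
    frame.num_channels_ = 1;          // was _audioChannel
    frame.timestamp_ = 0;             // was _timeStamp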
@@ -586,7 +586,7 @@ ACMNetEQ::RecOut(
     }
     {
         WriteLockScoped lockCodec(*_decodeLock);
-        if(WebRtcNetEQ_RecOut(_inst[0], &(audioFrame._payloadData[0]),
+        if(WebRtcNetEQ_RecOut(_inst[0], &(audioFrame.data_[0]),
                               &payloadLenSample) != 0)
         {
             LogError("RecOut", 0);
@@ -604,7 +604,7 @@ ACMNetEQ::RecOut(
         }
     }
     WebRtcNetEQ_GetSpeechOutputType(_inst[0], &type);
-    audioFrame._audioChannel = 1;
+    audioFrame.num_channels_ = 1;
 }
 else
 {
@@ -667,10 +667,10 @@ audio by Master (%d samples) and Slave (%d samples).",

     for(WebRtc_Word16 n = 0; n < payloadLenSample; n++)
     {
-        audioFrame._payloadData[n<<1] = payloadMaster[n];
-        audioFrame._payloadData[(n<<1)+1] = payloadSlave[n];
+        audioFrame.data_[n<<1] = payloadMaster[n];
+        audioFrame.data_[(n<<1)+1] = payloadSlave[n];
     }
-    audioFrame._audioChannel = 2;
+    audioFrame.num_channels_ = 2;

     WebRtcNetEQ_GetSpeechOutputType(_inst[0], &typeMaster);
     WebRtcNetEQ_GetSpeechOutputType(_inst[1], &typeSlave);
@@ -685,58 +685,58 @@ audio by Master (%d samples) and Slave (%d samples).",
         }
     }

-    audioFrame._payloadDataLengthInSamples = static_cast<WebRtc_UWord16>(payloadLenSample);
+    audioFrame.samples_per_channel_ = static_cast<WebRtc_UWord16>(payloadLenSample);
     // NetEq always returns 10 ms of audio.
-    _currentSampFreqKHz = static_cast<float>(audioFrame._payloadDataLengthInSamples) / 10.0f;
-    audioFrame._frequencyInHz = audioFrame._payloadDataLengthInSamples * 100;
+    _currentSampFreqKHz = static_cast<float>(audioFrame.samples_per_channel_) / 10.0f;
+    audioFrame.sample_rate_hz_ = audioFrame.samples_per_channel_ * 100;
     if(_vadStatus)
     {
         if(type == kOutputVADPassive)
         {
-            audioFrame._vadActivity = AudioFrame::kVadPassive;
-            audioFrame._speechType = AudioFrame::kNormalSpeech;
+            audioFrame.vad_activity_ = AudioFrame::kVadPassive;
+            audioFrame.speech_type_ = AudioFrame::kNormalSpeech;
         }
         else if(type == kOutputNormal)
         {
-            audioFrame._vadActivity = AudioFrame::kVadActive;
-            audioFrame._speechType = AudioFrame::kNormalSpeech;
+            audioFrame.vad_activity_ = AudioFrame::kVadActive;
+            audioFrame.speech_type_ = AudioFrame::kNormalSpeech;
         }
         else if(type == kOutputPLC)
        {
-            audioFrame._vadActivity = _previousAudioActivity;
-            audioFrame._speechType = AudioFrame::kPLC;
+            audioFrame.vad_activity_ = _previousAudioActivity;
+            audioFrame.speech_type_ = AudioFrame::kPLC;
         }
         else if(type == kOutputCNG)
         {
-            audioFrame._vadActivity = AudioFrame::kVadPassive;
-            audioFrame._speechType = AudioFrame::kCNG;
+            audioFrame.vad_activity_ = AudioFrame::kVadPassive;
+            audioFrame.speech_type_ = AudioFrame::kCNG;
         }
         else
         {
-            audioFrame._vadActivity = AudioFrame::kVadPassive;
-            audioFrame._speechType = AudioFrame::kPLCCNG;
+            audioFrame.vad_activity_ = AudioFrame::kVadPassive;
+            audioFrame.speech_type_ = AudioFrame::kPLCCNG;
         }
     }
     else
     {
         // Always return kVadUnknown when receive VAD is inactive
-        audioFrame._vadActivity = AudioFrame::kVadUnknown;
+        audioFrame.vad_activity_ = AudioFrame::kVadUnknown;

         if(type == kOutputNormal)
         {
-            audioFrame._speechType = AudioFrame::kNormalSpeech;
+            audioFrame.speech_type_ = AudioFrame::kNormalSpeech;
         }
         else if(type == kOutputPLC)
         {
-            audioFrame._speechType = AudioFrame::kPLC;
+            audioFrame.speech_type_ = AudioFrame::kPLC;
         }
         else if(type == kOutputPLCtoCNG)
         {
-            audioFrame._speechType = AudioFrame::kPLCCNG;
+            audioFrame.speech_type_ = AudioFrame::kPLCCNG;
         }
         else if(type == kOutputCNG)
         {
-            audioFrame._speechType = AudioFrame::kCNG;
+            audioFrame.speech_type_ = AudioFrame::kCNG;
         }
         else
         {
@@ -744,11 +744,11 @@ audio by Master (%d samples) and Slave (%d samples).",
             // we don't expect to get if _vadStatus is false
             WEBRTC_TRACE(webrtc::kTraceWarning, webrtc::kTraceAudioCoding, _id,
                 "RecOut: NetEq returned kVadPassive while _vadStatus is false.");
-            audioFrame._vadActivity = AudioFrame::kVadUnknown;
-            audioFrame._speechType = AudioFrame::kNormalSpeech;
+            audioFrame.vad_activity_ = AudioFrame::kVadUnknown;
+            audioFrame.speech_type_ = AudioFrame::kNormalSpeech;
         }
     }
-    _previousAudioActivity = audioFrame._vadActivity;
+    _previousAudioActivity = audioFrame.vad_activity_;

     return 0;
 }
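A detail worth pulling out of the RecOut hunks above: NetEq always returns 10 ms of audio, so the renamed members satisfy sample_rate_hz_ = samples_per_channel_ * 100. A minimal sketch of that invariant (hypothetical helper, not part of the commit):

    // 100 ten-millisecond frames fit in one second, so the per-channel
    // sample count of a 10 ms frame times 100 is the sample rate in Hz.
    int SampleRateFromTenMsFrame(int samples_per_channel) {
      return samples_per_channel * 100;
    }

For example, an 80-sample frame implies 8000 Hz and a 480-sample frame implies 48000 Hz.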
@@ -74,7 +74,7 @@ void AcmNetEqTest::InsertZeroPacket(uint16_t sequence_number,
 void AcmNetEqTest::PullData(int expected_num_samples) {
   AudioFrame out_frame;
   ASSERT_EQ(0, neteq_.RecOut(out_frame));
-  ASSERT_EQ(expected_num_samples, out_frame._payloadDataLengthInSamples);
+  ASSERT_EQ(expected_num_samples, out_frame.samples_per_channel_);
 }

 TEST_F(AcmNetEqTest, NetworkStatistics) {
@@ -942,17 +942,17 @@ WebRtc_Word32 AudioCodingModuleImpl::Add10MsData(
     return -1;
   }

-  if (audio_frame._payloadDataLengthInSamples == 0) {
+  if (audio_frame.samples_per_channel_ == 0) {
     assert(false);
     WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
                  "Cannot Add 10 ms audio, payload length is zero");
     return -1;
   }
   // Allow for 8, 16, 32 and 48kHz input audio.
-  if ((audio_frame._frequencyInHz != 8000)
-      && (audio_frame._frequencyInHz != 16000)
-      && (audio_frame._frequencyInHz != 32000)
-      && (audio_frame._frequencyInHz != 48000)) {
+  if ((audio_frame.sample_rate_hz_ != 8000)
+      && (audio_frame.sample_rate_hz_ != 16000)
+      && (audio_frame.sample_rate_hz_ != 32000)
+      && (audio_frame.sample_rate_hz_ != 48000)) {
     assert(false);
     WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
                  "Cannot Add 10 ms audio, input frequency not valid");
@@ -960,8 +960,8 @@ WebRtc_Word32 AudioCodingModuleImpl::Add10MsData(
   }

   // If the length and frequency matches. We currently just support raw PCM.
-  if ((audio_frame._frequencyInHz / 100)
-      != audio_frame._payloadDataLengthInSamples) {
+  if ((audio_frame.sample_rate_hz_ / 100)
+      != audio_frame.samples_per_channel_) {
     WEBRTC_TRACE(
         webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
         "Cannot Add 10 ms audio, input frequency and length doesn't match");
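The two Add10MsData hunks above encode a pair of preconditions: the input rate must be 8, 16, 32, or 48 kHz, and the frame must hold exactly 10 ms of audio, i.e. sample_rate_hz_ / 100 samples per channel. A stand-alone sketch of the same check (hypothetical helper, not part of the commit):

    // A 10 ms frame carries one hundredth of a second of audio, so the
    // per-channel sample count must equal the rate divided by 100.
    bool IsValid10MsFrame(int sample_rate_hz, int samples_per_channel) {
      const bool valid_rate = (sample_rate_hz == 8000) ||
                              (sample_rate_hz == 16000) ||
                              (sample_rate_hz == 32000) ||
                              (sample_rate_hz == 48000);
      return valid_rate && (samples_per_channel == sample_rate_hz / 100);
    }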
@@ -971,33 +971,33 @@ WebRtc_Word32 AudioCodingModuleImpl::Add10MsData(
   // Calculate the timestamp that should be pushed to codec.
   // This might be different from the timestamp of the frame
   // due to re-sampling.
-  bool resample = ((WebRtc_Word32) audio_frame._frequencyInHz
+  bool resample = ((WebRtc_Word32) audio_frame.sample_rate_hz_
       != _sendCodecInst.plfreq);

   // If number of channels in audio doesn't match codec mode, we need
   // either mono-to-stereo or stereo-to-mono conversion.
   WebRtc_Word16 audio[WEBRTC_10MS_PCM_AUDIO];
   int audio_channels = _sendCodecInst.channels;
-  if (audio_frame._audioChannel != _sendCodecInst.channels) {
+  if (audio_frame.num_channels_ != _sendCodecInst.channels) {
     if (_sendCodecInst.channels == 2) {
       // Do mono-to-stereo conversion by copying each sample.
-      for (int k = 0; k < audio_frame._payloadDataLengthInSamples; k++) {
-        audio[k * 2] = audio_frame._payloadData[k];
-        audio[(k * 2) + 1] = audio_frame._payloadData[k];
+      for (int k = 0; k < audio_frame.samples_per_channel_; k++) {
+        audio[k * 2] = audio_frame.data_[k];
+        audio[(k * 2) + 1] = audio_frame.data_[k];
       }
     } else if (_sendCodecInst.channels == 1) {
       // Do stereo-to-mono conversion by creating the average of the stereo
       // samples.
-      for (int k = 0; k < audio_frame._payloadDataLengthInSamples; k++) {
-        audio[k] = (audio_frame._payloadData[k * 2]
-            + audio_frame._payloadData[(k * 2) + 1]) >> 1;
+      for (int k = 0; k < audio_frame.samples_per_channel_; k++) {
+        audio[k] = (audio_frame.data_[k * 2]
+            + audio_frame.data_[(k * 2) + 1]) >> 1;
       }
     }
   } else {
     // Copy payload data for future use.
-    size_t length = static_cast<size_t>(audio_frame._payloadDataLengthInSamples
+    size_t length = static_cast<size_t>(audio_frame.samples_per_channel_
         * audio_channels);
-    memcpy(audio, audio_frame._payloadData, length * sizeof(WebRtc_UWord16));
+    memcpy(audio, audio_frame.data_, length * sizeof(WebRtc_UWord16));
   }

   WebRtc_UWord32 current_timestamp;
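The channel-conversion logic in the hunk above is unchanged by the rename; pulled out on its own it reads as follows (a sketch with hypothetical names, assuming 16-bit interleaved PCM as in the surrounding code):

    #include <cstdint>

    // Mono to stereo: duplicate each sample into the left and right slots.
    void MonoToStereo(const int16_t* in, int16_t* out, int samples_per_channel) {
      for (int k = 0; k < samples_per_channel; k++) {
        out[k * 2] = in[k];
        out[k * 2 + 1] = in[k];
      }
    }

    // Stereo to mono: sum left and right, then halve with >> 1, matching
    // the shift used in the hunk above.
    void StereoToMono(const int16_t* in, int16_t* out, int samples_per_channel) {
      for (int k = 0; k < samples_per_channel; k++) {
        out[k] = (in[k * 2] + in[k * 2 + 1]) >> 1;
      }
    }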
@@ -1010,18 +1010,18 @@ WebRtc_Word32 AudioCodingModuleImpl::Add10MsData(
     WebRtc_Word16 new_length;

     // Calculate the timestamp of this frame.
-    if (_lastInTimestamp > audio_frame._timeStamp) {
+    if (_lastInTimestamp > audio_frame.timestamp_) {
       // A wrap around has happened.
       timestamp_diff = ((WebRtc_UWord32) 0xFFFFFFFF - _lastInTimestamp)
-          + audio_frame._timeStamp;
+          + audio_frame.timestamp_;
     } else {
-      timestamp_diff = audio_frame._timeStamp - _lastInTimestamp;
+      timestamp_diff = audio_frame.timestamp_ - _lastInTimestamp;
     }
     current_timestamp = _lastTimestamp + (WebRtc_UWord32)(timestamp_diff *
-        ((double) _sendCodecInst.plfreq / (double) audio_frame._frequencyInHz));
+        ((double) _sendCodecInst.plfreq / (double) audio_frame.sample_rate_hz_));

     new_length = _inputResampler.Resample10Msec(audio,
-                                                audio_frame._frequencyInHz,
+                                                audio_frame.sample_rate_hz_,
                                                 resampled_audio, send_freq,
                                                 audio_channels);
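The timestamp arithmetic in the hunk above handles 32-bit wrap-around explicitly. As a stand-alone sketch (hypothetical helper; the production code spells uint32_t as WebRtc_UWord32):

    #include <cstdint>

    // Difference between two RTP-style timestamps, allowing for one wrap
    // of the 32-bit counter between them, mirroring the branch above.
    uint32_t TimestampDiff(uint32_t last, uint32_t now) {
      if (last > now) {
        // The counter wrapped: distance to the top plus distance from zero.
        return (0xFFFFFFFFu - last) + now;
      }
      return now - last;
    }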
@@ -1035,13 +1035,13 @@ WebRtc_Word32 AudioCodingModuleImpl::Add10MsData(
                                               new_length,
                                               audio_channels);
   } else {
-    current_timestamp = audio_frame._timeStamp;
+    current_timestamp = audio_frame.timestamp_;

     status = _codecs[_currentSendCodecIdx]->Add10MsData(
-        current_timestamp, audio, audio_frame._payloadDataLengthInSamples,
+        current_timestamp, audio, audio_frame.samples_per_channel_,
         audio_channels);
   }
-  _lastInTimestamp = audio_frame._timeStamp;
+  _lastInTimestamp = audio_frame.timestamp_;
   _lastTimestamp = current_timestamp;
   return status;
 }
@@ -1733,16 +1733,16 @@ WebRtc_Word32 AudioCodingModuleImpl::PlayoutData10Ms(
     return -1;
   }

-  audio_frame._audioChannel = _audioFrame._audioChannel;
-  audio_frame._vadActivity = _audioFrame._vadActivity;
-  audio_frame._speechType = _audioFrame._speechType;
+  audio_frame.num_channels_ = _audioFrame.num_channels_;
+  audio_frame.vad_activity_ = _audioFrame.vad_activity_;
+  audio_frame.speech_type_ = _audioFrame.speech_type_;

-  stereo_mode = (_audioFrame._audioChannel > 1);
+  stereo_mode = (_audioFrame.num_channels_ > 1);
   // For stereo playout:
   //   Master and Slave samples are interleaved starting with Master.

   const WebRtc_UWord16 receive_freq =
-      static_cast<WebRtc_UWord16>(_audioFrame._frequencyInHz);
+      static_cast<WebRtc_UWord16>(_audioFrame.sample_rate_hz_);
   bool tone_detected = false;
   WebRtc_Word16 last_detected_tone;
   WebRtc_Word16 tone;
@@ -1754,8 +1754,8 @@ WebRtc_Word32 AudioCodingModuleImpl::PlayoutData10Ms(
   if ((receive_freq != desired_freq_hz) && (desired_freq_hz != -1)) {
     // Resample payloadData.
     WebRtc_Word16 temp_len = _outputResampler.Resample10Msec(
-        _audioFrame._payloadData, receive_freq, audio_frame._payloadData,
-        desired_freq_hz, _audioFrame._audioChannel);
+        _audioFrame.data_, receive_freq, audio_frame.data_,
+        desired_freq_hz, _audioFrame.num_channels_);

     if (temp_len < 0) {
       WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
@@ -1764,55 +1764,55 @@ WebRtc_Word32 AudioCodingModuleImpl::PlayoutData10Ms(
     }

     // Set the payload data length from the resampler.
-    audio_frame._payloadDataLengthInSamples = (WebRtc_UWord16) temp_len;
+    audio_frame.samples_per_channel_ = (WebRtc_UWord16) temp_len;
     // Set the sampling frequency.
-    audio_frame._frequencyInHz = desired_freq_hz;
+    audio_frame.sample_rate_hz_ = desired_freq_hz;
   } else {
-    memcpy(audio_frame._payloadData, _audioFrame._payloadData,
-           _audioFrame._payloadDataLengthInSamples * audio_frame._audioChannel
+    memcpy(audio_frame.data_, _audioFrame.data_,
+           _audioFrame.samples_per_channel_ * audio_frame.num_channels_
            * sizeof(WebRtc_Word16));
     // Set the payload length.
-    audio_frame._payloadDataLengthInSamples =
-        _audioFrame._payloadDataLengthInSamples;
+    audio_frame.samples_per_channel_ =
+        _audioFrame.samples_per_channel_;
     // Set the sampling frequency.
-    audio_frame._frequencyInHz = receive_freq;
+    audio_frame.sample_rate_hz_ = receive_freq;
   }

   // Tone detection done for master channel.
   if (_dtmfDetector != NULL) {
     // Dtmf Detection.
-    if (audio_frame._frequencyInHz == 8000) {
-      // Use audio_frame._payloadData then Dtmf detector doesn't
+    if (audio_frame.sample_rate_hz_ == 8000) {
+      // Use audio_frame.data_ then Dtmf detector doesn't
       // need resampling.
       if (!stereo_mode) {
-        _dtmfDetector->Detect(audio_frame._payloadData,
-                              audio_frame._payloadDataLengthInSamples,
-                              audio_frame._frequencyInHz, tone_detected,
+        _dtmfDetector->Detect(audio_frame.data_,
+                              audio_frame.samples_per_channel_,
+                              audio_frame.sample_rate_hz_, tone_detected,
                               tone);
       } else {
         // We are in 8 kHz so the master channel needs only 80 samples.
         WebRtc_Word16 master_channel[80];
         for (int n = 0; n < 80; n++) {
-          master_channel[n] = audio_frame._payloadData[n << 1];
+          master_channel[n] = audio_frame.data_[n << 1];
         }
         _dtmfDetector->Detect(master_channel,
-                              audio_frame._payloadDataLengthInSamples,
-                              audio_frame._frequencyInHz, tone_detected,
+                              audio_frame.samples_per_channel_,
+                              audio_frame.sample_rate_hz_, tone_detected,
                               tone);
       }
     } else {
       // Do the detection on the audio that we got from NetEQ (_audioFrame).
       if (!stereo_mode) {
-        _dtmfDetector->Detect(_audioFrame._payloadData,
-                              _audioFrame._payloadDataLengthInSamples,
+        _dtmfDetector->Detect(_audioFrame.data_,
+                              _audioFrame.samples_per_channel_,
                               receive_freq, tone_detected, tone);
       } else {
         WebRtc_Word16 master_channel[WEBRTC_10MS_PCM_AUDIO];
-        for (int n = 0; n < _audioFrame._payloadDataLengthInSamples; n++) {
-          master_channel[n] = _audioFrame._payloadData[n << 1];
+        for (int n = 0; n < _audioFrame.samples_per_channel_; n++) {
+          master_channel[n] = _audioFrame.data_[n << 1];
         }
         _dtmfDetector->Detect(master_channel,
-                              _audioFrame._payloadDataLengthInSamples,
+                              _audioFrame.samples_per_channel_,
                               receive_freq, tone_detected, tone);
       }
     }
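For stereo DTMF detection, the hunks above feed the detector only the master channel; with Master/Slave interleaving that starts on Master, that is every even-indexed sample. A stand-alone sketch (hypothetical helper, not part of the commit):

    #include <cstdint>

    // Copy the master channel out of an interleaved stereo buffer; n << 1
    // selects the even (Master) samples, as in the loops above.
    void ExtractMasterChannel(const int16_t* interleaved, int16_t* master,
                              int samples_per_channel) {
      for (int n = 0; n < samples_per_channel; n++) {
        master[n] = interleaved[n << 1];
      }
    }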
@@ -1844,10 +1844,10 @@ WebRtc_Word32 AudioCodingModuleImpl::PlayoutData10Ms(
     }
   }

-  audio_frame._id = _id;
-  audio_frame._volume = -1;
-  audio_frame._energy = -1;
-  audio_frame._timeStamp = 0;
+  audio_frame.id_ = _id;
+  audio_frame.volume_ = -1;
+  audio_frame.energy_ = -1;
+  audio_frame.timestamp_ = 0;

   return 0;
 }
@@ -1,5 +1,5 @@
 /*
- *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
  *
  *  Use of this source code is governed by a BSD-style license
  *  that can be found in the LICENSE file in the root of the source
@@ -430,7 +430,7 @@ APITest::PullAudioRunA()
         {
             _outFileA.Write10MsData(audioFrame);
         }
-        _receiveVADActivityA[(int)audioFrame._vadActivity]++;
+        _receiveVADActivityA[(int)audioFrame.vad_activity_]++;
     }
     return true;
 }
@@ -459,7 +459,7 @@ APITest::PullAudioRunB()
         {
             _outFileB.Write10MsData(audioFrame);
         }
-        _receiveVADActivityB[(int)audioFrame._vadActivity]++;
+        _receiveVADActivityB[(int)audioFrame.vad_activity_]++;
     }
     return true;
 }
@@ -242,8 +242,8 @@ bool Receiver::PlayoutData() {
   if (_playoutLengthSmpls == 0) {
     return false;
   }
-  _pcmFile.Write10MsData(audioFrame._payloadData,
-                         audioFrame._payloadDataLengthInSamples);
+  _pcmFile.Write10MsData(audioFrame.data_,
+                         audioFrame.samples_per_channel_);
   return true;
 }
@@ -155,13 +155,13 @@ WebRtc_Word32 PCMFile::Read10MsData(AudioFrame& audio_frame) {
     channels = 2;
   }

-  WebRtc_Word32 payload_size = (WebRtc_Word32) fread(audio_frame._payloadData,
+  WebRtc_Word32 payload_size = (WebRtc_Word32) fread(audio_frame.data_,
                                                      sizeof(WebRtc_UWord16),
                                                      samples_10ms_ * channels,
                                                      pcm_file_);
   if (payload_size < samples_10ms_ * channels) {
     for (int k = payload_size; k < samples_10ms_ * channels; k++) {
-      audio_frame._payloadData[k] = 0;
+      audio_frame.data_[k] = 0;
     }
     if (auto_rewind_) {
       rewind(pcm_file_);
@@ -170,34 +170,34 @@ WebRtc_Word32 PCMFile::Read10MsData(AudioFrame& audio_frame) {
       end_of_file_ = true;
     }
   }
-  audio_frame._payloadDataLengthInSamples = samples_10ms_;
-  audio_frame._frequencyInHz = frequency_;
-  audio_frame._audioChannel = channels;
-  audio_frame._timeStamp = timestamp_;
+  audio_frame.samples_per_channel_ = samples_10ms_;
+  audio_frame.sample_rate_hz_ = frequency_;
+  audio_frame.num_channels_ = channels;
+  audio_frame.timestamp_ = timestamp_;
   timestamp_ += samples_10ms_;
   return samples_10ms_;
 }

 void PCMFile::Write10MsData(AudioFrame& audio_frame) {
-  if (audio_frame._audioChannel == 1) {
+  if (audio_frame.num_channels_ == 1) {
     if (!save_stereo_) {
-      fwrite(audio_frame._payloadData, sizeof(WebRtc_UWord16),
-             audio_frame._payloadDataLengthInSamples, pcm_file_);
+      fwrite(audio_frame.data_, sizeof(WebRtc_UWord16),
+             audio_frame.samples_per_channel_, pcm_file_);
     } else {
       WebRtc_Word16* stereo_audio =
-          new WebRtc_Word16[2 * audio_frame._payloadDataLengthInSamples];
+          new WebRtc_Word16[2 * audio_frame.samples_per_channel_];
       int k;
-      for (k = 0; k < audio_frame._payloadDataLengthInSamples; k++) {
-        stereo_audio[k << 1] = audio_frame._payloadData[k];
-        stereo_audio[(k << 1) + 1] = audio_frame._payloadData[k];
+      for (k = 0; k < audio_frame.samples_per_channel_; k++) {
+        stereo_audio[k << 1] = audio_frame.data_[k];
+        stereo_audio[(k << 1) + 1] = audio_frame.data_[k];
       }
       fwrite(stereo_audio, sizeof(WebRtc_Word16),
-             2 * audio_frame._payloadDataLengthInSamples, pcm_file_);
+             2 * audio_frame.samples_per_channel_, pcm_file_);
       delete[] stereo_audio;
     }
   } else {
-    fwrite(audio_frame._payloadData, sizeof(WebRtc_Word16),
-           audio_frame._audioChannel * audio_frame._payloadDataLengthInSamples,
+    fwrite(audio_frame.data_, sizeof(WebRtc_Word16),
+           audio_frame.num_channels_ * audio_frame.samples_per_channel_,
            pcm_file_);
   }
 }
@@ -199,17 +199,17 @@ SpatialAudio::EncodeDecode(
     while(!_inFile.EndOfFile())
     {
         _inFile.Read10MsData(audioFrame);
-        for(int n = 0; n < audioFrame._payloadDataLengthInSamples; n++)
+        for(int n = 0; n < audioFrame.samples_per_channel_; n++)
         {
-            audioFrame._payloadData[n] = (WebRtc_Word16)floor(
-                audioFrame._payloadData[n] * leftPanning + 0.5);
+            audioFrame.data_[n] = (WebRtc_Word16)floor(
+                audioFrame.data_[n] * leftPanning + 0.5);
         }
         CHECK_ERROR(_acmLeft->Add10MsData(audioFrame));

-        for(int n = 0; n < audioFrame._payloadDataLengthInSamples; n++)
+        for(int n = 0; n < audioFrame.samples_per_channel_; n++)
         {
-            audioFrame._payloadData[n] = (WebRtc_Word16)floor(
-                audioFrame._payloadData[n] * rightToLeftRatio + 0.5);
+            audioFrame.data_[n] = (WebRtc_Word16)floor(
+                audioFrame.data_[n] * rightToLeftRatio + 0.5);
         }
         CHECK_ERROR(_acmRight->Add10MsData(audioFrame));

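The SpatialAudio test above scales each sample by a pan gain and rounds with floor(x + 0.5). A sketch of the same pattern (hypothetical helper, assuming gains in [0, 1] so the product fits in 16 bits):

    #include <cmath>
    #include <cstdint>

    // Scale a sample by a gain and round to nearest; floor(x + 0.5)
    // matches the rounding used in the test loops above.
    int16_t ScaleSample(int16_t sample, double gain) {
      return static_cast<int16_t>(std::floor(sample * gain + 0.5));
    }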
@@ -831,7 +831,7 @@ void TestAllCodecs::Run(TestPack* channel)
         CHECK_ERROR(_acmB->PlayoutData10Ms(outFreqHzB, audioFrame));

         // Write output speech to file
-        _outFileB.Write10MsData(audioFrame._payloadData, audioFrame._payloadDataLengthInSamples);
+        _outFileB.Write10MsData(audioFrame.data_, audioFrame.samples_per_channel_);
     }

     if (errorCount)
@@ -1,5 +1,5 @@
 /*
- *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
  *
  *  Use of this source code is governed by a BSD-style license
  *  that can be found in the LICENSE file in the root of the source
@@ -575,7 +575,7 @@ void TestFEC::Run()
         CHECK_ERROR(_acmA->Add10MsData(audioFrame));
         CHECK_ERROR(_acmA->Process());
         CHECK_ERROR(_acmB->PlayoutData10Ms(outFreqHzB, audioFrame));
-        _outFileB.Write10MsData(audioFrame._payloadData, audioFrame._payloadDataLengthInSamples);
+        _outFileB.Write10MsData(audioFrame.data_, audioFrame.samples_per_channel_);
         msecPassed += 10;
         if(msecPassed >= 1000)
         {
@@ -892,8 +892,8 @@ void TestStereo::Run(TestPackStereo* channel, int in_channels, int out_channels,

     // Write output speech to file
     out_file_.Write10MsData(
-        audio_frame._payloadData,
-        audio_frame._payloadDataLengthInSamples * audio_frame._audioChannel);
+        audio_frame.data_,
+        audio_frame.samples_per_channel_ * audio_frame.num_channels_);
   }

   if (error_count) {
@@ -1,5 +1,5 @@
 /*
- *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
  *
  *  Use of this source code is governed by a BSD-style license
  *  that can be found in the LICENSE file in the root of the source
@@ -337,14 +337,14 @@ void TestVADDTX::Run()
     while(!_inFileA.EndOfFile())
     {
         _inFileA.Read10MsData(audioFrame);
-        audioFrame._timeStamp = timestampA;
+        audioFrame.timestamp_ = timestampA;
         timestampA += SamplesIn10MsecA;
         CHECK_ERROR(_acmA->Add10MsData(audioFrame));

         CHECK_ERROR(_acmA->Process());

         CHECK_ERROR(_acmB->PlayoutData10Ms(outFreqHzB, audioFrame));
-        _outFileB.Write10MsData(audioFrame._payloadData, audioFrame._payloadDataLengthInSamples);
+        _outFileB.Write10MsData(audioFrame.data_, audioFrame.samples_per_channel_);
     }
 #ifdef PRINT_STAT
     _monitor.PrintStatistics(_testMode);