Rename AudioFrame members.
BUG=
TEST=trybots

Review URL: https://webrtc-codereview.appspot.com/542005

git-svn-id: http://webrtc.googlecode.com/svn/trunk@2164 4adac7df-926f-26a2-2b94-8c16560cd09d
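For reference, the hunks below are a mechanical rename of the public AudioFrame members (for example _payloadData -> data_, _payloadDataLengthInSamples -> samples_per_channel_, _frequencyInHz -> sample_rate_hz_, _audioChannel -> num_channels_, _vadActivity -> vad_activity_, _speechType -> speech_type_, _timeStamp -> timestamp_, _energy -> energy_). Below is a minimal sketch of caller code after the rename, using only member names that appear in this diff; the helper name, include path, and literal values are illustrative assumptions rather than part of the change:

    #include <cstring>                    // std::memset
    #include "module_common_types.h"      // assumed header declaring webrtc::AudioFrame in this tree

    // Hypothetical helper: fill one 10 ms mono frame of silence at 16 kHz.
    void FillSilence10Ms(webrtc::AudioFrame* frame) {
      frame->sample_rate_hz_ = 16000;                               // was _frequencyInHz
      frame->samples_per_channel_ = frame->sample_rate_hz_ / 100;   // was _payloadDataLengthInSamples
      frame->num_channels_ = 1;                                     // was _audioChannel
      frame->timestamp_ = 0;                                        // was _timeStamp
      frame->vad_activity_ = webrtc::AudioFrame::kVadUnknown;       // was _vadActivity
      frame->speech_type_ = webrtc::AudioFrame::kNormalSpeech;      // was _speechType
      std::memset(frame->data_, 0,                                  // was _payloadData
                  frame->samples_per_channel_ * frame->num_channels_ * sizeof(int16_t));
    }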
@@ -586,7 +586,7 @@ ACMNetEQ::RecOut(
}
{
WriteLockScoped lockCodec(*_decodeLock);
if(WebRtcNetEQ_RecOut(_inst[0], &(audioFrame._payloadData[0]),
if(WebRtcNetEQ_RecOut(_inst[0], &(audioFrame.data_[0]),
&payloadLenSample) != 0)
{
LogError("RecOut", 0);
@@ -604,7 +604,7 @@ ACMNetEQ::RecOut(
}
}
WebRtcNetEQ_GetSpeechOutputType(_inst[0], &type);
audioFrame._audioChannel = 1;
audioFrame.num_channels_ = 1;
}
else
{
@@ -667,10 +667,10 @@ audio by Master (%d samples) and Slave (%d samples).",
for(WebRtc_Word16 n = 0; n < payloadLenSample; n++)
{
audioFrame._payloadData[n<<1] = payloadMaster[n];
audioFrame._payloadData[(n<<1)+1] = payloadSlave[n];
audioFrame.data_[n<<1] = payloadMaster[n];
audioFrame.data_[(n<<1)+1] = payloadSlave[n];
}
audioFrame._audioChannel = 2;
audioFrame.num_channels_ = 2;
WebRtcNetEQ_GetSpeechOutputType(_inst[0], &typeMaster);
WebRtcNetEQ_GetSpeechOutputType(_inst[1], &typeSlave);
@@ -685,58 +685,58 @@ audio by Master (%d samples) and Slave (%d samples).",
}
}
audioFrame._payloadDataLengthInSamples = static_cast<WebRtc_UWord16>(payloadLenSample);
audioFrame.samples_per_channel_ = static_cast<WebRtc_UWord16>(payloadLenSample);
// NetEq always returns 10 ms of audio.
_currentSampFreqKHz = static_cast<float>(audioFrame._payloadDataLengthInSamples) / 10.0f;
audioFrame._frequencyInHz = audioFrame._payloadDataLengthInSamples * 100;
_currentSampFreqKHz = static_cast<float>(audioFrame.samples_per_channel_) / 10.0f;
audioFrame.sample_rate_hz_ = audioFrame.samples_per_channel_ * 100;
if(_vadStatus)
{
if(type == kOutputVADPassive)
{
audioFrame._vadActivity = AudioFrame::kVadPassive;
audioFrame._speechType = AudioFrame::kNormalSpeech;
audioFrame.vad_activity_ = AudioFrame::kVadPassive;
audioFrame.speech_type_ = AudioFrame::kNormalSpeech;
}
else if(type == kOutputNormal)
{
audioFrame._vadActivity = AudioFrame::kVadActive;
audioFrame._speechType = AudioFrame::kNormalSpeech;
audioFrame.vad_activity_ = AudioFrame::kVadActive;
audioFrame.speech_type_ = AudioFrame::kNormalSpeech;
}
else if(type == kOutputPLC)
{
audioFrame._vadActivity = _previousAudioActivity;
audioFrame._speechType = AudioFrame::kPLC;
audioFrame.vad_activity_ = _previousAudioActivity;
audioFrame.speech_type_ = AudioFrame::kPLC;
}
else if(type == kOutputCNG)
{
audioFrame._vadActivity = AudioFrame::kVadPassive;
audioFrame._speechType = AudioFrame::kCNG;
audioFrame.vad_activity_ = AudioFrame::kVadPassive;
audioFrame.speech_type_ = AudioFrame::kCNG;
}
else
{
audioFrame._vadActivity = AudioFrame::kVadPassive;
audioFrame._speechType = AudioFrame::kPLCCNG;
audioFrame.vad_activity_ = AudioFrame::kVadPassive;
audioFrame.speech_type_ = AudioFrame::kPLCCNG;
}
}
else
{
// Always return kVadUnknown when receive VAD is inactive
audioFrame._vadActivity = AudioFrame::kVadUnknown;
audioFrame.vad_activity_ = AudioFrame::kVadUnknown;
if(type == kOutputNormal)
{
audioFrame._speechType = AudioFrame::kNormalSpeech;
audioFrame.speech_type_ = AudioFrame::kNormalSpeech;
}
else if(type == kOutputPLC)
{
audioFrame._speechType = AudioFrame::kPLC;
audioFrame.speech_type_ = AudioFrame::kPLC;
}
else if(type == kOutputPLCtoCNG)
{
audioFrame._speechType = AudioFrame::kPLCCNG;
audioFrame.speech_type_ = AudioFrame::kPLCCNG;
}
else if(type == kOutputCNG)
{
audioFrame._speechType = AudioFrame::kCNG;
audioFrame.speech_type_ = AudioFrame::kCNG;
}
else
{
@@ -744,11 +744,11 @@ audio by Master (%d samples) and Slave (%d samples).",
// we don't expect to get if _vadStatus is false
WEBRTC_TRACE(webrtc::kTraceWarning, webrtc::kTraceAudioCoding, _id,
"RecOut: NetEq returned kVadPassive while _vadStatus is false.");
audioFrame._vadActivity = AudioFrame::kVadUnknown;
audioFrame._speechType = AudioFrame::kNormalSpeech;
audioFrame.vad_activity_ = AudioFrame::kVadUnknown;
audioFrame.speech_type_ = AudioFrame::kNormalSpeech;
}
}
_previousAudioActivity = audioFrame._vadActivity;
_previousAudioActivity = audioFrame.vad_activity_;
return 0;
}
@@ -74,7 +74,7 @@ void AcmNetEqTest::InsertZeroPacket(uint16_t sequence_number,
void AcmNetEqTest::PullData(int expected_num_samples) {
AudioFrame out_frame;
ASSERT_EQ(0, neteq_.RecOut(out_frame));
ASSERT_EQ(expected_num_samples, out_frame._payloadDataLengthInSamples);
ASSERT_EQ(expected_num_samples, out_frame.samples_per_channel_);
}
TEST_F(AcmNetEqTest, NetworkStatistics) {
@@ -942,17 +942,17 @@ WebRtc_Word32 AudioCodingModuleImpl::Add10MsData(
return -1;
}
if (audio_frame._payloadDataLengthInSamples == 0) {
if (audio_frame.samples_per_channel_ == 0) {
assert(false);
WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
"Cannot Add 10 ms audio, payload length is zero");
return -1;
}
// Allow for 8, 16, 32 and 48kHz input audio.
if ((audio_frame._frequencyInHz != 8000)
&& (audio_frame._frequencyInHz != 16000)
&& (audio_frame._frequencyInHz != 32000)
&& (audio_frame._frequencyInHz != 48000)) {
if ((audio_frame.sample_rate_hz_ != 8000)
&& (audio_frame.sample_rate_hz_ != 16000)
&& (audio_frame.sample_rate_hz_ != 32000)
&& (audio_frame.sample_rate_hz_ != 48000)) {
assert(false);
WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
"Cannot Add 10 ms audio, input frequency not valid");
@@ -960,8 +960,8 @@ WebRtc_Word32 AudioCodingModuleImpl::Add10MsData(
}
// If the length and frequency matches. We currently just support raw PCM.
if ((audio_frame._frequencyInHz / 100)
!= audio_frame._payloadDataLengthInSamples) {
if ((audio_frame.sample_rate_hz_ / 100)
!= audio_frame.samples_per_channel_) {
WEBRTC_TRACE(
webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
"Cannot Add 10 ms audio, input frequency and length doesn't match");
@@ -971,33 +971,33 @@ WebRtc_Word32 AudioCodingModuleImpl::Add10MsData(
|
||||
// Calculate the timestamp that should be pushed to codec.
|
||||
// This might be different from the timestamp of the frame
|
||||
// due to re-sampling.
|
||||
bool resample = ((WebRtc_Word32) audio_frame._frequencyInHz
|
||||
bool resample = ((WebRtc_Word32) audio_frame.sample_rate_hz_
|
||||
!= _sendCodecInst.plfreq);
|
||||
|
||||
// If number of channels in audio doesn't match codec mode, we need
|
||||
// either mono-to-stereo or stereo-to-mono conversion.
|
||||
WebRtc_Word16 audio[WEBRTC_10MS_PCM_AUDIO];
|
||||
int audio_channels = _sendCodecInst.channels;
|
||||
if (audio_frame._audioChannel != _sendCodecInst.channels) {
|
||||
if (audio_frame.num_channels_ != _sendCodecInst.channels) {
|
||||
if (_sendCodecInst.channels == 2) {
|
||||
// Do mono-to-stereo conversion by copying each sample.
|
||||
for (int k = 0; k < audio_frame._payloadDataLengthInSamples; k++) {
|
||||
audio[k * 2] = audio_frame._payloadData[k];
|
||||
audio[(k * 2) + 1] = audio_frame._payloadData[k];
|
||||
for (int k = 0; k < audio_frame.samples_per_channel_; k++) {
|
||||
audio[k * 2] = audio_frame.data_[k];
|
||||
audio[(k * 2) + 1] = audio_frame.data_[k];
|
||||
}
|
||||
} else if (_sendCodecInst.channels == 1) {
|
||||
// Do stereo-to-mono conversion by creating the average of the stereo
|
||||
// samples.
|
||||
for (int k = 0; k < audio_frame._payloadDataLengthInSamples; k++) {
|
||||
audio[k] = (audio_frame._payloadData[k * 2]
|
||||
+ audio_frame._payloadData[(k * 2) + 1]) >> 1;
|
||||
for (int k = 0; k < audio_frame.samples_per_channel_; k++) {
|
||||
audio[k] = (audio_frame.data_[k * 2]
|
||||
+ audio_frame.data_[(k * 2) + 1]) >> 1;
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// Copy payload data for future use.
|
||||
size_t length = static_cast<size_t>(audio_frame._payloadDataLengthInSamples
|
||||
size_t length = static_cast<size_t>(audio_frame.samples_per_channel_
|
||||
* audio_channels);
|
||||
memcpy(audio, audio_frame._payloadData, length * sizeof(WebRtc_UWord16));
|
||||
memcpy(audio, audio_frame.data_, length * sizeof(WebRtc_UWord16));
|
||||
}
|
||||
|
||||
WebRtc_UWord32 current_timestamp;
|
||||
@@ -1010,18 +1010,18 @@ WebRtc_Word32 AudioCodingModuleImpl::Add10MsData(
|
||||
WebRtc_Word16 new_length;
|
||||
|
||||
// Calculate the timestamp of this frame.
|
||||
if (_lastInTimestamp > audio_frame._timeStamp) {
|
||||
if (_lastInTimestamp > audio_frame.timestamp_) {
|
||||
// A wrap around has happened.
|
||||
timestamp_diff = ((WebRtc_UWord32) 0xFFFFFFFF - _lastInTimestamp)
|
||||
+ audio_frame._timeStamp;
|
||||
+ audio_frame.timestamp_;
|
||||
} else {
|
||||
timestamp_diff = audio_frame._timeStamp - _lastInTimestamp;
|
||||
timestamp_diff = audio_frame.timestamp_ - _lastInTimestamp;
|
||||
}
|
||||
current_timestamp = _lastTimestamp + (WebRtc_UWord32)(timestamp_diff *
|
||||
((double) _sendCodecInst.plfreq / (double) audio_frame._frequencyInHz));
|
||||
((double) _sendCodecInst.plfreq / (double) audio_frame.sample_rate_hz_));
|
||||
|
||||
new_length = _inputResampler.Resample10Msec(audio,
|
||||
audio_frame._frequencyInHz,
|
||||
audio_frame.sample_rate_hz_,
|
||||
resampled_audio, send_freq,
|
||||
audio_channels);
|
||||
|
||||
@@ -1035,13 +1035,13 @@ WebRtc_Word32 AudioCodingModuleImpl::Add10MsData(
|
||||
new_length,
|
||||
audio_channels);
|
||||
} else {
|
||||
current_timestamp = audio_frame._timeStamp;
|
||||
current_timestamp = audio_frame.timestamp_;
|
||||
|
||||
status = _codecs[_currentSendCodecIdx]->Add10MsData(
|
||||
current_timestamp, audio, audio_frame._payloadDataLengthInSamples,
|
||||
current_timestamp, audio, audio_frame.samples_per_channel_,
|
||||
audio_channels);
|
||||
}
|
||||
_lastInTimestamp = audio_frame._timeStamp;
|
||||
_lastInTimestamp = audio_frame.timestamp_;
|
||||
_lastTimestamp = current_timestamp;
|
||||
return status;
|
||||
}
|
||||
@@ -1733,16 +1733,16 @@ WebRtc_Word32 AudioCodingModuleImpl::PlayoutData10Ms(
|
||||
return -1;
|
||||
}
|
||||
|
||||
audio_frame._audioChannel = _audioFrame._audioChannel;
|
||||
audio_frame._vadActivity = _audioFrame._vadActivity;
|
||||
audio_frame._speechType = _audioFrame._speechType;
|
||||
audio_frame.num_channels_ = _audioFrame.num_channels_;
|
||||
audio_frame.vad_activity_ = _audioFrame.vad_activity_;
|
||||
audio_frame.speech_type_ = _audioFrame.speech_type_;
|
||||
|
||||
stereo_mode = (_audioFrame._audioChannel > 1);
|
||||
stereo_mode = (_audioFrame.num_channels_ > 1);
|
||||
// For stereo playout:
|
||||
// Master and Slave samples are interleaved starting with Master.
|
||||
|
||||
const WebRtc_UWord16 receive_freq =
|
||||
static_cast<WebRtc_UWord16>(_audioFrame._frequencyInHz);
|
||||
static_cast<WebRtc_UWord16>(_audioFrame.sample_rate_hz_);
|
||||
bool tone_detected = false;
|
||||
WebRtc_Word16 last_detected_tone;
|
||||
WebRtc_Word16 tone;
|
||||
@@ -1754,8 +1754,8 @@ WebRtc_Word32 AudioCodingModuleImpl::PlayoutData10Ms(
|
||||
if ((receive_freq != desired_freq_hz) && (desired_freq_hz != -1)) {
|
||||
// Resample payloadData.
|
||||
WebRtc_Word16 temp_len = _outputResampler.Resample10Msec(
|
||||
_audioFrame._payloadData, receive_freq, audio_frame._payloadData,
|
||||
desired_freq_hz, _audioFrame._audioChannel);
|
||||
_audioFrame.data_, receive_freq, audio_frame.data_,
|
||||
desired_freq_hz, _audioFrame.num_channels_);
|
||||
|
||||
if (temp_len < 0) {
|
||||
WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
|
||||
@@ -1764,55 +1764,55 @@ WebRtc_Word32 AudioCodingModuleImpl::PlayoutData10Ms(
|
||||
}
|
||||
|
||||
// Set the payload data length from the resampler.
|
||||
audio_frame._payloadDataLengthInSamples = (WebRtc_UWord16) temp_len;
|
||||
audio_frame.samples_per_channel_ = (WebRtc_UWord16) temp_len;
|
||||
// Set the sampling frequency.
|
||||
audio_frame._frequencyInHz = desired_freq_hz;
|
||||
audio_frame.sample_rate_hz_ = desired_freq_hz;
|
||||
} else {
|
||||
memcpy(audio_frame._payloadData, _audioFrame._payloadData,
|
||||
_audioFrame._payloadDataLengthInSamples * audio_frame._audioChannel
|
||||
memcpy(audio_frame.data_, _audioFrame.data_,
|
||||
_audioFrame.samples_per_channel_ * audio_frame.num_channels_
|
||||
* sizeof(WebRtc_Word16));
|
||||
// Set the payload length.
|
||||
audio_frame._payloadDataLengthInSamples =
|
||||
_audioFrame._payloadDataLengthInSamples;
|
||||
audio_frame.samples_per_channel_ =
|
||||
_audioFrame.samples_per_channel_;
|
||||
// Set the sampling frequency.
|
||||
audio_frame._frequencyInHz = receive_freq;
|
||||
audio_frame.sample_rate_hz_ = receive_freq;
|
||||
}
|
||||
|
||||
// Tone detection done for master channel.
|
||||
if (_dtmfDetector != NULL) {
|
||||
// Dtmf Detection.
|
||||
if (audio_frame._frequencyInHz == 8000) {
|
||||
// Use audio_frame._payloadData then Dtmf detector doesn't
|
||||
if (audio_frame.sample_rate_hz_ == 8000) {
|
||||
// Use audio_frame.data_ then Dtmf detector doesn't
|
||||
// need resampling.
|
||||
if (!stereo_mode) {
|
||||
_dtmfDetector->Detect(audio_frame._payloadData,
|
||||
audio_frame._payloadDataLengthInSamples,
|
||||
audio_frame._frequencyInHz, tone_detected,
|
||||
_dtmfDetector->Detect(audio_frame.data_,
|
||||
audio_frame.samples_per_channel_,
|
||||
audio_frame.sample_rate_hz_, tone_detected,
|
||||
tone);
|
||||
} else {
|
||||
// We are in 8 kHz so the master channel needs only 80 samples.
|
||||
WebRtc_Word16 master_channel[80];
|
||||
for (int n = 0; n < 80; n++) {
|
||||
master_channel[n] = audio_frame._payloadData[n << 1];
|
||||
master_channel[n] = audio_frame.data_[n << 1];
|
||||
}
|
||||
_dtmfDetector->Detect(master_channel,
|
||||
audio_frame._payloadDataLengthInSamples,
|
||||
audio_frame._frequencyInHz, tone_detected,
|
||||
audio_frame.samples_per_channel_,
|
||||
audio_frame.sample_rate_hz_, tone_detected,
|
||||
tone);
|
||||
}
|
||||
} else {
|
||||
// Do the detection on the audio that we got from NetEQ (_audioFrame).
|
||||
if (!stereo_mode) {
|
||||
_dtmfDetector->Detect(_audioFrame._payloadData,
|
||||
_audioFrame._payloadDataLengthInSamples,
|
||||
_dtmfDetector->Detect(_audioFrame.data_,
|
||||
_audioFrame.samples_per_channel_,
|
||||
receive_freq, tone_detected, tone);
|
||||
} else {
|
||||
WebRtc_Word16 master_channel[WEBRTC_10MS_PCM_AUDIO];
|
||||
for (int n = 0; n < _audioFrame._payloadDataLengthInSamples; n++) {
|
||||
master_channel[n] = _audioFrame._payloadData[n << 1];
|
||||
for (int n = 0; n < _audioFrame.samples_per_channel_; n++) {
|
||||
master_channel[n] = _audioFrame.data_[n << 1];
|
||||
}
|
||||
_dtmfDetector->Detect(master_channel,
|
||||
_audioFrame._payloadDataLengthInSamples,
|
||||
_audioFrame.samples_per_channel_,
|
||||
receive_freq, tone_detected, tone);
|
||||
}
|
||||
}
|
||||
@@ -1844,10 +1844,10 @@ WebRtc_Word32 AudioCodingModuleImpl::PlayoutData10Ms(
|
||||
}
|
||||
}
|
||||
|
||||
audio_frame._id = _id;
|
||||
audio_frame._volume = -1;
|
||||
audio_frame._energy = -1;
|
||||
audio_frame._timeStamp = 0;
|
||||
audio_frame.id_ = _id;
|
||||
audio_frame.volume_ = -1;
|
||||
audio_frame.energy_ = -1;
|
||||
audio_frame.timestamp_ = 0;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
|
||||
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
@@ -430,7 +430,7 @@ APITest::PullAudioRunA()
|
||||
{
|
||||
_outFileA.Write10MsData(audioFrame);
|
||||
}
|
||||
_receiveVADActivityA[(int)audioFrame._vadActivity]++;
|
||||
_receiveVADActivityA[(int)audioFrame.vad_activity_]++;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
@@ -459,7 +459,7 @@ APITest::PullAudioRunB()
|
||||
{
|
||||
_outFileB.Write10MsData(audioFrame);
|
||||
}
|
||||
_receiveVADActivityB[(int)audioFrame._vadActivity]++;
|
||||
_receiveVADActivityB[(int)audioFrame.vad_activity_]++;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
@@ -242,8 +242,8 @@ bool Receiver::PlayoutData() {
|
||||
if (_playoutLengthSmpls == 0) {
|
||||
return false;
|
||||
}
|
||||
_pcmFile.Write10MsData(audioFrame._payloadData,
|
||||
audioFrame._payloadDataLengthInSamples);
|
||||
_pcmFile.Write10MsData(audioFrame.data_,
|
||||
audioFrame.samples_per_channel_);
|
||||
return true;
|
||||
}
|
||||
|
||||
|
||||
@@ -155,13 +155,13 @@ WebRtc_Word32 PCMFile::Read10MsData(AudioFrame& audio_frame) {
|
||||
channels = 2;
|
||||
}
|
||||
|
||||
WebRtc_Word32 payload_size = (WebRtc_Word32) fread(audio_frame._payloadData,
|
||||
WebRtc_Word32 payload_size = (WebRtc_Word32) fread(audio_frame.data_,
|
||||
sizeof(WebRtc_UWord16),
|
||||
samples_10ms_ * channels,
|
||||
pcm_file_);
|
||||
if (payload_size < samples_10ms_ * channels) {
|
||||
for (int k = payload_size; k < samples_10ms_ * channels; k++) {
|
||||
audio_frame._payloadData[k] = 0;
|
||||
audio_frame.data_[k] = 0;
|
||||
}
|
||||
if (auto_rewind_) {
|
||||
rewind(pcm_file_);
|
||||
@@ -170,34 +170,34 @@ WebRtc_Word32 PCMFile::Read10MsData(AudioFrame& audio_frame) {
|
||||
end_of_file_ = true;
|
||||
}
|
||||
}
|
||||
audio_frame._payloadDataLengthInSamples = samples_10ms_;
|
||||
audio_frame._frequencyInHz = frequency_;
|
||||
audio_frame._audioChannel = channels;
|
||||
audio_frame._timeStamp = timestamp_;
|
||||
audio_frame.samples_per_channel_ = samples_10ms_;
|
||||
audio_frame.sample_rate_hz_ = frequency_;
|
||||
audio_frame.num_channels_ = channels;
|
||||
audio_frame.timestamp_ = timestamp_;
|
||||
timestamp_ += samples_10ms_;
|
||||
return samples_10ms_;
|
||||
}
|
||||
|
||||
void PCMFile::Write10MsData(AudioFrame& audio_frame) {
|
||||
if (audio_frame._audioChannel == 1) {
|
||||
if (audio_frame.num_channels_ == 1) {
|
||||
if (!save_stereo_) {
|
||||
fwrite(audio_frame._payloadData, sizeof(WebRtc_UWord16),
|
||||
audio_frame._payloadDataLengthInSamples, pcm_file_);
|
||||
fwrite(audio_frame.data_, sizeof(WebRtc_UWord16),
|
||||
audio_frame.samples_per_channel_, pcm_file_);
|
||||
} else {
|
||||
WebRtc_Word16* stereo_audio =
|
||||
new WebRtc_Word16[2 * audio_frame._payloadDataLengthInSamples];
|
||||
new WebRtc_Word16[2 * audio_frame.samples_per_channel_];
|
||||
int k;
|
||||
for (k = 0; k < audio_frame._payloadDataLengthInSamples; k++) {
|
||||
stereo_audio[k << 1] = audio_frame._payloadData[k];
|
||||
stereo_audio[(k << 1) + 1] = audio_frame._payloadData[k];
|
||||
for (k = 0; k < audio_frame.samples_per_channel_; k++) {
|
||||
stereo_audio[k << 1] = audio_frame.data_[k];
|
||||
stereo_audio[(k << 1) + 1] = audio_frame.data_[k];
|
||||
}
|
||||
fwrite(stereo_audio, sizeof(WebRtc_Word16),
|
||||
2 * audio_frame._payloadDataLengthInSamples, pcm_file_);
|
||||
2 * audio_frame.samples_per_channel_, pcm_file_);
|
||||
delete[] stereo_audio;
|
||||
}
|
||||
} else {
|
||||
fwrite(audio_frame._payloadData, sizeof(WebRtc_Word16),
|
||||
audio_frame._audioChannel * audio_frame._payloadDataLengthInSamples,
|
||||
fwrite(audio_frame.data_, sizeof(WebRtc_Word16),
|
||||
audio_frame.num_channels_ * audio_frame.samples_per_channel_,
|
||||
pcm_file_);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -199,17 +199,17 @@ SpatialAudio::EncodeDecode(
|
||||
while(!_inFile.EndOfFile())
|
||||
{
|
||||
_inFile.Read10MsData(audioFrame);
|
||||
for(int n = 0; n < audioFrame._payloadDataLengthInSamples; n++)
|
||||
for(int n = 0; n < audioFrame.samples_per_channel_; n++)
|
||||
{
|
||||
audioFrame._payloadData[n] = (WebRtc_Word16)floor(
|
||||
audioFrame._payloadData[n] * leftPanning + 0.5);
|
||||
audioFrame.data_[n] = (WebRtc_Word16)floor(
|
||||
audioFrame.data_[n] * leftPanning + 0.5);
|
||||
}
|
||||
CHECK_ERROR(_acmLeft->Add10MsData(audioFrame));
|
||||
|
||||
for(int n = 0; n < audioFrame._payloadDataLengthInSamples; n++)
|
||||
for(int n = 0; n < audioFrame.samples_per_channel_; n++)
|
||||
{
|
||||
audioFrame._payloadData[n] = (WebRtc_Word16)floor(
|
||||
audioFrame._payloadData[n] * rightToLeftRatio + 0.5);
|
||||
audioFrame.data_[n] = (WebRtc_Word16)floor(
|
||||
audioFrame.data_[n] * rightToLeftRatio + 0.5);
|
||||
}
|
||||
CHECK_ERROR(_acmRight->Add10MsData(audioFrame));
|
||||
|
||||
|
||||
@@ -831,7 +831,7 @@ void TestAllCodecs::Run(TestPack* channel)
|
||||
CHECK_ERROR(_acmB->PlayoutData10Ms(outFreqHzB, audioFrame));
|
||||
|
||||
// Write output speech to file
|
||||
_outFileB.Write10MsData(audioFrame._payloadData, audioFrame._payloadDataLengthInSamples);
|
||||
_outFileB.Write10MsData(audioFrame.data_, audioFrame.samples_per_channel_);
|
||||
}
|
||||
|
||||
if (errorCount)
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
|
||||
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
@@ -575,7 +575,7 @@ void TestFEC::Run()
|
||||
CHECK_ERROR(_acmA->Add10MsData(audioFrame));
|
||||
CHECK_ERROR(_acmA->Process());
|
||||
CHECK_ERROR(_acmB->PlayoutData10Ms(outFreqHzB, audioFrame));
|
||||
_outFileB.Write10MsData(audioFrame._payloadData, audioFrame._payloadDataLengthInSamples);
|
||||
_outFileB.Write10MsData(audioFrame.data_, audioFrame.samples_per_channel_);
|
||||
msecPassed += 10;
|
||||
if(msecPassed >= 1000)
|
||||
{
|
||||
|
||||
@@ -892,8 +892,8 @@ void TestStereo::Run(TestPackStereo* channel, int in_channels, int out_channels,
|
||||
|
||||
// Write output speech to file
|
||||
out_file_.Write10MsData(
|
||||
audio_frame._payloadData,
|
||||
audio_frame._payloadDataLengthInSamples * audio_frame._audioChannel);
|
||||
audio_frame.data_,
|
||||
audio_frame.samples_per_channel_ * audio_frame.num_channels_);
|
||||
}
|
||||
|
||||
if (error_count) {
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
|
||||
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
@@ -337,14 +337,14 @@ void TestVADDTX::Run()
|
||||
while(!_inFileA.EndOfFile())
|
||||
{
|
||||
_inFileA.Read10MsData(audioFrame);
|
||||
audioFrame._timeStamp = timestampA;
|
||||
audioFrame.timestamp_ = timestampA;
|
||||
timestampA += SamplesIn10MsecA;
|
||||
CHECK_ERROR(_acmA->Add10MsData(audioFrame));
|
||||
|
||||
CHECK_ERROR(_acmA->Process());
|
||||
|
||||
CHECK_ERROR(_acmB->PlayoutData10Ms(outFreqHzB, audioFrame));
|
||||
_outFileB.Write10MsData(audioFrame._payloadData, audioFrame._payloadDataLengthInSamples);
|
||||
_outFileB.Write10MsData(audioFrame.data_, audioFrame.samples_per_channel_);
|
||||
}
|
||||
#ifdef PRINT_STAT
|
||||
_monitor.PrintStatistics(_testMode);
|
||||
|
||||
@@ -21,8 +21,8 @@ namespace {
|
||||
void SetParticipantStatistics(ParticipantStatistics* stats,
|
||||
const AudioFrame& frame)
|
||||
{
|
||||
stats->participant = frame._id;
|
||||
stats->level = frame._volume;
|
||||
stats->participant = frame.id_;
|
||||
stats->level = frame.volume_;
|
||||
}
|
||||
} // namespace
|
||||
|
||||
@@ -290,7 +290,7 @@ WebRtc_Word32 AudioConferenceMixerImpl::Process()
|
||||
{
|
||||
// Use the same number of channels as the first frame to be mixed.
|
||||
numberOfChannels = static_cast<const AudioFrame*>(
|
||||
firstItem->GetItem())->_audioChannel;
|
||||
firstItem->GetItem())->num_channels_;
|
||||
}
|
||||
// TODO(henrike): it might be better to decide the number of channels
|
||||
// with an API instead of dynamically.
|
||||
@@ -309,11 +309,11 @@ WebRtc_Word32 AudioConferenceMixerImpl::Process()
|
||||
MixAnonomouslyFromList(*mixedAudio, additionalFramesList);
|
||||
MixAnonomouslyFromList(*mixedAudio, rampOutList);
|
||||
|
||||
if(mixedAudio->_payloadDataLengthInSamples == 0)
|
||||
if(mixedAudio->samples_per_channel_ == 0)
|
||||
{
|
||||
// Nothing was mixed, set the audio samples to silence.
|
||||
memset(mixedAudio->_payloadData, 0, _sampleSize);
|
||||
mixedAudio->_payloadDataLengthInSamples = _sampleSize;
|
||||
memset(mixedAudio->data_, 0, _sampleSize);
|
||||
mixedAudio->samples_per_channel_ = _sampleSize;
|
||||
}
|
||||
else
|
||||
{
|
||||
@@ -322,7 +322,7 @@ WebRtc_Word32 AudioConferenceMixerImpl::Process()
|
||||
retval = -1;
|
||||
}
|
||||
|
||||
_mixedAudioLevel.ComputeLevel(mixedAudio->_payloadData,_sampleSize);
|
||||
_mixedAudioLevel.ComputeLevel(mixedAudio->data_,_sampleSize);
|
||||
audioLevel = _mixedAudioLevel.GetLevel();
|
||||
|
||||
if(_mixerStatusCb)
|
||||
@@ -719,7 +719,7 @@ void AudioConferenceMixerImpl::UpdateToMix(
|
||||
assert(false);
|
||||
return;
|
||||
}
|
||||
audioFrame->_frequencyInHz = _outputFrequency;
|
||||
audioFrame->sample_rate_hz_ = _outputFrequency;
|
||||
|
||||
if(participant->GetAudioFrame(_id,*audioFrame) != 0)
|
||||
{
|
||||
@@ -732,14 +732,14 @@ void AudioConferenceMixerImpl::UpdateToMix(
|
||||
// TODO(henrike): this assert triggers in some test cases where SRTP is
|
||||
// used which prevents NetEQ from making a VAD. Temporarily disable this
|
||||
// assert until the problem is fixed on a higher level.
|
||||
// assert(audioFrame->_vadActivity != AudioFrame::kVadUnknown);
|
||||
if (audioFrame->_vadActivity == AudioFrame::kVadUnknown)
|
||||
// assert(audioFrame->vad_activity_ != AudioFrame::kVadUnknown);
|
||||
if (audioFrame->vad_activity_ == AudioFrame::kVadUnknown)
|
||||
{
|
||||
WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, _id,
|
||||
"invalid VAD state from participant");
|
||||
}
|
||||
|
||||
if(audioFrame->_vadActivity == AudioFrame::kVadActive)
|
||||
if(audioFrame->vad_activity_ == AudioFrame::kVadActive)
|
||||
{
|
||||
if(!wasMixed)
|
||||
{
|
||||
@@ -752,7 +752,7 @@ void AudioConferenceMixerImpl::UpdateToMix(
|
||||
// mixed. Only keep the ones with the highest energy.
|
||||
ListItem* replaceItem = NULL;
|
||||
CalculateEnergy(*audioFrame);
|
||||
WebRtc_UWord32 lowestEnergy = audioFrame->_energy;
|
||||
WebRtc_UWord32 lowestEnergy = audioFrame->energy_;
|
||||
|
||||
ListItem* activeItem = activeList.First();
|
||||
while(activeItem)
|
||||
@@ -760,10 +760,10 @@ void AudioConferenceMixerImpl::UpdateToMix(
|
||||
AudioFrame* replaceFrame = static_cast<AudioFrame*>(
|
||||
activeItem->GetItem());
|
||||
CalculateEnergy(*replaceFrame);
|
||||
if(replaceFrame->_energy < lowestEnergy)
|
||||
if(replaceFrame->energy_ < lowestEnergy)
|
||||
{
|
||||
replaceItem = activeItem;
|
||||
lowestEnergy = replaceFrame->_energy;
|
||||
lowestEnergy = replaceFrame->energy_;
|
||||
}
|
||||
activeItem = activeList.Next(activeItem);
|
||||
}
|
||||
@@ -774,7 +774,7 @@ void AudioConferenceMixerImpl::UpdateToMix(
|
||||
|
||||
bool replaceWasMixed = false;
|
||||
MapItem* replaceParticipant = mixParticipantList.Find(
|
||||
replaceFrame->_id);
|
||||
replaceFrame->id_);
|
||||
// When a frame is pushed to |activeList| it is also pushed
|
||||
// to mixParticipantList with the frame's id. This means
|
||||
// that the Find call above should never fail.
|
||||
@@ -786,12 +786,12 @@ void AudioConferenceMixerImpl::UpdateToMix(
|
||||
replaceParticipant->GetItem())->_mixHistory->
|
||||
WasMixed(replaceWasMixed);
|
||||
|
||||
mixParticipantList.Erase(replaceFrame->_id);
|
||||
mixParticipantList.Erase(replaceFrame->id_);
|
||||
activeList.Erase(replaceItem);
|
||||
|
||||
activeList.PushFront(static_cast<void*>(audioFrame));
|
||||
mixParticipantList.Insert(
|
||||
audioFrame->_id,
|
||||
audioFrame->id_,
|
||||
static_cast<void*>(participant));
|
||||
assert(mixParticipantList.Size() <=
|
||||
kMaximumAmountOfMixedParticipants);
|
||||
@@ -820,7 +820,7 @@ void AudioConferenceMixerImpl::UpdateToMix(
|
||||
}
|
||||
} else {
|
||||
activeList.PushFront(static_cast<void*>(audioFrame));
|
||||
mixParticipantList.Insert(audioFrame->_id,
|
||||
mixParticipantList.Insert(audioFrame->id_,
|
||||
static_cast<void*>(participant));
|
||||
assert(mixParticipantList.Size() <=
|
||||
kMaximumAmountOfMixedParticipants);
|
||||
@@ -864,7 +864,7 @@ void AudioConferenceMixerImpl::UpdateToMix(
|
||||
if(mixList.GetSize() < maxAudioFrameCounter + mixListStartSize)
|
||||
{
|
||||
mixList.PushBack(pair->audioFrame);
|
||||
mixParticipantList.Insert(pair->audioFrame->_id,
|
||||
mixParticipantList.Insert(pair->audioFrame->id_,
|
||||
static_cast<void*>(pair->participant));
|
||||
assert(mixParticipantList.Size() <=
|
||||
kMaximumAmountOfMixedParticipants);
|
||||
@@ -885,7 +885,7 @@ void AudioConferenceMixerImpl::UpdateToMix(
|
||||
if(mixList.GetSize() < maxAudioFrameCounter + mixListStartSize)
|
||||
{
|
||||
mixList.PushBack(pair->audioFrame);
|
||||
mixParticipantList.Insert(pair->audioFrame->_id,
|
||||
mixParticipantList.Insert(pair->audioFrame->id_,
|
||||
static_cast<void*>(pair->participant));
|
||||
assert(mixParticipantList.Size() <=
|
||||
kMaximumAmountOfMixedParticipants);
|
||||
@@ -923,7 +923,7 @@ void AudioConferenceMixerImpl::GetAdditionalAudio(
|
||||
assert(false);
|
||||
return;
|
||||
}
|
||||
audioFrame->_frequencyInHz = _outputFrequency;
|
||||
audioFrame->sample_rate_hz_ = _outputFrequency;
|
||||
if(participant->GetAudioFrame(_id, *audioFrame) != 0)
|
||||
{
|
||||
WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, _id,
|
||||
@@ -932,7 +932,7 @@ void AudioConferenceMixerImpl::GetAdditionalAudio(
|
||||
item = nextItem;
|
||||
continue;
|
||||
}
|
||||
if(audioFrame->_payloadDataLengthInSamples == 0)
|
||||
if(audioFrame->samples_per_channel_ == 0)
|
||||
{
|
||||
// Empty frame. Don't use it.
|
||||
_audioFramePool->PushMemory(audioFrame);
|
||||
@@ -1000,14 +1000,14 @@ void AudioConferenceMixerImpl::UpdateVADPositiveParticipants(
|
||||
{
|
||||
AudioFrame* audioFrame = static_cast<AudioFrame*>(item->GetItem());
|
||||
CalculateEnergy(*audioFrame);
|
||||
if(audioFrame->_vadActivity == AudioFrame::kVadActive)
|
||||
if(audioFrame->vad_activity_ == AudioFrame::kVadActive)
|
||||
{
|
||||
_scratchVadPositiveParticipants[
|
||||
_scratchVadPositiveParticipantsAmount].participant =
|
||||
audioFrame->_id;
|
||||
audioFrame->id_;
|
||||
_scratchVadPositiveParticipants[
|
||||
_scratchVadPositiveParticipantsAmount].level =
|
||||
audioFrame->_volume;
|
||||
audioFrame->volume_;
|
||||
_scratchVadPositiveParticipantsAmount++;
|
||||
}
|
||||
item = mixList.Next(item);
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
|
||||
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
@@ -41,41 +41,41 @@ const int rampSize = sizeof(rampArray)/sizeof(rampArray[0]);
|
||||
namespace webrtc {
|
||||
void CalculateEnergy(AudioFrame& audioFrame)
|
||||
{
|
||||
if(audioFrame._energy != 0xffffffff)
|
||||
if(audioFrame.energy_ != 0xffffffff)
|
||||
{
|
||||
return;
|
||||
}
|
||||
audioFrame._energy = 0;
|
||||
for(int position = 0; position < audioFrame._payloadDataLengthInSamples;
|
||||
audioFrame.energy_ = 0;
|
||||
for(int position = 0; position < audioFrame.samples_per_channel_;
|
||||
position++)
|
||||
{
|
||||
// TODO(andrew): this can easily overflow.
|
||||
audioFrame._energy += audioFrame._payloadData[position] *
|
||||
audioFrame._payloadData[position];
|
||||
audioFrame.energy_ += audioFrame.data_[position] *
|
||||
audioFrame.data_[position];
|
||||
}
|
||||
}
|
||||
|
||||
void RampIn(AudioFrame& audioFrame)
|
||||
{
|
||||
assert(rampSize <= audioFrame._payloadDataLengthInSamples);
|
||||
assert(rampSize <= audioFrame.samples_per_channel_);
|
||||
for(int i = 0; i < rampSize; i++)
|
||||
{
|
||||
audioFrame._payloadData[i] = static_cast<WebRtc_Word16>
|
||||
(rampArray[i] * audioFrame._payloadData[i]);
|
||||
audioFrame.data_[i] = static_cast<WebRtc_Word16>
|
||||
(rampArray[i] * audioFrame.data_[i]);
|
||||
}
|
||||
}
|
||||
|
||||
void RampOut(AudioFrame& audioFrame)
|
||||
{
|
||||
assert(rampSize <= audioFrame._payloadDataLengthInSamples);
|
||||
assert(rampSize <= audioFrame.samples_per_channel_);
|
||||
for(int i = 0; i < rampSize; i++)
|
||||
{
|
||||
const int rampPos = rampSize - 1 - i;
|
||||
audioFrame._payloadData[i] = static_cast<WebRtc_Word16>
|
||||
(rampArray[rampPos] * audioFrame._payloadData[i]);
|
||||
audioFrame.data_[i] = static_cast<WebRtc_Word16>
|
||||
(rampArray[rampPos] * audioFrame.data_[i]);
|
||||
}
|
||||
memset(&audioFrame._payloadData[rampSize], 0,
|
||||
(audioFrame._payloadDataLengthInSamples - rampSize) *
|
||||
sizeof(audioFrame._payloadData[0]));
|
||||
memset(&audioFrame.data_[rampSize], 0,
|
||||
(audioFrame.samples_per_channel_ - rampSize) *
|
||||
sizeof(audioFrame.data_[0]));
|
||||
}
|
||||
} // namespace webrtc
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
|
||||
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
@@ -210,9 +210,9 @@ bool
|
||||
FileWriter::WriteToFile(
|
||||
const AudioFrame& audioFrame)
|
||||
{
|
||||
WebRtc_Word32 written = (WebRtc_Word32)fwrite(audioFrame._payloadData,sizeof(WebRtc_Word16),audioFrame._payloadDataLengthInSamples,_file);
|
||||
WebRtc_Word32 written = (WebRtc_Word32)fwrite(audioFrame.data_,sizeof(WebRtc_Word16),audioFrame.samples_per_channel_,_file);
|
||||
// Do not flush buffers since that will add (a lot of) delay
|
||||
return written == audioFrame._payloadDataLengthInSamples;
|
||||
return written == audioFrame.samples_per_channel_;
|
||||
}
|
||||
|
||||
FileReader::FileReader()
|
||||
@@ -269,8 +269,8 @@ FileReader::ReadFromFile(
|
||||
AudioFrame& audioFrame)
|
||||
{
|
||||
|
||||
WebRtc_Word16 buffer[AudioFrame::kMaxAudioFrameSizeSamples];
|
||||
LoopedFileRead(buffer,AudioFrame::kMaxAudioFrameSizeSamples,_sampleSize,_file);
|
||||
WebRtc_Word16 buffer[AudioFrame::kMaxDataSizeSamples];
|
||||
LoopedFileRead(buffer,AudioFrame::kMaxDataSizeSamples,_sampleSize,_file);
|
||||
|
||||
bool vad = false;
|
||||
GetVAD(buffer,_sampleSize,vad);
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
|
||||
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
@@ -188,27 +188,27 @@ int AudioBuffer::samples_per_split_channel() const {
|
||||
|
||||
// TODO(andrew): Do deinterleaving and mixing in one step?
|
||||
void AudioBuffer::DeinterleaveFrom(AudioFrame* frame) {
|
||||
assert(frame->_audioChannel <= max_num_channels_);
|
||||
assert(frame->_payloadDataLengthInSamples == samples_per_channel_);
|
||||
assert(frame->num_channels_ <= max_num_channels_);
|
||||
assert(frame->samples_per_channel_ == samples_per_channel_);
|
||||
|
||||
num_channels_ = frame->_audioChannel;
|
||||
num_channels_ = frame->num_channels_;
|
||||
data_was_mixed_ = false;
|
||||
num_mixed_channels_ = 0;
|
||||
num_mixed_low_pass_channels_ = 0;
|
||||
reference_copied_ = false;
|
||||
activity_ = frame->_vadActivity;
|
||||
activity_ = frame->vad_activity_;
|
||||
is_muted_ = false;
|
||||
if (frame->_energy == 0) {
|
||||
if (frame->energy_ == 0) {
|
||||
is_muted_ = true;
|
||||
}
|
||||
|
||||
if (num_channels_ == 1) {
|
||||
// We can get away with a pointer assignment in this case.
|
||||
data_ = frame->_payloadData;
|
||||
data_ = frame->data_;
|
||||
return;
|
||||
}
|
||||
|
||||
int16_t* interleaved = frame->_payloadData;
|
||||
int16_t* interleaved = frame->data_;
|
||||
for (int i = 0; i < num_channels_; i++) {
|
||||
int16_t* deinterleaved = channels_[i].data;
|
||||
int interleaved_idx = i;
|
||||
@@ -220,9 +220,9 @@ void AudioBuffer::DeinterleaveFrom(AudioFrame* frame) {
|
||||
}
|
||||
|
||||
void AudioBuffer::InterleaveTo(AudioFrame* frame, bool data_changed) const {
|
||||
assert(frame->_audioChannel == num_channels_);
|
||||
assert(frame->_payloadDataLengthInSamples == samples_per_channel_);
|
||||
frame->_vadActivity = activity_;
|
||||
assert(frame->num_channels_ == num_channels_);
|
||||
assert(frame->samples_per_channel_ == samples_per_channel_);
|
||||
frame->vad_activity_ = activity_;
|
||||
|
||||
if (!data_changed) {
|
||||
return;
|
||||
@@ -230,18 +230,18 @@ void AudioBuffer::InterleaveTo(AudioFrame* frame, bool data_changed) const {
|
||||
|
||||
if (num_channels_ == 1) {
|
||||
if (data_was_mixed_) {
|
||||
memcpy(frame->_payloadData,
|
||||
memcpy(frame->data_,
|
||||
channels_[0].data,
|
||||
sizeof(int16_t) * samples_per_channel_);
|
||||
} else {
|
||||
// These should point to the same buffer in this case.
|
||||
assert(data_ == frame->_payloadData);
|
||||
assert(data_ == frame->data_);
|
||||
}
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
int16_t* interleaved = frame->_payloadData;
|
||||
int16_t* interleaved = frame->data_;
|
||||
for (int i = 0; i < num_channels_; i++) {
|
||||
int16_t* deinterleaved = channels_[i].data;
|
||||
int interleaved_idx = i;
|
||||
|
||||
@@ -258,15 +258,15 @@ int AudioProcessingImpl::ProcessStream(AudioFrame* frame) {
|
||||
return kNullPointerError;
|
||||
}
|
||||
|
||||
if (frame->_frequencyInHz != sample_rate_hz_) {
|
||||
if (frame->sample_rate_hz_ != sample_rate_hz_) {
|
||||
return kBadSampleRateError;
|
||||
}
|
||||
|
||||
if (frame->_audioChannel != num_input_channels_) {
|
||||
if (frame->num_channels_ != num_input_channels_) {
|
||||
return kBadNumberChannelsError;
|
||||
}
|
||||
|
||||
if (frame->_payloadDataLengthInSamples != samples_per_channel_) {
|
||||
if (frame->samples_per_channel_ != samples_per_channel_) {
|
||||
return kBadDataLengthError;
|
||||
}
|
||||
|
||||
@@ -275,9 +275,9 @@ int AudioProcessingImpl::ProcessStream(AudioFrame* frame) {
|
||||
event_msg_->set_type(audioproc::Event::STREAM);
|
||||
audioproc::Stream* msg = event_msg_->mutable_stream();
|
||||
const size_t data_size = sizeof(int16_t) *
|
||||
frame->_payloadDataLengthInSamples *
|
||||
frame->_audioChannel;
|
||||
msg->set_input_data(frame->_payloadData, data_size);
|
||||
frame->samples_per_channel_ *
|
||||
frame->num_channels_;
|
||||
msg->set_input_data(frame->data_, data_size);
|
||||
msg->set_delay(stream_delay_ms_);
|
||||
msg->set_drift(echo_cancellation_->stream_drift_samples());
|
||||
msg->set_level(gain_control_->stream_analog_level());
|
||||
@@ -289,7 +289,7 @@ int AudioProcessingImpl::ProcessStream(AudioFrame* frame) {
|
||||
// TODO(ajm): experiment with mixing and AEC placement.
|
||||
if (num_output_channels_ < num_input_channels_) {
|
||||
capture_audio_->Mix(num_output_channels_);
|
||||
frame->_audioChannel = num_output_channels_;
|
||||
frame->num_channels_ = num_output_channels_;
|
||||
}
|
||||
|
||||
bool data_processed = is_data_processed();
|
||||
@@ -367,9 +367,9 @@ int AudioProcessingImpl::ProcessStream(AudioFrame* frame) {
|
||||
if (debug_file_->Open()) {
|
||||
audioproc::Stream* msg = event_msg_->mutable_stream();
|
||||
const size_t data_size = sizeof(int16_t) *
|
||||
frame->_payloadDataLengthInSamples *
|
||||
frame->_audioChannel;
|
||||
msg->set_output_data(frame->_payloadData, data_size);
|
||||
frame->samples_per_channel_ *
|
||||
frame->num_channels_;
|
||||
msg->set_output_data(frame->data_, data_size);
|
||||
err = WriteMessageToDebugFile();
|
||||
if (err != kNoError) {
|
||||
return err;
|
||||
@@ -389,15 +389,15 @@ int AudioProcessingImpl::AnalyzeReverseStream(AudioFrame* frame) {
|
||||
return kNullPointerError;
|
||||
}
|
||||
|
||||
if (frame->_frequencyInHz != sample_rate_hz_) {
|
||||
if (frame->sample_rate_hz_ != sample_rate_hz_) {
|
||||
return kBadSampleRateError;
|
||||
}
|
||||
|
||||
if (frame->_audioChannel != num_reverse_channels_) {
|
||||
if (frame->num_channels_ != num_reverse_channels_) {
|
||||
return kBadNumberChannelsError;
|
||||
}
|
||||
|
||||
if (frame->_payloadDataLengthInSamples != samples_per_channel_) {
|
||||
if (frame->samples_per_channel_ != samples_per_channel_) {
|
||||
return kBadDataLengthError;
|
||||
}
|
||||
|
||||
@@ -406,9 +406,9 @@ int AudioProcessingImpl::AnalyzeReverseStream(AudioFrame* frame) {
|
||||
event_msg_->set_type(audioproc::Event::REVERSE_STREAM);
|
||||
audioproc::ReverseStream* msg = event_msg_->mutable_reverse_stream();
|
||||
const size_t data_size = sizeof(int16_t) *
|
||||
frame->_payloadDataLengthInSamples *
|
||||
frame->_audioChannel;
|
||||
msg->set_data(frame->_payloadData, data_size);
|
||||
frame->samples_per_channel_ *
|
||||
frame->num_channels_;
|
||||
msg->set_data(frame->data_, data_size);
|
||||
err = WriteMessageToDebugFile();
|
||||
if (err != kNoError) {
|
||||
return err;
|
||||
|
||||
@@ -150,7 +150,7 @@ class AudioProcessing : public Module {
|
||||
// must be called prior to processing the current frame. Any getter function
|
||||
// with the stream_ tag which is needed should be called after processing.
|
||||
//
|
||||
// The |_frequencyInHz|, |_audioChannel|, and |_payloadDataLengthInSamples|
|
||||
// The |sample_rate_hz_|, |num_channels_|, and |samples_per_channel_|
|
||||
// members of |frame| must be valid, and correspond to settings supplied
|
||||
// to APM.
|
||||
virtual int ProcessStream(AudioFrame* frame) = 0;
|
||||
@@ -165,7 +165,7 @@ class AudioProcessing : public Module {
|
||||
// typically will not be used. If you're not sure what to pass in here,
|
||||
// chances are you don't need to use it.
|
||||
//
|
||||
// The |_frequencyInHz|, |_audioChannel|, and |_payloadDataLengthInSamples|
|
||||
// The |sample_rate_hz_|, |num_channels_|, and |samples_per_channel_|
|
||||
// members of |frame| must be valid.
|
||||
//
|
||||
// TODO(ajm): add const to input; requires an implementation fix.
|
||||
@@ -554,7 +554,7 @@ class NoiseSuppression {
|
||||
// external VAD decision.
|
||||
//
|
||||
// In addition to |stream_has_voice()| the VAD decision is provided through the
|
||||
// |AudioFrame| passed to |ProcessStream()|. The |_vadActivity| member will be
|
||||
// |AudioFrame| passed to |ProcessStream()|. The |vad_activity_| member will be
|
||||
// modified to reflect the current decision.
|
||||
class VoiceDetection {
|
||||
public:
|
||||
|
||||
@@ -546,11 +546,11 @@ void void_main(int argc, char* argv[]) {
|
||||
apm->set_num_reverse_channels(msg.num_reverse_channels()));
|
||||
|
||||
samples_per_channel = msg.sample_rate() / 100;
|
||||
far_frame._frequencyInHz = msg.sample_rate();
|
||||
far_frame._payloadDataLengthInSamples = samples_per_channel;
|
||||
far_frame._audioChannel = msg.num_reverse_channels();
|
||||
near_frame._frequencyInHz = msg.sample_rate();
|
||||
near_frame._payloadDataLengthInSamples = samples_per_channel;
|
||||
far_frame.sample_rate_hz_ = msg.sample_rate();
|
||||
far_frame.samples_per_channel_ = samples_per_channel;
|
||||
far_frame.num_channels_ = msg.num_reverse_channels();
|
||||
near_frame.sample_rate_hz_ = msg.sample_rate();
|
||||
near_frame.samples_per_channel_ = samples_per_channel;
|
||||
|
||||
if (verbose) {
|
||||
printf("Init at frame: %d (primary), %d (reverse)\n",
|
||||
@@ -569,8 +569,8 @@ void void_main(int argc, char* argv[]) {
|
||||
|
||||
ASSERT_TRUE(msg.has_data());
|
||||
ASSERT_EQ(sizeof(int16_t) * samples_per_channel *
|
||||
far_frame._audioChannel, msg.data().size());
|
||||
memcpy(far_frame._payloadData, msg.data().data(), msg.data().size());
|
||||
far_frame.num_channels_, msg.data().size());
|
||||
memcpy(far_frame.data_, msg.data().data(), msg.data().size());
|
||||
|
||||
if (perf_testing) {
|
||||
t0 = TickTime::Now();
|
||||
@@ -597,12 +597,12 @@ void void_main(int argc, char* argv[]) {
|
||||
primary_count++;
|
||||
|
||||
// ProcessStream could have changed this for the output frame.
|
||||
near_frame._audioChannel = apm->num_input_channels();
|
||||
near_frame.num_channels_ = apm->num_input_channels();
|
||||
|
||||
ASSERT_TRUE(msg.has_input_data());
|
||||
ASSERT_EQ(sizeof(int16_t) * samples_per_channel *
|
||||
near_frame._audioChannel, msg.input_data().size());
|
||||
memcpy(near_frame._payloadData,
|
||||
near_frame.num_channels_, msg.input_data().size());
|
||||
memcpy(near_frame.data_,
|
||||
msg.input_data().data(),
|
||||
msg.input_data().size());
|
||||
|
||||
@@ -630,7 +630,7 @@ void void_main(int argc, char* argv[]) {
|
||||
}
|
||||
ASSERT_TRUE(err == apm->kNoError ||
|
||||
err == apm->kBadStreamParameterWarning);
|
||||
ASSERT_TRUE(near_frame._audioChannel == apm->num_output_channels());
|
||||
ASSERT_TRUE(near_frame.num_channels_ == apm->num_output_channels());
|
||||
|
||||
capture_level = apm->gain_control()->stream_analog_level();
|
||||
|
||||
@@ -659,8 +659,8 @@ void void_main(int argc, char* argv[]) {
|
||||
}
|
||||
}
|
||||
|
||||
size_t size = samples_per_channel * near_frame._audioChannel;
|
||||
ASSERT_EQ(size, fwrite(near_frame._payloadData,
|
||||
size_t size = samples_per_channel * near_frame.num_channels_;
|
||||
ASSERT_EQ(size, fwrite(near_frame.data_,
|
||||
sizeof(int16_t),
|
||||
size,
|
||||
out_file));
|
||||
@@ -700,11 +700,11 @@ void void_main(int argc, char* argv[]) {
|
||||
}
|
||||
}
|
||||
|
||||
far_frame._frequencyInHz = sample_rate_hz;
|
||||
far_frame._payloadDataLengthInSamples = samples_per_channel;
|
||||
far_frame._audioChannel = num_render_channels;
|
||||
near_frame._frequencyInHz = sample_rate_hz;
|
||||
near_frame._payloadDataLengthInSamples = samples_per_channel;
|
||||
far_frame.sample_rate_hz_ = sample_rate_hz;
|
||||
far_frame.samples_per_channel_ = samples_per_channel;
|
||||
far_frame.num_channels_ = num_render_channels;
|
||||
near_frame.sample_rate_hz_ = sample_rate_hz;
|
||||
near_frame.samples_per_channel_ = samples_per_channel;
|
||||
|
||||
if (event == kInitializeEvent || event == kResetEventDeprecated) {
|
||||
ASSERT_EQ(1u,
|
||||
@@ -724,11 +724,11 @@ void void_main(int argc, char* argv[]) {
|
||||
apm->echo_cancellation()->set_device_sample_rate_hz(
|
||||
device_sample_rate_hz));
|
||||
|
||||
far_frame._frequencyInHz = sample_rate_hz;
|
||||
far_frame._payloadDataLengthInSamples = samples_per_channel;
|
||||
far_frame._audioChannel = num_render_channels;
|
||||
near_frame._frequencyInHz = sample_rate_hz;
|
||||
near_frame._payloadDataLengthInSamples = samples_per_channel;
|
||||
far_frame.sample_rate_hz_ = sample_rate_hz;
|
||||
far_frame.samples_per_channel_ = samples_per_channel;
|
||||
far_frame.num_channels_ = num_render_channels;
|
||||
near_frame.sample_rate_hz_ = sample_rate_hz;
|
||||
near_frame.samples_per_channel_ = samples_per_channel;
|
||||
|
||||
if (verbose) {
|
||||
printf("Init at frame: %d (primary), %d (reverse)\n",
|
||||
@@ -740,7 +740,7 @@ void void_main(int argc, char* argv[]) {
|
||||
reverse_count++;
|
||||
|
||||
size_t size = samples_per_channel * num_render_channels;
|
||||
read_count = fread(far_frame._payloadData,
|
||||
read_count = fread(far_frame.data_,
|
||||
sizeof(int16_t),
|
||||
size,
|
||||
far_file);
|
||||
@@ -778,10 +778,10 @@ void void_main(int argc, char* argv[]) {
|
||||
|
||||
} else if (event == kCaptureEvent) {
|
||||
primary_count++;
|
||||
near_frame._audioChannel = num_capture_input_channels;
|
||||
near_frame.num_channels_ = num_capture_input_channels;
|
||||
|
||||
size_t size = samples_per_channel * num_capture_input_channels;
|
||||
read_count = fread(near_frame._payloadData,
|
||||
read_count = fread(near_frame.data_,
|
||||
sizeof(int16_t),
|
||||
size,
|
||||
near_file);
|
||||
@@ -829,7 +829,7 @@ void void_main(int argc, char* argv[]) {
|
||||
}
|
||||
ASSERT_TRUE(err == apm->kNoError ||
|
||||
err == apm->kBadStreamParameterWarning);
|
||||
ASSERT_TRUE(near_frame._audioChannel == apm->num_output_channels());
|
||||
ASSERT_TRUE(near_frame.num_channels_ == apm->num_output_channels());
|
||||
|
||||
capture_level = apm->gain_control()->stream_analog_level();
|
||||
|
||||
@@ -858,8 +858,8 @@ void void_main(int argc, char* argv[]) {
|
||||
}
|
||||
}
|
||||
|
||||
size = samples_per_channel * near_frame._audioChannel;
|
||||
ASSERT_EQ(size, fwrite(near_frame._payloadData,
|
||||
size = samples_per_channel * near_frame.num_channels_;
|
||||
ASSERT_EQ(size, fwrite(near_frame.data_,
|
||||
sizeof(int16_t),
|
||||
size,
|
||||
out_file));
|
||||
|
||||
@@ -193,12 +193,12 @@ void ApmTest::Init(int sample_rate_hz, int num_reverse_channels,
|
||||
|
||||
// We always use 10 ms frames.
|
||||
const int samples_per_channel = sample_rate_hz / 100;
|
||||
frame_->_payloadDataLengthInSamples = samples_per_channel;
|
||||
frame_->_audioChannel = num_input_channels;
|
||||
frame_->_frequencyInHz = sample_rate_hz;
|
||||
revframe_->_payloadDataLengthInSamples = samples_per_channel;
|
||||
revframe_->_audioChannel = num_reverse_channels;
|
||||
revframe_->_frequencyInHz = sample_rate_hz;
|
||||
frame_->samples_per_channel_ = samples_per_channel;
|
||||
frame_->num_channels_ = num_input_channels;
|
||||
frame_->sample_rate_hz_ = sample_rate_hz;
|
||||
revframe_->samples_per_channel_ = samples_per_channel;
|
||||
revframe_->num_channels_ = num_reverse_channels;
|
||||
revframe_->sample_rate_hz_ = sample_rate_hz;
|
||||
|
||||
if (far_file_) {
|
||||
ASSERT_EQ(0, fclose(far_file_));
|
||||
@@ -249,41 +249,41 @@ T AbsValue(T a) {
|
||||
}
|
||||
|
||||
void SetFrameTo(AudioFrame* frame, int16_t value) {
|
||||
for (int i = 0; i < frame->_payloadDataLengthInSamples * frame->_audioChannel;
|
||||
for (int i = 0; i < frame->samples_per_channel_ * frame->num_channels_;
|
||||
++i) {
|
||||
frame->_payloadData[i] = value;
|
||||
frame->data_[i] = value;
|
||||
}
|
||||
}
|
||||
|
||||
void SetFrameTo(AudioFrame* frame, int16_t left, int16_t right) {
|
||||
ASSERT_EQ(2, frame->_audioChannel);
|
||||
for (int i = 0; i < frame->_payloadDataLengthInSamples * 2; i += 2) {
|
||||
frame->_payloadData[i] = left;
|
||||
frame->_payloadData[i + 1] = right;
|
||||
ASSERT_EQ(2, frame->num_channels_);
|
||||
for (int i = 0; i < frame->samples_per_channel_ * 2; i += 2) {
|
||||
frame->data_[i] = left;
|
||||
frame->data_[i + 1] = right;
|
||||
}
|
||||
}
|
||||
|
||||
int16_t MaxAudioFrame(const AudioFrame& frame) {
|
||||
const int length = frame._payloadDataLengthInSamples * frame._audioChannel;
|
||||
int16_t max = AbsValue(frame._payloadData[0]);
|
||||
const int length = frame.samples_per_channel_ * frame.num_channels_;
|
||||
int16_t max = AbsValue(frame.data_[0]);
|
||||
for (int i = 1; i < length; i++) {
|
||||
max = MaxValue(max, AbsValue(frame._payloadData[i]));
|
||||
max = MaxValue(max, AbsValue(frame.data_[i]));
|
||||
}
|
||||
|
||||
return max;
|
||||
}
|
||||
|
||||
bool FrameDataAreEqual(const AudioFrame& frame1, const AudioFrame& frame2) {
|
||||
if (frame1._payloadDataLengthInSamples !=
|
||||
frame2._payloadDataLengthInSamples) {
|
||||
if (frame1.samples_per_channel_ !=
|
||||
frame2.samples_per_channel_) {
|
||||
return false;
|
||||
}
|
||||
if (frame1._audioChannel !=
|
||||
frame2._audioChannel) {
|
||||
if (frame1.num_channels_ !=
|
||||
frame2.num_channels_) {
|
||||
return false;
|
||||
}
|
||||
if (memcmp(frame1._payloadData, frame2._payloadData,
|
||||
frame1._payloadDataLengthInSamples * frame1._audioChannel *
|
||||
if (memcmp(frame1.data_, frame2.data_,
|
||||
frame1.samples_per_channel_ * frame1.num_channels_ *
|
||||
sizeof(int16_t))) {
|
||||
return false;
|
||||
}
|
||||
@@ -360,12 +360,12 @@ bool DeadlockProc(void* thread_object) {
|
||||
|
||||
AudioFrame primary_frame;
|
||||
AudioFrame reverse_frame;
|
||||
primary_frame._payloadDataLengthInSamples = 320;
|
||||
primary_frame._audioChannel = 2;
|
||||
primary_frame._frequencyInHz = 32000;
|
||||
reverse_frame._payloadDataLengthInSamples = 320;
|
||||
reverse_frame._audioChannel = 2;
|
||||
reverse_frame._frequencyInHz = 32000;
|
||||
primary_frame.samples_per_channel_ = 320;
|
||||
primary_frame.num_channels_ = 2;
|
||||
primary_frame.sample_rate_hz_ = 32000;
|
||||
reverse_frame.samples_per_channel_ = 320;
|
||||
reverse_frame.num_channels_ = 2;
|
||||
reverse_frame.sample_rate_hz_ = 32000;
|
||||
|
||||
ap->echo_cancellation()->Enable(true);
|
||||
ap->gain_control()->Enable(true);
|
||||
@@ -849,9 +849,9 @@ TEST_F(ApmTest, LevelEstimator) {
|
||||
// Run this test in wideband; in super-wb, the splitting filter distorts the
|
||||
// audio enough to cause deviation from the expectation for small values.
|
||||
EXPECT_EQ(apm_->kNoError, apm_->set_sample_rate_hz(16000));
|
||||
frame_->_payloadDataLengthInSamples = 160;
|
||||
frame_->_audioChannel = 2;
|
||||
frame_->_frequencyInHz = 16000;
|
||||
frame_->samples_per_channel_ = 160;
|
||||
frame_->num_channels_ = 2;
|
||||
frame_->sample_rate_hz_ = 16000;
|
||||
|
||||
// Min value if no frames have been processed.
|
||||
EXPECT_EQ(127, apm_->level_estimator()->RMS());
|
||||
@@ -884,14 +884,14 @@ TEST_F(ApmTest, LevelEstimator) {
|
||||
EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
|
||||
EXPECT_EQ(70, apm_->level_estimator()->RMS());
|
||||
|
||||
// Min value if _energy == 0.
|
||||
// Min value if energy_ == 0.
|
||||
SetFrameTo(frame_, 10000);
|
||||
uint32_t energy = frame_->_energy; // Save default to restore below.
|
||||
frame_->_energy = 0;
|
||||
uint32_t energy = frame_->energy_; // Save default to restore below.
|
||||
frame_->energy_ = 0;
|
||||
EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
|
||||
EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
|
||||
EXPECT_EQ(127, apm_->level_estimator()->RMS());
|
||||
frame_->_energy = energy;
|
||||
frame_->energy_ = energy;
|
||||
|
||||
// Verify reset after enable/disable.
|
||||
SetFrameTo(frame_, 32767);
|
||||
@@ -960,16 +960,16 @@ TEST_F(ApmTest, VoiceDetection) {
|
||||
AudioFrame::kVadUnknown
|
||||
};
|
||||
for (size_t i = 0; i < sizeof(activity)/sizeof(*activity); i++) {
|
||||
frame_->_vadActivity = activity[i];
|
||||
frame_->vad_activity_ = activity[i];
|
||||
EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
|
||||
EXPECT_EQ(activity[i], frame_->_vadActivity);
|
||||
EXPECT_EQ(activity[i], frame_->vad_activity_);
|
||||
}
|
||||
|
||||
// Test that AudioFrame activity is set when VAD is enabled.
|
||||
EXPECT_EQ(apm_->kNoError, apm_->voice_detection()->Enable(true));
|
||||
frame_->_vadActivity = AudioFrame::kVadUnknown;
|
||||
frame_->vad_activity_ = AudioFrame::kVadUnknown;
|
||||
EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
|
||||
EXPECT_NE(AudioFrame::kVadUnknown, frame_->_vadActivity);
|
||||
EXPECT_NE(AudioFrame::kVadUnknown, frame_->vad_activity_);
|
||||
|
||||
// TODO(bjornv): Add tests for streamed voice; stream_has_voice()
|
||||
}
|
||||
@@ -979,9 +979,8 @@ TEST_F(ApmTest, VerifyDownMixing) {
|
||||
Init(kSampleRates[i], 2, 2, 1, false);
|
||||
SetFrameTo(frame_, 1000, 2000);
|
||||
AudioFrame mono_frame;
|
||||
mono_frame._payloadDataLengthInSamples =
|
||||
frame_->_payloadDataLengthInSamples;
|
||||
mono_frame._audioChannel = 1;
|
||||
mono_frame.samples_per_channel_ = frame_->samples_per_channel_;
|
||||
mono_frame.num_channels_ = 1;
|
||||
SetFrameTo(&mono_frame, 1500);
|
||||
EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
|
||||
EXPECT_TRUE(FrameDataAreEqual(*frame_, mono_frame));
|
||||
@@ -1050,9 +1049,9 @@ TEST_F(ApmTest, SplittingFilter) {
|
||||
|
||||
// 5. Not using super-wb.
|
||||
EXPECT_EQ(apm_->kNoError, apm_->set_sample_rate_hz(16000));
|
||||
frame_->_payloadDataLengthInSamples = 160;
|
||||
frame_->_audioChannel = 2;
|
||||
frame_->_frequencyInHz = 16000;
|
||||
frame_->samples_per_channel_ = 160;
|
||||
frame_->num_channels_ = 2;
|
||||
frame_->sample_rate_hz_ = 16000;
|
||||
// Enable AEC, which would require the filter in super-wb. We rely on the
|
||||
// first few frames of data being unaffected by the AEC.
|
||||
// TODO(andrew): This test, and the one below, rely rather tenuously on the
|
||||
@@ -1073,9 +1072,9 @@ TEST_F(ApmTest, SplittingFilter) {
|
||||
// Check the test is valid. We should have distortion from the filter
|
||||
// when AEC is enabled (which won't affect the audio).
|
||||
EXPECT_EQ(apm_->kNoError, apm_->set_sample_rate_hz(32000));
|
||||
frame_->_payloadDataLengthInSamples = 320;
|
||||
frame_->_audioChannel = 2;
|
||||
frame_->_frequencyInHz = 32000;
|
||||
frame_->samples_per_channel_ = 320;
|
||||
frame_->num_channels_ = 2;
|
||||
frame_->sample_rate_hz_ = 32000;
|
||||
SetFrameTo(frame_, 1000);
|
||||
frame_copy = *frame_;
|
||||
EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(0));
|
||||
@@ -1208,7 +1207,7 @@ TEST_F(ApmTest, Process) {
|
||||
while (1) {
|
||||
// Read far-end frame, always stereo.
|
||||
size_t frame_size = samples_per_channel * 2;
|
||||
size_t read_count = fread(revframe_->_payloadData,
|
||||
size_t read_count = fread(revframe_->data_,
|
||||
sizeof(int16_t),
|
||||
frame_size,
|
||||
far_file_);
|
||||
@@ -1218,8 +1217,8 @@ TEST_F(ApmTest, Process) {
|
||||
break; // This is expected.
|
||||
}
|
||||
|
||||
if (revframe_->_audioChannel == 1) {
|
||||
MixStereoToMono(revframe_->_payloadData, revframe_->_payloadData,
|
||||
if (revframe_->num_channels_ == 1) {
|
||||
MixStereoToMono(revframe_->data_, revframe_->data_,
|
||||
samples_per_channel);
|
||||
}
|
||||
|
||||
@@ -1232,7 +1231,7 @@ TEST_F(ApmTest, Process) {
|
||||
apm_->gain_control()->set_stream_analog_level(analog_level));
|
||||
|
||||
// Read near-end frame, always stereo.
|
||||
read_count = fread(frame_->_payloadData,
|
||||
read_count = fread(frame_->data_,
|
||||
sizeof(int16_t),
|
||||
frame_size,
|
||||
near_file_);
|
||||
@@ -1242,15 +1241,15 @@ TEST_F(ApmTest, Process) {
|
||||
break; // This is expected.
|
||||
}
|
||||
|
||||
if (frame_->_audioChannel == 1) {
|
||||
MixStereoToMono(frame_->_payloadData, frame_->_payloadData,
|
||||
if (frame_->num_channels_ == 1) {
|
||||
MixStereoToMono(frame_->data_, frame_->data_,
|
||||
samples_per_channel);
|
||||
}
|
||||
frame_->_vadActivity = AudioFrame::kVadUnknown;
|
||||
frame_->vad_activity_ = AudioFrame::kVadUnknown;
|
||||
|
||||
EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
|
||||
// Ensure the frame was downmixed properly.
|
||||
EXPECT_EQ(test->num_output_channels(), frame_->_audioChannel);
|
||||
EXPECT_EQ(test->num_output_channels(), frame_->num_channels_);
|
||||
|
||||
max_output_average += MaxAudioFrame(*frame_);
|
||||
|
||||
@@ -1265,20 +1264,20 @@ TEST_F(ApmTest, Process) {
|
||||
}
|
||||
if (apm_->voice_detection()->stream_has_voice()) {
|
||||
has_voice_count++;
|
||||
EXPECT_EQ(AudioFrame::kVadActive, frame_->_vadActivity);
|
||||
EXPECT_EQ(AudioFrame::kVadActive, frame_->vad_activity_);
|
||||
} else {
|
||||
EXPECT_EQ(AudioFrame::kVadPassive, frame_->_vadActivity);
|
||||
EXPECT_EQ(AudioFrame::kVadPassive, frame_->vad_activity_);
|
||||
}
|
||||
|
||||
frame_size = samples_per_channel * frame_->_audioChannel;
|
||||
size_t write_count = fwrite(frame_->_payloadData,
|
||||
frame_size = samples_per_channel * frame_->num_channels_;
|
||||
size_t write_count = fwrite(frame_->data_,
|
||||
sizeof(int16_t),
|
||||
frame_size,
|
||||
out_file_);
|
||||
ASSERT_EQ(frame_size, write_count);
|
||||
|
||||
// Reset in case of downmixing.
|
||||
frame_->_audioChannel = test->num_input_channels();
|
||||
frame_->num_channels_ = test->num_input_channels();
|
||||
frame_count++;
|
||||
}
|
||||
max_output_average /= frame_count;
|
||||
|
||||
@@ -707,13 +707,13 @@ VideoFrame::Free()
* exact opposite frames when deciding the resulting
* state. To do this use the -operator.
*
* - _audioChannel of 1 indicated mono, and 2
* - num_channels_ of 1 indicated mono, and 2
* indicates stereo.
*
* - _payloadDataLengthInSamples is the number of
* - samples_per_channel_ is the number of
* samples per channel. Therefore, the total
* number of samples in _payloadData is
* (_payloadDataLengthInSamples * _audioChannel).
* number of samples in data_ is
* (samples_per_channel_ * num_channels_).
*
* - Stereo data is stored in interleaved fashion
* starting with the left channel.
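(Not part of the patch.) A minimal sketch of the layout described in the comment above, assuming a stand-in struct that only mirrors the renamed members: stereo data is interleaved starting with the left channel, and the number of valid entries in data_ is samples_per_channel_ * num_channels_.

#include <cstdint>
#include <cstdio>

// Illustrative stand-in only: mirrors the renamed members so the layout rules
// above can be demonstrated; it is not the real webrtc::AudioFrame.
struct FrameSketch {
  enum { kMaxDataSizeSamples = 3840 };  // stereo, 32 kHz, 60 ms: 2*32*60
  int16_t data_[kMaxDataSizeSamples];   // interleaved samples, left channel first
  uint16_t samples_per_channel_;        // per-channel sample count
  int sample_rate_hz_;
  uint8_t num_channels_;                // 1 = mono, 2 = stereo
};

int main() {
  FrameSketch frame;
  frame.sample_rate_hz_ = 16000;
  frame.samples_per_channel_ =
      static_cast<uint16_t>(frame.sample_rate_hz_ / 100);  // 10 ms => 160
  frame.num_channels_ = 2;

  // Total valid entries in data_ = samples_per_channel_ * num_channels_.
  for (int i = 0; i < frame.samples_per_channel_; ++i) {
    frame.data_[2 * i] = 1000;       // left sample
    frame.data_[2 * i + 1] = -1000;  // right sample
  }
  std::printf("%d interleaved samples\n",
              frame.samples_per_channel_ * frame.num_channels_);
  return 0;
}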
@@ -722,7 +722,7 @@ VideoFrame::Free()
class AudioFrame
{
public:
enum{kMaxAudioFrameSizeSamples = 3840}; // stereo 32KHz 60ms 2*32*60
enum { kMaxDataSizeSamples = 3840 }; // stereo 32KHz 60ms 2*32*60

enum VADActivity
{
@@ -763,34 +763,33 @@ public:
AudioFrame& operator+=(const AudioFrame& rhs);
AudioFrame& operator-=(const AudioFrame& rhs);

WebRtc_Word32 _id;
WebRtc_UWord32 _timeStamp;
// TODO(andrew): clean up types.
WebRtc_Word32 id_;
WebRtc_UWord32 timestamp_;

// Supporting Stereo, stereo samples are interleaved
WebRtc_Word16 _payloadData[kMaxAudioFrameSizeSamples];
WebRtc_UWord16 _payloadDataLengthInSamples;
int _frequencyInHz;
WebRtc_UWord8 _audioChannel;
SpeechType _speechType;
VADActivity _vadActivity;

WebRtc_UWord32 _energy;
WebRtc_Word32 _volume;
WebRtc_Word16 data_[kMaxDataSizeSamples];
WebRtc_UWord16 samples_per_channel_;
int sample_rate_hz_;
WebRtc_UWord8 num_channels_;
SpeechType speech_type_;
VADActivity vad_activity_;
WebRtc_UWord32 energy_;
WebRtc_Word32 volume_; // TODO(andrew): investigate removing.
};

inline
AudioFrame::AudioFrame()
:
_id(-1),
_timeStamp(0),
_payloadData(),
_payloadDataLengthInSamples(0),
_frequencyInHz(0),
_audioChannel(1),
_speechType(kUndefined),
_vadActivity(kVadUnknown),
_energy(0xffffffff),
_volume(0xffffffff)
id_(-1),
timestamp_(0),
data_(),
samples_per_channel_(0),
sample_rate_hz_(0),
num_channels_(1),
speech_type_(kUndefined),
vad_activity_(kVadUnknown),
energy_(0xffffffff),
volume_(0xffffffff)
{
}

@@ -813,31 +812,31 @@ AudioFrame::UpdateFrame(
|
||||
const WebRtc_Word32 volume,
|
||||
const WebRtc_Word32 energy)
|
||||
{
|
||||
_id = id;
|
||||
_timeStamp = timeStamp;
|
||||
_frequencyInHz = frequencyInHz;
|
||||
_speechType = speechType;
|
||||
_vadActivity = vadActivity;
|
||||
_volume = volume;
|
||||
_audioChannel = audioChannel;
|
||||
_energy = energy;
|
||||
id_ = id;
|
||||
timestamp_ = timeStamp;
|
||||
sample_rate_hz_ = frequencyInHz;
|
||||
speech_type_ = speechType;
|
||||
vad_activity_ = vadActivity;
|
||||
volume_ = volume;
|
||||
num_channels_ = audioChannel;
|
||||
energy_ = energy;
|
||||
|
||||
if((payloadDataLengthInSamples > kMaxAudioFrameSizeSamples) ||
|
||||
if((payloadDataLengthInSamples > kMaxDataSizeSamples) ||
|
||||
(audioChannel > 2) || (audioChannel < 1))
|
||||
{
|
||||
_payloadDataLengthInSamples = 0;
|
||||
samples_per_channel_ = 0;
|
||||
return -1;
|
||||
}
|
||||
_payloadDataLengthInSamples = payloadDataLengthInSamples;
|
||||
samples_per_channel_ = payloadDataLengthInSamples;
|
||||
if(payloadData != NULL)
|
||||
{
|
||||
memcpy(_payloadData, payloadData, sizeof(WebRtc_Word16) *
|
||||
payloadDataLengthInSamples * _audioChannel);
|
||||
memcpy(data_, payloadData, sizeof(WebRtc_Word16) *
|
||||
payloadDataLengthInSamples * num_channels_);
|
||||
}
|
||||
else
|
||||
{
|
||||
memset(_payloadData,0,sizeof(WebRtc_Word16) *
|
||||
payloadDataLengthInSamples * _audioChannel);
|
||||
memset(data_,0,sizeof(WebRtc_Word16) *
|
||||
payloadDataLengthInSamples * num_channels_);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
@@ -846,7 +845,7 @@ inline
|
||||
void
|
||||
AudioFrame::Mute()
|
||||
{
|
||||
memset(_payloadData, 0, _payloadDataLengthInSamples * sizeof(WebRtc_Word16));
|
||||
memset(data_, 0, samples_per_channel_ * sizeof(WebRtc_Word16));
|
||||
}
|
||||
|
||||
inline
|
||||
@@ -854,9 +853,9 @@ AudioFrame&
|
||||
AudioFrame::operator=(const AudioFrame& rhs)
|
||||
{
|
||||
// Sanity Check
|
||||
if((rhs._payloadDataLengthInSamples > kMaxAudioFrameSizeSamples) ||
|
||||
(rhs._audioChannel > 2) ||
|
||||
(rhs._audioChannel < 1))
|
||||
if((rhs.samples_per_channel_ > kMaxDataSizeSamples) ||
|
||||
(rhs.num_channels_ > 2) ||
|
||||
(rhs.num_channels_ < 1))
|
||||
{
|
||||
return *this;
|
||||
}
|
||||
@@ -864,18 +863,18 @@ AudioFrame::operator=(const AudioFrame& rhs)
|
||||
{
|
||||
return *this;
|
||||
}
|
||||
_id = rhs._id;
|
||||
_timeStamp = rhs._timeStamp;
|
||||
_frequencyInHz = rhs._frequencyInHz;
|
||||
_speechType = rhs._speechType;
|
||||
_vadActivity = rhs._vadActivity;
|
||||
_volume = rhs._volume;
|
||||
_audioChannel = rhs._audioChannel;
|
||||
_energy = rhs._energy;
|
||||
id_ = rhs.id_;
|
||||
timestamp_ = rhs.timestamp_;
|
||||
sample_rate_hz_ = rhs.sample_rate_hz_;
|
||||
speech_type_ = rhs.speech_type_;
|
||||
vad_activity_ = rhs.vad_activity_;
|
||||
volume_ = rhs.volume_;
|
||||
num_channels_ = rhs.num_channels_;
|
||||
energy_ = rhs.energy_;
|
||||
|
||||
_payloadDataLengthInSamples = rhs._payloadDataLengthInSamples;
|
||||
memcpy(_payloadData, rhs._payloadData,
|
||||
sizeof(WebRtc_Word16) * rhs._payloadDataLengthInSamples * _audioChannel);
|
||||
samples_per_channel_ = rhs.samples_per_channel_;
|
||||
memcpy(data_, rhs.data_,
|
||||
sizeof(WebRtc_Word16) * rhs.samples_per_channel_ * num_channels_);
|
||||
|
||||
return *this;
|
||||
}
|
||||
@@ -884,15 +883,15 @@ inline
|
||||
AudioFrame&
|
||||
AudioFrame::operator>>=(const WebRtc_Word32 rhs)
|
||||
{
|
||||
assert((_audioChannel > 0) && (_audioChannel < 3));
|
||||
if((_audioChannel > 2) ||
|
||||
(_audioChannel < 1))
|
||||
assert((num_channels_ > 0) && (num_channels_ < 3));
|
||||
if((num_channels_ > 2) ||
|
||||
(num_channels_ < 1))
|
||||
{
|
||||
return *this;
|
||||
}
|
||||
for(WebRtc_UWord16 i = 0; i < _payloadDataLengthInSamples * _audioChannel; i++)
|
||||
for(WebRtc_UWord16 i = 0; i < samples_per_channel_ * num_channels_; i++)
|
||||
{
|
||||
_payloadData[i] = WebRtc_Word16(_payloadData[i] >> rhs);
|
||||
data_[i] = WebRtc_Word16(data_[i] >> rhs);
|
||||
}
|
||||
return *this;
|
||||
}
|
||||
@@ -902,39 +901,39 @@ AudioFrame&
|
||||
AudioFrame::Append(const AudioFrame& rhs)
|
||||
{
|
||||
// Sanity check
|
||||
assert((_audioChannel > 0) && (_audioChannel < 3));
|
||||
if((_audioChannel > 2) ||
|
||||
(_audioChannel < 1))
|
||||
assert((num_channels_ > 0) && (num_channels_ < 3));
|
||||
if((num_channels_ > 2) ||
|
||||
(num_channels_ < 1))
|
||||
{
|
||||
return *this;
|
||||
}
|
||||
if(_audioChannel != rhs._audioChannel)
|
||||
if(num_channels_ != rhs.num_channels_)
|
||||
{
|
||||
return *this;
|
||||
}
|
||||
if((_vadActivity == kVadActive) ||
|
||||
rhs._vadActivity == kVadActive)
|
||||
if((vad_activity_ == kVadActive) ||
|
||||
rhs.vad_activity_ == kVadActive)
|
||||
{
|
||||
_vadActivity = kVadActive;
|
||||
vad_activity_ = kVadActive;
|
||||
}
|
||||
else if((_vadActivity == kVadUnknown) ||
|
||||
rhs._vadActivity == kVadUnknown)
|
||||
else if((vad_activity_ == kVadUnknown) ||
|
||||
rhs.vad_activity_ == kVadUnknown)
|
||||
{
|
||||
_vadActivity = kVadUnknown;
|
||||
vad_activity_ = kVadUnknown;
|
||||
}
|
||||
if(_speechType != rhs._speechType)
|
||||
if(speech_type_ != rhs.speech_type_)
|
||||
{
|
||||
_speechType = kUndefined;
|
||||
speech_type_ = kUndefined;
|
||||
}
|
||||
|
||||
WebRtc_UWord16 offset = _payloadDataLengthInSamples * _audioChannel;
|
||||
WebRtc_UWord16 offset = samples_per_channel_ * num_channels_;
|
||||
for(WebRtc_UWord16 i = 0;
|
||||
i < rhs._payloadDataLengthInSamples * rhs._audioChannel;
|
||||
i < rhs.samples_per_channel_ * rhs.num_channels_;
|
||||
i++)
|
||||
{
|
||||
_payloadData[offset+i] = rhs._payloadData[i];
|
||||
data_[offset+i] = rhs.data_[i];
|
||||
}
|
||||
_payloadDataLengthInSamples += rhs._payloadDataLengthInSamples;
|
||||
samples_per_channel_ += rhs.samples_per_channel_;
|
||||
return *this;
|
||||
}
|
||||
|
||||
@@ -944,23 +943,23 @@ AudioFrame&
|
||||
AudioFrame::operator+=(const AudioFrame& rhs)
|
||||
{
|
||||
// Sanity check
|
||||
assert((_audioChannel > 0) && (_audioChannel < 3));
|
||||
if((_audioChannel > 2) ||
|
||||
(_audioChannel < 1))
|
||||
assert((num_channels_ > 0) && (num_channels_ < 3));
|
||||
if((num_channels_ > 2) ||
|
||||
(num_channels_ < 1))
|
||||
{
|
||||
return *this;
|
||||
}
|
||||
if(_audioChannel != rhs._audioChannel)
|
||||
if(num_channels_ != rhs.num_channels_)
|
||||
{
|
||||
return *this;
|
||||
}
|
||||
bool noPrevData = false;
|
||||
if(_payloadDataLengthInSamples != rhs._payloadDataLengthInSamples)
|
||||
if(samples_per_channel_ != rhs.samples_per_channel_)
|
||||
{
|
||||
if(_payloadDataLengthInSamples == 0)
|
||||
if(samples_per_channel_ == 0)
|
||||
{
|
||||
// special case we have no data to start with
|
||||
_payloadDataLengthInSamples = rhs._payloadDataLengthInSamples;
|
||||
samples_per_channel_ = rhs.samples_per_channel_;
|
||||
noPrevData = true;
|
||||
} else
|
||||
{
|
||||
@@ -968,47 +967,47 @@ AudioFrame::operator+=(const AudioFrame& rhs)
|
||||
}
|
||||
}
|
||||
|
||||
if((_vadActivity == kVadActive) ||
|
||||
rhs._vadActivity == kVadActive)
|
||||
if((vad_activity_ == kVadActive) ||
|
||||
rhs.vad_activity_ == kVadActive)
|
||||
{
|
||||
_vadActivity = kVadActive;
|
||||
vad_activity_ = kVadActive;
|
||||
}
|
||||
else if((_vadActivity == kVadUnknown) ||
|
||||
rhs._vadActivity == kVadUnknown)
|
||||
else if((vad_activity_ == kVadUnknown) ||
|
||||
rhs.vad_activity_ == kVadUnknown)
|
||||
{
|
||||
_vadActivity = kVadUnknown;
|
||||
vad_activity_ = kVadUnknown;
|
||||
}
|
||||
|
||||
if(_speechType != rhs._speechType)
|
||||
if(speech_type_ != rhs.speech_type_)
|
||||
{
|
||||
_speechType = kUndefined;
|
||||
speech_type_ = kUndefined;
|
||||
}
|
||||
|
||||
if(noPrevData)
|
||||
{
|
||||
memcpy(_payloadData, rhs._payloadData,
|
||||
sizeof(WebRtc_Word16) * rhs._payloadDataLengthInSamples * _audioChannel);
|
||||
memcpy(data_, rhs.data_,
|
||||
sizeof(WebRtc_Word16) * rhs.samples_per_channel_ * num_channels_);
|
||||
} else
|
||||
{
|
||||
// IMPROVEMENT this can be done very fast in assembly
|
||||
for(WebRtc_UWord16 i = 0; i < _payloadDataLengthInSamples * _audioChannel; i++)
|
||||
for(WebRtc_UWord16 i = 0; i < samples_per_channel_ * num_channels_; i++)
|
||||
{
|
||||
WebRtc_Word32 wrapGuard = (WebRtc_Word32)_payloadData[i] +
|
||||
(WebRtc_Word32)rhs._payloadData[i];
|
||||
WebRtc_Word32 wrapGuard = (WebRtc_Word32)data_[i] +
|
||||
(WebRtc_Word32)rhs.data_[i];
|
||||
if(wrapGuard < -32768)
|
||||
{
|
||||
_payloadData[i] = -32768;
|
||||
data_[i] = -32768;
|
||||
}else if(wrapGuard > 32767)
|
||||
{
|
||||
_payloadData[i] = 32767;
|
||||
data_[i] = 32767;
|
||||
}else
|
||||
{
|
||||
_payloadData[i] = (WebRtc_Word16)wrapGuard;
|
||||
data_[i] = (WebRtc_Word16)wrapGuard;
|
||||
}
|
||||
}
|
||||
}
|
||||
_energy = 0xffffffff;
|
||||
_volume = 0xffffffff;
|
||||
energy_ = 0xffffffff;
|
||||
volume_ = 0xffffffff;
|
||||
return *this;
|
||||
}
|
||||
|
||||
@@ -1017,43 +1016,43 @@ AudioFrame&
|
||||
AudioFrame::operator-=(const AudioFrame& rhs)
|
||||
{
|
||||
// Sanity check
|
||||
assert((_audioChannel > 0) && (_audioChannel < 3));
|
||||
if((_audioChannel > 2)||
|
||||
(_audioChannel < 1))
|
||||
assert((num_channels_ > 0) && (num_channels_ < 3));
|
||||
if((num_channels_ > 2)||
|
||||
(num_channels_ < 1))
|
||||
{
|
||||
return *this;
|
||||
}
|
||||
if((_payloadDataLengthInSamples != rhs._payloadDataLengthInSamples) ||
|
||||
(_audioChannel != rhs._audioChannel))
|
||||
if((samples_per_channel_ != rhs.samples_per_channel_) ||
|
||||
(num_channels_ != rhs.num_channels_))
|
||||
{
|
||||
return *this;
|
||||
}
|
||||
if((_vadActivity != kVadPassive) ||
|
||||
rhs._vadActivity != kVadPassive)
|
||||
if((vad_activity_ != kVadPassive) ||
|
||||
rhs.vad_activity_ != kVadPassive)
|
||||
{
|
||||
_vadActivity = kVadUnknown;
|
||||
vad_activity_ = kVadUnknown;
|
||||
}
|
||||
_speechType = kUndefined;
|
||||
speech_type_ = kUndefined;
|
||||
|
||||
for(WebRtc_UWord16 i = 0; i < _payloadDataLengthInSamples * _audioChannel; i++)
|
||||
for(WebRtc_UWord16 i = 0; i < samples_per_channel_ * num_channels_; i++)
|
||||
{
|
||||
WebRtc_Word32 wrapGuard = (WebRtc_Word32)_payloadData[i] -
|
||||
(WebRtc_Word32)rhs._payloadData[i];
|
||||
WebRtc_Word32 wrapGuard = (WebRtc_Word32)data_[i] -
|
||||
(WebRtc_Word32)rhs.data_[i];
|
||||
if(wrapGuard < -32768)
|
||||
{
|
||||
_payloadData[i] = -32768;
|
||||
data_[i] = -32768;
|
||||
}
|
||||
else if(wrapGuard > 32767)
|
||||
{
|
||||
_payloadData[i] = 32767;
|
||||
data_[i] = 32767;
|
||||
}
|
||||
else
|
||||
{
|
||||
_payloadData[i] = (WebRtc_Word16)wrapGuard;
|
||||
data_[i] = (WebRtc_Word16)wrapGuard;
|
||||
}
|
||||
}
|
||||
_energy = 0xffffffff;
|
||||
_volume = 0xffffffff;
|
||||
energy_ = 0xffffffff;
|
||||
volume_ = 0xffffffff;
|
||||
return *this;
|
||||
}
|
||||
|
||||
|
||||
@@ -94,8 +94,8 @@ WebRtc_Word32 AudioCoder::Encode(const AudioFrame& audio,
|
||||
// Fake a timestamp in case audio doesn't contain a correct timestamp.
|
||||
// Make a local copy of the audio frame since audio is const
|
||||
AudioFrame audioFrame = audio;
|
||||
audioFrame._timeStamp = _encodeTimestamp;
|
||||
_encodeTimestamp += audioFrame._payloadDataLengthInSamples;
|
||||
audioFrame.timestamp_ = _encodeTimestamp;
|
||||
_encodeTimestamp += audioFrame.samples_per_channel_;
|
||||
|
||||
// For any codec with a frame size that is longer than 10 ms the encoded
|
||||
// length in bytes should be zero until a full frame has been encoded.
|
||||
|
||||
@@ -133,13 +133,13 @@ WebRtc_Word32 FilePlayerImpl::Get10msAudioFromFile(
|
||||
AudioFrame unresampledAudioFrame;
|
||||
if(STR_CASE_CMP(_codec.plname, "L16") == 0)
|
||||
{
|
||||
unresampledAudioFrame._frequencyInHz = _codec.plfreq;
|
||||
unresampledAudioFrame.sample_rate_hz_ = _codec.plfreq;
|
||||
|
||||
// L16 is un-encoded data. Just pull 10 ms.
|
||||
WebRtc_UWord32 lengthInBytes =
|
||||
sizeof(unresampledAudioFrame._payloadData);
|
||||
sizeof(unresampledAudioFrame.data_);
|
||||
if (_fileModule.PlayoutAudioData(
|
||||
(WebRtc_Word8*)unresampledAudioFrame._payloadData,
|
||||
(WebRtc_Word8*)unresampledAudioFrame.data_,
|
||||
lengthInBytes) == -1)
|
||||
{
|
||||
// End of file reached.
|
||||
@@ -151,7 +151,7 @@ WebRtc_Word32 FilePlayerImpl::Get10msAudioFromFile(
|
||||
return 0;
|
||||
}
|
||||
// One sample is two bytes.
|
||||
unresampledAudioFrame._payloadDataLengthInSamples =
|
||||
unresampledAudioFrame.samples_per_channel_ =
|
||||
(WebRtc_UWord16)lengthInBytes >> 1;
|
||||
|
||||
}else {
|
||||
@@ -181,7 +181,7 @@ WebRtc_Word32 FilePlayerImpl::Get10msAudioFromFile(
|
||||
}
|
||||
|
||||
int outLen = 0;
|
||||
if(_resampler.ResetIfNeeded(unresampledAudioFrame._frequencyInHz,
|
||||
if(_resampler.ResetIfNeeded(unresampledAudioFrame.sample_rate_hz_,
|
||||
frequencyInHz, kResamplerSynchronous))
|
||||
{
|
||||
WEBRTC_TRACE(kTraceWarning, kTraceVoice, _instanceID,
|
||||
@@ -192,8 +192,8 @@ WebRtc_Word32 FilePlayerImpl::Get10msAudioFromFile(
|
||||
memset(outBuffer, 0, outLen * sizeof(WebRtc_Word16));
|
||||
return 0;
|
||||
}
|
||||
_resampler.Push(unresampledAudioFrame._payloadData,
|
||||
unresampledAudioFrame._payloadDataLengthInSamples,
|
||||
_resampler.Push(unresampledAudioFrame.data_,
|
||||
unresampledAudioFrame.samples_per_channel_,
|
||||
outBuffer,
|
||||
MAX_AUDIO_BUFFER_IN_SAMPLES,
|
||||
outLen);
|
||||
|
||||
@@ -201,46 +201,46 @@ WebRtc_Word32 FileRecorderImpl::RecordAudioToFile(
|
||||
return -1;
|
||||
}
|
||||
AudioFrame tempAudioFrame;
|
||||
tempAudioFrame._payloadDataLengthInSamples = 0;
|
||||
if( incomingAudioFrame._audioChannel == 2 &&
|
||||
tempAudioFrame.samples_per_channel_ = 0;
|
||||
if( incomingAudioFrame.num_channels_ == 2 &&
|
||||
!_moduleFile->IsStereo())
|
||||
{
|
||||
// Recording mono but incoming audio is (interleaved) stereo.
|
||||
tempAudioFrame._audioChannel = 1;
|
||||
tempAudioFrame._frequencyInHz = incomingAudioFrame._frequencyInHz;
|
||||
tempAudioFrame._payloadDataLengthInSamples =
|
||||
incomingAudioFrame._payloadDataLengthInSamples;
|
||||
tempAudioFrame.num_channels_ = 1;
|
||||
tempAudioFrame.sample_rate_hz_ = incomingAudioFrame.sample_rate_hz_;
|
||||
tempAudioFrame.samples_per_channel_ =
|
||||
incomingAudioFrame.samples_per_channel_;
|
||||
for (WebRtc_UWord16 i = 0;
|
||||
i < (incomingAudioFrame._payloadDataLengthInSamples); i++)
|
||||
i < (incomingAudioFrame.samples_per_channel_); i++)
|
||||
{
|
||||
// Sample value is the average of left and right buffer rounded to
|
||||
// closest integer value. Note samples can be either 1 or 2 byte.
|
||||
tempAudioFrame._payloadData[i] =
|
||||
((incomingAudioFrame._payloadData[2 * i] +
|
||||
incomingAudioFrame._payloadData[(2 * i) + 1] + 1) >> 1);
|
||||
tempAudioFrame.data_[i] =
|
||||
((incomingAudioFrame.data_[2 * i] +
|
||||
incomingAudioFrame.data_[(2 * i) + 1] + 1) >> 1);
|
||||
}
|
||||
}
|
||||
else if( incomingAudioFrame._audioChannel == 1 &&
|
||||
else if( incomingAudioFrame.num_channels_ == 1 &&
|
||||
_moduleFile->IsStereo())
|
||||
{
|
||||
// Recording stereo but incoming audio is mono.
|
||||
tempAudioFrame._audioChannel = 2;
|
||||
tempAudioFrame._frequencyInHz = incomingAudioFrame._frequencyInHz;
|
||||
tempAudioFrame._payloadDataLengthInSamples =
|
||||
incomingAudioFrame._payloadDataLengthInSamples;
|
||||
tempAudioFrame.num_channels_ = 2;
|
||||
tempAudioFrame.sample_rate_hz_ = incomingAudioFrame.sample_rate_hz_;
|
||||
tempAudioFrame.samples_per_channel_ =
|
||||
incomingAudioFrame.samples_per_channel_;
|
||||
for (WebRtc_UWord16 i = 0;
|
||||
i < (incomingAudioFrame._payloadDataLengthInSamples); i++)
|
||||
i < (incomingAudioFrame.samples_per_channel_); i++)
|
||||
{
|
||||
// Duplicate sample to both channels
|
||||
tempAudioFrame._payloadData[2*i] =
|
||||
incomingAudioFrame._payloadData[i];
|
||||
tempAudioFrame._payloadData[2*i+1] =
|
||||
incomingAudioFrame._payloadData[i];
|
||||
tempAudioFrame.data_[2*i] =
|
||||
incomingAudioFrame.data_[i];
|
||||
tempAudioFrame.data_[2*i+1] =
|
||||
incomingAudioFrame.data_[i];
|
||||
}
|
||||
}
|
||||
|
||||
const AudioFrame* ptrAudioFrame = &incomingAudioFrame;
|
||||
if(tempAudioFrame._payloadDataLengthInSamples != 0)
|
||||
if(tempAudioFrame.samples_per_channel_ != 0)
|
||||
{
|
||||
// If ptrAudioFrame is not empty it contains the audio to be recorded.
|
||||
ptrAudioFrame = &tempAudioFrame;
|
||||
@@ -269,23 +269,23 @@ WebRtc_Word32 FileRecorderImpl::RecordAudioToFile(
|
||||
}
|
||||
} else {
|
||||
int outLen = 0;
|
||||
if(ptrAudioFrame->_audioChannel == 2)
|
||||
if(ptrAudioFrame->num_channels_ == 2)
|
||||
{
|
||||
// ptrAudioFrame contains interleaved stereo audio.
|
||||
_audioResampler.ResetIfNeeded(ptrAudioFrame->_frequencyInHz,
|
||||
_audioResampler.ResetIfNeeded(ptrAudioFrame->sample_rate_hz_,
|
||||
codec_info_.plfreq,
|
||||
kResamplerSynchronousStereo);
|
||||
_audioResampler.Push(ptrAudioFrame->_payloadData,
|
||||
ptrAudioFrame->_payloadDataLengthInSamples *
|
||||
ptrAudioFrame->_audioChannel,
|
||||
_audioResampler.Push(ptrAudioFrame->data_,
|
||||
ptrAudioFrame->samples_per_channel_ *
|
||||
ptrAudioFrame->num_channels_,
|
||||
(WebRtc_Word16*)_audioBuffer,
|
||||
MAX_AUDIO_BUFFER_IN_BYTES, outLen);
|
||||
} else {
|
||||
_audioResampler.ResetIfNeeded(ptrAudioFrame->_frequencyInHz,
|
||||
_audioResampler.ResetIfNeeded(ptrAudioFrame->sample_rate_hz_,
|
||||
codec_info_.plfreq,
|
||||
kResamplerSynchronous);
|
||||
_audioResampler.Push(ptrAudioFrame->_payloadData,
|
||||
ptrAudioFrame->_payloadDataLengthInSamples,
|
||||
_audioResampler.Push(ptrAudioFrame->data_,
|
||||
ptrAudioFrame->samples_per_channel_,
|
||||
(WebRtc_Word16*)_audioBuffer,
|
||||
MAX_AUDIO_BUFFER_IN_BYTES, outLen);
|
||||
}
|
||||
@@ -298,8 +298,8 @@ WebRtc_Word32 FileRecorderImpl::RecordAudioToFile(
|
||||
if (encodedLenInBytes)
|
||||
{
|
||||
WebRtc_UWord16 msOfData =
|
||||
ptrAudioFrame->_payloadDataLengthInSamples /
|
||||
WebRtc_UWord16(ptrAudioFrame->_frequencyInHz / 1000);
|
||||
ptrAudioFrame->samples_per_channel_ /
|
||||
WebRtc_UWord16(ptrAudioFrame->sample_rate_hz_ / 1000);
|
||||
if (WriteEncodedAudioData(_audioBuffer,
|
||||
(WebRtc_UWord16)encodedLenInBytes,
|
||||
msOfData, playoutTS) == -1)
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
|
||||
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
@@ -151,13 +151,13 @@ int main(int /*argc*/, char** /*argv*/)
|
||||
}
|
||||
}
|
||||
WebRtc_UWord32 decodedDataLengthInSamples;
|
||||
if( 0 != filePlayer.Get10msAudioFromFile( audioFrame._payloadData, decodedDataLengthInSamples, audioCodec.plfreq))
|
||||
if( 0 != filePlayer.Get10msAudioFromFile( audioFrame.data_, decodedDataLengthInSamples, audioCodec.plfreq))
|
||||
{
|
||||
audioNotDone = false;
|
||||
} else
|
||||
{
|
||||
audioFrame._frequencyInHz = filePlayer.Frequency();
|
||||
audioFrame._payloadDataLengthInSamples = (WebRtc_UWord16)decodedDataLengthInSamples;
|
||||
audioFrame.sample_rate_hz_ = filePlayer.Frequency();
|
||||
audioFrame.samples_per_channel_ = (WebRtc_UWord16)decodedDataLengthInSamples;
|
||||
fileRecorder.RecordAudioToFile(audioFrame, &TickTime::Now());
|
||||
}
|
||||
}
|
||||
@@ -214,9 +214,9 @@ int main(int /*argc*/, char** /*argv*/)
|
||||
|
||||
// 10 ms
|
||||
AudioFrame audioFrame;
|
||||
audioFrame._payloadDataLengthInSamples = audioCodec.plfreq/100;
|
||||
memset(audioFrame._payloadData, 0, 2*audioFrame._payloadDataLengthInSamples);
|
||||
audioFrame._frequencyInHz = 8000;
|
||||
audioFrame.samples_per_channel_ = audioCodec.plfreq/100;
|
||||
memset(audioFrame.data_, 0, 2*audioFrame.samples_per_channel_);
|
||||
audioFrame.sample_rate_hz_ = 8000;
|
||||
|
||||
// prepare the video frame
|
||||
videoFrame.VerifyAndAllocate(KVideoWriteSize);
|
||||
@@ -338,15 +338,15 @@ int main(int /*argc*/, char** /*argv*/)
|
||||
}
|
||||
|
||||
WebRtc_UWord32 decodedDataLengthInSamples;
|
||||
if( 0 != filePlayer.Get10msAudioFromFile( audioFrame._payloadData, decodedDataLengthInSamples, audioCodec.plfreq))
|
||||
if( 0 != filePlayer.Get10msAudioFromFile( audioFrame.data_, decodedDataLengthInSamples, audioCodec.plfreq))
|
||||
{
|
||||
audioNotDone = false;
|
||||
|
||||
} else
|
||||
{
|
||||
::Sleep(5);
|
||||
audioFrame._frequencyInHz = filePlayer.Frequency();
|
||||
audioFrame._payloadDataLengthInSamples = (WebRtc_UWord16)decodedDataLengthInSamples;
|
||||
audioFrame.sample_rate_hz_ = filePlayer.Frequency();
|
||||
audioFrame.samples_per_channel_ = (WebRtc_UWord16)decodedDataLengthInSamples;
|
||||
assert(0 == fileRecorder.RecordAudioToFile(audioFrame));
|
||||
|
||||
audioFrameCount++;
|
||||
|
||||
@@ -15,70 +15,70 @@ namespace webrtc {
|
||||
namespace voe {
|
||||
|
||||
int AudioFrameOperations::MonoToStereo(AudioFrame& frame) {
|
||||
if (frame._audioChannel != 1) {
|
||||
if (frame.num_channels_ != 1) {
|
||||
return -1;
|
||||
}
|
||||
if ((frame._payloadDataLengthInSamples << 1) >=
|
||||
AudioFrame::kMaxAudioFrameSizeSamples) {
|
||||
if ((frame.samples_per_channel_ << 1) >=
|
||||
AudioFrame::kMaxDataSizeSamples) {
|
||||
// not enough memory to expand from mono to stereo
|
||||
return -1;
|
||||
}
|
||||
|
||||
int16_t payloadCopy[AudioFrame::kMaxAudioFrameSizeSamples];
|
||||
memcpy(payloadCopy, frame._payloadData,
|
||||
sizeof(int16_t) * frame._payloadDataLengthInSamples);
|
||||
int16_t payloadCopy[AudioFrame::kMaxDataSizeSamples];
|
||||
memcpy(payloadCopy, frame.data_,
|
||||
sizeof(int16_t) * frame.samples_per_channel_);
|
||||
|
||||
for (int i = 0; i < frame._payloadDataLengthInSamples; i++) {
|
||||
frame._payloadData[2 * i] = payloadCopy[i];
|
||||
frame._payloadData[2 * i + 1] = payloadCopy[i];
|
||||
for (int i = 0; i < frame.samples_per_channel_; i++) {
|
||||
frame.data_[2 * i] = payloadCopy[i];
|
||||
frame.data_[2 * i + 1] = payloadCopy[i];
|
||||
}
|
||||
|
||||
frame._audioChannel = 2;
|
||||
frame.num_channels_ = 2;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int AudioFrameOperations::StereoToMono(AudioFrame& frame) {
|
||||
if (frame._audioChannel != 2) {
|
||||
if (frame.num_channels_ != 2) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
for (int i = 0; i < frame._payloadDataLengthInSamples; i++) {
|
||||
frame._payloadData[i] = (frame._payloadData[2 * i] >> 1) +
|
||||
(frame._payloadData[2 * i + 1] >> 1);
|
||||
for (int i = 0; i < frame.samples_per_channel_; i++) {
|
||||
frame.data_[i] = (frame.data_[2 * i] >> 1) +
|
||||
(frame.data_[2 * i + 1] >> 1);
|
||||
}
|
||||
|
||||
frame._audioChannel = 1;
|
||||
frame.num_channels_ = 1;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void AudioFrameOperations::SwapStereoChannels(AudioFrame* frame) {
|
||||
if (frame->_audioChannel != 2) return;
|
||||
if (frame->num_channels_ != 2) return;
|
||||
|
||||
for (int i = 0; i < frame->_payloadDataLengthInSamples * 2; i += 2) {
|
||||
int16_t temp_data = frame->_payloadData[i];
|
||||
frame->_payloadData[i] = frame->_payloadData[i + 1];
|
||||
frame->_payloadData[i + 1] = temp_data;
|
||||
for (int i = 0; i < frame->samples_per_channel_ * 2; i += 2) {
|
||||
int16_t temp_data = frame->data_[i];
|
||||
frame->data_[i] = frame->data_[i + 1];
|
||||
frame->data_[i + 1] = temp_data;
|
||||
}
|
||||
}
|
||||
|
||||
void AudioFrameOperations::Mute(AudioFrame& frame) {
|
||||
memset(frame._payloadData, 0, sizeof(int16_t) *
|
||||
frame._payloadDataLengthInSamples * frame._audioChannel);
|
||||
frame._energy = 0;
|
||||
memset(frame.data_, 0, sizeof(int16_t) *
|
||||
frame.samples_per_channel_ * frame.num_channels_);
|
||||
frame.energy_ = 0;
|
||||
}
|
||||
|
||||
int AudioFrameOperations::Scale(float left, float right, AudioFrame& frame) {
|
||||
if (frame._audioChannel != 2) {
|
||||
if (frame.num_channels_ != 2) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
for (int i = 0; i < frame._payloadDataLengthInSamples; i++) {
|
||||
frame._payloadData[2 * i] =
|
||||
static_cast<int16_t>(left * frame._payloadData[2 * i]);
|
||||
frame._payloadData[2 * i + 1] =
|
||||
static_cast<int16_t>(right * frame._payloadData[2 * i + 1]);
|
||||
for (int i = 0; i < frame.samples_per_channel_; i++) {
|
||||
frame.data_[2 * i] =
|
||||
static_cast<int16_t>(left * frame.data_[2 * i]);
|
||||
frame.data_[2 * i + 1] =
|
||||
static_cast<int16_t>(right * frame.data_[2 * i + 1]);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
@@ -87,15 +87,15 @@ int AudioFrameOperations::ScaleWithSat(float scale, AudioFrame& frame) {
|
||||
int32_t temp_data = 0;
|
||||
|
||||
// Ensure that the output result is saturated [-32768, +32767].
|
||||
for (int i = 0; i < frame._payloadDataLengthInSamples * frame._audioChannel;
|
||||
for (int i = 0; i < frame.samples_per_channel_ * frame.num_channels_;
|
||||
i++) {
|
||||
temp_data = static_cast<int32_t>(scale * frame._payloadData[i]);
|
||||
temp_data = static_cast<int32_t>(scale * frame.data_[i]);
|
||||
if (temp_data < -32768) {
|
||||
frame._payloadData[i] = -32768;
|
||||
frame.data_[i] = -32768;
|
||||
} else if (temp_data > 32767) {
|
||||
frame._payloadData[i] = 32767;
|
||||
frame.data_[i] = 32767;
|
||||
} else {
|
||||
frame._payloadData[i] = static_cast<int16_t>(temp_data);
|
||||
frame.data_[i] = static_cast<int16_t>(temp_data);
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
|
||||
@@ -21,59 +21,59 @@ class AudioFrameOperationsTest : public ::testing::Test {
|
||||
protected:
|
||||
AudioFrameOperationsTest() {
|
||||
// Set typical values.
|
||||
frame_._payloadDataLengthInSamples = 320;
|
||||
frame_._audioChannel = 2;
|
||||
frame_.samples_per_channel_ = 320;
|
||||
frame_.num_channels_ = 2;
|
||||
}
|
||||
|
||||
AudioFrame frame_;
|
||||
};
|
||||
|
||||
void SetFrameData(AudioFrame* frame, int16_t left, int16_t right) {
|
||||
for (int i = 0; i < frame->_payloadDataLengthInSamples * 2; i += 2) {
|
||||
frame->_payloadData[i] = left;
|
||||
frame->_payloadData[i + 1] = right;
|
||||
for (int i = 0; i < frame->samples_per_channel_ * 2; i += 2) {
|
||||
frame->data_[i] = left;
|
||||
frame->data_[i + 1] = right;
|
||||
}
|
||||
}
|
||||
|
||||
void SetFrameData(AudioFrame* frame, int16_t data) {
|
||||
for (int i = 0; i < frame->_payloadDataLengthInSamples; i++) {
|
||||
frame->_payloadData[i] = data;
|
||||
for (int i = 0; i < frame->samples_per_channel_; i++) {
|
||||
frame->data_[i] = data;
|
||||
}
|
||||
}
|
||||
|
||||
void VerifyFramesAreEqual(const AudioFrame& frame1, const AudioFrame& frame2) {
|
||||
EXPECT_EQ(frame1._audioChannel, frame2._audioChannel);
|
||||
EXPECT_EQ(frame1._payloadDataLengthInSamples,
|
||||
frame2._payloadDataLengthInSamples);
|
||||
EXPECT_EQ(frame1.num_channels_, frame2.num_channels_);
|
||||
EXPECT_EQ(frame1.samples_per_channel_,
|
||||
frame2.samples_per_channel_);
|
||||
|
||||
for (int i = 0; i < frame1._payloadDataLengthInSamples * frame1._audioChannel;
|
||||
for (int i = 0; i < frame1.samples_per_channel_ * frame1.num_channels_;
|
||||
i++) {
|
||||
EXPECT_EQ(frame1._payloadData[i], frame2._payloadData[i]);
|
||||
EXPECT_EQ(frame1.data_[i], frame2.data_[i]);
|
||||
}
|
||||
}
|
||||
|
||||
TEST_F(AudioFrameOperationsTest, MonoToStereoFailsWithBadParameters) {
|
||||
EXPECT_EQ(-1, AudioFrameOperations::MonoToStereo(frame_));
|
||||
|
||||
frame_._payloadDataLengthInSamples = AudioFrame::kMaxAudioFrameSizeSamples;
|
||||
frame_._audioChannel = 1;
|
||||
frame_.samples_per_channel_ = AudioFrame::kMaxDataSizeSamples;
|
||||
frame_.num_channels_ = 1;
|
||||
EXPECT_EQ(-1, AudioFrameOperations::MonoToStereo(frame_));
|
||||
}
|
||||
|
||||
TEST_F(AudioFrameOperationsTest, MonoToStereoSucceeds) {
|
||||
frame_._audioChannel = 1;
|
||||
frame_.num_channels_ = 1;
|
||||
SetFrameData(&frame_, 1);
|
||||
EXPECT_EQ(0, AudioFrameOperations::MonoToStereo(frame_));
|
||||
|
||||
AudioFrame stereo_frame;
|
||||
stereo_frame._payloadDataLengthInSamples = 320;
|
||||
stereo_frame._audioChannel = 2;
|
||||
stereo_frame.samples_per_channel_ = 320;
|
||||
stereo_frame.num_channels_ = 2;
|
||||
SetFrameData(&stereo_frame, 1, 1);
|
||||
VerifyFramesAreEqual(stereo_frame, frame_);
|
||||
}
|
||||
|
||||
TEST_F(AudioFrameOperationsTest, StereoToMonoFailsWithBadParameters) {
|
||||
frame_._audioChannel = 1;
|
||||
frame_.num_channels_ = 1;
|
||||
EXPECT_EQ(-1, AudioFrameOperations::StereoToMono(frame_));
|
||||
}
|
||||
|
||||
@@ -82,8 +82,8 @@ TEST_F(AudioFrameOperationsTest, StereoToMonoSucceeds) {
|
||||
EXPECT_EQ(0, AudioFrameOperations::StereoToMono(frame_));
|
||||
|
||||
AudioFrame mono_frame;
|
||||
mono_frame._payloadDataLengthInSamples = 320;
|
||||
mono_frame._audioChannel = 1;
|
||||
mono_frame.samples_per_channel_ = 320;
|
||||
mono_frame.num_channels_ = 1;
|
||||
SetFrameData(&mono_frame, 3);
|
||||
VerifyFramesAreEqual(mono_frame, frame_);
|
||||
}
|
||||
@@ -93,8 +93,8 @@ TEST_F(AudioFrameOperationsTest, StereoToMonoDoesNotWrapAround) {
|
||||
EXPECT_EQ(0, AudioFrameOperations::StereoToMono(frame_));
|
||||
|
||||
AudioFrame mono_frame;
|
||||
mono_frame._payloadDataLengthInSamples = 320;
|
||||
mono_frame._audioChannel = 1;
|
||||
mono_frame.samples_per_channel_ = 320;
|
||||
mono_frame.num_channels_ = 1;
|
||||
SetFrameData(&mono_frame, -32768);
|
||||
VerifyFramesAreEqual(mono_frame, frame_);
|
||||
}
|
||||
@@ -103,8 +103,8 @@ TEST_F(AudioFrameOperationsTest, SwapStereoChannelsSucceedsOnStereo) {
|
||||
SetFrameData(&frame_, 0, 1);
|
||||
|
||||
AudioFrame swapped_frame;
|
||||
swapped_frame._payloadDataLengthInSamples = 320;
|
||||
swapped_frame._audioChannel = 2;
|
||||
swapped_frame.samples_per_channel_ = 320;
|
||||
swapped_frame.num_channels_ = 2;
|
||||
SetFrameData(&swapped_frame, 1, 0);
|
||||
|
||||
AudioFrameOperations::SwapStereoChannels(&frame_);
|
||||
@@ -112,7 +112,7 @@ TEST_F(AudioFrameOperationsTest, SwapStereoChannelsSucceedsOnStereo) {
|
||||
}
|
||||
|
||||
TEST_F(AudioFrameOperationsTest, SwapStereoChannelsFailsOnMono) {
|
||||
frame_._audioChannel = 1;
|
||||
frame_.num_channels_ = 1;
|
||||
// Set data to "stereo", despite it being a mono frame.
|
||||
SetFrameData(&frame_, 0, 1);
|
||||
|
||||
@@ -124,28 +124,28 @@ TEST_F(AudioFrameOperationsTest, SwapStereoChannelsFailsOnMono) {
|
||||
|
||||
TEST_F(AudioFrameOperationsTest, MuteSucceeds) {
|
||||
SetFrameData(&frame_, 1000, 1000);
|
||||
frame_._energy = 1000 * 1000 * frame_._payloadDataLengthInSamples *
|
||||
frame_._audioChannel;
|
||||
frame_.energy_ = 1000 * 1000 * frame_.samples_per_channel_ *
|
||||
frame_.num_channels_;
|
||||
AudioFrameOperations::Mute(frame_);
|
||||
|
||||
AudioFrame muted_frame;
|
||||
muted_frame._payloadDataLengthInSamples = 320;
|
||||
muted_frame._audioChannel = 2;
|
||||
muted_frame.samples_per_channel_ = 320;
|
||||
muted_frame.num_channels_ = 2;
|
||||
SetFrameData(&muted_frame, 0, 0);
|
||||
muted_frame._energy = 0;
|
||||
muted_frame.energy_ = 0;
|
||||
VerifyFramesAreEqual(muted_frame, frame_);
|
||||
EXPECT_EQ(muted_frame._energy, frame_._energy);
|
||||
EXPECT_EQ(muted_frame.energy_, frame_.energy_);
|
||||
}
|
||||
|
||||
// TODO(andrew): should not allow negative scales.
|
||||
TEST_F(AudioFrameOperationsTest, DISABLED_ScaleFailsWithBadParameters) {
|
||||
frame_._audioChannel = 1;
|
||||
frame_.num_channels_ = 1;
|
||||
EXPECT_EQ(-1, AudioFrameOperations::Scale(1.0, 1.0, frame_));
|
||||
|
||||
frame_._audioChannel = 3;
|
||||
frame_.num_channels_ = 3;
|
||||
EXPECT_EQ(-1, AudioFrameOperations::Scale(1.0, 1.0, frame_));
|
||||
|
||||
frame_._audioChannel = 2;
|
||||
frame_.num_channels_ = 2;
|
||||
EXPECT_EQ(-1, AudioFrameOperations::Scale(-1.0, 1.0, frame_));
|
||||
EXPECT_EQ(-1, AudioFrameOperations::Scale(1.0, -1.0, frame_));
|
||||
}
|
||||
@@ -156,8 +156,8 @@ TEST_F(AudioFrameOperationsTest, DISABLED_ScaleDoesNotWrapAround) {
|
||||
EXPECT_EQ(0, AudioFrameOperations::Scale(10.0, 10.0, frame_));
|
||||
|
||||
AudioFrame clipped_frame;
|
||||
clipped_frame._payloadDataLengthInSamples = 320;
|
||||
clipped_frame._audioChannel = 2;
|
||||
clipped_frame.samples_per_channel_ = 320;
|
||||
clipped_frame.num_channels_ = 2;
|
||||
SetFrameData(&clipped_frame, 32767, -32768);
|
||||
VerifyFramesAreEqual(clipped_frame, frame_);
|
||||
}
|
||||
@@ -167,8 +167,8 @@ TEST_F(AudioFrameOperationsTest, ScaleSucceeds) {
|
||||
EXPECT_EQ(0, AudioFrameOperations::Scale(2.0, 3.0, frame_));
|
||||
|
||||
AudioFrame scaled_frame;
|
||||
scaled_frame._payloadDataLengthInSamples = 320;
|
||||
scaled_frame._audioChannel = 2;
|
||||
scaled_frame.samples_per_channel_ = 320;
|
||||
scaled_frame.num_channels_ = 2;
|
||||
SetFrameData(&scaled_frame, 2, -3);
|
||||
VerifyFramesAreEqual(scaled_frame, frame_);
|
||||
}
|
||||
@@ -179,13 +179,13 @@ TEST_F(AudioFrameOperationsTest, DISABLED_ScaleWithSatFailsWithBadParameters) {
|
||||
}
|
||||
|
||||
TEST_F(AudioFrameOperationsTest, ScaleWithSatDoesNotWrapAround) {
|
||||
frame_._audioChannel = 1;
|
||||
frame_.num_channels_ = 1;
|
||||
SetFrameData(&frame_, 4000);
|
||||
EXPECT_EQ(0, AudioFrameOperations::ScaleWithSat(10.0, frame_));
|
||||
|
||||
AudioFrame clipped_frame;
|
||||
clipped_frame._payloadDataLengthInSamples = 320;
|
||||
clipped_frame._audioChannel = 1;
|
||||
clipped_frame.samples_per_channel_ = 320;
|
||||
clipped_frame.num_channels_ = 1;
|
||||
SetFrameData(&clipped_frame, 32767);
|
||||
VerifyFramesAreEqual(clipped_frame, frame_);
|
||||
|
||||
@@ -196,13 +196,13 @@ TEST_F(AudioFrameOperationsTest, ScaleWithSatDoesNotWrapAround) {
|
||||
}
|
||||
|
||||
TEST_F(AudioFrameOperationsTest, ScaleWithSatSucceeds) {
|
||||
frame_._audioChannel = 1;
|
||||
frame_.num_channels_ = 1;
|
||||
SetFrameData(&frame_, 1);
|
||||
EXPECT_EQ(0, AudioFrameOperations::ScaleWithSat(2.0, frame_));
|
||||
|
||||
AudioFrame scaled_frame;
|
||||
scaled_frame._payloadDataLengthInSamples = 320;
|
||||
scaled_frame._audioChannel = 1;
|
||||
scaled_frame.samples_per_channel_ = 320;
|
||||
scaled_frame.num_channels_ = 1;
|
||||
SetFrameData(&scaled_frame, 2);
|
||||
VerifyFramesAreEqual(scaled_frame, frame_);
|
||||
}
|
||||
|
||||
@@ -829,7 +829,7 @@ WebRtc_Word32 Channel::GetAudioFrame(const WebRtc_Word32 id,
|
||||
"Channel::GetAudioFrame(id=%d)", id);
|
||||
|
||||
// Get 10ms raw PCM data from the ACM (mixer limits output frequency)
|
||||
if (_audioCodingModule.PlayoutData10Ms(audioFrame._frequencyInHz,
|
||||
if (_audioCodingModule.PlayoutData10Ms(audioFrame.sample_rate_hz_,
|
||||
audioFrame) == -1)
|
||||
{
|
||||
WEBRTC_TRACE(kTraceError, kTraceVoice,
|
||||
@@ -848,9 +848,9 @@ WebRtc_Word32 Channel::GetAudioFrame(const WebRtc_Word32 id,
|
||||
}
|
||||
|
||||
// Convert module ID to internal VoE channel ID
|
||||
audioFrame._id = VoEChannelId(audioFrame._id);
|
||||
audioFrame.id_ = VoEChannelId(audioFrame.id_);
|
||||
// Store speech type for dead-or-alive detection
|
||||
_outputSpeechType = audioFrame._speechType;
|
||||
_outputSpeechType = audioFrame.speech_type_;
|
||||
|
||||
// Perform far-end AudioProcessing module processing on the received signal
|
||||
if (_rxApmIsEnabled)
|
||||
@@ -869,7 +869,7 @@ WebRtc_Word32 Channel::GetAudioFrame(const WebRtc_Word32 id,
|
||||
|
||||
if (_panLeft != 1.0f || _panRight != 1.0f)
|
||||
{
|
||||
if (audioFrame._audioChannel == 1)
|
||||
if (audioFrame.num_channels_ == 1)
|
||||
{
|
||||
// Emulate stereo mode since panning is active.
|
||||
// The mono signal is copied to both left and right channels here.
|
||||
@@ -886,7 +886,7 @@ WebRtc_Word32 Channel::GetAudioFrame(const WebRtc_Word32 id,
|
||||
// Mix decoded PCM output with file if file mixing is enabled
|
||||
if (_outputFilePlaying)
|
||||
{
|
||||
MixAudioWithFile(audioFrame, audioFrame._frequencyInHz);
|
||||
MixAudioWithFile(audioFrame, audioFrame.sample_rate_hz_);
|
||||
}
|
||||
|
||||
// Place channel in on-hold state (~muted) if on-hold is activated
|
||||
@@ -899,15 +899,15 @@ WebRtc_Word32 Channel::GetAudioFrame(const WebRtc_Word32 id,
|
||||
if (_outputExternalMedia)
|
||||
{
|
||||
CriticalSectionScoped cs(&_callbackCritSect);
|
||||
const bool isStereo = (audioFrame._audioChannel == 2);
|
||||
const bool isStereo = (audioFrame.num_channels_ == 2);
|
||||
if (_outputExternalMediaCallbackPtr)
|
||||
{
|
||||
_outputExternalMediaCallbackPtr->Process(
|
||||
_channelId,
|
||||
kPlaybackPerChannel,
|
||||
(WebRtc_Word16*)audioFrame._payloadData,
|
||||
audioFrame._payloadDataLengthInSamples,
|
||||
audioFrame._frequencyInHz,
|
||||
(WebRtc_Word16*)audioFrame.data_,
|
||||
audioFrame.samples_per_channel_,
|
||||
audioFrame.sample_rate_hz_,
|
||||
isStereo);
|
||||
}
|
||||
}
|
||||
@@ -1610,7 +1610,7 @@ WebRtc_Word32
|
||||
Channel::UpdateLocalTimeStamp()
|
||||
{
|
||||
|
||||
_timeStamp += _audioFrame._payloadDataLengthInSamples;
|
||||
_timeStamp += _audioFrame.samples_per_channel_;
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -4657,7 +4657,7 @@ Channel::UpdateRxVadDetection(AudioFrame& audioFrame)
|
||||
|
||||
int vadDecision = 1;
|
||||
|
||||
vadDecision = (audioFrame._vadActivity == AudioFrame::kVadActive)? 1 : 0;
|
||||
vadDecision = (audioFrame.vad_activity_ == AudioFrame::kVadActive)? 1 : 0;
|
||||
|
||||
if ((vadDecision != _oldVadDecision) && _rxVadObserverPtr)
|
||||
{
|
||||
@@ -5774,7 +5774,7 @@ Channel::Demultiplex(const AudioFrame& audioFrame)
|
||||
WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,_channelId),
|
||||
"Channel::Demultiplex()");
|
||||
_audioFrame = audioFrame;
|
||||
_audioFrame._id = _channelId;
|
||||
_audioFrame.id_ = _channelId;
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -5784,7 +5784,7 @@ Channel::PrepareEncodeAndSend(int mixingFrequency)
|
||||
WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,_channelId),
|
||||
"Channel::PrepareEncodeAndSend()");
|
||||
|
||||
if (_audioFrame._payloadDataLengthInSamples == 0)
|
||||
if (_audioFrame.samples_per_channel_ == 0)
|
||||
{
|
||||
WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId,_channelId),
|
||||
"Channel::PrepareEncodeAndSend() invalid audio frame");
|
||||
@@ -5804,15 +5804,15 @@ Channel::PrepareEncodeAndSend(int mixingFrequency)
|
||||
if (_inputExternalMedia)
|
||||
{
|
||||
CriticalSectionScoped cs(&_callbackCritSect);
|
||||
const bool isStereo = (_audioFrame._audioChannel == 2);
|
||||
const bool isStereo = (_audioFrame.num_channels_ == 2);
|
||||
if (_inputExternalMediaCallbackPtr)
|
||||
{
|
||||
_inputExternalMediaCallbackPtr->Process(
|
||||
_channelId,
|
||||
kRecordingPerChannel,
|
||||
(WebRtc_Word16*)_audioFrame._payloadData,
|
||||
_audioFrame._payloadDataLengthInSamples,
|
||||
_audioFrame._frequencyInHz,
|
||||
(WebRtc_Word16*)_audioFrame.data_,
|
||||
_audioFrame.samples_per_channel_,
|
||||
_audioFrame.sample_rate_hz_,
|
||||
isStereo);
|
||||
}
|
||||
}
|
||||
@@ -5824,9 +5824,9 @@ Channel::PrepareEncodeAndSend(int mixingFrequency)
|
||||
assert(_rtpAudioProc.get() != NULL);
|
||||
|
||||
// Check if settings need to be updated.
|
||||
if (_rtpAudioProc->sample_rate_hz() != _audioFrame._frequencyInHz)
|
||||
if (_rtpAudioProc->sample_rate_hz() != _audioFrame.sample_rate_hz_)
|
||||
{
|
||||
if (_rtpAudioProc->set_sample_rate_hz(_audioFrame._frequencyInHz) !=
|
||||
if (_rtpAudioProc->set_sample_rate_hz(_audioFrame.sample_rate_hz_) !=
|
||||
AudioProcessing::kNoError)
|
||||
{
|
||||
WEBRTC_TRACE(kTraceWarning, kTraceVoice,
|
||||
@@ -5836,10 +5836,10 @@ Channel::PrepareEncodeAndSend(int mixingFrequency)
|
||||
}
|
||||
}
|
||||
|
||||
if (_rtpAudioProc->num_input_channels() != _audioFrame._audioChannel)
|
||||
if (_rtpAudioProc->num_input_channels() != _audioFrame.num_channels_)
|
||||
{
|
||||
if (_rtpAudioProc->set_num_channels(_audioFrame._audioChannel,
|
||||
_audioFrame._audioChannel)
|
||||
if (_rtpAudioProc->set_num_channels(_audioFrame.num_channels_,
|
||||
_audioFrame.num_channels_)
|
||||
!= AudioProcessing::kNoError)
|
||||
{
|
||||
WEBRTC_TRACE(kTraceWarning, kTraceVoice,
|
||||
@@ -5862,20 +5862,20 @@ Channel::EncodeAndSend()
|
||||
WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,_channelId),
|
||||
"Channel::EncodeAndSend()");
|
||||
|
||||
assert(_audioFrame._audioChannel <= 2);
|
||||
if (_audioFrame._payloadDataLengthInSamples == 0)
|
||||
assert(_audioFrame.num_channels_ <= 2);
|
||||
if (_audioFrame.samples_per_channel_ == 0)
|
||||
{
|
||||
WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId,_channelId),
|
||||
"Channel::EncodeAndSend() invalid audio frame");
|
||||
return -1;
|
||||
}
|
||||
|
||||
_audioFrame._id = _channelId;
|
||||
_audioFrame.id_ = _channelId;
|
||||
|
||||
// --- Add 10ms of raw (PCM) audio data to the encoder @ 32kHz.
|
||||
|
||||
// The ACM resamples internally.
|
||||
_audioFrame._timeStamp = _timeStamp;
|
||||
_audioFrame.timestamp_ = _timeStamp;
|
||||
if (_audioCodingModule.Add10MsData((AudioFrame&)_audioFrame) != 0)
|
||||
{
|
||||
WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_instanceId,_channelId),
|
||||
@@ -5883,7 +5883,7 @@ Channel::EncodeAndSend()
|
||||
return -1;
|
||||
}
|
||||
|
||||
_timeStamp += _audioFrame._payloadDataLengthInSamples;
|
||||
_timeStamp += _audioFrame.samples_per_channel_;
|
||||
|
||||
// --- Encode if complete frame is ready
|
||||
|
||||
@@ -6179,14 +6179,14 @@ Channel::MixOrReplaceAudioWithFile(const int mixingFrequency)
|
||||
}
|
||||
}
|
||||
|
||||
assert(_audioFrame._payloadDataLengthInSamples == fileSamples);
|
||||
assert(_audioFrame.samples_per_channel_ == fileSamples);
|
||||
|
||||
if (_mixFileWithMicrophone)
|
||||
{
|
||||
// Currently file stream is always mono.
|
||||
// TODO(xians): Change the code when FilePlayer supports real stereo.
|
||||
Utility::MixWithSat(_audioFrame._payloadData,
|
||||
static_cast<int>(_audioFrame._audioChannel),
|
||||
Utility::MixWithSat(_audioFrame.data_,
|
||||
static_cast<int>(_audioFrame.num_channels_),
|
||||
fileBuffer.get(),
|
||||
1,
|
||||
static_cast<int>(fileSamples));
|
||||
@@ -6241,12 +6241,12 @@ Channel::MixAudioWithFile(AudioFrame& audioFrame,
|
||||
}
|
||||
}
|
||||
|
||||
if (audioFrame._payloadDataLengthInSamples == fileSamples)
|
||||
if (audioFrame.samples_per_channel_ == fileSamples)
|
||||
{
|
||||
// Currently file stream is always mono.
|
||||
// TODO(xians): Change the code when FilePlayer supports real stereo.
|
||||
Utility::MixWithSat(audioFrame._payloadData,
|
||||
static_cast<int>(audioFrame._audioChannel),
|
||||
Utility::MixWithSat(audioFrame.data_,
|
||||
static_cast<int>(audioFrame.num_channels_),
|
||||
fileBuffer.get(),
|
||||
1,
|
||||
static_cast<int>(fileSamples));
|
||||
@@ -6254,9 +6254,9 @@ Channel::MixAudioWithFile(AudioFrame& audioFrame,
|
||||
else
|
||||
{
|
||||
WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId,_channelId),
|
||||
"Channel::MixAudioWithFile() _payloadDataLengthInSamples(%d) != "
|
||||
"Channel::MixAudioWithFile() samples_per_channel_(%d) != "
|
||||
"fileSamples(%d)",
|
||||
audioFrame._payloadDataLengthInSamples, fileSamples);
|
||||
audioFrame.samples_per_channel_, fileSamples);
|
||||
return -1;
|
||||
}
|
||||
|
||||
@@ -6292,12 +6292,12 @@ Channel::InsertInbandDtmfTone()
|
||||
WebRtc_UWord16 frequency(0);
|
||||
_inbandDtmfGenerator.GetSampleRate(frequency);
|
||||
|
||||
if (frequency != _audioFrame._frequencyInHz)
|
||||
if (frequency != _audioFrame.sample_rate_hz_)
|
||||
{
|
||||
// Update sample rate of Dtmf tone since the mixing frequency
|
||||
// has changed.
|
||||
_inbandDtmfGenerator.SetSampleRate(
|
||||
(WebRtc_UWord16) (_audioFrame._frequencyInHz));
|
||||
(WebRtc_UWord16) (_audioFrame.sample_rate_hz_));
|
||||
// Reset the tone to be added taking the new sample rate into
|
||||
// account.
|
||||
_inbandDtmfGenerator.ResetTone();
|
||||
@@ -6316,19 +6316,19 @@ Channel::InsertInbandDtmfTone()
|
||||
|
||||
// Replace mixed audio with DTMF tone.
|
||||
for (int sample = 0;
|
||||
sample < _audioFrame._payloadDataLengthInSamples;
|
||||
sample < _audioFrame.samples_per_channel_;
|
||||
sample++)
|
||||
{
|
||||
for (int channel = 0;
|
||||
channel < _audioFrame._audioChannel;
|
||||
channel < _audioFrame.num_channels_;
|
||||
channel++)
|
||||
{
|
||||
_audioFrame._payloadData[sample * _audioFrame._audioChannel + channel] =
|
||||
_audioFrame.data_[sample * _audioFrame.num_channels_ + channel] =
|
||||
toneBuffer[sample];
|
||||
}
|
||||
}
|
||||
|
||||
assert(_audioFrame._payloadDataLengthInSamples == toneSamples);
|
||||
assert(_audioFrame.samples_per_channel_ == toneSamples);
|
||||
} else
|
||||
{
|
||||
// Add 10ms to "delay-since-last-tone" counter
|
||||
@@ -6572,15 +6572,15 @@ Channel::ApmProcessRx(AudioFrame& audioFrame)
|
||||
|
||||
// Reset the APM frequency if the frequency has changed
|
||||
if (_rxAudioProcessingModulePtr->sample_rate_hz() !=
|
||||
audioFrame._frequencyInHz)
|
||||
audioFrame.sample_rate_hz_)
|
||||
{
|
||||
if (_rxAudioProcessingModulePtr->set_sample_rate_hz(
|
||||
audioFrame._frequencyInHz) != 0)
|
||||
audioFrame.sample_rate_hz_) != 0)
|
||||
{
|
||||
WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId,-1),
|
||||
"AudioProcessingModule::set_sample_rate_hz("
|
||||
"_frequencyInHz=%u) => error",
|
||||
_audioFrame._frequencyInHz);
|
||||
"sample_rate_hz_=%u) => error",
|
||||
_audioFrame.sample_rate_hz_);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
|
||||
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
@@ -52,8 +52,8 @@ AudioLevel::ComputeLevel(const AudioFrame& audioFrame)
|
||||
|
||||
// Check speech level (works for 2 channels as well)
|
||||
absValue = WebRtcSpl_MaxAbsValueW16(
|
||||
audioFrame._payloadData,
|
||||
audioFrame._payloadDataLengthInSamples*audioFrame._audioChannel);
|
||||
audioFrame.data_,
|
||||
audioFrame.samples_per_channel_*audioFrame.num_channels_);
|
||||
if (absValue > _absMax)
|
||||
_absMax = absValue;
|
||||
|
||||
|
||||
@@ -32,7 +32,7 @@ OutputMixer::NewMixedAudio(const WebRtc_Word32 id,
|
||||
"OutputMixer::NewMixedAudio(id=%d, size=%u)", id, size);
|
||||
|
||||
_audioFrame = generalAudioFrame;
|
||||
_audioFrame._id = id;
|
||||
_audioFrame.id_ = id;
|
||||
}
|
||||
|
||||
void OutputMixer::MixedParticipants(
|
||||
@@ -539,9 +539,9 @@ OutputMixer::GetMixedAudio(const WebRtc_Word32 desiredFreqHz,
|
||||
|
||||
int outLen(0);
|
||||
|
||||
if (audioFrame._audioChannel == 1)
|
||||
if (audioFrame.num_channels_ == 1)
|
||||
{
|
||||
if (_resampler.ResetIfNeeded(audioFrame._frequencyInHz,
|
||||
if (_resampler.ResetIfNeeded(audioFrame.sample_rate_hz_,
|
||||
desiredFreqHz,
|
||||
kResamplerSynchronous) != 0)
|
||||
{
|
||||
@@ -552,7 +552,7 @@ OutputMixer::GetMixedAudio(const WebRtc_Word32 desiredFreqHz,
|
||||
}
|
||||
else
|
||||
{
|
||||
if (_resampler.ResetIfNeeded(audioFrame._frequencyInHz,
|
||||
if (_resampler.ResetIfNeeded(audioFrame.sample_rate_hz_,
|
||||
desiredFreqHz,
|
||||
kResamplerSynchronousStereo) != 0)
|
||||
{
|
||||
@@ -562,18 +562,18 @@ OutputMixer::GetMixedAudio(const WebRtc_Word32 desiredFreqHz,
|
||||
}
|
||||
}
|
||||
if (_resampler.Push(
|
||||
_audioFrame._payloadData,
|
||||
_audioFrame._payloadDataLengthInSamples*_audioFrame._audioChannel,
|
||||
audioFrame._payloadData,
|
||||
AudioFrame::kMaxAudioFrameSizeSamples,
|
||||
_audioFrame.data_,
|
||||
_audioFrame.samples_per_channel_*_audioFrame.num_channels_,
|
||||
audioFrame.data_,
|
||||
AudioFrame::kMaxDataSizeSamples,
|
||||
outLen) == 0)
|
||||
{
|
||||
// Ensure that output from resampler matches the audio-frame format.
|
||||
// Example: 10ms stereo output at 48kHz => outLen = 960 =>
|
||||
// convert _payloadDataLengthInSamples to 480
|
||||
audioFrame._payloadDataLengthInSamples =
|
||||
(outLen / _audioFrame._audioChannel);
|
||||
audioFrame._frequencyInHz = desiredFreqHz;
|
||||
// convert samples_per_channel_ to 480
|
||||
audioFrame.samples_per_channel_ =
|
||||
(outLen / _audioFrame.num_channels_);
|
||||
audioFrame.sample_rate_hz_ = desiredFreqHz;
|
||||
}
|
||||
else
|
||||
{
|
||||
@@ -582,7 +582,7 @@ OutputMixer::GetMixedAudio(const WebRtc_Word32 desiredFreqHz,
return -1;
}

if ((channels == 2) && (audioFrame._audioChannel == 1))
if ((channels == 2) && (audioFrame.num_channels_ == 1))
{
AudioFrameOperations::MonoToStereo(audioFrame);
}
@@ -593,12 +593,12 @@ OutputMixer::GetMixedAudio(const WebRtc_Word32 desiredFreqHz,
WebRtc_Word32
OutputMixer::DoOperationsOnCombinedSignal()
{
if (_audioFrame._frequencyInHz != _mixingFrequencyHz)
if (_audioFrame.sample_rate_hz_ != _mixingFrequencyHz)
{
WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,-1),
"OutputMixer::DoOperationsOnCombinedSignal() => "
"mixing frequency = %d", _audioFrame._frequencyInHz);
_mixingFrequencyHz = _audioFrame._frequencyInHz;
"mixing frequency = %d", _audioFrame.sample_rate_hz_);
_mixingFrequencyHz = _audioFrame.sample_rate_hz_;
}

// --- Insert inband Dtmf tone
@@ -610,7 +610,7 @@ OutputMixer::DoOperationsOnCombinedSignal()
// Scale left and/or right channel(s) if balance is active
if (_panLeft != 1.0 || _panRight != 1.0)
{
if (_audioFrame._audioChannel == 1)
if (_audioFrame.num_channels_ == 1)
{
AudioFrameOperations::MonoToStereo(_audioFrame);
}
@@ -619,7 +619,7 @@ OutputMixer::DoOperationsOnCombinedSignal()
// Pure stereo mode (we are receiving a stereo signal).
}

assert(_audioFrame._audioChannel == 2);
assert(_audioFrame.num_channels_ == 2);
AudioFrameOperations::Scale(_panLeft, _panRight, _audioFrame);
}

@@ -632,15 +632,15 @@ OutputMixer::DoOperationsOnCombinedSignal()
if (_externalMedia)
{
CriticalSectionScoped cs(&_callbackCritSect);
const bool isStereo = (_audioFrame._audioChannel == 2);
const bool isStereo = (_audioFrame.num_channels_ == 2);
if (_externalMediaCallbackPtr)
{
_externalMediaCallbackPtr->Process(
-1,
kPlaybackAllChannelsMixed,
(WebRtc_Word16*)_audioFrame._payloadData,
_audioFrame._payloadDataLengthInSamples,
_audioFrame._frequencyInHz,
(WebRtc_Word16*)_audioFrame.data_,
_audioFrame.samples_per_channel_,
_audioFrame.sample_rate_hz_,
isStereo);
}
}
@@ -664,31 +664,31 @@ OutputMixer::APMAnalyzeReverseStream()
// Convert from mixing frequency to APM frequency.
// Sending side determines APM frequency.

if (audioFrame._audioChannel == 1)
if (audioFrame.num_channels_ == 1)
{
_apmResampler.ResetIfNeeded(_audioFrame._frequencyInHz,
_apmResampler.ResetIfNeeded(_audioFrame.sample_rate_hz_,
_audioProcessingModulePtr->sample_rate_hz(),
kResamplerSynchronous);
}
else
{
_apmResampler.ResetIfNeeded(_audioFrame._frequencyInHz,
_apmResampler.ResetIfNeeded(_audioFrame.sample_rate_hz_,
_audioProcessingModulePtr->sample_rate_hz(),
kResamplerSynchronousStereo);
}
if (_apmResampler.Push(
_audioFrame._payloadData,
_audioFrame._payloadDataLengthInSamples*_audioFrame._audioChannel,
audioFrame._payloadData,
AudioFrame::kMaxAudioFrameSizeSamples,
_audioFrame.data_,
_audioFrame.samples_per_channel_*_audioFrame.num_channels_,
audioFrame.data_,
AudioFrame::kMaxDataSizeSamples,
outLen) == 0)
{
audioFrame._payloadDataLengthInSamples =
(outLen / _audioFrame._audioChannel);
audioFrame._frequencyInHz = _audioProcessingModulePtr->sample_rate_hz();
audioFrame.samples_per_channel_ =
(outLen / _audioFrame.num_channels_);
audioFrame.sample_rate_hz_ = _audioProcessingModulePtr->sample_rate_hz();
}

if (audioFrame._audioChannel == 2)
if (audioFrame.num_channels_ == 2)
{
AudioFrameOperations::StereoToMono(audioFrame);
}
@@ -709,11 +709,11 @@ OutputMixer::InsertInbandDtmfTone()
{
WebRtc_UWord16 sampleRate(0);
_dtmfGenerator.GetSampleRate(sampleRate);
if (sampleRate != _audioFrame._frequencyInHz)
if (sampleRate != _audioFrame.sample_rate_hz_)
{
// Update sample rate of Dtmf tone since the mixing frequency changed.
_dtmfGenerator.SetSampleRate(
(WebRtc_UWord16)(_audioFrame._frequencyInHz));
(WebRtc_UWord16)(_audioFrame.sample_rate_hz_));
// Reset the tone to be added taking the new sample rate into account.
_dtmfGenerator.ResetTone();
}
@@ -729,21 +729,21 @@ OutputMixer::InsertInbandDtmfTone()
}

// replace mixed audio with Dtmf tone
if (_audioFrame._audioChannel == 1)
if (_audioFrame.num_channels_ == 1)
{
// mono
memcpy(_audioFrame._payloadData, toneBuffer, sizeof(WebRtc_Word16)
memcpy(_audioFrame.data_, toneBuffer, sizeof(WebRtc_Word16)
* toneSamples);
} else
{
// stereo
for (int i = 0; i < _audioFrame._payloadDataLengthInSamples; i++)
for (int i = 0; i < _audioFrame.samples_per_channel_; i++)
{
_audioFrame._payloadData[2 * i] = toneBuffer[i];
_audioFrame._payloadData[2 * i + 1] = 0;
_audioFrame.data_[2 * i] = toneBuffer[i];
_audioFrame.data_[2 * i + 1] = 0;
}
}
assert(_audioFrame._payloadDataLengthInSamples == toneSamples);
assert(_audioFrame.samples_per_channel_ == toneSamples);

return 0;
}

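The stereo branch in the hunk above writes the DTMF tone into interleaved storage: even indices of data_ carry the left channel, odd indices the right. A minimal stand-alone sketch of that layout (local names, not part of this commit):

#include <cstdint>
#include <vector>

int main() {
  const int samples_per_channel = 480;                 // one 10 ms frame at 48 kHz
  std::vector<int16_t> toneBuffer(samples_per_channel, 1000);
  std::vector<int16_t> data(2 * samples_per_channel);  // interleaved L/R storage
  for (int i = 0; i < samples_per_channel; ++i) {
    data[2 * i] = toneBuffer[i];  // left channel carries the tone
    data[2 * i + 1] = 0;          // right channel stays silent
  }
  return 0;
}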
@@ -404,15 +404,15 @@ TransmitMixer::PrepareDemux(const void* audioSamples,
if (_externalMedia)
{
CriticalSectionScoped cs(&_callbackCritSect);
const bool isStereo = (_audioFrame._audioChannel == 2);
const bool isStereo = (_audioFrame.num_channels_ == 2);
if (_externalMediaCallbackPtr)
{
_externalMediaCallbackPtr->Process(
-1,
kRecordingAllChannelsMixed,
(WebRtc_Word16*) _audioFrame._payloadData,
_audioFrame._payloadDataLengthInSamples,
_audioFrame._frequencyInHz,
(WebRtc_Word16*) _audioFrame.data_,
_audioFrame.samples_per_channel_,
_audioFrame.sample_rate_hz_,
isStereo);
}
}
@@ -1181,22 +1181,22 @@ TransmitMixer::GenerateAudioFrame(const WebRtc_Word16 audioSamples[],
if (_audioResampler.Push(
(WebRtc_Word16*) audioSamples,
nSamples * nChannels,
_audioFrame._payloadData,
AudioFrame::kMaxAudioFrameSizeSamples,
(int&) _audioFrame._payloadDataLengthInSamples) == -1)
_audioFrame.data_,
AudioFrame::kMaxDataSizeSamples,
(int&) _audioFrame.samples_per_channel_) == -1)
{
WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_instanceId, -1),
"TransmitMixer::GenerateAudioFrame() resampling failed");
return -1;
}

_audioFrame._payloadDataLengthInSamples /= nChannels;
_audioFrame._id = _instanceId;
_audioFrame._timeStamp = -1;
_audioFrame._frequencyInHz = mixingFrequency;
_audioFrame._speechType = AudioFrame::kNormalSpeech;
_audioFrame._vadActivity = AudioFrame::kVadUnknown;
_audioFrame._audioChannel = nChannels;
_audioFrame.samples_per_channel_ /= nChannels;
_audioFrame.id_ = _instanceId;
_audioFrame.timestamp_ = -1;
_audioFrame.sample_rate_hz_ = mixingFrequency;
_audioFrame.speech_type_ = AudioFrame::kNormalSpeech;
_audioFrame.vad_activity_ = AudioFrame::kVadUnknown;
_audioFrame.num_channels_ = nChannels;

return 0;
}
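The GenerateAudioFrame hunk above touches every renamed member in one place. A compilable stand-in that mirrors those field assignments (MyAudioFrame is a local struct for illustration, not webrtc::AudioFrame, and the buffer size is an assumption):

#include <cstdint>

// Local stand-in for the renamed AudioFrame members.
struct MyAudioFrame {
  int id_ = 0;
  uint32_t timestamp_ = 0;
  int sample_rate_hz_ = 0;
  int samples_per_channel_ = 0;
  int num_channels_ = 0;
  int16_t data_[2 * 480] = {};  // room for 10 ms of stereo at 48 kHz
};

int main() {
  MyAudioFrame frame;
  frame.id_ = 1;
  frame.sample_rate_hz_ = 16000;                             // mixing frequency
  frame.samples_per_channel_ = frame.sample_rate_hz_ / 100;  // 160 samples per 10 ms
  frame.num_channels_ = 1;
  return 0;
}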
@@ -1253,14 +1253,14 @@ WebRtc_Word32 TransmitMixer::MixOrReplaceAudioWithFile(
}
}

assert(_audioFrame._payloadDataLengthInSamples == fileSamples);
assert(_audioFrame.samples_per_channel_ == fileSamples);

if (_mixFileWithMicrophone)
{
// Currently file stream is always mono.
// TODO(xians): Change the code when FilePlayer supports real stereo.
Utility::MixWithSat(_audioFrame._payloadData,
static_cast<int>(_audioFrame._audioChannel),
Utility::MixWithSat(_audioFrame.data_,
static_cast<int>(_audioFrame.num_channels_),
fileBuffer.get(),
1,
static_cast<int>(fileSamples));
@@ -1291,16 +1291,16 @@ WebRtc_Word32 TransmitMixer::APMProcessStream(

// Check if the number of input channels has changed. Retain the number
// of output channels.
if (_audioFrame._audioChannel !=
if (_audioFrame.num_channels_ !=
_audioProcessingModulePtr->num_input_channels())
{
if (_audioProcessingModulePtr->set_num_channels(
_audioFrame._audioChannel,
_audioFrame.num_channels_,
_audioProcessingModulePtr->num_output_channels()))
{
WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
"AudioProcessing::set_num_channels(%d, %d) => error",
_audioFrame._frequencyInHz,
_audioFrame.sample_rate_hz_,
_audioProcessingModulePtr->num_output_channels());
}
}
@@ -1308,14 +1308,14 @@ WebRtc_Word32 TransmitMixer::APMProcessStream(
// If the frequency has changed we need to change APM settings
// Sending side is "master"
if (_audioProcessingModulePtr->sample_rate_hz() !=
_audioFrame._frequencyInHz)
_audioFrame.sample_rate_hz_)
{
if (_audioProcessingModulePtr->set_sample_rate_hz(
_audioFrame._frequencyInHz))
_audioFrame.sample_rate_hz_))
{
WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
"AudioProcessing::set_sample_rate_hz(%u) => error",
_audioFrame._frequencyInHz);
_audioFrame.sample_rate_hz_);
}
}

@@ -1376,7 +1376,7 @@ WebRtc_Word32 TransmitMixer::APMProcessStream(
int TransmitMixer::TypingDetection()
{
// We let the VAD determine if we're using this feature or not.
if (_audioFrame._vadActivity == AudioFrame::kVadUnknown)
if (_audioFrame.vad_activity_ == AudioFrame::kVadUnknown)
{
return (0);
}
@@ -1388,7 +1388,7 @@ int TransmitMixer::TypingDetection()
return (-1);
}

if (_audioFrame._vadActivity == AudioFrame::kVadActive)
if (_audioFrame.vad_activity_ == AudioFrame::kVadActive)
_timeActive++;
else
_timeActive = 0;
@@ -1403,7 +1403,7 @@ int TransmitMixer::TypingDetection()
++_timeSinceLastTyping;
}

if (keyPressed && (_audioFrame._vadActivity == AudioFrame::kVadActive)
if (keyPressed && (_audioFrame.vad_activity_ == AudioFrame::kVadActive)
&& (_timeActive < _timeWindow))
{
_penaltyCounter += _costPerTyping;

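The TypingDetection hunks above only rename vad_activity_, but the surrounding counters are hard to follow in diff form. The following self-contained sketch (assumed threshold and cost values, not the WebRTC defaults) mimics one 10 ms tick of that bookkeeping:

#include <cstdio>

int main() {
  int timeActive = 0;
  int penaltyCounter = 0;
  const int timeWindow = 10;      // assumed: frames of continuous speech after which typing is ignored
  const int costPerTyping = 100;  // assumed: penalty added per suspicious key press

  // One simulated 10 ms frame with active speech and a detected key press.
  const bool vadActive = true;
  const bool keyPressed = true;

  if (vadActive)
    timeActive++;
  else
    timeActive = 0;

  if (keyPressed && vadActive && (timeActive < timeWindow))
    penaltyCounter += costPerTyping;

  std::printf("penaltyCounter = %d\n", penaltyCounter);
  return 0;
}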
@@ -256,18 +256,18 @@ WebRtc_Word32 VoEBaseImpl::NeedMorePlayData(
_shared->output_mixer()->GetMixedAudio(samplesPerSec, nChannels,
_audioFrame);

assert(nSamples == _audioFrame._payloadDataLengthInSamples);
assert(nSamples == _audioFrame.samples_per_channel_);
assert(samplesPerSec ==
static_cast<WebRtc_UWord32>(_audioFrame._frequencyInHz));
static_cast<WebRtc_UWord32>(_audioFrame.sample_rate_hz_));

// Deliver audio (PCM) samples to the ADM
memcpy(
(WebRtc_Word16*) audioSamples,
(const WebRtc_Word16*) _audioFrame._payloadData,
sizeof(WebRtc_Word16) * (_audioFrame._payloadDataLengthInSamples
* _audioFrame._audioChannel));
(const WebRtc_Word16*) _audioFrame.data_,
sizeof(WebRtc_Word16) * (_audioFrame.samples_per_channel_
* _audioFrame.num_channels_));

nSamplesOut = _audioFrame._payloadDataLengthInSamples;
nSamplesOut = _audioFrame.samples_per_channel_;

return 0;
}

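The memcpy in the hunk above copies samples_per_channel_ * num_channels_ 16-bit samples to the device. A minimal stand-alone illustration of that byte-count arithmetic (all names here are local assumptions):

#include <cstdint>
#include <cstring>
#include <vector>

int main() {
  const int samplesPerChannel = 441;  // one 10 ms frame at 44.1 kHz
  const int numChannels = 2;          // interleaved stereo
  std::vector<int16_t> data(samplesPerChannel * numChannels, 0);
  std::vector<int16_t> audioSamples(data.size());
  std::memcpy(audioSamples.data(), data.data(),
              sizeof(int16_t) * (samplesPerChannel * numChannels));  // 1764 bytes
  return 0;
}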
@@ -329,9 +329,9 @@ int VoEExternalMediaImpl::ExternalPlayoutGetData(

// Deliver audio (PCM) samples to the external sink
memcpy(speechData10ms,
audioFrame._payloadData,
sizeof(WebRtc_Word16)*(audioFrame._payloadDataLengthInSamples));
lengthSamples = audioFrame._payloadDataLengthInSamples;
audioFrame.data_,
sizeof(WebRtc_Word16)*(audioFrame.samples_per_channel_));
lengthSamples = audioFrame.samples_per_channel_;

// Store current playout delay (to be used by ExternalRecordingInsertData).
playout_delay_ms_ = current_delay_ms;