Rename AudioFrame members.
BUG=
TEST=trybots

Review URL: https://webrtc-codereview.appspot.com/542005
git-svn-id: http://webrtc.googlecode.com/svn/trunk@2164 4adac7df-926f-26a2-2b94-8c16560cd09d
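
For reference, the member renames applied throughout this change can be summarized in a minimal sketch, assembled from the diff below. This is an illustration only, not the real webrtc::AudioFrame definition; the member types shown are approximations inferred from the call sites in the diff:

    // Hypothetical sketch of the renamed AudioFrame members;
    // the old names from before this commit are noted in comments.
    struct AudioFrameSketch {
      WebRtc_Word32   id_;                   // was _id
      WebRtc_UWord32  timestamp_;            // was _timeStamp
      WebRtc_Word16*  data_;                 // was _payloadData
      WebRtc_UWord16  samples_per_channel_;  // was _payloadDataLengthInSamples
      int             sample_rate_hz_;       // was _frequencyInHz
      int             num_channels_;         // was _audioChannel
      int             speech_type_;          // was _speechType (SpeechType enum)
      int             vad_activity_;         // was _vadActivity (VADActivity enum)
      WebRtc_UWord32  energy_;               // was _energy
      WebRtc_Word32   volume_;               // was _volume
      // The constant AudioFrame::kMaxAudioFrameSizeSamples is renamed to
      // AudioFrame::kMaxDataSizeSamples.
    };

Call sites change mechanically, e.g. audioFrame._payloadData[n] becomes audioFrame.data_[n].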
@@ -586,7 +586,7 @@ ACMNetEQ::RecOut(
         }
         {
             WriteLockScoped lockCodec(*_decodeLock);
-            if(WebRtcNetEQ_RecOut(_inst[0], &(audioFrame._payloadData[0]),
+            if(WebRtcNetEQ_RecOut(_inst[0], &(audioFrame.data_[0]),
                 &payloadLenSample) != 0)
             {
                 LogError("RecOut", 0);
@@ -604,7 +604,7 @@ ACMNetEQ::RecOut(
            }
        }
        WebRtcNetEQ_GetSpeechOutputType(_inst[0], &type);
-        audioFrame._audioChannel = 1;
+        audioFrame.num_channels_ = 1;
    }
    else
    {
@@ -667,10 +667,10 @@ audio by Master (%d samples) and Slave (%d samples).",

        for(WebRtc_Word16 n = 0; n < payloadLenSample; n++)
        {
-            audioFrame._payloadData[n<<1] = payloadMaster[n];
-            audioFrame._payloadData[(n<<1)+1] = payloadSlave[n];
+            audioFrame.data_[n<<1] = payloadMaster[n];
+            audioFrame.data_[(n<<1)+1] = payloadSlave[n];
        }
-        audioFrame._audioChannel = 2;
+        audioFrame.num_channels_ = 2;

        WebRtcNetEQ_GetSpeechOutputType(_inst[0], &typeMaster);
        WebRtcNetEQ_GetSpeechOutputType(_inst[1], &typeSlave);
@@ -685,58 +685,58 @@ audio by Master (%d samples) and Slave (%d samples).",
        }
    }

-    audioFrame._payloadDataLengthInSamples = static_cast<WebRtc_UWord16>(payloadLenSample);
+    audioFrame.samples_per_channel_ = static_cast<WebRtc_UWord16>(payloadLenSample);
    // NetEq always returns 10 ms of audio.
-    _currentSampFreqKHz = static_cast<float>(audioFrame._payloadDataLengthInSamples) / 10.0f;
-    audioFrame._frequencyInHz = audioFrame._payloadDataLengthInSamples * 100;
+    _currentSampFreqKHz = static_cast<float>(audioFrame.samples_per_channel_) / 10.0f;
+    audioFrame.sample_rate_hz_ = audioFrame.samples_per_channel_ * 100;
    if(_vadStatus)
    {
        if(type == kOutputVADPassive)
        {
-            audioFrame._vadActivity = AudioFrame::kVadPassive;
-            audioFrame._speechType = AudioFrame::kNormalSpeech;
+            audioFrame.vad_activity_ = AudioFrame::kVadPassive;
+            audioFrame.speech_type_ = AudioFrame::kNormalSpeech;
        }
        else if(type == kOutputNormal)
        {
-            audioFrame._vadActivity = AudioFrame::kVadActive;
-            audioFrame._speechType = AudioFrame::kNormalSpeech;
+            audioFrame.vad_activity_ = AudioFrame::kVadActive;
+            audioFrame.speech_type_ = AudioFrame::kNormalSpeech;
        }
        else if(type == kOutputPLC)
        {
-            audioFrame._vadActivity = _previousAudioActivity;
-            audioFrame._speechType = AudioFrame::kPLC;
+            audioFrame.vad_activity_ = _previousAudioActivity;
+            audioFrame.speech_type_ = AudioFrame::kPLC;
        }
        else if(type == kOutputCNG)
        {
-            audioFrame._vadActivity = AudioFrame::kVadPassive;
-            audioFrame._speechType = AudioFrame::kCNG;
+            audioFrame.vad_activity_ = AudioFrame::kVadPassive;
+            audioFrame.speech_type_ = AudioFrame::kCNG;
        }
        else
        {
-            audioFrame._vadActivity = AudioFrame::kVadPassive;
-            audioFrame._speechType = AudioFrame::kPLCCNG;
+            audioFrame.vad_activity_ = AudioFrame::kVadPassive;
+            audioFrame.speech_type_ = AudioFrame::kPLCCNG;
        }
    }
    else
    {
        // Always return kVadUnknown when receive VAD is inactive
-        audioFrame._vadActivity = AudioFrame::kVadUnknown;
+        audioFrame.vad_activity_ = AudioFrame::kVadUnknown;

        if(type == kOutputNormal)
        {
-            audioFrame._speechType = AudioFrame::kNormalSpeech;
+            audioFrame.speech_type_ = AudioFrame::kNormalSpeech;
        }
        else if(type == kOutputPLC)
        {
-            audioFrame._speechType = AudioFrame::kPLC;
+            audioFrame.speech_type_ = AudioFrame::kPLC;
        }
        else if(type == kOutputPLCtoCNG)
        {
-            audioFrame._speechType = AudioFrame::kPLCCNG;
+            audioFrame.speech_type_ = AudioFrame::kPLCCNG;
        }
        else if(type == kOutputCNG)
        {
-            audioFrame._speechType = AudioFrame::kCNG;
+            audioFrame.speech_type_ = AudioFrame::kCNG;
        }
        else
        {
@@ -744,11 +744,11 @@ audio by Master (%d samples) and Slave (%d samples).",
            // we don't expect to get if _vadStatus is false
            WEBRTC_TRACE(webrtc::kTraceWarning, webrtc::kTraceAudioCoding, _id,
                "RecOut: NetEq returned kVadPassive while _vadStatus is false.");
-            audioFrame._vadActivity = AudioFrame::kVadUnknown;
-            audioFrame._speechType = AudioFrame::kNormalSpeech;
+            audioFrame.vad_activity_ = AudioFrame::kVadUnknown;
+            audioFrame.speech_type_ = AudioFrame::kNormalSpeech;
        }
    }
-    _previousAudioActivity = audioFrame._vadActivity;
+    _previousAudioActivity = audioFrame.vad_activity_;

    return 0;
}

@@ -74,7 +74,7 @@ void AcmNetEqTest::InsertZeroPacket(uint16_t sequence_number,
 void AcmNetEqTest::PullData(int expected_num_samples) {
   AudioFrame out_frame;
   ASSERT_EQ(0, neteq_.RecOut(out_frame));
-  ASSERT_EQ(expected_num_samples, out_frame._payloadDataLengthInSamples);
+  ASSERT_EQ(expected_num_samples, out_frame.samples_per_channel_);
 }

 TEST_F(AcmNetEqTest, NetworkStatistics) {

@@ -942,17 +942,17 @@ WebRtc_Word32 AudioCodingModuleImpl::Add10MsData(
     return -1;
   }

-  if (audio_frame._payloadDataLengthInSamples == 0) {
+  if (audio_frame.samples_per_channel_ == 0) {
     assert(false);
     WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
                  "Cannot Add 10 ms audio, payload length is zero");
     return -1;
   }
   // Allow for 8, 16, 32 and 48kHz input audio.
-  if ((audio_frame._frequencyInHz != 8000)
-      && (audio_frame._frequencyInHz != 16000)
-      && (audio_frame._frequencyInHz != 32000)
-      && (audio_frame._frequencyInHz != 48000)) {
+  if ((audio_frame.sample_rate_hz_ != 8000)
+      && (audio_frame.sample_rate_hz_ != 16000)
+      && (audio_frame.sample_rate_hz_ != 32000)
+      && (audio_frame.sample_rate_hz_ != 48000)) {
     assert(false);
     WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
                  "Cannot Add 10 ms audio, input frequency not valid");
@@ -960,8 +960,8 @@ WebRtc_Word32 AudioCodingModuleImpl::Add10MsData(
   }

   // If the length and frequency matches. We currently just support raw PCM.
-  if ((audio_frame._frequencyInHz / 100)
-      != audio_frame._payloadDataLengthInSamples) {
+  if ((audio_frame.sample_rate_hz_ / 100)
+      != audio_frame.samples_per_channel_) {
     WEBRTC_TRACE(
         webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
         "Cannot Add 10 ms audio, input frequency and length doesn't match");
@@ -971,33 +971,33 @@ WebRtc_Word32 AudioCodingModuleImpl::Add10MsData(
   // Calculate the timestamp that should be pushed to codec.
   // This might be different from the timestamp of the frame
   // due to re-sampling.
-  bool resample = ((WebRtc_Word32) audio_frame._frequencyInHz
+  bool resample = ((WebRtc_Word32) audio_frame.sample_rate_hz_
       != _sendCodecInst.plfreq);

   // If number of channels in audio doesn't match codec mode, we need
   // either mono-to-stereo or stereo-to-mono conversion.
   WebRtc_Word16 audio[WEBRTC_10MS_PCM_AUDIO];
   int audio_channels = _sendCodecInst.channels;
-  if (audio_frame._audioChannel != _sendCodecInst.channels) {
+  if (audio_frame.num_channels_ != _sendCodecInst.channels) {
     if (_sendCodecInst.channels == 2) {
       // Do mono-to-stereo conversion by copying each sample.
-      for (int k = 0; k < audio_frame._payloadDataLengthInSamples; k++) {
-        audio[k * 2] = audio_frame._payloadData[k];
-        audio[(k * 2) + 1] = audio_frame._payloadData[k];
+      for (int k = 0; k < audio_frame.samples_per_channel_; k++) {
+        audio[k * 2] = audio_frame.data_[k];
+        audio[(k * 2) + 1] = audio_frame.data_[k];
       }
     } else if (_sendCodecInst.channels == 1) {
       // Do stereo-to-mono conversion by creating the average of the stereo
       // samples.
-      for (int k = 0; k < audio_frame._payloadDataLengthInSamples; k++) {
-        audio[k] = (audio_frame._payloadData[k * 2]
-            + audio_frame._payloadData[(k * 2) + 1]) >> 1;
+      for (int k = 0; k < audio_frame.samples_per_channel_; k++) {
+        audio[k] = (audio_frame.data_[k * 2]
+            + audio_frame.data_[(k * 2) + 1]) >> 1;
       }
     }
   } else {
     // Copy payload data for future use.
-    size_t length = static_cast<size_t>(audio_frame._payloadDataLengthInSamples
+    size_t length = static_cast<size_t>(audio_frame.samples_per_channel_
         * audio_channels);
-    memcpy(audio, audio_frame._payloadData, length * sizeof(WebRtc_UWord16));
+    memcpy(audio, audio_frame.data_, length * sizeof(WebRtc_UWord16));
   }

   WebRtc_UWord32 current_timestamp;
@@ -1010,18 +1010,18 @@ WebRtc_Word32 AudioCodingModuleImpl::Add10MsData(
     WebRtc_Word16 new_length;

     // Calculate the timestamp of this frame.
-    if (_lastInTimestamp > audio_frame._timeStamp) {
+    if (_lastInTimestamp > audio_frame.timestamp_) {
       // A wrap around has happened.
       timestamp_diff = ((WebRtc_UWord32) 0xFFFFFFFF - _lastInTimestamp)
-          + audio_frame._timeStamp;
+          + audio_frame.timestamp_;
     } else {
-      timestamp_diff = audio_frame._timeStamp - _lastInTimestamp;
+      timestamp_diff = audio_frame.timestamp_ - _lastInTimestamp;
     }
     current_timestamp = _lastTimestamp + (WebRtc_UWord32)(timestamp_diff *
-        ((double) _sendCodecInst.plfreq / (double) audio_frame._frequencyInHz));
+        ((double) _sendCodecInst.plfreq / (double) audio_frame.sample_rate_hz_));

     new_length = _inputResampler.Resample10Msec(audio,
-                                                audio_frame._frequencyInHz,
+                                                audio_frame.sample_rate_hz_,
                                                 resampled_audio, send_freq,
                                                 audio_channels);

@@ -1035,13 +1035,13 @@ WebRtc_Word32 AudioCodingModuleImpl::Add10MsData(
         new_length,
         audio_channels);
   } else {
-    current_timestamp = audio_frame._timeStamp;
+    current_timestamp = audio_frame.timestamp_;

     status = _codecs[_currentSendCodecIdx]->Add10MsData(
-        current_timestamp, audio, audio_frame._payloadDataLengthInSamples,
+        current_timestamp, audio, audio_frame.samples_per_channel_,
         audio_channels);
   }
-  _lastInTimestamp = audio_frame._timeStamp;
+  _lastInTimestamp = audio_frame.timestamp_;
   _lastTimestamp = current_timestamp;
   return status;
 }
@@ -1733,16 +1733,16 @@ WebRtc_Word32 AudioCodingModuleImpl::PlayoutData10Ms(
     return -1;
   }

-  audio_frame._audioChannel = _audioFrame._audioChannel;
-  audio_frame._vadActivity = _audioFrame._vadActivity;
-  audio_frame._speechType = _audioFrame._speechType;
+  audio_frame.num_channels_ = _audioFrame.num_channels_;
+  audio_frame.vad_activity_ = _audioFrame.vad_activity_;
+  audio_frame.speech_type_ = _audioFrame.speech_type_;

-  stereo_mode = (_audioFrame._audioChannel > 1);
+  stereo_mode = (_audioFrame.num_channels_ > 1);
   // For stereo playout:
   // Master and Slave samples are interleaved starting with Master.

   const WebRtc_UWord16 receive_freq =
-      static_cast<WebRtc_UWord16>(_audioFrame._frequencyInHz);
+      static_cast<WebRtc_UWord16>(_audioFrame.sample_rate_hz_);
   bool tone_detected = false;
   WebRtc_Word16 last_detected_tone;
   WebRtc_Word16 tone;
@@ -1754,8 +1754,8 @@ WebRtc_Word32 AudioCodingModuleImpl::PlayoutData10Ms(
   if ((receive_freq != desired_freq_hz) && (desired_freq_hz != -1)) {
     // Resample payloadData.
     WebRtc_Word16 temp_len = _outputResampler.Resample10Msec(
-        _audioFrame._payloadData, receive_freq, audio_frame._payloadData,
-        desired_freq_hz, _audioFrame._audioChannel);
+        _audioFrame.data_, receive_freq, audio_frame.data_,
+        desired_freq_hz, _audioFrame.num_channels_);

     if (temp_len < 0) {
       WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
@@ -1764,55 +1764,55 @@ WebRtc_Word32 AudioCodingModuleImpl::PlayoutData10Ms(
     }

     // Set the payload data length from the resampler.
-    audio_frame._payloadDataLengthInSamples = (WebRtc_UWord16) temp_len;
+    audio_frame.samples_per_channel_ = (WebRtc_UWord16) temp_len;
     // Set the sampling frequency.
-    audio_frame._frequencyInHz = desired_freq_hz;
+    audio_frame.sample_rate_hz_ = desired_freq_hz;
   } else {
-    memcpy(audio_frame._payloadData, _audioFrame._payloadData,
-           _audioFrame._payloadDataLengthInSamples * audio_frame._audioChannel
+    memcpy(audio_frame.data_, _audioFrame.data_,
+           _audioFrame.samples_per_channel_ * audio_frame.num_channels_
            * sizeof(WebRtc_Word16));
     // Set the payload length.
-    audio_frame._payloadDataLengthInSamples =
-        _audioFrame._payloadDataLengthInSamples;
+    audio_frame.samples_per_channel_ =
+        _audioFrame.samples_per_channel_;
     // Set the sampling frequency.
-    audio_frame._frequencyInHz = receive_freq;
+    audio_frame.sample_rate_hz_ = receive_freq;
   }

   // Tone detection done for master channel.
   if (_dtmfDetector != NULL) {
     // Dtmf Detection.
-    if (audio_frame._frequencyInHz == 8000) {
-      // Use audio_frame._payloadData then Dtmf detector doesn't
+    if (audio_frame.sample_rate_hz_ == 8000) {
+      // Use audio_frame.data_ then Dtmf detector doesn't
       // need resampling.
       if (!stereo_mode) {
-        _dtmfDetector->Detect(audio_frame._payloadData,
-                              audio_frame._payloadDataLengthInSamples,
-                              audio_frame._frequencyInHz, tone_detected,
+        _dtmfDetector->Detect(audio_frame.data_,
+                              audio_frame.samples_per_channel_,
+                              audio_frame.sample_rate_hz_, tone_detected,
                               tone);
       } else {
         // We are in 8 kHz so the master channel needs only 80 samples.
         WebRtc_Word16 master_channel[80];
         for (int n = 0; n < 80; n++) {
-          master_channel[n] = audio_frame._payloadData[n << 1];
+          master_channel[n] = audio_frame.data_[n << 1];
         }
         _dtmfDetector->Detect(master_channel,
-                              audio_frame._payloadDataLengthInSamples,
-                              audio_frame._frequencyInHz, tone_detected,
+                              audio_frame.samples_per_channel_,
+                              audio_frame.sample_rate_hz_, tone_detected,
                               tone);
       }
     } else {
       // Do the detection on the audio that we got from NetEQ (_audioFrame).
       if (!stereo_mode) {
-        _dtmfDetector->Detect(_audioFrame._payloadData,
-                              _audioFrame._payloadDataLengthInSamples,
+        _dtmfDetector->Detect(_audioFrame.data_,
+                              _audioFrame.samples_per_channel_,
                               receive_freq, tone_detected, tone);
       } else {
         WebRtc_Word16 master_channel[WEBRTC_10MS_PCM_AUDIO];
-        for (int n = 0; n < _audioFrame._payloadDataLengthInSamples; n++) {
-          master_channel[n] = _audioFrame._payloadData[n << 1];
+        for (int n = 0; n < _audioFrame.samples_per_channel_; n++) {
+          master_channel[n] = _audioFrame.data_[n << 1];
         }
         _dtmfDetector->Detect(master_channel,
-                              _audioFrame._payloadDataLengthInSamples,
+                              _audioFrame.samples_per_channel_,
                               receive_freq, tone_detected, tone);
       }
     }
@@ -1844,10 +1844,10 @@ WebRtc_Word32 AudioCodingModuleImpl::PlayoutData10Ms(
     }
   }

-  audio_frame._id = _id;
-  audio_frame._volume = -1;
-  audio_frame._energy = -1;
-  audio_frame._timeStamp = 0;
+  audio_frame.id_ = _id;
+  audio_frame.volume_ = -1;
+  audio_frame.energy_ = -1;
+  audio_frame.timestamp_ = 0;

   return 0;
 }

@@ -1,5 +1,5 @@
 /*
- *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
  *
  *  Use of this source code is governed by a BSD-style license
  *  that can be found in the LICENSE file in the root of the source
@@ -430,7 +430,7 @@ APITest::PullAudioRunA()
        {
            _outFileA.Write10MsData(audioFrame);
        }
-        _receiveVADActivityA[(int)audioFrame._vadActivity]++;
+        _receiveVADActivityA[(int)audioFrame.vad_activity_]++;
    }
    return true;
}
@@ -459,7 +459,7 @@ APITest::PullAudioRunB()
        {
            _outFileB.Write10MsData(audioFrame);
        }
-        _receiveVADActivityB[(int)audioFrame._vadActivity]++;
+        _receiveVADActivityB[(int)audioFrame.vad_activity_]++;
    }
    return true;
}

@@ -242,8 +242,8 @@ bool Receiver::PlayoutData() {
   if (_playoutLengthSmpls == 0) {
     return false;
   }
-  _pcmFile.Write10MsData(audioFrame._payloadData,
-                         audioFrame._payloadDataLengthInSamples);
+  _pcmFile.Write10MsData(audioFrame.data_,
+                         audioFrame.samples_per_channel_);
   return true;
 }

@@ -155,13 +155,13 @@ WebRtc_Word32 PCMFile::Read10MsData(AudioFrame& audio_frame) {
     channels = 2;
   }

-  WebRtc_Word32 payload_size = (WebRtc_Word32) fread(audio_frame._payloadData,
+  WebRtc_Word32 payload_size = (WebRtc_Word32) fread(audio_frame.data_,
                                                      sizeof(WebRtc_UWord16),
                                                      samples_10ms_ * channels,
                                                      pcm_file_);
   if (payload_size < samples_10ms_ * channels) {
     for (int k = payload_size; k < samples_10ms_ * channels; k++) {
-      audio_frame._payloadData[k] = 0;
+      audio_frame.data_[k] = 0;
     }
     if (auto_rewind_) {
       rewind(pcm_file_);
@@ -170,34 +170,34 @@ WebRtc_Word32 PCMFile::Read10MsData(AudioFrame& audio_frame) {
       end_of_file_ = true;
     }
   }
-  audio_frame._payloadDataLengthInSamples = samples_10ms_;
-  audio_frame._frequencyInHz = frequency_;
-  audio_frame._audioChannel = channels;
-  audio_frame._timeStamp = timestamp_;
+  audio_frame.samples_per_channel_ = samples_10ms_;
+  audio_frame.sample_rate_hz_ = frequency_;
+  audio_frame.num_channels_ = channels;
+  audio_frame.timestamp_ = timestamp_;
   timestamp_ += samples_10ms_;
   return samples_10ms_;
 }

 void PCMFile::Write10MsData(AudioFrame& audio_frame) {
-  if (audio_frame._audioChannel == 1) {
+  if (audio_frame.num_channels_ == 1) {
     if (!save_stereo_) {
-      fwrite(audio_frame._payloadData, sizeof(WebRtc_UWord16),
-             audio_frame._payloadDataLengthInSamples, pcm_file_);
+      fwrite(audio_frame.data_, sizeof(WebRtc_UWord16),
+             audio_frame.samples_per_channel_, pcm_file_);
     } else {
       WebRtc_Word16* stereo_audio =
-          new WebRtc_Word16[2 * audio_frame._payloadDataLengthInSamples];
+          new WebRtc_Word16[2 * audio_frame.samples_per_channel_];
       int k;
-      for (k = 0; k < audio_frame._payloadDataLengthInSamples; k++) {
-        stereo_audio[k << 1] = audio_frame._payloadData[k];
-        stereo_audio[(k << 1) + 1] = audio_frame._payloadData[k];
+      for (k = 0; k < audio_frame.samples_per_channel_; k++) {
+        stereo_audio[k << 1] = audio_frame.data_[k];
+        stereo_audio[(k << 1) + 1] = audio_frame.data_[k];
       }
       fwrite(stereo_audio, sizeof(WebRtc_Word16),
-             2 * audio_frame._payloadDataLengthInSamples, pcm_file_);
+             2 * audio_frame.samples_per_channel_, pcm_file_);
       delete[] stereo_audio;
     }
   } else {
-    fwrite(audio_frame._payloadData, sizeof(WebRtc_Word16),
-           audio_frame._audioChannel * audio_frame._payloadDataLengthInSamples,
+    fwrite(audio_frame.data_, sizeof(WebRtc_Word16),
+           audio_frame.num_channels_ * audio_frame.samples_per_channel_,
            pcm_file_);
   }
 }

@@ -199,17 +199,17 @@ SpatialAudio::EncodeDecode(
    while(!_inFile.EndOfFile())
    {
        _inFile.Read10MsData(audioFrame);
-        for(int n = 0; n < audioFrame._payloadDataLengthInSamples; n++)
+        for(int n = 0; n < audioFrame.samples_per_channel_; n++)
        {
-            audioFrame._payloadData[n] = (WebRtc_Word16)floor(
-                audioFrame._payloadData[n] * leftPanning + 0.5);
+            audioFrame.data_[n] = (WebRtc_Word16)floor(
+                audioFrame.data_[n] * leftPanning + 0.5);
        }
        CHECK_ERROR(_acmLeft->Add10MsData(audioFrame));

-        for(int n = 0; n < audioFrame._payloadDataLengthInSamples; n++)
+        for(int n = 0; n < audioFrame.samples_per_channel_; n++)
        {
-            audioFrame._payloadData[n] = (WebRtc_Word16)floor(
-                audioFrame._payloadData[n] * rightToLeftRatio + 0.5);
+            audioFrame.data_[n] = (WebRtc_Word16)floor(
+                audioFrame.data_[n] * rightToLeftRatio + 0.5);
        }
        CHECK_ERROR(_acmRight->Add10MsData(audioFrame));

@@ -831,7 +831,7 @@ void TestAllCodecs::Run(TestPack* channel)
        CHECK_ERROR(_acmB->PlayoutData10Ms(outFreqHzB, audioFrame));

        // Write output speech to file
-        _outFileB.Write10MsData(audioFrame._payloadData, audioFrame._payloadDataLengthInSamples);
+        _outFileB.Write10MsData(audioFrame.data_, audioFrame.samples_per_channel_);
    }

    if (errorCount)

@@ -1,5 +1,5 @@
 /*
- *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
  *
  *  Use of this source code is governed by a BSD-style license
  *  that can be found in the LICENSE file in the root of the source
@@ -575,7 +575,7 @@ void TestFEC::Run()
        CHECK_ERROR(_acmA->Add10MsData(audioFrame));
        CHECK_ERROR(_acmA->Process());
        CHECK_ERROR(_acmB->PlayoutData10Ms(outFreqHzB, audioFrame));
-        _outFileB.Write10MsData(audioFrame._payloadData, audioFrame._payloadDataLengthInSamples);
+        _outFileB.Write10MsData(audioFrame.data_, audioFrame.samples_per_channel_);
        msecPassed += 10;
        if(msecPassed >= 1000)
        {

@@ -892,8 +892,8 @@ void TestStereo::Run(TestPackStereo* channel, int in_channels, int out_channels,

     // Write output speech to file
     out_file_.Write10MsData(
-        audio_frame._payloadData,
-        audio_frame._payloadDataLengthInSamples * audio_frame._audioChannel);
+        audio_frame.data_,
+        audio_frame.samples_per_channel_ * audio_frame.num_channels_);
   }

   if (error_count) {

@@ -1,5 +1,5 @@
 /*
- *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
  *
  *  Use of this source code is governed by a BSD-style license
  *  that can be found in the LICENSE file in the root of the source
@@ -337,14 +337,14 @@ void TestVADDTX::Run()
    while(!_inFileA.EndOfFile())
    {
        _inFileA.Read10MsData(audioFrame);
-        audioFrame._timeStamp = timestampA;
+        audioFrame.timestamp_ = timestampA;
        timestampA += SamplesIn10MsecA;
        CHECK_ERROR(_acmA->Add10MsData(audioFrame));

        CHECK_ERROR(_acmA->Process());

        CHECK_ERROR(_acmB->PlayoutData10Ms(outFreqHzB, audioFrame));
-        _outFileB.Write10MsData(audioFrame._payloadData, audioFrame._payloadDataLengthInSamples);
+        _outFileB.Write10MsData(audioFrame.data_, audioFrame.samples_per_channel_);
    }
#ifdef PRINT_STAT
    _monitor.PrintStatistics(_testMode);

@@ -21,8 +21,8 @@ namespace {
void SetParticipantStatistics(ParticipantStatistics* stats,
                              const AudioFrame& frame)
{
-    stats->participant = frame._id;
-    stats->level = frame._volume;
+    stats->participant = frame.id_;
+    stats->level = frame.volume_;
}
} // namespace

@@ -290,7 +290,7 @@ WebRtc_Word32 AudioConferenceMixerImpl::Process()
        {
            // Use the same number of channels as the first frame to be mixed.
            numberOfChannels = static_cast<const AudioFrame*>(
-                firstItem->GetItem())->_audioChannel;
+                firstItem->GetItem())->num_channels_;
        }
        // TODO(henrike): it might be better to decide the number of channels
        // with an API instead of dynamically.
@@ -309,11 +309,11 @@ WebRtc_Word32 AudioConferenceMixerImpl::Process()
        MixAnonomouslyFromList(*mixedAudio, additionalFramesList);
        MixAnonomouslyFromList(*mixedAudio, rampOutList);

-        if(mixedAudio->_payloadDataLengthInSamples == 0)
+        if(mixedAudio->samples_per_channel_ == 0)
        {
            // Nothing was mixed, set the audio samples to silence.
-            memset(mixedAudio->_payloadData, 0, _sampleSize);
-            mixedAudio->_payloadDataLengthInSamples = _sampleSize;
+            memset(mixedAudio->data_, 0, _sampleSize);
+            mixedAudio->samples_per_channel_ = _sampleSize;
        }
        else
        {
@@ -322,7 +322,7 @@ WebRtc_Word32 AudioConferenceMixerImpl::Process()
            retval = -1;
        }

-        _mixedAudioLevel.ComputeLevel(mixedAudio->_payloadData,_sampleSize);
+        _mixedAudioLevel.ComputeLevel(mixedAudio->data_,_sampleSize);
        audioLevel = _mixedAudioLevel.GetLevel();

        if(_mixerStatusCb)
@@ -719,7 +719,7 @@ void AudioConferenceMixerImpl::UpdateToMix(
            assert(false);
            return;
        }
-        audioFrame->_frequencyInHz = _outputFrequency;
+        audioFrame->sample_rate_hz_ = _outputFrequency;

        if(participant->GetAudioFrame(_id,*audioFrame) != 0)
        {
@@ -732,14 +732,14 @@ void AudioConferenceMixerImpl::UpdateToMix(
        // TODO(henrike): this assert triggers in some test cases where SRTP is
        // used which prevents NetEQ from making a VAD. Temporarily disable this
        // assert until the problem is fixed on a higher level.
-        // assert(audioFrame->_vadActivity != AudioFrame::kVadUnknown);
-        if (audioFrame->_vadActivity == AudioFrame::kVadUnknown)
+        // assert(audioFrame->vad_activity_ != AudioFrame::kVadUnknown);
+        if (audioFrame->vad_activity_ == AudioFrame::kVadUnknown)
        {
            WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, _id,
                         "invalid VAD state from participant");
        }

-        if(audioFrame->_vadActivity == AudioFrame::kVadActive)
+        if(audioFrame->vad_activity_ == AudioFrame::kVadActive)
        {
            if(!wasMixed)
            {
@@ -752,7 +752,7 @@ void AudioConferenceMixerImpl::UpdateToMix(
                // mixed. Only keep the ones with the highest energy.
                ListItem* replaceItem = NULL;
                CalculateEnergy(*audioFrame);
-                WebRtc_UWord32 lowestEnergy = audioFrame->_energy;
+                WebRtc_UWord32 lowestEnergy = audioFrame->energy_;

                ListItem* activeItem = activeList.First();
                while(activeItem)
@@ -760,10 +760,10 @@ void AudioConferenceMixerImpl::UpdateToMix(
                    AudioFrame* replaceFrame = static_cast<AudioFrame*>(
                        activeItem->GetItem());
                    CalculateEnergy(*replaceFrame);
-                    if(replaceFrame->_energy < lowestEnergy)
+                    if(replaceFrame->energy_ < lowestEnergy)
                    {
                        replaceItem = activeItem;
-                        lowestEnergy = replaceFrame->_energy;
+                        lowestEnergy = replaceFrame->energy_;
                    }
                    activeItem = activeList.Next(activeItem);
                }
@@ -774,7 +774,7 @@ void AudioConferenceMixerImpl::UpdateToMix(

                    bool replaceWasMixed = false;
                    MapItem* replaceParticipant = mixParticipantList.Find(
-                        replaceFrame->_id);
+                        replaceFrame->id_);
                    // When a frame is pushed to |activeList| it is also pushed
                    // to mixParticipantList with the frame's id. This means
                    // that the Find call above should never fail.
@@ -786,12 +786,12 @@ void AudioConferenceMixerImpl::UpdateToMix(
                        replaceParticipant->GetItem())->_mixHistory->
                        WasMixed(replaceWasMixed);

-                    mixParticipantList.Erase(replaceFrame->_id);
+                    mixParticipantList.Erase(replaceFrame->id_);
                    activeList.Erase(replaceItem);

                    activeList.PushFront(static_cast<void*>(audioFrame));
                    mixParticipantList.Insert(
-                        audioFrame->_id,
+                        audioFrame->id_,
                        static_cast<void*>(participant));
                    assert(mixParticipantList.Size() <=
                           kMaximumAmountOfMixedParticipants);
@@ -820,7 +820,7 @@ void AudioConferenceMixerImpl::UpdateToMix(
                }
            } else {
                activeList.PushFront(static_cast<void*>(audioFrame));
-                mixParticipantList.Insert(audioFrame->_id,
+                mixParticipantList.Insert(audioFrame->id_,
                                          static_cast<void*>(participant));
                assert(mixParticipantList.Size() <=
                       kMaximumAmountOfMixedParticipants);
@@ -864,7 +864,7 @@ void AudioConferenceMixerImpl::UpdateToMix(
        if(mixList.GetSize() < maxAudioFrameCounter + mixListStartSize)
        {
            mixList.PushBack(pair->audioFrame);
-            mixParticipantList.Insert(pair->audioFrame->_id,
+            mixParticipantList.Insert(pair->audioFrame->id_,
                                      static_cast<void*>(pair->participant));
            assert(mixParticipantList.Size() <=
                   kMaximumAmountOfMixedParticipants);
@@ -885,7 +885,7 @@ void AudioConferenceMixerImpl::UpdateToMix(
        if(mixList.GetSize() < maxAudioFrameCounter + mixListStartSize)
        {
            mixList.PushBack(pair->audioFrame);
-            mixParticipantList.Insert(pair->audioFrame->_id,
+            mixParticipantList.Insert(pair->audioFrame->id_,
                                      static_cast<void*>(pair->participant));
            assert(mixParticipantList.Size() <=
                   kMaximumAmountOfMixedParticipants);
@@ -923,7 +923,7 @@ void AudioConferenceMixerImpl::GetAdditionalAudio(
            assert(false);
            return;
        }
-        audioFrame->_frequencyInHz = _outputFrequency;
+        audioFrame->sample_rate_hz_ = _outputFrequency;
        if(participant->GetAudioFrame(_id, *audioFrame) != 0)
        {
            WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, _id,
@@ -932,7 +932,7 @@ void AudioConferenceMixerImpl::GetAdditionalAudio(
            item = nextItem;
            continue;
        }
-        if(audioFrame->_payloadDataLengthInSamples == 0)
+        if(audioFrame->samples_per_channel_ == 0)
        {
            // Empty frame. Don't use it.
            _audioFramePool->PushMemory(audioFrame);
@@ -1000,14 +1000,14 @@ void AudioConferenceMixerImpl::UpdateVADPositiveParticipants(
    {
        AudioFrame* audioFrame = static_cast<AudioFrame*>(item->GetItem());
        CalculateEnergy(*audioFrame);
-        if(audioFrame->_vadActivity == AudioFrame::kVadActive)
+        if(audioFrame->vad_activity_ == AudioFrame::kVadActive)
        {
            _scratchVadPositiveParticipants[
                _scratchVadPositiveParticipantsAmount].participant =
-                audioFrame->_id;
+                audioFrame->id_;
            _scratchVadPositiveParticipants[
                _scratchVadPositiveParticipantsAmount].level =
-                audioFrame->_volume;
+                audioFrame->volume_;
            _scratchVadPositiveParticipantsAmount++;
        }
        item = mixList.Next(item);

@@ -1,5 +1,5 @@
 /*
- *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
  *
  *  Use of this source code is governed by a BSD-style license
  *  that can be found in the LICENSE file in the root of the source
@@ -41,41 +41,41 @@ const int rampSize = sizeof(rampArray)/sizeof(rampArray[0]);
namespace webrtc {
void CalculateEnergy(AudioFrame& audioFrame)
{
-    if(audioFrame._energy != 0xffffffff)
+    if(audioFrame.energy_ != 0xffffffff)
    {
        return;
    }
-    audioFrame._energy = 0;
-    for(int position = 0; position < audioFrame._payloadDataLengthInSamples;
+    audioFrame.energy_ = 0;
+    for(int position = 0; position < audioFrame.samples_per_channel_;
        position++)
    {
        // TODO(andrew): this can easily overflow.
-        audioFrame._energy += audioFrame._payloadData[position] *
-            audioFrame._payloadData[position];
+        audioFrame.energy_ += audioFrame.data_[position] *
+            audioFrame.data_[position];
    }
}

void RampIn(AudioFrame& audioFrame)
{
-    assert(rampSize <= audioFrame._payloadDataLengthInSamples);
+    assert(rampSize <= audioFrame.samples_per_channel_);
    for(int i = 0; i < rampSize; i++)
    {
-        audioFrame._payloadData[i] = static_cast<WebRtc_Word16>
-            (rampArray[i] * audioFrame._payloadData[i]);
+        audioFrame.data_[i] = static_cast<WebRtc_Word16>
+            (rampArray[i] * audioFrame.data_[i]);
    }
}

void RampOut(AudioFrame& audioFrame)
{
-    assert(rampSize <= audioFrame._payloadDataLengthInSamples);
+    assert(rampSize <= audioFrame.samples_per_channel_);
    for(int i = 0; i < rampSize; i++)
    {
        const int rampPos = rampSize - 1 - i;
-        audioFrame._payloadData[i] = static_cast<WebRtc_Word16>
-            (rampArray[rampPos] * audioFrame._payloadData[i]);
+        audioFrame.data_[i] = static_cast<WebRtc_Word16>
+            (rampArray[rampPos] * audioFrame.data_[i]);
    }
-    memset(&audioFrame._payloadData[rampSize], 0,
-           (audioFrame._payloadDataLengthInSamples - rampSize) *
-           sizeof(audioFrame._payloadData[0]));
+    memset(&audioFrame.data_[rampSize], 0,
+           (audioFrame.samples_per_channel_ - rampSize) *
+           sizeof(audioFrame.data_[0]));
}
} // namespace webrtc

@@ -1,5 +1,5 @@
 /*
- *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
  *
  *  Use of this source code is governed by a BSD-style license
  *  that can be found in the LICENSE file in the root of the source
@@ -210,9 +210,9 @@ bool
FileWriter::WriteToFile(
    const AudioFrame& audioFrame)
{
-    WebRtc_Word32 written = (WebRtc_Word32)fwrite(audioFrame._payloadData,sizeof(WebRtc_Word16),audioFrame._payloadDataLengthInSamples,_file);
+    WebRtc_Word32 written = (WebRtc_Word32)fwrite(audioFrame.data_,sizeof(WebRtc_Word16),audioFrame.samples_per_channel_,_file);
    // Do not flush buffers since that will add (a lot of) delay
-    return written == audioFrame._payloadDataLengthInSamples;
+    return written == audioFrame.samples_per_channel_;
}

FileReader::FileReader()
@@ -269,8 +269,8 @@ FileReader::ReadFromFile(
    AudioFrame& audioFrame)
{

-    WebRtc_Word16 buffer[AudioFrame::kMaxAudioFrameSizeSamples];
-    LoopedFileRead(buffer,AudioFrame::kMaxAudioFrameSizeSamples,_sampleSize,_file);
+    WebRtc_Word16 buffer[AudioFrame::kMaxDataSizeSamples];
+    LoopedFileRead(buffer,AudioFrame::kMaxDataSizeSamples,_sampleSize,_file);

    bool vad = false;
    GetVAD(buffer,_sampleSize,vad);

@@ -1,5 +1,5 @@
 /*
- *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
  *
  *  Use of this source code is governed by a BSD-style license
  *  that can be found in the LICENSE file in the root of the source
@@ -188,27 +188,27 @@ int AudioBuffer::samples_per_split_channel() const {

 // TODO(andrew): Do deinterleaving and mixing in one step?
 void AudioBuffer::DeinterleaveFrom(AudioFrame* frame) {
-  assert(frame->_audioChannel <= max_num_channels_);
-  assert(frame->_payloadDataLengthInSamples == samples_per_channel_);
+  assert(frame->num_channels_ <= max_num_channels_);
+  assert(frame->samples_per_channel_ == samples_per_channel_);

-  num_channels_ = frame->_audioChannel;
+  num_channels_ = frame->num_channels_;
   data_was_mixed_ = false;
   num_mixed_channels_ = 0;
   num_mixed_low_pass_channels_ = 0;
   reference_copied_ = false;
-  activity_ = frame->_vadActivity;
+  activity_ = frame->vad_activity_;
   is_muted_ = false;
-  if (frame->_energy == 0) {
+  if (frame->energy_ == 0) {
     is_muted_ = true;
   }

   if (num_channels_ == 1) {
     // We can get away with a pointer assignment in this case.
-    data_ = frame->_payloadData;
+    data_ = frame->data_;
     return;
   }

-  int16_t* interleaved = frame->_payloadData;
+  int16_t* interleaved = frame->data_;
   for (int i = 0; i < num_channels_; i++) {
     int16_t* deinterleaved = channels_[i].data;
     int interleaved_idx = i;
@@ -220,9 +220,9 @@ void AudioBuffer::DeinterleaveFrom(AudioFrame* frame) {
 }

 void AudioBuffer::InterleaveTo(AudioFrame* frame, bool data_changed) const {
-  assert(frame->_audioChannel == num_channels_);
-  assert(frame->_payloadDataLengthInSamples == samples_per_channel_);
-  frame->_vadActivity = activity_;
+  assert(frame->num_channels_ == num_channels_);
+  assert(frame->samples_per_channel_ == samples_per_channel_);
+  frame->vad_activity_ = activity_;

   if (!data_changed) {
     return;
@@ -230,18 +230,18 @@ void AudioBuffer::InterleaveTo(AudioFrame* frame, bool data_changed) const {

   if (num_channels_ == 1) {
     if (data_was_mixed_) {
-      memcpy(frame->_payloadData,
+      memcpy(frame->data_,
              channels_[0].data,
              sizeof(int16_t) * samples_per_channel_);
     } else {
       // These should point to the same buffer in this case.
-      assert(data_ == frame->_payloadData);
+      assert(data_ == frame->data_);
     }

     return;
   }

-  int16_t* interleaved = frame->_payloadData;
+  int16_t* interleaved = frame->data_;
   for (int i = 0; i < num_channels_; i++) {
     int16_t* deinterleaved = channels_[i].data;
     int interleaved_idx = i;

@@ -258,15 +258,15 @@ int AudioProcessingImpl::ProcessStream(AudioFrame* frame) {
     return kNullPointerError;
   }

-  if (frame->_frequencyInHz != sample_rate_hz_) {
+  if (frame->sample_rate_hz_ != sample_rate_hz_) {
     return kBadSampleRateError;
   }

-  if (frame->_audioChannel != num_input_channels_) {
+  if (frame->num_channels_ != num_input_channels_) {
     return kBadNumberChannelsError;
   }

-  if (frame->_payloadDataLengthInSamples != samples_per_channel_) {
+  if (frame->samples_per_channel_ != samples_per_channel_) {
     return kBadDataLengthError;
   }

@@ -275,9 +275,9 @@ int AudioProcessingImpl::ProcessStream(AudioFrame* frame) {
     event_msg_->set_type(audioproc::Event::STREAM);
     audioproc::Stream* msg = event_msg_->mutable_stream();
     const size_t data_size = sizeof(int16_t) *
-                             frame->_payloadDataLengthInSamples *
-                             frame->_audioChannel;
-    msg->set_input_data(frame->_payloadData, data_size);
+                             frame->samples_per_channel_ *
+                             frame->num_channels_;
+    msg->set_input_data(frame->data_, data_size);
     msg->set_delay(stream_delay_ms_);
     msg->set_drift(echo_cancellation_->stream_drift_samples());
     msg->set_level(gain_control_->stream_analog_level());
@@ -289,7 +289,7 @@ int AudioProcessingImpl::ProcessStream(AudioFrame* frame) {
   // TODO(ajm): experiment with mixing and AEC placement.
   if (num_output_channels_ < num_input_channels_) {
     capture_audio_->Mix(num_output_channels_);
-    frame->_audioChannel = num_output_channels_;
+    frame->num_channels_ = num_output_channels_;
   }

   bool data_processed = is_data_processed();
@@ -367,9 +367,9 @@ int AudioProcessingImpl::ProcessStream(AudioFrame* frame) {
   if (debug_file_->Open()) {
     audioproc::Stream* msg = event_msg_->mutable_stream();
     const size_t data_size = sizeof(int16_t) *
-                             frame->_payloadDataLengthInSamples *
-                             frame->_audioChannel;
-    msg->set_output_data(frame->_payloadData, data_size);
+                             frame->samples_per_channel_ *
+                             frame->num_channels_;
+    msg->set_output_data(frame->data_, data_size);
     err = WriteMessageToDebugFile();
     if (err != kNoError) {
       return err;
@@ -389,15 +389,15 @@ int AudioProcessingImpl::AnalyzeReverseStream(AudioFrame* frame) {
     return kNullPointerError;
   }

-  if (frame->_frequencyInHz != sample_rate_hz_) {
+  if (frame->sample_rate_hz_ != sample_rate_hz_) {
     return kBadSampleRateError;
   }

-  if (frame->_audioChannel != num_reverse_channels_) {
+  if (frame->num_channels_ != num_reverse_channels_) {
     return kBadNumberChannelsError;
   }

-  if (frame->_payloadDataLengthInSamples != samples_per_channel_) {
+  if (frame->samples_per_channel_ != samples_per_channel_) {
     return kBadDataLengthError;
   }

@@ -406,9 +406,9 @@ int AudioProcessingImpl::AnalyzeReverseStream(AudioFrame* frame) {
     event_msg_->set_type(audioproc::Event::REVERSE_STREAM);
     audioproc::ReverseStream* msg = event_msg_->mutable_reverse_stream();
     const size_t data_size = sizeof(int16_t) *
-                             frame->_payloadDataLengthInSamples *
-                             frame->_audioChannel;
-    msg->set_data(frame->_payloadData, data_size);
+                             frame->samples_per_channel_ *
+                             frame->num_channels_;
+    msg->set_data(frame->data_, data_size);
     err = WriteMessageToDebugFile();
     if (err != kNoError) {
       return err;

@@ -150,7 +150,7 @@ class AudioProcessing : public Module {
 // must be called prior to processing the current frame. Any getter function
 // with the stream_ tag which is needed should be called after processing.
 //
-// The |_frequencyInHz|, |_audioChannel|, and |_payloadDataLengthInSamples|
+// The |sample_rate_hz_|, |num_channels_|, and |samples_per_channel_|
 // members of |frame| must be valid, and correspond to settings supplied
 // to APM.
 virtual int ProcessStream(AudioFrame* frame) = 0;

@@ -165,7 +165,7 @@ class AudioProcessing : public Module {
 // typically will not be used. If you're not sure what to pass in here,
 // chances are you don't need to use it.
 //
-// The |_frequencyInHz|, |_audioChannel|, and |_payloadDataLengthInSamples|
+// The |sample_rate_hz_|, |num_channels_|, and |samples_per_channel_|
 // members of |frame| must be valid.
 //
 // TODO(ajm): add const to input; requires an implementation fix.

@@ -554,7 +554,7 @@ class NoiseSuppression {
 // external VAD decision.
 //
 // In addition to |stream_has_voice()| the VAD decision is provided through the
-// |AudioFrame| passed to |ProcessStream()|. The |_vadActivity| member will be
+// |AudioFrame| passed to |ProcessStream()|. The |vad_activity_| member will be
 // modified to reflect the current decision.
 class VoiceDetection {
 public:
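To illustrate the contract documented above (a hedged sketch, not code from this change; apm is assumed to be an initialized AudioProcessing pointer):

AudioFrame frame;
frame.sample_rate_hz_ = 16000;
frame.num_channels_ = 1;
frame.samples_per_channel_ = 160;  // 10 ms at 16 kHz.
// ... fill frame.data_ with 160 samples ...
if (apm->ProcessStream(&frame) == apm->kNoError &&
    frame.vad_activity_ == AudioFrame::kVadActive) {
  // With VoiceDetection enabled, the call has overwritten vad_activity_.
}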
@@ -546,11 +546,11 @@ void void_main(int argc, char* argv[]) {
 apm->set_num_reverse_channels(msg.num_reverse_channels()));

 samples_per_channel = msg.sample_rate() / 100;
-far_frame._frequencyInHz = msg.sample_rate();
-far_frame._payloadDataLengthInSamples = samples_per_channel;
-far_frame._audioChannel = msg.num_reverse_channels();
-near_frame._frequencyInHz = msg.sample_rate();
-near_frame._payloadDataLengthInSamples = samples_per_channel;
+far_frame.sample_rate_hz_ = msg.sample_rate();
+far_frame.samples_per_channel_ = samples_per_channel;
+far_frame.num_channels_ = msg.num_reverse_channels();
+near_frame.sample_rate_hz_ = msg.sample_rate();
+near_frame.samples_per_channel_ = samples_per_channel;

 if (verbose) {
 printf("Init at frame: %d (primary), %d (reverse)\n",

@@ -569,8 +569,8 @@ void void_main(int argc, char* argv[]) {

 ASSERT_TRUE(msg.has_data());
 ASSERT_EQ(sizeof(int16_t) * samples_per_channel *
-far_frame._audioChannel, msg.data().size());
-memcpy(far_frame._payloadData, msg.data().data(), msg.data().size());
+far_frame.num_channels_, msg.data().size());
+memcpy(far_frame.data_, msg.data().data(), msg.data().size());

 if (perf_testing) {
 t0 = TickTime::Now();

@@ -597,12 +597,12 @@ void void_main(int argc, char* argv[]) {
 primary_count++;

 // ProcessStream could have changed this for the output frame.
-near_frame._audioChannel = apm->num_input_channels();
+near_frame.num_channels_ = apm->num_input_channels();

 ASSERT_TRUE(msg.has_input_data());
 ASSERT_EQ(sizeof(int16_t) * samples_per_channel *
-near_frame._audioChannel, msg.input_data().size());
-memcpy(near_frame._payloadData,
+near_frame.num_channels_, msg.input_data().size());
+memcpy(near_frame.data_,
 msg.input_data().data(),
 msg.input_data().size());

@@ -630,7 +630,7 @@ void void_main(int argc, char* argv[]) {
 }
 ASSERT_TRUE(err == apm->kNoError ||
 err == apm->kBadStreamParameterWarning);
-ASSERT_TRUE(near_frame._audioChannel == apm->num_output_channels());
+ASSERT_TRUE(near_frame.num_channels_ == apm->num_output_channels());

 capture_level = apm->gain_control()->stream_analog_level();

@@ -659,8 +659,8 @@ void void_main(int argc, char* argv[]) {
 }
 }

-size_t size = samples_per_channel * near_frame._audioChannel;
-ASSERT_EQ(size, fwrite(near_frame._payloadData,
+size_t size = samples_per_channel * near_frame.num_channels_;
+ASSERT_EQ(size, fwrite(near_frame.data_,
 sizeof(int16_t),
 size,
 out_file));

@@ -700,11 +700,11 @@ void void_main(int argc, char* argv[]) {
 }
 }

-far_frame._frequencyInHz = sample_rate_hz;
-far_frame._payloadDataLengthInSamples = samples_per_channel;
-far_frame._audioChannel = num_render_channels;
-near_frame._frequencyInHz = sample_rate_hz;
-near_frame._payloadDataLengthInSamples = samples_per_channel;
+far_frame.sample_rate_hz_ = sample_rate_hz;
+far_frame.samples_per_channel_ = samples_per_channel;
+far_frame.num_channels_ = num_render_channels;
+near_frame.sample_rate_hz_ = sample_rate_hz;
+near_frame.samples_per_channel_ = samples_per_channel;

 if (event == kInitializeEvent || event == kResetEventDeprecated) {
 ASSERT_EQ(1u,

@@ -724,11 +724,11 @@ void void_main(int argc, char* argv[]) {
 apm->echo_cancellation()->set_device_sample_rate_hz(
 device_sample_rate_hz));

-far_frame._frequencyInHz = sample_rate_hz;
-far_frame._payloadDataLengthInSamples = samples_per_channel;
-far_frame._audioChannel = num_render_channels;
-near_frame._frequencyInHz = sample_rate_hz;
-near_frame._payloadDataLengthInSamples = samples_per_channel;
+far_frame.sample_rate_hz_ = sample_rate_hz;
+far_frame.samples_per_channel_ = samples_per_channel;
+far_frame.num_channels_ = num_render_channels;
+near_frame.sample_rate_hz_ = sample_rate_hz;
+near_frame.samples_per_channel_ = samples_per_channel;

 if (verbose) {
 printf("Init at frame: %d (primary), %d (reverse)\n",

@@ -740,7 +740,7 @@ void void_main(int argc, char* argv[]) {
 reverse_count++;

 size_t size = samples_per_channel * num_render_channels;
-read_count = fread(far_frame._payloadData,
+read_count = fread(far_frame.data_,
 sizeof(int16_t),
 size,
 far_file);

@@ -778,10 +778,10 @@ void void_main(int argc, char* argv[]) {

 } else if (event == kCaptureEvent) {
 primary_count++;
-near_frame._audioChannel = num_capture_input_channels;
+near_frame.num_channels_ = num_capture_input_channels;

 size_t size = samples_per_channel * num_capture_input_channels;
-read_count = fread(near_frame._payloadData,
+read_count = fread(near_frame.data_,
 sizeof(int16_t),
 size,
 near_file);

@@ -829,7 +829,7 @@ void void_main(int argc, char* argv[]) {
 }
 ASSERT_TRUE(err == apm->kNoError ||
 err == apm->kBadStreamParameterWarning);
-ASSERT_TRUE(near_frame._audioChannel == apm->num_output_channels());
+ASSERT_TRUE(near_frame.num_channels_ == apm->num_output_channels());

 capture_level = apm->gain_control()->stream_analog_level();

@@ -858,8 +858,8 @@ void void_main(int argc, char* argv[]) {
 }
 }

-size = samples_per_channel * near_frame._audioChannel;
-ASSERT_EQ(size, fwrite(near_frame._payloadData,
+size = samples_per_channel * near_frame.num_channels_;
+ASSERT_EQ(size, fwrite(near_frame.data_,
 sizeof(int16_t),
 size,
 out_file));
@@ -193,12 +193,12 @@ void ApmTest::Init(int sample_rate_hz, int num_reverse_channels,

 // We always use 10 ms frames.
 const int samples_per_channel = sample_rate_hz / 100;
-frame_->_payloadDataLengthInSamples = samples_per_channel;
-frame_->_audioChannel = num_input_channels;
-frame_->_frequencyInHz = sample_rate_hz;
-revframe_->_payloadDataLengthInSamples = samples_per_channel;
-revframe_->_audioChannel = num_reverse_channels;
-revframe_->_frequencyInHz = sample_rate_hz;
+frame_->samples_per_channel_ = samples_per_channel;
+frame_->num_channels_ = num_input_channels;
+frame_->sample_rate_hz_ = sample_rate_hz;
+revframe_->samples_per_channel_ = samples_per_channel;
+revframe_->num_channels_ = num_reverse_channels;
+revframe_->sample_rate_hz_ = sample_rate_hz;

 if (far_file_) {
 ASSERT_EQ(0, fclose(far_file_));

@@ -249,41 +249,41 @@ T AbsValue(T a) {
 }

 void SetFrameTo(AudioFrame* frame, int16_t value) {
-for (int i = 0; i < frame->_payloadDataLengthInSamples * frame->_audioChannel;
+for (int i = 0; i < frame->samples_per_channel_ * frame->num_channels_;
 ++i) {
-frame->_payloadData[i] = value;
+frame->data_[i] = value;
 }
 }

 void SetFrameTo(AudioFrame* frame, int16_t left, int16_t right) {
-ASSERT_EQ(2, frame->_audioChannel);
-for (int i = 0; i < frame->_payloadDataLengthInSamples * 2; i += 2) {
-frame->_payloadData[i] = left;
-frame->_payloadData[i + 1] = right;
+ASSERT_EQ(2, frame->num_channels_);
+for (int i = 0; i < frame->samples_per_channel_ * 2; i += 2) {
+frame->data_[i] = left;
+frame->data_[i + 1] = right;
 }
 }

 int16_t MaxAudioFrame(const AudioFrame& frame) {
-const int length = frame._payloadDataLengthInSamples * frame._audioChannel;
-int16_t max = AbsValue(frame._payloadData[0]);
+const int length = frame.samples_per_channel_ * frame.num_channels_;
+int16_t max = AbsValue(frame.data_[0]);
 for (int i = 1; i < length; i++) {
-max = MaxValue(max, AbsValue(frame._payloadData[i]));
+max = MaxValue(max, AbsValue(frame.data_[i]));
 }

 return max;
 }

 bool FrameDataAreEqual(const AudioFrame& frame1, const AudioFrame& frame2) {
-if (frame1._payloadDataLengthInSamples !=
-frame2._payloadDataLengthInSamples) {
+if (frame1.samples_per_channel_ !=
+frame2.samples_per_channel_) {
 return false;
 }
-if (frame1._audioChannel !=
-frame2._audioChannel) {
+if (frame1.num_channels_ !=
+frame2.num_channels_) {
 return false;
 }
-if (memcmp(frame1._payloadData, frame2._payloadData,
-frame1._payloadDataLengthInSamples * frame1._audioChannel *
+if (memcmp(frame1.data_, frame2.data_,
+frame1.samples_per_channel_ * frame1.num_channels_ *
 sizeof(int16_t))) {
 return false;
 }

@@ -360,12 +360,12 @@ bool DeadlockProc(void* thread_object) {

 AudioFrame primary_frame;
 AudioFrame reverse_frame;
-primary_frame._payloadDataLengthInSamples = 320;
-primary_frame._audioChannel = 2;
-primary_frame._frequencyInHz = 32000;
-reverse_frame._payloadDataLengthInSamples = 320;
-reverse_frame._audioChannel = 2;
-reverse_frame._frequencyInHz = 32000;
+primary_frame.samples_per_channel_ = 320;
+primary_frame.num_channels_ = 2;
+primary_frame.sample_rate_hz_ = 32000;
+reverse_frame.samples_per_channel_ = 320;
+reverse_frame.num_channels_ = 2;
+reverse_frame.sample_rate_hz_ = 32000;

 ap->echo_cancellation()->Enable(true);
 ap->gain_control()->Enable(true);
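The Init() above encodes the fixed 10 ms frame geometry the tests rely on; a small sketch of the invariant:

// All APM frames are 10 ms, so the per-channel sample count is
// always the sample rate divided by 100 (e.g. 320 at 32 kHz).
void Configure10msFrame(AudioFrame* frame, int sample_rate_hz,
                        int num_channels) {
  frame->sample_rate_hz_ = sample_rate_hz;
  frame->samples_per_channel_ = sample_rate_hz / 100;
  frame->num_channels_ = num_channels;
}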
@@ -849,9 +849,9 @@ TEST_F(ApmTest, LevelEstimator) {
 // Run this test in wideband; in super-wb, the splitting filter distorts the
 // audio enough to cause deviation from the expectation for small values.
 EXPECT_EQ(apm_->kNoError, apm_->set_sample_rate_hz(16000));
-frame_->_payloadDataLengthInSamples = 160;
-frame_->_audioChannel = 2;
-frame_->_frequencyInHz = 16000;
+frame_->samples_per_channel_ = 160;
+frame_->num_channels_ = 2;
+frame_->sample_rate_hz_ = 16000;

 // Min value if no frames have been processed.
 EXPECT_EQ(127, apm_->level_estimator()->RMS());

@@ -884,14 +884,14 @@ TEST_F(ApmTest, LevelEstimator) {
 EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
 EXPECT_EQ(70, apm_->level_estimator()->RMS());

-// Min value if _energy == 0.
+// Min value if energy_ == 0.
 SetFrameTo(frame_, 10000);
-uint32_t energy = frame_->_energy; // Save default to restore below.
-frame_->_energy = 0;
+uint32_t energy = frame_->energy_; // Save default to restore below.
+frame_->energy_ = 0;
 EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
 EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
 EXPECT_EQ(127, apm_->level_estimator()->RMS());
-frame_->_energy = energy;
+frame_->energy_ = energy;

 // Verify reset after enable/disable.
 SetFrameTo(frame_, 32767);
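The expectations above assume the LevelEstimator convention that RMS() reports a level in dBFS clamped to [0, 127], with 127 as the floor for silence or no data. A reconstruction of that convention (an assumption for illustration, not code from this change):

#include <cmath>
#include <cstddef>
#include <cstdint>

// Sketch: RMS level as a positive dBFS offset; 0 is full scale
// (32768), 127 is returned for silence or when nothing was processed.
int RmsDbfs(const int16_t* samples, size_t count) {
  if (count == 0) return 127;
  double sum_sq = 0.0;
  for (size_t i = 0; i < count; ++i)
    sum_sq += static_cast<double>(samples[i]) * samples[i];
  const double rms = std::sqrt(sum_sq / count);
  if (rms < 1.0) return 127;  // Avoid log of zero; treat as silence.
  const int dbfs = static_cast<int>(-20.0 * std::log10(rms / 32768.0) + 0.5);
  return dbfs < 0 ? 0 : (dbfs > 127 ? 127 : dbfs);
}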
@@ -960,16 +960,16 @@ TEST_F(ApmTest, VoiceDetection) {
 AudioFrame::kVadUnknown
 };
 for (size_t i = 0; i < sizeof(activity)/sizeof(*activity); i++) {
-frame_->_vadActivity = activity[i];
+frame_->vad_activity_ = activity[i];
 EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
-EXPECT_EQ(activity[i], frame_->_vadActivity);
+EXPECT_EQ(activity[i], frame_->vad_activity_);
 }

 // Test that AudioFrame activity is set when VAD is enabled.
 EXPECT_EQ(apm_->kNoError, apm_->voice_detection()->Enable(true));
-frame_->_vadActivity = AudioFrame::kVadUnknown;
+frame_->vad_activity_ = AudioFrame::kVadUnknown;
 EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
-EXPECT_NE(AudioFrame::kVadUnknown, frame_->_vadActivity);
+EXPECT_NE(AudioFrame::kVadUnknown, frame_->vad_activity_);

 // TODO(bjornv): Add tests for streamed voice; stream_has_voice()
 }

@@ -979,9 +979,8 @@ TEST_F(ApmTest, VerifyDownMixing) {
 Init(kSampleRates[i], 2, 2, 1, false);
 SetFrameTo(frame_, 1000, 2000);
 AudioFrame mono_frame;
-mono_frame._payloadDataLengthInSamples =
-frame_->_payloadDataLengthInSamples;
-mono_frame._audioChannel = 1;
+mono_frame.samples_per_channel_ = frame_->samples_per_channel_;
+mono_frame.num_channels_ = 1;
 SetFrameTo(&mono_frame, 1500);
 EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
 EXPECT_TRUE(FrameDataAreEqual(*frame_, mono_frame));
@@ -1050,9 +1049,9 @@ TEST_F(ApmTest, SplittingFilter) {

 // 5. Not using super-wb.
 EXPECT_EQ(apm_->kNoError, apm_->set_sample_rate_hz(16000));
-frame_->_payloadDataLengthInSamples = 160;
-frame_->_audioChannel = 2;
-frame_->_frequencyInHz = 16000;
+frame_->samples_per_channel_ = 160;
+frame_->num_channels_ = 2;
+frame_->sample_rate_hz_ = 16000;
 // Enable AEC, which would require the filter in super-wb. We rely on the
 // first few frames of data being unaffected by the AEC.
 // TODO(andrew): This test, and the one below, rely rather tenuously on the

@@ -1073,9 +1072,9 @@ TEST_F(ApmTest, SplittingFilter) {
 // Check the test is valid. We should have distortion from the filter
 // when AEC is enabled (which won't affect the audio).
 EXPECT_EQ(apm_->kNoError, apm_->set_sample_rate_hz(32000));
-frame_->_payloadDataLengthInSamples = 320;
-frame_->_audioChannel = 2;
-frame_->_frequencyInHz = 32000;
+frame_->samples_per_channel_ = 320;
+frame_->num_channels_ = 2;
+frame_->sample_rate_hz_ = 32000;
 SetFrameTo(frame_, 1000);
 frame_copy = *frame_;
 EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(0));

@@ -1208,7 +1207,7 @@ TEST_F(ApmTest, Process) {
 while (1) {
 // Read far-end frame, always stereo.
 size_t frame_size = samples_per_channel * 2;
-size_t read_count = fread(revframe_->_payloadData,
+size_t read_count = fread(revframe_->data_,
 sizeof(int16_t),
 frame_size,
 far_file_);

@@ -1218,8 +1217,8 @@ TEST_F(ApmTest, Process) {
 break; // This is expected.
 }

-if (revframe_->_audioChannel == 1) {
-MixStereoToMono(revframe_->_payloadData, revframe_->_payloadData,
+if (revframe_->num_channels_ == 1) {
+MixStereoToMono(revframe_->data_, revframe_->data_,
 samples_per_channel);
 }

@@ -1232,7 +1231,7 @@ TEST_F(ApmTest, Process) {
 apm_->gain_control()->set_stream_analog_level(analog_level));

 // Read near-end frame, always stereo.
-read_count = fread(frame_->_payloadData,
+read_count = fread(frame_->data_,
 sizeof(int16_t),
 frame_size,
 near_file_);

@@ -1242,15 +1241,15 @@ TEST_F(ApmTest, Process) {
 break; // This is expected.
 }

-if (frame_->_audioChannel == 1) {
-MixStereoToMono(frame_->_payloadData, frame_->_payloadData,
+if (frame_->num_channels_ == 1) {
+MixStereoToMono(frame_->data_, frame_->data_,
 samples_per_channel);
 }
-frame_->_vadActivity = AudioFrame::kVadUnknown;
+frame_->vad_activity_ = AudioFrame::kVadUnknown;

 EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
 // Ensure the frame was downmixed properly.
-EXPECT_EQ(test->num_output_channels(), frame_->_audioChannel);
+EXPECT_EQ(test->num_output_channels(), frame_->num_channels_);

 max_output_average += MaxAudioFrame(*frame_);

@@ -1265,20 +1264,20 @@ TEST_F(ApmTest, Process) {
 }
 if (apm_->voice_detection()->stream_has_voice()) {
 has_voice_count++;
-EXPECT_EQ(AudioFrame::kVadActive, frame_->_vadActivity);
+EXPECT_EQ(AudioFrame::kVadActive, frame_->vad_activity_);
 } else {
-EXPECT_EQ(AudioFrame::kVadPassive, frame_->_vadActivity);
+EXPECT_EQ(AudioFrame::kVadPassive, frame_->vad_activity_);
 }

-frame_size = samples_per_channel * frame_->_audioChannel;
-size_t write_count = fwrite(frame_->_payloadData,
+frame_size = samples_per_channel * frame_->num_channels_;
+size_t write_count = fwrite(frame_->data_,
 sizeof(int16_t),
 frame_size,
 out_file_);
 ASSERT_EQ(frame_size, write_count);

 // Reset in case of downmixing.
-frame_->_audioChannel = test->num_input_channels();
+frame_->num_channels_ = test->num_input_channels();
 frame_count++;
 }
 max_output_average /= frame_count;
@@ -707,13 +707,13 @@ VideoFrame::Free()
 * exact opposite frames when deciding the resulting
 * state. To do this use the -operator.
 *
-* - _audioChannel of 1 indicated mono, and 2
+* - num_channels_ of 1 indicated mono, and 2
 * indicates stereo.
 *
-* - _payloadDataLengthInSamples is the number of
+* - samples_per_channel_ is the number of
 * samples per channel. Therefore, the total
-* number of samples in _payloadData is
-* (_payloadDataLengthInSamples * _audioChannel).
+* number of samples in data_ is
+* (samples_per_channel_ * num_channels_).
 *
 * - Stereo data is stored in interleaved fashion
 * starting with the left channel.

@@ -722,7 +722,7 @@ VideoFrame::Free()
 class AudioFrame
 {
 public:
-enum{kMaxAudioFrameSizeSamples = 3840}; // stereo 32KHz 60ms 2*32*60
+enum { kMaxDataSizeSamples = 3840 }; // stereo 32KHz 60ms 2*32*60

 enum VADActivity
 {
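The comment block above pins down the memory layout; a compact restatement (illustrative only):

// Interleaved stereo: left sample i at data_[2 * i], right at
// data_[2 * i + 1]; valid samples = samples_per_channel_ * num_channels_.
// Capacity behind kMaxDataSizeSamples = 3840: 2 channels * 32
// samples/ms (32 kHz) * 60 ms = 3840.
int16_t LeftSample(const AudioFrame& f, int i)  { return f.data_[2 * i]; }
int16_t RightSample(const AudioFrame& f, int i) { return f.data_[2 * i + 1]; }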
@@ -763,34 +763,33 @@ public:
 AudioFrame& operator+=(const AudioFrame& rhs);
 AudioFrame& operator-=(const AudioFrame& rhs);

-WebRtc_Word32 _id;
-WebRtc_UWord32 _timeStamp;
+// TODO(andrew): clean up types.
+WebRtc_Word32 id_;
+WebRtc_UWord32 timestamp_;

-// Supporting Stereo, stereo samples are interleaved
-WebRtc_Word16 _payloadData[kMaxAudioFrameSizeSamples];
-WebRtc_UWord16 _payloadDataLengthInSamples;
-int _frequencyInHz;
-WebRtc_UWord8 _audioChannel;
-SpeechType _speechType;
-VADActivity _vadActivity;
-
-WebRtc_UWord32 _energy;
-WebRtc_Word32 _volume;
+WebRtc_Word16 data_[kMaxDataSizeSamples];
+WebRtc_UWord16 samples_per_channel_;
+int sample_rate_hz_;
+WebRtc_UWord8 num_channels_;
+SpeechType speech_type_;
+VADActivity vad_activity_;
+WebRtc_UWord32 energy_;
+WebRtc_Word32 volume_; // TODO(andrew): investigate removing.
 };

 inline
 AudioFrame::AudioFrame()
 :
-_id(-1),
-_timeStamp(0),
-_payloadData(),
-_payloadDataLengthInSamples(0),
-_frequencyInHz(0),
-_audioChannel(1),
-_speechType(kUndefined),
-_vadActivity(kVadUnknown),
-_energy(0xffffffff),
-_volume(0xffffffff)
+id_(-1),
+timestamp_(0),
+data_(),
+samples_per_channel_(0),
+sample_rate_hz_(0),
+num_channels_(1),
+speech_type_(kUndefined),
+vad_activity_(kVadUnknown),
+energy_(0xffffffff),
+volume_(0xffffffff)
 {
 }

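For reference, the complete member rename applied by this change:

_payloadData                -> data_
_payloadDataLengthInSamples -> samples_per_channel_
_frequencyInHz              -> sample_rate_hz_
_audioChannel               -> num_channels_
_speechType                 -> speech_type_
_vadActivity                -> vad_activity_
_energy                     -> energy_
_volume                     -> volume_
_id                         -> id_
_timeStamp                  -> timestamp_
kMaxAudioFrameSizeSamples   -> kMaxDataSizeSamples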
@@ -813,31 +812,31 @@ AudioFrame::UpdateFrame(
 const WebRtc_Word32 volume,
 const WebRtc_Word32 energy)
 {
-_id = id;
-_timeStamp = timeStamp;
-_frequencyInHz = frequencyInHz;
-_speechType = speechType;
-_vadActivity = vadActivity;
-_volume = volume;
-_audioChannel = audioChannel;
-_energy = energy;
+id_ = id;
+timestamp_ = timeStamp;
+sample_rate_hz_ = frequencyInHz;
+speech_type_ = speechType;
+vad_activity_ = vadActivity;
+volume_ = volume;
+num_channels_ = audioChannel;
+energy_ = energy;

-if((payloadDataLengthInSamples > kMaxAudioFrameSizeSamples) ||
+if((payloadDataLengthInSamples > kMaxDataSizeSamples) ||
 (audioChannel > 2) || (audioChannel < 1))
 {
-_payloadDataLengthInSamples = 0;
+samples_per_channel_ = 0;
 return -1;
 }
-_payloadDataLengthInSamples = payloadDataLengthInSamples;
+samples_per_channel_ = payloadDataLengthInSamples;
 if(payloadData != NULL)
 {
-memcpy(_payloadData, payloadData, sizeof(WebRtc_Word16) *
-payloadDataLengthInSamples * _audioChannel);
+memcpy(data_, payloadData, sizeof(WebRtc_Word16) *
+payloadDataLengthInSamples * num_channels_);
 }
 else
 {
-memset(_payloadData,0,sizeof(WebRtc_Word16) *
-payloadDataLengthInSamples * _audioChannel);
+memset(data_,0,sizeof(WebRtc_Word16) *
+payloadDataLengthInSamples * num_channels_);
 }
 return 0;
 }

@@ -846,7 +845,7 @@ inline
 void
 AudioFrame::Mute()
 {
-memset(_payloadData, 0, _payloadDataLengthInSamples * sizeof(WebRtc_Word16));
+memset(data_, 0, samples_per_channel_ * sizeof(WebRtc_Word16));
 }

 inline

@@ -854,9 +853,9 @@ AudioFrame&
 AudioFrame::operator=(const AudioFrame& rhs)
 {
 // Sanity Check
-if((rhs._payloadDataLengthInSamples > kMaxAudioFrameSizeSamples) ||
-(rhs._audioChannel > 2) ||
-(rhs._audioChannel < 1))
+if((rhs.samples_per_channel_ > kMaxDataSizeSamples) ||
+(rhs.num_channels_ > 2) ||
+(rhs.num_channels_ < 1))
 {
 return *this;
 }

@@ -864,18 +863,18 @@ AudioFrame::operator=(const AudioFrame& rhs)
 {
 return *this;
 }
-_id = rhs._id;
-_timeStamp = rhs._timeStamp;
-_frequencyInHz = rhs._frequencyInHz;
-_speechType = rhs._speechType;
-_vadActivity = rhs._vadActivity;
-_volume = rhs._volume;
-_audioChannel = rhs._audioChannel;
-_energy = rhs._energy;
+id_ = rhs.id_;
+timestamp_ = rhs.timestamp_;
+sample_rate_hz_ = rhs.sample_rate_hz_;
+speech_type_ = rhs.speech_type_;
+vad_activity_ = rhs.vad_activity_;
+volume_ = rhs.volume_;
+num_channels_ = rhs.num_channels_;
+energy_ = rhs.energy_;

-_payloadDataLengthInSamples = rhs._payloadDataLengthInSamples;
-memcpy(_payloadData, rhs._payloadData,
-sizeof(WebRtc_Word16) * rhs._payloadDataLengthInSamples * _audioChannel);
+samples_per_channel_ = rhs.samples_per_channel_;
+memcpy(data_, rhs.data_,
+sizeof(WebRtc_Word16) * rhs.samples_per_channel_ * num_channels_);

 return *this;
 }
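One observation on the block above (a note, not part of the change): AudioFrame::Mute() zeroes only samples_per_channel_ samples, while an interleaved frame holds samples_per_channel_ * num_channels_, so a stereo frame is only half cleared. A channel-aware variant would look like:

#include <string.h>

// Sketch: mute covering the full interleaved payload.
void MuteAllChannels(AudioFrame* frame) {
  memset(frame->data_, 0,
         sizeof(WebRtc_Word16) * frame->samples_per_channel_ *
             frame->num_channels_);
}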
@@ -884,15 +883,15 @@ inline
 AudioFrame&
 AudioFrame::operator>>=(const WebRtc_Word32 rhs)
 {
-assert((_audioChannel > 0) && (_audioChannel < 3));
-if((_audioChannel > 2) ||
-(_audioChannel < 1))
+assert((num_channels_ > 0) && (num_channels_ < 3));
+if((num_channels_ > 2) ||
+(num_channels_ < 1))
 {
 return *this;
 }
-for(WebRtc_UWord16 i = 0; i < _payloadDataLengthInSamples * _audioChannel; i++)
+for(WebRtc_UWord16 i = 0; i < samples_per_channel_ * num_channels_; i++)
 {
-_payloadData[i] = WebRtc_Word16(_payloadData[i] >> rhs);
+data_[i] = WebRtc_Word16(data_[i] >> rhs);
 }
 return *this;
 }
@@ -902,39 +901,39 @@ AudioFrame&
 AudioFrame::Append(const AudioFrame& rhs)
 {
 // Sanity check
-assert((_audioChannel > 0) && (_audioChannel < 3));
-if((_audioChannel > 2) ||
-(_audioChannel < 1))
+assert((num_channels_ > 0) && (num_channels_ < 3));
+if((num_channels_ > 2) ||
+(num_channels_ < 1))
 {
 return *this;
 }
-if(_audioChannel != rhs._audioChannel)
+if(num_channels_ != rhs.num_channels_)
 {
 return *this;
 }
-if((_vadActivity == kVadActive) ||
-rhs._vadActivity == kVadActive)
+if((vad_activity_ == kVadActive) ||
+rhs.vad_activity_ == kVadActive)
 {
-_vadActivity = kVadActive;
+vad_activity_ = kVadActive;
 }
-else if((_vadActivity == kVadUnknown) ||
-rhs._vadActivity == kVadUnknown)
+else if((vad_activity_ == kVadUnknown) ||
+rhs.vad_activity_ == kVadUnknown)
 {
-_vadActivity = kVadUnknown;
+vad_activity_ = kVadUnknown;
 }
-if(_speechType != rhs._speechType)
+if(speech_type_ != rhs.speech_type_)
 {
-_speechType = kUndefined;
+speech_type_ = kUndefined;
 }

-WebRtc_UWord16 offset = _payloadDataLengthInSamples * _audioChannel;
+WebRtc_UWord16 offset = samples_per_channel_ * num_channels_;
 for(WebRtc_UWord16 i = 0;
-i < rhs._payloadDataLengthInSamples * rhs._audioChannel;
+i < rhs.samples_per_channel_ * rhs.num_channels_;
 i++)
 {
-_payloadData[offset+i] = rhs._payloadData[i];
+data_[offset+i] = rhs.data_[i];
 }
-_payloadDataLengthInSamples += rhs._payloadDataLengthInSamples;
+samples_per_channel_ += rhs.samples_per_channel_;
 return *this;
 }

@@ -944,23 +943,23 @@ AudioFrame&
 AudioFrame::operator+=(const AudioFrame& rhs)
 {
 // Sanity check
-assert((_audioChannel > 0) && (_audioChannel < 3));
-if((_audioChannel > 2) ||
-(_audioChannel < 1))
+assert((num_channels_ > 0) && (num_channels_ < 3));
+if((num_channels_ > 2) ||
+(num_channels_ < 1))
 {
 return *this;
 }
-if(_audioChannel != rhs._audioChannel)
+if(num_channels_ != rhs.num_channels_)
 {
 return *this;
 }
 bool noPrevData = false;
-if(_payloadDataLengthInSamples != rhs._payloadDataLengthInSamples)
+if(samples_per_channel_ != rhs.samples_per_channel_)
 {
-if(_payloadDataLengthInSamples == 0)
+if(samples_per_channel_ == 0)
 {
 // special case we have no data to start with
-_payloadDataLengthInSamples = rhs._payloadDataLengthInSamples;
+samples_per_channel_ = rhs.samples_per_channel_;
 noPrevData = true;
 } else
 {

@@ -968,47 +967,47 @@ AudioFrame::operator+=(const AudioFrame& rhs)
 }
 }

-if((_vadActivity == kVadActive) ||
-rhs._vadActivity == kVadActive)
+if((vad_activity_ == kVadActive) ||
+rhs.vad_activity_ == kVadActive)
 {
-_vadActivity = kVadActive;
+vad_activity_ = kVadActive;
 }
-else if((_vadActivity == kVadUnknown) ||
-rhs._vadActivity == kVadUnknown)
+else if((vad_activity_ == kVadUnknown) ||
+rhs.vad_activity_ == kVadUnknown)
 {
-_vadActivity = kVadUnknown;
+vad_activity_ = kVadUnknown;
 }

-if(_speechType != rhs._speechType)
+if(speech_type_ != rhs.speech_type_)
 {
-_speechType = kUndefined;
+speech_type_ = kUndefined;
 }

 if(noPrevData)
 {
-memcpy(_payloadData, rhs._payloadData,
-sizeof(WebRtc_Word16) * rhs._payloadDataLengthInSamples * _audioChannel);
+memcpy(data_, rhs.data_,
+sizeof(WebRtc_Word16) * rhs.samples_per_channel_ * num_channels_);
 } else
 {
 // IMPROVEMENT this can be done very fast in assembly
-for(WebRtc_UWord16 i = 0; i < _payloadDataLengthInSamples * _audioChannel; i++)
+for(WebRtc_UWord16 i = 0; i < samples_per_channel_ * num_channels_; i++)
 {
-WebRtc_Word32 wrapGuard = (WebRtc_Word32)_payloadData[i] +
-(WebRtc_Word32)rhs._payloadData[i];
+WebRtc_Word32 wrapGuard = (WebRtc_Word32)data_[i] +
+(WebRtc_Word32)rhs.data_[i];
 if(wrapGuard < -32768)
 {
-_payloadData[i] = -32768;
+data_[i] = -32768;
 }else if(wrapGuard > 32767)
 {
-_payloadData[i] = 32767;
+data_[i] = 32767;
 }else
 {
-_payloadData[i] = (WebRtc_Word16)wrapGuard;
+data_[i] = (WebRtc_Word16)wrapGuard;
 }
 }
 }
-_energy = 0xffffffff;
-_volume = 0xffffffff;
+energy_ = 0xffffffff;
+volume_ = 0xffffffff;
 return *this;
 }

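The per-sample mixing loop above is a saturating 16-bit add; isolated as a helper (illustrative sketch using the same WebRtc types):

// Widen to 32 bits so the wrap can be detected, then clamp to the
// int16 range [-32768, 32767] instead of letting the sum overflow.
static inline WebRtc_Word16 SaturatedAdd(WebRtc_Word16 a, WebRtc_Word16 b)
{
    WebRtc_Word32 sum = (WebRtc_Word32)a + (WebRtc_Word32)b;
    if(sum < -32768) return -32768;
    if(sum > 32767) return 32767;
    return (WebRtc_Word16)sum;
}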
@@ -1017,43 +1016,43 @@ AudioFrame&
 AudioFrame::operator-=(const AudioFrame& rhs)
 {
 // Sanity check
-assert((_audioChannel > 0) && (_audioChannel < 3));
-if((_audioChannel > 2)||
-(_audioChannel < 1))
+assert((num_channels_ > 0) && (num_channels_ < 3));
+if((num_channels_ > 2)||
+(num_channels_ < 1))
 {
 return *this;
 }
-if((_payloadDataLengthInSamples != rhs._payloadDataLengthInSamples) ||
-(_audioChannel != rhs._audioChannel))
+if((samples_per_channel_ != rhs.samples_per_channel_) ||
+(num_channels_ != rhs.num_channels_))
 {
 return *this;
 }
-if((_vadActivity != kVadPassive) ||
-rhs._vadActivity != kVadPassive)
+if((vad_activity_ != kVadPassive) ||
+rhs.vad_activity_ != kVadPassive)
 {
-_vadActivity = kVadUnknown;
+vad_activity_ = kVadUnknown;
 }
-_speechType = kUndefined;
+speech_type_ = kUndefined;

-for(WebRtc_UWord16 i = 0; i < _payloadDataLengthInSamples * _audioChannel; i++)
+for(WebRtc_UWord16 i = 0; i < samples_per_channel_ * num_channels_; i++)
 {
-WebRtc_Word32 wrapGuard = (WebRtc_Word32)_payloadData[i] -
-(WebRtc_Word32)rhs._payloadData[i];
+WebRtc_Word32 wrapGuard = (WebRtc_Word32)data_[i] -
+(WebRtc_Word32)rhs.data_[i];
 if(wrapGuard < -32768)
 {
-_payloadData[i] = -32768;
+data_[i] = -32768;
 }
 else if(wrapGuard > 32767)
 {
-_payloadData[i] = 32767;
+data_[i] = 32767;
 }
 else
 {
-_payloadData[i] = (WebRtc_Word16)wrapGuard;
+data_[i] = (WebRtc_Word16)wrapGuard;
 }
 }
-_energy = 0xffffffff;
-_volume = 0xffffffff;
+energy_ = 0xffffffff;
+volume_ = 0xffffffff;
 return *this;
 }

@@ -94,8 +94,8 @@ WebRtc_Word32 AudioCoder::Encode(const AudioFrame& audio,
 // Fake a timestamp in case audio doesn't contain a correct timestamp.
 // Make a local copy of the audio frame since audio is const
 AudioFrame audioFrame = audio;
-audioFrame._timeStamp = _encodeTimestamp;
-_encodeTimestamp += audioFrame._payloadDataLengthInSamples;
+audioFrame.timestamp_ = _encodeTimestamp;
+_encodeTimestamp += audioFrame.samples_per_channel_;

 // For any codec with a frame size that is longer than 10 ms the encoded
 // length in bytes should be zero until a a full frame has been encoded.

@@ -133,13 +133,13 @@ WebRtc_Word32 FilePlayerImpl::Get10msAudioFromFile(
 AudioFrame unresampledAudioFrame;
 if(STR_CASE_CMP(_codec.plname, "L16") == 0)
 {
-unresampledAudioFrame._frequencyInHz = _codec.plfreq;
+unresampledAudioFrame.sample_rate_hz_ = _codec.plfreq;

 // L16 is un-encoded data. Just pull 10 ms.
 WebRtc_UWord32 lengthInBytes =
-sizeof(unresampledAudioFrame._payloadData);
+sizeof(unresampledAudioFrame.data_);
 if (_fileModule.PlayoutAudioData(
-(WebRtc_Word8*)unresampledAudioFrame._payloadData,
+(WebRtc_Word8*)unresampledAudioFrame.data_,
 lengthInBytes) == -1)
 {
 // End of file reached.

@@ -151,7 +151,7 @@ WebRtc_Word32 FilePlayerImpl::Get10msAudioFromFile(
 return 0;
 }
 // One sample is two bytes.
-unresampledAudioFrame._payloadDataLengthInSamples =
+unresampledAudioFrame.samples_per_channel_ =
 (WebRtc_UWord16)lengthInBytes >> 1;

 }else {

@@ -181,7 +181,7 @@ WebRtc_Word32 FilePlayerImpl::Get10msAudioFromFile(
 }

 int outLen = 0;
-if(_resampler.ResetIfNeeded(unresampledAudioFrame._frequencyInHz,
+if(_resampler.ResetIfNeeded(unresampledAudioFrame.sample_rate_hz_,
 frequencyInHz, kResamplerSynchronous))
 {
 WEBRTC_TRACE(kTraceWarning, kTraceVoice, _instanceID,

@@ -192,8 +192,8 @@ WebRtc_Word32 FilePlayerImpl::Get10msAudioFromFile(
 memset(outBuffer, 0, outLen * sizeof(WebRtc_Word16));
 return 0;
 }
-_resampler.Push(unresampledAudioFrame._payloadData,
-unresampledAudioFrame._payloadDataLengthInSamples,
+_resampler.Push(unresampledAudioFrame.data_,
+unresampledAudioFrame.samples_per_channel_,
 outBuffer,
 MAX_AUDIO_BUFFER_IN_SAMPLES,
 outLen);
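Two pieces of bookkeeping above are worth spelling out (sketch only; next_timestamp is a hypothetical counter): timestamps advance in sample ticks, and L16 byte counts convert to samples with a shift because one sample is two bytes.

// Fake RTP-style timestamp: each 10 ms frame advances the clock by
// its per-channel sample count (160 ticks per frame at 16 kHz).
audioFrame.timestamp_ = next_timestamp;
next_timestamp += audioFrame.samples_per_channel_;

// L16: two bytes per 16-bit sample, so samples = bytes >> 1.
WebRtc_UWord16 samples = (WebRtc_UWord16)(lengthInBytes >> 1);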
@@ -201,46 +201,46 @@ WebRtc_Word32 FileRecorderImpl::RecordAudioToFile(
 return -1;
 }
 AudioFrame tempAudioFrame;
-tempAudioFrame._payloadDataLengthInSamples = 0;
-if( incomingAudioFrame._audioChannel == 2 &&
+tempAudioFrame.samples_per_channel_ = 0;
+if( incomingAudioFrame.num_channels_ == 2 &&
 !_moduleFile->IsStereo())
 {
 // Recording mono but incoming audio is (interleaved) stereo.
-tempAudioFrame._audioChannel = 1;
-tempAudioFrame._frequencyInHz = incomingAudioFrame._frequencyInHz;
-tempAudioFrame._payloadDataLengthInSamples =
-incomingAudioFrame._payloadDataLengthInSamples;
+tempAudioFrame.num_channels_ = 1;
+tempAudioFrame.sample_rate_hz_ = incomingAudioFrame.sample_rate_hz_;
+tempAudioFrame.samples_per_channel_ =
+incomingAudioFrame.samples_per_channel_;
 for (WebRtc_UWord16 i = 0;
-i < (incomingAudioFrame._payloadDataLengthInSamples); i++)
+i < (incomingAudioFrame.samples_per_channel_); i++)
 {
 // Sample value is the average of left and right buffer rounded to
 // closest integer value. Note samples can be either 1 or 2 byte.
-tempAudioFrame._payloadData[i] =
-((incomingAudioFrame._payloadData[2 * i] +
-incomingAudioFrame._payloadData[(2 * i) + 1] + 1) >> 1);
+tempAudioFrame.data_[i] =
+((incomingAudioFrame.data_[2 * i] +
+incomingAudioFrame.data_[(2 * i) + 1] + 1) >> 1);
 }
 }
-else if( incomingAudioFrame._audioChannel == 1 &&
+else if( incomingAudioFrame.num_channels_ == 1 &&
 _moduleFile->IsStereo())
 {
 // Recording stereo but incoming audio is mono.
-tempAudioFrame._audioChannel = 2;
-tempAudioFrame._frequencyInHz = incomingAudioFrame._frequencyInHz;
-tempAudioFrame._payloadDataLengthInSamples =
-incomingAudioFrame._payloadDataLengthInSamples;
+tempAudioFrame.num_channels_ = 2;
+tempAudioFrame.sample_rate_hz_ = incomingAudioFrame.sample_rate_hz_;
+tempAudioFrame.samples_per_channel_ =
+incomingAudioFrame.samples_per_channel_;
 for (WebRtc_UWord16 i = 0;
-i < (incomingAudioFrame._payloadDataLengthInSamples); i++)
+i < (incomingAudioFrame.samples_per_channel_); i++)
 {
 // Duplicate sample to both channels
-tempAudioFrame._payloadData[2*i] =
-incomingAudioFrame._payloadData[i];
-tempAudioFrame._payloadData[2*i+1] =
-incomingAudioFrame._payloadData[i];
+tempAudioFrame.data_[2*i] =
+incomingAudioFrame.data_[i];
+tempAudioFrame.data_[2*i+1] =
+incomingAudioFrame.data_[i];
 }
 }

 const AudioFrame* ptrAudioFrame = &incomingAudioFrame;
-if(tempAudioFrame._payloadDataLengthInSamples != 0)
+if(tempAudioFrame.samples_per_channel_ != 0)
 {
 // If ptrAudioFrame is not empty it contains the audio to be recorded.
 ptrAudioFrame = &tempAudioFrame;

@@ -269,23 +269,23 @@ WebRtc_Word32 FileRecorderImpl::RecordAudioToFile(
 }
 } else {
 int outLen = 0;
-if(ptrAudioFrame->_audioChannel == 2)
+if(ptrAudioFrame->num_channels_ == 2)
 {
 // ptrAudioFrame contains interleaved stereo audio.
-_audioResampler.ResetIfNeeded(ptrAudioFrame->_frequencyInHz,
+_audioResampler.ResetIfNeeded(ptrAudioFrame->sample_rate_hz_,
 codec_info_.plfreq,
 kResamplerSynchronousStereo);
-_audioResampler.Push(ptrAudioFrame->_payloadData,
-ptrAudioFrame->_payloadDataLengthInSamples *
-ptrAudioFrame->_audioChannel,
+_audioResampler.Push(ptrAudioFrame->data_,
+ptrAudioFrame->samples_per_channel_ *
+ptrAudioFrame->num_channels_,
 (WebRtc_Word16*)_audioBuffer,
 MAX_AUDIO_BUFFER_IN_BYTES, outLen);
 } else {
-_audioResampler.ResetIfNeeded(ptrAudioFrame->_frequencyInHz,
+_audioResampler.ResetIfNeeded(ptrAudioFrame->sample_rate_hz_,
 codec_info_.plfreq,
 kResamplerSynchronous);
-_audioResampler.Push(ptrAudioFrame->_payloadData,
-ptrAudioFrame->_payloadDataLengthInSamples,
+_audioResampler.Push(ptrAudioFrame->data_,
+ptrAudioFrame->samples_per_channel_,
 (WebRtc_Word16*)_audioBuffer,
 MAX_AUDIO_BUFFER_IN_BYTES, outLen);
 }

@@ -298,8 +298,8 @@ WebRtc_Word32 FileRecorderImpl::RecordAudioToFile(
 if (encodedLenInBytes)
 {
 WebRtc_UWord16 msOfData =
-ptrAudioFrame->_payloadDataLengthInSamples /
-WebRtc_UWord16(ptrAudioFrame->_frequencyInHz / 1000);
+ptrAudioFrame->samples_per_channel_ /
+WebRtc_UWord16(ptrAudioFrame->sample_rate_hz_ / 1000);
 if (WriteEncodedAudioData(_audioBuffer,
 (WebRtc_UWord16)encodedLenInBytes,
 msOfData, playoutTS) == -1)
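The downmix above averages each left/right pair with rounding; as a standalone helper (sketch):

// Adding 1 before the shift rounds to nearest instead of truncating:
// (3 + 4 + 1) >> 1 == 4, whereas (3 + 4) >> 1 == 3.
static inline WebRtc_Word16 AverageLR(WebRtc_Word16 left, WebRtc_Word16 right)
{
    return (WebRtc_Word16)(((WebRtc_Word32)left + (WebRtc_Word32)right + 1) >> 1);
}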
@@ -1,5 +1,5 @@
 /*
-* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source

@@ -151,13 +151,13 @@ int main(int /*argc*/, char** /*argv*/)
 }
 }
 WebRtc_UWord32 decodedDataLengthInSamples;
-if( 0 != filePlayer.Get10msAudioFromFile( audioFrame._payloadData, decodedDataLengthInSamples, audioCodec.plfreq))
+if( 0 != filePlayer.Get10msAudioFromFile( audioFrame.data_, decodedDataLengthInSamples, audioCodec.plfreq))
 {
 audioNotDone = false;
 } else
 {
-audioFrame._frequencyInHz = filePlayer.Frequency();
-audioFrame._payloadDataLengthInSamples = (WebRtc_UWord16)decodedDataLengthInSamples;
+audioFrame.sample_rate_hz_ = filePlayer.Frequency();
+audioFrame.samples_per_channel_ = (WebRtc_UWord16)decodedDataLengthInSamples;
 fileRecorder.RecordAudioToFile(audioFrame, &TickTime::Now());
 }
 }

@@ -214,9 +214,9 @@ int main(int /*argc*/, char** /*argv*/)

 // 10 ms
 AudioFrame audioFrame;
-audioFrame._payloadDataLengthInSamples = audioCodec.plfreq/100;
-memset(audioFrame._payloadData, 0, 2*audioFrame._payloadDataLengthInSamples);
-audioFrame._frequencyInHz = 8000;
+audioFrame.samples_per_channel_ = audioCodec.plfreq/100;
+memset(audioFrame.data_, 0, 2*audioFrame.samples_per_channel_);
+audioFrame.sample_rate_hz_ = 8000;

 // prepare the video frame
 videoFrame.VerifyAndAllocate(KVideoWriteSize);

@@ -338,15 +338,15 @@ int main(int /*argc*/, char** /*argv*/)
 }

 WebRtc_UWord32 decodedDataLengthInSamples;
-if( 0 != filePlayer.Get10msAudioFromFile( audioFrame._payloadData, decodedDataLengthInSamples, audioCodec.plfreq))
+if( 0 != filePlayer.Get10msAudioFromFile( audioFrame.data_, decodedDataLengthInSamples, audioCodec.plfreq))
 {
 audioNotDone = false;

 } else
 {
 ::Sleep(5);
-audioFrame._frequencyInHz = filePlayer.Frequency();
-audioFrame._payloadDataLengthInSamples = (WebRtc_UWord16)decodedDataLengthInSamples;
+audioFrame.sample_rate_hz_ = filePlayer.Frequency();
+audioFrame.samples_per_channel_ = (WebRtc_UWord16)decodedDataLengthInSamples;
 assert(0 == fileRecorder.RecordAudioToFile(audioFrame));

 audioFrameCount++;
@@ -15,70 +15,70 @@ namespace webrtc {
|
|||||||
namespace voe {
|
namespace voe {
|
||||||
|
|
||||||
int AudioFrameOperations::MonoToStereo(AudioFrame& frame) {
|
int AudioFrameOperations::MonoToStereo(AudioFrame& frame) {
|
||||||
if (frame._audioChannel != 1) {
|
if (frame.num_channels_ != 1) {
|
||||||
return -1;
|
return -1;
|
||||||
}
|
}
|
||||||
if ((frame._payloadDataLengthInSamples << 1) >=
|
if ((frame.samples_per_channel_ << 1) >=
|
||||||
AudioFrame::kMaxAudioFrameSizeSamples) {
|
AudioFrame::kMaxDataSizeSamples) {
|
||||||
// not enough memory to expand from mono to stereo
|
// not enough memory to expand from mono to stereo
|
||||||
return -1;
|
return -1;
|
||||||
}
|
}
|
||||||
|
|
||||||
int16_t payloadCopy[AudioFrame::kMaxAudioFrameSizeSamples];
|
int16_t payloadCopy[AudioFrame::kMaxDataSizeSamples];
|
||||||
memcpy(payloadCopy, frame._payloadData,
|
memcpy(payloadCopy, frame.data_,
|
||||||
sizeof(int16_t) * frame._payloadDataLengthInSamples);
|
sizeof(int16_t) * frame.samples_per_channel_);
|
||||||
|
|
||||||
for (int i = 0; i < frame._payloadDataLengthInSamples; i++) {
|
for (int i = 0; i < frame.samples_per_channel_; i++) {
|
||||||
frame._payloadData[2 * i] = payloadCopy[i];
|
frame.data_[2 * i] = payloadCopy[i];
|
||||||
frame._payloadData[2 * i + 1] = payloadCopy[i];
|
frame.data_[2 * i + 1] = payloadCopy[i];
|
||||||
}
|
}
|
||||||
|
|
||||||
frame._audioChannel = 2;
|
frame.num_channels_ = 2;
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
int AudioFrameOperations::StereoToMono(AudioFrame& frame) {
|
int AudioFrameOperations::StereoToMono(AudioFrame& frame) {
|
||||||
if (frame._audioChannel != 2) {
|
if (frame.num_channels_ != 2) {
|
||||||
return -1;
|
return -1;
|
||||||
}
|
}
|
||||||
|
|
||||||
for (int i = 0; i < frame._payloadDataLengthInSamples; i++) {
|
for (int i = 0; i < frame.samples_per_channel_; i++) {
|
||||||
frame._payloadData[i] = (frame._payloadData[2 * i] >> 1) +
|
frame.data_[i] = (frame.data_[2 * i] >> 1) +
|
||||||
(frame._payloadData[2 * i + 1] >> 1);
|
(frame.data_[2 * i + 1] >> 1);
|
||||||
}
|
}
|
||||||
|
|
||||||
frame._audioChannel = 1;
|
frame.num_channels_ = 1;
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
void AudioFrameOperations::SwapStereoChannels(AudioFrame* frame) {
|
void AudioFrameOperations::SwapStereoChannels(AudioFrame* frame) {
|
||||||
if (frame->_audioChannel != 2) return;
|
if (frame->num_channels_ != 2) return;
|
||||||
|
|
||||||
for (int i = 0; i < frame->_payloadDataLengthInSamples * 2; i += 2) {
|
for (int i = 0; i < frame->samples_per_channel_ * 2; i += 2) {
|
||||||
int16_t temp_data = frame->_payloadData[i];
|
int16_t temp_data = frame->data_[i];
|
||||||
frame->_payloadData[i] = frame->_payloadData[i + 1];
|
frame->data_[i] = frame->data_[i + 1];
|
||||||
frame->_payloadData[i + 1] = temp_data;
|
frame->data_[i + 1] = temp_data;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
void AudioFrameOperations::Mute(AudioFrame& frame) {
|
void AudioFrameOperations::Mute(AudioFrame& frame) {
|
||||||
memset(frame._payloadData, 0, sizeof(int16_t) *
|
memset(frame.data_, 0, sizeof(int16_t) *
|
||||||
frame._payloadDataLengthInSamples * frame._audioChannel);
|
frame.samples_per_channel_ * frame.num_channels_);
|
||||||
frame._energy = 0;
|
frame.energy_ = 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
int AudioFrameOperations::Scale(float left, float right, AudioFrame& frame) {
|
int AudioFrameOperations::Scale(float left, float right, AudioFrame& frame) {
|
||||||
if (frame._audioChannel != 2) {
|
if (frame.num_channels_ != 2) {
|
||||||
return -1;
|
return -1;
|
||||||
}
|
}
|
||||||
|
|
||||||
for (int i = 0; i < frame._payloadDataLengthInSamples; i++) {
|
for (int i = 0; i < frame.samples_per_channel_; i++) {
|
||||||
frame._payloadData[2 * i] =
|
frame.data_[2 * i] =
|
||||||
static_cast<int16_t>(left * frame._payloadData[2 * i]);
|
static_cast<int16_t>(left * frame.data_[2 * i]);
|
||||||
frame._payloadData[2 * i + 1] =
|
frame.data_[2 * i + 1] =
|
||||||
static_cast<int16_t>(right * frame._payloadData[2 * i + 1]);
|
static_cast<int16_t>(right * frame.data_[2 * i + 1]);
|
||||||
}
|
}
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
@@ -87,15 +87,15 @@ int AudioFrameOperations::ScaleWithSat(float scale, AudioFrame& frame) {
   int32_t temp_data = 0;
 
   // Ensure that the output result is saturated [-32768, +32767].
-  for (int i = 0; i < frame._payloadDataLengthInSamples * frame._audioChannel;
+  for (int i = 0; i < frame.samples_per_channel_ * frame.num_channels_;
        i++) {
-    temp_data = static_cast<int32_t>(scale * frame._payloadData[i]);
+    temp_data = static_cast<int32_t>(scale * frame.data_[i]);
     if (temp_data < -32768) {
-      frame._payloadData[i] = -32768;
+      frame.data_[i] = -32768;
     } else if (temp_data > 32767) {
-      frame._payloadData[i] = 32767;
+      frame.data_[i] = 32767;
     } else {
-      frame._payloadData[i] = static_cast<int16_t>(temp_data);
+      frame.data_[i] = static_cast<int16_t>(temp_data);
     }
   }
   return 0;
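Aside, a sketch rather than code from this commit: the saturation in ScaleWithSat works because the product is widened to int32_t before the comparison; clamping while still in int16_t would already have wrapped. The three branches above are equivalent to a helper like:

#include <algorithm>
#include <cstdint>

// Hypothetical helper (not in the patch): clamp a widened sample back into
// the int16_t range, mirroring the saturation branches above.
int16_t SaturateToInt16(int32_t value) {
  return static_cast<int16_t>(std::min(32767, std::max(-32768, value)));
}

For example, scaling 4000 by 10.0 gives 40000, which saturates to 32767 — the behavior the ScaleWithSatDoesNotWrapAround test below relies on.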
@@ -21,59 +21,59 @@ class AudioFrameOperationsTest : public ::testing::Test {
  protected:
   AudioFrameOperationsTest() {
     // Set typical values.
-    frame_._payloadDataLengthInSamples = 320;
-    frame_._audioChannel = 2;
+    frame_.samples_per_channel_ = 320;
+    frame_.num_channels_ = 2;
   }
 
   AudioFrame frame_;
 };
 
 void SetFrameData(AudioFrame* frame, int16_t left, int16_t right) {
-  for (int i = 0; i < frame->_payloadDataLengthInSamples * 2; i += 2) {
-    frame->_payloadData[i] = left;
-    frame->_payloadData[i + 1] = right;
+  for (int i = 0; i < frame->samples_per_channel_ * 2; i += 2) {
+    frame->data_[i] = left;
+    frame->data_[i + 1] = right;
   }
 }
 
 void SetFrameData(AudioFrame* frame, int16_t data) {
-  for (int i = 0; i < frame->_payloadDataLengthInSamples; i++) {
-    frame->_payloadData[i] = data;
+  for (int i = 0; i < frame->samples_per_channel_; i++) {
+    frame->data_[i] = data;
   }
 }
 
 void VerifyFramesAreEqual(const AudioFrame& frame1, const AudioFrame& frame2) {
-  EXPECT_EQ(frame1._audioChannel, frame2._audioChannel);
-  EXPECT_EQ(frame1._payloadDataLengthInSamples,
-            frame2._payloadDataLengthInSamples);
+  EXPECT_EQ(frame1.num_channels_, frame2.num_channels_);
+  EXPECT_EQ(frame1.samples_per_channel_,
+            frame2.samples_per_channel_);
 
-  for (int i = 0; i < frame1._payloadDataLengthInSamples * frame1._audioChannel;
+  for (int i = 0; i < frame1.samples_per_channel_ * frame1.num_channels_;
        i++) {
-    EXPECT_EQ(frame1._payloadData[i], frame2._payloadData[i]);
+    EXPECT_EQ(frame1.data_[i], frame2.data_[i]);
   }
 }
 
 TEST_F(AudioFrameOperationsTest, MonoToStereoFailsWithBadParameters) {
   EXPECT_EQ(-1, AudioFrameOperations::MonoToStereo(frame_));
 
-  frame_._payloadDataLengthInSamples = AudioFrame::kMaxAudioFrameSizeSamples;
-  frame_._audioChannel = 1;
+  frame_.samples_per_channel_ = AudioFrame::kMaxDataSizeSamples;
+  frame_.num_channels_ = 1;
   EXPECT_EQ(-1, AudioFrameOperations::MonoToStereo(frame_));
 }
 
 TEST_F(AudioFrameOperationsTest, MonoToStereoSucceeds) {
-  frame_._audioChannel = 1;
+  frame_.num_channels_ = 1;
   SetFrameData(&frame_, 1);
   EXPECT_EQ(0, AudioFrameOperations::MonoToStereo(frame_));
 
   AudioFrame stereo_frame;
-  stereo_frame._payloadDataLengthInSamples = 320;
-  stereo_frame._audioChannel = 2;
+  stereo_frame.samples_per_channel_ = 320;
+  stereo_frame.num_channels_ = 2;
   SetFrameData(&stereo_frame, 1, 1);
   VerifyFramesAreEqual(stereo_frame, frame_);
 }
 
 TEST_F(AudioFrameOperationsTest, StereoToMonoFailsWithBadParameters) {
-  frame_._audioChannel = 1;
+  frame_.num_channels_ = 1;
   EXPECT_EQ(-1, AudioFrameOperations::StereoToMono(frame_));
 }
 
@@ -82,8 +82,8 @@ TEST_F(AudioFrameOperationsTest, StereoToMonoSucceeds) {
   EXPECT_EQ(0, AudioFrameOperations::StereoToMono(frame_));
 
   AudioFrame mono_frame;
-  mono_frame._payloadDataLengthInSamples = 320;
-  mono_frame._audioChannel = 1;
+  mono_frame.samples_per_channel_ = 320;
+  mono_frame.num_channels_ = 1;
   SetFrameData(&mono_frame, 3);
   VerifyFramesAreEqual(mono_frame, frame_);
 }
@@ -93,8 +93,8 @@ TEST_F(AudioFrameOperationsTest, StereoToMonoDoesNotWrapAround) {
   EXPECT_EQ(0, AudioFrameOperations::StereoToMono(frame_));
 
   AudioFrame mono_frame;
-  mono_frame._payloadDataLengthInSamples = 320;
-  mono_frame._audioChannel = 1;
+  mono_frame.samples_per_channel_ = 320;
+  mono_frame.num_channels_ = 1;
   SetFrameData(&mono_frame, -32768);
   VerifyFramesAreEqual(mono_frame, frame_);
 }
@@ -103,8 +103,8 @@ TEST_F(AudioFrameOperationsTest, SwapStereoChannelsSucceedsOnStereo) {
   SetFrameData(&frame_, 0, 1);
 
   AudioFrame swapped_frame;
-  swapped_frame._payloadDataLengthInSamples = 320;
-  swapped_frame._audioChannel = 2;
+  swapped_frame.samples_per_channel_ = 320;
+  swapped_frame.num_channels_ = 2;
   SetFrameData(&swapped_frame, 1, 0);
 
   AudioFrameOperations::SwapStereoChannels(&frame_);
@@ -112,7 +112,7 @@ TEST_F(AudioFrameOperationsTest, SwapStereoChannelsSucceedsOnStereo) {
 }
 
 TEST_F(AudioFrameOperationsTest, SwapStereoChannelsFailsOnMono) {
-  frame_._audioChannel = 1;
+  frame_.num_channels_ = 1;
   // Set data to "stereo", despite it being a mono frame.
   SetFrameData(&frame_, 0, 1);
 
@@ -124,28 +124,28 @@ TEST_F(AudioFrameOperationsTest, SwapStereoChannelsFailsOnMono) {
 
 TEST_F(AudioFrameOperationsTest, MuteSucceeds) {
   SetFrameData(&frame_, 1000, 1000);
-  frame_._energy = 1000 * 1000 * frame_._payloadDataLengthInSamples *
-      frame_._audioChannel;
+  frame_.energy_ = 1000 * 1000 * frame_.samples_per_channel_ *
+      frame_.num_channels_;
   AudioFrameOperations::Mute(frame_);
 
   AudioFrame muted_frame;
-  muted_frame._payloadDataLengthInSamples = 320;
-  muted_frame._audioChannel = 2;
+  muted_frame.samples_per_channel_ = 320;
+  muted_frame.num_channels_ = 2;
   SetFrameData(&muted_frame, 0, 0);
-  muted_frame._energy = 0;
+  muted_frame.energy_ = 0;
   VerifyFramesAreEqual(muted_frame, frame_);
-  EXPECT_EQ(muted_frame._energy, frame_._energy);
+  EXPECT_EQ(muted_frame.energy_, frame_.energy_);
 }
 
 // TODO(andrew): should not allow negative scales.
 TEST_F(AudioFrameOperationsTest, DISABLED_ScaleFailsWithBadParameters) {
-  frame_._audioChannel = 1;
+  frame_.num_channels_ = 1;
   EXPECT_EQ(-1, AudioFrameOperations::Scale(1.0, 1.0, frame_));
 
-  frame_._audioChannel = 3;
+  frame_.num_channels_ = 3;
   EXPECT_EQ(-1, AudioFrameOperations::Scale(1.0, 1.0, frame_));
 
-  frame_._audioChannel = 2;
+  frame_.num_channels_ = 2;
   EXPECT_EQ(-1, AudioFrameOperations::Scale(-1.0, 1.0, frame_));
   EXPECT_EQ(-1, AudioFrameOperations::Scale(1.0, -1.0, frame_));
 }
@@ -156,8 +156,8 @@ TEST_F(AudioFrameOperationsTest, DISABLED_ScaleDoesNotWrapAround) {
   EXPECT_EQ(0, AudioFrameOperations::Scale(10.0, 10.0, frame_));
 
   AudioFrame clipped_frame;
-  clipped_frame._payloadDataLengthInSamples = 320;
-  clipped_frame._audioChannel = 2;
+  clipped_frame.samples_per_channel_ = 320;
+  clipped_frame.num_channels_ = 2;
   SetFrameData(&clipped_frame, 32767, -32768);
   VerifyFramesAreEqual(clipped_frame, frame_);
 }
@@ -167,8 +167,8 @@ TEST_F(AudioFrameOperationsTest, ScaleSucceeds) {
   EXPECT_EQ(0, AudioFrameOperations::Scale(2.0, 3.0, frame_));
 
   AudioFrame scaled_frame;
-  scaled_frame._payloadDataLengthInSamples = 320;
-  scaled_frame._audioChannel = 2;
+  scaled_frame.samples_per_channel_ = 320;
+  scaled_frame.num_channels_ = 2;
   SetFrameData(&scaled_frame, 2, -3);
   VerifyFramesAreEqual(scaled_frame, frame_);
 }
@@ -179,13 +179,13 @@ TEST_F(AudioFrameOperationsTest, DISABLED_ScaleWithSatFailsWithBadParameters) {
 }
 
 TEST_F(AudioFrameOperationsTest, ScaleWithSatDoesNotWrapAround) {
-  frame_._audioChannel = 1;
+  frame_.num_channels_ = 1;
   SetFrameData(&frame_, 4000);
   EXPECT_EQ(0, AudioFrameOperations::ScaleWithSat(10.0, frame_));
 
   AudioFrame clipped_frame;
-  clipped_frame._payloadDataLengthInSamples = 320;
-  clipped_frame._audioChannel = 1;
+  clipped_frame.samples_per_channel_ = 320;
+  clipped_frame.num_channels_ = 1;
   SetFrameData(&clipped_frame, 32767);
   VerifyFramesAreEqual(clipped_frame, frame_);
 
@@ -196,13 +196,13 @@ TEST_F(AudioFrameOperationsTest, ScaleWithSatDoesNotWrapAround) {
 }
 
 TEST_F(AudioFrameOperationsTest, ScaleWithSatSucceeds) {
-  frame_._audioChannel = 1;
+  frame_.num_channels_ = 1;
   SetFrameData(&frame_, 1);
   EXPECT_EQ(0, AudioFrameOperations::ScaleWithSat(2.0, frame_));
 
   AudioFrame scaled_frame;
-  scaled_frame._payloadDataLengthInSamples = 320;
-  scaled_frame._audioChannel = 1;
+  scaled_frame.samples_per_channel_ = 320;
+  scaled_frame.num_channels_ = 1;
   SetFrameData(&scaled_frame, 2);
   VerifyFramesAreEqual(scaled_frame, frame_);
 }
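For orientation, a hypothetical caller-side snippet using the renamed members; the header path and the numeric values are assumptions for illustration, not taken from this commit. (data_, energy_, id_, and timestamp_ replace _payloadData, _energy, _id, and _timeStamp in the same way.)

#include "module_common_types.h"  // assumed location of webrtc::AudioFrame

void FillFrame(webrtc::AudioFrame* frame) {
  frame->sample_rate_hz_ = 16000;      // was _frequencyInHz
  frame->samples_per_channel_ = 160;   // was _payloadDataLengthInSamples (10 ms at 16 kHz)
  frame->num_channels_ = 1;            // was _audioChannel
  frame->speech_type_ = webrtc::AudioFrame::kNormalSpeech;  // was _speechType
  frame->vad_activity_ = webrtc::AudioFrame::kVadUnknown;   // was _vadActivity
}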
@@ -829,7 +829,7 @@ WebRtc_Word32 Channel::GetAudioFrame(const WebRtc_Word32 id,
                  "Channel::GetAudioFrame(id=%d)", id);
 
     // Get 10ms raw PCM data from the ACM (mixer limits output frequency)
-    if (_audioCodingModule.PlayoutData10Ms(audioFrame._frequencyInHz,
+    if (_audioCodingModule.PlayoutData10Ms(audioFrame.sample_rate_hz_,
                                            audioFrame) == -1)
     {
         WEBRTC_TRACE(kTraceError, kTraceVoice,
@@ -848,9 +848,9 @@ WebRtc_Word32 Channel::GetAudioFrame(const WebRtc_Word32 id,
     }
 
     // Convert module ID to internal VoE channel ID
-    audioFrame._id = VoEChannelId(audioFrame._id);
+    audioFrame.id_ = VoEChannelId(audioFrame.id_);
     // Store speech type for dead-or-alive detection
-    _outputSpeechType = audioFrame._speechType;
+    _outputSpeechType = audioFrame.speech_type_;
 
     // Perform far-end AudioProcessing module processing on the received signal
     if (_rxApmIsEnabled)
@@ -869,7 +869,7 @@ WebRtc_Word32 Channel::GetAudioFrame(const WebRtc_Word32 id,
 
     if (_panLeft != 1.0f || _panRight != 1.0f)
     {
-        if (audioFrame._audioChannel == 1)
+        if (audioFrame.num_channels_ == 1)
         {
            // Emulate stereo mode since panning is active.
            // The mono signal is copied to both left and right channels here.
@@ -886,7 +886,7 @@ WebRtc_Word32 Channel::GetAudioFrame(const WebRtc_Word32 id,
     // Mix decoded PCM output with file if file mixing is enabled
     if (_outputFilePlaying)
     {
-        MixAudioWithFile(audioFrame, audioFrame._frequencyInHz);
+        MixAudioWithFile(audioFrame, audioFrame.sample_rate_hz_);
     }
 
     // Place channel in on-hold state (~muted) if on-hold is activated
@@ -899,15 +899,15 @@ WebRtc_Word32 Channel::GetAudioFrame(const WebRtc_Word32 id,
     if (_outputExternalMedia)
     {
         CriticalSectionScoped cs(&_callbackCritSect);
-        const bool isStereo = (audioFrame._audioChannel == 2);
+        const bool isStereo = (audioFrame.num_channels_ == 2);
         if (_outputExternalMediaCallbackPtr)
         {
             _outputExternalMediaCallbackPtr->Process(
                 _channelId,
                 kPlaybackPerChannel,
-                (WebRtc_Word16*)audioFrame._payloadData,
-                audioFrame._payloadDataLengthInSamples,
-                audioFrame._frequencyInHz,
+                (WebRtc_Word16*)audioFrame.data_,
+                audioFrame.samples_per_channel_,
+                audioFrame.sample_rate_hz_,
                 isStereo);
         }
     }
@@ -1610,7 +1610,7 @@ WebRtc_Word32
 Channel::UpdateLocalTimeStamp()
 {
 
-    _timeStamp += _audioFrame._payloadDataLengthInSamples;
+    _timeStamp += _audioFrame.samples_per_channel_;
     return 0;
 }
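Side note, a sketch under the assumption that _timeStamp feeds the outgoing RTP timestamp: audio RTP timestamps advance in sample ticks per channel, which is why UpdateLocalTimeStamp adds samples_per_channel_ once per frame regardless of channel count. Hypothetical helper:

#include <cstdint>

// At 16 kHz a 10 ms frame carries 160 samples per channel, so the
// timestamp steps by 160 even for stereo payloads.
uint32_t NextRtpTimestamp(uint32_t timestamp, uint16_t samples_per_channel) {
  return timestamp + samples_per_channel;
}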
 
@@ -4657,7 +4657,7 @@ Channel::UpdateRxVadDetection(AudioFrame& audioFrame)
 
     int vadDecision = 1;
 
-    vadDecision = (audioFrame._vadActivity == AudioFrame::kVadActive)? 1 : 0;
+    vadDecision = (audioFrame.vad_activity_ == AudioFrame::kVadActive)? 1 : 0;
 
     if ((vadDecision != _oldVadDecision) && _rxVadObserverPtr)
     {
@@ -5774,7 +5774,7 @@ Channel::Demultiplex(const AudioFrame& audioFrame)
     WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,_channelId),
                  "Channel::Demultiplex()");
     _audioFrame = audioFrame;
-    _audioFrame._id = _channelId;
+    _audioFrame.id_ = _channelId;
     return 0;
 }
 
@@ -5784,7 +5784,7 @@ Channel::PrepareEncodeAndSend(int mixingFrequency)
     WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,_channelId),
                  "Channel::PrepareEncodeAndSend()");
 
-    if (_audioFrame._payloadDataLengthInSamples == 0)
+    if (_audioFrame.samples_per_channel_ == 0)
     {
         WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId,_channelId),
                      "Channel::PrepareEncodeAndSend() invalid audio frame");
@@ -5804,15 +5804,15 @@ Channel::PrepareEncodeAndSend(int mixingFrequency)
     if (_inputExternalMedia)
     {
         CriticalSectionScoped cs(&_callbackCritSect);
-        const bool isStereo = (_audioFrame._audioChannel == 2);
+        const bool isStereo = (_audioFrame.num_channels_ == 2);
         if (_inputExternalMediaCallbackPtr)
        {
            _inputExternalMediaCallbackPtr->Process(
                _channelId,
                kRecordingPerChannel,
-               (WebRtc_Word16*)_audioFrame._payloadData,
-               _audioFrame._payloadDataLengthInSamples,
-               _audioFrame._frequencyInHz,
+               (WebRtc_Word16*)_audioFrame.data_,
+               _audioFrame.samples_per_channel_,
+               _audioFrame.sample_rate_hz_,
                isStereo);
         }
     }
@@ -5824,9 +5824,9 @@ Channel::PrepareEncodeAndSend(int mixingFrequency)
         assert(_rtpAudioProc.get() != NULL);
 
         // Check if settings need to be updated.
-        if (_rtpAudioProc->sample_rate_hz() != _audioFrame._frequencyInHz)
+        if (_rtpAudioProc->sample_rate_hz() != _audioFrame.sample_rate_hz_)
         {
-            if (_rtpAudioProc->set_sample_rate_hz(_audioFrame._frequencyInHz) !=
+            if (_rtpAudioProc->set_sample_rate_hz(_audioFrame.sample_rate_hz_) !=
                 AudioProcessing::kNoError)
             {
                 WEBRTC_TRACE(kTraceWarning, kTraceVoice,
@@ -5836,10 +5836,10 @@ Channel::PrepareEncodeAndSend(int mixingFrequency)
             }
         }
 
-        if (_rtpAudioProc->num_input_channels() != _audioFrame._audioChannel)
+        if (_rtpAudioProc->num_input_channels() != _audioFrame.num_channels_)
        {
-            if (_rtpAudioProc->set_num_channels(_audioFrame._audioChannel,
-                                                _audioFrame._audioChannel)
+            if (_rtpAudioProc->set_num_channels(_audioFrame.num_channels_,
+                                                _audioFrame.num_channels_)
                != AudioProcessing::kNoError)
             {
                 WEBRTC_TRACE(kTraceWarning, kTraceVoice,
@@ -5862,20 +5862,20 @@ Channel::EncodeAndSend()
     WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,_channelId),
                  "Channel::EncodeAndSend()");
 
-    assert(_audioFrame._audioChannel <= 2);
-    if (_audioFrame._payloadDataLengthInSamples == 0)
+    assert(_audioFrame.num_channels_ <= 2);
+    if (_audioFrame.samples_per_channel_ == 0)
     {
         WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId,_channelId),
                      "Channel::EncodeAndSend() invalid audio frame");
         return -1;
     }
 
-    _audioFrame._id = _channelId;
+    _audioFrame.id_ = _channelId;
 
     // --- Add 10ms of raw (PCM) audio data to the encoder @ 32kHz.
 
     // The ACM resamples internally.
-    _audioFrame._timeStamp = _timeStamp;
+    _audioFrame.timestamp_ = _timeStamp;
     if (_audioCodingModule.Add10MsData((AudioFrame&)_audioFrame) != 0)
     {
         WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_instanceId,_channelId),
@@ -5883,7 +5883,7 @@ Channel::EncodeAndSend()
         return -1;
     }
 
-    _timeStamp += _audioFrame._payloadDataLengthInSamples;
+    _timeStamp += _audioFrame.samples_per_channel_;
 
     // --- Encode if complete frame is ready
 
@@ -6179,14 +6179,14 @@ Channel::MixOrReplaceAudioWithFile(const int mixingFrequency)
         }
     }
 
-    assert(_audioFrame._payloadDataLengthInSamples == fileSamples);
+    assert(_audioFrame.samples_per_channel_ == fileSamples);
 
     if (_mixFileWithMicrophone)
     {
         // Currently file stream is always mono.
         // TODO(xians): Change the code when FilePlayer supports real stereo.
-        Utility::MixWithSat(_audioFrame._payloadData,
-                            static_cast<int>(_audioFrame._audioChannel),
+        Utility::MixWithSat(_audioFrame.data_,
+                            static_cast<int>(_audioFrame.num_channels_),
                             fileBuffer.get(),
                             1,
                             static_cast<int>(fileSamples));
@@ -6241,12 +6241,12 @@ Channel::MixAudioWithFile(AudioFrame& audioFrame,
         }
     }
 
-    if (audioFrame._payloadDataLengthInSamples == fileSamples)
+    if (audioFrame.samples_per_channel_ == fileSamples)
    {
         // Currently file stream is always mono.
         // TODO(xians): Change the code when FilePlayer supports real stereo.
-        Utility::MixWithSat(audioFrame._payloadData,
-                            static_cast<int>(audioFrame._audioChannel),
+        Utility::MixWithSat(audioFrame.data_,
+                            static_cast<int>(audioFrame.num_channels_),
                             fileBuffer.get(),
                             1,
                             static_cast<int>(fileSamples));
@@ -6254,9 +6254,9 @@ Channel::MixAudioWithFile(AudioFrame& audioFrame,
     else
     {
         WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId,_channelId),
-                     "Channel::MixAudioWithFile() _payloadDataLengthInSamples(%d) != "
+                     "Channel::MixAudioWithFile() samples_per_channel_(%d) != "
                      "fileSamples(%d)",
-                     audioFrame._payloadDataLengthInSamples, fileSamples);
+                     audioFrame.samples_per_channel_, fileSamples);
         return -1;
     }
 
@@ -6292,12 +6292,12 @@ Channel::InsertInbandDtmfTone()
         WebRtc_UWord16 frequency(0);
         _inbandDtmfGenerator.GetSampleRate(frequency);
 
-        if (frequency != _audioFrame._frequencyInHz)
+        if (frequency != _audioFrame.sample_rate_hz_)
         {
             // Update sample rate of Dtmf tone since the mixing frequency
             // has changed.
             _inbandDtmfGenerator.SetSampleRate(
-                (WebRtc_UWord16) (_audioFrame._frequencyInHz));
+                (WebRtc_UWord16) (_audioFrame.sample_rate_hz_));
             // Reset the tone to be added taking the new sample rate into
             // account.
             _inbandDtmfGenerator.ResetTone();
@@ -6316,19 +6316,19 @@ Channel::InsertInbandDtmfTone()
 
             // Replace mixed audio with DTMF tone.
             for (int sample = 0;
-                 sample < _audioFrame._payloadDataLengthInSamples;
+                 sample < _audioFrame.samples_per_channel_;
                 sample++)
             {
                 for (int channel = 0;
-                     channel < _audioFrame._audioChannel;
+                     channel < _audioFrame.num_channels_;
                     channel++)
                 {
-                    _audioFrame._payloadData[sample * _audioFrame._audioChannel + channel] =
+                    _audioFrame.data_[sample * _audioFrame.num_channels_ + channel] =
                         toneBuffer[sample];
                 }
             }
 
-            assert(_audioFrame._payloadDataLengthInSamples == toneSamples);
+            assert(_audioFrame.samples_per_channel_ == toneSamples);
         } else
         {
             // Add 10ms to "delay-since-last-tone" counter
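A sketch of the indexing convention the DTMF loop above depends on: AudioFrame stores its samples interleaved, so sample s of channel c lives at index s * num_channels_ + c.

#include <cstddef>

// Illustrative helper, not part of the patch. For stereo the int16_t slots
// alternate L, R, L, R, ..., so InterleavedIndex(1, 0, 2) == 2 addresses
// the second sample of the left channel.
size_t InterleavedIndex(size_t sample, size_t channel, size_t num_channels) {
  return sample * num_channels + channel;
}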
@@ -6572,15 +6572,15 @@ Channel::ApmProcessRx(AudioFrame& audioFrame)
 
     // Reset the APM frequency if the frequency has changed
     if (_rxAudioProcessingModulePtr->sample_rate_hz() !=
-        audioFrame._frequencyInHz)
+        audioFrame.sample_rate_hz_)
     {
         if (_rxAudioProcessingModulePtr->set_sample_rate_hz(
-            audioFrame._frequencyInHz) != 0)
+            audioFrame.sample_rate_hz_) != 0)
         {
             WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId,-1),
                          "AudioProcessingModule::set_sample_rate_hz("
-                         "_frequencyInHz=%u) => error",
-                         _audioFrame._frequencyInHz);
+                         "sample_rate_hz_=%u) => error",
+                         _audioFrame.sample_rate_hz_);
         }
     }
 
@@ -1,5 +1,5 @@
 /*
- *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
  *
  *  Use of this source code is governed by a BSD-style license
  *  that can be found in the LICENSE file in the root of the source
@@ -52,8 +52,8 @@ AudioLevel::ComputeLevel(const AudioFrame& audioFrame)
 
     // Check speech level (works for 2 channels as well)
     absValue = WebRtcSpl_MaxAbsValueW16(
-        audioFrame._payloadData,
-        audioFrame._payloadDataLengthInSamples*audioFrame._audioChannel);
+        audioFrame.data_,
+        audioFrame.samples_per_channel_*audioFrame.num_channels_);
     if (absValue > _absMax)
         _absMax = absValue;
 
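For clarity, a plain-C++ sketch of what the WebRtcSpl_MaxAbsValueW16 call above is assumed to compute — the largest magnitude over the interleaved payload:

#include <algorithm>
#include <cstdint>
#include <cstdlib>

// Assumed semantics, for illustration only. Widening to int32_t avoids the
// corner case where the magnitude of -32768 cannot be held in int16_t.
int32_t MaxAbsValueW16(const int16_t* data, int length) {
  int32_t max_abs = 0;
  for (int i = 0; i < length; ++i) {
    max_abs = std::max(max_abs, std::abs(static_cast<int32_t>(data[i])));
  }
  return max_abs;
}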
@@ -32,7 +32,7 @@ OutputMixer::NewMixedAudio(const WebRtc_Word32 id,
                  "OutputMixer::NewMixedAudio(id=%d, size=%u)", id, size);
 
     _audioFrame = generalAudioFrame;
-    _audioFrame._id = id;
+    _audioFrame.id_ = id;
 }
 
 void OutputMixer::MixedParticipants(
@@ -539,9 +539,9 @@ OutputMixer::GetMixedAudio(const WebRtc_Word32 desiredFreqHz,
 
     int outLen(0);
 
-    if (audioFrame._audioChannel == 1)
+    if (audioFrame.num_channels_ == 1)
     {
-        if (_resampler.ResetIfNeeded(audioFrame._frequencyInHz,
+        if (_resampler.ResetIfNeeded(audioFrame.sample_rate_hz_,
                                      desiredFreqHz,
                                      kResamplerSynchronous) != 0)
         {
@@ -552,7 +552,7 @@ OutputMixer::GetMixedAudio(const WebRtc_Word32 desiredFreqHz,
     }
     else
     {
-        if (_resampler.ResetIfNeeded(audioFrame._frequencyInHz,
+        if (_resampler.ResetIfNeeded(audioFrame.sample_rate_hz_,
                                      desiredFreqHz,
                                      kResamplerSynchronousStereo) != 0)
         {
@@ -562,18 +562,18 @@ OutputMixer::GetMixedAudio(const WebRtc_Word32 desiredFreqHz,
         }
     }
     if (_resampler.Push(
-        _audioFrame._payloadData,
-        _audioFrame._payloadDataLengthInSamples*_audioFrame._audioChannel,
-        audioFrame._payloadData,
-        AudioFrame::kMaxAudioFrameSizeSamples,
+        _audioFrame.data_,
+        _audioFrame.samples_per_channel_*_audioFrame.num_channels_,
+        audioFrame.data_,
+        AudioFrame::kMaxDataSizeSamples,
         outLen) == 0)
     {
         // Ensure that output from resampler matches the audio-frame format.
         // Example: 10ms stereo output at 48kHz => outLen = 960 =>
-        // convert _payloadDataLengthInSamples to 480
-        audioFrame._payloadDataLengthInSamples =
-            (outLen / _audioFrame._audioChannel);
-        audioFrame._frequencyInHz = desiredFreqHz;
+        // convert samples_per_channel_ to 480
+        audioFrame.samples_per_channel_ =
+            (outLen / _audioFrame.num_channels_);
+        audioFrame.sample_rate_hz_ = desiredFreqHz;
     }
     else
     {
@@ -582,7 +582,7 @@ OutputMixer::GetMixedAudio(const WebRtc_Word32 desiredFreqHz,
         return -1;
     }
 
-    if ((channels == 2) && (audioFrame._audioChannel == 1))
+    if ((channels == 2) && (audioFrame.num_channels_ == 1))
     {
         AudioFrameOperations::MonoToStereo(audioFrame);
     }
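A worked instance of the conversion described in the comment above — the resampler reports total interleaved samples, while AudioFrame counts per channel:

#include <cassert>

int main() {
  const int outLen = 960;       // 10 ms of stereo audio at 48 kHz, interleaved
  const int num_channels = 2;
  assert(outLen / num_channels == 480);  // value stored in samples_per_channel_
  return 0;
}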
@@ -593,12 +593,12 @@ OutputMixer::GetMixedAudio(const WebRtc_Word32 desiredFreqHz,
 WebRtc_Word32
 OutputMixer::DoOperationsOnCombinedSignal()
 {
-    if (_audioFrame._frequencyInHz != _mixingFrequencyHz)
+    if (_audioFrame.sample_rate_hz_ != _mixingFrequencyHz)
     {
         WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,-1),
                      "OutputMixer::DoOperationsOnCombinedSignal() => "
-                     "mixing frequency = %d", _audioFrame._frequencyInHz);
-        _mixingFrequencyHz = _audioFrame._frequencyInHz;
+                     "mixing frequency = %d", _audioFrame.sample_rate_hz_);
+        _mixingFrequencyHz = _audioFrame.sample_rate_hz_;
     }
 
     // --- Insert inband Dtmf tone
@@ -610,7 +610,7 @@ OutputMixer::DoOperationsOnCombinedSignal()
     // Scale left and/or right channel(s) if balance is active
     if (_panLeft != 1.0 || _panRight != 1.0)
     {
-        if (_audioFrame._audioChannel == 1)
+        if (_audioFrame.num_channels_ == 1)
         {
             AudioFrameOperations::MonoToStereo(_audioFrame);
         }
@@ -619,7 +619,7 @@ OutputMixer::DoOperationsOnCombinedSignal()
             // Pure stereo mode (we are receiving a stereo signal).
         }
 
-        assert(_audioFrame._audioChannel == 2);
+        assert(_audioFrame.num_channels_ == 2);
         AudioFrameOperations::Scale(_panLeft, _panRight, _audioFrame);
     }
 
@@ -632,15 +632,15 @@ OutputMixer::DoOperationsOnCombinedSignal()
     if (_externalMedia)
     {
         CriticalSectionScoped cs(&_callbackCritSect);
-        const bool isStereo = (_audioFrame._audioChannel == 2);
+        const bool isStereo = (_audioFrame.num_channels_ == 2);
         if (_externalMediaCallbackPtr)
         {
             _externalMediaCallbackPtr->Process(
                 -1,
                 kPlaybackAllChannelsMixed,
-                (WebRtc_Word16*)_audioFrame._payloadData,
-                _audioFrame._payloadDataLengthInSamples,
-                _audioFrame._frequencyInHz,
+                (WebRtc_Word16*)_audioFrame.data_,
+                _audioFrame.samples_per_channel_,
+                _audioFrame.sample_rate_hz_,
                 isStereo);
         }
     }
@@ -664,31 +664,31 @@ OutputMixer::APMAnalyzeReverseStream()
     // Convert from mixing frequency to APM frequency.
     // Sending side determines APM frequency.
 
-    if (audioFrame._audioChannel == 1)
+    if (audioFrame.num_channels_ == 1)
     {
-        _apmResampler.ResetIfNeeded(_audioFrame._frequencyInHz,
+        _apmResampler.ResetIfNeeded(_audioFrame.sample_rate_hz_,
                                    _audioProcessingModulePtr->sample_rate_hz(),
                                    kResamplerSynchronous);
     }
     else
     {
-        _apmResampler.ResetIfNeeded(_audioFrame._frequencyInHz,
+        _apmResampler.ResetIfNeeded(_audioFrame.sample_rate_hz_,
                                    _audioProcessingModulePtr->sample_rate_hz(),
                                    kResamplerSynchronousStereo);
     }
     if (_apmResampler.Push(
-        _audioFrame._payloadData,
-        _audioFrame._payloadDataLengthInSamples*_audioFrame._audioChannel,
-        audioFrame._payloadData,
-        AudioFrame::kMaxAudioFrameSizeSamples,
+        _audioFrame.data_,
+        _audioFrame.samples_per_channel_*_audioFrame.num_channels_,
+        audioFrame.data_,
+        AudioFrame::kMaxDataSizeSamples,
         outLen) == 0)
     {
-        audioFrame._payloadDataLengthInSamples =
-            (outLen / _audioFrame._audioChannel);
-        audioFrame._frequencyInHz = _audioProcessingModulePtr->sample_rate_hz();
+        audioFrame.samples_per_channel_ =
+            (outLen / _audioFrame.num_channels_);
+        audioFrame.sample_rate_hz_ = _audioProcessingModulePtr->sample_rate_hz();
     }
 
-    if (audioFrame._audioChannel == 2)
+    if (audioFrame.num_channels_ == 2)
     {
         AudioFrameOperations::StereoToMono(audioFrame);
     }
@@ -709,11 +709,11 @@ OutputMixer::InsertInbandDtmfTone()
     {
         WebRtc_UWord16 sampleRate(0);
         _dtmfGenerator.GetSampleRate(sampleRate);
-        if (sampleRate != _audioFrame._frequencyInHz)
+        if (sampleRate != _audioFrame.sample_rate_hz_)
         {
             // Update sample rate of Dtmf tone since the mixing frequency changed.
             _dtmfGenerator.SetSampleRate(
-                (WebRtc_UWord16)(_audioFrame._frequencyInHz));
+                (WebRtc_UWord16)(_audioFrame.sample_rate_hz_));
             // Reset the tone to be added taking the new sample rate into account.
             _dtmfGenerator.ResetTone();
         }
@@ -729,21 +729,21 @@ OutputMixer::InsertInbandDtmfTone()
     }
 
     // replace mixed audio with Dtmf tone
-    if (_audioFrame._audioChannel == 1)
+    if (_audioFrame.num_channels_ == 1)
     {
         // mono
-        memcpy(_audioFrame._payloadData, toneBuffer, sizeof(WebRtc_Word16)
+        memcpy(_audioFrame.data_, toneBuffer, sizeof(WebRtc_Word16)
             * toneSamples);
     } else
     {
         // stereo
-        for (int i = 0; i < _audioFrame._payloadDataLengthInSamples; i++)
+        for (int i = 0; i < _audioFrame.samples_per_channel_; i++)
         {
-            _audioFrame._payloadData[2 * i] = toneBuffer[i];
-            _audioFrame._payloadData[2 * i + 1] = 0;
+            _audioFrame.data_[2 * i] = toneBuffer[i];
+            _audioFrame.data_[2 * i + 1] = 0;
         }
     }
-    assert(_audioFrame._payloadDataLengthInSamples == toneSamples);
+    assert(_audioFrame.samples_per_channel_ == toneSamples);
 
     return 0;
 }
@@ -404,15 +404,15 @@ TransmitMixer::PrepareDemux(const void* audioSamples,
     if (_externalMedia)
     {
         CriticalSectionScoped cs(&_callbackCritSect);
-        const bool isStereo = (_audioFrame._audioChannel == 2);
+        const bool isStereo = (_audioFrame.num_channels_ == 2);
         if (_externalMediaCallbackPtr)
         {
             _externalMediaCallbackPtr->Process(
                 -1,
                 kRecordingAllChannelsMixed,
-                (WebRtc_Word16*) _audioFrame._payloadData,
-                _audioFrame._payloadDataLengthInSamples,
-                _audioFrame._frequencyInHz,
+                (WebRtc_Word16*) _audioFrame.data_,
+                _audioFrame.samples_per_channel_,
+                _audioFrame.sample_rate_hz_,
                 isStereo);
         }
     }
@@ -1181,22 +1181,22 @@ TransmitMixer::GenerateAudioFrame(const WebRtc_Word16 audioSamples[],
     if (_audioResampler.Push(
         (WebRtc_Word16*) audioSamples,
         nSamples * nChannels,
-        _audioFrame._payloadData,
-        AudioFrame::kMaxAudioFrameSizeSamples,
-        (int&) _audioFrame._payloadDataLengthInSamples) == -1)
+        _audioFrame.data_,
+        AudioFrame::kMaxDataSizeSamples,
+        (int&) _audioFrame.samples_per_channel_) == -1)
     {
         WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_instanceId, -1),
                      "TransmitMixer::GenerateAudioFrame() resampling failed");
         return -1;
     }
 
-    _audioFrame._payloadDataLengthInSamples /= nChannels;
-    _audioFrame._id = _instanceId;
-    _audioFrame._timeStamp = -1;
-    _audioFrame._frequencyInHz = mixingFrequency;
-    _audioFrame._speechType = AudioFrame::kNormalSpeech;
-    _audioFrame._vadActivity = AudioFrame::kVadUnknown;
-    _audioFrame._audioChannel = nChannels;
+    _audioFrame.samples_per_channel_ /= nChannels;
+    _audioFrame.id_ = _instanceId;
+    _audioFrame.timestamp_ = -1;
+    _audioFrame.sample_rate_hz_ = mixingFrequency;
+    _audioFrame.speech_type_ = AudioFrame::kNormalSpeech;
+    _audioFrame.vad_activity_ = AudioFrame::kVadUnknown;
+    _audioFrame.num_channels_ = nChannels;
 
     return 0;
 }
@@ -1253,14 +1253,14 @@ WebRtc_Word32 TransmitMixer::MixOrReplaceAudioWithFile(
         }
     }
 
-    assert(_audioFrame._payloadDataLengthInSamples == fileSamples);
+    assert(_audioFrame.samples_per_channel_ == fileSamples);
 
     if (_mixFileWithMicrophone)
     {
         // Currently file stream is always mono.
         // TODO(xians): Change the code when FilePlayer supports real stereo.
-        Utility::MixWithSat(_audioFrame._payloadData,
-                            static_cast<int>(_audioFrame._audioChannel),
+        Utility::MixWithSat(_audioFrame.data_,
+                            static_cast<int>(_audioFrame.num_channels_),
                             fileBuffer.get(),
                             1,
                             static_cast<int>(fileSamples));
@@ -1291,16 +1291,16 @@ WebRtc_Word32 TransmitMixer::APMProcessStream(
 
     // Check if the number of input channels has changed. Retain the number
     // of output channels.
-    if (_audioFrame._audioChannel !=
+    if (_audioFrame.num_channels_ !=
         _audioProcessingModulePtr->num_input_channels())
     {
         if (_audioProcessingModulePtr->set_num_channels(
-                _audioFrame._audioChannel,
+                _audioFrame.num_channels_,
                 _audioProcessingModulePtr->num_output_channels()))
         {
             WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
                          "AudioProcessing::set_num_channels(%d, %d) => error",
-                         _audioFrame._frequencyInHz,
+                         _audioFrame.sample_rate_hz_,
                          _audioProcessingModulePtr->num_output_channels());
         }
     }
@@ -1308,14 +1308,14 @@ WebRtc_Word32 TransmitMixer::APMProcessStream(
     // If the frequency has changed we need to change APM settings
     // Sending side is "master"
     if (_audioProcessingModulePtr->sample_rate_hz() !=
-        _audioFrame._frequencyInHz)
+        _audioFrame.sample_rate_hz_)
     {
         if (_audioProcessingModulePtr->set_sample_rate_hz(
-                _audioFrame._frequencyInHz))
+                _audioFrame.sample_rate_hz_))
         {
             WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
                          "AudioProcessing::set_sample_rate_hz(%u) => error",
-                         _audioFrame._frequencyInHz);
+                         _audioFrame.sample_rate_hz_);
         }
     }
 
@@ -1376,7 +1376,7 @@ WebRtc_Word32 TransmitMixer::APMProcessStream(
 int TransmitMixer::TypingDetection()
 {
     // We let the VAD determine if we're using this feature or not.
-    if (_audioFrame._vadActivity == AudioFrame::kVadUnknown)
+    if (_audioFrame.vad_activity_ == AudioFrame::kVadUnknown)
     {
         return (0);
     }
@@ -1388,7 +1388,7 @@ int TransmitMixer::TypingDetection()
         return (-1);
     }
 
-    if (_audioFrame._vadActivity == AudioFrame::kVadActive)
+    if (_audioFrame.vad_activity_ == AudioFrame::kVadActive)
         _timeActive++;
     else
         _timeActive = 0;
@@ -1403,7 +1403,7 @@ int TransmitMixer::TypingDetection()
         ++_timeSinceLastTyping;
     }
 
-    if (keyPressed && (_audioFrame._vadActivity == AudioFrame::kVadActive)
+    if (keyPressed && (_audioFrame.vad_activity_ == AudioFrame::kVadActive)
         && (_timeActive < _timeWindow))
     {
         _penaltyCounter += _costPerTyping;
@@ -256,18 +256,18 @@ WebRtc_Word32 VoEBaseImpl::NeedMorePlayData(
     _shared->output_mixer()->GetMixedAudio(samplesPerSec, nChannels,
                                            _audioFrame);
 
-    assert(nSamples == _audioFrame._payloadDataLengthInSamples);
+    assert(nSamples == _audioFrame.samples_per_channel_);
     assert(samplesPerSec ==
-        static_cast<WebRtc_UWord32>(_audioFrame._frequencyInHz));
+        static_cast<WebRtc_UWord32>(_audioFrame.sample_rate_hz_));
 
     // Deliver audio (PCM) samples to the ADM
     memcpy(
         (WebRtc_Word16*) audioSamples,
-        (const WebRtc_Word16*) _audioFrame._payloadData,
-        sizeof(WebRtc_Word16) * (_audioFrame._payloadDataLengthInSamples
-            * _audioFrame._audioChannel));
+        (const WebRtc_Word16*) _audioFrame.data_,
+        sizeof(WebRtc_Word16) * (_audioFrame.samples_per_channel_
+            * _audioFrame.num_channels_));
 
-    nSamplesOut = _audioFrame._payloadDataLengthInSamples;
+    nSamplesOut = _audioFrame.samples_per_channel_;
 
     return 0;
 }
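A sketch of the size arithmetic in the memcpy above: the PCM payload spans samples_per_channel_ * num_channels_ int16_t values. The helper name is hypothetical:

#include <cstddef>
#include <cstdint>

// 10 ms of stereo at 44.1 kHz: PayloadBytes(441, 2) == 1764.
size_t PayloadBytes(size_t samples_per_channel, size_t num_channels) {
  return sizeof(int16_t) * samples_per_channel * num_channels;
}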
@@ -329,9 +329,9 @@ int VoEExternalMediaImpl::ExternalPlayoutGetData(
 
     // Deliver audio (PCM) samples to the external sink
     memcpy(speechData10ms,
-           audioFrame._payloadData,
-           sizeof(WebRtc_Word16)*(audioFrame._payloadDataLengthInSamples));
-    lengthSamples = audioFrame._payloadDataLengthInSamples;
+           audioFrame.data_,
+           sizeof(WebRtc_Word16)*(audioFrame.samples_per_channel_));
+    lengthSamples = audioFrame.samples_per_channel_;
 
     // Store current playout delay (to be used by ExternalRecordingInsertData).
     playout_delay_ms_ = current_delay_ms;