Refactor audio_processing/aecm: Removed usage of macro WEBRTC_SPL_MUL_16_16_RSFT

The macro is defined as
#define WEBRTC_SPL_MUL_16_16_RSFT(a, b, c) \
(WEBRTC_SPL_MUL_16_16(a, b) >> (c))

where the inner macro, WEBRTC_SPL_MUL_16_16, is defined in C as
#define WEBRTC_SPL_MUL_16_16(a, b) \
((int32_t) (((int16_t)(a)) * ((int16_t)(b))))
(For definitions on ARMv7 and MIPS, see common_audio/signal_processing/include/spl_inl_{armv7,mips}.h)
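
Fully expanded, a call like WEBRTC_SPL_MUL_16_16_RSFT(a, b, c) is therefore
equivalent to

  ((int32_t)(((int16_t)(a)) * ((int16_t)(b))) >> (c))

i.e. a 16x16-bit multiplication, carried out in int arithmetic, followed by an
arithmetic right shift by c.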

The replacement consists of (see the example below)
- avoiding casts to int16_t when the inputs already are of type int16_t
- adding an explicit cast to <type> when the result is assigned to <type> (other than int or int32_t)
- minor cleanups, such as removing unnecessary parentheses, and style changes
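
For example, a line like

  hnl[i] = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(hnl[i], hnl[i], 14);

in WebRtcAecm_ProcessBlock (see the diff below) becomes

  hnl[i] = (int16_t)((hnl[i] * hnl[i]) >> 14);

where the inner int16_t casts are dropped since hnl[i] already is int16_t, and
the explicit (int16_t) cast on the result is kept because it is assigned back
to an int16_t element.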

In addition, an implicit cast from int32_t to int16_t was removed; this cast was a bug.
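
The removed implicit cast is presumably the aecm->mseThreshold operand in
WebRtcAecm_UpdateChannel (second hunk below), where the macro cast the 32-bit
threshold to int16_t before multiplying. A minimal standalone sketch of why
that matters (not part of the change; the variable name is illustrative only),
assuming a threshold value larger than INT16_MAX:

  #include <stdint.h>
  #include <stdio.h>

  int main(void) {
    int32_t mse_threshold = 40000;  // does not fit in int16_t
    // Old behaviour: the macro truncates the first operand to int16_t
    // (40000 wraps to -25536 on common platforms) before multiplying.
    int32_t old_scaled = ((int16_t)mse_threshold * 5) >> 3;
    // New code scales without the truncating cast.
    int32_t new_scaled = mse_threshold * 5 / 8;
    // Typically prints "-15960 vs 25000".
    printf("%d vs %d\n", (int)old_scaled, (int)new_scaled);
    return 0;
  }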

BUG=3348,3353
TESTED=Locally on Mac and trybots
R=kwiberg@webrtc.org

Review URL: https://webrtc-codereview.appspot.com/41169004

Cr-Commit-Position: refs/heads/master@{#8665}
git-svn-id: http://webrtc.googlecode.com/svn/trunk@8665 4adac7df-926f-26a2-2b94-8c16560cd09d
bjornv@webrtc.org 2015-03-10 06:40:02 +00:00
parent 1afbdc7555
commit b38b009d21
2 changed files with 21 additions and 28 deletions


@@ -794,7 +794,7 @@ void WebRtcAecm_CalcEnergies(AecmCore* aecm,
     tmp16 = 2560 - aecm->farEnergyMin;
     if (tmp16 > 0)
     {
-        tmp16 = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(tmp16, FAR_ENERGY_VAD_REGION, 9);
+        tmp16 = (int16_t)((tmp16 * FAR_ENERGY_VAD_REGION) >> 9);
     } else
     {
         tmp16 = 0;
@@ -1092,8 +1092,9 @@ void WebRtcAecm_UpdateChannel(AecmCore* aecm,
         aecm->mseThreshold = (mseAdapt + aecm->mseAdaptOld);
     } else
     {
-        aecm->mseThreshold += WEBRTC_SPL_MUL_16_16_RSFT(mseAdapt
-            - WEBRTC_SPL_MUL_16_16_RSFT(aecm->mseThreshold, 5, 3), 205, 8);
+        int scaled_threshold = aecm->mseThreshold * 5 / 8;
+        aecm->mseThreshold +=
+            ((mseAdapt - scaled_threshold) * 205) >> 8;
     }
 }


@@ -73,14 +73,11 @@ static void WindowAndFFT(AecmCore* aecm,
   for (i = 0; i < PART_LEN; i++) {
     // Window time domain signal and insert into real part of
     // transformation array |fft|
-    fft[i] = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(
-        (time_signal[i] << time_signal_scaling),
-        WebRtcAecm_kSqrtHanning[i],
-        14);
-    fft[PART_LEN + i] = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(
-        (time_signal[i + PART_LEN] << time_signal_scaling),
-        WebRtcAecm_kSqrtHanning[PART_LEN - i],
-        14);
+    int16_t scaled_time_signal = time_signal[i] << time_signal_scaling;
+    fft[i] = (int16_t)((scaled_time_signal * WebRtcAecm_kSqrtHanning[i]) >> 14);
+    scaled_time_signal = time_signal[i + PART_LEN] << time_signal_scaling;
+    fft[PART_LEN + i] = (int16_t)((
+        scaled_time_signal * WebRtcAecm_kSqrtHanning[PART_LEN - i]) >> 14);
   }
 
   // Do forward FFT, then take only the first PART_LEN complex samples,
@@ -124,9 +121,8 @@ static void InverseFFTAndWindow(AecmCore* aecm,
                                               tmp32no1 + aecm->outBuf[i],
                                               WEBRTC_SPL_WORD16_MIN);
-    tmp32no1 = WEBRTC_SPL_MUL_16_16_RSFT(ifft_out[PART_LEN + i],
-                                         WebRtcAecm_kSqrtHanning[PART_LEN - i],
-                                         14);
+    tmp32no1 = (ifft_out[PART_LEN + i] *
+                WebRtcAecm_kSqrtHanning[PART_LEN - i]) >> 14;
     tmp32no1 = WEBRTC_SPL_SHIFT_W32(tmp32no1,
                                     outCFFT - aecm->dfaCleanQDomain);
     aecm->outBuf[i] = (int16_t)WEBRTC_SPL_SAT(WEBRTC_SPL_WORD16_MAX,
@@ -250,8 +246,8 @@ static int TimeToFrequencyDomain(AecmCore* aecm,
         alpha = kAlpha3;
         beta = kBeta3;
       }
-      tmp16no1 = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(max_value, alpha, 15);
-      tmp16no2 = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(min_value, beta, 15);
+      tmp16no1 = (int16_t)((max_value * alpha) >> 15);
+      tmp16no2 = (int16_t)((min_value * beta) >> 15);
       freq_signal_abs[i] = (uint16_t)tmp16no1 + (uint16_t)tmp16no2;
 #else
 #ifdef WEBRTC_ARCH_ARM_V7
@@ -561,7 +557,7 @@ int WebRtcAecm_ProcessBlock(AecmCore* aecm,
         // speech distortion in double-talk.
         for (i = 0; i < PART_LEN1; i++)
         {
-            hnl[i] = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(hnl[i], hnl[i], 14);
+            hnl[i] = (int16_t)((hnl[i] * hnl[i]) >> 14);
         }
 
         for (i = kMinPrefBand; i <= kMaxPrefBand; i++)
@@ -609,7 +605,7 @@ int WebRtcAecm_ProcessBlock(AecmCore* aecm,
                 hnl[i] = ONE_Q14;
             } else
             {
-                hnl[i] = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(hnl[i], nlpGain, 14);
+                hnl[i] = (int16_t)((hnl[i] * nlpGain) >> 14);
             }
 
             // multiply with Wiener coefficients
@@ -744,9 +740,7 @@ static void ComfortNoise(AecmCore* aecm,
         noiseRShift16[i] = (int16_t)tmp32;
 
         tmp16 = ONE_Q14 - lambda[i];
-        noiseRShift16[i] = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(tmp16,
-                                                              noiseRShift16[i],
-                                                              14);
+        noiseRShift16[i] = (int16_t)((tmp16 * noiseRShift16[i]) >> 14);
     }
 
     // Generate a uniform random array on [0 2^15-1].
@@ -758,15 +752,13 @@ static void ComfortNoise(AecmCore* aecm,
     for (i = 1; i < PART_LEN1; i++)
     {
         // Get a random index for the cos and sin tables over [0 359].
-        tmp16 = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(359, randW16[i - 1], 15);
+        tmp16 = (int16_t)((359 * randW16[i - 1]) >> 15);
 
         // Tables are in Q13.
-        uReal[i] = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(noiseRShift16[i],
-                                                      WebRtcAecm_kCosTable[tmp16],
-                                                      13);
-        uImag[i] = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(-noiseRShift16[i],
-                                                      WebRtcAecm_kSinTable[tmp16],
-                                                      13);
+        uReal[i] = (int16_t)((noiseRShift16[i] * WebRtcAecm_kCosTable[tmp16]) >>
+                             13);
+        uImag[i] = (int16_t)((-noiseRShift16[i] * WebRtcAecm_kSinTable[tmp16]) >>
+                             13);
     }
     uImag[PART_LEN] = 0;