From c2c4117477afe9586b86069a158fc972f618b5ef Mon Sep 17 00:00:00 2001
From: "bjornv@webrtc.org"
Date: Fri, 5 Sep 2014 06:01:53 +0000
Subject: [PATCH] common_audio: Replaced WEBRTC_SPL_LSHIFT_U32 with << in audio_processing

Affected components:
* AECMobile
  - Added a helper function, since the same operation was performed
    several times.
* Auto Gain Control
* Noise Suppression (fixed point)

BUG=3348,3353
TESTED=locally on Linux
R=kwiberg@webrtc.org

Review URL: https://webrtc-codereview.appspot.com/20219004

git-svn-id: http://webrtc.googlecode.com/svn/trunk@7076 4adac7df-926f-26a2-2b94-8c16560cd09d
---
 .../modules/audio_processing/aecm/aecm_core.c | 22 +++++++------
 .../audio_processing/agc/digital_agc.c        |  2 +-
 webrtc/modules/audio_processing/ns/nsx_core.c | 32 +++++++++----------
 .../modules/audio_processing/ns/nsx_core_c.c  | 16 ++++------
 .../audio_processing/ns/nsx_core_mips.c       | 12 +++----
 5 files changed, 42 insertions(+), 42 deletions(-)

diff --git a/webrtc/modules/audio_processing/aecm/aecm_core.c b/webrtc/modules/audio_processing/aecm/aecm_core.c
index 6b6d2f5b8..db8147840 100644
--- a/webrtc/modules/audio_processing/aecm/aecm_core.c
+++ b/webrtc/modules/audio_processing/aecm/aecm_core.c
@@ -706,6 +706,15 @@ int16_t WebRtcAecm_AsymFilt(const int16_t filtOld, const int16_t inVal,
     return retVal;
 }
 
+// ExtractFractionPart(a, zeros)
+//
+// returns the fraction part of |a|, with |zeros| number of leading zeros, as an
+// int16_t scaled to Q8. There is no sanity check of |a| in the sense that the
+// number of zeros match.
+static int16_t ExtractFractionPart(uint32_t a, int zeros) {
+  return (int16_t)(((a << zeros) & 0x7FFFFFFF) >> 23);
+}
+
 // WebRtcAecm_CalcEnergies(...)
 //
 // This function calculates the log of energies for nearend, farend and estimated
@@ -751,9 +760,7 @@ void WebRtcAecm_CalcEnergies(AecmCore_t * aecm,
     if (nearEner)
     {
         zeros = WebRtcSpl_NormU32(nearEner);
-        frac = (int16_t)WEBRTC_SPL_RSHIFT_U32(
-            (WEBRTC_SPL_LSHIFT_U32(nearEner, zeros) & 0x7FFFFFFF),
-            23);
+        frac = ExtractFractionPart(nearEner, zeros);
         // log2 in Q8
         tmp16 += WEBRTC_SPL_LSHIFT_W16((31 - zeros), 8) + frac;
         tmp16 -= WEBRTC_SPL_LSHIFT_W16(aecm->dfaNoisyQDomain, 8);
@@ -774,8 +781,7 @@ void WebRtcAecm_CalcEnergies(AecmCore_t * aecm,
     if (tmpFar)
     {
         zeros = WebRtcSpl_NormU32(tmpFar);
-        frac = (int16_t)WEBRTC_SPL_RSHIFT_U32((WEBRTC_SPL_LSHIFT_U32(tmpFar, zeros)
-                                               & 0x7FFFFFFF), 23);
+        frac = ExtractFractionPart(tmpFar, zeros);
         // log2 in Q8
         tmp16 += WEBRTC_SPL_LSHIFT_W16((31 - zeros), 8) + frac;
         tmp16 -= WEBRTC_SPL_LSHIFT_W16(far_q, 8);
@@ -787,8 +793,7 @@ void WebRtcAecm_CalcEnergies(AecmCore_t * aecm,
     if (tmpAdapt)
     {
         zeros = WebRtcSpl_NormU32(tmpAdapt);
-        frac = (int16_t)WEBRTC_SPL_RSHIFT_U32((WEBRTC_SPL_LSHIFT_U32(tmpAdapt, zeros)
-                                               & 0x7FFFFFFF), 23);
+        frac = ExtractFractionPart(tmpAdapt, zeros);
         //log2 in Q8
         tmp16 += WEBRTC_SPL_LSHIFT_W16((31 - zeros), 8) + frac;
         tmp16 -= WEBRTC_SPL_LSHIFT_W16(RESOLUTION_CHANNEL16 + far_q, 8);
@@ -800,8 +805,7 @@ void WebRtcAecm_CalcEnergies(AecmCore_t * aecm,
     if (tmpStored)
     {
         zeros = WebRtcSpl_NormU32(tmpStored);
-        frac = (int16_t)WEBRTC_SPL_RSHIFT_U32((WEBRTC_SPL_LSHIFT_U32(tmpStored, zeros)
-                                               & 0x7FFFFFFF), 23);
+        frac = ExtractFractionPart(tmpStored, zeros);
        //log2 in Q8
         tmp16 += WEBRTC_SPL_LSHIFT_W16((31 - zeros), 8) + frac;
         tmp16 -= WEBRTC_SPL_LSHIFT_W16(RESOLUTION_CHANNEL16 + far_q, 8);
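Note: the helper added above implements a standard fixed-point log2. Normalizing |a| by its leading-zero count leaves a mantissa m in [1, 2); the masked, down-shifted bits are (m - 1) in Q8, which approximates log2(m) to within roughly 0.086, and (31 - zeros) supplies the integer part, exactly as the "log2 in Q8" lines combine them. A minimal self-contained sketch of that computation, assuming nothing from WebRTC (Log2Q8 is a hypothetical name, and the normalization loop merely stands in for WebRtcSpl_NormU32):

    #include <assert.h>
    #include <math.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Same operation as the ExtractFractionPart() added in the hunk above. */
    static int16_t ExtractFractionPart(uint32_t a, int zeros) {
      return (int16_t)(((a << zeros) & 0x7FFFFFFF) >> 23);
    }

    /* log2(a) in Q8, combining the integer part (31 - zeros) with the
     * fraction part the way WebRtcAecm_CalcEnergies does. */
    static int16_t Log2Q8(uint32_t a) {
      int zeros = 0;
      assert(a > 0);
      while ((a << zeros) < 0x80000000u)  /* stand-in for WebRtcSpl_NormU32 */
        zeros++;
      return (int16_t)(((31 - zeros) << 8) + ExtractFractionPart(a, zeros));
    }

    int main(void) {
      uint32_t a;
      for (a = 1; a < 1000000; a += 977) {
        /* (m - 1) over-/under-shoots log2(m) by at most ~0.086, plus Q8
         * quantization, so 0.1 is a safe bound. */
        assert(fabs(Log2Q8(a) / 256.0 - log2(a)) < 0.1);
      }
      printf("Q8 log2 approximation within 0.1 of log2()\n");
      return 0;
    }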
diff --git a/webrtc/modules/audio_processing/agc/digital_agc.c b/webrtc/modules/audio_processing/agc/digital_agc.c
index b15b6e39d..e439e0902 100644
--- a/webrtc/modules/audio_processing/agc/digital_agc.c
+++ b/webrtc/modules/audio_processing/agc/digital_agc.c
@@ -154,7 +154,7 @@ int32_t WebRtcAgc_CalculateGainTable(int32_t *gainTable, // Q16
         fracPart = (uint16_t)(absInLevel & 0x00003FFF); // extract the fractional part
         tmpU16 = kGenFuncTable[intPart + 1] - kGenFuncTable[intPart]; // Q8
         tmpU32no1 = WEBRTC_SPL_UMUL_16_16(tmpU16, fracPart); // Q22
-        tmpU32no1 += WEBRTC_SPL_LSHIFT_U32((uint32_t)kGenFuncTable[intPart], 14); // Q22
+        tmpU32no1 += (uint32_t)kGenFuncTable[intPart] << 14; // Q22
         logApprox = WEBRTC_SPL_RSHIFT_U32(tmpU32no1, 8); // Q14
         // Compensate for negative exponent using the relation:
         //     log2(1 + 2^-x) = log2(1 + 2^x) - x
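Note: the AGC change sits inside a fixed-point linear interpolation. The Q8 difference between adjacent kGenFuncTable entries is weighted by a Q14 fraction (Q8 x Q14 = Q22) and added to the base entry shifted up to Q22; the (uint32_t) cast keeps the << 14 in unsigned arithmetic. A compilable sketch of that interpolation under assumed table values (kTableQ8 and InterpolateQ22 are illustrative names, not the real kGenFuncTable):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Made-up Q8 table values, standing in for kGenFuncTable. */
    static const uint16_t kTableQ8[] = { 256, 291, 322, 351 };

    /* |fracPartQ14| is Q14, table entries are Q8, result is Q22,
     * mirroring the Q-format comments in the hunk above. */
    static uint32_t InterpolateQ22(int intPart, uint16_t fracPartQ14) {
      uint16_t diffQ8 = kTableQ8[intPart + 1] - kTableQ8[intPart];  /* Q8 */
      uint32_t tmpQ22 = (uint32_t)diffQ8 * fracPartQ14;     /* Q8*Q14 = Q22 */
      tmpQ22 += (uint32_t)kTableQ8[intPart] << 14;          /* Q8 -> Q22 */
      return tmpQ22;
    }

    int main(void) {
      /* Halfway between entries 1 and 2 must land on the Q22 midpoint. */
      uint32_t mid = InterpolateQ22(1, 1 << 13);
      assert(mid == (((uint32_t)291 << 14) + ((uint32_t)322 << 14)) / 2);
      printf("midpoint in Q22: %u\n", (unsigned)mid);
      return 0;
    }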
diff --git a/webrtc/modules/audio_processing/ns/nsx_core.c b/webrtc/modules/audio_processing/ns/nsx_core.c
index 5a88c12fb..19ad1a97a 100644
--- a/webrtc/modules/audio_processing/ns/nsx_core.c
+++ b/webrtc/modules/audio_processing/ns/nsx_core.c
@@ -620,7 +620,7 @@ void WebRtcNsx_CalcParametricNoiseEstimate(NsxInst_t* inst,
   }
   // Shift fractional part to Q(minNorm-stages)
   tmp32no2 = WEBRTC_SPL_SHIFT_W32(tmp32no2, int_part - 11);
-  *noise_estimate_avg = WEBRTC_SPL_LSHIFT_U32(1, int_part) + (uint32_t)tmp32no2;
+  *noise_estimate_avg = (1 << int_part) + (uint32_t)tmp32no2;
   // Scale up to initMagnEst, which is not block averaged
   *noise_estimate = (*noise_estimate_avg) * (uint32_t)(inst->blockIndex + 1);
 }
@@ -1149,7 +1149,7 @@ void WebRtcNsx_ComputeSpectralDifference(NsxInst_t* inst, uint16_t* magnIn) {
     tmpU32no1 = (uint32_t)WEBRTC_SPL_ABS_W32(covMagnPauseFX); // Q(prevQMagn+qMagn)
     norm32 = WebRtcSpl_NormU32(tmpU32no1) - 16;
     if (norm32 > 0) {
-      tmpU32no1 = WEBRTC_SPL_LSHIFT_U32(tmpU32no1, norm32); // Q(prevQMagn+qMagn+norm32)
+      tmpU32no1 <<= norm32; // Q(prevQMagn+qMagn+norm32)
     } else {
       tmpU32no1 = WEBRTC_SPL_RSHIFT_U32(tmpU32no1, -norm32); // Q(prevQMagn+qMagn+norm32)
     }
@@ -1660,7 +1660,7 @@ int WebRtcNsx_ProcessCore(NsxInst_t* inst, short* speechFrame, short* speechFram
         // numerator = (initMagnEst - noise_estimate * overdrive)
         // Result in Q(8+minNorm-stages)
         tmpU32no1 = WEBRTC_SPL_UMUL_32_16(noise_estimate, inst->overdrive);
-        numerator = WEBRTC_SPL_LSHIFT_U32(inst->initMagnEst[i], 8);
+        numerator = inst->initMagnEst[i] << 8;
         if (numerator > tmpU32no1) {
           // Suppression filter coefficient larger than zero, so calculate.
           numerator -= tmpU32no1;
@@ -1671,7 +1671,7 @@ int WebRtcNsx_ProcessCore(NsxInst_t* inst, short* speechFrame, short* speechFram
           nShifts = WEBRTC_SPL_SAT(6, nShifts, 0);
 
           // Shift numerator to Q(nShifts+8+minNorm-stages)
-          numerator = WEBRTC_SPL_LSHIFT_U32(numerator, nShifts);
+          numerator <<= nShifts;
 
           // Shift denominator to Q(nShifts-6+minNorm-stages)
           tmpU32no1 = WEBRTC_SPL_RSHIFT_U32(inst->initMagnEst[i], 6 - nShifts);
@@ -1710,7 +1710,7 @@ int WebRtcNsx_ProcessCore(NsxInst_t* inst, short* speechFrame, short* speechFram
         // Add them together and divide by startup length
         noiseU32[i] = WebRtcSpl_DivU32U16(tmpU32no1 + tmpU32no2, END_STARTUP_SHORT);
         // Shift back if necessary
-        noiseU32[i] = WEBRTC_SPL_LSHIFT_U32(noiseU32[i], nShifts);
+        noiseU32[i] <<= nShifts;
       }
       // Update new Q-domain for 'noiseU32'
       qNoise = q_domain_to_use;
@@ -1753,15 +1753,15 @@ int WebRtcNsx_ProcessCore(NsxInst_t* inst, short* speechFrame, short* speechFram
 
       // calculate post SNR: output in Q11
       postLocSnr[i] = 2048; // 1.0 in Q11
-      tmpU32no1 = WEBRTC_SPL_LSHIFT_U32((uint32_t)magnU16[i], 6); // Q(6+qMagn)
+      tmpU32no1 = (uint32_t)magnU16[i] << 6; // Q(6+qMagn)
       if (postShifts < 0) {
         tmpU32no2 = WEBRTC_SPL_RSHIFT_U32(noiseU32[i], -postShifts); // Q(6+qMagn)
       } else {
-        tmpU32no2 = WEBRTC_SPL_LSHIFT_U32(noiseU32[i], postShifts); // Q(6+qMagn)
+        tmpU32no2 = noiseU32[i] << postShifts; // Q(6+qMagn)
       }
       if (tmpU32no1 > tmpU32no2) {
         // Current magnitude larger than noise
-        tmpU32no1 = WEBRTC_SPL_LSHIFT_U32(tmpU32no1, 11); // Q(17+qMagn)
+        tmpU32no1 <<= 11; // Q(17+qMagn)
         if (tmpU32no2 > 0) {
           tmpU32no1 /= tmpU32no2; // Q11
           postLocSnr[i] = WEBRTC_SPL_MIN(satMax, tmpU32no1); // Q11
@@ -1772,7 +1772,7 @@ int WebRtcNsx_ProcessCore(NsxInst_t* inst, short* speechFrame, short* speechFram
       // calculate prevNearSnr[i] and save for later instead of recalculating it later
       nearMagnEst = WEBRTC_SPL_UMUL_16_16(inst->prevMagnU16[i],
                                           inst->noiseSupFilter[i]); // Q(prevQMagn+14)
-      tmpU32no1 = WEBRTC_SPL_LSHIFT_U32(nearMagnEst, 3); // Q(prevQMagn+17)
+      tmpU32no1 = nearMagnEst << 3; // Q(prevQMagn+17)
       tmpU32no2 = WEBRTC_SPL_RSHIFT_U32(inst->prevNoiseU32[i], nShifts); // Q(prevQMagn+6)
 
       if (tmpU32no2 > 0) {
@@ -1833,7 +1833,7 @@ int WebRtcNsx_ProcessCore(NsxInst_t* inst, short* speechFrame, short* speechFram
         inst->featureSpecDiff = 0x007FFFFF;
       } else {
         inst->featureSpecDiff = WEBRTC_SPL_MIN(0x007FFFFF,
-                                               WEBRTC_SPL_LSHIFT_U32(tmpU32no3, norm32no1));
+                                               tmpU32no3 << norm32no1);
       }
     }
 
@@ -1858,7 +1858,7 @@ int WebRtcNsx_ProcessCore(NsxInst_t* inst, short* speechFrame, short* speechFram
       if (postShifts < 0) {
         tmpU32no2 = WEBRTC_SPL_RSHIFT_U32(magnU16[i], -postShifts); // Q(prevQNoise)
       } else {
-        tmpU32no2 = WEBRTC_SPL_LSHIFT_U32(magnU16[i], postShifts); // Q(prevQNoise)
+        tmpU32no2 = (uint32_t)magnU16[i] << postShifts; // Q(prevQNoise)
       }
       if (prevNoiseU16[i] > tmpU32no2) {
         sign = -1;
@@ -1979,18 +1979,18 @@ int WebRtcNsx_ProcessCore(NsxInst_t* inst, short* speechFrame, short* speechFram
       if (nShifts < 0) {
         // This case is equivalent with magn < noise which implies curNearSnr = 0;
         tmpMagnU32 = (uint32_t)magnU16[i]; // Q(qMagn)
-        tmpNoiseU32 = WEBRTC_SPL_LSHIFT_U32(noiseU32[i], -nShifts); // Q(qMagn)
+        tmpNoiseU32 = noiseU32[i] << -nShifts; // Q(qMagn)
       } else if (nShifts > 17) {
-        tmpMagnU32 = WEBRTC_SPL_LSHIFT_U32(magnU16[i], 17); // Q(qMagn+17)
+        tmpMagnU32 = (uint32_t)magnU16[i] << 17; // Q(qMagn+17)
         tmpNoiseU32 = WEBRTC_SPL_RSHIFT_U32(noiseU32[i], nShifts - 17); // Q(qMagn+17)
       } else {
-        tmpMagnU32 = WEBRTC_SPL_LSHIFT_U32((uint32_t)magnU16[i], nShifts); // Q(qNoise_prev+11)
+        tmpMagnU32 = (uint32_t)magnU16[i] << nShifts; // Q(qNoise_prev+11)
         tmpNoiseU32 = noiseU32[i]; // Q(qNoise_prev+11)
       }
       if (tmpMagnU32 > tmpNoiseU32) {
         tmpU32no1 = tmpMagnU32 - tmpNoiseU32; // Q(qCur)
         norm32no2 = WEBRTC_SPL_MIN(11, WebRtcSpl_NormU32(tmpU32no1));
-        tmpU32no1 = WEBRTC_SPL_LSHIFT_U32(tmpU32no1, norm32no2); // Q(qCur+norm32no2)
+        tmpU32no1 <<= norm32no2; // Q(qCur+norm32no2)
         tmpU32no2 = WEBRTC_SPL_RSHIFT_U32(tmpNoiseU32, 11 - norm32no2); // Q(qCur+norm32no2-11)
         if (tmpU32no2 > 0) {
           tmpU32no1 /= tmpU32no2; // Q11
@@ -2033,7 +2033,7 @@ int WebRtcNsx_ProcessCore(NsxInst_t* inst, short* speechFrame, short* speechFram
   inst->prevQMagn = qMagn;
   if (norm32no1 > 5) {
     for (i = 0; i < inst->magnLen; i++) {
-      inst->prevNoiseU32[i] = WEBRTC_SPL_LSHIFT_U32(noiseU32[i], norm32no1 - 5); // Q(qNoise+11)
+      inst->prevNoiseU32[i] = noiseU32[i] << (norm32no1 - 5); // Q(qNoise+11)
       inst->prevMagnU16[i] = magnU16[i]; // Q(qMagn)
     }
   } else {
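Note: a recurring pattern in the noise-suppression hunks above is that << is only defined for non-negative counts, so every replacement either sits in a branch that fixes the sign (e.g. tmpNoiseU32 = noiseU32[i] << -nShifts when nShifts < 0) or pairs with WEBRTC_SPL_RSHIFT_U32 in the other branch. A small sketch of that contract expressed as one sign-aware helper (ShiftU32 is an illustrative name, not a WebRTC function):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Shift left for positive counts, right for negative ones, matching
     * the if (shift < 0) RSHIFT / else << pattern in the hunks above. */
    static uint32_t ShiftU32(uint32_t x, int shift) {
      assert(shift > -32 && shift < 32);  /* keep the shift count defined */
      return shift >= 0 ? x << shift : x >> -shift;
    }

    int main(void) {
      assert(ShiftU32(0x00000040u, 6) == 0x00001000u);   /* Q align up */
      assert(ShiftU32(0x00001000u, -6) == 0x00000040u);  /* and back down */
      printf("sign-aware shift OK\n");
      return 0;
    }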
diff --git a/webrtc/modules/audio_processing/ns/nsx_core_c.c b/webrtc/modules/audio_processing/ns/nsx_core_c.c
index 4472583f1..2fce49b90 100644
--- a/webrtc/modules/audio_processing/ns/nsx_core_c.c
+++ b/webrtc/modules/audio_processing/ns/nsx_core_c.c
@@ -8,6 +8,8 @@
  * be found in the AUTHORS file in the root of the source tree.
  */
 
+#include <assert.h>
+
 #include "webrtc/modules/audio_processing/ns/include/noise_suppression_x.h"
 #include "webrtc/modules/audio_processing/ns/nsx_core.h"
 
@@ -39,9 +41,9 @@ void WebRtcNsx_SpeechNoiseProb(NsxInst_t* inst,
   for (i = 0; i < inst->magnLen; i++) {
     besselTmpFX32 = (int32_t)postLocSnr[i]; // Q11
     normTmp = WebRtcSpl_NormU32(postLocSnr[i]);
-    num = WEBRTC_SPL_LSHIFT_U32(postLocSnr[i], normTmp); // Q(11+normTmp)
+    num = postLocSnr[i] << normTmp; // Q(11+normTmp)
     if (normTmp > 10) {
-      den = WEBRTC_SPL_LSHIFT_U32(priorLocSnr[i], normTmp - 11); // Q(normTmp)
+      den = priorLocSnr[i] << (normTmp - 11); // Q(normTmp)
     } else {
       den = WEBRTC_SPL_RSHIFT_U32(priorLocSnr[i], 11 - normTmp); // Q(normTmp)
     }
@@ -121,11 +123,7 @@ void WebRtcNsx_SpeechNoiseProb(NsxInst_t* inst,
         //widthPrior = widthPrior * 2.0;
         nShifts++;
       }
-      tmp32no1 = (int32_t)WebRtcSpl_DivU32U16(WEBRTC_SPL_LSHIFT_U32(tmpU32no2,
-                                                                    nShifts), 25);
-      //Q14
-      tmpU32no1 = WebRtcSpl_DivU32U16(WEBRTC_SPL_LSHIFT_U32(tmpU32no2, nShifts),
-                                      25); //Q14
+      tmpU32no1 = WebRtcSpl_DivU32U16(tmpU32no2 << nShifts, 25); // Q14
       // compute indicator function: sigmoid map
       // FLOAT code
       // indicator1 = 0.5 * (tanh(sgnMap * widthPrior *
@@ -151,8 +149,8 @@ void WebRtcNsx_SpeechNoiseProb(NsxInst_t* inst,
     if (inst->featureSpecDiff) {
       normTmp = WEBRTC_SPL_MIN(20 - inst->stages,
                                WebRtcSpl_NormU32(inst->featureSpecDiff));
-      tmpU32no1 = WEBRTC_SPL_LSHIFT_U32(inst->featureSpecDiff, normTmp);
-      // Q(normTmp-2*stages)
+      assert(normTmp >= 0);
+      tmpU32no1 = inst->featureSpecDiff << normTmp; // Q(normTmp-2*stages)
       tmpU32no2 = WEBRTC_SPL_RSHIFT_U32(inst->timeAvgMagnEnergy,
                                         20 - inst->stages - normTmp);
       if (tmpU32no2 > 0) {
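Note: nsx_core_c.c and the MIPS variant below receive the same two changes: the duplicated division (the same quotient was computed twice, once into a seemingly leftover tmp32no1 and once into tmpU32no1) collapses into a single statement, and assert(normTmp >= 0) documents that WEBRTC_SPL_MIN(20 - inst->stages, WebRtcSpl_NormU32(...)) is only a valid shift count while non-negative. A toy version of that capped-normalization pattern (NormalizeWithCap is an illustrative name, not a WebRTC API):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Shift |x| up by its leading-zero count, but never by more than
     * |cap|, mirroring WEBRTC_SPL_MIN(cap, WebRtcSpl_NormU32(x)). */
    static uint32_t NormalizeWithCap(uint32_t x, int cap, int* applied) {
      int zeros = 0;
      int shift;
      assert(x > 0);
      while (zeros < 31 && (x << zeros) < 0x80000000u)
        zeros++;                          /* NormU32-like leading zeros */
      shift = zeros < cap ? zeros : cap;  /* WEBRTC_SPL_MIN(cap, zeros) */
      assert(shift >= 0);                 /* mirrors the patch's assert */
      *applied = shift;
      return x << shift;
    }

    int main(void) {
      int shift;
      uint32_t y = NormalizeWithCap(0x00000300u, 12, &shift);
      assert(shift == 12 && y == 0x00300000u);  /* cap wins over NormU32 */
      printf("capped normalization OK\n");
      return 0;
    }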
diff --git a/webrtc/modules/audio_processing/ns/nsx_core_mips.c b/webrtc/modules/audio_processing/ns/nsx_core_mips.c
index 067162754..47b1b5f6f 100644
--- a/webrtc/modules/audio_processing/ns/nsx_core_mips.c
+++ b/webrtc/modules/audio_processing/ns/nsx_core_mips.c
@@ -8,6 +8,8 @@
  * be found in the AUTHORS file in the root of the source tree.
  */
 
+#include <assert.h>
+
 #include "webrtc/modules/audio_processing/ns/include/noise_suppression_x.h"
 #include "webrtc/modules/audio_processing/ns/nsx_core.h"
 
@@ -155,11 +157,7 @@ void WebRtcNsx_SpeechNoiseProb(NsxInst_t* inst,
         //widthPrior = widthPrior * 2.0;
         nShifts++;
       }
-      tmp32no1 = (int32_t)WebRtcSpl_DivU32U16(WEBRTC_SPL_LSHIFT_U32(tmpU32no2,
-                                                                    nShifts), 25);
-      //Q14
-      tmpU32no1 = WebRtcSpl_DivU32U16(WEBRTC_SPL_LSHIFT_U32(tmpU32no2, nShifts),
-                                      25); //Q14
+      tmpU32no1 = WebRtcSpl_DivU32U16(tmpU32no2 << nShifts, 25); //Q14
      // compute indicator function: sigmoid map
       // FLOAT code
       // indicator1 = 0.5 * (tanh(sgnMap * widthPrior *
@@ -185,8 +183,8 @@ void WebRtcNsx_SpeechNoiseProb(NsxInst_t* inst,
     if (inst->featureSpecDiff) {
       normTmp = WEBRTC_SPL_MIN(20 - inst->stages,
                                WebRtcSpl_NormU32(inst->featureSpecDiff));
-      tmpU32no1 = WEBRTC_SPL_LSHIFT_U32(inst->featureSpecDiff, normTmp);
-      // Q(normTmp-2*stages)
+      assert(normTmp >= 0);
+      tmpU32no1 = inst->featureSpecDiff << normTmp; // Q(normTmp-2*stages)
       tmpU32no2 = WEBRTC_SPL_RSHIFT_U32(inst->timeAvgMagnEnergy,
                                         20 - inst->stages - normTmp);
       if (tmpU32no2 > 0) {