audio_processing: Removed usage of macro WEBRTC_SPL_MUL

WEBRTC_SPL_MUL is a trivial multiplication after casting both operands to int32_t. That cast is already taken care of by the compiler, which makes the macro unnecessary.
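
For context, the macro boils down to a plain cast-and-multiply, so the macro form and the plain expression compile to the same code for int32_t operands. A minimal sketch (the macro body is paraphrased from signal_processing_library.h and the helper function is made up, not part of WebRTC):

#include <assert.h>
#include <stdint.h>

/* Paraphrased definition of the removed macro; the exact header text may differ. */
#define WEBRTC_SPL_MUL(a, b) ((int32_t)((int32_t)(a) * (int32_t)(b)))

/* Hypothetical helper mirroring the AGC step "*inMicLevel *= 1.1" (1126/1024 in Q10). */
static int32_t scale_mic_level(int32_t in_mic_level) {
  int32_t with_macro = WEBRTC_SPL_MUL(1126, in_mic_level);  /* old form */
  int32_t plain = 1126 * in_mic_level;                      /* new form */
  assert(with_macro == plain);  /* identical as long as the product fits in int32_t */
  return plain >> 10;           /* scale back down, as the AGC code does */
}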

Affected components:
* AGC
* NSx

BUG=3348,3353
TESTED=locally on linux and trybots
R=kwiberg@webrtc.org

Review URL: https://webrtc-codereview.appspot.com/25429004

git-svn-id: http://webrtc.googlecode.com/svn/trunk@7330 4adac7df-926f-26a2-2b94-8c16560cd09d
bjornv@webrtc.org
2014-09-30 09:29:28 +00:00
parent 750423c722
commit 23ec8372a6
4 changed files with 32 additions and 41 deletions

View File

@@ -649,7 +649,7 @@ void WebRtcAgc_ZeroCtrl(Agc_t *stt, int32_t *inMicLevel, int32_t *env)
if (*inMicLevel < midVal)
{
/* *inMicLevel *= 1.1; */
- tmp32 = WEBRTC_SPL_MUL(1126, *inMicLevel);
+ tmp32 = 1126 * *inMicLevel;
*inMicLevel = WEBRTC_SPL_RSHIFT_W32(tmp32, 10);
/* Reduces risk of a muted mic repeatedly triggering excessive levels due
* to zero signal detection. */
@@ -864,7 +864,7 @@ int32_t WebRtcAgc_ProcessAnalog(void *state, int32_t inMicLevel,
* Rxx160_LP is adjusted down because it is so slow it could
* cause the AGC to make wrong decisions. */
/* stt->Rxx160_LPw32 *= 0.875; */
- stt->Rxx160_LPw32 = WEBRTC_SPL_MUL(WEBRTC_SPL_RSHIFT_W32(stt->Rxx160_LPw32, 3), 7);
+ stt->Rxx160_LPw32 = (stt->Rxx160_LPw32 / 8) * 7;
stt->zeroCtrlMax = stt->micVol;
@@ -970,7 +970,7 @@ int32_t WebRtcAgc_ProcessAnalog(void *state, int32_t inMicLevel,
{
stt->activeSpeech += 2;
tmp32 = WEBRTC_SPL_RSHIFT_W32(stt->Rxx16_LPw32Max, 3);
- stt->Rxx160_LPw32 = WEBRTC_SPL_MUL(tmp32, RXX_BUFFER_LEN);
+ stt->Rxx160_LPw32 = tmp32 * RXX_BUFFER_LEN;
}
tmp32 = WEBRTC_SPL_RSHIFT_W32(stt->Rxx160w32 - stt->Rxx160_LPw32, kAlphaLongTerm);
@@ -989,7 +989,7 @@ int32_t WebRtcAgc_ProcessAnalog(void *state, int32_t inMicLevel,
/* Lower the recording level */
/* Multiply by 0.828125 which corresponds to decreasing ~0.8dB */
tmp32 = WEBRTC_SPL_RSHIFT_W32(stt->Rxx160_LPw32, 6);
- stt->Rxx160_LPw32 = WEBRTC_SPL_MUL(tmp32, 53);
+ stt->Rxx160_LPw32 = tmp32 * 53;
/* Reduce the max gain to avoid excessive oscillation
* (but never drop below the maximum analog level).
@@ -1040,7 +1040,7 @@ int32_t WebRtcAgc_ProcessAnalog(void *state, int32_t inMicLevel,
stt->msTooHigh = 0;
/* Multiply by 0.828125 which corresponds to decreasing ~0.8dB */
tmp32 = WEBRTC_SPL_RSHIFT_W32(stt->Rxx160_LPw32, 6);
- stt->Rxx160_LPw32 = WEBRTC_SPL_MUL(tmp32, 53);
+ stt->Rxx160_LPw32 = tmp32 * 53;
/* Reduce the max gain to avoid excessive oscillation
* (but never drop below the maximum analog level).
@@ -1105,7 +1105,7 @@ int32_t WebRtcAgc_ProcessAnalog(void *state, int32_t inMicLevel,
/* stt->Rxx160_LPw32 *= 1.047 [~0.2 dB]; */
tmp32 = WEBRTC_SPL_RSHIFT_W32(stt->Rxx160_LPw32, 6);
- stt->Rxx160_LPw32 = WEBRTC_SPL_MUL(tmp32, 67);
+ stt->Rxx160_LPw32 = tmp32 * 67;
tmp32 = inMicLevelTmp - stt->minLevel;
tmpU32 = ((uint32_t)weightFIX * (uint32_t)(inMicLevelTmp - stt->minLevel));
@@ -1166,7 +1166,7 @@ int32_t WebRtcAgc_ProcessAnalog(void *state, int32_t inMicLevel,
/* stt->Rxx160_LPw32 *= 1.047 [~0.2 dB]; */
tmp32 = WEBRTC_SPL_RSHIFT_W32(stt->Rxx160_LPw32, 6);
- stt->Rxx160_LPw32 = WEBRTC_SPL_MUL(tmp32, 67);
+ stt->Rxx160_LPw32 = tmp32 * 67;
tmp32 = inMicLevelTmp - stt->minLevel;
tmpU32 = ((uint32_t)weightFIX * (uint32_t)(inMicLevelTmp - stt->minLevel));
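
The constants in the hunks above are fixed-point approximations of the decimal factors noted in the comments: 0.875 = 7/8, 0.828125 = 53/64 and 1.046875 = 67/64. A minimal standalone sketch of that pattern (function names are made up; this is not code from the AGC sources):

#include <stdint.h>
#include <stdio.h>

/* Scaling applied to the (non-negative) energy estimate Rxx160_LPw32:
 *   0.875    = 7/8   -> (x / 8) * 7
 *   0.828125 = 53/64 -> (x >> 6) * 53   (about -0.8 dB on a power value)
 *   1.046875 = 67/64 -> (x >> 6) * 67   (about +0.2 dB on a power value) */
static int32_t scale_7_8(int32_t x)   { return (x / 8) * 7; }
static int32_t scale_53_64(int32_t x) { return (x >> 6) * 53; }
static int32_t scale_67_64(int32_t x) { return (x >> 6) * 67; }

int main(void) {
  int32_t energy = 1 << 20;  /* arbitrary example value */
  printf("%d %d %d\n", (int)scale_7_8(energy), (int)scale_53_64(energy),
         (int)scale_67_64(energy));
  return 0;
}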

View File

@@ -218,11 +218,11 @@ int32_t WebRtcAgc_CalculateGainTable(int32_t *gainTable, // Q16
}
if (y32 > 39000)
{
- tmp32 = WEBRTC_SPL_MUL(y32 >> 1, kLog10) + 4096; // in Q27
+ tmp32 = (y32 >> 1) * kLog10 + 4096; // in Q27
tmp32 = WEBRTC_SPL_RSHIFT_W32(tmp32, 13); // in Q14
} else
{
- tmp32 = WEBRTC_SPL_MUL(y32, kLog10) + 8192; // in Q28
+ tmp32 = y32 * kLog10 + 8192; // in Q28
tmp32 = WEBRTC_SPL_RSHIFT_W32(tmp32, 14); // in Q14
}
tmp32 += 16 << 14; // in Q14 (Make sure final output is in Q16)
@@ -463,7 +463,7 @@ int32_t WebRtcAgc_ProcessDigital(DigitalAgc_t *stt, const int16_t *in_near,
}
tmp32 = (cur_level << zeros) & 0x7FFFFFFF;
frac = (int16_t)WEBRTC_SPL_RSHIFT_W32(tmp32, 19); // Q12
- tmp32 = WEBRTC_SPL_MUL((stt->gainTable[zeros-1] - stt->gainTable[zeros]), frac);
+ tmp32 = (stt->gainTable[zeros-1] - stt->gainTable[zeros]) * frac;
gains[k + 1] = stt->gainTable[zeros] + WEBRTC_SPL_RSHIFT_W32(tmp32, 12);
#ifdef WEBRTC_AGC_DEBUG_DUMP
if (k == 0) {
@@ -518,10 +518,10 @@ int32_t WebRtcAgc_ProcessDigital(DigitalAgc_t *stt, const int16_t *in_near,
{
// To prevent wraparound
tmp32 = WEBRTC_SPL_RSHIFT_W32((gains[k+1] - stt->gainTable[0]), 8);
- tmp32 = WEBRTC_SPL_MUL(tmp32, (178 + gain_adj));
+ tmp32 *= 178 + gain_adj;
} else
{
- tmp32 = WEBRTC_SPL_MUL((gains[k+1] - stt->gainTable[0]), (178 + gain_adj));
+ tmp32 = (gains[k+1] - stt->gainTable[0]) * (178 + gain_adj);
tmp32 = WEBRTC_SPL_RSHIFT_W32(tmp32, 8);
}
gains[k + 1] = stt->gainTable[0] + tmp32;
@@ -538,7 +538,7 @@ int32_t WebRtcAgc_ProcessDigital(DigitalAgc_t *stt, const int16_t *in_near,
zeros = 16 - WebRtcSpl_NormW32(gains[k + 1]);
}
gain32 = WEBRTC_SPL_RSHIFT_W32(gains[k+1], zeros) + 1;
- gain32 = WEBRTC_SPL_MUL(gain32, gain32);
+ gain32 *= gain32;
// check for overflow
while (AGC_MUL32(WEBRTC_SPL_RSHIFT_W32(env[k], 12) + 1, gain32)
> WEBRTC_SPL_SHIFT_W32((int32_t)32767, 2 * (1 - zeros + 10)))
@@ -547,13 +547,13 @@ int32_t WebRtcAgc_ProcessDigital(DigitalAgc_t *stt, const int16_t *in_near,
if (gains[k + 1] > 8388607)
{
// Prevent wrap around
- gains[k + 1] = WEBRTC_SPL_MUL(WEBRTC_SPL_RSHIFT_W32(gains[k+1], 8), 253);
+ gains[k + 1] = (gains[k+1] / 256) * 253;
} else
{
- gains[k + 1] = WEBRTC_SPL_RSHIFT_W32(WEBRTC_SPL_MUL(gains[k+1], 253), 8);
+ gains[k + 1] = (gains[k+1] * 253) / 256;
}
gain32 = WEBRTC_SPL_RSHIFT_W32(gains[k+1], zeros) + 1;
- gain32 = WEBRTC_SPL_MUL(gain32, gain32);
+ gain32 *= gain32;
}
}
// gain reductions should be done 1 ms earlier than gain increases
@@ -575,7 +575,7 @@ int32_t WebRtcAgc_ProcessDigital(DigitalAgc_t *stt, const int16_t *in_near,
for (n = 0; n < L; n++)
{
// For lower band
- tmp32 = WEBRTC_SPL_MUL((int32_t)out[n], WEBRTC_SPL_RSHIFT_W32(gain32 + 127, 7));
+ tmp32 = out[n] * ((gain32 + 127) >> 7);
out_tmp = WEBRTC_SPL_RSHIFT_W32(tmp32 , 16);
if (out_tmp > 4095)
{
@@ -585,14 +585,13 @@ int32_t WebRtcAgc_ProcessDigital(DigitalAgc_t *stt, const int16_t *in_near,
out[n] = (int16_t)-32768;
} else
{
- tmp32 = WEBRTC_SPL_MUL((int32_t)out[n], WEBRTC_SPL_RSHIFT_W32(gain32, 4));
+ tmp32 = out[n] * (gain32 >> 4);
out[n] = (int16_t)WEBRTC_SPL_RSHIFT_W32(tmp32 , 16);
}
// For higher band
if (FS == 32000)
{
- tmp32 = WEBRTC_SPL_MUL((int32_t)out_H[n],
- WEBRTC_SPL_RSHIFT_W32(gain32 + 127, 7));
+ tmp32 = out_H[n] * ((gain32 + 127) >> 7);
out_tmp = WEBRTC_SPL_RSHIFT_W32(tmp32 , 16);
if (out_tmp > 4095)
{
@@ -602,8 +601,7 @@ int32_t WebRtcAgc_ProcessDigital(DigitalAgc_t *stt, const int16_t *in_near,
out_H[n] = (int16_t)-32768;
} else
{
- tmp32 = WEBRTC_SPL_MUL((int32_t)out_H[n],
- WEBRTC_SPL_RSHIFT_W32(gain32, 4));
+ tmp32 = out_H[n] * (gain32 >> 4);
out_H[n] = (int16_t)WEBRTC_SPL_RSHIFT_W32(tmp32 , 16);
}
}
@@ -620,14 +618,12 @@ int32_t WebRtcAgc_ProcessDigital(DigitalAgc_t *stt, const int16_t *in_near,
for (n = 0; n < L; n++)
{
// For lower band
- tmp32 = WEBRTC_SPL_MUL((int32_t)out[k * L + n],
- WEBRTC_SPL_RSHIFT_W32(gain32, 4));
+ tmp32 = out[k * L + n] * (gain32 >> 4);
out[k * L + n] = (int16_t)WEBRTC_SPL_RSHIFT_W32(tmp32 , 16);
// For higher band
if (FS == 32000)
{
- tmp32 = WEBRTC_SPL_MUL((int32_t)out_H[k * L + n],
- WEBRTC_SPL_RSHIFT_W32(gain32, 4));
+ tmp32 = out_H[k * L + n] * (gain32 >> 4);
out_H[k * L + n] = (int16_t)WEBRTC_SPL_RSHIFT_W32(tmp32 , 16);
}
gain32 += delta;
@@ -704,10 +700,9 @@ int16_t WebRtcAgc_ProcessVad(AgcVad_t *state, // (i) VAD state
for (k = 0; k < 4; k++)
{
out = buf2[k] + HPstate;
- tmp32 = WEBRTC_SPL_MUL(600, out);
+ tmp32 = 600 * out;
HPstate = (int16_t)(WEBRTC_SPL_RSHIFT_W32(tmp32, 10) - buf2[k]);
- tmp32 = WEBRTC_SPL_MUL(out, out);
- nrg += WEBRTC_SPL_RSHIFT_W32(tmp32, 6);
+ nrg += WEBRTC_SPL_RSHIFT_W32(out * out, 6);
}
}
state->HPstate = HPstate;
@@ -754,7 +749,7 @@ int16_t WebRtcAgc_ProcessVad(AgcVad_t *state, // (i) VAD state
// update short-term estimate of variance in energy level (Q8)
tmp32 = WEBRTC_SPL_RSHIFT_W32(WEBRTC_SPL_MUL_16_16(dB, dB), 12);
- tmp32 += WEBRTC_SPL_MUL(state->varianceShortTerm, 15);
+ tmp32 += state->varianceShortTerm * 15;
state->varianceShortTerm = WEBRTC_SPL_RSHIFT_W32(tmp32, 4);
// update short-term estimate of standard deviation in energy level (Q10)
@@ -769,7 +764,7 @@ int16_t WebRtcAgc_ProcessVad(AgcVad_t *state, // (i) VAD state
// update long-term estimate of variance in energy level (Q8)
tmp32 = WEBRTC_SPL_RSHIFT_W32(WEBRTC_SPL_MUL_16_16(dB, dB), 12);
- tmp32 += WEBRTC_SPL_MUL(state->varianceLongTerm, state->counter);
+ tmp32 += state->varianceLongTerm * state->counter;
state->varianceLongTerm = WebRtcSpl_DivW32W16(
tmp32, WebRtcSpl_AddSatW16(state->counter, 1));
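
The gain interpolation and squaring hunks above keep the same Q-format arithmetic; only the macro wrapper is gone. A rough, self-contained illustration of the interpolation pattern (the Q16 table format and Q12 fraction follow the comments in the diff, but the function and its arguments are invented):

#include <stdint.h>

/* Interpolate between two adjacent Q16 gain-table entries using a Q12
 * fraction, mirroring the WebRtcAgc_ProcessDigital pattern where
 * gains[k + 1] = gainTable[zeros] plus the Q12-weighted difference of the
 * two neighbouring table entries, shifted back down by 12.
 * Assumes the intermediate product fits in int32_t, as the original code does. */
static int32_t interp_gain_q16(int32_t gain_a_q16,  /* role of gainTable[zeros - 1] */
                               int32_t gain_b_q16,  /* role of gainTable[zeros]     */
                               int16_t frac_q12) {
  int32_t diff = gain_a_q16 - gain_b_q16;         /* Q16 */
  return gain_b_q16 + ((diff * frac_q12) >> 12);  /* still Q16 */
}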

View File

@@ -895,8 +895,8 @@ void WebRtcNsx_FeatureParameterExtraction(NsxInst_t* inst, int flag) {
avgHistLrtComplFX += tmp32;
avgSquareHistLrtFX += tmp32 * j;
}
- fluctLrtFX = WEBRTC_SPL_MUL(avgSquareHistLrtFX, numHistLrt);
- fluctLrtFX -= WEBRTC_SPL_MUL(avgHistLrtFX, avgHistLrtComplFX);
+ fluctLrtFX = avgSquareHistLrtFX * numHistLrt -
+ avgHistLrtFX * avgHistLrtComplFX;
thresFluctLrtFX = THRES_FLUCT_LRT * numHistLrt;
// get threshold for LRT feature:
tmpU32 = (FACTOR_1_LRT_DIFF * (uint32_t)avgHistLrtFX);
@@ -1139,7 +1139,7 @@ void WebRtcNsx_ComputeSpectralDifference(NsxInst_t* inst, uint16_t* magnIn) {
tmp32no1 = tmp32no2 * tmp16no1; // Q(prevQMagn+qMagn)
covMagnPauseFX += tmp32no1; // Q(prevQMagn+qMagn)
tmp32no1 = WEBRTC_SPL_RSHIFT_W32(tmp32no2, nShifts); // Q(prevQMagn-minPause)
- varPauseUFX += (uint32_t)WEBRTC_SPL_MUL(tmp32no1, tmp32no1); // Q(2*(prevQMagn-minPause))
+ varPauseUFX += tmp32no1 * tmp32no1; // Q(2*(prevQMagn-minPause))
}
//update of average magnitude spectrum: Q(-2*stages) and averaging replaced by shifts
inst->curAvgMagnEnergy += WEBRTC_SPL_RSHIFT_U32(inst->magnEnergy, 2 * inst->normData
@@ -1426,8 +1426,7 @@ void WebRtcNsx_DataAnalysis(NsxInst_t* inst, short* speechFrame, uint16_t* magnU
// Calculate and update pinkNoiseExp. Result in Q14.
tmp_2_w32 = WEBRTC_SPL_MUL_16_U16(sum_log_i, sum_log_magn_u16); // Q(14-zeros)
tmp_1_w32 = WEBRTC_SPL_RSHIFT_W32(sum_log_i_log_magn, 3 + zeros);
- tmp_1_w32 = WEBRTC_SPL_MUL((int32_t)(inst->magnLen - kStartBand),
- tmp_1_w32);
+ tmp_1_w32 *= inst->magnLen - kStartBand;
tmp_2_w32 -= tmp_1_w32; // Q(14-zeros)
if (tmp_2_w32 > 0) {
// If the exponential parameter is negative force it to zero, which means a

View File

@@ -58,8 +58,7 @@ void WebRtcNsx_SpeechNoiseProb(NsxInst_t* inst,
// Here, LRT_TAVG = 0.5
zeros = WebRtcSpl_NormU32(priorLocSnr[i]);
frac32 = (int32_t)(((priorLocSnr[i] << zeros) & 0x7FFFFFFF) >> 19);
- tmp32 = WEBRTC_SPL_MUL(frac32, frac32);
- tmp32 = WEBRTC_SPL_RSHIFT_W32(WEBRTC_SPL_MUL(tmp32, -43), 19);
+ tmp32 = (frac32 * frac32 * -43) >> 19;
tmp32 += WEBRTC_SPL_MUL_16_16_RSFT((int16_t)frac32, 5412, 12);
frac32 = tmp32 + 37;
// tmp32 = log2(priorLocSnr[i])
@@ -222,9 +221,7 @@ void WebRtcNsx_SpeechNoiseProb(NsxInst_t* inst,
// nonSpeechProbFinal[i] = inst->priorNonSpeechProb /
// (inst->priorNonSpeechProb + invLrt);
if (inst->logLrtTimeAvgW32[i] < 65300) {
- tmp32no1 = WEBRTC_SPL_RSHIFT_W32(WEBRTC_SPL_MUL(
- inst->logLrtTimeAvgW32[i], 23637),
- 14); // Q12
+ tmp32no1 = (inst->logLrtTimeAvgW32[i] * 23637) >> 14; // Q12
intPart = (int16_t)WEBRTC_SPL_RSHIFT_W32(tmp32no1, 12);
if (intPart < -8) {
intPart = -8;