Refactor audio_processing/ns: Removes usage of macro WEBRTC_SPL_MUL_16_16_RSFT

The macro is defined as
#define WEBRTC_SPL_MUL_16_16_RSFT(a, b, c) \
(WEBRTC_SPL_MUL_16_16(a, b) >> (c))

where the latter macro is defined in C as
#define WEBRTC_SPL_MUL_16_16(a, b) \
((int32_t) (((int16_t)(a)) * ((int16_t)(b))))
(For definitions on ARMv7 and MIPS, see common_audio/signal_processing/include/spl_inl_{armv7,mips}.h)
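For background, a minimal standalone C sketch (not part of this change) of why the inner casts are redundant: when both operands are already int16_t, the usual arithmetic conversions promote them to int before the multiply, so the plain expression computes the same 32-bit product as the macro.

#include <stdint.h>
#include <stdio.h>

int main(void) {
  int16_t a = 12345;
  int16_t b = -321;
  /* Macro expansion: explicit casts on the operands, plus a cast of the product. */
  int32_t via_macro = (int32_t)(((int16_t)(a)) * ((int16_t)(b))) >> 3;
  /* Replacement: a and b are promoted to int, so no casts are needed. */
  int32_t via_plain = (a * b) >> 3;
  printf("%d %d\n", via_macro, via_plain);  /* Prints the same value twice. */
  return 0;
}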

The replacement consists of
- avoiding casts to int16_t if the inputs already are int16_t
- adding an explicit cast to <type> if the result is assigned to <type> (other than int or int32_t), as illustrated in the sketch below
- minor cleanups, such as removing unnecessary parentheses, and style changes
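The assignment pattern then looks as follows (a sketch with hypothetical int16_t variables gain, prob, and factor, chosen for illustration; not lifted from the diff):

/* Before: operand casts hidden in the macro, result cast to int16_t. */
gain = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(prob, factor, 14);
/* After: the product is an int, so only the narrowing assignment keeps
   an explicit cast. */
gain = (int16_t)((prob * factor) >> 14);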

BUG=3348,3353
TESTED=Locally on Mac and trybots
R=kwiberg@webrtc.org

Review URL: https://webrtc-codereview.appspot.com/41149004

Cr-Commit-Position: refs/heads/master@{#8666}
git-svn-id: http://webrtc.googlecode.com/svn/trunk@8666 4adac7df-926f-26a2-2b94-8c16560cd09d
Author: bjornv@webrtc.org
Date:   2015-03-10 07:09:30 +00:00
Parent: b38b009d21
Commit: 7ef8b12a3b
4 changed files with 38 additions and 42 deletions


@@ -366,7 +366,7 @@ static void NoiseEstimationC(NoiseSuppressionFixedC* inst,
log2 = (int16_t)(((31 - zeros) << 8)
+ WebRtcNsx_kLogTableFrac[frac]);
// log2(magn(i))*log(2)
- lmagn[i] = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(log2, log2_const, 15);
+ lmagn[i] = (int16_t)((log2 * log2_const) >> 15);
// + log(2^stages)
lmagn[i] += logval;
} else {
@@ -401,7 +401,7 @@ static void NoiseEstimationC(NoiseSuppressionFixedC* inst,
}
// update log quantile estimate
- tmp16 = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(delta, countDiv, 14);
+ tmp16 = (int16_t)((delta * countDiv) >> 14);
if (lmagn[i] > inst->noiseEstLogQuantile[offset + i]) {
// +=QUANTILE*delta/(inst->counter[s]+1) QUANTILE=0.25, =1 in Q2
// CounterDiv=1/(inst->counter[s]+1) in Q15
@@ -410,7 +410,8 @@ static void NoiseEstimationC(NoiseSuppressionFixedC* inst,
} else {
tmp16 += 1;
// *(1-QUANTILE), in Q2 QUANTILE=0.25, 1-0.25=0.75=3 in Q2
- tmp16no2 = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(tmp16 / 2, 3, 1);
+ // TODO(bjornv): investigate why we need to truncate twice.
+ tmp16no2 = (int16_t)((tmp16 / 2) * 3 / 2);
inst->noiseEstLogQuantile[offset + i] -= tmp16no2;
if (inst->noiseEstLogQuantile[offset + i] < logval) {
// This is the smallest fixed point representation we can
@@ -456,10 +457,10 @@ static void PrepareSpectrumC(NoiseSuppressionFixedC* inst, int16_t* freq_buf) {
int i = 0, j = 0;
for (i = 0; i < inst->magnLen; i++) {
- inst->real[i] = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(inst->real[i],
-     (int16_t)(inst->noiseSupFilter[i]), 14); // Q(normData-stages)
- inst->imag[i] = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(inst->imag[i],
-     (int16_t)(inst->noiseSupFilter[i]), 14); // Q(normData-stages)
+ inst->real[i] = (int16_t)((inst->real[i] *
+     (int16_t)(inst->noiseSupFilter[i])) >> 14); // Q(normData-stages)
+ inst->imag[i] = (int16_t)((inst->imag[i] *
+     (int16_t)(inst->noiseSupFilter[i])) >> 14); // Q(normData-stages)
}
freq_buf[0] = inst->real[0];
@@ -1366,11 +1367,10 @@ void WebRtcNsx_DataAnalysis(NoiseSuppressionFixedC* inst,
if (inst->fs == 8000) {
// Adjust values to shorter blocks in narrow band.
tmp_1_w32 = (int32_t)matrix_determinant;
- tmp_1_w32 += WEBRTC_SPL_MUL_16_16_RSFT(kSumLogIndex[65], sum_log_i, 9);
- tmp_1_w32 -= WEBRTC_SPL_MUL_16_16_RSFT(kSumLogIndex[65], kSumLogIndex[65], 10);
+ tmp_1_w32 += (kSumLogIndex[65] * sum_log_i) >> 9;
+ tmp_1_w32 -= (kSumLogIndex[65] * kSumLogIndex[65]) >> 10;
tmp_1_w32 -= (int32_t)sum_log_i_square << 4;
- tmp_1_w32 -= WEBRTC_SPL_MUL_16_16_RSFT((int16_t)
-     (inst->magnLen - kStartBand), kSumSquareLogIndex[65], 2);
+ tmp_1_w32 -= ((inst->magnLen - kStartBand) * kSumSquareLogIndex[65]) >> 2;
matrix_determinant = (int16_t)tmp_1_w32;
sum_log_i -= kSumLogIndex[65]; // Q5
sum_log_i_square -= kSumSquareLogIndex[65]; // Q2
@@ -1488,10 +1488,9 @@ void WebRtcNsx_DataSynthesis(NoiseSuppressionFixedC* inst, short* outFrame) {
//combine both scales with speech/noise prob: note prior (priorSpeechProb) is not frequency dependent
// factor = inst->priorSpeechProb*factor1 + (1.0-inst->priorSpeechProb)*factor2; // original code
- tmp16no1 = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(16384 - inst->priorNonSpeechProb,
-     gainFactor1, 14); // Q13 16384 = Q14(1.0)
- tmp16no2 = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(inst->priorNonSpeechProb,
-     gainFactor2, 14); // Q13;
+ tmp16no1 = (int16_t)(((16384 - inst->priorNonSpeechProb) * gainFactor1) >>
+     14); // in Q13, where 16384 = Q14(1.0)
+ tmp16no2 = (int16_t)((inst->priorNonSpeechProb * gainFactor2) >> 14);
gainFactor = tmp16no1 + tmp16no2; // Q13
} // out of flag_gain_map==1
@@ -2100,7 +2099,7 @@ void WebRtcNsx_ProcessCore(NoiseSuppressionFixedC* inst,
gainTimeDomainHB = (gainModHB << 1) + (avgFilterGainHB >> 1); // Q14
} else {
// "gain_time_domain = 0.25 * gain_mod + 0.75 * agv_filter_gain;"
- gainTimeDomainHB = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(3, avgFilterGainHB, 2); // 3 = Q2(0.75); Q14
+ gainTimeDomainHB = (int16_t)((3 * avgFilterGainHB) >> 2); // 3 = Q2(0.75)
gainTimeDomainHB += gainModHB; // Q14
}
//make sure gain is within flooring range
@@ -2111,10 +2110,8 @@ void WebRtcNsx_ProcessCore(NoiseSuppressionFixedC* inst,
//apply gain
for (i = 0; i < num_high_bands; ++i) {
for (j = 0; j < inst->blockLen10ms; j++) {
- outFrameHB[i][j] = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(
-     gainTimeDomainHB,
-     inst->dataBufHBFX[i][j],
-     14); // Q0
+ outFrameHB[i][j] = (int16_t)((gainTimeDomainHB *
+     inst->dataBufHBFX[i][j]) >> 14); // Q0
}
}
} // end of H band gain computation


@@ -59,7 +59,7 @@ void WebRtcNsx_SpeechNoiseProb(NoiseSuppressionFixedC* inst,
zeros = WebRtcSpl_NormU32(priorLocSnr[i]);
frac32 = (int32_t)(((priorLocSnr[i] << zeros) & 0x7FFFFFFF) >> 19);
tmp32 = (frac32 * frac32 * -43) >> 19;
- tmp32 += WEBRTC_SPL_MUL_16_16_RSFT((int16_t)frac32, 5412, 12);
+ tmp32 += ((int16_t)frac32 * 5412) >> 12;
frac32 = tmp32 + 37;
// tmp32 = log2(priorLocSnr[i])
tmp32 = (int32_t)(((31 - zeros) << 12) + frac32) - (11 << 12); // Q12
@@ -100,7 +100,7 @@ void WebRtcNsx_SpeechNoiseProb(NoiseSuppressionFixedC* inst,
tmp16no2 = kIndicatorTable[tableIndex];
tmp16no1 = kIndicatorTable[tableIndex + 1] - kIndicatorTable[tableIndex];
frac = (int16_t)(tmp32no1 & 0x00003fff); // Q14
- tmp16no2 += (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(tmp16no1, frac, 14);
+ tmp16no2 += (int16_t)((tmp16no1 * frac) >> 14);
if (tmpIndFX == 0) {
tmpIndFX = 8192 - tmp16no2; // Q14
} else {
@@ -132,7 +132,7 @@ void WebRtcNsx_SpeechNoiseProb(NoiseSuppressionFixedC* inst,
tmp16no2 = kIndicatorTable[tableIndex];
tmp16no1 = kIndicatorTable[tableIndex + 1] - kIndicatorTable[tableIndex];
frac = (int16_t)(tmpU32no1 & 0x00003fff); // Q14
- tmp16no2 += (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(tmp16no1, frac, 14);
+ tmp16no2 += (int16_t)((tmp16no1 * frac) >> 14);
if (tmpIndFX) {
tmpIndFX = 8192 + tmp16no2; // Q14
} else {
@@ -202,8 +202,7 @@ void WebRtcNsx_SpeechNoiseProb(NoiseSuppressionFixedC* inst,
// inst->priorNonSpeechProb += PRIOR_UPDATE *
// (indPriorNonSpeech - inst->priorNonSpeechProb);
tmp16 = indPriorFX16 - inst->priorNonSpeechProb; // Q14
- inst->priorNonSpeechProb += (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(
-     PRIOR_UPDATE_Q14, tmp16, 14); // Q14
+ inst->priorNonSpeechProb += (int16_t)((PRIOR_UPDATE_Q14 * tmp16) >> 14);
//final speech probability: combine prior model with LR factor:
@@ -229,7 +228,7 @@ void WebRtcNsx_SpeechNoiseProb(NoiseSuppressionFixedC* inst,
// Quadratic approximation of 2^frac
tmp32no2 = (frac * frac * 44) >> 19; // Q12.
- tmp32no2 += WEBRTC_SPL_MUL_16_16_RSFT(frac, 84, 7); // Q12
+ tmp32no2 += (frac * 84) >> 7; // Q12
invLrtFX = (1 << (8 + intPart)) +
WEBRTC_SPL_SHIFT_W32(tmp32no2, intPart - 4); // Q8


@@ -134,7 +134,7 @@ void WebRtcNsx_SpeechNoiseProb(NoiseSuppressionFixedC* inst,
tmp16no2 = kIndicatorTable[tableIndex];
tmp16no1 = kIndicatorTable[tableIndex + 1] - kIndicatorTable[tableIndex];
frac = (int16_t)(tmp32no1 & 0x00003fff); // Q14
- tmp16no2 += (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(tmp16no1, frac, 14);
+ tmp16no2 += (int16_t)((tmp16no1 * frac) >> 14);
if (tmpIndFX == 0) {
tmpIndFX = 8192 - tmp16no2; // Q14
} else {
@@ -166,7 +166,7 @@ void WebRtcNsx_SpeechNoiseProb(NoiseSuppressionFixedC* inst,
tmp16no2 = kIndicatorTable[tableIndex];
tmp16no1 = kIndicatorTable[tableIndex + 1] - kIndicatorTable[tableIndex];
frac = (int16_t)(tmpU32no1 & 0x00003fff); // Q14
- tmp16no2 += (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(tmp16no1, frac, 14);
+ tmp16no2 += (int16_t)((tmp16no1 * frac) >> 14);
if (tmpIndFX) {
tmpIndFX = 8192 + tmp16no2; // Q14
} else {
@@ -236,8 +236,7 @@ void WebRtcNsx_SpeechNoiseProb(NoiseSuppressionFixedC* inst,
// inst->priorNonSpeechProb += PRIOR_UPDATE *
// (indPriorNonSpeech - inst->priorNonSpeechProb);
tmp16 = indPriorFX16 - inst->priorNonSpeechProb; // Q14
- inst->priorNonSpeechProb += (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(
-     PRIOR_UPDATE_Q14, tmp16, 14); // Q14
+ inst->priorNonSpeechProb += (int16_t)((PRIOR_UPDATE_Q14 * tmp16) >> 14);
//final speech probability: combine prior model with LR factor:


@@ -168,7 +168,7 @@ void WebRtcNsx_NoiseEstimationNeon(NoiseSuppressionFixedC* inst,
log2 = (int16_t)(((31 - zeros) << 8)
+ WebRtcNsx_kLogTableFrac[frac]);
// log2(magn(i))*log(2)
- lmagn[i] = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(log2, log2_const, 15);
+ lmagn[i] = (int16_t)((log2 * log2_const) >> 15);
// + log(2^stages)
lmagn[i] += logval;
} else {
@@ -226,7 +226,7 @@ void WebRtcNsx_NoiseEstimationNeon(NoiseSuppressionFixedC* inst,
// Update log quantile estimate
- // tmp16 = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(delta, countDiv, 14);
+ // tmp16 = (int16_t)((delta * countDiv) >> 14);
tmp32x4 = vmull_s16(vld1_s16(&deltaBuff[0]), countDiv_16x4);
tmp16x4_1 = vshrn_n_s32(tmp32x4, 14);
tmp32x4 = vmull_s16(vld1_s16(&deltaBuff[4]), countDiv_16x4);
@@ -247,11 +247,11 @@ void WebRtcNsx_NoiseEstimationNeon(NoiseSuppressionFixedC* inst,
// tmp16_1 = (Word16)(tmp16>>1);
tmp16x8_0 = vrshrq_n_s16(tmp16x8_0, 1);
- // tmp16_2 = (Word16)WEBRTC_SPL_MUL_16_16_RSFT(tmp16_1,3,1);
+ // tmp16_2 = (int16_t)((tmp16_1 * 3) >> 1);
tmp32x4 = vmull_s16(vget_low_s16(tmp16x8_0), Q3_16x4);
tmp16x4_1 = vshrn_n_s32(tmp32x4, 1);
- // tmp16_2 = (Word16)WEBRTC_SPL_MUL_16_16_RSFT(tmp16_1,3,1);
+ // tmp16_2 = (int16_t)((tmp16_1 * 3) >> 1);
tmp32x4 = vmull_s16(vget_high_s16(tmp16x8_0), Q3_16x4);
tmp16x4_0 = vshrn_n_s32(tmp32x4, 1);
@@ -299,7 +299,7 @@ void WebRtcNsx_NoiseEstimationNeon(NoiseSuppressionFixedC* inst,
}
}
// update log quantile estimate
- tmp16 = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(delta, countDiv, 14);
+ tmp16 = (int16_t)((delta * countDiv) >> 14);
if (lmagn[i] > inst->noiseEstLogQuantile[offset + i]) {
// +=QUANTILE*delta/(inst->counter[s]+1) QUANTILE=0.25, =1 in Q2
// CounterDiv=1/(inst->counter[s]+1) in Q15
@@ -308,7 +308,8 @@ void WebRtcNsx_NoiseEstimationNeon(NoiseSuppressionFixedC* inst,
} else {
tmp16 += 1;
// *(1-QUANTILE), in Q2 QUANTILE=0.25, 1-0.25=0.75=3 in Q2
- tmp16no2 = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(tmp16 / 2, 3, 1);
+ // TODO(bjornv): investigate why we need to truncate twice.
+ tmp16no2 = (int16_t)((tmp16 / 2) * 3 / 2);
inst->noiseEstLogQuantile[offset + i] -= tmp16no2;
if (inst->noiseEstLogQuantile[offset + i] < logval) {
// logval is the smallest fixed point representation we can have.
@@ -360,10 +361,10 @@ void WebRtcNsx_PrepareSpectrumNeon(NoiseSuppressionFixedC* inst,
// Fixed point C code for the next block is as follows:
// for (i = 0; i < inst->magnLen; i++) {
- // inst->real[i] = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(inst->real[i],
- //     (int16_t)(inst->noiseSupFilter[i]), 14); // Q(normData-stages)
- // inst->imag[i] = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(inst->imag[i],
- //     (int16_t)(inst->noiseSupFilter[i]), 14); // Q(normData-stages)
+ // inst->real[i] = (int16_t)((inst->real[i] *
+ //     (int16_t)(inst->noiseSupFilter[i])) >> 14); // Q(normData-stages)
+ // inst->imag[i] = (int16_t)((inst->imag[i] *
+ //     (int16_t)(inst->noiseSupFilter[i])) >> 14); // Q(normData-stages)
// }
int16_t* preal = &inst->real[0];
@@ -396,8 +397,8 @@ void WebRtcNsx_PrepareSpectrumNeon(NoiseSuppressionFixedC* inst,
}
// Filter the last element
- *preal = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(*preal, *pns_filter, 14);
- *pimag = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(*pimag, *pns_filter, 14);
+ *preal = (int16_t)((*preal * *pns_filter) >> 14);
+ *pimag = (int16_t)((*pimag * *pns_filter) >> 14);
// (2) Create spectrum.
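As a footnote to the NEON file above: the vmull_s16/vshrn_n_s32 sequence is the vectorized form of the same (a * b) >> 14 operation. A minimal sketch of that per-lane equivalence (assumes an ARM target with NEON; not part of the commit):

#include <arm_neon.h>
#include <stdint.h>

/* Computes out[k] = (int16_t)((a[k] * b[k]) >> 14) for four lanes at once. */
static void MulRsft14x4(const int16_t* a, const int16_t* b, int16_t* out) {
  int32x4_t prod = vmull_s16(vld1_s16(a), vld1_s16(b));  /* Widening multiply. */
  vst1_s16(out, vshrn_n_s32(prod, 14));  /* Narrowing shift right by 14. */
}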