common_audio: Replaced WEBRTC_SPL_LSHIFT_U32 with << in audio_processing
Affected components:
* AECMobile - Added a helper function since the same operation was performed several times.
* Auto Gain Control
* Noise Suppression (fixed point)

BUG=3348,3353
TESTED=locally on Linux
R=kwiberg@webrtc.org

Review URL: https://webrtc-codereview.appspot.com/20219004

git-svn-id: http://webrtc.googlecode.com/svn/trunk@7076 4adac7df-926f-26a2-2b94-8c16560cd09d
parent 2c03a97d37
commit c2c4117477
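Note (reviewer's illustration, not part of the change): as far as I recall, WEBRTC_SPL_LSHIFT_U32(x, c) is defined in the signal-processing headers as ((uint32_t)(x) << (c)), so a bare << is only a drop-in replacement when the left operand is already an unsigned 32-bit value. That is why several hunks below keep or add an explicit (uint32_t) cast for 16-bit operands. A minimal sketch of the point, assuming that macro definition:

```c
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Assumed macro definition (recalled, not quoted from this commit):
 *   #define WEBRTC_SPL_LSHIFT_U32(x, c) ((uint32_t)(x) << (c))
 * With a bare "<<", a uint16_t operand is promoted to (signed) int, so large
 * shift counts could move bits into or past the sign bit. Casting to uint32_t
 * first keeps the shift well-defined, matching lines such as
 * "(uint32_t)magnU16[i] << 17" in the hunks below. */
int main(void) {
  uint16_t magn = 0xFFFF;
  uint32_t shifted = (uint32_t)magn << 17;  /* wraps modulo 2^32: 0xFFFE0000 */
  printf("0x%08" PRIX32 "\n", shifted);
  return 0;
}
```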
@@ -706,6 +706,15 @@ int16_t WebRtcAecm_AsymFilt(const int16_t filtOld, const int16_t inVal,
     return retVal;
 }

+// ExtractFractionPart(a, zeros)
+//
+// returns the fraction part of |a|, with |zeros| number of leading zeros, as an
+// int16_t scaled to Q8. There is no sanity check of |a| in the sense that the
+// number of zeros match.
+static int16_t ExtractFractionPart(uint32_t a, int zeros) {
+  return (int16_t)(((a << zeros) & 0x7FFFFFFF) >> 23);
+}
+
 // WebRtcAecm_CalcEnergies(...)
 //
 // This function calculates the log of energies for nearend, farend and estimated
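As a standalone illustration (my own sketch, not code from the tree), the new helper pairs with WebRtcSpl_NormU32 to produce an approximate log2 in Q8: the normalization count gives the integer part and the bits just below the leading one give the fraction. A portable leading-zero count stands in for WebRtcSpl_NormU32 here:

```c
#include <stdint.h>
#include <stdio.h>

/* Stand-in for WebRtcSpl_NormU32: number of left shifts needed to put the
 * most significant set bit of |a| at bit 31 (assumes a != 0). */
static int NormU32(uint32_t a) {
  int zeros = 0;
  while ((a & 0x80000000u) == 0) {
    a <<= 1;
    zeros++;
  }
  return zeros;
}

/* Same body as the helper added in this commit. */
static int16_t ExtractFractionPart(uint32_t a, int zeros) {
  return (int16_t)(((a << zeros) & 0x7FFFFFFF) >> 23);
}

int main(void) {
  uint32_t a = 1000;
  int zeros = NormU32(a);                        /* 22 for a = 1000 */
  int16_t frac = ExtractFractionPart(a, zeros);  /* 244, i.e. ~0.95 in Q8 */
  /* log2(a) in Q8, as combined in WebRtcAecm_CalcEnergies below:
   * integer part (31 - zeros) in Q8 plus the Q8 fraction. */
  int log2_q8 = ((31 - zeros) << 8) + frac;      /* 2548; log2(1000)*256 ~ 2551 */
  printf("log2(%u) ~ %d/256\n", a, log2_q8);
  return 0;
}
```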
@@ -751,9 +760,7 @@ void WebRtcAecm_CalcEnergies(AecmCore_t * aecm,
     if (nearEner)
     {
         zeros = WebRtcSpl_NormU32(nearEner);
-        frac = (int16_t)WEBRTC_SPL_RSHIFT_U32(
-                   (WEBRTC_SPL_LSHIFT_U32(nearEner, zeros) & 0x7FFFFFFF),
-                   23);
+        frac = ExtractFractionPart(nearEner, zeros);
         // log2 in Q8
         tmp16 += WEBRTC_SPL_LSHIFT_W16((31 - zeros), 8) + frac;
         tmp16 -= WEBRTC_SPL_LSHIFT_W16(aecm->dfaNoisyQDomain, 8);
@@ -774,8 +781,7 @@ void WebRtcAecm_CalcEnergies(AecmCore_t * aecm,
     if (tmpFar)
     {
         zeros = WebRtcSpl_NormU32(tmpFar);
-        frac = (int16_t)WEBRTC_SPL_RSHIFT_U32((WEBRTC_SPL_LSHIFT_U32(tmpFar, zeros)
-                                               & 0x7FFFFFFF), 23);
+        frac = ExtractFractionPart(tmpFar, zeros);
         // log2 in Q8
         tmp16 += WEBRTC_SPL_LSHIFT_W16((31 - zeros), 8) + frac;
         tmp16 -= WEBRTC_SPL_LSHIFT_W16(far_q, 8);
@@ -787,8 +793,7 @@ void WebRtcAecm_CalcEnergies(AecmCore_t * aecm,
     if (tmpAdapt)
     {
         zeros = WebRtcSpl_NormU32(tmpAdapt);
-        frac = (int16_t)WEBRTC_SPL_RSHIFT_U32((WEBRTC_SPL_LSHIFT_U32(tmpAdapt, zeros)
-                                               & 0x7FFFFFFF), 23);
+        frac = ExtractFractionPart(tmpAdapt, zeros);
         //log2 in Q8
         tmp16 += WEBRTC_SPL_LSHIFT_W16((31 - zeros), 8) + frac;
         tmp16 -= WEBRTC_SPL_LSHIFT_W16(RESOLUTION_CHANNEL16 + far_q, 8);
@@ -800,8 +805,7 @@ void WebRtcAecm_CalcEnergies(AecmCore_t * aecm,
     if (tmpStored)
     {
         zeros = WebRtcSpl_NormU32(tmpStored);
-        frac = (int16_t)WEBRTC_SPL_RSHIFT_U32((WEBRTC_SPL_LSHIFT_U32(tmpStored, zeros)
-                                               & 0x7FFFFFFF), 23);
+        frac = ExtractFractionPart(tmpStored, zeros);
         //log2 in Q8
         tmp16 += WEBRTC_SPL_LSHIFT_W16((31 - zeros), 8) + frac;
         tmp16 -= WEBRTC_SPL_LSHIFT_W16(RESOLUTION_CHANNEL16 + far_q, 8);

@@ -154,7 +154,7 @@ int32_t WebRtcAgc_CalculateGainTable(int32_t *gainTable, // Q16
     fracPart = (uint16_t)(absInLevel & 0x00003FFF); // extract the fractional part
     tmpU16 = kGenFuncTable[intPart + 1] - kGenFuncTable[intPart]; // Q8
     tmpU32no1 = WEBRTC_SPL_UMUL_16_16(tmpU16, fracPart); // Q22
-    tmpU32no1 += WEBRTC_SPL_LSHIFT_U32((uint32_t)kGenFuncTable[intPart], 14); // Q22
+    tmpU32no1 += (uint32_t)kGenFuncTable[intPart] << 14; // Q22
     logApprox = WEBRTC_SPL_RSHIFT_U32(tmpU32no1, 8); // Q14
     // Compensate for negative exponent using the relation:
     // log2(1 + 2^-x) = log2(1 + 2^x) - x

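The comment kept in this hunk leans on the identity log2(1 + 2^-x) = log2(1 + 2^x) - x, which follows from factoring 2^-x out of the sum. A quick floating-point spot check (illustrative only, unrelated to the fixed-point table code):

```c
#include <math.h>
#include <stdio.h>

/* Numerically spot-check the identity used to compensate for negative
 * exponents in the gain-table code above:
 *   log2(1 + 2^-x) = log2(2^-x * (2^x + 1)) = log2(1 + 2^x) - x */
int main(void) {
  for (int x = 1; x <= 6; ++x) {
    double lhs = log2(1.0 + pow(2.0, -x));
    double rhs = log2(1.0 + pow(2.0, x)) - x;
    printf("x=%d  lhs=%.10f  rhs=%.10f\n", x, lhs, rhs);
  }
  return 0;
}
```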
@@ -620,7 +620,7 @@ void WebRtcNsx_CalcParametricNoiseEstimate(NsxInst_t* inst,
   }
   // Shift fractional part to Q(minNorm-stages)
   tmp32no2 = WEBRTC_SPL_SHIFT_W32(tmp32no2, int_part - 11);
-  *noise_estimate_avg = WEBRTC_SPL_LSHIFT_U32(1, int_part) + (uint32_t)tmp32no2;
+  *noise_estimate_avg = (1 << int_part) + (uint32_t)tmp32no2;
   // Scale up to initMagnEst, which is not block averaged
   *noise_estimate = (*noise_estimate_avg) * (uint32_t)(inst->blockIndex + 1);
 }
@@ -1149,7 +1149,7 @@ void WebRtcNsx_ComputeSpectralDifference(NsxInst_t* inst, uint16_t* magnIn) {
     tmpU32no1 = (uint32_t)WEBRTC_SPL_ABS_W32(covMagnPauseFX); // Q(prevQMagn+qMagn)
     norm32 = WebRtcSpl_NormU32(tmpU32no1) - 16;
     if (norm32 > 0) {
-      tmpU32no1 = WEBRTC_SPL_LSHIFT_U32(tmpU32no1, norm32); // Q(prevQMagn+qMagn+norm32)
+      tmpU32no1 <<= norm32; // Q(prevQMagn+qMagn+norm32)
     } else {
       tmpU32no1 = WEBRTC_SPL_RSHIFT_U32(tmpU32no1, -norm32); // Q(prevQMagn+qMagn+norm32)
     }
@@ -1660,7 +1660,7 @@ int WebRtcNsx_ProcessCore(NsxInst_t* inst, short* speechFrame, short* speechFram
       // numerator = (initMagnEst - noise_estimate * overdrive)
       // Result in Q(8+minNorm-stages)
       tmpU32no1 = WEBRTC_SPL_UMUL_32_16(noise_estimate, inst->overdrive);
-      numerator = WEBRTC_SPL_LSHIFT_U32(inst->initMagnEst[i], 8);
+      numerator = inst->initMagnEst[i] << 8;
       if (numerator > tmpU32no1) {
         // Suppression filter coefficient larger than zero, so calculate.
         numerator -= tmpU32no1;
@@ -1671,7 +1671,7 @@ int WebRtcNsx_ProcessCore(NsxInst_t* inst, short* speechFrame, short* speechFram
         nShifts = WEBRTC_SPL_SAT(6, nShifts, 0);

         // Shift numerator to Q(nShifts+8+minNorm-stages)
-        numerator = WEBRTC_SPL_LSHIFT_U32(numerator, nShifts);
+        numerator <<= nShifts;

         // Shift denominator to Q(nShifts-6+minNorm-stages)
         tmpU32no1 = WEBRTC_SPL_RSHIFT_U32(inst->initMagnEst[i], 6 - nShifts);
@@ -1710,7 +1710,7 @@ int WebRtcNsx_ProcessCore(NsxInst_t* inst, short* speechFrame, short* speechFram
       // Add them together and divide by startup length
       noiseU32[i] = WebRtcSpl_DivU32U16(tmpU32no1 + tmpU32no2, END_STARTUP_SHORT);
       // Shift back if necessary
-      noiseU32[i] = WEBRTC_SPL_LSHIFT_U32(noiseU32[i], nShifts);
+      noiseU32[i] <<= nShifts;
     }
     // Update new Q-domain for 'noiseU32'
     qNoise = q_domain_to_use;
@@ -1753,15 +1753,15 @@ int WebRtcNsx_ProcessCore(NsxInst_t* inst, short* speechFrame, short* speechFram

     // calculate post SNR: output in Q11
     postLocSnr[i] = 2048; // 1.0 in Q11
-    tmpU32no1 = WEBRTC_SPL_LSHIFT_U32((uint32_t)magnU16[i], 6); // Q(6+qMagn)
+    tmpU32no1 = (uint32_t)magnU16[i] << 6; // Q(6+qMagn)
     if (postShifts < 0) {
       tmpU32no2 = WEBRTC_SPL_RSHIFT_U32(noiseU32[i], -postShifts); // Q(6+qMagn)
     } else {
-      tmpU32no2 = WEBRTC_SPL_LSHIFT_U32(noiseU32[i], postShifts); // Q(6+qMagn)
+      tmpU32no2 = noiseU32[i] << postShifts; // Q(6+qMagn)
     }
     if (tmpU32no1 > tmpU32no2) {
       // Current magnitude larger than noise
-      tmpU32no1 = WEBRTC_SPL_LSHIFT_U32(tmpU32no1, 11); // Q(17+qMagn)
+      tmpU32no1 <<= 11; // Q(17+qMagn)
       if (tmpU32no2 > 0) {
         tmpU32no1 /= tmpU32no2; // Q11
         postLocSnr[i] = WEBRTC_SPL_MIN(satMax, tmpU32no1); // Q11
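For readability, here is a rough C paraphrase of the post-SNR step in this hunk (my reading of the code, with invented names; the Q-domain alignment of the noise value and the satMax bound are assumed to be handled by the caller as in the original):

```c
#include <stdint.h>

/* Illustrative paraphrase of the post-SNR computation above: bring the
 * magnitude to Q(6+qMagn), shift it up 11 more bits before the integer
 * division so the quotient lands in Q11, then clamp. As in the original,
 * |magn| is assumed small enough that the combined 17-bit shift does not
 * wrap a uint32_t. */
static uint32_t post_snr_q11(uint16_t magn, uint32_t noise_q6, uint32_t sat_max) {
  uint32_t snr = 2048;                       /* 1.0 in Q11, the default */
  uint32_t magn_q6 = (uint32_t)magn << 6;    /* magnitude in Q(6+qMagn) */
  if (magn_q6 > noise_q6 && noise_q6 > 0) {
    uint32_t ratio_q11 = (magn_q6 << 11) / noise_q6;  /* magn/noise in Q11 */
    snr = ratio_q11 < sat_max ? ratio_q11 : sat_max;
  }
  return snr;
}
```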
@@ -1772,7 +1772,7 @@ int WebRtcNsx_ProcessCore(NsxInst_t* inst, short* speechFrame, short* speechFram

     // calculate prevNearSnr[i] and save for later instead of recalculating it later
     nearMagnEst = WEBRTC_SPL_UMUL_16_16(inst->prevMagnU16[i], inst->noiseSupFilter[i]); // Q(prevQMagn+14)
-    tmpU32no1 = WEBRTC_SPL_LSHIFT_U32(nearMagnEst, 3); // Q(prevQMagn+17)
+    tmpU32no1 = nearMagnEst << 3; // Q(prevQMagn+17)
     tmpU32no2 = WEBRTC_SPL_RSHIFT_U32(inst->prevNoiseU32[i], nShifts); // Q(prevQMagn+6)

     if (tmpU32no2 > 0) {
@@ -1833,7 +1833,7 @@ int WebRtcNsx_ProcessCore(NsxInst_t* inst, short* speechFrame, short* speechFram
       inst->featureSpecDiff = 0x007FFFFF;
     } else {
       inst->featureSpecDiff = WEBRTC_SPL_MIN(0x007FFFFF,
-                                             WEBRTC_SPL_LSHIFT_U32(tmpU32no3, norm32no1));
+                                             tmpU32no3 << norm32no1);
     }
   }

@@ -1858,7 +1858,7 @@ int WebRtcNsx_ProcessCore(NsxInst_t* inst, short* speechFrame, short* speechFram
     if (postShifts < 0) {
       tmpU32no2 = WEBRTC_SPL_RSHIFT_U32(magnU16[i], -postShifts); // Q(prevQNoise)
     } else {
-      tmpU32no2 = WEBRTC_SPL_LSHIFT_U32(magnU16[i], postShifts); // Q(prevQNoise)
+      tmpU32no2 = (uint32_t)magnU16[i] << postShifts; // Q(prevQNoise)
     }
     if (prevNoiseU16[i] > tmpU32no2) {
       sign = -1;
@@ -1979,18 +1979,18 @@ int WebRtcNsx_ProcessCore(NsxInst_t* inst, short* speechFrame, short* speechFram
     if (nShifts < 0) {
       // This case is equivalent with magn < noise which implies curNearSnr = 0;
       tmpMagnU32 = (uint32_t)magnU16[i]; // Q(qMagn)
-      tmpNoiseU32 = WEBRTC_SPL_LSHIFT_U32(noiseU32[i], -nShifts); // Q(qMagn)
+      tmpNoiseU32 = noiseU32[i] << -nShifts; // Q(qMagn)
     } else if (nShifts > 17) {
-      tmpMagnU32 = WEBRTC_SPL_LSHIFT_U32(magnU16[i], 17); // Q(qMagn+17)
+      tmpMagnU32 = (uint32_t)magnU16[i] << 17; // Q(qMagn+17)
       tmpNoiseU32 = WEBRTC_SPL_RSHIFT_U32(noiseU32[i], nShifts - 17); // Q(qMagn+17)
     } else {
-      tmpMagnU32 = WEBRTC_SPL_LSHIFT_U32((uint32_t)magnU16[i], nShifts); // Q(qNoise_prev+11)
+      tmpMagnU32 = (uint32_t)magnU16[i] << nShifts; // Q(qNoise_prev+11)
       tmpNoiseU32 = noiseU32[i]; // Q(qNoise_prev+11)
     }
     if (tmpMagnU32 > tmpNoiseU32) {
       tmpU32no1 = tmpMagnU32 - tmpNoiseU32; // Q(qCur)
       norm32no2 = WEBRTC_SPL_MIN(11, WebRtcSpl_NormU32(tmpU32no1));
-      tmpU32no1 = WEBRTC_SPL_LSHIFT_U32(tmpU32no1, norm32no2); // Q(qCur+norm32no2)
+      tmpU32no1 <<= norm32no2; // Q(qCur+norm32no2)
       tmpU32no2 = WEBRTC_SPL_RSHIFT_U32(tmpNoiseU32, 11 - norm32no2); // Q(qCur+norm32no2-11)
       if (tmpU32no2 > 0) {
         tmpU32no1 /= tmpU32no2; // Q11
@@ -2033,7 +2033,7 @@ int WebRtcNsx_ProcessCore(NsxInst_t* inst, short* speechFrame, short* speechFram
   inst->prevQMagn = qMagn;
   if (norm32no1 > 5) {
     for (i = 0; i < inst->magnLen; i++) {
-      inst->prevNoiseU32[i] = WEBRTC_SPL_LSHIFT_U32(noiseU32[i], norm32no1 - 5); // Q(qNoise+11)
+      inst->prevNoiseU32[i] = noiseU32[i] << (norm32no1 - 5); // Q(qNoise+11)
       inst->prevMagnU16[i] = magnU16[i]; // Q(qMagn)
     }
   } else {

@@ -8,6 +8,8 @@
  * be found in the AUTHORS file in the root of the source tree.
  */

+#include <assert.h>
+
 #include "webrtc/modules/audio_processing/ns/include/noise_suppression_x.h"
 #include "webrtc/modules/audio_processing/ns/nsx_core.h"

@@ -39,9 +41,9 @@ void WebRtcNsx_SpeechNoiseProb(NsxInst_t* inst,
   for (i = 0; i < inst->magnLen; i++) {
     besselTmpFX32 = (int32_t)postLocSnr[i]; // Q11
     normTmp = WebRtcSpl_NormU32(postLocSnr[i]);
-    num = WEBRTC_SPL_LSHIFT_U32(postLocSnr[i], normTmp); // Q(11+normTmp)
+    num = postLocSnr[i] << normTmp; // Q(11+normTmp)
     if (normTmp > 10) {
-      den = WEBRTC_SPL_LSHIFT_U32(priorLocSnr[i], normTmp - 11); // Q(normTmp)
+      den = priorLocSnr[i] << (normTmp - 11); // Q(normTmp)
     } else {
       den = WEBRTC_SPL_RSHIFT_U32(priorLocSnr[i], 11 - normTmp); // Q(normTmp)
     }
@@ -121,11 +123,7 @@ void WebRtcNsx_SpeechNoiseProb(NsxInst_t* inst,
       //widthPrior = widthPrior * 2.0;
       nShifts++;
     }
-    tmp32no1 = (int32_t)WebRtcSpl_DivU32U16(WEBRTC_SPL_LSHIFT_U32(tmpU32no2,
-                                                                  nShifts), 25);
-                                                                  //Q14
-    tmpU32no1 = WebRtcSpl_DivU32U16(WEBRTC_SPL_LSHIFT_U32(tmpU32no2, nShifts),
-                                    25); //Q14
+    tmpU32no1 = WebRtcSpl_DivU32U16(tmpU32no2 << nShifts, 25); // Q14
     // compute indicator function: sigmoid map
     // FLOAT code
     // indicator1 = 0.5 * (tanh(sgnMap * widthPrior *
@@ -151,8 +149,8 @@ void WebRtcNsx_SpeechNoiseProb(NsxInst_t* inst,
     if (inst->featureSpecDiff) {
       normTmp = WEBRTC_SPL_MIN(20 - inst->stages,
                                WebRtcSpl_NormU32(inst->featureSpecDiff));
-      tmpU32no1 = WEBRTC_SPL_LSHIFT_U32(inst->featureSpecDiff, normTmp);
-      // Q(normTmp-2*stages)
+      assert(normTmp >= 0);
+      tmpU32no1 = inst->featureSpecDiff << normTmp; // Q(normTmp-2*stages)
       tmpU32no2 = WEBRTC_SPL_RSHIFT_U32(inst->timeAvgMagnEnergy,
                                         20 - inst->stages - normTmp);
       if (tmpU32no2 > 0) {

@@ -8,6 +8,8 @@
  * be found in the AUTHORS file in the root of the source tree.
  */

+#include <assert.h>
+
 #include "webrtc/modules/audio_processing/ns/include/noise_suppression_x.h"
 #include "webrtc/modules/audio_processing/ns/nsx_core.h"

@@ -155,11 +157,7 @@ void WebRtcNsx_SpeechNoiseProb(NsxInst_t* inst,
       //widthPrior = widthPrior * 2.0;
       nShifts++;
     }
-    tmp32no1 = (int32_t)WebRtcSpl_DivU32U16(WEBRTC_SPL_LSHIFT_U32(tmpU32no2,
-                                                                  nShifts), 25);
-                                                                  //Q14
-    tmpU32no1 = WebRtcSpl_DivU32U16(WEBRTC_SPL_LSHIFT_U32(tmpU32no2, nShifts),
-                                    25); //Q14
+    tmpU32no1 = WebRtcSpl_DivU32U16(tmpU32no2 << nShifts, 25); //Q14
     // compute indicator function: sigmoid map
     // FLOAT code
     // indicator1 = 0.5 * (tanh(sgnMap * widthPrior *
@@ -185,8 +183,8 @@ void WebRtcNsx_SpeechNoiseProb(NsxInst_t* inst,
     if (inst->featureSpecDiff) {
      normTmp = WEBRTC_SPL_MIN(20 - inst->stages,
                               WebRtcSpl_NormU32(inst->featureSpecDiff));
-      tmpU32no1 = WEBRTC_SPL_LSHIFT_U32(inst->featureSpecDiff, normTmp);
-      // Q(normTmp-2*stages)
+      assert(normTmp >= 0);
+      tmpU32no1 = inst->featureSpecDiff << normTmp; // Q(normTmp-2*stages)
       tmpU32no2 = WEBRTC_SPL_RSHIFT_U32(inst->timeAvgMagnEnergy,
                                         20 - inst->stages - normTmp);
       if (tmpU32no2 > 0) {