diff --git a/webrtc/modules/audio_processing/aecm/aecm_core.c b/webrtc/modules/audio_processing/aecm/aecm_core.c
index 0f1dd7c34..03a0ecf35 100644
--- a/webrtc/modules/audio_processing/aecm/aecm_core.c
+++ b/webrtc/modules/audio_processing/aecm/aecm_core.c
@@ -958,9 +958,8 @@ void WebRtcAecm_UpdateChannel(AecmCore_t * aecm,
     {
       // We need to shift down before multiplication
       shiftChFar = 32 - zerosCh - zerosFar;
-      tmpU32no1 = WEBRTC_SPL_UMUL_32_16(
-          WEBRTC_SPL_RSHIFT_W32(aecm->channelAdapt32[i], shiftChFar),
-          far_spectrum[i]);
+      tmpU32no1 = (aecm->channelAdapt32[i] >> shiftChFar) *
+          far_spectrum[i];
     }
     // Determine Q-domain of numerator
     zerosNum = WebRtcSpl_NormU32(tmpU32no1);
@@ -1019,14 +1018,10 @@ void WebRtcAecm_UpdateChannel(AecmCore_t * aecm,
           shiftNum = 32 - (zerosNum + zerosFar);
           if (tmp32no1 > 0)
           {
-            tmp32no2 = (int32_t)WEBRTC_SPL_UMUL_32_16(
-                WEBRTC_SPL_RSHIFT_W32(tmp32no1, shiftNum),
-                far_spectrum[i]);
+            tmp32no2 = (tmp32no1 >> shiftNum) * far_spectrum[i];
           } else
           {
-            tmp32no2 = -(int32_t)WEBRTC_SPL_UMUL_32_16(
-                WEBRTC_SPL_RSHIFT_W32(-tmp32no1, shiftNum),
-                far_spectrum[i]);
+            tmp32no2 = -((-tmp32no1 >> shiftNum) * far_spectrum[i]);
           }
         }
         // Normalize with respect to frequency bin
@@ -1047,8 +1042,8 @@ void WebRtcAecm_UpdateChannel(AecmCore_t * aecm,
           // We can never have negative channel gain
          aecm->channelAdapt32[i] = 0;
         }
-        aecm->channelAdapt16[i]
-          = (int16_t)WEBRTC_SPL_RSHIFT_W32(aecm->channelAdapt32[i], 16);
+        aecm->channelAdapt16[i] =
+            (int16_t)(aecm->channelAdapt32[i] >> 16);
       }
     }
   }
diff --git a/webrtc/modules/audio_processing/aecm/aecm_core_c.c b/webrtc/modules/audio_processing/aecm/aecm_core_c.c
index d656f9b42..56beb4d14 100644
--- a/webrtc/modules/audio_processing/aecm/aecm_core_c.c
+++ b/webrtc/modules/audio_processing/aecm/aecm_core_c.c
@@ -487,10 +487,7 @@ int WebRtcAecm_ProcessBlock(AecmCore_t * aecm,
     } else
     {
       // Result in Q-(RESOLUTION_CHANNEL+RESOLUTION_SUPGAIN-16)
-      echoEst32Gained = WEBRTC_SPL_UMUL_32_16((uint32_t)WEBRTC_SPL_RSHIFT_W32(
-                                                aecm->echoFilt[i],
-                                                tmp16no1),
-                                              (uint16_t)supGain);
+      echoEst32Gained = (aecm->echoFilt[i] >> tmp16no1) * supGain;
     }
   }
 
@@ -509,7 +506,7 @@ int WebRtcAecm_ProcessBlock(AecmCore_t * aecm,
       tmp16no2 = ptrDfaClean[i];
     }
     tmp32no1 = (int32_t)(tmp16no2 - tmp16no1);
-    tmp16no2 = (int16_t)WEBRTC_SPL_RSHIFT_W32(tmp32no1, 4);
+    tmp16no2 = (int16_t)(tmp32no1 >> 4);
     tmp16no2 += tmp16no1;
     zeros16 = WebRtcSpl_NormW16(tmp16no2);
     if ((tmp16no2) & (-qDomainDiff > zeros16)) {
@@ -743,7 +740,7 @@ static void ComfortNoise(AecmCore_t* aecm,
 
   for (i = 0; i < PART_LEN1; i++)
   {
-    tmp32 = WEBRTC_SPL_RSHIFT_W32(aecm->noiseEst[i], shiftFromNearToNoise);
+    tmp32 = aecm->noiseEst[i] >> shiftFromNearToNoise;
     if (tmp32 > 32767)
     {
       tmp32 = 32767;
diff --git a/webrtc/modules/audio_processing/aecm/aecm_core_mips.c b/webrtc/modules/audio_processing/aecm/aecm_core_mips.c
index fdd5b2c9e..7ce828a84 100644
--- a/webrtc/modules/audio_processing/aecm/aecm_core_mips.c
+++ b/webrtc/modules/audio_processing/aecm/aecm_core_mips.c
@@ -991,10 +991,7 @@ int WebRtcAecm_ProcessBlock(AecmCore_t* aecm,
                                               supGain >> tmp16no1);
     } else {
       // Result in Q-(RESOLUTION_CHANNEL+RESOLUTION_SUPGAIN-16)
-      echoEst32Gained = WEBRTC_SPL_UMUL_32_16(
-                          (uint32_t)WEBRTC_SPL_RSHIFT_W32(aecm->echoFilt[i],
-                                                          tmp16no1),
-                          (uint16_t)supGain);
+      echoEst32Gained = (aecm->echoFilt[i] >> tmp16no1) * supGain;
     }
   }
 
@@ -1014,7 +1011,7 @@ int WebRtcAecm_ProcessBlock(AecmCore_t* aecm,
     }
     tmp32no1 = (int32_t)(tmp16no2 - tmp16no1);
 
-    tmp16no2 = (int16_t)WEBRTC_SPL_RSHIFT_W32(tmp32no1, 4);
+    tmp16no2 = (int16_t)(tmp32no1 >> 4);
     tmp16no2 += tmp16no1;
     zeros16 = WebRtcSpl_NormW16(tmp16no2);
     if ((tmp16no2) & (-qDomainDiff > zeros16)) {
diff --git a/webrtc/modules/audio_processing/high_pass_filter_impl.cc b/webrtc/modules/audio_processing/high_pass_filter_impl.cc
index 90a17cd77..a0c386b23 100644
--- a/webrtc/modules/audio_processing/high_pass_filter_impl.cc
+++ b/webrtc/modules/audio_processing/high_pass_filter_impl.cc
@@ -93,9 +93,8 @@ int Filter(FilterState* hpf, int16_t* data, int length) {
                                tmp_int32,
                                static_cast<int32_t>(-134217728));
 
-    // Convert back to Q0 and use rounding
-    data[i] = (int16_t)WEBRTC_SPL_RSHIFT_W32(tmp_int32, 12);
-
+    // Convert back to Q0 and use rounding.
+    data[i] = (int16_t)(tmp_int32 >> 12);
   }
 
   return AudioProcessing::kNoError;
diff --git a/webrtc/modules/audio_processing/ns/nsx_core.c b/webrtc/modules/audio_processing/ns/nsx_core.c
index 33cee1711..28e9dc52c 100644
--- a/webrtc/modules/audio_processing/ns/nsx_core.c
+++ b/webrtc/modules/audio_processing/ns/nsx_core.c
@@ -1472,7 +1472,7 @@ void WebRtcNsx_DataSynthesis(NsxInst_t* inst, short* outFrame) {
                                           - inst->scaleEnergyIn);
   } else {
     // |energyIn| is currently in Q(|scaleEnergyIn|), but to later on end up
-    // with an |energyRation| in Q8 we need to change the Q-domain to
+    // with an |energyRatio| in Q8 we need to change the Q-domain to
     // Q(-8-scaleEnergyOut).
     inst->energyIn >>= 8 + scaleEnergyOut - inst->scaleEnergyIn;
   }
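
Note (not part of the patch): the change is mechanical, swapping the WEBRTC_SPL_RSHIFT_W32 and WEBRTC_SPL_UMUL_32_16 helpers for plain C shifts and multiplications. Below is a minimal standalone sketch that checks the old and new forms of the aecm_core.c expression agree on one sample input. The macro definitions are reproduced from my reading of signal_processing_library.h and the sample values are made up, so treat this as an illustration rather than part of the change.

#include <stdint.h>
#include <stdio.h>

/* Assumed macro definitions, mirroring signal_processing_library.h;
 * reproduced locally only so the sketch compiles on its own. */
#define WEBRTC_SPL_RSHIFT_W32(x, c) ((x) >> (c))
#define WEBRTC_SPL_UMUL_32_16(a, b) ((uint32_t)((uint32_t)(a) * (uint16_t)(b)))

int main(void) {
  /* Made-up sample values, chosen so the product fits in 32 bits. */
  int32_t channelAdapt32 = 0x00345678;  /* stands in for aecm->channelAdapt32[i] */
  uint16_t far_spectrum = 4321;         /* stands in for far_spectrum[i] */
  int shiftChFar = 5;                   /* 32 - zerosCh - zerosFar */

  /* Old expression from aecm_core.c, written with the SPL macros. */
  uint32_t old_val = WEBRTC_SPL_UMUL_32_16(
      WEBRTC_SPL_RSHIFT_W32(channelAdapt32, shiftChFar), far_spectrum);

  /* New expression, as it appears in the patch. */
  uint32_t new_val = (channelAdapt32 >> shiftChFar) * far_spectrum;

  printf("old=%u new=%u\n", (unsigned)old_val, (unsigned)new_val);
  return old_val == new_val ? 0 : 1;  /* both evaluate to 463155027 here */
}

The one thing worth double-checking in review is signedness: the old macro forced the multiply into unsigned 32-bit arithmetic, whereas the new expressions rely on ordinary integer promotion, so the two forms match only as long as the shifted operand stays non-negative and the product fits in the promoted type.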