Noise suppression: Change signature to work on floats instead of ints

Internally, it already worked on floats. This patch just changes the
signature of a bunch of functions so that floats can be passed
directly from the new and improved AudioBuffer without converting the
data to int and back again first.
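
For reference, the exported prototype changes shape like this (excerpted
from the header diff below; the opaque NsHandle typedef is unchanged):

  /* Before */
  int WebRtcNs_Process(NsHandle* NS_inst,
                       short* spframe,
                       short* spframe_H,
                       short* outframe,
                       short* outframe_H);

  /* After */
  int WebRtcNs_Process(NsHandle* NS_inst,
                       float* spframe,
                       float* spframe_H,
                       float* outframe,
                       float* outframe_H);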

(The reference data to the ApmTest.Process test had to be modified
slightly; this is because the noise suppressor comes immediately after
the echo canceller, which also works on floats. If I truncate to
integers between the two steps, ApmTest.Process doesn't complain, but
of course that's exactly the sort of thing the float conversion is
supposed to let us avoid...)
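
To make the precision argument concrete, here is a minimal, self-contained
C program (not WebRTC code) showing what an int16 hand-off between two
float stages does to a sample:

  #include <stdio.h>

  int main(void) {
    /* A sample value as it might leave the float echo canceller; the
     * fractional part is real signal, not noise. */
    float x = 1234.56f;

    short truncated = (short)x;        /* int16 hand-off: fraction is lost */
    float resumed = (float)truncated;  /* what the next stage would see */

    printf("float path: %f\n", x);        /* ~1234.560059 */
    printf("int16 path: %f\n", resumed);  /* 1234.000000 */
    printf("error:      %f\n", x - resumed);
    return 0;
  }

The fractional part discarded by the cast is exactly what the float
pipeline is meant to preserve, which is why the reference data shifted
slightly.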

BUG=
R=aluebs@webrtc.org, bjornv@webrtc.org, tina.legrand@webrtc.org

Review URL: https://webrtc-codereview.appspot.com/13519004

git-svn-id: http://webrtc.googlecode.com/svn/trunk@6385 4adac7df-926f-26a2-2b94-8c16560cd09d
Author: kwiberg@webrtc.org
Date:   2014-06-10 11:13:09 +0000
Commit: 12cd443752 (parent 1014101470)
6 changed files with 36 additions and 70 deletions

@@ -68,10 +68,10 @@ int NoiseSuppressionImpl::ProcessCaptureAudio(AudioBuffer* audio) {
     Handle* my_handle = static_cast<Handle*>(handle(i));
 #if defined(WEBRTC_NS_FLOAT)
     err = WebRtcNs_Process(static_cast<Handle*>(handle(i)),
-                           audio->low_pass_split_data(i),
-                           audio->high_pass_split_data(i),
-                           audio->low_pass_split_data(i),
-                           audio->high_pass_split_data(i));
+                           audio->low_pass_split_data_f(i),
+                           audio->high_pass_split_data_f(i),
+                           audio->low_pass_split_data_f(i),
+                           audio->high_pass_split_data_f(i));
 #elif defined(WEBRTC_NS_FIXED)
     err = WebRtcNsx_Process(static_cast<Handle*>(handle(i)),
                             audio->low_pass_split_data(i),

@@ -99,10 +99,10 @@ int WebRtcNs_set_policy(NsHandle* NS_inst, int mode);
 *                        -1 - Error
 */
 int WebRtcNs_Process(NsHandle* NS_inst,
-                     short* spframe,
-                     short* spframe_H,
-                     short* outframe,
-                     short* outframe_H);
+                     float* spframe,
+                     float* spframe_H,
+                     float* outframe,
+                     float* outframe_H);
 /* Returns the internally used prior speech probability of the current frame.
 * There is a frequency bin based one as well, with which this should not be

@@ -43,8 +43,8 @@ int WebRtcNs_set_policy(NsHandle* NS_inst, int mode) {
 }
-int WebRtcNs_Process(NsHandle* NS_inst, short* spframe, short* spframe_H,
-                     short* outframe, short* outframe_H) {
+int WebRtcNs_Process(NsHandle* NS_inst, float* spframe, float* spframe_H,
+                     float* outframe, float* outframe_H) {
   return WebRtcNs_ProcessCore(
       (NSinst_t*) NS_inst, spframe, spframe_H, outframe, outframe_H);
 }
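
For callers, the new float interface looks roughly like the sketch below.
The Create/Init/Free signatures are reproduced from memory of the
same-era header, so treat them as assumptions:

  #include "noise_suppression.h"  /* WebRtcNs_* declarations (path assumed) */

  int denoise_block(float* in, float* out) {
    NsHandle* ns = NULL;
    int err = -1;
    /* Assumed era signatures: Create takes a handle out-param,
     * Init takes the sample rate in Hz. */
    if (WebRtcNs_Create(&ns) == 0 && WebRtcNs_Init(ns, 16000) == 0) {
      /* At 16 kHz there is no high band, so the _H pointers may be NULL;
       * ProcessCore only requires them at 32 kHz. */
      err = WebRtcNs_Process(ns, in, NULL, out, NULL);
    }
    if (ns != NULL)
      WebRtcNs_Free(ns);
    return err;
  }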

@@ -715,10 +715,10 @@ void WebRtcNs_SpeechNoiseProb(NSinst_t* inst, float* probSpeechFinal, float* snr
 }
 int WebRtcNs_ProcessCore(NSinst_t* inst,
-                         short* speechFrame,
-                         short* speechFrameHB,
-                         short* outFrame,
-                         short* outFrameHB) {
+                         float* speechFrame,
+                         float* speechFrameHB,
+                         float* outFrame,
+                         float* outFrameHB) {
   // main routine for noise reduction
   int flagHB = 0;
@@ -731,8 +731,8 @@ int WebRtcNs_ProcessCore(NSinst_t* inst,
   float snrPrior, currentEstimateStsa;
   float tmpFloat1, tmpFloat2, tmpFloat3, probSpeech, probNonSpeech;
   float gammaNoiseTmp, gammaNoiseOld;
-  float noiseUpdateTmp, fTmp, dTmp;
-  float fin[BLOCKL_MAX], fout[BLOCKL_MAX];
+  float noiseUpdateTmp, fTmp;
+  float fout[BLOCKL_MAX];
   float winData[ANAL_BLOCKL_MAX];
   float magn[HALF_ANAL_BLOCKL], noise[HALF_ANAL_BLOCKL];
   float theFilter[HALF_ANAL_BLOCKL], theFilterTmp[HALF_ANAL_BLOCKL];
@@ -775,26 +775,17 @@ int WebRtcNs_ProcessCore(NSinst_t* inst,
   updateParsFlag = inst->modelUpdatePars[0];
   //
   //for LB do all processing
-  // convert to float
-  for (i = 0; i < inst->blockLen10ms; i++) {
-    fin[i] = (float)speechFrame[i];
-  }
   // update analysis buffer for L band
   memcpy(inst->dataBuf, inst->dataBuf + inst->blockLen10ms,
          sizeof(float) * (inst->anaLen - inst->blockLen10ms));
-  memcpy(inst->dataBuf + inst->anaLen - inst->blockLen10ms, fin,
+  memcpy(inst->dataBuf + inst->anaLen - inst->blockLen10ms, speechFrame,
          sizeof(float) * inst->blockLen10ms);
   if (flagHB == 1) {
-    // convert to float
-    for (i = 0; i < inst->blockLen10ms; i++) {
-      fin[i] = (float)speechFrameHB[i];
-    }
     // update analysis buffer for H band
     memcpy(inst->dataBufHB, inst->dataBufHB + inst->blockLen10ms,
            sizeof(float) * (inst->anaLen - inst->blockLen10ms));
-    memcpy(inst->dataBufHB + inst->anaLen - inst->blockLen10ms, fin,
+    memcpy(inst->dataBufHB + inst->anaLen - inst->blockLen10ms, speechFrameHB,
            sizeof(float) * inst->blockLen10ms);
   }
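
A side effect visible in this hunk: with float input, the fin[] scratch
buffer and the per-sample casts disappear, and each frame is copied
straight into the analysis buffer. A self-contained sketch of the
resulting pattern (sizes are illustrative; memmove is used here because
the shift overlaps itself):

  #include <string.h>

  #define ANA_LEN 256   /* illustrative, stands in for inst->anaLen */
  #define BLOCK_LEN 160 /* illustrative, stands in for inst->blockLen10ms */

  static float data_buf[ANA_LEN]; /* stands in for inst->dataBuf */

  /* Shift out the oldest block and append the new one, as the patched
   * code does -- no int-to-float conversion loop needed. */
  static void update_analysis_buffer(const float* speech_frame) {
    memmove(data_buf, data_buf + BLOCK_LEN,
            sizeof(float) * (ANA_LEN - BLOCK_LEN));
    memcpy(data_buf + ANA_LEN - BLOCK_LEN, speech_frame,
           sizeof(float) * BLOCK_LEN);
  }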
@@ -833,30 +824,16 @@ int WebRtcNs_ProcessCore(NSinst_t* inst,
         inst->outBuf[i] = fout[i + inst->blockLen10ms];
       }
     }
-    // convert to short
-    for (i = 0; i < inst->blockLen10ms; i++) {
-      dTmp = fout[i];
-      if (dTmp < WEBRTC_SPL_WORD16_MIN) {
-        dTmp = WEBRTC_SPL_WORD16_MIN;
-      } else if (dTmp > WEBRTC_SPL_WORD16_MAX) {
-        dTmp = WEBRTC_SPL_WORD16_MAX;
-      }
-      outFrame[i] = (short)dTmp;
-    }
+    for (i = 0; i < inst->blockLen10ms; ++i)
+      outFrame[i] = WEBRTC_SPL_SAT(
+          WEBRTC_SPL_WORD16_MAX, fout[i], WEBRTC_SPL_WORD16_MIN);
     // for time-domain gain of HB
-    if (flagHB == 1) {
-      for (i = 0; i < inst->blockLen10ms; i++) {
-        dTmp = inst->dataBufHB[i];
-        if (dTmp < WEBRTC_SPL_WORD16_MIN) {
-          dTmp = WEBRTC_SPL_WORD16_MIN;
-        } else if (dTmp > WEBRTC_SPL_WORD16_MAX) {
-          dTmp = WEBRTC_SPL_WORD16_MAX;
-        }
-        outFrameHB[i] = (short)dTmp;
-      }
-    }  // end of H band gain computation
-    //
+    if (flagHB == 1)
+      for (i = 0; i < inst->blockLen10ms; ++i)
+        outFrameHB[i] = WEBRTC_SPL_SAT(
+            WEBRTC_SPL_WORD16_MAX, inst->dataBufHB[i], WEBRTC_SPL_WORD16_MIN);
     return 0;
   }
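
WEBRTC_SPL_SAT is the clamping macro from the signal-processing library,
so the rewrites above are behavior-preserving. A self-contained check
(macro reproduced approximately from the library headers, with
parenthesization added):

  #include <stdio.h>

  #define WEBRTC_SPL_WORD16_MAX 32767
  #define WEBRTC_SPL_WORD16_MIN -32768
  #define WEBRTC_SPL_SAT(a, b, c) ((b) > (a) ? (a) : (b) < (c) ? (c) : (b))

  static short clamp_old(float d) {  /* the removed if/else code path */
    if (d < WEBRTC_SPL_WORD16_MIN) d = WEBRTC_SPL_WORD16_MIN;
    else if (d > WEBRTC_SPL_WORD16_MAX) d = WEBRTC_SPL_WORD16_MAX;
    return (short)d;
  }

  int main(void) {
    const float probes[] = {-40000.0f, -1.5f, 0.0f, 123.9f, 40000.0f};
    for (int i = 0; i < 5; ++i) {
      short a = clamp_old(probes[i]);
      short b = (short)WEBRTC_SPL_SAT(
          WEBRTC_SPL_WORD16_MAX, probes[i], WEBRTC_SPL_WORD16_MIN);
      printf("%10.1f -> old %6d, new %6d\n", probes[i], a, b);
    }
    return 0;
  }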
@@ -1239,16 +1216,9 @@ int WebRtcNs_ProcessCore(NSinst_t* inst,
     inst->outLen -= inst->blockLen10ms;
   }
-  // convert to short
-  for (i = 0; i < inst->blockLen10ms; i++) {
-    dTmp = fout[i];
-    if (dTmp < WEBRTC_SPL_WORD16_MIN) {
-      dTmp = WEBRTC_SPL_WORD16_MIN;
-    } else if (dTmp > WEBRTC_SPL_WORD16_MAX) {
-      dTmp = WEBRTC_SPL_WORD16_MAX;
-    }
-    outFrame[i] = (short)dTmp;
-  }
+  for (i = 0; i < inst->blockLen10ms; ++i)
+    outFrame[i] = WEBRTC_SPL_SAT(
+        WEBRTC_SPL_WORD16_MAX, fout[i], WEBRTC_SPL_WORD16_MIN);
   // for time-domain gain of HB
   if (flagHB == 1) {
@@ -1289,13 +1259,9 @@ int WebRtcNs_ProcessCore(NSinst_t* inst,
     }
     //apply gain
     for (i = 0; i < inst->blockLen10ms; i++) {
-      dTmp = gainTimeDomainHB * inst->dataBufHB[i];
-      if (dTmp < WEBRTC_SPL_WORD16_MIN) {
-        dTmp = WEBRTC_SPL_WORD16_MIN;
-      } else if (dTmp > WEBRTC_SPL_WORD16_MAX) {
-        dTmp = WEBRTC_SPL_WORD16_MAX;
-      }
-      outFrameHB[i] = (short)dTmp;
+      float o = gainTimeDomainHB * inst->dataBufHB[i];
+      outFrameHB[i] = WEBRTC_SPL_SAT(
+          WEBRTC_SPL_WORD16_MAX, o, WEBRTC_SPL_WORD16_MIN);
     }
   }  // end of H band gain computation
   //

@@ -167,10 +167,10 @@ int WebRtcNs_set_policy_core(NSinst_t* inst, int mode);
 int WebRtcNs_ProcessCore(NSinst_t* inst,
-                         short* inFrameLow,
-                         short* inFrameHigh,
-                         short* outFrameLow,
-                         short* outFrameHigh);
+                         float* inFrameLow,
+                         float* inFrameHigh,
+                         float* outFrameLow,
+                         float* outFrameHigh);
 #ifdef __cplusplus