From b7192b82476d00384fdc153e6a09a6ac53cef67b Mon Sep 17 00:00:00 2001
From: "pbos@webrtc.org"
Date: Wed, 10 Apr 2013 07:50:54 +0000
Subject: [PATCH] WebRtc_Word32 -> int32_t in audio_processing/

BUG=314
Review URL: https://webrtc-codereview.appspot.com/1307004

git-svn-id: http://webrtc.googlecode.com/svn/trunk@3809 4adac7df-926f-26a2-2b94-8c16560cd09d
---
 .../modules/audio_processing/aec/aec_core.c   |   2 +-
 .../audio_processing/aec/aec_core_internal.h  |   2 +-
 .../audio_processing/aec/echo_cancellation.c  |  23 +-
 .../aec/include/echo_cancellation.h           |  84 ++-
 .../modules/audio_processing/aecm/aecm_core.c | 490 +++++++++---------
 .../modules/audio_processing/aecm/aecm_core.h | 176 +++----
 .../audio_processing/aecm/aecm_core_neon.S    |  20 +-
 .../audio_processing/aecm/aecm_core_neon.c    |  56 +-
 .../aecm/echo_control_mobile.c                |  46 +-
 .../aecm/include/echo_control_mobile.h        |  85 ++-
 .../modules/audio_processing/agc/analog_agc.c | 212 ++++----
 .../modules/audio_processing/agc/analog_agc.h | 122 ++---
 .../audio_processing/agc/digital_agc.c        | 202 ++++----
 .../audio_processing/agc/digital_agc.h        |  62 +--
 .../agc/include/gain_control.h                |  52 +-
 .../modules/audio_processing/audio_buffer.cc  |  16 +-
 .../audio_processing/audio_processing_impl.cc |   2 +-
 .../audio_processing/audio_processing_impl.h  |   2 +-
 .../echo_cancellation_impl.cc                 |   6 +-
 .../echo_control_mobile_impl.cc               |  10 +-
 .../audio_processing/gain_control_impl.cc     |  24 +-
 .../audio_processing/high_pass_filter_impl.cc |  32 +-
 .../include/audio_processing.h                |   4 +-
 .../include/mock_audio_processing.h           |   4 +-
 .../ns/include/noise_suppression.h            |   2 +-
 .../ns/include/noise_suppression_x.h          |   2 +-
 .../audio_processing/ns/noise_suppression.c   |   2 +-
 .../audio_processing/ns/noise_suppression_x.c |   2 +-
 webrtc/modules/audio_processing/ns/ns_core.c  |   2 +-
 webrtc/modules/audio_processing/ns/ns_core.h  |   6 +-
 webrtc/modules/audio_processing/ns/nsx_core.c | 482 ++++++++---------
 webrtc/modules/audio_processing/ns/nsx_core.h |  90 ++--
 .../audio_processing/ns/nsx_core_neon.c       |   6 +-
 .../audio_processing/splitting_filter.cc      |  20 +-
 .../audio_processing/splitting_filter.h       |  20 +-
 .../audio_processing/test/process_test.cc     |  10 +-
 .../audio_processing/voice_detection_impl.cc  |   2 +-
 37 files changed, 1189 insertions(+), 1191 deletions(-)

diff --git a/webrtc/modules/audio_processing/aec/aec_core.c b/webrtc/modules/audio_processing/aec/aec_core.c
index d638b7256..d194c8269 100644
--- a/webrtc/modules/audio_processing/aec/aec_core.c
+++ b/webrtc/modules/audio_processing/aec/aec_core.c
@@ -1361,7 +1361,7 @@ static void ComfortNoise(AecCore* aec, float efw[2][PART_LEN1],
     int i, num;
     float rand[PART_LEN];
     float noise, noiseAvg, tmp, tmpAvg;
-    WebRtc_Word16 randW16[PART_LEN];
+    int16_t randW16[PART_LEN];
     complex_t u[PART_LEN1];
     const float pi2 = 6.28318530717959f;
diff --git a/webrtc/modules/audio_processing/aec/aec_core_internal.h b/webrtc/modules/audio_processing/aec/aec_core_internal.h
index bce134db7..38049cf67 100644
--- a/webrtc/modules/audio_processing/aec/aec_core_internal.h
+++ b/webrtc/modules/audio_processing/aec/aec_core_internal.h
@@ -80,7 +80,7 @@ struct AecCore {
   int mult;  // sampling frequency multiple
   int sampFreq;
-  WebRtc_UWord32 seed;
+  uint32_t seed;
   float mu;  // stepsize
   float errThresh;  // error threshold
diff --git a/webrtc/modules/audio_processing/aec/echo_cancellation.c b/webrtc/modules/audio_processing/aec/echo_cancellation.c
index 9b711be22..2d4135982 100644
--- a/webrtc/modules/audio_processing/aec/echo_cancellation.c
+++ 
b/webrtc/modules/audio_processing/aec/echo_cancellation.c @@ -45,7 +45,7 @@ int webrtc_aec_instance_count = 0; // (controlled by knownDelay) static int EstBufDelay(aecpc_t *aecInst); -WebRtc_Word32 WebRtcAec_Create(void **aecInst) +int32_t WebRtcAec_Create(void **aecInst) { aecpc_t *aecpc; if (aecInst == NULL) { @@ -106,7 +106,7 @@ WebRtc_Word32 WebRtcAec_Create(void **aecInst) return 0; } -WebRtc_Word32 WebRtcAec_Free(void *aecInst) +int32_t WebRtcAec_Free(void *aecInst) { aecpc_t *aecpc = aecInst; @@ -130,7 +130,7 @@ WebRtc_Word32 WebRtcAec_Free(void *aecInst) return 0; } -WebRtc_Word32 WebRtcAec_Init(void *aecInst, WebRtc_Word32 sampFreq, WebRtc_Word32 scSampFreq) +int32_t WebRtcAec_Init(void *aecInst, int32_t sampFreq, int32_t scSampFreq) { aecpc_t *aecpc = aecInst; AecConfig aecConfig; @@ -226,11 +226,11 @@ WebRtc_Word32 WebRtcAec_Init(void *aecInst, WebRtc_Word32 sampFreq, WebRtc_Word3 } // only buffer L band for farend -WebRtc_Word32 WebRtcAec_BufferFarend(void *aecInst, const WebRtc_Word16 *farend, - WebRtc_Word16 nrOfSamples) +int32_t WebRtcAec_BufferFarend(void *aecInst, const int16_t *farend, + int16_t nrOfSamples) { aecpc_t *aecpc = aecInst; - WebRtc_Word32 retVal = 0; + int32_t retVal = 0; int newNrOfSamples = (int) nrOfSamples; short newFarend[MAX_RESAMP_LEN]; const int16_t* farend_ptr = farend; @@ -304,12 +304,13 @@ WebRtc_Word32 WebRtcAec_BufferFarend(void *aecInst, const WebRtc_Word16 *farend, return retVal; } -WebRtc_Word32 WebRtcAec_Process(void *aecInst, const WebRtc_Word16 *nearend, - const WebRtc_Word16 *nearendH, WebRtc_Word16 *out, WebRtc_Word16 *outH, - WebRtc_Word16 nrOfSamples, WebRtc_Word16 msInSndCardBuf, WebRtc_Word32 skew) +int32_t WebRtcAec_Process(void *aecInst, const int16_t *nearend, + const int16_t *nearendH, int16_t *out, int16_t *outH, + int16_t nrOfSamples, int16_t msInSndCardBuf, + int32_t skew) { aecpc_t *aecpc = aecInst; - WebRtc_Word32 retVal = 0; + int32_t retVal = 0; short i; short nBlocks10ms; short nFrames; @@ -689,7 +690,7 @@ int WebRtcAec_GetDelayMetrics(void* handle, int* median, int* std) { return 0; } -WebRtc_Word32 WebRtcAec_get_error_code(void *aecInst) +int32_t WebRtcAec_get_error_code(void *aecInst) { aecpc_t *aecpc = aecInst; diff --git a/webrtc/modules/audio_processing/aec/include/echo_cancellation.h b/webrtc/modules/audio_processing/aec/include/echo_cancellation.h index 0e963fdad..c362a6766 100644 --- a/webrtc/modules/audio_processing/aec/include/echo_cancellation.h +++ b/webrtc/modules/audio_processing/aec/include/echo_cancellation.h @@ -35,10 +35,10 @@ enum { }; typedef struct { - WebRtc_Word16 nlpMode; // default kAecNlpModerate - WebRtc_Word16 skewMode; // default kAecFalse - WebRtc_Word16 metricsMode; // default kAecFalse - int delay_logging; // default kAecFalse + int16_t nlpMode; // default kAecNlpModerate + int16_t skewMode; // default kAecFalse + int16_t metricsMode; // default kAecFalse + int delay_logging; // default kAecFalse //float realSkew; } AecConfig; @@ -73,10 +73,10 @@ extern "C" { * * Outputs Description * ------------------------------------------------------------------- - * WebRtc_Word32 return 0: OK - * -1: error + * int32_t return 0: OK + * -1: error */ -WebRtc_Word32 WebRtcAec_Create(void **aecInst); +int32_t WebRtcAec_Create(void **aecInst); /* * This function releases the memory allocated by WebRtcAec_Create(). 
@@ -87,10 +87,10 @@ WebRtc_Word32 WebRtcAec_Create(void **aecInst); * * Outputs Description * ------------------------------------------------------------------- - * WebRtc_Word32 return 0: OK - * -1: error + * int32_t return 0: OK + * -1: error */ -WebRtc_Word32 WebRtcAec_Free(void *aecInst); +int32_t WebRtcAec_Free(void *aecInst); /* * Initializes an AEC instance. @@ -98,17 +98,15 @@ WebRtc_Word32 WebRtcAec_Free(void *aecInst); * Inputs Description * ------------------------------------------------------------------- * void *aecInst Pointer to the AEC instance - * WebRtc_Word32 sampFreq Sampling frequency of data - * WebRtc_Word32 scSampFreq Soundcard sampling frequency + * int32_t sampFreq Sampling frequency of data + * int32_t scSampFreq Soundcard sampling frequency * * Outputs Description * ------------------------------------------------------------------- - * WebRtc_Word32 return 0: OK - * -1: error + * int32_t return 0: OK + * -1: error */ -WebRtc_Word32 WebRtcAec_Init(void *aecInst, - WebRtc_Word32 sampFreq, - WebRtc_Word32 scSampFreq); +int32_t WebRtcAec_Init(void *aecInst, int32_t sampFreq, int32_t scSampFreq); /* * Inserts an 80 or 160 sample block of data into the farend buffer. @@ -116,18 +114,18 @@ WebRtc_Word32 WebRtcAec_Init(void *aecInst, * Inputs Description * ------------------------------------------------------------------- * void *aecInst Pointer to the AEC instance - * WebRtc_Word16 *farend In buffer containing one frame of + * int16_t *farend In buffer containing one frame of * farend signal for L band - * WebRtc_Word16 nrOfSamples Number of samples in farend buffer + * int16_t nrOfSamples Number of samples in farend buffer * * Outputs Description * ------------------------------------------------------------------- - * WebRtc_Word32 return 0: OK - * -1: error + * int32_t return 0: OK + * -1: error */ -WebRtc_Word32 WebRtcAec_BufferFarend(void *aecInst, - const WebRtc_Word16 *farend, - WebRtc_Word16 nrOfSamples); +int32_t WebRtcAec_BufferFarend(void *aecInst, + const int16_t *farend, + int16_t nrOfSamples); /* * Runs the echo canceller on an 80 or 160 sample blocks of data. 
@@ -135,34 +133,34 @@ WebRtc_Word32 WebRtcAec_BufferFarend(void *aecInst, * Inputs Description * ------------------------------------------------------------------- * void *aecInst Pointer to the AEC instance - * WebRtc_Word16 *nearend In buffer containing one frame of + * int16_t *nearend In buffer containing one frame of * nearend+echo signal for L band - * WebRtc_Word16 *nearendH In buffer containing one frame of + * int16_t *nearendH In buffer containing one frame of * nearend+echo signal for H band - * WebRtc_Word16 nrOfSamples Number of samples in nearend buffer - * WebRtc_Word16 msInSndCardBuf Delay estimate for sound card and + * int16_t nrOfSamples Number of samples in nearend buffer + * int16_t msInSndCardBuf Delay estimate for sound card and * system buffers - * WebRtc_Word16 skew Difference between number of samples played + * int16_t skew Difference between number of samples played * and recorded at the soundcard (for clock skew * compensation) * * Outputs Description * ------------------------------------------------------------------- - * WebRtc_Word16 *out Out buffer, one frame of processed nearend + * int16_t *out Out buffer, one frame of processed nearend * for L band - * WebRtc_Word16 *outH Out buffer, one frame of processed nearend + * int16_t *outH Out buffer, one frame of processed nearend * for H band - * WebRtc_Word32 return 0: OK - * -1: error + * int32_t return 0: OK + * -1: error */ -WebRtc_Word32 WebRtcAec_Process(void *aecInst, - const WebRtc_Word16 *nearend, - const WebRtc_Word16 *nearendH, - WebRtc_Word16 *out, - WebRtc_Word16 *outH, - WebRtc_Word16 nrOfSamples, - WebRtc_Word16 msInSndCardBuf, - WebRtc_Word32 skew); +int32_t WebRtcAec_Process(void *aecInst, + const int16_t *nearend, + const int16_t *nearendH, + int16_t *out, + int16_t *outH, + int16_t nrOfSamples, + int16_t msInSndCardBuf, + int32_t skew); /* * This function enables the user to set certain parameters on-the-fly. @@ -238,9 +236,9 @@ int WebRtcAec_GetDelayMetrics(void* handle, int* median, int* std); * * Outputs Description * ------------------------------------------------------------------- - * WebRtc_Word32 return 11000-11100: error code + * int32_t return 11000-11100: error code */ -WebRtc_Word32 WebRtcAec_get_error_code(void *aecInst); +int32_t WebRtcAec_get_error_code(void *aecInst); // Returns a pointer to the low level AEC handle. // diff --git a/webrtc/modules/audio_processing/aecm/aecm_core.c b/webrtc/modules/audio_processing/aecm/aecm_core.c index a2d43775a..d3f3cd4a5 100644 --- a/webrtc/modules/audio_processing/aecm/aecm_core.c +++ b/webrtc/modules/audio_processing/aecm/aecm_core.c @@ -30,9 +30,9 @@ FILE *testfile; // Square root of Hanning window in Q14. #if defined(WEBRTC_DETECT_ARM_NEON) || defined(WEBRTC_ARCH_ARM_NEON) // Table is defined in an ARM assembly file. 
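The header above fully specifies the public AEC entry points that this patch retypes. As a minimal usage sketch of that call sequence (an illustration, not code from the patch: it assumes 16 kHz mono audio in 160-sample frames, that the H-band pointers may be NULL below 32 kHz, and a made-up 50 ms soundcard delay; error handling is abbreviated):

#include "webrtc/modules/audio_processing/aec/include/echo_cancellation.h"

/* Process one 10 ms frame pair through the AEC, using the int16_t-based
 * signatures introduced by this patch. */
void aec_example(const int16_t* far_frame, const int16_t* near_frame,
                 int16_t* out_frame) {
  void* aec = NULL;
  if (WebRtcAec_Create(&aec) != 0) return;
  if (WebRtcAec_Init(aec, 16000, 16000) != 0) {  /* data rate, soundcard rate */
    WebRtcAec_Free(aec);
    return;
  }
  WebRtcAec_BufferFarend(aec, far_frame, 160);   /* L band only, per the docs */
  WebRtcAec_Process(aec, near_frame, NULL, out_frame, NULL,
                    160, 50 /* assumed delay in ms */, 0 /* no skew */);
  WebRtcAec_Free(aec);
}
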
-extern const ALIGN8_BEG WebRtc_Word16 WebRtcAecm_kSqrtHanning[] ALIGN8_END; +extern const ALIGN8_BEG int16_t WebRtcAecm_kSqrtHanning[] ALIGN8_END; #else -static const ALIGN8_BEG WebRtc_Word16 WebRtcAecm_kSqrtHanning[] ALIGN8_END = { +static const ALIGN8_BEG int16_t WebRtcAecm_kSqrtHanning[] ALIGN8_END = { 0, 399, 798, 1196, 1594, 1990, 2386, 2780, 3172, 3562, 3951, 4337, 4720, 5101, 5478, 5853, 6224, 6591, 6954, 7313, 7668, 8019, 8364, 8705, 9040, @@ -45,20 +45,20 @@ static const ALIGN8_BEG WebRtc_Word16 WebRtcAecm_kSqrtHanning[] ALIGN8_END = { #endif //Q15 alpha = 0.99439986968132 const Factor for magnitude approximation -static const WebRtc_UWord16 kAlpha1 = 32584; +static const uint16_t kAlpha1 = 32584; //Q15 beta = 0.12967166976970 const Factor for magnitude approximation -static const WebRtc_UWord16 kBeta1 = 4249; +static const uint16_t kBeta1 = 4249; //Q15 alpha = 0.94234827210087 const Factor for magnitude approximation -static const WebRtc_UWord16 kAlpha2 = 30879; +static const uint16_t kAlpha2 = 30879; //Q15 beta = 0.33787806009150 const Factor for magnitude approximation -static const WebRtc_UWord16 kBeta2 = 11072; +static const uint16_t kBeta2 = 11072; //Q15 alpha = 0.82247698684306 const Factor for magnitude approximation -static const WebRtc_UWord16 kAlpha3 = 26951; +static const uint16_t kAlpha3 = 26951; //Q15 beta = 0.57762063060713 const Factor for magnitude approximation -static const WebRtc_UWord16 kBeta3 = 18927; +static const uint16_t kBeta3 = 18927; // Initialization table for echo channel in 8 kHz -static const WebRtc_Word16 kChannelStored8kHz[PART_LEN1] = { +static const int16_t kChannelStored8kHz[PART_LEN1] = { 2040, 1815, 1590, 1498, 1405, 1395, 1385, 1418, 1451, 1506, 1562, 1644, 1726, 1804, 1882, 1918, 1953, 1982, 2010, 2025, 2040, 2034, 2027, 2021, @@ -71,7 +71,7 @@ static const WebRtc_Word16 kChannelStored8kHz[PART_LEN1] = { }; // Initialization table for echo channel in 16 kHz -static const WebRtc_Word16 kChannelStored16kHz[PART_LEN1] = { +static const int16_t kChannelStored16kHz[PART_LEN1] = { 2040, 1590, 1405, 1385, 1451, 1562, 1726, 1882, 1953, 2010, 2040, 2027, 2014, 1980, 1869, 1732, 1635, 1572, 1517, 1444, 1367, 1294, 1245, 1233, @@ -83,7 +83,7 @@ static const WebRtc_Word16 kChannelStored16kHz[PART_LEN1] = { 3153 }; -static const WebRtc_Word16 kCosTable[] = { +static const int16_t kCosTable[] = { 8192, 8190, 8187, 8180, 8172, 8160, 8147, 8130, 8112, 8091, 8067, 8041, 8012, 7982, 7948, 7912, 7874, 7834, 7791, 7745, 7697, 7647, 7595, 7540, 7483, 7424, 7362, @@ -126,7 +126,7 @@ static const WebRtc_Word16 kCosTable[] = { 8091, 8112, 8130, 8147, 8160, 8172, 8180, 8187, 8190 }; -static const WebRtc_Word16 kSinTable[] = { +static const int16_t kSinTable[] = { 0, 142, 285, 428, 571, 713, 856, 998, 1140, 1281, 1422, 1563, 1703, 1842, 1981, 2120, 2258, 2395, 2531, 2667, 2801, 2935, 3068, 3200, @@ -174,15 +174,15 @@ static const WebRtc_Word16 kSinTable[] = { -1140, -998, -856, -713, -571, -428, -285, -142 }; -static const WebRtc_Word16 kNoiseEstQDomain = 15; -static const WebRtc_Word16 kNoiseEstIncCount = 5; +static const int16_t kNoiseEstQDomain = 15; +static const int16_t kNoiseEstIncCount = 5; static void ComfortNoise(AecmCore_t* aecm, - const WebRtc_UWord16* dfa, + const uint16_t* dfa, complex16_t* out, - const WebRtc_Word16* lambda); + const int16_t* lambda); -static WebRtc_Word16 CalcSuppressionGain(AecmCore_t * const aecm); +static int16_t CalcSuppressionGain(AecmCore_t * const aecm); // Moves the pointer to the next entry and inserts |far_spectrum| and // 
corresponding Q-domain in its buffer. @@ -316,32 +316,32 @@ int WebRtcAecm_CreateCore(AecmCore_t **aecmInst) // Init some aecm pointers. 16 and 32 byte alignment is only necessary // for Neon code currently. - aecm->xBuf = (WebRtc_Word16*) (((uintptr_t)aecm->xBuf_buf + 31) & ~ 31); - aecm->dBufClean = (WebRtc_Word16*) (((uintptr_t)aecm->dBufClean_buf + 31) & ~ 31); - aecm->dBufNoisy = (WebRtc_Word16*) (((uintptr_t)aecm->dBufNoisy_buf + 31) & ~ 31); - aecm->outBuf = (WebRtc_Word16*) (((uintptr_t)aecm->outBuf_buf + 15) & ~ 15); - aecm->channelStored = (WebRtc_Word16*) (((uintptr_t) + aecm->xBuf = (int16_t*) (((uintptr_t)aecm->xBuf_buf + 31) & ~ 31); + aecm->dBufClean = (int16_t*) (((uintptr_t)aecm->dBufClean_buf + 31) & ~ 31); + aecm->dBufNoisy = (int16_t*) (((uintptr_t)aecm->dBufNoisy_buf + 31) & ~ 31); + aecm->outBuf = (int16_t*) (((uintptr_t)aecm->outBuf_buf + 15) & ~ 15); + aecm->channelStored = (int16_t*) (((uintptr_t) aecm->channelStored_buf + 15) & ~ 15); - aecm->channelAdapt16 = (WebRtc_Word16*) (((uintptr_t) + aecm->channelAdapt16 = (int16_t*) (((uintptr_t) aecm->channelAdapt16_buf + 15) & ~ 15); - aecm->channelAdapt32 = (WebRtc_Word32*) (((uintptr_t) + aecm->channelAdapt32 = (int32_t*) (((uintptr_t) aecm->channelAdapt32_buf + 31) & ~ 31); return 0; } -void WebRtcAecm_InitEchoPathCore(AecmCore_t* aecm, const WebRtc_Word16* echo_path) +void WebRtcAecm_InitEchoPathCore(AecmCore_t* aecm, const int16_t* echo_path) { int i = 0; // Reset the stored channel - memcpy(aecm->channelStored, echo_path, sizeof(WebRtc_Word16) * PART_LEN1); + memcpy(aecm->channelStored, echo_path, sizeof(int16_t) * PART_LEN1); // Reset the adapted channels - memcpy(aecm->channelAdapt16, echo_path, sizeof(WebRtc_Word16) * PART_LEN1); + memcpy(aecm->channelAdapt16, echo_path, sizeof(int16_t) * PART_LEN1); for (i = 0; i < PART_LEN1; i++) { aecm->channelAdapt32[i] = WEBRTC_SPL_LSHIFT_W32( - (WebRtc_Word32)(aecm->channelAdapt16[i]), 16); + (int32_t)(aecm->channelAdapt16[i]), 16); } // Reset channel storing variables @@ -352,24 +352,24 @@ void WebRtcAecm_InitEchoPathCore(AecmCore_t* aecm, const WebRtc_Word16* echo_pat } static void WindowAndFFTC(AecmCore_t* aecm, - WebRtc_Word16* fft, - const WebRtc_Word16* time_signal, + int16_t* fft, + const int16_t* time_signal, complex16_t* freq_signal, int time_signal_scaling) { int i, j; - memset(fft, 0, sizeof(WebRtc_Word16) * PART_LEN4); + memset(fft, 0, sizeof(int16_t) * PART_LEN4); // FFT of signal for (i = 0, j = 0; i < PART_LEN; i++, j += 2) { // Window time domain signal and insert into real part of // transformation array |fft| - fft[j] = (WebRtc_Word16)WEBRTC_SPL_MUL_16_16_RSFT( + fft[j] = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT( (time_signal[i] << time_signal_scaling), WebRtcAecm_kSqrtHanning[i], 14); - fft[PART_LEN2 + j] = (WebRtc_Word16)WEBRTC_SPL_MUL_16_16_RSFT( + fft[PART_LEN2 + j] = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT( (time_signal[i + PART_LEN] << time_signal_scaling), WebRtcAecm_kSqrtHanning[PART_LEN - i], 14); @@ -386,13 +386,13 @@ static void WindowAndFFTC(AecmCore_t* aecm, } static void InverseFFTAndWindowC(AecmCore_t* aecm, - WebRtc_Word16* fft, + int16_t* fft, complex16_t* efw, - WebRtc_Word16* output, - const WebRtc_Word16* nearendClean) + int16_t* output, + const int16_t* nearendClean) { int i, j, outCFFT; - WebRtc_Word32 tmp32no1; + int32_t tmp32no1; // Synthesis for (i = 1; i < PART_LEN; i++) @@ -417,13 +417,13 @@ static void InverseFFTAndWindowC(AecmCore_t* aecm, // to scale the samples in the next block. 
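WindowAndFFTC above applies the Q14 square-root Hanning window through WEBRTC_SPL_MUL_16_16_RSFT(x, w, 14). Isolated, and assuming that macro is the usual 32-bit product followed by an arithmetic right shift, the per-sample operation is:

#include <stdint.h>

/* Window one sample in Q14: y = (x * w) >> 14, with w drawn from
 * WebRtcAecm_kSqrtHanning (Q14, values 0..16384). */
static int16_t window_q14(int16_t x, int16_t w) {
  return (int16_t)(((int32_t)x * (int32_t)w) >> 14);
}
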
outCFFT = WebRtcSpl_RealInverseFFT(aecm->real_fft, fft, (int16_t*)efw); for (i = 0; i < PART_LEN; i++) { - efw[i].real = (WebRtc_Word16)WEBRTC_SPL_MUL_16_16_RSFT_WITH_ROUND( + efw[i].real = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT_WITH_ROUND( efw[i].real, WebRtcAecm_kSqrtHanning[i], 14); - tmp32no1 = WEBRTC_SPL_SHIFT_W32((WebRtc_Word32)efw[i].real, + tmp32no1 = WEBRTC_SPL_SHIFT_W32((int32_t)efw[i].real, outCFFT - aecm->dfaCleanQDomain); - efw[i].real = (WebRtc_Word16)WEBRTC_SPL_SAT(WEBRTC_SPL_WORD16_MAX, + efw[i].real = (int16_t)WEBRTC_SPL_SAT(WEBRTC_SPL_WORD16_MAX, tmp32no1 + aecm->outBuf[i], WEBRTC_SPL_WORD16_MIN); output[i] = efw[i].real; @@ -434,27 +434,27 @@ static void InverseFFTAndWindowC(AecmCore_t* aecm, 14); tmp32no1 = WEBRTC_SPL_SHIFT_W32(tmp32no1, outCFFT - aecm->dfaCleanQDomain); - aecm->outBuf[i] = (WebRtc_Word16)WEBRTC_SPL_SAT( + aecm->outBuf[i] = (int16_t)WEBRTC_SPL_SAT( WEBRTC_SPL_WORD16_MAX, tmp32no1, WEBRTC_SPL_WORD16_MIN); } // Copy the current block to the old position (aecm->outBuf is shifted elsewhere) - memcpy(aecm->xBuf, aecm->xBuf + PART_LEN, sizeof(WebRtc_Word16) * PART_LEN); - memcpy(aecm->dBufNoisy, aecm->dBufNoisy + PART_LEN, sizeof(WebRtc_Word16) * PART_LEN); + memcpy(aecm->xBuf, aecm->xBuf + PART_LEN, sizeof(int16_t) * PART_LEN); + memcpy(aecm->dBufNoisy, aecm->dBufNoisy + PART_LEN, sizeof(int16_t) * PART_LEN); if (nearendClean != NULL) { - memcpy(aecm->dBufClean, aecm->dBufClean + PART_LEN, sizeof(WebRtc_Word16) * PART_LEN); + memcpy(aecm->dBufClean, aecm->dBufClean + PART_LEN, sizeof(int16_t) * PART_LEN); } } static void CalcLinearEnergiesC(AecmCore_t* aecm, - const WebRtc_UWord16* far_spectrum, - WebRtc_Word32* echo_est, - WebRtc_UWord32* far_energy, - WebRtc_UWord32* echo_energy_adapt, - WebRtc_UWord32* echo_energy_stored) + const uint16_t* far_spectrum, + int32_t* echo_est, + uint32_t* far_energy, + uint32_t* echo_energy_adapt, + uint32_t* echo_energy_stored) { int i; @@ -464,21 +464,21 @@ static void CalcLinearEnergiesC(AecmCore_t* aecm, { echo_est[i] = WEBRTC_SPL_MUL_16_U16(aecm->channelStored[i], far_spectrum[i]); - (*far_energy) += (WebRtc_UWord32)(far_spectrum[i]); + (*far_energy) += (uint32_t)(far_spectrum[i]); (*echo_energy_adapt) += WEBRTC_SPL_UMUL_16_16(aecm->channelAdapt16[i], far_spectrum[i]); - (*echo_energy_stored) += (WebRtc_UWord32)echo_est[i]; + (*echo_energy_stored) += (uint32_t)echo_est[i]; } } static void StoreAdaptiveChannelC(AecmCore_t* aecm, - const WebRtc_UWord16* far_spectrum, - WebRtc_Word32* echo_est) + const uint16_t* far_spectrum, + int32_t* echo_est) { int i; // During startup we store the channel every block. - memcpy(aecm->channelStored, aecm->channelAdapt16, sizeof(WebRtc_Word16) * PART_LEN1); + memcpy(aecm->channelStored, aecm->channelAdapt16, sizeof(int16_t) * PART_LEN1); // Recalculate echo estimate for (i = 0; i < PART_LEN; i += 4) { @@ -502,20 +502,20 @@ static void ResetAdaptiveChannelC(AecmCore_t* aecm) // The stored channel has a significantly lower MSE than the adaptive one for // two consecutive calculations. Reset the adaptive channel. 
memcpy(aecm->channelAdapt16, aecm->channelStored, - sizeof(WebRtc_Word16) * PART_LEN1); + sizeof(int16_t) * PART_LEN1); // Restore the W32 channel for (i = 0; i < PART_LEN; i += 4) { aecm->channelAdapt32[i] = WEBRTC_SPL_LSHIFT_W32( - (WebRtc_Word32)aecm->channelStored[i], 16); + (int32_t)aecm->channelStored[i], 16); aecm->channelAdapt32[i + 1] = WEBRTC_SPL_LSHIFT_W32( - (WebRtc_Word32)aecm->channelStored[i + 1], 16); + (int32_t)aecm->channelStored[i + 1], 16); aecm->channelAdapt32[i + 2] = WEBRTC_SPL_LSHIFT_W32( - (WebRtc_Word32)aecm->channelStored[i + 2], 16); + (int32_t)aecm->channelStored[i + 2], 16); aecm->channelAdapt32[i + 3] = WEBRTC_SPL_LSHIFT_W32( - (WebRtc_Word32)aecm->channelStored[i + 3], 16); + (int32_t)aecm->channelStored[i + 3], 16); } - aecm->channelAdapt32[i] = WEBRTC_SPL_LSHIFT_W32((WebRtc_Word32)aecm->channelStored[i], 16); + aecm->channelAdapt32[i] = WEBRTC_SPL_LSHIFT_W32((int32_t)aecm->channelStored[i], 16); } // Initialize function pointers for ARM Neon platform. @@ -547,8 +547,8 @@ static void WebRtcAecm_InitNeon(void) int WebRtcAecm_InitCore(AecmCore_t * const aecm, int samplingFreq) { int i = 0; - WebRtc_Word32 tmp32 = PART_LEN1 * PART_LEN1; - WebRtc_Word16 tmp16 = PART_LEN1; + int32_t tmp32 = PART_LEN1 * PART_LEN1; + int16_t tmp16 = PART_LEN1; if (samplingFreq != 8000 && samplingFreq != 16000) { @@ -556,7 +556,7 @@ int WebRtcAecm_InitCore(AecmCore_t * const aecm, int samplingFreq) return -1; } // sanity check of sampling frequency - aecm->mult = (WebRtc_Word16)samplingFreq / 8000; + aecm->mult = (int16_t)samplingFreq / 8000; aecm->farBufWritePos = 0; aecm->farBufReadPos = 0; @@ -623,7 +623,7 @@ int WebRtcAecm_InitCore(AecmCore_t * const aecm, int samplingFreq) { aecm->noiseEst[i] = (tmp32 << 8); tmp16--; - tmp32 -= (WebRtc_Word32)((tmp16 << 1) + 1); + tmp32 -= (int32_t)((tmp16 << 1) + 1); } for (; i < PART_LEN1; i++) { @@ -705,15 +705,15 @@ int WebRtcAecm_FreeCore(AecmCore_t *aecm) } int WebRtcAecm_ProcessFrame(AecmCore_t * aecm, - const WebRtc_Word16 * farend, - const WebRtc_Word16 * nearendNoisy, - const WebRtc_Word16 * nearendClean, - WebRtc_Word16 * out) + const int16_t * farend, + const int16_t * nearendNoisy, + const int16_t * nearendClean, + int16_t * out) { - WebRtc_Word16 outBlock_buf[PART_LEN + 8]; // Align buffer to 8-byte boundary. - WebRtc_Word16* outBlock = (WebRtc_Word16*) (((uintptr_t) outBlock_buf + 15) & ~ 15); + int16_t outBlock_buf[PART_LEN + 8]; // Align buffer to 8-byte boundary. + int16_t* outBlock = (int16_t*) (((uintptr_t) outBlock_buf + 15) & ~ 15); - WebRtc_Word16 farFrame[FRAME_LEN]; + int16_t farFrame[FRAME_LEN]; const int16_t* out_ptr = NULL; int size = 0; @@ -809,11 +809,11 @@ int WebRtcAecm_ProcessFrame(AecmCore_t * aecm, // // Return: - Filtered value. // -WebRtc_Word16 WebRtcAecm_AsymFilt(const WebRtc_Word16 filtOld, const WebRtc_Word16 inVal, - const WebRtc_Word16 stepSizePos, - const WebRtc_Word16 stepSizeNeg) +int16_t WebRtcAecm_AsymFilt(const int16_t filtOld, const int16_t inVal, + const int16_t stepSizePos, + const int16_t stepSizeNeg) { - WebRtc_Word16 retVal; + int16_t retVal; if ((filtOld == WEBRTC_SPL_WORD16_MAX) | (filtOld == WEBRTC_SPL_WORD16_MIN)) { @@ -845,38 +845,38 @@ WebRtc_Word16 WebRtcAecm_AsymFilt(const WebRtc_Word16 filtOld, const WebRtc_Word // @param echoEst [out] Estimated echo in Q(xfa_q+RESOLUTION_CHANNEL16). 
// void WebRtcAecm_CalcEnergies(AecmCore_t * aecm, - const WebRtc_UWord16* far_spectrum, - const WebRtc_Word16 far_q, - const WebRtc_UWord32 nearEner, - WebRtc_Word32 * echoEst) + const uint16_t* far_spectrum, + const int16_t far_q, + const uint32_t nearEner, + int32_t * echoEst) { // Local variables - WebRtc_UWord32 tmpAdapt = 0; - WebRtc_UWord32 tmpStored = 0; - WebRtc_UWord32 tmpFar = 0; + uint32_t tmpAdapt = 0; + uint32_t tmpStored = 0; + uint32_t tmpFar = 0; int i; - WebRtc_Word16 zeros, frac; - WebRtc_Word16 tmp16; - WebRtc_Word16 increase_max_shifts = 4; - WebRtc_Word16 decrease_max_shifts = 11; - WebRtc_Word16 increase_min_shifts = 11; - WebRtc_Word16 decrease_min_shifts = 3; - WebRtc_Word16 kLogLowValue = WEBRTC_SPL_LSHIFT_W16(PART_LEN_SHIFT, 7); + int16_t zeros, frac; + int16_t tmp16; + int16_t increase_max_shifts = 4; + int16_t decrease_max_shifts = 11; + int16_t increase_min_shifts = 11; + int16_t decrease_min_shifts = 3; + int16_t kLogLowValue = WEBRTC_SPL_LSHIFT_W16(PART_LEN_SHIFT, 7); // Get log of near end energy and store in buffer // Shift buffer memmove(aecm->nearLogEnergy + 1, aecm->nearLogEnergy, - sizeof(WebRtc_Word16) * (MAX_BUF_LEN - 1)); + sizeof(int16_t) * (MAX_BUF_LEN - 1)); // Logarithm of integrated magnitude spectrum (nearEner) tmp16 = kLogLowValue; if (nearEner) { zeros = WebRtcSpl_NormU32(nearEner); - frac = (WebRtc_Word16)WEBRTC_SPL_RSHIFT_U32( + frac = (int16_t)WEBRTC_SPL_RSHIFT_U32( (WEBRTC_SPL_LSHIFT_U32(nearEner, zeros) & 0x7FFFFFFF), 23); // log2 in Q8 @@ -890,16 +890,16 @@ void WebRtcAecm_CalcEnergies(AecmCore_t * aecm, // Shift buffers memmove(aecm->echoAdaptLogEnergy + 1, aecm->echoAdaptLogEnergy, - sizeof(WebRtc_Word16) * (MAX_BUF_LEN - 1)); + sizeof(int16_t) * (MAX_BUF_LEN - 1)); memmove(aecm->echoStoredLogEnergy + 1, aecm->echoStoredLogEnergy, - sizeof(WebRtc_Word16) * (MAX_BUF_LEN - 1)); + sizeof(int16_t) * (MAX_BUF_LEN - 1)); // Logarithm of delayed far end energy tmp16 = kLogLowValue; if (tmpFar) { zeros = WebRtcSpl_NormU32(tmpFar); - frac = (WebRtc_Word16)WEBRTC_SPL_RSHIFT_U32((WEBRTC_SPL_LSHIFT_U32(tmpFar, zeros) + frac = (int16_t)WEBRTC_SPL_RSHIFT_U32((WEBRTC_SPL_LSHIFT_U32(tmpFar, zeros) & 0x7FFFFFFF), 23); // log2 in Q8 tmp16 += WEBRTC_SPL_LSHIFT_W16((31 - zeros), 8) + frac; @@ -912,7 +912,7 @@ void WebRtcAecm_CalcEnergies(AecmCore_t * aecm, if (tmpAdapt) { zeros = WebRtcSpl_NormU32(tmpAdapt); - frac = (WebRtc_Word16)WEBRTC_SPL_RSHIFT_U32((WEBRTC_SPL_LSHIFT_U32(tmpAdapt, zeros) + frac = (int16_t)WEBRTC_SPL_RSHIFT_U32((WEBRTC_SPL_LSHIFT_U32(tmpAdapt, zeros) & 0x7FFFFFFF), 23); //log2 in Q8 tmp16 += WEBRTC_SPL_LSHIFT_W16((31 - zeros), 8) + frac; @@ -925,7 +925,7 @@ void WebRtcAecm_CalcEnergies(AecmCore_t * aecm, if (tmpStored) { zeros = WebRtcSpl_NormU32(tmpStored); - frac = (WebRtc_Word16)WEBRTC_SPL_RSHIFT_U32((WEBRTC_SPL_LSHIFT_U32(tmpStored, zeros) + frac = (int16_t)WEBRTC_SPL_RSHIFT_U32((WEBRTC_SPL_LSHIFT_U32(tmpStored, zeros) & 0x7FFFFFFF), 23); //log2 in Q8 tmp16 += WEBRTC_SPL_LSHIFT_W16((31 - zeros), 8) + frac; @@ -953,7 +953,7 @@ void WebRtcAecm_CalcEnergies(AecmCore_t * aecm, tmp16 = 2560 - aecm->farEnergyMin; if (tmp16 > 0) { - tmp16 = (WebRtc_Word16)WEBRTC_SPL_MUL_16_16_RSFT(tmp16, FAR_ENERGY_VAD_REGION, 9); + tmp16 = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(tmp16, FAR_ENERGY_VAD_REGION, 9); } else { tmp16 = 0; @@ -1022,12 +1022,12 @@ void WebRtcAecm_CalcEnergies(AecmCore_t * aecm, // @param mu [out] (Return value) Stepsize in log2(), i.e. number of shifts. 
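The energy bookkeeping above repeatedly computes a Q8 log2 of a 32-bit energy from the normalization count plus the top mantissa bits. A self-contained version of that pattern, using the GCC builtin __builtin_clz as a stand-in for WebRtcSpl_NormU32:

#include <stdint.h>

/* Approximate log2(x) in Q8 for x > 0, mirroring the zeros/frac pattern
 * used in WebRtcAecm_CalcEnergies(). */
static int16_t log2_q8(uint32_t x) {
  int zeros = __builtin_clz(x);                       /* leading zeros, 0..31 */
  uint32_t frac = ((x << zeros) & 0x7FFFFFFF) >> 23;  /* 8 fraction bits */
  return (int16_t)(((31 - zeros) << 8) + (int16_t)frac);
}
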
// // -WebRtc_Word16 WebRtcAecm_CalcStepSize(AecmCore_t * const aecm) +int16_t WebRtcAecm_CalcStepSize(AecmCore_t * const aecm) { - WebRtc_Word32 tmp32; - WebRtc_Word16 tmp16; - WebRtc_Word16 mu = MU_MAX; + int32_t tmp32; + int16_t tmp16; + int16_t mu = MU_MAX; // Here we calculate the step size mu used in the // following NLMS based Channel estimation algorithm @@ -1045,7 +1045,7 @@ WebRtc_Word16 WebRtcAecm_CalcStepSize(AecmCore_t * const aecm) tmp16 = (aecm->farLogEnergy - aecm->farEnergyMin); tmp32 = WEBRTC_SPL_MUL_16_16(tmp16, MU_DIFF); tmp32 = WebRtcSpl_DivW32W16(tmp32, aecm->farEnergyMaxMin); - mu = MU_MIN - 1 - (WebRtc_Word16)(tmp32); + mu = MU_MIN - 1 - (int16_t)(tmp32); // The -1 is an alternative to rounding. This way we get a larger // stepsize, so we in some sense compensate for truncation in NLMS } @@ -1071,24 +1071,24 @@ WebRtc_Word16 WebRtcAecm_CalcStepSize(AecmCore_t * const aecm) // @param echoEst [i/o] Estimated echo in Q(far_q+RESOLUTION_CHANNEL16). // void WebRtcAecm_UpdateChannel(AecmCore_t * aecm, - const WebRtc_UWord16* far_spectrum, - const WebRtc_Word16 far_q, - const WebRtc_UWord16 * const dfa, - const WebRtc_Word16 mu, - WebRtc_Word32 * echoEst) + const uint16_t* far_spectrum, + const int16_t far_q, + const uint16_t * const dfa, + const int16_t mu, + int32_t * echoEst) { - WebRtc_UWord32 tmpU32no1, tmpU32no2; - WebRtc_Word32 tmp32no1, tmp32no2; - WebRtc_Word32 mseStored; - WebRtc_Word32 mseAdapt; + uint32_t tmpU32no1, tmpU32no2; + int32_t tmp32no1, tmp32no2; + int32_t mseStored; + int32_t mseAdapt; int i; - WebRtc_Word16 zerosFar, zerosNum, zerosCh, zerosDfa; - WebRtc_Word16 shiftChFar, shiftNum, shift2ResChan; - WebRtc_Word16 tmp16no1; - WebRtc_Word16 xfaQ, dfaQ; + int16_t zerosFar, zerosNum, zerosCh, zerosDfa; + int16_t shiftChFar, shiftNum, shift2ResChan; + int16_t tmp16no1; + int16_t xfaQ, dfaQ; // This is the channel estimation algorithm. It is base on NLMS but has a variable step // length, which was calculated above. 
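Before the fixed-point details, the channel update that follows is plain NLMS per frequency bin. In floating point the same recursion would read as below (a conceptual sketch only; the code in the hunk realizes the normalization through the variable step size from WebRtcAecm_CalcStepSize() and works entirely in shifts):

/* One NLMS step for a single magnitude bin:
 *   e = d - h * x;   h += mu * e * x / (x * x + eps); */
static void nlms_bin(float* h, float x, float d, float mu, float eps) {
  float e = d - (*h) * x;
  *h += mu * e * x / (x * x + eps);
}
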
@@ -1099,7 +1099,7 @@ void WebRtcAecm_UpdateChannel(AecmCore_t * aecm, // Determine norm of channel and farend to make sure we don't get overflow in // multiplication zerosCh = WebRtcSpl_NormU32(aecm->channelAdapt32[i]); - zerosFar = WebRtcSpl_NormU32((WebRtc_UWord32)far_spectrum[i]); + zerosFar = WebRtcSpl_NormU32((uint32_t)far_spectrum[i]); if (zerosCh + zerosFar > 31) { // Multiplication is safe @@ -1118,7 +1118,7 @@ void WebRtcAecm_UpdateChannel(AecmCore_t * aecm, zerosNum = WebRtcSpl_NormU32(tmpU32no1); if (dfa[i]) { - zerosDfa = WebRtcSpl_NormU32((WebRtc_UWord32)dfa[i]); + zerosDfa = WebRtcSpl_NormU32((uint32_t)dfa[i]); } else { zerosDfa = 32; @@ -1137,8 +1137,8 @@ void WebRtcAecm_UpdateChannel(AecmCore_t * aecm, } // Add in the same Q-domain tmpU32no1 = WEBRTC_SPL_SHIFT_W32(tmpU32no1, xfaQ); - tmpU32no2 = WEBRTC_SPL_SHIFT_W32((WebRtc_UWord32)dfa[i], dfaQ); - tmp32no1 = (WebRtc_Word32)tmpU32no2 - (WebRtc_Word32)tmpU32no1; + tmpU32no2 = WEBRTC_SPL_SHIFT_W32((uint32_t)dfa[i], dfaQ); + tmp32no1 = (int32_t)tmpU32no2 - (int32_t)tmpU32no1; zerosNum = WebRtcSpl_NormW32(tmp32no1); if ((tmp32no1) && (far_spectrum[i] > (CHANNEL_VAD << far_q))) { @@ -1158,11 +1158,11 @@ void WebRtcAecm_UpdateChannel(AecmCore_t * aecm, { if (tmp32no1 > 0) { - tmp32no2 = (WebRtc_Word32)WEBRTC_SPL_UMUL_32_16(tmp32no1, + tmp32no2 = (int32_t)WEBRTC_SPL_UMUL_32_16(tmp32no1, far_spectrum[i]); } else { - tmp32no2 = -(WebRtc_Word32)WEBRTC_SPL_UMUL_32_16(-tmp32no1, + tmp32no2 = -(int32_t)WEBRTC_SPL_UMUL_32_16(-tmp32no1, far_spectrum[i]); } shiftNum = 0; @@ -1171,12 +1171,12 @@ void WebRtcAecm_UpdateChannel(AecmCore_t * aecm, shiftNum = 32 - (zerosNum + zerosFar); if (tmp32no1 > 0) { - tmp32no2 = (WebRtc_Word32)WEBRTC_SPL_UMUL_32_16( + tmp32no2 = (int32_t)WEBRTC_SPL_UMUL_32_16( WEBRTC_SPL_RSHIFT_W32(tmp32no1, shiftNum), far_spectrum[i]); } else { - tmp32no2 = -(WebRtc_Word32)WEBRTC_SPL_UMUL_32_16( + tmp32no2 = -(int32_t)WEBRTC_SPL_UMUL_32_16( WEBRTC_SPL_RSHIFT_W32(-tmp32no1, shiftNum), far_spectrum[i]); } @@ -1200,7 +1200,7 @@ void WebRtcAecm_UpdateChannel(AecmCore_t * aecm, aecm->channelAdapt32[i] = 0; } aecm->channelAdapt16[i] - = (WebRtc_Word16)WEBRTC_SPL_RSHIFT_W32(aecm->channelAdapt32[i], 16); + = (int16_t)WEBRTC_SPL_RSHIFT_W32(aecm->channelAdapt32[i], 16); } } } @@ -1231,13 +1231,13 @@ void WebRtcAecm_UpdateChannel(AecmCore_t * aecm, mseAdapt = 0; for (i = 0; i < MIN_MSE_COUNT; i++) { - tmp32no1 = ((WebRtc_Word32)aecm->echoStoredLogEnergy[i] - - (WebRtc_Word32)aecm->nearLogEnergy[i]); + tmp32no1 = ((int32_t)aecm->echoStoredLogEnergy[i] + - (int32_t)aecm->nearLogEnergy[i]); tmp32no2 = WEBRTC_SPL_ABS_W32(tmp32no1); mseStored += tmp32no2; - tmp32no1 = ((WebRtc_Word32)aecm->echoAdaptLogEnergy[i] - - (WebRtc_Word32)aecm->nearLogEnergy[i]); + tmp32no1 = ((int32_t)aecm->echoAdaptLogEnergy[i] + - (int32_t)aecm->nearLogEnergy[i]); tmp32no2 = WEBRTC_SPL_ABS_W32(tmp32no1); mseAdapt += tmp32no2; } @@ -1289,13 +1289,13 @@ void WebRtcAecm_UpdateChannel(AecmCore_t * aecm, // level (Q14). // // -static WebRtc_Word16 CalcSuppressionGain(AecmCore_t * const aecm) +static int16_t CalcSuppressionGain(AecmCore_t * const aecm) { - WebRtc_Word32 tmp32no1; + int32_t tmp32no1; - WebRtc_Word16 supGain = SUPGAIN_DEFAULT; - WebRtc_Word16 tmp16no1; - WebRtc_Word16 dE = 0; + int16_t supGain = SUPGAIN_DEFAULT; + int16_t tmp16no1; + int16_t dE = 0; // Determine suppression gain used in the Wiener filter. The gain is based on a mix of far // end energy and echo estimation error. 
@@ -1319,14 +1319,14 @@ static WebRtc_Word16 CalcSuppressionGain(AecmCore_t * const aecm) { tmp32no1 = WEBRTC_SPL_MUL_16_16(aecm->supGainErrParamDiffAB, dE); tmp32no1 += (SUPGAIN_EPC_DT >> 1); - tmp16no1 = (WebRtc_Word16)WebRtcSpl_DivW32W16(tmp32no1, SUPGAIN_EPC_DT); + tmp16no1 = (int16_t)WebRtcSpl_DivW32W16(tmp32no1, SUPGAIN_EPC_DT); supGain = aecm->supGainErrParamA - tmp16no1; } else { tmp32no1 = WEBRTC_SPL_MUL_16_16(aecm->supGainErrParamDiffBD, (ENERGY_DEV_TOL - dE)); tmp32no1 += ((ENERGY_DEV_TOL - SUPGAIN_EPC_DT) >> 1); - tmp16no1 = (WebRtc_Word16)WebRtcSpl_DivW32W16(tmp32no1, (ENERGY_DEV_TOL + tmp16no1 = (int16_t)WebRtcSpl_DivW32W16(tmp32no1, (ENERGY_DEV_TOL - SUPGAIN_EPC_DT)); supGain = aecm->supGainErrParamD + tmp16no1; } @@ -1347,10 +1347,10 @@ static WebRtc_Word16 CalcSuppressionGain(AecmCore_t * const aecm) aecm->supGainOld = supGain; if (tmp16no1 < aecm->supGain) { - aecm->supGain += (WebRtc_Word16)((tmp16no1 - aecm->supGain) >> 4); + aecm->supGain += (int16_t)((tmp16no1 - aecm->supGain) >> 4); } else { - aecm->supGain += (WebRtc_Word16)((tmp16no1 - aecm->supGain) >> 4); + aecm->supGain += (int16_t)((tmp16no1 - aecm->supGain) >> 4); } // END: Update suppression gain @@ -1372,30 +1372,30 @@ static WebRtc_Word16 CalcSuppressionGain(AecmCore_t * const aecm) // return value The Q-domain of current frequency values // static int TimeToFrequencyDomain(AecmCore_t* aecm, - const WebRtc_Word16* time_signal, + const int16_t* time_signal, complex16_t* freq_signal, - WebRtc_UWord16* freq_signal_abs, - WebRtc_UWord32* freq_signal_sum_abs) + uint16_t* freq_signal_abs, + uint32_t* freq_signal_sum_abs) { int i = 0; int time_signal_scaling = 0; - WebRtc_Word32 tmp32no1 = 0; - WebRtc_Word32 tmp32no2 = 0; + int32_t tmp32no1 = 0; + int32_t tmp32no2 = 0; // In fft_buf, +16 for 32-byte alignment. 
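The declarations that follow over-allocate each buffer and align the working pointer by hand, the same (((uintptr_t)buf + 31) & ~31) idiom used in WebRtcAecm_CreateCore(). The general form, written as a hypothetical helper (the file itself always inlines the expression):

#include <stdint.h>

/* Round p up to the next boundary-byte boundary; boundary must be a power
 * of two (16 or 32 in this file). The extra +8/+16 elements in the buffer
 * declarations provide the slack this rounding consumes. */
static void* align_up(void* p, uintptr_t boundary) {
  return (void*)(((uintptr_t)p + boundary - 1) & ~(boundary - 1));
}
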
- WebRtc_Word16 fft_buf[PART_LEN4 + 16]; - WebRtc_Word16 *fft = (WebRtc_Word16 *) (((uintptr_t) fft_buf + 31) & ~31); + int16_t fft_buf[PART_LEN4 + 16]; + int16_t *fft = (int16_t *) (((uintptr_t) fft_buf + 31) & ~31); - WebRtc_Word16 tmp16no1; + int16_t tmp16no1; #ifndef WEBRTC_ARCH_ARM_V7 - WebRtc_Word16 tmp16no2; + int16_t tmp16no2; #endif #ifdef AECM_WITH_ABS_APPROX - WebRtc_Word16 max_value = 0; - WebRtc_Word16 min_value = 0; - WebRtc_UWord16 alpha = 0; - WebRtc_UWord16 beta = 0; + int16_t max_value = 0; + int16_t min_value = 0; + uint16_t alpha = 0; + uint16_t beta = 0; #endif #ifdef AECM_DYNAMIC_Q @@ -1408,23 +1408,23 @@ static int TimeToFrequencyDomain(AecmCore_t* aecm, // Extract imaginary and real part, calculate the magnitude for all frequency bins freq_signal[0].imag = 0; freq_signal[PART_LEN].imag = 0; - freq_signal_abs[0] = (WebRtc_UWord16)WEBRTC_SPL_ABS_W16( + freq_signal_abs[0] = (uint16_t)WEBRTC_SPL_ABS_W16( freq_signal[0].real); - freq_signal_abs[PART_LEN] = (WebRtc_UWord16)WEBRTC_SPL_ABS_W16( + freq_signal_abs[PART_LEN] = (uint16_t)WEBRTC_SPL_ABS_W16( freq_signal[PART_LEN].real); - (*freq_signal_sum_abs) = (WebRtc_UWord32)(freq_signal_abs[0]) + - (WebRtc_UWord32)(freq_signal_abs[PART_LEN]); + (*freq_signal_sum_abs) = (uint32_t)(freq_signal_abs[0]) + + (uint32_t)(freq_signal_abs[PART_LEN]); for (i = 1; i < PART_LEN; i++) { if (freq_signal[i].real == 0) { - freq_signal_abs[i] = (WebRtc_UWord16)WEBRTC_SPL_ABS_W16( + freq_signal_abs[i] = (uint16_t)WEBRTC_SPL_ABS_W16( freq_signal[i].imag); } else if (freq_signal[i].imag == 0) { - freq_signal_abs[i] = (WebRtc_UWord16)WEBRTC_SPL_ABS_W16( + freq_signal_abs[i] = (uint16_t)WEBRTC_SPL_ABS_W16( freq_signal[i].real); } else @@ -1463,14 +1463,14 @@ static int TimeToFrequencyDomain(AecmCore_t* aecm, alpha = kAlpha3; beta = kBeta3; } - tmp16no1 = (WebRtc_Word16)WEBRTC_SPL_MUL_16_16_RSFT(max_value, + tmp16no1 = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(max_value, alpha, 15); - tmp16no2 = (WebRtc_Word16)WEBRTC_SPL_MUL_16_16_RSFT(min_value, + tmp16no2 = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(min_value, beta, 15); - freq_signal_abs[i] = (WebRtc_UWord16)tmp16no1 + - (WebRtc_UWord16)tmp16no2; + freq_signal_abs[i] = (uint16_t)tmp16no1 + + (uint16_t)tmp16no2; #else #ifdef WEBRTC_ARCH_ARM_V7 __asm __volatile( @@ -1490,65 +1490,65 @@ static int TimeToFrequencyDomain(AecmCore_t* aecm, #endif // WEBRTC_ARCH_ARM_V7 tmp32no1 = WebRtcSpl_SqrtFloor(tmp32no2); - freq_signal_abs[i] = (WebRtc_UWord16)tmp32no1; + freq_signal_abs[i] = (uint16_t)tmp32no1; #endif // AECM_WITH_ABS_APPROX } - (*freq_signal_sum_abs) += (WebRtc_UWord32)freq_signal_abs[i]; + (*freq_signal_sum_abs) += (uint32_t)freq_signal_abs[i]; } return time_signal_scaling; } int WebRtcAecm_ProcessBlock(AecmCore_t * aecm, - const WebRtc_Word16 * farend, - const WebRtc_Word16 * nearendNoisy, - const WebRtc_Word16 * nearendClean, - WebRtc_Word16 * output) + const int16_t * farend, + const int16_t * nearendNoisy, + const int16_t * nearendClean, + int16_t * output) { int i; - WebRtc_UWord32 xfaSum; - WebRtc_UWord32 dfaNoisySum; - WebRtc_UWord32 dfaCleanSum; - WebRtc_UWord32 echoEst32Gained; - WebRtc_UWord32 tmpU32; + uint32_t xfaSum; + uint32_t dfaNoisySum; + uint32_t dfaCleanSum; + uint32_t echoEst32Gained; + uint32_t tmpU32; - WebRtc_Word32 tmp32no1; + int32_t tmp32no1; - WebRtc_UWord16 xfa[PART_LEN1]; - WebRtc_UWord16 dfaNoisy[PART_LEN1]; - WebRtc_UWord16 dfaClean[PART_LEN1]; - WebRtc_UWord16* ptrDfaClean = dfaClean; - const WebRtc_UWord16* far_spectrum_ptr = NULL; + uint16_t xfa[PART_LEN1]; + uint16_t 
dfaNoisy[PART_LEN1]; + uint16_t dfaClean[PART_LEN1]; + uint16_t* ptrDfaClean = dfaClean; + const uint16_t* far_spectrum_ptr = NULL; // 32 byte aligned buffers (with +8 or +16). // TODO (kma): define fft with complex16_t. - WebRtc_Word16 fft_buf[PART_LEN4 + 2 + 16]; // +2 to make a loop safe. - WebRtc_Word32 echoEst32_buf[PART_LEN1 + 8]; - WebRtc_Word32 dfw_buf[PART_LEN2 + 8]; - WebRtc_Word32 efw_buf[PART_LEN2 + 8]; + int16_t fft_buf[PART_LEN4 + 2 + 16]; // +2 to make a loop safe. + int32_t echoEst32_buf[PART_LEN1 + 8]; + int32_t dfw_buf[PART_LEN2 + 8]; + int32_t efw_buf[PART_LEN2 + 8]; - WebRtc_Word16* fft = (WebRtc_Word16*) (((uintptr_t) fft_buf + 31) & ~ 31); - WebRtc_Word32* echoEst32 = (WebRtc_Word32*) (((uintptr_t) echoEst32_buf + 31) & ~ 31); + int16_t* fft = (int16_t*) (((uintptr_t) fft_buf + 31) & ~ 31); + int32_t* echoEst32 = (int32_t*) (((uintptr_t) echoEst32_buf + 31) & ~ 31); complex16_t* dfw = (complex16_t*) (((uintptr_t) dfw_buf + 31) & ~ 31); complex16_t* efw = (complex16_t*) (((uintptr_t) efw_buf + 31) & ~ 31); - WebRtc_Word16 hnl[PART_LEN1]; - WebRtc_Word16 numPosCoef = 0; - WebRtc_Word16 nlpGain = ONE_Q14; + int16_t hnl[PART_LEN1]; + int16_t numPosCoef = 0; + int16_t nlpGain = ONE_Q14; int delay; - WebRtc_Word16 tmp16no1; - WebRtc_Word16 tmp16no2; - WebRtc_Word16 mu; - WebRtc_Word16 supGain; - WebRtc_Word16 zeros32, zeros16; - WebRtc_Word16 zerosDBufNoisy, zerosDBufClean, zerosXBuf; + int16_t tmp16no1; + int16_t tmp16no2; + int16_t mu; + int16_t supGain; + int16_t zeros32, zeros16; + int16_t zerosDBufNoisy, zerosDBufClean, zerosXBuf; int far_q; - WebRtc_Word16 resolutionDiff, qDomainDiff; + int16_t resolutionDiff, qDomainDiff; const int kMinPrefBand = 4; const int kMaxPrefBand = 24; - WebRtc_Word32 avgHnl32 = 0; + int32_t avgHnl32 = 0; // Determine startup state. There are three states: // (0) the first CONV_LEN blocks @@ -1562,11 +1562,11 @@ int WebRtcAecm_ProcessBlock(AecmCore_t * aecm, // END: Determine startup state // Buffer near and far end signals - memcpy(aecm->xBuf + PART_LEN, farend, sizeof(WebRtc_Word16) * PART_LEN); - memcpy(aecm->dBufNoisy + PART_LEN, nearendNoisy, sizeof(WebRtc_Word16) * PART_LEN); + memcpy(aecm->xBuf + PART_LEN, farend, sizeof(int16_t) * PART_LEN); + memcpy(aecm->dBufNoisy + PART_LEN, nearendNoisy, sizeof(int16_t) * PART_LEN); if (nearendClean != NULL) { - memcpy(aecm->dBufClean + PART_LEN, nearendClean, sizeof(WebRtc_Word16) * PART_LEN); + memcpy(aecm->dBufClean + PART_LEN, nearendClean, sizeof(int16_t) * PART_LEN); } // Transform far end signal from time domain to frequency domain. 
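When AECM_WITH_ABS_APPROX is defined, TimeToFrequencyDomain above estimates each bin magnitude with an alpha-max-plus-beta-min rule using the Q15 kAlpha*/kBeta* pairs instead of a square root. Pulled out on its own (a sketch; the real code selects the constant pair from the max/min ratio):

#include <stdint.h>
#include <stdlib.h>

/* |re + j*im| ~= (alpha * max + beta * min), with alpha and beta in Q15. */
static uint16_t approx_magnitude(int16_t re, int16_t im,
                                 uint16_t alpha, uint16_t beta) {
  int a = abs(re);
  int b = abs(im);
  int max_value = a > b ? a : b;
  int min_value = a < b ? a : b;
  return (uint16_t)(((max_value * alpha) >> 15) + ((min_value * beta) >> 15));
}
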
@@ -1583,7 +1583,7 @@ int WebRtcAecm_ProcessBlock(AecmCore_t * aecm, dfaNoisy, &dfaNoisySum); aecm->dfaNoisyQDomainOld = aecm->dfaNoisyQDomain; - aecm->dfaNoisyQDomain = (WebRtc_Word16)zerosDBufNoisy; + aecm->dfaNoisyQDomain = (int16_t)zerosDBufNoisy; if (nearendClean == NULL) @@ -1601,7 +1601,7 @@ int WebRtcAecm_ProcessBlock(AecmCore_t * aecm, dfaClean, &dfaCleanSum); aecm->dfaCleanQDomainOld = aecm->dfaCleanQDomain; - aecm->dfaCleanQDomain = (WebRtc_Word16)zerosDBufClean; + aecm->dfaCleanQDomain = (int16_t)zerosDBufClean; } // Get the delay @@ -1634,7 +1634,7 @@ int WebRtcAecm_ProcessBlock(AecmCore_t * aecm, // Get aligned far end spectrum far_spectrum_ptr = AlignedFarend(aecm, &far_q, delay); - zerosXBuf = (WebRtc_Word16) far_q; + zerosXBuf = (int16_t) far_q; if (far_spectrum_ptr == NULL) { return -1; @@ -1673,8 +1673,8 @@ int WebRtcAecm_ProcessBlock(AecmCore_t * aecm, { // Multiplication is safe // Result in Q(RESOLUTION_CHANNEL+RESOLUTION_SUPGAIN+aecm->xfaQDomainBuf[diff]) - echoEst32Gained = WEBRTC_SPL_UMUL_32_16((WebRtc_UWord32)aecm->echoFilt[i], - (WebRtc_UWord16)supGain); + echoEst32Gained = WEBRTC_SPL_UMUL_32_16((uint32_t)aecm->echoFilt[i], + (uint16_t)supGain); resolutionDiff = 14 - RESOLUTION_CHANNEL16 - RESOLUTION_SUPGAIN; resolutionDiff += (aecm->dfaCleanQDomain - zerosXBuf); } else @@ -1684,15 +1684,15 @@ int WebRtcAecm_ProcessBlock(AecmCore_t * aecm, resolutionDiff += (aecm->dfaCleanQDomain - zerosXBuf); if (zeros32 > tmp16no1) { - echoEst32Gained = WEBRTC_SPL_UMUL_32_16((WebRtc_UWord32)aecm->echoFilt[i], - (WebRtc_UWord16)WEBRTC_SPL_RSHIFT_W16(supGain, + echoEst32Gained = WEBRTC_SPL_UMUL_32_16((uint32_t)aecm->echoFilt[i], + (uint16_t)WEBRTC_SPL_RSHIFT_W16(supGain, tmp16no1)); // Q-(RESOLUTION_CHANNEL+RESOLUTION_SUPGAIN-16) } else { // Result in Q-(RESOLUTION_CHANNEL+RESOLUTION_SUPGAIN-16) echoEst32Gained = WEBRTC_SPL_UMUL_32_16( - (WebRtc_UWord32)WEBRTC_SPL_RSHIFT_W32(aecm->echoFilt[i], tmp16no1), - (WebRtc_UWord16)supGain); + (uint32_t)WEBRTC_SPL_RSHIFT_W32(aecm->echoFilt[i], tmp16no1), + (uint16_t)supGain); } } @@ -1709,8 +1709,8 @@ int WebRtcAecm_ProcessBlock(AecmCore_t * aecm, qDomainDiff = 0; } tmp16no2 = WEBRTC_SPL_SHIFT_W16(ptrDfaClean[i], qDomainDiff); - tmp32no1 = (WebRtc_Word32)(tmp16no2 - tmp16no1); - tmp16no2 = (WebRtc_Word16)WEBRTC_SPL_RSHIFT_W32(tmp32no1, 4); + tmp32no1 = (int32_t)(tmp16no2 - tmp16no1); + tmp16no2 = (int16_t)WEBRTC_SPL_RSHIFT_W32(tmp32no1, 4); tmp16no2 += tmp16no1; zeros16 = WebRtcSpl_NormW16(tmp16no2); if ((tmp16no2) & (-qDomainDiff > zeros16)) @@ -1732,13 +1732,13 @@ int WebRtcAecm_ProcessBlock(AecmCore_t * aecm, { // Multiply the suppression gain // Rounding - echoEst32Gained += (WebRtc_UWord32)(aecm->nearFilt[i] >> 1); - tmpU32 = WebRtcSpl_DivU32U16(echoEst32Gained, (WebRtc_UWord16)aecm->nearFilt[i]); + echoEst32Gained += (uint32_t)(aecm->nearFilt[i] >> 1); + tmpU32 = WebRtcSpl_DivU32U16(echoEst32Gained, (uint16_t)aecm->nearFilt[i]); // Current resolution is // Q-(RESOLUTION_CHANNEL + RESOLUTION_SUPGAIN - max(0, 17 - zeros16 - zeros32)) // Make sure we are in Q14 - tmp32no1 = (WebRtc_Word32)WEBRTC_SPL_SHIFT_W32(tmpU32, resolutionDiff); + tmp32no1 = (int32_t)WEBRTC_SPL_SHIFT_W32(tmpU32, resolutionDiff); if (tmp32no1 > ONE_Q14) { hnl[i] = 0; @@ -1748,7 +1748,7 @@ int WebRtcAecm_ProcessBlock(AecmCore_t * aecm, } else { // 1-echoEst/dfa - hnl[i] = ONE_Q14 - (WebRtc_Word16)tmp32no1; + hnl[i] = ONE_Q14 - (int16_t)tmp32no1; if (hnl[i] < 0) { hnl[i] = 0; @@ -1768,21 +1768,21 @@ int WebRtcAecm_ProcessBlock(AecmCore_t * aecm, // speech distortion 
in double-talk. for (i = 0; i < PART_LEN1; i++) { - hnl[i] = (WebRtc_Word16)WEBRTC_SPL_MUL_16_16_RSFT(hnl[i], hnl[i], 14); + hnl[i] = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(hnl[i], hnl[i], 14); } for (i = kMinPrefBand; i <= kMaxPrefBand; i++) { - avgHnl32 += (WebRtc_Word32)hnl[i]; + avgHnl32 += (int32_t)hnl[i]; } assert(kMaxPrefBand - kMinPrefBand + 1 > 0); avgHnl32 /= (kMaxPrefBand - kMinPrefBand + 1); for (i = kMaxPrefBand; i < PART_LEN1; i++) { - if (hnl[i] > (WebRtc_Word16)avgHnl32) + if (hnl[i] > (int16_t)avgHnl32) { - hnl[i] = (WebRtc_Word16)avgHnl32; + hnl[i] = (int16_t)avgHnl32; } } } @@ -1816,13 +1816,13 @@ int WebRtcAecm_ProcessBlock(AecmCore_t * aecm, hnl[i] = ONE_Q14; } else { - hnl[i] = (WebRtc_Word16)WEBRTC_SPL_MUL_16_16_RSFT(hnl[i], nlpGain, 14); + hnl[i] = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(hnl[i], nlpGain, 14); } // multiply with Wiener coefficients - efw[i].real = (WebRtc_Word16)(WEBRTC_SPL_MUL_16_16_RSFT_WITH_ROUND(dfw[i].real, + efw[i].real = (int16_t)(WEBRTC_SPL_MUL_16_16_RSFT_WITH_ROUND(dfw[i].real, hnl[i], 14)); - efw[i].imag = (WebRtc_Word16)(WEBRTC_SPL_MUL_16_16_RSFT_WITH_ROUND(dfw[i].imag, + efw[i].imag = (int16_t)(WEBRTC_SPL_MUL_16_16_RSFT_WITH_ROUND(dfw[i].imag, hnl[i], 14)); } } @@ -1831,9 +1831,9 @@ int WebRtcAecm_ProcessBlock(AecmCore_t * aecm, // multiply with Wiener coefficients for (i = 0; i < PART_LEN1; i++) { - efw[i].real = (WebRtc_Word16)(WEBRTC_SPL_MUL_16_16_RSFT_WITH_ROUND(dfw[i].real, + efw[i].real = (int16_t)(WEBRTC_SPL_MUL_16_16_RSFT_WITH_ROUND(dfw[i].real, hnl[i], 14)); - efw[i].imag = (WebRtc_Word16)(WEBRTC_SPL_MUL_16_16_RSFT_WITH_ROUND(dfw[i].imag, + efw[i].imag = (int16_t)(WEBRTC_SPL_MUL_16_16_RSFT_WITH_ROUND(dfw[i].imag, hnl[i], 14)); } } @@ -1858,22 +1858,22 @@ int WebRtcAecm_ProcessBlock(AecmCore_t * aecm, // \param[in] lambda Suppression gain with which to scale the noise level (Q14). // static void ComfortNoise(AecmCore_t* aecm, - const WebRtc_UWord16* dfa, + const uint16_t* dfa, complex16_t* out, - const WebRtc_Word16* lambda) + const int16_t* lambda) { - WebRtc_Word16 i; - WebRtc_Word16 tmp16; - WebRtc_Word32 tmp32; + int16_t i; + int16_t tmp16; + int32_t tmp32; - WebRtc_Word16 randW16[PART_LEN]; - WebRtc_Word16 uReal[PART_LEN1]; - WebRtc_Word16 uImag[PART_LEN1]; - WebRtc_Word32 outLShift32; - WebRtc_Word16 noiseRShift16[PART_LEN1]; + int16_t randW16[PART_LEN]; + int16_t uReal[PART_LEN1]; + int16_t uImag[PART_LEN1]; + int32_t outLShift32; + int16_t noiseRShift16[PART_LEN1]; - WebRtc_Word16 shiftFromNearToNoise = kNoiseEstQDomain - aecm->dfaCleanQDomain; - WebRtc_Word16 minTrackShift; + int16_t shiftFromNearToNoise = kNoiseEstQDomain - aecm->dfaCleanQDomain; + int16_t minTrackShift; assert(shiftFromNearToNoise >= 0); assert(shiftFromNearToNoise < 16); @@ -1893,7 +1893,7 @@ static void ComfortNoise(AecmCore_t* aecm, { // Shift to the noise domain. - tmp32 = (WebRtc_Word32)dfa[i]; + tmp32 = (int32_t)dfa[i]; outLShift32 = WEBRTC_SPL_LSHIFT_W32(tmp32, shiftFromNearToNoise); if (outLShift32 < aecm->noiseEst[i]) @@ -1958,11 +1958,11 @@ static void ComfortNoise(AecmCore_t* aecm, tmp32 = 32767; aecm->noiseEst[i] = WEBRTC_SPL_LSHIFT_W32(tmp32, shiftFromNearToNoise); } - noiseRShift16[i] = (WebRtc_Word16)tmp32; + noiseRShift16[i] = (int16_t)tmp32; tmp16 = ONE_Q14 - lambda[i]; noiseRShift16[i] - = (WebRtc_Word16)WEBRTC_SPL_MUL_16_16_RSFT(tmp16, noiseRShift16[i], 14); + = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(tmp16, noiseRShift16[i], 14); } // Generate a uniform random array on [0 2^15-1]. 
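The loop above is a per-bin Wiener-style suppressor: hnl = 1 - echoEst/nearEst in Q14, clamped to [0, ONE_Q14], then multiplied onto the real and imaginary parts with a rounded Q14 shift. Stripped of the Q-domain juggling (a sketch of the clamping only):

#include <stdint.h>

/* Q14 suppression gain. ratio_q14 is the gained echo estimate divided by
 * the near-end estimate, already brought to Q14 as in the code above. */
static int16_t wiener_gain_q14(int32_t ratio_q14) {
  if (ratio_q14 >= (1 << 14)) return 0;        /* echo dominates the bin */
  if (ratio_q14 < 0) return (int16_t)(1 << 14);
  return (int16_t)((1 << 14) - ratio_q14);
}
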
@@ -1974,12 +1974,12 @@ static void ComfortNoise(AecmCore_t* aecm, for (i = 1; i < PART_LEN1; i++) { // Get a random index for the cos and sin tables over [0 359]. - tmp16 = (WebRtc_Word16)WEBRTC_SPL_MUL_16_16_RSFT(359, randW16[i - 1], 15); + tmp16 = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(359, randW16[i - 1], 15); // Tables are in Q13. - uReal[i] = (WebRtc_Word16)WEBRTC_SPL_MUL_16_16_RSFT(noiseRShift16[i], + uReal[i] = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(noiseRShift16[i], kCosTable[tmp16], 13); - uImag[i] = (WebRtc_Word16)WEBRTC_SPL_MUL_16_16_RSFT(-noiseRShift16[i], + uImag[i] = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(-noiseRShift16[i], kSinTable[tmp16], 13); } uImag[PART_LEN] = 0; @@ -1992,7 +1992,7 @@ static void ComfortNoise(AecmCore_t* aecm, } void WebRtcAecm_BufferFarFrame(AecmCore_t* const aecm, - const WebRtc_Word16* const farend, + const int16_t* const farend, const int farLen) { int writeLen = farLen, writePos = 0; @@ -2003,18 +2003,18 @@ void WebRtcAecm_BufferFarFrame(AecmCore_t* const aecm, // Write to remaining buffer space before wrapping writeLen = FAR_BUF_LEN - aecm->farBufWritePos; memcpy(aecm->farBuf + aecm->farBufWritePos, farend + writePos, - sizeof(WebRtc_Word16) * writeLen); + sizeof(int16_t) * writeLen); aecm->farBufWritePos = 0; writePos = writeLen; writeLen = farLen - writeLen; } memcpy(aecm->farBuf + aecm->farBufWritePos, farend + writePos, - sizeof(WebRtc_Word16) * writeLen); + sizeof(int16_t) * writeLen); aecm->farBufWritePos += writeLen; } -void WebRtcAecm_FetchFarFrame(AecmCore_t * const aecm, WebRtc_Word16 * const farend, +void WebRtcAecm_FetchFarFrame(AecmCore_t * const aecm, int16_t * const farend, const int farLen, const int knownDelay) { int readLen = farLen; @@ -2042,13 +2042,13 @@ void WebRtcAecm_FetchFarFrame(AecmCore_t * const aecm, WebRtc_Word16 * const far // Read from remaining buffer space before wrapping readLen = FAR_BUF_LEN - aecm->farBufReadPos; memcpy(farend + readPos, aecm->farBuf + aecm->farBufReadPos, - sizeof(WebRtc_Word16) * readLen); + sizeof(int16_t) * readLen); aecm->farBufReadPos = 0; readPos = readLen; readLen = farLen - readLen; } memcpy(farend + readPos, aecm->farBuf + aecm->farBufReadPos, - sizeof(WebRtc_Word16) * readLen); + sizeof(int16_t) * readLen); aecm->farBufReadPos += readLen; } diff --git a/webrtc/modules/audio_processing/aecm/aecm_core.h b/webrtc/modules/audio_processing/aecm/aecm_core.h index a986f27b4..988cb46f1 100644 --- a/webrtc/modules/audio_processing/aecm/aecm_core.h +++ b/webrtc/modules/audio_processing/aecm/aecm_core.h @@ -27,8 +27,8 @@ #endif typedef struct { - WebRtc_Word16 real; - WebRtc_Word16 imag; + int16_t real; + int16_t imag; } complex16_t; typedef struct { @@ -43,86 +43,86 @@ typedef struct { RingBuffer* nearCleanFrameBuf; RingBuffer* outFrameBuf; - WebRtc_Word16 farBuf[FAR_BUF_LEN]; + int16_t farBuf[FAR_BUF_LEN]; - WebRtc_Word16 mult; - WebRtc_UWord32 seed; + int16_t mult; + uint32_t seed; // Delay estimation variables void* delay_estimator_farend; void* delay_estimator; - WebRtc_UWord16 currentDelay; + uint16_t currentDelay; // Far end history variables // TODO(bjornv): Replace |far_history| with ring_buffer. 
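ComfortNoise above fills suppressed bins with the tracked noise magnitude and a random phase taken from the Q13 kCosTable/kSinTable. The same idea in floating point (illustrative only; rand() stands in for the WebRtcSpl random generator, and pi2 echoes the constant used in the float AEC earlier in this patch):

#include <math.h>
#include <stdlib.h>

/* One comfort-noise bin: estimated noise magnitude, uniformly random phase. */
static void comfort_noise_bin(float noise_mag, float* re, float* im) {
  const float pi2 = 6.28318530717959f;
  float phase = pi2 * ((float)rand() / ((float)RAND_MAX + 1.0f));
  *re = noise_mag * cosf(phase);
  *im = -noise_mag * sinf(phase);
}
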
uint16_t far_history[PART_LEN1 * MAX_DELAY]; int far_history_pos; int far_q_domains[MAX_DELAY]; - WebRtc_Word16 nlpFlag; - WebRtc_Word16 fixedDelay; + int16_t nlpFlag; + int16_t fixedDelay; - WebRtc_UWord32 totCount; + uint32_t totCount; - WebRtc_Word16 dfaCleanQDomain; - WebRtc_Word16 dfaCleanQDomainOld; - WebRtc_Word16 dfaNoisyQDomain; - WebRtc_Word16 dfaNoisyQDomainOld; + int16_t dfaCleanQDomain; + int16_t dfaCleanQDomainOld; + int16_t dfaNoisyQDomain; + int16_t dfaNoisyQDomainOld; - WebRtc_Word16 nearLogEnergy[MAX_BUF_LEN]; - WebRtc_Word16 farLogEnergy; - WebRtc_Word16 echoAdaptLogEnergy[MAX_BUF_LEN]; - WebRtc_Word16 echoStoredLogEnergy[MAX_BUF_LEN]; + int16_t nearLogEnergy[MAX_BUF_LEN]; + int16_t farLogEnergy; + int16_t echoAdaptLogEnergy[MAX_BUF_LEN]; + int16_t echoStoredLogEnergy[MAX_BUF_LEN]; // The extra 16 or 32 bytes in the following buffers are for alignment based // Neon code. // It's designed this way since the current GCC compiler can't align a // buffer in 16 or 32 byte boundaries properly. - WebRtc_Word16 channelStored_buf[PART_LEN1 + 8]; - WebRtc_Word16 channelAdapt16_buf[PART_LEN1 + 8]; - WebRtc_Word32 channelAdapt32_buf[PART_LEN1 + 8]; - WebRtc_Word16 xBuf_buf[PART_LEN2 + 16]; // farend - WebRtc_Word16 dBufClean_buf[PART_LEN2 + 16]; // nearend - WebRtc_Word16 dBufNoisy_buf[PART_LEN2 + 16]; // nearend - WebRtc_Word16 outBuf_buf[PART_LEN + 8]; + int16_t channelStored_buf[PART_LEN1 + 8]; + int16_t channelAdapt16_buf[PART_LEN1 + 8]; + int32_t channelAdapt32_buf[PART_LEN1 + 8]; + int16_t xBuf_buf[PART_LEN2 + 16]; // farend + int16_t dBufClean_buf[PART_LEN2 + 16]; // nearend + int16_t dBufNoisy_buf[PART_LEN2 + 16]; // nearend + int16_t outBuf_buf[PART_LEN + 8]; // Pointers to the above buffers - WebRtc_Word16 *channelStored; - WebRtc_Word16 *channelAdapt16; - WebRtc_Word32 *channelAdapt32; - WebRtc_Word16 *xBuf; - WebRtc_Word16 *dBufClean; - WebRtc_Word16 *dBufNoisy; - WebRtc_Word16 *outBuf; + int16_t *channelStored; + int16_t *channelAdapt16; + int32_t *channelAdapt32; + int16_t *xBuf; + int16_t *dBufClean; + int16_t *dBufNoisy; + int16_t *outBuf; - WebRtc_Word32 echoFilt[PART_LEN1]; - WebRtc_Word16 nearFilt[PART_LEN1]; - WebRtc_Word32 noiseEst[PART_LEN1]; + int32_t echoFilt[PART_LEN1]; + int16_t nearFilt[PART_LEN1]; + int32_t noiseEst[PART_LEN1]; int noiseEstTooLowCtr[PART_LEN1]; int noiseEstTooHighCtr[PART_LEN1]; - WebRtc_Word16 noiseEstCtr; - WebRtc_Word16 cngMode; + int16_t noiseEstCtr; + int16_t cngMode; - WebRtc_Word32 mseAdaptOld; - WebRtc_Word32 mseStoredOld; - WebRtc_Word32 mseThreshold; + int32_t mseAdaptOld; + int32_t mseStoredOld; + int32_t mseThreshold; - WebRtc_Word16 farEnergyMin; - WebRtc_Word16 farEnergyMax; - WebRtc_Word16 farEnergyMaxMin; - WebRtc_Word16 farEnergyVAD; - WebRtc_Word16 farEnergyMSE; + int16_t farEnergyMin; + int16_t farEnergyMax; + int16_t farEnergyMaxMin; + int16_t farEnergyVAD; + int16_t farEnergyMSE; int currentVADValue; - WebRtc_Word16 vadUpdateCount; + int16_t vadUpdateCount; - WebRtc_Word16 startupState; - WebRtc_Word16 mseChannelCount; - WebRtc_Word16 supGain; - WebRtc_Word16 supGainOld; + int16_t startupState; + int16_t mseChannelCount; + int16_t supGain; + int16_t supGainOld; - WebRtc_Word16 supGainErrParamA; - WebRtc_Word16 supGainErrParamD; - WebRtc_Word16 supGainErrParamDiffAB; - WebRtc_Word16 supGainErrParamDiffBD; + int16_t supGainErrParamA; + int16_t supGainErrParamD; + int16_t supGainErrParamDiffAB; + int16_t supGainErrParamDiffBD; struct RealFFT* real_fft; @@ -195,7 +195,7 @@ int WebRtcAecm_Control(AecmCore_t *aecm, int 
delay, int nlpFlag); // - aecm : Initialized instance // void WebRtcAecm_InitEchoPathCore(AecmCore_t* aecm, - const WebRtc_Word16* echo_path); + const int16_t* echo_path); //////////////////////////////////////////////////////////////////////////////// // WebRtcAecm_ProcessFrame(...) @@ -215,10 +215,10 @@ void WebRtcAecm_InitEchoPathCore(AecmCore_t* aecm, // - out : Out buffer, one frame of nearend signal : // // -int WebRtcAecm_ProcessFrame(AecmCore_t * aecm, const WebRtc_Word16 * farend, - const WebRtc_Word16 * nearendNoisy, - const WebRtc_Word16 * nearendClean, - WebRtc_Word16 * out); +int WebRtcAecm_ProcessFrame(AecmCore_t * aecm, const int16_t * farend, + const int16_t * nearendNoisy, + const int16_t * nearendClean, + int16_t * out); //////////////////////////////////////////////////////////////////////////////// // WebRtcAecm_ProcessBlock(...) @@ -238,10 +238,10 @@ int WebRtcAecm_ProcessFrame(AecmCore_t * aecm, const WebRtc_Word16 * farend, // - out : Out buffer, one block of nearend signal : // // -int WebRtcAecm_ProcessBlock(AecmCore_t * aecm, const WebRtc_Word16 * farend, - const WebRtc_Word16 * nearendNoisy, - const WebRtc_Word16 * noisyClean, - WebRtc_Word16 * out); +int WebRtcAecm_ProcessBlock(AecmCore_t * aecm, const int16_t * farend, + const int16_t * nearendNoisy, + const int16_t * noisyClean, + int16_t * out); //////////////////////////////////////////////////////////////////////////////// // WebRtcAecm_BufferFarFrame() @@ -254,7 +254,7 @@ int WebRtcAecm_ProcessBlock(AecmCore_t * aecm, const WebRtc_Word16 * farend, // - farLen : Length of frame // void WebRtcAecm_BufferFarFrame(AecmCore_t * const aecm, - const WebRtc_Word16 * const farend, + const int16_t * const farend, const int farLen); //////////////////////////////////////////////////////////////////////////////// @@ -269,7 +269,7 @@ void WebRtcAecm_BufferFarFrame(AecmCore_t * const aecm, // - knownDelay : known delay // void WebRtcAecm_FetchFarFrame(AecmCore_t * const aecm, - WebRtc_Word16 * const farend, + int16_t * const farend, const int farLen, const int knownDelay); /////////////////////////////////////////////////////////////////////////////// @@ -278,17 +278,17 @@ void WebRtcAecm_FetchFarFrame(AecmCore_t * const aecm, // typedef void (*CalcLinearEnergies)( AecmCore_t* aecm, - const WebRtc_UWord16* far_spectrum, - WebRtc_Word32* echoEst, - WebRtc_UWord32* far_energy, - WebRtc_UWord32* echo_energy_adapt, - WebRtc_UWord32* echo_energy_stored); + const uint16_t* far_spectrum, + int32_t* echoEst, + uint32_t* far_energy, + uint32_t* echo_energy_adapt, + uint32_t* echo_energy_stored); extern CalcLinearEnergies WebRtcAecm_CalcLinearEnergies; typedef void (*StoreAdaptiveChannel)( AecmCore_t* aecm, - const WebRtc_UWord16* far_spectrum, - WebRtc_Word32* echo_est); + const uint16_t* far_spectrum, + int32_t* echo_est); extern StoreAdaptiveChannel WebRtcAecm_StoreAdaptiveChannel; typedef void (*ResetAdaptiveChannel)(AecmCore_t* aecm); @@ -296,17 +296,17 @@ extern ResetAdaptiveChannel WebRtcAecm_ResetAdaptiveChannel; typedef void (*WindowAndFFT)( AecmCore_t* aecm, - WebRtc_Word16* fft, - const WebRtc_Word16* time_signal, + int16_t* fft, + const int16_t* time_signal, complex16_t* freq_signal, int time_signal_scaling); extern WindowAndFFT WebRtcAecm_WindowAndFFT; typedef void (*InverseFFTAndWindow)( AecmCore_t* aecm, - WebRtc_Word16* fft, complex16_t* efw, - WebRtc_Word16* output, - const WebRtc_Word16* nearendClean); + int16_t* fft, complex16_t* efw, + int16_t* output, + const int16_t* nearendClean); extern InverseFFTAndWindow 
WebRtcAecm_InverseFFTAndWindow; // For the above function pointers, functions for generic platforms are declared @@ -314,27 +314,27 @@ extern InverseFFTAndWindow WebRtcAecm_InverseFFTAndWindow; // are declared below and defined in file aecm_core_neon.s. #if (defined WEBRTC_DETECT_ARM_NEON) || defined (WEBRTC_ARCH_ARM_NEON) void WebRtcAecm_WindowAndFFTNeon(AecmCore_t* aecm, - WebRtc_Word16* fft, - const WebRtc_Word16* time_signal, + int16_t* fft, + const int16_t* time_signal, complex16_t* freq_signal, int time_signal_scaling); void WebRtcAecm_InverseFFTAndWindowNeon(AecmCore_t* aecm, - WebRtc_Word16* fft, + int16_t* fft, complex16_t* efw, - WebRtc_Word16* output, - const WebRtc_Word16* nearendClean); + int16_t* output, + const int16_t* nearendClean); void WebRtcAecm_CalcLinearEnergiesNeon(AecmCore_t* aecm, - const WebRtc_UWord16* far_spectrum, - WebRtc_Word32* echo_est, - WebRtc_UWord32* far_energy, - WebRtc_UWord32* echo_energy_adapt, - WebRtc_UWord32* echo_energy_stored); + const uint16_t* far_spectrum, + int32_t* echo_est, + uint32_t* far_energy, + uint32_t* echo_energy_adapt, + uint32_t* echo_energy_stored); void WebRtcAecm_StoreAdaptiveChannelNeon(AecmCore_t* aecm, - const WebRtc_UWord16* far_spectrum, - WebRtc_Word32* echo_est); + const uint16_t* far_spectrum, + int32_t* echo_est); void WebRtcAecm_ResetAdaptiveChannelNeon(AecmCore_t* aecm); #endif diff --git a/webrtc/modules/audio_processing/aecm/aecm_core_neon.S b/webrtc/modules/audio_processing/aecm/aecm_core_neon.S index 1a06312c3..b47cd28f2 100644 --- a/webrtc/modules/audio_processing/aecm/aecm_core_neon.S +++ b/webrtc/modules/audio_processing/aecm/aecm_core_neon.S @@ -24,8 +24,8 @@ GLOBAL_FUNCTION WebRtcAecm_StoreAdaptiveChannelNeon GLOBAL_FUNCTION WebRtcAecm_ResetAdaptiveChannelNeon @ void WebRtcAecm_WindowAndFFTNeon(AecmCore_t* aecm, -@ WebRtc_Word16* fft, -@ const WebRtc_Word16* time_signal, +@ int16_t* fft, +@ const int16_t* time_signal, @ complex16_t* freq_signal, @ int time_signal_scaling); .align 2 @@ -81,10 +81,10 @@ LOOP_PART_LEN2: pop {r4, r5, r6, pc} @ void WebRtcAecm_InverseFFTAndWindowNeon(AecmCore_t* aecm, -@ WebRtc_Word16* fft, +@ int16_t* fft, @ complex16_t* efw, -@ WebRtc_Word16* output, -@ const WebRtc_Word16* nearendClean); +@ int16_t* output, +@ const int16_t* nearendClean); .align 2 DEFINE_FUNCTION WebRtcAecm_InverseFFTAndWindowNeon push {r4-r8, lr} @@ -197,11 +197,11 @@ END: pop {r4-r8, pc} @ void WebRtcAecm_CalcLinearEnergiesNeon(AecmCore_t* aecm, -@ const WebRtc_UWord16* far_spectrum, -@ WebRtc_Word32* echo_est, -@ WebRtc_UWord32* far_energy, -@ WebRtc_UWord32* echo_energy_adapt, -@ WebRtc_UWord32* echo_energy_stored); +@ const uint16_t* far_spectrum, +@ int32_t* echo_est, +@ uint32_t* far_energy, +@ uint32_t* echo_energy_adapt, +@ uint32_t* echo_energy_stored); .align 2 DEFINE_FUNCTION WebRtcAecm_CalcLinearEnergiesNeon push {r4-r7} diff --git a/webrtc/modules/audio_processing/aecm/aecm_core_neon.c b/webrtc/modules/audio_processing/aecm/aecm_core_neon.c index cf30cee99..c6910f090 100644 --- a/webrtc/modules/audio_processing/aecm/aecm_core_neon.c +++ b/webrtc/modules/audio_processing/aecm/aecm_core_neon.c @@ -19,7 +19,7 @@ // generating script and makefile, to replace these C functions. // Square root of Hanning window in Q14. 
-const ALIGN8_BEG WebRtc_Word16 WebRtcAecm_kSqrtHanning[] ALIGN8_END = { +const ALIGN8_BEG int16_t WebRtcAecm_kSqrtHanning[] ALIGN8_END = { 0, 399, 798, 1196, 1594, 1990, 2386, 2780, 3172, 3562, 3951, 4337, 4720, 5101, 5478, 5853, 6224, @@ -32,7 +32,7 @@ const ALIGN8_BEG WebRtc_Word16 WebRtcAecm_kSqrtHanning[] ALIGN8_END = { }; // Square root of Hanning window in Q14, in reversed order. -static const ALIGN8_BEG WebRtc_Word16 kSqrtHanningReversed[] ALIGN8_END = { +static const ALIGN8_BEG int16_t kSqrtHanningReversed[] ALIGN8_END = { 16384, 16373, 16354, 16325, 16286, 16237, 16179, 16111, 16034, 15947, 15851, 15746, 15631, 15506, 15373, 15231, 15079, 14918, 14749, 14571, 14384, 14189, 13985, 13773, @@ -44,8 +44,8 @@ static const ALIGN8_BEG WebRtc_Word16 kSqrtHanningReversed[] ALIGN8_END = { }; void WebRtcAecm_WindowAndFFTNeon(AecmCore_t* aecm, - WebRtc_Word16* fft, - const WebRtc_Word16* time_signal, + int16_t* fft, + const int16_t* time_signal, complex16_t* freq_signal, int time_signal_scaling) { int i = 0; @@ -115,10 +115,10 @@ void WebRtcAecm_WindowAndFFTNeon(AecmCore_t* aecm, } void WebRtcAecm_InverseFFTAndWindowNeon(AecmCore_t* aecm, - WebRtc_Word16* fft, + int16_t* fft, complex16_t* efw, - WebRtc_Word16* output, - const WebRtc_Word16* nearendClean) { + int16_t* output, + const int16_t* nearendClean) { int i, j, outCFFT; assert((uintptr_t)efw % 32 == 0); @@ -161,7 +161,7 @@ void WebRtcAecm_InverseFFTAndWindowNeon(AecmCore_t* aecm, outCFFT = WebRtcSpl_RealInverseFFT(aecm->real_fft, fft, (int16_t*)efw); int32x4_t tmp32x4_2; - __asm __volatile("vdup.32 %q0, %1" : "=w"(tmp32x4_2) : "r"((WebRtc_Word32) + __asm __volatile("vdup.32 %q0, %1" : "=w"(tmp32x4_2) : "r"((int32_t) (outCFFT - aecm->dfaCleanQDomain))); for (i = 0; i < PART_LEN; i += 4) { int16x4_t tmp16x4_0; @@ -169,18 +169,18 @@ void WebRtcAecm_InverseFFTAndWindowNeon(AecmCore_t* aecm, int32x4_t tmp32x4_0; int32x4_t tmp32x4_1; - //efw[i].real = (WebRtc_Word16)WEBRTC_SPL_MUL_16_16_RSFT_WITH_ROUND( + //efw[i].real = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT_WITH_ROUND( // efw[i].real, WebRtcAecm_kSqrtHanning[i], 14); __asm __volatile("vld1.16 %P0, [%1, :64]" : "=w"(tmp16x4_0) : "r"(&efw[i].real)); __asm __volatile("vld1.16 %P0, [%1, :64]" : "=w"(tmp16x4_1) : "r"(&WebRtcAecm_kSqrtHanning[i])); __asm __volatile("vmull.s16 %q0, %P1, %P2" : "=w"(tmp32x4_0) : "w"(tmp16x4_0), "w"(tmp16x4_1)); __asm __volatile("vrshr.s32 %q0, %q1, #14" : "=w"(tmp32x4_0) : "0"(tmp32x4_0)); - //tmp32no1 = WEBRTC_SPL_SHIFT_W32((WebRtc_Word32)efw[i].real, + //tmp32no1 = WEBRTC_SPL_SHIFT_W32((int32_t)efw[i].real, // outCFFT - aecm->dfaCleanQDomain); __asm __volatile("vshl.s32 %q0, %q1, %q2" : "=w"(tmp32x4_0) : "0"(tmp32x4_0), "w"(tmp32x4_2)); - //efw[i].real = (WebRtc_Word16)WEBRTC_SPL_SAT(WEBRTC_SPL_WORD16_MAX, + //efw[i].real = (int16_t)WEBRTC_SPL_SAT(WEBRTC_SPL_WORD16_MAX, // tmp32no1 + aecm->outBuf[i], WEBRTC_SPL_WORD16_MIN); // output[i] = efw[i].real; __asm __volatile("vld1.16 %P0, [%1, :64]" : "=w"(tmp16x4_0) : "r"(&aecm->outBuf[i])); @@ -199,7 +199,7 @@ void WebRtcAecm_InverseFFTAndWindowNeon(AecmCore_t* aecm, // tmp32no1 = WEBRTC_SPL_SHIFT_W32(tmp32no1, outCFFT - aecm->dfaCleanQDomain); __asm __volatile("vshl.s32 %q0, %q1, %q2" : "=w"(tmp32x4_0) : "0"(tmp32x4_0), "w"(tmp32x4_2)); - // aecm->outBuf[i] = (WebRtc_Word16)WEBRTC_SPL_SAT( + // aecm->outBuf[i] = (int16_t)WEBRTC_SPL_SAT( // WEBRTC_SPL_WORD16_MAX, tmp32no1, WEBRTC_SPL_WORD16_MIN); __asm __volatile("vqmovn.s32 %P0, %q1" : "=w"(tmp16x4_0) : "w"(tmp32x4_0)); __asm __volatile("vst1.16 %P0, [%1, :64]" : : 
"w"(tmp16x4_0), "r"(&aecm->outBuf[i])); @@ -228,16 +228,16 @@ void WebRtcAecm_InverseFFTAndWindowNeon(AecmCore_t* aecm, } void WebRtcAecm_CalcLinearEnergiesNeon(AecmCore_t* aecm, - const WebRtc_UWord16* far_spectrum, - WebRtc_Word32* echo_est, - WebRtc_UWord32* far_energy, - WebRtc_UWord32* echo_energy_adapt, - WebRtc_UWord32* echo_energy_stored) { + const uint16_t* far_spectrum, + int32_t* echo_est, + uint32_t* far_energy, + uint32_t* echo_energy_adapt, + uint32_t* echo_energy_stored) { int i; - register WebRtc_UWord32 far_energy_r; - register WebRtc_UWord32 echo_energy_stored_r; - register WebRtc_UWord32 echo_energy_adapt_r; + register uint32_t far_energy_r; + register uint32_t echo_energy_stored_r; + register uint32_t echo_energy_adapt_r; assert((uintptr_t)echo_est % 32 == 0); assert((uintptr_t)(aecm->channelStored) % 16 == 0); @@ -250,7 +250,7 @@ void WebRtcAecm_CalcLinearEnergiesNeon(AecmCore_t* aecm, __asm __volatile("vmov.i32 q9, #0" : : : "q9"); // echo_energy_adapt for (i = 0; i < PART_LEN - 7; i += 8) { - // far_energy += (WebRtc_UWord32)(far_spectrum[i]); + // far_energy += (uint32_t)(far_spectrum[i]); __asm __volatile("vld1.16 {d26, d27}, [%0]" : : "r"(&far_spectrum[i]) : "q13"); __asm __volatile("vaddw.u16 q14, q14, d26" : : : "q14", "q13"); __asm __volatile("vaddw.u16 q14, q14, d27" : : : "q14", "q13"); @@ -263,7 +263,7 @@ void WebRtcAecm_CalcLinearEnergiesNeon(AecmCore_t* aecm, __asm __volatile("vst1.32 {d20, d21, d22, d23}, [%0, :256]" : : "r"(&echo_est[i]): "q10", "q11"); - // echo_energy_stored += (WebRtc_UWord32)echoEst[i]; + // echo_energy_stored += (uint32_t)echoEst[i]; __asm __volatile("vadd.u32 q8, q10" : : : "q10", "q8"); __asm __volatile("vadd.u32 q8, q11" : : : "q11", "q8"); @@ -290,15 +290,15 @@ void WebRtcAecm_CalcLinearEnergiesNeon(AecmCore_t* aecm, // Get estimated echo energies for adaptive channel and stored channel. 
echo_est[i] = WEBRTC_SPL_MUL_16_U16(aecm->channelStored[i], far_spectrum[i]); - *echo_energy_stored = echo_energy_stored_r + (WebRtc_UWord32)echo_est[i]; - *far_energy = far_energy_r + (WebRtc_UWord32)(far_spectrum[i]); + *echo_energy_stored = echo_energy_stored_r + (uint32_t)echo_est[i]; + *far_energy = far_energy_r + (uint32_t)(far_spectrum[i]); *echo_energy_adapt = echo_energy_adapt_r + WEBRTC_SPL_UMUL_16_16( aecm->channelAdapt16[i], far_spectrum[i]); } void WebRtcAecm_StoreAdaptiveChannelNeon(AecmCore_t* aecm, - const WebRtc_UWord16* far_spectrum, - WebRtc_Word32* echo_est) { + const uint16_t* far_spectrum, + int32_t* echo_est) { int i; assert((uintptr_t)echo_est % 32 == 0); @@ -331,7 +331,7 @@ void WebRtcAecm_ResetAdaptiveChannelNeon(AecmCore_t* aecm) { for (i = 0; i < PART_LEN - 7; i += 8) { // aecm->channelAdapt16[i] = aecm->channelStored[i]; - // aecm->channelAdapt32[i] = WEBRTC_SPL_LSHIFT_W32((WebRtc_Word32) + // aecm->channelAdapt32[i] = WEBRTC_SPL_LSHIFT_W32((int32_t) // aecm->channelStored[i], 16); __asm __volatile("vld1.16 {d24, d25}, [%0, :128]" : : "r"(&aecm->channelStored[i]) : "q12"); @@ -344,5 +344,5 @@ void WebRtcAecm_ResetAdaptiveChannelNeon(AecmCore_t* aecm) { } aecm->channelAdapt16[i] = aecm->channelStored[i]; aecm->channelAdapt32[i] = WEBRTC_SPL_LSHIFT_W32( - (WebRtc_Word32)aecm->channelStored[i], 16); + (int32_t)aecm->channelStored[i], 16); } diff --git a/webrtc/modules/audio_processing/aecm/echo_control_mobile.c b/webrtc/modules/audio_processing/aecm/echo_control_mobile.c index 7716567fc..b896de0a2 100644 --- a/webrtc/modules/audio_processing/aecm/echo_control_mobile.c +++ b/webrtc/modules/audio_processing/aecm/echo_control_mobile.c @@ -57,7 +57,7 @@ typedef struct int delayChange; short lastDelayDiff; - WebRtc_Word16 echoMode; + int16_t echoMode; #ifdef AEC_DEBUG FILE *bufFile; @@ -80,7 +80,7 @@ static int WebRtcAecm_EstBufDelay(aecmob_t *aecmInst, short msInSndCardBuf); // Stuffs the farend buffer if the estimated delay is too large static int WebRtcAecm_DelayComp(aecmob_t *aecmInst); -WebRtc_Word32 WebRtcAecm_Create(void **aecmInst) +int32_t WebRtcAecm_Create(void **aecmInst) { aecmob_t *aecm; if (aecmInst == NULL) @@ -130,7 +130,7 @@ WebRtc_Word32 WebRtcAecm_Create(void **aecmInst) return 0; } -WebRtc_Word32 WebRtcAecm_Free(void *aecmInst) +int32_t WebRtcAecm_Free(void *aecmInst) { aecmob_t *aecm = aecmInst; @@ -157,7 +157,7 @@ WebRtc_Word32 WebRtcAecm_Free(void *aecmInst) return 0; } -WebRtc_Word32 WebRtcAecm_Init(void *aecmInst, WebRtc_Word32 sampFreq) +int32_t WebRtcAecm_Init(void *aecmInst, int32_t sampFreq) { aecmob_t *aecm = aecmInst; AecmConfig aecConfig; @@ -220,11 +220,11 @@ WebRtc_Word32 WebRtcAecm_Init(void *aecmInst, WebRtc_Word32 sampFreq) return 0; } -WebRtc_Word32 WebRtcAecm_BufferFarend(void *aecmInst, const WebRtc_Word16 *farend, - WebRtc_Word16 nrOfSamples) +int32_t WebRtcAecm_BufferFarend(void *aecmInst, const int16_t *farend, + int16_t nrOfSamples) { aecmob_t *aecm = aecmInst; - WebRtc_Word32 retVal = 0; + int32_t retVal = 0; if (aecm == NULL) { @@ -260,12 +260,12 @@ WebRtc_Word32 WebRtcAecm_BufferFarend(void *aecmInst, const WebRtc_Word16 *faren return retVal; } -WebRtc_Word32 WebRtcAecm_Process(void *aecmInst, const WebRtc_Word16 *nearendNoisy, - const WebRtc_Word16 *nearendClean, WebRtc_Word16 *out, - WebRtc_Word16 nrOfSamples, WebRtc_Word16 msInSndCardBuf) +int32_t WebRtcAecm_Process(void *aecmInst, const int16_t *nearendNoisy, + const int16_t *nearendClean, int16_t *out, + int16_t nrOfSamples, int16_t msInSndCardBuf) { aecmob_t *aecm = 
aecmInst; - WebRtc_Word32 retVal = 0; + int32_t retVal = 0; short i; short nmbrOfFilledBuffers; short nBlocks10ms; @@ -477,7 +477,7 @@ WebRtc_Word32 WebRtcAecm_Process(void *aecmInst, const WebRtc_Word16 *nearendNoi return retVal; } -WebRtc_Word32 WebRtcAecm_set_config(void *aecmInst, AecmConfig config) +int32_t WebRtcAecm_set_config(void *aecmInst, AecmConfig config) { aecmob_t *aecm = aecmInst; @@ -559,7 +559,7 @@ WebRtc_Word32 WebRtcAecm_set_config(void *aecmInst, AecmConfig config) return 0; } -WebRtc_Word32 WebRtcAecm_get_config(void *aecmInst, AecmConfig *config) +int32_t WebRtcAecm_get_config(void *aecmInst, AecmConfig *config) { aecmob_t *aecm = aecmInst; @@ -586,12 +586,12 @@ WebRtc_Word32 WebRtcAecm_get_config(void *aecmInst, AecmConfig *config) return 0; } -WebRtc_Word32 WebRtcAecm_InitEchoPath(void* aecmInst, - const void* echo_path, - size_t size_bytes) +int32_t WebRtcAecm_InitEchoPath(void* aecmInst, + const void* echo_path, + size_t size_bytes) { aecmob_t *aecm = aecmInst; - const WebRtc_Word16* echo_path_ptr = echo_path; + const int16_t* echo_path_ptr = echo_path; if (aecmInst == NULL) { return -1; @@ -617,12 +617,12 @@ WebRtc_Word32 WebRtcAecm_InitEchoPath(void* aecmInst, return 0; } -WebRtc_Word32 WebRtcAecm_GetEchoPath(void* aecmInst, - void* echo_path, - size_t size_bytes) +int32_t WebRtcAecm_GetEchoPath(void* aecmInst, + void* echo_path, + size_t size_bytes) { aecmob_t *aecm = aecmInst; - WebRtc_Word16* echo_path_ptr = echo_path; + int16_t* echo_path_ptr = echo_path; if (aecmInst == NULL) { return -1; @@ -649,10 +649,10 @@ WebRtc_Word32 WebRtcAecm_GetEchoPath(void* aecmInst, size_t WebRtcAecm_echo_path_size_bytes() { - return (PART_LEN1 * sizeof(WebRtc_Word16)); + return (PART_LEN1 * sizeof(int16_t)); } -WebRtc_Word32 WebRtcAecm_get_error_code(void *aecmInst) +int32_t WebRtcAecm_get_error_code(void *aecmInst) { aecmob_t *aecm = aecmInst; diff --git a/webrtc/modules/audio_processing/aecm/include/echo_control_mobile.h b/webrtc/modules/audio_processing/aecm/include/echo_control_mobile.h index a8458b144..8ea2e87e2 100644 --- a/webrtc/modules/audio_processing/aecm/include/echo_control_mobile.h +++ b/webrtc/modules/audio_processing/aecm/include/echo_control_mobile.h @@ -31,8 +31,8 @@ enum { #define AECM_BAD_PARAMETER_WARNING 12100 typedef struct { - WebRtc_Word16 cngMode; // AECM_FALSE, AECM_TRUE (default) - WebRtc_Word16 echoMode; // 0, 1, 2, 3 (default), 4 + int16_t cngMode; // AECM_FALSE, AECM_TRUE (default) + int16_t echoMode; // 0, 1, 2, 3 (default), 4 } AecmConfig; #ifdef __cplusplus @@ -50,10 +50,10 @@ extern "C" { * * Outputs Description * ------------------------------------------------------------------- - * WebRtc_Word32 return 0: OK + * int32_t return 0: OK * -1: error */ -WebRtc_Word32 WebRtcAecm_Create(void **aecmInst); +int32_t WebRtcAecm_Create(void **aecmInst); /* * This function releases the memory allocated by WebRtcAecm_Create() @@ -64,10 +64,10 @@ WebRtc_Word32 WebRtcAecm_Create(void **aecmInst); * * Outputs Description * ------------------------------------------------------------------- - * WebRtc_Word32 return 0: OK + * int32_t return 0: OK * -1: error */ -WebRtc_Word32 WebRtcAecm_Free(void *aecmInst); +int32_t WebRtcAecm_Free(void *aecmInst); /* * Initializes an AECM instance. 
@@ -75,15 +75,14 @@ WebRtc_Word32 WebRtcAecm_Free(void *aecmInst); * Inputs Description * ------------------------------------------------------------------- * void *aecmInst Pointer to the AECM instance - * WebRtc_Word32 sampFreq Sampling frequency of data + * int32_t sampFreq Sampling frequency of data * * Outputs Description * ------------------------------------------------------------------- - * WebRtc_Word32 return 0: OK + * int32_t return 0: OK * -1: error */ -WebRtc_Word32 WebRtcAecm_Init(void* aecmInst, - WebRtc_Word32 sampFreq); +int32_t WebRtcAecm_Init(void* aecmInst, int32_t sampFreq); /* * Inserts an 80 or 160 sample block of data into the farend buffer. @@ -91,18 +90,18 @@ WebRtc_Word32 WebRtcAecm_Init(void* aecmInst, * Inputs Description * ------------------------------------------------------------------- * void *aecmInst Pointer to the AECM instance - * WebRtc_Word16 *farend In buffer containing one frame of + * int16_t *farend In buffer containing one frame of * farend signal - * WebRtc_Word16 nrOfSamples Number of samples in farend buffer + * int16_t nrOfSamples Number of samples in farend buffer * * Outputs Description * ------------------------------------------------------------------- - * WebRtc_Word32 return 0: OK + * int32_t return 0: OK * -1: error */ -WebRtc_Word32 WebRtcAecm_BufferFarend(void* aecmInst, - const WebRtc_Word16* farend, - WebRtc_Word16 nrOfSamples); +int32_t WebRtcAecm_BufferFarend(void* aecmInst, + const int16_t* farend, + int16_t nrOfSamples); /* * Runs the AECM on an 80 or 160 sample blocks of data. @@ -110,31 +109,31 @@ WebRtc_Word32 WebRtcAecm_BufferFarend(void* aecmInst, * Inputs Description * ------------------------------------------------------------------- * void *aecmInst Pointer to the AECM instance - * WebRtc_Word16 *nearendNoisy In buffer containing one frame of + * int16_t *nearendNoisy In buffer containing one frame of * reference nearend+echo signal. If * noise reduction is active, provide * the noisy signal here. - * WebRtc_Word16 *nearendClean In buffer containing one frame of + * int16_t *nearendClean In buffer containing one frame of * nearend+echo signal. If noise * reduction is active, provide the * clean signal here. Otherwise pass a * NULL pointer. 
- * WebRtc_Word16 nrOfSamples Number of samples in nearend buffer - * WebRtc_Word16 msInSndCardBuf Delay estimate for sound card and + * int16_t nrOfSamples Number of samples in nearend buffer + * int16_t msInSndCardBuf Delay estimate for sound card and * system buffers * * Outputs Description * ------------------------------------------------------------------- - * WebRtc_Word16 *out Out buffer, one frame of processed nearend - * WebRtc_Word32 return 0: OK + * int16_t *out Out buffer, one frame of processed nearend + * int32_t return 0: OK * -1: error */ -WebRtc_Word32 WebRtcAecm_Process(void* aecmInst, - const WebRtc_Word16* nearendNoisy, - const WebRtc_Word16* nearendClean, - WebRtc_Word16* out, - WebRtc_Word16 nrOfSamples, - WebRtc_Word16 msInSndCardBuf); +int32_t WebRtcAecm_Process(void* aecmInst, + const int16_t* nearendNoisy, + const int16_t* nearendClean, + int16_t* out, + int16_t nrOfSamples, + int16_t msInSndCardBuf); /* * This function enables the user to set certain parameters on-the-fly @@ -147,11 +146,10 @@ WebRtc_Word32 WebRtcAecm_Process(void* aecmInst, * * Outputs Description * ------------------------------------------------------------------- - * WebRtc_Word32 return 0: OK + * int32_t return 0: OK * -1: error */ -WebRtc_Word32 WebRtcAecm_set_config(void* aecmInst, - AecmConfig config); +int32_t WebRtcAecm_set_config(void* aecmInst, AecmConfig config); /* * This function enables the user to set certain parameters on-the-fly @@ -164,11 +162,10 @@ WebRtc_Word32 WebRtcAecm_set_config(void* aecmInst, * ------------------------------------------------------------------- * AecmConfig *config Pointer to the config instance that * all properties will be written to - * WebRtc_Word32 return 0: OK + * int32_t return 0: OK * -1: error */ -WebRtc_Word32 WebRtcAecm_get_config(void *aecmInst, - AecmConfig *config); +int32_t WebRtcAecm_get_config(void *aecmInst, AecmConfig *config); /* * This function enables the user to set the echo path on-the-fly. 
@@ -181,12 +178,12 @@ WebRtc_Word32 WebRtcAecm_get_config(void *aecmInst, * * Outputs Description * ------------------------------------------------------------------- - * WebRtc_Word32 return 0: OK + * int32_t return 0: OK * -1: error */ -WebRtc_Word32 WebRtcAecm_InitEchoPath(void* aecmInst, - const void* echo_path, - size_t size_bytes); +int32_t WebRtcAecm_InitEchoPath(void* aecmInst, + const void* echo_path, + size_t size_bytes); /* * This function enables the user to get the currently used echo path @@ -200,12 +197,12 @@ WebRtc_Word32 WebRtcAecm_InitEchoPath(void* aecmInst, * * Outputs Description * ------------------------------------------------------------------- - * WebRtc_Word32 return 0: OK + * int32_t return 0: OK * -1: error */ -WebRtc_Word32 WebRtcAecm_GetEchoPath(void* aecmInst, - void* echo_path, - size_t size_bytes); +int32_t WebRtcAecm_GetEchoPath(void* aecmInst, + void* echo_path, + size_t size_bytes); /* * This function enables the user to get the echo path size in bytes @@ -225,9 +222,9 @@ size_t WebRtcAecm_echo_path_size_bytes(); * * Outputs Description * ------------------------------------------------------------------- - * WebRtc_Word32 return 11000-11100: error code + * int32_t return 11000-11100: error code */ -WebRtc_Word32 WebRtcAecm_get_error_code(void *aecmInst); +int32_t WebRtcAecm_get_error_code(void *aecmInst); #ifdef __cplusplus } diff --git a/webrtc/modules/audio_processing/agc/analog_agc.c b/webrtc/modules/audio_processing/agc/analog_agc.c index d60b4b9e8..0965defec 100644 --- a/webrtc/modules/audio_processing/agc/analog_agc.c +++ b/webrtc/modules/audio_processing/agc/analog_agc.c @@ -25,21 +25,21 @@ #include "analog_agc.h" /* The slope of in Q13*/ -static const WebRtc_Word16 kSlope1[8] = {21793, 12517, 7189, 4129, 2372, 1362, 472, 78}; +static const int16_t kSlope1[8] = {21793, 12517, 7189, 4129, 2372, 1362, 472, 78}; /* The offset in Q14 */ -static const WebRtc_Word16 kOffset1[8] = {25395, 23911, 22206, 20737, 19612, 18805, 17951, +static const int16_t kOffset1[8] = {25395, 23911, 22206, 20737, 19612, 18805, 17951, 17367}; /* The slope of in Q13*/ -static const WebRtc_Word16 kSlope2[8] = {2063, 1731, 1452, 1218, 1021, 857, 597, 337}; +static const int16_t kSlope2[8] = {2063, 1731, 1452, 1218, 1021, 857, 597, 337}; /* The offset in Q14 */ -static const WebRtc_Word16 kOffset2[8] = {18432, 18379, 18290, 18177, 18052, 17920, 17670, +static const int16_t kOffset2[8] = {18432, 18379, 18290, 18177, 18052, 17920, 17670, 17286}; -static const WebRtc_Word16 kMuteGuardTimeMs = 8000; -static const WebRtc_Word16 kInitCheck = 42; +static const int16_t kMuteGuardTimeMs = 8000; +static const int16_t kInitCheck = 42; /* Default settings if config is not used */ #define AGC_DEFAULT_TARGET_LEVEL 3 @@ -72,12 +72,12 @@ static const WebRtc_Word16 kInitCheck = 42; * fprintf(1, '\t%i, %i, %i, %i,\n', round(10.^(linspace(0,10,32)/20) * 2^12)); */ /* Q12 */ -static const WebRtc_UWord16 kGainTableAnalog[GAIN_TBL_LEN] = {4096, 4251, 4412, 4579, 4752, +static const uint16_t kGainTableAnalog[GAIN_TBL_LEN] = {4096, 4251, 4412, 4579, 4752, 4932, 5118, 5312, 5513, 5722, 5938, 6163, 6396, 6638, 6889, 7150, 7420, 7701, 7992, 8295, 8609, 8934, 9273, 9623, 9987, 10365, 10758, 11165, 11587, 12025, 12480, 12953}; /* Gain/Suppression tables for virtual Mic (in Q10) */ -static const WebRtc_UWord16 kGainTableVirtualMic[128] = {1052, 1081, 1110, 1141, 1172, 1204, +static const uint16_t kGainTableVirtualMic[128] = {1052, 1081, 1110, 1141, 1172, 1204, 1237, 1271, 1305, 1341, 1378, 1416, 1454, 
1494, 1535, 1577, 1620, 1664, 1710, 1757, 1805, 1854, 1905, 1957, 2010, 2065, 2122, 2180, 2239, 2301, 2364, 2428, 2495, 2563, 2633, 2705, 2779, 2855, 2933, 3013, 3096, 3180, 3267, 3357, 3449, 3543, 3640, 3739, @@ -88,7 +88,7 @@ static const WebRtc_UWord16 kGainTableVirtualMic[128] = {1052, 1081, 1110, 1141, 16055, 16494, 16945, 17409, 17885, 18374, 18877, 19393, 19923, 20468, 21028, 21603, 22194, 22801, 23425, 24065, 24724, 25400, 26095, 26808, 27541, 28295, 29069, 29864, 30681, 31520, 32382}; -static const WebRtc_UWord16 kSuppressionTableVirtualMic[128] = {1024, 1006, 988, 970, 952, +static const uint16_t kSuppressionTableVirtualMic[128] = {1024, 1006, 988, 970, 952, 935, 918, 902, 886, 870, 854, 839, 824, 809, 794, 780, 766, 752, 739, 726, 713, 700, 687, 675, 663, 651, 639, 628, 616, 605, 594, 584, 573, 563, 553, 543, 533, 524, 514, 505, 496, 487, 478, 470, 461, 453, 445, 437, 429, 421, 414, 406, 399, 392, 385, 378, @@ -102,7 +102,7 @@ static const WebRtc_UWord16 kSuppressionTableVirtualMic[128] = {1024, 1006, 988, * Matlab code * targetLevelTable = fprintf('%d,\t%d,\t%d,\t%d,\n', round((32767*10.^(-(0:63)'/20)).^2*16/2^7) */ -static const WebRtc_Word32 kTargetLevelTable[64] = {134209536, 106606424, 84680493, 67264106, +static const int32_t kTargetLevelTable[64] = {134209536, 106606424, 84680493, 67264106, 53429779, 42440782, 33711911, 26778323, 21270778, 16895980, 13420954, 10660642, 8468049, 6726411, 5342978, 4244078, 3371191, 2677832, 2127078, 1689598, 1342095, 1066064, 846805, 672641, 534298, 424408, 337119, 267783, 212708, 168960, 134210, @@ -110,13 +110,13 @@ static const WebRtc_Word32 kTargetLevelTable[64] = {134209536, 106606424, 846804 6726, 5343, 4244, 3371, 2678, 2127, 1690, 1342, 1066, 847, 673, 534, 424, 337, 268, 213, 169, 134, 107, 85, 67}; -int WebRtcAgc_AddMic(void *state, WebRtc_Word16 *in_mic, WebRtc_Word16 *in_mic_H, - WebRtc_Word16 samples) +int WebRtcAgc_AddMic(void *state, int16_t *in_mic, int16_t *in_mic_H, + int16_t samples) { - WebRtc_Word32 nrg, max_nrg, sample, tmp32; - WebRtc_Word32 *ptr; - WebRtc_UWord16 targetGainIdx, gain; - WebRtc_Word16 i, n, L, M, subFrames, tmp16, tmp_speech[16]; + int32_t nrg, max_nrg, sample, tmp32; + int32_t *ptr; + uint16_t targetGainIdx, gain; + int16_t i, n, L, M, subFrames, tmp16, tmp_speech[16]; Agc_t *stt; stt = (Agc_t *)state; @@ -205,10 +205,10 @@ int WebRtcAgc_AddMic(void *state, WebRtc_Word16 *in_mic, WebRtc_Word16 *in_mic_H assert(stt->maxLevel > stt->maxAnalog); /* Q1 */ - tmp16 = (WebRtc_Word16)(stt->micVol - stt->maxAnalog); + tmp16 = (int16_t)(stt->micVol - stt->maxAnalog); tmp32 = WEBRTC_SPL_MUL_16_16(GAIN_TBL_LEN - 1, tmp16); - tmp16 = (WebRtc_Word16)(stt->maxLevel - stt->maxAnalog); - targetGainIdx = (WebRtc_UWord16)WEBRTC_SPL_DIV(tmp32, tmp16); + tmp16 = (int16_t)(stt->maxLevel - stt->maxAnalog); + targetGainIdx = (uint16_t)WEBRTC_SPL_DIV(tmp32, tmp16); assert(targetGainIdx < GAIN_TBL_LEN); /* Increment through the table towards the target gain. 
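The SPL macros in the hunk above are terse; in plain C, the target-index computation in WebRtcAgc_AddMic amounts to the sketch below. It uses the Agc_t fields and GAIN_TBL_LEN from this file, but it is an illustration, not code from the commit (requires <stdint.h>).

    /* Sketch: map the current mic volume onto an index into kGainTableAnalog.
     * Mirrors WEBRTC_SPL_MUL_16_16 (a 16x16 -> 32 bit multiply) and
     * WEBRTC_SPL_DIV (a plain 32-bit division) as used in the hunk above. */
    static uint16_t TargetGainIndex(const Agc_t* stt) {
        int16_t over  = (int16_t)(stt->micVol - stt->maxAnalog);   /* Q1, positive here */
        int16_t range = (int16_t)(stt->maxLevel - stt->maxAnalog); /* Q1 */
        int32_t num   = (int32_t)(GAIN_TBL_LEN - 1) * over;
        return (uint16_t)(num / range);  /* < GAIN_TBL_LEN, per the assert */
    }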
@@ -238,7 +238,7 @@ int WebRtcAgc_AddMic(void *state, WebRtc_Word16 *in_mic, WebRtc_Word16 *in_mic_H in_mic[i] = -32768; } else { - in_mic[i] = (WebRtc_Word16)sample; + in_mic[i] = (int16_t)sample; } // For higher band @@ -254,7 +254,7 @@ int WebRtcAgc_AddMic(void *state, WebRtc_Word16 *in_mic, WebRtc_Word16 *in_mic_H in_mic_H[i] = -32768; } else { - in_mic_H[i] = (WebRtc_Word16)sample; + in_mic_H[i] = (int16_t)sample; } } } @@ -327,10 +327,10 @@ int WebRtcAgc_AddMic(void *state, WebRtc_Word16 *in_mic, WebRtc_Word16 *in_mic_H return 0; } -int WebRtcAgc_AddFarend(void *state, const WebRtc_Word16 *in_far, WebRtc_Word16 samples) +int WebRtcAgc_AddFarend(void *state, const int16_t *in_far, int16_t samples) { - WebRtc_Word32 errHandle = 0; - WebRtc_Word16 i, subFrames; + int32_t errHandle = 0; + int16_t i, subFrames; Agc_t *stt; stt = (Agc_t *)state; @@ -393,22 +393,22 @@ int WebRtcAgc_AddFarend(void *state, const WebRtc_Word16 *in_far, WebRtc_Word16 return errHandle; } -int WebRtcAgc_VirtualMic(void *agcInst, WebRtc_Word16 *in_near, WebRtc_Word16 *in_near_H, - WebRtc_Word16 samples, WebRtc_Word32 micLevelIn, - WebRtc_Word32 *micLevelOut) +int WebRtcAgc_VirtualMic(void *agcInst, int16_t *in_near, int16_t *in_near_H, + int16_t samples, int32_t micLevelIn, + int32_t *micLevelOut) { - WebRtc_Word32 tmpFlt, micLevelTmp, gainIdx; - WebRtc_UWord16 gain; - WebRtc_Word16 ii; + int32_t tmpFlt, micLevelTmp, gainIdx; + uint16_t gain; + int16_t ii; Agc_t *stt; - WebRtc_UWord32 nrg; - WebRtc_Word16 sampleCntr; - WebRtc_UWord32 frameNrg = 0; - WebRtc_UWord32 frameNrgLimit = 5500; - WebRtc_Word16 numZeroCrossing = 0; - const WebRtc_Word16 kZeroCrossingLowLim = 15; - const WebRtc_Word16 kZeroCrossingHighLim = 20; + uint32_t nrg; + int16_t sampleCntr; + uint32_t frameNrg = 0; + uint32_t frameNrgLimit = 5500; + int16_t numZeroCrossing = 0; + const int16_t kZeroCrossingLowLim = 15; + const int16_t kZeroCrossingHighLim = 20; stt = (Agc_t *)agcInst; @@ -507,7 +507,7 @@ int WebRtcAgc_VirtualMic(void *agcInst, WebRtc_Word16 *in_near, WebRtc_Word16 *i gain = kSuppressionTableVirtualMic[127 - gainIdx]; } } - in_near[ii] = (WebRtc_Word16)tmpFlt; + in_near[ii] = (int16_t)tmpFlt; if (stt->fs == 32000) { tmpFlt = WEBRTC_SPL_MUL_16_U16(in_near_H[ii], gain); @@ -520,7 +520,7 @@ int WebRtcAgc_VirtualMic(void *agcInst, WebRtc_Word16 *in_near, WebRtc_Word16 *i { tmpFlt = -32768; } - in_near_H[ii] = (WebRtc_Word16)tmpFlt; + in_near_H[ii] = (int16_t)tmpFlt; } } /* Set the level we (finally) used */ @@ -538,7 +538,7 @@ int WebRtcAgc_VirtualMic(void *agcInst, WebRtc_Word16 *in_near, WebRtc_Word16 *i void WebRtcAgc_UpdateAgcThresholds(Agc_t *stt) { - WebRtc_Word16 tmp16; + int16_t tmp16; #ifdef MIC_LEVEL_FEEDBACK int zeros; @@ -552,7 +552,7 @@ void WebRtcAgc_UpdateAgcThresholds(Agc_t *stt) /* Set analog target level in envelope dBOv scale */ tmp16 = (DIFF_REF_TO_ANALOG * stt->compressionGaindB) + ANALOG_TARGET_LEVEL_2; - tmp16 = WebRtcSpl_DivW32W16ResW16((WebRtc_Word32)tmp16, ANALOG_TARGET_LEVEL); + tmp16 = WebRtcSpl_DivW32W16ResW16((int32_t)tmp16, ANALOG_TARGET_LEVEL); stt->analogTarget = DIGITAL_REF_AT_0_COMP_GAIN + tmp16; if (stt->analogTarget < DIGITAL_REF_AT_0_COMP_GAIN) { @@ -587,14 +587,14 @@ void WebRtcAgc_UpdateAgcThresholds(Agc_t *stt) stt->lowerLimit = stt->startLowerLimit; } -void WebRtcAgc_SaturationCtrl(Agc_t *stt, WebRtc_UWord8 *saturated, WebRtc_Word32 *env) +void WebRtcAgc_SaturationCtrl(Agc_t *stt, uint8_t *saturated, int32_t *env) { - WebRtc_Word16 i, tmpW16; + int16_t i, tmpW16; /* Check if the signal is 
saturated */ for (i = 0; i < 10; i++) { - tmpW16 = (WebRtc_Word16)WEBRTC_SPL_RSHIFT_W32(env[i], 20); + tmpW16 = (int16_t)WEBRTC_SPL_RSHIFT_W32(env[i], 20); if (tmpW16 > 875) { stt->envSum += tmpW16; @@ -608,15 +608,15 @@ void WebRtcAgc_SaturationCtrl(Agc_t *stt, WebRtc_UWord8 *saturated, WebRtc_Word3 } /* stt->envSum *= 0.99; */ - stt->envSum = (WebRtc_Word16)WEBRTC_SPL_MUL_16_16_RSFT(stt->envSum, - (WebRtc_Word16)32440, 15); + stt->envSum = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(stt->envSum, + (int16_t)32440, 15); } -void WebRtcAgc_ZeroCtrl(Agc_t *stt, WebRtc_Word32 *inMicLevel, WebRtc_Word32 *env) +void WebRtcAgc_ZeroCtrl(Agc_t *stt, int32_t *inMicLevel, int32_t *env) { - WebRtc_Word16 i; - WebRtc_Word32 tmp32 = 0; - WebRtc_Word32 midVal; + int16_t i; + int32_t tmp32 = 0; + int32_t midVal; /* Is the input signal zero? */ for (i = 0; i < 10; i++) @@ -682,8 +682,8 @@ void WebRtcAgc_SpeakerInactiveCtrl(Agc_t *stt) * silence. */ - WebRtc_Word32 tmp32; - WebRtc_Word16 vadThresh; + int32_t tmp32; + int16_t vadThresh; if (stt->vadMic.stdLongTerm < 2500) { @@ -698,13 +698,13 @@ void WebRtcAgc_SpeakerInactiveCtrl(Agc_t *stt) } /* stt->vadThreshold = (31 * stt->vadThreshold + vadThresh) / 32; */ - tmp32 = (WebRtc_Word32)vadThresh; - tmp32 += WEBRTC_SPL_MUL_16_16((WebRtc_Word16)31, stt->vadThreshold); - stt->vadThreshold = (WebRtc_Word16)WEBRTC_SPL_RSHIFT_W32(tmp32, 5); + tmp32 = (int32_t)vadThresh; + tmp32 += WEBRTC_SPL_MUL_16_16((int16_t)31, stt->vadThreshold); + stt->vadThreshold = (int16_t)WEBRTC_SPL_RSHIFT_W32(tmp32, 5); } } -void WebRtcAgc_ExpCurve(WebRtc_Word16 volume, WebRtc_Word16 *index) +void WebRtcAgc_ExpCurve(int16_t volume, int16_t *index) { // volume in Q14 // index in [0-7] @@ -754,16 +754,16 @@ void WebRtcAgc_ExpCurve(WebRtc_Word16 volume, WebRtc_Word16 *index) } } -WebRtc_Word32 WebRtcAgc_ProcessAnalog(void *state, WebRtc_Word32 inMicLevel, - WebRtc_Word32 *outMicLevel, - WebRtc_Word16 vadLogRatio, - WebRtc_Word16 echo, WebRtc_UWord8 *saturationWarning) +int32_t WebRtcAgc_ProcessAnalog(void *state, int32_t inMicLevel, + int32_t *outMicLevel, + int16_t vadLogRatio, + int16_t echo, uint8_t *saturationWarning) { - WebRtc_UWord32 tmpU32; - WebRtc_Word32 Rxx16w32, tmp32; - WebRtc_Word32 inMicLevelTmp, lastMicVol; - WebRtc_Word16 i; - WebRtc_UWord8 saturated = 0; + uint32_t tmpU32; + int32_t Rxx16w32, tmp32; + int32_t inMicLevelTmp, lastMicVol; + int16_t i; + uint8_t saturated = 0; Agc_t *stt; stt = (Agc_t *)state; @@ -785,9 +785,9 @@ WebRtc_Word32 WebRtcAgc_ProcessAnalog(void *state, WebRtc_Word32 inMicLevel, if (stt->firstCall == 0) { - WebRtc_Word32 tmpVol; + int32_t tmpVol; stt->firstCall = 1; - tmp32 = WEBRTC_SPL_RSHIFT_W32((stt->maxLevel - stt->minLevel) * (WebRtc_Word32)51, 9); + tmp32 = WEBRTC_SPL_RSHIFT_W32((stt->maxLevel - stt->minLevel) * (int32_t)51, 9); tmpVol = (stt->minLevel + tmp32); /* If the mic level is very low at start, increase it! */ @@ -807,7 +807,7 @@ WebRtc_Word32 WebRtcAgc_ProcessAnalog(void *state, WebRtc_Word32 inMicLevel, /* If the mic level was manually changed to a very low value raise it! 
*/ if ((inMicLevelTmp != stt->micVol) && (inMicLevelTmp < stt->minOutput)) { - tmp32 = WEBRTC_SPL_RSHIFT_W32((stt->maxLevel - stt->minLevel) * (WebRtc_Word32)51, 9); + tmp32 = WEBRTC_SPL_RSHIFT_W32((stt->maxLevel - stt->minLevel) * (int32_t)51, 9); inMicLevelTmp = (stt->minLevel + tmp32); stt->micVol = inMicLevelTmp; #ifdef MIC_LEVEL_FEEDBACK @@ -856,8 +856,8 @@ WebRtc_Word32 WebRtcAgc_ProcessAnalog(void *state, WebRtc_Word32 inMicLevel, /* stt->micVol *= 0.903; */ tmp32 = inMicLevelTmp - stt->minLevel; - tmpU32 = WEBRTC_SPL_UMUL(29591, (WebRtc_UWord32)(tmp32)); - stt->micVol = (WebRtc_Word32)WEBRTC_SPL_RSHIFT_U32(tmpU32, 15) + stt->minLevel; + tmpU32 = WEBRTC_SPL_UMUL(29591, (uint32_t)(tmp32)); + stt->micVol = (int32_t)WEBRTC_SPL_RSHIFT_U32(tmpU32, 15) + stt->minLevel; if (stt->micVol > lastMicVol - 2) { stt->micVol = lastMicVol - 2; @@ -988,8 +988,8 @@ WebRtc_Word32 WebRtcAgc_ProcessAnalog(void *state, WebRtc_Word32 inMicLevel, /* 0.95 in Q15 */ tmp32 = inMicLevelTmp - stt->minLevel; - tmpU32 = WEBRTC_SPL_UMUL(31130, (WebRtc_UWord32)(tmp32)); - stt->micVol = (WebRtc_Word32)WEBRTC_SPL_RSHIFT_U32(tmpU32, 15) + stt->minLevel; + tmpU32 = WEBRTC_SPL_UMUL(31130, (uint32_t)(tmp32)); + stt->micVol = (int32_t)WEBRTC_SPL_RSHIFT_U32(tmpU32, 15) + stt->minLevel; if (stt->micVol > lastMicVol - 1) { stt->micVol = lastMicVol - 1; @@ -1036,8 +1036,8 @@ WebRtc_Word32 WebRtcAgc_ProcessAnalog(void *state, WebRtc_Word32 inMicLevel, /* 0.965 in Q15 */ tmp32 = inMicLevelTmp - stt->minLevel; - tmpU32 = WEBRTC_SPL_UMUL(31621, (WebRtc_UWord32)(inMicLevelTmp - stt->minLevel)); - stt->micVol = (WebRtc_Word32)WEBRTC_SPL_RSHIFT_U32(tmpU32, 15) + stt->minLevel; + tmpU32 = WEBRTC_SPL_UMUL(31621, (uint32_t)(inMicLevelTmp - stt->minLevel)); + stt->micVol = (int32_t)WEBRTC_SPL_RSHIFT_U32(tmpU32, 15) + stt->minLevel; if (stt->micVol > lastMicVol - 1) { stt->micVol = lastMicVol - 1; @@ -1062,8 +1062,8 @@ WebRtc_Word32 WebRtcAgc_ProcessAnalog(void *state, WebRtc_Word32 inMicLevel, if (stt->msTooLow > stt->msecSpeechOuterChange) { /* Raise the recording level */ - WebRtc_Word16 index, weightFIX; - WebRtc_Word16 volNormFIX = 16384; // =1 in Q14. + int16_t index, weightFIX; + int16_t volNormFIX = 16384; // =1 in Q14. 
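The fixed-point statements that follow implement, roughly, this floating-point update: normalize the mic volume into [0, 1] over [minLevel, maxInit], turn it into a weight via the kOffset1/kSlope1 tables (the "32^(-2*X)/2+1.05" curve noted in the comment below), and raise the volume multiplicatively. A float sketch under those assumptions — offset and slope stand in for the rescaled Q14 kOffset1[index] and Q13 kSlope1[index] values; this is not code from the commit:

    /* Float-domain sketch of the volume raise (requires <stdint.h>). */
    static int32_t RaisedMicVol(int32_t micVol, int32_t minLevel, int32_t maxInit,
                                double offset, double slope) {
        double volNorm = (double)(micVol - minLevel) / (double)(maxInit - minLevel);
        double weight  = offset - slope * volNorm;  /* ~ 32^(-2*volNorm)/2 + 1.05 */
        return minLevel + (int32_t)(weight * (micVol - minLevel));
    }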
stt->msTooLow = 0; @@ -1071,7 +1071,7 @@ WebRtc_Word32 WebRtcAgc_ProcessAnalog(void *state, WebRtc_Word32 inMicLevel, tmp32 = WEBRTC_SPL_LSHIFT_W32(inMicLevelTmp - stt->minLevel, 14); if (stt->maxInit != stt->minLevel) { - volNormFIX = (WebRtc_Word16)WEBRTC_SPL_DIV(tmp32, + volNormFIX = (int16_t)WEBRTC_SPL_DIV(tmp32, (stt->maxInit - stt->minLevel)); } @@ -1080,7 +1080,7 @@ WebRtc_Word32 WebRtcAgc_ProcessAnalog(void *state, WebRtc_Word32 inMicLevel, /* Compute weighting factor for the volume increase, 32^(-2*X)/2+1.05 */ weightFIX = kOffset1[index] - - (WebRtc_Word16)WEBRTC_SPL_MUL_16_16_RSFT(kSlope1[index], + - (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(kSlope1[index], volNormFIX, 13); /* stt->Rxx160_LPw32 *= 1.047 [~0.2 dB]; */ @@ -1088,8 +1088,8 @@ WebRtc_Word32 WebRtcAgc_ProcessAnalog(void *state, WebRtc_Word32 inMicLevel, stt->Rxx160_LPw32 = WEBRTC_SPL_MUL(tmp32, 67); tmp32 = inMicLevelTmp - stt->minLevel; - tmpU32 = ((WebRtc_UWord32)weightFIX * (WebRtc_UWord32)(inMicLevelTmp - stt->minLevel)); - stt->micVol = (WebRtc_Word32)WEBRTC_SPL_RSHIFT_U32(tmpU32, 14) + stt->minLevel; + tmpU32 = ((uint32_t)weightFIX * (uint32_t)(inMicLevelTmp - stt->minLevel)); + stt->micVol = (int32_t)WEBRTC_SPL_RSHIFT_U32(tmpU32, 14) + stt->minLevel; if (stt->micVol < lastMicVol + 2) { stt->micVol = lastMicVol + 2; @@ -1122,8 +1122,8 @@ WebRtc_Word32 WebRtcAgc_ProcessAnalog(void *state, WebRtc_Word32 inMicLevel, if (stt->msTooLow > stt->msecSpeechInnerChange) { /* Raise the recording level */ - WebRtc_Word16 index, weightFIX; - WebRtc_Word16 volNormFIX = 16384; // =1 in Q14. + int16_t index, weightFIX; + int16_t volNormFIX = 16384; // =1 in Q14. stt->msTooLow = 0; @@ -1131,7 +1131,7 @@ WebRtc_Word32 WebRtcAgc_ProcessAnalog(void *state, WebRtc_Word32 inMicLevel, tmp32 = WEBRTC_SPL_LSHIFT_W32(inMicLevelTmp - stt->minLevel, 14); if (stt->maxInit != stt->minLevel) { - volNormFIX = (WebRtc_Word16)WEBRTC_SPL_DIV(tmp32, + volNormFIX = (int16_t)WEBRTC_SPL_DIV(tmp32, (stt->maxInit - stt->minLevel)); } @@ -1140,7 +1140,7 @@ WebRtc_Word32 WebRtcAgc_ProcessAnalog(void *state, WebRtc_Word32 inMicLevel, /* Compute weighting factor for the volume increase, (3.^(-2.*X))/8+1 */ weightFIX = kOffset2[index] - - (WebRtc_Word16)WEBRTC_SPL_MUL_16_16_RSFT(kSlope2[index], + - (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(kSlope2[index], volNormFIX, 13); /* stt->Rxx160_LPw32 *= 1.047 [~0.2 dB]; */ @@ -1148,8 +1148,8 @@ WebRtc_Word32 WebRtcAgc_ProcessAnalog(void *state, WebRtc_Word32 inMicLevel, stt->Rxx160_LPw32 = WEBRTC_SPL_MUL(tmp32, 67); tmp32 = inMicLevelTmp - stt->minLevel; - tmpU32 = ((WebRtc_UWord32)weightFIX * (WebRtc_UWord32)(inMicLevelTmp - stt->minLevel)); - stt->micVol = (WebRtc_Word32)WEBRTC_SPL_RSHIFT_U32(tmpU32, 14) + stt->minLevel; + tmpU32 = ((uint32_t)weightFIX * (uint32_t)(inMicLevelTmp - stt->minLevel)); + stt->micVol = (int32_t)WEBRTC_SPL_RSHIFT_U32(tmpU32, 14) + stt->minLevel; if (stt->micVol < lastMicVol + 1) { stt->micVol = lastMicVol + 1; @@ -1242,16 +1242,16 @@ WebRtc_Word32 WebRtcAgc_ProcessAnalog(void *state, WebRtc_Word32 inMicLevel, return 0; } -int WebRtcAgc_Process(void *agcInst, const WebRtc_Word16 *in_near, - const WebRtc_Word16 *in_near_H, WebRtc_Word16 samples, - WebRtc_Word16 *out, WebRtc_Word16 *out_H, WebRtc_Word32 inMicLevel, - WebRtc_Word32 *outMicLevel, WebRtc_Word16 echo, - WebRtc_UWord8 *saturationWarning) +int WebRtcAgc_Process(void *agcInst, const int16_t *in_near, + const int16_t *in_near_H, int16_t samples, + int16_t *out, int16_t *out_H, int32_t inMicLevel, + int32_t *outMicLevel, int16_t echo, + uint8_t 
*saturationWarning) { Agc_t *stt; - WebRtc_Word32 inMicLevelTmp; - WebRtc_Word16 subFrames, i; - WebRtc_UWord8 satWarningTmp = 0; + int32_t inMicLevelTmp; + int16_t subFrames, i; + uint8_t satWarningTmp = 0; stt = (Agc_t *)agcInst; @@ -1326,13 +1326,13 @@ int WebRtcAgc_Process(void *agcInst, const WebRtc_Word16 *in_near, if (in_near != out) { // Only needed if they don't already point to the same place. - memcpy(out, in_near, samples * sizeof(WebRtc_Word16)); + memcpy(out, in_near, samples * sizeof(int16_t)); } if (stt->fs == 32000) { if (in_near_H != out_H) { - memcpy(out_H, in_near_H, samples * sizeof(WebRtc_Word16)); + memcpy(out_H, in_near_H, samples * sizeof(int16_t)); } } @@ -1366,8 +1366,8 @@ int WebRtcAgc_Process(void *agcInst, const WebRtc_Word16 *in_near, /* update queue */ if (stt->inQueue > 1) { - memcpy(stt->env[0], stt->env[1], 10 * sizeof(WebRtc_Word32)); - memcpy(stt->Rxx16w32_array[0], stt->Rxx16w32_array[1], 5 * sizeof(WebRtc_Word32)); + memcpy(stt->env[0], stt->env[1], 10 * sizeof(int32_t)); + memcpy(stt->Rxx16w32_array[0], stt->Rxx16w32_array[1], 5 * sizeof(int32_t)); } if (stt->inQueue > 0) @@ -1523,11 +1523,11 @@ int WebRtcAgc_Free(void *state) /* minLevel - Minimum volume level * maxLevel - Maximum volume level */ -int WebRtcAgc_Init(void *agcInst, WebRtc_Word32 minLevel, WebRtc_Word32 maxLevel, - WebRtc_Word16 agcMode, WebRtc_UWord32 fs) +int WebRtcAgc_Init(void *agcInst, int32_t minLevel, int32_t maxLevel, + int16_t agcMode, uint32_t fs) { - WebRtc_Word32 max_add, tmp32; - WebRtc_Word16 i; + int32_t max_add, tmp32; + int16_t i; int tmpNorm; Agc_t *stt; @@ -1567,7 +1567,7 @@ int WebRtcAgc_Init(void *agcInst, WebRtc_Word32 minLevel, WebRtc_Word32 maxLevel /* If the volume range is smaller than 0-256 then * the levels are shifted up to Q8-domain */ - tmpNorm = WebRtcSpl_NormU32((WebRtc_UWord32)maxLevel); + tmpNorm = WebRtcSpl_NormU32((uint32_t)maxLevel); stt->scale = tmpNorm - 23; if (stt->scale < 0) { @@ -1617,7 +1617,7 @@ int WebRtcAgc_Init(void *agcInst, WebRtc_Word32 minLevel, WebRtc_Word32 maxLevel #endif /* Minimum output volume is 4% higher than the available lowest volume level */ - tmp32 = WEBRTC_SPL_RSHIFT_W32((stt->maxLevel - stt->minLevel) * (WebRtc_Word32)10, 8); + tmp32 = WEBRTC_SPL_RSHIFT_W32((stt->maxLevel - stt->minLevel) * (int32_t)10, 8); stt->minOutput = (stt->minLevel + tmp32); stt->msTooLow = 0; @@ -1639,12 +1639,12 @@ int WebRtcAgc_Init(void *agcInst, WebRtc_Word32 minLevel, WebRtc_Word32 maxLevel for (i = 0; i < RXX_BUFFER_LEN; i++) { - stt->Rxx16_vectorw32[i] = (WebRtc_Word32)1000; /* -54dBm0 */ + stt->Rxx16_vectorw32[i] = (int32_t)1000; /* -54dBm0 */ } stt->Rxx160w32 = 125 * RXX_BUFFER_LEN; /* (stt->Rxx16_vectorw32[0]>>3) = 125 */ stt->Rxx16pos = 0; - stt->Rxx16_LPw32 = (WebRtc_Word32)16284; /* Q(-4) */ + stt->Rxx16_LPw32 = (int32_t)16284; /* Q(-4) */ for (i = 0; i < 5; i++) { diff --git a/webrtc/modules/audio_processing/agc/analog_agc.h b/webrtc/modules/audio_processing/agc/analog_agc.h index b32ac6581..ce005fca8 100644 --- a/webrtc/modules/audio_processing/agc/analog_agc.h +++ b/webrtc/modules/audio_processing/agc/analog_agc.h @@ -35,87 +35,87 @@ */ #define RXX_BUFFER_LEN 10 -static const WebRtc_Word16 kMsecSpeechInner = 520; -static const WebRtc_Word16 kMsecSpeechOuter = 340; +static const int16_t kMsecSpeechInner = 520; +static const int16_t kMsecSpeechOuter = 340; -static const WebRtc_Word16 kNormalVadThreshold = 400; +static const int16_t kNormalVadThreshold = 400; -static const WebRtc_Word16 kAlphaShortTerm = 6; // 1 >> 6 = 0.0156 
-static const WebRtc_Word16 kAlphaLongTerm = 10; // 1 >> 10 = 0.000977 +static const int16_t kAlphaShortTerm = 6; // 1 >> 6 = 0.0156 +static const int16_t kAlphaLongTerm = 10; // 1 >> 10 = 0.000977 typedef struct { // Configurable parameters/variables - WebRtc_UWord32 fs; // Sampling frequency - WebRtc_Word16 compressionGaindB; // Fixed gain level in dB - WebRtc_Word16 targetLevelDbfs; // Target level in -dBfs of envelope (default -3) - WebRtc_Word16 agcMode; // Hard coded mode (adaptAna/adaptDig/fixedDig) - WebRtc_UWord8 limiterEnable; // Enabling limiter (on/off (default off)) + uint32_t fs; // Sampling frequency + int16_t compressionGaindB; // Fixed gain level in dB + int16_t targetLevelDbfs; // Target level in -dBfs of envelope (default -3) + int16_t agcMode; // Hard coded mode (adaptAna/adaptDig/fixedDig) + uint8_t limiterEnable; // Enabling limiter (on/off (default off)) WebRtcAgc_config_t defaultConfig; WebRtcAgc_config_t usedConfig; // General variables - WebRtc_Word16 initFlag; - WebRtc_Word16 lastError; + int16_t initFlag; + int16_t lastError; // Target level parameters // Based on the above: analogTargetLevel = round((32767*10^(-22/20))^2*16/2^7) - WebRtc_Word32 analogTargetLevel; // = RXX_BUFFER_LEN * 846805; -22 dBfs - WebRtc_Word32 startUpperLimit; // = RXX_BUFFER_LEN * 1066064; -21 dBfs - WebRtc_Word32 startLowerLimit; // = RXX_BUFFER_LEN * 672641; -23 dBfs - WebRtc_Word32 upperPrimaryLimit; // = RXX_BUFFER_LEN * 1342095; -20 dBfs - WebRtc_Word32 lowerPrimaryLimit; // = RXX_BUFFER_LEN * 534298; -24 dBfs - WebRtc_Word32 upperSecondaryLimit;// = RXX_BUFFER_LEN * 2677832; -17 dBfs - WebRtc_Word32 lowerSecondaryLimit;// = RXX_BUFFER_LEN * 267783; -27 dBfs - WebRtc_UWord16 targetIdx; // Table index for corresponding target level + int32_t analogTargetLevel; // = RXX_BUFFER_LEN * 846805; -22 dBfs + int32_t startUpperLimit; // = RXX_BUFFER_LEN * 1066064; -21 dBfs + int32_t startLowerLimit; // = RXX_BUFFER_LEN * 672641; -23 dBfs + int32_t upperPrimaryLimit; // = RXX_BUFFER_LEN * 1342095; -20 dBfs + int32_t lowerPrimaryLimit; // = RXX_BUFFER_LEN * 534298; -24 dBfs + int32_t upperSecondaryLimit;// = RXX_BUFFER_LEN * 2677832; -17 dBfs + int32_t lowerSecondaryLimit;// = RXX_BUFFER_LEN * 267783; -27 dBfs + uint16_t targetIdx; // Table index for corresponding target level #ifdef MIC_LEVEL_FEEDBACK - WebRtc_UWord16 targetIdxOffset; // Table index offset for level compensation + uint16_t targetIdxOffset; // Table index offset for level compensation #endif - WebRtc_Word16 analogTarget; // Digital reference level in ENV scale + int16_t analogTarget; // Digital reference level in ENV scale // Analog AGC specific variables - WebRtc_Word32 filterState[8]; // For downsampling wb to nb - WebRtc_Word32 upperLimit; // Upper limit for mic energy - WebRtc_Word32 lowerLimit; // Lower limit for mic energy - WebRtc_Word32 Rxx160w32; // Average energy for one frame - WebRtc_Word32 Rxx16_LPw32; // Low pass filtered subframe energies - WebRtc_Word32 Rxx160_LPw32; // Low pass filtered frame energies - WebRtc_Word32 Rxx16_LPw32Max; // Keeps track of largest energy subframe - WebRtc_Word32 Rxx16_vectorw32[RXX_BUFFER_LEN];// Array with subframe energies - WebRtc_Word32 Rxx16w32_array[2][5];// Energy values of microphone signal - WebRtc_Word32 env[2][10]; // Envelope values of subframes + int32_t filterState[8]; // For downsampling wb to nb + int32_t upperLimit; // Upper limit for mic energy + int32_t lowerLimit; // Lower limit for mic energy + int32_t Rxx160w32; // Average energy for one frame + int32_t 
Rxx16_LPw32; // Low pass filtered subframe energies + int32_t Rxx160_LPw32; // Low pass filtered frame energies + int32_t Rxx16_LPw32Max; // Keeps track of largest energy subframe + int32_t Rxx16_vectorw32[RXX_BUFFER_LEN];// Array with subframe energies + int32_t Rxx16w32_array[2][5];// Energy values of microphone signal + int32_t env[2][10]; // Envelope values of subframes - WebRtc_Word16 Rxx16pos; // Current position in the Rxx16_vectorw32 - WebRtc_Word16 envSum; // Filtered scaled envelope in subframes - WebRtc_Word16 vadThreshold; // Threshold for VAD decision - WebRtc_Word16 inActive; // Inactive time in milliseconds - WebRtc_Word16 msTooLow; // Milliseconds of speech at a too low level - WebRtc_Word16 msTooHigh; // Milliseconds of speech at a too high level - WebRtc_Word16 changeToSlowMode; // Change to slow mode after some time at target - WebRtc_Word16 firstCall; // First call to the process-function - WebRtc_Word16 msZero; // Milliseconds of zero input - WebRtc_Word16 msecSpeechOuterChange;// Min ms of speech between volume changes - WebRtc_Word16 msecSpeechInnerChange;// Min ms of speech between volume changes - WebRtc_Word16 activeSpeech; // Milliseconds of active speech - WebRtc_Word16 muteGuardMs; // Counter to prevent mute action - WebRtc_Word16 inQueue; // 10 ms batch indicator + int16_t Rxx16pos; // Current position in the Rxx16_vectorw32 + int16_t envSum; // Filtered scaled envelope in subframes + int16_t vadThreshold; // Threshold for VAD decision + int16_t inActive; // Inactive time in milliseconds + int16_t msTooLow; // Milliseconds of speech at a too low level + int16_t msTooHigh; // Milliseconds of speech at a too high level + int16_t changeToSlowMode; // Change to slow mode after some time at target + int16_t firstCall; // First call to the process-function + int16_t msZero; // Milliseconds of zero input + int16_t msecSpeechOuterChange;// Min ms of speech between volume changes + int16_t msecSpeechInnerChange;// Min ms of speech between volume changes + int16_t activeSpeech; // Milliseconds of active speech + int16_t muteGuardMs; // Counter to prevent mute action + int16_t inQueue; // 10 ms batch indicator // Microphone level variables - WebRtc_Word32 micRef; // Remember ref. mic level for virtual mic - WebRtc_UWord16 gainTableIdx; // Current position in virtual gain table - WebRtc_Word32 micGainIdx; // Gain index of mic level to increase slowly - WebRtc_Word32 micVol; // Remember volume between frames - WebRtc_Word32 maxLevel; // Max possible vol level, incl dig gain - WebRtc_Word32 maxAnalog; // Maximum possible analog volume level - WebRtc_Word32 maxInit; // Initial value of "max" - WebRtc_Word32 minLevel; // Minimum possible volume level - WebRtc_Word32 minOutput; // Minimum output volume level - WebRtc_Word32 zeroCtrlMax; // Remember max gain => don't amp low input + int32_t micRef; // Remember ref. 
mic level for virtual mic + uint16_t gainTableIdx; // Current position in virtual gain table + int32_t micGainIdx; // Gain index of mic level to increase slowly + int32_t micVol; // Remember volume between frames + int32_t maxLevel; // Max possible vol level, incl dig gain + int32_t maxAnalog; // Maximum possible analog volume level + int32_t maxInit; // Initial value of "max" + int32_t minLevel; // Minimum possible volume level + int32_t minOutput; // Minimum output volume level + int32_t zeroCtrlMax; // Remember max gain => don't amp low input - WebRtc_Word16 scale; // Scale factor for internal volume levels + int16_t scale; // Scale factor for internal volume levels #ifdef MIC_LEVEL_FEEDBACK - WebRtc_Word16 numBlocksMicLvlSat; - WebRtc_UWord8 micLvlSat; + int16_t numBlocksMicLvlSat; + uint8_t micLvlSat; #endif // Structs for VAD and digital_agc AgcVad_t vadMic; @@ -124,10 +124,10 @@ typedef struct #ifdef AGC_DEBUG FILE* fpt; FILE* agcLog; - WebRtc_Word32 fcount; + int32_t fcount; #endif - WebRtc_Word16 lowLevelSignal; + int16_t lowLevelSignal; } Agc_t; #endif // WEBRTC_MODULES_AUDIO_PROCESSING_AGC_MAIN_SOURCE_ANALOG_AGC_H_ diff --git a/webrtc/modules/audio_processing/agc/digital_agc.c b/webrtc/modules/audio_processing/agc/digital_agc.c index 3b4b39b9c..604635129 100644 --- a/webrtc/modules/audio_processing/agc/digital_agc.c +++ b/webrtc/modules/audio_processing/agc/digital_agc.c @@ -37,7 +37,7 @@ // Generator table for y=log2(1+e^x) in Q8. enum { kGenFuncTableSize = 128 }; -static const WebRtc_UWord16 kGenFuncTable[kGenFuncTableSize] = { +static const uint16_t kGenFuncTable[kGenFuncTableSize] = { 256, 485, 786, 1126, 1484, 1849, 2217, 2586, 2955, 3324, 3693, 4063, 4432, 4801, 5171, 5540, 5909, 6279, 6648, 7017, 7387, 7756, 8125, 8495, @@ -56,29 +56,29 @@ static const WebRtc_UWord16 kGenFuncTable[kGenFuncTableSize] = { 44320, 44689, 45058, 45428, 45797, 46166, 46536, 46905 }; -static const WebRtc_Word16 kAvgDecayTime = 250; // frames; < 3000 +static const int16_t kAvgDecayTime = 250; // frames; < 3000 -WebRtc_Word32 WebRtcAgc_CalculateGainTable(WebRtc_Word32 *gainTable, // Q16 - WebRtc_Word16 digCompGaindB, // Q0 - WebRtc_Word16 targetLevelDbfs,// Q0 - WebRtc_UWord8 limiterEnable, - WebRtc_Word16 analogTarget) // Q0 +int32_t WebRtcAgc_CalculateGainTable(int32_t *gainTable, // Q16 + int16_t digCompGaindB, // Q0 + int16_t targetLevelDbfs,// Q0 + uint8_t limiterEnable, + int16_t analogTarget) // Q0 { // This function generates the compressor gain table used in the fixed digital part. 
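For orientation, the Q16 entries built by this function are consumed in WebRtcAgc_ProcessDigital further down in this patch: the leading-zero count of the envelope level selects two adjacent table entries and a Q12 fraction interpolates between them. A compacted sketch of that lookup, using names from the patch (illustrative; requires <stdint.h> and the SPL header for WebRtcSpl_NormU32):

    /* gainTable[z] holds the Q16 gain for levels whose magnitude has z leading
     * zeros, so each step in z is a factor-of-two step in input level. */
    static int32_t LookUpGain(const int32_t gainTable[32], int32_t cur_level) {
        int zeros = (cur_level == 0) ? 31 : WebRtcSpl_NormU32((uint32_t)cur_level);
        int32_t rem  = (int32_t)(((uint32_t)cur_level << zeros) & 0x7FFFFFFF);
        int16_t frac = (int16_t)(rem >> 19);  /* Q12 position between entries */
        return gainTable[zeros]
             + (((gainTable[zeros - 1] - gainTable[zeros]) * frac) >> 12);
    }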
- WebRtc_UWord32 tmpU32no1, tmpU32no2, absInLevel, logApprox; - WebRtc_Word32 inLevel, limiterLvl; - WebRtc_Word32 tmp32, tmp32no1, tmp32no2, numFIX, den, y32; - const WebRtc_UWord16 kLog10 = 54426; // log2(10) in Q14 - const WebRtc_UWord16 kLog10_2 = 49321; // 10*log10(2) in Q14 - const WebRtc_UWord16 kLogE_1 = 23637; // log2(e) in Q14 - WebRtc_UWord16 constMaxGain; - WebRtc_UWord16 tmpU16, intPart, fracPart; - const WebRtc_Word16 kCompRatio = 3; - const WebRtc_Word16 kSoftLimiterLeft = 1; - WebRtc_Word16 limiterOffset = 0; // Limiter offset - WebRtc_Word16 limiterIdx, limiterLvlX; - WebRtc_Word16 constLinApprox, zeroGainLvl, maxGain, diffGain; - WebRtc_Word16 i, tmp16, tmp16no1; + uint32_t tmpU32no1, tmpU32no2, absInLevel, logApprox; + int32_t inLevel, limiterLvl; + int32_t tmp32, tmp32no1, tmp32no2, numFIX, den, y32; + const uint16_t kLog10 = 54426; // log2(10) in Q14 + const uint16_t kLog10_2 = 49321; // 10*log10(2) in Q14 + const uint16_t kLogE_1 = 23637; // log2(e) in Q14 + uint16_t constMaxGain; + uint16_t tmpU16, intPart, fracPart; + const int16_t kCompRatio = 3; + const int16_t kSoftLimiterLeft = 1; + int16_t limiterOffset = 0; // Limiter offset + int16_t limiterIdx, limiterLvlX; + int16_t constLinApprox, zeroGainLvl, maxGain, diffGain; + int16_t i, tmp16, tmp16no1; int zeros, zerosScale; // Constants @@ -117,7 +117,7 @@ WebRtc_Word32 WebRtcAgc_CalculateGainTable(WebRtc_Word32 *gainTable, // Q16 // limiterLvl = targetLevelDbfs + limiterOffset/compRatio limiterLvlX = analogTarget - limiterOffset; limiterIdx = 2 - + WebRtcSpl_DivW32W16ResW16(WEBRTC_SPL_LSHIFT_W32((WebRtc_Word32)limiterLvlX, 13), + + WebRtcSpl_DivW32W16ResW16(WEBRTC_SPL_LSHIFT_W32((int32_t)limiterLvlX, 13), WEBRTC_SPL_RSHIFT_U16(kLog10_2, 1)); tmp16no1 = WebRtcSpl_DivW32W16ResW16(limiterOffset + (kCompRatio >> 1), kCompRatio); limiterLvl = targetLevelDbfs + tmp16no1; @@ -139,22 +139,22 @@ WebRtc_Word32 WebRtcAgc_CalculateGainTable(WebRtc_Word32 *gainTable, // Q16 { // Calculate scaled input level (compressor): // inLevel = fix((-constLog10_2*(compRatio-1)*(1-i)+fix(compRatio/2))/compRatio) - tmp16 = (WebRtc_Word16)WEBRTC_SPL_MUL_16_16(kCompRatio - 1, i - 1); // Q0 + tmp16 = (int16_t)WEBRTC_SPL_MUL_16_16(kCompRatio - 1, i - 1); // Q0 tmp32 = WEBRTC_SPL_MUL_16_U16(tmp16, kLog10_2) + 1; // Q14 inLevel = WebRtcSpl_DivW32W16(tmp32, kCompRatio); // Q14 // Calculate diffGain-inLevel, to map using the genFuncTable - inLevel = WEBRTC_SPL_LSHIFT_W32((WebRtc_Word32)diffGain, 14) - inLevel; // Q14 + inLevel = WEBRTC_SPL_LSHIFT_W32((int32_t)diffGain, 14) - inLevel; // Q14 // Make calculations on abs(inLevel) and compensate for the sign afterwards. 
- absInLevel = (WebRtc_UWord32)WEBRTC_SPL_ABS_W32(inLevel); // Q14 + absInLevel = (uint32_t)WEBRTC_SPL_ABS_W32(inLevel); // Q14 // LUT with interpolation - intPart = (WebRtc_UWord16)WEBRTC_SPL_RSHIFT_U32(absInLevel, 14); - fracPart = (WebRtc_UWord16)(absInLevel & 0x00003FFF); // extract the fractional part + intPart = (uint16_t)WEBRTC_SPL_RSHIFT_U32(absInLevel, 14); + fracPart = (uint16_t)(absInLevel & 0x00003FFF); // extract the fractional part tmpU16 = kGenFuncTable[intPart + 1] - kGenFuncTable[intPart]; // Q8 tmpU32no1 = WEBRTC_SPL_UMUL_16_16(tmpU16, fracPart); // Q22 - tmpU32no1 += WEBRTC_SPL_LSHIFT_U32((WebRtc_UWord32)kGenFuncTable[intPart], 14); // Q22 + tmpU32no1 += WEBRTC_SPL_LSHIFT_U32((uint32_t)kGenFuncTable[intPart], 14); // Q22 logApprox = WEBRTC_SPL_RSHIFT_U32(tmpU32no1, 8); // Q14 // Compensate for negative exponent using the relation: // log2(1 + 2^-x) = log2(1 + 2^x) - x @@ -187,7 +187,7 @@ WebRtc_Word32 WebRtcAgc_CalculateGainTable(WebRtc_Word32 *gainTable, // Q16 } } numFIX = WEBRTC_SPL_LSHIFT_W32(WEBRTC_SPL_MUL_16_U16(maxGain, constMaxGain), 6); // Q14 - numFIX -= WEBRTC_SPL_MUL_32_16((WebRtc_Word32)logApprox, diffGain); // Q14 + numFIX -= WEBRTC_SPL_MUL_32_16((int32_t)logApprox, diffGain); // Q14 // Calculate ratio // Shift |numFIX| as much as possible. @@ -231,8 +231,8 @@ WebRtc_Word32 WebRtcAgc_CalculateGainTable(WebRtc_Word32 *gainTable, // Q16 // Calculate power if (tmp32 > 0) { - intPart = (WebRtc_Word16)WEBRTC_SPL_RSHIFT_W32(tmp32, 14); - fracPart = (WebRtc_UWord16)(tmp32 & 0x00003FFF); // in Q14 + intPart = (int16_t)WEBRTC_SPL_RSHIFT_W32(tmp32, 14); + fracPart = (uint16_t)(tmp32 & 0x00003FFF); // in Q14 if (WEBRTC_SPL_RSHIFT_W32(fracPart, 13)) { tmp16 = WEBRTC_SPL_LSHIFT_W16(2, 14) - constLinApprox; @@ -246,7 +246,7 @@ WebRtc_Word32 WebRtcAgc_CalculateGainTable(WebRtc_Word32 *gainTable, // Q16 tmp32no2 = WEBRTC_SPL_MUL_32_16(fracPart, tmp16); tmp32no2 = WEBRTC_SPL_RSHIFT_W32(tmp32no2, 13); } - fracPart = (WebRtc_UWord16)tmp32no2; + fracPart = (uint16_t)tmp32no2; gainTable[i] = WEBRTC_SPL_LSHIFT_W32(1, intPart) + WEBRTC_SPL_SHIFT_W32(fracPart, intPart - 14); } else @@ -258,7 +258,7 @@ WebRtc_Word32 WebRtcAgc_CalculateGainTable(WebRtc_Word32 *gainTable, // Q16 return 0; } -WebRtc_Word32 WebRtcAgc_InitDigital(DigitalAgc_t *stt, WebRtc_Word16 agcMode) +int32_t WebRtcAgc_InitDigital(DigitalAgc_t *stt, int16_t agcMode) { if (agcMode == kAgcModeFixedDigital) @@ -268,7 +268,7 @@ WebRtc_Word32 WebRtcAgc_InitDigital(DigitalAgc_t *stt, WebRtc_Word16 agcMode) } else { // start out with 0 dB gain - stt->capacitorSlow = 134217728; // (WebRtc_Word32)(0.125f * 32768.0f * 32768.0f); + stt->capacitorSlow = 134217728; // (int32_t)(0.125f * 32768.0f * 32768.0f); } stt->capacitorFast = 0; stt->gain = 65536; @@ -285,8 +285,8 @@ WebRtc_Word32 WebRtcAgc_InitDigital(DigitalAgc_t *stt, WebRtc_Word16 agcMode) return 0; } -WebRtc_Word32 WebRtcAgc_AddFarendToDigital(DigitalAgc_t *stt, const WebRtc_Word16 *in_far, - WebRtc_Word16 nrSamples) +int32_t WebRtcAgc_AddFarendToDigital(DigitalAgc_t *stt, const int16_t *in_far, + int16_t nrSamples) { // Check for valid pointer if (&stt->vadFarend == NULL) @@ -300,26 +300,26 @@ WebRtc_Word32 WebRtcAgc_AddFarendToDigital(DigitalAgc_t *stt, const WebRtc_Word1 return 0; } -WebRtc_Word32 WebRtcAgc_ProcessDigital(DigitalAgc_t *stt, const WebRtc_Word16 *in_near, - const WebRtc_Word16 *in_near_H, WebRtc_Word16 *out, - WebRtc_Word16 *out_H, WebRtc_UWord32 FS, - WebRtc_Word16 lowlevelSignal) +int32_t WebRtcAgc_ProcessDigital(DigitalAgc_t *stt, const int16_t 
*in_near, + const int16_t *in_near_H, int16_t *out, + int16_t *out_H, uint32_t FS, + int16_t lowlevelSignal) { // array for gains (one value per ms, incl start & end) - WebRtc_Word32 gains[11]; + int32_t gains[11]; - WebRtc_Word32 out_tmp, tmp32; - WebRtc_Word32 env[10]; - WebRtc_Word32 nrg, max_nrg; - WebRtc_Word32 cur_level; - WebRtc_Word32 gain32, delta; - WebRtc_Word16 logratio; - WebRtc_Word16 lower_thr, upper_thr; - WebRtc_Word16 zeros, zeros_fast, frac; - WebRtc_Word16 decay; - WebRtc_Word16 gate, gain_adj; - WebRtc_Word16 k, n; - WebRtc_Word16 L, L2; // samples/subframe + int32_t out_tmp, tmp32; + int32_t env[10]; + int32_t nrg, max_nrg; + int32_t cur_level; + int32_t gain32, delta; + int16_t logratio; + int16_t lower_thr, upper_thr; + int16_t zeros, zeros_fast, frac; + int16_t decay; + int16_t gate, gain_adj; + int16_t k, n; + int16_t L, L2; // samples/subframe // determine number of samples per ms if (FS == 8000) @@ -343,13 +343,13 @@ WebRtc_Word32 WebRtcAgc_ProcessDigital(DigitalAgc_t *stt, const WebRtc_Word16 *i if (in_near != out) { // Only needed if they don't already point to the same place. - memcpy(out, in_near, 10 * L * sizeof(WebRtc_Word16)); + memcpy(out, in_near, 10 * L * sizeof(int16_t)); } if (FS == 32000) { if (in_near_H != out_H) { - memcpy(out_H, in_near_H, 10 * L * sizeof(WebRtc_Word16)); + memcpy(out_H, in_near_H, 10 * L * sizeof(int16_t)); } } // VAD for near end @@ -359,7 +359,7 @@ WebRtc_Word32 WebRtcAgc_ProcessDigital(DigitalAgc_t *stt, const WebRtc_Word16 *i if (stt->vadFarend.counter > 10) { tmp32 = WEBRTC_SPL_MUL_16_16(3, logratio); - logratio = (WebRtc_Word16)WEBRTC_SPL_RSHIFT_W32(tmp32 - stt->vadFarend.logRatio, 2); + logratio = (int16_t)WEBRTC_SPL_RSHIFT_W32(tmp32 - stt->vadFarend.logRatio, 2); } // Determine decay factor depending on VAD @@ -376,11 +376,11 @@ WebRtc_Word32 WebRtcAgc_ProcessDigital(DigitalAgc_t *stt, const WebRtc_Word16 *i decay = 0; } else { - // decay = (WebRtc_Word16)(((lower_thr - logratio) + // decay = (int16_t)(((lower_thr - logratio) // * (2^27/(DecayTime*(upper_thr-lower_thr)))) >> 10); // SUBSTITUTED: 2^27/(DecayTime*(upper_thr-lower_thr)) -> 65 tmp32 = WEBRTC_SPL_MUL_16_16((lower_thr - logratio), 65); - decay = (WebRtc_Word16)WEBRTC_SPL_RSHIFT_W32(tmp32, 10); + decay = (int16_t)WEBRTC_SPL_RSHIFT_W32(tmp32, 10); } // adjust decay factor for long silence (detected as low standard deviation) @@ -392,9 +392,9 @@ WebRtc_Word32 WebRtcAgc_ProcessDigital(DigitalAgc_t *stt, const WebRtc_Word16 *i decay = 0; } else if (stt->vadNearend.stdLongTerm < 8096) { - // decay = (WebRtc_Word16)(((stt->vadNearend.stdLongTerm - 4000) * decay) >> 12); + // decay = (int16_t)(((stt->vadNearend.stdLongTerm - 4000) * decay) >> 12); tmp32 = WEBRTC_SPL_MUL_16_16((stt->vadNearend.stdLongTerm - 4000), decay); - decay = (WebRtc_Word16)WEBRTC_SPL_RSHIFT_W32(tmp32, 12); + decay = (int16_t)WEBRTC_SPL_RSHIFT_W32(tmp32, 12); } if (lowlevelSignal != 0) @@ -457,13 +457,13 @@ WebRtc_Word32 WebRtcAgc_ProcessDigital(DigitalAgc_t *stt, const WebRtc_Word16 *i } // Translate signal level into gain, using a piecewise linear approximation // find number of leading zeros - zeros = WebRtcSpl_NormU32((WebRtc_UWord32)cur_level); + zeros = WebRtcSpl_NormU32((uint32_t)cur_level); if (cur_level == 0) { zeros = 31; } tmp32 = (WEBRTC_SPL_LSHIFT_W32(cur_level, zeros) & 0x7FFFFFFF); - frac = (WebRtc_Word16)WEBRTC_SPL_RSHIFT_W32(tmp32, 19); // Q12 + frac = (int16_t)WEBRTC_SPL_RSHIFT_W32(tmp32, 19); // Q12 tmp32 = WEBRTC_SPL_MUL((stt->gainTable[zeros-1] - stt->gainTable[zeros]), 
frac); gains[k + 1] = stt->gainTable[zeros] + WEBRTC_SPL_RSHIFT_W32(tmp32, 12); #ifdef AGC_DEBUG @@ -477,14 +477,14 @@ WebRtc_Word32 WebRtcAgc_ProcessDigital(DigitalAgc_t *stt, const WebRtc_Word16 *i // Gate processing (lower gain during absence of speech) zeros = WEBRTC_SPL_LSHIFT_W16(zeros, 9) - WEBRTC_SPL_RSHIFT_W16(frac, 3); // find number of leading zeros - zeros_fast = WebRtcSpl_NormU32((WebRtc_UWord32)stt->capacitorFast); + zeros_fast = WebRtcSpl_NormU32((uint32_t)stt->capacitorFast); if (stt->capacitorFast == 0) { zeros_fast = 31; } tmp32 = (WEBRTC_SPL_LSHIFT_W32(stt->capacitorFast, zeros_fast) & 0x7FFFFFFF); zeros_fast = WEBRTC_SPL_LSHIFT_W16(zeros_fast, 9); - zeros_fast -= (WebRtc_Word16)WEBRTC_SPL_RSHIFT_W32(tmp32, 22); + zeros_fast -= (int16_t)WEBRTC_SPL_RSHIFT_W32(tmp32, 22); gate = 1000 + zeros_fast - zeros - stt->vadNearend.stdShortTerm; @@ -494,7 +494,7 @@ WebRtc_Word32 WebRtcAgc_ProcessDigital(DigitalAgc_t *stt, const WebRtc_Word16 *i } else { tmp32 = WEBRTC_SPL_MUL_16_16(stt->gatePrevious, 7); - gate = (WebRtc_Word16)WEBRTC_SPL_RSHIFT_W32((WebRtc_Word32)gate + tmp32, 3); + gate = (int16_t)WEBRTC_SPL_RSHIFT_W32((int32_t)gate + tmp32, 3); stt->gatePrevious = gate; } // gate < 0 -> no gate @@ -537,7 +537,7 @@ WebRtc_Word32 WebRtcAgc_ProcessDigital(DigitalAgc_t *stt, const WebRtc_Word16 *i gain32 = WEBRTC_SPL_MUL(gain32, gain32); // check for overflow while (AGC_MUL32(WEBRTC_SPL_RSHIFT_W32(env[k], 12) + 1, gain32) - > WEBRTC_SPL_SHIFT_W32((WebRtc_Word32)32767, 2 * (1 - zeros + 10))) + > WEBRTC_SPL_SHIFT_W32((int32_t)32767, 2 * (1 - zeros + 10))) { // multiply by 253/256 ==> -0.1 dB if (gains[k + 1] > 8388607) @@ -571,36 +571,36 @@ WebRtc_Word32 WebRtcAgc_ProcessDigital(DigitalAgc_t *stt, const WebRtc_Word16 *i for (n = 0; n < L; n++) { // For lower band - tmp32 = WEBRTC_SPL_MUL((WebRtc_Word32)out[n], WEBRTC_SPL_RSHIFT_W32(gain32 + 127, 7)); + tmp32 = WEBRTC_SPL_MUL((int32_t)out[n], WEBRTC_SPL_RSHIFT_W32(gain32 + 127, 7)); out_tmp = WEBRTC_SPL_RSHIFT_W32(tmp32 , 16); if (out_tmp > 4095) { - out[n] = (WebRtc_Word16)32767; + out[n] = (int16_t)32767; } else if (out_tmp < -4096) { - out[n] = (WebRtc_Word16)-32768; + out[n] = (int16_t)-32768; } else { - tmp32 = WEBRTC_SPL_MUL((WebRtc_Word32)out[n], WEBRTC_SPL_RSHIFT_W32(gain32, 4)); - out[n] = (WebRtc_Word16)WEBRTC_SPL_RSHIFT_W32(tmp32 , 16); + tmp32 = WEBRTC_SPL_MUL((int32_t)out[n], WEBRTC_SPL_RSHIFT_W32(gain32, 4)); + out[n] = (int16_t)WEBRTC_SPL_RSHIFT_W32(tmp32 , 16); } // For higher band if (FS == 32000) { - tmp32 = WEBRTC_SPL_MUL((WebRtc_Word32)out_H[n], + tmp32 = WEBRTC_SPL_MUL((int32_t)out_H[n], WEBRTC_SPL_RSHIFT_W32(gain32 + 127, 7)); out_tmp = WEBRTC_SPL_RSHIFT_W32(tmp32 , 16); if (out_tmp > 4095) { - out_H[n] = (WebRtc_Word16)32767; + out_H[n] = (int16_t)32767; } else if (out_tmp < -4096) { - out_H[n] = (WebRtc_Word16)-32768; + out_H[n] = (int16_t)-32768; } else { - tmp32 = WEBRTC_SPL_MUL((WebRtc_Word32)out_H[n], + tmp32 = WEBRTC_SPL_MUL((int32_t)out_H[n], WEBRTC_SPL_RSHIFT_W32(gain32, 4)); - out_H[n] = (WebRtc_Word16)WEBRTC_SPL_RSHIFT_W32(tmp32 , 16); + out_H[n] = (int16_t)WEBRTC_SPL_RSHIFT_W32(tmp32 , 16); } } // @@ -616,15 +616,15 @@ WebRtc_Word32 WebRtcAgc_ProcessDigital(DigitalAgc_t *stt, const WebRtc_Word16 *i for (n = 0; n < L; n++) { // For lower band - tmp32 = WEBRTC_SPL_MUL((WebRtc_Word32)out[k * L + n], + tmp32 = WEBRTC_SPL_MUL((int32_t)out[k * L + n], WEBRTC_SPL_RSHIFT_W32(gain32, 4)); - out[k * L + n] = (WebRtc_Word16)WEBRTC_SPL_RSHIFT_W32(tmp32 , 16); + out[k * L + n] = (int16_t)WEBRTC_SPL_RSHIFT_W32(tmp32 
, 16); // For higher band if (FS == 32000) { - tmp32 = WEBRTC_SPL_MUL((WebRtc_Word32)out_H[k * L + n], + tmp32 = WEBRTC_SPL_MUL((int32_t)out_H[k * L + n], WEBRTC_SPL_RSHIFT_W32(gain32, 4)); - out_H[k * L + n] = (WebRtc_Word16)WEBRTC_SPL_RSHIFT_W32(tmp32 , 16); + out_H[k * L + n] = (int16_t)WEBRTC_SPL_RSHIFT_W32(tmp32 , 16); } gain32 += delta; } @@ -635,7 +635,7 @@ WebRtc_Word32 WebRtcAgc_ProcessDigital(DigitalAgc_t *stt, const WebRtc_Word16 *i void WebRtcAgc_InitVad(AgcVad_t *state) { - WebRtc_Word16 k; + int16_t k; state->HPstate = 0; // state of high pass filter state->logRatio = 0; // log( P(active) / P(inactive) ) @@ -661,17 +661,17 @@ void WebRtcAgc_InitVad(AgcVad_t *state) } } -WebRtc_Word16 WebRtcAgc_ProcessVad(AgcVad_t *state, // (i) VAD state - const WebRtc_Word16 *in, // (i) Speech signal - WebRtc_Word16 nrSamples) // (i) number of samples +int16_t WebRtcAgc_ProcessVad(AgcVad_t *state, // (i) VAD state + const int16_t *in, // (i) Speech signal + int16_t nrSamples) // (i) number of samples { - WebRtc_Word32 out, nrg, tmp32, tmp32b; - WebRtc_UWord16 tmpU16; - WebRtc_Word16 k, subfr, tmp16; - WebRtc_Word16 buf1[8]; - WebRtc_Word16 buf2[4]; - WebRtc_Word16 HPstate; - WebRtc_Word16 zeros, dB; + int32_t out, nrg, tmp32, tmp32b; + uint16_t tmpU16; + int16_t k, subfr, tmp16; + int16_t buf1[8]; + int16_t buf2[4]; + int16_t HPstate; + int16_t zeros, dB; // process in 10 sub frames of 1 ms (to save on memory) nrg = 0; @@ -683,9 +683,9 @@ WebRtc_Word16 WebRtcAgc_ProcessVad(AgcVad_t *state, // (i) VAD state { for (k = 0; k < 8; k++) { - tmp32 = (WebRtc_Word32)in[2 * k] + (WebRtc_Word32)in[2 * k + 1]; + tmp32 = (int32_t)in[2 * k] + (int32_t)in[2 * k + 1]; tmp32 = WEBRTC_SPL_RSHIFT_W32(tmp32, 1); - buf1[k] = (WebRtc_Word16)tmp32; + buf1[k] = (int16_t)tmp32; } in += 16; @@ -701,7 +701,7 @@ WebRtc_Word16 WebRtcAgc_ProcessVad(AgcVad_t *state, // (i) VAD state { out = buf2[k] + HPstate; tmp32 = WEBRTC_SPL_MUL(600, out); - HPstate = (WebRtc_Word16)(WEBRTC_SPL_RSHIFT_W32(tmp32, 10) - buf2[k]); + HPstate = (int16_t)(WEBRTC_SPL_RSHIFT_W32(tmp32, 10) - buf2[k]); tmp32 = WEBRTC_SPL_MUL(out, out); nrg += WEBRTC_SPL_RSHIFT_W32(tmp32, 6); } @@ -745,8 +745,8 @@ WebRtc_Word16 WebRtcAgc_ProcessVad(AgcVad_t *state, // (i) VAD state } // update short-term estimate of mean energy level (Q10) - tmp32 = (WEBRTC_SPL_MUL_16_16(state->meanShortTerm, 15) + (WebRtc_Word32)dB); - state->meanShortTerm = (WebRtc_Word16)WEBRTC_SPL_RSHIFT_W32(tmp32, 4); + tmp32 = (WEBRTC_SPL_MUL_16_16(state->meanShortTerm, 15) + (int32_t)dB); + state->meanShortTerm = (int16_t)WEBRTC_SPL_RSHIFT_W32(tmp32, 4); // update short-term estimate of variance in energy level (Q8) tmp32 = WEBRTC_SPL_RSHIFT_W32(WEBRTC_SPL_MUL_16_16(dB, dB), 12); @@ -756,10 +756,10 @@ WebRtc_Word16 WebRtcAgc_ProcessVad(AgcVad_t *state, // (i) VAD state // update short-term estimate of standard deviation in energy level (Q10) tmp32 = WEBRTC_SPL_MUL_16_16(state->meanShortTerm, state->meanShortTerm); tmp32 = WEBRTC_SPL_LSHIFT_W32(state->varianceShortTerm, 12) - tmp32; - state->stdShortTerm = (WebRtc_Word16)WebRtcSpl_Sqrt(tmp32); + state->stdShortTerm = (int16_t)WebRtcSpl_Sqrt(tmp32); // update long-term estimate of mean energy level (Q10) - tmp32 = WEBRTC_SPL_MUL_16_16(state->meanLongTerm, state->counter) + (WebRtc_Word32)dB; + tmp32 = WEBRTC_SPL_MUL_16_16(state->meanLongTerm, state->counter) + (int32_t)dB; state->meanLongTerm = WebRtcSpl_DivW32W16ResW16(tmp32, WEBRTC_SPL_ADD_SAT_W16(state->counter, 1)); @@ -772,17 +772,17 @@ WebRtc_Word16 WebRtcAgc_ProcessVad(AgcVad_t 
*state, // (i) VAD state // update long-term estimate of standard deviation in energy level (Q10) tmp32 = WEBRTC_SPL_MUL_16_16(state->meanLongTerm, state->meanLongTerm); tmp32 = WEBRTC_SPL_LSHIFT_W32(state->varianceLongTerm, 12) - tmp32; - state->stdLongTerm = (WebRtc_Word16)WebRtcSpl_Sqrt(tmp32); + state->stdLongTerm = (int16_t)WebRtcSpl_Sqrt(tmp32); // update voice activity measure (Q10) tmp16 = WEBRTC_SPL_LSHIFT_W16(3, 12); tmp32 = WEBRTC_SPL_MUL_16_16(tmp16, (dB - state->meanLongTerm)); tmp32 = WebRtcSpl_DivW32W16(tmp32, state->stdLongTerm); - tmpU16 = WEBRTC_SPL_LSHIFT_U16((WebRtc_UWord16)13, 12); + tmpU16 = WEBRTC_SPL_LSHIFT_U16((uint16_t)13, 12); tmp32b = WEBRTC_SPL_MUL_16_U16(state->logRatio, tmpU16); tmp32 += WEBRTC_SPL_RSHIFT_W32(tmp32b, 10); - state->logRatio = (WebRtc_Word16)WEBRTC_SPL_RSHIFT_W32(tmp32, 6); + state->logRatio = (int16_t)WEBRTC_SPL_RSHIFT_W32(tmp32, 6); // limit if (state->logRatio > 2048) diff --git a/webrtc/modules/audio_processing/agc/digital_agc.h b/webrtc/modules/audio_processing/agc/digital_agc.h index 240b22066..573f3eca0 100644 --- a/webrtc/modules/audio_processing/agc/digital_agc.h +++ b/webrtc/modules/audio_processing/agc/digital_agc.h @@ -24,26 +24,26 @@ typedef struct { - WebRtc_Word32 downState[8]; - WebRtc_Word16 HPstate; - WebRtc_Word16 counter; - WebRtc_Word16 logRatio; // log( P(active) / P(inactive) ) (Q10) - WebRtc_Word16 meanLongTerm; // Q10 - WebRtc_Word32 varianceLongTerm; // Q8 - WebRtc_Word16 stdLongTerm; // Q10 - WebRtc_Word16 meanShortTerm; // Q10 - WebRtc_Word32 varianceShortTerm; // Q8 - WebRtc_Word16 stdShortTerm; // Q10 + int32_t downState[8]; + int16_t HPstate; + int16_t counter; + int16_t logRatio; // log( P(active) / P(inactive) ) (Q10) + int16_t meanLongTerm; // Q10 + int32_t varianceLongTerm; // Q8 + int16_t stdLongTerm; // Q10 + int16_t meanShortTerm; // Q10 + int32_t varianceShortTerm; // Q8 + int16_t stdShortTerm; // Q10 } AgcVad_t; // total = 54 bytes typedef struct { - WebRtc_Word32 capacitorSlow; - WebRtc_Word32 capacitorFast; - WebRtc_Word32 gain; - WebRtc_Word32 gainTable[32]; - WebRtc_Word16 gatePrevious; - WebRtc_Word16 agcMode; + int32_t capacitorSlow; + int32_t capacitorFast; + int32_t gain; + int32_t gainTable[32]; + int16_t gatePrevious; + int16_t agcMode; AgcVad_t vadNearend; AgcVad_t vadFarend; #ifdef AGC_DEBUG @@ -52,25 +52,27 @@ typedef struct #endif } DigitalAgc_t; -WebRtc_Word32 WebRtcAgc_InitDigital(DigitalAgc_t *digitalAgcInst, WebRtc_Word16 agcMode); +int32_t WebRtcAgc_InitDigital(DigitalAgc_t *digitalAgcInst, int16_t agcMode); -WebRtc_Word32 WebRtcAgc_ProcessDigital(DigitalAgc_t *digitalAgcInst, const WebRtc_Word16 *inNear, - const WebRtc_Word16 *inNear_H, WebRtc_Word16 *out, - WebRtc_Word16 *out_H, WebRtc_UWord32 FS, - WebRtc_Word16 lowLevelSignal); +int32_t WebRtcAgc_ProcessDigital(DigitalAgc_t *digitalAgcInst, + const int16_t *inNear, const int16_t *inNear_H, + int16_t *out, int16_t *out_H, uint32_t FS, + int16_t lowLevelSignal); -WebRtc_Word32 WebRtcAgc_AddFarendToDigital(DigitalAgc_t *digitalAgcInst, const WebRtc_Word16 *inFar, - WebRtc_Word16 nrSamples); +int32_t WebRtcAgc_AddFarendToDigital(DigitalAgc_t *digitalAgcInst, + const int16_t *inFar, + int16_t nrSamples); void WebRtcAgc_InitVad(AgcVad_t *vadInst); -WebRtc_Word16 WebRtcAgc_ProcessVad(AgcVad_t *vadInst, // (i) VAD state - const WebRtc_Word16 *in, // (i) Speech signal - WebRtc_Word16 nrSamples); // (i) number of samples +int16_t WebRtcAgc_ProcessVad(AgcVad_t *vadInst, // (i) VAD state + const int16_t *in, // (i) Speech signal + int16_t 
nrSamples); // (i) number of samples -WebRtc_Word32 WebRtcAgc_CalculateGainTable(WebRtc_Word32 *gainTable, // Q16 - WebRtc_Word16 compressionGaindB, // Q0 (in dB) - WebRtc_Word16 targetLevelDbfs,// Q0 (in dB) - WebRtc_UWord8 limiterEnable, WebRtc_Word16 analogTarget); +int32_t WebRtcAgc_CalculateGainTable(int32_t *gainTable, // Q16 + int16_t compressionGaindB, // Q0 (in dB) + int16_t targetLevelDbfs,// Q0 (in dB) + uint8_t limiterEnable, + int16_t analogTarget); #endif // WEBRTC_MODULES_AUDIO_PROCESSING_AGC_MAIN_SOURCE_ANALOG_AGC_H_ diff --git a/webrtc/modules/audio_processing/agc/include/gain_control.h b/webrtc/modules/audio_processing/agc/include/gain_control.h index 8af5c718e..1ed06c0a9 100644 --- a/webrtc/modules/audio_processing/agc/include/gain_control.h +++ b/webrtc/modules/audio_processing/agc/include/gain_control.h @@ -39,9 +39,9 @@ enum typedef struct { - WebRtc_Word16 targetLevelDbfs; // default 3 (-3 dBOv) - WebRtc_Word16 compressionGaindB; // default 9 dB - WebRtc_UWord8 limiterEnable; // default kAgcTrue (on) + int16_t targetLevelDbfs; // default 3 (-3 dBOv) + int16_t compressionGaindB; // default 9 dB + uint8_t limiterEnable; // default kAgcTrue (on) } WebRtcAgc_config_t; #if defined(__cplusplus) @@ -65,8 +65,8 @@ extern "C" * : -1 - Error */ int WebRtcAgc_AddFarend(void* agcInst, - const WebRtc_Word16* inFar, - WebRtc_Word16 samples); + const int16_t* inFar, + int16_t samples); /* * This function processes a 10/20ms frame of microphone speech to determine @@ -92,9 +92,9 @@ int WebRtcAgc_AddFarend(void* agcInst, * : -1 - Error */ int WebRtcAgc_AddMic(void* agcInst, - WebRtc_Word16* inMic, - WebRtc_Word16* inMic_H, - WebRtc_Word16 samples); + int16_t* inMic, + int16_t* inMic_H, + int16_t samples); /* * This function replaces the analog microphone with a virtual one. 
@@ -123,11 +123,11 @@ int WebRtcAgc_AddMic(void* agcInst, * : -1 - Error */ int WebRtcAgc_VirtualMic(void* agcInst, - WebRtc_Word16* inMic, - WebRtc_Word16* inMic_H, - WebRtc_Word16 samples, - WebRtc_Word32 micLevelIn, - WebRtc_Word32* micLevelOut); + int16_t* inMic, + int16_t* inMic_H, + int16_t samples, + int32_t micLevelIn, + int32_t* micLevelOut); /* * This function processes a 10/20ms frame and adjusts (normalizes) the gain @@ -168,15 +168,15 @@ int WebRtcAgc_VirtualMic(void* agcInst, * : -1 - Error */ int WebRtcAgc_Process(void* agcInst, - const WebRtc_Word16* inNear, - const WebRtc_Word16* inNear_H, - WebRtc_Word16 samples, - WebRtc_Word16* out, - WebRtc_Word16* out_H, - WebRtc_Word32 inMicLevel, - WebRtc_Word32* outMicLevel, - WebRtc_Word16 echo, - WebRtc_UWord8* saturationWarning); + const int16_t* inNear, + const int16_t* inNear_H, + int16_t samples, + int16_t* out, + int16_t* out_H, + int32_t inMicLevel, + int32_t* outMicLevel, + int16_t echo, + uint8_t* saturationWarning); /* * This function sets the config parameters (targetLevelDbfs, @@ -247,10 +247,10 @@ int WebRtcAgc_Free(void *agcInst); * -1 - Error */ int WebRtcAgc_Init(void *agcInst, - WebRtc_Word32 minLevel, - WebRtc_Word32 maxLevel, - WebRtc_Word16 agcMode, - WebRtc_UWord32 fs); + int32_t minLevel, + int32_t maxLevel, + int16_t agcMode, + uint32_t fs); #if defined(__cplusplus) } diff --git a/webrtc/modules/audio_processing/audio_buffer.cc b/webrtc/modules/audio_processing/audio_buffer.cc index aee9f68cd..db1f2eb57 100644 --- a/webrtc/modules/audio_processing/audio_buffer.cc +++ b/webrtc/modules/audio_processing/audio_buffer.cc @@ -54,10 +54,10 @@ struct SplitAudioChannel { int16_t low_pass_data[kSamplesPer16kHzChannel]; int16_t high_pass_data[kSamplesPer16kHzChannel]; - WebRtc_Word32 analysis_filter_state1[6]; - WebRtc_Word32 analysis_filter_state2[6]; - WebRtc_Word32 synthesis_filter_state1[6]; - WebRtc_Word32 synthesis_filter_state2[6]; + int32_t analysis_filter_state1[6]; + int32_t analysis_filter_state2[6]; + int32_t synthesis_filter_state1[6]; + int32_t synthesis_filter_state2[6]; }; // TODO(andrew): check range of input parameters? 
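The renames in this patch are purely textual: the legacy WebRtc_* integer names were aliases of the <stdint.h> fixed-width types, so structs such as SplitAudioChannel above keep their exact layout. A sketch of the assumed mapping (the real aliases lived in webrtc/typedefs.h):

/* Assumed legacy aliases, per webrtc/typedefs.h at the time; under this
   mapping every WebRtc_* -> stdint rename is a no-op for size, signedness
   and ABI. */
#include <stdint.h>
typedef int16_t  WebRtc_Word16;
typedef int32_t  WebRtc_Word32;
typedef uint8_t  WebRtc_UWord8;
typedef uint16_t WebRtc_UWord16;
typedef uint32_t WebRtc_UWord32;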
@@ -142,22 +142,22 @@ int16_t* AudioBuffer::low_pass_reference(int channel) const { return low_pass_reference_channels_[channel].data; } -WebRtc_Word32* AudioBuffer::analysis_filter_state1(int channel) const { +int32_t* AudioBuffer::analysis_filter_state1(int channel) const { assert(channel >= 0 && channel < num_channels_); return split_channels_[channel].analysis_filter_state1; } -WebRtc_Word32* AudioBuffer::analysis_filter_state2(int channel) const { +int32_t* AudioBuffer::analysis_filter_state2(int channel) const { assert(channel >= 0 && channel < num_channels_); return split_channels_[channel].analysis_filter_state2; } -WebRtc_Word32* AudioBuffer::synthesis_filter_state1(int channel) const { +int32_t* AudioBuffer::synthesis_filter_state1(int channel) const { assert(channel >= 0 && channel < num_channels_); return split_channels_[channel].synthesis_filter_state1; } -WebRtc_Word32* AudioBuffer::synthesis_filter_state2(int channel) const { +int32_t* AudioBuffer::synthesis_filter_state2(int channel) const { assert(channel >= 0 && channel < num_channels_); return split_channels_[channel].synthesis_filter_state2; } diff --git a/webrtc/modules/audio_processing/audio_processing_impl.cc b/webrtc/modules/audio_processing/audio_processing_impl.cc index 0c7489341..922490b7c 100644 --- a/webrtc/modules/audio_processing/audio_processing_impl.cc +++ b/webrtc/modules/audio_processing/audio_processing_impl.cc @@ -575,7 +575,7 @@ VoiceDetection* AudioProcessingImpl::voice_detection() const { return voice_detection_; } -WebRtc_Word32 AudioProcessingImpl::ChangeUniqueId(const WebRtc_Word32 id) { +int32_t AudioProcessingImpl::ChangeUniqueId(const int32_t id) { CriticalSectionScoped crit_scoped(crit_); id_ = id; diff --git a/webrtc/modules/audio_processing/audio_processing_impl.h b/webrtc/modules/audio_processing/audio_processing_impl.h index 81e5ccb33..1a3c6ea64 100644 --- a/webrtc/modules/audio_processing/audio_processing_impl.h +++ b/webrtc/modules/audio_processing/audio_processing_impl.h @@ -82,7 +82,7 @@ class AudioProcessingImpl : public AudioProcessing { virtual VoiceDetection* voice_detection() const; // Module methods. - virtual WebRtc_Word32 ChangeUniqueId(const WebRtc_Word32 id); + virtual int32_t ChangeUniqueId(const int32_t id); private: bool is_data_processed() const; diff --git a/webrtc/modules/audio_processing/echo_cancellation_impl.cc b/webrtc/modules/audio_processing/echo_cancellation_impl.cc index 09d210c9c..a32fb3364 100644 --- a/webrtc/modules/audio_processing/echo_cancellation_impl.cc +++ b/webrtc/modules/audio_processing/echo_cancellation_impl.cc @@ -24,7 +24,7 @@ namespace webrtc { typedef void Handle; namespace { -WebRtc_Word16 MapSetting(EchoCancellation::SuppressionLevel level) { +int16_t MapSetting(EchoCancellation::SuppressionLevel level) { switch (level) { case EchoCancellation::kLowSuppression: return kAecNlpConservative; @@ -86,7 +86,7 @@ int EchoCancellationImpl::ProcessRenderAudio(const AudioBuffer* audio) { err = WebRtcAec_BufferFarend( my_handle, audio->low_pass_split_data(j), - static_cast<WebRtc_Word16>(audio->samples_per_split_channel())); + static_cast<int16_t>(audio->samples_per_split_channel())); if (err != apm_->kNoError) { return GetHandleError(my_handle); // TODO(ajm): warning possible?
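The cast of samples_per_split_channel() down to int16_t above is a narrowing conversion; it stays safe because a split channel carries at most 160 samples per 10 ms frame, which the AGC render path below asserts. A hypothetical helper (not in the patch) making the check explicit:

#include <assert.h>
#include <stdint.h>

/* Hypothetical checked narrowing: split channels hold <= 160 samples
   (16 kHz band, 10 ms), so the int16_t conversion cannot truncate. */
static int16_t checked_sample_count(int samples) {
  assert(samples >= 0 && samples <= 160);
  return (int16_t)samples;
}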
@@ -129,7 +129,7 @@ int EchoCancellationImpl::ProcessCaptureAudio(AudioBuffer* audio) { audio->high_pass_split_data(i), audio->low_pass_split_data(i), audio->high_pass_split_data(i), - static_cast<WebRtc_Word16>(audio->samples_per_split_channel()), + static_cast<int16_t>(audio->samples_per_split_channel()), apm_->stream_delay_ms(), stream_drift_samples_); diff --git a/webrtc/modules/audio_processing/echo_control_mobile_impl.cc b/webrtc/modules/audio_processing/echo_control_mobile_impl.cc index 5b88aa5fd..04adefe35 100644 --- a/webrtc/modules/audio_processing/echo_control_mobile_impl.cc +++ b/webrtc/modules/audio_processing/echo_control_mobile_impl.cc @@ -24,7 +24,7 @@ namespace webrtc { typedef void Handle; namespace { -WebRtc_Word16 MapSetting(EchoControlMobile::RoutingMode mode) { +int16_t MapSetting(EchoControlMobile::RoutingMode mode) { switch (mode) { case EchoControlMobile::kQuietEarpieceOrHeadset: return 0; @@ -95,7 +95,7 @@ int EchoControlMobileImpl::ProcessRenderAudio(const AudioBuffer* audio) { err = WebRtcAecm_BufferFarend( my_handle, audio->low_pass_split_data(j), - static_cast<WebRtc_Word16>(audio->samples_per_split_channel())); + static_cast<int16_t>(audio->samples_per_split_channel())); if (err != apm_->kNoError) { return GetHandleError(my_handle); // TODO(ajm): warning possible? @@ -127,8 +127,8 @@ int EchoControlMobileImpl::ProcessCaptureAudio(AudioBuffer* audio) { for (int i = 0; i < audio->num_channels(); i++) { // TODO(ajm): improve how this works, possibly inside AECM. // This is kind of hacked up. - WebRtc_Word16* noisy = audio->low_pass_reference(i); - WebRtc_Word16* clean = audio->low_pass_split_data(i); + int16_t* noisy = audio->low_pass_reference(i); + int16_t* clean = audio->low_pass_split_data(i); if (noisy == NULL) { noisy = clean; clean = NULL; @@ -140,7 +140,7 @@ int EchoControlMobileImpl::ProcessCaptureAudio(AudioBuffer* audio) { noisy, clean, audio->low_pass_split_data(i), - static_cast<WebRtc_Word16>(audio->samples_per_split_channel()), + static_cast<int16_t>(audio->samples_per_split_channel()), apm_->stream_delay_ms()); if (err != apm_->kNoError) { diff --git a/webrtc/modules/audio_processing/gain_control_impl.cc b/webrtc/modules/audio_processing/gain_control_impl.cc index a518ab5e3..01a372ac7 100644 --- a/webrtc/modules/audio_processing/gain_control_impl.cc +++ b/webrtc/modules/audio_processing/gain_control_impl.cc @@ -23,7 +23,7 @@ namespace webrtc { typedef void Handle; namespace { -WebRtc_Word16 MapSetting(GainControl::Mode mode) { +int16_t MapSetting(GainControl::Mode mode) { switch (mode) { case GainControl::kAdaptiveAnalog: return kAgcModeAdaptiveAnalog; @@ -59,7 +59,7 @@ int GainControlImpl::ProcessRenderAudio(AudioBuffer* audio) { assert(audio->samples_per_split_channel() <= 160); - WebRtc_Word16* mixed_data = audio->low_pass_split_data(0); + int16_t* mixed_data = audio->low_pass_split_data(0); if (audio->num_channels() > 1) { audio->CopyAndMixLowPass(1); mixed_data = audio->mixed_low_pass_data(0); @@ -70,7 +70,7 @@ int GainControlImpl::ProcessRenderAudio(AudioBuffer* audio) { int err = WebRtcAgc_AddFarend( my_handle, mixed_data, - static_cast<WebRtc_Word16>(audio->samples_per_split_channel())); + static_cast<int16_t>(audio->samples_per_split_channel())); if (err != apm_->kNoError) { return GetHandleError(my_handle); @@ -97,7 +97,7 @@ int GainControlImpl::AnalyzeCaptureAudio(AudioBuffer* audio) { my_handle, audio->low_pass_split_data(i), audio->high_pass_split_data(i), - static_cast<WebRtc_Word16>(audio->samples_per_split_channel())); + static_cast<int16_t>(audio->samples_per_split_channel())); if (err != apm_->kNoError) { return GetHandleError(my_handle); @@
-107,13 +107,13 @@ int GainControlImpl::AnalyzeCaptureAudio(AudioBuffer* audio) { for (int i = 0; i < num_handles(); i++) { Handle* my_handle = static_cast<Handle*>(handle(i)); - WebRtc_Word32 capture_level_out = 0; + int32_t capture_level_out = 0; err = WebRtcAgc_VirtualMic( my_handle, audio->low_pass_split_data(i), audio->high_pass_split_data(i), - static_cast<WebRtc_Word16>(audio->samples_per_split_channel()), + static_cast<int16_t>(audio->samples_per_split_channel()), //capture_levels_[i], analog_capture_level_, &capture_level_out); @@ -145,14 +145,14 @@ int GainControlImpl::ProcessCaptureAudio(AudioBuffer* audio) { stream_is_saturated_ = false; for (int i = 0; i < num_handles(); i++) { Handle* my_handle = static_cast<Handle*>(handle(i)); - WebRtc_Word32 capture_level_out = 0; - WebRtc_UWord8 saturation_warning = 0; + int32_t capture_level_out = 0; + uint8_t saturation_warning = 0; int err = WebRtcAgc_Process( my_handle, audio->low_pass_split_data(i), audio->high_pass_split_data(i), - static_cast<WebRtc_Word16>(audio->samples_per_split_channel()), + static_cast<int16_t>(audio->samples_per_split_channel()), audio->low_pass_split_data(i), audio->high_pass_split_data(i), capture_levels_[i], @@ -345,10 +345,10 @@ int GainControlImpl::ConfigureHandle(void* handle) const { // TODO(ajm): Flip the sign here (since AGC expects a positive value) if we // change the interface. //assert(target_level_dbfs_ <= 0); - //config.targetLevelDbfs = static_cast<WebRtc_Word16>(-target_level_dbfs_); - config.targetLevelDbfs = static_cast<WebRtc_Word16>(target_level_dbfs_); + //config.targetLevelDbfs = static_cast<int16_t>(-target_level_dbfs_); + config.targetLevelDbfs = static_cast<int16_t>(target_level_dbfs_); config.compressionGaindB = - static_cast<WebRtc_Word16>(compression_gain_db_); + static_cast<int16_t>(compression_gain_db_); config.limiterEnable = limiter_enabled_; return WebRtcAgc_set_config(static_cast<Handle*>(handle), config); diff --git a/webrtc/modules/audio_processing/high_pass_filter_impl.cc b/webrtc/modules/audio_processing/high_pass_filter_impl.cc index b20fed872..c4bfa839d 100644 --- a/webrtc/modules/audio_processing/high_pass_filter_impl.cc +++ b/webrtc/modules/audio_processing/high_pass_filter_impl.cc @@ -21,16 +21,16 @@ namespace webrtc { namespace { -const WebRtc_Word16 kFilterCoefficients8kHz[5] = +const int16_t kFilterCoefficients8kHz[5] = {3798, -7596, 3798, 7807, -3733}; -const WebRtc_Word16 kFilterCoefficients[5] = +const int16_t kFilterCoefficients[5] = {4012, -8024, 4012, 8002, -3913}; struct FilterState { - WebRtc_Word16 y[4]; - WebRtc_Word16 x[2]; - const WebRtc_Word16* ba; + int16_t y[4]; + int16_t x[2]; + const int16_t* ba; }; int InitializeFilter(FilterState* hpf, int sample_rate_hz) { @@ -48,13 +48,13 @@ int InitializeFilter(FilterState* hpf, int sample_rate_hz) { return AudioProcessing::kNoError; } -int Filter(FilterState* hpf, WebRtc_Word16* data, int length) { +int Filter(FilterState* hpf, int16_t* data, int length) { assert(hpf != NULL); - WebRtc_Word32 tmp_int32 = 0; - WebRtc_Word16* y = hpf->y; - WebRtc_Word16* x = hpf->x; - const WebRtc_Word16* ba = hpf->ba; + int32_t tmp_int32 = 0; + int16_t* y = hpf->y; + int16_t* x = hpf->x; + const int16_t* ba = hpf->ba; for (int i = 0; i < length; i++) { // y[i] = b[0] * x[i] + b[1] * x[i-1] + b[2] * x[i-2] @@ -82,20 +82,20 @@ int Filter(FilterState* hpf, WebRtc_Word16* data, int length) { // Update state (filtered part) y[2] = y[0]; y[3] = y[1]; - y[0] = static_cast<WebRtc_Word16>(tmp_int32 >> 13); - y[1] = static_cast<WebRtc_Word16>((tmp_int32 - - WEBRTC_SPL_LSHIFT_W32(static_cast<WebRtc_Word32>(y[0]), 13)) << 2); + y[0] = static_cast<int16_t>(tmp_int32 >> 13); + y[1] = static_cast<int16_t>((tmp_int32 - +
WEBRTC_SPL_LSHIFT_W32(static_cast<int32_t>(y[0]), 13)) << 2); // Rounding in Q12, i.e. add 2^11 tmp_int32 += 2048; // Saturate (to 2^27) so that the HP filtered signal does not overflow - tmp_int32 = WEBRTC_SPL_SAT(static_cast<WebRtc_Word32>(134217727), + tmp_int32 = WEBRTC_SPL_SAT(static_cast<int32_t>(134217727), tmp_int32, - static_cast<WebRtc_Word32>(-134217728)); + static_cast<int32_t>(-134217728)); // Convert back to Q0 and use rounding - data[i] = (WebRtc_Word16)WEBRTC_SPL_RSHIFT_W32(tmp_int32, 12); + data[i] = (int16_t)WEBRTC_SPL_RSHIFT_W32(tmp_int32, 12); } diff --git a/webrtc/modules/audio_processing/include/audio_processing.h b/webrtc/modules/audio_processing/include/audio_processing.h index 5e0c02c1a..510b1a079 100644 --- a/webrtc/modules/audio_processing/include/audio_processing.h +++ b/webrtc/modules/audio_processing/include/audio_processing.h @@ -253,8 +253,8 @@ class AudioProcessing : public Module { }; // Inherited from Module. - virtual WebRtc_Word32 TimeUntilNextProcess() { return -1; } - virtual WebRtc_Word32 Process() { return -1; } + virtual int32_t TimeUntilNextProcess() { return -1; } + virtual int32_t Process() { return -1; } }; // The acoustic echo cancellation (AEC) component provides better performance diff --git a/webrtc/modules/audio_processing/include/mock_audio_processing.h b/webrtc/modules/audio_processing/include/mock_audio_processing.h index c0d0a9688..c2ab7349d 100644 --- a/webrtc/modules/audio_processing/include/mock_audio_processing.h +++ b/webrtc/modules/audio_processing/include/mock_audio_processing.h @@ -233,9 +233,9 @@ class MockAudioProcessing : public AudioProcessing { return voice_detection_.get(); }; MOCK_METHOD0(TimeUntilNextProcess, - WebRtc_Word32()); + int32_t()); MOCK_METHOD0(Process, - WebRtc_Word32()); + int32_t()); private: scoped_ptr<MockEchoCancellation> echo_cancellation_; diff --git a/webrtc/modules/audio_processing/ns/include/noise_suppression.h b/webrtc/modules/audio_processing/ns/include/noise_suppression.h index c9a8e3234..c5cee9cbe 100644 --- a/webrtc/modules/audio_processing/ns/include/noise_suppression.h +++ b/webrtc/modules/audio_processing/ns/include/noise_suppression.h @@ -62,7 +62,7 @@ int WebRtcNs_Free(NsHandle* NS_inst); * Return value : 0 - Ok * -1 - Error */ -int WebRtcNs_Init(NsHandle* NS_inst, WebRtc_UWord32 fs); +int WebRtcNs_Init(NsHandle* NS_inst, uint32_t fs); /* * This changes the aggressiveness of the noise suppression method. diff --git a/webrtc/modules/audio_processing/ns/include/noise_suppression_x.h b/webrtc/modules/audio_processing/ns/include/noise_suppression_x.h index b6eef9047..0ce89ba28 100644 --- a/webrtc/modules/audio_processing/ns/include/noise_suppression_x.h +++ b/webrtc/modules/audio_processing/ns/include/noise_suppression_x.h @@ -61,7 +61,7 @@ int WebRtcNsx_Free(NsxHandle* nsxInst); * Return value : 0 - Ok * -1 - Error */ -int WebRtcNsx_Init(NsxHandle* nsxInst, WebRtc_UWord32 fs); +int WebRtcNsx_Init(NsxHandle* nsxInst, uint32_t fs); /* * This changes the aggressiveness of the noise suppression method.
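Filter() above keeps the biquad accumulator in Q12 and finishes every sample with a round, saturate and shift back to Q0. A minimal sketch of that final step, reusing the constants from the hunk (helper name hypothetical):

#include <stdint.h>

/* Round in Q12 (add 2^11), clamp to +/-2^27 so the shift cannot leave
   int16_t range, then drop back to Q0; mirrors the tail of Filter(). */
static int16_t q12_round_sat_to_q0(int32_t acc) {
  acc += 2048;
  if (acc > 134217727) acc = 134217727;    /*  2^27 - 1 */
  if (acc < -134217728) acc = -134217728;  /* -2^27     */
  return (int16_t)(acc >> 12);
}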
diff --git a/webrtc/modules/audio_processing/ns/noise_suppression.c b/webrtc/modules/audio_processing/ns/noise_suppression.c index 0796a5b1e..c0345c59b 100644 --- a/webrtc/modules/audio_processing/ns/noise_suppression.c +++ b/webrtc/modules/audio_processing/ns/noise_suppression.c @@ -34,7 +34,7 @@ int WebRtcNs_Free(NsHandle* NS_inst) { } -int WebRtcNs_Init(NsHandle* NS_inst, WebRtc_UWord32 fs) { +int WebRtcNs_Init(NsHandle* NS_inst, uint32_t fs) { return WebRtcNs_InitCore((NSinst_t*) NS_inst, fs); } diff --git a/webrtc/modules/audio_processing/ns/noise_suppression_x.c b/webrtc/modules/audio_processing/ns/noise_suppression_x.c index 64ab28785..20a296ece 100644 --- a/webrtc/modules/audio_processing/ns/noise_suppression_x.c +++ b/webrtc/modules/audio_processing/ns/noise_suppression_x.c @@ -37,7 +37,7 @@ int WebRtcNsx_Free(NsxHandle* nsxInst) { return 0; } -int WebRtcNsx_Init(NsxHandle* nsxInst, WebRtc_UWord32 fs) { +int WebRtcNsx_Init(NsxHandle* nsxInst, uint32_t fs) { return WebRtcNsx_InitCore((NsxInst_t*)nsxInst, fs); } diff --git a/webrtc/modules/audio_processing/ns/ns_core.c b/webrtc/modules/audio_processing/ns/ns_core.c index 2e8cedd0e..064477a21 100644 --- a/webrtc/modules/audio_processing/ns/ns_core.c +++ b/webrtc/modules/audio_processing/ns/ns_core.c @@ -68,7 +68,7 @@ void WebRtcNs_set_feature_extraction_parameters(NSinst_t* inst) { } // Initialize state -int WebRtcNs_InitCore(NSinst_t* inst, WebRtc_UWord32 fs) { +int WebRtcNs_InitCore(NSinst_t* inst, uint32_t fs) { int i; //We only support 10ms frames diff --git a/webrtc/modules/audio_processing/ns/ns_core.h b/webrtc/modules/audio_processing/ns/ns_core.h index 2f4c34ff6..e98bfbe0f 100644 --- a/webrtc/modules/audio_processing/ns/ns_core.h +++ b/webrtc/modules/audio_processing/ns/ns_core.h @@ -50,7 +50,7 @@ typedef struct NSParaExtract_t_ { typedef struct NSinst_t_ { - WebRtc_UWord32 fs; + uint32_t fs; int blockLen; int blockLen10ms; int windShift; @@ -80,7 +80,7 @@ typedef struct NSinst_t_ { float wfft[W_LENGTH]; // parameters for new method: some not needed, will reduce/cleanup later - WebRtc_Word32 blockInd; //frame index counter + int32_t blockInd; //frame index counter int modelUpdatePars[4]; //parameters for updating or estimating // thresholds/weights for prior model float priorModelPars[7]; //parameters for prior model @@ -127,7 +127,7 @@ extern "C" { * Return value : 0 - Ok * -1 - Error */ -int WebRtcNs_InitCore(NSinst_t* inst, WebRtc_UWord32 fs); +int WebRtcNs_InitCore(NSinst_t* inst, uint32_t fs); /**************************************************************************** * WebRtcNs_set_policy_core(...) diff --git a/webrtc/modules/audio_processing/ns/nsx_core.c b/webrtc/modules/audio_processing/ns/nsx_core.c index a605c1ed5..50f2a18fe 100644 --- a/webrtc/modules/audio_processing/ns/nsx_core.c +++ b/webrtc/modules/audio_processing/ns/nsx_core.c @@ -22,15 +22,15 @@ #if (defined WEBRTC_DETECT_ARM_NEON || defined WEBRTC_ARCH_ARM_NEON) /* Tables are defined in ARM assembly files. 
*/ -extern const WebRtc_Word16 WebRtcNsx_kLogTable[9]; -extern const WebRtc_Word16 WebRtcNsx_kCounterDiv[201]; -extern const WebRtc_Word16 WebRtcNsx_kLogTableFrac[256]; +extern const int16_t WebRtcNsx_kLogTable[9]; +extern const int16_t WebRtcNsx_kCounterDiv[201]; +extern const int16_t WebRtcNsx_kLogTableFrac[256]; #else -static const WebRtc_Word16 WebRtcNsx_kLogTable[9] = { +static const int16_t WebRtcNsx_kLogTable[9] = { 0, 177, 355, 532, 710, 887, 1065, 1242, 1420 }; -static const WebRtc_Word16 WebRtcNsx_kCounterDiv[201] = { +static const int16_t WebRtcNsx_kCounterDiv[201] = { 32767, 16384, 10923, 8192, 6554, 5461, 4681, 4096, 3641, 3277, 2979, 2731, 2521, 2341, 2185, 2048, 1928, 1820, 1725, 1638, 1560, 1489, 1425, 1365, 1311, 1260, 1214, 1170, 1130, 1092, 1057, 1024, 993, 964, 936, 910, 886, 862, 840, @@ -47,7 +47,7 @@ static const WebRtc_Word16 WebRtcNsx_kCounterDiv[201] = { 172, 172, 171, 170, 169, 168, 167, 166, 165, 165, 164, 163 }; -static const WebRtc_Word16 WebRtcNsx_kLogTableFrac[256] = { +static const int16_t WebRtcNsx_kLogTableFrac[256] = { 0, 1, 3, 4, 6, 7, 9, 10, 11, 13, 14, 16, 17, 18, 20, 21, 22, 24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38, 40, 41, 42, 44, 45, 46, 47, 49, 50, 51, 52, 54, 55, 56, 57, 59, 60, 61, 62, @@ -71,7 +71,7 @@ static const WebRtc_Word16 WebRtcNsx_kLogTableFrac[256] = { // Skip first frequency bins during estimation. (0 <= value < 64) static const int kStartBand = 5; -static const WebRtc_Word16 kPowTableFrac[1024] = { +static const int16_t kPowTableFrac[1024] = { 0, 1, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 8, 9, 10, 10, 11, 12, 13, 13, 14, 15, 15, 16, @@ -202,13 +202,13 @@ static const WebRtc_Word16 kPowTableFrac[1024] = { 1013, 1014, 1016, 1017, 1018, 1020, 1021, 1023 }; -static const WebRtc_Word16 kIndicatorTable[17] = { +static const int16_t kIndicatorTable[17] = { 0, 2017, 3809, 5227, 6258, 6963, 7424, 7718, 7901, 8014, 8084, 8126, 8152, 8168, 8177, 8183, 8187 }; // hybrib Hanning & flat window -static const WebRtc_Word16 kBlocks80w128x[128] = { +static const int16_t kBlocks80w128x[128] = { 0, 536, 1072, 1606, 2139, 2669, 3196, 3720, 4240, 4756, 5266, 5771, 6270, 6762, 7246, 7723, 8192, 8652, 9102, 9543, 9974, 10394, 10803, 11200, 11585, 11958, 12318, 12665, 12998, 13318, 13623, 13913, 14189, @@ -224,7 +224,7 @@ static const WebRtc_Word16 kBlocks80w128x[128] = { }; // hybrib Hanning & flat window -static const WebRtc_Word16 kBlocks160w256x[256] = { +static const int16_t kBlocks160w256x[256] = { 0, 268, 536, 804, 1072, 1339, 1606, 1872, 2139, 2404, 2669, 2933, 3196, 3459, 3720, 3981, 4240, 4499, 4756, 5012, 5266, 5520, 5771, 6021, @@ -269,7 +269,7 @@ static const WebRtc_Word16 kBlocks160w256x[256] = { // } else { // factor1 = 1.0; // } -static const WebRtc_Word16 kFactor1Table[257] = { +static const int16_t kFactor1Table[257] = { 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, @@ -304,7 +304,7 @@ static const WebRtc_Word16 kFactor1Table[257] = { // } // // Gain factor table: Input value in Q8 and output value in Q13 -static const WebRtc_Word16 kFactor2Aggressiveness1[257] = { +static const int16_t kFactor2Aggressiveness1[257] = { 7577, 7577, 7577, 7577, 7577, 7577, 7577, 7577, 7577, 7577, 7577, 7577, 7577, 7577, 7577, 7577, 7577, 7596, 7614, 7632, 7650, 7667, 7683, 7699, 7715, 7731, 7746, 7761, 7775, 7790, 7804, 7818, 7832, 7845, @@ -327,7 +327,7 @@ static const WebRtc_Word16 
kFactor2Aggressiveness1[257] = { }; // Gain factor table: Input value in Q8 and output value in Q13 -static const WebRtc_Word16 kFactor2Aggressiveness2[257] = { +static const int16_t kFactor2Aggressiveness2[257] = { 7270, 7270, 7270, 7270, 7270, 7306, 7339, 7369, 7397, 7424, 7448, 7472, 7495, 7517, 7537, 7558, 7577, 7596, 7614, 7632, 7650, 7667, 7683, 7699, 7715, 7731, 7746, 7761, 7775, 7790, 7804, 7818, 7832, 7845, @@ -350,7 +350,7 @@ static const WebRtc_Word16 kFactor2Aggressiveness2[257] = { }; // Gain factor table: Input value in Q8 and output value in Q13 -static const WebRtc_Word16 kFactor2Aggressiveness3[257] = { +static const int16_t kFactor2Aggressiveness3[257] = { 7184, 7184, 7184, 7229, 7270, 7306, 7339, 7369, 7397, 7424, 7448, 7472, 7495, 7517, 7537, 7558, 7577, 7596, 7614, 7632, 7650, 7667, 7683, 7699, 7715, 7731, 7746, 7761, 7775, 7790, 7804, 7818, 7832, 7845, @@ -374,7 +374,7 @@ static const WebRtc_Word16 kFactor2Aggressiveness3[257] = { // sum of log2(i) from table index to inst->anaLen2 in Q5 // Note that the first table value is invalid, since log2(0) = -infinity -static const WebRtc_Word16 kSumLogIndex[66] = { +static const int16_t kSumLogIndex[66] = { 0, 22917, 22917, 22885, 22834, 22770, 22696, 22613, 22524, 22428, 22326, 22220, 22109, 21994, 21876, 21754, 21629, 21501, 21370, 21237, 21101, 20963, 20822, 20679, @@ -388,7 +388,7 @@ static const WebRtc_Word16 kSumLogIndex[66] = { // sum of log2(i)^2 from table index to inst->anaLen2 in Q2 // Note that the first table value is invalid, since log2(0) = -infinity -static const WebRtc_Word16 kSumSquareLogIndex[66] = { +static const int16_t kSumSquareLogIndex[66] = { 0, 16959, 16959, 16955, 16945, 16929, 16908, 16881, 16850, 16814, 16773, 16729, 16681, 16630, 16575, 16517, 16456, 16392, 16325, 16256, 16184, 16109, 16032, 15952, @@ -402,7 +402,7 @@ static const WebRtc_Word16 kSumSquareLogIndex[66] = { // log2(table index) in Q12 // Note that the first table value is invalid, since log2(0) = -infinity -static const WebRtc_Word16 kLogIndex[129] = { +static const int16_t kLogIndex[129] = { 0, 0, 4096, 6492, 8192, 9511, 10588, 11499, 12288, 12984, 13607, 14170, 14684, 15157, 15595, 16003, 16384, 16742, 17080, 17400, 17703, 17991, 18266, 18529, @@ -424,7 +424,7 @@ static const WebRtc_Word16 kLogIndex[129] = { // determinant of estimation matrix in Q0 corresponding to the log2 tables above // Note that the first table value is invalid, since log2(0) = -infinity -static const WebRtc_Word16 kDeterminantEstMatrix[66] = { +static const int16_t kDeterminantEstMatrix[66] = { 0, 29814, 25574, 22640, 20351, 18469, 16873, 15491, 14277, 13199, 12233, 11362, 10571, 9851, 9192, 8587, 8030, 7515, 7038, 6596, 6186, 5804, 5448, 5115, @@ -458,10 +458,10 @@ static void WebRtcNsx_InitNeon(void) { // Update the noise estimation information. 
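kLogIndex above is log2 sampled in Q12, i.e. round(log2(i) * 4096), with index 0 left as a placeholder since log2(0) is undefined; kSumLogIndex and kSumSquareLogIndex are the corresponding running sums. A sketch (not part of the patch) regenerating the first entries:

#include <math.h>
#include <stdio.h>

/* Regenerates the leading kLogIndex entries in Q12. */
int main(void) {
  for (int i = 1; i <= 8; ++i)
    printf("%d ", (int)floor(log2((double)i) * 4096.0 + 0.5));
  printf("\n");  /* prints: 0 4096 6492 8192 9511 10588 11499 12288 */
  return 0;
}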
static void UpdateNoiseEstimate(NsxInst_t* inst, int offset) { - WebRtc_Word32 tmp32no1 = 0; - WebRtc_Word32 tmp32no2 = 0; - WebRtc_Word16 tmp16 = 0; - const WebRtc_Word16 kExp2Const = 11819; // Q13 + int32_t tmp32no1 = 0; + int32_t tmp32no2 = 0; + int16_t tmp16 = 0; + const int16_t kExp2Const = 11819; // Q13 int i = 0; @@ -476,9 +476,9 @@ static void UpdateNoiseEstimate(NsxInst_t* inst, int offset) { tmp32no2 = WEBRTC_SPL_MUL_16_16(kExp2Const, inst->noiseEstLogQuantile[offset + i]); tmp32no1 = (0x00200000 | (tmp32no2 & 0x001FFFFF)); // 2^21 + frac - tmp16 = (WebRtc_Word16) WEBRTC_SPL_RSHIFT_W32(tmp32no2, 21); + tmp16 = (int16_t) WEBRTC_SPL_RSHIFT_W32(tmp32no2, 21); tmp16 -= 21;// shift 21 to get result in Q0 - tmp16 += (WebRtc_Word16) inst->qNoise; //shift to get result in Q(qNoise) + tmp16 += (int16_t) inst->qNoise; //shift to get result in Q(qNoise) if (tmp16 < 0) { tmp32no1 = WEBRTC_SPL_RSHIFT_W32(tmp32no1, -tmp16); } else { @@ -493,9 +493,9 @@ static void NoiseEstimationC(NsxInst_t* inst, uint16_t* magn, uint32_t* noise, int16_t* q_noise) { - WebRtc_Word16 lmagn[HALF_ANAL_BLOCKL], counter, countDiv; - WebRtc_Word16 countProd, delta, zeros, frac; - WebRtc_Word16 log2, tabind, logval, tmp16, tmp16no1, tmp16no2; + int16_t lmagn[HALF_ANAL_BLOCKL], counter, countDiv; + int16_t countProd, delta, zeros, frac; + int16_t log2, tabind, logval, tmp16, tmp16no1, tmp16no2; const int16_t log2_const = 22713; // Q15 const int16_t width_factor = 21845; @@ -516,15 +516,15 @@ static void NoiseEstimationC(NsxInst_t* inst, // lmagn in Q8 for (i = 0; i < inst->magnLen; i++) { if (magn[i]) { - zeros = WebRtcSpl_NormU32((WebRtc_UWord32)magn[i]); - frac = (WebRtc_Word16)((((WebRtc_UWord32)magn[i] << zeros) + zeros = WebRtcSpl_NormU32((uint32_t)magn[i]); + frac = (int16_t)((((uint32_t)magn[i] << zeros) & 0x7FFFFFFF) >> 23); // log2(magn(i)) assert(frac < 256); - log2 = (WebRtc_Word16)(((31 - zeros) << 8) + log2 = (int16_t)(((31 - zeros) << 8) + WebRtcNsx_kLogTableFrac[frac]); // log2(magn(i))*log(2) - lmagn[i] = (WebRtc_Word16)WEBRTC_SPL_MUL_16_16_RSFT(log2, log2_const, 15); + lmagn[i] = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(log2, log2_const, 15); // + log(2^stages) lmagn[i] += logval; } else { @@ -540,7 +540,7 @@ static void NoiseEstimationC(NsxInst_t* inst, counter = inst->noiseEstCounter[s]; assert(counter < 201); countDiv = WebRtcNsx_kCounterDiv[counter]; - countProd = (WebRtc_Word16)WEBRTC_SPL_MUL_16_16(counter, countDiv); + countProd = (int16_t)WEBRTC_SPL_MUL_16_16(counter, countDiv); // quant_est(...) 
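NoiseEstimationC above takes log2 of every magnitude bin by normalizing the value and indexing an 8-bit fraction table: the shift count supplies the integer part and WebRtcNsx_kLogTableFrac the Q8 correction. A self-contained sketch of the same idiom, with the table passed in by the caller:

#include <stdint.h>

/* Fixed-point log2 in Q8: count leading zeros (WebRtcSpl_NormU32 in the
   code above), then index the fraction table with the top 8 mantissa bits. */
static int16_t log2_q8(uint32_t u, const int16_t frac_table[256]) {
  if (u == 0)
    return 0;  /* the code above skips zero bins instead */
  int zeros = 0;
  while (!(u & 0x80000000u)) {
    u <<= 1;
    zeros++;
  }
  int16_t frac = (int16_t)((u & 0x7FFFFFFFu) >> 23);
  return (int16_t)(((31 - zeros) << 8) + frac_table[frac]);
}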
for (i = 0; i < inst->magnLen; i++) { @@ -559,7 +559,7 @@ static void NoiseEstimationC(NsxInst_t* inst, } // update log quantile estimate - tmp16 = (WebRtc_Word16)WEBRTC_SPL_MUL_16_16_RSFT(delta, countDiv, 14); + tmp16 = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(delta, countDiv, 14); if (lmagn[i] > inst->noiseEstLogQuantile[offset + i]) { // +=QUANTILE*delta/(inst->counter[s]+1) QUANTILE=0.25, =1 in Q2 // CounterDiv=1/(inst->counter[s]+1) in Q15 @@ -570,7 +570,7 @@ static void NoiseEstimationC(NsxInst_t* inst, tmp16 += 1; tmp16no1 = WEBRTC_SPL_RSHIFT_W16(tmp16, 1); // *(1-QUANTILE), in Q2 QUANTILE=0.25, 1-0.25=0.75=3 in Q2 - tmp16no2 = (WebRtc_Word16)WEBRTC_SPL_MUL_16_16_RSFT(tmp16no1, 3, 1); + tmp16no2 = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(tmp16no1, 3, 1); inst->noiseEstLogQuantile[offset + i] -= tmp16no2; if (inst->noiseEstLogQuantile[offset + i] < logval) { // This is the smallest fixed point representation we can @@ -582,9 +582,9 @@ static void NoiseEstimationC(NsxInst_t* inst, // update density estimate if (WEBRTC_SPL_ABS_W16(lmagn[i] - inst->noiseEstLogQuantile[offset + i]) < WIDTH_Q8) { - tmp16no1 = (WebRtc_Word16)WEBRTC_SPL_MUL_16_16_RSFT_WITH_ROUND( + tmp16no1 = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT_WITH_ROUND( inst->noiseEstDensity[offset + i], countProd, 15); - tmp16no2 = (WebRtc_Word16)WEBRTC_SPL_MUL_16_16_RSFT_WITH_ROUND( + tmp16no2 = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT_WITH_ROUND( width_factor, countDiv, 15); inst->noiseEstDensity[offset + i] = tmp16no1 + tmp16no2; } @@ -606,9 +606,9 @@ static void NoiseEstimationC(NsxInst_t* inst, } for (i = 0; i < inst->magnLen; i++) { - noise[i] = (WebRtc_UWord32)(inst->noiseEstQuantile[i]); // Q(qNoise) + noise[i] = (uint32_t)(inst->noiseEstQuantile[i]); // Q(qNoise) } - (*q_noise) = (WebRtc_Word16)inst->qNoise; + (*q_noise) = (int16_t)inst->qNoise; } // Filter the data in the frequency domain, and create spectrum. 
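The quantile update above is a running percentile tracker: the estimate moves up by QUANTILE * step when a sample exceeds it and down by (1 - QUANTILE) * step otherwise, so it settles where a quarter of the samples lie below, i.e. the 25th-percentile noise floor. A float sketch with the Q-format bookkeeping stripped out:

/* Float form of the update above; in equilibrium 25% of samples fall
   below q, so q tracks the 25th percentile. step plays the role of
   delta * countDiv in the fixed-point code. */
static float update_quantile(float q, float sample, float step) {
  if (sample > q)
    q += 0.25f * step;   /* QUANTILE * delta / (counter + 1) */
  else
    q -= 0.75f * step;   /* (1 - QUANTILE) * delta / (counter + 1) */
  return q;
}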
@@ -617,10 +617,10 @@ static void PrepareSpectrumC(NsxInst_t* inst, int16_t* freq_buf) { int16_t tmp16 = 0; for (i = 0; i < inst->magnLen; i++) { - inst->real[i] = (WebRtc_Word16)WEBRTC_SPL_MUL_16_16_RSFT(inst->real[i], - (WebRtc_Word16)(inst->noiseSupFilter[i]), 14); // Q(normData-stages) - inst->imag[i] = (WebRtc_Word16)WEBRTC_SPL_MUL_16_16_RSFT(inst->imag[i], - (WebRtc_Word16)(inst->noiseSupFilter[i]), 14); // Q(normData-stages) + inst->real[i] = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(inst->real[i], + (int16_t)(inst->noiseSupFilter[i]), 14); // Q(normData-stages) + inst->imag[i] = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(inst->imag[i], + (int16_t)(inst->noiseSupFilter[i]), 14); // Q(normData-stages) } freq_buf[0] = inst->real[0]; @@ -641,7 +641,7 @@ static __inline void DenormalizeC(NsxInst_t* inst, int16_t* in, int factor) { int i = 0, j = 0; int32_t tmp32 = 0; for (i = 0, j = 0; i < inst->anaLen; i += 1, j += 2) { - tmp32 = WEBRTC_SPL_SHIFT_W32((WebRtc_Word32)in[j], + tmp32 = WEBRTC_SPL_SHIFT_W32((int32_t)in[j], factor - inst->normData); inst->real[i] = WebRtcSpl_SatW32ToW16(tmp32); // Q0 } @@ -714,16 +714,16 @@ static __inline void CreateComplexBufferC(NsxInst_t* inst, } void WebRtcNsx_CalcParametricNoiseEstimate(NsxInst_t* inst, - WebRtc_Word16 pink_noise_exp_avg, - WebRtc_Word32 pink_noise_num_avg, + int16_t pink_noise_exp_avg, + int32_t pink_noise_num_avg, int freq_index, - WebRtc_UWord32* noise_estimate, - WebRtc_UWord32* noise_estimate_avg) { - WebRtc_Word32 tmp32no1 = 0; - WebRtc_Word32 tmp32no2 = 0; + uint32_t* noise_estimate, + uint32_t* noise_estimate_avg) { + int32_t tmp32no1 = 0; + int32_t tmp32no2 = 0; - WebRtc_Word16 int_part = 0; - WebRtc_Word16 frac_part = 0; + int16_t int_part = 0; + int16_t frac_part = 0; // Use pink noise estimate // noise_estimate = 2^(pinkNoiseNumerator + pinkNoiseExp * log2(j)) @@ -735,10 +735,10 @@ void WebRtcNsx_CalcParametricNoiseEstimate(NsxInst_t* inst, // Calculate output: 2^tmp32no1 // Output in Q(minNorm-stages) - tmp32no1 += WEBRTC_SPL_LSHIFT_W32((WebRtc_Word32)(inst->minNorm - inst->stages), 11); + tmp32no1 += WEBRTC_SPL_LSHIFT_W32((int32_t)(inst->minNorm - inst->stages), 11); if (tmp32no1 > 0) { - int_part = (WebRtc_Word16)WEBRTC_SPL_RSHIFT_W32(tmp32no1, 11); - frac_part = (WebRtc_Word16)(tmp32no1 & 0x000007ff); // Q11 + int_part = (int16_t)WEBRTC_SPL_RSHIFT_W32(tmp32no1, 11); + frac_part = (int16_t)(tmp32no1 & 0x000007ff); // Q11 // Piecewise linear approximation of 'b' in // 2^(int_part+frac_part) = 2^int_part * (1 + b) // 'b' is given in Q11 and below stored in frac_part. 
@@ -752,14 +752,14 @@ void WebRtcNsx_CalcParametricNoiseEstimate(NsxInst_t* inst, } // Shift fractional part to Q(minNorm-stages) tmp32no2 = WEBRTC_SPL_SHIFT_W32(tmp32no2, int_part - 11); - *noise_estimate_avg = WEBRTC_SPL_LSHIFT_U32(1, int_part) + (WebRtc_UWord32)tmp32no2; + *noise_estimate_avg = WEBRTC_SPL_LSHIFT_U32(1, int_part) + (uint32_t)tmp32no2; // Scale up to initMagnEst, which is not block averaged - *noise_estimate = (*noise_estimate_avg) * (WebRtc_UWord32)(inst->blockIndex + 1); + *noise_estimate = (*noise_estimate_avg) * (uint32_t)(inst->blockIndex + 1); } } // Initialize state -WebRtc_Word32 WebRtcNsx_InitCore(NsxInst_t* inst, WebRtc_UWord32 fs) { +int32_t WebRtcNsx_InitCore(NsxInst_t* inst, uint32_t fs) { int i; //check for valid pointer @@ -823,11 +823,11 @@ WebRtc_Word32 WebRtcNsx_InitCore(NsxInst_t* inst, WebRtc_UWord32 fs) { inst->noiseEstDensity[i] = 153; // Q9 } for (i = 0; i < SIMULT; i++) { - inst->noiseEstCounter[i] = (WebRtc_Word16)(END_STARTUP_LONG * (i + 1)) / SIMULT; + inst->noiseEstCounter[i] = (int16_t)(END_STARTUP_LONG * (i + 1)) / SIMULT; } // Initialize suppression filter with ones - WebRtcSpl_MemSetW16((WebRtc_Word16*)inst->noiseSupFilter, 16384, HALF_ANAL_BLOCKL); + WebRtcSpl_MemSetW16((int16_t*)inst->noiseSupFilter, 16384, HALF_ANAL_BLOCKL); // Set the aggressiveness: default inst->aggrMode = 0; @@ -954,17 +954,17 @@ int WebRtcNsx_set_policy_core(NsxInst_t* inst, int mode) { // flag 0 means update histogram only, flag 1 means compute the thresholds/weights // threshold and weights are returned in: inst->priorModelPars void WebRtcNsx_FeatureParameterExtraction(NsxInst_t* inst, int flag) { - WebRtc_UWord32 tmpU32; - WebRtc_UWord32 histIndex; - WebRtc_UWord32 posPeak1SpecFlatFX, posPeak2SpecFlatFX; - WebRtc_UWord32 posPeak1SpecDiffFX, posPeak2SpecDiffFX; + uint32_t tmpU32; + uint32_t histIndex; + uint32_t posPeak1SpecFlatFX, posPeak2SpecFlatFX; + uint32_t posPeak1SpecDiffFX, posPeak2SpecDiffFX; - WebRtc_Word32 tmp32; - WebRtc_Word32 fluctLrtFX, thresFluctLrtFX; - WebRtc_Word32 avgHistLrtFX, avgSquareHistLrtFX, avgHistLrtComplFX; + int32_t tmp32; + int32_t fluctLrtFX, thresFluctLrtFX; + int32_t avgHistLrtFX, avgSquareHistLrtFX, avgHistLrtComplFX; - WebRtc_Word16 j; - WebRtc_Word16 numHistLrt; + int16_t j; + int16_t numHistLrt; int i; int useFeatureSpecFlat, useFeatureSpecDiff, featureSum; @@ -977,7 +977,7 @@ void WebRtcNsx_FeatureParameterExtraction(NsxInst_t* inst, int flag) { // LRT // Type casting to UWord32 is safe since negative values will not be wrapped to larger // values than HIST_PAR_EST - histIndex = (WebRtc_UWord32)(inst->featureLogLrt); + histIndex = (uint32_t)(inst->featureLogLrt); if (histIndex < HIST_PAR_EST) { inst->histLrt[histIndex]++; } @@ -1027,13 +1027,13 @@ void WebRtcNsx_FeatureParameterExtraction(NsxInst_t* inst, int flag) { fluctLrtFX -= WEBRTC_SPL_MUL(avgHistLrtFX, avgHistLrtComplFX); thresFluctLrtFX = THRES_FLUCT_LRT * numHistLrt; // get threshold for LRT feature: - tmpU32 = (FACTOR_1_LRT_DIFF * (WebRtc_UWord32)avgHistLrtFX); + tmpU32 = (FACTOR_1_LRT_DIFF * (uint32_t)avgHistLrtFX); if ((fluctLrtFX < thresFluctLrtFX) || (numHistLrt == 0) || - (tmpU32 > (WebRtc_UWord32)(100 * numHistLrt))) { + (tmpU32 > (uint32_t)(100 * numHistLrt))) { //very low fluctuation, so likely noise inst->thresholdLogLrt = inst->maxLrt; } else { - tmp32 = (WebRtc_Word32)((tmpU32 << (9 + inst->stages)) / numHistLrt / + tmp32 = (int32_t)((tmpU32 << (9 + inst->stages)) / numHistLrt / 25); // check if value is within min/max range inst->thresholdLogLrt = 
WEBRTC_SPL_SAT(inst->maxLrt, @@ -1064,12 +1064,12 @@ void WebRtcNsx_FeatureParameterExtraction(NsxInst_t* inst, int flag) { maxPeak1 = inst->histSpecFlat[i]; weightPeak1SpecFlat = inst->histSpecFlat[i]; - posPeak1SpecFlatFX = (WebRtc_UWord32)(2 * i + 1); + posPeak1SpecFlatFX = (uint32_t)(2 * i + 1); } else if (inst->histSpecFlat[i] > maxPeak2) { // Found new "second" peak maxPeak2 = inst->histSpecFlat[i]; weightPeak2SpecFlat = inst->histSpecFlat[i]; - posPeak2SpecFlatFX = (WebRtc_UWord32)(2 * i + 1); + posPeak2SpecFlatFX = (uint32_t)(2 * i + 1); } } @@ -1110,12 +1110,12 @@ void WebRtcNsx_FeatureParameterExtraction(NsxInst_t* inst, int flag) { maxPeak1 = inst->histSpecDiff[i]; weightPeak1SpecDiff = inst->histSpecDiff[i]; - posPeak1SpecDiffFX = (WebRtc_UWord32)(2 * i + 1); + posPeak1SpecDiffFX = (uint32_t)(2 * i + 1); } else if (inst->histSpecDiff[i] > maxPeak2) { // Found new "second" peak maxPeak2 = inst->histSpecDiff[i]; weightPeak2SpecDiff = inst->histSpecDiff[i]; - posPeak2SpecDiffFX = (WebRtc_UWord32)(2 * i + 1); + posPeak2SpecDiffFX = (uint32_t)(2 * i + 1); } } @@ -1153,20 +1153,20 @@ void WebRtcNsx_FeatureParameterExtraction(NsxInst_t* inst, int flag) { // Compute spectral flatness on input spectrum // magn is the magnitude spectrum // spectral flatness is returned in inst->featureSpecFlat -void WebRtcNsx_ComputeSpectralFlatness(NsxInst_t* inst, WebRtc_UWord16* magn) { - WebRtc_UWord32 tmpU32; - WebRtc_UWord32 avgSpectralFlatnessNum, avgSpectralFlatnessDen; +void WebRtcNsx_ComputeSpectralFlatness(NsxInst_t* inst, uint16_t* magn) { + uint32_t tmpU32; + uint32_t avgSpectralFlatnessNum, avgSpectralFlatnessDen; - WebRtc_Word32 tmp32; - WebRtc_Word32 currentSpectralFlatness, logCurSpectralFlatness; + int32_t tmp32; + int32_t currentSpectralFlatness, logCurSpectralFlatness; - WebRtc_Word16 zeros, frac, intPart; + int16_t zeros, frac, intPart; int i; // for flatness avgSpectralFlatnessNum = 0; - avgSpectralFlatnessDen = inst->sumMagn - (WebRtc_UWord32)magn[0]; // Q(normData-stages) + avgSpectralFlatnessDen = inst->sumMagn - (uint32_t)magn[0]; // Q(normData-stages) // compute log of ratio of the geometric to arithmetic mean: check for log(0) case // flatness = exp( sum(log(magn[i]))/N - log(sum(magn[i])/N) ) @@ -1175,12 +1175,12 @@ void WebRtcNsx_ComputeSpectralFlatness(NsxInst_t* inst, WebRtc_UWord16* magn) { for (i = 1; i < inst->magnLen; i++) { // First bin is excluded from spectrum measures. 
Number of bins is now a power of 2 if (magn[i]) { - zeros = WebRtcSpl_NormU32((WebRtc_UWord32)magn[i]); - frac = (WebRtc_Word16)(((WebRtc_UWord32)((WebRtc_UWord32)(magn[i]) << zeros) + zeros = WebRtcSpl_NormU32((uint32_t)magn[i]); + frac = (int16_t)(((uint32_t)((uint32_t)(magn[i]) << zeros) & 0x7FFFFFFF) >> 23); // log2(magn(i)) assert(frac < 256); - tmpU32 = (WebRtc_UWord32)(((31 - zeros) << 8) + tmpU32 = (uint32_t)(((31 - zeros) << 8) + WebRtcNsx_kLogTableFrac[frac]); // Q8 avgSpectralFlatnessNum += tmpU32; // Q8 } else { @@ -1192,17 +1192,17 @@ void WebRtcNsx_ComputeSpectralFlatness(NsxInst_t* inst, WebRtc_UWord16* magn) { } //ratio and inverse log: check for case of log(0) zeros = WebRtcSpl_NormU32(avgSpectralFlatnessDen); - frac = (WebRtc_Word16)(((avgSpectralFlatnessDen << zeros) & 0x7FFFFFFF) >> 23); + frac = (int16_t)(((avgSpectralFlatnessDen << zeros) & 0x7FFFFFFF) >> 23); // log2(avgSpectralFlatnessDen) assert(frac < 256); - tmp32 = (WebRtc_Word32)(((31 - zeros) << 8) + WebRtcNsx_kLogTableFrac[frac]); // Q8 - logCurSpectralFlatness = (WebRtc_Word32)avgSpectralFlatnessNum; - logCurSpectralFlatness += ((WebRtc_Word32)(inst->stages - 1) << (inst->stages + 7)); // Q(8+stages-1) + tmp32 = (int32_t)(((31 - zeros) << 8) + WebRtcNsx_kLogTableFrac[frac]); // Q8 + logCurSpectralFlatness = (int32_t)avgSpectralFlatnessNum; + logCurSpectralFlatness += ((int32_t)(inst->stages - 1) << (inst->stages + 7)); // Q(8+stages-1) logCurSpectralFlatness -= (tmp32 << (inst->stages - 1)); logCurSpectralFlatness = WEBRTC_SPL_LSHIFT_W32(logCurSpectralFlatness, 10 - inst->stages); // Q17 - tmp32 = (WebRtc_Word32)(0x00020000 | (WEBRTC_SPL_ABS_W32(logCurSpectralFlatness) + tmp32 = (int32_t)(0x00020000 | (WEBRTC_SPL_ABS_W32(logCurSpectralFlatness) & 0x0001FFFF)); //Q17 - intPart = -(WebRtc_Word16)WEBRTC_SPL_RSHIFT_W32(logCurSpectralFlatness, 17); + intPart = -(int16_t)WEBRTC_SPL_RSHIFT_W32(logCurSpectralFlatness, 17); intPart += 7; // Shift 7 to get the output in Q10 (from Q17 = -17+10) if (intPart > 0) { currentSpectralFlatness = WEBRTC_SPL_RSHIFT_W32(tmp32, intPart); @@ -1211,9 +1211,9 @@ void WebRtcNsx_ComputeSpectralFlatness(NsxInst_t* inst, WebRtc_UWord16* magn) { } //time average update of spectral flatness feature - tmp32 = currentSpectralFlatness - (WebRtc_Word32)inst->featureSpecFlat; // Q10 + tmp32 = currentSpectralFlatness - (int32_t)inst->featureSpecFlat; // Q10 tmp32 = WEBRTC_SPL_MUL_32_16(SPECT_FLAT_TAVG_Q14, tmp32); // Q24 - inst->featureSpecFlat = (WebRtc_UWord32)((WebRtc_Word32)inst->featureSpecFlat + inst->featureSpecFlat = (uint32_t)((int32_t)inst->featureSpecFlat + WEBRTC_SPL_RSHIFT_W32(tmp32, 14)); // Q10 // done with flatness feature } @@ -1223,18 +1223,18 @@ void WebRtcNsx_ComputeSpectralFlatness(NsxInst_t* inst, WebRtc_UWord16* magn) { // magn_tmp is the input spectrum // the reference/template spectrum is inst->magn_avg_pause[i] // returns (normalized) spectral difference in inst->featureSpecDiff -void WebRtcNsx_ComputeSpectralDifference(NsxInst_t* inst, WebRtc_UWord16* magnIn) { +void WebRtcNsx_ComputeSpectralDifference(NsxInst_t* inst, uint16_t* magnIn) { // This is to be calculated: // avgDiffNormMagn = var(magnIn) - cov(magnIn, magnAvgPause)^2 / var(magnAvgPause) - WebRtc_UWord32 tmpU32no1, tmpU32no2; - WebRtc_UWord32 varMagnUFX, varPauseUFX, avgDiffNormMagnUFX; + uint32_t tmpU32no1, tmpU32no2; + uint32_t varMagnUFX, varPauseUFX, avgDiffNormMagnUFX; - WebRtc_Word32 tmp32no1, tmp32no2; - WebRtc_Word32 avgPauseFX, avgMagnFX, covMagnPauseFX; - WebRtc_Word32 maxPause, minPause; + 
int32_t tmp32no1, tmp32no2; + int32_t avgPauseFX, avgMagnFX, covMagnPauseFX; + int32_t maxPause, minPause; - WebRtc_Word16 tmp16no1; + int16_t tmp16no1; int i, norm32, nShifts; @@ -1250,7 +1250,7 @@ void WebRtcNsx_ComputeSpectralDifference(NsxInst_t* inst, WebRtc_UWord16* magnIn } // normalize by replacing div of "inst->magnLen" with "inst->stages-1" shifts avgPauseFX = WEBRTC_SPL_RSHIFT_W32(avgPauseFX, inst->stages - 1); - avgMagnFX = (WebRtc_Word32)WEBRTC_SPL_RSHIFT_U32(inst->sumMagn, inst->stages - 1); + avgMagnFX = (int32_t)WEBRTC_SPL_RSHIFT_U32(inst->sumMagn, inst->stages - 1); // Largest possible deviation in magnPause for (co)var calculations tmp32no1 = WEBRTC_SPL_MAX(maxPause - avgPauseFX, avgPauseFX - minPause); // Get number of shifts to make sure we don't get wrap around in varPause @@ -1261,13 +1261,13 @@ void WebRtcNsx_ComputeSpectralDifference(NsxInst_t* inst, WebRtc_UWord16* magnIn covMagnPauseFX = 0; for (i = 0; i < inst->magnLen; i++) { // Compute var and cov of magn and magn_pause - tmp16no1 = (WebRtc_Word16)((WebRtc_Word32)magnIn[i] - avgMagnFX); + tmp16no1 = (int16_t)((int32_t)magnIn[i] - avgMagnFX); tmp32no2 = inst->avgMagnPause[i] - avgPauseFX; - varMagnUFX += (WebRtc_UWord32)WEBRTC_SPL_MUL_16_16(tmp16no1, tmp16no1); // Q(2*qMagn) + varMagnUFX += (uint32_t)WEBRTC_SPL_MUL_16_16(tmp16no1, tmp16no1); // Q(2*qMagn) tmp32no1 = WEBRTC_SPL_MUL_32_16(tmp32no2, tmp16no1); // Q(prevQMagn+qMagn) covMagnPauseFX += tmp32no1; // Q(prevQMagn+qMagn) tmp32no1 = WEBRTC_SPL_RSHIFT_W32(tmp32no2, nShifts); // Q(prevQMagn-minPause) - varPauseUFX += (WebRtc_UWord32)WEBRTC_SPL_MUL(tmp32no1, tmp32no1); // Q(2*(prevQMagn-minPause)) + varPauseUFX += (uint32_t)WEBRTC_SPL_MUL(tmp32no1, tmp32no1); // Q(2*(prevQMagn-minPause)) } //update of average magnitude spectrum: Q(-2*stages) and averaging replaced by shifts inst->curAvgMagnEnergy += WEBRTC_SPL_RSHIFT_U32(inst->magnEnergy, 2 * inst->normData @@ -1275,7 +1275,7 @@ void WebRtcNsx_ComputeSpectralDifference(NsxInst_t* inst, WebRtc_UWord16* magnIn avgDiffNormMagnUFX = varMagnUFX; // Q(2*qMagn) if ((varPauseUFX) && (covMagnPauseFX)) { - tmpU32no1 = (WebRtc_UWord32)WEBRTC_SPL_ABS_W32(covMagnPauseFX); // Q(prevQMagn+qMagn) + tmpU32no1 = (uint32_t)WEBRTC_SPL_ABS_W32(covMagnPauseFX); // Q(prevQMagn+qMagn) norm32 = WebRtcSpl_NormU32(tmpU32no1) - 16; if (norm32 > 0) { tmpU32no1 = WEBRTC_SPL_LSHIFT_U32(tmpU32no1, norm32); // Q(prevQMagn+qMagn+norm32) @@ -1318,16 +1318,16 @@ void WebRtcNsx_ComputeSpectralDifference(NsxInst_t* inst, WebRtc_UWord16* magnIn // speech/noise probability is returned in: probSpeechFinal //snrLocPrior is the prior SNR for each frequency (in Q11) //snrLocPost is the post SNR for each frequency (in Q11) -void WebRtcNsx_SpeechNoiseProb(NsxInst_t* inst, WebRtc_UWord16* nonSpeechProbFinal, - WebRtc_UWord32* priorLocSnr, WebRtc_UWord32* postLocSnr) { - WebRtc_UWord32 zeros, num, den, tmpU32no1, tmpU32no2, tmpU32no3; +void WebRtcNsx_SpeechNoiseProb(NsxInst_t* inst, uint16_t* nonSpeechProbFinal, + uint32_t* priorLocSnr, uint32_t* postLocSnr) { + uint32_t zeros, num, den, tmpU32no1, tmpU32no2, tmpU32no3; - WebRtc_Word32 invLrtFX, indPriorFX, tmp32, tmp32no1, tmp32no2, besselTmpFX32; - WebRtc_Word32 frac32, logTmp; - WebRtc_Word32 logLrtTimeAvgKsumFX; + int32_t invLrtFX, indPriorFX, tmp32, tmp32no1, tmp32no2, besselTmpFX32; + int32_t frac32, logTmp; + int32_t logLrtTimeAvgKsumFX; - WebRtc_Word16 indPriorFX16; - WebRtc_Word16 tmp16, tmp16no1, tmp16no2, tmpIndFX, tableIndex, frac, intPart; + int16_t indPriorFX16; + int16_t tmp16, tmp16no1, 
tmp16no2, tmpIndFX, tableIndex, frac, intPart; int i, normTmp, normTmp2, nShifts; @@ -1335,7 +1335,7 @@ void WebRtcNsx_SpeechNoiseProb(NsxInst_t* inst, WebRtc_UWord16* nonSpeechProbFin // this is the average over all frequencies of the smooth log LRT logLrtTimeAvgKsumFX = 0; for (i = 0; i < inst->magnLen; i++) { - besselTmpFX32 = (WebRtc_Word32)postLocSnr[i]; // Q11 + besselTmpFX32 = (int32_t)postLocSnr[i]; // Q11 normTmp = WebRtcSpl_NormU32(postLocSnr[i]); num = WEBRTC_SPL_LSHIFT_U32(postLocSnr[i], normTmp); // Q(11+normTmp) if (normTmp > 10) { @@ -1352,13 +1352,13 @@ void WebRtcNsx_SpeechNoiseProb(NsxInst_t* inst, WebRtc_UWord16* nonSpeechProbFin // inst->logLrtTimeAvg[i] += LRT_TAVG * (besselTmp - log(snrLocPrior) - inst->logLrtTimeAvg[i]); // Here, LRT_TAVG = 0.5 zeros = WebRtcSpl_NormU32(priorLocSnr[i]); - frac32 = (WebRtc_Word32)(((priorLocSnr[i] << zeros) & 0x7FFFFFFF) >> 19); + frac32 = (int32_t)(((priorLocSnr[i] << zeros) & 0x7FFFFFFF) >> 19); tmp32 = WEBRTC_SPL_MUL(frac32, frac32); tmp32 = WEBRTC_SPL_RSHIFT_W32(WEBRTC_SPL_MUL(tmp32, -43), 19); - tmp32 += WEBRTC_SPL_MUL_16_16_RSFT((WebRtc_Word16)frac32, 5412, 12); + tmp32 += WEBRTC_SPL_MUL_16_16_RSFT((int16_t)frac32, 5412, 12); frac32 = tmp32 + 37; // tmp32 = log2(priorLocSnr[i]) - tmp32 = (WebRtc_Word32)(((31 - zeros) << 12) + frac32) - (11 << 12); // Q12 + tmp32 = (int32_t)(((31 - zeros) << 12) + frac32) - (11 << 12); // Q12 logTmp = WEBRTC_SPL_RSHIFT_W32(WEBRTC_SPL_MUL_32_16(tmp32, 178), 8); // log2(priorLocSnr[i])*log(2) tmp32no1 = WEBRTC_SPL_RSHIFT_W32(logTmp + inst->logLrtTimeAvgW32[i], 1); // Q12 inst->logLrtTimeAvgW32[i] += (besselTmpFX32 - tmp32no1); // Q12 @@ -1387,12 +1387,12 @@ void WebRtcNsx_SpeechNoiseProb(NsxInst_t* inst, WebRtc_UWord16* nonSpeechProbFin } tmp32no1 = WEBRTC_SPL_SHIFT_W32(tmp32no1, nShifts); // Q14 // compute indicator function: sigmoid map - tableIndex = (WebRtc_Word16)WEBRTC_SPL_RSHIFT_W32(tmp32no1, 14); + tableIndex = (int16_t)WEBRTC_SPL_RSHIFT_W32(tmp32no1, 14); if ((tableIndex < 16) && (tableIndex >= 0)) { tmp16no2 = kIndicatorTable[tableIndex]; tmp16no1 = kIndicatorTable[tableIndex + 1] - kIndicatorTable[tableIndex]; - frac = (WebRtc_Word16)(tmp32no1 & 0x00003fff); // Q14 - tmp16no2 += (WebRtc_Word16)WEBRTC_SPL_MUL_16_16_RSFT(tmp16no1, frac, 14); + frac = (int16_t)(tmp32no1 & 0x00003fff); // Q14 + tmp16no2 += (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(tmp16no1, frac, 14); if (tmpIndFX == 0) { tmpIndFX = 8192 - tmp16no2; // Q14 } else { @@ -1414,18 +1414,18 @@ void WebRtcNsx_SpeechNoiseProb(NsxInst_t* inst, WebRtc_UWord16* nonSpeechProbFin //widthPrior = widthPrior * 2.0; nShifts++; } - tmp32no1 = (WebRtc_Word32)WebRtcSpl_DivU32U16(WEBRTC_SPL_LSHIFT_U32(tmpU32no2, + tmp32no1 = (int32_t)WebRtcSpl_DivU32U16(WEBRTC_SPL_LSHIFT_U32(tmpU32no2, nShifts), 25); //Q14 tmpU32no1 = WebRtcSpl_DivU32U16(WEBRTC_SPL_LSHIFT_U32(tmpU32no2, nShifts), 25); //Q14 // compute indicator function: sigmoid map // FLOAT code // indicator1 = 0.5 * (tanh(sgnMap * widthPrior * (threshPrior1 - tmpFloat1)) + 1.0); - tableIndex = (WebRtc_Word16)WEBRTC_SPL_RSHIFT_U32(tmpU32no1, 14); + tableIndex = (int16_t)WEBRTC_SPL_RSHIFT_U32(tmpU32no1, 14); if (tableIndex < 16) { tmp16no2 = kIndicatorTable[tableIndex]; tmp16no1 = kIndicatorTable[tableIndex + 1] - kIndicatorTable[tableIndex]; - frac = (WebRtc_Word16)(tmpU32no1 & 0x00003fff); // Q14 - tmp16no2 += (WebRtc_Word16)WEBRTC_SPL_MUL_16_16_RSFT(tmp16no1, frac, 14); + frac = (int16_t)(tmpU32no1 & 0x00003fff); // Q14 + tmp16no2 += (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(tmp16no1, frac, 14); if 
(tmpIndFX) { tmpIndFX = 8192 + tmp16no2; // Q14 } else { @@ -1448,7 +1448,7 @@ void WebRtcNsx_SpeechNoiseProb(NsxInst_t* inst, WebRtc_UWord16* nonSpeechProbFin // Q(20 - inst->stages) tmpU32no1 = WEBRTC_SPL_UDIV(tmpU32no1, tmpU32no2); } else { - tmpU32no1 = (WebRtc_UWord32)(0x7fffffff); + tmpU32no1 = (uint32_t)(0x7fffffff); } } tmpU32no3 = WEBRTC_SPL_UDIV(WEBRTC_SPL_LSHIFT_U32(inst->thresholdSpecDiff, 17), 25); @@ -1467,12 +1467,12 @@ void WebRtcNsx_SpeechNoiseProb(NsxInst_t* inst, WebRtc_UWord16* nonSpeechProbFin /* FLOAT code indicator2 = 0.5 * (tanh(widthPrior * (tmpFloat1 - threshPrior2)) + 1.0); */ - tableIndex = (WebRtc_Word16)WEBRTC_SPL_RSHIFT_U32(tmpU32no1, 14); + tableIndex = (int16_t)WEBRTC_SPL_RSHIFT_U32(tmpU32no1, 14); if (tableIndex < 16) { tmp16no2 = kIndicatorTable[tableIndex]; tmp16no1 = kIndicatorTable[tableIndex + 1] - kIndicatorTable[tableIndex]; - frac = (WebRtc_Word16)(tmpU32no1 & 0x00003fff); // Q14 - tmp16no2 += (WebRtc_Word16)WEBRTC_SPL_MUL_16_16_RSFT_WITH_ROUND( + frac = (int16_t)(tmpU32no1 & 0x00003fff); // Q14 + tmp16no2 += (int16_t)WEBRTC_SPL_MUL_16_16_RSFT_WITH_ROUND( tmp16no1, frac, 14); if (tmpIndFX) { tmpIndFX = 8192 + tmp16no2; @@ -1493,12 +1493,12 @@ void WebRtcNsx_SpeechNoiseProb(NsxInst_t* inst, WebRtc_UWord16* nonSpeechProbFin // FLOAT code // inst->priorNonSpeechProb += PRIOR_UPDATE * (indPriorNonSpeech - inst->priorNonSpeechProb); tmp16 = indPriorFX16 - inst->priorNonSpeechProb; // Q14 - inst->priorNonSpeechProb += (WebRtc_Word16)WEBRTC_SPL_MUL_16_16_RSFT( + inst->priorNonSpeechProb += (int16_t)WEBRTC_SPL_MUL_16_16_RSFT( PRIOR_UPDATE_Q14, tmp16, 14); // Q14 //final speech probability: combine prior model with LR factor: - memset(nonSpeechProbFinal, 0, sizeof(WebRtc_UWord16) * inst->magnLen); + memset(nonSpeechProbFinal, 0, sizeof(uint16_t) * inst->magnLen); if (inst->priorNonSpeechProb > 0) { for (i = 0; i < inst->magnLen; i++) { @@ -1511,11 +1511,11 @@ void WebRtcNsx_SpeechNoiseProb(NsxInst_t* inst, WebRtc_UWord16* nonSpeechProbFin if (inst->logLrtTimeAvgW32[i] < 65300) { tmp32no1 = WEBRTC_SPL_RSHIFT_W32(WEBRTC_SPL_MUL(inst->logLrtTimeAvgW32[i], 23637), 14); // Q12 - intPart = (WebRtc_Word16)WEBRTC_SPL_RSHIFT_W32(tmp32no1, 12); + intPart = (int16_t)WEBRTC_SPL_RSHIFT_W32(tmp32no1, 12); if (intPart < -8) { intPart = -8; } - frac = (WebRtc_Word16)(tmp32no1 & 0x00000fff); // Q12 + frac = (int16_t)(tmp32no1 & 0x00000fff); // Q12 // Quadratic approximation of 2^frac tmp32no2 = WEBRTC_SPL_RSHIFT_W32(frac * frac * 44, 19); // Q12 @@ -1537,10 +1537,10 @@ void WebRtcNsx_SpeechNoiseProb(NsxInst_t* inst, WebRtc_UWord16* nonSpeechProbFin invLrtFX = WEBRTC_SPL_RSHIFT_W32(tmp32no1, 8); // Q14 } - tmp32no1 = WEBRTC_SPL_LSHIFT_W32((WebRtc_Word32)inst->priorNonSpeechProb, 8); // Q22 + tmp32no1 = WEBRTC_SPL_LSHIFT_W32((int32_t)inst->priorNonSpeechProb, 8); // Q22 - nonSpeechProbFinal[i] = (WebRtc_UWord16)WEBRTC_SPL_DIV(tmp32no1, - (WebRtc_Word32)inst->priorNonSpeechProb + invLrtFX); // Q8 + nonSpeechProbFinal[i] = (uint16_t)WEBRTC_SPL_DIV(tmp32no1, + (int32_t)inst->priorNonSpeechProb + invLrtFX); // Q8 } } } @@ -1548,24 +1548,24 @@ void WebRtcNsx_SpeechNoiseProb(NsxInst_t* inst, WebRtc_UWord16* nonSpeechProbFin } // Transform input (speechFrame) to frequency domain magnitude (magnU16) -void WebRtcNsx_DataAnalysis(NsxInst_t* inst, short* speechFrame, WebRtc_UWord16* magnU16) { +void WebRtcNsx_DataAnalysis(NsxInst_t* inst, short* speechFrame, uint16_t* magnU16) { - WebRtc_UWord32 tmpU32no1, tmpU32no2; + uint32_t tmpU32no1, tmpU32no2; - WebRtc_Word32 tmp_1_w32 = 0; - 
WebRtc_Word32 tmp_2_w32 = 0; - WebRtc_Word32 sum_log_magn = 0; - WebRtc_Word32 sum_log_i_log_magn = 0; + int32_t tmp_1_w32 = 0; + int32_t tmp_2_w32 = 0; + int32_t sum_log_magn = 0; + int32_t sum_log_i_log_magn = 0; - WebRtc_UWord16 sum_log_magn_u16 = 0; - WebRtc_UWord16 tmp_u16 = 0; + uint16_t sum_log_magn_u16 = 0; + uint16_t tmp_u16 = 0; - WebRtc_Word16 sum_log_i = 0; - WebRtc_Word16 sum_log_i_square = 0; - WebRtc_Word16 frac = 0; - WebRtc_Word16 log2 = 0; - WebRtc_Word16 matrix_determinant = 0; - WebRtc_Word16 maxWinData; + int16_t sum_log_i = 0; + int16_t sum_log_i_square = 0; + int16_t frac = 0; + int16_t log2 = 0; + int16_t matrix_determinant = 0; + int16_t maxWinData; int i, j; int zeros; @@ -1616,13 +1616,13 @@ void WebRtcNsx_DataAnalysis(NsxInst_t* inst, short* speechFrame, WebRtc_UWord16* inst->real[0] = winData[0]; // Q(normData-stages) inst->real[inst->anaLen2] = winData[inst->anaLen]; // Q(2*(normData-stages)) - inst->magnEnergy = (WebRtc_UWord32)WEBRTC_SPL_MUL_16_16(inst->real[0], inst->real[0]); - inst->magnEnergy += (WebRtc_UWord32)WEBRTC_SPL_MUL_16_16(inst->real[inst->anaLen2], + inst->magnEnergy = (uint32_t)WEBRTC_SPL_MUL_16_16(inst->real[0], inst->real[0]); + inst->magnEnergy += (uint32_t)WEBRTC_SPL_MUL_16_16(inst->real[inst->anaLen2], inst->real[inst->anaLen2]); - magnU16[0] = (WebRtc_UWord16)WEBRTC_SPL_ABS_W16(inst->real[0]); // Q(normData-stages) - magnU16[inst->anaLen2] = (WebRtc_UWord16)WEBRTC_SPL_ABS_W16(inst->real[inst->anaLen2]); - inst->sumMagn = (WebRtc_UWord32)magnU16[0]; // Q(normData-stages) - inst->sumMagn += (WebRtc_UWord32)magnU16[inst->anaLen2]; + magnU16[0] = (uint16_t)WEBRTC_SPL_ABS_W16(inst->real[0]); // Q(normData-stages) + magnU16[inst->anaLen2] = (uint16_t)WEBRTC_SPL_ABS_W16(inst->real[inst->anaLen2]); + inst->sumMagn = (uint32_t)magnU16[0]; // Q(normData-stages) + inst->sumMagn += (uint32_t)magnU16[inst->anaLen2]; if (inst->blockIndex >= END_STARTUP_SHORT) { for (i = 1, j = 2; i < inst->anaLen2; i += 1, j += 2) { @@ -1630,12 +1630,12 @@ void WebRtcNsx_DataAnalysis(NsxInst_t* inst, short* speechFrame, WebRtc_UWord16* inst->imag[i] = -winData[j + 1]; // magnitude spectrum // energy in Q(2*(normData-stages)) - tmpU32no1 = (WebRtc_UWord32)WEBRTC_SPL_MUL_16_16(winData[j], winData[j]); - tmpU32no1 += (WebRtc_UWord32)WEBRTC_SPL_MUL_16_16(winData[j + 1], winData[j + 1]); + tmpU32no1 = (uint32_t)WEBRTC_SPL_MUL_16_16(winData[j], winData[j]); + tmpU32no1 += (uint32_t)WEBRTC_SPL_MUL_16_16(winData[j + 1], winData[j + 1]); inst->magnEnergy += tmpU32no1; // Q(2*(normData-stages)) - magnU16[i] = (WebRtc_UWord16)WebRtcSpl_SqrtFloor(tmpU32no1); // Q(normData-stages) - inst->sumMagn += (WebRtc_UWord32)magnU16[i]; // Q(normData-stages) + magnU16[i] = (uint16_t)WebRtcSpl_SqrtFloor(tmpU32no1); // Q(normData-stages) + inst->sumMagn += (uint32_t)magnU16[i]; // Q(normData-stages) } } else { // @@ -1650,9 +1650,9 @@ void WebRtcNsx_DataAnalysis(NsxInst_t* inst, short* speechFrame, WebRtc_UWord16* right_shifts_in_initMagnEst); // Q(minNorm-stages) // Shift magnU16 to same domain as initMagnEst - tmpU32no1 = WEBRTC_SPL_RSHIFT_W32((WebRtc_UWord32)magnU16[0], + tmpU32no1 = WEBRTC_SPL_RSHIFT_W32((uint32_t)magnU16[0], right_shifts_in_magnU16); // Q(minNorm-stages) - tmpU32no2 = WEBRTC_SPL_RSHIFT_W32((WebRtc_UWord32)magnU16[inst->anaLen2], + tmpU32no2 = WEBRTC_SPL_RSHIFT_W32((uint32_t)magnU16[inst->anaLen2], right_shifts_in_magnU16); // Q(minNorm-stages) // Update initMagnEst @@ -1662,15 +1662,15 @@ void WebRtcNsx_DataAnalysis(NsxInst_t* inst, short* speechFrame, WebRtc_UWord16* 
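The hunk below, like ComputeSpectralFlatness above, leans on one recurring fixed-point idiom: log2 of an unsigned value is assembled from an integer part, taken from the position of the leading one bit (31 - WebRtcSpl_NormU32(u)), and a fractional part in Q8, looked up in the 256-entry table WebRtcNsx_kLogTableFrac using the next eight mantissa bits. A minimal self-contained sketch of the idiom follows; norm_u32 and the table are rebuilt here purely for illustration, the module itself uses the precomputed table and the SPL helper.

#include <math.h>
#include <stdint.h>
#include <stdio.h>

static int16_t log_table_frac[256];   /* stand-in for WebRtcNsx_kLogTableFrac */

static void build_table(void) {
  int i;
  for (i = 0; i < 256; i++)           /* round(256 * log2(1 + i/256)) */
    log_table_frac[i] = (int16_t)lrint(256.0 * log2(1.0 + i / 256.0));
}

/* Leading zeros of a nonzero 32-bit value, i.e. what WebRtcSpl_NormU32
 * returns for u > 0. */
static int norm_u32(uint32_t u) {
  int zeros = 0;
  while (!(u & 0x80000000u)) { u <<= 1; zeros++; }
  return zeros;
}

/* log2(u) in Q8 for u > 0: integer part from the bit position of the
 * leading one, fractional part from the next 8 mantissa bits. */
static int16_t log2_q8(uint32_t u) {
  int zeros = norm_u32(u);
  int16_t frac = (int16_t)(((u << zeros) & 0x7FFFFFFF) >> 23);
  return (int16_t)(((31 - zeros) << 8) + log_table_frac[frac]);
}

int main(void) {
  build_table();
  printf("%.3f\n", log2_q8(1000) / 256.0);  /* prints ~9.965 (true log2 is 9.966) */
  return 0;
}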
log2 = 0; if (magnU16[inst->anaLen2]) { // Calculate log2(magnU16[inst->anaLen2]) - zeros = WebRtcSpl_NormU32((WebRtc_UWord32)magnU16[inst->anaLen2]); - frac = (WebRtc_Word16)((((WebRtc_UWord32)magnU16[inst->anaLen2] << zeros) & + zeros = WebRtcSpl_NormU32((uint32_t)magnU16[inst->anaLen2]); + frac = (int16_t)((((uint32_t)magnU16[inst->anaLen2] << zeros) & 0x7FFFFFFF) >> 23); // Q8 // log2(magnU16(i)) in Q8 assert(frac < 256); - log2 = (WebRtc_Word16)(((31 - zeros) << 8) + WebRtcNsx_kLogTableFrac[frac]); + log2 = (int16_t)(((31 - zeros) << 8) + WebRtcNsx_kLogTableFrac[frac]); } - sum_log_magn = (WebRtc_Word32)log2; // Q8 + sum_log_magn = (int32_t)log2; // Q8 // sum_log_i_log_magn in Q17 sum_log_i_log_magn = (WEBRTC_SPL_MUL_16_16(kLogIndex[inst->anaLen2], log2) >> 3); @@ -1679,19 +1679,19 @@ void WebRtcNsx_DataAnalysis(NsxInst_t* inst, short* speechFrame, WebRtc_UWord16* inst->imag[i] = -winData[j + 1]; // magnitude spectrum // energy in Q(2*(normData-stages)) - tmpU32no1 = (WebRtc_UWord32)WEBRTC_SPL_MUL_16_16(winData[j], winData[j]); - tmpU32no1 += (WebRtc_UWord32)WEBRTC_SPL_MUL_16_16(winData[j + 1], winData[j + 1]); + tmpU32no1 = (uint32_t)WEBRTC_SPL_MUL_16_16(winData[j], winData[j]); + tmpU32no1 += (uint32_t)WEBRTC_SPL_MUL_16_16(winData[j + 1], winData[j + 1]); inst->magnEnergy += tmpU32no1; // Q(2*(normData-stages)) - magnU16[i] = (WebRtc_UWord16)WebRtcSpl_SqrtFloor(tmpU32no1); // Q(normData-stages) - inst->sumMagn += (WebRtc_UWord32)magnU16[i]; // Q(normData-stages) + magnU16[i] = (uint16_t)WebRtcSpl_SqrtFloor(tmpU32no1); // Q(normData-stages) + inst->sumMagn += (uint32_t)magnU16[i]; // Q(normData-stages) // Switch initMagnEst to Q(minNorm-stages) inst->initMagnEst[i] = WEBRTC_SPL_RSHIFT_U32(inst->initMagnEst[i], right_shifts_in_initMagnEst); // Shift magnU16 to same domain as initMagnEst, i.e., Q(minNorm-stages) - tmpU32no1 = WEBRTC_SPL_RSHIFT_W32((WebRtc_UWord32)magnU16[i], + tmpU32no1 = WEBRTC_SPL_RSHIFT_W32((uint32_t)magnU16[i], right_shifts_in_magnU16); // Update initMagnEst inst->initMagnEst[i] += tmpU32no1; // Q(minNorm-stages) @@ -1700,15 +1700,15 @@ void WebRtcNsx_DataAnalysis(NsxInst_t* inst, short* speechFrame, WebRtc_UWord16* // For pink noise estimation. Collect data neglecting lower frequency band log2 = 0; if (magnU16[i]) { - zeros = WebRtcSpl_NormU32((WebRtc_UWord32)magnU16[i]); - frac = (WebRtc_Word16)((((WebRtc_UWord32)magnU16[i] << zeros) & + zeros = WebRtcSpl_NormU32((uint32_t)magnU16[i]); + frac = (int16_t)((((uint32_t)magnU16[i] << zeros) & 0x7FFFFFFF) >> 23); // log2(magnU16(i)) in Q8 assert(frac < 256); - log2 = (WebRtc_Word16)(((31 - zeros) << 8) + log2 = (int16_t)(((31 - zeros) << 8) + WebRtcNsx_kLogTableFrac[frac]); } - sum_log_magn += (WebRtc_Word32)log2; // Q8 + sum_log_magn += (int32_t)log2; // Q8 // sum_log_i_log_magn in Q17 sum_log_i_log_magn += (WEBRTC_SPL_MUL_16_16(kLogIndex[i], log2) >> 3); } @@ -1745,13 +1745,13 @@ void WebRtcNsx_DataAnalysis(NsxInst_t* inst, short* speechFrame, WebRtc_UWord16* sum_log_i_square = kSumSquareLogIndex[kStartBand]; // Q2 if (inst->fs == 8000) { // Adjust values to shorter blocks in narrow band. 
- tmp_1_w32 = (WebRtc_Word32)matrix_determinant; + tmp_1_w32 = (int32_t)matrix_determinant; tmp_1_w32 += WEBRTC_SPL_MUL_16_16_RSFT(kSumLogIndex[65], sum_log_i, 9); tmp_1_w32 -= WEBRTC_SPL_MUL_16_16_RSFT(kSumLogIndex[65], kSumLogIndex[65], 10); - tmp_1_w32 -= WEBRTC_SPL_LSHIFT_W32((WebRtc_Word32)sum_log_i_square, 4); - tmp_1_w32 -= WEBRTC_SPL_MUL_16_16_RSFT((WebRtc_Word16) + tmp_1_w32 -= WEBRTC_SPL_LSHIFT_W32((int32_t)sum_log_i_square, 4); + tmp_1_w32 -= WEBRTC_SPL_MUL_16_16_RSFT((int16_t) (inst->magnLen - kStartBand), kSumSquareLogIndex[65], 2); - matrix_determinant = (WebRtc_Word16)tmp_1_w32; + matrix_determinant = (int16_t)tmp_1_w32; sum_log_i -= kSumLogIndex[65]; // Q5 sum_log_i_square -= kSumSquareLogIndex[65]; // Q2 } @@ -1762,23 +1762,23 @@ void WebRtcNsx_DataAnalysis(NsxInst_t* inst, short* speechFrame, WebRtc_UWord16* zeros = 0; } tmp_1_w32 = WEBRTC_SPL_LSHIFT_W32(sum_log_magn, 1); // Q9 - sum_log_magn_u16 = (WebRtc_UWord16)WEBRTC_SPL_RSHIFT_W32(tmp_1_w32, zeros);//Q(9-zeros) + sum_log_magn_u16 = (uint16_t)WEBRTC_SPL_RSHIFT_W32(tmp_1_w32, zeros);//Q(9-zeros) // Calculate and update pinkNoiseNumerator. Result in Q11. tmp_2_w32 = WEBRTC_SPL_MUL_16_U16(sum_log_i_square, sum_log_magn_u16); // Q(11-zeros) - tmpU32no1 = WEBRTC_SPL_RSHIFT_U32((WebRtc_UWord32)sum_log_i_log_magn, 12); // Q5 + tmpU32no1 = WEBRTC_SPL_RSHIFT_U32((uint32_t)sum_log_i_log_magn, 12); // Q5 // Shift the largest value of sum_log_i and tmp32no3 before multiplication - tmp_u16 = WEBRTC_SPL_LSHIFT_U16((WebRtc_UWord16)sum_log_i, 1); // Q6 - if ((WebRtc_UWord32)sum_log_i > tmpU32no1) { + tmp_u16 = WEBRTC_SPL_LSHIFT_U16((uint16_t)sum_log_i, 1); // Q6 + if ((uint32_t)sum_log_i > tmpU32no1) { tmp_u16 = WEBRTC_SPL_RSHIFT_U16(tmp_u16, zeros); } else { tmpU32no1 = WEBRTC_SPL_RSHIFT_U32(tmpU32no1, zeros); } - tmp_2_w32 -= (WebRtc_Word32)WEBRTC_SPL_UMUL_32_16(tmpU32no1, tmp_u16); // Q(11-zeros) + tmp_2_w32 -= (int32_t)WEBRTC_SPL_UMUL_32_16(tmpU32no1, tmp_u16); // Q(11-zeros) matrix_determinant = WEBRTC_SPL_RSHIFT_W16(matrix_determinant, zeros); // Q(-zeros) tmp_2_w32 = WebRtcSpl_DivW32W16(tmp_2_w32, matrix_determinant); // Q11 - tmp_2_w32 += WEBRTC_SPL_LSHIFT_W32((WebRtc_Word32)net_norm, 11); // Q11 + tmp_2_w32 += WEBRTC_SPL_LSHIFT_W32((int32_t)net_norm, 11); // Q11 if (tmp_2_w32 < 0) { tmp_2_w32 = 0; } @@ -1787,7 +1787,7 @@ void WebRtcNsx_DataAnalysis(NsxInst_t* inst, short* speechFrame, WebRtc_UWord16* // Calculate and update pinkNoiseExp. Result in Q14. 
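What the fixed-point arithmetic around matrix_determinant, sum_log_i, and sum_log_i_log_magn evaluates is a two-parameter least-squares fit of log-magnitude against log-frequency, i.e. the pink-noise model magn(i) ~ A / i^B. In floating point the same fit is ordinary linear regression; a hedged sketch, with names that are illustrative rather than the module's:

#include <math.h>
#include <stddef.h>

/* Fit log2(magn[i]) ~ a - b*log2(i) over bins [start, n): the closed-form
 * normal equations that the Q-format code evaluates with its precomputed
 * kLogIndex / kSumLogIndex / kSumSquareLogIndex tables. */
static void fit_pink_noise(const float* magn, size_t start, size_t n,
                           float* a, float* b) {
  float sx = 0.0f, sy = 0.0f, sxx = 0.0f, sxy = 0.0f;
  float m = (float)(n - start);
  size_t i;
  for (i = start; i < n; i++) {
    float x = log2f((float)i);         /* log-frequency */
    float y = log2f(magn[i] + 1e-6f);  /* log-magnitude, guarded against 0 */
    sx += x; sy += y; sxx += x * x; sxy += x * y;
  }
  float det = m * sxx - sx * sx;       /* cf. matrix_determinant */
  *a = (sxx * sy - sx * sxy) / det;    /* offset, cf. pinkNoiseNumerator */
  *b = -(m * sxy - sx * sy) / det;     /* spectral slope, cf. pinkNoiseExp */
}

The fs == 8000 branch above is this same fit with the summation range shortened, which is why the precomputed partial sums at index 65 are subtracted out before the determinant and slope are formed.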
tmp_2_w32 = WEBRTC_SPL_MUL_16_U16(sum_log_i, sum_log_magn_u16); // Q(14-zeros) tmp_1_w32 = WEBRTC_SPL_RSHIFT_W32(sum_log_i_log_magn, 3 + zeros); - tmp_1_w32 = WEBRTC_SPL_MUL((WebRtc_Word32)(inst->magnLen - kStartBand), + tmp_1_w32 = WEBRTC_SPL_MUL((int32_t)(inst->magnLen - kStartBand), tmp_1_w32); tmp_2_w32 -= tmp_1_w32; // Q(14-zeros) if (tmp_2_w32 > 0) { @@ -1800,7 +1800,7 @@ void WebRtcNsx_DataAnalysis(NsxInst_t* inst, short* speechFrame, WebRtc_UWord16* } void WebRtcNsx_DataSynthesis(NsxInst_t* inst, short* outFrame) { - WebRtc_Word32 energyOut; + int32_t energyOut; int16_t realImag_buff[ANAL_BLOCKL_MAX * 2 + 16]; int16_t rfft_out_buff[ANAL_BLOCKL_MAX * 2 + 16]; @@ -1809,9 +1809,9 @@ void WebRtcNsx_DataSynthesis(NsxInst_t* inst, short* outFrame) { int16_t* realImag = (int16_t*) (((uintptr_t)realImag_buff + 31) & ~31); int16_t* rfft_out = (int16_t*) (((uintptr_t) rfft_out_buff + 31) & ~31); - WebRtc_Word16 tmp16no1, tmp16no2; - WebRtc_Word16 energyRatio; - WebRtc_Word16 gainFactor, gainFactor1, gainFactor2; + int16_t tmp16no1, tmp16no2; + int16_t energyRatio; + int16_t gainFactor, gainFactor1, gainFactor2; int i; int outCIFFT; @@ -1856,7 +1856,7 @@ void WebRtcNsx_DataSynthesis(NsxInst_t* inst, short* outFrame) { } assert(inst->energyIn > 0); - energyRatio = (WebRtc_Word16)WEBRTC_SPL_DIV(energyOut + energyRatio = (int16_t)WEBRTC_SPL_DIV(energyOut + WEBRTC_SPL_RSHIFT_W32(inst->energyIn, 1), inst->energyIn); // Q8 // Limit the ratio to [0, 1] in Q8, i.e., [0, 256] energyRatio = WEBRTC_SPL_SAT(256, energyRatio, 0); @@ -1869,9 +1869,9 @@ void WebRtcNsx_DataSynthesis(NsxInst_t* inst, short* outFrame) { //combine both scales with speech/noise prob: note prior (priorSpeechProb) is not frequency dependent // factor = inst->priorSpeechProb*factor1 + (1.0-inst->priorSpeechProb)*factor2; // original code - tmp16no1 = (WebRtc_Word16)WEBRTC_SPL_MUL_16_16_RSFT(16384 - inst->priorNonSpeechProb, + tmp16no1 = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(16384 - inst->priorNonSpeechProb, gainFactor1, 14); // Q13 16384 = Q14(1.0) - tmp16no2 = (WebRtc_Word16)WEBRTC_SPL_MUL_16_16_RSFT(inst->priorNonSpeechProb, + tmp16no2 = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(inst->priorNonSpeechProb, gainFactor2, 14); // Q13; gainFactor = tmp16no1 + tmp16no2; // Q13 } // out of flag_gain_map==1 @@ -1884,34 +1884,34 @@ int WebRtcNsx_ProcessCore(NsxInst_t* inst, short* speechFrame, short* speechFram short* outFrame, short* outFrameHB) { // main routine for noise suppression - WebRtc_UWord32 tmpU32no1, tmpU32no2, tmpU32no3; - WebRtc_UWord32 satMax, maxNoiseU32; - WebRtc_UWord32 tmpMagnU32, tmpNoiseU32; - WebRtc_UWord32 nearMagnEst; - WebRtc_UWord32 noiseUpdateU32; - WebRtc_UWord32 noiseU32[HALF_ANAL_BLOCKL]; - WebRtc_UWord32 postLocSnr[HALF_ANAL_BLOCKL]; - WebRtc_UWord32 priorLocSnr[HALF_ANAL_BLOCKL]; - WebRtc_UWord32 prevNearSnr[HALF_ANAL_BLOCKL]; - WebRtc_UWord32 curNearSnr; - WebRtc_UWord32 priorSnr; - WebRtc_UWord32 noise_estimate = 0; - WebRtc_UWord32 noise_estimate_avg = 0; - WebRtc_UWord32 numerator = 0; + uint32_t tmpU32no1, tmpU32no2, tmpU32no3; + uint32_t satMax, maxNoiseU32; + uint32_t tmpMagnU32, tmpNoiseU32; + uint32_t nearMagnEst; + uint32_t noiseUpdateU32; + uint32_t noiseU32[HALF_ANAL_BLOCKL]; + uint32_t postLocSnr[HALF_ANAL_BLOCKL]; + uint32_t priorLocSnr[HALF_ANAL_BLOCKL]; + uint32_t prevNearSnr[HALF_ANAL_BLOCKL]; + uint32_t curNearSnr; + uint32_t priorSnr; + uint32_t noise_estimate = 0; + uint32_t noise_estimate_avg = 0; + uint32_t numerator = 0; - WebRtc_Word32 tmp32no1, tmp32no2; - WebRtc_Word32 pink_noise_num_avg = 
0; + int32_t tmp32no1, tmp32no2; + int32_t pink_noise_num_avg = 0; - WebRtc_UWord16 tmpU16no1; - WebRtc_UWord16 magnU16[HALF_ANAL_BLOCKL]; - WebRtc_UWord16 prevNoiseU16[HALF_ANAL_BLOCKL]; - WebRtc_UWord16 nonSpeechProbFinal[HALF_ANAL_BLOCKL]; - WebRtc_UWord16 gammaNoise, prevGammaNoise; - WebRtc_UWord16 noiseSupFilterTmp[HALF_ANAL_BLOCKL]; + uint16_t tmpU16no1; + uint16_t magnU16[HALF_ANAL_BLOCKL]; + uint16_t prevNoiseU16[HALF_ANAL_BLOCKL]; + uint16_t nonSpeechProbFinal[HALF_ANAL_BLOCKL]; + uint16_t gammaNoise, prevGammaNoise; + uint16_t noiseSupFilterTmp[HALF_ANAL_BLOCKL]; - WebRtc_Word16 qMagn, qNoise; - WebRtc_Word16 avgProbSpeechHB, gainModHB, avgFilterGainHB, gainTimeDomainHB; - WebRtc_Word16 pink_noise_exp_avg = 0; + int16_t qMagn, qNoise; + int16_t avgProbSpeechHB, gainModHB, avgFilterGainHB, gainTimeDomainHB; + int16_t pink_noise_exp_avg = 0; int i; int nShifts, postShifts; @@ -1979,7 +1979,7 @@ int WebRtcNsx_ProcessCore(NsxInst_t* inst, short* speechFrame, short* speechFram //noise estimate from previous frame for (i = 0; i < inst->magnLen; i++) { - prevNoiseU16[i] = (WebRtc_UWord16)WEBRTC_SPL_RSHIFT_U32(inst->prevNoiseU32[i], 11); // Q(prevQNoise) + prevNoiseU16[i] = (uint16_t)WEBRTC_SPL_RSHIFT_U32(inst->prevNoiseU32[i], 11); // Q(prevQNoise) } if (inst->blockIndex < END_STARTUP_SHORT) { @@ -1989,10 +1989,10 @@ int WebRtcNsx_ProcessCore(NsxInst_t* inst, short* speechFrame, short* speechFram // Calculate frequency independent parts in parametric noise estimate and calculate // the estimate for the lower frequency band (same values for all frequency bins) if (inst->pinkNoiseExp) { - pink_noise_exp_avg = (WebRtc_Word16)WebRtcSpl_DivW32W16(inst->pinkNoiseExp, - (WebRtc_Word16)(inst->blockIndex + 1)); // Q14 + pink_noise_exp_avg = (int16_t)WebRtcSpl_DivW32W16(inst->pinkNoiseExp, + (int16_t)(inst->blockIndex + 1)); // Q14 pink_noise_num_avg = WebRtcSpl_DivW32W16(inst->pinkNoiseNumerator, - (WebRtc_Word16)(inst->blockIndex + 1)); // Q11 + (int16_t)(inst->blockIndex + 1)); // Q11 WebRtcNsx_CalcParametricNoiseEstimate(inst, pink_noise_exp_avg, pink_noise_num_avg, @@ -2045,8 +2045,8 @@ int WebRtcNsx_ProcessCore(NsxInst_t* inst, short* speechFrame, short* speechFram tmpU32no1 = 1; } tmpU32no2 = WEBRTC_SPL_UDIV(numerator, tmpU32no1); // Q14 - noiseSupFilterTmp[i] = (WebRtc_UWord16)WEBRTC_SPL_SAT(16384, tmpU32no2, - (WebRtc_UWord32)(inst->denoiseBound)); // Q14 + noiseSupFilterTmp[i] = (uint16_t)WEBRTC_SPL_SAT(16384, tmpU32no2, + (uint32_t)(inst->denoiseBound)); // Q14 } } // Weight quantile noise 'noiseU32' with modeled noise 'noise_estimate_avg' @@ -2094,7 +2094,7 @@ int WebRtcNsx_ProcessCore(NsxInst_t* inst, short* speechFrame, short* speechFram // STEP 1: compute prior and post SNR based on quantile noise estimates // compute direct decision (DD) estimate of prior SNR: needed for new method - satMax = (WebRtc_UWord32)1048575;// Largest possible value without getting overflow despite shifting 12 steps + satMax = (uint32_t)1048575;// Largest possible value without getting overflow despite shifting 12 steps postShifts = 6 + qMagn - qNoise; nShifts = 5 - inst->prevQMagn + inst->prevQNoise; for (i = 0; i < inst->magnLen; i++) { @@ -2117,7 +2117,7 @@ int WebRtcNsx_ProcessCore(NsxInst_t* inst, short* speechFrame, short* speechFram // calculate post SNR: output in Q11 postLocSnr[i] = 2048; // 1.0 in Q11 - tmpU32no1 = WEBRTC_SPL_LSHIFT_U32((WebRtc_UWord32)magnU16[i], 6); // Q(6+qMagn) + tmpU32no1 = WEBRTC_SPL_LSHIFT_U32((uint32_t)magnU16[i], 6); // Q(6+qMagn) if (postShifts < 0) { tmpU32no2 = 
WEBRTC_SPL_RSHIFT_U32(noiseU32[i], -postShifts); // Q(6+qMagn) } else { @@ -2302,11 +2302,11 @@ int WebRtcNsx_ProcessCore(NsxInst_t* inst, short* speechFrame, short* speechFram tmp32no2 = WEBRTC_SPL_SHIFT_W32(inst->avgMagnPause[i], -nShifts); if (nonSpeechProbFinal[i] > ONE_MINUS_PROB_RANGE_Q8) { if (nShifts < 0) { - tmp32no1 = (WebRtc_Word32)magnU16[i] - tmp32no2; // Q(qMagn) + tmp32no1 = (int32_t)magnU16[i] - tmp32no2; // Q(qMagn) tmp32no1 = WEBRTC_SPL_MUL_32_16(tmp32no1, ONE_MINUS_GAMMA_PAUSE_Q8); // Q(8+prevQMagn+nShifts) tmp32no1 = WEBRTC_SPL_RSHIFT_W32(tmp32no1 + 128, 8); // Q(qMagn) } else { - tmp32no1 = WEBRTC_SPL_LSHIFT_W32((WebRtc_Word32)magnU16[i], nShifts) + tmp32no1 = WEBRTC_SPL_LSHIFT_W32((int32_t)magnU16[i], nShifts) - inst->avgMagnPause[i]; // Q(qMagn+nShifts) tmp32no1 = WEBRTC_SPL_MUL_32_16(tmp32no1, ONE_MINUS_GAMMA_PAUSE_Q8); // Q(8+prevQMagn+nShifts) tmp32no1 = WEBRTC_SPL_RSHIFT_W32(tmp32no1 + (128 << nShifts), 8 + nShifts); // Q(qMagn) @@ -2342,13 +2342,13 @@ int WebRtcNsx_ProcessCore(NsxInst_t* inst, short* speechFrame, short* speechFram curNearSnr = 0; // Q11 if (nShifts < 0) { // This case is equivalent with magn < noise which implies curNearSnr = 0; - tmpMagnU32 = (WebRtc_UWord32)magnU16[i]; // Q(qMagn) + tmpMagnU32 = (uint32_t)magnU16[i]; // Q(qMagn) tmpNoiseU32 = WEBRTC_SPL_LSHIFT_U32(noiseU32[i], -nShifts); // Q(qMagn) } else if (nShifts > 17) { tmpMagnU32 = WEBRTC_SPL_LSHIFT_U32(magnU16[i], 17); // Q(qMagn+17) tmpNoiseU32 = WEBRTC_SPL_RSHIFT_U32(noiseU32[i], nShifts - 17); // Q(qMagn+17) } else { - tmpMagnU32 = WEBRTC_SPL_LSHIFT_U32((WebRtc_UWord32)magnU16[i], nShifts); // Q(qNoise_prev+11) + tmpMagnU32 = WEBRTC_SPL_LSHIFT_U32((uint32_t)magnU16[i], nShifts); // Q(qNoise_prev+11) tmpNoiseU32 = noiseU32[i]; // Q(qNoise_prev+11) } if (tmpMagnU32 > tmpNoiseU32) { @@ -2371,22 +2371,22 @@ int WebRtcNsx_ProcessCore(NsxInst_t* inst, short* speechFrame, short* speechFram priorSnr = tmpU32no1 + tmpU32no2; // Q22 //gain filter - tmpU32no1 = (WebRtc_UWord32)(inst->overdrive) + tmpU32no1 = (uint32_t)(inst->overdrive) + WEBRTC_SPL_RSHIFT_U32(priorSnr + 8192, 14); // Q8 assert(inst->overdrive > 0); - tmpU16no1 = (WebRtc_UWord16)WEBRTC_SPL_UDIV(priorSnr + (tmpU32no1 >> 1), tmpU32no1); // Q14 + tmpU16no1 = (uint16_t)WEBRTC_SPL_UDIV(priorSnr + (tmpU32no1 >> 1), tmpU32no1); // Q14 inst->noiseSupFilter[i] = WEBRTC_SPL_SAT(16384, tmpU16no1, inst->denoiseBound); // 16384 = Q14(1.0) // Q14 // Weight in the parametric Wiener filter during startup if (inst->blockIndex < END_STARTUP_SHORT) { // Weight the two suppression filters tmpU32no1 = WEBRTC_SPL_UMUL_16_16(inst->noiseSupFilter[i], - (WebRtc_UWord16)inst->blockIndex); + (uint16_t)inst->blockIndex); tmpU32no2 = WEBRTC_SPL_UMUL_16_16(noiseSupFilterTmp[i], - (WebRtc_UWord16)(END_STARTUP_SHORT + (uint16_t)(END_STARTUP_SHORT - inst->blockIndex)); tmpU32no1 += tmpU32no2; - inst->noiseSupFilter[i] = (WebRtc_UWord16)WebRtcSpl_DivU32U16(tmpU32no1, + inst->noiseSupFilter[i] = (uint16_t)WebRtcSpl_DivU32U16(tmpU32no1, END_STARTUP_SHORT); } } // end of loop over frequencies @@ -2432,11 +2432,11 @@ int WebRtcNsx_ProcessCore(NsxInst_t* inst, short* speechFrame, short* speechFram tmpU16no1 = 0; // Q8 for (i = inst->anaLen2 - (inst->anaLen2 >> 2); i < inst->anaLen2; i++) { tmpU16no1 += nonSpeechProbFinal[i]; // Q8 - tmpU32no1 += (WebRtc_UWord32)(inst->noiseSupFilter[i]); // Q14 + tmpU32no1 += (uint32_t)(inst->noiseSupFilter[i]); // Q14 } - avgProbSpeechHB = (WebRtc_Word16)(4096 + avgProbSpeechHB = (int16_t)(4096 - WEBRTC_SPL_RSHIFT_U16(tmpU16no1, 
inst->stages - 7)); // Q12 - avgFilterGainHB = (WebRtc_Word16)WEBRTC_SPL_RSHIFT_U32( + avgFilterGainHB = (int16_t)WEBRTC_SPL_RSHIFT_U32( tmpU32no1, inst->stages - 3); // Q14 // // original FLOAT code @@ -2471,18 +2471,18 @@ int WebRtcNsx_ProcessCore(NsxInst_t* inst, short* speechFrame, short* speechFram gainTimeDomainHB = (gainModHB << 1) + (avgFilterGainHB >> 1); // Q14 } else { // "gain_time_domain = 0.25 * gain_mod + 0.75 * agv_filter_gain;" - gainTimeDomainHB = (WebRtc_Word16)WEBRTC_SPL_MUL_16_16_RSFT(3, avgFilterGainHB, 2); // 3 = Q2(0.75); Q14 + gainTimeDomainHB = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(3, avgFilterGainHB, 2); // 3 = Q2(0.75); Q14 gainTimeDomainHB += gainModHB; // Q14 } //make sure gain is within flooring range gainTimeDomainHB - = WEBRTC_SPL_SAT(16384, gainTimeDomainHB, (WebRtc_Word16)(inst->denoiseBound)); // 16384 = Q14(1.0) + = WEBRTC_SPL_SAT(16384, gainTimeDomainHB, (int16_t)(inst->denoiseBound)); // 16384 = Q14(1.0) //apply gain for (i = 0; i < inst->blockLen10ms; i++) { outFrameHB[i] - = (WebRtc_Word16)WEBRTC_SPL_MUL_16_16_RSFT(gainTimeDomainHB, inst->dataBufHBFX[i], 14); // Q0 + = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(gainTimeDomainHB, inst->dataBufHBFX[i], 14); // Q0 } } // end of H band gain computation diff --git a/webrtc/modules/audio_processing/ns/nsx_core.h b/webrtc/modules/audio_processing/ns/nsx_core.h index 6275cf930..f1cf43cbc 100644 --- a/webrtc/modules/audio_processing/ns/nsx_core.h +++ b/webrtc/modules/audio_processing/ns/nsx_core.h @@ -20,19 +20,19 @@ #include "webrtc/typedefs.h" typedef struct NsxInst_t_ { - WebRtc_UWord32 fs; + uint32_t fs; - const WebRtc_Word16* window; - WebRtc_Word16 analysisBuffer[ANAL_BLOCKL_MAX]; - WebRtc_Word16 synthesisBuffer[ANAL_BLOCKL_MAX]; - WebRtc_UWord16 noiseSupFilter[HALF_ANAL_BLOCKL]; - WebRtc_UWord16 overdrive; /* Q8 */ - WebRtc_UWord16 denoiseBound; /* Q14 */ - const WebRtc_Word16* factor2Table; - WebRtc_Word16 noiseEstLogQuantile[SIMULT* HALF_ANAL_BLOCKL]; - WebRtc_Word16 noiseEstDensity[SIMULT* HALF_ANAL_BLOCKL]; - WebRtc_Word16 noiseEstCounter[SIMULT]; - WebRtc_Word16 noiseEstQuantile[HALF_ANAL_BLOCKL]; + const int16_t* window; + int16_t analysisBuffer[ANAL_BLOCKL_MAX]; + int16_t synthesisBuffer[ANAL_BLOCKL_MAX]; + uint16_t noiseSupFilter[HALF_ANAL_BLOCKL]; + uint16_t overdrive; /* Q8 */ + uint16_t denoiseBound; /* Q14 */ + const int16_t* factor2Table; + int16_t noiseEstLogQuantile[SIMULT* HALF_ANAL_BLOCKL]; + int16_t noiseEstDensity[SIMULT* HALF_ANAL_BLOCKL]; + int16_t noiseEstCounter[SIMULT]; + int16_t noiseEstQuantile[HALF_ANAL_BLOCKL]; int anaLen; int anaLen2; @@ -42,45 +42,45 @@ typedef struct NsxInst_t_ { int initFlag; int gainMap; - WebRtc_Word32 maxLrt; - WebRtc_Word32 minLrt; + int32_t maxLrt; + int32_t minLrt; // Log LRT factor with time-smoothing in Q8. - WebRtc_Word32 logLrtTimeAvgW32[HALF_ANAL_BLOCKL]; - WebRtc_Word32 featureLogLrt; - WebRtc_Word32 thresholdLogLrt; - WebRtc_Word16 weightLogLrt; + int32_t logLrtTimeAvgW32[HALF_ANAL_BLOCKL]; + int32_t featureLogLrt; + int32_t thresholdLogLrt; + int16_t weightLogLrt; - WebRtc_UWord32 featureSpecDiff; - WebRtc_UWord32 thresholdSpecDiff; - WebRtc_Word16 weightSpecDiff; + uint32_t featureSpecDiff; + uint32_t thresholdSpecDiff; + int16_t weightSpecDiff; - WebRtc_UWord32 featureSpecFlat; - WebRtc_UWord32 thresholdSpecFlat; - WebRtc_Word16 weightSpecFlat; + uint32_t featureSpecFlat; + uint32_t thresholdSpecFlat; + int16_t weightSpecFlat; // Conservative estimate of noise spectrum. 
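A note on the Qn annotations that run through this header and the sources above: a Qn field stores real_value * 2^n in an integer, so denoiseBound (Q14) holds 16384 for 1.0 and overdrive (Q8) holds 256 for 1.0. Illustrative conversion helpers, mine rather than part of the header:

#include <stdint.h>

/* Qn fixed point: the integer holds real_value * 2^n. */
static float q_to_float(int32_t v, int n) {
  return (float)v / (float)(1 << n);
}
static int32_t float_to_q(double v, int n) {
  return (int32_t)(v * (1 << n) + (v >= 0 ? 0.5 : -0.5));  /* round to nearest */
}
/* Arithmetic shifts the format: multiplying Q(a) by Q(b) yields Q(a+b),
 * and v >> k turns Q(n) into Q(n-k). That is why the sources annotate
 * nearly every statement with its format, e.g. "Q14" or "Q(2*qMagn)". */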
- WebRtc_Word32 avgMagnPause[HALF_ANAL_BLOCKL]; - WebRtc_UWord32 magnEnergy; - WebRtc_UWord32 sumMagn; - WebRtc_UWord32 curAvgMagnEnergy; - WebRtc_UWord32 timeAvgMagnEnergy; - WebRtc_UWord32 timeAvgMagnEnergyTmp; + int32_t avgMagnPause[HALF_ANAL_BLOCKL]; + uint32_t magnEnergy; + uint32_t sumMagn; + uint32_t curAvgMagnEnergy; + uint32_t timeAvgMagnEnergy; + uint32_t timeAvgMagnEnergyTmp; - WebRtc_UWord32 whiteNoiseLevel; // Initial noise estimate. + uint32_t whiteNoiseLevel; // Initial noise estimate. // Initial magnitude spectrum estimate. - WebRtc_UWord32 initMagnEst[HALF_ANAL_BLOCKL]; + uint32_t initMagnEst[HALF_ANAL_BLOCKL]; // Pink noise parameters: - WebRtc_Word32 pinkNoiseNumerator; // Numerator. - WebRtc_Word32 pinkNoiseExp; // Power of freq. + int32_t pinkNoiseNumerator; // Numerator. + int32_t pinkNoiseExp; // Power of freq. int minNorm; // Smallest normalization factor. int zeroInputSignal; // Zero input signal flag. // Noise spectrum from previous frame. - WebRtc_UWord32 prevNoiseU32[HALF_ANAL_BLOCKL]; + uint32_t prevNoiseU32[HALF_ANAL_BLOCKL]; // Magnitude spectrum from previous frame. - WebRtc_UWord16 prevMagnU16[HALF_ANAL_BLOCKL]; + uint16_t prevMagnU16[HALF_ANAL_BLOCKL]; // Prior speech/noise probability in Q14. - WebRtc_Word16 priorNonSpeechProb; + int16_t priorNonSpeechProb; int blockIndex; // Frame index counter. // Parameter for updating or estimating thresholds/weights for prior model. @@ -88,21 +88,21 @@ typedef struct NsxInst_t_ { int cntThresUpdate; // Histograms for parameter estimation. - WebRtc_Word16 histLrt[HIST_PAR_EST]; - WebRtc_Word16 histSpecFlat[HIST_PAR_EST]; - WebRtc_Word16 histSpecDiff[HIST_PAR_EST]; + int16_t histLrt[HIST_PAR_EST]; + int16_t histSpecFlat[HIST_PAR_EST]; + int16_t histSpecDiff[HIST_PAR_EST]; // Quantities for high band estimate. - WebRtc_Word16 dataBufHBFX[ANAL_BLOCKL_MAX]; // Q0 + int16_t dataBufHBFX[ANAL_BLOCKL_MAX]; // Q0 int qNoise; int prevQNoise; int prevQMagn; int blockLen10ms; - WebRtc_Word16 real[ANAL_BLOCKL_MAX]; - WebRtc_Word16 imag[ANAL_BLOCKL_MAX]; - WebRtc_Word32 energyIn; + int16_t real[ANAL_BLOCKL_MAX]; + int16_t imag[ANAL_BLOCKL_MAX]; + int32_t energyIn; int scaleEnergyIn; int normData; @@ -129,7 +129,7 @@ extern "C" * Return value : 0 - Ok * -1 - Error */ -WebRtc_Word32 WebRtcNsx_InitCore(NsxInst_t* inst, WebRtc_UWord32 fs); +int32_t WebRtcNsx_InitCore(NsxInst_t* inst, uint32_t fs); /**************************************************************************** * WebRtcNsx_set_policy_core(...) diff --git a/webrtc/modules/audio_processing/ns/nsx_core_neon.c b/webrtc/modules/audio_processing/ns/nsx_core_neon.c index 796b3eae9..5e20a8076 100644 --- a/webrtc/modules/audio_processing/ns/nsx_core_neon.c +++ b/webrtc/modules/audio_processing/ns/nsx_core_neon.c @@ -14,11 +14,11 @@ #include // Constants to compensate for shifting signal log(2^shifts). 
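The three constant tables that follow encode, respectively, ln(2^i) in Q8, approximately 1/(i+1) in Q15, and the fractional log2 table used by the Q8 log2 idiom above. A generator sketch that appears to reproduce the shipped values to rounding; the arrays themselves remain authoritative:

#include <math.h>
#include <stdint.h>

static int16_t k_log_table[9];         /* ln(2^i) in Q8: round(256*i*ln(2)) */
static int16_t k_counter_div[201];     /* round(32768/(i+1)), capped at 32767 */
static int16_t k_log_table_frac[256];  /* round(256*log2(1 + i/256)) */

static void regenerate_tables(void) {
  int i;
  for (i = 0; i < 9; i++)
    k_log_table[i] = (int16_t)lrint(256.0 * i * log(2.0));
  for (i = 0; i < 201; i++) {
    long d = lrint(32768.0 / (i + 1));
    k_counter_div[i] = (int16_t)(d > 32767 ? 32767 : d);
  }
  for (i = 0; i < 256; i++)
    k_log_table_frac[i] = (int16_t)lrint(256.0 * log2(1.0 + i / 256.0));
}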
-const WebRtc_Word16 WebRtcNsx_kLogTable[9] = { +const int16_t WebRtcNsx_kLogTable[9] = { 0, 177, 355, 532, 710, 887, 1065, 1242, 1420 }; -const WebRtc_Word16 WebRtcNsx_kCounterDiv[201] = { +const int16_t WebRtcNsx_kCounterDiv[201] = { 32767, 16384, 10923, 8192, 6554, 5461, 4681, 4096, 3641, 3277, 2979, 2731, 2521, 2341, 2185, 2048, 1928, 1820, 1725, 1638, 1560, 1489, 1425, 1365, 1311, 1260, 1214, 1170, 1130, 1092, 1057, 1024, 993, 964, 936, 910, 886, 862, 840, @@ -35,7 +35,7 @@ const WebRtc_Word16 WebRtcNsx_kCounterDiv[201] = { 172, 172, 171, 170, 169, 168, 167, 166, 165, 165, 164, 163 }; -const WebRtc_Word16 WebRtcNsx_kLogTableFrac[256] = { +const int16_t WebRtcNsx_kLogTableFrac[256] = { 0, 1, 3, 4, 6, 7, 9, 10, 11, 13, 14, 16, 17, 18, 20, 21, 22, 24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38, 40, 41, 42, 44, 45, 46, 47, 49, 50, 51, 52, 54, 55, 56, 57, 59, 60, 61, 62, diff --git a/webrtc/modules/audio_processing/splitting_filter.cc b/webrtc/modules/audio_processing/splitting_filter.cc index 1526141cc..448a454cd 100644 --- a/webrtc/modules/audio_processing/splitting_filter.cc +++ b/webrtc/modules/audio_processing/splitting_filter.cc @@ -13,20 +13,20 @@ namespace webrtc { -void SplittingFilterAnalysis(const WebRtc_Word16* in_data, - WebRtc_Word16* low_band, - WebRtc_Word16* high_band, - WebRtc_Word32* filter_state1, - WebRtc_Word32* filter_state2) +void SplittingFilterAnalysis(const int16_t* in_data, + int16_t* low_band, + int16_t* high_band, + int32_t* filter_state1, + int32_t* filter_state2) { WebRtcSpl_AnalysisQMF(in_data, low_band, high_band, filter_state1, filter_state2); } -void SplittingFilterSynthesis(const WebRtc_Word16* low_band, - const WebRtc_Word16* high_band, - WebRtc_Word16* out_data, - WebRtc_Word32* filt_state1, - WebRtc_Word32* filt_state2) +void SplittingFilterSynthesis(const int16_t* low_band, + const int16_t* high_band, + int16_t* out_data, + int32_t* filt_state1, + int32_t* filt_state2) { WebRtcSpl_SynthesisQMF(low_band, high_band, out_data, filt_state1, filt_state2); } diff --git a/webrtc/modules/audio_processing/splitting_filter.h b/webrtc/modules/audio_processing/splitting_filter.h index 661bfb2f6..1655726f3 100644 --- a/webrtc/modules/audio_processing/splitting_filter.h +++ b/webrtc/modules/audio_processing/splitting_filter.h @@ -31,11 +31,11 @@ namespace webrtc { * - low_band : The signal from the 0-4 kHz band * - high_band : The signal from the 4-8 kHz band */ -void SplittingFilterAnalysis(const WebRtc_Word16* in_data, - WebRtc_Word16* low_band, - WebRtc_Word16* high_band, - WebRtc_Word32* filt_state1, - WebRtc_Word32* filt_state2); +void SplittingFilterAnalysis(const int16_t* in_data, + int16_t* low_band, + int16_t* high_band, + int32_t* filt_state1, + int32_t* filt_state2); /* * SplittingFilterbank_synthesisQMF(...) 
@@ -53,11 +53,11 @@ void SplittingFilterAnalysis(const WebRtc_Word16* in_data, * Output: * - out_data : super-wb speech signal */ -void SplittingFilterSynthesis(const WebRtc_Word16* low_band, - const WebRtc_Word16* high_band, - WebRtc_Word16* out_data, - WebRtc_Word32* filt_state1, - WebRtc_Word32* filt_state2); +void SplittingFilterSynthesis(const int16_t* low_band, + const int16_t* high_band, + int16_t* out_data, + int32_t* filt_state1, + int32_t* filt_state2); } // namespace webrtc #endif // WEBRTC_MODULES_AUDIO_PROCESSING_MAIN_SOURCE_SPLITTING_FILTER_H_ diff --git a/webrtc/modules/audio_processing/test/process_test.cc b/webrtc/modules/audio_processing/test/process_test.cc index dfc7b1e19..85c8c7808 100644 --- a/webrtc/modules/audio_processing/test/process_test.cc +++ b/webrtc/modules/audio_processing/test/process_test.cc @@ -574,10 +574,10 @@ void void_main(int argc, char* argv[]) { TickTime t0 = TickTime::Now(); TickTime t1 = t0; - WebRtc_Word64 max_time_us = 0; - WebRtc_Word64 max_time_reverse_us = 0; - WebRtc_Word64 min_time_us = 1e6; - WebRtc_Word64 min_time_reverse_us = 1e6; + int64_t max_time_us = 0; + int64_t max_time_reverse_us = 0; + int64_t min_time_us = 1e6; + int64_t min_time_reverse_us = 1e6; // TODO(ajm): Ideally we would refactor this block into separate functions, // but for now we want to share the variables. @@ -1015,7 +1015,7 @@ void void_main(int argc, char* argv[]) { if (perf_testing) { if (primary_count > 0) { - WebRtc_Word64 exec_time = acc_ticks.Milliseconds(); + int64_t exec_time = acc_ticks.Milliseconds(); printf("\nTotal time: %.3f s, file time: %.2f s\n", exec_time * 0.001, primary_count * 0.01); printf("Time per frame: %.3f ms (average), %.3f ms (max)," diff --git a/webrtc/modules/audio_processing/voice_detection_impl.cc b/webrtc/modules/audio_processing/voice_detection_impl.cc index 50b99a0d4..8a505efdb 100644 --- a/webrtc/modules/audio_processing/voice_detection_impl.cc +++ b/webrtc/modules/audio_processing/voice_detection_impl.cc @@ -61,7 +61,7 @@ int VoiceDetectionImpl::ProcessCaptureAudio(AudioBuffer* audio) { } assert(audio->samples_per_split_channel() <= 160); - WebRtc_Word16* mixed_data = audio->low_pass_split_data(0); + int16_t* mixed_data = audio->low_pass_split_data(0); if (audio->num_channels() > 1) { audio->CopyAndMixLowPass(1); mixed_data = audio->mixed_low_pass_data(0);
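Taken together, these hunks finish the WebRtc_Word32 -> int32_t sweep for audio_processing/. The rename can be done textually because the retired names were, as far as webrtc/typedefs.h is concerned, plain aliases of the <stdint.h> types; reconstructed here from memory for reference, with the header remaining the source of truth:

#include <stdint.h>

/* Legacy aliases retired by this patch (reconstruction, not the header). */
typedef int8_t    WebRtc_Word8;
typedef int16_t   WebRtc_Word16;
typedef int32_t   WebRtc_Word32;
typedef int64_t   WebRtc_Word64;
typedef uint8_t   WebRtc_UWord8;
typedef uint16_t  WebRtc_UWord16;
typedef uint32_t  WebRtc_UWord32;
typedef uint64_t  WebRtc_UWord64;

Since each alias is an exact typedef of its stdint counterpart, substituting the underlying type changes neither size, signedness, nor ABI, so the patch is behavior-preserving by construction.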