git-svn-id: http://webrtc.googlecode.com/svn/trunk@4 4adac7df-926f-26a2-2b94-8c16560cd09d

This commit is contained in:
niklase@google.com
2011-05-30 11:22:19 +00:00
parent 01813fe945
commit 77ae29bc81
1153 changed files with 404089 additions and 0 deletions

View File

@@ -0,0 +1,236 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_MODULES_AUDIO_CODING_CODECS_CNG_MAIN_INTERFACE_WEBRTC_CNG_H_
#define WEBRTC_MODULES_AUDIO_CODING_CODECS_CNG_MAIN_INTERFACE_WEBRTC_CNG_H_
#include "typedefs.h"
#ifdef __cplusplus
extern "C" {
#endif
#define WEBRTC_CNG_MAX_LPC_ORDER 12
#define WEBRTC_CNG_MAX_OUTSIZE_ORDER 640
/* Define Error codes */
/* 6100 Encoder */
#define CNG_ENCODER_MEMORY_ALLOCATION_FAILED 6110
#define CNG_ENCODER_NOT_INITIATED 6120
#define CNG_DISALLOWED_LPC_ORDER 6130
#define CNG_DISALLOWED_FRAME_SIZE 6140
#define CNG_DISALLOWED_SAMPLING_FREQUENCY 6150
/* 6200 Decoder */
#define CNG_DECODER_MEMORY_ALLOCATION_FAILED 6210
#define CNG_DECODER_NOT_INITIATED 6220
typedef struct WebRtcCngEncInst CNG_enc_inst;
typedef struct WebRtcCngDecInst CNG_dec_inst;
/****************************************************************************
* WebRtcCng_Version(...)
*
* This function returns the version string (the string buffer must be at
* least 500 characters long).
*
* Output:
* - version : Pointer to character string
*
* Return value : 0 - Ok
* -1 - Error
*/
WebRtc_Word16 WebRtcCng_Version(WebRtc_Word8 *version);
/****************************************************************************
* WebRtcCng_AssignSizeEnc/Dec(...)
*
* These functions return the size needed for storing an encoder or decoder
* instance, respectively.
*
* Input/Output:
* - sizeinbytes : Pointer to integer where the size is returned
*
* Return value : 0
*/
WebRtc_Word16 WebRtcCng_AssignSizeEnc(int *sizeinbytes);
WebRtc_Word16 WebRtcCng_AssignSizeDec(int *sizeinbytes);
/****************************************************************************
* WebRtcCng_AssignEnc/Dec(...)
*
* These functions assign memory for the instances.
*
* Input:
* - CNG_inst_Addr : Address of the memory where the instance is assigned
* Output:
* - inst : Pointer to the instance that should be created
*
* Return value : 0 - Ok
* -1 - Error
*/
WebRtc_Word16 WebRtcCng_AssignEnc(CNG_enc_inst **inst, void *CNG_inst_Addr);
WebRtc_Word16 WebRtcCng_AssignDec(CNG_dec_inst **inst, void *CNG_inst_Addr);
/****************************************************************************
* WebRtcCng_CreateEnc/Dec(...)
*
* These functions create an instance of the specified structure.
*
* Input:
* - cng_inst : Pointer to the instance to be created
*
* Return value : 0 - Ok
* -1 - Error
*/
WebRtc_Word16 WebRtcCng_CreateEnc(CNG_enc_inst **cng_inst);
WebRtc_Word16 WebRtcCng_CreateDec(CNG_dec_inst **cng_inst);
/****************************************************************************
* WebRtcCng_InitEnc/Dec(...)
*
* These functions initialize an instance.
*
* Input:
* - cng_inst : Instance that should be initialized
*
* - fs : 8000 for narrowband and 16000 for wideband
* - interval : generate SID data every interval ms
* - quality : Number of refl. coefs, maximum allowed is 12
*
* Output:
* - cng_inst : Initialized instance
*
* Return value : 0 - Ok
* -1 - Error
*/
WebRtc_Word16 WebRtcCng_InitEnc(CNG_enc_inst *cng_inst,
WebRtc_Word16 fs,
WebRtc_Word16 interval,
WebRtc_Word16 quality);
WebRtc_Word16 WebRtcCng_InitDec(CNG_dec_inst *cng_dec_inst);
/****************************************************************************
* WebRtcCng_FreeEnc/Dec(...)
*
* These functions free the dynamic memory of a specified instance.
*
* Input:
* - cng_inst : Pointer to created instance that should be freed
*
* Return value : 0 - Ok
* -1 - Error
*/
WebRtc_Word16 WebRtcCng_FreeEnc(CNG_enc_inst *cng_inst);
WebRtc_Word16 WebRtcCng_FreeDec(CNG_dec_inst *cng_inst);
/****************************************************************************
* WebRtcCng_Encode(...)
*
* This function analyzes the background noise.
*
* Input:
* - cng_inst : Pointer to created instance
* - speech : Signal to be analyzed
* - nrOfSamples : Size of speech vector
* - forceSID : nonzero to force a SID frame and reset
*
* Output:
* - bytesOut : Number of bytes to transmit; may be 0
*
* Return value : 0 - Ok
* -1 - Error
*/
WebRtc_Word16 WebRtcCng_Encode(CNG_enc_inst *cng_inst,
WebRtc_Word16 *speech,
WebRtc_Word16 nrOfSamples,
WebRtc_UWord8* SIDdata,
WebRtc_Word16 *bytesOut,
WebRtc_Word16 forceSID);
/****************************************************************************
* WebRtcCng_UpdateSid(...)
*
* This function updates the CN state when a new SID packet arrives.
*
* Input:
* - cng_inst : Pointer to created instance
* - SID : SID packet, all headers removed
* - length : Length in bytes of SID packet
*
* Return value : 0 - Ok
* -1 - Error
*/
WebRtc_Word16 WebRtcCng_UpdateSid(CNG_dec_inst *cng_inst,
WebRtc_UWord8 *SID,
WebRtc_Word16 length);
/****************************************************************************
* WebRtcCng_Generate(...)
*
* This function generates CN data when needed.
*
* Input:
* - cng_inst : Pointer to created instance
* - outData : Pointer to area where the CN data is written
* - nrOfSamples : Number of samples to generate
* - new_period : >0 if a new period of CNG, will reset history
*
* Return value : 0 - Ok
* -1 - Error
*/
WebRtc_Word16 WebRtcCng_Generate(CNG_dec_inst *cng_inst,
WebRtc_Word16 * outData,
WebRtc_Word16 nrOfSamples,
WebRtc_Word16 new_period);
/*****************************************************************************
* WebRtcCng_GetErrorCodeEnc/Dec(...)
*
* These functions can be used to check the error code of a CNG instance. When
* a function returns -1, an error code is set for that instance. The
* functions below extract the code of the last error that occurred in the
* specified instance.
*
* Input:
* - CNG_inst : CNG enc/dec instance
*
* Return value : Error code
*/
WebRtc_Word16 WebRtcCng_GetErrorCodeEnc(CNG_enc_inst *cng_inst);
WebRtc_Word16 WebRtcCng_GetErrorCodeDec(CNG_dec_inst *cng_inst);
#ifdef __cplusplus
}
#endif
#endif // WEBRTC_MODULES_AUDIO_CODING_CODECS_CNG_MAIN_INTERFACE_WEBRTC_CNG_H_
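A minimal usage sketch of the API above (illustrative only, not part of the original header; the 160-sample frame, the 16 kHz rate and the 100 ms SID interval are arbitrary choices):

#include "webrtc_cng.h"

/* Encode one frame of background noise and regenerate comfort noise from it. */
int cng_example(WebRtc_Word16 *noise_frame /* 160 samples */)
{
    CNG_enc_inst *enc;
    CNG_dec_inst *dec;
    WebRtc_UWord8 sid[WEBRTC_CNG_MAX_LPC_ORDER + 1];
    WebRtc_Word16 sid_bytes = 0;
    WebRtc_Word16 out[WEBRTC_CNG_MAX_OUTSIZE_ORDER];

    if (WebRtcCng_CreateEnc(&enc) < 0 || WebRtcCng_CreateDec(&dec) < 0)
        return -1;
    /* 16 kHz, SID every 100 ms, LPC order 12. */
    if (WebRtcCng_InitEnc(enc, 16000, 100, WEBRTC_CNG_MAX_LPC_ORDER) < 0 ||
        WebRtcCng_InitDec(dec) < 0)
        return -1;

    /* Analyze the frame; forceSID = 1 so SID data is produced immediately. */
    if (WebRtcCng_Encode(enc, noise_frame, 160, sid, &sid_bytes, 1) < 0)
        return -1;
    if (sid_bytes > 0)
        WebRtcCng_UpdateSid(dec, sid, sid_bytes);

    /* Generate 160 samples of comfort noise; new_period = 1 resets history. */
    if (WebRtcCng_Generate(dec, out, 160, 1) < 0)
        return -1;

    WebRtcCng_FreeEnc(enc);
    WebRtcCng_FreeDec(dec);
    return 0;
}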

View File

@@ -0,0 +1,42 @@
# Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
{
'includes': [
'../../../../../../common_settings.gypi', # Common settings
],
'targets': [
{
'target_name': 'CNG',
'type': '<(library)',
'dependencies': [
'../../../../../../common_audio/signal_processing_library/main/source/spl.gyp:spl',
],
'include_dirs': [
'../interface',
],
'direct_dependent_settings': {
'include_dirs': [
'../interface',
],
},
'sources': [
'../interface/webrtc_cng.h',
'webrtc_cng.c',
'cng_helpfuns.c',
'cng_helpfuns.h',
],
},
],
}
# Local Variables:
# tab-width:2
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=2 shiftwidth=2:

View File

@@ -0,0 +1,64 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "webrtc_cng.h"
#include "signal_processing_library.h"
#include "typedefs.h"
#include "cng_helpfuns.h"
#ifdef __cplusplus
extern "C" {
#endif
void WebRtcCng_K2a16(
WebRtc_Word16 *k, /* Q15. */
int useOrder,
WebRtc_Word16 *a /* Q12. */
)
{
WebRtc_Word16 any[WEBRTC_SPL_MAX_LPC_ORDER+1];
WebRtc_Word16 *aptr, *aptr2, *anyptr;
G_CONST WebRtc_Word16 *kptr;
int m, i;
kptr = k;
*a = 4096; /* i.e., (Word16_MAX >> 3)+1 */
*any = *a;
a[1] = (*k+4) >> 3;
for( m=1; m<useOrder; m++ )
{
kptr++;
aptr = a;
aptr++;
aptr2 = &a[m];
anyptr = any;
anyptr++;
any[m+1] = (*kptr+4) >> 3;
for( i=0; i<m; i++ ) {
*anyptr++ = (*aptr++) + (WebRtc_Word16)( (( (WebRtc_Word32)(*aptr2--) * (WebRtc_Word32)*kptr )+16384) >> 15);
}
aptr = a;
anyptr = any;
for( i=0; i<(m+2); i++ ){
*aptr++ = *anyptr++;
}
}
}
#ifdef __cplusplus
}
#endif
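For reference, a floating-point sketch of the same step-up recursion (reflection coefficients k to AR polynomial a); this is an illustrative reading of the fixed-point routine above, in which k is Q15 and a is Q12:

/* a must hold useOrder + 1 coefficients; a[0] is always 1. */
static void k2a_float(const float *k, int useOrder, float *a)
{
    float any[14];  /* max LPC order 12, plus 2 */
    int m, i;

    a[0] = 1.0f;
    a[1] = k[0];
    for (m = 1; m < useOrder; m++) {
        any[0] = a[0];
        any[m + 1] = k[m];
        for (i = 0; i < m; i++)
            any[i + 1] = a[i + 1] + k[m] * a[m - i];
        for (i = 0; i < m + 2; i++)
            a[i] = any[i];
    }
}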

View File

@@ -0,0 +1,28 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_MODULES_AUDIO_CODING_CODECS_CNG_MAIN_SOURCE_CNG_HELPFUNS_H_
#define WEBRTC_MODULES_AUDIO_CODING_CODECS_CNG_MAIN_SOURCE_CNG_HELPFUNS_H_
extern WebRtc_Word32 lpc_lagwinTbl_fixw32[WEBRTC_CNG_MAX_LPC_ORDER + 1];
#ifdef __cplusplus
extern "C" {
#endif
void WebRtcCng_K2a16(WebRtc_Word16 *k, int useOrder, WebRtc_Word16 *a);
#ifdef __cplusplus
}
#endif
#endif // WEBRTC_MODULES_AUDIO_CODING_CODECS_CNG_MAIN_SOURCE_CNG_HELPFUNS_H_

View File

@@ -0,0 +1,735 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include <string.h>
#include <stdlib.h>
#include "webrtc_cng.h"
#include "signal_processing_library.h"
#include "cng_helpfuns.h"
#include "stdio.h"
typedef struct WebRtcCngDecInst_t_ {
WebRtc_UWord32 dec_seed;
WebRtc_Word32 dec_target_energy;
WebRtc_Word32 dec_used_energy;
WebRtc_Word16 dec_target_reflCoefs[WEBRTC_CNG_MAX_LPC_ORDER+1];
WebRtc_Word16 dec_used_reflCoefs[WEBRTC_CNG_MAX_LPC_ORDER+1];
WebRtc_Word16 dec_filtstate[WEBRTC_CNG_MAX_LPC_ORDER+1];
WebRtc_Word16 dec_filtstateLow[WEBRTC_CNG_MAX_LPC_ORDER+1];
WebRtc_Word16 dec_Efiltstate[WEBRTC_CNG_MAX_LPC_ORDER+1];
WebRtc_Word16 dec_EfiltstateLow[WEBRTC_CNG_MAX_LPC_ORDER+1];
WebRtc_Word16 dec_order;
WebRtc_Word16 dec_target_scale_factor; /*Q29*/
WebRtc_Word16 dec_used_scale_factor; /*Q29*/
WebRtc_Word16 target_scale_factor; /* Q13 */
WebRtc_Word16 errorcode;
WebRtc_Word16 initflag;
} WebRtcCngDecInst_t;
typedef struct WebRtcCngEncInst_t_ {
WebRtc_Word16 enc_nrOfCoefs;
WebRtc_Word16 enc_sampfreq;
WebRtc_Word16 enc_interval;
WebRtc_Word16 enc_msSinceSID;
WebRtc_Word32 enc_Energy;
WebRtc_Word16 enc_reflCoefs[WEBRTC_CNG_MAX_LPC_ORDER+1];
WebRtc_Word32 enc_corrVector[WEBRTC_CNG_MAX_LPC_ORDER+1];
WebRtc_Word16 enc_filtstate[WEBRTC_CNG_MAX_LPC_ORDER+1];
WebRtc_Word16 enc_filtstateLow[WEBRTC_CNG_MAX_LPC_ORDER+1];
WebRtc_UWord32 enc_seed;
WebRtc_Word16 errorcode;
WebRtc_Word16 initflag;
} WebRtcCngEncInst_t;
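/* Energy levels for the 94 possible SID noise levels. Consecutive entries
 * differ by a factor of 10^(1/10), i.e. each step is 1 dB lower than the
 * previous one, so entry i roughly corresponds to a level of -i dBov. */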
const WebRtc_Word32 WebRtcCng_kDbov[94]={
1081109975, 858756178, 682134279, 541838517, 430397633, 341876992,
271562548, 215709799, 171344384, 136103682, 108110997, 85875618,
68213428, 54183852, 43039763, 34187699, 27156255, 21570980,
17134438, 13610368, 10811100, 8587562, 6821343, 5418385,
4303976, 3418770, 2715625, 2157098, 1713444, 1361037,
1081110, 858756, 682134, 541839, 430398, 341877,
271563, 215710, 171344, 136104, 108111, 85876,
68213, 54184, 43040, 34188, 27156, 21571,
17134, 13610, 10811, 8588, 6821, 5418,
4304, 3419, 2716, 2157, 1713, 1361,
1081, 859, 682, 542, 430, 342,
272, 216, 171, 136, 108, 86,
68, 54, 43, 34, 27, 22,
17, 14, 11, 9, 7, 5,
4, 3, 3, 2, 2, 1,
1, 1, 1, 1
};
const WebRtc_Word16 WebRtcCng_kCorrWindow[WEBRTC_CNG_MAX_LPC_ORDER] = {
32702, 32636, 32570, 32505, 32439, 32374,
32309, 32244, 32179, 32114, 32049, 31985
};
/****************************************************************************
* WebRtcCng_Version(...)
*
* This function returns the version string (the string buffer must be at
* least 500 characters long).
*
* Output:
* - version : Pointer to character string
*
* Return value : 0 - Ok
* -1 - Error
*/
WebRtc_Word16 WebRtcCng_Version(WebRtc_Word8 *version)
{
strcpy((char*)version,(const char*)"1.2.0\n");
return(0);
}
/****************************************************************************
* WebRtcCng_AssignSizeEnc/Dec(...)
*
* These functions return the size needed for storing an encoder or decoder
* instance, respectively.
*
* Input/Output:
* - sizeinbytes : Pointer to integer where the size is returned
*
* Return value : 0
*/
WebRtc_Word16 WebRtcCng_AssignSizeEnc(int *sizeinbytes)
{
*sizeinbytes=sizeof(WebRtcCngEncInst_t)*2/sizeof(WebRtc_Word16);
return(0);
}
WebRtc_Word16 WebRtcCng_AssignSizeDec(int *sizeinbytes)
{
*sizeinbytes=sizeof(WebRtcCngDecInst_t)*2/sizeof(WebRtc_Word16);
return(0);
}
/****************************************************************************
* WebRtcCng_AssignEnc/Dec(...)
*
* These functions assign memory for the instances.
*
* Input:
* - CNG_inst_Addr : Address of the memory where the instance is assigned
* Output:
* - inst : Pointer to the instance that should be created
*
* Return value : 0 - Ok
* -1 - Error
*/
WebRtc_Word16 WebRtcCng_AssignEnc(CNG_enc_inst **inst, void *CNG_inst_Addr)
{
if (CNG_inst_Addr!=NULL) {
*inst = (CNG_enc_inst*)CNG_inst_Addr;
(*(WebRtcCngEncInst_t**) inst)->errorcode = 0;
(*(WebRtcCngEncInst_t**) inst)->initflag = 0;
return(0);
} else {
/* The memory could not be allocated */
return(-1);
}
}
WebRtc_Word16 WebRtcCng_AssignDec(CNG_dec_inst **inst, void *CNG_inst_Addr)
{
if (CNG_inst_Addr!=NULL) {
*inst = (CNG_dec_inst*)CNG_inst_Addr;
(*(WebRtcCngDecInst_t**) inst)->errorcode = 0;
(*(WebRtcCngDecInst_t**) inst)->initflag = 0;
return(0);
} else {
/* The memory could not be allocated */
return(-1);
}
}
/****************************************************************************
* WebRtcCng_CreateEnc/Dec(...)
*
* These functions create an instance of the specified structure.
*
* Input:
* - cng_inst : Pointer to the instance to be created
*
* Return value : 0 - Ok
* -1 - Error
*/
WebRtc_Word16 WebRtcCng_CreateEnc(CNG_enc_inst **cng_inst)
{
*cng_inst=(CNG_enc_inst*)malloc(sizeof(WebRtcCngEncInst_t));
if(cng_inst!=NULL) {
(*(WebRtcCngEncInst_t**) cng_inst)->errorcode = 0;
(*(WebRtcCngEncInst_t**) cng_inst)->initflag = 0;
return(0);
}
else {
/* The memory could not be allocated */
return(-1);
}
}
WebRtc_Word16 WebRtcCng_CreateDec(CNG_dec_inst **cng_inst)
{
*cng_inst=(CNG_dec_inst*)malloc(sizeof(WebRtcCngDecInst_t));
if(cng_inst!=NULL) {
(*(WebRtcCngDecInst_t**) cng_inst)->errorcode = 0;
(*(WebRtcCngDecInst_t**) cng_inst)->initflag = 0;
return(0);
}
else {
/* The memory could not be allocated */
return(-1);
}
}
/****************************************************************************
* WebRtcCng_InitEnc/Dec(...)
*
* These functions initialize an instance.
*
* Input:
* - cng_inst : Instance that should be initialized
*
* - fs : 8000 for narrowband and 16000 for wideband
* - interval : generate SID data every interval ms
* - quality : Number of refl. coefs, maximum allowed is 12
*
* Output:
* - cng_inst : Initialized instance
*
* Return value : 0 - Ok
* -1 - Error
*/
WebRtc_Word16 WebRtcCng_InitEnc(CNG_enc_inst *cng_inst,
WebRtc_Word16 fs,
WebRtc_Word16 interval,
WebRtc_Word16 quality)
{
int i;
WebRtcCngEncInst_t* inst=(WebRtcCngEncInst_t*)cng_inst;
memset(inst, 0, sizeof(WebRtcCngEncInst_t));
/* Check LPC order */
if (quality>WEBRTC_CNG_MAX_LPC_ORDER) {
inst->errorcode = CNG_DISALLOWED_LPC_ORDER;
return (-1);
}
if (fs<=0) {
inst->errorcode = CNG_DISALLOWED_SAMPLING_FREQUENCY;
return (-1);
}
inst->enc_sampfreq=fs;
inst->enc_interval=interval;
inst->enc_nrOfCoefs=quality;
inst->enc_msSinceSID=0;
inst->enc_seed=7777; /*For debugging only*/
inst->enc_Energy=0;
for(i=0;i<(WEBRTC_CNG_MAX_LPC_ORDER+1);i++){
inst->enc_reflCoefs[i]=0;
inst->enc_corrVector[i]=0;
}
inst->initflag=1;
return(0);
}
WebRtc_Word16 WebRtcCng_InitDec(CNG_dec_inst *cng_inst)
{
int i;
WebRtcCngDecInst_t* inst=(WebRtcCngDecInst_t*)cng_inst;
memset(inst, 0, sizeof(WebRtcCngDecInst_t));
inst->dec_seed=7777; /*For debugging only*/
inst->dec_order=5;
inst->dec_target_scale_factor=0;
inst->dec_used_scale_factor=0;
for(i=0;i<(WEBRTC_CNG_MAX_LPC_ORDER+1);i++){
inst->dec_filtstate[i]=0;
inst->dec_target_reflCoefs[i]=0;
inst->dec_used_reflCoefs[i]=0;
}
inst->dec_target_reflCoefs[0]=0;
inst->dec_used_reflCoefs[0]=0;
inst ->dec_used_energy=0;
inst->initflag=1;
return(0);
}
/****************************************************************************
* WebRtcCng_FreeEnc/Dec(...)
*
* These functions free the dynamic memory of a specified instance.
*
* Input:
* - cng_inst : Pointer to created instance that should be freed
*
* Return value : 0 - Ok
* -1 - Error
*/
WebRtc_Word16 WebRtcCng_FreeEnc(CNG_enc_inst *cng_inst)
{
free(cng_inst);
return(0);
}
WebRtc_Word16 WebRtcCng_FreeDec(CNG_dec_inst *cng_inst)
{
free(cng_inst);
return(0);
}
/****************************************************************************
* WebRtcCng_Encode(...)
*
* This function analyzes the background noise.
*
* Input:
* - cng_inst : Pointer to created instance
* - speech : Signal (noise) to be analyzed
* - nrOfSamples : Size of speech vector
* - forceSID : nonzero to force a SID frame and reset
*
* Output:
* - bytesOut : Number of bytes to transmit; may be 0
*
* Return value : 0 - Ok
* -1 - Error
*/
WebRtc_Word16 WebRtcCng_Encode(CNG_enc_inst *cng_inst,
WebRtc_Word16 *speech,
WebRtc_Word16 nrOfSamples,
WebRtc_UWord8* SIDdata,
WebRtc_Word16* bytesOut,
WebRtc_Word16 forceSID)
{
WebRtcCngEncInst_t* inst=(WebRtcCngEncInst_t*)cng_inst;
WebRtc_Word16 arCoefs[WEBRTC_CNG_MAX_LPC_ORDER+1];
WebRtc_Word32 corrVector[WEBRTC_CNG_MAX_LPC_ORDER+1];
WebRtc_Word16 refCs[WEBRTC_CNG_MAX_LPC_ORDER+1];
WebRtc_Word16 hanningW[WEBRTC_CNG_MAX_OUTSIZE_ORDER];
WebRtc_Word16 ReflBeta=19661; /*0.6 in q15*/
WebRtc_Word16 ReflBetaComp=13107; /*0.4 in q15*/
WebRtc_Word32 outEnergy;
int outShifts;
int i, stab;
int acorrScale;
int index;
WebRtc_Word32 diff;
WebRtc_Word16 ind,factor;
WebRtc_Word32 *bptr, blo, bhi;
WebRtc_Word16 negate;
const WebRtc_Word16 *aptr;
WebRtc_Word16 speechBuf[WEBRTC_CNG_MAX_OUTSIZE_ORDER];
/* check if encoder initiated */
if (inst->initflag != 1) {
inst->errorcode = CNG_ENCODER_NOT_INITIATED;
return (-1);
}
/* check framesize */
if (nrOfSamples>WEBRTC_CNG_MAX_OUTSIZE_ORDER) {
inst->errorcode = CNG_DISALLOWED_FRAME_SIZE;
return (-1);
}
for(i=0;i<nrOfSamples;i++){
speechBuf[i]=speech[i];
}
factor=nrOfSamples;
/* Calculate energy and a coefficients */
outEnergy =WebRtcSpl_Energy(speechBuf, nrOfSamples, &outShifts);
while(outShifts>0){
if(outShifts>5){ /*We can only do 5 shifts without destroying accuracy in division factor*/
outEnergy<<=(outShifts-5);
outShifts=5;
}
else{
factor/=2;
outShifts--;
}
}
outEnergy=WebRtcSpl_DivW32W16(outEnergy,factor);
if (outEnergy > 1){
/* Create Hanning Window */
WebRtcSpl_GetHanningWindow(hanningW, nrOfSamples/2);
for( i=0;i<(nrOfSamples/2);i++ )
hanningW[nrOfSamples-i-1]=hanningW[i];
WebRtcSpl_ElementwiseVectorMult(speechBuf, hanningW, speechBuf, nrOfSamples, 14);
WebRtcSpl_AutoCorrelation( speechBuf, nrOfSamples, inst->enc_nrOfCoefs, corrVector, &acorrScale );
if( *corrVector==0 )
*corrVector = WEBRTC_SPL_WORD16_MAX;
/* Adds the bandwidth expansion */
aptr = WebRtcCng_kCorrWindow;
bptr = corrVector;
// (zzz) lpc16_1 = 17+1+820+2+2 = 842 (ordo2=700)
for( ind=0; ind<inst->enc_nrOfCoefs; ind++ )
{
// The below code multiplies the 16 b corrWindow values (Q15) with
// the 32 b corrvector (Q0) and shifts the result down 15 steps.
negate = *bptr<0;
if( negate )
*bptr = -*bptr;
blo = (WebRtc_Word32)*aptr * (*bptr & 0xffff);
bhi = ((blo >> 16) & 0xffff) + ((WebRtc_Word32)(*aptr++) * ((*bptr >> 16) & 0xffff));
blo = (blo & 0xffff) | ((bhi & 0xffff) << 16);
*bptr = (( (bhi>>16) & 0x7fff) << 17) | ((WebRtc_UWord32)blo >> 15);
if( negate )
*bptr = -*bptr;
bptr++;
}
// end of bandwidth expansion
stab=WebRtcSpl_LevinsonDurbin(corrVector, arCoefs, refCs, inst->enc_nrOfCoefs);
if(!stab){
// disregard from this frame
*bytesOut=0;
return(0);
}
}
else {
for(i=0;i<inst->enc_nrOfCoefs; i++)
refCs[i]=0;
}
if(forceSID){
/*Read instantaneous values instead of averaged*/
for(i=0;i<inst->enc_nrOfCoefs;i++)
inst->enc_reflCoefs[i]=refCs[i];
inst->enc_Energy=outEnergy;
}
else{
/*Average history with new values*/
for(i=0;i<(inst->enc_nrOfCoefs);i++){
inst->enc_reflCoefs[i]=(WebRtc_Word16)WEBRTC_SPL_MUL_16_16_RSFT(inst->enc_reflCoefs[i],ReflBeta,15);
inst->enc_reflCoefs[i]+=(WebRtc_Word16)WEBRTC_SPL_MUL_16_16_RSFT(refCs[i],ReflBetaComp,15);
}
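/* First-order smoothing: enc_Energy = 0.25 * outEnergy + 0.75 * enc_Energy. */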
inst->enc_Energy=(outEnergy>>2)+(inst->enc_Energy>>1)+(inst->enc_Energy>>2);
}
if(inst->enc_Energy<1){
inst->enc_Energy=1;
}
if((inst->enc_msSinceSID>(inst->enc_interval-1))||forceSID){
/* Search for best dbov value */
/* Clumsy linear search that can be optimized since database is sorted */
index=0;
diff=WEBRTC_SPL_ABS_W32(inst->enc_Energy-WebRtcCng_kDbov[index]);
for(i=1;i<93;i++){
/* Always round downwards */
if((inst->enc_Energy-WebRtcCng_kDbov[i])>0){
index=i;
break;
}
}
if((i==93)&&(index==0))
index=94;
SIDdata[0]=index;
/* Quantize coefs with tweak for WebRtc implementation of RFC3389 */
if(inst->enc_nrOfCoefs==WEBRTC_CNG_MAX_LPC_ORDER){
for(i=0;i<inst->enc_nrOfCoefs;i++){
SIDdata[i+1]=((inst->enc_reflCoefs[i]+128)>>8); /* Q15 to Q7*/ /* +127 */
}
}else{
for(i=0;i<inst->enc_nrOfCoefs;i++){
SIDdata[i+1]=(127+((inst->enc_reflCoefs[i]+128)>>8)); /* Q15 to Q7*/ /* +127 */
}
}
inst->enc_msSinceSID=0;
*bytesOut=inst->enc_nrOfCoefs+1;
inst->enc_msSinceSID+=(1000*nrOfSamples)/inst->enc_sampfreq;
return(inst->enc_nrOfCoefs+1);
}else{
inst->enc_msSinceSID+=(1000*nrOfSamples)/inst->enc_sampfreq;
*bytesOut=0;
return(0);
}
}
/****************************************************************************
* WebRtcCng_UpdateSid(...)
*
* This function updates the CN state when a new SID packet arrives.
*
* Input:
* - cng_inst : Pointer to created instance
* - SID : SID packet, all headers removed
* - length : Length in bytes of SID packet
*
* Return value : 0 - Ok
* -1 - Error
*/
WebRtc_Word16 WebRtcCng_UpdateSid(CNG_dec_inst *cng_inst,
WebRtc_UWord8 *SID,
WebRtc_Word16 length)
{
WebRtcCngDecInst_t* inst=(WebRtcCngDecInst_t*)cng_inst;
WebRtc_Word16 refCs[WEBRTC_CNG_MAX_LPC_ORDER];
WebRtc_Word32 targetEnergy;
int i;
if (inst->initflag != 1) {
inst->errorcode = CNG_DECODER_NOT_INITIATED;
return (-1);
}
/*Throw away reflection coefficients of higher order than we can handle*/
if(length> (WEBRTC_CNG_MAX_LPC_ORDER+1))
length=WEBRTC_CNG_MAX_LPC_ORDER+1;
inst->dec_order=length-1;
if(SID[0]>93)
SID[0]=93;
targetEnergy=WebRtcCng_kDbov[SID[0]];
/* Take down target energy to 75% */
targetEnergy=targetEnergy>>1;
targetEnergy+=targetEnergy>>2;
inst->dec_target_energy=targetEnergy;
/* Reconstruct coeffs with tweak for WebRtc implementation of RFC3389 */
if(inst->dec_order==WEBRTC_CNG_MAX_LPC_ORDER){
for(i=0;i<(inst->dec_order);i++){
refCs[i]=SID[i+1]<<8; /* Q7 to Q15*/
inst->dec_target_reflCoefs[i]=refCs[i];
}
}else{
for(i=0;i<(inst->dec_order);i++){
refCs[i]=(SID[i+1]-127)<<8; /* Q7 to Q15*/
inst->dec_target_reflCoefs[i]=refCs[i];
}
}
for(i=(inst->dec_order);i<WEBRTC_CNG_MAX_LPC_ORDER;i++){
refCs[i]=0;
inst->dec_target_reflCoefs[i]=refCs[i];
}
return(0);
}
/****************************************************************************
* WebRtcCng_Generate(...)
*
* This function generates CN data when needed.
*
* Input:
* - cng_inst : Pointer to created instance
* - outData : Pointer to area where the CN data is written
* - nrOfSamples : Number of samples to generate
* - new_period : >0 if a new period of CNG, will reset history
*
* Return value : 0 - Ok
* -1 - Error
*/
WebRtc_Word16 WebRtcCng_Generate(CNG_dec_inst *cng_inst,
WebRtc_Word16 *outData,
WebRtc_Word16 nrOfSamples,
WebRtc_Word16 new_period)
{
WebRtcCngDecInst_t* inst=(WebRtcCngDecInst_t*)cng_inst;
int i;
WebRtc_Word16 excitation[WEBRTC_CNG_MAX_OUTSIZE_ORDER];
WebRtc_Word16 low[WEBRTC_CNG_MAX_OUTSIZE_ORDER];
WebRtc_Word16 lpPoly[WEBRTC_CNG_MAX_LPC_ORDER+1];
WebRtc_Word16 ReflBetaStd=26214; /*0.8 in q15*/
WebRtc_Word16 ReflBetaCompStd=6553; /*0.2 in q15*/
WebRtc_Word16 ReflBetaNewP=19661; /*0.6 in q15*/
WebRtc_Word16 ReflBetaCompNewP=13107; /*0.4 in q15*/
WebRtc_Word16 Beta,BetaC, tmp1, tmp2, tmp3;
WebRtc_Word32 targetEnergy;
WebRtc_Word16 En;
WebRtc_Word16 temp16;
if (nrOfSamples>WEBRTC_CNG_MAX_OUTSIZE_ORDER) {
inst->errorcode = CNG_DISALLOWED_FRAME_SIZE;
return (-1);
}
if (new_period) {
inst->dec_used_scale_factor=inst->dec_target_scale_factor;
Beta=ReflBetaNewP;
BetaC=ReflBetaCompNewP;
} else {
Beta=ReflBetaStd;
BetaC=ReflBetaCompStd;
}
/*Here we use a 0.5 weighting, should possibly be modified to 0.6*/
tmp1=inst->dec_used_scale_factor<<2; /* Q13->Q15 */
tmp2=inst->dec_target_scale_factor<<2; /* Q13->Q15 */
tmp3=(WebRtc_Word16)WEBRTC_SPL_MUL_16_16_RSFT(tmp1,Beta,15);
tmp3+=(WebRtc_Word16)WEBRTC_SPL_MUL_16_16_RSFT(tmp2,BetaC,15);
inst->dec_used_scale_factor=tmp3>>2; /* Q15->Q13 */
inst->dec_used_energy=inst->dec_used_energy>>1;
inst->dec_used_energy+=inst->dec_target_energy>>1;
/* Do the same for the reflection coeffs */
for (i=0;i<WEBRTC_CNG_MAX_LPC_ORDER;i++) {
inst->dec_used_reflCoefs[i]=(WebRtc_Word16)WEBRTC_SPL_MUL_16_16_RSFT(inst->dec_used_reflCoefs[i],Beta,15);
inst->dec_used_reflCoefs[i]+=(WebRtc_Word16)WEBRTC_SPL_MUL_16_16_RSFT(inst->dec_target_reflCoefs[i],BetaC,15);
}
/* Compute the polynomial coefficients */
WebRtcCng_K2a16(inst->dec_used_reflCoefs, WEBRTC_CNG_MAX_LPC_ORDER, lpPoly);
/***/
targetEnergy=inst->dec_used_energy;
// Calculate scaling factor based on filter energy
En=8192; //1.0 in Q13
for (i=0; i<(WEBRTC_CNG_MAX_LPC_ORDER); i++) {
// Floating point value for reference
// E*=1.0-((float)inst->dec_used_reflCoefs[i]/32768.0)*((float)inst->dec_used_reflCoefs[i]/32768.0);
// Same in fixed point
temp16=(WebRtc_Word16)WEBRTC_SPL_MUL_16_16_RSFT(inst->dec_used_reflCoefs[i],inst->dec_used_reflCoefs[i],15); // K(i).^2 in Q15
temp16=0x7fff - temp16; // 1 - K(i).^2 in Q15
En=(WebRtc_Word16)WEBRTC_SPL_MUL_16_16_RSFT(En,temp16,15);
}
//float scaling= sqrt(E*inst->dec_target_energy/((1<<24)));
//Calculate sqrt(En*target_energy/excitation energy)
targetEnergy=WebRtcSpl_Sqrt(inst->dec_used_energy);
En=(WebRtc_Word16)WebRtcSpl_Sqrt(En)<<6; //We are missing a factor sqrt(2) here
En=(En*3)>>1; //1.5 estimates sqrt(2)
inst->dec_used_scale_factor=(WebRtc_Word16)((En*targetEnergy)>>12);
/***/
/*Generate excitation*/
/*Excitation energy per sample is 2.^24 - Q13 N(0,1) */
for(i=0;i<nrOfSamples;i++){
excitation[i]=WebRtcSpl_RandN(&inst->dec_seed)>>1;
}
/*Scale to correct energy*/
WebRtcSpl_ScaleVector(excitation, excitation, inst->dec_used_scale_factor, nrOfSamples, 13);
WebRtcSpl_FilterAR(
lpPoly, /* Coefficients in Q12 */
WEBRTC_CNG_MAX_LPC_ORDER+1,
excitation, /* Speech samples */
nrOfSamples,
inst->dec_filtstate, /* State preservation */
WEBRTC_CNG_MAX_LPC_ORDER,
inst->dec_filtstateLow, /* State preservation */
WEBRTC_CNG_MAX_LPC_ORDER,
outData, /* Filtered speech samples */
low,
nrOfSamples
);
return(0);
}
/****************************************************************************
* WebRtcCng_GetErrorCodeEnc/Dec(...)
*
* These functions can be used to check the error code of a CNG instance. When
* a function returns -1, an error code is set for that instance. The
* functions below extract the code of the last error that occurred in the
* specified instance.
*
* Input:
* - CNG_inst : CNG enc/dec instance
*
* Return value : Error code
*/
WebRtc_Word16 WebRtcCng_GetErrorCodeEnc(CNG_enc_inst *cng_inst)
{
/* typecast pointer to real structure */
WebRtcCngEncInst_t* inst=(WebRtcCngEncInst_t*)cng_inst;
return inst->errorcode;
}
WebRtc_Word16 WebRtcCng_GetErrorCodeDec(CNG_dec_inst *cng_inst)
{
/* typecast pointer to real structure */
WebRtcCngDecInst_t* inst=(WebRtcCngDecInst_t*)cng_inst;
return inst->errorcode;
}

View File

@@ -0,0 +1,225 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/*
* CNG.cpp : Defines the entry point for the console application.
*/
#include <stdlib.h>
#include <string.h>
#include "stdafx.h"
#include "webrtc_cng.h"
#include "webrtc_vad.h"
CNG_enc_inst *e_inst;
CNG_dec_inst *d_inst;
VadInst *vinst;
//#define ASSIGN
short anaSpeech[WEBRTC_CNG_MAX_OUTSIZE_ORDER], genSpeech[WEBRTC_CNG_MAX_OUTSIZE_ORDER], state[WEBRTC_CNG_MAX_OUTSIZE_ORDER];
unsigned char SIDpkt[114];
int main(int argc, char* argv[])
{
FILE * infile, *outfile, *statefile;
short res=0,errtype;
/*float time=0.0;*/
WebRtcVad_Create(&vinst);
WebRtcVad_Init(vinst);
short size;
int samps=0;
if (argc < 6){
printf("Usage:\n CNG.exe infile outfile samplingfreq(Hz) interval(ms) order\n\n");
return(0);
}
infile=fopen(argv[1],"rb");
if (infile==NULL){
printf("file %s does not exist\n",argv[1]);
return(0);
}
outfile=fopen(argv[2],"wb");
statefile=fopen("CNGVAD.d","wb");
if (outfile==NULL){
printf("file %s could not be created\n",argv[2]);
return(0);
}
unsigned int fs=16000;
short frameLen=fs/50;
#ifndef ASSIGN
res=WebRtcCng_CreateEnc(&e_inst);
if (res < 0) {
/* exit if returned with error */
errtype=WebRtcCng_GetErrorCodeEnc(e_inst);
fprintf(stderr,"\n\n Error in initialization: %d.\n\n", errtype);
exit(EXIT_FAILURE);
}
res=WebRtcCng_CreateDec(&d_inst);
if (res < 0) {
/* exit if returned with error */
errtype=WebRtcCng_GetErrorCodeDec(d_inst);
fprintf(stderr,"\n\n Error in initialization: %d.\n\n", errtype);
exit(EXIT_FAILURE);
}
#else
// Test the Assign-functions
int Esize, Dsize;
void *Eaddr, *Daddr;
res=WebRtcCng_AssignSizeEnc(&Esize);
res=WebRtcCng_AssignSizeDec(&Dsize);
Eaddr=malloc(Esize);
Daddr=malloc(Dsize);
res=WebRtcCng_AssignEnc(&e_inst, Eaddr);
if (res < 0) {
/* exit if returned with error */
errtype=WebRtcCng_GetErrorCodeEnc(e_inst);
fprintf(stderr,"\n\n Error in initialization: %d.\n\n", errtype);
exit(EXIT_FAILURE);
}
res=WebRtcCng_AssignDec(&d_inst, Daddr);
if (res < 0) {
/* exit if returned with error */
errtype=WebRtcCng_GetErrorCodeDec(d_inst);
fprintf(stderr,"\n\n Error in initialization: %d.\n\n", errtype);
exit(EXIT_FAILURE);
}
#endif
res=WebRtcCng_InitEnc(e_inst,atoi(argv[3]),atoi(argv[4]),atoi(argv[5]));
if (res < 0) {
/* exit if returned with error */
errtype=WebRtcCng_GetErrorCodeEnc(e_inst);
fprintf(stderr,"\n\n Error in initialization: %d.\n\n", errtype);
exit(EXIT_FAILURE);
}
res=WebRtcCng_InitDec(d_inst);
if (res < 0) {
/* exit if returned with error */
errtype=WebRtcCng_GetErrorCodeDec(d_inst);
fprintf(stderr,"\n\n Error in initialization: %d.\n\n", errtype);
exit(EXIT_FAILURE);
}
static bool firstSilent=true;
int numSamp=0;
int speech=0;
int silent=0;
long cnt=0;
while(fread(anaSpeech,2,frameLen,infile)==frameLen){
cnt++;
if (cnt==60){
cnt=60;
}
/* time+=(float)frameLen/fs;
numSamp+=frameLen;
float temp[640];
for(unsigned int j=0;j<frameLen;j++)
temp[j]=(float)anaSpeech[j]; */
// if(!WebRtcVad_Process(vinst, fs, anaSpeech, frameLen)){
if(1){ // Do CNG coding of entire file
// if(!((anaSpeech[0]==0)&&(anaSpeech[1]==0)&&(anaSpeech[2]==0))){
if(firstSilent){
res = WebRtcCng_Encode(e_inst, anaSpeech, frameLen/2, SIDpkt,&size,1);
if (res < 0) {
/* exit if returned with error */
errtype=WebRtcCng_GetErrorCodeEnc(e_inst);
fprintf(stderr,"\n\n Error in encoder: %d.\n\n", errtype);
exit(EXIT_FAILURE);
}
firstSilent=false;
res=WebRtcCng_Encode(e_inst, &anaSpeech[frameLen/2], frameLen/2, SIDpkt,&size,1);
if (res < 0) {
/* exit if returned with error */
errtype=WebRtcCng_GetErrorCodeEnc(e_inst);
fprintf(stderr,"\n\n Error in encoder: %d.\n\n", errtype);
exit(EXIT_FAILURE);
}
}
else{
res=WebRtcCng_Encode(e_inst, anaSpeech, frameLen/2, SIDpkt,&size,0);
if (res < 0) {
/* exit if returned with error */
errtype=WebRtcCng_GetErrorCodeEnc(e_inst);
fprintf(stderr,"\n\n Error in encoder: %d.\n\n", errtype);
exit(EXIT_FAILURE);
}
res=WebRtcCng_Encode(e_inst, &anaSpeech[frameLen/2], frameLen/2, SIDpkt,&size,0);
if (res < 0) {
/* exit if returned with error */
errtype=WebRtcCng_GetErrorCodeEnc(e_inst);
fprintf(stderr,"\n\n Error in encoder: %d.\n\n", errtype);
exit(EXIT_FAILURE);
}
}
if(size>0){
res=WebRtcCng_UpdateSid(d_inst,SIDpkt, size);
if (res < 0) {
/* exit if returned with error */
errtype=WebRtcCng_GetErrorCodeDec(d_inst);
fprintf(stderr,"\n\n Error in decoder: %d.\n\n", errtype);
exit(EXIT_FAILURE);
}
}
res=WebRtcCng_Generate(d_inst,genSpeech, frameLen,0);
if (res < 0) {
/* exit if returned with error */
errtype=WebRtcCng_GetErrorCodeDec(d_inst);
fprintf(stderr,"\n\n Error in decoder: %d.\n\n", errtype);
exit(EXIT_FAILURE);
}
memcpy(state,anaSpeech,2*frameLen);
}
else{
firstSilent=true;
memcpy(genSpeech,anaSpeech,2*frameLen);
memset(anaSpeech,0,frameLen*2);
memset(state,0,frameLen*2);
}
fwrite(genSpeech,2,frameLen,outfile);
fwrite(state,2,frameLen,statefile);
}
fclose(infile);
fclose(outfile);
fclose(statefile);
return 0;
}

View File

@@ -0,0 +1,18 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
// stdafx.cpp : source file that includes just the standard includes
// CNG.pch will be the pre-compiled header
// stdafx.obj will contain the pre-compiled type information
#include "stdafx.h"
// TODO: reference any additional headers you need in STDAFX.H
// and not in this file

View File

@@ -0,0 +1,32 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
// stdafx.h : include file for standard system include files,
// or project specific include files that are used frequently, but
// are changed infrequently
//
#ifndef WEBRTC_MODULES_AUDIO_CODING_CODECS_CNG_MAIN_TEST_STDAFX_H_
#define WEBRTC_MODULES_AUDIO_CODING_CODECS_CNG_MAIN_TEST_STDAFX_H_
#if _MSC_VER > 1000
#pragma once
#endif // _MSC_VER > 1000
#define WIN32_LEAN_AND_MEAN // Exclude rarely-used stuff from Windows headers
#include <stdio.h>
// TODO: reference additional headers your program requires here
//{{AFX_INSERT_LOCATION}}
// Microsoft Visual C++ will insert additional declarations immediately before the previous line.
#endif // WEBRTC_MODULES_AUDIO_CODING_CODECS_CNG_MAIN_TEST_STDAFX_H_

View File

@@ -0,0 +1,148 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MODULES_AUDIO_CODING_CODECS_G711_MAIN_INTERFACE_G711_INTERFACE_H_
#define MODULES_AUDIO_CODING_CODECS_G711_MAIN_INTERFACE_G711_INTERFACE_H_
#include "typedefs.h"
// Comfort noise constants
#define G711_WEBRTC_SPEECH 1
#define G711_WEBRTC_CNG 2
#ifdef __cplusplus
extern "C" {
#endif
/****************************************************************************
* WebRtcG711_EncodeA(...)
*
* This function encodes a G711 A-law frame and inserts it into a packet.
* The input speech can be of any length.
*
* Input:
* - state : Dummy state to make this codec look more like
* other codecs
* - speechIn : Input speech vector
* - len : Samples in speechIn
*
* Output:
* - encoded : The encoded data vector
*
* Return value : >0 - Length (in bytes) of coded data
* -1 - Error
*/
WebRtc_Word16 WebRtcG711_EncodeA(void *state,
WebRtc_Word16 *speechIn,
WebRtc_Word16 len,
WebRtc_Word16 *encoded);
/****************************************************************************
* WebRtcG711_EncodeU(...)
*
* This function encodes a G711 U-law frame and inserts it into a packet.
* The input speech can be of any length.
*
* Input:
* - state : Dummy state to make this codec look more like
* other codecs
* - speechIn : Input speech vector
* - len : Samples in speechIn
*
* Output:
* - encoded : The encoded data vector
*
* Return value : >0 - Length (in bytes) of coded data
* -1 - Error
*/
WebRtc_Word16 WebRtcG711_EncodeU(void *state,
WebRtc_Word16 *speechIn,
WebRtc_Word16 len,
WebRtc_Word16 *encoded);
/****************************************************************************
* WebRtcG711_DecodeA(...)
*
* This function decodes a packet G711 A-law frame.
*
* Input:
* - state : Dummy state to make this codec look more like
* other codecs
* - encoded : Encoded data
* - len : Bytes in encoded vector
*
* Output:
* - decoded : The decoded vector
* - speechType : 1 normal, 2 CNG (for G711 it should
* always return 1 since G711 does not have a
* built-in DTX/CNG scheme)
*
* Return value : >0 - Samples in decoded vector
* -1 - Error
*/
WebRtc_Word16 WebRtcG711_DecodeA(void *state,
WebRtc_Word16 *encoded,
WebRtc_Word16 len,
WebRtc_Word16 *decoded,
WebRtc_Word16 *speechType);
/****************************************************************************
* WebRtcG711_DecodeU(...)
*
* This function decodes a packet G711 U-law frame.
*
* Input:
* - state : Dummy state to make this codec look more like
* other codecs
* - encoded : Encoded data
* - len : Bytes in encoded vector
*
* Output:
* - decoded : The decoded vector
* - speechType : 1 normal, 2 CNG (for G711 it should
* always return 1 since G711 does not have a
* built-in DTX/CNG scheme)
*
* Return value : >0 - Samples in decoded vector
* -1 - Error
*/
WebRtc_Word16 WebRtcG711_DecodeU(void *state,
WebRtc_Word16 *encoded,
WebRtc_Word16 len,
WebRtc_Word16 *decoded,
WebRtc_Word16 *speechType);
/**********************************************************************
* WebRtcG711_Version(...)
*
* This function gives the version string of the G.711 codec.
*
* Input:
* - lenBytes: the size of the allocated space (in bytes) where
* the version number is written to (in string format).
*
* Output:
* - version: Pointer to a buffer where the version number is
* written to.
*
*/
WebRtc_Word16 WebRtcG711_Version(char* version, WebRtc_Word16 lenBytes);
#ifdef __cplusplus
}
#endif
#endif /* MODULES_AUDIO_CODING_CODECS_G711_MAIN_INTERFACE_G711_INTERFACE_H_ */
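A minimal encode/decode round trip through the interface above (an illustrative sketch, not part of the original header; the 80-sample frame and the ramp input are arbitrary):

#include <stdio.h>
#include "g711_interface.h"

int g711_example(void)
{
    WebRtc_Word16 speech[80];    /* 10 ms at 8 kHz */
    WebRtc_Word16 encoded[40];   /* 80 A-law bytes packed two per 16-bit word */
    WebRtc_Word16 decoded[80];
    WebRtc_Word16 speechType;
    WebRtc_Word16 bytes, samples;
    int i;

    for (i = 0; i < 80; i++)
        speech[i] = (WebRtc_Word16)(i * 100);   /* dummy ramp input */

    /* G.711 is stateless, so the state argument is passed as NULL. */
    bytes = WebRtcG711_EncodeA(NULL, speech, 80, encoded);
    samples = WebRtcG711_DecodeA(NULL, encoded, bytes, decoded, &speechType);

    printf("encoded %d bytes, decoded %d samples, speechType %d\n",
           bytes, samples, speechType);
    return (samples == 80) ? 0 : -1;
}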

View File

@@ -0,0 +1,83 @@
/*
* SpanDSP - a series of DSP components for telephony
*
* g711.c - A-law and u-law transcoding routines
*
* Written by Steve Underwood <steveu@coppice.org>
*
* Copyright (C) 2006 Steve Underwood
*
* Despite my general liking of the GPL, I place this code in the
* public domain for the benefit of all mankind - even the slimy
* ones who might try to proprietize my work and use it to my
* detriment.
*
* $Id: g711.c,v 1.1 2006/06/07 15:46:39 steveu Exp $
*
* Modifications for WebRtc, 2011/04/28, by tlegrand:
* -Removed unused include files
* -Changed to use WebRtc types
* -Added option to run encoder bitexact with ITU-T reference implementation
*/
/*! \file */
#include "g711.h"
#include "typedefs.h"
/* Copied from the CCITT G.711 specification */
static const WebRtc_UWord8 ulaw_to_alaw_table[256] =
{
42, 43, 40, 41, 46, 47, 44, 45, 34, 35, 32, 33, 38, 39, 36, 37,
58, 59, 56, 57, 62, 63, 60, 61, 50, 51, 48, 49, 54, 55, 52, 53,
10, 11, 8, 9, 14, 15, 12, 13, 2, 3, 0, 1, 6, 7, 4, 26,
27, 24, 25, 30, 31, 28, 29, 18, 19, 16, 17, 22, 23, 20, 21, 106,
104, 105, 110, 111, 108, 109, 98, 99, 96, 97, 102, 103, 100, 101, 122, 120,
126, 127, 124, 125, 114, 115, 112, 113, 118, 119, 116, 117, 75, 73, 79, 77,
66, 67, 64, 65, 70, 71, 68, 69, 90, 91, 88, 89, 94, 95, 92, 93,
82, 82, 83, 83, 80, 80, 81, 81, 86, 86, 87, 87, 84, 84, 85, 85,
170, 171, 168, 169, 174, 175, 172, 173, 162, 163, 160, 161, 166, 167, 164, 165,
186, 187, 184, 185, 190, 191, 188, 189, 178, 179, 176, 177, 182, 183, 180, 181,
138, 139, 136, 137, 142, 143, 140, 141, 130, 131, 128, 129, 134, 135, 132, 154,
155, 152, 153, 158, 159, 156, 157, 146, 147, 144, 145, 150, 151, 148, 149, 234,
232, 233, 238, 239, 236, 237, 226, 227, 224, 225, 230, 231, 228, 229, 250, 248,
254, 255, 252, 253, 242, 243, 240, 241, 246, 247, 244, 245, 203, 201, 207, 205,
194, 195, 192, 193, 198, 199, 196, 197, 218, 219, 216, 217, 222, 223, 220, 221,
210, 210, 211, 211, 208, 208, 209, 209, 214, 214, 215, 215, 212, 212, 213, 213
};
/* These transcoding tables are copied from the CCITT G.711 specification. To achieve
optimal results, do not change them. */
static const WebRtc_UWord8 alaw_to_ulaw_table[256] =
{
42, 43, 40, 41, 46, 47, 44, 45, 34, 35, 32, 33, 38, 39, 36, 37,
57, 58, 55, 56, 61, 62, 59, 60, 49, 50, 47, 48, 53, 54, 51, 52,
10, 11, 8, 9, 14, 15, 12, 13, 2, 3, 0, 1, 6, 7, 4, 5,
26, 27, 24, 25, 30, 31, 28, 29, 18, 19, 16, 17, 22, 23, 20, 21,
98, 99, 96, 97, 102, 103, 100, 101, 93, 93, 92, 92, 95, 95, 94, 94,
116, 118, 112, 114, 124, 126, 120, 122, 106, 107, 104, 105, 110, 111, 108, 109,
72, 73, 70, 71, 76, 77, 74, 75, 64, 65, 63, 63, 68, 69, 66, 67,
86, 87, 84, 85, 90, 91, 88, 89, 79, 79, 78, 78, 82, 83, 80, 81,
170, 171, 168, 169, 174, 175, 172, 173, 162, 163, 160, 161, 166, 167, 164, 165,
185, 186, 183, 184, 189, 190, 187, 188, 177, 178, 175, 176, 181, 182, 179, 180,
138, 139, 136, 137, 142, 143, 140, 141, 130, 131, 128, 129, 134, 135, 132, 133,
154, 155, 152, 153, 158, 159, 156, 157, 146, 147, 144, 145, 150, 151, 148, 149,
226, 227, 224, 225, 230, 231, 228, 229, 221, 221, 220, 220, 223, 223, 222, 222,
244, 246, 240, 242, 252, 254, 248, 250, 234, 235, 232, 233, 238, 239, 236, 237,
200, 201, 198, 199, 204, 205, 202, 203, 192, 193, 191, 191, 196, 197, 194, 195,
214, 215, 212, 213, 218, 219, 216, 217, 207, 207, 206, 206, 210, 211, 208, 209
};
WebRtc_UWord8 alaw_to_ulaw(WebRtc_UWord8 alaw)
{
return alaw_to_ulaw_table[alaw];
}
/*- End of function --------------------------------------------------------*/
WebRtc_UWord8 ulaw_to_alaw(WebRtc_UWord8 ulaw)
{
return ulaw_to_alaw_table[ulaw];
}
/*- End of function --------------------------------------------------------*/
/*- End of file ------------------------------------------------------------*/

View File

@@ -0,0 +1,57 @@
# Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
{
'includes': [
'../../../../../../common_settings.gypi', # Common settings
],
'targets': [
{
'target_name': 'G711',
'type': '<(library)',
'include_dirs': [
'../interface',
],
'direct_dependent_settings': {
'include_dirs': [
'../interface',
],
},
'sources': [
'../interface/g711_interface.h',
'g711_interface.c',
'g711.c',
'g711.h',
],
},
{
'target_name': 'g711_test',
'type': 'executable',
'dependencies': [
'G711',
],
'sources': [
'../testG711/testG711.cpp',
],
# 'conditions': [
# ['OS=="linux"', {
# 'cflags': [
# '-fexceptions', # enable exceptions
# ],
# }],
# ],
},
],
}
# Local Variables:
# tab-width:2
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=2 shiftwidth=2:

View File

@@ -0,0 +1,382 @@
/*
* SpanDSP - a series of DSP components for telephony
*
* g711.h - In line A-law and u-law conversion routines
*
* Written by Steve Underwood <steveu@coppice.org>
*
* Copyright (C) 2001 Steve Underwood
*
* Despite my general liking of the GPL, I place this code in the
* public domain for the benefit of all mankind - even the slimy
* ones who might try to proprietize my work and use it to my
* detriment.
*
* $Id: g711.h,v 1.1 2006/06/07 15:46:39 steveu Exp $
*
* Modifications for WebRtc, 2011/04/28, by tlegrand:
* -Changed to use WebRtc types
* -Changed __inline__ to __inline
* -Two changes to make implementation bitexact with ITU-T reference implementation
*/
/*! \file */
/*! \page g711_page A-law and mu-law handling
Lookup tables for A-law and u-law look attractive, until you consider the impact
on the CPU cache. If it causes a substantial area of your processor cache to get
hit too often, cache sloshing will severely slow things down. The main reason
these routines are slow in C, is the lack of direct access to the CPU's "find
the first 1" instruction. A little in-line assembler fixes that, and the
conversion routines can be faster than lookup tables, in most real world usage.
A "find the first 1" instruction is available on most modern CPUs, and is a
much underused feature.
If an assembly language method of bit searching is not available, these routines
revert to a method that can be a little slow, so the cache thrashing might not
seem so bad :(
Feel free to submit patches to add fast "find the first 1" support for your own
favourite processor.
Look up tables are used for transcoding between A-law and u-law, since it is
difficult to achieve the precise transcoding procedure laid down in the G.711
specification by other means.
*/
#if !defined(_G711_H_)
#define _G711_H_
#ifdef __cplusplus
extern "C" {
#endif
#include "typedefs.h"
#if defined(__i386__)
/*! \brief Find the bit position of the highest set bit in a word
\param bits The word to be searched
\return The bit number of the highest set bit, or -1 if the word is zero. */
static __inline__ int top_bit(unsigned int bits)
{
int res;
__asm__ __volatile__(" movl $-1,%%edx;\n"
" bsrl %%eax,%%edx;\n"
: "=d" (res)
: "a" (bits));
return res;
}
/*- End of function --------------------------------------------------------*/
/*! \brief Find the bit position of the lowest set bit in a word
\param bits The word to be searched
\return The bit number of the lowest set bit, or -1 if the word is zero. */
static __inline__ int bottom_bit(unsigned int bits)
{
int res;
__asm__ __volatile__(" movl $-1,%%edx;\n"
" bsfl %%eax,%%edx;\n"
: "=d" (res)
: "a" (bits));
return res;
}
/*- End of function --------------------------------------------------------*/
#elif defined(__x86_64__)
static __inline__ int top_bit(unsigned int bits)
{
int res;
__asm__ __volatile__(" movq $-1,%%rdx;\n"
" bsrq %%rax,%%rdx;\n"
: "=d" (res)
: "a" (bits));
return res;
}
/*- End of function --------------------------------------------------------*/
static __inline__ int bottom_bit(unsigned int bits)
{
int res;
__asm__ __volatile__(" movq $-1,%%rdx;\n"
" bsfq %%rax,%%rdx;\n"
: "=d" (res)
: "a" (bits));
return res;
}
/*- End of function --------------------------------------------------------*/
#else
static __inline int top_bit(unsigned int bits)
{
int i;
if (bits == 0)
return -1;
i = 0;
if (bits & 0xFFFF0000)
{
bits &= 0xFFFF0000;
i += 16;
}
if (bits & 0xFF00FF00)
{
bits &= 0xFF00FF00;
i += 8;
}
if (bits & 0xF0F0F0F0)
{
bits &= 0xF0F0F0F0;
i += 4;
}
if (bits & 0xCCCCCCCC)
{
bits &= 0xCCCCCCCC;
i += 2;
}
if (bits & 0xAAAAAAAA)
{
bits &= 0xAAAAAAAA;
i += 1;
}
return i;
}
/*- End of function --------------------------------------------------------*/
static __inline int bottom_bit(unsigned int bits)
{
int i;
if (bits == 0)
return -1;
i = 32;
if (bits & 0x0000FFFF)
{
bits &= 0x0000FFFF;
i -= 16;
}
if (bits & 0x00FF00FF)
{
bits &= 0x00FF00FF;
i -= 8;
}
if (bits & 0x0F0F0F0F)
{
bits &= 0x0F0F0F0F;
i -= 4;
}
if (bits & 0x33333333)
{
bits &= 0x33333333;
i -= 2;
}
if (bits & 0x55555555)
{
bits &= 0x55555555;
i -= 1;
}
return i;
}
/*- End of function --------------------------------------------------------*/
#endif
/* N.B. It is tempting to use look-up tables for A-law and u-law conversion.
* However, you should consider the cache footprint.
*
* A 64K byte table for linear to x-law and a 512 byte table for x-law to
* linear sound like peanuts these days, and shouldn't an array lookup be
* real fast? No! When the cache sloshes as badly as this one will, a tight
* calculation may be better. The messiest part is normally finding the
* segment, but a little inline assembly can fix that on an i386, x86_64 and
* many other modern processors.
*/
/*
* Mu-law is basically as follows:
*
* Biased Linear Input Code Compressed Code
* ------------------------ ---------------
* 00000001wxyza 000wxyz
* 0000001wxyzab 001wxyz
* 000001wxyzabc 010wxyz
* 00001wxyzabcd 011wxyz
* 0001wxyzabcde 100wxyz
* 001wxyzabcdef 101wxyz
* 01wxyzabcdefg 110wxyz
* 1wxyzabcdefgh 111wxyz
*
* Each biased linear code has a leading 1 which identifies the segment
* number. The value of the segment number is equal to 7 minus the number
* of leading 0's. The quantization interval is directly available as the
* four bits wxyz. The trailing bits (a - h) are ignored.
*
* Ordinarily the complement of the resulting code word is used for
* transmission, and so the code word is complemented before it is returned.
*
* For further information see John C. Bellamy's Digital Telephony, 1982,
* John Wiley & Sons, pps 98-111 and 472-476.
*/
//#define ULAW_ZEROTRAP /* turn on the trap as per the MIL-STD */
#define ULAW_BIAS 0x84 /* Bias for linear code. */
/*! \brief Encode a linear sample to u-law
\param linear The sample to encode.
\return The u-law value.
*/
static __inline WebRtc_UWord8 linear_to_ulaw(int linear)
{
WebRtc_UWord8 u_val;
int mask;
int seg;
/* Get the sign and the magnitude of the value. */
if (linear < 0)
{
/* WebRtc, tlegrand: -1 added to get bitexact to reference implementation */
linear = ULAW_BIAS - linear - 1;
mask = 0x7F;
}
else
{
linear = ULAW_BIAS + linear;
mask = 0xFF;
}
seg = top_bit(linear | 0xFF) - 7;
/*
* Combine the sign, segment, quantization bits,
* and complement the code word.
*/
if (seg >= 8)
u_val = (WebRtc_UWord8) (0x7F ^ mask);
else
u_val = (WebRtc_UWord8) (((seg << 4) | ((linear >> (seg + 3)) & 0xF)) ^ mask);
#ifdef ULAW_ZEROTRAP
/* Optional ITU trap */
if (u_val == 0)
u_val = 0x02;
#endif
return u_val;
}
/*- End of function --------------------------------------------------------*/
/*! \brief Decode an u-law sample to a linear value.
\param ulaw The u-law sample to decode.
\return The linear value.
*/
static __inline WebRtc_Word16 ulaw_to_linear(WebRtc_UWord8 ulaw)
{
int t;
/* Complement to obtain normal u-law value. */
ulaw = ~ulaw;
/*
* Extract and bias the quantization bits. Then
* shift up by the segment number and subtract out the bias.
*/
t = (((ulaw & 0x0F) << 3) + ULAW_BIAS) << (((int) ulaw & 0x70) >> 4);
return (WebRtc_Word16) ((ulaw & 0x80) ? (ULAW_BIAS - t) : (t - ULAW_BIAS));
}
/*- End of function --------------------------------------------------------*/
/*
* A-law is basically as follows:
*
* Linear Input Code Compressed Code
* ----------------- ---------------
* 0000000wxyza 000wxyz
* 0000001wxyza 001wxyz
* 000001wxyzab 010wxyz
* 00001wxyzabc 011wxyz
* 0001wxyzabcd 100wxyz
* 001wxyzabcde 101wxyz
* 01wxyzabcdef 110wxyz
* 1wxyzabcdefg 111wxyz
*
* For further information see John C. Bellamy's Digital Telephony, 1982,
* John Wiley & Sons, pps 98-111 and 472-476.
*/
#define ALAW_AMI_MASK 0x55
/*! \brief Encode a linear sample to A-law
\param linear The sample to encode.
\return The A-law value.
*/
static __inline WebRtc_UWord8 linear_to_alaw(int linear)
{
int mask;
int seg;
if (linear >= 0)
{
/* Sign (bit 7) bit = 1 */
mask = ALAW_AMI_MASK | 0x80;
}
else
{
/* Sign (bit 7) bit = 0 */
mask = ALAW_AMI_MASK;
/* WebRtc, tlegrand: Changed from -8 to -1 to get bitexact to reference
* implementation */
linear = -linear - 1;
}
/* Convert the scaled magnitude to segment number. */
seg = top_bit(linear | 0xFF) - 7;
if (seg >= 8)
{
if (linear >= 0)
{
/* Out of range. Return maximum value. */
return (WebRtc_UWord8) (0x7F ^ mask);
}
/* We must be just a tiny step below zero */
return (WebRtc_UWord8) (0x00 ^ mask);
}
/* Combine the sign, segment, and quantization bits. */
return (WebRtc_UWord8) (((seg << 4) | ((linear >> ((seg) ? (seg + 3) : 4)) & 0x0F)) ^ mask);
}
/*- End of function --------------------------------------------------------*/
/*! \brief Decode an A-law sample to a linear value.
\param alaw The A-law sample to decode.
\return The linear value.
*/
static __inline WebRtc_Word16 alaw_to_linear(WebRtc_UWord8 alaw)
{
int i;
int seg;
alaw ^= ALAW_AMI_MASK;
i = ((alaw & 0x0F) << 4);
seg = (((int) alaw & 0x70) >> 4);
if (seg)
i = (i + 0x108) << (seg - 1);
else
i += 8;
return (WebRtc_Word16) ((alaw & 0x80) ? i : -i);
}
/*- End of function --------------------------------------------------------*/
/*! \brief Transcode from A-law to u-law, using the procedure defined in G.711.
\param alaw The A-law sample to transcode.
\return The best matching u-law value.
*/
WebRtc_UWord8 alaw_to_ulaw(WebRtc_UWord8 alaw);
/*! \brief Transcode from u-law to A-law, using the procedure defined in G.711.
\param alaw The u-law sample to transcode.
\return The best matching A-law value.
*/
WebRtc_UWord8 ulaw_to_alaw(WebRtc_UWord8 ulaw);
#ifdef __cplusplus
}
#endif
#endif
/*- End of file ------------------------------------------------------------*/
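A small round-trip sketch exercising the inline converters above (illustrative only; it prints the values rather than asserting them, and needs g711.c linked in for alaw_to_ulaw):

#include <stdio.h>
#include "g711.h"

int main(void)
{
    int v;
    for (v = -4000; v <= 4000; v += 1000) {
        WebRtc_UWord8 u = linear_to_ulaw(v);
        WebRtc_UWord8 a = linear_to_alaw(v);
        printf("in %6d  ulaw 0x%02x -> %6d  alaw 0x%02x -> %6d  a2u 0x%02x\n",
               v, u, ulaw_to_linear(u), a, alaw_to_linear(a), alaw_to_ulaw(a));
    }
    return 0;
}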

View File

@@ -0,0 +1,171 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include <string.h>
#include "g711.h"
#include "g711_interface.h"
#include "typedefs.h"
WebRtc_Word16 WebRtcG711_EncodeA(void *state,
WebRtc_Word16 *speechIn,
WebRtc_Word16 len,
WebRtc_Word16 *encoded)
{
int n;
WebRtc_UWord16 tempVal, tempVal2;
// Set to avoid getting warnings
state = state;
// Sanity check of input length
if (len < 0) {
return (-1);
}
// Loop over all samples
for (n = 0; n < len; n++) {
tempVal = (WebRtc_UWord16)linear_to_alaw(speechIn[n]);
#ifdef WEBRTC_BIG_ENDIAN
if ((n & 0x1) == 1) {
encoded[n>>1]|=((WebRtc_UWord16)tempVal);
} else {
encoded[n>>1]=((WebRtc_UWord16)tempVal)<<8;
}
#else
if ((n & 0x1) == 1) {
tempVal2 |= ((WebRtc_UWord16) tempVal) << 8;
encoded[n >> 1] |= ((WebRtc_UWord16) tempVal) << 8;
} else {
tempVal2 = ((WebRtc_UWord16) tempVal);
encoded[n >> 1] = ((WebRtc_UWord16) tempVal);
}
#endif
}
return (len);
}
WebRtc_Word16 WebRtcG711_EncodeU(void *state,
WebRtc_Word16 *speechIn,
WebRtc_Word16 len,
WebRtc_Word16 *encoded)
{
int n;
WebRtc_UWord16 tempVal;
// Set to avoid getting warnings
state = state;
// Sanity check of input length
if (len < 0) {
return (-1);
}
// Loop over all samples
for (n = 0; n < len; n++) {
tempVal = (WebRtc_UWord16)linear_to_ulaw(speechIn[n]);
#ifdef WEBRTC_BIG_ENDIAN
if ((n & 0x1) == 1) {
encoded[n>>1]|=((WebRtc_UWord16)tempVal);
} else {
encoded[n>>1]=((WebRtc_UWord16)tempVal)<<8;
}
#else
if ((n & 0x1) == 1) {
encoded[n >> 1] |= ((WebRtc_UWord16) tempVal) << 8;
} else {
encoded[n >> 1] = ((WebRtc_UWord16) tempVal);
}
#endif
}
return (len);
}
WebRtc_Word16 WebRtcG711_DecodeA(void *state,
WebRtc_Word16 *encoded,
WebRtc_Word16 len,
WebRtc_Word16 *decoded,
WebRtc_Word16 *speechType)
{
int n;
WebRtc_UWord16 tempVal;
// Set to avoid getting warnings
state = state;
// Sanity check of input length
if (len < 0) {
return (-1);
}
for (n = 0; n < len; n++) {
#ifdef WEBRTC_BIG_ENDIAN
if ((n & 0x1) == 1) {
tempVal=((WebRtc_UWord16)encoded[n>>1] & 0xFF);
} else {
tempVal=((WebRtc_UWord16)encoded[n>>1] >> 8);
}
#else
if ((n & 0x1) == 1) {
tempVal = (encoded[n >> 1] >> 8);
} else {
tempVal = (encoded[n >> 1] & 0xFF);
}
#endif
decoded[n] = (WebRtc_Word16) alaw_to_linear(tempVal);
}
*speechType = 1;
return (len);
}
WebRtc_Word16 WebRtcG711_DecodeU(void *state,
WebRtc_Word16 *encoded,
WebRtc_Word16 len,
WebRtc_Word16 *decoded,
WebRtc_Word16 *speechType)
{
int n;
WebRtc_UWord16 tempVal;
// Set to avoid getting warnings
state = state;
// Sanity check of input length
if (len < 0) {
return (-1);
}
for (n = 0; n < len; n++) {
#ifdef WEBRTC_BIG_ENDIAN
if ((n & 0x1) == 1) {
tempVal=((WebRtc_UWord16)encoded[n>>1] & 0xFF);
} else {
tempVal=((WebRtc_UWord16)encoded[n>>1] >> 8);
}
#else
if ((n & 0x1) == 1) {
tempVal = (encoded[n >> 1] >> 8);
} else {
tempVal = (encoded[n >> 1] & 0xFF);
}
#endif
decoded[n] = (WebRtc_Word16) ulaw_to_linear(tempVal);
}
*speechType = 1;
return (len);
}
WebRtc_Word16 WebRtcG711_Version(char* version, WebRtc_Word16 lenBytes)
{
strncpy(version, "2.0.0", lenBytes);
return 0;
}
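/* A minimal round-trip sketch of the interface above (not part of the original
 * commit): it assumes the snippet is compiled and linked together with this
 * g711_interface.c and the WebRTC typedefs. It encodes one 10 ms frame to
 * A-law and decodes it back. */
#include <stdio.h>
#include "g711_interface.h"
#include "typedefs.h"
int main(void)
{
    WebRtc_Word16 speech[80];              /* one 10 ms frame at 8 kHz */
    WebRtc_Word16 encoded[40];             /* two 8-bit A-law codes per 16-bit word */
    WebRtc_Word16 decoded[80];
    WebRtc_Word16 speechType;
    WebRtc_Word16 stream_len, out_len;
    int n;
    for (n = 0; n < 80; n++) {
        speech[n] = (WebRtc_Word16) (n * 100 - 4000);   /* simple test ramp */
    }
    /* Encode returns the number of A-law bytes (== number of input samples) */
    stream_len = WebRtcG711_EncodeA(NULL, speech, 80, encoded);
    /* Decode returns the number of output samples; speechType is always set to 1 */
    out_len = WebRtcG711_DecodeA(NULL, encoded, stream_len, decoded, &speechType);
    printf("encoded %d bytes, decoded %d samples\n", stream_len, out_len);
    return 0;
}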

View File

@@ -0,0 +1,171 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/*
* testG711.cpp : Defines the entry point for the console application.
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
/* include API */
#include "g711_interface.h"
/* Runtime statistics */
#include <time.h>
#define CLOCKS_PER_SEC_G711 1000
/* function for reading audio data from PCM file */
int readframe(WebRtc_Word16 *data, FILE *inp, int length) {
short k, rlen, status = 0;
rlen = (short)fread(data, sizeof(WebRtc_Word16), length, inp);
if (rlen < length) {
for (k = rlen; k < length; k++)
data[k] = 0;
status = 1;
}
return status;
}
int main(int argc, char* argv[])
{
char inname[80], outname[40], bitname[40];
FILE *inp, *outp, *bitp;
int framecnt, endfile;
WebRtc_Word16 framelength = 80;
int err;
/* Runtime statistics */
double starttime;
double runtime;
double length_file;
WebRtc_Word16 stream_len = 0;
WebRtc_Word16 shortdata[480];
WebRtc_Word16 decoded[480];
WebRtc_Word16 decoded2[480];
WebRtc_Word16 streamdata[500];
WebRtc_Word16 speechType[1];
char law[2];
char versionNumber[40];
/* handling wrong input arguments in the command line */
if ((argc!=5) && (argc!=6)) {
printf("\n\nWrong number of arguments or flag values.\n\n");
printf("\n");
printf("\nG.711 test application\n\n");
printf("Usage:\n\n");
    printf("./testG711.exe framelength law infile outfile [outbits]\n\n");
    printf("framelength: Framelength in samples.\n");
    printf("law        : Coding law, A or u.\n");
printf("infile : Normal speech input file\n");
printf("outfile : Speech output file\n\n");
printf("outbits : Output bitstream file [optional]\n\n");
exit(0);
}
/* Get version and print */
WebRtcG711_Version(versionNumber, 40);
printf("-----------------------------------\n");
printf("G.711 version: %s\n\n", versionNumber);
/* Get frame length */
framelength = atoi(argv[1]);
/* Get compression law */
strcpy(law, argv[2]);
/* Get Input and Output files */
sscanf(argv[3], "%s", inname);
sscanf(argv[4], "%s", outname);
if (argc==6) {
sscanf(argv[5], "%s", bitname);
if ((bitp = fopen(bitname,"wb")) == NULL) {
      printf(" G.711: Cannot write file %s.\n", bitname);
exit(1);
}
}
if ((inp = fopen(inname,"rb")) == NULL) {
printf(" G.711: Cannot read file %s.\n", inname);
exit(1);
}
if ((outp = fopen(outname,"wb")) == NULL) {
printf(" G.711: Cannot write file %s.\n", outname);
exit(1);
}
printf("\nInput: %s\nOutput: %s\n", inname, outname);
if (argc==6) {
printf("\nBitfile: %s\n", bitname);
}
starttime = clock()/(double)CLOCKS_PER_SEC_G711; /* Runtime statistics */
/* Initialize encoder and decoder */
framecnt= 0;
endfile = 0;
while (endfile == 0) {
framecnt++;
/* Read speech block */
endfile = readframe(shortdata, inp, framelength);
/* G.711 encoding */
if (!strcmp(law,"A")) {
/* A-law encoding */
stream_len = WebRtcG711_EncodeA(NULL, shortdata, framelength, streamdata);
if (argc==6){
/* Write bits to file */
fwrite(streamdata,sizeof(unsigned char),stream_len,bitp);
}
err = WebRtcG711_DecodeA(NULL, streamdata, stream_len, decoded, speechType);
} else if (!strcmp(law,"u")){
/* u-law encoding */
stream_len = WebRtcG711_EncodeU(NULL, shortdata, framelength, streamdata);
if (argc==6){
/* Write bits to file */
fwrite(streamdata,sizeof(unsigned char),stream_len,bitp);
}
err = WebRtcG711_DecodeU(NULL, streamdata, stream_len, decoded, speechType);
} else {
printf("Wrong law mode\n");
exit (1);
}
if (stream_len < 0 || err < 0) {
/* exit if returned with error */
printf("Error in encoder/decoder\n");
} else {
/* Write coded speech to file */
fwrite(decoded,sizeof(short),framelength,outp);
}
}
runtime = (double)(clock()/(double)CLOCKS_PER_SEC_G711-starttime);
length_file = ((double)framecnt*(double)framelength/8000);
printf("\n\nLength of speech file: %.1f s\n", length_file);
printf("Time to run G.711: %.2f s (%.2f %% of realtime)\n\n", runtime, (100*runtime/length_file));
printf("---------------------END----------------------\n");
  fclose(inp);
  fclose(outp);
  if (argc == 6) {
    fclose(bitp);
  }
return 0;
}

View File

@@ -0,0 +1,190 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MODULES_AUDIO_CODING_CODECS_G722_MAIN_INTERFACE_G722_INTERFACE_H_
#define MODULES_AUDIO_CODING_CODECS_G722_MAIN_INTERFACE_G722_INTERFACE_H_
#include "typedefs.h"
/*
* Solution to support multiple instances
*/
typedef struct WebRtcG722EncInst G722EncInst;
typedef struct WebRtcG722DecInst G722DecInst;
/*
* Comfort noise constants
*/
#define G722_WEBRTC_SPEECH 1
#define G722_WEBRTC_CNG 2
#ifdef __cplusplus
extern "C" {
#endif
/****************************************************************************
* WebRtcG722_CreateEncoder(...)
*
* Create memory used for G722 encoder
*
* Input:
* - G722enc_inst : G722 instance for encoder
*
* Return value : 0 - Ok
* -1 - Error
*/
WebRtc_Word16 WebRtcG722_CreateEncoder(G722EncInst **G722enc_inst);
/****************************************************************************
* WebRtcG722_EncoderInit(...)
*
* This function initializes a G722 instance
*
* Input:
 * - G722enc_inst      : G722 encoder instance that should be initialized
*
* Return value : 0 - Ok
* -1 - Error
*/
WebRtc_Word16 WebRtcG722_EncoderInit(G722EncInst *G722enc_inst);
/****************************************************************************
* WebRtcG722_FreeEncoder(...)
*
* Free the memory used for G722 encoder
*
* Input:
* - G722enc_inst : G722 instance for encoder
*
* Return value : 0 - Ok
* -1 - Error
*/
WebRtc_Word16 WebRtcG722_FreeEncoder(G722EncInst *G722enc_inst);
/****************************************************************************
* WebRtcG722_Encode(...)
*
 * This function encodes speech into G722 data.
*
* Input:
* - G722enc_inst : G722 instance, i.e. the user that should encode
* a packet
* - speechIn : Input speech vector
* - len : Samples in speechIn
*
* Output:
* - encoded : The encoded data vector
*
* Return value : >0 - Length (in bytes) of coded data
* -1 - Error
*/
WebRtc_Word16 WebRtcG722_Encode(G722EncInst *G722enc_inst,
WebRtc_Word16 *speechIn,
WebRtc_Word16 len,
WebRtc_Word16 *encoded);
/****************************************************************************
* WebRtcG722_CreateDecoder(...)
*
 * Create memory used for G722 decoder
*
* Input:
* - G722dec_inst : G722 instance for decoder
*
* Return value : 0 - Ok
* -1 - Error
*/
WebRtc_Word16 WebRtcG722_CreateDecoder(G722DecInst **G722dec_inst);
/****************************************************************************
* WebRtcG722_DecoderInit(...)
*
 * This function initializes a G722 instance
*
* Input:
 * - G722dec_inst      : G722 decoder instance that should be initialized
*
* Return value : 0 - Ok
* -1 - Error
*/
WebRtc_Word16 WebRtcG722_DecoderInit(G722DecInst *G722dec_inst);
/****************************************************************************
* WebRtcG722_FreeDecoder(...)
*
* Free the memory used for G722 decoder
*
* Input:
* - G722dec_inst : G722 instance for decoder
*
* Return value : 0 - Ok
* -1 - Error
*/
WebRtc_Word16 WebRtcG722_FreeDecoder(G722DecInst *G722dec_inst);
/****************************************************************************
* WebRtcG722_Decode(...)
*
 * This function decodes a packet with G722 frame(s). At 64 kbps the output
 * speech length is twice the number of encoded bytes (2 * len samples).
*
* Input:
* - G722dec_inst : G722 instance, i.e. the user that should decode
* a packet
* - encoded : Encoded G722 frame(s)
* - len : Bytes in encoded vector
*
* Output:
* - decoded : The decoded vector
* - speechType : 1 normal, 2 CNG (Since G722 does not have its own
* DTX/CNG scheme it should always return 1)
*
* Return value : >0 - Samples in decoded vector
* -1 - Error
*/
WebRtc_Word16 WebRtcG722_Decode(G722DecInst *G722dec_inst,
WebRtc_Word16 *encoded,
WebRtc_Word16 len,
WebRtc_Word16 *decoded,
WebRtc_Word16 *speechType);
/****************************************************************************
* WebRtcG722_Version(...)
*
* Get a string with the current version of the codec
*/
WebRtc_Word16 WebRtcG722_Version(char *versionStr, short len);
#ifdef __cplusplus
}
#endif
#endif /* MODULES_AUDIO_CODING_CODECS_G722_MAIN_INTERFACE_G722_INTERFACE_H_ */
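/* A minimal usage sketch of this interface (not part of the original header):
 * it assumes linkage against the G722 library built from these sources and
 * walks through create, init, encode, decode and free for one 10 ms wideband
 * frame. */
#include <stdio.h>
#include "g722_interface.h"
#include "typedefs.h"
int main(void)
{
    G722EncInst *enc = NULL;
    G722DecInst *dec = NULL;
    WebRtc_Word16 speech[160] = {0};   /* one 10 ms frame at 16 kHz, silence */
    WebRtc_Word16 stream[80];          /* 64 kbps G.722: one byte per two samples */
    WebRtc_Word16 out[160];
    WebRtc_Word16 speechType;
    WebRtc_Word16 bytes, samples;
    if (WebRtcG722_CreateEncoder(&enc) != 0 || WebRtcG722_CreateDecoder(&dec) != 0) {
        return -1;
    }
    WebRtcG722_EncoderInit(enc);
    WebRtcG722_DecoderInit(dec);
    bytes = WebRtcG722_Encode(enc, speech, 160, stream);       /* expect 80 bytes */
    samples = WebRtcG722_Decode(dec, stream, bytes, out, &speechType);
    printf("encoded %d bytes, decoded %d samples, speechType %d\n",
           bytes, samples, speechType);
    WebRtcG722_FreeEncoder(enc);
    WebRtcG722_FreeDecoder(dec);
    return 0;
}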

View File

@@ -0,0 +1,56 @@
# Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
{
'includes': [
'../../../../../../common_settings.gypi', # Common settings
],
'targets': [
{
'target_name': 'G722',
'type': '<(library)',
'include_dirs': [
'../interface',
],
'direct_dependent_settings': {
'include_dirs': [
'../interface',
],
},
'sources': [
'../interface/g722_interface.h',
'g722_interface.c',
'g722_encode.c',
'g722_decode.c',
'g722_enc_dec.h',
],
},
{
'target_name': 'G722Test',
'type': 'executable',
'dependencies': [
'G722',
],
'sources': [
'../testG722/testG722.cpp',
],
'conditions': [
['OS=="linux"', {
'cflags': [
'-fexceptions', # enable exceptions
],
}],
],
},
],
}
# Local Variables:
# tab-width:2
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=2 shiftwidth=2:

View File

@@ -0,0 +1,407 @@
/*
* SpanDSP - a series of DSP components for telephony
*
* g722_decode.c - The ITU G.722 codec, decode part.
*
* Written by Steve Underwood <steveu@coppice.org>
*
* Copyright (C) 2005 Steve Underwood
*
* Despite my general liking of the GPL, I place my own contributions
* to this code in the public domain for the benefit of all mankind -
* even the slimy ones who might try to proprietize my work and use it
* to my detriment.
*
* Based in part on a single channel G.722 codec which is:
*
* Copyright (c) CMU 1993
* Computer Science, Speech Group
* Chengxiang Lu and Alex Hauptmann
*
* $Id: g722_decode.c,v 1.15 2006/07/07 16:37:49 steveu Exp $
*
* Modifications for WebRtc, 2011/04/28, by tlegrand:
* -Removed usage of inttypes.h and tgmath.h
* -Changed to use WebRtc types
* -Changed __inline__ to __inline
* -Added saturation check on output
*/
/*! \file */
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif
#include <stdio.h>
#include <memory.h>
#include <stdlib.h>
#include "typedefs.h"
#include "g722_enc_dec.h"
#if !defined(FALSE)
#define FALSE 0
#endif
#if !defined(TRUE)
#define TRUE (!FALSE)
#endif
static __inline WebRtc_Word16 saturate(WebRtc_Word32 amp)
{
WebRtc_Word16 amp16;
/* Hopefully this is optimised for the common case - not clipping */
amp16 = (WebRtc_Word16) amp;
if (amp == amp16)
return amp16;
if (amp > WEBRTC_INT16_MAX)
return WEBRTC_INT16_MAX;
return WEBRTC_INT16_MIN;
}
/*- End of function --------------------------------------------------------*/
static void block4(g722_decode_state_t *s, int band, int d);
static void block4(g722_decode_state_t *s, int band, int d)
{
int wd1;
int wd2;
int wd3;
int i;
/* Block 4, RECONS */
s->band[band].d[0] = d;
s->band[band].r[0] = saturate(s->band[band].s + d);
/* Block 4, PARREC */
s->band[band].p[0] = saturate(s->band[band].sz + d);
/* Block 4, UPPOL2 */
for (i = 0; i < 3; i++)
s->band[band].sg[i] = s->band[band].p[i] >> 15;
wd1 = saturate(s->band[band].a[1] << 2);
wd2 = (s->band[band].sg[0] == s->band[band].sg[1]) ? -wd1 : wd1;
if (wd2 > 32767)
wd2 = 32767;
wd3 = (s->band[band].sg[0] == s->band[band].sg[2]) ? 128 : -128;
wd3 += (wd2 >> 7);
wd3 += (s->band[band].a[2]*32512) >> 15;
if (wd3 > 12288)
wd3 = 12288;
else if (wd3 < -12288)
wd3 = -12288;
s->band[band].ap[2] = wd3;
/* Block 4, UPPOL1 */
s->band[band].sg[0] = s->band[band].p[0] >> 15;
s->band[band].sg[1] = s->band[band].p[1] >> 15;
wd1 = (s->band[band].sg[0] == s->band[band].sg[1]) ? 192 : -192;
wd2 = (s->band[band].a[1]*32640) >> 15;
s->band[band].ap[1] = saturate(wd1 + wd2);
wd3 = saturate(15360 - s->band[band].ap[2]);
if (s->band[band].ap[1] > wd3)
s->band[band].ap[1] = wd3;
else if (s->band[band].ap[1] < -wd3)
s->band[band].ap[1] = -wd3;
/* Block 4, UPZERO */
wd1 = (d == 0) ? 0 : 128;
s->band[band].sg[0] = d >> 15;
for (i = 1; i < 7; i++)
{
s->band[band].sg[i] = s->band[band].d[i] >> 15;
wd2 = (s->band[band].sg[i] == s->band[band].sg[0]) ? wd1 : -wd1;
wd3 = (s->band[band].b[i]*32640) >> 15;
s->band[band].bp[i] = saturate(wd2 + wd3);
}
/* Block 4, DELAYA */
for (i = 6; i > 0; i--)
{
s->band[band].d[i] = s->band[band].d[i - 1];
s->band[band].b[i] = s->band[band].bp[i];
}
for (i = 2; i > 0; i--)
{
s->band[band].r[i] = s->band[band].r[i - 1];
s->band[band].p[i] = s->band[band].p[i - 1];
s->band[band].a[i] = s->band[band].ap[i];
}
/* Block 4, FILTEP */
wd1 = saturate(s->band[band].r[1] + s->band[band].r[1]);
wd1 = (s->band[band].a[1]*wd1) >> 15;
wd2 = saturate(s->band[band].r[2] + s->band[band].r[2]);
wd2 = (s->band[band].a[2]*wd2) >> 15;
s->band[band].sp = saturate(wd1 + wd2);
/* Block 4, FILTEZ */
s->band[band].sz = 0;
for (i = 6; i > 0; i--)
{
wd1 = saturate(s->band[band].d[i] + s->band[band].d[i]);
s->band[band].sz += (s->band[band].b[i]*wd1) >> 15;
}
s->band[band].sz = saturate(s->band[band].sz);
/* Block 4, PREDIC */
s->band[band].s = saturate(s->band[band].sp + s->band[band].sz);
}
/*- End of function --------------------------------------------------------*/
g722_decode_state_t *g722_decode_init(g722_decode_state_t *s, int rate, int options)
{
if (s == NULL)
{
if ((s = (g722_decode_state_t *) malloc(sizeof(*s))) == NULL)
return NULL;
}
memset(s, 0, sizeof(*s));
if (rate == 48000)
s->bits_per_sample = 6;
else if (rate == 56000)
s->bits_per_sample = 7;
else
s->bits_per_sample = 8;
if ((options & G722_SAMPLE_RATE_8000))
s->eight_k = TRUE;
if ((options & G722_PACKED) && s->bits_per_sample != 8)
s->packed = TRUE;
else
s->packed = FALSE;
s->band[0].det = 32;
s->band[1].det = 8;
return s;
}
/*- End of function --------------------------------------------------------*/
int g722_decode_release(g722_decode_state_t *s)
{
free(s);
return 0;
}
/*- End of function --------------------------------------------------------*/
int g722_decode(g722_decode_state_t *s, WebRtc_Word16 amp[],
const WebRtc_UWord8 g722_data[], int len)
{
static const int wl[8] = {-60, -30, 58, 172, 334, 538, 1198, 3042 };
static const int rl42[16] = {0, 7, 6, 5, 4, 3, 2, 1, 7, 6, 5, 4, 3, 2, 1, 0 };
static const int ilb[32] =
{
2048, 2093, 2139, 2186, 2233, 2282, 2332,
2383, 2435, 2489, 2543, 2599, 2656, 2714,
2774, 2834, 2896, 2960, 3025, 3091, 3158,
3228, 3298, 3371, 3444, 3520, 3597, 3676,
3756, 3838, 3922, 4008
};
static const int wh[3] = {0, -214, 798};
static const int rh2[4] = {2, 1, 2, 1};
static const int qm2[4] = {-7408, -1616, 7408, 1616};
static const int qm4[16] =
{
0, -20456, -12896, -8968,
-6288, -4240, -2584, -1200,
20456, 12896, 8968, 6288,
4240, 2584, 1200, 0
};
static const int qm5[32] =
{
-280, -280, -23352, -17560,
-14120, -11664, -9752, -8184,
-6864, -5712, -4696, -3784,
-2960, -2208, -1520, -880,
23352, 17560, 14120, 11664,
9752, 8184, 6864, 5712,
4696, 3784, 2960, 2208,
1520, 880, 280, -280
};
static const int qm6[64] =
{
-136, -136, -136, -136,
-24808, -21904, -19008, -16704,
-14984, -13512, -12280, -11192,
-10232, -9360, -8576, -7856,
-7192, -6576, -6000, -5456,
-4944, -4464, -4008, -3576,
-3168, -2776, -2400, -2032,
-1688, -1360, -1040, -728,
24808, 21904, 19008, 16704,
14984, 13512, 12280, 11192,
10232, 9360, 8576, 7856,
7192, 6576, 6000, 5456,
4944, 4464, 4008, 3576,
3168, 2776, 2400, 2032,
1688, 1360, 1040, 728,
432, 136, -432, -136
};
static const int qmf_coeffs[12] =
{
3, -11, 12, 32, -210, 951, 3876, -805, 362, -156, 53, -11,
};
int dlowt;
int rlow;
int ihigh;
int dhigh;
int rhigh;
int xout1;
int xout2;
int wd1;
int wd2;
int wd3;
int code;
int outlen;
int i;
int j;
outlen = 0;
rhigh = 0;
for (j = 0; j < len; )
{
if (s->packed)
{
/* Unpack the code bits */
if (s->in_bits < s->bits_per_sample)
{
s->in_buffer |= (g722_data[j++] << s->in_bits);
s->in_bits += 8;
}
code = s->in_buffer & ((1 << s->bits_per_sample) - 1);
s->in_buffer >>= s->bits_per_sample;
s->in_bits -= s->bits_per_sample;
}
else
{
code = g722_data[j++];
}
switch (s->bits_per_sample)
{
default:
case 8:
wd1 = code & 0x3F;
ihigh = (code >> 6) & 0x03;
wd2 = qm6[wd1];
wd1 >>= 2;
break;
case 7:
wd1 = code & 0x1F;
ihigh = (code >> 5) & 0x03;
wd2 = qm5[wd1];
wd1 >>= 1;
break;
case 6:
wd1 = code & 0x0F;
ihigh = (code >> 4) & 0x03;
wd2 = qm4[wd1];
break;
}
/* Block 5L, LOW BAND INVQBL */
wd2 = (s->band[0].det*wd2) >> 15;
/* Block 5L, RECONS */
rlow = s->band[0].s + wd2;
/* Block 6L, LIMIT */
if (rlow > 16383)
rlow = 16383;
else if (rlow < -16384)
rlow = -16384;
/* Block 2L, INVQAL */
wd2 = qm4[wd1];
dlowt = (s->band[0].det*wd2) >> 15;
/* Block 3L, LOGSCL */
wd2 = rl42[wd1];
wd1 = (s->band[0].nb*127) >> 7;
wd1 += wl[wd2];
if (wd1 < 0)
wd1 = 0;
else if (wd1 > 18432)
wd1 = 18432;
s->band[0].nb = wd1;
/* Block 3L, SCALEL */
wd1 = (s->band[0].nb >> 6) & 31;
wd2 = 8 - (s->band[0].nb >> 11);
wd3 = (wd2 < 0) ? (ilb[wd1] << -wd2) : (ilb[wd1] >> wd2);
s->band[0].det = wd3 << 2;
block4(s, 0, dlowt);
if (!s->eight_k)
{
/* Block 2H, INVQAH */
wd2 = qm2[ihigh];
dhigh = (s->band[1].det*wd2) >> 15;
/* Block 5H, RECONS */
rhigh = dhigh + s->band[1].s;
/* Block 6H, LIMIT */
if (rhigh > 16383)
rhigh = 16383;
else if (rhigh < -16384)
rhigh = -16384;
/* Block 2H, INVQAH */
wd2 = rh2[ihigh];
wd1 = (s->band[1].nb*127) >> 7;
wd1 += wh[wd2];
if (wd1 < 0)
wd1 = 0;
else if (wd1 > 22528)
wd1 = 22528;
s->band[1].nb = wd1;
/* Block 3H, SCALEH */
wd1 = (s->band[1].nb >> 6) & 31;
wd2 = 10 - (s->band[1].nb >> 11);
wd3 = (wd2 < 0) ? (ilb[wd1] << -wd2) : (ilb[wd1] >> wd2);
s->band[1].det = wd3 << 2;
block4(s, 1, dhigh);
}
if (s->itu_test_mode)
{
amp[outlen++] = (WebRtc_Word16) (rlow << 1);
amp[outlen++] = (WebRtc_Word16) (rhigh << 1);
}
else
{
if (s->eight_k)
{
amp[outlen++] = (WebRtc_Word16) (rlow << 1);
}
else
{
/* Apply the receive QMF */
for (i = 0; i < 22; i++)
s->x[i] = s->x[i + 2];
s->x[22] = rlow + rhigh;
s->x[23] = rlow - rhigh;
xout1 = 0;
xout2 = 0;
for (i = 0; i < 12; i++)
{
xout2 += s->x[2*i]*qmf_coeffs[i];
xout1 += s->x[2*i + 1]*qmf_coeffs[11 - i];
}
/* We shift by 12 to allow for the QMF filters (DC gain = 4096), less 1
to allow for the 15 bit input to the G.722 algorithm. */
/* WebRtc, tlegrand: added saturation */
amp[outlen++] = saturate(xout1 >> 11);
amp[outlen++] = saturate(xout2 >> 11);
}
}
}
return outlen;
}
/*- End of function --------------------------------------------------------*/
/*- End of file ------------------------------------------------------------*/

View File

@@ -0,0 +1,154 @@
/*
* SpanDSP - a series of DSP components for telephony
*
* g722.h - The ITU G.722 codec.
*
* Written by Steve Underwood <steveu@coppice.org>
*
* Copyright (C) 2005 Steve Underwood
*
* Despite my general liking of the GPL, I place my own contributions
* to this code in the public domain for the benefit of all mankind -
* even the slimy ones who might try to proprietize my work and use it
* to my detriment.
*
* Based on a single channel G.722 codec which is:
*
***** Copyright (c) CMU 1993 *****
* Computer Science, Speech Group
* Chengxiang Lu and Alex Hauptmann
*
* $Id: g722.h,v 1.10 2006/06/16 12:45:53 steveu Exp $
*
* Modifications for WebRtc, 2011/04/28, by tlegrand:
* -Changed to use WebRtc types
* -Added new defines for minimum and maximum values of short int
*/
/*! \file */
#if !defined(_G722_ENC_DEC_H_)
#define _G722_ENC_DEC_H_
/*! \page g722_page G.722 encoding and decoding
\section g722_page_sec_1 What does it do?
The G.722 module is a bit exact implementation of the ITU G.722 specification for all three
specified bit rates - 64000bps, 56000bps and 48000bps. It passes the ITU tests.
To allow fast and flexible interworking with narrow band telephony, the encoder and decoder
support an option for the linear audio to be an 8k samples/second stream. In this mode the
codec is considerably faster, and still fully compatible with wideband terminals using G.722.
\section g722_page_sec_2 How does it work?
???.
*/
#define WEBRTC_INT16_MAX 32767
#define WEBRTC_INT16_MIN -32768
enum
{
G722_SAMPLE_RATE_8000 = 0x0001,
G722_PACKED = 0x0002
};
typedef struct
{
    /*! TRUE if operating in the special ITU test mode, with the band split filters
             disabled. */
int itu_test_mode;
/*! TRUE if the G.722 data is packed */
int packed;
/*! TRUE if encode from 8k samples/second */
int eight_k;
    /*! 6 for 48 kbit/s, 7 for 56 kbit/s, or 8 for 64 kbit/s. */
int bits_per_sample;
/*! Signal history for the QMF */
int x[24];
struct
{
int s;
int sp;
int sz;
int r[3];
int a[3];
int ap[3];
int p[3];
int d[7];
int b[7];
int bp[7];
int sg[7];
int nb;
int det;
} band[2];
unsigned int in_buffer;
int in_bits;
unsigned int out_buffer;
int out_bits;
} g722_encode_state_t;
typedef struct
{
    /*! TRUE if operating in the special ITU test mode, with the band split filters
             disabled. */
int itu_test_mode;
/*! TRUE if the G.722 data is packed */
int packed;
/*! TRUE if decode to 8k samples/second */
int eight_k;
    /*! 6 for 48 kbit/s, 7 for 56 kbit/s, or 8 for 64 kbit/s. */
int bits_per_sample;
/*! Signal history for the QMF */
int x[24];
struct
{
int s;
int sp;
int sz;
int r[3];
int a[3];
int ap[3];
int p[3];
int d[7];
int b[7];
int bp[7];
int sg[7];
int nb;
int det;
} band[2];
unsigned int in_buffer;
int in_bits;
unsigned int out_buffer;
int out_bits;
} g722_decode_state_t;
#ifdef __cplusplus
extern "C" {
#endif
g722_encode_state_t *g722_encode_init(g722_encode_state_t *s, int rate, int options);
int g722_encode_release(g722_encode_state_t *s);
int g722_encode(g722_encode_state_t *s,
WebRtc_UWord8 g722_data[],
const WebRtc_Word16 amp[],
int len);
g722_decode_state_t *g722_decode_init(g722_decode_state_t *s, int rate, int options);
int g722_decode_release(g722_decode_state_t *s);
int g722_decode(g722_decode_state_t *s,
WebRtc_Word16 amp[],
const WebRtc_UWord8 g722_data[],
int len);
#ifdef __cplusplus
}
#endif
#endif

View File

@@ -0,0 +1,433 @@
/*
* SpanDSP - a series of DSP components for telephony
*
* g722_encode.c - The ITU G.722 codec, encode part.
*
* Written by Steve Underwood <steveu@coppice.org>
*
* Copyright (C) 2005 Steve Underwood
*
* All rights reserved.
*
* Despite my general liking of the GPL, I place my own contributions
* to this code in the public domain for the benefit of all mankind -
* even the slimy ones who might try to proprietize my work and use it
* to my detriment.
*
* Based on a single channel 64kbps only G.722 codec which is:
*
***** Copyright (c) CMU 1993 *****
* Computer Science, Speech Group
* Chengxiang Lu and Alex Hauptmann
*
* $Id: g722_encode.c,v 1.14 2006/07/07 16:37:49 steveu Exp $
*
* Modifications for WebRtc, 2011/04/28, by tlegrand:
* -Removed usage of inttypes.h and tgmath.h
* -Changed to use WebRtc types
* -Added option to run encoder bitexact with ITU-T reference implementation
*/
/*! \file */
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif
#include <stdio.h>
#include <memory.h>
#include <stdlib.h>
#include "typedefs.h"
#include "g722_enc_dec.h"
#if !defined(FALSE)
#define FALSE 0
#endif
#if !defined(TRUE)
#define TRUE (!FALSE)
#endif
static __inline WebRtc_Word16 saturate(WebRtc_Word32 amp)
{
WebRtc_Word16 amp16;
/* Hopefully this is optimised for the common case - not clipping */
amp16 = (WebRtc_Word16) amp;
if (amp == amp16)
return amp16;
if (amp > WEBRTC_INT16_MAX)
return WEBRTC_INT16_MAX;
return WEBRTC_INT16_MIN;
}
/*- End of function --------------------------------------------------------*/
static void block4(g722_encode_state_t *s, int band, int d)
{
int wd1;
int wd2;
int wd3;
int i;
/* Block 4, RECONS */
s->band[band].d[0] = d;
s->band[band].r[0] = saturate(s->band[band].s + d);
/* Block 4, PARREC */
s->band[band].p[0] = saturate(s->band[band].sz + d);
/* Block 4, UPPOL2 */
for (i = 0; i < 3; i++)
s->band[band].sg[i] = s->band[band].p[i] >> 15;
wd1 = saturate(s->band[band].a[1] << 2);
wd2 = (s->band[band].sg[0] == s->band[band].sg[1]) ? -wd1 : wd1;
if (wd2 > 32767)
wd2 = 32767;
wd3 = (wd2 >> 7) + ((s->band[band].sg[0] == s->band[band].sg[2]) ? 128 : -128);
wd3 += (s->band[band].a[2]*32512) >> 15;
if (wd3 > 12288)
wd3 = 12288;
else if (wd3 < -12288)
wd3 = -12288;
s->band[band].ap[2] = wd3;
/* Block 4, UPPOL1 */
s->band[band].sg[0] = s->band[band].p[0] >> 15;
s->band[band].sg[1] = s->band[band].p[1] >> 15;
wd1 = (s->band[band].sg[0] == s->band[band].sg[1]) ? 192 : -192;
wd2 = (s->band[band].a[1]*32640) >> 15;
s->band[band].ap[1] = saturate(wd1 + wd2);
wd3 = saturate(15360 - s->band[band].ap[2]);
if (s->band[band].ap[1] > wd3)
s->band[band].ap[1] = wd3;
else if (s->band[band].ap[1] < -wd3)
s->band[band].ap[1] = -wd3;
/* Block 4, UPZERO */
wd1 = (d == 0) ? 0 : 128;
s->band[band].sg[0] = d >> 15;
for (i = 1; i < 7; i++)
{
s->band[band].sg[i] = s->band[band].d[i] >> 15;
wd2 = (s->band[band].sg[i] == s->band[band].sg[0]) ? wd1 : -wd1;
wd3 = (s->band[band].b[i]*32640) >> 15;
s->band[band].bp[i] = saturate(wd2 + wd3);
}
/* Block 4, DELAYA */
for (i = 6; i > 0; i--)
{
s->band[band].d[i] = s->band[band].d[i - 1];
s->band[band].b[i] = s->band[band].bp[i];
}
for (i = 2; i > 0; i--)
{
s->band[band].r[i] = s->band[band].r[i - 1];
s->band[band].p[i] = s->band[band].p[i - 1];
s->band[band].a[i] = s->band[band].ap[i];
}
/* Block 4, FILTEP */
wd1 = saturate(s->band[band].r[1] + s->band[band].r[1]);
wd1 = (s->band[band].a[1]*wd1) >> 15;
wd2 = saturate(s->band[band].r[2] + s->band[band].r[2]);
wd2 = (s->band[band].a[2]*wd2) >> 15;
s->band[band].sp = saturate(wd1 + wd2);
/* Block 4, FILTEZ */
s->band[band].sz = 0;
for (i = 6; i > 0; i--)
{
wd1 = saturate(s->band[band].d[i] + s->band[band].d[i]);
s->band[band].sz += (s->band[band].b[i]*wd1) >> 15;
}
s->band[band].sz = saturate(s->band[band].sz);
/* Block 4, PREDIC */
s->band[band].s = saturate(s->band[band].sp + s->band[band].sz);
}
/*- End of function --------------------------------------------------------*/
g722_encode_state_t *g722_encode_init(g722_encode_state_t *s, int rate, int options)
{
if (s == NULL)
{
if ((s = (g722_encode_state_t *) malloc(sizeof(*s))) == NULL)
return NULL;
}
memset(s, 0, sizeof(*s));
if (rate == 48000)
s->bits_per_sample = 6;
else if (rate == 56000)
s->bits_per_sample = 7;
else
s->bits_per_sample = 8;
if ((options & G722_SAMPLE_RATE_8000))
s->eight_k = TRUE;
if ((options & G722_PACKED) && s->bits_per_sample != 8)
s->packed = TRUE;
else
s->packed = FALSE;
s->band[0].det = 32;
s->band[1].det = 8;
return s;
}
/*- End of function --------------------------------------------------------*/
int g722_encode_release(g722_encode_state_t *s)
{
free(s);
return 0;
}
/*- End of function --------------------------------------------------------*/
/* WebRtc, tlegrand:
* Only define the following if bit-exactness with reference implementation
* is needed. Will only have any effect if input signal is saturated.
*/
//#define RUN_LIKE_REFERENCE_G722
#ifdef RUN_LIKE_REFERENCE_G722
WebRtc_Word16 limitValues (WebRtc_Word16 rl)
{
WebRtc_Word16 yl;
yl = (rl > 16383) ? 16383 : ((rl < -16384) ? -16384 : rl);
return (yl);
}
#endif
int g722_encode(g722_encode_state_t *s, WebRtc_UWord8 g722_data[],
const WebRtc_Word16 amp[], int len)
{
static const int q6[32] =
{
0, 35, 72, 110, 150, 190, 233, 276,
323, 370, 422, 473, 530, 587, 650, 714,
786, 858, 940, 1023, 1121, 1219, 1339, 1458,
1612, 1765, 1980, 2195, 2557, 2919, 0, 0
};
static const int iln[32] =
{
0, 63, 62, 31, 30, 29, 28, 27,
26, 25, 24, 23, 22, 21, 20, 19,
18, 17, 16, 15, 14, 13, 12, 11,
10, 9, 8, 7, 6, 5, 4, 0
};
static const int ilp[32] =
{
0, 61, 60, 59, 58, 57, 56, 55,
54, 53, 52, 51, 50, 49, 48, 47,
46, 45, 44, 43, 42, 41, 40, 39,
38, 37, 36, 35, 34, 33, 32, 0
};
static const int wl[8] =
{
-60, -30, 58, 172, 334, 538, 1198, 3042
};
static const int rl42[16] =
{
0, 7, 6, 5, 4, 3, 2, 1, 7, 6, 5, 4, 3, 2, 1, 0
};
static const int ilb[32] =
{
2048, 2093, 2139, 2186, 2233, 2282, 2332,
2383, 2435, 2489, 2543, 2599, 2656, 2714,
2774, 2834, 2896, 2960, 3025, 3091, 3158,
3228, 3298, 3371, 3444, 3520, 3597, 3676,
3756, 3838, 3922, 4008
};
static const int qm4[16] =
{
0, -20456, -12896, -8968,
-6288, -4240, -2584, -1200,
20456, 12896, 8968, 6288,
4240, 2584, 1200, 0
};
static const int qm2[4] =
{
-7408, -1616, 7408, 1616
};
static const int qmf_coeffs[12] =
{
3, -11, 12, 32, -210, 951, 3876, -805, 362, -156, 53, -11,
};
static const int ihn[3] = {0, 1, 0};
static const int ihp[3] = {0, 3, 2};
static const int wh[3] = {0, -214, 798};
static const int rh2[4] = {2, 1, 2, 1};
int dlow;
int dhigh;
int el;
int wd;
int wd1;
int ril;
int wd2;
int il4;
int ih2;
int wd3;
int eh;
int mih;
int i;
int j;
/* Low and high band PCM from the QMF */
int xlow;
int xhigh;
int g722_bytes;
/* Even and odd tap accumulators */
int sumeven;
int sumodd;
int ihigh;
int ilow;
int code;
g722_bytes = 0;
xhigh = 0;
for (j = 0; j < len; )
{
if (s->itu_test_mode)
{
xlow =
xhigh = amp[j++] >> 1;
}
else
{
if (s->eight_k)
{
/* We shift by 1 to allow for the 15 bit input to the G.722 algorithm. */
xlow = amp[j++] >> 1;
}
else
{
/* Apply the transmit QMF */
/* Shuffle the buffer down */
for (i = 0; i < 22; i++)
s->x[i] = s->x[i + 2];
s->x[22] = amp[j++];
s->x[23] = amp[j++];
/* Discard every other QMF output */
sumeven = 0;
sumodd = 0;
for (i = 0; i < 12; i++)
{
sumodd += s->x[2*i]*qmf_coeffs[i];
sumeven += s->x[2*i + 1]*qmf_coeffs[11 - i];
}
/* We shift by 12 to allow for the QMF filters (DC gain = 4096), plus 1
to allow for us summing two filters, plus 1 to allow for the 15 bit
input to the G.722 algorithm. */
xlow = (sumeven + sumodd) >> 14;
xhigh = (sumeven - sumodd) >> 14;
#ifdef RUN_LIKE_REFERENCE_G722
/* The following lines are only used to verify bit-exactness
* with reference implementation of G.722. Higher precision
* is achieved without limiting the values.
*/
xlow = limitValues(xlow);
xhigh = limitValues(xhigh);
#endif
}
}
/* Block 1L, SUBTRA */
el = saturate(xlow - s->band[0].s);
/* Block 1L, QUANTL */
wd = (el >= 0) ? el : -(el + 1);
for (i = 1; i < 30; i++)
{
wd1 = (q6[i]*s->band[0].det) >> 12;
if (wd < wd1)
break;
}
ilow = (el < 0) ? iln[i] : ilp[i];
/* Block 2L, INVQAL */
ril = ilow >> 2;
wd2 = qm4[ril];
dlow = (s->band[0].det*wd2) >> 15;
/* Block 3L, LOGSCL */
il4 = rl42[ril];
wd = (s->band[0].nb*127) >> 7;
s->band[0].nb = wd + wl[il4];
if (s->band[0].nb < 0)
s->band[0].nb = 0;
else if (s->band[0].nb > 18432)
s->band[0].nb = 18432;
/* Block 3L, SCALEL */
wd1 = (s->band[0].nb >> 6) & 31;
wd2 = 8 - (s->band[0].nb >> 11);
wd3 = (wd2 < 0) ? (ilb[wd1] << -wd2) : (ilb[wd1] >> wd2);
s->band[0].det = wd3 << 2;
block4(s, 0, dlow);
if (s->eight_k)
{
/* Just leave the high bits as zero */
code = (0xC0 | ilow) >> (8 - s->bits_per_sample);
}
else
{
/* Block 1H, SUBTRA */
eh = saturate(xhigh - s->band[1].s);
/* Block 1H, QUANTH */
wd = (eh >= 0) ? eh : -(eh + 1);
wd1 = (564*s->band[1].det) >> 12;
mih = (wd >= wd1) ? 2 : 1;
ihigh = (eh < 0) ? ihn[mih] : ihp[mih];
/* Block 2H, INVQAH */
wd2 = qm2[ihigh];
dhigh = (s->band[1].det*wd2) >> 15;
/* Block 3H, LOGSCH */
ih2 = rh2[ihigh];
wd = (s->band[1].nb*127) >> 7;
s->band[1].nb = wd + wh[ih2];
if (s->band[1].nb < 0)
s->band[1].nb = 0;
else if (s->band[1].nb > 22528)
s->band[1].nb = 22528;
/* Block 3H, SCALEH */
wd1 = (s->band[1].nb >> 6) & 31;
wd2 = 10 - (s->band[1].nb >> 11);
wd3 = (wd2 < 0) ? (ilb[wd1] << -wd2) : (ilb[wd1] >> wd2);
s->band[1].det = wd3 << 2;
block4(s, 1, dhigh);
code = ((ihigh << 6) | ilow) >> (8 - s->bits_per_sample);
}
if (s->packed)
{
/* Pack the code bits */
s->out_buffer |= (code << s->out_bits);
s->out_bits += s->bits_per_sample;
if (s->out_bits >= 8)
{
g722_data[g722_bytes++] = (WebRtc_UWord8) (s->out_buffer & 0xFF);
s->out_bits -= 8;
s->out_buffer >>= 8;
}
}
else
{
g722_data[g722_bytes++] = (WebRtc_UWord8) code;
}
}
return g722_bytes;
}
/*- End of function --------------------------------------------------------*/
/*- End of file ------------------------------------------------------------*/

View File

@@ -0,0 +1,115 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include <stdlib.h>
#include <string.h>
#include "g722_interface.h"
#include "g722_enc_dec.h"
#include "typedefs.h"
WebRtc_Word16 WebRtcG722_CreateEncoder(G722EncInst **G722enc_inst)
{
*G722enc_inst=(G722EncInst*)malloc(sizeof(g722_encode_state_t));
if (*G722enc_inst!=NULL) {
return(0);
} else {
return(-1);
}
}
WebRtc_Word16 WebRtcG722_EncoderInit(G722EncInst *G722enc_inst)
{
// Create and/or reset the G.722 encoder
// Bitrate 64 kbps and wideband mode (2)
G722enc_inst = (G722EncInst *) g722_encode_init(
(g722_encode_state_t*) G722enc_inst, 64000, 2);
if (G722enc_inst == NULL) {
return -1;
} else {
return 0;
}
}
WebRtc_Word16 WebRtcG722_FreeEncoder(G722EncInst *G722enc_inst)
{
// Free encoder memory
return g722_encode_release((g722_encode_state_t*) G722enc_inst);
}
WebRtc_Word16 WebRtcG722_Encode(G722EncInst *G722enc_inst,
WebRtc_Word16 *speechIn,
WebRtc_Word16 len,
WebRtc_Word16 *encoded)
{
unsigned char *codechar = (unsigned char*) encoded;
// Encode the input speech vector
return g722_encode((g722_encode_state_t*) G722enc_inst,
codechar, speechIn, len);
}
WebRtc_Word16 WebRtcG722_CreateDecoder(G722DecInst **G722dec_inst)
{
*G722dec_inst=(G722DecInst*)malloc(sizeof(g722_decode_state_t));
if (*G722dec_inst!=NULL) {
return(0);
} else {
return(-1);
}
}
WebRtc_Word16 WebRtcG722_DecoderInit(G722DecInst *G722dec_inst)
{
// Create and/or reset the G.722 decoder
// Bitrate 64 kbps and wideband mode (2)
G722dec_inst = (G722DecInst *) g722_decode_init(
(g722_decode_state_t*) G722dec_inst, 64000, 2);
if (G722dec_inst == NULL) {
return -1;
} else {
return 0;
}
}
WebRtc_Word16 WebRtcG722_FreeDecoder(G722DecInst *G722dec_inst)
{
    // Free decoder memory
return g722_decode_release((g722_decode_state_t*) G722dec_inst);
}
WebRtc_Word16 WebRtcG722_Decode(G722DecInst *G722dec_inst,
WebRtc_Word16 *encoded,
WebRtc_Word16 len,
WebRtc_Word16 *decoded,
WebRtc_Word16 *speechType)
{
// Decode the G.722 encoder stream
*speechType=G722_WEBRTC_SPEECH;
return g722_decode((g722_decode_state_t*) G722dec_inst,
decoded, (WebRtc_UWord8*) encoded, len);
}
WebRtc_Word16 WebRtcG722_Version(char *versionStr, short len)
{
// Get version string
char version[30] = "2.0.0\n";
if (strlen(version) < (unsigned int)len)
{
strcpy(versionStr, version);
return 0;
}
else
{
return -1;
}
}

View File

@@ -0,0 +1,157 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/*
* testG722.cpp : Defines the entry point for the console application.
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "typedefs.h"
/* include API */
#include "g722_interface.h"
/* Runtime statistics */
#include <time.h>
#define CLOCKS_PER_SEC_G722 100000
// Forward declaration
typedef struct WebRtcG722EncInst G722EncInst;
typedef struct WebRtcG722DecInst G722DecInst;
/* function for reading audio data from PCM file */
int readframe(WebRtc_Word16 *data, FILE *inp, int length)
{
short k, rlen, status = 0;
rlen = (short)fread(data, sizeof(WebRtc_Word16), length, inp);
if (rlen < length) {
for (k = rlen; k < length; k++)
data[k] = 0;
status = 1;
}
return status;
}
int main(int argc, char* argv[])
{
char inname[60], outbit[40], outname[40];
FILE *inp, *outbitp, *outp;
int framecnt, endfile;
WebRtc_Word16 framelength = 160;
G722EncInst *G722enc_inst;
G722DecInst *G722dec_inst;
int err;
/* Runtime statistics */
double starttime;
    double runtime = 0;
double length_file;
WebRtc_Word16 stream_len = 0;
WebRtc_Word16 shortdata[960];
WebRtc_Word16 decoded[960];
WebRtc_Word16 streamdata[80*3];
WebRtc_Word16 speechType[1];
/* handling wrong input arguments in the command line */
if (argc!=5) {
printf("\n\nWrong number of arguments or flag values.\n\n");
printf("\n");
printf("Usage:\n\n");
printf("./testG722.exe framelength infile outbitfile outspeechfile \n\n");
printf("with:\n");
printf("framelength : Framelength in samples.\n\n");
printf("infile : Normal speech input file\n\n");
printf("outbitfile : Bitstream output file\n\n");
printf("outspeechfile: Speech output file\n\n");
exit(0);
}
/* Get frame length */
framelength = atoi(argv[1]);
/* Get Input and Output files */
sscanf(argv[2], "%s", inname);
sscanf(argv[3], "%s", outbit);
sscanf(argv[4], "%s", outname);
if ((inp = fopen(inname,"rb")) == NULL) {
printf(" G.722: Cannot read file %s.\n", inname);
exit(1);
}
if ((outbitp = fopen(outbit,"wb")) == NULL) {
printf(" G.722: Cannot write file %s.\n", outbit);
exit(1);
}
if ((outp = fopen(outname,"wb")) == NULL) {
printf(" G.722: Cannot write file %s.\n", outname);
exit(1);
}
printf("\nInput:%s\nOutput bitstream:%s\nOutput:%s\n", inname, outbit, outname);
/* Create and init */
WebRtcG722_CreateEncoder((G722EncInst **)&G722enc_inst);
WebRtcG722_CreateDecoder((G722DecInst **)&G722dec_inst);
WebRtcG722_EncoderInit((G722EncInst *)G722enc_inst);
WebRtcG722_DecoderInit((G722DecInst *)G722dec_inst);
/* Initialize encoder and decoder */
framecnt = 0;
endfile = 0;
while (endfile == 0) {
framecnt++;
/* Read speech block */
endfile = readframe(shortdata, inp, framelength);
/* Start clock before call to encoder and decoder */
starttime = clock()/(double)CLOCKS_PER_SEC_G722;
/* G.722 encoding + decoding */
stream_len = WebRtcG722_Encode((G722EncInst *)G722enc_inst, shortdata, framelength, streamdata);
err = WebRtcG722_Decode((G722DecInst *)G722dec_inst, streamdata, stream_len, decoded, speechType);
/* Stop clock after call to encoder and decoder */
runtime += (double)((clock()/(double)CLOCKS_PER_SEC_G722)-starttime);
if (stream_len < 0 || err < 0) {
/* exit if returned with error */
printf("Error in encoder/decoder\n");
} else {
/* Write coded bits to file */
fwrite(streamdata,sizeof(short),stream_len/2,outbitp);
/* Write coded speech to file */
fwrite(decoded,sizeof(short),framelength,outp);
}
}
WebRtcG722_FreeEncoder((G722EncInst *)G722enc_inst);
WebRtcG722_FreeDecoder((G722DecInst *)G722dec_inst);
length_file = ((double)framecnt*(double)framelength/16000);
printf("\n\nLength of speech file: %.1f s\n", length_file);
printf("Time to run G.722: %.2f s (%.2f %% of realtime)\n\n", runtime, (100*runtime/length_file));
printf("---------------------END----------------------\n");
fclose(inp);
fclose(outbitp);
fclose(outp);
return 0;
}

View File

@@ -0,0 +1,3 @@
tlegrand@google.com
turajs@google.com
jks@google.com

View File

@@ -0,0 +1,106 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_MODULES_AUDIO_CODING_CODECS_PCM16B_MAIN_INTERFACE_PCM16B_H_
#define WEBRTC_MODULES_AUDIO_CODING_CODECS_PCM16B_MAIN_INTERFACE_PCM16B_H_
/*
* Define the fixpoint numeric formats
*/
#include "typedefs.h"
#ifdef __cplusplus
extern "C" {
#endif
/****************************************************************************
* WebRtcPcm16b_EncodeW16(...)
*
* "Encode" a sample vector to 16 bit linear (Encoded standard is big endian)
*
* Input:
* - speechIn16b : Input speech vector
* - len : Number of samples in speech vector
*
* Output:
* - speechOut16b : Encoded data vector (big endian 16 bit)
*
* Returned value : Size in bytes of speechOut16b
*/
WebRtc_Word16 WebRtcPcm16b_EncodeW16(WebRtc_Word16 *speechIn16b,
WebRtc_Word16 len,
WebRtc_Word16 *speechOut16b);
/****************************************************************************
* WebRtcPcm16b_Encode(...)
*
* "Encode" a sample vector to 16 bit linear (Encoded standard is big endian)
*
* Input:
* - speech16b : Input speech vector
* - len : Number of samples in speech vector
*
* Output:
* - speech8b : Encoded data vector (big endian 16 bit)
*
* Returned value : Size in bytes of speech8b
*/
WebRtc_Word16 WebRtcPcm16b_Encode(WebRtc_Word16 *speech16b,
WebRtc_Word16 len,
unsigned char *speech8b);
/****************************************************************************
* WebRtcPcm16b_DecodeW16(...)
*
* "Decode" a vector to 16 bit linear (Encoded standard is big endian)
*
* Input:
* - speechIn16b : Encoded data vector (big endian 16 bit)
* - len : Number of bytes in speechIn16b
*
* Output:
* - speechOut16b : Decoded speech vector
*
* Returned value : Samples in speechOut16b
*/
WebRtc_Word16 WebRtcPcm16b_DecodeW16(void *inst,
WebRtc_Word16 *speechIn16b,
WebRtc_Word16 len,
WebRtc_Word16 *speechOut16b,
WebRtc_Word16* speechType);
/****************************************************************************
* WebRtcPcm16b_Decode(...)
*
* "Decode" a vector to 16 bit linear (Encoded standard is big endian)
*
* Input:
* - speech8b : Encoded data vector (big endian 16 bit)
* - len : Number of bytes in speech8b
*
* Output:
* - speech16b : Decoded speech vector
*
* Returned value : Samples in speech16b
*/
WebRtc_Word16 WebRtcPcm16b_Decode(unsigned char *speech8b,
WebRtc_Word16 len,
WebRtc_Word16 *speech16b);
#ifdef __cplusplus
}
#endif
#endif /* WEBRTC_MODULES_AUDIO_CODING_CODECS_PCM16B_MAIN_INTERFACE_PCM16B_H_ */
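/* A small round-trip sketch for the 16-bit variants above (not part of the
 * original header), assuming a little-endian host and compilation together
 * with pcm16b.h and the WebRTC typedefs. */
#include <stdio.h>
#include "pcm16b.h"
#include "typedefs.h"
int main(void)
{
    WebRtc_Word16 in[4] = {0x1234, -1, 256, 42};
    WebRtc_Word16 packed[4];
    WebRtc_Word16 out[4];
    WebRtc_Word16 speechType;
    WebRtc_Word16 bytes, samples;
    /* EncodeW16 byte-swaps to big endian and returns the size in bytes (2 * len) */
    bytes = WebRtcPcm16b_EncodeW16(in, 4, packed);
    /* DecodeW16 swaps back; len is given in bytes, samples are returned */
    samples = WebRtcPcm16b_DecodeW16(NULL, packed, bytes, out, &speechType);
    printf("%d bytes, %d samples, round trip %s\n", bytes, samples,
           (out[0] == in[0] && out[3] == in[3]) ? "ok" : "failed");
    return 0;
}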

View File

@@ -0,0 +1,100 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "pcm16b.h"
#include "typedefs.h"
#ifdef WEBRTC_BIG_ENDIAN
#include "signal_processing_library.h"
#endif
#define HIGHEND 0xFF00
#define LOWEND 0xFF
/* Encoder with WebRtc_Word16 Output */
WebRtc_Word16 WebRtcPcm16b_EncodeW16(WebRtc_Word16 *speechIn16b,
WebRtc_Word16 len,
WebRtc_Word16 *speechOut16b)
{
#ifdef WEBRTC_BIG_ENDIAN
WEBRTC_SPL_MEMCPY_W16(speechOut16b, speechIn16b, len);
#else
int i;
for (i=0;i<len;i++) {
speechOut16b[i]=(((WebRtc_UWord16)speechIn16b[i])>>8)|((((WebRtc_UWord16)speechIn16b[i])<<8)&0xFF00);
}
#endif
return(len<<1);
}
/* Encoder with char Output (old version) */
WebRtc_Word16 WebRtcPcm16b_Encode(WebRtc_Word16 *speech16b,
WebRtc_Word16 len,
unsigned char *speech8b)
{
WebRtc_Word16 samples=len*2;
WebRtc_Word16 pos;
WebRtc_Word16 short1;
WebRtc_Word16 short2;
for (pos=0;pos<len;pos++) {
short1=HIGHEND & speech16b[pos];
short2=LOWEND & speech16b[pos];
short1=short1>>8;
speech8b[pos*2]=(unsigned char) short1;
speech8b[pos*2+1]=(unsigned char) short2;
}
return(samples);
}
/* Decoder with WebRtc_Word16 Input instead of char when the WebRtc_Word16 Encoder is used */
WebRtc_Word16 WebRtcPcm16b_DecodeW16(void *inst,
WebRtc_Word16 *speechIn16b,
WebRtc_Word16 len,
WebRtc_Word16 *speechOut16b,
WebRtc_Word16* speechType)
{
#ifdef WEBRTC_BIG_ENDIAN
WEBRTC_SPL_MEMCPY_W8(speechOut16b, speechIn16b, ((len*sizeof(WebRtc_Word16)+1)>>1));
#else
int i;
int samples=len>>1;
for (i=0;i<samples;i++) {
speechOut16b[i]=(((WebRtc_UWord16)speechIn16b[i])>>8)|(((WebRtc_UWord16)(speechIn16b[i]&0xFF))<<8);
}
#endif
*speechType=1;
return(len>>1);
}
/* "old" version of the decoder that uses char as input (not used in NetEq any more) */
WebRtc_Word16 WebRtcPcm16b_Decode(unsigned char *speech8b,
WebRtc_Word16 len,
WebRtc_Word16 *speech16b)
{
WebRtc_Word16 samples=len>>1;
WebRtc_Word16 pos;
WebRtc_Word16 shortval;
for (pos=0;pos<samples;pos++) {
shortval=((unsigned short) speech8b[pos*2]);
shortval=(shortval<<8)&HIGHEND;
shortval=shortval|(((unsigned short) speech8b[pos*2+1])&LOWEND);
speech16b[pos]=shortval;
}
return(samples);
}

View File

@@ -0,0 +1,37 @@
# Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
{
'includes': [
'../../../../../../common_settings.gypi', # Common settings
],
'targets': [
{
'target_name': 'PCM16B',
'type': '<(library)',
'include_dirs': [
'../interface',
],
'direct_dependent_settings': {
'include_dirs': [
'../interface',
],
},
'sources': [
'../interface/pcm16b.h',
'pcm16b.c',
],
},
],
}
# Local Variables:
# tab-width:2
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=2 shiftwidth=2:

View File

@@ -0,0 +1,35 @@
# Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
{
'includes': [
'../../../../common_settings.gypi', # Common settings
],
'targets': [
# ilbc_test
{
'target_name': 'iLBCtest',
'type': 'executable',
'dependencies': [
'./main/source/ilbc.gyp:iLBC',
],
'include_dirs': [
'./main/interface',
],
'sources': [
'./main/test/iLBC_test.c',
],
},
],
}
# Local Variables:
# tab-width:2
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=2 shiftwidth=2:

File diff suppressed because it is too large

View File

@@ -0,0 +1,731 @@
Network Working Group A. Duric
Request for Comments: 3952 Telio
Category: Experimental S. Andersen
Aalborg University
December 2004
Real-time Transport Protocol (RTP) Payload Format
for internet Low Bit Rate Codec (iLBC) Speech
Status of this Memo
This memo defines an Experimental Protocol for the Internet
community. It does not specify an Internet standard of any kind.
Discussion and suggestions for improvement are requested.
Distribution of this memo is unlimited.
Copyright Notice
Copyright (C) The Internet Society (2004).
Abstract
This document describes the Real-time Transport Protocol (RTP)
payload format for the internet Low Bit Rate Codec (iLBC) Speech
developed by Global IP Sound (GIPS). Also, within the document there
are included necessary details for the use of iLBC with MIME and
Session Description Protocol (SDP).
Table of Contents
1. Introduction. . . . . . . . . . . . . . . . . . . . . . . . . . 2
2. Background. . . . . . . . . . . . . . . . . . . . . . . . . . . 2
3. RTP Payload Format. . . . . . . . . . . . . . . . . . . . . . . 3
3.1. Bitstream definition . . . . . . . . . . . . . . . . . . . 3
3.2. Multiple iLBC frames in a RTP packet . . . . . . . . . . . 6
4. IANA Considerations . . . . . . . . . . . . . . . . . . . . . . 7
4.1. Storage Mode . . . . . . . . . . . . . . . . . . . . . . . 7
4.2. MIME registration of iLBC. . . . . . . . . . . . . . . . . 8
5. Mapping to SDP Parameters . . . . . . . . . . . . . . . . . . . 9
6. Security Considerations . . . . . . . . . . . . . . . . . . . . 11
7. References. . . . . . . . . . . . . . . . . . . . . . . . . . . 11
7.1. Normative References . . . . . . . . . . . . . . . . . . . 11
7.2. Informative References . . . . . . . . . . . . . . . . . . 12
8. Acknowledgements. . . . . . . . . . . . . . . . . . . . . . . . 12
Authors' Addresses . . . . . . . . . . . . . . . . . . . . . . . . 12
Full Copyright Statement . . . . . . . . . . . . . . . . . . . . . 13
Duric & Andersen Experimental [Page 1]
RFC 3952 RTP Payload Format for iLBC Speech December 2004
1. Introduction
This document describes how compressed iLBC speech, as produced by
the iLBC codec [1], may be formatted for use as an RTP payload type.
Methods are provided to packetize the codec data frames into RTP
packets. The sender may send one or more codec data frames per
packet depending on the application scenario or based on the
transport network condition, bandwidth restriction, delay
requirements and packet-loss tolerance.
The key words "MUST", "MUST NOT", "REQUIRED", "SHALL", "SHALL NOT",
"SHOULD", "SHOULD NOT", "RECOMMENDED", "MAY", and "OPTIONAL" in this
document are to be interpreted as described in BCP 14, RFC 2119 [2].
2. Background
Global IP Sound (GIPS) has developed a speech compression algorithm
for use in IP based communications [1]. The iLBC codec enables
graceful speech quality degradation in the case of lost frames, which
occurs in connection with lost or delayed IP packets.
This codec is suitable for real time communications such as,
telephony and videoconferencing, streaming audio, archival and
messaging.
The iLBC codec [1] is an algorithm that compresses each basic frame
(20 ms or 30 ms) of 8000 Hz, 16-bit sampled input speech, into output
frames with rate of 400 bits for 30 ms basic frame size and 304 bits
for 20 ms basic frame size.
The codec supports two basic frame lengths: 30 ms at 13.33 kbit/s and
20 ms at 15.2 kbit/s, using a block independent linear-predictive
coding (LPC) algorithm. When the codec operates at block lengths of
20 ms, it produces 304 bits per block which MUST be packetized in 38
bytes. Similarly, for block lengths of 30 ms it produces 400 bits
per block which MUST be packetized in 50 bytes. This algorithm
results in a speech coding system with a controlled response to
packet losses similar to what is known from pulse code modulation
(PCM) with a packet loss concealment (PLC), such as ITU-T G711
standard [7], which operates at a fixed bit rate of 64 kbit/s. At
the same time, this algorithm enables fixed bit rate coding with a
quality-versus-bit rate tradeoff close to what is known from code-
excited linear prediction (CELP).
Duric & Andersen Experimental [Page 2]
RFC 3952 RTP Payload Format for iLBC Speech December 2004
3. RTP Payload Format
The iLBC codec uses 20 or 30 ms frames and a sampling rate clock of 8
kHz, so the RTP timestamp MUST be in units of 1/8000 of a second. The
   RTP payload for iLBC has the format shown in the figure below. No
   additional header specific to this payload format is required.
This format is intended for the situations where the sender and the
receiver send one or more codec data frames per packet. The RTP
packet looks as follows:
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| RTP Header [3] |
+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
| |
+ one or more frames of iLBC [1] |
| |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
Figure 1, Packet format diagram
The RTP header of the packetized encoded iLBC speech has the expected
values as described in [3]. The usage of M bit SHOULD be as
specified in the applicable RTP profile, for example, RFC 3551 [4]
specifies that if the sender does not suppress silence (i.e., sends a
frame on every frame interval), the M bit will always be zero. When
   more than one codec data frame is present in a single RTP packet, the
timestamp is, as always, the oldest data frame represented in the RTP
packet.
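   As an illustration of the timestamp rule above (this helper is not part of
   the RFC text): with the 8000 Hz timestamp clock, each iLBC frame advances
   the RTP timestamp by 8 ticks per millisecond of frame length.

   /* Illustrative helper, not part of the RFC: RTP timestamp increment for a
      packet carrying n_frames iLBC frames of frame_ms milliseconds each
      (20 or 30). */
   unsigned int ilbc_timestamp_increment(int n_frames, int frame_ms)
   {
       return (unsigned int) (n_frames * frame_ms * 8);   /* 8000 ticks per second */
   }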
The assignment of an RTP payload type for this new packet format is
outside the scope of this document, and will not be specified here.
It is expected that the RTP profile for a particular class of
applications will assign a payload type for this encoding, or if that
is not done, then a payload type in the dynamic range shall be chosen
by the sender.
3.1. Bitstream definition
The total number of bits used to describe one frame of 20 ms speech
is 304, which fits in 38 bytes and results in a bit rate of 15.20
kbit/s. For the case with a frame length of 30 ms speech the total
number of bits used is 400, which fits in 50 bytes and results in a
bit rate of 13.33 kbit/s. In the bitstream definition, the bits are
distributed into three classes according to their bit error or loss
sensitivity. The most sensitive bits (class 1) are placed first in
Duric & Andersen Experimental [Page 3]
RFC 3952 RTP Payload Format for iLBC Speech December 2004
the bitstream for each frame. The less sensitive bits (class 2) are
placed after the class 1 bits. The least sensitive bits (class 3)
are placed at the end of the bitstream for each frame.
Looking at the 20/30 ms frame length cases for each class: The class
1 bits occupy a total of 6/8 bytes (48/64 bits), the class 2 bits
occupy 8/12 bytes (64/96 bits), and the class 3 bits occupy 24/30
bytes (191/239 bits). This distribution of the bits enables the use
of uneven level protection (ULP). The detailed bit allocation is
shown in the table below. When a quantization index is distributed
between more classes the more significant bits belong to the lowest
class.
Duric & Andersen Experimental [Page 4]
RFC 3952 RTP Payload Format for iLBC Speech December 2004
Bitstream structure:
------------------------------------------------------------------+
Parameter | Bits Class <1,2,3> |
| 20 ms frame | 30 ms frame |
----------------------------------+---------------+---------------+
Split 1 | 6 <6,0,0> | 6 <6,0,0> |
LSF 1 Split 2 | 7 <7,0,0> | 7 <7,0,0> |
LSF Split 3 | 7 <7,0,0> | 7 <7,0,0> |
------------------+---------------+---------------+
Split 1 | NA (Not Appl.)| 6 <6,0,0> |
LSF 2 Split 2 | NA | 7 <7,0,0> |
Split 3 | NA | 7 <7,0,0> |
------------------+---------------+---------------+
Sum | 20 <20,0,0> | 40 <40,0,0> |
----------------------------------+---------------+---------------+
Block Class. | 2 <2,0,0> | 3 <3,0,0> |
----------------------------------+---------------+---------------+
Position 22 sample segment | 1 <1,0,0> | 1 <1,0,0> |
----------------------------------+---------------+---------------+
Scale Factor State Coder | 6 <6,0,0> | 6 <6,0,0> |
----------------------------------+---------------+---------------+
Sample 0 | 3 <0,1,2> | 3 <0,1,2> |
Quantized Sample 1 | 3 <0,1,2> | 3 <0,1,2> |
Residual : | : : | : : |
State : | : : | : : |
Samples : | : : | : : |
Sample 56 | 3 <0,1,2> | 3 <0,1,2> |
Sample 57 | NA | 3 <0,1,2> |
------------------+---------------+---------------+
Sum | 171 <0,57,114>| 174 <0,58,116>|
----------------------------------+---------------+---------------+
Stage 1 | 7 <6,0,1> | 7 <4,2,1> |
CB for 22/23 Stage 2 | 7 <0,0,7> | 7 <0,0,7> |
sample block Stage 3 | 7 <0,0,7> | 7 <0,0,7> |
------------------+---------------+---------------+
Sum | 21 <6,0,15> | 21 <4,2,15> |
----------------------------------+---------------+---------------+
Stage 1 | 5 <2,0,3> | 5 <1,1,3> |
Gain for 22/23 Stage 2 | 4 <1,1,2> | 4 <1,1,2> |
sample block Stage 3 | 3 <0,0,3> | 3 <0,0,3> |
------------------+---------------+---------------+
Sum | 12 <3,1,8> | 12 <2,2,8> |
----------------------------------+---------------+---------------+
Stage 1 | 8 <7,0,1> | 8 <6,1,1> |
sub-block 1 Stage 2 | 7 <0,0,7> | 7 <0,0,7> |
Stage 3 | 7 <0,0,7> | 7 <0,0,7> |
------------------+---------------+---------------+
Duric & Andersen Experimental [Page 5]
RFC 3952 RTP Payload Format for iLBC Speech December 2004
Stage 1 | 8 <0,0,8> | 8 <0,7,1> |
sub-block 2 Stage 2 | 8 <0,0,8> | 8 <0,0,8> |
Indices Stage 3 | 8 <0,0,8> | 8 <0,0,8> |
for CB ------------------+---------------+---------------+
sub-blocks Stage 1 | NA | 8 <0,7,1> |
sub-block 3 Stage 2 | NA | 8 <0,0,8> |
Stage 3 | NA | 8 <0,0,8> |
------------------+---------------+---------------+
Stage 1 | NA | 8 <0,7,1> |
sub-block 4 Stage 2 | NA | 8 <0,0,8> |
Stage 3 | NA | 8 <0,0,8> |
------------------+---------------+---------------+
Sum | 46 <7,0,39> | 94 <6,22,66> |
----------------------------------+---------------+---------------+
Stage 1 | 5 <1,2,2> | 5 <1,2,2> |
sub-block 1 Stage 2 | 4 <1,1,2> | 4 <1,2,1> |
Stage 3 | 3 <0,0,3> | 3 <0,0,3> |
------------------+---------------+---------------+
Stage 1 | 5 <1,1,3> | 5 <0,2,3> |
sub-block 2 Stage 2 | 4 <0,2,2> | 4 <0,2,2> |
Stage 3 | 3 <0,0,3> | 3 <0,0,3> |
Gains for ------------------+---------------+---------------+
sub-blocks Stage 1 | NA | 5 <0,1,4> |
sub-block 3 Stage 2 | NA | 4 <0,1,3> |
Stage 3 | NA | 3 <0,0,3> |
------------------+---------------+---------------+
Stage 1 | NA | 5 <0,1,4> |
sub-block 4 Stage 2 | NA | 4 <0,1,3> |
Stage 3 | NA | 3 <0,0,3> |
------------------+---------------+---------------+
Sum | 24 <3,6,15> | 48 <2,12,34> |
-------------------------------------------------------------------
Empty frame indicator | 1 <0,0,1> | 1 <0,0,1> |
-------------------------------------------------------------------
SUM 304 <48,64,192> 400 <64,96,240>
Table 3.1 The bitstream definition for iLBC.
When packetized into the payload, all the class 1 bits MUST be sorted
in order (from top and down) as they were specified in the table.
Additionally, all the class 2 bits MUST be sorted (from top and down)
and all the class 3 bits MUST be sorted in the same sequential order.
3.2. Multiple iLBC frames in a RTP packet
More than one iLBC frame may be included in a single RTP packet by a
sender.
It is important to observe that senders have the following additional
restrictions:
o SHOULD NOT include more iLBC frames in a single RTP packet than
will fit in the MTU of the RTP transport protocol.
o Frames MUST NOT be split between RTP packets.
o Frames of the different modes (20 ms and 30 ms) MUST NOT be
included within the same packet.
It is RECOMMENDED that the number of frames contained within an RTP
packet be consistent with the application. For example, in telephony
and other real-time applications where delay is important, fewer
frames per packet yield lower delay, whereas for bandwidth-constrained
links or delay-insensitive streaming or messaging applications, more
frames per packet would be acceptable.
Information describing the number of frames contained in an RTP
packet is not transmitted as part of the RTP payload. The number of
iLBC frames is determined by counting the total number of octets
within the RTP packet and dividing the octet count by the expected
number of octets per frame (38 octets per 20 ms frame, 50 octets per
30 ms frame).
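As an illustration only (this helper is not part of RFC 3952), the
receiver-side calculation could look like the following C sketch,
assuming the operating mode is already known from SDP negotiation:

/* Hypothetical helper: derive the number of iLBC frames in an RTP
 * payload from its length in octets (38 octets per 20 ms frame,
 * 50 octets per 30 ms frame). Returns -1 for a malformed payload. */
static int IlbcFramesInPayload(int payload_octets, int mode_ms)
{
    int octets_per_frame = (mode_ms == 20) ? 38 : 50;
    if (payload_octets <= 0 || (payload_octets % octets_per_frame) != 0)
        return -1;  /* not a whole number of frames */
    return payload_octets / octets_per_frame;
}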
4. IANA Considerations
One new MIME sub-type as described in this section has been
registered.
4.1. Storage Mode
The storage mode is used for storing speech frames (e.g., as a file
or email attachment).
+------------------+
| Header |
+------------------+
| Speech frame 1 |
+------------------+
: :
+------------------+
| Speech frame n |
+------------------+
Figure 2, Storage format diagram
The file begins with a header that includes only a magic number to
identify that it is an iLBC file.
The magic number for an iLBC file MUST correspond to the ASCII character
string:
o for 30 ms frame size mode:"#!iLBC30\n", or "0x23 0x21 0x69
0x4C 0x42 0x43 0x33 0x30 0x0A" in hexadecimal form,
o for 20 ms frame size mode:"#!iLBC20\n", or "0x23 0x21 0x69
0x4C 0x42 0x43 0x32 0x30 0x0A" in hexadecimal form.
After the header, the speech frames follow in consecutive order.
Speech frames lost in transmission MUST be stored as "empty frames",
as defined in [1].
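As a hedged illustration of the header check (the helper name and
signature are hypothetical, not defined by this specification), a
reader of the storage format could verify the magic number as follows:

#include <string.h>

/* Hypothetical helper: return the frame size mode (20 or 30) indicated
 * by the magic number at the start of a stored iLBC file, or -1 if the
 * data does not begin with a valid iLBC magic number. */
static int IlbcStorageMode(const unsigned char *data, size_t len)
{
    static const char kMagic30[] = "#!iLBC30\n";  /* 9 octets */
    static const char kMagic20[] = "#!iLBC20\n";  /* 9 octets */
    if (len >= 9 && memcmp(data, kMagic30, 9) == 0) return 30;
    if (len >= 9 && memcmp(data, kMagic20, 9) == 0) return 20;
    return -1;
}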
4.2. MIME Registration of iLBC
MIME media type name: audio
MIME subtype: iLBC
Optional parameters:
All of the parameters apply to RTP transfer only.
maxptime:The maximum amount of media which can be encapsulated in
each packet, expressed as time in milliseconds. The time
SHALL be calculated as the sum of the time the media present
in the packet represents. The time SHOULD be a multiple of
the frame size. This attribute is probably only meaningful
for audio data, but may be used with other media types if it
makes sense. It is a media attribute, and is not dependent
on charset. Note that this attribute was introduced after
RFC 2327, and non-updated implementations will ignore this
attribute.
mode: The iLBC operating frame mode (20 or 30 ms) that will be
encapsulated in each packet. Values can be 0, 20 and 30
(where 0 is reserved, 20 stands for preferred 20 ms frame
size and 30 stands for preferred 30 ms frame size).
ptime: Defined as usual for RTP audio (see [5]).
Encoding considerations:
This type is defined for transfer via both RTP (RFC 3550)
and stored-file methods as described in Section 4.1 of RFC
3952. Audio data is binary data, and must be encoded for
non-binary transport; the Base64 encoding is suitable for
email.
Security considerations:
See Section 6 of RFC 3952.
Public specification:
Please refer to RFC 3951 [1].
Additional information:
The following applies to stored-file transfer methods:
Magic number:
ASCII character string for:
o 30 ms frame size mode "#!iLBC30\n" (or 0x23 0x21
0x69 0x4C 0x42 0x43 0x33 0x30 0x0A in hexadecimal)
o 20 ms frame size mode "#!iLBC20\n" (or 0x23 0x21
0x69 0x4C 0x42 0x43 0x32 0x30 0x0A in hexadecimal)
File extensions: lbc, LBC
Macintosh file type code: none
Object identifier or OID: none
Person & email address to contact for further information:
alan.duric@telio.no
Intended usage: COMMON.
It is expected that many VoIP applications will use this
type.
Author/Change controller:
alan.duric@telio.no
IETF Audio/Video transport working group
5. Mapping To SDP Parameters
The information carried in the MIME media type specification has a
specific mapping to fields in the Session Description Protocol (SDP)
[5], which is commonly used to describe RTP sessions. When SDP is
used to specify sessions employing the iLBC codec, the mapping is as
follows:
o The MIME type ("audio") goes in SDP "m=" as the media name.
o The MIME subtype (payload format name) goes in SDP "a=rtpmap" as
the encoding name.
o The parameters "ptime" and "maxptime" go in the SDP "a=ptime" and
"a=maxptime" attributes, respectively.
o The parameter "mode" goes in the SDP "a=fmtp" attribute by copying
it directly from the MIME media type string as "mode=value".
When conveying information by SDP, the encoding name SHALL be "iLBC"
(the same as the MIME subtype).
An example of the media representation in SDP for describing iLBC
might be:
m=audio 49120 RTP/AVP 97
a=rtpmap:97 iLBC/8000
If the 20 ms frame size mode is used, the remote iLBC encoder SHALL
receive the "mode" parameter in the SDP "a=fmtp" attribute, copied
directly from the MIME media type string as a semicolon-separated
parameter=value pair, where the parameter is "mode" and the value can
be 0 or 20 (where 0 is reserved and 20 stands for preferred 20 ms
frame size). An example of the media representation in SDP for
describing iLBC when the 20 ms frame size mode is used might be:
m=audio 49120 RTP/AVP 97
a=rtpmap:97 iLBC/8000
a=fmtp:97 mode=20
It is important to emphasize the bi-directional character of the
"mode" parameter - both sides of a bi-directional session MUST use
the same "mode" value.
The offer contains the preferred mode of the offerer. The answerer
may agree to that mode by including the same mode in the answer, or
may include a different mode. The resulting mode used by both
parties SHALL be the lower of the bandwidth modes in the offer and
answer.
That is, an offer of "mode=20" receiving an answer of "mode=30" will
result in "mode=30" being used by both participants. Similarly, an
offer of "mode=30" and an answer of "mode=20" will result in
"mode=30" being used by both participants.
This is important when one end point utilizes a bandwidth constrained
link (e.g., 28.8k modem link or slower), where only the lower frame
size will work.
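A minimal sketch of that negotiation rule (the helper is hypothetical
and ignores the reserved value 0):

/* Hypothetical helper: both parties end up using the lower bandwidth
 * mode, which is the 30 ms mode, so any side asking for 30 wins. */
static int IlbcNegotiatedMode(int offered_mode, int answered_mode)
{
    return (offered_mode == 30 || answered_mode == 30) ? 30 : 20;
}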
The ptime parameter cannot be used to specify the iLBC operating
mode, because for certain values it is impossible to distinguish which
mode is in use (e.g., when ptime=60 it is impossible to tell whether a
packet carries 2 frames of 30 ms or 3 frames of 20 ms).
Note that the payload format (encoding) names are commonly shown in
upper case. MIME subtypes are commonly shown in lower case. These
names are case-insensitive in both places. Similarly, parameter
names are case-insensitive both in MIME types and in the default
mapping to the SDP a=fmtp attribute.
6. Security Considerations
RTP packets using the payload format defined in this specification
are subject to the general security considerations discussed in [3]
and any appropriate profile (e.g., [4]).
As this format transports encoded speech, the main security issues
include confidentiality and authentication of the speech itself. The
payload format itself does not have any built-in security mechanisms.
Confidentiality of the media streams is achieved by encryption;
therefore, external mechanisms, such as SRTP [6], MAY be used for that
purpose. The data compression used with this payload format is
applied end-to-end; hence encryption may be performed after
compression with no conflict between the two operations.
A potential denial-of-service threat exists for data encoding using
compression techniques that have non-uniform receiver-end
computational load. The attacker can inject pathological datagrams
into the stream which are complex to decode and cause the receiver to
become overloaded. However, the encodings covered in this document
do not exhibit any significant non-uniformity.
7. References
7.1. Normative References
[1] Andersen, S., Duric, A., Astrom, H., Hagen, R., Kleijn, W., and
J. Linden, "Internet Low Bit Rate Codec (iLBC)", RFC 3951,
December 2004.
[2] Bradner, S., "Key words for use in RFCs to Indicate Requirement
Levels", BCP 14, RFC 2119, March 1997.
[3] Schulzrinne, H., Casner, S., Frederick, R., and V. Jacobson,
"RTP: A Transport Protocol for Real-Time Applications", STD 64,
RFC 3550, July 2003.
[4] Schulzrinne, H. and S. Casner, "RTP Profile for Audio and Video
Conferences with Minimal Control", STD 65, RFC 3551, July 2003.
[5] Handley, M. and V. Jacobson, "SDP: Session Description
Protocol", RFC 2327, April 1998.
[6] Baugher, M., McGrew, D., Naslund, M., Carrara, E., and K.
Norrman, "The Secure Real-time Transport Protocol", RFC 3711,
March 2004.
7.2. Informative References
[7] ITU-T Recommendation G.711, available online from the ITU
bookstore at http://www.itu.int.
8. Acknowledgements
The authors would like to thank Henry Sinnreich, Patrik Faltstrom,
Alan Johnston, and Jean-Francois Mule for their great support of the
iLBC initiative and for valuable feedback and comments.
Authors' Addresses
Alan Duric
Telio AS
Stoperigt. 2
Oslo, N-0250
Norway
Phone: +47 21673505
EMail: alan.duric@telio.no
Soren Vang Andersen
Department of Communication Technology
Aalborg University
Fredrik Bajers Vej 7A
9200 Aalborg
Denmark
Phone: ++45 9 6358627
EMail: sva@kom.auc.dk
Full Copyright Statement
Copyright (C) The Internet Society (2004).
This document is subject to the rights, licenses and restrictions
contained in BCP 78, and except as set forth therein, the authors
retain all their rights.
This document and the information contained herein are provided on an
"AS IS" basis and THE CONTRIBUTOR, THE ORGANIZATION HE/SHE REPRESENTS
OR IS SPONSORED BY (IF ANY), THE INTERNET SOCIETY AND THE INTERNET
ENGINEERING TASK FORCE DISCLAIM ALL WARRANTIES, EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO ANY WARRANTY THAT THE USE OF THE
INFORMATION HEREIN WILL NOT INFRINGE ANY RIGHTS OR ANY IMPLIED
WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
Intellectual Property
The IETF takes no position regarding the validity or scope of any
Intellectual Property Rights or other rights that might be claimed to
pertain to the implementation or use of the technology described in
this document or the extent to which any license under such rights
might or might not be available; nor does it represent that it has
made any independent effort to identify any such rights. Information
on the IETF's procedures with respect to rights in IETF Documents can
be found in BCP 78 and BCP 79.
Copies of IPR disclosures made to the IETF Secretariat and any
assurances of licenses to be made available, or the result of an
attempt made to obtain a general license or permission for the use of
such proprietary rights by implementers or users of this
specification can be obtained from the IETF on-line IPR repository at
http://www.ietf.org/ipr.
The IETF invites any interested party to bring to its attention any
copyrights, patents or patent applications, or other proprietary
rights that may cover technology that may be required to implement
this standard. Please address the information to the IETF at ietf-
ipr@ietf.org.
Acknowledgement
Funding for the RFC Editor function is currently provided by the
Internet Society.

View File

@@ -0,0 +1,260 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/*
* ilbc.h
*
* This header file contains all of the API's for iLBC.
*
*/
#ifndef WEBRTC_MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_INTERFACE_ILBC_H_
#define WEBRTC_MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_INTERFACE_ILBC_H_
/*
* Define the fixpoint numeric formats
*/
#include "typedefs.h"
/*
* Solution to support multiple instances
* Customer has to cast instance to proper type
*/
typedef struct iLBC_encinst_t_ iLBC_encinst_t;
typedef struct iLBC_decinst_t_ iLBC_decinst_t;
/*
* Comfort noise constants
*/
#define ILBC_SPEECH 1
#define ILBC_CNG 2
#ifdef __cplusplus
extern "C" {
#endif
/****************************************************************************
* WebRtcIlbcfix_XxxAssign(...)
*
* These functions assign the encoder/decoder instance to the specified
* memory location
*
* Input:
* - XXX_xxxinst : Pointer to created instance that should be
* assigned
* - ILBCXXX_inst_Addr : Pointer to the desired memory space
* - size : The size that this structure occupies (in Word16)
*
* Return value : 0 - Ok
* -1 - Error
*/
WebRtc_Word16 WebRtcIlbcfix_EncoderAssign(iLBC_encinst_t **iLBC_encinst,
WebRtc_Word16 *ILBCENC_inst_Addr,
WebRtc_Word16 *size);
WebRtc_Word16 WebRtcIlbcfix_DecoderAssign(iLBC_decinst_t **iLBC_decinst,
WebRtc_Word16 *ILBCDEC_inst_Addr,
WebRtc_Word16 *size);
/****************************************************************************
* WebRtcIlbcfix_XxxCreate(...)
*
* These functions create an instance of the specified structure
*
* Input:
* - XXX_inst : Pointer to created instance that should be created
*
* Return value : 0 - Ok
* -1 - Error
*/
WebRtc_Word16 WebRtcIlbcfix_EncoderCreate(iLBC_encinst_t **iLBC_encinst);
WebRtc_Word16 WebRtcIlbcfix_DecoderCreate(iLBC_decinst_t **iLBC_decinst);
/****************************************************************************
* WebRtcIlbcfix_XxxFree(...)
*
* These functions free the dynamic memory of a specified instance
*
* Input:
* - XXX_inst : Pointer to created instance that should be freed
*
* Return value : 0 - Ok
* -1 - Error
*/
WebRtc_Word16 WebRtcIlbcfix_EncoderFree(iLBC_encinst_t *iLBC_encinst);
WebRtc_Word16 WebRtcIlbcfix_DecoderFree(iLBC_decinst_t *iLBC_decinst);
/****************************************************************************
* WebRtcIlbcfix_EncoderInit(...)
*
* This function initializes an iLBC encoder instance
*
* Input:
* - iLBCenc_inst : iLBC encoder instance that should be initialized
* - frameLen : The frame length of the codec 20/30 (ms)
*
* Return value : 0 - Ok
* -1 - Error
*/
WebRtc_Word16 WebRtcIlbcfix_EncoderInit(iLBC_encinst_t *iLBCenc_inst,
WebRtc_Word16 frameLen);
/****************************************************************************
* WebRtcIlbcfix_Encode(...)
*
* This function encodes one iLBC frame. Input speech length has to be a
* multiple of the frame length.
*
* Input:
* - iLBCenc_inst : iLBC instance, i.e. the user that should encode
* a package
* - speechIn : Input speech vector
* - len : Samples in speechIn (160, 240, 320 or 480)
*
* Output:
* - encoded : The encoded data vector
*
* Return value : >0 - Length (in bytes) of coded data
* -1 - Error
*/
WebRtc_Word16 WebRtcIlbcfix_Encode(iLBC_encinst_t *iLBCenc_inst,
WebRtc_Word16 *speechIn,
WebRtc_Word16 len,
WebRtc_Word16 *encoded);
/****************************************************************************
* WebRtcIlbcfix_DecoderInit(...)
*
* This function initializes an iLBC decoder instance with either 20 or 30 ms frames.
* Alternatively, the WebRtcIlbcfix_DecoderInitXXMs functions can be used; then there
* is no need to specify the frame length with a variable.
*
* Input:
* - iLBCdec_inst : iLBC decoder instance that should be initialized
* - frameLen : The frame length of the codec 20/30 (ms)
*
* Return value : 0 - Ok
* -1 - Error
*/
WebRtc_Word16 WebRtcIlbcfix_DecoderInit(iLBC_decinst_t *iLBCdec_inst,
WebRtc_Word16 frameLen);
WebRtc_Word16 WebRtcIlbcfix_DecoderInit20Ms(iLBC_decinst_t *iLBCdec_inst);
WebRtc_Word16 WebRtcIlbcfix_Decoderinit30Ms(iLBC_decinst_t *iLBCdec_inst);
/****************************************************************************
* WebRtcIlbcfix_Decode(...)
*
* This function decodes a packet with iLBC frame(s). Output speech length
* will be a multiple of 160 or 240 samples ((160 or 240)*frames/packet).
*
* Input:
* - iLBCdec_inst : iLBC instance, i.e. the user that should decode
* a packet
* - encoded : Encoded iLBC frame(s)
* - len : Bytes in encoded vector
*
* Output:
* - decoded : The decoded vector
* - speechType : 1 normal, 2 CNG
*
* Return value : >0 - Samples in decoded vector
* -1 - Error
*/
WebRtc_Word16 WebRtcIlbcfix_Decode(iLBC_decinst_t *iLBCdec_inst,
WebRtc_Word16* encoded,
WebRtc_Word16 len,
WebRtc_Word16 *decoded,
WebRtc_Word16 *speechType);
WebRtc_Word16 WebRtcIlbcfix_Decode20Ms(iLBC_decinst_t *iLBCdec_inst,
WebRtc_Word16 *encoded,
WebRtc_Word16 len,
WebRtc_Word16 *decoded,
WebRtc_Word16 *speechType);
WebRtc_Word16 WebRtcIlbcfix_Decode30Ms(iLBC_decinst_t *iLBCdec_inst,
WebRtc_Word16 *encoded,
WebRtc_Word16 len,
WebRtc_Word16 *decoded,
WebRtc_Word16 *speechType);
/****************************************************************************
* WebRtcIlbcfix_DecodePlc(...)
*
* This function conducts PLC for iLBC frame(s). Output speech length
* will be a multiple of 160 or 240 samples.
*
* Input:
* - iLBCdec_inst : iLBC instance, i.e. the user that should perform
* a PLC
* - noOfLostFrames : Number of PLC frames to produce
*
* Output:
* - decoded : The "decoded" vector
*
* Return value : >0 - Samples in decoded PLC vector
* -1 - Error
*/
WebRtc_Word16 WebRtcIlbcfix_DecodePlc(iLBC_decinst_t *iLBCdec_inst,
WebRtc_Word16 *decoded,
WebRtc_Word16 noOfLostFrames);
/****************************************************************************
* WebRtcIlbcfix_NetEqPlc(...)
*
* This function updates the decoder when a packet loss has occurred, but it
* does not produce any PLC data. It can be used if another PLC method
* is used (i.e., NetEq).
*
* Input:
* - iLBCdec_inst : iLBC instance that should be updated
* - noOfLostFrames : Number of lost frames
*
* Output:
* - decoded : The "decoded" vector (nothing in this case)
*
* Return value : >0 - Samples in decoded PLC vector
* -1 - Error
*/
WebRtc_Word16 WebRtcIlbcfix_NetEqPlc(iLBC_decinst_t *iLBCdec_inst,
WebRtc_Word16 *decoded,
WebRtc_Word16 noOfLostFrames);
/****************************************************************************
* WebRtcIlbcfix_version(...)
*
* This function returns the version number of iLBC
*
* Output:
* - version : Version number of iLBC (maximum 20 char)
*/
void WebRtcIlbcfix_version(WebRtc_Word8 *version);
#ifdef __cplusplus
}
#endif
#endif
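To make the calling convention above concrete, the following is a
hedged round-trip sketch using only functions declared in this header;
the example function name, buffer sizes and reduced error handling are
assumptions, not part of the API:

#include "ilbc.h"

/* Encode and decode one 20 ms frame (160 samples at 8 kHz). */
int IlbcRoundTripExample(const WebRtc_Word16 speech[160])
{
    iLBC_encinst_t *enc = NULL;
    iLBC_decinst_t *dec = NULL;
    WebRtc_Word16 input[160];
    WebRtc_Word16 encoded[25];   /* one 20 ms frame is 38 bytes (19 Word16) */
    WebRtc_Word16 decoded[160];
    WebRtc_Word16 speechType = 0;
    WebRtc_Word16 bytes = 0, samples = 0;
    int i;

    for (i = 0; i < 160; i++) {
        input[i] = speech[i];    /* Encode takes a non-const pointer */
    }
    if (WebRtcIlbcfix_EncoderCreate(&enc) != 0) return -1;
    if (WebRtcIlbcfix_DecoderCreate(&dec) != 0) return -1;
    WebRtcIlbcfix_EncoderInit(enc, 20);   /* 20 ms frame mode */
    WebRtcIlbcfix_DecoderInit(dec, 20);

    bytes = WebRtcIlbcfix_Encode(enc, input, 160, encoded);
    if (bytes > 0) {
        samples = WebRtcIlbcfix_Decode(dec, encoded, bytes, decoded, &speechType);
    }

    WebRtcIlbcfix_EncoderFree(enc);
    WebRtcIlbcfix_DecoderFree(dec);
    return (bytes > 0 && samples == 160) ? 0 : -1;
}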

View File

@@ -0,0 +1,80 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/******************************************************************
iLBC Speech Coder ANSI-C Source Code
WebRtcIlbcfix_AbsQuant.c
******************************************************************/
#include "defines.h"
#include "constants.h"
#include "abs_quant_loop.h"
/*----------------------------------------------------------------*
* predictive noise shaping encoding of scaled start state
* (subroutine for WebRtcIlbcfix_StateSearch)
*---------------------------------------------------------------*/
void WebRtcIlbcfix_AbsQuant(
iLBC_Enc_Inst_t *iLBCenc_inst,
/* (i) Encoder instance */
iLBC_bits *iLBC_encbits, /* (i/o) Encoded bits (outputs idxForMax
and idxVec, uses state_first as
input) */
WebRtc_Word16 *in, /* (i) vector to encode */
WebRtc_Word16 *weightDenum /* (i) denominator of synthesis filter */
) {
WebRtc_Word16 *syntOut;
WebRtc_Word16 quantLen[2];
/* Stack based */
WebRtc_Word16 syntOutBuf[LPC_FILTERORDER+STATE_SHORT_LEN_30MS];
WebRtc_Word16 in_weightedVec[STATE_SHORT_LEN_30MS+LPC_FILTERORDER];
WebRtc_Word16 *in_weighted = &in_weightedVec[LPC_FILTERORDER];
/* Initialize the buffers */
WebRtcSpl_MemSetW16(syntOutBuf, 0, LPC_FILTERORDER+STATE_SHORT_LEN_30MS);
syntOut = &syntOutBuf[LPC_FILTERORDER];
/* Start with zero state */
WebRtcSpl_MemSetW16(in_weightedVec, 0, LPC_FILTERORDER);
/* Perform the quantization loop in two sections of length quantLen[i],
where the perceptual weighting filter is updated at the subframe
border */
if (iLBC_encbits->state_first) {
quantLen[0]=SUBL;
quantLen[1]=iLBCenc_inst->state_short_len-SUBL;
} else {
quantLen[0]=iLBCenc_inst->state_short_len-SUBL;
quantLen[1]=SUBL;
}
/* Calculate the weighted residual, switch perceptual weighting
filter at the subframe border */
WebRtcSpl_FilterARFastQ12(
in, in_weighted,
weightDenum, LPC_FILTERORDER+1, quantLen[0]);
WebRtcSpl_FilterARFastQ12(
&in[quantLen[0]], &in_weighted[quantLen[0]],
&weightDenum[LPC_FILTERORDER+1], LPC_FILTERORDER+1, quantLen[1]);
WebRtcIlbcfix_AbsQuantLoop(
syntOut,
in_weighted,
weightDenum,
quantLen,
iLBC_encbits->idxVec);
}

View File

@@ -0,0 +1,39 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/******************************************************************
iLBC Speech Coder ANSI-C Source Code
WebRtcIlbcfix_AbsQuant.h
******************************************************************/
#ifndef WEBRTC_MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_ABS_QUANT_H_
#define WEBRTC_MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_ABS_QUANT_H_
#include "defines.h"
/*----------------------------------------------------------------*
* predictive noise shaping encoding of scaled start state
* (subroutine for WebRtcIlbcfix_StateSearch)
*---------------------------------------------------------------*/
void WebRtcIlbcfix_AbsQuant(
iLBC_Enc_Inst_t *iLBCenc_inst,
/* (i) Encoder instance */
iLBC_bits *iLBC_encbits, /* (i/o) Encoded bits (outputs idxForMax
and idxVec, uses state_first as
input) */
WebRtc_Word16 *in, /* (i) vector to encode */
WebRtc_Word16 *weightDenum /* (i) denominator of synthesis filter */
);
#endif

View File

@@ -0,0 +1,95 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/******************************************************************
iLBC Speech Coder ANSI-C Source Code
WebRtcIlbcfix_AbsQuantLoop.c
******************************************************************/
#include "defines.h"
#include "constants.h"
#include "sort_sq.h"
void WebRtcIlbcfix_AbsQuantLoop(
WebRtc_Word16 *syntOutIN,
WebRtc_Word16 *in_weightedIN,
WebRtc_Word16 *weightDenumIN,
WebRtc_Word16 *quantLenIN,
WebRtc_Word16 *idxVecIN
)
{
int n, k1, k2;
WebRtc_Word16 index;
WebRtc_Word32 toQW32;
WebRtc_Word32 toQ32;
WebRtc_Word16 tmp16a;
WebRtc_Word16 xq;
WebRtc_Word16 *syntOut = syntOutIN;
WebRtc_Word16 *in_weighted = in_weightedIN;
WebRtc_Word16 *weightDenum = weightDenumIN;
WebRtc_Word16 *quantLen = quantLenIN;
WebRtc_Word16 *idxVec = idxVecIN;
n=0;
for(k1=0;k1<2;k1++) {
for(k2=0;k2<quantLen[k1];k2++){
/* Filter to get the predicted value */
WebRtcSpl_FilterARFastQ12(
syntOut, syntOut,
weightDenum, LPC_FILTERORDER+1, 1);
/* the quantizer */
toQW32 = (WebRtc_Word32)(*in_weighted) - (WebRtc_Word32)(*syntOut);
toQ32 = (((WebRtc_Word32)toQW32)<<2);
if (toQ32 > 32767) {
toQ32 = (WebRtc_Word32) 32767;
} else if (toQ32 < -32768) {
toQ32 = (WebRtc_Word32) -32768;
}
/* Quantize the state */
if (toQW32<(-7577)) {
/* To prevent negative overflow */
index=0;
} else if (toQW32>8151) {
/* To prevent positive overflow */
index=7;
} else {
/* Find the best quantization index
(state_sq3Tbl is in Q13 and toQ is in Q11)
*/
WebRtcIlbcfix_SortSq(&xq, &index,
(WebRtc_Word16)toQ32,
WebRtcIlbcfix_kStateSq3, 8);
}
/* Store selected index */
(*idxVec++) = index;
/* Compute decoded sample and update of the prediction filter */
tmp16a = ((WebRtcIlbcfix_kStateSq3[index] + 2 ) >> 2);
*syntOut = (WebRtc_Word16) (tmp16a + (WebRtc_Word32)(*in_weighted) - toQW32);
n++;
syntOut++; in_weighted++;
}
/* Update perceptual weighting filter at subframe border */
weightDenum += 11;
}
}

View File

@@ -0,0 +1,37 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/******************************************************************
iLBC Speech Coder ANSI-C Source Code
WebRtcIlbcfix_AbsQuantLoop.h
******************************************************************/
#ifndef WEBRTC_MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_ABS_QUANT_LOOP_H_
#define WEBRTC_MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_ABS_QUANT_LOOP_H_
#include "defines.h"
/*----------------------------------------------------------------*
* predictive noise shaping encoding of scaled start state
* (subroutine for WebRtcIlbcfix_StateSearch)
*---------------------------------------------------------------*/
void WebRtcIlbcfix_AbsQuantLoop(
WebRtc_Word16 *syntOutIN,
WebRtc_Word16 *in_weightedIN,
WebRtc_Word16 *weightDenumIN,
WebRtc_Word16 *quantLenIN,
WebRtc_Word16 *idxVecIN
);
#endif

View File

@@ -0,0 +1,63 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/******************************************************************
iLBC Speech Coder ANSI-C Source Code
WebRtcIlbcfix_AugmentedCbCorr.c
******************************************************************/
#include "defines.h"
#include "constants.h"
#include "augmented_cb_corr.h"
void WebRtcIlbcfix_AugmentedCbCorr(
WebRtc_Word16 *target, /* (i) Target vector */
WebRtc_Word16 *buffer, /* (i) Memory buffer */
WebRtc_Word16 *interpSamples, /* (i) buffer with
interpolated samples */
WebRtc_Word32 *crossDot, /* (o) The cross correlation between
the target and the Augmented
vector */
WebRtc_Word16 low, /* (i) Lag to start from (typically
20) */
WebRtc_Word16 high, /* (i) Lag to end at (typically 39) */
WebRtc_Word16 scale) /* (i) Scale factor to use for
the crossDot */
{
int lagcount;
WebRtc_Word16 ilow;
WebRtc_Word16 *targetPtr;
WebRtc_Word32 *crossDotPtr;
WebRtc_Word16 *iSPtr=interpSamples;
/* Calculate the correlation between the target and the
interpolated codebook. The correlation is calculated in
3 sections with the interpolated part in the middle */
crossDotPtr=crossDot;
for (lagcount=low; lagcount<=high; lagcount++) {
ilow = (WebRtc_Word16) (lagcount-4);
/* Compute dot product for the first (lagcount-4) samples */
(*crossDotPtr) = WebRtcSpl_DotProductWithScale(target, buffer-lagcount, ilow, scale);
/* Compute dot product on the interpolated samples */
(*crossDotPtr) += WebRtcSpl_DotProductWithScale(target+ilow, iSPtr, 4, scale);
targetPtr = target + lagcount;
iSPtr += lagcount-ilow;
/* Compute dot product for the remaining samples */
(*crossDotPtr) += WebRtcSpl_DotProductWithScale(targetPtr, buffer-lagcount, SUBL-lagcount, scale);
crossDotPtr++;
}
}

View File

@@ -0,0 +1,42 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/******************************************************************
iLBC Speech Coder ANSI-C Source Code
WebRtcIlbcfix_AugmentedCbCorr.h
******************************************************************/
#ifndef WEBRTC_MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_AUGMENTED_CB_CORR_H_
#define WEBRTC_MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_AUGMENTED_CB_CORR_H_
#include "defines.h"
/*----------------------------------------------------------------*
* Calculate correlation between target and Augmented codebooks
*---------------------------------------------------------------*/
void WebRtcIlbcfix_AugmentedCbCorr(
WebRtc_Word16 *target, /* (i) Target vector */
WebRtc_Word16 *buffer, /* (i) Memory buffer */
WebRtc_Word16 *interpSamples, /* (i) buffer with
interpolated samples */
WebRtc_Word32 *crossDot, /* (o) The cross correlation between
the target and the Augmented
vector */
WebRtc_Word16 low, /* (i) Lag to start from (typically
20) */
WebRtc_Word16 high, /* (i) Lag to end at (typically 39) */
WebRtc_Word16 scale); /* (i) Scale factor to use for
the crossDot */
#endif

View File

@@ -0,0 +1,42 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/******************************************************************
iLBC Speech Coder ANSI-C Source Code
WebRtcIlbcfix_BwExpand.c
******************************************************************/
#include "defines.h"
/*----------------------------------------------------------------*
* lpc bandwidth expansion
*---------------------------------------------------------------*/
/* The output is in the same domain as the input */
void WebRtcIlbcfix_BwExpand(
WebRtc_Word16 *out, /* (o) the bandwidth expanded lpc coefficients */
WebRtc_Word16 *in, /* (i) the lpc coefficients before bandwidth
expansion */
WebRtc_Word16 *coef, /* (i) the bandwidth expansion factor Q15 */
WebRtc_Word16 length /* (i) the length of lpc coefficient vectors */
) {
int i;
out[0] = in[0];
for (i = 1; i < length; i++) {
/* out[i] = coef[i] * in[i] with rounding.
in[] and out[] are in Q12 and coef[] is in Q15
*/
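/* Worked example of the rounding above (illustrative values only,
   not taken from the codec tables): coef[i] = 29491 (~0.90 in Q15)
   and in[i] = 4096 (1.00 in Q12) give
   (29491*4096 + 16384) >> 15 = 3686, i.e. ~0.90 in Q12. */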
out[i] = (WebRtc_Word16)((WEBRTC_SPL_MUL_16_16(coef[i], in[i])+16384)>>15);
}
}

View File

@@ -0,0 +1,36 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/******************************************************************
iLBC Speech Coder ANSI-C Source Code
WebRtcIlbcfix_BwExpand.h
******************************************************************/
#ifndef WEBRTC_MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_BW_EXPAND_H_
#define WEBRTC_MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_BW_EXPAND_H_
#include "defines.h"
/*----------------------------------------------------------------*
* lpc bandwidth expansion
*---------------------------------------------------------------*/
void WebRtcIlbcfix_BwExpand(
WebRtc_Word16 *out, /* (o) the bandwidth expanded lpc coefficients */
WebRtc_Word16 *in, /* (i) the lpc coefficients before bandwidth
expansion */
WebRtc_Word16 *coef, /* (i) the bandwidth expansion factor Q15 */
WebRtc_Word16 length /* (i) the length of lpc coefficient vectors */
);
#endif

View File

@@ -0,0 +1,67 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/******************************************************************
iLBC Speech Coder ANSI-C Source Code
WebRtcIlbcfix_CbConstruct.c
******************************************************************/
#include "defines.h"
#include "gain_dequant.h"
#include "get_cd_vec.h"
/*----------------------------------------------------------------*
* Construct decoded vector from codebook and gains.
*---------------------------------------------------------------*/
void WebRtcIlbcfix_CbConstruct(
WebRtc_Word16 *decvector, /* (o) Decoded vector */
WebRtc_Word16 *index, /* (i) Codebook indices */
WebRtc_Word16 *gain_index, /* (i) Gain quantization indices */
WebRtc_Word16 *mem, /* (i) Buffer for codevector construction */
WebRtc_Word16 lMem, /* (i) Length of buffer */
WebRtc_Word16 veclen /* (i) Length of vector */
){
int j;
WebRtc_Word16 gain[CB_NSTAGES];
/* Stack based */
WebRtc_Word16 cbvec0[SUBL];
WebRtc_Word16 cbvec1[SUBL];
WebRtc_Word16 cbvec2[SUBL];
WebRtc_Word32 a32;
WebRtc_Word16 *gainPtr;
/* gain de-quantization */
gain[0] = WebRtcIlbcfix_GainDequant(gain_index[0], 16384, 0);
gain[1] = WebRtcIlbcfix_GainDequant(gain_index[1], gain[0], 1);
gain[2] = WebRtcIlbcfix_GainDequant(gain_index[2], gain[1], 2);
/* codebook vector construction and construction of total vector */
/* Stack based */
WebRtcIlbcfix_GetCbVec(cbvec0, mem, index[0], lMem, veclen);
WebRtcIlbcfix_GetCbVec(cbvec1, mem, index[1], lMem, veclen);
WebRtcIlbcfix_GetCbVec(cbvec2, mem, index[2], lMem, veclen);
gainPtr = &gain[0];
for (j=0;j<veclen;j++) {
a32 = WEBRTC_SPL_MUL_16_16(*gainPtr++, cbvec0[j]);
a32 += WEBRTC_SPL_MUL_16_16(*gainPtr++, cbvec1[j]);
a32 += WEBRTC_SPL_MUL_16_16(*gainPtr, cbvec2[j]);
gainPtr -= 2;
decvector[j] = (WebRtc_Word16) WEBRTC_SPL_RSHIFT_W32(a32 + 8192, 14);
}
return;
}

View File

@@ -0,0 +1,38 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/******************************************************************
iLBC Speech Coder ANSI-C Source Code
WebRtcIlbcfix_CbConstruct.h
******************************************************************/
#ifndef WEBRTC_MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_CB_CONSTRUCT_H_
#define WEBRTC_MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_CB_CONSTRUCT_H_
#include "defines.h"
/*----------------------------------------------------------------*
* Construct decoded vector from codebook and gains.
*---------------------------------------------------------------*/
void WebRtcIlbcfix_CbConstruct(
WebRtc_Word16 *decvector, /* (o) Decoded vector */
WebRtc_Word16 *index, /* (i) Codebook indices */
WebRtc_Word16 *gain_index, /* (i) Gain quantization indices */
WebRtc_Word16 *mem, /* (i) Buffer for codevector construction */
WebRtc_Word16 lMem, /* (i) Length of buffer */
WebRtc_Word16 veclen /* (i) Length of vector */
);
#endif

View File

@@ -0,0 +1,79 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/******************************************************************
iLBC Speech Coder ANSI-C Source Code
WebRtcIlbcfix_CbMemEnergy.c
******************************************************************/
#include "defines.h"
#include "constants.h"
#include "cb_mem_energy_calc.h"
/*----------------------------------------------------------------*
* Function WebRtcIlbcfix_CbMemEnergy computes the energy of all
* the vectors in the codebook memory that will be used in the
* following search for the best match.
*----------------------------------------------------------------*/
void WebRtcIlbcfix_CbMemEnergy(
WebRtc_Word16 range,
WebRtc_Word16 *CB, /* (i) The CB memory (1:st section) */
WebRtc_Word16 *filteredCB, /* (i) The filtered CB memory (2:nd section) */
WebRtc_Word16 lMem, /* (i) Length of the CB memory */
WebRtc_Word16 lTarget, /* (i) Length of the target vector */
WebRtc_Word16 *energyW16, /* (o) Energy in the CB vectors */
WebRtc_Word16 *energyShifts, /* (o) Shift value of the energy */
WebRtc_Word16 scale, /* (i) The scaling of all energy values */
WebRtc_Word16 base_size /* (i) Index to where the energy values should be stored */
) {
WebRtc_Word16 *ppi, *ppo, *pp;
WebRtc_Word32 energy, tmp32;
/* Compute the energy and store it in a vector. Also the
* corresponding shift values are stored. The energy values
* are reused in all three stages. */
/* Calculate the energy in the first block of 'lTarget' samples. */
ppi = CB+lMem-lTarget-1;
ppo = CB+lMem-1;
pp=CB+lMem-lTarget;
energy = WebRtcSpl_DotProductWithScale( pp, pp, lTarget, scale);
/* Normalize the energy and store the number of shifts */
energyShifts[0] = (WebRtc_Word16)WebRtcSpl_NormW32(energy);
tmp32 = WEBRTC_SPL_LSHIFT_W32(energy, energyShifts[0]);
energyW16[0] = (WebRtc_Word16)WEBRTC_SPL_RSHIFT_W32(tmp32, 16);
/* Compute the energy of the rest of the cb memory
* by step wise adding and subtracting the next
* sample and the last sample respectively. */
WebRtcIlbcfix_CbMemEnergyCalc(energy, range, ppi, ppo, energyW16, energyShifts, scale, 0);
/* Next, precompute the energy values for the filtered cb section */
energy=0;
pp=filteredCB+lMem-lTarget;
energy = WebRtcSpl_DotProductWithScale( pp, pp, lTarget, scale);
/* Normalize the energy and store the number of shifts */
energyShifts[base_size] = (WebRtc_Word16)WebRtcSpl_NormW32(energy);
tmp32 = WEBRTC_SPL_LSHIFT_W32(energy, energyShifts[base_size]);
energyW16[base_size] = (WebRtc_Word16)WEBRTC_SPL_RSHIFT_W32(tmp32, 16);
ppi = filteredCB + lMem - 1 - lTarget;
ppo = filteredCB + lMem - 1;
WebRtcIlbcfix_CbMemEnergyCalc(energy, range, ppi, ppo, energyW16, energyShifts, scale, base_size);
}

View File

@@ -0,0 +1,34 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/******************************************************************
iLBC Speech Coder ANSI-C Source Code
WebRtcIlbcfix_CbMemEnergy.h
******************************************************************/
#ifndef WEBRTC_MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_CB_MEM_ENERGY_H_
#define WEBRTC_MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_CB_MEM_ENERGY_H_
void WebRtcIlbcfix_CbMemEnergy(
WebRtc_Word16 range,
WebRtc_Word16 *CB, /* (i) The CB memory (1:st section) */
WebRtc_Word16 *filteredCB, /* (i) The filtered CB memory (2:nd section) */
WebRtc_Word16 lMem, /* (i) Length of the CB memory */
WebRtc_Word16 lTarget, /* (i) Length of the target vector */
WebRtc_Word16 *energyW16, /* (o) Energy in the CB vectors */
WebRtc_Word16 *energyShifts, /* (o) Shift value of the energy */
WebRtc_Word16 scale, /* (i) The scaling of all energy values */
WebRtc_Word16 base_size /* (i) Index to where the energy values should be stored */
);
#endif

View File

@@ -0,0 +1,67 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/******************************************************************
iLBC Speech Coder ANSI-C Source Code
WebRtcIlbcfix_CbMemEnergyAugmentation.c
******************************************************************/
#include "defines.h"
#include "constants.h"
void WebRtcIlbcfix_CbMemEnergyAugmentation(
WebRtc_Word16 *interpSamples, /* (i) The interpolated samples */
WebRtc_Word16 *CBmem, /* (i) The CB memory */
WebRtc_Word16 scale, /* (i) The scaling of all energy values */
WebRtc_Word16 base_size, /* (i) Index to where the energy values should be stored */
WebRtc_Word16 *energyW16, /* (o) Energy in the CB vectors */
WebRtc_Word16 *energyShifts /* (o) Shift value of the energy */
){
WebRtc_Word32 energy, tmp32;
WebRtc_Word16 *ppe, *pp, *interpSamplesPtr;
WebRtc_Word16 *CBmemPtr, lagcount;
WebRtc_Word16 *enPtr=&energyW16[base_size-20];
WebRtc_Word16 *enShPtr=&energyShifts[base_size-20];
WebRtc_Word32 nrjRecursive;
CBmemPtr = CBmem+147;
interpSamplesPtr = interpSamples;
/* Compute the energy for the first (low-5) noninterpolated samples */
nrjRecursive = WebRtcSpl_DotProductWithScale( CBmemPtr-19, CBmemPtr-19, 15, scale);
ppe = CBmemPtr - 20;
for (lagcount=20; lagcount<=39; lagcount++) {
/* Update the energy recursively to save complexity */
nrjRecursive = nrjRecursive +
WEBRTC_SPL_MUL_16_16_RSFT(*ppe, *ppe, scale);
ppe--;
energy = nrjRecursive;
/* interpolation */
energy += WebRtcSpl_DotProductWithScale(interpSamplesPtr, interpSamplesPtr, 4, scale);
interpSamplesPtr += 4;
/* Compute energy for the remaining samples */
pp = CBmemPtr - lagcount;
energy += WebRtcSpl_DotProductWithScale(pp, pp, SUBL-lagcount, scale);
/* Normalize the energy and store the number of shifts */
(*enShPtr) = (WebRtc_Word16)WebRtcSpl_NormW32(energy);
tmp32 = WEBRTC_SPL_LSHIFT_W32(energy, (*enShPtr));
(*enPtr) = (WebRtc_Word16)WEBRTC_SPL_RSHIFT_W32(tmp32, 16);
enShPtr++;
enPtr++;
}
}

View File

@@ -0,0 +1,31 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/******************************************************************
iLBC Speech Coder ANSI-C Source Code
WebRtcIlbcfix_CbMemEnergyAugmentation.h
******************************************************************/
#ifndef WEBRTC_MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_CB_MEM_ENERGY_AUGMENTATION_H_
#define WEBRTC_MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_CB_MEM_ENERGY_AUGMENTATION_H_
void WebRtcIlbcfix_CbMemEnergyAugmentation(
WebRtc_Word16 *interpSamples, /* (i) The interpolated samples */
WebRtc_Word16 *CBmem, /* (i) The CB memory */
WebRtc_Word16 scale, /* (i) The scaling of all energy values */
WebRtc_Word16 base_size, /* (i) Index to where the energy values should be stored */
WebRtc_Word16 *energyW16, /* (o) Energy in the CB vectors */
WebRtc_Word16 *energyShifts /* (o) Shift value of the energy */
);
#endif

View File

@@ -0,0 +1,65 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/******************************************************************
iLBC Speech Coder ANSI-C Source Code
WebRtcIlbcfix_CbMemEnergyCalc.c
******************************************************************/
#include "defines.h"
/* Compute the energy of the rest of the cb memory
* by step wise adding and subtracting the next
* sample and the last sample respectively */
void WebRtcIlbcfix_CbMemEnergyCalc(
WebRtc_Word32 energy, /* (i) input start energy */
WebRtc_Word16 range, /* (i) number of iterations */
WebRtc_Word16 *ppi, /* (i) input pointer 1 */
WebRtc_Word16 *ppo, /* (i) input pointer 2 */
WebRtc_Word16 *energyW16, /* (o) Energy in the CB vectors */
WebRtc_Word16 *energyShifts, /* (o) Shift value of the energy */
WebRtc_Word16 scale, /* (i) The scaling of all energy values */
WebRtc_Word16 base_size /* (i) Index to where the energy values should be stored */
)
{
WebRtc_Word16 j,shft;
WebRtc_Word32 tmp;
WebRtc_Word16 *eSh_ptr;
WebRtc_Word16 *eW16_ptr;
eSh_ptr = &energyShifts[1+base_size];
eW16_ptr = &energyW16[1+base_size];
for(j=0;j<range-1;j++) {
/* Calculate next energy by a +/-
operation on the edge samples */
tmp = WEBRTC_SPL_MUL_16_16(*ppi, *ppi);
tmp -= WEBRTC_SPL_MUL_16_16(*ppo, *ppo);
energy += WEBRTC_SPL_RSHIFT_W32(tmp, scale);
energy = WEBRTC_SPL_MAX(energy, 0);
ppi--;
ppo--;
/* Normalize the energy into a WebRtc_Word16 and store
the number of shifts */
shft = (WebRtc_Word16)WebRtcSpl_NormW32(energy);
*eSh_ptr++ = shft;
tmp = WEBRTC_SPL_LSHIFT_W32(energy, shft);
*eW16_ptr++ = (WebRtc_Word16)WEBRTC_SPL_RSHIFT_W32(tmp, 16);
}
}

View File

@@ -0,0 +1,33 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/******************************************************************
iLBC Speech Coder ANSI-C Source Code
WebRtcIlbcfix_CbMemEnergyCalc.h
******************************************************************/
#ifndef WEBRTC_MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_CB_MEM_ENERGY_CALC_H_
#define WEBRTC_MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_CB_MEM_ENERGY_CALC_H_
void WebRtcIlbcfix_CbMemEnergyCalc(
WebRtc_Word32 energy, /* (i) input start energy */
WebRtc_Word16 range, /* (i) number of iterations */
WebRtc_Word16 *ppi, /* (i) input pointer 1 */
WebRtc_Word16 *ppo, /* (i) input pointer 2 */
WebRtc_Word16 *energyW16, /* (o) Energy in the CB vectors */
WebRtc_Word16 *energyShifts, /* (o) Shift value of the energy */
WebRtc_Word16 scale, /* (i) The scaling of all energy values */
WebRtc_Word16 base_size /* (i) Index to where the energy values should be stored */
);
#endif

View File

@@ -0,0 +1,396 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/******************************************************************
iLBC Speech Coder ANSI-C Source Code
WebRtcIlbcfix_CbSearch.c
******************************************************************/
#include "defines.h"
#include "gain_quant.h"
#include "filtered_cb_vecs.h"
#include "constants.h"
#include "cb_mem_energy.h"
#include "interpolate_samples.h"
#include "cb_mem_energy_augmentation.h"
#include "cb_search_core.h"
#include "energy_inverse.h"
#include "augmented_cb_corr.h"
#include "cb_update_best_index.h"
#include "create_augmented_vec.h"
/*----------------------------------------------------------------*
* Search routine for codebook encoding and gain quantization.
*----------------------------------------------------------------*/
void WebRtcIlbcfix_CbSearch(
iLBC_Enc_Inst_t *iLBCenc_inst,
/* (i) the encoder state structure */
WebRtc_Word16 *index, /* (o) Codebook indices */
WebRtc_Word16 *gain_index, /* (o) Gain quantization indices */
WebRtc_Word16 *intarget, /* (i) Target vector for encoding */
WebRtc_Word16 *decResidual,/* (i) Decoded residual for codebook construction */
WebRtc_Word16 lMem, /* (i) Length of buffer */
WebRtc_Word16 lTarget, /* (i) Length of vector */
WebRtc_Word16 *weightDenum,/* (i) weighting filter coefficients in Q12 */
WebRtc_Word16 block /* (i) the subblock number */
) {
WebRtc_Word16 i, j, stage, range;
WebRtc_Word16 *pp, scale, tmp;
WebRtc_Word16 bits, temp1, temp2;
WebRtc_Word16 base_size;
WebRtc_Word32 codedEner, targetEner;
WebRtc_Word16 gains[CB_NSTAGES+1];
WebRtc_Word16 *cb_vecPtr;
WebRtc_Word16 indexOffset, sInd, eInd;
WebRtc_Word32 CritMax=0;
WebRtc_Word16 shTotMax=WEBRTC_SPL_WORD16_MIN;
WebRtc_Word16 bestIndex=0;
WebRtc_Word16 bestGain=0;
WebRtc_Word16 indexNew, CritNewSh;
WebRtc_Word32 CritNew;
WebRtc_Word32 *cDotPtr;
WebRtc_Word16 noOfZeros;
WebRtc_Word16 *gainPtr;
WebRtc_Word32 t32, tmpW32;
WebRtc_Word16 *WebRtcIlbcfix_kGainSq5_ptr;
/* Stack based */
WebRtc_Word16 CBbuf[CB_MEML+LPC_FILTERORDER+CB_HALFFILTERLEN];
WebRtc_Word32 cDot[128];
WebRtc_Word32 Crit[128];
WebRtc_Word16 targetVec[SUBL+LPC_FILTERORDER];
WebRtc_Word16 cbvectors[CB_MEML];
WebRtc_Word16 codedVec[SUBL];
WebRtc_Word16 interpSamples[20*4];
WebRtc_Word16 interpSamplesFilt[20*4];
WebRtc_Word16 energyW16[CB_EXPAND*128];
WebRtc_Word16 energyShifts[CB_EXPAND*128];
WebRtc_Word16 *inverseEnergy=energyW16; /* Reuse memory */
WebRtc_Word16 *inverseEnergyShifts=energyShifts; /* Reuse memory */
WebRtc_Word16 *buf = &CBbuf[LPC_FILTERORDER];
WebRtc_Word16 *target = &targetVec[LPC_FILTERORDER];
WebRtc_Word16 *aug_vec = (WebRtc_Word16*)cDot; /* length [SUBL], reuse memory */
/* Determine size of codebook sections */
base_size=lMem-lTarget+1;
if (lTarget==SUBL) {
base_size=lMem-19;
}
/* weighting of the CB memory */
noOfZeros=lMem-WebRtcIlbcfix_kFilterRange[block];
WebRtcSpl_MemSetW16(&buf[-LPC_FILTERORDER], 0, noOfZeros+LPC_FILTERORDER);
WebRtcSpl_FilterARFastQ12(
decResidual+noOfZeros, buf+noOfZeros,
weightDenum, LPC_FILTERORDER+1, WebRtcIlbcfix_kFilterRange[block]);
/* weighting of the target vector */
WEBRTC_SPL_MEMCPY_W16(&target[-LPC_FILTERORDER], buf+noOfZeros+WebRtcIlbcfix_kFilterRange[block]-LPC_FILTERORDER, LPC_FILTERORDER);
WebRtcSpl_FilterARFastQ12(
intarget, target,
weightDenum, LPC_FILTERORDER+1, lTarget);
/* Store target, towards the end codedVec is calculated as
the initial target minus the remaining target */
WEBRTC_SPL_MEMCPY_W16(codedVec, target, lTarget);
/* Find the highest absolute value to calculate proper
vector scale factor (so that it uses 12 bits) */
temp1 = WebRtcSpl_MaxAbsValueW16(buf, (WebRtc_Word16)lMem);
temp2 = WebRtcSpl_MaxAbsValueW16(target, (WebRtc_Word16)lTarget);
if ((temp1>0)&&(temp2>0)) {
temp1 = WEBRTC_SPL_MAX(temp1, temp2);
scale = WebRtcSpl_GetSizeInBits(WEBRTC_SPL_MUL_16_16(temp1, temp1));
} else {
/* temp1 or temp2 is negative (maximum was -32768) */
scale = 30;
}
/* Scale so that a mul-add 40 times does not overflow */
scale = scale - 25;
scale = WEBRTC_SPL_MAX(0, scale);
/* Compute energy of the original target */
targetEner = WebRtcSpl_DotProductWithScale(target, target, lTarget, scale);
/* Prepare search over one more codebook section. This section
is created by filtering the original buffer with a filter. */
WebRtcIlbcfix_FilteredCbVecs(cbvectors, buf, lMem, WebRtcIlbcfix_kFilterRange[block]);
range = WebRtcIlbcfix_kSearchRange[block][0];
if(lTarget == SUBL) {
/* Create the interpolated samples and store them for use in all stages */
/* First section, non-filtered half of the cb */
WebRtcIlbcfix_InterpolateSamples(interpSamples, buf, lMem);
/* Second section, filtered half of the cb */
WebRtcIlbcfix_InterpolateSamples(interpSamplesFilt, cbvectors, lMem);
/* Compute the CB vectors' energies for the first cb section (non-filtered) */
WebRtcIlbcfix_CbMemEnergyAugmentation(interpSamples, buf,
scale, 20, energyW16, energyShifts);
/* Compute the CB vectors' energies for the second cb section (filtered cb) */
WebRtcIlbcfix_CbMemEnergyAugmentation(interpSamplesFilt, cbvectors,
scale, (WebRtc_Word16)(base_size+20), energyW16, energyShifts);
/* Compute the CB vectors' energies and store them in the vector
* energyW16. Also the corresponding shift values are stored. The
* energy values are used in all three stages. */
WebRtcIlbcfix_CbMemEnergy(range, buf, cbvectors, lMem,
lTarget, energyW16+20, energyShifts+20, scale, base_size);
} else {
/* Compute the CB vectors' energies and store them in the vector
* energyW16. Also the corresponding shift values are stored. The
* energy values are used in all three stages. */
WebRtcIlbcfix_CbMemEnergy(range, buf, cbvectors, lMem,
lTarget, energyW16, energyShifts, scale, base_size);
/* Set the energy positions 58-63 and 122-127 to zero
(otherwise they are uninitialized) */
WebRtcSpl_MemSetW16(energyW16+range, 0, (base_size-range));
WebRtcSpl_MemSetW16(energyW16+range+base_size, 0, (base_size-range));
}
/* Calculate Inverse Energy (energyW16 is already normalized
and will contain the inverse energy in Q29 after this call) */
WebRtcIlbcfix_EnergyInverse(energyW16, base_size*CB_EXPAND);
/* The gain value computed in the previous stage is used
* as an upper limit to what the next stage gain value
* is allowed to be. In stage 0, 16384 (1.0 in Q14) is used as
* the upper limit. */
gains[0] = 16384;
for (stage=0; stage<CB_NSTAGES; stage++) {
/* Set up memories */
range = WebRtcIlbcfix_kSearchRange[block][stage];
/* initialize search measures */
CritMax=0;
shTotMax=-100;
bestIndex=0;
bestGain=0;
/* loop over lags 40+ in the first codebook section, full search */
cb_vecPtr = buf+lMem-lTarget;
/* Calculate all the cross correlations (augmented part of CB) */
if (lTarget==SUBL) {
WebRtcIlbcfix_AugmentedCbCorr(target, buf+lMem,
interpSamples, cDot,
20, 39, scale);
cDotPtr=&cDot[20];
} else {
cDotPtr=cDot;
}
/* Calculate all the cross correlations (main part of CB) */
WebRtcSpl_CrossCorrelation(cDotPtr, target, cb_vecPtr, lTarget, range, scale, -1);
/* Adjust the search range for the augmented vectors */
if (lTarget==SUBL) {
range=WebRtcIlbcfix_kSearchRange[block][stage]+20;
} else {
range=WebRtcIlbcfix_kSearchRange[block][stage];
}
indexOffset=0;
/* Search for best index in this part of the vector */
WebRtcIlbcfix_CbSearchCore(
cDot, range, stage, inverseEnergy,
inverseEnergyShifts, Crit,
&indexNew, &CritNew, &CritNewSh);
/* Update the global best index and the corresponding gain */
WebRtcIlbcfix_CbUpdateBestIndex(
CritNew, CritNewSh, (WebRtc_Word16)(indexNew+indexOffset), cDot[indexNew+indexOffset],
inverseEnergy[indexNew+indexOffset], inverseEnergyShifts[indexNew+indexOffset],
&CritMax, &shTotMax, &bestIndex, &bestGain);
sInd=bestIndex-(WebRtc_Word16)(CB_RESRANGE>>1);
eInd=sInd+CB_RESRANGE;
if (sInd<0) {
eInd-=sInd;
sInd=0;
}
if (eInd>=range) {
eInd=range-1;
sInd=eInd-CB_RESRANGE;
}
range = WebRtcIlbcfix_kSearchRange[block][stage];
if (lTarget==SUBL) {
i=sInd;
if (sInd<20) {
WebRtcIlbcfix_AugmentedCbCorr(target, cbvectors+lMem,
interpSamplesFilt, cDot,
(WebRtc_Word16)(sInd+20), (WebRtc_Word16)(WEBRTC_SPL_MIN(39, (eInd+20))), scale);
i=20;
}
cDotPtr=&cDot[WEBRTC_SPL_MAX(0,(20-sInd))];
cb_vecPtr = cbvectors+lMem-20-i;
/* Calculate the cross correlations (main part of the filtered CB) */
WebRtcSpl_CrossCorrelation(cDotPtr, target, cb_vecPtr, lTarget, (WebRtc_Word16)(eInd-i+1), scale, -1);
} else {
cDotPtr = cDot;
cb_vecPtr = cbvectors+lMem-lTarget-sInd;
/* Calculate the cross correlations (main part of the filtered CB) */
WebRtcSpl_CrossCorrelation(cDotPtr, target, cb_vecPtr, lTarget, (WebRtc_Word16)(eInd-sInd+1), scale, -1);
}
/* Adjust the search range for the augmented vectors */
indexOffset=base_size+sInd;
/* Search for best index in this part of the vector */
WebRtcIlbcfix_CbSearchCore(
cDot, (WebRtc_Word16)(eInd-sInd+1), stage, inverseEnergy+indexOffset,
inverseEnergyShifts+indexOffset, Crit,
&indexNew, &CritNew, &CritNewSh);
/* Update the global best index and the corresponding gain */
WebRtcIlbcfix_CbUpdateBestIndex(
CritNew, CritNewSh, (WebRtc_Word16)(indexNew+indexOffset), cDot[indexNew],
inverseEnergy[indexNew+indexOffset], inverseEnergyShifts[indexNew+indexOffset],
&CritMax, &shTotMax, &bestIndex, &bestGain);
index[stage] = bestIndex;
bestGain = WebRtcIlbcfix_GainQuant(bestGain,
(WebRtc_Word16)WEBRTC_SPL_ABS_W16(gains[stage]), stage, &gain_index[stage]);
/* Extract the best (according to the measure) codebook vector.
Also adjust the index so that the augmented vectors come last;
in the search above they came first.
*/
if(lTarget==(STATE_LEN-iLBCenc_inst->state_short_len)) {
if(index[stage]<base_size) {
pp=buf+lMem-lTarget-index[stage];
} else {
pp=cbvectors+lMem-lTarget-
index[stage]+base_size;
}
} else {
if (index[stage]<base_size) {
if (index[stage]>=20) {
/* Adjust index and extract vector */
index[stage]-=20;
pp=buf+lMem-lTarget-index[stage];
} else {
/* Adjust index and extract vector */
index[stage]+=(base_size-20);
WebRtcIlbcfix_CreateAugmentedVec((WebRtc_Word16)(index[stage]-base_size+40),
buf+lMem, aug_vec);
pp = aug_vec;
}
} else {
if ((index[stage] - base_size) >= 20) {
/* Adjust index and extract vector */
index[stage]-=20;
pp=cbvectors+lMem-lTarget-
index[stage]+base_size;
} else {
/* Adjust index and extract vector */
index[stage]+=(base_size-20);
WebRtcIlbcfix_CreateAugmentedVec((WebRtc_Word16)(index[stage]-2*base_size+40),
cbvectors+lMem, aug_vec);
pp = aug_vec;
}
}
}
/* Subtract the best codebook vector, according
to measure, from the target vector */
WebRtcSpl_AddAffineVectorToVector(target, pp, (WebRtc_Word16)(-bestGain), (WebRtc_Word32)8192, (WebRtc_Word16)14, (int)lTarget);
/* record quantized gain */
gains[stage+1] = bestGain;
} /* end of Main Loop. for (stage=0;... */
/* Calculate the coded vector (original target - what's left) */
for (i=0;i<lTarget;i++) {
codedVec[i]-=target[i];
}
/* Gain adjustment for energy matching */
codedEner = WebRtcSpl_DotProductWithScale(codedVec, codedVec, lTarget, scale);
j=gain_index[0];
temp1 = (WebRtc_Word16)WebRtcSpl_NormW32(codedEner);
temp2 = (WebRtc_Word16)WebRtcSpl_NormW32(targetEner);
if(temp1 < temp2) {
bits = 16 - temp1;
} else {
bits = 16 - temp2;
}
tmp = (WebRtc_Word16) WEBRTC_SPL_MUL_16_16_RSFT(gains[1],gains[1], 14);
targetEner = WEBRTC_SPL_MUL_16_16(
WEBRTC_SPL_SHIFT_W32(targetEner, -bits), tmp);
tmpW32 = ((WebRtc_Word32)(gains[1]-1))<<1;
/* Pointer to the table that contains
gain_sq5TblFIX * gain_sq5TblFIX in Q14 */
gainPtr=(WebRtc_Word16*)WebRtcIlbcfix_kGainSq5Sq+gain_index[0];
temp1 = (WebRtc_Word16)WEBRTC_SPL_SHIFT_W32(codedEner, -bits);
WebRtcIlbcfix_kGainSq5_ptr = (WebRtc_Word16*)&WebRtcIlbcfix_kGainSq5[j];
/* targetEner and codedEner are in Q(-2*scale) */
for (i=gain_index[0];i<32;i++) {
/* Change the index if
(codedEnergy*gainTbl[i]*gainTbl[i])<(targetEn*gain[0]*gain[0]) AND
gainTbl[i] < 2*gain[0]
*/
t32 = WEBRTC_SPL_MUL_16_16(temp1, (*gainPtr));
t32 = t32 - targetEner;
if (t32 < 0) {
if ((*WebRtcIlbcfix_kGainSq5_ptr) < tmpW32) {
j=i;
WebRtcIlbcfix_kGainSq5_ptr = (WebRtc_Word16*)&WebRtcIlbcfix_kGainSq5[i];
}
}
gainPtr++;
}
gain_index[0]=j;
return;
}
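/* Illustration, not part of the iLBC sources: a hypothetical floating-point
   sketch of the energy-matching step above. After the three-stage search,
   the stage-0 gain index may be raised as long as the rescaled coded energy
   stays below the target energy and the candidate gain stays below twice the
   originally chosen gain; gainTbl corresponds to WebRtcIlbcfix_kGainSq5
   converted to floating point. */
#include <stddef.h>

static int AdjustGainIndexFlpSketch(const float *gainTbl, size_t tblLen,
                                    int startIndex, float chosenGain,
                                    float codedEner, float targetEner) {
  int j = startIndex;
  size_t i;
  for (i = (size_t)startIndex; i < tblLen; i++) {
    /* Keep raising the index while both the energy criterion and the
       2*gain upper bound hold. */
    if (codedEner * gainTbl[i] * gainTbl[i] <
            targetEner * chosenGain * chosenGain &&
        gainTbl[i] < 2.0f * chosenGain) {
      j = (int)i;
    }
  }
  return j;
}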


@@ -0,0 +1,35 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/******************************************************************
iLBC Speech Coder ANSI-C Source Code
WebRtcIlbcfix_CbSearch.h
******************************************************************/
#ifndef WEBRTC_MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_CB_SEARCH_H_
#define WEBRTC_MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_CB_SEARCH_H_
void WebRtcIlbcfix_CbSearch(
iLBC_Enc_Inst_t *iLBCenc_inst,
/* (i) the encoder state structure */
WebRtc_Word16 *index, /* (o) Codebook indices */
WebRtc_Word16 *gain_index, /* (o) Gain quantization indices */
WebRtc_Word16 *intarget, /* (i) Target vector for encoding */
WebRtc_Word16 *decResidual,/* (i) Decoded residual for codebook construction */
WebRtc_Word16 lMem, /* (i) Length of buffer */
WebRtc_Word16 lTarget, /* (i) Length of vector */
WebRtc_Word16 *weightDenum,/* (i) weighting filter coefficients in Q12 */
WebRtc_Word16 block /* (i) the subblock number */
);
#endif


@@ -0,0 +1,113 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/******************************************************************
iLBC Speech Coder ANSI-C Source Code
WebRtcIlbcfix_CbSearchCore.c
******************************************************************/
#include "defines.h"
#include "constants.h"
void WebRtcIlbcfix_CbSearchCore(
WebRtc_Word32 *cDot, /* (i) Cross Correlation */
WebRtc_Word16 range, /* (i) Search range */
WebRtc_Word16 stage, /* (i) Stage of this search */
WebRtc_Word16 *inverseEnergy, /* (i) Inversed energy */
WebRtc_Word16 *inverseEnergyShift, /* (i) Shifts of inversed energy
with the offset 2*16-29 */
WebRtc_Word32 *Crit, /* (o) The criteria */
WebRtc_Word16 *bestIndex, /* (o) Index that corresponds to
maximum criteria (in this
vector) */
WebRtc_Word32 *bestCrit, /* (o) Value of criteria for the
chosen index */
WebRtc_Word16 *bestCritSh) /* (o) The domain of the chosen
criteria */
{
WebRtc_Word32 maxW32, tmp32;
WebRtc_Word16 max, sh, tmp16;
int i;
WebRtc_Word32 *cDotPtr;
WebRtc_Word16 cDotSqW16;
WebRtc_Word16 *inverseEnergyPtr;
WebRtc_Word32 *critPtr;
WebRtc_Word16 *inverseEnergyShiftPtr;
/* Don't allow negative values for stage 0 */
if (stage==0) {
cDotPtr=cDot;
for (i=0;i<range;i++) {
*cDotPtr=WEBRTC_SPL_MAX(0, (*cDotPtr));
cDotPtr++;
}
}
/* Normalize cDot to WebRtc_Word16, calculate the square of cDot and store the upper WebRtc_Word16 */
maxW32 = WebRtcSpl_MaxAbsValueW32(cDot, range);
sh = (WebRtc_Word16)WebRtcSpl_NormW32(maxW32);
cDotPtr = cDot;
inverseEnergyPtr = inverseEnergy;
critPtr = Crit;
inverseEnergyShiftPtr=inverseEnergyShift;
max=WEBRTC_SPL_WORD16_MIN;
for (i=0;i<range;i++) {
/* Calculate cDot*cDot and put the result in a WebRtc_Word16 */
tmp32 = WEBRTC_SPL_LSHIFT_W32(*cDotPtr,sh);
tmp16 = (WebRtc_Word16)WEBRTC_SPL_RSHIFT_W32(tmp32,16);
cDotSqW16 = (WebRtc_Word16)(((WebRtc_Word32)(tmp16)*(tmp16))>>16);
/* Calculate the criteria (cDot*cDot/energy) */
*critPtr=WEBRTC_SPL_MUL_16_16(cDotSqW16, (*inverseEnergyPtr));
/* Extract the maximum shift value under the constraint
that the criteria is not zero */
if ((*critPtr)!=0) {
max = WEBRTC_SPL_MAX((*inverseEnergyShiftPtr), max);
}
inverseEnergyPtr++;
inverseEnergyShiftPtr++;
critPtr++;
cDotPtr++;
}
/* If max is still at its initialization value (no non-zero criteria), set the shift to zero */
if (max==WEBRTC_SPL_WORD16_MIN) {
max = 0;
}
/* Modify the criteria so that they all use the same Q domain */
critPtr=Crit;
inverseEnergyShiftPtr=inverseEnergyShift;
for (i=0;i<range;i++) {
/* Guarantee that the shift value is less than 16
in order to simplify for DSP's (and guard against >31) */
tmp16 = WEBRTC_SPL_MIN(16, max-(*inverseEnergyShiftPtr));
(*critPtr)=WEBRTC_SPL_SHIFT_W32((*critPtr),-tmp16);
critPtr++;
inverseEnergyShiftPtr++;
}
/* Find the index of the best value */
*bestIndex = WebRtcSpl_MaxIndexW32(Crit, range);
*bestCrit = Crit[*bestIndex];
/* Calculate total shifts of this criteria */
*bestCritSh = 32 - 2*sh + max;
return;
}
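/* Illustration, not part of the iLBC sources: a hypothetical floating-point
   view of the criterion evaluated above. Apart from Q-domain bookkeeping,
   the winning index maximizes cDot^2 / energy, i.e. the squared correlation
   between target and codebook vector normalized by the vector's energy
   (with negative correlations clipped to zero in stage 0). */
#include <stddef.h>

static size_t CbSearchCoreFlpSketch(const float *cDot, const float *energy,
                                    size_t range) {
  size_t i, best = 0;
  float bestCrit = -1.0f;
  for (i = 0; i < range; i++) {
    /* Guard against a zero energy before dividing. */
    float e = (energy[i] > 0.0f) ? energy[i] : 1.0f;
    float crit = cDot[i] * cDot[i] / e;
    if (crit > bestCrit) {
      bestCrit = crit;
      best = i;
    }
  }
  return best;
}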


@@ -0,0 +1,40 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/******************************************************************
iLBC Speech Coder ANSI-C Source Code
WebRtcIlbcfix_CbSearchCore.h
******************************************************************/
#ifndef WEBRTC_MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_CB_SEARCH_CORE_H_
#define WEBRTC_MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_CB_SEARCH_CORE_H_
#include "defines.h"
void WebRtcIlbcfix_CbSearchCore(
WebRtc_Word32 *cDot, /* (i) Cross Correlation */
WebRtc_Word16 range, /* (i) Search range */
WebRtc_Word16 stage, /* (i) Stage of this search */
WebRtc_Word16 *inverseEnergy, /* (i) Inversed energy */
WebRtc_Word16 *inverseEnergyShift, /* (i) Shifts of inversed energy
with the offset 2*16-29 */
WebRtc_Word32 *Crit, /* (o) The criteria */
WebRtc_Word16 *bestIndex, /* (o) Index that corresponds to
maximum criteria (in this
vector) */
WebRtc_Word32 *bestCrit, /* (o) Value of criteria for the
chosen index */
WebRtc_Word16 *bestCritSh); /* (o) The domain of the chosen
criteria */
#endif


@@ -0,0 +1,89 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/******************************************************************
iLBC Speech Coder ANSI-C Source Code
WebRtcIlbcfix_CbUpdateBestIndex.c
******************************************************************/
#include "defines.h"
#include "cb_update_best_index.h"
#include "constants.h"
void WebRtcIlbcfix_CbUpdateBestIndex(
WebRtc_Word32 CritNew, /* (i) New Potentially best Criteria */
WebRtc_Word16 CritNewSh, /* (i) Shift value of above Criteria */
WebRtc_Word16 IndexNew, /* (i) Index of new Criteria */
WebRtc_Word32 cDotNew, /* (i) Cross dot of new index */
WebRtc_Word16 invEnergyNew, /* (i) Inversed energy new index */
WebRtc_Word16 energyShiftNew, /* (i) Energy shifts of new index */
WebRtc_Word32 *CritMax, /* (i/o) Maximum Criteria (so far) */
WebRtc_Word16 *shTotMax, /* (i/o) Shifts of maximum criteria */
WebRtc_Word16 *bestIndex, /* (i/o) Index that corresponds to
maximum criteria */
WebRtc_Word16 *bestGain) /* (i/o) Gain in Q14 that corresponds
to maximum criteria */
{
WebRtc_Word16 shOld, shNew, tmp16;
WebRtc_Word16 scaleTmp;
WebRtc_Word32 gainW32;
/* Normalize the new and old Criteria to the same domain */
if (CritNewSh>(*shTotMax)) {
shOld=WEBRTC_SPL_MIN(31,CritNewSh-(*shTotMax));
shNew=0;
} else {
shOld=0;
shNew=WEBRTC_SPL_MIN(31,(*shTotMax)-CritNewSh);
}
/* Compare the two criteria. If the new one is better,
calculate the gain and store this index as the new best one
*/
if (WEBRTC_SPL_RSHIFT_W32(CritNew, shNew)>
WEBRTC_SPL_RSHIFT_W32((*CritMax),shOld)) {
tmp16 = (WebRtc_Word16)WebRtcSpl_NormW32(cDotNew);
tmp16 = 16 - tmp16;
/* Calculate the gain in Q14
Compensate for inverseEnergyshift in Q29 and that the energy
value was stored in a WebRtc_Word16 (shifted down 16 steps)
=> 29-14+16 = 31 */
scaleTmp = -energyShiftNew-tmp16+31;
scaleTmp = WEBRTC_SPL_MIN(31, scaleTmp);
gainW32 = WEBRTC_SPL_MUL_16_16_RSFT(
((WebRtc_Word16)WEBRTC_SPL_SHIFT_W32(cDotNew, -tmp16)), invEnergyNew, scaleTmp);
/* Check if the gain satisfies the gain criterion (max 1.3);
if it is larger, set the gain to 1.3
(slightly different from the FLP version)
*/
if (gainW32>21299) {
*bestGain=21299;
} else if (gainW32<-21299) {
*bestGain=-21299;
} else {
*bestGain=(WebRtc_Word16)gainW32;
}
*CritMax=CritNew;
*shTotMax=CritNewSh;
*bestIndex = IndexNew;
}
return;
}
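/* Illustration, not part of the iLBC sources: a hypothetical floating-point
   equivalent of the gain update above. The gain is the correlation divided
   by the energy, limited to +/-1.3, which is what the Q14 constant 21299
   (21299/16384 ~= 1.3) encodes. */
static float LimitedGainFlpSketch(float cDot, float energy) {
  float gain = (energy > 0.0f) ? (cDot / energy) : 0.0f;
  if (gain > 1.3f) {
    gain = 1.3f;
  } else if (gain < -1.3f) {
    gain = -1.3f;
  }
  return gain;
}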


@@ -0,0 +1,38 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/******************************************************************
iLBC Speech Coder ANSI-C Source Code
WebRtcIlbcfix_CbUpdateBestIndex.h
******************************************************************/
#ifndef WEBRTC_MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_CB_UPDATE_BEST_INDEX_H_
#define WEBRTC_MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_CB_UPDATE_BEST_INDEX_H_
#include "defines.h"
void WebRtcIlbcfix_CbUpdateBestIndex(
WebRtc_Word32 CritNew, /* (i) New Potentially best Criteria */
WebRtc_Word16 CritNewSh, /* (i) Shift value of above Criteria */
WebRtc_Word16 IndexNew, /* (i) Index of new Criteria */
WebRtc_Word32 cDotNew, /* (i) Cross dot of new index */
WebRtc_Word16 invEnergyNew, /* (i) Inversed energy new index */
WebRtc_Word16 energyShiftNew, /* (i) Energy shifts of new index */
WebRtc_Word32 *CritMax, /* (i/o) Maximum Criteria (so far) */
WebRtc_Word16 *shTotMax, /* (i/o) Shifts of maximum criteria */
WebRtc_Word16 *bestIndex, /* (i/o) Index that corresponds to
maximum criteria */
WebRtc_Word16 *bestGain); /* (i/o) Gain in Q14 that corresponds
to maximum criteria */
#endif


@@ -0,0 +1,82 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/******************************************************************
iLBC Speech Coder ANSI-C Source Code
WebRtcIlbcfix_Chebyshev.c
******************************************************************/
#include "defines.h"
#include "constants.h"
/*------------------------------------------------------------------*
* Calculate the Chebyshev polynomial series
* F(w) = 2*exp(-j5w)*C(x)
* C(x) = (T_0(x) + f(1)T_1(x) + ... + f(4)T_4(x) + f(5)/2)
* T_i(x) is the i-th order Chebyshev polynomial
*------------------------------------------------------------------*/
WebRtc_Word16 WebRtcIlbcfix_Chebyshev(
/* (o) Result of C(x) */
WebRtc_Word16 x, /* (i) Value at which the Chebyshev polynomial is evaluated */
WebRtc_Word16 *f /* (i) The coefficients in the polynomial */
) {
WebRtc_Word16 b1_high, b1_low; /* Use the high, low format to increase the accuracy */
WebRtc_Word32 b2;
WebRtc_Word32 tmp1W32;
WebRtc_Word32 tmp2W32;
int i;
b2 = (WebRtc_Word32)0x1000000; /* b2 = 1.0 (Q23) */
/* Calculate b1 = 2*x + f[1] */
tmp1W32 = WEBRTC_SPL_LSHIFT_W32((WebRtc_Word32)x, 10);
tmp1W32 += WEBRTC_SPL_LSHIFT_W32((WebRtc_Word32)f[1], 14);
for (i = 2; i < 5; i++) {
tmp2W32 = tmp1W32;
/* Split b1 (in tmp1W32) into a high and low part */
b1_high = (WebRtc_Word16)WEBRTC_SPL_RSHIFT_W32(tmp1W32, 16);
b1_low = (WebRtc_Word16)WEBRTC_SPL_RSHIFT_W32(tmp1W32-WEBRTC_SPL_LSHIFT_W32(((WebRtc_Word32)b1_high),16), 1);
/* Calculate 2*x*b1-b2+f[i] */
tmp1W32 = WEBRTC_SPL_LSHIFT_W32( (WEBRTC_SPL_MUL_16_16(b1_high, x) +
WEBRTC_SPL_MUL_16_16_RSFT(b1_low, x, 15)), 2);
tmp1W32 -= b2;
tmp1W32 += WEBRTC_SPL_LSHIFT_W32((WebRtc_Word32)f[i], 14);
/* Update b2 for next round */
b2 = tmp2W32;
}
/* Split b1 (in tmp1W32) into a high and low part */
b1_high = (WebRtc_Word16)WEBRTC_SPL_RSHIFT_W32(tmp1W32, 16);
b1_low = (WebRtc_Word16)WEBRTC_SPL_RSHIFT_W32(tmp1W32-WEBRTC_SPL_LSHIFT_W32(((WebRtc_Word32)b1_high),16), 1);
/* tmp1W32 = x*b1 - b2 + f[i]/2 */
tmp1W32 = WEBRTC_SPL_LSHIFT_W32(WEBRTC_SPL_MUL_16_16(b1_high, x), 1) +
WEBRTC_SPL_LSHIFT_W32(WEBRTC_SPL_MUL_16_16_RSFT(b1_low, x, 15), 1);
tmp1W32 -= b2;
tmp1W32 += WEBRTC_SPL_LSHIFT_W32((WebRtc_Word32)f[i], 13);
/* Handle overflows and set to maximum or minimum WebRtc_Word16 instead */
if (tmp1W32>((WebRtc_Word32)33553408)) {
return(WEBRTC_SPL_WORD16_MAX);
} else if (tmp1W32<((WebRtc_Word32)-33554432)) {
return(WEBRTC_SPL_WORD16_MIN);
} else {
return((WebRtc_Word16)WEBRTC_SPL_RSHIFT_W32(tmp1W32, 10));
}
}
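/* Illustration, not part of the iLBC sources: a hypothetical floating-point
   reference for the fixed-point routine above. It is Clenshaw's recurrence
   for the series C(x) = T_0(x) + f[1]*T_1(x) + ... + f[4]*T_4(x) + f[5]/2,
   mirroring the Q-domain steps of WebRtcIlbcfix_Chebyshev one for one. */
static float ChebyshevFlpSketch(float x, const float *f) {
  float b2 = 1.0f;             /* corresponds to b2 = 1.0 (Q23) above */
  float b1 = 2.0f * x + f[1];  /* b1 = 2*x + f[1] */
  float b0;
  int i;
  for (i = 2; i < 5; i++) {
    b0 = 2.0f * x * b1 - b2 + f[i];  /* 2*x*b1 - b2 + f[i] */
    b2 = b1;
    b1 = b0;
  }
  /* The final step uses x (not 2*x) and half of f[5]. */
  return x * b1 - b2 + 0.5f * f[5];
}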


@@ -0,0 +1,37 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/******************************************************************
iLBC Speech Coder ANSI-C Source Code
WebRtcIlbcfix_Chebyshev.h
******************************************************************/
#ifndef WEBRTC_MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_CHEBYSHEV_H_
#define WEBRTC_MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_CHEBYSHEV_H_
#include "defines.h"
/*------------------------------------------------------------------*
* Calculate the Chebyshev polynomial series
* F(w) = 2*exp(-j5w)*C(x)
* C(x) = (T_0(x) + f(1)T_1(x) + ... + f(4)T_4(x) + f(5)/2)
* T_i(x) is the i-th order Chebyshev polynomial
*------------------------------------------------------------------*/
WebRtc_Word16 WebRtcIlbcfix_Chebyshev(
/* (o) Result of C(x) */
WebRtc_Word16 x, /* (i) Value at which the Chebyshev polynomial is evaluated */
WebRtc_Word16 *f /* (i) The coefficients in the polynomial */
);
#endif


@@ -0,0 +1,49 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/******************************************************************
iLBC Speech Coder ANSI-C Source Code
WebRtcIlbcfix_CompCorr.c
******************************************************************/
#include "defines.h"
/*----------------------------------------------------------------*
* Compute cross correlation and pitch gain for pitch prediction
* of last subframe at given lag.
*---------------------------------------------------------------*/
void WebRtcIlbcfix_CompCorr(
WebRtc_Word32 *corr, /* (o) cross correlation */
WebRtc_Word32 *ener, /* (o) energy */
WebRtc_Word16 *buffer, /* (i) signal buffer */
WebRtc_Word16 lag, /* (i) pitch lag */
WebRtc_Word16 bLen, /* (i) length of buffer */
WebRtc_Word16 sRange, /* (i) correlation search length */
WebRtc_Word16 scale /* (i) number of rightshifts to use */
){
WebRtc_Word16 *w16ptr;
w16ptr=&buffer[bLen-sRange-lag];
/* Calculate correlation and energy */
(*corr)=WebRtcSpl_DotProductWithScale(&buffer[bLen-sRange], w16ptr, sRange, scale);
(*ener)=WebRtcSpl_DotProductWithScale(w16ptr, w16ptr, sRange, scale);
/* If the energy is zero, set the correlation to zero and the
energy to one in order to avoid problems in later divisions */
if (*ener == 0) {
*corr = 0;
*ener = 1;
}
}
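/* Illustration, not part of the iLBC sources: a hypothetical sketch of how a
   caller would typically turn the two outputs above into a pitch gain. The
   ener output is forced to at least 1, so the division is always defined. */
#include <stdint.h>

static float PitchGainFromCompCorrSketch(int32_t corr, int32_t ener) {
  /* ener >= 1 by construction in WebRtcIlbcfix_CompCorr */
  return (float)corr / (float)ener;
}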


@@ -0,0 +1,39 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/******************************************************************
iLBC Speech Coder ANSI-C Source Code
WebRtcIlbcfix_CompCorr.h
******************************************************************/
#ifndef WEBRTC_MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_COMP_CORR_H_
#define WEBRTC_MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_COMP_CORR_H_
#include "defines.h"
/*----------------------------------------------------------------*
* Compute cross correlation and pitch gain for pitch prediction
* of last subframe at given lag.
*---------------------------------------------------------------*/
void WebRtcIlbcfix_CompCorr(
WebRtc_Word32 *corr, /* (o) cross correlation */
WebRtc_Word32 *ener, /* (o) energy */
WebRtc_Word16 *buffer, /* (i) signal buffer */
WebRtc_Word16 lag, /* (i) pitch lag */
WebRtc_Word16 bLen, /* (i) length of buffer */
WebRtc_Word16 sRange, /* (i) correlation search length */
WebRtc_Word16 scale /* (i) number of rightshifts to use */
);
#endif


@@ -0,0 +1,49 @@
clear;
pack;
%
% Enter the path to YOUR executable and remember to define the preprocessor
% variable PRINT_MIPS to get the instructions printed to the screen.
%
command = '!iLBCtest.exe 30 speechAndBGnoise.pcm out1.bit out1.pcm tlm10_30ms.dat';
cout=' > st.txt'; % redirect the output to st.txt (loaded into variable 'st' below)
eval(strcat(command,cout));
if(length(cout)>3)
load st.txt
else
disp('No cout file to load')
end
% find the frame delimiters (-1 entries) and the nonzero entries
index = find(st(1:end,1)==-1);
indexnonzero = find(st(1:end,1)>0);
frames = length(index)-indexnonzero(1)+1;
start = indexnonzero(1) - 1;
functionOrder=max(st(:,2));
new=zeros(frames,functionOrder);
for i = 1:frames,
for j = index(start-1+i)+1:(index(start+i)-1),
new(i,st(j,2)) = new(i,st(j,2)) + st(j,1);
end
end
result=zeros(functionOrder,3);
for i=1:functionOrder
nonzeroelements = find(new(1:end,i)>0);
result(i,1)=i;
% Compute each function's mean complexity
% result(i,2)=(sum(new(nonzeroelements,i))/(length(nonzeroelements)*0.03))/1000000;
% Compute each function's maximum complexity in encoding
% and decoding respectively and then add it together:
% result(i,3)=(max(new(1:end,i))/0.03)/1000000;
result(i,3)=(max(new(1:size(new,1)/2,i))/0.03)/1000000 + (max(new(size(new,1)/2+1:end,i))/0.03)/1000000;
end
result
% Compute maximum complexity for a single frame (enc/dec separately and together)
maxEncComplexityInAFrame = (max(sum(new(1:size(new,1)/2,:),2))/0.03)/1000000
maxDecComplexityInAFrame = (max(sum(new(size(new,1)/2+1:end,:),2))/0.03)/1000000
totalComplexity = maxEncComplexityInAFrame + maxDecComplexityInAFrame


@@ -0,0 +1,666 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/******************************************************************
iLBC Speech Coder ANSI-C Source Code
constants.c
******************************************************************/
#include "defines.h"
#include "constants.h"
/* HP Filters {b[0] b[1] b[2] -a[1] -a[2]} */
const WebRtc_Word16 WebRtcIlbcfix_kHpInCoefs[5] = {3798, -7596, 3798, 7807, -3733};
const WebRtc_Word16 WebRtcIlbcfix_kHpOutCoefs[5] = {3849, -7699, 3849, 7918, -3833};
/* Window in Q11 to window the energies of the 5 choices (3 for 20ms) in the choice of
the 80 sample start state
*/
const WebRtc_Word16 WebRtcIlbcfix_kStartSequenceEnrgWin[NSUB_MAX-1]= {
1638, 1843, 2048, 1843, 1638
};
/* LP Filter coeffs used for downsampling */
const WebRtc_Word16 WebRtcIlbcfix_kLpFiltCoefs[FILTERORDER_DS_PLUS1]= {
-273, 512, 1297, 1696, 1297, 512, -273
};
/* Constants used in the LPC calculations */
/* Hanning LPC window (in Q15) */
const WebRtc_Word16 WebRtcIlbcfix_kLpcWin[BLOCKL_MAX] = {
6, 22, 50, 89, 139, 200, 272, 355, 449, 554, 669, 795,
932, 1079, 1237, 1405, 1583, 1771, 1969, 2177, 2395, 2622, 2858, 3104,
3359, 3622, 3894, 4175, 4464, 4761, 5066, 5379, 5699, 6026, 6361, 6702,
7050, 7404, 7764, 8130, 8502, 8879, 9262, 9649, 10040, 10436, 10836, 11240,
11647, 12058, 12471, 12887, 13306, 13726, 14148, 14572, 14997, 15423, 15850, 16277,
16704, 17131, 17558, 17983, 18408, 18831, 19252, 19672, 20089, 20504, 20916, 21325,
21730, 22132, 22530, 22924, 23314, 23698, 24078, 24452, 24821, 25185, 25542, 25893,
26238, 26575, 26906, 27230, 27547, 27855, 28156, 28450, 28734, 29011, 29279, 29538,
29788, 30029, 30261, 30483, 30696, 30899, 31092, 31275, 31448, 31611, 31764, 31906,
32037, 32158, 32268, 32367, 32456, 32533, 32600, 32655, 32700, 32733, 32755, 32767,
32767, 32755, 32733, 32700, 32655, 32600, 32533, 32456, 32367, 32268, 32158, 32037,
31906, 31764, 31611, 31448, 31275, 31092, 30899, 30696, 30483, 30261, 30029, 29788,
29538, 29279, 29011, 28734, 28450, 28156, 27855, 27547, 27230, 26906, 26575, 26238,
25893, 25542, 25185, 24821, 24452, 24078, 23698, 23314, 22924, 22530, 22132, 21730,
21325, 20916, 20504, 20089, 19672, 19252, 18831, 18408, 17983, 17558, 17131, 16704,
16277, 15850, 15423, 14997, 14572, 14148, 13726, 13306, 12887, 12471, 12058, 11647,
11240, 10836, 10436, 10040, 9649, 9262, 8879, 8502, 8130, 7764, 7404, 7050,
6702, 6361, 6026, 5699, 5379, 5066, 4761, 4464, 4175, 3894, 3622, 3359,
3104, 2858, 2622, 2395, 2177, 1969, 1771, 1583, 1405, 1237, 1079, 932,
795, 669, 554, 449, 355, 272, 200, 139, 89, 50, 22, 6
};
/* Asymmetric LPC window (in Q15)*/
const WebRtc_Word16 WebRtcIlbcfix_kLpcAsymWin[BLOCKL_MAX] = {
2, 7, 15, 27, 42, 60, 81, 106, 135, 166, 201, 239,
280, 325, 373, 424, 478, 536, 597, 661, 728, 798, 872, 949,
1028, 1111, 1197, 1287, 1379, 1474, 1572, 1674, 1778, 1885, 1995, 2108,
2224, 2343, 2465, 2589, 2717, 2847, 2980, 3115, 3254, 3395, 3538, 3684,
3833, 3984, 4138, 4295, 4453, 4615, 4778, 4944, 5112, 5283, 5456, 5631,
5808, 5987, 6169, 6352, 6538, 6725, 6915, 7106, 7300, 7495, 7692, 7891,
8091, 8293, 8497, 8702, 8909, 9118, 9328, 9539, 9752, 9966, 10182, 10398,
10616, 10835, 11055, 11277, 11499, 11722, 11947, 12172, 12398, 12625, 12852, 13080,
13309, 13539, 13769, 14000, 14231, 14463, 14695, 14927, 15160, 15393, 15626, 15859,
16092, 16326, 16559, 16792, 17026, 17259, 17492, 17725, 17957, 18189, 18421, 18653,
18884, 19114, 19344, 19573, 19802, 20030, 20257, 20483, 20709, 20934, 21157, 21380,
21602, 21823, 22042, 22261, 22478, 22694, 22909, 23123, 23335, 23545, 23755, 23962,
24168, 24373, 24576, 24777, 24977, 25175, 25371, 25565, 25758, 25948, 26137, 26323,
26508, 26690, 26871, 27049, 27225, 27399, 27571, 27740, 27907, 28072, 28234, 28394,
28552, 28707, 28860, 29010, 29157, 29302, 29444, 29584, 29721, 29855, 29987, 30115,
30241, 30364, 30485, 30602, 30717, 30828, 30937, 31043, 31145, 31245, 31342, 31436,
31526, 31614, 31699, 31780, 31858, 31933, 32005, 32074, 32140, 32202, 32261, 32317,
32370, 32420, 32466, 32509, 32549, 32585, 32618, 32648, 32675, 32698, 32718, 32734,
32748, 32758, 32764, 32767, 32767, 32667, 32365, 31863, 31164, 30274, 29197, 27939,
26510, 24917, 23170, 21281, 19261, 17121, 14876, 12540, 10126, 7650, 5126, 2571
};
/* Lag window for LPC (Q31) */
const WebRtc_Word32 WebRtcIlbcfix_kLpcLagWin[LPC_FILTERORDER + 1]={
2147483647, 2144885453, 2137754373, 2125918626, 2109459810,
2088483140, 2063130336, 2033564590, 1999977009, 1962580174,
1921610283};
/* WebRtcIlbcfix_kLpcChirpSyntDenum vector in Q15 corresponding
* floating point vector {1 0.9025 0.9025^2 0.9025^3 ...}
*/
const WebRtc_Word16 WebRtcIlbcfix_kLpcChirpSyntDenum[LPC_FILTERORDER + 1] = {
32767, 29573, 26690, 24087,
21739, 19619, 17707, 15980,
14422, 13016, 11747};
/* WebRtcIlbcfix_kLpcChirpWeightDenum in Q15 corresponding to
* floating point vector {1 0.4222 0.4222^2... }
*/
const WebRtc_Word16 WebRtcIlbcfix_kLpcChirpWeightDenum[LPC_FILTERORDER + 1] = {
32767, 13835, 5841, 2466, 1041, 440,
186, 78, 33, 14, 6};
/* LSF quantization Q13 domain */
const WebRtc_Word16 WebRtcIlbcfix_kLsfCb[64 * 3 + 128 * 3 + 128 * 4] = {
1273, 2238, 3696,
3199, 5309, 8209,
3606, 5671, 7829,
2815, 5262, 8778,
2608, 4027, 5493,
1582, 3076, 5945,
2983, 4181, 5396,
2437, 4322, 6902,
1861, 2998, 4613,
2007, 3250, 5214,
1388, 2459, 4262,
2563, 3805, 5269,
2036, 3522, 5129,
1935, 4025, 6694,
2744, 5121, 7338,
2810, 4248, 5723,
3054, 5405, 7745,
1449, 2593, 4763,
3411, 5128, 6596,
2484, 4659, 7496,
1668, 2879, 4818,
1812, 3072, 5036,
1638, 2649, 3900,
2464, 3550, 4644,
1853, 2900, 4158,
2458, 4163, 5830,
2556, 4036, 6254,
2703, 4432, 6519,
3062, 4953, 7609,
1725, 3703, 6187,
2221, 3877, 5427,
2339, 3579, 5197,
2021, 4633, 7037,
2216, 3328, 4535,
2961, 4739, 6667,
2807, 3955, 5099,
2788, 4501, 6088,
1642, 2755, 4431,
3341, 5282, 7333,
2414, 3726, 5727,
1582, 2822, 5269,
2259, 3447, 4905,
3117, 4986, 7054,
1825, 3491, 5542,
3338, 5736, 8627,
1789, 3090, 5488,
2566, 3720, 4923,
2846, 4682, 7161,
1950, 3321, 5976,
1834, 3383, 6734,
3238, 4769, 6094,
2031, 3978, 5903,
1877, 4068, 7436,
2131, 4644, 8296,
2764, 5010, 8013,
2194, 3667, 6302,
2053, 3127, 4342,
3523, 6595, 10010,
3134, 4457, 5748,
3142, 5819, 9414,
2223, 4334, 6353,
2022, 3224, 4822,
2186, 3458, 5544,
2552, 4757, 6870,
10905, 12917, 14578,
9503, 11485, 14485,
9518, 12494, 14052,
6222, 7487, 9174,
7759, 9186, 10506,
8315, 12755, 14786,
9609, 11486, 13866,
8909, 12077, 13643,
7369, 9054, 11520,
9408, 12163, 14715,
6436, 9911, 12843,
7109, 9556, 11884,
7557, 10075, 11640,
6482, 9202, 11547,
6463, 7914, 10980,
8611, 10427, 12752,
7101, 9676, 12606,
7428, 11252, 13172,
10197, 12955, 15842,
7487, 10955, 12613,
5575, 7858, 13621,
7268, 11719, 14752,
7476, 11744, 13795,
7049, 8686, 11922,
8234, 11314, 13983,
6560, 11173, 14984,
6405, 9211, 12337,
8222, 12054, 13801,
8039, 10728, 13255,
10066, 12733, 14389,
6016, 7338, 10040,
6896, 8648, 10234,
7538, 9170, 12175,
7327, 12608, 14983,
10516, 12643, 15223,
5538, 7644, 12213,
6728, 12221, 14253,
7563, 9377, 12948,
8661, 11023, 13401,
7280, 8806, 11085,
7723, 9793, 12333,
12225, 14648, 16709,
8768, 13389, 15245,
10267, 12197, 13812,
5301, 7078, 11484,
7100, 10280, 11906,
8716, 12555, 14183,
9567, 12464, 15434,
7832, 12305, 14300,
7608, 10556, 12121,
8913, 11311, 12868,
7414, 9722, 11239,
8666, 11641, 13250,
9079, 10752, 12300,
8024, 11608, 13306,
10453, 13607, 16449,
8135, 9573, 10909,
6375, 7741, 10125,
10025, 12217, 14874,
6985, 11063, 14109,
9296, 13051, 14642,
8613, 10975, 12542,
6583, 10414, 13534,
6191, 9368, 13430,
5742, 6859, 9260,
7723, 9813, 13679,
8137, 11291, 12833,
6562, 8973, 10641,
6062, 8462, 11335,
6928, 8784, 12647,
7501, 8784, 10031,
8372, 10045, 12135,
8191, 9864, 12746,
5917, 7487, 10979,
5516, 6848, 10318,
6819, 9899, 11421,
7882, 12912, 15670,
9558, 11230, 12753,
7752, 9327, 11472,
8479, 9980, 11358,
11418, 14072, 16386,
7968, 10330, 14423,
8423, 10555, 12162,
6337, 10306, 14391,
8850, 10879, 14276,
6750, 11885, 15710,
7037, 8328, 9764,
6914, 9266, 13476,
9746, 13949, 15519,
11032, 14444, 16925,
8032, 10271, 11810,
10962, 13451, 15833,
10021, 11667, 13324,
6273, 8226, 12936,
8543, 10397, 13496,
7936, 10302, 12745,
6769, 8138, 10446,
6081, 7786, 11719,
8637, 11795, 14975,
8790, 10336, 11812,
7040, 8490, 10771,
7338, 10381, 13153,
6598, 7888, 9358,
6518, 8237, 12030,
9055, 10763, 12983,
6490, 10009, 12007,
9589, 12023, 13632,
6867, 9447, 10995,
7930, 9816, 11397,
10241, 13300, 14939,
5830, 8670, 12387,
9870, 11915, 14247,
9318, 11647, 13272,
6721, 10836, 12929,
6543, 8233, 9944,
8034, 10854, 12394,
9112, 11787, 14218,
9302, 11114, 13400,
9022, 11366, 13816,
6962, 10461, 12480,
11288, 13333, 15222,
7249, 8974, 10547,
10566, 12336, 14390,
6697, 11339, 13521,
11851, 13944, 15826,
6847, 8381, 11349,
7509, 9331, 10939,
8029, 9618, 11909,
13973, 17644, 19647, 22474,
14722, 16522, 20035, 22134,
16305, 18179, 21106, 23048,
15150, 17948, 21394, 23225,
13582, 15191, 17687, 22333,
11778, 15546, 18458, 21753,
16619, 18410, 20827, 23559,
14229, 15746, 17907, 22474,
12465, 15327, 20700, 22831,
15085, 16799, 20182, 23410,
13026, 16935, 19890, 22892,
14310, 16854, 19007, 22944,
14210, 15897, 18891, 23154,
14633, 18059, 20132, 22899,
15246, 17781, 19780, 22640,
16396, 18904, 20912, 23035,
14618, 17401, 19510, 21672,
15473, 17497, 19813, 23439,
18851, 20736, 22323, 23864,
15055, 16804, 18530, 20916,
16490, 18196, 19990, 21939,
11711, 15223, 21154, 23312,
13294, 15546, 19393, 21472,
12956, 16060, 20610, 22417,
11628, 15843, 19617, 22501,
14106, 16872, 19839, 22689,
15655, 18192, 20161, 22452,
12953, 15244, 20619, 23549,
15322, 17193, 19926, 21762,
16873, 18676, 20444, 22359,
14874, 17871, 20083, 21959,
11534, 14486, 19194, 21857,
17766, 19617, 21338, 23178,
13404, 15284, 19080, 23136,
15392, 17527, 19470, 21953,
14462, 16153, 17985, 21192,
17734, 19750, 21903, 23783,
16973, 19096, 21675, 23815,
16597, 18936, 21257, 23461,
15966, 17865, 20602, 22920,
15416, 17456, 20301, 22972,
18335, 20093, 21732, 23497,
15548, 17217, 20679, 23594,
15208, 16995, 20816, 22870,
13890, 18015, 20531, 22468,
13211, 15377, 19951, 22388,
12852, 14635, 17978, 22680,
16002, 17732, 20373, 23544,
11373, 14134, 19534, 22707,
17329, 19151, 21241, 23462,
15612, 17296, 19362, 22850,
15422, 19104, 21285, 23164,
13792, 17111, 19349, 21370,
15352, 17876, 20776, 22667,
15253, 16961, 18921, 22123,
14108, 17264, 20294, 23246,
15785, 17897, 20010, 21822,
17399, 19147, 20915, 22753,
13010, 15659, 18127, 20840,
16826, 19422, 22218, 24084,
18108, 20641, 22695, 24237,
18018, 20273, 22268, 23920,
16057, 17821, 21365, 23665,
16005, 17901, 19892, 23016,
13232, 16683, 21107, 23221,
13280, 16615, 19915, 21829,
14950, 18575, 20599, 22511,
16337, 18261, 20277, 23216,
14306, 16477, 21203, 23158,
12803, 17498, 20248, 22014,
14327, 17068, 20160, 22006,
14402, 17461, 21599, 23688,
16968, 18834, 20896, 23055,
15070, 17157, 20451, 22315,
15419, 17107, 21601, 23946,
16039, 17639, 19533, 21424,
16326, 19261, 21745, 23673,
16489, 18534, 21658, 23782,
16594, 18471, 20549, 22807,
18973, 21212, 22890, 24278,
14264, 18674, 21123, 23071,
15117, 16841, 19239, 23118,
13762, 15782, 20478, 23230,
14111, 15949, 20058, 22354,
14990, 16738, 21139, 23492,
13735, 16971, 19026, 22158,
14676, 17314, 20232, 22807,
16196, 18146, 20459, 22339,
14747, 17258, 19315, 22437,
14973, 17778, 20692, 23367,
15715, 17472, 20385, 22349,
15702, 18228, 20829, 23410,
14428, 16188, 20541, 23630,
16824, 19394, 21365, 23246,
13069, 16392, 18900, 21121,
12047, 16640, 19463, 21689,
14757, 17433, 19659, 23125,
15185, 16930, 19900, 22540,
16026, 17725, 19618, 22399,
16086, 18643, 21179, 23472,
15462, 17248, 19102, 21196,
17368, 20016, 22396, 24096,
12340, 14475, 19665, 23362,
13636, 16229, 19462, 22728,
14096, 16211, 19591, 21635,
12152, 14867, 19943, 22301,
14492, 17503, 21002, 22728,
14834, 16788, 19447, 21411,
14650, 16433, 19326, 22308,
14624, 16328, 19659, 23204,
13888, 16572, 20665, 22488,
12977, 16102, 18841, 22246,
15523, 18431, 21757, 23738,
14095, 16349, 18837, 20947,
13266, 17809, 21088, 22839,
15427, 18190, 20270, 23143,
11859, 16753, 20935, 22486,
12310, 17667, 21736, 23319,
14021, 15926, 18702, 22002,
12286, 15299, 19178, 21126,
15703, 17491, 21039, 23151,
12272, 14018, 18213, 22570,
14817, 16364, 18485, 22598,
17109, 19683, 21851, 23677,
12657, 14903, 19039, 22061,
14713, 16487, 20527, 22814,
14635, 16726, 18763, 21715,
15878, 18550, 20718, 22906
};
const WebRtc_Word16 WebRtcIlbcfix_kLsfDimCb[LSF_NSPLIT] = {3, 3, 4};
const WebRtc_Word16 WebRtcIlbcfix_kLsfSizeCb[LSF_NSPLIT] = {64,128,128};
const WebRtc_Word16 WebRtcIlbcfix_kLsfMean[LPC_FILTERORDER] = {
2308, 3652, 5434, 7885,
10255, 12559, 15160, 17513,
20328, 22752};
const WebRtc_Word16 WebRtcIlbcfix_kLspMean[LPC_FILTERORDER] = {
31476, 29565, 25819, 18725, 10276,
1236, -9049, -17600, -25884, -30618
};
/* Q14 */
const WebRtc_Word16 WebRtcIlbcfix_kLsfWeight20ms[4] = {12288, 8192, 4096, 0};
const WebRtc_Word16 WebRtcIlbcfix_kLsfWeight30ms[6] = {8192, 16384, 10923, 5461, 0, 0};
/*
cos(x) in Q15
WebRtcIlbcfix_kCos[i] = cos(pi*i/64.0)
used in WebRtcIlbcfix_Lsp2Lsf()
*/
const WebRtc_Word16 WebRtcIlbcfix_kCos[64] = {
32767, 32729, 32610, 32413, 32138, 31786, 31357, 30853,
30274, 29622, 28899, 28106, 27246, 26320, 25330, 24279,
23170, 22006, 20788, 19520, 18205, 16846, 15447, 14010,
12540, 11039, 9512, 7962, 6393, 4808, 3212, 1608,
0, -1608, -3212, -4808, -6393, -7962, -9512, -11039,
-12540, -14010, -15447, -16846, -18205, -19520, -20788, -22006,
-23170, -24279, -25330, -26320, -27246, -28106, -28899, -29622,
-30274, -30853, -31357, -31786, -32138, -32413, -32610, -32729
};
/*
Derivative in Q19, used to interpolate between the
WebRtcIlbcfix_kCos[] values to get a more exact y = cos(x)
*/
const WebRtc_Word16 WebRtcIlbcfix_kCosDerivative[64] = {
-632, -1893, -3150, -4399, -5638, -6863, -8072, -9261,
-10428, -11570, -12684, -13767, -14817, -15832, -16808, -17744,
-18637, -19486, -20287, -21039, -21741, -22390, -22986, -23526,
-24009, -24435, -24801, -25108, -25354, -25540, -25664, -25726,
-25726, -25664, -25540, -25354, -25108, -24801, -24435, -24009,
-23526, -22986, -22390, -21741, -21039, -20287, -19486, -18637,
-17744, -16808, -15832, -14817, -13767, -12684, -11570, -10428,
-9261, -8072, -6863, -5638, -4399, -3150, -1893, -632};
/*
Table in Q15, used for a2lsf conversion
WebRtcIlbcfix_kCosGrid[i] = cos((2*pi*i)/(float)(2*COS_GRID_POINTS));
*/
const WebRtc_Word16 WebRtcIlbcfix_kCosGrid[COS_GRID_POINTS + 1] = {
32760, 32723, 32588, 32364, 32051, 31651, 31164, 30591,
29935, 29196, 28377, 27481, 26509, 25465, 24351, 23170,
21926, 20621, 19260, 17846, 16384, 14876, 13327, 11743,
10125, 8480, 6812, 5126, 3425, 1714, 0, -1714, -3425,
-5126, -6812, -8480, -10125, -11743, -13327, -14876,
-16384, -17846, -19260, -20621, -21926, -23170, -24351,
-25465, -26509, -27481, -28377, -29196, -29935, -30591,
-31164, -31651, -32051, -32364, -32588, -32723, -32760
};
/*
Derivative of y = acos(x) in Q12
used in WebRtcIlbcfix_Lsp2Lsf()
*/
const WebRtc_Word16 WebRtcIlbcfix_kAcosDerivative[64] = {
-26887, -8812, -5323, -3813, -2979, -2444, -2081, -1811,
-1608, -1450, -1322, -1219, -1132, -1059, -998, -946,
-901, -861, -827, -797, -772, -750, -730, -713,
-699, -687, -677, -668, -662, -657, -654, -652,
-652, -654, -657, -662, -668, -677, -687, -699,
-713, -730, -750, -772, -797, -827, -861, -901,
-946, -998, -1059, -1132, -1219, -1322, -1450, -1608,
-1811, -2081, -2444, -2979, -3813, -5323, -8812, -26887
};
/* Tables for quantization of start state */
/* State quantization tables */
const WebRtc_Word16 WebRtcIlbcfix_kStateSq3[8] = { /* Values in Q13 */
-30473, -17838, -9257, -2537,
3639, 10893, 19958, 32636
};
/* This table defines the limits for the selection of the freqg
less than or equal to value 0 => index = 0
less than or equal to value k => index = k
*/
const WebRtc_Word32 WebRtcIlbcfix_kChooseFrgQuant[64] = {
118, 163, 222, 305, 425, 604,
851, 1174, 1617, 2222, 3080, 4191,
5525, 7215, 9193, 11540, 14397, 17604,
21204, 25209, 29863, 35720, 42531, 50375,
59162, 68845, 80108, 93754, 110326, 129488,
150654, 174328, 201962, 233195, 267843, 308239,
354503, 405988, 464251, 531550, 608652, 697516,
802526, 928793, 1080145, 1258120, 1481106, 1760881,
2111111, 2546619, 3078825, 3748642, 4563142, 5573115,
6887601, 8582108, 10797296, 14014513, 18625760, 25529599,
37302935, 58819185, 109782723, WEBRTC_SPL_WORD32_MAX
};
const WebRtc_Word16 WebRtcIlbcfix_kScale[64] = {
/* Values in Q16 */
29485, 25003, 21345, 18316, 15578, 13128, 10973, 9310, 7955,
6762, 5789, 4877, 4255, 3699, 3258, 2904, 2595, 2328,
2123, 1932, 1785, 1631, 1493, 1370, 1260, 1167, 1083,
/* Values in Q21 */
32081, 29611, 27262, 25229, 23432, 21803, 20226, 18883, 17609,
16408, 15311, 14327, 13390, 12513, 11693, 10919, 10163, 9435,
8739, 8100, 7424, 6813, 6192, 5648, 5122, 4639, 4207, 3798,
3404, 3048, 2706, 2348, 2036, 1713, 1393, 1087, 747
};
/* frgq in fixed point, but already computed like this:
for(i=0; i<64; i++){
a = (pow(10,frgq[i])/4.5);
WebRtcIlbcfix_kFrgQuantMod[i] = round(a);
}
Values 0:36 in Q8
37:58 in Q5
59:63 in Q3
*/
const WebRtc_Word16 WebRtcIlbcfix_kFrgQuantMod[64] = {
/* First 37 values in Q8 */
569, 671, 786, 916, 1077, 1278,
1529, 1802, 2109, 2481, 2898, 3440,
3943, 4535, 5149, 5778, 6464, 7208,
7904, 8682, 9397, 10285, 11240, 12246,
13313, 14382, 15492, 16735, 18131, 19693,
21280, 22912, 24624, 26544, 28432, 30488,
32720,
/* 22 values in Q5 */
4383, 4684, 5012, 5363, 5739, 6146,
6603, 7113, 7679, 8285, 9040, 9850,
10838, 11882, 13103, 14467, 15950, 17669,
19712, 22016, 24800, 28576,
/* 5 values in Q3 */
8240, 9792, 12040, 15440, 22472
};
/* Constants for codebook search and creation */
/* Expansion filter to get additional cb section.
* Q12 and reversed compared to flp
*/
const WebRtc_Word16 WebRtcIlbcfix_kCbFiltersRev[CB_FILTERLEN]={
-140, 446, -755, 3302, 2922, -590, 343, -138};
/* Weighting coefficients for short lags.
* [0.2 0.4 0.6 0.8] in Q15 */
const WebRtc_Word16 WebRtcIlbcfix_kAlpha[4]={
6554, 13107, 19661, 26214};
/* Ranges for search and filters at different subframes */
const WebRtc_Word16 WebRtcIlbcfix_kSearchRange[5][CB_NSTAGES]={
{58,58,58}, {108,44,44}, {108,108,108}, {108,108,108}, {108,108,108}};
const WebRtc_Word16 WebRtcIlbcfix_kFilterRange[5]={63, 85, 125, 147, 147};
/* Gain Quantization for the codebook gains of the 3 stages */
/* Q14 (one extra value (max WebRtc_Word16) to simplify for the search) */
const WebRtc_Word16 WebRtcIlbcfix_kGainSq3[9]={
-16384, -10813, -5407, 0, 4096, 8192,
12288, 16384, 32767};
/* Q14 (one extra value (max WebRtc_Word16) to simplify for the search) */
const WebRtc_Word16 WebRtcIlbcfix_kGainSq4[17]={
-17203, -14746, -12288, -9830, -7373, -4915,
-2458, 0, 2458, 4915, 7373, 9830,
12288, 14746, 17203, 19661, 32767};
/* Q14 (one extra value (max WebRtc_Word16) to simplify for the search) */
const WebRtc_Word16 WebRtcIlbcfix_kGainSq5[33]={
614, 1229, 1843, 2458, 3072, 3686,
4301, 4915, 5530, 6144, 6758, 7373,
7987, 8602, 9216, 9830, 10445, 11059,
11674, 12288, 12902, 13517, 14131, 14746,
15360, 15974, 16589, 17203, 17818, 18432,
19046, 19661, 32767};
/* Q14 gain_sq5Tbl squared in Q14 */
const WebRtc_Word16 WebRtcIlbcfix_kGainSq5Sq[32] = {
23, 92, 207, 368, 576, 829,
1129, 1474, 1866, 2304, 2787, 3317,
3893, 4516, 5184, 5897, 6658, 7464,
8318, 9216, 10160, 11151, 12187, 13271,
14400, 15574, 16796, 18062, 19377, 20736,
22140, 23593
};
const WebRtc_Word16* const WebRtcIlbcfix_kGain[3] =
{WebRtcIlbcfix_kGainSq5, WebRtcIlbcfix_kGainSq4, WebRtcIlbcfix_kGainSq3};
/* Tables for the Enhancer, using upsampling factor 4 (ENH_UPS0 = 4) */
const WebRtc_Word16 WebRtcIlbcfix_kEnhPolyPhaser[ENH_UPS0][ENH_FLO_MULT2_PLUS1]={
{0, 0, 0, 4096, 0, 0, 0},
{64, -315, 1181, 3531, -436, 77, -64},
{97, -509, 2464, 2464, -509, 97, -97},
{77, -436, 3531, 1181, -315, 64, -77}
};
const WebRtc_Word16 WebRtcIlbcfix_kEnhWt[3] = {
4800, 16384, 27968 /* Q16 */
};
const WebRtc_Word16 WebRtcIlbcfix_kEnhPlocs[ENH_NBLOCKS_TOT] = {
160, 480, 800, 1120, 1440, 1760, 2080, 2400 /* Q(-2) */
};
/* PLC table */
const WebRtc_Word16 WebRtcIlbcfix_kPlcPerSqr[6] = { /* Grid points for square of periodicity in Q15 */
839, 1343, 2048, 2998, 4247, 5849
};
const WebRtc_Word16 WebRtcIlbcfix_kPlcPitchFact[6] = { /* Value of y=(x^4-0.4)/(0.7-0.4) in grid points in Q15 */
0, 5462, 10922, 16384, 21846, 27306
};
const WebRtc_Word16 WebRtcIlbcfix_kPlcPfSlope[6] = { /* Slope of y=(x^4-0.4)/(0.7-0.4) in Q11 */
26667, 18729, 13653, 10258, 7901, 6214
};
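/* Illustration, not part of the iLBC sources: a hypothetical stand-alone
   check of the Q15 cosine table WebRtcIlbcfix_kCos above. The entries appear
   to match round(32768*cos(pi*i/64)), saturated to 32767 at i = 0. */
#include <math.h>
#include <stdio.h>

int main(void) {
  const double kPi = 3.14159265358979323846;
  int i;
  for (i = 0; i < 8; i++) {
    int q15 = (int)floor(32768.0 * cos(kPi * i / 64.0) + 0.5);
    if (q15 > 32767) q15 = 32767;  /* saturate to the WebRtc_Word16 range */
    printf("%d\n", q15);           /* expected: 32767 32729 32610 32413 ... */
  }
  return 0;
}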


@@ -0,0 +1,92 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/******************************************************************
iLBC Speech Coder ANSI-C Source Code
constants.h
******************************************************************/
#ifndef WEBRTC_MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_CONSTANTS_H_
#define WEBRTC_MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_CONSTANTS_H_
#include "defines.h"
#include "typedefs.h"
/* high pass filters */
extern const WebRtc_Word16 WebRtcIlbcfix_kHpInCoefs[];
extern const WebRtc_Word16 WebRtcIlbcfix_kHpOutCoefs[];
/* Window for start state decision */
extern const WebRtc_Word16 WebRtcIlbcfix_kStartSequenceEnrgWin[];
/* low pass filter used for downsampling */
extern const WebRtc_Word16 WebRtcIlbcfix_kLpFiltCoefs[];
/* LPC analysis and quantization */
extern const WebRtc_Word16 WebRtcIlbcfix_kLpcWin[];
extern const WebRtc_Word16 WebRtcIlbcfix_kLpcAsymWin[];
extern const WebRtc_Word32 WebRtcIlbcfix_kLpcLagWin[];
extern const WebRtc_Word16 WebRtcIlbcfix_kLpcChirpSyntDenum[];
extern const WebRtc_Word16 WebRtcIlbcfix_kLpcChirpWeightDenum[];
extern const WebRtc_Word16 WebRtcIlbcfix_kLsfDimCb[];
extern const WebRtc_Word16 WebRtcIlbcfix_kLsfSizeCb[];
extern const WebRtc_Word16 WebRtcIlbcfix_kLsfCb[];
extern const WebRtc_Word16 WebRtcIlbcfix_kLsfWeight20ms[];
extern const WebRtc_Word16 WebRtcIlbcfix_kLsfWeight30ms[];
extern const WebRtc_Word16 WebRtcIlbcfix_kLsfMean[];
extern const WebRtc_Word16 WebRtcIlbcfix_kLspMean[];
extern const WebRtc_Word16 WebRtcIlbcfix_kCos[];
extern const WebRtc_Word16 WebRtcIlbcfix_kCosDerivative[];
extern const WebRtc_Word16 WebRtcIlbcfix_kCosGrid[];
extern const WebRtc_Word16 WebRtcIlbcfix_kAcosDerivative[];
/* state quantization tables */
extern const WebRtc_Word16 WebRtcIlbcfix_kStateSq3[];
extern const WebRtc_Word32 WebRtcIlbcfix_kChooseFrgQuant[];
extern const WebRtc_Word16 WebRtcIlbcfix_kScale[];
extern const WebRtc_Word16 WebRtcIlbcfix_kFrgQuantMod[];
/* Ranges for search and filters at different subframes */
extern const WebRtc_Word16 WebRtcIlbcfix_kSearchRange[5][CB_NSTAGES];
extern const WebRtc_Word16 WebRtcIlbcfix_kFilterRange[];
/* gain quantization tables */
extern const WebRtc_Word16 WebRtcIlbcfix_kGainSq3[];
extern const WebRtc_Word16 WebRtcIlbcfix_kGainSq4[];
extern const WebRtc_Word16 WebRtcIlbcfix_kGainSq5[];
extern const WebRtc_Word16 WebRtcIlbcfix_kGainSq5Sq[];
extern const WebRtc_Word16* const WebRtcIlbcfix_kGain[];
/* adaptive codebook definitions */
extern const WebRtc_Word16 WebRtcIlbcfix_kCbFiltersRev[];
extern const WebRtc_Word16 WebRtcIlbcfix_kAlpha[];
/* enhancer definitions */
extern const WebRtc_Word16 WebRtcIlbcfix_kEnhPolyPhaser[ENH_UPS0][ENH_FLO_MULT2_PLUS1];
extern const WebRtc_Word16 WebRtcIlbcfix_kEnhWt[];
extern const WebRtc_Word16 WebRtcIlbcfix_kEnhPlocs[];
/* PLC tables */
extern const WebRtc_Word16 WebRtcIlbcfix_kPlcPerSqr[];
extern const WebRtc_Word16 WebRtcIlbcfix_kPlcPitchFact[];
extern const WebRtc_Word16 WebRtcIlbcfix_kPlcPfSlope[];
#endif


@@ -0,0 +1,57 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/******************************************************************
iLBC Speech Coder ANSI-C Source Code
WebRtcIlbcfix_CreateAugmentedVec.c
******************************************************************/
#include "defines.h"
#include "constants.h"
/*----------------------------------------------------------------*
* Recreate a specific codebook vector from the augmented part.
*
*----------------------------------------------------------------*/
void WebRtcIlbcfix_CreateAugmentedVec(
WebRtc_Word16 index, /* (i) Index for the augmented vector to be created */
WebRtc_Word16 *buffer, /* (i) Pointer to the end of the codebook memory that
is used for creation of the augmented codebook */
WebRtc_Word16 *cbVec /* (o) The constructed codebook vector */
) {
WebRtc_Word16 ilow;
WebRtc_Word16 *ppo, *ppi;
WebRtc_Word16 cbVecTmp[4];
ilow = index-4;
/* copy the first noninterpolated part */
ppo = buffer-index;
WEBRTC_SPL_MEMCPY_W16(cbVec, ppo, index);
/* interpolation */
ppo = buffer - 4;
ppi = buffer - index - 4;
/* perform cbVec[ilow+k] = ((ppi[k]*alphaTbl[k])>>15) + ((ppo[k]*alphaTbl[3-k])>>15);
for k = 0..3
*/
WebRtcSpl_ElementwiseVectorMult(&cbVec[ilow], ppi, WebRtcIlbcfix_kAlpha, 4, 15);
WebRtcSpl_ReverseOrderMultArrayElements(cbVecTmp, ppo, &WebRtcIlbcfix_kAlpha[3], 4, 15);
WebRtcSpl_AddVectorsAndShift(&cbVec[ilow], &cbVec[ilow], cbVecTmp, 4, 0);
/* copy the second noninterpolated part */
ppo = buffer - index;
WEBRTC_SPL_MEMCPY_W16(cbVec+index,ppo,(SUBL-index));
}
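/* Illustration, not part of the iLBC sources: a hypothetical scalar
   equivalent of the three vector calls above, spelling out the 4-sample
   cross-fade between the two codebook segments with the Q15 weights in
   WebRtcIlbcfix_kAlpha (alphaTbl below). The WebRtc types come from
   defines.h, as in the file above. */
static void InterpolateFourSamplesSketch(WebRtc_Word16 *cbVec,
                                         WebRtc_Word16 ilow,
                                         const WebRtc_Word16 *ppi,
                                         const WebRtc_Word16 *ppo,
                                         const WebRtc_Word16 *alphaTbl) {
  int k;
  for (k = 0; k < 4; k++) {
    cbVec[ilow + k] = (WebRtc_Word16)(
        (((WebRtc_Word32)ppi[k] * alphaTbl[k]) >> 15) +
        (((WebRtc_Word32)ppo[k] * alphaTbl[3 - k]) >> 15));
  }
}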


@@ -0,0 +1,36 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/******************************************************************
iLBC Speech Coder ANSI-C Source Code
WebRtcIlbcfix_CreateAugmentedVec.h
******************************************************************/
#ifndef WEBRTC_MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_CREATE_AUGMENTED_VEC_H_
#define WEBRTC_MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_CREATE_AUGMENTED_VEC_H_
#include "defines.h"
/*----------------------------------------------------------------*
* Recreate a specific codebook vector from the augmented part.
*
*----------------------------------------------------------------*/
void WebRtcIlbcfix_CreateAugmentedVec(
WebRtc_Word16 index, /* (i) Index for the augmented vector to be created */
WebRtc_Word16 *buffer, /* (i) Pointer to the end of the codebook memory that
is used for creation of the augmented codebook */
WebRtc_Word16 *cbVec /* (o) The constructed codebook vector */
);
#endif


@@ -0,0 +1,244 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/******************************************************************
iLBC Speech Coder ANSI-C Source Code
WebRtcIlbcfix_Decode.c
******************************************************************/
#include "defines.h"
#include "simple_lsf_dequant.h"
#include "decoder_interpolate_lsf.h"
#include "index_conv_dec.h"
#include "do_plc.h"
#include "constants.h"
#include "enhancer_interface.h"
#include "xcorr_coef.h"
#include "lsf_check.h"
#include "decode_residual.h"
#include "unpack_bits.h"
#include "hp_output.h"
#ifndef WEBRTC_BIG_ENDIAN
#include "swap_bytes.h"
#endif
/*----------------------------------------------------------------*
* main decoder function
*---------------------------------------------------------------*/
void WebRtcIlbcfix_DecodeImpl(
WebRtc_Word16 *decblock, /* (o) decoded signal block */
WebRtc_UWord16 *bytes, /* (i) encoded signal bits */
iLBC_Dec_Inst_t *iLBCdec_inst, /* (i/o) the decoder state
structure */
WebRtc_Word16 mode /* (i) 0: bad packet, PLC,
1: normal */
) {
int i;
WebRtc_Word16 order_plus_one;
WebRtc_Word16 last_bit;
WebRtc_Word16 *data;
/* Stack based */
WebRtc_Word16 decresidual[BLOCKL_MAX];
WebRtc_Word16 PLCresidual[BLOCKL_MAX + LPC_FILTERORDER];
WebRtc_Word16 syntdenum[NSUB_MAX*(LPC_FILTERORDER+1)];
WebRtc_Word16 PLClpc[LPC_FILTERORDER + 1];
iLBC_bits *iLBCbits_inst = (iLBC_bits*)PLCresidual;
/* Reuse some non-overlapping buffers in order to save stack memory */
data = &PLCresidual[LPC_FILTERORDER];
if (mode>0) { /* the data are good */
/* decode data */
#ifndef WEBRTC_BIG_ENDIAN
WebRtcIlbcfix_SwapBytes((WebRtc_UWord16*)bytes, iLBCdec_inst->no_of_words);
#endif
/* Unpacketize bits into parameters */
last_bit = WebRtcIlbcfix_UnpackBits(bytes, iLBCbits_inst, iLBCdec_inst->mode);
#ifndef WEBRTC_BIG_ENDIAN
/* Swap back so that the input vector "bytes" is unchanged */
WebRtcIlbcfix_SwapBytes((WebRtc_UWord16*)bytes, iLBCdec_inst->no_of_words);
#endif
/* Check for bit errors */
if (iLBCbits_inst->startIdx<1)
mode = 0;
if ((iLBCdec_inst->mode==20) && (iLBCbits_inst->startIdx>3))
mode = 0;
if ((iLBCdec_inst->mode==30) && (iLBCbits_inst->startIdx>5))
mode = 0;
if (last_bit==1)
mode = 0;
if (mode==1) { /* No bit errors were detected, continue decoding */
/* Stack based */
WebRtc_Word16 lsfdeq[LPC_FILTERORDER*LPC_N_MAX];
WebRtc_Word16 weightdenum[(LPC_FILTERORDER + 1)*NSUB_MAX];
/* adjust index */
WebRtcIlbcfix_IndexConvDec(iLBCbits_inst->cb_index);
/* decode the lsf */
WebRtcIlbcfix_SimpleLsfDeQ(lsfdeq, (WebRtc_Word16*)(iLBCbits_inst->lsf), iLBCdec_inst->lpc_n);
WebRtcIlbcfix_LsfCheck(lsfdeq, LPC_FILTERORDER, iLBCdec_inst->lpc_n);
WebRtcIlbcfix_DecoderInterpolateLsp(syntdenum, weightdenum,
lsfdeq, LPC_FILTERORDER, iLBCdec_inst);
/* Decode the residual using the cb and gain indexes */
WebRtcIlbcfix_DecodeResidual(iLBCdec_inst, iLBCbits_inst, decresidual, syntdenum);
/* preparing the plc for a future loss! */
WebRtcIlbcfix_DoThePlc( PLCresidual, PLClpc, 0,
decresidual, syntdenum + (LPC_FILTERORDER + 1)*(iLBCdec_inst->nsub - 1),
(WebRtc_Word16)(iLBCdec_inst->last_lag), iLBCdec_inst);
/* Use the output from doThePLC */
WEBRTC_SPL_MEMCPY_W16(decresidual, PLCresidual, iLBCdec_inst->blockl);
}
}
if (mode == 0) {
/* the data is bad (either a PLC call
* was made or a bit error was detected)
*/
/* packet loss conceal */
WebRtcIlbcfix_DoThePlc( PLCresidual, PLClpc, 1,
decresidual, syntdenum, (WebRtc_Word16)(iLBCdec_inst->last_lag), iLBCdec_inst);
WEBRTC_SPL_MEMCPY_W16(decresidual, PLCresidual, iLBCdec_inst->blockl);
order_plus_one = LPC_FILTERORDER + 1;
for (i = 0; i < iLBCdec_inst->nsub; i++) {
WEBRTC_SPL_MEMCPY_W16(syntdenum+(i*order_plus_one),
PLClpc, order_plus_one);
}
}
if ((*iLBCdec_inst).use_enhancer == 1) { /* Enhancer activated */
/* Update the filter and filter coefficients if there was a packet loss */
if (iLBCdec_inst->prev_enh_pl==2) {
for (i=0;i<iLBCdec_inst->nsub;i++) {
WEBRTC_SPL_MEMCPY_W16(&(iLBCdec_inst->old_syntdenum[i*(LPC_FILTERORDER+1)]),
syntdenum, (LPC_FILTERORDER+1));
}
}
/* post filtering */
(*iLBCdec_inst).last_lag =
WebRtcIlbcfix_EnhancerInterface(data, decresidual, iLBCdec_inst);
/* synthesis filtering */
/* Set up the filter state */
WEBRTC_SPL_MEMCPY_W16(&data[-LPC_FILTERORDER], iLBCdec_inst->syntMem, LPC_FILTERORDER);
if (iLBCdec_inst->mode==20) {
/* Enhancer has 40 samples delay */
i=0;
WebRtcSpl_FilterARFastQ12(
data, data,
iLBCdec_inst->old_syntdenum + (i+iLBCdec_inst->nsub-1)*(LPC_FILTERORDER+1),
LPC_FILTERORDER+1, SUBL);
for (i=1; i < iLBCdec_inst->nsub; i++) {
WebRtcSpl_FilterARFastQ12(
data+i*SUBL, data+i*SUBL,
syntdenum+(i-1)*(LPC_FILTERORDER+1),
LPC_FILTERORDER+1, SUBL);
}
} else if (iLBCdec_inst->mode==30) {
/* Enhancer has 80 samples delay */
for (i=0; i < 2; i++) {
WebRtcSpl_FilterARFastQ12(
data+i*SUBL, data+i*SUBL,
iLBCdec_inst->old_syntdenum + (i+4)*(LPC_FILTERORDER+1),
LPC_FILTERORDER+1, SUBL);
}
for (i=2; i < iLBCdec_inst->nsub; i++) {
WebRtcSpl_FilterARFastQ12(
data+i*SUBL, data+i*SUBL,
syntdenum+(i-2)*(LPC_FILTERORDER+1),
LPC_FILTERORDER+1, SUBL);
}
}
/* Save the filter state */
WEBRTC_SPL_MEMCPY_W16(iLBCdec_inst->syntMem, &data[iLBCdec_inst->blockl-LPC_FILTERORDER], LPC_FILTERORDER);
} else { /* Enhancer not activated */
WebRtc_Word16 lag;
/* Find last lag (since the enhancer is not called to give this info) */
lag = 20;
if (iLBCdec_inst->mode==20) {
lag = (WebRtc_Word16)WebRtcIlbcfix_XcorrCoef(
&decresidual[iLBCdec_inst->blockl-60],
&decresidual[iLBCdec_inst->blockl-60-lag],
60,
80, lag, -1);
} else {
lag = (WebRtc_Word16)WebRtcIlbcfix_XcorrCoef(
&decresidual[iLBCdec_inst->blockl-ENH_BLOCKL],
&decresidual[iLBCdec_inst->blockl-ENH_BLOCKL-lag],
ENH_BLOCKL,
100, lag, -1);
}
/* Store lag (it is needed if next packet is lost) */
(*iLBCdec_inst).last_lag = (int)lag;
/* copy data and run synthesis filter */
WEBRTC_SPL_MEMCPY_W16(data, decresidual, iLBCdec_inst->blockl);
/* Set up the filter state */
WEBRTC_SPL_MEMCPY_W16(&data[-LPC_FILTERORDER], iLBCdec_inst->syntMem, LPC_FILTERORDER);
for (i=0; i < iLBCdec_inst->nsub; i++) {
WebRtcSpl_FilterARFastQ12(
data+i*SUBL, data+i*SUBL,
syntdenum + i*(LPC_FILTERORDER+1),
LPC_FILTERORDER+1, SUBL);
}
/* Save the filter state */
WEBRTC_SPL_MEMCPY_W16(iLBCdec_inst->syntMem, &data[iLBCdec_inst->blockl-LPC_FILTERORDER], LPC_FILTERORDER);
}
WEBRTC_SPL_MEMCPY_W16(decblock,data,iLBCdec_inst->blockl);
/* High pass filter the signal (with upscaling a factor 2 and saturation) */
WebRtcIlbcfix_HpOutput(decblock, (WebRtc_Word16*)WebRtcIlbcfix_kHpOutCoefs,
iLBCdec_inst->hpimemy, iLBCdec_inst->hpimemx,
iLBCdec_inst->blockl);
WEBRTC_SPL_MEMCPY_W16(iLBCdec_inst->old_syntdenum,
syntdenum, iLBCdec_inst->nsub*(LPC_FILTERORDER+1));
iLBCdec_inst->prev_enh_pl=0;
if (mode==0) { /* PLC was used */
iLBCdec_inst->prev_enh_pl=1;
}
}


@@ -0,0 +1,37 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/******************************************************************
iLBC Speech Coder ANSI-C Source Code
WebRtcIlbcfix_Decode.h
******************************************************************/
#ifndef WEBRTC_MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_DECODE_H_
#define WEBRTC_MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_DECODE_H_
#include "defines.h"
/*----------------------------------------------------------------*
* main decoder function
*---------------------------------------------------------------*/
void WebRtcIlbcfix_DecodeImpl(
WebRtc_Word16 *decblock, /* (o) decoded signal block */
WebRtc_UWord16 *bytes, /* (i) encoded signal bits */
iLBC_Dec_Inst_t *iLBCdec_inst, /* (i/o) the decoder state
structure */
WebRtc_Word16 mode /* (i) 0: bad packet, PLC,
1: normal */
);
#endif
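
A minimal calling sketch (hypothetical, not part of this commit): it assumes the decoder instance has already been initialized elsewhere for the matching frame-size mode, and only illustrates how the mode argument selects between normal decoding and concealment of a lost frame.

#include "defines.h"
#include "decode.h"

/* Hypothetical illustration only */
static void decode_or_conceal(iLBC_Dec_Inst_t *dec,
WebRtc_UWord16 payload[NO_OF_WORDS_30MS],
WebRtc_Word16 packet_received, /* 0 = lost, 1 = received */
WebRtc_Word16 pcm_out[BLOCKL_MAX]) {
/* mode 1 decodes 'payload' normally, mode 0 runs packet loss concealment */
WebRtcIlbcfix_DecodeImpl(pcm_out, payload, dec, packet_received);
}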


@@ -0,0 +1,189 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/******************************************************************
iLBC Speech Coder ANSI-C Source Code
WebRtcIlbcfix_DecodeResidual.c
******************************************************************/
#include "defines.h"
#include "state_construct.h"
#include "cb_construct.h"
#include "index_conv_dec.h"
#include "do_plc.h"
#include "constants.h"
#include "enhancer_interface.h"
#include "xcorr_coef.h"
#include "lsf_check.h"
/*----------------------------------------------------------------*
* frame residual decoder function (subroutine to iLBC_decode)
*---------------------------------------------------------------*/
void WebRtcIlbcfix_DecodeResidual(
iLBC_Dec_Inst_t *iLBCdec_inst,
/* (i/o) the decoder state structure */
iLBC_bits *iLBC_encbits, /* (i/o) Encoded bits, which are used
for the decoding */
WebRtc_Word16 *decresidual, /* (o) decoded residual frame */
WebRtc_Word16 *syntdenum /* (i) the decoded synthesis filter
coefficients */
) {
WebRtc_Word16 meml_gotten, Nfor, Nback, diff, start_pos;
WebRtc_Word16 subcount, subframe;
WebRtc_Word16 *reverseDecresidual = iLBCdec_inst->enh_buf; /* Reversed decoded data, used for decoding backwards in time (reuse memory in state) */
WebRtc_Word16 *memVec = iLBCdec_inst->prevResidual; /* Memory for codebook and filter state (reuse memory in state) */
WebRtc_Word16 *mem = &memVec[CB_HALFFILTERLEN]; /* Memory for codebook */
diff = STATE_LEN - iLBCdec_inst->state_short_len;
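/* diff = STATE_LEN - state_short_len: 80-57 = 23 samples for 20 ms
frames and 80-58 = 22 samples for 30 ms frames */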
if (iLBC_encbits->state_first == 1) {
start_pos = (iLBC_encbits->startIdx-1)*SUBL;
} else {
start_pos = (iLBC_encbits->startIdx-1)*SUBL + diff;
}
/* decode scalar part of start state */
WebRtcIlbcfix_StateConstruct(iLBC_encbits->idxForMax,
iLBC_encbits->idxVec, &syntdenum[(iLBC_encbits->startIdx-1)*(LPC_FILTERORDER+1)],
&decresidual[start_pos], iLBCdec_inst->state_short_len
);
if (iLBC_encbits->state_first) { /* put adaptive part in the end */
/* setup memory */
WebRtcSpl_MemSetW16(mem, 0, (WebRtc_Word16)(CB_MEML-iLBCdec_inst->state_short_len));
WEBRTC_SPL_MEMCPY_W16(mem+CB_MEML-iLBCdec_inst->state_short_len, decresidual+start_pos,
iLBCdec_inst->state_short_len);
/* construct decoded vector */
WebRtcIlbcfix_CbConstruct(
&decresidual[start_pos+iLBCdec_inst->state_short_len],
iLBC_encbits->cb_index, iLBC_encbits->gain_index,
mem+CB_MEML-ST_MEM_L_TBL,
ST_MEM_L_TBL, (WebRtc_Word16)diff
);
}
else {/* put adaptive part in the beginning */
/* create reversed vectors for prediction */
WebRtcSpl_MemCpyReversedOrder(reverseDecresidual+diff,
&decresidual[(iLBC_encbits->startIdx+1)*SUBL-1-STATE_LEN], diff);
/* setup memory */
meml_gotten = iLBCdec_inst->state_short_len;
WebRtcSpl_MemCpyReversedOrder(mem+CB_MEML-1,
decresidual+start_pos, meml_gotten);
WebRtcSpl_MemSetW16(mem, 0, (WebRtc_Word16)(CB_MEML-meml_gotten));
/* construct decoded vector */
WebRtcIlbcfix_CbConstruct(
reverseDecresidual,
iLBC_encbits->cb_index, iLBC_encbits->gain_index,
mem+CB_MEML-ST_MEM_L_TBL,
ST_MEM_L_TBL, diff
);
/* get decoded residual from reversed vector */
WebRtcSpl_MemCpyReversedOrder(&decresidual[start_pos-1],
reverseDecresidual, diff);
}
/* counter for predicted subframes */
subcount=1;
/* forward prediction of subframes */
Nfor = iLBCdec_inst->nsub-iLBC_encbits->startIdx-1;
if( Nfor > 0 ) {
/* setup memory */
WebRtcSpl_MemSetW16(mem, 0, CB_MEML-STATE_LEN);
WEBRTC_SPL_MEMCPY_W16(mem+CB_MEML-STATE_LEN,
decresidual+(iLBC_encbits->startIdx-1)*SUBL, STATE_LEN);
/* loop over subframes to encode */
for (subframe=0; subframe<Nfor; subframe++) {
/* construct decoded vector */
WebRtcIlbcfix_CbConstruct(
&decresidual[(iLBC_encbits->startIdx+1+subframe)*SUBL],
iLBC_encbits->cb_index+subcount*CB_NSTAGES,
iLBC_encbits->gain_index+subcount*CB_NSTAGES,
mem, MEM_LF_TBL, SUBL
);
/* update memory */
WEBRTC_SPL_MEMMOVE_W16(mem, mem+SUBL, CB_MEML-SUBL);
WEBRTC_SPL_MEMCPY_W16(mem+CB_MEML-SUBL,
&decresidual[(iLBC_encbits->startIdx+1+subframe)*SUBL], SUBL);
subcount++;
}
}
/* backward prediction of subframes */
Nback = iLBC_encbits->startIdx-1;
if( Nback > 0 ){
/* setup memory */
meml_gotten = SUBL*(iLBCdec_inst->nsub+1-iLBC_encbits->startIdx);
if( meml_gotten > CB_MEML ) {
meml_gotten=CB_MEML;
}
WebRtcSpl_MemCpyReversedOrder(mem+CB_MEML-1,
decresidual+(iLBC_encbits->startIdx-1)*SUBL, meml_gotten);
WebRtcSpl_MemSetW16(mem, 0, (WebRtc_Word16)(CB_MEML-meml_gotten));
/* loop over subframes to decode */
for (subframe=0; subframe<Nback; subframe++) {
/* construct decoded vector */
WebRtcIlbcfix_CbConstruct(
&reverseDecresidual[subframe*SUBL],
iLBC_encbits->cb_index+subcount*CB_NSTAGES,
iLBC_encbits->gain_index+subcount*CB_NSTAGES,
mem, MEM_LF_TBL, SUBL
);
/* update memory */
WEBRTC_SPL_MEMMOVE_W16(mem, mem+SUBL, CB_MEML-SUBL);
WEBRTC_SPL_MEMCPY_W16(mem+CB_MEML-SUBL,
&reverseDecresidual[subframe*SUBL], SUBL);
subcount++;
}
/* get decoded residual from reversed vector */
WebRtcSpl_MemCpyReversedOrder(decresidual+SUBL*Nback-1,
reverseDecresidual, SUBL*Nback);
}
}


@@ -0,0 +1,38 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/******************************************************************
iLBC Speech Coder ANSI-C Source Code
WebRtcIlbcfix_DecodeResidual.h
******************************************************************/
#ifndef WEBRTC_MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_DECODE_RESIDUAL_H_
#define WEBRTC_MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_DECODE_RESIDUAL_H_
#include "defines.h"
/*----------------------------------------------------------------*
* frame residual decoder function (subroutine to iLBC_decode)
*---------------------------------------------------------------*/
void WebRtcIlbcfix_DecodeResidual(
iLBC_Dec_Inst_t *iLBCdec_inst,
/* (i/o) the decoder state structure */
iLBC_bits *iLBC_encbits, /* (i/o) Encoded bits, which are used
for the decoding */
WebRtc_Word16 *decresidual, /* (o) decoded residual frame */
WebRtc_Word16 *syntdenum /* (i) the decoded synthesis filter
coefficients */
);
#endif


@@ -0,0 +1,82 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/******************************************************************
iLBC Speech Coder ANSI-C Source Code
WebRtcIlbcfix_DecoderInterpolateLsp.c
******************************************************************/
#include "lsf_interpolate_to_poly_dec.h"
#include "bw_expand.h"
#include "defines.h"
#include "constants.h"
/*----------------------------------------------------------------*
* obtain synthesis and weighting filters from lsf coefficients
*---------------------------------------------------------------*/
void WebRtcIlbcfix_DecoderInterpolateLsp(
WebRtc_Word16 *syntdenum, /* (o) synthesis filter coefficients */
WebRtc_Word16 *weightdenum, /* (o) weighting denominator
coefficients */
WebRtc_Word16 *lsfdeq, /* (i) dequantized lsf coefficients */
WebRtc_Word16 length, /* (i) length of lsf coefficient vector */
iLBC_Dec_Inst_t *iLBCdec_inst
/* (i) the decoder state structure */
){
int i, pos, lp_length;
WebRtc_Word16 lp[LPC_FILTERORDER + 1], *lsfdeq2;
lsfdeq2 = lsfdeq + length;
lp_length = length + 1;
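/* syntdenum and weightdenum receive one (LPC_FILTERORDER+1)-coefficient
filter per subframe, stored consecutively with stride lp_length */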
if (iLBCdec_inst->mode==30) {
/* subframe 1: Interpolation between old and first LSF */
WebRtcIlbcfix_LspInterpolate2PolyDec(lp, (*iLBCdec_inst).lsfdeqold, lsfdeq,
WebRtcIlbcfix_kLsfWeight30ms[0], length);
WEBRTC_SPL_MEMCPY_W16(syntdenum,lp,lp_length);
WebRtcIlbcfix_BwExpand(weightdenum, lp, (WebRtc_Word16*)WebRtcIlbcfix_kLpcChirpSyntDenum, (WebRtc_Word16)lp_length);
/* subframes 2 to 6: interpolation between first and last LSF */
pos = lp_length;
for (i = 1; i < 6; i++) {
WebRtcIlbcfix_LspInterpolate2PolyDec(lp, lsfdeq, lsfdeq2,
WebRtcIlbcfix_kLsfWeight30ms[i], length);
WEBRTC_SPL_MEMCPY_W16(syntdenum + pos,lp,lp_length);
WebRtcIlbcfix_BwExpand(weightdenum + pos, lp,
(WebRtc_Word16*)WebRtcIlbcfix_kLpcChirpSyntDenum, (WebRtc_Word16)lp_length);
pos += lp_length;
}
} else { /* iLBCdec_inst->mode=20 */
/* subframes 1 to 4: interpolation between old and new LSF */
pos = 0;
for (i = 0; i < iLBCdec_inst->nsub; i++) {
WebRtcIlbcfix_LspInterpolate2PolyDec(lp, iLBCdec_inst->lsfdeqold, lsfdeq,
WebRtcIlbcfix_kLsfWeight20ms[i], length);
WEBRTC_SPL_MEMCPY_W16(syntdenum+pos,lp,lp_length);
WebRtcIlbcfix_BwExpand(weightdenum+pos, lp,
(WebRtc_Word16*)WebRtcIlbcfix_kLpcChirpSyntDenum, (WebRtc_Word16)lp_length);
pos += lp_length;
}
}
/* update memory */
if (iLBCdec_inst->mode==30) {
WEBRTC_SPL_MEMCPY_W16(iLBCdec_inst->lsfdeqold, lsfdeq2, length);
} else {
WEBRTC_SPL_MEMCPY_W16(iLBCdec_inst->lsfdeqold, lsfdeq, length);
}
}


@@ -0,0 +1,38 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/******************************************************************
iLBC Speech Coder ANSI-C Source Code
WebRtcIlbcfix_DecoderInterpolateLsp.h
******************************************************************/
#ifndef WEBRTC_MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_DECODER_INTERPOLATE_LSF_H_
#define WEBRTC_MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_DECODER_INTERPOLATE_LSF_H_
#include "defines.h"
/*----------------------------------------------------------------*
* obtain synthesis and weighting filters from lsf coefficients
*---------------------------------------------------------------*/
void WebRtcIlbcfix_DecoderInterpolateLsp(
WebRtc_Word16 *syntdenum, /* (o) synthesis filter coefficients */
WebRtc_Word16 *weightdenum, /* (o) weighting denominator
coefficients */
WebRtc_Word16 *lsfdeq, /* (i) dequantized lsf coefficients */
WebRtc_Word16 length, /* (i) length of lsf coefficient vector */
iLBC_Dec_Inst_t *iLBCdec_inst
/* (i) the decoder state structure */
);
#endif


@@ -0,0 +1,219 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/******************************************************************
iLBC Speech Coder ANSI-C Source Code
define.h
******************************************************************/
#ifndef WEBRTC_MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_DEFINES_H_
#define WEBRTC_MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_DEFINES_H_
#include "typedefs.h"
#include "signal_processing_library.h"
#include <string.h>
/* general codec settings */
#define FS 8000
#define BLOCKL_20MS 160
#define BLOCKL_30MS 240
#define BLOCKL_MAX 240
#define NSUB_20MS 4
#define NSUB_30MS 6
#define NSUB_MAX 6
#define NASUB_20MS 2
#define NASUB_30MS 4
#define NASUB_MAX 4
#define SUBL 40
#define STATE_LEN 80
#define STATE_SHORT_LEN_30MS 58
#define STATE_SHORT_LEN_20MS 57
/* LPC settings */
#define LPC_FILTERORDER 10
#define LPC_LOOKBACK 60
#define LPC_N_20MS 1
#define LPC_N_30MS 2
#define LPC_N_MAX 2
#define LPC_ASYMDIFF 20
#define LSF_NSPLIT 3
#define LSF_NUMBER_OF_STEPS 4
#define LPC_HALFORDER 5
#define COS_GRID_POINTS 60
/* cb settings */
#define CB_NSTAGES 3
#define CB_EXPAND 2
#define CB_MEML 147
#define CB_FILTERLEN (2*4)
#define CB_HALFFILTERLEN 4
#define CB_RESRANGE 34
#define CB_MAXGAIN_FIXQ6 83 /* error = -0.24% */
#define CB_MAXGAIN_FIXQ14 21299
/* enhancer */
#define ENH_BLOCKL 80 /* block length */
#define ENH_BLOCKL_HALF (ENH_BLOCKL/2)
#define ENH_HL 3 /* 2*ENH_HL+1 is the number of blocks
in said second sequence */
#define ENH_SLOP 2 /* max difference between estimated and
correct pitch period */
#define ENH_PLOCSL 8 /* pitch-estimates and
pitch-locations buffer length */
#define ENH_OVERHANG 2
#define ENH_UPS0 4 /* upsampling rate */
#define ENH_FL0 3 /* 2*FLO+1 is the length of each filter */
#define ENH_FLO_MULT2_PLUS1 7
#define ENH_VECTL (ENH_BLOCKL+2*ENH_FL0)
#define ENH_CORRDIM (2*ENH_SLOP+1)
#define ENH_NBLOCKS (BLOCKL/ENH_BLOCKL)
#define ENH_NBLOCKS_EXTRA 5
#define ENH_NBLOCKS_TOT 8 /* ENH_NBLOCKS+ENH_NBLOCKS_EXTRA */
#define ENH_BUFL (ENH_NBLOCKS_TOT)*ENH_BLOCKL
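/* i.e. 8*80 = 640 samples of enhancer history */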
#define ENH_BUFL_FILTEROVERHEAD 3
#define ENH_A0 819 /* Q14 */
#define ENH_A0_MINUS_A0A0DIV4 848256041 /* Q34 */
#define ENH_A0DIV2 26843546 /* Q30 */
/* PLC */
/* Down sampling */
#define FILTERORDER_DS_PLUS1 7
#define DELAY_DS 3
#define FACTOR_DS 2
/* bit stream defs */
#define NO_OF_BYTES_20MS 38
#define NO_OF_BYTES_30MS 50
#define NO_OF_WORDS_20MS 19
#define NO_OF_WORDS_30MS 25
#define STATE_BITS 3
#define BYTE_LEN 8
#define ULP_CLASSES 3
/* help parameters */
#define TWO_PI_FIX 25736 /* Q12 */
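/* e.g. 2*pi in Q12: 2*pi*4096 = 25735.9, rounded to 25736 */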
/* Constants for codebook search and creation */
#define ST_MEM_L_TBL 85
#define MEM_LF_TBL 147
/* Struct for the bits */
typedef struct iLBC_bits_t_ {
WebRtc_Word16 lsf[LSF_NSPLIT*LPC_N_MAX];
WebRtc_Word16 cb_index[CB_NSTAGES*(NASUB_MAX+1)]; /* First CB_NSTAGES values contain the extra CB index */
WebRtc_Word16 gain_index[CB_NSTAGES*(NASUB_MAX+1)]; /* First CB_NSTAGES values contain the extra CB gain */
WebRtc_Word16 idxForMax;
WebRtc_Word16 state_first;
WebRtc_Word16 idxVec[STATE_SHORT_LEN_30MS];
WebRtc_Word16 firstbits;
WebRtc_Word16 startIdx;
} iLBC_bits;
/* type definition encoder instance */
typedef struct iLBC_Enc_Inst_t_ {
/* flag for frame size mode */
WebRtc_Word16 mode;
/* basic parameters for different frame sizes */
WebRtc_Word16 blockl;
WebRtc_Word16 nsub;
WebRtc_Word16 nasub;
WebRtc_Word16 no_of_bytes, no_of_words;
WebRtc_Word16 lpc_n;
WebRtc_Word16 state_short_len;
/* analysis filter state */
WebRtc_Word16 anaMem[LPC_FILTERORDER];
/* Fix-point old lsf parameters for interpolation */
WebRtc_Word16 lsfold[LPC_FILTERORDER];
WebRtc_Word16 lsfdeqold[LPC_FILTERORDER];
/* signal buffer for LP analysis */
WebRtc_Word16 lpc_buffer[LPC_LOOKBACK + BLOCKL_MAX];
/* state of input HP filter */
WebRtc_Word16 hpimemx[2];
WebRtc_Word16 hpimemy[4];
#ifdef SPLIT_10MS
WebRtc_Word16 weightdenumbuf[66];
WebRtc_Word16 past_samples[160];
WebRtc_UWord16 bytes[25];
WebRtc_Word16 section;
WebRtc_Word16 Nfor_flag;
WebRtc_Word16 Nback_flag;
WebRtc_Word16 start_pos;
WebRtc_Word16 diff;
#endif
} iLBC_Enc_Inst_t;
/* type definition decoder instance */
typedef struct iLBC_Dec_Inst_t_ {
/* flag for frame size mode */
WebRtc_Word16 mode;
/* basic parameters for different frame sizes */
WebRtc_Word16 blockl;
WebRtc_Word16 nsub;
WebRtc_Word16 nasub;
WebRtc_Word16 no_of_bytes, no_of_words;
WebRtc_Word16 lpc_n;
WebRtc_Word16 state_short_len;
/* synthesis filter state */
WebRtc_Word16 syntMem[LPC_FILTERORDER];
/* old LSF for interpolation */
WebRtc_Word16 lsfdeqold[LPC_FILTERORDER];
/* pitch lag estimated in enhancer and used in PLC */
int last_lag;
/* PLC state information */
int consPLICount, prev_enh_pl;
WebRtc_Word16 perSquare;
WebRtc_Word16 prevScale, prevPLI;
WebRtc_Word16 prevLag, prevLpc[LPC_FILTERORDER+1];
WebRtc_Word16 prevResidual[NSUB_MAX*SUBL];
WebRtc_Word16 seed;
/* previous synthesis filter parameters */
WebRtc_Word16 old_syntdenum[(LPC_FILTERORDER + 1)*NSUB_MAX];
/* state of output HP filter */
WebRtc_Word16 hpimemx[2];
WebRtc_Word16 hpimemy[4];
/* enhancer state information */
int use_enhancer;
WebRtc_Word16 enh_buf[ENH_BUFL+ENH_BUFL_FILTEROVERHEAD];
WebRtc_Word16 enh_period[ENH_NBLOCKS_TOT];
} iLBC_Dec_Inst_t;
#endif


@@ -0,0 +1,308 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/******************************************************************
iLBC Speech Coder ANSI-C Source Code
WebRtcIlbcfix_DoThePlc.c
******************************************************************/
#include "defines.h"
#include "constants.h"
#include "comp_corr.h"
#include "bw_expand.h"
/*----------------------------------------------------------------*
* Packet loss concealment routine. Conceals a residual signal
* and LP parameters. If no packet loss, update state.
*---------------------------------------------------------------*/
void WebRtcIlbcfix_DoThePlc(
WebRtc_Word16 *PLCresidual, /* (o) concealed residual */
WebRtc_Word16 *PLClpc, /* (o) concealed LP parameters */
WebRtc_Word16 PLI, /* (i) packet loss indicator
0 - no PL, 1 = PL */
WebRtc_Word16 *decresidual, /* (i) decoded residual */
WebRtc_Word16 *lpc, /* (i) decoded LPC (only used for no PL) */
WebRtc_Word16 inlag, /* (i) pitch lag */
iLBC_Dec_Inst_t *iLBCdec_inst
/* (i/o) decoder instance */
){
WebRtc_Word16 i, pick;
WebRtc_Word32 cross, ener, cross_comp, ener_comp = 0;
WebRtc_Word32 measure, maxMeasure, energy;
WebRtc_Word16 max, crossSquareMax, crossSquare;
WebRtc_Word16 j, lag, tmp1, tmp2, randlag;
WebRtc_Word16 shift1, shift2, shift3, shiftMax;
WebRtc_Word16 scale3;
WebRtc_Word16 corrLen;
WebRtc_Word32 tmpW32, tmp2W32;
WebRtc_Word16 use_gain;
WebRtc_Word16 tot_gain;
WebRtc_Word16 max_perSquare;
WebRtc_Word16 scale1, scale2;
WebRtc_Word16 totscale;
WebRtc_Word32 nom;
WebRtc_Word16 denom;
WebRtc_Word16 pitchfact;
WebRtc_Word16 use_lag;
int ind;
WebRtc_Word16 randvec[BLOCKL_MAX];
/* Packet Loss */
if (PLI == 1) {
(*iLBCdec_inst).consPLICount += 1;
/* if previous frame not lost,
determine pitch pred. gain */
if (iLBCdec_inst->prevPLI != 1) {
/* At most 60 samples are correlated; preserve as much
accuracy as possible without overflow */
max = WebRtcSpl_MaxAbsValueW16((*iLBCdec_inst).prevResidual, (WebRtc_Word16)iLBCdec_inst->blockl);
scale3 = (WebRtcSpl_GetSizeInBits(max)<<1) - 25;
if (scale3 < 0) {
scale3 = 0;
}
/* Store scale for use when interpolating between the
* concealment and the received packet */
iLBCdec_inst->prevScale = scale3;
/* Search around the previous lag +/-3 to find the
best pitch period */
lag = inlag - 3;
/* Guard against getting outside the frame */
corrLen = WEBRTC_SPL_MIN(60, iLBCdec_inst->blockl-(inlag+3));
WebRtcIlbcfix_CompCorr( &cross, &ener,
iLBCdec_inst->prevResidual, lag, iLBCdec_inst->blockl, corrLen, scale3);
/* Normalize and store cross^2 and the number of shifts */
shiftMax = WebRtcSpl_GetSizeInBits(WEBRTC_SPL_ABS_W32(cross))-15;
crossSquareMax = (WebRtc_Word16)WEBRTC_SPL_MUL_16_16_RSFT(WEBRTC_SPL_SHIFT_W32(cross, -shiftMax),
WEBRTC_SPL_SHIFT_W32(cross, -shiftMax), 15);
for (j=inlag-2;j<=inlag+3;j++) {
WebRtcIlbcfix_CompCorr( &cross_comp, &ener_comp,
iLBCdec_inst->prevResidual, j, iLBCdec_inst->blockl, corrLen, scale3);
/* Use the criterion (corr*corr)/energy to decide whether
this lag is better or not. To avoid the division,
do a cross multiplication */
shift1 = WebRtcSpl_GetSizeInBits(WEBRTC_SPL_ABS_W32(cross_comp))-15;
crossSquare = (WebRtc_Word16)WEBRTC_SPL_MUL_16_16_RSFT(WEBRTC_SPL_SHIFT_W32(cross_comp, -shift1),
WEBRTC_SPL_SHIFT_W32(cross_comp, -shift1), 15);
shift2 = WebRtcSpl_GetSizeInBits(ener)-15;
measure = WEBRTC_SPL_MUL_16_16(WEBRTC_SPL_SHIFT_W32(ener, -shift2),
crossSquare);
shift3 = WebRtcSpl_GetSizeInBits(ener_comp)-15;
maxMeasure = WEBRTC_SPL_MUL_16_16(WEBRTC_SPL_SHIFT_W32(ener_comp, -shift3),
crossSquareMax);
/* Calculate shift value, so that the two measures can
be put in the same Q domain */
if(((shiftMax<<1)+shift3) > ((shift1<<1)+shift2)) {
tmp1 = WEBRTC_SPL_MIN(31, (shiftMax<<1)+shift3-(shift1<<1)-shift2);
tmp2 = 0;
} else {
tmp1 = 0;
tmp2 = WEBRTC_SPL_MIN(31, (shift1<<1)+shift2-(shiftMax<<1)-shift3);
}
if ((measure>>tmp1) > (maxMeasure>>tmp2)) {
/* New lag is better => record lag, measure and domain */
lag = j;
crossSquareMax = crossSquare;
cross = cross_comp;
shiftMax = shift1;
ener = ener_comp;
}
}
/* Calculate the periodicity for the lag with the maximum correlation.
Definition of the periodicity:
abs(corr(vec1, vec2))/(sqrt(energy(vec1))*sqrt(energy(vec2)))
Work in the Square domain to simplify the calculations
max_perSquare is less than 1 (in Q15)
*/
tmp2W32=WebRtcSpl_DotProductWithScale(&iLBCdec_inst->prevResidual[iLBCdec_inst->blockl-corrLen],
&iLBCdec_inst->prevResidual[iLBCdec_inst->blockl-corrLen],
corrLen, scale3);
if ((tmp2W32>0)&&(ener_comp>0)) {
/* norm energies to WebRtc_Word16, compute the product of the energies and
use the upper WebRtc_Word16 as the denominator */
scale1=(WebRtc_Word16)WebRtcSpl_NormW32(tmp2W32)-16;
tmp1=(WebRtc_Word16)WEBRTC_SPL_SHIFT_W32(tmp2W32, scale1);
scale2=(WebRtc_Word16)WebRtcSpl_NormW32(ener)-16;
tmp2=(WebRtc_Word16)WEBRTC_SPL_SHIFT_W32(ener, scale2);
denom=(WebRtc_Word16)WEBRTC_SPL_MUL_16_16_RSFT(tmp1, tmp2, 16); /* denom in Q(scale1+scale2-16) */
/* Square the cross correlation and norm it such that max_perSquare
will be in Q15 after the division */
totscale = scale1+scale2-1;
tmp1 = (WebRtc_Word16)WEBRTC_SPL_SHIFT_W32(cross, (totscale>>1));
tmp2 = (WebRtc_Word16)WEBRTC_SPL_SHIFT_W32(cross, totscale-(totscale>>1));
nom = WEBRTC_SPL_MUL_16_16(tmp1, tmp2);
max_perSquare = (WebRtc_Word16)WebRtcSpl_DivW32W16(nom, denom);
} else {
max_perSquare = 0;
}
}
/* previous frame lost, use recorded lag and gain */
else {
lag = iLBCdec_inst->prevLag;
max_perSquare = iLBCdec_inst->perSquare;
}
/* Attenuate signal and scale down pitch pred gain if
several frames lost consecutively */
use_gain = 32767; /* 1.0 in Q15 */
if (iLBCdec_inst->consPLICount*iLBCdec_inst->blockl>320) {
use_gain = 29491; /* 0.9 in Q15 */
} else if (iLBCdec_inst->consPLICount*iLBCdec_inst->blockl>640) {
use_gain = 22938; /* 0.7 in Q15 */
} else if (iLBCdec_inst->consPLICount*iLBCdec_inst->blockl>960) {
use_gain = 16384; /* 0.5 in Q15 */
} else if (iLBCdec_inst->consPLICount*iLBCdec_inst->blockl>1280) {
use_gain = 0; /* 0.0 in Q15 */
}
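/* Note: the thresholds are tested in ascending order, so only the first
branch (>320) can ever be taken and use_gain stays at 0.9 for longer
loss bursts; testing the largest threshold first would give the full
0.9/0.7/0.5/0.0 staircase that the constants suggest */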
/* Compute mixing factor of pitch repetition and noise:
for max_per>0.7 set periodicity to 1.0
0.4<max_per<0.7 set periodicity to (max_per-0.4)/(0.7-0.4)
max_per<0.4 set periodicity to 0.0
*/
if (max_perSquare>7868) { /* periodicity > 0.7 (0.7^4=0.2401 in Q15) */
pitchfact = 32767;
} else if (max_perSquare>839) { /* 0.4 < periodicity < 0.7 (0.4^4=0.0256 in Q15) */
/* find best index and interpolate from that */
ind = 5;
while ((max_perSquare<WebRtcIlbcfix_kPlcPerSqr[ind])&&(ind>0)) {
ind--;
}
/* pitch fact is approximated by first order */
tmpW32 = (WebRtc_Word32)WebRtcIlbcfix_kPlcPitchFact[ind] +
WEBRTC_SPL_MUL_16_16_RSFT(WebRtcIlbcfix_kPlcPfSlope[ind], (max_perSquare-WebRtcIlbcfix_kPlcPerSqr[ind]), 11);
pitchfact = (WebRtc_Word16)WEBRTC_SPL_MIN(tmpW32, 32767); /* guard against overflow */
} else { /* periodicity < 0.4 */
pitchfact = 0;
}
/* avoid repetition of the same pitch cycle (buzziness) */
use_lag = lag;
if (lag<80) {
use_lag = 2*lag;
}
/* compute concealed residual */
energy = 0;
for (i=0; i<iLBCdec_inst->blockl; i++) {
/* noise component - 52 < randlag < 117 */
iLBCdec_inst->seed = (WebRtc_Word16)(WEBRTC_SPL_MUL_16_16(iLBCdec_inst->seed, 31821)+(WebRtc_Word32)13849);
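/* 16-bit linear congruential generator: seed = seed*31821 + 13849,
wrapping in 16 bits */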
randlag = 53 + (WebRtc_Word16)(iLBCdec_inst->seed & 63);
pick = i - randlag;
if (pick < 0) {
randvec[i] = iLBCdec_inst->prevResidual[iLBCdec_inst->blockl+pick];
} else {
randvec[i] = iLBCdec_inst->prevResidual[pick];
}
/* pitch repetition component */
pick = i - use_lag;
if (pick < 0) {
PLCresidual[i] = iLBCdec_inst->prevResidual[iLBCdec_inst->blockl+pick];
} else {
PLCresidual[i] = PLCresidual[pick];
}
/* Attenuate the total gain for each 10 ms */
if (i<80) {
tot_gain=use_gain;
} else if (i<160) {
tot_gain=(WebRtc_Word16)WEBRTC_SPL_MUL_16_16_RSFT(31130, use_gain, 15); /* 0.95*use_gain */
} else {
tot_gain=(WebRtc_Word16)WEBRTC_SPL_MUL_16_16_RSFT(29491, use_gain, 15); /* 0.9*use_gain */
}
/* mix noise and pitch repetition */
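/* Q15 weighted sum: pitchfact*pitch_component + (32767-pitchfact)*noise,
with +16384 as rounding before the first >>15; the result is then
scaled by tot_gain (Q15) and shifted down once more */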
PLCresidual[i] = (WebRtc_Word16)WEBRTC_SPL_MUL_16_16_RSFT(tot_gain,
(WebRtc_Word16)WEBRTC_SPL_RSHIFT_W32( (WEBRTC_SPL_MUL_16_16(pitchfact, PLCresidual[i]) +
WEBRTC_SPL_MUL_16_16((32767-pitchfact), randvec[i]) + 16384),
15),
15);
/* Shifting down the result one step extra to ensure that no overflow
will occur */
energy += WEBRTC_SPL_MUL_16_16_RSFT(PLCresidual[i],
PLCresidual[i], (iLBCdec_inst->prevScale+1));
}
/* less than 30 dB, use only noise */
if (energy < (WEBRTC_SPL_SHIFT_W32(((WebRtc_Word32)iLBCdec_inst->blockl*900),-(iLBCdec_inst->prevScale+1)))) {
energy = 0;
for (i=0; i<iLBCdec_inst->blockl; i++) {
PLCresidual[i] = randvec[i];
}
}
/* use the old LPC */
WEBRTC_SPL_MEMCPY_W16(PLClpc, (*iLBCdec_inst).prevLpc, LPC_FILTERORDER+1);
/* Update state in case there are multiple frame losses */
iLBCdec_inst->prevLag = lag;
iLBCdec_inst->perSquare = max_perSquare;
}
/* no packet loss, copy input */
else {
WEBRTC_SPL_MEMCPY_W16(PLCresidual, decresidual, iLBCdec_inst->blockl);
WEBRTC_SPL_MEMCPY_W16(PLClpc, lpc, (LPC_FILTERORDER+1));
iLBCdec_inst->consPLICount = 0;
}
/* update state */
iLBCdec_inst->prevPLI = PLI;
WEBRTC_SPL_MEMCPY_W16(iLBCdec_inst->prevLpc, PLClpc, (LPC_FILTERORDER+1));
WEBRTC_SPL_MEMCPY_W16(iLBCdec_inst->prevResidual, PLCresidual, iLBCdec_inst->blockl);
return;
}


@@ -0,0 +1,41 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/******************************************************************
iLBC Speech Coder ANSI-C Source Code
WebRtcIlbcfix_DoThePlc.h
******************************************************************/
#ifndef WEBRTC_MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_DO_PLC_H_
#define WEBRTC_MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_DO_PLC_H_
#include "defines.h"
/*----------------------------------------------------------------*
* Packet loss concealment routine. Conceals a residual signal
* and LP parameters. If no packet loss, update state.
*---------------------------------------------------------------*/
void WebRtcIlbcfix_DoThePlc(
WebRtc_Word16 *PLCresidual, /* (o) concealed residual */
WebRtc_Word16 *PLClpc, /* (o) concealed LP parameters */
WebRtc_Word16 PLI, /* (i) packet loss indicator
0 - no PL, 1 = PL */
WebRtc_Word16 *decresidual, /* (i) decoded residual */
WebRtc_Word16 *lpc, /* (i) decoded LPC (only used for no PL) */
WebRtc_Word16 inlag, /* (i) pitch lag */
iLBC_Dec_Inst_t *iLBCdec_inst
/* (i/o) decoder instance */
);
#endif


@@ -0,0 +1,518 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/******************************************************************
iLBC Speech Coder ANSI-C Source Code
WebRtcIlbcfix_Encode.c
******************************************************************/
#include "defines.h"
#include "lpc_encode.h"
#include "frame_classify.h"
#include "state_search.h"
#include "state_construct.h"
#include "constants.h"
#include "cb_search.h"
#include "cb_construct.h"
#include "index_conv_enc.h"
#include "pack_bits.h"
#include "hp_input.h"
#ifdef SPLIT_10MS
#include "unpack_bits.h"
#include "index_conv_dec.h"
#endif
#ifndef WEBRTC_BIG_ENDIAN
#include "swap_bytes.h"
#endif
/*----------------------------------------------------------------*
* main encoder function
*---------------------------------------------------------------*/
void WebRtcIlbcfix_EncodeImpl(
WebRtc_UWord16 *bytes, /* (o) encoded data bits iLBC */
WebRtc_Word16 *block, /* (i) speech vector to encode */
iLBC_Enc_Inst_t *iLBCenc_inst /* (i/o) the general encoder
state */
){
int n, meml_gotten, Nfor, Nback;
WebRtc_Word16 diff, start_pos;
int index;
int subcount, subframe;
WebRtc_Word16 start_count, end_count;
WebRtc_Word16 *residual;
WebRtc_Word32 en1, en2;
WebRtc_Word16 scale, max;
WebRtc_Word16 *syntdenum;
WebRtc_Word16 *decresidual;
WebRtc_Word16 *reverseResidual;
WebRtc_Word16 *reverseDecresidual;
/* Stack based */
WebRtc_Word16 weightdenum[(LPC_FILTERORDER + 1)*NSUB_MAX];
WebRtc_Word16 dataVec[BLOCKL_MAX + LPC_FILTERORDER];
WebRtc_Word16 memVec[CB_MEML+CB_FILTERLEN];
WebRtc_Word16 bitsMemory[sizeof(iLBC_bits)/sizeof(WebRtc_Word16)];
iLBC_bits *iLBCbits_inst = (iLBC_bits*)bitsMemory;
#ifdef SPLIT_10MS
WebRtc_Word16 *weightdenumbuf = iLBCenc_inst->weightdenumbuf;
WebRtc_Word16 last_bit;
#endif
WebRtc_Word16 *data = &dataVec[LPC_FILTERORDER];
WebRtc_Word16 *mem = &memVec[CB_HALFFILTERLEN];
/* Reuse some buffers to save stack memory */
residual = &iLBCenc_inst->lpc_buffer[LPC_LOOKBACK+BLOCKL_MAX-iLBCenc_inst->blockl];
syntdenum = mem; /* syntdenum[(LPC_FILTERORDER + 1)*NSUB_MAX] and mem are used non overlapping in the code */
decresidual = residual; /* Already encoded residual is overwritten by the decoded version */
reverseResidual = data; /* data and reverseResidual are used non overlapping in the code */
reverseDecresidual = reverseResidual; /* Already encoded residual is overwritten by the decoded version */
#ifdef SPLIT_10MS
WebRtcSpl_MemSetW16 ( (WebRtc_Word16 *) iLBCbits_inst, 0,
(WebRtc_Word16) (sizeof(iLBC_bits) / sizeof(WebRtc_Word16)) );
start_pos = iLBCenc_inst->start_pos;
diff = iLBCenc_inst->diff;
if (iLBCenc_inst->section != 0){
WEBRTC_SPL_MEMCPY_W16 (weightdenum, weightdenumbuf,
SCRATCH_ENCODE_DATAVEC - SCRATCH_ENCODE_WEIGHTDENUM);
/* Un-Packetize the frame into parameters */
last_bit = WebRtcIlbcfix_UnpackBits (iLBCenc_inst->bytes, iLBCbits_inst, iLBCenc_inst->mode);
if (last_bit)
return;
/* adjust index */
WebRtcIlbcfix_IndexConvDec (iLBCbits_inst->cb_index);
if (iLBCenc_inst->section == 1){
/* Save first 80 samples of a 160/240 sample frame for 20/30msec */
WEBRTC_SPL_MEMCPY_W16 (iLBCenc_inst->past_samples, block, 80);
}
else{ // iLBCenc_inst->section == 2 AND mode = 30ms
/* Save second 80 samples of a 240 sample frame for 30msec */
WEBRTC_SPL_MEMCPY_W16 (iLBCenc_inst->past_samples + 80, block, 80);
}
}
else{ // iLBCenc_inst->section == 0
/* form a complete frame of 160/240 for 20msec/30msec mode */
WEBRTC_SPL_MEMCPY_W16 (data + (iLBCenc_inst->mode * 8) - 80, block, 80);
WEBRTC_SPL_MEMCPY_W16 (data, iLBCenc_inst->past_samples,
(iLBCenc_inst->mode * 8) - 80);
iLBCenc_inst->Nfor_flag = 0;
iLBCenc_inst->Nback_flag = 0;
#else
/* copy input block to data*/
WEBRTC_SPL_MEMCPY_W16(data,block,iLBCenc_inst->blockl);
#endif
/* high pass filtering of input signal and scale down the residual (*0.5) */
WebRtcIlbcfix_HpInput(data, (WebRtc_Word16*)WebRtcIlbcfix_kHpInCoefs,
iLBCenc_inst->hpimemy, iLBCenc_inst->hpimemx,
iLBCenc_inst->blockl);
/* LPC of hp filtered input data */
WebRtcIlbcfix_LpcEncode(syntdenum, weightdenum, iLBCbits_inst->lsf, data,
iLBCenc_inst);
/* Set up state */
WEBRTC_SPL_MEMCPY_W16(dataVec, iLBCenc_inst->anaMem, LPC_FILTERORDER);
/* inverse filter to get residual */
for (n=0; n<iLBCenc_inst->nsub; n++ ) {
WebRtcSpl_FilterMAFastQ12(
&data[n*SUBL], &residual[n*SUBL],
&syntdenum[n*(LPC_FILTERORDER+1)],
LPC_FILTERORDER+1, SUBL);
}
/* Copy the state for next frame */
WEBRTC_SPL_MEMCPY_W16(iLBCenc_inst->anaMem, &data[iLBCenc_inst->blockl-LPC_FILTERORDER], LPC_FILTERORDER);
/* find state location */
iLBCbits_inst->startIdx = WebRtcIlbcfix_FrameClassify(iLBCenc_inst,residual);
/* check if state should be in first or last part of the
two subframes */
index = (iLBCbits_inst->startIdx-1)*SUBL;
max=WebRtcSpl_MaxAbsValueW16(&residual[index], 2*SUBL);
scale=WebRtcSpl_GetSizeInBits(WEBRTC_SPL_MUL_16_16(max,max));
/* Scale to maximum 25 bits so that the MAC won't cause overflow */
scale = scale - 25;
if(scale < 0) {
scale = 0;
}
diff = STATE_LEN - iLBCenc_inst->state_short_len;
en1=WebRtcSpl_DotProductWithScale(&residual[index], &residual[index],
iLBCenc_inst->state_short_len, scale);
index += diff;
en2=WebRtcSpl_DotProductWithScale(&residual[index], &residual[index],
iLBCenc_inst->state_short_len, scale);
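/* en1/en2 are the energies of the first/last state_short_len samples of
the two-subframe window; the start state is placed in the half with
the higher energy */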
if (en1 > en2) {
iLBCbits_inst->state_first = 1;
start_pos = (iLBCbits_inst->startIdx-1)*SUBL;
} else {
iLBCbits_inst->state_first = 0;
start_pos = (iLBCbits_inst->startIdx-1)*SUBL + diff;
}
/* scalar quantization of state */
WebRtcIlbcfix_StateSearch(iLBCenc_inst, iLBCbits_inst, &residual[start_pos],
&syntdenum[(iLBCbits_inst->startIdx-1)*(LPC_FILTERORDER+1)],
&weightdenum[(iLBCbits_inst->startIdx-1)*(LPC_FILTERORDER+1)]);
WebRtcIlbcfix_StateConstruct(iLBCbits_inst->idxForMax, iLBCbits_inst->idxVec,
&syntdenum[(iLBCbits_inst->startIdx-1)*(LPC_FILTERORDER+1)],
&decresidual[start_pos], iLBCenc_inst->state_short_len
);
/* predictive quantization in state */
if (iLBCbits_inst->state_first) { /* put adaptive part in the end */
/* setup memory */
WebRtcSpl_MemSetW16(mem, 0, (WebRtc_Word16)(CB_MEML-iLBCenc_inst->state_short_len));
WEBRTC_SPL_MEMCPY_W16(mem+CB_MEML-iLBCenc_inst->state_short_len,
decresidual+start_pos, iLBCenc_inst->state_short_len);
/* encode subframes */
WebRtcIlbcfix_CbSearch(iLBCenc_inst, iLBCbits_inst->cb_index, iLBCbits_inst->gain_index,
&residual[start_pos+iLBCenc_inst->state_short_len],
mem+CB_MEML-ST_MEM_L_TBL, ST_MEM_L_TBL, diff,
&weightdenum[iLBCbits_inst->startIdx*(LPC_FILTERORDER+1)], 0);
/* construct decoded vector */
WebRtcIlbcfix_CbConstruct(&decresidual[start_pos+iLBCenc_inst->state_short_len],
iLBCbits_inst->cb_index, iLBCbits_inst->gain_index,
mem+CB_MEML-ST_MEM_L_TBL, ST_MEM_L_TBL,
diff
);
}
else { /* put adaptive part in the beginning */
/* create reversed vectors for prediction */
WebRtcSpl_MemCpyReversedOrder(&reverseResidual[diff-1],
&residual[(iLBCbits_inst->startIdx+1)*SUBL-STATE_LEN], diff);
/* setup memory */
meml_gotten = iLBCenc_inst->state_short_len;
WebRtcSpl_MemCpyReversedOrder(&mem[CB_MEML-1], &decresidual[start_pos], meml_gotten);
WebRtcSpl_MemSetW16(mem, 0, (WebRtc_Word16)(CB_MEML-iLBCenc_inst->state_short_len));
/* encode subframes */
WebRtcIlbcfix_CbSearch(iLBCenc_inst, iLBCbits_inst->cb_index, iLBCbits_inst->gain_index,
reverseResidual, mem+CB_MEML-ST_MEM_L_TBL, ST_MEM_L_TBL, diff,
&weightdenum[(iLBCbits_inst->startIdx-1)*(LPC_FILTERORDER+1)],
0);
/* construct decoded vector */
WebRtcIlbcfix_CbConstruct(reverseDecresidual,
iLBCbits_inst->cb_index, iLBCbits_inst->gain_index,
mem+CB_MEML-ST_MEM_L_TBL, ST_MEM_L_TBL,
diff
);
/* get decoded residual from reversed vector */
WebRtcSpl_MemCpyReversedOrder(&decresidual[start_pos-1], reverseDecresidual, diff);
}
#ifdef SPLIT_10MS
iLBCenc_inst->start_pos = start_pos;
iLBCenc_inst->diff = diff;
iLBCenc_inst->section++;
/* adjust index */
WebRtcIlbcfix_IndexConvEnc (iLBCbits_inst->cb_index);
/* Packetize the parameters into the frame */
WebRtcIlbcfix_PackBits (iLBCenc_inst->bytes, iLBCbits_inst, iLBCenc_inst->mode);
WEBRTC_SPL_MEMCPY_W16 (weightdenumbuf, weightdenum,
SCRATCH_ENCODE_DATAVEC - SCRATCH_ENCODE_WEIGHTDENUM);
return;
}
#endif
/* forward prediction of subframes */
Nfor = iLBCenc_inst->nsub-iLBCbits_inst->startIdx-1;
/* counter for predicted subframes */
#ifdef SPLIT_10MS
if (iLBCenc_inst->mode == 20)
{
subcount = 1;
}
if (iLBCenc_inst->mode == 30)
{
if (iLBCenc_inst->section == 1)
{
subcount = 1;
}
if (iLBCenc_inst->section == 2)
{
subcount = 3;
}
}
#else
subcount=1;
#endif
if( Nfor > 0 ){
/* setup memory */
WebRtcSpl_MemSetW16(mem, 0, CB_MEML-STATE_LEN);
WEBRTC_SPL_MEMCPY_W16(mem+CB_MEML-STATE_LEN,
decresidual+(iLBCbits_inst->startIdx-1)*SUBL, STATE_LEN);
#ifdef SPLIT_10MS
if (iLBCenc_inst->Nfor_flag > 0)
{
for (subframe = 0; subframe < WEBRTC_SPL_MIN (Nfor, 2); subframe++)
{
/* update memory */
WEBRTC_SPL_MEMCPY_W16 (mem, mem + SUBL, (CB_MEML - SUBL));
WEBRTC_SPL_MEMCPY_W16 (mem + CB_MEML - SUBL,
&decresidual[(iLBCbits_inst->startIdx + 1 +
subframe) * SUBL], SUBL);
}
}
iLBCenc_inst->Nfor_flag++;
if (iLBCenc_inst->mode == 20)
{
start_count = 0;
end_count = Nfor;
}
if (iLBCenc_inst->mode == 30)
{
if (iLBCenc_inst->section == 1)
{
start_count = 0;
end_count = WEBRTC_SPL_MIN (Nfor, 2);
}
if (iLBCenc_inst->section == 2)
{
start_count = WEBRTC_SPL_MIN (Nfor, 2);
end_count = Nfor;
}
}
#else
start_count = 0;
end_count = (WebRtc_Word16)Nfor;
#endif
/* loop over subframes to encode */
for (subframe = start_count; subframe < end_count; subframe++){
/* encode subframe */
WebRtcIlbcfix_CbSearch(iLBCenc_inst, iLBCbits_inst->cb_index+subcount*CB_NSTAGES,
iLBCbits_inst->gain_index+subcount*CB_NSTAGES,
&residual[(iLBCbits_inst->startIdx+1+subframe)*SUBL],
mem, MEM_LF_TBL, SUBL,
&weightdenum[(iLBCbits_inst->startIdx+1+subframe)*(LPC_FILTERORDER+1)],
(WebRtc_Word16)subcount);
/* construct decoded vector */
WebRtcIlbcfix_CbConstruct(&decresidual[(iLBCbits_inst->startIdx+1+subframe)*SUBL],
iLBCbits_inst->cb_index+subcount*CB_NSTAGES,
iLBCbits_inst->gain_index+subcount*CB_NSTAGES,
mem, MEM_LF_TBL,
SUBL
);
/* update memory */
WEBRTC_SPL_MEMMOVE_W16(mem, mem+SUBL, (CB_MEML-SUBL));
WEBRTC_SPL_MEMCPY_W16(mem+CB_MEML-SUBL,
&decresidual[(iLBCbits_inst->startIdx+1+subframe)*SUBL], SUBL);
subcount++;
}
}
#ifdef SPLIT_10MS
if ((iLBCenc_inst->section == 1) &&
(iLBCenc_inst->mode == 30) && (Nfor > 0) && (end_count == 2))
{
iLBCenc_inst->section++;
/* adjust index */
WebRtcIlbcfix_IndexConvEnc (iLBCbits_inst->cb_index);
/* Packetize the parameters into the frame */
WebRtcIlbcfix_PackBits (iLBCenc_inst->bytes, iLBCbits_inst, iLBCenc_inst->mode);
WEBRTC_SPL_MEMCPY_W16 (weightdenumbuf, weightdenum,
SCRATCH_ENCODE_DATAVEC - SCRATCH_ENCODE_WEIGHTDENUM);
return;
}
#endif
/* backward prediction of subframes */
Nback = iLBCbits_inst->startIdx-1;
if( Nback > 0 ){
/* create reverse order vectors
(The decresidual does not need to be copied since it is
contained in the same vector as the residual)
*/
WebRtcSpl_MemCpyReversedOrder(&reverseResidual[Nback*SUBL-1], residual, Nback*SUBL);
/* setup memory */
meml_gotten = SUBL*(iLBCenc_inst->nsub+1-iLBCbits_inst->startIdx);
if( meml_gotten > CB_MEML ) {
meml_gotten=CB_MEML;
}
WebRtcSpl_MemCpyReversedOrder(&mem[CB_MEML-1], &decresidual[Nback*SUBL], meml_gotten);
WebRtcSpl_MemSetW16(mem, 0, (WebRtc_Word16)(CB_MEML-meml_gotten));
#ifdef SPLIT_10MS
if (iLBCenc_inst->Nback_flag > 0)
{
for (subframe = 0; subframe < WEBRTC_SPL_MAX (2 - Nfor, 0); subframe++)
{
/* update memory */
WEBRTC_SPL_MEMCPY_W16 (mem, mem + SUBL, (CB_MEML - SUBL));
WEBRTC_SPL_MEMCPY_W16 (mem + CB_MEML - SUBL,
&reverseDecresidual[subframe * SUBL], SUBL);
}
}
iLBCenc_inst->Nback_flag++;
if (iLBCenc_inst->mode == 20)
{
start_count = 0;
end_count = Nback;
}
if (iLBCenc_inst->mode == 30)
{
if (iLBCenc_inst->section == 1)
{
start_count = 0;
end_count = WEBRTC_SPL_MAX (2 - Nfor, 0);
}
if (iLBCenc_inst->section == 2)
{
start_count = WEBRTC_SPL_MAX (2 - Nfor, 0);
end_count = Nback;
}
}
#else
start_count = 0;
end_count = (WebRtc_Word16)Nback;
#endif
/* loop over subframes to encode */
for (subframe = start_count; subframe < end_count; subframe++){
/* encode subframe */
WebRtcIlbcfix_CbSearch(iLBCenc_inst, iLBCbits_inst->cb_index+subcount*CB_NSTAGES,
iLBCbits_inst->gain_index+subcount*CB_NSTAGES, &reverseResidual[subframe*SUBL],
mem, MEM_LF_TBL, SUBL,
&weightdenum[(iLBCbits_inst->startIdx-2-subframe)*(LPC_FILTERORDER+1)],
(WebRtc_Word16)subcount);
/* construct decoded vector */
WebRtcIlbcfix_CbConstruct(&reverseDecresidual[subframe*SUBL],
iLBCbits_inst->cb_index+subcount*CB_NSTAGES,
iLBCbits_inst->gain_index+subcount*CB_NSTAGES,
mem, MEM_LF_TBL, SUBL
);
/* update memory */
WEBRTC_SPL_MEMMOVE_W16(mem, mem+SUBL, (CB_MEML-SUBL));
WEBRTC_SPL_MEMCPY_W16(mem+CB_MEML-SUBL,
&reverseDecresidual[subframe*SUBL], SUBL);
subcount++;
}
/* get decoded residual from reversed vector */
WebRtcSpl_MemCpyReversedOrder(&decresidual[SUBL*Nback-1], reverseDecresidual, SUBL*Nback);
}
/* end encoding part */
/* adjust index */
WebRtcIlbcfix_IndexConvEnc(iLBCbits_inst->cb_index);
/* Packetize the parameters into the frame */
#ifdef SPLIT_10MS
if( (iLBCenc_inst->mode==30) && (iLBCenc_inst->section==1) ){
WebRtcIlbcfix_PackBits(iLBCenc_inst->bytes, iLBCbits_inst, iLBCenc_inst->mode);
}
else{
WebRtcIlbcfix_PackBits(bytes, iLBCbits_inst, iLBCenc_inst->mode);
}
#else
WebRtcIlbcfix_PackBits(bytes, iLBCbits_inst, iLBCenc_inst->mode);
#endif
#ifndef WEBRTC_BIG_ENDIAN
/* Swap bytes for LITTLE ENDIAN since the packbits()
function assumes a BIG_ENDIAN machine */
#ifdef SPLIT_10MS
if (( (iLBCenc_inst->section == 1) && (iLBCenc_inst->mode == 20) ) ||
( (iLBCenc_inst->section == 2) && (iLBCenc_inst->mode == 30) )){
WebRtcIlbcfix_SwapBytes(bytes, iLBCenc_inst->no_of_words);
}
#else
WebRtcIlbcfix_SwapBytes(bytes, iLBCenc_inst->no_of_words);
#endif
#endif
#ifdef SPLIT_10MS
if (subcount == (iLBCenc_inst->nsub - 1))
{
iLBCenc_inst->section = 0;
}
else
{
iLBCenc_inst->section++;
WEBRTC_SPL_MEMCPY_W16 (weightdenumbuf, weightdenum,
SCRATCH_ENCODE_DATAVEC - SCRATCH_ENCODE_WEIGHTDENUM);
}
#endif
}


@@ -0,0 +1,35 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/******************************************************************
iLBC Speech Coder ANSI-C Source Code
WebRtcIlbcfix_Encode.h
******************************************************************/
#ifndef WEBRTC_MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_ENCODE_H_
#define WEBRTC_MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_ENCODE_H_
#include "defines.h"
/*----------------------------------------------------------------*
* main encoder function
*---------------------------------------------------------------*/
void WebRtcIlbcfix_EncodeImpl(
WebRtc_UWord16 *bytes, /* (o) encoded data bits iLBC */
WebRtc_Word16 *block, /* (i) speech vector to encode */
iLBC_Enc_Inst_t *iLBCenc_inst /* (i/o) the general encoder
state */
);
#endif
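
A minimal calling sketch (hypothetical, not part of this commit): it assumes the encoder instance was already initialized elsewhere for 20 ms mode, and simply shows the buffer sizes implied by defines.h.

#include "defines.h"
#include "encode.h"

/* Hypothetical illustration only */
static void encode_one_20ms_block(iLBC_Enc_Inst_t *enc,
WebRtc_Word16 speech[BLOCKL_20MS]) {
WebRtc_UWord16 payload[NO_OF_WORDS_20MS]; /* 19 words = 38 bytes */
WebRtcIlbcfix_EncodeImpl(payload, speech, enc);
/* payload now holds the packed iLBC parameters for this frame */
}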


@@ -0,0 +1,46 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/******************************************************************
iLBC Speech Coder ANSI-C Source Code
WebRtcIlbcfix_EnergyInverse.c
******************************************************************/
/* Inverts the input energy vector; the result is in the Q29 domain */
#include "energy_inverse.h"
void WebRtcIlbcfix_EnergyInverse(
WebRtc_Word16 *energy, /* (i/o) Energy and inverse
energy (in Q29) */
int noOfEnergies) /* (i) The length of the energy
vector */
{
WebRtc_Word32 Nom=(WebRtc_Word32)0x1FFFFFFF;
WebRtc_Word16 *energyPtr;
int i;
/* Set the minimum energy value to 16384 to avoid overflow */
energyPtr=energy;
for (i=0; i<noOfEnergies; i++) {
(*energyPtr)=WEBRTC_SPL_MAX((*energyPtr),16384);
energyPtr++;
}
/* Calculate inverse energy in Q29 */
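/* e.g. the clamp value 16384 maps to 0x1FFFFFFF/16384 = 32767, the
largest value representable in a WebRtc_Word16 */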
energyPtr=energy;
for (i=0; i<noOfEnergies; i++) {
(*energyPtr) = (WebRtc_Word16)WebRtcSpl_DivW32W16(Nom, (*energyPtr));
energyPtr++;
}
}


@@ -0,0 +1,32 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/******************************************************************
iLBC Speech Coder ANSI-C Source Code
WebRtcIlbcfix_EnergyInverse.h
******************************************************************/
#ifndef WEBRTC_MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_ENERGY_INVERSE_H_
#define WEBRTC_MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_ENERGY_INVERSE_H_
#include "defines.h"
/* Inverts the input energy vector; the result is in the Q29 domain */
void WebRtcIlbcfix_EnergyInverse(
WebRtc_Word16 *energy, /* (i/o) Energy and inverse
energy (in Q29) */
int noOfEnergies); /* (i) The length of the energy
vector */
#endif


@@ -0,0 +1,110 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/******************************************************************
iLBC Speech Coder ANSI-C Source Code
WebRtcIlbcfix_EnhUpsample.c
******************************************************************/
#include "defines.h"
#include "constants.h"
/*----------------------------------------------------------------*
* upsample finite array assuming zeros outside bounds
*---------------------------------------------------------------*/
void WebRtcIlbcfix_EnhUpsample(
WebRtc_Word32 *useq1, /* (o) upsampled output sequence */
WebRtc_Word16 *seq1 /* (i) unupsampled sequence */
){
int j;
WebRtc_Word32 *pu1, *pu11;
WebRtc_Word16 *ps, *w16tmp;
const WebRtc_Word16 *pp;
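/* The sequence is upsampled by a factor ENH_UPS0 = 4; each output phase
uses its own short polyphase filter from WebRtcIlbcfix_kEnhPolyPhaser */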
/* filtering: filter overhangs left side of sequence */
pu1=useq1;
for (j=0;j<ENH_UPS0; j++) {
pu11=pu1;
/* i = 2 */
pp=WebRtcIlbcfix_kEnhPolyPhaser[j]+1;
ps=seq1+2;
(*pu11) = WEBRTC_SPL_MUL_16_16(*ps--,*pp++);
(*pu11) += WEBRTC_SPL_MUL_16_16(*ps--,*pp++);
(*pu11) += WEBRTC_SPL_MUL_16_16(*ps--,*pp++);
pu11+=ENH_UPS0;
/* i = 3 */
pp=WebRtcIlbcfix_kEnhPolyPhaser[j]+1;
ps=seq1+3;
(*pu11) = WEBRTC_SPL_MUL_16_16(*ps--,*pp++);
(*pu11) += WEBRTC_SPL_MUL_16_16(*ps--,*pp++);
(*pu11) += WEBRTC_SPL_MUL_16_16(*ps--,*pp++);
(*pu11) += WEBRTC_SPL_MUL_16_16(*ps--,*pp++);
pu11+=ENH_UPS0;
/* i = 4 */
pp=WebRtcIlbcfix_kEnhPolyPhaser[j]+1;
ps=seq1+4;
(*pu11) = WEBRTC_SPL_MUL_16_16(*ps--,*pp++);
(*pu11) += WEBRTC_SPL_MUL_16_16(*ps--,*pp++);
(*pu11) += WEBRTC_SPL_MUL_16_16(*ps--,*pp++);
(*pu11) += WEBRTC_SPL_MUL_16_16(*ps--,*pp++);
(*pu11) += WEBRTC_SPL_MUL_16_16(*ps--,*pp++);
pu1++;
}
/* filtering: simple convolution=inner products
(not needed since the sequence is so short)
*/
/* filtering: filter overhangs right side of sequence */
/* Code with loops, which is equivalent to the expanded version below
filterlength = 5;
hfl = 2;
for(j=0;j<ENH_UPS0; j++){
pu = useq1 + (filterlength-hfl)*ENH_UPS0 + j;
for(i=1; i<=hfl; i++){
*pu=0;
pp = polyp[j]+i;
ps = seq1+dim1-1;
for(k=0;k<filterlength-i;k++) {
*pu += WEBRTC_SPL_MUL_16_16(*ps--, *pp++);
}
pu+=ENH_UPS0;
}
}
*/
pu1 = useq1 + 12;
w16tmp = seq1+4;
for (j=0;j<ENH_UPS0; j++) {
pu11 = pu1;
/* i = 1 */
pp = WebRtcIlbcfix_kEnhPolyPhaser[j]+2;
ps = w16tmp;
(*pu11) = WEBRTC_SPL_MUL_16_16(*ps--, *pp++);
(*pu11) += WEBRTC_SPL_MUL_16_16(*ps--, *pp++);
(*pu11) += WEBRTC_SPL_MUL_16_16(*ps--, *pp++);
(*pu11) += WEBRTC_SPL_MUL_16_16(*ps--, *pp++);
pu11+=ENH_UPS0;
/* i = 2 */
pp = WebRtcIlbcfix_kEnhPolyPhaser[j]+3;
ps = w16tmp;
(*pu11) = WEBRTC_SPL_MUL_16_16(*ps--, *pp++);
(*pu11) += WEBRTC_SPL_MUL_16_16(*ps--, *pp++);
(*pu11) += WEBRTC_SPL_MUL_16_16(*ps--, *pp++);
pu11+=ENH_UPS0;
pu1++;
}
}


@@ -0,0 +1,33 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/******************************************************************
iLBC Speech Coder ANSI-C Source Code
WebRtcIlbcfix_EnhUpsample.h
******************************************************************/
#ifndef WEBRTC_MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_ENH_UPSAMPLE_H_
#define WEBRTC_MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_ENH_UPSAMPLE_H_
#include "defines.h"
/*----------------------------------------------------------------*
* upsample finite array assuming zeros outside bounds
*---------------------------------------------------------------*/
void WebRtcIlbcfix_EnhUpsample(
WebRtc_Word32 *useq1, /* (o) upsampled output sequence */
WebRtc_Word16 *seq1 /* (i) unupsampled sequence */
);
#endif


@@ -0,0 +1,52 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/******************************************************************
iLBC Speech Coder ANSI-C Source Code
WebRtcIlbcfix_Enhancer.c
******************************************************************/
#include "defines.h"
#include "constants.h"
#include "get_sync_seq.h"
#include "smooth.h"
/*----------------------------------------------------------------*
* perform enhancement on idata+centerStartPos through
* idata+centerStartPos+ENH_BLOCKL-1
*---------------------------------------------------------------*/
void WebRtcIlbcfix_Enhancer(
iLBC_Dec_Inst_t *iLBCdec_inst,
/* (i) Decoder state */
WebRtc_Word16 *odata, /* (o) smoothed block, dimension blockl */
WebRtc_Word16 *idata, /* (i) data buffer used for enhancing */
WebRtc_Word16 idatal, /* (i) dimension idata */
WebRtc_Word16 centerStartPos, /* (i) first sample current block within idata */
WebRtc_Word16 *period, /* (i) pitch period array (pitch backward in time) */
WebRtc_Word16 *plocs, /* (i) locations where period array values valid */
WebRtc_Word16 periodl /* (i) dimension of period and plocs */
){
/* Stack based */
WebRtc_Word16 surround[ENH_BLOCKL];
WebRtcSpl_MemSetW16(surround, 0, ENH_BLOCKL);
/* get said second sequence of segments */
WebRtcIlbcfix_GetSyncSeq(iLBCdec_inst, idata,idatal,centerStartPos,period,plocs,periodl,ENH_HL,surround);
/* compute the smoothed output from said second sequence */
WebRtcIlbcfix_Smooth(odata, idata+centerStartPos, surround);
}


@@ -0,0 +1,41 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/******************************************************************
iLBC Speech Coder ANSI-C Source Code
WebRtcIlbcfix_Enhancer.h
******************************************************************/
#ifndef WEBRTC_MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_ENHANCER_H_
#define WEBRTC_MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_ENHANCER_H_
#include "defines.h"
/*----------------------------------------------------------------*
* perform enhancement on idata+centerStartPos through
* idata+centerStartPos+ENH_BLOCKL-1
*---------------------------------------------------------------*/
void WebRtcIlbcfix_Enhancer(
iLBC_Dec_Inst_t *iLBCdec_inst,
/* (i) Decoder state */
WebRtc_Word16 *odata, /* (o) smoothed block, dimension blockl */
WebRtc_Word16 *idata, /* (i) data buffer used for enhancing */
WebRtc_Word16 idatal, /* (i) dimension idata */
WebRtc_Word16 centerStartPos, /* (i) first sample current block within idata */
WebRtc_Word16 *period, /* (i) pitch period array (pitch backward in time) */
WebRtc_Word16 *plocs, /* (i) locations where period array values valid */
WebRtc_Word16 periodl /* (i) dimension of period and plocs */
);
#endif


@@ -0,0 +1,343 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/******************************************************************
iLBC Speech Coder ANSI-C Source Code
WebRtcIlbcfix_EnhancerInterface.c
******************************************************************/
#include "defines.h"
#include "constants.h"
#include "xcorr_coef.h"
#include "enhancer.h"
#include "hp_output.h"
/*----------------------------------------------------------------*
* interface for enhancer
*---------------------------------------------------------------*/
int WebRtcIlbcfix_EnhancerInterface( /* (o) Estimated lag in end of in[] */
WebRtc_Word16 *out, /* (o) enhanced signal */
WebRtc_Word16 *in, /* (i) unenhanced signal */
iLBC_Dec_Inst_t *iLBCdec_inst /* (i) buffers etc */
){
int iblock;
int lag=20, tlag=20;
int inLen=iLBCdec_inst->blockl+120;
WebRtc_Word16 scale, scale1, plc_blockl;
WebRtc_Word16 *enh_buf, *enh_period;
WebRtc_Word32 tmp1, tmp2, max, new_blocks;
WebRtc_Word16 *enh_bufPtr1;
int i, k;
WebRtc_Word16 EnChange;
WebRtc_Word16 SqrtEnChange;
WebRtc_Word16 inc;
WebRtc_Word16 win;
WebRtc_Word16 *tmpW16ptr;
WebRtc_Word16 startPos;
WebRtc_Word16 *plc_pred;
WebRtc_Word16 *target, *regressor;
WebRtc_Word16 max16;
int shifts;
WebRtc_Word32 ener;
WebRtc_Word16 enerSh;
WebRtc_Word16 corrSh;
WebRtc_Word16 ind, sh;
WebRtc_Word16 start, stop;
/* Stack based */
WebRtc_Word16 totsh[3];
WebRtc_Word16 downsampled[(BLOCKL_MAX+120)>>1]; /* length 180 */
WebRtc_Word32 corr32[50];
WebRtc_Word32 corrmax[3];
WebRtc_Word16 corr16[3];
WebRtc_Word16 en16[3];
WebRtc_Word16 lagmax[3];
plc_pred = downsampled; /* Reuse memory since plc_pred[ENH_BLOCKL] and downsampled are non overlapping */
enh_buf=iLBCdec_inst->enh_buf;
enh_period=iLBCdec_inst->enh_period;
/* Copy in the new data into the enhancer buffer */
WEBRTC_SPL_MEMMOVE_W16(enh_buf, &enh_buf[iLBCdec_inst->blockl],
ENH_BUFL-iLBCdec_inst->blockl);
WEBRTC_SPL_MEMCPY_W16(&enh_buf[ENH_BUFL-iLBCdec_inst->blockl], in,
iLBCdec_inst->blockl);
/* Set variables that are dependent on frame size */
if (iLBCdec_inst->mode==30) {
plc_blockl=ENH_BLOCKL;
new_blocks=3;
startPos=320; /* Start position for enhancement (640-new_blocks*ENH_BLOCKL-80) */
} else {
plc_blockl=40;
new_blocks=2;
startPos=440; /* Start position for enhancement (640-new_blocks*ENH_BLOCKL-40) */
}
/* Update the pitch prediction for each enhancer block, move the old ones */
WEBRTC_SPL_MEMMOVE_W16(enh_period, &enh_period[new_blocks], (ENH_NBLOCKS_TOT-new_blocks));
k=WebRtcSpl_DownsampleFast(
enh_buf+ENH_BUFL-inLen, /* Input samples */
(WebRtc_Word16)(inLen+ENH_BUFL_FILTEROVERHEAD),
downsampled,
(WebRtc_Word16)WEBRTC_SPL_RSHIFT_W16(inLen, 1),
(WebRtc_Word16*)WebRtcIlbcfix_kLpFiltCoefs, /* Coefficients in Q12 */
FILTERORDER_DS_PLUS1, /* Length of filter (order-1) */
FACTOR_DS,
DELAY_DS);
/* Estimate the pitch in the down sampled domain. */
for(iblock = 0; iblock<new_blocks; iblock++){
/* references */
i=60+WEBRTC_SPL_MUL_16_16(iblock,ENH_BLOCKL_HALF);
target=downsampled+i;
regressor=downsampled+i-10;
/* scaling */
max16=WebRtcSpl_MaxAbsValueW16(&regressor[-50], (WebRtc_Word16)(ENH_BLOCKL_HALF+50-1));
shifts = WebRtcSpl_GetSizeInBits(WEBRTC_SPL_MUL_16_16(max16, max16)) - 25;
shifts = WEBRTC_SPL_MAX(0, shifts);
/* compute cross correlation */
WebRtcSpl_CrossCorrelation(corr32, target, regressor,
ENH_BLOCKL_HALF, 50, (WebRtc_Word16)shifts, -1);
/* Find 3 highest correlations that should be compared for the
highest (corr*corr)/ener */
for (i=0;i<2;i++) {
lagmax[i] = WebRtcSpl_MaxIndexW32(corr32, 50);
corrmax[i] = corr32[lagmax[i]];
start = lagmax[i] - 2;
stop = lagmax[i] + 2;
start = WEBRTC_SPL_MAX(0, start);
stop = WEBRTC_SPL_MIN(49, stop);
for (k=start; k<=stop; k++) {
corr32[k] = 0;
}
}
lagmax[2] = WebRtcSpl_MaxIndexW32(corr32, 50);
corrmax[2] = corr32[lagmax[2]];
/* Calculate normalized corr^2 and ener */
for (i=0;i<3;i++) {
corrSh = 15-WebRtcSpl_GetSizeInBits(corrmax[i]);
ener = WebRtcSpl_DotProductWithScale(&regressor[-lagmax[i]], &regressor[-lagmax[i]], ENH_BLOCKL_HALF, shifts);
enerSh = 15-WebRtcSpl_GetSizeInBits(ener);
corr16[i] = (WebRtc_Word16)WEBRTC_SPL_SHIFT_W32(corrmax[i], corrSh);
corr16[i] = (WebRtc_Word16)WEBRTC_SPL_MUL_16_16_RSFT(corr16[i], corr16[i], 16);
en16[i] = (WebRtc_Word16)WEBRTC_SPL_SHIFT_W32(ener, enerSh);
totsh[i] = enerSh - WEBRTC_SPL_LSHIFT_W32(corrSh, 1);
}
/* Compare lagmax[0..3] for the (corr^2)/ener criteria */
ind = 0;
for (i=1; i<3; i++) {
if (totsh[ind] > totsh[i]) {
sh = WEBRTC_SPL_MIN(31, totsh[ind]-totsh[i]);
if ( WEBRTC_SPL_MUL_16_16(corr16[ind], en16[i]) < WEBRTC_SPL_MUL_16_16_RSFT(corr16[i], en16[ind], sh)) {
ind = i;
}
} else {
sh = WEBRTC_SPL_MIN(31, totsh[i]-totsh[ind]);
if (WEBRTC_SPL_MUL_16_16_RSFT(corr16[ind], en16[i], sh) < WEBRTC_SPL_MUL_16_16(corr16[i], en16[ind])) {
ind = i;
}
}
}
lag = lagmax[ind] + 10;
/* Store the estimated lag in the non-downsampled domain */
enh_period[ENH_NBLOCKS_TOT-new_blocks+iblock] = (WebRtc_Word16)WEBRTC_SPL_MUL_16_16(lag, 8);
/* Store the estimated lag for backward PLC */
if (iLBCdec_inst->prev_enh_pl==1) {
if (!iblock) {
tlag = WEBRTC_SPL_MUL_16_16(lag, 2);
}
} else {
if (iblock==1) {
tlag = WEBRTC_SPL_MUL_16_16(lag, 2);
}
}
lag = WEBRTC_SPL_MUL_16_16(lag, 2);
}
if ((iLBCdec_inst->prev_enh_pl==1)||(iLBCdec_inst->prev_enh_pl==2)) {
/* Calculate the best lag of the new frame
This is used to interpolate backwards and mix with the PLC'd data
*/
/* references */
target=in;
regressor=in+tlag-1;
/* scaling */
max16=WebRtcSpl_MaxAbsValueW16(regressor, (WebRtc_Word16)(plc_blockl+3-1));
if (max16>5000)
shifts=2;
else
shifts=0;
/* compute cross correlation */
WebRtcSpl_CrossCorrelation(corr32, target, regressor,
plc_blockl, 3, (WebRtc_Word16)shifts, 1);
/* find lag */
lag=WebRtcSpl_MaxIndexW32(corr32, 3);
lag+=tlag-1;
/* Copy the backward PLC to plc_pred */
if (iLBCdec_inst->prev_enh_pl==1) {
if (lag>plc_blockl) {
WEBRTC_SPL_MEMCPY_W16(plc_pred, &in[lag-plc_blockl], plc_blockl);
} else {
WEBRTC_SPL_MEMCPY_W16(&plc_pred[plc_blockl-lag], in, lag);
WEBRTC_SPL_MEMCPY_W16(plc_pred, &enh_buf[ENH_BUFL-iLBCdec_inst->blockl-plc_blockl+lag], (plc_blockl-lag));
}
} else {
int pos;
pos = plc_blockl;
while (lag<pos) {
WEBRTC_SPL_MEMCPY_W16(&plc_pred[pos-lag], in, lag);
pos = pos - lag;
}
WEBRTC_SPL_MEMCPY_W16(plc_pred, &in[lag-pos], pos);
}
if (iLBCdec_inst->prev_enh_pl==1) {
/* Limit the energy change:
 if the energy of the backward PLC is more than 4 times higher than the
 forward PLC energy, reduce the energy in the backward PLC vector:
 samples 1...len-16   : set the energy to 4 times the forward PLC energy
 samples len-15...len : interpolate between 4 times the fw PLC energy and the bw PLC energy
 Note: Compared to the floating point code there is a slight change;
 the window is 16 samples long instead of 10 samples to simplify the calculations
 */
max=WebRtcSpl_MaxAbsValueW16(&enh_buf[ENH_BUFL-iLBCdec_inst->blockl-plc_blockl], plc_blockl);
max16=WebRtcSpl_MaxAbsValueW16(plc_pred, plc_blockl);
max = WEBRTC_SPL_MAX(max, max16);
scale=22-(WebRtc_Word16)WebRtcSpl_NormW32(max);
scale=WEBRTC_SPL_MAX(scale,0);
tmp2 = WebRtcSpl_DotProductWithScale(&enh_buf[ENH_BUFL-iLBCdec_inst->blockl-plc_blockl], &enh_buf[ENH_BUFL-iLBCdec_inst->blockl-plc_blockl], plc_blockl, scale);
tmp1 = WebRtcSpl_DotProductWithScale(plc_pred, plc_pred, plc_blockl, scale);
/* Check the energy difference */
if ((tmp1>0)&&((tmp1>>2)>tmp2)) {
/* EnChange is now guaranteed to be <0.5
Calculate EnChange=tmp2/tmp1 in Q16
*/
scale1=(WebRtc_Word16)WebRtcSpl_NormW32(tmp1);
tmp1=WEBRTC_SPL_SHIFT_W32(tmp1, (scale1-16)); /* using 15 bits */
tmp2=WEBRTC_SPL_SHIFT_W32(tmp2, (scale1));
EnChange = (WebRtc_Word16)WebRtcSpl_DivW32W16(tmp2, (WebRtc_Word16)tmp1);
/* Calculate the Sqrt of the energy in Q15 ((14+16)/2) */
SqrtEnChange = (WebRtc_Word16)WebRtcSpl_Sqrt(WEBRTC_SPL_LSHIFT_W32((WebRtc_Word32)EnChange, 14));
/* Multiply first part of vector with 2*SqrtEnChange */
WebRtcSpl_ScaleVector(plc_pred, plc_pred, SqrtEnChange, (WebRtc_Word16)(plc_blockl-16), 14);
/* Calculate increase parameter for window part (16 last samples) */
inc=(2048-WEBRTC_SPL_RSHIFT_W16(SqrtEnChange, 3)); /* (1-2*SqrtEnChange)/16 in Q15 */
win=0;
tmpW16ptr=&plc_pred[plc_blockl-16];
for (i=16;i>0;i--) {
(*tmpW16ptr)=(WebRtc_Word16)WEBRTC_SPL_MUL_16_16_RSFT((*tmpW16ptr),
(SqrtEnChange+(win>>1)), 14); /* multiply by (2.0*SqrtEnChange+win) */
win += inc;
tmpW16ptr++;
}
}
/* Make the linear interpolation between the forward PLC'd data
and the backward PLC'd data (from the new frame)
*/
if (plc_blockl==40) {
inc=400; /* 1/41 in Q14 */
} else { /* plc_blockl==80 */
inc=202; /* 1/81 in Q14 */
}
win=0;
enh_bufPtr1=&enh_buf[ENH_BUFL-1-iLBCdec_inst->blockl];
for (i=0; i<plc_blockl; i++) {
win+=inc;
*enh_bufPtr1 = (WebRtc_Word16)WEBRTC_SPL_MUL_16_16_RSFT((*enh_bufPtr1), win, 14);
*enh_bufPtr1 += (WebRtc_Word16)WEBRTC_SPL_MUL_16_16_RSFT((16384-win), plc_pred[plc_blockl-1-i], 14);
enh_bufPtr1--;
}
} else {
WebRtc_Word16 *synt = &downsampled[LPC_FILTERORDER];
enh_bufPtr1=&enh_buf[ENH_BUFL-iLBCdec_inst->blockl-plc_blockl];
WEBRTC_SPL_MEMCPY_W16(enh_bufPtr1, plc_pred, plc_blockl);
/* Clear filter memory */
WebRtcSpl_MemSetW16(iLBCdec_inst->syntMem, 0, LPC_FILTERORDER);
WebRtcSpl_MemSetW16(iLBCdec_inst->hpimemy, 0, 4);
WebRtcSpl_MemSetW16(iLBCdec_inst->hpimemx, 0, 2);
/* Initialize filter memory by filtering through 2 lags */
WEBRTC_SPL_MEMCPY_W16(&synt[-LPC_FILTERORDER], iLBCdec_inst->syntMem, LPC_FILTERORDER);
WebRtcSpl_FilterARFastQ12(
enh_bufPtr1, synt,
&iLBCdec_inst->old_syntdenum[(iLBCdec_inst->nsub-1)*(LPC_FILTERORDER+1)], LPC_FILTERORDER+1, (WebRtc_Word16)lag);
WEBRTC_SPL_MEMCPY_W16(&synt[-LPC_FILTERORDER], &synt[lag-LPC_FILTERORDER], LPC_FILTERORDER);
WebRtcIlbcfix_HpOutput(synt, (WebRtc_Word16*)WebRtcIlbcfix_kHpOutCoefs,
iLBCdec_inst->hpimemy, iLBCdec_inst->hpimemx,
(WebRtc_Word16)lag);
WebRtcSpl_FilterARFastQ12(
enh_bufPtr1, synt,
&iLBCdec_inst->old_syntdenum[(iLBCdec_inst->nsub-1)*(LPC_FILTERORDER+1)], LPC_FILTERORDER+1, (WebRtc_Word16)lag);
WEBRTC_SPL_MEMCPY_W16(iLBCdec_inst->syntMem, &synt[lag-LPC_FILTERORDER], LPC_FILTERORDER);
WebRtcIlbcfix_HpOutput(synt, (WebRtc_Word16*)WebRtcIlbcfix_kHpOutCoefs,
iLBCdec_inst->hpimemy, iLBCdec_inst->hpimemx,
(WebRtc_Word16)lag);
}
}
/* Perform enhancement block by block */
for (iblock = 0; iblock<new_blocks; iblock++) {
WebRtcIlbcfix_Enhancer(iLBCdec_inst, out+WEBRTC_SPL_MUL_16_16(iblock, ENH_BLOCKL), enh_buf,
ENH_BUFL, (WebRtc_Word16)(WEBRTC_SPL_MUL_16_16(iblock, ENH_BLOCKL)+startPos),
enh_period, (WebRtc_Word16*)WebRtcIlbcfix_kEnhPlocs, ENH_NBLOCKS_TOT);
}
return (lag);
}
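The lag candidates above are ranked by a (corr^2)/energy criterion; the fixed-point code avoids the division by cross-multiplying the candidates and compensating with the totsh shift factors. A minimal floating-point sketch of that comparison (the numbers are made up, and no shift bookkeeping is needed in double precision):

#include <stdio.h>

/* Returns 1 if candidate b has a larger corr^2/energy ratio than candidate a.
   Cross-multiplying avoids the division: corr_b^2/ener_b > corr_a^2/ener_a
   is equivalent to corr_b^2*ener_a > corr_a^2*ener_b for positive energies. */
static int BetterCandidate(double corr_a, double ener_a,
                           double corr_b, double ener_b) {
  return corr_b * corr_b * ener_a > corr_a * corr_a * ener_b;
}

int main(void) {
  /* a: 0.8^2/1.0 = 0.64,  b: 0.9^2/1.5 = 0.54  ->  a wins, prints 0 */
  printf("%d\n", BetterCandidate(0.8, 1.0, 0.9, 1.5));
  return 0;
}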

View File

@@ -0,0 +1,34 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/******************************************************************
iLBC Speech Coder ANSI-C Source Code
WebRtcIlbcfix_EnhancerInterface.h
******************************************************************/
#ifndef WEBRTC_MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_ENHANCER_INTERFACE_H_
#define WEBRTC_MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_ENHANCER_INTERFACE_H_
#include "defines.h"
/*----------------------------------------------------------------*
* interface for enhancer
*---------------------------------------------------------------*/
int WebRtcIlbcfix_EnhancerInterface( /* (o) Estimated lag in end of in[] */
WebRtc_Word16 *out, /* (o) enhanced signal */
WebRtc_Word16 *in, /* (i) unenhanced signal */
iLBC_Dec_Inst_t *iLBCdec_inst /* (i) buffers etc */
);
#endif

View File

@@ -0,0 +1,48 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/******************************************************************
iLBC Speech Coder ANSI-C Source Code
WebRtcIlbcfix_FilteredCbVecs.c
******************************************************************/
#include "defines.h"
#include "constants.h"
/*----------------------------------------------------------------*
* Construct an additional codebook vector by filtering the
* initial codebook buffer. This vector is then used to expand
* the codebook with an additional section.
*---------------------------------------------------------------*/
void WebRtcIlbcfix_FilteredCbVecs(
WebRtc_Word16 *cbvectors, /* (o) Codebook vector for the higher section */
WebRtc_Word16 *CBmem, /* (i) Codebook memory that is filtered to create a
second CB section */
int lMem, /* (i) Length of codebook memory */
WebRtc_Word16 samples /* (i) Number of samples to filter */
) {
/* Set up the memory, start with zero state */
WebRtcSpl_MemSetW16(CBmem+lMem, 0, CB_HALFFILTERLEN);
WebRtcSpl_MemSetW16(CBmem-CB_HALFFILTERLEN, 0, CB_HALFFILTERLEN);
WebRtcSpl_MemSetW16(cbvectors, 0, lMem-samples);
/* Filter to obtain the filtered CB memory */
WebRtcSpl_FilterMAFastQ12(
CBmem+CB_HALFFILTERLEN+lMem-samples, cbvectors+lMem-samples,
(WebRtc_Word16*)WebRtcIlbcfix_kCbFiltersRev, CB_FILTERLEN, samples);
return;
}

View File

@@ -0,0 +1,38 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/******************************************************************
iLBC Speech Coder ANSI-C Source Code
WebRtcIlbcfix_FilteredCbVecs.h
******************************************************************/
#ifndef WEBRTC_MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_FILTERED_CB_VECS_H_
#define WEBRTC_MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_FILTERED_CB_VECS_H_
#include "defines.h"
/*----------------------------------------------------------------*
* Construct an additional codebook vector by filtering the
* initial codebook buffer. This vector is then used to expand
* the codebook with an additional section.
*---------------------------------------------------------------*/
void WebRtcIlbcfix_FilteredCbVecs(
WebRtc_Word16 *cbvectors, /* (o) Codebook vector for the higher section */
WebRtc_Word16 *CBmem, /* (i) Codebook memory that is filtered to create a
second CB section */
int lMem, /* (i) Length of codebook memory */
WebRtc_Word16 samples /* (i) Number of samples to filter */
);
#endif

View File

@@ -0,0 +1,88 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/******************************************************************
iLBC Speech Coder ANSI-C Source Code
WebRtcIlbcfix_FrameClassify.c
******************************************************************/
#include "defines.h"
#include "constants.h"
/*----------------------------------------------------------------*
* Classification of subframes to localize start state
*---------------------------------------------------------------*/
WebRtc_Word16 WebRtcIlbcfix_FrameClassify(
/* (o) Index to the max-energy sub frame */
iLBC_Enc_Inst_t *iLBCenc_inst,
/* (i/o) the encoder state structure */
WebRtc_Word16 *residualFIX /* (i) lpc residual signal */
){
WebRtc_Word16 max, scale;
WebRtc_Word32 ssqEn[NSUB_MAX-1];
WebRtc_Word16 *ssqPtr;
WebRtc_Word32 *seqEnPtr;
WebRtc_Word32 maxW32;
WebRtc_Word16 scale1;
WebRtc_Word16 pos;
int n;
/*
 Calculate the energy of each of the 80-sample blocks.
 In the draft, the first and last 4 samples are windowed with 1/5...4/5
 and 4/5...1/5 respectively. To simplify the fixed-point implementation,
 this has been changed to 0 0 1 1 and 1 1 0 0.
 */
max = WebRtcSpl_MaxAbsValueW16(residualFIX, iLBCenc_inst->blockl);
scale=WebRtcSpl_GetSizeInBits(WEBRTC_SPL_MUL_16_16(max,max));
/* Scale to maximum 24 bits so that it won't overflow for 76 samples */
scale = scale-24;
scale1 = WEBRTC_SPL_MAX(0, scale);
/* Calculate energies */
ssqPtr=residualFIX + 2;
seqEnPtr=ssqEn;
for (n=(iLBCenc_inst->nsub-1); n>0; n--) {
(*seqEnPtr) = WebRtcSpl_DotProductWithScale(ssqPtr, ssqPtr, 76, scale1);
ssqPtr += 40;
seqEnPtr++;
}
/* Scale to maximum 20 bits in order to allow for the 11 bit window */
maxW32 = WebRtcSpl_MaxValueW32(ssqEn, (WebRtc_Word16)(iLBCenc_inst->nsub-1));
scale = WebRtcSpl_GetSizeInBits(maxW32) - 20;
scale1 = WEBRTC_SPL_MAX(0, scale);
/* Window each 80-sample block with the ssqEn_winTbl window to give higher
 probability to the blocks in the middle
 */
seqEnPtr=ssqEn;
if (iLBCenc_inst->mode==20) {
ssqPtr=(WebRtc_Word16*)WebRtcIlbcfix_kStartSequenceEnrgWin+1;
} else {
ssqPtr=(WebRtc_Word16*)WebRtcIlbcfix_kStartSequenceEnrgWin;
}
for (n=(iLBCenc_inst->nsub-1); n>0; n--) {
(*seqEnPtr)=WEBRTC_SPL_MUL(((*seqEnPtr)>>scale1), (*ssqPtr));
seqEnPtr++;
ssqPtr++;
}
/* Extract the best choice of start state */
pos = WebRtcSpl_MaxIndexW32(ssqEn, (WebRtc_Word16)(iLBCenc_inst->nsub-1)) + 1;
return(pos);
}
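A floating-point sketch of the same start-state selection: the energy of each overlapping 76-sample block is weighted by a window that favours the middle blocks, and the 1-based index of the maximum is returned. The window values below are placeholders, not WebRtcIlbcfix_kStartSequenceEnrgWin.

#include <stdio.h>

/* Returns the 1-based index of the sub-block with the largest windowed energy. */
static int FrameClassifyRef(const float *residual, int nsub, const float *win) {
  int best = 1;
  float bestEn = -1.0f;
  for (int n = 0; n < nsub - 1; n++) {
    float en = 0.0f;
    /* 76-sample energy starting 2 samples into each 40-sample step,
       mirroring the 0 0 1 1 ... 1 1 0 0 windowing described above */
    for (int k = 0; k < 76; k++) {
      float s = residual[2 + 40 * n + k];
      en += s * s;
    }
    en *= win[n];
    if (en > bestEn) {
      bestEn = en;
      best = n + 1;
    }
  }
  return best;
}

int main(void) {
  float residual[240];
  for (int i = 0; i < 240; i++) residual[i] = (i > 80 && i < 160) ? 1.0f : 0.1f;
  const float win[5] = {0.6f, 0.9f, 1.0f, 0.9f, 0.6f};  /* placeholder window */
  printf("start state sub-block: %d\n", FrameClassifyRef(residual, 6, win));
  return 0;
}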

View File

@@ -0,0 +1,29 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/******************************************************************
iLBC Speech Coder ANSI-C Source Code
WebRtcIlbcfix_FrameClassify.h
******************************************************************/
#ifndef WEBRTC_MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_FRAME_CLASSIFY_H_
#define WEBRTC_MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_FRAME_CLASSIFY_H_
WebRtc_Word16 WebRtcIlbcfix_FrameClassify(
/* (o) Index to the max-energy sub frame */
iLBC_Enc_Inst_t *iLBCenc_inst,
/* (i/o) the encoder state structure */
WebRtc_Word16 *residualFIX /* (i) lpc residual signal */
);
#endif

View File

@@ -0,0 +1,45 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/******************************************************************
iLBC Speech Coder ANSI-C Source Code
WebRtcIlbcfix_GainDequant.c
******************************************************************/
#include "defines.h"
#include "constants.h"
/*----------------------------------------------------------------*
* decoder for quantized gains in the gain-shape coding of
* residual
*---------------------------------------------------------------*/
WebRtc_Word16 WebRtcIlbcfix_GainDequant(
/* (o) quantized gain value (Q14) */
WebRtc_Word16 index, /* (i) quantization index */
WebRtc_Word16 maxIn, /* (i) maximum of unquantized gain (Q14) */
WebRtc_Word16 stage /* (i) The stage of the search */
){
WebRtc_Word16 scale;
const WebRtc_Word16 *gain;
/* obtain correct scale factor */
scale=WEBRTC_SPL_ABS_W16(maxIn);
scale = WEBRTC_SPL_MAX(1638, scale); /* if lower than 0.1, set it to 0.1 */
/* select the quantization table and return the decoded value */
gain = WebRtcIlbcfix_kGain[stage];
return((WebRtc_Word16)((WEBRTC_SPL_MUL_16_16(scale, gain[index])+8192)>>14));
}
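The return statement above is a rounded Q14 multiply: scale (Q14) times the table gain (Q14) gives a Q28 product, and adding 8192 = 2^13 before the 14-bit right shift rounds to the nearest Q14 value. A tiny self-contained sketch of that operation, with an illustrative gain value:

#include <stdio.h>
#include <stdint.h>

/* Q14 multiply with rounding: (a_q14 * b_q14 + 2^13) >> 14 -> Q14 result. */
static int16_t MulQ14(int16_t a_q14, int16_t b_q14) {
  return (int16_t)(((int32_t)a_q14 * b_q14 + 8192) >> 14);
}

int main(void) {
  int16_t scale = 16384;               /* 1.0 in Q14                        */
  int16_t gain  = 11469;               /* ~0.7 in Q14 (illustrative value)  */
  printf("%d\n", MulQ14(scale, gain)); /* prints 11469, i.e. ~0.7 in Q14    */
  return 0;
}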

View File

@@ -0,0 +1,36 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/******************************************************************
iLBC Speech Coder ANSI-C Source Code
WebRtcIlbcfix_GainDequant.h
******************************************************************/
#ifndef WEBRTC_MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_GAIN_DEQUANT_H_
#define WEBRTC_MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_GAIN_DEQUANT_H_
#include "defines.h"
/*----------------------------------------------------------------*
* decoder for quantized gains in the gain-shape coding of
* residual
*---------------------------------------------------------------*/
WebRtc_Word16 WebRtcIlbcfix_GainDequant(
/* (o) quantized gain value (Q14) */
WebRtc_Word16 index, /* (i) quantization index */
WebRtc_Word16 maxIn, /* (i) maximum of unquantized gain (Q14) */
WebRtc_Word16 stage /* (i) The stage of the search */
);
#endif

View File

@@ -0,0 +1,106 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/******************************************************************
iLBC Speech Coder ANSI-C Source Code
WebRtcIlbcfix_GainQuant.c
******************************************************************/
#include "defines.h"
#include "constants.h"
/*----------------------------------------------------------------*
* quantizer for the gain in the gain-shape coding of residual
*---------------------------------------------------------------*/
WebRtc_Word16 WebRtcIlbcfix_GainQuant( /* (o) quantized gain value */
WebRtc_Word16 gain, /* (i) gain value Q14 */
WebRtc_Word16 maxIn, /* (i) maximum of gain value Q14 */
WebRtc_Word16 stage, /* (i) The stage of the search */
WebRtc_Word16 *index /* (o) quantization index */
) {
WebRtc_Word16 scale, returnVal, cblen;
WebRtc_Word32 gainW32, measure1, measure2;
const WebRtc_Word16 *cbPtr, *cb;
int loc, noMoves, noChecks, i;
/* ensure a lower bound (0.1) on the scaling factor */
scale = WEBRTC_SPL_MAX(1638, maxIn);
/* select the quantization table and calculate
the length of the table and the number of
steps in the binary search that are needed */
cb = WebRtcIlbcfix_kGain[stage];
cblen = 32>>stage;
noChecks = 4-stage;
/* Multiply the gain with 2^14 to make the comparison
easier and with higher precision */
gainW32 = WEBRTC_SPL_LSHIFT_W32((WebRtc_Word32)gain, 14);
/* Do a binary search, starting in the middle of the CB
loc - defines the current position in the table
noMoves - defines the number of steps to move in the CB in order
to get next CB location
*/
loc = cblen>>1;
noMoves = loc;
cbPtr = cb + loc; /* Centre of CB */
for (i=noChecks;i>0;i--) {
noMoves>>=1;
measure1=WEBRTC_SPL_MUL_16_16(scale, (*cbPtr));
/* Move up if gain is larger, otherwise move down in table */
measure1 = measure1 - gainW32;
if (0>measure1) {
cbPtr+=noMoves;
loc+=noMoves;
} else {
cbPtr-=noMoves;
loc-=noMoves;
}
}
/* Check which value is the closest one: loc-1, loc or loc+1 */
measure1=WEBRTC_SPL_MUL_16_16(scale, (*cbPtr));
if (gainW32>measure1) {
/* Check against value above loc */
measure2=WEBRTC_SPL_MUL_16_16(scale, (*(cbPtr+1)));
if ((measure2-gainW32)<(gainW32-measure1)) {
loc+=1;
}
} else {
/* Check against value below loc */
measure2=WEBRTC_SPL_MUL_16_16(scale, (*(cbPtr-1)));
if ((gainW32-measure2)<=(measure1-gainW32)) {
loc-=1;
}
}
/* Guard against getting outside the table. The calculation above can give a location
which is one above the maximum value (in very rare cases) */
loc=WEBRTC_SPL_MIN(loc, (cblen-1));
*index=loc;
/* Calculate the quantized gain value (in Q14) */
returnVal=(WebRtc_Word16)((WEBRTC_SPL_MUL_16_16(scale, cb[loc])+8192)>>14);
/* return the quantized value */
return(returnVal);
}
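A floating-point sketch of the binary search above, assuming a made-up 8-entry table; the real routine searches the Q14 tables from constants.c (WebRtcIlbcfix_kGain) and refines among loc-1, loc and loc+1 exactly as the fixed-point code shows.

#include <math.h>
#include <stdio.h>

/* Returns the index of the table entry whose scaled value is closest to gain. */
static int QuantGainRef(float gain, float scale, const float *cb, int cblen) {
  int loc = cblen >> 1;                /* start in the middle of the table */
  int moves = loc;
  while (moves > 1) {
    moves >>= 1;
    if (scale * cb[loc] < gain) {      /* move up if the gain is larger */
      loc += moves;
    } else {
      loc -= moves;
    }
  }
  int best = loc;                      /* refine among the neighbours */
  for (int k = loc - 1; k <= loc + 1; k++) {
    if (k >= 0 && k < cblen &&
        fabsf(scale * cb[k] - gain) < fabsf(scale * cb[best] - gain)) {
      best = k;
    }
  }
  return best;
}

int main(void) {
  const float cb[8] = {0.1f, 0.2f, 0.35f, 0.5f, 0.7f, 0.9f, 1.1f, 1.3f};
  int idx = QuantGainRef(0.62f, 1.0f, cb, 8);
  printf("index %d -> quantized gain %.2f\n", idx, cb[idx]); /* index 4 -> 0.70 */
  return 0;
}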

View File

@@ -0,0 +1,35 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/******************************************************************
iLBC Speech Coder ANSI-C Source Code
WebRtcIlbcfix_GainQuant.h
******************************************************************/
#ifndef WEBRTC_MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_GAIN_QUANT_H_
#define WEBRTC_MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_GAIN_QUANT_H_
#include "defines.h"
/*----------------------------------------------------------------*
* quantizer for the gain in the gain-shape coding of residual
*---------------------------------------------------------------*/
WebRtc_Word16 WebRtcIlbcfix_GainQuant( /* (o) quantized gain value */
WebRtc_Word16 gain, /* (i) gain value Q14 */
WebRtc_Word16 maxIn, /* (i) maximum of gain value Q14 */
WebRtc_Word16 stage, /* (i) The stage of the search */
WebRtc_Word16 *index /* (o) quantization index */
);
#endif

View File

@@ -0,0 +1,111 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/******************************************************************
iLBC Speech Coder ANSI-C Source Code
WebRtcIlbcfix_GetCbVec.c
******************************************************************/
#include "defines.h"
#include "constants.h"
#include "create_augmented_vec.h"
/*----------------------------------------------------------------*
* Construct codebook vector for given index.
*---------------------------------------------------------------*/
void WebRtcIlbcfix_GetCbVec(
WebRtc_Word16 *cbvec, /* (o) Constructed codebook vector */
WebRtc_Word16 *mem, /* (i) Codebook buffer */
WebRtc_Word16 index, /* (i) Codebook index */
WebRtc_Word16 lMem, /* (i) Length of codebook buffer */
WebRtc_Word16 cbveclen /* (i) Codebook vector length */
){
WebRtc_Word16 k, base_size;
WebRtc_Word16 lag;
/* Stack based */
WebRtc_Word16 tempbuff2[SUBL+5];
/* Determine size of codebook sections */
base_size=lMem-cbveclen+1;
if (cbveclen==SUBL) {
base_size+=WEBRTC_SPL_RSHIFT_W16(cbveclen,1);
}
/* No filter -> First codebook section */
if (index<lMem-cbveclen+1) {
/* first non-interpolated vectors */
k=index+cbveclen;
/* get vector */
WEBRTC_SPL_MEMCPY_W16(cbvec, mem+lMem-k, cbveclen);
} else if (index < base_size) {
/* Calculate lag */
k=(WebRtc_Word16)WEBRTC_SPL_MUL_16_16(2, (index-(lMem-cbveclen+1)))+cbveclen;
lag=WEBRTC_SPL_RSHIFT_W16(k, 1);
WebRtcIlbcfix_CreateAugmentedVec(lag, mem+lMem, cbvec);
}
/* Higher codebook section based on filtering */
else {
WebRtc_Word16 memIndTest;
/* first non-interpolated vectors */
if (index-base_size<lMem-cbveclen+1) {
/* Set up filter memory, stuff zeros outside memory buffer */
memIndTest = lMem-(index-base_size+cbveclen);
WebRtcSpl_MemSetW16(mem-CB_HALFFILTERLEN, 0, CB_HALFFILTERLEN);
WebRtcSpl_MemSetW16(mem+lMem, 0, CB_HALFFILTERLEN);
/* do filtering to get the codebook vector */
WebRtcSpl_FilterMAFastQ12(
&mem[memIndTest+4], cbvec, (WebRtc_Word16*)WebRtcIlbcfix_kCbFiltersRev,
CB_FILTERLEN, cbveclen);
}
/* interpolated vectors */
else {
/* Stuff zeros outside memory buffer */
memIndTest = lMem-cbveclen-CB_FILTERLEN;
WebRtcSpl_MemSetW16(mem+lMem, 0, CB_HALFFILTERLEN);
/* do filtering */
WebRtcSpl_FilterMAFastQ12(
&mem[memIndTest+7], tempbuff2, (WebRtc_Word16*)WebRtcIlbcfix_kCbFiltersRev,
CB_FILTERLEN, (WebRtc_Word16)(cbveclen+5));
/* Calculate lag index */
lag = (cbveclen<<1)-20+index-base_size-lMem-1;
WebRtcIlbcfix_CreateAugmentedVec(lag, tempbuff2+SUBL+5, cbvec);
}
}
}

View File

@@ -0,0 +1,30 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/******************************************************************
iLBC Speech Coder ANSI-C Source Code
WebRtcIlbcfix_GetCbVec.h
******************************************************************/
#ifndef WEBRTC_MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_GET_CD_VEC_H_
#define WEBRTC_MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_GET_CD_VEC_H_
void WebRtcIlbcfix_GetCbVec(
WebRtc_Word16 *cbvec, /* (o) Constructed codebook vector */
WebRtc_Word16 *mem, /* (i) Codebook buffer */
WebRtc_Word16 index, /* (i) Codebook index */
WebRtc_Word16 lMem, /* (i) Length of codebook buffer */
WebRtc_Word16 cbveclen /* (i) Codebook vector length */
);
#endif

View File

@@ -0,0 +1,83 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/******************************************************************
iLBC Speech Coder ANSI-C Source Code
WebRtcIlbcfix_GetLspPoly.c
******************************************************************/
#include "defines.h"
/*----------------------------------------------------------------*
* Construct the polynomials F1(z) and F2(z) from the LSP
* (Computations are done in Q24)
*
* The expansion is performed using the following recursion:
*
 * f[0] = 1;
 * f[1] = -2.0 * lsp[0];
 * for (i=2; i<=5; i++) {
 *   b = -2.0 * lsp[2*i-2];
 *   f[i] = b*f[i-1] + 2.0*f[i-2];
 *   for (j=i-1; j>1; j--) {
 *     f[j] = f[j] + b*f[j-1] + f[j-2];
 *   }
 *   f[1] = f[1] + b;
 * }
*---------------------------------------------------------------*/
void WebRtcIlbcfix_GetLspPoly(
WebRtc_Word16 *lsp, /* (i) LSP in Q15 */
WebRtc_Word32 *f) /* (o) polynomial in Q24 */
{
WebRtc_Word32 tmpW32;
int i, j;
WebRtc_Word16 high, low;
WebRtc_Word16 *lspPtr;
WebRtc_Word32 *fPtr;
lspPtr = lsp;
fPtr = f;
/* f[0] = 1.0 (Q24) */
(*fPtr) = (WebRtc_Word32)16777216;
fPtr++;
(*fPtr) = WEBRTC_SPL_MUL((*lspPtr), -1024);
fPtr++;
lspPtr+=2;
for(i=2; i<=5; i++)
{
(*fPtr) = fPtr[-2];
for(j=i; j>1; j--)
{
/* Compute f[j] = f[j] + tmp*f[j-1] + f[j-2]; */
high = (WebRtc_Word16)WEBRTC_SPL_RSHIFT_W32(fPtr[-1], 16);
low = (WebRtc_Word16)WEBRTC_SPL_RSHIFT_W32(fPtr[-1]-WEBRTC_SPL_LSHIFT_W32(((WebRtc_Word32)high),16), 1);
tmpW32 = WEBRTC_SPL_LSHIFT_W32(WEBRTC_SPL_MUL_16_16(high, (*lspPtr)), 2) +
WEBRTC_SPL_LSHIFT_W32(WEBRTC_SPL_MUL_16_16_RSFT(low, (*lspPtr), 15), 2);
(*fPtr) += fPtr[-2];
(*fPtr) -= tmpW32;
fPtr--;
}
(*fPtr) -= (WebRtc_Word32)WEBRTC_SPL_LSHIFT_W32((WebRtc_Word32)(*lspPtr), 10);
fPtr+=i;
lspPtr+=2;
}
return;
}
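A double-precision reference of the LSP-to-polynomial recursion documented in the header comment; the fixed-point routine above implements the same expansion with split 32x16 multiplies in Q24. The lsp values in main() are arbitrary cosine-domain numbers used only to exercise the function.

#include <stdio.h>

/* lsp[] holds the 5 LSPs (cosine domain) belonging to one polynomial,
   i.e. every other entry of the full 10-element LSP vector. */
static void GetLspPolyRef(const double *lsp, double *f) {
  f[0] = 1.0;
  f[1] = -2.0 * lsp[0];
  for (int i = 2; i <= 5; i++) {
    double b = -2.0 * lsp[i - 1];
    f[i] = b * f[i - 1] + 2.0 * f[i - 2];
    for (int j = i - 1; j > 1; j--) {
      f[j] += b * f[j - 1] + f[j - 2];
    }
    f[1] += b;
  }
}

int main(void) {
  const double lsp[5] = {0.9, 0.6, 0.1, -0.4, -0.8};
  double f[6];
  GetLspPolyRef(lsp, f);
  for (int i = 0; i <= 5; i++) printf("f[%d] = %f\n", i, f[i]);
  return 0;
}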

View File

@@ -0,0 +1,47 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/******************************************************************
iLBC Speech Coder ANSI-C Source Code
WebRtcIlbcfix_GetLspPoly.h
******************************************************************/
#ifndef WEBRTC_MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_GET_LSP_POLY_H_
#define WEBRTC_MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_GET_LSP_POLY_H_
#include "defines.h"
/*----------------------------------------------------------------*
* Construct the polynomials F1(z) and F2(z) from the LSP
* (Computations are done in Q24)
*
* The expansion is performed using the following recursion:
*
 * f[0] = 1;
 * f[1] = -2.0 * lsp[0];
 * for (i=2; i<=5; i++) {
 *   b = -2.0 * lsp[2*i-2];
 *   f[i] = b*f[i-1] + 2.0*f[i-2];
 *   for (j=i-1; j>1; j--) {
 *     f[j] = f[j] + b*f[j-1] + f[j-2];
 *   }
 *   f[1] = f[1] + b;
 * }
*---------------------------------------------------------------*/
void WebRtcIlbcfix_GetLspPoly(
WebRtc_Word16 *lsp, /* (i) LSP in Q15 */
WebRtc_Word32 *f); /* (o) polynomial in Q24 */
#endif

View File

@@ -0,0 +1,110 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/******************************************************************
iLBC Speech Coder ANSI-C Source Code
WebRtcIlbcfix_GetSyncSeq.c
******************************************************************/
#include "defines.h"
#include "constants.h"
#include "refiner.h"
#include "nearest_neighbor.h"
/*----------------------------------------------------------------*
* get the pitch-synchronous sample sequence
*---------------------------------------------------------------*/
void WebRtcIlbcfix_GetSyncSeq(
iLBC_Dec_Inst_t *iLBCdec_inst,
/* (i) Decoder state */
WebRtc_Word16 *idata, /* (i) original data */
WebRtc_Word16 idatal, /* (i) dimension of data */
WebRtc_Word16 centerStartPos, /* (i) where current block starts */
WebRtc_Word16 *period, /* (i) rough-pitch-period array (Q-2) */
WebRtc_Word16 *plocs, /* (i) where periods of period array are taken (Q-2) */
WebRtc_Word16 periodl, /* (i) dimension period array */
WebRtc_Word16 hl, /* (i) 2*hl+1 is the number of sequences */
WebRtc_Word16 *surround /* (i/o) The contribution from this sequence
summed with earlier contributions */
){
WebRtc_Word16 i,centerEndPos,q;
/* Stack based */
WebRtc_Word16 lagBlock[2*ENH_HL+1];
WebRtc_Word16 blockStartPos[2*ENH_HL+1]; /* Defines the position to search around (Q2) */
WebRtc_Word16 plocs2[ENH_PLOCSL];
centerEndPos=centerStartPos+ENH_BLOCKL-1;
/* present (find predicted lag from this position) */
WebRtcIlbcfix_NearestNeighbor(iLBCdec_inst, lagBlock+hl,plocs,
(WebRtc_Word16)WEBRTC_SPL_MUL_16_16(2, (centerStartPos+centerEndPos)),
periodl);
blockStartPos[hl]=(WebRtc_Word16)WEBRTC_SPL_MUL_16_16(4, centerStartPos);
/* past (find predicted position and perform a refined
search to find the best sequence) */
for(q=hl-1;q>=0;q--) {
blockStartPos[q]=blockStartPos[q+1]-period[lagBlock[q+1]];
WebRtcIlbcfix_NearestNeighbor(iLBCdec_inst, lagBlock+q, plocs,
(WebRtc_Word16)(blockStartPos[q] + (WebRtc_Word16)WEBRTC_SPL_MUL_16_16(4, ENH_BLOCKL_HALF)-period[lagBlock[q+1]]),
periodl);
if((blockStartPos[q]-(WebRtc_Word16)WEBRTC_SPL_MUL_16_16(4, ENH_OVERHANG))>=0) {
/* Find the best possible sequence in the 4 times upsampled
domain around blockStartPos+q */
WebRtcIlbcfix_Refiner(iLBCdec_inst, blockStartPos+q,idata,idatal,
centerStartPos,blockStartPos[q],surround,WebRtcIlbcfix_kEnhWt[q]);
} else {
/* Don't add anything since this sequence would
be outside the buffer */
}
}
/* future (find predicted position and perform a refined
search to find the best sequence) */
for(i=0;i<periodl;i++) {
plocs2[i]=(plocs[i]-period[i]);
}
for(q=hl+1;q<=WEBRTC_SPL_MUL_16_16(2, hl);q++) {
WebRtcIlbcfix_NearestNeighbor(iLBCdec_inst, lagBlock+q,plocs2,
(WebRtc_Word16)(blockStartPos[q-1]+
(WebRtc_Word16)WEBRTC_SPL_MUL_16_16(4, ENH_BLOCKL_HALF)),periodl);
blockStartPos[q]=blockStartPos[q-1]+period[lagBlock[q]];
if( (blockStartPos[q]+(WebRtc_Word16)WEBRTC_SPL_MUL_16_16(4, (ENH_BLOCKL+ENH_OVERHANG)))
<
(WebRtc_Word16)WEBRTC_SPL_MUL_16_16(4, idatal)) {
/* Find the best possible sequence in the 4 times upsampled
domain around blockStartPos+q */
WebRtcIlbcfix_Refiner(iLBCdec_inst, blockStartPos+q, idata, idatal,
centerStartPos,blockStartPos[q],surround,WebRtcIlbcfix_kEnhWt[2*hl-q]);
}
else {
/* Don't add anything since this sequence would
be outside the buffer */
}
}
}

View File

@@ -0,0 +1,42 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/******************************************************************
iLBC Speech Coder ANSI-C Source Code
WebRtcIlbcfix_GetSyncSeq.h
******************************************************************/
#ifndef WEBRTC_MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_GET_SYNC_SEQ_H_
#define WEBRTC_MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_GET_SYNC_SEQ_H_
#include "defines.h"
/*----------------------------------------------------------------*
* get the pitch-synchronous sample sequence
*---------------------------------------------------------------*/
void WebRtcIlbcfix_GetSyncSeq(
iLBC_Dec_Inst_t *iLBCdec_inst,
/* (i) Decoder state */
WebRtc_Word16 *idata, /* (i) original data */
WebRtc_Word16 idatal, /* (i) dimension of data */
WebRtc_Word16 centerStartPos, /* (i) where current block starts */
WebRtc_Word16 *period, /* (i) rough-pitch-period array (Q-2) */
WebRtc_Word16 *plocs, /* (i) where periods of period array are taken (Q-2) */
WebRtc_Word16 periodl, /* (i) dimension period array */
WebRtc_Word16 hl, /* (i) 2*hl+1 is the number of sequences */
WebRtc_Word16 *surround /* (i/o) The contribution from this sequence
summed with earlier contributions */
);
#endif

View File

@@ -0,0 +1,88 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/******************************************************************
iLBC Speech Coder ANSI-C Source Code
WebRtcIlbcfix_HpInput.c
******************************************************************/
#include "defines.h"
/*----------------------------------------------------------------*
* high-pass filter of input with *0.5 and saturation
*---------------------------------------------------------------*/
void WebRtcIlbcfix_HpInput(
WebRtc_Word16 *signal, /* (i/o) signal vector */
WebRtc_Word16 *ba, /* (i) B- and A-coefficients (2nd order)
{b[0] b[1] b[2] -a[1] -a[2]} a[0]
is assumed to be 1.0 */
WebRtc_Word16 *y, /* (i/o) Filter state yhi[n-1] ylow[n-1]
yhi[n-2] ylow[n-2] */
WebRtc_Word16 *x, /* (i/o) Filter state x[n-1] x[n-2] */
WebRtc_Word16 len) /* (i) Number of samples to filter */
{
int i;
WebRtc_Word32 tmpW32;
WebRtc_Word32 tmpW32b;
for (i=0; i<len; i++) {
/*
y[i] = b[0]*x[i] + b[1]*x[i-1] + b[2]*x[i-2]
+ (-a[1])*y[i-1] + (-a[2])*y[i-2];
*/
tmpW32 = WEBRTC_SPL_MUL_16_16(y[1], ba[3]); /* (-a[1])*y[i-1] (low part) */
tmpW32 += WEBRTC_SPL_MUL_16_16(y[3], ba[4]); /* (-a[2])*y[i-2] (low part) */
tmpW32 = (tmpW32>>15);
tmpW32 += WEBRTC_SPL_MUL_16_16(y[0], ba[3]); /* (-a[1])*y[i-1] (high part) */
tmpW32 += WEBRTC_SPL_MUL_16_16(y[2], ba[4]); /* (-a[2])*y[i-2] (high part) */
tmpW32 = (tmpW32<<1);
tmpW32 += WEBRTC_SPL_MUL_16_16(signal[i], ba[0]); /* b[0]*x[0] */
tmpW32 += WEBRTC_SPL_MUL_16_16(x[0], ba[1]); /* b[1]*x[i-1] */
tmpW32 += WEBRTC_SPL_MUL_16_16(x[1], ba[2]); /* b[2]*x[i-2] */
/* Update state (input part) */
x[1] = x[0];
x[0] = signal[i];
/* Rounding in Q(12+1), i.e. add 2^12 */
tmpW32b = tmpW32 + 4096;
/* Saturate (to 2^28) so that the HP filtered signal does not overflow */
tmpW32b = WEBRTC_SPL_SAT((WebRtc_Word32)268435455, tmpW32b, (WebRtc_Word32)-268435456);
/* Convert back to Q0 and multiply with 0.5 */
signal[i] = (WebRtc_Word16)WEBRTC_SPL_RSHIFT_W32(tmpW32b, 13);
/* Update state (filtered part) */
y[2] = y[0];
y[3] = y[1];
/* upshift tmpW32 by 3 with saturation */
if (tmpW32>268435455) {
tmpW32 = WEBRTC_SPL_WORD32_MAX;
} else if (tmpW32<-268435456) {
tmpW32 = WEBRTC_SPL_WORD32_MIN;
} else {
tmpW32 = WEBRTC_SPL_LSHIFT_W32(tmpW32, 3);
}
y[0] = (WebRtc_Word16)(tmpW32 >> 16);
y[1] = (WebRtc_Word16)((tmpW32 - WEBRTC_SPL_LSHIFT_W32((WebRtc_Word32)y[0], 16))>>1);
}
return;
}
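The loop above is a direct-form 2nd-order (biquad) high-pass filter whose state is split into high/low 16-bit words; a plain floating-point version of the same difference equation, with placeholder coefficients rather than the iLBC HP constants and without the *0.5 output scaling, looks as follows.

#include <stdio.h>

typedef struct { float x1, x2, y1, y2; } BiquadState;

/* y[i] = b[0]*x[i] + b[1]*x[i-1] + b[2]*x[i-2] - a[0]*y[i-1] - a[1]*y[i-2],
   filtered in place; a[] = {a1, a2} with a0 assumed to be 1.0. */
static void HpFilterRef(float *sig, int len, const float b[3], const float a[2],
                        BiquadState *s) {
  for (int i = 0; i < len; i++) {
    float y = b[0] * sig[i] + b[1] * s->x1 + b[2] * s->x2
              - a[0] * s->y1 - a[1] * s->y2;
    s->x2 = s->x1;  s->x1 = sig[i];
    s->y2 = s->y1;  s->y1 = y;
    sig[i] = y;
  }
}

int main(void) {
  /* placeholder high-pass coefficients, not the WebRtcIlbcfix constants */
  const float b[3] = {0.93f, -1.86f, 0.93f};
  const float a[2] = {-1.85f, 0.87f};
  float sig[8] = {1, 1, 1, 1, 1, 1, 1, 1};   /* DC input should die out */
  BiquadState s = {0, 0, 0, 0};
  HpFilterRef(sig, 8, b, a, &s);
  for (int i = 0; i < 8; i++) printf("%f\n", sig[i]);
  return 0;
}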

View File

@@ -0,0 +1,34 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/******************************************************************
iLBC Speech Coder ANSI-C Source Code
WebRtcIlbcfix_HpInput.h
******************************************************************/
#ifndef WEBRTC_MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_HP_INPUT_H_
#define WEBRTC_MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_HP_INPUT_H_
#include "defines.h"
void WebRtcIlbcfix_HpInput(
WebRtc_Word16 *signal, /* (i/o) signal vector */
WebRtc_Word16 *ba, /* (i) B- and A-coefficients (2nd order)
{b[0] b[1] b[2] -a[1] -a[2]} a[0]
is assumed to be 1.0 */
WebRtc_Word16 *y, /* (i/o) Filter state yhi[n-1] ylow[n-1]
yhi[n-2] ylow[n-2] */
WebRtc_Word16 *x, /* (i/o) Filter state x[n-1] x[n-2] */
WebRtc_Word16 len); /* (i) Number of samples to filter */
#endif

View File

@@ -0,0 +1,89 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/******************************************************************
iLBC Speech Coder ANSI-C Source Code
WebRtcIlbcfix_HpOutput.c
******************************************************************/
#include "defines.h"
/*----------------------------------------------------------------*
* high-pass filter of output and *2 with saturation
*---------------------------------------------------------------*/
void WebRtcIlbcfix_HpOutput(
WebRtc_Word16 *signal, /* (i/o) signal vector */
WebRtc_Word16 *ba, /* (i) B- and A-coefficients (2nd order)
{b[0] b[1] b[2] -a[1] -a[2]} a[0]
is assumed to be 1.0 */
WebRtc_Word16 *y, /* (i/o) Filter state yhi[n-1] ylow[n-1]
yhi[n-2] ylow[n-2] */
WebRtc_Word16 *x, /* (i/o) Filter state x[n-1] x[n-2] */
WebRtc_Word16 len) /* (i) Number of samples to filter */
{
int i;
WebRtc_Word32 tmpW32;
WebRtc_Word32 tmpW32b;
for (i=0; i<len; i++) {
/*
y[i] = b[0]*x[i] + b[1]*x[i-1] + b[2]*x[i-2]
+ (-a[1])*y[i-1] + (-a[2])*y[i-2];
*/
tmpW32 = WEBRTC_SPL_MUL_16_16(y[1], ba[3]); /* (-a[1])*y[i-1] (low part) */
tmpW32 += WEBRTC_SPL_MUL_16_16(y[3], ba[4]); /* (-a[2])*y[i-2] (low part) */
tmpW32 = (tmpW32>>15);
tmpW32 += WEBRTC_SPL_MUL_16_16(y[0], ba[3]); /* (-a[1])*y[i-1] (high part) */
tmpW32 += WEBRTC_SPL_MUL_16_16(y[2], ba[4]); /* (-a[2])*y[i-2] (high part) */
tmpW32 = (tmpW32<<1);
tmpW32 += WEBRTC_SPL_MUL_16_16(signal[i], ba[0]); /* b[0]*x[0] */
tmpW32 += WEBRTC_SPL_MUL_16_16(x[0], ba[1]); /* b[1]*x[i-1] */
tmpW32 += WEBRTC_SPL_MUL_16_16(x[1], ba[2]); /* b[2]*x[i-2] */
/* Update state (input part) */
x[1] = x[0];
x[0] = signal[i];
/* Rounding in Q(12-1), i.e. add 2^10 */
tmpW32b = tmpW32 + 1024;
/* Saturate (to 2^26) so that the HP filtered signal does not overflow */
tmpW32b = WEBRTC_SPL_SAT((WebRtc_Word32)67108863, tmpW32b, (WebRtc_Word32)-67108864);
/* Convert back to Q0 and multiply with 2 */
signal[i] = (WebRtc_Word16)WEBRTC_SPL_RSHIFT_W32(tmpW32b, 11);
/* Update state (filtered part) */
y[2] = y[0];
y[3] = y[1];
/* upshift tmpW32 by 3 with saturation */
if (tmpW32>268435455) {
tmpW32 = WEBRTC_SPL_WORD32_MAX;
} else if (tmpW32<-268435456) {
tmpW32 = WEBRTC_SPL_WORD32_MIN;
} else {
tmpW32 = WEBRTC_SPL_LSHIFT_W32(tmpW32, 3);
}
y[0] = (WebRtc_Word16)(tmpW32 >> 16);
y[1] = (WebRtc_Word16)((tmpW32 - WEBRTC_SPL_LSHIFT_W32((WebRtc_Word32)y[0], 16))>>1);
}
return;
}

View File

@@ -0,0 +1,34 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/******************************************************************
iLBC Speech Coder ANSI-C Source Code
WebRtcIlbcfix_HpOutput.h
******************************************************************/
#ifndef WEBRTC_MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_HP_OUTPUT_H_
#define WEBRTC_MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_HP_OUTPUT_H_
#include "defines.h"
void WebRtcIlbcfix_HpOutput(
WebRtc_Word16 *signal, /* (i/o) signal vector */
WebRtc_Word16 *ba, /* (i) B- and A-coefficients (2nd order)
{b[0] b[1] b[2] -a[1] -a[2]} a[0]
is assumed to be 1.0 */
WebRtc_Word16 *y, /* (i/o) Filter state yhi[n-1] ylow[n-1]
yhi[n-2] ylow[n-2] */
WebRtc_Word16 *x, /* (i/o) Filter state x[n-1] x[n-2] */
WebRtc_Word16 len); /* (i) Number of samples to filter */
#endif

View File

@@ -0,0 +1,255 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/******************************************************************
iLBC Speech Coder ANSI-C Source Code
iLBCInterface.c
******************************************************************/
#include "ilbc.h"
#include "defines.h"
#include "init_encode.h"
#include "encode.h"
#include "init_decode.h"
#include "decode.h"
#include <stdlib.h>
WebRtc_Word16 WebRtcIlbcfix_EncoderAssign(iLBC_encinst_t **iLBC_encinst, WebRtc_Word16 *ILBCENC_inst_Addr, WebRtc_Word16 *size) {
*iLBC_encinst=(iLBC_encinst_t*)ILBCENC_inst_Addr;
*size=sizeof(iLBC_Enc_Inst_t)/sizeof(WebRtc_Word16);
if (*iLBC_encinst!=NULL) {
return(0);
} else {
return(-1);
}
}
WebRtc_Word16 WebRtcIlbcfix_DecoderAssign(iLBC_decinst_t **iLBC_decinst, WebRtc_Word16 *ILBCDEC_inst_Addr, WebRtc_Word16 *size) {
*iLBC_decinst=(iLBC_decinst_t*)ILBCDEC_inst_Addr;
*size=sizeof(iLBC_Dec_Inst_t)/sizeof(WebRtc_Word16);
if (*iLBC_decinst!=NULL) {
return(0);
} else {
return(-1);
}
}
WebRtc_Word16 WebRtcIlbcfix_EncoderCreate(iLBC_encinst_t **iLBC_encinst) {
*iLBC_encinst=(iLBC_encinst_t*)malloc(sizeof(iLBC_Enc_Inst_t));
if (*iLBC_encinst!=NULL) {
return(0);
} else {
return(-1);
}
}
WebRtc_Word16 WebRtcIlbcfix_DecoderCreate(iLBC_decinst_t **iLBC_decinst) {
*iLBC_decinst=(iLBC_decinst_t*)malloc(sizeof(iLBC_Dec_Inst_t));
if (*iLBC_decinst!=NULL) {
return(0);
} else {
return(-1);
}
}
WebRtc_Word16 WebRtcIlbcfix_EncoderFree(iLBC_encinst_t *iLBC_encinst) {
free(iLBC_encinst);
return(0);
}
WebRtc_Word16 WebRtcIlbcfix_DecoderFree(iLBC_decinst_t *iLBC_decinst) {
free(iLBC_decinst);
return(0);
}
WebRtc_Word16 WebRtcIlbcfix_EncoderInit(iLBC_encinst_t *iLBCenc_inst, WebRtc_Word16 mode)
{
if ((mode==20)||(mode==30)) {
WebRtcIlbcfix_InitEncode((iLBC_Enc_Inst_t*) iLBCenc_inst, mode);
return(0);
} else {
return(-1);
}
}
WebRtc_Word16 WebRtcIlbcfix_Encode(iLBC_encinst_t *iLBCenc_inst, WebRtc_Word16 *speechIn, WebRtc_Word16 len, WebRtc_Word16 *encoded) {
WebRtc_Word16 pos = 0;
WebRtc_Word16 encpos = 0;
if ((len != ((iLBC_Enc_Inst_t*)iLBCenc_inst)->blockl) &&
#ifdef SPLIT_10MS
(len != 80) &&
#endif
(len != 2*((iLBC_Enc_Inst_t*)iLBCenc_inst)->blockl) &&
(len != 3*((iLBC_Enc_Inst_t*)iLBCenc_inst)->blockl))
{
/* A maximum of 3 frames/packet is allowed */
return(-1);
} else {
/* call encoder */
while (pos<len) {
WebRtcIlbcfix_EncodeImpl((WebRtc_UWord16*) &encoded[encpos], &speechIn[pos], (iLBC_Enc_Inst_t*) iLBCenc_inst);
#ifdef SPLIT_10MS
pos += 80;
if(((iLBC_Enc_Inst_t*)iLBCenc_inst)->section == 0)
#else
pos += ((iLBC_Enc_Inst_t*)iLBCenc_inst)->blockl;
#endif
encpos += ((iLBC_Enc_Inst_t*)iLBCenc_inst)->no_of_words;
}
return (encpos*2);
}
}
WebRtc_Word16 WebRtcIlbcfix_DecoderInit(iLBC_decinst_t *iLBCdec_inst, WebRtc_Word16 mode) {
if ((mode==20)||(mode==30)) {
WebRtcIlbcfix_InitDecode((iLBC_Dec_Inst_t*) iLBCdec_inst, mode, 1);
return(0);
} else {
return(-1);
}
}
WebRtc_Word16 WebRtcIlbcfix_DecoderInit20Ms(iLBC_decinst_t *iLBCdec_inst) {
WebRtcIlbcfix_InitDecode((iLBC_Dec_Inst_t*) iLBCdec_inst, 20, 1);
return(0);
}
WebRtc_Word16 WebRtcIlbcfix_Decoderinit30Ms(iLBC_decinst_t *iLBCdec_inst) {
WebRtcIlbcfix_InitDecode((iLBC_Dec_Inst_t*) iLBCdec_inst, 30, 1);
return(0);
}
WebRtc_Word16 WebRtcIlbcfix_Decode(iLBC_decinst_t *iLBCdec_inst,
WebRtc_Word16 *encoded,
WebRtc_Word16 len,
WebRtc_Word16 *decoded,
WebRtc_Word16 *speechType)
{
int i=0;
/* Allow for automatic switching between the frame sizes
(although you do get some discontinuity) */
if ((len==((iLBC_Dec_Inst_t*)iLBCdec_inst)->no_of_bytes)||
(len==2*((iLBC_Dec_Inst_t*)iLBCdec_inst)->no_of_bytes)||
(len==3*((iLBC_Dec_Inst_t*)iLBCdec_inst)->no_of_bytes)) {
/* ok, do nothing */
} else {
/* Test if the mode has changed */
if (((iLBC_Dec_Inst_t*)iLBCdec_inst)->mode==20) {
if ((len==NO_OF_BYTES_30MS)||
(len==2*NO_OF_BYTES_30MS)||
(len==3*NO_OF_BYTES_30MS)) {
WebRtcIlbcfix_InitDecode(((iLBC_Dec_Inst_t*)iLBCdec_inst), 30, ((iLBC_Dec_Inst_t*)iLBCdec_inst)->use_enhancer);
} else {
/* Unsupported frame length */
return(-1);
}
} else {
if ((len==NO_OF_BYTES_20MS)||
(len==2*NO_OF_BYTES_20MS)||
(len==3*NO_OF_BYTES_20MS)) {
WebRtcIlbcfix_InitDecode(((iLBC_Dec_Inst_t*)iLBCdec_inst), 20, ((iLBC_Dec_Inst_t*)iLBCdec_inst)->use_enhancer);
} else {
/* Unsupported frame length */
return(-1);
}
}
}
while ((i*((iLBC_Dec_Inst_t*)iLBCdec_inst)->no_of_bytes)<len) {
WebRtcIlbcfix_DecodeImpl(&decoded[i*((iLBC_Dec_Inst_t*)iLBCdec_inst)->blockl], (WebRtc_UWord16*) &encoded[i*((iLBC_Dec_Inst_t*)iLBCdec_inst)->no_of_words], (iLBC_Dec_Inst_t*) iLBCdec_inst, 1);
i++;
}
/* iLBC does not support VAD/CNG yet */
*speechType=1;
return(i*((iLBC_Dec_Inst_t*)iLBCdec_inst)->blockl);
}
WebRtc_Word16 WebRtcIlbcfix_Decode20Ms(iLBC_decinst_t *iLBCdec_inst,
WebRtc_Word16 *encoded,
WebRtc_Word16 len,
WebRtc_Word16 *decoded,
WebRtc_Word16 *speechType)
{
int i=0;
if ((len==((iLBC_Dec_Inst_t*)iLBCdec_inst)->no_of_bytes)||
(len==2*((iLBC_Dec_Inst_t*)iLBCdec_inst)->no_of_bytes)||
(len==3*((iLBC_Dec_Inst_t*)iLBCdec_inst)->no_of_bytes)) {
/* ok, do nothing */
} else {
return(-1);
}
while ((i*((iLBC_Dec_Inst_t*)iLBCdec_inst)->no_of_bytes)<len) {
WebRtcIlbcfix_DecodeImpl(&decoded[i*((iLBC_Dec_Inst_t*)iLBCdec_inst)->blockl], (WebRtc_UWord16*) &encoded[i*((iLBC_Dec_Inst_t*)iLBCdec_inst)->no_of_words], (iLBC_Dec_Inst_t*) iLBCdec_inst, 1);
i++;
}
/* iLBC does not support VAD/CNG yet */
*speechType=1;
return(i*((iLBC_Dec_Inst_t*)iLBCdec_inst)->blockl);
}
WebRtc_Word16 WebRtcIlbcfix_Decode30Ms(iLBC_decinst_t *iLBCdec_inst,
WebRtc_Word16 *encoded,
WebRtc_Word16 len,
WebRtc_Word16 *decoded,
WebRtc_Word16 *speechType)
{
int i=0;
if ((len==((iLBC_Dec_Inst_t*)iLBCdec_inst)->no_of_bytes)||
(len==2*((iLBC_Dec_Inst_t*)iLBCdec_inst)->no_of_bytes)||
(len==3*((iLBC_Dec_Inst_t*)iLBCdec_inst)->no_of_bytes)) {
/* ok, do nothing */
} else {
return(-1);
}
while ((i*((iLBC_Dec_Inst_t*)iLBCdec_inst)->no_of_bytes)<len) {
WebRtcIlbcfix_DecodeImpl(&decoded[i*((iLBC_Dec_Inst_t*)iLBCdec_inst)->blockl], (WebRtc_UWord16*) &encoded[i*((iLBC_Dec_Inst_t*)iLBCdec_inst)->no_of_words], (iLBC_Dec_Inst_t*) iLBCdec_inst, 1);
i++;
}
/* iLBC does not support VAD/CNG yet */
*speechType=1;
return(i*((iLBC_Dec_Inst_t*)iLBCdec_inst)->blockl);
}
WebRtc_Word16 WebRtcIlbcfix_DecodePlc(iLBC_decinst_t *iLBCdec_inst, WebRtc_Word16 *decoded, WebRtc_Word16 noOfLostFrames) {
int i;
WebRtc_UWord16 dummy;
for (i=0;i<noOfLostFrames;i++) {
/* call decoder */
WebRtcIlbcfix_DecodeImpl(&decoded[i*((iLBC_Dec_Inst_t*)iLBCdec_inst)->blockl], &dummy, (iLBC_Dec_Inst_t*) iLBCdec_inst, 0);
}
return (noOfLostFrames*((iLBC_Dec_Inst_t*)iLBCdec_inst)->blockl);
}
WebRtc_Word16 WebRtcIlbcfix_NetEqPlc(iLBC_decinst_t *iLBCdec_inst, WebRtc_Word16 *decoded, WebRtc_Word16 noOfLostFrames) {
/* Two input parameters not used, but needed for function pointers in NetEQ */
decoded = decoded;
noOfLostFrames = noOfLostFrames;
WebRtcSpl_MemSetW16(((iLBC_Dec_Inst_t*)iLBCdec_inst)->enh_buf, 0, ENH_BUFL);
((iLBC_Dec_Inst_t*)iLBCdec_inst)->prev_enh_pl = 2;
return (0);
}
void WebRtcIlbcfix_version(WebRtc_Word8 *version)
{
strcpy((char*)version, "1.1.0");
}
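A minimal usage sketch of the interface above: one encoder and one decoder in 20 ms mode (160 samples and 38 bytes per frame), a single encode/decode round trip on a silent frame, then cleanup. Error handling is reduced to asserts purely for illustration.

#include <assert.h>
#include "ilbc.h"

int main(void) {
  iLBC_encinst_t *enc = NULL;
  iLBC_decinst_t *dec = NULL;
  WebRtc_Word16 speech[160] = {0};   /* one 20 ms frame at 8 kHz (silence) */
  WebRtc_Word16 encoded[25];         /* 50 bytes, enough for a 38-byte frame */
  WebRtc_Word16 decoded[160];
  WebRtc_Word16 speechType;

  assert(WebRtcIlbcfix_EncoderCreate(&enc) == 0);
  assert(WebRtcIlbcfix_DecoderCreate(&dec) == 0);
  assert(WebRtcIlbcfix_EncoderInit(enc, 20) == 0);
  assert(WebRtcIlbcfix_DecoderInit(dec, 20) == 0);

  WebRtc_Word16 bytes = WebRtcIlbcfix_Encode(enc, speech, 160, encoded);
  assert(bytes > 0);
  WebRtc_Word16 samples = WebRtcIlbcfix_Decode(dec, encoded, bytes,
                                               decoded, &speechType);
  assert(samples == 160);

  WebRtcIlbcfix_EncoderFree(enc);
  WebRtcIlbcfix_DecoderFree(dec);
  return 0;
}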

View File

@@ -0,0 +1,177 @@
# Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
{
'includes': [
'../../../../../../common_settings.gypi', # Common settings
],
'targets': [
{
'target_name': 'iLBC',
'type': '<(library)',
'dependencies': [
'../../../../../../common_audio/signal_processing_library/main/source/spl.gyp:spl',
],
'include_dirs': [
'../interface',
],
'direct_dependent_settings': {
'include_dirs': [
'../interface',
],
},
'sources': [
'../interface/ilbc.h',
'abs_quant.c',
'abs_quant_loop.c',
'augmented_cb_corr.c',
'bw_expand.c',
'cb_construct.c',
'cb_mem_energy.c',
'cb_mem_energy_augmentation.c',
'cb_mem_energy_calc.c',
'cb_search.c',
'cb_search_core.c',
'cb_update_best_index.c',
'chebyshev.c',
'comp_corr.c',
'constants.c',
'create_augmented_vec.c',
'decode.c',
'decode_residual.c',
'decoder_interpolate_lsf.c',
'do_plc.c',
'encode.c',
'energy_inverse.c',
'enh_upsample.c',
'enhancer.c',
'enhancer_interface.c',
'filtered_cb_vecs.c',
'frame_classify.c',
'gain_dequant.c',
'gain_quant.c',
'get_cd_vec.c',
'get_lsp_poly.c',
'get_sync_seq.c',
'hp_input.c',
'hp_output.c',
'ilbc.c',
'index_conv_dec.c',
'index_conv_enc.c',
'init_decode.c',
'init_encode.c',
'interpolate.c',
'interpolate_samples.c',
'lpc_encode.c',
'lsf_check.c',
'lsf_interpolate_to_poly_dec.c',
'lsf_interpolate_to_poly_enc.c',
'lsf_to_lsp.c',
'lsf_to_poly.c',
'lsp_to_lsf.c',
'my_corr.c',
'nearest_neighbor.c',
'pack_bits.c',
'poly_to_lsf.c',
'poly_to_lsp.c',
'refiner.c',
'simple_interpolate_lsf.c',
'simple_lpc_analysis.c',
'simple_lsf_dequant.c',
'simple_lsf_quant.c',
'smooth.c',
'smooth_out_data.c',
'sort_sq.c',
'split_vq.c',
'state_construct.c',
'state_search.c',
'swap_bytes.c',
'unpack_bits.c',
'vq3.c',
'vq4.c',
'window32_w32.c',
'xcorr_coef.c',
'abs_quant.h',
'abs_quant_loop.h',
'augmented_cb_corr.h',
'bw_expand.h',
'cb_construct.h',
'cb_mem_energy.h',
'cb_mem_energy_augmentation.h',
'cb_mem_energy_calc.h',
'cb_search.h',
'cb_search_core.h',
'cb_update_best_index.h',
'chebyshev.h',
'comp_corr.h',
'constants.h',
'create_augmented_vec.h',
'decode.h',
'decode_residual.h',
'decoder_interpolate_lsf.h',
'do_plc.h',
'encode.h',
'energy_inverse.h',
'enh_upsample.h',
'enhancer.h',
'enhancer_interface.h',
'filtered_cb_vecs.h',
'frame_classify.h',
'gain_dequant.h',
'gain_quant.h',
'get_cd_vec.h',
'get_lsp_poly.h',
'get_sync_seq.h',
'hp_input.h',
'hp_output.h',
'defines.h',
'index_conv_dec.h',
'index_conv_enc.h',
'init_decode.h',
'init_encode.h',
'interpolate.h',
'interpolate_samples.h',
'lpc_encode.h',
'lsf_check.h',
'lsf_interpolate_to_poly_dec.h',
'lsf_interpolate_to_poly_enc.h',
'lsf_to_lsp.h',
'lsf_to_poly.h',
'lsp_to_lsf.h',
'my_corr.h',
'nearest_neighbor.h',
'pack_bits.h',
'poly_to_lsf.h',
'poly_to_lsp.h',
'refiner.h',
'simple_interpolate_lsf.h',
'simple_lpc_analysis.h',
'simple_lsf_dequant.h',
'simple_lsf_quant.h',
'smooth.h',
'smooth_out_data.h',
'sort_sq.h',
'split_vq.h',
'state_construct.h',
'state_search.h',
'swap_bytes.h',
'unpack_bits.h',
'vq3.h',
'vq4.h',
'window32_w32.h',
'xcorr_coef.h',
],
},
],
}
# Local Variables:
# tab-width:2
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=2 shiftwidth=2:

View File

@@ -0,0 +1,38 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/******************************************************************
iLBC Speech Coder ANSI-C Source Code
WebRtcIlbcfix_IndexConvDec.c
******************************************************************/
#include "defines.h"
void WebRtcIlbcfix_IndexConvDec(
WebRtc_Word16 *index /* (i/o) Codebook indexes */
){
int k;
for (k=4;k<6;k++) {
/* Readjust the second and third codebook index for the first 40-sample
 subframe so that they look the same as the first index (in terms of lag)
 */
if ((index[k]>=44)&&(index[k]<108)) {
index[k]+=64;
} else if ((index[k]>=108)&&(index[k]<128)) {
index[k]+=128;
} else {
/* ERROR */
}
}
}
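A tiny stand-alone illustration of the remap above: second/third-stage indexes in 44..107 are moved up by 64 and indexes in 108..127 by 128 (index_conv_enc.c, listed in the gyp target above, handles the encoder side).

#include <stdio.h>
#include <stdint.h>

int main(void) {
  /* index[0..3] are untouched; index[4] and index[5] are remapped exactly
     as in WebRtcIlbcfix_IndexConvDec above */
  int16_t index[6] = {12, 3, 7, 5, 44, 110};
  for (int k = 4; k < 6; k++) {
    if (index[k] >= 44 && index[k] < 108) {
      index[k] += 64;
    } else if (index[k] >= 108 && index[k] < 128) {
      index[k] += 128;
    }
  }
  printf("%d %d\n", index[4], index[5]);  /* prints 108 238 */
  return 0;
}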

Some files were not shown because too many files have changed in this diff Show More