Prepare to convert various types to size_t.

This makes some behaviorally-invariant changes to make certain code that
currently only works correctly with signed types work safely regardless of the
signedness of the types in question.  This is preparation for a future change
that will convert a variety of types to size_t.

There are also some formatting changes (e.g. converting "enum hack" usage to real consts) to make it simpler to just change "int" to "size_t" in the future to change the types of those constants.

BUG=none
R=andrew@webrtc.org, juberti@webrtc.org, kwiberg@webrtc.org
TBR=ajm

Review URL: https://codereview.webrtc.org/1174813003

Cr-Commit-Position: refs/heads/master@{#9413}
This commit is contained in:
Peter Kasting 2015-06-10 21:15:38 -07:00
parent 786dbdcc38
commit f045e4da43
42 changed files with 153 additions and 127 deletions

View File

@ -58,8 +58,8 @@ class FakeAudioCaptureModule
// The value for the following constants have been derived by running VoE // The value for the following constants have been derived by running VoE
// using a real ADM. The constants correspond to 10ms of mono audio at 44kHz. // using a real ADM. The constants correspond to 10ms of mono audio at 44kHz.
enum{kNumberSamples = 440}; static const int kNumberSamples = 440;
enum{kNumberBytesPerSample = sizeof(Sample)}; static const int kNumberBytesPerSample = sizeof(Sample);
// Creates a FakeAudioCaptureModule or returns NULL on failure. // Creates a FakeAudioCaptureModule or returns NULL on failure.
// |process_thread| is used to push and pull audio frames to and from the // |process_thread| is used to push and pull audio frames to and from the

View File

@ -49,7 +49,7 @@ float ComputeSNR(const ChannelBuffer<float>& ref,
int best_delay = 0; int best_delay = 0;
// Search within one sample of the expected delay. // Search within one sample of the expected delay.
for (int delay = std::max(expected_delay - 1, 0); for (int delay = std::max(expected_delay, 1) - 1;
delay <= std::min(expected_delay + 1, ref.num_frames()); delay <= std::min(expected_delay + 1, ref.num_frames());
++delay) { ++delay) {
float mse = 0; float mse = 0;

View File

@ -40,8 +40,7 @@ int RealFourier::FftLength(int order) {
} }
int RealFourier::ComplexLength(int order) { int RealFourier::ComplexLength(int order) {
CHECK_GE(order, 0); return FftLength(order) / 2 + 1;
return (1 << order) / 2 + 1;
} }
RealFourier::fft_real_scoper RealFourier::AllocRealBuffer(int count) { RealFourier::fft_real_scoper RealFourier::AllocRealBuffer(int count) {

View File

@ -34,22 +34,20 @@ class SincResamplerCallback {
// SincResampler is a high-quality single-channel sample-rate converter. // SincResampler is a high-quality single-channel sample-rate converter.
class SincResampler { class SincResampler {
public: public:
enum {
// The kernel size can be adjusted for quality (higher is better) at the // The kernel size can be adjusted for quality (higher is better) at the
// expense of performance. Must be a multiple of 32. // expense of performance. Must be a multiple of 32.
// TODO(dalecurtis): Test performance to see if we can jack this up to 64+. // TODO(dalecurtis): Test performance to see if we can jack this up to 64+.
kKernelSize = 32, static const int kKernelSize = 32;
// Default request size. Affects how often and for how much SincResampler // Default request size. Affects how often and for how much SincResampler
// calls back for input. Must be greater than kKernelSize. // calls back for input. Must be greater than kKernelSize.
kDefaultRequestSize = 512, static const int kDefaultRequestSize = 512;
// The kernel offset count is used for interpolation and is the number of // The kernel offset count is used for interpolation and is the number of
// sub-sample kernel shifts. Can be adjusted for quality (higher is better) // sub-sample kernel shifts. Can be adjusted for quality (higher is better)
// at the expense of allocating more memory. // at the expense of allocating more memory.
kKernelOffsetCount = 32, static const int kKernelOffsetCount = 32;
kKernelStorageSize = kKernelSize * (kKernelOffsetCount + 1), static const int kKernelStorageSize = kKernelSize * (kKernelOffsetCount + 1);
};
// Constructs a SincResampler with the specified |read_cb|, which is used to // Constructs a SincResampler with the specified |read_cb|, which is used to
// acquire audio data for resampling. |io_sample_rate_ratio| is the ratio // acquire audio data for resampling. |io_sample_rate_ratio| is the ratio

View File

@ -36,12 +36,11 @@ void SinusoidalLinearChirpSource::Run(int frames, float* destination) {
destination[i] = 0; destination[i] = 0;
} else { } else {
// Calculate time in seconds. // Calculate time in seconds.
double t = (static_cast<double>(current_index_) - delay_samples_) / if (current_index_ < delay_samples_) {
sample_rate_;
if (t < 0) {
destination[i] = 0; destination[i] = 0;
} else { } else {
// Sinusoidal linear chirp. // Sinusoidal linear chirp.
double t = (current_index_ - delay_samples_) / sample_rate_;
destination[i] = destination[i] =
sin(2 * M_PI * (kMinFrequency * t + (k_ / 2) * t * t)); sin(2 * M_PI * (kMinFrequency * t + (k_ / 2) * t * t));
} }

View File

@ -40,7 +40,7 @@ class SinusoidalLinearChirpSource : public SincResamplerCallback {
kMinFrequency = 5 kMinFrequency = 5
}; };
double sample_rate_; int sample_rate_;
int total_samples_; int total_samples_;
double max_frequency_; double max_frequency_;
double k_; double k_;

View File

@ -51,7 +51,7 @@ int WebRtcSpl_AutoCorrelation(const int16_t* in_vector,
for (i = 0; i < order + 1; i++) { for (i = 0; i < order + 1; i++) {
sum = 0; sum = 0;
/* Unroll the loop to improve performance. */ /* Unroll the loop to improve performance. */
for (j = 0; j < in_vector_length - i - 3; j += 4) { for (j = 0; i + j + 3 < in_vector_length; j += 4) {
sum += (in_vector[j + 0] * in_vector[i + j + 0]) >> scaling; sum += (in_vector[j + 0] * in_vector[i + j + 0]) >> scaling;
sum += (in_vector[j + 1] * in_vector[i + j + 1]) >> scaling; sum += (in_vector[j + 1] * in_vector[i + j + 1]) >> scaling;
sum += (in_vector[j + 2] * in_vector[i + j + 2]) >> scaling; sum += (in_vector[j + 2] * in_vector[i + j + 2]) >> scaling;

View File

@ -18,7 +18,7 @@ int32_t WebRtcSpl_DotProductWithScale(const int16_t* vector1,
int i = 0; int i = 0;
/* Unroll the loop to improve performance. */ /* Unroll the loop to improve performance. */
for (i = 0; i < length - 3; i += 4) { for (i = 0; i + 3 < length; i += 4) {
sum += (vector1[i + 0] * vector2[i + 0]) >> scaling; sum += (vector1[i + 0] * vector2[i + 0]) >> scaling;
sum += (vector1[i + 1] * vector2[i + 1]) >> scaling; sum += (vector1[i + 1] * vector2[i + 1]) >> scaling;
sum += (vector1[i + 2] * vector2[i + 2]) >> scaling; sum += (vector1[i + 2] * vector2[i + 2]) >> scaling;

View File

@ -41,7 +41,7 @@ int16_t WebRtcSpl_LevinsonDurbin(const int32_t* R, int16_t* A, int16_t* K,
norm = WebRtcSpl_NormW32(R[0]); norm = WebRtcSpl_NormW32(R[0]);
for (i = order; i >= 0; i--) for (i = 0; i <= order; ++i)
{ {
temp1W32 = WEBRTC_SPL_LSHIFT_W32(R[i], norm); temp1W32 = WEBRTC_SPL_LSHIFT_W32(R[i], norm);
// Put R in hi and low format // Put R in hi and low format

View File

@ -86,7 +86,7 @@ int32_t WebRtcSpl_MaxAbsValueW32Neon(const int32_t* vector, int length) {
uint32x4_t max32x4_1 = vdupq_n_u32(0); uint32x4_t max32x4_1 = vdupq_n_u32(0);
// First part, unroll the loop 8 times. // First part, unroll the loop 8 times.
for (i = length - residual; i >0; i -= 8) { for (i = 0; i < length - residual; i += 8) {
int32x4_t in32x4_0 = vld1q_s32(p_start); int32x4_t in32x4_0 = vld1q_s32(p_start);
p_start += 4; p_start += 4;
int32x4_t in32x4_1 = vld1q_s32(p_start); int32x4_t in32x4_1 = vld1q_s32(p_start);
@ -139,7 +139,7 @@ int16_t WebRtcSpl_MaxValueW16Neon(const int16_t* vector, int length) {
int16x8_t max16x8 = vdupq_n_s16(WEBRTC_SPL_WORD16_MIN); int16x8_t max16x8 = vdupq_n_s16(WEBRTC_SPL_WORD16_MIN);
// First part, unroll the loop 8 times. // First part, unroll the loop 8 times.
for (i = length - residual; i >0; i -= 8) { for (i = 0; i < length - residual; i += 8) {
int16x8_t in16x8 = vld1q_s16(p_start); int16x8_t in16x8 = vld1q_s16(p_start);
max16x8 = vmaxq_s16(max16x8, in16x8); max16x8 = vmaxq_s16(max16x8, in16x8);
p_start += 8; p_start += 8;
@ -180,7 +180,7 @@ int32_t WebRtcSpl_MaxValueW32Neon(const int32_t* vector, int length) {
int32x4_t max32x4_1 = vdupq_n_s32(WEBRTC_SPL_WORD32_MIN); int32x4_t max32x4_1 = vdupq_n_s32(WEBRTC_SPL_WORD32_MIN);
// First part, unroll the loop 8 times. // First part, unroll the loop 8 times.
for (i = length - residual; i >0; i -= 8) { for (i = 0; i < length - residual; i += 8) {
int32x4_t in32x4_0 = vld1q_s32(p_start); int32x4_t in32x4_0 = vld1q_s32(p_start);
p_start += 4; p_start += 4;
int32x4_t in32x4_1 = vld1q_s32(p_start); int32x4_t in32x4_1 = vld1q_s32(p_start);
@ -223,7 +223,7 @@ int16_t WebRtcSpl_MinValueW16Neon(const int16_t* vector, int length) {
int16x8_t min16x8 = vdupq_n_s16(WEBRTC_SPL_WORD16_MAX); int16x8_t min16x8 = vdupq_n_s16(WEBRTC_SPL_WORD16_MAX);
// First part, unroll the loop 8 times. // First part, unroll the loop 8 times.
for (i = length - residual; i >0; i -= 8) { for (i = 0; i < length - residual; i += 8) {
int16x8_t in16x8 = vld1q_s16(p_start); int16x8_t in16x8 = vld1q_s16(p_start);
min16x8 = vminq_s16(min16x8, in16x8); min16x8 = vminq_s16(min16x8, in16x8);
p_start += 8; p_start += 8;
@ -264,7 +264,7 @@ int32_t WebRtcSpl_MinValueW32Neon(const int32_t* vector, int length) {
int32x4_t min32x4_1 = vdupq_n_s32(WEBRTC_SPL_WORD32_MAX); int32x4_t min32x4_1 = vdupq_n_s32(WEBRTC_SPL_WORD32_MAX);
// First part, unroll the loop 8 times. // First part, unroll the loop 8 times.
for (i = length - residual; i >0; i -= 8) { for (i = 0; i < length - residual; i += 8) {
int32x4_t in32x4_0 = vld1q_s32(p_start); int32x4_t in32x4_0 = vld1q_s32(p_start);
p_start += 4; p_start += 4;
int32x4_t in32x4_1 = vld1q_s32(p_start); int32x4_t in32x4_1 = vld1q_s32(p_start);

View File

@ -56,7 +56,7 @@ int AudioDecoder::DecodeRedundantInternal(const uint8_t* encoded,
bool AudioDecoder::HasDecodePlc() const { return false; } bool AudioDecoder::HasDecodePlc() const { return false; }
int AudioDecoder::DecodePlc(int num_frames, int16_t* decoded) { return -1; } int AudioDecoder::DecodePlc(int num_frames, int16_t* decoded) { return 0; }
int AudioDecoder::IncomingPacket(const uint8_t* payload, int AudioDecoder::IncomingPacket(const uint8_t* payload,
size_t payload_len, size_t payload_len,

View File

@ -136,9 +136,9 @@ AudioEncoder::EncodedInfo AudioEncoderCng::EncodeInternal(
(frames_to_encode > 3 ? 3 : frames_to_encode); (frames_to_encode > 3 ? 3 : frames_to_encode);
if (frames_to_encode == 4) if (frames_to_encode == 4)
blocks_in_first_vad_call = 2; blocks_in_first_vad_call = 2;
CHECK_GE(frames_to_encode, blocks_in_first_vad_call);
const int blocks_in_second_vad_call = const int blocks_in_second_vad_call =
frames_to_encode - blocks_in_first_vad_call; frames_to_encode - blocks_in_first_vad_call;
CHECK_GE(blocks_in_second_vad_call, 0);
// Check if all of the buffer is passive speech. Start with checking the first // Check if all of the buffer is passive speech. Start with checking the first
// block. // block.
@ -217,7 +217,7 @@ AudioEncoder::EncodedInfo AudioEncoderCng::EncodeActive(
info = speech_encoder_->Encode( info = speech_encoder_->Encode(
rtp_timestamps_.front(), &speech_buffer_[i * samples_per_10ms_frame], rtp_timestamps_.front(), &speech_buffer_[i * samples_per_10ms_frame],
samples_per_10ms_frame, max_encoded_bytes, encoded); samples_per_10ms_frame, max_encoded_bytes, encoded);
if (i == frames_to_encode - 1) { if (i + 1 == frames_to_encode) {
CHECK_GT(info.encoded_bytes, 0u) << "Encoder didn't deliver data."; CHECK_GT(info.encoded_bytes, 0u) << "Encoder didn't deliver data.";
} else { } else {
CHECK_EQ(info.encoded_bytes, 0u) << "Encoder delivered data too early."; CHECK_EQ(info.encoded_bytes, 0u) << "Encoder delivered data too early.";

View File

@ -88,13 +88,13 @@ AudioEncoder::EncodedInfo AudioEncoderPcm::EncodeInternal(
} }
CHECK_EQ(speech_buffer_.size(), full_frame_samples_); CHECK_EQ(speech_buffer_.size(), full_frame_samples_);
CHECK_GE(max_encoded_bytes, full_frame_samples_); CHECK_GE(max_encoded_bytes, full_frame_samples_);
int16_t ret = EncodeCall(&speech_buffer_[0], full_frame_samples_, encoded);
CHECK_GE(ret, 0);
speech_buffer_.clear();
EncodedInfo info; EncodedInfo info;
info.encoded_timestamp = first_timestamp_in_buffer_; info.encoded_timestamp = first_timestamp_in_buffer_;
info.payload_type = payload_type_; info.payload_type = payload_type_;
int16_t ret = EncodeCall(&speech_buffer_[0], full_frame_samples_, encoded);
CHECK_GE(ret, 0);
info.encoded_bytes = static_cast<size_t>(ret); info.encoded_bytes = static_cast<size_t>(ret);
speech_buffer_.clear();
return info; return info;
} }

View File

@ -86,6 +86,10 @@ int main(int argc, char* argv[]) {
printf("G.711 version: %s\n\n", versionNumber); printf("G.711 version: %s\n\n", versionNumber);
/* Get frame length */ /* Get frame length */
framelength = atoi(argv[1]); framelength = atoi(argv[1]);
if (framelength < 0) {
printf(" G.711: Invalid framelength %d.\n", framelength);
exit(1);
}
/* Get compression law */ /* Get compression law */
strcpy(law, argv[2]); strcpy(law, argv[2]);

View File

@ -24,7 +24,8 @@ const int kSampleRateHz = 16000;
} // namespace } // namespace
bool AudioEncoderG722::Config::IsOk() const { bool AudioEncoderG722::Config::IsOk() const {
return (frame_size_ms % 10 == 0) && (num_channels >= 1); return (frame_size_ms > 0) && (frame_size_ms % 10 == 0) &&
(num_channels >= 1);
} }
AudioEncoderG722::EncoderState::EncoderState() { AudioEncoderG722::EncoderState::EncoderState() {

View File

@ -83,6 +83,10 @@ int main(int argc, char* argv[])
/* Get frame length */ /* Get frame length */
framelength = atoi(argv[1]); framelength = atoi(argv[1]);
if (framelength < 0) {
printf(" G.722: Invalid framelength %d.\n", framelength);
exit(1);
}
/* Get Input and Output files */ /* Get Input and Output files */
sscanf(argv[2], "%s", inname); sscanf(argv[2], "%s", inname);

View File

@ -41,7 +41,7 @@ void WebRtcIlbcfix_CbMemEnergyCalc(
eSh_ptr = &energyShifts[1+base_size]; eSh_ptr = &energyShifts[1+base_size];
eW16_ptr = &energyW16[1+base_size]; eW16_ptr = &energyW16[1+base_size];
for(j=0;j<range-1;j++) { for (j = 0; j + 1 < range; j++) {
/* Calculate next energy by a +/- /* Calculate next energy by a +/-
operation on the edge samples */ operation on the edge samples */

View File

@ -227,12 +227,9 @@ void WebRtcIlbcfix_CbSearch(
inverseEnergy[indexNew+indexOffset], inverseEnergyShifts[indexNew+indexOffset], inverseEnergy[indexNew+indexOffset], inverseEnergyShifts[indexNew+indexOffset],
&CritMax, &shTotMax, &bestIndex, &bestGain); &CritMax, &shTotMax, &bestIndex, &bestGain);
sInd=bestIndex-(int16_t)(CB_RESRANGE>>1); sInd = ((CB_RESRANGE >> 1) > bestIndex) ?
0 : (bestIndex - (CB_RESRANGE >> 1));
eInd=sInd+CB_RESRANGE; eInd=sInd+CB_RESRANGE;
if (sInd<0) {
eInd-=sInd;
sInd=0;
}
if (eInd>=range) { if (eInd>=range) {
eInd=range-1; eInd=range-1;
sInd=eInd-CB_RESRANGE; sInd=eInd-CB_RESRANGE;
@ -247,9 +244,11 @@ void WebRtcIlbcfix_CbSearch(
interpSamplesFilt, cDot, interpSamplesFilt, cDot,
(int16_t)(sInd+20), (int16_t)(WEBRTC_SPL_MIN(39, (eInd+20))), scale); (int16_t)(sInd+20), (int16_t)(WEBRTC_SPL_MIN(39, (eInd+20))), scale);
i=20; i=20;
cDotPtr = &cDot[20 - sInd];
} else {
cDotPtr = cDot;
} }
cDotPtr=&cDot[WEBRTC_SPL_MAX(0,(20-sInd))];
cb_vecPtr = cbvectors+lMem-20-i; cb_vecPtr = cbvectors+lMem-20-i;
/* Calculate the cross correlations (main part of the filtered CB) */ /* Calculate the cross correlations (main part of the filtered CB) */

View File

@ -41,7 +41,7 @@ void WebRtcIlbcfix_DecodeResidual(
int16_t *syntdenum /* (i) the decoded synthesis filter int16_t *syntdenum /* (i) the decoded synthesis filter
coefficients */ coefficients */
) { ) {
int16_t meml_gotten, Nfor, Nback, diff, start_pos; int16_t meml_gotten, diff, start_pos;
int16_t subcount, subframe; int16_t subcount, subframe;
int16_t *reverseDecresidual = iLBCdec_inst->enh_buf; /* Reversed decoded data, used for decoding backwards in time (reuse memory in state) */ int16_t *reverseDecresidual = iLBCdec_inst->enh_buf; /* Reversed decoded data, used for decoding backwards in time (reuse memory in state) */
int16_t *memVec = iLBCdec_inst->prevResidual; /* Memory for codebook and filter state (reuse memory in state) */ int16_t *memVec = iLBCdec_inst->prevResidual; /* Memory for codebook and filter state (reuse memory in state) */
@ -110,9 +110,7 @@ void WebRtcIlbcfix_DecodeResidual(
/* forward prediction of subframes */ /* forward prediction of subframes */
Nfor = iLBCdec_inst->nsub-iLBC_encbits->startIdx-1; if (iLBCdec_inst->nsub > iLBC_encbits->startIdx + 1) {
if( Nfor > 0 ) {
/* setup memory */ /* setup memory */
WebRtcSpl_MemSetW16(mem, 0, CB_MEML-STATE_LEN); WebRtcSpl_MemSetW16(mem, 0, CB_MEML-STATE_LEN);
@ -121,6 +119,7 @@ void WebRtcIlbcfix_DecodeResidual(
/* loop over subframes to encode */ /* loop over subframes to encode */
int16_t Nfor = iLBCdec_inst->nsub - iLBC_encbits->startIdx - 1;
for (subframe=0; subframe<Nfor; subframe++) { for (subframe=0; subframe<Nfor; subframe++) {
/* construct decoded vector */ /* construct decoded vector */
@ -143,9 +142,7 @@ void WebRtcIlbcfix_DecodeResidual(
/* backward prediction of subframes */ /* backward prediction of subframes */
Nback = iLBC_encbits->startIdx-1; if (iLBC_encbits->startIdx > 1) {
if( Nback > 0 ){
/* setup memory */ /* setup memory */
@ -160,6 +157,7 @@ void WebRtcIlbcfix_DecodeResidual(
/* loop over subframes to decode */ /* loop over subframes to decode */
int16_t Nback = iLBC_encbits->startIdx - 1;
for (subframe=0; subframe<Nback; subframe++) { for (subframe=0; subframe<Nback; subframe++) {
/* construct decoded vector */ /* construct decoded vector */

View File

@ -37,7 +37,7 @@ void WebRtcIlbcfix_DoThePlc(
IlbcDecoder *iLBCdec_inst IlbcDecoder *iLBCdec_inst
/* (i/o) decoder instance */ /* (i/o) decoder instance */
){ ){
int16_t i, pick; int16_t i;
int32_t cross, ener, cross_comp, ener_comp = 0; int32_t cross, ener, cross_comp, ener_comp = 0;
int32_t measure, maxMeasure, energy; int32_t measure, maxMeasure, energy;
int16_t max, crossSquareMax, crossSquare; int16_t max, crossSquareMax, crossSquare;
@ -234,22 +234,19 @@ void WebRtcIlbcfix_DoThePlc(
/* noise component - 52 < randlagFIX < 117 */ /* noise component - 52 < randlagFIX < 117 */
iLBCdec_inst->seed = (int16_t)(iLBCdec_inst->seed * 31821 + 13849); iLBCdec_inst->seed = (int16_t)(iLBCdec_inst->seed * 31821 + 13849);
randlag = 53 + (int16_t)(iLBCdec_inst->seed & 63); randlag = 53 + (int16_t)(iLBCdec_inst->seed & 63);
if (randlag > i) {
pick = i - randlag; randvec[i] =
iLBCdec_inst->prevResidual[iLBCdec_inst->blockl + i - randlag];
if (pick < 0) {
randvec[i] = iLBCdec_inst->prevResidual[iLBCdec_inst->blockl+pick];
} else { } else {
randvec[i] = iLBCdec_inst->prevResidual[pick]; randvec[i] = iLBCdec_inst->prevResidual[i - randlag];
} }
/* pitch repeatition component */ /* pitch repeatition component */
pick = i - use_lag; if (use_lag > i) {
PLCresidual[i] =
if (pick < 0) { iLBCdec_inst->prevResidual[iLBCdec_inst->blockl + i - use_lag];
PLCresidual[i] = iLBCdec_inst->prevResidual[iLBCdec_inst->blockl+pick];
} else { } else {
PLCresidual[i] = PLCresidual[pick]; PLCresidual[i] = PLCresidual[i - use_lag];
} }
/* Attinuate total gain for each 10 ms */ /* Attinuate total gain for each 10 ms */

View File

@ -48,7 +48,7 @@ void WebRtcIlbcfix_EncodeImpl(
IlbcEncoder *iLBCenc_inst /* (i/o) the general encoder IlbcEncoder *iLBCenc_inst /* (i/o) the general encoder
state */ state */
){ ){
int n, meml_gotten, Nfor, Nback; int n, meml_gotten, Nfor;
int16_t diff, start_pos; int16_t diff, start_pos;
int index; int index;
int subcount, subframe; int subcount, subframe;
@ -379,15 +379,14 @@ void WebRtcIlbcfix_EncodeImpl(
/* backward prediction of subframes */ /* backward prediction of subframes */
Nback = iLBCbits_inst->startIdx-1; if (iLBCbits_inst->startIdx > 1) {
if( Nback > 0 ){
/* create reverse order vectors /* create reverse order vectors
(The decresidual does not need to be copied since it is (The decresidual does not need to be copied since it is
contained in the same vector as the residual) contained in the same vector as the residual)
*/ */
int Nback = iLBCbits_inst->startIdx - 1;
WebRtcSpl_MemCpyReversedOrder(&reverseResidual[Nback*SUBL-1], residual, Nback*SUBL); WebRtcSpl_MemCpyReversedOrder(&reverseResidual[Nback*SUBL-1], residual, Nback*SUBL);
/* setup memory */ /* setup memory */
@ -425,11 +424,11 @@ void WebRtcIlbcfix_EncodeImpl(
if (iLBCenc_inst->section == 1) if (iLBCenc_inst->section == 1)
{ {
start_count = 0; start_count = 0;
end_count = WEBRTC_SPL_MAX (2 - Nfor, 0); end_count = (Nfor >= 2) ? 0 : (2 - Nfor);
} }
if (iLBCenc_inst->section == 2) if (iLBCenc_inst->section == 2)
{ {
start_count = WEBRTC_SPL_MAX (2 - Nfor, 0); start_count = (Nfor >= 2) ? 0 : (2 - Nfor);
end_count = Nback; end_count = Nback;
} }
} }

View File

@ -110,9 +110,8 @@ int WebRtcIlbcfix_EnhancerInterface( /* (o) Estimated lag in end of in[] */
for(iblock = 0; iblock<new_blocks; iblock++){ for(iblock = 0; iblock<new_blocks; iblock++){
/* references */ /* references */
i = 60 + iblock * ENH_BLOCKL_HALF; target = downsampled + 60 + iblock * ENH_BLOCKL_HALF;
target=downsampled+i; regressor = target - 10;
regressor=downsampled+i-10;
/* scaling */ /* scaling */
max16=WebRtcSpl_MaxAbsValueW16(&regressor[-50], max16=WebRtcSpl_MaxAbsValueW16(&regressor[-50],

View File

@ -165,6 +165,10 @@ int main(int argc, char* argv[])
fprintf(stderr, "--- Encoding block %i --- ",blockcount); fprintf(stderr, "--- Encoding block %i --- ",blockcount);
len = WebRtcIlbcfix_Encode(Enc_Inst, data, (int16_t)frameLen, encoded_data); len = WebRtcIlbcfix_Encode(Enc_Inst, data, (int16_t)frameLen, encoded_data);
if (len < 0) {
fprintf(stderr, "Error encoding\n");
exit(0);
}
fprintf(stderr, "\r"); fprintf(stderr, "\r");
/* write byte file */ /* write byte file */
@ -202,6 +206,10 @@ int main(int argc, char* argv[])
if (pli==1) { if (pli==1) {
len=WebRtcIlbcfix_Decode(Dec_Inst, encoded_data, len=WebRtcIlbcfix_Decode(Dec_Inst, encoded_data,
(int16_t)len, decoded_data,&speechType); (int16_t)len, decoded_data,&speechType);
if (len < 0) {
fprintf(stderr, "Error decoding\n");
exit(0);
}
} else { } else {
len=WebRtcIlbcfix_DecodePlc(Dec_Inst, decoded_data, 1); len=WebRtcIlbcfix_DecodePlc(Dec_Inst, decoded_data, 1);
} }

View File

@ -139,6 +139,10 @@ int main(int argc, char* argv[])
#else #else
len=WebRtcIlbcfix_Encode(Enc_Inst, data, (short)(mode<<3), encoded_data); len=WebRtcIlbcfix_Encode(Enc_Inst, data, (short)(mode<<3), encoded_data);
#endif #endif
if (len < 0) {
fprintf(stderr, "Error encoding\n");
exit(0);
}
fprintf(stderr, "\r"); fprintf(stderr, "\r");
#ifdef JUNK_DATA #ifdef JUNK_DATA
@ -176,6 +180,10 @@ int main(int argc, char* argv[])
if (pli==1) { if (pli==1) {
len=WebRtcIlbcfix_Decode(Dec_Inst, encoded_data, (int16_t)len, data, len=WebRtcIlbcfix_Decode(Dec_Inst, encoded_data, (int16_t)len, data,
&speechType); &speechType);
if (len < 0) {
fprintf(stderr, "Error decoding\n");
exit(0);
}
} else { } else {
len=WebRtcIlbcfix_DecodePlc(Dec_Inst, data, 1); len=WebRtcIlbcfix_DecodePlc(Dec_Inst, data, 1);
} }

View File

@ -309,7 +309,7 @@ int16_t WebRtcIsacfix_DecodePlcImpl(int16_t *signal_out16,
&((ISACdec_obj->plcstr_obj).prevPitchInvIn[FRAMESAMPLES_HALF - lag0]); &((ISACdec_obj->plcstr_obj).prevPitchInvIn[FRAMESAMPLES_HALF - lag0]);
minCorr = WEBRTC_SPL_WORD32_MAX; minCorr = WEBRTC_SPL_WORD32_MAX;
if ( (FRAMESAMPLES_HALF - 2*lag0 - 10) > 0 ) if ((FRAMESAMPLES_HALF - 10) > 2 * lag0)
{ {
minIdx = 11; minIdx = 11;
for( i = 0; i < 21; i++ ) for( i = 0; i < 21; i++ )

View File

@ -279,13 +279,15 @@ void WebRtcIsacfix_NormLatticeFilterAr(int16_t orderCoef,
ARfQ0vec[i] = (int16_t)WebRtcSpl_SatW32ToW16(tmp32); // Q0 ARfQ0vec[i] = (int16_t)WebRtcSpl_SatW32ToW16(tmp32); // Q0
} }
for (i=orderCoef-1;i>=0;i--) //get the state of f&g for the first input, for all orders for (i=orderCoef;i>0;i--) //get the state of f&g for the first input, for all orders
{ {
tmp32 = (cthQ15[i] * ARfQ0vec[0] - sthQ15[i] * stateGQ0[i] + 16384) >> 15; tmp32 = (cthQ15[i - 1] * ARfQ0vec[0] - sthQ15[i - 1] * stateGQ0[i - 1] +
16384) >> 15;
tmpAR = (int16_t)WebRtcSpl_SatW32ToW16(tmp32); // Q0 tmpAR = (int16_t)WebRtcSpl_SatW32ToW16(tmp32); // Q0
tmp32 = (sthQ15[i] * ARfQ0vec[0] + cthQ15[i] * stateGQ0[i] + 16384) >> 15; tmp32 = (sthQ15[i - 1] * ARfQ0vec[0] + cthQ15[i - 1] * stateGQ0[i - 1] +
ARgQ0vec[i+1] = (int16_t)WebRtcSpl_SatW32ToW16(tmp32); // Q0 16384) >> 15;
ARgQ0vec[i] = (int16_t)WebRtcSpl_SatW32ToW16(tmp32); // Q0
ARfQ0vec[0] = tmpAR; ARfQ0vec[0] = tmpAR;
} }
ARgQ0vec[0] = ARfQ0vec[0]; ARgQ0vec[0] = ARfQ0vec[0];

View File

@ -46,21 +46,21 @@ HALF_SUBFRAME_LOOP: @ for(n = 0; n < HALF_SUBFRAMELEN - 1; n++)
add r2, r9, asl #1 @ Restore r2 to &cth_Q15[order_coef] add r2, r9, asl #1 @ Restore r2 to &cth_Q15[order_coef]
add r3, r9, asl #1 @ Restore r3 to &sth_Q15[order_coef] add r3, r9, asl #1 @ Restore r3 to &sth_Q15[order_coef]
ORDER_COEF_LOOP: @ for(k = order_coef - 1 ; k >= 0; k--) ORDER_COEF_LOOP: @ for(k = order_coef ; k > 0; k--)
ldrh r7, [r3, #-2]! @ sth_Q15[k] ldrh r7, [r3, #-2]! @ sth_Q15[k - 1]
ldrh r6, [r2, #-2]! @ cth_Q15[k] ldrh r6, [r2, #-2]! @ cth_Q15[k - 1]
ldrh r8, [r0, #-2] @ ar_g_Q0[k] ldrh r8, [r0, #-2] @ ar_g_Q0[k - 1]
smlabb r11, r7, r5, r12 @ sth_Q15[k] * tmpAR + 16384 smlabb r11, r7, r5, r12 @ sth_Q15[k - 1] * tmpAR + 16384
smlabb r10, r6, r5, r12 @ cth_Q15[k] * tmpAR + 16384 smlabb r10, r6, r5, r12 @ cth_Q15[k - 1] * tmpAR + 16384
smulbb r7, r7, r8 @ sth_Q15[k] * ar_g_Q0[k] smulbb r7, r7, r8 @ sth_Q15[k - 1] * ar_g_Q0[k - 1]
smlabb r11, r6, r8, r11 @ cth_Q15[k]*ar_g_Q0[k]+(sth_Q15[k]*tmpAR+16384) smlabb r11, r6, r8, r11 @ cth_Q15[k - 1]*ar_g_Q0[k - 1]+(sth_Q15[k - 1]*tmpAR+16384)
sub r10, r10, r7 @ cth_Q15[k]*tmpAR+16384-(sth_Q15[k]*ar_g_Q0[k]) sub r10, r10, r7 @ cth_Q15[k - 1]*tmpAR+16384-(sth_Q15[k - 1]*ar_g_Q0[k - 1])
ssat r11, #16, r11, asr #15 ssat r11, #16, r11, asr #15
ssat r5, #16, r10, asr #15 ssat r5, #16, r10, asr #15
strh r11, [r0], #-2 @ Output: ar_g_Q0[k+1] strh r11, [r0], #-2 @ Output: ar_g_Q0[k]
subs r9, #1 subs r9, #1
bgt ORDER_COEF_LOOP bgt ORDER_COEF_LOOP

View File

@ -35,11 +35,13 @@ void WebRtcIsacfix_FilterArLoop(int16_t* ar_g_Q0, // Input samples
int32_t tmp32_2 = 0; int32_t tmp32_2 = 0;
tmpAR = ar_f_Q0[n + 1]; tmpAR = ar_f_Q0[n + 1];
for (k = order_coef - 1; k >= 0; k--) { for (k = order_coef; k > 0; k--) {
tmp32 = (cth_Q15[k] * tmpAR - sth_Q15[k] * ar_g_Q0[k] + 16384) >> 15; tmp32 = (cth_Q15[k - 1] * tmpAR - sth_Q15[k - 1] * ar_g_Q0[k - 1] +
tmp32_2 = (sth_Q15[k] * tmpAR + cth_Q15[k] * ar_g_Q0[k] + 16384) >> 15; 16384) >> 15;
tmp32_2 = (sth_Q15[k - 1] * tmpAR + cth_Q15[k - 1] * ar_g_Q0[k - 1] +
16384) >> 15;
tmpAR = (int16_t)WebRtcSpl_SatW32ToW16(tmp32); tmpAR = (int16_t)WebRtcSpl_SatW32ToW16(tmp32);
ar_g_Q0[k + 1] = (int16_t)WebRtcSpl_SatW32ToW16(tmp32_2); ar_g_Q0[k] = (int16_t)WebRtcSpl_SatW32ToW16(tmp32_2);
} }
ar_f_Q0[n + 1] = tmpAR; ar_f_Q0[n + 1] = tmpAR;
ar_g_Q0[0] = tmpAR; ar_g_Q0[0] = tmpAR;

View File

@ -69,7 +69,6 @@ void WebRtcIsacfix_PitchFilter(int16_t* indatQQ, // Q10 if type is 1 or 4,
int16_t oldLagQ7; int16_t oldLagQ7;
int16_t oldGainQ12, lagdeltaQ7, curLagQ7, gaindeltaQ12, curGainQ12; int16_t oldGainQ12, lagdeltaQ7, curLagQ7, gaindeltaQ12, curGainQ12;
int indW32 = 0, frcQQ = 0; int indW32 = 0, frcQQ = 0;
int32_t tmpW32;
const int16_t* fracoeffQQ = NULL; const int16_t* fracoeffQQ = NULL;
// Assumptions in ARM assembly for WebRtcIsacfix_PitchFilterCoreARM(). // Assumptions in ARM assembly for WebRtcIsacfix_PitchFilterCoreARM().
@ -123,8 +122,7 @@ void WebRtcIsacfix_PitchFilter(int16_t* indatQQ, // Q10 if type is 1 or 4,
curGainQ12 += gaindeltaQ12; curGainQ12 += gaindeltaQ12;
curLagQ7 += lagdeltaQ7; curLagQ7 += lagdeltaQ7;
indW32 = CalcLrIntQ(curLagQ7, 7); indW32 = CalcLrIntQ(curLagQ7, 7);
tmpW32 = (indW32 << 7) - curLagQ7; frcQQ = ((indW32 << 7) + 64 - curLagQ7) >> 4;
frcQQ = (tmpW32 >> 4) + 4;
if (frcQQ == PITCH_FRACS) { if (frcQQ == PITCH_FRACS) {
frcQQ = 0; frcQQ = 0;
@ -195,8 +193,7 @@ void WebRtcIsacfix_PitchFilterGains(const int16_t* indatQ0,
// Update parameters for each segment. // Update parameters for each segment.
curLagQ7 += lagdeltaQ7; curLagQ7 += lagdeltaQ7;
indW16 = (int16_t)CalcLrIntQ(curLagQ7, 7); indW16 = (int16_t)CalcLrIntQ(curLagQ7, 7);
tmpW16 = (indW16 << 7) - curLagQ7; frcQQ = ((indW16 << 7) + 64 - curLagQ7) >> 4;
frcQQ = (tmpW16 >> 4) + 4;
if (frcQQ == PITCH_FRACS) { if (frcQQ == PITCH_FRACS) {
frcQQ = 0; frcQQ = 0;

View File

@ -65,18 +65,21 @@ float IsacSpeedTest::EncodeABlock(int16_t* in_data, uint8_t* bit_stream,
// ISAC takes 10 ms everycall // ISAC takes 10 ms everycall
const int subblocks = block_duration_ms_ / 10; const int subblocks = block_duration_ms_ / 10;
const int subblock_length = 10 * input_sampling_khz_; const int subblock_length = 10 * input_sampling_khz_;
int value; int value = 0;
clock_t clocks = clock(); clock_t clocks = clock();
size_t pointer = 0; size_t pointer = 0;
for (int idx = 0; idx < subblocks; idx++, pointer += subblock_length) { for (int idx = 0; idx < subblocks; idx++, pointer += subblock_length) {
value = WebRtcIsacfix_Encode(ISACFIX_main_inst_, &in_data[pointer], value = WebRtcIsacfix_Encode(ISACFIX_main_inst_, &in_data[pointer],
bit_stream); bit_stream);
if (idx == subblocks - 1)
EXPECT_GT(value, 0);
else
EXPECT_EQ(0, value);
} }
clocks = clock() - clocks; clocks = clock() - clocks;
EXPECT_GT(value, 0);
assert(value <= max_bytes);
*encoded_bytes = value; *encoded_bytes = value;
assert(*encoded_bytes <= max_bytes);
return 1000.0 * clocks / CLOCKS_PER_SEC; return 1000.0 * clocks / CLOCKS_PER_SEC;
} }

View File

@ -232,7 +232,7 @@ int main(int argc, char* argv[])
CodingMode = 0; CodingMode = 0;
testNum = 0; testNum = 0;
testCE = 0; testCE = 0;
for (i = 1; i < argc-2;i++) { for (i = 1; i + 2 < argc; i++) {
/* Instantaneous mode */ /* Instantaneous mode */
if (!strcmp ("-I", argv[i])) { if (!strcmp ("-I", argv[i])) {
printf("\nInstantaneous BottleNeck\n"); printf("\nInstantaneous BottleNeck\n");

View File

@ -185,7 +185,7 @@ int main(int argc, char* argv[]) {
char transCodingFileName[500]; char transCodingFileName[500];
int16_t totFileLoop = 0; int16_t totFileLoop = 0;
int16_t numFileLoop = 0; int16_t numFileLoop = 0;
for (i = 1; i < argc - 2; i++) { for (i = 1; i + 2 < argc; i++) {
if (!strcmp("-LOOP", argv[i])) { if (!strcmp("-LOOP", argv[i])) {
i++; i++;
totFileLoop = (int16_t)atol(argv[i]); totFileLoop = (int16_t)atol(argv[i]);

View File

@ -106,6 +106,7 @@ int OpusTest::EncodeDecode(WebRtcOpusEncInst* encoder,
input_audio, input_audio,
input_samples, kMaxBytes, input_samples, kMaxBytes,
bitstream_); bitstream_);
EXPECT_GE(encoded_bytes_, 0);
return WebRtcOpus_Decode(decoder, bitstream_, return WebRtcOpus_Decode(decoder, bitstream_,
encoded_bytes_, output_audio, encoded_bytes_, output_audio,
audio_type); audio_type);
@ -539,6 +540,7 @@ TEST_P(OpusTest, DISABLED_ON_IOS(OpusDurationEstimation)) {
speech_data_.GetNextBlock(), speech_data_.GetNextBlock(),
kOpus10msFrameSamples, kMaxBytes, kOpus10msFrameSamples, kMaxBytes,
bitstream_); bitstream_);
EXPECT_GE(encoded_bytes_, 0);
EXPECT_EQ(kOpus10msFrameSamples, EXPECT_EQ(kOpus10msFrameSamples,
WebRtcOpus_DurationEst(opus_decoder_, bitstream_, WebRtcOpus_DurationEst(opus_decoder_, bitstream_,
encoded_bytes_)); encoded_bytes_));
@ -548,6 +550,7 @@ TEST_P(OpusTest, DISABLED_ON_IOS(OpusDurationEstimation)) {
speech_data_.GetNextBlock(), speech_data_.GetNextBlock(),
kOpus20msFrameSamples, kMaxBytes, kOpus20msFrameSamples, kMaxBytes,
bitstream_); bitstream_);
EXPECT_GE(encoded_bytes_, 0);
EXPECT_EQ(kOpus20msFrameSamples, EXPECT_EQ(kOpus20msFrameSamples,
WebRtcOpus_DurationEst(opus_decoder_, bitstream_, WebRtcOpus_DurationEst(opus_decoder_, bitstream_,
encoded_bytes_)); encoded_bytes_));

View File

@ -89,9 +89,11 @@ int UpMix(const AudioFrame& frame, int length_out_buff, int16_t* out_buff) {
if (length_out_buff < frame.samples_per_channel_) { if (length_out_buff < frame.samples_per_channel_) {
return -1; return -1;
} }
for (int n = frame.samples_per_channel_ - 1; n >= 0; --n) { for (int n = frame.samples_per_channel_; n > 0; --n) {
out_buff[2 * n + 1] = frame.data_[n]; int i = n - 1;
out_buff[2 * n] = frame.data_[n]; int16_t sample = frame.data_[i];
out_buff[2 * i + 1] = sample;
out_buff[2 * i] = sample;
} }
return 0; return 0;
} }

View File

@ -404,7 +404,7 @@ void Expand::AnalyzeSignal(int16_t* random_vector) {
// Find the maximizing index |i| of the cost function // Find the maximizing index |i| of the cost function
// f[i] = best_correlation[i] / best_distortion[i]. // f[i] = best_correlation[i] / best_distortion[i].
int32_t best_ratio = std::numeric_limits<int32_t>::min(); int32_t best_ratio = std::numeric_limits<int32_t>::min();
int best_index = -1; int best_index = std::numeric_limits<int>::max();
for (int i = 0; i < kNumCorrelationCandidates; ++i) { for (int i = 0; i < kNumCorrelationCandidates; ++i) {
int32_t ratio; int32_t ratio;
if (best_distortion[i] > 0) { if (best_distortion[i] > 0) {
@ -549,9 +549,7 @@ void Expand::AnalyzeSignal(int16_t* random_vector) {
} }
// Set the 3 lag values. // Set the 3 lag values.
int lag_difference = distortion_lag - correlation_lag; if (distortion_lag == correlation_lag) {
if (lag_difference == 0) {
// |distortion_lag| and |correlation_lag| are equal.
expand_lags_[0] = distortion_lag; expand_lags_[0] = distortion_lag;
expand_lags_[1] = distortion_lag; expand_lags_[1] = distortion_lag;
expand_lags_[2] = distortion_lag; expand_lags_[2] = distortion_lag;
@ -563,7 +561,7 @@ void Expand::AnalyzeSignal(int16_t* random_vector) {
// Second lag is the average of the two. // Second lag is the average of the two.
expand_lags_[1] = (distortion_lag + correlation_lag) / 2; expand_lags_[1] = (distortion_lag + correlation_lag) / 2;
// Third lag is the average again, but rounding towards |correlation_lag|. // Third lag is the average again, but rounding towards |correlation_lag|.
if (lag_difference > 0) { if (distortion_lag > correlation_lag) {
expand_lags_[2] = (distortion_lag + correlation_lag - 1) / 2; expand_lags_[2] = (distortion_lag + correlation_lag - 1) / 2;
} else { } else {
expand_lags_[2] = (distortion_lag + correlation_lag + 1) / 2; expand_lags_[2] = (distortion_lag + correlation_lag + 1) / 2;
@ -691,9 +689,8 @@ void Expand::AnalyzeSignal(int16_t* random_vector) {
temp_sum += kCoefficients[1] * x1; temp_sum += kCoefficients[1] * x1;
temp_sum += kCoefficients[2] * x2; temp_sum += kCoefficients[2] * x2;
temp_sum += kCoefficients[3] * x3; temp_sum += kCoefficients[3] * x3;
parameters.voice_mix_factor = temp_sum / 4096; parameters.voice_mix_factor =
parameters.voice_mix_factor = std::min(parameters.voice_mix_factor, static_cast<int16_t>(std::min(temp_sum / 4096, 16384));
static_cast<int16_t>(16384));
parameters.voice_mix_factor = std::max(parameters.voice_mix_factor, parameters.voice_mix_factor = std::max(parameters.voice_mix_factor,
static_cast<int16_t>(0)); static_cast<int16_t>(0));
} else { } else {

View File

@ -175,7 +175,7 @@ int Merge::GetExpandedSignal(int* old_length, int* expand_period) {
// This is the truncated length. // This is the truncated length.
} }
// This assert should always be true thanks to the if statement above. // This assert should always be true thanks to the if statement above.
assert(210 * kMaxSampleRate / 8000 - *old_length >= 0); assert(210 * kMaxSampleRate / 8000 >= *old_length);
AudioMultiVector expanded_temp(num_channels_); AudioMultiVector expanded_temp(num_channels_);
expand_->Process(&expanded_temp); expand_->Process(&expanded_temp);
@ -342,7 +342,7 @@ int16_t Merge::CorrelateAndPeakSearch(int16_t expanded_max, int16_t input_max,
int start_index = timestamps_per_call_ + int start_index = timestamps_per_call_ +
static_cast<int>(expand_->overlap_length()); static_cast<int>(expand_->overlap_length());
start_index = std::max(start_position, start_index); start_index = std::max(start_position, start_index);
start_index = std::max(start_index - input_length, 0); start_index = (input_length > start_index) ? 0 : (start_index - input_length);
// Downscale starting index to 4kHz domain. (fs_mult_ * 2 = fs_hz_ / 4000.) // Downscale starting index to 4kHz domain. (fs_mult_ * 2 = fs_hz_ / 4000.)
int start_index_downsamp = start_index / (fs_mult_ * 2); int start_index_downsamp = start_index / (fs_mult_ * 2);

View File

@ -1520,10 +1520,10 @@ int NetEqImpl::DoPreemptiveExpand(int16_t* decoded_buffer,
borrowed_samples_per_channel = static_cast<int>(required_samples - borrowed_samples_per_channel = static_cast<int>(required_samples -
decoded_length_per_channel); decoded_length_per_channel);
// Calculate how many of these were already played out. // Calculate how many of these were already played out.
old_borrowed_samples_per_channel = static_cast<int>( const int future_length = static_cast<int>(sync_buffer_->FutureLength());
borrowed_samples_per_channel - sync_buffer_->FutureLength()); old_borrowed_samples_per_channel =
old_borrowed_samples_per_channel = std::max( (borrowed_samples_per_channel > future_length) ?
0, old_borrowed_samples_per_channel); (borrowed_samples_per_channel - future_length) : 0;
memmove(&decoded_buffer[borrowed_samples_per_channel * num_channels], memmove(&decoded_buffer[borrowed_samples_per_channel * num_channels],
decoded_buffer, decoded_buffer,
sizeof(int16_t) * decoded_length); sizeof(int16_t) * decoded_length);

View File

@ -83,8 +83,10 @@ int Normal::Process(const int16_t* input,
scaling = std::max(scaling, 0); // |scaling| should always be >= 0. scaling = std::max(scaling, 0); // |scaling| should always be >= 0.
int32_t energy = WebRtcSpl_DotProductWithScale(signal, signal, int32_t energy = WebRtcSpl_DotProductWithScale(signal, signal,
energy_length, scaling); energy_length, scaling);
if ((energy_length >> scaling) > 0) { int32_t scaled_energy_length =
energy = energy / (energy_length >> scaling); static_cast<int32_t>(energy_length >> scaling);
if (scaled_energy_length > 0) {
energy = energy / scaled_energy_length;
} else { } else {
energy = 0; energy = 0;
} }

View File

@ -450,7 +450,10 @@ int main(int argc, char* argv[]) {
CHECK_NOT_NULL(out_file); CHECK_NOT_NULL(out_file);
printf("Output file: %s\n\n", argv[2]); printf("Output file: %s\n\n", argv[2]);
packet_size = atoi(argv[3]); packet_size = atoi(argv[3]);
CHECK_NOT_NULL(packet_size); if (packet_size <= 0) {
printf("Packet size %d must be positive", packet_size);
return -1;
}
printf("Packet size: %i\n", packet_size); printf("Packet size: %i\n", packet_size);
// check for stereo // check for stereo

View File

@ -208,7 +208,7 @@ void WebRtcNsx_NoiseEstimationNeon(NoiseSuppressionFixedC* inst,
uint16x8_t tmp16x8_4; uint16x8_t tmp16x8_4;
int32x4_t tmp32x4; int32x4_t tmp32x4;
for (i = 0; i < inst->magnLen - 7; i += 8) { for (i = 0; i + 7 < inst->magnLen; i += 8) {
// Compute delta. // Compute delta.
// Smaller step size during startup. This prevents from using // Smaller step size during startup. This prevents from using
// unrealistic values causing overflow. // unrealistic values causing overflow.

View File

@ -442,8 +442,9 @@ inline void AudioFrame::UpdateFrame(int id, uint32_t timestamp,
num_channels_ = num_channels; num_channels_ = num_channels;
energy_ = energy; energy_ = energy;
assert(num_channels >= 0);
const int length = samples_per_channel * num_channels; const int length = samples_per_channel * num_channels;
assert(length <= kMaxDataSizeSamples && length >= 0); assert(length <= kMaxDataSizeSamples);
if (data != NULL) { if (data != NULL) {
memcpy(data_, data, sizeof(int16_t) * length); memcpy(data_, data, sizeof(int16_t) * length);
} else { } else {
@ -466,8 +467,9 @@ inline void AudioFrame::CopyFrom(const AudioFrame& src) {
energy_ = src.energy_; energy_ = src.energy_;
interleaved_ = src.interleaved_; interleaved_ = src.interleaved_;
assert(num_channels_ >= 0);
const int length = samples_per_channel_ * num_channels_; const int length = samples_per_channel_ * num_channels_;
assert(length <= kMaxDataSizeSamples && length >= 0); assert(length <= kMaxDataSizeSamples);
memcpy(data_, src.data_, sizeof(int16_t) * length); memcpy(data_, src.data_, sizeof(int16_t) * length);
} }