Relanding r7807.

r7807 was reverted to rule it out as the cause of a failure.

It has since been verified and can now be relanded.

BUG=

TBR=kjellander@webrtc.org

Review URL: https://webrtc-codereview.appspot.com/32649004

git-svn-id: http://webrtc.googlecode.com/svn/trunk@7810 4adac7df-926f-26a2-2b94-8c16560cd09d
minyue@webrtc.org 2014-12-04 12:14:12 +00:00
parent 52bc4f4797
commit 33ccdfa1f5
8 changed files with 34 additions and 335 deletions

View File

@ -166,9 +166,7 @@ int WebRtcOpus_DecoderChannels(OpusDecInst* inst);
* Return value : 0 - Success
* -1 - Error
*/
int16_t WebRtcOpus_DecoderInitNew(OpusDecInst* inst);
int16_t WebRtcOpus_DecoderInit(OpusDecInst* inst);
int16_t WebRtcOpus_DecoderInitSlave(OpusDecInst* inst);
/****************************************************************************
* WebRtcOpus_Decode(...)
@ -190,21 +188,12 @@ int16_t WebRtcOpus_DecoderInitSlave(OpusDecInst* inst);
* Return value : >0 - Samples per channel in decoded vector
* -1 - Error
*/
int16_t WebRtcOpus_DecodeNew(OpusDecInst* inst, const uint8_t* encoded,
int16_t encoded_bytes, int16_t* decoded,
int16_t* audio_type);
int16_t WebRtcOpus_Decode(OpusDecInst* inst, const uint8_t* encoded,
int16_t encoded_bytes, int16_t* decoded,
int16_t* audio_type);
int16_t WebRtcOpus_DecodeSlave(OpusDecInst* inst, const uint8_t* encoded,
int16_t encoded_bytes, int16_t* decoded,
int16_t* audio_type);
/****************************************************************************
* WebRtcOpus_DecodePlc(...)
* TODO(tlegrand): Remove master and slave functions when NetEq4 is in place.
* WebRtcOpus_DecodePlcMaster(...)
* WebRtcOpus_DecodePlcSlave(...)
*
* This function processes PLC for opus frame(s).
* Input:
@ -219,10 +208,6 @@ int16_t WebRtcOpus_DecodeSlave(OpusDecInst* inst, const uint8_t* encoded,
*/
int16_t WebRtcOpus_DecodePlc(OpusDecInst* inst, int16_t* decoded,
int16_t number_of_lost_frames);
int16_t WebRtcOpus_DecodePlcMaster(OpusDecInst* inst, int16_t* decoded,
int16_t number_of_lost_frames);
int16_t WebRtcOpus_DecodePlcSlave(OpusDecInst* inst, int16_t* decoded,
int16_t number_of_lost_frames);
/****************************************************************************
* WebRtcOpus_DecodeFec(...)
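
For reference, below is a minimal usage sketch of the consolidated decoder API declared above. The include path, the per-packet create/free pattern, and the loss-handling policy are assumptions for illustration only and are not part of this change; the function names and signatures are taken from the header as shown.

/* Minimal sketch of the single-instance decoder API (assumed include path). */
#include "webrtc/modules/audio_coding/codecs/opus/interface/opus_interface.h"

static int DecodeOnePacket(const uint8_t* payload, int16_t payload_bytes,
                           int channels, int16_t* pcm_out) {
  OpusDecInst* decoder = NULL;
  int16_t audio_type = 0;
  int16_t samples_per_channel = -1;

  if (WebRtcOpus_DecoderCreate(&decoder, channels) != 0)
    return -1;
  if (WebRtcOpus_DecoderInit(decoder) != 0) {  /* Reset decoder state. */
    WebRtcOpus_DecoderFree(decoder);
    return -1;
  }

  if (payload_bytes > 0) {
    /* Normal decode; for stereo the samples in |pcm_out| are interleaved. */
    samples_per_channel = WebRtcOpus_Decode(decoder, payload, payload_bytes,
                                            pcm_out, &audio_type);
  } else {
    /* Packet lost: conceal one frame with the decoder's PLC. */
    samples_per_channel = WebRtcOpus_DecodePlc(decoder, pcm_out, 1);
  }

  WebRtcOpus_DecoderFree(decoder);
  return samples_per_channel;  /* >0 samples per channel, or -1 on error. */
}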

View File

@ -156,10 +156,8 @@ void OpusFecTest::DecodeABlock(bool lost_previous, bool lost_current) {
if (!lost_current) {
// Decode current frame.
value_2 = WebRtcOpus_DecodeNew(opus_decoder_, &bit_stream_[0],
encoded_bytes_,
&out_data_[value_1 * channels_],
&audio_type);
value_2 = WebRtcOpus_Decode(opus_decoder_, &bit_stream_[0], encoded_bytes_,
&out_data_[value_1 * channels_], &audio_type);
EXPECT_EQ(block_length_sample_, value_2);
}
}

View File

@ -18,8 +18,7 @@ struct WebRtcOpusEncInst {
};
struct WebRtcOpusDecInst {
OpusDecoder* decoder_left;
OpusDecoder* decoder_right;
OpusDecoder* decoder;
int prev_decoded_samples;
int channels;
};

View File

@ -149,8 +149,7 @@ int16_t WebRtcOpus_SetComplexity(OpusEncInst* inst, int32_t complexity) {
}
int16_t WebRtcOpus_DecoderCreate(OpusDecInst** inst, int channels) {
int error_l;
int error_r;
int error;
OpusDecInst* state;
if (inst != NULL) {
@ -160,11 +159,9 @@ int16_t WebRtcOpus_DecoderCreate(OpusDecInst** inst, int channels) {
return -1;
}
/* Create new memory for left and right channel, always at 48000 Hz. */
state->decoder_left = opus_decoder_create(48000, channels, &error_l);
state->decoder_right = opus_decoder_create(48000, channels, &error_r);
if (error_l == OPUS_OK && error_r == OPUS_OK && state->decoder_left != NULL
&& state->decoder_right != NULL) {
/* Create new memory, always at 48000 Hz. */
state->decoder = opus_decoder_create(48000, channels, &error);
if (error == OPUS_OK && state->decoder != NULL) {
/* Creation of memory all ok. */
state->channels = channels;
state->prev_decoded_samples = kWebRtcOpusDefaultFrameSize;
@ -173,11 +170,8 @@ int16_t WebRtcOpus_DecoderCreate(OpusDecInst** inst, int channels) {
}
/* If memory allocation was unsuccessful, free the entire state. */
if (state->decoder_left) {
opus_decoder_destroy(state->decoder_left);
}
if (state->decoder_right) {
opus_decoder_destroy(state->decoder_right);
if (state->decoder) {
opus_decoder_destroy(state->decoder);
}
free(state);
}
@ -186,8 +180,7 @@ int16_t WebRtcOpus_DecoderCreate(OpusDecInst** inst, int channels) {
int16_t WebRtcOpus_DecoderFree(OpusDecInst* inst) {
if (inst) {
opus_decoder_destroy(inst->decoder_left);
opus_decoder_destroy(inst->decoder_right);
opus_decoder_destroy(inst->decoder);
free(inst);
return 0;
} else {
@ -199,24 +192,8 @@ int WebRtcOpus_DecoderChannels(OpusDecInst* inst) {
return inst->channels;
}
int16_t WebRtcOpus_DecoderInitNew(OpusDecInst* inst) {
int error = opus_decoder_ctl(inst->decoder_left, OPUS_RESET_STATE);
if (error == OPUS_OK) {
return 0;
}
return -1;
}
int16_t WebRtcOpus_DecoderInit(OpusDecInst* inst) {
int error = opus_decoder_ctl(inst->decoder_left, OPUS_RESET_STATE);
if (error == OPUS_OK) {
return 0;
}
return -1;
}
int16_t WebRtcOpus_DecoderInitSlave(OpusDecInst* inst) {
int error = opus_decoder_ctl(inst->decoder_right, OPUS_RESET_STATE);
int error = opus_decoder_ctl(inst->decoder, OPUS_RESET_STATE);
if (error == OPUS_OK) {
return 0;
}
@ -256,10 +233,10 @@ static int DecodeFec(OpusDecoder* inst, const uint8_t* encoded,
return -1;
}
int16_t WebRtcOpus_DecodeNew(OpusDecInst* inst, const uint8_t* encoded,
int16_t WebRtcOpus_Decode(OpusDecInst* inst, const uint8_t* encoded,
int16_t encoded_bytes, int16_t* decoded,
int16_t* audio_type) {
int decoded_samples = DecodeNative(inst->decoder_left,
int decoded_samples = DecodeNative(inst->decoder,
encoded,
encoded_bytes,
kWebRtcOpusMaxFrameSizePerChannel,
@ -275,70 +252,6 @@ int16_t WebRtcOpus_DecodeNew(OpusDecInst* inst, const uint8_t* encoded,
return decoded_samples;
}
int16_t WebRtcOpus_Decode(OpusDecInst* inst, const uint8_t* encoded,
int16_t encoded_bytes, int16_t* decoded,
int16_t* audio_type) {
int decoded_samples;
int i;
/* If mono case, just do a regular call to the decoder.
* If stereo, call to WebRtcOpus_Decode() gives left channel as output, and
* calls to WebRtcOpus_Decode_slave() give right channel as output.
* This is to make stereo work with the current setup of NetEQ, which
* requires two calls to the decoder to produce stereo. */
decoded_samples = DecodeNative(inst->decoder_left, encoded, encoded_bytes,
kWebRtcOpusMaxFrameSizePerChannel, decoded,
audio_type);
if (decoded_samples < 0) {
return -1;
}
if (inst->channels == 2) {
/* The parameter |decoded_samples| holds the number of samples pairs, in
* case of stereo. Number of samples in |decoded| equals |decoded_samples|
* times 2. */
for (i = 0; i < decoded_samples; i++) {
/* Take every second sample, starting at the first sample. This gives
* the left channel. */
decoded[i] = decoded[i * 2];
}
}
/* Update decoded sample memory, to be used by the PLC in case of losses. */
inst->prev_decoded_samples = decoded_samples;
return decoded_samples;
}
int16_t WebRtcOpus_DecodeSlave(OpusDecInst* inst, const uint8_t* encoded,
int16_t encoded_bytes, int16_t* decoded,
int16_t* audio_type) {
int decoded_samples;
int i;
decoded_samples = DecodeNative(inst->decoder_right, encoded, encoded_bytes,
kWebRtcOpusMaxFrameSizePerChannel, decoded,
audio_type);
if (decoded_samples < 0) {
return -1;
}
if (inst->channels == 2) {
/* The parameter |decoded_samples| holds the number of samples pairs, in
* case of stereo. Number of samples in |decoded| equals |decoded_samples|
* times 2. */
for (i = 0; i < decoded_samples; i++) {
/* Take every second sample, starting at the second sample. This gives
* the right channel. */
decoded[i] = decoded[i * 2 + 1];
}
} else {
/* Decode slave should never be called for mono packets. */
return -1;
}
return decoded_samples;
}
int16_t WebRtcOpus_DecodePlc(OpusDecInst* inst, int16_t* decoded,
int16_t number_of_lost_frames) {
int16_t audio_type = 0;
@ -351,7 +264,7 @@ int16_t WebRtcOpus_DecodePlc(OpusDecInst* inst, int16_t* decoded,
plc_samples = number_of_lost_frames * inst->prev_decoded_samples;
plc_samples = (plc_samples <= kWebRtcOpusMaxFrameSizePerChannel) ?
plc_samples : kWebRtcOpusMaxFrameSizePerChannel;
decoded_samples = DecodeNative(inst->decoder_left, NULL, 0, plc_samples,
decoded_samples = DecodeNative(inst->decoder, NULL, 0, plc_samples,
decoded, &audio_type);
if (decoded_samples < 0) {
return -1;
@ -360,82 +273,6 @@ int16_t WebRtcOpus_DecodePlc(OpusDecInst* inst, int16_t* decoded,
return decoded_samples;
}
int16_t WebRtcOpus_DecodePlcMaster(OpusDecInst* inst, int16_t* decoded,
int16_t number_of_lost_frames) {
int decoded_samples;
int16_t audio_type = 0;
int plc_samples;
int i;
/* If mono case, just do a regular call to the decoder.
* If stereo, call to WebRtcOpus_DecodePlcMaster() gives left channel as
* output, and calls to WebRtcOpus_DecodePlcSlave() give right channel as
* output. This is to make stereo work with the current setup of NetEQ, which
* requires two calls to the decoder to produce stereo. */
/* The number of samples we ask for is |number_of_lost_frames| times
* |prev_decoded_samples_|. Limit the number of samples to maximum
* |kWebRtcOpusMaxFrameSizePerChannel|. */
plc_samples = number_of_lost_frames * inst->prev_decoded_samples;
plc_samples = (plc_samples <= kWebRtcOpusMaxFrameSizePerChannel) ?
plc_samples : kWebRtcOpusMaxFrameSizePerChannel;
decoded_samples = DecodeNative(inst->decoder_left, NULL, 0, plc_samples,
decoded, &audio_type);
if (decoded_samples < 0) {
return -1;
}
if (inst->channels == 2) {
/* The parameter |decoded_samples| holds the number of sample pairs, in
* case of stereo. The original number of samples in |decoded| equals
* |decoded_samples| times 2. */
for (i = 0; i < decoded_samples; i++) {
/* Take every second sample, starting at the first sample. This gives
* the left channel. */
decoded[i] = decoded[i * 2];
}
}
return decoded_samples;
}
int16_t WebRtcOpus_DecodePlcSlave(OpusDecInst* inst, int16_t* decoded,
int16_t number_of_lost_frames) {
int decoded_samples;
int16_t audio_type = 0;
int plc_samples;
int i;
/* Calls to WebRtcOpus_DecodePlcSlave() give right channel as output.
* The function should never be called in the mono case. */
if (inst->channels != 2) {
return -1;
}
/* The number of samples we ask for is |number_of_lost_frames| times
* |prev_decoded_samples_|. Limit the number of samples to maximum
* |kWebRtcOpusMaxFrameSizePerChannel|. */
plc_samples = number_of_lost_frames * inst->prev_decoded_samples;
plc_samples = (plc_samples <= kWebRtcOpusMaxFrameSizePerChannel)
? plc_samples : kWebRtcOpusMaxFrameSizePerChannel;
decoded_samples = DecodeNative(inst->decoder_right, NULL, 0, plc_samples,
decoded, &audio_type);
if (decoded_samples < 0) {
return -1;
}
/* The parameter |decoded_samples| holds the number of sample pairs,
* The original number of samples in |decoded| equals |decoded_samples|
* times 2. */
for (i = 0; i < decoded_samples; i++) {
/* Take every second sample, starting at the second sample. This gives
* the right channel. */
decoded[i] = decoded[i * 2 + 1];
}
return decoded_samples;
}
int16_t WebRtcOpus_DecodeFec(OpusDecInst* inst, const uint8_t* encoded,
int16_t encoded_bytes, int16_t* decoded,
int16_t* audio_type) {
@ -448,7 +285,7 @@ int16_t WebRtcOpus_DecodeFec(OpusDecInst* inst, const uint8_t* encoded,
fec_samples = opus_packet_get_samples_per_frame(encoded, 48000);
decoded_samples = DecodeFec(inst->decoder_left, encoded, encoded_bytes,
decoded_samples = DecodeFec(inst->decoder, encoded, encoded_bytes,
fec_samples, decoded, audio_type);
if (decoded_samples < 0) {
return -1;
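
To make the PLC sizing in WebRtcOpus_DecodePlc above concrete (the figures are illustrative, not taken from this change): a 20 ms frame at 48 kHz gives prev_decoded_samples = 960, so losing two frames requests 2 * 960 = 1920 samples per channel, which is then capped at kWebRtcOpusMaxFrameSizePerChannel before DecodeNative is called with a NULL payload to produce the concealment audio.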

View File

@ -72,8 +72,8 @@ float OpusSpeedTest::DecodeABlock(const uint8_t* bit_stream,
int value;
int16_t audio_type;
clock_t clocks = clock();
value = WebRtcOpus_DecodeNew(opus_decoder_, bit_stream, encoded_bytes,
out_data, &audio_type);
value = WebRtcOpus_Decode(opus_decoder_, bit_stream, encoded_bytes, out_data,
&audio_type);
clocks = clock() - clocks;
EXPECT_EQ(output_length_sample_, value);
return 1000.0 * clocks / CLOCKS_PER_SEC;

View File

@ -35,9 +35,7 @@ class OpusTest : public ::testing::Test {
WebRtcOpusEncInst* opus_mono_encoder_;
WebRtcOpusEncInst* opus_stereo_encoder_;
WebRtcOpusDecInst* opus_mono_decoder_;
WebRtcOpusDecInst* opus_mono_decoder_new_;
WebRtcOpusDecInst* opus_stereo_decoder_;
WebRtcOpusDecInst* opus_stereo_decoder_new_;
int16_t speech_data_[kOpusMaxFrameSamples];
int16_t output_data_[kOpusMaxFrameSamples];
@ -48,9 +46,7 @@ OpusTest::OpusTest()
: opus_mono_encoder_(NULL),
opus_stereo_encoder_(NULL),
opus_mono_decoder_(NULL),
opus_mono_decoder_new_(NULL),
opus_stereo_decoder_(NULL),
opus_stereo_decoder_new_(NULL) {
opus_stereo_decoder_(NULL) {
}
void OpusTest::SetUp() {
@ -117,91 +113,56 @@ TEST_F(OpusTest, OpusEncodeDecodeMono) {
// Create encoder memory.
EXPECT_EQ(0, WebRtcOpus_EncoderCreate(&opus_mono_encoder_, 1));
EXPECT_EQ(0, WebRtcOpus_DecoderCreate(&opus_mono_decoder_, 1));
EXPECT_EQ(0, WebRtcOpus_DecoderCreate(&opus_mono_decoder_new_, 1));
// Set bitrate.
EXPECT_EQ(0, WebRtcOpus_SetBitRate(opus_mono_encoder_, 32000));
// Check number of channels for decoder.
EXPECT_EQ(1, WebRtcOpus_DecoderChannels(opus_mono_decoder_));
EXPECT_EQ(1, WebRtcOpus_DecoderChannels(opus_mono_decoder_new_));
// Encode & decode.
int16_t encoded_bytes;
int16_t audio_type;
int16_t output_data_decode_new[kOpusMaxFrameSamples];
int16_t output_data_decode[kOpusMaxFrameSamples];
encoded_bytes = WebRtcOpus_Encode(opus_mono_encoder_, speech_data_,
kOpus20msFrameSamples, kMaxBytes,
bitstream_);
EXPECT_EQ(kOpus20msFrameSamples,
WebRtcOpus_DecodeNew(opus_mono_decoder_new_, bitstream_,
encoded_bytes, output_data_decode_new,
&audio_type));
EXPECT_EQ(kOpus20msFrameSamples,
WebRtcOpus_Decode(opus_mono_decoder_, bitstream_,
encoded_bytes, output_data_decode,
&audio_type));
// Data in |output_data_decode_new| should be the same as in
// |output_data_decode|.
for (int i = 0; i < kOpus20msFrameSamples; i++) {
EXPECT_EQ(output_data_decode_new[i], output_data_decode[i]);
}
// Free memory.
EXPECT_EQ(0, WebRtcOpus_EncoderFree(opus_mono_encoder_));
EXPECT_EQ(0, WebRtcOpus_DecoderFree(opus_mono_decoder_));
EXPECT_EQ(0, WebRtcOpus_DecoderFree(opus_mono_decoder_new_));
}
TEST_F(OpusTest, OpusEncodeDecodeStereo) {
// Create encoder memory.
EXPECT_EQ(0, WebRtcOpus_EncoderCreate(&opus_stereo_encoder_, 2));
EXPECT_EQ(0, WebRtcOpus_DecoderCreate(&opus_stereo_decoder_, 2));
EXPECT_EQ(0, WebRtcOpus_DecoderCreate(&opus_stereo_decoder_new_, 2));
// Set bitrate.
EXPECT_EQ(0, WebRtcOpus_SetBitRate(opus_stereo_encoder_, 64000));
// Check number of channels for decoder.
EXPECT_EQ(2, WebRtcOpus_DecoderChannels(opus_stereo_decoder_));
EXPECT_EQ(2, WebRtcOpus_DecoderChannels(opus_stereo_decoder_new_));
// Encode & decode.
int16_t encoded_bytes;
int16_t audio_type;
int16_t output_data_decode_new[kOpusMaxFrameSamples];
int16_t output_data_decode[kOpusMaxFrameSamples];
int16_t output_data_decode_slave[kOpusMaxFrameSamples];
encoded_bytes = WebRtcOpus_Encode(opus_stereo_encoder_, speech_data_,
kOpus20msFrameSamples, kMaxBytes,
bitstream_);
EXPECT_EQ(kOpus20msFrameSamples,
WebRtcOpus_DecodeNew(opus_stereo_decoder_new_, bitstream_,
encoded_bytes, output_data_decode_new,
&audio_type));
EXPECT_EQ(kOpus20msFrameSamples,
WebRtcOpus_Decode(opus_stereo_decoder_, bitstream_,
encoded_bytes, output_data_decode,
&audio_type));
EXPECT_EQ(kOpus20msFrameSamples,
WebRtcOpus_DecodeSlave(opus_stereo_decoder_, bitstream_,
encoded_bytes, output_data_decode_slave,
&audio_type));
// Data in |output_data_decode_new| should be the same as in
// |output_data_decode| and |output_data_decode_slave| interleaved to a
// stereo signal.
for (int i = 0; i < kOpus20msFrameSamples; i++) {
EXPECT_EQ(output_data_decode_new[i * 2], output_data_decode[i]);
EXPECT_EQ(output_data_decode_new[i * 2 + 1], output_data_decode_slave[i]);
}
// Free memory.
EXPECT_EQ(0, WebRtcOpus_EncoderFree(opus_stereo_encoder_));
EXPECT_EQ(0, WebRtcOpus_DecoderFree(opus_stereo_decoder_));
EXPECT_EQ(0, WebRtcOpus_DecoderFree(opus_stereo_decoder_new_));
}
TEST_F(OpusTest, OpusSetBitRate) {
@ -249,67 +210,30 @@ TEST_F(OpusTest, OpusDecodeInit) {
// Create encoder memory.
EXPECT_EQ(0, WebRtcOpus_EncoderCreate(&opus_stereo_encoder_, 2));
EXPECT_EQ(0, WebRtcOpus_DecoderCreate(&opus_stereo_decoder_, 2));
EXPECT_EQ(0, WebRtcOpus_DecoderCreate(&opus_stereo_decoder_new_, 2));
// Encode & decode.
int16_t encoded_bytes;
int16_t audio_type;
int16_t output_data_decode_new[kOpusMaxFrameSamples];
int16_t output_data_decode[kOpusMaxFrameSamples];
int16_t output_data_decode_slave[kOpusMaxFrameSamples];
encoded_bytes = WebRtcOpus_Encode(opus_stereo_encoder_, speech_data_,
kOpus20msFrameSamples, kMaxBytes,
bitstream_);
EXPECT_EQ(kOpus20msFrameSamples,
WebRtcOpus_DecodeNew(opus_stereo_decoder_new_, bitstream_,
encoded_bytes, output_data_decode_new,
&audio_type));
EXPECT_EQ(kOpus20msFrameSamples,
WebRtcOpus_Decode(opus_stereo_decoder_, bitstream_,
encoded_bytes, output_data_decode,
&audio_type));
EXPECT_EQ(kOpus20msFrameSamples,
WebRtcOpus_DecodeSlave(opus_stereo_decoder_, bitstream_,
encoded_bytes, output_data_decode_slave,
&audio_type));
// Data in |output_data_decode_new| should be the same as in
// |output_data_decode| and |output_data_decode_slave| interleaved to a
// stereo signal.
for (int i = 0; i < kOpus20msFrameSamples; i++) {
EXPECT_EQ(output_data_decode_new[i * 2], output_data_decode[i]);
EXPECT_EQ(output_data_decode_new[i * 2 + 1], output_data_decode_slave[i]);
}
EXPECT_EQ(0, WebRtcOpus_DecoderInitNew(opus_stereo_decoder_new_));
EXPECT_EQ(0, WebRtcOpus_DecoderInit(opus_stereo_decoder_));
EXPECT_EQ(0, WebRtcOpus_DecoderInitSlave(opus_stereo_decoder_));
EXPECT_EQ(kOpus20msFrameSamples,
WebRtcOpus_DecodeNew(opus_stereo_decoder_new_, bitstream_,
encoded_bytes, output_data_decode_new,
&audio_type));
EXPECT_EQ(kOpus20msFrameSamples,
WebRtcOpus_Decode(opus_stereo_decoder_, bitstream_,
encoded_bytes, output_data_decode,
&audio_type));
EXPECT_EQ(kOpus20msFrameSamples,
WebRtcOpus_DecodeSlave(opus_stereo_decoder_, bitstream_,
encoded_bytes, output_data_decode_slave,
&audio_type));
// Data in |output_data_decode_new| should be the same as in
// |output_data_decode| and |output_data_decode_slave| interleaved to a
// stereo signal.
for (int i = 0; i < kOpus20msFrameSamples; i++) {
EXPECT_EQ(output_data_decode_new[i * 2], output_data_decode[i]);
EXPECT_EQ(output_data_decode_new[i * 2 + 1], output_data_decode_slave[i]);
}
// Free memory.
EXPECT_EQ(0, WebRtcOpus_EncoderFree(opus_stereo_encoder_));
EXPECT_EQ(0, WebRtcOpus_DecoderFree(opus_stereo_decoder_));
EXPECT_EQ(0, WebRtcOpus_DecoderFree(opus_stereo_decoder_new_));
}
TEST_F(OpusTest, OpusEnableDisableFec) {
@ -382,49 +306,33 @@ TEST_F(OpusTest, OpusDecodePlcMono) {
// Create encoder memory.
EXPECT_EQ(0, WebRtcOpus_EncoderCreate(&opus_mono_encoder_, 1));
EXPECT_EQ(0, WebRtcOpus_DecoderCreate(&opus_mono_decoder_, 1));
EXPECT_EQ(0, WebRtcOpus_DecoderCreate(&opus_mono_decoder_new_, 1));
// Set bitrate.
EXPECT_EQ(0, WebRtcOpus_SetBitRate(opus_mono_encoder_, 32000));
// Check number of channels for decoder.
EXPECT_EQ(1, WebRtcOpus_DecoderChannels(opus_mono_decoder_));
EXPECT_EQ(1, WebRtcOpus_DecoderChannels(opus_mono_decoder_new_));
// Encode & decode.
int16_t encoded_bytes;
int16_t audio_type;
int16_t output_data_decode_new[kOpusMaxFrameSamples];
int16_t output_data_decode[kOpusMaxFrameSamples];
encoded_bytes = WebRtcOpus_Encode(opus_mono_encoder_, speech_data_,
kOpus20msFrameSamples, kMaxBytes,
bitstream_);
EXPECT_EQ(kOpus20msFrameSamples,
WebRtcOpus_DecodeNew(opus_mono_decoder_new_, bitstream_,
encoded_bytes, output_data_decode_new,
&audio_type));
EXPECT_EQ(kOpus20msFrameSamples,
WebRtcOpus_Decode(opus_mono_decoder_, bitstream_,
encoded_bytes, output_data_decode,
&audio_type));
// Call decoder PLC for both versions of the decoder.
// Call decoder PLC.
int16_t plc_buffer[kOpusMaxFrameSamples];
int16_t plc_buffer_new[kOpusMaxFrameSamples];
EXPECT_EQ(kOpus20msFrameSamples,
WebRtcOpus_DecodePlcMaster(opus_mono_decoder_, plc_buffer, 1));
EXPECT_EQ(kOpus20msFrameSamples,
WebRtcOpus_DecodePlc(opus_mono_decoder_new_, plc_buffer_new, 1));
// Data in |plc_buffer| should be the same as in |plc_buffer_new|.
for (int i = 0; i < kOpus20msFrameSamples; i++) {
EXPECT_EQ(plc_buffer[i], plc_buffer_new[i]);
}
WebRtcOpus_DecodePlc(opus_mono_decoder_, plc_buffer, 1));
// Free memory.
EXPECT_EQ(0, WebRtcOpus_EncoderFree(opus_mono_encoder_));
EXPECT_EQ(0, WebRtcOpus_DecoderFree(opus_mono_decoder_));
EXPECT_EQ(0, WebRtcOpus_DecoderFree(opus_mono_decoder_new_));
}
// PLC in stereo mode.
@ -432,61 +340,33 @@ TEST_F(OpusTest, OpusDecodePlcStereo) {
// Create encoder memory.
EXPECT_EQ(0, WebRtcOpus_EncoderCreate(&opus_stereo_encoder_, 2));
EXPECT_EQ(0, WebRtcOpus_DecoderCreate(&opus_stereo_decoder_, 2));
EXPECT_EQ(0, WebRtcOpus_DecoderCreate(&opus_stereo_decoder_new_, 2));
// Set bitrate.
EXPECT_EQ(0, WebRtcOpus_SetBitRate(opus_stereo_encoder_, 64000));
// Check number of channels for decoder.
EXPECT_EQ(2, WebRtcOpus_DecoderChannels(opus_stereo_decoder_));
EXPECT_EQ(2, WebRtcOpus_DecoderChannels(opus_stereo_decoder_new_));
// Encode & decode.
int16_t encoded_bytes;
int16_t audio_type;
int16_t output_data_decode_new[kOpusMaxFrameSamples];
int16_t output_data_decode[kOpusMaxFrameSamples];
int16_t output_data_decode_slave[kOpusMaxFrameSamples];
encoded_bytes = WebRtcOpus_Encode(opus_stereo_encoder_, speech_data_,
kOpus20msFrameSamples, kMaxBytes,
bitstream_);
EXPECT_EQ(kOpus20msFrameSamples,
WebRtcOpus_DecodeNew(opus_stereo_decoder_new_, bitstream_,
encoded_bytes, output_data_decode_new,
&audio_type));
EXPECT_EQ(kOpus20msFrameSamples,
WebRtcOpus_Decode(opus_stereo_decoder_, bitstream_,
encoded_bytes, output_data_decode,
&audio_type));
EXPECT_EQ(kOpus20msFrameSamples,
WebRtcOpus_DecodeSlave(opus_stereo_decoder_, bitstream_,
encoded_bytes,
output_data_decode_slave,
&audio_type));
// Call decoder PLC for both versions of the decoder.
int16_t plc_buffer_left[kOpusMaxFrameSamples];
int16_t plc_buffer_right[kOpusMaxFrameSamples];
int16_t plc_buffer_new[kOpusMaxFrameSamples];
// Call decoder PLC.
int16_t plc_buffer[kOpusMaxFrameSamples];
EXPECT_EQ(kOpus20msFrameSamples,
WebRtcOpus_DecodePlcMaster(opus_stereo_decoder_,
plc_buffer_left, 1));
EXPECT_EQ(kOpus20msFrameSamples,
WebRtcOpus_DecodePlcSlave(opus_stereo_decoder_,
plc_buffer_right, 1));
EXPECT_EQ(kOpus20msFrameSamples,
WebRtcOpus_DecodePlc(opus_stereo_decoder_new_, plc_buffer_new, 1));
// Data in |plc_buffer_left| and |plc_buffer_right|should be the same as the
// interleaved samples in |plc_buffer_new|.
for (int i = 0, j = 0; i < kOpus20msFrameSamples; i++) {
EXPECT_EQ(plc_buffer_left[i], plc_buffer_new[j++]);
EXPECT_EQ(plc_buffer_right[i], plc_buffer_new[j++]);
}
WebRtcOpus_DecodePlc(opus_stereo_decoder_, plc_buffer, 1));
// Free memory.
EXPECT_EQ(0, WebRtcOpus_EncoderFree(opus_stereo_encoder_));
EXPECT_EQ(0, WebRtcOpus_DecoderFree(opus_stereo_decoder_));
EXPECT_EQ(0, WebRtcOpus_DecoderFree(opus_stereo_decoder_new_));
}
// Duration estimation.

View File

@ -85,8 +85,8 @@ void OpusTest::Perform() {
// Create Opus decoders for mono and stereo for stand-alone testing of Opus.
ASSERT_GT(WebRtcOpus_DecoderCreate(&opus_mono_decoder_, 1), -1);
ASSERT_GT(WebRtcOpus_DecoderCreate(&opus_stereo_decoder_, 2), -1);
ASSERT_GT(WebRtcOpus_DecoderInitNew(opus_mono_decoder_), -1);
ASSERT_GT(WebRtcOpus_DecoderInitNew(opus_stereo_decoder_), -1);
ASSERT_GT(WebRtcOpus_DecoderInit(opus_mono_decoder_), -1);
ASSERT_GT(WebRtcOpus_DecoderInit(opus_stereo_decoder_), -1);
ASSERT_TRUE(acm_receiver_.get() != NULL);
EXPECT_EQ(0, acm_receiver_->InitializeReceiver());
@ -304,7 +304,7 @@ void OpusTest::Run(TestPackStereo* channel, int channels, int bitrate,
// Run stand-alone Opus decoder, or decode PLC.
if (channels == 1) {
if (!lost_packet) {
decoded_samples += WebRtcOpus_DecodeNew(
decoded_samples += WebRtcOpus_Decode(
opus_mono_decoder_, bitstream, bitstream_len_byte,
&out_audio[decoded_samples * channels], &audio_type);
} else {
@ -313,7 +313,7 @@ void OpusTest::Run(TestPackStereo* channel, int channels, int bitrate,
}
} else {
if (!lost_packet) {
decoded_samples += WebRtcOpus_DecodeNew(
decoded_samples += WebRtcOpus_Decode(
opus_stereo_decoder_, bitstream, bitstream_len_byte,
&out_audio[decoded_samples * channels], &audio_type);
} else {

View File

@ -404,7 +404,7 @@ AudioDecoderOpus::~AudioDecoderOpus() {
int AudioDecoderOpus::Decode(const uint8_t* encoded, size_t encoded_len,
int16_t* decoded, SpeechType* speech_type) {
int16_t temp_type = 1; // Default is speech.
int16_t ret = WebRtcOpus_DecodeNew(dec_state_, encoded,
int16_t ret = WebRtcOpus_Decode(dec_state_, encoded,
static_cast<int16_t>(encoded_len), decoded,
&temp_type);
if (ret > 0)
@ -427,7 +427,7 @@ int AudioDecoderOpus::DecodeRedundant(const uint8_t* encoded,
}
int AudioDecoderOpus::Init() {
return WebRtcOpus_DecoderInitNew(dec_state_);
return WebRtcOpus_DecoderInit(dec_state_);
}
int AudioDecoderOpus::PacketDuration(const uint8_t* encoded,