Adding Opus unit test

This CL adds a unit test for Opus, as well as new APIs for true stereo decoding (bypassing the master/slave approach).

BUG=

Review URL: https://webrtc-codereview.appspot.com/1222006

git-svn-id: http://webrtc.googlecode.com/svn/trunk@3860 4adac7df-926f-26a2-2b94-8c16560cd09d
This commit is contained in:
tina.legrand@webrtc.org 2013-04-17 10:39:41 +00:00
parent 4392d5f9f8
commit db11fab49e
4 changed files with 488 additions and 81 deletions

View File

@ -80,6 +80,7 @@ int WebRtcOpus_DecoderChannels(OpusDecInst* inst);
* Return value : 0 - Success
* -1 - Error
*/
int16_t WebRtcOpus_DecoderInitNew(OpusDecInst* inst);
int16_t WebRtcOpus_DecoderInit(OpusDecInst* inst);
int16_t WebRtcOpus_DecoderInitSlave(OpusDecInst* inst);
@ -103,10 +104,13 @@ int16_t WebRtcOpus_DecoderInitSlave(OpusDecInst* inst);
* Return value : >0 - Samples in decoded vector
* -1 - Error
*/
int16_t WebRtcOpus_Decode(OpusDecInst* inst, int16_t* encoded,
int16_t WebRtcOpus_DecodeNew(OpusDecInst* inst, const uint8_t* encoded,
int16_t encoded_bytes, int16_t* decoded,
int16_t* audio_type);
int16_t WebRtcOpus_Decode(OpusDecInst* inst, const int16_t* encoded,
int16_t encoded_bytes, int16_t* decoded,
int16_t* audio_type);
int16_t WebRtcOpus_DecodeSlave(OpusDecInst* inst, int16_t* encoded,
int16_t WebRtcOpus_DecodeSlave(OpusDecInst* inst, const int16_t* encoded,
int16_t encoded_bytes, int16_t* decoded,
int16_t* audio_type);
/****************************************************************************

View File

@ -8,15 +8,15 @@
* be found in the AUTHORS file in the root of the source tree.
*/
#include "modules/audio_coding/codecs/opus/interface/opus_interface.h"
#include "webrtc/modules/audio_coding/codecs/opus/interface/opus_interface.h"
#include <stdlib.h>
#include <string.h>
#include "opus.h"
#include "common_audio/signal_processing/resample_by_2_internal.h"
#include "common_audio/signal_processing/include/signal_processing_library.h"
#include "webrtc/common_audio/signal_processing/resample_by_2_internal.h"
#include "webrtc/common_audio/signal_processing/include/signal_processing_library.h"
enum {
/* Maximum supported frame size in WebRTC is 60 ms. */
@ -31,6 +31,9 @@ enum {
/* Sample count is 48 kHz * samples per frame * stereo. */
kWebRtcOpusMaxFrameSize = 48 * kWebRtcOpusMaxDecodeFrameSizeMs * 2,
/* Number of samples in resampler state. */
kWebRtcOpusStateSize = 7,
};
struct WebRtcOpusEncInst {
@ -39,27 +42,34 @@ struct WebRtcOpusEncInst {
int16_t WebRtcOpus_EncoderCreate(OpusEncInst** inst, int32_t channels) {
OpusEncInst* state;
state = (OpusEncInst*) calloc(1, sizeof(OpusEncInst));
if (state) {
int error;
// Default to VoIP application for mono, and AUDIO for stereo.
int application = (channels == 1) ?
OPUS_APPLICATION_VOIP : OPUS_APPLICATION_AUDIO;
if (inst != NULL) {
state = (OpusEncInst*) calloc(1, sizeof(OpusEncInst));
if (state) {
int error;
/* Default to VoIP application for mono, and AUDIO for stereo. */
int application =
(channels == 1) ? OPUS_APPLICATION_VOIP : OPUS_APPLICATION_AUDIO;
state->encoder = opus_encoder_create(48000, channels, application, &error);
if (error == OPUS_OK || state->encoder != NULL ) {
*inst = state;
return 0;
state->encoder = opus_encoder_create(48000, channels, application,
&error);
if (error == OPUS_OK && state->encoder != NULL) {
*inst = state;
return 0;
}
free(state);
}
free(state);
}
return -1;
}
int16_t WebRtcOpus_EncoderFree(OpusEncInst* inst) {
opus_encoder_destroy(inst->encoder);
free(inst);
return 0;
if (inst) {
opus_encoder_destroy(inst->encoder);
free(inst);
return 0;
} else {
return -1;
}
}
int16_t WebRtcOpus_Encode(OpusEncInst* inst, int16_t* audio_in, int16_t samples,
@ -82,7 +92,11 @@ int16_t WebRtcOpus_Encode(OpusEncInst* inst, int16_t* audio_in, int16_t samples,
}
/* Sets the encoder's target bitrate.
 * |rate| is in bits per second.
 * Returns 0 on success; -1 if |inst| is NULL, otherwise the result of
 * the underlying opus_encoder_ctl() call. */
int16_t WebRtcOpus_SetBitRate(OpusEncInst* inst, int32_t rate) {
  if (inst == NULL) {
    return -1;
  }
  return opus_encoder_ctl(inst->encoder, OPUS_SET_BITRATE(rate));
}
struct WebRtcOpusDecInst {
@ -98,46 +112,61 @@ int16_t WebRtcOpus_DecoderCreate(OpusDecInst** inst, int channels) {
int error_r;
OpusDecInst* state;
// Create Opus decoder memory.
state = (OpusDecInst*) calloc(1, sizeof(OpusDecInst));
if (state == NULL) {
return -1;
}
if (inst != NULL) {
/* Create Opus decoder memory. */
state = (OpusDecInst*) calloc(1, sizeof(OpusDecInst));
if (state == NULL) {
return -1;
}
// Create new memory for left and right channel, always at 48000 Hz.
state->decoder_left = opus_decoder_create(48000, channels, &error_l);
state->decoder_right = opus_decoder_create(48000, channels, &error_r);
if (error_l == OPUS_OK && error_r == OPUS_OK && state->decoder_left != NULL
&& state->decoder_right != NULL) {
// Creation of memory all ok.
state->channels = channels;
*inst = state;
return 0;
}
/* Create new memory for left and right channel, always at 48000 Hz. */
state->decoder_left = opus_decoder_create(48000, channels, &error_l);
state->decoder_right = opus_decoder_create(48000, channels, &error_r);
if (error_l == OPUS_OK && error_r == OPUS_OK && state->decoder_left != NULL
&& state->decoder_right != NULL) {
/* Creation of memory all ok. */
state->channels = channels;
*inst = state;
return 0;
}
// If memory allocation was unsuccessful, free the entire state.
if (state->decoder_left) {
opus_decoder_destroy(state->decoder_left);
/* If memory allocation was unsuccessful, free the entire state. */
if (state->decoder_left) {
opus_decoder_destroy(state->decoder_left);
}
if (state->decoder_right) {
opus_decoder_destroy(state->decoder_right);
}
free(state);
}
if (state->decoder_right) {
opus_decoder_destroy(state->decoder_right);
}
free(state);
state = NULL;
return -1;
}
int16_t WebRtcOpus_DecoderFree(OpusDecInst* inst) {
opus_decoder_destroy(inst->decoder_left);
opus_decoder_destroy(inst->decoder_right);
free(inst);
return 0;
if (inst) {
opus_decoder_destroy(inst->decoder_left);
opus_decoder_destroy(inst->decoder_right);
free(inst);
return 0;
} else {
return -1;
}
}
/* Returns the channel count stored when the decoder instance was created.
 * NOTE(review): |inst| is dereferenced without a NULL check — callers must
 * pass a valid instance. */
int WebRtcOpus_DecoderChannels(OpusDecInst* inst) {
return inst->channels;
}
/* Re-initializes the decoder used by the "new" (true stereo) decode path:
 * resets the left Opus decoder state and clears both 48->32 kHz resampler
 * state buffers.
 * Returns 0 on success, -1 if the Opus state reset fails (in which case the
 * resampler states are left untouched). */
int16_t WebRtcOpus_DecoderInitNew(OpusDecInst* inst) {
  if (opus_decoder_ctl(inst->decoder_left, OPUS_RESET_STATE) != OPUS_OK) {
    return -1;
  }
  memset(inst->state_48_32_left, 0, sizeof(inst->state_48_32_left));
  memset(inst->state_48_32_right, 0, sizeof(inst->state_48_32_right));
  return 0;
}
int16_t WebRtcOpus_DecoderInit(OpusDecInst* inst) {
int error = opus_decoder_ctl(inst->decoder_left, OPUS_RESET_STATE);
if (error == OPUS_OK) {
@ -156,7 +185,7 @@ int16_t WebRtcOpus_DecoderInitSlave(OpusDecInst* inst) {
return -1;
}
static int DecodeNative(OpusDecoder* inst, int16_t* encoded,
static int DecodeNative(OpusDecoder* inst, const int16_t* encoded,
int16_t encoded_bytes, int16_t* decoded,
int16_t* audio_type) {
unsigned char* coded = (unsigned char*) encoded;
@ -173,16 +202,113 @@ static int DecodeNative(OpusDecoder* inst, int16_t* encoded,
return -1;
}
int16_t WebRtcOpus_Decode(OpusDecInst* inst, int16_t* encoded,
/* Resample from 48 to 32 kHz. Length of state is assumed to be
* kWebRtcOpusStateSize (7).
*/
/* Downsamples |length| samples from 48 kHz to 32 kHz (3:2 ratio).
 * |state| carries the kWebRtcOpusStateSize (7) most recent input samples
 * between calls and is updated here.
 * Returns the number of samples written to |samples_out|. */
static int WebRtcOpus_Resample48to32(const int16_t* samples_in, int length,
                                     int16_t* state, int16_t* samples_out) {
  int32_t scratch[kWebRtcOpusMaxFrameSize + kWebRtcOpusStateSize];
  int num_blocks;
  int16_t num_out;
  int k;

  /* Prepend the saved filter state to the input, and stash the tail of this
   * input as the state for the next call. */
  for (k = 0; k < kWebRtcOpusStateSize; k++) {
    scratch[k] = state[k];
    state[k] = samples_in[length - kWebRtcOpusStateSize + k];
  }
  for (k = 0; k < length; k++) {
    scratch[kWebRtcOpusStateSize + k] = samples_in[k];
  }

  /* The resampler maps every 3-sample input group to 2 output samples.
   * When this is removed, the compensation in WebRtcOpus_DurationEst should
   * be removed too. */
  num_blocks = length / 3;
  WebRtcSpl_Resample48khzTo32khz(scratch, scratch, num_blocks);
  num_out = (int16_t) (num_blocks * 2);
  WebRtcSpl_VectorBitShiftW32ToW16(samples_out, num_out, scratch, 15);
  return num_out;
}
/* Decodes an Opus packet straight to 32 kHz PCM, without the master/slave
 * split. Mono packets produce a plain mono vector; stereo packets produce
 * interleaved L/R output.
 * Returns the number of decoded samples per channel, or -1 on error.
 * Note: kWebRtcOpusMaxFrameSize already includes the factor 2 for stereo,
 * so the temporary buffers below are large enough for both cases. */
int16_t WebRtcOpus_DecodeNew(OpusDecInst* inst, const uint8_t* encoded,
                             int16_t encoded_bytes, int16_t* decoded,
                             int16_t* audio_type) {
  int16_t buffer16_left[kWebRtcOpusMaxFrameSize];
  int16_t buffer16_right[kWebRtcOpusMaxFrameSize];
  int16_t buffer_out[kWebRtcOpusMaxFrameSize];
  /* Keep const: DecodeNative takes a const pointer. */
  const int16_t* coded = (const int16_t*) encoded;
  int decoded_samples;
  int resampled_samples;
  int i;

  /* Decode to a temporary buffer. For stereo, the decoder output is
   * interleaved; we de-interleave it into left/right blocks, resample each
   * to 32 kHz, and re-interleave into |decoded|. */
  decoded_samples = DecodeNative(inst->decoder_left, coded, encoded_bytes,
                                 buffer16_left, audio_type);
  if (decoded_samples < 0) {
    return -1;
  }

  if (inst->channels == 2) {
    /* |decoded_samples| counts sample PAIRS for stereo, so |buffer16_left|
     * holds |decoded_samples| * 2 interleaved samples. The in-place
     * de-interleave is safe: at index i we read 2i and 2i + 1, which have
     * not yet been overwritten since i <= 2i. */
    for (i = 0; i < decoded_samples; i++) {
      buffer16_right[i] = buffer16_left[i * 2 + 1];
      buffer16_left[i] = buffer16_left[i * 2];
    }

    /* Resample left channel and write to the even output slots. */
    resampled_samples = WebRtcOpus_Resample48to32(buffer16_left,
                                                  decoded_samples,
                                                  inst->state_48_32_left,
                                                  buffer_out);
    for (i = 0; i < resampled_samples; i++) {
      decoded[i * 2] = buffer_out[i];
    }

    /* Resample right channel and write to the odd output slots.
     * Bug fix: this loop must be bounded by |resampled_samples|, not
     * |decoded_samples|; the larger bound copied stale data from
     * |buffer_out| past the valid output region of |decoded|. */
    resampled_samples = WebRtcOpus_Resample48to32(buffer16_right,
                                                  decoded_samples,
                                                  inst->state_48_32_right,
                                                  buffer_out);
    for (i = 0; i < resampled_samples; i++) {
      decoded[i * 2 + 1] = buffer_out[i];
    }
  } else {
    /* Mono: resample directly into the output vector. */
    resampled_samples = WebRtcOpus_Resample48to32(buffer16_left,
                                                  decoded_samples,
                                                  inst->state_48_32_left,
                                                  decoded);
  }
  return resampled_samples;
}
int16_t WebRtcOpus_Decode(OpusDecInst* inst, const int16_t* encoded,
int16_t encoded_bytes, int16_t* decoded,
int16_t* audio_type) {
/* Enough for 120 ms (the largest Opus packet size) of mono audio at 48 kHz
* and resampler overlap. This will need to be enlarged for stereo decoding.
*/
int16_t buffer16[kWebRtcOpusMaxFrameSize];
int32_t buffer32[kWebRtcOpusMaxFrameSize + 7];
int decoded_samples;
int blocks;
int16_t output_samples;
int i;
@ -208,36 +334,22 @@ int16_t WebRtcOpus_Decode(OpusDecInst* inst, int16_t* encoded,
buffer16[i] = buffer16[i * 2];
}
}
/* Resample from 48 kHz to 32 kHz. */
for (i = 0; i < 7; i++) {
buffer32[i] = inst->state_48_32_left[i];
inst->state_48_32_left[i] = buffer16[decoded_samples - 7 + i];
}
for (i = 0; i < decoded_samples; i++) {
buffer32[7 + i] = buffer16[i];
}
/* Resampling 3 samples to 2. Function divides the input in |blocks| number
* of 3-sample groups, and output is |blocks| number of 2-sample groups.
* When this is removed, the compensation in WebRtcOpus_DurationEst should be
* removed too. */
blocks = decoded_samples / 3;
WebRtcSpl_Resample48khzTo32khz(buffer32, buffer32, blocks);
output_samples = (int16_t) (blocks * 2);
WebRtcSpl_VectorBitShiftW32ToW16(decoded, output_samples, buffer32, 15);
output_samples = WebRtcOpus_Resample48to32(buffer16, decoded_samples,
inst->state_48_32_left, decoded);
return output_samples;
}
int16_t WebRtcOpus_DecodeSlave(OpusDecInst* inst, int16_t* encoded,
int16_t WebRtcOpus_DecodeSlave(OpusDecInst* inst, const int16_t* encoded,
int16_t encoded_bytes, int16_t* decoded,
int16_t* audio_type) {
/* Enough for 120 ms (the largest Opus packet size) of mono audio at 48 kHz
* and resampler overlap. This will need to be enlarged for stereo decoding.
*/
int16_t buffer16[kWebRtcOpusMaxFrameSize];
int32_t buffer32[kWebRtcOpusMaxFrameSize + 7];
int decoded_samples;
int blocks;
int16_t output_samples;
int i;
@ -261,19 +373,8 @@ int16_t WebRtcOpus_DecodeSlave(OpusDecInst* inst, int16_t* encoded,
return -1;
}
/* Resample from 48 kHz to 32 kHz. */
for (i = 0; i < 7; i++) {
buffer32[i] = inst->state_48_32_right[i];
inst->state_48_32_right[i] = buffer16[decoded_samples - 7 + i];
}
for (i = 0; i < decoded_samples; i++) {
buffer32[7 + i] = buffer16[i];
}
/* Resampling 3 samples to 2. Function divides the input in |blocks| number
* of 3-sample groups, and output is |blocks| number of 2-sample groups. */
blocks = decoded_samples / 3;
WebRtcSpl_Resample48khzTo32khz(buffer32, buffer32, blocks);
output_samples = (int16_t) (blocks * 2);
WebRtcSpl_VectorBitShiftW32ToW16(decoded, output_samples, buffer32, 15);
output_samples = WebRtcOpus_Resample48to32(buffer16, decoded_samples,
inst->state_48_32_right, decoded);
return output_samples;
}

View File

@ -0,0 +1,301 @@
/*
* Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include <string>
#include "testing/gtest/include/gtest/gtest.h"
#include "webrtc/modules/audio_coding/codecs/opus/interface/opus_interface.h"
#include "webrtc/test/testsupport/fileutils.h"
struct WebRtcOpusEncInst;
struct WebRtcOpusDecInst;
namespace webrtc {
// Number of samples in a 60 ms stereo frame, sampled at 48 kHz.
enum { kOpusNumberOfSamples = 480 * 6 * 2 };
// Maximum number of bytes in output bitstream.
enum { kMaxBytes = 1000 };
// Test fixture for the Opus codec wrapper. Holds encoder/decoder instance
// pointers (created and freed by the individual tests) and input/output
// sample buffers filled in SetUp().
class OpusTest : public ::testing::Test {
protected:
OpusTest();
virtual void SetUp();
// Instance pointers; NULL until a test creates them.
WebRtcOpusEncInst* opus_mono_encoder_;
WebRtcOpusEncInst* opus_stereo_encoder_;
WebRtcOpusDecInst* opus_mono_decoder_;
WebRtcOpusDecInst* opus_mono_decoder_new_;
WebRtcOpusDecInst* opus_stereo_decoder_;
WebRtcOpusDecInst* opus_stereo_decoder_new_;
// Speech samples read from file in SetUp().
int16_t speech_data_[kOpusNumberOfSamples];
int16_t output_data_[kOpusNumberOfSamples];
// Encoded bitstream buffer, at most kMaxBytes long.
uint8_t bitstream_[kMaxBytes];
};
// Initializes all instance pointers to NULL; each test creates only the
// instances it needs and frees them itself.
OpusTest::OpusTest()
: opus_mono_encoder_(NULL),
opus_stereo_encoder_(NULL),
opus_mono_decoder_(NULL),
opus_mono_decoder_new_(NULL),
opus_stereo_decoder_(NULL),
opus_stereo_decoder_new_(NULL) {
}
// Loads speech samples for the encode tests. The resource file is really
// sampled at 32000 Hz, but for these tests we pretend it is 48000 Hz.
void OpusTest::SetUp() {
  const std::string file_name =
      webrtc::test::ResourcePath("audio_coding/testfile32kHz", "pcm");
  FILE* input_file = fopen(file_name.c_str(), "rb");
  ASSERT_TRUE(input_file != NULL);
  size_t samples_read = fread(speech_data_, sizeof(int16_t),
                              kOpusNumberOfSamples, input_file);
  ASSERT_EQ(kOpusNumberOfSamples, static_cast<int32_t>(samples_read));
  fclose(input_file);
}
// Test failing Create.
TEST_F(OpusTest, OpusCreateFail) {
// Test to see that an invalid pointer is caught.
EXPECT_EQ(-1, WebRtcOpus_EncoderCreate(NULL, 1));
EXPECT_EQ(-1, WebRtcOpus_EncoderCreate(&opus_mono_encoder_, 3));
EXPECT_EQ(-1, WebRtcOpus_DecoderCreate(NULL, 1));
EXPECT_EQ(-1, WebRtcOpus_DecoderCreate(&opus_mono_decoder_, 3));
}
// Test failing Free.
TEST_F(OpusTest, OpusFreeFail) {
// Test to see that an invalid pointer is caught.
EXPECT_EQ(-1, WebRtcOpus_EncoderFree(NULL));
EXPECT_EQ(-1, WebRtcOpus_DecoderFree(NULL));
}
// Test normal Create and Free.
TEST_F(OpusTest, OpusCreateFree) {
EXPECT_EQ(0, WebRtcOpus_EncoderCreate(&opus_mono_encoder_, 1));
EXPECT_EQ(0, WebRtcOpus_DecoderCreate(&opus_mono_decoder_, 1));
EXPECT_EQ(0, WebRtcOpus_EncoderCreate(&opus_stereo_encoder_, 2));
EXPECT_EQ(0, WebRtcOpus_DecoderCreate(&opus_stereo_decoder_, 2));
EXPECT_TRUE(opus_mono_encoder_ != NULL);
EXPECT_TRUE(opus_mono_decoder_ != NULL);
EXPECT_TRUE(opus_stereo_encoder_ != NULL);
EXPECT_TRUE(opus_stereo_decoder_ != NULL);
// Free encoder and decoder memory.
EXPECT_EQ(0, WebRtcOpus_EncoderFree(opus_mono_encoder_));
EXPECT_EQ(0, WebRtcOpus_DecoderFree(opus_mono_decoder_));
EXPECT_EQ(0, WebRtcOpus_EncoderFree(opus_stereo_encoder_));
EXPECT_EQ(0, WebRtcOpus_DecoderFree(opus_stereo_decoder_));
}
// Encodes one 20 ms mono frame (960 samples at 48 kHz) and decodes it with
// both the new API and the old API; both must produce 640 samples of
// identical 32 kHz output.
TEST_F(OpusTest, OpusEncodeDecodeMono) {
  // Create encoder and decoder memory.
  EXPECT_EQ(0, WebRtcOpus_EncoderCreate(&opus_mono_encoder_, 1));
  EXPECT_EQ(0, WebRtcOpus_DecoderCreate(&opus_mono_decoder_, 1));
  EXPECT_EQ(0, WebRtcOpus_DecoderCreate(&opus_mono_decoder_new_, 1));

  // Set bitrate.
  EXPECT_EQ(0, WebRtcOpus_SetBitRate(opus_mono_encoder_, 32000));

  // Check number of channels for decoder.
  EXPECT_EQ(1, WebRtcOpus_DecoderChannels(opus_mono_decoder_));
  EXPECT_EQ(1, WebRtcOpus_DecoderChannels(opus_mono_decoder_new_));

  // Encode & decode.
  int16_t encoded_bytes;
  int16_t audio_type;
  int16_t output_data_decode_new[kOpusNumberOfSamples];
  int16_t output_data_decode[kOpusNumberOfSamples];
  int16_t* coded = reinterpret_cast<int16_t*>(bitstream_);
  encoded_bytes = WebRtcOpus_Encode(opus_mono_encoder_, speech_data_, 960,
                                    kMaxBytes, bitstream_);
  // Guard against a failed encode; a negative byte count must not be
  // forwarded to the decoders.
  ASSERT_GT(encoded_bytes, 0);
  EXPECT_EQ(640, WebRtcOpus_DecodeNew(opus_mono_decoder_new_, bitstream_,
                                      encoded_bytes, output_data_decode_new,
                                      &audio_type));
  EXPECT_EQ(640, WebRtcOpus_Decode(opus_mono_decoder_, coded,
                                   encoded_bytes, output_data_decode,
                                   &audio_type));

  // Data in |output_data_decode_new| should be the same as in
  // |output_data_decode|.
  for (int i = 0; i < 640; i++) {
    EXPECT_EQ(output_data_decode_new[i], output_data_decode[i]);
  }

  // Free memory.
  EXPECT_EQ(0, WebRtcOpus_EncoderFree(opus_mono_encoder_));
  EXPECT_EQ(0, WebRtcOpus_DecoderFree(opus_mono_decoder_));
  EXPECT_EQ(0, WebRtcOpus_DecoderFree(opus_mono_decoder_new_));
}
// Encodes one 20 ms stereo frame and decodes it three ways: with the new
// true-stereo API, and with the old master (left) / slave (right) decoders.
// The new API output must equal the master and slave outputs interleaved.
TEST_F(OpusTest, OpusEncodeDecodeStereo) {
  // Create encoder and decoder memory.
  EXPECT_EQ(0, WebRtcOpus_EncoderCreate(&opus_stereo_encoder_, 2));
  EXPECT_EQ(0, WebRtcOpus_DecoderCreate(&opus_stereo_decoder_, 2));
  EXPECT_EQ(0, WebRtcOpus_DecoderCreate(&opus_stereo_decoder_new_, 2));

  // Set bitrate.
  EXPECT_EQ(0, WebRtcOpus_SetBitRate(opus_stereo_encoder_, 64000));

  // Check number of channels for decoder.
  EXPECT_EQ(2, WebRtcOpus_DecoderChannels(opus_stereo_decoder_));
  EXPECT_EQ(2, WebRtcOpus_DecoderChannels(opus_stereo_decoder_new_));

  // Encode & decode.
  int16_t encoded_bytes;
  int16_t audio_type;
  int16_t output_data_decode_new[kOpusNumberOfSamples];
  int16_t output_data_decode[kOpusNumberOfSamples];
  int16_t output_data_decode_slave[kOpusNumberOfSamples];
  int16_t* coded = reinterpret_cast<int16_t*>(bitstream_);
  encoded_bytes = WebRtcOpus_Encode(opus_stereo_encoder_, speech_data_, 960,
                                    kMaxBytes, bitstream_);
  // Guard against a failed encode; a negative byte count must not be
  // forwarded to the decoders.
  ASSERT_GT(encoded_bytes, 0);
  EXPECT_EQ(640, WebRtcOpus_DecodeNew(opus_stereo_decoder_new_, bitstream_,
                                      encoded_bytes, output_data_decode_new,
                                      &audio_type));
  EXPECT_EQ(640, WebRtcOpus_Decode(opus_stereo_decoder_, coded,
                                   encoded_bytes, output_data_decode,
                                   &audio_type));
  EXPECT_EQ(640, WebRtcOpus_DecodeSlave(opus_stereo_decoder_, coded,
                                        encoded_bytes,
                                        output_data_decode_slave,
                                        &audio_type));

  // Data in |output_data_decode_new| should be the same as in
  // |output_data_decode| and |output_data_decode_slave| interleaved to a
  // stereo signal.
  for (int i = 0; i < 640; i++) {
    EXPECT_EQ(output_data_decode_new[i * 2], output_data_decode[i]);
    EXPECT_EQ(output_data_decode_new[i * 2 + 1], output_data_decode_slave[i]);
  }

  // Free memory.
  EXPECT_EQ(0, WebRtcOpus_EncoderFree(opus_stereo_encoder_));
  EXPECT_EQ(0, WebRtcOpus_DecoderFree(opus_stereo_decoder_));
  EXPECT_EQ(0, WebRtcOpus_DecoderFree(opus_stereo_decoder_new_));
}
// WebRtcOpus_SetBitRate must fail on a NULL instance and accept a range of
// rates on valid mono and stereo encoders.
TEST_F(OpusTest, OpusSetBitRate) {
  // Calls before the encoders exist (pointers are still NULL) must fail.
  EXPECT_EQ(-1, WebRtcOpus_SetBitRate(opus_mono_encoder_, 60000));
  EXPECT_EQ(-1, WebRtcOpus_SetBitRate(opus_stereo_encoder_, 60000));

  // Create encoder memory, then try different bitrates per encoder.
  EXPECT_EQ(0, WebRtcOpus_EncoderCreate(&opus_mono_encoder_, 1));
  EXPECT_EQ(0, WebRtcOpus_EncoderCreate(&opus_stereo_encoder_, 2));
  EXPECT_EQ(0, WebRtcOpus_SetBitRate(opus_mono_encoder_, 30000));
  EXPECT_EQ(0, WebRtcOpus_SetBitRate(opus_mono_encoder_, 300000));
  EXPECT_EQ(0, WebRtcOpus_SetBitRate(opus_stereo_encoder_, 60000));
  EXPECT_EQ(0, WebRtcOpus_SetBitRate(opus_stereo_encoder_, 600000));

  // Free memory.
  EXPECT_EQ(0, WebRtcOpus_EncoderFree(opus_mono_encoder_));
  EXPECT_EQ(0, WebRtcOpus_EncoderFree(opus_stereo_encoder_));
}
// Encode and decode one frame (stereo), initialize the decoder and
// decode once more.
// Encode and decode one frame (stereo), re-initialize the decoders, and
// decode once more. Decoding must still succeed and the interleaving
// property of the new API must hold both before and after the reset.
TEST_F(OpusTest, OpusDecodeInit) {
  // Create encoder and decoder memory.
  EXPECT_EQ(0, WebRtcOpus_EncoderCreate(&opus_stereo_encoder_, 2));
  EXPECT_EQ(0, WebRtcOpus_DecoderCreate(&opus_stereo_decoder_, 2));
  EXPECT_EQ(0, WebRtcOpus_DecoderCreate(&opus_stereo_decoder_new_, 2));

  // Encode & decode.
  int16_t encoded_bytes;
  int16_t audio_type;
  int16_t output_data_decode_new[kOpusNumberOfSamples];
  int16_t output_data_decode[kOpusNumberOfSamples];
  int16_t output_data_decode_slave[kOpusNumberOfSamples];
  int16_t* coded = reinterpret_cast<int16_t*>(bitstream_);
  encoded_bytes = WebRtcOpus_Encode(opus_stereo_encoder_, speech_data_, 960,
                                    kMaxBytes, bitstream_);
  // Guard against a failed encode; a negative byte count must not be
  // forwarded to the decoders.
  ASSERT_GT(encoded_bytes, 0);
  EXPECT_EQ(640, WebRtcOpus_DecodeNew(opus_stereo_decoder_new_, bitstream_,
                                      encoded_bytes, output_data_decode_new,
                                      &audio_type));
  EXPECT_EQ(640, WebRtcOpus_Decode(opus_stereo_decoder_, coded,
                                   encoded_bytes, output_data_decode,
                                   &audio_type));
  EXPECT_EQ(640, WebRtcOpus_DecodeSlave(opus_stereo_decoder_, coded,
                                        encoded_bytes,
                                        output_data_decode_slave,
                                        &audio_type));

  // Data in |output_data_decode_new| should be the same as in
  // |output_data_decode| and |output_data_decode_slave| interleaved to a
  // stereo signal.
  for (int i = 0; i < 640; i++) {
    EXPECT_EQ(output_data_decode_new[i * 2], output_data_decode[i]);
    EXPECT_EQ(output_data_decode_new[i * 2 + 1], output_data_decode_slave[i]);
  }

  // Re-initialize all decoders and decode the same packet again.
  EXPECT_EQ(0, WebRtcOpus_DecoderInitNew(opus_stereo_decoder_new_));
  EXPECT_EQ(0, WebRtcOpus_DecoderInit(opus_stereo_decoder_));
  EXPECT_EQ(0, WebRtcOpus_DecoderInitSlave(opus_stereo_decoder_));
  EXPECT_EQ(640, WebRtcOpus_DecodeNew(opus_stereo_decoder_new_, bitstream_,
                                      encoded_bytes, output_data_decode_new,
                                      &audio_type));
  EXPECT_EQ(640, WebRtcOpus_Decode(opus_stereo_decoder_, coded,
                                   encoded_bytes, output_data_decode,
                                   &audio_type));
  EXPECT_EQ(640, WebRtcOpus_DecodeSlave(opus_stereo_decoder_, coded,
                                        encoded_bytes,
                                        output_data_decode_slave,
                                        &audio_type));

  // The interleaving property must hold after re-initialization too.
  for (int i = 0; i < 640; i++) {
    EXPECT_EQ(output_data_decode_new[i * 2], output_data_decode[i]);
    EXPECT_EQ(output_data_decode_new[i * 2 + 1], output_data_decode_slave[i]);
  }

  // Free memory.
  EXPECT_EQ(0, WebRtcOpus_EncoderFree(opus_stereo_encoder_));
  EXPECT_EQ(0, WebRtcOpus_DecoderFree(opus_stereo_decoder_));
  EXPECT_EQ(0, WebRtcOpus_DecoderFree(opus_stereo_decoder_new_));
}
// PLC not implemented.
// PLC not implemented.
TEST_F(OpusTest, OpusDecodePlc) {
int16_t plc_buffer[kOpusNumberOfSamples];
// NOTE(review): |opus_stereo_decoder_| is never created in this test, so
// WebRtcOpus_DecodePlc receives a NULL instance — confirm the function
// NULL-checks its argument rather than relying on PLC being unimplemented.
EXPECT_EQ(-1, WebRtcOpus_DecodePlc(opus_stereo_decoder_, plc_buffer, 1));
}
// Duration estimation.
TEST_F(OpusTest, OpusDurationEstimation) {
// Create.
EXPECT_EQ(0, WebRtcOpus_EncoderCreate(&opus_stereo_encoder_, 2));
EXPECT_EQ(0, WebRtcOpus_DecoderCreate(&opus_stereo_decoder_, 2));
// Encode with different packet sizes (input 48 kHz, output in 32 kHz).
int16_t encoded_bytes;
// 10 ms.
encoded_bytes = WebRtcOpus_Encode(opus_stereo_encoder_, speech_data_, 480,
kMaxBytes, bitstream_);
EXPECT_EQ(320, WebRtcOpus_DurationEst(opus_stereo_decoder_, bitstream_,
encoded_bytes));
// 20 ms
encoded_bytes = WebRtcOpus_Encode(opus_stereo_encoder_, speech_data_, 960,
kMaxBytes, bitstream_);
EXPECT_EQ(640, WebRtcOpus_DurationEst(opus_stereo_decoder_, bitstream_,
encoded_bytes));
// Free memory.
EXPECT_EQ(0, WebRtcOpus_EncoderFree(opus_stereo_encoder_));
EXPECT_EQ(0, WebRtcOpus_DecoderFree(opus_stereo_decoder_));
}
} // namespace webrtc

View File

@ -190,6 +190,7 @@
'../../codecs/isac/fix/source/filterbanks_unittest.cc',
'../../codecs/isac/fix/source/lpc_masking_model_unittest.cc',
'../../codecs/isac/fix/source/transform_unittest.cc',
'../../codecs/opus/opus_unittest.cc',
# Test for NetEq 4.
'../../neteq4/audio_multi_vector_unittest.cc',
'../../neteq4/audio_vector_unittest.cc',