Make an AudioEncoder subclass for RED

This class only supports the simple case of payload duplication: a single
encoder is used, and the redundant payload is the previous (one-step
delayed) primary payload.
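
For illustration, a minimal sketch of how a caller might wrap an existing
speech encoder in the new class (the function name, payload type, and mono
assumption below are illustrative only, not part of this change):

#include "webrtc/modules/audio_coding/codecs/red/audio_encoder_copy_red.h"

// Hypothetical sender loop: wraps a speech encoder in RED and encodes a
// sequence of 10 ms blocks. The caller keeps ownership of |speech_encoder|.
void RunRedSender(webrtc::AudioEncoder* speech_encoder,
                  const int16_t* audio,  // num_blocks * 10 ms of mono audio.
                  size_t num_blocks,
                  uint8_t* encoded,  // Output scratch buffer.
                  size_t max_encoded_bytes) {
  webrtc::AudioEncoderCopyRed::Config config;
  config.payload_type = 127;               // Arbitrary RED payload type.
  config.speech_encoder = speech_encoder;  // Not owned by the RED encoder.
  webrtc::AudioEncoderCopyRed red(config);

  const size_t samples_per_block =
      static_cast<size_t>(red.sample_rate_hz() / 100);
  uint32_t timestamp = 0;
  for (size_t i = 0; i < num_blocks; ++i) {
    webrtc::AudioEncoder::EncodedInfo info;
    // From the second block onwards, each packet also carries the previous
    // payload as the redundant (secondary) encoding.
    if (!red.Encode(timestamp, audio + i * samples_per_block,
                    samples_per_block, max_encoded_bytes, encoded, &info)) {
      return;
    }
    timestamp += static_cast<uint32_t>(samples_per_block);
    // info.encoded_bytes, info.payload_type and info.redundant describe the
    // packet now stored in |encoded|.
  }
}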

BUG=3926
R=kjellander@webrtc.org, kwiberg@webrtc.org

Review URL: https://webrtc-codereview.appspot.com/31199004

git-svn-id: http://webrtc.googlecode.com/svn/trunk@7913 4adac7df-926f-26a2-2b94-8c16560cd09d
henrik.lundin@webrtc.org 2014-12-16 13:41:36 +00:00
parent 88bdec8c3a
commit c1c9291e9b
17 changed files with 624 additions and 6 deletions

View File

@ -101,6 +101,7 @@ source_set("audio_coding") {
":isacfix",
":neteq",
":pcm16b",
":red",
"../../common_audio",
"../../system_wrappers",
]
@ -120,6 +121,15 @@ source_set("audio_decoder_interface") {
public_configs = [ "../..:common_inherited_config" ]
}
source_set("audio_encoder_interface") {
sources = [
"codecs/audio_encoder.cc",
"codecs/audio_encoder.h",
]
configs += [ "../..:common_config" ]
public_configs = [ "../..:common_inherited_config" ]
}
config("cng_config") {
include_dirs = [
"../../..",
@ -144,7 +154,35 @@ source_set("cng") {
":cng_config",
]
deps = [ "../../common_audio" ]
deps = [
"../../common_audio",
":audio_encoder_interface",
]
}
config("red_config") {
include_dirs = [
"codecs/red",
]
}
source_set("red") {
sources = [
"codecs/red/audio_encoder_copy_red.cc",
"codecs/red/audio_encoder_copy_red.h",
]
configs += [ "../..:common_config" ]
public_configs = [
"../..:common_inherited_config",
":red_config",
]
deps = [
"../../common_audio",
":audio_encoder_interface",
]
}
config("g711_config") {
@ -170,6 +208,8 @@ source_set("g711") {
"../..:common_inherited_config",
":g711_config",
]
deps = [ ":audio_encoder_interface" ]
}
config("g722_config") {
@ -196,6 +236,8 @@ source_set("g722") {
"../..:common_inherited_config",
":g722_config",
]
deps = [ ":audio_encoder_interface" ]
}
config("ilbc_config") {
@ -357,7 +399,10 @@ source_set("ilbc") {
":ilbc_config",
]
deps = [ "../../common_audio" ]
deps = [
"../../common_audio",
":audio_encoder_interface",
]
}
config("isac_config") {
@ -443,6 +488,7 @@ source_set("isac") {
deps = [
":audio_decoder_interface",
":audio_encoder_interface",
"../../common_audio",
]
}
@ -520,6 +566,7 @@ source_set("isacfix") {
]
deps = [
":audio_encoder_interface",
"../../common_audio",
"../../system_wrappers",
]
@ -631,6 +678,7 @@ source_set("pcm16b") {
]
deps = [
":audio_encoder_interface",
":g711",
]
@ -654,13 +702,16 @@ source_set("webrtc_opus") {
"codecs/opus/opus_inst.h",
"codecs/opus/opus_interface.c",
]
deps = [ ":audio_encoder_interface" ]
if (build_with_mozilla) {
include_dirs = [ getenv("DIST") + "/include/opus" ]
} else {
configs += [ "../..:common_config" ]
public_configs = [ "../..:common_inherited_config" ]
deps = [ "//third_party/opus" ]
deps += [ "//third_party/opus" ]
}
}

View File

@ -0,0 +1,21 @@
/*
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "webrtc/modules/audio_coding/codecs/audio_encoder.h"
namespace webrtc {
AudioEncoder::EncodedInfo::EncodedInfo() : EncodedInfoLeaf() {
}
AudioEncoder::EncodedInfo::~EncodedInfo() {
}
} // namespace webrtc

View File

@ -12,6 +12,7 @@
#define WEBRTC_MODULES_AUDIO_CODING_CODECS_AUDIO_ENCODER_H_
#include <algorithm>
#include <vector>
#include "webrtc/base/checks.h"
#include "webrtc/typedefs.h"
@ -19,17 +20,35 @@
namespace webrtc {
// This is the interface class for encoders in AudioCoding module. Each codec
// codec type must have an implementation of this class.
// type must have an implementation of this class.
class AudioEncoder {
public:
struct EncodedInfo {
EncodedInfo() : encoded_bytes(0), encoded_timestamp(0), payload_type(0) {}
struct EncodedInfoLeaf {
EncodedInfoLeaf()
: encoded_bytes(0), encoded_timestamp(0), payload_type(0) {}
size_t encoded_bytes;
uint32_t encoded_timestamp;
int payload_type;
};
// This is the main struct for auxiliary encoding information. Each encoded
// packet should be accompanied by one EncodedInfo struct, containing the
// total number of |encoded_bytes|, the |encoded_timestamp| and the
// |payload_type|. If the packet contains redundant encodings, the |redundant|
// vector will be populated with EncodedInfoLeaf structs. Each struct in the
// vector represents one encoding; the order of structs in the vector is the
// same as the order in which the actual payloads are written to the byte
// stream. When EncodedInfoLeaf structs are present in the vector, the main
// struct's |encoded_bytes| will be the sum of all the |encoded_bytes| in the
// vector.
struct EncodedInfo : public EncodedInfoLeaf {
EncodedInfo();
~EncodedInfo();
std::vector<EncodedInfoLeaf> redundant;
};
virtual ~AudioEncoder() {}
// Accepts one 10 ms block of input audio (i.e., sample_rate_hz() / 100 *

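To make the layout described above concrete, a sketch of how a packetizer
might walk one packet's redundant encodings (the helper name is hypothetical;
it assumes the payloads lie back to back in |encoded|, in the same order as
the |redundant| vector):

#include <cassert>
#include <cstddef>

#include "webrtc/modules/audio_coding/codecs/audio_encoder.h"

// Hypothetical helper: maps each redundant encoding of one packet to its
// byte range in |encoded|.
void InspectRedundantPayloads(const webrtc::AudioEncoder::EncodedInfo& info,
                              const uint8_t* encoded) {
  (void)encoded;  // The offsets computed below index into this buffer.
  size_t offset = 0;
  for (size_t i = 0; i < info.redundant.size(); ++i) {
    const webrtc::AudioEncoder::EncodedInfoLeaf& leaf = info.redundant[i];
    // Encoding i occupies [offset, offset + leaf.encoded_bytes) in |encoded|
    // and carries its own encoded_timestamp and payload_type.
    offset += leaf.encoded_bytes;
  }
  // With redundant encodings present, the main encoded_bytes is their sum;
  // with none, the packet is a single payload of encoded_bytes bytes.
  assert(info.redundant.empty() || offset == info.encoded_bytes);
}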
View File

@ -13,6 +13,7 @@
'type': 'static_library',
'dependencies': [
'<(webrtc_root)/common_audio/common_audio.gyp:common_audio',
'audio_encoder_interface',
],
'include_dirs': [
'include',

View File

@ -11,6 +11,9 @@
{
'target_name': 'G711',
'type': 'static_library',
'dependencies': [
'audio_encoder_interface',
],
'include_dirs': [
'include',
'<(webrtc_root)',

View File

@ -10,6 +10,9 @@
{
'target_name': 'G722',
'type': 'static_library',
'dependencies': [
'audio_encoder_interface',
],
'include_dirs': [
'include',
'<(webrtc_root)',

View File

@ -13,6 +13,7 @@
'type': 'static_library',
'dependencies': [
'<(webrtc_root)/common_audio/common_audio.gyp:common_audio',
'audio_encoder_interface',
],
'include_dirs': [
'interface',

View File

@ -16,5 +16,14 @@
'audio_decoder.h',
],
},
{
'target_name': 'audio_encoder_interface',
'type': 'static_library',
'sources': [
'audio_encoder.cc',
'audio_encoder.h',
],
},
],
}

View File

@ -14,6 +14,7 @@
'dependencies': [
'<(webrtc_root)/common_audio/common_audio.gyp:common_audio',
'audio_decoder_interface',
'audio_encoder_interface',
],
'include_dirs': [
'../interface',

View File

@ -23,6 +23,9 @@
],
}],
],
'dependencies': [
'audio_encoder_interface',
],
'include_dirs': [
'<(webrtc_root)',
],

View File

@ -12,6 +12,7 @@
'target_name': 'PCM16B',
'type': 'static_library',
'dependencies': [
'audio_encoder_interface',
'G711',
],
'include_dirs': [

View File

@ -0,0 +1,88 @@
/*
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "webrtc/modules/audio_coding/codecs/red/audio_encoder_copy_red.h"
#include <string.h>
namespace webrtc {
AudioEncoderCopyRed::AudioEncoderCopyRed(const Config& config)
: speech_encoder_(config.speech_encoder),
red_payload_type_(config.payload_type),
secondary_allocated_(0) {
CHECK(speech_encoder_) << "Speech encoder not provided.";
}
AudioEncoderCopyRed::~AudioEncoderCopyRed() {
}
int AudioEncoderCopyRed::sample_rate_hz() const {
return speech_encoder_->sample_rate_hz();
}
int AudioEncoderCopyRed::num_channels() const {
return speech_encoder_->num_channels();
}
int AudioEncoderCopyRed::Num10MsFramesInNextPacket() const {
return speech_encoder_->Num10MsFramesInNextPacket();
}
int AudioEncoderCopyRed::Max10MsFramesInAPacket() const {
return speech_encoder_->Max10MsFramesInAPacket();
}
bool AudioEncoderCopyRed::EncodeInternal(uint32_t timestamp,
const int16_t* audio,
size_t max_encoded_bytes,
uint8_t* encoded,
EncodedInfo* info) {
if (!speech_encoder_->Encode(timestamp, audio,
static_cast<size_t>(sample_rate_hz() / 100),
max_encoded_bytes, encoded, info))
return false;
if (max_encoded_bytes < info->encoded_bytes + secondary_info_.encoded_bytes)
return false;
CHECK(info->redundant.empty()) << "Cannot use nested redundant encoders.";
if (info->encoded_bytes > 0) {
// |info| is copied into the vector as an EncodedInfoLeaf (object slicing),
// discarding the (empty) vector of redundant information. This is
// intentional.
info->redundant.push_back(*info);
DCHECK_EQ(info->redundant.size(), 1u);
if (secondary_info_.encoded_bytes > 0) {
memcpy(&encoded[info->encoded_bytes], secondary_encoded_.get(),
secondary_info_.encoded_bytes);
info->redundant.push_back(secondary_info_);
DCHECK_EQ(info->redundant.size(), 2u);
}
// Save primary to secondary.
if (secondary_allocated_ < info->encoded_bytes) {
secondary_encoded_.reset(new uint8_t[info->encoded_bytes]);
secondary_allocated_ = info->encoded_bytes;
}
CHECK(secondary_encoded_);
memcpy(secondary_encoded_.get(), encoded, info->encoded_bytes);
secondary_info_ = *info;
}
// Update main EncodedInfo.
info->payload_type = red_payload_type_;
info->encoded_bytes = 0;
for (std::vector<EncodedInfoLeaf>::const_iterator it =
info->redundant.begin();
it != info->redundant.end(); ++it) {
info->encoded_bytes += it->encoded_bytes;
}
return true;
}
} // namespace webrtc

View File

@ -0,0 +1,59 @@
/*
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_MODULES_AUDIO_CODING_CODECS_RED_AUDIO_ENCODER_COPY_RED_H_
#define WEBRTC_MODULES_AUDIO_CODING_CODECS_RED_AUDIO_ENCODER_COPY_RED_H_
#include <vector>
#include "webrtc/modules/audio_coding/codecs/audio_encoder.h"
#include "webrtc/system_wrappers/interface/scoped_ptr.h"
namespace webrtc {
// This class implements redundant audio coding (RED). It wraps an underlying
// AudioEncoder object that performs the actual encodings, and combines the
// two latest encodings from that codec into one packet.
class AudioEncoderCopyRed : public AudioEncoder {
public:
struct Config {
public:
int payload_type;
AudioEncoder* speech_encoder;
};
// Caller keeps ownership of the AudioEncoder object.
explicit AudioEncoderCopyRed(const Config& config);
virtual ~AudioEncoderCopyRed();
virtual int sample_rate_hz() const OVERRIDE;
virtual int num_channels() const OVERRIDE;
virtual int Num10MsFramesInNextPacket() const OVERRIDE;
virtual int Max10MsFramesInAPacket() const OVERRIDE;
protected:
virtual bool EncodeInternal(uint32_t timestamp,
const int16_t* audio,
size_t max_encoded_bytes,
uint8_t* encoded,
EncodedInfo* info) OVERRIDE;
private:
AudioEncoder* speech_encoder_;
int red_payload_type_;
scoped_ptr<uint8_t[]> secondary_encoded_;
size_t secondary_allocated_;
EncodedInfoLeaf secondary_info_;
};
} // namespace webrtc
#endif // WEBRTC_MODULES_AUDIO_CODING_CODECS_RED_AUDIO_ENCODER_COPY_RED_H_
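
As a sketch of the intended behavior (assuming the underlying encoder emits
one payload P1, P2, P3, ... per packet; not normative):

// Packet 1: encoded = [P1]      info.redundant = {P1}
// Packet 2: encoded = [P2, P1]  info.redundant = {P2, P1}
// Packet 3: encoded = [P3, P2]  info.redundant = {P3, P2}
//
// The newest (primary) payload is written first, followed by the one-step
// delayed copy; info.encoded_bytes is the sum of the payload sizes, while
// info.payload_type is the RED payload type from the Config.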

View File

@ -0,0 +1,321 @@
/*
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "testing/gtest/include/gtest/gtest.h"
#include "webrtc/modules/audio_coding/codecs/red/audio_encoder_copy_red.h"
#include "webrtc/modules/audio_coding/codecs/mock/mock_audio_encoder.h"
#include "webrtc/system_wrappers/interface/scoped_ptr.h"
using ::testing::Return;
using ::testing::_;
using ::testing::SetArgPointee;
using ::testing::InSequence;
using ::testing::Invoke;
using ::testing::MockFunction;
namespace webrtc {
namespace {
static const size_t kMaxEncodedBytes = 1000;
static const size_t kMaxNumSamples = 48 * 10 * 2; // 10 ms @ 48 kHz stereo.
}
class AudioEncoderCopyRedTest : public ::testing::Test {
protected:
AudioEncoderCopyRedTest()
: timestamp_(4711),
sample_rate_hz_(16000),
num_audio_samples_10ms(sample_rate_hz_ / 100),
red_payload_type_(200) {
AudioEncoderCopyRed::Config config;
config.payload_type = red_payload_type_;
config.speech_encoder = &mock_encoder_;
red_.reset(new AudioEncoderCopyRed(config));
memset(encoded_, 0, sizeof(encoded_));
memset(audio_, 0, sizeof(audio_));
EXPECT_CALL(mock_encoder_, num_channels()).WillRepeatedly(Return(1));
EXPECT_CALL(mock_encoder_, sample_rate_hz())
.WillRepeatedly(Return(sample_rate_hz_));
}
virtual void TearDown() OVERRIDE {
red_.reset();
// The red_ object must not delete the AudioEncoder object; mock_encoder_ is
// owned by the test fixture and is deleted with it. That is why red_ is
// explicitly deleted above, before the Die() expectation is set on
// mock_encoder_.
EXPECT_CALL(mock_encoder_, Die()).Times(1);
}
void Encode() {
ASSERT_TRUE(red_.get() != NULL);
encoded_info_ = AudioEncoder::EncodedInfo();
ASSERT_TRUE(red_->Encode(timestamp_, audio_, num_audio_samples_10ms,
kMaxEncodedBytes, encoded_, &encoded_info_));
timestamp_ += num_audio_samples_10ms;
}
MockAudioEncoder mock_encoder_;
scoped_ptr<AudioEncoderCopyRed> red_;
uint32_t timestamp_;
int16_t audio_[kMaxNumSamples];
const int sample_rate_hz_;
size_t num_audio_samples_10ms;
uint8_t encoded_[kMaxEncodedBytes];
AudioEncoder::EncodedInfo encoded_info_;
const int red_payload_type_;
};
class MockEncodeHelper {
public:
MockEncodeHelper() : write_payload_(false), payload_(NULL) {
memset(&info_, 0, sizeof(info_));
}
bool Encode(uint32_t timestamp,
const int16_t* audio,
size_t max_encoded_bytes,
uint8_t* encoded,
AudioEncoder::EncodedInfo* info) {
if (write_payload_) {
CHECK(encoded);
CHECK_LE(info_.encoded_bytes, max_encoded_bytes);
memcpy(encoded, payload_, info_.encoded_bytes);
}
CHECK(info);
*info = info_;
return true;
}
AudioEncoder::EncodedInfo info_;
bool write_payload_;
uint8_t* payload_;
};
TEST_F(AudioEncoderCopyRedTest, CreateAndDestroy) {
}
TEST_F(AudioEncoderCopyRedTest, CheckSampleRatePropagation) {
EXPECT_CALL(mock_encoder_, sample_rate_hz()).WillOnce(Return(17));
EXPECT_EQ(17, red_->sample_rate_hz());
}
TEST_F(AudioEncoderCopyRedTest, CheckNumChannelsPropagation) {
EXPECT_CALL(mock_encoder_, num_channels()).WillOnce(Return(17));
EXPECT_EQ(17, red_->num_channels());
}
TEST_F(AudioEncoderCopyRedTest, CheckFrameSizePropagation) {
EXPECT_CALL(mock_encoder_, Num10MsFramesInNextPacket()).WillOnce(Return(17));
EXPECT_EQ(17, red_->Num10MsFramesInNextPacket());
}
TEST_F(AudioEncoderCopyRedTest, CheckMaxFrameSizePropagation) {
EXPECT_CALL(mock_encoder_, Max10MsFramesInAPacket()).WillOnce(Return(17));
EXPECT_EQ(17, red_->Max10MsFramesInAPacket());
}
// Checks that an Encode() call is immediately propagated to the speech
// encoder.
TEST_F(AudioEncoderCopyRedTest, CheckImmediateEncode) {
// Interleaving the EXPECT_CALL sequence with expectations on the MockFunction
// check ensures that exactly one call to EncodeInternal happens in each
// Encode call.
InSequence s;
MockFunction<void(int check_point_id)> check;
for (int i = 1; i <= 6; ++i) {
EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _, _))
.WillOnce(Return(true));
EXPECT_CALL(check, Call(i));
Encode();
check.Call(i);
}
}
// Checks that no output is produced if the underlying codec doesn't emit any
// new data, even if the RED codec is loaded with a secondary encoding.
TEST_F(AudioEncoderCopyRedTest, CheckNoOutput) {
// Start with one Encode() call that will produce output.
static const size_t kEncodedSize = 17;
AudioEncoder::EncodedInfo info;
info.encoded_bytes = kEncodedSize;
EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _, _))
.WillOnce(DoAll(SetArgPointee<4>(info), Return(true)));
Encode();
// First call is a special case, since it does not include a secondary
// payload.
EXPECT_EQ(1u, encoded_info_.redundant.size());
EXPECT_EQ(kEncodedSize, encoded_info_.encoded_bytes);
// Next call to the speech encoder will not produce any output.
info.encoded_bytes = 0;
EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _, _))
.WillOnce(DoAll(SetArgPointee<4>(info), Return(true)));
Encode();
EXPECT_EQ(0u, encoded_info_.encoded_bytes);
// Final call to the speech encoder will produce output.
info.encoded_bytes = kEncodedSize;
EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _, _))
.WillOnce(DoAll(SetArgPointee<4>(info), Return(true)));
Encode();
EXPECT_EQ(2 * kEncodedSize, encoded_info_.encoded_bytes);
ASSERT_EQ(2u, encoded_info_.redundant.size());
}
// Checks that the correct payload sizes are populated into the redundancy
// information.
TEST_F(AudioEncoderCopyRedTest, CheckPayloadSizes) {
// Let the mock encoder return payload sizes 1, 2, 3, ..., 10 for the sequence
// of calls.
static const int kNumPackets = 10;
InSequence s;
for (int encode_size = 1; encode_size <= kNumPackets; ++encode_size) {
AudioEncoder::EncodedInfo info;
info.encoded_bytes = encode_size;
EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _, _))
.WillOnce(DoAll(SetArgPointee<4>(info), Return(true)));
}
// First call is a special case, since it does not include a secondary
// payload.
Encode();
EXPECT_EQ(1u, encoded_info_.redundant.size());
EXPECT_EQ(1u, encoded_info_.encoded_bytes);
for (size_t i = 2; i <= kNumPackets; ++i) {
Encode();
ASSERT_EQ(2u, encoded_info_.redundant.size());
EXPECT_EQ(i, encoded_info_.redundant[0].encoded_bytes);
EXPECT_EQ(i - 1, encoded_info_.redundant[1].encoded_bytes);
EXPECT_EQ(i + i - 1, encoded_info_.encoded_bytes);
}
}
// Checks that the correct timestamps are returned.
TEST_F(AudioEncoderCopyRedTest, CheckTimestamps) {
MockEncodeHelper helper;
helper.info_.encoded_bytes = 17;
helper.info_.encoded_timestamp = timestamp_;
uint32_t primary_timestamp = timestamp_;
EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _, _))
.WillRepeatedly(Invoke(&helper, &MockEncodeHelper::Encode));
// First call is a special case, since it does not include a secondary
// payload.
Encode();
EXPECT_EQ(primary_timestamp, encoded_info_.encoded_timestamp);
uint32_t secondary_timestamp = primary_timestamp;
primary_timestamp = timestamp_;
helper.info_.encoded_timestamp = timestamp_;
Encode();
ASSERT_EQ(2u, encoded_info_.redundant.size());
EXPECT_EQ(primary_timestamp, encoded_info_.redundant[0].encoded_timestamp);
EXPECT_EQ(secondary_timestamp, encoded_info_.redundant[1].encoded_timestamp);
EXPECT_EQ(primary_timestamp, encoded_info_.encoded_timestamp);
}
// Checks that the primary and secondary payloads are written correctly.
TEST_F(AudioEncoderCopyRedTest, CheckPayloads) {
// Let the mock encoder write payloads with increasing values. The first
// payload will have values 0, 1, 2, ..., kPayloadLenBytes - 1.
MockEncodeHelper helper;
static const size_t kPayloadLenBytes = 5;
helper.info_.encoded_bytes = kPayloadLenBytes;
helper.write_payload_ = true;
uint8_t payload[kPayloadLenBytes];
for (uint8_t i = 0; i < kPayloadLenBytes; ++i) {
payload[i] = i;
}
helper.payload_ = payload;
EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _, _))
.WillRepeatedly(Invoke(&helper, &MockEncodeHelper::Encode));
// First call is a special case, since it does not include a secondary
// payload.
Encode();
EXPECT_EQ(kPayloadLenBytes, encoded_info_.encoded_bytes);
for (size_t i = 0; i < kPayloadLenBytes; ++i) {
EXPECT_EQ(i, encoded_[i]);
}
for (int j = 0; j < 5; ++j) {
// Increment all values of the payload by 10.
for (size_t i = 0; i < kPayloadLenBytes; ++i)
helper.payload_[i] += 10;
Encode();
ASSERT_EQ(2u, encoded_info_.redundant.size());
EXPECT_EQ(kPayloadLenBytes, encoded_info_.redundant[0].encoded_bytes);
EXPECT_EQ(kPayloadLenBytes, encoded_info_.redundant[1].encoded_bytes);
for (size_t i = 0; i < kPayloadLenBytes; ++i) {
// Check primary payload.
EXPECT_EQ((j + 1) * 10 + i, encoded_[i]);
// Check secondary payload.
EXPECT_EQ(j * 10 + i, encoded_[i + kPayloadLenBytes]);
}
}
}
// Checks correct propagation of payload type.
TEST_F(AudioEncoderCopyRedTest, CheckPayloadType) {
MockEncodeHelper helper;
helper.info_.encoded_bytes = 17;
const int primary_payload_type = red_payload_type_ + 1;
helper.info_.payload_type = primary_payload_type;
EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _, _))
.WillRepeatedly(Invoke(&helper, &MockEncodeHelper::Encode));
// First call is a special case, since it does not include a secondary
// payload.
Encode();
ASSERT_EQ(1u, encoded_info_.redundant.size());
EXPECT_EQ(primary_payload_type, encoded_info_.redundant[0].payload_type);
EXPECT_EQ(red_payload_type_, encoded_info_.payload_type);
const int secondary_payload_type = red_payload_type_ + 2;
helper.info_.payload_type = secondary_payload_type;
Encode();
ASSERT_EQ(2u, encoded_info_.redundant.size());
EXPECT_EQ(secondary_payload_type, encoded_info_.redundant[0].payload_type);
EXPECT_EQ(primary_payload_type, encoded_info_.redundant[1].payload_type);
EXPECT_EQ(red_payload_type_, encoded_info_.payload_type);
}
#if GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
// This test fixture tests various error conditions that make the
// AudioEncoderCopyRed die via CHECKs.
class AudioEncoderCopyRedDeathTest : public AudioEncoderCopyRedTest {
protected:
AudioEncoderCopyRedDeathTest() : AudioEncoderCopyRedTest() {}
};
TEST_F(AudioEncoderCopyRedDeathTest, WrongFrameSize) {
num_audio_samples_10ms *= 2; // 20 ms frame.
EXPECT_DEATH(Encode(), "");
num_audio_samples_10ms = 0; // Zero samples.
EXPECT_DEATH(Encode(), "");
}
TEST_F(AudioEncoderCopyRedDeathTest, NullSpeechEncoder) {
AudioEncoderCopyRed* red;
AudioEncoderCopyRed::Config config;
config.speech_encoder = NULL;
EXPECT_DEATH(red = new AudioEncoderCopyRed(config),
"Speech encoder not provided.");
}
#endif // GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
} // namespace webrtc

View File

@ -0,0 +1,33 @@
# Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
{
'targets': [
{
'target_name': 'red',
'type': 'static_library',
'dependencies': [
'audio_encoder_interface',
],
'include_dirs': [
'include',
'<(webrtc_root)',
],
'direct_dependent_settings': {
'include_dirs': [
'include',
'<(webrtc_root)',
],
},
'sources': [
'audio_encoder_copy_red.h',
'audio_encoder_copy_red.cc',
],
},
], # targets
}

View File

@ -16,6 +16,7 @@
'iSAC',
'iSACFix',
'PCM16B',
'red',
'<(webrtc_root)/common_audio/common_audio.gyp:common_audio',
'<(webrtc_root)/system_wrappers/source/system_wrappers.gyp:system_wrappers',
],

View File

@ -17,6 +17,7 @@
'audio_coding/codecs/isac/main/source/isac.gypi',
'audio_coding/codecs/isac/fix/source/isacfix.gypi',
'audio_coding/codecs/pcm16b/pcm16b.gypi',
'audio_coding/codecs/red/red.gypi',
'audio_coding/main/acm2/audio_coding_module.gypi',
'audio_coding/neteq/neteq.gypi',
'audio_conference_mixer/source/audio_conference_mixer.gypi',
@ -81,6 +82,7 @@
'neteq_unittest_tools',
'paced_sender',
'PCM16B', # Needed by NetEq tests.
'red',
'remote_bitrate_estimator',
'rtp_rtcp',
'video_codecs_test_framework',
@ -118,6 +120,7 @@
'audio_coding/codecs/isac/fix/source/transform_unittest.cc',
'audio_coding/codecs/isac/main/source/isac_unittest.cc',
'audio_coding/codecs/opus/opus_unittest.cc',
'audio_coding/codecs/red/audio_encoder_copy_red_unittest.cc',
'audio_coding/neteq/audio_classifier_unittest.cc',
'audio_coding/neteq/audio_multi_vector_unittest.cc',
'audio_coding/neteq/audio_vector_unittest.cc',