Replace ACMGenericCodec with CodecOwner and AudioEncoderMutable

CodecOwner is introduced here; AudioEncoderMutable was introduced in a
previous commit, but had no users until now. The only remaining task
for ACMGenericCodec was to construct and maintain the stack of speech,
CNG, and RED encoders. This task is now handled by the CodecOwner,
which is owned and used by the CodecManager.

COAUTHOR=henrik.lundin@webrtc.org
BUG=4228
R=jmarusic@webrtc.org, minyue@webrtc.org

Review URL: https://webrtc-codereview.appspot.com/43189004

Cr-Commit-Position: refs/heads/master@{#9152}
Karl Wiberg 2015-05-07 15:49:23 +02:00
parent 53d0dc3f06
commit 2ea71c3279
16 changed files with 571 additions and 1555 deletions
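For orientation before the diffs: the stack that CodecOwner now builds for CodecManager is the speech encoder innermost, optionally wrapped by a RED encoder, optionally wrapped by a CNG encoder, with the outermost object handed out as the current encoder. A rough sketch of that assembly follows. The SetEncoders()/Encoder() names and the four-argument call shape are taken from the codec_manager.cc diff further down; the member and helper names (speech_encoder_, CreateSpeechEncoder, ConvertVadMode, etc.) are illustrative assumptions, not the actual codec_owner.h interface.

  // Conceptual sketch only; not the real codec_owner.cc.
  void CodecOwner::SetEncoders(const CodecInst& speech_inst,
                               int cng_payload_type,
                               ACMVADMode vad_mode,
                               int red_payload_type) {
    speech_encoder_ = CreateSpeechEncoder(speech_inst);  // assumed helper
    AudioEncoder* encoder = speech_encoder_.get();
    if (red_payload_type != -1) {
      // RED wraps the speech encoder (cf. AudioEncoderCopyRed::Config in the
      // deleted ResetAudioEncoder below).
      AudioEncoderCopyRed::Config red_config;
      red_config.payload_type = red_payload_type;
      red_config.speech_encoder = encoder;
      red_encoder_.reset(new AudioEncoderCopyRed(red_config));
      encoder = red_encoder_.get();
    }
    if (cng_payload_type != -1) {
      // CNG wraps whatever is outermost so far.
      AudioEncoderCng::Config cng_config;
      cng_config.payload_type = cng_payload_type;
      cng_config.vad_mode = ConvertVadMode(vad_mode);  // assumed helper
      cng_config.speech_encoder = encoder;
      cng_encoder_.reset(new AudioEncoderCng(cng_config));
      encoder = cng_encoder_.get();
    }
    encoder_ = encoder;  // What CodecOwner::Encoder() returns.
  }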

View File

@ -21,8 +21,6 @@ source_set("audio_coding") {
"main/acm2/acm_codec_database.cc",
"main/acm2/acm_codec_database.h",
"main/acm2/acm_common_defs.h",
"main/acm2/acm_generic_codec.cc",
"main/acm2/acm_generic_codec.h",
"main/acm2/acm_receiver.cc",
"main/acm2/acm_receiver.h",
"main/acm2/acm_resampler.cc",
@ -34,6 +32,8 @@ source_set("audio_coding") {
"main/acm2/call_statistics.h",
"main/acm2/codec_manager.cc",
"main/acm2/codec_manager.h",
"main/acm2/codec_owner.cc",
"main/acm2/codec_owner.h",
"main/acm2/initial_delay_manager.cc",
"main/acm2/initial_delay_manager.h",
"main/acm2/nack.cc",

View File

@ -372,48 +372,6 @@ const NetEqDecoder* ACMCodecDB::NetEQDecoders() {
return neteq_decoders_;
}
// Creates memory/instance for storing codec state.
ACMGenericCodec* ACMCodecDB::CreateCodecInstance(const CodecInst& codec_inst,
int cng_pt_nb,
int cng_pt_wb,
int cng_pt_swb,
int cng_pt_fb,
bool enable_red,
int red_payload_type) {
// All we have support for right now.
if (!STR_CASE_CMP(codec_inst.plname, "ISAC")) {
#if (defined(WEBRTC_CODEC_ISAC) || defined(WEBRTC_CODEC_ISACFX))
return new ACMGenericCodec(codec_inst, cng_pt_nb, cng_pt_wb, cng_pt_swb,
cng_pt_fb, enable_red, red_payload_type);
#endif
} else if (!STR_CASE_CMP(codec_inst.plname, "PCMU") ||
!STR_CASE_CMP(codec_inst.plname, "PCMA")) {
return new ACMGenericCodec(codec_inst, cng_pt_nb, cng_pt_wb, cng_pt_swb,
cng_pt_fb, enable_red, red_payload_type);
} else if (!STR_CASE_CMP(codec_inst.plname, "ILBC")) {
#ifdef WEBRTC_CODEC_ILBC
return new ACMGenericCodec(codec_inst, cng_pt_nb, cng_pt_wb, cng_pt_swb,
cng_pt_fb, enable_red, red_payload_type);
#endif
} else if (!STR_CASE_CMP(codec_inst.plname, "G722")) {
#ifdef WEBRTC_CODEC_G722
return new ACMGenericCodec(codec_inst, cng_pt_nb, cng_pt_wb, cng_pt_swb,
cng_pt_fb, enable_red, red_payload_type);
#endif
} else if (!STR_CASE_CMP(codec_inst.plname, "opus")) {
#ifdef WEBRTC_CODEC_OPUS
return new ACMGenericCodec(codec_inst, cng_pt_nb, cng_pt_wb, cng_pt_swb,
cng_pt_fb, enable_red, red_payload_type);
#endif
} else if (!STR_CASE_CMP(codec_inst.plname, "L16")) {
#ifdef WEBRTC_CODEC_PCM16
return new ACMGenericCodec(codec_inst, cng_pt_nb, cng_pt_wb, cng_pt_swb,
cng_pt_fb, enable_red, red_payload_type);
#endif
}
return NULL;
}
// Checks if the bitrate is valid for the codec.
bool ACMCodecDB::IsRateValid(int codec_id, int rate) {
return database_[codec_id].rate == rate;

View File

@ -17,7 +17,7 @@
#define WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_ACM_CODEC_DATABASE_H_
#include "webrtc/common_types.h"
#include "webrtc/modules/audio_coding/main/acm2/acm_generic_codec.h"
#include "webrtc/engine_configurations.h"
#include "webrtc/modules/audio_coding/neteq/interface/neteq.h"
namespace webrtc {
@ -146,7 +146,7 @@ class ACMCodecDB {
// implement ACMGenericCodec::Decoder(), which returns
// a pointer to AudioDecoder. This pointer is injected
// into NetEq when this codec is registered as receive
// codec.
// codec. DEPRECATED.
struct CodecSettings {
int num_packet_sizes;
int packet_sizes_samples[kMaxNumPacketSize];
@ -202,22 +202,6 @@ class ACMCodecDB {
// Returns the NetEQ decoder database.
static const NetEqDecoder* NetEQDecoders();
// Creates a codec wrapper containing an AudioEncoder object (or an
// ACMGenericCodec subclass during the refactoring time). The type of
// AudioEncoder is decided by looking at the information in |codec_inst|.
// The |cng_pt_*| parameters should contain the RTP payload type used for each
// type of comfort noise; if not used (or not know when this function is
// called), -1 can be set. The parameter |enable_red| indicates that RED
// is enabled, and that |red_payload_type| should be used as RTP payload type
// for RED encodings.
static ACMGenericCodec* CreateCodecInstance(const CodecInst& codec_inst,
int cng_pt_nb,
int cng_pt_wb,
int cng_pt_swb,
int cng_pt_fb,
bool enable_red,
int red_payload_type);
// Specifies if the codec specified by |codec_id| MUST own its own decoder.
// This is the case for codecs which *should* share a single codec instance
// between encoder and decoder. Or for codecs which ACM should have control

View File

@ -1,533 +0,0 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "webrtc/modules/audio_coding/main/acm2/acm_generic_codec.h"
#include <assert.h>
#include <string.h>
#include <algorithm>
#include <utility>
#include "webrtc/base/checks.h"
#include "webrtc/common_audio/vad/include/webrtc_vad.h"
#include "webrtc/modules/audio_coding/codecs/cng/include/audio_encoder_cng.h"
#include "webrtc/modules/audio_coding/codecs/cng/include/webrtc_cng.h"
#include "webrtc/modules/audio_coding/codecs/g711/include/audio_encoder_pcm.h"
#include "webrtc/modules/audio_coding/codecs/g722/include/audio_encoder_g722.h"
#include "webrtc/modules/audio_coding/codecs/ilbc/interface/audio_encoder_ilbc.h"
#include "webrtc/modules/audio_coding/codecs/isac/fix/interface/audio_encoder_isacfix.h"
#include "webrtc/modules/audio_coding/codecs/isac/main/interface/audio_encoder_isac.h"
#include "webrtc/modules/audio_coding/codecs/opus/interface/audio_encoder_opus.h"
#include "webrtc/modules/audio_coding/codecs/pcm16b/include/audio_encoder_pcm16b.h"
#include "webrtc/modules/audio_coding/codecs/red/audio_encoder_copy_red.h"
#include "webrtc/modules/audio_coding/main/acm2/acm_codec_database.h"
#include "webrtc/modules/audio_coding/main/acm2/acm_common_defs.h"
#include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
#include "webrtc/system_wrappers/interface/trace.h"
namespace webrtc {
namespace {
static const int kInvalidPayloadType = 255;
std::map<int, int>::iterator FindSampleRateInMap(std::map<int, int>* cng_pt_map,
int sample_rate_hz) {
return find_if(cng_pt_map->begin(), cng_pt_map->end(),
[sample_rate_hz](decltype(*cng_pt_map->begin()) p) {
return p.second == sample_rate_hz;
});
}
void SetPtInMap(std::map<int, int>* pt_map,
int sample_rate_hz,
int payload_type) {
if (payload_type == kInvalidPayloadType)
return;
CHECK_GE(payload_type, 0);
CHECK_LT(payload_type, 128);
auto pt_iter = FindSampleRateInMap(pt_map, sample_rate_hz);
if (pt_iter != pt_map->end()) {
// Remove item in map with sample_rate_hz.
pt_map->erase(pt_iter);
}
(*pt_map)[payload_type] = sample_rate_hz;
}
} // namespace
namespace acm2 {
// Enum for CNG
enum {
kMaxPLCParamsCNG = WEBRTC_CNG_MAX_LPC_ORDER,
kNewCNGNumLPCParams = 8
};
// Interval for sending new CNG parameters (SID frames) is 100 msec.
enum {
kCngSidIntervalMsec = 100
};
// We set some of the variables to invalid values as a check point
// if a proper initialization has happened. Another approach is
// to initialize to a default codec that we are sure is always included.
ACMGenericCodec::ACMGenericCodec(const CodecInst& codec_inst,
int cng_pt_nb,
int cng_pt_wb,
int cng_pt_swb,
int cng_pt_fb,
bool enable_red,
int red_pt_nb)
: has_internal_fec_(false),
copy_red_enabled_(enable_red),
encoder_(NULL),
bitrate_bps_(0),
fec_enabled_(false),
loss_rate_(0),
max_playback_rate_hz_(48000),
max_payload_size_bytes_(-1),
max_rate_bps_(-1),
opus_dtx_enabled_(false),
is_opus_(false),
is_isac_(false),
opus_application_set_(false) {
acm_codec_params_.codec_inst = codec_inst;
acm_codec_params_.enable_dtx = false;
acm_codec_params_.enable_vad = false;
acm_codec_params_.vad_mode = VADNormal;
SetPtInMap(&red_pt_, 8000, red_pt_nb);
SetPtInMap(&cng_pt_, 8000, cng_pt_nb);
SetPtInMap(&cng_pt_, 16000, cng_pt_wb);
SetPtInMap(&cng_pt_, 32000, cng_pt_swb);
SetPtInMap(&cng_pt_, 48000, cng_pt_fb);
ResetAudioEncoder();
CHECK(encoder_);
}
ACMGenericCodec::~ACMGenericCodec() {
}
AudioDecoderProxy::AudioDecoderProxy()
: decoder_lock_(CriticalSectionWrapper::CreateCriticalSection()),
decoder_(nullptr) {
}
AudioDecoderProxy::~AudioDecoderProxy() = default;
void AudioDecoderProxy::SetDecoder(AudioDecoder* decoder) {
CriticalSectionScoped decoder_lock(decoder_lock_.get());
decoder_ = decoder;
CHECK_EQ(decoder_->Init(), 0);
}
bool AudioDecoderProxy::IsSet() const {
CriticalSectionScoped decoder_lock(decoder_lock_.get());
return (decoder_ != nullptr);
}
int AudioDecoderProxy::Decode(const uint8_t* encoded,
size_t encoded_len,
int sample_rate_hz,
size_t max_decoded_bytes,
int16_t* decoded,
SpeechType* speech_type) {
CriticalSectionScoped decoder_lock(decoder_lock_.get());
return decoder_->Decode(encoded, encoded_len, sample_rate_hz,
max_decoded_bytes, decoded, speech_type);
}
int AudioDecoderProxy::DecodeRedundant(const uint8_t* encoded,
size_t encoded_len,
int sample_rate_hz,
size_t max_decoded_bytes,
int16_t* decoded,
SpeechType* speech_type) {
CriticalSectionScoped decoder_lock(decoder_lock_.get());
return decoder_->DecodeRedundant(encoded, encoded_len, sample_rate_hz,
max_decoded_bytes, decoded, speech_type);
}
bool AudioDecoderProxy::HasDecodePlc() const {
CriticalSectionScoped decoder_lock(decoder_lock_.get());
return decoder_->HasDecodePlc();
}
int AudioDecoderProxy::DecodePlc(int num_frames, int16_t* decoded) {
CriticalSectionScoped decoder_lock(decoder_lock_.get());
return decoder_->DecodePlc(num_frames, decoded);
}
int AudioDecoderProxy::Init() {
CriticalSectionScoped decoder_lock(decoder_lock_.get());
return decoder_->Init();
}
int AudioDecoderProxy::IncomingPacket(const uint8_t* payload,
size_t payload_len,
uint16_t rtp_sequence_number,
uint32_t rtp_timestamp,
uint32_t arrival_timestamp) {
CriticalSectionScoped decoder_lock(decoder_lock_.get());
return decoder_->IncomingPacket(payload, payload_len, rtp_sequence_number,
rtp_timestamp, arrival_timestamp);
}
int AudioDecoderProxy::ErrorCode() {
CriticalSectionScoped decoder_lock(decoder_lock_.get());
return decoder_->ErrorCode();
}
int AudioDecoderProxy::PacketDuration(const uint8_t* encoded,
size_t encoded_len) const {
CriticalSectionScoped decoder_lock(decoder_lock_.get());
return decoder_->PacketDuration(encoded, encoded_len);
}
int AudioDecoderProxy::PacketDurationRedundant(const uint8_t* encoded,
size_t encoded_len) const {
CriticalSectionScoped decoder_lock(decoder_lock_.get());
return decoder_->PacketDurationRedundant(encoded, encoded_len);
}
bool AudioDecoderProxy::PacketHasFec(const uint8_t* encoded,
size_t encoded_len) const {
CriticalSectionScoped decoder_lock(decoder_lock_.get());
return decoder_->PacketHasFec(encoded, encoded_len);
}
CNG_dec_inst* AudioDecoderProxy::CngDecoderInstance() {
CriticalSectionScoped decoder_lock(decoder_lock_.get());
return decoder_->CngDecoderInstance();
}
size_t AudioDecoderProxy::Channels() const {
CriticalSectionScoped decoder_lock(decoder_lock_.get());
return decoder_->Channels();
}
int16_t ACMGenericCodec::EncoderParams(WebRtcACMCodecParams* enc_params) const {
*enc_params = acm_codec_params_;
return 0;
}
int16_t ACMGenericCodec::InitEncoder(WebRtcACMCodecParams* codec_params,
bool force_initialization) {
bitrate_bps_ = 0;
loss_rate_ = 0;
opus_dtx_enabled_ = false;
acm_codec_params_ = *codec_params;
if (force_initialization)
opus_application_set_ = false;
opus_application_ = GetOpusApplication(codec_params->codec_inst.channels,
opus_dtx_enabled_);
opus_application_set_ = true;
ResetAudioEncoder();
return 0;
}
void ACMGenericCodec::ResetAudioEncoder() {
const CodecInst& codec_inst = acm_codec_params_.codec_inst;
if (!STR_CASE_CMP(codec_inst.plname, "PCMU")) {
AudioEncoderPcmU::Config config;
config.num_channels = codec_inst.channels;
config.frame_size_ms = codec_inst.pacsize / 8;
config.payload_type = codec_inst.pltype;
audio_encoder_.reset(new AudioEncoderPcmU(config));
} else if (!STR_CASE_CMP(codec_inst.plname, "PCMA")) {
AudioEncoderPcmA::Config config;
config.num_channels = codec_inst.channels;
config.frame_size_ms = codec_inst.pacsize / 8;
config.payload_type = codec_inst.pltype;
audio_encoder_.reset(new AudioEncoderPcmA(config));
#ifdef WEBRTC_CODEC_PCM16
} else if (!STR_CASE_CMP(codec_inst.plname, "L16")) {
AudioEncoderPcm16B::Config config;
config.num_channels = codec_inst.channels;
config.sample_rate_hz = codec_inst.plfreq;
config.frame_size_ms = codec_inst.pacsize / (config.sample_rate_hz / 1000);
config.payload_type = codec_inst.pltype;
audio_encoder_.reset(new AudioEncoderPcm16B(config));
#endif
#ifdef WEBRTC_CODEC_ILBC
} else if (!STR_CASE_CMP(codec_inst.plname, "ILBC")) {
AudioEncoderIlbc::Config config;
config.frame_size_ms = codec_inst.pacsize / 8;
config.payload_type = codec_inst.pltype;
audio_encoder_.reset(new AudioEncoderIlbc(config));
#endif
#ifdef WEBRTC_CODEC_OPUS
} else if (!STR_CASE_CMP(codec_inst.plname, "opus")) {
is_opus_ = true;
has_internal_fec_ = true;
AudioEncoderOpus::Config config;
config.frame_size_ms = codec_inst.pacsize / 48;
config.num_channels = codec_inst.channels;
config.fec_enabled = fec_enabled_;
config.bitrate_bps = codec_inst.rate;
config.max_playback_rate_hz = max_playback_rate_hz_;
config.dtx_enabled = opus_dtx_enabled_;
config.payload_type = codec_inst.pltype;
switch (GetOpusApplication(config.num_channels, config.dtx_enabled)) {
case kVoip:
config.application = AudioEncoderOpus::ApplicationMode::kVoip;
break;
case kAudio:
config.application = AudioEncoderOpus::ApplicationMode::kAudio;
break;
}
audio_encoder_.reset(new AudioEncoderOpus(config));
#endif
#ifdef WEBRTC_CODEC_G722
} else if (!STR_CASE_CMP(codec_inst.plname, "G722")) {
AudioEncoderG722::Config config;
config.num_channels = codec_inst.channels;
config.frame_size_ms = codec_inst.pacsize / 16;
config.payload_type = codec_inst.pltype;
audio_encoder_.reset(new AudioEncoderG722(config));
#endif
#ifdef WEBRTC_CODEC_ISACFX
} else if (!STR_CASE_CMP(codec_inst.plname, "ISAC")) {
is_isac_ = true;
AudioEncoderDecoderIsacFix::Config config;
config.payload_type = codec_inst.pltype;
config.sample_rate_hz = codec_inst.plfreq;
config.frame_size_ms = rtc::CheckedDivExact(codec_inst.pacsize, 16);
if (codec_inst.rate != -1)
config.bit_rate = codec_inst.rate;
config.max_payload_size_bytes = max_payload_size_bytes_;
config.max_bit_rate = max_rate_bps_;
config.adaptive_mode = (codec_inst.rate == -1);
auto* enc_dec = new AudioEncoderDecoderIsacFix(config);
decoder_proxy_.SetDecoder(enc_dec);
audio_encoder_.reset(enc_dec);
#endif
#ifdef WEBRTC_CODEC_ISAC
} else if (!STR_CASE_CMP(codec_inst.plname, "ISAC")) {
is_isac_ = true;
AudioEncoderDecoderIsac::Config config;
config.payload_type = codec_inst.pltype;
config.sample_rate_hz = codec_inst.plfreq;
config.frame_size_ms =
rtc::CheckedDivExact(1000 * codec_inst.pacsize, config.sample_rate_hz);
if (codec_inst.rate != -1)
config.bit_rate = codec_inst.rate;
config.max_payload_size_bytes = max_payload_size_bytes_;
config.max_bit_rate = max_rate_bps_;
config.adaptive_mode = (codec_inst.rate == -1);
auto* enc_dec = new AudioEncoderDecoderIsac(config);
decoder_proxy_.SetDecoder(enc_dec);
audio_encoder_.reset(enc_dec);
#endif
} else {
FATAL();
}
if (bitrate_bps_ != 0)
audio_encoder_->SetTargetBitrate(bitrate_bps_);
audio_encoder_->SetProjectedPacketLossRate(loss_rate_ / 100.0);
encoder_ = audio_encoder_.get();
// Attach RED if needed.
auto pt_iter =
FindSampleRateInMap(&red_pt_, audio_encoder_->SampleRateHz());
if (copy_red_enabled_ && pt_iter != red_pt_.end()) {
CHECK_NE(pt_iter->first, kInvalidPayloadType);
AudioEncoderCopyRed::Config config;
config.payload_type = pt_iter->first;
config.speech_encoder = encoder_;
red_encoder_.reset(new AudioEncoderCopyRed(config));
encoder_ = red_encoder_.get();
} else {
red_encoder_.reset();
copy_red_enabled_ = false;
}
// Attach CNG if needed.
// Reverse-lookup from sample rate to complete key-value pair.
pt_iter =
FindSampleRateInMap(&cng_pt_, audio_encoder_->SampleRateHz());
if (acm_codec_params_.enable_dtx && pt_iter != cng_pt_.end()) {
AudioEncoderCng::Config config;
config.num_channels = acm_codec_params_.codec_inst.channels;
config.payload_type = pt_iter->first;
config.speech_encoder = encoder_;
switch (acm_codec_params_.vad_mode) {
case VADNormal:
config.vad_mode = Vad::kVadNormal;
break;
case VADLowBitrate:
config.vad_mode = Vad::kVadLowBitrate;
break;
case VADAggr:
config.vad_mode = Vad::kVadAggressive;
break;
case VADVeryAggr:
config.vad_mode = Vad::kVadVeryAggressive;
break;
default:
FATAL();
}
cng_encoder_.reset(new AudioEncoderCng(config));
encoder_ = cng_encoder_.get();
} else {
cng_encoder_.reset();
}
}
OpusApplicationMode ACMGenericCodec::GetOpusApplication(
int num_channels, bool enable_dtx) const {
if (opus_application_set_)
return opus_application_;
return num_channels == 1 || enable_dtx ? kVoip : kAudio;
}
void ACMGenericCodec::SetBitRate(const int bitrate_bps) {
encoder_->SetTargetBitrate(bitrate_bps);
bitrate_bps_ = bitrate_bps;
}
int16_t ACMGenericCodec::SetVAD(bool* enable_dtx,
bool* enable_vad,
ACMVADMode* mode) {
if (is_opus_) {
*enable_dtx = false;
*enable_vad = false;
return 0;
}
// Note: |enable_vad| is not used; VAD is enabled based on the DTX setting and
// the |enable_vad| is set equal to |enable_dtx|.
// The case when VAD is enabled but DTX is disabled may result in a
// kPassiveNormalEncoded frame type, but this is not a case that VoE
// distinguishes from the cases where DTX is in fact used. In the case where
// DTX is enabled but VAD is disabled, the comment in the ACM interface states
// that VAD will be enabled anyway.
DCHECK_EQ(*enable_dtx, *enable_vad);
*enable_vad = *enable_dtx;
acm_codec_params_.enable_dtx = *enable_dtx;
acm_codec_params_.enable_vad = *enable_vad;
acm_codec_params_.vad_mode = *mode;
if (acm_codec_params_.enable_dtx && !cng_encoder_) {
ResetAudioEncoder();
} else if (!acm_codec_params_.enable_dtx && cng_encoder_) {
cng_encoder_.reset();
encoder_ = audio_encoder_.get();
}
return 0;
}
void ACMGenericCodec::SetCngPt(int sample_rate_hz, int payload_type) {
SetPtInMap(&cng_pt_, sample_rate_hz, payload_type);
ResetAudioEncoder();
}
void ACMGenericCodec::SetRedPt(int sample_rate_hz, int payload_type) {
SetPtInMap(&red_pt_, sample_rate_hz, payload_type);
ResetAudioEncoder();
}
int32_t ACMGenericCodec::SetISACMaxPayloadSize(
const uint16_t max_payload_len_bytes) {
if (!is_isac_)
return -1; // Needed for tests to pass.
max_payload_size_bytes_ = max_payload_len_bytes;
ResetAudioEncoder();
return 0;
}
int32_t ACMGenericCodec::SetISACMaxRate(const uint32_t max_rate_bps) {
if (!is_isac_)
return -1; // Needed for tests to pass.
max_rate_bps_ = max_rate_bps;
ResetAudioEncoder();
return 0;
}
int ACMGenericCodec::SetOpusMaxPlaybackRate(int frequency_hz) {
if (!is_opus_)
return -1; // Needed for tests to pass.
max_playback_rate_hz_ = frequency_hz;
ResetAudioEncoder();
return 0;
}
AudioDecoder* ACMGenericCodec::Decoder() {
return decoder_proxy_.IsSet() ? &decoder_proxy_ : nullptr;
}
int ACMGenericCodec::EnableOpusDtx(bool force_voip) {
if (!is_opus_)
return -1; // Needed for tests to pass.
if (!force_voip &&
GetOpusApplication(encoder_->NumChannels(), true) != kVoip) {
// Opus DTX can only be enabled when application mode is kVoip.
return -1;
}
opus_application_ = kVoip;
opus_application_set_ = true;
opus_dtx_enabled_ = true;
ResetAudioEncoder();
return 0;
}
int ACMGenericCodec::DisableOpusDtx() {
if (!is_opus_)
return -1; // Needed for tests to pass.
opus_dtx_enabled_ = false;
ResetAudioEncoder();
return 0;
}
int ACMGenericCodec::SetFEC(bool enable_fec) {
if (!HasInternalFEC())
return enable_fec ? -1 : 0;
if (fec_enabled_ != enable_fec) {
fec_enabled_ = enable_fec;
ResetAudioEncoder();
}
return 0;
}
int ACMGenericCodec::SetOpusApplication(OpusApplicationMode application,
bool disable_dtx_if_needed) {
if (opus_dtx_enabled_ && application == kAudio) {
if (disable_dtx_if_needed) {
opus_dtx_enabled_ = false;
} else {
// Opus can only be set to kAudio when DTX is off.
return -1;
}
}
opus_application_ = application;
opus_application_set_ = true;
ResetAudioEncoder();
return 0;
}
int ACMGenericCodec::SetPacketLossRate(int loss_rate) {
encoder_->SetProjectedPacketLossRate(loss_rate / 100.0);
loss_rate_ = loss_rate;
return 0;
}
int ACMGenericCodec::SetCopyRed(bool enable) {
copy_red_enabled_ = enable;
ResetAudioEncoder();
return copy_red_enabled_ == enable ? 0 : -1;
}
AudioEncoder* ACMGenericCodec::GetAudioEncoder() {
return encoder_;
}
const AudioEncoder* ACMGenericCodec::GetAudioEncoder() const {
return encoder_;
}
} // namespace acm2
} // namespace webrtc
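
A detail of the file just deleted worth noting: cng_pt_ and red_pt_ map payload type to sample rate, so SetPtInMap() writes (*pt_map)[payload_type] = sample_rate_hz and FindSampleRateInMap() has to search by value to answer "which payload type is registered for this sample rate?". A minimal standalone illustration of that reverse lookup (the function name is my own, not part of the WebRTC code):

  #include <algorithm>
  #include <map>

  // Returns the payload type registered for |sample_rate_hz|, or -1 if none.
  int PayloadTypeForRate(const std::map<int, int>& pt_to_rate_hz,
                         int sample_rate_hz) {
    auto it = std::find_if(pt_to_rate_hz.begin(), pt_to_rate_hz.end(),
                           [sample_rate_hz](const std::pair<const int, int>& p) {
                             return p.second == sample_rate_hz;
                           });
    return it == pt_to_rate_hz.end() ? -1 : it->first;
  }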

View File

@ -1,449 +0,0 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_ACM_GENERIC_CODEC_H_
#define WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_ACM_GENERIC_CODEC_H_
#include <map>
#include "webrtc/base/scoped_ptr.h"
#include "webrtc/base/thread_annotations.h"
#include "webrtc/modules/audio_coding/main/interface/audio_coding_module_typedefs.h"
#include "webrtc/modules/audio_coding/codecs/audio_decoder.h"
#include "webrtc/modules/audio_coding/codecs/audio_encoder.h"
#include "webrtc/modules/audio_coding/main/acm2/acm_common_defs.h"
#include "webrtc/modules/audio_coding/neteq/interface/neteq.h"
#include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
#include "webrtc/system_wrappers/interface/rw_lock_wrapper.h"
#include "webrtc/system_wrappers/interface/trace.h"
#define MAX_FRAME_SIZE_10MSEC 6
// forward declaration
struct WebRtcVadInst;
struct WebRtcCngEncInst;
namespace webrtc {
struct WebRtcACMCodecParams;
struct CodecInst;
class CriticalSectionWrapper;
namespace acm2 {
// forward declaration
class AcmReceiver;
// Proxy for AudioDecoder
class AudioDecoderProxy final : public AudioDecoder {
public:
AudioDecoderProxy();
~AudioDecoderProxy() override;
void SetDecoder(AudioDecoder* decoder);
bool IsSet() const;
int Decode(const uint8_t* encoded,
size_t encoded_len,
int sample_rate_hz,
size_t max_decoded_bytes,
int16_t* decoded,
SpeechType* speech_type) override;
int DecodeRedundant(const uint8_t* encoded,
size_t encoded_len,
int sample_rate_hz,
size_t max_decoded_bytes,
int16_t* decoded,
SpeechType* speech_type) override;
bool HasDecodePlc() const override;
int DecodePlc(int num_frames, int16_t* decoded) override;
int Init() override;
int IncomingPacket(const uint8_t* payload,
size_t payload_len,
uint16_t rtp_sequence_number,
uint32_t rtp_timestamp,
uint32_t arrival_timestamp) override;
int ErrorCode() override;
int PacketDuration(const uint8_t* encoded, size_t encoded_len) const override;
int PacketDurationRedundant(const uint8_t* encoded,
size_t encoded_len) const override;
bool PacketHasFec(const uint8_t* encoded, size_t encoded_len) const override;
CNG_dec_inst* CngDecoderInstance() override;
size_t Channels() const override;
private:
rtc::scoped_ptr<CriticalSectionWrapper> decoder_lock_;
AudioDecoder* decoder_ GUARDED_BY(decoder_lock_);
};
class ACMGenericCodec {
public:
ACMGenericCodec(const CodecInst& codec_inst,
int cng_pt_nb,
int cng_pt_wb,
int cng_pt_swb,
int cng_pt_fb,
bool enable_red,
int red_pt_nb);
~ACMGenericCodec();
///////////////////////////////////////////////////////////////////////////
// ACMGenericCodec* CreateInstance();
// The function will be used for FEC. It is not implemented yet.
//
ACMGenericCodec* CreateInstance();
///////////////////////////////////////////////////////////////////////////
// bool EncoderInitialized();
//
// Return value:
// True if the encoder is successfully initialized,
// false otherwise.
//
bool EncoderInitialized();
///////////////////////////////////////////////////////////////////////////
// int16_t EncoderParams()
// It is called to get encoder parameters. It will call
// EncoderParamsSafe() in turn.
//
// Output:
// -enc_params : a buffer where the encoder parameters is
// written to. If the encoder is not
// initialized this buffer is filled with
// invalid values
// Return value:
// -1 if the encoder is not initialized,
// 0 otherwise.
//
int16_t EncoderParams(WebRtcACMCodecParams* enc_params) const;
///////////////////////////////////////////////////////////////////////////
// int16_t InitEncoder(...)
// This function is called to initialize the encoder with the given
// parameters.
//
// Input:
// -codec_params : parameters of encoder.
// -force_initialization: if false the initialization is invoked only if
// the encoder is not initialized. If true the
// encoder is forced to (re)initialize.
//
// Return value:
// 0 if could initialize successfully,
// -1 if failed to initialize.
//
//
int16_t InitEncoder(WebRtcACMCodecParams* codec_params,
bool force_initialization);
///////////////////////////////////////////////////////////////////////////
// uint32_t NoMissedSamples()
// This function returns the number of samples which are overwritten in
// the audio buffer. The audio samples are overwritten if the input audio
// buffer is full, but Add10MsData() is called. (We might remove this
// function if it is not used)
//
// Return Value:
// Number of samples which are overwritten.
//
uint32_t NoMissedSamples() const;
///////////////////////////////////////////////////////////////////////////
// void ResetNoMissedSamples()
// This function resets the number of overwritten samples to zero.
// (We might remove this function if we remove NoMissedSamples())
//
void ResetNoMissedSamples();
///////////////////////////////////////////////////////////////////////////
// void SetBitRate()
// The function is called to set the encoding rate. If the value is not
// supported by the codec, another appropriate value is used.
//
// Input:
// -bitrate_bps : encoding rate in bits per second
//
void SetBitRate(const int bitrate_bps);
///////////////////////////////////////////////////////////////////////////
// uint32_t EarliestTimestamp()
// Returns the timestamp of the first 10 ms in audio buffer. This is used
// to identify if a synchronization of two encoders is required.
//
// Return value:
// timestamp of the first 10 ms audio in the audio buffer.
//
uint32_t EarliestTimestamp() const;
///////////////////////////////////////////////////////////////////////////
// int16_t SetVAD()
// This is called to set VAD & DTX. If the codec has internal DTX, it will
// be used. If DTX is enabled and the codec does not have internal DTX,
// WebRtc-VAD will be used to decide if the frame is active. If DTX is
// disabled but VAD is enabled, the audio is passed through VAD to label it
// as active or passive, but the frame is encoded normally. However the
// bit-stream is labeled properly so that ACM::Process() can use this
// information. In case of failure, the previous states of the VAD & DTX
// are kept.
//
// Inputs/Output:
// -enable_dtx : if true DTX will be enabled otherwise the DTX is
// disabled. If codec has internal DTX that will be
// used, otherwise WebRtc-CNG is used. In the latter
// case VAD is automatically activated.
// -enable_vad : if true WebRtc-VAD is enabled, otherwise VAD is
// disabled, except for the case that DTX is enabled
// but codec doesn't have internal DTX. In this case
// VAD is enabled regardless of the value of
// |enable_vad|.
// -mode : this specifies the aggressiveness of VAD.
//
// Return value
// -1 if failed to set DTX & VAD as specified,
// 0 if succeeded.
//
int16_t SetVAD(bool* enable_dtx, bool* enable_vad, ACMVADMode* mode);
// Registers comfort noise at |sample_rate_hz| to use |payload_type|.
void SetCngPt(int sample_rate_hz, int payload_type);
// Registers RED at |sample_rate_hz| to use |payload_type|.
void SetRedPt(int sample_rate_hz, int payload_type);
///////////////////////////////////////////////////////////////////////////
// UpdateEncoderSampFreq()
// Call this function to update the encoder sampling frequency. This
// is for codecs where one payload-name supports several encoder sampling
// frequencies. Otherwise, to change the sampling frequency we need to
// register new codec. ACM will consider that as registration of a new
// codec, not a change in parameter. For iSAC, switching from WB to SWB
// is treated as a change in parameter. Therefore, we need this function.
//
// Input:
// -samp_freq_hz : encoder sampling frequency.
//
// Return value:
// -1 if failed, or if this is meaningless for the given codec.
// 0 if succeeded.
//
int16_t UpdateEncoderSampFreq(uint16_t samp_freq_hz);
///////////////////////////////////////////////////////////////////////////
// EncoderSampFreq()
// Get the sampling frequency that the encoder (WebRtc wrapper) expects.
//
// Output:
// -samp_freq_hz : sampling frequency, in Hertz, which the encoder
// should be fed with.
//
// Return value:
// -1 if failed to output sampling rate.
// 0 if the sample rate is returned successfully.
//
int16_t EncoderSampFreq(uint16_t* samp_freq_hz);
///////////////////////////////////////////////////////////////////////////
// SetISACMaxPayloadSize()
// Set the maximum payload size of iSAC packets. No iSAC payload,
// regardless of its frame-size, may exceed the given limit. For
// an iSAC payload of size B bits and frame-size T sec we have;
// (B < max_payload_len_bytes * 8) and (B/T < max_rate_bit_per_sec), c.f.
// SetISACMaxRate().
//
// Input:
// -max_payload_len_bytes : maximum payload size in bytes.
//
// Return value:
// -1 if failed to set the maximum payload-size.
// 0 if the given length is set successfully.
//
int32_t SetISACMaxPayloadSize(const uint16_t max_payload_len_bytes);
///////////////////////////////////////////////////////////////////////////
// SetISACMaxRate()
// Set the maximum instantaneous rate of iSAC. For a payload of B bits
// with a frame-size of T sec the instantaneous rate is B/T bits per
// second. Therefore, (B/T < max_rate_bit_per_sec) and
// (B < max_payload_len_bytes * 8) are always satisfied for iSAC payloads,
// c.f SetISACMaxPayloadSize().
//
// Input:
// -max_rate_bps : maximum instantaneous bit-rate given in bits/sec.
//
// Return value:
// -1 if failed to set the maximum rate.
// 0 if the maximum rate is set successfully.
//
int32_t SetISACMaxRate(const uint32_t max_rate_bps);
///////////////////////////////////////////////////////////////////////////
// int SetOpusApplication(OpusApplicationMode application,
// bool disable_dtx_if_needed)
// Sets the intended application for the Opus encoder. Opus uses this to
// optimize the encoding for applications like VOIP and music. Currently, two
// modes are supported: kVoip and kAudio. kAudio is only allowed when Opus
// DTX is switched off. If DTX is on, and |application| == kAudio, a failure
// will be triggered unless |disable_dtx_if_needed| == true, for which, the
// DTX will be forced off.
//
// Input:
// - application : intended application.
// - disable_dtx_if_needed : whether to force Opus DTX to stop when needed.
//
// Return value:
// -1 if failed or on codecs other than Opus.
// 0 if succeeded.
//
int SetOpusApplication(OpusApplicationMode application,
bool disable_dtx_if_needed);
///////////////////////////////////////////////////////////////////////////
// int SetOpusMaxPlaybackRate()
// Sets maximum playback rate the receiver will render, if the codec is Opus.
// This is to tell Opus that it is enough to code the input audio up to a
// bandwidth. Opus can take this information to optimize the bit rate and
// increase the computation efficiency.
//
// Input:
// -frequency_hz : maximum playback rate in Hz.
//
// Return value:
// -1 if failed or on codecs other than Opus.
// 0 if succeeded.
//
int SetOpusMaxPlaybackRate(int /* frequency_hz */);
///////////////////////////////////////////////////////////////////////////
// EnableOpusDtx(bool force_voip)
// Enable the DTX, if the codec is Opus. Currently, DTX can only be enabled
// when the application mode is kVoip. If |force_voip| == true, the
// application mode will be forced to kVoip. Otherwise, a failure will be
// triggered if current application mode is kAudio.
// Input:
// - force_voip : whether to force application mode to kVoip.
// Return value:
// -1 if failed or on codecs other than Opus.
// 0 if succeeded.
//
int EnableOpusDtx(bool force_voip);
///////////////////////////////////////////////////////////////////////////
// DisableOpusDtx()
// Disable the DTX, if the codec is Opus.
// Return value:
// -1 if failed or on codecs other than Opus.
// 0 if succeeded.
//
int DisableOpusDtx();
///////////////////////////////////////////////////////////////////////////
// HasFrameToEncode()
// Returns true if there is enough audio buffered for encoding, such that
// calling Encode() will return a payload.
//
bool HasFrameToEncode() const;
// Returns a pointer to the AudioDecoder part of a joint encoder-decoder
// object, if it exists. Otherwise, nullptr is returned.
AudioDecoder* Decoder();
///////////////////////////////////////////////////////////////////////////
// bool HasInternalFEC()
// Used to check if the codec has internal FEC.
//
// Return value:
// true if the codec has an internal FEC, e.g. Opus.
// false otherwise.
//
bool HasInternalFEC() const {
return has_internal_fec_;
}
///////////////////////////////////////////////////////////////////////////
// int SetFEC();
// Sets the codec internal FEC. No effects on codecs that do not provide
// internal FEC.
//
// Input:
// -enable_fec : if true FEC will be enabled otherwise the FEC is
// disabled.
//
// Return value:
// -1 if failed,
// 0 if succeeded.
//
int SetFEC(bool enable_fec);
///////////////////////////////////////////////////////////////////////////
// int SetPacketLossRate()
// Sets expected packet loss rate for encoding. Some encoders provide packet
// loss gnostic encoding to make stream less sensitive to packet losses,
// through e.g., FEC. No effects on codecs that do not provide such encoding.
//
// Input:
// -loss_rate : expected packet loss rate (0 -- 100 inclusive).
//
// Return value:
// -1 if failed,
// 0 if succeeded or packet loss rate is ignored.
//
int SetPacketLossRate(int /* loss_rate */);
///////////////////////////////////////////////////////////////////////////
// int SetCopyRed()
// Enable or disable copy RED. It fails if there is no RED payload that
// matches the codec, e.g., sample rate differs.
//
// Return value:
// -1 if failed,
// 0 if succeeded.
int SetCopyRed(bool enable);
AudioEncoder* GetAudioEncoder();
const AudioEncoder* GetAudioEncoder() const;
private:
bool has_internal_fec_;
bool copy_red_enabled_;
void ResetAudioEncoder();
OpusApplicationMode GetOpusApplication(int num_channels,
bool enable_dtx) const;
rtc::scoped_ptr<AudioEncoder> audio_encoder_;
rtc::scoped_ptr<AudioEncoder> cng_encoder_;
rtc::scoped_ptr<AudioEncoder> red_encoder_;
AudioEncoder* encoder_;
AudioDecoderProxy decoder_proxy_;
WebRtcACMCodecParams acm_codec_params_;
int bitrate_bps_;
bool fec_enabled_;
int loss_rate_;
int max_playback_rate_hz_;
int max_payload_size_bytes_;
int max_rate_bps_;
bool opus_dtx_enabled_;
bool is_opus_;
bool is_isac_;
// Map from payload type to CNG sample rate (Hz).
std::map<int, int> cng_pt_;
// Map from payload type to RED sample rate (Hz).
std::map<int, int> red_pt_;
OpusApplicationMode opus_application_;
bool opus_application_set_;
};
} // namespace acm2
} // namespace webrtc
#endif // WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_ACM_GENERIC_CODEC_H_

View File

@ -1,145 +0,0 @@
/*
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "testing/gtest/include/gtest/gtest.h"
#include "webrtc/modules/audio_coding/codecs/opus/interface/audio_encoder_opus.h"
#include "webrtc/modules/audio_coding/main/acm2/acm_generic_codec.h"
namespace webrtc {
namespace acm2 {
#ifdef WEBRTC_CODEC_OPUS
namespace {
const CodecInst kDefaultOpusCodecInst = {105, "opus", 48000, 960, 1, 32000};
const int kCngPt = 255; // Not using CNG in this test.
const int kRedPt = 255; // Not using RED in this test.
} // namespace
class AcmGenericCodecOpusTest : public ::testing::Test {
protected:
AcmGenericCodecOpusTest() {
acm_codec_params_ = {kDefaultOpusCodecInst, false, false, VADNormal};
}
void CreateCodec() {
codec_wrapper_.reset(new ACMGenericCodec(
acm_codec_params_.codec_inst, kCngPt, kCngPt, kCngPt, kCngPt,
false /* enable RED */, kRedPt));
ASSERT_TRUE(codec_wrapper_);
ASSERT_EQ(0, codec_wrapper_->InitEncoder(&acm_codec_params_, true));
}
const AudioEncoderOpus* GetAudioEncoderOpus() {
const AudioEncoderOpus* ptr = static_cast<const AudioEncoderOpus*>(
codec_wrapper_->GetAudioEncoder());
EXPECT_NE(nullptr, ptr);
return ptr;
}
WebRtcACMCodecParams acm_codec_params_;
rtc::scoped_ptr<ACMGenericCodec> codec_wrapper_;
};
TEST_F(AcmGenericCodecOpusTest, DefaultApplicationModeMono) {
acm_codec_params_.codec_inst.channels = 1;
CreateCodec();
EXPECT_EQ(AudioEncoderOpus::kVoip, GetAudioEncoderOpus()->application());
}
TEST_F(AcmGenericCodecOpusTest, DefaultApplicationModeStereo) {
acm_codec_params_.codec_inst.channels = 2;
CreateCodec();
EXPECT_EQ(AudioEncoderOpus::kAudio, GetAudioEncoderOpus()->application());
}
TEST_F(AcmGenericCodecOpusTest, ChangeApplicationMode) {
// Create a stereo encoder.
acm_codec_params_.codec_inst.channels = 2;
CreateCodec();
// Verify that the mode is kAudio.
const AudioEncoderOpus* opus_ptr = GetAudioEncoderOpus();
EXPECT_EQ(AudioEncoderOpus::kAudio, opus_ptr->application());
// Change mode.
EXPECT_EQ(0, codec_wrapper_->SetOpusApplication(kVoip, false));
// Verify that the AudioEncoder object was changed.
EXPECT_NE(opus_ptr, GetAudioEncoderOpus());
EXPECT_EQ(AudioEncoderOpus::kVoip, GetAudioEncoderOpus()->application());
}
TEST_F(AcmGenericCodecOpusTest, ResetWontChangeApplicationMode) {
// Create a stereo encoder.
acm_codec_params_.codec_inst.channels = 2;
CreateCodec();
const AudioEncoderOpus* opus_ptr = GetAudioEncoderOpus();
// Verify that the mode is kAudio.
EXPECT_EQ(AudioEncoderOpus::kAudio, opus_ptr->application());
// Trigger a reset.
ASSERT_EQ(0, codec_wrapper_->InitEncoder(&acm_codec_params_, false));
// Verify that the AudioEncoder object changed.
EXPECT_NE(opus_ptr, GetAudioEncoderOpus());
// Verify that the mode is still kAudio.
EXPECT_EQ(AudioEncoderOpus::kAudio, GetAudioEncoderOpus()->application());
// Now change to kVoip.
EXPECT_EQ(0, codec_wrapper_->SetOpusApplication(kVoip, false));
EXPECT_EQ(AudioEncoderOpus::kVoip, GetAudioEncoderOpus()->application());
opus_ptr = GetAudioEncoderOpus();
// Trigger a reset again.
ASSERT_EQ(0, codec_wrapper_->InitEncoder(&acm_codec_params_, false));
// Verify that the AudioEncoder object changed.
EXPECT_NE(opus_ptr, GetAudioEncoderOpus());
// Verify that the mode is still kVoip.
EXPECT_EQ(AudioEncoderOpus::kVoip, GetAudioEncoderOpus()->application());
}
TEST_F(AcmGenericCodecOpusTest, ToggleDtx) {
// Create a stereo encoder.
acm_codec_params_.codec_inst.channels = 2;
CreateCodec();
// Verify that the mode is still kAudio.
EXPECT_EQ(AudioEncoderOpus::kAudio, GetAudioEncoderOpus()->application());
// DTX is not allowed in audio mode, if mode forcing flag is false.
EXPECT_EQ(-1, codec_wrapper_->EnableOpusDtx(false));
EXPECT_EQ(AudioEncoderOpus::kAudio, GetAudioEncoderOpus()->application());
// DTX will be on, if mode forcing flag is true. Then application mode is
// switched to kVoip.
EXPECT_EQ(0, codec_wrapper_->EnableOpusDtx(true));
EXPECT_EQ(AudioEncoderOpus::kVoip, GetAudioEncoderOpus()->application());
// Audio mode is not allowed when DTX is on, and DTX forcing flag is false.
EXPECT_EQ(-1, codec_wrapper_->SetOpusApplication(kAudio, false));
EXPECT_TRUE(GetAudioEncoderOpus()->dtx_enabled());
// Audio mode will be set, if DTX forcing flag is true. Then DTX is switched
// off.
EXPECT_EQ(0, codec_wrapper_->SetOpusApplication(kAudio, true));
EXPECT_FALSE(GetAudioEncoderOpus()->dtx_enabled());
// Now we set VOIP mode. The DTX forcing flag has no effect.
EXPECT_EQ(0, codec_wrapper_->SetOpusApplication(kVoip, true));
EXPECT_FALSE(GetAudioEncoderOpus()->dtx_enabled());
// In VOIP mode, we can enable DTX with mode forcing flag being false.
EXPECT_EQ(0, codec_wrapper_->EnableOpusDtx(false));
// Turn off DTX.
EXPECT_EQ(0, codec_wrapper_->DisableOpusDtx());
// When DTX is off, we can set Audio mode with DTX forcing flag being false.
EXPECT_EQ(0, codec_wrapper_->SetOpusApplication(kAudio, false));
}
#endif // WEBRTC_CODEC_OPUS
} // namespace acm2
} // namespace webrtc
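
The test deleted above exercised the Opus application/DTX rules through ACMGenericCodec. With this commit the same knobs are reached through the mutable speech encoder that CodecManager hands out, as the audio_coding_module_impl.cc diff below shows. A hedged sketch of the equivalent calls (method and enum names taken from that diff; the codec_manager_ variable and surrounding setup are assumed):

  AudioEncoderMutable* enc = codec_manager_.CurrentSpeechEncoder();
  // Was codec_wrapper_->EnableOpusDtx(force_voip) / DisableOpusDtx():
  bool dtx_ok = enc->SetDtx(true, force_voip);
  enc->SetDtx(false, false);
  // Was codec_wrapper_->SetOpusApplication(kAudio, disable_dtx_if_needed):
  bool app_ok = enc->SetApplication(AudioEncoderMutable::kApplicationAudio,
                                    disable_dtx_if_needed);
  // Was codec_wrapper_->SetOpusMaxPlaybackRate(frequency_hz):
  bool rate_ok = enc->SetMaxPlaybackRate(frequency_hz);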

View File

@ -59,8 +59,6 @@
'acm_codec_database.cc',
'acm_codec_database.h',
'acm_common_defs.h',
'acm_generic_codec.cc',
'acm_generic_codec.h',
'acm_receiver.cc',
'acm_receiver.h',
'acm_resampler.cc',
@ -72,6 +70,8 @@
'call_statistics.h',
'codec_manager.cc',
'codec_manager.h',
'codec_owner.cc',
'codec_owner.h',
'initial_delay_manager.cc',
'initial_delay_manager.h',
'nack.cc',

View File

@ -19,7 +19,6 @@
#include "webrtc/engine_configurations.h"
#include "webrtc/modules/audio_coding/main/interface/audio_coding_module_typedefs.h"
#include "webrtc/modules/audio_coding/main/acm2/acm_common_defs.h"
#include "webrtc/modules/audio_coding/main/acm2/acm_generic_codec.h"
#include "webrtc/modules/audio_coding/main/acm2/acm_resampler.h"
#include "webrtc/modules/audio_coding/main/acm2/call_statistics.h"
#include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
@ -170,8 +169,7 @@ int32_t AudioCodingModuleImpl::Encode(const InputData& input_data) {
return -1;
}
AudioEncoder* audio_encoder =
codec_manager_.current_encoder()->GetAudioEncoder();
AudioEncoder* audio_encoder = codec_manager_.CurrentEncoder();
// Scale the timestamp to the codec's RTP timestamp rate.
uint32_t rtp_timestamp =
first_frame_ ? input_data.input_timestamp
@ -259,13 +257,13 @@ int AudioCodingModuleImpl::SendFrequency() const {
"SendFrequency()");
CriticalSectionScoped lock(acm_crit_sect_);
if (!codec_manager_.current_encoder()) {
if (!codec_manager_.CurrentEncoder()) {
WEBRTC_TRACE(webrtc::kTraceStream, webrtc::kTraceAudioCoding, id_,
"SendFrequency Failed, no codec is registered");
return -1;
}
return codec_manager_.current_encoder()->GetAudioEncoder()->SampleRateHz();
return codec_manager_.CurrentEncoder()->SampleRateHz();
}
// Get encode bitrate.
@ -273,25 +271,26 @@ int AudioCodingModuleImpl::SendFrequency() const {
// codecs return their long-term average or their fixed rate.
// TODO(henrik.lundin): Remove; not used.
int AudioCodingModuleImpl::SendBitrate() const {
CriticalSectionScoped lock(acm_crit_sect_);
if (!codec_manager_.current_encoder()) {
WEBRTC_TRACE(webrtc::kTraceStream, webrtc::kTraceAudioCoding, id_,
"SendBitrate Failed, no codec is registered");
return -1;
}
WebRtcACMCodecParams encoder_param;
codec_manager_.current_encoder()->EncoderParams(&encoder_param);
return encoder_param.codec_inst.rate;
FATAL() << "Deprecated";
// CriticalSectionScoped lock(acm_crit_sect_);
//
// if (!codec_manager_.current_encoder()) {
// WEBRTC_TRACE(webrtc::kTraceStream, webrtc::kTraceAudioCoding, id_,
// "SendBitrate Failed, no codec is registered");
// return -1;
// }
//
// WebRtcACMCodecParams encoder_param;
// codec_manager_.current_encoder()->EncoderParams(&encoder_param);
//
// return encoder_param.codec_inst.rate;
}
void AudioCodingModuleImpl::SetBitRate(int bitrate_bps) {
CriticalSectionScoped lock(acm_crit_sect_);
if (codec_manager_.current_encoder()) {
codec_manager_.current_encoder()->SetBitRate(bitrate_bps);
if (codec_manager_.CurrentEncoder()) {
codec_manager_.CurrentEncoder()->SetTargetBitrate(bitrate_bps);
}
}
@ -369,9 +368,8 @@ int AudioCodingModuleImpl::Add10MsDataInternal(const AudioFrame& audio_frame,
}
// Check whether we need an up-mix or down-mix?
bool remix =
ptr_frame->num_channels_ !=
codec_manager_.current_encoder()->GetAudioEncoder()->NumChannels();
bool remix = ptr_frame->num_channels_ !=
codec_manager_.CurrentEncoder()->NumChannels();
if (remix) {
if (ptr_frame->num_channels_ == 1) {
@ -388,15 +386,14 @@ int AudioCodingModuleImpl::Add10MsDataInternal(const AudioFrame& audio_frame,
const int16_t* ptr_audio = ptr_frame->data_;
// For pushing data to primary, point the |ptr_audio| to correct buffer.
if (codec_manager_.current_encoder()->GetAudioEncoder()->NumChannels() !=
if (codec_manager_.CurrentEncoder()->NumChannels() !=
ptr_frame->num_channels_)
ptr_audio = input_data->buffer;
input_data->input_timestamp = ptr_frame->timestamp_;
input_data->audio = ptr_audio;
input_data->length_per_channel = ptr_frame->samples_per_channel_;
input_data->audio_channel =
codec_manager_.current_encoder()->GetAudioEncoder()->NumChannels();
input_data->audio_channel = codec_manager_.CurrentEncoder()->NumChannels();
return 0;
}
@ -408,15 +405,13 @@ int AudioCodingModuleImpl::Add10MsDataInternal(const AudioFrame& audio_frame,
// is required, |*ptr_out| points to |in_frame|.
int AudioCodingModuleImpl::PreprocessToAddData(const AudioFrame& in_frame,
const AudioFrame** ptr_out) {
bool resample =
(in_frame.sample_rate_hz_ !=
codec_manager_.current_encoder()->GetAudioEncoder()->SampleRateHz());
bool resample = (in_frame.sample_rate_hz_ !=
codec_manager_.CurrentEncoder()->SampleRateHz());
// This variable is true if primary codec and secondary codec (if exists)
// are both mono and input is stereo.
bool down_mix =
(in_frame.num_channels_ == 2) &&
(codec_manager_.current_encoder()->GetAudioEncoder()->NumChannels() == 1);
bool down_mix = (in_frame.num_channels_ == 2) &&
(codec_manager_.CurrentEncoder()->NumChannels() == 1);
if (!first_10ms_data_) {
expected_in_ts_ = in_frame.timestamp_;
@ -427,9 +422,8 @@ int AudioCodingModuleImpl::PreprocessToAddData(const AudioFrame& in_frame,
expected_codec_ts_ +=
(in_frame.timestamp_ - expected_in_ts_) *
static_cast<uint32_t>(
(static_cast<double>(codec_manager_.current_encoder()
->GetAudioEncoder()
->SampleRateHz()) /
(static_cast<double>(
codec_manager_.CurrentEncoder()->SampleRateHz()) /
static_cast<double>(in_frame.sample_rate_hz_)));
expected_in_ts_ = in_frame.timestamp_;
}
@ -470,7 +464,7 @@ int AudioCodingModuleImpl::PreprocessToAddData(const AudioFrame& in_frame,
preprocess_frame_.samples_per_channel_ = resampler_.Resample10Msec(
src_ptr_audio, in_frame.sample_rate_hz_,
codec_manager_.current_encoder()->GetAudioEncoder()->SampleRateHz(),
codec_manager_.CurrentEncoder()->SampleRateHz(),
preprocess_frame_.num_channels_, AudioFrame::kMaxDataSizeSamples,
dest_ptr_audio);
@ -480,7 +474,7 @@ int AudioCodingModuleImpl::PreprocessToAddData(const AudioFrame& in_frame,
return -1;
}
preprocess_frame_.sample_rate_hz_ =
codec_manager_.current_encoder()->GetAudioEncoder()->SampleRateHz();
codec_manager_.CurrentEncoder()->SampleRateHz();
}
expected_codec_ts_ += preprocess_frame_.samples_per_channel_;
@ -528,11 +522,9 @@ int AudioCodingModuleImpl::SetCodecFEC(bool enable_codec_fec) {
int AudioCodingModuleImpl::SetPacketLossRate(int loss_rate) {
CriticalSectionScoped lock(acm_crit_sect_);
if (HaveValidEncoder("SetPacketLossRate") &&
codec_manager_.current_encoder()->SetPacketLossRate(loss_rate) < 0) {
WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
"Set packet loss rate failed.");
return -1;
if (HaveValidEncoder("SetPacketLossRate")) {
codec_manager_.CurrentSpeechEncoder()->SetProjectedPacketLossRate(
loss_rate / 100.0);
}
return 0;
}
@ -543,8 +535,10 @@ int AudioCodingModuleImpl::SetPacketLossRate(int loss_rate) {
int AudioCodingModuleImpl::SetVAD(bool enable_dtx,
bool enable_vad,
ACMVADMode mode) {
// Note: |enable_vad| is not used; VAD is enabled based on the DTX setting.
DCHECK_EQ(enable_dtx, enable_vad);
CriticalSectionScoped lock(acm_crit_sect_);
return codec_manager_.SetVAD(enable_dtx, enable_vad, mode);
return codec_manager_.SetVAD(enable_dtx, mode);
}
// Get VAD/DTX settings.
@ -802,7 +796,8 @@ int AudioCodingModuleImpl::SetISACMaxRate(int max_bit_per_sec) {
return -1;
}
return codec_manager_.current_encoder()->SetISACMaxRate(max_bit_per_sec);
codec_manager_.CurrentSpeechEncoder()->SetMaxRate(max_bit_per_sec);
return 0;
}
// TODO(henrik.lundin): Remove? Only used in tests. Deprecated in VoiceEngine.
@ -813,8 +808,8 @@ int AudioCodingModuleImpl::SetISACMaxPayloadSize(int max_size_bytes) {
return -1;
}
return codec_manager_.current_encoder()->SetISACMaxPayloadSize(
max_size_bytes);
codec_manager_.CurrentSpeechEncoder()->SetMaxPayloadSize(max_size_bytes);
return 0;
}
// TODO(henrik.lundin): Remove? Only used in tests.
@ -840,8 +835,22 @@ int AudioCodingModuleImpl::SetOpusApplication(OpusApplicationMode application,
if (!HaveValidEncoder("SetOpusApplication")) {
return -1;
}
return codec_manager_.current_encoder()->SetOpusApplication(
application, disable_dtx_if_needed);
AudioEncoderMutable::Application app;
switch (application) {
case kVoip:
app = AudioEncoderMutable::kApplicationSpeech;
break;
case kAudio:
app = AudioEncoderMutable::kApplicationAudio;
break;
default:
FATAL();
return 0;
}
return codec_manager_.CurrentSpeechEncoder()->SetApplication(
app, disable_dtx_if_needed)
? 0
: -1;
}
// Informs Opus encoder of the maximum playback rate the receiver will render.
@ -850,7 +859,9 @@ int AudioCodingModuleImpl::SetOpusMaxPlaybackRate(int frequency_hz) {
if (!HaveValidEncoder("SetOpusMaxPlaybackRate")) {
return -1;
}
return codec_manager_.current_encoder()->SetOpusMaxPlaybackRate(frequency_hz);
return codec_manager_.CurrentSpeechEncoder()->SetMaxPlaybackRate(frequency_hz)
? 0
: -1;
}
int AudioCodingModuleImpl::EnableOpusDtx(bool force_voip) {
@ -858,7 +869,8 @@ int AudioCodingModuleImpl::EnableOpusDtx(bool force_voip) {
if (!HaveValidEncoder("EnableOpusDtx")) {
return -1;
}
return codec_manager_.current_encoder()->EnableOpusDtx(force_voip);
return codec_manager_.CurrentSpeechEncoder()->SetDtx(true, force_voip) ? 0
: -1;
}
int AudioCodingModuleImpl::DisableOpusDtx() {
@ -866,7 +878,7 @@ int AudioCodingModuleImpl::DisableOpusDtx() {
if (!HaveValidEncoder("DisableOpusDtx")) {
return -1;
}
return codec_manager_.current_encoder()->DisableOpusDtx();
return codec_manager_.CurrentSpeechEncoder()->SetDtx(false, false) ? 0 : -1;
}
int AudioCodingModuleImpl::PlayoutTimestamp(uint32_t* timestamp) {
@ -874,7 +886,7 @@ int AudioCodingModuleImpl::PlayoutTimestamp(uint32_t* timestamp) {
}
bool AudioCodingModuleImpl::HaveValidEncoder(const char* caller_name) const {
if (!codec_manager_.current_encoder()) {
if (!codec_manager_.CurrentEncoder()) {
WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
"%s failed: No send codec is registered.", caller_name);
return false;

View File

@ -30,7 +30,6 @@ class AudioCodingImpl;
namespace acm2 {
class ACMDTMFDetection;
class ACMGenericCodec;
class AudioCodingModuleImpl : public AudioCodingModule {
public:

View File

@ -12,25 +12,26 @@
#include "webrtc/base/checks.h"
#include "webrtc/modules/audio_coding/main/acm2/audio_coding_module_impl.h"
#include "webrtc/system_wrappers/interface/trace.h"
namespace webrtc {
namespace acm2 {
namespace {
bool IsCodecRED(const CodecInst* codec) {
return (STR_CASE_CMP(codec->plname, "RED") == 0);
bool IsCodecRED(const CodecInst& codec) {
return (STR_CASE_CMP(codec.plname, "RED") == 0);
}
bool IsCodecRED(int index) {
return (IsCodecRED(&ACMCodecDB::database_[index]));
return (IsCodecRED(ACMCodecDB::database_[index]));
}
bool IsCodecCN(const CodecInst* codec) {
return (STR_CASE_CMP(codec->plname, "CN") == 0);
bool IsCodecCN(const CodecInst& codec) {
return (STR_CASE_CMP(codec.plname, "CN") == 0);
}
bool IsCodecCN(int index) {
return (IsCodecCN(&ACMCodecDB::database_[index]));
return (IsCodecCN(ACMCodecDB::database_[index]));
}
// Check if the given codec is a valid to be registered as send codec.
@ -80,13 +81,13 @@ int IsValidSendCodec(const CodecInst& send_codec, bool is_primary_encoder) {
if (!is_primary_encoder) {
// If registering the secondary encoder, then RED and CN are not valid
// choices as encoder.
if (IsCodecRED(&send_codec)) {
if (IsCodecRED(send_codec)) {
WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, dummy_id,
"RED cannot be secondary codec");
return -1;
}
if (IsCodecCN(&send_codec)) {
if (IsCodecCN(send_codec)) {
WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, dummy_id,
"DTX cannot be secondary codec");
return -1;
@ -96,7 +97,56 @@ int IsValidSendCodec(const CodecInst& send_codec, bool is_primary_encoder) {
}
bool IsIsac(const CodecInst& codec) {
return !STR_CASE_CMP(codec.plname, "isac");
return
#if (defined(WEBRTC_CODEC_ISAC) || defined(WEBRTC_CODEC_ISACFX))
!STR_CASE_CMP(codec.plname, "isac") ||
#endif
false;
}
bool IsOpus(const CodecInst& codec) {
return
#ifdef WEBRTC_CODEC_OPUS
!STR_CASE_CMP(codec.plname, "opus") ||
#endif
false;
}
bool IsPcmU(const CodecInst& codec) {
return !STR_CASE_CMP(codec.plname, "pcmu");
}
bool IsPcmA(const CodecInst& codec) {
return !STR_CASE_CMP(codec.plname, "pcma");
}
bool IsPcm16B(const CodecInst& codec) {
return
#ifdef WEBRTC_CODEC_PCM16
!STR_CASE_CMP(codec.plname, "l16") ||
#endif
false;
}
bool IsIlbc(const CodecInst& codec) {
return
#ifdef WEBRTC_CODEC_ILBC
!STR_CASE_CMP(codec.plname, "ilbc") ||
#endif
false;
}
bool IsG722(const CodecInst& codec) {
return
#ifdef WEBRTC_CODEC_G722
!STR_CASE_CMP(codec.plname, "g722") ||
#endif
false;
}
bool CodecSupported(const CodecInst& codec) {
return IsOpus(codec) || IsPcmU(codec) || IsPcmA(codec) || IsPcm16B(codec) ||
IsIlbc(codec) || IsG722(codec) || IsIsac(codec);
}
const CodecInst kEmptyCodecInst = {-1, "noCodecRegistered", 0, 0, 0, 0};
@ -110,10 +160,8 @@ CodecManager::CodecManager(AudioCodingModuleImpl* acm)
cng_fb_pltype_(255),
red_nb_pltype_(255),
stereo_send_(false),
vad_enabled_(false),
dtx_enabled_(false),
vad_mode_(VADNormal),
current_encoder_(nullptr),
send_codec_inst_(kEmptyCodecInst),
red_enabled_(false),
codec_fec_enabled_(false) {
@ -151,7 +199,7 @@ int CodecManager::RegisterSendCodec(const CodecInst& send_codec) {
int dummy_id = 0;
// RED can be registered with other payload type. If not registered a default
// payload type is used.
if (IsCodecRED(&send_codec)) {
if (IsCodecRED(send_codec)) {
// TODO(tlegrand): Remove this check. Already taken care of in
// ACMCodecDB::CodecNumber().
// Check if the payload-type is valid
@ -170,30 +218,29 @@ int CodecManager::RegisterSendCodec(const CodecInst& send_codec) {
"registration");
return -1;
}
SetRedPayloadType(send_codec.plfreq, send_codec.pltype);
return 0;
}
// CNG can be registered with other payload type. If not registered the
// default payload types from codec database will be used.
if (IsCodecCN(&send_codec)) {
if (IsCodecCN(send_codec)) {
// CNG is registered.
switch (send_codec.plfreq) {
case 8000: {
cng_nb_pltype_ = static_cast<uint8_t>(send_codec.pltype);
break;
return 0;
}
case 16000: {
cng_wb_pltype_ = static_cast<uint8_t>(send_codec.pltype);
break;
return 0;
}
case 32000: {
cng_swb_pltype_ = static_cast<uint8_t>(send_codec.pltype);
break;
return 0;
}
case 48000: {
cng_fb_pltype_ = static_cast<uint8_t>(send_codec.pltype);
break;
return 0;
}
default: {
WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, dummy_id,
@ -202,185 +249,76 @@ int CodecManager::RegisterSendCodec(const CodecInst& send_codec) {
return -1;
}
}
SetCngPayloadType(send_codec.plfreq, send_codec.pltype);
return 0;
}
// Set Stereo, and make sure VAD and DTX is turned off.
if (send_codec.channels == 2) {
stereo_send_ = true;
if (vad_enabled_ || dtx_enabled_) {
if (dtx_enabled_) {
WEBRTC_TRACE(webrtc::kTraceWarning, webrtc::kTraceAudioCoding, dummy_id,
"VAD/DTX is turned off, not supported when sending stereo.");
}
vad_enabled_ = false;
dtx_enabled_ = false;
} else {
stereo_send_ = false;
}
// Check if the codec is already registered as send codec.
bool is_send_codec;
if (current_encoder_) {
int send_codec_id = ACMCodecDB::CodecNumber(send_codec_inst_);
assert(send_codec_id >= 0);
is_send_codec = send_codec_id == codec_id;
} else {
is_send_codec = false;
bool new_codec = true;
if (codec_owner_.Encoder()) {
int new_codec_id = ACMCodecDB::CodecNumber(send_codec_inst_);
DCHECK_GE(new_codec_id, 0);
new_codec = new_codec_id != codec_id;
}
// If new codec, or new settings, register.
if (!is_send_codec) {
ACMGenericCodec* new_codec;
if (!IsIsac(send_codec)) {
encoder_.reset(ACMCodecDB::CreateCodecInstance(
send_codec, cng_nb_pltype_, cng_wb_pltype_, cng_swb_pltype_,
cng_fb_pltype_, red_enabled_, red_nb_pltype_));
new_codec = encoder_.get();
} else {
if (!isac_enc_dec_) {
isac_enc_dec_.reset(ACMCodecDB::CreateCodecInstance(
send_codec, cng_nb_pltype_, cng_wb_pltype_, cng_swb_pltype_,
cng_fb_pltype_, red_enabled_, red_nb_pltype_));
}
new_codec = isac_enc_dec_.get();
if (RedPayloadType(send_codec.plfreq) == -1) {
red_enabled_ = false;
}
if (new_codec) {
// This is a new codec. Register it and return.
DCHECK(CodecSupported(send_codec));
if (IsOpus(send_codec)) {
// VAD/DTX not supported.
dtx_enabled_ = false;
}
DCHECK(new_codec);
codec_owner_.SetEncoders(
send_codec, dtx_enabled_ ? CngPayloadType(send_codec.plfreq) : -1,
vad_mode_, red_enabled_ ? RedPayloadType(send_codec.plfreq) : -1);
DCHECK(codec_owner_.Encoder());
WebRtcACMCodecParams codec_params;
memcpy(&(codec_params.codec_inst), &send_codec, sizeof(CodecInst));
codec_params.enable_vad = vad_enabled_;
codec_params.enable_dtx = dtx_enabled_;
codec_params.vad_mode = vad_mode_;
// Force initialization.
if (new_codec->InitEncoder(&codec_params, true) < 0) {
// Could not initialize the encoder.
// Check if already have a registered codec.
// Depending on that different messages are logged.
if (!current_encoder_) {
WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, dummy_id,
"Cannot Initialize the encoder No Encoder is registered");
} else {
WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, dummy_id,
"Cannot Initialize the encoder, continue encoding with "
"the previously registered codec");
}
return -1;
}
// Update states.
dtx_enabled_ = codec_params.enable_dtx;
vad_enabled_ = codec_params.enable_vad;
vad_mode_ = codec_params.vad_mode;
// Everything is fine so we can replace the previous codec with this one.
if (current_encoder_) {
// If we change codec we start fresh with RED.
// This is not strictly required by the standard.
if (new_codec->SetCopyRed(red_enabled_) < 0) {
// We tried to preserve the old red status, if failed, it means the
// red status has to be flipped.
red_enabled_ = !red_enabled_;
}
new_codec->SetVAD(&dtx_enabled_, &vad_enabled_, &vad_mode_);
if (!new_codec->HasInternalFEC()) {
codec_fec_enabled_ = false;
} else {
if (new_codec->SetFEC(codec_fec_enabled_) < 0) {
WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, dummy_id,
"Cannot set codec FEC");
return -1;
}
}
}
current_encoder_ = new_codec;
DCHECK(current_encoder_);
memcpy(&send_codec_inst_, &send_codec, sizeof(CodecInst));
return 0;
} else {
// If codec is the same as already registered check if any parameters
// has changed compared to the current values.
// If any parameter is valid then apply it and record.
bool force_init = false;
// Check the payload type.
if (send_codec.pltype != send_codec_inst_.pltype) {
// At this point check if the given payload type is valid.
// Record it later when the sampling frequency is changed
// successfully.
if (!ACMCodecDB::ValidPayloadType(send_codec.pltype)) {
WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, dummy_id,
"Out of range payload type");
return -1;
}
}
// If there is a codec that ONE instance of codec supports multiple
// sampling frequencies, then we need to take care of it here.
// one such a codec is iSAC. Both WB and SWB are encoded and decoded
// with one iSAC instance. Therefore, we need to update the encoder
// frequency if required.
if (send_codec_inst_.plfreq != send_codec.plfreq) {
force_init = true;
}
// If packet size or number of channels has changed, we need to
// re-initialize the encoder.
if (send_codec_inst_.pacsize != send_codec.pacsize) {
force_init = true;
}
if (send_codec_inst_.channels != send_codec.channels) {
force_init = true;
}
if (force_init) {
WebRtcACMCodecParams codec_params;
memcpy(&(codec_params.codec_inst), &send_codec, sizeof(CodecInst));
codec_params.enable_vad = vad_enabled_;
codec_params.enable_dtx = dtx_enabled_;
codec_params.vad_mode = vad_mode_;
// Force initialization.
if (current_encoder_->InitEncoder(&codec_params, true) < 0) {
WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, dummy_id,
"Could not change the codec packet-size.");
return -1;
}
send_codec_inst_.plfreq = send_codec.plfreq;
send_codec_inst_.pacsize = send_codec.pacsize;
send_codec_inst_.channels = send_codec.channels;
}
// If the change of sampling frequency has been successful then
// we store the payload-type.
send_codec_inst_.pltype = send_codec.pltype;
// Check if a change in Rate is required.
if (send_codec.rate != send_codec_inst_.rate) {
current_encoder_->SetBitRate(send_codec.rate);
send_codec_inst_.rate = send_codec.rate;
}
if (!current_encoder_->HasInternalFEC()) {
codec_fec_enabled_ = false;
} else {
if (current_encoder_->SetFEC(codec_fec_enabled_) < 0) {
WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, dummy_id,
"Cannot set codec FEC");
return -1;
}
}
codec_fec_enabled_ =
codec_fec_enabled_ &&
codec_owner_.SpeechEncoder()->SetFec(codec_fec_enabled_);
send_codec_inst_ = send_codec;
return 0;
}
// This is an existing codec; re-create it if any parameters have changed.
if (send_codec_inst_.plfreq != send_codec.plfreq ||
send_codec_inst_.pacsize != send_codec.pacsize ||
send_codec_inst_.channels != send_codec.channels) {
codec_owner_.SetEncoders(
send_codec, dtx_enabled_ ? CngPayloadType(send_codec.plfreq) : -1,
vad_mode_, red_enabled_ ? RedPayloadType(send_codec.plfreq) : -1);
DCHECK(codec_owner_.Encoder());
}
send_codec_inst_.plfreq = send_codec.plfreq;
send_codec_inst_.pacsize = send_codec.pacsize;
send_codec_inst_.channels = send_codec.channels;
send_codec_inst_.pltype = send_codec.pltype;
// Check if a change in Rate is required.
if (send_codec.rate != send_codec_inst_.rate) {
codec_owner_.SpeechEncoder()->SetTargetBitrate(send_codec.rate);
send_codec_inst_.rate = send_codec.rate;
}
codec_fec_enabled_ = codec_fec_enabled_ &&
codec_owner_.SpeechEncoder()->SetFec(codec_fec_enabled_);
return 0;
}
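// Illustrative sketch, not part of this change: the caller-side sequence that
// exercises RegisterSendCodec() above. The |manager| pointer and the CodecInst
// values are hypothetical; the calls mirror the new CodecManager interface.
void ExampleRegisterSendCodec(CodecManager* manager) {
  // Fields: pltype, plname, plfreq, pacsize, channels, rate.
  CodecInst pcmu = {0, "pcmu", 8000, 160, 1, 64000};
  if (manager->RegisterSendCodec(pcmu) != 0) {
    return;  // Registration failed; any previously registered encoder remains.
  }
  // Each setter rebuilds the encoder stack via CodecOwner::SetEncoders when
  // the setting actually changes.
  manager->SetVAD(true, VADNormal);  // Adds an AudioEncoderCng wrapper (DTX).
  manager->SetCopyRed(true);         // Adds an AudioEncoderCopyRed wrapper (8 kHz only).
}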
int CodecManager::SendCodec(CodecInst* current_codec) const {
@ -388,16 +326,12 @@ int CodecManager::SendCodec(CodecInst* current_codec) const {
WEBRTC_TRACE(webrtc::kTraceStream, webrtc::kTraceAudioCoding, dummy_id,
"SendCodec()");
if (!current_encoder_) {
if (!codec_owner_.Encoder()) {
WEBRTC_TRACE(webrtc::kTraceStream, webrtc::kTraceAudioCoding, dummy_id,
"SendCodec Failed, no codec is registered");
return -1;
}
WebRtcACMCodecParams encoder_param;
current_encoder_->EncoderParams(&encoder_param);
encoder_param.codec_inst.pltype = send_codec_inst_.pltype;
memcpy(current_codec, &(encoder_param.codec_inst), sizeof(CodecInst));
*current_codec = send_codec_inst_;
return 0;
}
@ -425,16 +359,11 @@ int CodecManager::RegisterReceiveCodec(const CodecInst& codec) {
return -1;
}
AudioDecoder* decoder = NULL;
// Get |decoder| associated with |codec|. |decoder| can be NULL if |codec|
// does not own its decoder.
if (GetAudioDecoder(codec, codec_id, &decoder) < 0) {
WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, 0,
"Wrong codec params to be registered as receive codec");
return -1;
}
uint8_t payload_type = static_cast<uint8_t>(codec.pltype);
return acm_->RegisterDecoder(codec_id, payload_type, codec.channels, decoder);
return acm_->RegisterDecoder(codec_id, codec.pltype, codec.channels,
GetAudioDecoder(codec));
}
bool CodecManager::SetCopyRed(bool enable) {
@ -443,51 +372,53 @@ bool CodecManager::SetCopyRed(bool enable) {
"Codec internal FEC and RED cannot be co-enabled.");
return false;
}
if (current_encoder_ && current_encoder_->SetCopyRed(enable) < 0) {
WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, 0,
"SetCopyRed failed");
if (enable && RedPayloadType(send_codec_inst_.plfreq) == -1) {
WEBRTC_TRACE(webrtc::kTraceWarning, webrtc::kTraceAudioCoding, 0,
"Cannot enable RED at %i Hz.", send_codec_inst_.plfreq);
return false;
}
red_enabled_ = enable;
if (red_enabled_ != enable) {
red_enabled_ = enable;
if (codec_owner_.Encoder())
codec_owner_.SetEncoders(
send_codec_inst_,
dtx_enabled_ ? CngPayloadType(send_codec_inst_.plfreq) : -1,
vad_mode_,
red_enabled_ ? RedPayloadType(send_codec_inst_.plfreq) : -1);
}
return true;
}
int CodecManager::SetVAD(bool enable_dtx, bool enable_vad, ACMVADMode mode) {
int CodecManager::SetVAD(bool enable, ACMVADMode mode) {
// Sanity check of the mode.
if ((mode != VADNormal) && (mode != VADLowBitrate) && (mode != VADAggr) &&
(mode != VADVeryAggr)) {
WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, 0,
"Invalid VAD Mode %d, no change is made to VAD/DTX status",
mode);
return -1;
}
DCHECK(mode == VADNormal || mode == VADLowBitrate || mode == VADAggr ||
mode == VADVeryAggr);
// Check that the send codec is mono. We don't support VAD/DTX for stereo
// sending.
if ((enable_dtx || enable_vad) && stereo_send_) {
if (enable && stereo_send_) {
WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, 0,
"VAD/DTX not supported for stereo sending");
dtx_enabled_ = false;
vad_enabled_ = false;
vad_mode_ = mode;
return -1;
}
// Store VAD/DTX settings. Values can be changed in the call to "SetVAD"
// below.
dtx_enabled_ = enable_dtx;
vad_enabled_ = enable_vad;
vad_mode_ = mode;
// If a send codec is registered, set VAD/DTX for the codec.
if (current_encoder_ &&
current_encoder_->SetVAD(&dtx_enabled_, &vad_enabled_, &vad_mode_) < 0) {
// SetVAD failed.
WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, 0,
"SetVAD failed");
vad_enabled_ = false;
if (IsOpus(send_codec_inst_)) {
// VAD/DTX not supported.
dtx_enabled_ = false;
return -1;
return 0;
}
if (dtx_enabled_ != enable || vad_mode_ != mode) {
dtx_enabled_ = enable;
vad_mode_ = mode;
if (codec_owner_.Encoder())
codec_owner_.SetEncoders(
send_codec_inst_,
dtx_enabled_ ? CngPayloadType(send_codec_inst_.plfreq) : -1,
vad_mode_,
red_enabled_ ? RedPayloadType(send_codec_inst_.plfreq) : -1);
}
return 0;
}
@ -496,7 +427,7 @@ void CodecManager::VAD(bool* dtx_enabled,
bool* vad_enabled,
ACMVADMode* mode) const {
*dtx_enabled = dtx_enabled_;
*vad_enabled = vad_enabled_;
*vad_enabled = dtx_enabled_;
*mode = vad_mode_;
}
@ -507,55 +438,44 @@ int CodecManager::SetCodecFEC(bool enable_codec_fec) {
return -1;
}
// Set codec FEC.
if (current_encoder_ && current_encoder_->SetFEC(enable_codec_fec) < 0) {
WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, 0,
"Set codec internal FEC failed.");
return -1;
}
codec_fec_enabled_ = enable_codec_fec;
return 0;
CHECK(codec_owner_.SpeechEncoder());
codec_fec_enabled_ = codec_owner_.SpeechEncoder()->SetFec(enable_codec_fec) &&
enable_codec_fec;
return codec_fec_enabled_ == enable_codec_fec ? 0 : -1;
}
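// Illustrative sketch, not part of this change: SetCodecFEC() above reports
// success only if the current speech encoder accepts the setting (e.g. Opus
// with its internal FEC). The |manager| pointer is hypothetical.
void ExampleEnableCodecFec(CodecManager* manager) {
  if (manager->SetCodecFEC(true) != 0) {
    // The active encoder has no usable internal FEC; codec_fec_enabled() stays
    // false and the caller can fall back to RED via SetCopyRed(true).
  }
}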
void CodecManager::SetCngPayloadType(int sample_rate_hz, int payload_type) {
if (isac_enc_dec_)
isac_enc_dec_->SetCngPt(sample_rate_hz, payload_type);
if (encoder_)
encoder_->SetCngPt(sample_rate_hz, payload_type);
AudioDecoder* CodecManager::GetAudioDecoder(const CodecInst& codec) {
return IsIsac(codec) ? codec_owner_.GetIsacDecoder() : nullptr;
}
void CodecManager::SetRedPayloadType(int sample_rate_hz, int payload_type) {
if (isac_enc_dec_)
isac_enc_dec_->SetRedPt(sample_rate_hz, payload_type);
if (encoder_)
encoder_->SetRedPt(sample_rate_hz, payload_type);
}
int CodecManager::GetAudioDecoder(const CodecInst& codec,
int codec_id,
AudioDecoder** decoder) {
if (!ACMCodecDB::OwnsDecoder(codec_id)) {
DCHECK(!IsIsac(codec)) << "Codec must not be iSAC at this point.";
*decoder = nullptr;
return 0;
}
DCHECK(IsIsac(codec)) << "Codec must be iSAC at this point.";
// This codec has to own its own decoder. Therefore, it should create the
// corresponding AudioDecoder class and insert it into NetEq. If the codec
// does not exist create it.
//
// TODO(turajs): this part of the code is common with RegisterSendCodec(),
// make a method for it.
if (!isac_enc_dec_) {
isac_enc_dec_.reset(ACMCodecDB::CreateCodecInstance(
codec, cng_nb_pltype_, cng_wb_pltype_, cng_swb_pltype_, cng_fb_pltype_,
red_enabled_, red_nb_pltype_));
if (!isac_enc_dec_)
int CodecManager::CngPayloadType(int sample_rate_hz) const {
switch (sample_rate_hz) {
case 8000:
return cng_nb_pltype_;
case 16000:
return cng_wb_pltype_;
case 32000:
return cng_swb_pltype_;
case 48000:
return cng_fb_pltype_;
default:
FATAL() << sample_rate_hz << " Hz is not supported";
return -1;
}
}
int CodecManager::RedPayloadType(int sample_rate_hz) const {
switch (sample_rate_hz) {
case 8000:
return red_nb_pltype_;
case 16000:
case 32000:
case 48000:
return -1;
default:
FATAL() << sample_rate_hz << " Hz is not supported";
return -1;
}
*decoder = isac_enc_dec_->Decoder();
DCHECK(*decoder);
return 0;
}
} // namespace acm2


@ -12,8 +12,12 @@
#define WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_CODEC_MANAGER_H_
#include "webrtc/base/constructormagic.h"
#include "webrtc/base/scoped_ptr.h"
#include "webrtc/base/thread_checker.h"
#include "webrtc/modules/audio_coding/codecs/audio_encoder.h"
#include "webrtc/modules/audio_coding/codecs/isac/main/interface/audio_encoder_isac.h"
#include "webrtc/modules/audio_coding/main/acm2/acm_codec_database.h"
#include "webrtc/modules/audio_coding/main/acm2/codec_owner.h"
#include "webrtc/modules/audio_coding/main/interface/audio_coding_module_typedefs.h"
#include "webrtc/common_types.h"
@ -23,7 +27,6 @@ class AudioDecoder;
namespace acm2 {
class ACMGenericCodec;
class AudioCodingModuleImpl;
class CodecManager final {
@ -39,7 +42,7 @@ class CodecManager final {
bool SetCopyRed(bool enable);
int SetVAD(bool enable_dtx, bool enable_vad, ACMVADMode mode);
int SetVAD(bool enable, ACMVADMode mode);
void VAD(bool* dtx_enabled, bool* vad_enabled, ACMVADMode* mode) const;
@ -51,28 +54,23 @@ class CodecManager final {
bool codec_fec_enabled() const { return codec_fec_enabled_; }
ACMGenericCodec* current_encoder() { return current_encoder_; }
const ACMGenericCodec* current_encoder() const { return current_encoder_; }
AudioEncoderMutable* CurrentSpeechEncoder() {
return codec_owner_.SpeechEncoder();
}
AudioEncoder* CurrentEncoder() { return codec_owner_.Encoder(); }
const AudioEncoder* CurrentEncoder() const { return codec_owner_.Encoder(); }
private:
void SetCngPayloadType(int sample_rate_hz, int payload_type);
// Returns a pointer to AudioDecoder of the given codec. For iSAC, encoding
// and decoding have to be performed on a shared codec instance. By calling
// this method, we get the codec instance that ACM owns.
// If |codec| does not share an instance between encoder and decoder, returns
// null.
AudioDecoder* GetAudioDecoder(const CodecInst& codec);
void SetRedPayloadType(int sample_rate_hz, int payload_type);
int CngPayloadType(int sample_rate_hz) const;
// Get a pointer to AudioDecoder of the given codec. For some codecs, e.g.
// iSAC, encoding and decoding have to be performed on a shared
// codec-instance. By calling this method, we get the codec-instance that ACM
// owns, then pass that to NetEq. This way, we perform both encoding and
// decoding on the same codec-instance. Furthermore, ACM would have control
// over decoder functionality if required. If |codec| does not share an
// instance between encoder and decoder, the |*decoder| is set NULL.
// The field ACMCodecDB::CodecSettings.owns_decoder indicates that if a
// codec owns the decoder-instance. For such codecs |*decoder| should be a
// valid pointer, otherwise it will be NULL.
int GetAudioDecoder(const CodecInst& codec,
int codec_id,
AudioDecoder** decoder);
int RedPayloadType(int sample_rate_hz) const;
AudioCodingModuleImpl* acm_;
rtc::ThreadChecker thread_checker_;
@ -82,15 +80,12 @@ class CodecManager final {
uint8_t cng_fb_pltype_;
uint8_t red_nb_pltype_;
bool stereo_send_;
bool vad_enabled_;
bool dtx_enabled_;
ACMVADMode vad_mode_;
ACMGenericCodec* current_encoder_;
CodecInst send_codec_inst_;
bool red_enabled_;
bool codec_fec_enabled_;
rtc::scoped_ptr<ACMGenericCodec> isac_enc_dec_;
rtc::scoped_ptr<ACMGenericCodec> encoder_;
CodecOwner codec_owner_;
DISALLOW_COPY_AND_ASSIGN(CodecManager);
};


@ -0,0 +1,230 @@
/*
* Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "webrtc/modules/audio_coding/main/acm2/codec_owner.h"
#include "webrtc/base/checks.h"
#include "webrtc/engine_configurations.h"
#include "webrtc/modules/audio_coding/codecs/cng/include/audio_encoder_cng.h"
#include "webrtc/modules/audio_coding/codecs/g711/include/audio_encoder_pcm.h"
#include "webrtc/modules/audio_coding/codecs/g722/include/audio_encoder_g722.h"
#include "webrtc/modules/audio_coding/codecs/ilbc/interface/audio_encoder_ilbc.h"
#include "webrtc/modules/audio_coding/codecs/isac/fix/interface/audio_encoder_isacfix.h"
#include "webrtc/modules/audio_coding/codecs/isac/main/interface/audio_encoder_isac.h"
#include "webrtc/modules/audio_coding/codecs/opus/interface/audio_encoder_opus.h"
#include "webrtc/modules/audio_coding/codecs/pcm16b/include/audio_encoder_pcm16b.h"
#include "webrtc/modules/audio_coding/codecs/red/audio_encoder_copy_red.h"
namespace webrtc {
namespace acm2 {
namespace {
bool IsIsac(const CodecInst& codec) {
return
#if (defined(WEBRTC_CODEC_ISAC) || defined(WEBRTC_CODEC_ISACFX))
!STR_CASE_CMP(codec.plname, "isac") ||
#endif
false;
}
bool IsOpus(const CodecInst& codec) {
return
#ifdef WEBRTC_CODEC_OPUS
!STR_CASE_CMP(codec.plname, "opus") ||
#endif
false;
}
bool IsPcmU(const CodecInst& codec) {
return !STR_CASE_CMP(codec.plname, "pcmu");
}
bool IsPcmA(const CodecInst& codec) {
return !STR_CASE_CMP(codec.plname, "pcma");
}
bool IsPcm16B(const CodecInst& codec) {
return
#ifdef WEBRTC_CODEC_PCM16
!STR_CASE_CMP(codec.plname, "l16") ||
#endif
false;
}
bool IsIlbc(const CodecInst& codec) {
return
#ifdef WEBRTC_CODEC_ILBC
!STR_CASE_CMP(codec.plname, "ilbc") ||
#endif
false;
}
bool IsG722(const CodecInst& codec) {
return
#ifdef WEBRTC_CODEC_G722
!STR_CASE_CMP(codec.plname, "g722") ||
#endif
false;
}
} // namespace
CodecOwner::CodecOwner() : isac_is_encoder_(false) {
}
CodecOwner::~CodecOwner() = default;
namespace {
AudioEncoderDecoderMutableIsac* CreateIsacCodec(const CodecInst& speech_inst) {
#if defined(WEBRTC_CODEC_ISACFX)
return new AudioEncoderDecoderMutableIsacFix(speech_inst);
#elif defined(WEBRTC_CODEC_ISAC)
return new AudioEncoderDecoderMutableIsacFloat(speech_inst);
#else
FATAL() << "iSAC is not supported.";
return nullptr;
#endif
}
AudioEncoder* CreateSpeechEncoder(
const CodecInst& speech_inst,
rtc::scoped_ptr<AudioEncoderMutable>* speech_encoder,
rtc::scoped_ptr<AudioEncoderDecoderMutableIsac>* isac_codec,
bool* isac_is_encoder) {
if (IsIsac(speech_inst)) {
if (*isac_codec) {
(*isac_codec)->UpdateSettings(speech_inst);
} else {
isac_codec->reset(CreateIsacCodec(speech_inst));
}
*isac_is_encoder = true;
speech_encoder->reset();
return isac_codec->get();
}
if (IsOpus(speech_inst)) {
speech_encoder->reset(new AudioEncoderMutableOpus(speech_inst));
} else if (IsPcmU(speech_inst)) {
speech_encoder->reset(new AudioEncoderMutablePcmU(speech_inst));
} else if (IsPcmA(speech_inst)) {
speech_encoder->reset(new AudioEncoderMutablePcmA(speech_inst));
} else if (IsPcm16B(speech_inst)) {
speech_encoder->reset(new AudioEncoderMutablePcm16B(speech_inst));
} else if (IsIlbc(speech_inst)) {
speech_encoder->reset(new AudioEncoderMutableIlbc(speech_inst));
} else if (IsG722(speech_inst)) {
speech_encoder->reset(new AudioEncoderMutableG722(speech_inst));
} else {
FATAL();
}
*isac_is_encoder = false;
return speech_encoder->get();
}
AudioEncoder* CreateRedEncoder(int red_payload_type,
AudioEncoder* encoder,
rtc::scoped_ptr<AudioEncoder>* red_encoder) {
if (red_payload_type == -1) {
red_encoder->reset();
return encoder;
}
AudioEncoderCopyRed::Config config;
config.payload_type = red_payload_type;
config.speech_encoder = encoder;
red_encoder->reset(new AudioEncoderCopyRed(config));
return red_encoder->get();
}
AudioEncoder* CreateCngEncoder(int cng_payload_type,
ACMVADMode vad_mode,
AudioEncoder* encoder,
rtc::scoped_ptr<AudioEncoder>* cng_encoder) {
if (cng_payload_type == -1) {
cng_encoder->reset();
return encoder;
}
AudioEncoderCng::Config config;
config.num_channels = encoder->NumChannels();
config.payload_type = cng_payload_type;
config.speech_encoder = encoder;
switch (vad_mode) {
case VADNormal:
config.vad_mode = Vad::kVadNormal;
break;
case VADLowBitrate:
config.vad_mode = Vad::kVadLowBitrate;
break;
case VADAggr:
config.vad_mode = Vad::kVadAggressive;
break;
case VADVeryAggr:
config.vad_mode = Vad::kVadVeryAggressive;
break;
default:
FATAL();
}
cng_encoder->reset(new AudioEncoderCng(config));
return cng_encoder->get();
}
} // namespace
void CodecOwner::SetEncoders(const CodecInst& speech_inst,
int cng_payload_type,
ACMVADMode vad_mode,
int red_payload_type) {
AudioEncoder* encoder = CreateSpeechEncoder(speech_inst, &speech_encoder_,
&isac_codec_, &isac_is_encoder_);
encoder = CreateRedEncoder(red_payload_type, encoder, &red_encoder_);
encoder =
CreateCngEncoder(cng_payload_type, vad_mode, encoder, &cng_encoder_);
DCHECK(!speech_encoder_ || !isac_is_encoder_);
DCHECK(!isac_is_encoder_ || isac_codec_);
}
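// Illustrative sketch, not part of this change: how the stack assembled above
// is used. The values are hypothetical; the calls mirror CodecManager and
// codec_owner_unittest.cc in this CL.
void ExampleEncodeWithCng(uint32_t rtp_timestamp) {
  CodecOwner owner;
  // Fields: pltype, plname, plfreq, pacsize, channels, rate.
  CodecInst pcmu = {0, "pcmu", 8000, 160, 1, 64000};
  owner.SetEncoders(pcmu, 13 /* CNG payload type; -1 disables DTX */, VADNormal,
                    -1 /* RED payload type; -1 disables RED */);
  // Encoder() is the outermost encoder (here AudioEncoderCng wrapping PCM-U);
  // SpeechEncoder() is the inner speech encoder, used for reconfiguration.
  owner.SpeechEncoder()->SetTargetBitrate(64000);
  const int16_t audio[80] = {0};  // 10 ms of zero (silent) 8 kHz audio.
  uint8_t out[160];
  AudioEncoder::EncodedInfo info =
      owner.Encoder()->Encode(rtp_timestamp, audio, 80, sizeof(out), out);
  // During sustained silence most calls return info.encoded_bytes == 0, with a
  // CN frame emitted periodically (see codec_owner_unittest.cc).
  static_cast<void>(info);
}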
AudioDecoder* CodecOwner::GetIsacDecoder() {
if (!isac_codec_) {
DCHECK(!isac_is_encoder_);
// None of the parameter values in |speech_inst| matter when the codec is
// used only as a decoder.
CodecInst speech_inst;
speech_inst.plfreq = 16000;
speech_inst.rate = -1;
speech_inst.pacsize = 480;
isac_codec_.reset(CreateIsacCodec(speech_inst));
}
return isac_codec_.get();
}
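// Illustrative sketch, not part of this change: for iSAC, a single underlying
// codec instance serves as both encoder and decoder, which is why CodecManager
// hands the pointer returned above to NetEq when iSAC is registered as a
// receive codec. The |owner| pointer is hypothetical.
void ExampleIsacSharedInstance(CodecOwner* owner) {
  AudioDecoder* isac_decoder = owner->GetIsacDecoder();
  // Repeated calls return the same instance, and the same object is reused as
  // the speech encoder if iSAC is later passed to SetEncoders().
  CHECK_EQ(isac_decoder, owner->GetIsacDecoder());
}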
AudioEncoder* CodecOwner::Encoder() {
const auto& const_this = *this;
return const_cast<AudioEncoder*>(const_this.Encoder());
}
const AudioEncoder* CodecOwner::Encoder() const {
if (cng_encoder_)
return cng_encoder_.get();
if (red_encoder_)
return red_encoder_.get();
return SpeechEncoder();
}
AudioEncoderMutable* CodecOwner::SpeechEncoder() {
const auto& const_this = *this;
return const_cast<AudioEncoderMutable*>(const_this.SpeechEncoder());
}
const AudioEncoderMutable* CodecOwner::SpeechEncoder() const {
DCHECK(!speech_encoder_ || !isac_is_encoder_);
DCHECK(!isac_is_encoder_ || isac_codec_);
if (speech_encoder_)
return speech_encoder_.get();
return isac_is_encoder_ ? isac_codec_.get() : nullptr;
}
} // namespace acm2
} // namespace webrtc


@ -0,0 +1,63 @@
/*
* Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_CODEC_OWNER_H_
#define WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_CODEC_OWNER_H_
#include "webrtc/base/constructormagic.h"
#include "webrtc/base/scoped_ptr.h"
#include "webrtc/common_types.h"
#include "webrtc/modules/audio_coding/codecs/audio_encoder.h"
#include "webrtc/modules/audio_coding/codecs/isac/main/interface/audio_encoder_isac.h"
#include "webrtc/modules/audio_coding/main/interface/audio_coding_module_typedefs.h"
namespace webrtc {
class AudioDecoder;
namespace acm2 {
class CodecOwner {
public:
CodecOwner();
~CodecOwner();
void SetEncoders(const CodecInst& speech_inst,
int cng_payload_type,
ACMVADMode vad_mode,
int red_payload_type);
AudioDecoder* GetIsacDecoder();
AudioEncoder* Encoder();
const AudioEncoder* Encoder() const;
AudioEncoderMutable* SpeechEncoder();
const AudioEncoderMutable* SpeechEncoder() const;
private:
// If iSAC is registered as an encoder, |isac_is_encoder_| is true,
// |isac_codec_| is valid and |speech_encoder_| is null. If another encoder
// is registered, |isac_is_encoder_| is false, |speech_encoder_| is valid
// and |isac_codec_| is valid iff iSAC has been registered as a decoder.
rtc::scoped_ptr<AudioEncoderMutable> speech_encoder_;
rtc::scoped_ptr<AudioEncoderDecoderMutableIsac> isac_codec_;
bool isac_is_encoder_;
// |cng_encoder_| and |red_encoder_| are valid iff CNG or RED, respectively,
// are active.
rtc::scoped_ptr<AudioEncoder> cng_encoder_;
rtc::scoped_ptr<AudioEncoder> red_encoder_;
DISALLOW_COPY_AND_ASSIGN(CodecOwner);
};
} // namespace acm2
} // namespace webrtc
#endif // WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_CODEC_OWNER_H_


@ -10,7 +10,7 @@
#include "testing/gtest/include/gtest/gtest.h"
#include "webrtc/base/safe_conversions.h"
#include "webrtc/modules/audio_coding/main/acm2/acm_generic_codec.h"
#include "webrtc/modules/audio_coding/main/acm2/codec_owner.h"
namespace webrtc {
namespace acm2 {
@ -22,22 +22,14 @@ const int16_t kZeroData[kDataLengthSamples] = {0};
const CodecInst kDefaultCodecInst =
{0, "pcmu", 8000, kPacketSizeSamples, 1, 64000};
const int kCngPt = 13;
const int kNoCngPt = 255;
const int kRedPt = 255; // Not using RED in this test.
} // namespace
class AcmGenericCodecTest : public ::testing::Test {
class CodecOwnerTest : public ::testing::Test {
protected:
AcmGenericCodecTest() : timestamp_(0) {
acm_codec_params_ = {kDefaultCodecInst, true, true, VADNormal};
}
CodecOwnerTest() : timestamp_(0) {}
void CreateCodec() {
codec_.reset(new ACMGenericCodec(acm_codec_params_.codec_inst, kCngPt,
kNoCngPt, kNoCngPt, kNoCngPt,
false /* enable RED */, kRedPt));
ASSERT_TRUE(codec_);
ASSERT_EQ(0, codec_->InitEncoder(&acm_codec_params_, true));
codec_owner_.SetEncoders(kDefaultCodecInst, kCngPt, VADNormal, -1);
}
void EncodeAndVerify(size_t expected_out_length,
@ -46,7 +38,7 @@ class AcmGenericCodecTest : public ::testing::Test {
int expected_send_even_if_empty) {
uint8_t out[kPacketSizeSamples];
AudioEncoder::EncodedInfo encoded_info;
encoded_info = codec_->GetAudioEncoder()->Encode(
encoded_info = codec_owner_.Encoder()->Encode(
timestamp_, kZeroData, kDataLengthSamples, kPacketSizeSamples, out);
timestamp_ += kDataLengthSamples;
EXPECT_TRUE(encoded_info.redundant.empty());
@ -59,8 +51,7 @@ class AcmGenericCodecTest : public ::testing::Test {
encoded_info.send_even_if_empty);
}
WebRtcACMCodecParams acm_codec_params_;
rtc::scoped_ptr<ACMGenericCodec> codec_;
CodecOwner codec_owner_;
uint32_t timestamp_;
};
@ -73,7 +64,7 @@ class AcmGenericCodecTest : public ::testing::Test {
// AudioEncoder::EncodedInfo::send_even_if_empty set to true. (The reason to
// produce an empty frame is to drive sending of DTMF packets in the RTP/RTCP
// module.)
TEST_F(AcmGenericCodecTest, VerifyCngFrames) {
TEST_F(CodecOwnerTest, VerifyCngFrames) {
CreateCodec();
uint32_t expected_timestamp = timestamp_;
// Verify no frame.


@ -343,14 +343,6 @@ void TestStereo::Perform() {
EXPECT_EQ(0, acm_a_->VAD(&dtx, &vad, &vad_mode));
EXPECT_FALSE(dtx);
EXPECT_FALSE(vad);
EXPECT_EQ(-1, acm_a_->SetVAD(true, false, VADNormal));
EXPECT_EQ(0, acm_a_->VAD(&dtx, &vad, &vad_mode));
EXPECT_FALSE(dtx);
EXPECT_FALSE(vad);
EXPECT_EQ(-1, acm_a_->SetVAD(false, true, VADNormal));
EXPECT_EQ(0, acm_a_->VAD(&dtx, &vad, &vad_mode));
EXPECT_FALSE(dtx);
EXPECT_FALSE(vad);
EXPECT_EQ(0, acm_a_->SetVAD(false, false, VADNormal));
EXPECT_EQ(0, acm_a_->VAD(&dtx, &vad, &vad_mode));
EXPECT_FALSE(dtx);


@ -94,14 +94,13 @@
],
'sources': [
'audio_coding/codecs/cng/audio_encoder_cng_unittest.cc',
'audio_coding/main/acm2/acm_generic_codec_test.cc',
'audio_coding/main/acm2/acm_generic_codec_opus_test.cc',
'audio_coding/codecs/opus/audio_encoder_mutable_opus_test.cc',
'audio_coding/main/acm2/acm_receiver_unittest.cc',
'audio_coding/main/acm2/acm_receiver_unittest_oldapi.cc',
'audio_coding/main/acm2/audio_coding_module_unittest.cc',
'audio_coding/main/acm2/audio_coding_module_unittest_oldapi.cc',
'audio_coding/main/acm2/call_statistics_unittest.cc',
'audio_coding/main/acm2/codec_owner_unittest.cc',
'audio_coding/main/acm2/initial_delay_manager_unittest.cc',
'audio_coding/main/acm2/nack_unittest.cc',
'audio_coding/codecs/cng/cng_unittest.cc',