Reland r9159 "Adding a new constraint to set NetEq buffer capacity ..."

The original change was reverted due to a breakage in the Chrome build.
This change includes a fix for that breakage.

TBR=mflodman@webrtc.org

Review URL: https://webrtc-codereview.appspot.com/49329004

Cr-Commit-Position: refs/heads/master@{#9169}
Author: Henrik Lundin
Date:   2015-05-11 12:44:23 +02:00
Commit: 64dad838e6
Parent: 092041c1cd

16 changed files with 126 additions and 34 deletions
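From the application's point of view, the new knob is the audioJitterBufferMaxPackets field on RTCConfiguration (Java) and audio_jitter_buffer_max_packets on PeerConnectionInterface::RTCConfiguration (C++); the rest of the diff plumbs that value down to NetEq. The following is a minimal sketch of how a C++ application might use it. The CreatePeerConnection() argument list is written from memory of the 2015-era API and may differ at this exact revision; |factory|, |constraints| and |observer| are assumed to exist in the surrounding application code.

#include "talk/app/webrtc/peerconnectioninterface.h"

rtc::scoped_refptr<webrtc::PeerConnectionInterface>
CreateConnectionWithLargeJitterBuffer(
    webrtc::PeerConnectionFactoryInterface* factory,
    const webrtc::MediaConstraintsInterface* constraints,
    webrtc::PeerConnectionObserver* observer) {
  webrtc::PeerConnectionInterface::RTCConfiguration config;
  // The default is 50 packets; allow NetEq to hold up to 200 packets before
  // it flushes, e.g. for links with long freezes.
  config.audio_jitter_buffer_max_packets = 200;
  return factory->CreatePeerConnection(config, constraints,
                                       NULL /* allocator_factory */,
                                       NULL /* dtls_identity_service */,
                                       observer);
}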


@@ -1304,6 +1304,9 @@ JOW(jlong, PeerConnectionFactory_nativeCreatePeerConnection)(
       "Ljava/util/List;");
   jobject j_ice_servers = GetObjectField(jni, j_rtc_config, j_ice_servers_id);
+  jfieldID j_audio_jitter_buffer_max_packets_id = GetFieldID(
+      jni, j_rtc_config_class, "audioJitterBufferMaxPackets",
+      "I");
 
   PeerConnectionInterface::RTCConfiguration rtc_config;
   rtc_config.type =
@@ -1312,6 +1315,8 @@ JOW(jlong, PeerConnectionFactory_nativeCreatePeerConnection)(
   rtc_config.tcp_candidate_policy =
       JavaTcpCandidatePolicyToNativeType(jni, j_tcp_candidate_policy);
   JavaIceServersToJsepIceServers(jni, j_ice_servers, &rtc_config.servers);
+  rtc_config.audio_jitter_buffer_max_packets =
+      GetIntField(jni, j_rtc_config, j_audio_jitter_buffer_max_packets_id);
 
   PCOJava* observer = reinterpret_cast<PCOJava*>(observer_p);
   observer->SetConstraints(new ConstraintsWrapper(jni, j_constraints));


@@ -128,12 +128,14 @@ public class PeerConnection {
     public List<IceServer> iceServers;
     public BundlePolicy bundlePolicy;
     public TcpCandidatePolicy tcpCandidatePolicy;
+    public int audioJitterBufferMaxPackets;
 
     public RTCConfiguration(List<IceServer> iceServers) {
       iceTransportsType = IceTransportsType.ALL;
       bundlePolicy = BundlePolicy.BALANCED;
       tcpCandidatePolicy = TcpCandidatePolicy.ENABLED;
       this.iceServers = iceServers;
+      audioJitterBufferMaxPackets = 50;
     }
   };


@@ -382,9 +382,7 @@ bool PeerConnection::Initialize(
   // Initialize the WebRtcSession. It creates transport channels etc.
   if (!session_->Initialize(factory_->options(), constraints,
-                            dtls_identity_service,
-                            configuration.type,
-                            configuration.bundle_policy))
+                            dtls_identity_service, configuration))
     return false;
 
   // Register PeerConnection as receiver of local ice candidates.


@@ -211,11 +211,13 @@ class PeerConnectionInterface : public rtc::RefCountInterface {
     IceServers servers;
     BundlePolicy bundle_policy;
     TcpCandidatePolicy tcp_candidate_policy;
+    int audio_jitter_buffer_max_packets;
 
     RTCConfiguration()
         : type(kAll),
          bundle_policy(kBundlePolicyBalanced),
-         tcp_candidate_policy(kTcpCandidatePolicyEnabled) {}
+         tcp_candidate_policy(kTcpCandidatePolicyEnabled),
+         audio_jitter_buffer_max_packets(50) {}
   };
 
   struct RTCOfferAnswerOptions {


@@ -521,9 +521,8 @@ bool WebRtcSession::Initialize(
     const PeerConnectionFactoryInterface::Options& options,
     const MediaConstraintsInterface* constraints,
     DTLSIdentityServiceInterface* dtls_identity_service,
-    PeerConnectionInterface::IceTransportsType ice_transport_type,
-    PeerConnectionInterface::BundlePolicy bundle_policy) {
-  bundle_policy_ = bundle_policy;
+    const PeerConnectionInterface::RTCConfiguration& rtc_configuration) {
+  bundle_policy_ = rtc_configuration.bundle_policy;
 
   // TODO(perkj): Take |constraints| into consideration. Return false if not all
   // mandatory constraints can be fulfilled. Note that |constraints|
@@ -640,6 +639,9 @@ bool WebRtcSession::Initialize(
       MediaConstraintsInterface::kCombinedAudioVideoBwe,
       &audio_options_.combined_audio_video_bwe);
 
+  audio_options_.audio_jitter_buffer_max_packets.Set(
+      rtc_configuration.audio_jitter_buffer_max_packets);
+
   const cricket::VideoCodec default_codec(
       JsepSessionDescription::kDefaultVideoCodecId,
       JsepSessionDescription::kDefaultVideoCodecName,
@@ -667,7 +669,7 @@ bool WebRtcSession::Initialize(
     webrtc_session_desc_factory_->SetSdesPolicy(cricket::SEC_DISABLED);
   }
 
   port_allocator()->set_candidate_filter(
-      ConvertIceTransportTypeToCandidateFilter(ice_transport_type));
+      ConvertIceTransportTypeToCandidateFilter(rtc_configuration.type));
   return true;
 }


@@ -117,11 +117,11 @@ class WebRtcSession : public cricket::BaseSession,
                 MediaStreamSignaling* mediastream_signaling);
   virtual ~WebRtcSession();
 
-  bool Initialize(const PeerConnectionFactoryInterface::Options& options,
-                  const MediaConstraintsInterface* constraints,
-                  DTLSIdentityServiceInterface* dtls_identity_service,
-                  PeerConnectionInterface::IceTransportsType ice_transport_type,
-                  PeerConnectionInterface::BundlePolicy bundle_policy);
+  bool Initialize(
+      const PeerConnectionFactoryInterface::Options& options,
+      const MediaConstraintsInterface* constraints,
+      DTLSIdentityServiceInterface* dtls_identity_service,
+      const PeerConnectionInterface::RTCConfiguration& rtc_configuration);
 
   // Deletes the voice, video and data channel and changes the session state
   // to STATE_RECEIVEDTERMINATE.
   void Terminate();


@@ -156,6 +156,8 @@ static const char kSdpWithRtx[] =
     "a=rtpmap:96 rtx/90000\r\n"
     "a=fmtp:96 apt=0\r\n";
 
+static const int kAudioJitterBufferMaxPackets = 50;
+
 // Add some extra |newlines| to the |message| after |line|.
 static void InjectAfter(const std::string& line,
                         const std::string& newlines,
@@ -383,8 +385,7 @@ class WebRtcSessionTest : public testing::Test {
   void Init(
       DTLSIdentityServiceInterface* identity_service,
-      PeerConnectionInterface::IceTransportsType ice_transport_type,
-      PeerConnectionInterface::BundlePolicy bundle_policy) {
+      const PeerConnectionInterface::RTCConfiguration& rtc_configuration) {
     ASSERT_TRUE(session_.get() == NULL);
     session_.reset(new WebRtcSessionForTest(
         channel_manager_.get(), rtc::Thread::Current(),
@@ -398,33 +399,51 @@ class WebRtcSessionTest : public testing::Test {
               observer_.ice_gathering_state_);
 
     EXPECT_TRUE(session_->Initialize(options_, constraints_.get(),
-                                     identity_service, ice_transport_type,
-                                     bundle_policy));
+                                     identity_service, rtc_configuration));
     session_->set_metrics_observer(&metrics_observer_);
   }
 
   void Init() {
-    Init(NULL, PeerConnectionInterface::kAll,
-         PeerConnectionInterface::kBundlePolicyBalanced);
+    PeerConnectionInterface::RTCConfiguration configuration;
+    configuration.type = PeerConnectionInterface::kAll;
+    configuration.bundle_policy =
+        PeerConnectionInterface::kBundlePolicyBalanced;
+    configuration.audio_jitter_buffer_max_packets =
+        kAudioJitterBufferMaxPackets;
+    Init(NULL, configuration);
   }
 
   void InitWithIceTransport(
       PeerConnectionInterface::IceTransportsType ice_transport_type) {
-    Init(NULL, ice_transport_type,
-         PeerConnectionInterface::kBundlePolicyBalanced);
+    PeerConnectionInterface::RTCConfiguration configuration;
+    configuration.type = ice_transport_type;
+    configuration.bundle_policy =
+        PeerConnectionInterface::kBundlePolicyBalanced;
+    configuration.audio_jitter_buffer_max_packets =
+        kAudioJitterBufferMaxPackets;
+    Init(NULL, configuration);
  }
 
   void InitWithBundlePolicy(
       PeerConnectionInterface::BundlePolicy bundle_policy) {
-    Init(NULL, PeerConnectionInterface::kAll, bundle_policy);
+    PeerConnectionInterface::RTCConfiguration configuration;
+    configuration.type = PeerConnectionInterface::kAll;
+    configuration.bundle_policy = bundle_policy;
+    configuration.audio_jitter_buffer_max_packets =
+        kAudioJitterBufferMaxPackets;
+    Init(NULL, configuration);
   }
 
   void InitWithDtls(bool identity_request_should_fail = false) {
     FakeIdentityService* identity_service = new FakeIdentityService();
     identity_service->set_should_fail(identity_request_should_fail);
-    Init(identity_service,
-         PeerConnectionInterface::kAll,
-         PeerConnectionInterface::kBundlePolicyBalanced);
+    PeerConnectionInterface::RTCConfiguration configuration;
+    configuration.type = PeerConnectionInterface::kAll;
+    configuration.bundle_policy =
+        PeerConnectionInterface::kBundlePolicyBalanced;
+    configuration.audio_jitter_buffer_max_packets =
+        kAudioJitterBufferMaxPackets;
+    Init(identity_service, configuration);
   }
 
   void InitWithDtmfCodec() {


@@ -150,6 +150,8 @@ struct AudioOptions {
     noise_suppression.SetFrom(change.noise_suppression);
     highpass_filter.SetFrom(change.highpass_filter);
     stereo_swapping.SetFrom(change.stereo_swapping);
+    audio_jitter_buffer_max_packets.SetFrom(
+        change.audio_jitter_buffer_max_packets);
     typing_detection.SetFrom(change.typing_detection);
     aecm_generate_comfort_noise.SetFrom(change.aecm_generate_comfort_noise);
     conference_mode.SetFrom(change.conference_mode);
@@ -180,6 +182,7 @@ struct AudioOptions {
            noise_suppression == o.noise_suppression &&
            highpass_filter == o.highpass_filter &&
            stereo_swapping == o.stereo_swapping &&
+           audio_jitter_buffer_max_packets == o.audio_jitter_buffer_max_packets &&
            typing_detection == o.typing_detection &&
            aecm_generate_comfort_noise == o.aecm_generate_comfort_noise &&
            conference_mode == o.conference_mode &&
@@ -210,6 +213,8 @@ struct AudioOptions {
     ost << ToStringIfSet("ns", noise_suppression);
     ost << ToStringIfSet("hf", highpass_filter);
     ost << ToStringIfSet("swap", stereo_swapping);
+    ost << ToStringIfSet("audio_jitter_buffer_max_packets",
+                         audio_jitter_buffer_max_packets);
     ost << ToStringIfSet("typing", typing_detection);
     ost << ToStringIfSet("comfort_noise", aecm_generate_comfort_noise);
     ost << ToStringIfSet("conference", conference_mode);
@@ -248,6 +253,8 @@ struct AudioOptions {
   Settable<bool> highpass_filter;
   // Audio processing to swap the left and right channels.
   Settable<bool> stereo_swapping;
+  // Audio receiver jitter buffer (NetEq) max capacity in number of packets.
+  Settable<int> audio_jitter_buffer_max_packets;
   // Audio processing to detect typing.
   Settable<bool> typing_detection;
   Settable<bool> aecm_generate_comfort_noise;


@@ -41,6 +41,7 @@
 #include "webrtc/base/checks.h"
 #include "webrtc/base/gunit.h"
 #include "webrtc/base/stringutils.h"
+#include "webrtc/config.h"
 #include "webrtc/modules/audio_processing/include/audio_processing.h"
 
 namespace cricket {
@@ -213,7 +214,8 @@ class FakeWebRtcVoiceEngine
           send_audio_level_ext_(-1),
           receive_audio_level_ext_(-1),
           send_absolute_sender_time_ext_(-1),
-          receive_absolute_sender_time_ext_(-1) {
+          receive_absolute_sender_time_ext_(-1),
+          neteq_capacity(-1) {
       memset(&send_codec, 0, sizeof(send_codec));
       memset(&rx_agc_config, 0, sizeof(rx_agc_config));
     }
@@ -249,6 +251,7 @@ class FakeWebRtcVoiceEngine
     webrtc::CodecInst send_codec;
     webrtc::PacketTime last_rtp_packet_time;
     std::list<std::string> packets;
+    int neteq_capacity;
   };
 
   FakeWebRtcVoiceEngine(const cricket::AudioCodec* const* codecs,
@@ -391,7 +394,7 @@ class FakeWebRtcVoiceEngine
                    true);
     }
   }
-  int AddChannel() {
+  int AddChannel(const webrtc::Config& config) {
    if (fail_create_channel_) {
      return -1;
    }
@@ -401,6 +404,9 @@ class FakeWebRtcVoiceEngine
       GetCodec(i, codec);
       ch->recv_codecs.push_back(codec);
     }
+    if (config.Get<webrtc::NetEqCapacityConfig>().enabled) {
+      ch->neteq_capacity = config.Get<webrtc::NetEqCapacityConfig>().capacity;
+    }
     channels_[++last_channel_] = ch;
     return last_channel_;
   }
@@ -447,10 +453,11 @@ class FakeWebRtcVoiceEngine
     return &audio_processing_;
   }
   WEBRTC_FUNC(CreateChannel, ()) {
-    return AddChannel();
+    webrtc::Config empty_config;
+    return AddChannel(empty_config);
   }
-  WEBRTC_FUNC(CreateChannel, (const webrtc::Config& /*config*/)) {
-    return AddChannel();
+  WEBRTC_FUNC(CreateChannel, (const webrtc::Config& config)) {
+    return AddChannel(config);
   }
   WEBRTC_FUNC(DeleteChannel, (int channel)) {
     WEBRTC_CHECK_CHANNEL(channel);
@@ -1243,6 +1250,11 @@ class FakeWebRtcVoiceEngine
   WEBRTC_STUB(GetAudioFrame, (int channel, int desired_sample_rate_hz,
                               webrtc::AudioFrame* frame));
   WEBRTC_STUB(SetExternalMixing, (int channel, bool enable));
+  int GetNetEqCapacity() const {
+    auto ch = channels_.find(last_channel_);
+    ASSERT(ch != channels_.end());
+    return ch->second->neteq_capacity;
+  }
 
  private:
   int GetNumDevices(int& num) {


@@ -353,6 +353,7 @@ static AudioOptions GetDefaultEngineOptions() {
   options.noise_suppression.Set(true);
   options.highpass_filter.Set(true);
   options.stereo_swapping.Set(false);
+  options.audio_jitter_buffer_max_packets.Set(50);
   options.typing_detection.Set(true);
   options.conference_mode.Set(false);
   options.adjust_agc_delta.Set(0);
@@ -955,6 +956,14 @@ bool WebRtcVoiceEngine::ApplyOptions(const AudioOptions& options_in) {
     }
   }
 
+  int audio_jitter_buffer_max_packets;
+  if (options.audio_jitter_buffer_max_packets.Get(
+          &audio_jitter_buffer_max_packets)) {
+    LOG(LS_INFO) << "NetEq capacity is " << audio_jitter_buffer_max_packets;
+    voe_config_.Set<webrtc::NetEqCapacityConfig>(
+        new webrtc::NetEqCapacityConfig(audio_jitter_buffer_max_packets));
+  }
+
   bool typing_detection;
   if (options.typing_detection.Get(&typing_detection)) {
     LOG(LS_INFO) << "Typing detection is enabled? " << typing_detection;


@@ -46,6 +46,7 @@
 #include "webrtc/base/thread_checker.h"
 #include "webrtc/call.h"
 #include "webrtc/common.h"
+#include "webrtc/config.h"
 
 #if !defined(LIBPEERCONNECTION_LIB) && \
     !defined(LIBPEERCONNECTION_IMPLEMENTATION)


@@ -2882,6 +2882,7 @@ TEST_F(WebRtcVoiceEngineTestFake, SetAudioOptions) {
   EXPECT_TRUE(typing_detection_enabled);
   EXPECT_EQ(ec_mode, webrtc::kEcConference);
   EXPECT_EQ(ns_mode, webrtc::kNsHighSuppression);
+  EXPECT_EQ(50, voe_.GetNetEqCapacity());  // From GetDefaultEngineOptions().
 
   // Turn echo cancellation off
   options.echo_cancellation.Set(false);


@@ -113,6 +113,20 @@ struct VideoEncoderConfig {
   int min_transmit_bitrate_bps;
 };
 
+// Controls the capacity of the packet buffer in NetEq. The capacity is the
+// maximum number of packets that the buffer can contain. If the limit is
+// exceeded, the buffer will be flushed. The capacity does not affect the actual
+// audio delay in the general case, since this is governed by the target buffer
+// level (calculated from the jitter profile). It is only in the rare case of
+// severe network freezes that a higher capacity will lead to a (transient)
+// increase in audio delay.
+struct NetEqCapacityConfig {
+  NetEqCapacityConfig() : enabled(false), capacity(0) {}
+  explicit NetEqCapacityConfig(int value) : enabled(true), capacity(value) {}
+  bool enabled;
+  int capacity;
+};
+
 }  // namespace webrtc
 
 #endif  // WEBRTC_CONFIG_H_
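NetEqCapacityConfig rides on webrtc::Config (webrtc/common.h), the set-by-type option store used elsewhere in this change: Set() takes ownership of a heap-allocated value, and Get<T>() returns a default-constructed instance when nothing was set, which is why the default constructor leaves enabled false. A minimal sketch, assuming only what the hunks above and below already rely on:

#include "webrtc/common.h"
#include "webrtc/config.h"

// Returns the capacity the application requested, or |default_capacity| when
// the NetEqCapacityConfig option was never set on |config|.
int EffectiveNetEqCapacity(const webrtc::Config& config, int default_capacity) {
  const webrtc::NetEqCapacityConfig& capacity_config =
      config.Get<webrtc::NetEqCapacityConfig>();
  return capacity_config.enabled ? capacity_config.capacity : default_capacity;
}

void Example() {
  webrtc::Config config;
  // Config takes ownership of the heap-allocated option.
  config.Set<webrtc::NetEqCapacityConfig>(new webrtc::NetEqCapacityConfig(100));
  int capacity = EffectiveNetEqCapacity(config, 50);  // -> 100
  (void)capacity;
}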


@@ -10,6 +10,7 @@
 
 #include "webrtc/modules/audio_coding/main/interface/audio_coding_module.h"
 
+#include "webrtc/base/checks.h"
 #include "webrtc/common_types.h"
 #include "webrtc/modules/audio_coding/main/acm2/acm_codec_database.h"
 #include "webrtc/modules/audio_coding/main/acm2/audio_coding_module_impl.h"
@@ -20,13 +21,20 @@ namespace webrtc {
 
 // Create module
 AudioCodingModule* AudioCodingModule::Create(int id) {
-  return Create(id, Clock::GetRealTimeClock());
+  Config config;
+  config.id = id;
+  config.clock = Clock::GetRealTimeClock();
+  return Create(config);
 }
 
 AudioCodingModule* AudioCodingModule::Create(int id, Clock* clock) {
-  AudioCodingModule::Config config;
+  Config config;
   config.id = id;
   config.clock = clock;
+  return Create(config);
+}
+
+AudioCodingModule* AudioCodingModule::Create(const Config& config) {
   return new acm2::AudioCodingModuleImpl(config);
 }


@@ -99,6 +99,7 @@ class AudioCodingModule {
   //
   static AudioCodingModule* Create(int id);
   static AudioCodingModule* Create(int id, Clock* clock);
+  static AudioCodingModule* Create(const Config& config);
   virtual ~AudioCodingModule() {};
 
   ///////////////////////////////////////////////////////////////////////////
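The new Create(const Config&) overload is what lets callers size the NetEq packet buffer directly. Below is an illustrative sketch only, not code from this change; it assumes AudioCodingModule::Config exposes the id and neteq_config members that the channel.cc hunk further down relies on, and it leaves error handling and ownership wrappers to the caller.

#include "webrtc/modules/audio_coding/main/interface/audio_coding_module.h"

webrtc::AudioCodingModule* CreateAcmWithLargeNetEqBuffer() {
  webrtc::AudioCodingModule::Config config;
  config.id = 0;
  // Allow up to 100 packets in the NetEq packet buffer instead of the default.
  config.neteq_config.max_packets_in_buffer = 100;
  return webrtc::AudioCodingModule::Create(config);  // Caller takes ownership.
}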


@@ -10,9 +10,12 @@
 
 #include "webrtc/voice_engine/channel.h"
 
+#include <algorithm>
+
 #include "webrtc/base/format_macros.h"
 #include "webrtc/base/timeutils.h"
 #include "webrtc/common.h"
+#include "webrtc/config.h"
 #include "webrtc/modules/audio_device/include/audio_device.h"
 #include "webrtc/modules/audio_processing/include/audio_processing.h"
 #include "webrtc/modules/interface/module_common_types.h"
@@ -757,8 +760,6 @@ Channel::Channel(int32_t channelId,
         VoEModuleId(instanceId, channelId), Clock::GetRealTimeClock(), this,
         this, this, rtp_payload_registry_.get())),
     telephone_event_handler_(rtp_receiver_->GetTelephoneEventHandler()),
-    audio_coding_(AudioCodingModule::Create(
-        VoEModuleId(instanceId, channelId))),
     _rtpDumpIn(*RtpDump::CreateRtpDump()),
     _rtpDumpOut(*RtpDump::CreateRtpDump()),
     _outputAudioLevel(),
@@ -828,6 +829,16 @@ Channel::Channel(int32_t channelId,
 {
     WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_instanceId,_channelId),
                  "Channel::Channel() - ctor");
+    AudioCodingModule::Config acm_config;
+    acm_config.id = VoEModuleId(instanceId, channelId);
+    if (config.Get<NetEqCapacityConfig>().enabled) {
+      // Clamping the buffer capacity at 20 packets. While going lower will
+      // probably work, it makes little sense.
+      acm_config.neteq_config.max_packets_in_buffer =
+          std::max(20, config.Get<NetEqCapacityConfig>().capacity);
+    }
+    audio_coding_.reset(AudioCodingModule::Create(acm_config));
+
     _inbandDtmfQueue.ResetDtmf();
     _inbandDtmfGenerator.Init();
     _outputAudioLevel.Clear();
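Note the floor applied in the constructor above: the requested capacity is never passed through below 20 packets. The helper below is not part of the WebRTC tree; it merely restates the std::max(20, capacity) rule for clarity.

#include <algorithm>

// Returns the NetEq packet-buffer capacity the Channel ctor actually uses for
// a given application request: values below 20 are raised to 20, larger
// values are kept as-is.
int ClampedNetEqCapacity(int requested_packets) {
  return std::max(20, requested_packets);
}
// ClampedNetEqCapacity(10) == 20, ClampedNetEqCapacity(200) == 200.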