(Auto)update libjingle 66033941 -> 66098243

git-svn-id: http://webrtc.googlecode.com/svn/trunk@6044 4adac7df-926f-26a2-2b94-8c16560cd09d

parent 7f3a041d23
commit 0d34f1446a
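Editor's note: most of this roll adapts talk/media to API differences that are compile-time guarded by USE_WEBRTC_DEV_BRANCH (for example, RegisterCaptureDataCallback() returns void on the dev branch but int32_t on the release branch, as the hunks below show). The following is a minimal, illustrative sketch of that guard pattern only; FakeModule, FakeCallback, and StartWithCallback are placeholder names, not the real webrtc classes or this CL's code.

// Sketch of the USE_WEBRTC_DEV_BRANCH guard used throughout this roll.
// Placeholder types stand in for webrtc::VideoCaptureModule and its callback.
#include <cstdint>

struct FakeCallback {};

struct FakeModule {
#if defined(USE_WEBRTC_DEV_BRANCH)
  void RegisterCaptureDataCallback(FakeCallback&) {}               // New API: void.
#else
  int32_t RegisterCaptureDataCallback(FakeCallback&) { return 0; } // Old API: int32_t.
#endif
  int32_t StartCapture() { return 0; }
};

bool StartWithCallback(FakeModule* module, FakeCallback* callback) {
#if defined(USE_WEBRTC_DEV_BRANCH)
  module->RegisterCaptureDataCallback(*callback);  // No return value to check.
  if (module->StartCapture() != 0) {
#else
  if (module->RegisterCaptureDataCallback(*callback) != 0 ||
      module->StartCapture() != 0) {
#endif
    return false;  // Registration or capture start failed.
  }
  return true;
}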
@@ -59,7 +59,6 @@ class FakeWebRtcVideoCaptureModule : public webrtc::VideoCaptureModule {
    id_ = id;
    return 0;
  }
#if defined(USE_WEBRTC_DEV_BRANCH)
  virtual void RegisterCaptureDataCallback(
      webrtc::VideoCaptureDataCallback& callback) {
    callback_ = &callback;
@@ -79,37 +78,6 @@ class FakeWebRtcVideoCaptureModule : public webrtc::VideoCaptureModule {
  virtual void EnableNoPictureAlarm(const bool enable) {
    // not implemented
  }
#else
  virtual int32_t RegisterCaptureDataCallback(
      webrtc::VideoCaptureDataCallback& callback) {
    callback_ = &callback;
    return 0;
  }
  virtual int32_t DeRegisterCaptureDataCallback() {
    callback_ = NULL;
    return 0;
  }
  virtual int32_t RegisterCaptureCallback(
      webrtc::VideoCaptureFeedBack& callback) {
    return -1; // not implemented
  }
  virtual int32_t DeRegisterCaptureCallback() {
    return 0;
  }
  virtual int32_t SetCaptureDelay(int32_t delay) {
    delay_ = delay;
    return 0;
  }
  virtual int32_t CaptureDelay() {
    return delay_;
  }
  virtual int32_t EnableFrameRateCallback(const bool enable) {
    return -1; // not implemented
  }
  virtual int32_t EnableNoPictureAlarm(const bool enable) {
    return -1; // not implemented
  }
#endif
  virtual int32_t StartCapture(
      const webrtc::VideoCaptureCapability& cap) {
    if (running_) return -1;
@@ -41,18 +41,6 @@
#include "talk/media/webrtc/webrtcvideoencoderfactory.h"
#include "talk/media/webrtc/webrtcvie.h"

#if !defined(USE_WEBRTC_DEV_BRANCH)
namespace webrtc {

// This function is 'inline' to avoid link errors.
inline bool operator==(const webrtc::VideoCodec& c1,
                       const webrtc::VideoCodec& c2) {
  return memcmp(&c1, &c2, sizeof(c1)) == 0;
}

}
#endif

namespace cricket {

#define WEBRTC_CHECK_CAPTURER(capturer) \
@@ -307,9 +295,7 @@ class FakeWebRtcVideoEngine
        overuse_observer_(NULL) {
      ssrcs_[0] = 0;  // default ssrc.
      memset(&send_codec, 0, sizeof(send_codec));
#ifdef USE_WEBRTC_DEV_BRANCH
      memset(&overuse_options_, 0, sizeof(overuse_options_));
#endif
    }
    int capture_id_;
    int original_channel_id_;
@@ -349,9 +335,7 @@ class FakeWebRtcVideoEngine
    unsigned int reserved_transmit_bitrate_bps_;
    bool suspend_below_min_bitrate_;
    webrtc::CpuOveruseObserver* overuse_observer_;
#ifdef USE_WEBRTC_DEV_BRANCH
    webrtc::CpuOveruseOptions overuse_options_;
#endif
  };
  class Capturer : public webrtc::ViEExternalCapture {
   public:
@@ -553,12 +537,10 @@ class FakeWebRtcVideoEngine
    WEBRTC_ASSERT_CHANNEL(channel);
    return channels_.find(channel)->second->overuse_observer_;
  }
#ifdef USE_WEBRTC_DEV_BRANCH
  webrtc::CpuOveruseOptions GetCpuOveruseOptions(int channel) const {
    WEBRTC_ASSERT_CHANNEL(channel);
    return channels_.find(channel)->second->overuse_options_;
  }
#endif
  int GetRtxSsrc(int channel, int simulcast_idx) const {
    WEBRTC_ASSERT_CHANNEL(channel);
    if (channels_.find(channel)->second->rtx_ssrcs_.find(simulcast_idx) ==
@@ -570,16 +552,7 @@ class FakeWebRtcVideoEngine
  bool ReceiveCodecRegistered(int channel,
                              const webrtc::VideoCodec& codec) const {
    WEBRTC_ASSERT_CHANNEL(channel);
#if !defined(USE_WEBRTC_DEV_BRANCH)
    const std::vector<webrtc::VideoCodec>& codecs =
        channels_.find(channel)->second->recv_codecs;
    return std::find(codecs.begin(), codecs.end(), codec) != codecs.end();
#else
    // TODO(mallinath) - Remove this specilization after this change is pushed
    // to googlecode and operator== from VideoCodecDerived moved inside
    // VideoCodec.
    return true;
#endif
  };
  bool ExternalDecoderRegistered(int channel,
                                 unsigned int pl_type) const {
@@ -690,14 +663,12 @@ class FakeWebRtcVideoEngine
    return 0;
  }
  WEBRTC_STUB(CpuOveruseMeasures, (int, int*, int*, int*, int*));
#ifdef USE_WEBRTC_DEV_BRANCH
  WEBRTC_FUNC(SetCpuOveruseOptions,
              (int channel, const webrtc::CpuOveruseOptions& options)) {
    WEBRTC_CHECK_CHANNEL(channel);
    channels_[channel]->overuse_options_ = options;
    return 0;
  }
#endif
  WEBRTC_STUB(ConnectAudioChannel, (const int, const int));
  WEBRTC_STUB(DisconnectAudioChannel, (const int));
  WEBRTC_FUNC(StartSend, (const int channel)) {
@@ -891,10 +862,8 @@ class FakeWebRtcVideoEngine
  // Not using WEBRTC_STUB due to bool return value
  virtual bool IsIPv6Enabled(int channel) { return true; }
  WEBRTC_STUB(SetMTU, (int, unsigned int));
#ifdef USE_WEBRTC_DEV_BRANCH
  WEBRTC_STUB(ReceivedBWEPacket, (const int, int64_t, int,
                                  const webrtc::RTPHeader&));
#endif

  // webrtc::ViERender
  WEBRTC_STUB(RegisterVideoRenderModule, (webrtc::VideoRender&));
@@ -1104,19 +1073,15 @@ class FakeWebRtcVideoEngine
    channels_[channel]->transmission_smoothing_ = enable;
    return 0;
  }
#ifdef USE_WEBRTC_DEV_BRANCH
  WEBRTC_FUNC(SetReservedTransmitBitrate, (int channel,
  WEBRTC_FUNC(SetReservedTransmitBitrate, (int channel,
      unsigned int reserved_transmit_bitrate_bps)) {
    WEBRTC_CHECK_CHANNEL(channel);
    channels_[channel]->reserved_transmit_bitrate_bps_ =
        reserved_transmit_bitrate_bps;
    return 0;
  }
#endif
#ifdef USE_WEBRTC_DEV_BRANCH
  WEBRTC_STUB_CONST(GetRtcpPacketTypeCounters, (int,
      webrtc::RtcpPacketTypeCounter*, webrtc::RtcpPacketTypeCounter*));
#endif
  WEBRTC_STUB_CONST(GetReceivedRTCPStatistics, (const int, unsigned short&,
      unsigned int&, unsigned int&, unsigned int&, int&));
  WEBRTC_STUB_CONST(GetSentRTCPStatistics, (const int, unsigned short&,
@ -32,7 +32,6 @@
|
||||
#include <map>
|
||||
#include <vector>
|
||||
|
||||
|
||||
#include "talk/base/basictypes.h"
|
||||
#include "talk/base/gunit.h"
|
||||
#include "talk/base/stringutils.h"
|
||||
@ -61,7 +60,6 @@ static const int kFakeDeviceId = 0;
|
||||
static const int kFakeDeviceId = 1;
|
||||
#endif
|
||||
|
||||
|
||||
// Verify the header extension ID, if enabled, is within the bounds specified in
|
||||
// [RFC5285]: 1-14 inclusive.
|
||||
#define WEBRTC_CHECK_HEADER_EXTENSION_ID(enable, id) \
|
||||
@ -699,7 +697,6 @@ class FakeWebRtcVoiceEngine
|
||||
std::string(static_cast<const char*>(data), length));
|
||||
return 0;
|
||||
}
|
||||
#ifdef USE_WEBRTC_DEV_BRANCH
|
||||
WEBRTC_FUNC(ReceivedRTPPacket, (int channel, const void* data,
|
||||
unsigned int length,
|
||||
const webrtc::PacketTime& packet_time)) {
|
||||
@ -710,7 +707,6 @@ class FakeWebRtcVoiceEngine
|
||||
channels_[channel]->last_rtp_packet_time = packet_time;
|
||||
return 0;
|
||||
}
|
||||
#endif
|
||||
|
||||
WEBRTC_STUB(ReceivedRTCPPacket, (int channel, const void* data,
|
||||
unsigned int length));
|
||||
@ -733,18 +729,6 @@ class FakeWebRtcVoiceEngine
|
||||
return 0;
|
||||
}
|
||||
WEBRTC_STUB(GetRemoteSSRC, (int channel, unsigned int& ssrc));
|
||||
#ifndef USE_WEBRTC_DEV_BRANCH
|
||||
WEBRTC_FUNC(SetRTPAudioLevelIndicationStatus, (int channel, bool enable,
|
||||
unsigned char id)) {
|
||||
WEBRTC_CHECK_CHANNEL(channel);
|
||||
WEBRTC_CHECK_HEADER_EXTENSION_ID(enable, id);
|
||||
channels_[channel]->send_audio_level_ext_ = (enable) ? id : -1;
|
||||
return 0;
|
||||
}
|
||||
WEBRTC_STUB(GetRTPAudioLevelIndicationStatus, (int channel, bool& enable,
|
||||
unsigned char& id));
|
||||
#endif
|
||||
#ifdef USE_WEBRTC_DEV_BRANCH
|
||||
WEBRTC_FUNC(SetSendAudioLevelIndicationStatus, (int channel, bool enable,
|
||||
unsigned char id)) {
|
||||
WEBRTC_CHECK_CHANNEL(channel);
|
||||
@ -766,7 +750,6 @@ class FakeWebRtcVoiceEngine
|
||||
channels_[channel]->receive_absolute_sender_time_ext_ = (enable) ? id : -1;
|
||||
return 0;
|
||||
}
|
||||
#endif
|
||||
|
||||
WEBRTC_STUB(GetRemoteCSRCs, (int channel, unsigned int arrCSRC[15]));
|
||||
WEBRTC_STUB(SetRTCPStatus, (int channel, bool enable));
|
||||
@ -847,7 +830,6 @@ class FakeWebRtcVoiceEngine
|
||||
unsigned short payloadSize));
|
||||
WEBRTC_STUB(GetLastRemoteTimeStamp, (int channel,
|
||||
uint32_t* lastRemoteTimeStamp));
|
||||
#ifdef USE_WEBRTC_DEV_BRANCH
|
||||
WEBRTC_FUNC(SetVideoEngineBWETarget, (int channel,
|
||||
webrtc::ViENetwork* vie_network,
|
||||
int video_channel)) {
|
||||
@ -856,7 +838,6 @@ class FakeWebRtcVoiceEngine
|
||||
channels_[channel]->video_channel = video_channel;
|
||||
return 0;
|
||||
}
|
||||
#endif
|
||||
|
||||
// webrtc::VoEVideoSync
|
||||
WEBRTC_STUB(GetPlayoutBufferSize, (int& bufferMs));
|
||||
|
@@ -269,13 +269,8 @@ CaptureState WebRtcVideoCapturer::Start(const VideoFormat& capture_format) {

  std::string camera_id(GetId());
  uint32 start = talk_base::Time();
#if defined(USE_WEBRTC_DEV_BRANCH)
  module_->RegisterCaptureDataCallback(*this);
  if (module_->StartCapture(cap) != 0) {
#else
  if (module_->RegisterCaptureDataCallback(*this) != 0 ||
      module_->StartCapture(cap) != 0) {
#endif
    LOG(LS_ERROR) << "Camera '" << camera_id << "' failed to start";
    return CS_FAILED;
  }
@ -64,9 +64,6 @@
|
||||
#include "webrtc/modules/remote_bitrate_estimator/include/remote_bitrate_estimator.h"
|
||||
|
||||
#if !defined(LIBPEERCONNECTION_LIB)
|
||||
#ifndef HAVE_WEBRTC_VIDEO
|
||||
#error Need webrtc video
|
||||
#endif
|
||||
#include "talk/media/webrtc/webrtcmediaengine.h"
|
||||
|
||||
WRME_EXPORT
|
||||
@ -162,73 +159,6 @@ static bool IsRembEnabled(const VideoCodec& codec) {
|
||||
kParamValueEmpty));
|
||||
}
|
||||
|
||||
// TODO(mallinath) - Remove this after trunk of webrtc is pushed to GTP.
|
||||
#if !defined(USE_WEBRTC_DEV_BRANCH)
|
||||
bool operator==(const webrtc::VideoCodecVP8& lhs,
|
||||
const webrtc::VideoCodecVP8& rhs) {
|
||||
return lhs.pictureLossIndicationOn == rhs.pictureLossIndicationOn &&
|
||||
lhs.feedbackModeOn == rhs.feedbackModeOn &&
|
||||
lhs.complexity == rhs.complexity &&
|
||||
lhs.resilience == rhs.resilience &&
|
||||
lhs.numberOfTemporalLayers == rhs.numberOfTemporalLayers &&
|
||||
lhs.denoisingOn == rhs.denoisingOn &&
|
||||
lhs.errorConcealmentOn == rhs.errorConcealmentOn &&
|
||||
lhs.automaticResizeOn == rhs.automaticResizeOn &&
|
||||
lhs.frameDroppingOn == rhs.frameDroppingOn &&
|
||||
lhs.keyFrameInterval == rhs.keyFrameInterval;
|
||||
}
|
||||
|
||||
bool operator!=(const webrtc::VideoCodecVP8& lhs,
|
||||
const webrtc::VideoCodecVP8& rhs) {
|
||||
return !(lhs == rhs);
|
||||
}
|
||||
|
||||
bool operator==(const webrtc::SimulcastStream& lhs,
|
||||
const webrtc::SimulcastStream& rhs) {
|
||||
return lhs.width == rhs.width &&
|
||||
lhs.height == rhs.height &&
|
||||
lhs.numberOfTemporalLayers == rhs.numberOfTemporalLayers &&
|
||||
lhs.maxBitrate == rhs.maxBitrate &&
|
||||
lhs.targetBitrate == rhs.targetBitrate &&
|
||||
lhs.minBitrate == rhs.minBitrate &&
|
||||
lhs.qpMax == rhs.qpMax;
|
||||
}
|
||||
|
||||
bool operator!=(const webrtc::SimulcastStream& lhs,
|
||||
const webrtc::SimulcastStream& rhs) {
|
||||
return !(lhs == rhs);
|
||||
}
|
||||
|
||||
bool operator==(const webrtc::VideoCodec& lhs,
|
||||
const webrtc::VideoCodec& rhs) {
|
||||
bool ret = lhs.codecType == rhs.codecType &&
|
||||
(_stricmp(lhs.plName, rhs.plName) == 0) &&
|
||||
lhs.plType == rhs.plType &&
|
||||
lhs.width == rhs.width &&
|
||||
lhs.height == rhs.height &&
|
||||
lhs.startBitrate == rhs.startBitrate &&
|
||||
lhs.maxBitrate == rhs.maxBitrate &&
|
||||
lhs.minBitrate == rhs.minBitrate &&
|
||||
lhs.maxFramerate == rhs.maxFramerate &&
|
||||
lhs.qpMax == rhs.qpMax &&
|
||||
lhs.numberOfSimulcastStreams == rhs.numberOfSimulcastStreams &&
|
||||
lhs.mode == rhs.mode;
|
||||
if (ret && lhs.codecType == webrtc::kVideoCodecVP8) {
|
||||
ret &= (lhs.codecSpecific.VP8 == rhs.codecSpecific.VP8);
|
||||
}
|
||||
|
||||
for (unsigned char i = 0; i < rhs.numberOfSimulcastStreams && ret; ++i) {
|
||||
ret &= (lhs.simulcastStream[i] == rhs.simulcastStream[i]);
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
bool operator!=(const webrtc::VideoCodec& lhs,
|
||||
const webrtc::VideoCodec& rhs) {
|
||||
return !(lhs == rhs);
|
||||
}
|
||||
#endif
|
||||
|
||||
struct FlushBlackFrameData : public talk_base::MessageData {
|
||||
FlushBlackFrameData(uint32 s, int64 t) : ssrc(s), timestamp(t) {
|
||||
}
|
||||
@ -917,7 +847,6 @@ static void UpdateVideoCodec(const cricket::VideoFormat& video_format,
|
||||
video_format.interval);
|
||||
}
|
||||
|
||||
#ifdef USE_WEBRTC_DEV_BRANCH
|
||||
static bool GetCpuOveruseOptions(const VideoOptions& options,
|
||||
webrtc::CpuOveruseOptions* overuse_options) {
|
||||
int underuse_threshold = 0;
|
||||
@ -947,7 +876,6 @@ static bool GetCpuOveruseOptions(const VideoOptions& options,
|
||||
}
|
||||
return true;
|
||||
}
|
||||
#endif
|
||||
|
||||
WebRtcVideoEngine::WebRtcVideoEngine() {
|
||||
Construct(new ViEWrapper(), new ViETraceWrapper(), NULL,
|
||||
@ -2505,7 +2433,6 @@ bool WebRtcVideoMediaChannel::GetStats(const StatsOptions& options,
|
||||
sinfo.capture_queue_delay_ms_per_s = capture_queue_delay_ms_per_s;
|
||||
}
|
||||
|
||||
#ifdef USE_WEBRTC_DEV_BRANCH
|
||||
webrtc::RtcpPacketTypeCounter rtcp_sent;
|
||||
webrtc::RtcpPacketTypeCounter rtcp_received;
|
||||
if (engine()->vie()->rtp()->GetRtcpPacketTypeCounters(
|
||||
@ -2519,11 +2446,6 @@ bool WebRtcVideoMediaChannel::GetStats(const StatsOptions& options,
|
||||
sinfo.nacks_rcvd = -1;
|
||||
LOG_RTCERR1(GetRtcpPacketTypeCounters, channel_id);
|
||||
}
|
||||
#else
|
||||
sinfo.firs_rcvd = -1;
|
||||
sinfo.plis_rcvd = -1;
|
||||
sinfo.nacks_rcvd = -1;
|
||||
#endif
|
||||
|
||||
// Get received RTCP statistics for the sender (reported by the remote
|
||||
// client in a RTCP packet), if available.
|
||||
@ -2613,7 +2535,6 @@ bool WebRtcVideoMediaChannel::GetStats(const StatsOptions& options,
|
||||
rinfo.framerate_output = fps;
|
||||
channel->decoder_observer()->ExportTo(&rinfo);
|
||||
|
||||
#ifdef USE_WEBRTC_DEV_BRANCH
|
||||
webrtc::RtcpPacketTypeCounter rtcp_sent;
|
||||
webrtc::RtcpPacketTypeCounter rtcp_received;
|
||||
if (engine()->vie()->rtp()->GetRtcpPacketTypeCounters(
|
||||
@ -2627,11 +2548,6 @@ bool WebRtcVideoMediaChannel::GetStats(const StatsOptions& options,
|
||||
rinfo.nacks_sent = -1;
|
||||
LOG_RTCERR1(GetRtcpPacketTypeCounters, channel->channel_id());
|
||||
}
|
||||
#else
|
||||
rinfo.firs_sent = -1;
|
||||
rinfo.plis_sent = -1;
|
||||
rinfo.nacks_sent = -1;
|
||||
#endif
|
||||
|
||||
// Get our locally created statistics of the received RTP stream.
|
||||
webrtc::RtcpStatistics incoming_stream_rtcp_stats;
|
||||
@ -2661,7 +2577,6 @@ bool WebRtcVideoMediaChannel::GetStats(const StatsOptions& options,
|
||||
|
||||
// TODO(jiayl): remove the condition when the necessary changes are available
|
||||
// outside the dev branch.
|
||||
#ifdef USE_WEBRTC_DEV_BRANCH
|
||||
if (options.include_received_propagation_stats) {
|
||||
webrtc::ReceiveBandwidthEstimatorStats additional_stats;
|
||||
// Only call for the default channel because the returned stats are
|
||||
@ -2679,7 +2594,6 @@ bool WebRtcVideoMediaChannel::GetStats(const StatsOptions& options,
|
||||
|
||||
engine_->vie()->rtp()->GetPacerQueuingDelayMs(
|
||||
recv_channels_[0]->channel_id(), &bwe.bucket_delay);
|
||||
#endif
|
||||
|
||||
// Calculations done above per send/receive stream.
|
||||
bwe.actual_enc_bitrate = video_bitrate_sent;
|
||||
@ -2961,13 +2875,11 @@ bool WebRtcVideoMediaChannel::SetOptions(const VideoOptions &options) {
|
||||
conference_mode_turned_off = true;
|
||||
}
|
||||
|
||||
#ifdef USE_WEBRTC_DEV_BRANCH
|
||||
bool improved_wifi_bwe_changed =
|
||||
options.use_improved_wifi_bandwidth_estimator.IsSet() &&
|
||||
options_.use_improved_wifi_bandwidth_estimator !=
|
||||
options.use_improved_wifi_bandwidth_estimator;
|
||||
|
||||
#endif
|
||||
|
||||
// Save the options, to be interpreted where appropriate.
|
||||
// Use options_.SetAll() instead of assignment so that unset value in options
|
||||
@ -3099,7 +3011,6 @@ bool WebRtcVideoMediaChannel::SetOptions(const VideoOptions &options) {
|
||||
LOG(LS_WARNING) << "Cannot disable video suspension once it is enabled";
|
||||
}
|
||||
}
|
||||
#ifdef USE_WEBRTC_DEV_BRANCH
|
||||
if (improved_wifi_bwe_changed) {
|
||||
LOG(LS_INFO) << "Improved WIFI BWE called.";
|
||||
webrtc::Config config;
|
||||
@ -3122,7 +3033,6 @@ bool WebRtcVideoMediaChannel::SetOptions(const VideoOptions &options) {
|
||||
}
|
||||
}
|
||||
}
|
||||
#endif
|
||||
return true;
|
||||
}
|
||||
|
||||
@ -3557,7 +3467,6 @@ bool WebRtcVideoMediaChannel::ConfigureSending(int channel_id,
|
||||
send_channel->SetCpuOveruseDetection(true);
|
||||
}
|
||||
|
||||
#ifdef USE_WEBRTC_DEV_BRANCH
|
||||
webrtc::CpuOveruseOptions overuse_options;
|
||||
if (GetCpuOveruseOptions(options_, &overuse_options)) {
|
||||
if (engine()->vie()->base()->SetCpuOveruseOptions(channel_id,
|
||||
@ -3565,7 +3474,6 @@ bool WebRtcVideoMediaChannel::ConfigureSending(int channel_id,
|
||||
LOG_RTCERR1(SetCpuOveruseOptions, channel_id);
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
// Register encoder observer for outgoing framerate and bitrate.
|
||||
if (engine()->vie()->codec()->RegisterEncoderObserver(
|
||||
@ -3936,11 +3844,9 @@ bool WebRtcVideoMediaChannel::MaybeResetVieSendCodec(
|
||||
// Disable denoising for screencasting.
|
||||
bool enable_denoising =
|
||||
options_.video_noise_reduction.GetWithDefaultIfUnset(false);
|
||||
#ifdef USE_WEBRTC_DEV_BRANCH
|
||||
int screencast_min_bitrate =
|
||||
options_.screencast_min_bitrate.GetWithDefaultIfUnset(0);
|
||||
bool leaky_bucket = options_.video_leaky_bucket.GetWithDefaultIfUnset(false);
|
||||
#endif
|
||||
bool denoising = !is_screencast && enable_denoising;
|
||||
bool reset_send_codec =
|
||||
target_width != cur_width || target_height != cur_height ||
|
||||
@ -3954,28 +3860,17 @@ bool WebRtcVideoMediaChannel::MaybeResetVieSendCodec(
|
||||
vie_codec.height = target_height;
|
||||
vie_codec.maxFramerate = target_codec.maxFramerate;
|
||||
vie_codec.startBitrate = target_codec.startBitrate;
|
||||
#ifdef USE_WEBRTC_DEV_BRANCH
|
||||
vie_codec.targetBitrate = 0;
|
||||
#endif
|
||||
vie_codec.codecSpecific.VP8.automaticResizeOn = automatic_resize;
|
||||
vie_codec.codecSpecific.VP8.denoisingOn = denoising;
|
||||
vie_codec.codecSpecific.VP8.frameDroppingOn = vp8_frame_dropping;
|
||||
bool maybe_change_start_bitrate = !is_screencast;
|
||||
#ifdef USE_WEBRTC_DEV_BRANCH
|
||||
// TODO(pbos): When USE_WEBRTC_DEV_BRANCH is removed, remove
|
||||
// maybe_change_start_bitrate as well. MaybeChangeStartBitrate should be
|
||||
// called for all content.
|
||||
maybe_change_start_bitrate = true;
|
||||
#endif
|
||||
if (maybe_change_start_bitrate)
|
||||
MaybeChangeStartBitrate(channel_id, &vie_codec);
|
||||
MaybeChangeStartBitrate(channel_id, &vie_codec);
|
||||
|
||||
if (engine()->vie()->codec()->SetSendCodec(channel_id, vie_codec) != 0) {
|
||||
LOG_RTCERR1(SetSendCodec, channel_id);
|
||||
return false;
|
||||
}
|
||||
|
||||
#ifdef USE_WEBRTC_DEV_BRANCH
|
||||
if (is_screencast) {
|
||||
engine()->vie()->rtp()->SetMinTransmitBitrate(channel_id,
|
||||
screencast_min_bitrate);
|
||||
@ -3991,7 +3886,6 @@ bool WebRtcVideoMediaChannel::MaybeResetVieSendCodec(
|
||||
engine()->vie()->rtp()->SetTransmissionSmoothingStatus(channel_id,
|
||||
leaky_bucket);
|
||||
}
|
||||
#endif
|
||||
if (reset) {
|
||||
*reset = true;
|
||||
}
|
||||
|
@@ -939,7 +939,6 @@ TEST_F(WebRtcVideoEngineTestFake, AdditiveVideoOptions) {
  EXPECT_TRUE(vie_.GetTransmissionSmoothingStatus(first_send_channel));
}

#ifdef USE_WEBRTC_DEV_BRANCH
TEST_F(WebRtcVideoEngineTestFake, SetCpuOveruseOptionsWithCaptureJitterMethod) {
  EXPECT_TRUE(SetupEngine());

@@ -1051,7 +1050,6 @@ TEST_F(WebRtcVideoEngineTestFake, SetCpuOveruseOptionsWithEncodeUsageMethod) {
  EXPECT_FALSE(cpu_option.enable_capture_jitter_method);
  EXPECT_TRUE(cpu_option.enable_encode_usage_method);
}
#endif

// Test that AddRecvStream doesn't create new channel for 1:1 call.
TEST_F(WebRtcVideoEngineTestFake, AddRecvStream1On1) {
@ -220,22 +220,6 @@ static bool IsNackEnabled(const AudioCodec& codec) {
|
||||
kParamValueEmpty));
|
||||
}
|
||||
|
||||
// TODO(mallinath) - Remove this after trunk of webrtc is pushed to GTP.
|
||||
#if !defined(USE_WEBRTC_DEV_BRANCH)
|
||||
bool operator==(const webrtc::CodecInst& lhs, const webrtc::CodecInst& rhs) {
|
||||
return lhs.pltype == rhs.pltype &&
|
||||
(_stricmp(lhs.plname, rhs.plname) == 0) &&
|
||||
lhs.plfreq == rhs.plfreq &&
|
||||
lhs.pacsize == rhs.pacsize &&
|
||||
lhs.channels == rhs.channels &&
|
||||
lhs.rate == rhs.rate;
|
||||
}
|
||||
|
||||
bool operator!=(const webrtc::CodecInst& lhs, const webrtc::CodecInst& rhs) {
|
||||
return !(lhs == rhs);
|
||||
}
|
||||
#endif
|
||||
|
||||
// Gets the default set of options applied to the engine. Historically, these
|
||||
// were supplied as a combination of flags from the channel manager (ec, agc,
|
||||
// ns, and highpass) and the rest hardcoded in InitInternal.
|
||||
@ -373,632 +357,6 @@ WebRtcVoiceEngine::WebRtcVoiceEngine(VoEWrapper* voe_wrapper,
|
||||
log_filter_(SeverityToFilter(kDefaultLogSeverity)),
|
||||
is_dumping_aec_(false),
|
||||
desired_local_monitor_enable_(false),
|
||||
tx_processor_ssrc_(0),
|
||||
rx_processor_ssrc_(0) {
|
||||
Construct();
|
||||
}
|
||||
|
||||
void WebRtcVoiceEngine::Construct() {
|
||||
SetTraceFilter(log_filter_);
|
||||
initialized_ = false;
|
||||
LOG(LS_VERBOSE) << "WebRtcVoiceEngine::WebRtcVoiceEngine";
|
||||
SetTraceOptions("");
|
||||
if (tracing_->SetTraceCallback(this) == -1) {
|
||||
LOG_RTCERR0(SetTraceCallback);
|
||||
}
|
||||
if (voe_wrapper_->base()->RegisterVoiceEngineObserver(*this) == -1) {
|
||||
LOG_RTCERR0(RegisterVoiceEngineObserver);
|
||||
}
|
||||
// Clear the default agc state.
|
||||
memset(&default_agc_config_, 0, sizeof(default_agc_config_));
|
||||
|
||||
// Load our audio codec list.
|
||||
ConstructCodecs();
|
||||
|
||||
// Load our RTP Header extensions.
|
||||
rtp_header_extensions_.push_back(
|
||||
RtpHeaderExtension(kRtpAudioLevelHeaderExtension,
|
||||
kRtpAudioLevelHeaderExtensionDefaultId));
|
||||
#ifdef USE_WEBRTC_DEV_BRANCH
|
||||
rtp_header_extensions_.push_back(
|
||||
RtpHeaderExtension(kRtpAbsoluteSenderTimeHeaderExtension,
|
||||
kRtpAbsoluteSenderTimeHeaderExtensionDefaultId));
|
||||
#endif
|
||||
options_ = GetDefaultEngineOptions();
|
||||
}
|
||||
|
||||
static bool IsOpus(const AudioCodec& codec) {
|
||||
return (_stricmp(codec.name.c_str(), kOpusCodecName) == 0);
|
||||
}
|
||||
|
||||
static bool IsIsac(const AudioCodec& codec) {
|
||||
return (_stricmp(codec.name.c_str(), kIsacCodecName) == 0);
|
||||
}
|
||||
|
||||
// True if params["stereo"] == "1"
|
||||
static bool IsOpusStereoEnabled(const AudioCodec& codec) {
|
||||
CodecParameterMap::const_iterator param =
|
||||
codec.params.find(kCodecParamStereo);
|
||||
if (param == codec.params.end()) {
|
||||
return false;
|
||||
}
|
||||
return param->second == kParamValueTrue;
|
||||
}
|
||||
|
||||
static bool IsValidOpusBitrate(int bitrate) {
|
||||
return (bitrate >= kOpusMinBitrate && bitrate <= kOpusMaxBitrate);
|
||||
}
|
||||
|
||||
// Returns 0 if params[kCodecParamMaxAverageBitrate] is not defined or invalid.
|
||||
// Returns the value of params[kCodecParamMaxAverageBitrate] otherwise.
|
||||
static int GetOpusBitrateFromParams(const AudioCodec& codec) {
|
||||
int bitrate = 0;
|
||||
if (!codec.GetParam(kCodecParamMaxAverageBitrate, &bitrate)) {
|
||||
return 0;
|
||||
}
|
||||
if (!IsValidOpusBitrate(bitrate)) {
|
||||
LOG(LS_WARNING) << "Codec parameter \"maxaveragebitrate\" has an "
|
||||
<< "invalid value: " << bitrate;
|
||||
return 0;
|
||||
}
|
||||
return bitrate;
|
||||
}
|
||||
|
||||
void WebRtcVoiceEngine::ConstructCodecs() {
|
||||
LOG(LS_INFO) << "WebRtc VoiceEngine codecs:";
|
||||
int ncodecs = voe_wrapper_->codec()->NumOfCodecs();
|
||||
for (int i = 0; i < ncodecs; ++i) {
|
||||
webrtc::CodecInst voe_codec;
|
||||
if (voe_wrapper_->codec()->GetCodec(i, voe_codec) != -1) {
|
||||
// Skip uncompressed formats.
|
||||
if (_stricmp(voe_codec.plname, kL16CodecName) == 0) {
|
||||
continue;
|
||||
}
|
||||
|
||||
const CodecPref* pref = NULL;
|
||||
for (size_t j = 0; j < ARRAY_SIZE(kCodecPrefs); ++j) {
|
||||
if (_stricmp(kCodecPrefs[j].name, voe_codec.plname) == 0 &&
|
||||
kCodecPrefs[j].clockrate == voe_codec.plfreq &&
|
||||
kCodecPrefs[j].channels == voe_codec.channels) {
|
||||
pref = &kCodecPrefs[j];
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (pref) {
|
||||
// Use the payload type that we've configured in our pref table;
|
||||
// use the offset in our pref table to determine the sort order.
|
||||
AudioCodec codec(pref->payload_type, voe_codec.plname, voe_codec.plfreq,
|
||||
voe_codec.rate, voe_codec.channels,
|
||||
ARRAY_SIZE(kCodecPrefs) - (pref - kCodecPrefs));
|
||||
LOG(LS_INFO) << ToString(codec);
|
||||
if (IsIsac(codec)) {
|
||||
// Indicate auto-bandwidth in signaling.
|
||||
codec.bitrate = 0;
|
||||
}
|
||||
if (IsOpus(codec)) {
|
||||
// Only add fmtp parameters that differ from the spec.
|
||||
if (kPreferredMinPTime != kOpusDefaultMinPTime) {
|
||||
codec.params[kCodecParamMinPTime] =
|
||||
talk_base::ToString(kPreferredMinPTime);
|
||||
}
|
||||
if (kPreferredMaxPTime != kOpusDefaultMaxPTime) {
|
||||
codec.params[kCodecParamMaxPTime] =
|
||||
talk_base::ToString(kPreferredMaxPTime);
|
||||
}
|
||||
// TODO(hellner): Add ptime, sprop-stereo, stereo and useinbandfec
|
||||
// when they can be set to values other than the default.
|
||||
}
|
||||
codecs_.push_back(codec);
|
||||
} else {
|
||||
LOG(LS_WARNING) << "Unexpected codec: " << ToString(voe_codec);
|
||||
}
|
||||
}
|
||||
}
|
||||
// Make sure they are in local preference order.
|
||||
std::sort(codecs_.begin(), codecs_.end(), &AudioCodec::Preferable);
|
||||
}
|
||||
|
||||
WebRtcVoiceEngine::~WebRtcVoiceEngine() {
|
||||
LOG(LS_VERBOSE) << "WebRtcVoiceEngine::~WebRtcVoiceEngine";
|
||||
if (voe_wrapper_->base()->DeRegisterVoiceEngineObserver() == -1) {
|
||||
LOG_RTCERR0(DeRegisterVoiceEngineObserver);
|
||||
}
|
||||
if (adm_) {
|
||||
voe_wrapper_.reset();
|
||||
adm_->Release();
|
||||
adm_ = NULL;
|
||||
}
|
||||
if (adm_sc_) {
|
||||
voe_wrapper_sc_.reset();
|
||||
adm_sc_->Release();
|
||||
adm_sc_ = NULL;
|
||||
}
|
||||
|
||||
// Test to see if the media processor was deregistered properly
|
||||
ASSERT(SignalRxMediaFrame.is_empty());
|
||||
ASSERT(SignalTxMediaFrame.is_empty());
|
||||
|
||||
tracing_->SetTraceCallback(NULL);
|
||||
}
|
||||
|
||||
bool WebRtcVoiceEngine::Init(talk_base::Thread* worker_thread) {
|
||||
LOG(LS_INFO) << "WebRtcVoiceEngine::Init";
|
||||
bool res = InitInternal();
|
||||
if (res) {
|
||||
LOG(LS_INFO) << "WebRtcVoiceEngine::Init Done!";
|
||||
} else {
|
||||
LOG(LS_ERROR) << "WebRtcVoiceEngine::Init failed";
|
||||
Terminate();
|
||||
}
|
||||
return res;
|
||||
}
|
||||
|
||||
bool WebRtcVoiceEngine::InitInternal() {
|
||||
// Temporarily turn logging level up for the Init call
|
||||
int old_filter = log_filter_;
|
||||
int extended_filter = log_filter_ | SeverityToFilter(talk_base::LS_INFO);
|
||||
SetTraceFilter(extended_filter);
|
||||
SetTraceOptions("");
|
||||
|
||||
// Init WebRtc VoiceEngine.
|
||||
if (voe_wrapper_->base()->Init(adm_) == -1) {
|
||||
LOG_RTCERR0_EX(Init, voe_wrapper_->error());
|
||||
SetTraceFilter(old_filter);
|
||||
return false;
|
||||
}
|
||||
|
||||
SetTraceFilter(old_filter);
|
||||
SetTraceOptions(log_options_);
|
||||
|
||||
// Log the VoiceEngine version info
|
||||
char buffer[1024] = "";
|
||||
voe_wrapper_->base()->GetVersion(buffer);
|
||||
LOG(LS_INFO) << "WebRtc VoiceEngine Version:";
|
||||
LogMultiline(talk_base::LS_INFO, buffer);
|
||||
|
||||
// Save the default AGC configuration settings. This must happen before
|
||||
// calling SetOptions or the default will be overwritten.
|
||||
if (voe_wrapper_->processing()->GetAgcConfig(default_agc_config_) == -1) {
|
||||
LOG_RTCERR0(GetAgcConfig);
|
||||
return false;
|
||||
}
|
||||
|
||||
// Set defaults for options, so that ApplyOptions applies them explicitly
|
||||
// when we clear option (channel) overrides. External clients can still
|
||||
// modify the defaults via SetOptions (on the media engine).
|
||||
if (!SetOptions(GetDefaultEngineOptions())) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// Print our codec list again for the call diagnostic log
|
||||
LOG(LS_INFO) << "WebRtc VoiceEngine codecs:";
|
||||
for (std::vector<AudioCodec>::const_iterator it = codecs_.begin();
|
||||
it != codecs_.end(); ++it) {
|
||||
LOG(LS_INFO) << ToString(*it);
|
||||
}
|
||||
|
||||
// Disable the DTMF playout when a tone is sent.
|
||||
// PlayDtmfTone will be used if local playout is needed.
|
||||
if (voe_wrapper_->dtmf()->SetDtmfFeedbackStatus(false) == -1) {
|
||||
LOG_RTCERR1(SetDtmfFeedbackStatus, false);
|
||||
}
|
||||
|
||||
initialized_ = true;
|
||||
return true;
|
||||
}
|
||||
|
||||
bool WebRtcVoiceEngine::EnsureSoundclipEngineInit() {
|
||||
if (voe_wrapper_sc_initialized_) {
|
||||
return true;
|
||||
}
|
||||
// Note that, if initialization fails, voe_wrapper_sc_initialized_ will still
|
||||
// be false, so subsequent calls to EnsureSoundclipEngineInit will
|
||||
// probably just fail again. That's acceptable behavior.
|
||||
#if defined(LINUX) && !defined(HAVE_LIBPULSE)
|
||||
voe_wrapper_sc_->hw()->SetAudioDeviceLayer(webrtc::kAudioLinuxAlsa);
|
||||
#endif
|
||||
|
||||
// Initialize the VoiceEngine instance that we'll use to play out sound clips.
|
||||
if (voe_wrapper_sc_->base()->Init(adm_sc_) == -1) {
|
||||
LOG_RTCERR0_EX(Init, voe_wrapper_sc_->error());
|
||||
return false;
|
||||
}
|
||||
|
||||
// On Windows, tell it to use the default sound (not communication) devices.
|
||||
// First check whether there is a valid sound device for playback.
|
||||
// TODO(juberti): Clean this up when we support setting the soundclip device.
|
||||
#ifdef WIN32
|
||||
// The SetPlayoutDevice may not be implemented in the case of external ADM.
|
||||
// TODO(ronghuawu): We should only check the adm_sc_ here, but current
|
||||
// PeerConnection interface never set the adm_sc_, so need to check both
|
||||
// in order to determine if the external adm is used.
|
||||
if (!adm_ && !adm_sc_) {
|
||||
int num_of_devices = 0;
|
||||
if (voe_wrapper_sc_->hw()->GetNumOfPlayoutDevices(num_of_devices) != -1 &&
|
||||
num_of_devices > 0) {
|
||||
if (voe_wrapper_sc_->hw()->SetPlayoutDevice(kDefaultSoundclipDeviceId)
|
||||
== -1) {
|
||||
LOG_RTCERR1_EX(SetPlayoutDevice, kDefaultSoundclipDeviceId,
|
||||
voe_wrapper_sc_->error());
|
||||
return false;
|
||||
}
|
||||
} else {
|
||||
LOG(LS_WARNING) << "No valid sound playout device found.";
|
||||
}
|
||||
}
|
||||
#endif
|
||||
voe_wrapper_sc_initialized_ = true;
|
||||
LOG(LS_INFO) << "Initialized WebRtc soundclip engine.";
|
||||
return true;
|
||||
}
|
||||
|
||||
void WebRtcVoiceEngine::Terminate() {
|
||||
LOG(LS_INFO) << "WebRtcVoiceEngine::Terminate";
|
||||
initialized_ = false;
|
||||
|
||||
StopAecDump();
|
||||
|
||||
if (voe_wrapper_sc_) {
|
||||
voe_wrapper_sc_initialized_ = false;
|
||||
voe_wrapper_sc_->base()->Terminate();
|
||||
}
|
||||
voe_wrapper_->base()->Terminate();
|
||||
desired_local_monitor_enable_ = false;
|
||||
}
|
||||
|
||||
int WebRtcVoiceEngine::GetCapabilities() {
|
||||
return AUDIO_SEND | AUDIO_RECV;
|
||||
}
|
||||
|
||||
VoiceMediaChannel *WebRtcVoiceEngine::CreateChannel() {
|
||||
WebRtcVoiceMediaChannel* ch = new WebRtcVoiceMediaChannel(this);
|
||||
if (!ch->valid()) {
|
||||
delete ch;
|
||||
ch = NULL;
|
||||
}
|
||||
return ch;
|
||||
}
|
||||
|
||||
SoundclipMedia *WebRtcVoiceEngine::CreateSoundclip() {
|
||||
if (!EnsureSoundclipEngineInit()) {
|
||||
LOG(LS_ERROR) << "Unable to create soundclip: soundclip engine failed to "
|
||||
<< "initialize.";
|
||||
return NULL;
|
||||
}
|
||||
WebRtcSoundclipMedia *soundclip = new WebRtcSoundclipMedia(this);
|
||||
if (!soundclip->Init() || !soundclip->Enable()) {
|
||||
delete soundclip;
|
||||
return NULL;
|
||||
}
|
||||
return soundclip;
|
||||
}
|
||||
|
||||
bool WebRtcVoiceEngine::SetOptions(const AudioOptions& options) {
|
||||
if (!ApplyOptions(options)) {
|
||||
return false;
|
||||
}
|
||||
options_ = options;
|
||||
return true;
|
||||
}
|
||||
|
||||
bool WebRtcVoiceEngine::SetOptionOverrides(const AudioOptions& overrides) {
|
||||
LOG(LS_INFO) << "Setting option overrides: " << overrides.ToString();
|
||||
if (!ApplyOptions(overrides)) {
|
||||
return false;
|
||||
}
|
||||
option_overrides_ = overrides;
|
||||
return true;
|
||||
}
|
||||
|
||||
bool WebRtcVoiceEngine::ClearOptionOverrides() {
|
||||
LOG(LS_INFO) << "Clearing option overrides.";
|
||||
AudioOptions options = options_;
|
||||
// Only call ApplyOptions if |options_overrides_| contains overrided options.
|
||||
// ApplyOptions affects NS, AGC other options that is shared between
|
||||
// all WebRtcVoiceEngineChannels.
|
||||
if (option_overrides_ == AudioOptions()) {
|
||||
return true;
|
||||
}
|
||||
|
||||
if (!ApplyOptions(options)) {
|
||||
return false;
|
||||
}
|
||||
option_overrides_ = AudioOptions();
|
||||
return true;
|
||||
}
|
||||
|
||||
// AudioOptions defaults are set in InitInternal (for options with corresponding
|
||||
// MediaEngineInterface flags) and in SetOptions(int) for flagless options.
|
||||
bool WebRtcVoiceEngine::ApplyOptions(const AudioOptions& options_in) {
|
||||
AudioOptions options = options_in; // The options are modified below.
|
||||
// kEcConference is AEC with high suppression.
|
||||
webrtc::EcModes ec_mode = webrtc::kEcConference;
|
||||
webrtc::AecmModes aecm_mode = webrtc::kAecmSpeakerphone;
|
||||
webrtc::AgcModes agc_mode = webrtc::kAgcAdaptiveAnalog;
|
||||
webrtc::NsModes ns_mode = webrtc::kNsHighSuppression;
|
||||
bool aecm_comfort_noise = false;
|
||||
if (options.aecm_generate_comfort_noise.Get(&aecm_comfort_noise)) {
|
||||
LOG(LS_VERBOSE) << "Comfort noise explicitly set to "
|
||||
<< aecm_comfort_noise << " (default is false).";
|
||||
}
|
||||
|
||||
#if defined(IOS)
|
||||
// On iOS, VPIO provides built-in EC and AGC.
|
||||
options.echo_cancellation.Set(false);
|
||||
options.auto_gain_control.Set(false);
|
||||
#elif defined(ANDROID)
|
||||
ec_mode = webrtc::kEcAecm;
|
||||
#endif
|
||||
|
||||
#if defined(IOS) || defined(ANDROID)
|
||||
// Set the AGC mode for iOS as well despite disabling it above, to avoid
|
||||
// unsupported configuration errors from webrtc.
|
||||
agc_mode = webrtc::kAgcFixedDigital;
|
||||
options.typing_detection.Set(false);
|
||||
options.experimental_agc.Set(false);
|
||||
options.experimental_aec.Set(false);
|
||||
options.experimental_ns.Set(false);
|
||||
#endif
|
||||
|
||||
LOG(LS_INFO) << "Applying audio options: " << options.ToString();
|
||||
|
||||
webrtc::VoEAudioProcessing* voep = voe_wrapper_->processing();
|
||||
|
||||
bool echo_cancellation;
|
||||
if (options.echo_cancellation.Get(&echo_cancellation)) {
|
||||
if (voep->SetEcStatus(echo_cancellation, ec_mode) == -1) {
|
||||
LOG_RTCERR2(SetEcStatus, echo_cancellation, ec_mode);
|
||||
return false;
|
||||
} else {
|
||||
LOG(LS_VERBOSE) << "Echo control set to " << echo_cancellation
|
||||
<< " with mode " << ec_mode;
|
||||
}
|
||||
#if !defined(ANDROID)
|
||||
// TODO(ajm): Remove the error return on Android from webrtc.
|
||||
if (voep->SetEcMetricsStatus(echo_cancellation) == -1) {
|
||||
LOG_RTCERR1(SetEcMetricsStatus, echo_cancellation);
|
||||
return false;
|
||||
}
|
||||
#endif
|
||||
if (ec_mode == webrtc::kEcAecm) {
|
||||
if (voep->SetAecmMode(aecm_mode, aecm_comfort_noise) != 0) {
|
||||
LOG_RTCERR2(SetAecmMode, aecm_mode, aecm_comfort_noise);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
bool auto_gain_control;
|
||||
if (options.auto_gain_control.Get(&auto_gain_control)) {
|
||||
if (voep->SetAgcStatus(auto_gain_control, agc_mode) == -1) {
|
||||
LOG_RTCERR2(SetAgcStatus, auto_gain_control, agc_mode);
|
||||
return false;
|
||||
} else {
|
||||
LOG(LS_VERBOSE) << "Auto gain set to " << auto_gain_control
|
||||
<< " with mode " << agc_mode;
|
||||
}
|
||||
}
|
||||
|
||||
if (options.tx_agc_target_dbov.IsSet() ||
|
||||
options.tx_agc_digital_compression_gain.IsSet() ||
|
||||
options.tx_agc_limiter.IsSet()) {
|
||||
// Override default_agc_config_. Generally, an unset option means "leave
|
||||
// the VoE bits alone" in this function, so we want whatever is set to be
|
||||
// stored as the new "default". If we didn't, then setting e.g.
|
||||
// tx_agc_target_dbov would reset digital compression gain and limiter
|
||||
// settings.
|
||||
// Also, if we don't update default_agc_config_, then adjust_agc_delta
|
||||
// would be an offset from the original values, and not whatever was set
|
||||
// explicitly.
|
||||
default_agc_config_.targetLeveldBOv =
|
||||
options.tx_agc_target_dbov.GetWithDefaultIfUnset(
|
||||
default_agc_config_.targetLeveldBOv);
|
||||
default_agc_config_.digitalCompressionGaindB =
|
||||
options.tx_agc_digital_compression_gain.GetWithDefaultIfUnset(
|
||||
default_agc_config_.digitalCompressionGaindB);
|
||||
default_agc_config_.limiterEnable =
|
||||
options.tx_agc_limiter.GetWithDefaultIfUnset(
|
||||
default_agc_config_.limiterEnable);
|
||||
if (voe_wrapper_->processing()->SetAgcConfig(default_agc_config_) == -1) {
|
||||
LOG_RTCERR3(SetAgcConfig,
|
||||
default_agc_config_.targetLeveldBOv,
|
||||
default_agc_config_.digitalCompressionGaindB,
|
||||
default_agc_config_.limiterEnable);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
bool noise_suppression;
|
||||
if (options.noise_suppression.Get(&noise_suppression)) {
|
||||
if (voep->SetNsStatus(noise_suppression, ns_mode) == -1) {
|
||||
LOG_RTCERR2(SetNsStatus, noise_suppression, ns_mode);
|
||||
return false;
|
||||
} else {
|
||||
LOG(LS_VERBOSE) << "Noise suppression set to " << noise_suppression
|
||||
<< " with mode " << ns_mode;
|
||||
}
|
||||
}
|
||||
|
||||
#ifdef USE_WEBRTC_DEV_BRANCH
|
||||
bool experimental_ns;
|
||||
if (options.experimental_ns.Get(&experimental_ns)) {
|
||||
webrtc::AudioProcessing* audioproc =
|
||||
voe_wrapper_->base()->audio_processing();
|
||||
// We check audioproc for the benefit of tests, since FakeWebRtcVoiceEngine
|
||||
// returns NULL on audio_processing().
|
||||
if (audioproc) {
|
||||
if (audioproc->EnableExperimentalNs(experimental_ns) == -1) {
|
||||
LOG_RTCERR1(EnableExperimentalNs, experimental_ns);
|
||||
return false;
|
||||
}
|
||||
} else {
|
||||
LOG(LS_VERBOSE) << "Experimental noise suppression set to "
|
||||
<< experimental_ns;
|
||||
}
|
||||
}
|
||||
#endif // USE_WEBRTC_DEV_BRANCH
|
||||
|
||||
bool highpass_filter;
|
||||
if (options.highpass_filter.Get(&highpass_filter)) {
|
||||
LOG(LS_INFO) << "High pass filter enabled? " << highpass_filter;
|
||||
if (voep->EnableHighPassFilter(highpass_filter) == -1) {
|
||||
LOG_RTCERR1(SetHighpassFilterStatus, highpass_filter);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
bool stereo_swapping;
|
||||
if (options.stereo_swapping.Get(&stereo_swapping)) {
|
||||
LOG(LS_INFO) << "Stereo swapping enabled? " << stereo_swapping;
|
||||
voep->EnableStereoChannelSwapping(stereo_swapping);
|
||||
if (voep->IsStereoChannelSwappingEnabled() != stereo_swapping) {
|
||||
LOG_RTCERR1(EnableStereoChannelSwapping, stereo_swapping);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
bool typing_detection;
|
||||
if (options.typing_detection.Get(&typing_detection)) {
|
||||
LOG(LS_INFO) << "Typing detection is enabled? " << typing_detection;
|
||||
if (voep->SetTypingDetectionStatus(typing_detection) == -1) {
|
||||
// In case of error, log the info and continue
|
||||
LOG_RTCERR1(SetTypingDetectionStatus, typing_detection);
|
||||
}
|
||||
}
|
||||
|
||||
int adjust_agc_delta;
|
||||
if (options.adjust_agc_delta.Get(&adjust_agc_delta)) {
|
||||
LOG(LS_INFO) << "Adjust agc delta is " << adjust_agc_delta;
|
||||
if (!AdjustAgcLevel(adjust_agc_delta)) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
bool aec_dump;
|
||||
if (options.aec_dump.Get(&aec_dump)) {
|
||||
LOG(LS_INFO) << "Aec dump is enabled? " << aec_dump;
|
||||
if (aec_dump)
|
||||
StartAecDump(kAecDumpByAudioOptionFilename);
|
||||
else
|
||||
StopAecDump();
|
||||
}
|
||||
|
||||
bool experimental_aec;
|
||||
if (options.experimental_aec.Get(&experimental_aec)) {
|
||||
LOG(LS_INFO) << "Experimental aec is " << experimental_aec;
|
||||
webrtc::AudioProcessing* audioproc =
|
||||
voe_wrapper_->base()->audio_processing();
|
||||
// We check audioproc for the benefit of tests, since FakeWebRtcVoiceEngine
|
||||
// returns NULL on audio_processing().
|
||||
if (audioproc) {
|
||||
webrtc::Config config;
|
||||
config.Set<webrtc::DelayCorrection>(
|
||||
new webrtc::DelayCorrection(experimental_aec));
|
||||
audioproc->SetExtraOptions(config);
|
||||
}
|
||||
}
|
||||
|
||||
uint32 recording_sample_rate;
|
||||
if (options.recording_sample_rate.Get(&recording_sample_rate)) {
|
||||
LOG(LS_INFO) << "Recording sample rate is " << recording_sample_rate;
|
||||
if (voe_wrapper_->hw()->SetRecordingSampleRate(recording_sample_rate)) {
|
||||
LOG_RTCERR1(SetRecordingSampleRate, recording_sample_rate);
|
||||
}
|
||||
}
|
||||
|
||||
uint32 playout_sample_rate;
|
||||
if (options.playout_sample_rate.Get(&playout_sample_rate)) {
|
||||
LOG(LS_INFO) << "Playout sample rate is " << playout_sample_rate;
|
||||
if (voe_wrapper_->hw()->SetPlayoutSampleRate(playout_sample_rate)) {
|
||||
LOG_RTCERR1(SetPlayoutSampleRate, playout_sample_rate);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
bool WebRtcVoiceEngine::SetDelayOffset(int offset) {
|
||||
voe_wrapper_->processing()->SetDelayOffsetMs(offset);
|
||||
if (voe_wrapper_->processing()->DelayOffsetMs() != offset) {
|
||||
LOG_RTCERR1(SetDelayOffsetMs, offset);
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
struct ResumeEntry {
|
||||
ResumeEntry(WebRtcVoiceMediaChannel *c, bool p, SendFlags s)
|
||||
: channel(c),
|
||||
playout(p),
|
||||
send(s) {
|
||||
}
|
||||
|
||||
WebRtcVoiceMediaChannel *channel;
|
||||
bool playout;
|
||||
SendFlags send;
|
||||
};
|
||||
|
||||
// TODO(juberti): Refactor this so that the core logic can be used to set the
|
||||
// soundclip device. At that time, reinstate the soundclip pause/resume code.
|
||||
bool WebRtcVoiceEngine::SetDevices(const Device* in_device,
|
||||
const Device* out_device) {
|
||||
#if !defined(IOS)
|
||||
int in_id = in_device ? talk_base::FromString<int>(in_device->id) :
|
||||
kDefaultAudioDeviceId;
|
||||
int out_id = out_device ? talk_base::FromString<int>(out_device->id) :
|
||||
kDefaultAudioDeviceId;
|
||||
// The device manager uses -1 as the default device, which was the case for
|
||||
// VoE 3.5. VoE 4.0, however, uses 0 as the default in Linux and Mac.
|
||||
#ifndef WIN32
|
||||
if (-1 == in_id) {
|
||||
in_id = kDefaultAudioDeviceId;
|
||||
}
|
||||
if (-1 == out_id) {
|
||||
out_id = kDefaultAudioDeviceId;
|
||||
}
|
||||
#endif
|
||||
|
||||
std::string in_name = (in_id != kDefaultAudioDeviceId) ?
|
||||
in_device->name : "Default device";
|
||||
std::string out_name = (out_id != kDefaultAudioDeviceId) ?
|
||||
out_device->name : "Default device";
|
||||
LOG(LS_INFO) << "Setting microphone to (id=" << in_id << ", name=" << in_name
|
||||
<< ") and speaker to (id=" << out_id << ", name=" << out_name
|
||||
<< ")";
|
||||
|
||||
// If we're running the local monitor, we need to stop it first.
|
||||
bool ret = true;
|
||||
if (!PauseLocalMonitor()) {
|
||||
LOG(LS_WARNING) << "Failed to pause local monitor";
|
||||
ret = false;
|
||||
}
|
||||
|
||||
// Must also pause all audio playback and capture.
|
||||
for (ChannelList::const_iterator i = channels_.begin();
|
||||
i != channels_.end(); ++i) {
|
||||
WebRtcVoiceMediaChannel *channel = *i;
|
||||
if (!channel->PausePlayout()) {
|
||||
LOG(LS_WARNING) << "Failed to pause playout";
|
||||
ret = false;
|
||||
}
|
||||
if (!channel->PauseSend()) {
|
||||
LOG(LS_WARNING) << "Failed to pause send";
|
||||
ret = false;
|
||||
}
|
||||
}
|
||||
|
||||
// Find the recording device id in VoiceEngine and set recording device.
|
||||
if (!FindWebRtcAudioDeviceId(true, in_name, in_id, &in_id)) {
|
||||
ret = false;
|
||||
}
|
||||
if (ret) {
|
||||
if (voe_wrapper_->hw()->SetRecordingDevice(in_id) == -1) {
|
||||
LOG_RTCERR2(SetRecordingDevice, in_name, in_id);
|
||||
ret = false;
|
||||
}
|
||||
}
|
||||
|
||||
// Find the playout device id in VoiceEngine and set playout device.
|
||||
@ -1719,14 +1077,12 @@ class WebRtcVoiceMediaChannel::WebRtcVoiceChannelRenderer
|
||||
int sample_rate,
|
||||
int number_of_channels,
|
||||
int number_of_frames) OVERRIDE {
|
||||
#ifdef USE_WEBRTC_DEV_BRANCH
|
||||
voe_audio_transport_->OnData(channel_,
|
||||
audio_data,
|
||||
bits_per_sample,
|
||||
sample_rate,
|
||||
number_of_channels,
|
||||
number_of_frames);
|
||||
#endif
|
||||
}
|
||||
|
||||
// Callback from the |renderer_| when it is going away. In case Start() has
|
||||
@ -2225,7 +1581,6 @@ bool WebRtcVoiceMediaChannel::SetSendCodec(
|
||||
|
||||
bool WebRtcVoiceMediaChannel::SetRecvRtpHeaderExtensions(
|
||||
const std::vector<RtpHeaderExtension>& extensions) {
|
||||
#ifdef USE_WEBRTC_DEV_BRANCH
|
||||
const RtpHeaderExtension* send_time_extension =
|
||||
FindHeaderExtension(extensions, kRtpAbsoluteSenderTimeHeaderExtension);
|
||||
|
||||
@ -2239,7 +1594,6 @@ bool WebRtcVoiceMediaChannel::SetRecvRtpHeaderExtensions(
|
||||
return false;
|
||||
}
|
||||
}
|
||||
#endif
|
||||
return true;
|
||||
}
|
||||
|
||||
@ -2247,18 +1601,8 @@ bool WebRtcVoiceMediaChannel::SetSendRtpHeaderExtensions(
|
||||
const std::vector<RtpHeaderExtension>& extensions) {
|
||||
const RtpHeaderExtension* audio_level_extension =
|
||||
FindHeaderExtension(extensions, kRtpAudioLevelHeaderExtension);
|
||||
#ifdef USE_WEBRTC_DEV_BRANCH
|
||||
const RtpHeaderExtension* send_time_extension =
|
||||
FindHeaderExtension(extensions, kRtpAbsoluteSenderTimeHeaderExtension);
|
||||
#endif
|
||||
|
||||
#ifndef USE_WEBRTC_DEV_BRANCH
|
||||
if (!SetHeaderExtension(
|
||||
&webrtc::VoERTP_RTCP::SetRTPAudioLevelIndicationStatus, voe_channel(),
|
||||
audio_level_extension)) {
|
||||
return false;
|
||||
}
|
||||
#else
|
||||
if (!SetHeaderExtension(
|
||||
&webrtc::VoERTP_RTCP::SetSendAudioLevelIndicationStatus, voe_channel(),
|
||||
audio_level_extension)) {
|
||||
@ -2269,18 +1613,10 @@ bool WebRtcVoiceMediaChannel::SetSendRtpHeaderExtensions(
|
||||
send_time_extension)) {
|
||||
return false;
|
||||
}
|
||||
#endif
|
||||
|
||||
for (ChannelMap::const_iterator channel_it = send_channels_.begin();
|
||||
channel_it != send_channels_.end(); ++channel_it) {
|
||||
int channel_id = channel_it->second->channel();
|
||||
#ifndef USE_WEBRTC_DEV_BRANCH
|
||||
if (!SetHeaderExtension(
|
||||
&webrtc::VoERTP_RTCP::SetRTPAudioLevelIndicationStatus, channel_id,
|
||||
audio_level_extension)) {
|
||||
return false;
|
||||
}
|
||||
#else
|
||||
if (!SetHeaderExtension(
|
||||
&webrtc::VoERTP_RTCP::SetSendAudioLevelIndicationStatus, channel_id,
|
||||
audio_level_extension)) {
|
||||
@ -2291,7 +1627,6 @@ bool WebRtcVoiceMediaChannel::SetSendRtpHeaderExtensions(
|
||||
send_time_extension)) {
|
||||
return false;
|
||||
}
|
||||
#endif
|
||||
}
|
||||
return true;
|
||||
}
|
||||
@ -2454,12 +1789,8 @@ bool WebRtcVoiceMediaChannel::AddSendStream(const StreamParams& sp) {
|
||||
|
||||
// Save the channel to send_channels_, so that RemoveSendStream() can still
|
||||
// delete the channel in case failure happens below.
|
||||
#ifdef USE_WEBRTC_DEV_BRANCH
|
||||
webrtc::AudioTransport* audio_transport =
|
||||
engine()->voe()->base()->audio_transport();
|
||||
#else
|
||||
webrtc::AudioTransport* audio_transport = NULL;
|
||||
#endif
|
||||
send_channels_.insert(std::make_pair(
|
||||
sp.first_ssrc(),
|
||||
new WebRtcVoiceChannelRenderer(channel, audio_transport)));
|
||||
@ -2556,12 +1887,8 @@ bool WebRtcVoiceMediaChannel::AddRecvStream(const StreamParams& sp) {
|
||||
|
||||
// Reuse default channel for recv stream in non-conference mode call
|
||||
// when the default channel is not being used.
|
||||
#ifdef USE_WEBRTC_DEV_BRANCH
|
||||
webrtc::AudioTransport* audio_transport =
|
||||
engine()->voe()->base()->audio_transport();
|
||||
#else
|
||||
webrtc::AudioTransport* audio_transport = NULL;
|
||||
#endif
|
||||
if (!InConferenceMode() && default_receive_ssrc_ == 0) {
|
||||
LOG(LS_INFO) << "Recv stream " << sp.first_ssrc()
|
||||
<< " reuse default channel";
|
||||
|
@@ -50,7 +50,6 @@
#error "Bogus include."
#endif


namespace cricket {

// WebRtcSoundclipStream is an adapter object that allows a memory stream to be
@ -246,32 +246,25 @@ class WebRtcVoiceEngineTestFake : public testing::Test {
|
||||
EXPECT_EQ(expected_bitrate, temp_codec.rate);
|
||||
}
|
||||
|
||||
|
||||
void TestSetSendRtpHeaderExtensions(int channel_id) {
|
||||
std::vector<cricket::RtpHeaderExtension> extensions;
|
||||
|
||||
// Ensure extensions are off by default.
|
||||
EXPECT_EQ(-1, voe_.GetSendAudioLevelId(channel_id));
|
||||
#ifdef USE_WEBRTC_DEV_BRANCH
|
||||
EXPECT_EQ(-1, voe_.GetSendAbsoluteSenderTimeId(channel_id));
|
||||
#endif
|
||||
|
||||
// Ensure unknown extensions won't cause an error.
|
||||
extensions.push_back(cricket::RtpHeaderExtension(
|
||||
"urn:ietf:params:unknownextention", 1));
|
||||
EXPECT_TRUE(channel_->SetSendRtpHeaderExtensions(extensions));
|
||||
EXPECT_EQ(-1, voe_.GetSendAudioLevelId(channel_id));
|
||||
#ifdef USE_WEBRTC_DEV_BRANCH
|
||||
EXPECT_EQ(-1, voe_.GetSendAbsoluteSenderTimeId(channel_id));
|
||||
#endif
|
||||
|
||||
// Ensure extensions stay off with an empty list of headers.
|
||||
extensions.clear();
|
||||
EXPECT_TRUE(channel_->SetSendRtpHeaderExtensions(extensions));
|
||||
EXPECT_EQ(-1, voe_.GetSendAudioLevelId(channel_id));
|
||||
#ifdef USE_WEBRTC_DEV_BRANCH
|
||||
EXPECT_EQ(-1, voe_.GetSendAbsoluteSenderTimeId(channel_id));
|
||||
#endif
|
||||
|
||||
// Ensure audio levels are enabled if the audio-level header is specified
|
||||
// (but AST is still off).
|
||||
@ -279,65 +272,49 @@ class WebRtcVoiceEngineTestFake : public testing::Test {
|
||||
"urn:ietf:params:rtp-hdrext:ssrc-audio-level", 8));
|
||||
EXPECT_TRUE(channel_->SetSendRtpHeaderExtensions(extensions));
|
||||
EXPECT_EQ(8, voe_.GetSendAudioLevelId(channel_id));
|
||||
#ifdef USE_WEBRTC_DEV_BRANCH
|
||||
EXPECT_EQ(-1, voe_.GetSendAbsoluteSenderTimeId(channel_id));
|
||||
#endif
|
||||
|
||||
#ifdef USE_WEBRTC_DEV_BRANCH
|
||||
// Ensure audio level and AST are enabled if the extensions are specified.
|
||||
extensions.push_back(cricket::RtpHeaderExtension(
|
||||
"http://www.webrtc.org/experiments/rtp-hdrext/abs-send-time", 12));
|
||||
EXPECT_TRUE(channel_->SetSendRtpHeaderExtensions(extensions));
|
||||
EXPECT_EQ(8, voe_.GetSendAudioLevelId(channel_id));
|
||||
EXPECT_EQ(12, voe_.GetSendAbsoluteSenderTimeId(channel_id));
|
||||
#endif
|
||||
|
||||
// Ensure all extensions go back off with an empty list.
|
||||
extensions.clear();
|
||||
EXPECT_TRUE(channel_->SetSendRtpHeaderExtensions(extensions));
|
||||
EXPECT_EQ(-1, voe_.GetSendAudioLevelId(channel_id));
|
||||
#ifdef USE_WEBRTC_DEV_BRANCH
|
||||
EXPECT_EQ(-1, voe_.GetSendAbsoluteSenderTimeId(channel_id));
|
||||
#endif
|
||||
}
|
||||
|
||||
void TestSetRecvRtpHeaderExtensions(int channel_id) {
|
||||
std::vector<cricket::RtpHeaderExtension> extensions;
|
||||
|
||||
#ifdef USE_WEBRTC_DEV_BRANCH
|
||||
// Ensure extensions are off by default.
|
||||
EXPECT_EQ(-1, voe_.GetReceiveAbsoluteSenderTimeId(channel_id));
|
||||
#endif
|
||||
|
||||
// Ensure unknown extensions won't cause an error.
|
||||
extensions.push_back(cricket::RtpHeaderExtension(
|
||||
"urn:ietf:params:unknownextention", 1));
|
||||
EXPECT_TRUE(channel_->SetRecvRtpHeaderExtensions(extensions));
|
||||
#ifdef USE_WEBRTC_DEV_BRANCH
|
||||
EXPECT_EQ(-1, voe_.GetReceiveAbsoluteSenderTimeId(channel_id));
|
||||
#endif
|
||||
|
||||
// An empty list shouldn't cause any headers to be enabled.
|
||||
extensions.clear();
|
||||
EXPECT_TRUE(channel_->SetRecvRtpHeaderExtensions(extensions));
|
||||
#ifdef USE_WEBRTC_DEV_BRANCH
|
||||
EXPECT_EQ(-1, voe_.GetReceiveAbsoluteSenderTimeId(channel_id));
|
||||
#endif
|
||||
|
||||
#ifdef USE_WEBRTC_DEV_BRANCH
|
||||
// Nor should indicating we can receive the absolute sender time header.
|
||||
extensions.push_back(cricket::RtpHeaderExtension(
|
||||
"http://www.webrtc.org/experiments/rtp-hdrext/abs-send-time", 11));
|
||||
EXPECT_TRUE(channel_->SetRecvRtpHeaderExtensions(extensions));
|
||||
EXPECT_EQ(11, voe_.GetReceiveAbsoluteSenderTimeId(channel_id));
|
||||
#endif
|
||||
|
||||
// Resetting to an empty list shouldn't cause any headers to be enabled.
|
||||
extensions.clear();
|
||||
EXPECT_TRUE(channel_->SetRecvRtpHeaderExtensions(extensions));
|
||||
#ifdef USE_WEBRTC_DEV_BRANCH
|
||||
EXPECT_EQ(-1, voe_.GetReceiveAbsoluteSenderTimeId(channel_id));
|
||||
#endif
|
||||
}
|
||||
|
||||
protected:
|
||||
@ -2686,7 +2663,6 @@ TEST_F(WebRtcVoiceEngineTestFake, InitDoesNotOverwriteDefaultAgcConfig) {
|
||||
EXPECT_EQ(set_config.limiterEnable, config.limiterEnable);
|
||||
}
|
||||
|
||||
|
||||
TEST_F(WebRtcVoiceEngineTestFake, SetOptionOverridesViaChannels) {
|
||||
EXPECT_TRUE(SetupEngine());
|
||||
talk_base::scoped_ptr<cricket::VoiceMediaChannel> channel1(
|
||||
@ -2886,7 +2862,6 @@ TEST_F(WebRtcVoiceEngineTestFake, SetOutputScaling) {
|
||||
EXPECT_DOUBLE_EQ(1, right);
|
||||
}
|
||||
|
||||
|
||||
// Tests for the actual WebRtc VoE library.
|
||||
|
||||
// Tests that the library initializes and shuts down properly.
|
||||
|