Update talk to 51664136.
R=mallinath@webrtc.org
Review URL: https://webrtc-codereview.appspot.com/2148004

git-svn-id: http://webrtc.googlecode.com/svn/trunk@4649 4adac7df-926f-26a2-2b94-8c16560cd09d

parent a957570d62
commit cadf9040cb
@@ -324,15 +324,13 @@ void DataChannel::ConnectToDataSession() {
   data_session_->SignalDataReceived.connect(this, &DataChannel::OnDataReceived);
   cricket::StreamParams params =
       cricket::StreamParams::CreateLegacy(id());
-  data_session_->media_channel()->AddSendStream(params);
-  data_session_->media_channel()->AddRecvStream(params);
+  data_session_->AddRecvStream(params);
+  data_session_->AddSendStream(params);
 }
 
 void DataChannel::DisconnectFromDataSession() {
-  if (data_session_->media_channel() != NULL) {
-    data_session_->media_channel()->RemoveSendStream(id());
-    data_session_->media_channel()->RemoveRecvStream(id());
-  }
+  data_session_->RemoveSendStream(id());
+  data_session_->RemoveRecvStream(id());
   data_session_->SignalReadyToSendData.disconnect(this);
   data_session_->SignalDataReceived.disconnect(this);
   data_session_ = NULL;
@@ -29,6 +29,7 @@
 #include "talk/app/webrtc/jsep.h"
 #include "talk/app/webrtc/mediastreamsignaling.h"
 #include "talk/app/webrtc/test/fakeconstraints.h"
+#include "talk/app/webrtc/test/fakedtlsidentityservice.h"
 #include "talk/app/webrtc/webrtcsession.h"
 #include "talk/base/gunit.h"
 #include "talk/media/base/fakemediaengine.h"
@@ -92,7 +93,8 @@ class SctpDataChannelTest : public testing::Test {
     constraints.AddMandatory(MediaConstraintsInterface::kEnableDtlsSrtp, true);
     constraints.AddMandatory(MediaConstraintsInterface::kEnableSctpDataChannels,
                              true);
-    ASSERT_TRUE(session_.Initialize(&constraints, NULL));
+    ASSERT_TRUE(session_.Initialize(&constraints,
+                                    new FakeIdentityService()));
     talk_base::scoped_refptr<CreateSessionDescriptionObserverForTest> observer
         = new CreateSessionDescriptionObserverForTest();
     session_.CreateOffer(observer.get(), NULL);
@@ -116,7 +118,6 @@ class SctpDataChannelTest : public testing::Test {
       session_.data_channel()->SignalReadyToSendData(true);
     }
   }
-
   cricket::FakeMediaEngine* media_engine_;
   cricket::FakeDataEngine* data_engine_;
   talk_base::scoped_ptr<cricket::ChannelManager> channel_manager_;
@@ -54,6 +54,9 @@ const char MediaConstraintsInterface::kNoiseReduction[] = "googNoiseReduction";
 const char MediaConstraintsInterface::kLeakyBucket[] = "googLeakyBucket";
 const char MediaConstraintsInterface::kTemporalLayeredScreencast[] =
     "googTemporalLayeredScreencast";
+// TODO(ronghuawu): Remove once cpu overuse detection is stable.
+const char MediaConstraintsInterface::kCpuOveruseDetection[] =
+    "googCpuOveruseDetection";
 
 }  // namespace webrtc
@@ -202,7 +205,9 @@ bool NewFormatWithConstraints(
   } else if (constraint.key == MediaConstraintsInterface::kNoiseReduction ||
              constraint.key == MediaConstraintsInterface::kLeakyBucket ||
              constraint.key ==
-                 MediaConstraintsInterface::kTemporalLayeredScreencast) {
+                 MediaConstraintsInterface::kTemporalLayeredScreencast ||
+             constraint.key ==
+                 MediaConstraintsInterface::kCpuOveruseDetection) {
     // These are actually options, not constraints, so they can be satisfied
     // regardless of the format.
     return true;
@@ -316,6 +321,9 @@ bool ExtractVideoOptions(const MediaConstraintsInterface* all_constraints,
   all_valid &= ExtractOption(all_constraints,
                              MediaConstraintsInterface::kTemporalLayeredScreencast,
                              &(options->video_temporal_layer_screencast));
+  all_valid &= ExtractOption(all_constraints,
+                             MediaConstraintsInterface::kCpuOveruseDetection,
+                             &(options->cpu_overuse_detection));
 
   return all_valid;
 }
@@ -339,6 +339,8 @@ TEST_F(LocalVideoSourceTest, SetValidOptionValues) {
       MediaConstraintsInterface::kTemporalLayeredScreencast, "false");
   constraints.AddOptional(
       MediaConstraintsInterface::kLeakyBucket, "true");
+  constraints.AddOptional(
+      MediaConstraintsInterface::kCpuOveruseDetection, "true");
 
   CreateLocalVideoSource(&constraints);
 
@@ -350,6 +352,8 @@ TEST_F(LocalVideoSourceTest, SetValidOptionValues) {
   EXPECT_FALSE(value);
   EXPECT_TRUE(local_source_->options()->video_leaky_bucket.Get(&value));
   EXPECT_TRUE(value);
+  EXPECT_TRUE(local_source_->options()->
+      cpu_overuse_detection.GetWithDefaultIfUnset(false));
 }
 
 TEST_F(LocalVideoSourceTest, OptionNotSet) {
@@ -357,6 +361,7 @@ TEST_F(LocalVideoSourceTest, OptionNotSet) {
   CreateLocalVideoSource(&constraints);
   bool value;
   EXPECT_FALSE(local_source_->options()->video_noise_reduction.Get(&value));
+  EXPECT_FALSE(local_source_->options()->cpu_overuse_detection.Get(&value));
 }
 
 TEST_F(LocalVideoSourceTest, MandatoryOptionOverridesOptional) {
@@ -86,6 +86,7 @@ class MediaConstraintsInterface {
   static const char kLeakyBucket[];  // googLeakyBucket
   // googTemporalLayeredScreencast
   static const char kTemporalLayeredScreencast[];
+  static const char kCpuOveruseDetection[];
 
   // Constraint keys for CreateOffer / CreateAnswer
   // Specified by the W3C PeerConnection spec
@@ -1316,7 +1316,11 @@ void BuildRtpContentAttributes(
 
   // RFC 4566
   // b=AS:<bandwidth>
-  if (media_desc->bandwidth() >= 1000) {
+  // We should always use the default bandwidth for RTP-based data
+  // channels. Don't allow SDP to set the bandwidth, because that
+  // would give JS the opportunity to "break the Internet".
+  if (media_desc->bandwidth() >= 1000 &&
+      media_type != cricket::MEDIA_TYPE_DATA) {
     InitLine(kLineTypeSessionBandwidth, kApplicationSpecificMaximum, &os);
     os << kSdpDelimiterColon << (media_desc->bandwidth() / 1000);
     AddLine(os.str(), message);
@@ -2105,6 +2109,10 @@ bool ParseMediaDescription(const std::string& message,
         message, cricket::MEDIA_TYPE_DATA, mline_index, protocol,
         codec_preference, pos, &content_name,
         &transport, candidates, error));
+    // We should always use the default bandwidth for RTP-based data
+    // channels. Don't allow SDP to set the bandwidth, because that
+    // would give JS the opportunity to "break the Internet".
+    content->set_bandwidth(cricket::kAutoBandwidth);
   } else {
     LOG(LS_WARNING) << "Unsupported media type: " << line;
     continue;
@@ -1427,6 +1427,25 @@ TEST_F(WebRtcSdpTest, SerializeSessionDescriptionWithSctpDataChannel) {
   EXPECT_EQ(message, expected_sdp);
 }
 
+TEST_F(WebRtcSdpTest, SerializeSessionDescriptionWithDataChannelAndBandwidth) {
+  AddRtpDataChannel();
+  data_desc_->set_bandwidth(100*1000);
+  JsepSessionDescription jsep_desc(kDummyString);
+
+  ASSERT_TRUE(jsep_desc.Initialize(desc_.Copy(), kSessionId, kSessionVersion));
+  std::string message = webrtc::SdpSerialize(jsep_desc);
+
+  std::string expected_sdp = kSdpString;
+  expected_sdp.append(kSdpRtpDataChannelString);
+  // We want to test that serializing data content ignores bandwidth
+  // settings (it should always be the default). Thus, we don't do
+  // the following:
+  // InjectAfter("a=mid:data_content_name\r\n",
+  //             "b=AS:100\r\n",
+  //             &expected_sdp);
+  EXPECT_EQ(message, expected_sdp);
+}
+
 TEST_F(WebRtcSdpTest, SerializeSessionDescriptionWithExtmap) {
   AddExtmap();
   JsepSessionDescription desc_with_extmap("dummy");
@@ -1739,6 +1758,29 @@ TEST_F(WebRtcSdpTest, DeserializeSdpWithSctpDataChannels) {
   EXPECT_TRUE(CompareSessionDescription(jdesc, jdesc_output));
 }
 
+TEST_F(WebRtcSdpTest, DeserializeSdpWithRtpDataChannelsAndBandwidth) {
+  AddRtpDataChannel();
+  JsepSessionDescription jdesc(kDummyString);
+  // We want to test that deserializing data content ignores bandwidth
+  // settings (it should always be the default). Thus, we don't do
+  // the following:
+  // DataContentDescription* dcd = static_cast<DataContentDescription*>(
+  //     GetFirstDataContent(&desc_)->description);
+  // dcd->set_bandwidth(100 * 1000);
+  ASSERT_TRUE(jdesc.Initialize(desc_.Copy(), kSessionId, kSessionVersion));
+
+  std::string sdp_with_bandwidth = kSdpString;
+  sdp_with_bandwidth.append(kSdpRtpDataChannelString);
+  InjectAfter("a=mid:data_content_name\r\n",
+              "b=AS:100\r\n",
+              &sdp_with_bandwidth);
+  JsepSessionDescription jdesc_with_bandwidth(kDummyString);
+
+  EXPECT_TRUE(
+      SdpDeserialize(sdp_with_bandwidth, &jdesc_with_bandwidth));
+  EXPECT_TRUE(CompareSessionDescription(jdesc, jdesc_with_bandwidth));
+}
+
 TEST_F(WebRtcSdpTest, DeserializeSessionDescriptionWithSessionLevelExtmap) {
   TestDeserializeExtmap(true, false);
 }
@@ -938,7 +938,6 @@ talk_base::scoped_refptr<DataChannel> WebRtcSession::CreateDataChannel(
   if (data_channel_.get()) {
     channel->SetReceiveSsrc(new_config.id);
     channel->SetSendSsrc(new_config.id);
-    channel->ConnectToDataSession();
   }
   if (!config->negotiated) {
     talk_base::Buffer *payload = new talk_base::Buffer;
@@ -2484,7 +2484,7 @@ TEST_F(WebRtcSessionTest, TestRtpDataChannelConstraintTakesPrecedence) {
       webrtc::MediaConstraintsInterface::kEnableSctpDataChannels, true);
   constraints_->AddOptional(
       webrtc::MediaConstraintsInterface::kEnableDtlsSrtp, true);
-  Init(NULL);
+  Init(new FakeIdentityService());
 
   SetLocalDescriptionWithDataChannel();
   EXPECT_EQ(cricket::DCT_RTP, data_engine_->last_channel_type());
@@ -2508,7 +2508,7 @@ TEST_F(WebRtcSessionTest, TestSctpDataChannelWithDtls) {
       webrtc::MediaConstraintsInterface::kEnableSctpDataChannels, true);
   constraints_->AddOptional(
       webrtc::MediaConstraintsInterface::kEnableDtlsSrtp, true);
-  Init(NULL);
+  Init(new FakeIdentityService());
 
   SetLocalDescriptionWithDataChannel();
   EXPECT_EQ(cricket::DCT_SCTP, data_engine_->last_channel_type());
@@ -864,8 +864,6 @@ class FakeVideoEngine : public FakeBaseEngine {
     renderer_ = r;
     return true;
   }
-  bool SetVideoCapturer(VideoCapturer* /*capturer*/) { return true; }
-  VideoCapturer* GetVideoCapturer() const { return NULL; }
   bool SetCapture(bool capture) {
     capture_ = capture;
     return true;
@@ -28,6 +28,7 @@
 #ifndef TALK_MEDIA_BASE_FAKEVIDEORENDERER_H_
 #define TALK_MEDIA_BASE_FAKEVIDEORENDERER_H_
 
+#include "talk/base/logging.h"
 #include "talk/base/sigslot.h"
 #include "talk/media/base/videoframe.h"
 #include "talk/media/base/videorenderer.h"
@@ -62,6 +63,13 @@ class FakeVideoRenderer : public VideoRenderer {
     if (!frame ||
         frame->GetWidth() != static_cast<size_t>(width_) ||
         frame->GetHeight() != static_cast<size_t>(height_)) {
+      if (!frame) {
+        LOG(LS_WARNING) << "RenderFrame expected non-null frame.";
+      } else {
+        LOG(LS_WARNING) << "RenderFrame expected frame of size " << width_
+                        << "x" << height_ << " but received frame of size "
+                        << frame->GetWidth() << "x" << frame->GetHeight();
+      }
       ++errors_;
       return false;
     }
@@ -236,15 +236,18 @@ struct VideoOptions {
     adapt_input_to_cpu_usage.SetFrom(change.adapt_input_to_cpu_usage);
     adapt_cpu_with_smoothing.SetFrom(change.adapt_cpu_with_smoothing);
     adapt_view_switch.SetFrom(change.adapt_view_switch);
+    video_adapt_third.SetFrom(change.video_adapt_third);
     video_noise_reduction.SetFrom(change.video_noise_reduction);
     video_three_layers.SetFrom(change.video_three_layers);
     video_enable_camera_list.SetFrom(change.video_enable_camera_list);
     video_one_layer_screencast.SetFrom(change.video_one_layer_screencast);
+    video_one_to_one.SetFrom(change.video_one_to_one);
     video_high_bitrate.SetFrom(change.video_high_bitrate);
     video_watermark.SetFrom(change.video_watermark);
     video_temporal_layer_screencast.SetFrom(
         change.video_temporal_layer_screencast);
     video_leaky_bucket.SetFrom(change.video_leaky_bucket);
+    cpu_overuse_detection.SetFrom(change.cpu_overuse_detection);
     conference_mode.SetFrom(change.conference_mode);
     process_adaptation_threshhold.SetFrom(change.process_adaptation_threshhold);
     system_low_adaptation_threshhold.SetFrom(
@@ -259,14 +262,17 @@ struct VideoOptions {
     adapt_input_to_cpu_usage == o.adapt_input_to_cpu_usage &&
     adapt_cpu_with_smoothing == o.adapt_cpu_with_smoothing &&
     adapt_view_switch == o.adapt_view_switch &&
+    video_adapt_third == o.video_adapt_third &&
     video_noise_reduction == o.video_noise_reduction &&
     video_three_layers == o.video_three_layers &&
    video_enable_camera_list == o.video_enable_camera_list &&
     video_one_layer_screencast == o.video_one_layer_screencast &&
+    video_one_to_one == o.video_one_to_one &&
     video_high_bitrate == o.video_high_bitrate &&
     video_watermark == o.video_watermark &&
     video_temporal_layer_screencast == o.video_temporal_layer_screencast &&
     video_leaky_bucket == o.video_leaky_bucket &&
+    cpu_overuse_detection == o.cpu_overuse_detection &&
     conference_mode == o.conference_mode &&
     process_adaptation_threshhold == o.process_adaptation_threshhold &&
     system_low_adaptation_threshhold ==
@@ -283,16 +289,18 @@ struct VideoOptions {
     ost << ToStringIfSet("cpu adaption", adapt_input_to_cpu_usage);
     ost << ToStringIfSet("cpu adaptation smoothing", adapt_cpu_with_smoothing);
     ost << ToStringIfSet("adapt view switch", adapt_view_switch);
+    ost << ToStringIfSet("video adapt third", video_adapt_third);
     ost << ToStringIfSet("noise reduction", video_noise_reduction);
     ost << ToStringIfSet("3 layers", video_three_layers);
     ost << ToStringIfSet("camera list", video_enable_camera_list);
-    ost << ToStringIfSet("1 layer screencast",
-                         video_one_layer_screencast);
+    ost << ToStringIfSet("1 layer screencast", video_one_layer_screencast);
+    ost << ToStringIfSet("1 to 1", video_one_to_one);
     ost << ToStringIfSet("high bitrate", video_high_bitrate);
     ost << ToStringIfSet("watermark", video_watermark);
     ost << ToStringIfSet("video temporal layer screencast",
                          video_temporal_layer_screencast);
     ost << ToStringIfSet("leaky bucket", video_leaky_bucket);
+    ost << ToStringIfSet("cpu overuse detection", cpu_overuse_detection);
     ost << ToStringIfSet("conference mode", conference_mode);
     ost << ToStringIfSet("process", process_adaptation_threshhold);
     ost << ToStringIfSet("low", system_low_adaptation_threshhold);
@@ -310,6 +318,8 @@ struct VideoOptions {
   Settable<bool> adapt_cpu_with_smoothing;
   // Enable Adapt View Switch?
   Settable<bool> adapt_view_switch;
+  // Enable video adapt third?
+  Settable<bool> video_adapt_third;
   // Enable denoising?
   Settable<bool> video_noise_reduction;
   // Experimental: Enable multi layer?
@@ -318,6 +328,8 @@ struct VideoOptions {
   Settable<bool> video_enable_camera_list;
   // Experimental: Enable one layer screencast?
   Settable<bool> video_one_layer_screencast;
+  // Experimental: Enable one to one?
+  Settable<bool> video_one_to_one;
   // Experimental: Enable WebRtc higher bitrate?
   Settable<bool> video_high_bitrate;
   // Experimental: Add watermark to the rendered video image.
@@ -326,6 +338,10 @@ struct VideoOptions {
   Settable<bool> video_temporal_layer_screencast;
   // Enable WebRTC leaky bucket when sending media packets.
   Settable<bool> video_leaky_bucket;
+  // Enable WebRTC Cpu Overuse Detection, which is a new version of the CPU
+  // adaptation algorithm. So this option will override the
+  // |adapt_input_to_cpu_usage|.
+  Settable<bool> cpu_overuse_detection;
   // Use conference mode?
   Settable<bool> conference_mode;
   // Threshhold for process cpu adaptation. (Process limit)
@@ -28,8 +28,16 @@
 #ifndef TALK_MEDIA_BASE_TESTUTILS_H_
 #define TALK_MEDIA_BASE_TESTUTILS_H_
 
+#ifdef LINUX
+#include <X11/Xlib.h>
+// X defines a few macros that stomp on types that gunit.h uses.
+#undef None
+#undef Bool
+#endif
+
 #include <string>
 #include <vector>
 
 #if !defined(DISABLE_YUV)
 #include "libyuv/compare.h"
 #endif
@@ -237,6 +245,37 @@ bool ContainsMatchingCodec(const std::vector<C>& codecs, const C& codec) {
   return false;
 }
 
+#define MAYBE_SKIP_SCREENCAST_TEST() \
+  if (!cricket::IsScreencastingAvailable()) { \
+    LOG(LS_WARNING) << "Skipping test, since it doesn't have the requisite " \
+                    << "X environment for screen capture."; \
+    return; \
+  } \
+
+#ifdef LINUX
+struct XDisplay {
+  XDisplay() : display_(XOpenDisplay(NULL)) { }
+  ~XDisplay() { if (display_) XCloseDisplay(display_); }
+  bool IsValid() const { return display_ != NULL; }
+  operator Display*() { return display_; }
+ private:
+  Display* display_;
+};
+#endif
+
+// Returns true if screencasting is available. When false, anything that uses
+// screencasting features may fail.
+inline bool IsScreencastingAvailable() {
+#ifdef LINUX
+  XDisplay display;
+  if (!display.IsValid()) {
+    LOG(LS_WARNING) << "No X Display available.";
+    return false;
+  }
+#endif
+  return true;
+}
+
 }  // namespace cricket
 
 #endif  // TALK_MEDIA_BASE_TESTUTILS_H_
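The MAYBE_SKIP_SCREENCAST_TEST() macro above is meant to be dropped at the top of a test body so the test bails out early on machines with no X display. A minimal usage sketch (the test name and body are hypothetical, not part of the patch):

    // Hypothetical test; TEST comes from gunit.h and the macro from the
    // testutils.h hunk above.
    TEST(ScreencastTest, CapturesPrimaryScreen) {
      MAYBE_SKIP_SCREENCAST_TEST();  // Returns early if no X environment.
      // ... exercise screencast capture here ...
    }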
@@ -45,86 +45,100 @@ static const float kCpuLoadWeightCoefficient = 0.4f;
 // The seed value for the cpu load moving average.
 static const float kCpuLoadInitialAverage = 0.5f;
 
 // TODO(fbarchard): Consider making scale factor table settable, to allow
 // application to select quality vs performance tradeoff.
 // TODO(fbarchard): Add framerate scaling to tables for 1/2 framerate.
 // List of scale factors that adapter will scale by.
 #if defined(IOS) || defined(ANDROID)
 // Mobile needs 1/4 scale for VGA (640 x 360) to QQVGA (160 x 90)
 // or 1/4 scale for HVGA (480 x 270) to QQHVGA (120 x 67)
 static const int kMinNumPixels = 120 * 67;
 static float kScaleFactors[] = {
   1.f / 1.f,  // Full size.
   3.f / 4.f,  // 3/4 scale.
   1.f / 2.f,  // 1/2 scale.
   3.f / 8.f,  // 3/8 scale.
   1.f / 4.f,  // 1/4 scale.
 };
 #else
 // Desktop needs 1/8 scale for HD (1280 x 720) to QQVGA (160 x 90)
 static const int kMinNumPixels = 160 * 100;
-static float kScaleFactors[] = {
-  1.f / 1.f,  // Full size.
-  3.f / 4.f,  // 3/4 scale.
-  1.f / 2.f,  // 1/2 scale.
-  3.f / 8.f,  // 3/8 scale.
-  1.f / 4.f,  // 1/4 scale.
+static const float kScaleFactors[] = {
+  1.f / 1.f,  // Full size.
+  3.f / 4.f,  // 3/4 scale.
+  1.f / 2.f,  // 1/2 scale.
+  3.f / 8.f,  // 3/8 scale.
+  1.f / 4.f,  // 1/4 scale.
   3.f / 16.f,  // 3/16 scale.
-  1.f / 8.f  // 1/8 scale.
+  1.f / 8.f,  // 1/8 scale.
+  0.f  // End of table.
 };
 #endif
 
-static const int kNumScaleFactors = ARRAY_SIZE(kScaleFactors);
+// TODO(fbarchard): Use this table (optionally) for CPU and GD as well.
+static const float kViewScaleFactors[] = {
+  1.f / 1.f,  // Full size.
+  3.f / 4.f,  // 3/4 scale.
+  2.f / 3.f,  // 2/3 scale.  // Allow 1080p to 720p.
+  1.f / 2.f,  // 1/2 scale.
+  3.f / 8.f,  // 3/8 scale.
+  1.f / 3.f,  // 1/3 scale.  // Allow 1080p to 360p.
+  1.f / 4.f,  // 1/4 scale.
+  3.f / 16.f,  // 3/16 scale.
+  1.f / 8.f,  // 1/8 scale.
+  0.f  // End of table.
+};
+
+const float* VideoAdapter::GetViewScaleFactors() const {
+  return scale_third_ ? kViewScaleFactors : kScaleFactors;
+}
+
 // For resolutions that would scale down a little instead of up a little,
 // bias toward scaling up a little. This will tend to choose 3/4 scale instead
 // of 2/3 scale, when the 2/3 is not an exact match.
 static const float kUpBias = -0.9f;
 // Find the scale factor that, when applied to width and height, is closest
 // to num_pixels.
-float VideoAdapter::FindClosestScale(int width, int height,
-                                     int target_num_pixels) {
+float VideoAdapter::FindScale(const float* scale_factors,
+                              const float upbias,
+                              int width, int height,
+                              int target_num_pixels) {
+  const float kMinNumPixels = 160 * 90;
   if (!target_num_pixels) {
     return 0.f;
   }
-  int best_distance = INT_MAX;
-  int best_index = kNumScaleFactors - 1;  // Default to max scale.
-  for (int i = 0; i < kNumScaleFactors; ++i) {
-    int test_num_pixels = static_cast<int>(width * kScaleFactors[i] *
-                                           height * kScaleFactors[i]);
-    int diff = test_num_pixels - target_num_pixels;
+  float best_distance = static_cast<float>(INT_MAX);
+  float best_scale = 1.f;  // Default to unscaled if nothing matches.
+  float pixels = static_cast<float>(width * height);
+  for (int i = 0; ; ++i) {
+    float scale = scale_factors[i];
+    float test_num_pixels = pixels * scale * scale;
+    // Do not consider scale factors that produce too small images.
+    // Scale factor of 0 at end of table will also exit here.
+    if (test_num_pixels < kMinNumPixels) {
+      break;
+    }
+    float diff = target_num_pixels - test_num_pixels;
+    // If resolution is higher than desired, bias the difference based on
+    // preference for slightly larger for nearest, or avoid completely if
+    // looking for lower resolutions only.
     if (diff < 0) {
-      diff = -diff;
+      diff = diff * upbias;
     }
     if (diff < best_distance) {
       best_distance = diff;
-      best_index = i;
+      best_scale = scale;
       if (best_distance == 0) {  // Found exact match.
         break;
       }
     }
   }
-  return kScaleFactors[best_index];
+  return best_scale;
 }
 
+// Find the closest scale factor.
+float VideoAdapter::FindClosestScale(int width, int height,
+                                     int target_num_pixels) {
+  return FindScale(kScaleFactors, kUpBias,
+                   width, height, target_num_pixels);
+}
+
+// Find the closest view scale factor.
+float VideoAdapter::FindClosestViewScale(int width, int height,
+                                         int target_num_pixels) {
+  return FindScale(GetViewScaleFactors(), kUpBias,
+                   width, height, target_num_pixels);
+}
+
 // Finds the scale factor that, when applied to width and height, produces
 // fewer than num_pixels.
+static const float kUpAvoidBias = -1000000000.f;
 float VideoAdapter::FindLowerScale(int width, int height,
                                    int target_num_pixels) {
-  if (!target_num_pixels) {
-    return 0.f;
-  }
-  int best_distance = INT_MAX;
-  int best_index = kNumScaleFactors - 1;  // Default to max scale.
-  for (int i = 0; i < kNumScaleFactors; ++i) {
-    int test_num_pixels = static_cast<int>(width * kScaleFactors[i] *
-                                           height * kScaleFactors[i]);
-    int diff = target_num_pixels - test_num_pixels;
-    if (diff >= 0 && diff < best_distance) {
-      best_distance = diff;
-      best_index = i;
-      if (best_distance == 0) {  // Found exact match.
-        break;
-      }
-    }
-  }
-  return kScaleFactors[best_index];
+  return FindScale(GetViewScaleFactors(), kUpAvoidBias,
+                   width, height, target_num_pixels);
 }
 
 // There are several frame sizes used by Adapter. This explains them
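To see how the negative bias steers the table search between two near-miss entries, here is a small self-contained sketch of the same loop (hypothetical numbers and driver code, not from the patch). A candidate that overshoots the target has its negative diff flipped into a positive penalty that is only 0.9x the overshoot, so "slightly larger" wins ties against "much smaller":

    #include <climits>
    #include <cstdio>

    int main() {
      const float kScales[] = { 1.f, 3.f / 4.f, 2.f / 3.f, 1.f / 2.f, 0.f };
      const float kUpBias = -0.9f;        // Prefer slightly larger.
      const float kMinNumPixels = 160 * 90;
      const int width = 1280, height = 720;
      const int target = 550000;          // Between the 2/3 and 3/4 entries.

      float best_distance = static_cast<float>(INT_MAX);
      float best_scale = 1.f;
      const float pixels = static_cast<float>(width * height);
      for (int i = 0; ; ++i) {
        const float scale = kScales[i];
        const float test_num_pixels = pixels * scale * scale;
        if (test_num_pixels < kMinNumPixels) {
          break;  // The 0.f sentinel (or a too-small image) ends the table.
        }
        float diff = target - test_num_pixels;
        if (diff < 0) {
          diff = diff * kUpBias;  // Overshoot penalized at only 0.9x.
        }
        if (diff < best_distance) {
          best_distance = diff;
          best_scale = scale;
        }
      }
      // 3/4 scale gives 518400 px (undershoot 31600); 2/3 gives 409600
      // (undershoot 140400); full size overshoots by 371600 (biased to
      // 334440). So 3/4 wins.
      std::printf("best scale: %f\n", best_scale);
      return 0;
    }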
@@ -147,6 +161,12 @@ float VideoAdapter::FindLowerScale(int width, int height,
 // Implementation of VideoAdapter
 VideoAdapter::VideoAdapter()
     : output_num_pixels_(INT_MAX),
+      scale_third_(false),
+      frames_(0),
+      adapted_frames_(0),
+      adaption_changes_(0),
+      previous_width(0),
+      previous_height(0),
       black_output_(false),
       is_black_(false),
       interval_next_frame_(0) {
@@ -208,6 +228,7 @@ bool VideoAdapter::AdaptFrame(const VideoFrame* in_frame,
   if (!in_frame || !out_frame) {
     return false;
   }
+  ++frames_;
 
   // Update input to actual frame dimensions.
   SetInputFormat(*in_frame);
@@ -236,8 +257,9 @@ bool VideoAdapter::AdaptFrame(const VideoFrame* in_frame,
     return true;
   }
 
+  float scale = 1.f;
   if (output_num_pixels_) {
-    float scale = VideoAdapter::FindClosestScale(
+    scale = VideoAdapter::FindClosestViewScale(
         static_cast<int>(in_frame->GetWidth()),
         static_cast<int>(in_frame->GetHeight()),
         output_num_pixels_);
@@ -251,9 +273,45 @@ bool VideoAdapter::AdaptFrame(const VideoFrame* in_frame,
   }
 
   *out_frame = output_frame_.get();
+
+  // Show VAdapt log every 300 frames. (10 seconds)
+  // TODO(fbarchard): Consider GetLogSeverity() to change interval to less
+  // for LS_VERBOSE and more for LS_INFO.
+  bool show = frames_ % 300 == 0;
+  if (in_frame->GetWidth() != (*out_frame)->GetWidth() ||
+      in_frame->GetHeight() != (*out_frame)->GetHeight()) {
+    ++adapted_frames_;
+  }
+  // TODO(fbarchard): LOG the previous output resolution and track input
+  // resolution changes as well. Consider dropping the statistics into their
+  // own class which could be queried publically.
+  bool changed = false;
+  if (previous_width && (previous_width != (*out_frame)->GetWidth() ||
+      previous_height != (*out_frame)->GetHeight())) {
+    show = true;
+    ++adaption_changes_;
+    changed = true;
+  }
+  if (show) {
+    // TODO(fbarchard): Reduce to LS_VERBOSE when adapter info is not needed
+    // in default calls.
+    LOG(LS_INFO) << "VAdapt Frame: " << adapted_frames_
+                 << " / " << frames_
+                 << " Changes: " << adaption_changes_
+                 << " Input: " << in_frame->GetWidth()
+                 << "x" << in_frame->GetHeight()
+                 << " Scale: " << scale
+                 << " Output: " << (*out_frame)->GetWidth()
+                 << "x" << (*out_frame)->GetHeight()
+                 << " Changed: " << (changed ? "true" : "false");
+  }
+  previous_width = (*out_frame)->GetWidth();
+  previous_height = (*out_frame)->GetHeight();
+
   return true;
 }
 
 // Scale or Blacken the frame. Returns true if successful.
 bool VideoAdapter::StretchToOutputFrame(const VideoFrame* in_frame) {
   int output_width = output_format_.width;
   int output_height = output_format_.height;
@@ -409,37 +467,12 @@ void CoordinatedVideoAdapter::OnEncoderResolutionRequest(
                << " To: " << new_width << "x" << new_height;
 }
 
-// A CPU request for new resolution
-void CoordinatedVideoAdapter::OnCpuLoadUpdated(
-    int current_cpus, int max_cpus, float process_load, float system_load) {
+// A Bandwidth GD request for new resolution
+void CoordinatedVideoAdapter::OnCpuResolutionRequest(AdaptRequest request) {
   talk_base::CritScope cs(&request_critical_section_);
   if (!cpu_adaptation_) {
     return;
   }
-  // Update the moving average of system load. Even if we aren't smoothing,
-  // we'll still calculate this information, in case smoothing is later enabled.
-  system_load_average_ = kCpuLoadWeightCoefficient * system_load +
-      (1.0f - kCpuLoadWeightCoefficient) * system_load_average_;
-  if (cpu_smoothing_) {
-    system_load = system_load_average_;
-  }
-  // If we haven't started taking samples yet, wait until we have at least
-  // the correct number of samples per the wait time.
-  if (cpu_adapt_wait_time_ == 0) {
-    cpu_adapt_wait_time_ = talk_base::TimeAfter(kCpuLoadMinSampleTime);
-  }
-  AdaptRequest request = FindCpuRequest(current_cpus, max_cpus,
-                                        process_load, system_load);
-  // Make sure we're not adapting too quickly.
-  if (request != KEEP) {
-    if (talk_base::TimeIsLater(talk_base::Time(),
-                               cpu_adapt_wait_time_)) {
-      LOG(LS_VERBOSE) << "VAdapt CPU load high/low but do not adapt until "
-                      << talk_base::TimeUntil(cpu_adapt_wait_time_) << " ms";
-      request = KEEP;
-    }
-  }
 
   // Update how many times we have downgraded due to the cpu load.
   switch (request) {
     case DOWNGRADE:
@@ -482,13 +515,46 @@ void CoordinatedVideoAdapter::OnCpuLoadUpdated(
     LOG(LS_INFO) << "VAdapt CPU Request: "
                  << (DOWNGRADE == request ? "down" :
                      (UPGRADE == request ? "up" : "keep"))
-                 << " Process: " << process_load
-                 << " System: " << system_load
                  << " Steps: " << cpu_downgrade_count_
                  << " Changed: " << (changed ? "true" : "false")
                  << " To: " << new_width << "x" << new_height;
 }
 
+// A CPU request for new resolution
+// TODO(fbarchard): Move outside adapter.
+void CoordinatedVideoAdapter::OnCpuLoadUpdated(
+    int current_cpus, int max_cpus, float process_load, float system_load) {
+  talk_base::CritScope cs(&request_critical_section_);
+  if (!cpu_adaptation_) {
+    return;
+  }
+  // Update the moving average of system load. Even if we aren't smoothing,
+  // we'll still calculate this information, in case smoothing is later enabled.
+  system_load_average_ = kCpuLoadWeightCoefficient * system_load +
+      (1.0f - kCpuLoadWeightCoefficient) * system_load_average_;
+  if (cpu_smoothing_) {
+    system_load = system_load_average_;
+  }
+  // If we haven't started taking samples yet, wait until we have at least
+  // the correct number of samples per the wait time.
+  if (cpu_adapt_wait_time_ == 0) {
+    cpu_adapt_wait_time_ = talk_base::TimeAfter(kCpuLoadMinSampleTime);
+  }
+  AdaptRequest request = FindCpuRequest(current_cpus, max_cpus,
+                                        process_load, system_load);
+  // Make sure we're not adapting too quickly.
+  if (request != KEEP) {
+    if (talk_base::TimeIsLater(talk_base::Time(),
+                               cpu_adapt_wait_time_)) {
+      LOG(LS_VERBOSE) << "VAdapt CPU load high/low but do not adapt until "
+                      << talk_base::TimeUntil(cpu_adapt_wait_time_) << " ms";
+      request = KEEP;
+    }
+  }
+
+  OnCpuResolutionRequest(request);
+}
+
 // Called by cpu adapter on up requests.
 bool CoordinatedVideoAdapter::IsMinimumFormat(int pixels) {
   // Find closest scale factor that matches input resolution to min_num_pixels
@@ -522,51 +588,46 @@ bool CoordinatedVideoAdapter::AdaptToMinimumFormat(int* new_width,
     input = new_output;
   }
   int old_num_pixels = GetOutputNumPixels();
-  // Find resolution that respects ViewRequest or less pixels.
-  int view_desired_num_pixels = view_desired_num_pixels_;
-  int min_num_pixels = view_desired_num_pixels_;
-  if (!input.IsSize0x0()) {
-    float scale = FindLowerScale(input.width, input.height, min_num_pixels);
-    min_num_pixels = view_desired_num_pixels =
-        static_cast<int>(input.width * input.height * scale * scale + .5f);
-  }
-  // Reduce resolution further, if necessary, based on encoder bandwidth (GD).
+  int min_num_pixels = INT_MAX;
+  adapt_reason_ = 0;
+
+  // Reduce resolution based on encoder bandwidth (GD).
   if (encoder_desired_num_pixels_ &&
       (encoder_desired_num_pixels_ < min_num_pixels)) {
+    adapt_reason_ |= ADAPTREASON_BANDWIDTH;
     min_num_pixels = encoder_desired_num_pixels_;
   }
-  // Reduce resolution further, if necessary, based on CPU.
+  // Reduce resolution based on CPU.
   if (cpu_adaptation_ && cpu_desired_num_pixels_ &&
-      (cpu_desired_num_pixels_ < min_num_pixels)) {
+      (cpu_desired_num_pixels_ <= min_num_pixels)) {
+    if (cpu_desired_num_pixels_ < min_num_pixels) {
+      adapt_reason_ = ADAPTREASON_CPU;
+    } else {
+      adapt_reason_ |= ADAPTREASON_CPU;
+    }
     min_num_pixels = cpu_desired_num_pixels_;
   }
-
-  // Determine which factors are keeping adapter resolution low.
-  // Caveat: Does not consider framerate.
-  adapt_reason_ = static_cast<AdaptReason>(0);
-  if (view_desired_num_pixels == min_num_pixels) {
-    adapt_reason_ |= ADAPTREASON_VIEW;
+  // Round resolution for GD or CPU to allow 1/2 to map to 9/16.
+  if (!input.IsSize0x0() && min_num_pixels != INT_MAX) {
+    float scale = FindClosestScale(input.width, input.height, min_num_pixels);
+    min_num_pixels = static_cast<int>(input.width * scale + .5f) *
+        static_cast<int>(input.height * scale + .5f);
   }
-  if (encoder_desired_num_pixels_ == min_num_pixels) {
-    adapt_reason_ |= ADAPTREASON_BANDWIDTH;
+  // Reduce resolution based on View Request.
+  if (view_desired_num_pixels_ <= min_num_pixels) {
+    if (view_desired_num_pixels_ < min_num_pixels) {
+      adapt_reason_ = ADAPTREASON_VIEW;
+    } else {
+      adapt_reason_ |= ADAPTREASON_VIEW;
+    }
+    min_num_pixels = view_desired_num_pixels_;
   }
-  if (cpu_desired_num_pixels_ == min_num_pixels) {
-    adapt_reason_ |= ADAPTREASON_CPU;
-  }
-
   // Prevent going below QQVGA.
   if (min_num_pixels > 0 && min_num_pixels < kMinNumPixels) {
     min_num_pixels = kMinNumPixels;
   }
-  SetOutputNumPixels(min_num_pixels);
-
-  // Find closest scale factor that matches input resolution to min_num_pixels
-  // and set that for output resolution. This is not needed for VideoAdapter,
-  // but provides feedback to unittests and users on expected resolution.
-  // Actual resolution is based on input frame.
+  // Snap to a scale factor.
   float scale = 1.0f;
   if (!input.IsSize0x0()) {
-    scale = FindClosestScale(input.width, input.height, min_num_pixels);
+    scale = FindLowerScale(input.width, input.height, min_num_pixels);
+    min_num_pixels = static_cast<int>(input.width * scale + .5f) *
+        static_cast<int>(input.height * scale + .5f);
   }
   if (scale == 1.0f) {
     adapt_reason_ = 0;
@@ -574,6 +635,8 @@ bool CoordinatedVideoAdapter::AdaptToMinimumFormat(int* new_width,
   *new_width = new_output.width = static_cast<int>(input.width * scale + .5f);
   *new_height = new_output.height = static_cast<int>(input.height * scale +
       .5f);
+  SetOutputNumPixels(min_num_pixels);
+
   new_output.interval = view_desired_interval_;
   SetOutputFormat(new_output);
   int new_num_pixels = GetOutputNumPixels();
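The reworked AdaptToMinimumFormat above applies a "lowest request wins; a tie shares the blame" rule across the bandwidth, CPU, and view pixel requests. A compact sketch of just that bookkeeping, omitting the rounding step between checks (illustrative names and values, not code from the patch):

    #include <climits>
    #include <cstdio>

    enum { ADAPTREASON_BANDWIDTH = 1, ADAPTREASON_CPU = 2, ADAPTREASON_VIEW = 4 };

    int main() {
      // Hypothetical pixel requests from the three adaptation sources.
      const int encoder_pixels = 640 * 360;
      const int cpu_pixels = 640 * 360;
      const int view_pixels = 1280 * 720;

      int min_pixels = INT_MAX;
      int reason = 0;
      const struct { int pixels; int flag; } requests[] = {
        { encoder_pixels, ADAPTREASON_BANDWIDTH },
        { cpu_pixels, ADAPTREASON_CPU },
        { view_pixels, ADAPTREASON_VIEW },
      };
      for (int i = 0; i < 3; ++i) {
        if (requests[i].pixels < min_pixels) {
          min_pixels = requests[i].pixels;
          reason = requests[i].flag;   // Strictly lower: sole limiting factor.
        } else if (requests[i].pixels == min_pixels) {
          reason |= requests[i].flag;  // Tie: both factors are limiting.
        }
      }
      // Bandwidth and CPU tie at 640x360 here, so reason has both bits set.
      std::printf("min=%d reason=0x%x\n", min_pixels, reason);
      return 0;
    }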
@@ -65,16 +65,34 @@ class VideoAdapter {
   // the output frame.
   bool AdaptFrame(const VideoFrame* in_frame, const VideoFrame** out_frame);
 
+  void set_scale_third(bool enable) {
+    LOG(LS_INFO) << "Video Adapter third scaling is now "
+                 << (enable ? "enabled" : "disabled");
+    scale_third_ = enable;
+  }
+  bool scale_third() const { return scale_third_; }
+
  protected:
   float FindClosestScale(int width, int height, int target_num_pixels);
+  float FindClosestViewScale(int width, int height, int target_num_pixels);
   float FindLowerScale(int width, int height, int target_num_pixels);
 
  private:
+  const float* GetViewScaleFactors() const;
+  float FindScale(const float* scale_factors,
+                  const float upbias, int width, int height,
+                  int target_num_pixels);
   bool StretchToOutputFrame(const VideoFrame* in_frame);
 
   VideoFormat input_format_;
   VideoFormat output_format_;
   int output_num_pixels_;
+  bool scale_third_;  // True if adapter allows scaling to 1/3 and 2/3.
+  int frames_;  // Number of input frames.
+  int adapted_frames_;  // Number of frames scaled.
+  int adaption_changes_;  // Number of changes in scale factor.
+  size_t previous_width;  // Previous adapter output width.
+  size_t previous_height;  // Previous adapter output height.
   bool black_output_;  // Flag to tell if we need to black output_frame_.
   bool is_black_;  // Flag to tell if output_frame_ is currently black.
   int64 interval_next_frame_;
@@ -176,6 +194,8 @@ class CoordinatedVideoAdapter
   void OnOutputFormatRequest(const VideoFormat& format);
   // Handle the resolution request from the encoder due to bandwidth changes.
   void OnEncoderResolutionRequest(int width, int height, AdaptRequest request);
+  // Handle the resolution request for CPU overuse.
+  void OnCpuResolutionRequest(AdaptRequest request);
   // Handle the CPU load provided by a CPU monitor.
   void OnCpuLoadUpdated(int current_cpus, int max_cpus,
                         float process_load, float system_load);
@@ -107,6 +107,7 @@ void VideoCapturer::Construct() {
   SignalFrameCaptured.connect(this, &VideoCapturer::OnFrameCaptured);
   scaled_width_ = 0;
   scaled_height_ = 0;
+  screencast_max_pixels_ = 0;
   muted_ = false;
   black_frame_count_down_ = kNumBlackFramesOnMute;
 }
@@ -323,11 +324,16 @@ void VideoCapturer::OnFrameCaptured(VideoCapturer*,
 #if !defined(DISABLE_YUV)
   if (IsScreencast()) {
     int scaled_width, scaled_height;
-    int desired_screencast_fps = capture_format_.get() ?
-        VideoFormat::IntervalToFps(capture_format_->interval) :
-        kDefaultScreencastFps;
-    ComputeScale(captured_frame->width, captured_frame->height,
-                 desired_screencast_fps, &scaled_width, &scaled_height);
+    if (screencast_max_pixels_ > 0) {
+      ComputeScaleMaxPixels(captured_frame->width, captured_frame->height,
+          screencast_max_pixels_, &scaled_width, &scaled_height);
+    } else {
+      int desired_screencast_fps = capture_format_.get() ?
+          VideoFormat::IntervalToFps(capture_format_->interval) :
+          kDefaultScreencastFps;
+      ComputeScale(captured_frame->width, captured_frame->height,
+                   desired_screencast_fps, &scaled_width, &scaled_height);
+    }
 
     if (scaled_width != scaled_width_ || scaled_height != scaled_height_) {
       LOG(LS_VERBOSE) << "Scaling Screencast from "
@@ -254,6 +254,17 @@ class VideoCapturer
 
   const VideoProcessors& video_processors() const { return video_processors_; }
 
+  // If 'screencast_max_pixels' is set greater than zero, screencasts will be
+  // scaled to be no larger than this value.
+  // If set to zero, the max pixels will be limited to
+  // Retina MacBookPro 15" resolution of 2880 x 1800.
+  // For high fps, maximum pixels limit is set based on common 24" monitor
+  // resolution of 2048 x 1280.
+  int screencast_max_pixels() const { return screencast_max_pixels_; }
+  void set_screencast_max_pixels(int p) {
+    screencast_max_pixels_ = talk_base::_max(0, p);
+  }
+
  protected:
   // Callback attached to SignalFrameCaptured where SignalVideoFrames is called.
   void OnFrameCaptured(VideoCapturer* video_capturer,
@@ -313,6 +324,7 @@ class VideoCapturer
   bool enable_camera_list_;
   int scaled_width_;  // Current output size from ComputeScale.
   int scaled_height_;
+  int screencast_max_pixels_;  // Downscale screencasts further if requested.
   bool muted_;
   int black_frame_count_down_;
@@ -194,6 +194,39 @@ TEST_F(VideoCapturerTest, CameraOffOnMute) {
   EXPECT_EQ(33, video_frames_received());
 }
 
+TEST_F(VideoCapturerTest, ScreencastScaledMaxPixels) {
+  capturer_.SetScreencast(true);
+
+  int kWidth = 1280;
+  int kHeight = 720;
+
+  // Screencasts usually have large weird dimensions and are ARGB.
+  std::vector<cricket::VideoFormat> formats;
+  formats.push_back(cricket::VideoFormat(kWidth, kHeight,
+      cricket::VideoFormat::FpsToInterval(5), cricket::FOURCC_ARGB));
+  formats.push_back(cricket::VideoFormat(2 * kWidth, 2 * kHeight,
+      cricket::VideoFormat::FpsToInterval(5), cricket::FOURCC_ARGB));
+  capturer_.ResetSupportedFormats(formats);
+
+
+  EXPECT_EQ(0, capturer_.screencast_max_pixels());
+  EXPECT_EQ(cricket::CS_RUNNING, capturer_.Start(cricket::VideoFormat(
+      2 * kWidth,
+      2 * kHeight,
+      cricket::VideoFormat::FpsToInterval(30),
+      cricket::FOURCC_ARGB)));
+  EXPECT_TRUE(capturer_.IsRunning());
+  EXPECT_EQ(0, renderer_.num_rendered_frames());
+  renderer_.SetSize(2 * kWidth, 2 * kHeight, 0);
+  EXPECT_TRUE(capturer_.CaptureFrame());
+  EXPECT_EQ(1, renderer_.num_rendered_frames());
+
+  capturer_.set_screencast_max_pixels(kWidth * kHeight);
+  renderer_.SetSize(kWidth, kHeight, 0);
+  EXPECT_TRUE(capturer_.CaptureFrame());
+  EXPECT_EQ(2, renderer_.num_rendered_frames());
+}
+
 TEST_F(VideoCapturerTest, TestFourccMatch) {
   cricket::VideoFormat desired(640, 480,
                                cricket::VideoFormat::FpsToInterval(30),
@@ -98,23 +98,17 @@ static float FindLowerScale(int width, int height, int target_num_pixels) {
   return kScaleFactors[best_index];
 }
 
-// Compute a size to scale frames to that is below maximum compression
-// and rendering size with the same aspect ratio.
-void ComputeScale(int frame_width, int frame_height, int fps,
-                  int* scaled_width, int* scaled_height) {
+// Computes a scale less to fit in max_pixels while maintaining aspect ratio.
+void ComputeScaleMaxPixels(int frame_width, int frame_height, int max_pixels,
+                           int* scaled_width, int* scaled_height) {
   ASSERT(scaled_width != NULL);
   ASSERT(scaled_height != NULL);
+  ASSERT(max_pixels > 0);
   // For VP8 the values for max width and height can be found here
   // webrtc/src/video_engine/vie_defines.h (kViEMaxCodecWidth and
   // kViEMaxCodecHeight)
   const int kMaxWidth = 4096;
   const int kMaxHeight = 3072;
-  // Maximum pixels limit is set to Retina MacBookPro 15" resolution of
-  // 2880 x 1800 as of 4/18/2013.
-  // For high fps, maximum pixels limit is set based on common 24" monitor
-  // resolution of 2048 x 1280 as of 6/13/2013. The Retina resolution is
-  // therefore reduced to 1440 x 900.
-  int kMaxPixels = (fps > 5) ? 2048 * 1280 : 2880 * 1800;
   int new_frame_width = frame_width;
   int new_frame_height = frame_height;
 
@@ -129,12 +123,12 @@ void ComputeScale(int frame_width, int frame_height, int fps,
     new_frame_height = kMaxHeight;
   }
   // Limit number of pixels.
-  if (new_frame_width * new_frame_height > kMaxPixels) {
+  if (new_frame_width * new_frame_height > max_pixels) {
     // Compute new width such that width * height is less than maximum but
     // maintains original captured frame aspect ratio.
     new_frame_width = static_cast<int>(sqrtf(static_cast<float>(
-        kMaxPixels) * new_frame_width / new_frame_height));
-    new_frame_height = kMaxPixels / new_frame_width;
+        max_pixels) * new_frame_width / new_frame_height));
+    new_frame_height = max_pixels / new_frame_width;
   }
   // Snap to a scale factor that is less than or equal to target pixels.
   float scale = FindLowerScale(frame_width, frame_height,
@@ -143,6 +137,20 @@ void ComputeScale(int frame_width, int frame_height, int fps,
   *scaled_height = static_cast<int>(frame_height * scale + .5f);
 }
 
+// Compute a size to scale frames to that is below maximum compression
+// and rendering size with the same aspect ratio.
+void ComputeScale(int frame_width, int frame_height, int fps,
+                  int* scaled_width, int* scaled_height) {
+  // Maximum pixels limit is set to Retina MacBookPro 15" resolution of
+  // 2880 x 1800 as of 4/18/2013.
+  // For high fps, maximum pixels limit is set based on common 24" monitor
+  // resolution of 2048 x 1280 as of 6/13/2013. The Retina resolution is
+  // therefore reduced to 1440 x 900.
+  int max_pixels = (fps > 5) ? 2048 * 1280 : 2880 * 1800;
+  ComputeScaleMaxPixels(
+      frame_width, frame_height, max_pixels, scaled_width, scaled_height);
+}
+
 // Compute size to crop video frame to.
 // If cropped_format_* is 0, return the frame_* size as is.
 void ComputeCrop(int cropped_format_width,
@@ -209,6 +217,14 @@
   }
 }
 
+// Compute the frame size that makes pixels square pixel aspect ratio.
+void ComputeScaleToSquarePixels(int in_width, int in_height,
+                                int pixel_width, int pixel_height,
+                                int* scaled_width, int* scaled_height) {
+  *scaled_width = in_width;  // Keep width the same.
+  *scaled_height = in_height * pixel_width / pixel_height;
+}
+
 // The C++ standard requires a namespace-scope definition of static const
 // integral types even when they are initialized in the declaration (see
 // [class.static.data]/4), but MSVC with /Ze is non-conforming and treats that
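As a worked illustration of the pixel cap that ComputeScaleMaxPixels enforces, the following self-contained sketch (hypothetical driver code, not part of the patch) reproduces the aspect-preserving shrink for a Retina-sized screencast under the high-fps cap:

    #include <cmath>
    #include <cstdio>

    int main() {
      int width = 2880, height = 1800;     // 15" Retina MacBookPro capture.
      const int max_pixels = 2048 * 1280;  // High-fps cap from the diff.
      if (width * height > max_pixels) {
        // Shrink so width * height <= max_pixels while keeping 16:10 aspect.
        width = static_cast<int>(std::sqrt(static_cast<float>(max_pixels) *
                                           width / height));
        height = max_pixels / width;
      }
      std::printf("%d x %d\n", width, height);  // Prints "2048 x 1280".
      return 0;
    }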
@@ -25,7 +25,7 @@
 //
 // Common definition for video, including fourcc and VideoFormat.
 
-#ifndef TALK_MEDIA_BASE_VIDEOCOMMON_H_
+#ifndef TALK_MEDIA_BASE_VIDEOCOMMON_H_  // NOLINT
 #define TALK_MEDIA_BASE_VIDEOCOMMON_H_
 
 #include <string>
@@ -147,6 +147,15 @@ inline std::string GetFourccName(uint32 fourcc) {
   return name;
 }
 
+// Computes a scale less to fit in max_pixels while maintaining aspect ratio.
+void ComputeScaleMaxPixels(int frame_width, int frame_height, int max_pixels,
+                           int* scaled_width, int* scaled_height);
+
+// For low fps, max pixels limit is set to Retina MacBookPro 15" resolution of
+// 2880 x 1800 as of 4/18/2013.
+// For high fps, maximum pixels limit is set based on common 24" monitor
+// resolution of 2048 x 1280 as of 6/13/2013. The Retina resolution is
+// therefore reduced to 1440 x 900.
 void ComputeScale(int frame_width, int frame_height, int fps,
                   int* scaled_width, int* scaled_height);
 
@@ -158,6 +167,11 @@ void ComputeCrop(int cropped_format_width, int cropped_format_height,
                  int rotation,
                  int* cropped_width, int* cropped_height);
 
+// Compute the frame size that makes pixels square pixel aspect ratio.
+void ComputeScaleToSquarePixels(int in_width, int in_height,
+                                int pixel_width, int pixel_height,
+                                int* scaled_width, int* scaled_height);
+
 //////////////////////////////////////////////////////////////////////////////
 // Definition of VideoFormat.
 //////////////////////////////////////////////////////////////////////////////
@@ -239,4 +253,4 @@ struct VideoFormat : VideoFormatPod {
 
 }  // namespace cricket
 
-#endif  // TALK_MEDIA_BASE_VIDEOCOMMON_H_
+#endif  // TALK_MEDIA_BASE_VIDEOCOMMON_H_  // NOLINT
@@ -287,4 +287,16 @@ TEST(VideoCommonTest, TestComputeCrop) {
   EXPECT_EQ(768, cropped_height);
 }
 
+TEST(VideoCommonTest, TestComputeScaleToSquarePixels) {
+  int scaled_width, scaled_height;
+
+  // Pixel aspect ratio is 4:3. Logical aspect ratio is 16:9. Expect scale
+  // to square pixels with physical aspect ratio of 16:9.
+  ComputeScaleToSquarePixels(640, 270,
+                             4, 3,  // 4 x 3 pixel aspect ratio
+                             &scaled_width, &scaled_height);
+  EXPECT_EQ(640, scaled_width);
+  EXPECT_EQ(360, scaled_height);
+}
+
 }  // namespace cricket
@@ -23,7 +23,7 @@
 // OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 // ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-#ifndef TALK_MEDIA_BASE_VIDEOENGINE_UNITTEST_H_
+#ifndef TALK_MEDIA_BASE_VIDEOENGINE_UNITTEST_H_  // NOLINT
 #define TALK_MEDIA_BASE_VIDEOENGINE_UNITTEST_H_
 
 #include <string>
@@ -1667,4 +1667,4 @@ class VideoMediaChannelTest : public testing::Test,
   cricket::FakeVideoRenderer renderer2_;
 };
 
-#endif  // TALK_MEDIA_BASE_VIDEOENGINE_UNITTEST_H_
+#endif  // TALK_MEDIA_BASE_VIDEOENGINE_UNITTEST_H_ NOLINT
@@ -465,6 +465,48 @@ class WebRtcVideoChannelRecvInfo {
   DecoderMap registered_decoders_;
 };
 
+#ifdef USE_WEBRTC_DEV_BRANCH
+class WebRtcOveruseObserver : public webrtc::CpuOveruseObserver {
+ public:
+  explicit WebRtcOveruseObserver(CoordinatedVideoAdapter* video_adapter)
+      : video_adapter_(video_adapter),
+        enabled_(false) {
+  }
+
+  // TODO(mflodman): Consider sending resolution as part of event, to let
+  // adapter know what resolution the request is based on. Helps eliminate stale
+  // data, race conditions.
+  virtual void OveruseDetected() OVERRIDE {
+    talk_base::CritScope cs(&crit_);
+    if (!enabled_) {
+      return;
+    }
+
+    video_adapter_->OnCpuResolutionRequest(CoordinatedVideoAdapter::DOWNGRADE);
+  }
+
+  virtual void NormalUsage() OVERRIDE {
+    talk_base::CritScope cs(&crit_);
+    if (!enabled_) {
+      return;
+    }
+
+    video_adapter_->OnCpuResolutionRequest(CoordinatedVideoAdapter::UPGRADE);
+  }
+
+  void Enable(bool enable) {
+    talk_base::CritScope cs(&crit_);
+    enabled_ = enable;
+  }
+
+ private:
+  CoordinatedVideoAdapter* video_adapter_;
+  bool enabled_;
+  talk_base::CriticalSection crit_;
+};
+
+#endif
+
 class WebRtcVideoChannelSendInfo : public sigslot::has_slots<> {
  public:
   typedef std::map<int, webrtc::VideoEncoder*> EncoderMap;  // key: payload type
@@ -481,6 +523,9 @@ class WebRtcVideoChannelSendInfo : public sigslot::has_slots<> {
         capturer_updated_(false),
         interval_(0),
         video_adapter_(new CoordinatedVideoAdapter) {
+#ifdef USE_WEBRTC_DEV_BRANCH
+    overuse_observer_.reset(new WebRtcOveruseObserver(video_adapter_.get()));
+#endif
     SignalCpuAdaptationUnable.repeat(video_adapter_->SignalCpuAdaptationUnable);
     if (cpu_monitor) {
      cpu_monitor->SignalUpdate.connect(
@@ -534,6 +579,11 @@ class WebRtcVideoChannelSendInfo : public sigslot::has_slots<> {
   int CurrentAdaptReason() const {
     return video_adapter_->adapt_reason();
   }
+#ifdef USE_WEBRTC_DEV_BRANCH
+  webrtc::CpuOveruseObserver* overuse_observer() {
+    return overuse_observer_.get();
+  }
+#endif
 
   StreamParams* stream_params() { return stream_params_.get(); }
   void set_stream_params(const StreamParams& sp) {
@@ -572,7 +622,7 @@ class WebRtcVideoChannelSendInfo : public sigslot::has_slots<> {
   }
 
   void ApplyCpuOptions(const VideoOptions& options) {
-    bool cpu_adapt, cpu_smoothing;
+    bool cpu_adapt, cpu_smoothing, adapt_third;
     float low, med, high;
     if (options.adapt_input_to_cpu_usage.Get(&cpu_adapt)) {
       video_adapter_->set_cpu_adaptation(cpu_adapt);
@@ -589,7 +639,18 @@ class WebRtcVideoChannelSendInfo : public sigslot::has_slots<> {
     if (options.system_high_adaptation_threshhold.Get(&high)) {
       video_adapter_->set_high_system_threshold(high);
     }
+    if (options.video_adapt_third.Get(&adapt_third)) {
+      video_adapter_->set_scale_third(adapt_third);
+    }
+  }
+
+  void SetCpuOveruseDetection(bool enable) {
+#ifdef USE_WEBRTC_DEV_BRANCH
+    overuse_observer_->Enable(enable);
+    video_adapter_->set_cpu_adaptation(enable);
+#endif
   }
 
   void ProcessFrame(const VideoFrame& original_frame, bool mute,
                     VideoFrame** processed_frame) {
     if (!mute) {
@@ -642,6 +703,9 @@ class WebRtcVideoChannelSendInfo : public sigslot::has_slots<> {
   int64 interval_;
 
   talk_base::scoped_ptr<CoordinatedVideoAdapter> video_adapter_;
+#ifdef USE_WEBRTC_DEV_BRANCH
+  talk_base::scoped_ptr<WebRtcOveruseObserver> overuse_observer_;
+#endif
 };
 
 const WebRtcVideoEngine::VideoCodecPref
@@ -2481,6 +2545,9 @@ bool WebRtcVideoMediaChannel::SetOptions(const VideoOptions &options) {
   bool buffer_latency_changed = options.buffered_mode_latency.IsSet() &&
       (options_.buffered_mode_latency != options.buffered_mode_latency);
 
+  bool cpu_overuse_detection_changed = options.cpu_overuse_detection.IsSet() &&
+      (options_.cpu_overuse_detection != options.cpu_overuse_detection);
+
   bool conference_mode_turned_off = false;
   if (options_.conference_mode.IsSet() && options.conference_mode.IsSet() &&
       options_.conference_mode.GetWithDefaultIfUnset(false) &&
@@ -2558,6 +2625,15 @@ bool WebRtcVideoMediaChannel::SetOptions(const VideoOptions &options) {
       }
     }
   }
+  if (cpu_overuse_detection_changed) {
+    bool cpu_overuse_detection =
+        options_.cpu_overuse_detection.GetWithDefaultIfUnset(false);
+    for (SendChannelMap::iterator iter = send_channels_.begin();
+         iter != send_channels_.end(); ++iter) {
+      WebRtcVideoChannelSendInfo* send_channel = iter->second;
+      send_channel->SetCpuOveruseDetection(cpu_overuse_detection);
+    }
+  }
   return true;
 }
 
@@ -2702,8 +2778,8 @@ bool WebRtcVideoMediaChannel::SendFrame(
   frame_i420.y_pitch = frame_out->GetYPitch();
   frame_i420.u_pitch = frame_out->GetUPitch();
   frame_i420.v_pitch = frame_out->GetVPitch();
-  frame_i420.width = static_cast<unsigned short>(frame_out->GetWidth());
-  frame_i420.height = static_cast<unsigned short>(frame_out->GetHeight());
+  frame_i420.width = static_cast<uint16>(frame_out->GetWidth());
+  frame_i420.height = static_cast<uint16>(frame_out->GetHeight());
 
   int64 timestamp_ntp_ms = 0;
   // TODO(justinlin): Reenable after Windows issues with clock drift are fixed.
@@ -2966,10 +3042,21 @@ bool WebRtcVideoMediaChannel::ConfigureSending(int channel_id,
       new WebRtcVideoChannelSendInfo(channel_id, vie_capture,
                                      external_capture,
                                      engine()->cpu_monitor()));
+#ifdef USE_WEBRTC_DEV_BRANCH
+  if (engine()->vie()->base()->RegisterCpuOveruseObserver(
+      channel_id, send_channel->overuse_observer())) {
+    LOG_RTCERR1(RegisterCpuOveruseObserver, channel_id);
+    return false;
+  }
+#endif
   send_channel->ApplyCpuOptions(options_);
   send_channel->SignalCpuAdaptationUnable.connect(this,
       &WebRtcVideoMediaChannel::OnCpuAdaptationUnable);
 
+  if (options_.cpu_overuse_detection.GetWithDefaultIfUnset(false)) {
+    send_channel->SetCpuOveruseDetection(true);
+  }
+
   // Register encoder observer for outgoing framerate and bitrate.
   if (engine()->vie()->codec()->RegisterEncoderObserver(
       channel_id, *send_channel->encoder_observer()) != 0) {
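Taken together, the hunks above wire the engine's CPU overuse callbacks into the video adapter: the observer is created alongside each send channel, registered with the engine, and enabled or disabled through SetCpuOveruseDetection. A minimal standalone sketch of the same observer pattern (simplified stand-in types; the real classes appear in the diff):

    #include <cstdio>

    // Simplified stand-ins for CoordinatedVideoAdapter and CpuOveruseObserver.
    struct Adapter {
      void OnCpuResolutionRequest(bool downgrade) {
        std::printf(downgrade ? "downgrade\n" : "upgrade\n");
      }
    };

    class OveruseObserver {
     public:
      explicit OveruseObserver(Adapter* adapter)
          : adapter_(adapter), enabled_(false) {}
      void Enable(bool enable) { enabled_ = enable; }
      // Called by the engine when encode time overruns the frame budget.
      void OveruseDetected() {
        if (enabled_) adapter_->OnCpuResolutionRequest(true);
      }
      void NormalUsage() {
        if (enabled_) adapter_->OnCpuResolutionRequest(false);
      }
     private:
      Adapter* adapter_;
      bool enabled_;
    };

    int main() {
      Adapter adapter;
      OveruseObserver observer(&adapter);
      observer.OveruseDetected();  // Ignored: observer starts disabled.
      observer.Enable(true);       // The SetCpuOveruseDetection(true) step.
      observer.OveruseDetected();  // Now requests a resolution downgrade.
      return 0;
    }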
@@ -86,8 +86,7 @@ class FakeViEWrapper : public cricket::ViEWrapper {
 
 // Test fixture to test WebRtcVideoEngine with a fake webrtc::VideoEngine.
 // Useful for testing failure paths.
-class WebRtcVideoEngineTestFake :
-    public testing::Test,
+class WebRtcVideoEngineTestFake : public testing::Test,
     public sigslot::has_slots<> {
  public:
   WebRtcVideoEngineTestFake()
@@ -1632,18 +1632,11 @@ bool WebRtcVoiceMediaChannel::SetRecvCodecs(
}

bool WebRtcVoiceMediaChannel::SetSendCodecs(
    const std::vector<AudioCodec>& codecs) {
  // TODO(xians): Break down this function into SetSendCodecs(channel, codecs)
  // to support per-channel codecs.

  // Disable DTMF, VAD, and FEC unless we know the other side wants them.
  dtmf_allowed_ = false;
  for (ChannelMap::iterator iter = send_channels_.begin();
       iter != send_channels_.end(); ++iter) {
    engine()->voe()->codec()->SetVADStatus(iter->second.channel, false);
    engine()->voe()->rtp()->SetNACKStatus(iter->second.channel, false, 0);
    engine()->voe()->rtp()->SetFECStatus(iter->second.channel, false);
  }
    int channel, const std::vector<AudioCodec>& codecs) {
  // Disable VAD and FEC unless we know the other side wants them.
  engine()->voe()->codec()->SetVADStatus(channel, false);
  engine()->voe()->rtp()->SetNACKStatus(channel, false, 0);
  engine()->voe()->rtp()->SetFECStatus(channel, false);

  // Scan through the list to figure out the codec to use for sending, along
  // with the proper configuration for VAD and DTMF.
@@ -1700,16 +1693,11 @@ bool WebRtcVoiceMediaChannel::SetSendCodecs(
    // about it.
    if (_stricmp(it->name.c_str(), "telephone-event") == 0 ||
        _stricmp(it->name.c_str(), "audio/telephone-event") == 0) {
      for (ChannelMap::iterator iter = send_channels_.begin();
           iter != send_channels_.end(); ++iter) {
        if (engine()->voe()->dtmf()->SetSendTelephoneEventPayloadType(
                iter->second.channel, it->id) == -1) {
          LOG_RTCERR2(SetSendTelephoneEventPayloadType,
                      iter->second.channel, it->id);
          return false;
        }
      if (engine()->voe()->dtmf()->SetSendTelephoneEventPayloadType(
              channel, it->id) == -1) {
        LOG_RTCERR2(SetSendTelephoneEventPayloadType, channel, it->id);
        return false;
      }
      dtmf_allowed_ = true;
    }

    // Turn voice activity detection/comfort noise on if supported.
@@ -1732,35 +1720,30 @@ bool WebRtcVoiceMediaChannel::SetSendCodecs(
            << " not supported.";
        continue;
      }
      // Loop through the existing send channels and set the CN payloadtype
      // and the VAD status.
      for (ChannelMap::iterator iter = send_channels_.begin();
           iter != send_channels_.end(); ++iter) {
        int channel = iter->second.channel;
        // The CN payload type for 8000 Hz clockrate is fixed at 13.
        if (cn_freq != webrtc::kFreq8000Hz) {
          if (engine()->voe()->codec()->SetSendCNPayloadType(
                  channel, it->id, cn_freq) == -1) {
            LOG_RTCERR3(SetSendCNPayloadType, channel, it->id, cn_freq);
            // TODO(ajm): This failure condition will be removed from VoE.
            // Restore the return here when we update to a new enough webrtc.
            //
            // Not returning false because the SetSendCNPayloadType will fail
            // if the channel is already sending.
            // This can happen if the remote description is applied twice, for
            // example in the case of ROAP on top of JSEP, where both sides
            // will send the offer.
          }
      // Set the CN payloadtype and the VAD status.
      // The CN payload type for 8000 Hz clockrate is fixed at 13.
      if (cn_freq != webrtc::kFreq8000Hz) {
        if (engine()->voe()->codec()->SetSendCNPayloadType(
                channel, it->id, cn_freq) == -1) {
          LOG_RTCERR3(SetSendCNPayloadType, channel, it->id, cn_freq);
          // TODO(ajm): This failure condition will be removed from VoE.
          // Restore the return here when we update to a new enough webrtc.
          //
          // Not returning false because the SetSendCNPayloadType will fail
          // if the channel is already sending.
          // This can happen if the remote description is applied twice, for
          // example in the case of ROAP on top of JSEP, where both sides
          // will send the offer.
        }
      }

      // Only turn on VAD if we have a CN payload type that matches the
      // clockrate for the codec we are going to use.
      if (it->clockrate == send_codec.plfreq) {
        LOG(LS_INFO) << "Enabling VAD";
        if (engine()->voe()->codec()->SetVADStatus(channel, true) == -1) {
          LOG_RTCERR2(SetVADStatus, channel, true);
          return false;
        }
      // Only turn on VAD if we have a CN payload type that matches the
      // clockrate for the codec we are going to use.
      if (it->clockrate == send_codec.plfreq) {
        LOG(LS_INFO) << "Enabling VAD";
        if (engine()->voe()->codec()->SetVADStatus(channel, true) == -1) {
          LOG_RTCERR2(SetVADStatus, channel, true);
          return false;
        }
      }
    }
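The CN branch above encodes an RTP detail worth spelling out: payload type 13 is the static assignment for CN at 8000 Hz (RFC 3551), so only wideband CN needs a dynamic payload type registered with VoE. A self-contained sketch of the rule (helper name hypothetical):

  #include <iostream>

  // Hypothetical helper: decide whether a CN payload type needs registering.
  // Mirrors the webrtc::kFreq8000Hz special case above.
  bool NeedsDynamicCnRegistration(int clockrate_hz) {
    // CN at 8000 Hz is statically payload type 13; nothing to configure.
    // CN at 16000/32000 Hz has no static number and must be signaled.
    return clockrate_hz != 8000;
  }

  int main() {
    std::cout << NeedsDynamicCnRegistration(8000)    // 0: static PT 13
              << NeedsDynamicCnRegistration(16000)   // 1: e.g. PT 97 in tests
              << "\n";
  }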
@@ -1780,28 +1763,22 @@ bool WebRtcVoiceMediaChannel::SetSendCodecs(
      // Enable redundant encoding of the specified codec. Treat any
      // failure as a fatal internal error.
      LOG(LS_INFO) << "Enabling FEC";
      for (ChannelMap::iterator iter = send_channels_.begin();
           iter != send_channels_.end(); ++iter) {
        if (engine()->voe()->rtp()->SetFECStatus(iter->second.channel,
                                                 true, it->id) == -1) {
          LOG_RTCERR3(SetFECStatus, iter->second.channel, true, it->id);
          return false;
        }
      if (engine()->voe()->rtp()->SetFECStatus(channel, true, it->id) == -1) {
        LOG_RTCERR3(SetFECStatus, channel, true, it->id);
        return false;
      }
    } else {
      send_codec = voe_codec;
      nack_enabled_ = IsNackEnabled(*it);
      SetNack(send_channels_, nack_enabled_);
      SetNack(channel, nack_enabled_);
    }
    first = false;
    // Set the codec immediately, since SetVADStatus() depends on whether
    // the current codec is mono or stereo.
    if (!SetSendCodec(send_codec))
    if (!SetSendCodec(channel, send_codec))
      return false;
  }
}
SetNack(receive_channels_, nack_enabled_);

// If we're being asked to set an empty list of codecs, due to a buggy client,
// choose the most common format: PCMU
@@ -1809,10 +1786,39 @@ bool WebRtcVoiceMediaChannel::SetSendCodecs(
    LOG(LS_WARNING) << "Received empty list of codecs; using PCMU/8000";
    AudioCodec codec(0, "PCMU", 8000, 0, 1, 0);
    engine()->FindWebRtcCodec(codec, &send_codec);
    if (!SetSendCodec(send_codec))
    if (!SetSendCodec(channel, send_codec))
      return false;
  }

  // Always update the |send_codec_| to the currently set send codec.
  send_codec_.reset(new webrtc::CodecInst(send_codec));

  return true;
}

bool WebRtcVoiceMediaChannel::SetSendCodecs(
    const std::vector<AudioCodec>& codecs) {
  dtmf_allowed_ = false;
  for (std::vector<AudioCodec>::const_iterator it = codecs.begin();
       it != codecs.end(); ++it) {
    // Find the DTMF telephone event "codec".
    if (_stricmp(it->name.c_str(), "telephone-event") == 0 ||
        _stricmp(it->name.c_str(), "audio/telephone-event") == 0) {
      dtmf_allowed_ = true;
    }
  }

  // Cache the codecs in order to configure channels created later.
  send_codecs_ = codecs;
  for (ChannelMap::iterator iter = send_channels_.begin();
       iter != send_channels_.end(); ++iter) {
    if (!SetSendCodecs(iter->second.channel, codecs)) {
      return false;
    }
  }

  SetNack(receive_channels_, nack_enabled_);

  return true;
}
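The refactor splits codec handling into a per-channel worker and a public wrapper that caches the list, so a send channel created after negotiation can be configured retroactively. A stripped-down sketch of that shape (all types hypothetical, engine calls elided):

  #include <map>
  #include <string>
  #include <vector>

  struct AudioCodecSketch { std::string name; int id; };
  typedef std::vector<AudioCodecSketch> CodecList;

  class SendCodecCache {
   public:
    // Public entry point: cache, then fan out to existing channels.
    bool SetSendCodecs(const CodecList& codecs) {
      send_codecs_ = codecs;
      for (std::map<unsigned, int>::iterator it = channels_.begin();
           it != channels_.end(); ++it) {
        if (!SetSendCodecs(it->second, codecs))
          return false;
      }
      return true;
    }
    // A stream added later replays the cached codecs onto its new channel.
    bool AddSendStream(unsigned ssrc, int channel) {
      channels_[ssrc] = channel;
      return send_codecs_.empty() || SetSendCodecs(channel, send_codecs_);
    }

   private:
    // Per-channel worker; the real code programs VoE here.
    bool SetSendCodecs(int /*channel*/, const CodecList& /*codecs*/) {
      return true;
    }
    std::map<unsigned, int> channels_;
    CodecList send_codecs_;
  };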
@@ -1820,17 +1826,16 @@ void WebRtcVoiceMediaChannel::SetNack(const ChannelMap& channels,
                                      bool nack_enabled) {
  for (ChannelMap::const_iterator it = channels.begin();
       it != channels.end(); ++it) {
    SetNack(it->first, it->second.channel, nack_enabled_);
    SetNack(it->second.channel, nack_enabled);
  }
}

void WebRtcVoiceMediaChannel::SetNack(uint32 ssrc, int channel,
                                      bool nack_enabled) {
void WebRtcVoiceMediaChannel::SetNack(int channel, bool nack_enabled) {
  if (nack_enabled) {
    LOG(LS_INFO) << "Enabling NACK for stream " << ssrc;
    LOG(LS_INFO) << "Enabling NACK for channel " << channel;
    engine()->voe()->rtp()->SetNACKStatus(channel, true, kNackMaxPackets);
  } else {
    LOG(LS_INFO) << "Disabling NACK for stream " << ssrc;
    LOG(LS_INFO) << "Disabling NACK for channel " << channel;
    engine()->voe()->rtp()->SetNACKStatus(channel, false, 0);
  }
}
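Note that the old loop forwarded the member nack_enabled_ rather than its nack_enabled parameter; the new two-argument form uses the parameter. A tiny sketch of the corrected overload pair (standalone stand-ins, not the cricket API):

  #include <iostream>
  #include <map>

  typedef std::map<unsigned, int> ChannelMapSketch;  // ssrc -> channel id

  void SetNackSketch(int channel, bool nack_enabled) {
    std::cout << (nack_enabled ? "Enabling" : "Disabling")
              << " NACK for channel " << channel << "\n";
  }

  void SetNackSketch(const ChannelMapSketch& channels, bool nack_enabled) {
    for (ChannelMapSketch::const_iterator it = channels.begin();
         it != channels.end(); ++it) {
      // Forward the parameter, not a member, so callers control the value.
      SetNackSketch(it->second, nack_enabled);
    }
  }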
@@ -1845,10 +1850,6 @@ bool WebRtcVoiceMediaChannel::SetSendCodec(
    return false;
  }

  // All SetSendCodec calls were successful. Update the global state
  // accordingly.
  send_codec_.reset(new webrtc::CodecInst(send_codec));

  return true;
}

@@ -2098,8 +2099,8 @@ bool WebRtcVoiceMediaChannel::AddSendStream(const StreamParams& sp) {
    return false;
  }

  // Set the current codec to be used for the new channel.
  if (send_codec_ && !SetSendCodec(channel, *send_codec_))
  // Set the current codecs to be used for the new channel.
  if (!send_codecs_.empty() && !SetSendCodecs(channel, send_codecs_))
    return false;

  return ChangeSend(channel, desired_send_);
@@ -2223,7 +2224,7 @@ bool WebRtcVoiceMediaChannel::AddRecvStream(const StreamParams& sp) {
      SetPlayout(voe_channel(), false);
    }
  }
  SetNack(ssrc, channel, nack_enabled_);
  SetNack(channel, nack_enabled_);

  receive_channels_.insert(
      std::make_pair(ssrc, WebRtcVoiceChannelInfo(channel, NULL)));
@@ -2547,7 +2548,24 @@ bool WebRtcVoiceMediaChannel::InsertDtmf(uint32 ssrc, int event,

  // Send the event.
  if (flags & cricket::DF_SEND) {
    int channel = (ssrc == 0) ? voe_channel() : GetSendChannelNum(ssrc);
    int channel = -1;
    if (ssrc == 0) {
      bool default_channel_is_inuse = false;
      for (ChannelMap::const_iterator iter = send_channels_.begin();
           iter != send_channels_.end(); ++iter) {
        if (IsDefaultChannel(iter->second.channel)) {
          default_channel_is_inuse = true;
          break;
        }
      }
      if (default_channel_is_inuse) {
        channel = voe_channel();
      } else if (!send_channels_.empty()) {
        channel = send_channels_.begin()->second.channel;
      }
    } else {
      channel = GetSendChannelNum(ssrc);
    }
    if (channel == -1) {
      LOG(LS_WARNING) << "InsertDtmf - The specified ssrc "
                      << ssrc << " is not in use.";
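The ssrc == 0 branch resolves a concrete channel in three steps: use the default channel if some send stream maps onto it, otherwise fall back to the first send channel, otherwise fail. The same selection as a standalone sketch (map type hypothetical):

  #include <map>

  typedef std::map<unsigned, int> SendChannelMapSketch;  // ssrc -> channel id

  // Returns the channel to use for a DTMF event on ssrc 0, or -1 if there is
  // no send channel at all. default_channel plays the role of voe_channel().
  int PickDtmfChannel(const SendChannelMapSketch& send_channels,
                      int default_channel) {
    for (SendChannelMapSketch::const_iterator it = send_channels.begin();
         it != send_channels.end(); ++it) {
      if (it->second == default_channel)
        return default_channel;  // default channel is in use; prefer it
    }
    if (!send_channels.empty())
      return send_channels.begin()->second;  // any active send channel
    return -1;  // nothing to send on
  }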
@@ -377,7 +377,7 @@ class WebRtcVoiceMediaChannel
  struct WebRtcVoiceChannelInfo;
  typedef std::map<uint32, WebRtcVoiceChannelInfo> ChannelMap;

  void SetNack(uint32 ssrc, int channel, bool nack_enabled);
  void SetNack(int channel, bool nack_enabled);
  void SetNack(const ChannelMap& channels, bool nack_enabled);
  bool SetSendCodec(const webrtc::CodecInst& send_codec);
  bool SetSendCodec(int channel, const webrtc::CodecInst& send_codec);
@@ -392,10 +392,12 @@ class WebRtcVoiceMediaChannel
  bool IsDefaultChannel(int channel_id) const {
    return channel_id == voe_channel();
  }
  bool SetSendCodecs(int channel, const std::vector<AudioCodec>& codecs);

  talk_base::scoped_ptr<WebRtcSoundclipStream> ringback_tone_;
  std::set<int> ringback_channels_;  // channels playing ringback
  std::vector<AudioCodec> recv_codecs_;
  std::vector<AudioCodec> send_codecs_;
  talk_base::scoped_ptr<webrtc::CodecInst> send_codec_;
  AudioOptions options_;
  bool dtmf_allowed_;

@@ -143,7 +143,18 @@ class WebRtcVoiceEngineTestFake : public testing::Test {
    engine_.Terminate();
  }

  void TestInsertDtmf(uint32 ssrc, int channel_id) {
  void TestInsertDtmf(uint32 ssrc, bool caller) {
    EXPECT_TRUE(engine_.Init(talk_base::Thread::Current()));
    channel_ = engine_.CreateChannel();
    EXPECT_TRUE(channel_ != NULL);
    if (caller) {
      // If this is a caller, the local description will be applied and the
      // send stream added.
      EXPECT_TRUE(channel_->AddSendStream(
          cricket::StreamParams::CreateLegacy(kSsrc1)));
    }
    int channel_id = voe_.GetLastChannel();

    // Test we can only InsertDtmf when the other side supports telephone-event.
    std::vector<cricket::AudioCodec> codecs;
    codecs.push_back(kPcmuCodec);
@@ -154,6 +165,14 @@ class WebRtcVoiceEngineTestFake : public testing::Test {
    codecs.push_back(kTelephoneEventCodec);
    EXPECT_TRUE(channel_->SetSendCodecs(codecs));
    EXPECT_TRUE(channel_->CanInsertDtmf());

    if (!caller) {
      // There's no active send channel yet.
      EXPECT_FALSE(channel_->InsertDtmf(ssrc, 2, 123, cricket::DF_SEND));
      EXPECT_TRUE(channel_->AddSendStream(
          cricket::StreamParams::CreateLegacy(kSsrc1)));
    }

    // Check that we fail if the ssrc is invalid.
    EXPECT_FALSE(channel_->InsertDtmf(-1, 1, 111, cricket::DF_SEND));
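The bool parameter encodes ordering, which is what all the new AsCaller/AsCallee tests pin down: a caller has its send stream before codecs are applied, a callee only afterwards. Reusing the hypothetical SendCodecCache/CodecList sketch from earlier, the two orderings look like:

  CodecList codecs;  // negotiated audio codecs

  // Caller: the stream (and channel) exists before negotiation completes.
  SendCodecCache caller;
  caller.AddSendStream(0x123, /*channel=*/1);  // nothing cached yet
  caller.SetSendCodecs(codecs);                // configured via fan-out

  // Callee: codecs arrive first, the send stream only after the answer.
  SendCodecCache callee;
  callee.SetSendCodecs(codecs);                // cached, no channel to touch
  callee.AddSendStream(0x123, /*channel=*/2);  // cache replayed here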
@@ -923,8 +942,8 @@ TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecOpusMaxAverageBitrate) {
  EXPECT_EQ(200000, gcodec.rate);
}

// Test that we can enable NACK with opus.
TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecEnableNack) {
// Test that we can enable NACK with opus as caller.
TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecEnableNackAsCaller) {
  EXPECT_TRUE(SetupEngine());
  int channel_num = voe_.GetLastChannel();
  std::vector<cricket::AudioCodec> codecs;
@@ -936,6 +955,26 @@ TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecEnableNack) {
  EXPECT_TRUE(voe_.GetNACK(channel_num));
}

// Test that we can enable NACK with opus as callee.
TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecEnableNackAsCallee) {
  EXPECT_TRUE(engine_.Init(talk_base::Thread::Current()));
  channel_ = engine_.CreateChannel();
  EXPECT_TRUE(channel_ != NULL);

  int channel_num = voe_.GetLastChannel();
  std::vector<cricket::AudioCodec> codecs;
  codecs.push_back(kOpusCodec);
  codecs[0].AddFeedbackParam(cricket::FeedbackParam(cricket::kRtcpFbParamNack,
                                                    cricket::kParamValueEmpty));
  EXPECT_FALSE(voe_.GetNACK(channel_num));
  EXPECT_TRUE(channel_->SetSendCodecs(codecs));
  EXPECT_FALSE(voe_.GetNACK(channel_num));

  EXPECT_TRUE(channel_->AddSendStream(
      cricket::StreamParams::CreateLegacy(kSsrc1)));
  EXPECT_TRUE(voe_.GetNACK(channel_num));
}

// Test that we can enable NACK on receive streams.
TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecEnableNackRecvStreams) {
  EXPECT_TRUE(SetupEngine());
@@ -1136,8 +1175,8 @@ TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecsNoCodecs) {
  EXPECT_EQ(106, voe_.GetSendTelephoneEventPayloadType(channel_num));
}

// Test that we set VAD and DTMF types correctly.
TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecsCNandDTMF) {
// Test that we set VAD and DTMF types correctly as caller.
TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecsCNandDTMFAsCaller) {
  EXPECT_TRUE(SetupEngine());
  int channel_num = voe_.GetLastChannel();
  std::vector<cricket::AudioCodec> codecs;
@@ -1163,6 +1202,39 @@ TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecsCNandDTMF) {
  EXPECT_EQ(98, voe_.GetSendTelephoneEventPayloadType(channel_num));
}

// Test that we set VAD and DTMF types correctly as callee.
TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecsCNandDTMFAsCallee) {
  EXPECT_TRUE(engine_.Init(talk_base::Thread::Current()));
  channel_ = engine_.CreateChannel();
  EXPECT_TRUE(channel_ != NULL);

  int channel_num = voe_.GetLastChannel();
  std::vector<cricket::AudioCodec> codecs;
  codecs.push_back(kIsacCodec);
  codecs.push_back(kPcmuCodec);
  // TODO(juberti): cn 32000
  codecs.push_back(kCn16000Codec);
  codecs.push_back(kCn8000Codec);
  codecs.push_back(kTelephoneEventCodec);
  codecs.push_back(kRedCodec);
  codecs[0].id = 96;
  codecs[2].id = 97;  // wideband CN
  codecs[4].id = 98;  // DTMF
  EXPECT_TRUE(channel_->SetSendCodecs(codecs));
  EXPECT_TRUE(channel_->AddSendStream(
      cricket::StreamParams::CreateLegacy(kSsrc1)));

  webrtc::CodecInst gcodec;
  EXPECT_EQ(0, voe_.GetSendCodec(channel_num, gcodec));
  EXPECT_EQ(96, gcodec.pltype);
  EXPECT_STREQ("ISAC", gcodec.plname);
  EXPECT_TRUE(voe_.GetVAD(channel_num));
  EXPECT_FALSE(voe_.GetFEC(channel_num));
  EXPECT_EQ(13, voe_.GetSendCNPayloadType(channel_num, false));
  EXPECT_EQ(97, voe_.GetSendCNPayloadType(channel_num, true));
  EXPECT_EQ(98, voe_.GetSendTelephoneEventPayloadType(channel_num));
}

// Test that we only apply VAD if we have a CN codec that matches the
// send codec clockrate.
TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecsCNNoMatch) {
@@ -1227,8 +1299,8 @@ TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecsCaseInsensitive) {
  EXPECT_EQ(98, voe_.GetSendTelephoneEventPayloadType(channel_num));
}

// Test that we set up FEC correctly.
TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecsRED) {
// Test that we set up FEC correctly as caller.
TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecsREDAsCaller) {
  EXPECT_TRUE(SetupEngine());
  int channel_num = voe_.GetLastChannel();
  std::vector<cricket::AudioCodec> codecs;
@@ -1247,6 +1319,31 @@ TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecsRED) {
  EXPECT_EQ(127, voe_.GetSendFECPayloadType(channel_num));
}

// Test that we set up FEC correctly as callee.
TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecsREDAsCallee) {
  EXPECT_TRUE(engine_.Init(talk_base::Thread::Current()));
  channel_ = engine_.CreateChannel();
  EXPECT_TRUE(channel_ != NULL);

  int channel_num = voe_.GetLastChannel();
  std::vector<cricket::AudioCodec> codecs;
  codecs.push_back(kRedCodec);
  codecs.push_back(kIsacCodec);
  codecs.push_back(kPcmuCodec);
  codecs[0].id = 127;
  codecs[0].params[""] = "96/96";
  codecs[1].id = 96;
  EXPECT_TRUE(channel_->SetSendCodecs(codecs));
  EXPECT_TRUE(channel_->AddSendStream(
      cricket::StreamParams::CreateLegacy(kSsrc1)));
  webrtc::CodecInst gcodec;
  EXPECT_EQ(0, voe_.GetSendCodec(channel_num, gcodec));
  EXPECT_EQ(96, gcodec.pltype);
  EXPECT_STREQ("ISAC", gcodec.plname);
  EXPECT_TRUE(voe_.GetFEC(channel_num));
  EXPECT_EQ(127, voe_.GetSendFECPayloadType(channel_num));
}

// Test that we set up FEC correctly if params are omitted.
TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecsREDNoParams) {
  EXPECT_TRUE(SetupEngine());
@@ -1947,18 +2044,24 @@ TEST_F(WebRtcVoiceEngineTestFake, StreamCleanup) {
  EXPECT_EQ(0, voe_.GetNumChannels());
}

// Test InsertDtmf on the default send stream.
TEST_F(WebRtcVoiceEngineTestFake, InsertDtmfOnDefaultSendStream) {
  EXPECT_TRUE(SetupEngine());
  int channel_num = voe_.GetLastChannel();
  TestInsertDtmf(0, channel_num);
// Test InsertDtmf on the default send stream as caller.
TEST_F(WebRtcVoiceEngineTestFake, InsertDtmfOnDefaultSendStreamAsCaller) {
  TestInsertDtmf(0, true);
}

// Test InsertDtmf on a specified send stream.
TEST_F(WebRtcVoiceEngineTestFake, InsertDtmfOnSendStream) {
  EXPECT_TRUE(SetupEngine());
  int channel_num = voe_.GetLastChannel();
  TestInsertDtmf(kSsrc1, channel_num);
// Test InsertDtmf on the default send stream as callee.
TEST_F(WebRtcVoiceEngineTestFake, InsertDtmfOnDefaultSendStreamAsCallee) {
  TestInsertDtmf(0, false);
}

// Test InsertDtmf on a specified send stream as caller.
TEST_F(WebRtcVoiceEngineTestFake, InsertDtmfOnSendStreamAsCaller) {
  TestInsertDtmf(kSsrc1, true);
}

// Test InsertDtmf on a specified send stream as callee.
TEST_F(WebRtcVoiceEngineTestFake, InsertDtmfOnSendStreamAsCallee) {
  TestInsertDtmf(kSsrc1, false);
}

// Test that we can play a ringback tone properly in a single-stream call.
@@ -1266,6 +1266,7 @@ TEST_F(P2PTransportChannelTest, TestTcpConnectionsFromActiveToPassive) {
  DestroyChannels();
}

// Test what happens when we have 2 users behind the same NAT. This can lead
// to interesting behavior because the STUN server will only give out the
// address of the outermost NAT.
@@ -1503,3 +1504,4 @@ TEST_F(P2PTransportChannelTest, TestIceConfigWillPassDownToPort) {

  TestSendRecv(1);
}
@@ -55,6 +55,8 @@ enum {
  MSG_SETRENDERER,
  MSG_ADDRECVSTREAM,
  MSG_REMOVERECVSTREAM,
  MSG_ADDSENDSTREAM,
  MSG_REMOVESENDSTREAM,
  MSG_SETRINGBACKTONE,
  MSG_PLAYRINGBACKTONE,
  MSG_SETMAXSENDBANDWIDTH,
@@ -74,7 +76,7 @@ enum {
  MSG_DATARECEIVED,
  MSG_SETCAPTURER,
  MSG_ISSCREENCASTING,
  MSG_SCREENCASTFPS,
  MSG_GETSCREENCASTDETAILS,
  MSG_SETSCREENCASTFACTORY,
  MSG_FIRSTPACKETRECEIVED,
  MSG_SESSION_ERROR,
@@ -334,12 +336,14 @@ struct IsScreencastingMessageData : public talk_base::MessageData {
  bool result;
};

struct ScreencastFpsMessageData : public talk_base::MessageData {
  explicit ScreencastFpsMessageData(uint32 s)
      : ssrc(s), result(0) {
struct VideoChannel::ScreencastDetailsMessageData :
    public talk_base::MessageData {
  explicit ScreencastDetailsMessageData(uint32 s)
      : ssrc(s), fps(0), screencast_max_pixels(0) {
  }
  uint32 ssrc;
  int result;
  int fps;
  int screencast_max_pixels;
};

struct SetScreenCaptureFactoryMessageData : public talk_base::MessageData {
@@ -480,6 +484,18 @@ bool BaseChannel::RemoveRecvStream(uint32 ssrc) {
  return data.result;
}

bool BaseChannel::AddSendStream(const StreamParams& sp) {
  StreamMessageData data(sp);
  Send(MSG_ADDSENDSTREAM, &data);
  return data.result;
}

bool BaseChannel::RemoveSendStream(uint32 ssrc) {
  SsrcMessageData data(ssrc);
  Send(MSG_REMOVESENDSTREAM, &data);
  return data.result;
}

bool BaseChannel::SetLocalContent(const MediaContentDescription* content,
                                  ContentAction action) {
  SetContentData data(content, action);
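AddSendStream and RemoveSendStream follow BaseChannel's usual marshaling shape: pack arguments into a MessageData, Send() blocks until the worker thread's OnMessage dispatches to the _w implementation, and the result rides back in the same struct. A self-contained sketch of the round trip (hypothetical minimal types; inline dispatch stands in for the real cross-thread Send):

  #include <stdint.h>

  struct SsrcMessageDataSketch {
    explicit SsrcMessageDataSketch(uint32_t s) : ssrc(s), result(false) {}
    uint32_t ssrc;
    bool result;
  };

  class ChannelSketch {
   public:
    // Public API, callable from any thread.
    bool RemoveSendStream(uint32_t ssrc) {
      SsrcMessageDataSketch data(ssrc);
      Send(MSG_REMOVESENDSTREAM, &data);  // blocks until the worker handles it
      return data.result;
    }

   private:
    enum { MSG_REMOVESENDSTREAM };
    // In the real code Send() posts to the worker thread and waits; here we
    // dispatch inline to keep the sketch self-contained.
    void Send(int id, SsrcMessageDataSketch* data) { OnMessage(id, data); }
    void OnMessage(int id, SsrcMessageDataSketch* data) {
      if (id == MSG_REMOVESENDSTREAM)
        data->result = RemoveSendStream_w(data->ssrc);
    }
    // Worker-thread implementation (the _w suffix convention).
    bool RemoveSendStream_w(uint32_t ssrc) { return ssrc != 0; }
  };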
@@ -1149,6 +1165,16 @@ bool BaseChannel::RemoveRecvStream_w(uint32 ssrc) {
  return media_channel()->RemoveRecvStream(ssrc);
}

bool BaseChannel::AddSendStream_w(const StreamParams& sp) {
  ASSERT(worker_thread() == talk_base::Thread::Current());
  return media_channel()->AddSendStream(sp);
}

bool BaseChannel::RemoveSendStream_w(uint32 ssrc) {
  ASSERT(worker_thread() == talk_base::Thread::Current());
  return media_channel()->RemoveSendStream(ssrc);
}

bool BaseChannel::UpdateLocalStreams_w(const std::vector<StreamParams>& streams,
                                       ContentAction action) {
  if (!VERIFY(action == CA_OFFER || action == CA_ANSWER ||
@@ -1359,6 +1385,16 @@ void BaseChannel::OnMessage(talk_base::Message *pmsg) {
      data->result = RemoveRecvStream_w(data->ssrc);
      break;
    }
    case MSG_ADDSENDSTREAM: {
      StreamMessageData* data = static_cast<StreamMessageData*>(pmsg->pdata);
      data->result = AddSendStream_w(data->sp);
      break;
    }
    case MSG_REMOVESENDSTREAM: {
      SsrcMessageData* data = static_cast<SsrcMessageData*>(pmsg->pdata);
      data->result = RemoveSendStream_w(data->ssrc);
      break;
    }
    case MSG_SETMAXSENDBANDWIDTH: {
      SetBandwidthData* data = static_cast<SetBandwidthData*>(pmsg->pdata);
      data->result = SetMaxSendBandwidth_w(data->value);
@@ -1964,10 +2000,16 @@ bool VideoChannel::IsScreencasting() {
  return data.result;
}

int VideoChannel::ScreencastFps(uint32 ssrc) {
  ScreencastFpsMessageData data(ssrc);
  Send(MSG_SCREENCASTFPS, &data);
  return data.result;
int VideoChannel::GetScreencastFps(uint32 ssrc) {
  ScreencastDetailsMessageData data(ssrc);
  Send(MSG_GETSCREENCASTDETAILS, &data);
  return data.fps;
}

int VideoChannel::GetScreencastMaxPixels(uint32 ssrc) {
  ScreencastDetailsMessageData data(ssrc);
  Send(MSG_GETSCREENCASTDETAILS, &data);
  return data.screencast_max_pixels;
}

bool VideoChannel::SendIntraFrame() {
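Merging fps and max-pixels into one ScreencastDetailsMessageData keeps a single message type and a single worker-side fill, at the cost of one round trip per getter; zero-initializing the fields in the constructor is what makes the early return on an unknown ssrc safe. A compact sketch:

  // Sketch of the consolidated details struct: every field has a sane zero
  // default, so a worker that finds no capturer can simply return.
  struct ScreencastDetailsSketch {
    explicit ScreencastDetailsSketch(unsigned s)
        : ssrc(s), fps(0), screencast_max_pixels(0) {}
    unsigned ssrc;
    int fps;
    int screencast_max_pixels;
  };

  // Worker-side fill: adding another screencast property later only means
  // another field here plus one assignment, not a new message/handler pair.
  void FillDetails(ScreencastDetailsSketch* data, bool found_capturer) {
    if (!found_capturer)
      return;  // defaults (0, 0) already report "no screencast"
    data->fps = 30;                   // from the capture format in real code
    data->screencast_max_pixels = 0;  // from the capturer in real code
  }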
@@ -2184,14 +2226,16 @@ bool VideoChannel::IsScreencasting_w() const {
  return !screencast_capturers_.empty();
}

int VideoChannel::ScreencastFps_w(uint32 ssrc) const {
  ScreencastMap::const_iterator iter = screencast_capturers_.find(ssrc);
void VideoChannel::ScreencastDetails_w(
    ScreencastDetailsMessageData* data) const {
  ScreencastMap::const_iterator iter = screencast_capturers_.find(data->ssrc);
  if (iter == screencast_capturers_.end()) {
    return 0;
    return;
  }
  VideoCapturer* capturer = iter->second;
  const VideoFormat* video_format = capturer->GetCaptureFormat();
  return VideoFormat::IntervalToFps(video_format->interval);
  data->fps = VideoFormat::IntervalToFps(video_format->interval);
  data->screencast_max_pixels = capturer->screencast_max_pixels();
}

void VideoChannel::SetScreenCaptureFactory_w(
@@ -2262,10 +2306,10 @@ void VideoChannel::OnMessage(talk_base::Message *pmsg) {
      data->result = IsScreencasting_w();
      break;
    }
    case MSG_SCREENCASTFPS: {
      ScreencastFpsMessageData* data =
          static_cast<ScreencastFpsMessageData*>(pmsg->pdata);
      data->result = ScreencastFps_w(data->ssrc);
    case MSG_GETSCREENCASTDETAILS: {
      ScreencastDetailsMessageData* data =
          static_cast<ScreencastDetailsMessageData*>(pmsg->pdata);
      ScreencastDetails_w(data);
      break;
    }
    case MSG_SENDINTRAFRAME: {
@@ -119,6 +119,8 @@ class BaseChannel
  // Multiplexing
  bool AddRecvStream(const StreamParams& sp);
  bool RemoveRecvStream(uint32 ssrc);
  bool AddSendStream(const StreamParams& sp);
  bool RemoveSendStream(uint32 ssrc);

  // Monitoring
  void StartConnectionMonitor(int cms);
@@ -277,6 +279,8 @@ class BaseChannel
  void ChannelNotWritable_w();
  bool AddRecvStream_w(const StreamParams& sp);
  bool RemoveRecvStream_w(uint32 ssrc);
  bool AddSendStream_w(const StreamParams& sp);
  bool RemoveSendStream_w(uint32 ssrc);
  virtual bool ShouldSetupDtlsSrtp() const;
  // Do the DTLS key expansion and impose it on the SRTP/SRTCP filters.
  // |rtcp_channel| indicates whether to set up the RTP or RTCP filter.
@@ -488,13 +492,13 @@ class VideoChannel : public BaseChannel {
  // TODO(pthatcher): Refactor to use a "capture id" instead of an
  // ssrc here as the "key".
  VideoCapturer* AddScreencast(uint32 ssrc, const ScreencastId& id);
  VideoCapturer* GetScreencastCapturer(uint32 ssrc);
  bool SetCapturer(uint32 ssrc, VideoCapturer* capturer);
  bool RemoveScreencast(uint32 ssrc);
  // True if we've added a screencast. Doesn't matter if the capturer
  // has been started or not.
  bool IsScreencasting();
  int ScreencastFps(uint32 ssrc);
  int GetScreencastFps(uint32 ssrc);
  int GetScreencastMaxPixels(uint32 ssrc);
  // Get statistics about the current media session.
  bool GetStats(VideoMediaInfo* stats);

@@ -525,6 +529,7 @@ class VideoChannel : public BaseChannel {

 private:
  typedef std::map<uint32, VideoCapturer*> ScreencastMap;
  struct ScreencastDetailsMessageData;

  // overrides from BaseChannel
  virtual void ChangeState();
@@ -544,12 +549,11 @@ class VideoChannel : public BaseChannel {
  void SetRenderer_w(uint32 ssrc, VideoRenderer* renderer);

  VideoCapturer* AddScreencast_w(uint32 ssrc, const ScreencastId& id);
  VideoCapturer* GetScreencastCapturer_w(uint32 ssrc);
  bool SetCapturer_w(uint32 ssrc, VideoCapturer* capturer);
  bool RemoveScreencast_w(uint32 ssrc);
  void OnScreencastWindowEvent_s(uint32 ssrc, talk_base::WindowEvent we);
  bool IsScreencasting_w() const;
  int ScreencastFps_w(uint32 ssrc) const;
  void ScreencastDetails_w(ScreencastDetailsMessageData* d) const;
  void SetScreenCaptureFactory_w(
      ScreenCapturerFactory* screencapture_factory);
  bool GetStats_w(VideoMediaInfo* stats);
@@ -590,11 +594,6 @@ class DataChannel : public BaseChannel {
  ~DataChannel();
  bool Init();

  // downcasts a MediaChannel
  virtual DataMediaChannel* media_channel() const {
    return static_cast<DataMediaChannel*>(BaseChannel::media_channel());
  }

  virtual bool SendData(const SendDataParams& params,
                        const talk_base::Buffer& payload,
                        SendDataResult* result);
@@ -616,6 +615,12 @@ class DataChannel : public BaseChannel {
  // both local and remote descriptions are set, and the channel is unblocked.
  sigslot::signal1<bool> SignalReadyToSendData;

 protected:
  // downcasts a MediaChannel.
  virtual DataMediaChannel* media_channel() const {
    return static_cast<DataMediaChannel*>(BaseChannel::media_channel());
  }

 private:
  struct SendDataMessageData : public talk_base::MessageData {
    SendDataMessageData(const SendDataParams& params,