Implement conference-mode temporal-layer screencast.
Renames VideoStream::temporal_layers to temporal_layer_thresholds_bps to convey
that it contains the bitrate thresholds needed to ramp up between layers
(1 threshold -> 2 temporal layers, etc.).

R=mflodman@webrtc.org, stefan@webrtc.org
BUG=1788,1667

Review URL: https://webrtc-codereview.appspot.com/23269004

git-svn-id: http://webrtc.googlecode.com/svn/trunk@7578 4adac7df-926f-26a2-2b94-8c16560cd09d
Parent: 3bf3d238c8
Commit: b7ed7799e7
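As a quick illustration of the renamed field's semantics, the relation between thresholds and layer count can be sketched in a few lines of standalone C++. This is a minimal sketch using a simplified stand-in struct, not the real webrtc::VideoStream header; only the renamed field is shown.

#include <cstddef>
#include <cstdio>
#include <vector>

// Simplified stand-in for webrtc::VideoStream; only the renamed field matters here.
struct VideoStreamSketch {
  std::vector<int> temporal_layer_thresholds_bps;
};

int main() {
  VideoStreamSketch stream;
  // One threshold at 100 kbps splits the encoding into two temporal layers:
  // a base layer kept at or below 100 kbps and an enhancement layer above it.
  stream.temporal_layer_thresholds_bps.push_back(100000);

  // N thresholds always describe N + 1 temporal layers.
  const std::size_t num_layers =
      stream.temporal_layer_thresholds_bps.size() + 1;
  std::printf("temporal layers: %zu\n", num_layers);  // prints 2
  return 0;
}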
@@ -118,6 +118,8 @@ static const int kDefaultQpMax = 56;
 
 static const int kDefaultRtcpReceiverReportSsrc = 1;
 
+static const int kConferenceModeTemporalLayerBitrateBps = 100000;
+
 // External video encoders are given payloads 120-127. This also means that we
 // only support up to 8 external payload types.
 static const int kExternalVideoPayloadTypeBase = 120;
@@ -1740,6 +1742,14 @@ void WebRtcVideoChannel2::WebRtcVideoSendStream::SetDimensions(
   encoder_config.streams = encoder_factory_->CreateVideoStreams(
       codec, parameters_.options, parameters_.config.rtp.ssrcs.size());
 
+  // Conference mode screencast uses 2 temporal layers split at 100kbit.
+  if (parameters_.options.conference_mode.GetWithDefaultIfUnset(false) &&
+      is_screencast && encoder_config.streams.size() == 1) {
+    encoder_config.streams[0].temporal_layer_thresholds_bps.clear();
+    encoder_config.streams[0].temporal_layer_thresholds_bps.push_back(
+        kConferenceModeTemporalLayerBitrateBps);
+  }
+
   bool stream_reconfigured = stream_->ReconfigureVideoEncoder(encoder_config);
 
   encoder_factory_->DestroyVideoEncoderSettings(
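Read in isolation, the new branch applies only when all three conditions hold: conference mode is enabled, the source is a screencast, and exactly one stream is configured. Below is a minimal standalone sketch of that guard, with hypothetical names (StreamSketch, MaybeConfigureConferenceScreencast) standing in for the real classes; it restates the logic above rather than reproducing the actual API.

#include <vector>

static const int kConferenceModeTemporalLayerBitrateBps = 100000;

// Hypothetical stand-in for a configured stream; only the thresholds field is used.
struct StreamSketch {
  std::vector<int> temporal_layer_thresholds_bps;
};

// Returns true if the two-layer screencast split was installed.
bool MaybeConfigureConferenceScreencast(bool conference_mode,
                                        bool is_screencast,
                                        std::vector<StreamSketch>* streams) {
  if (!conference_mode || !is_screencast || streams->size() != 1)
    return false;
  (*streams)[0].temporal_layer_thresholds_bps.clear();
  (*streams)[0].temporal_layer_thresholds_bps.push_back(
      kConferenceModeTemporalLayerBitrateBps);
  return true;
}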
@@ -1273,6 +1273,43 @@ TEST_F(WebRtcVideoChannel2Test, UsesCorrectSettingsForScreencast) {
 
   EXPECT_EQ(capture_format_hd.width, encoder_config.streams.front().width);
   EXPECT_EQ(capture_format_hd.height, encoder_config.streams.front().height);
+  EXPECT_TRUE(encoder_config.streams[0].temporal_layer_thresholds_bps.empty());
 
   EXPECT_TRUE(channel_->SetCapturer(last_ssrc_, NULL));
 }
+
+TEST_F(WebRtcVideoChannel2Test,
+       ConferenceModeScreencastConfiguresTemporalLayer) {
+  static const int kConferenceScreencastTemporalBitrateBps = 100000;
+  VideoOptions options;
+  options.conference_mode.Set(true);
+  channel_->SetOptions(options);
+
+  AddSendStream();
+
+  cricket::FakeVideoCapturer capturer;
+  capturer.SetScreencast(true);
+  EXPECT_TRUE(channel_->SetCapturer(last_ssrc_, &capturer));
+  cricket::VideoFormat capture_format_hd =
+      capturer.GetSupportedFormats()->front();
+  EXPECT_EQ(cricket::CS_RUNNING, capturer.Start(capture_format_hd));
+
+  EXPECT_TRUE(channel_->SetSend(true));
+
+  EXPECT_TRUE(capturer.CaptureFrame());
+  ASSERT_EQ(1u, fake_call_->GetVideoSendStreams().size());
+  FakeVideoSendStream* send_stream = fake_call_->GetVideoSendStreams().front();
+
+  webrtc::VideoEncoderConfig encoder_config = send_stream->GetEncoderConfig();
+
+  // Verify screencast settings.
+  encoder_config = send_stream->GetEncoderConfig();
+  EXPECT_EQ(webrtc::VideoEncoderConfig::kScreenshare,
+            encoder_config.content_type);
+  ASSERT_EQ(1u, encoder_config.streams.size());
+  ASSERT_EQ(1u, encoder_config.streams[0].temporal_layer_thresholds_bps.size());
+  EXPECT_EQ(kConferenceScreencastTemporalBitrateBps,
+            encoder_config.streams[0].temporal_layer_thresholds_bps[0]);
+
+  EXPECT_TRUE(channel_->SetCapturer(last_ssrc_, NULL));
+}
@@ -39,10 +39,10 @@ std::string VideoStream::ToString() const {
   ss << ", max_bitrate_bps:" << max_bitrate_bps;
   ss << ", max_qp: " << max_qp;
 
-  ss << ", temporal_layers: {";
-  for (size_t i = 0; i < temporal_layers.size(); ++i) {
-    ss << temporal_layers[i];
-    if (i != temporal_layers.size() - 1)
+  ss << ", temporal_layer_thresholds_bps: {";
+  for (size_t i = 0; i < temporal_layer_thresholds_bps.size(); ++i) {
+    ss << temporal_layer_thresholds_bps[i];
+    if (i != temporal_layer_thresholds_bps.size() - 1)
       ss << "}, {";
   }
   ss << '}';
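For reference, the rewritten loop uses "}, {" as the separator, so a stream carrying thresholds {100000, 300000} serializes as temporal_layer_thresholds_bps: {100000}, {300000}. The following is a minimal standalone reproduction of just that formatting, not the real VideoStream::ToString(); the helper name is illustrative.

#include <cstddef>
#include <sstream>
#include <string>
#include <vector>

// Reproduces only the threshold-formatting loop shown in the hunk above.
std::string ThresholdsToString(const std::vector<int>& thresholds_bps) {
  std::ostringstream ss;
  ss << "temporal_layer_thresholds_bps: {";
  for (std::size_t i = 0; i < thresholds_bps.size(); ++i) {
    ss << thresholds_bps[i];
    if (i != thresholds_bps.size() - 1)
      ss << "}, {";
  }
  ss << '}';
  return ss.str();  // e.g. "temporal_layer_thresholds_bps: {100000}, {300000}"
}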
@@ -104,8 +104,17 @@ struct VideoStream {
 
   int max_qp;
 
-  // Bitrate thresholds for enabling additional temporal layers.
-  std::vector<int> temporal_layers;
+  // Bitrate thresholds for enabling additional temporal layers. Since these are
+  // thresholds in between layers, we have one additional layer. One threshold
+  // gives two temporal layers, one below the threshold and one above, two give
+  // three, and so on.
+  // The VideoEncoder may redistribute bitrates over the temporal layers so a
+  // bitrate threshold of 100k and an estimate of 105k does not imply that we
+  // get 100k in one temporal layer and 5k in the other, just that the bitrate
+  // in the first temporal layer should not exceed 100k.
+  // TODO(pbos): Apart from a special case for two-layer screencast these
+  // thresholds are not propagated to the VideoEncoder. To be implemented.
+  std::vector<int> temporal_layer_thresholds_bps;
 };
 
 struct VideoEncoderConfig {
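A worked reading of the comment above, under the same assumptions it states: with a single threshold of 100 kbps and a bandwidth estimate of 105 kbps, the encoder is only required to keep the first temporal layer at or below 100 kbps. It may, for example, allocate 60 kbps to the base layer and 45 kbps to the enhancement layer rather than splitting the rate as 100 kbps + 5 kbps; only the layer count (thresholds.size() + 1) is fixed by the vector.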
@@ -306,12 +306,18 @@ bool VideoSendStream::ReconfigureVideoEncoder(
   } else {
     video_codec.codecType = kVideoCodecGeneric;
   }
 
   switch (config.content_type) {
     case VideoEncoderConfig::kRealtimeVideo:
      video_codec.mode = kRealtimeVideo;
      break;
    case VideoEncoderConfig::kScreenshare:
      video_codec.mode = kScreensharing;
+      if (config.streams.size() == 1 &&
+          config.streams[0].temporal_layer_thresholds_bps.size() == 1) {
+        video_codec.targetBitrate =
+            config.streams[0].temporal_layer_thresholds_bps[0] / 1000;
+      }
      break;
  }
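The division by 1000 converts the threshold from bps to the kbps units used by the VideoCodec bitrate fields (the same conversion applied to target_bitrate_bps below), so the 100000 bps conference-mode threshold becomes a targetBitrate of 100 kbps. A minimal arithmetic sketch of that conversion, in plain C++ rather than the actual ReconfigureVideoEncoder code:

#include <cassert>

int main() {
  const int kThresholdBps = 100000;                       // threshold stored in bps
  const unsigned int target_kbps = kThresholdBps / 1000;  // VideoCodec bitrates are kbps
  assert(target_kbps == 100u);  // matches the 100 kbps conference-mode split point
  return 0;
}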
@@ -327,7 +333,8 @@ bool VideoSendStream::ReconfigureVideoEncoder(
           config.encoder_specific_settings);
     }
     video_codec.codecSpecific.VP8.numberOfTemporalLayers =
-        static_cast<unsigned char>(streams.back().temporal_layers.size());
+        static_cast<unsigned char>(
+            streams.back().temporal_layer_thresholds_bps.size() + 1);
   } else {
     // TODO(pbos): Support encoder_settings codec-agnostically.
     assert(config.encoder_specific_settings == NULL);
@@ -360,8 +367,8 @@ bool VideoSendStream::ReconfigureVideoEncoder(
     sim_stream->targetBitrate = streams[i].target_bitrate_bps / 1000;
     sim_stream->maxBitrate = streams[i].max_bitrate_bps / 1000;
     sim_stream->qpMax = streams[i].max_qp;
-    sim_stream->numberOfTemporalLayers =
-        static_cast<unsigned char>(streams[i].temporal_layers.size());
+    sim_stream->numberOfTemporalLayers = static_cast<unsigned char>(
+        streams[i].temporal_layer_thresholds_bps.size() + 1);
 
     video_codec.width = std::max(video_codec.width,
                                  static_cast<unsigned short>(streams[i].width));
@@ -201,7 +201,7 @@ TEST_F(VideoSendStreamTest, SupportsTransmissionTimeOffset) {
 
     virtual void PerformTest() OVERRIDE {
       EXPECT_EQ(kEventSignaled, Wait())
-          << "Timed out while waiting single RTP packet.";
+          << "Timed out while waiting for a single RTP packet.";
     }
 
   class DelayedEncoder : public test::FakeEncoder {
@@ -1440,8 +1440,8 @@ TEST_F(VideoSendStreamTest, EncoderSetupPropagatesVp8Config) {
      send_config->encoder_settings.payload_name = "VP8";
 
      for (size_t i = 0; i < encoder_config->streams.size(); ++i) {
-        encoder_config->streams[i].temporal_layers.resize(
-            kNumberOfTemporalLayers);
+        encoder_config->streams[i].temporal_layer_thresholds_bps.resize(
+            kNumberOfTemporalLayers - 1);
      }
 
      encoder_config->encoder_specific_settings = &vp8_settings_;
@@ -1550,4 +1550,44 @@ TEST_F(VideoSendStreamTest, RtcpSenderReportContainsMediaBytesSent) {
   RunBaseTest(&test);
 }
 
+TEST_F(VideoSendStreamTest, TranslatesTwoLayerScreencastToTargetBitrate) {
+  static const int kScreencastTargetBitrateKbps = 200;
+  class ScreencastTargetBitrateTest : public test::SendTest,
+                                      public test::FakeEncoder {
+   public:
+    ScreencastTargetBitrateTest()
+        : SendTest(kDefaultTimeoutMs),
+          test::FakeEncoder(Clock::GetRealTimeClock()) {}
+
+   private:
+    virtual int32_t InitEncode(const VideoCodec* config,
+                               int32_t number_of_cores,
+                               uint32_t max_payload_size) {
+      EXPECT_EQ(static_cast<unsigned int>(kScreencastTargetBitrateKbps),
+                config->targetBitrate);
+      observation_complete_->Set();
+      return test::FakeEncoder::InitEncode(
+          config, number_of_cores, max_payload_size);
+    }
+    virtual void ModifyConfigs(
+        VideoSendStream::Config* send_config,
+        std::vector<VideoReceiveStream::Config>* receive_configs,
+        VideoEncoderConfig* encoder_config) OVERRIDE {
+      send_config->encoder_settings.encoder = this;
+      EXPECT_EQ(1u, encoder_config->streams.size());
+      EXPECT_TRUE(
+          encoder_config->streams[0].temporal_layer_thresholds_bps.empty());
+      encoder_config->streams[0].temporal_layer_thresholds_bps.push_back(
+          kScreencastTargetBitrateKbps * 1000);
+      encoder_config->content_type = VideoEncoderConfig::kScreenshare;
+    }
+
+    virtual void PerformTest() OVERRIDE {
+      EXPECT_EQ(kEventSignaled, Wait())
+          << "Timed out while waiting for the encoder to be initialized.";
+    }
+  } test;
+
+  RunBaseTest(&test);
+}
 }  // namespace webrtc