Merge VP8 changes.

R=stefan@webrtc.org
BUG=

Review URL: https://webrtc-codereview.appspot.com/35389004

git-svn-id: http://webrtc.googlecode.com/svn/trunk@7841 4adac7df-926f-26a2-2b94-8c16560cd09d
pbos@webrtc.org committed 2014-12-09 10:36:40 +00:00
parent e04a93bcf5
commit 9115cde6c9
15 changed files with 3482 additions and 394 deletions
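
For context, a minimal sketch of how the new simulcast path introduced by this CL is enabled; it mirrors CreateTestEncoderAdapter() in simulcast_encoder_adapter_unittest.cc below:

// Route VP8Encoder::Create() through the new SimulcastEncoderAdapter.
#include "webrtc/modules/video_coding/codecs/vp8/include/vp8.h"
#include "webrtc/modules/video_coding/codecs/vp8/vp8_factory.h"

webrtc::VP8Encoder* CreateSimulcastCapableEncoder() {
  // When the flag is set, Create() returns a SimulcastEncoderAdapter that
  // wraps one VP8EncoderImpl per simulcast stream (see vp8_factory.cc below).
  webrtc::VP8EncoderFactoryConfig::set_use_simulcast_adapter(true);
  return webrtc::VP8Encoder::Create();
}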

webrtc/modules/modules.gyp

@@ -241,6 +241,10 @@
'video_coding/codecs/test/videoprocessor_unittest.cc',
'video_coding/codecs/vp8/default_temporal_layers_unittest.cc',
'video_coding/codecs/vp8/reference_picture_selection_unittest.cc',
'video_coding/codecs/vp8/screenshare_layers_unittest.cc',
'video_coding/codecs/vp8/simulcast_encoder_adapter_unittest.cc',
'video_coding/codecs/vp8/simulcast_unittest.cc',
'video_coding/codecs/vp8/simulcast_unittest.h',
'video_coding/main/interface/mock/mock_vcm_callbacks.h',
'video_coding/main/source/decoding_state_unittest.cc',
'video_coding/main/source/jitter_buffer_unittest.cc',

webrtc/modules/video_coding/BUILD.gn

@@ -125,13 +125,18 @@ source_set("webrtc_vp8") {
sources = [
"codecs/vp8/default_temporal_layers.cc",
"codecs/vp8/default_temporal_layers.h",
"codecs/vp8/include/vp8.h",
"codecs/vp8/include/vp8_common_types.h",
"codecs/vp8/realtime_temporal_layers.cc",
"codecs/vp8/reference_picture_selection.cc",
"codecs/vp8/reference_picture_selection.h",
"codecs/vp8/include/vp8.h",
"codecs/vp8/include/vp8_common_types.h",
"codecs/vp8/screenshare_layers.cc",
"codecs/vp8/screenshare_layers.h",
"codecs/vp8/simulcast_encoder_adapter.cc",
"codecs/vp8/simulcast_encoder_adapter.h",
"codecs/vp8/temporal_layers.h",
"codecs/vp8/vp8_factory.cc",
"codecs/vp8/vp8_factory.h",
"codecs/vp8/vp8_impl.cc",
"codecs/vp8/vp8_impl.h",
]
@@ -159,6 +164,12 @@ source_set("webrtc_vp8") {
"//third_party/libvpx",
]
}
if (rtc_build_libyuv) {
deps += [ "//third_party/libyuv" ]
} else {
# Need to add a directory normally exported by libyuv.
include_dirs += [ "//third_party/libyuv/include" ]
}
}
source_set("webrtc_vp9") {

webrtc/modules/video_coding/codecs/vp8/screenshare_layers.cc

@@ -0,0 +1,159 @@
/* Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "webrtc/modules/video_coding/codecs/vp8/screenshare_layers.h"
#include <stdlib.h>
#include "vpx/vpx_encoder.h"
#include "vpx/vp8cx.h"
#include "webrtc/modules/video_coding/codecs/interface/video_codec_interface.h"
namespace webrtc {
enum { kOneSecond90Khz = 90000 };
ScreenshareLayers::ScreenshareLayers(int num_temporal_layers,
uint8_t initial_tl0_pic_idx,
FrameDropper* tl0_frame_dropper,
FrameDropper* tl1_frame_dropper)
: tl0_frame_dropper_(tl0_frame_dropper),
tl1_frame_dropper_(tl1_frame_dropper),
number_of_temporal_layers_(num_temporal_layers),
last_base_layer_sync_(false),
tl0_pic_idx_(initial_tl0_pic_idx),
active_layer_(0),
framerate_(5),
last_sync_timestamp_(-1) {
assert(num_temporal_layers > 0);
assert(num_temporal_layers <= 2);
assert(tl0_frame_dropper && tl1_frame_dropper);
}
int ScreenshareLayers::CurrentLayerId() const {
// Codec does not use temporal layers for screenshare.
return 0;
}
int ScreenshareLayers::EncodeFlags(uint32_t timestamp) {
if (number_of_temporal_layers_ <= 1) {
// No flags needed for 1 layer screenshare.
return 0;
}
CalculateFramerate(timestamp);
int flags = 0;
// Note that the ARF is deliberately not used in this scheme, since it is
// allocated for the last key frame to make key frame caching possible.
if (tl0_frame_dropper_->DropFrame()) {
// Must drop TL0, encode TL1 instead.
if (tl1_frame_dropper_->DropFrame()) {
// Must drop both TL0 and TL1.
flags = -1;
} else {
active_layer_ = 1;
if (TimeToSync(timestamp)) {
last_sync_timestamp_ = timestamp;
// Allow prediction from TL0 only, so that participants can switch to the
// high-bitrate stream. This means predicting only from the LAST reference
// frame, and updating only the GF so as not to corrupt TL0.
flags = VP8_EFLAG_NO_REF_ARF;
flags |= VP8_EFLAG_NO_REF_GF;
flags |= VP8_EFLAG_NO_UPD_ARF;
flags |= VP8_EFLAG_NO_UPD_LAST;
} else {
// Allow predicting from both TL0 and TL1.
flags = VP8_EFLAG_NO_REF_ARF;
flags |= VP8_EFLAG_NO_UPD_ARF;
flags |= VP8_EFLAG_NO_UPD_LAST;
}
}
} else {
active_layer_ = 0;
// Since this is TL0 we only allow updating and predicting from the LAST
// reference frame.
flags = VP8_EFLAG_NO_UPD_GF;
flags |= VP8_EFLAG_NO_UPD_ARF;
flags |= VP8_EFLAG_NO_REF_GF;
flags |= VP8_EFLAG_NO_REF_ARF;
}
// Make sure both frame droppers leak out bits.
tl0_frame_dropper_->Leak(framerate_);
tl1_frame_dropper_->Leak(framerate_);
return flags;
}
bool ScreenshareLayers::ConfigureBitrates(int bitrate_kbit,
int max_bitrate_kbit,
int framerate,
vpx_codec_enc_cfg_t* cfg) {
if (framerate > 0) {
framerate_ = framerate;
}
tl0_frame_dropper_->SetRates(bitrate_kbit, framerate_);
tl1_frame_dropper_->SetRates(max_bitrate_kbit, framerate_);
return true;
}
void ScreenshareLayers::FrameEncoded(unsigned int size, uint32_t timestamp) {
if (active_layer_ == 0) {
tl0_frame_dropper_->Fill(size, true);
}
tl1_frame_dropper_->Fill(size, true);
}
void ScreenshareLayers::PopulateCodecSpecific(bool base_layer_sync,
CodecSpecificInfoVP8 *vp8_info,
uint32_t timestamp) {
if (number_of_temporal_layers_ == 1) {
vp8_info->temporalIdx = kNoTemporalIdx;
vp8_info->layerSync = false;
vp8_info->tl0PicIdx = kNoTl0PicIdx;
} else {
vp8_info->temporalIdx = active_layer_;
if (base_layer_sync) {
vp8_info->temporalIdx = 0;
last_sync_timestamp_ = timestamp;
} else if (last_base_layer_sync_ && vp8_info->temporalIdx != 0) {
// Regardless of the pattern, the frame after a base layer sync will always
// be a layer sync.
last_sync_timestamp_ = timestamp;
}
vp8_info->layerSync = (last_sync_timestamp_ == timestamp);
if (vp8_info->temporalIdx == 0) {
tl0_pic_idx_++;
}
last_base_layer_sync_ = base_layer_sync;
vp8_info->tl0PicIdx = tl0_pic_idx_;
}
}
bool ScreenshareLayers::TimeToSync(uint32_t timestamp) const {
const uint32_t timestamp_diff = timestamp - last_sync_timestamp_;
return last_sync_timestamp_ < 0 || timestamp_diff > kOneSecond90Khz;
}
void ScreenshareLayers::CalculateFramerate(uint32_t timestamp) {
timestamp_list_.push_front(timestamp);
// Remove timestamps older than 1 second from the list.
uint32_t timestamp_diff = timestamp - timestamp_list_.back();
while (timestamp_diff > kOneSecond90Khz) {
timestamp_list_.pop_back();
timestamp_diff = timestamp - timestamp_list_.back();
}
// If we have encoded frames within the last second, that number of frames
// is a reasonable first estimate of the framerate.
framerate_ = timestamp_list_.size();
if (timestamp_diff > 0) {
// Estimate the framerate by dividing the number of timestamp diffs with
// the sum of the timestamp diffs (with rounding).
framerate_ = (kOneSecond90Khz * (timestamp_list_.size() - 1) +
timestamp_diff / 2) / timestamp_diff;
}
}
} // namespace webrtc
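
The framerate estimate above rounds (number of intervals) / (timestamp span); a standalone numeric check of the formula (a sketch, not part of the CL):

#include <cassert>
#include <cstdint>

// Restates the rounding in ScreenshareLayers::CalculateFramerate for
// illustration: num_timestamps - 1 intervals over |timestamp_diff| ticks.
int EstimateFramerate(uint32_t num_timestamps, uint32_t timestamp_diff) {
  const uint32_t kOneSecond90Khz = 90000;
  return (kOneSecond90Khz * (num_timestamps - 1) + timestamp_diff / 2) /
         timestamp_diff;
}

int main() {
  // Five frames at 5 fps span four 18000-tick intervals (72000 ticks):
  // (90000 * 4 + 36000) / 72000 = 5.
  assert(EstimateFramerate(5, 4 * 18000) == 5);
  return 0;
}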

webrtc/modules/video_coding/codecs/vp8/screenshare_layers.h

@@ -0,0 +1,66 @@
/* Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_MODULES_VIDEO_CODING_CODECS_VP8_SCREENSHARE_LAYERS_H_
#define WEBRTC_MODULES_VIDEO_CODING_CODECS_VP8_SCREENSHARE_LAYERS_H_
#include <list>
#include "webrtc/modules/video_coding/codecs/vp8/temporal_layers.h"
#include "webrtc/modules/video_coding/utility/include/frame_dropper.h"
#include "webrtc/typedefs.h"
// libvpx forward declaration.
typedef struct vpx_codec_enc_cfg vpx_codec_enc_cfg_t;
namespace webrtc {
struct CodecSpecificInfoVP8;
class ScreenshareLayers : public TemporalLayers {
public:
ScreenshareLayers(int num_temporal_layers,
uint8_t initial_tl0_pic_idx,
FrameDropper* tl0_frame_dropper,
FrameDropper* tl1_frame_dropper);
virtual ~ScreenshareLayers() {}
// Returns the recommended VP8 encode flags needed. May refresh the decoder
// and/or update the reference buffers.
virtual int EncodeFlags(uint32_t timestamp);
virtual bool ConfigureBitrates(int bitrate_kbit,
int max_bitrate_kbit,
int framerate,
vpx_codec_enc_cfg_t* cfg);
virtual void PopulateCodecSpecific(bool base_layer_sync,
CodecSpecificInfoVP8 *vp8_info,
uint32_t timestamp);
virtual void FrameEncoded(unsigned int size, uint32_t timestamp);
virtual int CurrentLayerId() const;
private:
void CalculateFramerate(uint32_t timestamp);
bool TimeToSync(uint32_t timestamp) const;
FrameDropper* tl0_frame_dropper_;
FrameDropper* tl1_frame_dropper_;
int number_of_temporal_layers_;
bool last_base_layer_sync_;
uint8_t tl0_pic_idx_;
int active_layer_;
std::list<uint32_t> timestamp_list_;
int framerate_;
int64_t last_sync_timestamp_;
};
} // namespace webrtc
#endif // WEBRTC_MODULES_VIDEO_CODING_CODECS_VP8_SCREENSHARE_LAYERS_H_
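
For orientation, a sketch of the per-frame call order on this interface, mirroring EncodeFrame() in the unit test below (the libvpx encode step itself is elided; treating -1 as "drop" follows EncodeFlags() above):

// Sketch: one frame through ScreenshareLayers (parameter names assumed).
void EncodeOneFrame(webrtc::ScreenshareLayers* layers,
                    uint32_t timestamp,
                    bool base_layer_sync,
                    webrtc::CodecSpecificInfoVP8* vp8_info,
                    unsigned int encoded_size_bytes) {
  int flags = layers->EncodeFlags(timestamp);  // -1 => drop both layers.
  if (flags == -1)
    return;
  // ... run the libvpx encode for this frame using |flags| ...
  layers->PopulateCodecSpecific(base_layer_sync, vp8_info, timestamp);
  layers->FrameEncoded(encoded_size_bytes, timestamp);
}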

webrtc/modules/video_coding/codecs/vp8/screenshare_layers_unittest.cc

@@ -0,0 +1,244 @@
/*
* Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "gtest/gtest.h"
#include "vpx/vpx_encoder.h"
#include "vpx/vp8cx.h"
#include "webrtc/modules/video_coding/codecs/interface/video_codec_interface.h"
#include "webrtc/modules/video_coding/codecs/vp8/screenshare_layers.h"
#include "webrtc/modules/video_coding/utility/include/mock/mock_frame_dropper.h"
#include "webrtc/system_wrappers/interface/scoped_ptr.h"
using ::testing::_;
using ::testing::NiceMock;
using ::testing::Return;
namespace webrtc {
enum { kTimestampDelta5Fps = 90000 / 5 }; // 5 frames per second at 90 kHz.
enum { kTimestampDelta30Fps = 90000 / 30 }; // 30 frames per second at 90 kHz.
enum { kFrameSize = 2500 };
const int kFlagsTL0 = VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF |
VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_REF_ARF;
const int kFlagsTL1 = VP8_EFLAG_NO_REF_ARF | VP8_EFLAG_NO_UPD_ARF |
VP8_EFLAG_NO_UPD_LAST;
const int kFlagsTL1Sync = VP8_EFLAG_NO_REF_ARF | VP8_EFLAG_NO_REF_GF |
VP8_EFLAG_NO_UPD_ARF | VP8_EFLAG_NO_UPD_LAST;
class ScreenshareLayerTest : public ::testing::Test {
protected:
void SetEncodeExpectations(bool drop_tl0, bool drop_tl1, int framerate) {
EXPECT_CALL(tl0_frame_dropper_, DropFrame())
.Times(1)
.WillRepeatedly(Return(drop_tl0));
if (drop_tl0) {
EXPECT_CALL(tl1_frame_dropper_, DropFrame())
.Times(1)
.WillRepeatedly(Return(drop_tl1));
}
EXPECT_CALL(tl0_frame_dropper_, Leak(framerate))
.Times(1);
EXPECT_CALL(tl1_frame_dropper_, Leak(framerate))
.Times(1);
if (drop_tl0) {
EXPECT_CALL(tl0_frame_dropper_, Fill(_, _))
.Times(0);
if (drop_tl1) {
EXPECT_CALL(tl1_frame_dropper_, Fill(_, _))
.Times(0);
} else {
EXPECT_CALL(tl1_frame_dropper_, Fill(kFrameSize, true))
.Times(1);
}
} else {
EXPECT_CALL(tl0_frame_dropper_, Fill(kFrameSize, true))
.Times(1);
EXPECT_CALL(tl1_frame_dropper_, Fill(kFrameSize, true))
.Times(1);
}
}
void EncodeFrame(uint32_t timestamp,
bool base_sync,
CodecSpecificInfoVP8* vp8_info,
int* flags) {
*flags = layers_->EncodeFlags(timestamp);
layers_->PopulateCodecSpecific(base_sync, vp8_info, timestamp);
layers_->FrameEncoded(kFrameSize, timestamp);
}
NiceMock<MockFrameDropper> tl0_frame_dropper_;
NiceMock<MockFrameDropper> tl1_frame_dropper_;
scoped_ptr<ScreenshareLayers> layers_;
};
TEST_F(ScreenshareLayerTest, 1Layer) {
layers_.reset(new ScreenshareLayers(1, 0, &tl0_frame_dropper_,
&tl1_frame_dropper_));
EXPECT_TRUE(layers_->ConfigureBitrates(100, 1000, 5, NULL));
int flags = 0;
uint32_t timestamp = 0;
CodecSpecificInfoVP8 vp8_info;
// One layer screenshare should not use the frame dropper as all frames will
// belong to the base layer.
EXPECT_CALL(tl0_frame_dropper_, DropFrame())
.Times(0);
EXPECT_CALL(tl1_frame_dropper_, DropFrame())
.Times(0);
flags = layers_->EncodeFlags(timestamp);
EXPECT_EQ(0, flags);
layers_->PopulateCodecSpecific(false, &vp8_info, timestamp);
EXPECT_EQ(static_cast<uint8_t>(kNoTemporalIdx), vp8_info.temporalIdx);
EXPECT_FALSE(vp8_info.layerSync);
EXPECT_EQ(kNoTl0PicIdx, vp8_info.tl0PicIdx);
layers_->FrameEncoded(kFrameSize, timestamp);
EXPECT_CALL(tl0_frame_dropper_, DropFrame())
.Times(0);
EXPECT_CALL(tl1_frame_dropper_, DropFrame())
.Times(0);
flags = layers_->EncodeFlags(timestamp);
EXPECT_EQ(0, flags);
timestamp += kTimestampDelta5Fps;
layers_->PopulateCodecSpecific(false, &vp8_info, timestamp);
EXPECT_EQ(static_cast<uint8_t>(kNoTemporalIdx), vp8_info.temporalIdx);
EXPECT_FALSE(vp8_info.layerSync);
EXPECT_EQ(kNoTl0PicIdx, vp8_info.tl0PicIdx);
layers_->FrameEncoded(kFrameSize, timestamp);
}
TEST_F(ScreenshareLayerTest, 2Layer) {
layers_.reset(new ScreenshareLayers(2, 0, &tl0_frame_dropper_,
&tl1_frame_dropper_));
EXPECT_TRUE(layers_->ConfigureBitrates(100, 1000, 5, NULL));
int flags = 0;
uint32_t timestamp = 0;
uint8_t expected_tl0_idx = 0;
CodecSpecificInfoVP8 vp8_info;
// The first frame's framerate estimate is based on a single frame, thus 1.
SetEncodeExpectations(false, false, 1);
EncodeFrame(timestamp, false, &vp8_info, &flags);
EXPECT_EQ(kFlagsTL0, flags);
EXPECT_EQ(0, vp8_info.temporalIdx);
EXPECT_FALSE(vp8_info.layerSync);
++expected_tl0_idx;
EXPECT_EQ(expected_tl0_idx, vp8_info.tl0PicIdx);
EXPECT_CALL(tl1_frame_dropper_, SetRates(1000, 1))
.Times(1);
EXPECT_TRUE(layers_->ConfigureBitrates(100, 1000, -1, NULL));
// Insert 5 frames at 30 fps. All should belong to TL0.
for (int i = 0; i < 5; ++i) {
timestamp += kTimestampDelta30Fps;
SetEncodeExpectations(false, false, 30);
EncodeFrame(timestamp, false, &vp8_info, &flags);
EXPECT_EQ(0, vp8_info.temporalIdx);
EXPECT_FALSE(vp8_info.layerSync);
++expected_tl0_idx;
EXPECT_EQ(expected_tl0_idx, vp8_info.tl0PicIdx);
}
// Drop two frames from TL0; they are coded in TL1 instead.
timestamp += kTimestampDelta30Fps;
SetEncodeExpectations(true, false, 30);
EncodeFrame(timestamp, false, &vp8_info, &flags);
EXPECT_EQ(kFlagsTL1Sync, flags);
EXPECT_EQ(1, vp8_info.temporalIdx);
EXPECT_TRUE(vp8_info.layerSync);
EXPECT_EQ(expected_tl0_idx, vp8_info.tl0PicIdx);
timestamp += kTimestampDelta30Fps;
SetEncodeExpectations(true, false, 30);
EncodeFrame(timestamp, false, &vp8_info, &flags);
EXPECT_EQ(kFlagsTL1, flags);
EXPECT_EQ(1, vp8_info.temporalIdx);
EXPECT_FALSE(vp8_info.layerSync);
EXPECT_EQ(expected_tl0_idx, vp8_info.tl0PicIdx);
}
TEST_F(ScreenshareLayerTest, 2LayersPeriodicSync) {
layers_.reset(new ScreenshareLayers(2, 0, &tl0_frame_dropper_,
&tl1_frame_dropper_));
EXPECT_TRUE(layers_->ConfigureBitrates(100, 1000, 5, NULL));
int flags = 0;
uint32_t timestamp = 0;
CodecSpecificInfoVP8 vp8_info;
const int kNumFrames = 10;
const bool kDrops[kNumFrames] = {false, true, true, true, true,
true, true, true, true, true};
const int kExpectedFramerates[kNumFrames] = {1, 5, 5, 5, 5, 5, 5, 5, 5, 5};
const bool kExpectedSyncs[kNumFrames] = {false, true, false, false, false,
false, false, true, false, false};
const int kExpectedTemporalIdx[kNumFrames] = {0, 1, 1, 1, 1, 1, 1, 1, 1, 1};
for (int i = 0; i < kNumFrames; ++i) {
timestamp += kTimestampDelta5Fps;
SetEncodeExpectations(kDrops[i], false, kExpectedFramerates[i]);
EncodeFrame(timestamp, false, &vp8_info, &flags);
EXPECT_EQ(kExpectedTemporalIdx[i], vp8_info.temporalIdx);
EXPECT_EQ(kExpectedSyncs[i], vp8_info.layerSync) << "Iteration: " << i;
EXPECT_EQ(1, vp8_info.tl0PicIdx);
}
}
TEST_F(ScreenshareLayerTest, 2LayersToggling) {
layers_.reset(new ScreenshareLayers(2, 0, &tl0_frame_dropper_,
&tl1_frame_dropper_));
EXPECT_TRUE(layers_->ConfigureBitrates(100, 1000, 5, NULL));
int flags = 0;
uint32_t timestamp = 0;
CodecSpecificInfoVP8 vp8_info;
const int kNumFrames = 10;
const bool kDrops[kNumFrames] = {false, true, false, true, false,
true, false, true, false, true};
const int kExpectedFramerates[kNumFrames] = {1, 5, 5, 5, 5, 5, 5, 5, 5, 5};
const bool kExpectedSyncs[kNumFrames] = {false, true, false, false, false,
false, false, true, false, false};
const int kExpectedTemporalIdx[kNumFrames] = {0, 1, 0, 1, 0, 1, 0, 1, 0, 1};
const int kExpectedTl0Idx[kNumFrames] = {1, 1, 2, 2, 3, 3, 4, 4, 5, 5};
for (int i = 0; i < kNumFrames; ++i) {
timestamp += kTimestampDelta5Fps;
SetEncodeExpectations(kDrops[i], false, kExpectedFramerates[i]);
EncodeFrame(timestamp, false, &vp8_info, &flags);
EXPECT_EQ(kExpectedTemporalIdx[i], vp8_info.temporalIdx);
EXPECT_EQ(kExpectedSyncs[i], vp8_info.layerSync) << "Iteration: " << i;
EXPECT_EQ(kExpectedTl0Idx[i], vp8_info.tl0PicIdx);
}
}
TEST_F(ScreenshareLayerTest, 2LayersBothDrops) {
layers_.reset(new ScreenshareLayers(2, 0, &tl0_frame_dropper_,
&tl1_frame_dropper_));
EXPECT_TRUE(layers_->ConfigureBitrates(100, 1000, 5, NULL));
int flags = 0;
uint32_t timestamp = 0;
uint8_t expected_tl0_idx = 0;
CodecSpecificInfoVP8 vp8_info;
SetEncodeExpectations(false, false, 1);
EncodeFrame(timestamp, false, &vp8_info, &flags);
EXPECT_EQ(kFlagsTL0, flags);
EXPECT_EQ(0, vp8_info.temporalIdx);
EXPECT_FALSE(vp8_info.layerSync);
++expected_tl0_idx;
EXPECT_EQ(expected_tl0_idx, vp8_info.tl0PicIdx);
timestamp += kTimestampDelta5Fps;
SetEncodeExpectations(true, false, 5);
EncodeFrame(timestamp, false, &vp8_info, &flags);
EXPECT_EQ(kFlagsTL1Sync, flags);
EXPECT_EQ(1, vp8_info.temporalIdx);
EXPECT_TRUE(vp8_info.layerSync);
EXPECT_EQ(expected_tl0_idx, vp8_info.tl0PicIdx);
timestamp += kTimestampDelta5Fps;
SetEncodeExpectations(true, true, 5);
flags = layers_->EncodeFlags(timestamp);
EXPECT_EQ(-1, flags);
}
} // namespace webrtc

webrtc/modules/video_coding/codecs/vp8/simulcast_encoder_adapter.cc

@@ -0,0 +1,493 @@
/*
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "webrtc/modules/video_coding/codecs/vp8/simulcast_encoder_adapter.h"
#include <algorithm>
// NOTE(ajm): Path provided by gyp.
#include "libyuv/scale.h" // NOLINT
#include "webrtc/common.h"
#include "webrtc/modules/video_coding/codecs/vp8/screenshare_layers.h"
namespace {
const unsigned int kDefaultMinQp = 2;
const unsigned int kDefaultMaxQp = 56;
// Max qp for lowest spatial resolution when doing simulcast.
const unsigned int kLowestResMaxQp = 45;
uint32_t SumStreamTargetBitrate(int streams, const webrtc::VideoCodec& codec) {
uint32_t bitrate_sum = 0;
for (int i = 0; i < streams; ++i) {
bitrate_sum += codec.simulcastStream[i].targetBitrate;
}
return bitrate_sum;
}
uint32_t SumStreamMaxBitrate(int streams, const webrtc::VideoCodec& codec) {
uint32_t bitrate_sum = 0;
for (int i = 0; i < streams; ++i) {
bitrate_sum += codec.simulcastStream[i].maxBitrate;
}
return bitrate_sum;
}
int NumberOfStreams(const webrtc::VideoCodec& codec) {
int streams =
codec.numberOfSimulcastStreams < 1 ? 1 : codec.numberOfSimulcastStreams;
uint32_t simulcast_max_bitrate = SumStreamMaxBitrate(streams, codec);
if (simulcast_max_bitrate == 0) {
streams = 1;
}
return streams;
}
bool ValidSimulcastResolutions(const webrtc::VideoCodec& codec,
int num_streams) {
if (codec.width != codec.simulcastStream[num_streams - 1].width ||
codec.height != codec.simulcastStream[num_streams - 1].height) {
return false;
}
for (int i = 0; i < num_streams; ++i) {
if (codec.width * codec.simulcastStream[i].height !=
codec.height * codec.simulcastStream[i].width) {
return false;
}
}
return true;
}
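// Example (hypothetical numbers): with codec 1280x720, a 640x360 stream
// passes the cross-multiplied aspect check (1280 * 360 == 720 * 640, both
// 460800), while a 640x480 stream fails it.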
int VerifyCodec(const webrtc::VideoCodec* inst) {
if (inst == NULL) {
return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
}
if (inst->maxFramerate < 1) {
return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
}
// Allow zero to represent an unspecified maxBitrate.
if (inst->maxBitrate > 0 && inst->startBitrate > inst->maxBitrate) {
return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
}
if (inst->width <= 1 || inst->height <= 1) {
return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
}
if (inst->codecSpecific.VP8.feedbackModeOn &&
inst->numberOfSimulcastStreams > 1) {
return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
}
if (inst->codecSpecific.VP8.automaticResizeOn &&
inst->numberOfSimulcastStreams > 1) {
return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
}
return WEBRTC_VIDEO_CODEC_OK;
}
// TL1 FrameDropper's max time to drop frames.
const float kTl1MaxTimeToDropFrames = 20.0f;
struct ScreenshareTemporalLayersFactory : webrtc::TemporalLayers::Factory {
ScreenshareTemporalLayersFactory()
: tl1_frame_dropper_(kTl1MaxTimeToDropFrames) {}
virtual ~ScreenshareTemporalLayersFactory() {}
virtual webrtc::TemporalLayers* Create(int num_temporal_layers,
uint8_t initial_tl0_pic_idx) const {
return new webrtc::ScreenshareLayers(num_temporal_layers,
rand(),
&tl0_frame_dropper_,
&tl1_frame_dropper_);
}
mutable webrtc::FrameDropper tl0_frame_dropper_;
mutable webrtc::FrameDropper tl1_frame_dropper_;
};
} // namespace
namespace webrtc {
SimulcastEncoderAdapter::SimulcastEncoderAdapter(
scoped_ptr<VideoEncoderFactory> factory)
: factory_(factory.Pass()), encoded_complete_callback_(NULL) {
memset(&codec_, 0, sizeof(webrtc::VideoCodec));
}
SimulcastEncoderAdapter::~SimulcastEncoderAdapter() {
Release();
}
int SimulcastEncoderAdapter::Release() {
while (!streaminfos_.empty()) {
VideoEncoder* encoder = streaminfos_.back().encoder;
factory_->Destroy(encoder);
streaminfos_.pop_back();
}
return WEBRTC_VIDEO_CODEC_OK;
}
int SimulcastEncoderAdapter::InitEncode(const VideoCodec* inst,
int number_of_cores,
size_t max_payload_size) {
if (number_of_cores < 1) {
return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
}
int ret = VerifyCodec(inst);
if (ret < 0) {
return ret;
}
ret = Release();
if (ret < 0) {
return ret;
}
int number_of_streams = NumberOfStreams(*inst);
bool doing_simulcast = (number_of_streams > 1);
if (doing_simulcast && !ValidSimulcastResolutions(*inst, number_of_streams)) {
return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
}
codec_ = *inst;
// Special mode when screensharing on a single stream.
if (number_of_streams == 1 && inst->mode == kScreensharing) {
screensharing_extra_options_.reset(new Config());
screensharing_extra_options_->Set<TemporalLayers::Factory>(
new ScreenshareTemporalLayersFactory());
codec_.extra_options = screensharing_extra_options_.get();
}
// Create |number_of_streams| encoder instances and initialize them.
for (int i = 0; i < number_of_streams; ++i) {
VideoCodec stream_codec;
bool send_stream = true;
if (!doing_simulcast) {
stream_codec = codec_;
stream_codec.numberOfSimulcastStreams = 1;
} else {
bool highest_resolution_stream = (i == (number_of_streams - 1));
PopulateStreamCodec(&codec_, i, highest_resolution_stream,
&stream_codec, &send_stream);
}
// TODO(ronghuawu): Remove once this is handled in VP8EncoderImpl.
if (stream_codec.qpMax < kDefaultMinQp) {
stream_codec.qpMax = kDefaultMaxQp;
}
VideoEncoder* encoder = factory_->Create();
ret = encoder->InitEncode(&stream_codec,
number_of_cores,
max_payload_size);
if (ret < 0) {
Release();
return ret;
}
encoder->RegisterEncodeCompleteCallback(this);
streaminfos_.push_back(StreamInfo(encoder,
stream_codec.width,
stream_codec.height,
send_stream));
}
return WEBRTC_VIDEO_CODEC_OK;
}
int SimulcastEncoderAdapter::Encode(
const I420VideoFrame& input_image,
const CodecSpecificInfo* codec_specific_info,
const std::vector<VideoFrameType>* frame_types) {
if (!Initialized()) {
return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
}
if (encoded_complete_callback_ == NULL) {
return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
}
// All active streams should generate a key frame if
// a key frame is requested by any stream.
bool send_key_frame = false;
if (frame_types) {
for (size_t i = 0; i < frame_types->size(); ++i) {
if (frame_types->at(i) == kKeyFrame) {
send_key_frame = true;
break;
}
}
}
for (size_t stream_idx = 0; stream_idx < streaminfos_.size(); ++stream_idx) {
if (streaminfos_[stream_idx].key_frame_request &&
streaminfos_[stream_idx].send_stream) {
send_key_frame = true;
break;
}
}
int src_width = input_image.width();
int src_height = input_image.height();
for (size_t stream_idx = 0; stream_idx < streaminfos_.size(); ++stream_idx) {
std::vector<VideoFrameType> stream_frame_types;
if (send_key_frame) {
stream_frame_types.push_back(kKeyFrame);
streaminfos_[stream_idx].key_frame_request = false;
} else {
stream_frame_types.push_back(kDeltaFrame);
}
int dst_width = streaminfos_[stream_idx].width;
int dst_height = streaminfos_[stream_idx].height;
// If scaling isn't required, because the input resolution
// matches the destination or the input image is empty (e.g.
// a keyframe request for encoders with internal camera
// sources), pass the image on directly. Otherwise, we'll
// scale it to match what the encoder expects (below).
if ((dst_width == src_width && dst_height == src_height) ||
input_image.IsZeroSize()) {
streaminfos_[stream_idx].encoder->Encode(input_image,
codec_specific_info,
&stream_frame_types);
} else {
I420VideoFrame dst_frame;
// Make sure the destination frame is of sufficient size, with stride
// values aligned based on the width.
dst_frame.CreateEmptyFrame(dst_width, dst_height,
dst_width, (dst_width + 1) / 2,
(dst_width + 1) / 2);
libyuv::I420Scale(input_image.buffer(kYPlane),
input_image.stride(kYPlane),
input_image.buffer(kUPlane),
input_image.stride(kUPlane),
input_image.buffer(kVPlane),
input_image.stride(kVPlane),
src_width, src_height,
dst_frame.buffer(kYPlane),
dst_frame.stride(kYPlane),
dst_frame.buffer(kUPlane),
dst_frame.stride(kUPlane),
dst_frame.buffer(kVPlane),
dst_frame.stride(kVPlane),
dst_width, dst_height,
libyuv::kFilterBilinear);
dst_frame.set_timestamp(input_image.timestamp());
dst_frame.set_render_time_ms(input_image.render_time_ms());
streaminfos_[stream_idx].encoder->Encode(dst_frame,
codec_specific_info,
&stream_frame_types);
}
}
return WEBRTC_VIDEO_CODEC_OK;
}
int SimulcastEncoderAdapter::RegisterEncodeCompleteCallback(
EncodedImageCallback* callback) {
encoded_complete_callback_ = callback;
return WEBRTC_VIDEO_CODEC_OK;
}
int SimulcastEncoderAdapter::SetChannelParameters(uint32_t packet_loss,
int rtt) {
for (size_t stream_idx = 0; stream_idx < streaminfos_.size(); ++stream_idx) {
streaminfos_[stream_idx].encoder->SetChannelParameters(packet_loss, rtt);
}
return WEBRTC_VIDEO_CODEC_OK;
}
int SimulcastEncoderAdapter::SetRates(uint32_t new_bitrate_kbit,
uint32_t new_framerate) {
if (!Initialized()) {
return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
}
if (new_framerate < 1) {
return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
}
if (codec_.maxBitrate > 0 && new_bitrate_kbit > codec_.maxBitrate) {
new_bitrate_kbit = codec_.maxBitrate;
}
if (new_bitrate_kbit < codec_.minBitrate) {
new_bitrate_kbit = codec_.minBitrate;
}
if (codec_.numberOfSimulcastStreams > 0 &&
new_bitrate_kbit < codec_.simulcastStream[0].minBitrate) {
new_bitrate_kbit = codec_.simulcastStream[0].minBitrate;
}
codec_.maxFramerate = new_framerate;
bool send_stream = true;
uint32_t stream_bitrate = 0;
for (size_t stream_idx = 0; stream_idx < streaminfos_.size(); ++stream_idx) {
stream_bitrate = GetStreamBitrate(stream_idx,
new_bitrate_kbit,
&send_stream);
// Need a key frame if we have not sent this stream before.
if (send_stream && !streaminfos_[stream_idx].send_stream) {
streaminfos_[stream_idx].key_frame_request = true;
}
streaminfos_[stream_idx].send_stream = send_stream;
// TODO(holmer): This is a temporary hack for screensharing, where we
// interpret the startBitrate as the encoder target bitrate. This is
// to allow for a different max bitrate, so if the codec can't meet
// the target we still allow it to overshoot up to the max before dropping
// frames. This hack should be improved.
if (codec_.targetBitrate > 0 &&
(codec_.codecSpecific.VP8.numberOfTemporalLayers == 2 ||
codec_.simulcastStream[0].numberOfTemporalLayers == 2)) {
stream_bitrate = std::min(codec_.maxBitrate, stream_bitrate);
// TODO(ronghuawu): Can't change max bitrate via the VideoEncoder
// interface. And VP8EncoderImpl doesn't take negative framerate.
// max_bitrate = std::min(codec_.maxBitrate, stream_bitrate);
// new_framerate = -1;
}
streaminfos_[stream_idx].encoder->SetRates(stream_bitrate, new_framerate);
}
return WEBRTC_VIDEO_CODEC_OK;
}
int32_t SimulcastEncoderAdapter::Encoded(
const EncodedImage& encodedImage,
const CodecSpecificInfo* codecSpecificInfo,
const RTPFragmentationHeader* fragmentation) {
size_t stream_idx = GetStreamIndex(encodedImage);
CodecSpecificInfo stream_codec_specific = *codecSpecificInfo;
CodecSpecificInfoVP8* vp8Info = &(stream_codec_specific.codecSpecific.VP8);
vp8Info->simulcastIdx = stream_idx;
if (streaminfos_[stream_idx].send_stream) {
return encoded_complete_callback_->Encoded(encodedImage,
&stream_codec_specific,
fragmentation);
} else {
EncodedImage dummy_image;
// Required in case padding is applied to dropped frames.
dummy_image._timeStamp = encodedImage._timeStamp;
dummy_image.capture_time_ms_ = encodedImage.capture_time_ms_;
dummy_image._encodedWidth = encodedImage._encodedWidth;
dummy_image._encodedHeight = encodedImage._encodedHeight;
dummy_image._length = 0;
dummy_image._frameType = kSkipFrame;
vp8Info->keyIdx = kNoKeyIdx;
return encoded_complete_callback_->Encoded(dummy_image,
&stream_codec_specific, NULL);
}
}
uint32_t SimulcastEncoderAdapter::GetStreamBitrate(int stream_idx,
uint32_t new_bitrate_kbit,
bool* send_stream) const {
if (streaminfos_.size() == 1) {
*send_stream = true;
return new_bitrate_kbit;
}
// The bitrate needed to start sending this stream is given by the
// minimum bitrate allowed for encoding this stream, plus the sum target
// rates of all lower streams.
uint32_t sum_target_lower_streams =
SumStreamTargetBitrate(stream_idx, codec_);
uint32_t bitrate_to_send_this_layer =
codec_.simulcastStream[stream_idx].minBitrate + sum_target_lower_streams;
if (new_bitrate_kbit >= bitrate_to_send_this_layer) {
// We have enough bandwidth to send this stream.
*send_stream = true;
// Bitrate for this stream is the new bitrate (|new_bitrate_kbit|) minus the
// sum target rates of the lower streams, and capped to a maximum bitrate.
// The maximum cap depends on whether we send the next higher stream.
// If we will be sending the next higher stream, |max_rate| is given by
// current stream's |targetBitrate|, otherwise it's capped by |maxBitrate|.
if (stream_idx < codec_.numberOfSimulcastStreams - 1) {
unsigned int max_rate = codec_.simulcastStream[stream_idx].maxBitrate;
if (new_bitrate_kbit >= SumStreamTargetBitrate(stream_idx + 1, codec_) +
codec_.simulcastStream[stream_idx + 1].minBitrate) {
max_rate = codec_.simulcastStream[stream_idx].targetBitrate;
}
return std::min(new_bitrate_kbit - sum_target_lower_streams, max_rate);
} else {
// For the highest stream (highest resolution), the |targetBitrate| and
// |maxBitrate| are not used. Any excess bitrate (above the targets of
// all lower streams) is given to this (highest resolution) stream.
return new_bitrate_kbit - sum_target_lower_streams;
}
} else {
// Not enough bitrate for this stream.
// Return the max bitrate of stream |stream_idx| - 1, even though we won't
// send it. We need to keep this resolution encoding in order for the
// multi-encoder to work.
*send_stream = false;
return codec_.simulcastStream[stream_idx - 1].maxBitrate;
}
}
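// Worked example of the allocation above, with hypothetical per-stream
// settings (kbps): stream 0 min 30 / target 150 / max 200, stream 1
// min 120 / target 500 / max 700, stream 2 min 250 (highest stream).
// With new_bitrate_kbit = 800:
//   stream 0: 800 >= 30, send. The next stream starts at 150 + 120 = 270
//             <= 800, so cap at the target: min(800 - 0, 150) = 150.
//   stream 1: 800 >= 150 + 120, send. Stream 2 would need
//             150 + 500 + 250 = 900 > 800, so cap at the max:
//             min(800 - 150, 700) = 650.
//   stream 2: 800 < 900, not sent; returns stream 1's max (700) so this
//             resolution keeps encoding at that rate.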
void SimulcastEncoderAdapter::PopulateStreamCodec(
const webrtc::VideoCodec* inst,
int stream_index,
bool highest_resolution_stream,
webrtc::VideoCodec* stream_codec,
bool* send_stream) {
*stream_codec = *inst;
// Stream specific settings.
stream_codec->codecSpecific.VP8.numberOfTemporalLayers =
inst->simulcastStream[stream_index].numberOfTemporalLayers;
stream_codec->numberOfSimulcastStreams = 0;
stream_codec->width = inst->simulcastStream[stream_index].width;
stream_codec->height = inst->simulcastStream[stream_index].height;
stream_codec->maxBitrate = inst->simulcastStream[stream_index].maxBitrate;
stream_codec->minBitrate = inst->simulcastStream[stream_index].minBitrate;
stream_codec->qpMax = inst->simulcastStream[stream_index].qpMax;
// Settings that are based on stream/resolution.
if (stream_index == 0) {
// Settings for lowest spatial resolutions.
stream_codec->qpMax = kLowestResMaxQp;
}
if (!highest_resolution_stream) {
// For resolutions below CIF, set the codec |complexity| parameter to
// kComplexityHigher, which maps to cpu_used = -4.
int pixels_per_frame = stream_codec->width * stream_codec->height;
if (pixels_per_frame < 352 * 288) {
stream_codec->codecSpecific.VP8.complexity = webrtc::kComplexityHigher;
}
// Turn off denoising for all streams but the highest resolution.
stream_codec->codecSpecific.VP8.denoisingOn = false;
}
// TODO(ronghuawu): what to do with targetBitrate.
int stream_bitrate = GetStreamBitrate(stream_index,
inst->startBitrate,
send_stream);
stream_codec->startBitrate = stream_bitrate;
}
size_t SimulcastEncoderAdapter::GetStreamIndex(
const EncodedImage& encodedImage) {
uint32_t width = encodedImage._encodedWidth;
uint32_t height = encodedImage._encodedHeight;
for (size_t stream_idx = 0; stream_idx < streaminfos_.size(); ++stream_idx) {
if (streaminfos_[stream_idx].width == width &&
streaminfos_[stream_idx].height == height) {
return stream_idx;
}
}
// Should not be reached: the encoded size must match one of the streams.
assert(false);
return 0;
}
bool SimulcastEncoderAdapter::Initialized() const {
return !streaminfos_.empty();
}
} // namespace webrtc

webrtc/modules/video_coding/codecs/vp8/simulcast_encoder_adapter.h

@@ -0,0 +1,111 @@
/*
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*
*/
#ifndef WEBRTC_MODULES_VIDEO_CODING_CODECS_VP8_SIMULCAST_ENCODER_ADAPTER_H_
#define WEBRTC_MODULES_VIDEO_CODING_CODECS_VP8_SIMULCAST_ENCODER_ADAPTER_H_
#include <vector>
#include "webrtc/modules/video_coding/codecs/vp8/include/vp8.h"
#include "webrtc/system_wrappers/interface/scoped_ptr.h"
namespace webrtc {
class VideoEncoderFactory {
public:
virtual VideoEncoder* Create() = 0;
virtual void Destroy(VideoEncoder* encoder) = 0;
virtual ~VideoEncoderFactory() {}
};
// SimulcastEncoderAdapter implements simulcast support by creating multiple
// webrtc::VideoEncoder instances with the given VideoEncoderFactory.
// All the public interfaces are expected to be called from the same thread,
// e.g. the encoder thread.
class SimulcastEncoderAdapter : public VP8Encoder,
public EncodedImageCallback {
public:
explicit SimulcastEncoderAdapter(scoped_ptr<VideoEncoderFactory> factory);
virtual ~SimulcastEncoderAdapter();
// Implements VideoEncoder
virtual int Release() OVERRIDE;
virtual int InitEncode(const VideoCodec* inst,
int number_of_cores,
size_t max_payload_size) OVERRIDE;
virtual int Encode(const I420VideoFrame& input_image,
const CodecSpecificInfo* codec_specific_info,
const std::vector<VideoFrameType>* frame_types) OVERRIDE;
virtual int RegisterEncodeCompleteCallback(
EncodedImageCallback* callback) OVERRIDE;
virtual int SetChannelParameters(uint32_t packet_loss, int rtt) OVERRIDE;
virtual int SetRates(uint32_t new_bitrate_kbit,
uint32_t new_framerate) OVERRIDE;
// Implements EncodedImageCallback
virtual int32_t Encoded(
const EncodedImage& encodedImage,
const CodecSpecificInfo* codecSpecificInfo = NULL,
const RTPFragmentationHeader* fragmentation = NULL) OVERRIDE;
private:
struct StreamInfo {
StreamInfo()
: encoder(NULL), width(0), height(0),
key_frame_request(false), send_stream(true) {}
StreamInfo(VideoEncoder* encoder,
unsigned short width,
unsigned short height,
bool send_stream)
: encoder(encoder),
width(width),
height(height),
key_frame_request(false),
send_stream(send_stream) {}
// Deleted by SimulcastEncoderAdapter::Release().
VideoEncoder* encoder;
unsigned short width;
unsigned short height;
bool key_frame_request;
bool send_stream;
};
// Get the bitrate for stream |stream_idx|, given the total bitrate
// |new_bitrate_kbit|. Also reports, via |send_stream|, whether there is
// enough bandwidth to send this stream.
uint32_t GetStreamBitrate(int stream_idx,
uint32_t new_bitrate_kbit,
bool* send_stream) const;
// Populate the codec settings for each stream.
void PopulateStreamCodec(const webrtc::VideoCodec* inst,
int stream_index,
bool highest_resolution_stream,
webrtc::VideoCodec* stream_codec,
bool* send_stream);
// Get the stream index according to |encodedImage|.
size_t GetStreamIndex(const EncodedImage& encodedImage);
bool Initialized() const;
scoped_ptr<VideoEncoderFactory> factory_;
scoped_ptr<Config> screensharing_extra_options_;
VideoCodec codec_;
std::vector<StreamInfo> streaminfos_;
EncodedImageCallback* encoded_complete_callback_;
};
} // namespace webrtc
#endif // WEBRTC_MODULES_VIDEO_CODING_CODECS_VP8_SIMULCAST_ENCODER_ADAPTER_H_
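
A minimal usage sketch (MyVP8Factory is a hypothetical implementation of the VideoEncoderFactory interface above; the parameter values are taken from the fake-helper unit test below):

// The adapter owns the factory and, in InitEncode(), creates one encoder
// per configured simulcast stream.
webrtc::VideoEncoder* CreateAdapter() {
  webrtc::scoped_ptr<webrtc::VideoEncoderFactory> factory(new MyVP8Factory());
  return new webrtc::SimulcastEncoderAdapter(factory.Pass());
}
// Then, as with any VideoEncoder:
//   encoder->InitEncode(&codec, /*number_of_cores=*/1,
//                       /*max_payload_size=*/1200);
//   encoder->Encode(frame, NULL, &frame_types);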

webrtc/modules/video_coding/codecs/vp8/simulcast_encoder_adapter_unittest.cc

@@ -0,0 +1,304 @@
/*
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include <vector>
#include "testing/gmock/include/gmock/gmock.h"
#include "webrtc/modules/video_coding/codecs/interface/video_codec_interface.h"
#include "webrtc/modules/video_coding/codecs/vp8/simulcast_encoder_adapter.h"
#include "webrtc/modules/video_coding/codecs/vp8/simulcast_unittest.h"
#include "webrtc/modules/video_coding/codecs/vp8/vp8_factory.h"
namespace webrtc {
namespace testing {
static VP8Encoder* CreateTestEncoderAdapter() {
VP8EncoderFactoryConfig::set_use_simulcast_adapter(true);
return VP8Encoder::Create();
}
class TestSimulcastEncoderAdapter : public TestVp8Simulcast {
public:
TestSimulcastEncoderAdapter()
: TestVp8Simulcast(CreateTestEncoderAdapter(),
VP8Decoder::Create()) {}
protected:
virtual void SetUp() {
TestVp8Simulcast::SetUp();
}
virtual void TearDown() {
TestVp8Simulcast::TearDown();
VP8EncoderFactoryConfig::set_use_simulcast_adapter(false);
}
};
TEST_F(TestSimulcastEncoderAdapter, TestKeyFrameRequestsOnAllStreams) {
TestVp8Simulcast::TestKeyFrameRequestsOnAllStreams();
}
TEST_F(TestSimulcastEncoderAdapter, TestPaddingAllStreams) {
TestVp8Simulcast::TestPaddingAllStreams();
}
TEST_F(TestSimulcastEncoderAdapter, TestPaddingTwoStreams) {
TestVp8Simulcast::TestPaddingTwoStreams();
}
TEST_F(TestSimulcastEncoderAdapter, TestPaddingTwoStreamsOneMaxedOut) {
TestVp8Simulcast::TestPaddingTwoStreamsOneMaxedOut();
}
TEST_F(TestSimulcastEncoderAdapter, TestPaddingOneStream) {
TestVp8Simulcast::TestPaddingOneStream();
}
TEST_F(TestSimulcastEncoderAdapter, TestPaddingOneStreamTwoMaxedOut) {
TestVp8Simulcast::TestPaddingOneStreamTwoMaxedOut();
}
TEST_F(TestSimulcastEncoderAdapter, TestSendAllStreams) {
TestVp8Simulcast::TestSendAllStreams();
}
TEST_F(TestSimulcastEncoderAdapter, TestDisablingStreams) {
TestVp8Simulcast::TestDisablingStreams();
}
TEST_F(TestSimulcastEncoderAdapter, TestSwitchingToOneStream) {
TestVp8Simulcast::TestSwitchingToOneStream();
}
TEST_F(TestSimulcastEncoderAdapter, TestSwitchingToOneOddStream) {
TestVp8Simulcast::TestSwitchingToOneOddStream();
}
TEST_F(TestSimulcastEncoderAdapter, TestRPSIEncodeDecode) {
TestVp8Simulcast::TestRPSIEncodeDecode();
}
TEST_F(TestSimulcastEncoderAdapter, TestStrideEncodeDecode) {
TestVp8Simulcast::TestStrideEncodeDecode();
}
TEST_F(TestSimulcastEncoderAdapter, TestSaptioTemporalLayers333PatternEncoder) {
TestVp8Simulcast::TestSaptioTemporalLayers333PatternEncoder();
}
TEST_F(TestSimulcastEncoderAdapter, TestSpatioTemporalLayers321PatternEncoder) {
TestVp8Simulcast::TestSpatioTemporalLayers321PatternEncoder();
}
// TODO(ronghuawu): Enable this test when SkipEncodingUnusedStreams option is
// implemented for SimulcastEncoderAdapter.
TEST_F(TestSimulcastEncoderAdapter,
DISABLED_TestSkipEncodingUnusedStreams) {
TestVp8Simulcast::TestSkipEncodingUnusedStreams();
}
TEST_F(TestSimulcastEncoderAdapter, DISABLED_TestRPSIEncoder) {
TestVp8Simulcast::TestRPSIEncoder();
}
class MockVideoEncoder : public VideoEncoder {
public:
int32_t InitEncode(const VideoCodec* codecSettings,
int32_t numberOfCores,
size_t maxPayloadSize) {
codec_ = *codecSettings;
return 0;
}
int32_t Encode(const I420VideoFrame& inputImage,
const CodecSpecificInfo* codecSpecificInfo,
const std::vector<VideoFrameType>* frame_types) { return 0; }
int32_t RegisterEncodeCompleteCallback(EncodedImageCallback* callback) {
return 0;
}
int32_t Release() {
return 0;
}
int32_t SetRates(uint32_t newBitRate, uint32_t frameRate) {
return 0;
}
MOCK_METHOD2(SetChannelParameters,
int32_t(uint32_t packetLoss, int rtt));
virtual ~MockVideoEncoder() {
}
const VideoCodec& codec() const { return codec_; }
private:
VideoCodec codec_;
};
class MockVideoEncoderFactory : public VideoEncoderFactory {
public:
virtual VideoEncoder* Create() OVERRIDE {
MockVideoEncoder* encoder = new MockVideoEncoder();
encoders_.push_back(encoder);
return encoder;
}
virtual void Destroy(VideoEncoder* encoder) OVERRIDE {
delete encoder;
}
virtual ~MockVideoEncoderFactory() {}
const std::vector<MockVideoEncoder*>& encoders() const { return encoders_; }
private:
std::vector<MockVideoEncoder*> encoders_;
};
class TestSimulcastEncoderAdapterFakeHelper {
public:
TestSimulcastEncoderAdapterFakeHelper()
: factory_(new MockVideoEncoderFactory()) {}
// Can only be called once, as the SimulcastEncoderAdapter takes
// ownership of |factory_|.
VP8Encoder* CreateMockEncoderAdapter() {
scoped_ptr<VideoEncoderFactory> scoped_factory(factory_);
return new SimulcastEncoderAdapter(scoped_factory.Pass());
}
void ExpectCallSetChannelParameters(uint32_t packetLoss, int rtt) {
EXPECT_TRUE(!factory_->encoders().empty());
for (size_t i = 0; i < factory_->encoders().size(); ++i) {
EXPECT_CALL(*factory_->encoders()[i],
SetChannelParameters(packetLoss, rtt)).Times(1);
}
}
MockVideoEncoderFactory* factory() { return factory_; }
private:
MockVideoEncoderFactory* factory_;
};
static const int kTestTemporalLayerProfile[3] = {3, 2, 1};
class TestSimulcastEncoderAdapterFake : public ::testing::Test {
public:
TestSimulcastEncoderAdapterFake()
: helper_(new TestSimulcastEncoderAdapterFakeHelper()),
adapter_(helper_->CreateMockEncoderAdapter()) {}
virtual ~TestSimulcastEncoderAdapterFake() {}
void SetupCodec() {
TestVp8Simulcast::DefaultSettings(
&codec_,
static_cast<const int*>(kTestTemporalLayerProfile));
EXPECT_EQ(0, adapter_->InitEncode(&codec_, 1, 1200));
}
void VerifyCodec(const VideoCodec& ref, int stream_index) {
const VideoCodec& target =
helper_->factory()->encoders()[stream_index]->codec();
EXPECT_EQ(ref.codecType, target.codecType);
EXPECT_EQ(0, strcmp(ref.plName, target.plName));
EXPECT_EQ(ref.plType, target.plType);
EXPECT_EQ(ref.width, target.width);
EXPECT_EQ(ref.height, target.height);
EXPECT_EQ(ref.startBitrate, target.startBitrate);
EXPECT_EQ(ref.maxBitrate, target.maxBitrate);
EXPECT_EQ(ref.minBitrate, target.minBitrate);
EXPECT_EQ(ref.maxFramerate, target.maxFramerate);
EXPECT_EQ(ref.codecSpecific.VP8.pictureLossIndicationOn,
target.codecSpecific.VP8.pictureLossIndicationOn);
EXPECT_EQ(ref.codecSpecific.VP8.feedbackModeOn,
target.codecSpecific.VP8.feedbackModeOn);
EXPECT_EQ(ref.codecSpecific.VP8.complexity,
target.codecSpecific.VP8.complexity);
EXPECT_EQ(ref.codecSpecific.VP8.resilience,
target.codecSpecific.VP8.resilience);
EXPECT_EQ(ref.codecSpecific.VP8.numberOfTemporalLayers,
target.codecSpecific.VP8.numberOfTemporalLayers);
EXPECT_EQ(ref.codecSpecific.VP8.denoisingOn,
target.codecSpecific.VP8.denoisingOn);
EXPECT_EQ(ref.codecSpecific.VP8.errorConcealmentOn,
target.codecSpecific.VP8.errorConcealmentOn);
EXPECT_EQ(ref.codecSpecific.VP8.automaticResizeOn,
target.codecSpecific.VP8.automaticResizeOn);
EXPECT_EQ(ref.codecSpecific.VP8.frameDroppingOn,
target.codecSpecific.VP8.frameDroppingOn);
EXPECT_EQ(ref.codecSpecific.VP8.keyFrameInterval,
target.codecSpecific.VP8.keyFrameInterval);
EXPECT_EQ(ref.qpMax, target.qpMax);
EXPECT_EQ(0, target.numberOfSimulcastStreams);
EXPECT_EQ(ref.mode, target.mode);
EXPECT_EQ(ref.extra_options, target.extra_options);
// No need to compare simulcastStream as numberOfSimulcastStreams should
// always be 0.
}
void InitRefCodec(int stream_index, VideoCodec* ref_codec) {
*ref_codec = codec_;
ref_codec->codecSpecific.VP8.numberOfTemporalLayers =
kTestTemporalLayerProfile[stream_index];
ref_codec->width = codec_.simulcastStream[stream_index].width;
ref_codec->height = codec_.simulcastStream[stream_index].height;
ref_codec->maxBitrate = codec_.simulcastStream[stream_index].maxBitrate;
ref_codec->minBitrate = codec_.simulcastStream[stream_index].minBitrate;
ref_codec->qpMax = codec_.simulcastStream[stream_index].qpMax;
}
void VerifyCodecSettings() {
EXPECT_EQ(3u, helper_->factory()->encoders().size());
VideoCodec ref_codec;
// stream 0, the lowest resolution stream.
InitRefCodec(0, &ref_codec);
ref_codec.qpMax = 45;
ref_codec.codecSpecific.VP8.complexity = webrtc::kComplexityHigher;
ref_codec.codecSpecific.VP8.denoisingOn = false;
ref_codec.startBitrate = 100; // Should equal the target bitrate.
VerifyCodec(ref_codec, 0);
// stream 1
InitRefCodec(1, &ref_codec);
ref_codec.codecSpecific.VP8.denoisingOn = false;
ref_codec.startBitrate = 300;
VerifyCodec(ref_codec, 1);
// stream 2, the biggest resolution stream.
InitRefCodec(2, &ref_codec);
ref_codec.startBitrate = 600;
VerifyCodec(ref_codec, 2);
}
protected:
scoped_ptr<TestSimulcastEncoderAdapterFakeHelper> helper_;
scoped_ptr<VP8Encoder> adapter_;
VideoCodec codec_;
};
TEST_F(TestSimulcastEncoderAdapterFake, InitEncode) {
SetupCodec();
VerifyCodecSettings();
}
TEST_F(TestSimulcastEncoderAdapterFake, SetChannelParameters) {
SetupCodec();
const uint32_t packetLoss = 5;
const int rtt = 30;
helper_->ExpectCallSetChannelParameters(packetLoss, rtt);
adapter_->SetChannelParameters(packetLoss, rtt);
}
} // namespace testing
} // namespace webrtc

webrtc/modules/video_coding/codecs/vp8/simulcast_unittest.cc

@@ -0,0 +1,95 @@
/*
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "webrtc/modules/video_coding/codecs/vp8/simulcast_unittest.h"
namespace webrtc {
namespace testing {
class TestVp8Impl
: public TestVp8Simulcast {
public:
TestVp8Impl()
: TestVp8Simulcast(VP8Encoder::Create(), VP8Decoder::Create()) {}
protected:
virtual void SetUp() {
TestVp8Simulcast::SetUp();
}
virtual void TearDown() {
TestVp8Simulcast::TearDown();
}
};
TEST_F(TestVp8Impl, TestKeyFrameRequestsOnAllStreams) {
TestVp8Simulcast::TestKeyFrameRequestsOnAllStreams();
}
TEST_F(TestVp8Impl, TestPaddingAllStreams) {
TestVp8Simulcast::TestPaddingAllStreams();
}
TEST_F(TestVp8Impl, TestPaddingTwoStreams) {
TestVp8Simulcast::TestPaddingTwoStreams();
}
TEST_F(TestVp8Impl, TestPaddingTwoStreamsOneMaxedOut) {
TestVp8Simulcast::TestPaddingTwoStreamsOneMaxedOut();
}
TEST_F(TestVp8Impl, TestPaddingOneStream) {
TestVp8Simulcast::TestPaddingOneStream();
}
TEST_F(TestVp8Impl, TestPaddingOneStreamTwoMaxedOut) {
TestVp8Simulcast::TestPaddingOneStreamTwoMaxedOut();
}
TEST_F(TestVp8Impl, TestSendAllStreams) {
TestVp8Simulcast::TestSendAllStreams();
}
TEST_F(TestVp8Impl, TestDisablingStreams) {
TestVp8Simulcast::TestDisablingStreams();
}
TEST_F(TestVp8Impl, TestSwitchingToOneStream) {
TestVp8Simulcast::TestSwitchingToOneStream();
}
TEST_F(TestVp8Impl, TestSwitchingToOneOddStream) {
TestVp8Simulcast::TestSwitchingToOneOddStream();
}
TEST_F(TestVp8Impl, TestRPSIEncoder) {
TestVp8Simulcast::TestRPSIEncoder();
}
TEST_F(TestVp8Impl, TestRPSIEncodeDecode) {
TestVp8Simulcast::TestRPSIEncodeDecode();
}
TEST_F(TestVp8Impl, TestSaptioTemporalLayers333PatternEncoder) {
TestVp8Simulcast::TestSaptioTemporalLayers333PatternEncoder();
}
TEST_F(TestVp8Impl, TestSpatioTemporalLayers321PatternEncoder) {
TestVp8Simulcast::TestSpatioTemporalLayers321PatternEncoder();
}
TEST_F(TestVp8Impl, TestStrideEncodeDecode) {
TestVp8Simulcast::TestStrideEncodeDecode();
}
TEST_F(TestVp8Impl, TestSkipEncodingUnusedStreams) {
TestVp8Simulcast::TestSkipEncodingUnusedStreams();
}
} // namespace testing
} // namespace webrtc

webrtc/modules/video_coding/codecs/vp8/simulcast_unittest.h (diff suppressed because it is too large)

webrtc/modules/video_coding/codecs/vp8/vp8.gyp

@@ -27,21 +27,30 @@
}],
],
'sources': [
'reference_picture_selection.h',
'reference_picture_selection.cc',
'include/vp8.h',
'include/vp8_common_types.h',
'vp8_factory.cc',
'vp8_impl.cc',
'default_temporal_layers.cc',
'default_temporal_layers.h',
'include/vp8.h',
'include/vp8_common_types.h',
'realtime_temporal_layers.cc',
'reference_picture_selection.cc',
'reference_picture_selection.h',
'screenshare_layers.cc',
'screenshare_layers.h',
'simulcast_encoder_adapter.cc',
'simulcast_encoder_adapter.h',
'temporal_layers.h',
'vp8_factory.cc',
'vp8_factory.h',
'vp8_impl.cc',
'vp8_impl.h',
],
# Disable warnings to enable Win64 build, issue 1323.
'msvs_disabled_warnings': [
4267, # size_t to int truncation.
],
'include_dirs': [
'<(libyuv_dir)/include',
],
},
], # targets
'conditions': [

webrtc/modules/video_coding/codecs/vp8/vp8_factory.cc

@@ -6,15 +6,37 @@
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*
*/
#include "webrtc/modules/video_coding/codecs/vp8/vp8_factory.h"
#include "webrtc/modules/video_coding/codecs/vp8/simulcast_encoder_adapter.h"
#include "webrtc/modules/video_coding/codecs/vp8/vp8_impl.h"
namespace webrtc {
bool VP8EncoderFactoryConfig::use_simulcast_adapter_ = false;
class VP8EncoderImplFactory : public VideoEncoderFactory {
public:
virtual VideoEncoder* Create() OVERRIDE {
return new VP8EncoderImpl();
}
virtual void Destroy(VideoEncoder* encoder) OVERRIDE {
delete encoder;
}
virtual ~VP8EncoderImplFactory() {}
};
VP8Encoder* VP8Encoder::Create() {
return new VP8EncoderImpl();
if (VP8EncoderFactoryConfig::use_simulcast_adapter()) {
scoped_ptr<VideoEncoderFactory> factory(new VP8EncoderImplFactory());
return new SimulcastEncoderAdapter(factory.Pass());
} else {
return new VP8EncoderImpl();
}
}
VP8Decoder* VP8Decoder::Create() {

webrtc/modules/video_coding/codecs/vp8/vp8_factory.h

@@ -0,0 +1,35 @@
/*
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*
*/
#ifndef WEBRTC_MODULES_VIDEO_CODING_CODECS_VP8_VP8_FACTORY_H_
#define WEBRTC_MODULES_VIDEO_CODING_CODECS_VP8_VP8_FACTORY_H_
namespace webrtc {
// VP8EncoderFactoryConfig controls whether VP8Encoder::Create() returns a
// plain VP8EncoderImpl or a SimulcastEncoderAdapter wrapping VP8EncoderImpl
// instances.
// TODO(ronghuawu): Remove when SimulcastEncoderAdapter+VP8EncoderImpl is ready
// to replace VP8EncoderImpl.
class VP8EncoderFactoryConfig {
public:
static void set_use_simulcast_adapter(bool use_simulcast_adapter) {
use_simulcast_adapter_ = use_simulcast_adapter;
}
static bool use_simulcast_adapter() { return use_simulcast_adapter_; }
private:
static bool use_simulcast_adapter_;
};
} // namespace webrtc
#endif // WEBRTC_MODULES_VIDEO_CODING_CODECS_VP8_VP8_FACTORY_H_

webrtc/modules/video_coding/codecs/vp8/vp8_impl.cc (diff suppressed because it is too large)

webrtc/modules/video_coding/codecs/vp8/vp8_impl.h

@@ -10,24 +10,28 @@
* WEBRTC VP8 wrapper interface
*/
#ifndef WEBRTC_MODULES_VIDEO_CODING_CODECS_VP8_IMPL_H_
#define WEBRTC_MODULES_VIDEO_CODING_CODECS_VP8_IMPL_H_
#ifndef WEBRTC_MODULES_VIDEO_CODING_CODECS_VP8_VP8_IMPL_H_
#define WEBRTC_MODULES_VIDEO_CODING_CODECS_VP8_VP8_IMPL_H_
#include <vector>
// NOTE: This include order must remain to avoid compile errors, even though
// it breaks the style guide.
#include "vpx/vpx_encoder.h"
#include "vpx/vpx_decoder.h"
#include "vpx/vp8cx.h"
#include "vpx/vp8dx.h"
#include "webrtc/common_video/interface/i420_video_frame.h"
#include "webrtc/modules/video_coding/codecs/interface/video_codec_interface.h"
#include "webrtc/modules/video_coding/codecs/vp8/include/vp8.h"
#include "webrtc/modules/video_coding/codecs/vp8/reference_picture_selection.h"
#include "webrtc/modules/video_coding/utility/include/frame_dropper.h"
#include "webrtc/modules/video_coding/utility/quality_scaler.h"
// VPX forward declaration
typedef struct vpx_codec_ctx vpx_codec_ctx_t;
typedef struct vpx_codec_ctx vpx_dec_ctx_t;
typedef struct vpx_codec_enc_cfg vpx_codec_enc_cfg_t;
typedef struct vpx_image vpx_image_t;
typedef struct vpx_ref_frame vpx_ref_frame_t;
struct vpx_codec_cx_pkt;
namespace webrtc {
class TemporalLayers;
class ReferencePictureSelection;
class VP8EncoderImpl : public VP8Encoder {
public:
@@ -52,44 +56,64 @@ class VP8EncoderImpl : public VP8Encoder {
virtual int SetRates(uint32_t new_bitrate_kbit, uint32_t frame_rate);
private:
void SetupTemporalLayers(int num_streams, int num_temporal_layers,
const VideoCodec& codec);
// Determine number of encoder threads to use.
int NumberOfThreads(int width, int height, int number_of_cores);
// Call encoder initialize function and set control settings.
int InitAndSetControlSettings(const VideoCodec* inst);
int InitAndSetControlSettings();
// Update frame size for codec.
int UpdateCodecFrameSize(const I420VideoFrame& input_image);
void PopulateCodecSpecific(CodecSpecificInfo* codec_specific,
const vpx_codec_cx_pkt& pkt,
uint32_t timestamp);
int stream_idx,
uint32_t timestamp,
bool only_predicting_from_key_frame);
int GetEncodedPartitions(const I420VideoFrame& input_image);
int GetEncodedPartitions(const I420VideoFrame& input_image,
bool only_predicting_from_key_frame);
// Get the bitrate for stream |stream_idx|, given the total bitrate
// |new_bitrate_kbit|.
int GetStreamBitrate(int stream_idx,
uint32_t new_bitrate_kbit,
bool* send_stream) const;
// Set the stream state for stream |stream_idx|.
void SetStreamState(bool send_stream, int stream_idx);
// Determine maximum target for Intra frames
//
// Input:
// - optimal_buffer_size : Optimal buffer size
// Return Value : Max target size for Intra frames represented as
// percentage of the per frame bandwidth
uint32_t MaxIntraTarget(uint32_t optimal_buffer_size);
EncodedImage encoded_image_;
EncodedImageCallback* encoded_complete_callback_;
VideoCodec codec_;
bool inited_;
int64_t timestamp_;
uint16_t picture_id_;
bool feedback_mode_;
int cpu_speed_;
int qp_max_;
uint32_t rc_max_intra_target_;
int token_partitions_;
ReferencePictureSelection* rps_;
TemporalLayers* temporal_layers_;
vpx_codec_ctx_t* encoder_;
vpx_codec_enc_cfg_t* config_;
vpx_image_t* raw_;
ReferencePictureSelection rps_;
std::vector<TemporalLayers*> temporal_layers_;
bool down_scale_requested_;
uint32_t down_scale_bitrate_;
FrameDropper tl0_frame_dropper_;
FrameDropper tl1_frame_dropper_;
std::vector<uint16_t> picture_id_;
std::vector<int> last_key_frame_picture_id_;
std::vector<bool> key_frame_request_;
std::vector<bool> send_stream_;
std::vector<int> cpu_speed_;
std::vector<vpx_image_t> raw_images_;
std::vector<EncodedImage> encoded_images_;
std::vector<vpx_codec_ctx_t> encoders_;
std::vector<vpx_codec_enc_cfg_t> configurations_;
std::vector<vpx_rational_t> downsampling_factors_;
QualityScaler quality_scaler_;
}; // end of VP8Encoder class
}; // end of VP8EncoderImpl class
class VP8DecoderImpl : public VP8Decoder {
public:
@@ -117,7 +141,7 @@ class VP8DecoderImpl : public VP8Decoder {
// Copy reference image from this _decoder to the _decoder in copyTo. Set
// which frame type to copy in _refFrame->frame_type before the call to
// this function.
int CopyReference(VP8Decoder* copy);
int CopyReference(VP8DecoderImpl* copy);
int DecodePartitions(const EncodedImage& input_image,
const RTPFragmentationHeader* fragmentation);
@@ -130,15 +154,17 @@ class VP8DecoderImpl : public VP8Decoder {
DecodedImageCallback* decode_complete_callback_;
bool inited_;
bool feedback_mode_;
vpx_dec_ctx_t* decoder_;
vpx_codec_ctx_t* decoder_;
VideoCodec codec_;
EncodedImage last_keyframe_;
int image_format_;
vpx_ref_frame_t* ref_frame_;
int propagation_cnt_;
bool mfqe_enabled_;
int last_frame_width_;
int last_frame_height_;
bool key_frame_required_;
}; // end of VP8Decoder class
}; // end of VP8DecoderImpl class
} // namespace webrtc
#endif // WEBRTC_MODULES_VIDEO_CODING_CODECS_VP8_IMPL_H_
#endif // WEBRTC_MODULES_VIDEO_CODING_CODECS_VP8_VP8_IMPL_H_