(Auto)update libjingle 63111035 -> 63293120
git-svn-id: http://webrtc.googlecode.com/svn/trunk@5717 4adac7df-926f-26a2-2b94-8c16560cd09d
This commit is contained in:
parent dcc301be07
commit e9793ab8b8

@@ -72,6 +72,10 @@ const char MediaConstraintsInterface::kTemporalLayeredScreencast[] =
 // TODO(ronghuawu): Remove once cpu overuse detection is stable.
 const char MediaConstraintsInterface::kCpuOveruseDetection[] =
     "googCpuOveruseDetection";
+const char MediaConstraintsInterface::kCpuUnderuseThreshold[] =
+    "googCpuUnderuseThreshold";
+const char MediaConstraintsInterface::kCpuOveruseThreshold[] =
+    "googCpuOveruseThreshold";
 
 // Constraint keys for CreateOffer / CreateAnswer defined in W3C specification.
 const char MediaConstraintsInterface::kOfferToReceiveAudio[] =
@@ -90,6 +90,8 @@ class MediaConstraintsInterface {
   // googTemporalLayeredScreencast
   static const char kTemporalLayeredScreencast[];
   static const char kCpuOveruseDetection[];
+  static const char kCpuUnderuseThreshold[];
+  static const char kCpuOveruseThreshold[];
 
   // Constraint keys for CreateOffer / CreateAnswer
   // Specified by the W3C PeerConnection spec
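
For context, a minimal sketch of how a caller could pass the new keys as optional constraints when creating a video source. It assumes a FakeConstraints-style helper (talk/app/webrtc/test/fakeconstraints.h); the function name and the 25/55 values are purely illustrative, not defaults shipped with this change.

#include "talk/app/webrtc/mediaconstraintsinterface.h"
#include "talk/app/webrtc/test/fakeconstraints.h"

webrtc::FakeConstraints MakeCpuAdaptationConstraints() {
  webrtc::FakeConstraints constraints;
  constraints.AddOptional(
      webrtc::MediaConstraintsInterface::kCpuOveruseDetection, "true");
  constraints.AddOptional(
      webrtc::MediaConstraintsInterface::kCpuUnderuseThreshold, 25);
  constraints.AddOptional(
      webrtc::MediaConstraintsInterface::kCpuOveruseThreshold, 55);
  // The constraints object is then handed to the factory call that creates
  // the video source (e.g. CreateVideoSource(capturer, &constraints)).
  return constraints;
}
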
@@ -183,7 +183,11 @@ bool NewFormatWithConstraints(
       constraint.key ==
           MediaConstraintsInterface::kTemporalLayeredScreencast ||
       constraint.key ==
-          MediaConstraintsInterface::kCpuOveruseDetection) {
+          MediaConstraintsInterface::kCpuOveruseDetection ||
+      constraint.key ==
+          MediaConstraintsInterface::kCpuUnderuseThreshold ||
+      constraint.key ==
+          MediaConstraintsInterface::kCpuOveruseThreshold) {
     // These are actually options, not constraints, so they can be satisfied
     // regardless of the format.
     return true;
@@ -267,6 +271,22 @@ const cricket::VideoFormat& GetBestCaptureFormat(
   return *best_it;
 }
 
+// Set |option| to the highest-priority value of |key| in the optional
+// constraints if the key is found and has a valid value.
+void ExtractOptionalOption(const MediaConstraintsInterface* all_constraints,
+    const std::string& key, cricket::Settable<int>* option) {
+  if (!all_constraints) {
+    return;
+  }
+  std::string string_value;
+  int value;
+  if (all_constraints->GetOptional().FindFirst(key, &string_value)) {
+    if (talk_base::FromString(string_value, &value)) {
+      option->Set(value);
+    }
+  }
+}
+
 // Set |option| to the highest-priority value of |key| in the constraints.
 // Return false if the key is mandatory, and the value is invalid.
 bool ExtractOption(const MediaConstraintsInterface* all_constraints,
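
Why an unparseable optional value is silently ignored: ExtractOptionalOption only calls Set() when talk_base::FromString succeeds. A small sketch of that behavior, assuming FromString comes from talk/base/stringencode.h and using an illustrative function name:

#include "talk/base/stringencode.h"

void FromStringSketch() {
  int value = 0;
  // Parses; ExtractOptionalOption would call option->Set(12).
  bool parsed = talk_base::FromString("12", &value);
  // Fails to parse; the option is simply left unset.
  parsed = talk_base::FromString("not int", &value);
  (void)parsed;
}
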
@@ -300,6 +320,12 @@ bool ExtractVideoOptions(const MediaConstraintsInterface* all_constraints,
   all_valid &= ExtractOption(all_constraints,
       MediaConstraintsInterface::kCpuOveruseDetection,
       &(options->cpu_overuse_detection));
+  ExtractOptionalOption(all_constraints,
+      MediaConstraintsInterface::kCpuUnderuseThreshold,
+      &(options->cpu_underuse_threshold));
+  ExtractOptionalOption(all_constraints,
+      MediaConstraintsInterface::kCpuOveruseThreshold,
+      &(options->cpu_overuse_threshold));
 
   return all_valid;
 }
@@ -372,6 +372,10 @@ TEST_F(VideoSourceTest, SetValidOptionValues) {
       MediaConstraintsInterface::kLeakyBucket, "true");
   constraints.AddOptional(
       MediaConstraintsInterface::kCpuOveruseDetection, "true");
+  constraints.AddOptional(
+      MediaConstraintsInterface::kCpuUnderuseThreshold, 12);
+  constraints.AddOptional(
+      MediaConstraintsInterface::kCpuOveruseThreshold, 22);
 
   CreateVideoSource(&constraints);
 
@@ -385,6 +389,10 @@ TEST_F(VideoSourceTest, SetValidOptionValues) {
   EXPECT_TRUE(value);
   EXPECT_TRUE(source_->options()->
       cpu_overuse_detection.GetWithDefaultIfUnset(false));
+  EXPECT_EQ(12, source_->options()->
+      cpu_underuse_threshold.GetWithDefaultIfUnset(23));
+  EXPECT_EQ(22, source_->options()->
+      cpu_overuse_threshold.GetWithDefaultIfUnset(23));
 }
 
 TEST_F(VideoSourceTest, OptionNotSet) {
@@ -393,6 +401,9 @@ TEST_F(VideoSourceTest, OptionNotSet) {
   bool value;
   EXPECT_FALSE(source_->options()->video_noise_reduction.Get(&value));
   EXPECT_FALSE(source_->options()->cpu_overuse_detection.Get(&value));
+  int int_value;
+  EXPECT_FALSE(source_->options()->cpu_underuse_threshold.Get(&int_value));
+  EXPECT_FALSE(source_->options()->cpu_overuse_threshold.Get(&int_value));
 }
 
 TEST_F(VideoSourceTest, MandatoryOptionOverridesOptional) {
@@ -445,6 +456,10 @@ TEST_F(VideoSourceTest, InvalidOptionValueOptional) {
       MediaConstraintsInterface::kNoiseReduction, "true");
   constraints.AddOptional(
       MediaConstraintsInterface::kLeakyBucket, "not boolean");
+  constraints.AddOptional(
+      MediaConstraintsInterface::kCpuUnderuseThreshold, "12");
+  constraints.AddOptional(
+      MediaConstraintsInterface::kCpuOveruseThreshold, "not int");
 
   CreateVideoSource(&constraints);
 
@@ -454,6 +469,10 @@ TEST_F(VideoSourceTest, InvalidOptionValueOptional) {
   EXPECT_TRUE(source_->options()->video_noise_reduction.Get(&value));
   EXPECT_TRUE(value);
   EXPECT_FALSE(source_->options()->video_leaky_bucket.Get(&value));
+  int int_value = 0;
+  EXPECT_TRUE(source_->options()->cpu_underuse_threshold.Get(&int_value));
+  EXPECT_EQ(12, int_value);
+  EXPECT_FALSE(source_->options()->cpu_overuse_threshold.Get(&int_value));
 }
 
 TEST_F(VideoSourceTest, InvalidOptionValueMandatory) {
@@ -307,7 +307,6 @@ struct VideoOptions {
     video_noise_reduction.SetFrom(change.video_noise_reduction);
     video_one_layer_screencast.SetFrom(change.video_one_layer_screencast);
     video_high_bitrate.SetFrom(change.video_high_bitrate);
-    video_watermark.SetFrom(change.video_watermark);
     video_temporal_layer_screencast.SetFrom(
         change.video_temporal_layer_screencast);
     video_temporal_layer_realtime.SetFrom(
@@ -315,6 +314,8 @@ struct VideoOptions {
     video_leaky_bucket.SetFrom(change.video_leaky_bucket);
     video_highest_bitrate.SetFrom(change.video_highest_bitrate);
     cpu_overuse_detection.SetFrom(change.cpu_overuse_detection);
+    cpu_underuse_threshold.SetFrom(change.cpu_underuse_threshold);
+    cpu_overuse_threshold.SetFrom(change.cpu_overuse_threshold);
     conference_mode.SetFrom(change.conference_mode);
     process_adaptation_threshhold.SetFrom(change.process_adaptation_threshhold);
     system_low_adaptation_threshhold.SetFrom(
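
The SetFrom calls above are part of VideoOptions' merge helper (SetAll in talk/media/base/mediachannel.h, per this file's structure): fields set on the incoming options overwrite the target, unset fields leave it untouched. A minimal sketch under that assumption, with an illustrative function name:

#include "talk/media/base/mediachannel.h"

void SetAllSketch() {
  cricket::VideoOptions current;
  current.cpu_underuse_threshold.Set(25);

  cricket::VideoOptions change;
  change.cpu_overuse_threshold.Set(55);  // the only field set on |change|

  current.SetAll(change);
  // cpu_underuse_threshold stays 25; cpu_overuse_threshold becomes 55.
}
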
@@ -338,12 +339,13 @@ struct VideoOptions {
         video_noise_reduction == o.video_noise_reduction &&
         video_one_layer_screencast == o.video_one_layer_screencast &&
         video_high_bitrate == o.video_high_bitrate &&
-        video_watermark == o.video_watermark &&
         video_temporal_layer_screencast == o.video_temporal_layer_screencast &&
         video_temporal_layer_realtime == o.video_temporal_layer_realtime &&
         video_leaky_bucket == o.video_leaky_bucket &&
         video_highest_bitrate == o.video_highest_bitrate &&
         cpu_overuse_detection == o.cpu_overuse_detection &&
+        cpu_underuse_threshold == o.cpu_underuse_threshold &&
+        cpu_overuse_threshold == o.cpu_overuse_threshold &&
         conference_mode == o.conference_mode &&
         process_adaptation_threshhold == o.process_adaptation_threshhold &&
         system_low_adaptation_threshhold ==
@@ -369,7 +371,6 @@ struct VideoOptions {
     ost << ToStringIfSet("noise reduction", video_noise_reduction);
     ost << ToStringIfSet("1 layer screencast", video_one_layer_screencast);
     ost << ToStringIfSet("high bitrate", video_high_bitrate);
-    ost << ToStringIfSet("watermark", video_watermark);
     ost << ToStringIfSet("video temporal layer screencast",
                          video_temporal_layer_screencast);
     ost << ToStringIfSet("video temporal layer realtime",
@@ -377,6 +378,8 @@ struct VideoOptions {
     ost << ToStringIfSet("leaky bucket", video_leaky_bucket);
     ost << ToStringIfSet("highest video bitrate", video_highest_bitrate);
     ost << ToStringIfSet("cpu overuse detection", cpu_overuse_detection);
+    ost << ToStringIfSet("cpu underuse threshold", cpu_underuse_threshold);
+    ost << ToStringIfSet("cpu overuse threshold", cpu_overuse_threshold);
     ost << ToStringIfSet("conference mode", conference_mode);
     ost << ToStringIfSet("process", process_adaptation_threshhold);
     ost << ToStringIfSet("low", system_low_adaptation_threshhold);
@@ -409,8 +412,6 @@ struct VideoOptions {
   Settable<bool> video_one_layer_screencast;
   // Experimental: Enable WebRtc higher bitrate?
   Settable<bool> video_high_bitrate;
-  // Experimental: Add watermark to the rendered video image.
-  Settable<bool> video_watermark;
   // Experimental: Enable WebRTC layered screencast.
   Settable<bool> video_temporal_layer_screencast;
   // Experimental: Enable WebRTC temporal layer strategy for realtime video.
@@ -423,6 +424,10 @@ struct VideoOptions {
   // adaptation algorithm. So this option will override the
   // |adapt_input_to_cpu_usage|.
   Settable<bool> cpu_overuse_detection;
+  // Low threshold for cpu overuse adaptation in ms. (Adapt up)
+  Settable<int> cpu_underuse_threshold;
+  // High threshold for cpu overuse adaptation in ms. (Adapt down)
+  Settable<int> cpu_overuse_threshold;
   // Use conference mode?
   Settable<bool> conference_mode;
   // Threshhold for process cpu adaptation. (Process limit)
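
For reference, a small sketch of the cricket::Settable<int> behavior these members rely on, limited to the calls this change already uses (Set, Get, GetWithDefaultIfUnset); the function name and the 55/85 values are illustrative:

#include "talk/media/base/mediachannel.h"

void SettableSketch() {
  cricket::VideoOptions options;
  int threshold = 0;

  // Unset: Get() returns false and GetWithDefaultIfUnset() falls back.
  bool is_set = options.cpu_overuse_threshold.Get(&threshold);           // false
  int value = options.cpu_overuse_threshold.GetWithDefaultIfUnset(85);   // 85

  // Once set, both report the stored value.
  options.cpu_overuse_threshold.Set(55);
  is_set = options.cpu_overuse_threshold.Get(&threshold);                // true, threshold == 55
  value = options.cpu_overuse_threshold.GetWithDefaultIfUnset(85);       // 55
  (void)is_set;
  (void)value;
}
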
@@ -173,17 +173,12 @@ struct FlushBlackFrameData : public talk_base::MessageData {
 class WebRtcRenderAdapter : public webrtc::ExternalRenderer {
  public:
   explicit WebRtcRenderAdapter(VideoRenderer* renderer)
-      : renderer_(renderer), width_(0), height_(0), watermark_enabled_(false) {
+      : renderer_(renderer), width_(0), height_(0) {
   }
 
   virtual ~WebRtcRenderAdapter() {
   }
 
-  void set_watermark_enabled(bool enable) {
-    talk_base::CritScope cs(&crit_);
-    watermark_enabled_ = enable;
-  }
-
   void SetRenderer(VideoRenderer* renderer) {
     talk_base::CritScope cs(&crit_);
     renderer_ = renderer;
@@ -250,7 +245,6 @@ class WebRtcRenderAdapter : public webrtc::ExternalRenderer {
     video_frame.Alias(buffer, buffer_size, width_, height_,
                       1, 1, elapsed_time, time_stamp, 0);
-
 
     // Sanity check on decoded frame size.
     if (buffer_size != static_cast<int>(VideoFrame::SizeOf(width_, height_))) {
       LOG(LS_WARNING) << "WebRtcRenderAdapter received a strange frame size: "
@@ -294,7 +288,6 @@ class WebRtcRenderAdapter : public webrtc::ExternalRenderer {
   unsigned int width_;
   unsigned int height_;
   talk_base::RateTracker frame_rate_tracker_;
-  bool watermark_enabled_;
 };
 
 class WebRtcDecoderObserver : public webrtc::ViEDecoderObserver {
@@ -36,12 +36,6 @@
 
 namespace cricket {
 
-static const int kWatermarkWidth = 8;
-static const int kWatermarkHeight = 8;
-static const int kWatermarkOffsetFromLeft = 8;
-static const int kWatermarkOffsetFromBottom = 8;
-static const unsigned char kWatermarkMaxYValue = 64;
-
 // Class that wraps ownerhip semantics of a buffer passed to it.
 // * Buffers passed using Attach() become owned by this FrameBuffer and will be
 //   destroyed on FrameBuffer destruction.
@@ -296,29 +290,6 @@ void WebRtcVideoFrame::Attach(
   rotation_ = rotation;
 }
 
-// Add a square watermark near the left-low corner. clamp Y.
-// Returns false on error.
-bool WebRtcVideoFrame::AddWatermark() {
-  size_t w = GetWidth();
-  size_t h = GetHeight();
-
-  if (w < kWatermarkWidth + kWatermarkOffsetFromLeft ||
-      h < kWatermarkHeight + kWatermarkOffsetFromBottom) {
-    return false;
-  }
-
-  uint8* buffer = GetYPlane();
-  for (size_t x = kWatermarkOffsetFromLeft;
-       x < kWatermarkOffsetFromLeft + kWatermarkWidth; ++x) {
-    for (size_t y = h - kWatermarkOffsetFromBottom - kWatermarkHeight;
-         y < h - kWatermarkOffsetFromBottom; ++y) {
-      buffer[y * w + x] =
-          talk_base::_min(buffer[y * w + x], kWatermarkMaxYValue);
-    }
-  }
-  return true;
-}
-
 webrtc::VideoFrame* WebRtcVideoFrame::frame() {
   return video_buffer_->frame();
 }
@@ -67,7 +67,6 @@ class WebRtcVideoFrame : public VideoFrame {
               size_t pixel_width, size_t pixel_height, int64 elapsed_time,
               int64 time_stamp, int rotation);
 
-  bool AddWatermark();
   webrtc::VideoFrame* frame();
   const webrtc::VideoFrame* frame() const;
 