Update talk to 60420316.

TBR=wu@webrtc.org

Review URL: https://webrtc-codereview.appspot.com/7719005

git-svn-id: http://webrtc.googlecode.com/svn/trunk@5447 4adac7df-926f-26a2-2b94-8c16560cd09d
This commit is contained in:
mallinath@webrtc.org
2014-01-28 06:45:52 +00:00
parent 69ff90e832
commit 752a017809
13 changed files with 109 additions and 88 deletions

View File

@@ -36,9 +36,9 @@ namespace cricket {
// TODO(fbarchard): Make downgrades settable
static const int kMaxCpuDowngrades = 2; // Downgrade at most 2 times for CPU.
// The number of milliseconds of data to require before acting on cpu sampling
// information.
static const size_t kCpuLoadMinSampleTime = 5000;
// The number of cpu samples to require before adapting. This value depends on
// the cpu monitor sampling frequency being 2000ms.
static const int kCpuLoadMinSamples = 3;
// The amount of weight to give to each new cpu load sample. The lower the
// value, the slower we'll adapt to changing cpu conditions.
static const float kCpuLoadWeightCoefficient = 0.4f;
@@ -165,8 +165,8 @@ VideoAdapter::VideoAdapter()
frames_(0),
adapted_frames_(0),
adaption_changes_(0),
previous_width(0),
previous_height(0),
previous_width_(0),
previous_height_(0),
black_output_(false),
is_black_(false),
interval_next_frame_(0) {
@@ -240,7 +240,7 @@ int VideoAdapter::GetOutputNumPixels() const {
// TODO(fbarchard): Add AdaptFrameRate function that only drops frames but
// not resolution.
bool VideoAdapter::AdaptFrame(const VideoFrame* in_frame,
const VideoFrame** out_frame) {
VideoFrame** out_frame) {
talk_base::CritScope cs(&critical_section_);
if (!in_frame || !out_frame) {
return false;
@@ -306,8 +306,8 @@ bool VideoAdapter::AdaptFrame(const VideoFrame* in_frame,
// resolution changes as well. Consider dropping the statistics into their
// own class which could be queried publicly.
bool changed = false;
if (previous_width && (previous_width != (*out_frame)->GetWidth() ||
previous_height != (*out_frame)->GetHeight())) {
if (previous_width_ && (previous_width_ != (*out_frame)->GetWidth() ||
previous_height_ != (*out_frame)->GetHeight())) {
show = true;
++adaption_changes_;
changed = true;
@@ -325,8 +325,8 @@ bool VideoAdapter::AdaptFrame(const VideoFrame* in_frame,
<< "x" << (*out_frame)->GetHeight()
<< " Changed: " << (changed ? "true" : "false");
}
previous_width = (*out_frame)->GetWidth();
previous_height = (*out_frame)->GetHeight();
previous_width_ = (*out_frame)->GetWidth();
previous_height_ = (*out_frame)->GetHeight();
return true;
}
@@ -382,7 +382,8 @@ CoordinatedVideoAdapter::CoordinatedVideoAdapter()
view_adaptation_(true),
view_switch_(false),
cpu_downgrade_count_(0),
cpu_adapt_wait_time_(0),
cpu_load_min_samples_(kCpuLoadMinSamples),
cpu_load_num_samples_(0),
high_system_threshold_(kHighSystemCpuThreshold),
low_system_threshold_(kLowSystemCpuThreshold),
process_threshold_(kProcessCpuThreshold),
@@ -552,22 +553,18 @@ void CoordinatedVideoAdapter::OnCpuLoadUpdated(
// we'll still calculate this information, in case smoothing is later enabled.
system_load_average_ = kCpuLoadWeightCoefficient * system_load +
(1.0f - kCpuLoadWeightCoefficient) * system_load_average_;
++cpu_load_num_samples_;
if (cpu_smoothing_) {
system_load = system_load_average_;
}
// If we haven't started taking samples yet, wait until we have at least
// the correct number of samples per the wait time.
if (cpu_adapt_wait_time_ == 0) {
cpu_adapt_wait_time_ = talk_base::TimeAfter(kCpuLoadMinSampleTime);
}
AdaptRequest request = FindCpuRequest(current_cpus, max_cpus,
process_load, system_load);
// Make sure we're not adapting too quickly.
if (request != KEEP) {
if (talk_base::TimeIsLater(talk_base::Time(),
cpu_adapt_wait_time_)) {
if (cpu_load_num_samples_ < cpu_load_min_samples_) {
LOG(LS_VERBOSE) << "VAdapt CPU load high/low but do not adapt until "
<< talk_base::TimeUntil(cpu_adapt_wait_time_) << " ms";
<< (cpu_load_min_samples_ - cpu_load_num_samples_)
<< " more samples";
request = KEEP;
}
}
@@ -688,7 +685,7 @@ bool CoordinatedVideoAdapter::AdaptToMinimumFormat(int* new_width,
if (changed) {
// When any adaptation occurs, historic CPU load levels are no longer
// accurate. Clear out our state so we can re-learn at the new normal.
cpu_adapt_wait_time_ = talk_base::TimeAfter(kCpuLoadMinSampleTime);
cpu_load_num_samples_ = 0;
system_load_average_ = kCpuLoadInitialAverage;
}

View File

@@ -62,7 +62,7 @@ class VideoAdapter {
// successfully. Return false otherwise.
// output_frame_ is owned by the VideoAdapter that has the best knowledge on
// the output frame.
bool AdaptFrame(const VideoFrame* in_frame, const VideoFrame** out_frame);
bool AdaptFrame(const VideoFrame* in_frame, VideoFrame** out_frame);
void set_scale_third(bool enable) {
LOG(LS_INFO) << "Video Adapter third scaling is now "
@@ -90,8 +90,8 @@ class VideoAdapter {
int frames_; // Number of input frames.
int adapted_frames_; // Number of frames scaled.
int adaption_changes_; // Number of changes in scale factor.
size_t previous_width; // Previous adapter output width.
size_t previous_height; // Previous adapter output height.
size_t previous_width_; // Previous adapter output width.
size_t previous_height_; // Previous adapter output height.
bool black_output_; // Flag to tell if we need to black output_frame_.
bool is_black_; // Flag to tell if output_frame_ is currently black.
int64 interval_next_frame_;
@@ -149,14 +149,15 @@ class CoordinatedVideoAdapter
// When the video is decreased, set the waiting time for CPU adaptation to
// decrease video again.
void set_cpu_adapt_wait_time(uint32 cpu_adapt_wait_time) {
if (cpu_adapt_wait_time_ != static_cast<int>(cpu_adapt_wait_time)) {
LOG(LS_INFO) << "VAdapt Change Cpu Adapt Wait Time from: "
<< cpu_adapt_wait_time_ << " to "
<< cpu_adapt_wait_time;
cpu_adapt_wait_time_ = static_cast<int>(cpu_adapt_wait_time);
void set_cpu_load_min_samples(int cpu_load_min_samples) {
if (cpu_load_min_samples_ != cpu_load_min_samples) {
LOG(LS_INFO) << "VAdapt Change Cpu Adapt Min Samples from: "
<< cpu_load_min_samples_ << " to "
<< cpu_load_min_samples;
cpu_load_min_samples_ = cpu_load_min_samples;
}
}
int cpu_load_min_samples() const { return cpu_load_min_samples_; }
// CPU system load high threshold for reducing resolution. e.g. 0.85f
void set_high_system_threshold(float high_system_threshold) {
ASSERT(high_system_threshold <= 1.0f);
@@ -220,7 +221,8 @@ class CoordinatedVideoAdapter
bool view_adaptation_; // True if view adaptation is enabled.
bool view_switch_; // True if view switch is enabled.
int cpu_downgrade_count_;
int cpu_adapt_wait_time_;
int cpu_load_min_samples_;
int cpu_load_num_samples_;
// cpu system load thresholds relative to max cpus.
float high_system_threshold_;
float low_system_threshold_;

View File

@@ -475,14 +475,25 @@ void VideoCapturer::OnFrameCaptured(VideoCapturer*,
<< desired_width << " x " << desired_height;
return;
}
if (!muted_ && !ApplyProcessors(&i420_frame)) {
VideoFrame* adapted_frame = &i420_frame;
if (!SignalAdaptFrame.is_empty() && !IsScreencast()) {
VideoFrame* out_frame = NULL;
SignalAdaptFrame(this, adapted_frame, &out_frame);
if (!out_frame) {
return; // VideoAdapter dropped the frame.
}
adapted_frame = out_frame;
}
if (!muted_ && !ApplyProcessors(adapted_frame)) {
// Processor dropped the frame.
return;
}
if (muted_) {
i420_frame.SetToBlack();
adapted_frame->SetToBlack();
}
SignalVideoFrame(this, &i420_frame);
SignalVideoFrame(this, adapted_frame);
#endif // VIDEO_FRAME_NAME
}

View File

@@ -255,7 +255,14 @@ class VideoCapturer
// Signal the captured frame to downstream.
sigslot::signal2<VideoCapturer*, const CapturedFrame*,
sigslot::multi_threaded_local> SignalFrameCaptured;
// Signal the captured frame converted to I420 to downstream.
// A VideoAdapter should be hooked up to SignalAdaptFrame which will be
// called before forwarding the frame to SignalVideoFrame. The parameters
// are this capturer instance, the input video frame and output frame
// pointer, respectively.
sigslot::signal3<VideoCapturer*, const VideoFrame*, VideoFrame**,
sigslot::multi_threaded_local> SignalAdaptFrame;
// Signal the captured and possibly adapted frame to downstream consumers
// such as the encoder.
sigslot::signal2<VideoCapturer*, const VideoFrame*,
sigslot::multi_threaded_local> SignalVideoFrame;

View File

@@ -583,13 +583,12 @@ class WebRtcVideoChannelSendInfo : public sigslot::has_slots<> {
external_capture_(external_capture),
capturer_updated_(false),
interval_(0),
video_adapter_(new CoordinatedVideoAdapter),
cpu_monitor_(cpu_monitor) {
overuse_observer_.reset(new WebRtcOveruseObserver(video_adapter_.get()));
SignalCpuAdaptationUnable.repeat(video_adapter_->SignalCpuAdaptationUnable);
overuse_observer_.reset(new WebRtcOveruseObserver(&video_adapter_));
SignalCpuAdaptationUnable.repeat(video_adapter_.SignalCpuAdaptationUnable);
if (cpu_monitor) {
cpu_monitor->SignalUpdate.connect(
video_adapter_.get(), &CoordinatedVideoAdapter::OnCpuLoadUpdated);
&video_adapter_, &CoordinatedVideoAdapter::OnCpuLoadUpdated);
}
}
@@ -599,7 +598,7 @@ class WebRtcVideoChannelSendInfo : public sigslot::has_slots<> {
bool sending() const { return sending_; }
void set_muted(bool on) {
// TODO(asapersson): add support.
// video_adapter_->SetBlackOutput(on);
// video_adapter_.SetBlackOutput(on);
muted_ = on;
}
bool muted() {return muted_; }
@@ -614,7 +613,7 @@ class WebRtcVideoChannelSendInfo : public sigslot::has_slots<> {
if (video_format_ != cricket::VideoFormat()) {
interval_ = video_format_.interval;
}
video_adapter_->OnOutputFormatRequest(video_format_);
video_adapter_.OnOutputFormatRequest(video_format_);
}
void set_interval(int64 interval) {
if (video_format() == cricket::VideoFormat()) {
@@ -627,17 +626,13 @@ class WebRtcVideoChannelSendInfo : public sigslot::has_slots<> {
VideoFormat format(codec.width, codec.height,
VideoFormat::FpsToInterval(codec.maxFramerate),
FOURCC_I420);
if (video_adapter_->output_format().IsSize0x0()) {
video_adapter_->SetOutputFormat(format);
if (video_adapter_.output_format().IsSize0x0()) {
video_adapter_.SetOutputFormat(format);
}
}
bool AdaptFrame(const VideoFrame* in_frame, const VideoFrame** out_frame) {
*out_frame = NULL;
return video_adapter_->AdaptFrame(in_frame, out_frame);
}
int CurrentAdaptReason() const {
return video_adapter_->adapt_reason();
return video_adapter_.adapt_reason();
}
webrtc::CpuOveruseObserver* overuse_observer() {
return overuse_observer_.get();
@@ -674,40 +669,51 @@ class WebRtcVideoChannelSendInfo : public sigslot::has_slots<> {
// be zero, and all frames may be dropped.
// Consider fixing this by having video_adapter keep a pointer to the
// video capturer.
video_adapter_->SetInputFormat(*capture_format);
video_adapter_.SetInputFormat(*capture_format);
}
// TODO(thorcarpenter): When the adapter supports "only frame dropping"
// mode, also hook it up to screencast capturers.
video_capturer->SignalAdaptFrame.connect(
this, &WebRtcVideoChannelSendInfo::AdaptFrame);
}
}
CoordinatedVideoAdapter* video_adapter() { return &video_adapter_; }
void AdaptFrame(VideoCapturer* capturer, const VideoFrame* input,
VideoFrame** adapted) {
video_adapter_.AdaptFrame(input, adapted);
}
void ApplyCpuOptions(const VideoOptions& options) {
bool cpu_adapt, cpu_smoothing, adapt_third;
float low, med, high;
if (options.adapt_input_to_cpu_usage.Get(&cpu_adapt)) {
video_adapter_->set_cpu_adaptation(cpu_adapt);
video_adapter_.set_cpu_adaptation(cpu_adapt);
}
if (options.adapt_cpu_with_smoothing.Get(&cpu_smoothing)) {
video_adapter_->set_cpu_smoothing(cpu_smoothing);
video_adapter_.set_cpu_smoothing(cpu_smoothing);
}
if (options.process_adaptation_threshhold.Get(&med)) {
video_adapter_->set_process_threshold(med);
video_adapter_.set_process_threshold(med);
}
if (options.system_low_adaptation_threshhold.Get(&low)) {
video_adapter_->set_low_system_threshold(low);
video_adapter_.set_low_system_threshold(low);
}
if (options.system_high_adaptation_threshhold.Get(&high)) {
video_adapter_->set_high_system_threshold(high);
video_adapter_.set_high_system_threshold(high);
}
if (options.video_adapt_third.Get(&adapt_third)) {
video_adapter_->set_scale_third(adapt_third);
video_adapter_.set_scale_third(adapt_third);
}
}
void SetCpuOveruseDetection(bool enable) {
if (cpu_monitor_ && enable) {
cpu_monitor_->SignalUpdate.disconnect(video_adapter_.get());
cpu_monitor_->SignalUpdate.disconnect(&video_adapter_);
}
overuse_observer_->Enable(enable);
video_adapter_->set_cpu_adaptation(enable);
video_adapter_.set_cpu_adaptation(enable);
}
void ProcessFrame(const VideoFrame& original_frame, bool mute,
@@ -761,7 +767,7 @@ class WebRtcVideoChannelSendInfo : public sigslot::has_slots<> {
int64 interval_;
talk_base::scoped_ptr<CoordinatedVideoAdapter> video_adapter_;
CoordinatedVideoAdapter video_adapter_;
talk_base::CpuMonitor* cpu_monitor_;
talk_base::scoped_ptr<WebRtcOveruseObserver> overuse_observer_;
};
@@ -2854,7 +2860,16 @@ bool WebRtcVideoMediaChannel::GetRenderer(uint32 ssrc,
return true;
}
// TODO(zhurunz): Add unittests to test this function.
bool WebRtcVideoMediaChannel::GetVideoAdapter(
uint32 ssrc, CoordinatedVideoAdapter** video_adapter) {
SendChannelMap::iterator it = send_channels_.find(ssrc);
if (it == send_channels_.end()) {
return false;
}
*video_adapter = it->second->video_adapter();
return true;
}
void WebRtcVideoMediaChannel::SendFrame(VideoCapturer* capturer,
const VideoFrame* frame) {
// If the |capturer| is registered to any send channel, then send the frame

View File

@@ -60,12 +60,13 @@ class CpuMonitor;
namespace cricket {
class CoordinatedVideoAdapter;
class ViETraceWrapper;
class ViEWrapper;
class VideoCapturer;
class VideoFrame;
class VideoProcessor;
class VideoRenderer;
class ViETraceWrapper;
class ViEWrapper;
class VoiceMediaChannel;
class WebRtcDecoderObserver;
class WebRtcEncoderObserver;
@@ -227,10 +228,6 @@ class WebRtcVideoEngine : public sigslot::has_slots<>,
int local_renderer_h_;
VideoRenderer* local_renderer_;
// Critical section to protect the media processor register/unregister
// while processing a frame
talk_base::CriticalSection signal_media_critical_;
talk_base::scoped_ptr<talk_base::CpuMonitor> cpu_monitor_;
};
@@ -289,12 +286,11 @@ class WebRtcVideoMediaChannel : public talk_base::MessageHandler,
// Public functions for use by tests and other specialized code.
uint32 send_ssrc() const { return 0; }
bool GetRenderer(uint32 ssrc, VideoRenderer** renderer);
bool GetVideoAdapter(uint32 ssrc, CoordinatedVideoAdapter** video_adapter);
void SendFrame(VideoCapturer* capturer, const VideoFrame* frame);
bool SendFrame(WebRtcVideoChannelSendInfo* channel_info,
const VideoFrame* frame, bool is_screencast);
void AdaptAndSendFrame(VideoCapturer* capturer, const VideoFrame* frame);
// Thunk functions for use with HybridVideoEngine
void OnLocalFrame(VideoCapturer* capturer, const VideoFrame* frame) {
SendFrame(0u, frame, capturer->IsScreencast());

View File

@@ -36,6 +36,7 @@
#include "talk/media/base/fakevideorenderer.h"
#include "talk/media/base/mediachannel.h"
#include "talk/media/base/testutils.h"
#include "talk/media/base/videoadapter.h"
#include "talk/media/base/videoengine_unittest.h"
#include "talk/media/webrtc/fakewebrtcvideocapturemodule.h"
#include "talk/media/webrtc/fakewebrtcvideoengine.h"