From 752a01780914ab1da18aeb606c74f6d3b25ce3ec Mon Sep 17 00:00:00 2001 From: "mallinath@webrtc.org" Date: Tue, 28 Jan 2014 06:45:52 +0000 Subject: [PATCH] Update talk to 60420316. TBR=wu@webrtc.org Review URL: https://webrtc-codereview.appspot.com/7719005 git-svn-id: http://webrtc.googlecode.com/svn/trunk@5447 4adac7df-926f-26a2-2b94-8c16560cd09d --- .../peerconnectioninterface_unittest.cc | 1 - talk/base/asyncsocket.h | 2 - talk/base/fileutils.cc | 2 + talk/base/fileutils.h | 6 +- talk/base/socket.h | 23 +++---- talk/libjingle.gyp | 2 - talk/media/base/videoadapter.cc | 37 +++++------ talk/media/base/videoadapter.h | 22 ++++--- talk/media/base/videocapturer.cc | 17 ++++- talk/media/base/videocapturer.h | 9 ++- talk/media/webrtc/webrtcvideoengine.cc | 63 ++++++++++++------- talk/media/webrtc/webrtcvideoengine.h | 12 ++-- .../webrtc/webrtcvideoengine_unittest.cc | 1 + 13 files changed, 109 insertions(+), 88 deletions(-) diff --git a/talk/app/webrtc/peerconnectioninterface_unittest.cc b/talk/app/webrtc/peerconnectioninterface_unittest.cc index e47e5b17f..4f3f18520 100644 --- a/talk/app/webrtc/peerconnectioninterface_unittest.cc +++ b/talk/app/webrtc/peerconnectioninterface_unittest.cc @@ -179,7 +179,6 @@ class MockPeerConnectionObserver : public PeerConnectionObserver { EXPECT_EQ(pc_->ice_gathering_state(), new_state); } virtual void OnIceCandidate(const webrtc::IceCandidateInterface* candidate) { - EXPECT_NE(PeerConnectionInterface::kIceGatheringNew, pc_->ice_gathering_state()); diff --git a/talk/base/asyncsocket.h b/talk/base/asyncsocket.h index 2854558ad..97859a752 100644 --- a/talk/base/asyncsocket.h +++ b/talk/base/asyncsocket.h @@ -27,7 +27,6 @@ #ifndef TALK_BASE_ASYNCSOCKET_H_ #define TALK_BASE_ASYNCSOCKET_H_ -#ifndef __native_client__ #include "talk/base/common.h" #include "talk/base/sigslot.h" @@ -139,5 +138,4 @@ class AsyncSocketAdapter : public AsyncSocket, public sigslot::has_slots<> { } // namespace talk_base -#endif // __native_client__ #endif // TALK_BASE_ASYNCSOCKET_H_ diff --git a/talk/base/fileutils.cc b/talk/base/fileutils.cc index d73997afe..bbe1c36e9 100644 --- a/talk/base/fileutils.cc +++ b/talk/base/fileutils.cc @@ -297,6 +297,7 @@ bool CreateUniqueFile(Pathname& path, bool create_empty) { return true; } +#ifdef HAS_PLATFORM_FILE // Taken from Chromium's base/platform_file_*.cc. // TODO(grunell): Remove when Chromium has started to use AEC in each source. // http://crbug.com/264611. @@ -320,5 +321,6 @@ bool ClosePlatformFile(PlatformFile file) { return close(file); #endif } +#endif // HAS_PLATFORM_FILE } // namespace talk_base diff --git a/talk/base/fileutils.h b/talk/base/fileutils.h index fba0d000b..3d68af2cb 100644 --- a/talk/base/fileutils.h +++ b/talk/base/fileutils.h @@ -458,14 +458,18 @@ bool CreateUniqueFile(Pathname& path, bool create_empty); // TODO(grunell): Remove when Chromium has started to use AEC in each source. // http://crbug.com/264611. 
 #if defined(WIN32)
+#define HAS_PLATFORM_FILE 1
 typedef HANDLE PlatformFile;
 const PlatformFile kInvalidPlatformFileValue = INVALID_HANDLE_VALUE;
-#elif defined(POSIX)
+#elif defined(POSIX) && !defined(__native_client__)
+#define HAS_PLATFORM_FILE 1
 typedef int PlatformFile;
 const PlatformFile kInvalidPlatformFileValue = -1;
 #endif
+#ifdef HAS_PLATFORM_FILE
 FILE* FdopenPlatformFileForWriting(PlatformFile file);
 bool ClosePlatformFile(PlatformFile file);
+#endif
 } // namespace talk_base
diff --git a/talk/base/socket.h b/talk/base/socket.h
index 56e3ebcee..47f55225d 100644
--- a/talk/base/socket.h
+++ b/talk/base/socket.h
@@ -2,40 +2,32 @@
  * libjingle
  * Copyright 2004--2005, Google Inc.
  *
- * Redistribution and use in source and binary forms, with or without 
+ * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are met:
  *
- * 1. Redistributions of source code must retain the above copyright notice, 
+ * 1. Redistributions of source code must retain the above copyright notice,
  * this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright notice,
  * this list of conditions and the following disclaimer in the documentation
  * and/or other materials provided with the distribution.
- * 3. The name of the author may not be used to endorse or promote products 
+ * 3. The name of the author may not be used to endorse or promote products
  * derived from this software without specific prior written permission.
  *
  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF 
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
  * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
- * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
  * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR 
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF 
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
  * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 #ifndef TALK_BASE_SOCKET_H__
 #define TALK_BASE_SOCKET_H__
-#if defined(__native_client__)
-namespace talk_base {
-// These should never be defined or instantiated.
-class Socket; -class AsyncSocket; -} // namespace talk_base -#else - #include #ifdef POSIX @@ -207,5 +199,4 @@ class Socket { } // namespace talk_base -#endif // !__native_client__ #endif // TALK_BASE_SOCKET_H__ diff --git a/talk/libjingle.gyp b/talk/libjingle.gyp index 38a165be8..e77e48a3e 100755 --- a/talk/libjingle.gyp +++ b/talk/libjingle.gyp @@ -509,8 +509,6 @@ 'xmpp/pubsub_task.h', 'xmpp/pubsubclient.cc', 'xmpp/pubsubclient.h', - 'xmpp/pubsubstateclient.cc', - 'xmpp/pubsubstateclient.h', 'xmpp/pubsubtasks.cc', 'xmpp/pubsubtasks.h', 'xmpp/receivetask.cc', diff --git a/talk/media/base/videoadapter.cc b/talk/media/base/videoadapter.cc index 588f1950e..3cd6cac96 100644 --- a/talk/media/base/videoadapter.cc +++ b/talk/media/base/videoadapter.cc @@ -36,9 +36,9 @@ namespace cricket { // TODO(fbarchard): Make downgrades settable static const int kMaxCpuDowngrades = 2; // Downgrade at most 2 times for CPU. -// The number of milliseconds of data to require before acting on cpu sampling -// information. -static const size_t kCpuLoadMinSampleTime = 5000; +// The number of cpu samples to require before adapting. This value depends on +// the cpu monitor sampling frequency being 2000ms. +static const int kCpuLoadMinSamples = 3; // The amount of weight to give to each new cpu load sample. The lower the // value, the slower we'll adapt to changing cpu conditions. static const float kCpuLoadWeightCoefficient = 0.4f; @@ -165,8 +165,8 @@ VideoAdapter::VideoAdapter() frames_(0), adapted_frames_(0), adaption_changes_(0), - previous_width(0), - previous_height(0), + previous_width_(0), + previous_height_(0), black_output_(false), is_black_(false), interval_next_frame_(0) { @@ -240,7 +240,7 @@ int VideoAdapter::GetOutputNumPixels() const { // TODO(fbarchard): Add AdaptFrameRate function that only drops frames but // not resolution. bool VideoAdapter::AdaptFrame(const VideoFrame* in_frame, - const VideoFrame** out_frame) { + VideoFrame** out_frame) { talk_base::CritScope cs(&critical_section_); if (!in_frame || !out_frame) { return false; @@ -306,8 +306,8 @@ bool VideoAdapter::AdaptFrame(const VideoFrame* in_frame, // resolution changes as well. Consider dropping the statistics into their // own class which could be queried publically. bool changed = false; - if (previous_width && (previous_width != (*out_frame)->GetWidth() || - previous_height != (*out_frame)->GetHeight())) { + if (previous_width_ && (previous_width_ != (*out_frame)->GetWidth() || + previous_height_ != (*out_frame)->GetHeight())) { show = true; ++adaption_changes_; changed = true; @@ -325,8 +325,8 @@ bool VideoAdapter::AdaptFrame(const VideoFrame* in_frame, << "x" << (*out_frame)->GetHeight() << " Changed: " << (changed ? "true" : "false"); } - previous_width = (*out_frame)->GetWidth(); - previous_height = (*out_frame)->GetHeight(); + previous_width_ = (*out_frame)->GetWidth(); + previous_height_ = (*out_frame)->GetHeight(); return true; } @@ -382,7 +382,8 @@ CoordinatedVideoAdapter::CoordinatedVideoAdapter() view_adaptation_(true), view_switch_(false), cpu_downgrade_count_(0), - cpu_adapt_wait_time_(0), + cpu_load_min_samples_(kCpuLoadMinSamples), + cpu_load_num_samples_(0), high_system_threshold_(kHighSystemCpuThreshold), low_system_threshold_(kLowSystemCpuThreshold), process_threshold_(kProcessCpuThreshold), @@ -552,22 +553,18 @@ void CoordinatedVideoAdapter::OnCpuLoadUpdated( // we'll still calculate this information, in case smoothing is later enabled. 
system_load_average_ = kCpuLoadWeightCoefficient * system_load + (1.0f - kCpuLoadWeightCoefficient) * system_load_average_; + ++cpu_load_num_samples_; if (cpu_smoothing_) { system_load = system_load_average_; } - // If we haven't started taking samples yet, wait until we have at least - // the correct number of samples per the wait time. - if (cpu_adapt_wait_time_ == 0) { - cpu_adapt_wait_time_ = talk_base::TimeAfter(kCpuLoadMinSampleTime); - } AdaptRequest request = FindCpuRequest(current_cpus, max_cpus, process_load, system_load); // Make sure we're not adapting too quickly. if (request != KEEP) { - if (talk_base::TimeIsLater(talk_base::Time(), - cpu_adapt_wait_time_)) { + if (cpu_load_num_samples_ < cpu_load_min_samples_) { LOG(LS_VERBOSE) << "VAdapt CPU load high/low but do not adapt until " - << talk_base::TimeUntil(cpu_adapt_wait_time_) << " ms"; + << (cpu_load_min_samples_ - cpu_load_num_samples_) + << " more samples"; request = KEEP; } } @@ -688,7 +685,7 @@ bool CoordinatedVideoAdapter::AdaptToMinimumFormat(int* new_width, if (changed) { // When any adaptation occurs, historic CPU load levels are no longer // accurate. Clear out our state so we can re-learn at the new normal. - cpu_adapt_wait_time_ = talk_base::TimeAfter(kCpuLoadMinSampleTime); + cpu_load_num_samples_ = 0; system_load_average_ = kCpuLoadInitialAverage; } diff --git a/talk/media/base/videoadapter.h b/talk/media/base/videoadapter.h index 38a8c9d63..272df72de 100644 --- a/talk/media/base/videoadapter.h +++ b/talk/media/base/videoadapter.h @@ -62,7 +62,7 @@ class VideoAdapter { // successfully. Return false otherwise. // output_frame_ is owned by the VideoAdapter that has the best knowledge on // the output frame. - bool AdaptFrame(const VideoFrame* in_frame, const VideoFrame** out_frame); + bool AdaptFrame(const VideoFrame* in_frame, VideoFrame** out_frame); void set_scale_third(bool enable) { LOG(LS_INFO) << "Video Adapter third scaling is now " @@ -90,8 +90,8 @@ class VideoAdapter { int frames_; // Number of input frames. int adapted_frames_; // Number of frames scaled. int adaption_changes_; // Number of changes in scale factor. - size_t previous_width; // Previous adapter output width. - size_t previous_height; // Previous adapter output height. + size_t previous_width_; // Previous adapter output width. + size_t previous_height_; // Previous adapter output height. bool black_output_; // Flag to tell if we need to black output_frame_. bool is_black_; // Flag to tell if output_frame_ is currently black. int64 interval_next_frame_; @@ -149,14 +149,15 @@ class CoordinatedVideoAdapter // When the video is decreased, set the waiting time for CPU adaptation to // decrease video again. - void set_cpu_adapt_wait_time(uint32 cpu_adapt_wait_time) { - if (cpu_adapt_wait_time_ != static_cast(cpu_adapt_wait_time)) { - LOG(LS_INFO) << "VAdapt Change Cpu Adapt Wait Time from: " - << cpu_adapt_wait_time_ << " to " - << cpu_adapt_wait_time; - cpu_adapt_wait_time_ = static_cast(cpu_adapt_wait_time); + void set_cpu_load_min_samples(int cpu_load_min_samples) { + if (cpu_load_min_samples_ != cpu_load_min_samples) { + LOG(LS_INFO) << "VAdapt Change Cpu Adapt Min Samples from: " + << cpu_load_min_samples_ << " to " + << cpu_load_min_samples; + cpu_load_min_samples_ = cpu_load_min_samples; } } + int cpu_load_min_samples() const { return cpu_load_min_samples_; } // CPU system load high threshold for reducing resolution. e.g. 
0.85f void set_high_system_threshold(float high_system_threshold) { ASSERT(high_system_threshold <= 1.0f); @@ -220,7 +221,8 @@ class CoordinatedVideoAdapter bool view_adaptation_; // True if view adaptation is enabled. bool view_switch_; // True if view switch is enabled. int cpu_downgrade_count_; - int cpu_adapt_wait_time_; + int cpu_load_min_samples_; + int cpu_load_num_samples_; // cpu system load thresholds relative to max cpus. float high_system_threshold_; float low_system_threshold_; diff --git a/talk/media/base/videocapturer.cc b/talk/media/base/videocapturer.cc index 355cc64dd..b2f41dcfb 100644 --- a/talk/media/base/videocapturer.cc +++ b/talk/media/base/videocapturer.cc @@ -475,14 +475,25 @@ void VideoCapturer::OnFrameCaptured(VideoCapturer*, << desired_width << " x " << desired_height; return; } - if (!muted_ && !ApplyProcessors(&i420_frame)) { + + VideoFrame* adapted_frame = &i420_frame; + if (!SignalAdaptFrame.is_empty() && !IsScreencast()) { + VideoFrame* out_frame = NULL; + SignalAdaptFrame(this, adapted_frame, &out_frame); + if (!out_frame) { + return; // VideoAdapter dropped the frame. + } + adapted_frame = out_frame; + } + + if (!muted_ && !ApplyProcessors(adapted_frame)) { // Processor dropped the frame. return; } if (muted_) { - i420_frame.SetToBlack(); + adapted_frame->SetToBlack(); } - SignalVideoFrame(this, &i420_frame); + SignalVideoFrame(this, adapted_frame); #endif // VIDEO_FRAME_NAME } diff --git a/talk/media/base/videocapturer.h b/talk/media/base/videocapturer.h index 933fc8250..15c016fd1 100644 --- a/talk/media/base/videocapturer.h +++ b/talk/media/base/videocapturer.h @@ -255,7 +255,14 @@ class VideoCapturer // Signal the captured frame to downstream. sigslot::signal2 SignalFrameCaptured; - // Signal the captured frame converted to I420 to downstream. + // A VideoAdapter should be hooked up to SignalAdaptFrame which will be + // called before forwarding the frame to SignalVideoFrame. The parameters + // are this capturer instance, the input video frame and output frame + // pointer, respectively. + sigslot::signal3 SignalAdaptFrame; + // Signal the captured and possibly adapted frame to downstream consumers + // such as the encoder. sigslot::signal2 SignalVideoFrame; diff --git a/talk/media/webrtc/webrtcvideoengine.cc b/talk/media/webrtc/webrtcvideoengine.cc index ca0ed414c..8c5cda9eb 100644 --- a/talk/media/webrtc/webrtcvideoengine.cc +++ b/talk/media/webrtc/webrtcvideoengine.cc @@ -583,13 +583,12 @@ class WebRtcVideoChannelSendInfo : public sigslot::has_slots<> { external_capture_(external_capture), capturer_updated_(false), interval_(0), - video_adapter_(new CoordinatedVideoAdapter), cpu_monitor_(cpu_monitor) { - overuse_observer_.reset(new WebRtcOveruseObserver(video_adapter_.get())); - SignalCpuAdaptationUnable.repeat(video_adapter_->SignalCpuAdaptationUnable); + overuse_observer_.reset(new WebRtcOveruseObserver(&video_adapter_)); + SignalCpuAdaptationUnable.repeat(video_adapter_.SignalCpuAdaptationUnable); if (cpu_monitor) { cpu_monitor->SignalUpdate.connect( - video_adapter_.get(), &CoordinatedVideoAdapter::OnCpuLoadUpdated); + &video_adapter_, &CoordinatedVideoAdapter::OnCpuLoadUpdated); } } @@ -599,7 +598,7 @@ class WebRtcVideoChannelSendInfo : public sigslot::has_slots<> { bool sending() const { return sending_; } void set_muted(bool on) { // TODO(asapersson): add support. 
- // video_adapter_->SetBlackOutput(on); + // video_adapter_.SetBlackOutput(on); muted_ = on; } bool muted() {return muted_; } @@ -614,7 +613,7 @@ class WebRtcVideoChannelSendInfo : public sigslot::has_slots<> { if (video_format_ != cricket::VideoFormat()) { interval_ = video_format_.interval; } - video_adapter_->OnOutputFormatRequest(video_format_); + video_adapter_.OnOutputFormatRequest(video_format_); } void set_interval(int64 interval) { if (video_format() == cricket::VideoFormat()) { @@ -627,17 +626,13 @@ class WebRtcVideoChannelSendInfo : public sigslot::has_slots<> { VideoFormat format(codec.width, codec.height, VideoFormat::FpsToInterval(codec.maxFramerate), FOURCC_I420); - if (video_adapter_->output_format().IsSize0x0()) { - video_adapter_->SetOutputFormat(format); + if (video_adapter_.output_format().IsSize0x0()) { + video_adapter_.SetOutputFormat(format); } } - bool AdaptFrame(const VideoFrame* in_frame, const VideoFrame** out_frame) { - *out_frame = NULL; - return video_adapter_->AdaptFrame(in_frame, out_frame); - } int CurrentAdaptReason() const { - return video_adapter_->adapt_reason(); + return video_adapter_.adapt_reason(); } webrtc::CpuOveruseObserver* overuse_observer() { return overuse_observer_.get(); @@ -674,40 +669,51 @@ class WebRtcVideoChannelSendInfo : public sigslot::has_slots<> { // be zero, and all frames may be dropped. // Consider fixing this by having video_adapter keep a pointer to the // video capturer. - video_adapter_->SetInputFormat(*capture_format); + video_adapter_.SetInputFormat(*capture_format); } + // TODO(thorcarpenter): When the adapter supports "only frame dropping" + // mode, also hook it up to screencast capturers. + video_capturer->SignalAdaptFrame.connect( + this, &WebRtcVideoChannelSendInfo::AdaptFrame); } } + CoordinatedVideoAdapter* video_adapter() { return &video_adapter_; } + + void AdaptFrame(VideoCapturer* capturer, const VideoFrame* input, + VideoFrame** adapted) { + video_adapter_.AdaptFrame(input, adapted); + } + void ApplyCpuOptions(const VideoOptions& options) { bool cpu_adapt, cpu_smoothing, adapt_third; float low, med, high; if (options.adapt_input_to_cpu_usage.Get(&cpu_adapt)) { - video_adapter_->set_cpu_adaptation(cpu_adapt); + video_adapter_.set_cpu_adaptation(cpu_adapt); } if (options.adapt_cpu_with_smoothing.Get(&cpu_smoothing)) { - video_adapter_->set_cpu_smoothing(cpu_smoothing); + video_adapter_.set_cpu_smoothing(cpu_smoothing); } if (options.process_adaptation_threshhold.Get(&med)) { - video_adapter_->set_process_threshold(med); + video_adapter_.set_process_threshold(med); } if (options.system_low_adaptation_threshhold.Get(&low)) { - video_adapter_->set_low_system_threshold(low); + video_adapter_.set_low_system_threshold(low); } if (options.system_high_adaptation_threshhold.Get(&high)) { - video_adapter_->set_high_system_threshold(high); + video_adapter_.set_high_system_threshold(high); } if (options.video_adapt_third.Get(&adapt_third)) { - video_adapter_->set_scale_third(adapt_third); + video_adapter_.set_scale_third(adapt_third); } } void SetCpuOveruseDetection(bool enable) { if (cpu_monitor_ && enable) { - cpu_monitor_->SignalUpdate.disconnect(video_adapter_.get()); + cpu_monitor_->SignalUpdate.disconnect(&video_adapter_); } overuse_observer_->Enable(enable); - video_adapter_->set_cpu_adaptation(enable); + video_adapter_.set_cpu_adaptation(enable); } void ProcessFrame(const VideoFrame& original_frame, bool mute, @@ -761,7 +767,7 @@ class WebRtcVideoChannelSendInfo : public sigslot::has_slots<> { int64 interval_; - 
talk_base::scoped_ptr video_adapter_; + CoordinatedVideoAdapter video_adapter_; talk_base::CpuMonitor* cpu_monitor_; talk_base::scoped_ptr overuse_observer_; }; @@ -2854,7 +2860,16 @@ bool WebRtcVideoMediaChannel::GetRenderer(uint32 ssrc, return true; } -// TODO(zhurunz): Add unittests to test this function. +bool WebRtcVideoMediaChannel::GetVideoAdapter( + uint32 ssrc, CoordinatedVideoAdapter** video_adapter) { + SendChannelMap::iterator it = send_channels_.find(ssrc); + if (it == send_channels_.end()) { + return false; + } + *video_adapter = it->second->video_adapter(); + return true; +} + void WebRtcVideoMediaChannel::SendFrame(VideoCapturer* capturer, const VideoFrame* frame) { // If the |capturer| is registered to any send channel, then send the frame diff --git a/talk/media/webrtc/webrtcvideoengine.h b/talk/media/webrtc/webrtcvideoengine.h index d4949c473..fa1b24881 100644 --- a/talk/media/webrtc/webrtcvideoengine.h +++ b/talk/media/webrtc/webrtcvideoengine.h @@ -60,12 +60,13 @@ class CpuMonitor; namespace cricket { +class CoordinatedVideoAdapter; +class ViETraceWrapper; +class ViEWrapper; class VideoCapturer; class VideoFrame; class VideoProcessor; class VideoRenderer; -class ViETraceWrapper; -class ViEWrapper; class VoiceMediaChannel; class WebRtcDecoderObserver; class WebRtcEncoderObserver; @@ -227,10 +228,6 @@ class WebRtcVideoEngine : public sigslot::has_slots<>, int local_renderer_h_; VideoRenderer* local_renderer_; - // Critical section to protect the media processor register/unregister - // while processing a frame - talk_base::CriticalSection signal_media_critical_; - talk_base::scoped_ptr cpu_monitor_; }; @@ -289,12 +286,11 @@ class WebRtcVideoMediaChannel : public talk_base::MessageHandler, // Public functions for use by tests and other specialized code. uint32 send_ssrc() const { return 0; } bool GetRenderer(uint32 ssrc, VideoRenderer** renderer); + bool GetVideoAdapter(uint32 ssrc, CoordinatedVideoAdapter** video_adapter); void SendFrame(VideoCapturer* capturer, const VideoFrame* frame); bool SendFrame(WebRtcVideoChannelSendInfo* channel_info, const VideoFrame* frame, bool is_screencast); - void AdaptAndSendFrame(VideoCapturer* capturer, const VideoFrame* frame); - // Thunk functions for use with HybridVideoEngine void OnLocalFrame(VideoCapturer* capturer, const VideoFrame* frame) { SendFrame(0u, frame, capturer->IsScreencast()); diff --git a/talk/media/webrtc/webrtcvideoengine_unittest.cc b/talk/media/webrtc/webrtcvideoengine_unittest.cc index e331188b5..386ec0c52 100644 --- a/talk/media/webrtc/webrtcvideoengine_unittest.cc +++ b/talk/media/webrtc/webrtcvideoengine_unittest.cc @@ -36,6 +36,7 @@ #include "talk/media/base/fakevideorenderer.h" #include "talk/media/base/mediachannel.h" #include "talk/media/base/testutils.h" +#include "talk/media/base/videoadapter.h" #include "talk/media/base/videoengine_unittest.h" #include "talk/media/webrtc/fakewebrtcvideocapturemodule.h" #include "talk/media/webrtc/fakewebrtcvideoengine.h"
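
For reference, a minimal standalone sketch of the adaptation gating this change introduces in CoordinatedVideoAdapter::OnCpuLoadUpdated(): the wall-clock wait (kCpuLoadMinSampleTime / cpu_adapt_wait_time_) is replaced by a sample counter that must reach cpu_load_min_samples_ (3 samples, assuming the stated 2000 ms monitor period, roughly the old 5000 ms window) before a non-KEEP request is honored, and both the counter and the smoothed load are reset whenever the output format actually changes. The class and main() below are self-contained stand-ins, not the libjingle types, and kCpuLoadInitialAverage is not defined in this patch, so 0.5f is only a placeholder.

#include <cstdio>

namespace {

// Constants visible in the patch; kCpuLoadInitialAverage is assumed here.
const int kCpuLoadMinSamples = 3;
const float kCpuLoadWeightCoefficient = 0.4f;
const float kCpuLoadInitialAverage = 0.5f;

// Stand-in for the relevant slice of CoordinatedVideoAdapter.
class CpuAdaptGateSketch {
 public:
  // Returns true when a high/low-load request may be acted on.
  bool OnCpuLoadUpdated(float system_load, bool request_not_keep) {
    // Exponentially weighted moving average: each new sample gets weight 0.4.
    system_load_average_ = kCpuLoadWeightCoefficient * system_load +
        (1.0f - kCpuLoadWeightCoefficient) * system_load_average_;
    ++cpu_load_num_samples_;
    if (!request_not_keep)
      return false;
    // Gate on a sample count instead of elapsed time.
    return cpu_load_num_samples_ >= cpu_load_min_samples_;
  }

  // Mirrors AdaptToMinimumFormat(): after any resolution change the load
  // history is no longer meaningful, so re-learn from scratch.
  void OnFormatChanged() {
    cpu_load_num_samples_ = 0;
    system_load_average_ = kCpuLoadInitialAverage;
  }

 private:
  int cpu_load_min_samples_ = kCpuLoadMinSamples;
  int cpu_load_num_samples_ = 0;
  float system_load_average_ = kCpuLoadInitialAverage;
};

}  // namespace

int main() {
  CpuAdaptGateSketch gate;
  // The third consecutive overloaded sample is the first one allowed to
  // trigger a downgrade.
  for (int i = 1; i <= 4; ++i)
    std::printf("sample %d: adapt allowed = %d\n",
                i, gate.OnCpuLoadUpdated(0.9f, true));
  return 0;
}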
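The other behavioral change is in VideoCapturer::OnFrameCaptured(): the capturer now fires SignalAdaptFrame (capturer, input frame, output-frame pointer) before SignalVideoFrame, skips adaptation for screencasts, and drops the frame when the adapter leaves the output null; the send channel connects its CoordinatedVideoAdapter to that signal instead of adapting after delivery. A rough standalone approximation of that ordering, using std::function in place of sigslot and a struct in place of VideoFrame (both stand-ins, not the real API):

#include <functional>
#include <iostream>

struct FrameStub { int width, height; };  // stand-in for cricket::VideoFrame

// Stand-ins for SignalAdaptFrame and SignalVideoFrame.
std::function<void(const FrameStub*, FrameStub**)> adapt_frame_hook;
std::function<void(const FrameStub*)> video_frame_hook;

// Rough equivalent of the new tail of VideoCapturer::OnFrameCaptured().
void DeliverCapturedFrame(FrameStub* i420_frame, bool is_screencast) {
  FrameStub* adapted = i420_frame;
  if (adapt_frame_hook && !is_screencast) {  // screencasts bypass the adapter
    FrameStub* out = nullptr;
    adapt_frame_hook(adapted, &out);
    if (!out)
      return;                                // adapter dropped the frame
    adapted = out;
  }
  if (video_frame_hook)
    video_frame_hook(adapted);               // processors/encoder come after
}

int main() {
  FrameStub downscaled{320, 240};
  adapt_frame_hook = [&](const FrameStub*, FrameStub** out) {
    *out = &downscaled;                      // pretend we always downscale
  };
  video_frame_hook = [](const FrameStub* f) {
    std::cout << "delivering " << f->width << "x" << f->height << "\n";
  };
  FrameStub captured{1280, 720};
  DeliverCapturedFrame(&captured, false);    // prints "delivering 320x240"
  return 0;
}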