Update talk to 60923971

Review URL: https://webrtc-codereview.appspot.com/7909004

git-svn-id: http://webrtc.googlecode.com/svn/trunk@5475 4adac7df-926f-26a2-2b94-8c16560cd09d
This commit is contained in:
mallinath@webrtc.org 2014-02-03 16:57:16 +00:00
parent 422fdbf502
commit 67ee6b9a62
51 changed files with 1442 additions and 292 deletions

View File

@ -118,9 +118,6 @@ bool JsepSessionDescription::AddCandidate(
}
if (mediasection_index >= number_of_mediasections())
return false;
if (candidate_collection_[mediasection_index].HasCandidate(candidate)) {
return true; // Silently ignore this candidate if we already have it.
}
const std::string content_name =
description_->contents()[mediasection_index].name;
const cricket::TransportInfo* transport_info =
@ -137,10 +134,15 @@ bool JsepSessionDescription::AddCandidate(
updated_candidate.set_password(transport_info->description.ice_pwd);
}
candidate_collection_[mediasection_index].add(
new JsepIceCandidate(candidate->sdp_mid(),
static_cast<int>(mediasection_index),
updated_candidate));
scoped_ptr<JsepIceCandidate> updated_candidate_wrapper(
new JsepIceCandidate(candidate->sdp_mid(),
static_cast<int>(mediasection_index),
updated_candidate));
if (!candidate_collection_[mediasection_index].HasCandidate(
updated_candidate_wrapper.get()))
candidate_collection_[mediasection_index].add(
updated_candidate_wrapper.release());
return true;
}

View File

@ -204,6 +204,28 @@ TEST_F(JsepSessionDescriptionTest, AddBadCandidate) {
EXPECT_FALSE(jsep_desc_->AddCandidate(&bad_candidate2));
}
// Tests that repeatedly adding the same candidate, with or without credentials,
// does not increase the number of candidates in the description.
TEST_F(JsepSessionDescriptionTest, AddCandidateDuplicates) {
  JsepIceCandidate jsep_candidate("", 0, candidate_);
  EXPECT_TRUE(jsep_desc_->AddCandidate(&jsep_candidate));
  EXPECT_EQ(1u, jsep_desc_->candidates(0)->count());
  // Add the same candidate again. It should be ignored (AddCandidate still
  // reports success, but the count stays at one).
  EXPECT_TRUE(jsep_desc_->AddCandidate(&jsep_candidate));
  EXPECT_EQ(1u, jsep_desc_->candidates(0)->count());
  // Create a new candidate, identical except that the ufrag and pwd are now
  // populated.
  candidate_.set_username(kCandidateUfragVoice);
  candidate_.set_password(kCandidatePwdVoice);
  JsepIceCandidate jsep_candidate_with_credentials("", 0, candidate_);
  // This should also be identified as redundant and ignored, since
  // AddCandidate normalizes the credentials from the transport description
  // before checking for duplicates.
  EXPECT_TRUE(jsep_desc_->AddCandidate(&jsep_candidate_with_credentials));
  EXPECT_EQ(1u, jsep_desc_->candidates(0)->count());
}
// Test that we can serialize a JsepSessionDescription and deserialize it again.
TEST_F(JsepSessionDescriptionTest, SerializeDeserialize) {
std::string sdp = Serialize(jsep_desc_.get());

View File

@ -56,14 +56,38 @@ void TrackHandler::OnChanged() {
}
}
// Starts with no downstream sink; one may be installed later via SetSink().
LocalAudioSinkAdapter::LocalAudioSinkAdapter() : sink_(NULL) {}
LocalAudioSinkAdapter::~LocalAudioSinkAdapter() {}
// AudioTrackSinkInterface implementation: forwards one buffer of audio data
// from the local track to the currently registered AudioRenderer::Sink, or
// drops it if no sink is attached. |lock_| guards |sink_| against concurrent
// SetSink() calls (see the "Critical section protecting |sink_|" note in the
// header).
void LocalAudioSinkAdapter::OnData(const void* audio_data,
                                   int bits_per_sample,
                                   int sample_rate,
                                   int number_of_channels,
                                   int number_of_frames) {
  talk_base::CritScope lock(&lock_);
  if (sink_) {
    sink_->OnData(audio_data, bits_per_sample, sample_rate,
                  number_of_channels, number_of_frames);
  }
}
// cricket::AudioRenderer implementation: installs (or, with NULL, clears) the
// downstream sink. The ASSERT enforces the one-sink-at-a-time contract: a
// non-NULL sink may only be set while no sink is currently attached.
void LocalAudioSinkAdapter::SetSink(cricket::AudioRenderer::Sink* sink) {
  talk_base::CritScope lock(&lock_);
  ASSERT(!sink || !sink_);
  sink_ = sink;
}
LocalAudioTrackHandler::LocalAudioTrackHandler(
AudioTrackInterface* track,
uint32 ssrc,
AudioProviderInterface* provider)
: TrackHandler(track, ssrc),
audio_track_(track),
provider_(provider) {
provider_(provider),
sink_adapter_(new LocalAudioSinkAdapter()) {
OnEnabledChanged();
track->AddSink(sink_adapter_.get());
}
LocalAudioTrackHandler::~LocalAudioTrackHandler() {
@ -74,6 +98,7 @@ void LocalAudioTrackHandler::OnStateChanged() {
}
void LocalAudioTrackHandler::Stop() {
audio_track_->RemoveSink(sink_adapter_.get());
cricket::AudioOptions options;
provider_->SetAudioSend(ssrc(), false, options, NULL);
}
@ -84,8 +109,13 @@ void LocalAudioTrackHandler::OnEnabledChanged() {
options = static_cast<LocalAudioSource*>(
audio_track_->GetSource())->options();
}
provider_->SetAudioSend(ssrc(), audio_track_->enabled(), options,
audio_track_->GetRenderer());
// Use the renderer if the audio track has one, otherwise use the sink
// adapter owned by this class.
cricket::AudioRenderer* renderer = audio_track_->GetRenderer() ?
audio_track_->GetRenderer() : sink_adapter_.get();
ASSERT(renderer);
provider_->SetAudioSend(ssrc(), audio_track_->enabled(), options, renderer);
}
RemoteAudioTrackHandler::RemoteAudioTrackHandler(

View File

@ -40,6 +40,7 @@
#include "talk/app/webrtc/mediastreamprovider.h"
#include "talk/app/webrtc/peerconnectioninterface.h"
#include "talk/base/thread.h"
#include "talk/media/base/audiorenderer.h"
namespace webrtc {
@ -67,6 +68,28 @@ class TrackHandler : public ObserverInterface {
bool enabled_;
};
// LocalAudioSinkAdapter receives data callback as a sink to the local
// AudioTrack, and passes the data to the sink of AudioRenderer.
class LocalAudioSinkAdapter : public AudioTrackSinkInterface,
public cricket::AudioRenderer {
public:
LocalAudioSinkAdapter();
virtual ~LocalAudioSinkAdapter();
private:
// AudioSinkInterface implementation.
virtual void OnData(const void* audio_data, int bits_per_sample,
int sample_rate, int number_of_channels,
int number_of_frames) OVERRIDE;
// cricket::AudioRenderer implementation.
virtual void SetSink(cricket::AudioRenderer::Sink* sink) OVERRIDE;
cricket::AudioRenderer::Sink* sink_;
// Critical section protecting |sink_|.
talk_base::CriticalSection lock_;
};
// LocalAudioTrackHandler listen to events on a local AudioTrack instance
// connected to a PeerConnection and orders the |provider| to executes the
// requested change.
@ -86,6 +109,10 @@ class LocalAudioTrackHandler : public TrackHandler {
private:
AudioTrackInterface* audio_track_;
AudioProviderInterface* provider_;
// Used to pass the data callback from the |audio_track_| to the other
// end of cricket::AudioRenderer.
talk_base::scoped_ptr<LocalAudioSinkAdapter> sink_adapter_;
};
// RemoteAudioTrackHandler listen to events on a remote AudioTrack instance

View File

@ -147,15 +147,33 @@ class VideoTrackInterface : public MediaStreamTrackInterface {
class AudioSourceInterface : public MediaSourceInterface {
};
// Interface for receiving audio data from a AudioTrack.
class AudioTrackSinkInterface {
public:
virtual void OnData(const void* audio_data,
int bits_per_sample,
int sample_rate,
int number_of_channels,
int number_of_frames) = 0;
protected:
virtual ~AudioTrackSinkInterface() {}
};
class AudioTrackInterface : public MediaStreamTrackInterface {
public:
// TODO(xians): Figure out if the following interface should be const or not.
virtual AudioSourceInterface* GetSource() const = 0;
// Adds/Removes a sink that will receive the audio data from the track.
// TODO(xians): Make them pure virtual after Chrome implements these
// interfaces.
virtual void AddSink(AudioTrackSinkInterface* sink) {}
virtual void RemoveSink(AudioTrackSinkInterface* sink) {}
// Gets a pointer to the audio renderer of this AudioTrack.
// The pointer is valid for the lifetime of this AudioTrack.
// TODO(xians): Make the following interface pure virtual once Chrome has its
// implementation.
// TODO(xians): Remove the following interface after Chrome switches to
// AddSink() and RemoveSink() interfaces.
virtual cricket::AudioRenderer* GetRenderer() { return NULL; }
protected:

View File

@ -180,7 +180,6 @@ class MockPeerConnectionObserver : public PeerConnectionObserver {
EXPECT_EQ(pc_->ice_gathering_state(), new_state);
}
virtual void OnIceCandidate(const webrtc::IceCandidateInterface* candidate) {
EXPECT_NE(PeerConnectionInterface::kIceGatheringNew,
pc_->ice_gathering_state());

View File

@ -786,7 +786,7 @@ bool WebRtcSession::ProcessIceMessage(const IceCandidateInterface* candidate) {
return false;
}
return UseCandidatesInSessionDescription(remote_desc_.get());
return UseCandidate(candidate);
}
bool WebRtcSession::GetTrackIdBySsrc(uint32 ssrc, std::string* id) {

View File

@ -1421,10 +1421,12 @@ TEST_F(WebRtcSessionTest, TestAddRemoteCandidate) {
EXPECT_EQ(1, candidates->at(0)->candidate().component());
EXPECT_EQ(2, candidates->at(1)->candidate().component());
// |ice_candidate3| is identical to |ice_candidate2|. It can be added
// successfully, but the total count of candidates will not increase.
candidate.set_component(2);
JsepIceCandidate ice_candidate3(kMediaContentName0, 0, candidate);
EXPECT_TRUE(session_->ProcessIceMessage(&ice_candidate3));
ASSERT_EQ(3u, candidates->count());
ASSERT_EQ(2u, candidates->count());
JsepIceCandidate bad_ice_candidate("bad content name", 99, candidate);
EXPECT_FALSE(session_->ProcessIceMessage(&bad_ice_candidate));

View File

@ -27,7 +27,6 @@
#ifndef TALK_BASE_ASYNCSOCKET_H_
#define TALK_BASE_ASYNCSOCKET_H_
#ifndef __native_client__
#include "talk/base/common.h"
#include "talk/base/sigslot.h"
@ -139,5 +138,4 @@ class AsyncSocketAdapter : public AsyncSocket, public sigslot::has_slots<> {
} // namespace talk_base
#endif // __native_client__
#endif // TALK_BASE_ASYNCSOCKET_H_

View File

@ -33,9 +33,10 @@
#ifdef WIN32
#include "talk/base/win32.h"
#else
#include <sys/types.h>
#include <dirent.h>
#include <stdio.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
#endif
@ -463,7 +464,10 @@ const PlatformFile kInvalidPlatformFileValue = INVALID_HANDLE_VALUE;
#elif defined(POSIX)
typedef int PlatformFile;
const PlatformFile kInvalidPlatformFileValue = -1;
#else
#error Unsupported platform
#endif
FILE* FdopenPlatformFileForWriting(PlatformFile file);
bool ClosePlatformFile(PlatformFile file);

View File

@ -250,6 +250,89 @@ bool ConfigParser::ParseLine(std::string* key, std::string* value) {
return true;
}
#if !defined(GOOGLE_CHROME_BUILD) && !defined(CHROMIUM_BUILD)
// Reads one line from |stream| into |out|. Returns true on success. On
// failure, logs whether the stream hit a genuine error or simply ran out of
// lines, and returns false.
static bool ExpectLineFromStream(FileStream* stream,
                                 std::string* out) {
  StreamResult res = stream->ReadLine(out);
  if (res == SR_SUCCESS) {
    return true;
  }
  if (res == SR_EOS) {
    LOG(LS_ERROR) << "Incorrect number of lines in stream";
  } else {
    LOG(LS_ERROR) << "Error when reading from stream";
  }
  return false;
}
// Verifies that |stream| has no lines left. Extra lines or read errors are
// only logged as warnings; this never fails hard.
static void ExpectEofFromStream(FileStream* stream) {
  std::string unused;
  switch (stream->ReadLine(&unused)) {
    case SR_EOS:
      break;  // Expected: nothing left to read.
    case SR_SUCCESS:
      LOG(LS_WARNING) << "Ignoring unexpected extra lines from stream";
      break;
    default:
      LOG(LS_WARNING) << "Error when checking for extra lines from stream";
      break;
  }
}
// For caching the lsb_release output (reading it invokes a sub-process and
// hence is somewhat expensive).
static std::string lsb_release_string;
static CriticalSection lsb_release_string_critsec;
// Runs "lsb_release -idrcs" and folds its four output lines into one
// space-separated string of DISTRIB_* key=value pairs. The result is cached
// in |lsb_release_string|; on any failure the cached (possibly empty) string
// is returned unchanged so a later call can retry.
std::string ReadLinuxLsbRelease() {
  CritScope cs(&lsb_release_string_critsec);
  if (!lsb_release_string.empty()) {
    // Have cached result from previous call.
    return lsb_release_string;
  }
  // No cached result. Run lsb_release and parse output.
  POpenStream lsb_release_output;
  if (!lsb_release_output.Open("lsb_release -idrcs", "r", NULL)) {
    LOG_ERR(LS_ERROR) << "Can't run lsb_release";
    return lsb_release_string;  // empty
  }
  // Read in the command's output and build the string. The -i, -d, -r, -c
  // flags print one value per line, in that order: distributor id,
  // description, release, codename.
  std::ostringstream sstr;
  std::string line;
  int wait_status;
  if (!ExpectLineFromStream(&lsb_release_output, &line)) {
    return lsb_release_string;  // empty
  }
  sstr << "DISTRIB_ID=" << line;
  if (!ExpectLineFromStream(&lsb_release_output, &line)) {
    return lsb_release_string;  // empty
  }
  // The description may contain spaces, so it is wrapped in quotes.
  sstr << " DISTRIB_DESCRIPTION=\"" << line << '"';
  if (!ExpectLineFromStream(&lsb_release_output, &line)) {
    return lsb_release_string;  // empty
  }
  sstr << " DISTRIB_RELEASE=" << line;
  if (!ExpectLineFromStream(&lsb_release_output, &line)) {
    return lsb_release_string;  // empty
  }
  sstr << " DISTRIB_CODENAME=" << line;
  // Should not be anything left.
  ExpectEofFromStream(&lsb_release_output);
  lsb_release_output.Close();
  // A bad exit status is only worth a warning: we already have usable output.
  wait_status = lsb_release_output.GetWaitStatus();
  if (wait_status == -1 ||
      !WIFEXITED(wait_status) ||
      WEXITSTATUS(wait_status) != 0) {
    LOG(LS_WARNING) << "Unexpected exit status from lsb_release";
  }
  lsb_release_string = sstr.str();
  return lsb_release_string;
}
#endif
std::string ReadLinuxUname() {
struct utsname buf;

View File

@ -121,6 +121,11 @@ class ProcCpuInfo {
ConfigParser::MapVector sections_;
};
#if !defined(GOOGLE_CHROME_BUILD) && !defined(CHROMIUM_BUILD)
// Builds a string containing the info from lsb_release on a single line.
std::string ReadLinuxLsbRelease();
#endif
// Returns the output of "uname".
std::string ReadLinuxUname();

View File

@ -105,6 +105,14 @@ TEST(ConfigParser, ParseConfig) {
EXPECT_EQ(true, parser.Parse(&key_val_pairs));
}
#if !defined(GOOGLE_CHROME_BUILD) && !defined(CHROMIUM_BUILD)
TEST(ReadLinuxLsbRelease, ReturnsSomething) {
  std::string str = ReadLinuxLsbRelease();
  // ChromeOS doesn't ship lsb_release, so the result may legitimately be
  // empty there; the assertion is disabled for that reason and this test only
  // checks that the call completes without crashing.
  // EXPECT_FALSE(str.empty());
}
#endif
TEST(ReadLinuxUname, ReturnsSomething) {
std::string str = ReadLinuxUname();
EXPECT_FALSE(str.empty());

View File

@ -224,11 +224,11 @@ OpenSSLCertificate* OpenSSLCertificate::FromPEMString(
BIO* bio = BIO_new_mem_buf(const_cast<char*>(pem_string.c_str()), -1);
if (!bio)
return NULL;
(void)BIO_set_close(bio, BIO_NOCLOSE);
BIO_set_mem_eof_return(bio, 0);
X509 *x509 = PEM_read_bio_X509(bio, NULL, NULL,
const_cast<char*>("\0"));
BIO_free(bio);
BIO_free(bio); // Frees the BIO, but not the pointed-to string.
if (!x509)
return NULL;
@ -364,11 +364,10 @@ SSLIdentity* OpenSSLIdentity::FromPEMStrings(
LOG(LS_ERROR) << "Failed to create a new BIO buffer.";
return NULL;
}
(void)BIO_set_close(bio, BIO_NOCLOSE);
BIO_set_mem_eof_return(bio, 0);
EVP_PKEY *pkey = PEM_read_bio_PrivateKey(bio, NULL, NULL,
const_cast<char*>("\0"));
BIO_free(bio);
BIO_free(bio); // Frees the BIO, but not the pointed-to string.
if (!pkey) {
LOG(LS_ERROR) << "Failed to create the private key from PEM string.";
@ -392,5 +391,3 @@ bool OpenSSLIdentity::ConfigureIdentity(SSL_CTX* ctx) {
} // namespace talk_base
#endif // HAVE_OPENSSL_SSL_H

View File

@ -2,40 +2,32 @@
* libjingle
* Copyright 2004--2005, Google Inc.
*
* Redistribution and use in source and binary forms, with or without
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
* EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef TALK_BASE_SOCKET_H__
#define TALK_BASE_SOCKET_H__
#if defined(__native_client__)
namespace talk_base {
// These should never be defined or instantiated.
class Socket;
class AsyncSocket;
} // namespace talk_base
#else
#include <errno.h>
#ifdef POSIX
@ -207,5 +199,4 @@ class Socket {
} // namespace talk_base
#endif // !__native_client__
#endif // TALK_BASE_SOCKET_H__

View File

@ -32,6 +32,8 @@
#ifdef LINUX
#include <X11/Xlib.h>
#include <X11/extensions/Xrandr.h>
// X defines a few macros that stomp on types that gunit.h uses.
#undef None
#undef Bool
@ -601,6 +603,16 @@ inline bool IsScreencastingAvailable() {
LOG(LS_WARNING) << "No X Display available.";
return false;
}
int ignored_int, major_version, minor_version;
if (!XRRQueryExtension(display, &ignored_int, &ignored_int) ||
!XRRQueryVersion(display, &major_version, &minor_version) ||
major_version < 1 ||
(major_version < 2 && minor_version < 3)) {
LOG(LS_WARNING) << "XRandr version: " << major_version << "."
<< minor_version;
LOG(LS_WARNING) << "XRandr is not supported or is too old (pre 1.3).";
return false;
}
#endif
return true;
}

View File

@ -30,18 +30,37 @@
namespace cricket {
// Abstract interface for holding the voice channel IDs.
// Abstract interface for rendering the audio data.
class AudioRenderer {
public:
class Sink {
public:
virtual void OnData(const void* audio_data,
int bits_per_sample,
int sample_rate,
int number_of_channels,
int number_of_frames) = 0;
protected:
virtual ~Sink() {}
};
// Sets a sink to the AudioRenderer. There can be only one sink connected
// to the renderer at a time.
virtual void SetSink(Sink* sink) {}
// Add the WebRtc VoE channel to the renderer.
// For local stream, multiple WebRtc VoE channels can be connected to the
// renderer. While for remote stream, only one WebRtc VoE channel can be
// connected to the renderer.
virtual void AddChannel(int channel_id) = 0;
// TODO(xians): Remove this interface after Chrome switches to the
// AudioRenderer::Sink interface.
virtual void AddChannel(int channel_id) {}
// Remove the WebRtc VoE channel from the renderer.
// This method is called when the VoE channel is going away.
virtual void RemoveChannel(int channel_id) = 0;
// TODO(xians): Remove this interface after Chrome switches to the
// AudioRenderer::Sink interface.
virtual void RemoveChannel(int channel_id) {}
protected:
virtual ~AudioRenderer() {}

View File

@ -36,9 +36,9 @@ namespace cricket {
// TODO(fbarchard): Make downgrades settable
static const int kMaxCpuDowngrades = 2; // Downgrade at most 2 times for CPU.
// The number of milliseconds of data to require before acting on cpu sampling
// information.
static const size_t kCpuLoadMinSampleTime = 5000;
// The number of cpu samples to require before adapting. This value depends on
// the cpu monitor sampling frequency being 2000ms.
static const int kCpuLoadMinSamples = 3;
// The amount of weight to give to each new cpu load sample. The lower the
// value, the slower we'll adapt to changing cpu conditions.
static const float kCpuLoadWeightCoefficient = 0.4f;
@ -165,8 +165,8 @@ VideoAdapter::VideoAdapter()
frames_(0),
adapted_frames_(0),
adaption_changes_(0),
previous_width(0),
previous_height(0),
previous_width_(0),
previous_height_(0),
black_output_(false),
is_black_(false),
interval_next_frame_(0) {
@ -240,7 +240,7 @@ int VideoAdapter::GetOutputNumPixels() const {
// TODO(fbarchard): Add AdaptFrameRate function that only drops frames but
// not resolution.
bool VideoAdapter::AdaptFrame(const VideoFrame* in_frame,
const VideoFrame** out_frame) {
VideoFrame** out_frame) {
talk_base::CritScope cs(&critical_section_);
if (!in_frame || !out_frame) {
return false;
@ -306,8 +306,8 @@ bool VideoAdapter::AdaptFrame(const VideoFrame* in_frame,
// resolution changes as well. Consider dropping the statistics into their
// own class which could be queried publically.
bool changed = false;
if (previous_width && (previous_width != (*out_frame)->GetWidth() ||
previous_height != (*out_frame)->GetHeight())) {
if (previous_width_ && (previous_width_ != (*out_frame)->GetWidth() ||
previous_height_ != (*out_frame)->GetHeight())) {
show = true;
++adaption_changes_;
changed = true;
@ -325,8 +325,8 @@ bool VideoAdapter::AdaptFrame(const VideoFrame* in_frame,
<< "x" << (*out_frame)->GetHeight()
<< " Changed: " << (changed ? "true" : "false");
}
previous_width = (*out_frame)->GetWidth();
previous_height = (*out_frame)->GetHeight();
previous_width_ = (*out_frame)->GetWidth();
previous_height_ = (*out_frame)->GetHeight();
return true;
}
@ -382,7 +382,8 @@ CoordinatedVideoAdapter::CoordinatedVideoAdapter()
view_adaptation_(true),
view_switch_(false),
cpu_downgrade_count_(0),
cpu_adapt_wait_time_(0),
cpu_load_min_samples_(kCpuLoadMinSamples),
cpu_load_num_samples_(0),
high_system_threshold_(kHighSystemCpuThreshold),
low_system_threshold_(kLowSystemCpuThreshold),
process_threshold_(kProcessCpuThreshold),
@ -552,22 +553,18 @@ void CoordinatedVideoAdapter::OnCpuLoadUpdated(
// we'll still calculate this information, in case smoothing is later enabled.
system_load_average_ = kCpuLoadWeightCoefficient * system_load +
(1.0f - kCpuLoadWeightCoefficient) * system_load_average_;
++cpu_load_num_samples_;
if (cpu_smoothing_) {
system_load = system_load_average_;
}
// If we haven't started taking samples yet, wait until we have at least
// the correct number of samples per the wait time.
if (cpu_adapt_wait_time_ == 0) {
cpu_adapt_wait_time_ = talk_base::TimeAfter(kCpuLoadMinSampleTime);
}
AdaptRequest request = FindCpuRequest(current_cpus, max_cpus,
process_load, system_load);
// Make sure we're not adapting too quickly.
if (request != KEEP) {
if (talk_base::TimeIsLater(talk_base::Time(),
cpu_adapt_wait_time_)) {
if (cpu_load_num_samples_ < cpu_load_min_samples_) {
LOG(LS_VERBOSE) << "VAdapt CPU load high/low but do not adapt until "
<< talk_base::TimeUntil(cpu_adapt_wait_time_) << " ms";
<< (cpu_load_min_samples_ - cpu_load_num_samples_)
<< " more samples";
request = KEEP;
}
}
@ -688,7 +685,7 @@ bool CoordinatedVideoAdapter::AdaptToMinimumFormat(int* new_width,
if (changed) {
// When any adaptation occurs, historic CPU load levels are no longer
// accurate. Clear out our state so we can re-learn at the new normal.
cpu_adapt_wait_time_ = talk_base::TimeAfter(kCpuLoadMinSampleTime);
cpu_load_num_samples_ = 0;
system_load_average_ = kCpuLoadInitialAverage;
}

View File

@ -62,7 +62,7 @@ class VideoAdapter {
// successfully. Return false otherwise.
// output_frame_ is owned by the VideoAdapter that has the best knowledge on
// the output frame.
bool AdaptFrame(const VideoFrame* in_frame, const VideoFrame** out_frame);
bool AdaptFrame(const VideoFrame* in_frame, VideoFrame** out_frame);
void set_scale_third(bool enable) {
LOG(LS_INFO) << "Video Adapter third scaling is now "
@ -90,8 +90,8 @@ class VideoAdapter {
int frames_; // Number of input frames.
int adapted_frames_; // Number of frames scaled.
int adaption_changes_; // Number of changes in scale factor.
size_t previous_width; // Previous adapter output width.
size_t previous_height; // Previous adapter output height.
size_t previous_width_; // Previous adapter output width.
size_t previous_height_; // Previous adapter output height.
bool black_output_; // Flag to tell if we need to black output_frame_.
bool is_black_; // Flag to tell if output_frame_ is currently black.
int64 interval_next_frame_;
@ -149,14 +149,15 @@ class CoordinatedVideoAdapter
// When the video is decreased, set the waiting time for CPU adaptation to
// decrease video again.
void set_cpu_adapt_wait_time(uint32 cpu_adapt_wait_time) {
if (cpu_adapt_wait_time_ != static_cast<int>(cpu_adapt_wait_time)) {
LOG(LS_INFO) << "VAdapt Change Cpu Adapt Wait Time from: "
<< cpu_adapt_wait_time_ << " to "
<< cpu_adapt_wait_time;
cpu_adapt_wait_time_ = static_cast<int>(cpu_adapt_wait_time);
void set_cpu_load_min_samples(int cpu_load_min_samples) {
if (cpu_load_min_samples_ != cpu_load_min_samples) {
LOG(LS_INFO) << "VAdapt Change Cpu Adapt Min Samples from: "
<< cpu_load_min_samples_ << " to "
<< cpu_load_min_samples;
cpu_load_min_samples_ = cpu_load_min_samples;
}
}
int cpu_load_min_samples() const { return cpu_load_min_samples_; }
// CPU system load high threshold for reducing resolution. e.g. 0.85f
void set_high_system_threshold(float high_system_threshold) {
ASSERT(high_system_threshold <= 1.0f);
@ -220,7 +221,8 @@ class CoordinatedVideoAdapter
bool view_adaptation_; // True if view adaptation is enabled.
bool view_switch_; // True if view switch is enabled.
int cpu_downgrade_count_;
int cpu_adapt_wait_time_;
int cpu_load_min_samples_;
int cpu_load_num_samples_;
// cpu system load thresholds relative to max cpus.
float high_system_threshold_;
float low_system_threshold_;

View File

@ -475,14 +475,25 @@ void VideoCapturer::OnFrameCaptured(VideoCapturer*,
<< desired_width << " x " << desired_height;
return;
}
if (!muted_ && !ApplyProcessors(&i420_frame)) {
VideoFrame* adapted_frame = &i420_frame;
if (!SignalAdaptFrame.is_empty() && !IsScreencast()) {
VideoFrame* out_frame = NULL;
SignalAdaptFrame(this, adapted_frame, &out_frame);
if (!out_frame) {
return; // VideoAdapter dropped the frame.
}
adapted_frame = out_frame;
}
if (!muted_ && !ApplyProcessors(adapted_frame)) {
// Processor dropped the frame.
return;
}
if (muted_) {
i420_frame.SetToBlack();
adapted_frame->SetToBlack();
}
SignalVideoFrame(this, &i420_frame);
SignalVideoFrame(this, adapted_frame);
#endif // VIDEO_FRAME_NAME
}

View File

@ -255,7 +255,14 @@ class VideoCapturer
// Signal the captured frame to downstream.
sigslot::signal2<VideoCapturer*, const CapturedFrame*,
sigslot::multi_threaded_local> SignalFrameCaptured;
// Signal the captured frame converted to I420 to downstream.
// A VideoAdapter should be hooked up to SignalAdaptFrame which will be
// called before forwarding the frame to SignalVideoFrame. The parameters
// are this capturer instance, the input video frame and output frame
// pointer, respectively.
sigslot::signal3<VideoCapturer*, const VideoFrame*, VideoFrame**,
sigslot::multi_threaded_local> SignalAdaptFrame;
// Signal the captured and possibly adapted frame to downstream consumers
// such as the encoder.
sigslot::signal2<VideoCapturer*, const VideoFrame*,
sigslot::multi_threaded_local> SignalVideoFrame;

View File

@ -1184,6 +1184,8 @@ class VideoMediaChannelTest : public testing::Test,
// some (e.g. 1) of these 3 frames after the renderer is set again.
EXPECT_GT_FRAME_ON_RENDERER_WAIT(
renderer1, 2, DefaultCodec().width, DefaultCodec().height, kTimeout);
// Detach |renderer1| before exit as there might be frames come late.
EXPECT_TRUE(channel_->SetRenderer(kSsrc, NULL));
}
// Tests the behavior of incoming streams in a conference scenario.

View File

@ -0,0 +1,263 @@
#include "talk/media/base/yuvframegenerator.h"
#include <string.h>
#include <sstream>
#include "talk/base/basictypes.h"
#include "talk/base/common.h"
namespace cricket {
// These values were figured out by trial and error. If you change any
// basic parameters e.g. unit-bar size or bars-x-offset, you may need to change
// background-width/background-height.
const int kBarcodeBackgroundWidth = 160;
const int kBarcodeBackgroundHeight = 100;
const int kBarsXOffset = 12;
const int kBarsYOffset = 4;
const int kUnitBarSize = 2;
const int kBarcodeNormalBarHeight = 80;
const int kBarcodeGuardBarHeight = 96;
const int kBarcodeMaxEncodableDigits = 7;
const int kBarcodeMaxEncodableValue = 9999999;
// Creates a generator producing width x height I420 frames. When
// |enable_barcode| is true, a barcode region is anchored at the bottom-left
// of the frame (the frame must be at least as large as the barcode
// background); otherwise the barcode origin is marked invalid with -1.
YuvFrameGenerator::YuvFrameGenerator(int width, int height,
                                     bool enable_barcode) {
  width_ = width;
  height_ = height;
  frame_index_ = 0;
  const int luma_size = width_ * height_;
  const int chroma_size = luma_size / 4;
  frame_data_size_ = luma_size + 2 * chroma_size;
  y_data_ = new uint8[luma_size];
  u_data_ = new uint8[chroma_size];
  v_data_ = new uint8[chroma_size];
  if (!enable_barcode) {
    barcode_start_x_ = -1;
    barcode_start_y_ = -1;
    return;
  }
  ASSERT(width_ >= kBarcodeBackgroundWidth);
  ASSERT(height_ >= kBarcodeBackgroundHeight);
  barcode_start_x_ = 0;
  barcode_start_y_ = height_ - kBarcodeBackgroundHeight;
}
// Releases the per-plane buffers. They are allocated with new[] in the
// constructor, so they must be released with delete[] -- using scalar delete
// on an array is undefined behavior.
YuvFrameGenerator::~YuvFrameGenerator() {
  delete[] y_data_;
  delete[] u_data_;
  delete[] v_data_;
}
// Renders the next frame of the synthetic sequence into |frame_buffer|,
// which must hold at least frame_data_size_ bytes: a full-resolution Y plane
// followed by quarter-size U and V planes (I420 layout). If |barcode_value|
// is non-negative it is drawn as a barcode overlay, which requires the
// generator to have been constructed with enable_barcode == true.
void YuvFrameGenerator::GenerateNextFrame(uint8* frame_buffer,
                                          int32 barcode_value) {
  int size = width_ * height_;
  int qsize = size / 4;
  // Clear all planes before drawing this frame's content.
  memset(y_data_, 0, size);
  memset(u_data_, 0, qsize);
  memset(v_data_, 0, qsize);
  DrawLandscape(y_data_, width_, height_);
  // Chroma planes are half resolution in each dimension.
  DrawGradientX(u_data_, width_/2, height_/2);
  DrawGradientY(v_data_, width_/2, height_/2);
  DrawMovingLineX(u_data_, width_/2, height_/2, frame_index_);
  DrawMovingLineY(v_data_, width_/2, height_/2, frame_index_);
  DrawBouncingCube(y_data_, width_, height_, frame_index_);
  if (barcode_value >= 0) {
    // barcode_start_x_ == -1 means barcodes were disabled at construction.
    ASSERT(barcode_start_x_ != -1);
    DrawBarcode(barcode_value);
  }
  // Pack the three planes contiguously into the caller's buffer.
  memcpy(frame_buffer, y_data_, size);
  frame_buffer += size;
  memcpy(frame_buffer, u_data_, qsize);
  frame_buffer += qsize;
  memcpy(frame_buffer, v_data_, qsize);
  // Keep the animation counter bounded to 16 bits.
  frame_index_ = (frame_index_ + 1) & 0x0000FFFF;
}
// Fills luma plane |p| (w x h) with a textured background, overlaid with a
// striped crosshair: stripes are drawn inside the horizontal and vertical
// center bands (each 1/16 of the frame dimension wide).
void YuvFrameGenerator::DrawLandscape(uint8 *p, int w, int h) {
  for (int row = 0; row < h; row++) {
    for (int col = 0; col < w; col++) {
      const bool in_vertical_band =
          (col > w / 2 - (w / 32)) && (col < w / 2 + (w / 32));
      const bool in_horizontal_band =
          (row > h / 2 - (h / 32)) && (row < h / 2 + (h / 32));
      if (in_vertical_band || in_horizontal_band) {
        p[col + row * w] = ((col + row) / 8 % 2) ? 255 : 0;
      } else {
        p[col + row * w] = col % (row + 1);
      }
    }
  }
}
// Fills plane |p| (w x h) with a horizontal gradient: each column's value is
// its position scaled into [0, 256).
void YuvFrameGenerator::DrawGradientX(uint8 *p, int w, int h) {
  for (int row = 0; row < h; row++) {
    uint8* line = p + row * w;
    for (int col = 0; col < w; col++) {
      line[col] = (col << 8) / w;
    }
  }
}
// Fills plane |p| (w x h) with a vertical gradient: each row's value is its
// position scaled into [0, 256). The value is constant per row, so it is
// computed once outside the inner loop.
void YuvFrameGenerator::DrawGradientY(uint8 *p, int w, int h) {
  for (int row = 0; row < h; row++) {
    const uint8 value = (row << 8) / h;
    uint8* line = p + row * w;
    for (int col = 0; col < w; col++) {
      line[col] = value;
    }
  }
}
// Draws a full-height vertical line whose column bounces back and forth
// across [0, w) as |n| advances.
void YuvFrameGenerator::DrawMovingLineX(uint8 *p, int w, int h, int n) {
  int col = n % (w * 2);
  if (col >= w) {
    col = 2 * w - col - 1;  // Reflect off the right edge.
  }
  for (int row = 0; row < h; row++) {
    p[col + row * w] = 255;
  }
}
// Draws a full-width horizontal line whose row bounces back and forth across
// [0, h) as |n| advances.
void YuvFrameGenerator::DrawMovingLineY(uint8 *p, int w, int h, int n) {
  int row = n % (h * 2);
  if (row >= h) {
    row = 2 * h - row - 1;  // Reflect off the bottom edge.
  }
  for (int col = 0; col < w; col++) {
    p[col + row * w] = 255;
  }
}
// Draws a solid white square (1/8 of each frame dimension) centered at a
// point that bounces diagonally around the frame as |n| advances; parts of
// the square outside the frame are clipped.
void YuvFrameGenerator::DrawBouncingCube(uint8 *p, int w, int h, int n) {
  const int half_w = w / 16;
  const int half_h = h / 16;
  int cx = n % (w * 2);
  int cy = n % (h * 2);
  if (cx >= w) cx = 2 * w - cx - 1;  // Reflect horizontally.
  if (cy >= h) cy = 2 * h - cy - 1;  // Reflect vertically.
  for (int row = cy - half_h; row < cy + half_h; row++) {
    if (row < 0 || row >= h) {
      continue;  // Clip rows outside the frame.
    }
    for (int col = cx - half_w; col < cx + half_w; col++) {
      if (col >= 0 && col < w) {
        p[col + row * w] = 255;
      }
    }
  }
}
// Reports the rectangle (in luma-plane coordinates) covered by the barcode
// background. May only be called when barcodes were enabled at construction.
void YuvFrameGenerator::GetBarcodeBounds(int* top, int* left,
                                         int* width, int* height) {
  ASSERT(barcode_start_x_ != -1);
  *left = barcode_start_x_;
  *top = barcode_start_y_;
  *width = kBarcodeBackgroundWidth;
  *height = kBarcodeBackgroundHeight;
}
// Serializes |value| into |result| as kBarcodeMaxEncodableDigits zero-padded
// digits followed by the EAN check digit, then rewinds the read position so
// callers can consume the digits from the start.
static void ComputeBarcodeDigits(uint32 value, std::stringstream* result) {
  // width()/fill() apply to the next insertion only, left-padding with '0'.
  result->width(kBarcodeMaxEncodableDigits);
  result->fill('0');
  *result << value;
  // Compute the check digit as described at:
  // http://en.wikipedia.org/wiki/European_Article_Number#Calculation_of_checksum_digit
  // Odd (1-based) positions weigh 3, even positions weigh 1.
  const std::string digits = result->str();
  int sum = 0;
  for (int pos = 1; pos <= kBarcodeMaxEncodableDigits; pos++) {
    const int digit = digits[pos - 1] - '0';
    sum += digit * ((pos % 2) ? 3 : 1);
  }
  int check_digit = sum % 10;
  if (check_digit != 0) {
    check_digit = 10 - check_digit;
  }
  *result << check_digit;
  result->seekg(0);
}
// Renders |value| as a barcode in the reserved background region: the digits
// (plus a check digit, from ComputeBarcodeDigits) are drawn as black bars on
// a white background, framed by side guard bars with middle guard bars
// splitting the two halves.
void YuvFrameGenerator::DrawBarcode(uint32 value) {
  std::stringstream value_str_stream;
  ComputeBarcodeDigits(value, &value_str_stream);
  // Draw white filled rectangle as background to barcode.
  DrawBlockRectangle(y_data_, barcode_start_x_, barcode_start_y_,
                     kBarcodeBackgroundWidth, kBarcodeBackgroundHeight,
                     width_, 255);
  // Chroma planes are half resolution (hence the /2 on every coordinate);
  // 128 is the mid-range chroma value, i.e. no color cast.
  DrawBlockRectangle(u_data_, barcode_start_x_ / 2, barcode_start_y_ / 2,
                     kBarcodeBackgroundWidth / 2, kBarcodeBackgroundHeight / 2,
                     width_ / 2, 128);
  DrawBlockRectangle(v_data_, barcode_start_x_ / 2, barcode_start_y_ / 2,
                     kBarcodeBackgroundWidth / 2, kBarcodeBackgroundHeight / 2,
                     width_ / 2, 128);
  // Scan through chars (digits) and draw black bars.
  int x = barcode_start_x_ + kBarsXOffset;
  int y = barcode_start_y_ + kBarsYOffset;
  int pos = 0;
  x = DrawSideGuardBars(x, y, kBarcodeGuardBarHeight);
  while (true) {
    char next_char;
    value_str_stream.get(next_char);
    if (!value_str_stream.good()) {
      break;  // All digits (including the check digit) consumed.
    }
    if (pos++ == 4) {
      // Guard bars between the left and right halves of the barcode.
      x = DrawMiddleGuardBars(x, y, kBarcodeGuardBarHeight);
    }
    uint8 digit = next_char - '0';
    // Digits in the right half (pos > 4) use the flipped (R-code) encoding.
    x = DrawEanEncodedDigit(digit, x, y, kBarcodeNormalBarHeight, pos > 4);
  }
  x = DrawSideGuardBars(x, y, kBarcodeGuardBarHeight);
}
// Draws the EAN middle guard pattern (blank-bar-blank-bar-blank) starting at
// |x| and returns the x coordinate immediately past it.
int YuvFrameGenerator::DrawMiddleGuardBars(int x, int y, int height) {
  int cursor = x + kUnitBarSize;  // Skip the leading blank unit.
  DrawBlockRectangle(y_data_, cursor, y, kUnitBarSize, height, width_, 0);
  cursor += 2 * kUnitBarSize;  // Past the bar and one blank unit.
  DrawBlockRectangle(y_data_, cursor, y, kUnitBarSize, height, width_, 0);
  return cursor + 2 * kUnitBarSize;  // Past the bar and the trailing blank.
}
// Draws the EAN side guard pattern (bar-blank-bar) starting at |x| and
// returns the x coordinate immediately past it.
int YuvFrameGenerator::DrawSideGuardBars(int x, int y, int height) {
  int cursor = x;
  DrawBlockRectangle(y_data_, cursor, y, kUnitBarSize, height, width_, 0);
  cursor += 2 * kUnitBarSize;  // Past the bar and one blank unit.
  DrawBlockRectangle(y_data_, cursor, y, kUnitBarSize, height, width_, 0);
  return cursor + kUnitBarSize;  // Past the final bar.
}
// For each digit: 0-9, |kEanEncodings| contains a bit-mask indicating
// which bars are black (1) and which are blank (0). These are for the L-code
// only. R-code values are bitwise negation of these. Reference:
// http://en.wikipedia.org/wiki/European_Article_Number#Binary_encoding_of_data_digits_into_EAN-13_barcode // NOLINT
const uint8 kEanEncodings[] = { 13, 25, 19, 61, 35, 49, 47, 59, 55, 11 };
// Draws one EAN-encoded digit as seven unit-wide modules starting at |x| and
// returns the x coordinate past the digit. |flip| selects the R-code
// encoding (bitwise complement of the L-code pattern in |kEanEncodings|).
int YuvFrameGenerator::DrawEanEncodedDigit(int digit, int x, int y,
                                           int height, bool flip) {
  uint8 ean_encoding = kEanEncodings[digit];
  if (flip) {
    ean_encoding = ~ean_encoding;
  }
  // Emit the 7 modules most-significant bit first: a set bit is a black
  // bar, a clear bit leaves the white background showing.
  for (int bit = 6; bit >= 0; --bit) {
    if (ean_encoding & (1 << bit)) {
      DrawBlockRectangle(y_data_, x, y, kUnitBarSize, height, width_, 0);
    }
    x += kUnitBarSize;
  }
  return x;
}
// Fills a |width| x |height| rectangle whose top-left corner is
// (|x_start|, |y_start|) with |value| in plane |p|, whose rows are |pitch|
// bytes apart. No bounds checking is performed; callers must keep the
// rectangle inside the plane.
void YuvFrameGenerator::DrawBlockRectangle(uint8* p,
    int x_start, int y_start, int width, int height, int pitch, uint8 value) {
  // Iterate rows in the outer loop so stores are sequential in memory; the
  // previous x-outer/y-inner order strided by |pitch| bytes on every write,
  // which is needlessly cache-hostile. The set of bytes written is the same.
  for (int y = y_start; y < y_start + height; y++) {
    uint8* row = p + y * pitch;
    for (int x = x_start; x < x_start + width; x++) {
      row[x] = value;
    }
  }
}
} // namespace cricket

View File

@ -0,0 +1,78 @@
// Generates YUV420 frames with a "landscape with striped crosshair" in the
// Y-plane, plus a horizontal gradient in the U-plane and a vertical one in the
// V-plane. This makes for a nice mix of colours that is suited for both
// catching visual errors and making sure e.g. YUV->RGB/BGR conversion looks
// the same on different platforms.
// There is also a solid box bouncing around in the Y-plane, and two differently
// coloured lines bouncing horizontally and vertically in the U and V plane.
// This helps illustrating how the frame boundary goes, and can aid as a quite
// handy visual help for noticing e.g. packet loss if the frames are encoded
// and sent over the network.
#ifndef TALK_MEDIA_BASE_YUVFRAMEGENERATOR_H_
#define TALK_MEDIA_BASE_YUVFRAMEGENERATOR_H_
#include "talk/base/basictypes.h"
namespace cricket {
class YuvFrameGenerator {
 public:
  // Constructs a frame-generator that produces frames of size |width|x|height|.
  // If |enable_barcode| is specified, barcodes can be included in the frames
  // when calling |GenerateNextFrame(uint8*, uint32)|. If |enable_barcode| is
  // |true| then |width|x|height| should be at least 160x100; otherwise this
  // constructor will abort.
  YuvFrameGenerator(int width, int height, bool enable_barcode);
  ~YuvFrameGenerator();

  // Returns the size in bytes of one generated I420 frame.
  int GetFrameSize() { return frame_data_size_; }

  // Generate the next frame and return it in the provided |frame_buffer|. If
  // barcode_value is not |nullptr| the value referred by it will be encoded
  // into a barcode in the frame.  The value should in the range:
  // [0..9,999,999]. If the value exceeds this range or barcodes were not
  // requested in the constructor, this function will abort.
  void GenerateNextFrame(uint8* frame_buffer, int32 barcode_value);

  int GetHeight() { return height_; }
  int GetWidth() { return width_; }

  // Fetch the bounds of the barcode from the generator. The barcode will
  // always be at this location. This function will abort if barcodes were not
  // requested in the constructor.
  void GetBarcodeBounds(int* top, int* left, int* width, int* height);

 private:
  // Background/animation painters for the individual planes.
  void DrawLandscape(uint8 *p, int w, int h);
  void DrawGradientX(uint8 *p, int w, int h);
  void DrawGradientY(uint8 *p, int w, int h);
  void DrawMovingLineX(uint8 *p, int w, int h, int n);
  void DrawMovingLineY(uint8 *p, int w, int h, int n);
  void DrawBouncingCube(uint8 *p, int w, int h, int n);

  // Barcode rendering helpers; the Draw*Bars/Digit functions return the x
  // coordinate just past what they drew.
  void DrawBarcode(uint32 value);
  int DrawSideGuardBars(int x, int y, int height);
  int DrawMiddleGuardBars(int x, int y, int height);
  int DrawEanEncodedDigit(int digit, int x, int y, int height, bool r_code);
  void DrawBlockRectangle(uint8* p, int x_start, int y_start,
                          int width, int height, int pitch, uint8 value);

 private:
  int width_;
  int height_;
  int frame_index_;       // Frame counter driving the animations.
  int frame_data_size_;   // Total I420 frame size in bytes.
  // Per-plane scratch buffers the next frame is composed into.
  uint8* y_data_;
  uint8* u_data_;
  uint8* v_data_;
  // Top-left corner of the barcode background; -1 when barcodes disabled.
  int barcode_start_x_;
  int barcode_start_y_;

  DISALLOW_COPY_AND_ASSIGN(YuvFrameGenerator);
};
} // namespace cricket
#endif // TALK_MEDIA_BASE_YUVFRAMEGENERATOR_H_

View File

@ -37,6 +37,7 @@
#include "talk/media/base/mediacommon.h"
#include "talk/media/devices/deviceinfo.h"
#include "talk/media/devices/filevideocapturer.h"
#include "talk/media/devices/yuvframescapturer.h"
#if !defined(IOS)
@ -67,7 +68,6 @@ namespace cricket {
// Initialize to empty string.
const char DeviceManagerInterface::kDefaultDeviceName[] = "";
class DefaultVideoCapturerFactory : public VideoCapturerFactory {
public:
DefaultVideoCapturerFactory() {}
@ -180,12 +180,27 @@ bool DeviceManager::GetVideoCaptureDevice(const std::string& name,
}
}
// If |name| is a valid name for a file, return a file video capturer device.
// If |name| is a valid name for a file or yuvframedevice,
// return a fake video capturer device.
if (GetFakeVideoCaptureDevice(name, out)) {
return true;
}
return false;
}
bool DeviceManager::GetFakeVideoCaptureDevice(const std::string& name,
Device* out) const {
if (talk_base::Filesystem::IsFile(name)) {
*out = FileVideoCapturer::CreateFileVideoCapturerDevice(name);
return true;
}
if (name == YuvFramesCapturer::kYuvFrameDeviceName) {
*out = YuvFramesCapturer::CreateYuvFramesCapturerDevice();
return true;
}
return false;
}
@ -205,19 +220,12 @@ VideoCapturer* DeviceManager::CreateVideoCapturer(const Device& device) const {
LOG_F(LS_ERROR) << " should never be called!";
return NULL;
#else
// TODO(hellner): Throw out the creation of a file video capturer once the
// refactoring is completed.
if (FileVideoCapturer::IsFileVideoCapturerDevice(device)) {
FileVideoCapturer* capturer = new FileVideoCapturer;
if (!capturer->Init(device)) {
delete capturer;
return NULL;
}
LOG(LS_INFO) << "Created file video capturer " << device.name;
capturer->set_repeat(talk_base::kForever);
VideoCapturer* capturer = ConstructFakeVideoCapturer(device);
if (capturer) {
return capturer;
}
VideoCapturer* capturer = device_video_capturer_factory_->Create(device);
capturer = device_video_capturer_factory_->Create(device);
if (!capturer) {
return NULL;
}
@ -232,6 +240,29 @@ VideoCapturer* DeviceManager::CreateVideoCapturer(const Device& device) const {
#endif
}
VideoCapturer* DeviceManager::ConstructFakeVideoCapturer(
const Device& device) const {
// TODO(hellner): Throw out the creation of a file video capturer once the
// refactoring is completed.
if (FileVideoCapturer::IsFileVideoCapturerDevice(device)) {
FileVideoCapturer* capturer = new FileVideoCapturer;
if (!capturer->Init(device)) {
delete capturer;
return NULL;
}
LOG(LS_INFO) << "Created file video capturer " << device.name;
capturer->set_repeat(talk_base::kForever);
return capturer;
}
if (YuvFramesCapturer::IsYuvFramesCapturerDevice(device)) {
YuvFramesCapturer* capturer = new YuvFramesCapturer();
capturer->Init();
return capturer;
}
return NULL;
}
bool DeviceManager::GetWindows(
std::vector<talk_base::WindowDescription>* descriptions) {
if (!window_picker_) {

View File

@ -201,6 +201,8 @@ class DeviceManager : public DeviceManagerInterface {
// The exclusion_list MUST be a NULL terminated list.
static bool ShouldDeviceBeIgnored(const std::string& device_name,
const char* const exclusion_list[]);
bool GetFakeVideoCaptureDevice(const std::string& name, Device* out) const;
VideoCapturer* ConstructFakeVideoCapturer(const Device& device) const;
bool initialized_;
talk_base::scoped_ptr<VideoCapturerFactory> device_video_capturer_factory_;

View File

@ -0,0 +1,173 @@
#include "talk/media/devices/yuvframescapturer.h"
#include "talk/base/bytebuffer.h"
#include "talk/base/criticalsection.h"
#include "talk/base/logging.h"
#include "talk/base/thread.h"
#include "webrtc/system_wrappers/interface/clock.h"
namespace cricket {
///////////////////////////////////////////////////////////////////////
// Definition of private class YuvFramesThread that periodically generates
// frames.
///////////////////////////////////////////////////////////////////////
// Worker thread that drives the capturer: it repeatedly calls
// capturer_->ReadFrame() from its own message loop, re-posting a message to
// itself after each frame. With a zero delay, frames are produced as fast as
// the message queue allows.
class YuvFramesCapturer::YuvFramesThread
    : public talk_base::Thread, public talk_base::MessageHandler {
 public:
  explicit YuvFramesThread(YuvFramesCapturer* capturer)
      : capturer_(capturer),
        finished_(false) {
  }

  virtual ~YuvFramesThread() {
    Stop();
  }

  // Override virtual method of parent Thread. Context: Worker Thread.
  virtual void Run() {
    // Read the first frame and start the message pump. The pump runs until
    // Stop() is called externally or Quit() is called by OnMessage().
    int waiting_time_ms = 0;
    if (capturer_) {
      capturer_->ReadFrame(true);
      PostDelayed(waiting_time_ms, this);
      Thread::Run();
    }

    // Mark the thread finished under the lock so Finished() stays accurate.
    talk_base::CritScope cs(&crit_);
    finished_ = true;
  }

  // Override virtual method of parent MessageHandler. Context: Worker Thread.
  virtual void OnMessage(talk_base::Message* /*pmsg*/) {
    // Generate the next frame and immediately schedule the one after it.
    int waiting_time_ms = 0;
    if (capturer_) {
      capturer_->ReadFrame(false);
      PostDelayed(waiting_time_ms, this);
    } else {
      Quit();
    }
  }

  // Check if Run() is finished.
  bool Finished() const {
    talk_base::CritScope cs(&crit_);
    return finished_;
  }

 private:
  YuvFramesCapturer* capturer_;  // Not owned; outlives this thread.
  mutable talk_base::CriticalSection crit_;  // Guards |finished_|.
  bool finished_;

  DISALLOW_COPY_AND_ASSIGN(YuvFramesThread);
};
/////////////////////////////////////////////////////////////////////
// Implementation of class YuvFramesCapturer.
/////////////////////////////////////////////////////////////////////
const char* YuvFramesCapturer::kYuvFrameDeviceName = "YuvFramesGenerator";
// TODO(shaowei): allow width_ and height_ to be configurable.
// Constructs a 640x480 capturer. Init() must be called before Start().
YuvFramesCapturer::YuvFramesCapturer()
    : frame_generator_(NULL),
      frames_generator_thread(NULL),
      width_(640),
      height_(480),
      frame_data_size_(0),
      frame_index_(0),
      // -1 marks "Start() not yet called"; GetBarcodeValue() tests for it.
      // Previously this member (and frame_generator_/frame_data_size_) was
      // left uninitialized until Init()/Start(), so reading it earlier was
      // undefined behavior.
      barcode_reference_timestamp_millis_(-1),
      barcode_interval_(1) {
}
YuvFramesCapturer::~YuvFramesCapturer() {
  // Halt the generator thread before freeing the buffer it writes into.
  Stop();
  // |captured_frame_.data| is allocated as char[] in Init().
  // NOTE(review): |frame_generator_| (allocated in Init()) is never deleted
  // here -- looks like a leak; confirm and fix once the constructor
  // guarantees the pointer is initialized.
  delete[] static_cast<char*>(captured_frame_.data);
}
// Allocates the frame generator and the I420 capture buffer and registers
// the single supported format (IYUV at width_ x height_). Must be called
// before Start().
// NOTE(review): calling Init() twice would leak the previous generator and
// buffer -- confirm it is only ever called once per instance.
void YuvFramesCapturer::Init() {
  int size = width_ * height_;
  int qsize = size / 4;
  // Barcodes are always enabled for this capturer.
  frame_generator_ = new YuvFrameGenerator(width_, height_, true);
  // I420/4:2:0 layout: full-size Y plane plus quarter-size U and V planes.
  frame_data_size_ = size + 2 * qsize;
  captured_frame_.data = new char[frame_data_size_];
  captured_frame_.fourcc = FOURCC_IYUV;
  captured_frame_.pixel_height = 1;
  captured_frame_.pixel_width = 1;
  captured_frame_.width = width_;
  captured_frame_.height = height_;
  captured_frame_.data_size = frame_data_size_;

  // Enumerate the supported formats. We have only one supported format.
  VideoFormat format(width_, height_, VideoFormat::kMinimumInterval,
                     FOURCC_IYUV);
  std::vector<VideoFormat> supported;
  supported.push_back(format);
  SetSupportedFormats(supported);
}
// Starts frame generation on a dedicated worker thread. Returns CS_RUNNING
// on success, CS_FAILED if already running or the thread cannot start.
CaptureState YuvFramesCapturer::Start(const VideoFormat& capture_format) {
  if (IsRunning()) {
    LOG(LS_ERROR) << "Yuv Frame Generator is already running";
    return CS_FAILED;
  }
  SetCaptureFormat(&capture_format);

  // Reference point from which barcode values (elapsed time) are measured.
  barcode_reference_timestamp_millis_ =
      static_cast<int64>(talk_base::Time()) * 1000;

  // Create a thread to generate frames.
  frames_generator_thread = new YuvFramesThread(this);
  if (!frames_generator_thread->Start()) {
    LOG(LS_ERROR) << "Yuv Frame Generator failed to start";
    return CS_FAILED;
  }
  LOG(LS_INFO) << "Yuv Frame Generator started";
  return CS_RUNNING;
}
// Running means the worker thread exists and its Run() has not completed.
bool YuvFramesCapturer::IsRunning() {
  if (!frames_generator_thread) {
    return false;
  }
  return !frames_generator_thread->Finished();
}
void YuvFramesCapturer::Stop() {
if (frames_generator_thread) {
frames_generator_thread->Stop();
frames_generator_thread = NULL;
LOG(LS_INFO) << "Yuv Frame Generator stopped";
}
SetCaptureFormat(NULL);
}
bool YuvFramesCapturer::GetPreferredFourccs(std::vector<uint32>* fourccs) {
if (!fourccs) {
return false;
}
fourccs->push_back(GetSupportedFormats()->at(0).fourcc);
return true;
}
// Executed in the context of YuvFramesThread.
void YuvFramesCapturer::ReadFrame(bool first_frame) {
// 1. Signal the previously read frame to downstream.
if (!first_frame) {
SignalFrameCaptured(this, &captured_frame_);
}
uint8* buffer = new uint8[frame_data_size_];
frame_generator_->GenerateNextFrame(buffer, GetBarcodeValue());
frame_index_++;
memmove(captured_frame_.data, buffer, frame_data_size_);
delete[] buffer;
}
// Returns the value to encode into the next frame's barcode: the time
// elapsed since Start(). Returns -1 (meaning "no barcode") before Start()
// has been called or on frames that fall between barcode intervals.
int32 YuvFramesCapturer::GetBarcodeValue() {
  if (barcode_reference_timestamp_millis_ == -1 ||
      frame_index_ % barcode_interval_ != 0) {
    return -1;
  }
  // NOTE(review): talk_base::Time() returns milliseconds, so multiplying by
  // 1000 yields microseconds even though these variables are named
  // "*_millis" -- both here and in Start() are consistent, but confirm which
  // unit downstream barcode readers expect.
  int64 now_millis = static_cast<int64>(talk_base::Time()) * 1000;
  return static_cast<int32>(now_millis - barcode_reference_timestamp_millis_);
}
} // namespace cricket

View File

@ -0,0 +1,71 @@
#ifndef TALK_MEDIA_DEVICES_YUVFRAMESCAPTURER_H_
#define TALK_MEDIA_DEVICES_YUVFRAMESCAPTURER_H_
#include <string>
#include <vector>
#include "talk/base/stream.h"
#include "talk/base/stringutils.h"
#include "talk/media/base/videocapturer.h"
#include "talk/media/base/yuvframegenerator.h"
namespace talk_base {
class FileStream;
}
namespace cricket {
// Simulated video capturer that periodically generates frames with a
// YuvFrameGenerator (the original comment said "reads frames from a file",
// but this capturer synthesizes frames in memory; see ReadFrame()).
class YuvFramesCapturer : public VideoCapturer {
 public:
  YuvFramesCapturer();
  // NOTE(review): no definition of this two-argument constructor is visible
  // in yuvframescapturer.cc -- confirm it exists or remove the declaration.
  YuvFramesCapturer(int width, int height);
  virtual ~YuvFramesCapturer();

  static const char* kYuvFrameDeviceName;
  // Returns a Device whose id and name both equal kYuvFrameDeviceName, used
  // by DeviceManager to expose this capturer as a fake capture device.
  static Device CreateYuvFramesCapturerDevice() {
    std::stringstream id;
    id << kYuvFrameDeviceName;
    return Device(id.str(), id.str());
  }
  // True when |device|'s id begins with kYuvFrameDeviceName.
  static bool IsYuvFramesCapturerDevice(const Device& device) {
    return talk_base::starts_with(device.id.c_str(), kYuvFrameDeviceName);
  }

  // Allocates the generator and capture buffer; call before Start().
  void Init();
  // Override virtual methods of parent class VideoCapturer.
  virtual CaptureState Start(const VideoFormat& capture_format);
  virtual void Stop();
  virtual bool IsRunning();
  virtual bool IsScreencast() const { return false; }

 protected:
  // Override virtual methods of parent class VideoCapturer.
  virtual bool GetPreferredFourccs(std::vector<uint32>* fourccs);

  // Read a frame and determine how long to wait for the next frame.
  void ReadFrame(bool first_frame);

 private:
  class YuvFramesThread;  // Forward declaration, defined in .cc.

  YuvFrameGenerator* frame_generator_;
  CapturedFrame captured_frame_;
  YuvFramesThread* frames_generator_thread;
  int width_;
  int height_;
  uint32 frame_data_size_;   // Size in bytes of one I420 frame.
  uint32 frame_index_;       // Count of frames generated so far.

  // Time base for barcode values; -1 until Start() is called.
  int64 barcode_reference_timestamp_millis_;
  // A barcode is drawn on every |barcode_interval_|-th frame.
  int32 barcode_interval_;
  int32 GetBarcodeValue();

  DISALLOW_COPY_AND_ASSIGN(YuvFramesCapturer);
};
} // namespace cricket
#endif // TALK_MEDIA_DEVICES_YUVFRAMESCAPTURER_H_

View File

@ -59,6 +59,7 @@ class FakeWebRtcVideoCaptureModule : public webrtc::VideoCaptureModule {
id_ = id;
return 0;
}
#if defined(USE_WEBRTC_DEV_BRANCH)
virtual void RegisterCaptureDataCallback(
webrtc::VideoCaptureDataCallback& callback) {
callback_ = &callback;
@ -70,6 +71,40 @@ class FakeWebRtcVideoCaptureModule : public webrtc::VideoCaptureModule {
virtual void DeRegisterCaptureCallback() {
// Not implemented.
}
virtual void SetCaptureDelay(int32_t delay) { delay_ = delay; }
virtual int32_t CaptureDelay() { return delay_; }
virtual void EnableFrameRateCallback(const bool enable) {
// not implemented
}
virtual void EnableNoPictureAlarm(const bool enable) {
// not implemented
}
#else
virtual int32_t RegisterCaptureDataCallback(
webrtc::VideoCaptureDataCallback& callback) {
callback_ = &callback;
}
virtual void DeRegisterCaptureDataCallback() { callback_ = NULL; }
virtual void RegisterCaptureCallback(webrtc::VideoCaptureFeedBack& callback) {
// Not implemented.
}
virtual void DeRegisterCaptureCallback() {
// Not implemented.
}
virtual int32_t SetCaptureDelay(int32_t delay) {
delay_ = delay;
return 0;
}
virtual int32_t CaptureDelay() {
return delay_;
}
virtual int32_t EnableFrameRateCallback(const bool enable) {
return -1; // not implemented
}
virtual int32_t EnableNoPictureAlarm(const bool enable) {
return -1; // not implemented
}
#endif
virtual int32_t StartCapture(
const webrtc::VideoCaptureCapability& cap) {
if (running_) return -1;
@ -93,8 +128,7 @@ class FakeWebRtcVideoCaptureModule : public webrtc::VideoCaptureModule {
settings = cap_;
return 0;
}
virtual void SetCaptureDelay(int32_t delay) { delay_ = delay; }
virtual int32_t CaptureDelay() { return delay_; }
virtual int32_t SetCaptureRotation(
webrtc::VideoCaptureRotation rotation) {
return -1; // not implemented
@ -103,12 +137,6 @@ class FakeWebRtcVideoCaptureModule : public webrtc::VideoCaptureModule {
const webrtc::VideoCodec& codec) {
return NULL; // not implemented
}
virtual void EnableFrameRateCallback(const bool enable) {
// not implemented
}
virtual void EnableNoPictureAlarm(const bool enable) {
// not implemented
}
virtual int32_t AddRef() {
return 0;
}

View File

@ -248,6 +248,7 @@ CaptureState WebRtcVideoCapturer::Start(const VideoFormat& capture_format) {
return CS_NO_DEVICE;
}
talk_base::CritScope cs(&critical_section_stopping_);
// TODO(hellner): weird to return failure when it is in fact actually running.
if (IsRunning()) {
LOG(LS_ERROR) << "The capturer is already running";
@ -264,8 +265,13 @@ CaptureState WebRtcVideoCapturer::Start(const VideoFormat& capture_format) {
std::string camera_id(GetId());
uint32 start = talk_base::Time();
#if defined(USE_WEBRTC_DEV_BRANCH)
module_->RegisterCaptureDataCallback(*this);
if (module_->StartCapture(cap) != 0) {
#else
if (module_->RegisterCaptureDataCallback(*this) != 0 ||
module_->StartCapture(cap) != 0) {
#endif
LOG(LS_ERROR) << "Camera '" << camera_id << "' failed to start";
return CS_FAILED;
}

View File

@ -583,13 +583,12 @@ class WebRtcVideoChannelSendInfo : public sigslot::has_slots<> {
external_capture_(external_capture),
capturer_updated_(false),
interval_(0),
video_adapter_(new CoordinatedVideoAdapter),
cpu_monitor_(cpu_monitor) {
overuse_observer_.reset(new WebRtcOveruseObserver(video_adapter_.get()));
SignalCpuAdaptationUnable.repeat(video_adapter_->SignalCpuAdaptationUnable);
overuse_observer_.reset(new WebRtcOveruseObserver(&video_adapter_));
SignalCpuAdaptationUnable.repeat(video_adapter_.SignalCpuAdaptationUnable);
if (cpu_monitor) {
cpu_monitor->SignalUpdate.connect(
video_adapter_.get(), &CoordinatedVideoAdapter::OnCpuLoadUpdated);
&video_adapter_, &CoordinatedVideoAdapter::OnCpuLoadUpdated);
}
}
@ -599,7 +598,7 @@ class WebRtcVideoChannelSendInfo : public sigslot::has_slots<> {
bool sending() const { return sending_; }
void set_muted(bool on) {
// TODO(asapersson): add support.
// video_adapter_->SetBlackOutput(on);
// video_adapter_.SetBlackOutput(on);
muted_ = on;
}
bool muted() {return muted_; }
@ -614,7 +613,7 @@ class WebRtcVideoChannelSendInfo : public sigslot::has_slots<> {
if (video_format_ != cricket::VideoFormat()) {
interval_ = video_format_.interval;
}
video_adapter_->OnOutputFormatRequest(video_format_);
video_adapter_.OnOutputFormatRequest(video_format_);
}
void set_interval(int64 interval) {
if (video_format() == cricket::VideoFormat()) {
@ -627,17 +626,13 @@ class WebRtcVideoChannelSendInfo : public sigslot::has_slots<> {
VideoFormat format(codec.width, codec.height,
VideoFormat::FpsToInterval(codec.maxFramerate),
FOURCC_I420);
if (video_adapter_->output_format().IsSize0x0()) {
video_adapter_->SetOutputFormat(format);
if (video_adapter_.output_format().IsSize0x0()) {
video_adapter_.SetOutputFormat(format);
}
}
bool AdaptFrame(const VideoFrame* in_frame, const VideoFrame** out_frame) {
*out_frame = NULL;
return video_adapter_->AdaptFrame(in_frame, out_frame);
}
int CurrentAdaptReason() const {
return video_adapter_->adapt_reason();
return video_adapter_.adapt_reason();
}
webrtc::CpuOveruseObserver* overuse_observer() {
return overuse_observer_.get();
@ -663,6 +658,12 @@ class WebRtcVideoChannelSendInfo : public sigslot::has_slots<> {
return;
}
capturer_updated_ = true;
// Disconnect from the previous video capturer.
if (video_capturer_) {
video_capturer_->SignalAdaptFrame.disconnect(this);
}
video_capturer_ = video_capturer;
if (video_capturer && !video_capturer->IsScreencast()) {
const VideoFormat* capture_format = video_capturer->GetCaptureFormat();
@ -674,40 +675,51 @@ class WebRtcVideoChannelSendInfo : public sigslot::has_slots<> {
// be zero, and all frames may be dropped.
// Consider fixing this by having video_adapter keep a pointer to the
// video capturer.
video_adapter_->SetInputFormat(*capture_format);
video_adapter_.SetInputFormat(*capture_format);
}
// TODO(thorcarpenter): When the adapter supports "only frame dropping"
// mode, also hook it up to screencast capturers.
video_capturer->SignalAdaptFrame.connect(
this, &WebRtcVideoChannelSendInfo::AdaptFrame);
}
}
CoordinatedVideoAdapter* video_adapter() { return &video_adapter_; }
void AdaptFrame(VideoCapturer* capturer, const VideoFrame* input,
VideoFrame** adapted) {
video_adapter_.AdaptFrame(input, adapted);
}
void ApplyCpuOptions(const VideoOptions& options) {
bool cpu_adapt, cpu_smoothing, adapt_third;
float low, med, high;
if (options.adapt_input_to_cpu_usage.Get(&cpu_adapt)) {
video_adapter_->set_cpu_adaptation(cpu_adapt);
video_adapter_.set_cpu_adaptation(cpu_adapt);
}
if (options.adapt_cpu_with_smoothing.Get(&cpu_smoothing)) {
video_adapter_->set_cpu_smoothing(cpu_smoothing);
video_adapter_.set_cpu_smoothing(cpu_smoothing);
}
if (options.process_adaptation_threshhold.Get(&med)) {
video_adapter_->set_process_threshold(med);
video_adapter_.set_process_threshold(med);
}
if (options.system_low_adaptation_threshhold.Get(&low)) {
video_adapter_->set_low_system_threshold(low);
video_adapter_.set_low_system_threshold(low);
}
if (options.system_high_adaptation_threshhold.Get(&high)) {
video_adapter_->set_high_system_threshold(high);
video_adapter_.set_high_system_threshold(high);
}
if (options.video_adapt_third.Get(&adapt_third)) {
video_adapter_->set_scale_third(adapt_third);
video_adapter_.set_scale_third(adapt_third);
}
}
void SetCpuOveruseDetection(bool enable) {
if (cpu_monitor_ && enable) {
cpu_monitor_->SignalUpdate.disconnect(video_adapter_.get());
cpu_monitor_->SignalUpdate.disconnect(&video_adapter_);
}
overuse_observer_->Enable(enable);
video_adapter_->set_cpu_adaptation(enable);
video_adapter_.set_cpu_adaptation(enable);
}
void ProcessFrame(const VideoFrame& original_frame, bool mute,
@ -761,7 +773,7 @@ class WebRtcVideoChannelSendInfo : public sigslot::has_slots<> {
int64 interval_;
talk_base::scoped_ptr<CoordinatedVideoAdapter> video_adapter_;
CoordinatedVideoAdapter video_adapter_;
talk_base::CpuMonitor* cpu_monitor_;
talk_base::scoped_ptr<WebRtcOveruseObserver> overuse_observer_;
};
@ -2854,7 +2866,16 @@ bool WebRtcVideoMediaChannel::GetRenderer(uint32 ssrc,
return true;
}
// TODO(zhurunz): Add unittests to test this function.
bool WebRtcVideoMediaChannel::GetVideoAdapter(
uint32 ssrc, CoordinatedVideoAdapter** video_adapter) {
SendChannelMap::iterator it = send_channels_.find(ssrc);
if (it == send_channels_.end()) {
return false;
}
*video_adapter = it->second->video_adapter();
return true;
}
void WebRtcVideoMediaChannel::SendFrame(VideoCapturer* capturer,
const VideoFrame* frame) {
// If the |capturer| is registered to any send channel, then send the frame

View File

@ -60,12 +60,13 @@ class CpuMonitor;
namespace cricket {
class CoordinatedVideoAdapter;
class ViETraceWrapper;
class ViEWrapper;
class VideoCapturer;
class VideoFrame;
class VideoProcessor;
class VideoRenderer;
class ViETraceWrapper;
class ViEWrapper;
class VoiceMediaChannel;
class WebRtcDecoderObserver;
class WebRtcEncoderObserver;
@ -227,10 +228,6 @@ class WebRtcVideoEngine : public sigslot::has_slots<>,
int local_renderer_h_;
VideoRenderer* local_renderer_;
// Critical section to protect the media processor register/unregister
// while processing a frame
talk_base::CriticalSection signal_media_critical_;
talk_base::scoped_ptr<talk_base::CpuMonitor> cpu_monitor_;
};
@ -289,12 +286,11 @@ class WebRtcVideoMediaChannel : public talk_base::MessageHandler,
// Public functions for use by tests and other specialized code.
uint32 send_ssrc() const { return 0; }
bool GetRenderer(uint32 ssrc, VideoRenderer** renderer);
bool GetVideoAdapter(uint32 ssrc, CoordinatedVideoAdapter** video_adapter);
void SendFrame(VideoCapturer* capturer, const VideoFrame* frame);
bool SendFrame(WebRtcVideoChannelSendInfo* channel_info,
const VideoFrame* frame, bool is_screencast);
void AdaptAndSendFrame(VideoCapturer* capturer, const VideoFrame* frame);
// Thunk functions for use with HybridVideoEngine
void OnLocalFrame(VideoCapturer* capturer, const VideoFrame* frame) {
SendFrame(0u, frame, capturer->IsScreencast());

View File

@ -36,6 +36,7 @@
#include "talk/media/base/fakevideorenderer.h"
#include "talk/media/base/mediachannel.h"
#include "talk/media/base/testutils.h"
#include "talk/media/base/videoadapter.h"
#include "talk/media/base/videoengine_unittest.h"
#include "talk/media/webrtc/fakewebrtcvideocapturemodule.h"
#include "talk/media/webrtc/fakewebrtcvideoengine.h"

View File

@ -1639,17 +1639,68 @@ int WebRtcVoiceEngine::CreateSoundclipVoiceChannel() {
return CreateVoiceChannel(voe_wrapper_sc_.get());
}
// This struct relies on the generated copy constructor and assignment operator
// since it is used in an stl::map.
struct WebRtcVoiceMediaChannel::WebRtcVoiceChannelInfo {
WebRtcVoiceChannelInfo() : channel(-1), renderer(NULL) {}
WebRtcVoiceChannelInfo(int ch, AudioRenderer* r)
: channel(ch),
renderer(r) {}
~WebRtcVoiceChannelInfo() {}
class WebRtcVoiceMediaChannel::WebRtcVoiceChannelRenderer
: public AudioRenderer::Sink {
public:
WebRtcVoiceChannelRenderer(int ch,
webrtc::AudioTransport* voe_audio_transport)
: channel_(ch),
voe_audio_transport_(voe_audio_transport),
renderer_(NULL) {
}
virtual ~WebRtcVoiceChannelRenderer() {
Stop();
}
int channel;
AudioRenderer* renderer;
// Starts the rendering by setting a sink to the renderer to get data
// callback.
// TODO(xians): Make sure Start() is called only once.
void Start(AudioRenderer* renderer) {
ASSERT(renderer != NULL);
if (renderer_) {
ASSERT(renderer_ == renderer);
return;
}
// TODO(xians): Remove AddChannel() call after Chrome turns on APM
// in getUserMedia by default.
renderer->AddChannel(channel_);
renderer->SetSink(this);
renderer_ = renderer;
}
// Stops rendering by setting the sink of the renderer to NULL. No data
// callback will be received after this method.
void Stop() {
if (!renderer_)
return;
renderer_->RemoveChannel(channel_);
renderer_->SetSink(NULL);
renderer_ = NULL;
}
// AudioRenderer::Sink implementation.
virtual void OnData(const void* audio_data,
int bits_per_sample,
int sample_rate,
int number_of_channels,
int number_of_frames) OVERRIDE {
// TODO(xians): Make new interface in AudioTransport to pass the data to
// WebRtc VoE channel.
}
// Accessor to the VoE channel ID.
int channel() const { return channel_; }
private:
const int channel_;
webrtc::AudioTransport* const voe_audio_transport_;
// Raw pointer to AudioRenderer owned by LocalAudioTrackHandler.
// PeerConnection will make sure invalidating the pointer before the object
// goes away.
AudioRenderer* renderer_;
};
// WebRtcVoiceMediaChannel
@ -1841,8 +1892,8 @@ bool WebRtcVoiceMediaChannel::SetRecvCodecs(
for (ChannelMap::iterator it = receive_channels_.begin();
it != receive_channels_.end() && ret; ++it) {
if (engine()->voe()->codec()->SetRecPayloadType(
it->second.channel, voe_codec) == -1) {
LOG_RTCERR2(SetRecPayloadType, it->second.channel,
it->second->channel(), voe_codec) == -1) {
LOG_RTCERR2(SetRecPayloadType, it->second->channel(),
ToString(voe_codec));
ret = false;
}
@ -2047,7 +2098,7 @@ bool WebRtcVoiceMediaChannel::SetSendCodecs(
send_codecs_ = codecs;
for (ChannelMap::iterator iter = send_channels_.begin();
iter != send_channels_.end(); ++iter) {
if (!SetSendCodecs(iter->second.channel, codecs)) {
if (!SetSendCodecs(iter->second->channel(), codecs)) {
return false;
}
}
@ -2061,7 +2112,7 @@ void WebRtcVoiceMediaChannel::SetNack(const ChannelMap& channels,
bool nack_enabled) {
for (ChannelMap::const_iterator it = channels.begin();
it != channels.end(); ++it) {
SetNack(it->second.channel, nack_enabled);
SetNack(it->second->channel(), nack_enabled);
}
}
@ -2081,7 +2132,7 @@ bool WebRtcVoiceMediaChannel::SetSendCodec(
<< ", bitrate=" << send_codec.rate;
for (ChannelMap::iterator iter = send_channels_.begin();
iter != send_channels_.end(); ++iter) {
if (!SetSendCodec(iter->second.channel, send_codec))
if (!SetSendCodec(iter->second->channel(), send_codec))
return false;
}
@ -2132,9 +2183,9 @@ bool WebRtcVoiceMediaChannel::SetSendRtpHeaderExtensions(
for (ChannelMap::const_iterator iter = send_channels_.begin();
iter != send_channels_.end(); ++iter) {
if (engine()->voe()->rtp()->SetRTPAudioLevelIndicationStatus(
iter->second.channel, enable, id) == -1) {
iter->second->channel(), enable, id) == -1) {
LOG_RTCERR3(SetRTPAudioLevelIndicationStatus,
iter->second.channel, enable, id);
iter->second->channel(), enable, id);
return false;
}
}
@ -2168,9 +2219,9 @@ bool WebRtcVoiceMediaChannel::ChangePlayout(bool playout) {
}
for (ChannelMap::iterator it = receive_channels_.begin();
it != receive_channels_.end() && result; ++it) {
if (!SetPlayout(it->second.channel, playout)) {
if (!SetPlayout(it->second->channel(), playout)) {
LOG(LS_ERROR) << "SetPlayout " << playout << " on channel "
<< it->second.channel << " failed";
<< it->second->channel() << " failed";
result = false;
}
}
@ -2208,7 +2259,7 @@ bool WebRtcVoiceMediaChannel::ChangeSend(SendFlags send) {
// Change the settings on each send channel.
for (ChannelMap::iterator iter = send_channels_.begin();
iter != send_channels_.end(); ++iter) {
if (!ChangeSend(iter->second.channel, send))
if (!ChangeSend(iter->second->channel(), send))
return false;
}
@ -2280,7 +2331,7 @@ bool WebRtcVoiceMediaChannel::AddSendStream(const StreamParams& sp) {
bool default_channel_is_available = true;
for (ChannelMap::const_iterator iter = send_channels_.begin();
iter != send_channels_.end(); ++iter) {
if (IsDefaultChannel(iter->second.channel)) {
if (IsDefaultChannel(iter->second->channel())) {
default_channel_is_available = false;
break;
}
@ -2300,7 +2351,15 @@ bool WebRtcVoiceMediaChannel::AddSendStream(const StreamParams& sp) {
// Save the channel to send_channels_, so that RemoveSendStream() can still
// delete the channel in case failure happens below.
send_channels_[sp.first_ssrc()] = WebRtcVoiceChannelInfo(channel, NULL);
#ifdef USE_WEBRTC_DEV_BRANCH
webrtc::AudioTransport* audio_transport =
engine()->voe()->base()->audio_transport();
#else
webrtc::AudioTransport* audio_transport = NULL;
#endif
send_channels_.insert(std::make_pair(
sp.first_ssrc(),
new WebRtcVoiceChannelRenderer(channel, audio_transport)));
// Set the send (local) SSRC.
// If there are multiple send SSRCs, we can only set the first one here, and
@ -2319,10 +2378,10 @@ bool WebRtcVoiceMediaChannel::AddSendStream(const StreamParams& sp) {
for (ChannelMap::const_iterator it = receive_channels_.begin();
it != receive_channels_.end(); ++it) {
// Only update the SSRC for non-default channels.
if (!IsDefaultChannel(it->second.channel)) {
if (engine()->voe()->rtp()->SetLocalSSRC(it->second.channel,
if (!IsDefaultChannel(it->second->channel())) {
if (engine()->voe()->rtp()->SetLocalSSRC(it->second->channel(),
sp.first_ssrc()) != 0) {
LOG_RTCERR2(SetLocalSSRC, it->second.channel, sp.first_ssrc());
LOG_RTCERR2(SetLocalSSRC, it->second->channel(), sp.first_ssrc());
return false;
}
}
@ -2349,12 +2408,13 @@ bool WebRtcVoiceMediaChannel::RemoveSendStream(uint32 ssrc) {
return false;
}
int channel = it->second.channel;
int channel = it->second->channel();
ChangeSend(channel, SEND_NOTHING);
// Notify the audio renderer that the send channel is going away.
if (it->second.renderer)
it->second.renderer->RemoveChannel(channel);
// Delete the WebRtcVoiceChannelRenderer object connected to the channel,
// this will disconnect the audio renderer with the send channel.
delete it->second;
send_channels_.erase(it);
if (IsDefaultChannel(channel)) {
// Do not delete the default channel since the receive channels depend on
@ -2368,7 +2428,6 @@ bool WebRtcVoiceMediaChannel::RemoveSendStream(uint32 ssrc) {
return false;
}
send_channels_.erase(it);
if (send_channels_.empty())
ChangeSend(SEND_NOTHING);
@ -2394,12 +2453,19 @@ bool WebRtcVoiceMediaChannel::AddRecvStream(const StreamParams& sp) {
// Reuse default channel for recv stream in non-conference mode call
// when the default channel is not being used.
#ifdef USE_WEBRTC_DEV_BRANCH
webrtc::AudioTransport* audio_transport =
engine()->voe()->base()->audio_transport();
#else
webrtc::AudioTransport* audio_transport = NULL;
#endif
if (!InConferenceMode() && default_receive_ssrc_ == 0) {
LOG(LS_INFO) << "Recv stream " << sp.first_ssrc()
<< " reuse default channel";
default_receive_ssrc_ = sp.first_ssrc();
receive_channels_.insert(std::make_pair(
default_receive_ssrc_, WebRtcVoiceChannelInfo(voe_channel(), NULL)));
default_receive_ssrc_,
new WebRtcVoiceChannelRenderer(voe_channel(), audio_transport)));
return SetPlayout(voe_channel(), playout_);
}
@ -2416,7 +2482,8 @@ bool WebRtcVoiceMediaChannel::AddRecvStream(const StreamParams& sp) {
}
receive_channels_.insert(
std::make_pair(ssrc, WebRtcVoiceChannelInfo(channel, NULL)));
std::make_pair(
ssrc, new WebRtcVoiceChannelRenderer(channel, audio_transport)));
LOG(LS_INFO) << "New audio stream " << ssrc
<< " registered to VoiceEngine channel #"
@ -2493,34 +2560,28 @@ bool WebRtcVoiceMediaChannel::RemoveRecvStream(uint32 ssrc) {
return false;
}
// Delete the WebRtcVoiceChannelRenderer object connected to the channel, this
// will disconnect the audio renderer with the receive channel.
// Cache the channel before the deletion.
const int channel = it->second->channel();
delete it->second;
receive_channels_.erase(it);
if (ssrc == default_receive_ssrc_) {
ASSERT(IsDefaultChannel(it->second.channel));
ASSERT(IsDefaultChannel(channel));
// Recycle the default channel is for recv stream.
if (playout_)
SetPlayout(voe_channel(), false);
if (it->second.renderer)
it->second.renderer->RemoveChannel(voe_channel());
default_receive_ssrc_ = 0;
receive_channels_.erase(it);
return true;
}
// Non default channel.
// Notify the renderer that channel is going away.
if (it->second.renderer)
it->second.renderer->RemoveChannel(it->second.channel);
LOG(LS_INFO) << "Removing audio stream " << ssrc
<< " with VoiceEngine channel #" << it->second.channel << ".";
if (!DeleteChannel(it->second.channel)) {
// Erase the entry anyhow.
receive_channels_.erase(it);
<< " with VoiceEngine channel #" << channel << ".";
if (!DeleteChannel(channel))
return false;
}
receive_channels_.erase(it);
bool enable_default_channel_playout = false;
if (receive_channels_.empty()) {
// The last stream was removed. We can now enable the default
@ -2558,19 +2619,11 @@ bool WebRtcVoiceMediaChannel::SetRemoteRenderer(uint32 ssrc,
return true;
}
AudioRenderer* remote_renderer = it->second.renderer;
if (renderer) {
ASSERT(remote_renderer == NULL || remote_renderer == renderer);
if (!remote_renderer) {
renderer->AddChannel(it->second.channel);
}
} else if (remote_renderer) {
// |renderer| == NULL, remove the channel from the renderer.
remote_renderer->RemoveChannel(it->second.channel);
}
if (renderer)
it->second->Start(renderer);
else
it->second->Stop();
// Assign the new value to the struct.
it->second.renderer = renderer;
return true;
}
@ -2588,17 +2641,11 @@ bool WebRtcVoiceMediaChannel::SetLocalRenderer(uint32 ssrc,
return true;
}
AudioRenderer* local_renderer = it->second.renderer;
if (renderer) {
ASSERT(local_renderer == NULL || local_renderer == renderer);
if (!local_renderer)
renderer->AddChannel(it->second.channel);
} else if (local_renderer) {
local_renderer->RemoveChannel(it->second.channel);
}
if (renderer)
it->second->Start(renderer);
else
it->second->Stop();
// Assign the new value to the struct.
it->second.renderer = renderer;
return true;
}
@ -2609,7 +2656,7 @@ bool WebRtcVoiceMediaChannel::GetActiveStreams(
actives->clear();
for (ChannelMap::iterator it = receive_channels_.begin();
it != receive_channels_.end(); ++it) {
int level = GetOutputLevel(it->second.channel);
int level = GetOutputLevel(it->second->channel());
if (level > 0) {
actives->push_back(std::make_pair(it->first, level));
}
@ -2622,7 +2669,7 @@ int WebRtcVoiceMediaChannel::GetOutputLevel() {
int highest = GetOutputLevel(voe_channel());
for (ChannelMap::iterator it = receive_channels_.begin();
it != receive_channels_.end(); ++it) {
int level = GetOutputLevel(it->second.channel);
int level = GetOutputLevel(it->second->channel());
highest = talk_base::_max(level, highest);
}
return highest;
@ -2665,7 +2712,7 @@ bool WebRtcVoiceMediaChannel::SetOutputScaling(
channels.push_back(voe_channel());
for (ChannelMap::const_iterator it = receive_channels_.begin();
it != receive_channels_.end(); ++it) {
channels.push_back(it->second.channel);
channels.push_back(it->second->channel());
}
} else { // Collect only the channel of the specified ssrc.
int channel = GetReceiveChannelNum(ssrc);
@ -2801,7 +2848,7 @@ bool WebRtcVoiceMediaChannel::InsertDtmf(uint32 ssrc, int event,
bool default_channel_is_inuse = false;
for (ChannelMap::const_iterator iter = send_channels_.begin();
iter != send_channels_.end(); ++iter) {
if (IsDefaultChannel(iter->second.channel)) {
if (IsDefaultChannel(iter->second->channel())) {
default_channel_is_inuse = true;
break;
}
@ -2809,7 +2856,7 @@ bool WebRtcVoiceMediaChannel::InsertDtmf(uint32 ssrc, int event,
if (default_channel_is_inuse) {
channel = voe_channel();
} else if (!send_channels_.empty()) {
channel = send_channels_.begin()->second.channel;
channel = send_channels_.begin()->second->channel();
}
} else {
channel = GetSendChannelNum(ssrc);
@ -2907,11 +2954,12 @@ void WebRtcVoiceMediaChannel::OnRtcpReceived(
for (ChannelMap::iterator iter = send_channels_.begin();
iter != send_channels_.end(); ++iter) {
// Make sure not sending the same packet to default channel more than once.
if (IsDefaultChannel(iter->second.channel) && has_sent_to_default_channel)
if (IsDefaultChannel(iter->second->channel()) &&
has_sent_to_default_channel)
continue;
engine()->voe()->network()->ReceivedRTCPPacket(
iter->second.channel,
iter->second->channel(),
packet->data(),
static_cast<unsigned int>(packet->length()));
}
@ -3022,7 +3070,7 @@ bool WebRtcVoiceMediaChannel::GetStats(VoiceMediaInfo* info) {
for (ChannelMap::const_iterator channel_iter = send_channels_.begin();
channel_iter != send_channels_.end(); ++channel_iter) {
const int channel = channel_iter->second.channel;
const int channel = channel_iter->second->channel();
// Fill in the sender info, based on what we know, and what the
// remote side told us it got from its RTCP report.
@ -3094,7 +3142,7 @@ bool WebRtcVoiceMediaChannel::GetStats(VoiceMediaInfo* info) {
std::vector<int> channels;
for (ChannelMap::const_iterator it = receive_channels_.begin();
it != receive_channels_.end(); ++it) {
channels.push_back(it->second.channel);
channels.push_back(it->second->channel());
}
if (channels.empty()) {
channels.push_back(voe_channel());
@ -3172,7 +3220,7 @@ bool WebRtcVoiceMediaChannel::FindSsrc(int channel_num, uint32* ssrc) {
// Check whether this is a sending channel.
for (ChannelMap::const_iterator it = send_channels_.begin();
it != send_channels_.end(); ++it) {
if (it->second.channel == channel_num) {
if (it->second->channel() == channel_num) {
// This is a sending channel.
uint32 local_ssrc = 0;
if (engine()->voe()->rtp()->GetLocalSSRC(
@ -3186,7 +3234,7 @@ bool WebRtcVoiceMediaChannel::FindSsrc(int channel_num, uint32* ssrc) {
// Check whether this is a receiving channel.
for (ChannelMap::const_iterator it = receive_channels_.begin();
it != receive_channels_.end(); ++it) {
if (it->second.channel == channel_num) {
if (it->second->channel() == channel_num) {
*ssrc = it->first;
return true;
}
@ -3214,14 +3262,14 @@ int WebRtcVoiceMediaChannel::GetOutputLevel(int channel) {
int WebRtcVoiceMediaChannel::GetReceiveChannelNum(uint32 ssrc) {
ChannelMap::iterator it = receive_channels_.find(ssrc);
if (it != receive_channels_.end())
return it->second.channel;
return it->second->channel();
return (ssrc == default_receive_ssrc_) ? voe_channel() : -1;
}
int WebRtcVoiceMediaChannel::GetSendChannelNum(uint32 ssrc) {
ChannelMap::iterator it = send_channels_.find(ssrc);
if (it != send_channels_.end())
return it->second.channel;
return it->second->channel();
return -1;
}

View File

@ -392,8 +392,11 @@ class WebRtcVoiceMediaChannel
static Error WebRtcErrorToChannelError(int err_code);
private:
struct WebRtcVoiceChannelInfo;
typedef std::map<uint32, WebRtcVoiceChannelInfo> ChannelMap;
class WebRtcVoiceChannelRenderer;
// Map of ssrc to WebRtcVoiceChannelRenderer object. A new object of
// WebRtcVoiceChannelRenderer will be created for every new stream and
// will be destroyed when the stream goes away.
typedef std::map<uint32, WebRtcVoiceChannelRenderer*> ChannelMap;
void SetNack(int channel, bool nack_enabled);
void SetNack(const ChannelMap& channels, bool nack_enabled);

View File

@ -193,6 +193,9 @@ class DtlsTransportChannelWrapper : public TransportChannelImpl {
virtual void SetIceTiebreaker(uint64 tiebreaker) {
channel_->SetIceTiebreaker(tiebreaker);
}
// Forwards to the wrapped channel; reports the ICE protocol type it is
// currently configured with.
virtual bool GetIceProtocolType(IceProtocolType* type) const {
  return channel_->GetIceProtocolType(type);
}
// Forwards the requested ICE protocol type to the wrapped channel.
virtual void SetIceProtocolType(IceProtocolType type) {
  channel_->SetIceProtocolType(type);
}

View File

@ -101,6 +101,10 @@ class FakeTransportChannel : public TransportChannelImpl,
virtual void SetIceRole(IceRole role) { role_ = role; }
virtual IceRole GetIceRole() const { return role_; }
virtual void SetIceTiebreaker(uint64 tiebreaker) { tiebreaker_ = tiebreaker; }
// Returns the fake channel's stored ICE protocol type; always succeeds.
virtual bool GetIceProtocolType(IceProtocolType* type) const {
  *type = ice_proto_;
  return true;
}
// Records the ICE protocol type for later inspection via GetIceProtocolType.
virtual void SetIceProtocolType(IceProtocolType type) { ice_proto_ = type; }
virtual void SetIceCredentials(const std::string& ice_ufrag,
const std::string& ice_pwd) {

View File

@ -167,7 +167,7 @@ P2PTransportChannel::P2PTransportChannel(const std::string& content_name,
pending_best_connection_(NULL),
sort_dirty_(false),
was_writable_(false),
protocol_type_(ICEPROTO_GOOGLE),
protocol_type_(ICEPROTO_HYBRID),
remote_ice_mode_(ICEMODE_FULL),
ice_role_(ICEROLE_UNKNOWN),
tiebreaker_(0),
@ -237,6 +237,11 @@ void P2PTransportChannel::SetIceTiebreaker(uint64 tiebreaker) {
tiebreaker_ = tiebreaker;
}
// Reports the channel's current ICE protocol type via |type|.
// Always succeeds.
bool P2PTransportChannel::GetIceProtocolType(IceProtocolType* type) const {
  *type = protocol_type_;
  return true;
}
void P2PTransportChannel::SetIceProtocolType(IceProtocolType type) {
ASSERT(worker_thread_ == talk_base::Thread::Current());
@ -467,7 +472,7 @@ void P2PTransportChannel::OnUnknownAddress(
// Create a new candidate with this address.
std::string type;
if (protocol_type_ == ICEPROTO_RFC5245) {
if (port->IceProtocol() == ICEPROTO_RFC5245) {
type = PRFLX_PORT_TYPE;
} else {
// G-ICE doesn't support prflx candidate.
@ -491,7 +496,7 @@ void P2PTransportChannel::OnUnknownAddress(
new_remote_candidate.GetPriority(ICE_TYPE_PREFERENCE_SRFLX));
}
if (protocol_type_ == ICEPROTO_RFC5245) {
if (port->IceProtocol() == ICEPROTO_RFC5245) {
// RFC 5245
// If the source transport address of the request does not match any
// existing remote candidates, it represents a new peer reflexive remote
@ -884,6 +889,15 @@ void P2PTransportChannel::SortConnections() {
// will be sorted.
UpdateConnectionStates();
if (protocol_type_ == ICEPROTO_HYBRID) {
// If we are in hybrid mode, we are not sending any ping requests, so there
// is no point in sorting the connections. In the hybrid state, each port may
// have settled on a protocol other than hybrid, and those protocols may
// differ from one another. Instead, just update the state of this channel.
UpdateChannelState();
return;
}
// Any changes after this point will require a re-sort.
sort_dirty_ = false;

View File

@ -79,6 +79,7 @@ class P2PTransportChannel : public TransportChannelImpl,
virtual void SetIceRole(IceRole role);
virtual IceRole GetIceRole() const { return ice_role_; }
virtual void SetIceTiebreaker(uint64 tiebreaker);
virtual bool GetIceProtocolType(IceProtocolType* type) const;
virtual void SetIceProtocolType(IceProtocolType type);
virtual void SetIceCredentials(const std::string& ice_ufrag,
const std::string& ice_pwd);

View File

@ -588,6 +588,46 @@ class P2PTransportChannelTestBase : public testing::Test,
TestSendRecv(1);
}
// Connects endpoint 0 (starting in ICEPROTO_HYBRID, controlling) to
// endpoint 1 (running |proto|, controlled), then switches endpoint 0 to
// |proto| and verifies that data can be sent and received in both
// directions.
void TestHybridConnectivity(cricket::IceProtocolType proto) {
  AddAddress(0, kPublicAddrs[0]);
  AddAddress(1, kPublicAddrs[1]);
  SetAllocationStepDelay(0, kMinimumStepDelay);
  SetAllocationStepDelay(1, kMinimumStepDelay);
  SetIceRole(0, cricket::ICEROLE_CONTROLLING);
  SetIceProtocol(0, cricket::ICEPROTO_HYBRID);
  SetIceTiebreaker(0, kTiebreaker1);
  SetIceRole(1, cricket::ICEROLE_CONTROLLED);
  SetIceProtocol(1, proto);
  SetIceTiebreaker(1, kTiebreaker2);
  CreateChannels(1);
  // While the channel is in hybrid mode and acting as the controlling agent,
  // it receives ping requests from the remote side, so its connection becomes
  // readable. A hybrid channel sends no pings itself, so no connection is
  // writable yet. Channel 2 is in the controlled state and has not received
  // pings carrying the USE-CANDIDATE attribute, so it has no readable or
  // writable connections either.
  EXPECT_TRUE_WAIT(ep1_ch1()->readable(), 1000);
  // Commit endpoint 0 to the real protocol under test.
  ep1_ch1()->SetIceProtocolType(proto);
  // The channel should now be able to send ping requests, and connections
  // become writable in both directions.
  EXPECT_TRUE_WAIT(ep1_ch1()->readable() && ep1_ch1()->writable() &&
                   ep2_ch1()->readable() && ep2_ch1()->writable(),
                   1000);
  EXPECT_TRUE(
      ep1_ch1()->best_connection() && ep2_ch1()->best_connection() &&
      LocalCandidate(ep1_ch1())->address().EqualIPs(kPublicAddrs[0]) &&
      RemoteCandidate(ep1_ch1())->address().EqualIPs(kPublicAddrs[1]));
  TestSendRecv(1);
  DestroyChannels();
}
// Test hook: answers a channel's signaling request immediately by telling it
// signaling is ready.
void OnChannelRequestSignaling(cricket::TransportChannelImpl* channel) {
  channel->OnSignalingReady();
}
@ -1082,6 +1122,7 @@ TEST_F(P2PTransportChannelTest, HandleUfragPwdChangeAsIce) {
cricket::ICEPROTO_RFC5245);
CreateChannels(1);
TestHandleIceUfragPasswordChanged();
DestroyChannels();
}
// Test that we restart candidate allocation when local ufrag&pwd changed.
@ -1097,6 +1138,7 @@ TEST_F(P2PTransportChannelTest, HandleUfragPwdChangeBundleAsIce) {
CreateChannels(2);
TestHandleIceUfragPasswordChanged();
DestroyChannels();
}
// Test that we restart candidate allocation when local ufrag&pwd changed.
@ -1109,6 +1151,7 @@ TEST_F(P2PTransportChannelTest, HandleUfragPwdChangeAsGice) {
cricket::ICEPROTO_GOOGLE);
CreateChannels(1);
TestHandleIceUfragPasswordChanged();
DestroyChannels();
}
// Test that ICE restart works when bundle is enabled.
@ -1124,6 +1167,7 @@ TEST_F(P2PTransportChannelTest, HandleUfragPwdChangeBundleAsGice) {
CreateChannels(2);
TestHandleIceUfragPasswordChanged();
DestroyChannels();
}
// Test the operation of GetStats.
@ -1389,6 +1433,19 @@ TEST_F(P2PTransportChannelTest, TestIceConfigWillPassDownToPort) {
ep2_ch1()->best_connection());
TestSendRecv(1);
DestroyChannels();
}
// This test verifies channel can handle ice messages when channel is in
// hybrid mode.
TEST_F(P2PTransportChannelTest, TestConnectivityBetweenHybridandIce) {
  // Hybrid endpoint against an RFC 5245 (standard ICE) endpoint.
  TestHybridConnectivity(cricket::ICEPROTO_RFC5245);
}
// This test verifies channel can handle Gice messages when channel is in
// hybrid mode.
TEST_F(P2PTransportChannelTest, TestConnectivityBetweenHybridandGice) {
  // Hybrid endpoint against a GICE (Google ICE) endpoint.
  TestHybridConnectivity(cricket::ICEPROTO_GOOGLE);
}
// Verify that we can set DSCP value and retrieve properly from P2PTC.

View File

@ -178,11 +178,10 @@ Port::Port(talk_base::Thread* thread, talk_base::PacketSocketFactory* factory,
password_(password),
timeout_delay_(kPortTimeoutDelay),
enable_port_packets_(false),
ice_protocol_(ICEPROTO_GOOGLE),
ice_protocol_(ICEPROTO_HYBRID),
ice_role_(ICEROLE_UNKNOWN),
tiebreaker_(0),
shared_socket_(true),
default_dscp_(talk_base::DSCP_NO_CHANGE) {
shared_socket_(true) {
Construct();
}
@ -205,11 +204,10 @@ Port::Port(talk_base::Thread* thread, const std::string& type,
password_(password),
timeout_delay_(kPortTimeoutDelay),
enable_port_packets_(false),
ice_protocol_(ICEPROTO_GOOGLE),
ice_protocol_(ICEPROTO_HYBRID),
ice_role_(ICEROLE_UNKNOWN),
tiebreaker_(0),
shared_socket_(false),
default_dscp_(talk_base::DSCP_NO_CHANGE) {
shared_socket_(false) {
ASSERT(factory_ != NULL);
Construct();
}
@ -341,6 +339,10 @@ bool Port::IsGoogleIce() const {
return (ice_protocol_ == ICEPROTO_GOOGLE);
}
// Returns true if this port is still in hybrid mode, i.e. it has not yet
// committed to either GICE or RFC 5245.
bool Port::IsHybridIce() const {
  return (ice_protocol_ == ICEPROTO_HYBRID);
}
bool Port::GetStunMessage(const char* data, size_t size,
const talk_base::SocketAddress& addr,
IceMessage** out_msg, std::string* out_username) {
@ -382,7 +384,9 @@ bool Port::GetStunMessage(const char* data, size_t size,
// If the username is bad or unknown, fail with a 401 Unauthorized.
std::string local_ufrag;
std::string remote_ufrag;
if (!ParseStunUsername(stun_msg.get(), &local_ufrag, &remote_ufrag) ||
IceProtocolType remote_protocol_type;
if (!ParseStunUsername(stun_msg.get(), &local_ufrag, &remote_ufrag,
&remote_protocol_type) ||
local_ufrag != username_fragment()) {
LOG_J(LS_ERROR, this) << "Received STUN request with bad local username "
<< local_ufrag << " from "
@ -392,6 +396,15 @@ bool Port::GetStunMessage(const char* data, size_t size,
return true;
}
// Port is initialized to GOOGLE-ICE protocol type. If pings from remote
// are received before the signal message, protocol type may be different.
// Based on the STUN username, we can determine what's the remote protocol.
// This also enables us to send the response back using the same protocol
// as the request.
if (IsHybridIce()) {
SetIceProtocolType(remote_protocol_type);
}
// If ICE, and the MESSAGE-INTEGRITY is bad, fail with a 401 Unauthorized
if (IsStandardIce() &&
!stun_msg->ValidateMessageIntegrity(data, size, password_)) {
@ -453,7 +466,8 @@ bool Port::IsCompatibleAddress(const talk_base::SocketAddress& addr) {
bool Port::ParseStunUsername(const StunMessage* stun_msg,
std::string* local_ufrag,
std::string* remote_ufrag) const {
std::string* remote_ufrag,
IceProtocolType* remote_protocol_type) const {
// The packet must include a username that either begins or ends with our
// fragment. It should begin with our fragment if it is a request and it
// should end with our fragment if it is a response.
@ -465,8 +479,16 @@ bool Port::ParseStunUsername(const StunMessage* stun_msg,
return false;
const std::string username_attr_str = username_attr->GetString();
if (IsStandardIce()) {
size_t colon_pos = username_attr_str.find(":");
size_t colon_pos = username_attr_str.find(":");
// If we are in hybrid mode set the appropriate ice protocol type based on
// the username argument style.
if (IsHybridIce()) {
*remote_protocol_type = (colon_pos != std::string::npos) ?
ICEPROTO_RFC5245 : ICEPROTO_GOOGLE;
} else {
*remote_protocol_type = ice_protocol_;
}
if (*remote_protocol_type == ICEPROTO_RFC5245) {
if (colon_pos != std::string::npos) { // RFRAG:LFRAG
*local_ufrag = username_attr_str.substr(0, colon_pos);
*remote_ufrag = username_attr_str.substr(
@ -474,7 +496,7 @@ bool Port::ParseStunUsername(const StunMessage* stun_msg,
} else {
return false;
}
} else if (IsGoogleIce()) {
} else if (*remote_protocol_type == ICEPROTO_GOOGLE) {
int remote_frag_len = static_cast<int>(username_attr_str.size());
remote_frag_len -= static_cast<int>(username_fragment().size());
if (remote_frag_len < 0)
@ -716,7 +738,7 @@ void Port::CheckTimeout() {
}
const std::string Port::username_fragment() const {
if (IsGoogleIce() &&
if (!IsStandardIce() &&
component_ == ICE_CANDIDATE_COMPONENT_RTCP) {
// In GICE mode, we should adjust username fragment for rtcp component.
return GetRtcpUfragFromRtpUfrag(ice_username_fragment_);
@ -997,7 +1019,7 @@ void Connection::OnReadPacket(
// id's match.
case STUN_BINDING_RESPONSE:
case STUN_BINDING_ERROR_RESPONSE:
if (port_->IceProtocol() == ICEPROTO_GOOGLE ||
if (port_->IsGoogleIce() ||
msg->ValidateMessageIntegrity(
data, size, remote_candidate().password())) {
requests_.CheckResponse(msg.get());

View File

@ -280,7 +280,8 @@ class Port : public PortInterface, public talk_base::MessageHandler,
// stun username attribute if present.
bool ParseStunUsername(const StunMessage* stun_msg,
std::string* local_username,
std::string* remote_username) const;
std::string* remote_username,
IceProtocolType* remote_protocol_type) const;
void CreateStunUsername(const std::string& remote_username,
std::string* stun_username_attr_str) const;
@ -301,10 +302,8 @@ class Port : public PortInterface, public talk_base::MessageHandler,
// Returns if Google ICE protocol is used.
bool IsGoogleIce() const;
// Returns default DSCP value.
talk_base::DiffServCodePoint DefaultDscpValue() const {
return default_dscp_;
}
// Returns if Hybrid ICE protocol is used.
bool IsHybridIce() const;
protected:
enum {
@ -341,9 +340,10 @@ class Port : public PortInterface, public talk_base::MessageHandler,
// Checks if the address in addr is compatible with the port's ip.
bool IsCompatibleAddress(const talk_base::SocketAddress& addr);
// Default DSCP value for this port. Set by TransportChannel.
void SetDefaultDscpValue(talk_base::DiffServCodePoint dscp) {
default_dscp_ = dscp;
// Returns default DSCP value.
talk_base::DiffServCodePoint DefaultDscpValue() const {
// No change from what MediaChannel set.
return talk_base::DSCP_NO_CHANGE;
}
private:
@ -384,9 +384,6 @@ class Port : public PortInterface, public talk_base::MessageHandler,
IceRole ice_role_;
uint64 tiebreaker_;
bool shared_socket_;
// DSCP value for ICE/STUN messages. Set by the P2PTransportChannel after
// port becomes ready.
talk_base::DiffServCodePoint default_dscp_;
// Information to use when going through a proxy.
std::string user_agent_;
talk_base::ProxyInfo proxy_;

View File

@ -1280,21 +1280,37 @@ TEST_F(PortTest, TestSkipCrossFamilyUdp) {
// This test verifies DSCP value set through SetOption interface can be
// get through DefaultDscpValue.
TEST_F(PortTest, TestDefaultDscpValue) {
int dscp;
talk_base::scoped_ptr<UDPPort> udpport(CreateUdpPort(kLocalAddr1));
udpport->SetOption(talk_base::Socket::OPT_DSCP, talk_base::DSCP_CS6);
EXPECT_EQ(talk_base::DSCP_CS6, udpport->DefaultDscpValue());
EXPECT_EQ(0, udpport->SetOption(talk_base::Socket::OPT_DSCP,
talk_base::DSCP_CS6));
EXPECT_EQ(0, udpport->GetOption(talk_base::Socket::OPT_DSCP, &dscp));
talk_base::scoped_ptr<TCPPort> tcpport(CreateTcpPort(kLocalAddr1));
tcpport->SetOption(talk_base::Socket::OPT_DSCP, talk_base::DSCP_AF31);
EXPECT_EQ(talk_base::DSCP_AF31, tcpport->DefaultDscpValue());
EXPECT_EQ(0, tcpport->SetOption(talk_base::Socket::OPT_DSCP,
talk_base::DSCP_AF31));
EXPECT_EQ(0, tcpport->GetOption(talk_base::Socket::OPT_DSCP, &dscp));
EXPECT_EQ(talk_base::DSCP_AF31, dscp);
talk_base::scoped_ptr<StunPort> stunport(
CreateStunPort(kLocalAddr1, nat_socket_factory1()));
stunport->SetOption(talk_base::Socket::OPT_DSCP, talk_base::DSCP_AF41);
EXPECT_EQ(talk_base::DSCP_AF41, stunport->DefaultDscpValue());
talk_base::scoped_ptr<TurnPort> turnport(CreateTurnPort(
EXPECT_EQ(0, stunport->SetOption(talk_base::Socket::OPT_DSCP,
talk_base::DSCP_AF41));
EXPECT_EQ(0, stunport->GetOption(talk_base::Socket::OPT_DSCP, &dscp));
EXPECT_EQ(talk_base::DSCP_AF41, dscp);
talk_base::scoped_ptr<TurnPort> turnport1(CreateTurnPort(
kLocalAddr1, nat_socket_factory1(), PROTO_UDP, PROTO_UDP));
turnport->SetOption(talk_base::Socket::OPT_DSCP, talk_base::DSCP_CS7);
EXPECT_EQ(talk_base::DSCP_CS7, turnport->DefaultDscpValue());
// TODO(mallinath) - Test DSCP through GetOption.
// Socket is created in PrepareAddress.
turnport1->PrepareAddress();
EXPECT_EQ(0, turnport1->SetOption(talk_base::Socket::OPT_DSCP,
talk_base::DSCP_CS7));
EXPECT_EQ(0, turnport1->GetOption(talk_base::Socket::OPT_DSCP, &dscp));
EXPECT_EQ(talk_base::DSCP_CS7, dscp);
// This will verify correct value returned without the socket.
talk_base::scoped_ptr<TurnPort> turnport2(CreateTurnPort(
kLocalAddr1, nat_socket_factory1(), PROTO_UDP, PROTO_UDP));
EXPECT_EQ(0, turnport2->SetOption(talk_base::Socket::OPT_DSCP,
talk_base::DSCP_CS6));
EXPECT_EQ(0, turnport2->GetOption(talk_base::Socket::OPT_DSCP, &dscp));
EXPECT_EQ(talk_base::DSCP_CS6, dscp);
}
// Test sending STUN messages in GICE format.
@ -1665,6 +1681,81 @@ TEST_F(PortTest, TestHandleStunMessageAsIce) {
out_msg->GetErrorCode()->reason());
}
// This test verifies port can handle ICE messages in Hybrid mode and switches
// ICEPROTO_RFC5245 mode after successfully handling the message.
TEST_F(PortTest, TestHandleStunMessageAsIceInHybridMode) {
  // This port plays the "remote" side and begins in hybrid mode.
  talk_base::scoped_ptr<TestPort> port(
      CreateTestPort(kLocalAddr2, "rfrag", "rpass"));
  port->SetIceProtocolType(ICEPROTO_HYBRID);

  talk_base::SocketAddress src_addr(kLocalAddr1);
  talk_base::scoped_ptr<ByteBuffer> packet(new ByteBuffer());
  talk_base::scoped_ptr<IceMessage> request, response;
  std::string parsed_username;

  // Build a BINDING-REQUEST carrying an RFC 5245 style username
  // ("remote:local") together with MESSAGE-INTEGRITY and FINGERPRINT.
  request.reset(CreateStunMessageWithUsername(STUN_BINDING_REQUEST,
                                              "rfrag:lfrag"));
  request->AddMessageIntegrity("rpass");
  request->AddFingerprint();
  WriteStunMessage(request.get(), packet.get());

  EXPECT_TRUE(port->GetStunMessage(packet->Data(), packet->Length(), src_addr,
                                   response.accept(), &parsed_username));
  EXPECT_TRUE(response.get() != NULL);
  EXPECT_EQ("lfrag", parsed_username);
  // Handling the ICE-formatted request must move the port out of hybrid
  // mode and into RFC 5245.
  EXPECT_EQ(ICEPROTO_RFC5245, port->IceProtocol());
}
// This test verifies port can handle GICE messages in Hybrid mode and switches
// ICEPROTO_GOOGLE mode after successfully handling the message.
TEST_F(PortTest, TestHandleStunMessageAsGiceInHybridMode) {
  // This port plays the "remote" side and begins in hybrid mode.
  talk_base::scoped_ptr<TestPort> port(
      CreateTestPort(kLocalAddr2, "rfrag", "rpass"));
  port->SetIceProtocolType(ICEPROTO_HYBRID);

  talk_base::SocketAddress src_addr(kLocalAddr1);
  talk_base::scoped_ptr<ByteBuffer> packet(new ByteBuffer());
  talk_base::scoped_ptr<IceMessage> request, response;
  std::string parsed_username;

  // Build a GICE-style BINDING-REQUEST: concatenated username, no
  // MESSAGE-INTEGRITY attribute.
  request.reset(CreateStunMessageWithUsername(STUN_BINDING_REQUEST,
                                              "rfraglfrag"));
  WriteStunMessage(request.get(), packet.get());

  EXPECT_TRUE(port->GetStunMessage(packet->Data(), packet->Length(), src_addr,
                                   response.accept(), &parsed_username));
  EXPECT_TRUE(response.get() != NULL);  // Succeeds, since this is GICE.
  EXPECT_EQ("lfrag", parsed_username);
  // Handling the GICE-formatted request must move the hybrid port into
  // GOOGLE mode.
  EXPECT_EQ(ICEPROTO_GOOGLE, port->IceProtocol());
}
// Verify port is not switched out of RFC5245 mode if GICE message is received
// in that mode.
TEST_F(PortTest, TestHandleStunMessageAsGiceInIceMode) {
  // This port plays the "remote" side and is pinned to RFC 5245 (not hybrid).
  talk_base::scoped_ptr<TestPort> port(
      CreateTestPort(kLocalAddr2, "rfrag", "rpass"));
  port->SetIceProtocolType(ICEPROTO_RFC5245);

  talk_base::SocketAddress src_addr(kLocalAddr1);
  talk_base::scoped_ptr<ByteBuffer> packet(new ByteBuffer());
  talk_base::scoped_ptr<IceMessage> request, response;
  std::string parsed_username;

  // Build a GICE-style BINDING-REQUEST: concatenated username, no
  // MESSAGE-INTEGRITY attribute.
  request.reset(CreateStunMessageWithUsername(STUN_BINDING_REQUEST,
                                              "rfraglfrag"));
  WriteStunMessage(request.get(), packet.get());

  // The request must be rejected (no MESSAGE-INTEGRITY or FINGERPRINT), and
  // the port must stay in RFC 5245 mode rather than falling back to GICE.
  EXPECT_FALSE(port->GetStunMessage(packet->Data(), packet->Length(), src_addr,
                                    response.accept(), &parsed_username));
  EXPECT_EQ(ICEPROTO_RFC5245, port->IceProtocol());
}
// Tests handling of GICE binding requests with missing or incorrect usernames.
TEST_F(PortTest, TestHandleStunMessageAsGiceBadUsername) {
talk_base::scoped_ptr<TestPort> port(

View File

@ -97,7 +97,10 @@ class RawTransportChannel : public TransportChannelImpl,
virtual IceRole GetIceRole() const { return ICEROLE_UNKNOWN; }
virtual void SetIceRole(IceRole role) {}
virtual void SetIceTiebreaker(uint64 tiebreaker) {}
// Raw transport does not use ICE, so there is no protocol type to report.
virtual bool GetIceProtocolType(IceProtocolType* type) const { return false; }
// No-op: raw transport ignores the ICE protocol type.
virtual void SetIceProtocolType(IceProtocolType type) {}
virtual void SetIceUfrag(const std::string& ice_ufrag) {}
virtual void SetIcePwd(const std::string& ice_pwd) {}
virtual void SetRemoteIceMode(IceMode mode) {}

View File

@ -359,14 +359,6 @@ int RelayPort::SendTo(const void* data, size_t size,
int RelayPort::SetOption(talk_base::Socket::Option opt, int value) {
int result = 0;
// DSCP option is not passed to the socket.
// TODO(mallinath) - After we have the support on socket,
// remove this specialization.
if (opt == talk_base::Socket::OPT_DSCP) {
SetDefaultDscpValue(static_cast<talk_base::DiffServCodePoint>(value));
return result;
}
for (size_t i = 0; i < entries_.size(); ++i) {
if (entries_[i]->SetSocketOption(opt, value) < 0) {
result = -1;

View File

@ -230,12 +230,6 @@ int UDPPort::SendTo(const void* data, size_t size,
}
int UDPPort::SetOption(talk_base::Socket::Option opt, int value) {
// TODO(mallinath) - After we have the support on socket,
// remove this specialization.
if (opt == talk_base::Socket::OPT_DSCP) {
SetDefaultDscpValue(static_cast<talk_base::DiffServCodePoint>(value));
return 0;
}
return socket_->SetOption(opt, value);
}

View File

@ -167,14 +167,6 @@ int TCPPort::GetOption(talk_base::Socket::Option opt, int* value) {
}
int TCPPort::SetOption(talk_base::Socket::Option opt, int value) {
// If we are setting DSCP value, pass value to base Port and return.
// TODO(mallinath) - After we have the support on socket,
// remove this specialization.
if (opt == talk_base::Socket::OPT_DSCP) {
SetDefaultDscpValue(static_cast<talk_base::DiffServCodePoint>(value));
return 0;
}
if (socket_) {
return socket_->SetOption(opt, value);
} else {

View File

@ -208,15 +208,14 @@ TransportChannelImpl* Transport::CreateChannel_w(int component) {
// Push down our transport state to the new channel.
impl->SetIceRole(ice_role_);
impl->SetIceTiebreaker(tiebreaker_);
if (local_description_) {
// TODO(ronghuawu): Change CreateChannel_w to be able to return error since
// below Apply**Description_w calls can fail.
// TODO(ronghuawu): Change CreateChannel_w to be able to return error since
// below Apply**Description_w calls can fail.
if (local_description_)
ApplyLocalTransportDescription_w(impl, NULL);
if (remote_description_) {
ApplyRemoteTransportDescription_w(impl, NULL);
ApplyNegotiatedTransportDescription_w(impl, NULL);
}
}
if (remote_description_)
ApplyRemoteTransportDescription_w(impl, NULL);
if (local_description_ && remote_description_)
ApplyNegotiatedTransportDescription_w(impl, NULL);
impl->SignalReadableState.connect(this, &Transport::OnChannelReadableState);
impl->SignalWritableState.connect(this, &Transport::OnChannelWritableState);
@ -684,6 +683,21 @@ bool Transport::SetRemoteTransportDescription_w(
bool Transport::ApplyLocalTransportDescription_w(TransportChannelImpl* ch,
std::string* error_desc) {
// If existing protocol_type is HYBRID, we may have not chosen the final
// protocol type, so update the channel protocol type from the
// local description. Otherwise, skip updating the protocol type.
// We check for HYBRID to avoid accidental changes; in the case of a
// session renegotiation, the new offer will have the google-ice ICE option,
// so we need to make sure we don't switch back from ICE mode to HYBRID
// when this happens.
// There are some other ways we could have solved this, but this is the
// simplest. The ultimate solution will be to get rid of GICE altogether.
IceProtocolType protocol_type;
if (ch->GetIceProtocolType(&protocol_type) &&
protocol_type == ICEPROTO_HYBRID) {
ch->SetIceProtocolType(
TransportProtocolFromDescription(local_description()));
}
ch->SetIceCredentials(local_description_->ice_ufrag,
local_description_->ice_pwd);
return true;

View File

@ -54,6 +54,7 @@ class TransportChannelImpl : public TransportChannel {
virtual void SetIceRole(IceRole role) = 0;
virtual void SetIceTiebreaker(uint64 tiebreaker) = 0;
// To toggle G-ICE/ICE.
virtual bool GetIceProtocolType(IceProtocolType* type) const = 0;
virtual void SetIceProtocolType(IceProtocolType type) = 0;
// SetIceCredentials only need to be implemented by the ICE
// transport channels. Non-ICE transport channels can just ignore.

View File

@ -304,14 +304,6 @@ Connection* TurnPort::CreateConnection(const Candidate& address,
}
int TurnPort::SetOption(talk_base::Socket::Option opt, int value) {
// DSCP option is not passed to the socket.
// TODO(mallinath) - After we have the support on socket,
// remove this specialization.
if (opt == talk_base::Socket::OPT_DSCP) {
SetDefaultDscpValue(static_cast<talk_base::DiffServCodePoint>(value));
return 0;
}
if (!socket_) {
// If socket is not created yet, these options will be applied during socket
// creation.
@ -322,8 +314,14 @@ int TurnPort::SetOption(talk_base::Socket::Option opt, int value) {
}
// Returns the current value of a socket option.
// If the underlying socket has not been created yet, fall back to the value
// cached by SetOption() (options recorded in |socket_options_| are applied
// when the socket is eventually created), so callers observe the value that
// will take effect. Returns 0 on success, -1 if the option is unknown.
int TurnPort::GetOption(talk_base::Socket::Option opt, int* value) {
  if (!socket_) {
    // No socket yet: answer from the pending-options cache.
    SocketOptionsMap::const_iterator it = socket_options_.find(opt);
    if (it == socket_options_.end()) {
      return -1;  // Option was never set and there is no socket to query.
    }
    *value = it->second;
    return 0;
  }
  return socket_->GetOption(opt, value);
}

View File

@ -179,6 +179,12 @@ class TurnPortTest : public testing::Test,
local_address.ipaddr(), 0, 0,
kIceUfrag1, kIcePwd1,
server_address, credentials));
// Set ICE protocol type to ICEPROTO_RFC5245, as port by default will be
// in Hybrid mode. Protocol type is necessary to send correct type STUN ping
// messages.
// This TURN port will be the controlling.
turn_port_->SetIceProtocolType(cricket::ICEPROTO_RFC5245);
turn_port_->SetIceRole(cricket::ICEROLE_CONTROLLING);
turn_port_->SignalPortComplete.connect(this,
&TurnPortTest::OnTurnPortComplete);
turn_port_->SignalPortError.connect(this,
@ -192,6 +198,10 @@ class TurnPortTest : public testing::Test,
udp_port_.reset(UDPPort::Create(main_, &socket_factory_, &network_,
kLocalAddr2.ipaddr(), 0, 0,
kIceUfrag2, kIcePwd2));
// Set protocol type to RFC5245, as turn port is also in same mode.
// UDP port will be controlled.
udp_port_->SetIceProtocolType(cricket::ICEPROTO_RFC5245);
udp_port_->SetIceRole(cricket::ICEROLE_CONTROLLED);
udp_port_->SignalPortComplete.connect(
this, &TurnPortTest::OnUdpPortComplete);
}