Update talk to 51960985.

R=wu@webrtc.org

Review URL: https://webrtc-codereview.appspot.com/2188004

git-svn-id: http://webrtc.googlecode.com/svn/trunk@4696 4adac7df-926f-26a2-2b94-8c16560cd09d
mallinath@webrtc.org
2013-09-06 22:56:28 +00:00
parent b159c2e3dd
commit 1b15f4226f
16 changed files with 198 additions and 198 deletions

View File

@@ -130,7 +130,25 @@ TEST_F(MessageQueueTest, DiposeHandlerWithPostedMessagePending) {
EXPECT_TRUE(deleted);
}
TEST(MessageQueueManager, DISABLED_Clear) {
struct UnwrapMainThreadScope {
UnwrapMainThreadScope() : rewrap_(Thread::Current() != NULL) {
if (rewrap_) ThreadManager::Instance()->UnwrapCurrentThread();
}
~UnwrapMainThreadScope() {
if (rewrap_) ThreadManager::Instance()->WrapCurrentThread();
}
private:
bool rewrap_;
};
TEST(MessageQueueManager, Clear) {
UnwrapMainThreadScope s;
if (MessageQueueManager::IsInitialized()) {
LOG(LS_INFO) << "Unable to run MessageQueueManager::Clear test, since the "
<< "MessageQueueManager was already initialized by some "
<< "other test in this run.";
return;
}
bool deleted = false;
DeletedMessageHandler* handler = new DeletedMessageHandler(&deleted);
delete handler;
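
For reference, the destruction-tracking handler this test exercises can be as small as the following sketch (the real DeletedMessageHandler is defined earlier in the unittest; its exact shape here is an assumption):

// Assumed shape of DeletedMessageHandler: sets a flag on destruction so the
// test can verify that a handler with pending messages is cleaned up
// exactly once.
struct DeletedMessageHandler : public talk_base::MessageHandler {
  explicit DeletedMessageHandler(bool* deleted) : deleted_(deleted) {}
  virtual ~DeletedMessageHandler() { *deleted_ = true; }
  virtual void OnMessage(talk_base::Message* msg) {}
 private:
  bool* deleted_;
};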

View File

@@ -241,7 +241,6 @@ struct VideoOptions {
video_three_layers.SetFrom(change.video_three_layers);
video_enable_camera_list.SetFrom(change.video_enable_camera_list);
video_one_layer_screencast.SetFrom(change.video_one_layer_screencast);
video_one_to_one.SetFrom(change.video_one_to_one);
video_high_bitrate.SetFrom(change.video_high_bitrate);
video_watermark.SetFrom(change.video_watermark);
video_temporal_layer_screencast.SetFrom(
@@ -267,7 +266,6 @@ struct VideoOptions {
video_three_layers == o.video_three_layers &&
video_enable_camera_list == o.video_enable_camera_list &&
video_one_layer_screencast == o.video_one_layer_screencast &&
video_one_to_one == o.video_one_to_one &&
video_high_bitrate == o.video_high_bitrate &&
video_watermark == o.video_watermark &&
video_temporal_layer_screencast == o.video_temporal_layer_screencast &&
@@ -294,7 +292,6 @@ struct VideoOptions {
ost << ToStringIfSet("3 layers", video_three_layers);
ost << ToStringIfSet("camera list", video_enable_camera_list);
ost << ToStringIfSet("1 layer screencast", video_one_layer_screencast);
ost << ToStringIfSet("1 to 1", video_one_to_one);
ost << ToStringIfSet("high bitrate", video_high_bitrate);
ost << ToStringIfSet("watermark", video_watermark);
ost << ToStringIfSet("video temporal layer screencast",
@@ -328,8 +325,6 @@ struct VideoOptions {
Settable<bool> video_enable_camera_list;
// Experimental: Enable one layer screencast?
Settable<bool> video_one_layer_screencast;
// Experimental: Enable one to one?
Settable<bool> video_one_to_one;
// Experimental: Enable WebRtc higher bitrate?
Settable<bool> video_high_bitrate;
// Experimental: Add watermark to the rendered video image.
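
Every option above goes through the Settable<T> wrapper (note the SetFrom, operator== and ToStringIfSet usage in the preceding hunks). A minimal sketch of the idiom, assuming only the interface exercised here:

// Hedged reconstruction of the Settable<T> idiom used by VideoOptions: a
// value paired with an "explicitly set" flag, so merging options via
// SetFrom() never clobbers a field with an unset default.
template <class T>
class Settable {
 public:
  Settable() : set_(false), val_() {}
  bool IsSet() const { return set_; }
  void Set(T val) { set_ = true; val_ = val; }
  // Adopt the other value only if it was explicitly set.
  void SetFrom(const Settable<T>& o) { if (o.set_) Set(o.val_); }
  bool operator==(const Settable<T>& o) const {
    return set_ == o.set_ && (!set_ || val_ == o.val_);
  }
 private:
  bool set_;
  T val_;
};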

View File

@@ -175,12 +175,6 @@ VideoAdapter::VideoAdapter()
VideoAdapter::~VideoAdapter() {
}
void VideoAdapter::SetInputFormat(const VideoFrame& in_frame) {
talk_base::CritScope cs(&critical_section_);
input_format_.width = static_cast<int>(in_frame.GetWidth());
input_format_.height = static_cast<int>(in_frame.GetHeight());
}
void VideoAdapter::SetInputFormat(const VideoFormat& format) {
talk_base::CritScope cs(&critical_section_);
input_format_ = format;
@@ -188,6 +182,29 @@ void VideoAdapter::SetInputFormat(const VideoFormat& format) {
output_format_.interval, input_format_.interval);
}
void CoordinatedVideoAdapter::SetInputFormat(const VideoFormat& format) {
int previous_width = input_format().width;
int previous_height = input_format().height;
bool is_resolution_change = previous_width > 0 && format.width > 0 &&
(previous_width != format.width ||
previous_height != format.height);
VideoAdapter::SetInputFormat(format);
if (is_resolution_change) {
int width, height;
// Trigger the adaptation logic again, to potentially reset the adaptation
// state for things like view requests that may no longer be capping
// output (or may now cap output).
AdaptToMinimumFormat(&width, &height);
LOG(LS_INFO) << "VAdapt Input Resolution Change: "
<< "Previous input resolution: "
<< previous_width << "x" << previous_height
<< " New input resolution: "
<< format.width << "x" << format.height
<< " New output resolution: "
<< width << "x" << height;
}
}
void VideoAdapter::SetOutputFormat(const VideoFormat& format) {
talk_base::CritScope cs(&critical_section_);
output_format_ = format;
@@ -231,7 +248,9 @@ bool VideoAdapter::AdaptFrame(const VideoFrame* in_frame,
++frames_;
// Update input to actual frame dimensions.
SetInputFormat(*in_frame);
VideoFormat format(in_frame->GetWidth(), in_frame->GetHeight(),
input_format_.interval, input_format_.fourcc);
SetInputFormat(format);
// Drop the input frame if necessary.
bool should_drop = false;
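
The resolution-change test in CoordinatedVideoAdapter::SetInputFormat above boils down to a small pure predicate; pulled out as a hypothetical helper for clarity:

// Hypothetical standalone form of the check: both widths must be known
// (> 0) and at least one dimension must differ.
static bool IsResolutionChange(int prev_w, int prev_h, int new_w, int new_h) {
  return prev_w > 0 && new_w > 0 && (prev_w != new_w || prev_h != new_h);
}

Note that a first-ever input format (prev_w == 0) deliberately does not count as a change, so adaptation is not re-triggered before any frame has been seen.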

View File

@@ -45,8 +45,7 @@ class VideoAdapter {
VideoAdapter();
virtual ~VideoAdapter();
void SetInputFormat(const VideoFrame& in_frame);
void SetInputFormat(const VideoFormat& format);
virtual void SetInputFormat(const VideoFormat& format);
void SetOutputFormat(const VideoFormat& format);
// Constrain output resolution to this many pixels overall
void SetOutputNumPixels(int num_pixels);
@@ -120,6 +119,8 @@ class CoordinatedVideoAdapter
CoordinatedVideoAdapter();
virtual ~CoordinatedVideoAdapter() {}
virtual void SetInputFormat(const VideoFormat& format);
// Enable or disable video adaptation due to the change of the CPU load.
void set_cpu_adaptation(bool enable) { cpu_adaptation_ = enable; }
bool cpu_adaptation() const { return cpu_adaptation_; }

View File

@@ -103,6 +103,7 @@ VideoCapturer::VideoCapturer(talk_base::Thread* thread) : thread_(thread) {
void VideoCapturer::Construct() {
ClearAspectRatio();
enable_camera_list_ = false;
square_pixel_aspect_ratio_ = false;
capture_state_ = CS_STOPPED;
SignalFrameCaptured.connect(this, &VideoCapturer::OnFrameCaptured);
scaled_width_ = 0;
@@ -335,18 +336,19 @@ void VideoCapturer::OnFrameCaptured(VideoCapturer*,
desired_screencast_fps, &scaled_width, &scaled_height);
}
if (scaled_width != scaled_width_ || scaled_height != scaled_height_) {
LOG(LS_VERBOSE) << "Scaling Screencast from "
<< captured_frame->width << "x"
<< captured_frame->height << " to "
<< scaled_width << "x" << scaled_height;
scaled_width_ = scaled_width;
scaled_height_ = scaled_height;
}
if (FOURCC_ARGB == captured_frame->fourcc &&
(scaled_width != captured_frame->width ||
scaled_height != captured_frame->height)) {
CapturedFrame* scaled_frame = const_cast<CapturedFrame*>(captured_frame);
scaled_height != captured_frame->height)) {
if (scaled_width != scaled_width_ || scaled_height != scaled_height_) {
LOG(LS_INFO) << "Scaling Screencast from "
<< captured_frame->width << "x"
<< captured_frame->height << " to "
<< scaled_width << "x" << scaled_height;
scaled_width_ = scaled_width;
scaled_height_ = scaled_height;
}
CapturedFrame* modified_frame =
const_cast<CapturedFrame*>(captured_frame);
// Compute new width such that width * height is less than maximum but
// maintains original captured frame aspect ratio.
// Round down width to multiple of 4 so odd width won't round up beyond
@@ -355,17 +357,88 @@ void VideoCapturer::OnFrameCaptured(VideoCapturer*,
libyuv::ARGBScale(reinterpret_cast<const uint8*>(captured_frame->data),
captured_frame->width * 4, captured_frame->width,
captured_frame->height,
reinterpret_cast<uint8*>(scaled_frame->data),
reinterpret_cast<uint8*>(modified_frame->data),
scaled_width * 4, scaled_width, scaled_height,
libyuv::kFilterBilinear);
scaled_frame->width = scaled_width;
scaled_frame->height = scaled_height;
scaled_frame->data_size = scaled_width * 4 * scaled_height;
modified_frame->width = scaled_width;
modified_frame->height = scaled_height;
modified_frame->data_size = scaled_width * 4 * scaled_height;
}
}
const int kYuy2Bpp = 2;
const int kArgbBpp = 4;
// TODO(fbarchard): Make a helper function to adjust pixels to square.
// TODO(fbarchard): Hook up experiment to scaling.
// TODO(fbarchard): Avoid scale and convert if muted.
// Temporary buffer is scoped here so it will persist until i420_frame.Init()
// makes a copy of the frame, converting to I420.
talk_base::scoped_array<uint8> temp_buffer;
// YUY2 can be scaled vertically using an ARGB scaler. Pixel aspect ratio is
// only a problem on OSX, which always converts webcams to YUY2 or UYVY.
bool can_scale =
FOURCC_YUY2 == CanonicalFourCC(captured_frame->fourcc) ||
FOURCC_UYVY == CanonicalFourCC(captured_frame->fourcc);
// If pixels are not square, optionally use vertical scaling to make them
// square. Square pixels simplify the rest of the pipeline, including
// effects and rendering.
if (can_scale && square_pixel_aspect_ratio_ &&
captured_frame->pixel_width != captured_frame->pixel_height) {
int scaled_width, scaled_height;
// modified_frame points to captured_frame with the const cast away so it
// can be modified.
CapturedFrame* modified_frame = const_cast<CapturedFrame*>(captured_frame);
// Compute the frame size that makes the pixel aspect ratio square.
ComputeScaleToSquarePixels(captured_frame->width, captured_frame->height,
captured_frame->pixel_width,
captured_frame->pixel_height,
&scaled_width, &scaled_height);
if (scaled_width != scaled_width_ || scaled_height != scaled_height_) {
LOG(LS_INFO) << "Scaling WebCam from "
<< captured_frame->width << "x"
<< captured_frame->height << " to "
<< scaled_width << "x" << scaled_height
<< " for PAR "
<< captured_frame->pixel_width << "x"
<< captured_frame->pixel_height;
scaled_width_ = scaled_width;
scaled_height_ = scaled_height;
}
const int modified_frame_size = scaled_width * scaled_height * kYuy2Bpp;
uint8* temp_buffer_data;
// Pixels are wide and short; increasing height requires a temporary buffer.
if (scaled_height > captured_frame->height) {
temp_buffer.reset(new uint8[modified_frame_size]);
temp_buffer_data = temp_buffer.get();
} else {
// Pixels are narrow and tall; decreasing height, so the scale can be
// done in place.
temp_buffer_data = reinterpret_cast<uint8*>(captured_frame->data);
}
// Use ARGBScaler to vertically scale the YUY2 image, adjusting for 16 bpp.
libyuv::ARGBScale(reinterpret_cast<const uint8*>(captured_frame->data),
captured_frame->width * kYuy2Bpp, // Stride for YUY2.
captured_frame->width * kYuy2Bpp / kArgbBpp, // Width.
abs(captured_frame->height), // Height.
temp_buffer_data,
scaled_width * kYuy2Bpp, // Stride for YUY2.
scaled_width * kYuy2Bpp / kArgbBpp, // Width.
abs(scaled_height), // New height.
libyuv::kFilterBilinear);
modified_frame->width = scaled_width;
modified_frame->height = scaled_height;
modified_frame->pixel_width = 1;
modified_frame->pixel_height = 1;
modified_frame->data_size = modified_frame_size;
modified_frame->data = temp_buffer_data;
}
#endif // !DISABLE_YUV
// Size to crop the captured frame to. This adjusts the captured frame's
// aspect ratio to match the final view aspect ratio, considering pixel
// aspect ratio and rotation. The final size may be scaled down by video
// adapter to better match ratio_w_ x ratio_h_.
// Note that abs() of frame height is passed in, because source may be
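
The kYuy2Bpp / kArgbBpp arithmetic above is the heart of the trick: two 16-bit YUY2 pixels (4 bytes) are handed to libyuv::ARGBScale as one 32-bit ARGB pixel, so a 640-wide YUY2 row (stride 1280 bytes) is scaled as 320 ARGB pixels. Since ComputeScaleToSquarePixels keeps the width constant and only the height changes, the Y/U/Y/V byte layout within each row survives the vertical resample intact.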

View File

@@ -236,11 +236,18 @@ class VideoCapturer
bool enable_camera_list() {
return enable_camera_list_;
}
// Enable scaling to ensure square pixels.
void set_square_pixel_aspect_ratio(bool square_pixel_aspect_ratio) {
square_pixel_aspect_ratio_ = square_pixel_aspect_ratio;
}
bool square_pixel_aspect_ratio() {
return square_pixel_aspect_ratio_;
}
// Signal all capture state changes that are not a direct result of calling
// Start().
sigslot::signal2<VideoCapturer*, CaptureState> SignalStateChange;
// TODO(hellner): rename |SignalFrameCaptured| to something like
// |SignalRawFrame| or |SignalNativeFrame|.
// Frame callbacks are multithreaded to allow disconnect and connect to be
// called concurrently. It also ensures that it is safe to call disconnect
// at any time, which is needed since the signal may be called from an
@@ -322,6 +329,7 @@ class VideoCapturer
int ratio_w_; // View resolution. e.g. 1280 x 720.
int ratio_h_;
bool enable_camera_list_;
bool square_pixel_aspect_ratio_; // Enable scaling to square pixels.
int scaled_width_; // Current output size from ComputeScale.
int scaled_height_;
int screencast_max_pixels_; // Downscale screencasts further if requested.

View File

@@ -227,6 +227,7 @@ TEST_F(VideoCapturerTest, ScreencastScaledMaxPixels) {
EXPECT_EQ(2, renderer_.num_rendered_frames());
}
TEST_F(VideoCapturerTest, TestFourccMatch) {
cricket::VideoFormat desired(640, 480,
cricket::VideoFormat::FpsToInterval(30),

View File

@@ -222,7 +222,7 @@ void ComputeScaleToSquarePixels(int in_width, int in_height,
int pixel_width, int pixel_height,
int* scaled_width, int* scaled_height) {
*scaled_width = in_width; // Keep width the same.
*scaled_height = in_height * pixel_width / pixel_height;
*scaled_height = in_height * pixel_height / pixel_width;
}
// The C++ standard requires a namespace-scope definition of static const
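
A quick arithmetic check of the corrected formula against the unit test below: a 640x480 frame with a 4:3 pixel aspect ratio (wide pixels) yields scaled_height = 480 * 3 / 4 = 360, i.e. the expected 640x360 square-pixel frame, while the old expression (480 * 4 / 3 = 640) stretched the frame in the wrong direction. Likewise a 3:8 pixel aspect ratio (narrow, tall pixels) yields 480 * 8 / 3 = 1280.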

View File

@@ -292,11 +292,21 @@ TEST(VideoCommonTest, TestComputeScaleToSquarePixels) {
// Pixel aspect ratio is 4:3. Logical aspect ratio is 16:9. Expect scale
// to square pixels with physical aspect ratio of 16:9.
ComputeScaleToSquarePixels(640, 270,
ComputeScaleToSquarePixels(640, 480,
4, 3, // 4 x 3 pixel aspect ratio
&scaled_width, &scaled_height);
EXPECT_EQ(640, scaled_width);
EXPECT_EQ(360, scaled_height);
// Pixel aspect ratio is 3:8. Physical aspect ratio is 4:3. Expect scale
// to square pixels with logical aspect ratio of 1:2.
// Note that 640x1280 will be scaled down by the video adapter to the view
// request of 640x360 and will end up using 320x640.
ComputeScaleToSquarePixels(640, 480,
3, 8, // 3 x 8 pixel aspect ratio
&scaled_width, &scaled_height);
EXPECT_EQ(640, scaled_width);
EXPECT_EQ(1280, scaled_height);
}
} // namespace cricket

View File

@@ -787,12 +787,6 @@ class FakeWebRtcVideoEngine
// Not using WEBRTC_STUB due to bool return value
virtual bool IsIPv6Enabled(int channel) { return true; }
WEBRTC_STUB(SetMTU, (int, unsigned int));
#ifndef USE_WEBRTC_DEV_BRANCH
WEBRTC_STUB(SetPacketTimeoutNotification, (const int, bool, int));
WEBRTC_STUB(RegisterObserver, (const int, webrtc::ViENetworkObserver&));
WEBRTC_STUB(SetPeriodicDeadOrAliveStatus, (const int, const bool,
const unsigned int));
#endif
// webrtc::ViERender
WEBRTC_STUB(RegisterVideoRenderModule, (webrtc::VideoRender&));

View File

@@ -266,9 +266,6 @@ class FakeWebRtcVoiceEngine
virtual webrtc::AudioProcessing* audio_processing() OVERRIDE {
return NULL;
}
#ifndef USE_WEBRTC_DEV_BRANCH
WEBRTC_STUB(MaxNumOfChannels, ());
#endif
WEBRTC_FUNC(CreateChannel, ()) {
if (fail_create_channel_) {
return -1;
@@ -619,20 +616,6 @@ class FakeWebRtcVoiceEngine
}
WEBRTC_STUB(ReceivedRTCPPacket, (int channel, const void* data,
unsigned int length));
#ifndef USE_WEBRTC_DEV_BRANCH
// Not using WEBRTC_STUB due to bool return value
WEBRTC_STUB(SetPacketTimeoutNotification, (int channel, bool enable,
int timeoutSeconds));
WEBRTC_STUB(GetPacketTimeoutNotification, (int channel, bool& enable,
int& timeoutSeconds));
WEBRTC_STUB(RegisterDeadOrAliveObserver, (int channel,
webrtc::VoEConnectionObserver& observer));
WEBRTC_STUB(DeRegisterDeadOrAliveObserver, (int channel));
WEBRTC_STUB(GetPeriodicDeadOrAliveStatus, (int channel, bool& enabled,
int& sampleTimeSeconds));
WEBRTC_STUB(SetPeriodicDeadOrAliveStatus, (int channel, bool enable,
int sampleTimeSeconds));
#endif
// webrtc::VoERTP_RTCP
WEBRTC_STUB(RegisterRTPObserver, (int channel,
@@ -753,11 +736,7 @@ class FakeWebRtcVoiceEngine
// webrtc::VoEVideoSync
WEBRTC_STUB(GetPlayoutBufferSize, (int& bufferMs));
WEBRTC_STUB(GetPlayoutTimestamp, (int channel, unsigned int& timestamp));
#ifdef USE_WEBRTC_DEV_BRANCH
WEBRTC_STUB(GetRtpRtcp, (int, webrtc::RtpRtcp**, webrtc::RtpReceiver**));
#else
WEBRTC_STUB(GetRtpRtcp, (int, webrtc::RtpRtcp*&));
#endif
WEBRTC_STUB(SetInitTimestamp, (int channel, unsigned int timestamp));
WEBRTC_STUB(SetInitSequenceNumber, (int channel, short sequenceNumber));
WEBRTC_STUB(SetMinimumPlayoutDelay, (int channel, int delayMs));

View File

@@ -31,14 +31,7 @@
#include "talk/base/refcount.h"
#include "talk/base/scoped_ref_ptr.h"
#include "talk/media/base/videoframe.h"
#ifdef USE_WEBRTC_DEV_BRANCH
#include "webrtc/common_video/interface/native_handle.h"
#else
#include "webrtc/common_video/interface/i420_video_frame.h"
// Define NativeHandle to an existing type so we don't need to add lots of
// USE_WEBRTC_DEV_BRANCH.
#define NativeHandle I420VideoFrame
#endif
namespace cricket {

View File

@@ -221,9 +221,7 @@ class WebRtcRenderAdapter : public webrtc::ExternalRenderer {
virtual int DeliverFrame(unsigned char* buffer, int buffer_size,
uint32_t time_stamp, int64_t render_time
#ifdef USE_WEBRTC_DEV_BRANCH
, void* handle
#endif
) {
talk_base::CritScope cs(&crit_);
frame_rate_tracker_.Update(1);
@@ -238,17 +236,13 @@ class WebRtcRenderAdapter : public webrtc::ExternalRenderer {
talk_base::kNumNanosecsPerMillisec;
// Send the rtp timestamp to the renderer as the VideoFrame timestamp,
// and the render timestamp as the VideoFrame elapsed_time.
#ifdef USE_WEBRTC_DEV_BRANCH
if (handle == NULL) {
#endif
return DeliverBufferFrame(buffer, buffer_size, render_time_stamp_in_ns,
rtp_time_stamp_in_ns);
#ifdef USE_WEBRTC_DEV_BRANCH
} else {
return DeliverTextureFrame(handle, render_time_stamp_in_ns,
rtp_time_stamp_in_ns);
}
#endif
}
virtual bool IsTextureSupported() { return true; }
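
With the #ifdef guards gone, DeliverFrame() always receives the handle parameter and dispatches on it unconditionally: a NULL handle means an ordinary memory buffer, routed through DeliverBufferFrame(), while a non-NULL handle carries a texture and goes through DeliverTextureFrame(). IsTextureSupported() returning true advertises the texture path to the render module.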

View File

@@ -2824,8 +2824,10 @@ TEST(WebRtcVoiceEngineTest, HasCorrectCodecs) {
cricket::AudioCodec(96, "G722", 16000, 0, 1, 0)));
EXPECT_TRUE(engine.FindCodec(
cricket::AudioCodec(96, "red", 8000, 0, 1, 0)));
#ifndef USE_WEBRTC_DEV_BRANCH
EXPECT_TRUE(engine.FindCodec(
cricket::AudioCodec(96, "CN", 48000, 0, 1, 0)));
#endif
EXPECT_TRUE(engine.FindCodec(
cricket::AudioCodec(96, "CN", 32000, 0, 1, 0)));
EXPECT_TRUE(engine.FindCodec(

View File

@@ -1069,7 +1069,12 @@ void Connection::UpdateState(uint32 now) {
// test we can do is a simple window.
// If the other side has not sent a ping since the connection became
// readable, use |last_data_received_| as the indication.
if ((read_state_ == STATE_READABLE) &&
// If the remote endpoint is doing RFC 5245, it is not required to send
// pings once the connection is established. If this connection is serving
// a data channel, it may not be in a position to send media continuously,
// either. Do not mark the connection as timed out in RFC 5245 mode.
// The check below is only performed against endpoints doing google-ice.
if (port_->IsGoogleIce() && (read_state_ == STATE_READABLE) &&
(last_ping_received_ + CONNECTION_READ_TIMEOUT <= now) &&
(last_data_received_ + CONNECTION_READ_TIMEOUT <= now)) {
LOG_J(LS_INFO, this) << "Unreadable after "
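
In effect, after this change the unreadable transition only fires when the connection is negotiating google-ice and read_state_ is STATE_READABLE and both last_ping_received_ and last_data_received_ are older than CONNECTION_READ_TIMEOUT; standard RFC 5245 peers are exempt from this read timeout entirely.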

View File

@@ -40,27 +40,19 @@
namespace cricket {
using talk_base::Bind;
enum {
MSG_CREATECHANNEL = 1,
MSG_DESTROYCHANNEL = 2,
MSG_DESTROYALLCHANNELS = 3,
MSG_CONNECTCHANNELS = 4,
MSG_RESETCHANNELS = 5,
MSG_ONSIGNALINGREADY = 6,
MSG_ONREMOTECANDIDATE = 7,
MSG_READSTATE = 8,
MSG_WRITESTATE = 9,
MSG_REQUESTSIGNALING = 10,
MSG_CANDIDATEREADY = 11,
MSG_ROUTECHANGE = 12,
MSG_CONNECTING = 13,
MSG_CANDIDATEALLOCATIONCOMPLETE = 14,
MSG_ROLECONFLICT = 15,
MSG_SETICEROLE = 16,
MSG_SETLOCALDESCRIPTION = 17,
MSG_SETREMOTEDESCRIPTION = 18,
MSG_GETSTATS = 19,
MSG_SETIDENTITY = 20,
MSG_ONSIGNALINGREADY = 1,
MSG_ONREMOTECANDIDATE,
MSG_READSTATE,
MSG_WRITESTATE,
MSG_REQUESTSIGNALING,
MSG_CANDIDATEREADY,
MSG_ROUTECHANGE,
MSG_CONNECTING,
MSG_CANDIDATEALLOCATIONCOMPLETE,
MSG_ROLECONFLICT,
};
struct ChannelParams : public talk_base::MessageData {
@@ -81,36 +73,6 @@ struct ChannelParams : public talk_base::MessageData {
Candidate* candidate;
};
struct TransportDescriptionParams : public talk_base::MessageData {
TransportDescriptionParams(const TransportDescription& desc,
ContentAction action)
: desc(desc), action(action), result(false) {}
const TransportDescription& desc;
ContentAction action;
bool result;
};
struct IceRoleParam : public talk_base::MessageData {
explicit IceRoleParam(IceRole role) : role(role) {}
IceRole role;
};
struct StatsParam : public talk_base::MessageData {
explicit StatsParam(TransportStats* stats)
: stats(stats), result(false) {}
TransportStats* stats;
bool result;
};
struct IdentityParam : public talk_base::MessageData {
explicit IdentityParam(talk_base::SSLIdentity* identity)
: identity(identity) {}
talk_base::SSLIdentity* identity;
};
Transport::Transport(talk_base::Thread* signaling_thread,
talk_base::Thread* worker_thread,
const std::string& content_name,
@@ -138,33 +100,28 @@ Transport::~Transport() {
}
void Transport::SetIceRole(IceRole role) {
IceRoleParam param(role);
worker_thread()->Send(this, MSG_SETICEROLE, &param);
worker_thread_->Invoke<void>(Bind(&Transport::SetIceRole_w, this, role));
}
void Transport::SetIdentity(talk_base::SSLIdentity* identity) {
IdentityParam params(identity);
worker_thread()->Send(this, MSG_SETIDENTITY, &params);
worker_thread_->Invoke<void>(Bind(&Transport::SetIdentity_w, this, identity));
}
bool Transport::SetLocalTransportDescription(
const TransportDescription& description, ContentAction action) {
TransportDescriptionParams params(description, action);
worker_thread()->Send(this, MSG_SETLOCALDESCRIPTION, &params);
return params.result;
return worker_thread_->Invoke<bool>(Bind(
&Transport::SetLocalTransportDescription_w, this, description, action));
}
bool Transport::SetRemoteTransportDescription(
const TransportDescription& description, ContentAction action) {
TransportDescriptionParams params(description, action);
worker_thread()->Send(this, MSG_SETREMOTEDESCRIPTION, &params);
return params.result;
return worker_thread_->Invoke<bool>(Bind(
&Transport::SetRemoteTransportDescription_w, this, description, action));
}
TransportChannelImpl* Transport::CreateChannel(int component) {
ChannelParams params(component);
worker_thread()->Send(this, MSG_CREATECHANNEL, &params);
return params.channel;
return worker_thread_->Invoke<TransportChannelImpl*>(Bind(
&Transport::CreateChannel_w, this, component));
}
TransportChannelImpl* Transport::CreateChannel_w(int component) {
@@ -236,8 +193,8 @@ bool Transport::HasChannels() {
}
void Transport::DestroyChannel(int component) {
ChannelParams params(component);
worker_thread()->Send(this, MSG_DESTROYCHANNEL, &params);
worker_thread_->Invoke<void>(Bind(
&Transport::DestroyChannel_w, this, component));
}
void Transport::DestroyChannel_w(int component) {
@@ -271,7 +228,7 @@ void Transport::DestroyChannel_w(int component) {
void Transport::ConnectChannels() {
ASSERT(signaling_thread()->IsCurrent());
worker_thread()->Send(this, MSG_CONNECTCHANNELS, NULL);
worker_thread_->Invoke<void>(Bind(&Transport::ConnectChannels_w, this));
}
void Transport::ConnectChannels_w() {
@@ -312,7 +269,8 @@ void Transport::OnConnecting_s() {
void Transport::DestroyAllChannels() {
ASSERT(signaling_thread()->IsCurrent());
worker_thread()->Send(this, MSG_DESTROYALLCHANNELS, NULL);
worker_thread_->Invoke<void>(
Bind(&Transport::DestroyAllChannels_w, this));
worker_thread()->Clear(this);
signaling_thread()->Clear(this);
destroyed_ = true;
@@ -340,7 +298,7 @@ void Transport::DestroyAllChannels_w() {
void Transport::ResetChannels() {
ASSERT(signaling_thread()->IsCurrent());
worker_thread()->Send(this, MSG_RESETCHANNELS, NULL);
worker_thread_->Invoke<void>(Bind(&Transport::ResetChannels_w, this));
}
void Transport::ResetChannels_w() {
@@ -404,9 +362,8 @@ bool Transport::VerifyCandidate(const Candidate& cand, std::string* error) {
bool Transport::GetStats(TransportStats* stats) {
ASSERT(signaling_thread()->IsCurrent());
StatsParam params(stats);
worker_thread()->Send(this, MSG_GETSTATS, &params);
return params.result;
return worker_thread_->Invoke<bool>(Bind(
&Transport::GetStats_w, this, stats));
}
bool Transport::GetStats_w(TransportStats* stats) {
@@ -427,8 +384,8 @@ bool Transport::GetStats_w(TransportStats* stats) {
}
bool Transport::GetSslRole(talk_base::SSLRole* ssl_role) const {
return worker_thread_->Invoke<bool>(
Bind(&Transport::GetSslRole_w, this, ssl_role));
return worker_thread_->Invoke<bool>(Bind(
&Transport::GetSslRole_w, this, ssl_role));
}
void Transport::OnRemoteCandidates(const std::vector<Candidate>& candidates) {
@@ -740,25 +697,6 @@ bool Transport::NegotiateTransportDescription_w(ContentAction local_role) {
void Transport::OnMessage(talk_base::Message* msg) {
switch (msg->message_id) {
case MSG_CREATECHANNEL: {
ChannelParams* params = static_cast<ChannelParams*>(msg->pdata);
params->channel = CreateChannel_w(params->component);
}
break;
case MSG_DESTROYCHANNEL: {
ChannelParams* params = static_cast<ChannelParams*>(msg->pdata);
DestroyChannel_w(params->component);
}
break;
case MSG_CONNECTCHANNELS:
ConnectChannels_w();
break;
case MSG_RESETCHANNELS:
ResetChannels_w();
break;
case MSG_DESTROYALLCHANNELS:
DestroyAllChannels_w();
break;
case MSG_ONSIGNALINGREADY:
CallChannels_w(&TransportChannelImpl::OnSignalingReady);
break;
@@ -798,36 +736,6 @@ void Transport::OnMessage(talk_base::Message* msg) {
case MSG_ROLECONFLICT:
SignalRoleConflict();
break;
case MSG_SETICEROLE: {
IceRoleParam* param =
static_cast<IceRoleParam*>(msg->pdata);
SetIceRole_w(param->role);
}
break;
case MSG_SETLOCALDESCRIPTION: {
TransportDescriptionParams* params =
static_cast<TransportDescriptionParams*>(msg->pdata);
params->result = SetLocalTransportDescription_w(params->desc,
params->action);
}
break;
case MSG_SETREMOTEDESCRIPTION: {
TransportDescriptionParams* params =
static_cast<TransportDescriptionParams*>(msg->pdata);
params->result = SetRemoteTransportDescription_w(params->desc,
params->action);
}
break;
case MSG_GETSTATS: {
StatsParam* params = static_cast<StatsParam*>(msg->pdata);
params->result = GetStats_w(params->stats);
}
break;
case MSG_SETIDENTITY: {
IdentityParam* params = static_cast<IdentityParam*>(msg->pdata);
SetIdentity_w(params->identity);
}
break;
}
}
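
The pattern adopted throughout this file replaces each hand-rolled MessageData struct plus OnMessage case with a single synchronous cross-thread call: worker_thread_->Invoke<T>(Bind(...)) blocks the signaling thread until the bound method has run on the worker thread and returns its result. A hedged, illustrative sketch of how such an Invoke can be built from standard primitives (the real talk_base::Thread::Invoke goes through its message queue and differs in detail):

#include <functional>
#include <future>

// Illustrative only: run |fn| on another thread via a user-supplied |post|
// function and block the caller until the result is ready.
template <class ReturnT>
ReturnT InvokeOn(const std::function<void(std::function<void()>)>& post,
                 std::function<ReturnT()> fn) {
  std::packaged_task<ReturnT()> task(std::move(fn));
  std::future<ReturnT> result = task.get_future();
  post([&task] { task(); });  // Execute on the target thread.
  return result.get();        // Block until it has run.
}

Under this model, Transport::SetLocalTransportDescription reduces to a blocking call whose return value is the bool computed by SetLocalTransportDescription_w on the worker thread, which is exactly what the diff expresses.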