Update talk folder to revision=49713299.

TBR=mallinath@webrtc.org

Review URL: https://webrtc-codereview.appspot.com/1848004

git-svn-id: http://webrtc.googlecode.com/svn/trunk@4380 4adac7df-926f-26a2-2b94-8c16560cd09d
henrike@webrtc.org 2013-07-22 21:07:49 +00:00
parent 5bb8e7e220
commit 28654cbc22
70 changed files with 516 additions and 716 deletions

View File

@ -139,7 +139,7 @@ bool JsepSessionDescription::AddCandidate(
candidate_collection_[mediasection_index].add(
new JsepIceCandidate(candidate->sdp_mid(),
mediasection_index,
static_cast<int>(mediasection_index),
updated_candidate));
return true;
}

View File

@ -147,7 +147,8 @@ class PeerConnectionTestClientBase
void AddMediaStream(bool audio, bool video) {
std::string label = kStreamLabelBase +
talk_base::ToString<int>(peer_connection_->local_streams()->count());
talk_base::ToString<int>(
static_cast<int>(peer_connection_->local_streams()->count()));
talk_base::scoped_refptr<webrtc::MediaStreamInterface> stream =
peer_connection_factory_->CreateLocalMediaStream(label);
@ -306,11 +307,11 @@ class PeerConnectionTestClientBase
desc->GetTransportDescriptionByName(contents[index].name);
std::map<int, IceUfragPwdPair>::const_iterator ufragpair_it =
ice_ufrag_pwd_.find(index);
ice_ufrag_pwd_.find(static_cast<int>(index));
if (ufragpair_it == ice_ufrag_pwd_.end()) {
ASSERT_FALSE(ExpectIceRestart());
ice_ufrag_pwd_[index] = IceUfragPwdPair(transport_desc->ice_ufrag,
transport_desc->ice_pwd);
ice_ufrag_pwd_[static_cast<int>(index)] =
IceUfragPwdPair(transport_desc->ice_ufrag, transport_desc->ice_pwd);
} else if (ExpectIceRestart()) {
const IceUfragPwdPair& ufrag_pwd = ufragpair_it->second;
EXPECT_NE(ufrag_pwd.first, transport_desc->ice_ufrag);
@ -1007,13 +1008,13 @@ TEST_F(JsepPeerConnectionP2PTestClient, LocalP2PTest16To9) {
ASSERT_LE(0, initializing_client()->rendered_height());
double initiating_video_ratio =
static_cast<double> (initializing_client()->rendered_width()) /
static_cast<double>(initializing_client()->rendered_width()) /
initializing_client()->rendered_height();
EXPECT_LE(requested_ratio, initiating_video_ratio);
ASSERT_LE(0, receiving_client()->rendered_height());
double receiving_video_ratio =
static_cast<double> (receiving_client()->rendered_width()) /
static_cast<double>(receiving_client()->rendered_width()) /
receiving_client()->rendered_height();
EXPECT_LE(requested_ratio, receiving_video_ratio);
}

View File

@ -189,7 +189,7 @@ bool PeerConnectionFactory::Initialize() {
void PeerConnectionFactory::OnMessage(talk_base::Message* msg) {
switch (msg->message_id) {
case MSG_INIT_FACTORY: {
InitMessageData* pdata = static_cast<InitMessageData*> (msg->pdata);
InitMessageData* pdata = static_cast<InitMessageData*>(msg->pdata);
pdata->data() = Initialize_s();
break;
}
@ -199,7 +199,7 @@ void PeerConnectionFactory::OnMessage(talk_base::Message* msg) {
}
case MSG_CREATE_PEERCONNECTION: {
CreatePeerConnectionParams* pdata =
static_cast<CreatePeerConnectionParams*> (msg->pdata);
static_cast<CreatePeerConnectionParams*>(msg->pdata);
pdata->peerconnection = CreatePeerConnection_s(pdata->configuration,
pdata->constraints,
pdata->allocator_factory,
@ -214,7 +214,7 @@ void PeerConnectionFactory::OnMessage(talk_base::Message* msg) {
}
case MSG_CREATE_VIDEOSOURCE: {
CreateVideoSourceParams* pdata =
static_cast<CreateVideoSourceParams*> (msg->pdata);
static_cast<CreateVideoSourceParams*>(msg->pdata);
pdata->source = CreateVideoSource_s(pdata->capturer, pdata->constraints);
break;
}
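
These hunks are instances of libjingle's typed-message dispatch: work is posted to the factory's signaling thread as a talk_base::Message whose pdata field carries a request struct, and OnMessage recovers the concrete type from message_id before filling in the result. A minimal self-contained sketch of the idea; Message, MessageData, and CreateThingParams are illustrative stand-ins here, not the real talk_base types:

#include <stdio.h>

// Illustrative stand-ins for talk_base::Message / talk_base::MessageData.
struct MessageData { virtual ~MessageData() {} };
struct Message { int message_id; MessageData* pdata; };

enum { MSG_CREATE_THING };

struct CreateThingParams : public MessageData {
  int width, height;
  bool result;
};

// The handler recovers the payload type from message_id, exactly as
// PeerConnectionFactory::OnMessage does above.
void OnMessage(Message* msg) {
  switch (msg->message_id) {
    case MSG_CREATE_THING: {
      CreateThingParams* pdata = static_cast<CreateThingParams*>(msg->pdata);
      pdata->result = (pdata->width > 0 && pdata->height > 0);
      break;
    }
  }
}

int main() {
  CreateThingParams params;
  params.width = 640;
  params.height = 480;
  params.result = false;
  Message msg = { MSG_CREATE_THING, &params };
  OnMessage(&msg);
  printf("result=%d\n", params.result);  // result=1
}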

View File

@ -94,7 +94,7 @@ bool GetFirstSsrc(const cricket::ContentInfo* content_info, int* ssrc) {
return false;
}
const cricket::MediaContentDescription* media_desc =
static_cast<const cricket::MediaContentDescription*> (
static_cast<const cricket::MediaContentDescription*>(
content_info->description);
if (!media_desc || media_desc->streams().empty()) {
return false;

View File

@ -632,7 +632,7 @@ void CreateTracksFromSsrcInfos(const SsrcInfoVec& ssrc_infos,
void GetMediaStreamLabels(const ContentInfo* content,
std::set<std::string>* labels) {
const MediaContentDescription* media_desc =
static_cast<const MediaContentDescription*> (
static_cast<const MediaContentDescription*>(
content->description);
const cricket::StreamParamsVec& streams = media_desc->streams();
for (cricket::StreamParamsVec::const_iterator it = streams.begin();
@ -1123,7 +1123,7 @@ void BuildMediaDescription(const ContentInfo* content_info,
// trunk/cppguide.xml?showone=Streams#Streams
std::ostringstream os;
const MediaContentDescription* media_desc =
static_cast<const MediaContentDescription*> (
static_cast<const MediaContentDescription*>(
content_info->description);
ASSERT(media_desc != NULL);
@ -1928,7 +1928,7 @@ void MaybeCreateStaticPayloadAudioCodecs(
if (!media_desc) {
return;
}
int preference = fmts.size();
int preference = static_cast<int>(fmts.size());
std::vector<int>::const_iterator it = fmts.begin();
bool add_new_codec = false;
for (; it != fmts.end(); ++it) {

View File

@ -1240,7 +1240,7 @@ void TestMismatch(const std::string& string1, const std::string& string2) {
int position = 0;
for (size_t i = 0; i < string1.length() && i < string2.length(); ++i) {
if (string1.c_str()[i] != string2.c_str()[i]) {
position = i;
position = static_cast<int>(i);
break;
}
}

View File

@ -629,25 +629,24 @@ SessionDescriptionInterface* WebRtcSession::CreateAnswer(
bool WebRtcSession::SetLocalDescription(SessionDescriptionInterface* desc,
std::string* err_desc) {
// Takes the ownership of |desc| regardless of the result.
talk_base::scoped_ptr<SessionDescriptionInterface> desc_temp(desc);
if (error() != cricket::BaseSession::ERROR_NONE) {
delete desc;
return BadLocalSdp(SessionErrorMsg(error()), err_desc);
}
if (!desc || !desc->description()) {
delete desc;
return BadLocalSdp(kInvalidSdp, err_desc);
}
Action action = GetAction(desc->type());
if (!ExpectSetLocalDescription(action)) {
std::string type = desc->type();
delete desc;
return BadLocalSdp(BadStateErrMsg(type, state()), err_desc);
}
if (session_desc_factory_.secure() == cricket::SEC_REQUIRED &&
!VerifyCrypto(desc->description())) {
delete desc;
return BadLocalSdp(kSdpWithoutCrypto, err_desc);
}
@ -665,10 +664,10 @@ bool WebRtcSession::SetLocalDescription(SessionDescriptionInterface* desc,
UpdateSessionDescriptionSecurePolicy(desc->description());
set_local_description(desc->description()->Copy());
local_desc_.reset(desc);
local_desc_.reset(desc_temp.release());
// Transport and Media channels will be created only when offer is set.
if (action == kOffer && !CreateChannels(desc->description())) {
if (action == kOffer && !CreateChannels(local_desc_->description())) {
// TODO(mallinath) - Handle CreateChannel failure, as new local description
// is applied. Restore back to old description.
return BadLocalSdp(kCreateChannelFailed, err_desc);
@ -676,10 +675,10 @@ bool WebRtcSession::SetLocalDescription(SessionDescriptionInterface* desc,
// Remove channel and transport proxies, if MediaContentDescription is
// rejected.
RemoveUnusedChannelsAndTransports(desc->description());
RemoveUnusedChannelsAndTransports(local_desc_->description());
if (!UpdateSessionState(action, cricket::CS_LOCAL,
desc->description(), err_desc)) {
local_desc_->description(), err_desc)) {
return false;
}
// Kick starting the ice candidates allocation.
@ -697,19 +696,19 @@ bool WebRtcSession::SetLocalDescription(SessionDescriptionInterface* desc,
bool WebRtcSession::SetRemoteDescription(SessionDescriptionInterface* desc,
std::string* err_desc) {
// Takes the ownership of |desc| regardless of the result.
talk_base::scoped_ptr<SessionDescriptionInterface> desc_temp(desc);
if (error() != cricket::BaseSession::ERROR_NONE) {
delete desc;
return BadRemoteSdp(SessionErrorMsg(error()), err_desc);
}
if (!desc || !desc->description()) {
delete desc;
return BadRemoteSdp(kInvalidSdp, err_desc);
}
Action action = GetAction(desc->type());
if (!ExpectSetRemoteDescription(action)) {
std::string type = desc->type();
delete desc;
return BadRemoteSdp(BadStateErrMsg(type, state()), err_desc);
}
@ -720,7 +719,6 @@ bool WebRtcSession::SetRemoteDescription(SessionDescriptionInterface* desc,
if (session_desc_factory_.secure() == cricket::SEC_REQUIRED &&
!VerifyCrypto(desc->description())) {
delete desc;
return BadRemoteSdp(kSdpWithoutCrypto, err_desc);
}
@ -746,7 +744,6 @@ bool WebRtcSession::SetRemoteDescription(SessionDescriptionInterface* desc,
// Update remote MediaStreams.
mediastream_signaling_->OnRemoteDescriptionChanged(desc);
if (local_description() && !UseCandidatesInSessionDescription(desc)) {
delete desc;
return BadRemoteSdp(kInvalidCandidates, err_desc);
}
@ -758,7 +755,7 @@ bool WebRtcSession::SetRemoteDescription(SessionDescriptionInterface* desc,
// that indicates the remote peer requests ice restart.
ice_restart_latch_->CheckForRemoteIceRestart(remote_desc_.get(),
desc);
remote_desc_.reset(desc);
remote_desc_.reset(desc_temp.release());
if (error() != cricket::BaseSession::ERROR_NONE) {
return BadRemoteSdp(SessionErrorMsg(error()), err_desc);
}
@ -1245,7 +1242,7 @@ bool WebRtcSession::GetLocalCandidateMediaIndex(const std::string& content_name,
const ContentInfos& contents = BaseSession::local_description()->contents();
for (size_t index = 0; index < contents.size(); ++index) {
if (contents[index].name == content_name) {
*sdp_mline_index = index;
*sdp_mline_index = static_cast<int>(index);
content_found = true;
break;
}
@ -1428,7 +1425,7 @@ void WebRtcSession::UpdateSessionDescriptionSecurePolicy(
iter != sdesc->contents().end(); ++iter) {
if (cricket::IsMediaContent(&*iter)) {
MediaContentDescription* mdesc =
static_cast<MediaContentDescription*> (iter->description);
static_cast<MediaContentDescription*>(iter->description);
if (mdesc) {
mdesc->set_crypto_required(
session_desc_factory_.secure() == cricket::SEC_REQUIRED);
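
The rewrite above swaps the per-branch delete calls for a single talk_base::scoped_ptr taken at the top of each setter, so every early return frees |desc| automatically and the success path hands ownership on with release(). A minimal sketch of the pattern, with a hand-rolled scoped_ptr standing in for the talk_base one:

#include <stdio.h>

// Hand-rolled stand-in for talk_base::scoped_ptr.
template <typename T>
class scoped_ptr {
 public:
  explicit scoped_ptr(T* p) : ptr_(p) {}
  ~scoped_ptr() { delete ptr_; }
  T* get() const { return ptr_; }
  T* release() { T* p = ptr_; ptr_ = 0; return p; }
 private:
  T* ptr_;
  scoped_ptr(const scoped_ptr&);            // noncopyable
  scoped_ptr& operator=(const scoped_ptr&);
};

struct Desc {};  // stands in for SessionDescriptionInterface

Desc* stored = 0;

// Takes ownership of |desc| regardless of the result, like
// WebRtcSession::SetLocalDescription above.
bool SetDescription(Desc* desc, bool fail_early) {
  scoped_ptr<Desc> desc_temp(desc);  // owns |desc| on every path
  if (fail_early)
    return false;                    // desc_temp deletes |desc| here
  delete stored;
  stored = desc_temp.release();      // success: ownership moves on
  return true;
}

int main() {
  SetDescription(new Desc, true);    // freed by the early return
  SetDescription(new Desc, false);   // ownership transferred to |stored|
  delete stored;
  printf("no leaks on either path\n");
}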

View File

@ -140,8 +140,10 @@ class WebRtcSession : public cricket::BaseSession,
SessionDescriptionInterface* CreateAnswer(
const MediaConstraintsInterface* constraints);
// The ownership of |desc| will be transferred after this call.
bool SetLocalDescription(SessionDescriptionInterface* desc,
std::string* err_desc);
// The ownership of |desc| will be transferred after this call.
bool SetRemoteDescription(SessionDescriptionInterface* desc,
std::string* err_desc);
bool ProcessIceMessage(const IceCandidateInterface* ice_candidate);

View File

@ -496,12 +496,12 @@ class WebRtcSessionTest : public testing::Test {
// Create a SDP without Crypto.
cricket::MediaSessionOptions options;
options.has_video = true;
scoped_ptr<JsepSessionDescription> offer(
JsepSessionDescription* offer(
CreateRemoteOffer(options, cricket::SEC_DISABLED));
ASSERT_TRUE(offer.get() != NULL);
ASSERT_TRUE(offer != NULL);
VerifyNoCryptoParams(offer->description(), false);
SetRemoteDescriptionExpectError("Called with a SDP without crypto enabled",
offer.release());
offer);
const webrtc::SessionDescriptionInterface* answer =
session_->CreateAnswer(NULL);
// Answer should be NULL as no crypto params in offer.
@ -832,7 +832,7 @@ class WebRtcSessionTest : public testing::Test {
const cricket::ContentDescription* description = content->description;
ASSERT(description != NULL);
const cricket::AudioContentDescription* audio_content_desc =
static_cast<const cricket::AudioContentDescription*> (description);
static_cast<const cricket::AudioContentDescription*>(description);
ASSERT(audio_content_desc != NULL);
for (size_t i = 0; i < audio_content_desc->codecs().size(); ++i) {
if (audio_content_desc->codecs()[i].name == "CN")
@ -2184,16 +2184,16 @@ TEST_F(WebRtcSessionTest, TestIceOfferGIceOnlyAnswer) {
SetLocalDescriptionWithoutError(ice_only_offer);
std::string original_offer_sdp;
EXPECT_TRUE(offer->ToString(&original_offer_sdp));
talk_base::scoped_ptr<SessionDescriptionInterface> pranswer_with_gice(
SessionDescriptionInterface* pranswer_with_gice =
CreateSessionDescription(JsepSessionDescription::kPrAnswer,
original_offer_sdp, NULL));
original_offer_sdp, NULL);
SetRemoteDescriptionExpectError(kPushDownPranswerTDFailed,
pranswer_with_gice.get());
talk_base::scoped_ptr<SessionDescriptionInterface> answer_with_gice(
pranswer_with_gice);
SessionDescriptionInterface* answer_with_gice =
CreateSessionDescription(JsepSessionDescription::kAnswer,
original_offer_sdp, NULL));
original_offer_sdp, NULL);
SetRemoteDescriptionExpectError(kPushDownAnswerTDFailed,
answer_with_gice.get());
answer_with_gice);
}
// Verifying local offer and remote answer have matching m-lines as per RFC 3264.
@ -2207,13 +2207,13 @@ TEST_F(WebRtcSessionTest, TestIncorrectMLinesInRemoteAnswer) {
cricket::SessionDescription* answer_copy = answer->description()->Copy();
answer_copy->RemoveContentByName("video");
talk_base::scoped_ptr<JsepSessionDescription> modified_answer(
new JsepSessionDescription(JsepSessionDescription::kAnswer));
JsepSessionDescription* modified_answer =
new JsepSessionDescription(JsepSessionDescription::kAnswer);
EXPECT_TRUE(modified_answer->Initialize(answer_copy,
answer->session_id(),
answer->session_version()));
SetRemoteDescriptionExpectError(kMlineMismatch, modified_answer.get());
SetRemoteDescriptionExpectError(kMlineMismatch, modified_answer);
// Modifying content names.
std::string sdp;
@ -2227,9 +2227,9 @@ TEST_F(WebRtcSessionTest, TestIncorrectMLinesInRemoteAnswer) {
kAudioMidReplaceStr.length(),
&sdp);
talk_base::scoped_ptr<SessionDescriptionInterface> modified_answer1(
CreateSessionDescription(JsepSessionDescription::kAnswer, sdp, NULL));
SetRemoteDescriptionExpectError(kMlineMismatch, modified_answer1.get());
SessionDescriptionInterface* modified_answer1 =
CreateSessionDescription(JsepSessionDescription::kAnswer, sdp, NULL);
SetRemoteDescriptionExpectError(kMlineMismatch, modified_answer1);
SetRemoteDescriptionWithoutError(answer.release());
}
@ -2245,13 +2245,13 @@ TEST_F(WebRtcSessionTest, TestIncorrectMLinesInLocalAnswer) {
cricket::SessionDescription* answer_copy = answer->description()->Copy();
answer_copy->RemoveContentByName("video");
talk_base::scoped_ptr<JsepSessionDescription> modified_answer(
new JsepSessionDescription(JsepSessionDescription::kAnswer));
JsepSessionDescription* modified_answer =
new JsepSessionDescription(JsepSessionDescription::kAnswer);
EXPECT_TRUE(modified_answer->Initialize(answer_copy,
answer->session_id(),
answer->session_version()));
SetLocalDescriptionExpectError(kMlineMismatch, modified_answer.get());
SetLocalDescriptionExpectError(kMlineMismatch, modified_answer);
SetLocalDescriptionWithoutError(answer);
}
@ -2388,9 +2388,9 @@ TEST_F(WebRtcSessionTest, TestSessionContentError) {
video_channel_->set_fail_set_send_codecs(true);
mediastream_signaling_.SendAudioVideoStream2();
talk_base::scoped_ptr<SessionDescriptionInterface> answer(
CreateRemoteAnswer(session_->local_description()));
SetRemoteDescriptionExpectError("ERROR_CONTENT", answer.get());
SessionDescriptionInterface* answer =
CreateRemoteAnswer(session_->local_description());
SetRemoteDescriptionExpectError("ERROR_CONTENT", answer);
}
// Runs the loopback call test with BUNDLE and STUN disabled.

View File

@ -25,6 +25,7 @@
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <signal.h>
#include <stdlib.h>
#include <stdio.h>
#include <memory.h>
@ -51,14 +52,14 @@ namespace talk_base {
void Break() {
#if WIN32
::DebugBreak();
#elif OSX // !WIN32
__asm__("int $3");
#else // !OSX && !WIN32
#if _DEBUG_HAVE_BACKTRACE
OutputTrace();
#else // !WIN32
// On POSIX systems, SIGTRAP signals debuggers to break without killing the
// process. If a debugger isn't attached, the uncaught SIGTRAP will crash the
// app.
raise(SIGTRAP);
#endif
abort();
#endif // !OSX && !WIN32
// If a debugger wasn't attached, we will have crashed by this point. If a
// debugger is attached, we'll continue from here.
}
static AssertLogger custom_assert_logger_ = NULL;
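
On POSIX, raise(SIGTRAP) is the conventional way to stop in an attached debugger; without one, the signal's default disposition terminates the process, which is why the comment notes that execution only continues past this point when a debugger handled the trap. A small POSIX-only sketch of the same pattern:

#include <signal.h>
#include <stdio.h>

// Simplified form of the POSIX branch of talk_base::Break() above.
void Break() {
  // An attached debugger stops here as if at a breakpoint. With no
  // debugger, SIGTRAP's default action kills the process, so the line
  // after the raise only runs when a debugger swallowed the signal.
  raise(SIGTRAP);
  printf("debugger continued past the trap\n");
}

int main() {
  Break();  // run under gdb/lldb to see the breakpoint behavior
}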

View File

@ -112,7 +112,8 @@ void SetCustomAssertLogger(AssertLogger logger);
namespace talk_base {
// Break causes the debugger to stop executing, or the program to abort.
// If a debugger is attached, triggers a debugger breakpoint. If a debugger is
// not attached, forces program termination.
void Break();
inline bool Assert(bool result, const char* function, const char* file,

View File

@ -270,7 +270,7 @@ uint32 CreateRandomId() {
}
uint64 CreateRandomId64() {
return static_cast<uint64> (CreateRandomId()) << 32 | CreateRandomId();
return static_cast<uint64>(CreateRandomId()) << 32 | CreateRandomId();
}
uint32 CreateRandomNonZeroId() {
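
The 64-bit id is just two independent 32-bit ids packed into the high and low words; the cast must come before the shift, since shifting a 32-bit value left by 32 is undefined. A quick check of the packing:

#include <stdint.h>
#include <stdio.h>

int main() {
  uint32_t hi = 0xDEADBEEFu, lo = 0x12345678u;
  // Widen first, then shift, as CreateRandomId64 does above.
  uint64_t id = (static_cast<uint64_t>(hi) << 32) | lo;
  printf("%016llx\n", (unsigned long long)id);  // deadbeef12345678
}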

View File

@ -53,7 +53,7 @@ void MD5Update(MD5Context* ctx, const uint8* buf, size_t len) {
if ((ctx->bits[0] = t + (static_cast<uint32>(len) << 3)) < t) {
ctx->bits[1]++; // Carry from low to high.
}
ctx->bits[1] += len >> 29;
ctx->bits[1] += static_cast<uint32>(len >> 29);
t = (t >> 3) & 0x3f; // Bytes already in shsInfo->data.
// Handle any leading odd-sized chunks.
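
The two-word counter holds the running bit count: bits[0] takes the low 32 bits of len * 8 (the << 3), the wraparound test against t detects a carry into bits[1], and len >> 29 is the high word of len * 8 (a right shift by 32 - 3). The new cast only silences truncation warnings where size_t is 64-bit. A self-contained check of the arithmetic:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

// 64-bit bit counter kept in two 32-bit words, as in MD5Update above.
struct Counter { uint32_t bits[2]; };  // bits[0] low word, bits[1] high

void AddBytes(Counter* c, size_t len) {
  uint32_t t = c->bits[0];
  // Low word of len*8; an unsigned wraparound means the sum overflowed.
  if ((c->bits[0] = t + (static_cast<uint32_t>(len) << 3)) < t)
    c->bits[1]++;                                   // carry low -> high
  c->bits[1] += static_cast<uint32_t>(len >> 29);   // high word of len*8
}

int main() {
  Counter c = {{0, 0}};
  AddBytes(&c, 0x30000000);  // 0x30000000 bytes = 0x180000000 bits
  printf("high=%u low=%u\n", (unsigned)c.bits[1], (unsigned)c.bits[0]);
  // Prints high=1 low=2147483648, i.e. 0x180000000 bits in total.
}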

View File

@ -207,6 +207,7 @@ class SSLStreamAdapterTestBase : public testing::Test,
~SSLStreamAdapterTestBase() {
// Put it back for the next test.
talk_base::SetRandomTestMode(false);
talk_base::CleanupSSL();
}
static void SetUpTestCase() {
@ -571,14 +572,13 @@ class SSLStreamAdapterTestDTLS : public SSLStreamAdapterTestBase {
}
virtual void ReadData(talk_base::StreamInterface *stream) {
unsigned char *buffer = new unsigned char[2000];
unsigned char buffer[2000];
size_t bread;
int err2;
talk_base::StreamResult r;
for (;;) {
r = stream->Read(buffer, 2000,
&bread, &err2);
r = stream->Read(buffer, 2000, &bread, &err2);
if (r == talk_base::SR_ERROR) {
// Unfortunately, errors are the way that the stream adapter
@ -595,7 +595,8 @@ class SSLStreamAdapterTestDTLS : public SSLStreamAdapterTestBase {
// Now parse the datagram
ASSERT_EQ(packet_size_, bread);
uint32_t packet_num = *(reinterpret_cast<uint32_t *>(buffer));
unsigned char* ptr_to_buffer = buffer;
uint32_t packet_num = *(reinterpret_cast<uint32_t *>(ptr_to_buffer));
for (size_t i = 4; i < packet_size_; i++) {
ASSERT_EQ((packet_num & 0xff), buffer[i]);

View File

@ -36,7 +36,9 @@
# flood of chromium-style warnings.
'clang_use_chrome_plugins%': 0,
'libpeer_target_type%': 'static_library',
'java_home%': '<!(python -c "import os; print os.getenv(\'JAVA_HOME\');")',
# TODO(henrike): make sure waterfall bots have $JAVA_HOME configured
# properly and remove the default value below. See issue 2113.
'java_home%': '<!(python -c "import os; print os.getenv(\'JAVA_HOME\', \'/usr/lib/jvm/java-6-sun\');")',
# Whether or not to build the ObjectiveC PeerConnection API & tests.
'libjingle_objc%' : 0,
},

View File

@ -499,7 +499,9 @@ void MainWnd::LayoutConnectUI(bool show) {
size_t y = rc.bottom / 2;
for (size_t i = 0; i < ARRAYSIZE(windows); ++i) {
size_t top = y - (windows[i].height / 2);
::MoveWindow(windows[i].wnd, x, top, windows[i].width, windows[i].height,
::MoveWindow(windows[i].wnd, static_cast<int>(x), static_cast<int>(top),
static_cast<int>(windows[i].width),
static_cast<int>(windows[i].height),
TRUE);
x += kSeparator + windows[i].width;
if (windows[i].text[0] != 'X')

View File

@ -264,7 +264,7 @@ void PeerConnectionClient::OnHangingGetConnect(talk_base::AsyncSocket* socket) {
char buffer[1024];
sprintfn(buffer, sizeof(buffer),
"GET /wait?peer_id=%i HTTP/1.0\r\n\r\n", my_id_);
int len = strlen(buffer);
int len = static_cast<int>(strlen(buffer));
int sent = socket->Send(buffer, len);
ASSERT(sent == len);
UNUSED2(sent, len);
@ -357,7 +357,7 @@ void PeerConnectionClient::OnRead(talk_base::AsyncSocket* socket) {
if (my_id_ == -1) {
// First response. Let's store our server assigned ID.
ASSERT(state_ == SIGNING_IN);
my_id_ = peer_id;
my_id_ = static_cast<int>(peer_id);
ASSERT(my_id_ != -1);
// The body of the response will be a list of already connected peers.
@ -427,7 +427,8 @@ void PeerConnectionClient::OnHangingGetRead(talk_base::AsyncSocket* socket) {
}
}
} else {
OnMessageFromPeer(peer_id, notification_data_.substr(pos));
OnMessageFromPeer(static_cast<int>(peer_id),
notification_data_.substr(pos));
}
}

View File

@ -27,6 +27,7 @@
#include "talk/examples/peerconnection/server/data_socket.h"
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
@ -131,7 +132,8 @@ bool DataSocket::OnDataAvailable(bool* close_socket) {
}
bool DataSocket::Send(const std::string& data) const {
return send(socket_, data.data(), data.length(), 0) != SOCKET_ERROR;
return send(socket_, data.data(), static_cast<int>(data.length()), 0) !=
SOCKET_ERROR;
}
bool DataSocket::Send(const std::string& status, bool connection_close,
@ -151,7 +153,8 @@ bool DataSocket::Send(const std::string& status, bool connection_close,
if (!content_type.empty())
buffer += "Content-Type: " + content_type + "\r\n";
buffer += "Content-Length: " + int2str(data.size()) + "\r\n";
buffer += "Content-Length: " + int2str(static_cast<int>(data.size())) +
"\r\n";
if (!extra_headers.empty()) {
buffer += extra_headers;

View File

@ -679,17 +679,15 @@
'base/winping.cc',
'base/winping.h',
],
'link_settings': {
'libraries': [
'-lcrypt32.lib',
'-liphlpapi.lib',
'-lsecur32.lib',
],
},
# Suppress warnings about WIN32_LEAN_AND_MEAN.
'msvs_disabled_warnings': [4005],
'msvs_settings': {
'VCLibrarianTool': {
'AdditionalDependencies': [
'crypt32.lib',
'iphlpapi.lib',
'secur32.lib',
],
},
},
}],
['os_posix==1', {
'sources': [

View File

@ -39,11 +39,8 @@ CaptureRenderAdapter::CaptureRenderAdapter(VideoCapturer* video_capturer)
}
CaptureRenderAdapter::~CaptureRenderAdapter() {
// Have to disconnect here since |video_capturer_| lives on past the
// destruction of this object.
if (video_capturer_) {
video_capturer_->SignalVideoFrame.disconnect(this);
}
// has_slots destructor will disconnect us from any signals we may be
// connected to.
}
CaptureRenderAdapter* CaptureRenderAdapter::Create(
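
The explicit disconnect can be dropped because sigslot does two-way bookkeeping: a class deriving from sigslot::has_slots<> remembers every signal connected to it, and its destructor severs those connections, so neither side is left dangling. A toy model of that guarantee (not the real sigslot library):

#include <stdio.h>
#include <set>

class Signal;

// Toy has_slots: records the signals this object is connected to and
// unhooks itself from all of them on destruction.
class HasSlots {
 public:
  ~HasSlots();
  std::set<Signal*> connected_;
};

class Signal {
 public:
  void connect(HasSlots* slot) {
    slots_.insert(slot);
    slot->connected_.insert(this);
  }
  void disconnect(HasSlots* slot) { slots_.erase(slot); }
  unsigned num_slots() const { return (unsigned)slots_.size(); }
 private:
  std::set<HasSlots*> slots_;
};

HasSlots::~HasSlots() {
  for (std::set<Signal*>::iterator it = connected_.begin();
       it != connected_.end(); ++it) {
    (*it)->disconnect(this);  // the automatic cleanup relied on above
  }
}

int main() {
  Signal frame_signal;
  {
    HasSlots adapter;
    frame_signal.connect(&adapter);
    printf("connected: %u\n", frame_signal.num_slots());  // 1
  }  // adapter destroyed; it unhooks itself
  printf("connected: %u\n", frame_signal.num_slots());    // 0
}
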
@ -111,7 +108,8 @@ void CaptureRenderAdapter::MaybeSetRenderingSize(const VideoFrame* frame) {
const bool new_resolution = iter->render_width != frame->GetWidth() ||
iter->render_height != frame->GetHeight();
if (new_resolution) {
if (iter->renderer->SetSize(frame->GetWidth(), frame->GetHeight(), 0)) {
if (iter->renderer->SetSize(static_cast<int>(frame->GetWidth()),
static_cast<int>(frame->GetHeight()), 0)) {
iter->render_width = frame->GetWidth();
iter->render_height = frame->GetHeight();
} else {

View File

@ -69,7 +69,7 @@ class FakeNetworkInterface : public MediaChannel::NetworkInterface,
talk_base::CritScope cs(&crit_);
int bytes = 0;
for (size_t i = 0; i < rtp_packets_.size(); ++i) {
bytes += rtp_packets_[i].length();
bytes += static_cast<int>(rtp_packets_[i].length());
}
return bytes;
}
@ -83,7 +83,7 @@ class FakeNetworkInterface : public MediaChannel::NetworkInterface,
int NumRtpPackets() {
talk_base::CritScope cs(&crit_);
return rtp_packets_.size();
return static_cast<int>(rtp_packets_.size());
}
int NumRtpPackets(uint32 ssrc) {
@ -95,7 +95,7 @@ class FakeNetworkInterface : public MediaChannel::NetworkInterface,
int NumSentSsrcs() {
talk_base::CritScope cs(&crit_);
return sent_ssrcs_.size();
return static_cast<int>(sent_ssrcs_.size());
}
// Note: callers are responsible for deleting the returned buffer.
@ -109,7 +109,7 @@ class FakeNetworkInterface : public MediaChannel::NetworkInterface,
int NumRtcpPackets() {
talk_base::CritScope cs(&crit_);
return rtcp_packets_.size();
return static_cast<int>(rtcp_packets_.size());
}
// Note: callers are responsible for deleting the returned buffer.
@ -218,7 +218,7 @@ class FakeNetworkInterface : public MediaChannel::NetworkInterface,
}
if (ssrc == cur_ssrc) {
if (bytes) {
*bytes += rtp_packets_[i].length();
*bytes += static_cast<int>(rtp_packets_[i].length());
}
if (packets) {
++(*packets);

View File

@ -84,7 +84,7 @@ class FakeVideoCapturer : public cricket::VideoCapturer {
if (fourcc == cricket::FOURCC_ARGB) {
size = width * 4 * height;
} else if (fourcc == cricket::FOURCC_I420) {
size = cricket::VideoFrame::SizeOf(width, height);
size = static_cast<uint32>(cricket::VideoFrame::SizeOf(width, height));
} else {
return false; // Unsupported FOURCC.
}

View File

@ -163,7 +163,8 @@ class FileMediaEngineTest : public testing::Test {
for (size_t i = 0; i < ssrc_count; ++i) {
ret &= RtpTestUtility::WriteTestPackets(
RtpTestUtility::GetTestPacketCount(), false,
RtpTestUtility::kDefaultSsrc + i, &writer);
static_cast<uint32>(RtpTestUtility::kDefaultSsrc + i),
&writer);
}
return ret;
}

View File

@ -225,18 +225,12 @@ class HybridVideoEngine : public HybridVideoEngineInterface {
bool SetCaptureDevice(const Device* device) {
return video2_.SetCaptureDevice(device);
}
bool SetVideoCapturer(VideoCapturer* capturer) {
return video2_.SetVideoCapturer(capturer);
}
VideoCapturer* GetVideoCapturer() const {
return video2_.GetVideoCapturer();
}
bool SetLocalRenderer(VideoRenderer* renderer) {
return video2_.SetLocalRenderer(renderer);
}
bool SetCapture(bool capture) {
return video2_.SetCapture(capture);
}
sigslot::repeater2<VideoCapturer*, CaptureState> SignalCaptureStateChange;
virtual bool HasCodec1(const VideoCodec& codec) {

View File

@ -234,6 +234,7 @@ struct VideoOptions {
void SetAll(const VideoOptions& change) {
adapt_input_to_encoder.SetFrom(change.adapt_input_to_encoder);
adapt_input_to_cpu_usage.SetFrom(change.adapt_input_to_cpu_usage);
adapt_cpu_with_smoothing.SetFrom(change.adapt_cpu_with_smoothing);
adapt_view_switch.SetFrom(change.adapt_view_switch);
video_noise_reduction.SetFrom(change.video_noise_reduction);
video_three_layers.SetFrom(change.video_three_layers);
@ -256,6 +257,7 @@ struct VideoOptions {
bool operator==(const VideoOptions& o) const {
return adapt_input_to_encoder == o.adapt_input_to_encoder &&
adapt_input_to_cpu_usage == o.adapt_input_to_cpu_usage &&
adapt_cpu_with_smoothing == o.adapt_cpu_with_smoothing &&
adapt_view_switch == o.adapt_view_switch &&
video_noise_reduction == o.video_noise_reduction &&
video_three_layers == o.video_three_layers &&
@ -279,6 +281,7 @@ struct VideoOptions {
ost << "VideoOptions {";
ost << ToStringIfSet("encoder adaption", adapt_input_to_encoder);
ost << ToStringIfSet("cpu adaption", adapt_input_to_cpu_usage);
ost << ToStringIfSet("cpu adaptation smoothing", adapt_cpu_with_smoothing);
ost << ToStringIfSet("adapt view switch", adapt_view_switch);
ost << ToStringIfSet("noise reduction", video_noise_reduction);
ost << ToStringIfSet("3 layers", video_three_layers);
@ -303,6 +306,8 @@ struct VideoOptions {
Settable<bool> adapt_input_to_encoder;
// Enable CPU adaptation?
Settable<bool> adapt_input_to_cpu_usage;
// Enable CPU adaptation smoothing?
Settable<bool> adapt_cpu_with_smoothing;
// Enable Adapt View Switch?
Settable<bool> adapt_view_switch;
// Enable denoising?
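
Adding a field like adapt_cpu_with_smoothing means touching all four places shown in these hunks: SetAll, operator==, ToString, and the member declaration, because every option is a Settable<T> that remembers whether it was explicitly set and SetFrom only copies values that were. A rough approximation of that semantics (hand-rolled, not the real talk/base Settable):

#include <stdio.h>

// Approximation of cricket's Settable<T>: a value plus an "explicitly
// set" flag; SetFrom overwrites only with values that were set.
template <typename T>
class Settable {
 public:
  Settable() : set_(false), val_(T()) {}
  void Set(T val) { set_ = true; val_ = val; }
  bool IsSet() const { return set_; }
  bool Get(T* out) const { if (set_) *out = val_; return set_; }
  void SetFrom(const Settable<T>& o) { if (o.set_) Set(o.val_); }
 private:
  bool set_;
  T val_;
};

int main() {
  Settable<bool> smoothing;    // default: unset, so SetFrom would skip it
  Settable<bool> change;
  change.Set(true);
  smoothing.SetFrom(change);   // copied, because |change| was set
  bool v = false;
  printf("set=%d value=%d\n", smoothing.IsSet(), smoothing.Get(&v) && v);
}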

View File

@ -125,10 +125,6 @@ class MediaEngineInterface {
// TODO(tschmelcher): Add method for selecting the soundclip device.
virtual bool SetSoundDevices(const Device* in_device,
const Device* out_device) = 0;
// Sets the externally provided video capturer. The ssrc is the ssrc of the
// (video) stream for which the video capturer should be set.
virtual bool SetVideoCapturer(VideoCapturer* capturer) = 0;
virtual VideoCapturer* GetVideoCapturer() const = 0;
// Device configuration
// Gets the current speaker volume, as a value between 0 and 255.
@ -145,8 +141,6 @@ class MediaEngineInterface {
virtual bool SetLocalMonitor(bool enable) = 0;
// Installs a callback for raw frames from the local camera.
virtual bool SetLocalRenderer(VideoRenderer* renderer) = 0;
// Starts/stops local camera.
virtual bool SetVideoCapture(bool capture) = 0;
virtual const std::vector<AudioCodec>& audio_codecs() = 0;
virtual const std::vector<RtpHeaderExtension>&
@ -233,12 +227,6 @@ class CompositeMediaEngine : public MediaEngineInterface {
const Device* out_device) {
return voice_.SetDevices(in_device, out_device);
}
virtual bool SetVideoCapturer(VideoCapturer* capturer) {
return video_.SetVideoCapturer(capturer);
}
virtual VideoCapturer* GetVideoCapturer() const {
return video_.GetVideoCapturer();
}
virtual bool GetOutputVolume(int* level) {
return voice_.GetOutputVolume(level);
@ -256,9 +244,6 @@ class CompositeMediaEngine : public MediaEngineInterface {
virtual bool SetLocalRenderer(VideoRenderer* renderer) {
return video_.SetLocalRenderer(renderer);
}
virtual bool SetVideoCapture(bool capture) {
return video_.SetCapture(capture);
}
virtual const std::vector<AudioCodec>& audio_codecs() {
return voice_.codecs();
@ -364,15 +349,12 @@ class NullVideoEngine {
return true;
}
bool SetLocalRenderer(VideoRenderer* renderer) { return true; }
bool SetCapture(bool capture) { return true; }
const std::vector<VideoCodec>& codecs() { return codecs_; }
const std::vector<RtpHeaderExtension>& rtp_header_extensions() {
return rtp_header_extensions_;
}
void SetLogging(int min_sev, const char* filter) {}
VideoFormat GetStartCaptureFormat() const { return VideoFormat(); }
bool SetVideoCapturer(VideoCapturer* capturer) { return true; }
VideoCapturer* GetVideoCapturer() const { return NULL; }
sigslot::signal2<VideoCapturer*, CaptureState> SignalCaptureStateChange;
private:

View File

@ -186,9 +186,10 @@ bool RtpTestUtility::VerifyTestPacketsFromStream(
result &= rtp_packet.ReadFromByteBuffer(&buf);
result &= rtp_packet.SameExceptSeqNumTimestampSsrc(
kTestRawRtpPackets[index],
kTestRawRtpPackets[index].sequence_number +
loop * GetTestPacketCount(),
kTestRawRtpPackets[index].timestamp + loop * kRtpTimestampIncrease,
static_cast<uint16>(kTestRawRtpPackets[index].sequence_number +
loop * GetTestPacketCount()),
static_cast<uint32>(kTestRawRtpPackets[index].timestamp +
loop * kRtpTimestampIncrease),
ssrc);
}
}

View File

@ -27,16 +27,23 @@
#include <limits.h> // For INT_MAX
#include "talk/media/base/constants.h"
#include "talk/base/logging.h"
#include "talk/base/timeutils.h"
#include "talk/media/base/constants.h"
#include "talk/media/base/videoframe.h"
namespace cricket {
// TODO(fbarchard): Make downgrades settable
static const int kMaxCpuDowngrades = 2; // Downgrade at most 2 times for CPU.
static const int kDefaultDowngradeWaitTimeMs = 2000;
// The number of milliseconds of data to require before acting on cpu sampling
// information.
static const size_t kCpuLoadMinSampleTime = 5000;
// The amount of weight to give to each new cpu load sample. The lower the
// value, the slower we'll adapt to changing cpu conditions.
static const float kCpuLoadWeightCoefficient = 0.4f;
// The seed value for the cpu load moving average.
static const float kCpuLoadInitialAverage = 0.5f;
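
These constants define an exponentially weighted moving average: each new sample contributes 40% and the history 60%, seeded at 0.5 so early samples pull the estimate away from a neutral midpoint. A worked run of the update applied in OnCpuLoadUpdated below:

#include <stdio.h>

// average = w * sample + (1 - w) * average, with w = 0.4 and seed 0.5,
// matching kCpuLoadWeightCoefficient and kCpuLoadInitialAverage above.
int main() {
  const float kWeight = 0.4f;
  float average = 0.5f;
  const float samples[] = { 0.9f, 0.9f, 0.9f, 0.9f };
  for (int i = 0; i < 4; ++i) {
    average = kWeight * samples[i] + (1.0f - kWeight) * average;
    printf("sample %d: average = %.3f\n", i, average);
  }
  // Converges toward the 0.9 load: 0.660, 0.756, 0.814, 0.848.
}
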
// TODO(fbarchard): Consider making scale factor table settable, to allow
// application to select quality vs performance tradeoff.
@ -150,8 +157,8 @@ VideoAdapter::~VideoAdapter() {
void VideoAdapter::SetInputFormat(const VideoFrame& in_frame) {
talk_base::CritScope cs(&critical_section_);
input_format_.width = in_frame.GetWidth();
input_format_.height = in_frame.GetHeight();
input_format_.width = static_cast<int>(in_frame.GetWidth());
input_format_.height = static_cast<int>(in_frame.GetHeight());
}
void VideoAdapter::SetInputFormat(const VideoFormat& format) {
@ -230,9 +237,10 @@ bool VideoAdapter::AdaptFrame(const VideoFrame* in_frame,
}
if (output_num_pixels_) {
float scale = VideoAdapter::FindClosestScale(in_frame->GetWidth(),
in_frame->GetHeight(),
output_num_pixels_);
float scale = VideoAdapter::FindClosestScale(
static_cast<int>(in_frame->GetWidth()),
static_cast<int>(in_frame->GetHeight()),
output_num_pixels_);
output_format_.width = static_cast<int>(in_frame->GetWidth() * scale + .5f);
output_format_.height = static_cast<int>(in_frame->GetHeight() * scale +
.5f);
@ -291,11 +299,12 @@ bool VideoAdapter::StretchToOutputFrame(const VideoFrame* in_frame) {
// Implementation of CoordinatedVideoAdapter
CoordinatedVideoAdapter::CoordinatedVideoAdapter()
: cpu_adaptation_(false),
cpu_smoothing_(false),
gd_adaptation_(true),
view_adaptation_(true),
view_switch_(false),
cpu_downgrade_count_(0),
cpu_downgrade_wait_time_(0),
cpu_adapt_wait_time_(0),
high_system_threshold_(kHighSystemCpuThreshold),
low_system_threshold_(kLowSystemCpuThreshold),
process_threshold_(kProcessCpuThreshold),
@ -303,7 +312,8 @@ CoordinatedVideoAdapter::CoordinatedVideoAdapter()
view_desired_interval_(0),
encoder_desired_num_pixels_(INT_MAX),
cpu_desired_num_pixels_(INT_MAX),
adapt_reason_(0) {
adapt_reason_(0),
system_load_average_(kCpuLoadInitialAverage) {
}
// Helper function to UPGRADE or DOWNGRADE a number of pixels
@ -406,28 +416,40 @@ void CoordinatedVideoAdapter::OnCpuLoadUpdated(
if (!cpu_adaptation_) {
return;
}
// Update the moving average of system load. Even if we aren't smoothing,
// we'll still calculate this information, in case smoothing is later enabled.
system_load_average_ = kCpuLoadWeightCoefficient * system_load +
(1.0f - kCpuLoadWeightCoefficient) * system_load_average_;
if (cpu_smoothing_) {
system_load = system_load_average_;
}
// If we haven't started taking samples yet, wait until we have at least
// the correct number of samples per the wait time.
if (cpu_adapt_wait_time_ == 0) {
cpu_adapt_wait_time_ = talk_base::TimeAfter(kCpuLoadMinSampleTime);
}
AdaptRequest request = FindCpuRequest(current_cpus, max_cpus,
process_load, system_load);
// Make sure we're not adapting too quickly.
if (request != KEEP) {
if (talk_base::TimeIsLater(talk_base::Time(),
cpu_adapt_wait_time_)) {
LOG(LS_VERBOSE) << "VAdapt CPU load high/low but do not adapt until "
<< talk_base::TimeUntil(cpu_adapt_wait_time_) << " ms";
request = KEEP;
}
}
// Update how many times we have downgraded due to the cpu load.
switch (request) {
case DOWNGRADE:
// Ignore downgrades if we have downgraded the maximum times.
if (cpu_downgrade_count_ < kMaxCpuDowngrades) {
// Ignore downgrades if we have downgraded the maximum times or we just
// downgraded in a short time.
if (cpu_downgrade_wait_time_ != 0 &&
talk_base::TimeIsLater(talk_base::Time(),
cpu_downgrade_wait_time_)) {
LOG(LS_VERBOSE) << "VAdapt CPU load high but do not downgrade until "
<< talk_base::TimeUntil(cpu_downgrade_wait_time_)
<< " ms.";
request = KEEP;
} else {
++cpu_downgrade_count_;
}
++cpu_downgrade_count_;
} else {
LOG(LS_VERBOSE) << "VAdapt CPU load high but do not downgrade "
"because maximum downgrades reached";
SignalCpuAdaptationUnable();
LOG(LS_VERBOSE) << "VAdapt CPU load high but do not downgrade "
"because maximum downgrades reached";
SignalCpuAdaptationUnable();
}
break;
case UPGRADE:
@ -517,9 +539,6 @@ bool CoordinatedVideoAdapter::AdaptToMinimumFormat(int* new_width,
if (cpu_adaptation_ && cpu_desired_num_pixels_ &&
(cpu_desired_num_pixels_ < min_num_pixels)) {
min_num_pixels = cpu_desired_num_pixels_;
// Update the cpu_downgrade_wait_time_ if we are going to downgrade video.
cpu_downgrade_wait_time_ =
talk_base::TimeAfter(kDefaultDowngradeWaitTimeMs);
}
// Determine which factors are keeping adapter resolution low.
@ -582,6 +601,14 @@ bool CoordinatedVideoAdapter::AdaptToMinimumFormat(int* new_width,
<< "x" << new_output.height
<< " Changed: " << (changed ? "true" : "false")
<< " Reason: " << kReasons[adapt_reason_];
if (changed) {
// When any adaptation occurs, historic CPU load levels are no longer
// accurate. Clear out our state so we can re-learn at the new normal.
cpu_adapt_wait_time_ = talk_base::TimeAfter(kCpuLoadMinSampleTime);
system_load_average_ = kCpuLoadInitialAverage;
}
return changed;
}

View File

@ -105,6 +105,15 @@ class CoordinatedVideoAdapter
// Enable or disable video adaptation due to the change of the CPU load.
void set_cpu_adaptation(bool enable) { cpu_adaptation_ = enable; }
bool cpu_adaptation() const { return cpu_adaptation_; }
// Enable or disable smoothing when doing CPU adaptation. When smoothing is
// enabled, system CPU load is tracked using an exponential weighted
// average.
void set_cpu_smoothing(bool enable) {
LOG(LS_INFO) << "CPU smoothing is now "
<< (enable ? "enabled" : "disabled");
cpu_smoothing_ = enable;
}
bool cpu_smoothing() const { return cpu_smoothing_; }
// Enable or disable video adaptation due to the change of the GD
void set_gd_adaptation(bool enable) { gd_adaptation_ = enable; }
bool gd_adaptation() const { return gd_adaptation_; }
@ -121,12 +130,12 @@ class CoordinatedVideoAdapter
// When the video is decreased, set the waiting time for CPU adaptation to
// decrease video again.
void set_cpu_downgrade_wait_time(uint32 cpu_downgrade_wait_time) {
if (cpu_downgrade_wait_time_ != static_cast<int>(cpu_downgrade_wait_time)) {
LOG(LS_INFO) << "VAdapt Change Cpu Downgrade Wait Time from: "
<< cpu_downgrade_wait_time_ << " to "
<< cpu_downgrade_wait_time;
cpu_downgrade_wait_time_ = static_cast<int>(cpu_downgrade_wait_time);
void set_cpu_adapt_wait_time(uint32 cpu_adapt_wait_time) {
if (cpu_adapt_wait_time_ != static_cast<int>(cpu_adapt_wait_time)) {
LOG(LS_INFO) << "VAdapt Change Cpu Adapt Wait Time from: "
<< cpu_adapt_wait_time_ << " to "
<< cpu_adapt_wait_time;
cpu_adapt_wait_time_ = static_cast<int>(cpu_adapt_wait_time);
}
}
// CPU system load high threshold for reducing resolution. e.g. 0.85f
@ -175,7 +184,7 @@ class CoordinatedVideoAdapter
private:
// Adapt to the minimum of the formats the server requests, the CPU wants, and
// the encoder wants. Returns true if resolution changed.
// the encoder wants. Returns true if resolution changed.
bool AdaptToMinimumFormat(int* new_width, int* new_height);
bool IsMinimumFormat(int pixels);
void StepPixelCount(CoordinatedVideoAdapter::AdaptRequest request,
@ -185,11 +194,12 @@ class CoordinatedVideoAdapter
float process_load, float system_load);
bool cpu_adaptation_; // True if cpu adaptation is enabled.
bool cpu_smoothing_; // True if cpu smoothing is enabled (with adaptation).
bool gd_adaptation_; // True if gd adaptation is enabled.
bool view_adaptation_; // True if view adaptation is enabled.
bool view_switch_; // True if view switch is enabled.
int cpu_downgrade_count_;
int cpu_downgrade_wait_time_;
int cpu_adapt_wait_time_;
// cpu system load thresholds relative to max cpus.
float high_system_threshold_;
float low_system_threshold_;
@ -205,6 +215,10 @@ class CoordinatedVideoAdapter
// The critical section to protect handling requests.
talk_base::CriticalSection request_critical_section_;
// The weighted average of cpu load over time. It's always updated (if cpu
// adaptation is on), but only used if cpu_smoothing_ is set.
float system_load_average_;
DISALLOW_COPY_AND_ASSIGN(CoordinatedVideoAdapter);
};

View File

@ -76,7 +76,7 @@ inline std::ostream& operator<<(std::ostream& s, const cricket::VideoCodec& c) {
}
inline int TimeBetweenSend(const cricket::VideoCodec& codec) {
return static_cast<int> (
return static_cast<int>(
cricket::VideoFormat::FpsToInterval(codec.framerate) /
talk_base::kNumNanosecsPerMillisec);
}
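
FpsToInterval returns the per-frame interval in nanoseconds, so the division by kNumNanosecsPerMillisec converts it to the milliseconds the test should wait between sends; at 30 fps that is about 33 ms. A standalone check of the arithmetic, with the unit constants assumed to match talk_base's:

#include <stdio.h>

int main() {
  const long long kNumNanosecsPerSec = 1000000000LL;   // assumed units
  const long long kNumNanosecsPerMillisec = 1000000LL;
  const int framerate = 30;
  long long interval_ns = kNumNanosecsPerSec / framerate;  // FpsToInterval
  int ms = (int)(interval_ns / kNumNanosecsPerMillisec);
  printf("%d ms between frames at %d fps\n", ms, framerate);  // 33 ms
}
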
@ -95,12 +95,12 @@ class VideoEngineOverride : public T {
}
bool is_camera_on() const { return T::GetVideoCapturer()->IsRunning(); }
void set_has_senders(bool has_senders) {
cricket::VideoCapturer* video_capturer = T::GetVideoCapturer();
if (has_senders) {
this->RegisterSender(this,
&VideoEngineOverride<T>::OnLocalFrame,
&VideoEngineOverride<T>::OnLocalFrameFormat);
video_capturer->SignalVideoFrame.connect(this,
&VideoEngineOverride<T>::OnLocalFrame);
} else {
this->UnregisterSender(this);
video_capturer->SignalVideoFrame.disconnect(this);
}
}
void OnLocalFrame(cricket::VideoCapturer*,
@ -164,39 +164,6 @@ class VideoEngineTest : public testing::Test {
}
#endif
// Tests starting and stopping the capturer.
void SetCapture() {
EXPECT_FALSE(engine_.GetVideoCapturer());
EXPECT_TRUE(engine_.Init(talk_base::Thread::Current()));
ResetCapturer();
EXPECT_TRUE(engine_.GetVideoCapturer() != NULL);
EXPECT_FALSE(engine_.is_camera_on());
EXPECT_TRUE(engine_.SetCapture(true));
EXPECT_TRUE(engine_.is_camera_on());
EXPECT_TRUE(engine_.SetCapture(false));
EXPECT_FALSE(engine_.is_camera_on());
engine_.set_has_senders(true);
EXPECT_TRUE(engine_.is_camera_on());
EXPECT_TRUE(engine_.SetCapture(true));
EXPECT_TRUE(engine_.is_camera_on());
EXPECT_TRUE(engine_.SetCapture(false));
EXPECT_TRUE(engine_.is_camera_on());
engine_.set_has_senders(false);
EXPECT_FALSE(engine_.is_camera_on());
EXPECT_TRUE(engine_.SetCapture(true));
EXPECT_TRUE(engine_.is_camera_on());
EXPECT_TRUE(engine_.SetCapture(false));
EXPECT_FALSE(engine_.is_camera_on());
EXPECT_TRUE(engine_.SetVideoCapturer(NULL));
EXPECT_TRUE(engine_.GetVideoCapturer() == NULL);
engine_.Terminate();
}
void ResetCapturer() {
cricket::Device device("test", "device");
video_capturer_.reset(new cricket::FakeVideoCapturer);
EXPECT_TRUE(engine_.SetVideoCapturer(video_capturer_.get()));
}
void ConstrainNewCodecBody() {
cricket::VideoCodec empty, in, out;
cricket::VideoCodec max_settings(engine_.codecs()[0].id,
@ -482,9 +449,6 @@ class VideoMediaChannelTest : public testing::Test,
virtual void SetUp() {
cricket::Device device("test", "device");
EXPECT_TRUE(engine_.Init(talk_base::Thread::Current()));
video_capturer_.reset(new cricket::FakeVideoCapturer);
EXPECT_TRUE(video_capturer_.get() != NULL);
EXPECT_TRUE(engine_.SetVideoCapturer(video_capturer_.get()));
channel_.reset(engine_.CreateChannel(NULL));
EXPECT_TRUE(channel_.get() != NULL);
ConnectVideoChannelError();
@ -494,19 +458,34 @@ class VideoMediaChannelTest : public testing::Test,
media_error_ = cricket::VideoMediaChannel::ERROR_NONE;
channel_->SetRecvCodecs(engine_.codecs());
EXPECT_TRUE(channel_->AddSendStream(DefaultSendStreamParams()));
video_capturer_.reset(new cricket::FakeVideoCapturer);
cricket::VideoFormat format(640, 480,
cricket::VideoFormat::FpsToInterval(30),
cricket::FOURCC_I420);
EXPECT_EQ(cricket::CS_RUNNING, video_capturer_->Start(format));
EXPECT_TRUE(channel_->SetCapturer(kSsrc, video_capturer_.get()));
}
void SetUpSecondStream() {
EXPECT_TRUE(channel_->AddRecvStream(
cricket::StreamParams::CreateLegacy(kSsrc)));
EXPECT_TRUE(channel_->AddRecvStream(
cricket::StreamParams::CreateLegacy(kSsrc+2)));
cricket::StreamParams::CreateLegacy(kSsrc + 2)));
// SetUp() already added kSsrc; make sure duplicate SSRCs can't be added.
EXPECT_FALSE(channel_->AddSendStream(
cricket::StreamParams::CreateLegacy(kSsrc)));
EXPECT_TRUE(channel_->AddSendStream(
cricket::StreamParams::CreateLegacy(kSsrc+2)));
cricket::StreamParams::CreateLegacy(kSsrc + 2)));
video_capturer_2_.reset(new cricket::FakeVideoCapturer());
cricket::VideoFormat format(640, 480,
cricket::VideoFormat::FpsToInterval(30),
cricket::FOURCC_I420);
EXPECT_EQ(cricket::CS_RUNNING, video_capturer_2_->Start(format));
EXPECT_TRUE(channel_->SetCapturer(kSsrc + 2, video_capturer_2_.get()));
// Make the second renderer available for use by a new stream.
EXPECT_TRUE(channel_->SetRenderer(kSsrc+2, &renderer2_));
EXPECT_TRUE(channel_->SetRenderer(kSsrc + 2, &renderer2_));
}
virtual void TearDown() {
channel_.reset();
@ -529,6 +508,19 @@ class VideoMediaChannelTest : public testing::Test,
bool SetOneCodec(const cricket::VideoCodec& codec) {
std::vector<cricket::VideoCodec> codecs;
codecs.push_back(codec);
cricket::VideoFormat capture_format(codec.width, codec.height,
cricket::VideoFormat::FpsToInterval(codec.framerate),
cricket::FOURCC_I420);
if (video_capturer_) {
EXPECT_EQ(cricket::CS_RUNNING, video_capturer_->Start(capture_format));
}
if (video_capturer_2_) {
EXPECT_EQ(cricket::CS_RUNNING, video_capturer_2_->Start(capture_format));
}
bool sending = channel_->sending();
bool success = SetSend(false);
if (success)
@ -550,6 +542,9 @@ class VideoMediaChannelTest : public testing::Test,
return NumRtpPackets();
}
bool SendFrame() {
if (video_capturer_2_) {
video_capturer_2_->CaptureFrame();
}
return video_capturer_.get() &&
video_capturer_->CaptureFrame();
}
@ -705,6 +700,7 @@ class VideoMediaChannelTest : public testing::Test,
// Test that SetSend works.
void SetSend() {
EXPECT_FALSE(channel_->sending());
EXPECT_TRUE(channel_->SetCapturer(kSsrc, video_capturer_.get()));
EXPECT_TRUE(SetOneCodec(DefaultCodec()));
EXPECT_FALSE(channel_->sending());
EXPECT_TRUE(SetSend(true));
@ -877,6 +873,7 @@ class VideoMediaChannelTest : public testing::Test,
EXPECT_TRUE(channel_->SetOptions(vmo));
EXPECT_TRUE(channel_->AddRecvStream(
cricket::StreamParams::CreateLegacy(1234)));
channel_->UpdateAspectRatio(640, 400);
EXPECT_TRUE(SetSend(true));
EXPECT_TRUE(channel_->SetRender(true));
EXPECT_TRUE(SendFrame());
@ -948,6 +945,7 @@ class VideoMediaChannelTest : public testing::Test,
EXPECT_TRUE(SetDefaultCodec());
EXPECT_TRUE(channel_->AddSendStream(
cricket::StreamParams::CreateLegacy(999)));
EXPECT_TRUE(channel_->SetCapturer(999u, video_capturer_.get()));
EXPECT_TRUE(SetSend(true));
EXPECT_TRUE(WaitAndSendFrame(0));
EXPECT_TRUE_WAIT(NumRtpPackets() > 0, kTimeout);
@ -1012,6 +1010,7 @@ class VideoMediaChannelTest : public testing::Test,
EXPECT_TRUE(channel_->AddSendStream(
cricket::StreamParams::CreateLegacy(789u)));
EXPECT_TRUE(channel_->SetCapturer(789u, video_capturer_.get()));
EXPECT_EQ(rtp_packets, NumRtpPackets());
// Wait 30ms to guarantee the engine does not drop the frame.
EXPECT_TRUE(WaitAndSendFrame(30));
@ -1236,13 +1235,25 @@ class VideoMediaChannelTest : public testing::Test,
EXPECT_TRUE(capturer->CaptureCustomFrame(format.width, format.height,
cricket::FOURCC_I420));
++captured_frames;
EXPECT_FRAME_WAIT(captured_frames, format.width, format.height, kTimeout);
// Wait until frame of right size is captured.
EXPECT_TRUE_WAIT(renderer_.num_rendered_frames() >= captured_frames &&
format.width == renderer_.width() &&
format.height == renderer_.height(), kTimeout);
EXPECT_GE(renderer_.num_rendered_frames(), captured_frames);
EXPECT_EQ(format.width, renderer_.width());
EXPECT_EQ(format.height, renderer_.height());
captured_frames = renderer_.num_rendered_frames() + 1;
EXPECT_FALSE(renderer_.black_frame());
EXPECT_TRUE(channel_->SetCapturer(kSsrc, NULL));
// Make sure a black frame is generated as no new frame is captured.
// A black frame should be the resolution of the send codec.
++captured_frames;
EXPECT_FRAME_WAIT(captured_frames, codec.width, codec.height, kTimeout);
// Make sure a black frame is generated within the specified timeout.
// The black frame should be the resolution of the send codec.
EXPECT_TRUE_WAIT(renderer_.num_rendered_frames() >= captured_frames &&
codec.width == renderer_.width() &&
codec.height == renderer_.height() &&
renderer_.black_frame(), kTimeout);
EXPECT_GE(renderer_.num_rendered_frames(), captured_frames);
EXPECT_EQ(codec.width, renderer_.width());
EXPECT_EQ(codec.height, renderer_.height());
EXPECT_TRUE(renderer_.black_frame());
// The black frame has the same timestamp as the next frame since it's
@ -1263,13 +1274,18 @@ class VideoMediaChannelTest : public testing::Test,
EXPECT_EQ(0, renderer_.num_rendered_frames());
EXPECT_TRUE(SendFrame());
EXPECT_FRAME_WAIT(1, 640, 400, kTimeout);
// Remove the capturer.
EXPECT_TRUE(channel_->SetCapturer(kSsrc, NULL));
// No capturer was added, so this RemoveCapturer should
// fail.
EXPECT_FALSE(channel_->SetCapturer(kSsrc, NULL));
// Wait for kTimeout, to make sure no frames are sent
WAIT(renderer_.num_rendered_frames() != 1, kTimeout);
// Still a single frame, from the original SendFrame() call.
EXPECT_EQ(1, renderer_.num_rendered_frames());
// Wait for frames to stop flowing.
talk_base::Thread::Current()->ProcessMessages(300);
int num_frames = renderer_.num_rendered_frames();
// Wait to make sure no more frames are sent
WAIT(renderer_.num_rendered_frames() != num_frames, 300);
// Verify no more frames were sent.
EXPECT_EQ(num_frames, renderer_.num_rendered_frames());
}
// Tests that we can add and remove capturer as unique sources.
@ -1328,6 +1344,9 @@ class VideoMediaChannelTest : public testing::Test,
// Capture a frame with additional capturer2, frames should be received
EXPECT_TRUE(capturer2->CaptureCustomFrame(1024, 768, cricket::FOURCC_I420));
EXPECT_FRAME_ON_RENDERER_WAIT(renderer2, 1, 1024, 768, kTimeout);
// Successfully remove the capturer.
EXPECT_TRUE(channel_->SetCapturer(kSsrc, NULL));
// Fail to re-remove the capturer.
EXPECT_FALSE(channel_->SetCapturer(kSsrc, NULL));
// The capturers must be unregistered here as they run out of scope
// next.
@ -1372,8 +1391,9 @@ class VideoMediaChannelTest : public testing::Test,
EXPECT_TRUE(capturer->CaptureCustomFrame(kWidth, kHeight,
cricket::FOURCC_ARGB));
EXPECT_TRUE(capturer->CaptureFrame());
EXPECT_FRAME_ON_RENDERER_WAIT(renderer, 2, kScaledWidth, kScaledHeight,
kTimeout);
EXPECT_EQ_WAIT(2, renderer.num_rendered_frames(), kTimeout);
EXPECT_TRUE_WAIT(kScaledWidth == renderer.width() &&
kScaledHeight == renderer.height(), kTimeout);
EXPECT_TRUE(channel_->SetCapturer(kSsrc, NULL));
}
@ -1626,12 +1646,15 @@ class VideoMediaChannelTest : public testing::Test,
EXPECT_FALSE(channel_->AddRecvStream(
cricket::StreamParams::CreateLegacy(kSsrc)));
EXPECT_TRUE(channel_->SetCapturer(kSsrc, video_capturer_.get()));
SendAndReceive(codec);
EXPECT_TRUE(channel_->RemoveSendStream(0));
}
VideoEngineOverride<E> engine_;
talk_base::scoped_ptr<cricket::FakeVideoCapturer> video_capturer_;
talk_base::scoped_ptr<cricket::FakeVideoCapturer> video_capturer_2_;
talk_base::scoped_ptr<C> channel_;
cricket::FakeNetworkInterface network_interface_;
cricket::FakeVideoRenderer renderer_;

View File

@ -87,8 +87,8 @@ bool VideoFrame::CopyToPlanes(
uint8* dst_y, uint8* dst_u, uint8* dst_v,
int32 dst_pitch_y, int32 dst_pitch_u, int32 dst_pitch_v) const {
#if !defined(DISABLE_YUV)
int32 src_width = GetWidth();
int32 src_height = GetHeight();
int32 src_width = static_cast<int>(GetWidth());
int32 src_height = static_cast<int>(GetHeight());
return libyuv::I420Copy(GetYPlane(), GetYPitch(),
GetUPlane(), GetUPitch(),
GetVPlane(), GetVPitch(),
@ -147,7 +147,8 @@ void VideoFrame::StretchToPlanes(
} else if (src_width * height < src_height * width) {
// Reduce the input height.
src_height = src_width * height / width;
int32 iheight_offset = (GetHeight() - src_height) >> 2;
int32 iheight_offset = static_cast<int32>(
(GetHeight() - src_height) >> 2);
iheight_offset <<= 1; // Ensure that iheight_offset is even.
src_y += iheight_offset * GetYPitch();
src_u += iheight_offset / 2 * GetUPitch();
@ -160,9 +161,9 @@ void VideoFrame::StretchToPlanes(
// Scale to the output I420 frame.
libyuv::Scale(src_y, src_u, src_v,
GetYPitch(), GetUPitch(), GetVPitch(),
src_width, src_height,
static_cast<int>(src_width), static_cast<int>(src_height),
dst_y, dst_u, dst_v, dst_pitch_y, dst_pitch_u, dst_pitch_v,
width, height, interpolate);
static_cast<int>(width), static_cast<int>(height), interpolate);
#endif
}
@ -180,7 +181,9 @@ size_t VideoFrame::StretchToBuffer(size_t dst_width, size_t dst_height,
uint8* dst_u = dst_y + dst_width * dst_height;
uint8* dst_v = dst_u + ((dst_width + 1) >> 1) * ((dst_height + 1) >> 1);
StretchToPlanes(dst_y, dst_u, dst_v,
dst_width, (dst_width + 1) >> 1, (dst_width + 1) >> 1,
static_cast<int32>(dst_width),
static_cast<int32>((dst_width + 1) >> 1),
static_cast<int32>((dst_width + 1) >> 1),
dst_width, dst_height, interpolate, vert_crop);
}
return needed;
@ -203,7 +206,8 @@ void VideoFrame::StretchToFrame(VideoFrame* dst,
VideoFrame* VideoFrame::Stretch(size_t dst_width, size_t dst_height,
bool interpolate, bool vert_crop) const {
VideoFrame* dest = CreateEmptyFrame(dst_width, dst_height,
VideoFrame* dest = CreateEmptyFrame(static_cast<int>(dst_width),
static_cast<int>(dst_height),
GetPixelWidth(), GetPixelHeight(),
GetElapsedTime(), GetTimeStamp());
if (dest) {
@ -217,7 +221,9 @@ bool VideoFrame::SetToBlack() {
return libyuv::I420Rect(GetYPlane(), GetYPitch(),
GetUPlane(), GetUPitch(),
GetVPlane(), GetVPitch(),
0, 0, GetWidth(), GetHeight(),
0, 0,
static_cast<int>(GetWidth()),
static_cast<int>(GetHeight()),
16, 128, 128) == 0;
#else
int uv_size = GetUPitch() * GetChromaHeight();

View File

@ -152,21 +152,24 @@ class FakeDeviceManager : public DeviceManagerInterface {
void SetAudioInputDevices(const std::vector<std::string>& devices) {
input_devices_.clear();
for (size_t i = 0; i < devices.size(); ++i) {
input_devices_.push_back(Device(devices[i], i));
input_devices_.push_back(Device(devices[i],
static_cast<int>(i)));
}
SignalDevicesChange();
}
void SetAudioOutputDevices(const std::vector<std::string>& devices) {
output_devices_.clear();
for (size_t i = 0; i < devices.size(); ++i) {
output_devices_.push_back(Device(devices[i], i));
output_devices_.push_back(Device(devices[i],
static_cast<int>(i)));
}
SignalDevicesChange();
}
void SetVideoCaptureDevices(const std::vector<std::string>& devices) {
vidcap_devices_.clear();
for (size_t i = 0; i < devices.size(); ++i) {
vidcap_devices_.push_back(Device(devices[i], i));
vidcap_devices_.push_back(Device(devices[i],
static_cast<int>(i)));
}
SignalDevicesChange();
}

View File

@ -157,7 +157,7 @@ FileVideoCapturer::FileVideoCapturer()
FileVideoCapturer::~FileVideoCapturer() {
Stop();
delete[] static_cast<char*> (captured_frame_.data);
delete[] static_cast<char*>(captured_frame_.data);
}
bool FileVideoCapturer::Init(const Device& device) {
@ -330,7 +330,7 @@ bool FileVideoCapturer::ReadFrame(bool first_frame, int* wait_time_ms) {
// 2.2 Reallocate memory for the frame data if necessary.
if (frame_buffer_size_ < captured_frame_.data_size) {
frame_buffer_size_ = captured_frame_.data_size;
delete[] static_cast<char*> (captured_frame_.data);
delete[] static_cast<char*>(captured_frame_.data);
captured_frame_.data = new char[frame_buffer_size_];
}
// 2.3 Read the frame data.

View File

@ -27,7 +27,6 @@
#include "talk/media/sctp/sctpdataengine.h"
#include <errno.h>
#include <stdarg.h>
#include <stdio.h>
#include <vector>
@ -40,14 +39,6 @@
#include "talk/media/base/streamparams.h"
#include "usrsctplib/usrsctp.h"
#ifdef _WIN32
// EINPROGRESS gets #defined to WSAEINPROGRESS in some headers above, which
// is not 112. 112 is the value defined in <errno.h>. usrsctp uses 112 for
// EINPROGRESS.
#undef EINPROGRESS
#define EINPROGRESS (112)
#endif
namespace cricket {
// This is the SCTP port to use. It is passed along the wire and the listener
@ -343,8 +334,9 @@ bool SctpDataMediaChannel::Connect() {
sockaddr_conn remote_sconn = GetSctpSockAddr(remote_port_);
int connect_result = usrsctp_connect(
sock_, reinterpret_cast<sockaddr *>(&remote_sconn), sizeof(remote_sconn));
if (connect_result < 0 && errno != EINPROGRESS) {
LOG_ERRNO(LS_ERROR) << debug_name_ << "Failed usrsctp_connect";
if (connect_result < 0 && errno != SCTP_EINPROGRESS) {
LOG_ERRNO(LS_ERROR) << debug_name_ << "Failed usrsctp_connect. got errno="
<< errno << ", but wanted " << SCTP_EINPROGRESS;
CloseSctpSocket();
return false;
}

View File

@ -28,9 +28,19 @@
#ifndef TALK_MEDIA_SCTP_SCTPDATAENGINE_H_
#define TALK_MEDIA_SCTP_SCTPDATAENGINE_H_
#include <errno.h>
#include <string>
#include <vector>
namespace cricket {
// Some ERRNO values get re-#defined to WSA* equivalents in some talk/
// headers. We save the original ones in an enum.
enum PreservedErrno {
SCTP_EINPROGRESS = EINPROGRESS,
SCTP_EWOULDBLOCK = EWOULDBLOCK
};
} // namespace cricket
#include "talk/base/buffer.h"
#include "talk/base/scoped_ptr.h"
#include "talk/media/base/codec.h"

View File

@ -53,7 +53,7 @@ class FakeWebRtcDeviceInfo : public webrtc::VideoCaptureModule::DeviceInfo {
dev->caps.push_back(cap);
}
virtual uint32_t NumberOfDevices() {
return devices_.size();
return static_cast<int>(devices_.size());
}
virtual int32_t GetDeviceName(uint32_t device_num,
char* device_name,
@ -77,7 +77,7 @@ class FakeWebRtcDeviceInfo : public webrtc::VideoCaptureModule::DeviceInfo {
virtual int32_t NumberOfCapabilities(const char* device_id) {
Device* dev = GetDeviceById(device_id);
if (!dev) return -1;
return dev->caps.size();
return static_cast<int32_t>(dev->caps.size());
}
virtual int32_t GetCapability(const char* device_id,
const uint32_t device_cap_num,

View File

@ -392,7 +392,7 @@ class FakeWebRtcVideoEngine
return -1;
}
int GetNumChannels() const { return channels_.size(); }
int GetNumChannels() const { return static_cast<int>(channels_.size()); }
bool IsChannel(int channel) const {
return (channels_.find(channel) != channels_.end());
}
@ -401,7 +401,7 @@ class FakeWebRtcVideoEngine
}
int GetLastCapturer() const { return last_capturer_; }
int GetNumCapturers() const { return capturers_.size(); }
int GetNumCapturers() const { return static_cast<int>(capturers_.size()); }
void set_fail_alloc_capturer(bool fail_alloc_capturer) {
fail_alloc_capturer_ = fail_alloc_capturer;
}
@ -497,7 +497,8 @@ class FakeWebRtcVideoEngine
}
int GetNumSsrcs(int channel) const {
WEBRTC_ASSERT_CHANNEL(channel);
return channels_.find(channel)->second->ssrcs_.size();
return static_cast<int>(
channels_.find(channel)->second->ssrcs_.size());
}
bool GetIsTransmitting(int channel) const {
WEBRTC_ASSERT_CHANNEL(channel);
@ -518,7 +519,8 @@ class FakeWebRtcVideoEngine
};
int GetNumExternalDecoderRegistered(int channel) const {
WEBRTC_ASSERT_CHANNEL(channel);
return channels_.find(channel)->second->ext_decoder_pl_types_.size();
return static_cast<int>(
channels_.find(channel)->second->ext_decoder_pl_types_.size());
};
bool ExternalEncoderRegistered(int channel,
unsigned int pl_type) const {
@ -528,13 +530,15 @@ class FakeWebRtcVideoEngine
};
int GetNumExternalEncoderRegistered(int channel) const {
WEBRTC_ASSERT_CHANNEL(channel);
return channels_.find(channel)->second->ext_encoder_pl_types_.size();
return static_cast<int>(
channels_.find(channel)->second->ext_encoder_pl_types_.size());
};
int GetTotalNumExternalEncoderRegistered() const {
std::map<int, Channel*>::const_iterator it;
int total_num_registered = 0;
for (it = channels_.begin(); it != channels_.end(); ++it)
total_num_registered += it->second->ext_encoder_pl_types_.size();
total_num_registered +=
static_cast<int>(it->second->ext_encoder_pl_types_.size());
return total_num_registered;
}
void SetSendBitrates(int channel, unsigned int video_bitrate,
@ -708,10 +712,8 @@ class FakeWebRtcVideoEngine
WEBRTC_STUB(DeregisterDecoderObserver, (const int));
WEBRTC_STUB(SendKeyFrame, (const int));
WEBRTC_STUB(WaitForFirstKeyFrame, (const int, const bool));
#ifdef USE_WEBRTC_DEV_BRANCH
WEBRTC_STUB(StartDebugRecording, (int, const char*));
WEBRTC_STUB(StopDebugRecording, (int));
#endif
// webrtc::ViECapture
WEBRTC_STUB(NumberOfCaptureDevices, ());
@ -783,12 +785,10 @@ class FakeWebRtcVideoEngine
// Not using WEBRTC_STUB due to bool return value
virtual bool IsIPv6Enabled(int channel) { return true; }
WEBRTC_STUB(SetMTU, (int, unsigned int));
#ifndef USE_WEBRTC_DEV_BRANCH
WEBRTC_STUB(SetPacketTimeoutNotification, (const int, bool, int));
WEBRTC_STUB(RegisterObserver, (const int, webrtc::ViENetworkObserver&));
WEBRTC_STUB(SetPeriodicDeadOrAliveStatus, (const int, const bool,
const unsigned int));
#endif
// webrtc::ViERender
WEBRTC_STUB(RegisterVideoRenderModule, (webrtc::VideoRender&));

View File

@ -609,7 +609,6 @@ class FakeWebRtcVoiceEngine
}
WEBRTC_STUB(ReceivedRTCPPacket, (int channel, const void* data,
unsigned int length));
#ifndef USE_WEBRTC_DEV_BRANCH
// Not using WEBRTC_STUB due to bool return value
WEBRTC_STUB(SetPacketTimeoutNotification, (int channel, bool enable,
int timeoutSeconds));
@ -622,7 +621,6 @@ class FakeWebRtcVoiceEngine
int& sampleTimeSeconds));
WEBRTC_STUB(SetPeriodicDeadOrAliveStatus, (int channel, bool enable,
int sampleTimeSeconds));
#endif
// webrtc::VoERTP_RTCP
WEBRTC_STUB(RegisterRTPObserver, (int channel,
@ -743,11 +741,7 @@ class FakeWebRtcVoiceEngine
// webrtc::VoEVideoSync
WEBRTC_STUB(GetPlayoutBufferSize, (int& bufferMs));
WEBRTC_STUB(GetPlayoutTimestamp, (int channel, unsigned int& timestamp));
#ifdef USE_WEBRTC_DEV_BRANCH
WEBRTC_STUB(GetRtpRtcp, (int, webrtc::RtpRtcp**, webrtc::RtpReceiver**));
#else
WEBRTC_STUB(GetRtpRtcp, (int, webrtc::RtpRtcp*&));
#endif
WEBRTC_STUB(SetInitTimestamp, (int channel, unsigned int timestamp));
WEBRTC_STUB(SetInitSequenceNumber, (int channel, short sequenceNumber));
WEBRTC_STUB(SetMinimumPlayoutDelay, (int channel, int delayMs));

View File

@ -92,7 +92,7 @@ class WebRtcPassthroughRender : public webrtc::VideoRender {
}
virtual uint32_t GetNumIncomingRenderStreams() const {
return stream_render_map_.size();
return static_cast<uint32_t>(stream_render_map_.size());
}
virtual bool HasIncomingRenderStream(const uint32_t stream_id) const;

View File

@ -257,7 +257,7 @@ class WebRtcRenderAdapter : public webrtc::ExternalRenderer {
}
int framerate() {
talk_base::CritScope cs(&crit_);
return frame_rate_tracker_.units_second();
return static_cast<int>(frame_rate_tracker_.units_second());
}
VideoRenderer* renderer() {
talk_base::CritScope cs(&crit_);
@ -356,7 +356,7 @@ class WebRtcLocalStreamInfo {
}
int framerate() {
talk_base::CritScope cs(&crit_);
return rate_tracker_.units_second();
return static_cast<int>(rate_tracker_.units_second());
}
void GetLastFrameInfo(
size_t* width, size_t* height, int64* elapsed_time) const {
@ -525,17 +525,27 @@ class WebRtcVideoChannelSendInfo {
if (video_capturer && !video_capturer->IsScreencast()) {
const VideoFormat* capture_format = video_capturer->GetCaptureFormat();
if (capture_format) {
// TODO(thorcarpenter): This is broken. Video capturer doesn't have
// a capture format until the capturer is started. So, if the capturer
// is started immediately after calling set_video_capturer, the video
// adapter may not have the input format set, the interval may
// be zero, and all frames may be dropped.
// Consider fixing this by having video_adapter keep a pointer to the
// video capturer.
video_adapter_->SetInputFormat(*capture_format);
}
}
}
void ApplyCpuOptions(const VideoOptions& options) {
bool cpu_adapt;
bool cpu_adapt, cpu_smoothing;
float low, med, high;
if (options.adapt_input_to_cpu_usage.Get(&cpu_adapt)) {
video_adapter_->set_cpu_adaptation(cpu_adapt);
}
if (options.adapt_cpu_with_smoothing.Get(&cpu_smoothing)) {
video_adapter_->set_cpu_smoothing(cpu_smoothing);
}
if (options.process_adaptation_threshhold.Get(&med)) {
video_adapter_->set_process_threshold(med);
}
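ApplyCpuOptions leans on the optional-value pattern used throughout cricket::VideoOptions: Get(&out) writes the output and returns true only when the option was explicitly set, so unset fields leave the adapter's current configuration alone. A simplified sketch of that wrapper (the real cricket::Settable carries more machinery):
template <typename T>
class Settable {
 public:
  Settable() : set_(false), val_() {}
  void Set(T val) {
    set_ = true;
    val_ = val;
  }
  // Writes *out and returns true only if a value was explicitly set,
  // letting callers skip configuration for untouched options.
  bool Get(T* out) const {
    if (set_)
      *out = val_;
    return set_;
  }
 private:
  bool set_;
  T val_;
};
This is why the method can be handed a sparsely populated VideoOptions and forward only the fields the application actually set.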
@ -552,8 +562,9 @@ class WebRtcVideoChannelSendInfo {
*processed_frame = original_frame.Copy();
} else {
WebRtcVideoFrame* black_frame = new WebRtcVideoFrame();
black_frame->InitToBlack(original_frame.GetWidth(),
original_frame.GetHeight(), 1, 1,
black_frame->InitToBlack(static_cast<int>(original_frame.GetWidth()),
static_cast<int>(original_frame.GetHeight()),
1, 1,
original_frame.GetElapsedTime(),
original_frame.GetTimeStamp());
*processed_frame = black_frame;
@ -675,8 +686,6 @@ void WebRtcVideoEngine::Construct(ViEWrapper* vie_wrapper,
render_module_.reset(new WebRtcPassthroughRender());
local_renderer_w_ = local_renderer_h_ = 0;
local_renderer_ = NULL;
video_capturer_ = NULL;
frame_listeners_ = 0;
capture_started_ = false;
decoder_factory_ = NULL;
encoder_factory_ = NULL;
@ -712,7 +721,6 @@ void WebRtcVideoEngine::Construct(ViEWrapper* vie_wrapper,
}
WebRtcVideoEngine::~WebRtcVideoEngine() {
ClearCapturer();
LOG(LS_INFO) << "WebRtcVideoEngine::~WebRtcVideoEngine";
if (initialized_) {
Terminate();
@ -791,7 +799,6 @@ bool WebRtcVideoEngine::InitVideoEngine() {
void WebRtcVideoEngine::Terminate() {
LOG(LS_INFO) << "WebRtcVideoEngine::Terminate";
initialized_ = false;
SetCapture(false);
if (vie_wrapper_->render()->DeRegisterVideoRenderModule(
*render_module_.get()) != 0) {
@ -847,132 +854,12 @@ WebRtcVideoMediaChannel* WebRtcVideoEngine::CreateChannel(
return channel;
}
bool WebRtcVideoEngine::SetVideoCapturer(VideoCapturer* capturer) {
return SetCapturer(capturer);
}
VideoCapturer* WebRtcVideoEngine::GetVideoCapturer() const {
return video_capturer_;
}
bool WebRtcVideoEngine::SetLocalRenderer(VideoRenderer* renderer) {
local_renderer_w_ = local_renderer_h_ = 0;
local_renderer_ = renderer;
return true;
}
bool WebRtcVideoEngine::SetCapture(bool capture) {
bool old_capture = capture_started_;
capture_started_ = capture;
CaptureState result = UpdateCapturingState();
if (result == CS_FAILED || result == CS_NO_DEVICE) {
capture_started_ = old_capture;
return false;
}
return true;
}
CaptureState WebRtcVideoEngine::UpdateCapturingState() {
bool capture = capture_started_ && frame_listeners_;
CaptureState result = CS_RUNNING;
if (!IsCapturing() && capture) { // Start capturing.
if (video_capturer_ == NULL) {
return CS_NO_DEVICE;
}
VideoFormat capture_format;
if (!video_capturer_->GetBestCaptureFormat(default_codec_format_,
&capture_format)) {
LOG(LS_WARNING) << "Unsupported format:"
<< " width=" << default_codec_format_.width
<< " height=" << default_codec_format_.height
<< ". Supported formats are:";
const std::vector<VideoFormat>* formats =
video_capturer_->GetSupportedFormats();
if (formats) {
for (std::vector<VideoFormat>::const_iterator i = formats->begin();
i != formats->end(); ++i) {
const VideoFormat& format = *i;
LOG(LS_WARNING) << " " << GetFourccName(format.fourcc) << ":"
<< format.width << "x" << format.height << "x"
<< format.framerate();
}
}
return CS_FAILED;
}
// Start the video capturer.
result = video_capturer_->Start(capture_format);
if (CS_RUNNING != result && CS_STARTING != result) {
LOG(LS_ERROR) << "Failed to start the video capturer";
return result;
}
} else if (IsCapturing() && !capture) { // Stop capturing.
video_capturer_->Stop();
result = CS_STOPPED;
}
return result;
}
bool WebRtcVideoEngine::IsCapturing() const {
return (video_capturer_ != NULL) && video_capturer_->IsRunning();
}
// TODO(thorcarpenter): Remove this fn, it's only used for unittests!
void WebRtcVideoEngine::OnFrameCaptured(VideoCapturer* capturer,
const CapturedFrame* frame) {
// Crop to desired aspect ratio.
int cropped_width, cropped_height;
ComputeCrop(default_codec_format_.width, default_codec_format_.height,
frame->width, abs(frame->height),
frame->pixel_width, frame->pixel_height,
frame->rotation, &cropped_width, &cropped_height);
// This CapturedFrame* will already be in I420. In the future, when
// WebRtcVideoFrame has support for independent planes, we can just attach
// to it and update the pointers when cropping.
WebRtcVideoFrame i420_frame;
if (!i420_frame.Init(frame, cropped_width, cropped_height)) {
LOG(LS_ERROR) << "Couldn't convert to I420! "
<< cropped_width << " x " << cropped_height;
return;
}
// TODO(janahan): This is the trigger point for Tx video processing.
// Once the capturer refactoring is done, we will move this into the
// capturer... it's not there right now because that image is not in the
// I420 color space.
// The clients that subscribe will obtain meta info from the frame.
// When this trigger is switched over to capturer, need to pass in the real
// ssrc.
bool drop_frame = false;
{
talk_base::CritScope cs(&signal_media_critical_);
SignalMediaFrame(kDummyVideoSsrc, &i420_frame, &drop_frame);
}
if (drop_frame) {
LOG(LS_VERBOSE) << "Media Effects dropped a frame.";
return;
}
// Send I420 frame to the local renderer.
if (local_renderer_) {
if (local_renderer_w_ != static_cast<int>(i420_frame.GetWidth()) ||
local_renderer_h_ != static_cast<int>(i420_frame.GetHeight())) {
local_renderer_->SetSize(local_renderer_w_ = i420_frame.GetWidth(),
local_renderer_h_ = i420_frame.GetHeight(), 0);
}
local_renderer_->RenderFrame(&i420_frame);
}
// Send I420 frame to the registered senders.
talk_base::CritScope cs(&channels_crit_);
for (VideoChannels::iterator it = channels_.begin();
it != channels_.end(); ++it) {
if ((*it)->sending()) (*it)->SendFrame(capturer, &i420_frame);
}
}
const std::vector<VideoCodec>& WebRtcVideoEngine::codecs() const {
return video_codecs_;
}
@ -1004,7 +891,7 @@ bool WebRtcVideoEngine::FindCodec(const VideoCodec& in) {
const std::vector<WebRtcVideoEncoderFactory::VideoCodec>& codecs =
encoder_factory_->codecs();
for (size_t j = 0; j < codecs.size(); ++j) {
VideoCodec codec(GetExternalVideoPayloadType(j),
VideoCodec codec(GetExternalVideoPayloadType(static_cast<int>(j)),
codecs[j].name, 0, 0, 0, 0);
if (codec.Matches(in))
return true;
@ -1136,7 +1023,7 @@ bool WebRtcVideoEngine::ConvertFromCricketVideoCodec(
for (size_t i = 0; i < codecs.size(); ++i) {
if (_stricmp(in_codec.name.c_str(), codecs[i].name.c_str()) == 0) {
out_codec->codecType = codecs[i].type;
out_codec->plType = GetExternalVideoPayloadType(i);
out_codec->plType = GetExternalVideoPayloadType(static_cast<int>(i));
talk_base::strcpyn(out_codec->plName, sizeof(out_codec->plName),
codecs[i].name.c_str(), codecs[i].name.length());
found = true;
@ -1267,12 +1154,13 @@ bool WebRtcVideoEngine::RebuildCodecList(const VideoCodec& in_codec) {
for (size_t i = 0; i < codecs.size(); ++i) {
if (!found)
found = (in_codec.name == codecs[i].name);
VideoCodec codec(GetExternalVideoPayloadType(i),
codecs[i].name,
codecs[i].max_width,
codecs[i].max_height,
codecs[i].max_fps,
codecs.size() + ARRAY_SIZE(kVideoCodecPrefs) - i);
VideoCodec codec(
GetExternalVideoPayloadType(static_cast<int>(i)),
codecs[i].name,
codecs[i].max_width,
codecs[i].max_height,
codecs[i].max_fps,
static_cast<int>(codecs.size() + ARRAY_SIZE(kVideoCodecPrefs) - i));
AddDefaultFeedbackParams(&codec);
video_codecs_.push_back(codec);
external_codec_names.insert(codecs[i].name);
@ -1287,7 +1175,7 @@ bool WebRtcVideoEngine::RebuildCodecList(const VideoCodec& in_codec) {
if (found && !is_external_codec) {
VideoCodec codec(pref.payload_type, pref.name,
in_codec.width, in_codec.height, in_codec.framerate,
ARRAY_SIZE(kVideoCodecPrefs) - i);
static_cast<int>(ARRAY_SIZE(kVideoCodecPrefs) - i));
if (_stricmp(kVp8PayloadName, codec.name.c_str()) == 0) {
AddDefaultFeedbackParams(&codec);
}
@ -1298,32 +1186,6 @@ bool WebRtcVideoEngine::RebuildCodecList(const VideoCodec& in_codec) {
return true;
}
bool WebRtcVideoEngine::SetCapturer(VideoCapturer* capturer) {
if (capturer == NULL) {
// Stop capturing before clearing the capturer.
if (!SetCapture(false)) {
LOG(LS_WARNING) << "Camera failed to stop";
return false;
}
ClearCapturer();
return true;
}
// Hook up signals and install the supplied capturer.
SignalCaptureStateChange.repeat(capturer->SignalStateChange);
capturer->SignalFrameCaptured.connect(this,
&WebRtcVideoEngine::OnFrameCaptured);
ClearCapturer();
video_capturer_ = capturer;
// Possibly restart the capturer if it is supposed to be running.
CaptureState result = UpdateCapturingState();
if (result == CS_FAILED || result == CS_NO_DEVICE) {
LOG(LS_WARNING) << "Camera failed to restart";
return false;
}
return true;
}
// Ignore spammy trace messages, mostly from the stats API when we haven't
// gotten RTCP info yet from the remote side.
bool WebRtcVideoEngine::ShouldIgnoreTrace(const std::string& trace) {
@ -1340,22 +1202,7 @@ bool WebRtcVideoEngine::ShouldIgnoreTrace(const std::string& trace) {
int WebRtcVideoEngine::GetNumOfChannels() {
talk_base::CritScope cs(&channels_crit_);
return channels_.size();
}
void WebRtcVideoEngine::IncrementFrameListeners() {
if (++frame_listeners_ == 1) {
UpdateCapturingState();
}
// In the unlikely event of wraparound.
ASSERT(frame_listeners_ >= 0);
}
void WebRtcVideoEngine::DecrementFrameListeners() {
if (--frame_listeners_ == 0) {
UpdateCapturingState();
}
ASSERT(frame_listeners_ >= 0);
return static_cast<int>(channels_.size());
}
void WebRtcVideoEngine::Print(webrtc::TraceLevel level, const char* trace,
@ -1384,20 +1231,6 @@ void WebRtcVideoEngine::Print(webrtc::TraceLevel level, const char* trace,
}
}
bool WebRtcVideoEngine::RegisterProcessor(
VideoProcessor* video_processor) {
talk_base::CritScope cs(&signal_media_critical_);
SignalMediaFrame.connect(video_processor,
&VideoProcessor::OnFrame);
return true;
}
bool WebRtcVideoEngine::UnregisterProcessor(
VideoProcessor* video_processor) {
talk_base::CritScope cs(&signal_media_critical_);
SignalMediaFrame.disconnect(video_processor);
return true;
}
webrtc::VideoDecoder* WebRtcVideoEngine::CreateExternalDecoder(
webrtc::VideoCodecType type) {
if (decoder_factory_ == NULL) {
@ -1442,10 +1275,6 @@ bool WebRtcVideoEngine::IsExternalEncoderCodecType(
return false;
}
void WebRtcVideoEngine::ClearCapturer() {
video_capturer_ = NULL;
}
void WebRtcVideoEngine::SetExternalDecoderFactory(
WebRtcVideoDecoderFactory* decoder_factory) {
decoder_factory_ = decoder_factory;
@ -1996,9 +1825,6 @@ bool WebRtcVideoMediaChannel::StartSend(
}
send_channel->set_sending(true);
if (!send_channel->video_capturer()) {
engine_->IncrementFrameListeners();
}
return true;
}
@ -2022,9 +1848,6 @@ bool WebRtcVideoMediaChannel::StopSend(
return false;
}
send_channel->set_sending(false);
if (!send_channel->video_capturer()) {
engine_->DecrementFrameListeners();
}
return true;
}
@ -2185,9 +2008,6 @@ bool WebRtcVideoMediaChannel::RemoveCapturer(uint32 ssrc) {
}
capturer->SignalVideoFrame.disconnect(this);
send_channel->set_video_capturer(NULL);
if (send_channel->sending()) {
engine_->IncrementFrameListeners();
}
const int64 timestamp = send_channel->local_stream_info()->time_stamp();
if (send_codec_) {
QueueBlackFrame(ssrc, timestamp, send_codec_->maxFramerate);
@ -2261,8 +2081,8 @@ bool WebRtcVideoMediaChannel::GetStats(VideoMediaInfo* info) {
sinfo.firs_rcvd = -1;
sinfo.nacks_rcvd = -1;
sinfo.rtt_ms = -1;
sinfo.frame_width = channel_stream_info->width();
sinfo.frame_height = channel_stream_info->height();
sinfo.frame_width = static_cast<int>(channel_stream_info->width());
sinfo.frame_height = static_cast<int>(channel_stream_info->height());
sinfo.framerate_input = channel_stream_info->framerate();
sinfo.framerate_sent = send_channel->encoder_observer()->framerate();
sinfo.nominal_bitrate = send_channel->encoder_observer()->bitrate();
@ -2415,9 +2235,6 @@ bool WebRtcVideoMediaChannel::SetCapturer(uint32 ssrc,
return false;
}
VideoCapturer* old_capturer = send_channel->video_capturer();
if (send_channel->sending() && !old_capturer) {
engine_->DecrementFrameListeners();
}
if (old_capturer) {
old_capturer->SignalVideoFrame.disconnect(this);
}
@ -2454,9 +2271,10 @@ void WebRtcVideoMediaChannel::OnPacketReceived(talk_base::Buffer* packet) {
which_channel = video_channel();
}
engine()->vie()->network()->ReceivedRTPPacket(which_channel,
packet->data(),
packet->length());
engine()->vie()->network()->ReceivedRTPPacket(
which_channel,
packet->data(),
static_cast<int>(packet->length()));
}
void WebRtcVideoMediaChannel::OnRtcpReceived(talk_base::Buffer* packet) {
@ -2480,9 +2298,10 @@ void WebRtcVideoMediaChannel::OnRtcpReceived(talk_base::Buffer* packet) {
if (type == kRtcpTypeSR) {
int which_channel = GetRecvChannelNum(ssrc);
if (which_channel != -1 && !IsDefaultChannel(which_channel)) {
engine_->vie()->network()->ReceivedRTCPPacket(which_channel,
packet->data(),
packet->length());
engine_->vie()->network()->ReceivedRTCPPacket(
which_channel,
packet->data(),
static_cast<int>(packet->length()));
}
}
// SR may continue RR and any RR entry may correspond to any one of the send
@ -2492,9 +2311,10 @@ void WebRtcVideoMediaChannel::OnRtcpReceived(talk_base::Buffer* packet) {
iter != send_channels_.end(); ++iter) {
WebRtcVideoChannelSendInfo* send_channel = iter->second;
int channel_id = send_channel->channel_id();
engine_->vie()->network()->ReceivedRTCPPacket(channel_id,
packet->data(),
packet->length());
engine_->vie()->network()->ReceivedRTCPPacket(
channel_id,
packet->data(),
static_cast<int>(packet->length()));
}
}
@ -2765,6 +2585,9 @@ void WebRtcVideoMediaChannel::AdaptAndSendFrame(VideoCapturer* capturer,
SendFrame(capturer, frame);
return;
}
// TODO(thorcarpenter): This is broken. One capturer registered on two ssrcs
// will not send any video to the second ssrc send channel. We should remove
// GetSendChannel(capturer) and pass in an ssrc here.
WebRtcVideoChannelSendInfo* send_channel = GetSendChannel(capturer);
if (!send_channel) {
SendFrame(capturer, frame);
@ -2819,8 +2642,10 @@ bool WebRtcVideoMediaChannel::SendFrame(
}
// Checks if we need to reset vie send codec.
if (!MaybeResetVieSendCodec(send_channel, frame->GetWidth(),
frame->GetHeight(), is_screencast, NULL)) {
if (!MaybeResetVieSendCodec(send_channel,
static_cast<int>(frame->GetWidth()),
static_cast<int>(frame->GetHeight()),
is_screencast, NULL)) {
LOG(LS_ERROR) << "MaybeResetVieSendCodec failed with "
<< frame->GetWidth() << "x" << frame->GetHeight();
return false;
@ -2843,8 +2668,8 @@ bool WebRtcVideoMediaChannel::SendFrame(
frame_i420.y_pitch = frame_out->GetYPitch();
frame_i420.u_pitch = frame_out->GetUPitch();
frame_i420.v_pitch = frame_out->GetVPitch();
frame_i420.width = frame_out->GetWidth();
frame_i420.height = frame_out->GetHeight();
frame_i420.width = static_cast<unsigned short>(frame_out->GetWidth());
frame_i420.height = static_cast<unsigned short>(frame_out->GetHeight());
int64 timestamp_ntp_ms = 0;
// TODO(justinlin): Reenable after Windows issues with clock drift are fixed.
@ -3518,7 +3343,7 @@ void WebRtcVideoMediaChannel::MaybeChangeStartBitrate(
void WebRtcVideoMediaChannel::OnMessage(talk_base::Message* msg) {
FlushBlackFrameData* black_frame_data =
static_cast<FlushBlackFrameData*> (msg->pdata);
static_cast<FlushBlackFrameData*>(msg->pdata);
FlushBlackFrame(black_frame_data->ssrc, black_frame_data->timestamp);
delete black_frame_data;
}
@ -3548,7 +3373,7 @@ void WebRtcVideoMediaChannel::QueueBlackFrame(uint32 ssrc, int64 timestamp,
FlushBlackFrameData* black_frame_data = new FlushBlackFrameData(
ssrc,
timestamp);
const int delay_ms = static_cast<int> (
const int delay_ms = static_cast<int>(
2 * cricket::VideoFormat::FpsToInterval(framerate) *
talk_base::kNumMillisecsPerSec / talk_base::kNumNanosecsPerSec);
worker_thread()->PostDelayed(delay_ms, this, 0, black_frame_data);

View File

@ -113,16 +113,8 @@ class WebRtcVideoEngine : public sigslot::has_slots<>,
const std::vector<RtpHeaderExtension>& rtp_header_extensions() const;
void SetLogging(int min_sev, const char* filter);
// If capturer is NULL, unregisters the capturer and stops capturing.
// Otherwise sets the capturer and starts capturing.
bool SetVideoCapturer(VideoCapturer* capturer);
VideoCapturer* GetVideoCapturer() const;
bool SetLocalRenderer(VideoRenderer* renderer);
bool SetCapture(bool capture);
sigslot::repeater2<VideoCapturer*, CaptureState> SignalCaptureStateChange;
CaptureState UpdateCapturingState();
bool IsCapturing() const;
void OnFrameCaptured(VideoCapturer* capturer, const CapturedFrame* frame);
// Set the VoiceEngine for A/V sync. This can only be called before Init.
bool SetVoiceEngine(WebRtcVoiceEngine* voice_engine);
@ -137,9 +129,6 @@ class WebRtcVideoEngine : public sigslot::has_slots<>,
// Enable the render module with timing control.
bool EnableTimedRender();
bool RegisterProcessor(VideoProcessor* video_processor);
bool UnregisterProcessor(VideoProcessor* video_processor);
// Returns an external decoder for the given codec type. The return value
// can be NULL if decoder factory is not given or it does not support the
// codec type. The caller takes the ownership of the returned object.
@ -175,9 +164,6 @@ class WebRtcVideoEngine : public sigslot::has_slots<>,
bool ShouldIgnoreTrace(const std::string& trace);
int GetNumOfChannels();
void IncrementFrameListeners();
void DecrementFrameListeners();
VideoFormat GetStartCaptureFormat() const { return default_codec_format_; }
talk_base::CpuMonitor* cpu_monitor() { return cpu_monitor_.get(); }
@ -209,11 +195,9 @@ class WebRtcVideoEngine : public sigslot::has_slots<>,
void SetTraceFilter(int filter);
void SetTraceOptions(const std::string& options);
bool InitVideoEngine();
bool SetCapturer(VideoCapturer* capturer);
// webrtc::TraceCallback implementation.
virtual void Print(webrtc::TraceLevel level, const char* trace, int length);
void ClearCapturer();
// WebRtcVideoEncoderFactory::Observer implementation.
virtual void OnCodecsAvailable();
@ -234,8 +218,6 @@ class WebRtcVideoEngine : public sigslot::has_slots<>,
talk_base::CriticalSection channels_crit_;
VideoChannels channels_;
VideoCapturer* video_capturer_;
int frame_listeners_;
bool capture_started_;
int local_renderer_w_;
int local_renderer_h_;

View File

@ -202,11 +202,8 @@ class WebRtcVideoMediaChannelTest
virtual cricket::VideoCodec DefaultCodec() { return kVP8Codec; }
virtual void SetUp() {
Base::SetUp();
// Need to start the capturer to allow us to pump in frames.
engine_.SetCapture(true);
}
virtual void TearDown() {
engine_.SetCapture(false);
Base::TearDown();
}
};
@ -1356,41 +1353,6 @@ TEST_F(WebRtcVideoEngineTest, CreateChannel) {
delete channel;
}
TEST_F(WebRtcVideoMediaChannelTest, TestVideoProcessor_DropFrames) {
// Connect a video processor.
cricket::FakeMediaProcessor vp;
vp.set_drop_frames(false);
EXPECT_TRUE(engine_.RegisterProcessor(&vp));
EXPECT_EQ(0, vp.dropped_frame_count());
// Send the first frame with default codec.
int packets = NumRtpPackets();
cricket::VideoCodec codec(DefaultCodec());
EXPECT_TRUE(SetOneCodec(codec));
EXPECT_TRUE(SetSend(true));
EXPECT_TRUE(channel_->SetRender(true));
EXPECT_EQ(0, renderer_.num_rendered_frames());
EXPECT_TRUE(WaitAndSendFrame(30));
EXPECT_FRAME_WAIT(1, codec.width, codec.height, kTimeout);
// Verify frame was sent.
EXPECT_TRUE_WAIT(NumRtpPackets() > packets, kTimeout);
packets = NumRtpPackets();
EXPECT_EQ(0, vp.dropped_frame_count());
// Send another frame and expect it to be sent.
EXPECT_TRUE(WaitAndSendFrame(30));
EXPECT_FRAME_WAIT(2, codec.width, codec.height, kTimeout);
EXPECT_TRUE_WAIT(NumRtpPackets() > packets, kTimeout);
packets = NumRtpPackets();
EXPECT_EQ(0, vp.dropped_frame_count());
// Attempt to send a frame and expect it to be dropped.
vp.set_drop_frames(true);
EXPECT_TRUE(WaitAndSendFrame(30));
DrainOutgoingPackets();
EXPECT_FRAME_WAIT(2, codec.width, codec.height, kTimeout);
EXPECT_EQ(packets, NumRtpPackets());
EXPECT_EQ(1, vp.dropped_frame_count());
// Disconnect video processor.
EXPECT_TRUE(engine_.UnregisterProcessor(&vp));
}
TEST_F(WebRtcVideoMediaChannelTest, SetRecvCodecs) {
std::vector<cricket::VideoCodec> codecs;
codecs.push_back(kVP8Codec);
@ -1433,7 +1395,7 @@ TEST_F(WebRtcVideoMediaChannelTest, SendManyResizeOnce) {
}
TEST_F(WebRtcVideoMediaChannelTest, SendVp8HdAndReceiveAdaptedVp8Vga) {
EXPECT_TRUE(engine_.SetVideoCapturer(NULL));
EXPECT_TRUE(channel_->SetCapturer(kSsrc, NULL));
channel_->UpdateAspectRatio(1280, 720);
video_capturer_.reset(new cricket::FakeVideoCapturer);
const std::vector<cricket::VideoFormat>* formats =
@ -1502,33 +1464,6 @@ TEST_F(WebRtcVideoMediaChannelTest, AddRemoveSendStreams) {
Base::AddRemoveSendStreams();
}
TEST_F(WebRtcVideoMediaChannelTest, SetVideoCapturer) {
// Use 123 to verify there's no assumption to the module id
FakeWebRtcVideoCaptureModule* vcm =
new FakeWebRtcVideoCaptureModule(NULL, 123);
talk_base::scoped_ptr<cricket::WebRtcVideoCapturer> capturer(
new cricket::WebRtcVideoCapturer);
EXPECT_TRUE(capturer->Init(vcm));
EXPECT_TRUE(engine_.SetVideoCapturer(capturer.get()));
EXPECT_FALSE(engine_.IsCapturing());
EXPECT_TRUE(engine_.SetCapture(true));
cricket::VideoCodec codec(DefaultCodec());
EXPECT_TRUE(SetOneCodec(codec));
EXPECT_TRUE(channel_->SetSend(true));
EXPECT_TRUE(engine_.IsCapturing());
EXPECT_EQ(engine_.default_codec_format().width, vcm->cap().width);
EXPECT_EQ(engine_.default_codec_format().height, vcm->cap().height);
EXPECT_EQ(cricket::VideoFormat::IntervalToFps(
engine_.default_codec_format().interval),
vcm->cap().maxFPS);
EXPECT_EQ(webrtc::kVideoI420, vcm->cap().rawType);
EXPECT_EQ(webrtc::kVideoCodecUnknown, vcm->cap().codecType);
EXPECT_TRUE(engine_.SetVideoCapturer(NULL));
EXPECT_FALSE(engine_.IsCapturing());
}
TEST_F(WebRtcVideoMediaChannelTest, SimulateConference) {
Base::SimulateConference();
}

View File

@ -62,8 +62,8 @@ void FrameBuffer::SetData(char* data, size_t length) {
data_.reset(data);
length_ = length;
uint8_t* new_memory = reinterpret_cast<uint8_t*>(data);
uint32_t new_length = length;
uint32_t new_size = length;
uint32_t new_length = static_cast<int>(length);
uint32_t new_size = static_cast<int>(length);
video_frame_.Swap(new_memory, new_length, new_size);
}
@ -150,7 +150,7 @@ const uint8* WebRtcVideoFrame::GetUPlane() const {
const uint8* WebRtcVideoFrame::GetVPlane() const {
uint8_t* buffer = frame()->Buffer();
if (buffer) {
int uv_size = GetChromaSize();
int uv_size = static_cast<int>(GetChromaSize());
buffer += frame()->Width() * frame()->Height() + uv_size;
}
return buffer;
@ -172,7 +172,7 @@ uint8* WebRtcVideoFrame::GetUPlane() {
uint8* WebRtcVideoFrame::GetVPlane() {
uint8_t* buffer = frame()->Buffer();
if (buffer) {
int uv_size = GetChromaSize();
int uv_size = static_cast<int>(GetChromaSize());
buffer += frame()->Width() * frame()->Height() + uv_size;
}
return buffer;
@ -192,7 +192,7 @@ VideoFrame* WebRtcVideoFrame::Copy() const {
}
bool WebRtcVideoFrame::MakeExclusive() {
const int length = video_buffer_->length();
const int length = static_cast<int>(video_buffer_->length());
RefCountedBuffer* exclusive_buffer = new RefCountedBuffer(length);
memcpy(exclusive_buffer->data(), video_buffer_->data(), length);
Attach(exclusive_buffer, length, frame()->Width(), frame()->Height(),
@ -228,7 +228,10 @@ size_t WebRtcVideoFrame::ConvertToRgbBuffer(uint32 to_fourcc, uint8* buffer,
if (libyuv::ConvertFromI420(GetYPlane(), GetYPitch(), GetUPlane(),
GetUPitch(), GetVPlane(), GetVPitch(), buffer,
stride_rgb, width, height, to_fourcc)) {
stride_rgb,
static_cast<int>(width),
static_cast<int>(height),
to_fourcc)) {
LOG(LS_WARNING) << "RGB type not supported: " << to_fourcc;
return 0; // 0 indicates error
}

View File

@ -2344,9 +2344,10 @@ void WebRtcVoiceMediaChannel::OnPacketReceived(talk_base::Buffer* packet) {
}
// Pass it off to the decoder.
engine()->voe()->network()->ReceivedRTPPacket(which_channel,
packet->data(),
packet->length());
engine()->voe()->network()->ReceivedRTPPacket(
which_channel,
packet->data(),
static_cast<unsigned int>(packet->length()));
}
void WebRtcVoiceMediaChannel::OnRtcpReceived(talk_base::Buffer* packet) {
@ -2357,9 +2358,10 @@ void WebRtcVoiceMediaChannel::OnRtcpReceived(talk_base::Buffer* packet) {
which_channel = voe_channel();
}
engine()->voe()->network()->ReceivedRTCPPacket(which_channel,
packet->data(),
packet->length());
engine()->voe()->network()->ReceivedRTCPPacket(
which_channel,
packet->data(),
static_cast<unsigned int>(packet->length()));
}
bool WebRtcVoiceMediaChannel::MuteStream(uint32 ssrc, bool muted) {
@ -2543,7 +2545,7 @@ bool WebRtcVoiceMediaChannel::GetStats(VoiceMediaInfo* info) {
rinfo.jitter_buffer_ms = ns.currentBufferSize;
rinfo.jitter_buffer_preferred_ms = ns.preferredBufferSize;
rinfo.expand_rate =
static_cast<float> (ns.currentExpandRate) / (1 << 14);
static_cast<float>(ns.currentExpandRate) / (1 << 14);
}
if (engine()->voe()->sync()) {
int playout_buffer_delay_ms = 0;
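The expand-rate line above is a fixed-point unpack: NetEq reports currentExpandRate as a Q14 fraction, i.e. the real ratio scaled by 2^14 = 16384, so dividing by (1 << 14) recovers a float in [0, 1]. For instance:
#include <cstdint>
// Q14 fixed point: raw value = fraction * 2^14. A raw reading of 1638
// therefore decodes to 1638 / 16384, roughly 0.1 (10% expanded audio).
float DecodeQ14(uint16_t raw) {
  return static_cast<float>(raw) / (1 << 14);
}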
@ -2757,7 +2759,7 @@ VoiceMediaChannel::Error
int WebRtcSoundclipStream::Read(void *buf, int len) {
size_t res = 0;
mem_.Read(buf, len, &res, NULL);
return res;
return static_cast<int>(res);
}
int WebRtcSoundclipStream::Rewind() {

View File

@ -206,7 +206,8 @@ class DtlsTestClient : public sigslot::has_slots<> {
// against, and make sure that it doesn't look like DTLS.
memset(packet.get(), sent & 0xff, size);
packet[0] = (srtp) ? 0x80 : 0x00;
talk_base::SetBE32(packet.get() + kPacketNumOffset, sent);
talk_base::SetBE32(packet.get() + kPacketNumOffset,
static_cast<uint32>(sent));
// Only set the bypass flag if we've activated DTLS.
int flags = (identity_.get() && srtp) ? cricket::PF_SRTP_BYPASS : 0;
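The 0x80/0x00 first bytes above follow the RFC 5764 demultiplexing rule that DTLS-SRTP relies on: the receiver classifies each packet by its leading octet, with 0..3 for STUN, 20..63 for DTLS, and 128..191 for RTP/RTCP. So 0x80 makes the payload look like (S)RTP, 0x00 like non-RTP traffic, and neither can be mistaken for DTLS. A sketch of the classifier:
enum PacketClass { PKT_STUN, PKT_DTLS, PKT_RTP, PKT_UNKNOWN };
// First-byte demux per RFC 5764, section 5.1.2.
PacketClass Classify(unsigned char b) {
  if (b <= 3) return PKT_STUN;
  if (b >= 20 && b <= 63) return PKT_DTLS;
  if (b >= 128 && b <= 191) return PKT_RTP;
  return PKT_UNKNOWN;
}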
@ -342,7 +343,7 @@ class DtlsTransportChannelTest : public testing::Test {
}
void SetChannelCount(size_t channel_ct) {
channel_ct_ = channel_ct;
channel_ct_ = static_cast<int>(channel_ct);
}
void PrepareDtls(bool c1, bool c2) {
if (c1) {

View File

@ -175,7 +175,7 @@ class FakeTransportChannel : public TransportChannelImpl,
} else {
talk_base::Thread::Current()->Send(this, 0, packet);
}
return len;
return static_cast<int>(len);
}
virtual int SetOption(talk_base::Socket::Option opt, int value) {
return true;

View File

@ -473,7 +473,7 @@ bool Port::ParseStunUsername(const StunMessage* stun_msg,
return false;
}
} else if (IsGoogleIce()) {
int remote_frag_len = username_attr_str.size();
int remote_frag_len = static_cast<int>(username_attr_str.size());
remote_frag_len -= static_cast<int>(username_fragment().size());
if (remote_frag_len < 0)
return false;
@ -752,8 +752,10 @@ class ConnectionRequest : public StunRequest {
// connection_ already holds this ping, so subtract one from count.
if (connection_->port()->send_retransmit_count_attribute()) {
request->AddAttribute(new StunUInt32Attribute(STUN_ATTR_RETRANSMIT_COUNT,
connection_->pings_since_last_response_.size() - 1));
request->AddAttribute(new StunUInt32Attribute(
STUN_ATTR_RETRANSMIT_COUNT,
static_cast<uint32>(
connection_->pings_since_last_response_.size() - 1)));
}
// Adding ICE-specific attributes to the STUN request message.

View File

@ -187,7 +187,7 @@ class TestPort : public Port {
last_stun_buf_.reset(buf);
last_stun_msg_.reset(msg);
}
return size;
return static_cast<int>(size);
}
virtual int SetOption(talk_base::Socket::Option opt, int value) {
return 0;
@ -789,10 +789,10 @@ class FakeAsyncPacketSocket : public AsyncPacketSocket {
// Send a packet.
virtual int Send(const void *pv, size_t cb) {
return cb;
return static_cast<int>(cb);
}
virtual int SendTo(const void *pv, size_t cb, const SocketAddress& addr) {
return cb;
return static_cast<int>(cb);
}
virtual int Close() {
return 0;
@ -2258,4 +2258,3 @@ TEST_F(PortTest, TestIceLiteConnectivity) {
EXPECT_TRUE(msg->GetByteString(STUN_ATTR_USE_CANDIDATE) != NULL);
ch1.Stop();
}

View File

@ -115,7 +115,7 @@ void PortAllocatorSessionMuxer::OnSessionProxyDestroyed(
}
void PortAllocatorSessionMuxer::OnMessage(talk_base::Message *pmsg) {
ProxyObjData* proxy = static_cast<ProxyObjData*> (pmsg->pdata);
ProxyObjData* proxy = static_cast<ProxyObjData*>(pmsg->pdata);
switch (pmsg->message_id) {
case MSG_SEND_ALLOCATION_DONE:
SendAllocationDone_w(proxy->data());

View File

@ -63,7 +63,7 @@ class TestSessionChannel : public sigslot::has_slots<> {
void OnCandidatesReady(PortAllocatorSession* session,
const std::vector<Candidate>& candidates) {
EXPECT_EQ(proxy_session_, session);
candidates_count_ += candidates.size();
candidates_count_ += static_cast<int>(candidates.size());
}
void OnCandidatesAllocationDone(PortAllocatorSession* session) {
EXPECT_EQ(proxy_session_, session);

View File

@ -428,7 +428,7 @@ uint32 PseudoTcp::GetBytesInFlight() const {
uint32 PseudoTcp::GetBytesBufferedNotSent() const {
size_t buffered_bytes = 0;
m_sbuf.GetBuffered(&buffered_bytes);
return m_snd_una + buffered_bytes - m_snd_nxt;
return static_cast<uint32>(m_snd_una + buffered_bytes - m_snd_nxt);
}
uint32 PseudoTcp::GetRoundTripTimeEstimateMs() const {
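GetBytesBufferedNotSent is straight sequence-space arithmetic: m_snd_una is the oldest unacknowledged byte, m_snd_nxt the next byte to transmit, and the send buffer holds everything from m_snd_una onward, so everything queued but not yet on the wire is (m_snd_una + buffered) - m_snd_nxt. A standalone restatement:
#include <cstddef>
#include <cstdint>
// snd_una: oldest unacked sequence number; snd_nxt: next to transmit;
// buffered: bytes currently held in the send buffer (from snd_una on).
uint32_t BytesBufferedNotSent(uint32_t snd_una, uint32_t snd_nxt,
                              size_t buffered) {
  // In flight = snd_nxt - snd_una, so not-yet-sent is what remains.
  return static_cast<uint32_t>(snd_una + buffered - snd_nxt);
}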
@ -461,15 +461,16 @@ int PseudoTcp::Recv(char* buffer, size_t len) {
if (uint32(available_space) - m_rcv_wnd >=
talk_base::_min<uint32>(m_rbuf_len / 2, m_mss)) {
bool bWasClosed = (m_rcv_wnd == 0); // !?! Not sure about this was closed business
m_rcv_wnd = available_space;
// TODO(jbeda): !?! Not sure about this "was closed" business.
bool bWasClosed = (m_rcv_wnd == 0);
m_rcv_wnd = static_cast<uint32>(available_space);
if (bWasClosed) {
attemptSend(sfImmediateAck);
}
}
return read;
return static_cast<int>(read);
}
int PseudoTcp::Send(const char* buffer, size_t len) {
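The guard in Recv above is classic silly-window-syndrome avoidance: the space freed since the last advertisement (available_space - m_rcv_wnd) must reach at least min(m_rbuf_len / 2, m_mss) before the window is reopened and an immediate ACK considered, so the peer never sees a trickle of tiny window updates. Roughly:
#include <algorithm>
#include <cstdint>
// Reopen the receive window only once enough space has been freed to be
// worth announcing: a full MSS, or half the receive buffer if smaller.
bool ShouldUpdateWindow(uint32_t freed, uint32_t rbuf_len, uint32_t mss) {
  return freed >= std::min(rbuf_len / 2, mss);
}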
@ -516,18 +517,19 @@ uint32 PseudoTcp::queue(const char* data, uint32 len, bool bCtrl) {
// We can concatenate data if the last segment is the same type
// (control v. regular data), and has not been transmitted yet
if (!m_slist.empty() && (m_slist.back().bCtrl == bCtrl) && (m_slist.back().xmit == 0)) {
if (!m_slist.empty() && (m_slist.back().bCtrl == bCtrl) &&
(m_slist.back().xmit == 0)) {
m_slist.back().len += len;
} else {
size_t snd_buffered = 0;
m_sbuf.GetBuffered(&snd_buffered);
SSegment sseg(m_snd_una + snd_buffered, len, bCtrl);
SSegment sseg(static_cast<uint32>(m_snd_una + snd_buffered), len, bCtrl);
m_slist.push_back(sseg);
}
size_t written = 0;
m_sbuf.Write(data, len, &written, NULL);
return written;
return static_cast<uint32>(written);
}
IPseudoTcpNotify::WriteResult PseudoTcp::packet(uint32 seq, uint8 flags,
@ -1184,8 +1186,8 @@ PseudoTcp::queueConnectMessage() {
buf.WriteUInt8(1);
buf.WriteUInt8(m_rwnd_scale);
}
m_snd_wnd = buf.Length();
queue(buf.Data(), buf.Length(), true);
m_snd_wnd = static_cast<uint32>(buf.Length());
queue(buf.Data(), static_cast<uint32>(buf.Length()), true);
}
void
@ -1290,7 +1292,7 @@ PseudoTcp::resizeReceiveBuffer(uint32 new_size) {
size_t available_space = 0;
m_rbuf.GetWriteRemaining(&available_space);
m_rcv_wnd = available_space;
m_rcv_wnd = static_cast<uint32>(available_space);
}
} // namespace cricket

View File

@ -319,7 +319,7 @@ class PseudoTcpTest : public PseudoTcpTestBase {
LOG(LS_VERBOSE) << "Flow Controlled";
}
} else {
sent = tosend = 0;
sent = static_cast<int>(tosend = 0);
}
} while (sent > 0);
*done = (tosend == 0);
@ -439,7 +439,7 @@ class PseudoTcpTestPingPong : public PseudoTcpTestBase {
LOG(LS_VERBOSE) << "Flow Controlled";
}
} else {
sent = tosend = 0;
sent = static_cast<int>(tosend = 0);
}
} while (sent > 0);
}
@ -507,11 +507,11 @@ class PseudoTcpTestReceiveWindow : public PseudoTcpTestBase {
}
uint32 EstimateReceiveWindowSize() const {
return recv_position_[0];
return static_cast<uint32>(recv_position_[0]);
}
uint32 EstimateSendWindowSize() const {
return send_position_[0] - recv_position_[0];
return static_cast<uint32>(send_position_[0] - recv_position_[0]);
}
private:
@ -566,12 +566,13 @@ class PseudoTcpTestReceiveWindow : public PseudoTcpTestBase {
LOG(LS_VERBOSE) << "Flow Controlled";
}
} else {
sent = tosend = 0;
sent = static_cast<int>(tosend = 0);
}
} while (sent > 0);
// At this point, we've filled up the available space in the send queue.
int message_queue_size = talk_base::Thread::Current()->size();
int message_queue_size =
static_cast<int>(talk_base::Thread::Current()->size());
// The message queue will always have at least 2 messages, an RCLOCK and
// an LCLOCK, since they are added back on the delay queue at the same time
// they are pulled off and therefore are never really removed.

View File

@ -349,7 +349,7 @@ int RelayPort::SendTo(const void* data, size_t size,
}
// The caller of the function is expecting the number of user data bytes,
// rather than the size of the packet.
return size;
return static_cast<int>(size);
}
int RelayPort::SetOption(talk_base::Socket::Option opt, int value) {

View File

@ -162,7 +162,7 @@ void RelayServer::RemoveInternalServerSocket(
}
int RelayServer::GetConnectionCount() const {
return connections_.size();
return static_cast<int>(connections_.size());
}
talk_base::SocketAddressPair RelayServer::GetConnection(int connection) const {

View File

@ -95,12 +95,12 @@ class RelayServerTest : public testing::Test {
void Send1(const StunMessage* msg) {
talk_base::ByteBuffer buf;
msg->Write(&buf);
SendRaw1(buf.Data(), buf.Length());
SendRaw1(buf.Data(), static_cast<int>(buf.Length()));
}
void Send2(const StunMessage* msg) {
talk_base::ByteBuffer buf;
msg->Write(&buf);
SendRaw2(buf.Data(), buf.Length());
SendRaw2(buf.Data(), static_cast<int>(buf.Length()));
}
void SendRaw1(const char* data, int len) {
return Send(client1_.get(), data, len, server_int_addr);
@ -192,7 +192,7 @@ class RelayServerTest : public testing::Test {
TEST_F(RelayServerTest, TestBadRequest) {
talk_base::scoped_ptr<StunMessage> res;
SendRaw1(bad, std::strlen(bad));
SendRaw1(bad, static_cast<int>(std::strlen(bad)));
res.reset(Receive1());
ASSERT_TRUE(!res);
@ -335,7 +335,7 @@ TEST_F(RelayServerTest, TestRemoteBadRequest) {
Allocate();
Bind();
SendRaw1(bad, std::strlen(bad));
SendRaw1(bad, static_cast<int>(std::strlen(bad)));
EXPECT_TRUE(Receive1() == NULL);
EXPECT_TRUE(Receive2() == NULL);
}
@ -481,7 +481,7 @@ TEST_F(RelayServerTest, TestSendRaw) {
Send1(req.get());
EXPECT_EQ(msg1, ReceiveRaw2());
SendRaw2(msg2, std::strlen(msg2));
SendRaw2(msg2, static_cast<int>(std::strlen(msg2)));
res.reset(Receive1());
ASSERT_TRUE(res);
@ -534,6 +534,6 @@ TEST_F(RelayServerTest, TestExpiration) {
EXPECT_EQ("Operation Not Supported", err->reason());
// Also verify that traffic from the external client is ignored.
SendRaw2(msg2, std::strlen(msg2));
SendRaw2(msg2, static_cast<int>(std::strlen(msg2)));
EXPECT_TRUE(ReceiveRaw1().empty());
}

View File

@ -921,7 +921,7 @@ class TestClient : public sigslot::has_slots<> {
}
uint32 sent_stanza_count() const {
return sent_stanzas.size();
return static_cast<uint32>(sent_stanzas.size());
}
const buzz::XmlElement* stanza() const {

View File

@ -98,7 +98,7 @@ bool StunMessage::AddAttribute(StunAttribute* attr) {
if (attr_length % 4 != 0) {
attr_length += (4 - (attr_length % 4));
}
length_ += attr_length + 4;
length_ += static_cast<uint16>(attr_length + 4);
return true;
}
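The adjustment in AddAttribute mirrors the STUN wire format (RFC 5389, section 15): attribute values are padded to a 4-byte boundary, and the message length field counts the padded value plus each attribute's 4-byte type/length header. Worked out:
#include <cstddef>
// A value of length 7 pads to 8 and, with the 4-byte attribute header,
// contributes 12 bytes to the STUN message length.
size_t AttributeWireLength(size_t value_length) {
  size_t padded = (value_length + 3) & ~static_cast<size_t>(3);
  return padded + 4;
}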
@ -203,7 +203,8 @@ bool StunMessage::ValidateMessageIntegrity(const char* data, size_t size,
// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
// |0 0| STUN Message Type | Message Length |
// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
talk_base::SetBE16(temp_data.get() + 2, new_adjusted_len);
talk_base::SetBE16(temp_data.get() + 2,
static_cast<uint16>(new_adjusted_len));
}
char hmac[kStunMessageIntegritySize];
@ -238,8 +239,8 @@ bool StunMessage::AddMessageIntegrity(const char* key,
if (!Write(&buf))
return false;
int msg_len_for_hmac = buf.Length() -
kStunAttributeHeaderSize - msg_integrity_attr->length();
int msg_len_for_hmac = static_cast<int>(
buf.Length() - kStunAttributeHeaderSize - msg_integrity_attr->length());
char hmac[kStunMessageIntegritySize];
size_t ret = talk_base::ComputeHmac(talk_base::DIGEST_SHA_1,
key, keylen,
@ -299,8 +300,8 @@ bool StunMessage::AddFingerprint() {
if (!Write(&buf))
return false;
int msg_len_for_crc32 = buf.Length() -
kStunAttributeHeaderSize - fingerprint_attr->length();
int msg_len_for_crc32 = static_cast<int>(
buf.Length() - kStunAttributeHeaderSize - fingerprint_attr->length());
uint32 c = talk_base::ComputeCrc32(buf.Data(), msg_len_for_crc32);
// Insert the correct CRC-32, XORed with a constant, into the attribute.
@ -380,7 +381,7 @@ bool StunMessage::Write(ByteBuffer* buf) const {
for (size_t i = 0; i < attrs_->size(); ++i) {
buf->WriteUInt16((*attrs_)[i]->type());
buf->WriteUInt16((*attrs_)[i]->length());
buf->WriteUInt16(static_cast<uint16>((*attrs_)[i]->length()));
if (!(*attrs_)[i]->Write(buf))
return false;
}
@ -408,7 +409,8 @@ StunAttributeValueType StunMessage::GetAttributeValueType(int type) const {
StunAttribute* StunMessage::CreateAttribute(int type, size_t length) /*const*/ {
StunAttributeValueType value_type = GetAttributeValueType(type);
return StunAttribute::Create(value_type, type, length, this);
return StunAttribute::Create(value_type, type,
static_cast<uint16>(length), this);
}
const StunAttribute* StunMessage::GetAttribute(int type) const {
@ -767,7 +769,7 @@ bool StunByteStringAttribute::Write(ByteBuffer* buf) const {
void StunByteStringAttribute::SetBytes(char* bytes, size_t length) {
delete [] bytes_;
bytes_ = bytes;
SetLength(length);
SetLength(static_cast<uint16>(length));
}
StunErrorCodeAttribute::StunErrorCodeAttribute(uint16 type, int code,

View File

@ -924,7 +924,7 @@ TEST_F(StunTest, WriteMessageWithIPv6AddressAttribute) {
talk_base::ByteBuffer out;
EXPECT_TRUE(msg.Write(&out));
ASSERT_EQ(out.Length(), sizeof(kStunMessageWithIPv6MappedAddress));
int len1 = out.Length();
int len1 = static_cast<int>(out.Length());
std::string bytes;
out.ReadString(&bytes, len1);
ASSERT_EQ(0, std::memcmp(bytes.c_str(),
@ -955,7 +955,7 @@ TEST_F(StunTest, WriteMessageWithIPv4AddressAttribute) {
talk_base::ByteBuffer out;
EXPECT_TRUE(msg.Write(&out));
ASSERT_EQ(out.Length(), sizeof(kStunMessageWithIPv4MappedAddress));
int len1 = out.Length();
int len1 = static_cast<int>(out.Length());
std::string bytes;
out.ReadString(&bytes, len1);
ASSERT_EQ(0, std::memcmp(bytes.c_str(),
@ -986,7 +986,7 @@ TEST_F(StunTest, WriteMessageWithIPv6XorAddressAttribute) {
talk_base::ByteBuffer out;
EXPECT_TRUE(msg.Write(&out));
ASSERT_EQ(out.Length(), sizeof(kStunMessageWithIPv6XorMappedAddress));
int len1 = out.Length();
int len1 = static_cast<int>(out.Length());
std::string bytes;
out.ReadString(&bytes, len1);
ASSERT_EQ(0, std::memcmp(bytes.c_str(),
@ -1017,7 +1017,7 @@ TEST_F(StunTest, WriteMessageWithIPv4XoreAddressAttribute) {
talk_base::ByteBuffer out;
EXPECT_TRUE(msg.Write(&out));
ASSERT_EQ(out.Length(), sizeof(kStunMessageWithIPv4XorMappedAddress));
int len1 = out.Length();
int len1 = static_cast<int>(out.Length());
std::string bytes;
out.ReadString(&bytes, len1);
ASSERT_EQ(0, std::memcmp(bytes.c_str(),

View File

@ -58,7 +58,7 @@ class StunServerTest : public testing::Test {
void Send(const StunMessage& msg) {
talk_base::ByteBuffer buf;
msg.Write(&buf);
Send(buf.Data(), buf.Length());
Send(buf.Data(), static_cast<int>(buf.Length()));
}
void Send(const char* buf, int len) {
client_->SendTo(buf, len, server_addr);
@ -113,7 +113,7 @@ TEST_F(StunServerTest, TestBad) {
const char* bad = "this is a completely nonsensical message whose only "
"purpose is to make the parser go 'ack'. it doesn't "
"look anything like a normal stun message";
Send(bad, std::strlen(bad));
Send(bad, static_cast<int>(std::strlen(bad)));
StunMessage* msg = Receive();
ASSERT_TRUE(msg == NULL);

View File

@ -332,7 +332,7 @@ int TurnPort::SendTo(const void* data, size_t size,
// The caller of the function is expecting the number of user data bytes,
// rather than the size of the packet.
return size;
return static_cast<int>(size);
}
void TurnPort::OnReadPacket(talk_base::AsyncPacketSocket* socket,
@ -901,7 +901,7 @@ int TurnEntry::Send(const void* data, size_t size, bool payload) {
} else {
// If the channel is bound, we can send the data as a Channel Message.
buf.WriteUInt16(channel_id_);
buf.WriteUInt16(size);
buf.WriteUInt16(static_cast<uint16>(size));
buf.WriteBytes(reinterpret_cast<const char*>(data), size);
}
return port_->Send(buf.Data(), buf.Length());

View File

@ -847,7 +847,7 @@ void TurnServer::Allocation::OnExternalPacket(
// There is a channel bound to this address. Send as a channel message.
talk_base::ByteBuffer buf;
buf.WriteUInt16(channel->id());
buf.WriteUInt16(size);
buf.WriteUInt16(static_cast<uint16>(size));
buf.WriteBytes(data, size);
server_->Send(&conn_, buf);
} else if (HasPermission(addr.ipaddr())) {

View File

@ -615,7 +615,7 @@ void BaseChannel::SetReadyToSend(TransportChannel* channel, bool ready) {
bool BaseChannel::PacketIsRtcp(const TransportChannel* channel,
const char* data, size_t len) {
return (channel == rtcp_transport_channel_ ||
rtcp_mux_filter_.DemuxRtcp(data, len));
rtcp_mux_filter_.DemuxRtcp(data, static_cast<int>(len)));
}
bool BaseChannel::SendPacket(bool rtcp, talk_base::Buffer* packet) {
@ -669,9 +669,10 @@ bool BaseChannel::SendPacket(bool rtcp, talk_base::Buffer* packet) {
if (srtp_filter_.IsActive()) {
bool res;
char* data = packet->data();
int len = packet->length();
int len = static_cast<int>(packet->length());
if (!rtcp) {
res = srtp_filter_.ProtectRtp(data, len, packet->capacity(), &len);
res = srtp_filter_.ProtectRtp(data, len,
static_cast<int>(packet->capacity()), &len);
if (!res) {
int seq_num = -1;
uint32 ssrc = 0;
@ -683,7 +684,9 @@ bool BaseChannel::SendPacket(bool rtcp, talk_base::Buffer* packet) {
return false;
}
} else {
res = srtp_filter_.ProtectRtcp(data, len, packet->capacity(), &len);
res = srtp_filter_.ProtectRtcp(data, len,
static_cast<int>(packet->capacity()),
&len);
if (!res) {
int type = -1;
GetRtcpType(data, len, &type);
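ProtectRtp's extra capacity argument exists because SRTP protection happens in place and grows the packet: the payload is encrypted where it sits and an authentication tag is appended (10 bytes for the AES_CM_128_HMAC_SHA1_80 suite, 4 for the _32 variants), with the new length reported through the out-parameter. The send buffer therefore needs headroom, which amounts to this check (a sketch of the requirement, not the filter's actual internals):
const int kSrtpAuthTagLen80 = 10;  // AES_CM_128_HMAC_SHA1_80
// Mirrors the capacity requirement behind the
// static_cast<int>(packet->capacity()) argument above.
bool HasSrtpHeadroom(int rtp_len, int capacity, int tag_len) {
  return capacity >= rtp_len + tag_len;
}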
@ -761,7 +764,7 @@ void BaseChannel::HandlePacket(bool rtcp, talk_base::Buffer* packet) {
// Unprotect the packet, if needed.
if (srtp_filter_.IsActive()) {
char* data = packet->data();
int len = packet->length();
int len = static_cast<int>(packet->length());
bool res;
if (!rtcp) {
res = srtp_filter_.UnprotectRtp(data, len, &len);
@ -1009,15 +1012,21 @@ bool BaseChannel::SetupDtlsSrtp(bool rtcp_channel) {
}
if (rtcp_channel) {
ret = srtp_filter_.SetRtcpParams(selected_cipher,
&(*send_key)[0], send_key->size(),
selected_cipher,
&(*recv_key)[0], recv_key->size());
ret = srtp_filter_.SetRtcpParams(
selected_cipher,
&(*send_key)[0],
static_cast<int>(send_key->size()),
selected_cipher,
&(*recv_key)[0],
static_cast<int>(recv_key->size()));
} else {
ret = srtp_filter_.SetRtpParams(selected_cipher,
&(*send_key)[0], send_key->size(),
selected_cipher,
&(*recv_key)[0], recv_key->size());
ret = srtp_filter_.SetRtpParams(
selected_cipher,
&(*send_key)[0],
static_cast<int>(send_key->size()),
selected_cipher,
&(*recv_key)[0],
static_cast<int>(recv_key->size()));
}
if (!ret)

View File

@ -377,64 +377,78 @@ class ChannelTest : public testing::Test, public sigslot::has_slots<> {
}
bool SendRtp1() {
return media_channel1_->SendRtp(rtp_packet_.c_str(), rtp_packet_.size());
return media_channel1_->SendRtp(rtp_packet_.c_str(),
static_cast<int>(rtp_packet_.size()));
}
bool SendRtp2() {
return media_channel2_->SendRtp(rtp_packet_.c_str(), rtp_packet_.size());
return media_channel2_->SendRtp(rtp_packet_.c_str(),
static_cast<int>(rtp_packet_.size()));
}
bool SendRtcp1() {
return media_channel1_->SendRtcp(rtcp_packet_.c_str(), rtcp_packet_.size());
return media_channel1_->SendRtcp(rtcp_packet_.c_str(),
static_cast<int>(rtcp_packet_.size()));
}
bool SendRtcp2() {
return media_channel2_->SendRtcp(rtcp_packet_.c_str(), rtcp_packet_.size());
return media_channel2_->SendRtcp(rtcp_packet_.c_str(),
static_cast<int>(rtcp_packet_.size()));
}
// Methods to send custom data.
bool SendCustomRtp1(uint32 ssrc, int sequence_number) {
std::string data(CreateRtpData(ssrc, sequence_number));
return media_channel1_->SendRtp(data.c_str(), data.size());
return media_channel1_->SendRtp(data.c_str(),
static_cast<int>(data.size()));
}
bool SendCustomRtp2(uint32 ssrc, int sequence_number) {
std::string data(CreateRtpData(ssrc, sequence_number));
return media_channel2_->SendRtp(data.c_str(), data.size());
return media_channel2_->SendRtp(data.c_str(),
static_cast<int>(data.size()));
}
bool SendCustomRtcp1(uint32 ssrc) {
std::string data(CreateRtcpData(ssrc));
return media_channel1_->SendRtcp(data.c_str(), data.size());
return media_channel1_->SendRtcp(data.c_str(),
static_cast<int>(data.size()));
}
bool SendCustomRtcp2(uint32 ssrc) {
std::string data(CreateRtcpData(ssrc));
return media_channel2_->SendRtcp(data.c_str(), data.size());
return media_channel2_->SendRtcp(data.c_str(),
static_cast<int>(data.size()));
}
bool CheckRtp1() {
return media_channel1_->CheckRtp(rtp_packet_.c_str(), rtp_packet_.size());
return media_channel1_->CheckRtp(rtp_packet_.c_str(),
static_cast<int>(rtp_packet_.size()));
}
bool CheckRtp2() {
return media_channel2_->CheckRtp(rtp_packet_.c_str(), rtp_packet_.size());
return media_channel2_->CheckRtp(rtp_packet_.c_str(),
static_cast<int>(rtp_packet_.size()));
}
bool CheckRtcp1() {
return media_channel1_->CheckRtcp(rtcp_packet_.c_str(),
rtcp_packet_.size());
static_cast<int>(rtcp_packet_.size()));
}
bool CheckRtcp2() {
return media_channel2_->CheckRtcp(rtcp_packet_.c_str(),
rtcp_packet_.size());
static_cast<int>(rtcp_packet_.size()));
}
// Methods to check custom data.
bool CheckCustomRtp1(uint32 ssrc, int sequence_number) {
std::string data(CreateRtpData(ssrc, sequence_number));
return media_channel1_->CheckRtp(data.c_str(), data.size());
return media_channel1_->CheckRtp(data.c_str(),
static_cast<int>(data.size()));
}
bool CheckCustomRtp2(uint32 ssrc, int sequence_number) {
std::string data(CreateRtpData(ssrc, sequence_number));
return media_channel2_->CheckRtp(data.c_str(), data.size());
return media_channel2_->CheckRtp(data.c_str(),
static_cast<int>(data.size()));
}
bool CheckCustomRtcp1(uint32 ssrc) {
std::string data(CreateRtcpData(ssrc));
return media_channel1_->CheckRtcp(data.c_str(), data.size());
return media_channel1_->CheckRtcp(data.c_str(),
static_cast<int>(data.size()));
}
bool CheckCustomRtcp2(uint32 ssrc) {
std::string data(CreateRtcpData(ssrc));
return media_channel2_->CheckRtcp(data.c_str(), data.size());
return media_channel2_->CheckRtcp(data.c_str(),
static_cast<int>(data.size()));
}
std::string CreateRtpData(uint32 ssrc, int sequence_number) {
std::string data(rtp_packet_);
@ -1744,7 +1758,7 @@ class ChannelTest : public testing::Test, public sigslot::has_slots<> {
void TestSrtpError() {
static const unsigned char kBadPacket[] = {
0x90, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01
0x84, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01
};
CreateChannels(RTCP | SECURE, RTCP | SECURE);
EXPECT_FALSE(channel1_->secure());
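On the kBadPacket tweak: the first RTP octet packs version (2 bits), padding, extension, and the CSRC count. The old 0x90 is V=2 with the extension bit set; the new 0x84 is V=2 with CC=4, presumably chosen because four CSRCs would require 16 extra header bytes that this 12-byte packet cannot contain, making it structurally invalid regardless of keys. Decoding the octet:
#include <cstdio>
int main() {
  const unsigned char octet = 0x84;       // |V V|P|X|C C C C| = 10 0 0 0100
  unsigned version = (octet >> 6) & 0x3;  // 2
  unsigned csrc_count = octet & 0xF;      // 4
  // Four CSRCs imply a 12 + 4 * 4 = 28 byte header, longer than the
  // whole 12-byte test packet above.
  std::printf("V=%u CC=%u min header=%u\n", version, csrc_count,
              12 + 4 * csrc_count);
  return 0;
}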

View File

@ -713,26 +713,6 @@ bool ChannelManager::SetLocalRenderer(VideoRenderer* renderer) {
return ret;
}
bool ChannelManager::SetVideoCapturer(VideoCapturer* capturer) {
bool ret = true;
if (initialized_) {
ret = worker_thread_->Invoke<bool>(
Bind(&MediaEngineInterface::SetVideoCapturer,
media_engine_.get(), capturer));
}
return ret;
}
bool ChannelManager::SetVideoCapture(bool capture) {
bool ret = initialized_ && worker_thread_->Invoke<bool>(
Bind(&MediaEngineInterface::SetVideoCapture,
media_engine_.get(), capture));
if (ret) {
capturing_ = capture;
}
return ret;
}
void ChannelManager::SetVoiceLogging(int level, const char* filter) {
if (initialized_) {
worker_thread_->Invoke<void>(

View File

@ -166,8 +166,6 @@ class ChannelManager : public talk_base::MessageHandler,
// Sets the externally provided video capturer. The ssrc is the ssrc of the
// (video) stream for which the video capturer should be set.
bool SetVideoCapturer(VideoCapturer* capturer);
// Starts and stops the local camera and renders it to the local renderer.
bool SetVideoCapture(bool capture);
bool capturing() const { return capturing_; }
// Configures the logging output of the mediaengine(s).

View File

@ -524,20 +524,6 @@ TEST_F(ChannelManagerTest, SetLogging) {
EXPECT_STREQ("test-video", fme_->video_logfilter().c_str());
}
// Test that SetVideoCapture passes through the right value.
TEST_F(ChannelManagerTest, SetVideoCapture) {
// Should fail until we are initialized.
EXPECT_FALSE(fme_->capture());
EXPECT_FALSE(cm_->SetVideoCapture(true));
EXPECT_FALSE(fme_->capture());
EXPECT_TRUE(cm_->Init());
EXPECT_FALSE(fme_->capture());
EXPECT_TRUE(cm_->SetVideoCapture(true));
EXPECT_TRUE(fme_->capture());
EXPECT_TRUE(cm_->SetVideoCapture(false));
EXPECT_FALSE(fme_->capture());
}
// Test that the Video/Voice Processors register and unregister
TEST_F(ChannelManagerTest, RegisterProcessors) {
cricket::FakeMediaProcessor fmp;

View File

@ -92,7 +92,7 @@ static bool CreateCryptoParams(int tag, const std::string& cipher,
#ifdef HAVE_SRTP
static bool AddCryptoParams(const std::string& cipher_suite,
CryptoParamsVec *out) {
int size = out->size();
int size = static_cast<int>(out->size());
out->resize(size + 1);
return CreateCryptoParams(size, cipher_suite, &out->at(size));

View File

@ -329,11 +329,11 @@ class MediaSessionDescriptionFactoryTest : public testing::Test {
}
ASSERT_TRUE(desc.get() != NULL);
const cricket::MediaContentDescription* audio_media_desc =
static_cast<const cricket::MediaContentDescription*> (
static_cast<const cricket::MediaContentDescription*>(
desc.get()->GetContentDescriptionByName("audio"));
ASSERT_TRUE(audio_media_desc != NULL);
const cricket::MediaContentDescription* video_media_desc =
static_cast<const cricket::MediaContentDescription*> (
static_cast<const cricket::MediaContentDescription*>(
desc.get()->GetContentDescriptionByName("video"));
ASSERT_TRUE(video_media_desc != NULL);
EXPECT_TRUE(CompareCryptoParams(audio_media_desc->cryptos(),
@ -345,7 +345,7 @@ class MediaSessionDescriptionFactoryTest : public testing::Test {
// Verify the selected crypto is one from the reference audio
// media content.
const cricket::MediaContentDescription* ref_audio_media_desc =
static_cast<const cricket::MediaContentDescription*> (
static_cast<const cricket::MediaContentDescription*>(
ref_desc.get()->GetContentDescriptionByName("audio"));
bool found = false;
for (size_t i = 0; i < ref_audio_media_desc->cryptos().size(); ++i) {
@ -394,7 +394,7 @@ class MediaSessionDescriptionFactoryTest : public testing::Test {
const cricket::ContentDescription* description = content->description;
ASSERT(description != NULL);
const cricket::AudioContentDescription* audio_content_desc =
static_cast<const cricket::AudioContentDescription*> (description);
static_cast<const cricket::AudioContentDescription*>(description);
ASSERT(audio_content_desc != NULL);
for (size_t i = 0; i < audio_content_desc->codecs().size(); ++i) {
if (audio_content_desc->codecs()[i].name == "CN")
@ -1751,7 +1751,7 @@ TEST_F(MediaSessionDescriptionFactoryTest, TestCryptoDtls) {
audio_media_desc = static_cast<const cricket::MediaContentDescription*>(
offer->GetContentDescriptionByName("audio"));
ASSERT_TRUE(audio_media_desc != NULL);
video_media_desc = static_cast<const cricket::MediaContentDescription*> (
video_media_desc = static_cast<const cricket::MediaContentDescription*>(
offer->GetContentDescriptionByName("video"));
ASSERT_TRUE(video_media_desc != NULL);
EXPECT_EQ(2u, audio_media_desc->cryptos().size());
@ -1768,10 +1768,10 @@ TEST_F(MediaSessionDescriptionFactoryTest, TestCryptoDtls) {
answer.reset(f2_.CreateAnswer(offer.get(), options, NULL));
ASSERT_TRUE(answer.get() != NULL);
audio_media_desc = static_cast<const cricket::MediaContentDescription*> (
audio_media_desc = static_cast<const cricket::MediaContentDescription*>(
answer->GetContentDescriptionByName("audio"));
ASSERT_TRUE(audio_media_desc != NULL);
video_media_desc = static_cast<const cricket::MediaContentDescription*> (
video_media_desc = static_cast<const cricket::MediaContentDescription*>(
answer->GetContentDescriptionByName("video"));
ASSERT_TRUE(video_media_desc != NULL);
EXPECT_EQ(1u, audio_media_desc->cryptos().size());
@ -1789,10 +1789,10 @@ TEST_F(MediaSessionDescriptionFactoryTest, TestCryptoDtls) {
answer.reset(f2_.CreateAnswer(offer.get(), options, NULL));
ASSERT_TRUE(answer.get() != NULL);
audio_media_desc = static_cast<const cricket::MediaContentDescription*> (
audio_media_desc = static_cast<const cricket::MediaContentDescription*>(
answer->GetContentDescriptionByName("audio"));
ASSERT_TRUE(audio_media_desc != NULL);
video_media_desc = static_cast<const cricket::MediaContentDescription*> (
video_media_desc = static_cast<const cricket::MediaContentDescription*>(
answer->GetContentDescriptionByName("video"));
ASSERT_TRUE(video_media_desc != NULL);
EXPECT_TRUE(audio_media_desc->cryptos().empty());

View File

@ -322,7 +322,7 @@ void Jid::PrepDomainLabel(
std::string* buf, bool* valid) {
*valid = false;
int start_len = buf->length();
int start_len = static_cast<int>(buf->length());
for (std::string::const_iterator i = start; i < end; ++i) {
bool char_valid = true;
unsigned char ch = *i;
@ -338,7 +338,7 @@ void Jid::PrepDomainLabel(
}
}
int count = buf->length() - start_len;
int count = static_cast<int>(buf->length() - start_len);
if (count == 0) {
return;
}

View File

@ -379,7 +379,7 @@ void XmppClient::Private::OnSocketRead() {
return;
//#ifdef _DEBUG
client_->SignalLogInput(bytes, bytes_read);
client_->SignalLogInput(bytes, static_cast<int>(bytes_read));
//#endif
engine_->HandleInput(bytes, bytes_read);
@ -403,7 +403,7 @@ void XmppClient::Private::OnStateChange(int state) {
void XmppClient::Private::WriteOutput(const char* bytes, size_t len) {
//#ifdef _DEBUG
client_->SignalLogOutput(bytes, len);
client_->SignalLogOutput(bytes, static_cast<int>(len));
//#endif
socket_->Write(bytes, len);