* Update to use libjingle r85.

* Remove (most of) local libjingle mods. Only webrtcvideoengine and webrtcvoiceengine are left now, because the refcounted module has not yet been released to libjingle, so I can't submit the changes to libjingle at the moment.
* Update the peerconnection client sample app.
Review URL: http://webrtc-codereview.appspot.com/151004

git-svn-id: http://webrtc.googlecode.com/svn/trunk@625 4adac7df-926f-26a2-2b94-8c16560cd09d
This commit is contained in:
wu@webrtc.org 2011-09-19 21:59:33 +00:00
parent 86b85db67e
commit cb99f78653
37 changed files with 644 additions and 7057 deletions

2
DEPS
View File

@ -4,7 +4,7 @@ vars = {
"googlecode_url": "http://%s.googlecode.com/svn",
"chromium_trunk" : "http://src.chromium.org/svn/trunk",
"chromium_revision": "98568",
"libjingle_revision": "77",
"libjingle_revision": "85",
}
# NOTE: Prefer revision numbers to tags.

View File

@ -69,8 +69,7 @@ bool Conductor::InitializePeerConnection() {
talk_base::SocketAddress());
peer_connection_factory_.reset(
new webrtc::PeerConnectionFactory(GetPeerConnectionString(),
port_allocator,
new webrtc::PeerConnectionFactory(port_allocator,
worker_thread_.get()));
if (!peer_connection_factory_->Initialize()) {
main_wnd_->MessageBox("Error",

View File

@ -19,6 +19,8 @@
#include "talk/base/win32.h"
#include "talk/session/phone/mediachannel.h"
#include "talk/session/phone/videocommon.h"
#include "talk/session/phone/videoframe.h"
#include "talk/session/phone/videorenderer.h"
class MainWndCallback {
public:

View File

@ -24,8 +24,11 @@
'_USE_32BIT_TIME_T',
'SAFE_TO_DEFINE_TALK_BASE_LOGGING_MACROS',
'EXPAT_RELATIVE_PATH',
'JSONCPP_RELATIVE_PATH',
'WEBRTC_RELATIVE_PATH',
'HAVE_WEBRTC',
'HAVE_WEBRTC_VIDEO',
'HAVE_WEBRTC_VOICE',
],
'configurations': {
'Debug': {
@ -109,7 +112,6 @@
['inside_chromium_build==1', {
'include_dirs': [
'<(overrides)',
'<(libjingle_mods)/source',
'<(libjingle_orig)/source',
'../..', # the third_party folder for webrtc includes
'../../third_party/expat/files',
@ -117,7 +119,6 @@
'direct_dependent_settings': {
'include_dirs': [
'<(overrides)',
'<(libjingle_mods)/source',
'<(libjingle_orig)/source',
'../../third_party/expat/files'
],
@ -130,7 +131,6 @@
'include_dirs': [
# the third_party folder for webrtc/ includes (non-chromium).
'../../src',
'<(libjingle_mods)/source',
'<(libjingle_orig)/source',
'../../third_party/expat/files',
],
@ -270,8 +270,8 @@
'<(libjingle_orig)/source/talk/base/httpcommon.h',
'<(libjingle_orig)/source/talk/base/httprequest.cc',
'<(libjingle_orig)/source/talk/base/httprequest.h',
'<(libjingle_mods)/source/talk/base/json.cc',
'<(libjingle_mods)/source/talk/base/json.h',
'<(libjingle_orig)/source/talk/base/json.cc',
'<(libjingle_orig)/source/talk/base/json.h',
'<(libjingle_orig)/source/talk/base/linked_ptr.h',
'<(libjingle_orig)/source/talk/base/logging.cc',
'<(libjingle_orig)/source/talk/base/md5.h',
@ -336,6 +336,21 @@
'<(libjingle_orig)/source/talk/base/time.h',
'<(libjingle_orig)/source/talk/base/urlencode.cc',
'<(libjingle_orig)/source/talk/base/urlencode.h',
'<(libjingle_orig)/source/talk/base/worker.cc',
'<(libjingle_orig)/source/talk/base/worker.h',
'<(libjingle_orig)/source/talk/sound/automaticallychosensoundsystem.h',
'<(libjingle_orig)/source/talk/sound/platformsoundsystem.cc',
'<(libjingle_orig)/source/talk/sound/platformsoundsystem.h',
'<(libjingle_orig)/source/talk/sound/platformsoundsystemfactory.cc',
'<(libjingle_orig)/source/talk/sound/platformsoundsystemfactory.h',
'<(libjingle_orig)/source/talk/sound/sounddevicelocator.h',
'<(libjingle_orig)/source/talk/sound/soundinputstreaminterface.h',
'<(libjingle_orig)/source/talk/sound/soundoutputstreaminterface.h',
'<(libjingle_orig)/source/talk/sound/soundsystemfactory.h',
'<(libjingle_orig)/source/talk/sound/soundsysteminterface.cc',
'<(libjingle_orig)/source/talk/sound/soundsysteminterface.h',
'<(libjingle_orig)/source/talk/sound/soundsystemproxy.cc',
'<(libjingle_orig)/source/talk/sound/soundsystemproxy.h',
'<(libjingle_orig)/source/talk/xmllite/xmlbuilder.cc',
'<(libjingle_orig)/source/talk/xmllite/xmlbuilder.h',
'<(libjingle_orig)/source/talk/xmllite/xmlconstants.cc',
@ -409,6 +424,16 @@
'sources': [
'<(libjingle_orig)/source/talk/base/linux.cc',
'<(libjingle_orig)/source/talk/base/linux.h',
'<(libjingle_orig)/source/talk/sound/alsasoundsystem.cc',
'<(libjingle_orig)/source/talk/sound/alsasoundsystem.h',
'<(libjingle_orig)/source/talk/sound/alsasymboltable.cc',
'<(libjingle_orig)/source/talk/sound/alsasymboltable.h',
'<(libjingle_orig)/source/talk/sound/linuxsoundsystem.cc',
'<(libjingle_orig)/source/talk/sound/linuxsoundsystem.h',
'<(libjingle_orig)/source/talk/sound/pulseaudiosoundsystem.cc',
'<(libjingle_orig)/source/talk/sound/pulseaudiosoundsystem.h',
'<(libjingle_orig)/source/talk/sound/pulseaudiosymboltable.cc',
'<(libjingle_orig)/source/talk/sound/pulseaudiosymboltable.h',
],
}],
['OS=="mac"', {
@ -494,7 +519,7 @@
'<(libjingle_orig)/source/talk/session/phone/call.h',
'<(libjingle_orig)/source/talk/session/phone/channel.cc',
'<(libjingle_orig)/source/talk/session/phone/channel.h',
'<(libjingle_mods)/source/talk/session/phone/channelmanager.cc',
'<(libjingle_orig)/source/talk/session/phone/channelmanager.cc',
'<(libjingle_orig)/source/talk/session/phone/channelmanager.h',
'<(libjingle_orig)/source/talk/session/phone/codec.cc',
'<(libjingle_orig)/source/talk/session/phone/codec.h',
@ -502,7 +527,7 @@
'<(libjingle_orig)/source/talk/session/phone/currentspeakermonitor.cc',
'<(libjingle_orig)/source/talk/session/phone/currentspeakermonitor.h',
'<(libjingle_mods)/source/talk/session/phone/devicemanager.cc',
'<(libjingle_mods)/source/talk/session/phone/devicemanager.h',
'<(libjingle_orig)/source/talk/session/phone/devicemanager.h',
'<(libjingle_orig)/source/talk/session/phone/filemediaengine.cc',
'<(libjingle_orig)/source/talk/session/phone/filemediaengine.h',
'<(libjingle_orig)/source/talk/session/phone/mediachannel.h',
@ -512,6 +537,7 @@
'<(libjingle_orig)/source/talk/session/phone/mediamessages.h',
'<(libjingle_orig)/source/talk/session/phone/mediamonitor.cc',
'<(libjingle_orig)/source/talk/session/phone/mediamonitor.h',
'<(libjingle_orig)/source/talk/session/phone/mediasession.cc',
'<(libjingle_orig)/source/talk/session/phone/mediasessionclient.cc',
'<(libjingle_orig)/source/talk/session/phone/mediasessionclient.h',
'<(libjingle_orig)/source/talk/session/phone/mediasink.h',
@ -523,19 +549,20 @@
'<(libjingle_orig)/source/talk/session/phone/rtputils.h',
'<(libjingle_orig)/source/talk/session/phone/soundclip.cc',
'<(libjingle_orig)/source/talk/session/phone/soundclip.h',
'<(libjingle_mods)/source/talk/session/phone/srtpfilter.cc',
'<(libjingle_orig)/source/talk/session/phone/srtpfilter.cc',
'<(libjingle_orig)/source/talk/session/phone/srtpfilter.h',
'<(libjingle_orig)/source/talk/session/phone/videocommon.h',
'<(libjingle_orig)/source/talk/session/phone/webrtcpassthroughrender.cc',
'<(libjingle_orig)/source/talk/session/phone/voicechannel.h',
'<(libjingle_mods)/source/talk/session/phone/webrtccommon.h',
'<(libjingle_orig)/source/talk/session/phone/webrtccommon.h',
'<(libjingle_mods)/source/talk/session/phone/webrtcvideoengine.cc',
'<(libjingle_mods)/source/talk/session/phone/webrtcvideoengine.h',
'<(libjingle_mods)/source/talk/session/phone/webrtcvideoframe.cc',
'<(libjingle_mods)/source/talk/session/phone/webrtcvideoframe.h',
'<(libjingle_mods)/source/talk/session/phone/webrtcvie.h',
'<(libjingle_mods)/source/talk/session/phone/webrtcvoe.h',
'<(libjingle_orig)/source/talk/session/phone/webrtcvideoengine.h',
'<(libjingle_orig)/source/talk/session/phone/webrtcvideoframe.cc',
'<(libjingle_orig)/source/talk/session/phone/webrtcvideoframe.h',
'<(libjingle_orig)/source/talk/session/phone/webrtcvie.h',
'<(libjingle_orig)/source/talk/session/phone/webrtcvoe.h',
'<(libjingle_mods)/source/talk/session/phone/webrtcvoiceengine.cc',
'<(libjingle_mods)/source/talk/session/phone/webrtcvoiceengine.h',
'<(libjingle_orig)/source/talk/session/phone/webrtcvoiceengine.h',
'<(libjingle_orig)/source/talk/session/tunnel/pseudotcpchannel.cc',
'<(libjingle_orig)/source/talk/session/tunnel/pseudotcpchannel.h',
'<(libjingle_orig)/source/talk/session/tunnel/tunnelsessionclient.cc',
@ -592,21 +619,17 @@
'target_name': 'libjingle_app',
'type': '<(library)',
'sources': [
'<(libjingle_mods)/source/talk/app/webrtc/peerconnection.h',
'<(libjingle_mods)/source/talk/app/webrtc/peerconnectionfactory.h',
'<(libjingle_mods)/source/talk/app/webrtc/peerconnectionfactory.cc',
'<(libjingle_mods)/source/talk/app/webrtc/peerconnection_impl.cc',
'<(libjingle_mods)/source/talk/app/webrtc/peerconnection_impl.h',
'<(libjingle_mods)/source/talk/app/webrtc/peerconnection_proxy.cc',
'<(libjingle_mods)/source/talk/app/webrtc/peerconnection_proxy.h',
'<(libjingle_mods)/source/talk/app/webrtc/webrtcdevicemanager.cc',
'<(libjingle_mods)/source/talk/app/webrtc/webrtcdevicemanager.h',
'<(libjingle_mods)/source/talk/app/webrtc/webrtcmediaengine.cc',
'<(libjingle_mods)/source/talk/app/webrtc/webrtcmediaengine.h',
'<(libjingle_mods)/source/talk/app/webrtc/webrtcsession.cc',
'<(libjingle_mods)/source/talk/app/webrtc/webrtcsession.h',
'<(libjingle_mods)/source/talk/app/webrtc/webrtc_json.cc',
'<(libjingle_mods)/source/talk/app/webrtc/webrtc_json.h',
'<(libjingle_orig)/source/talk/app/webrtc/peerconnection.h',
'<(libjingle_orig)/source/talk/app/webrtc/peerconnectionfactory.h',
'<(libjingle_orig)/source/talk/app/webrtc/peerconnectionfactory.cc',
'<(libjingle_orig)/source/talk/app/webrtc/peerconnectionimpl.cc',
'<(libjingle_orig)/source/talk/app/webrtc/peerconnectionimpl.h',
'<(libjingle_orig)/source/talk/app/webrtc/peerconnectionproxy.cc',
'<(libjingle_orig)/source/talk/app/webrtc/peerconnectionproxy.h',
'<(libjingle_orig)/source/talk/app/webrtc/webrtcsession.cc',
'<(libjingle_orig)/source/talk/app/webrtc/webrtcsession.h',
'<(libjingle_orig)/source/talk/app/webrtc/webrtcjson.cc',
'<(libjingle_orig)/source/talk/app/webrtc/webrtcjson.h',
],
'dependencies': [
'jsoncpp',

View File

@ -1,134 +0,0 @@
/*
* libjingle
* Copyright 2004--2011, Google Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
* EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef TALK_APP_WEBRTC_PEERCONNECTION_H_
#define TALK_APP_WEBRTC_PEERCONNECTION_H_
// TODO(mallinath) - Add a factory class or some kind of PeerConnection manager
// to support multiple PeerConnection object instantiation. This class will
// create ChannelManager object and pass it to PeerConnection object. Otherwise
// each PeerConnection object will have its own ChannelManager hence MediaEngine
// and VoiceEngine/VideoEngine.
#include <string>
namespace cricket {
class VideoRenderer;
}
namespace talk_base {
class Thread;
}
namespace webrtc {
class PeerConnectionObserver {
public:
virtual void OnError() = 0;
// serialized signaling message
virtual void OnSignalingMessage(const std::string& msg) = 0;
// Triggered when a remote peer accepts a media connection.
virtual void OnAddStream(const std::string& stream_id, bool video) = 0;
// Triggered when a remote peer closes a media stream.
virtual void OnRemoveStream(const std::string& stream_id, bool video) = 0;
protected:
// Dtor protected as objects shouldn't be deleted via this interface.
~PeerConnectionObserver() {}
};
class PeerConnection {
public:
enum ReadyState {
NEW = 0,
NEGOTIATING,
ACTIVE,
CLOSED,
};
virtual ~PeerConnection() {}
// Register a listener
virtual void RegisterObserver(PeerConnectionObserver* observer) = 0;
// SignalingMessage in json format
virtual bool SignalingMessage(const std::string& msg) = 0;
// Asynchronously adds a local stream device to the peer
// connection. The operation is complete when
// PeerConnectionObserver::OnLocalStreamInitialized is called.
virtual bool AddStream(const std::string& stream_id, bool video) = 0;
// Asynchronously removes a local stream device from the peer
// connection. The operation is complete when
// PeerConnectionObserver::OnRemoveStream is called.
virtual bool RemoveStream(const std::string& stream_id) = 0;
// Info the peerconnection that it is time to return the signaling
// information. The operation is complete when
// PeerConnectionObserver::OnSignalingMessage is called.
virtual bool Connect() = 0;
// Remove all the streams and tear down the session.
// After the Close() is called, the OnSignalingMessage will be invoked
// asynchronously. And before OnSignalingMessage is called,
// OnRemoveStream will be called for each stream that was active.
// TODO(ronghuawu): Add an event such as onclose, or onreadystatechanged
// when the readystate reaches the closed state (no more streams in the
// peerconnection object.
virtual bool Close() = 0;
// Set the audio input & output devices based on the given device name.
// An empty device name means to use the default audio device.
virtual bool SetAudioDevice(const std::string& wave_in_device,
const std::string& wave_out_device,
int opts) = 0;
// Set the video renderer for the camera preview.
virtual bool SetLocalVideoRenderer(cricket::VideoRenderer* renderer) = 0;
// Set the video renderer for the specified stream.
virtual bool SetVideoRenderer(const std::string& stream_id,
cricket::VideoRenderer* renderer) = 0;
// Set video capture device
// For Chromium the cam_device should use the capture session id.
// For standalone app, cam_device is the camera name. It will try to
// set the default capture device when cam_device is "".
virtual bool SetVideoCapture(const std::string& cam_device) = 0;
// Returns the state of the PeerConnection object. See the ReadyState
// enum for valid values.
virtual ReadyState GetReadyState() = 0;
};
} // namespace webrtc
#endif // TALK_APP_WEBRTC_PEERCONNECTION_H_

View File

@ -1,224 +0,0 @@
/*
* libjingle
* Copyright 2004--2011, Google Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
* EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "talk/app/webrtc/peerconnection_impl.h"
#include "talk/app/webrtc/webrtc_json.h"
#include "talk/app/webrtc/webrtcsession.h"
#include "talk/base/basicpacketsocketfactory.h"
#include "talk/base/helpers.h"
#include "talk/base/logging.h"
#include "talk/base/stringencode.h"
#include "talk/p2p/base/session.h"
#include "talk/p2p/client/basicportallocator.h"
namespace webrtc {
PeerConnectionImpl::PeerConnectionImpl(
cricket::PortAllocator* port_allocator,
cricket::ChannelManager* channel_manager,
talk_base::Thread* signaling_thread)
: port_allocator_(port_allocator),
channel_manager_(channel_manager),
signaling_thread_(signaling_thread),
event_callback_(NULL),
session_(NULL) {
}
PeerConnectionImpl::~PeerConnectionImpl() {
}
bool PeerConnectionImpl::Init() {
std::string sid;
talk_base::CreateRandomString(8, &sid);
const bool incoming = false; // default outgoing direction
session_.reset(CreateMediaSession(sid, incoming));
if (session_.get() == NULL) {
ASSERT(false && "failed to initialize a session");
return false;
}
return true;
}
void PeerConnectionImpl::RegisterObserver(PeerConnectionObserver* observer) {
// This assert is to catch cases where two observer pointers are registered.
// We only support one and if another is to be used, the current one must be
// cleared first.
ASSERT(observer == NULL || event_callback_ == NULL);
event_callback_ = observer;
}
bool PeerConnectionImpl::SignalingMessage(
const std::string& signaling_message) {
// Deserialize signaling message
cricket::SessionDescription* incoming_sdp = NULL;
std::vector<cricket::Candidate> candidates;
if (!ParseJSONSignalingMessage(signaling_message,
incoming_sdp, &candidates)) {
return false;
}
bool ret = false;
if (GetReadyState() == NEW) {
// set direction to incoming, as message received first
session_->set_incoming(true);
ret = session_->OnInitiateMessage(incoming_sdp, candidates);
} else {
ret = session_->OnRemoteDescription(incoming_sdp, candidates);
}
return ret;
}
WebRtcSession* PeerConnectionImpl::CreateMediaSession(
const std::string& id, bool incoming) {
ASSERT(port_allocator_ != NULL);
WebRtcSession* session = new WebRtcSession(id, incoming,
port_allocator_, channel_manager_, signaling_thread_);
if (session->Initiate()) {
session->SignalAddStream.connect(
this,
&PeerConnectionImpl::OnAddStream);
session->SignalRemoveStream.connect(
this,
&PeerConnectionImpl::OnRemoveStream);
session->SignalLocalDescription.connect(
this,
&PeerConnectionImpl::OnLocalDescription);
session->SignalFailedCall.connect(
this,
&PeerConnectionImpl::OnFailedCall);
} else {
delete session;
session = NULL;
}
return session;
}
bool PeerConnectionImpl::AddStream(const std::string& stream_id, bool video) {
bool ret = false;
if (session_->HasStream(stream_id)) {
ASSERT(false && "A stream with this name already exists");
} else {
if (!video) {
ret = !session_->HasAudioStream() &&
session_->CreateVoiceChannel(stream_id);
} else {
ret = !session_->HasVideoStream() &&
session_->CreateVideoChannel(stream_id);
}
}
return ret;
}
bool PeerConnectionImpl::RemoveStream(const std::string& stream_id) {
return session_->RemoveStream(stream_id);
}
void PeerConnectionImpl::OnLocalDescription(
const cricket::SessionDescription* desc,
const std::vector<cricket::Candidate>& candidates) {
if (!desc) {
LOG(WARNING) << "no local SDP ";
return;
}
std::string message;
if (GetJSONSignalingMessage(desc, candidates, &message)) {
if (event_callback_) {
event_callback_->OnSignalingMessage(message);
}
}
}
void PeerConnectionImpl::OnFailedCall() {
// TODO(mallinath): implement.
}
bool PeerConnectionImpl::SetAudioDevice(const std::string& wave_in_device,
const std::string& wave_out_device,
int opts) {
return channel_manager_->SetAudioOptions(wave_in_device,
wave_out_device,
opts);
}
bool PeerConnectionImpl::SetLocalVideoRenderer(
cricket::VideoRenderer* renderer) {
return channel_manager_->SetLocalRenderer(renderer);
}
bool PeerConnectionImpl::SetVideoRenderer(const std::string& stream_id,
cricket::VideoRenderer* renderer) {
return session_->SetVideoRenderer(stream_id, renderer);
}
bool PeerConnectionImpl::SetVideoCapture(const std::string& cam_device) {
return channel_manager_->SetVideoOptions(cam_device);
}
bool PeerConnectionImpl::Connect() {
return session_->Connect();
}
// TODO(mallinath) - Close is not used anymore, should be removed.
bool PeerConnectionImpl::Close() {
session_->RemoveAllStreams();
return true;
}
void PeerConnectionImpl::OnAddStream(const std::string& stream_id,
bool video) {
if (event_callback_) {
event_callback_->OnAddStream(stream_id, video);
}
}
void PeerConnectionImpl::OnRemoveStream(const std::string& stream_id,
bool video) {
if (event_callback_) {
event_callback_->OnRemoveStream(stream_id, video);
}
}
PeerConnectionImpl::ReadyState PeerConnectionImpl::GetReadyState() {
ReadyState ready_state;
cricket::BaseSession::State state = session_->state();
if (state == cricket::BaseSession::STATE_INIT) {
ready_state = NEW;
} else if (state == cricket::BaseSession::STATE_INPROGRESS) {
ready_state = ACTIVE;
} else if (state == cricket::BaseSession::STATE_DEINIT) {
ready_state = CLOSED;
} else {
ready_state = NEGOTIATING;
}
return ready_state;
}
} // namespace webrtc

View File

@ -1,100 +0,0 @@
/*
* libjingle
* Copyright 2004--2011, Google Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
* EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef TALK_APP_WEBRTC_PEERCONNECTION_IMPL_H_
#define TALK_APP_WEBRTC_PEERCONNECTION_IMPL_H_
#include <string>
#include <vector>
#include "talk/app/webrtc/peerconnection.h"
#include "talk/base/sigslot.h"
#include "talk/base/scoped_ptr.h"
#include "talk/base/thread.h"
#include "talk/session/phone/channelmanager.h"
namespace cricket {
class ChannelManager;
class PortAllocator;
class SessionDescription;
}
namespace webrtc {
class WebRtcSession;
class PeerConnectionImpl : public PeerConnection,
public sigslot::has_slots<> {
public:
PeerConnectionImpl(cricket::PortAllocator* port_allocator,
cricket::ChannelManager* channel_manager,
talk_base::Thread* signaling_thread);
virtual ~PeerConnectionImpl();
// PeerConnection interfaces
virtual void RegisterObserver(PeerConnectionObserver* observer);
virtual bool SignalingMessage(const std::string& msg);
virtual bool AddStream(const std::string& stream_id, bool video);
virtual bool RemoveStream(const std::string& stream_id);
virtual bool Connect();
virtual bool Close();
virtual bool SetAudioDevice(const std::string& wave_in_device,
const std::string& wave_out_device, int opts);
virtual bool SetLocalVideoRenderer(cricket::VideoRenderer* renderer);
virtual bool SetVideoRenderer(const std::string& stream_id,
cricket::VideoRenderer* renderer);
virtual bool SetVideoCapture(const std::string& cam_device);
virtual ReadyState GetReadyState();
cricket::ChannelManager* channel_manager() {
return channel_manager_;
}
// Callbacks from PeerConnectionImplCallbacks
void OnAddStream(const std::string& stream_id, bool video);
void OnRemoveStream(const std::string& stream_id, bool video);
void OnLocalDescription(
const cricket::SessionDescription* desc,
const std::vector<cricket::Candidate>& candidates);
void OnFailedCall();
bool Init();
private:
bool ParseConfigString(const std::string& config,
talk_base::SocketAddress* stun_addr);
void SendRemoveSignal(WebRtcSession* session);
WebRtcSession* CreateMediaSession(const std::string& id, bool incoming);
cricket::PortAllocator* port_allocator_;
cricket::ChannelManager* channel_manager_;
talk_base::Thread* signaling_thread_;
PeerConnectionObserver* event_callback_;
talk_base::scoped_ptr<WebRtcSession> session_;
};
} // namespace webrtc
#endif // TALK_APP_WEBRTC_PEERCONNECTION_IMPL_H_

View File

@ -1,313 +0,0 @@
/*
* libjingle
* Copyright 2004--2011, Google Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
* EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "talk/app/webrtc/peerconnection_proxy.h"
#include "talk/app/webrtc/peerconnection_impl.h"
#include "talk/base/logging.h"
namespace webrtc {
enum {
MSG_WEBRTC_ADDSTREAM = 1,
MSG_WEBRTC_CLOSE,
MSG_WEBRTC_CONNECT,
MSG_WEBRTC_INIT,
MSG_WEBRTC_REGISTEROBSERVER,
MSG_WEBRTC_RELEASE,
MSG_WEBRTC_REMOVESTREAM,
MSG_WEBRTC_SETAUDIODEVICE,
MSG_WEBRTC_SETLOCALRENDERER,
MSG_WEBRTC_SETVIDEOCAPTURE,
MSG_WEBRTC_SETVIDEORENDERER,
MSG_WEBRTC_SIGNALINGMESSAGE,
MSG_WEBRTC_GETREADYSTATE,
};
struct AddStreamParams : public talk_base::MessageData {
AddStreamParams(const std::string& stream_id, bool video)
: stream_id(stream_id),
video(video),
result(false) {}
std::string stream_id;
bool video;
bool result;
};
struct RemoveStreamParams : public talk_base::MessageData {
explicit RemoveStreamParams(const std::string& stream_id)
: stream_id(stream_id),
result(false) {}
std::string stream_id;
bool result;
};
struct SignalingMsgParams : public talk_base::MessageData {
explicit SignalingMsgParams(const std::string& signaling_message)
: signaling_message(signaling_message),
result(false) {}
std::string signaling_message;
bool result;
};
struct SetAudioDeviceParams : public talk_base::MessageData {
SetAudioDeviceParams(const std::string& wave_in_device,
const std::string& wave_out_device,
int opts)
: wave_in_device(wave_in_device), wave_out_device(wave_out_device),
opts(opts), result(false) {}
std::string wave_in_device;
std::string wave_out_device;
int opts;
bool result;
};
struct SetLocalRendererParams : public talk_base::MessageData {
explicit SetLocalRendererParams(cricket::VideoRenderer* renderer)
: renderer(renderer), result(false) {}
cricket::VideoRenderer* renderer;
bool result;
};
struct SetVideoRendererParams : public talk_base::MessageData {
SetVideoRendererParams(const std::string& stream_id,
cricket::VideoRenderer* renderer)
: stream_id(stream_id), renderer(renderer), result(false) {}
std::string stream_id;
cricket::VideoRenderer* renderer;
bool result;
};
struct SetVideoCaptureParams : public talk_base::MessageData {
explicit SetVideoCaptureParams(const std::string& cam_device)
: cam_device(cam_device), result(false) {}
std::string cam_device;
bool result;
};
struct RegisterObserverParams : public talk_base::MessageData {
explicit RegisterObserverParams(PeerConnectionObserver* observer)
: observer(observer), result(false) {}
PeerConnectionObserver* observer;
bool result;
};
struct ResultParams : public talk_base::MessageData {
ResultParams()
: result(false) {}
bool result;
};
PeerConnectionProxy::PeerConnectionProxy(
cricket::PortAllocator* port_allocator,
cricket::ChannelManager* channel_manager,
talk_base::Thread* signaling_thread)
: peerconnection_impl_(new PeerConnectionImpl(port_allocator,
channel_manager, signaling_thread)),
signaling_thread_(signaling_thread) {
}
PeerConnectionProxy::~PeerConnectionProxy() {
ResultParams params;
Send(MSG_WEBRTC_RELEASE, &params);
}
bool PeerConnectionProxy::Init() {
ResultParams params;
return (Send(MSG_WEBRTC_INIT, &params) && params.result);
}
void PeerConnectionProxy::RegisterObserver(PeerConnectionObserver* observer) {
RegisterObserverParams params(observer);
Send(MSG_WEBRTC_REGISTEROBSERVER, &params);
}
bool PeerConnectionProxy::SignalingMessage(
const std::string& signaling_message) {
SignalingMsgParams params(signaling_message);
return (Send(MSG_WEBRTC_SIGNALINGMESSAGE, &params) && params.result);
}
bool PeerConnectionProxy::AddStream(const std::string& stream_id, bool video) {
AddStreamParams params(stream_id, video);
return (Send(MSG_WEBRTC_ADDSTREAM, &params) && params.result);
}
bool PeerConnectionProxy::RemoveStream(const std::string& stream_id) {
RemoveStreamParams params(stream_id);
return (Send(MSG_WEBRTC_REMOVESTREAM, &params) && params.result);
}
bool PeerConnectionProxy::SetAudioDevice(const std::string& wave_in_device,
const std::string& wave_out_device,
int opts) {
SetAudioDeviceParams params(wave_in_device, wave_out_device, opts);
return (Send(MSG_WEBRTC_SETAUDIODEVICE, &params) && params.result);
}
bool PeerConnectionProxy::SetLocalVideoRenderer(
cricket::VideoRenderer* renderer) {
SetLocalRendererParams params(renderer);
return (Send(MSG_WEBRTC_SETLOCALRENDERER, &params) && params.result);
}
bool PeerConnectionProxy::SetVideoRenderer(const std::string& stream_id,
cricket::VideoRenderer* renderer) {
SetVideoRendererParams params(stream_id, renderer);
return (Send(MSG_WEBRTC_SETVIDEORENDERER, &params) && params.result);
}
bool PeerConnectionProxy::SetVideoCapture(const std::string& cam_device) {
SetVideoCaptureParams params(cam_device);
return (Send(MSG_WEBRTC_SETVIDEOCAPTURE, &params) && params.result);
}
PeerConnection::ReadyState PeerConnectionProxy::GetReadyState() {
PeerConnection::ReadyState ready_state = NEW;
Send(MSG_WEBRTC_GETREADYSTATE,
reinterpret_cast<talk_base::MessageData*>(&ready_state));
return ready_state;
}
bool PeerConnectionProxy::Connect() {
ResultParams params;
return (Send(MSG_WEBRTC_CONNECT, &params) && params.result);
}
bool PeerConnectionProxy::Close() {
ResultParams params;
return (Send(MSG_WEBRTC_CLOSE, &params) && params.result);
}
// Synchronously dispatches message |id| with |data| to this object on the
// signaling thread. Returns false if no signaling thread is attached.
bool PeerConnectionProxy::Send(uint32 id, talk_base::MessageData* data) {
  if (signaling_thread_ == NULL) {
    return false;
  }
  // Thread::Send blocks until OnMessage has run, so |data| (typically a
  // stack-allocated params struct in the caller) remains valid throughout.
  signaling_thread_->Send(this, id, data);
  return true;
}
// Runs on the signaling thread. Dispatches each proxied call to
// |peerconnection_impl_| and writes the outcome back into the params struct
// that the calling thread is blocked on (see Send()).
// Each reinterpret_cast must match the params type used by the sender of the
// corresponding message id.
void PeerConnectionProxy::OnMessage(talk_base::Message* message) {
  talk_base::MessageData* data = message->pdata;
  switch (message->message_id) {
    case MSG_WEBRTC_ADDSTREAM: {
      AddStreamParams* params = reinterpret_cast<AddStreamParams*>(data);
      params->result = peerconnection_impl_->AddStream(
          params->stream_id, params->video);
      break;
    }
    case MSG_WEBRTC_SIGNALINGMESSAGE: {
      SignalingMsgParams* params =
          reinterpret_cast<SignalingMsgParams*>(data);
      params->result = peerconnection_impl_->SignalingMessage(
          params->signaling_message);
      break;
    }
    case MSG_WEBRTC_REMOVESTREAM: {
      RemoveStreamParams* params = reinterpret_cast<RemoveStreamParams*>(data);
      params->result = peerconnection_impl_->RemoveStream(
          params->stream_id);
      break;
    }
    case MSG_WEBRTC_SETAUDIODEVICE: {
      SetAudioDeviceParams* params =
          reinterpret_cast<SetAudioDeviceParams*>(data);
      params->result = peerconnection_impl_->SetAudioDevice(
          params->wave_in_device, params->wave_out_device, params->opts);
      break;
    }
    case MSG_WEBRTC_SETLOCALRENDERER: {
      SetLocalRendererParams* params =
          reinterpret_cast<SetLocalRendererParams*>(data);
      params->result = peerconnection_impl_->SetLocalVideoRenderer(
          params->renderer);
      break;
    }
    case MSG_WEBRTC_SETVIDEOCAPTURE: {
      SetVideoCaptureParams* params =
          reinterpret_cast<SetVideoCaptureParams*>(data);
      params->result = peerconnection_impl_->SetVideoCapture(
          params->cam_device);
      break;
    }
    case MSG_WEBRTC_GETREADYSTATE: {
      // |data| is actually a ReadyState*, not a MessageData* — see
      // GetReadyState(), which smuggles the pointer through the cast.
      PeerConnection::ReadyState* ready_state =
          reinterpret_cast<PeerConnection::ReadyState*>(data);
      *ready_state = peerconnection_impl_->GetReadyState();
      break;
    }
    case MSG_WEBRTC_SETVIDEORENDERER: {
      SetVideoRendererParams* params =
          reinterpret_cast<SetVideoRendererParams*>(data);
      params->result = peerconnection_impl_->SetVideoRenderer(
          params->stream_id, params->renderer);
      break;
    }
    case MSG_WEBRTC_CONNECT: {
      ResultParams* params =
          reinterpret_cast<ResultParams*>(data);
      params->result = peerconnection_impl_->Connect();
      break;
    }
    case MSG_WEBRTC_CLOSE: {
      ResultParams* params =
          reinterpret_cast<ResultParams*>(data);
      params->result = peerconnection_impl_->Close();
      break;
    }
    case MSG_WEBRTC_INIT: {
      ResultParams* params =
          reinterpret_cast<ResultParams*>(data);
      params->result = peerconnection_impl_->Init();
      break;
    }
    case MSG_WEBRTC_REGISTEROBSERVER: {
      RegisterObserverParams* params =
          reinterpret_cast<RegisterObserverParams*>(data);
      peerconnection_impl_->RegisterObserver(params->observer);
      break;
    }
    case MSG_WEBRTC_RELEASE: {
      // Destroy the implementation on the signaling thread it lives on.
      peerconnection_impl_.reset();
      break;
    }
    default: {
      ASSERT(false);
      break;
    }
  }
}
} // namespace webrtc

View File

@ -1,82 +0,0 @@
/*
* libjingle
* Copyright 2004--2011, Google Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
* EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef TALK_APP_WEBRTC_PEERCONNECTION_PROXY_H_
#define TALK_APP_WEBRTC_PEERCONNECTION_PROXY_H_
#include <string>
#include "talk/app/webrtc/peerconnection.h"
#include "talk/base/scoped_ptr.h"
#include "talk/base/thread.h"
namespace cricket {
class ChannelManager;
class PortAllocator;
}
namespace webrtc {
class PeerConnectionImpl;
// Thread-safe facade over PeerConnectionImpl: every PeerConnection call is
// marshalled onto |signaling_thread_| via talk_base::MessageHandler and
// executed there synchronously (see Send/OnMessage in the .cc file).
// Instances are created by PeerConnectionFactory (a friend, so it can call
// the private Init()).
class PeerConnectionProxy : public PeerConnection,
                            public talk_base::MessageHandler {
 public:
  PeerConnectionProxy(cricket::PortAllocator* port_allocator,
                      cricket::ChannelManager* channel_manager,
                      talk_base::Thread* signaling_thread);
  virtual ~PeerConnectionProxy();

  // PeerConnection interface implementation.
  virtual void RegisterObserver(PeerConnectionObserver* observer);
  virtual bool SignalingMessage(const std::string& msg);
  virtual bool AddStream(const std::string& stream_id, bool video);
  virtual bool RemoveStream(const std::string& stream_id);
  virtual bool Connect();
  virtual bool Close();
  virtual bool SetAudioDevice(const std::string& wave_in_device,
                              const std::string& wave_out_device, int opts);
  virtual bool SetLocalVideoRenderer(cricket::VideoRenderer* renderer);
  virtual bool SetVideoRenderer(const std::string& stream_id,
                                cricket::VideoRenderer* renderer);
  virtual bool SetVideoCapture(const std::string& cam_device);
  virtual ReadyState GetReadyState();

 private:
  bool Init();
  // Synchronously dispatches message |id| to this handler on the signaling
  // thread; returns false if no signaling thread is set.
  bool Send(uint32 id, talk_base::MessageData* data);
  virtual void OnMessage(talk_base::Message* message);

  // The wrapped implementation; lives (and dies) on the signaling thread.
  talk_base::scoped_ptr<PeerConnectionImpl> peerconnection_impl_;
  talk_base::Thread* signaling_thread_;  // Not owned.

  friend class PeerConnectionFactory;
};
}
#endif // TALK_APP_WEBRTC_PEERCONNECTION_PROXY_H_

View File

@ -1,31 +0,0 @@
/*
* libjingle
* Copyright 2011, Google Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
* EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "base/gunit.h"
// Placeholder sanity test for PeerConnection initialization/receive; body
// intentionally empty until implemented.
TEST(PeerConnectionTest, InitializationReceiveSanity) {
  // TODO(henrike): implement.
}

View File

@ -1,119 +0,0 @@
#include "talk/app/webrtc/peerconnectionfactory.h"
#include "talk/app/webrtc/peerconnection_proxy.h"
#include "talk/base/logging.h"
#include "talk/p2p/client/basicportallocator.h"
#include "talk/session/phone/channelmanager.h"
namespace {
// The number of the tokens in the config string.
static const size_t kConfigTokens = 2;
// The default stun port.
static const int kDefaultStunPort = 3478;
// NOTE: Must be in the same order as the enum.
static const char* kValidServiceTypes[
webrtc::PeerConnectionFactory::SERVICE_COUNT] = {
"STUN", "STUNS", "TURN", "TURNS" };
}
namespace webrtc {
// Full constructor: the caller supplies the media engine and device manager,
// which are handed to the ChannelManager (useful for tests/custom engines).
// |port_allocator| ownership transfers to the scoped_ptr member.
// NOTE: |service_type_| is not set here; it is assigned by ParseConfigString()
// during Initialize().
PeerConnectionFactory::PeerConnectionFactory(
    const std::string& config,
    cricket::PortAllocator* port_allocator,
    cricket::MediaEngine* media_engine,
    cricket::DeviceManager* device_manager,
    talk_base::Thread* worker_thread)
    : config_(config),
      initialized_(false),
      port_allocator_(port_allocator),
      channel_manager_(new cricket::ChannelManager(
          media_engine, device_manager, worker_thread)) {
}
// Convenience constructor: lets ChannelManager create its own default media
// engine and device manager. |port_allocator| ownership transfers to the
// scoped_ptr member.
PeerConnectionFactory::PeerConnectionFactory(
    const std::string& config,
    cricket::PortAllocator* port_allocator,
    talk_base::Thread* worker_thread)
    : config_(config),
      initialized_(false),
      port_allocator_(port_allocator),
      channel_manager_(new cricket::ChannelManager(worker_thread)) {
}
// Out-of-line so the scoped_ptr members can delete types that are only
// forward-declared in the header (PortAllocator, ChannelManager).
PeerConnectionFactory::~PeerConnectionFactory() {
}
// Validates |config_| (which also records the service type as a side effect
// of ParseConfigString) and initializes the channel manager.
// Returns true on success.
bool PeerConnectionFactory::Initialize() {
  ASSERT(channel_manager_.get());
  talk_base::SocketAddress stun_addr;
  if (!ParseConfigString(config_, &stun_addr))
    return false;
  // NOTE(review): |stun_addr| is only used to validate the config here; the
  // resolved address is not yet forwarded to the port allocator.
  initialized_ = channel_manager_->Init();
  return initialized_;
}
// Creates and initializes a PeerConnectionProxy bound to |signaling_thread|.
// Returns NULL (with an error log) if the factory has not been successfully
// Initialize()d or the proxy fails to initialize. Caller takes ownership.
PeerConnection* PeerConnectionFactory::CreatePeerConnection(
    talk_base::Thread* signaling_thread) {
  PeerConnectionProxy* pc = NULL;
  if (initialized_) {
    pc = new PeerConnectionProxy(
        port_allocator_.get(), channel_manager_.get(), signaling_thread);
    if (!pc->Init()) {
      LOG(LERROR) << "Error in initializing PeerConnection";
      delete pc;
      pc = NULL;
    }
  } else {
    // Fixed log typo: "is not initialize" -> "is not initialized".
    LOG(LERROR) << "PeerConnectionFactory is not initialized";
  }
  return pc;
}
// Parses a config string of the form "TYPE host[:port]" (e.g.
// "STUN stun.example.org:3478"). On success, records the service type in
// |service_type_| and fills |stun_addr|; returns false on any syntax error.
bool PeerConnectionFactory::ParseConfigString(
    const std::string& config, talk_base::SocketAddress* stun_addr) {
  std::vector<std::string> tokens;
  // Fixed: tokenize the |config| parameter, not the |config_| member; the
  // old code silently ignored the argument.
  talk_base::tokenize(config, ' ', &tokens);
  if (tokens.size() != kConfigTokens) {
    LOG(WARNING) << "Invalid config string";
    return false;
  }

  service_type_ = INVALID;
  const std::string& type = tokens[0];
  for (size_t i = 0; i < SERVICE_COUNT; ++i) {
    if (type.compare(kValidServiceTypes[i]) == 0) {
      service_type_ = static_cast<ServiceType>(i);
      break;
    }
  }
  // Fixed: the sentinel assigned above is INVALID, so comparing against
  // SERVICE_COUNT could never detect an unrecognized type.
  if (service_type_ == INVALID) {
    LOG(WARNING) << "Invalid service type: " << type;
    return false;
  }

  std::string service_address = tokens[1];
  int port;
  tokens.clear();
  talk_base::tokenize(service_address, ':', &tokens);
  if (tokens.size() != kConfigTokens) {
    port = kDefaultStunPort;
  } else {
    // Fixed: strip the ":port" suffix so SetIP receives only the host part
    // instead of the full "host:port" string.
    service_address = tokens[0];
    port = atoi(tokens[1].c_str());
    if (port <= 0 || port > 0xffff) {
      LOG(WARNING) << "Invalid port: " << tokens[1];
      return false;
    }
  }
  stun_addr->SetIP(service_address);
  stun_addr->SetPort(port);
  return true;
}
} // namespace webrtc

View File

@ -1,64 +0,0 @@
#ifndef TALK_APP_WEBRTC_PEERCONNECTIONFACTORY_H_
#define TALK_APP_WEBRTC_PEERCONNECTIONFACTORY_H_
#include <string>
#include <vector>
#include "talk/base/scoped_ptr.h"
namespace cricket {
class ChannelManager;
class DeviceManager;
class MediaEngine;
class PortAllocator;
} // namespace cricket
namespace talk_base {
class SocketAddress;
class Thread;
} // namespace talk_base
namespace webrtc {
class PeerConnection;
// Factory for PeerConnection objects. Constructed with a config string of
// the form "TYPE host[:port]" (parsed by ParseConfigString) plus the port
// allocator and threads/engines the connections will share.
class PeerConnectionFactory {
 public:
  // NOTE: The order of the enum values must be in sync with the array
  // in Initialize().
  enum ServiceType {
    STUN = 0,
    STUNS,
    TURN,
    TURNS,
    SERVICE_COUNT,
    INVALID    // Sentinel used by ParseConfigString for unrecognized types.
  };

  // Takes ownership of |port_allocator|; supplies a custom media engine and
  // device manager to the internal ChannelManager.
  PeerConnectionFactory(const std::string& config,
                        cricket::PortAllocator* port_allocator,
                        cricket::MediaEngine* media_engine,
                        cricket::DeviceManager* device_manager,
                        talk_base::Thread* worker_thread);
  // Takes ownership of |port_allocator|; ChannelManager builds its own
  // default engine and device manager.
  PeerConnectionFactory(const std::string& config,
                        cricket::PortAllocator* port_allocator,
                        talk_base::Thread* worker_thread);

  virtual ~PeerConnectionFactory();
  // Must be called (and return true) before CreatePeerConnection.
  bool Initialize();

  // Returns a new, initialized PeerConnection (caller owns it), or NULL on
  // failure or if the factory is not initialized.
  PeerConnection* CreatePeerConnection(talk_base::Thread* signaling_thread);

 private:
  bool ParseConfigString(const std::string&, talk_base::SocketAddress*);

  // Set only by ParseConfigString() during Initialize(); unset before then.
  ServiceType service_type_;
  std::string config_;
  bool initialized_;
  talk_base::scoped_ptr<cricket::PortAllocator> port_allocator_;
  talk_base::scoped_ptr<cricket::ChannelManager> channel_manager_;
};
} // namespace webrtc
#endif // TALK_APP_WEBRTC_PEERCONNECTIONFACTORY_H_

View File

@ -1,57 +0,0 @@
# -*- Python -*-
import talk

Import('env')

# local sources
# Builds the 'webrtc' library from the PeerConnection implementation files.
talk.Library(
  env,
  name = 'webrtc',
  srcs = [
    'peerconnection_impl.cc',
    'peerconnection_proxy.cc',
    'peerconnectionfactory.cc',
    'webrtc_json.cc',
    'webrtcsession.cc',
  ],
)

# Unit tests for the library above, with the libjingle libraries and
# per-platform system dependencies they link against.
talk.Unittest(
  env,
  name = 'webrtc',
  srcs = [
    'webrtcsession_unittest.cc',
    'peerconnection_unittest.cc',
  ],
  libs = [
    'srtp',
    'base',
    'jpeg',
    'json',
    'webrtc',
    'p2p',
    'phone',
    'xmpp',
    'xmllite',
    'yuvscaler'
  ],
  include_talk_media_libs = True,
  mac_libs = [
    'crypto',
    'ssl',
  ],
  mac_FRAMEWORKS = [
    'Foundation',
    'IOKit',
    'QTKit',
  ],
  lin_libs = [
    'rt',
    'dl',
    'sound',
    'X11',
    'Xext',
    'Xfixes',
    'Xrandr'
  ],
)

View File

@ -1,516 +0,0 @@
/*
* libjingle
* Copyright 2004--2011, Google Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
* EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "talk/app/webrtc/webrtc_json.h"
#include <stdio.h>
#include <locale>
#include <string>
#include "talk/base/json.h"
#include "talk/base/logging.h"
#include "talk/base/stringutils.h"
#include "talk/session/phone/mediasessionclient.h"
#include "talk/session/phone/codec.h"
namespace {
// Before [de]serializing, we need to work around a bug in jsoncpp where
// locale sensitive string conversion functions are used (e.g. sprintf,
// sscanf and stl). The problem is that depending on what the current
// locale is, numbers might be formatted differently than the jsoncpp code
// otherwise expects. E.g. sprintf might format a number as "1,234" and
// the parser assumes that it would be "1.234".
// RAII guard: on construction switches both the CRT numeric locale and the
// C++ global locale to the classic "C" locale; on destruction restores the
// previous locales. See the comment above for why jsoncpp needs this.
class AutoSwitchToClassicLocale {
 public:
  AutoSwitchToClassicLocale() {
    // Save the current CRT numeric locale so it can be restored later.
    const char* locale_name = setlocale(LC_NUMERIC, NULL);
    if (locale_name)
      saved_locale_ = locale_name;

    // Switch the CRT to "C".
    setlocale(LC_NUMERIC, "C");

    // Switch STL to classic. std::locale::global returns the previous
    // global locale, which we keep for the destructor.
    cxx_locale_ = std::locale::global(std::locale::classic());
  }

  ~AutoSwitchToClassicLocale() {
    // Switch the locale back to what it was before.
    std::locale::global(cxx_locale_);
    setlocale(LC_NUMERIC, saved_locale_.c_str());
  }

 private:
  std::string saved_locale_;  // CRT locale name saved at construction.
  std::locale cxx_locale_;    // Previous C++ global locale.
};
}
namespace webrtc {
static const int kIceComponent = 1;
static const int kIceFoundation = 1;
// Extracts the "connectionmediator" string member from |value| into
// |connection_mediator|. Returns false if |value| is not an object/null or
// the member is missing.
bool GetConnectionMediator(const Json::Value& value,
                           std::string* connection_mediator) {
  const bool container_ok =
      value.type() == Json::objectValue || value.type() == Json::nullValue;
  if (!container_ok) {
    LOG(LS_WARNING) << "Failed to parse stun values";
    return false;
  }
  if (!GetStringFromJsonObject(value,
                               "connectionmediator",
                               connection_mediator)) {
    LOG(LS_WARNING) << "Failed to parse JSON for value: "
                    << value.toStyledString();
    return false;
  }
  return true;
}
// Fills |stunServer| from the "stun_service" object member of |value|
// (expects "host", "service" and "protocol" string fields). Returns false
// on any missing or mistyped piece. Rewritten with guard clauses; each
// failure path logs the same message as before.
bool GetStunServer(const Json::Value& value, StunServiceDetails* stunServer) {
  const bool container_ok =
      value.type() == Json::objectValue || value.type() == Json::nullValue;
  if (!container_ok) {
    LOG(LS_WARNING) << "Failed to parse stun values";
    return false;
  }
  Json::Value stun;
  if (!GetValueFromJsonObject(value, "stun_service", &stun)) {
    LOG(LS_WARNING) << "Wrong ValueType. Expect Json::objectValue).";
    return false;
  }
  if (stun.type() != Json::objectValue) {
    LOG(LS_WARNING) << "Failed to find the stun_service member.";
    return false;
  }
  if (!GetStringFromJsonObject(stun, "host", &stunServer->host) ||
      !GetStringFromJsonObject(stun, "service", &stunServer->service) ||
      !GetStringFromJsonObject(stun, "protocol", &stunServer->protocol)) {
    LOG(LS_WARNING) << "Failed to parse JSON value: "
                    << value.toStyledString();
    return false;
  }
  return true;
}
// Reads the TURN host from the optional "turn_service" object member of
// |value|. A missing "turn_service" is NOT an error (returns true, leaving
// |turn_server| untouched); a present but malformed one returns false.
bool GetTurnServer(const Json::Value& value, std::string* turn_server) {
  const bool container_ok =
      value.type() == Json::objectValue || value.type() == Json::nullValue;
  if (!container_ok) {
    LOG(LS_WARNING) << "Failed to parse stun values";
    return false;
  }
  Json::Value turn;
  if (GetValueFromJsonObject(value, "turn_service", &turn)) {
    if (turn.type() != Json::objectValue) {
      LOG(LS_WARNING) << "Wrong ValueType. Expect Json::objectValue).";
      return false;
    }
    if (!GetStringFromJsonObject(turn, "host", turn_server)) {
      LOG(LS_WARNING) << "Failed to parse JSON value: "
                      << value.toStyledString();
      return false;
    }
  }
  return true;
}
// Serializes the session description |sdp| plus ICE |candidates| into the
// JSON signaling format: {"media": [<audio m-line>, <video m-line>]}.
// Audio (label 1) is emitted before video (label 2) when both are present.
// Always returns true.
bool GetJSONSignalingMessage(
    const cricket::SessionDescription* sdp,
    const std::vector<cricket::Candidate>& candidates,
    std::string* signaling_message) {
  // See documentation for AutoSwitchToClassicLocale.
  AutoSwitchToClassicLocale auto_switch;

  const cricket::ContentInfo* audio_content = GetFirstAudioContent(sdp);
  const cricket::ContentInfo* video_content = GetFirstVideoContent(sdp);

  std::vector<Json::Value> media;
  if (audio_content) {
    Json::Value value;
    BuildMediaMessage(*audio_content, candidates, false, &value);
    media.push_back(value);
  }

  if (video_content) {
    Json::Value value;
    BuildMediaMessage(*video_content, candidates, true, &value);
    media.push_back(value);
  }

  Json::Value signal;
  Append(&signal, "media", media);

  // Now serialize.
  *signaling_message = Serialize(signal);

  return true;
}
// Builds one "media" JSON object for a single content (audio or video):
// label (1=audio, 2=video), optional "rtcp_mux": "supported", the "rtpmap"
// codec list, and an "attributes" object holding the ICE candidates.
// Returns false if the rtpmap or candidate attributes cannot be built.
bool BuildMediaMessage(
    const cricket::ContentInfo& content_info,
    const std::vector<cricket::Candidate>& candidates,
    bool video,
    Json::Value* params) {
  if (video) {
    Append(params, "label", 2);  // always video 2
  } else {
    Append(params, "label", 1);  // always audio 1
  }

  const cricket::MediaContentDescription* media_info =
      static_cast<const cricket::MediaContentDescription*> (
          content_info.description);
  if (media_info->rtcp_mux()) {
    Append(params, "rtcp_mux", std::string("supported"));
  }

  std::vector<Json::Value> rtpmap;
  if (!BuildRtpMapParams(content_info, video, &rtpmap)) {
    return false;
  }

  Append(params, "rtpmap", rtpmap);

  Json::Value attributes;
  std::vector<Json::Value> jcandidates;
  if (!BuildAttributes(candidates, video, &jcandidates)) {
    return false;
  }
  Append(&attributes, "candidate", jcandidates);
  Append(params, "attributes", attributes);
  return true;
}
// Converts the codec list of |content_info| into "rtpmap" JSON entries of
// the form {"<payload id>": {"codec": "audio/NAME", "clockrate": N}} for
// audio, or {"<payload id>": {"codec": "video/NAME"}} for video (no
// clockrate is emitted for video). Always returns true.
bool BuildRtpMapParams(const cricket::ContentInfo& content_info,
                       bool video,
                       std::vector<Json::Value>* rtpmap) {
  if (!video) {
    const cricket::AudioContentDescription* audio_offer =
        static_cast<const cricket::AudioContentDescription*>(
            content_info.description);

    std::vector<cricket::AudioCodec>::const_iterator iter =
        audio_offer->codecs().begin();
    std::vector<cricket::AudioCodec>::const_iterator iter_end =
        audio_offer->codecs().end();
    for (; iter != iter_end; ++iter) {
      Json::Value codec;
      std::string codec_str(std::string("audio/").append(iter->name));
      // adding clockrate
      Append(&codec, "clockrate", iter->clockrate);
      Append(&codec, "codec", codec_str);
      Json::Value codec_id;
      // The payload type id becomes the JSON key for this codec object.
      Append(&codec_id, talk_base::ToString(iter->id), codec);
      rtpmap->push_back(codec_id);
    }
  } else {
    const cricket::VideoContentDescription* video_offer =
        static_cast<const cricket::VideoContentDescription*>(
            content_info.description);

    std::vector<cricket::VideoCodec>::const_iterator iter =
        video_offer->codecs().begin();
    std::vector<cricket::VideoCodec>::const_iterator iter_end =
        video_offer->codecs().end();
    for (; iter != iter_end; ++iter) {
      Json::Value codec;
      std::string codec_str(std::string("video/").append(iter->name));
      Append(&codec, "codec", codec_str);
      Json::Value codec_id;
      Append(&codec_id, talk_base::ToString(iter->id), codec);
      rtpmap->push_back(codec_id);
    }
  }
  return true;
}
// Serializes the ICE candidates that belong to this m-line into JSON
// candidate objects. Candidates are filtered by channel name: for video,
// only "video_rtp"/"video_rtcp"; for audio, only "rtp"/"rtcp".
// Always returns true.
bool BuildAttributes(const std::vector<cricket::Candidate>& candidates,
                     bool video,
                     std::vector<Json::Value>* jcandidates) {
  std::vector<cricket::Candidate>::const_iterator iter =
      candidates.begin();
  std::vector<cricket::Candidate>::const_iterator iter_end =
      candidates.end();
  for (; iter != iter_end; ++iter) {
    // compare() == 0 means equal, so !compare() selects the matching names.
    if ((video && (!iter->name().compare("video_rtcp") ||
                   (!iter->name().compare("video_rtp")))) ||
        (!video && (!iter->name().compare("rtp") ||
                    (!iter->name().compare("rtcp"))))) {
      Json::Value candidate;
      Append(&candidate, "component", kIceComponent);
      Append(&candidate, "foundation", kIceFoundation);
      Append(&candidate, "generation", iter->generation());
      Append(&candidate, "proto", iter->protocol());
      Append(&candidate, "priority", iter->preference());
      Append(&candidate, "ip", iter->address().IPAsString());
      Append(&candidate, "port", iter->address().PortAsString());
      Append(&candidate, "type", iter->type());
      Append(&candidate, "name", iter->name());
      Append(&candidate, "network_name", iter->network_name());
      Append(&candidate, "username", iter->username());
      Append(&candidate, "password", iter->password());
      jcandidates->push_back(candidate);
    }
  }
  return true;
}
// Pretty-prints |value| using jsoncpp's styled writer.
std::string Serialize(const Json::Value& value) {
  Json::StyledWriter styled_writer;
  std::string serialized = styled_writer.write(value);
  return serialized;
}
bool Deserialize(const std::string& message, Json::Value* value) {
Json::Reader reader;
return reader.parse(message, *value);
}
// Parses a JSON signaling message into a freshly allocated
// SessionDescription (returned through the |sdp| out-reference; caller takes
// ownership) and appends the contained ICE candidates to |candidates|.
// |sdp| must be NULL on entry. Returns false (leaving |sdp| NULL) if the
// message cannot be deserialized or contains no "media" entries.
bool ParseJSONSignalingMessage(const std::string& signaling_message,
                               cricket::SessionDescription*& sdp,
                               std::vector<cricket::Candidate>* candidates) {
  ASSERT(!sdp);  // expect this to be NULL
  // See documentation for AutoSwitchToClassicLocale.
  AutoSwitchToClassicLocale auto_switch;

  // first deserialize message
  Json::Value value;
  if (!Deserialize(signaling_message, &value)) {
    return false;
  }

  // get media objects
  std::vector<Json::Value> mlines = ReadValues(value, "media");
  if (mlines.empty()) {
    // no m-lines found
    return false;
  }

  sdp = new cricket::SessionDescription();

  // get codec information
  for (size_t i = 0; i < mlines.size(); ++i) {
    // label 1 is audio (see BuildMediaMessage); anything else is video.
    if (mlines[i]["label"].asInt() == 1) {
      cricket::AudioContentDescription* audio_content =
          new cricket::AudioContentDescription();
      ParseAudioCodec(mlines[i], audio_content);
      audio_content->set_rtcp_mux(ParseRTCPMux(mlines[i]));
      audio_content->SortCodecs();
      // AddContent takes ownership of |audio_content|.
      sdp->AddContent(cricket::CN_AUDIO, cricket::NS_JINGLE_RTP, audio_content);
      ParseICECandidates(mlines[i], candidates);
    } else {
      cricket::VideoContentDescription* video_content =
          new cricket::VideoContentDescription();
      ParseVideoCodec(mlines[i], video_content);
      video_content->set_rtcp_mux(ParseRTCPMux(mlines[i]));
      video_content->SortCodecs();
      sdp->AddContent(cricket::CN_VIDEO, cricket::NS_JINGLE_RTP, video_content);
      ParseICECandidates(mlines[i], candidates);
    }
  }
  return true;
}
// Returns true iff the media description carries "rtcp_mux": "supported".
bool ParseRTCPMux(const Json::Value& value) {
  Json::Value rtcp_mux(ReadValue(value, "rtcp_mux"));
  return !rtcp_mux.empty() && rtcp_mux.asString() == "supported";
}
// Reads the "rtpmap" entries of an audio m-line into |content|. Each entry
// is {"<payload id>": {"codec": "audio/NAME", "clockrate": N}}.
// Returns false if no rtpmap is present.
bool ParseAudioCodec(const Json::Value& value,
                     cricket::AudioContentDescription* content) {
  std::vector<Json::Value> rtpmap(ReadValues(value, "rtpmap"));
  if (rtpmap.empty())
    return false;

  std::vector<Json::Value>::const_iterator iter =
      rtpmap.begin();
  std::vector<Json::Value>::const_iterator iter_end =
      rtpmap.end();
  for (; iter != iter_end; ++iter) {
    cricket::AudioCodec codec;
    std::string pltype(iter->begin().memberName());
    talk_base::FromString(pltype, &codec.id);
    Json::Value codec_info((*iter)[pltype]);
    std::string codec_name(ReadString(codec_info, "codec"));
    std::vector<std::string> tokens;
    talk_base::split(codec_name, '/', &tokens);
    if (tokens.size() < 2) {
      // Fixed: a malformed codec string without the "audio/" prefix used to
      // index past the end of |tokens|; skip such entries instead.
      continue;
    }
    codec.name = tokens[1];
    codec.clockrate = ReadUInt(codec_info, "clockrate");
    content->AddCodec(codec);
  }

  return true;
}
// Reads the "rtpmap" entries of a video m-line into |content|. Each entry
// is {"<payload id>": {"codec": "video/NAME"}}.
// Returns false if no rtpmap is present.
bool ParseVideoCodec(const Json::Value& value,
                     cricket::VideoContentDescription* content) {
  std::vector<Json::Value> rtpmap(ReadValues(value, "rtpmap"));
  if (rtpmap.empty())
    return false;

  std::vector<Json::Value>::const_iterator iter =
      rtpmap.begin();
  std::vector<Json::Value>::const_iterator iter_end =
      rtpmap.end();
  for (; iter != iter_end; ++iter) {
    cricket::VideoCodec codec;
    std::string pltype(iter->begin().memberName());
    talk_base::FromString(pltype, &codec.id);
    Json::Value codec_info((*iter)[pltype]);
    std::vector<std::string> tokens;
    talk_base::split(codec_info["codec"].asString(), '/', &tokens);
    if (tokens.size() < 2) {
      // Fixed: a malformed codec string without the "video/" prefix used to
      // index past the end of |tokens|; skip such entries instead.
      continue;
    }
    codec.name = tokens[1];
    content->AddCodec(codec);
  }

  return true;
}
// Converts the JSON "attributes"/"candidate" array of one m-line into
// cricket::Candidate objects appended to |candidates|. Always returns true.
bool ParseICECandidates(const Json::Value& value,
                        std::vector<cricket::Candidate>* candidates) {
  Json::Value attributes(ReadValue(value, "attributes"));
  std::string ice_pwd(ReadString(attributes, "ice-pwd"));
  std::string ice_ufrag(ReadString(attributes, "ice-ufrag"));

  std::vector<Json::Value> jcandidates(ReadValues(attributes, "candidate"));

  std::vector<Json::Value>::const_iterator iter =
      jcandidates.begin();
  std::vector<Json::Value>::const_iterator iter_end =
      jcandidates.end();
  char buffer[16];
  for (; iter != iter_end; ++iter) {
    cricket::Candidate cand;

    std::string str;
    // Fixed: ReadUInt returns uint32; assigning it directly to std::string
    // hit the single-char operator= overload and stored a garbage one-byte
    // string. Format the number as decimal text instead.
    str = talk_base::ToString(ReadUInt(*iter, "generation"));
    cand.set_generation_str(str);

    str = ReadString(*iter, "proto");
    cand.set_protocol(str);

    double priority = ReadDouble(*iter, "priority");
    talk_base::sprintfn(buffer, ARRAY_SIZE(buffer), "%f", priority);
    cand.set_preference_str(buffer);

    talk_base::SocketAddress addr;
    str = ReadString(*iter, "ip");
    addr.SetIP(str);
    str = ReadString(*iter, "port");
    int port;
    talk_base::FromString(str, &port);
    addr.SetPort(port);
    cand.set_address(addr);

    str = ReadString(*iter, "type");
    cand.set_type(str);
    str = ReadString(*iter, "name");
    cand.set_name(str);
    str = ReadString(*iter, "network_name");
    cand.set_network_name(str);
    str = ReadString(*iter, "username");
    cand.set_username(str);
    str = ReadString(*iter, "password");
    cand.set_password(str);
    candidates->push_back(cand);
  }
  return true;
}
// Copies every element of the JSON array stored at |key| into a vector
// (empty vector if the member is absent or not an array).
std::vector<Json::Value> ReadValues(
    const Json::Value& value, const std::string& key) {
  std::vector<Json::Value> objects;
  const Json::Value& array = value[key];
  for (Json::ArrayIndex i = 0; i < array.size(); ++i) {
    objects.push_back(array[i]);
  }
  return objects;
}
// Returns the member stored under |key| (a null value if absent).
Json::Value ReadValue(const Json::Value& value, const std::string& key) {
  const Json::Value& member = value[key];
  return member;
}
// Returns the member under |key| converted to a string.
std::string ReadString(const Json::Value& value, const std::string& key) {
  const Json::Value& member = value[key];
  return member.asString();
}
// Returns the member under |key| converted to an unsigned integer.
uint32 ReadUInt(const Json::Value& value, const std::string& key) {
  const Json::Value& member = value[key];
  return member.asUInt();
}
// Returns the member under |key| converted to a double.
double ReadDouble(const Json::Value& value, const std::string& key) {
  const Json::Value& member = value[key];
  return member.asDouble();
}
// Add values
// Stores |value| under |key| as a JSON boolean.
void Append(Json::Value* object, const std::string& key, bool value) {
  Json::Value wrapped(value);
  (*object)[key] = wrapped;
}
// Stores the C string |value| under |key| as a JSON string.
void Append(Json::Value* object, const std::string& key, char * value) {
  Json::Value wrapped(value);
  (*object)[key] = wrapped;
}
// Stores |value| under |key| as a JSON real number.
void Append(Json::Value* object, const std::string& key, double value) {
  Json::Value wrapped(value);
  (*object)[key] = wrapped;
}
// Stores |value| under |key| as a JSON signed integer.
void Append(Json::Value* object, const std::string& key, int value) {
  Json::Value wrapped(value);
  (*object)[key] = wrapped;
}
// Stores |value| under |key| as a JSON string.
void Append(Json::Value* object, const std::string& key,
            const std::string& value) {
  Json::Value wrapped(value);
  (*object)[key] = wrapped;
}
// Stores |value| under |key| as a JSON unsigned integer.
void Append(Json::Value* object, const std::string& key, uint32 value) {
  Json::Value wrapped(value);
  (*object)[key] = wrapped;
}
// Stores a copy of an already-built JSON value under |key|.
void Append(Json::Value* object, const std::string& key,
            const Json::Value& value) {
  Json::Value copy(value);
  (*object)[key] = copy;
}
// Appends each element of |values| to the JSON array stored under |key|.
void Append(Json::Value* object,
            const std::string & key,
            const std::vector<Json::Value>& values) {
  Json::Value& array = (*object)[key];
  for (size_t i = 0; i < values.size(); ++i) {
    array.append(values[i]);
  }
}
} // namespace webrtc

View File

@ -1,126 +0,0 @@
/*
* libjingle
* Copyright 2004--2011, Google Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
* EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef TALK_APP_WEBRTC_WEBRTC_JSON_H_
#define TALK_APP_WEBRTC_WEBRTC_JSON_H_
#include <string>
#include <vector>
#ifdef WEBRTC_RELATIVE_PATH
#include "json/json.h"
#else
#include "third_party/jsoncpp/json.h"
#endif
#include "talk/session/phone/codec.h"
#include "talk/p2p/base/candidate.h"
namespace Json {
class Value;
}
namespace cricket {
class AudioContentDescription;
class VideoContentDescription;
struct ContentInfo;
class SessionDescription;
}
// Parsed "stun_service" entry from a JSON signaling message; filled by
// GetStunServer().
struct StunServiceDetails {
  std::string host;      // STUN server host name or address.
  std::string service;   // Service (port) string from the message.
  std::string protocol;  // Transport protocol string from the message.
};
namespace webrtc {

// Extraction helpers for incoming JSON configuration/signaling messages.
bool GetConnectionMediator(const Json::Value& value,
                           std::string* connection_mediator);
bool GetStunServer(const Json::Value& value, StunServiceDetails* stun);
bool GetTurnServer(const Json::Value& value, std::string* turn_server);
// NOTE(review): no definition for FromJsonToAVCodec is visible in the
// implementation file — confirm it is still needed.
bool FromJsonToAVCodec(const Json::Value& value,
                       cricket::AudioContentDescription* audio,
                       cricket::VideoContentDescription* video);

std::vector<Json::Value> ReadValues(const Json::Value& value,
                                    const std::string& key);

// Serialization of a session description + candidates into the JSON
// signaling format.
bool BuildMediaMessage(
    const cricket::ContentInfo& content_info,
    const std::vector<cricket::Candidate>& candidates,
    bool video,
    Json::Value* value);

bool GetJSONSignalingMessage(
    const cricket::SessionDescription* sdp,
    const std::vector<cricket::Candidate>& candidates,
    std::string* signaling_message);

bool BuildRtpMapParams(
    const cricket::ContentInfo& audio_offer,
    bool video,
    std::vector<Json::Value>* rtpmap);

bool BuildAttributes(const std::vector<cricket::Candidate>& candidates,
                     bool video,
                     std::vector<Json::Value>* jcandidates);

std::string Serialize(const Json::Value& value);
// Fixed: the definition (and its callers) use a Json::Value* out-parameter;
// the previous Json::Value& declaration did not match any definition.
bool Deserialize(const std::string& message, Json::Value* value);

// Parsing of the JSON signaling format back into cricket structures.
bool ParseJSONSignalingMessage(const std::string& signaling_message,
                               cricket::SessionDescription*& sdp,
                               std::vector<cricket::Candidate>* candidates);
bool ParseAudioCodec(const Json::Value& value,
                     cricket::AudioContentDescription* content);
bool ParseVideoCodec(const Json::Value& value,
                     cricket::VideoContentDescription* content);
bool ParseICECandidates(const Json::Value& value,
                        std::vector<cricket::Candidate>* candidates);
bool ParseRTCPMux(const Json::Value& value);

// Typed accessors for JSON object members.
Json::Value ReadValue(const Json::Value& value, const std::string& key);
std::string ReadString(const Json::Value& value, const std::string& key);
double ReadDouble(const Json::Value& value, const std::string& key);
uint32 ReadUInt(const Json::Value& value, const std::string& key);

// Add values
void Append(Json::Value* object, const std::string& key, bool value);
void Append(Json::Value* object, const std::string& key, char * value);
void Append(Json::Value* object, const std::string& key, double value);
void Append(Json::Value* object, const std::string& key, int value);
void Append(Json::Value* object, const std::string& key,
            const std::string& value);
void Append(Json::Value* object, const std::string& key, uint32 value);
void Append(Json::Value* object, const std::string& key,
            const Json::Value& value);
void Append(Json::Value* object,
            const std::string& key,
            const std::vector<Json::Value>& values);
}
#endif // TALK_APP_WEBRTC_WEBRTC_JSON_H_

View File

@ -1,81 +0,0 @@
/*
* libjingle
* Copyright 2011, Google Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
* EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "talk/app/webrtc/webrtcdevicemanager.h"
#include <vector>
using cricket::Device;
using cricket::DeviceManager;
// Id assigned to the single synthesized default device.
const int WebRtcDeviceManager::kDefaultDeviceId = -1;
// Stub device manager: it does no hardware enumeration and instead reports
// one "default" device for every query (see GetDefaultDevices()).
WebRtcDeviceManager::WebRtcDeviceManager()
    : DeviceManager(),
      default_device_(DeviceManager::kDefaultDeviceName, kDefaultDeviceId) {
}
WebRtcDeviceManager::~WebRtcDeviceManager() {
  // Mirror the base-class contract of shutting down before destruction.
  Terminate();
}
// Nothing to acquire; always succeeds.
bool WebRtcDeviceManager::Init() {
  return true;
}
// Nothing to release; intentionally a no-op.
void WebRtcDeviceManager::Terminate() {
}
// All device queries below return the same single default device.
bool WebRtcDeviceManager::GetAudioInputDevices(
    std::vector<Device>* devs) {
  return GetDefaultDevices(devs);
}
bool WebRtcDeviceManager::GetAudioOutputDevices(
    std::vector<Device>* devs) {
  return GetDefaultDevices(devs);
}
bool WebRtcDeviceManager::GetVideoCaptureDevices(
    std::vector<Device>* devs) {
  return GetDefaultDevices(devs);
}
// Copies the synthesized default device into |device|.
// NOTE(review): |device| is dereferenced unchecked; callers must pass
// a non-NULL pointer.
bool WebRtcDeviceManager::GetDefaultVideoCaptureDevice(
    Device* device) {
  *device = default_device_;
  return true;
}
// Replaces the contents of |devs| with the single synthesized default
// device. Returns false only when |devs| is NULL.
bool WebRtcDeviceManager::GetDefaultDevices(
    std::vector<cricket::Device>* devs) {
  if (devs == NULL)
    return false;
  devs->assign(1, default_device_);
  return true;
}

View File

@ -1,53 +0,0 @@
/*
* libjingle
* Copyright 2011, Google Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
* EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef TALK_APP_WEBRTC_WEBRTCDEVICEMANAGER_H_
#define TALK_APP_WEBRTC_WEBRTCDEVICEMANAGER_H_
#include <vector>
#include "talk/session/phone/devicemanager.h"
// Stub cricket::DeviceManager that performs no real device enumeration:
// every query reports a single synthesized "default" device.
class WebRtcDeviceManager : public cricket::DeviceManager {
 public:
  WebRtcDeviceManager();
  ~WebRtcDeviceManager();
  // cricket::DeviceManager overrides. Init()/Terminate() are no-ops.
  virtual bool Init();
  virtual void Terminate();
  virtual bool GetAudioInputDevices(std::vector<cricket::Device>* devs);
  virtual bool GetAudioOutputDevices(std::vector<cricket::Device>* devs);
  virtual bool GetVideoCaptureDevices(std::vector<cricket::Device>* devs);
  virtual bool GetDefaultVideoCaptureDevice(cricket::Device* device);
 private:
  static const int kDefaultDeviceId;
  // Fills |devs| with the one default device; false if |devs| is NULL.
  bool GetDefaultDevices(std::vector<cricket::Device>* devs);
  cricket::Device default_device_;
};
#endif // TALK_APP_WEBRTC_WEBRTCDEVICEMANAGER_H_

View File

@ -1,143 +0,0 @@
/*
* libjingle
* Copyright 2011, Google Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
* EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "talk/app/webrtc/webrtcmediaengine.h"
#include <vector>
#include "talk/session/phone/webrtcvoiceengine.h"
#include "talk/session/phone/webrtcvideoengine.h"
// Bundles the WebRTC voice and video engines behind cricket::MediaEngine.
// |adm| is the audio device module, |adm_sc| a second module (presumably for
// soundclip playout -- confirm against WebRtcVoiceEngine), and |vcm| the
// video capture module, which may be supplied later via
// SetVideoCaptureModule().
WebRtcMediaEngine::WebRtcMediaEngine(webrtc::AudioDeviceModule* adm,
    webrtc::AudioDeviceModule* adm_sc, webrtc::VideoCaptureModule* vcm)
    : voice_(new cricket::WebRtcVoiceEngine(adm, adm_sc)),
      video_(new cricket::WebRtcVideoEngine(voice_.get(), vcm)) {
}
WebRtcMediaEngine::~WebRtcMediaEngine() {
}
// Initializes voice first, then video; rolls back voice if video fails so
// the two engines' lifecycles stay in sync.
bool WebRtcMediaEngine::Init() {
  if (!voice_->Init())
    return false;
  if (!video_->Init()) {
    voice_->Terminate();
    return false;
  }
  // Re-emit the video engine's capture-result notifications as our own.
  SignalVideoCaptureResult.repeat(video_->SignalCaptureResult);
  return true;
}
// Tears down in reverse order of Init().
void WebRtcMediaEngine::Terminate() {
  video_->Terminate();
  voice_->Terminate();
}
// Union of the audio and video capability bit flags.
int WebRtcMediaEngine::GetCapabilities() {
  return (voice_->GetCapabilities() | video_->GetCapabilities());
}
// The methods below are thin forwards to the owned voice/video engines.
cricket::VoiceMediaChannel* WebRtcMediaEngine::CreateChannel() {
  return voice_->CreateChannel();
}
cricket::VideoMediaChannel* WebRtcMediaEngine::CreateVideoChannel(
    cricket::VoiceMediaChannel* channel) {
  return video_->CreateChannel(channel);
}
cricket::SoundclipMedia* WebRtcMediaEngine::CreateSoundclip() {
  return voice_->CreateSoundclip();
}
bool WebRtcMediaEngine::SetAudioOptions(int o) {
  return voice_->SetOptions(o);
}
bool WebRtcMediaEngine::SetVideoOptions(int o) {
  return video_->SetOptions(o);
}
bool WebRtcMediaEngine::SetDefaultVideoEncoderConfig(
    const cricket::VideoEncoderConfig& config) {
  return video_->SetDefaultEncoderConfig(config);
}
bool WebRtcMediaEngine::SetSoundDevices(const cricket::Device* in_device,
    const cricket::Device* out_device) {
  return voice_->SetDevices(in_device, out_device);
}
bool WebRtcMediaEngine::SetVideoCaptureDevice(
    const cricket::Device* cam_device) {
  return video_->SetCaptureDevice(cam_device);
}
bool WebRtcMediaEngine::GetOutputVolume(int* level) {
  return voice_->GetOutputVolume(level);
}
bool WebRtcMediaEngine::SetOutputVolume(int level) {
  return voice_->SetOutputVolume(level);
}
int WebRtcMediaEngine::GetInputLevel() {
  return voice_->GetInputLevel();
}
bool WebRtcMediaEngine::SetLocalMonitor(bool enable) {
  return voice_->SetLocalMonitor(enable);
}
bool WebRtcMediaEngine::SetLocalRenderer(cricket::VideoRenderer* renderer) {
  return video_->SetLocalRenderer(renderer);
}
cricket::CaptureResult WebRtcMediaEngine::SetVideoCapture(bool capture) {
  return video_->SetCapture(capture);
}
const std::vector<cricket::AudioCodec>& WebRtcMediaEngine::audio_codecs() {
  return voice_->codecs();
}
const std::vector<cricket::VideoCodec>& WebRtcMediaEngine::video_codecs() {
  return video_->codecs();
}
// Forwards logging severity/filter configuration to the voice engine.
// (Dropped the non-idiomatic `return` of a void expression.)
void WebRtcMediaEngine::SetVoiceLogging(int min_sev, const char* filter) {
  voice_->SetLogging(min_sev, filter);
}
// Forwards logging severity/filter configuration to the video engine.
void WebRtcMediaEngine::SetVideoLogging(int min_sev, const char* filter) {
  video_->SetLogging(min_sev, filter);
}
// Late-binds the video capture module when it was not available at
// construction time (the ctor accepts a NULL |vcm|).
bool WebRtcMediaEngine::SetVideoCaptureModule(
    webrtc::VideoCaptureModule* vcm) {
  return video_->SetCaptureModule(vcm);
}

View File

@ -1,89 +0,0 @@
/*
* libjingle
* Copyright 2011, Google Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
* EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef TALK_APP_WEBRTC_WEBRTCMEDIAENGINE_H_
#define TALK_APP_WEBRTC_WEBRTCMEDIAENGINE_H_
#include <vector>
#include "talk/base/scoped_ptr.h"
#include "talk/session/phone/mediaengine.h"
namespace cricket {
class WebRtcVideoEngine;
class WebRtcVoiceEngine;
}
namespace webrtc {
class AudioDeviceModule;
class VideoCaptureModule;
}
// TODO(ronghuawu): chromium doesn't need to know the
// detail about the cricket::MediaEngine interface.
// Composite media engine pairing cricket's WebRtcVoiceEngine and
// WebRtcVideoEngine behind the generic cricket::MediaEngine interface.
// All overrides are thin forwards to the owned engines.
class WebRtcMediaEngine : public cricket::MediaEngine {
 public:
  // |adm|/|adm_sc| are audio device modules; |vcm| is the video capture
  // module and may be NULL here (see SetVideoCaptureModule below).
  WebRtcMediaEngine(webrtc::AudioDeviceModule* adm,
      webrtc::AudioDeviceModule* adm_sc, webrtc::VideoCaptureModule* vcm);
  virtual ~WebRtcMediaEngine();
  // cricket::MediaEngine implementation.
  virtual bool Init();
  virtual void Terminate();
  virtual int GetCapabilities();
  virtual cricket::VoiceMediaChannel *CreateChannel();
  virtual cricket::VideoMediaChannel *CreateVideoChannel(
      cricket::VoiceMediaChannel* channel);
  virtual cricket::SoundclipMedia *CreateSoundclip();
  virtual bool SetAudioOptions(int o);
  virtual bool SetVideoOptions(int o);
  virtual bool SetDefaultVideoEncoderConfig(
      const cricket::VideoEncoderConfig& config);
  virtual bool SetSoundDevices(const cricket::Device* in_device,
      const cricket::Device* out_device);
  virtual bool SetVideoCaptureDevice(const cricket::Device* cam_device);
  virtual bool GetOutputVolume(int* level);
  virtual bool SetOutputVolume(int level);
  virtual int GetInputLevel();
  virtual bool SetLocalMonitor(bool enable);
  virtual bool SetLocalRenderer(cricket::VideoRenderer* renderer);
  virtual cricket::CaptureResult SetVideoCapture(bool capture);
  virtual const std::vector<cricket::AudioCodec>& audio_codecs();
  virtual const std::vector<cricket::VideoCodec>& video_codecs();
  virtual void SetVoiceLogging(int min_sev, const char* filter);
  virtual void SetVideoLogging(int min_sev, const char* filter);
  // Allow the VCM be set later if not ready during the construction time
  bool SetVideoCaptureModule(webrtc::VideoCaptureModule* vcm);
 protected:
  WebRtcMediaEngine();
  talk_base::scoped_ptr<cricket::WebRtcVoiceEngine> voice_;
  talk_base::scoped_ptr<cricket::WebRtcVideoEngine> video_;
};
#endif // TALK_APP_WEBRTC_WEBRTCMEDIAENGINE_H_

View File

@ -1,600 +0,0 @@
/*
* libjingle
* Copyright 2004--2011, Google Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
* EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "talk/app/webrtc/webrtcsession.h"
#include <string>
#include <vector>
#include "talk/base/common.h"
#include "talk/base/json.h"
#include "talk/base/scoped_ptr.h"
#include "talk/p2p/base/constants.h"
#include "talk/p2p/base/sessiondescription.h"
#include "talk/p2p/base/p2ptransport.h"
#include "talk/session/phone/channel.h"
#include "talk/session/phone/channelmanager.h"
#include "talk/session/phone/mediasessionclient.h"
#include "talk/session/phone/voicechannel.h"
namespace webrtc {
// Messages handled on the signaling thread (see OnMessage).
enum {
  MSG_CANDIDATE_TIMEOUT = 101,
  MSG_WEBRTC_CREATE_TRANSPORT,
  MSG_WEBRTC_DELETE_TRANSPORT,
};
static const int kAudioMonitorPollFrequency = 100;
static const int kMonitorPollFrequency = 1000;
// We allow 30 seconds to establish a connection; beyond that we consider
// it an error
static const int kCallSetupTimeout = 30 * 1000;
// A loss of connectivity is probably due to the Internet connection going
// down, and it might take a while to come back on wireless networks, so we
// use a longer timeout for that.
static const int kCallLostTimeout = 60 * 1000;
// Content names used to pick the transport channel type in CreateChannel().
static const char kVideoStream[] = "video_rtp";
static const char kAudioStream[] = "rtp";
WebRtcSession::WebRtcSession(
    const std::string& id,
    bool incoming,
    cricket::PortAllocator* allocator,
    cricket::ChannelManager* channelmgr,
    talk_base::Thread* signaling_thread)
    : BaseSession(signaling_thread),
      transport_(NULL),
      channel_manager_(channelmgr),
      transports_writable_(false),
      muted_(false),
      camera_muted_(false),
      setup_timeout_(kCallSetupTimeout),
      signaling_thread_(signaling_thread),
      id_(id),
      incoming_(incoming),
      port_allocator_(allocator) {
  // Keep the base class's session id in sync with ours.
  BaseSession::sid_ = id;
}
WebRtcSession::~WebRtcSession() {
  RemoveAllStreams();
  if (state_ != STATE_RECEIVEDTERMINATE) {
    Terminate();
  }
  // Send() is synchronous, so the transport is destroyed on the signaling
  // thread before this destructor returns.
  signaling_thread_->Send(this, MSG_WEBRTC_DELETE_TRANSPORT, NULL);
}
// Creates the transport (synchronously, on the signaling thread), hooks up
// its signals, and arms the call-setup timeout.
bool WebRtcSession::Initiate() {
  if (signaling_thread_ == NULL)
    return false;
  signaling_thread_->Send(this, MSG_WEBRTC_CREATE_TRANSPORT, NULL);
  if (transport_ == NULL)
    return false;
  transport_->set_allow_local_ips(true);
  // start transports
  transport_->SignalRequestSignaling.connect(
      this, &WebRtcSession::OnRequestSignaling);
  transport_->SignalCandidatesReady.connect(
      this, &WebRtcSession::OnCandidatesReady);
  transport_->SignalWritableState.connect(
      this, &WebRtcSession::OnWritableState);
  // Limit the amount of time that setting up a call may take.
  StartTransportTimeout(kCallSetupTimeout);
  return true;
}
// Must run on the signaling thread; invoked via MSG_WEBRTC_CREATE_TRANSPORT.
cricket::Transport* WebRtcSession::CreateTransport() {
  ASSERT(signaling_thread()->IsCurrent());
  return new cricket::P2PTransport(
      talk_base::Thread::Current(),
      channel_manager_->worker_thread(), port_allocator());
}
// Records a new audio stream and creates its voice channel.
// NOTE(review): the original comment said "RTCP disabled" while |true| is
// passed as the third argument -- confirm what that parameter means in
// ChannelManager::CreateVoiceChannel. The StreamInfo is owned by streams_;
// verify it is freed when the stream is removed.
bool WebRtcSession::CreateVoiceChannel(const std::string& stream_id) {
  StreamInfo* stream_info = new StreamInfo(stream_id);
  stream_info->video = false;
  streams_.push_back(stream_info);
  cricket::VoiceChannel* voice_channel =
      channel_manager_->CreateVoiceChannel(this, stream_id, true);
  ASSERT(voice_channel != NULL);
  stream_info->channel = voice_channel;
  return true;
}
// Records a new video stream and creates its video channel (same caveats
// as CreateVoiceChannel above).
bool WebRtcSession::CreateVideoChannel(const std::string& stream_id) {
  StreamInfo* stream_info = new StreamInfo(stream_id);
  stream_info->video = true;
  streams_.push_back(stream_info);
  cricket::VideoChannel* video_channel =
      channel_manager_->CreateVideoChannel(this, stream_id, true, NULL);
  ASSERT(video_channel != NULL);
  stream_info->channel = video_channel;
  return true;
}
// Creates a transport channel named |name| on the session transport.
// The channel type is derived from |content_name|: the video content maps
// to the Gingle video namespace, anything else to the audio namespace.
// Returns NULL when no transport exists yet.
cricket::TransportChannel* WebRtcSession::CreateChannel(
    const std::string& content_name,
    const std::string& name) {
  if (transport_ == NULL) {
    return NULL;
  }
  std::string type = cricket::NS_GINGLE_AUDIO;
  if (content_name == kVideoStream) {
    type = cricket::NS_GINGLE_VIDEO;
  }
  cricket::TransportChannel* channel = transport_->CreateChannel(name, type);
  ASSERT(channel != NULL);
  return channel;
}
// Looks up a transport channel. Note |content_name| is ignored: channels
// are keyed by |name| alone on this transport.
cricket::TransportChannel* WebRtcSession::GetChannel(
    const std::string& content_name, const std::string& name) {
  if (!transport_)
    return NULL;
  return transport_->GetChannel(name);
}
// Destroys the transport channel keyed by |name| (|content_name| unused).
void WebRtcSession::DestroyChannel(
    const std::string& content_name, const std::string& name) {
  if (!transport_)
    return;
  transport_->DestroyChannel(name);
}
// Dispatches messages posted/sent to the signaling thread.
void WebRtcSession::OnMessage(talk_base::Message* message) {
  switch (message->message_id) {
    case MSG_CANDIDATE_TIMEOUT:
      if (transport_->writable()) {
        // This should never happen: The timeout triggered even
        // though a call was successfully set up.
        ASSERT(false);
      }
      // Tell observers the call could not be established in time.
      SignalFailedCall();
      break;
    case MSG_WEBRTC_CREATE_TRANSPORT:
      // Sent synchronously from Initiate(); runs on the signaling thread.
      transport_ = CreateTransport();
      break;
    case MSG_WEBRTC_DELETE_TRANSPORT:
      // Sent synchronously from the destructor.
      if (transport_) {
        delete transport_;
        transport_ = NULL;
      }
      break;
    default:
      cricket::BaseSession::OnMessage(message);
      break;
  }
}
// Starts connecting all transport channels and moves the session into the
// sent-offer/sent-accept state. Fails when no streams have been added.
bool WebRtcSession::Connect() {
  if (streams_.empty()) {
    // Nothing to initiate.
    return false;
  }
  // Connect all the transport channels created earlier for this session.
  transport_->ConnectChannels();
  // Create an offer now; this is done to call SetState. The actual offer
  // is sent when the OnCandidatesReady callback is received.
  cricket::SessionDescription* offer = CreateOffer();
  set_local_description(offer);
  SetState((incoming()) ? STATE_SENTACCEPT : STATE_SENTINITIATE);
  // Enable all the channels.
  EnableAllStreams();
  SetVideoCapture(true);
  return true;
}
bool WebRtcSession::SetVideoRenderer(const std::string& stream_id,
cricket::VideoRenderer* renderer) {
bool ret = false;
StreamMap::iterator iter;
for (iter = streams_.begin(); iter != streams_.end(); ++iter) {
StreamInfo* stream_info = (*iter);
if (stream_info->stream_id.compare(stream_id) == 0) {
ASSERT(stream_info->channel != NULL);
ASSERT(stream_info->video);
cricket::VideoChannel* channel = static_cast<cricket::VideoChannel*>(
stream_info->channel);
ret = channel->SetRenderer(0, renderer);
break;
}
}
return ret;
}
// Toggles capture globally via the channel manager; always reports success.
bool WebRtcSession::SetVideoCapture(bool capture) {
  channel_manager_->SetVideoCapture(capture);
  return true;
}
// Disables and destroys the channel for |stream_id| and drops its entry
// from streams_. Returns false (and logs) when the id is unknown.
// NOTE(review): the StreamInfo object pointed to by the erased entry is not
// deleted here -- verify ownership to rule out a leak.
bool WebRtcSession::RemoveStream(const std::string& stream_id) {
  bool ret = false;
  StreamMap::iterator iter;
  for (iter = streams_.begin(); iter != streams_.end(); ++iter) {
    StreamInfo* sinfo = (*iter);
    if (sinfo->stream_id.compare(stream_id) == 0) {
      if (!sinfo->video) {
        cricket::VoiceChannel* channel = static_cast<cricket::VoiceChannel*> (
            sinfo->channel);
        channel->Enable(false);
        channel_manager_->DestroyVoiceChannel(channel);
      } else {
        cricket::VideoChannel* channel = static_cast<cricket::VideoChannel*> (
            sinfo->channel);
        channel->Enable(false);
        channel_manager_->DestroyVideoChannel(channel);
      }
      // channel and transport will be deleted in
      // DestroyVoiceChannel/DestroyVideoChannel
      streams_.erase(iter);
      ret = true;
      break;
    }
  }
  if (!ret) {
    LOG(LERROR) << "No streams found for stream id " << stream_id;
    // TODO(ronghuawu): trigger onError callback
  }
  return ret;
}
// Enables every stream that already has a channel attached.
void WebRtcSession::EnableAllStreams() {
  StreamMap::const_iterator i;
  for (i = streams_.begin(); i != streams_.end(); ++i) {
    cricket::BaseChannel* channel = (*i)->channel;
    if (channel)
      channel->Enable(true);
  }
}
// Removes every stream via RemoveStream().
void WebRtcSession::RemoveAllStreams() {
  // signaling_thread_->Post(this, MSG_RTC_REMOVEALLSTREAMS);
  // First build a list of streams to remove and then remove them.
  // The reason we do this is that if we remove the streams inside the
  // loop, a stream might get removed while we're enumerating and the iterator
  // will become invalid (and we crash).
  // streams_ entry will be removed from ChannelManager callback method
  // DestroyChannel
  std::vector<std::string> streams_to_remove;
  StreamMap::iterator iter;
  for (iter = streams_.begin(); iter != streams_.end(); ++iter)
    streams_to_remove.push_back((*iter)->stream_id);
  for (std::vector<std::string>::iterator i = streams_to_remove.begin();
       i != streams_to_remove.end(); ++i) {
    RemoveStream(*i);
  }
}
// True if any stream has exactly this id (audio or video).
bool WebRtcSession::HasStream(const std::string& stream_id) const {
  StreamMap::const_iterator iter;
  for (iter = streams_.begin(); iter != streams_.end(); ++iter) {
    StreamInfo* sinfo = (*iter);
    if (stream_id.compare(sinfo->stream_id) == 0) {
      return true;
    }
  }
  return false;
}
// True if any stream matches the requested media type (video/audio).
bool WebRtcSession::HasStream(bool video) const {
  StreamMap::const_iterator iter;
  for (iter = streams_.begin(); iter != streams_.end(); ++iter) {
    StreamInfo* sinfo = (*iter);
    if (sinfo->video == video) {
      return true;
    }
  }
  return false;
}
bool WebRtcSession::HasAudioStream() const {
  return HasStream(false);
}
bool WebRtcSession::HasVideoStream() const {
  return HasStream(true);
}
talk_base::Thread* WebRtcSession::worker_thread() {
  return channel_manager_->worker_thread();
}
// The transport asked for signaling; tell it signaling is ready so it can
// proceed with candidate gathering.
void WebRtcSession::OnRequestSignaling(cricket::Transport* transport) {
  transport->OnSignalingReady();
}
// Tracks the transport's writable state and (re)arms the failure timeout.
void WebRtcSession::OnWritableState(cricket::Transport* transport) {
  ASSERT(transport == transport_);
  const bool transports_writable = transport_->writable();
  if (transports_writable) {
    if (transports_writable != transports_writable_) {
      // Just became writable: cancel the pending failure timeout.
      signaling_thread_->Clear(this, MSG_CANDIDATE_TIMEOUT);
    } else {
      // At one point all channels were writable and we had full connectivity,
      // but then we lost it. Start the timeout again to kill the call if it
      // doesn't come back.
      // NOTE(review): this branch actually runs when the transport *remains*
      // writable (state unchanged), which contradicts the comment above --
      // verify whether the condition was meant to cover the non-writable
      // case instead.
      StartTransportTimeout(kCallLostTimeout);
    }
    transports_writable_ = transports_writable;
  }
  NotifyTransportState();
  return;
}
// Posts a MSG_CANDIDATE_TIMEOUT to the current thread after |timeout| ms.
void WebRtcSession::StartTransportTimeout(int timeout) {
  talk_base::Thread::Current()->PostDelayed(timeout, this,
                                            MSG_CANDIDATE_TIMEOUT,
                                            NULL);
}
// Intentionally empty hook.
void WebRtcSession::NotifyTransportState() {
}
// Handles an incoming offer: builds an answer, creates the matching
// voice/video channels, hands the remote candidates to the transport and
// fires SignalAddStream once (video preferred over audio).
// NOTE(review): ownership of |offer| on the early-return failure paths is
// unclear from here -- verify set_remote_description takes ownership and
// that failures do not leak the description.
bool WebRtcSession::OnInitiateMessage(
    cricket::SessionDescription* offer,
    const std::vector<cricket::Candidate>& candidates) {
  if (!offer) {
    LOG(LERROR) << "No SessionDescription from peer";
    return false;
  }
  talk_base::scoped_ptr<cricket::SessionDescription> answer;
  answer.reset(CreateAnswer(offer));
  const cricket::ContentInfo* audio_content = GetFirstAudioContent(
      answer.get());
  const cricket::ContentInfo* video_content = GetFirstVideoContent(
      answer.get());
  // Reject offers with neither audio nor video content.
  if (!audio_content && !video_content) {
    return false;
  }
  bool ret = true;
  if (audio_content) {
    // Only one audio stream per session is supported.
    ret = !HasAudioStream() &&
          CreateVoiceChannel(audio_content->name);
    if (!ret) {
      LOG(LERROR) << "Failed to create voice channel for "
                  << audio_content->name;
      return false;
    }
  }
  if (video_content) {
    // Only one video stream per session is supported.
    ret = !HasVideoStream() &&
          CreateVideoChannel(video_content->name);
    if (!ret) {
      LOG(LERROR) << "Failed to create video channel for "
                  << video_content->name;
      return false;
    }
  }
  // Provide remote candidates to the transport
  transport_->OnRemoteCandidates(candidates);
  set_remote_description(offer);
  SetState(STATE_RECEIVEDINITIATE);
  transport_->ConnectChannels();
  EnableAllStreams();
  // Ownership of the answer passes to the base session.
  set_local_description(answer.release());
  SetState(STATE_SENTACCEPT);
  // AddStream called only once with Video label
  if (video_content) {
    SignalAddStream(video_content->name, true);
  } else {
    SignalAddStream(audio_content->name, false);
  }
  return true;
}
// Handles the remote answer (or additional candidate batches once the
// session is already accepted/in progress).
bool WebRtcSession::OnRemoteDescription(
    cricket::SessionDescription* desc,
    const std::vector<cricket::Candidate>& candidates) {
  if (state() == STATE_SENTACCEPT ||
      state() == STATE_RECEIVEDACCEPT ||
      state() == STATE_INPROGRESS) {
    // Already answered: this message only carries more candidates.
    transport_->OnRemoteCandidates(candidates);
    return true;
  }
  // Session description is always accepted.
  set_remote_description(desc);
  SetState(STATE_RECEIVEDACCEPT);
  // Will trigger OnWritableState() if successful.
  transport_->OnRemoteCandidates(candidates);
  if (!incoming()) {
    // Trigger OnAddStream callback at the initiator
    const cricket::ContentInfo* video_content = GetFirstVideoContent(desc);
    if (video_content && !SendSignalAddStream(true)) {
      LOG(LERROR) << "failed to find video stream in map";
      ASSERT(false);
    } else {
      const cricket::ContentInfo* audio_content = GetFirstAudioContent(desc);
      if (audio_content && !SendSignalAddStream(false)) {
        LOG(LERROR) << "failed to find audio stream in map";
        ASSERT(false);
      }
    }
  }
  return true;
}
bool WebRtcSession::SendSignalAddStream(bool video) {
StreamMap::const_iterator iter;
for (iter = streams_.begin(); iter != streams_.end(); ++iter) {
StreamInfo* sinfo = (*iter);
if (sinfo->video == video) {
SignalAddStream(sinfo->stream_id, video);
return true;
}
}
return false;
}
// Builds a local offer containing one content section per added stream,
// advertising all codecs supported by the channel manager. Caller takes
// ownership of the returned description.
cricket::SessionDescription* WebRtcSession::CreateOffer() {
  cricket::SessionDescription* offer = new cricket::SessionDescription();
  StreamMap::iterator iter;
  for (iter = streams_.begin(); iter != streams_.end(); ++iter) {
    if ((*iter)->video) {
      // add video codecs, if there is video stream added
      cricket::VideoContentDescription* video =
          new cricket::VideoContentDescription();
      std::vector<cricket::VideoCodec> video_codecs;
      channel_manager_->GetSupportedVideoCodecs(&video_codecs);
      for (VideoCodecs::const_iterator codec = video_codecs.begin();
           codec != video_codecs.end(); ++codec) {
        video->AddCodec(*codec);
      }
      // enabling RTCP mux by default at both ends, without
      // exchanging it through signaling message.
      video->set_rtcp_mux(true);
      video->SortCodecs();
      offer->AddContent(cricket::CN_VIDEO, cricket::NS_JINGLE_RTP, video);
    } else {
      cricket::AudioContentDescription* audio =
          new cricket::AudioContentDescription();
      std::vector<cricket::AudioCodec> audio_codecs;
      channel_manager_->GetSupportedAudioCodecs(&audio_codecs);
      for (AudioCodecs::const_iterator codec = audio_codecs.begin();
           codec != audio_codecs.end(); ++codec) {
        audio->AddCodec(*codec);
      }
      // enabling RTCP mux by default at both ends, without
      // exchanging it through signaling message.
      audio->set_rtcp_mux(true);
      audio->SortCodecs();
      offer->AddContent(cricket::CN_AUDIO, cricket::NS_JINGLE_RTP, audio);
    }
  }
  return offer;
}
// Builds an answer to |offer| by intersecting our supported codecs with the
// offered ones (keeping the remote payload-type ids). Caller takes
// ownership of the returned description.
cricket::SessionDescription* WebRtcSession::CreateAnswer(
    const cricket::SessionDescription* offer) {
  cricket::SessionDescription* answer = new cricket::SessionDescription();
  const cricket::ContentInfo* audio_content = GetFirstAudioContent(offer);
  if (audio_content) {
    const cricket::AudioContentDescription* audio_offer =
        static_cast<const cricket::AudioContentDescription*>(
            audio_content->description);
    cricket::AudioContentDescription* audio_accept =
        new cricket::AudioContentDescription();
    AudioCodecs audio_codecs;
    channel_manager_->GetSupportedAudioCodecs(&audio_codecs);
    for (AudioCodecs::const_iterator ours = audio_codecs.begin();
         ours != audio_codecs.end(); ++ours) {
      for (AudioCodecs::const_iterator theirs = audio_offer->codecs().begin();
           theirs != audio_offer->codecs().end(); ++theirs) {
        if (ours->Matches(*theirs)) {
          // Answer with our codec but the remote's payload id.
          cricket::AudioCodec negotiated(*ours);
          negotiated.id = theirs->id;
          audio_accept->AddCodec(negotiated);
        }
      }
    }
    // RTCP mux is set based on what present in incoming offer
    audio_accept->set_rtcp_mux(audio_offer->rtcp_mux());
    audio_accept->SortCodecs();
    answer->AddContent(audio_content->name, audio_content->type, audio_accept);
  }
  const cricket::ContentInfo* video_content = GetFirstVideoContent(offer);
  if (video_content) {
    const cricket::VideoContentDescription* video_offer =
        static_cast<const cricket::VideoContentDescription*>(
            video_content->description);
    cricket::VideoContentDescription* video_accept =
        new cricket::VideoContentDescription();
    VideoCodecs video_codecs;
    channel_manager_->GetSupportedVideoCodecs(&video_codecs);
    for (VideoCodecs::const_iterator ours = video_codecs.begin();
         ours != video_codecs.end(); ++ours) {
      for (VideoCodecs::const_iterator theirs = video_offer->codecs().begin();
           theirs != video_offer->codecs().end(); ++theirs) {
        if (ours->Matches(*theirs)) {
          // Answer with our codec but the remote's payload id.
          cricket::VideoCodec negotiated(*ours);
          negotiated.id = theirs->id;
          video_accept->AddCodec(negotiated);
        }
      }
    }
    // RTCP mux is set based on what present in incoming offer
    video_accept->set_rtcp_mux(video_offer->rtcp_mux());
    video_accept->SortCodecs();
    answer->AddContent(video_content->name, video_content->type, video_accept);
  }
  return answer;
}
// Forwards straight to the base session's error handling.
void WebRtcSession::SetError(Error error) {
  BaseSession::SetError(error);
}
// Accumulates newly gathered local candidates and forwards them, together
// with the current local description, to the signaling observer.
void WebRtcSession::OnCandidatesReady(
    cricket::Transport* transport,
    const std::vector<cricket::Candidate>& candidates) {
  local_candidates_.insert(local_candidates_.end(),
                           candidates.begin(), candidates.end());
  SignalLocalDescription(local_description(), candidates);
}
} /* namespace webrtc */

View File

@ -1,222 +0,0 @@
/*
* libjingle
* Copyright 2004--2011, Google Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
* EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef TALK_APP_WEBRTC_WEBRTCSESSION_H_
#define TALK_APP_WEBRTC_WEBRTCSESSION_H_
#include <map>
#include <string>
#include <vector>
#include "talk/base/logging.h"
#include "talk/base/messagehandler.h"
#include "talk/p2p/base/candidate.h"
#include "talk/p2p/base/session.h"
#include "talk/session/phone/channel.h"
#include "talk/session/phone/mediachannel.h"
namespace cricket {
class ChannelManager;
class Transport;
class TransportChannel;
class VoiceChannel;
class VideoChannel;
struct ConnectionInfo;
}
namespace Json {
class Value;
}
namespace webrtc {
typedef std::vector<cricket::AudioCodec> AudioCodecs;
typedef std::vector<cricket::VideoCodec> VideoCodecs;
// A WebRTC session: owns the voice/video channels for one peer-to-peer
// call and drives offer/answer signaling on top of cricket::BaseSession.
// All public methods are expected to be called on the signaling thread
// (see webrtcsession_unittest.cc) -- TODO confirm against callers.
class WebRtcSession : public cricket::BaseSession {
 public:
  WebRtcSession(const std::string& id,
                bool incoming,
                cricket::PortAllocator* allocator,
                cricket::ChannelManager* channelmgr,
                talk_base::Thread* signaling_thread);
  ~WebRtcSession();

  // Starts the session machinery (transport creation etc.).
  bool Initiate();
  // Starts connectivity establishment; SignalLocalDescription fires once
  // local candidates are gathered.
  bool Connect();
  // Handles a remote answer/update. NOTE(review): ownership of |sdp|
  // appears to pass to the session -- confirm before changing callers.
  bool OnRemoteDescription(cricket::SessionDescription* sdp,
      const std::vector<cricket::Candidate>& candidates);
  // Handles an incoming initiate (offer) message.
  bool OnInitiateMessage(cricket::SessionDescription* sdp,
      const std::vector<cricket::Candidate>& candidates);
  // Create a local media channel for the given stream label.
  bool CreateVoiceChannel(const std::string& stream_id);
  bool CreateVideoChannel(const std::string& stream_id);
  bool RemoveStream(const std::string& stream_id);
  void RemoveAllStreams();

  // Returns true if we have either a voice or video stream matching this label.
  bool HasStream(const std::string& label) const;
  // Returns true if any stream of the given kind (video/audio) exists.
  bool HasStream(bool video) const;
  // Returns true if there's one or more audio channels in the session.
  bool HasAudioStream() const;
  // Returns true if there's one or more video channels in the session.
  bool HasVideoStream() const;
  // Attaches a renderer to the video stream with the given label.
  bool SetVideoRenderer(const std::string& stream_id,
                        cricket::VideoRenderer* renderer);

  // This signal occurs when all the streams have been removed.
  // It is triggered by a successful call to the RemoveAllStream or
  // the OnRemoteDescription with stream deleted signaling message with the
  // candidates port equal to 0.
  sigslot::signal1<WebRtcSession*> SignalRemoveStreamMessage;
  // This signal indicates a stream has been added properly.
  // It is triggered by a successful call to the OnInitiateMessage or
  // the OnRemoteDescription and if it's going to the STATE_RECEIVEDACCEPT.
  sigslot::signal2<const std::string&, bool> SignalAddStream;
  // This signal occurs when one stream is removed with the signaling
  // message from the remote peer with the candidates port equal to 0.
  sigslot::signal2<const std::string&, bool> SignalRemoveStream;
  // This signal occurs when the local candidate is ready
  sigslot::signal2<const cricket::SessionDescription*,
      const std::vector<cricket::Candidate>&> SignalLocalDescription;
  // This signal triggers when setting up or resuming a call has not been
  // successful before a certain time out.
  sigslot::signal0<> SignalFailedCall;

  // Simple accessors.
  bool muted() const { return muted_; }
  bool camera_muted() const { return camera_muted_; }
  const std::vector<cricket::Candidate>& local_candidates() {
    return local_candidates_;
  }
  const std::string& id() const { return id_; }
  void set_incoming(bool incoming) { incoming_ = incoming; }
  bool incoming() const { return incoming_; }
  cricket::PortAllocator* port_allocator() const { return port_allocator_; }
  talk_base::Thread* signaling_thread() const { return signaling_thread_; }

 protected:
  // methods from cricket::BaseSession
  virtual void SetError(cricket::BaseSession::Error error);
  virtual cricket::TransportChannel* CreateChannel(
      const std::string& content_name, const std::string& name);
  virtual cricket::TransportChannel* GetChannel(
      const std::string& content_name, const std::string& name);
  virtual void DestroyChannel(
      const std::string& content_name, const std::string& name);

 private:
  // Bookkeeping for one media stream: the channel carrying it, whether
  // it is video, and its label.
  struct StreamInfo {
    explicit StreamInfo(const std::string stream_id)
        : channel(NULL),
          video(false),
          stream_id(stream_id) {}
    StreamInfo()
        : channel(NULL),
          video(false) {}
    cricket::BaseChannel* channel;
    bool video;
    std::string stream_id;
  };
  // Not really a map (vector).
  typedef std::vector<StreamInfo*> StreamMap;

  // Dummy functions inherited from cricket::BaseSession.
  // They should never be called.
  virtual bool Accept(const cricket::SessionDescription* sdesc) {
    return true;
  }
  virtual bool Reject(const std::string& reason) {
    return true;
  }
  virtual bool TerminateWithReason(const std::string& reason) {
    return true;
  }
  virtual talk_base::Thread* worker_thread();

  // methods signaled by the transport
  void OnRequestSignaling(cricket::Transport* transport);
  void OnCandidatesReady(cricket::Transport* transport,
                         const std::vector<cricket::Candidate>& candidates);
  void OnWritableState(cricket::Transport* transport);
  void OnTransportError(cricket::Transport* transport);
  void OnChannelGone(cricket::Transport* transport);

  bool CheckForStreamDeleteMessage(
      const std::vector<cricket::Candidate>& candidates);
  void ProcessTerminateAccept(cricket::SessionDescription* desc);
  void UpdateTransportWritableState();
  bool CheckAllTransportsWritable();
  void StartTransportTimeout(int timeout);
  void NotifyTransportState();
  cricket::SessionDescription* CreateOffer();
  cricket::SessionDescription* CreateAnswer(
      const cricket::SessionDescription* answer);

  // from MessageHandler
  virtual void OnMessage(talk_base::Message* message);
  virtual cricket::Transport* CreateTransport();
  cricket::Transport* GetTransport();

  typedef std::map<std::string, cricket::TransportChannel*> TransportChannelMap;

  bool SetVideoCapture(bool capture);
  void EnableAllStreams();
  bool SendSignalAddStream(bool video);

  cricket::Transport* transport_;
  cricket::ChannelManager* channel_manager_;
  std::vector<StreamInfo*> streams_;
  TransportChannelMap transport_channels_;
  bool transports_writable_;
  bool muted_;
  bool camera_muted_;
  int setup_timeout_;
  std::vector<cricket::Candidate> local_candidates_;
  talk_base::Thread* signaling_thread_;
  std::string id_;
  bool incoming_;
  cricket::PortAllocator* port_allocator_;
  static const char kIncomingDirection[];
  static const char kOutgoingDirection[];
};
} // namespace webrtc
#endif // TALK_APP_WEBRTC_WEBRTCSESSION_H_

View File

@ -1,605 +0,0 @@
/*
* libjingle
* Copyright 2004--2011, Google Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
* EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <stdio.h>
#include <list>
#include "base/gunit.h"
#include "base/helpers.h"
#include "talk/app/webrtc/webrtcsession.h"
#include "talk/base/fakenetwork.h"
#include "talk/base/scoped_ptr.h"
#include "talk/base/thread.h"
#include "talk/p2p/base/portallocator.h"
#include "talk/p2p/base/sessiondescription.h"
#include "talk/p2p/client/fakeportallocator.h"
#include "talk/session/phone/fakesession.h"
#include "talk/session/phone/mediasessionclient.h"
namespace {
// Deep-copies a video content description: clones the codec list onto a
// freshly allocated description and re-sorts it. Caller owns the result.
cricket::VideoContentDescription* CopyVideoContentDescription(
    const cricket::VideoContentDescription* video_description) {
  cricket::VideoContentDescription* copy =
      new cricket::VideoContentDescription();
  const cricket::VideoCodecs& codecs = video_description->codecs();
  for (cricket::VideoCodecs::const_iterator it = codecs.begin();
       it != codecs.end(); ++it) {
    copy->AddCodec(*it);
  }
  copy->SortCodecs();
  return copy;
}
// Deep-copies an audio content description: clones the codec list onto a
// freshly allocated description and re-sorts it. Caller owns the result.
cricket::AudioContentDescription* CopyAudioContentDescription(
    const cricket::AudioContentDescription* audio_description) {
  cricket::AudioContentDescription* copy =
      new cricket::AudioContentDescription();
  const cricket::AudioCodecs& codecs = audio_description->codecs();
  for (cricket::AudioCodecs::const_iterator it = codecs.begin();
       it != codecs.end(); ++it) {
    copy->AddCodec(*it);
  }
  copy->SortCodecs();
  return copy;
}
// Dispatches on the media type and deep-copies the description.
// Returns NULL for media types other than audio/video. Caller owns the
// returned object.
const cricket::ContentDescription* CopyContentDescription(
    const cricket::ContentDescription* original) {
  const cricket::MediaContentDescription* media =
      static_cast<const cricket::MediaContentDescription*>(original);
  if (media->type() == cricket::MEDIA_TYPE_VIDEO) {
    return static_cast<const cricket::ContentDescription*>(
        CopyVideoContentDescription(
            static_cast<const cricket::VideoContentDescription*>(original)));
  }
  if (media->type() == cricket::MEDIA_TYPE_AUDIO) {
    return static_cast<const cricket::ContentDescription*>(
        CopyAudioContentDescription(
            static_cast<const cricket::AudioContentDescription*>(original)));
  }
  return NULL;
}
// Deep-copies a list of content infos; each description is cloned via
// CopyContentDescription (NULL for unknown media types, as before).
cricket::ContentInfos CopyContentInfos(const cricket::ContentInfos& original) {
  cricket::ContentInfos result;
  for (cricket::ContentInfos::const_iterator it = original.begin();
       it != original.end(); ++it) {
    cricket::ContentInfo copy;
    copy.name = it->name;
    copy.type = it->type;
    copy.description = CopyContentDescription(it->description);
    result.push_back(copy);
  }
  return result;
}
// Deep-copies a whole session description. Caller owns the result.
cricket::SessionDescription* CopySessionDescription(
    const cricket::SessionDescription* original) {
  return new cricket::SessionDescription(
      CopyContentInfos(original->contents()));
}
// Builds a minimal fake session description containing one empty
// audio or video content. Caller owns the result.
cricket::SessionDescription* GenerateFakeSessionDescription(bool video) {
  cricket::SessionDescription* description =
      new cricket::SessionDescription();
  std::string content_name;
  cricket::ContentDescription* content = NULL;
  if (video) {
    content_name = cricket::CN_VIDEO;
    cricket::VideoContentDescription* video_dsc =
        new cricket::VideoContentDescription;
    video_dsc->SortCodecs();
    content = video_dsc;
  } else {
    content_name = cricket::CN_AUDIO;
    cricket::AudioContentDescription* audio_dsc =
        new cricket::AudioContentDescription();
    audio_dsc->SortCodecs();
    content = audio_dsc;
  }
  // Cannot fail.
  description->AddContent(content_name, cricket::NS_JINGLE_RTP, content);
  return description;
}
// Appends one fake local UDP "rtp" candidate (127.0.0.1:1234) to
// |candidates|. |video| is unused: the fake candidate is identical for
// audio and video streams; the parameter is kept for signature
// compatibility with callers.
void GenerateFakeCandidate(std::vector<cricket::Candidate>* candidates,
                           bool video) {
  cricket::Candidate candidate;
  candidate.set_name("rtp");
  candidate.set_protocol("udp");
  talk_base::SocketAddress address("127.0.0.1", 1234);
  candidate.set_address(address);
  candidate.set_preference(1);
  // The "0" suffix is a fixed fake port index.
  candidate.set_username("username0");
  candidate.set_password("0");
  candidate.set_type("local");
  candidate.set_network_name("network");
  candidate.set_generation(0);
  candidates->push_back(candidate);
}
// Builds a fake session description and appends a matching fake
// candidate to |candidates|. Caller owns the returned description.
cricket::SessionDescription* GenerateFakeSession(
    std::vector<cricket::Candidate>* candidates,
    bool video) {
  cricket::SessionDescription* description =
      GenerateFakeSessionDescription(video);
  if (description == NULL)
    return NULL;
  GenerateFakeCandidate(candidates, video);
  return description;
}
} // namespace
// Records every WebRtcSession signal it receives in FIFO order so tests
// can assert on the exact callback sequence. Also keeps the arguments of
// the most recent callback (stream id, video flag, a deep copy of the
// last local description, and the last candidate list).
class OnSignalImpl
    : public sigslot::has_slots<> {
 public:
  enum CallbackId {
    kNone,
    kOnAddStream,
    kOnRemoveStream,
    kOnRtcMediaChannelCreated,
    kOnLocalDescription,
    kOnFailedCall,
  };
  OnSignalImpl()
      : callback_ids_(),
        last_stream_id_(""),
        last_was_video_(false),
        last_description_ptr_(NULL),
        last_candidates_() {
  }
  virtual ~OnSignalImpl() {
    delete last_description_ptr_;
    last_description_ptr_ = NULL;
  }
  void OnAddStream(const std::string& stream_id, bool video) {
    callback_ids_.push_back(kOnAddStream);
    last_stream_id_ = stream_id;
    last_was_video_ = video;
  }
  void OnRemoveStream(const std::string& stream_id, bool video) {
    callback_ids_.push_back(kOnRemoveStream);
    last_stream_id_ = stream_id;
    last_was_video_ = video;
  }
  void OnRtcMediaChannelCreated(const std::string& stream_id,
                                bool video) {
    callback_ids_.push_back(kOnRtcMediaChannelCreated);
    last_stream_id_ = stream_id;
    last_was_video_ = video;
  }
  // Stores a deep copy of |desc|; the original stays owned by the caller.
  void OnLocalDescription(
      const cricket::SessionDescription* desc,
      const std::vector<cricket::Candidate>& candidates) {
    callback_ids_.push_back(kOnLocalDescription);
    delete last_description_ptr_;
    last_description_ptr_ = CopySessionDescription(desc);
    last_candidates_.clear();
    last_candidates_.insert(last_candidates_.end(), candidates.begin(),
                            candidates.end());
  }
  // Returns a copy of the last received local description (caller owns
  // it) and appends the last candidates to |candidates|. NULL if no
  // description/candidates have been received yet.
  cricket::SessionDescription* GetLocalDescription(
      std::vector<cricket::Candidate>* candidates) {
    if (last_candidates_.empty()) {
      return NULL;
    }
    if (last_description_ptr_ == NULL) {
      return NULL;
    }
    candidates->insert(candidates->end(), last_candidates_.begin(),
                       last_candidates_.end());
    return CopySessionDescription(last_description_ptr_);
  }
  void OnFailedCall() {
    callback_ids_.push_back(kOnFailedCall);
  }
  // Removes and returns the oldest recorded callback (kNone if empty).
  CallbackId PopOldestCallback() {
    if (callback_ids_.empty()) {
      return kNone;
    }
    const CallbackId return_value = callback_ids_.front();
    callback_ids_.pop_front();
    return return_value;
  }
  // Returns the oldest recorded callback without removing it.
  CallbackId PeekOldestCallback() {
    if (callback_ids_.empty()) {
      return kNone;
    }
    const CallbackId return_value = callback_ids_.front();
    return return_value;
  }
  // Clears all recorded state back to the freshly-constructed values.
  void Reset() {
    callback_ids_.clear();
    last_stream_id_ = "";
    last_was_video_ = false;
    delete last_description_ptr_;
    last_description_ptr_ = NULL;
    last_candidates_.clear();
  }

 protected:
  std::list<CallbackId> callback_ids_;
  std::string last_stream_id_;
  bool last_was_video_;
  cricket::SessionDescription* last_description_ptr_;
  std::vector<cricket::Candidate> last_candidates_;
};
// Test harness around one WebRtcSession. Uses the current thread as both
// signaling and worker thread, a FakePortAllocator, and connects all of
// the session's signals to the OnSignalImpl recorder it inherits from.
// The Call* wrappers forward to the session and (where a callback is
// expected) block until it arrives.
class WebRtcSessionTest : public OnSignalImpl {
 public:
  // Factory: returns a fully initialized harness, or NULL on failure.
  static WebRtcSessionTest* CreateWebRtcSessionTest(bool receiving) {
    WebRtcSessionTest* return_value =
        new WebRtcSessionTest();
    if (return_value == NULL) {
      return NULL;
    }
    if (!return_value->Init(receiving)) {
      delete return_value;
      return NULL;
    }
    return return_value;
  }
  // Pumps messages for up to |timeout_ms| waiting for callback |id|.
  // Fails early if a different callback arrives first.
  bool WaitForCallback(CallbackId id, int timeout_ms) {
    bool success = false;
    for (int ms = 0; ms < timeout_ms; ms++) {
      const CallbackId peek_id = PeekOldestCallback();
      if (peek_id == id) {
        PopOldestCallback();
        success = true;
        break;
      } else if (peek_id != kNone) {
        success = false;
        break;
      }
      talk_base::Thread::Current()->ProcessMessages(1);
    }
    return success;
  }
  // One-shot initialization; returns false if called twice or if the
  // channel manager fails to start.
  bool Init(bool receiving) {
    if (signaling_thread_ != NULL)
      return false;
    signaling_thread_ = talk_base::Thread::Current();
    receiving_ = receiving;
    if (worker_thread_!= NULL)
      return false;
    worker_thread_ = talk_base::Thread::Current();
    cricket::FakePortAllocator* fake_port_allocator =
        new cricket::FakePortAllocator(worker_thread_, NULL);
    allocator_ = static_cast<cricket::PortAllocator*>(fake_port_allocator);
    channel_manager_ = new cricket::ChannelManager(worker_thread_);
    if (!channel_manager_->Init())
      return false;
    talk_base::CreateRandomString(8, &id_);
    session_ = new webrtc::WebRtcSession(
        id_, receiving_ , allocator_,
        channel_manager_,
        signaling_thread_);
    session_->SignalAddStream.connect(
        static_cast<OnSignalImpl*> (this),
        &OnSignalImpl::OnAddStream);
    session_->SignalRemoveStream.connect(
        static_cast<OnSignalImpl*> (this),
        &OnSignalImpl::OnRemoveStream);
    session_->SignalRtcMediaChannelCreated.connect(
        static_cast<OnSignalImpl*> (this),
        &OnSignalImpl::OnRtcMediaChannelCreated);
    session_->SignalLocalDescription.connect(
        static_cast<OnSignalImpl*> (this),
        &OnSignalImpl::OnLocalDescription);
    session_->SignalFailedCall.connect(
        static_cast<OnSignalImpl*> (this),
        &OnSignalImpl::OnFailedCall);
    return true;
  }
  // Tears down session, channel manager and allocator (in that order).
  void Terminate() {
    delete session_;
    session_ = NULL;
    delete channel_manager_;
    channel_manager_ = NULL;
    delete allocator_;
    allocator_ = NULL;
  }
  ~WebRtcSessionTest() {
    Terminate();
  }
  // All session APIs must be called from the signaling thread.
  bool CallInitiate() {
    return session_->Initiate();
  }
  bool CallConnect() {
    if (!session_->Connect())
      return false;
    // This callback does not happen with FakeTransport!
    if (!WaitForCallback(kOnLocalDescription, 1000)) {
      return false;
    }
    return true;
  }
  bool CallOnRemoteDescription(
      cricket::SessionDescription* description,
      std::vector<cricket::Candidate> candidates) {
    if (!session_->OnRemoteDescription(description, candidates)) {
      return false;
    }
    if (!WaitForCallback(kOnAddStream, 1000)) {
      return false;
    }
    return true;
  }
  bool CallOnInitiateMessage(
      cricket::SessionDescription* description,
      const std::vector<cricket::Candidate>& candidates) {
    if (!session_->OnInitiateMessage(description, candidates)) {
      return false;
    }
    if (!WaitForCallback(kOnAddStream, 1000)) {
      return false;
    }
    return true;
  }
  bool CallCreateVoiceChannel(const std::string& stream_id) {
    if (!session_->CreateVoiceChannel(stream_id)) {
      return false;
    }
    if (!WaitForCallback(kOnRtcMediaChannelCreated, 1000)) {
      return false;
    }
    return true;
  }
  bool CallCreateVideoChannel(const std::string& stream_id) {
    if (!session_->CreateVideoChannel(stream_id)) {
      return false;
    }
    if (!WaitForCallback(kOnRtcMediaChannelCreated, 1000)) {
      return false;
    }
    return true;
  }
  bool CallRemoveStream(const std::string& stream_id) {
    return session_->RemoveStream(stream_id);
  }
  void CallRemoveAllStreams() {
    session_->RemoveAllStreams();
  }
  bool CallHasStream(const std::string& label) {
    return session_->HasStream(label);
  }
  bool CallHasStream(bool video) {
    return session_->HasStream(video);
  }
  bool CallHasAudioStream() {
    return session_->HasAudioStream();
  }
  bool CallHasVideoStream() {
    return session_->HasVideoStream();
  }
  bool CallSetVideoRenderer(const std::string& stream_id,
                            cricket::VideoRenderer* renderer) {
    return session_->SetVideoRenderer(stream_id, renderer);
  }
  const std::vector<cricket::Candidate>& CallLocalCandidates() {
    return session_->local_candidates();
  }

 private:
  // Use CreateWebRtcSessionTest(); construction alone is not enough.
  WebRtcSessionTest()
      : session_(NULL),
        id_(),
        receiving_(false),
        allocator_(NULL),
        channel_manager_(NULL),
        worker_thread_(NULL),
        signaling_thread_(NULL) {
  }
  webrtc::WebRtcSession* session_;
  std::string id_;
  bool receiving_;
  cricket::PortAllocator* allocator_;
  cricket::ChannelManager* channel_manager_;
  talk_base::Thread* worker_thread_;
  talk_base::Thread* signaling_thread_;
};
// Sleeps |timeout| ms, then reports whether any callback is pending in
// the harness's recorder queue.
bool CallbackReceived(WebRtcSessionTest* session, int timeout) {
  talk_base::Thread::SleepMs(timeout);
  return session->PeekOldestCallback() != OnSignalImpl::kNone;
}
// Thin convenience wrapper so tests can sleep without spelling out the
// talk_base thread API.
void SleepMs(int timeout_ms) {
  talk_base::Thread::SleepMs(timeout_ms);
}
// Sanity check for a freshly initiated receiving session: Connect must
// fail (no streams yet), no streams of either kind exist, and no
// callbacks have fired.
TEST(WebRtcSessionTest, InitializationReceiveSanity) {
  const bool kReceiving = true;
  talk_base::scoped_ptr<WebRtcSessionTest> my_session;
  my_session.reset(WebRtcSessionTest::CreateWebRtcSessionTest(kReceiving));
  ASSERT_TRUE(my_session.get() != NULL);
  ASSERT_TRUE(my_session->CallInitiate());
  // Should return false because no stream has been set up yet.
  EXPECT_FALSE(my_session->CallConnect());
  const bool kVideo = true;
  EXPECT_FALSE(my_session->CallHasStream(kVideo));
  EXPECT_FALSE(my_session->CallHasStream(!kVideo));
  EXPECT_EQ(OnSignalImpl::kNone,
            my_session->PopOldestCallback());
}
// Outgoing audio call: create a voice channel, connect, then loop the
// generated local description back in as the remote answer. Expect an
// audio stream, no video stream, and no stray callbacks.
TEST(WebRtcSessionTest, AudioSendCallSetUp) {
  const bool kReceiving = false;
  talk_base::scoped_ptr<WebRtcSessionTest> my_session;
  my_session.reset(WebRtcSessionTest::CreateWebRtcSessionTest(kReceiving));
  ASSERT_TRUE(my_session.get() != NULL);
  ASSERT_TRUE(my_session->CallInitiate());
  ASSERT_TRUE(my_session->CallCreateVoiceChannel("Audio"));
  ASSERT_TRUE(my_session->CallConnect());
  std::vector<cricket::Candidate> candidates;
  cricket::SessionDescription* local_session = my_session->GetLocalDescription(
      &candidates);
  ASSERT_FALSE(candidates.empty());
  ASSERT_FALSE(local_session == NULL);
  if (!my_session->CallOnRemoteDescription(local_session, candidates)) {
    // On failure the description was not consumed; avoid leaking it.
    delete local_session;
    FAIL();
  }
  // All callbacks should be caught by my_session. Assert it.
  ASSERT_FALSE(CallbackReceived(my_session.get(), 1000));
  ASSERT_TRUE(my_session->CallHasAudioStream() &&
              !my_session->CallHasVideoStream());
}
// Outgoing video call: mirror of AudioSendCallSetUp with a video channel.
// Expect a video stream, no audio stream, and no stray callbacks.
TEST(WebRtcSessionTest, VideoSendCallSetUp) {
  const bool kReceiving = false;
  talk_base::scoped_ptr<WebRtcSessionTest> my_session;
  my_session.reset(WebRtcSessionTest::CreateWebRtcSessionTest(kReceiving));
  ASSERT_TRUE(my_session.get() != NULL);
  ASSERT_TRUE(my_session->CallInitiate());
  ASSERT_TRUE(my_session->CallCreateVideoChannel("Video"));
  ASSERT_TRUE(my_session->CallConnect());
  std::vector<cricket::Candidate> candidates;
  cricket::SessionDescription* local_session = my_session->GetLocalDescription(
      &candidates);
  ASSERT_FALSE(candidates.empty());
  ASSERT_FALSE(local_session == NULL);
  if (!my_session->CallOnRemoteDescription(local_session, candidates)) {
    // On failure the description was not consumed; avoid leaking it.
    delete local_session;
    FAIL();
  }
  // All callbacks should be caught by my_session. Assert it.
  ASSERT_FALSE(CallbackReceived(my_session.get(), 1000));
  ASSERT_TRUE(!my_session->CallHasAudioStream() &&
              my_session->CallHasVideoStream());
}
// Incoming audio call: feed a fake offer (audio content plus one fake
// candidate) via OnInitiateMessage, then connect. Expect an audio
// stream, no video stream, and no stray callbacks.
TEST(WebRtcSessionTest, AudioReceiveCallSetUp) {
  const bool kReceiving = true;
  const bool video = false;
  talk_base::scoped_ptr<WebRtcSessionTest> my_session;
  my_session.reset(WebRtcSessionTest::CreateWebRtcSessionTest(kReceiving));
  ASSERT_TRUE(my_session.get() != NULL);
  ASSERT_TRUE(my_session->CallInitiate());
  std::vector<cricket::Candidate> candidates;
  cricket::SessionDescription* local_session =
      GenerateFakeSession(&candidates, video);
  ASSERT_FALSE(candidates.empty());
  ASSERT_FALSE(local_session == NULL);
  if (!my_session->CallOnInitiateMessage(local_session, candidates)) {
    // On failure the description was not consumed; avoid leaking it.
    delete local_session;
    FAIL();
  }
  ASSERT_TRUE(my_session->CallConnect());
  ASSERT_FALSE(CallbackReceived(my_session.get(), 1000));
  ASSERT_TRUE(my_session->CallHasAudioStream() &&
              !my_session->CallHasVideoStream());
}
// Incoming video call: mirror of AudioReceiveCallSetUp with a video
// offer. Expect a video stream, no audio stream, and no stray callbacks.
TEST(WebRtcSessionTest, VideoReceiveCallSetUp) {
  const bool kReceiving = true;
  const bool video = true;
  talk_base::scoped_ptr<WebRtcSessionTest> my_session;
  my_session.reset(WebRtcSessionTest::CreateWebRtcSessionTest(kReceiving));
  ASSERT_TRUE(my_session.get() != NULL);
  ASSERT_TRUE(my_session->CallInitiate());
  std::vector<cricket::Candidate> candidates;
  cricket::SessionDescription* local_session =
      GenerateFakeSession(&candidates, video);
  ASSERT_FALSE(candidates.empty());
  ASSERT_FALSE(local_session == NULL);
  if (!my_session->CallOnInitiateMessage(local_session, candidates)) {
    // On failure the description was not consumed; avoid leaking it.
    delete local_session;
    FAIL();
  }
  ASSERT_TRUE(my_session->CallConnect());
  ASSERT_FALSE(CallbackReceived(my_session.get(), 1000));
  ASSERT_TRUE(!my_session->CallHasAudioStream() &&
              my_session->CallHasVideoStream());
}

View File

@ -1,217 +0,0 @@
/*
* libjingle
* Copyright 2004--2005, Google Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
* EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "talk/base/json.h"
#include <errno.h>
#include <climits>
#include <cstdlib>
#include <sstream>
// Converts a JSON value to a string. Strings are returned as-is;
// bool/int/uint/double scalars are formatted; anything else (arrays,
// objects, null) fails and leaves |out| untouched.
bool GetStringFromJson(const Json::Value& in, std::string* out) {
  if (in.isString()) {
    *out = in.asString();
    return true;
  }
  std::ostringstream formatted;
  if (in.isBool()) {
    formatted << std::boolalpha << in.asBool();
  } else if (in.isInt()) {
    formatted << in.asInt();
  } else if (in.isUInt()) {
    formatted << in.asUInt();
  } else if (in.isDouble()) {
    formatted << in.asDouble();
  } else {
    return false;
  }
  *out = formatted.str();
  return true;
}
// Converts a JSON value to an int. Numeric/bool values are converted
// directly; strings are parsed as base-10 and must consume the whole
// string and fit in an int. On failure |out| is left untouched (the
// original wrote a partial/garbage value into |out| on a failed string
// parse, unlike the non-string path).
bool GetIntFromJson(const Json::Value& in, int* out) {
  bool ret;
  if (!in.isString()) {
    ret = in.isConvertibleTo(Json::intValue);
    if (ret) {
      *out = in.asInt();
    }
  } else {
    long val;  // NOLINT
    const char* c_str = in.asCString();
    char* end_ptr;
    errno = 0;
    val = strtol(c_str, &end_ptr, 10);  // NOLINT
    ret = (end_ptr != c_str && *end_ptr == '\0' && !errno &&
           val >= INT_MIN && val <= INT_MAX);
    if (ret) {
      *out = static_cast<int>(val);
    }
  }
  return ret;
}
// Converts a JSON value to an unsigned int. Numeric/bool values are
// converted directly; strings are parsed as base-10 and must consume
// the whole string and fit in an unsigned int. On failure |out| is left
// untouched (the original wrote a partial/garbage value into |out| on a
// failed string parse, unlike the non-string path).
bool GetUIntFromJson(const Json::Value& in, unsigned int* out) {
  bool ret;
  if (!in.isString()) {
    ret = in.isConvertibleTo(Json::uintValue);
    if (ret) {
      *out = in.asUInt();
    }
  } else {
    unsigned long val;  // NOLINT
    const char* c_str = in.asCString();
    char* end_ptr;
    errno = 0;
    val = strtoul(c_str, &end_ptr, 10);  // NOLINT
    ret = (end_ptr != c_str && *end_ptr == '\0' && !errno &&
           val <= UINT_MAX);
    if (ret) {
      *out = static_cast<unsigned int>(val);
    }
  }
  return ret;
}
// Converts a JSON value to a bool. Non-string values use JsonCpp's own
// convertibility rules; strings must be exactly "true" or "false".
bool GetBoolFromJson(const Json::Value& in, bool* out) {
  if (!in.isString()) {
    if (!in.isConvertibleTo(Json::booleanValue)) {
      return false;
    }
    *out = in.asBool();
    return true;
  }
  const std::string str = in.asString();
  if (str == "true") {
    *out = true;
    return true;
  }
  if (str == "false") {
    *out = false;
    return true;
  }
  return false;
}
// Copies element |n| of JSON array |in| into |out|. Fails if |in| is
// not an array or |n| is out of range.
bool GetValueFromJsonArray(const Json::Value& in, size_t n,
                           Json::Value* out) {
  if (in.isArray() && in.isValidIndex(n)) {
    *out = in[static_cast<unsigned int>(n)];
    return true;
  }
  return false;
}
// Fetches array element |n| and converts it to an int.
bool GetIntFromJsonArray(const Json::Value& in, size_t n,
                         int* out) {
  Json::Value element;
  return GetValueFromJsonArray(in, n, &element) && GetIntFromJson(element, out);
}
// Fetches array element |n| and converts it to an unsigned int.
bool GetUIntFromJsonArray(const Json::Value& in, size_t n,
                          unsigned int* out) {
  Json::Value element;
  return GetValueFromJsonArray(in, n, &element) &&
         GetUIntFromJson(element, out);
}
// Fetches array element |n| and converts it to a string.
bool GetStringFromJsonArray(const Json::Value& in, size_t n,
                            std::string* out) {
  Json::Value element;
  return GetValueFromJsonArray(in, n, &element) &&
         GetStringFromJson(element, out);
}
// Fetches array element |n| and converts it to a bool.
bool GetBoolFromJsonArray(const Json::Value& in, size_t n,
                          bool* out) {
  Json::Value element;
  return GetValueFromJsonArray(in, n, &element) &&
         GetBoolFromJson(element, out);
}
// Copies member |k| of JSON object |in| into |out|. Fails if |in| is
// not an object or has no such member.
bool GetValueFromJsonObject(const Json::Value& in, const std::string& k,
                            Json::Value* out) {
  if (in.isObject() && in.isMember(k)) {
    *out = in[k];
    return true;
  }
  return false;
}
// Fetches object member |k| and converts it to an int.
bool GetIntFromJsonObject(const Json::Value& in, const std::string& k,
                          int* out) {
  Json::Value member;
  return GetValueFromJsonObject(in, k, &member) && GetIntFromJson(member, out);
}
// Fetches object member |k| and converts it to an unsigned int.
bool GetUIntFromJsonObject(const Json::Value& in, const std::string& k,
                           unsigned int* out) {
  Json::Value member;
  return GetValueFromJsonObject(in, k, &member) &&
         GetUIntFromJson(member, out);
}
// Fetches object member |k| and converts it to a string.
bool GetStringFromJsonObject(const Json::Value& in, const std::string& k,
                             std::string* out) {
  Json::Value member;
  return GetValueFromJsonObject(in, k, &member) &&
         GetStringFromJson(member, out);
}
// Fetches object member |k| and converts it to a bool.
bool GetBoolFromJsonObject(const Json::Value& in, const std::string& k,
                           bool* out) {
  Json::Value member;
  return GetValueFromJsonObject(in, k, &member) &&
         GetBoolFromJson(member, out);
}
// Converts a vector of strings into a JSON array of strings,
// preserving order.
Json::Value StringVectorToJsonValue(const std::vector<std::string>& strings) {
  Json::Value array(Json::arrayValue);
  for (std::vector<std::string>::const_iterator it = strings.begin();
       it != strings.end(); ++it) {
    array.append(Json::Value(*it));
  }
  return array;
}
// Converts a JSON array of strings into |strings|. Clears |strings|
// first; returns false if |value| is not an array or contains a
// non-string element (elements seen before the bad one remain in
// |strings|, matching the original behavior).
bool JsonValueToStringVector(const Json::Value& value,
                             std::vector<std::string> *strings) {
  strings->clear();
  if (!value.isArray()) {
    return false;
  }
  const unsigned int count = value.size();
  for (unsigned int i = 0; i < count; ++i) {
    const Json::Value& element = value[i];
    if (!element.isString()) {
      return false;
    }
    strings->push_back(element.asString());
  }
  return true;
}
// Serializes a JSON value to a compact single-line string.
// Json::FastWriter appends a trailing '\n'; strip it, but only when it
// is actually there, so we never drop a real character if the writer's
// output ever changes (the original unconditionally trimmed the last
// character).
std::string JsonValueToString(const Json::Value& json) {
  Json::FastWriter w;
  std::string value = w.write(json);
  if (!value.empty() && value[value.size() - 1] == '\n') {
    value.erase(value.size() - 1);
  }
  return value;
}

View File

@ -1,80 +0,0 @@
/*
* libjingle
* Copyright 2004--2005, Google Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
* EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef TALK_BASE_JSON_H_
#define TALK_BASE_JSON_H_

#include <string>
#include <vector>

#include "json/json.h"

// TODO(juberti): Move to talk_base namespace

///////////////////////////////////////////////////////////////////////////////
// JSON Helpers
///////////////////////////////////////////////////////////////////////////////

// Robust conversion operators, better than the ones in JsonCpp.
// Each returns true on success; on failure the out-parameter is not
// guaranteed to be set.
bool GetIntFromJson(const Json::Value& in, int* out);
bool GetUIntFromJson(const Json::Value& in, unsigned int* out);
bool GetStringFromJson(const Json::Value& in, std::string* out);
bool GetBoolFromJson(const Json::Value& in, bool* out);

// Pull values out of a JSON array by index; fail if not an array or
// the index is out of range.
bool GetValueFromJsonArray(const Json::Value& in, size_t n,
                           Json::Value* out);
bool GetIntFromJsonArray(const Json::Value& in, size_t n,
                         int* out);
bool GetUIntFromJsonArray(const Json::Value& in, size_t n,
                          unsigned int* out);
bool GetStringFromJsonArray(const Json::Value& in, size_t n,
                            std::string* out);
bool GetBoolFromJsonArray(const Json::Value& in, size_t n,
                          bool* out);

// Pull values out of a JSON object by key; fail if not an object or
// the key is missing.
bool GetValueFromJsonObject(const Json::Value& in, const std::string& k,
                            Json::Value* out);
bool GetIntFromJsonObject(const Json::Value& in, const std::string& k,
                          int* out);
bool GetUIntFromJsonObject(const Json::Value& in, const std::string& k,
                           unsigned int* out);
bool GetStringFromJsonObject(const Json::Value& in, const std::string& k,
                             std::string* out);
bool GetBoolFromJsonObject(const Json::Value& in, const std::string& k,
                           bool* out);

// Converts vectors of strings to/from JSON arrays.
Json::Value StringVectorToJsonValue(const std::vector<std::string>& strings);
bool JsonValueToStringVector(const Json::Value& value,
                             std::vector<std::string> *strings);

// Writes out a Json value as a compact string (no trailing newline).
std::string JsonValueToString(const Json::Value& json);

#endif  // TALK_BASE_JSON_H_

View File

@ -1,799 +0,0 @@
/*
* libjingle
* Copyright 2004--2008, Google Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
* EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "talk/session/phone/channelmanager.h"
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif
#include <algorithm>
#include "talk/base/common.h"
#include "talk/base/logging.h"
#include "talk/base/sigslotrepeater.h"
#include "talk/base/stringencode.h"
#include "talk/session/phone/mediaengine.h"
#include "talk/session/phone/soundclip.h"
namespace cricket {
// Message ids used to marshal work onto the worker thread; dispatched in
// ChannelManager::OnMessage(). Values 8-10 are unused (historical gap —
// TODO confirm against older revisions).
enum {
  MSG_CREATEVOICECHANNEL = 1,
  MSG_DESTROYVOICECHANNEL = 2,
  MSG_SETAUDIOOPTIONS = 3,
  MSG_GETOUTPUTVOLUME = 4,
  MSG_SETOUTPUTVOLUME = 5,
  MSG_SETLOCALMONITOR = 6,
  MSG_SETVOICELOGGING = 7,
  MSG_CREATEVIDEOCHANNEL = 11,
  MSG_DESTROYVIDEOCHANNEL = 12,
  MSG_SETVIDEOOPTIONS = 13,
  MSG_SETLOCALRENDERER = 14,
  MSG_SETDEFAULTVIDEOENCODERCONFIG = 15,
  MSG_SETVIDEOLOGGING = 16,
  MSG_CREATESOUNDCLIP = 17,
  MSG_DESTROYSOUNDCLIP = 18,
  MSG_CAMERASTARTED = 19,
  MSG_SETVIDEOCAPTURE = 20,
};
// Marshals Create{Voice,Video}Channel() arguments to the worker thread.
// The created channel is returned via voice_channel/video_channel.
struct CreationParams : public talk_base::MessageData {
  CreationParams(BaseSession* session, const std::string& content_name,
                 bool rtcp, VoiceChannel* voice_channel)
      : session(session),
        content_name(content_name),
        rtcp(rtcp),
        voice_channel(voice_channel),
        video_channel(NULL) {}
  BaseSession* session;
  std::string content_name;
  bool rtcp;
  // In: voice channel to pair a new video channel with (may be NULL);
  // out: the channel created by CreateVoiceChannel_w.
  VoiceChannel* voice_channel;
  VideoChannel* video_channel;  // Out: channel created by CreateVideoChannel_w.
};
// Marshals SetAudioOptions() arguments to the worker thread.
struct AudioOptions : public talk_base::MessageData {
  // Fix: initialize |result| like every other message struct in this file
  // (VolumeLevel, VideoOptions, ...); it was previously left uninitialized,
  // so reading it after a failed Send() was undefined behavior.
  AudioOptions(int o, const Device* in, const Device* out)
      : options(o), in_device(in), out_device(out), result(false) {}
  int options;               // Bitmask of engine audio options.
  const Device* in_device;   // Capture device; not owned.
  const Device* out_device;  // Playout device; not owned.
  bool result;               // Out: set by SetAudioOptions_w.
};
// Marshals Get/SetOutputVolume() data; |level| is in/out, |result| is out.
struct VolumeLevel : public talk_base::MessageData {
  VolumeLevel() : level(-1), result(false) {}
  explicit VolumeLevel(int l) : level(l), result(false) {}
  int level;
  bool result;
};
// Marshals SetVideoOptions() (capture device selection) to the worker thread.
struct VideoOptions : public talk_base::MessageData {
  explicit VideoOptions(const Device* d) : cam_device(d), result(false) {}
  const Device* cam_device;  // Not owned.
  bool result;               // Out: set by SetVideoOptions_w.
};
// Marshals SetDefaultVideoEncoderConfig() to the worker thread.
struct DefaultVideoEncoderConfig : public talk_base::MessageData {
  explicit DefaultVideoEncoderConfig(const VideoEncoderConfig& c)
      : config(c), result(false) {}
  VideoEncoderConfig config;
  bool result;  // Out: set by SetDefaultVideoEncoderConfig_w.
};
// Marshals SetLocalMonitor() (enable/disable) to the worker thread.
struct LocalMonitor : public talk_base::MessageData {
  explicit LocalMonitor(bool e) : enable(e), result(false) {}
  bool enable;
  bool result;  // Out: set by SetLocalMonitor_w.
};
// Marshals SetLocalRenderer() to the worker thread.
struct LocalRenderer : public talk_base::MessageData {
  explicit LocalRenderer(VideoRenderer* r) : renderer(r), result(false) {}
  VideoRenderer* renderer;  // Not owned.
  bool result;              // Out: set by SetLocalRenderer_w.
};
// Marshals Set{Voice,Video}Logging() to the worker thread. |filter| is
// copied because the caller's char* may not outlive the message.
struct LoggingOptions : public talk_base::MessageData {
  explicit LoggingOptions(int lev, const char* f) : level(lev), filter(f) {}
  int level;
  std::string filter;
};
// Marshals SetVideoCapture() to the worker thread.
struct CaptureParams : public talk_base::MessageData {
  explicit CaptureParams(bool c) : capture(c), result(CR_FAILURE) {}
  bool capture;
  CaptureResult result;  // Out: set by SetVideoCapture_w.
};
// Production constructor: creates the real media engine and device manager.
// Must be called on the application's main thread; |worker_thread| is where
// all media work is marshalled.
ChannelManager::ChannelManager(talk_base::Thread* worker_thread)
    : media_engine_(MediaEngine::Create()),
      device_manager_(new DeviceManager()),
      initialized_(false),
      main_thread_(talk_base::Thread::Current()),
      worker_thread_(worker_thread),
      audio_in_device_(DeviceManager::kDefaultDeviceName),
      audio_out_device_(DeviceManager::kDefaultDeviceName),
      audio_options_(MediaEngine::DEFAULT_AUDIO_OPTIONS),
      local_renderer_(NULL),
      capturing_(false),
      monitoring_(false) {
  Construct();
}
// Dependency-injecting constructor (presumably for tests/fakes — TODO
// confirm). Takes ownership of |me| and |dm| via the smart-pointer members.
ChannelManager::ChannelManager(MediaEngine* me, DeviceManager* dm,
                               talk_base::Thread* worker_thread)
    : media_engine_(me),
      device_manager_(dm),
      initialized_(false),
      main_thread_(talk_base::Thread::Current()),
      worker_thread_(worker_thread),
      audio_in_device_(DeviceManager::kDefaultDeviceName),
      audio_out_device_(DeviceManager::kDefaultDeviceName),
      audio_options_(MediaEngine::DEFAULT_AUDIO_OPTIONS),
      local_renderer_(NULL),
      capturing_(false),
      monitoring_(false) {
  Construct();
}
// Shared constructor body: wires device-change notifications, starts the
// device manager, and selects the default camera.
void ChannelManager::Construct() {
  // Init the device manager immediately, and set up our default video device.
  SignalDevicesChange.repeat(device_manager_->SignalDevicesChange);
  device_manager_->Init();
  // Set camera_device_ to the name of the default video capturer.
  SetVideoOptions(DeviceManager::kDefaultDeviceName);
  // Camera is started asynchronously, request callbacks when startup
  // completes to be able to forward them to the rendering manager.
  media_engine_->SignalVideoCaptureResult.connect(
      this, &ChannelManager::OnVideoCaptureResult);
}
ChannelManager::~ChannelManager() {
  // Tear down worker-side state (channels, soundclips, media engine) before
  // the members are destroyed.
  if (initialized_) {
    Terminate();
  }
}
// Returns the intersection of what the media engine supports and what the
// installed devices allow (audio/video send/recv capability bits).
int ChannelManager::GetCapabilities() {
  return media_engine_->GetCapabilities() & device_manager_->GetCapabilities();
}
void ChannelManager::GetSupportedAudioCodecs(
std::vector<AudioCodec>* codecs) const {
codecs->clear();
for (std::vector<AudioCodec>::const_iterator it =
media_engine_->audio_codecs().begin();
it != media_engine_->audio_codecs().end(); ++it) {
codecs->push_back(*it);
}
}
void ChannelManager::GetSupportedVideoCodecs(
std::vector<VideoCodec>* codecs) const {
codecs->clear();
std::vector<VideoCodec>::const_iterator it;
for (it = media_engine_->video_codecs().begin();
it != media_engine_->video_codecs().end(); ++it) {
codecs->push_back(*it);
}
}
// Initializes the media engine and applies any preferences (devices, default
// encoder config, local renderer) stored before Init() was called. Returns
// true on success; must not be called when already initialized.
bool ChannelManager::Init() {
  ASSERT(!initialized_);
  if (initialized_) {
    return false;
  }
  ASSERT(worker_thread_ != NULL);
  if (worker_thread_) {
    if (media_engine_->Init()) {
      initialized_ = true;
      // Now that we're initialized, apply any stored preferences. A preferred
      // device might have been unplugged. In this case, we fallback to the
      // default device but keep the user preferences. The preferences are
      // changed only when the Javascript FE changes them.
      const std::string preferred_audio_in_device = audio_in_device_;
      const std::string preferred_audio_out_device = audio_out_device_;
      const std::string preferred_camera_device = camera_device_;
      Device device;
      if (!device_manager_->GetAudioInputDevice(audio_in_device_, &device)) {
        LOG(LS_WARNING) << "The preferred microphone '" << audio_in_device_
                        << "' is unavailable. Fall back to the default.";
        audio_in_device_ = DeviceManager::kDefaultDeviceName;
      }
      if (!device_manager_->GetAudioOutputDevice(audio_out_device_, &device)) {
        LOG(LS_WARNING) << "The preferred speaker '" << audio_out_device_
                        << "' is unavailable. Fall back to the default.";
        audio_out_device_ = DeviceManager::kDefaultDeviceName;
      }
      if (!device_manager_->GetVideoCaptureDevice(camera_device_, &device)) {
        if (!camera_device_.empty()) {
          LOG(LS_WARNING) << "The preferred camera '" << camera_device_
                          << "' is unavailable. Fall back to the default.";
        }
        camera_device_ = DeviceManager::kDefaultDeviceName;
      }
      if (!SetAudioOptions(audio_in_device_, audio_out_device_,
                           audio_options_)) {
        LOG(LS_WARNING) << "Failed to SetAudioOptions with"
                        << " microphone: " << audio_in_device_
                        << " speaker: " << audio_out_device_
                        << " options: " << audio_options_;
      }
      if (!SetVideoOptions(camera_device_) && !camera_device_.empty()) {
        LOG(LS_WARNING) << "Failed to SetVideoOptions with camera: "
                        << camera_device_;
      }
      // Restore the user preferences.
      audio_in_device_ = preferred_audio_in_device;
      audio_out_device_ = preferred_audio_out_device;
      camera_device_ = preferred_camera_device;
      // Now apply the default video codec that has been set earlier.
      if (default_video_encoder_config_.max_codec.id != 0) {
        SetDefaultVideoEncoderConfig(default_video_encoder_config_);
      }
      // And the local renderer.
      if (local_renderer_) {
        SetLocalRenderer(local_renderer_);
      }
    }
  }
  return initialized_;
}
// Shuts down all channels and the media engine. Video channels go first
// because each may reference a voice channel (see CreateVideoChannel).
void ChannelManager::Terminate() {
  ASSERT(initialized_);
  if (!initialized_) {
    return;
  }
  // Need to destroy the voice/video channels
  while (!video_channels_.empty()) {
    DestroyVideoChannel_w(video_channels_.back());
  }
  while (!voice_channels_.empty()) {
    DestroyVoiceChannel_w(voice_channels_.back());
  }
  while (!soundclips_.empty()) {
    DestroySoundclip_w(soundclips_.back());
  }
  media_engine_->Terminate();
  initialized_ = false;
}
// Creates a voice channel on the worker thread; NULL on failure.
VoiceChannel* ChannelManager::CreateVoiceChannel(
    BaseSession* session, const std::string& content_name, bool rtcp) {
  CreationParams params(session, content_name, rtcp, NULL);
  if (!Send(MSG_CREATEVOICECHANNEL, &params)) {
    return NULL;
  }
  return params.voice_channel;
}
// Worker-thread half of CreateVoiceChannel().
VoiceChannel* ChannelManager::CreateVoiceChannel_w(
    BaseSession* session, const std::string& content_name, bool rtcp) {
  // This is ok to alloc from a thread other than the worker thread
  talk_base::CritScope cs(&crit_);
  ASSERT(initialized_);
  VoiceMediaChannel* media_channel = media_engine_->CreateChannel();
  if (!media_channel) {
    return NULL;
  }
  VoiceChannel* channel = new VoiceChannel(
      worker_thread_, media_engine_.get(), media_channel,
      session, content_name, rtcp);
  voice_channels_.push_back(channel);
  return channel;
}
// Destroys |voice_channel| on the worker thread; NULL is a no-op.
void ChannelManager::DestroyVoiceChannel(VoiceChannel* voice_channel) {
  if (!voice_channel) {
    return;
  }
  talk_base::TypedMessageData<VoiceChannel*> data(voice_channel);
  Send(MSG_DESTROYVOICECHANNEL, &data);
}
// Worker-thread half of DestroyVoiceChannel(): unregister, then delete.
void ChannelManager::DestroyVoiceChannel_w(VoiceChannel* voice_channel) {
  talk_base::CritScope cs(&crit_);
  ASSERT(initialized_);
  VoiceChannels::iterator pos = std::find(voice_channels_.begin(),
                                          voice_channels_.end(),
                                          voice_channel);
  ASSERT(pos != voice_channels_.end());
  if (pos != voice_channels_.end()) {
    voice_channels_.erase(pos);
    delete voice_channel;
  }
}
// Creates a video channel on the worker thread, optionally paired with
// |voice_channel|; NULL on failure.
VideoChannel* ChannelManager::CreateVideoChannel(
    BaseSession* session, const std::string& content_name, bool rtcp,
    VoiceChannel* voice_channel) {
  CreationParams params(session, content_name, rtcp, voice_channel);
  if (!Send(MSG_CREATEVIDEOCHANNEL, &params)) {
    return NULL;
  }
  return params.video_channel;
}
// Worker-thread half of CreateVideoChannel().
VideoChannel* ChannelManager::CreateVideoChannel_w(
    BaseSession* session, const std::string& content_name, bool rtcp,
    VoiceChannel* voice_channel) {
  // This is ok to alloc from a thread other than the worker thread
  talk_base::CritScope cs(&crit_);
  ASSERT(initialized_);
  // voice_channel can be NULL in case of NullVoiceEngine.
  VoiceMediaChannel* voice_media_channel =
      voice_channel ? voice_channel->media_channel() : NULL;
  VideoMediaChannel* media_channel =
      media_engine_->CreateVideoChannel(voice_media_channel);
  if (!media_channel) {
    return NULL;
  }
  VideoChannel* channel = new VideoChannel(
      worker_thread_, media_engine_.get(), media_channel,
      session, content_name, rtcp, voice_channel);
  video_channels_.push_back(channel);
  return channel;
}
// Destroys |video_channel| on the worker thread; NULL is a no-op.
void ChannelManager::DestroyVideoChannel(VideoChannel* video_channel) {
  if (!video_channel) {
    return;
  }
  talk_base::TypedMessageData<VideoChannel*> data(video_channel);
  Send(MSG_DESTROYVIDEOCHANNEL, &data);
}
// Worker-thread half of DestroyVideoChannel(): unregister, then delete.
void ChannelManager::DestroyVideoChannel_w(VideoChannel* video_channel) {
  talk_base::CritScope cs(&crit_);
  ASSERT(initialized_);
  VideoChannels::iterator pos = std::find(video_channels_.begin(),
                                          video_channels_.end(),
                                          video_channel);
  ASSERT(pos != video_channels_.end());
  if (pos != video_channels_.end()) {
    video_channels_.erase(pos);
    delete video_channel;
  }
}
// Creates a soundclip player on the worker thread; NULL on failure.
Soundclip* ChannelManager::CreateSoundclip() {
  talk_base::TypedMessageData<Soundclip*> data(NULL);
  Send(MSG_CREATESOUNDCLIP, &data);
  return data.data();
}
// Worker-thread half of CreateSoundclip().
Soundclip* ChannelManager::CreateSoundclip_w() {
  talk_base::CritScope cs(&crit_);
  ASSERT(initialized_);
  ASSERT(worker_thread_ == talk_base::Thread::Current());
  SoundclipMedia* media = media_engine_->CreateSoundclip();
  if (media == NULL) {
    return NULL;
  }
  Soundclip* clip = new Soundclip(worker_thread_, media);
  soundclips_.push_back(clip);
  return clip;
}
// Destroys |soundclip| on the worker thread; NULL is a no-op.
void ChannelManager::DestroySoundclip(Soundclip* soundclip) {
  if (!soundclip) {
    return;
  }
  talk_base::TypedMessageData<Soundclip*> data(soundclip);
  Send(MSG_DESTROYSOUNDCLIP, &data);
}
// Worker-thread half of DestroySoundclip(): unregister, then delete.
void ChannelManager::DestroySoundclip_w(Soundclip* soundclip) {
  talk_base::CritScope cs(&crit_);
  ASSERT(initialized_);
  Soundclips::iterator pos = std::find(soundclips_.begin(),
                                       soundclips_.end(), soundclip);
  ASSERT(pos != soundclips_.end());
  if (pos != soundclips_.end()) {
    soundclips_.erase(pos);
    delete soundclip;
  }
}
// Returns the currently-preferred audio devices and option flags.
bool ChannelManager::GetAudioOptions(std::string* in_name,
                                     std::string* out_name, int* opts) {
  *in_name = audio_in_device_;
  *out_name = audio_out_device_;
  *opts = audio_options_;
  return true;
}
// Selects the audio devices by name and sets the option flags. If not yet
// initialized, the values are only stored and applied later in Init().
bool ChannelManager::SetAudioOptions(const std::string& in_name,
                                     const std::string& out_name, int opts) {
  // Get device ids from DeviceManager.
  Device in_dev, out_dev;
  if (!device_manager_->GetAudioInputDevice(in_name, &in_dev)) {
    LOG(LS_WARNING) << "Failed to GetAudioInputDevice: " << in_name;
    return false;
  }
  if (!device_manager_->GetAudioOutputDevice(out_name, &out_dev)) {
    LOG(LS_WARNING) << "Failed to GetAudioOutputDevice: " << out_name;
    return false;
  }
  // If we're initialized, pass the settings to the media engine.
  bool ret = true;
  if (initialized_) {
    AudioOptions options(opts, &in_dev, &out_dev);
    ret = (Send(MSG_SETAUDIOOPTIONS, &options) && options.result);
  }
  // If all worked well, save the values for use in GetAudioOptions.
  if (ret) {
    audio_options_ = opts;
    audio_in_device_ = in_name;
    audio_out_device_ = out_name;
  }
  return ret;
}
// Worker-thread half of SetAudioOptions(): applies the options, then the
// devices, to the media engine.
bool ChannelManager::SetAudioOptions_w(int opts, const Device* in_dev,
                                       const Device* out_dev) {
  ASSERT(worker_thread_ == talk_base::Thread::Current());
  ASSERT(initialized_);
  // Set audio options
  bool ret = media_engine_->SetAudioOptions(opts);
  // Set the audio devices
  if (ret) {
    talk_base::CritScope cs(&crit_);
    ret = media_engine_->SetSoundDevices(in_dev, out_dev);
  }
  return ret;
}
// Queries the speaker volume from the worker thread. |*level| is written
// only on success.
bool ChannelManager::GetOutputVolume(int* level) {
  VolumeLevel volume;
  bool ok = Send(MSG_GETOUTPUTVOLUME, &volume) && volume.result;
  if (ok) {
    *level = volume.level;
  }
  return ok;
}
// Worker-thread half of GetOutputVolume().
bool ChannelManager::GetOutputVolume_w(int* level) {
  ASSERT(worker_thread_ == talk_base::Thread::Current());
  ASSERT(initialized_);
  return media_engine_->GetOutputVolume(level);
}
// Sets the speaker volume on the worker thread.
bool ChannelManager::SetOutputVolume(int level) {
  VolumeLevel volume(level);
  return (Send(MSG_SETOUTPUTVOLUME, &volume) && volume.result);
}
// Worker-thread half of SetOutputVolume().
bool ChannelManager::SetOutputVolume_w(int level) {
  ASSERT(worker_thread_ == talk_base::Thread::Current());
  ASSERT(initialized_);
  return media_engine_->SetOutputVolume(level);
}
// Returns the name of the currently-selected camera.
bool ChannelManager::GetVideoOptions(std::string* cam_name) {
  *cam_name = camera_device_;
  return true;
}
// Selects the capture device named |cam_name|. Returns false if the device
// manager does not know the camera; if not initialized, only stores the
// selection for Init() to apply.
bool ChannelManager::SetVideoOptions(const std::string& cam_name) {
  Device device;
  if (!device_manager_->GetVideoCaptureDevice(cam_name, &device)) {
    if (!cam_name.empty()) {
      LOG(LS_WARNING) << "Device manager can't find camera: " << cam_name;
    }
    return false;
  }
  // If we're running, tell the media engine about it.
  bool ret = true;
  if (initialized_) {
    VideoOptions options(&device);
    ret = (Send(MSG_SETVIDEOOPTIONS, &options) && options.result);
  }
  // If everything worked, retain the name of the selected camera.
  if (ret) {
    camera_device_ = device.name;
  }
  return ret;
}
// Worker-thread half of SetVideoOptions().
bool ChannelManager::SetVideoOptions_w(const Device* cam_device) {
  ASSERT(worker_thread_ == talk_base::Thread::Current());
  ASSERT(initialized_);
  // Set the video input device
  return media_engine_->SetVideoCaptureDevice(cam_device);
}
// Sets the default video encoder settings. Applied immediately when
// initialized; otherwise cached and re-applied in Init().
bool ChannelManager::SetDefaultVideoEncoderConfig(const VideoEncoderConfig& c) {
  bool ret = true;
  if (initialized_) {
    DefaultVideoEncoderConfig config(c);
    ret = Send(MSG_SETDEFAULTVIDEOENCODERCONFIG, &config) && config.result;
  }
  if (ret) {
    default_video_encoder_config_ = c;
  }
  return ret;
}
// Worker-thread half of SetDefaultVideoEncoderConfig().
bool ChannelManager::SetDefaultVideoEncoderConfig_w(
    const VideoEncoderConfig& c) {
  ASSERT(worker_thread_ == talk_base::Thread::Current());
  ASSERT(initialized_);
  return media_engine_->SetDefaultVideoEncoderConfig(c);
}
// Enables or disables the engine's local audio monitor on the worker thread;
// monitoring_ mirrors the requested state on success.
bool ChannelManager::SetLocalMonitor(bool enable) {
  LocalMonitor monitor(enable);
  bool ret = Send(MSG_SETLOCALMONITOR, &monitor) && monitor.result;
  if (ret) {
    monitoring_ = enable;
  }
  return ret;
}
// Worker-thread half of SetLocalMonitor().
bool ChannelManager::SetLocalMonitor_w(bool enable) {
  ASSERT(worker_thread_ == talk_base::Thread::Current());
  ASSERT(initialized_);
  return media_engine_->SetLocalMonitor(enable);
}
// Registers |renderer| for the local camera preview. Applied immediately
// when initialized; otherwise stored and re-applied in Init().
bool ChannelManager::SetLocalRenderer(VideoRenderer* renderer) {
  bool ret = true;
  if (initialized_) {
    LocalRenderer local(renderer);
    ret = (Send(MSG_SETLOCALRENDERER, &local) && local.result);
  }
  if (ret) {
    local_renderer_ = renderer;
  }
  return ret;
}
// Worker-thread half of SetLocalRenderer().
bool ChannelManager::SetLocalRenderer_w(VideoRenderer* renderer) {
  ASSERT(worker_thread_ == talk_base::Thread::Current());
  ASSERT(initialized_);
  return media_engine_->SetLocalRenderer(renderer);
}
// Starts or stops video capture on the worker thread and returns the
// engine's CaptureResult. capturing_ is updated optimistically here; the
// definitive state arrives later via OnVideoCaptureResult().
CaptureResult ChannelManager::SetVideoCapture(bool capture) {
  bool ret;
  CaptureParams capture_params(capture);
  ret = (Send(MSG_SETVIDEOCAPTURE, &capture_params) &&
         (capture_params.result != CR_FAILURE));
  if (ret) {
    capturing_ = capture;
  }
  return capture_params.result;
}
// Worker-thread half of SetVideoCapture().
CaptureResult ChannelManager::SetVideoCapture_w(bool capture) {
  ASSERT(worker_thread_ == talk_base::Thread::Current());
  ASSERT(initialized_);
  return media_engine_->SetVideoCapture(capture);
}
// Convenience wrapper: voice-engine logging.
void ChannelManager::SetVoiceLogging(int level, const char* filter) {
  SetMediaLogging(false, level, filter);
}
// Convenience wrapper: video-engine logging.
void ChannelManager::SetVideoLogging(int level, const char* filter) {
  SetMediaLogging(true, level, filter);
}
// Routes a logging change to the right engine (video vs. voice).
void ChannelManager::SetMediaLogging(bool video, int level,
                                     const char* filter) {
  // Can be called before initialization; in this case, the worker function
  // is simply called on the main thread.
  if (initialized_) {
    LoggingOptions options(level, filter);
    Send((video) ? MSG_SETVIDEOLOGGING : MSG_SETVOICELOGGING, &options);
  } else {
    SetMediaLogging_w(video, level, filter);
  }
}
// Worker-thread half of SetMediaLogging() (also runs on the main thread
// when called before initialization).
void ChannelManager::SetMediaLogging_w(bool video, int level,
                                       const char* filter) {
  // Can be called before initialization
  ASSERT(worker_thread_ == talk_base::Thread::Current() || !initialized_);
  if (video) {
    media_engine_->SetVideoLogging(level, filter);
  } else {
    media_engine_->SetVoiceLogging(level, filter);
  }
}
// Synchronously runs the handler for |id| on the worker thread; returns
// false if there is no worker thread or we are not initialized.
bool ChannelManager::Send(uint32 id, talk_base::MessageData* data) {
  if (worker_thread_ == NULL || !initialized_) {
    return false;
  }
  worker_thread_->Send(this, id, data);
  return true;
}
// Called when asynchronous camera startup completes (from the engine's
// signal — thread unspecified here, TODO confirm); records the capture state
// and forwards the result to the main thread via MSG_CAMERASTARTED.
void ChannelManager::OnVideoCaptureResult(CaptureResult result) {
  capturing_ = result == CR_SUCCESS;
  main_thread_->Post(this, MSG_CAMERASTARTED,
                     new talk_base::TypedMessageData<CaptureResult>(result));
}
// Dispatches marshalled calls to their *_w implementations. Every case runs
// on the worker thread except MSG_CAMERASTARTED, which is posted back to the
// main thread (see OnVideoCaptureResult) and owns — and deletes — its data.
void ChannelManager::OnMessage(talk_base::Message* message) {
  talk_base::MessageData* data = message->pdata;
  switch (message->message_id) {
    case MSG_CREATEVOICECHANNEL: {
      CreationParams* p = static_cast<CreationParams*>(data);
      p->voice_channel =
          CreateVoiceChannel_w(p->session, p->content_name, p->rtcp);
      break;
    }
    case MSG_DESTROYVOICECHANNEL: {
      VoiceChannel* p = static_cast<talk_base::TypedMessageData<VoiceChannel*>*>
          (data)->data();
      DestroyVoiceChannel_w(p);
      break;
    }
    case MSG_CREATEVIDEOCHANNEL: {
      CreationParams* p = static_cast<CreationParams*>(data);
      p->video_channel = CreateVideoChannel_w(p->session, p->content_name,
                                              p->rtcp, p->voice_channel);
      break;
    }
    case MSG_DESTROYVIDEOCHANNEL: {
      VideoChannel* p = static_cast<talk_base::TypedMessageData<VideoChannel*>*>
          (data)->data();
      DestroyVideoChannel_w(p);
      break;
    }
    case MSG_CREATESOUNDCLIP: {
      talk_base::TypedMessageData<Soundclip*> *p =
          static_cast<talk_base::TypedMessageData<Soundclip*>*>(data);
      p->data() = CreateSoundclip_w();
      break;
    }
    case MSG_DESTROYSOUNDCLIP: {
      talk_base::TypedMessageData<Soundclip*> *p =
          static_cast<talk_base::TypedMessageData<Soundclip*>*>(data);
      DestroySoundclip_w(p->data());
      break;
    }
    case MSG_SETAUDIOOPTIONS: {
      AudioOptions* p = static_cast<AudioOptions*>(data);
      p->result = SetAudioOptions_w(p->options,
                                    p->in_device, p->out_device);
      break;
    }
    case MSG_GETOUTPUTVOLUME: {
      VolumeLevel* p = static_cast<VolumeLevel*>(data);
      p->result = GetOutputVolume_w(&p->level);
      break;
    }
    case MSG_SETOUTPUTVOLUME: {
      VolumeLevel* p = static_cast<VolumeLevel*>(data);
      p->result = SetOutputVolume_w(p->level);
      break;
    }
    case MSG_SETLOCALMONITOR: {
      LocalMonitor* p = static_cast<LocalMonitor*>(data);
      p->result = SetLocalMonitor_w(p->enable);
      break;
    }
    case MSG_SETVIDEOOPTIONS: {
      VideoOptions* p = static_cast<VideoOptions*>(data);
      p->result = SetVideoOptions_w(p->cam_device);
      break;
    }
    case MSG_SETDEFAULTVIDEOENCODERCONFIG: {
      DefaultVideoEncoderConfig* p =
          static_cast<DefaultVideoEncoderConfig*>(data);
      p->result = SetDefaultVideoEncoderConfig_w(p->config);
      break;
    }
    case MSG_SETLOCALRENDERER: {
      LocalRenderer* p = static_cast<LocalRenderer*>(data);
      p->result = SetLocalRenderer_w(p->renderer);
      break;
    }
    case MSG_SETVIDEOCAPTURE: {
      CaptureParams* p = static_cast<CaptureParams*>(data);
      p->result = SetVideoCapture_w(p->capture);
      break;
    }
    case MSG_SETVOICELOGGING:
    case MSG_SETVIDEOLOGGING: {
      LoggingOptions* p = static_cast<LoggingOptions*>(data);
      bool video = (message->message_id == MSG_SETVIDEOLOGGING);
      SetMediaLogging_w(video, p->level, p->filter.c_str());
      break;
    }
    case MSG_CAMERASTARTED: {
      // Heap-allocated by OnVideoCaptureResult(); we own it here.
      talk_base::TypedMessageData<CaptureResult>* data =
          static_cast<talk_base::TypedMessageData<CaptureResult>*>(
              message->pdata);
      SignalVideoCaptureResult(data->data());
      delete data;
      break;
    }
  }
}
static void GetDeviceNames(const std::vector<Device>& devs,
std::vector<std::string>* names) {
names->clear();
for (size_t i = 0; i < devs.size(); ++i) {
names->push_back(devs[i].name);
}
}
// Fills |names| with the available microphone names.
bool ChannelManager::GetAudioInputDevices(std::vector<std::string>* names) {
  names->clear();
  std::vector<Device> devs;
  bool ret = device_manager_->GetAudioInputDevices(&devs);
  if (ret)
    GetDeviceNames(devs, names);
  return ret;
}
// Fills |names| with the available speaker names.
bool ChannelManager::GetAudioOutputDevices(std::vector<std::string>* names) {
  names->clear();
  std::vector<Device> devs;
  bool ret = device_manager_->GetAudioOutputDevices(&devs);
  if (ret)
    GetDeviceNames(devs, names);
  return ret;
}
// Fills |names| with the available camera names.
bool ChannelManager::GetVideoCaptureDevices(std::vector<std::string>* names) {
  names->clear();
  std::vector<Device> devs;
  bool ret = device_manager_->GetVideoCaptureDevices(&devs);
  if (ret)
    GetDeviceNames(devs, names);
  return ret;
}
} // namespace cricket

View File

@ -35,7 +35,7 @@
#include <ksmedia.h>
#define INITGUID // For PKEY_AudioEndpoint_GUID
#include <mmdeviceapi.h>
#include <MMSystem.h>
#include <mmsystem.h>
#include <functiondiscoverykeys_devpkey.h>
#include <uuids.h>
#include "talk/base/win32.h" // ToUtf8
@ -53,21 +53,24 @@
#include "talk/base/stream.h"
#include "talk/session/phone/libudevsymboltable.h"
#include "talk/session/phone/v4llookup.h"
#if defined(LINUX_SOUND_USED)
#include "talk/sound/platformsoundsystem.h"
#include "talk/sound/platformsoundsystemfactory.h"
#include "talk/sound/sounddevicelocator.h"
#include "talk/sound/soundsysteminterface.h"
#endif
#endif
#include "talk/base/logging.h"
#include "talk/base/stringutils.h"
#include "talk/session/phone/mediaengine.h"
#include "talk/base/thread.h"
#include "talk/session/phone/mediacommon.h"
namespace cricket {
// Initialize to empty string.
const std::string DeviceManager::kDefaultDeviceName;
const char DeviceManagerInterface::kDefaultDeviceName[] = "";
DeviceManagerInterface* DeviceManagerFactory::Create() {
return new DeviceManager();
}
#ifdef WIN32
class DeviceWatcher : public talk_base::Win32Window {
@ -151,7 +154,7 @@ DeviceManager::DeviceManager()
need_couninitialize_(false),
#endif
watcher_(new DeviceWatcher(this))
#ifdef LINUX_SOUND_USED
#ifdef LINUX
, sound_system_(new PlatformSoundSystemFactory())
#endif
{
@ -199,15 +202,15 @@ void DeviceManager::Terminate() {
int DeviceManager::GetCapabilities() {
std::vector<Device> devices;
int caps = MediaEngine::VIDEO_RECV;
int caps = VIDEO_RECV;
if (GetAudioInputDevices(&devices) && !devices.empty()) {
caps |= MediaEngine::AUDIO_SEND;
caps |= AUDIO_SEND;
}
if (GetAudioOutputDevices(&devices) && !devices.empty()) {
caps |= MediaEngine::AUDIO_RECV;
caps |= AUDIO_RECV;
}
if (GetVideoCaptureDevices(&devices) && !devices.empty()) {
caps |= MediaEngine::VIDEO_SEND;
caps |= VIDEO_SEND;
}
return caps;
}
@ -327,7 +330,8 @@ bool DeviceManager::GetAudioDevice(bool is_input, const std::string& name,
bool DeviceManager::GetAudioDevicesByPlatform(bool input,
std::vector<Device>* devs) {
devs->clear();
#if defined(LINUX_SOUND_USED)
#if defined(LINUX)
if (!sound_system_.get()) {
return false;
}

View File

@ -1,109 +0,0 @@
/*
* libjingle
* Copyright 2004--2008, Google Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
* EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef TALK_SESSION_PHONE_DEVICEMANAGER_H_
#define TALK_SESSION_PHONE_DEVICEMANAGER_H_
#include <string>
#include <vector>
#include "talk/base/sigslot.h"
#include "talk/base/stringencode.h"
#ifdef LINUX_SOUND_USED
#include "talk/sound/soundsystemfactory.h"
#endif
namespace cricket {
class DeviceWatcher;
// Used to represent an audio or video capture or render device.
struct Device {
  Device() {}
  // Some platforms identify devices by numeric index; it is stored here as
  // a string id.
  Device(const std::string& first, int second)
      : name(first),
        id(talk_base::ToString(second)) {
  }
  Device(const std::string& first, const std::string& second)
      : name(first), id(second) {}
  std::string name;  // Human-readable device name.
  std::string id;    // Platform-specific device identifier.
};
// DeviceManager manages the audio and video devices on the system.
// Methods are virtual to allow for easy stubbing/mocking in tests.
class DeviceManager {
 public:
  DeviceManager();
  virtual ~DeviceManager();

  // Initialization
  virtual bool Init();
  virtual void Terminate();
  bool initialized() const { return initialized_; }

  // Capabilities: returns a bitmask of supported send/recv media (see
  // GetCapabilities() in the .cc).
  virtual int GetCapabilities();

  // Device enumeration. The Get*Device(name, out) forms look a single
  // device up by name; |name| may be kDefaultDeviceName.
  virtual bool GetAudioInputDevices(std::vector<Device>* devices);
  virtual bool GetAudioOutputDevices(std::vector<Device>* devices);
  bool GetAudioInputDevice(const std::string& name, Device* out);
  bool GetAudioOutputDevice(const std::string& name, Device* out);
  virtual bool GetVideoCaptureDevices(std::vector<Device>* devs);
  bool GetVideoCaptureDevice(const std::string& name, Device* out);
  // Fired (via the platform DeviceWatcher) when devices are added/removed.
  sigslot::signal0<> SignalDevicesChange;
  void OnDevicesChange() { SignalDevicesChange(); }
  // Empty string; asks for whatever the platform considers the default.
  static const std::string kDefaultDeviceName;

 protected:
  virtual bool GetAudioDevice(bool is_input, const std::string& name,
                              Device* out);
  virtual bool GetDefaultVideoCaptureDevice(Device* device);

 private:
  bool GetAudioDevicesByPlatform(bool input, std::vector<Device>* devs);

  bool initialized_;
#ifdef WIN32
  bool need_couninitialize_;  // True if CoInitialize succeeded in Init().
#endif
  DeviceWatcher* watcher_;    // Platform device-change monitor; owned.
#ifdef LINUX_SOUND_USED
  SoundSystemHandle sound_system_;
#endif
};
} // namespace cricket
#endif // TALK_SESSION_PHONE_DEVICEMANAGER_H_

View File

@ -1,660 +0,0 @@
/*
* libjingle
* Copyright 2009, Google Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
* EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// talk's config.h, generated from mac_config_dot_h for OSX, conflicts with the
// one included by the libsrtp headers. Don't use it. Instead, we keep HAVE_SRTP
// and LOGGING defined in config.h.
#undef HAVE_CONFIG_H
#ifdef OSX
// TODO: For the XCode build, we force SRTP (b/2500074)
#ifndef HAVE_SRTP
#define HAVE_SRTP 1
#endif // HAVE_SRTP
// If LOGGING is not defined, define it to 1 (b/3245816)
#ifndef LOGGING
#define LOGGING 1
#endif // HAVE_SRTP
#endif
#include "talk/session/phone/srtpfilter.h"
#include <algorithm>
#include <cstring>
#include "talk/base/base64.h"
#include "talk/base/logging.h"
#include "talk/base/time.h"
#include "talk/session/phone/rtputils.h"
// Enable this line to turn on SRTP debugging
// #define SRTP_DEBUG
#ifdef HAVE_SRTP
#ifdef SRTP_RELATIVE_PATH
#include "srtp.h" // NOLINT
#else
#include "third_party/libsrtp/include/srtp.h"
#endif // SRTP_RELATIVE_PATH
#ifdef _DEBUG
extern "C" debug_module_t mod_srtp;
extern "C" debug_module_t mod_auth;
extern "C" debug_module_t mod_cipher;
extern "C" debug_module_t mod_stat;
extern "C" debug_module_t mod_alloc;
extern "C" debug_module_t mod_aes_icm;
extern "C" debug_module_t mod_aes_hmac;
#endif
#else
// SrtpFilter needs that constant.
#define SRTP_MASTER_KEY_LEN 30
#endif // HAVE_SRTP
namespace cricket {
// SRTP crypto suite names (RFC 4568 / SDES). NOTE(review): CS_DEFAULT is a
// reference bound before its referent's definition below; safe for use after
// this TU's static initialization completes, but order-sensitive — confirm
// no dynamic initializer reads it earlier.
const std::string& CS_DEFAULT = CS_AES_CM_128_HMAC_SHA1_80;
const std::string CS_AES_CM_128_HMAC_SHA1_80 = "AES_CM_128_HMAC_SHA1_80";
const std::string CS_AES_CM_128_HMAC_SHA1_32 = "AES_CM_128_HMAC_SHA1_32";
// Base64 expands every 3 bytes to 4 characters.
const int SRTP_MASTER_KEY_BASE64_LEN = SRTP_MASTER_KEY_LEN * 4 / 3;
#ifndef HAVE_SRTP
// This helper function is used on systems that don't (yet) have SRTP,
// to log that the functions that require it won't do anything.
namespace {
// Logs that |func| is a no-op because SRTP support was not compiled in,
// and returns false so callers report failure.
bool SrtpNotAvailable(const char *func) {
  LOG(LS_ERROR) << func << ": SRTP is not available on your system.";
  return false;
}
} // anonymous namespace
#endif // !HAVE_SRTP
#ifdef HAVE_SRTP  // Kept separate from the #ifndef HAVE_SRTP block above
                  // because this definition lives inside namespace cricket.
// Turns on verbose tracing in the individual libsrtp debug modules.
// Effective only in debug builds; a release build compiles to a no-op.
void EnableSrtpDebugging() {
#ifdef _DEBUG
  debug_on(mod_srtp);
  debug_on(mod_auth);
  debug_on(mod_cipher);
  debug_on(mod_stat);
  debug_on(mod_alloc);
  debug_on(mod_aes_icm);
  // debug_on(mod_aes_cbc);
  // debug_on(mod_hmac);
#endif
}
#endif
// Both sessions are created up front but stay unkeyed (and hence unusable)
// until ApplyParams() succeeds. Their error signals are re-emitted through
// this filter's own SignalSrtpError.
SrtpFilter::SrtpFilter()
    : state_(ST_INIT),
      send_session_(new SrtpSession()),
      recv_session_(new SrtpSession()) {
  SignalSrtpError.repeat(send_session_->SignalSrtpError);
  SignalSrtpError.repeat(recv_session_->SignalSrtpError);
}
// Empty: the session members (presumably smart-pointer members declared in
// the header -- confirm) release themselves.
SrtpFilter::~SrtpFilter() {
}
// True only once a crypto answer has been successfully applied.
bool SrtpFilter::IsActive() const {
  return state_ == ST_ACTIVE;
}
// Records the crypto parameters from an offer. Only legal in the initial
// state; |source| indicates whether the offer is locally or remotely
// generated.
bool SrtpFilter::SetOffer(const std::vector<CryptoParams>& offer_params,
                          ContentSource source) {
  if (state_ != ST_INIT) {
    LOG(LS_ERROR) << "Invalid state for SRTP offer";
    return false;
  }
  return StoreParams(offer_params, source);
}
// Completes the SRTP negotiation when the answer arrives. Legal only in
// ST_SENTOFFER with a remote answer, or ST_RECEIVEDOFFER with a local one.
// An empty |answer_params| means crypto was declined and the session
// completes unencrypted.
bool SrtpFilter::SetAnswer(const std::vector<CryptoParams>& answer_params,
                           ContentSource source) {
  bool ret = false;
  if ((state_ == ST_SENTOFFER && source == CS_REMOTE) ||
      (state_ == ST_RECEIVEDOFFER && source == CS_LOCAL)) {
    // If the answer requests crypto, finalize the parameters and apply them.
    // Otherwise, complete the negotiation of a unencrypted session.
    if (!answer_params.empty()) {
      CryptoParams selected_params;
      ret = NegotiateParams(answer_params, &selected_params);
      if (ret) {
        // The offerer sends with the params it offered and receives with the
        // params the answerer selected, and vice versa for the answerer.
        if (state_ == ST_SENTOFFER) {
          ret = ApplyParams(selected_params, answer_params[0]);
        } else {  // ST_RECEIVEDOFFER
          ret = ApplyParams(answer_params[0], selected_params);
        }
      }
    } else {
      ret = ResetParams();
    }
  } else {
    LOG(LS_ERROR) << "Invalid state for SRTP answer";
  }
  return ret;
}
// Encrypts/authenticates an RTP packet in place via the send session.
// Fails (with a warning) until the filter is active.
bool SrtpFilter::ProtectRtp(void* p, int in_len, int max_len, int* out_len) {
  if (IsActive()) {
    return send_session_->ProtectRtp(p, in_len, max_len, out_len);
  }
  LOG(LS_WARNING) << "Failed to ProtectRtp: SRTP not active";
  return false;
}
// Encrypts/authenticates an RTCP packet in place via the send session.
// Fails (with a warning) until the filter is active.
bool SrtpFilter::ProtectRtcp(void* p, int in_len, int max_len, int* out_len) {
  if (IsActive()) {
    return send_session_->ProtectRtcp(p, in_len, max_len, out_len);
  }
  LOG(LS_WARNING) << "Failed to ProtectRtcp: SRTP not active";
  return false;
}
// Verifies/decrypts an SRTP packet in place via the receive session.
// Fails (with a warning) until the filter is active.
bool SrtpFilter::UnprotectRtp(void* p, int in_len, int* out_len) {
  if (IsActive()) {
    return recv_session_->UnprotectRtp(p, in_len, out_len);
  }
  LOG(LS_WARNING) << "Failed to UnprotectRtp: SRTP not active";
  return false;
}
// Verifies/decrypts an SRTCP packet in place via the receive session.
// Fails (with a warning) until the filter is active.
bool SrtpFilter::UnprotectRtcp(void* p, int in_len, int* out_len) {
  if (IsActive()) {
    return recv_session_->UnprotectRtcp(p, in_len, out_len);
  }
  LOG(LS_WARNING) << "Failed to UnprotectRtcp: SRTP not active";
  return false;
}
// Sets, on both sessions, how long repeats of the same SRTP error are
// suppressed before being signaled again.
void SrtpFilter::set_signal_silent_time(uint32 signal_silent_time_in_ms) {
  send_session_->set_signal_silent_time(signal_silent_time_in_ms);
  recv_session_->set_signal_silent_time(signal_silent_time_in_ms);
}
// Saves the offered crypto params until the answer arrives, and advances
// the state machine according to who generated the offer.
bool SrtpFilter::StoreParams(const std::vector<CryptoParams>& params,
                             ContentSource source) {
  offer_params_ = params;
  if (source == CS_LOCAL) {
    state_ = ST_SENTOFFER;
  } else {
    state_ = ST_RECEIVEDOFFER;
  }
  return true;
}
bool SrtpFilter::NegotiateParams(const std::vector<CryptoParams>& answer_params,
CryptoParams* selected_params) {
// We're processing an accept. We should have exactly one set of params,
// unless the offer didn't mention crypto, in which case we shouldn't be here.
bool ret = (answer_params.size() == 1U && !offer_params_.empty());
if (ret) {
// We should find a match between the answer params and the offered params.
std::vector<CryptoParams>::const_iterator it;
for (it = offer_params_.begin(); it != offer_params_.end(); ++it) {
if (answer_params[0].Matches(*it)) {
break;
}
}
if (it != offer_params_.end()) {
*selected_params = *it;
} else {
ret = false;
}
}
if (!ret) {
LOG(LS_WARNING) << "Invalid parameters in SRTP answer";
}
return ret;
}
// Derives the send and receive SRTP sessions from the negotiated params.
// On success the filter becomes active and the stored offer is discarded.
bool SrtpFilter::ApplyParams(const CryptoParams& send_params,
                             const CryptoParams& recv_params) {
  bool ret;
  uint8 send_key[SRTP_MASTER_KEY_LEN], recv_key[SRTP_MASTER_KEY_LEN];
  ret = (ParseKeyParams(send_params.key_params, send_key, sizeof(send_key)) &&
         ParseKeyParams(recv_params.key_params, recv_key, sizeof(recv_key)));
  if (ret) {
    ret = (send_session_->SetSend(send_params.cipher_suite,
                                  send_key, sizeof(send_key)) &&
           recv_session_->SetRecv(recv_params.cipher_suite,
                                  recv_key, sizeof(recv_key)));
  }
  // Zero the master keys now that the sessions hold their own copies, so the
  // keying material doesn't linger on the stack (resolves the old TODO).
  memset(send_key, 0, sizeof(send_key));
  memset(recv_key, 0, sizeof(recv_key));
  if (ret) {
    offer_params_.clear();
    state_ = ST_ACTIVE;
    LOG(LS_INFO) << "SRTP activated with negotiated parameters:"
                 << " send cipher_suite " << send_params.cipher_suite
                 << " recv cipher_suite " << recv_params.cipher_suite;
  } else {
    LOG(LS_WARNING) << "Failed to apply negotiated SRTP parameters";
  }
  return ret;
}
// Drops any stored offer and returns the filter to its initial (inactive)
// state, completing negotiation of an unencrypted session.
bool SrtpFilter::ResetParams() {
  offer_params_.clear();
  state_ = ST_INIT;
  LOG(LS_INFO) << "SRTP reset to init state";
  return true;
}
// Extracts the raw master key from an SDES key_params string of the form
// "inline:<base64-key>". Returns false for any other key-method, a bad
// base64 encoding, or a decoded key whose size differs from |len|.
bool SrtpFilter::ParseKeyParams(const std::string& key_params,
                                uint8* key, int len) {
  // example key_params: "inline:YUJDZGVmZ2hpSktMbW9QUXJzVHVWd3l6MTIzNDU2"
  static const char kInlinePrefix[] = "inline:";
  // Fail if key-method is wrong.
  if (key_params.find(kInlinePrefix) != 0) {
    return false;
  }
  // Fail if base64 decode fails, or the key is the wrong size.
  std::string key_b64 = key_params.substr(strlen(kInlinePrefix));
  std::string key_str;
  if (!talk_base::Base64::Decode(key_b64, talk_base::Base64::DO_STRICT,
                                 &key_str, NULL)) {
    return false;
  }
  if (static_cast<int>(key_str.size()) != len) {
    return false;
  }
  memcpy(key, key_str.c_str(), len);
  return true;
}
///////////////////////////////////////////////////////////////////////////////
// SrtpSession
#ifdef HAVE_SRTP
// Whether srtp_init() has run for this process (shared by all sessions).
bool SrtpSession::inited_ = false;
// Registry of live sessions, used by HandleEventThunk to route libsrtp
// events back to the owning SrtpSession.
std::list<SrtpSession*> SrtpSession::sessions_;
// Registers this session in the global list so HandleEventThunk can find
// it. last_send_seq_num_ of -1 means "nothing sent yet". The stat tracker's
// errors are re-emitted through this session's SignalSrtpError.
SrtpSession::SrtpSession()
    : session_(NULL),
      rtp_auth_tag_len_(0),
      rtcp_auth_tag_len_(0),
      srtp_stat_(new SrtpStat()),
      last_send_seq_num_(-1) {
  sessions_.push_back(this);
  SignalSrtpError.repeat(srtp_stat_->SignalSrtpError);
}
// Unregisters from the global session list, then frees the libsrtp session
// if one was ever created by SetKey().
SrtpSession::~SrtpSession() {
  sessions_.erase(std::find(sessions_.begin(), sessions_.end(), this));
  if (session_) {
    srtp_dealloc(session_);
  }
}
// Keys this session for protecting outbound packets on any SSRC.
bool SrtpSession::SetSend(const std::string& cs, const uint8* key, int len) {
  return SetKey(ssrc_any_outbound, cs, key, len);
}
// Keys this session for unprotecting inbound packets on any SSRC.
bool SrtpSession::SetRecv(const std::string& cs, const uint8* key, int len) {
  return SetKey(ssrc_any_inbound, cs, key, len);
}
// Encrypts and authenticates an RTP packet in place. |p| points to a buffer
// of at least |max_len| bytes holding |in_len| bytes of plain RTP; on
// success *out_len is the protected length (payload plus auth tag).
bool SrtpSession::ProtectRtp(void* p, int in_len, int max_len, int* out_len) {
  if (!session_) {
    LOG(LS_WARNING) << "Failed to protect SRTP packet: no SRTP Session";
    return false;
  }
  int need_len = in_len + rtp_auth_tag_len_;  // NOLINT
  if (max_len < need_len) {
    LOG(LS_WARNING) << "Failed to protect SRTP packet: The buffer length "
                    << max_len << " is less than the needed " << need_len;
    return false;
  }
  *out_len = in_len;
  int err = srtp_protect(session_, p, out_len);
  uint32 ssrc;
  if (GetRtpSsrc(p, in_len, &ssrc)) {
    srtp_stat_->AddProtectRtpResult(ssrc, err);
  }
  // Initialize in case GetRtpSeqNum cannot parse the header; previously an
  // unparsable packet left seq_num uninitialized, and the garbage value was
  // logged and cached in last_send_seq_num_.
  int seq_num = -1;
  GetRtpSeqNum(p, in_len, &seq_num);
  if (err != err_status_ok) {
    LOG(LS_WARNING) << "Failed to protect SRTP packet, seqnum="
                    << seq_num << ", err=" << err << ", last seqnum="
                    << last_send_seq_num_;
    return false;
  }
  last_send_seq_num_ = seq_num;
  return true;
}
// Encrypts and authenticates an RTCP packet in place. The buffer must have
// room for both the 4-byte SRTCP index and the auth tag.
bool SrtpSession::ProtectRtcp(void* p, int in_len, int max_len, int* out_len) {
  if (!session_) {
    LOG(LS_WARNING) << "Failed to protect SRTCP packet: no SRTP Session";
    return false;
  }
  const int need_len = in_len + sizeof(uint32) + rtcp_auth_tag_len_;  // NOLINT
  if (max_len < need_len) {
    LOG(LS_WARNING) << "Failed to protect SRTCP packet: The buffer length "
                    << max_len << " is less than the needed " << need_len;
    return false;
  }
  *out_len = in_len;
  const int err = srtp_protect_rtcp(session_, p, out_len);
  srtp_stat_->AddProtectRtcpResult(err);
  if (err == err_status_ok) {
    return true;
  }
  LOG(LS_WARNING) << "Failed to protect SRTCP packet, err=" << err;
  return false;
}
// Verifies and decrypts an SRTP packet in place; on success *out_len is the
// plain RTP length. Results are fed to the per-SSRC failure statistics.
bool SrtpSession::UnprotectRtp(void* p, int in_len, int* out_len) {
  if (!session_) {
    LOG(LS_WARNING) << "Failed to unprotect SRTP packet: no SRTP Session";
    return false;
  }
  *out_len = in_len;
  const int err = srtp_unprotect(session_, p, out_len);
  uint32 ssrc;
  if (GetRtpSsrc(p, in_len, &ssrc)) {
    srtp_stat_->AddUnprotectRtpResult(ssrc, err);
  }
  if (err == err_status_ok) {
    return true;
  }
  LOG(LS_WARNING) << "Failed to unprotect SRTP packet, err=" << err;
  return false;
}
// Verifies and decrypts an SRTCP packet in place; on success *out_len is
// the plain RTCP length.
bool SrtpSession::UnprotectRtcp(void* p, int in_len, int* out_len) {
  if (!session_) {
    LOG(LS_WARNING) << "Failed to unprotect SRTCP packet: no SRTP Session";
    return false;
  }
  *out_len = in_len;
  const int err = srtp_unprotect_rtcp(session_, p, out_len);
  srtp_stat_->AddUnprotectRtcpResult(err);
  if (err == err_status_ok) {
    return true;
  }
  LOG(LS_WARNING) << "Failed to unprotect SRTCP packet, err=" << err;
  return false;
}
// Forwards the error-signal rate limit to the statistics tracker.
void SrtpSession::set_signal_silent_time(uint32 signal_silent_time_in_ms) {
  srtp_stat_->set_signal_silent_time(signal_silent_time_in_ms);
}
// Creates the underlying libsrtp session for one direction. |type| is
// ssrc_any_outbound (send) or ssrc_any_inbound (receive); |cs| is the
// negotiated crypto suite, and |key| is the SRTP_MASTER_KEY_LEN-byte
// master key + salt. Fails if a session already exists for this object.
bool SrtpSession::SetKey(int type, const std::string& cs,
                         const uint8* key, int len) {
  if (session_) {
    LOG(LS_ERROR) << "Failed to create SRTP session: "
                  << "SRTP session already created";
    return false;
  }
  // Lazily perform the process-wide libsrtp initialization.
  if (!Init()) {
    return false;
  }
  srtp_policy_t policy;
  memset(&policy, 0, sizeof(policy));
  if (cs == CS_AES_CM_128_HMAC_SHA1_80) {
    crypto_policy_set_aes_cm_128_hmac_sha1_80(&policy.rtp);
    crypto_policy_set_aes_cm_128_hmac_sha1_80(&policy.rtcp);
  } else if (cs == CS_AES_CM_128_HMAC_SHA1_32) {
    crypto_policy_set_aes_cm_128_hmac_sha1_32(&policy.rtp);   // rtp is 32,
    crypto_policy_set_aes_cm_128_hmac_sha1_80(&policy.rtcp);  // rtcp still 80
  } else {
    LOG(LS_WARNING) << "Failed to create SRTP session: unsupported"
                    << " cipher_suite " << cs.c_str();
    return false;
  }
  if (!key || len != SRTP_MASTER_KEY_LEN) {
    LOG(LS_WARNING) << "Failed to create SRTP session: invalid key";
    return false;
  }
  policy.ssrc.type = static_cast<ssrc_type_t>(type);
  policy.ssrc.value = 0;
  policy.key = const_cast<uint8*>(key);
  // TODO parse window size from WSH session-param
  policy.window_size = 1024;
  // Permit protecting a packet with an already-used sequence number
  // (libsrtp allow_repeat_tx), i.e. retransmission of the same packet.
  policy.allow_repeat_tx = 1;
  policy.next = NULL;
  int err = srtp_create(&session_, &policy);
  if (err != err_status_ok) {
    LOG(LS_ERROR) << "Failed to create SRTP session, err=" << err;
    return false;
  }
  // Cache the auth tag lengths so Protect* can validate buffer headroom.
  rtp_auth_tag_len_ = policy.rtp.auth_tag_len;
  rtcp_auth_tag_len_ = policy.rtcp.auth_tag_len;
  return true;
}
// Process-wide, one-time libsrtp initialization shared by all sessions.
// Safe to call repeatedly; only the first successful call does any work.
bool SrtpSession::Init() {
  if (inited_) {
    return true;
  }
  int err = srtp_init();
  if (err != err_status_ok) {
    LOG(LS_ERROR) << "Failed to init SRTP, err=" << err;
    return false;
  }
  err = srtp_install_event_handler(&SrtpSession::HandleEventThunk);
  if (err != err_status_ok) {
    LOG(LS_ERROR) << "Failed to install SRTP event handler, err=" << err;
    return false;
  }
  inited_ = true;
  return true;
}
// Logs libsrtp session events (key-usage limits, SSRC collisions, packet
// index exhaustion). Informational only; no state is changed.
void SrtpSession::HandleEvent(const srtp_event_data_t* ev) {
  switch (ev->event) {
    case event_ssrc_collision:
      LOG(LS_INFO) << "SRTP event: SSRC collision";
      break;
    case event_key_soft_limit:
      LOG(LS_INFO) << "SRTP event: reached soft key usage limit";
      break;
    case event_key_hard_limit:
      LOG(LS_INFO) << "SRTP event: reached hard key usage limit";
      break;
    case event_packet_index_limit:
      LOG(LS_INFO) << "SRTP event: reached hard packet limit (2^48 packets)";
      break;
    default:
      LOG(LS_INFO) << "SRTP event: unknown " << ev->event;
      break;
  }
}
// libsrtp event callback: routes |ev| to the SrtpSession that owns
// ev->session. NOTE(review): access to |sessions_| is unsynchronized;
// presumably all sessions live on a single thread -- confirm.
void SrtpSession::HandleEventThunk(srtp_event_data_t* ev) {
  std::list<SrtpSession*>::iterator iter = sessions_.begin();
  for (; iter != sessions_.end(); ++iter) {
    SrtpSession* session = *iter;
    if (session->session_ == ev->session) {
      session->HandleEvent(ev);
      return;
    }
  }
}
#else // !HAVE_SRTP
// No-op SrtpSession used when the library is built without SRTP support.
// Construction logs once; every operation fails via SrtpNotAvailable().
SrtpSession::SrtpSession() {
  LOG(WARNING) << "SRTP implementation is missing.";
}
SrtpSession::~SrtpSession() {
}
bool SrtpSession::SetSend(const std::string& cs, const uint8* key, int len) {
  return SrtpNotAvailable(__FUNCTION__);
}
bool SrtpSession::SetRecv(const std::string& cs, const uint8* key, int len) {
  return SrtpNotAvailable(__FUNCTION__);
}
bool SrtpSession::ProtectRtp(void* data, int in_len, int max_len,
                             int* out_len) {
  return SrtpNotAvailable(__FUNCTION__);
}
bool SrtpSession::ProtectRtcp(void* data, int in_len, int max_len,
                              int* out_len) {
  return SrtpNotAvailable(__FUNCTION__);
}
bool SrtpSession::UnprotectRtp(void* data, int in_len, int* out_len) {
  return SrtpNotAvailable(__FUNCTION__);
}
bool SrtpSession::UnprotectRtcp(void* data, int in_len, int* out_len) {
  return SrtpNotAvailable(__FUNCTION__);
}
void SrtpSession::set_signal_silent_time(uint32 signal_silent_time) {
  // Do nothing.
}
#endif // HAVE_SRTP
///////////////////////////////////////////////////////////////////////////////
// SrtpStat
#ifdef HAVE_SRTP
// By default, repeats of the same error are silenced for 1000 ms.
SrtpStat::SrtpStat()
    : signal_silent_time_(1000) {
}
// Translates a libsrtp protect result code into a FailureKey and feeds it
// to the rate-limited error signaling.
void SrtpStat::AddProtectRtpResult(uint32 ssrc, int result) {
  FailureKey key;
  key.ssrc = ssrc;
  key.mode = SrtpFilter::PROTECT;
  if (result == err_status_ok) {
    key.error = SrtpFilter::ERROR_NONE;
  } else if (result == err_status_auth_fail) {
    key.error = SrtpFilter::ERROR_AUTH;
  } else {
    key.error = SrtpFilter::ERROR_FAIL;
  }
  HandleSrtpResult(key);
}
// Translates a libsrtp unprotect result code into a FailureKey. Unlike the
// protect path, replay failures are tracked as a distinct error type.
void SrtpStat::AddUnprotectRtpResult(uint32 ssrc, int result) {
  FailureKey key;
  key.ssrc = ssrc;
  key.mode = SrtpFilter::UNPROTECT;
  if (result == err_status_ok) {
    key.error = SrtpFilter::ERROR_NONE;
  } else if (result == err_status_auth_fail) {
    key.error = SrtpFilter::ERROR_AUTH;
  } else if (result == err_status_replay_fail ||
             result == err_status_replay_old) {
    key.error = SrtpFilter::ERROR_REPLAY;
  } else {
    key.error = SrtpFilter::ERROR_FAIL;
  }
  HandleSrtpResult(key);
}
// RTCP results are tracked under the reserved SSRC value 0.
void SrtpStat::AddProtectRtcpResult(int result) {
  AddProtectRtpResult(0U, result);
}
// RTCP results are tracked under the reserved SSRC value 0.
void SrtpStat::AddUnprotectRtcpResult(int result) {
  AddUnprotectRtpResult(0U, result);
}
// Emits SignalSrtpError for a failure, rate-limiting repeats of the same
// (ssrc, mode, error) triple to once per signal_silent_time_ milliseconds.
void SrtpStat::HandleSrtpResult(const SrtpStat::FailureKey& key) {
  // Handle some cases where error should be signalled right away. For other
  // errors, trigger error for the first time seeing it. After that, silent
  // the same error for a certain amount of time (default 1 sec).
  if (key.error != SrtpFilter::ERROR_NONE) {
    // For errors, signal first time and wait for 1 sec.
    FailureStat* stat = &(failures_[key]);
    uint32 current_time = talk_base::Time();
    // last_signal_time == 0 means this key has never been signaled before.
    if (stat->last_signal_time == 0 ||
        talk_base::TimeDiff(current_time, stat->last_signal_time) >
        static_cast<int>(signal_silent_time_)) {
      SignalSrtpError(key.ssrc, key.mode, key.error);
      stat->last_signal_time = current_time;
    }
  }
}
#else // !HAVE_SRTP
// No-op SrtpStat used when the library is built without SRTP support.
// Construction logs once; every operation reports via SrtpNotAvailable().
SrtpStat::SrtpStat()
    : signal_silent_time_(1000) {
  LOG(WARNING) << "SRTP implementation is missing.";
}
void SrtpStat::AddProtectRtpResult(uint32 ssrc, int result) {
  SrtpNotAvailable(__FUNCTION__);
}
void SrtpStat::AddUnprotectRtpResult(uint32 ssrc, int result) {
  SrtpNotAvailable(__FUNCTION__);
}
void SrtpStat::AddProtectRtcpResult(int result) {
  SrtpNotAvailable(__FUNCTION__);
}
void SrtpStat::AddUnprotectRtcpResult(int result) {
  SrtpNotAvailable(__FUNCTION__);
}
void SrtpStat::HandleSrtpResult(const SrtpStat::FailureKey& key) {
  SrtpNotAvailable(__FUNCTION__);
}
#endif // HAVE_SRTP
} // namespace cricket

View File

@ -1,84 +0,0 @@
/*
* libjingle
* Copyright 2004--2011, Google Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
* EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef TALK_SESSION_PHONE_WEBRTCCOMMON_H_
#define TALK_SESSION_PHONE_WEBRTCCOMMON_H_
#ifdef WEBRTC_RELATIVE_PATH
#include "common_types.h"
#include "video_engine/main/interface/vie_base.h"
#include "voice_engine/main/interface/voe_base.h"
#else
#include "third_party/webrtc/files/include/common_types.h"
#include "third_party/webrtc/files/include/voe_base.h"
#include "third_party/webrtc/files/include/vie_base.h"
#endif // WEBRTC_RELATIVE_PATH
namespace cricket {
// Tracing helpers, for easy logging when WebRTC calls fail.
// Example: "LOG_RTCERR1(StartSend, channel);" produces the trace
// "StartSend(1) failed, err=XXXX"
// The method GetLastEngineError must be defined in the calling scope.
#define LOG_RTCERR0(func) \
LOG_RTCERR0_EX(func, GetLastEngineError())
#define LOG_RTCERR1(func, a1) \
LOG_RTCERR1_EX(func, a1, GetLastEngineError())
#define LOG_RTCERR2(func, a1, a2) \
LOG_RTCERR2_EX(func, a1, a2, GetLastEngineError())
#define LOG_RTCERR3(func, a1, a2, a3) \
LOG_RTCERR3_EX(func, a1, a2, a3, GetLastEngineError())
#define LOG_RTCERR4(func, a1, a2, a3, a4) \
LOG_RTCERR4_EX(func, a1, a2, a3, a4, GetLastEngineError())
#define LOG_RTCERR5(func, a1, a2, a3, a4, a5) \
LOG_RTCERR5_EX(func, a1, a2, a3, a4, a5, GetLastEngineError())
#define LOG_RTCERR6(func, a1, a2, a3, a4, a5, a6) \
LOG_RTCERR6_EX(func, a1, a2, a3, a4, a5, a6, GetLastEngineError())
#define LOG_RTCERR0_EX(func, err) LOG(LS_WARNING) \
<< "" << #func << "() failed, err=" << err
#define LOG_RTCERR1_EX(func, a1, err) LOG(LS_WARNING) \
<< "" << #func << "(" << a1 << ") failed, err=" << err
#define LOG_RTCERR2_EX(func, a1, a2, err) LOG(LS_WARNING) \
<< "" << #func << "(" << a1 << ", " << a2 << ") failed, err=" \
<< err
#define LOG_RTCERR3_EX(func, a1, a2, a3, err) LOG(LS_WARNING) \
<< "" << #func << "(" << a1 << ", " << a2 << ", " << a3 \
<< ") failed, err=" << err
#define LOG_RTCERR4_EX(func, a1, a2, a3, a4, err) LOG(LS_WARNING) \
<< "" << #func << "(" << a1 << ", " << a2 << ", " << a3 \
<< ", " << a4 << ") failed, err=" << err
#define LOG_RTCERR5_EX(func, a1, a2, a3, a4, a5, err) LOG(LS_WARNING) \
<< "" << #func << "(" << a1 << ", " << a2 << ", " << a3 \
<< ", " << a4 << ", " << a5 << ") failed, err=" << err
#define LOG_RTCERR6_EX(func, a1, a2, a3, a4, a5, a6, err) LOG(LS_WARNING) \
<< "" << #func << "(" << a1 << ", " << a2 << ", " << a3 \
<< ", " << a4 << ", " << a5 << ", " << a6 << ") failed, err=" << err
} // namespace cricket
#endif // TALK_SESSION_PHONE_WEBRTCCOMMON_H_

View File

@ -25,7 +25,7 @@
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifdef HAVE_WEBRTC
#ifdef HAVE_WEBRTC_VIDEO
#include "talk/session/phone/webrtcvideoengine.h"
@ -34,6 +34,8 @@
#include "talk/base/byteorder.h"
#include "talk/base/logging.h"
#include "talk/base/stringutils.h"
#include "talk/session/phone/videorenderer.h"
#include "talk/session/phone/webrtcpassthroughrender.h"
#include "talk/session/phone/webrtcvoiceengine.h"
#include "talk/session/phone/webrtcvideoframe.h"
#include "talk/session/phone/webrtcvie.h"
@ -53,9 +55,8 @@ class WebRtcRenderAdapter : public webrtc::ExternalRenderer {
virtual int FrameSizeChange(unsigned int width, unsigned int height,
unsigned int /*number_of_streams*/) {
if (renderer_ == NULL) {
if (renderer_ == NULL)
return 0;
}
width_ = width;
height_ = height;
return renderer_->SetSize(width_, height_, 0) ? 0 : -1;
@ -63,14 +64,9 @@ class WebRtcRenderAdapter : public webrtc::ExternalRenderer {
virtual int DeliverFrame(unsigned char* buffer, int buffer_size,
unsigned int time_stamp) {
if (renderer_ == NULL) {
if (renderer_ == NULL)
return 0;
}
WebRtcVideoFrame video_frame;
// TODO(ronghuawu): Currently by the time DeliverFrame got called,
// ViE expects the frame will be rendered ASAP. However, the libjingle
// renderer may have its own internal delays. Can you disable the buffering
// inside ViE and surface the timing information to this callback?
video_frame.Attach(buffer, buffer_size, width_, height_, 0, time_stamp);
int ret = renderer_->RenderFrame(&video_frame) ? 0 : -1;
uint8* buffer_temp;
@ -89,48 +85,72 @@ class WebRtcRenderAdapter : public webrtc::ExternalRenderer {
const WebRtcVideoEngine::VideoCodecPref
WebRtcVideoEngine::kVideoCodecPrefs[] = {
{"VP8", 104, 0},
{"H264", 105, 1}
{"VP8", 120, 0},
};
// The formats are sorted by the descending order of width. We use the order to
// find the next format for CPU and bandwidth adaptation.
const VideoFormat WebRtcVideoEngine::kVideoFormats[] = {
// TODO: Understand why we have problem with 16:9 formats.
VideoFormat(1280, 800, VideoFormat::FpsToInterval(30), FOURCC_ANY),
//VideoFormat(1280, 720, VideoFormat::FpsToInterval(30), FOURCC_ANY),
VideoFormat(960, 600, VideoFormat::FpsToInterval(30), FOURCC_ANY),
//VideoFormat(960, 540, VideoFormat::FpsToInterval(30), FOURCC_ANY),
VideoFormat(640, 400, VideoFormat::FpsToInterval(30), FOURCC_ANY),
//VideoFormat(640, 360, VideoFormat::FpsToInterval(30), FOURCC_ANY),
VideoFormat(480, 300, VideoFormat::FpsToInterval(30), FOURCC_ANY),
//VideoFormat(480, 270, VideoFormat::FpsToInterval(30), FOURCC_ANY),
VideoFormat(320, 200, VideoFormat::FpsToInterval(30), FOURCC_ANY),
//VideoFormat(320, 180, VideoFormat::FpsToInterval(30), FOURCC_ANY),
VideoFormat(240, 150, VideoFormat::FpsToInterval(30), FOURCC_ANY),
//VideoFormat(240, 135, VideoFormat::FpsToInterval(30), FOURCC_ANY),
VideoFormat(160, 100, VideoFormat::FpsToInterval(30), FOURCC_ANY),
//VideoFormat(160, 90, VideoFormat::FpsToInterval(30), FOURCC_ANY),
};
// TODO: Understand why 640x400 is not working.
const VideoFormat WebRtcVideoEngine::kDefaultVideoFormat =
VideoFormat(320, 200, VideoFormat::FpsToInterval(30), FOURCC_ANY);
WebRtcVideoEngine::WebRtcVideoEngine()
: vie_wrapper_(new ViEWrapper()),
capture_(NULL),
external_capture_(false),
capture_id_(-1),
renderer_(webrtc::VideoRender::CreateVideoRender(0, NULL,
false, webrtc::kRenderExternal)),
voice_engine_(NULL),
log_level_(kDefaultLogSeverity),
capture_started_(false) {
}
WebRtcVideoEngine::WebRtcVideoEngine(WebRtcVoiceEngine* voice_engine,
webrtc::VideoCaptureModule* capture)
: vie_wrapper_(new ViEWrapper()),
capture_(capture),
external_capture_(true),
capture_id_(-1),
renderer_(webrtc::VideoRender::CreateVideoRender(0, NULL,
false, webrtc::kRenderExternal)),
voice_engine_(voice_engine),
log_level_(kDefaultLogSeverity),
capture_started_(false) {
if(capture_)
capture_->AddRef();
voice_engine_(NULL) {
Construct();
}
WebRtcVideoEngine::WebRtcVideoEngine(WebRtcVoiceEngine* voice_engine,
ViEWrapper* vie_wrapper)
: vie_wrapper_(vie_wrapper),
capture_(NULL),
external_capture_(false),
capture_id_(-1),
renderer_(webrtc::VideoRender::CreateVideoRender(0, NULL,
false, webrtc::kRenderExternal)),
voice_engine_(voice_engine),
log_level_(kDefaultLogSeverity),
capture_started_(false) {
voice_engine_(voice_engine) {
Construct();
}
void WebRtcVideoEngine::Construct() {
initialized_ = false;
capture_id_ = -1;
capture_module_ = NULL;
external_capture_ = false;
log_level_ = kDefaultLogSeverity;
capture_started_ = false;
render_module_.reset(new WebRtcPassthroughRender());
ApplyLogging();
if (vie_wrapper_->engine()->SetTraceCallback(this) != 0) {
LOG_RTCERR1(SetTraceCallback, this);
}
// Set default quality levels for our supported codecs. We override them here
// if we know your cpu performance is low, and they can be updated explicitly
// by calling SetDefaultCodec. For example by a flute preference setting, or
// by the server with a jec in response to our reported system info.
VideoCodec max_codec(kVideoCodecPrefs[0].payload_type,
kVideoCodecPrefs[0].name,
kDefaultVideoFormat.width,
kDefaultVideoFormat.height,
kDefaultVideoFormat.framerate(), 0);
if (!SetDefaultCodec(max_codec)) {
LOG(LS_ERROR) << "Failed to initialize list of supported codec types";
}
}
WebRtcVideoEngine::~WebRtcVideoEngine() {
@ -138,21 +158,13 @@ WebRtcVideoEngine::~WebRtcVideoEngine() {
vie_wrapper_->engine()->SetTraceCallback(NULL);
Terminate();
vie_wrapper_.reset();
if (capture_) {
capture_->Release();
}
if (renderer_) {
webrtc::VideoRender::DestroyVideoRender(renderer_);
if (capture_module_) {
capture_module_->Release();
}
}
bool WebRtcVideoEngine::Init() {
LOG(LS_INFO) << "WebRtcVideoEngine::Init";
ApplyLogging();
if (vie_wrapper_->engine()->SetTraceCallback(this) != 0) {
LOG_RTCERR1(SetTraceCallback, this);
}
bool result = InitVideoEngine();
if (result) {
LOG(LS_INFO) << "VideoEngine Init done";
@ -184,28 +196,16 @@ bool WebRtcVideoEngine::InitVideoEngine() {
return false;
}
int ncodecs = vie_wrapper_->codec()->NumberOfCodecs();
for (int i = 0; i < ncodecs; ++i) {
webrtc::VideoCodec wcodec;
if ((vie_wrapper_->codec()->GetCodec(i, wcodec) == 0) &&
(strncmp(wcodec.plName, "I420", 4) != 0) &&
(strncmp(wcodec.plName, "ULPFEC", 4) != 0) &&
(strncmp(wcodec.plName, "RED", 4) != 0)) {
// ignore I420, FEC(RED and ULPFEC)
VideoCodec codec(wcodec.plType, wcodec.plName, wcodec.width,
wcodec.height, wcodec.maxFramerate, i);
LOG(LS_INFO) << codec.ToString();
video_codecs_.push_back(codec);
}
}
if (vie_wrapper_->render()->RegisterVideoRenderModule(*renderer_) != 0) {
if (vie_wrapper_->render()->RegisterVideoRenderModule(
*render_module_.get()) != 0) {
LOG_RTCERR0(RegisterVideoRenderModule);
return false;
}
std::sort(video_codecs_.begin(), video_codecs_.end(),
&VideoCodec::Preferable);
initialized_ = true;
return true;
}
@ -253,15 +253,6 @@ void WebRtcVideoEngine::Print(const webrtc::TraceLevel level,
}
}
int WebRtcVideoEngine::GetCodecPreference(const char* name) {
for (size_t i = 0; i < ARRAY_SIZE(kVideoCodecPrefs); ++i) {
if (strcmp(kVideoCodecPrefs[i].payload_name, name) == 0) {
return kVideoCodecPrefs[i].pref;
}
}
return -1;
}
void WebRtcVideoEngine::ApplyLogging() {
int filter = 0;
switch (log_level_) {
@ -273,8 +264,33 @@ void WebRtcVideoEngine::ApplyLogging() {
}
}
// Rebuilds the codec list to be only those that are less intensive
// than the specified codec.
bool WebRtcVideoEngine::RebuildCodecList(const VideoCodec& in_codec) {
if (!FindCodec(in_codec))
return false;
video_codecs_.clear();
bool found = false;
for (size_t i = 0; i < ARRAY_SIZE(kVideoCodecPrefs); ++i) {
const VideoCodecPref& pref(kVideoCodecPrefs[i]);
if (!found)
found = (in_codec.name == pref.name);
if (found) {
VideoCodec codec(pref.payload_type, pref.name,
in_codec.width, in_codec.height, in_codec.framerate,
ARRAY_SIZE(kVideoCodecPrefs) - i);
video_codecs_.push_back(codec);
}
}
ASSERT(found);
return true;
}
void WebRtcVideoEngine::Terminate() {
LOG(LS_INFO) << "WebRtcVideoEngine::Terminate";
initialized_ = false;
SetCapture(false);
if (local_renderer_.get()) {
// If the renderer already set, stop it first
@ -282,7 +298,8 @@ void WebRtcVideoEngine::Terminate() {
LOG_RTCERR1(StopRender, capture_id_);
}
if (vie_wrapper_->render()->DeRegisterVideoRenderModule(*renderer_) != 0)
if (vie_wrapper_->render()->DeRegisterVideoRenderModule(
*render_module_.get()) != 0)
LOG_RTCERR0(DeRegisterVideoRenderModule);
if ((vie_wrapper_->base()->DeregisterObserver()) != 0)
@ -296,7 +313,7 @@ void WebRtcVideoEngine::Terminate() {
}
int WebRtcVideoEngine::GetCapabilities() {
return MediaEngine::VIDEO_RECV | MediaEngine::VIDEO_SEND;
return VIDEO_RECV | VIDEO_SEND;
}
bool WebRtcVideoEngine::SetOptions(int options) {
@ -313,8 +330,11 @@ bool WebRtcVideoEngine::ReleaseCaptureDevice() {
it != channels_.end(); ++it) {
ASSERT(*it != NULL);
channel = *it;
// Ignore the return value here as the channel may not have connected to
// the capturer yet.
vie_wrapper_->capture()->DisconnectCaptureDevice(
channel->video_channel());
channel->set_connected(false);
}
// ReleaseCaptureDevice
vie_wrapper_->capture()->ReleaseCaptureDevice(capture_id_);
@ -325,7 +345,6 @@ bool WebRtcVideoEngine::ReleaseCaptureDevice() {
}
bool WebRtcVideoEngine::SetCaptureDevice(const Device* cam) {
ASSERT(vie_wrapper_.get());
ASSERT(cam != NULL);
ReleaseCaptureDevice();
@ -333,47 +352,55 @@ bool WebRtcVideoEngine::SetCaptureDevice(const Device* cam) {
webrtc::ViECapture* vie_capture = vie_wrapper_->capture();
// There's an external VCM
if (capture_) {
if (vie_capture->AllocateCaptureDevice(*capture_, capture_id_) != 0)
if (capture_module_) {
if (vie_capture->AllocateCaptureDevice(*capture_module_, capture_id_) != 0)
ASSERT(capture_id_ == -1);
} else if (!external_capture_) {
const unsigned int KMaxDeviceNameLength = 128;
const unsigned int KMaxUniqueIdLength = 256;
char device_name[KMaxDeviceNameLength];
char device_id[KMaxUniqueIdLength];
char device_name[256], device_id[256];
bool found = false;
for (int i = 0; i < vie_capture->NumberOfCaptureDevices(); ++i) {
memset(device_name, 0, KMaxDeviceNameLength);
memset(device_id, 0, KMaxUniqueIdLength);
if (vie_capture->GetCaptureDevice(i, device_name, KMaxDeviceNameLength,
device_id, KMaxUniqueIdLength) == 0) {
// TODO(ronghuawu): We should only compare the device_id here,
if (vie_capture->GetCaptureDevice(i, device_name, sizeof(device_name),
device_id, sizeof(device_id)) == 0) {
// TODO: We should only compare the device_id here,
// however the devicemanager and webrtc use different format for th v4l2
// device id. So here we also compare the device_name for now.
// For example "usb-0000:00:1d.7-6" vs "/dev/video0".
if ((cam->name.compare(reinterpret_cast<char*>(device_name)) == 0) ||
(cam->id.compare(reinterpret_cast<char*>(device_id)) == 0)) {
if (cam->name.compare(device_name) == 0 ||
cam->id.compare(device_id) == 0) {
LOG(INFO) << "Found video capture device: " << device_name;
found = true;
break;
}
}
}
if (!found)
if (!found) {
return false;
if (vie_capture->AllocateCaptureDevice(device_id, KMaxUniqueIdLength,
capture_id_) != 0)
}
if (vie_capture->AllocateCaptureDevice(device_id, strlen(device_id),
capture_id_) != 0) {
ASSERT(capture_id_ == -1);
}
}
if (capture_id_ != -1) {
// Connect to all the channels
// Connect to all the channels if there is any.
WebRtcVideoMediaChannel* channel;
for (VideoChannels::const_iterator it = channels_.begin();
it != channels_.end(); ++it) {
ASSERT(*it != NULL);
channel = *it;
vie_capture->ConnectCaptureDevice(capture_id_, channel->video_channel());
// No channel should have been connected yet.
// In case of switching device, all channel connections should have been
// disconnected in ReleaseCaptureDevice() first.
ASSERT(!channel->connected());
if (vie_capture->ConnectCaptureDevice(capture_id_,
channel->video_channel()) == 0) {
channel->set_connected(true);
} else {
LOG(LS_WARNING) << "SetCaptureDevice failed to ConnectCaptureDevice.";
}
}
SetCapture(true);
}
@ -383,20 +410,31 @@ bool WebRtcVideoEngine::SetCaptureDevice(const Device* cam) {
bool WebRtcVideoEngine::SetCaptureModule(webrtc::VideoCaptureModule* vcm) {
ReleaseCaptureDevice();
if (capture_) {
capture_->Release();
if (capture_module_) {
capture_module_->Release();
capture_module_ = NULL;
}
capture_ = vcm;
capture_->AddRef();
if (vcm) {
capture_module_ = vcm;
capture_module_->AddRef();
external_capture_ = true;
} else {
external_capture_ = false;
}
return true;
}
bool WebRtcVideoEngine::SetLocalRenderer(VideoRenderer* renderer) {
if (local_renderer_.get()) {
// If the renderer already set, stop it first
vie_wrapper_->render()->StopRender(capture_id_);
vie_wrapper_->render()->RemoveRenderer(capture_id_);
// If the renderer already set, stop and remove it first
if (vie_wrapper_->render()->StopRender(capture_id_) != 0) {
LOG_RTCERR1(StopRender, capture_id_);
}
if (vie_wrapper_->render()->RemoveRenderer(capture_id_) != 0) {
LOG_RTCERR1(RemoveRenderer, capture_id_);
}
}
local_renderer_.reset(new WebRtcRenderAdapter(renderer));
@ -456,15 +494,36 @@ WebRtcVideoMediaChannel* WebRtcVideoEngine::CreateChannel(
return channel;
}
bool WebRtcVideoEngine::FindCodec(const VideoCodec& codec) {
for (size_t i = 0; i < video_codecs_.size(); ++i) {
if (video_codecs_[i].Matches(codec)) {
// Checks to see whether we comprehend and could receive a particular codec
bool WebRtcVideoEngine::FindCodec(const VideoCodec& in) {
for (int i = 0; i < ARRAY_SIZE(kVideoFormats); ++i) {
const VideoFormat& fmt = kVideoFormats[i];
if ((in.width == 0 && in.height == 0) ||
(fmt.width == in.width && fmt.height == in.height)) {
for (int j = 0; j < ARRAY_SIZE(kVideoCodecPrefs); ++j) {
VideoCodec codec(kVideoCodecPrefs[j].payload_type,
kVideoCodecPrefs[j].name, 0, 0, 0, 0);
if (codec.Matches(in)) {
return true;
}
}
}
}
return false;
}
// SetDefaultCodec may be called while the capturer is running. For example, a
// test call is started in a page with QVGA default codec, and then a real call
// is started in another page with VGA default codec. This is the corner case
// and happens only when a session is started. We ignore this case currently.
// Rebuilds the ordered codec list around |codec|; returns false if the
// rebuild fails.
bool WebRtcVideoEngine::SetDefaultCodec(const VideoCodec& codec) {
  if (!RebuildCodecList(codec)) {
    LOG(LS_WARNING) << "Failed to RebuildCodecList";
    return false;
  }
  return true;
}
void WebRtcVideoEngine::ConvertToCricketVideoCodec(
const webrtc::VideoCodec& in_codec, VideoCodec& out_codec) {
out_codec.id = in_codec.plType;
@ -529,8 +588,24 @@ void WebRtcVideoEngine::UnregisterChannel(WebRtcVideoMediaChannel *channel) {
}
}
// Sets the voice engine to pair with this video engine. Must be called
// before Init(); fails (returns false) once the engine is initialized.
bool WebRtcVideoEngine::SetVoiceEngine(WebRtcVoiceEngine* voice_engine) {
  if (initialized_) {
    LOG(LS_WARNING) << "SetVoiceEngine can not be called after Init.";
    return false;
  }
  voice_engine_ = voice_engine;
  return true;
}
// Creates the render module used for timed rendering. Must be called before
// Init(); fails (returns false) once the engine is initialized.
bool WebRtcVideoEngine::EnableTimedRender() {
  if (initialized_) {
    LOG(LS_WARNING) << "EnableTimedRender can not be called after Init.";
    return false;
  }
  // Id 0, no window handle, not full-screen, external rendering.
  render_module_.reset(webrtc::VideoRender::CreateVideoRender(0, NULL,
      false, webrtc::kRenderExternal));
  return true;
}
// WebRtcVideoMediaChannel
@ -540,6 +615,7 @@ WebRtcVideoMediaChannel::WebRtcVideoMediaChannel(
voice_channel_(channel),
vie_channel_(-1),
sending_(false),
connected_(false),
render_started_(false),
send_codec_(NULL) {
engine->RegisterChannel(this);
@ -554,6 +630,7 @@ bool WebRtcVideoMediaChannel::Init() {
LOG(LS_INFO) << "WebRtcVideoMediaChannel::Init "
<< "video_channel " << vie_channel_ << " created";
// connect audio channel
if (voice_channel_) {
WebRtcVoiceMediaChannel* channel =
@ -692,14 +769,27 @@ bool WebRtcVideoMediaChannel::SetSend(bool send) {
LOG_RTCERR1(StartSend, vie_channel_);
ret = false;
}
// If the channel has not been connected to the capturer yet,
// connect it now.
if (!connected()) {
if (engine()->video_engine()->capture()->ConnectCaptureDevice(
engine()->capture_id(), vie_channel_) != 0) {
LOG_RTCERR2(ConnectCaptureDevice, engine()->capture_id(), vie_channel_);
ret = false;
} else {
set_connected(true);
}
}
} else { // disable
if (engine()->video_engine()->base()->StopSend(vie_channel_) != 0) {
LOG_RTCERR1(StopSend, vie_channel_);
ret = false;
}
}
if (ret)
if (ret) {
sending_ = send;
}
return ret;
}
@ -718,9 +808,13 @@ bool WebRtcVideoMediaChannel::SetRenderer(
if (ssrc != 0)
return false;
if (remote_renderer_.get()) {
// If the renderer already set, stop it first
engine_->video_engine()->render()->StopRender(vie_channel_);
engine_->video_engine()->render()->RemoveRenderer(vie_channel_);
// If the renderer already set, stop and remove it first
if (engine_->video_engine()->render()->StopRender(vie_channel_) != 0) {
LOG_RTCERR1(StopRender, vie_channel_);
}
if (engine_->video_engine()->render()->RemoveRenderer(vie_channel_) != 0) {
LOG_RTCERR1(RemoveRenderer, vie_channel_);
}
}
remote_renderer_.reset(new WebRtcRenderAdapter(renderer));
@ -906,7 +1000,6 @@ int WebRtcVideoMediaChannel::SendPacket(int channel, const void* data,
if (!network_interface_) {
return -1;
}
talk_base::Buffer packet(data, len, kMaxRtpPacketLen);
return network_interface_->SendPacket(&packet) ? len : -1;
}
@ -923,5 +1016,5 @@ int WebRtcVideoMediaChannel::SendRTCPPacket(int channel,
} // namespace cricket
#endif // HAVE_WEBRTC
#endif // HAVE_WEBRTC_VIDEO

View File

@ -1,197 +0,0 @@
/*
* libjingle
* Copyright 2004--2011, Google Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
* EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef TALK_SESSION_PHONE_WEBRTCVIDEOENGINE_H_
#define TALK_SESSION_PHONE_WEBRTCVIDEOENGINE_H_
#include <vector>
#include "talk/base/scoped_ptr.h"
#include "talk/session/phone/videocommon.h"
#include "talk/session/phone/codec.h"
#include "talk/session/phone/channel.h"
#include "talk/session/phone/mediaengine.h"
#include "talk/session/phone/webrtccommon.h"
namespace webrtc {
class VideoCaptureModule;
class VideoRender;
}
namespace cricket {
struct Device;
class VideoRenderer;
class ViEWrapper;
class VoiceMediaChannel;
class WebRtcRenderAdapter;
class WebRtcVideoMediaChannel;
class WebRtcVoiceEngine;
// Video engine built on WebRTC's ViE interfaces. Owns the capture module,
// local renderer, codec list, and the set of registered media channels.
// Implements ViEBaseObserver (PerformanceAlarm callback) and TraceCallback
// (routes WebRTC trace output through Print()).
class WebRtcVideoEngine : public webrtc::ViEBaseObserver,
                          public webrtc::TraceCallback {
 public:
  // Creates the WebRtcVideoEngine with internal VideoCaptureModule.
  WebRtcVideoEngine();
  // Creates the WebRtcVideoEngine, and specifies the WebRtcVoiceEngine and
  // external VideoCaptureModule to use.
  WebRtcVideoEngine(WebRtcVoiceEngine* voice_engine,
                    webrtc::VideoCaptureModule* capture);
  // For testing purposes. Allows the WebRtcVoiceEngine and
  // ViEWrapper to be mocks.
  WebRtcVideoEngine(WebRtcVoiceEngine* voice_engine, ViEWrapper* vie_wrapper);
  ~WebRtcVideoEngine();

  // Engine lifetime.
  bool Init();
  void Terminate();

  // Creates a media channel paired with |voice_channel|.
  WebRtcVideoMediaChannel* CreateChannel(
      VoiceMediaChannel* voice_channel);
  // Returns true if |codec| is one we can handle.
  bool FindCodec(const VideoCodec& codec);
  bool SetDefaultEncoderConfig(const VideoEncoderConfig& config);

  // Channel bookkeeping; called by WebRtcVideoMediaChannel itself.
  void RegisterChannel(WebRtcVideoMediaChannel* channel);
  void UnregisterChannel(WebRtcVideoMediaChannel* channel);

  ViEWrapper* video_engine() { return vie_wrapper_.get(); }
  int GetLastVideoEngineError();
  int GetCapabilities();
  bool SetOptions(int options);

  // Capture device and local-preview configuration.
  bool SetCaptureDevice(const Device* device);
  bool SetCaptureModule(webrtc::VideoCaptureModule* vcm);
  bool SetLocalRenderer(VideoRenderer* renderer);
  CaptureResult SetCapture(bool capture);

  const std::vector<VideoCodec>& codecs() const;
  void SetLogging(int min_sev, const char* filter);
  int GetLastEngineError();
  VideoEncoderConfig& default_encoder_config() {
    return default_encoder_config_;
  }

  // Conversions between cricket and webrtc codec descriptions.
  void ConvertToCricketVideoCodec(const webrtc::VideoCodec& in_codec,
                                  VideoCodec& out_codec);
  bool ConvertFromCricketVideoCodec(const VideoCodec& in_codec,
                                    webrtc::VideoCodec& out_codec);

  // Signaled with the result of SetCapture().
  sigslot::signal1<CaptureResult> SignalCaptureResult;

 private:
  // Static preference-table entry for a supported codec.
  struct VideoCodecPref {
    const char* payload_name;
    int payload_type;
    int pref;
  };

  static const VideoCodecPref kVideoCodecPrefs[];
  int GetCodecPreference(const char* name);
  void ApplyLogging();
  bool InitVideoEngine();
  // webrtc::ViEBaseObserver callback.
  void PerformanceAlarm(const unsigned int cpu_load);
  bool ReleaseCaptureDevice();
  // webrtc::TraceCallback implementation.
  virtual void Print(const webrtc::TraceLevel level, const char* trace_string,
                     const int length);

  typedef std::vector<WebRtcVideoMediaChannel*> VideoChannels;

  talk_base::scoped_ptr<ViEWrapper> vie_wrapper_;
  webrtc::VideoCaptureModule* capture_;  // capture module, possibly external
  bool external_capture_;                // true if |capture_| was injected
  int capture_id_;
  webrtc::VideoRender* renderer_;
  WebRtcVoiceEngine* voice_engine_;      // paired audio engine; not owned
  std::vector<VideoCodec> video_codecs_;
  VideoChannels channels_;               // all registered media channels
  int log_level_;
  VideoEncoderConfig default_encoder_config_;
  bool capture_started_;
  talk_base::scoped_ptr<WebRtcRenderAdapter> local_renderer_;
};
// A single video media channel on top of a ViE channel. Implements
// webrtc::Transport so that RTP/RTCP produced by WebRTC is forwarded to the
// channel's network interface.
class WebRtcVideoMediaChannel : public VideoMediaChannel,
                                public webrtc::Transport {
 public:
  WebRtcVideoMediaChannel(
      WebRtcVideoEngine* engine, VoiceMediaChannel* voice_channel);
  ~WebRtcVideoMediaChannel();
  // Allocates the underlying ViE channel; must succeed before other calls.
  bool Init();

  // VideoMediaChannel implementation.
  virtual bool SetRecvCodecs(const std::vector<VideoCodec> &codecs);
  virtual bool SetSendCodecs(const std::vector<VideoCodec> &codecs);
  virtual bool SetRender(bool render);
  virtual bool SetSend(bool send);
  virtual bool AddStream(uint32 ssrc, uint32 voice_ssrc);
  virtual bool RemoveStream(uint32 ssrc);
  virtual bool SetRenderer(uint32 ssrc, VideoRenderer* renderer);
  virtual bool GetStats(VideoMediaInfo* info);
  virtual bool SendIntraFrame();
  virtual bool RequestIntraFrame();
  virtual void OnPacketReceived(talk_base::Buffer* packet);
  virtual void OnRtcpReceived(talk_base::Buffer* packet);
  virtual void SetSendSsrc(uint32 id);
  virtual bool SetRtcpCName(const std::string& cname);
  virtual bool Mute(bool on);
  // RTP header extensions are not supported by this implementation.
  virtual bool SetRecvRtpHeaderExtensions(
      const std::vector<RtpHeaderExtension>& extensions) {
    return false;
  }
  virtual bool SetSendRtpHeaderExtensions(
      const std::vector<RtpHeaderExtension>& extensions) {
    return false;
  }
  virtual bool SetSendBandwidth(bool autobw, int bps);
  virtual bool SetOptions(int options);

  // Accessors.
  WebRtcVideoEngine* engine() { return engine_; }
  VoiceMediaChannel* voice_channel() { return voice_channel_; }
  int video_channel() { return vie_channel_; }
  bool sending() { return sending_; }

 protected:
  int GetLastEngineError() { return engine()->GetLastEngineError(); }
  // webrtc::Transport implementation: outgoing RTP/RTCP from WebRTC.
  virtual int SendPacket(int channel, const void* data, int len);
  virtual int SendRTCPPacket(int channel, const void* data, int len);

 private:
  void EnableRtcp();
  void EnablePLI();
  void EnableTMMBR();

  WebRtcVideoEngine* engine_;         // parent engine; not owned
  VoiceMediaChannel* voice_channel_;  // paired audio channel; may be NULL
  int vie_channel_;                   // ViE channel id; -1 until Init()
  bool sending_;
  bool render_started_;
  talk_base::scoped_ptr<webrtc::VideoCodec> send_codec_;
  talk_base::scoped_ptr<WebRtcRenderAdapter> remote_renderer_;
};
} // namespace cricket
#endif // TALK_SESSION_PHONE_WEBRTCVIDEOENGINE_H_

View File

@ -1,238 +0,0 @@
/*
* libjingle
* Copyright 2004--2011, Google Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
* EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "talk/session/phone/webrtcvideoframe.h"
#include "talk/base/logging.h"
#include "talk/session/phone/videocommon.h"
#ifdef WEBRTC_RELATIVE_PATH
#include "common_video/vplib/main/interface/vplib.h"
#else
#include "third_party/webrtc/files/include/vplib.h"
#endif
namespace cricket {
// Creates an empty frame (no buffer attached). |elapsed_time_| is explicitly
// zeroed: it was previously left uninitialized, yet Copy() reads it even
// before Attach()/SetElapsedTime() ever ran.
WebRtcVideoFrame::WebRtcVideoFrame() : elapsed_time_(0) {
}

WebRtcVideoFrame::~WebRtcVideoFrame() {
}
// Takes ownership of |buffer| (allocated with new[]) as the frame's I420
// image. Any previously held buffer is freed first.
void WebRtcVideoFrame::Attach(uint8* buffer, size_t buffer_size, size_t w,
    size_t h, int64 elapsed_time, int64 time_stamp) {
  video_frame_.Free();
  WebRtc_UWord8* new_memory = buffer;
  WebRtc_UWord32 new_length = buffer_size;
  WebRtc_UWord32 new_size = buffer_size;
  // Swap() hands |new_memory| to |video_frame_|; the returned old buffer was
  // already freed above.
  video_frame_.Swap(new_memory, new_length, new_size);
  video_frame_.SetWidth(w);
  video_frame_.SetHeight(h);
  elapsed_time_ = elapsed_time;
  video_frame_.SetTimeStamp(time_stamp);
}
// Transfers ownership of the internal buffer to the caller, leaving this
// frame empty. The caller becomes responsible for delete[]-ing *buffer.
void WebRtcVideoFrame::Detach(uint8** buffer, size_t* buffer_size) {
  WebRtc_UWord8* new_memory = NULL;
  WebRtc_UWord32 new_length = 0;
  WebRtc_UWord32 new_size = 0;
  video_frame_.Swap(new_memory, new_length, new_size);
  *buffer = new_memory;
  // Note: reports the allocated size, which may exceed the image length.
  *buffer_size = new_size;
}
// Allocates a w x h I420 buffer and fills it with black (Y=16, U=V=128).
// NOTE(review): plane sizes assume even |w| and |h| -- confirm callers only
// pass even dimensions.
bool WebRtcVideoFrame::InitToBlack(size_t w, size_t h,
    int64 elapsed_time, int64 time_stamp) {
  size_t buffer_size = w * h * 3 / 2;  // I420: Y plane + quarter-size U and V
  uint8* buffer = new uint8[buffer_size];
  Attach(buffer, buffer_size, w, h, elapsed_time, time_stamp);
  memset(GetYPlane(), 16, w * h);
  memset(GetUPlane(), 128, w * h / 4);
  memset(GetVPlane(), 128, w * h / 4);
  return true;
}
// Frame width in pixels.
size_t WebRtcVideoFrame::GetWidth() const {
  return video_frame_.Width();
}

// Frame height in pixels.
size_t WebRtcVideoFrame::GetHeight() const {
  return video_frame_.Height();
}
// Start of the Y plane, or NULL if no buffer is attached.
const uint8* WebRtcVideoFrame::GetYPlane() const {
  return video_frame_.Buffer();
}

// Start of the U plane (the Y plane occupies Width*Height bytes), or NULL.
const uint8* WebRtcVideoFrame::GetUPlane() const {
  WebRtc_UWord8* start = video_frame_.Buffer();
  if (start == NULL) {
    return NULL;
  }
  return start + video_frame_.Width() * video_frame_.Height();
}

// Start of the V plane (after the Y and U planes), or NULL.
const uint8* WebRtcVideoFrame::GetVPlane() const {
  WebRtc_UWord8* start = video_frame_.Buffer();
  if (start == NULL) {
    return NULL;
  }
  return start + video_frame_.Width() * video_frame_.Height() * 5 / 4;
}
// Mutable start of the Y plane, or NULL if no buffer is attached.
uint8* WebRtcVideoFrame::GetYPlane() {
  return video_frame_.Buffer();
}

// Mutable start of the U plane, or NULL.
uint8* WebRtcVideoFrame::GetUPlane() {
  WebRtc_UWord8* start = video_frame_.Buffer();
  if (start == NULL) {
    return NULL;
  }
  return start + video_frame_.Width() * video_frame_.Height();
}

// Mutable start of the V plane, or NULL.
uint8* WebRtcVideoFrame::GetVPlane() {
  WebRtc_UWord8* start = video_frame_.Buffer();
  if (start == NULL) {
    return NULL;
  }
  return start + video_frame_.Width() * video_frame_.Height() * 5 / 4;
}
// Deep-copies this frame. Returns NULL if there is no image attached; the
// caller owns the returned frame.
VideoFrame* WebRtcVideoFrame::Copy() const {
  WebRtc_UWord8* src = video_frame_.Buffer();
  if (src == NULL) {
    return NULL;
  }

  // Duplicate the pixel data, then hand the new buffer to a fresh frame.
  const size_t length = video_frame_.Length();
  uint8* cloned_buffer = new uint8[length];
  memcpy(cloned_buffer, src, length);

  WebRtcVideoFrame* cloned_frame = new WebRtcVideoFrame();
  cloned_frame->Attach(cloned_buffer, length,
                       video_frame_.Width(), video_frame_.Height(),
                       elapsed_time_, video_frame_.TimeStamp());
  return cloned_frame;
}
// Copies the raw I420 image into |buffer| when it fits. Always returns the
// number of bytes needed (0 if no image is attached), whether or not the
// copy actually happened.
size_t WebRtcVideoFrame::CopyToBuffer(
    uint8* buffer, size_t size) const {
  const WebRtc_UWord8* src = video_frame_.Buffer();
  if (src == NULL) {
    return 0;
  }
  const size_t needed = video_frame_.Length();
  if (needed > size) {
    return needed;  // too small; tell the caller how much is required
  }
  memcpy(buffer, src, needed);
  return needed;
}
// Converts the I420 image to an RGB format selected by |to_fourcc| into
// |buffer| (row pitch |pitch_rgb| bytes). Returns the number of bytes needed,
// or 0 if there is no image, the buffer is too small, or the format is
// unsupported. Cleanup: removed the unreachable break after "return 0" and
// the redundant kUnknown re-check after the switch (every path either sets a
// valid type or has already returned).
size_t WebRtcVideoFrame::ConvertToRgbBuffer(uint32 to_fourcc,
                                            uint8* buffer,
                                            size_t size,
                                            size_t pitch_rgb) const {
  if (!video_frame_.Buffer()) {
    return 0;
  }
  size_t width = video_frame_.Width();
  size_t height = video_frame_.Height();
  // See http://www.virtualdub.org/blog/pivot/entry.php?id=190 for a good
  // explanation of pitch and why this is the amount of space we need.
  size_t needed = pitch_rgb * (height - 1) + 4 * width;
  if (needed > size) {
    LOG(LS_WARNING) << "RGB buffer is not large enough";
    return 0;
  }

  webrtc::VideoType outgoingVideoType;
  switch (to_fourcc) {
    case FOURCC_ARGB:
      outgoingVideoType = webrtc::kARGB;
      break;
    default:
      LOG(LS_WARNING) << "RGB type not supported: " << to_fourcc;
      return 0;
  }
  webrtc::ConvertFromI420(outgoingVideoType, video_frame_.Buffer(),
                          width, height, buffer);
  return needed;
}
// Scales/crops this frame's image into the given destination planes.
// Currently a no-op: the destination planes are left untouched.
void WebRtcVideoFrame::StretchToPlanes(
    uint8* y, uint8* u, uint8* v,
    int32 dst_pitch_y, int32 dst_pitch_u, int32 dst_pitch_v,
    size_t width, size_t height, bool interpolate, bool crop) const {
  // TODO(ronghuawu): Implement StretchToPlanes
}
// Returns the byte count a stretched copy needs and, if |buffer| is large
// enough, stretches the image into it. NOTE(review): delegates to
// StretchToPlanes, which is currently unimplemented, so |buffer| is not
// actually filled in.
size_t WebRtcVideoFrame::StretchToBuffer(size_t w, size_t h,
                                         uint8* buffer, size_t size,
                                         bool interpolate,
                                         bool crop) const {
  if (!video_frame_.Buffer()) {
    return 0;
  }

  size_t needed = video_frame_.Length();

  if (needed <= size) {
    // Lay out the destination I420 planes inside |buffer|.
    uint8* bufy = buffer;
    uint8* bufu = bufy + w * h;
    uint8* bufv = bufu + ((w + 1) >> 1) * ((h + 1) >> 1);
    StretchToPlanes(bufy, bufu, bufv, w, (w + 1) >> 1, (w + 1) >> 1, w, h,
                    interpolate, crop);
  }
  return needed;
}
// Stretches this frame's image into |target|'s planes and copies the
// timestamps over. No-op if |target| is NULL. NOTE(review): relies on
// StretchToPlanes, which is currently unimplemented.
void WebRtcVideoFrame::StretchToFrame(VideoFrame* target,
                                      bool interpolate, bool crop) const {
  if (!target) return;

  StretchToPlanes(target->GetYPlane(),
                  target->GetUPlane(),
                  target->GetVPlane(),
                  target->GetYPitch(),
                  target->GetUPitch(),
                  target->GetVPitch(),
                  target->GetWidth(),
                  target->GetHeight(),
                  interpolate, crop);
  target->SetElapsedTime(GetElapsedTime());
  target->SetTimeStamp(GetTimeStamp());
}
// Returns a new frame scaled to w x h. Not implemented: always returns NULL.
VideoFrame* WebRtcVideoFrame::Stretch(size_t w, size_t h,
                                      bool interpolate, bool crop) const {
  // TODO(ronghuawu): implement
  return NULL;
}
} // namespace cricket

View File

@ -1,97 +0,0 @@
/*
* libjingle
* Copyright 2004--2011, Google Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
* EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef TALK_SESSION_PHONE_WEBRTCVIDEOFRAME_H_
#define TALK_SESSION_PHONE_WEBRTCVIDEOFRAME_H_
#ifdef WEBRTC_RELATIVE_PATH
#include "common_types.h"
#include "modules/interface/module_common_types.h"
#else
#include "third_party/webrtc/files/include/common_types.h"
#include "third_party/webrtc/files/include/module_common_types.h"
#endif
#include "talk/session/phone/mediachannel.h"
namespace cricket {
// WebRtcVideoFrame only supports I420
class WebRtcVideoFrame : public VideoFrame {
 public:
  WebRtcVideoFrame();
  ~WebRtcVideoFrame();

  // Takes ownership of |buffer| as the frame's I420 image.
  void Attach(uint8* buffer, size_t buffer_size,
              size_t w, size_t h, int64 elapsed_time, int64 time_stamp);
  // Transfers the buffer back to the caller, leaving the frame empty.
  void Detach(uint8** buffer, size_t* buffer_size);
  // Allocates a buffer and fills it with black.
  bool InitToBlack(size_t w, size_t h, int64 elapsed_time, int64 time_stamp);
  // True if a buffer is currently attached.
  bool HasImage() const { return video_frame_.Buffer() != NULL; }

  virtual size_t GetWidth() const;
  virtual size_t GetHeight() const;
  virtual const uint8* GetYPlane() const;
  virtual const uint8* GetUPlane() const;
  virtual const uint8* GetVPlane() const;
  virtual uint8* GetYPlane();
  virtual uint8* GetUPlane();
  virtual uint8* GetVPlane();

  // Pitches for the packed I420 layout. NOTE(review): Width() / 2 truncates
  // for odd widths -- assumes even frame widths; confirm with callers.
  virtual int32 GetYPitch() const { return video_frame_.Width(); }
  virtual int32 GetUPitch() const { return video_frame_.Width() / 2; }
  virtual int32 GetVPitch() const { return video_frame_.Width() / 2; }

  // Square pixels.
  virtual size_t GetPixelWidth() const { return 1; }
  virtual size_t GetPixelHeight() const { return 1; }

  virtual int64 GetElapsedTime() const { return elapsed_time_; }
  virtual int64 GetTimeStamp() const { return video_frame_.TimeStamp(); }
  virtual void SetElapsedTime(int64 elapsed_time) {
    elapsed_time_ = elapsed_time;
  }
  virtual void SetTimeStamp(int64 time_stamp) {
    video_frame_.SetTimeStamp(time_stamp);
  }

  virtual VideoFrame* Copy() const;
  virtual size_t CopyToBuffer(uint8* buffer, size_t size) const;
  virtual size_t ConvertToRgbBuffer(uint32 to_fourcc, uint8* buffer,
                                    size_t size, size_t pitch_rgb) const;
  // Scaling helpers; currently unimplemented in the .cc.
  virtual void StretchToPlanes(uint8* y, uint8* u, uint8* v,
                               int32 pitchY, int32 pitchU, int32 pitchV,
                               size_t width, size_t height,
                               bool interpolate, bool crop) const;
  virtual size_t StretchToBuffer(size_t w, size_t h, uint8* buffer, size_t size,
                                 bool interpolate, bool crop) const;
  virtual void StretchToFrame(VideoFrame* target, bool interpolate,
                              bool crop) const;
  virtual VideoFrame* Stretch(size_t w, size_t h, bool interpolate,
                              bool crop) const;

 private:
  webrtc::VideoFrame video_frame_;  // owns the I420 buffer
  int64 elapsed_time_;
};
} // namespace cricket
#endif // TALK_SESSION_PHONE_WEBRTCVIDEOFRAME_H_

View File

@ -1,143 +0,0 @@
/*
* libjingle
* Copyright 2004--2011, Google Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
* EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef TALK_SESSION_PHONE_WEBRTCVIE_H_
#define TALK_SESSION_PHONE_WEBRTCVIE_H_
#include "talk/base/common.h"
#include "talk/session/phone/webrtccommon.h"
#ifdef WEBRTC_RELATIVE_PATH
#include "common_types.h"
#include "modules/interface/module_common_types.h"
#include "modules/video_capture/main/interface/video_capture.h"
#include "modules/video_render/main/interface/video_render.h"
#include "video_engine/main/interface/vie_base.h"
#include "video_engine/main/interface/vie_capture.h"
#include "video_engine/main/interface/vie_codec.h"
#include "video_engine/main/interface/vie_errors.h"
#include "video_engine/main/interface/vie_image_process.h"
#include "video_engine/main/interface/vie_network.h"
#include "video_engine/main/interface/vie_render.h"
#include "video_engine/main/interface/vie_rtp_rtcp.h"
#else
#include "third_party/webrtc/files/include/common_types.h"
#include "third_party/webrtc/files/include/module_common_types.h"
#include "third_party/webrtc/files/include/video_capture.h"
#include "third_party/webrtc/files/include/video_render.h"
#include "third_party/webrtc/files/include/vie_base.h"
#include "third_party/webrtc/files/include/vie_capture.h"
#include "third_party/webrtc/files/include/vie_codec.h"
#include "third_party/webrtc/files/include/vie_errors.h"
#include "third_party/webrtc/files/include/vie_image_process.h"
#include "third_party/webrtc/files/include/vie_network.h"
#include "third_party/webrtc/files/include/vie_render.h"
#include "third_party/webrtc/files/include/vie_rtp_rtcp.h"
#endif // WEBRTC_RELATIVE_PATH
namespace cricket {
// all tracing macros should go to a common file
// automatically handles lifetime of VideoEngine
class scoped_vie_engine {
 public:
  // Takes ownership of |e|.
  explicit scoped_vie_engine(webrtc::VideoEngine* e) : ptr(e) {}
  // VERIFY, to ensure that there are no leaks at shutdown
  ~scoped_vie_engine() {
    if (ptr) {
      webrtc::VideoEngine::Delete(ptr);
    }
  }
  // Non-owning access to the engine; may be NULL.
  webrtc::VideoEngine* get() const { return ptr; }
 private:
  webrtc::VideoEngine* ptr;  // owned; deleted in the destructor
};
// scoped_ptr class to handle obtaining and releasing VideoEngine
// interface pointers
template<class T> class scoped_vie_ptr {
 public:
  // Acquires the T sub-API from the engine; holds NULL if that fails.
  explicit scoped_vie_ptr(const scoped_vie_engine& e)
      : ptr(T::GetInterface(e.get())) {}
  // Takes over an already-acquired interface pointer.
  explicit scoped_vie_ptr(T* p) : ptr(p) {}
  // Release() drops WebRTC's interface refcount.
  ~scoped_vie_ptr() { if (ptr) ptr->Release(); }
  T* operator->() const { return ptr; }
  T* get() const { return ptr; }
 private:
  T* ptr;
};
// Utility class for aggregating the various WebRTC interface.
// Fake implementations can also be injected for testing.
class ViEWrapper {
 public:
  // Creates a real VideoEngine and obtains all of its sub-APIs.
  ViEWrapper()
      : engine_(webrtc::VideoEngine::Create()),
        base_(engine_), codec_(engine_), capture_(engine_),
        network_(engine_), render_(engine_), rtp_(engine_),
        image_(engine_) {
  }

  // Injects pre-built (e.g. mock) interfaces; no engine is created.
  ViEWrapper(webrtc::ViEBase* base, webrtc::ViECodec* codec,
             webrtc::ViECapture* capture, webrtc::ViENetwork* network,
             webrtc::ViERender* render, webrtc::ViERTP_RTCP* rtp,
             webrtc::ViEImageProcess* image)
      : engine_(NULL),
        base_(base),
        codec_(codec),
        capture_(capture),
        network_(network),
        render_(render),
        rtp_(rtp),
        image_(image) {
  }

  virtual ~ViEWrapper() {}

  // Accessors for the engine and its sub-APIs.
  webrtc::VideoEngine* engine() { return engine_.get(); }
  webrtc::ViEBase* base() { return base_.get(); }
  webrtc::ViECodec* codec() { return codec_.get(); }
  webrtc::ViECapture* capture() { return capture_.get(); }
  webrtc::ViENetwork* network() { return network_.get(); }
  webrtc::ViERender* render() { return render_.get(); }
  webrtc::ViERTP_RTCP* rtp() { return rtp_.get(); }
  // NOTE(review): returns the image-process API despite the name "sync".
  webrtc::ViEImageProcess* sync() { return image_.get(); }
  // Last error reported by the base API.
  int error() { return base_->LastError(); }

 private:
  scoped_vie_engine engine_;
  scoped_vie_ptr<webrtc::ViEBase> base_;
  scoped_vie_ptr<webrtc::ViECodec> codec_;
  scoped_vie_ptr<webrtc::ViECapture> capture_;
  scoped_vie_ptr<webrtc::ViENetwork> network_;
  scoped_vie_ptr<webrtc::ViERender> render_;
  scoped_vie_ptr<webrtc::ViERTP_RTCP> rtp_;
  scoped_vie_ptr<webrtc::ViEImageProcess> image_;
};
}
#endif // TALK_SESSION_PHONE_WEBRTCVIE_H_

View File

@ -1,190 +0,0 @@
/*
* libjingle
* Copyright 2004--2011, Google Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
* EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef TALK_SESSION_PHONE_WEBRTCVOE_H_
#define TALK_SESSION_PHONE_WEBRTCVOE_H_
#include "talk/base/common.h"
#include "talk/session/phone/webrtccommon.h"
#ifdef WEBRTC_RELATIVE_PATH
#include "common_types.h"
#include "modules/audio_device/main/interface/audio_device.h"
#include "voice_engine/main/interface/voe_audio_processing.h"
#include "voice_engine/main/interface/voe_base.h"
#include "voice_engine/main/interface/voe_codec.h"
#include "voice_engine/main/interface/voe_dtmf.h"
#include "voice_engine/main/interface/voe_errors.h"
#include "voice_engine/main/interface/voe_file.h"
#include "voice_engine/main/interface/voe_hardware.h"
#include "voice_engine/main/interface/voe_neteq_stats.h"
#include "voice_engine/main/interface/voe_network.h"
#include "voice_engine/main/interface/voe_rtp_rtcp.h"
#include "voice_engine/main/interface/voe_video_sync.h"
#include "voice_engine/main/interface/voe_volume_control.h"
#else
#include "third_party/webrtc/files/include/audio_device.h"
#include "third_party/webrtc/files/include/common_types.h"
#include "third_party/webrtc/files/include/voe_audio_processing.h"
#include "third_party/webrtc/files/include/voe_base.h"
#include "third_party/webrtc/files/include/voe_codec.h"
#include "third_party/webrtc/files/include/voe_dtmf.h"
#include "third_party/webrtc/files/include/voe_errors.h"
#include "third_party/webrtc/files/include/voe_file.h"
#include "third_party/webrtc/files/include/voe_hardware.h"
#include "third_party/webrtc/files/include/voe_neteq_stats.h"
#include "third_party/webrtc/files/include/voe_network.h"
#include "third_party/webrtc/files/include/voe_rtp_rtcp.h"
#include "third_party/webrtc/files/include/voe_video_sync.h"
#include "third_party/webrtc/files/include/voe_volume_control.h"
#endif // WEBRTC_RELATIVE_PATH
namespace cricket {
// automatically handles lifetime of WebRtc VoiceEngine
class scoped_voe_engine {
 public:
  // Takes ownership of |e|.
  explicit scoped_voe_engine(webrtc::VoiceEngine* e) : ptr(e) {}
  // VERIFY, to ensure that there are no leaks at shutdown
  ~scoped_voe_engine() { if (ptr) VERIFY(webrtc::VoiceEngine::Delete(ptr)); }
  // Releases the current pointer.
  void reset() {
    if (ptr) {
      VERIFY(webrtc::VoiceEngine::Delete(ptr));
      ptr = NULL;
    }
  }
  // Non-owning access to the engine; may be NULL.
  webrtc::VoiceEngine* get() const { return ptr; }
 private:
  webrtc::VoiceEngine* ptr;  // owned; deleted in reset() or the destructor
};
// scoped_ptr class to handle obtaining and releasing WebRTC interface pointers
template<class T>
class scoped_voe_ptr {
 public:
  // Acquires the T sub-API from the engine; holds NULL if that fails.
  explicit scoped_voe_ptr(const scoped_voe_engine& e)
      : ptr(T::GetInterface(e.get())) {}
  // Takes over an already-acquired interface pointer.
  explicit scoped_voe_ptr(T* p) : ptr(p) {}
  // Release() drops WebRTC's interface refcount.
  ~scoped_voe_ptr() { if (ptr) ptr->Release(); }
  T* operator->() const { return ptr; }
  T* get() const { return ptr; }

  // Releases the current pointer.
  void reset() {
    if (ptr) {
      ptr->Release();
      ptr = NULL;
    }
  }

 private:
  T* ptr;
};
// Utility class for aggregating the various WebRTC interface.
// Fake implementations can also be injected for testing.
class VoEWrapper {
 public:
  // Creates a real VoiceEngine and obtains all of its sub-APIs.
  VoEWrapper()
      : engine_(webrtc::VoiceEngine::Create()), processing_(engine_),
        base_(engine_), codec_(engine_), dtmf_(engine_), file_(engine_),
        hw_(engine_), neteq_(engine_), network_(engine_), rtp_(engine_),
        sync_(engine_), volume_(engine_) {
  }
  // Injects pre-built (e.g. mock) interfaces; no engine is created.
  VoEWrapper(webrtc::VoEAudioProcessing* processing,
             webrtc::VoEBase* base,
             webrtc::VoECodec* codec,
             webrtc::VoEDtmf* dtmf,
             webrtc::VoEFile* file,
             webrtc::VoEHardware* hw,
             webrtc::VoENetEqStats* neteq,
             webrtc::VoENetwork* network,
             webrtc::VoERTP_RTCP* rtp,
             webrtc::VoEVideoSync* sync,
             webrtc::VoEVolumeControl* volume)
      : engine_(NULL),
        processing_(processing),
        base_(base),
        codec_(codec),
        dtmf_(dtmf),
        file_(file),
        hw_(hw),
        neteq_(neteq),
        network_(network),
        rtp_(rtp),
        sync_(sync),
        volume_(volume) {
  }
  ~VoEWrapper() {}

  // Accessors for the engine and its sub-APIs.
  webrtc::VoiceEngine* engine() const { return engine_.get(); }
  webrtc::VoEAudioProcessing* processing() const { return processing_.get(); }
  webrtc::VoEBase* base() const { return base_.get(); }
  webrtc::VoECodec* codec() const { return codec_.get(); }
  webrtc::VoEDtmf* dtmf() const { return dtmf_.get(); }
  webrtc::VoEFile* file() const { return file_.get(); }
  webrtc::VoEHardware* hw() const { return hw_.get(); }
  webrtc::VoENetEqStats* neteq() const { return neteq_.get(); }
  webrtc::VoENetwork* network() const { return network_.get(); }
  webrtc::VoERTP_RTCP* rtp() const { return rtp_.get(); }
  webrtc::VoEVideoSync* sync() const { return sync_.get(); }
  webrtc::VoEVolumeControl* volume() const { return volume_.get(); }
  // Last error reported by the base API.
  int error() { return base_->LastError(); }

 private:
  scoped_voe_engine engine_;
  scoped_voe_ptr<webrtc::VoEAudioProcessing> processing_;
  scoped_voe_ptr<webrtc::VoEBase> base_;
  scoped_voe_ptr<webrtc::VoECodec> codec_;
  scoped_voe_ptr<webrtc::VoEDtmf> dtmf_;
  scoped_voe_ptr<webrtc::VoEFile> file_;
  scoped_voe_ptr<webrtc::VoEHardware> hw_;
  scoped_voe_ptr<webrtc::VoENetEqStats> neteq_;
  scoped_voe_ptr<webrtc::VoENetwork> network_;
  scoped_voe_ptr<webrtc::VoERTP_RTCP> rtp_;
  scoped_voe_ptr<webrtc::VoEVideoSync> sync_;
  scoped_voe_ptr<webrtc::VoEVolumeControl> volume_;
};
// Adds indirection to the static WebRtc trace functions, allowing them to
// be mocked in tests.
class VoETraceWrapper {
 public:
  virtual ~VoETraceWrapper() {}
  // Sets which trace categories are emitted; |filter| is a bitmask of
  // webrtc::kTrace* values.
  virtual int SetTraceFilter(const unsigned int filter) {
    return webrtc::VoiceEngine::SetTraceFilter(filter);
  }
  // Directs trace output to the given file (UTF-8 path).
  virtual int SetTraceFile(const char* fileNameUTF8) {
    return webrtc::VoiceEngine::SetTraceFile(fileNameUTF8);
  }
  // Routes trace messages to |callback|.
  virtual int SetTraceCallback(webrtc::TraceCallback* callback) {
    return webrtc::VoiceEngine::SetTraceCallback(callback);
  }
};
}
#endif // TALK_SESSION_PHONE_WEBRTCVOE_H_

View File

@ -25,13 +25,8 @@
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// shhhhh{
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif
// shhhhh}
#ifdef HAVE_WEBRTC
#ifdef HAVE_WEBRTC_VOICE
#include "talk/session/phone/webrtcvoiceengine.h"
@ -46,6 +41,7 @@
#include "talk/base/helpers.h"
#include "talk/base/logging.h"
#include "talk/base/stringencode.h"
#include "talk/base/stringutils.h"
#include "talk/session/phone/webrtcvoe.h"
#ifdef WIN32
@ -77,9 +73,26 @@ static const int kDefaultAudioDeviceId = 0;
#endif
// extension header for audio levels, as defined in
// http://tools.ietf.org/html/draft-ietf-avtext-client-to-mixer-audio-level-01
// http://tools.ietf.org/html/draft-ietf-avtext-client-to-mixer-audio-level-03
static const char kRtpAudioLevelHeaderExtension[] =
"urn:ietf:params:rtp-hdrext:audio-level";
"urn:ietf:params:rtp-hdrext:ssrc-audio-level";
static const char kIsacCodecName[] = "ISAC";
static const char kL16CodecName[] = "L16";
// Formats an AudioCodec for logging, roughly following the SDP (RFC 2327)
// "name/clockrate/channels" convention, with the payload id in parentheses.
static std::string ToString(const AudioCodec& codec) {
  std::stringstream out;
  out << codec.name;
  out << "/" << codec.clockrate;
  out << "/" << codec.channels;
  out << " (" << codec.id << ")";
  return out.str();
}
// Formats a webrtc::CodecInst for logging in the same
// "plname/plfreq/channels (pltype)" shape used for AudioCodec.
static std::string ToString(const webrtc::CodecInst& codec) {
  std::stringstream out;
  out << codec.plname;
  out << "/" << codec.plfreq;
  out << "/" << codec.channels;
  out << " (" << codec.pltype << ")";
  return out.str();
}
static void LogMultiline(talk_base::LoggingSeverity sev, char* text) {
const char* delim = "\r\n";
@ -90,24 +103,19 @@ static void LogMultiline(talk_base::LoggingSeverity sev, char* text) {
// WebRtcVoiceEngine
const WebRtcVoiceEngine::CodecPref WebRtcVoiceEngine::kCodecPrefs[] = {
{ "ISAC", 16000 },
{ "ISAC", 32000 },
{ "ISACLC", 16000 },
{ "speex", 16000 },
{ "IPCMWB", 16000 },
{ "G722", 16000 },
{ "iLBC", 8000 },
{ "speex", 8000 },
{ "GSM", 8000 },
{ "EG711U", 8000 },
{ "EG711A", 8000 },
{ "PCMU", 8000 },
{ "PCMA", 8000 },
{ "CN", 32000 },
{ "CN", 16000 },
{ "CN", 8000 },
{ "red", 8000 },
{ "telephone-event", 8000 },
{ "ISAC", 16000, 103 },
{ "ISAC", 32000, 104 },
{ "speex", 16000, 107 },
{ "G722", 16000, 9 },
{ "ILBC", 8000, 102 },
{ "speex", 8000, 108 },
{ "PCMU", 8000, 0 },
{ "PCMA", 8000, 8 },
{ "CN", 32000, 106 },
{ "CN", 16000, 105 },
{ "CN", 8000, 13 },
{ "red", 8000, 127 },
{ "telephone-event", 8000, 126 },
};
class WebRtcSoundclipMedia : public SoundclipMedia {
@ -199,19 +207,6 @@ WebRtcVoiceEngine::WebRtcVoiceEngine()
Construct();
}
WebRtcVoiceEngine::WebRtcVoiceEngine(webrtc::AudioDeviceModule* adm,
webrtc::AudioDeviceModule* adm_sc)
: voe_wrapper_(new VoEWrapper()),
voe_wrapper_sc_(new VoEWrapper()),
tracing_(new VoETraceWrapper()),
adm_(adm),
adm_sc_(adm_sc),
log_level_(kDefaultLogSeverity),
is_dumping_aec_(false),
desired_local_monitor_enable_(false) {
Construct();
}
WebRtcVoiceEngine::WebRtcVoiceEngine(VoEWrapper* voe_wrapper,
VoEWrapper* voe_wrapper_sc,
VoETraceWrapper* tracing)
@ -227,16 +222,12 @@ WebRtcVoiceEngine::WebRtcVoiceEngine(VoEWrapper* voe_wrapper,
}
void WebRtcVoiceEngine::Construct() {
initialized_ = false;
LOG(LS_VERBOSE) << "WebRtcVoiceEngine::WebRtcVoiceEngine";
ApplyLogging();
ApplyLogging("");
if (tracing_->SetTraceCallback(this) == -1) {
LOG_RTCERR0(SetTraceCallback);
}
// Update reference counters for the external ADM(s).
if (adm_)
adm_->AddRef();
if (adm_sc_)
adm_sc_->AddRef();
if (voe_wrapper_->base()->RegisterVoiceEngineObserver(*this) == -1) {
LOG_RTCERR0(RegisterVoiceEngineObserver);
@ -244,24 +235,48 @@ void WebRtcVoiceEngine::Construct() {
// Clear the default agc state.
memset(&default_agc_config_, 0, sizeof(default_agc_config_));
// Load our audio codec list
// Load our audio codec list.
ConstructCodecs();
}
void WebRtcVoiceEngine::ConstructCodecs() {
LOG(LS_INFO) << "WebRtc VoiceEngine codecs:";
int ncodecs = voe_wrapper_->codec()->NumOfCodecs();
for (int i = 0; i < ncodecs; ++i) {
webrtc::CodecInst gcodec;
if (voe_wrapper_->codec()->GetCodec(i, gcodec) >= 0) {
int pref = GetCodecPreference(gcodec.plname, gcodec.plfreq);
if (pref != -1) {
if (gcodec.rate == -1) gcodec.rate = 0;
AudioCodec codec(gcodec.pltype, gcodec.plname, gcodec.plfreq,
gcodec.rate, gcodec.channels, pref);
LOG(LS_INFO) << gcodec.plname << "/" << gcodec.plfreq << "/" \
<< gcodec.channels << " " << gcodec.pltype;
webrtc::CodecInst voe_codec;
if (voe_wrapper_->codec()->GetCodec(i, voe_codec) != -1) {
// Skip uncompressed formats.
if (_stricmp(voe_codec.plname, kL16CodecName) == 0) {
continue;
}
const CodecPref* pref = NULL;
for (size_t j = 0; j < ARRAY_SIZE(kCodecPrefs); ++j) {
if (_stricmp(kCodecPrefs[j].name, voe_codec.plname) == 0 &&
kCodecPrefs[j].clockrate == voe_codec.plfreq) {
pref = &kCodecPrefs[j];
break;
}
}
if (pref) {
// Use the payload type that we've configured in our pref table;
// use the offset in our pref table to determine the sort order.
AudioCodec codec(pref->payload_type, voe_codec.plname, voe_codec.plfreq,
voe_codec.rate, voe_codec.channels,
ARRAY_SIZE(kCodecPrefs) - (pref - kCodecPrefs));
LOG(LS_INFO) << ToString(codec);
// For ISAC, use 0 to indicate auto bandwidth in our signaling.
if (_stricmp(codec.name.c_str(), kIsacCodecName) == 0) {
codec.bitrate = 0;
}
codecs_.push_back(codec);
} else {
LOG(LS_WARNING) << "Unexpected codec: " << ToString(voe_codec);
}
}
}
// Make sure they are in local preference order
// Make sure they are in local preference order.
std::sort(codecs_.begin(), codecs_.end(), &AudioCodec::Preferable);
}
@ -301,18 +316,17 @@ bool WebRtcVoiceEngine::InitInternal() {
int old_level = log_level_;
log_level_ = talk_base::_min(log_level_,
static_cast<int>(talk_base::LS_INFO));
ApplyLogging();
ApplyLogging("");
// Init WebRtc VoiceEngine, enabling AEC logging if specified in SetLogging,
// and install the externally provided (and implemented) ADM.
// Init WebRtc VoiceEngine, enabling AEC logging if specified in SetLogging.
if (voe_wrapper_->base()->Init(adm_) == -1) {
LOG_RTCERR0_EX(Init, voe_wrapper_->error());
return false;
}
// Restore the previous log level
// Restore the previous log level and apply the log filter.
log_level_ = old_level;
ApplyLogging();
ApplyLogging(log_filter_);
// Log the VoiceEngine version info
char buffer[1024] = "";
@ -322,7 +336,8 @@ bool WebRtcVoiceEngine::InitInternal() {
// Turn on AEC and AGC by default.
if (!SetOptions(
MediaEngine::ECHO_CANCELLATION | MediaEngine::AUTO_GAIN_CONTROL)) {
MediaEngineInterface::ECHO_CANCELLATION |
MediaEngineInterface::AUTO_GAIN_CONTROL)) {
return false;
}
@ -332,33 +347,11 @@ bool WebRtcVoiceEngine::InitInternal() {
return false;
}
#if !defined(IOS) && !defined(ANDROID)
// VoiceEngine team recommends turning on noise reduction
// with low agressiveness.
if (voe_wrapper_->processing()->SetNsStatus(true) == -1) {
#else
// On mobile, VoiceEngine team recommends moderate aggressiveness.
if (voe_wrapper_->processing()->SetNsStatus(true,
kNsModerateSuppression) == -1) {
#endif
LOG_RTCERR1(SetNsStatus, true);
return false;
}
#if !defined(IOS) && !defined(ANDROID)
// Enable detection for keyboard typing.
if (voe_wrapper_->processing()->SetTypingDetectionStatus(true) == -1) {
// In case of error, log the info and continue.
LOG_RTCERR1(SetTypingDetectionStatus, true);
}
#endif
// Print our codec list again for the call diagnostic log
LOG(LS_INFO) << "WebRtc VoiceEngine codecs:";
for (std::vector<AudioCodec>::const_iterator it = codecs_.begin();
it != codecs_.end(); ++it) {
LOG(LS_INFO) << it->name << "/" << it->clockrate << "/"
<< it->channels << " " << it->id;
LOG(LS_INFO) << ToString(*it);
}
#if defined(LINUX) && !defined(HAVE_LIBPULSE)
@ -366,7 +359,6 @@ bool WebRtcVoiceEngine::InitInternal() {
#endif
// Initialize the VoiceEngine instance that we'll use to play out sound clips.
// Also, install the externally provided (and implemented) ADM.
if (voe_wrapper_sc_->base()->Init(adm_sc_) == -1) {
LOG_RTCERR0_EX(Init, voe_wrapper_sc_->error());
return false;
@ -374,7 +366,7 @@ bool WebRtcVoiceEngine::InitInternal() {
// On Windows, tell it to use the default sound (not communication) devices.
// First check whether there is a valid sound device for playback.
// TODO(juberti): Clean this up when we support setting the soundclip device.
// TODO: Clean this up when we support setting the soundclip device.
#ifdef WIN32
int num_of_devices = 0;
if (voe_wrapper_sc_->hw()->GetNumOfPlayoutDevices(num_of_devices) != -1 &&
@ -390,11 +382,13 @@ bool WebRtcVoiceEngine::InitInternal() {
}
#endif
initialized_ = true;
return true;
}
void WebRtcVoiceEngine::Terminate() {
LOG(LS_INFO) << "WebRtcVoiceEngine::Terminate";
initialized_ = false;
if (is_dumping_aec_) {
if (voe_wrapper_->processing()->StopDebugRecording() == -1) {
@ -405,12 +399,11 @@ void WebRtcVoiceEngine::Terminate() {
voe_wrapper_sc_->base()->Terminate();
voe_wrapper_->base()->Terminate();
desired_local_monitor_enable_ = false;
}
int WebRtcVoiceEngine::GetCapabilities() {
return MediaEngine::AUDIO_SEND | MediaEngine::AUDIO_RECV;
return AUDIO_SEND | AUDIO_RECV;
}
VoiceMediaChannel *WebRtcVoiceEngine::CreateChannel() {
@ -432,29 +425,58 @@ SoundclipMedia *WebRtcVoiceEngine::CreateSoundclip() {
}
bool WebRtcVoiceEngine::SetOptions(int options) {
// WebRtc team tells us that "auto" mode doesn't work too well,
// so we don't use it.
bool aec = (options & MediaEngine::ECHO_CANCELLATION) ? true : false;
bool agc = (options & MediaEngine::AUTO_GAIN_CONTROL) ? true : false;
#if defined(IOS) || defined(ANDROID)
if (voe_wrapper_->processing()->SetEcStatus(aec, kEcAecm) == -1) {
#else
// NS and typing detection are always on, if supported.
bool aec = (options & MediaEngineInterface::ECHO_CANCELLATION) ? true : false;
bool agc = (options & MediaEngineInterface::AUTO_GAIN_CONTROL) ? true : false;
#if !defined(IOS) && !defined(ANDROID)
if (voe_wrapper_->processing()->SetEcStatus(aec) == -1) {
#endif
LOG_RTCERR1(SetEcStatus, aec);
return false;
}
// TODO (perkj):
// This sets the AGC to use digital AGC since analog AGC can't be supported on
// Chromium at the moment. Change back to analog when it can.
if (voe_wrapper_->processing()->SetAgcStatus(
agc, webrtc::kAgcAdaptiveDigital) == -1) {
if (voe_wrapper_->processing()->SetAgcStatus(agc) == -1) {
LOG_RTCERR1(SetAgcStatus, agc);
return false;
}
if (voe_wrapper_->processing()->SetNsStatus(true) == -1) {
LOG_RTCERR1(SetNsStatus, true);
return false;
}
if (voe_wrapper_->processing()->SetTypingDetectionStatus(true) == -1) {
// In case of error, log the info and continue
LOG_RTCERR1(SetTypingDetectionStatus, true);
}
#else
if (voe_wrapper_->processing()->SetEcStatus(aec, kEcAecm) == -1) {
LOG_RTCERR2(SetEcStatus, aec, kEcAecm);
return false;
}
if (aec) {
// Use speakerphone mode with comfort noise generation for mobile.
if (voe_wrapper_->processing()->SetAecmMode(kAecmSpeakerphone, true) != 0) {
LOG_RTCERR2(SetAecmMode, kAecmSpeakerphone, true);
}
}
// On mobile, GIPS recommends fixed AGC (not adaptive)
if (voe_wrapper_->processing()->SetAgcStatus(agc, kAgcFixedDigital) == -1) {
LOG_RTCERR2(SetAgcStatus, agc, kAgcFixedDigital);
return false;
}
// On mobile, GIPS recommends moderate aggressiveness.
if (voe_wrapper_->processing()->SetNsStatus(true,
kNsModerateSuppression) == -1) {
LOG_RTCERR2(SetNsStatus, ns, kNsModerateSuppression);
return false;
}
// No typing detection support on iOS or Android.
#endif // !IOS && !ANDROID
return true;
}
@ -470,7 +492,7 @@ struct ResumeEntry {
SendFlags send;
};
// TODO(juberti): Refactor this so that the core logic can be used to set the
// TODO: Refactor this so that the core logic can be used to set the
// soundclip device. At that time, reinstate the soundclip pause/resume code.
bool WebRtcVoiceEngine::SetDevices(const Device* in_device,
const Device* out_device) {
@ -689,21 +711,29 @@ bool WebRtcVoiceEngine::FindCodec(const AudioCodec& in) {
return FindWebRtcCodec(in, NULL);
}
// Get the VoiceEngine codec that matches |in|, with the supplied settings.
bool WebRtcVoiceEngine::FindWebRtcCodec(const AudioCodec& in,
webrtc::CodecInst* out) {
int ncodecs = voe_wrapper_->codec()->NumOfCodecs();
for (int i = 0; i < ncodecs; ++i) {
webrtc::CodecInst gcodec;
if (voe_wrapper_->codec()->GetCodec(i, gcodec) >= 0) {
AudioCodec codec(gcodec.pltype, gcodec.plname,
gcodec.plfreq, gcodec.rate, gcodec.channels, 0);
webrtc::CodecInst voe_codec;
if (voe_wrapper_->codec()->GetCodec(i, voe_codec) != -1) {
AudioCodec codec(voe_codec.pltype, voe_codec.plname, voe_codec.plfreq,
voe_codec.rate, voe_codec.channels, 0);
// Allow arbitrary rates for ISAC to be specified.
if (_stricmp(codec.name.c_str(), kIsacCodecName) == 0) {
codec.bitrate = 0;
}
if (codec.Matches(in)) {
if (out) {
// If the codec is VBR and an explicit rate is specified, use it.
if (in.bitrate != 0 && gcodec.rate == -1) {
gcodec.rate = in.bitrate;
// Fixup the payload type.
voe_codec.pltype = in.id;
// If ISAC is being used, and an explicit bitrate is not specified,
// enable auto bandwidth adjustment.
if (_stricmp(codec.name.c_str(), kIsacCodecName) == 0) {
voe_codec.rate = (in.bitrate > 0) ? in.bitrate : -1;
}
*out = gcodec;
*out = voe_codec;
}
return true;
}
@ -712,6 +742,19 @@ bool WebRtcVoiceEngine::FindWebRtcCodec(const AudioCodec& in,
return false;
}
void WebRtcVoiceEngine::SetLogging(int min_sev, const char* filter) {
// if min_sev == -1, we keep the current log level.
if (min_sev >= 0) {
log_level_ = min_sev;
}
log_filter_ = filter;
ApplyLogging(initialized_ ? log_filter_ : "");
}
int WebRtcVoiceEngine::GetLastEngineError() {
return voe_wrapper_->error();
}
// We support three different logging settings for VoiceEngine:
// 1. Observer callback that goes into talk diagnostic logfile.
// Use --logfile and --loglevel
@ -724,19 +767,24 @@ bool WebRtcVoiceEngine::FindWebRtcCodec(const AudioCodec& in,
//
// For more details see: "https://sites.google.com/a/google.com/wavelet/Home/
// Magic-Flute--RTC-Engine-/Magic-Flute-Command-Line-Parameters"
void WebRtcVoiceEngine::SetLogging(int min_sev, const char* filter) {
// if min_sev == -1, we keep the current log level.
if (min_sev >= 0) {
log_level_ = min_sev;
void WebRtcVoiceEngine::ApplyLogging(const std::string& log_filter) {
// Set log level.
int filter = 0;
switch (log_level_) {
case talk_base::LS_VERBOSE:
filter |= webrtc::kTraceAll; // fall through
case talk_base::LS_INFO:
filter |= webrtc::kTraceStateInfo; // fall through
case talk_base::LS_WARNING:
filter |= (webrtc::kTraceInfo | webrtc::kTraceWarning); // fall through
case talk_base::LS_ERROR:
filter |= (webrtc::kTraceError | webrtc::kTraceCritical);
}
tracing_->SetTraceFilter(filter);
// voice log level
ApplyLogging();
// Set encrypted trace file.
std::vector<std::string> opts;
talk_base::tokenize(filter, ' ', &opts);
// voice log file
talk_base::tokenize(log_filter, ' ', '"', '"', &opts);
std::vector<std::string>::iterator tracefile =
std::find(opts.begin(), opts.end(), "tracefile");
if (tracefile != opts.end() && ++tracefile != opts.end()) {
@ -747,7 +795,7 @@ void WebRtcVoiceEngine::SetLogging(int min_sev, const char* filter) {
}
}
// AEC dump file
// Set AEC dump file
std::vector<std::string>::iterator recordEC =
std::find(opts.begin(), opts.end(), "recordEC");
if (recordEC != opts.end()) {
@ -770,36 +818,18 @@ void WebRtcVoiceEngine::SetLogging(int min_sev, const char* filter) {
}
}
int WebRtcVoiceEngine::GetLastEngineError() {
return voe_wrapper_->error();
}
void WebRtcVoiceEngine::ApplyLogging() {
int filter = 0;
switch (log_level_) {
case talk_base::LS_VERBOSE:
filter |= webrtc::kTraceAll; // fall through
case talk_base::LS_INFO:
filter |= webrtc::kTraceStateInfo; // fall through
case talk_base::LS_WARNING:
filter |= (webrtc::kTraceInfo | webrtc::kTraceWarning); // fall through
case talk_base::LS_ERROR:
filter |= (webrtc::kTraceError | webrtc::kTraceCritical);
}
tracing_->SetTraceFilter(filter);
}
// Ignore spammy trace messages, mostly from the stats API when we haven't
// gotten RTCP info yet from the remote side.
static bool ShouldIgnoreTrace(const std::string& trace) {
static const char* kTracesToIgnore[] = {
"\tfailed to GetReportBlockInformation",
"GetRecCodec() failed to get received codec",
"GetRemoteRTCPData() failed to measure statistics dueto lack of received RTP and/or RTCP packets", // NOLINT
"GetRemoteRTCPData() failed to retrieve sender info for remoteside",
"GetRTPStatistics() failed to measure RTT since noRTP packets have been received yet", // NOLINT
"GetRTPStatistics() failed to read RTP statistics from the RTP/RTCP module",
"GetRTPStatistics() failed to retrieve RTT fromthe RTP/RTCP module",
"RTCPReceiver::SenderInfoReceived No received SR",
"webrtc::RTCPReceiver::SenderInfoReceived No received SR",
"StatisticsRTP() no statisitics availble",
NULL
};
@ -852,16 +882,6 @@ void WebRtcVoiceEngine::CallbackOnError(const int channel_num,
}
}
int WebRtcVoiceEngine::GetCodecPreference(const char *name, int clockrate) {
for (size_t i = 0; i < ARRAY_SIZE(kCodecPrefs); ++i) {
if ((strcmp(kCodecPrefs[i].name, name) == 0) &&
(kCodecPrefs[i].clockrate == clockrate))
return ARRAY_SIZE(kCodecPrefs) - i;
}
LOG(LS_WARNING) << "Unexpected codec \"" << name << "/" << clockrate << "\"";
return -1;
}
bool WebRtcVoiceEngine::FindChannelAndSsrc(
int channel_num, WebRtcVoiceMediaChannel** channel, uint32* ssrc) const {
ASSERT(channel != NULL && ssrc != NULL);
@ -962,6 +982,32 @@ bool WebRtcVoiceEngine::SetConferenceMode(bool enable) {
return true;
}
// Installs externally supplied audio device modules (ADMs): |adm| for the
// main VoiceEngine and |adm_sc| for the soundclip VoiceEngine. Must be
// called before Init(), since the modules are handed to VoiceEngine there.
// Passing NULL clears the corresponding module. This object AddRef()s any
// module it keeps and Release()s any module it replaces.
bool WebRtcVoiceEngine::SetAudioDeviceModule(webrtc::AudioDeviceModule* adm,
                                             webrtc::AudioDeviceModule* adm_sc) {
  // ADMs can only be swapped while the engine is uninitialized.
  if (initialized_) {
    LOG(LS_WARNING) << "SetAudioDeviceModule can not be called after Init.";
    return false;
  }
  // Drop our reference to the previously installed main ADM, if any.
  if (adm_) {
    adm_->Release();
    adm_ = NULL;
  }
  if (adm) {
    adm_ = adm;
    adm_->AddRef();
  }
  // Same exchange for the soundclip ADM.
  if (adm_sc_) {
    adm_sc_->Release();
    adm_sc_ = NULL;
  }
  if (adm_sc) {
    adm_sc_ = adm_sc;
    adm_sc_->AddRef();
  }
  return true;
}
// WebRtcVoiceMediaChannel
WebRtcVoiceMediaChannel::WebRtcVoiceMediaChannel(WebRtcVoiceEngine *engine)
: WebRtcMediaChannel<VoiceMediaChannel, WebRtcVoiceEngine>(
@ -989,6 +1035,9 @@ WebRtcVoiceMediaChannel::WebRtcVoiceMediaChannel(WebRtcVoiceEngine *engine)
// Create a random but nonzero send SSRC
SetSendSsrc(talk_base::CreateRandomNonZeroId());
// Reset all recv codecs; they will be enabled via SetRecvCodecs.
ResetRecvCodecs(voe_channel());
}
WebRtcVoiceMediaChannel::~WebRtcVoiceMediaChannel() {
@ -1031,26 +1080,22 @@ bool WebRtcVoiceMediaChannel::SetOptions(int flags) {
bool WebRtcVoiceMediaChannel::SetRecvCodecs(
const std::vector<AudioCodec>& codecs) {
// Update our receive payload types to match what we offered. This only is
// an issue when a different entity (i.e. a server) is generating the offer
// for us.
// Set the payload types to be used for incoming media.
bool ret = true;
for (std::vector<AudioCodec>::const_iterator i = codecs.begin();
i != codecs.end() && ret; ++i) {
webrtc::CodecInst gcodec;
if (engine()->FindWebRtcCodec(*i, &gcodec)) {
if (gcodec.pltype != i->id) {
LOG(LS_INFO) << "Updating payload type for " << gcodec.plname
<< " from " << gcodec.pltype << " to " << i->id;
gcodec.pltype = i->id;
LOG(LS_INFO) << "Setting receive voice codecs:";
for (std::vector<AudioCodec>::const_iterator it = codecs.begin();
it != codecs.end() && ret; ++it) {
webrtc::CodecInst voe_codec;
if (engine()->FindWebRtcCodec(*it, &voe_codec)) {
LOG(LS_INFO) << ToString(*it);
voe_codec.pltype = it->id;
if (engine()->voe()->codec()->SetRecPayloadType(
voe_channel(), gcodec) == -1) {
LOG_RTCERR1(SetRecPayloadType, voe_channel());
voe_channel(), voe_codec) == -1) {
LOG_RTCERR2(SetRecPayloadType, voe_channel(), ToString(voe_codec));
ret = false;
}
}
} else {
LOG(LS_WARNING) << "Unknown codec " << i->name;
LOG(LS_WARNING) << "Unknown codec " << ToString(*it);
ret = false;
}
}
@ -1071,29 +1116,30 @@ bool WebRtcVoiceMediaChannel::SetSendCodecs(
webrtc::CodecInst send_codec;
memset(&send_codec, 0, sizeof(send_codec));
for (std::vector<AudioCodec>::const_iterator i = codecs.begin();
i != codecs.end(); ++i) {
for (std::vector<AudioCodec>::const_iterator it = codecs.begin();
it != codecs.end(); ++it) {
// Ignore codecs we don't know about. The negotiation step should prevent
// this, but double-check to be sure.
webrtc::CodecInst gcodec;
if (!engine()->FindWebRtcCodec(*i, &gcodec)) {
LOG(LS_WARNING) << "Unknown codec " << i->name;
webrtc::CodecInst voe_codec;
if (!engine()->FindWebRtcCodec(*it, &voe_codec)) {
LOG(LS_WARNING) << "Unknown codec " << ToString(voe_codec);
continue;
}
// Find the DTMF telephone event "codec" and tell VoiceEngine about it.
if (i->name == "telephone-event" || i->name == "audio/telephone-event") {
if (_stricmp(it->name.c_str(), "telephone-event") == 0 ||
_stricmp(it->name.c_str(), "audio/telephone-event") == 0) {
engine()->voe()->dtmf()->SetSendTelephoneEventPayloadType(
voe_channel(), i->id);
voe_channel(), it->id);
dtmf_allowed_ = true;
}
// Turn voice activity detection/comfort noise on if supported.
// Set the wideband CN payload type appropriately.
// (narrowband always uses the static payload type 13).
if (i->name == "CN") {
if (_stricmp(it->name.c_str(), "CN") == 0) {
webrtc::PayloadFrequencies cn_freq;
switch (i->clockrate) {
switch (it->clockrate) {
case 8000:
cn_freq = webrtc::kFreq8000Hz;
break;
@ -1104,14 +1150,14 @@ bool WebRtcVoiceMediaChannel::SetSendCodecs(
cn_freq = webrtc::kFreq32000Hz;
break;
default:
LOG(LS_WARNING) << "CN frequency " << i->clockrate
LOG(LS_WARNING) << "CN frequency " << it->clockrate
<< " not supported.";
continue;
}
engine()->voe()->codec()->SetVADStatus(voe_channel(), true);
if (cn_freq != webrtc::kFreq8000Hz) {
engine()->voe()->codec()->SetSendCNPayloadType(voe_channel(),
i->id, cn_freq);
it->id, cn_freq);
}
}
@ -1120,24 +1166,23 @@ bool WebRtcVoiceMediaChannel::SetSendCodecs(
// "red", for FEC audio, is a special case where the actual codec to be
// used is specified in params.
if (first) {
if (i->name == "red") {
if (_stricmp(it->name.c_str(), "red") == 0) {
// Parse out the RED parameters. If we fail, just ignore RED;
// we don't support all possible params/usage scenarios.
if (!GetRedSendCodec(*i, codecs, &send_codec)) {
if (!GetRedSendCodec(*it, codecs, &send_codec)) {
continue;
}
// Enable redundant encoding of the specified codec. Treat any
// failure as a fatal internal error.
LOG(LS_INFO) << "Enabling RED";
LOG(LS_INFO) << "Enabling FEC";
if (engine()->voe()->rtp()->SetFECStatus(voe_channel(),
true, i->id) == -1) {
LOG_RTCERR3(SetFECStatus, voe_channel(), true, i->id);
true, it->id) == -1) {
LOG_RTCERR3(SetFECStatus, voe_channel(), true, it->id);
return false;
}
} else {
send_codec = gcodec;
send_codec.pltype = i->id;
send_codec = voe_codec;
}
first = false;
}
@ -1152,11 +1197,11 @@ bool WebRtcVoiceMediaChannel::SetSendCodecs(
}
// Set the codec.
LOG(LS_INFO) << "Selected voice codec " << send_codec.plname
<< "/" << send_codec.plfreq;
LOG(LS_INFO) << "Selected voice codec " << ToString(send_codec)
<< ", bitrate=" << send_codec.rate;
if (engine()->voe()->codec()->SetSendCodec(voe_channel(),
send_codec) == -1) {
LOG_RTCERR1(SetSendCodec, voe_channel());
LOG_RTCERR2(SetSendCodec, voe_channel(), ToString(send_codec));
return false;
}
@ -1277,7 +1322,6 @@ bool WebRtcVoiceMediaChannel::ChangeSend(SendFlags send) {
// Tandberg-bridged conferences have an AGC target that is lower than
// GTV-only levels.
// TODO(ronghuawu): replace 0x80000000 with OPT_AGC_TANDBERG_LEVELS
if ((channel_options_ & 0x80000000) && !agc_adjusted_) {
if (engine()->AdjustAgcLevel(kTandbergDbAdjustment)) {
agc_adjusted_ = true;
@ -1390,6 +1434,24 @@ bool WebRtcVoiceMediaChannel::AddStream(uint32 ssrc) {
return false;
}
// Use the same recv payload types as our default channel.
ResetRecvCodecs(channel);
int ncodecs = engine()->voe()->codec()->NumOfCodecs();
for (int i = 0; i < ncodecs; ++i) {
webrtc::CodecInst voe_codec;
if (engine()->voe()->codec()->GetCodec(i, voe_codec) != -1) {
voe_codec.rate = 0; // Needed to make GetRecPayloadType work for ISAC
if (engine()->voe()->codec()->GetRecPayloadType(
voe_channel(), voe_codec) != -1) {
if (engine()->voe()->codec()->SetRecPayloadType(
channel, voe_codec) == -1) {
LOG_RTCERR2(SetRecPayloadType, channel, ToString(voe_codec));
return false;
}
}
}
}
if (mux_channels_.empty() && playout_) {
// This is the first stream in a multi user meeting. We can now
// disable playback of the default stream. This since the default
@ -1403,7 +1465,7 @@ bool WebRtcVoiceMediaChannel::AddStream(uint32 ssrc) {
mux_channels_[ssrc] = channel;
// TODO(juberti): We should rollback the add if SetPlayout fails.
// TODO: We should rollback the add if SetPlayout fails.
LOG(LS_INFO) << "New audio stream " << ssrc
<< " registered to VoiceEngine channel #"
<< channel << ".";
@ -1433,7 +1495,7 @@ bool WebRtcVoiceMediaChannel::RemoveStream(uint32 ssrc) {
// The last stream was removed. We can now enable the default
// channel for new channels to be played out immediately without
// waiting for AddStream messages.
// TODO(oja): Does the default channel still have it's CN state?
// TODO: Does the default channel still have it's CN state?
LOG(LS_INFO) << "Enabling playback on the default voice channel";
SetPlayout(voe_channel(), true);
}
@ -1465,6 +1527,88 @@ int WebRtcVoiceMediaChannel::GetOutputLevel() {
return highest;
}
// Sets the playout volume for the stream identified by |ssrc|, or for all
// streams (including the default channel) when |ssrc| is 0. |left| and
// |right| are per-side gains; the overall level is applied via channel
// output volume scaling and the left/right ratio via output panning.
bool WebRtcVoiceMediaChannel::SetOutputScaling(
    uint32 ssrc, double left, double right) {
  talk_base::CritScope lock(&mux_channels_cs_);
  // Collect the channels to scale the output volume.
  std::vector<int> channels;
  if (0 == ssrc) {  // Collect all channels, including the default one.
    channels.push_back(voe_channel());
    for (ChannelMap::const_iterator it = mux_channels_.begin();
         it != mux_channels_.end(); ++it) {
      channels.push_back(it->second);
    }
  } else {  // Collect only the channel of the specified ssrc.
    int channel = GetChannel(ssrc);
    if (-1 == channel) {
      LOG(LS_WARNING) << "Cannot find channel for ssrc:" << ssrc;
      return false;
    }
    channels.push_back(channel);
  }
  // Scale the output volume for the collected channels. We first normalize
  // so that max(left, right) becomes the volume scale and left/right become
  // pan values in [0, 1], then set the left and right pan.
  float scale = static_cast<float>(talk_base::_max(left, right));
  if (scale > 0.0001f) {
    left /= scale;
    right /= scale;
  }
  for (std::vector<int>::const_iterator it = channels.begin();
       it != channels.end(); ++it) {
    if (-1 == engine()->voe()->volume()->SetChannelOutputVolumeScaling(
        *it, scale)) {
      LOG_RTCERR2(SetChannelOutputVolumeScaling, *it, scale);
      return false;
    }
    if (-1 == engine()->voe()->volume()->SetOutputVolumePan(
        *it, static_cast<float>(left), static_cast<float>(right))) {
      LOG_RTCERR3(SetOutputVolumePan, *it, left, right);
      // Do not return if this fails. SetOutputVolumePan is not available on
      // all platforms.
    }
    LOG(LS_INFO) << "SetOutputScaling to left=" << left * scale
                 << " right=" << right * scale
                 << " for channel " << *it << " and ssrc " << ssrc;
  }
  return true;
}
// Retrieves the effective left/right playout gains for the stream
// identified by |ssrc| (0 selects the default channel). The result combines
// the channel's volume scaling with its left/right pan, i.e. the inverse of
// the decomposition performed by SetOutputScaling(). Returns false on NULL
// output pointers or when the ssrc has no channel.
bool WebRtcVoiceMediaChannel::GetOutputScaling(
    uint32 ssrc, double* left, double* right) {
  if (!left || !right) return false;
  talk_base::CritScope lock(&mux_channels_cs_);
  // Determine which channel based on ssrc.
  int channel = (0 == ssrc) ? voe_channel() : GetChannel(ssrc);
  if (channel == -1) {
    LOG(LS_WARNING) << "Cannot find channel for ssrc:" << ssrc;
    return false;
  }
  float scaling;
  if (-1 == engine()->voe()->volume()->GetChannelOutputVolumeScaling(
      channel, scaling)) {
    LOG_RTCERR2(GetChannelOutputVolumeScaling, channel, scaling);
    return false;
  }
  float left_pan;
  float right_pan;
  if (-1 == engine()->voe()->volume()->GetOutputVolumePan(
      channel, left_pan, right_pan)) {
    LOG_RTCERR3(GetOutputVolumePan, channel, left_pan, right_pan);
    // If GetOutputVolumePan fails (not available on all platforms), fall
    // back to the default (centered) left and right pan.
    left_pan = 1.0f;
    right_pan = 1.0f;
  }
  *left = scaling * left_pan;
  *right = scaling * right_pan;
  return true;
}
bool WebRtcVoiceMediaChannel::SetRingbackTone(const char *buf, int len) {
ringback_tone_.reset(new WebRtcSoundclipStream(buf, len));
return true;
@ -1601,7 +1745,7 @@ bool WebRtcVoiceMediaChannel::GetStats(VoiceMediaInfo* info) {
// In VoiceEngine 3.5, GetRTCPStatistics will return 0 even when it fails,
// causing the stats to contain garbage information. To prevent this, we
// zero the stats structure before calling this API.
// TODO(juberti): Remove this workaround.
// TODO: Remove this workaround.
webrtc::CallStatistics cs;
unsigned int ssrc;
webrtc::CodecInst codec;
@ -1643,7 +1787,7 @@ bool WebRtcVoiceMediaChannel::GetStats(VoiceMediaInfo* info) {
sinfo.fraction_lost = -1;
sinfo.jitter_ms = -1;
}
// TODO(juberti): Figure out how to get remote packets_lost, ext_seqnum
// TODO: Figure out how to get remote packets_lost, ext_seqnum
sinfo.packets_lost = -1;
sinfo.ext_seqnum = -1;
@ -1801,7 +1945,6 @@ bool WebRtcVoiceMediaChannel::GetRedSendCodec(const AudioCodec& red_codec,
// SetSendCodec, with the desired payload type.
if (codec != all_codecs.end() &&
engine()->FindWebRtcCodec(*codec, send_codec)) {
send_codec->pltype = red_pt;
} else {
LOG(LS_WARNING) << "RED params " << red_params << " are invalid.";
return false;
@ -1815,13 +1958,29 @@ bool WebRtcVoiceMediaChannel::EnableRtcp(int channel) {
LOG_RTCERR2(SetRTCPStatus, voe_channel(), 1);
return false;
}
// TODO(juberti): Enable VQMon and RTCP XR reports, once we know what
// TODO: Enable VQMon and RTCP XR reports, once we know what
// what we want to do with them.
// engine()->voe().EnableVQMon(voe_channel(), true);
// engine()->voe().EnableRTCP_XR(voe_channel(), true);
return true;
}
bool WebRtcVoiceMediaChannel::ResetRecvCodecs(int channel) {
int ncodecs = engine()->voe()->codec()->NumOfCodecs();
for (int i = 0; i < ncodecs; ++i) {
webrtc::CodecInst voe_codec;
if (engine()->voe()->codec()->GetCodec(i, voe_codec) != -1) {
voe_codec.pltype = -1;
if (engine()->voe()->codec()->SetRecPayloadType(
channel, voe_codec) == -1) {
LOG_RTCERR2(SetRecPayloadType, channel, ToString(voe_codec));
return false;
}
}
}
return true;
}
bool WebRtcVoiceMediaChannel::SetPlayout(int channel, bool playout) {
if (playout) {
LOG(LS_INFO) << "Starting playout for channel #" << channel;
@ -1893,4 +2052,4 @@ int WebRtcSoundclipStream::Rewind() {
} // namespace cricket
#endif // HAVE_WEBRTC
#endif // HAVE_WEBRTC_VOICE

View File

@ -1,320 +0,0 @@
/*
* libjingle
* Copyright 2004--2011, Google Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
* EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef TALK_SESSION_PHONE_WEBRTCVOICEENGINE_H_
#define TALK_SESSION_PHONE_WEBRTCVOICEENGINE_H_
#include <map>
#include <set>
#include <string>
#include <vector>
#include "talk/base/buffer.h"
#include "talk/base/byteorder.h"
#include "talk/base/logging.h"
#include "talk/base/scoped_ptr.h"
#include "talk/base/stream.h"
#include "talk/session/phone/channel.h"
#include "talk/session/phone/mediaengine.h"
#include "talk/session/phone/rtputils.h"
#include "talk/session/phone/webrtccommon.h"
namespace cricket {
// WebRtcSoundclipStream adapts an in-memory audio buffer to the
// webrtc::InStream interface so a soundclip can be fed into WebRtc
// VoiceEngine, and supports looping (restarting from the beginning of the
// clip when the end is reached).
class WebRtcSoundclipStream : public webrtc::InStream {
 public:
  // Wraps |buf|/|len| in a talk_base::MemoryStream. Looping is on by default.
  // NOTE(review): presumably |buf| must outlive this object — confirm
  // MemoryStream's ownership semantics for this constructor.
  WebRtcSoundclipStream(const char* buf, size_t len)
      : mem_(buf, len), loop_(true) {
  }
  // Enables/disables looping for subsequent reads.
  void set_loop(bool loop) { loop_ = loop; }
  // webrtc::InStream implementation; defined out of line.
  virtual int Read(void* buf, int len);
  virtual int Rewind();

 private:
  talk_base::MemoryStream mem_;  // The wrapped clip data.
  bool loop_;                    // Whether to restart the clip on EOF.
};
// WebRtcMonitorStream is used to monitor a stream coming from WebRtc.
// For now we just dump the data: every write is accepted and discarded.
class WebRtcMonitorStream : public webrtc::OutStream {
  // webrtc::OutStream implementation — swallow the data, report success.
  virtual bool Write(const void *buf, int len) {
    return true;
  }
};
class AudioDeviceModule;
class VoETraceWrapper;
class VoEWrapper;
class WebRtcSoundclipMedia;
class WebRtcVoiceMediaChannel;
// WebRtcVoiceEngine is a class to be used with CompositeMediaEngine.
// It uses the WebRtc VoiceEngine library for audio handling, running two
// VoiceEngine instances: a primary one for calls and a secondary one for
// soundclip playout.
class WebRtcVoiceEngine
    : public webrtc::VoiceEngineObserver,
      public webrtc::TraceCallback {
 public:
  WebRtcVoiceEngine();
  // Construct with externally supplied audio device modules for the primary
  // (call) and secondary (soundclip) engines.
  WebRtcVoiceEngine(webrtc::AudioDeviceModule* adm,
                    webrtc::AudioDeviceModule* adm_sc);
  // Dependency injection for testing.
  WebRtcVoiceEngine(VoEWrapper* voe_wrapper,
                    VoEWrapper* voe_wrapper_sc,
                    VoETraceWrapper* tracing);
  ~WebRtcVoiceEngine();
  bool Init();
  void Terminate();

  int GetCapabilities();
  // Factory methods for media channels and soundclip players.
  VoiceMediaChannel* CreateChannel();
  SoundclipMedia* CreateSoundclip();

  bool SetOptions(int options);
  // Selects capture/playout devices; NULL presumably means "default device"
  // (NOTE(review): confirm against the .cc implementation).
  bool SetDevices(const Device* in_device, const Device* out_device);
  bool GetOutputVolume(int* level);
  bool SetOutputVolume(int level);
  int GetInputLevel();
  bool SetLocalMonitor(bool enable);

  // Codec enumeration and lookup. FindWebRtcCodec maps a cricket AudioCodec
  // to the corresponding webrtc::CodecInst.
  const std::vector<AudioCodec>& codecs();
  bool FindCodec(const AudioCodec& codec);
  bool FindWebRtcCodec(const AudioCodec& codec, webrtc::CodecInst* gcodec);

  void SetLogging(int min_sev, const char* filter);

  // For tracking WebRtc channels. Needed because we have to pause them
  // all when switching devices.
  // May only be called by WebRtcVoiceMediaChannel.
  void RegisterChannel(WebRtcVoiceMediaChannel *channel);
  void UnregisterChannel(WebRtcVoiceMediaChannel *channel);

  // May only be called by WebRtcSoundclipMedia.
  void RegisterSoundclip(WebRtcSoundclipMedia *channel);
  void UnregisterSoundclip(WebRtcSoundclipMedia *channel);

  // Called by WebRtcVoiceMediaChannel to set a gain offset from
  // the default AGC target level.
  bool AdjustAgcLevel(int delta);

  // Called by WebRtcVoiceMediaChannel to configure echo cancellation
  // and noise suppression modes.
  bool SetConferenceMode(bool enable);

  // Accessors for the wrapped primary and soundclip VoiceEngine instances.
  VoEWrapper* voe() { return voe_wrapper_.get(); }
  VoEWrapper* voe_sc() { return voe_wrapper_sc_.get(); }
  int GetLastEngineError();

 private:
  typedef std::vector<WebRtcSoundclipMedia *> SoundclipList;
  typedef std::vector<WebRtcVoiceMediaChannel *> ChannelList;

  // Entry in the static codec-preference table (kCodecPrefs).
  struct CodecPref {
    const char* name;
    int clockrate;
  };

  void Construct();
  bool InitInternal();
  void ApplyLogging();
  // webrtc::TraceCallback implementation: receives engine trace output.
  virtual void Print(const webrtc::TraceLevel level,
                     const char* trace_string, const int length);
  // webrtc::VoiceEngineObserver implementation: engine error callback.
  virtual void CallbackOnError(const int channel, const int errCode);
  // Returns the preference rank of a codec name/clockrate pair.
  static int GetCodecPreference(const char *name, int clockrate);
  // Given the device type, name, and id, find device id. Return true and
  // set the output parameter rtc_id if successful.
  bool FindWebRtcAudioDeviceId(
      bool is_input, const std::string& dev_name, int dev_id, int* rtc_id);
  bool FindChannelAndSsrc(int channel_num,
                          WebRtcVoiceMediaChannel** channel,
                          uint32* ssrc) const;
  bool ChangeLocalMonitor(bool enable);
  bool PauseLocalMonitor();
  bool ResumeLocalMonitor();

  static const int kDefaultLogSeverity = talk_base::LS_WARNING;
  static const CodecPref kCodecPrefs[];

  // The primary instance of WebRtc VoiceEngine.
  talk_base::scoped_ptr<VoEWrapper> voe_wrapper_;
  // A secondary instance, for playing out soundclips (on the 'ring' device).
  talk_base::scoped_ptr<VoEWrapper> voe_wrapper_sc_;
  talk_base::scoped_ptr<VoETraceWrapper> tracing_;
  // The external audio device manager
  webrtc::AudioDeviceModule* adm_;
  webrtc::AudioDeviceModule* adm_sc_;
  int log_level_;
  bool is_dumping_aec_;  // True while an AEC debug dump is in progress.
  std::vector<AudioCodec> codecs_;
  bool desired_local_monitor_enable_;
  talk_base::scoped_ptr<WebRtcMonitorStream> monitor_;
  SoundclipList soundclips_;
  ChannelList channels_;
  // channels_ can be read from WebRtc callback thread. We need a lock on that
  // callback as well as the RegisterChannel/UnregisterChannel.
  talk_base::CriticalSection channels_cs_;
  webrtc::AgcConfig default_agc_config_;
};
// WebRtcMediaChannel holds the channel functionality common to the WebRtc
// voice and video channels: it bridges a cricket media channel (T) to a
// WebRtc engine (E) by implementing webrtc::Transport, forwarding outgoing
// RTP/RTCP packets to the channel's network interface.
template <class T, class E>
class WebRtcMediaChannel : public T, public webrtc::Transport {
 public:
  WebRtcMediaChannel(E *engine, int channel)
      : engine_(engine), voe_channel_(channel), sequence_number_(-1) {}
  // The owning engine.
  E *engine() { return engine_; }
  // The underlying VoiceEngine channel id, or -1 if none was allocated.
  int voe_channel() const { return voe_channel_; }
  bool valid() const { return -1 != voe_channel_; }

 protected:
  // implements Transport interface
  virtual int SendPacket(int channel, const void *data, int len) {
    if (!T::network_interface_)
      return -1;
    // Track the outgoing RTP sequence number so sending can resume with the
    // same sequence after the audio device is restarted.
    // TODO(oja): Remove when WebRtc has fixed the problem.
    int seq = -1;
    if (!GetRtpSeqNum(data, len, &seq))
      return -1;
    if (sequence_number_ == -1) {
      LOG(INFO) << "WebRtcVoiceMediaChannel sends first packet seqnum="
                << seq;
    }
    sequence_number_ = seq;
    talk_base::Buffer outgoing(data, len, kMaxRtpPacketLen);
    if (T::network_interface_->SendPacket(&outgoing)) {
      return len;
    }
    return -1;
  }
  virtual int SendRTCPPacket(int channel, const void *data, int len) {
    if (!T::network_interface_)
      return -1;
    talk_base::Buffer outgoing(data, len, kMaxRtpPacketLen);
    if (T::network_interface_->SendRtcp(&outgoing)) {
      return len;
    }
    return -1;
  }
  // Last RTP sequence number sent, or -1 if nothing has been sent yet.
  int sequence_number() const {
    return sequence_number_;
  }

 private:
  E *engine_;
  int voe_channel_;
  int sequence_number_;
};
// WebRtcVoiceMediaChannel is an implementation of VoiceMediaChannel that uses
// WebRtc Voice Engine.
class WebRtcVoiceMediaChannel
    : public WebRtcMediaChannel<VoiceMediaChannel,
                                WebRtcVoiceEngine> {
 public:
  explicit WebRtcVoiceMediaChannel(WebRtcVoiceEngine *engine);
  virtual ~WebRtcVoiceMediaChannel();
  virtual bool SetOptions(int options);
  virtual bool SetRecvCodecs(const std::vector<AudioCodec> &codecs);
  virtual bool SetSendCodecs(const std::vector<AudioCodec> &codecs);
  virtual bool SetRecvRtpHeaderExtensions(
      const std::vector<RtpHeaderExtension>& extensions);
  virtual bool SetSendRtpHeaderExtensions(
      const std::vector<RtpHeaderExtension>& extensions);
  // Playout control. Pause/Resume suspend and restore playout without
  // changing the desired state (see desired_playout_ vs playout_ below).
  virtual bool SetPlayout(bool playout);
  bool PausePlayout();
  bool ResumePlayout();
  // Send control, with the same desired-vs-actual split as playout.
  virtual bool SetSend(SendFlags send);
  bool PauseSend();
  bool ResumeSend();
  // Stream (SSRC) management for receiving from multiple sources.
  virtual bool AddStream(uint32 ssrc);
  virtual bool RemoveStream(uint32 ssrc);
  virtual bool GetActiveStreams(AudioInfo::StreamList* actives);
  virtual int GetOutputLevel();
  // Ringback tone playout; the tone is supplied as a raw buffer.
  virtual bool SetRingbackTone(const char *buf, int len);
  virtual bool PlayRingbackTone(uint32 ssrc, bool play, bool loop);
  virtual bool PressDTMF(int event, bool playout);
  // Incoming packet delivery from the network layer.
  virtual void OnPacketReceived(talk_base::Buffer* packet);
  virtual void OnRtcpReceived(talk_base::Buffer* packet);
  virtual void SetSendSsrc(uint32 id);
  virtual bool SetRtcpCName(const std::string& cname);
  virtual bool Mute(bool mute);
  // Bandwidth control is not supported; always reports failure.
  virtual bool SetSendBandwidth(bool autobw, int bps) { return false; }
  virtual bool GetStats(VoiceMediaInfo* info);
  // Gets last reported error from WebRtc voice engine. This should be only
  // called in response a failure.
  virtual void GetLastMediaError(uint32* ssrc,
                                 VoiceMediaChannel::Error* error);
  bool FindSsrc(int channel_num, uint32* ssrc);
  void OnError(uint32 ssrc, int error);

 protected:
  int GetLastEngineError() { return engine()->GetLastEngineError(); }
  int GetChannel(uint32 ssrc);
  int GetOutputLevel(int channel);
  bool GetRedSendCodec(const AudioCodec& red_codec,
                       const std::vector<AudioCodec>& all_codecs,
                       webrtc::CodecInst* send_codec);
  bool EnableRtcp(int channel);
  bool SetPlayout(int channel, bool playout);
  static uint32 ParseSsrc(const void* data, size_t len, bool rtcp);
  static Error WebRtcErrorToChannelError(int err_code);

 private:
  // Tandberg-bridged conferences require a -10dB gain adjustment,
  // which is actually +10 in AgcConfig.targetLeveldBOv
  static const int kTandbergDbAdjustment = 10;

  bool ChangePlayout(bool playout);
  bool ChangeSend(SendFlags send);

  typedef std::map<uint32, int> ChannelMap;
  talk_base::scoped_ptr<WebRtcSoundclipStream> ringback_tone_;
  std::set<int> ringback_channels_;  // channels playing ringback
  int channel_options_;
  bool agc_adjusted_;
  bool dtmf_allowed_;
  // Desired state (what the caller asked for) vs. actual state (which may be
  // temporarily overridden by Pause*/Resume*).
  bool desired_playout_;
  bool playout_;
  SendFlags desired_send_;
  SendFlags send_;
  ChannelMap mux_channels_;  // for multiple sources
  // mux_channels_ can be read from WebRtc callback thread.  Accesses off the
  // WebRtc thread must be synchronized with edits on the worker thread.  Reads
  // on the worker thread are ok.
  mutable talk_base::CriticalSection mux_channels_cs_;
};
}
#endif // TALK_SESSION_PHONE_WEBRTCVOICEENGINE_H_