* Push the //depotGoogle/chrome/third_party/libjingle/...@38654 to svn third_party_mods\libjingle.

* Update the peerconnection sample client accordingly.
Review URL: http://webrtc-codereview.appspot.com/60008

git-svn-id: http://webrtc.googlecode.com/svn/trunk@302 4adac7df-926f-26a2-2b94-8c16560cd09d
ronghuawu@google.com 2011-08-04 17:44:30 +00:00
parent 88bd440ef6
commit e256187f8b
68 changed files with 8798 additions and 11865 deletions

DEPS

@@ -1,11 +1,11 @@
vars = {
"webrtc_trunk" : "https://webrtc.googlecode.com/svn/trunk",
"chromium_trunk" : "http://src.chromium.org/svn/trunk",
"chromium_revision": "86252",
"chromium_revision": "95033",
# Use this googlecode_url variable only if there is an internal mirror for it.
# If you do not know, use the full path while defining your new deps entry.
"googlecode_url": "http://%s.googlecode.com/svn",
"libjingle_revision": "59",
"libjingle_revision": "77",
}
deps = {
@@ -43,7 +43,7 @@ deps = {
Var("chromium_trunk") + "/src/third_party/libjingle@" + Var("chromium_revision"),
"trunk/third_party/libjingle/source":
(Var("googlecode_url") % "libjingle") + "/branches/chrome-sandbox@" + Var("libjingle_revision"),
(Var("googlecode_url") % "libjingle") + "/trunk@" + Var("libjingle_revision"),
"trunk/third_party/yasm/source/patched-yasm":
Var("chromium_trunk") + "/deps/third_party/yasm/patched-yasm@73761",

peerconnection/samples/client/conductor.cc

@@ -12,6 +12,7 @@
#include "peerconnection/samples/client/defaults.h"
#include "talk/base/logging.h"
#include "talk/p2p/client/basicportallocator.h"
#include "talk/session/phone/videorendererfactory.h"
Conductor::Conductor(PeerConnectionClient* client, MainWnd* main_wnd)
@@ -19,8 +20,8 @@ Conductor::Conductor(PeerConnectionClient* client, MainWnd* main_wnd)
waiting_for_audio_(false),
waiting_for_video_(false),
peer_id_(-1),
video_channel_(-1),
audio_channel_(-1),
video_channel_(""),
audio_channel_(""),
client_(client),
main_wnd_(main_wnd) {
// Create a window for posting notifications back to from other threads.
@@ -37,11 +38,11 @@ Conductor::~Conductor() {
}
bool Conductor::has_video() const {
return video_channel_ != -1;
return !video_channel_.empty();
}
bool Conductor::has_audio() const {
return audio_channel_ != -1;
return !audio_channel_.empty();
}
bool Conductor::connection_active() const {
@@ -51,6 +52,8 @@ bool Conductor::connection_active() const {
void Conductor::Close() {
if (peer_connection_.get()) {
peer_connection_->Close();
video_channel_ = "";
audio_channel_ = "";
} else {
client_->SignOut();
}
@@ -58,7 +61,25 @@ void Conductor::Close() {
bool Conductor::InitializePeerConnection() {
ASSERT(peer_connection_.get() == NULL);
peer_connection_.reset(new webrtc::PeerConnection(GetPeerConnectionString()));
ASSERT(port_allocator_.get() == NULL);
ASSERT(worker_thread_.get() == NULL);
port_allocator_.reset(new cricket::BasicPortAllocator(
new talk_base::BasicNetworkManager(),
talk_base::SocketAddress("stun.l.google.com", 19302),
talk_base::SocketAddress(),
talk_base::SocketAddress(), talk_base::SocketAddress()));
worker_thread_.reset(new talk_base::Thread());
if (!worker_thread_->SetName("workder thread", this) ||
!worker_thread_->Start()) {
LOG(WARNING) << "Failed to start libjingle workder thread";
}
peer_connection_.reset(
webrtc::PeerConnection::Create(GetPeerConnectionString(),
port_allocator_.get(),
worker_thread_.get()));
peer_connection_->RegisterObserver(this);
if (!peer_connection_->Init()) {
DeletePeerConnection();
@@ -91,6 +112,10 @@ void Conductor::StartCaptureDevice() {
// PeerConnectionObserver implementation.
//
void Conductor::OnInitialized() {
PostMessage(handle(), PEER_CONNECTION_ADDSTREAMS, 0, 0);
}
void Conductor::OnError() {
LOG(INFO) << __FUNCTION__;
ASSERT(false);
@@ -99,7 +124,7 @@ void Conductor::OnError() {
void Conductor::OnSignalingMessage(const std::string& msg) {
LOG(INFO) << __FUNCTION__;
bool shutting_down = (video_channel_ == -1 && audio_channel_ == -1);
bool shutting_down = (video_channel_.empty() && audio_channel_.empty());
if (handshake_ == OFFER_RECEIVED && !shutting_down)
StartCaptureDevice();
@@ -120,38 +145,67 @@ void Conductor::OnSignalingMessage(const std::string& msg) {
}
}
// Called when a remote stream is added
void Conductor::OnAddStream(const std::string& stream_id, int channel_id,
bool video) {
// Called when a local stream is added and initialized
void Conductor::OnLocalStreamInitialized(const std::string& stream_id,
bool video) {
LOG(INFO) << __FUNCTION__ << " " << stream_id;
bool send_notification = (waiting_for_video_ || waiting_for_audio_);
if (video) {
ASSERT(video_channel_ == -1);
video_channel_ = channel_id;
ASSERT(video_channel_.empty());
video_channel_ = stream_id;
waiting_for_video_ = false;
LOG(INFO) << "Setting video renderer for channel: " << channel_id;
LOG(INFO) << "Setting video renderer for stream: " << stream_id;
bool ok = peer_connection_->SetVideoRenderer(stream_id,
main_wnd_->remote_renderer());
ASSERT(ok);
} else {
ASSERT(audio_channel_ == -1);
audio_channel_ = channel_id;
ASSERT(audio_channel_.empty());
audio_channel_ = stream_id;
waiting_for_audio_ = false;
}
if (send_notification && !waiting_for_audio_ && !waiting_for_video_)
PostMessage(handle(), MEDIA_CHANNELS_INITIALIZED, 0, 0);
if (!waiting_for_audio_ && !waiting_for_video_) {
PostMessage(handle(), PEER_CONNECTION_CONNECT, 0, 0);
}
}
void Conductor::OnRemoveStream(const std::string& stream_id, int channel_id,
bool video) {
// Called when a remote stream is added
void Conductor::OnAddStream(const std::string& stream_id, bool video) {
LOG(INFO) << __FUNCTION__ << " " << stream_id;
bool send_notification = (waiting_for_video_ || waiting_for_audio_);
if (video) {
ASSERT(video_channel_.empty());
video_channel_ = stream_id;
waiting_for_video_ = false;
LOG(INFO) << "Setting video renderer for stream: " << stream_id;
bool ok = peer_connection_->SetVideoRenderer(stream_id,
main_wnd_->remote_renderer());
ASSERT(ok);
} else {
ASSERT(audio_channel_.empty());
audio_channel_ = stream_id;
waiting_for_audio_ = false;
}
if (send_notification && !waiting_for_audio_ && !waiting_for_video_)
PostMessage(handle(), MEDIA_CHANNELS_INITIALIZED, 0, 0);
if (!waiting_for_audio_ && !waiting_for_video_) {
PostMessage(handle(), PEER_CONNECTION_CONNECT, 0, 0);
}
}
void Conductor::OnRemoveStream(const std::string& stream_id, bool video) {
LOG(INFO) << __FUNCTION__;
if (video) {
ASSERT(channel_id == video_channel_);
video_channel_ = -1;
ASSERT(video_channel_.compare(stream_id) == 0);
video_channel_ = "";
} else {
ASSERT(channel_id == audio_channel_);
audio_channel_ = -1;
ASSERT(audio_channel_.compare(stream_id) == 0);
audio_channel_ = "";
}
}
@@ -214,9 +268,6 @@ void Conductor::OnMessageFromPeer(int peer_id, const std::string& message) {
} else if (handshake_ == INITIATOR) {
LOG(INFO) << "Remote peer sent us an answer";
handshake_ = ANSWER_RECEIVED;
} else {
LOG(INFO) << "Remote peer is disconnecting";
handshake_ = QUIT_SENT;
}
peer_connection_->SignalingMessage(message);
@@ -255,19 +306,24 @@ void Conductor::ConnectToPeer(int peer_id) {
if (InitializePeerConnection()) {
peer_id_ = peer_id;
waiting_for_video_ = peer_connection_->AddStream(kVideoLabel, true);
waiting_for_audio_ = peer_connection_->AddStream(kAudioLabel, false);
if (waiting_for_video_ || waiting_for_audio_)
handshake_ = INITIATOR;
ASSERT(waiting_for_video_ || waiting_for_audio_);
}
if (handshake_ == NONE) {
} else {
::MessageBoxA(main_wnd_->handle(), "Failed to initialize PeerConnection",
"Error", MB_OK | MB_ICONERROR);
}
}
void Conductor::AddStreams() {
waiting_for_video_ = peer_connection_->AddStream(kVideoLabel, true);
waiting_for_audio_ = peer_connection_->AddStream(kAudioLabel, false);
if (waiting_for_video_ || waiting_for_audio_)
handshake_ = INITIATOR;
ASSERT(waiting_for_video_ || waiting_for_audio_);
}
void Conductor::PeerConnectionConnect() {
peer_connection_->Connect();
}
void Conductor::DisconnectFromCurrentPeer() {
if (peer_connection_.get())
peer_connection_->Close();
@@ -295,8 +351,8 @@ bool Conductor::OnMessage(UINT msg, WPARAM wp, LPARAM lp,
waiting_for_audio_ = false;
waiting_for_video_ = false;
peer_id_ = -1;
ASSERT(video_channel_ == -1);
ASSERT(audio_channel_ == -1);
ASSERT(video_channel_.empty());
ASSERT(audio_channel_.empty());
if (main_wnd_->IsWindow()) {
if (client_->is_connected()) {
main_wnd_->SwitchToPeerList(client_->peers());
@@ -313,6 +369,10 @@ bool Conductor::OnMessage(UINT msg, WPARAM wp, LPARAM lp,
LOG(LS_ERROR) << "SendToPeer failed";
DisconnectFromServer();
}
} else if (msg == PEER_CONNECTION_ADDSTREAMS) {
AddStreams();
} else if (msg == PEER_CONNECTION_CONNECT) {
PeerConnectionConnect();
} else {
ret = false;
}
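Two broader changes run through this file: channel bookkeeping switches from libjingle's integer channel ids to the string stream ids of the new API (so "no stream" is the empty string rather than -1), and the connect handshake becomes fully asynchronous, driven by posted window messages. A summary of the new sequence, using only names that appear in this diff:

// ConnectToPeer()                       -> InitializePeerConnection()
// OnInitialized() callback              -> PostMessage(PEER_CONNECTION_ADDSTREAMS)
// OnMessage(PEER_CONNECTION_ADDSTREAMS) -> AddStreams():
//                                          AddStream(kVideoLabel) / AddStream(kAudioLabel)
// OnLocalStreamInitialized() per stream -> when neither stream is still pending,
//                                          PostMessage(PEER_CONNECTION_CONNECT)
// OnMessage(PEER_CONNECTION_CONNECT)    -> PeerConnectionConnect():
//                                          peer_connection_->Connect()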

peerconnection/samples/client/conductor.h

@@ -16,7 +16,7 @@
#include "peerconnection/samples/client/main_wnd.h"
#include "peerconnection/samples/client/peer_connection_client.h"
#include "talk/app/peerconnection.h"
#include "talk/app/webrtc/peerconnection.h"
#include "talk/base/scoped_ptr.h"
namespace cricket {
@@ -33,6 +33,8 @@ class Conductor
MEDIA_CHANNELS_INITIALIZED = WM_APP + 1,
PEER_CONNECTION_CLOSED,
SEND_MESSAGE_TO_PEER,
PEER_CONNECTION_ADDSTREAMS,
PEER_CONNECTION_CONNECT,
};
enum HandshakeState {
@@ -56,20 +58,25 @@ class Conductor
bool InitializePeerConnection();
void DeletePeerConnection();
void StartCaptureDevice();
void AddStreams();
void PeerConnectionConnect();
//
// PeerConnectionObserver implementation.
//
virtual void OnInitialized();
virtual void OnError();
virtual void OnSignalingMessage(const std::string& msg);
// Called when a local stream is added and initialized
virtual void OnLocalStreamInitialized(const std::string& stream_id,
bool video);
// Called when a remote stream is added
virtual void OnAddStream(const std::string& stream_id, int channel_id,
bool video);
virtual void OnAddStream(const std::string& stream_id, bool video);
virtual void OnRemoveStream(const std::string& stream_id,
int channel_id,
bool video);
//
@@ -111,10 +118,12 @@ class Conductor
bool waiting_for_video_;
int peer_id_;
talk_base::scoped_ptr<webrtc::PeerConnection> peer_connection_;
talk_base::scoped_ptr<cricket::PortAllocator> port_allocator_;
talk_base::scoped_ptr<talk_base::Thread> worker_thread_;
PeerConnectionClient* client_;
MainWnd* main_wnd_;
int video_channel_;
int audio_channel_;
std::string video_channel_;
std::string audio_channel_;
};
#endif // PEERCONNECTION_SAMPLES_CLIENT_CONDUCTOR_H_
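Taken together, the header changes define the new creation contract: the application owns the port allocator and worker thread and passes them to a PeerConnection factory function. Below is a minimal sketch of that contract, not the sample's exact code; it assumes the config string keeps the "STUN host:port" form parsed by the old implementation (see the deleted peerconnection.cc further down) and that observer implements webrtc::PeerConnectionObserver:

// Sketch only; mirrors the setup added to Conductor in this commit.
talk_base::scoped_ptr<cricket::PortAllocator> allocator(
    new cricket::BasicPortAllocator(
        new talk_base::BasicNetworkManager(),
        talk_base::SocketAddress("stun.l.google.com", 19302),  // STUN server
        talk_base::SocketAddress(), talk_base::SocketAddress(),
        talk_base::SocketAddress()));
talk_base::scoped_ptr<talk_base::Thread> worker(new talk_base::Thread());
worker->Start();  // libjingle worker thread, owned by the application
talk_base::scoped_ptr<webrtc::PeerConnection> pc(
    webrtc::PeerConnection::Create("STUN stun.l.google.com:19302",
                                   allocator.get(), worker.get()));
pc->RegisterObserver(observer);
if (pc->Init()) {
  // In the sample these run from the OnInitialized() callback; Connect() is
  // called once OnLocalStreamInitialized() has been seen for each stream.
  pc->AddStream(kVideoLabel, true);    // video
  pc->AddStream(kAudioLabel, false);   // audio
}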

peerconnection/samples/client/main_wnd.cc

@@ -168,53 +168,68 @@ void MainWnd::OnPaint() {
long height = abs(bmi.bmiHeader.biHeight);
long width = bmi.bmiHeader.biWidth;
HDC dc_mem = ::CreateCompatibleDC(ps.hdc);
if (remote_video_.get()->image() != NULL) {
HDC dc_mem = ::CreateCompatibleDC(ps.hdc);
// Set the map mode so that the ratio will be maintained for us.
HDC all_dc[] = { ps.hdc, dc_mem };
for (int i = 0; i < ARRAY_SIZE(all_dc); ++i) {
SetMapMode(all_dc[i], MM_ISOTROPIC);
SetWindowExtEx(all_dc[i], width, height, NULL);
SetViewportExtEx(all_dc[i], rc.right, rc.bottom, NULL);
// Set the map mode so that the ratio will be maintained for us.
HDC all_dc[] = { ps.hdc, dc_mem };
for (int i = 0; i < ARRAY_SIZE(all_dc); ++i) {
SetMapMode(all_dc[i], MM_ISOTROPIC);
SetWindowExtEx(all_dc[i], width, height, NULL);
SetViewportExtEx(all_dc[i], rc.right, rc.bottom, NULL);
}
HBITMAP bmp_mem = ::CreateCompatibleBitmap(ps.hdc, rc.right, rc.bottom);
HGDIOBJ bmp_old = ::SelectObject(dc_mem, bmp_mem);
POINT logical_area = { rc.right, rc.bottom };
DPtoLP(ps.hdc, &logical_area, 1);
HBRUSH brush = ::CreateSolidBrush(RGB(0, 0, 0));
RECT logical_rect = {0, 0, logical_area.x, logical_area.y };
::FillRect(dc_mem, &logical_rect, brush);
::DeleteObject(brush);
const uint8* image = remote_video_->image();
int max_unit = std::max(width, height);
int x = (logical_area.x / 2) - (width / 2);
int y = (logical_area.y / 2) - (height / 2);
StretchDIBits(dc_mem, x, y, width, height,
0, 0, width, height, image, &bmi, DIB_RGB_COLORS, SRCCOPY);
if ((rc.right - rc.left) > 200 && (rc.bottom - rc.top) > 200) {
const BITMAPINFO& bmi = local_video_->bmi();
image = local_video_->image();
long thumb_width = bmi.bmiHeader.biWidth / 4;
long thumb_height = abs(bmi.bmiHeader.biHeight) / 4;
StretchDIBits(dc_mem,
logical_area.x - thumb_width - 10,
logical_area.y - thumb_height - 10,
thumb_width, thumb_height,
0, 0, bmi.bmiHeader.biWidth, -bmi.bmiHeader.biHeight,
image, &bmi, DIB_RGB_COLORS, SRCCOPY);
}
BitBlt(ps.hdc, 0, 0, logical_area.x, logical_area.y,
dc_mem, 0, 0, SRCCOPY);
// Cleanup.
::SelectObject(dc_mem, bmp_old);
::DeleteObject(bmp_mem);
::DeleteDC(dc_mem);
} else {
// We're still waiting for the video stream to be initialized.
HBRUSH brush = ::CreateSolidBrush(RGB(0, 0, 0));
::FillRect(ps.hdc, &rc, brush);
::DeleteObject(brush);
HGDIOBJ old_font = ::SelectObject(ps.hdc, GetDefaultFont());
::SetTextColor(ps.hdc, RGB(0xff, 0xff, 0xff));
::SetBkMode(ps.hdc, TRANSPARENT);
::DrawTextA(ps.hdc, "Connecting...", -1, &rc,
DT_SINGLELINE | DT_CENTER | DT_VCENTER);
::SelectObject(ps.hdc, old_font);
}
HBITMAP bmp_mem = ::CreateCompatibleBitmap(ps.hdc, rc.right, rc.bottom);
HGDIOBJ bmp_old = ::SelectObject(dc_mem, bmp_mem);
HBRUSH brush = ::CreateSolidBrush(RGB(0, 0, 0));
::FillRect(dc_mem, &rc, brush);
::DeleteObject(brush);
POINT logical_area = { rc.right, rc.bottom };
DPtoLP(ps.hdc, &logical_area, 1);
const uint8* image = remote_video_->image();
int max_unit = std::max(width, height);
int x = (logical_area.x / 2) - (width / 2);
int y = (logical_area.y / 2) - (height / 2);
StretchDIBits(dc_mem, x, y, width, height,
0, 0, width, height, image, &bmi, DIB_RGB_COLORS, SRCCOPY);
if ((rc.right - rc.left) > 200 && (rc.bottom - rc.top) > 200) {
const BITMAPINFO& bmi = local_video_->bmi();
image = local_video_->image();
long thumb_width = bmi.bmiHeader.biWidth / 4;
long thumb_height = abs(bmi.bmiHeader.biHeight) / 4;
StretchDIBits(dc_mem,
logical_area.x - thumb_width - 10,
logical_area.y - thumb_height - 10,
thumb_width, thumb_height,
0, 0, bmi.bmiHeader.biWidth, -bmi.bmiHeader.biHeight,
image, &bmi, DIB_RGB_COLORS, SRCCOPY);
}
BitBlt(ps.hdc, 0, 0, logical_area.x, logical_area.y,
dc_mem, 0, 0, SRCCOPY);
// Cleanup.
::SelectObject(dc_mem, bmp_old);
::DeleteObject(bmp_mem);
::DeleteDC(dc_mem);
} else {
HBRUSH brush = ::CreateSolidBrush(::GetSysColor(COLOR_WINDOW));
::FillRect(ps.hdc, &rc, brush);
@@ -481,7 +496,6 @@ MainWnd::VideoRenderer::VideoRenderer(HWND wnd, int width, int height)
bmi_.bmiHeader.biHeight = -height;
bmi_.bmiHeader.biSizeImage = width * height *
(bmi_.bmiHeader.biBitCount >> 3);
image_.reset(new uint8[bmi_.bmiHeader.biSizeImage]);
}
MainWnd::VideoRenderer::~VideoRenderer() {
@@ -529,6 +543,7 @@ void MainWnd::VideoRenderer::OnMessage(const MSG& msg) {
break;
case WM_PAINT: {
ASSERT(image_.get() != NULL);
const cricket::VideoFrame* frame =
reinterpret_cast<const cricket::VideoFrame*>(msg.lParam);
frame->ConvertToRgbBuffer(cricket::FOURCC_ARGB, image_.get(),

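The rendering change above pairs with the asynchronous stream setup: VideoRenderer no longer allocates its frame buffer in the constructor (hence the new ASSERT in the WM_PAINT handler), and OnPaint treats a NULL remote image as "still connecting", filling the window with black and drawing a centered "Connecting..." label instead of video.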
libjingle.gyp

@@ -8,6 +8,7 @@
# We declare a default value of 0 for standalone builds.
'inside_chromium_build%': 0,
'no_libjingle_logging%': 0,
'peer_connection_dev%': 0,
},
'target_defaults': {
'defines': [
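The new peer_connection_dev% variable (default 0) gates the experimental talk/app/webrtc *_dev sources and the peerconnection_client_dev target added further down; the trailing % marks it as a default that can be overridden externally, e.g. through GYP_DEFINES when generating projects.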
@@ -16,6 +17,7 @@
'_USE_32BIT_TIME_T',
'SAFE_TO_DEFINE_TALK_BASE_LOGGING_MACROS',
'EXPAT_RELATIVE_PATH',
'WEBRTC_RELATIVE_PATH',
'HAVE_WEBRTC',
],
'configurations': {
@@ -30,11 +32,15 @@
'dependencies': [
'../expat/expat.gyp:expat',
],
'export_dependent_settings': [
'../expat/expat.gyp:expat',
],
'direct_dependent_settings': {
'defines': [
'FEATURE_ENABLE_SSL',
'FEATURE_ENABLE_VOICEMAIL',
'EXPAT_RELATIVE_PATH',
'WEBRTC_RELATIVE_PATH',
],
'conditions': [
['OS=="win"', {
@@ -64,7 +70,7 @@
'OSX',
],
}],
['OS=="linux" or OS=="mac" or OS=="freebsd" or OS=="openbsd"', {
['os_posix == 1', {
'defines': [
'POSIX',
],
@@ -114,7 +120,7 @@
},{
'include_dirs': [
# the third_party folder for webrtc/ includes (non-chromium).
'../../trunk',
'../../src',
'./source',
'../../third_party/expat/files',
],
@@ -134,7 +140,7 @@
'OSX',
],
}],
['OS=="linux" or OS=="mac" or OS=="freebsd" or OS=="openbsd"', {
['os_posix == 1', {
'defines': [
'POSIX',
],
@@ -158,7 +164,7 @@
}],
],
},
'type': '<(library)',
'type': 'static_library',
'sources': [
'<(overrides)/talk/base/basictypes.h',
'<(overrides)/talk/base/constructormagic.h',
@@ -353,7 +359,7 @@
'source/talk/base/winping.h',
],
}],
['OS=="linux" or OS=="mac" or OS=="freebsd" or OS=="openbsd"', {
['os_posix == 1', {
'sources': [
'source/talk/base/latebindingsymboltable.cc',
'source/talk/base/latebindingsymboltable.h',
@@ -463,6 +469,8 @@
'source/talk/session/phone/codec.cc',
'source/talk/session/phone/codec.h',
'source/talk/session/phone/cryptoparams.h',
'source/talk/session/phone/currentspeakermonitor.cc',
'source/talk/session/phone/currentspeakermonitor.h',
'source/talk/session/phone/devicemanager.cc',
'source/talk/session/phone/devicemanager.h',
'source/talk/session/phone/filemediaengine.cc',
@@ -489,6 +497,15 @@
'source/talk/session/phone/srtpfilter.h',
'source/talk/session/phone/videocommon.h',
'source/talk/session/phone/voicechannel.h',
'source/talk/session/phone/webrtccommon.h',
'source/talk/session/phone/webrtcvideoengine.cc',
'source/talk/session/phone/webrtcvideoengine.h',
'source/talk/session/phone/webrtcvideoframe.cc',
'source/talk/session/phone/webrtcvideoframe.h',
'source/talk/session/phone/webrtcvie.h',
'source/talk/session/phone/webrtcvoe.h',
'source/talk/session/phone/webrtcvoiceengine.cc',
'source/talk/session/phone/webrtcvoiceengine.h',
'source/talk/session/tunnel/pseudotcpchannel.cc',
'source/talk/session/tunnel/pseudotcpchannel.h',
'source/talk/session/tunnel/tunnelsessionclient.cc',
@@ -503,8 +520,8 @@
}],
['OS=="linux"', {
'sources': [
#'source/talk/session/phone/gtkvideorenderer.cc',
#'source/talk/session/phone/gtkvideorenderer.h',
'source/talk/session/phone/gtkvideorenderer.cc',
'source/talk/session/phone/gtkvideorenderer.h',
'source/talk/session/phone/libudevsymboltable.cc',
'source/talk/session/phone/libudevsymboltable.h',
'source/talk/session/phone/v4llookup.cc',
@@ -513,21 +530,29 @@
'include_dirs': [
'source/talk/third_party/libudev',
],
'cflags': [
'<!@(pkg-config --cflags gtk+-2.0)',
],
}],
['inside_chromium_build==1', {
'dependencies': [
'libjingle',
'../webrtc/modules/audio_device/main/source/audio_device.gyp:audio_device',
'../webrtc/modules/video_capture/main/source/video_capture.gyp:video_capture_module',
'../webrtc/modules/video_render/main/source/video_render.gyp:video_render_module',
'../webrtc/system_wrappers/source/system_wrappers.gyp:system_wrappers',
'../webrtc/video_engine/main/source/video_engine_core.gyp:video_engine_core',
'../webrtc/voice_engine/main/source/voice_engine_core.gyp:voice_engine_core',
],
'defines': [
'PLATFORM_CHROMIUM',
'libjingle',
],
}, {
'dependencies': [
'libjingle',
'../../src/video_engine/main/source/video_engine_core.gyp:video_engine_core',
'../../src/voice_engine/main/source/voice_engine_core.gyp:voice_engine_core',
'../../src/modules/audio_device/main/source/audio_device.gyp:audio_device',
'../../src/modules/video_capture/main/source/video_capture.gyp:video_capture_module',
'../../src/modules/video_render/main/source/video_render.gyp:video_render_module',
'../../src/system_wrappers/source/system_wrappers.gyp:system_wrappers',
'../../src/video_engine/main/source/video_engine_core.gyp:video_engine_core',
'../../src/voice_engine/main/source/voice_engine_core.gyp:voice_engine_core',
'libjingle',
],
} ], # inside_chromium_build
], # conditions
@@ -535,101 +560,93 @@
# separate project for app
{
'target_name': 'libjingle_app',
'type': '<(library)',
'sources': [
'source/talk/app/peerconnection.cc',
'source/talk/app/peerconnection.h',
'source/talk/app/videoengine.h',
'source/talk/app/videomediaengine.cc',
'source/talk/app/videomediaengine.h',
'source/talk/app/voiceengine.h',
'source/talk/app/voicemediaengine.cc',
'source/talk/app/voicemediaengine.h',
'source/talk/app/webrtc_json.cc',
'source/talk/app/webrtc_json.h',
'source/talk/app/webrtcsession.cc',
'source/talk/app/webrtcsession.h',
'source/talk/app/webrtcsessionimpl.cc',
'source/talk/app/webrtcsessionimpl.h',
'source/talk/app/pc_transport_impl.cc',
'source/talk/app/pc_transport_impl.h',
],
'direct_dependent_settings': {
'variables': {
'conditions': [
['inside_chromium_build==1', {
'defines': [
'PLATFORM_CHROMIUM',
],
'overrides': 'overrides',
},{
'sources': [
'source/talk/app/p2p_transport_manager.cc',
'source/talk/app/p2p_transport_manager.h',
],
'overrides': 'source',
}],
],
},
'dependencies': [
'type': '<(library)',
'sources': [
'source/talk/app/webrtc/peerconnection.cc',
'source/talk/app/webrtc/peerconnection.h',
'source/talk/app/webrtc/peerconnectionimpl_callbacks.h',
'source/talk/app/webrtc/peerconnection_impl.cc',
'source/talk/app/webrtc/peerconnection_impl.h',
'source/talk/app/webrtc/webrtcsession.cc',
'source/talk/app/webrtc/webrtcsession.h',
'source/talk/app/webrtc/webrtc_json.cc',
'source/talk/app/webrtc/webrtc_json.h',
],
'conditions': [
['inside_chromium_build==1', {
['inside_chromium_build==1', {
'dependencies': [
'../webrtc/modules/video_capture/main/source/video_capture.gyp:video_capture_module',
'../webrtc/modules/video_render/main/source/video_render.gyp:video_render_module',
'../webrtc/video_engine/main/source/video_engine_core.gyp:video_engine_core',
'../webrtc/voice_engine/main/source/voice_engine_core.gyp:voice_engine_core',
'../webrtc/system_wrappers/source/system_wrappers.gyp:system_wrappers',
'libjingle_p2p',
'source/talk/third_party/jsoncpp/jsoncpp.gyp:jsoncpp',
],
'defines': [
'PLATFORM_CHROMIUM',
],
],
}, {
'dependencies': [
'../../third_party/jsoncpp/jsoncpp.gyp:jsoncpp',
'../../src/modules/video_capture/main/source/video_capture.gyp:video_capture_module',
'../../src/modules/video_render/main/source/video_render.gyp:video_render_module',
'../../src/video_engine/main/source/video_engine_core.gyp:video_engine_core',
'../../src/voice_engine/main/source/voice_engine_core.gyp:voice_engine_core',
'../../src/system_wrappers/source/system_wrappers.gyp:system_wrappers',
'libjingle_p2p',
],
} ], # inside_chromium_build
['peer_connection_dev==1', {
'sources': [
'<(overrides)/talk/app/webrtc/scoped_refptr.h',
'source/talk/app/webrtc/audio_device_dev.cc',
'source/talk/app/webrtc/local_audio_track_impl_dev.cc',
'source/talk/app/webrtc/local_stream_dev.h',
'source/talk/app/webrtc/local_stream_dev.cc',
'source/talk/app/webrtc/local_video_track_impl_dev.cc',
'source/talk/app/webrtc/peerconnection_dev.h',
'source/talk/app/webrtc/peerconnection_impl_dev.cc',
'source/talk/app/webrtc/peerconnection_impl_dev.h',
'source/talk/app/webrtc/peerconnectionmanager.cc',
'source/talk/app/webrtc/peerconnectionmanager.h',
'source/talk/app/webrtc/peerconnectiontransport.cc',
'source/talk/app/webrtc/peerconnectiontransport.h',
'source/talk/app/webrtc/ref_count.h',
'source/talk/app/webrtc/stream_dev.h',
'source/talk/app/webrtc/video_device_dev.cc',
'source/talk/app/webrtc/video_renderer_dev.cc',
],
} ], # inside_chromium_build
}], # peer_connection_dev
], # conditions
},
{
'target_name': 'session_test_app',
'target_name': 'peerconnection_client_dev',
'conditions': [
['OS=="win"', {
['peer_connection_dev==1 and OS=="linux"', {
'type': 'executable',
'sources': [
'source/talk/app/session_test/main_wnd.cc',
'source/talk/app/session_test/main_wnd.h',
'source/talk/app/session_test/session_test_main.cc',
'sources': [
'source/talk/app/webrtc/peerconnection_client_dev.cc',
],
'libraries': [
'-lXext',
'-lX11',
],
'msvs_settings': {
'VCLinkerTool': {
'SubSystem': '2', # Windows
},
},
}, {
'type': 'none',
}],
} ], # peer_connection_dev
['inside_chromium_build==1', {
'dependencies': [
'../webrtc/modules/video_capture/main/source/video_capture.gyp:video_capture_module',
'../webrtc/video_engine/main/source/video_engine_core.gyp:video_engine_core',
'../webrtc/voice_engine/main/source/voice_engine_core.gyp:voice_engine_core',
'../webrtc/system_wrappers/source/system_wrappers.gyp:system_wrappers',
'libjingle_app',
'libjingle_p2p',
'source/talk/third_party/jsoncpp/jsoncpp.gyp:jsoncpp',
],
}, {
'dependencies': [
'../../third_party/jsoncpp/jsoncpp.gyp:jsoncpp',
'../../src/modules/video_capture/main/source/video_capture.gyp:video_capture_module',
'../../src/voice_engine/main/source/voice_engine_core.gyp:voice_engine_core',
'../../src/system_wrappers/source/system_wrappers.gyp:system_wrappers',
'libjingle_app',
],
} ], # inside_chromium_build
@@ -637,9 +654,3 @@
},
],
}
# Local Variables:
# tab-width:2
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=2 shiftwidth=2:
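Every deletion that follows is a consequence of the gyp change above: the old talk/app implementation (peerconnection.cc/h, pc_transport_impl.cc/h, p2p_transport_manager.cc/h, webrtcsessionimpl.*, and the session_test app) is removed in favor of the talk/app/webrtc/* sources now pulled in from libjingle trunk@77.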

Binary file not shown (was 13 KiB).

source/talk/app/p2p_transport_manager.cc

@@ -1,75 +0,0 @@
// Copyright (c) 2011 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "talk/app/p2p_transport_manager.h"
#include "talk/base/socketaddress.h"
#include "talk/p2p/base/p2ptransportchannel.h"
#include "talk/p2p/client/httpportallocator.h"
#include "talk/p2p/client/basicportallocator.h"
namespace webrtc {
P2PTransportManager::P2PTransportManager(cricket::PortAllocator* allocator)
: event_handler_(NULL)
,state_(STATE_NONE)
,allocator_(allocator) {
}
P2PTransportManager::~P2PTransportManager() {
}
bool P2PTransportManager::Init(const std::string& name,
Protocol protocol,
const std::string& config,
EventHandler* event_handler) {
name_ = name;
event_handler_ = event_handler;
channel_.reset(new cricket::P2PTransportChannel(
name, "", NULL, allocator_));
channel_->SignalRequestSignaling.connect(
this, &P2PTransportManager::OnRequestSignaling);
channel_->SignalWritableState.connect(
this, &P2PTransportManager::OnReadableState);
channel_->SignalWritableState.connect(
this, &P2PTransportManager::OnWriteableState);
channel_->SignalCandidateReady.connect(
this, &P2PTransportManager::OnCandidateReady);
channel_->Connect();
return true;
}
bool P2PTransportManager::AddRemoteCandidate(
const cricket::Candidate& candidate) {
channel_->OnCandidate(candidate);
return true;
}
cricket::P2PTransportChannel* P2PTransportManager::GetP2PChannel() {
return channel_.get();
}
void P2PTransportManager::OnRequestSignaling() {
channel_->OnSignalingReady();
}
void P2PTransportManager::OnCandidateReady(
cricket::TransportChannelImpl* channel,
const cricket::Candidate& candidate) {
event_handler_->OnCandidateReady(candidate);
}
void P2PTransportManager::OnReadableState(cricket::TransportChannel* channel) {
state_ = static_cast<State>(state_ | STATE_READABLE);
event_handler_->OnStateChange(state_);
}
void P2PTransportManager::OnWriteableState(cricket::TransportChannel* channel) {
state_ = static_cast<State>(state_ | STATE_WRITABLE);
event_handler_->OnStateChange(state_);
}
}

source/talk/app/p2p_transport_manager.h

@@ -1,87 +0,0 @@
// Copyright (c) 2011 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef TALK_APP_WEBRTC_P2P_TRANSPORT_MANAGER_H_
#define TALK_APP_WEBRTC_P2P_TRANSPORT_MANAGER_H_
#include <string>
#include "talk/base/scoped_ptr.h"
#include "talk/base/sigslot.h"
namespace cricket {
class Candidate;
class P2PTransportChannel;
class PortAllocator;
class TransportChannel;
class TransportChannelImpl;
}
namespace talk_base {
class NetworkManager;
class PacketSocketFactory;
}
namespace webrtc {
class P2PTransportManager : public sigslot::has_slots<>{
public:
enum State {
STATE_NONE = 0,
STATE_WRITABLE = 1,
STATE_READABLE = 2,
};
enum Protocol {
PROTOCOL_UDP = 0,
PROTOCOL_TCP = 1,
};
class EventHandler {
public:
virtual ~EventHandler() {}
// Called for each local candidate.
virtual void OnCandidateReady(const cricket::Candidate& candidate) = 0;
// Called when readable or writable state of the stream changes.
virtual void OnStateChange(State state) = 0;
// Called when an error occurs (e.g. TCP handshake
// failed). P2PTransportManager object is not usable after that and
// should be destroyed.
virtual void OnError(int error) = 0;
};
public:
// Create P2PTransportManager using specified NetworkManager and
// PacketSocketFactory. Takes ownership of |network_manager| and
// |socket_factory|.
P2PTransportManager(cricket::PortAllocator* allocator);
~P2PTransportManager();
bool Init(const std::string& name,
Protocol protocol,
const std::string& config,
EventHandler* event_handler);
bool AddRemoteCandidate(const cricket::Candidate& address);
cricket::P2PTransportChannel* GetP2PChannel();
private:
void OnRequestSignaling();
void OnCandidateReady(cricket::TransportChannelImpl* channel,
const cricket::Candidate& candidate);
void OnReadableState(cricket::TransportChannel* channel);
void OnWriteableState(cricket::TransportChannel* channel);
std::string name_;
EventHandler* event_handler_;
State state_;
cricket::PortAllocator* allocator_;
talk_base::scoped_ptr<cricket::P2PTransportChannel> channel_;
};
}
#endif // TALK_APP_WEBRTC_P2P_TRANSPORT_MANAGER_H_

source/talk/app/pc_transport_impl.cc

@@ -1,359 +0,0 @@
/*
* pc_transport_impl.cc
*
* Created on: May 2, 2011
* Author: mallinath
*/
#include "talk/app/pc_transport_impl.h"
#ifdef PLATFORM_CHROMIUM
#include "base/values.h"
#include "content/common/json_value_serializer.h"
#include "content/renderer/p2p/p2p_transport_impl.h"
#include "jingle/glue/thread_wrapper.h"
#include "net/base/io_buffer.h"
#include "net/socket/socket.h"
#else
#include "talk/app/p2p_transport_manager.h"
#endif
#include "talk/p2p/base/transportchannel.h"
#include "talk/app/webrtcsessionimpl.h"
#include "talk/app/peerconnection.h"
namespace webrtc {
enum {
MSG_RTC_ONREADPACKET = 1,
MSG_RTC_TRANSPORTINIT,
MSG_RTC_ADDREMOTECANDIDATE,
MSG_RTC_ONCANDIDATEREADY,
};
struct MediaDataMsgParams : public talk_base::MessageData {
MediaDataMsgParams(cricket::TransportChannel* channel,
const char* dataPtr,
int len)
: channel(channel), data(dataPtr), len(len) {}
cricket::TransportChannel* channel;
const char* data;
int len;
};
PC_Transport_Impl::PC_Transport_Impl (WebRTCSessionImpl* session)
: session_(session),
#ifdef PLATFORM_CHROMIUM
ALLOW_THIS_IN_INITIALIZER_LIST(
channel_read_callback_(this, &PC_Transport_Impl::OnRead)),
ALLOW_THIS_IN_INITIALIZER_LIST(
channel_write_callback_(this, &PC_Transport_Impl::OnWrite)),
#endif
writable_(false),
event_(false, false),
network_thread_jingle_(session_->connection()->media_thread())
{
#ifdef PLATFORM_CHROMIUM
// Before proceeding, ensure we have libjingle thread wrapper for
// the current thread.
jingle_glue::JingleThreadWrapper::EnsureForCurrentThread();
network_thread_chromium_ = talk_base::Thread::Current();
#endif
event_.Set();
}
PC_Transport_Impl::~PC_Transport_Impl() {
}
bool PC_Transport_Impl::Init(const std::string& name) {
#ifdef PLATFORM_CHROMIUM
if(network_thread_chromium_ != talk_base::Thread::Current()) {
network_thread_chromium_->Post(this, MSG_RTC_TRANSPORTINIT,
new talk_base::TypedMessageData<std::string> (name));
return true;
}
#else
if(network_thread_jingle_ != talk_base::Thread::Current()) {
network_thread_jingle_->Send(this, MSG_RTC_TRANSPORTINIT,
new talk_base::TypedMessageData<std::string> (name));
return true;
}
#endif
name_ = name;
p2p_transport_.reset(CreateP2PTransport());
#ifdef PLATFORM_CHROMIUM
webkit_glue::P2PTransport::Protocol protocol =
webkit_glue::P2PTransport::PROTOCOL_UDP;
#else
webrtc::P2PTransportManager::Protocol protocol =
webrtc::P2PTransportManager::PROTOCOL_UDP;
#endif
p2p_transport_->Init(name_, protocol, "", this);
#ifdef PLATFORM_CHROMIUM
StreamRead();
#endif
return true;
}
#ifdef PLATFORM_CHROMIUM
void PC_Transport_Impl::OnCandidateReady(const std::string& address) {
if(network_thread_chromium_ != talk_base::Thread::Current()) {
network_thread_chromium_->Post(this, MSG_RTC_ONCANDIDATEREADY,
new talk_base::TypedMessageData<std::string> (
address));
return;
}
// using only first candidate
// use p2p_transport_impl.cc Deserialize method
cricket::Candidate candidate;
if (local_candidates_.empty()) {
cricket::Candidate candidate;
DeserializeCandidate(address, &candidate);
local_candidates_.push_back(candidate);
session_->OnCandidateReady(candidate);
}
}
bool PC_Transport_Impl::AddRemoteCandidate(
const cricket::Candidate& candidate) {
if(network_thread_chromium_ != talk_base::Thread::Current()) {
network_thread_chromium_->Post(this, MSG_RTC_ADDREMOTECANDIDATE,
new talk_base::TypedMessageData<const cricket::Candidate*> (
&candidate));
// TODO: save the result
return true;
}
if (!p2p_transport_.get())
return false;
return p2p_transport_->AddRemoteCandidate(SerializeCandidate(candidate));
}
#else
void PC_Transport_Impl::OnCandidateReady(const cricket::Candidate& candidate) {
if(network_thread_jingle_ != talk_base::Thread::Current()) {
network_thread_jingle_->Send(this, MSG_RTC_ONCANDIDATEREADY,
new talk_base::TypedMessageData<const cricket::Candidate*> (
&candidate));
return;
}
if (local_candidates_.empty()) {
local_candidates_.push_back(candidate);
session_->OnCandidateReady(candidate);
}
}
bool PC_Transport_Impl::AddRemoteCandidate(
const cricket::Candidate& candidate) {
if(network_thread_jingle_ != talk_base::Thread::Current()) {
network_thread_jingle_->Send(this, MSG_RTC_ADDREMOTECANDIDATE,
new talk_base::TypedMessageData<const cricket::Candidate*> (
&candidate));
// TODO: save the result
return true;
}
if (!p2p_transport_.get())
return false;
return p2p_transport_->AddRemoteCandidate(candidate);
}
#endif
#ifdef PLATFORM_CHROMIUM
int32 PC_Transport_Impl::DoRecv() {
if (!p2p_transport_.get())
return -1;
net::Socket* channel = p2p_transport_->GetChannel();
if (!channel)
return -1;
scoped_refptr<net::IOBuffer> buffer =
new net::WrappedIOBuffer(static_cast<const char*>(recv_buffer_));
int result = channel->Read(
buffer, kMaxRtpRtcpPacketLen, &channel_read_callback_);
return result;
}
void PC_Transport_Impl::OnRead(int result) {
network_thread_jingle_->Post(
this, MSG_RTC_ONREADPACKET, new MediaDataMsgParams(
GetP2PChannel(), recv_buffer_, result));
StreamRead();
}
void PC_Transport_Impl::OnWrite(int result) {
return;
}
net::Socket* PC_Transport_Impl::GetChannel() {
if (!p2p_transport_.get())
return NULL;
return p2p_transport_->GetChannel();
}
void PC_Transport_Impl::StreamRead() {
event_.Wait(talk_base::kForever);
DoRecv();
}
void PC_Transport_Impl::OnReadPacket_w(cricket::TransportChannel* channel,
const char* data,
size_t len) {
session()->SignalReadPacket(channel, data, len);
event_.Set();
return ;
}
std::string PC_Transport_Impl::SerializeCandidate(
const cricket::Candidate& candidate) {
// TODO(sergeyu): Use SDP to format candidates?
DictionaryValue value;
value.SetString("name", candidate.name());
value.SetString("ip", candidate.address().IPAsString());
value.SetInteger("port", candidate.address().port());
value.SetString("type", candidate.type());
value.SetString("protocol", candidate.protocol());
value.SetString("username", candidate.username());
value.SetString("password", candidate.password());
value.SetDouble("preference", candidate.preference());
value.SetInteger("generation", candidate.generation());
std::string result;
JSONStringValueSerializer serializer(&result);
serializer.Serialize(value);
return result;
}
bool PC_Transport_Impl::DeserializeCandidate(const std::string& address,
cricket::Candidate* candidate) {
JSONStringValueSerializer deserializer(address);
scoped_ptr<Value> value(deserializer.Deserialize(NULL, NULL));
if (!value.get() || !value->IsType(Value::TYPE_DICTIONARY)) {
return false;
}
DictionaryValue* dic_value = static_cast<DictionaryValue*>(value.get());
std::string name;
std::string ip;
int port;
std::string type;
std::string protocol;
std::string username;
std::string password;
double preference;
int generation;
if (!dic_value->GetString("name", &name) ||
!dic_value->GetString("ip", &ip) ||
!dic_value->GetInteger("port", &port) ||
!dic_value->GetString("type", &type) ||
!dic_value->GetString("protocol", &protocol) ||
!dic_value->GetString("username", &username) ||
!dic_value->GetString("password", &password) ||
!dic_value->GetDouble("preference", &preference) ||
!dic_value->GetInteger("generation", &generation)) {
return false;
}
candidate->set_name(name);
candidate->set_address(talk_base::SocketAddress(ip, port));
candidate->set_type(type);
candidate->set_protocol(protocol);
candidate->set_username(username);
candidate->set_password(password);
candidate->set_preference(static_cast<float>(preference));
candidate->set_generation(generation);
return true;
}
#endif
void PC_Transport_Impl::OnStateChange(P2PTransportClass::State state) {
writable_ = (state | P2PTransportClass::STATE_WRITABLE) != 0;
if (writable_) {
session_->OnStateChange(state, p2p_transport()->GetP2PChannel());
}
}
void PC_Transport_Impl::OnError(int error) {
}
cricket::TransportChannel* PC_Transport_Impl::GetP2PChannel() {
if (!p2p_transport_.get())
return NULL;
return p2p_transport_->GetP2PChannel();
}
void PC_Transport_Impl::OnMessage(talk_base::Message* message) {
talk_base::MessageData* data = message->pdata;
switch(message->message_id) {
case MSG_RTC_TRANSPORTINIT : {
talk_base::TypedMessageData<std::string> *p =
static_cast<talk_base::TypedMessageData<std::string>* >(data);
Init(p->data());
delete p;
break;
}
case MSG_RTC_ADDREMOTECANDIDATE : {
talk_base::TypedMessageData<const cricket::Candidate*> *p =
static_cast<talk_base::TypedMessageData<const cricket::Candidate*>* >(data);
AddRemoteCandidate(*p->data());
delete p;
break;
}
#ifdef PLATFORM_CHROMIUM
case MSG_RTC_ONCANDIDATEREADY : {
talk_base::TypedMessageData<std::string> *p =
static_cast<talk_base::TypedMessageData<std::string>* >(data);
OnCandidateReady(p->data());
delete p;
break;
}
case MSG_RTC_ONREADPACKET : {
MediaDataMsgParams* p = static_cast<MediaDataMsgParams*> (data);
ASSERT (p != NULL);
OnReadPacket_w(p->channel, p->data, p->len);
delete data;
break;
}
#else
case MSG_RTC_ONCANDIDATEREADY : {
talk_base::TypedMessageData<const cricket::Candidate*> *p =
static_cast<talk_base::TypedMessageData<const cricket::Candidate*>* >(data);
OnCandidateReady(*p->data());
delete p;
break;
}
#endif
default:
ASSERT(false);
}
}
P2PTransportClass* PC_Transport_Impl::CreateP2PTransport() {
#ifdef PLATFORM_CHROMIUM
return new P2PTransportImpl(
session()->connection()->p2p_socket_dispatcher());
#else
return new P2PTransportManager(session()->port_allocator());
#endif
}
} //namespace webrtc

source/talk/app/pc_transport_impl.h

@@ -1,109 +0,0 @@
/*
* peerconnection_transport_impl.h
*
* Created on: May 2, 2011
* Author: mallinath
*/
#ifndef TALK_APP_PEERCONNECTION_TRANSPORT_IMPL_H_
#define TALK_APP_PEERCONNECTION_TRANSPORT_IMPL_H_
#include <vector>
#include "talk/base/thread.h"
#include "talk/base/event.h"
#include "talk/base/messagehandler.h"
#include "talk/base/scoped_ptr.h"
#ifdef PLATFORM_CHROMIUM
#include "net/base/completion_callback.h"
#include "webkit/glue/p2p_transport.h"
class P2PTransportImpl;
#else
#include "talk/app/p2p_transport_manager.h"
#endif
#ifdef PLATFORM_CHROMIUM
typedef P2PTransportImpl TransportImplClass;
typedef webkit_glue::P2PTransport::EventHandler TransportEventHandler;
typedef webkit_glue::P2PTransport P2PTransportClass;
#else
typedef webrtc::P2PTransportManager TransportImplClass;
typedef webrtc::P2PTransportManager::EventHandler TransportEventHandler;
typedef webrtc::P2PTransportManager P2PTransportClass;
#endif
namespace cricket {
class TransportChannel;
class Candidate;
}
namespace webrtc {
const int kMaxRtpRtcpPacketLen = 1500;
class WebRTCSessionImpl;
// PC - PeerConnection
class PC_Transport_Impl : public talk_base::MessageHandler,
public TransportEventHandler {
public:
PC_Transport_Impl(WebRTCSessionImpl* session);
virtual ~PC_Transport_Impl();
bool Init(const std::string& name);
#ifdef PLATFORM_CHROMIUM
virtual void OnCandidateReady(const std::string& address);
#else
virtual void OnCandidateReady(const cricket::Candidate& candidate);
#endif
virtual void OnStateChange(P2PTransportClass::State state);
virtual void OnError(int error);
#ifdef PLATFORM_CHROMIUM
void OnRead(int result);
void OnWrite(int result);
net::Socket* GetChannel();
#endif
void OnMessage(talk_base::Message* message);
cricket::TransportChannel* GetP2PChannel();
bool AddRemoteCandidate(const cricket::Candidate& candidate);
WebRTCSessionImpl* session() { return session_; }
P2PTransportClass* p2p_transport() { return p2p_transport_.get(); }
const std::string& name() { return name_; }
std::vector<cricket::Candidate>& local_candidates() {
return local_candidates_;
}
private:
void MsgSend(uint32 id);
P2PTransportClass* CreateP2PTransport();
#ifdef PLATFORM_CHROMIUM
void OnReadPacket_w(
cricket::TransportChannel* channel, const char* data, size_t len);
int32 DoRecv();
void StreamRead();
std::string SerializeCandidate(const cricket::Candidate& candidate);
bool DeserializeCandidate(const std::string& address,
cricket::Candidate* candidate);
#endif
std::string name_;
WebRTCSessionImpl* session_;
talk_base::scoped_ptr<P2PTransportClass> p2p_transport_;
std::vector<cricket::Candidate> local_candidates_;
#ifdef PLATFORM_CHROMIUM
net::CompletionCallbackImpl<PC_Transport_Impl> channel_read_callback_;
net::CompletionCallbackImpl<PC_Transport_Impl> channel_write_callback_;
talk_base::Thread* network_thread_chromium_;
#endif
bool writable_;
char recv_buffer_[kMaxRtpRtcpPacketLen];
talk_base::Event event_;
talk_base::Thread* network_thread_jingle_;
};
} // namespace webrtc
#endif /* TALK_APP_PEERCONNECTION_TRANSPORT_IMPL_H_ */
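Worth noting about the deleted transport glue: it compiled two ways. Under PLATFORM_CHROMIUM the P2PTransportClass/TransportImplClass typedefs bound to Chromium's webkit_glue::P2PTransport and candidates were serialized to JSON strings; otherwise they bound to the in-tree P2PTransportManager and candidates were passed as cricket::Candidate objects. The talk/app/webrtc replacement drops this #ifdef scheme.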

source/talk/app/peerconnection.cc

@@ -1,300 +0,0 @@
// Copyright 2011 Google Inc. All Rights Reserved.
// Author: mallinath@google.com (Mallinath Bareddy)
#include <vector>
#include "talk/app/peerconnection.h"
#include "talk/base/basicpacketsocketfactory.h"
#include "talk/base/helpers.h"
#include "talk/base/stringencode.h"
#include "talk/base/logging.h"
#include "talk/p2p/client/basicportallocator.h"
#include "talk/session/phone/mediasessionclient.h"
#include "talk/app/webrtcsessionimpl.h"
#include "talk/app/webrtc_json.h"
namespace webrtc {
static const size_t kConfigTokens = 2;
static const int kDefaultStunPort = 3478;
#ifdef PLATFORM_CHROMIUM
PeerConnection::PeerConnection(const std::string& config,
P2PSocketDispatcher* p2p_socket_dispatcher)
#else
PeerConnection::PeerConnection(const std::string& config)
#endif // PLATFORM_CHROMIUM
: config_(config)
,media_thread_(new talk_base::Thread)
,network_manager_(new talk_base::NetworkManager)
,signaling_thread_(new talk_base::Thread)
,initialized_(false)
,service_type_(SERVICE_COUNT)
,event_callback_(NULL)
,session_(NULL)
,incoming_(false)
#ifdef PLATFORM_CHROMIUM
,p2p_socket_dispatcher_(p2p_socket_dispatcher)
#endif // PLATFORM_CHROMIUM
{
}
PeerConnection::~PeerConnection() {
if (session_ != NULL) {
// Before deleting the session, make sure that the signaling thread isn't
// running (or wait for it if it is).
signaling_thread_.reset();
ASSERT(!session_->HasAudioStream());
ASSERT(!session_->HasVideoStream());
// TODO: the RemoveAllStreams has to be asynchronous. At the same
//time "delete session_" should be called after RemoveAllStreams completed.
delete session_;
}
}
bool PeerConnection::Init() {
ASSERT(!initialized_);
std::vector<std::string> tokens;
talk_base::tokenize(config_, ' ', &tokens);
if (tokens.size() != kConfigTokens) {
LOG(LS_ERROR) << "Invalid config string";
return false;
}
service_type_ = SERVICE_COUNT;
// NOTE: Must be in the same order as the enum.
static const char* kValidServiceTypes[SERVICE_COUNT] = {
"STUN", "STUNS","TURN", "TURNS"
};
const std::string& type = tokens[0];
for (size_t i = 0; i < SERVICE_COUNT; ++i) {
if (type.compare(kValidServiceTypes[i]) == 0) {
service_type_ = static_cast<ServiceType>(i);
break;
}
}
if (service_type_ == SERVICE_COUNT) {
LOG(LS_ERROR) << "Invalid service type: " << type;
return false;
}
service_address_ = tokens[1];
int port;
tokens.clear();
talk_base::tokenize(service_address_, ':', &tokens);
if (tokens.size() != kConfigTokens) {
port = kDefaultStunPort;
} else {
port = atoi(tokens[1].c_str());
if (port <= 0 || port > 0xffff) {
LOG(LS_ERROR) << "Invalid port: " << tokens[1];
return false;
}
}
talk_base::SocketAddress stun_addr(tokens[0], port);
socket_factory_.reset(new talk_base::BasicPacketSocketFactory(
media_thread_.get()));
port_allocator_.reset(new cricket::BasicPortAllocator(network_manager_.get(),
stun_addr, talk_base::SocketAddress(), talk_base::SocketAddress(),
talk_base::SocketAddress()));
ASSERT(port_allocator_.get() != NULL);
port_allocator_->set_flags(cricket::PORTALLOCATOR_DISABLE_STUN |
cricket::PORTALLOCATOR_DISABLE_TCP |
cricket::PORTALLOCATOR_DISABLE_RELAY);
// create channel manager
channel_manager_.reset(new cricket::ChannelManager(media_thread_.get()));
//start the media thread
media_thread_->SetPriority(talk_base::PRIORITY_HIGH);
media_thread_->SetName("PeerConn", this);
if (!media_thread_->Start()) {
LOG(LS_ERROR) << "Failed to start media thread";
} else if (!channel_manager_->Init()) {
LOG(LS_ERROR) << "Failed to initialize the channel manager";
} if (!signaling_thread_->SetName("Session Signaling Thread", this) ||
!signaling_thread_->Start()) {
LOG(LS_ERROR) << "Failed to start session signaling thread";
} else {
initialized_ = true;
}
return initialized_;
}
void PeerConnection::RegisterObserver(PeerConnectionObserver* observer) {
// This assert is to catch cases where two observer pointers are registered.
// We only support one and if another is to be used, the current one must be
// cleared first.
ASSERT(observer == NULL || event_callback_ == NULL);
event_callback_ = observer;
}
bool PeerConnection::SignalingMessage(const std::string& signaling_message) {
// Deserialize signaling message
cricket::SessionDescription* incoming_sdp = NULL;
std::vector<cricket::Candidate> candidates;
if (!ParseJSONSignalingMessage(signaling_message, incoming_sdp, candidates))
return false;
bool ret = false;
if (!session_) {
// this will be incoming call
std::string sid;
talk_base::CreateRandomString(8, &sid);
std::string direction("r");
session_ = CreateMediaSession(sid, direction);
ASSERT(session_ != NULL);
incoming_ = true;
ret = session_->OnInitiateMessage(incoming_sdp, candidates);
} else {
ret = session_->OnRemoteDescription(incoming_sdp, candidates);
}
return ret;
}
WebRTCSessionImpl* PeerConnection::CreateMediaSession(const std::string& id,
const std::string& dir) {
WebRTCSessionImpl* session = new WebRTCSessionImpl(id, dir,
port_allocator_.get(), channel_manager_.get(), this,
signaling_thread_.get());
if (session) {
session->SignalOnRemoveStream.connect(this,
&PeerConnection::SendRemoveSignal);
}
return session;
}
void PeerConnection::SendRemoveSignal(WebRTCSessionImpl* session) {
if (event_callback_) {
std::string message;
if (GetJSONSignalingMessage(session->remote_description(),
session->local_candidates(), &message)) {
event_callback_->OnSignalingMessage(message);
}
}
}
bool PeerConnection::AddStream(const std::string& stream_id, bool video) {
if (!session_) {
// if session doesn't exist then this should be an outgoing call
std::string sid;
if (!talk_base::CreateRandomString(8, &sid) ||
(session_ = CreateMediaSession(sid, "s")) == NULL) {
ASSERT(false && "failed to initialize a session");
return false;
}
}
bool ret = false;
if (session_->HasStream(stream_id)) {
ASSERT(false && "A stream with this name already exists");
} else {
// TODO: we should ensure CreateVoiceChannel/CreateVideoChannel are called
// after transportchannel is ready
if (!video) {
ret = !session_->HasAudioStream() &&
session_->CreateP2PTransportChannel(stream_id, video) &&
session_->CreateVoiceChannel(stream_id);
} else {
ret = !session_->HasVideoStream() &&
session_->CreateP2PTransportChannel(stream_id, video) &&
session_->CreateVideoChannel(stream_id);
}
}
return ret;
}
bool PeerConnection::RemoveStream(const std::string& stream_id) {
ASSERT(session_ != NULL);
return session_->RemoveStream(stream_id);
}
void PeerConnection::OnLocalDescription(
cricket::SessionDescription* desc,
const std::vector<cricket::Candidate>& candidates) {
if (!desc) {
LOG(LS_ERROR) << "no local SDP ";
return;
}
std::string message;
if (GetJSONSignalingMessage(desc, candidates, &message)) {
if (event_callback_) {
event_callback_->OnSignalingMessage(message);
}
}
}
bool PeerConnection::SetAudioDevice(const std::string& wave_in_device,
const std::string& wave_out_device, int opts) {
return channel_manager_->SetAudioOptions(wave_in_device, wave_out_device, opts);
}
bool PeerConnection::SetLocalVideoRenderer(cricket::VideoRenderer* renderer) {
return channel_manager_->SetLocalRenderer(renderer);
}
bool PeerConnection::SetVideoRenderer(const std::string& stream_id,
cricket::VideoRenderer* renderer) {
ASSERT(session_ != NULL);
return session_->SetVideoRenderer(stream_id, renderer);
}
bool PeerConnection::SetVideoRenderer(const std::string& stream_id,
ExternalRenderer* external_renderer) {
ASSERT(session_ != NULL);
return session_->SetVideoRenderer(stream_id, external_renderer);
}
bool PeerConnection::SetVideoCapture(const std::string& cam_device) {
return channel_manager_->SetVideoOptions(cam_device);
}
bool PeerConnection::Connect() {
return session_->Initiate();
}
void PeerConnection::OnAddStream(const std::string& stream_id,
int channel_id,
bool video) {
if (event_callback_) {
event_callback_->OnAddStream(stream_id, channel_id, video);
}
}
void PeerConnection::OnRemoveStream(const std::string& stream_id,
int channel_id,
bool video) {
if (event_callback_) {
event_callback_->OnRemoveStream(stream_id, channel_id, video);
}
}
void PeerConnection::OnRtcMediaChannelCreated(const std::string& stream_id,
int channel_id,
bool video) {
if (event_callback_) {
event_callback_->OnAddStream(stream_id, channel_id, video);
}
}
void PeerConnection::Close() {
if (session_)
session_->RemoveAllStreams();
}
} // namespace webrtc
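For reference, the config string this class accepted (and which GetPeerConnectionString() in the sample presumably still produces) was "TYPE host[:port]", where TYPE is one of STUN, STUNS, TURN, or TURNS and the port defaults to 3478; that is the basis for the sketch after the conductor.h diff above.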

source/talk/app/peerconnection.h

@@ -1,152 +0,0 @@
// Copyright 2011 Google Inc. All Rights Reserved.
// Author: mallinath@google.com (Mallinath Bareddy)
#ifndef TALK_APP_WEBRTC_PEERCONNECTION_H_
#define TALK_APP_WEBRTC_PEERCONNECTION_H_
#include <string>
#include "talk/base/sigslot.h"
#include "talk/base/thread.h"
#include "talk/base/scoped_ptr.h"
#include "talk/base/basicpacketsocketfactory.h"
#include "talk/session/phone/channelmanager.h"
namespace Json {
class Value;
}
namespace cricket {
class BasicPortAllocator;
class ChannelManager;
class VideoRenderer;
}
#ifdef PLATFORM_CHROMIUM
class P2PSocketDispatcher;
#endif // PLATFORM_CHROMIUM
namespace webrtc {
class AudioDeviceModule;
class ExternalRenderer;
class WebRTCSessionImpl;
class PeerConnectionObserver {
public:
virtual void OnError() = 0;
// serialized signaling message
virtual void OnSignalingMessage(const std::string& msg) = 0;
// Triggered when a remote peer accepts a media connection.
virtual void OnAddStream(const std::string& stream_id,
int channel_id,
bool video) = 0;
// Triggered when a remote peer closes a media stream.
virtual void OnRemoveStream(const std::string& stream_id,
int channel_id,
bool video) = 0;
protected:
// Dtor protected as objects shouldn't be deleted via this interface.
~PeerConnectionObserver() {}
};
class PeerConnection : public sigslot::has_slots<> {
public:
#ifdef PLATFORM_CHROMIUM
PeerConnection(const std::string& config,
P2PSocketDispatcher* p2p_socket_dispatcher);
#else
explicit PeerConnection(const std::string& config);
#endif // PLATFORM_CHROMIUM
~PeerConnection();
bool Init();
void RegisterObserver(PeerConnectionObserver* observer);
bool SignalingMessage(const std::string& msg);
bool AddStream(const std::string& stream_id, bool video);
bool RemoveStream(const std::string& stream_id);
bool Connect();
void Close();
// TODO(ronghuawu): This section will be modified to reuse the existing libjingle APIs.
// Set Audio device
bool SetAudioDevice(const std::string& wave_in_device,
const std::string& wave_out_device, int opts);
// Set the video renderer
bool SetLocalVideoRenderer(cricket::VideoRenderer* renderer);
bool SetVideoRenderer(const std::string& stream_id,
cricket::VideoRenderer* renderer);
bool SetVideoRenderer(const std::string& stream_id,
ExternalRenderer* external_renderer);
// Set video capture device
// For Chromium the cam_device should use the capture session id.
// For standalone app, cam_device is the camera name. It will try to
// set the default capture device when cam_device is "".
bool SetVideoCapture(const std::string& cam_device);
// Access to the members
const std::string& config() const { return config_; }
bool incoming() const { return incoming_; }
talk_base::Thread* media_thread() {
return media_thread_.get();
}
#ifdef PLATFORM_CHROMIUM
P2PSocketDispatcher* p2p_socket_dispatcher() {
return p2p_socket_dispatcher_;
}
#endif // PLATFORM_CHROMIUM
// Callbacks
void OnAddStream(const std::string& stream_id, int channel_id, bool video);
void OnRemoveStream(const std::string& stream_id, int channel_id,
bool video);
void OnLocalDescription(cricket::SessionDescription* desc,
const std::vector<cricket::Candidate>& candidates);
void OnRtcMediaChannelCreated(const std::string& stream_id,
int channel_id,
bool video);
private:
void SendRemoveSignal(WebRTCSessionImpl* session);
WebRTCSessionImpl* CreateMediaSession(const std::string& id,
const std::string& dir);
std::string config_;
talk_base::scoped_ptr<talk_base::Thread> media_thread_;
talk_base::scoped_ptr<cricket::ChannelManager> channel_manager_;
talk_base::scoped_ptr<talk_base::NetworkManager> network_manager_;
talk_base::scoped_ptr<cricket::BasicPortAllocator> port_allocator_;
talk_base::scoped_ptr<talk_base::BasicPacketSocketFactory> socket_factory_;
talk_base::scoped_ptr<talk_base::Thread> signaling_thread_;
bool initialized_;
// NOTE: The order of the enum values must be in sync with the array
// in Init().
enum ServiceType {
STUN,
STUNS,
TURN,
TURNS,
SERVICE_COUNT, // Also means 'invalid'.
};
ServiceType service_type_;
std::string service_address_;
PeerConnectionObserver* event_callback_;
WebRTCSessionImpl* session_;
bool incoming_;
#ifdef PLATFORM_CHROMIUM
P2PSocketDispatcher* p2p_socket_dispatcher_;
#endif // PLATFORM_CHROMIUM
};
}
#endif /* TALK_APP_WEBRTC_PEERCONNECTION_H_ */

source/talk/app/session_test/main_wnd.cc

@@ -1,389 +0,0 @@
// Copyright 2011 Google Inc. All Rights Reserved.
// Author: tommi@google.com (Tomas Gunnarsson)
#include "talk/app/session_test/main_wnd.h"
#include "talk/base/common.h"
#include "talk/base/logging.h"
ATOM MainWnd::wnd_class_ = 0;
const wchar_t MainWnd::kClassName[] = L"WebRTC_MainWnd";
// TODO(tommi): declare in header:
std::string GetDefaultServerName();
namespace {
void CalculateWindowSizeForText(HWND wnd, const wchar_t* text,
size_t* width, size_t* height) {
HDC dc = ::GetDC(wnd);
RECT text_rc = {0};
::DrawText(dc, text, -1, &text_rc, DT_CALCRECT | DT_SINGLELINE);
::ReleaseDC(wnd, dc);
RECT client, window;
::GetClientRect(wnd, &client);
::GetWindowRect(wnd, &window);
*width = text_rc.right - text_rc.left;
*width += (window.right - window.left) -
(client.right - client.left);
*height = text_rc.bottom - text_rc.top;
*height += (window.bottom - window.top) -
(client.bottom - client.top);
}
HFONT GetDefaultFont() {
static HFONT font = reinterpret_cast<HFONT>(GetStockObject(DEFAULT_GUI_FONT));
return font;
}
std::string GetWindowText(HWND wnd) {
char text[MAX_PATH] = {0};
::GetWindowTextA(wnd, &text[0], ARRAYSIZE(text));
return text;
}
void AddListBoxItem(HWND listbox, const std::string& str, LPARAM item_data) {
LRESULT index = ::SendMessageA(listbox, LB_ADDSTRING, 0,
reinterpret_cast<LPARAM>(str.c_str()));
::SendMessageA(listbox, LB_SETITEMDATA, index, item_data);
}
} // namespace
MainWnd::MainWnd()
: ui_(CONNECT_TO_SERVER), wnd_(NULL), edit1_(NULL), edit2_(NULL),
label1_(NULL), label2_(NULL), button_(NULL), listbox_(NULL),
destroyed_(false), callback_(NULL), nested_msg_(NULL) {
}
MainWnd::~MainWnd() {
ASSERT(!IsWindow());
}
bool MainWnd::Create() {
ASSERT(wnd_ == NULL);
if (!RegisterWindowClass())
return false;
wnd_ = ::CreateWindowExW(WS_EX_OVERLAPPEDWINDOW, kClassName, L"WebRTC",
WS_OVERLAPPEDWINDOW | WS_VISIBLE | WS_CLIPCHILDREN,
CW_USEDEFAULT, CW_USEDEFAULT, CW_USEDEFAULT, CW_USEDEFAULT,
NULL, NULL, GetModuleHandle(NULL), this);
::SendMessage(wnd_, WM_SETFONT, reinterpret_cast<WPARAM>(GetDefaultFont()),
TRUE);
CreateChildWindows();
SwitchToConnectUI();
return wnd_ != NULL;
}
bool MainWnd::Destroy() {
BOOL ret = FALSE;
if (IsWindow()) {
ret = ::DestroyWindow(wnd_);
}
return ret != FALSE;
}
void MainWnd::RegisterObserver(MainWndCallback* callback) {
callback_ = callback;
}
bool MainWnd::IsWindow() const {
return wnd_ && ::IsWindow(wnd_) != FALSE;
}
bool MainWnd::PreTranslateMessage(MSG* msg) {
bool ret = false;
if (msg->message == WM_CHAR) {
if (msg->wParam == VK_TAB) {
HandleTabbing();
ret = true;
} else if (msg->wParam == VK_RETURN) {
OnDefaultAction();
ret = true;
} else if (msg->wParam == VK_ESCAPE) {
if (callback_) {
if (ui_ == STREAMING) {
callback_->DisconnectFromCurrentPeer();
} else {
callback_->DisconnectFromServer();
}
}
}
}
return ret;
}
void MainWnd::SwitchToConnectUI() {
ASSERT(IsWindow());
LayoutPeerListUI(false);
ui_ = CONNECT_TO_SERVER;
LayoutConnectUI(true);
::SetFocus(edit1_);
}
void MainWnd::SwitchToPeerList(const Peers& peers) {
LayoutConnectUI(false);
::SendMessage(listbox_, LB_RESETCONTENT, 0, 0);
AddListBoxItem(listbox_, "List of currently connected peers:", -1);
Peers::const_iterator i = peers.begin();
for (; i != peers.end(); ++i)
AddListBoxItem(listbox_, i->second.c_str(), i->first);
ui_ = LIST_PEERS;
LayoutPeerListUI(true);
}
void MainWnd::SwitchToStreamingUI() {
LayoutConnectUI(false);
LayoutPeerListUI(false);
ui_ = STREAMING;
}
void MainWnd::OnPaint() {
PAINTSTRUCT ps;
::BeginPaint(handle(), &ps);
RECT rc;
::GetClientRect(handle(), &rc);
HBRUSH brush = ::CreateSolidBrush(::GetSysColor(COLOR_WINDOW));
::FillRect(ps.hdc, &rc, brush);
::DeleteObject(brush);
::EndPaint(handle(), &ps);
}
void MainWnd::OnDestroyed() {
PostQuitMessage(0);
}
void MainWnd::OnDefaultAction() {
if (!callback_)
return;
if (ui_ == CONNECT_TO_SERVER) {
std::string server(GetWindowText(edit1_));
std::string port_str(GetWindowText(edit2_));
int port = port_str.length() ? atoi(port_str.c_str()) : 0;
callback_->StartLogin(server, port);
} else if (ui_ == LIST_PEERS) {
LRESULT sel = ::SendMessage(listbox_, LB_GETCURSEL, 0, 0);
if (sel != LB_ERR) {
LRESULT peer_id = ::SendMessage(listbox_, LB_GETITEMDATA, sel, 0);
if (peer_id != -1 && callback_) {
callback_->ConnectToPeer(peer_id);
}
}
} else {
MessageBoxA(wnd_, "OK!", "Yeah", MB_OK);
}
}
bool MainWnd::OnMessage(UINT msg, WPARAM wp, LPARAM lp, LRESULT* result) {
switch (msg) {
case WM_ERASEBKGND:
*result = TRUE;
return true;
case WM_PAINT:
OnPaint();
return true;
case WM_SETFOCUS:
if (ui_ == CONNECT_TO_SERVER) {
SetFocus(edit1_);
}
return true;
case WM_SIZE:
if (ui_ == CONNECT_TO_SERVER) {
LayoutConnectUI(true);
} else if (ui_ == LIST_PEERS) {
LayoutPeerListUI(true);
}
break;
case WM_CTLCOLORSTATIC:
*result = reinterpret_cast<LRESULT>(GetSysColorBrush(COLOR_WINDOW));
return true;
case WM_COMMAND:
if (button_ == reinterpret_cast<HWND>(lp)) {
if (BN_CLICKED == HIWORD(wp))
OnDefaultAction();
} else if (listbox_ == reinterpret_cast<HWND>(lp)) {
if (LBN_DBLCLK == HIWORD(wp)) {
OnDefaultAction();
}
}
return true;
}
return false;
}
// static
LRESULT CALLBACK MainWnd::WndProc(HWND hwnd, UINT msg, WPARAM wp, LPARAM lp) {
MainWnd* me = reinterpret_cast<MainWnd*>(
::GetWindowLongPtr(hwnd, GWLP_USERDATA));
if (!me && WM_CREATE == msg) {
CREATESTRUCT* cs = reinterpret_cast<CREATESTRUCT*>(lp);
me = reinterpret_cast<MainWnd*>(cs->lpCreateParams);
me->wnd_ = hwnd;
::SetWindowLongPtr(hwnd, GWLP_USERDATA, reinterpret_cast<LONG_PTR>(me));
}
LRESULT result = 0;
if (me) {
void* prev_nested_msg = me->nested_msg_;
me->nested_msg_ = &msg;
bool handled = me->OnMessage(msg, wp, lp, &result);
if (WM_NCDESTROY == msg) {
me->destroyed_ = true;
} else if (!handled) {
result = ::DefWindowProc(hwnd, msg, wp, lp);
}
if (me->destroyed_ && prev_nested_msg == NULL) {
me->OnDestroyed();
me->wnd_ = NULL;
me->destroyed_ = false;
}
me->nested_msg_ = prev_nested_msg;
} else {
result = ::DefWindowProc(hwnd, msg, wp, lp);
}
return result;
}
// static
bool MainWnd::RegisterWindowClass() {
if (wnd_class_)
return true;
WNDCLASSEX wcex = { sizeof(WNDCLASSEX) };
wcex.style = CS_DBLCLKS;
wcex.hInstance = GetModuleHandle(NULL);
wcex.hbrBackground = reinterpret_cast<HBRUSH>(COLOR_WINDOW + 1);
wcex.hCursor = ::LoadCursor(NULL, IDC_ARROW);
wcex.lpfnWndProc = &WndProc;
wcex.lpszClassName = kClassName;
wnd_class_ = ::RegisterClassEx(&wcex);
ASSERT(wnd_class_);
return wnd_class_ != 0;
}
void MainWnd::CreateChildWindow(HWND* wnd, MainWnd::ChildWindowID id,
const wchar_t* class_name, DWORD control_style,
DWORD ex_style) {
if (::IsWindow(*wnd))
return;
// Child windows are invisible at first, and shown after being resized.
DWORD style = WS_CHILD | control_style;
*wnd = ::CreateWindowEx(ex_style, class_name, L"", style,
100, 100, 100, 100, wnd_,
reinterpret_cast<HMENU>(id),
GetModuleHandle(NULL), NULL);
ASSERT(::IsWindow(*wnd));
::SendMessage(*wnd, WM_SETFONT, reinterpret_cast<WPARAM>(GetDefaultFont()),
TRUE);
}
void MainWnd::CreateChildWindows() {
// Create the child windows in tab order.
CreateChildWindow(&label1_, LABEL1_ID, L"Static", ES_CENTER | ES_READONLY, 0);
CreateChildWindow(&edit1_, EDIT_ID, L"Edit",
ES_LEFT | ES_NOHIDESEL | WS_TABSTOP, WS_EX_CLIENTEDGE);
CreateChildWindow(&label2_, LABEL2_ID, L"Static", ES_CENTER | ES_READONLY, 0);
CreateChildWindow(&edit2_, EDIT_ID, L"Edit",
ES_LEFT | ES_NOHIDESEL | WS_TABSTOP, WS_EX_CLIENTEDGE);
CreateChildWindow(&button_, BUTTON_ID, L"Button", BS_CENTER | WS_TABSTOP, 0);
CreateChildWindow(&listbox_, LISTBOX_ID, L"ListBox",
LBS_HASSTRINGS | LBS_NOTIFY, WS_EX_CLIENTEDGE);
::SetWindowTextA(edit1_, GetDefaultServerName().c_str());
::SetWindowTextA(edit2_, "8888");
}
void MainWnd::LayoutConnectUI(bool show) {
struct Windows {
HWND wnd;
const wchar_t* text;
size_t width;
size_t height;
} windows[] = {
{ label1_, L"Server" },
{ edit1_, L"XXXyyyYYYgggXXXyyyYYYggg" },
{ label2_, L":" },
{ edit2_, L"XyXyX" },
{ button_, L"Connect" },
};
if (show) {
const size_t kSeparator = 5;
size_t total_width = (ARRAYSIZE(windows) - 1) * kSeparator;
for (size_t i = 0; i < ARRAYSIZE(windows); ++i) {
CalculateWindowSizeForText(windows[i].wnd, windows[i].text,
&windows[i].width, &windows[i].height);
total_width += windows[i].width;
}
RECT rc;
::GetClientRect(wnd_, &rc);
size_t x = (rc.right / 2) - (total_width / 2);
size_t y = rc.bottom / 2;
for (size_t i = 0; i < ARRAYSIZE(windows); ++i) {
size_t top = y - (windows[i].height / 2);
::MoveWindow(windows[i].wnd, x, top, windows[i].width, windows[i].height,
TRUE);
x += kSeparator + windows[i].width;
if (windows[i].text[0] != 'X')
::SetWindowText(windows[i].wnd, windows[i].text);
::ShowWindow(windows[i].wnd, SW_SHOWNA);
}
} else {
for (size_t i = 0; i < ARRAYSIZE(windows); ++i) {
::ShowWindow(windows[i].wnd, SW_HIDE);
}
}
}
void MainWnd::LayoutPeerListUI(bool show) {
if (show) {
RECT rc;
::GetClientRect(wnd_, &rc);
::MoveWindow(listbox_, 0, 0, rc.right, rc.bottom, TRUE);
::ShowWindow(listbox_, SW_SHOWNA);
} else {
::ShowWindow(listbox_, SW_HIDE);
}
}
void MainWnd::HandleTabbing() {
bool shift = ((::GetAsyncKeyState(VK_SHIFT) & 0x8000) != 0);
UINT next_cmd = shift ? GW_HWNDPREV : GW_HWNDNEXT;
UINT loop_around_cmd = shift ? GW_HWNDLAST : GW_HWNDFIRST;
HWND focus = GetFocus(), next;
do {
next = ::GetWindow(focus, next_cmd);
if (IsWindowVisible(next) &&
(GetWindowLong(next, GWL_STYLE) & WS_TABSTOP)) {
break;
}
if (!next) {
next = ::GetWindow(focus, loop_around_cmd);
if (IsWindowVisible(next) &&
(GetWindowLong(next, GWL_STYLE) & WS_TABSTOP)) {
break;
}
}
focus = next;
} while (true);
::SetFocus(next);
}
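A minimal usage sketch for the window class above (the callback type MyCallback is a placeholder for any MainWndCallback implementation; error handling elided). It mirrors the message loop the test app below actually runs:

MainWnd wnd;
if (wnd.Create()) {
  MyCallback cb;                  // implements MainWndCallback
  wnd.RegisterObserver(&cb);      // UI events (login, peer pick) go to cb
  MSG msg;
  while (::GetMessage(&msg, NULL, 0, 0)) {
    if (!wnd.PreTranslateMessage(&msg)) {
      ::TranslateMessage(&msg);
      ::DispatchMessage(&msg);
    }
  }
}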

View File

@ -1,96 +0,0 @@
// Copyright 2011 Google Inc. All Rights Reserved.
// Author: tommi@google.com (Tomas Gunnarsson)
#ifndef TALK_APP_SESSION_TEST_MAIN_WND_H_
#define TALK_APP_SESSION_TEST_MAIN_WND_H_
#pragma once
#include "talk/base/win32.h"
#include <map>
// TODO(tommi): Move to same header as PeerConnectionClient.
typedef std::map<int, std::string> Peers;
class MainWndCallback {
public:
virtual void StartLogin(const std::string& server, int port) = 0;
virtual void DisconnectFromServer() = 0;
virtual void ConnectToPeer(int peer_id) = 0;
virtual void DisconnectFromCurrentPeer() = 0;
};
class MainWnd {
public:
static const wchar_t kClassName[];
enum UI {
CONNECT_TO_SERVER,
LIST_PEERS,
STREAMING,
};
MainWnd();
~MainWnd();
bool Create();
bool Destroy();
bool IsWindow() const;
void RegisterObserver(MainWndCallback* callback);
bool PreTranslateMessage(MSG* msg);
void SwitchToConnectUI();
void SwitchToPeerList(const Peers& peers);
void SwitchToStreamingUI();
HWND handle() const { return wnd_; }
UI current_ui() const { return ui_; }
protected:
enum ChildWindowID {
EDIT_ID = 1,
BUTTON_ID,
LABEL1_ID,
LABEL2_ID,
LISTBOX_ID,
};
void OnPaint();
void OnDestroyed();
void OnDefaultAction();
bool OnMessage(UINT msg, WPARAM wp, LPARAM lp, LRESULT* result);
static LRESULT CALLBACK WndProc(HWND hwnd, UINT msg, WPARAM wp, LPARAM lp);
static bool RegisterWindowClass();
void CreateChildWindow(HWND* wnd, ChildWindowID id, const wchar_t* class_name,
DWORD control_style, DWORD ex_style);
void CreateChildWindows();
void LayoutConnectUI(bool show);
void LayoutPeerListUI(bool show);
void HandleTabbing();
private:
UI ui_;
HWND wnd_;
HWND edit1_;
HWND edit2_;
HWND label1_;
HWND label2_;
HWND button_;
HWND listbox_;
bool destroyed_;
void* nested_msg_;
MainWndCallback* callback_;
static ATOM wnd_class_;
};
#endif // TALK_APP_SESSION_TEST_MAIN_WND_H_
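For reference, a do-nothing MainWndCallback implementation would look like the sketch below; the class name is an assumption, and it exists only to show the pure-virtual contract declared above:

class NullCallback : public MainWndCallback {
 public:
  virtual void StartLogin(const std::string& server, int port) {}
  virtual void DisconnectFromServer() {}
  virtual void ConnectToPeer(int peer_id) {}
  virtual void DisconnectFromCurrentPeer() {}
};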

View File

@ -1,870 +0,0 @@
// Copyright 2011 Google Inc. All Rights Reserved.
// Author: tommi@google.com (Tomas Gunnarsson)
// This may not look like much but it has already uncovered several issues.
// In the future this will be a p2p reference app for the webrtc API along
// with a separate simple server implementation.
#include "talk/base/win32.h" // Must be first
#include <map>
#include "talk/base/scoped_ptr.h"
#include "talk/base/win32socketinit.cc"
#include "talk/base/win32socketserver.h" // For Win32Socket
#include "talk/base/win32socketserver.cc" // For Win32Socket
#include "modules/audio_device/main/interface/audio_device.h"
#include "modules/video_capture/main/interface/video_capture.h"
#include "system_wrappers/source/trace_impl.h"
#include "talk/app/peerconnection.h"
#include "talk/app/session_test/main_wnd.h"
#include "talk/base/logging.h"
#include "talk/session/phone/videorendererfactory.h"
static const char kAudioLabel[] = "audio_label";
static const char kVideoLabel[] = "video_label";
const unsigned short kDefaultServerPort = 8888;
using talk_base::scoped_ptr;
using webrtc::AudioDeviceModule;
using webrtc::PeerConnection;
using webrtc::PeerConnectionObserver;
std::string GetEnvVarOrDefault(const char* env_var_name,
const char* default_value) {
std::string value;
const char* env_var = getenv(env_var_name);
if (env_var)
value = env_var;
if (value.empty())
value = default_value;
return value;
}
std::string GetPeerConnectionString() {
return GetEnvVarOrDefault("WEBRTC_CONNECT", "STUN stun.l.google.com:19302");
}
std::string GetDefaultServerName() {
return GetEnvVarOrDefault("WEBRTC_SERVER", "localhost");
}
std::string GetPeerName() {
char computer_name[MAX_PATH] = {0}, user_name[MAX_PATH] = {0};
DWORD size = ARRAYSIZE(computer_name);
::GetComputerNameA(computer_name, &size);
size = ARRAYSIZE(user_name);
::GetUserNameA(user_name, &size);
std::string ret(user_name);
ret += '@';
ret += computer_name;
return ret;
}
struct PeerConnectionClientObserver {
virtual void OnSignedIn() = 0;  // Called when we're signed in.
virtual void OnDisconnected() = 0;
virtual void OnPeerConnected(int id, const std::string& name) = 0;
virtual void OnPeerDisconnected(int id, const std::string& name) = 0;
virtual void OnMessageFromPeer(int peer_id, const std::string& message) = 0;
};
class PeerConnectionClient : public sigslot::has_slots<> {
public:
enum State {
NOT_CONNECTED,
SIGNING_IN,
CONNECTED,
SIGNING_OUT_WAITING,
SIGNING_OUT,
};
PeerConnectionClient() : callback_(NULL), my_id_(-1), state_(NOT_CONNECTED) {
control_socket_.SignalCloseEvent.connect(this,
&PeerConnectionClient::OnClose);
hanging_get_.SignalCloseEvent.connect(this,
&PeerConnectionClient::OnClose);
control_socket_.SignalConnectEvent.connect(this,
&PeerConnectionClient::OnConnect);
hanging_get_.SignalConnectEvent.connect(this,
&PeerConnectionClient::OnHangingGetConnect);
control_socket_.SignalReadEvent.connect(this,
&PeerConnectionClient::OnRead);
hanging_get_.SignalReadEvent.connect(this,
&PeerConnectionClient::OnHangingGetRead);
}
~PeerConnectionClient() {
}
int id() const {
return my_id_;
}
bool is_connected() const {
return my_id_ != -1;
}
const Peers& peers() const {
return peers_;
}
void RegisterObserver(PeerConnectionClientObserver* callback) {
ASSERT(!callback_);
callback_ = callback;
}
bool Connect(const std::string& server, int port,
const std::string& client_name) {
ASSERT(!server.empty());
ASSERT(!client_name.empty());
ASSERT(state_ == NOT_CONNECTED);
if (server.empty() || client_name.empty())
return false;
if (port <= 0)
port = kDefaultServerPort;
server_address_.SetIP(server);
server_address_.SetPort(port);
if (server_address_.IsUnresolved()) {
hostent* h = gethostbyname(server_address_.IPAsString().c_str());
if (!h) {
LOG(LS_ERROR) << "Failed to resolve host name: "
<< server_address_.IPAsString();
return false;
} else {
server_address_.SetResolvedIP(
ntohl(*reinterpret_cast<uint32*>(h->h_addr_list[0])));
}
}
char buffer[1024];
wsprintfA(buffer, "GET /sign_in?%s HTTP/1.0\r\n\r\n", client_name.c_str());
onconnect_data_ = buffer;
bool ret = ConnectControlSocket();
if (ret)
state_ = SIGNING_IN;
return ret;
}
bool SendToPeer(int peer_id, const std::string& message) {
if (state_ != CONNECTED)
return false;
ASSERT(is_connected());
ASSERT(control_socket_.GetState() == talk_base::Socket::CS_CLOSED);
if (!is_connected() || peer_id == -1)
return false;
char headers[1024];
wsprintfA(headers, "POST /message?peer_id=%i&to=%i HTTP/1.0\r\n"
"Content-Length: %i\r\n"
"Content-Type: text/plain\r\n"
"\r\n",
my_id_, peer_id, message.length());
onconnect_data_ = headers;
onconnect_data_ += message;
return ConnectControlSocket();
}
bool SignOut() {
if (state_ == NOT_CONNECTED || state_ == SIGNING_OUT)
return true;
if (hanging_get_.GetState() != talk_base::Socket::CS_CLOSED)
hanging_get_.Close();
if (control_socket_.GetState() == talk_base::Socket::CS_CLOSED) {
ASSERT(my_id_ != -1);
state_ = SIGNING_OUT;
char buffer[1024];
wsprintfA(buffer, "GET /sign_out?peer_id=%i HTTP/1.0\r\n\r\n", my_id_);
onconnect_data_ = buffer;
return ConnectControlSocket();
} else {
state_ = SIGNING_OUT_WAITING;
}
return true;
}
protected:
void Close() {
control_socket_.Close();
hanging_get_.Close();
onconnect_data_.clear();
peers_.clear();
my_id_ = -1;
state_ = NOT_CONNECTED;
}
bool ConnectControlSocket() {
ASSERT(control_socket_.GetState() == talk_base::Socket::CS_CLOSED);
int err = control_socket_.Connect(server_address_);
if (err == SOCKET_ERROR) {
Close();
return false;
}
return true;
}
void OnConnect(talk_base::AsyncSocket* socket) {
ASSERT(!onconnect_data_.empty());
int sent = socket->Send(onconnect_data_.c_str(), onconnect_data_.length());
ASSERT(sent == onconnect_data_.length());
onconnect_data_.clear();
}
void OnHangingGetConnect(talk_base::AsyncSocket* socket) {
char buffer[1024];
wsprintfA(buffer, "GET /wait?peer_id=%i HTTP/1.0\r\n\r\n", my_id_);
int len = lstrlenA(buffer);
int sent = socket->Send(buffer, len);
ASSERT(sent == len);
}
// Quick and dirty support for parsing HTTP header values.
bool GetHeaderValue(const std::string& data, size_t eoh,
const char* header_pattern, size_t* value) {
ASSERT(value);
size_t found = data.find(header_pattern);
if (found != std::string::npos && found < eoh) {
*value = atoi(&data[found + lstrlenA(header_pattern)]);
return true;
}
return false;
}
bool GetHeaderValue(const std::string& data, size_t eoh,
const char* header_pattern, std::string* value) {
ASSERT(value);
size_t found = data.find(header_pattern);
if (found != std::string::npos && found < eoh) {
size_t begin = found + lstrlenA(header_pattern);
size_t end = data.find("\r\n", begin);
if (end == std::string::npos)
end = eoh;
value->assign(data.substr(begin, end - begin));
return true;
}
return false;
}
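// Example (illustrative input only): given data ==
// "HTTP/1.0 200 OK\r\nPragma: 42\r\nContent-Length: 5\r\n\r\nhello",
// GetHeaderValue(data, eoh, "\r\nPragma: ", &id) sets id to 42 and
// GetHeaderValue(data, eoh, "\r\nContent-Length: ", &len) sets len to 5.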
// Returns true if the whole response has been read.
bool ReadIntoBuffer(talk_base::AsyncSocket* socket, std::string* data,
size_t* content_length) {
LOG(INFO) << __FUNCTION__;
char buffer[0xffff];
do {
int bytes = socket->Recv(buffer, sizeof(buffer));
if (bytes <= 0)
break;
data->append(buffer, bytes);
} while (true);
bool ret = false;
size_t i = data->find("\r\n\r\n");
if (i != std::string::npos) {
LOG(INFO) << "Headers received";
const char kContentLengthHeader[] = "\r\nContent-Length: ";
if (GetHeaderValue(*data, i, "\r\nContent-Length: ", content_length)) {
LOG(INFO) << "Expecting " << *content_length << " bytes.";
size_t total_response_size = (i + 4) + *content_length;
if (data->length() >= total_response_size) {
ret = true;
std::string should_close;
const char kConnection[] = "\r\nConnection: ";
if (GetHeaderValue(*data, i, kConnection, &should_close) &&
should_close.compare("close") == 0) {
socket->Close();
}
} else {
// We haven't received everything. Just continue to accept data.
}
} else {
LOG(LS_ERROR) << "No content length field specified by the server.";
}
}
return ret;
}
void OnRead(talk_base::AsyncSocket* socket) {
LOG(INFO) << __FUNCTION__;
size_t content_length = 0;
if (ReadIntoBuffer(socket, &control_data_, &content_length)) {
size_t peer_id = 0, eoh = 0;
bool ok = ParseServerResponse(control_data_, content_length, &peer_id,
&eoh);
if (ok) {
if (my_id_ == -1) {
// First response. Let's store our server-assigned ID.
ASSERT(state_ == SIGNING_IN);
my_id_ = peer_id;
ASSERT(my_id_ != -1);
// The body of the response will be a list of already connected peers.
if (content_length) {
size_t pos = eoh + 4;
while (pos < control_data_.size()) {
size_t eol = control_data_.find('\n', pos);
if (eol == std::string::npos)
break;
int id = 0;
std::string name;
bool connected;
if (ParseEntry(control_data_.substr(pos, eol - pos), &name, &id,
&connected) && id != my_id_) {
peers_[id] = name;
callback_->OnPeerConnected(id, name);
}
pos = eol + 1;
}
}
ASSERT(is_connected());
callback_->OnSignedIn();
} else if (state_ == SIGNING_OUT) {
Close();
callback_->OnDisconnected();
} else if (state_ == SIGNING_OUT_WAITING) {
SignOut();
}
}
control_data_.clear();
if (state_ == SIGNING_IN) {
ASSERT(hanging_get_.GetState() == talk_base::Socket::CS_CLOSED);
state_ = CONNECTED;
hanging_get_.Connect(server_address_);
}
}
}
void OnHangingGetRead(talk_base::AsyncSocket* socket) {
LOG(INFO) << __FUNCTION__;
size_t content_length = 0;
if (ReadIntoBuffer(socket, &notification_data_, &content_length)) {
size_t peer_id = 0, eoh = 0;
bool ok = ParseServerResponse(notification_data_, content_length,
&peer_id, &eoh);
if (ok) {
// Store the position where the body begins.
size_t pos = eoh + 4;
if (my_id_ == peer_id) {
// A notification about a new member or a member that just
// disconnected.
int id = 0;
std::string name;
bool connected = false;
if (ParseEntry(notification_data_.substr(pos), &name, &id,
&connected)) {
if (connected) {
peers_[id] = name;
callback_->OnPeerConnected(id, name);
} else {
peers_.erase(id);
callback_->OnPeerDisconnected(id, name);
}
}
} else {
callback_->OnMessageFromPeer(peer_id,
notification_data_.substr(pos));
}
}
notification_data_.clear();
}
if (hanging_get_.GetState() == talk_base::Socket::CS_CLOSED &&
state_ == CONNECTED) {
hanging_get_.Connect(server_address_);
}
}
// Parses a single line entry in the form "<name>,<id>,<connected>"
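// e.g. ParseEntry("renoir@host,3,1", &name, &id, &connected) yields
// name == "renoir@host", id == 3, connected == true (values are
// illustrative only).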
bool ParseEntry(const std::string& entry, std::string* name, int* id,
bool* connected) {
ASSERT(name);
ASSERT(id);
ASSERT(connected);
ASSERT(entry.length());
*connected = false;
size_t separator = entry.find(',');
if (separator != std::string::npos) {
*id = atoi(&entry[separator + 1]);
name->assign(entry.substr(0, separator));
separator = entry.find(',', separator + 1);
if (separator != std::string::npos) {
*connected = atoi(&entry[separator + 1]) ? true : false;
}
}
return !name->empty();
}
int GetResponseStatus(const std::string& response) {
int status = -1;
size_t pos = response.find(' ');
if (pos != std::string::npos)
status = atoi(&response[pos + 1]);
return status;
}
bool ParseServerResponse(const std::string& response, size_t content_length,
size_t* peer_id, size_t* eoh) {
LOG(INFO) << response;
int status = GetResponseStatus(response.c_str());
if (status != 200) {
LOG(LS_ERROR) << "Received error from server";
Close();
callback_->OnDisconnected();
return false;
}
*eoh = response.find("\r\n\r\n");
ASSERT(*eoh != std::string::npos);
if (*eoh == std::string::npos)
return false;
*peer_id = -1;
// See comment in peer_channel.cc for why we use the Pragma header and
// not e.g. "X-Peer-Id".
GetHeaderValue(response, *eoh, "\r\nPragma: ", peer_id);
return true;
}
void OnClose(talk_base::AsyncSocket* socket, int err) {
LOG(INFO) << __FUNCTION__;
socket->Close();
if (err != WSAECONNREFUSED) {
if (socket == &hanging_get_) {
if (state_ == CONNECTED) {
LOG(INFO) << "Issuing a new hanging get";
hanging_get_.Close();
hanging_get_.Connect(server_address_);
}
}
} else {
// Failed to connect to the server.
Close();
callback_->OnDisconnected();
}
}
PeerConnectionClientObserver* callback_;
talk_base::SocketAddress server_address_;
talk_base::Win32Socket control_socket_;
talk_base::Win32Socket hanging_get_;
std::string onconnect_data_;
std::string control_data_;
std::string notification_data_;
Peers peers_;
State state_;
int my_id_;
};
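// Typical use of PeerConnectionClient (sketch): register an observer,
// call Connect(server, port, GetPeerName()), and keep pumping the Win32
// message loop; OnSignedIn and OnMessageFromPeer fire as the async
// sockets complete. ConnectionObserver below is exactly such an observer.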
class ConnectionObserver
: public PeerConnectionObserver,
public PeerConnectionClientObserver,
public MainWndCallback,
public talk_base::Win32Window {
public:
enum WindowMessages {
MEDIA_CHANNELS_INITIALIZED = WM_APP + 1,
PEER_CONNECTION_CLOSED,
SEND_MESSAGE_TO_PEER,
};
enum HandshakeState {
NONE,
INITIATOR,
ANSWER_RECEIVED,
OFFER_RECEIVED,
QUIT_SENT,
};
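// Handshake transitions, in brief: the caller goes NONE -> INITIATOR
// (ConnectToPeer) -> ANSWER_RECEIVED; the callee goes NONE ->
// OFFER_RECEIVED (OnMessageFromPeer). Both sides end in QUIT_SENT once
// a shutdown message has been sent or received.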
ConnectionObserver(PeerConnectionClient* client,
MainWnd* main_wnd)
: handshake_(NONE),
waiting_for_audio_(false),
waiting_for_video_(false),
peer_id_(-1),
video_channel_(-1),
audio_channel_(-1),
client_(client),
main_wnd_(main_wnd) {
// Create a window for posting notifications back to from other threads.
bool ok = Create(HWND_MESSAGE, L"ConnectionObserver", 0, 0, 0, 0, 0, 0);
ASSERT(ok);
client_->RegisterObserver(this);
main_wnd->RegisterObserver(this);
}
~ConnectionObserver() {
ASSERT(peer_connection_.get() == NULL);
Destroy();
DeletePeerConnection();
}
bool has_video() const {
return video_channel_ != -1;
}
bool has_audio() const {
return audio_channel_ != -1;
}
bool connection_active() const {
return peer_connection_.get() != NULL;
}
void Close() {
if (peer_connection_.get()) {
peer_connection_->Close();
} else {
client_->SignOut();
}
}
protected:
bool InitializePeerConnection() {
ASSERT(peer_connection_.get() == NULL);
peer_connection_.reset(new PeerConnection(GetPeerConnectionString()));
peer_connection_->RegisterObserver(this);
if (!peer_connection_->Init()) {
DeletePeerConnection();
} else {
bool audio = peer_connection_->SetAudioDevice("", "", 0);
LOG(INFO) << "SetAudioDevice " << (audio ? "succeeded." : "failed.");
}
return peer_connection_.get() != NULL;
}
void DeletePeerConnection() {
peer_connection_.reset();
handshake_ = NONE;
}
void StartCaptureDevice() {
ASSERT(peer_connection_.get());
if (main_wnd_->IsWindow()) {
main_wnd_->SwitchToStreamingUI();
if (peer_connection_->SetVideoCapture("")) {
if (!local_renderer_.get()) {
local_renderer_.reset(
cricket::VideoRendererFactory::CreateGuiVideoRenderer(176, 144));
}
peer_connection_->SetLocalVideoRenderer(local_renderer_.get());
} else {
ASSERT(false);
}
}
}
//
// PeerConnectionObserver implementation.
//
virtual void OnError() {
LOG(INFO) << __FUNCTION__;
ASSERT(false);
}
virtual void OnSignalingMessage(const std::string& msg) {
LOG(INFO) << __FUNCTION__;
bool shutting_down = (video_channel_ == -1 && audio_channel_ == -1);
if (handshake_ == OFFER_RECEIVED && !shutting_down)
StartCaptureDevice();
// Send our answer/offer/shutting down message.
// If we're the initiator, this will be our offer. If we just received
// an offer, this will be an answer. If PeerConnection::Close has been
// called, then this is our signal to the other end that we're shutting
// down.
if (handshake_ != QUIT_SENT) {
SendMessage(handle(), SEND_MESSAGE_TO_PEER, 0,
reinterpret_cast<LPARAM>(&msg));
}
if (shutting_down) {
handshake_ = QUIT_SENT;
PostMessage(handle(), PEER_CONNECTION_CLOSED, 0, 0);
}
}
// Called when a remote stream is added
virtual void OnAddStream(const std::string& stream_id, int channel_id,
bool video) {
LOG(INFO) << __FUNCTION__ << " " << stream_id;
bool send_notification = (waiting_for_video_ || waiting_for_audio_);
if (video) {
ASSERT(video_channel_ == -1);
video_channel_ = channel_id;
waiting_for_video_ = false;
LOG(INFO) << "Setting video renderer for channel: " << channel_id;
if (!remote_renderer_.get()) {
remote_renderer_.reset(
cricket::VideoRendererFactory::CreateGuiVideoRenderer(352, 288));
}
bool ok = peer_connection_->SetVideoRenderer(stream_id,
remote_renderer_.get());
ASSERT(ok);
} else {
ASSERT(audio_channel_ == -1);
audio_channel_ = channel_id;
waiting_for_audio_ = false;
}
if (send_notification && !waiting_for_audio_ && !waiting_for_video_)
PostMessage(handle(), MEDIA_CHANNELS_INITIALIZED, 0, 0);
}
virtual void OnRemoveStream(const std::string& stream_id,
int channel_id,
bool video) {
LOG(INFO) << __FUNCTION__;
if (video) {
ASSERT(channel_id == video_channel_);
video_channel_ = -1;
} else {
ASSERT(channel_id == audio_channel_);
audio_channel_ = -1;
}
}
//
// PeerConnectionClientObserver implementation.
//
virtual void OnSignedIn() {
LOG(INFO) << __FUNCTION__;
main_wnd_->SwitchToPeerList(client_->peers());
}
virtual void OnDisconnected() {
LOG(INFO) << __FUNCTION__;
if (peer_connection_.get()) {
peer_connection_->Close();
} else if (main_wnd_->IsWindow()) {
main_wnd_->SwitchToConnectUI();
}
}
virtual void OnPeerConnected(int id, const std::string& name) {
LOG(INFO) << __FUNCTION__;
// Refresh the list if we're showing it.
if (main_wnd_->current_ui() == MainWnd::LIST_PEERS)
main_wnd_->SwitchToPeerList(client_->peers());
}
virtual void OnPeerDisconnected(int id, const std::string& name) {
LOG(INFO) << __FUNCTION__;
if (id == peer_id_) {
LOG(INFO) << "Our peer disconnected";
peer_id_ = -1;
// TODO: Somehow make sure that Close has been called?
if (peer_connection_.get())
peer_connection_->Close();
}
// Refresh the list if we're showing it.
if (main_wnd_->current_ui() == MainWnd::LIST_PEERS)
main_wnd_->SwitchToPeerList(client_->peers());
}
virtual void OnMessageFromPeer(int peer_id, const std::string& message) {
ASSERT(peer_id_ == peer_id || peer_id_ == -1);
if (handshake_ == NONE) {
handshake_ = OFFER_RECEIVED;
peer_id_ = peer_id;
if (!peer_connection_.get()) {
// Got an offer. Give it to the PeerConnection instance.
// Once processed, we will get a callback to OnSignalingMessage with
// our 'answer' which we'll send to the peer.
LOG(INFO) << "Got an offer from our peer: " << peer_id;
if (!InitializePeerConnection()) {
LOG(LS_ERROR) << "Failed to initialize our PeerConnection instance";
client_->SignOut();
return;
}
}
} else if (handshake_ == INITIATOR) {
LOG(INFO) << "Remote peer sent us an answer";
handshake_ = ANSWER_RECEIVED;
} else {
LOG(INFO) << "Remote peer is disconnecting";
handshake_ = QUIT_SENT;
}
peer_connection_->SignalingMessage(message);
if (handshake_ == QUIT_SENT) {
DisconnectFromCurrentPeer();
}
}
//
// MainWndCallback implementation.
//
virtual void StartLogin(const std::string& server, int port) {
ASSERT(!client_->is_connected());
if (!client_->Connect(server, port, GetPeerName())) {
MessageBoxA(main_wnd_->handle(),
("Failed to connect to " + server).c_str(),
"Error", MB_OK | MB_ICONERROR);
}
}
virtual void DisconnectFromServer() {
if (!client_->is_connected())
return;
client_->SignOut();
}
virtual void ConnectToPeer(int peer_id) {
ASSERT(peer_id_ == -1);
ASSERT(peer_id != -1);
ASSERT(handshake_ == NONE);
if (handshake_ != NONE)
return;
if (InitializePeerConnection()) {
peer_id_ = peer_id;
waiting_for_video_ = peer_connection_->AddStream(kVideoLabel, true);
waiting_for_audio_ = peer_connection_->AddStream(kAudioLabel, false);
if (waiting_for_video_ || waiting_for_audio_)
handshake_ = INITIATOR;
ASSERT(waiting_for_video_ || waiting_for_audio_);
}
if (handshake_ == NONE) {
::MessageBoxA(main_wnd_->handle(), "Failed to initialize PeerConnection",
"Error", MB_OK | MB_ICONERROR);
}
}
virtual void DisconnectFromCurrentPeer() {
if (peer_connection_.get())
peer_connection_->Close();
}
//
// Win32Window implementation.
//
virtual bool OnMessage(UINT msg, WPARAM wp, LPARAM lp, LRESULT& result) {
bool ret = true;
if (msg == MEDIA_CHANNELS_INITIALIZED) {
ASSERT(handshake_ == INITIATOR);
bool ok = peer_connection_->Connect();
ASSERT(ok);
StartCaptureDevice();
// When we get an OnSignalingMessage notification, we'll send our
// json encoded signaling message to the peer, which is the first step
// of establishing a connection.
} else if (msg == PEER_CONNECTION_CLOSED) {
LOG(INFO) << "PEER_CONNECTION_CLOSED";
DeletePeerConnection();
::InvalidateRect(main_wnd_->handle(), NULL, TRUE);
waiting_for_audio_ = false;
waiting_for_video_ = false;
peer_id_ = -1;
ASSERT(video_channel_ == -1);
ASSERT(audio_channel_ == -1);
if (main_wnd_->IsWindow()) {
if (client_->is_connected()) {
main_wnd_->SwitchToPeerList(client_->peers());
} else {
main_wnd_->SwitchToConnectUI();
}
} else {
DisconnectFromServer();
}
} else if (msg == SEND_MESSAGE_TO_PEER) {
bool ok = client_->SendToPeer(peer_id_,
*reinterpret_cast<std::string*>(lp));
if (!ok) {
LOG(LS_ERROR) << "SendToPeer failed";
DisconnectFromServer();
}
} else {
ret = false;
}
return ret;
}
protected:
HandshakeState handshake_;
bool waiting_for_audio_;
bool waiting_for_video_;
int peer_id_;
scoped_ptr<PeerConnection> peer_connection_;
PeerConnectionClient* client_;
MainWnd* main_wnd_;
int video_channel_;
int audio_channel_;
scoped_ptr<cricket::VideoRenderer> local_renderer_;
scoped_ptr<cricket::VideoRenderer> remote_renderer_;
};
int PASCAL wWinMain(HINSTANCE instance, HINSTANCE prev_instance,
wchar_t* cmd_line, int cmd_show) {
talk_base::EnsureWinsockInit();
webrtc::Trace::CreateTrace();
webrtc::Trace::SetTraceFile("session_test_trace.txt");
webrtc::Trace::SetLevelFilter(webrtc::kTraceWarning);
MainWnd wnd;
if (!wnd.Create()) {
ASSERT(false);
return -1;
}
PeerConnectionClient client;
ConnectionObserver observer(&client, &wnd);
// Main loop.
MSG msg;
BOOL gm;
while ((gm = ::GetMessage(&msg, NULL, 0, 0)) && gm != -1) {
if (!wnd.PreTranslateMessage(&msg)) {
::TranslateMessage(&msg);
::DispatchMessage(&msg);
}
}
if (observer.connection_active() || client.is_connected()) {
observer.Close();
while ((observer.connection_active() || client.is_connected()) &&
(gm = ::GetMessage(&msg, NULL, 0, 0)) && gm != -1) {
::TranslateMessage(&msg);
::DispatchMessage(&msg);
}
}
return 0;
}
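For reference, the test client above speaks a small HTTP/1.0 protocol to the peer server; a sketch of the request sequence it issues (peer names and ids are placeholders):

GET /sign_in?<name> HTTP/1.0            -> 200, "Pragma: <my_id>", body lists connected peers
GET /wait?peer_id=<my_id> HTTP/1.0      -> hanging get; "Pragma:" carries the sender's id
POST /message?peer_id=<my_id>&to=<peer_id> HTTP/1.0   (body: the signaling message)
GET /sign_out?peer_id=<my_id> HTTP/1.0  -> sign off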

View File

@ -1,972 +0,0 @@
#include "talk/app/videomediaengine.h"
#include <iostream>
#ifdef PLATFORM_CHROMIUM
#include "content/renderer/video_capture_chrome.h"
#endif
#include "talk/base/buffer.h"
#include "talk/base/byteorder.h"
#include "talk/base/logging.h"
#include "talk/base/stringutils.h"
#include "talk/app/voicemediaengine.h"
#include "modules/video_capture/main/interface/video_capture.h"
#include "vplib.h"
#ifndef ARRAYSIZE
#define ARRAYSIZE(a) (sizeof(a) / sizeof((a)[0]))
#endif
namespace webrtc {
static const int kDefaultLogSeverity = 3;
static const int kStartVideoBitrate = 300;
static const int kMaxVideoBitrate = 1000;
CricketWebRTCVideoFrame::CricketWebRTCVideoFrame() : elapsed_time_(0) {
}
CricketWebRTCVideoFrame::~CricketWebRTCVideoFrame() {
// TODO(ronghuawu): should the CricketWebRTCVideoFrame own the buffer?
// Swap the externally owned buffer out so video_frame_ does not free
// memory it does not own.
WebRtc_UWord8* newMemory = NULL;
WebRtc_UWord32 newLength = 0;
WebRtc_UWord32 newSize = 0;
video_frame_.Swap(newMemory, newLength, newSize);
}
void CricketWebRTCVideoFrame::Attach(unsigned char* buffer, int bufferSize,
int w, int h) {
WebRtc_UWord8* newMemory = buffer;
WebRtc_UWord32 newLength = bufferSize;
WebRtc_UWord32 newSize = bufferSize;
video_frame_.Swap(newMemory, newLength, newSize);
video_frame_.SetWidth(w);
video_frame_.SetHeight(h);
}
size_t CricketWebRTCVideoFrame::GetWidth() const {
return video_frame_.Width();
}
size_t CricketWebRTCVideoFrame::GetHeight() const {
return video_frame_.Height();
}
const uint8* CricketWebRTCVideoFrame::GetYPlane() const {
WebRtc_UWord8* buffer = video_frame_.Buffer();
return buffer;
}
const uint8* CricketWebRTCVideoFrame::GetUPlane() const {
WebRtc_UWord8* buffer = video_frame_.Buffer();
if (buffer)
buffer += (video_frame_.Width() * video_frame_.Height());
return buffer;
}
const uint8* CricketWebRTCVideoFrame::GetVPlane() const {
WebRtc_UWord8* buffer = video_frame_.Buffer();
if (buffer)
buffer += (video_frame_.Width() * video_frame_.Height() * 5 / 4);
return buffer;
}
uint8* CricketWebRTCVideoFrame::GetYPlane() {
WebRtc_UWord8* buffer = video_frame_.Buffer();
return buffer;
}
uint8* CricketWebRTCVideoFrame::GetUPlane() {
WebRtc_UWord8* buffer = video_frame_.Buffer();
if (buffer)
buffer += (video_frame_.Width() * video_frame_.Height());
return buffer;
}
uint8* CricketWebRTCVideoFrame::GetVPlane() {
WebRtc_UWord8* buffer = video_frame_.Buffer();
if (buffer)
// The V plane follows the Y (w*h) and U (w*h/4) planes, matching the
// const overload above.
buffer += (video_frame_.Width() * video_frame_.Height() * 5 / 4);
return buffer;
}
cricket::VideoFrame* CricketWebRTCVideoFrame::Copy() const {
WebRtc_UWord8* buffer = video_frame_.Buffer();
if (buffer) {
int new_buffer_size = video_frame_.Length();
unsigned char* new_buffer = new unsigned char[new_buffer_size];
memcpy(new_buffer, buffer, new_buffer_size);
CricketWebRTCVideoFrame* copy = new CricketWebRTCVideoFrame();
copy->Attach(new_buffer, new_buffer_size,
video_frame_.Width(), video_frame_.Height());
copy->SetTimeStamp(video_frame_.TimeStamp());
copy->SetElapsedTime(elapsed_time_);
return copy;
}
return NULL;
}
size_t CricketWebRTCVideoFrame::CopyToBuffer(
uint8* buffer, size_t size) const {
if (!video_frame_.Buffer()) {
return 0;
}
size_t needed = video_frame_.Length();
if (needed <= size) {
memcpy(buffer, video_frame_.Buffer(), needed);
}
return needed;
}
size_t CricketWebRTCVideoFrame::ConvertToRgbBuffer(uint32 to_fourcc,
uint8* buffer,
size_t size,
size_t pitch_rgb) const {
if (!video_frame_.Buffer()) {
return 0;
}
size_t width = video_frame_.Width();
size_t height = video_frame_.Height();
// See http://www.virtualdub.org/blog/pivot/entry.php?id=190 for a good
// explanation of pitch and why this is the amount of space we need.
size_t needed = pitch_rgb * (height - 1) + 4 * width;
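// Worked example (hypothetical numbers): a 176x144 ARGB target with
// pitch_rgb == 176 * 4 == 704 needs 704 * 143 + 4 * 176 == 101376
// bytes, i.e. exactly 176 * 144 * 4.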
if (needed > size) {
LOG(LS_WARNING) << "RGB buffer is not large enough";
return needed;
}
VideoType outgoingVideoType = kUnknown;
switch (to_fourcc) {
case cricket::FOURCC_ARGB:
outgoingVideoType = kARGB;
break;
default:
LOG(LS_WARNING) << "RGB type not supported: " << to_fourcc;
break;
}
if (outgoingVideoType != kUnknown)
ConvertFromI420(outgoingVideoType, video_frame_.Buffer(),
width, height, buffer);
return needed;
}
// TODO(ronghuawu): Implement StretchToPlanes
void CricketWebRTCVideoFrame::StretchToPlanes(
uint8* y, uint8* u, uint8* v,
int32 dst_pitch_y, int32 dst_pitch_u, int32 dst_pitch_v,
size_t width, size_t height, bool interpolate, bool crop) const {
}
size_t CricketWebRTCVideoFrame::StretchToBuffer(size_t w, size_t h,
uint8* buffer, size_t size,
bool interpolate,
bool crop) const {
if (!video_frame_.Buffer()) {
return 0;
}
size_t needed = video_frame_.Length();
if (needed <= size) {
uint8* bufy = buffer;
uint8* bufu = bufy + w * h;
uint8* bufv = bufu + ((w + 1) >> 1) * ((h + 1) >> 1);
StretchToPlanes(bufy, bufu, bufv, w, (w + 1) >> 1, (w + 1) >> 1, w, h,
interpolate, crop);
}
return needed;
}
void CricketWebRTCVideoFrame::StretchToFrame(cricket::VideoFrame *target,
bool interpolate, bool crop) const {
if (!target) return;
StretchToPlanes(target->GetYPlane(),
target->GetUPlane(),
target->GetVPlane(),
target->GetYPitch(),
target->GetUPitch(),
target->GetVPitch(),
target->GetWidth(),
target->GetHeight(),
interpolate, crop);
target->SetElapsedTime(GetElapsedTime());
target->SetTimeStamp(GetTimeStamp());
}
cricket::VideoFrame* CricketWebRTCVideoFrame::Stretch(size_t w, size_t h,
bool interpolate, bool crop) const {
// TODO(ronghuawu): implement
CricketWebRTCVideoFrame* frame = new CricketWebRTCVideoFrame();
return frame;
}
CricketWebRTCVideoRenderer::CricketWebRTCVideoRenderer(
    cricket::VideoRenderer* renderer)
    : renderer_(renderer) {
}
CricketWebRTCVideoRenderer::~CricketWebRTCVideoRenderer() {
}
int CricketWebRTCVideoRenderer::FrameSizeChange(unsigned int width,
unsigned int height,
unsigned int numberOfStreams) {
ASSERT(renderer_ != NULL);
width_ = width;
height_ = height;
number_of_streams_ = numberOfStreams;
return renderer_->SetSize(width_, height_, 0) ? 0 : -1;
}
int CricketWebRTCVideoRenderer::DeliverFrame(unsigned char* buffer,
int bufferSize) {
ASSERT(renderer_ != NULL);
video_frame_.Attach(buffer, bufferSize, width_, height_);
return renderer_->RenderFrame(&video_frame_) ? 0 : -1;
}
const RtcVideoEngine::VideoCodecPref RtcVideoEngine::kVideoCodecPrefs[] = {
{"VP8", 104, 0},
{"H264", 105, 1}
};
RtcVideoEngine::RtcVideoEngine()
: video_engine_(new VideoEngineWrapper()),
capture_(NULL),
capture_id_(-1),
voice_engine_(NULL),
initialized_(false),
log_level_(kDefaultLogSeverity),
capture_started_(false) {
}
RtcVideoEngine::RtcVideoEngine(RtcVoiceEngine* voice_engine)
: video_engine_(new VideoEngineWrapper()),
capture_(NULL),
capture_id_(-1),
voice_engine_(voice_engine),
initialized_(false),
log_level_(kDefaultLogSeverity),
capture_started_(false) {
}
RtcVideoEngine::~RtcVideoEngine() {
LOG(LS_VERBOSE) << " RtcVideoEngine::~RtcVideoEngine";
video_engine_->engine()->SetTraceCallback(NULL);
Terminate();
}
bool RtcVideoEngine::Init() {
LOG(LS_VERBOSE) << "RtcVideoEngine::Init";
ApplyLogging();
if (video_engine_->engine()->SetTraceCallback(this) != 0) {
LOG(LS_ERROR) << "SetTraceCallback error";
}
bool result = InitVideoEngine(voice_engine_);
if (result) {
LOG(LS_INFO) << "VideoEngine Init done";
} else {
LOG(LS_ERROR) << "VideoEngine Init failed, releasing";
Terminate();
}
return result;
}
bool RtcVideoEngine::InitVideoEngine(RtcVoiceEngine* voice_engine) {
LOG(LS_VERBOSE) << "RtcVideoEngine::InitVideoEngine";
bool ret = true;
if (video_engine_->base()->Init() != 0) {
LOG(LS_ERROR) << "VideoEngine Init method failed";
ret = false;
}
if (!voice_engine) {
LOG(LS_WARNING) << "NULL voice engine";
} else if ((video_engine_->base()->SetVoiceEngine(
voice_engine->webrtc()->engine())) != 0) {
LOG(LS_WARNING) << "Failed to SetVoiceEngine";
}
if ((video_engine_->base()->RegisterObserver(*this)) != 0) {
LOG(LS_WARNING) << "Failed to register observer";
}
int ncodecs = video_engine_->codec()->NumberOfCodecs();
for (int i = 0; i < ncodecs - 2; ++i) {
VideoCodec wcodec;
if ((video_engine_->codec()->GetCodec(i, wcodec) == 0) &&
(strncmp(wcodec.plName, "I420", 4) != 0)) {  // Ignore I420.
cricket::VideoCodec codec(wcodec.plType, wcodec.plName, wcodec.width,
wcodec.height, wcodec.maxFramerate, i);
LOG(LS_INFO) << codec.ToString();
video_codecs_.push_back(codec);
}
}
std::sort(video_codecs_.begin(), video_codecs_.end(),
&cricket::VideoCodec::Preferable);
return ret;
}
void RtcVideoEngine::PerformanceAlarm(const unsigned int cpuLoad) {
return;
}
void RtcVideoEngine::Print(const TraceLevel level, const char *traceString,
const int length) {
return;
}
int RtcVideoEngine::GetCodecPreference(const char* name) {
for (size_t i = 0; i < ARRAY_SIZE(kVideoCodecPrefs); ++i) {
if (strcmp(kVideoCodecPrefs[i].payload_name, name) == 0) {
return kVideoCodecPrefs[i].pref;
}
}
return -1;
}
void RtcVideoEngine::ApplyLogging() {
int filter = 0;
switch (log_level_) {
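// The missing breaks below are intentional: each severity accumulates
// the trace filters of the less verbose levels. Note that the computed
// filter is only built here; it is not yet handed to the engine.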
case talk_base::LS_VERBOSE: filter |= kTraceAll;
case talk_base::LS_INFO: filter |= kTraceStateInfo;
case talk_base::LS_WARNING: filter |= kTraceWarning;
case talk_base::LS_ERROR: filter |= kTraceError | kTraceCritical;
}
}
void RtcVideoEngine::Terminate() {
LOG(LS_INFO) << "RtcVideoEngine::Terminate";
ReleaseCaptureDevice();
}
int RtcVideoEngine::GetCapabilities() {
return cricket::MediaEngine::VIDEO_RECV | cricket::MediaEngine::VIDEO_SEND;
}
bool RtcVideoEngine::SetOptions(int options) {
return true;
}
bool RtcVideoEngine::ReleaseCaptureDevice() {
if (capture_) {
// Stop capture
SetCapture(false);
// DisconnectCaptureDevice
RtcVideoMediaChannel* channel;
for (VideoChannels::const_iterator it = channels_.begin();
it != channels_.end(); ++it) {
ASSERT(*it != NULL);
channel = *it;
video_engine_->capture()->DisconnectCaptureDevice(channel->video_channel());
}
// ReleaseCaptureDevice
video_engine_->capture()->ReleaseCaptureDevice(capture_id_);
capture_id_ = -1;
#ifdef PLATFORM_CHROMIUM
VideoCaptureChrome::DestroyVideoCapture(
static_cast<VideoCaptureChrome*>(capture_));
#else
webrtc::VideoCaptureModule::Destroy(capture_);
#endif
capture_ = NULL;
}
return true;
}
bool RtcVideoEngine::SetCaptureDevice(const cricket::Device* cam) {
ASSERT(video_engine_.get());
ASSERT(cam != NULL);
ReleaseCaptureDevice();
#ifdef PLATFORM_CHROMIUM
int cam_id = atol(cam->id.c_str());
if (cam_id == -1)
return false;
unsigned char uniqueId[16];
capture_ = VideoCaptureChrome::CreateVideoCapture(cam_id, uniqueId);
#else
WebRtc_UWord8 device_name[128];
WebRtc_UWord8 device_id[260];
VideoCaptureModule::DeviceInfo* device_info =
VideoCaptureModule::CreateDeviceInfo(0);
for (WebRtc_UWord32 i = 0; i < device_info->NumberOfDevices(); ++i) {
if (device_info->GetDeviceName(i, device_name, ARRAYSIZE(device_name),
device_id, ARRAYSIZE(device_id)) == 0) {
if ((cam->name.compare("") == 0) ||
(cam->id.compare((char*) device_id) == 0)) {
capture_ = VideoCaptureModule::Create(1234, device_id);
if (capture_) {
LOG(INFO) << "Found video capture device: " << device_name;
break;
}
}
}
}
VideoCaptureModule::DestroyDeviceInfo(device_info);
#endif
if (!capture_)
return false;
ViECapture* vie_capture = video_engine_->capture();
if (vie_capture->AllocateCaptureDevice(*capture_, capture_id_) == 0) {
// Connect to all the channels
RtcVideoMediaChannel* channel;
for (VideoChannels::const_iterator it = channels_.begin();
it != channels_.end(); ++it) {
ASSERT(*it != NULL);
channel = *it;
vie_capture->ConnectCaptureDevice(capture_id_, channel->video_channel());
}
SetCapture(true);
} else {
ASSERT(capture_id_ == -1);
}
return (capture_id_ != -1);
}
bool RtcVideoEngine::SetLocalRenderer(cricket::VideoRenderer* renderer) {
if (!local_renderer_.get()) {
local_renderer_.reset(new CricketWebRTCVideoRenderer(renderer));
} else {
// Renderer already set
return true;
}
int ret;
ret = video_engine_->render()->AddRenderer(capture_id_,
kVideoI420,
local_renderer_.get());
if (ret != 0)
return false;
ret = video_engine_->render()->StartRender(capture_id_);
return (ret == 0);
}
cricket::CaptureResult RtcVideoEngine::SetCapture(bool capture) {
if (capture_started_ == capture)
return cricket::CR_SUCCESS;
if (capture_id_ != -1) {
int ret;
if (capture)
ret = video_engine_->capture()->StartCapture(capture_id_);
else
ret = video_engine_->capture()->StopCapture(capture_id_);
if (ret == 0) {
capture_started_ = capture;
return cricket::CR_SUCCESS;
}
}
return cricket::CR_NO_DEVICE;
}
const std::vector<cricket::VideoCodec>& RtcVideoEngine::codecs() const {
return video_codecs_;
}
void RtcVideoEngine::SetLogging(int min_sev, const char* filter) {
log_level_ = min_sev;
ApplyLogging();
}
bool RtcVideoEngine::SetDefaultEncoderConfig(
const cricket::VideoEncoderConfig& config) {
bool ret = SetDefaultCodec(config.max_codec);
if (ret) {
default_encoder_config_ = config;
}
return ret;
}
bool RtcVideoEngine::SetDefaultCodec(const cricket::VideoCodec& codec) {
default_codec_ = codec;
return true;
}
RtcVideoMediaChannel* RtcVideoEngine::CreateChannel(
cricket::VoiceMediaChannel* voice_channel) {
RtcVideoMediaChannel* channel =
new RtcVideoMediaChannel(this, voice_channel);
if (channel) {
if (!channel->Init()) {
delete channel;
channel = NULL;
}
}
return channel;
}
bool RtcVideoEngine::FindCodec(const cricket::VideoCodec& codec) {
for (size_t i = 0; i < video_codecs_.size(); ++i) {
if (video_codecs_[i].Matches(codec)) {
return true;
}
}
return false;
}
void RtcVideoEngine::ConvertToCricketVideoCodec(
const VideoCodec& in_codec, cricket::VideoCodec& out_codec) {
out_codec.id = in_codec.plType;
out_codec.name = in_codec.plName;
out_codec.width = in_codec.width;
out_codec.height = in_codec.height;
out_codec.framerate = in_codec.maxFramerate;
}
void RtcVideoEngine::ConvertFromCricketVideoCodec(
const cricket::VideoCodec& in_codec, VideoCodec& out_codec) {
out_codec.plType = in_codec.id;
strcpy(out_codec.plName, in_codec.name.c_str());
out_codec.width = 352;  // TODO: use in_codec.width.
out_codec.height = 288;  // TODO: use in_codec.height.
out_codec.maxFramerate = 30;  // TODO: use in_codec.framerate.
if (strncmp(out_codec.plName, "VP8", 3) == 0) {
out_codec.codecType = kVideoCodecVP8;
} else if (strncmp(out_codec.plName, "H263", 4) == 0) {
out_codec.codecType = kVideoCodecH263;
} else if (strncmp(out_codec.plName, "H264", 4) == 0) {
out_codec.codecType = kVideoCodecH264;
} else if (strncmp(out_codec.plName, "I420", 4) == 0) {
out_codec.codecType = kVideoCodecI420;
} else {
LOG(LS_INFO) << "invalid codec type";
}
out_codec.maxBitrate = kMaxVideoBitrate;
out_codec.startBitrate = kStartVideoBitrate;
out_codec.minBitrate = kStartVideoBitrate;
}
int RtcVideoEngine::GetLastVideoEngineError() {
return video_engine_->base()->LastError();
}
void RtcVideoEngine::RegisterChannel(RtcVideoMediaChannel *channel) {
talk_base::CritScope lock(&channels_cs_);
channels_.push_back(channel);
}
void RtcVideoEngine::UnregisterChannel(RtcVideoMediaChannel *channel) {
talk_base::CritScope lock(&channels_cs_);
VideoChannels::iterator i = std::find(channels_.begin(),
channels_.end(),
channel);
if (i != channels_.end()) {
channels_.erase(i);
}
}
// RtcVideoMediaChannel
RtcVideoMediaChannel::RtcVideoMediaChannel(
RtcVideoEngine* engine, cricket::VoiceMediaChannel* channel)
: engine_(engine),
voice_channel_(channel),
video_channel_(-1),
sending_(false),
render_started_(false) {
engine->RegisterChannel(this);
}
bool RtcVideoMediaChannel::Init() {
bool ret = true;
if (engine_->video_engine()->base()->CreateChannel(video_channel_) != 0) {
LOG(LS_ERROR) << "ViE CreateChannel Failed!!";
ret = false;
}
LOG(LS_INFO) << "RtcVideoMediaChannel::Init "
<< "video_channel " << video_channel_ << " created";
// Connect the audio channel.
if (voice_channel_) {
RtcVoiceMediaChannel* channel =
static_cast<RtcVoiceMediaChannel*> (voice_channel_);
if (engine_->video_engine()->base()->ConnectAudioChannel(
video_channel_, channel->audio_channel()) != 0) {
LOG(LS_WARNING) << "ViE ConnectAudioChannel failed"
<< "A/V not synchronized";
// Don't set ret to false;
}
}
// Register the external transport.
if (engine_->video_engine()->network()->RegisterSendTransport(
video_channel_, *this) != 0) {
ret = false;
} else {
EnableRtcp();
EnablePLI();
}
return ret;
}
RtcVideoMediaChannel::~RtcVideoMediaChannel() {
// Stop rendering and remove the remote renderer.
SetRender(false);
if (engine()->video_engine()->render()->RemoveRenderer(video_channel_) == -1) {
LOG(LS_ERROR) << "Video RemoveRenderer failed for channel "
<< video_channel_;
}
// DeRegister external transport
if (engine()->video_engine()->network()->DeregisterSendTransport(
video_channel_) == -1) {
LOG(LS_ERROR) << "DeRegisterSendTransport failed for channel id "
<< video_channel_;
}
// Unregister RtcChannel with the engine.
engine()->UnregisterChannel(this);
// Delete VideoChannel
if (engine()->video_engine()->base()->DeleteChannel(video_channel_) == -1) {
LOG(LS_ERROR) << "Video DeleteChannel failed for channel "
<< video_channel_;
}
}
bool RtcVideoMediaChannel::SetRecvCodecs(
const std::vector<cricket::VideoCodec>& codecs) {
bool ret = true;
for (std::vector<cricket::VideoCodec>::const_iterator iter = codecs.begin();
iter != codecs.end(); ++iter) {
if (engine()->FindCodec(*iter)) {
VideoCodec wcodec;
engine()->ConvertFromCricketVideoCodec(*iter, wcodec);
if (engine()->video_engine()->codec()->SetReceiveCodec(
video_channel_, wcodec) != 0) {
LOG(LS_ERROR) << "ViE SetReceiveCodec failed"
<< " VideoChannel : " << video_channel_ << " Error: "
<< engine()->video_engine()->base()->LastError()
<< "wcodec " << wcodec.plName;
ret = false;
}
} else {
LOG(LS_INFO) << "Unknown codec" << iter->name;
ret = false;
}
}
// Make the channel ready to receive packets.
if (ret) {
if (engine()->video_engine()->base()->StartReceive(video_channel_) != 0) {
LOG(LS_ERROR) << "ViE StartReceive failure";
ret = false;
}
}
return ret;
}
bool RtcVideoMediaChannel::SetSendCodecs(
const std::vector<cricket::VideoCodec>& codecs) {
if (sending_) {
LOG(LS_ERROR) << "channel is alredy sending";
return false;
}
// Match against the local video codec list.
std::vector<VideoCodec> send_codecs;
for (std::vector<cricket::VideoCodec>::const_iterator iter = codecs.begin();
iter != codecs.end(); ++iter) {
if (engine()->FindCodec(*iter)) {
VideoCodec wcodec;
engine()->ConvertFromCricketVideoCodec(*iter, wcodec);
send_codecs.push_back(wcodec);
}
}
// If none matched, fail without changing the send codec.
if (send_codecs.empty()) {
LOG(LS_ERROR) << "No matching codecs avilable";
return false;
}
// Select the first matching codec.
const VideoCodec& codec(send_codecs[0]);
send_codec_ = codec;
if (engine()->video_engine()->codec()->SetSendCodec(
video_channel_, codec) != 0) {
LOG(LS_ERROR) << "ViE SetSendCodec failed";
return false;
}
return true;
}
bool RtcVideoMediaChannel::SetRender(bool render) {
if (video_channel_ != -1) {
int ret = -1;
if (render == render_started_)
return true;
if (render) {
ret = engine()->video_engine()->render()->StartRender(video_channel_);
} else {
ret = engine()->video_engine()->render()->StopRender(video_channel_);
}
if (ret == 0) {
render_started_ = render;
return true;
}
}
return false;
}
bool RtcVideoMediaChannel::SetSend(bool send) {
if (send == sending()) {
return true; // no action required
}
bool ret = true;
if (send) { //enable
if (engine()->video_engine()->base()->StartSend(video_channel_) != 0) {
LOG(LS_ERROR) << "ViE StartSend failed";
ret = false;
}
} else { // disable
if (engine()->video_engine()->base()->StopSend(video_channel_) != 0) {
LOG(LS_ERROR) << "ViE StopSend failed";
ret = false;
}
}
if (ret)
sending_ = send;
return ret;
}
bool RtcVideoMediaChannel::AddStream(uint32 ssrc, uint32 voice_ssrc) {
return false;
}
bool RtcVideoMediaChannel::RemoveStream(uint32 ssrc) {
return false;
}
bool RtcVideoMediaChannel::SetRenderer(
uint32 ssrc, cricket::VideoRenderer* renderer) {
if (!remote_renderer_.get()) {
remote_renderer_.reset(new CricketWebRTCVideoRenderer(renderer));
} else {
// Renderer already set
return true;
}
int ret;
ret = engine_->video_engine()->render()->AddRenderer(video_channel_,
kVideoI420,
remote_renderer_.get());
if (ret != 0)
return false;
ret = engine_->video_engine()->render()->StartRender(video_channel_);
return (ret == 0);
}
bool RtcVideoMediaChannel::SetExternalRenderer(uint32 ssrc, void* renderer)
{
int ret;
ret = engine_->video_engine()->render()->AddRenderer(
video_channel_,
kVideoI420,
static_cast<ExternalRenderer*>(renderer));
if (ret != 0)
return false;
ret = engine_->video_engine()->render()->StartRender(video_channel_);
return (ret == 0);
}
bool RtcVideoMediaChannel::GetStats(cricket::VideoMediaInfo* info) {
cricket::VideoSenderInfo sinfo;
memset(&sinfo, 0, sizeof(sinfo));
unsigned int ssrc;
if (engine_->video_engine()->rtp()->GetLocalSSRC(video_channel_,
ssrc) != 0) {
LOG(LS_ERROR) << "ViE GetLocalSSRC failed";
return false;
}
sinfo.ssrc = ssrc;
unsigned int cumulative_lost, extended_max, jitter;
int rtt_ms;
unsigned short fraction_lost;
if (engine_->video_engine()->rtp()->GetSentRTCPStatistics(video_channel_,
fraction_lost, cumulative_lost, extended_max, jitter, rtt_ms) != 0) {
LOG(LS_ERROR) << "ViE GetLocalSSRC failed";
return false;
}
sinfo.fraction_lost = fraction_lost;
sinfo.rtt_ms = rtt_ms;
unsigned int bytes_sent, packets_sent, bytes_recv, packets_recv;
if (engine_->video_engine()->rtp()->GetRTPStatistics(video_channel_,
bytes_sent, packets_sent, bytes_recv, packets_recv) != 0) {
LOG(LS_ERROR) << "ViE GetRTPStatistics";
return false;
}
sinfo.packets_sent = packets_sent;
sinfo.bytes_sent = bytes_sent;
sinfo.packets_lost = -1;
sinfo.packets_cached = -1;
info->senders.push_back(sinfo);
// Build the receiver info, reusing the local variables above.
cricket::VideoReceiverInfo rinfo;
memset(&rinfo, 0, sizeof(rinfo));
if (engine_->video_engine()->rtp()->GetReceivedRTCPStatistics(video_channel_,
fraction_lost, cumulative_lost, extended_max, jitter, rtt_ms) != 0) {
LOG(LS_ERROR) << "ViE GetReceivedRTPStatistics Failed";
return false;
}
rinfo.bytes_rcvd = bytes_recv;
rinfo.packets_rcvd = packets_recv;
rinfo.fraction_lost = fraction_lost;
if (engine_->video_engine()->rtp()->GetRemoteSSRC(video_channel_,
ssrc) != 0) {
return false;
}
rinfo.ssrc = ssrc;
// TODO: get the receive codec to fill in frame width/height.
info->receivers.push_back(rinfo);
return true;
}
bool RtcVideoMediaChannel::SendIntraFrame() {
bool ret = true;
if (engine()->video_engine()->codec()->SendKeyFrame(video_channel_) != 0) {
LOG(LS_ERROR) << "ViE SendKeyFrame failed";
ret = false;
}
return ret;
}
bool RtcVideoMediaChannel::RequestIntraFrame() {
// There is no API exposed to the application to request a key frame;
// ViE requests one internally when the decoder reports errors.
return true;
}
void RtcVideoMediaChannel::OnPacketReceived(talk_base::Buffer* packet) {
engine()->video_engine()->network()->ReceivedRTPPacket(video_channel_,
packet->data(),
packet->length());
}
void RtcVideoMediaChannel::OnRtcpReceived(talk_base::Buffer* packet) {
engine_->video_engine()->network()->ReceivedRTCPPacket(video_channel_,
packet->data(),
packet->length());
}
void RtcVideoMediaChannel::SetSendSsrc(uint32 id) {
if (!sending_) {
if (engine()->video_engine()->rtp()->SetLocalSSRC(video_channel_, id) != 0) {
LOG(LS_ERROR) << "ViE SetLocalSSRC failed";
}
} else {
LOG(LS_ERROR) << "Channel already in send state";
}
}
bool RtcVideoMediaChannel::SetRtcpCName(const std::string& cname) {
if (engine()->video_engine()->rtp()->SetRTCPCName(video_channel_,
cname.c_str()) != 0) {
LOG(LS_ERROR) << "ViE SetRTCPCName failed";
return false;
}
return true;
}
bool RtcVideoMediaChannel::Mute(bool on) {
// TODO: should this stop sending?
return false;
}
bool RtcVideoMediaChannel::SetSendBandwidth(bool autobw, int bps) {
LOG(LS_VERBOSE) << "RtcVideoMediaChanne::SetSendBandwidth";
VideoCodec current = send_codec_;
send_codec_.startBitrate = bps;
if (engine()->video_engine()->codec()->SetSendCodec(video_channel_,
send_codec_) != 0) {
LOG(LS_ERROR) << "ViE SetSendCodec failed";
if (engine()->video_engine()->codec()->SetSendCodec(video_channel_,
current) != 0) {
// TODO: should the call be ended in this case?
}
return false;
}
return true;
}
bool RtcVideoMediaChannel::SetOptions(int options) {
return true;
}
void RtcVideoMediaChannel::EnableRtcp() {
engine()->video_engine()->rtp()->SetRTCPStatus(
video_channel_, kRtcpCompound_RFC4585);
}
void RtcVideoMediaChannel::EnablePLI() {
engine_->video_engine()->rtp()->SetKeyFrameRequestMethod(
video_channel_, kViEKeyFrameRequestPliRtcp);
}
void RtcVideoMediaChannel::EnableTMMBR() {
engine_->video_engine()->rtp()->SetTMMBRStatus(video_channel_, true);
}
int RtcVideoMediaChannel::SendPacket(int channel, const void* data, int len) {
if (!network_interface_) {
return -1;
}
talk_base::Buffer packet(data, len, cricket::kMaxRtpPacketLen);
return network_interface_->SendPacket(&packet) ? len : -1;
}
int RtcVideoMediaChannel::SendRTCPPacket(int channel,
const void* data,
int len) {
if (!network_interface_) {
return -1;
}
talk_base::Buffer packet(data, len, cricket::kMaxRtpPacketLen);
return network_interface_->SendRtcp(&packet) ? len : -1;
}
} // namespace webrtc
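A quick standalone sanity check of the I420 plane layout assumed by the accessors above (a sketch only, not part of the engine; any even width/height values work):

#include <cstdio>

int main() {
  // I420 lays out Y (w*h bytes), then U (w*h/4), then V (w*h/4).
  const unsigned w = 352, h = 288;
  const unsigned u_offset = w * h;          // U starts after the Y plane
  const unsigned v_offset = w * h * 5 / 4;  // V starts after Y + U
  const unsigned length   = w * h * 3 / 2;  // total frame size
  std::printf("U@%u V@%u len=%u\n", u_offset, v_offset, length);
  return 0;
}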

View File

@ -1,258 +0,0 @@
/*
* libjingle
* Copyright 2004--2011, Google Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
* EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef TALK_APP_WEBRTC_VIDEOMEDIAENGINE_H_
#define TALK_APP_WEBRTC_VIDEOMEDIAENGINE_H_
#include <vector>
#include "talk/base/scoped_ptr.h"
#include "talk/session/phone/videocommon.h"
#include "talk/session/phone/codec.h"
#include "talk/session/phone/channel.h"
#include "talk/session/phone/mediaengine.h"
#include "talk/app/videoengine.h"
namespace cricket {
class VoiceMediaChannel;
class Device;
class VideoRenderer;
}
namespace webrtc {
class RtcVideoMediaChannel;
class RtcVoiceEngine;
class ExternalRenderer;
// CricketWebRTCVideoFrame only supports I420
class CricketWebRTCVideoFrame : public cricket::VideoFrame {
public:
CricketWebRTCVideoFrame();
~CricketWebRTCVideoFrame();
void Attach(unsigned char* buffer, int bufferSize, int w, int h);
virtual size_t GetWidth() const;
virtual size_t GetHeight() const;
virtual const uint8* GetYPlane() const;
virtual const uint8* GetUPlane() const;
virtual const uint8* GetVPlane() const;
virtual uint8* GetYPlane();
virtual uint8* GetUPlane();
virtual uint8* GetVPlane();
virtual int32 GetYPitch() const { return video_frame_.Width(); }
virtual int32 GetUPitch() const { return video_frame_.Width() / 2; }
virtual int32 GetVPitch() const { return video_frame_.Width() / 2; }
virtual size_t GetPixelWidth() const { return 1; }
virtual size_t GetPixelHeight() const { return 1; }
virtual int64 GetElapsedTime() const { return elapsed_time_; }
virtual int64 GetTimeStamp() const { return video_frame_.TimeStamp(); }
virtual void SetElapsedTime(int64 elapsed_time) {
elapsed_time_ = elapsed_time;
}
virtual void SetTimeStamp(int64 time_stamp) {
video_frame_.SetTimeStamp(time_stamp);
}
virtual VideoFrame* Copy() const;
virtual size_t CopyToBuffer(uint8* buffer, size_t size) const;
virtual size_t ConvertToRgbBuffer(uint32 to_fourcc, uint8* buffer,
size_t size, size_t pitch_rgb) const;
virtual void StretchToPlanes(uint8* y, uint8* u, uint8* v,
int32 pitchY, int32 pitchU, int32 pitchV,
size_t width, size_t height,
bool interpolate, bool crop) const;
virtual size_t StretchToBuffer(size_t w, size_t h, uint8* buffer, size_t size,
bool interpolate, bool crop) const;
virtual void StretchToFrame(VideoFrame* target, bool interpolate,
bool crop) const;
virtual VideoFrame* Stretch(size_t w, size_t h, bool interpolate,
bool crop) const;
private:
webrtc::VideoFrame video_frame_;
int64 elapsed_time_;
};
class CricketWebRTCVideoRenderer : public ExternalRenderer {
public:
CricketWebRTCVideoRenderer(cricket::VideoRenderer* renderer);
virtual int FrameSizeChange(unsigned int width, unsigned int height,
unsigned int numberOfStreams);
virtual int DeliverFrame(unsigned char* buffer, int bufferSize);
virtual ~CricketWebRTCVideoRenderer();
private:
cricket::VideoRenderer* renderer_;
CricketWebRTCVideoFrame video_frame_;
unsigned int width_;
unsigned int height_;
unsigned int number_of_streams_;
};
class RtcVideoEngine : public ViEBaseObserver, public TraceCallback {
public:
RtcVideoEngine();
explicit RtcVideoEngine(RtcVoiceEngine* voice_engine);
~RtcVideoEngine();
bool Init();
void Terminate();
RtcVideoMediaChannel* CreateChannel(
cricket::VoiceMediaChannel* voice_channel);
bool FindCodec(const cricket::VideoCodec& codec);
bool SetDefaultEncoderConfig(const cricket::VideoEncoderConfig& config);
void RegisterChannel(RtcVideoMediaChannel* channel);
void UnregisterChannel(RtcVideoMediaChannel* channel);
VideoEngineWrapper* video_engine() { return video_engine_.get(); }
int GetLastVideoEngineError();
int GetCapabilities();
bool SetOptions(int options);
// TODO: This interface needs to change for WebRTC.
bool SetCaptureDevice(const cricket::Device* device);
bool SetLocalRenderer(cricket::VideoRenderer* renderer);
cricket::CaptureResult SetCapture(bool capture);
const std::vector<cricket::VideoCodec>& codecs() const;
void SetLogging(int min_sev, const char* filter);
cricket::VideoEncoderConfig& default_encoder_config() {
return default_encoder_config_;
}
cricket::VideoCodec& default_codec() {
return default_codec_;
}
bool SetDefaultCodec(const cricket::VideoCodec& codec);
void ConvertToCricketVideoCodec(const VideoCodec& in_codec,
cricket::VideoCodec& out_codec);
void ConvertFromCricketVideoCodec(const cricket::VideoCodec& in_codec,
VideoCodec& out_codec);
bool SetCaptureDevice(void* external_capture);
sigslot::signal1<cricket::CaptureResult> SignalCaptureResult;
private:
struct VideoCodecPref {
const char* payload_name;
int payload_type;
int pref;
};
static const VideoCodecPref kVideoCodecPrefs[];
int GetCodecPreference(const char* name);
void ApplyLogging();
bool InitVideoEngine(RtcVoiceEngine* voice_engine);
void PerformanceAlarm(const unsigned int cpuLoad);
bool ReleaseCaptureDevice();
virtual void Print(const TraceLevel level, const char *traceString,
const int length);
typedef std::vector<RtcVideoMediaChannel*> VideoChannels;
talk_base::scoped_ptr<VideoEngineWrapper> video_engine_;
VideoCaptureModule* capture_;
int capture_id_;
RtcVoiceEngine* voice_engine_;
std::vector<cricket::VideoCodec> video_codecs_;
VideoChannels channels_;
talk_base::CriticalSection channels_cs_;
bool initialized_;
int log_level_;
cricket::VideoEncoderConfig default_encoder_config_;
cricket::VideoCodec default_codec_;
bool capture_started_;
talk_base::scoped_ptr<CricketWebRTCVideoRenderer> local_renderer_;
};
class RtcVideoMediaChannel: public cricket::VideoMediaChannel,
public webrtc::Transport {
public:
RtcVideoMediaChannel(
RtcVideoEngine* engine, cricket::VoiceMediaChannel* voice_channel);
~RtcVideoMediaChannel();
bool Init();
virtual bool SetRecvCodecs(const std::vector<cricket::VideoCodec> &codecs);
virtual bool SetSendCodecs(const std::vector<cricket::VideoCodec> &codecs);
virtual bool SetRender(bool render);
virtual bool SetSend(bool send);
virtual bool AddStream(uint32 ssrc, uint32 voice_ssrc);
virtual bool RemoveStream(uint32 ssrc);
virtual bool SetRenderer(uint32 ssrc, cricket::VideoRenderer* renderer);
virtual bool SetExternalRenderer(uint32 ssrc, void* renderer);
virtual bool GetStats(cricket::VideoMediaInfo* info);
virtual bool SendIntraFrame();
virtual bool RequestIntraFrame();
virtual void OnPacketReceived(talk_base::Buffer* packet);
virtual void OnRtcpReceived(talk_base::Buffer* packet);
virtual void SetSendSsrc(uint32 id);
virtual bool SetRtcpCName(const std::string& cname);
virtual bool Mute(bool on);
virtual bool SetRecvRtpHeaderExtensions(
const std::vector<cricket::RtpHeaderExtension>& extensions) { return false; }
virtual bool SetSendRtpHeaderExtensions(
const std::vector<cricket::RtpHeaderExtension>& extensions) { return false; }
virtual bool SetSendBandwidth(bool autobw, int bps);
virtual bool SetOptions(int options);
RtcVideoEngine* engine() { return engine_; }
cricket::VoiceMediaChannel* voice_channel() { return voice_channel_; }
int video_channel() { return video_channel_; }
bool sending() { return sending_; }
int GetMediaChannelId() { return video_channel_; }
protected:
virtual int SendPacket(int channel, const void* data, int len);
virtual int SendRTCPPacket(int channel, const void* data, int len);
private:
void EnableRtcp();
void EnablePLI();
void EnableTMMBR();
RtcVideoEngine* engine_;
cricket::VoiceMediaChannel* voice_channel_;
int video_channel_;
bool sending_;
bool render_started_;
webrtc::VideoCodec send_codec_;
talk_base::scoped_ptr<CricketWebRTCVideoRenderer> remote_renderer_;
};
} // namespace webrtc
#endif /* TALK_APP_WEBRTC_VIDEOMEDIAENGINE_H_ */

View File

@ -1,159 +0,0 @@
/*
* libjingle
* Copyright 2004--2011, Google Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
* EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef TALK_APP_WEBRTC_VOICEENGINE_H_
#define TALK_APP_WEBRTC_VOICEENGINE_H_
#include "talk/base/common.h"
#include "common_types.h"
#include "voice_engine/main/interface/voe_base.h"
#include "voice_engine/main/interface/voe_codec.h"
#include "voice_engine/main/interface/voe_errors.h"
#include "voice_engine/main/interface/voe_file.h"
#include "voice_engine/main/interface/voe_hardware.h"
#include "voice_engine/main/interface/voe_network.h"
#include "voice_engine/main/interface/voe_rtp_rtcp.h"
#include "voice_engine/main/interface/voe_video_sync.h"
#include "voice_engine/main/interface/voe_volume_control.h"
namespace webrtc {
// Tracing helpers, for easy logging when WebRTC calls fail.
// Example: "LOG_RTCERR1(StartSend, channel);" produces the trace
// "StartSend(1) failed, err=XXXX"
// The method GetLastRtcError must be defined in the calling scope.
#define LOG_RTCERR0(func) \
LOG_RTCERR0_EX(func, GetLastRtcError())
#define LOG_RTCERR1(func, a1) \
LOG_RTCERR1_EX(func, a1, GetLastRtcError())
#define LOG_RTCERR2(func, a1, a2) \
LOG_RTCERR2_EX(func, a1, a2, GetLastRtcError())
#define LOG_RTCERR3(func, a1, a2, a3) \
LOG_RTCERR3_EX(func, a1, a2, a3, GetLastRtcError())
#define LOG_RTCERR0_EX(func, err) LOG(WARNING) \
<< "" << #func << "() failed, err=" << err
#define LOG_RTCERR1_EX(func, a1, err) LOG(WARNING) \
<< "" << #func << "(" << a1 << ") failed, err=" << err
#define LOG_RTCERR2_EX(func, a1, a2, err) LOG(WARNING) \
<< "" << #func << "(" << a1 << ", " << a2 << ") failed, err=" \
<< err
#define LOG_RTCERR3_EX(func, a1, a2, a3, err) LOG(WARNING) \
<< "" << #func << "(" << a1 << ", " << a2 << ", " << a3 \
<< ") failed, err=" << err
// Automatically handles the lifetime of the WebRTC VoiceEngine.
class scoped_webrtc_engine {
public:
explicit scoped_webrtc_engine(VoiceEngine* e) : ptr(e) {}
// VERIFY, to ensure that there are no leaks at shutdown
~scoped_webrtc_engine() { if (ptr) VERIFY(VoiceEngine::Delete(ptr)); }
VoiceEngine* get() const { return ptr; }
private:
VoiceEngine* ptr;
};
// scoped_ptr class to handle obtaining and releasing WebRTC interface pointers
template<class T>
class scoped_rtc_ptr {
public:
explicit scoped_rtc_ptr(const scoped_webrtc_engine& e)
: ptr(T::GetInterface(e.get())) {}
template <typename E>
explicit scoped_rtc_ptr(E* engine) : ptr(T::GetInterface(engine)) {}
explicit scoped_rtc_ptr(T* p) : ptr(p) {}
~scoped_rtc_ptr() { if (ptr) ptr->Release(); }
T* operator->() const { return ptr; }
T* get() const { return ptr; }
// Queries the engine for the wrapped type and releases the current pointer.
template <typename E>
void reset(E* engine) {
reset();
if (engine)
ptr = T::GetInterface(engine);
}
// Releases the current pointer.
void reset() {
if (ptr) {
ptr->Release();
ptr = NULL;
}
}
private:
T* ptr;
};
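// Illustrative usage (a sketch, not from the original source): the wrapper
// acquires the interface on construction and releases it on destruction.
//   scoped_rtc_ptr<VoEBase> base(voice_engine);  // VoEBase::GetInterface()
//   base->Init();
//   // base->Release() runs automatically in ~scoped_rtc_ptr.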
// Utility class for aggregating the various WebRTC interfaces.
// Fake implementations can also be injected for testing.
class RtcWrapper {
public:
RtcWrapper()
: engine_(VoiceEngine::Create()),
base_(engine_), codec_(engine_), file_(engine_),
hw_(engine_), network_(engine_), rtp_(engine_),
sync_(engine_), volume_(engine_) {
}
RtcWrapper(VoEBase* base, VoECodec* codec, VoEFile* file,
VoEHardware* hw, VoENetwork* network,
VoERTP_RTCP* rtp, VoEVideoSync* sync,
VoEVolumeControl* volume)
: engine_(NULL),
base_(base), codec_(codec), file_(file),
hw_(hw), network_(network), rtp_(rtp),
sync_(sync), volume_(volume) {
}
virtual ~RtcWrapper() {}
VoiceEngine* engine() { return engine_.get(); }
VoEBase* base() { return base_.get(); }
VoECodec* codec() { return codec_.get(); }
VoEFile* file() { return file_.get(); }
VoEHardware* hw() { return hw_.get(); }
VoENetwork* network() { return network_.get(); }
VoERTP_RTCP* rtp() { return rtp_.get(); }
VoEVideoSync* sync() { return sync_.get(); }
VoEVolumeControl* volume() { return volume_.get(); }
int error() { return base_->LastError(); }
private:
scoped_webrtc_engine engine_;
scoped_rtc_ptr<VoEBase> base_;
scoped_rtc_ptr<VoECodec> codec_;
scoped_rtc_ptr<VoEFile> file_;
scoped_rtc_ptr<VoEHardware> hw_;
scoped_rtc_ptr<VoENetwork> network_;
scoped_rtc_ptr<VoERTP_RTCP> rtp_;
scoped_rtc_ptr<VoEVideoSync> sync_;
scoped_rtc_ptr<VoEVolumeControl> volume_;
};
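// Illustrative usage (a sketch): one RtcWrapper owns the VoiceEngine and
// hands out its sub-interfaces, e.g.:
//   RtcWrapper wrapper;
//   if (wrapper.base()->Init() == -1)
//     LOG(WARNING) << "VoE Init failed, err=" << wrapper.error();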
} // namespace webrtc
#endif // TALK_APP_WEBRTC_VOICEENGINE_H_

View File

@ -1,966 +0,0 @@
/*
* libjingle
* Copyright 2004--2011, Google Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
* EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "talk/app/voicemediaengine.h"
#include <algorithm>
#include <cstdio>
#include <string>
#include <vector>
#ifdef PLATFORM_CHROMIUM
#include "content/renderer/renderer_webrtc_audio_device_impl.h"
#else
#include "modules/audio_device/main/interface/audio_device.h"
#endif
#include "talk/base/base64.h"
#include "talk/base/byteorder.h"
#include "talk/base/common.h"
#include "talk/base/helpers.h"
#include "talk/base/logging.h"
#include "talk/base/stringencode.h"
namespace webrtc {
static void LogMultiline(talk_base::LoggingSeverity sev, char* text) {
const char* delim = "\r\n";
for (char* tok = strtok(text, delim); tok; tok = strtok(NULL, delim)) {
LOG_V(sev) << tok;
}
}
// RtcVoiceEngine
const RtcVoiceEngine::CodecPref RtcVoiceEngine::kCodecPrefs[] = {
{ "ISAC", 16000 },
{ "ISAC", 32000 },
{ "ISACLC", 16000 },
{ "speex", 16000 },
{ "IPCMWB", 16000 },
{ "G722", 16000 },
{ "iLBC", 8000 },
{ "speex", 8000 },
{ "GSM", 8000 },
{ "EG711U", 8000 },
{ "EG711A", 8000 },
{ "PCMU", 8000 },
{ "PCMA", 8000 },
{ "CN", 32000 },
{ "CN", 16000 },
{ "CN", 8000 },
{ "red", 8000 },
{ "telephone-event", 8000 },
};
RtcVoiceEngine::RtcVoiceEngine()
: rtc_wrapper_(new RtcWrapper()),
log_level_(kDefaultLogSeverity),
adm_(NULL) {
Construct();
}
RtcVoiceEngine::RtcVoiceEngine(RtcWrapper* rtc_wrapper)
: rtc_wrapper_(rtc_wrapper),
log_level_(kDefaultLogSeverity),
adm_(NULL) {
Construct();
}
void RtcVoiceEngine::Construct() {
LOG(INFO) << "RtcVoiceEngine::RtcVoiceEngine";
ApplyLogging();
if (rtc_wrapper_->base()->RegisterVoiceEngineObserver(*this) == -1) {
LOG_RTCERR0(RegisterVoiceEngineObserver);
}
// Load our audio codec list
LOG(INFO) << "WebRTC VoiceEngine codecs:";
int ncodecs = rtc_wrapper_->codec()->NumOfCodecs();
for (int i = 0; i < ncodecs; ++i) {
CodecInst gcodec;
if (rtc_wrapper_->codec()->GetCodec(i, gcodec) >= 0) {
int pref = GetCodecPreference(gcodec.plname, gcodec.plfreq);
if (pref != -1) {
if (gcodec.rate == -1) gcodec.rate = 0;
cricket::AudioCodec codec(gcodec.pltype, gcodec.plname, gcodec.plfreq,
gcodec.rate, gcodec.channels, pref);
LOG(INFO) << gcodec.plname << "/" << gcodec.plfreq << "/"
<< gcodec.channels << " " << gcodec.pltype;
codecs_.push_back(codec);
}
}
}
// Make sure they are in local preference order
std::sort(codecs_.begin(), codecs_.end(), &cricket::AudioCodec::Preferable);
}
RtcVoiceEngine::~RtcVoiceEngine() {
LOG(INFO) << "RtcVoiceEngine::~RtcVoiceEngine";
if (rtc_wrapper_->base()->DeRegisterVoiceEngineObserver() == -1) {
LOG_RTCERR0(DeRegisterVoiceEngineObserver);
}
rtc_wrapper_.reset();
if (adm_) {
AudioDeviceModule::Destroy(adm_);
adm_ = NULL;
}
}
bool RtcVoiceEngine::Init() {
LOG(INFO) << "RtcVoiceEngine::Init";
bool res = InitInternal();
if (res) {
LOG(INFO) << "RtcVoiceEngine::Init Done!";
} else {
LOG(LERROR) << "RtcVoiceEngine::Init failed";
Terminate();
}
return res;
}
bool RtcVoiceEngine::InitInternal() {
// Temporarily turn logging level up for the Init call
int old_level = log_level_;
log_level_ = talk_base::_min(log_level_,
static_cast<int>(talk_base::INFO));
ApplyLogging();
if (!adm_) {
#ifdef PLATFORM_CHROMIUM
adm_ = new RendererWebRtcAudioDeviceImpl(1440, 1440, 1, 1, 48000, 48000);
#else
adm_ = AudioDeviceModule::Create(0);
#endif
if (rtc_wrapper_->base()->RegisterAudioDeviceModule(*adm_) == -1) {
LOG_RTCERR0_EX(RegisterAudioDeviceModule, rtc_wrapper_->error());
return false;
}
}
// Init WebRTC VoiceEngine, enabling AEC logging if specified in SetLogging.
if (rtc_wrapper_->base()->Init() == -1) {
LOG_RTCERR0_EX(Init, rtc_wrapper_->error());
return false;
}
// Restore the previous log level
log_level_ = old_level;
ApplyLogging();
// Log the WebRTC version info
char buffer[1024] = "";
rtc_wrapper_->base()->GetVersion(buffer);
LOG(INFO) << "WebRTC VoiceEngine Version:";
LogMultiline(talk_base::INFO, buffer);
// Turn on AEC and AGC by default.
if (!SetOptions(
cricket::MediaEngine::ECHO_CANCELLATION | cricket::MediaEngine::AUTO_GAIN_CONTROL)) {
return false;
}
// Print our codec list again for the call diagnostic log
LOG(INFO) << "WebRTC VoiceEngine codecs:";
for (std::vector<cricket::AudioCodec>::const_iterator it = codecs_.begin();
it != codecs_.end(); ++it) {
LOG(INFO) << it->name << "/" << it->clockrate << "/"
<< it->channels << " " << it->id;
}
return true;
}
bool RtcVoiceEngine::SetDevices(const cricket::Device* in_device,
const cricket::Device* out_device) {
LOG(INFO) << "RtcVoiceEngine::SetDevices";
// Currently we always use the default device, so do nothing here.
return true;
}
void RtcVoiceEngine::Terminate() {
LOG(INFO) << "RtcVoiceEngine::Terminate";
rtc_wrapper_->base()->Terminate();
}
int RtcVoiceEngine::GetCapabilities() {
return cricket::MediaEngine::AUDIO_SEND | cricket::MediaEngine::AUDIO_RECV;
}
cricket::VoiceMediaChannel *RtcVoiceEngine::CreateChannel() {
RtcVoiceMediaChannel* ch = new RtcVoiceMediaChannel(this);
if (!ch->valid()) {
delete ch;
ch = NULL;
}
return ch;
}
bool RtcVoiceEngine::SetOptions(int options) {
return true;
}
bool RtcVoiceEngine::FindAudioDeviceId(
bool is_input, const std::string& dev_name, int dev_id, int* rtc_id) {
return false;
}
bool RtcVoiceEngine::GetOutputVolume(int* level) {
unsigned int ulevel;
if (rtc_wrapper_->volume()->GetSpeakerVolume(ulevel) == -1) {
LOG_RTCERR1(GetSpeakerVolume, level);
return false;
}
*level = ulevel;
return true;
}
bool RtcVoiceEngine::SetOutputVolume(int level) {
ASSERT(level >= 0 && level <= 255);
if (rtc_wrapper_->volume()->SetSpeakerVolume(level) == -1) {
LOG_RTCERR1(SetSpeakerVolume, level);
return false;
}
return true;
}
int RtcVoiceEngine::GetInputLevel() {
unsigned int ulevel;
return (rtc_wrapper_->volume()->GetSpeechInputLevel(ulevel) != -1) ?
static_cast<int>(ulevel) : -1;
}
bool RtcVoiceEngine::SetLocalMonitor(bool enable) {
return true;
}
const std::vector<cricket::AudioCodec>& RtcVoiceEngine::codecs() {
return codecs_;
}
bool RtcVoiceEngine::FindCodec(const cricket::AudioCodec& in) {
return FindRtcCodec(in, NULL);
}
bool RtcVoiceEngine::FindRtcCodec(const cricket::AudioCodec& in, CodecInst* out) {
int ncodecs = rtc_wrapper_->codec()->NumOfCodecs();
for (int i = 0; i < ncodecs; ++i) {
CodecInst gcodec;
if (rtc_wrapper_->codec()->GetCodec(i, gcodec) >= 0) {
cricket::AudioCodec codec(gcodec.pltype, gcodec.plname,
gcodec.plfreq, gcodec.rate, gcodec.channels, 0);
if (codec.Matches(in)) {
if (out) {
// If the codec is VBR and an explicit rate is specified, use it.
if (in.bitrate != 0 && gcodec.rate == -1) {
gcodec.rate = in.bitrate;
}
*out = gcodec;
}
return true;
}
}
}
return false;
}
void RtcVoiceEngine::SetLogging(int min_sev, const char* filter) {
log_level_ = min_sev;
std::vector<std::string> opts;
talk_base::tokenize(filter, ' ', &opts);
// voice log level
ApplyLogging();
}
int RtcVoiceEngine::GetLastRtcError() {
return rtc_wrapper_->error();
}
void RtcVoiceEngine::ApplyLogging() {
int filter = 0;
switch (log_level_) {
case talk_base::INFO: filter |= kTraceAll; // fall through
case talk_base::WARNING: filter |= kTraceWarning; // fall through
case talk_base::LERROR: filter |= kTraceError | kTraceCritical;
}
}
void RtcVoiceEngine::Print(const TraceLevel level,
const char* traceString, const int length) {
talk_base::LoggingSeverity sev = talk_base::INFO;
if (level == kTraceError || level == kTraceCritical)
sev = talk_base::LERROR;
else if (level == kTraceWarning)
sev = talk_base::WARNING;
else if (level == kTraceStateInfo)
sev = talk_base::INFO;
if (sev >= log_level_) {
// Skip past webrtc boilerplate prefix text
if (length <= 70) {
std::string msg(traceString, length);
LOG(LERROR) << "Malformed WebRTC log message: ";
LOG_V(sev) << msg;
} else {
std::string msg(traceString + 70, length - 71);
LOG_V(sev) << "VoE:" << msg;
}
}
}
void RtcVoiceEngine::CallbackOnError(const int err_code,
const int channel_num) {
talk_base::CritScope lock(&channels_cs_);
RtcVoiceMediaChannel* channel = NULL;
uint32 ssrc = 0;
LOG(WARNING) << "WebRTC error " << err_code << " reported on channel "
<< channel_num << ".";
if (FindChannelAndSsrc(channel_num, &channel, &ssrc)) {
ASSERT(channel != NULL);
channel->OnError(ssrc, err_code);
} else {
LOG(LERROR) << "WebRTC channel " << channel_num
<< " could not be found in the channel list when error reported.";
}
}
int RtcVoiceEngine::GetCodecPreference(const char *name, int clockrate) {
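  // Earlier entries in kCodecPrefs win: with the 18 entries above, ISAC/16000
  // (index 0) maps to preference 18 and telephone-event/8000 (index 17) maps
  // to 1; -1 means the codec is not in the preference table.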
for (size_t i = 0; i < ARRAY_SIZE(kCodecPrefs); ++i) {
if ((strcmp(kCodecPrefs[i].name, name) == 0) &&
(kCodecPrefs[i].clockrate == clockrate))
return ARRAY_SIZE(kCodecPrefs) - i;
}
LOG(WARNING) << "Unexpected codec \"" << name << "/" << clockrate << "\"";
return -1;
}
bool RtcVoiceEngine::FindChannelAndSsrc(
int channel_num, RtcVoiceMediaChannel** channel, uint32* ssrc) const {
ASSERT(channel != NULL && ssrc != NULL);
*channel = NULL;
*ssrc = 0;
// Find corresponding channel and ssrc
for (ChannelList::const_iterator it = channels_.begin();
it != channels_.end(); ++it) {
ASSERT(*it != NULL);
if ((*it)->FindSsrc(channel_num, ssrc)) {
*channel = *it;
return true;
}
}
return false;
}
void RtcVoiceEngine::RegisterChannel(RtcVoiceMediaChannel *channel) {
talk_base::CritScope lock(&channels_cs_);
channels_.push_back(channel);
}
void RtcVoiceEngine::UnregisterChannel(RtcVoiceMediaChannel *channel) {
talk_base::CritScope lock(&channels_cs_);
ChannelList::iterator i = std::find(channels_.begin(),
channels_.end(),
channel);
if (i != channels_.end()) {
channels_.erase(i);
}
}
// RtcVoiceMediaChannel
RtcVoiceMediaChannel::RtcVoiceMediaChannel(RtcVoiceEngine *engine)
: RtcMediaChannel<cricket::VoiceMediaChannel, RtcVoiceEngine>(engine,
engine->webrtc()->base()->CreateChannel()),
channel_options_(0), playout_(false), send_(cricket::SEND_NOTHING) {
engine->RegisterChannel(this);
LOG(INFO) << "RtcVoiceMediaChannel::RtcVoiceMediaChannel "
<< audio_channel();
// Register external transport
if (engine->webrtc()->network()->RegisterExternalTransport(
audio_channel(), *static_cast<Transport*>(this)) == -1) {
LOG_RTCERR2(RegisterExternalTransport, audio_channel(), this);
}
// Enable RTCP (for quality stats and feedback messages)
EnableRtcp(audio_channel());
// Create a random but nonzero send SSRC
SetSendSsrc(talk_base::CreateRandomNonZeroId());
}
RtcVoiceMediaChannel::~RtcVoiceMediaChannel() {
LOG(INFO) << "RtcVoiceMediaChannel::~RtcVoiceMediaChannel "
<< audio_channel();
// DeRegister external transport
if (engine()->webrtc()->network()->DeRegisterExternalTransport(
audio_channel()) == -1) {
LOG_RTCERR1(DeRegisterExternalTransport, audio_channel());
}
// Unregister ourselves from the engine.
engine()->UnregisterChannel(this);
// Remove any remaining streams.
while (!mux_channels_.empty()) {
RemoveStream(mux_channels_.begin()->first);
}
// Delete the primary channel.
if (engine()->webrtc()->base()->DeleteChannel(audio_channel()) == -1) {
LOG_RTCERR1(DeleteChannel, audio_channel());
}
}
bool RtcVoiceMediaChannel::SetOptions(int flags) {
// Always accept flags that are unchanged.
if (channel_options_ == flags) {
return true;
}
// Reject new options if we're already sending.
if (send_ != cricket::SEND_NOTHING) {
return false;
}
// Save the options, to be interpreted where appropriate.
channel_options_ = flags;
return true;
}
bool RtcVoiceMediaChannel::SetRecvCodecs(
const std::vector<cricket::AudioCodec>& codecs) {
// Update our receive payload types to match what we offered. This is only
// an issue when a different entity (i.e. a server) generates the offer
// for us.
bool ret = true;
for (std::vector<cricket::AudioCodec>::const_iterator i = codecs.begin();
i != codecs.end() && ret; ++i) {
CodecInst gcodec;
if (engine()->FindRtcCodec(*i, &gcodec)) {
if (gcodec.pltype != i->id) {
LOG(INFO) << "Updating payload type for " << gcodec.plname
<< " from " << gcodec.pltype << " to " << i->id;
gcodec.pltype = i->id;
if (engine()->webrtc()->codec()->SetRecPayloadType(
audio_channel(), gcodec) == -1) {
LOG_RTCERR1(SetRecPayloadType, audio_channel());
ret = false;
}
}
} else {
LOG(WARNING) << "Unknown codec " << i->name;
ret = false;
}
}
return ret;
}
bool RtcVoiceMediaChannel::SetSendCodecs(
const std::vector<cricket::AudioCodec>& codecs) {
bool first = true;
CodecInst send_codec;
memset(&send_codec, 0, sizeof(send_codec));
for (std::vector<cricket::AudioCodec>::const_iterator i = codecs.begin();
i != codecs.end(); ++i) {
CodecInst gcodec;
if (!engine()->FindRtcCodec(*i, &gcodec))
continue;
// We'll use the first codec in the list to actually send audio data.
// Be sure to use the payload type requested by the remote side.
if (first) {
send_codec = gcodec;
send_codec.pltype = i->id;
first = false;
}
}
// If we're being asked to set an empty list of codecs, due to a buggy client,
// choose the most common format: PCMU
if (first) {
LOG(WARNING) << "Received empty list of codecs; using PCMU/8000";
cricket::AudioCodec codec(0, "PCMU", 8000, 0, 1, 0);
engine()->FindRtcCodec(codec, &send_codec);
}
// Set the codec.
LOG(INFO) << "Selected voice codec " << send_codec.plname
<< "/" << send_codec.plfreq;
if (engine()->webrtc()->codec()->SetSendCodec(audio_channel(),
send_codec) == -1) {
LOG_RTCERR1(SetSendCodec, audio_channel());
return false;
}
return true;
}
bool RtcVoiceMediaChannel::SetPlayout(bool playout) {
if (playout_ == playout) {
return true;
}
bool result = true;
if (mux_channels_.empty()) {
// Only toggle the default channel if we don't have any other channels.
result = SetPlayout(audio_channel(), playout);
}
for (ChannelMap::iterator it = mux_channels_.begin();
it != mux_channels_.end() && result; ++it) {
if (!SetPlayout(it->second, playout)) {
LOG(LERROR) << "SetPlayout " << playout << " on channel " << it->second
<< " failed";
result = false;
}
}
if (result) {
playout_ = playout;
}
return result;
}
bool RtcVoiceMediaChannel::GetPlayout() {
return playout_;
}
bool RtcVoiceMediaChannel::SetSend(cricket::SendFlags send) {
if (send_ == send) {
return true;
}
if (send == cricket::SEND_MICROPHONE) {
if (sequence_number() != -1) {
if (engine()->webrtc()->sync()->SetInitSequenceNumber(
audio_channel(), sequence_number() + 1) == -1) {
LOG_RTCERR2(SetInitSequenceNumber, audio_channel(),
sequence_number() + 1);
}
}
if (engine()->webrtc()->base()->StartSend(audio_channel()) == -1) {
LOG_RTCERR1(StartSend, audio_channel());
return false;
}
if (engine()->webrtc()->file()->StopPlayingFileAsMicrophone(
audio_channel()) == -1) {
LOG_RTCERR1(StopPlayingFileAsMicrophone, audio_channel());
return false;
}
} else { // SEND_NOTHING
if (engine()->webrtc()->base()->StopSend(audio_channel()) == -1) {
LOG_RTCERR1(StopSend, audio_channel());
}
}
send_ = send;
return true;
}
cricket::SendFlags RtcVoiceMediaChannel::GetSend() {
return send_;
}
bool RtcVoiceMediaChannel::AddStream(uint32 ssrc) {
talk_base::CritScope lock(&mux_channels_cs_);
if (mux_channels_.find(ssrc) != mux_channels_.end()) {
return false;
}
// Create a new channel for receiving audio data.
int channel = engine()->webrtc()->base()->CreateChannel();
if (channel == -1) {
LOG_RTCERR0(CreateChannel);
return false;
}
// Configure to use external transport, like our default channel.
if (engine()->webrtc()->network()->RegisterExternalTransport(
channel, *this) == -1) {
LOG_RTCERR2(RegisterExternalTransport, channel, this);
return false;
}
// Use the same SSRC as our default channel (so the RTCP reports are correct).
unsigned int send_ssrc;
VoERTP_RTCP* rtp = engine()->webrtc()->rtp();
if (rtp->GetLocalSSRC(audio_channel(), send_ssrc) == -1) {
LOG_RTCERR2(GetLocalSSRC, audio_channel(), send_ssrc);
return false;
}
if (rtp->SetLocalSSRC(channel, send_ssrc) == -1) {
LOG_RTCERR2(SetLocalSSRC, channel, send_ssrc);
return false;
}
if (mux_channels_.empty() && GetPlayout()) {
LOG(INFO) << "Disabling playback on the default voice channel";
SetPlayout(audio_channel(), false);
}
mux_channels_[ssrc] = channel;
LOG(INFO) << "New audio stream " << ssrc << " registered to WebRTC channel #"
<< channel << ".";
return SetPlayout(channel, playout_);
}
bool RtcVoiceMediaChannel::RemoveStream(uint32 ssrc) {
talk_base::CritScope lock(&mux_channels_cs_);
ChannelMap::iterator it = mux_channels_.find(ssrc);
if (it != mux_channels_.end()) {
if (engine()->webrtc()->network()->DeRegisterExternalTransport(
it->second) == -1) {
LOG_RTCERR1(DeRegisterExternalTransport, it->second);
}
LOG(INFO) << "Removing audio stream " << ssrc << " with WebRTC channel #"
<< it->second << ".";
if (engine()->webrtc()->base()->DeleteChannel(it->second) == -1) {
LOG_RTCERR1(DeleteChannel, it->second);
return false;
}
mux_channels_.erase(it);
if (mux_channels_.empty() && GetPlayout()) {
// The last stream was removed. We can now enable the default
// channel for new channels to be played out immediately without
// waiting for AddStream messages.
// TODO(oja): Does the default channel still have its CN state?
LOG(INFO) << "Enabling playback on the default voice channel";
SetPlayout(audio_channel(), true);
}
}
return true;
}
bool RtcVoiceMediaChannel::GetActiveStreams(cricket::AudioInfo::StreamList* actives) {
actives->clear();
for (ChannelMap::iterator it = mux_channels_.begin();
it != mux_channels_.end(); ++it) {
int level = GetOutputLevel(it->second);
if (level > 0) {
actives->push_back(std::make_pair(it->first, level));
}
}
return true;
}
int RtcVoiceMediaChannel::GetOutputLevel() {
// return the highest output level of all streams
int highest = GetOutputLevel(audio_channel());
for (ChannelMap::iterator it = mux_channels_.begin();
it != mux_channels_.end(); ++it) {
int level = GetOutputLevel(it->second);
highest = talk_base::_max(level, highest);
}
return highest;
}
bool RtcVoiceMediaChannel::SetRingbackTone(const char *buf, int len) {
return true;
}
bool RtcVoiceMediaChannel::PlayRingbackTone(uint32 ssrc, bool play, bool loop) {
return true;
}
bool RtcVoiceMediaChannel::PlayRingbackTone(bool play, bool loop) {
return true;
}
bool RtcVoiceMediaChannel::PressDTMF(int event, bool playout) {
return true;
}
void RtcVoiceMediaChannel::OnPacketReceived(talk_base::Buffer* packet) {
// Pick which channel to send this packet to. If this packet doesn't match
// any multiplexed streams, just send it to the default channel. Otherwise,
// send it to the specific decoder instance for that stream.
int which_channel = GetChannel(
ParseSsrc(packet->data(), packet->length(), false));
if (which_channel == -1) {
which_channel = audio_channel();
}
engine()->webrtc()->network()->ReceivedRTPPacket(which_channel,
packet->data(),
packet->length());
}
void RtcVoiceMediaChannel::OnRtcpReceived(talk_base::Buffer* packet) {
// See above.
int which_channel = GetChannel(
ParseSsrc(packet->data(), packet->length(), true));
if (which_channel == -1) {
which_channel = audio_channel();
}
engine()->webrtc()->network()->ReceivedRTCPPacket(which_channel,
packet->data(),
packet->length());
}
void RtcVoiceMediaChannel::SetSendSsrc(uint32 ssrc) {
if (engine()->webrtc()->rtp()->SetLocalSSRC(audio_channel(), ssrc)
== -1) {
LOG_RTCERR2(SetLocalSSRC, audio_channel(), ssrc);
}
}
bool RtcVoiceMediaChannel::SetRtcpCName(const std::string& cname) {
if (engine()->webrtc()->rtp()->SetRTCP_CNAME(audio_channel(),
cname.c_str()) == -1) {
LOG_RTCERR2(SetRTCP_CNAME, audio_channel(), cname);
return false;
}
return true;
}
bool RtcVoiceMediaChannel::Mute(bool muted) {
if (engine()->webrtc()->volume()->SetInputMute(audio_channel(),
muted) == -1) {
LOG_RTCERR2(SetInputMute, audio_channel(), muted);
return false;
}
return true;
}
bool RtcVoiceMediaChannel::GetStats(cricket::VoiceMediaInfo* info) {
CallStatistics cs;
unsigned int ssrc;
CodecInst codec;
unsigned int level;
// Fill in the sender info, based on what we know, and what the
// remote side told us it got from its RTCP report.
cricket::VoiceSenderInfo sinfo;
memset(&sinfo, 0, sizeof(sinfo));
// Data we obtain locally.
memset(&cs, 0, sizeof(cs));
if (engine()->webrtc()->rtp()->GetRTCPStatistics(
audio_channel(), cs) == -1 ||
engine()->webrtc()->rtp()->GetLocalSSRC(audio_channel(), ssrc) == -1) {
return false;
}
sinfo.ssrc = ssrc;
sinfo.bytes_sent = cs.bytesSent;
sinfo.packets_sent = cs.packetsSent;
// RTT isn't known until an RTCP report is received. Until then, WebRTC
// returns 0 to indicate an error value.
sinfo.rtt_ms = (cs.rttMs > 0) ? cs.rttMs : -1;
// Data from the last remote RTCP report.
unsigned int ntp_high, ntp_low, timestamp, ptimestamp, jitter;
unsigned short loss; // NOLINT
if (engine()->webrtc()->rtp()->GetRemoteRTCPData(audio_channel(),
ntp_high, ntp_low, timestamp, ptimestamp, &jitter, &loss) != -1 &&
engine()->webrtc()->codec()->GetSendCodec(audio_channel(),
codec) != -1) {
// Convert Q8 to floating point.
sinfo.fraction_lost = static_cast<float>(loss) / (1 << 8);
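// (Q8 example: a reported loss of 64 becomes 64 / 256 = 0.25, i.e. 25%.)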
// Convert samples to milliseconds.
if (codec.plfreq / 1000 > 0) {
sinfo.jitter_ms = jitter / (codec.plfreq / 1000);
}
} else {
sinfo.fraction_lost = -1;
sinfo.jitter_ms = -1;
}
sinfo.packets_lost = -1;
sinfo.ext_seqnum = -1;
// Local speech level.
sinfo.audio_level = (engine()->webrtc()->volume()->
GetSpeechInputLevelFullRange(level) != -1) ? level : -1;
info->senders.push_back(sinfo);
// Build the list of receivers, one for each mux channel, or 1 in a 1:1 call.
std::vector<int> channels;
for (ChannelMap::const_iterator it = mux_channels_.begin();
it != mux_channels_.end(); ++it) {
channels.push_back(it->second);
}
if (channels.empty()) {
channels.push_back(audio_channel());
}
// Get the SSRC and stats for each receiver, based on our own calculations.
for (std::vector<int>::const_iterator it = channels.begin();
it != channels.end(); ++it) {
memset(&cs, 0, sizeof(cs));
if (engine()->webrtc()->rtp()->GetRemoteSSRC(*it, ssrc) != -1 &&
engine()->webrtc()->rtp()->GetRTCPStatistics(*it, cs) != -1 &&
engine()->webrtc()->codec()->GetRecCodec(*it, codec) != -1) {
cricket::VoiceReceiverInfo rinfo;
memset(&rinfo, 0, sizeof(rinfo));
rinfo.ssrc = ssrc;
rinfo.bytes_rcvd = cs.bytesReceived;
rinfo.packets_rcvd = cs.packetsReceived;
// The next four fields are from the most recently sent RTCP report.
// Convert Q8 to floating point.
rinfo.fraction_lost = static_cast<float>(cs.fractionLost) / (1 << 8);
rinfo.packets_lost = cs.cumulativeLost;
rinfo.ext_seqnum = cs.extendedMax;
// Convert samples to milliseconds.
if (codec.plfreq / 1000 > 0) {
rinfo.jitter_ms = cs.jitterSamples / (codec.plfreq / 1000);
}
// Get speech level.
rinfo.audio_level = (engine()->webrtc()->volume()->
GetSpeechOutputLevelFullRange(*it, level) != -1) ? level : -1;
info->receivers.push_back(rinfo);
}
}
return true;
}
void RtcVoiceMediaChannel::GetLastMediaError(
uint32* ssrc, VoiceMediaChannel::Error* error) {
ASSERT(ssrc != NULL);
ASSERT(error != NULL);
FindSsrc(audio_channel(), ssrc);
*error = WebRTCErrorToChannelError(GetLastRtcError());
}
bool RtcVoiceMediaChannel::FindSsrc(int channel_num, uint32* ssrc) {
talk_base::CritScope lock(&mux_channels_cs_);
ASSERT(ssrc != NULL);
if (channel_num == audio_channel()) {
unsigned local_ssrc = 0;
// This is a sending channel.
if (engine()->webrtc()->rtp()->GetLocalSSRC(
channel_num, local_ssrc) != -1) {
*ssrc = local_ssrc;
}
return true;
} else {
// Check whether this is a receiving channel.
for (ChannelMap::const_iterator it = mux_channels_.begin();
it != mux_channels_.end(); ++it) {
if (it->second == channel_num) {
*ssrc = it->first;
return true;
}
}
}
return false;
}
void RtcVoiceMediaChannel::OnError(uint32 ssrc, int error) {
SignalMediaError(ssrc, WebRTCErrorToChannelError(error));
}
int RtcVoiceMediaChannel::GetChannel(uint32 ssrc) {
ChannelMap::iterator it = mux_channels_.find(ssrc);
return (it != mux_channels_.end()) ? it->second : -1;
}
int RtcVoiceMediaChannel::GetOutputLevel(int channel) {
unsigned int ulevel;
int ret =
engine()->webrtc()->volume()->GetSpeechOutputLevel(channel, ulevel);
return (ret == 0) ? static_cast<int>(ulevel) : -1;
}
bool RtcVoiceMediaChannel::EnableRtcp(int channel) {
if (engine()->webrtc()->rtp()->SetRTCPStatus(channel, true) == -1) {
LOG_RTCERR2(SetRTCPStatus, channel, true);
return false;
}
return true;
}
bool RtcVoiceMediaChannel::SetPlayout(int channel, bool playout) {
if (playout) {
LOG(INFO) << "Starting playout for channel #" << channel;
if (engine()->webrtc()->base()->StartPlayout(channel) == -1) {
LOG_RTCERR1(StartPlayout, channel);
return false;
}
} else {
LOG(INFO) << "Stopping playout for channel #" << channel;
engine()->webrtc()->base()->StopPlayout(channel);
}
return true;
}
uint32 RtcVoiceMediaChannel::ParseSsrc(const void* data, size_t len,
bool rtcp) {
size_t ssrc_pos = (!rtcp) ? 8 : 4;
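// (RTP fixed header: V|P|X|CC and M|PT bytes, then the 2-byte sequence
// number and 4-byte timestamp, so the SSRC starts at byte 8. An RTCP header
// is V|P|RC, PT, and a 2-byte length, so the sender SSRC starts at byte 4.)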
uint32 ssrc = 0;
if (len >= (ssrc_pos + sizeof(ssrc))) {
ssrc = talk_base::GetBE32(static_cast<const char*>(data) + ssrc_pos);
}
return ssrc;
}
// Convert WebRTC error code into VoiceMediaChannel::Error enum.
cricket::VoiceMediaChannel::Error RtcVoiceMediaChannel::WebRTCErrorToChannelError(
int err_code) {
switch (err_code) {
case 0:
return ERROR_NONE;
case VE_CANNOT_START_RECORDING:
case VE_MIC_VOL_ERROR:
case VE_GET_MIC_VOL_ERROR:
case VE_CANNOT_ACCESS_MIC_VOL:
return ERROR_REC_DEVICE_OPEN_FAILED;
case VE_SATURATION_WARNING:
return ERROR_REC_DEVICE_SATURATION;
case VE_REC_DEVICE_REMOVED:
return ERROR_REC_DEVICE_REMOVED;
case VE_RUNTIME_REC_WARNING:
case VE_RUNTIME_REC_ERROR:
return ERROR_REC_RUNTIME_ERROR;
case VE_CANNOT_START_PLAYOUT:
case VE_SPEAKER_VOL_ERROR:
case VE_GET_SPEAKER_VOL_ERROR:
case VE_CANNOT_ACCESS_SPEAKER_VOL:
return ERROR_PLAY_DEVICE_OPEN_FAILED;
case VE_RUNTIME_PLAY_WARNING:
case VE_RUNTIME_PLAY_ERROR:
return ERROR_PLAY_RUNTIME_ERROR;
default:
return VoiceMediaChannel::ERROR_OTHER;
}
}
} // namespace webrtc

View File

@ -1,244 +0,0 @@
/*
* libjingle
* Copyright 2004--2011, Google Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
* EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef TALK_APP_WEBRTC_AUDIOMEDIAENGINE_H_
#define TALK_APP_WEBRTC_AUDIOMEDIAENGINE_H_
#include <map>
#include <string>
#include <vector>
#include "talk/base/buffer.h"
#include "talk/base/byteorder.h"
#include "talk/base/logging.h"
#include "talk/base/scoped_ptr.h"
#include "talk/base/stream.h"
#include "talk/session/phone/channel.h"
#include "talk/session/phone/mediaengine.h"
#include "talk/session/phone/rtputils.h"
#include "talk/app/voiceengine.h"
namespace cricket {
class SoundclipMedia;
class VoiceMediaChannel;
}
namespace webrtc {
// MonitorStream is used to monitor a stream coming from WebRTC.
// For now we just dump the data.
class MonitorStream : public OutStream {
virtual bool Write(const void *buf, int len) {
return true;
}
};
class AudioDeviceModule;
class RtcVoiceMediaChannel;
// RtcVoiceEngine is a class to be used with CompositeMediaEngine.
// It uses the WebRTC VoiceEngine library for audio handling.
class RtcVoiceEngine
: public VoiceEngineObserver,
public TraceCallback {
public:
RtcVoiceEngine(); // NOLINT
// Dependency injection for testing.
explicit RtcVoiceEngine(RtcWrapper* rtc_wrapper);
~RtcVoiceEngine();
bool Init();
void Terminate();
int GetCapabilities();
cricket::VoiceMediaChannel* CreateChannel();
cricket::SoundclipMedia* CreateSoundclip() { return NULL; }
bool SetDevices(const cricket::Device* in_device,
const cricket::Device* out_device);
bool SetOptions(int options);
bool GetOutputVolume(int* level);
bool SetOutputVolume(int level);
int GetInputLevel();
bool SetLocalMonitor(bool enable);
const std::vector<cricket::AudioCodec>& codecs();
bool FindCodec(const cricket::AudioCodec& codec);
bool FindRtcCodec(const cricket::AudioCodec& codec, CodecInst* gcodec);
void SetLogging(int min_sev, const char* filter);
// For tracking WebRTC channels. Needed because we have to pause them
// all when switching devices.
// May only be called by RtcVoiceMediaChannel.
void RegisterChannel(RtcVoiceMediaChannel *channel);
void UnregisterChannel(RtcVoiceMediaChannel *channel);
RtcWrapper* webrtc() { return rtc_wrapper_.get(); }
int GetLastRtcError();
private:
typedef std::vector<RtcVoiceMediaChannel *> ChannelList;
struct CodecPref {
const char* name;
int clockrate;
};
void Construct();
bool InitInternal();
void ApplyLogging();
virtual void Print(const TraceLevel level,
const char* traceString, const int length);
virtual void CallbackOnError(const int errCode, const int channel);
static int GetCodecPreference(const char *name, int clockrate);
// Given the device type, name, and id, find WebRTC's device id. Return true and
// set the output parameter rtc_id if successful.
bool FindAudioDeviceId(
bool is_input, const std::string& dev_name, int dev_id, int* rtc_id);
bool FindChannelAndSsrc(int channel_num,
RtcVoiceMediaChannel** channel,
uint32* ssrc) const;
static const int kDefaultLogSeverity = talk_base::LS_WARNING;
static const CodecPref kCodecPrefs[];
// The primary instance of WebRTC VoiceEngine.
talk_base::scoped_ptr<RtcWrapper> rtc_wrapper_;
int log_level_;
std::vector<cricket::AudioCodec> codecs_;
talk_base::scoped_ptr<MonitorStream> monitor_;
// TODO: Can't use scoped_ptr here since ~AudioDeviceModule is protected.
AudioDeviceModule* adm_;
ChannelList channels_;
talk_base::CriticalSection channels_cs_;
};
// RtcMediaChannel is a class that implements the common WebRTC channel
// functionality.
template <class T, class E>
class RtcMediaChannel : public T, public Transport {
public:
RtcMediaChannel(E *engine, int channel)
: engine_(engine), audio_channel_(channel), sequence_number_(-1) {}
E *engine() { return engine_; }
int audio_channel() const { return audio_channel_; }
bool valid() const { return audio_channel_ != -1; }
protected:
// implements Transport interface
virtual int SendPacket(int channel, const void *data, int len) {
if (!T::network_interface_) {
return -1;
}
const uint8* header = static_cast<const uint8*>(data);
sequence_number_ = talk_base::GetBE16(header + 2);
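// (Bytes 2-3 of the fixed RTP header hold the big-endian sequence number;
// it is cached so sequence numbering can be resumed when sending restarts.)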
talk_base::Buffer packet(data, len, cricket::kMaxRtpPacketLen);
return T::network_interface_->SendPacket(&packet) ? len : -1;
}
virtual int SendRTCPPacket(int channel, const void *data, int len) {
if (!T::network_interface_) {
return -1;
}
talk_base::Buffer packet(data, len, cricket::kMaxRtpPacketLen);
return T::network_interface_->SendRtcp(&packet) ? len : -1;
}
int sequence_number() {
return sequence_number_;
}
private:
E *engine_;
int audio_channel_;
int sequence_number_;
};
// RtcVoiceMediaChannel is an implementation of VoiceMediaChannel that uses
// WebRTC Voice Engine.
class RtcVoiceMediaChannel
: public RtcMediaChannel<cricket::VoiceMediaChannel, RtcVoiceEngine> {
public:
explicit RtcVoiceMediaChannel(RtcVoiceEngine *engine);
virtual ~RtcVoiceMediaChannel();
virtual bool SetOptions(int options);
virtual bool SetRecvCodecs(const std::vector<cricket::AudioCodec> &codecs);
virtual bool SetSendCodecs(const std::vector<cricket::AudioCodec> &codecs);
virtual bool SetPlayout(bool playout);
bool GetPlayout();
virtual bool SetSend(cricket::SendFlags send);
cricket::SendFlags GetSend();
virtual bool AddStream(uint32 ssrc);
virtual bool RemoveStream(uint32 ssrc);
virtual bool GetActiveStreams(cricket::AudioInfo::StreamList* actives);
virtual int GetOutputLevel();
virtual bool SetRingbackTone(const char *buf, int len);
virtual bool PlayRingbackTone(uint32 ssrc, bool play, bool loop);
virtual bool PlayRingbackTone(bool play, bool loop);
virtual bool PressDTMF(int event, bool playout);
virtual void OnPacketReceived(talk_base::Buffer* packet);
virtual void OnRtcpReceived(talk_base::Buffer* packet);
virtual void SetSendSsrc(uint32 id);
virtual bool SetRtcpCName(const std::string& cname);
virtual bool Mute(bool mute);
virtual bool SetRecvRtpHeaderExtensions(
const std::vector<cricket::RtpHeaderExtension>& extensions) { return false; }
virtual bool SetSendRtpHeaderExtensions(
const std::vector<cricket::RtpHeaderExtension>& extensions) { return false; }
virtual bool SetSendBandwidth(bool autobw, int bps) { return false; }
virtual bool GetStats(cricket::VoiceMediaInfo* info);
virtual void GetLastMediaError(uint32* ssrc,
VoiceMediaChannel::Error* error);
bool FindSsrc(int channel_num, uint32* ssrc);
void OnError(uint32 ssrc, int error);
virtual int GetMediaChannelId() { return audio_channel(); }
protected:
int GetLastRtcError() { return engine()->GetLastRtcError(); }
int GetChannel(uint32 ssrc);
int GetOutputLevel(int channel);
bool EnableRtcp(int channel);
bool SetPlayout(int channel, bool playout);
static uint32 ParseSsrc(const void* data, size_t len, bool rtcp);
static Error WebRTCErrorToChannelError(int err_code);
private:
typedef std::map<uint32, int> ChannelMap;
int channel_options_;
bool playout_;
cricket::SendFlags send_;
ChannelMap mux_channels_; // for multiple sources
// mux_channels_ can be read from WebRTC callback thread. Accesses off the
// WebRTC thread must be synchronized with edits on the worker thread. Reads
// on the worker thread are ok.
mutable talk_base::CriticalSection mux_channels_cs_;
};
} // namespace webrtc
#endif // TALK_APP_WEBRTC_AUDIOMEDIAENGINE_H_

View File

@ -24,54 +24,48 @@
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <iostream>
#include <string>
#include "talk/base/gunit.h"
#include "talk/app/webrtc_json.h"
namespace webrtc {
Json::Value JsonValueFromString(const std::string& json) {
  Json::Reader reader;
  Json::Value value;
  EXPECT_TRUE(reader.parse(json, value, false));
  return value;
}
class WebRTCJsonTest : public testing::Test {
 public:
  WebRTCJsonTest() {}
  ~WebRTCJsonTest() {}
};
TEST_F(WebRTCJsonTest, TestParseConfig) {
  Json::Value value(JsonValueFromString(
      "{"
      "  \"connectionmediator\": \"https://somewhere.example.com/conneg\","
      "  \"stun_service\": { "
      "    \"host\" : \"stun.service.example.com\","
      "    \"service\" : \"stun\","
      "    \"protocol\" : \"udp\""
      "  }"
      "}"));
  std::string c;
  EXPECT_TRUE(GetConnectionMediator(value, c));
  std::cout << " --- connectionmediator --- : " << c << std::endl;
  StunServiceDetails stun;
  EXPECT_TRUE(GetStunServer(value, stun));
  std::cout << " --- stun host --- : " << stun.host << std::endl;
  std::cout << " --- stun service --- : " << stun.service << std::endl;
  std::cout << " --- stun protocol --- : " << stun.protocol << std::endl;
}
TEST_F(WebRTCJsonTest, TestLocalBlob) {
  EXPECT_TRUE(FromSessionDescriptionToJson());
}
} // namespace webrtc

#include "talk/app/webrtc/local_stream_dev.h"
namespace webrtc {
scoped_refptr<LocalStream> LocalStream::Create(const std::string& label) {
  // LocalStream is instantiated through the reference-counting wrapper.
  RefCountImpl<LocalStreamImpl>* stream =
      new RefCountImpl<LocalStreamImpl>(label);
  return stream;
}
LocalStreamImpl::LocalStreamImpl(const std::string& label)
    : label_(label),
      ready_state_(kInitializing) {
}
// Implement MediaStream.
const std::string& LocalStreamImpl::label() {
  return label_;
}
scoped_refptr<MediaStreamTrackList> LocalStreamImpl::tracks() {
  return this;
}
MediaStream::ReadyState LocalStreamImpl::readyState() {
  return ready_state_;
}
// Implement MediaStreamTrackList.
size_t LocalStreamImpl::count() {
  return tracks_.size();
}
scoped_refptr<MediaStreamTrack> LocalStreamImpl::at(size_t index) {
  return tracks_[index];
}
bool LocalStreamImpl::AddTrack(MediaStreamTrack* track) {
  if (ready_state_ != kInitializing)
    return false;
  tracks_.push_back(track);
  return true;
}
} // namespace webrtc

View File

@ -0,0 +1,75 @@
/*
* libjingle
* Copyright 2004--2011, Google Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
* EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef TALK_APP_WEBRTC_LOCAL_STREAM_H_
#define TALK_APP_WEBRTC_LOCAL_STREAM_H_
#include <vector>
#include "talk/app/webrtc/notifier_impl.h"
#include "talk/app/webrtc/ref_count.h"
#include "talk/app/webrtc/stream_dev.h"
#include "talk/app/webrtc/scoped_refptr.h"
namespace webrtc {
/////////////////////////////////////////////
// Local streams are created by the PeerConnection client and provided to a
// PeerConnection object with the call PeerConnection::AddStream.
class LocalStreamImpl
: public LocalStream,
public MediaStreamTrackList {
public:
// static LocalStream* Create(const std::string& label);
// Implement LocalStream.
virtual bool AddTrack(MediaStreamTrack* track);
// Implement MediaStream
virtual const std::string& label();
virtual scoped_refptr<MediaStreamTrackList> tracks();
virtual ReadyState readyState();
// Implement MediaStreamTrackList.
virtual size_t count();
virtual scoped_refptr<MediaStreamTrack> at(size_t index);
protected:
LocalStreamImpl(const std::string& label);
std::string label_;
ReadyState ready_state_;
std::vector<scoped_refptr<MediaStreamTrack> > tracks_;
};
} // namespace webrtc
#endif // TALK_APP_WEBRTC_LOCAL_STREAM_H_

View File

@ -0,0 +1,36 @@
#ifndef TALK_APP_WEBRTC_NOTIFIER_IMPL_H_
#define TALK_APP_WEBRTC_NOTIFIER_IMPL_H_
// Implement a template version of a notifier.
// TODO: Allow multiple observers (e.g. keep a std::list of them).
#include "talk/app/webrtc/stream_dev.h"
namespace webrtc {
template <class T>
class NotifierImpl : public T {
public:
NotifierImpl() : observer_(NULL) {
}
virtual void RegisterObserver(Observer* observer) {
observer_ = observer;
}
virtual void UnregisterObserver(Observer*) {
observer_ = NULL;
}
void FireOnChanged() {
if (observer_)
observer_->OnChanged();
}
protected:
Observer* observer_;
};
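// Illustrative usage (a sketch, not from the original header): a stream type
// can derive from NotifierImpl<> to inherit the observer plumbing.
//   class ObservableStream : public NotifierImpl<LocalStream> { ... };
//   stream->RegisterObserver(&observer);
//   stream->FireOnChanged();  // invokes observer.OnChanged()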
} // namespace webrtc
#endif  // TALK_APP_WEBRTC_NOTIFIER_IMPL_H_

View File

@ -0,0 +1,47 @@
/*
* libjingle
* Copyright 2004--2011, Google Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
* EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "talk/app/webrtc/peerconnection_impl.h"
namespace webrtc {
PeerConnection* PeerConnection::Create(const std::string& config,
cricket::PortAllocator* port_allocator,
cricket::MediaEngine* media_engine,
talk_base::Thread* worker_thread,
cricket::DeviceManager* device_manager) {
return new PeerConnectionImpl(config, port_allocator, media_engine,
worker_thread, device_manager);
}
PeerConnection* PeerConnection::Create(const std::string& config,
cricket::PortAllocator* port_allocator,
talk_base::Thread* worker_thread) {
return new PeerConnectionImpl(config, port_allocator, worker_thread);
}
} // namespace webrtc

View File

@ -0,0 +1,142 @@
/*
* libjingle
* Copyright 2004--2011, Google Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
* EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef TALK_APP_WEBRTC_PEERCONNECTION_H_
#define TALK_APP_WEBRTC_PEERCONNECTION_H_
// TODO(mallinath) - Add a factory class or some kind of PeerConnection
// manager to support instantiating multiple PeerConnection objects. That
// class would create the ChannelManager object and pass it to each
// PeerConnection object. Otherwise each PeerConnection object will have its
// own ChannelManager, and hence its own MediaEngine and
// VoiceEngine/VideoEngine.
#include <string>
namespace cricket {
class DeviceManager;
class MediaEngine;
class PortAllocator;
class VideoRenderer;
}
namespace talk_base {
class Thread;
}
namespace webrtc {
class PeerConnectionObserver {
public:
virtual void OnInitialized() = 0;
virtual void OnError() = 0;
// Serialized signaling message.
virtual void OnSignalingMessage(const std::string& msg) = 0;
// Triggered when a local stream has been added and initialized
virtual void OnLocalStreamInitialized(const std::string& stream_id,
bool video) = 0;
// Triggered when a remote peer accepts a media connection.
virtual void OnAddStream(const std::string& stream_id, bool video) = 0;
// Triggered when a remote peer closes a media stream.
virtual void OnRemoveStream(const std::string& stream_id, bool video) = 0;
protected:
// Dtor protected as objects shouldn't be deleted via this interface.
~PeerConnectionObserver() {}
};
class PeerConnection {
public:
virtual ~PeerConnection() {}
static PeerConnection* Create(const std::string& config,
cricket::PortAllocator* port_allocator,
cricket::MediaEngine* media_engine,
talk_base::Thread* worker_thread,
cricket::DeviceManager* device_manager);
static PeerConnection* Create(const std::string& config,
cricket::PortAllocator* port_allocator,
talk_base::Thread* worker_thread);
// Initialize
virtual bool Init() = 0;
// Register a listener
virtual void RegisterObserver(PeerConnectionObserver* observer) = 0;
// Handles a serialized signaling message (in JSON format) from the remote peer.
virtual bool SignalingMessage(const std::string& msg) = 0;
// Asynchronously adds a local stream device to the peer
// connection. The operation is complete when
// PeerConnectionObserver::OnLocalStreamInitialized is called.
virtual bool AddStream(const std::string& stream_id, bool video) = 0;
// Asynchronously removes a local stream device from the peer
// connection. The operation is complete when
// PeerConnectionObserver::OnRemoveStream is called.
virtual bool RemoveStream(const std::string& stream_id) = 0;
// Inform the peerconnection that it is time to return the signaling
// information. The operation is complete when
// PeerConnectionObserver::OnSignalingMessage is called.
virtual bool Connect() = 0;
// Remove all the streams and tear down the session.
// After Close() is called, OnSignalingMessage will be invoked
// asynchronously, and before OnSignalingMessage is called,
// OnRemoveStream will be called for each stream that was active.
// TODO(ronghuawu): Add an event such as onclose, or onreadystatechanged,
// fired when the readystate reaches the closed state (no more streams in
// the peerconnection object).
virtual void Close() = 0;
// Set the audio input & output devices based on the given device name.
// An empty device name means to use the default audio device.
virtual bool SetAudioDevice(const std::string& wave_in_device,
const std::string& wave_out_device,
int opts) = 0;
// Set the video renderer for the camera preview.
virtual bool SetLocalVideoRenderer(cricket::VideoRenderer* renderer) = 0;
// Set the video renderer for the specified stream.
virtual bool SetVideoRenderer(const std::string& stream_id,
cricket::VideoRenderer* renderer) = 0;
// Set the video capture device.
// For Chromium, cam_device should be the capture session id.
// For a standalone app, cam_device is the camera name; the default
// capture device is selected when cam_device is "".
virtual bool SetVideoCapture(const std::string& cam_device) = 0;
};
} // namespace webrtc
#endif // TALK_APP_WEBRTC_PEERCONNECTION_H_
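A minimal sketch of how a client might drive this interface, assuming an
application-provided observer, port allocator, and started worker thread; the
function name, stream id, and STUN host below are illustrative only, and the
Create / RegisterObserver / Init order follows the sample client:

#include "talk/app/webrtc/peerconnection.h"

void SketchOutgoingCall(cricket::PortAllocator* allocator,
                        talk_base::Thread* worker_thread,
                        webrtc::PeerConnectionObserver* observer) {
  // Config format: "<STUN|STUNS|TURN|TURNS> host[:port]".
  webrtc::PeerConnection* pc = webrtc::PeerConnection::Create(
      "STUN stun.l.google.com:19302", allocator, worker_thread);
  pc->RegisterObserver(observer);
  if (!pc->Init()) {
    delete pc;
    return;
  }
  pc->AddStream("local_video", true);  // completes via OnLocalStreamInitialized
  pc->Connect();                       // completes via OnSignalingMessage
  // ... exchange signaling messages with the remote peer, render media ...
  pc->Close();  // OnRemoveStream per active stream, then OnSignalingMessage
  delete pc;
}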

View File

@ -25,15 +25,20 @@
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "talk/app/webrtcsession.h"
#include "talk/app/webrtc/peerconnection_impl.h"
namespace webrtc {
PeerConnection* PeerConnection::Create(
const std::string& config,
cricket::PortAllocator* port_allocator,
talk_base::Thread* worker_thread,
cricket::DeviceManager* device_manager) {
return new PeerConnectionImpl(config, port_allocator,
worker_thread, device_manager);
}
const std::string WebRTCSession::kOutgoingDirection = "s";
const std::string WebRTCSession::kIncomingDirection = "r";
//const std::string WebRTCSession::kAudioType = "a";
//const std::string WebRTCSession::kVideoType = "v";
//const std::string WebRTCSession::kTestType = "t";
} /* namespace webrtc */
PeerConnection* PeerConnection::Create(const std::string& config) {
return new PeerConnectionImpl(config);
}
} // namespace webrtc

View File

@ -0,0 +1,112 @@
/*
* libjingle
* Copyright 2004--2011, Google Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
* EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef TALK_APP_WEBRTC_PEERCONNECTION_H_
#define TALK_APP_WEBRTC_PEERCONNECTION_H_
#include <string>
#include "talk/app/webrtc/stream_dev.h"
namespace cricket {
class PortAllocator;
class DeviceManager;
}
namespace talk_base {
class Thread;
}
namespace webrtc {
/////////////////////////////////////////////
class PeerConnectionObserver {
public:
enum Readiness {
kNegotiating,
kActive,
};
virtual void OnError() = 0;
virtual void OnMessage(const std::string& msg) = 0;
// Serialized signaling message. The first message will be the initial offer.
virtual void OnSignalingMessage(const std::string& msg) = 0;
virtual void OnStateChange(Readiness state) = 0;
// Triggered when media is received on a new stream from remote peer.
// The label is unique for a certain peer_id.
virtual void OnAddStream(scoped_refptr<RemoteStream> stream) = 0;
// Triggered when a remote peer closes a stream.
virtual void OnRemoveStream(scoped_refptr<RemoteStream> stream) = 0;
protected:
// Dtor protected as objects shouldn't be deleted via this interface.
~PeerConnectionObserver() {}
};
class StreamCollection : public RefCount {
public:
virtual size_t count() = 0;
virtual MediaStream* at(size_t index) = 0;
};
class PeerConnection {
public:
// Start negotiation. The negotiation behavior depends on whether
// SignalingMessage and AddStream have been called prior to this function.
virtual bool StartNegotiation() = 0;
// Handles a serialized signaling message (in JSON format) from the remote peer.
virtual bool SignalingMessage(const std::string& msg) = 0;
// Sends the msg over a data stream.
virtual bool Send(const std::string& msg) = 0;
// Accessor methods to active local streams.
virtual scoped_refptr<StreamCollection> local_streams() = 0;
// Accessor methods to remote streams.
virtual scoped_refptr<StreamCollection> remote_streams() = 0;
// Add a new local stream.
virtual void AddStream(LocalStream* stream) = 0;
// Remove a local stream and stop sending it.
virtual void RemoveStream(LocalStream* stream) = 0;
virtual ~PeerConnection() {}
};
} // namespace webrtc
#endif // TALK_APP_WEBRTC_PEERCONNECTION_H_
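For reference, a do-nothing observer against this draft interface could look
like the following (the class name is hypothetical):

class NullPeerConnectionObserver : public webrtc::PeerConnectionObserver {
 public:
  virtual void OnError() {}
  virtual void OnMessage(const std::string& msg) {}
  virtual void OnSignalingMessage(const std::string& msg) {
    // Hand |msg| to the application's signaling channel here.
  }
  virtual void OnStateChange(Readiness state) {}
  virtual void OnAddStream(scoped_refptr<webrtc::RemoteStream> stream) {}
  virtual void OnRemoveStream(scoped_refptr<webrtc::RemoteStream> stream) {}
};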

View File

@ -0,0 +1,615 @@
/*
* libjingle
* Copyright 2004--2011, Google Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
* EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <vector>
#include "talk/app/webrtc/peerconnection_impl.h"
#include "talk/base/basicpacketsocketfactory.h"
#include "talk/base/helpers.h"
#include "talk/base/stringencode.h"
#include "talk/base/logging.h"
#include "talk/p2p/client/basicportallocator.h"
#include "talk/app/webrtc/webrtcsession.h"
#include "talk/app/webrtc/webrtc_json.h"
using talk_base::ThreadManager;
namespace webrtc {
// The number of tokens in the config string.
static const size_t kConfigTokens = 2;
// The default STUN port.
static const int kDefaultStunPort = 3478;
enum {
MSG_WEBRTC_ADDSTREAM = 1,
MSG_WEBRTC_REMOVESTREAM,
MSG_WEBRTC_SIGNALINGMESSAGE,
MSG_WEBRTC_SETAUDIODEVICE,
MSG_WEBRTC_SETLOCALRENDERER,
MSG_WEBRTC_SETVIDEORENDERER,
MSG_WEBRTC_SETVIDEOCAPTURE,
MSG_WEBRTC_CONNECT,
MSG_WEBRTC_CLOSE,
MSG_WEBRTC_INIT_CHANNELMANAGER,
MSG_WEBRTC_RELEASE,
};
struct AddStreamParams : public talk_base::MessageData {
AddStreamParams(const std::string& stream_id, bool video)
: stream_id(stream_id), video(video) {}
std::string stream_id;
bool video;
bool result;
};
struct RemoveStreamParams : public talk_base::MessageData {
explicit RemoveStreamParams(const std::string& stream_id)
: stream_id(stream_id) {}
std::string stream_id;
bool result;
};
struct SignalingMsgParams : public talk_base::MessageData {
explicit SignalingMsgParams(const std::string& signaling_message)
: signaling_message(signaling_message) {}
std::string signaling_message;
bool result;
};
struct SetAudioDeviceParams : public talk_base::MessageData {
SetAudioDeviceParams(const std::string& wave_in_device,
const std::string& wave_out_device,
int opts)
: wave_in_device(wave_in_device), wave_out_device(wave_out_device),
opts(opts) {}
std::string wave_in_device;
std::string wave_out_device;
int opts;
bool result;
};
struct SetLocalRendererParams : public talk_base::MessageData {
explicit SetLocalRendererParams(cricket::VideoRenderer* renderer)
: renderer(renderer) {}
cricket::VideoRenderer* renderer;
bool result;
};
struct SetVideoRendererParams : public talk_base::MessageData {
SetVideoRendererParams(const std::string& stream_id,
cricket::VideoRenderer* renderer)
: stream_id(stream_id), renderer(renderer) {}
std::string stream_id;
cricket::VideoRenderer* renderer;
bool result;
};
struct SetVideoCaptureParams : public talk_base::MessageData {
explicit SetVideoCaptureParams(const std::string& cam_device)
: cam_device(cam_device) {}
std::string cam_device;
bool result;
};
struct ConnectParams : public talk_base::MessageData {
bool result;
};
PeerConnectionImpl::PeerConnectionImpl(const std::string& config,
cricket::PortAllocator* port_allocator,
cricket::MediaEngine* media_engine,
talk_base::Thread* worker_thread,
cricket::DeviceManager* device_manager)
: config_(config),
port_allocator_(port_allocator),
media_engine_(media_engine),
worker_thread_(worker_thread),
device_manager_(device_manager),
signaling_thread_(NULL),
initialized_(false),
ready_state_(NEW),
service_type_(SERVICE_COUNT),
event_callback_(NULL),
session_(NULL),
incoming_(false) {
}
PeerConnectionImpl::PeerConnectionImpl(const std::string& config,
cricket::PortAllocator* port_allocator,
talk_base::Thread* worker_thread)
: config_(config),
port_allocator_(port_allocator),
media_engine_(NULL),
worker_thread_(worker_thread),
device_manager_(NULL),
signaling_thread_(NULL),
initialized_(false),
ready_state_(NEW),
service_type_(SERVICE_COUNT),
event_callback_(NULL),
session_(NULL),
incoming_(false) {
}
PeerConnectionImpl::~PeerConnectionImpl() {
signaling_thread_->Send(this, MSG_WEBRTC_RELEASE);
}
void PeerConnectionImpl::Release_s() {
session_.reset();
channel_manager_.reset();
}
bool PeerConnectionImpl::Init() {
ASSERT(!initialized_);
std::vector<talk_base::SocketAddress> stun_hosts;
talk_base::SocketAddress stun_addr;
if (!ParseConfigString(config_, &stun_addr))
return false;
stun_hosts.push_back(stun_addr);
signaling_thread_.reset(new talk_base::Thread());
ASSERT(signaling_thread_.get());
if (!signaling_thread_->SetName("signaling thread", this) ||
!signaling_thread_->Start()) {
LOG(WARNING) << "Failed to start libjingle signaling thread";
return false;
}
signaling_thread_->Post(this, MSG_WEBRTC_INIT_CHANNELMANAGER);
return true;
}
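// The config string passed to Create() is expected to look like
// "STUN stun.l.google.com:19302" or "TURN relay.example.com"; when the
// ":port" suffix is omitted, kDefaultStunPort is assumed. (The hosts in
// this comment are illustrative.)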
bool PeerConnectionImpl::ParseConfigString(
const std::string& config, talk_base::SocketAddress* stun_addr) {
std::vector<std::string> tokens;
// Parse the |config| argument, not the config_ member, so this helper is
// self-contained.
talk_base::tokenize(config, ' ', &tokens);
if (tokens.size() != kConfigTokens) {
LOG(WARNING) << "Invalid config string";
return false;
}
service_type_ = SERVICE_COUNT;
// NOTE: Must be in the same order as the enum.
static const char* kValidServiceTypes[SERVICE_COUNT] = {
"STUN", "STUNS", "TURN", "TURNS"
};
const std::string& type = tokens[0];
for (size_t i = 0; i < SERVICE_COUNT; ++i) {
if (type.compare(kValidServiceTypes[i]) == 0) {
service_type_ = static_cast<ServiceType>(i);
break;
}
}
if (service_type_ == SERVICE_COUNT) {
LOG(WARNING) << "Invalid service type: " << type;
return false;
}
std::string service_address = tokens[1];
int port;
tokens.clear();
talk_base::tokenize(service_address, ':', &tokens);
if (tokens.size() != kConfigTokens) {
port = kDefaultStunPort;
} else {
// Strip the ":port" suffix so only the host part is passed to SetIP().
service_address = tokens[0];
port = atoi(tokens[1].c_str());
if (port <= 0 || port > 0xffff) {
LOG(WARNING) << "Invalid port: " << tokens[1];
return false;
}
}
stun_addr->SetIP(service_address);
stun_addr->SetPort(port);
return true;
}
void PeerConnectionImpl::RegisterObserver(PeerConnectionObserver* observer) {
// This assert is to catch cases where two observer pointers are registered.
// We only support one and if another is to be used, the current one must be
// cleared first.
ASSERT(observer == NULL || event_callback_ == NULL);
event_callback_ = observer;
}
bool PeerConnectionImpl::SignalingMessage(
const std::string& signaling_message) {
SignalingMsgParams* msg = new SignalingMsgParams(signaling_message);
signaling_thread_->Post(this, MSG_WEBRTC_SIGNALINGMESSAGE, msg);
return true;
}
bool PeerConnectionImpl::SignalingMessage_s(const std::string& msg) {
// Deserialize signaling message
cricket::SessionDescription* incoming_sdp = NULL;
std::vector<cricket::Candidate> candidates;
if (!ParseJSONSignalingMessage(msg, incoming_sdp, &candidates)) {
if (event_callback_)
event_callback_->OnError();
return false;
}
bool ret = false;
if (!session_.get()) {
// No session exists yet, so this message starts an incoming call.
std::string sid;
talk_base::CreateRandomString(8, &sid);
std::string direction("r");
session_.reset(CreateMediaSession(sid, direction));
ASSERT(session_.get() != NULL);
incoming_ = true;
ret = session_->OnInitiateMessage(incoming_sdp, candidates);
} else {
ret = session_->OnRemoteDescription(incoming_sdp, candidates);
}
if (!ret && event_callback_)
event_callback_->OnError();
return ret;
}
WebRTCSession* PeerConnectionImpl::CreateMediaSession(
const std::string& id, const std::string& dir) {
ASSERT(port_allocator_ != NULL);
WebRTCSession* session = new WebRTCSession(id, dir,
port_allocator_, channel_manager_.get(),
signaling_thread_.get());
if (session->Initiate()) {
session->SignalRemoveStream.connect(
this,
&PeerConnectionImpl::SendRemoveSignal);
session->SignalAddStream.connect(
this,
&PeerConnectionImpl::OnAddStream);
session->SignalRemoveStream2.connect(
this,
&PeerConnectionImpl::OnRemoveStream2);
session->SignalRtcMediaChannelCreated.connect(
this,
&PeerConnectionImpl::OnRtcMediaChannelCreated);
session->SignalLocalDescription.connect(
this,
&PeerConnectionImpl::OnLocalDescription);
session->SignalFailedCall.connect(
this,
&PeerConnectionImpl::OnFailedCall);
} else {
delete session;
session = NULL;
}
return session;
}
void PeerConnectionImpl::SendRemoveSignal(WebRTCSession* session) {
if (event_callback_) {
std::string message;
if (GetJSONSignalingMessage(session->remote_description(),
session->local_candidates(), &message)) {
event_callback_->OnSignalingMessage(message);
// TODO(ronghuawu): Notify the client when the PeerConnection object
// doesn't have any streams. Something like the onreadystatechanged
// + setting readyState to 'CLOSED'.
}
}
}
bool PeerConnectionImpl::AddStream(const std::string& stream_id, bool video) {
AddStreamParams* params = new AddStreamParams(stream_id, video);
signaling_thread_->Post(this, MSG_WEBRTC_ADDSTREAM, params, true);
return true;
}
bool PeerConnectionImpl::AddStream_s(const std::string& stream_id, bool video) {
if (!session_.get()) {
// If no session exists yet, this is an outgoing call.
std::string sid;
talk_base::CreateRandomString(8, &sid);
session_.reset(CreateMediaSession(sid, "s"));
if (session_.get() == NULL) {
ASSERT(false && "failed to initialize a session");
return false;
}
}
bool ret = false;
if (session_->HasStream(stream_id)) {
ASSERT(false && "A stream with this name already exists");
} else {
if (!video) {
ret = !session_->HasAudioStream() &&
session_->CreateVoiceChannel(stream_id);
} else {
ret = !session_->HasVideoStream() &&
session_->CreateVideoChannel(stream_id);
}
}
return ret;
}
bool PeerConnectionImpl::RemoveStream(const std::string& stream_id) {
RemoveStreamParams* params = new RemoveStreamParams(stream_id);
signaling_thread_->Post(this, MSG_WEBRTC_REMOVESTREAM, params);
return true;
}
bool PeerConnectionImpl::RemoveStream_s(const std::string& stream_id) {
if (!session_.get()) {
if (event_callback_) {
event_callback_->OnError();
}
return false;
}
return session_->RemoveStream(stream_id);
}
void PeerConnectionImpl::OnLocalDescription(
const cricket::SessionDescription* desc,
const std::vector<cricket::Candidate>& candidates) {
if (!desc) {
LOG(WARNING) << "no local SDP ";
return;
}
std::string message;
if (GetJSONSignalingMessage(desc, candidates, &message)) {
if (event_callback_) {
event_callback_->OnSignalingMessage(message);
}
}
}
void PeerConnectionImpl::OnFailedCall() {
// TODO(mallinath): implement.
}
bool PeerConnectionImpl::SetAudioDevice(const std::string& wave_in_device,
const std::string& wave_out_device,
int opts) {
SetAudioDeviceParams* params = new SetAudioDeviceParams(wave_in_device,
wave_out_device, opts);
signaling_thread_->Post(this, MSG_WEBRTC_SETAUDIODEVICE, params);
return true;
}
bool PeerConnectionImpl::SetAudioDevice_s(const std::string& wave_in_device,
const std::string& wave_out_device,
int opts) {
return channel_manager_->SetAudioOptions(wave_in_device,
wave_out_device,
opts);
}
bool PeerConnectionImpl::SetLocalVideoRenderer(
cricket::VideoRenderer* renderer) {
SetLocalRendererParams* params = new SetLocalRendererParams(renderer);
signaling_thread_->Post(this, MSG_WEBRTC_SETLOCALRENDERER, params);
return true;
}
bool PeerConnectionImpl::SetLocalVideoRenderer_s(
cricket::VideoRenderer* renderer) {
return channel_manager_->SetLocalRenderer(renderer);
}
bool PeerConnectionImpl::SetVideoRenderer(const std::string& stream_id,
cricket::VideoRenderer* renderer) {
SetVideoRendererParams* params = new SetVideoRendererParams(stream_id,
renderer);
signaling_thread_->Post(this, MSG_WEBRTC_SETVIDEORENDERER, params);
return true;
}
bool PeerConnectionImpl::SetVideoRenderer_s(const std::string& stream_id,
cricket::VideoRenderer* renderer) {
if (!session_.get()) {
if (event_callback_) {
event_callback_->OnError();
}
return false;
}
return session_->SetVideoRenderer(stream_id, renderer);
}
bool PeerConnectionImpl::SetVideoCapture(const std::string& cam_device) {
SetVideoCaptureParams* params = new SetVideoCaptureParams(cam_device);
signaling_thread_->Post(this, MSG_WEBRTC_SETVIDEOCAPTURE, params);
return true;
}
bool PeerConnectionImpl::SetVideoCapture_s(const std::string& cam_device) {
return channel_manager_->SetVideoOptions(cam_device);
}
bool PeerConnectionImpl::Connect() {
signaling_thread_->Post(this, MSG_WEBRTC_CONNECT);
return true;
}
bool PeerConnectionImpl::Connect_s() {
if (!session_.get()) {
if (event_callback_) {
event_callback_->OnError();
}
return false;
}
return session_->Connect();
}
void PeerConnectionImpl::OnAddStream(const std::string& stream_id,
bool video) {
if (event_callback_) {
event_callback_->OnAddStream(stream_id, video);
}
}
void PeerConnectionImpl::OnRemoveStream2(const std::string& stream_id,
bool video) {
if (event_callback_) {
event_callback_->OnRemoveStream(stream_id, video);
}
}
void PeerConnectionImpl::OnRtcMediaChannelCreated(const std::string& stream_id,
bool video) {
if (event_callback_) {
event_callback_->OnLocalStreamInitialized(stream_id, video);
}
}
void PeerConnectionImpl::CreateChannelManager_s() {
// create cricket::ChannelManager object
ASSERT(worker_thread_ != NULL);
if (media_engine_ && device_manager_) {
channel_manager_.reset(new cricket::ChannelManager(
media_engine_, device_manager_, worker_thread_));
} else {
channel_manager_.reset(new cricket::ChannelManager(worker_thread_));
}
initialized_ = channel_manager_->Init();
if (event_callback_) {
if (initialized_)
event_callback_->OnInitialized();
else
event_callback_->OnError();
}
}
void PeerConnectionImpl::Close() {
signaling_thread_->Post(this, MSG_WEBRTC_CLOSE);
}
void PeerConnectionImpl::Close_s() {
if (!session_.get()) {
if (event_callback_)
event_callback_->OnError();
return;
}
session_->RemoveAllStreams();
}
void PeerConnectionImpl::OnMessage(talk_base::Message* message) {
talk_base::MessageData* data = message->pdata;
switch (message->message_id) {
case MSG_WEBRTC_ADDSTREAM: {
AddStreamParams* params = reinterpret_cast<AddStreamParams*>(data);
params->result = AddStream_s(params->stream_id, params->video);
delete params;
break;
}
case MSG_WEBRTC_SIGNALINGMESSAGE: {
SignalingMsgParams* params =
reinterpret_cast<SignalingMsgParams*>(data);
params->result = SignalingMessage_s(params->signaling_message);
if (!params->result && event_callback_)
event_callback_->OnError();
delete params;
break;
}
case MSG_WEBRTC_REMOVESTREAM: {
RemoveStreamParams* params = reinterpret_cast<RemoveStreamParams*>(data);
params->result = RemoveStream_s(params->stream_id);
delete params;
break;
}
case MSG_WEBRTC_SETAUDIODEVICE: {
SetAudioDeviceParams* params =
reinterpret_cast<SetAudioDeviceParams*>(data);
params->result = SetAudioDevice_s(
params->wave_in_device, params->wave_out_device, params->opts);
if (!params->result && event_callback_)
event_callback_->OnError();
delete params;
break;
}
case MSG_WEBRTC_SETLOCALRENDERER: {
SetLocalRendererParams* params =
reinterpret_cast<SetLocalRendererParams*>(data);
params->result = SetLocalVideoRenderer_s(params->renderer);
if (!params->result && event_callback_)
event_callback_->OnError();
delete params;
break;
}
case MSG_WEBRTC_SETVIDEOCAPTURE: {
SetVideoCaptureParams* params =
reinterpret_cast<SetVideoCaptureParams*>(data);
params->result = SetVideoCapture_s(params->cam_device);
if (!params->result && event_callback_)
event_callback_->OnError();
delete params;
break;
}
case MSG_WEBRTC_SETVIDEORENDERER: {
SetVideoRendererParams* params =
reinterpret_cast<SetVideoRendererParams*>(data);
params->result = SetVideoRenderer_s(params->stream_id, params->renderer);
if (!params->result && event_callback_)
event_callback_->OnError();
delete params;
break;
}
case MSG_WEBRTC_CONNECT: {
Connect_s();
break;
}
case MSG_WEBRTC_CLOSE: {
Close_s();
break;
}
case MSG_WEBRTC_INIT_CHANNELMANAGER: {
CreateChannelManager_s();
break;
}
case MSG_WEBRTC_RELEASE: {
Release_s();
break;
}
default: {
ASSERT(false);
break;
}
}
}
} // namespace webrtc

View File

@ -0,0 +1,169 @@
/*
* libjingle
* Copyright 2004--2011, Google Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
* EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef TALK_APP_WEBRTC_PEERCONNECTION_IMPL_H_
#define TALK_APP_WEBRTC_PEERCONNECTION_IMPL_H_
#include "talk/app/webrtc/peerconnection.h"
#include <string>
#include "talk/base/sigslot.h"
#include "talk/base/scoped_ptr.h"
#include "talk/base/packetsocketfactory.h"
#include "talk/base/thread.h"
#include "talk/session/phone/channelmanager.h"
namespace Json {
class Value;
}
namespace cricket {
class BasicPortAllocator;
class ChannelManager;
class DeviceManager;
class SessionDescription;
}
namespace webrtc {
class AudioDeviceModule;
class ExternalRenderer;
class WebRTCSession;
class PeerConnectionImpl : public PeerConnection,
public talk_base::MessageHandler,
public sigslot::has_slots<> {
public:
PeerConnectionImpl(const std::string& config,
cricket::PortAllocator* port_allocator,
cricket::MediaEngine* media_engine,
talk_base::Thread* worker_thread,
cricket::DeviceManager* device_manager);
PeerConnectionImpl(const std::string& config,
cricket::PortAllocator* port_allocator,
talk_base::Thread* worker_thread);
virtual ~PeerConnectionImpl();
enum ReadyState {
NEW = 0,
NEGOTIATING,
ACTIVE,
CLOSED,
};
// PeerConnection interfaces
bool Init();
void RegisterObserver(PeerConnectionObserver* observer);
bool SignalingMessage(const std::string& msg);
bool AddStream(const std::string& stream_id, bool video);
bool RemoveStream(const std::string& stream_id);
bool Connect();
void Close();
bool SetAudioDevice(const std::string& wave_in_device,
const std::string& wave_out_device, int opts);
bool SetLocalVideoRenderer(cricket::VideoRenderer* renderer);
bool SetVideoRenderer(const std::string& stream_id,
cricket::VideoRenderer* renderer);
bool SetVideoCapture(const std::string& cam_device);
// Access to the members
const std::string& config() const { return config_; }
bool incoming() const { return incoming_; }
cricket::ChannelManager* channel_manager() {
return channel_manager_.get();
}
ReadyState ready_state() const { return ready_state_; }
// Callbacks from PeerConnectionImplCallbacks
void OnAddStream(const std::string& stream_id, bool video);
void OnRemoveStream2(const std::string& stream_id, bool video);
void OnLocalDescription(
const cricket::SessionDescription* desc,
const std::vector<cricket::Candidate>& candidates);
void OnFailedCall();
void OnRtcMediaChannelCreated(const std::string& stream_id,
bool video);
private:
bool ParseConfigString(const std::string& config,
talk_base::SocketAddress* stun_addr);
void WrapChromiumThread();
void SendRemoveSignal(WebRTCSession* session);
WebRTCSession* CreateMediaSession(const std::string& id,
const std::string& dir);
virtual void OnMessage(talk_base::Message* message);
// signaling thread methods
bool AddStream_s(const std::string& stream_id, bool video);
bool SignalingMessage_s(const std::string& signaling_message);
bool RemoveStream_s(const std::string& stream_id);
bool Connect_s();
void Close_s();
bool SetAudioDevice_s(const std::string& wave_in_device,
const std::string& wave_out_device, int opts);
bool SetLocalVideoRenderer_s(cricket::VideoRenderer* renderer);
bool SetVideoRenderer_s(const std::string& stream_id,
cricket::VideoRenderer* renderer);
bool SetVideoCapture_s(const std::string& cam_device);
void CreateChannelManager_s();
void Release_s();
std::string config_;
talk_base::scoped_ptr<cricket::ChannelManager> channel_manager_;
cricket::PortAllocator* port_allocator_;
cricket::MediaEngine* media_engine_;
talk_base::Thread* worker_thread_;
cricket::DeviceManager* device_manager_;
talk_base::scoped_ptr<talk_base::Thread> signaling_thread_;
bool initialized_;
ReadyState ready_state_;
// TODO(ronghuawu/tommi): Replace the initialized_ with ready_state_.
// Fire notifications via the observer interface
// when ready_state_ changes (i.e. onReadyStateChanged()).
// NOTE: The order of the enum values must be in sync with the array
// in ParseConfigString().
enum ServiceType {
STUN,
STUNS,
TURN,
TURNS,
SERVICE_COUNT, // Also means 'invalid'.
};
ServiceType service_type_;
PeerConnectionObserver* event_callback_;
talk_base::scoped_ptr<WebRTCSession> session_;
// TODO(ronghua): There's no such concept as "incoming" and "outgoing"
// according to the spec. This will be removed in the new PeerConnection.
bool incoming_;
};
} // namespace webrtc
#endif // TALK_APP_WEBRTC_PEERCONNECTION_IMPL_H_

View File

@ -0,0 +1,43 @@
#ifndef TALK_APP_WEBRTC_REF_COUNT_H_
#define TALK_APP_WEBRTC_REF_COUNT_H_
#include <cstddef>  // for size_t
// Reference count interface.
class RefCount {
public:
virtual size_t AddRef() = 0;
virtual size_t Release() = 0;
};
template <class T>
class RefCountImpl : public T {
public:
RefCountImpl() : ref_count_(0) {
}
// The base class is constructed before the members; list it first to match
// the actual initialization order.
template<typename P>
RefCountImpl(P p) : T(p), ref_count_(0) {
}
template<typename P1, typename P2>
RefCountImpl(P1 p1, P2 p2) : T(p1, p2), ref_count_(0) {
}
virtual size_t AddRef() {
++ref_count_;
return ref_count_;
}
virtual size_t Release() {
size_t ret = --ref_count_;
if (!ref_count_) {
delete this;
}
return ret;
}
protected:
size_t ref_count_;
};
#endif // TALK_APP_WEBRTC_REF_COUNT_H_
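A sketch of how this template pairs with scoped_refptr (next file); the Foo
classes below are hypothetical:

#include "talk/app/webrtc/ref_count.h"
#include "talk/app/webrtc/scoped_refptr.h"

class Foo : public RefCount {
 public:
  virtual void Bar() = 0;
};

class FooImpl : public Foo {
 public:
  virtual void Bar() {}
};

void Use() {
  // RefCountImpl supplies AddRef()/Release(); scoped_refptr drives them.
  scoped_refptr<Foo> foo(new RefCountImpl<FooImpl>());
  foo->Bar();
}  // |foo| goes out of scope, Release() reaches zero, the object deletes itself.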

View File

@ -0,0 +1,128 @@
#ifndef TALK_APP_WEBRTC_SCOPED_REFPTR_H_
#define TALK_APP_WEBRTC_SCOPED_REFPTR_H_
// These classes were originally copied from Chromium.
//
// A smart pointer class for reference counted objects. Use this class instead
// of calling AddRef and Release manually on a reference counted object to
// avoid common memory leaks caused by forgetting to Release an object
// reference. Sample usage:
//
// class MyFoo : public RefCounted<MyFoo> {
// ...
// };
//
// void some_function() {
// scoped_refptr<MyFoo> foo = new MyFoo();
// foo->Method(param);
// // |foo| is released when this function returns
// }
//
// void some_other_function() {
// scoped_refptr<MyFoo> foo = new MyFoo();
// ...
// foo = NULL; // explicitly releases |foo|
// ...
// if (foo)
// foo->Method(param);
// }
//
// The above examples show how scoped_refptr<T> acts like a pointer to T.
// Given two scoped_refptr<T> classes, it is also possible to exchange
// references between the two objects, like so:
//
// {
// scoped_refptr<MyFoo> a = new MyFoo();
// scoped_refptr<MyFoo> b;
//
// b.swap(a);
// // now, |b| references the MyFoo object, and |a| references NULL.
// }
//
// To make both |a| and |b| in the above example reference the same MyFoo
// object, simply use the assignment operator:
//
// {
// scoped_refptr<MyFoo> a = new MyFoo();
// scoped_refptr<MyFoo> b;
//
// b = a;
// // now, |a| and |b| each own a reference to the same MyFoo object.
// }
//
template <class T>
class scoped_refptr {
public:
scoped_refptr() : ptr_(NULL) {
}
scoped_refptr(T* p) : ptr_(p) {
if (ptr_)
ptr_->AddRef();
}
scoped_refptr(const scoped_refptr<T>& r) : ptr_(r.ptr_) {
if (ptr_)
ptr_->AddRef();
}
template <typename U>
scoped_refptr(const scoped_refptr<U>& r) : ptr_(r.get()) {
if (ptr_)
ptr_->AddRef();
}
~scoped_refptr() {
if (ptr_)
ptr_->Release();
}
T* get() const { return ptr_; }
operator T*() const { return ptr_; }
T* operator->() const { return ptr_; }
// Release a pointer.
// The return value is the current pointer held by this object.
// If this object holds a NULL pointer, the return value is NULL.
// After this operation, this object will hold a NULL pointer,
// and will not own the object any more.
T* release() {
T* retVal = ptr_;
ptr_ = NULL;
return retVal;
}
scoped_refptr<T>& operator=(T* p) {
// AddRef first so that self-assignment works.
if (p)
p->AddRef();
if (ptr_)
ptr_->Release();
ptr_ = p;
return *this;
}
scoped_refptr<T>& operator=(const scoped_refptr<T>& r) {
return *this = r.ptr_;
}
template <typename U>
scoped_refptr<T>& operator=(const scoped_refptr<U>& r) {
return *this = r.get();
}
void swap(T** pp) {
T* p = ptr_;
ptr_ = *pp;
*pp = p;
}
void swap(scoped_refptr<T>& r) {
swap(&r.ptr_);
}
protected:
T* ptr_;
};
#endif // TALK_APP_WEBRTC_SCOPED_REFPTR_H_

View File

@ -0,0 +1,213 @@
/*
* libjingle
* Copyright 2004--2011, Google Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
* EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef TALK_APP_WEBRTC_STREAM_H_
#define TALK_APP_WEBRTC_STREAM_H_
#include <string>
#include "talk/app/webrtc/ref_count.h"
#include "talk/app/webrtc/scoped_refptr.h"
namespace cricket {
class VideoRenderer;
class MediaEngine;
} // namespace cricket
namespace webrtc {
class AudioDeviceModule;
class VideoCaptureModule;
const char kVideoTrackKind[] = "video";
const char kAudioTrackKind[] = "audio";
// Generic observer interface.
class Observer {
public:
virtual void OnChanged() = 0;
};
class Notifier {
public:
virtual void RegisterObserver(Observer*) = 0;
virtual void UnregisterObserver(Observer*) = 0;
// This method should only be accessible to the owner
//void FireOnChanged() = 0;
};
// Information about a track.
class MediaStreamTrack : public RefCount,
public Notifier {
public:
virtual const std::string& kind() = 0;
virtual const std::string& label() = 0;
virtual bool enabled() = 0;
// Enables or disables a track.
// For remote streams, disabling means that the video or audio is
// not decoded.
// For local streams it means that video or audio is not captured.
virtual bool set_enabled(bool enable) = 0;
};
// Reference counted wrapper for an AudioDeviceModule.
class AudioDevice : public RefCount {
public:
static scoped_refptr<AudioDevice> Create(const std::string& name,
AudioDeviceModule* adm);
// Name of this device. Same as label of a MediaStreamTrack.
const std::string& name();
AudioDeviceModule* module();
protected:
AudioDevice(){};
virtual ~AudioDevice() {};
void Initialize(const std::string& name, AudioDeviceModule* adm);
std::string name_;
AudioDeviceModule* adm_;
};
// Reference counted wrapper for a VideoCaptureModule.
class VideoDevice : public RefCount {
public:
static scoped_refptr<VideoDevice> Create(const std::string& name,
VideoCaptureModule* vcm);
// Name of this device. Same as label of a MediaStreamTrack.
const std::string& name();
VideoCaptureModule* module();
protected:
VideoDevice(){};
~VideoDevice() {};
void Initialize(const std::string& name, VideoCaptureModule* vcm);
std::string name_;
VideoCaptureModule* vcm_;
};
// Reference counted wrapper for a VideoRenderer.
class VideoRenderer : public RefCount {
public:
static scoped_refptr<VideoRenderer> Create(cricket::VideoRenderer* renderer);
virtual cricket::VideoRenderer* module();
protected:
VideoRenderer() {};
~VideoRenderer() {};
void Initialize(cricket::VideoRenderer* renderer);
cricket::VideoRenderer* renderer_;
};
class VideoTrack : public MediaStreamTrack {
public:
// Set the video renderer for a local or remote stream.
// This call will start decoding the received video stream and render it.
virtual void SetRenderer(VideoRenderer* renderer) = 0;
// Get the VideoRenderer associated with this track.
virtual scoped_refptr<VideoRenderer> GetRenderer() = 0;
protected:
virtual ~VideoTrack() {};
};
class LocalVideoTrack : public VideoTrack {
public:
static scoped_refptr<LocalVideoTrack> Create(VideoDevice* video_device);
// Get the VideoCapture device associated with this track.
virtual scoped_refptr<VideoDevice> GetVideoCapture() = 0;
protected:
virtual ~LocalVideoTrack() {};
};
class AudioTrack : public MediaStreamTrack {
public:
protected:
virtual ~AudioTrack() {};
};
class LocalAudioTrack : public AudioTrack {
public:
static scoped_refptr<LocalAudioTrack> Create(AudioDevice* audio_device);
// Get the AudioDevice associated with this track.
virtual scoped_refptr<AudioDevice> GetAudioDevice() = 0;
protected:
virtual ~LocalAudioTrack() {};
};
// List of tracks.
class MediaStreamTrackList : public RefCount {
public:
virtual size_t count() = 0;
virtual scoped_refptr<MediaStreamTrack> at(size_t index) = 0;
protected:
virtual ~MediaStreamTrackList() {};
};
class MediaStream : public RefCount {
public:
virtual const std::string& label() = 0;
virtual scoped_refptr<MediaStreamTrackList> tracks() = 0;
enum ReadyState {
kInitializing,
kLive = 1, // Stream alive
kEnded = 2, // Stream has ended
};
virtual ReadyState readyState() = 0;
protected:
virtual ~MediaStream() {};
};
class LocalStream : public MediaStream {
public:
static scoped_refptr<LocalStream> Create(const std::string& label);
virtual bool AddTrack(MediaStreamTrack* track) = 0;
};
// Remote streams are created by the PeerConnection object and provided to the
// client using PeerConnectionObserver::OnAddStream.
// The client can provide the renderer to the PeerConnection object by
// calling VideoTrack::SetRenderer.
class RemoteStream : public MediaStream {
public:
};
} // namespace webrtc
#endif // TALK_APP_WEBRTC_STREAM_H_
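A sketch of composing these classes into a local stream that a client could
hand to the PeerConnection; the device name and stream label are illustrative,
and error handling is omitted:

scoped_refptr<webrtc::LocalStream> CreateCameraStream(
    webrtc::VideoCaptureModule* vcm) {
  scoped_refptr<webrtc::VideoDevice> camera =
      webrtc::VideoDevice::Create("front camera", vcm);
  scoped_refptr<webrtc::LocalVideoTrack> track =
      webrtc::LocalVideoTrack::Create(camera);
  scoped_refptr<webrtc::LocalStream> stream =
      webrtc::LocalStream::Create("camera_stream");
  stream->AddTrack(track);  // The stream now holds a reference to the track.
  return stream;
}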

View File

@ -0,0 +1,56 @@
# -*- Python -*-
import talk
Import('env')
# local sources
talk.Library(
env,
name = 'webrtc',
srcs = [
'peerconnection.cc',
'peerconnection_impl.cc',
'webrtc_json.cc',
'webrtcsession.cc',
],
)
talk.Unittest(
env,
name = 'webrtc',
srcs = [
'webrtcsession_unittest.cc',
'testing/timing.cc'
],
libs = [
'srtp',
'base',
'jpeg',
'json',
'webrtc',
'p2p',
'phone',
'xmpp',
'xmllite',
'yuvscaler'
],
include_talk_media_libs = True,
mac_libs = [
'crypto',
'ssl',
],
mac_FRAMEWORKS = [
'Foundation',
'IOKit',
'QTKit',
],
lin_libs = [
'rt',
'dl',
'sound',
'X11',
'Xext',
'Xfixes',
'Xrandr'
],
)

View File

@ -0,0 +1,447 @@
/*
* libjingle
* Copyright 2004--2011, Google Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
* EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "talk/app/webrtc/webrtc_json.h"
#include <stdio.h>
#include <string>
#include "talk/base/json.h"
#include "talk/base/logging.h"
#include "talk/base/stringutils.h"
#include "talk/session/phone/mediasessionclient.h"
#include "talk/session/phone/codec.h"
namespace webrtc {
static const int kIceComponent = 1;
static const int kIceFoundation = 1;
bool GetConnectionMediator(const Json::Value& value,
std::string* connection_mediator) {
if (value.type() != Json::objectValue && value.type() != Json::nullValue) {
LOG(LS_WARNING) << "Failed to parse stun values";
return false;
}
if (!GetStringFromJsonObject(value,
"connectionmediator",
connection_mediator)) {
LOG(LS_WARNING) << "Failed to parse JSON for value: "
<< value.toStyledString();
return false;
}
return true;
}
bool GetStunServer(const Json::Value& value, StunServiceDetails* stunServer) {
if (value.type() != Json::objectValue && value.type() != Json::nullValue) {
LOG(LS_WARNING) << "Failed to parse stun values";
return false;
}
Json::Value stun;
if (GetValueFromJsonObject(value, "stun_service", &stun)) {
if (stun.type() == Json::objectValue) {
if (!GetStringFromJsonObject(stun, "host", &stunServer->host) ||
!GetStringFromJsonObject(stun, "service", &stunServer->service) ||
!GetStringFromJsonObject(stun, "protocol", &stunServer->protocol)) {
LOG(LS_WARNING) << "Failed to parse JSON value: "
<< value.toStyledString();
return false;
}
} else {
LOG(LS_WARNING) << "stun_service is not a JSON object.";
return false;
}
} else {
LOG(LS_WARNING) << "Failed to find the stun_service member.";
return false;
}
return true;
}
bool GetTurnServer(const Json::Value& value, std::string* turn_server) {
if (value.type() != Json::objectValue && value.type() != Json::nullValue) {
LOG(LS_WARNING) << "Failed to parse stun values";
return false;
}
Json::Value turn;
if (GetValueFromJsonObject(value, "turn_service", &turn)) {
if (turn.type() == Json::objectValue) {
if (!GetStringFromJsonObject(turn, "host", turn_server)) {
LOG(LS_WARNING) << "Failed to parse JSON value: "
<< value.toStyledString();
return false;
}
} else {
LOG(LS_WARNING) << "Wrong ValueType. Expect Json::objectValue).";
return false;
}
}
return true;
}
bool GetJSONSignalingMessage(
const cricket::SessionDescription* sdp,
const std::vector<cricket::Candidate>& candidates,
std::string* signaling_message) {
const cricket::ContentInfo* audio_content = GetFirstAudioContent(sdp);
const cricket::ContentInfo* video_content = GetFirstVideoContent(sdp);
std::vector<Json::Value> media;
if (audio_content) {
Json::Value value;
BuildMediaMessage(*audio_content, candidates, false, &value);
media.push_back(value);
}
if (video_content) {
Json::Value value;
BuildMediaMessage(*video_content, candidates, true, &value);
media.push_back(value);
}
Json::Value signal;
Append(&signal, "media", media);
// now serialize
*signaling_message = Serialize(signal);
return true;
}
bool BuildMediaMessage(
const cricket::ContentInfo& content_info,
const std::vector<cricket::Candidate>& candidates,
bool video,
Json::Value* params) {
if (video) {
Append(params, "label", 2); // always video 2
} else {
Append(params, "label", 1); // always audio 1
}
std::vector<Json::Value> rtpmap;
if (!BuildRtpMapParams(content_info, video, &rtpmap)) {
return false;
}
Append(params, "rtpmap", rtpmap);
Json::Value attributes;
std::vector<Json::Value> jcandidates;
if (!BuildAttributes(candidates, video, &jcandidates)) {
return false;
}
Append(&attributes, "candidate", jcandidates);
Append(params, "attributes", attributes);
return true;
}
bool BuildRtpMapParams(const cricket::ContentInfo& content_info,
bool video,
std::vector<Json::Value>* rtpmap) {
if (!video) {
const cricket::AudioContentDescription* audio_offer =
static_cast<const cricket::AudioContentDescription*>(
content_info.description);
std::vector<cricket::AudioCodec>::const_iterator iter =
audio_offer->codecs().begin();
std::vector<cricket::AudioCodec>::const_iterator iter_end =
audio_offer->codecs().end();
for (; iter != iter_end; ++iter) {
Json::Value codec;
std::string codec_str(std::string("audio/").append(iter->name));
// adding clockrate
Append(&codec, "clockrate", iter->clockrate);
Append(&codec, "codec", codec_str);
Json::Value codec_id;
Append(&codec_id, talk_base::ToString(iter->id), codec);
rtpmap->push_back(codec_id);
}
} else {
const cricket::VideoContentDescription* video_offer =
static_cast<const cricket::VideoContentDescription*>(
content_info.description);
std::vector<cricket::VideoCodec>::const_iterator iter =
video_offer->codecs().begin();
std::vector<cricket::VideoCodec>::const_iterator iter_end =
video_offer->codecs().end();
for (; iter != iter_end; ++iter) {
Json::Value codec;
std::string codec_str(std::string("video/").append(iter->name));
Append(&codec, "codec", codec_str);
Json::Value codec_id;
Append(&codec_id, talk_base::ToString(iter->id), codec);
rtpmap->push_back(codec_id);
}
}
return true;
}
bool BuildAttributes(const std::vector<cricket::Candidate>& candidates,
bool video,
std::vector<Json::Value>* jcandidates) {
std::vector<cricket::Candidate>::const_iterator iter =
candidates.begin();
std::vector<cricket::Candidate>::const_iterator iter_end =
candidates.end();
for (; iter != iter_end; ++iter) {
if ((video && !iter->name().compare("video_rtp")) ||
(!video && !iter->name().compare("rtp"))) {
Json::Value candidate;
Append(&candidate, "component", kIceComponent);
Append(&candidate, "foundation", kIceFoundation);
Append(&candidate, "generation", iter->generation());
Append(&candidate, "proto", iter->protocol());
Append(&candidate, "priority", iter->preference());
Append(&candidate, "ip", iter->address().IPAsString());
Append(&candidate, "port", iter->address().PortAsString());
Append(&candidate, "type", iter->type());
Append(&candidate, "name", iter->name());
Append(&candidate, "network_name", iter->network_name());
Append(&candidate, "username", iter->username());
Append(&candidate, "password", iter->password());
jcandidates->push_back(candidate);
}
}
return true;
}
std::string Serialize(const Json::Value& value) {
Json::StyledWriter writer;
return writer.write(value);
}
bool Deserialize(const std::string& message, Json::Value* value) {
Json::Reader reader;
return reader.parse(message, *value);
}
bool ParseJSONSignalingMessage(const std::string& signaling_message,
cricket::SessionDescription*& sdp,
std::vector<cricket::Candidate>* candidates) {
ASSERT(!sdp); // expect this to be NULL
// first deserialize message
Json::Value value;
if (!Deserialize(signaling_message, &value)) {
return false;
}
// get media objects
std::vector<Json::Value> mlines = ReadValues(value, "media");
if (mlines.empty()) {
// no m-lines found
return false;
}
sdp = new cricket::SessionDescription();
// get codec information
for (size_t i = 0; i < mlines.size(); ++i) {
if (mlines[i]["label"].asInt() == 1) {
cricket::AudioContentDescription* audio_content =
new cricket::AudioContentDescription();
ParseAudioCodec(mlines[i], audio_content);
audio_content->SortCodecs();
sdp->AddContent(cricket::CN_AUDIO, cricket::NS_JINGLE_RTP, audio_content);
ParseICECandidates(mlines[i], candidates);
} else {
cricket::VideoContentDescription* video_content =
new cricket::VideoContentDescription();
ParseVideoCodec(mlines[i], video_content);
video_content->SortCodecs();
sdp->AddContent(cricket::CN_VIDEO, cricket::NS_JINGLE_RTP, video_content);
ParseICECandidates(mlines[i], candidates);
}
}
return true;
}
bool ParseAudioCodec(const Json::Value& value,
cricket::AudioContentDescription* content) {
std::vector<Json::Value> rtpmap(ReadValues(value, "rtpmap"));
if (rtpmap.empty())
return false;
std::vector<Json::Value>::const_iterator iter =
rtpmap.begin();
std::vector<Json::Value>::const_iterator iter_end =
rtpmap.end();
for (; iter != iter_end; ++iter) {
cricket::AudioCodec codec;
std::string pltype(iter->begin().memberName());
talk_base::FromString(pltype, &codec.id);
Json::Value codec_info((*iter)[pltype]);
std::string codec_name(ReadString(codec_info, "codec"));
std::vector<std::string> tokens;
talk_base::split(codec_name, '/', &tokens);
codec.name = tokens[1];
codec.clockrate = ReadUInt(codec_info, "clockrate");
content->AddCodec(codec);
}
return true;
}
bool ParseVideoCodec(const Json::Value& value,
cricket::VideoContentDescription* content) {
std::vector<Json::Value> rtpmap(ReadValues(value, "rtpmap"));
if (rtpmap.empty())
return false;
std::vector<Json::Value>::const_iterator iter =
rtpmap.begin();
std::vector<Json::Value>::const_iterator iter_end =
rtpmap.end();
for (; iter != iter_end; ++iter) {
cricket::VideoCodec codec;
std::string pltype(iter->begin().memberName());
talk_base::FromString(pltype, &codec.id);
Json::Value codec_info((*iter)[pltype]);
std::vector<std::string> tokens;
talk_base::split(codec_info["codec"].asString(), '/', &tokens);
codec.name = tokens[1];
content->AddCodec(codec);
}
return true;
}
bool ParseICECandidates(const Json::Value& value,
std::vector<cricket::Candidate>* candidates) {
Json::Value attributes(ReadValue(value, "attributes"));
std::string ice_pwd(ReadString(attributes, "ice-pwd"));
std::string ice_ufrag(ReadString(attributes, "ice-ufrag"));
std::vector<Json::Value> jcandidates(ReadValues(attributes, "candidate"));
std::vector<Json::Value>::const_iterator iter =
jcandidates.begin();
std::vector<Json::Value>::const_iterator iter_end =
jcandidates.end();
char buffer[16];
for (; iter != iter_end; ++iter) {
cricket::Candidate cand;
std::string str;
// ReadUInt() returns a number; convert it to its string form before storing.
str = talk_base::ToString(ReadUInt(*iter, "generation"));
cand.set_generation_str(str);
str = ReadString(*iter, "proto");
cand.set_protocol(str);
double priority = ReadDouble(*iter, "priority");
talk_base::sprintfn(buffer, ARRAY_SIZE(buffer), "%f", priority);
cand.set_preference_str(buffer);
talk_base::SocketAddress addr;
str = ReadString(*iter, "ip");
addr.SetIP(str);
str = ReadString(*iter, "port");
int port;
talk_base::FromString(str, &port);
addr.SetPort(port);
cand.set_address(addr);
str = ReadString(*iter, "type");
cand.set_type(str);
str = ReadString(*iter, "name");
cand.set_name(str);
str = ReadString(*iter, "network_name");
cand.set_network_name(str);
str = ReadString(*iter, "username");
cand.set_username(str);
str = ReadString(*iter, "password");
cand.set_password(str);
candidates->push_back(cand);
}
return true;
}
std::vector<Json::Value> ReadValues(
const Json::Value& value, const std::string& key) {
std::vector<Json::Value> objects;
for (size_t i = 0; i < value[key].size(); ++i) {
objects.push_back(value[key][i]);
}
return objects;
}
Json::Value ReadValue(const Json::Value& value, const std::string& key) {
return value[key];
}
std::string ReadString(const Json::Value& value, const std::string& key) {
return value[key].asString();
}
uint32 ReadUInt(const Json::Value& value, const std::string& key) {
return value[key].asUInt();
}
double ReadDouble(const Json::Value& value, const std::string& key) {
return value[key].asDouble();
}
// Add values
void Append(Json::Value* object, const std::string& key, bool value) {
(*object)[key] = Json::Value(value);
}
void Append(Json::Value* object, const std::string& key, char * value) {
(*object)[key] = Json::Value(value);
}
void Append(Json::Value* object, const std::string& key, double value) {
(*object)[key] = Json::Value(value);
}
void Append(Json::Value* object, const std::string& key, float value) {
(*object)[key] = Json::Value(value);
}
void Append(Json::Value* object, const std::string& key, int value) {
(*object)[key] = Json::Value(value);
}
void Append(Json::Value* object, const std::string& key,
const std::string& value) {
(*object)[key] = Json::Value(value);
}
void Append(Json::Value* object, const std::string& key, uint32 value) {
(*object)[key] = Json::Value(value);
}
void Append(Json::Value* object, const std::string& key,
const Json::Value& value) {
(*object)[key] = value;
}
void Append(Json::Value* object,
const std::string & key,
const std::vector<Json::Value>& values) {
for (std::vector<Json::Value>::const_iterator iter = values.begin();
iter != values.end(); ++iter) {
(*object)[key].append(*iter);
}
}
} // namespace webrtc
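// For orientation, the message assembled by GetJSONSignalingMessage() above
// has roughly the following shape (every value below is invented):
// {
//   "media" : [ {
//     "label" : 1,  // 1 = audio, 2 = video
//     "rtpmap" : [
//       { "103" : { "clockrate" : 16000, "codec" : "audio/ISAC" } }
//     ],
//     "attributes" : { "candidate" : [ {
//       "component" : 1, "foundation" : 1, "generation" : 0,
//       "proto" : "udp", "priority" : 1.0,
//       "ip" : "192.0.2.1", "port" : "2345", "type" : "local",
//       "name" : "rtp", "network_name" : "eth0",
//       "username" : "user", "password" : "pass"
//     } ] }
//   } ]
// }
// ParseJSONSignalingMessage() consumes the same shape on the receiving side.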

View File

@ -44,6 +44,7 @@ class VideoContentDescription;
struct ContentInfo;
class SessionDescription;
}
struct StunServiceDetails {
std::string host;
std::string service;
@ -53,20 +54,21 @@ struct StunServiceDetails {
namespace webrtc {
bool GetConnectionMediator(const Json::Value& value,
std::string& connectionMediator);
bool GetStunServer(const Json::Value& value, StunServiceDetails& stun);
bool GetTurnServer(const Json::Value& value, std::string& turnServer);
std::string* connection_mediator);
bool GetStunServer(const Json::Value& value, StunServiceDetails* stun);
bool GetTurnServer(const Json::Value& value, std::string* turn_server);
bool FromJsonToAVCodec(const Json::Value& value,
cricket::AudioContentDescription* audio,
cricket::VideoContentDescription* video);
std::vector<Json::Value> ReadValues(Json::Value& value, const std::string& key);
std::vector<Json::Value> ReadValues(const Json::Value& value,
const std::string& key);
bool BuildMediaMessage(
const cricket::ContentInfo* content_info,
const cricket::ContentInfo& content_info,
const std::vector<cricket::Candidate>& candidates,
bool video,
Json::Value& value);
Json::Value* value);
bool GetJSONSignalingMessage(
const cricket::SessionDescription* sdp,
@ -74,43 +76,46 @@ bool GetJSONSignalingMessage(
std::string* signaling_message);
bool BuildRtpMapParams(
const cricket::ContentInfo* audio_offer,
const cricket::ContentInfo& audio_offer,
bool video,
std::vector<Json::Value>& rtpmap);
std::vector<Json::Value>* rtpmap);
bool BuildAttributes(const std::vector<cricket::Candidate>& candidates,
bool video,
std::vector<Json::Value>& jcandidates);
std::vector<Json::Value>* jcandidates);
std::string Serialize(const Json::Value& value);
bool Deserialize(const std::string& message, Json::Value& value);
bool ParseJSONSignalingMessage(const std::string& signaling_message,
cricket::SessionDescription*& sdp,
std::vector<cricket::Candidate>& candidates);
bool ParseAudioCodec(Json::Value value, cricket::AudioContentDescription* content);
bool ParseVideoCodec(Json::Value value, cricket::VideoContentDescription* content);
bool ParseICECandidates(Json::Value& value,
std::vector<cricket::Candidate>& candidates);
Json::Value ReadValue(Json::Value& value, const std::string& key);
std::string ReadString(Json::Value& value, const std::string& key);
double ReadDouble(Json::Value& value, const std::string& key);
uint32 ReadUInt(Json::Value& value, const std::string& key);
std::vector<cricket::Candidate>* candidates);
bool ParseAudioCodec(const Json::Value& value,
cricket::AudioContentDescription* content);
bool ParseVideoCodec(const Json::Value& value,
cricket::VideoContentDescription* content);
bool ParseICECandidates(const Json::Value& value,
std::vector<cricket::Candidate>* candidates);
Json::Value ReadValue(const Json::Value& value, const std::string& key);
std::string ReadString(const Json::Value& value, const std::string& key);
double ReadDouble(const Json::Value& value, const std::string& key);
uint32 ReadUInt(const Json::Value& value, const std::string& key);
// Add values
void Append(Json::Value& object, const std::string& key, bool value);
void Append(Json::Value* object, const std::string& key, bool value);
void Append(Json::Value& object, const std::string& key, char * value);
void Append(Json::Value& object, const std::string& key, double value);
void Append(Json::Value& object, const std::string& key, float value);
void Append(Json::Value& object, const std::string& key, int value);
void Append(Json::Value& object, const std::string& key, std::string value);
void Append(Json::Value& object, const std::string& key, uint32 value);
void Append(Json::Value& object, const std::string& key, Json::Value value);
void Append(Json::Value & object,
const std::string & key,
std::vector<Json::Value>& values);
void Append(Json::Value* object, const std::string& key, char * value);
void Append(Json::Value* object, const std::string& key, double value);
void Append(Json::Value* object, const std::string& key, float value);
void Append(Json::Value* object, const std::string& key, int value);
void Append(Json::Value* object, const std::string& key,
const std::string& value);
void Append(Json::Value* object, const std::string& key, uint32 value);
void Append(Json::Value* object, const std::string& key,
const Json::Value& value);
void Append(Json::Value* object,
const std::string& key,
const std::vector<Json::Value>& values);
}
#endif // TALK_APP_WEBRTC_WEBRTC_JSON_H_

View File

@@ -0,0 +1,693 @@
/*
* libjingle
* Copyright 2004--2011, Google Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
* EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "talk/app/webrtc/webrtcsession.h"
#include <string>
#include <vector>
#include "talk/base/common.h"
#include "talk/base/json.h"
#include "talk/base/scoped_ptr.h"
#include "talk/p2p/base/constants.h"
#include "talk/p2p/base/sessiondescription.h"
#include "talk/p2p/base/p2ptransport.h"
#include "talk/session/phone/channel.h"
#include "talk/session/phone/channelmanager.h"
#include "talk/session/phone/mediasessionclient.h"
#include "talk/session/phone/voicechannel.h"
namespace webrtc {
enum {
MSG_CANDIDATE_TIMEOUT = 101,
MSG_WEBRTC_CREATE_TRANSPORT,
MSG_WEBRTC_DELETE_TRANSPORT,
};
static const int kAudioMonitorPollFrequency = 100;
static const int kMonitorPollFrequency = 1000;
// We allow 30 seconds to establish a connection; beyond that we consider
// it an error.
static const int kCallSetupTimeout = 30 * 1000;
// A loss of connectivity is probably due to the Internet connection going
// down, and it might take a while to come back on wireless networks, so we
// use a longer timeout for that.
static const int kCallLostTimeout = 60 * 1000;
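// In sketch form, the timeout lifecycle implemented below is:
//
//   StartTransportTimeout(kCallSetupTimeout);        // armed in Initiate()
//   signaling_thread_->Clear(this, MSG_CANDIDATE_TIMEOUT);  // on first writable
//   StartTransportTimeout(kCallLostTimeout);         // re-armed if writability is lost
//   // if MSG_CANDIDATE_TIMEOUT ever fires: SignalFailedCall()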
typedef std::vector<StreamInfo*> StreamMap; // not really a map (vector)
static const char kVideoStream[] = "video_rtp";
static const char kAudioStream[] = "rtp";
const char WebRTCSession::kOutgoingDirection[] = "s";
const char WebRTCSession::kIncomingDirection[] = "r";
WebRTCSession::WebRTCSession(
const std::string& id,
const std::string& direction,
cricket::PortAllocator* allocator,
cricket::ChannelManager* channelmgr,
talk_base::Thread* signaling_thread)
: BaseSession(signaling_thread),
transport_(NULL),
channel_manager_(channelmgr),
all_transports_writable_(false),
muted_(false),
camera_muted_(false),
setup_timeout_(kCallSetupTimeout),
signaling_thread_(signaling_thread),
id_(id),
incoming_(direction == kIncomingDirection),
port_allocator_(allocator) {
BaseSession::sid_ = id;
}
WebRTCSession::~WebRTCSession() {
RemoveAllStreams();
if (state_ != STATE_RECEIVEDTERMINATE) {
Terminate();
}
signaling_thread_->Send(this, MSG_WEBRTC_DELETE_TRANSPORT, NULL);
}
bool WebRTCSession::Initiate() {
signaling_thread_->Send(this, MSG_WEBRTC_CREATE_TRANSPORT, NULL);
if (transport_ == NULL) {
return false;
}
transport_->set_allow_local_ips(true);
// start transports
transport_->SignalRequestSignaling.connect(
this, &WebRTCSession::OnRequestSignaling);
transport_->SignalCandidatesReady.connect(
this, &WebRTCSession::OnCandidatesReady);
transport_->SignalWritableState.connect(
this, &WebRTCSession::OnWritableState);
// Limit the amount of time that setting up a call may take.
StartTransportTimeout(kCallSetupTimeout);
return true;
}
cricket::Transport* WebRTCSession::CreateTransport() {
ASSERT(signaling_thread()->IsCurrent());
return new cricket::P2PTransport(
talk_base::Thread::Current(),
channel_manager_->worker_thread(), port_allocator());
}
bool WebRTCSession::CreateVoiceChannel(const std::string& stream_id) {
StreamInfo* stream_info = new StreamInfo(stream_id);
stream_info->video = false;
streams_.push_back(stream_info);
// RTCP disabled
cricket::VoiceChannel* voice_channel =
channel_manager_->CreateVoiceChannel(this, stream_id, false);
ASSERT(voice_channel != NULL);
stream_info->channel = voice_channel;
if (incoming()) {
SignalAddStream(stream_id, false);
} else {
SignalRtcMediaChannelCreated(stream_id, false);
}
return true;
}
bool WebRTCSession::CreateVideoChannel(const std::string& stream_id) {
StreamInfo* stream_info = new StreamInfo(stream_id);
stream_info->video = true;
streams_.push_back(stream_info);
// RTCP disabled
cricket::VideoChannel* video_channel =
channel_manager_->CreateVideoChannel(this, stream_id, false, NULL);
ASSERT(video_channel != NULL);
stream_info->channel = video_channel;
if (incoming()) {
SignalAddStream(stream_id, true);
} else {
SignalRtcMediaChannelCreated(stream_id, true);
}
return true;
}
cricket::TransportChannel* WebRTCSession::CreateChannel(
const std::string& content_name,
const std::string& name) {
if (!transport_) {
return NULL;
}
std::string type;
if (content_name.compare(kVideoStream) == 0) {
type = cricket::NS_GINGLE_VIDEO;
} else {
type = cricket::NS_GINGLE_AUDIO;
}
cricket::TransportChannel* transport_channel =
transport_->CreateChannel(name, type);
ASSERT(transport_channel != NULL);
transport_channels_[name] = transport_channel;
StreamMap::iterator iter;
for (iter = streams_.begin(); iter != streams_.end(); ++iter) {
StreamInfo* stream_info = (*iter);
if (stream_info->stream_id.compare(content_name) == 0) {
ASSERT(!stream_info->channel);
stream_info->transport = transport_channel;
break;
}
}
return transport_channel;
}
cricket::TransportChannel* WebRTCSession::GetChannel(
const std::string& content_name, const std::string& name) {
if (!transport_)
return NULL;
StreamMap::iterator iter;
for (iter = streams_.begin(); iter != streams_.end(); ++iter) {
if (content_name.compare((*iter)->stream_id) == 0) {
return (*iter)->transport;
}
}
return NULL;
}
void WebRTCSession::DestroyChannel(
const std::string& content_name, const std::string& name) {
if (!transport_)
return;
transport_->DestroyChannel(name);
StreamMap::iterator iter;
for (iter = streams_.begin(); iter != streams_.end(); ++iter) {
if (content_name.compare((*iter)->stream_id) == 0) {
(*iter)->transport = NULL;
streams_.erase(iter);
break;
}
}
}
void WebRTCSession::OnMessage(talk_base::Message* message) {
switch (message->message_id) {
case MSG_CANDIDATE_TIMEOUT:
if (transport_->writable()) {
// This should never happen: the timeout triggered even
// though a call was successfully set up.
ASSERT(false);
}
SignalFailedCall();
break;
case MSG_WEBRTC_CREATE_TRANSPORT:
transport_ = CreateTransport();
break;
case MSG_WEBRTC_DELETE_TRANSPORT:
if (transport_) {
delete transport_;
transport_ = NULL;
}
break;
default:
cricket::BaseSession::OnMessage(message);
break;
}
}
bool WebRTCSession::Connect() {
if (streams_.empty()) {
// nothing to initiate
return false;
}
// Let's connect all the transport channels created earlier for this session.
transport_->ConnectChannels();
// Create an offer now; this is needed to call SetState.
// The actual offer will be sent when the OnCandidatesReady callback arrives.
cricket::SessionDescription* offer = CreateOffer();
set_local_description(offer);
SetState((incoming()) ? STATE_SENTACCEPT : STATE_SENTINITIATE);
// Enable all the channels
EnableAllStreams();
SetVideoCapture(true);
return true;
}
bool WebRTCSession::SetVideoRenderer(const std::string& stream_id,
cricket::VideoRenderer* renderer) {
bool ret = false;
StreamMap::iterator iter;
for (iter = streams_.begin(); iter != streams_.end(); ++iter) {
StreamInfo* stream_info = (*iter);
if (stream_info->stream_id.compare(stream_id) == 0) {
ASSERT(stream_info->channel != NULL);
ASSERT(stream_info->video);
cricket::VideoChannel* channel = static_cast<cricket::VideoChannel*>(
stream_info->channel);
ret = channel->SetRenderer(0, renderer);
break;
}
}
return ret;
}
bool WebRTCSession::SetVideoCapture(bool capture) {
channel_manager_->SetVideoCapture(capture);
return true;
}
bool WebRTCSession::RemoveStream(const std::string& stream_id) {
bool ret = false;
StreamMap::iterator iter;
for (iter = streams_.begin(); iter != streams_.end(); ++iter) {
StreamInfo* sinfo = (*iter);
if (sinfo->stream_id.compare(stream_id) == 0) {
DisableLocalCandidate(sinfo->transport->name());
if (!sinfo->video) {
cricket::VoiceChannel* channel = static_cast<cricket::VoiceChannel*> (
sinfo->channel);
channel->Enable(false);
channel_manager_->DestroyVoiceChannel(channel);
} else {
cricket::VideoChannel* channel = static_cast<cricket::VideoChannel*> (
sinfo->channel);
channel->Enable(false);
channel_manager_->DestroyVideoChannel(channel);
}
// channel and transport will be deleted in
// DestroyVoiceChannel/DestroyVideoChannel
ret = true;
break;
}
}
if (!ret) {
LOG(LERROR) << "No streams found for stream id " << stream_id;
// TODO(ronghuawu): trigger onError callback
}
return ret;
}
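// Zeroing a local candidate's port is the removal convention used by this
// code: the remote side detects it in CheckForStreamDeleteMessage().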
void WebRTCSession::DisableLocalCandidate(const std::string& name) {
for (size_t i = 0; i < local_candidates_.size(); ++i) {
if (local_candidates_[i].name().compare(name) == 0) {
talk_base::SocketAddress address(local_candidates_[i].address().ip(), 0);
local_candidates_[i].set_address(address);
}
}
}
void WebRTCSession::EnableAllStreams() {
StreamMap::const_iterator i;
for (i = streams_.begin(); i != streams_.end(); ++i) {
cricket::BaseChannel* channel = (*i)->channel;
if (channel)
channel->Enable(true);
}
}
void WebRTCSession::RemoveAllStreams() {
// signaling_thread_->Post(this, MSG_RTC_REMOVEALLSTREAMS);
// First build a list of streams to remove and then remove them.
// The reason we do this is that if we remove the streams inside the
// loop, a stream might get removed while we're enumerating and the iterator
// will become invalid (and we crash).
// The streams_ entry itself is removed in DestroyChannel, which the
// ChannelManager invokes as a callback.
std::vector<std::string> streams_to_remove;
StreamMap::iterator iter;
for (iter = streams_.begin(); iter != streams_.end(); ++iter)
streams_to_remove.push_back((*iter)->stream_id);
for (std::vector<std::string>::iterator i = streams_to_remove.begin();
i != streams_to_remove.end(); ++i) {
RemoveStream(*i);
}
SignalRemoveStream(this);
}
bool WebRTCSession::HasStream(const std::string& stream_id) const {
StreamMap::const_iterator iter;
for (iter = streams_.begin(); iter != streams_.end(); ++iter) {
StreamInfo* sinfo = (*iter);
if (stream_id.compare(sinfo->stream_id) == 0) {
return true;
}
}
return false;
}
bool WebRTCSession::HasStream(bool video) const {
StreamMap::const_iterator iter;
for (iter = streams_.begin(); iter != streams_.end(); ++iter) {
StreamInfo* sinfo = (*iter);
if (sinfo->video == video) {
return true;
}
}
return false;
}
bool WebRTCSession::HasAudioStream() const {
return HasStream(false);
}
bool WebRTCSession::HasVideoStream() const {
return HasStream(true);
}
talk_base::Thread* WebRTCSession::worker_thread() {
return channel_manager_->worker_thread();
}
void WebRTCSession::OnRequestSignaling(cricket::Transport* transport) {
transport->OnSignalingReady();
}
void WebRTCSession::OnWritableState(cricket::Transport* transport) {
ASSERT(transport == transport_);
const bool all_transports_writable = transport_->writable();
if (all_transports_writable) {
if (all_transports_writable != all_transports_writable_) {
signaling_thread_->Clear(this, MSG_CANDIDATE_TIMEOUT);
} else {
// At one point all channels were writable and we had full connectivity,
// but then we lost it. Start the timeout again to kill the call if it
// doesn't come back.
StartTransportTimeout(kCallLostTimeout);
}
all_transports_writable_ = all_transports_writable;
}
NotifyTransportState();
return;
}
void WebRTCSession::StartTransportTimeout(int timeout) {
talk_base::Thread::Current()->PostDelayed(timeout, this,
MSG_CANDIDATE_TIMEOUT,
NULL);
}
void WebRTCSession::NotifyTransportState() {
}
bool WebRTCSession::OnInitiateMessage(
cricket::SessionDescription* offer,
const std::vector<cricket::Candidate>& candidates) {
if (!offer) {
LOG(LERROR) << "No SessionDescription from peer";
return false;
}
talk_base::scoped_ptr<cricket::SessionDescription> answer;
answer.reset(CreateAnswer(offer));
const cricket::ContentInfo* audio_content = GetFirstAudioContent(
answer.get());
const cricket::ContentInfo* video_content = GetFirstVideoContent(
answer.get());
if (!audio_content && !video_content) {
return false;
}
bool ret = true;
if (audio_content) {
ret = !HasAudioStream() &&
CreateVoiceChannel(audio_content->name);
if (!ret) {
LOG(LERROR) << "Failed to create voice channel for "
<< audio_content->name;
return false;
}
}
if (video_content) {
ret = !HasVideoStream() &&
CreateVideoChannel(video_content->name);
if (!ret) {
LOG(LERROR) << "Failed to create video channel for "
<< video_content->name;
return false;
}
}
set_remote_description(offer);
SetState(STATE_RECEIVEDINITIATE);
transport_->ConnectChannels();
EnableAllStreams();
set_local_description(answer.release());
SetState(STATE_SENTACCEPT);
return true;
}
bool WebRTCSession::OnRemoteDescription(
cricket::SessionDescription* desc,
const std::vector<cricket::Candidate>& candidates) {
if (state() == STATE_SENTACCEPT ||
state() == STATE_RECEIVEDACCEPT ||
state() == STATE_INPROGRESS) {
if (CheckForStreamDeleteMessage(candidates)) {
return OnRemoteDescriptionUpdate(desc, candidates);
} else {
transport_->OnRemoteCandidates(candidates);
return true;
}
}
// Session description is always accepted.
set_remote_description(desc);
SetState(STATE_RECEIVEDACCEPT);
// Will trigger OnWritableState() if successful.
transport_->OnRemoteCandidates(candidates);
return true;
}
bool WebRTCSession::CheckForStreamDeleteMessage(
const std::vector<cricket::Candidate>& candidates) {
for (size_t i = 0; i < candidates.size(); ++i) {
if (candidates[i].address().port() == 0) {
return true;
}
}
return false;
}
bool WebRTCSession::OnRemoteDescriptionUpdate(
const cricket::SessionDescription* desc,
const std::vector<cricket::Candidate>& candidates) {
// This will be called when the session is in a connected state.
// In this state the session expects a signaling message for any stream
// removed by the peer.
// Check each candidate's port: if it equals 0, remove that stream and
// fire the OnRemoveStream callback; otherwise leave it as is.
for (size_t i = 0; i < candidates.size(); ++i) {
if (candidates[i].address().port() == 0) {
RemoveStreamOnRequest(candidates[i]);
}
}
return true;
}
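// Illustrative only (hypothetical values): a removal update arriving here is
// an ordinary candidate whose address port has been zeroed, e.g.
//
//   cricket::Candidate removed;
//   removed.set_name("video_rtp");
//   removed.set_address(talk_base::SocketAddress("192.168.1.2", 0));
//
// RemoveStreamOnRequest() then tears down the stream bound to that transport.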
void WebRTCSession::RemoveStreamOnRequest(
const cricket::Candidate& candidate) {
// 1. Get the transport channel corresponding to the candidate name.
// 2. Get the StreamInfo for the transport found in step 1.
// 3. Call the ChannelManager's destroy method for the voice/video channel.
TransportChannelMap::iterator iter =
transport_channels_.find(candidate.name());
if (iter == transport_channels_.end()) {
return;
}
cricket::TransportChannel* transport = iter->second;
std::vector<StreamInfo*>::iterator siter;
for (siter = streams_.begin(); siter != streams_.end(); ++siter) {
StreamInfo* stream_info = (*siter);
if (stream_info->transport == transport) {
if (!stream_info->video) {
cricket::VoiceChannel* channel = static_cast<cricket::VoiceChannel*> (
stream_info->channel);
channel->Enable(false);
channel_manager_->DestroyVoiceChannel(channel);
} else {
cricket::VideoChannel* channel = static_cast<cricket::VideoChannel*> (
stream_info->channel);
channel->Enable(false);
channel_manager_->DestroyVideoChannel(channel);
}
SignalRemoveStream2((*siter)->stream_id, (*siter)->video);
break;
}
}
}
cricket::SessionDescription* WebRTCSession::CreateOffer() {
cricket::SessionDescription* offer = new cricket::SessionDescription();
StreamMap::iterator iter;
for (iter = streams_.begin(); iter != streams_.end(); ++iter) {
if ((*iter)->video) {
// Add video codecs if a video stream has been added.
cricket::VideoContentDescription* video =
new cricket::VideoContentDescription();
std::vector<cricket::VideoCodec> video_codecs;
channel_manager_->GetSupportedVideoCodecs(&video_codecs);
for (VideoCodecs::const_iterator codec = video_codecs.begin();
codec != video_codecs.end(); ++codec) {
video->AddCodec(*codec);
}
video->SortCodecs();
offer->AddContent(cricket::CN_VIDEO, cricket::NS_JINGLE_RTP, video);
} else {
cricket::AudioContentDescription* audio =
new cricket::AudioContentDescription();
std::vector<cricket::AudioCodec> audio_codecs;
channel_manager_->GetSupportedAudioCodecs(&audio_codecs);
for (AudioCodecs::const_iterator codec = audio_codecs.begin();
codec != audio_codecs.end(); ++codec) {
audio->AddCodec(*codec);
}
audio->SortCodecs();
offer->AddContent(cricket::CN_AUDIO, cricket::NS_JINGLE_RTP, audio);
}
}
return offer;
}
cricket::SessionDescription* WebRTCSession::CreateAnswer(
const cricket::SessionDescription* offer) {
cricket::SessionDescription* answer = new cricket::SessionDescription();
const cricket::ContentInfo* audio_content = GetFirstAudioContent(offer);
if (audio_content) {
const cricket::AudioContentDescription* audio_offer =
static_cast<const cricket::AudioContentDescription*>(
audio_content->description);
cricket::AudioContentDescription* audio_accept =
new cricket::AudioContentDescription();
AudioCodecs audio_codecs;
channel_manager_->GetSupportedAudioCodecs(&audio_codecs);
for (AudioCodecs::const_iterator ours = audio_codecs.begin();
ours != audio_codecs.end(); ++ours) {
for (AudioCodecs::const_iterator theirs = audio_offer->codecs().begin();
theirs != audio_offer->codecs().end(); ++theirs) {
if (ours->Matches(*theirs)) {
cricket::AudioCodec negotiated(*ours);
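// Adopt the offerer's payload type so RTP payload IDs match on both ends.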
negotiated.id = theirs->id;
audio_accept->AddCodec(negotiated);
}
}
}
audio_accept->SortCodecs();
answer->AddContent(audio_content->name, audio_content->type, audio_accept);
}
const cricket::ContentInfo* video_content = GetFirstVideoContent(offer);
if (video_content) {
const cricket::VideoContentDescription* video_offer =
static_cast<const cricket::VideoContentDescription*>(
video_content->description);
cricket::VideoContentDescription* video_accept =
new cricket::VideoContentDescription();
VideoCodecs video_codecs;
channel_manager_->GetSupportedVideoCodecs(&video_codecs);
for (VideoCodecs::const_iterator ours = video_codecs.begin();
ours != video_codecs.end(); ++ours) {
for (VideoCodecs::const_iterator theirs = video_offer->codecs().begin();
theirs != video_offer->codecs().end(); ++theirs) {
if (ours->Matches(*theirs)) {
cricket::VideoCodec negotiated(*ours);
negotiated.id = theirs->id;
video_accept->AddCodec(negotiated);
}
}
}
video_accept->SortCodecs();
answer->AddContent(video_content->name, video_content->type, video_accept);
}
return answer;
}
void WebRTCSession::OnMute(bool mute) {
StreamMap::iterator iter;
for (iter = streams_.begin(); iter != streams_.end(); ++iter) {
if (!(*iter)->video) {
cricket::VoiceChannel* voice_channel =
static_cast<cricket::VoiceChannel*>((*iter)->channel);
ASSERT(voice_channel != NULL);
voice_channel->Mute(mute);
}
}
}
void WebRTCSession::OnCameraMute(bool mute) {
StreamMap::iterator iter;
for (iter = streams_.begin(); iter != streams_.end(); ++iter) {
if ((*iter)->video) {
cricket::VideoChannel* video_channel =
static_cast<cricket::VideoChannel*>((*iter)->channel);
ASSERT(video_channel != NULL);
video_channel->Mute(mute);
}
}
}
void WebRTCSession::SetError(Error error) {
BaseSession::SetError(error);
}
void WebRTCSession::OnCandidatesReady(
cricket::Transport* transport,
const std::vector<cricket::Candidate>& candidates) {
std::vector<cricket::Candidate>::const_iterator iter;
for (iter = candidates.begin(); iter != candidates.end(); ++iter) {
local_candidates_.push_back(*iter);
}
SignalLocalDescription(local_description(), candidates);
}
} /* namespace webrtc */
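Taken together, an outgoing call drives WebRTCSession in a fixed order. A minimal sketch with hypothetical setup objects (port_allocator, channel_manager, signaling_thread), mirroring the SendCallSetUp unit test further below:

// All of these calls must run on the signaling thread that owns the session.
webrtc::WebRTCSession session("session_id", "s",  // "s" == outgoing direction
                              port_allocator, channel_manager,
                              signaling_thread);
session.Initiate();                 // create the transport and wire signals
session.CreateVoiceChannel("rtp");  // register an audio stream
session.Connect();                  // connect channels and emit the offer
// Later, feed the peer's answer back in:
// session.OnRemoteDescription(remote_sdp, remote_candidates);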

View File

@@ -25,18 +25,18 @@
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef TALK_APP_WEBRTC_WEBRTCSESSIONIMPL_H_
#define TALK_APP_WEBRTC_WEBRTCSESSIONIMPL_H_
#ifndef TALK_APP_WEBRTC_WEBRTCSESSION_H_
#define TALK_APP_WEBRTC_WEBRTCSESSION_H_
#include <string>
#include <vector>
#include "talk/base/logging.h"
#include "talk/base/messagehandler.h"
#include "talk/p2p/base/candidate.h"
#include "talk/p2p/base/session.h"
#include "talk/session/phone/channel.h"
#include "talk/session/phone/mediachannel.h"
#include "talk/app/pc_transport_impl.h"
#include "talk/app/webrtcsession.h"
namespace cricket {
class ChannelManager;
@@ -54,62 +54,44 @@ class Value;
namespace webrtc {
struct StreamInfo {
StreamInfo(const std::string stream_id)
explicit StreamInfo(const std::string stream_id)
: channel(NULL),
transport(NULL),
video(false),
stream_id(stream_id),
media_channel(-1) {}
stream_id(stream_id) {}
StreamInfo()
: channel(NULL),
transport(NULL),
video(false),
media_channel(-1) {}
video(false) {}
cricket::BaseChannel* channel;
PC_Transport_Impl* transport; //TODO - add RTCP transport channel
cricket::TransportChannel* transport;
bool video;
std::string stream_id;
int media_channel;
};
typedef std::vector<cricket::AudioCodec> AudioCodecs;
typedef std::vector<cricket::VideoCodec> VideoCodecs;
class ExternalRenderer;
class PeerConnection;
class WebRTCSessionImpl: public WebRTCSession {
class WebRTCSession : public cricket::BaseSession {
public:
WebRTCSessionImpl(const std::string& id,
WebRTCSession(const std::string& id,
const std::string& direction,
cricket::PortAllocator* allocator,
cricket::ChannelManager* channelmgr,
PeerConnection* connection,
talk_base::Thread* signaling_thread);
~WebRTCSessionImpl();
virtual bool Initiate();
virtual bool OnRemoteDescription(Json::Value& desc);
virtual bool OnRemoteDescription(const cricket::SessionDescription* sdp,
std::vector<cricket::Candidate>& candidates);
virtual bool OnInitiateMessage(const cricket::SessionDescription* sdp,
std::vector<cricket::Candidate>& candidates);
virtual void OnMute(bool mute);
virtual void OnCameraMute(bool mute);
// Override from BaseSession to allow setting errors from other threads
// than the signaling thread.
virtual void SetError(Error error);
bool muted() const { return muted_; }
bool camera_muted() const { return camera_muted_; }
bool CreateP2PTransportChannel(const std::string& stream_id, bool video);
~WebRTCSession();
bool Initiate();
bool Connect();
bool OnRemoteDescription(cricket::SessionDescription* sdp,
const std::vector<cricket::Candidate>& candidates);
bool OnInitiateMessage(cricket::SessionDescription* sdp,
const std::vector<cricket::Candidate>& candidates);
void OnMute(bool mute);
void OnCameraMute(bool mute);
bool CreateVoiceChannel(const std::string& stream_id);
bool CreateVideoChannel(const std::string& stream_id);
bool RemoveStream(const std::string& stream_id);
@@ -125,45 +107,32 @@ class WebRTCSessionImpl: public WebRTCSession {
// Returns true if there's one or more video channels in the session.
bool HasVideoStream() const;
void OnCandidateReady(const cricket::Candidate& candidate);
void OnStateChange(P2PTransportClass::State state,
cricket::TransportChannel* channel);
void OnMessageReceived(const char* data, size_t data_size);
bool SetVideoRenderer(const std::string& stream_id,
cricket::VideoRenderer* renderer);
bool SetVideoRenderer(const std::string& stream_id,
ExternalRenderer* external_renderer);
sigslot::signal2<cricket::VideoChannel*, std::string&> SignalVideoChannel;
sigslot::signal2<cricket::VoiceChannel*, std::string&> SignalVoiceChannel;
sigslot::signal1<WebRTCSessionImpl*> SignalOnRemoveStream;
sigslot::signal1<WebRTCSession*> SignalRemoveStream;
sigslot::signal2<const std::string&, bool> SignalAddStream;
sigslot::signal2<const std::string&, bool> SignalRemoveStream2;
sigslot::signal2<const std::string&, bool> SignalRtcMediaChannelCreated;
// Triggered when the local candidate is ready
sigslot::signal2<const cricket::SessionDescription*,
const std::vector<cricket::Candidate>&> SignalLocalDescription;
// This callback will trigger if setting up a call times out.
sigslot::signal0<> SignalFailedCall;
void OnVoiceChannelCreated(cricket::VoiceChannel* voice_channel,
std::string& stream_id);
void OnVideoChannelCreated(cricket::VideoChannel* video_channel,
std::string& stream_id);
void ChannelEnable(cricket::BaseChannel* channel, bool enable);
std::vector<cricket::Candidate>& local_candidates() {
bool muted() const { return muted_; }
bool camera_muted() const { return camera_muted_; }
const std::vector<cricket::Candidate>& local_candidates() {
return local_candidates_;
}
const std::string& id() const { return id_; }
bool incoming() const { return incoming_; }
cricket::PortAllocator* port_allocator() const { return port_allocator_; }
talk_base::Thread* signaling_thread() const { return signaling_thread_; }
private:
void ChannelEnable_w(cricket::BaseChannel* channel, bool enable);
void OnVoiceChannelError(cricket::VoiceChannel* voice_channel, uint32 ssrc,
cricket::VoiceMediaChannel::Error error);
void OnVideoChannelError(cricket::VideoChannel* video_channel, uint32 ssrc,
cricket::VideoMediaChannel::Error error);
// methods signaled by the transport
void OnRequestSignaling(cricket::Transport* transport);
void OnCandidatesReady(cricket::Transport* transport,
const std::vector<cricket::Candidate>& candidates);
void OnWritableState(cricket::Transport* transport);
// transport-management overrides from cricket::BaseSession
protected:
// methods from cricket::BaseSession
virtual void SetError(cricket::BaseSession::Error error);
virtual cricket::TransportChannel* CreateChannel(
const std::string& content_name, const std::string& name);
virtual cricket::TransportChannel* GetChannel(
@@ -171,68 +140,74 @@ class WebRTCSessionImpl: public WebRTCSession {
virtual void DestroyChannel(
const std::string& content_name, const std::string& name);
virtual talk_base::Thread* worker_thread() {
return NULL;
private:
// Dummy functions inherited from cricket::BaseSession.
// They should never be called.
virtual bool Accept(const cricket::SessionDescription* sdesc) {
return true;
}
void SendLocalDescription();
virtual bool Reject(const std::string& reason) {
return true;
}
virtual bool TerminateWithReason(const std::string& reason) {
return true;
}
virtual talk_base::Thread* worker_thread();
// methods signaled by the transport
void OnRequestSignaling(cricket::Transport* transport);
void OnCandidatesReady(cricket::Transport* transport,
const std::vector<cricket::Candidate>& candidates);
void OnWritableState(cricket::Transport* transport);
void OnTransportError(cricket::Transport* transport);
void OnChannelGone(cricket::Transport* transport);
bool CheckForStreamDeleteMessage(
const std::vector<cricket::Candidate>& candidates);
void UpdateTransportWritableState();
bool CheckAllTransportsWritable();
void StartTransportTimeout(int timeout);
void ClearTransportTimeout();
void NotifyTransportState();
cricket::SessionDescription* CreateOffer();
cricket::SessionDescription* CreateAnswer(
const cricket::SessionDescription* answer);
//from MessageHandler
// from MessageHandler
virtual void OnMessage(talk_base::Message* message);
private:
typedef std::map<std::string, PC_Transport_Impl*> TransportChannelMap;
virtual cricket::Transport* CreateTransport();
cricket::Transport* GetTransport();
cricket::VideoChannel* CreateVideoChannel_w(
const std::string& content_name,
bool rtcp,
cricket::VoiceChannel* voice_channel);
typedef std::map<std::string, cricket::TransportChannel*> TransportChannelMap;
cricket::VoiceChannel* CreateVoiceChannel_w(
const std::string& content_name,
bool rtcp);
void DestroyVoiceChannel_w(cricket::VoiceChannel* channel);
void DestroyVideoChannel_w(cricket::VideoChannel* channel);
void SignalOnWritableState_w(cricket::TransportChannel* channel);
void SetSessionState(State state);
void SetSessionState_w();
bool SetVideoCapture(bool capture);
cricket::CaptureResult SetVideoCapture_w(bool capture);
void DisableLocalCandidate(const std::string& name);
bool OnRemoteDescriptionUpdate(const cricket::SessionDescription* desc,
std::vector<cricket::Candidate>& candidates);
const std::vector<cricket::Candidate>& candidates);
void RemoveStreamOnRequest(const cricket::Candidate& candidate);
void RemoveStream_w(const std::string& stream_id);
void RemoveAllStreams_w();
void EnableAllStreams_w();
void SendLocalDescription_w();
void EnableAllStreams();
cricket::Transport* transport_;
cricket::ChannelManager* channel_manager_;
std::vector<StreamInfo*> streams_;
TransportChannelMap transport_channels_;
bool all_writable_;
bool all_transports_writable_;
bool muted_;
bool camera_muted_;
int setup_timeout_;
std::vector<cricket::Candidate> local_candidates_;
std::vector<cricket::Candidate> remote_candidates_;
State session_state_;
bool signal_initiated_;
talk_base::Thread* signaling_thread_;
std::string id_;
bool incoming_;
cricket::PortAllocator* port_allocator_;
static const char kIncomingDirection[];
static const char kOutgoingDirection[];
};
} /* namespace webrtc */
} // namespace webrtc
#endif /* TALK_APP_WEBRTC_WEBRTCSESSIONIMPL_H_ */
#endif // TALK_APP_WEBRTC_WEBRTCSESSION_H_

View File

@@ -0,0 +1,885 @@
/*
* libjingle
* Copyright 2004--2011, Google Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
* EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <stdio.h>
#include "base/gunit.h"
#include "base/helpers.h"
#include "talk/app/webrtc/testing/timing.h"
#include "talk/app/webrtc/webrtcsession.h"
#include "talk/base/fakenetwork.h"
#include "talk/base/scoped_ptr.h"
#include "talk/base/thread.h"
#include "talk/p2p/base/portallocator.h"
#include "talk/p2p/base/sessiondescription.h"
#include "talk/p2p/client/fakeportallocator.h"
#include "talk/session/phone/fakesession.h"
#include "talk/session/phone/mediasessionclient.h"
cricket::VideoContentDescription* CopyVideoContentDescription(
const cricket::VideoContentDescription* video_description) {
cricket::VideoContentDescription* new_video_description =
new cricket::VideoContentDescription();
cricket::VideoCodecs::const_iterator iter =
video_description->codecs().begin();
for (; iter != video_description->codecs().end(); iter++) {
new_video_description->AddCodec(*iter);
}
new_video_description->SortCodecs();
return new_video_description;
}
cricket::AudioContentDescription* CopyAudioContentDescription(
const cricket::AudioContentDescription* audio_description) {
cricket::AudioContentDescription* new_audio_description =
new cricket::AudioContentDescription();
cricket::AudioCodecs::const_iterator iter =
audio_description->codecs().begin();
for (; iter != audio_description->codecs().end(); iter++) {
new_audio_description->AddCodec(*iter);
}
new_audio_description->SortCodecs();
return new_audio_description;
}
const cricket::ContentDescription* CopyContentDescription(
const cricket::ContentDescription* original) {
const cricket::MediaContentDescription* media =
static_cast<const cricket::MediaContentDescription*>(original);
const cricket::ContentDescription* new_content_description = NULL;
if (media->type() == cricket::MEDIA_TYPE_VIDEO) {
const cricket::VideoContentDescription* video_description =
static_cast<const cricket::VideoContentDescription*>(original);
new_content_description = static_cast<const cricket::ContentDescription*>
(CopyVideoContentDescription(video_description));
} else if (media->type() == cricket::MEDIA_TYPE_AUDIO) {
const cricket::AudioContentDescription* audio_description =
static_cast<const cricket::AudioContentDescription*>(original);
new_content_description = static_cast<const cricket::ContentDescription*>
(CopyAudioContentDescription(audio_description));
} else {
return NULL;
}
return new_content_description;
}
cricket::ContentInfos CopyContentInfos(const cricket::ContentInfos& original) {
cricket::ContentInfos new_content_infos;
for (cricket::ContentInfos::const_iterator iter = original.begin();
iter != original.end(); iter++) {
cricket::ContentInfo info;
info.name = (*iter).name;
info.type = (*iter).type;
info.description = CopyContentDescription((*iter).description);
new_content_infos.push_back(info);  // keep the copied entry in the result
}
return new_content_infos;
}
cricket::SessionDescription* CopySessionDescription(
const cricket::SessionDescription* original) {
const cricket::ContentInfos& content_infos = original->contents();
cricket::ContentInfos new_content_infos = CopyContentInfos(content_infos);
return new cricket::SessionDescription(new_content_infos);
}
bool GenerateFakeSessionDescription(bool video,
cricket::SessionDescription** incoming_sdp) {
*incoming_sdp = new cricket::SessionDescription();
if (*incoming_sdp == NULL)
return false;
const std::string name = video ? std::string(cricket::CN_VIDEO) :
std::string(cricket::CN_AUDIO);
cricket::ContentDescription* description = NULL;
if (video) {
cricket::VideoContentDescription* video_dsc =
new cricket::VideoContentDescription;
video_dsc->SortCodecs();
description = static_cast<cricket::ContentDescription*>(video_dsc);
} else {
cricket::AudioContentDescription* audio_dsc =
new cricket::AudioContentDescription();
audio_dsc->SortCodecs();
description = static_cast<cricket::ContentDescription*>(audio_dsc);
}
// Cannot fail.
(*incoming_sdp)->AddContent(name, cricket::NS_JINGLE_RTP, description);
return true;
}
void GenerateFakeCandidate(bool video,
std::vector<cricket::Candidate>* candidates) {
// Next add a candidate.
// int port_index = 0;
std::string port_index_as_string("0");
cricket::Candidate candidate;
candidate.set_name("rtp");
candidate.set_protocol("udp");
talk_base::SocketAddress address("127.0.0.1", 1234);
candidate.set_address(address);
candidate.set_preference(1);
candidate.set_username("username" + port_index_as_string);
candidate.set_password(port_index_as_string);
candidate.set_type("local");
candidate.set_network_name("network");
candidate.set_generation(0);
candidates->push_back(candidate);
}
bool GenerateFakeSession(bool video, cricket::SessionDescription** incoming_sdp,
std::vector<cricket::Candidate>* candidates) {
if (!GenerateFakeSessionDescription(video, incoming_sdp)) {
return false;
}
GenerateFakeCandidate(video, candidates);
return true;
}
class OnSignalImpl
: public sigslot::has_slots<> {
public:
enum CallbackId {
kNone,
kOnAddStream,
kOnRemoveStream2,
kOnRtcMediaChannelCreated,
kOnLocalDescription,
kOnFailedCall,
};
OnSignalImpl()
: callback_ids_(),
last_stream_id_(""),
last_was_video_(false),
last_description_ptr_(NULL),
last_candidates_() {
}
virtual ~OnSignalImpl() {
delete last_description_ptr_;
last_description_ptr_ = NULL;
}
void OnAddStream(const std::string& stream_id, bool video) {
callback_ids_.push_back(kOnAddStream);
last_stream_id_ = stream_id;
last_was_video_ = video;
}
void OnRemoveStream2(const std::string& stream_id, bool video) {
callback_ids_.push_back(kOnRemoveStream2);
last_stream_id_ = stream_id;
last_was_video_ = video;
}
void OnRtcMediaChannelCreated(const std::string& stream_id,
bool video) {
callback_ids_.push_back(kOnRtcMediaChannelCreated);
last_stream_id_ = stream_id;
last_was_video_ = video;
}
void OnLocalDescription(
const cricket::SessionDescription* desc,
const std::vector<cricket::Candidate>& candidates) {
callback_ids_.push_back(kOnLocalDescription);
delete last_description_ptr_;
last_description_ptr_ = CopySessionDescription(desc);
last_candidates_.clear();
last_candidates_.insert(last_candidates_.end(), candidates.begin(),
candidates.end());
}
cricket::SessionDescription* GetLocalDescription(
std::vector<cricket::Candidate>* candidates) {
if (last_candidates_.empty()) {
return NULL;
}
if (last_description_ptr_ == NULL) {
return NULL;
}
candidates->insert(candidates->end(), last_candidates_.begin(),
last_candidates_.end());
return CopySessionDescription(last_description_ptr_);
}
void OnFailedCall() {
callback_ids_.push_back(kOnFailedCall);
}
CallbackId PopOldestCallback() {
if (callback_ids_.empty()) {
return kNone;
}
const CallbackId return_value = callback_ids_.front();
callback_ids_.pop_front();
return return_value;
}
CallbackId PeekOldestCallback() {
if (callback_ids_.empty()) {
return kNone;
}
const CallbackId return_value = callback_ids_.front();
return return_value;
}
void Reset() {
callback_ids_.clear();
last_stream_id_ = "";
last_was_video_ = false;
delete last_description_ptr_;
last_description_ptr_ = NULL;
last_candidates_.clear();
}
protected:
std::list<CallbackId> callback_ids_;
std::string last_stream_id_;
bool last_was_video_;
cricket::SessionDescription* last_description_ptr_;
std::vector<cricket::Candidate> last_candidates_;
};
template<typename T>
struct ReturnValue : public talk_base::MessageData {
ReturnValue() : return_value_() {}
T return_value_;
};
typedef ReturnValue<bool> ReturnBool;
typedef ReturnValue<const std::vector<cricket::Candidate>*>
ReturnCandidates;
template <typename T>
class PassArgument : public talk_base::MessageData {
public:
explicit PassArgument(const T& argument) : argument_(argument) {}
const T& argument() { return argument_; }
protected:
T argument_;
};
typedef PassArgument<bool> PassBool;
typedef PassArgument<cricket::BaseSession::Error> PassError;
typedef PassArgument<std::pair<cricket::VoiceChannel*, std::string> >
PassVoiceChannelString;
typedef PassArgument<std::pair<cricket::VideoChannel*, std::string> >
PassVideoChannelString;
template <typename T>
class ReturnBoolPassArgument : public talk_base::MessageData {
public:
explicit ReturnBoolPassArgument(const T& argument)
: argument_(argument) { return_value_ = false; }
const T& argument() { return argument_; }
bool return_value_;
protected:
T argument_;
};
typedef ReturnBoolPassArgument<std::pair<std::string, bool> >
ReturnBoolPassStringBool;
typedef ReturnBoolPassArgument<std::string> ReturnBoolPassString;
typedef ReturnBoolPassArgument<bool> ReturnBoolPassBool;
typedef ReturnBoolPassArgument<
std::pair<std::string, cricket::VideoRenderer*> >
ReturnBoolPassStringVideoRenderer;
class WebRTCSessionExtendedForTest : public webrtc::WebRTCSession {
public:
WebRTCSessionExtendedForTest(const std::string& id,
const std::string& direction,
cricket::PortAllocator* allocator,
cricket::ChannelManager* channelmgr,
talk_base::Thread* signaling_thread)
: WebRTCSession(id, direction, allocator, channelmgr, signaling_thread),
worker_thread_(channelmgr->worker_thread()) {
}
private:
virtual cricket::Transport* CreateTransport() {
ASSERT(signaling_thread()->IsCurrent());
return static_cast<cricket::Transport*> (new cricket::FakeTransport(
signaling_thread(),
worker_thread_));
}
talk_base::Thread* worker_thread_;
};
class WebRTCSessionTest : public OnSignalImpl,
public talk_base::MessageHandler {
public:
enum FunctionCallId {
kCallInitiate,
kCallConnect,
kCallOnRemoteDescription,
kCallOnInitiateMessage,
kCallOnMute,
kCallOnCameraMute,
kCallMuted,
kCallCameraMuted,
kCallCreateVoiceChannel,
kCallCreateVideoChannel,
kCallRemoveStream,
kCallRemoveAllStreams,
kCallHasStreamString,
kCallHasStreamBool,
kCallHasAudioStream,
kCallHasVideoStream,
kCallSetVideoRenderer,
kCallLocalCandidates
};
enum {kInit = kCallLocalCandidates + 1};
enum {kTerminate = kInit + 1};
static WebRTCSessionTest* CreateWebRTCSessionTest(bool receiving) {
WebRTCSessionTest* return_value =
new WebRTCSessionTest();
if (return_value == NULL) {
return NULL;
}
if (!return_value->Init(receiving)) {
delete return_value;
return NULL;
}
return return_value;
}
std::string DirectionAsString() {
// Direction is either "r"=incoming or "s"=outgoing.
return (receiving_) ? "r" : "s";
}
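// Polls the callback queue for up to timeout_ms; returns false early if a
// different callback arrives first.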
bool WaitForCallback(CallbackId id, int timeout_ms) {
bool success = false;
Timing my_timer;
for (int ms = 0; ms < timeout_ms; ms++) {
const CallbackId peek_id = PeekOldestCallback();
if (peek_id == id) {
PopOldestCallback();
success = true;
break;
} else if (peek_id != kNone) {
success = false;
break;
}
my_timer.IdleWait(0.001);
}
return success;
}
bool Init(bool receiving) {
if (signaling_thread_ != NULL)
return false;
signaling_thread_ = new talk_base::Thread();
if (!signaling_thread_->SetName("signaling_thread test", this)) {
return false;
}
if (!signaling_thread_->Start()) {
return false;
}
receiving_ = receiving;
ReturnBool return_value;
signaling_thread_->Send(this, kInit, &return_value);
return return_value.return_value_;
}
void Init_s(talk_base::Message* message) {
ReturnBool* return_value = reinterpret_cast<ReturnBool*>(message->pdata);
return_value->return_value_ = false;
ASSERT_TRUE(worker_thread_ == NULL);
worker_thread_ = new talk_base::Thread();
if (!worker_thread_->SetName("worker thread test", this))
return;
if (!worker_thread_->Start())
return;
cricket::FakePortAllocator* fake_port_allocator =
new cricket::FakePortAllocator(worker_thread_, NULL);
fake_port_allocator->set_flags(cricket::PORTALLOCATOR_DISABLE_STUN |
cricket::PORTALLOCATOR_DISABLE_RELAY |
cricket::PORTALLOCATOR_DISABLE_TCP);
allocator_ = static_cast<cricket::PortAllocator*>(fake_port_allocator);
channel_manager_ = new cricket::ChannelManager(worker_thread_);
if (!channel_manager_->Init())
return;
talk_base::CreateRandomString(8, &id_);
session_ = new webrtc::WebRTCSession(
id_, DirectionAsString() , allocator_,
channel_manager_,
signaling_thread_);
session_->SignalAddStream.connect(
static_cast<OnSignalImpl*> (this),
&OnSignalImpl::OnAddStream);
session_->SignalRemoveStream2.connect(
static_cast<OnSignalImpl*> (this),
&OnSignalImpl::OnRemoveStream2);
session_->SignalRtcMediaChannelCreated.connect(
static_cast<OnSignalImpl*> (this),
&OnSignalImpl::OnRtcMediaChannelCreated);
session_->SignalLocalDescription.connect(
static_cast<OnSignalImpl*> (this),
&OnSignalImpl::OnLocalDescription);
session_->SignalFailedCall.connect(
static_cast<OnSignalImpl*> (this),
&OnSignalImpl::OnFailedCall);
return_value->return_value_ = true;
return;
}
void Terminate_s() {
delete session_;
delete channel_manager_;
delete allocator_;
}
~WebRTCSessionTest() {
if (signaling_thread_ != NULL) {
signaling_thread_->Send(this, kTerminate, NULL);
signaling_thread_->Stop();
signaling_thread_->Clear(NULL);
delete signaling_thread_;
}
if (worker_thread_ != NULL) {
worker_thread_->Stop();
worker_thread_->Clear(NULL);
delete worker_thread_;
}
}
// All session APIs must be called from the signaling thread.
bool CallInitiate() {
ReturnBool return_value;
signaling_thread_->Send(this, kCallInitiate, &return_value);
return return_value.return_value_;
}
bool CallConnect() {
ReturnBool return_value;
signaling_thread_->Send(this, kCallConnect, &return_value);
// This callback does not happen with FakeTransport!
if (!WaitForCallback(kOnLocalDescription, 1000)) {
return false;
}
return return_value.return_value_;
}
bool CallOnRemoteDescription() {
ReturnBool return_value;
signaling_thread_->Send(this, kCallOnRemoteDescription, &return_value);
return return_value.return_value_;
}
bool CallOnInitiateMessage() {
ReturnBool return_value;
signaling_thread_->Send(this, kCallOnInitiateMessage, &return_value);
return return_value.return_value_;
}
void CallOnMute(bool mute) {
PassBool return_value(mute);
signaling_thread_->Send(this, kCallOnMute, &return_value);
}
void CallOnCameraMute(bool mute) {
PassBool return_value(mute);
signaling_thread_->Send(this, kCallOnCameraMute, &return_value);
}
bool CallMuted() {
ReturnBool return_value;
signaling_thread_->Send(this, kCallMuted, &return_value);
return return_value.return_value_;
}
bool CallCameraMuted() {
ReturnBool return_value;
signaling_thread_->Send(this, kCallCameraMuted, &return_value);
return return_value.return_value_;
}
bool CallCreateVoiceChannel(const std::string& stream_id) {
ReturnBoolPassString return_value(stream_id);
signaling_thread_->Send(this, kCallCreateVoiceChannel, &return_value);
if (!WaitForCallback(kOnRtcMediaChannelCreated, 1000)) {
return false;
}
return return_value.return_value_;
}
bool CallCreateVideoChannel(const std::string& stream_id) {
ReturnBoolPassString return_value(stream_id);
signaling_thread_->Send(this, kCallCreateVideoChannel, &return_value);
return return_value.return_value_;
}
bool CallRemoveStream(const std::string& stream_id) {
ReturnBoolPassString return_value(stream_id);
signaling_thread_->Send(this, kCallRemoveStream, &return_value);
return return_value.return_value_;
}
void CallRemoveAllStreams() {
signaling_thread_->Send(this, kCallRemoveAllStreams, NULL);
}
bool CallHasStream(const std::string& label) {
ReturnBoolPassString return_value(label);
signaling_thread_->Send(this, kCallHasStreamString, &return_value);
return return_value.return_value_;
}
bool CallHasStream(bool video) {
ReturnBoolPassBool return_value(video);
signaling_thread_->Send(this, kCallHasStreamBool, &return_value);
return return_value.return_value_;
}
bool CallHasAudioStream() {
ReturnBool return_value;
signaling_thread_->Send(this, kCallHasAudioStream, &return_value);
return return_value.return_value_;
}
bool CallHasVideoStream() {
ReturnBool return_value;
signaling_thread_->Send(this, kCallHasVideoStream, &return_value);
return return_value.return_value_;
}
bool CallSetVideoRenderer(const std::string& stream_id,
cricket::VideoRenderer* renderer) {
ReturnBoolPassStringVideoRenderer return_value(std::make_pair(
stream_id, renderer));
signaling_thread_->Send(this, kCallSetVideoRenderer, &return_value);
return return_value.return_value_;
}
const std::vector<cricket::Candidate>& CallLocalCandidates() {
ReturnCandidates return_value;
signaling_thread_->Send(this, kCallLocalCandidates, &return_value);
EXPECT_TRUE(return_value.return_value_ != NULL);
return *return_value.return_value_;
}
void Initiate_s(talk_base::Message* message) {
ReturnBool* return_value = reinterpret_cast<ReturnBool*>(message->pdata);
if (!session_->Initiate()) {
return_value->return_value_ = false;
return;
}
return_value->return_value_ = true;
}
void Connect_s(talk_base::Message* message) {
ReturnBool* return_value = reinterpret_cast<ReturnBool*>(message->pdata);
return_value->return_value_ = session_->Connect();
}
void OnRemoteDescription_s(talk_base::Message* message) {
ReturnBool* return_value = reinterpret_cast<ReturnBool*>(message->pdata);
return_value->return_value_ = false;
std::vector<cricket::Candidate> candidates;
cricket::SessionDescription* description = GetLocalDescription(&candidates);
if (description == NULL) {
return;
}
if (!session_->OnRemoteDescription(description, candidates)) {
delete description;
return;
}
return_value->return_value_ = true;
}
void OnInitiateMessage_s(talk_base::Message* message) {
cricket::SessionDescription* description = NULL;
std::vector<cricket::Candidate> candidates;
ReturnBool* return_value = reinterpret_cast<ReturnBool*>(message->pdata);
if (!GenerateFakeSession(false, &description, &candidates)) {
return_value->return_value_ = false;
return;
}
if (!session_->OnInitiateMessage(description, candidates)) {
return_value->return_value_ = false;
delete description;
return;
}
return_value->return_value_ = true;
}
void OnMute_s(talk_base::Message* message) {
PassBool* return_value = reinterpret_cast<PassBool*>(message->pdata);
session_->OnMute(return_value->argument());
}
void OnCameraMute_s(talk_base::Message* message) {
PassBool* return_value = reinterpret_cast<PassBool*>(message->pdata);
session_->OnCameraMute(return_value->argument());
}
void Muted_s(talk_base::Message* message) {
ReturnBool* return_value = reinterpret_cast<ReturnBool*>(message->pdata);
return_value->return_value_ = session_->muted();
}
void CameraMuted_s(talk_base::Message* message) {
ReturnBool* return_value = reinterpret_cast<ReturnBool*>(message->pdata);
return_value->return_value_ = session_->camera_muted();
}
void CreateVoiceChannel_s(talk_base::Message* message) {
ReturnBoolPassString* return_value =
reinterpret_cast<ReturnBoolPassString*>(message->pdata);
return_value->return_value_ = session_->CreateVoiceChannel(
return_value->argument());
}
void CreateVideoChannel_s(talk_base::Message* message) {
ReturnBoolPassString* return_value =
reinterpret_cast<ReturnBoolPassString*>(message->pdata);
return_value->return_value_ = session_->CreateVideoChannel(
return_value->argument());
}
void RemoveStream_s(talk_base::Message* message) {
ReturnBoolPassString* return_value =
reinterpret_cast<ReturnBoolPassString*>(message->pdata);
return_value->return_value_ = session_->RemoveStream(
return_value->argument());
}
void RemoveAllStreams_s(talk_base::Message* message) {
EXPECT_TRUE(message->pdata == NULL);
session_->RemoveAllStreams();
}
void HasStreamString_s(talk_base::Message* message) {
ReturnBoolPassString* return_value =
reinterpret_cast<ReturnBoolPassString*>(message->pdata);
return_value->return_value_ = session_->HasStream(return_value->argument());
}
void HasStreamBool_s(talk_base::Message* message) {
ReturnBoolPassBool* return_value = reinterpret_cast<ReturnBoolPassBool*>(
message->pdata);
return_value->return_value_ = session_->HasStream(return_value->argument());
}
void HasAudioStream_s(talk_base::Message* message) {
ReturnBool* return_value = reinterpret_cast<ReturnBool*>(message->pdata);
return_value->return_value_ = session_->HasAudioStream();
}
void HasVideoStream_s(talk_base::Message* message) {
ReturnBool* return_value = reinterpret_cast<ReturnBool*>(message->pdata);
return_value->return_value_ = session_->HasVideoStream();
}
void SetVideoRenderer_s(talk_base::Message* message) {
ReturnBoolPassStringVideoRenderer* return_value =
reinterpret_cast<ReturnBoolPassStringVideoRenderer*>(message->pdata);
return_value->return_value_ = session_->SetVideoRenderer(
return_value->argument().first, return_value->argument().second);
}
void LocalCandidates_s(talk_base::Message* message) {
ReturnCandidates* return_value =
reinterpret_cast<ReturnCandidates*>(message->pdata);
return_value->return_value_ = &session_->local_candidates();
}
void OnMessage(talk_base::Message* message) {
if ((message->pdata == NULL) &&
(message->message_id != kCallRemoveAllStreams) &&
(message->message_id != kTerminate)) {
ADD_FAILURE();
return;
}
if (!signaling_thread_->IsCurrent()) {
ADD_FAILURE();
return;
}
switch (message->message_id) {
case kCallInitiate:
Initiate_s(message);
return;
case kCallConnect:
Connect_s(message);
return;
case kCallOnRemoteDescription:
OnRemoteDescription_s(message);
return;
case kCallOnInitiateMessage:
OnInitiateMessage_s(message);
return;
case kCallOnMute:
OnMute_s(message);
return;
case kCallOnCameraMute:
OnCameraMute_s(message);
return;
case kCallMuted:
Muted_s(message);
return;
case kCallCameraMuted:
CameraMuted_s(message);
return;
case kCallCreateVoiceChannel:
CreateVoiceChannel_s(message);
return;
case kCallCreateVideoChannel:
CreateVideoChannel_s(message);
return;
case kCallRemoveStream:
RemoveStream_s(message);
return;
case kCallRemoveAllStreams:
RemoveAllStreams_s(message);
return;
case kCallHasStreamString:
HasStreamString_s(message);
return;
case kCallHasStreamBool:
HasStreamBool_s(message);
return;
case kCallHasAudioStream:
HasAudioStream_s(message);
return;
case kCallHasVideoStream:
HasVideoStream_s(message);
return;
case kCallSetVideoRenderer:
SetVideoRenderer_s(message);
return;
case kCallLocalCandidates:
LocalCandidates_s(message);
return;
case kInit:
Init_s(message);
return;
case kTerminate:
Terminate_s();
return;
default:
ADD_FAILURE();
return;
}
}
private:
WebRTCSessionTest()
: session_(NULL),
id_(),
receiving_(false),
allocator_(NULL),
channel_manager_(NULL),
worker_thread_(NULL),
signaling_thread_(NULL) {
}
webrtc::WebRTCSession* session_;
std::string id_;
bool receiving_;
cricket::PortAllocator* allocator_;
cricket::ChannelManager* channel_manager_;
talk_base::Thread* worker_thread_;
talk_base::Thread* signaling_thread_;
};
bool CallbackReceived(WebRTCSessionTest* session, int timeout) {
Timing my_timer;
my_timer.IdleWait(timeout * 0.001);
const OnSignalImpl::CallbackId peek_id =
session->PeekOldestCallback();
return peek_id != OnSignalImpl::kNone;
}
void SleepMs(int timeout_ms) {
Timing my_timer;
my_timer.IdleWait(timeout_ms * 0.001);
}
TEST(WebRtcSessionTest, InitializationReceiveSanity) {
const bool kReceiving = true;
talk_base::scoped_ptr<WebRTCSessionTest> my_session;
my_session.reset(WebRTCSessionTest::CreateWebRTCSessionTest(kReceiving));
ASSERT_TRUE(my_session.get() != NULL);
ASSERT_TRUE(my_session->CallInitiate());
// Should return false because no stream has been set up yet.
EXPECT_FALSE(my_session->CallConnect());
const bool kVideo = true;
EXPECT_FALSE(my_session->CallHasStream(kVideo));
EXPECT_FALSE(my_session->CallHasStream(!kVideo));
EXPECT_EQ(OnSignalImpl::kNone,
my_session->PopOldestCallback());
}
TEST(WebRtcSessionTest, SendCallSetUp) {
const bool kReceiving = false;
talk_base::scoped_ptr<WebRTCSessionTest> my_session;
my_session.reset(WebRTCSessionTest::CreateWebRTCSessionTest(kReceiving));
ASSERT_TRUE(my_session.get() != NULL);
ASSERT_TRUE(my_session->CallInitiate());
ASSERT_TRUE(my_session->CallCreateVoiceChannel("Audio"));
ASSERT_TRUE(my_session->CallConnect());
ASSERT_TRUE(my_session->CallOnRemoteDescription());
// All callbacks should be caught by my session. Assert it.
ASSERT_FALSE(CallbackReceived(my_session.get(), 1000));
}
int main(int argc, char* argv[]) {
::testing::InitGoogleTest(&argc, argv);
// Added return_value so that it's convenient to put a breakpoint before
// exiting. Note that the return value from RUN_ALL_TESTS() must
// be returned by the main function.
const int return_value = RUN_ALL_TESTS();
return return_value;
}

View File

@@ -1,434 +0,0 @@
/*
* libjingle
* Copyright 2004--2011, Google Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
* EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// This file contains all the JSON helper methods.
#include "talk/app/webrtc_json.h"
#include <stdio.h>
#include <string>
#include "talk/base/json.h"
#include "talk/base/logging.h"
#include "talk/session/phone/mediasessionclient.h"
#include "talk/session/phone/codec.h"
#include "json/json.h"
namespace webrtc {
static const int kIceComponent = 1;
static const int kIceFoundation = 1;
bool GetConnectionMediator(const Json::Value& value, std::string& connectionMediator) {
if (value.type() != Json::objectValue && value.type() != Json::nullValue) {
LOG(LS_WARNING) << "Failed to parse connection mediator value";
return false;
}
if (!GetStringFromJsonObject(value, "connectionmediator", &connectionMediator)) {
LOG(LS_WARNING) << "Failed to parse JSON for value: "
<< value.toStyledString();
return false;
}
return true;
}
bool GetStunServer(const Json::Value& value, StunServiceDetails& stunServer) {
if (value.type() != Json::objectValue && value.type() != Json::nullValue) {
LOG(LS_WARNING) << "Failed to parse stun values";
return false;
}
Json::Value stun;
if (GetValueFromJsonObject(value, "stun_service", &stun)) {
if (stun.type() == Json::objectValue) {
if (!GetStringFromJsonObject(stun, "host", &stunServer.host) ||
!GetStringFromJsonObject(stun, "service", &stunServer.service) ||
!GetStringFromJsonObject(stun, "protocol", &stunServer.protocol)) {
LOG(LS_WARNING) << "Failed to parse JSON value: "
<< value.toStyledString();
return false;
}
} else {
return false;
}
}
return true;
}
bool GetTurnServer(const Json::Value& value, std::string& turnServer) {
if (value.type() != Json::objectValue && value.type() != Json::nullValue) {
LOG(LS_WARNING) << "Failed to parse turn values";
return false;
}
Json::Value turn;
if (GetValueFromJsonObject(value, "turn_service", &turn)) {
if (turn.type() == Json::objectValue) {
if (!GetStringFromJsonObject(turn, "host", &turnServer)) {
LOG(LS_WARNING) << "Failed to parse JSON value: "
<< value.toStyledString();
return false;
}
} else {
return false;
}
}
return true;
}
bool GetJSONSignalingMessage(
const cricket::SessionDescription* sdp,
const std::vector<cricket::Candidate>& candidates,
std::string* signaling_message) {
const cricket::ContentInfo* audio_content = GetFirstAudioContent(sdp);
const cricket::ContentInfo* video_content = GetFirstVideoContent(sdp);
std::vector<Json::Value> media;
if (audio_content) {
Json::Value value;
BuildMediaMessage(audio_content, candidates, false, value);
media.push_back(value);
}
if (video_content) {
Json::Value value;
BuildMediaMessage(video_content, candidates, true, value);
media.push_back(value);
}
Json::Value signal;
Append(signal, "media", media);
// now serialize
*signaling_message = Serialize(signal);
return true;
}
bool BuildMediaMessage(
const cricket::ContentInfo* content_info,
const std::vector<cricket::Candidate>& candidates,
bool video,
Json::Value& params) {
if (!content_info) {
return false;
}
if (video) {
Append(params, "label", 2);  // always video 2
} else {
Append(params, "label", 1);  // always audio 1
}
std::vector<Json::Value> rtpmap;
if (!BuildRtpMapParams(content_info, video, rtpmap)) {
return false;
}
Append(params, "rtpmap", rtpmap);
Json::Value attributes;
// Append(attributes, "ice-pwd", candidates.front().password());
// Append(attributes, "ice-ufrag", candidates.front().username());
std::vector<Json::Value> jcandidates;
if (!BuildAttributes(candidates, video, jcandidates)) {
return false;
}
Append(attributes, "candidate", jcandidates);
Append(params, "attributes", attributes);
return true;
}
bool BuildRtpMapParams(const cricket::ContentInfo* content_info,
bool video,
std::vector<Json::Value>& rtpmap) {
if (!video) {
const cricket::AudioContentDescription* audio_offer =
static_cast<const cricket::AudioContentDescription*>(
content_info->description);
for (std::vector<cricket::AudioCodec>::const_iterator iter =
audio_offer->codecs().begin();
iter != audio_offer->codecs().end(); ++iter) {
Json::Value codec;
std::string codec_str = std::string("audio/").append(iter->name);
Append(codec, "codec", codec_str);
Json::Value codec_id;
Append(codec_id, talk_base::ToString(iter->id), codec);
rtpmap.push_back(codec_id);
}
} else {
const cricket::VideoContentDescription* video_offer =
static_cast<const cricket::VideoContentDescription*>(
content_info->description);
for (std::vector<cricket::VideoCodec>::const_iterator iter =
video_offer->codecs().begin();
iter != video_offer->codecs().end(); ++iter) {
Json::Value codec;
std::string codec_str = std::string("video/").append(iter->name);
Append(codec, "codec", codec_str);
Json::Value codec_id;
Append(codec_id, talk_base::ToString(iter->id), codec);
rtpmap.push_back(codec_id);
}
}
return true;
}
bool BuildAttributes(const std::vector<cricket::Candidate>& candidates,
bool video,
std::vector<Json::Value>& jcandidates) {
for (std::vector<cricket::Candidate>::const_iterator iter =
candidates.begin(); iter != candidates.end(); ++iter) {
if ((video && !iter->name().compare("video_rtp")) ||
(!video && !iter->name().compare("rtp"))) {
Json::Value candidate;
Append(candidate, "component", kIceComponent);
Append(candidate, "foundation", kIceFoundation);
Append(candidate, "generation", iter->generation());
Append(candidate, "proto", iter->protocol());
Append(candidate, "priority", iter->preference());
Append(candidate, "ip", iter->address().IPAsString());
Append(candidate, "port", iter->address().PortAsString());
Append(candidate, "type", iter->type());
Append(candidate, "name", iter->name());
Append(candidate, "network_name", iter->network_name());
Append(candidate, "username", iter->username());
Append(candidate, "password", iter->password());
jcandidates.push_back(candidate);
}
}
return true;
}
std::string Serialize(const Json::Value& value) {
Json::StyledWriter writer;
return writer.write(value);
}
bool Deserialize(const std::string& message, Json::Value& value) {
Json::Reader reader;
return reader.parse(message, value);
}
bool ParseJSONSignalingMessage(const std::string& signaling_message,
cricket::SessionDescription*& sdp,
std::vector<cricket::Candidate>& candidates) {
ASSERT(!sdp); // expect this to be NULL
// first deserialize message
Json::Value value;
if (!Deserialize(signaling_message, value)) {
return false;
}
// get media objects
std::vector<Json::Value> mlines = ReadValues(value, "media");
if (mlines.empty()) {
// no m-lines found
return false;
}
sdp = new cricket::SessionDescription();
// get codec information
for (size_t i = 0; i < mlines.size(); ++i) {
if (mlines[i]["label"].asInt() == 1) {
cricket::AudioContentDescription* audio_content =
new cricket::AudioContentDescription();
ParseAudioCodec(mlines[i], audio_content);
audio_content->SortCodecs();
sdp->AddContent(cricket::CN_AUDIO, cricket::NS_JINGLE_RTP, audio_content);
ParseICECandidates(mlines[i], candidates);
} else {
cricket::VideoContentDescription* video_content =
new cricket::VideoContentDescription();
ParseVideoCodec(mlines[i], video_content);
video_content->SortCodecs();
sdp->AddContent(cricket::CN_VIDEO, cricket::NS_JINGLE_RTP, video_content);
ParseICECandidates(mlines[i], candidates);
}
}
return true;
}
bool ParseAudioCodec(Json::Value value,
cricket::AudioContentDescription* content) {
std::vector<Json::Value> rtpmap(ReadValues(value, "rtpmap"));
if (rtpmap.empty())
return false;
for (size_t i = 0; i < rtpmap.size(); ++i) {
cricket::AudioCodec codec;
std::string pltype = rtpmap[i].begin().memberName();
talk_base::FromString(pltype, &codec.id);
Json::Value codec_info = rtpmap[i][pltype];
std::vector<std::string> tokens;
talk_base::split(codec_info["codec"].asString(), '/', &tokens);
codec.name = tokens[1];
content->AddCodec(codec);
}
return true;
}
bool ParseVideoCodec(Json::Value value,
cricket::VideoContentDescription* content) {
std::vector<Json::Value> rtpmap(ReadValues(value, "rtpmap"));
if (rtpmap.empty())
return false;
for (size_t i = 0; i < rtpmap.size(); ++i) {
cricket::VideoCodec codec;
std::string pltype = rtpmap[i].begin().memberName();
talk_base::FromString(pltype, &codec.id);
Json::Value codec_info = rtpmap[i][pltype];
std::vector<std::string> tokens;
talk_base::split(codec_info["codec"].asString(), '/', &tokens);
codec.name = tokens[1];
content->AddCodec(codec);
}
return true;
}
bool ParseICECandidates(Json::Value& value,
std::vector<cricket::Candidate>& candidates) {
Json::Value attributes = ReadValue(value, "attributes");
std::string ice_pwd = ReadString(attributes, "ice-pwd");
std::string ice_ufrag = ReadString(attributes, "ice-ufrag");
std::vector<Json::Value> jcandidates = ReadValues(attributes, "candidate");
char buffer[64];
for (size_t i = 0; i < jcandidates.size(); ++i) {
cricket::Candidate cand;
std::string str;
str = talk_base::ToString(ReadUInt(jcandidates[i], "generation"));
cand.set_generation_str(str);
str = ReadString(jcandidates[i], "proto");
cand.set_protocol(str);
double priority = ReadDouble(jcandidates[i], "priority");
#ifdef _DEBUG
double as_int = static_cast<int>(priority);
ASSERT(as_int == priority);
#endif
sprintf(buffer, "%i", static_cast<int>(priority));
str = buffer;
cand.set_preference_str(str);
talk_base::SocketAddress addr;
str = ReadString(jcandidates[i], "ip");
addr.SetIP(str);
str = ReadString(jcandidates[i], "port");
int port; talk_base::FromString(str, &port);
addr.SetPort(port);
cand.set_address(addr);
str = ReadString(jcandidates[i], "type");
cand.set_type(str);
str = ReadString(jcandidates[i], "name");
cand.set_name(str);
str = ReadString(jcandidates[i], "network_name");
cand.set_network_name(str);
str = ReadString(jcandidates[i], "username");
cand.set_username(str);
str = ReadString(jcandidates[i], "password");
cand.set_password(str);
candidates.push_back(cand);
}
return true;
}
std::vector<Json::Value> ReadValues(
Json::Value& value, const std::string& key) {
std::vector<Json::Value> objects;
for (size_t i = 0; i < value[key].size(); ++i) {
objects.push_back(value[key][i]);
}
return objects;
}
Json::Value ReadValue(Json::Value& value, const std::string& key) {
return value[key];
}
std::string ReadString(Json::Value& value, const std::string& key) {
return value[key].asString();
}
uint32 ReadUInt(Json::Value& value, const std::string& key) {
return value[key].asUInt();
}
double ReadDouble(Json::Value& value, const std::string& key) {
return value[key].asDouble();
}
// Add values
void Append(Json::Value& object, const std::string& key, bool value) {
object[key] = Json::Value(value);
}
void Append(Json::Value& object, const std::string& key, char * value) {
object[key] = Json::Value(value);
}
void Append(Json::Value& object, const std::string& key, double value) {
object[key] = Json::Value(value);
}
void Append(Json::Value& object, const std::string& key, float value) {
object[key] = Json::Value(value);
}
void Append(Json::Value& object, const std::string& key, int value) {
object[key] = Json::Value(value);
}
void Append(Json::Value& object, const std::string& key, std::string value) {
object[key] = Json::Value(value);
}
void Append(Json::Value& object, const std::string& key, uint32 value) {
object[key] = Json::Value(value);
}
void Append(Json::Value& object, const std::string& key, Json::Value value) {
object[key] = value;
}
void Append(Json::Value& object,
const std::string& key,
std::vector<Json::Value>& values) {
for (std::vector<Json::Value>::const_iterator iter = values.begin();
iter != values.end(); ++iter) {
object[key].append(*iter);
}
}
} // namespace webrtc
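
For orientation, a minimal round-trip sketch of the helpers above. It assumes sdp and candidates were already populated by a session layer, and it honors ParseJSONSignalingMessage()'s requirement that the output pointer start out NULL; RoundTripExample() itself is hypothetical:

void RoundTripExample(const cricket::SessionDescription* sdp,
                      const std::vector<cricket::Candidate>& candidates) {
  std::string message;
  if (!webrtc::GetJSONSignalingMessage(sdp, candidates, &message))
    return;
  cricket::SessionDescription* parsed = NULL;  // Must start NULL (see ASSERT).
  std::vector<cricket::Candidate> parsed_candidates;
  if (webrtc::ParseJSONSignalingMessage(message, parsed, parsed_candidates)) {
    // On success, parsed owns newly allocated content descriptions.
    delete parsed;
  }
}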


@ -1,100 +0,0 @@
/*
* libjingle
* Copyright 2004--2011, Google Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
* EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef TALK_APP_WEBRTC_WEBRTCSESSION_H_
#define TALK_APP_WEBRTC_WEBRTCSESSION_H_
#include "talk/base/logging.h"
#include "talk/p2p/base/constants.h"
#include "talk/p2p/base/session.h"
namespace cricket {
class PortAllocator;
}
namespace webrtc {
class PeerConnection;
class WebRTCSession: public cricket::BaseSession {
public:
WebRTCSession(const std::string& id, const std::string& direction,
cricket::PortAllocator* allocator,
PeerConnection* connection,
talk_base::Thread* signaling_thread)
: BaseSession(signaling_thread),
signaling_thread_(signaling_thread),
id_(id),
incoming_(direction == kIncomingDirection),
port_allocator_(allocator),
connection_(connection) {
BaseSession::sid_ = id;
}
virtual ~WebRTCSession() {
}
virtual bool Initiate() = 0;
const std::string& id() const { return id_; }
//const std::string& type() const { return type_; }
bool incoming() const { return incoming_; }
cricket::PortAllocator* port_allocator() const { return port_allocator_; }
// static const std::string kAudioType;
// static const std::string kVideoType;
static const std::string kIncomingDirection;
static const std::string kOutgoingDirection;
// static const std::string kTestType;
PeerConnection* connection() const { return connection_; }
protected:
//methods from cricket::BaseSession
virtual bool Accept(const cricket::SessionDescription* sdesc) {
return true;
}
virtual bool Reject(const std::string& reason) {
return true;
}
virtual bool TerminateWithReason(const std::string& reason) {
return true;
}
protected:
talk_base::Thread* signaling_thread_;
private:
std::string id_;
//std::string type_;
bool incoming_;
cricket::PortAllocator* port_allocator_;
PeerConnection* connection_;
};
} // namespace webrtc
#endif /* TALK_APP_WEBRTC_WEBRTCSESSION_H_ */
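
A minimal sketch of a concrete subclass, using only the interface shown above; OutgoingTestSession and its trivial Initiate() are hypothetical:

class OutgoingTestSession : public webrtc::WebRTCSession {
 public:
  OutgoingTestSession(const std::string& id,
                      cricket::PortAllocator* allocator,
                      webrtc::PeerConnection* connection,
                      talk_base::Thread* signaling_thread)
      : WebRTCSession(id, kOutgoingDirection, allocator, connection,
                      signaling_thread) {}
  // A real implementation would create channels and start gathering
  // candidates through port_allocator() here.
  virtual bool Initiate() { return true; }
};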

File diff suppressed because it is too large


@ -1,100 +0,0 @@
/*
* webrtcsessionimpl_unittest.cc
*
* Created on: Mar 11, 2011
* Author: mallinath
*/
#include "talk/base/gunit.h"
#include "talk/base/logging.h"
#include "talk/base/scoped_ptr.h"
#include "talk/base/sigslot.h"
#include "talk/app/webrtcsessionimpl.h"
#include "talk/p2p/client/basicportallocator.h"
#include "talk/session/phone/channelmanager.h"
#include "talk/session/phone/fakemediaengine.h"
#include "talk/session/phone/fakesession.h"
namespace webrtc {
using talk_base::scoped_ptr;
static const char* kTestSessionId = "1234";
class WebRTCSessionImplForTest : public WebRTCSessionImpl {
public:
WebRTCSessionImplForTest(const std::string& jid, const std::string& id,
const std::string& type,
const std::string& direction,
cricket::PortAllocator* allocator,
cricket::ChannelManager* channelmgr)
: WebRTCSessionImpl(NULL, id, type, direction, allocator, channelmgr) {
}
~WebRTCSessionImplForTest() {
// Do nothing.
}
virtual cricket::Transport* GetTransport() {
return static_cast<cricket::FakeTransport*>(WebRTCSessionImpl::GetTransport());
}
protected:
virtual cricket::Transport* CreateTransport() {
return new cricket::FakeTransport(talk_base::Thread::Current(), talk_base::Thread::Current());
}
};
class WebRTCSessionImplTest : public sigslot::has_slots<>,
public testing::Test {
public:
WebRTCSessionImplTest() {
network_mgr_.reset(new talk_base::NetworkManager());
port_allocator_.reset(new cricket::BasicPortAllocator(network_mgr_.get()));
media_engine_ = new cricket::FakeMediaEngine();
channel_mgr_.reset(new cricket::ChannelManager(talk_base::Thread::Current()));
}
~WebRTCSessionImplTest() {
}
void CreateSession(const std::string& jid, const std::string& id,
const std::string& type, const std::string& dir) {
session_.reset(new WebRTCSessionImplForTest(jid, id, type, dir,
port_allocator_.get(),
channel_mgr_.get()));
}
bool InitiateCall(const std::string& jid, const std::string& id,
const std::string& type, const std::string& dir) {
CreateSession(jid, id, type, dir);
bool ret = session_->Initiate();
return ret;
}
bool GetCandidates() {
return InitiateCall("", kTestSessionId, "t", "s");
}
protected:
scoped_ptr<talk_base::NetworkManager> network_mgr_;
scoped_ptr<cricket::BasicPortAllocator> port_allocator_;
cricket::FakeMediaEngine* media_engine_;
scoped_ptr<cricket::ChannelManager> channel_mgr_;
scoped_ptr<WebRTCSessionImplForTest> session_;
};
TEST_F(WebRTCSessionImplTest, TestGetCandidatesCall) {
EXPECT_TRUE(GetCandidates());
EXPECT_EQ(cricket::Session::STATE_INIT, session_->state());
EXPECT_EQ(kTestSessionId, session_->id());
EXPECT_EQ(WebRTCSession::kTestType, session_->type());
EXPECT_FALSE(session_->incoming());
}
} /* namespace webrtc */


@ -121,7 +121,7 @@ bool GetValueFromJsonArray(const Json::Value& in, size_t n,
return false;
}
*out = in[n];
*out = in[static_cast<unsigned int>(n)];
return true;
}
@ -199,7 +199,7 @@ bool JsonValueToStringVector(const Json::Value& value,
return false;
}
for (size_t i = 0; i < value.size(); ++i) {
for (unsigned int i = 0; i < value.size(); ++i) {
if (value[i].isString()) {
strings->push_back(value[i].asString());
} else {


@ -1,972 +0,0 @@
/*
* libjingle
* Copyright 2004--2005, Google Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
* EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "talk/p2p/base/p2ptransportchannel.h"
#include <set>
#include "talk/base/buffer.h"
#include "talk/base/common.h"
#include "talk/base/logging.h"
#include "talk/p2p/base/common.h"
namespace {
// messages for queuing up work for ourselves
const uint32 MSG_SORT = 1;
const uint32 MSG_PING = 2;
const uint32 MSG_ALLOCATE = 3;
#ifdef PLATFORM_CHROMIUM
const uint32 MSG_SENDPACKET = 4;
struct SendPacketParams : public talk_base::MessageData {
talk_base::Buffer packet;
};
#endif
// When the socket is unwritable, we will use 10 Kbps (ignoring IP+UDP headers)
// for pinging. When the socket is writable, we will use only 1 Kbps because
// we don't want to degrade the quality on a modem. These numbers should work
// well on a 28.8K modem, which is the slowest connection on which the voice
// quality is reasonable at all.
static const uint32 PING_PACKET_SIZE = 60 * 8;
static const uint32 WRITABLE_DELAY = 1000 * PING_PACKET_SIZE / 1000; // 480ms
static const uint32 UNWRITABLE_DELAY = 1000 * PING_PACKET_SIZE / 10000; // 48ms
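// Worked example of the two delays above: PING_PACKET_SIZE is
// 60 * 8 = 480 bits, so WRITABLE_DELAY = 1000 * 480 / 1000 = 480 ms
// (480 bits / 0.48 s = 1 kbps) and UNWRITABLE_DELAY = 1000 * 480 / 10000
// = 48 ms (480 bits / 0.048 s = 10 kbps).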
// If there is a current writable connection, then we will also try hard to
// make sure it is pinged at this rate.
static const uint32 MAX_CURRENT_WRITABLE_DELAY = 900; // 2 * WRITABLE_DELAY minus a bit
// The minimum improvement in RTT that justifies a switch.
static const double kMinImprovement = 10;
// Amount of time that we wait when *losing* writability before we try doing
// another allocation.
static const int kAllocateDelay = 1 * 1000; // 1 second
// We will try creating a new allocator from scratch after a delay of this
// length without becoming writable (or timing out).
static const int kAllocatePeriod = 20 * 1000; // 20 seconds
cricket::Port::CandidateOrigin GetOrigin(cricket::Port* port,
cricket::Port* origin_port) {
if (!origin_port)
return cricket::Port::ORIGIN_MESSAGE;
else if (port == origin_port)
return cricket::Port::ORIGIN_THIS_PORT;
else
return cricket::Port::ORIGIN_OTHER_PORT;
}
// Compares two connections based only on static information about them.
int CompareConnectionCandidates(cricket::Connection* a,
cricket::Connection* b) {
// Combine local and remote preferences
ASSERT(a->local_candidate().preference() == a->port()->preference());
ASSERT(b->local_candidate().preference() == b->port()->preference());
double a_pref = a->local_candidate().preference()
* a->remote_candidate().preference();
double b_pref = b->local_candidate().preference()
* b->remote_candidate().preference();
// Now check combined preferences. Lower values get sorted last.
if (a_pref > b_pref)
return 1;
if (a_pref < b_pref)
return -1;
// If we're still tied at this point, prefer a younger generation.
return (a->remote_candidate().generation() + a->port()->generation()) -
(b->remote_candidate().generation() + b->port()->generation());
}
// Compare two connections based on their writability and static preferences.
int CompareConnections(cricket::Connection *a, cricket::Connection *b) {
// Sort based on write-state. Better states have lower values.
if (a->write_state() < b->write_state())
return 1;
if (a->write_state() > b->write_state())
return -1;
// Compare the candidate information.
return CompareConnectionCandidates(a, b);
}
// Wraps the comparison connection into a less than operator that puts higher
// priority writable connections first.
class ConnectionCompare {
public:
bool operator()(const cricket::Connection *ca,
const cricket::Connection *cb) {
cricket::Connection* a = const_cast<cricket::Connection*>(ca);
cricket::Connection* b = const_cast<cricket::Connection*>(cb);
// Compare first on writability and static preferences.
int cmp = CompareConnections(a, b);
if (cmp > 0)
return true;
if (cmp < 0)
return false;
// Otherwise, sort based on latency estimate.
return a->rtt() < b->rtt();
// Should we bother checking for the last connection that last received
// data? It would help rendezvous on the connection that is also receiving
// packets.
//
// TODO: Yes we should definitely do this. The TCP protocol gains
// efficiency by being used bidirectionally, as opposed to two separate
// unidirectional streams. This test should probably occur before
// comparison of local prefs (assuming combined prefs are the same). We
// need to be careful though, not to bounce back and forth with both sides
// trying to rendezvous with the other.
}
};
// Determines whether we should switch between two connections, based first on
// static preferences and then (if those are equal) on latency estimates.
bool ShouldSwitch(cricket::Connection* a_conn, cricket::Connection* b_conn) {
if (a_conn == b_conn)
return false;
if (!a_conn || !b_conn) // don't think the latter should happen
return true;
int prefs_cmp = CompareConnections(a_conn, b_conn);
if (prefs_cmp < 0)
return true;
if (prefs_cmp > 0)
return false;
return b_conn->rtt() <= a_conn->rtt() + kMinImprovement;
}
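// Illustrative numbers for ShouldSwitch(): with equal static preferences it
// returns true whenever b's RTT estimate exceeds a's by no more than
// kMinImprovement, e.g. a->rtt() == 100 with b->rtt() == 105 switches
// (105 <= 110), while b->rtt() == 115 does not.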
} // unnamed namespace
namespace cricket {
P2PTransportChannel::P2PTransportChannel(const std::string &name,
const std::string &content_type,
P2PTransport* transport,
PortAllocator *allocator) :
TransportChannelImpl(name, content_type),
transport_(transport),
allocator_(allocator),
worker_thread_(talk_base::Thread::Current()),
incoming_only_(false),
waiting_for_signaling_(false),
error_(0),
best_connection_(NULL),
pinging_started_(false),
sort_dirty_(false),
was_writable_(false),
was_timed_out_(true) {
}
P2PTransportChannel::~P2PTransportChannel() {
ASSERT(worker_thread_ == talk_base::Thread::Current());
for (uint32 i = 0; i < allocator_sessions_.size(); ++i)
delete allocator_sessions_[i];
}
// Add the allocator session to our list so that we know which sessions
// are still active.
void P2PTransportChannel::AddAllocatorSession(PortAllocatorSession* session) {
session->set_generation(static_cast<uint32>(allocator_sessions_.size()));
allocator_sessions_.push_back(session);
// We now only want to apply new candidates that we receive to the ports
// created by this new session because these are replacing those of the
// previous sessions.
ports_.clear();
session->SignalPortReady.connect(this, &P2PTransportChannel::OnPortReady);
session->SignalCandidatesReady.connect(
this, &P2PTransportChannel::OnCandidatesReady);
session->GetInitialPorts();
if (pinging_started_)
session->StartGetAllPorts();
}
// Go into the state of processing candidates, and running in general
void P2PTransportChannel::Connect() {
ASSERT(worker_thread_ == talk_base::Thread::Current());
// Kick off an allocator session
Allocate();
// Start pinging as the ports come in.
thread()->Post(this, MSG_PING);
}
// Reset the socket, clear up any previous allocations and start over
void P2PTransportChannel::Reset() {
ASSERT(worker_thread_ == talk_base::Thread::Current());
// Get rid of all the old allocators. This should clean up everything.
for (uint32 i = 0; i < allocator_sessions_.size(); ++i)
delete allocator_sessions_[i];
allocator_sessions_.clear();
ports_.clear();
connections_.clear();
best_connection_ = NULL;
// Forget about all of the candidates we got before.
remote_candidates_.clear();
// Revert to the initial state.
set_readable(false);
set_writable(false);
// Reinitialize the rest of our state.
waiting_for_signaling_ = false;
pinging_started_ = false;
sort_dirty_ = false;
was_writable_ = false;
was_timed_out_ = true;
// If we allocated before, start a new one now.
if (transport_->connect_requested())
Allocate();
// Start pinging as the ports come in.
thread()->Clear(this);
thread()->Post(this, MSG_PING);
}
// A new port is available, attempt to make connections for it
void P2PTransportChannel::OnPortReady(PortAllocatorSession *session,
Port* port) {
ASSERT(worker_thread_ == talk_base::Thread::Current());
// Set in-effect options on the new port
for (OptionMap::const_iterator it = options_.begin();
it != options_.end();
++it) {
int val = port->SetOption(it->first, it->second);
if (val < 0) {
LOG_J(LS_WARNING, port) << "SetOption(" << it->first
<< ", " << it->second
<< ") failed: " << port->GetError();
}
}
// Remember the ports and candidates, and signal that candidates are ready.
// The session will handle this, and send an initiate/accept/modify message
// if one is pending.
ports_.push_back(port);
port->SignalUnknownAddress.connect(
this, &P2PTransportChannel::OnUnknownAddress);
port->SignalDestroyed.connect(this, &P2PTransportChannel::OnPortDestroyed);
// Attempt to create a connection from this new port to all of the remote
// candidates that we were given so far.
std::vector<RemoteCandidate>::iterator iter;
for (iter = remote_candidates_.begin(); iter != remote_candidates_.end();
++iter) {
CreateConnection(port, *iter, iter->origin_port(), false);
}
SortConnections();
}
// A new candidate is available, let listeners know
void P2PTransportChannel::OnCandidatesReady(
PortAllocatorSession *session, const std::vector<Candidate>& candidates) {
for (size_t i = 0; i < candidates.size(); ++i) {
SignalCandidateReady(this, candidates[i]);
}
}
// Handle stun packets
void P2PTransportChannel::OnUnknownAddress(
Port *port, const talk_base::SocketAddress &address, StunMessage *stun_msg,
const std::string &remote_username) {
ASSERT(worker_thread_ == talk_base::Thread::Current());
// Port has received a valid stun packet from an address that no Connection
// is currently available for. See if the remote user name is in the remote
// candidate list. If it isn't return error to the stun request.
const Candidate *candidate = NULL;
std::vector<RemoteCandidate>::iterator it;
for (it = remote_candidates_.begin(); it != remote_candidates_.end(); ++it) {
if ((*it).username() == remote_username) {
candidate = &(*it);
break;
}
}
if (candidate == NULL) {
// Don't know about this username, the request is bogus
// This sometimes happens if a binding response comes in before the ACCEPT
// message. It is totally valid; the retry state machine will try again.
port->SendBindingErrorResponse(stun_msg, address,
STUN_ERROR_STALE_CREDENTIALS, STUN_ERROR_REASON_STALE_CREDENTIALS);
delete stun_msg;
return;
}
// Check for connectivity to this address. Create connections
// to this address across all local ports. First, add this as a new remote
// address
Candidate new_remote_candidate = *candidate;
new_remote_candidate.set_address(address);
// new_remote_candidate.set_protocol(port->protocol());
// This remote username exists. Now create connections using this candidate,
// and resort
if (CreateConnections(new_remote_candidate, port, true)) {
// Send the pinger a successful stun response.
port->SendBindingResponse(stun_msg, address);
// Update the list of connections since we just added another. We do this
// after sending the response since it could (in principle) delete the
// connection in question.
SortConnections();
} else {
// Hopefully this won't occur, because changing a destination address
// shouldn't cause a new connection to fail
ASSERT(false);
port->SendBindingErrorResponse(stun_msg, address, STUN_ERROR_SERVER_ERROR,
STUN_ERROR_REASON_SERVER_ERROR);
}
delete stun_msg;
}
void P2PTransportChannel::OnCandidate(const Candidate& candidate) {
ASSERT(worker_thread_ == talk_base::Thread::Current());
// Create connections to this remote candidate.
CreateConnections(candidate, NULL, false);
// Resort the connections list, which may have new elements.
SortConnections();
}
// Creates connections from all of the ports that we care about to the given
// remote candidate. The return value is true if we created a connection from
// the origin port.
bool P2PTransportChannel::CreateConnections(const Candidate &remote_candidate,
Port* origin_port,
bool readable) {
ASSERT(worker_thread_ == talk_base::Thread::Current());
// Add a new connection for this candidate to every port that allows such a
// connection (i.e., if they have compatible protocols) and that does not
// already have a connection to an equivalent candidate. We must be careful
// to make sure that the origin port is included, even if it was pruned,
// since that may be the only port that can create this connection.
bool created = false;
std::vector<Port *>::reverse_iterator it;
for (it = ports_.rbegin(); it != ports_.rend(); ++it) {
if (CreateConnection(*it, remote_candidate, origin_port, readable)) {
if (*it == origin_port)
created = true;
}
}
if ((origin_port != NULL) &&
std::find(ports_.begin(), ports_.end(), origin_port) == ports_.end()) {
if (CreateConnection(origin_port, remote_candidate, origin_port, readable))
created = true;
}
// Remember this remote candidate so that we can add it to future ports.
RememberRemoteCandidate(remote_candidate, origin_port);
return created;
}
// Setup a connection object for the local and remote candidate combination.
// And then listen to connection object for changes.
bool P2PTransportChannel::CreateConnection(Port* port,
const Candidate& remote_candidate,
Port* origin_port,
bool readable) {
// Look for an existing connection with this remote address. If one is not
// found, then we can create a new connection for this address.
Connection* connection = port->GetConnection(remote_candidate.address());
if (connection != NULL) {
// It is not legal to try to change any of the parameters of an existing
// connection; however, the other side can send a duplicate candidate.
if (!remote_candidate.IsEquivalent(connection->remote_candidate())) {
LOG(INFO) << "Attempt to change a remote candidate";
return false;
}
} else {
Port::CandidateOrigin origin = GetOrigin(port, origin_port);
// Don't create connection if this is a candidate we received in a
// message and we are not allowed to make outgoing connections.
if (origin == cricket::Port::ORIGIN_MESSAGE && incoming_only_)
return false;
connection = port->CreateConnection(remote_candidate, origin);
if (!connection)
return false;
connections_.push_back(connection);
connection->SignalReadPacket.connect(
this, &P2PTransportChannel::OnReadPacket);
connection->SignalStateChange.connect(
this, &P2PTransportChannel::OnConnectionStateChange);
connection->SignalDestroyed.connect(
this, &P2PTransportChannel::OnConnectionDestroyed);
LOG_J(LS_INFO, this) << "Created connection with origin=" << origin << ", ("
<< connections_.size() << " total)";
}
// If we are readable, it is because we are creating this in response to a
// ping from the other side. This will cause the state to become readable.
if (readable)
connection->ReceivedPing();
return true;
}
// Maintain our remote candidate list, adding this new remote one.
void P2PTransportChannel::RememberRemoteCandidate(
const Candidate& remote_candidate, Port* origin_port) {
// Remove any candidates whose generation is older than this one. The
// presence of a new generation indicates that the old ones are not useful.
uint32 i = 0;
while (i < remote_candidates_.size()) {
if (remote_candidates_[i].generation() < remote_candidate.generation()) {
LOG(INFO) << "Pruning candidate from old generation: "
<< remote_candidates_[i].address().ToString();
remote_candidates_.erase(remote_candidates_.begin() + i);
} else {
i += 1;
}
}
// Make sure this candidate is not a duplicate.
for (uint32 i = 0; i < remote_candidates_.size(); ++i) {
if (remote_candidates_[i].IsEquivalent(remote_candidate)) {
LOG(INFO) << "Duplicate candidate: "
<< remote_candidate.address().ToString();
return;
}
}
// Try this candidate for all future ports.
remote_candidates_.push_back(RemoteCandidate(remote_candidate, origin_port));
// We have some candidates from the other side, we are now serious about
// this connection. Let's do the StartGetAllPorts thing.
if (!pinging_started_) {
pinging_started_ = true;
for (size_t i = 0; i < allocator_sessions_.size(); ++i) {
if (!allocator_sessions_[i]->IsGettingAllPorts())
allocator_sessions_[i]->StartGetAllPorts();
}
}
}
// Send data to the other side, using our best connection
int P2PTransportChannel::SendPacket(talk_base::Buffer* packet) {
#ifdef PLATFORM_CHROMIUM
if (worker_thread_ != talk_base::Thread::Current()) {
SendPacketParams* params = new SendPacketParams;
packet->TransferTo(&params->packet);
worker_thread_->Post(this, MSG_SENDPACKET, params);
return params->packet.length();
}
#endif
return SendPacket(packet->data(), packet->length());
}
// Send data to the other side, using our best connection
int P2PTransportChannel::SendPacket(const char *data, size_t len) {
// This can get called on any thread that is convenient to write from!
if (best_connection_ == NULL) {
error_ = EWOULDBLOCK;
return SOCKET_ERROR;
}
int sent = best_connection_->Send(data, len);
if (sent <= 0) {
ASSERT(sent < 0);
error_ = best_connection_->GetError();
}
return sent;
}
// Begin allocate (or immediately re-allocate, if MSG_ALLOCATE pending)
void P2PTransportChannel::Allocate() {
CancelPendingAllocate();
// Time for a new allocator; let's make sure we have a signaling channel
// to communicate candidates through first.
waiting_for_signaling_ = true;
SignalRequestSignaling();
}
// Cancels the pending allocate, if any.
void P2PTransportChannel::CancelPendingAllocate() {
thread()->Clear(this, MSG_ALLOCATE);
}
// Monitor connection states
void P2PTransportChannel::UpdateConnectionStates() {
uint32 now = talk_base::Time();
// We need to copy the list of connections since some may delete themselves
// when we call UpdateState.
for (uint32 i = 0; i < connections_.size(); ++i)
connections_[i]->UpdateState(now);
}
// Prepare for best candidate sorting
void P2PTransportChannel::RequestSort() {
if (!sort_dirty_) {
worker_thread_->Post(this, MSG_SORT);
sort_dirty_ = true;
}
}
// Sort the available connections to find the best one. We also monitor
// the number of available connections and the current state so that we
// can possibly kick off more allocators (for more connections).
void P2PTransportChannel::SortConnections() {
ASSERT(worker_thread_ == talk_base::Thread::Current());
// Make sure the connection states are up-to-date since this affects how they
// will be sorted.
UpdateConnectionStates();
// Any changes after this point will require a re-sort.
sort_dirty_ = false;
// Get a list of the networks that we are using.
std::set<talk_base::Network*> networks;
for (uint32 i = 0; i < connections_.size(); ++i)
networks.insert(connections_[i]->port()->network());
// Find the best alternative connection by sorting. It is important to note
// that amongst equal preference, writable connections, this will choose the
// one whose estimated latency is lowest. So it is the only one that we
// need to consider switching to.
ConnectionCompare cmp;
std::stable_sort(connections_.begin(), connections_.end(), cmp);
LOG(LS_VERBOSE) << "Sorting available connections:";
for (uint32 i = 0; i < connections_.size(); ++i) {
LOG(LS_VERBOSE) << connections_[i]->ToString();
}
Connection* top_connection = NULL;
if (connections_.size() > 0)
top_connection = connections_[0];
// If necessary, switch to the new choice.
if (ShouldSwitch(best_connection_, top_connection))
SwitchBestConnectionTo(top_connection);
// We can prune any connection for which there is a writable connection on
// the same network with better or equal preferences. We leave those with
// better preference just in case they become writable later (at which point,
// we would prune out the current best connection). We leave connections on
// other networks because they may not be using the same resources and they
// may represent very distinct paths over which we can switch.
std::set<talk_base::Network*>::iterator network;
for (network = networks.begin(); network != networks.end(); ++network) {
Connection* premier = GetBestConnectionOnNetwork(*network);
if (!premier || (premier->write_state() != Connection::STATE_WRITABLE))
continue;
for (uint32 i = 0; i < connections_.size(); ++i) {
if ((connections_[i] != premier) &&
(connections_[i]->port()->network() == *network) &&
(CompareConnectionCandidates(premier, connections_[i]) >= 0)) {
connections_[i]->Prune();
}
}
}
// Count the number of connections in the various states.
int writable = 0;
int write_connect = 0;
int write_timeout = 0;
for (uint32 i = 0; i < connections_.size(); ++i) {
switch (connections_[i]->write_state()) {
case Connection::STATE_WRITABLE:
++writable;
break;
case Connection::STATE_WRITE_CONNECT:
++write_connect;
break;
case Connection::STATE_WRITE_TIMEOUT:
++write_timeout;
break;
default:
ASSERT(false);
}
}
if (writable > 0) {
HandleWritable();
} else if (write_connect > 0) {
HandleNotWritable();
} else {
HandleAllTimedOut();
}
// Update the state of this channel. This method is called whenever the
// state of any connection changes, so this is a good place to do this.
UpdateChannelState();
// Notify of connection state change
SignalConnectionMonitor(this);
}
// Track the best connection, and let listeners know
void P2PTransportChannel::SwitchBestConnectionTo(Connection* conn) {
// Note: if conn is NULL, the previous best_connection_ has been destroyed,
// so don't use it.
Connection* old_best_connection = best_connection_;
best_connection_ = conn;
if (best_connection_) {
if (old_best_connection) {
LOG_J(LS_INFO, this) << "Previous best connection: "
<< old_best_connection->ToString();
}
LOG_J(LS_INFO, this) << "New best connection: "
<< best_connection_->ToString();
SignalRouteChange(this, best_connection_->remote_candidate());
} else {
LOG_J(LS_INFO, this) << "No best connection";
}
}
void P2PTransportChannel::UpdateChannelState() {
// The Handle* functions already set the writable state. We'll just double-
// check it here.
bool writable = ((best_connection_ != NULL) &&
(best_connection_->write_state() ==
Connection::STATE_WRITABLE));
ASSERT(writable == this->writable());
if (writable != this->writable())
LOG(LS_ERROR) << "UpdateChannelState: writable state mismatch";
bool readable = false;
for (uint32 i = 0; i < connections_.size(); ++i) {
if (connections_[i]->read_state() == Connection::STATE_READABLE)
readable = true;
}
set_readable(readable);
}
// We checked the status of our connections and we had at least one that
// was writable, go into the writable state.
void P2PTransportChannel::HandleWritable() {
//
// One or more connections writable!
//
if (!writable()) {
for (uint32 i = 0; i < allocator_sessions_.size(); ++i) {
if (allocator_sessions_[i]->IsGettingAllPorts()) {
allocator_sessions_[i]->StopGetAllPorts();
}
}
// Stop further allocations.
CancelPendingAllocate();
}
// We're writable, obviously we aren't timed out
was_writable_ = true;
was_timed_out_ = false;
set_writable(true);
}
// We checked the status of our connections and we didn't have any that
// were writable, go into the connecting state (kick off a new allocator
// session).
void P2PTransportChannel::HandleNotWritable() {
//
// No connections are writable but not timed out!
//
if (was_writable_) {
// If we were writable, let's kick off an allocator session immediately
was_writable_ = false;
Allocate();
}
// We were connecting, obviously not ALL timed out.
was_timed_out_ = false;
set_writable(false);
}
// We checked the status of our connections and not only weren't they writable
// but they were also timed out, we really need a new allocator.
void P2PTransportChannel::HandleAllTimedOut() {
//
// No connections... all are timed out!
//
if (!was_timed_out_) {
// We weren't timed out before, so kick off an allocator now (we'll still
// be in the fully timed out state until the allocator actually gives back
// new ports)
Allocate();
}
// NOTE: we start was_timed_out_ in the true state so that we don't get
// another allocator created WHILE we are in the process of building up
// our first allocator.
was_timed_out_ = true;
was_writable_ = false;
set_writable(false);
}
// If we have a best connection, return it, otherwise return top one in the
// list (later we will mark it best).
Connection* P2PTransportChannel::GetBestConnectionOnNetwork(
talk_base::Network* network) {
// If the best connection is on this network, then it wins.
if (best_connection_ && (best_connection_->port()->network() == network))
return best_connection_;
// Otherwise, we return the top-most in sorted order.
for (uint32 i = 0; i < connections_.size(); ++i) {
if (connections_[i]->port()->network() == network)
return connections_[i];
}
return NULL;
}
// Handle any queued up requests
void P2PTransportChannel::OnMessage(talk_base::Message *pmsg) {
if (pmsg->message_id == MSG_SORT)
OnSort();
else if (pmsg->message_id == MSG_PING)
OnPing();
else if (pmsg->message_id == MSG_ALLOCATE)
Allocate();
#ifdef PLATFORM_CHROMIUM
else if (pmsg->message_id == MSG_SENDPACKET) {
SendPacketParams* data = static_cast<SendPacketParams*>(pmsg->pdata);
SendPacket(&data->packet);
delete data; // because it is Posted
}
#endif
else
ASSERT(false);
}
// Handle queued up sort request
void P2PTransportChannel::OnSort() {
// Resort the connections based on the new statistics.
SortConnections();
}
// Handle queued up ping request
void P2PTransportChannel::OnPing() {
// Make sure the states of the connections are up-to-date (since this affects
// which ones are pingable).
UpdateConnectionStates();
// Find the oldest pingable connection and have it do a ping.
Connection* conn = FindNextPingableConnection();
if (conn)
conn->Ping(talk_base::Time());
// Post ourselves a message to perform the next ping.
uint32 delay = writable() ? WRITABLE_DELAY : UNWRITABLE_DELAY;
thread()->PostDelayed(delay, this, MSG_PING);
}
// Is the connection in a state for us to even consider pinging the other side?
bool P2PTransportChannel::IsPingable(Connection* conn) {
// An unconnected connection cannot be written to at all, so pinging is out
// of the question.
if (!conn->connected())
return false;
if (writable()) {
// If we are writable, then we only want to ping connections that could be
// better than this one, i.e., the ones that were not pruned.
return (conn->write_state() != Connection::STATE_WRITE_TIMEOUT);
} else {
// If we are not writable, then we need to try everything that might work.
// This includes both connections that do not have write timeout as well as
// ones that do not have read timeout. A connection could be readable but
// be in write-timeout if we pruned it before. Since the other side is
// still pinging it, it very well might still work.
return (conn->write_state() != Connection::STATE_WRITE_TIMEOUT) ||
(conn->read_state() != Connection::STATE_READ_TIMEOUT);
}
}
// Returns the next pingable connection to ping. This will be the oldest
// pingable connection unless we have a writable connection that is past the
// maximum acceptable ping delay.
Connection* P2PTransportChannel::FindNextPingableConnection() {
uint32 now = talk_base::Time();
if (best_connection_ &&
(best_connection_->write_state() == Connection::STATE_WRITABLE) &&
(best_connection_->last_ping_sent()
+ MAX_CURRENT_WRITABLE_DELAY <= now)) {
return best_connection_;
}
Connection* oldest_conn = NULL;
uint32 oldest_time = 0xFFFFFFFF;
for (uint32 i = 0; i < connections_.size(); ++i) {
if (IsPingable(connections_[i])) {
if (connections_[i]->last_ping_sent() < oldest_time) {
oldest_time = connections_[i]->last_ping_sent();
oldest_conn = connections_[i];
}
}
}
return oldest_conn;
}
// return the number of "pingable" connections
uint32 P2PTransportChannel::NumPingableConnections() {
uint32 count = 0;
for (uint32 i = 0; i < connections_.size(); ++i) {
if (IsPingable(connections_[i]))
count += 1;
}
return count;
}
// When a connection's state changes, we need to figure out who to use as
// the best connection again. It could have become usable, or become unusable.
void P2PTransportChannel::OnConnectionStateChange(Connection *connection) {
ASSERT(worker_thread_ == talk_base::Thread::Current());
// We have to unroll the stack before doing this because we may be changing
// the state of connections while sorting.
RequestSort();
}
// When a connection is removed, edit it out, and then update our best
// connection.
void P2PTransportChannel::OnConnectionDestroyed(Connection *connection) {
ASSERT(worker_thread_ == talk_base::Thread::Current());
// Note: the previous best_connection_ may be destroyed by now, so don't
// use it.
// Remove this connection from the list.
std::vector<Connection*>::iterator iter =
std::find(connections_.begin(), connections_.end(), connection);
ASSERT(iter != connections_.end());
connections_.erase(iter);
LOG_J(LS_INFO, this) << "Removed connection ("
<< static_cast<int>(connections_.size()) << " remaining)";
// If this is currently the best connection, then we need to pick a new one.
// The call to SortConnections will pick a new one. It looks at the current
// best connection in order to avoid switching between fairly similar ones.
// Since this connection is no longer an option, we can just set best to NULL
// and re-choose a best assuming that there was no best connection.
if (best_connection_ == connection) {
SwitchBestConnectionTo(NULL);
RequestSort();
}
}
// When a port is destroyed remove it from our list of ports to use for
// connection attempts.
void P2PTransportChannel::OnPortDestroyed(Port* port) {
ASSERT(worker_thread_ == talk_base::Thread::Current());
// Remove this port from the list (if we didn't drop it already).
std::vector<Port*>::iterator iter =
std::find(ports_.begin(), ports_.end(), port);
if (iter != ports_.end())
ports_.erase(iter);
LOG(INFO) << "Removed port from p2p socket: "
<< static_cast<int>(ports_.size()) << " remaining";
}
// When data is available, let listeners know
void P2PTransportChannel::OnReadPacket(Connection *connection,
const char *data, size_t len) {
ASSERT(worker_thread_ == talk_base::Thread::Current());
// Let the client know of an incoming packet
SignalReadPacket(this, data, len);
}
// Setting options on ourselves simply means setting options on all of our
// available port objects.
int P2PTransportChannel::SetOption(talk_base::Socket::Option opt, int value) {
OptionMap::iterator it = options_.find(opt);
if (it == options_.end()) {
options_.insert(std::make_pair(opt, value));
} else if (it->second == value) {
return 0;
} else {
it->second = value;
}
for (uint32 i = 0; i < ports_.size(); ++i) {
int val = ports_[i]->SetOption(opt, value);
if (val < 0) {
// Because this also occurs deferred, there is probably no point in
// reporting an error.
LOG(WARNING) << "SetOption(" << opt << ", " << value << ") failed: "
<< ports_[i]->GetError();
}
}
return 0;
}
// When the signaling channel is ready, we can really kick off the allocator
void P2PTransportChannel::OnSignalingReady() {
if (waiting_for_signaling_) {
waiting_for_signaling_ = false;
AddAllocatorSession(allocator_->CreateSession(name(), content_type()));
thread()->PostDelayed(kAllocatePeriod, this, MSG_ALLOCATE);
}
}
} // namespace cricket
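
A small caller-side sketch of the SendPacket() contract implemented above (channel, data, and len are assumed to already exist; this snippet is not part of the original file):

int sent = channel->SendPacket(data, len);
if (sent == SOCKET_ERROR && channel->GetError() == EWOULDBLOCK) {
  // No writable best connection yet; wait for the channel to become
  // writable and retry rather than treating this as fatal.
}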


@ -1,169 +0,0 @@
/*
* libjingle
* Copyright 2004--2005, Google Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
* EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// P2PTransportChannel wraps up the state management of the connection between
// two P2P clients. Clients have candidate ports for connecting, and
// connections which are combinations of candidates from each end (Alice and
// Bob each have candidates; one candidate from Alice and one from Bob are
// used to make a connection; repeat to make many connections).
//
// When all of the available connections become invalid (non-writable), we
// kick off a process of determining more candidates and more connections.
//
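// For example, if Alice gathers candidates {A1, A2} and Bob gathers {B1},
// the channel forms the connections A1-B1 and A2-B1, sorts them by write
// state and preference, and keeps pinging them so the best one can be
// selected (and switched) as conditions change.
//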
#ifndef TALK_P2P_BASE_P2PTRANSPORTCHANNEL_H_
#define TALK_P2P_BASE_P2PTRANSPORTCHANNEL_H_
#include <map>
#include <vector>
#include <string>
#include "talk/base/sigslot.h"
#include "talk/p2p/base/candidate.h"
#include "talk/p2p/base/port.h"
#include "talk/p2p/base/portallocator.h"
#include "talk/p2p/base/transport.h"
#include "talk/p2p/base/transportchannelimpl.h"
#include "talk/p2p/base/p2ptransport.h"
namespace cricket {
// Adds the port on which the candidate originated.
class RemoteCandidate : public Candidate {
public:
RemoteCandidate(const Candidate& c, Port* origin_port)
: Candidate(c), origin_port_(origin_port) {}
Port* origin_port() { return origin_port_; }
private:
Port* origin_port_;
};
// P2PTransportChannel manages the candidates and connection process to keep
// two P2P clients connected to each other.
class P2PTransportChannel : public TransportChannelImpl,
public talk_base::MessageHandler {
public:
P2PTransportChannel(const std::string &name,
const std::string &content_type,
P2PTransport* transport,
PortAllocator *allocator);
virtual ~P2PTransportChannel();
// From TransportChannelImpl:
virtual Transport* GetTransport() { return transport_; }
virtual void Connect();
virtual void Reset();
virtual void OnSignalingReady();
// From TransportChannel:
virtual int SendPacket(talk_base::Buffer* packet);
virtual int SendPacket(const char *data, size_t len);
virtual int SetOption(talk_base::Socket::Option opt, int value);
virtual int GetError() { return error_; }
// This hack is here to allow the SocketMonitor to downcast to the
// P2PTransportChannel safely.
virtual P2PTransportChannel* GetP2PChannel() { return this; }
// These are used by the connection monitor.
sigslot::signal1<P2PTransportChannel*> SignalConnectionMonitor;
const std::vector<Connection *>& connections() const { return connections_; }
Connection* best_connection() const { return best_connection_; }
void set_incoming_only(bool value) { incoming_only_ = value; }
// Handler for internal messages.
virtual void OnMessage(talk_base::Message *pmsg);
virtual void OnCandidate(const Candidate& candidate);
private:
void Allocate();
void CancelPendingAllocate();
void UpdateConnectionStates();
void RequestSort();
void SortConnections();
void SwitchBestConnectionTo(Connection* conn);
void UpdateChannelState();
void HandleWritable();
void HandleNotWritable();
void HandleAllTimedOut();
Connection* GetBestConnectionOnNetwork(talk_base::Network* network);
bool CreateConnections(const Candidate &remote_candidate, Port* origin_port,
bool readable);
bool CreateConnection(Port* port, const Candidate& remote_candidate,
Port* origin_port, bool readable);
void RememberRemoteCandidate(const Candidate& remote_candidate,
Port* origin_port);
void OnUnknownAddress(Port *port, const talk_base::SocketAddress &addr,
StunMessage *stun_msg,
const std::string &remote_username);
void OnPortReady(PortAllocatorSession *session, Port* port);
void OnCandidatesReady(PortAllocatorSession *session,
const std::vector<Candidate>& candidates);
void OnConnectionStateChange(Connection *connection);
void OnConnectionDestroyed(Connection *connection);
void OnPortDestroyed(Port* port);
void OnReadPacket(Connection *connection, const char *data, size_t len);
void OnSort();
void OnPing();
bool IsPingable(Connection* conn);
Connection* FindNextPingableConnection();
uint32 NumPingableConnections();
PortAllocatorSession* allocator_session() {
return allocator_sessions_.back();
}
void AddAllocatorSession(PortAllocatorSession* session);
talk_base::Thread* thread() const { return worker_thread_; }
P2PTransport* transport_;
PortAllocator *allocator_;
talk_base::Thread *worker_thread_;
bool incoming_only_;
bool waiting_for_signaling_;
int error_;
std::vector<PortAllocatorSession*> allocator_sessions_;
std::vector<Port *> ports_;
std::vector<Connection *> connections_;
Connection *best_connection_;
std::vector<RemoteCandidate> remote_candidates_;
// indicates whether StartGetAllPorts has been called
bool pinging_started_;
bool sort_dirty_; // indicates whether another sort is needed right now
bool was_writable_;
bool was_timed_out_;
typedef std::map<talk_base::Socket::Option, int> OptionMap;
OptionMap options_;
DISALLOW_EVIL_CONSTRUCTORS(P2PTransportChannel);
};
} // namespace cricket
#endif // TALK_P2P_BASE_P2PTRANSPORTCHANNEL_H_


@ -1,546 +0,0 @@
/*
* libjingle
* Copyright 2004--2005, Google Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
* EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef TALK_P2P_BASE_SESSION_H_
#define TALK_P2P_BASE_SESSION_H_
#include <list>
#include <map>
#include <string>
#include <vector>
#include "talk/p2p/base/sessionmessages.h"
#include "talk/p2p/base/sessionmanager.h"
#include "talk/base/socketaddress.h"
#include "talk/p2p/base/sessionclient.h"
#include "talk/p2p/base/parsing.h"
#include "talk/p2p/base/port.h"
#include "talk/xmllite/xmlelement.h"
#include "talk/xmpp/constants.h"
namespace cricket {
class P2PTransportChannel;
class Transport;
class TransportChannel;
class TransportChannelProxy;
class TransportChannelImpl;
// Used for errors that will send back a specific error message to the
// remote peer. We add "type" to the errors because it's needed for
// SignalErrorMessage.
struct MessageError : ParseError {
buzz::QName type;
// if unset, assume type is a parse error
MessageError() : ParseError(), type(buzz::QN_STANZA_BAD_REQUEST) {}
void SetType(const buzz::QName type) {
this->type = type;
}
};
// Used for errors that may be returned by public session methods that
// can fail.
// TODO: Use this error in Session::Initiate and
// Session::Accept.
struct SessionError : WriteError {
};
// Bundles a Transport and ChannelMap together. ChannelMap is used to
// create transport channels before receiving or sending a session
// initiate, and for speculatively connecting channels. Previously, a
// session had one ChannelMap and transport. Now, with multiple
// transports per session, we need multiple ChannelMaps as well.
class TransportProxy {
public:
TransportProxy(const std::string& content_name, Transport* transport)
: content_name_(content_name),
transport_(transport),
state_(STATE_INIT),
sent_candidates_(false) {}
~TransportProxy();
std::string content_name() const { return content_name_; }
Transport* impl() const { return transport_; }
std::string type() const;
bool negotiated() const { return state_ == STATE_NEGOTIATED; }
const Candidates& sent_candidates() const { return sent_candidates_; }
TransportChannel* GetChannel(const std::string& name);
TransportChannel* CreateChannel(const std::string& name,
const std::string& content_type);
void DestroyChannel(const std::string& name);
void AddSentCandidates(const Candidates& candidates);
void ClearSentCandidates() { sent_candidates_.clear(); }
void SpeculativelyConnectChannels();
void CompleteNegotiation();
private:
enum TransportState {
STATE_INIT,
STATE_CONNECTING,
STATE_NEGOTIATED
};
typedef std::map<std::string, TransportChannelProxy*> ChannelMap;
TransportChannelProxy* GetProxy(const std::string& name);
TransportChannelImpl* GetOrCreateImpl(const std::string& name,
const std::string& content_type);
void SetProxyImpl(const std::string& name, TransportChannelProxy* proxy);
std::string content_name_;
Transport* transport_;
TransportState state_;
ChannelMap channels_;
Candidates sent_candidates_;
};
typedef std::map<std::string, TransportProxy*> TransportMap;
// TODO: Consider simplifying the dependency from Voice/VideoChannel
// on Session. Right now the Channel class requires a BaseSession, but it only
// uses CreateChannel/DestroyChannel. Perhaps something like a
// TransportChannelFactory could be hoisted up out of BaseSession, or maybe
// the transports could be passed in directly.
// A BaseSession manages general session state. This includes negotiation
// of both the application-level and network-level protocols: the former
// defines what will be sent and the latter defines how it will be sent. Each
// network-level protocol is represented by a Transport object. Each Transport
// participates in the network-level negotiation. The individual streams of
// packets are represented by TransportChannels. The application-level protocol
// is represented by SessionDescription objects.
class BaseSession : public sigslot::has_slots<>,
public talk_base::MessageHandler {
public:
enum State {
STATE_INIT = 0,
STATE_SENTINITIATE, // sent initiate, waiting for Accept or Reject
STATE_RECEIVEDINITIATE, // received an initiate. Call Accept or Reject
STATE_SENTACCEPT, // sent accept. begin connecting transport
STATE_RECEIVEDACCEPT, // received accept. begin connecting transport
STATE_SENTMODIFY, // sent modify, waiting for Accept or Reject
STATE_RECEIVEDMODIFY, // received modify, call Accept or Reject
STATE_SENTREJECT, // sent reject after receiving initiate
STATE_RECEIVEDREJECT, // received reject after sending initiate
STATE_SENTREDIRECT, // sent redirect after receiving initiate
STATE_SENTTERMINATE, // sent terminate (any time / either side)
STATE_RECEIVEDTERMINATE, // received terminate (any time / either side)
STATE_INPROGRESS, // session accepted and in progress
STATE_DEINIT, // session is being destroyed
};
enum Error {
ERROR_NONE = 0, // no error
ERROR_TIME = 1, // no response to signaling
ERROR_RESPONSE = 2, // error during signaling
ERROR_NETWORK = 3, // network error, could not allocate network resources
ERROR_CONTENT = 4, // channel errors in SetLocalContent/SetRemoteContent
};
explicit BaseSession(talk_base::Thread *signaling_thread);
virtual ~BaseSession();
// Updates the state, signaling if necessary.
void SetState(State state);
// Updates the error state, signaling if necessary.
virtual void SetError(Error error);
// Handles messages posted to us.
virtual void OnMessage(talk_base::Message *pmsg);
// Returns the current state of the session. See the enum above for details.
// Each time the state changes, we will fire this signal.
State state() const { return state_; }
sigslot::signal2<BaseSession *, State> SignalState;
// Returns the last error in the session. See the enum above for details.
// Each time an error occurs, we will fire this signal.
Error error() const { return error_; }
sigslot::signal2<BaseSession *, Error> SignalError;
sigslot::signal1<TransportChannel*> SignalWritableState;
sigslot::signal3<TransportChannel*, const char*, size_t> SignalReadPacket;
// Creates a new channel with the given names. This method may be called
// immediately after creating the session. However, the actual
// implementation may not be fixed until transport negotiation completes.
// This will usually be called from the worker thread, but that
// shouldn't be an issue since the main thread will be blocked in
// Send when doing so.
virtual TransportChannel* CreateChannel(const std::string& content_name,
const std::string& channel_name) = 0;
// Returns the channel with the given names.
virtual TransportChannel* GetChannel(const std::string& content_name,
const std::string& channel_name) = 0;
// Destroys the channel with the given names.
// This will usually be called from the worker thread, but that
// shouldn't be an issue since the main thread will be blocked in
// Send when doing so.
virtual void DestroyChannel(const std::string& content_name,
const std::string& channel_name) = 0;
// Invoked when we notice that there is no matching channel on our peer.
sigslot::signal2<Session*, const std::string&> SignalChannelGone;
// Returns the application-level description given by our client.
// If we are the recipient, this will be NULL until we send an accept.
const SessionDescription* local_description() const {
return local_description_;
}
// Takes ownership of SessionDescription*
bool set_local_description(const SessionDescription* sdesc) {
if (sdesc != local_description_) {
delete local_description_;
local_description_ = sdesc;
}
return true;
}
// Returns the application-level description given by the other client.
// If we are the initiator, this will be NULL until we receive an accept.
const SessionDescription* remote_description() const {
return remote_description_;
}
// Takes ownership of SessionDescription*
bool set_remote_description(const SessionDescription* sdesc) {
if (sdesc != remote_description_) {
delete remote_description_;
remote_description_ = sdesc;
}
return true;
}
// When we receive an initiate, we create a session in the
// RECEIVEDINITIATE state and respond by accepting or rejecting.
// Takes ownership of session description.
virtual bool Accept(const SessionDescription* sdesc) = 0;
virtual bool Reject(const std::string& reason) = 0;
bool Terminate() {
return TerminateWithReason(STR_TERMINATE_SUCCESS);
}
virtual bool TerminateWithReason(const std::string& reason) = 0;
// The worker thread used by the session manager
virtual talk_base::Thread *worker_thread() = 0;
talk_base::Thread *signaling_thread() {
return signaling_thread_;
}
// Returns the JID of this client.
const std::string& local_name() const { return local_name_; }
// Returns the JID of the other peer in this session.
const std::string& remote_name() const { return remote_name_; }
// Set the JID of the other peer in this session.
// Typically the remote_name_ is set when the session is initiated.
// However, sometimes (e.g when a proxy is used) the peer name is
// known after the BaseSession has been initiated and it must be updated
// explicitly.
void set_remote_name(const std::string& name) { remote_name_ = name; }
const std::string& id() const { return sid_; }
protected:
State state_;
Error error_;
const SessionDescription* local_description_;
const SessionDescription* remote_description_;
std::string sid_;
// We don't use buzz::Jid because changing to buzz::Jid here has a
// cascading effect that requires an enormous number of places to
// change to buzz::Jid as well.
std::string local_name_;
std::string remote_name_;
talk_base::Thread *signaling_thread_;
};
// A specific Session created by the SessionManager, using XMPP for protocol.
class Session : public BaseSession {
public:
// Returns the manager that created and owns this session.
SessionManager* session_manager() const { return session_manager_; }
// the worker thread used by the session manager
talk_base::Thread *worker_thread() {
return session_manager_->worker_thread();
}
// Returns the XML namespace identifying the type of this session.
const std::string& content_type() const { return content_type_; }
// Returns the client that is handling the application data of this session.
SessionClient* client() const { return client_; }
SignalingProtocol current_protocol() const { return current_protocol_; }
void set_current_protocol(SignalingProtocol protocol) {
current_protocol_ = protocol;
}
// Indicates whether we initiated this session.
bool initiator() const { return initiator_; }
const SessionDescription* initiator_description() const {
if (initiator_) {
return local_description_;
} else {
return remote_description_;
}
}
// Fired whenever we receive a terminate message along with a reason
sigslot::signal2<Session*, const std::string&> SignalReceivedTerminateReason;
void set_allow_local_ips(bool allow);
// Returns the transport that has been negotiated or NULL if
// negotiation is still in progress.
Transport* GetTransport(const std::string& content_name);
// Takes ownership of session description.
// TODO: Add an error argument to pass back to the caller.
bool Initiate(const std::string& to,
const SessionDescription* sdesc);
// When we receive an initiate, we create a session in the
// RECEIVEDINITIATE state and respond by accepting or rejecting.
// Takes ownership of session description.
// TODO: Add an error argument to pass back to the caller.
virtual bool Accept(const SessionDescription* sdesc);
virtual bool Reject(const std::string& reason);
virtual bool TerminateWithReason(const std::string& reason);
// The two clients in the session may also send one another
// arbitrary XML messages, which are called "info" messages. Sending
// takes ownership of the given elements. The signal does not; the
// parent element will be deleted after the signal.
bool SendInfoMessage(const XmlElements& elems);
sigslot::signal2<Session*, const buzz::XmlElement*> SignalInfoMessage;
// Maps passed to serialization functions.
TransportParserMap GetTransportParsers();
ContentParserMap GetContentParsers();
// Creates a new channel with the given names. This method may be called
// immediately after creating the session. However, the actual
// implementation may not be fixed until transport negotiation completes.
virtual TransportChannel* CreateChannel(const std::string& content_name,
const std::string& channel_name);
// Returns the channel with the given names.
virtual TransportChannel* GetChannel(const std::string& content_name,
const std::string& channel_name);
// Destroys the channel with the given names.
virtual void DestroyChannel(const std::string& content_name,
const std::string& channel_name);
// Updates the error state, signaling if necessary.
virtual void SetError(Error error);
// Handles messages posted to us.
virtual void OnMessage(talk_base::Message *pmsg);
private:
// Creates or destroys a session. (These are called only by SessionManager.)
Session(SessionManager *session_manager,
const std::string& local_name, const std::string& initiator_name,
const std::string& sid, const std::string& content_type,
SessionClient* client);
~Session();
// Get a TransportProxy by content_name or transport. NULL if not found.
TransportProxy* GetTransportProxy(const std::string& content_name);
TransportProxy* GetTransportProxy(const Transport* transport);
TransportProxy* GetFirstTransportProxy();
// TransportProxy is owned by session. Return proxy just for convenience.
TransportProxy* GetOrCreateTransportProxy(const std::string& content_name);
// For each transport info, create a transport proxy. Can fail for
// incompatible transport types.
bool CreateTransportProxies(const TransportInfos& tinfos,
SessionError* error);
void SpeculativelyConnectAllTransportChannels();
bool OnRemoteCandidates(const TransportInfos& tinfos,
ParseError* error);
// Returns a TransportInfo without candidates for each content name.
// Uses the transport_type_ of the session.
TransportInfos GetEmptyTransportInfos(const ContentInfos& contents) const;
// Called when the first channel of a transport begins connecting. We use
// this to start a timer, to make sure that the connection completes in a
// reasonable amount of time.
void OnTransportConnecting(Transport* transport);
// Called when a transport changes its writable state. We track this to make
// sure that the transport becomes writable within a reasonable amount of
// time. If this does not occur, we signal an error.
void OnTransportWritable(Transport* transport);
// Called when a transport requests signaling.
void OnTransportRequestSignaling(Transport* transport);
// Called when a transport signals that it has a message to send. Note that
// these messages are just the transport part of the stanza; they need to be
// wrapped in the appropriate session tags.
void OnTransportCandidatesReady(Transport* transport,
const Candidates& candidates);
// Called when a transport signals that it found an error in an incoming
// message.
void OnTransportSendError(Transport* transport,
const buzz::XmlElement* stanza,
const buzz::QName& name,
const std::string& type,
const std::string& text,
const buzz::XmlElement* extra_info);
// Called when we notice that one of our local channels has no peer, so it
// should be destroyed.
void OnTransportChannelGone(Transport* transport, const std::string& name);
// When the session needs to send signaling messages, it begins by requesting
// signaling. The client should handle this by calling OnSignalingReady once
// it is ready to send the messages.
// (These are called only by SessionManager.)
sigslot::signal1<Session*> SignalRequestSignaling;
void OnSignalingReady();
// Send various kinds of session messages.
bool SendInitiateMessage(const SessionDescription* sdesc,
SessionError* error);
bool SendAcceptMessage(const SessionDescription* sdesc, SessionError* error);
bool SendRejectMessage(const std::string& reason, SessionError* error);
bool SendTerminateMessage(const std::string& reason, SessionError* error);
bool SendTransportInfoMessage(const TransportInfo& tinfo,
SessionError* error);
bool ResendAllTransportInfoMessages(SessionError* error);
// Both versions of SendMessage send a message of the given type to
// the other client. Can pass either a set of elements or an
// "action", which must have a WriteSessionAction method to go along
// with it. Sending with an action supports sending a "hybrid"
// message. Sending with elements must be sent as Jingle or Gingle.
// When passing elems, must be either Jingle or Gingle protocol.
// Takes ownership of action_elems.
bool SendMessage(ActionType type, const XmlElements& action_elems,
SessionError* error);
// When passing an action, may be Hybrid protocol.
template <typename Action>
bool SendMessage(ActionType type, const Action& action,
SessionError* error);
// Helper methods to write the session message stanza.
template <typename Action>
bool WriteActionMessage(ActionType type, const Action& action,
buzz::XmlElement* stanza, WriteError* error);
template <typename Action>
bool WriteActionMessage(SignalingProtocol protocol,
ActionType type, const Action& action,
buzz::XmlElement* stanza, WriteError* error);
// Sending messages in hybrid form requires being able to write them
// on a per-protocol basis with a common method signature, which all
// of these have.
bool WriteSessionAction(SignalingProtocol protocol,
const SessionInitiate& init,
XmlElements* elems, WriteError* error);
bool WriteSessionAction(SignalingProtocol protocol,
const TransportInfo& tinfo,
XmlElements* elems, WriteError* error);
bool WriteSessionAction(SignalingProtocol protocol,
const SessionTerminate& term,
XmlElements* elems, WriteError* error);
// Sends a message back to the other client indicating that we have received
// and accepted their message.
void SendAcknowledgementMessage(const buzz::XmlElement* stanza);
// Once signaling is ready, the session will use this signal to request the
// sending of each message. When messages are received by the other client,
// they should be handed to OnIncomingMessage.
// (These are called only by SessionManager.)
sigslot::signal2<Session *, const buzz::XmlElement*> SignalOutgoingMessage;
void OnIncomingMessage(const SessionMessage& msg);
void OnFailedSend(const buzz::XmlElement* orig_stanza,
const buzz::XmlElement* error_stanza);
// Invoked when an error is found in an incoming message. This is translated
// into the appropriate XMPP response by SessionManager.
sigslot::signal6<BaseSession*,
const buzz::XmlElement*,
const buzz::QName&,
const std::string&,
const std::string&,
const buzz::XmlElement*> SignalErrorMessage;
// Handlers for the various types of messages. These functions may take
// pointers to the whole stanza or to just the session element.
bool OnInitiateMessage(const SessionMessage& msg, MessageError* error);
bool OnAcceptMessage(const SessionMessage& msg, MessageError* error);
bool OnRejectMessage(const SessionMessage& msg, MessageError* error);
bool OnInfoMessage(const SessionMessage& msg);
bool OnTerminateMessage(const SessionMessage& msg, MessageError* error);
bool OnTransportInfoMessage(const SessionMessage& msg, MessageError* error);
bool OnTransportAcceptMessage(const SessionMessage& msg, MessageError* error);
bool OnUpdateMessage(const SessionMessage& msg, MessageError* error);
bool OnRedirectError(const SessionRedirect& redirect, SessionError* error);
// Verifies that we are in the appropriate state to receive this message.
bool CheckState(State state, MessageError* error);
SessionManager *session_manager_;
bool initiator_;
std::string initiator_name_;
std::string content_type_;
SessionClient* client_;
std::string transport_type_;
TransportParser* transport_parser_;
// This is transport-specific but required so much by unit tests
// that it's much easier to put it here.
bool allow_local_ips_;
TransportMap transports_;
// Keeps track of what protocol we are speaking.
SignalingProtocol current_protocol_;
friend class SessionManager; // For access to constructor, destructor,
// and signaling related methods.
};
} // namespace cricket
#endif // TALK_P2P_BASE_SESSION_H_
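A hedged sketch of observing the state machine above from the initiator side; the Session is assumed to come from SessionManager (its constructor is private), and error handling is elided:

// Illustrative sketch, not part of this commit.
#include "talk/base/sigslot.h"
#include "talk/p2p/base/session.h"

class CallObserver : public sigslot::has_slots<> {
 public:
  explicit CallObserver(cricket::Session* session) : session_(session) {
    session_->SignalState.connect(this, &CallObserver::OnState);
  }

  bool Start(const std::string& remote_jid,
             const cricket::SessionDescription* offer) {
    return session_->Initiate(remote_jid, offer);  // takes ownership of offer
  }

 private:
  void OnState(cricket::BaseSession* session,
               cricket::BaseSession::State state) {
    switch (state) {
      case cricket::BaseSession::STATE_SENTINITIATE:
        break;  // waiting for the peer to accept or reject
      case cricket::BaseSession::STATE_RECEIVEDACCEPT:
        break;  // transports are connecting; media can start
      case cricket::BaseSession::STATE_RECEIVEDREJECT:
        session_->Terminate();
        break;
      default:
        break;
    }
  }

  cricket::Session* session_;
};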


@ -1,114 +0,0 @@
/*
* libjingle
* Copyright 2004--2005, Google Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
* EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef TALK_P2P_BASE_TRANSPORTCHANNEL_H_
#define TALK_P2P_BASE_TRANSPORTCHANNEL_H_
#include <string>
#include "talk/base/basictypes.h"
#include "talk/base/sigslot.h"
#include "talk/base/socket.h"
namespace talk_base {
class Buffer;
}
namespace cricket {
class Candidate;
class P2PTransportChannel;
// A TransportChannel represents one logical stream of packets that are sent
// between the two sides of a session.
class TransportChannel: public sigslot::has_slots<> {
public:
TransportChannel(const std::string& name, const std::string &content_type)
: name_(name), content_type_(content_type),
readable_(false), writable_(false) {}
virtual ~TransportChannel() {}
// Returns the name of this channel.
const std::string& name() const { return name_; }
const std::string& content_type() const { return content_type_; }
// Returns the readable and writable states of this channel. Each time one
// of these states changes, a signal is raised. These states are aggregated
// by the TransportManager.
bool readable() const { return readable_; }
bool writable() const { return writable_; }
sigslot::signal1<TransportChannel*> SignalReadableState;
sigslot::signal1<TransportChannel*> SignalWritableState;
virtual int SendPacket(talk_base::Buffer* packet) = 0;
// Attempts to send the given packet. The return value is < 0 on failure.
virtual int SendPacket(const char *data, size_t len) = 0;
// Sets a socket option on this channel. Note that not all options are
// supported by all transport types.
virtual int SetOption(talk_base::Socket::Option opt, int value) = 0;
// Returns the most recent error that occurred on this channel.
virtual int GetError() = 0;
// This hack is here to allow the SocketMonitor to downcast to the
// P2PTransportChannel safely.
// TODO: Generalize network monitoring.
virtual P2PTransportChannel* GetP2PChannel() { return NULL; }
// Signalled each time a packet is received on this channel.
sigslot::signal3<TransportChannel*, const char*, size_t> SignalReadPacket;
// This signal occurs when there is a change in the way that packets are
// being routed, i.e. to a different remote location. The candidate
// indicates where and how we are currently sending media.
sigslot::signal2<TransportChannel*, const Candidate&> SignalRouteChange;
// Invoked when the channel is being destroyed.
sigslot::signal1<TransportChannel*> SignalDestroyed;
// Debugging description of this transport channel.
std::string ToString() const;
protected:
// Sets the readable state, signaling if necessary.
void set_readable(bool readable);
// Sets the writable state, signaling if necessary.
void set_writable(bool writable);
private:
std::string name_;
std::string content_type_;
bool readable_;
bool writable_;
DISALLOW_EVIL_CONSTRUCTORS(TransportChannel);
};
} // namespace cricket
#endif // TALK_P2P_BASE_TRANSPORTCHANNEL_H_
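Callers typically wait for the channel to become writable before sending. A small sketch, assuming the channel comes from Session::CreateChannel():

// Illustrative sketch, not part of this commit.
#include "talk/base/logging.h"
#include "talk/base/sigslot.h"
#include "talk/p2p/base/transportchannel.h"

class WritableGate : public sigslot::has_slots<> {
 public:
  explicit WritableGate(cricket::TransportChannel* channel)
      : channel_(channel) {
    channel_->SignalWritableState.connect(this, &WritableGate::OnWritable);
  }

  int TrySend(const char* data, size_t len) {
    if (!channel_->writable())
      return -1;  // no connection in the sending direction yet
    return channel_->SendPacket(data, len);
  }

 private:
  void OnWritable(cricket::TransportChannel* channel) {
    LOG(LS_INFO) << "Channel " << channel->name() << " writable: "
                 << channel->writable();
  }

  cricket::TransportChannel* channel_;
};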


@ -1,112 +0,0 @@
/*
* libjingle
* Copyright 2004--2005, Google Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
* EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "talk/p2p/base/transportchannelproxy.h"
#include "talk/base/common.h"
#include "talk/p2p/base/transport.h"
#include "talk/p2p/base/transportchannelimpl.h"
namespace cricket {
TransportChannelProxy::TransportChannelProxy(const std::string& name,
const std::string& content_type)
: TransportChannel(name, content_type), impl_(NULL) {
}
TransportChannelProxy::~TransportChannelProxy() {
if (impl_)
impl_->GetTransport()->DestroyChannel(impl_->name());
}
void TransportChannelProxy::SetImplementation(TransportChannelImpl* impl) {
impl_ = impl;
impl_->SignalReadableState.connect(
this, &TransportChannelProxy::OnReadableState);
impl_->SignalWritableState.connect(
this, &TransportChannelProxy::OnWritableState);
impl_->SignalReadPacket.connect(this, &TransportChannelProxy::OnReadPacket);
impl_->SignalRouteChange.connect(this, &TransportChannelProxy::OnRouteChange);
for (OptionList::iterator it = pending_options_.begin();
it != pending_options_.end();
++it) {
impl_->SetOption(it->first, it->second);
}
pending_options_.clear();
}
int TransportChannelProxy::SendPacket(talk_base::Buffer* packet) {
// Fail if we don't have an impl yet.
return (impl_) ? impl_->SendPacket(packet) : -1;
}
int TransportChannelProxy::SendPacket(const char *data, size_t len) {
// Fail if we don't have an impl yet.
return (impl_) ? impl_->SendPacket(data, len) : -1;
}
int TransportChannelProxy::SetOption(talk_base::Socket::Option opt, int value) {
if (impl_)
return impl_->SetOption(opt, value);
pending_options_.push_back(OptionPair(opt, value));
return 0;
}
int TransportChannelProxy::GetError() {
ASSERT(impl_ != NULL); // should not be used until channel is writable
return impl_->GetError();
}
P2PTransportChannel* TransportChannelProxy::GetP2PChannel() {
if (impl_) {
return impl_->GetP2PChannel();
}
return NULL;
}
void TransportChannelProxy::OnReadableState(TransportChannel* channel) {
ASSERT(channel == impl_);
set_readable(impl_->readable());
}
void TransportChannelProxy::OnWritableState(TransportChannel* channel) {
ASSERT(channel == impl_);
set_writable(impl_->writable());
}
void TransportChannelProxy::OnReadPacket(TransportChannel* channel,
const char* data, size_t size) {
ASSERT(channel == impl_);
SignalReadPacket(this, data, size);
}
void TransportChannelProxy::OnRouteChange(TransportChannel* channel,
const Candidate& candidate) {
ASSERT(channel == impl_);
SignalRouteChange(this, candidate);
}
} // namespace cricket


@ -1,84 +0,0 @@
/*
* libjingle
* Copyright 2004--2005, Google Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
* EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef TALK_P2P_BASE_TRANSPORTCHANNELPROXY_H_
#define TALK_P2P_BASE_TRANSPORTCHANNELPROXY_H_
#include <string>
#include <vector>
#include "talk/p2p/base/transportchannel.h"
namespace talk_base {
class Buffer;
}
namespace cricket {
class TransportChannelImpl;
// Proxies calls between the client and the transport channel implementation.
// This is needed because clients are allowed to create channels before the
// network negotiation is complete. Hence, we create a proxy up front, and
// when negotiation completes, connect the proxy to the implementation.
class TransportChannelProxy: public TransportChannel {
public:
TransportChannelProxy(const std::string& name,
const std::string& content_type);
virtual ~TransportChannelProxy();
TransportChannelImpl* impl() { return impl_; }
// Sets the implementation to which we will proxy.
void SetImplementation(TransportChannelImpl* impl);
// Implementation of the TransportChannel interface. These simply forward to
// the implementation.
virtual int SendPacket(talk_base::Buffer* packet);
virtual int SendPacket(const char *data, size_t len);
virtual int SetOption(talk_base::Socket::Option opt, int value);
virtual int GetError();
virtual P2PTransportChannel* GetP2PChannel();
private:
typedef std::pair<talk_base::Socket::Option, int> OptionPair;
typedef std::vector<OptionPair> OptionList;
TransportChannelImpl* impl_;
OptionList pending_options_;
// Catch signals from the implementation channel. These just forward to the
// client (after updating our state to match).
void OnReadableState(TransportChannel* channel);
void OnWritableState(TransportChannel* channel);
void OnReadPacket(TransportChannel* channel, const char* data, size_t size);
void OnRouteChange(TransportChannel* channel, const Candidate& candidate);
DISALLOW_EVIL_CONSTRUCTORS(TransportChannelProxy);
};
} // namespace cricket
#endif // TALK_P2P_BASE_TRANSPORTCHANNELPROXY_H_
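The proxy buffers work done before negotiation completes: SetOption calls are queued in pending_options_ and replayed by SetImplementation, while SendPacket simply fails until an implementation is attached. A sketch, assuming |impl| is the negotiated TransportChannelImpl and that OPT_DONTFRAGMENT is one of talk_base::Socket's options:

// Illustrative sketch, not part of this commit.
#include "talk/p2p/base/transportchannelproxy.h"

void ConfigureEarly(cricket::TransportChannelProxy* proxy,
                    cricket::TransportChannelImpl* impl) {
  // No impl yet: queued in pending_options_, returns 0.
  proxy->SetOption(talk_base::Socket::OPT_DONTFRAGMENT, 1);

  // No impl yet: returns -1 without sending anything.
  proxy->SendPacket("ping", 4);

  // Negotiation finished: pending options are flushed into |impl|.
  proxy->SetImplementation(impl);
}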

File diff suppressed because it is too large


@ -1,6 +1,6 @@
/*
* libjingle
* Copyright 2004--2011, Google Inc.
* Copyright 2004--2008, Google Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@ -31,25 +31,15 @@
#include <atlbase.h>
#include <dbt.h>
#include <strmif.h> // must come before ks.h
#include <mmsystem.h>
#include <ks.h>
#include <ksmedia.h>
#define INITGUID // For PKEY_AudioEndpoint_GUID
#include <mmdeviceapi.h>
#include <MMSystem.h>
#include <functiondiscoverykeys_devpkey.h>
#include <uuids.h>
#include "talk/base/win32.h" // ToUtf8
#include "talk/base/win32window.h"
// PKEY_AudioEndpoint_GUID isn't included in uuid.lib and we don't want
// to define INITGUID in order to define all the uuids in this object file
// as it will conflict with uuid.lib (multiply defined symbols).
// So our workaround is to define this one missing symbol here manually.
EXTERN_C const PROPERTYKEY PKEY_AudioEndpoint_GUID = { {
0x1da5d803, 0xd492, 0x4edd, {
0x8c, 0x23, 0xe0, 0xc0, 0xff, 0xee, 0x7f, 0x0e
} }, 4
};
#elif OSX
#include <CoreAudio/CoreAudio.h>
#include <QuickTime/QuickTime.h>
@ -79,14 +69,7 @@ namespace cricket {
// Initialize to empty string.
const std::string DeviceManager::kDefaultDeviceName;
#ifdef PLATFORM_CHROMIUM
class DeviceWatcher {
public:
explicit DeviceWatcher(DeviceManager* dm);
bool Start();
void Stop();
};
#elif defined(WIN32)
#ifdef WIN32
class DeviceWatcher : public talk_base::Win32Window {
public:
explicit DeviceWatcher(DeviceManager* dm);
@ -135,8 +118,11 @@ class DeviceWatcher {
};
#endif
#if defined(CHROMEOS)
static bool ShouldAudioDeviceBeIgnored(const std::string& device_name);
#endif
#if !defined(LINUX) && !defined(IOS)
static bool ShouldDeviceBeIgnored(const std::string& device_name);
static bool ShouldVideoDeviceBeIgnored(const std::string& device_name);
#endif
#ifndef OSX
static bool GetVideoDevices(std::vector<Device>* out);
@ -180,7 +166,7 @@ DeviceManager::~DeviceManager() {
bool DeviceManager::Init() {
if (!initialized_) {
#if defined(WIN32) && !defined(PLATFORM_CHROMIUM)
#if defined(WIN32)
HRESULT hr = CoInitializeEx(NULL, COINIT_MULTITHREADED);
need_couninitialize_ = SUCCEEDED(hr);
if (FAILED(hr)) {
@ -201,7 +187,7 @@ bool DeviceManager::Init() {
void DeviceManager::Terminate() {
if (initialized_) {
watcher_->Stop();
#if defined(WIN32) && !defined(PLATFORM_CHROMIUM)
#if defined(WIN32)
if (need_couninitialize_) {
CoUninitialize();
need_couninitialize_ = false;
@ -244,16 +230,13 @@ bool DeviceManager::GetAudioOutputDevice(const std::string& name, Device* out) {
#ifdef OSX
static bool FilterDevice(const Device& d) {
return ShouldDeviceBeIgnored(d.name);
return ShouldVideoDeviceBeIgnored(d.name);
}
#endif
bool DeviceManager::GetVideoCaptureDevices(std::vector<Device>* devices) {
devices->clear();
#ifdef PLATFORM_CHROMIUM
devices->push_back(Device("", -1));
return true;
#elif OSX
#ifdef OSX
if (GetQTKitVideoDevices(devices)) {
// Now filter out any known incompatible devices
devices->erase(remove_if(devices->begin(), devices->end(), FilterDevice),
@ -268,10 +251,7 @@ bool DeviceManager::GetVideoCaptureDevices(std::vector<Device>* devices) {
bool DeviceManager::GetDefaultVideoCaptureDevice(Device* device) {
bool ret = false;
#ifdef PLATFORM_CHROMIUM
*device = Device("", -1);
ret = true;
#elif WIN32
#if WIN32
// If there are multiple capture devices, we want the first USB one.
// This avoids issues with defaulting to virtual cameras or grabber cards.
std::vector<Device> devices;
@ -309,10 +289,6 @@ bool DeviceManager::GetVideoCaptureDevice(const std::string& name,
return false;
}
#ifdef PLATFORM_CHROMIUM
*out = Device(name, name);
return true;
#else
for (std::vector<Device>::const_iterator it = devices.begin();
it != devices.end(); ++it) {
if (name == it->name) {
@ -320,7 +296,6 @@ bool DeviceManager::GetVideoCaptureDevice(const std::string& name,
return true;
}
}
#endif
return false;
}
@ -352,10 +327,7 @@ bool DeviceManager::GetAudioDevice(bool is_input, const std::string& name,
bool DeviceManager::GetAudioDevicesByPlatform(bool input,
std::vector<Device>* devs) {
devs->clear();
#ifdef PLATFORM_CHROMIUM
devs->push_back(Device("", -1));
return true;
#elif defined(LINUX_SOUND_USED)
#if defined(LINUX_SOUND_USED)
if (!sound_system_.get()) {
return false;
}
@ -378,7 +350,14 @@ bool DeviceManager::GetAudioDevicesByPlatform(bool input,
for (SoundSystemInterface::SoundDeviceLocatorList::iterator i = list.begin();
i != list.end();
++i, ++index) {
devs->push_back(Device((*i)->name(), index));
#if defined(CHROMEOS)
// On ChromeOS, we ignore ALSA surround and S/PDIF devices.
if (!ShouldAudioDeviceBeIgnored((*i)->device_name())) {
#endif
devs->push_back(Device((*i)->name(), index));
#if defined(CHROMEOS)
}
#endif
}
SoundSystemInterface::ClearSoundDeviceLocatorList(&list);
sound_system_.release();
@ -409,18 +388,7 @@ bool DeviceManager::GetAudioDevicesByPlatform(bool input,
#endif
}
#if defined(PLATFORM_CHROMIUM)
DeviceWatcher::DeviceWatcher(DeviceManager* manager) {
}
bool DeviceWatcher::Start() {
return true;
}
void DeviceWatcher::Stop() {
}
#elif defined(WIN32)
#if defined(WIN32)
bool GetVideoDevices(std::vector<Device>* devices) {
return GetDevices(CLSID_VideoInputDeviceCategory, devices);
}
@ -452,7 +420,7 @@ bool GetDevices(const CLSID& catid, std::vector<Device>* devices) {
if (SUCCEEDED(bag->Read(kFriendlyName, &name, 0)) &&
name.vt == VT_BSTR) {
name_str = talk_base::ToUtf8(name.bstrVal);
if (!ShouldDeviceBeIgnored(name_str)) {
if (!ShouldVideoDeviceBeIgnored(name_str)) {
// Get the device id if one exists.
if (SUCCEEDED(bag->Read(kDevicePath, &path, 0)) &&
path.vt == VT_BSTR) {
@ -999,11 +967,32 @@ bool DeviceWatcher::IsDescriptorClosed() {
#endif
#if defined(CHROMEOS)
// Checks if we want to ignore this audio device.
static bool ShouldAudioDeviceBeIgnored(const std::string& device_name) {
static const char* const kFilteredAudioDevicesName[] = {
"surround40:",
"surround41:",
"surround50:",
"surround51:",
"surround71:",
"iec958:" // S/PDIF
};
for (int i = 0; i < ARRAY_SIZE(kFilteredAudioDevicesName); ++i) {
if (0 == device_name.find(kFilteredAudioDevicesName[i])) {
LOG(LS_INFO) << "Ignoring device " << device_name;
return true;
}
}
return false;
}
#endif
// TODO: Try to get hold of a copy of Final Cut to understand why we
// crash while scanning their components on OS X.
#if !defined(LINUX) && !defined(IOS)
static bool ShouldDeviceBeIgnored(const std::string& device_name) {
static const char* const kFilteredDevices[] = {
static bool ShouldVideoDeviceBeIgnored(const std::string& device_name) {
static const char* const kFilteredVideoDevicesName[] = {
"Google Camera Adapter", // Our own magiccams
#ifdef WIN32
"Asus virtual Camera", // Bad Asus desktop virtual cam
@ -1014,9 +1003,9 @@ static bool ShouldDeviceBeIgnored(const std::string& device_name) {
#endif
};
for (int i = 0; i < ARRAY_SIZE(kFilteredDevices); ++i) {
if (strnicmp(device_name.c_str(), kFilteredDevices[i],
strlen(kFilteredDevices[i])) == 0) {
for (int i = 0; i < ARRAY_SIZE(kFilteredVideoDevicesName); ++i) {
if (strnicmp(device_name.c_str(), kFilteredVideoDevicesName[i],
strlen(kFilteredVideoDevicesName[i])) == 0) {
LOG(LS_INFO) << "Ignoring device " << device_name;
return true;
}
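The ChromeOS audio filter above matches prefixes with std::string::find, while the video filter uses case-insensitive strnicmp. A standalone check of the find idiom (illustrative only; the device names are made up):

#include <cassert>
#include <string>

int main() {
  std::string alsa_name = "surround51:CARD=PCH,DEV=0";
  assert(alsa_name.find("surround51:") == 0);               // ignored
  assert(std::string("default").find("surround51:") != 0);  // kept
  return 0;
}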


@ -42,8 +42,7 @@ namespace cricket {
class DeviceWatcher;
// Used to represent an audio or video capture or render device.
class Device {
public:
struct Device {
Device() {}
Device(const std::string& first, int second)
: name(first),


@ -1,221 +0,0 @@
// libjingle
// Copyright 2004--2005, Google Inc.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// 3. The name of the author may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
// EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef TALK_SESSION_PHONE_FILEMEDIAENGINE_H_
#define TALK_SESSION_PHONE_FILEMEDIAENGINE_H_
#include <string>
#include <vector>
#include "talk/base/scoped_ptr.h"
#include "talk/session/phone/codec.h"
#include "talk/session/phone/mediachannel.h"
#include "talk/session/phone/mediaengine.h"
namespace talk_base {
class StreamInterface;
}
namespace cricket {
// A media engine contains a capturer, an encoder, and a sender in the sender
// side and a receiver, a decoder, and a renderer in the receiver side.
// FileMediaEngine simulates the capturer and the encoder via an input RTP dump
// stream and simulates the decoder and the renderer via an output RTP dump
// stream. Depending on the parameters of the constructor, FileMediaEngine can
// act as file voice engine, file video engine, or both. Currently, we use
// only the RTP dump packets. TODO: Enable RTCP packets.
class FileMediaEngine : public MediaEngine {
public:
FileMediaEngine() {}
virtual ~FileMediaEngine() {}
// Set the file name of the input or output RTP dump for voice or video.
// Should be called before the channel is created.
void set_voice_input_filename(const std::string& filename) {
voice_input_filename_ = filename;
}
void set_voice_output_filename(const std::string& filename) {
voice_output_filename_ = filename;
}
void set_video_input_filename(const std::string& filename) {
video_input_filename_ = filename;
}
void set_video_output_filename(const std::string& filename) {
video_output_filename_ = filename;
}
// Should be called before codecs() and video_codecs() are called. We need to
// set the voice and video codecs; otherwise, Jingle initiation will fail.
void set_voice_codecs(const std::vector<AudioCodec>& codecs) {
voice_codecs_ = codecs;
}
void set_video_codecs(const std::vector<VideoCodec>& codecs) {
video_codecs_ = codecs;
}
// Implement pure virtual methods of MediaEngine.
virtual bool Init() { return true; }
virtual void Terminate() {}
virtual int GetCapabilities();
virtual VoiceMediaChannel* CreateChannel();
virtual VideoMediaChannel* CreateVideoChannel(VoiceMediaChannel* voice_ch);
virtual SoundclipMedia* CreateSoundclip() { return NULL; }
virtual bool SetAudioOptions(int options) { return true; }
virtual bool SetVideoOptions(int options) { return true; }
virtual bool SetDefaultVideoEncoderConfig(const VideoEncoderConfig& config) {
return true;
}
virtual bool SetSoundDevices(const Device* in_dev, const Device* out_dev) {
return true;
}
virtual bool SetVideoCaptureDevice(const Device* cam_device) { return true; }
virtual bool GetOutputVolume(int* level) { *level = 0; return true; }
virtual bool SetOutputVolume(int level) { return true; }
virtual int GetInputLevel() { return 0; }
virtual bool SetLocalMonitor(bool enable) { return true; }
virtual bool SetLocalRenderer(VideoRenderer* renderer) { return true; }
// TODO: control channel send?
virtual CaptureResult SetVideoCapture(bool capture) { return CR_SUCCESS; }
virtual const std::vector<AudioCodec>& audio_codecs() {
return voice_codecs_;
}
virtual const std::vector<VideoCodec>& video_codecs() {
return video_codecs_;
}
virtual bool FindAudioCodec(const AudioCodec& codec) { return true; }
virtual bool FindVideoCodec(const VideoCodec& codec) { return true; }
virtual void SetVoiceLogging(int min_sev, const char* filter) {}
virtual void SetVideoLogging(int min_sev, const char* filter) {}
private:
std::string voice_input_filename_;
std::string voice_output_filename_;
std::string video_input_filename_;
std::string video_output_filename_;
std::vector<AudioCodec> voice_codecs_;
std::vector<VideoCodec> video_codecs_;
DISALLOW_COPY_AND_ASSIGN(FileMediaEngine);
};
class RtpSenderReceiver; // Forward declaration. Defined in the .cc file.
class FileVoiceChannel : public VoiceMediaChannel {
public:
FileVoiceChannel(const std::string& in_file, const std::string& out_file);
virtual ~FileVoiceChannel();
// Implement pure virtual methods of VoiceMediaChannel.
virtual bool SetRecvCodecs(const std::vector<AudioCodec>& codecs) {
return true;
}
virtual bool SetSendCodecs(const std::vector<AudioCodec>& codecs);
virtual bool SetRecvRtpHeaderExtensions(
const std::vector<RtpHeaderExtension>& extensions) {
return true;
}
virtual bool SetSendRtpHeaderExtensions(
const std::vector<RtpHeaderExtension>& extensions) {
return true;
}
virtual bool SetPlayout(bool playout) { return true; }
virtual bool SetSend(SendFlags flag);
virtual bool AddStream(uint32 ssrc) { return true; }
virtual bool RemoveStream(uint32 ssrc) { return true; }
virtual bool GetActiveStreams(AudioInfo::StreamList* actives) { return true; }
virtual int GetOutputLevel() { return 0; }
virtual bool SetRingbackTone(const char* buf, int len) { return true; }
virtual bool PlayRingbackTone(uint32 ssrc, bool play, bool loop) {
return true;
}
virtual bool PressDTMF(int event, bool playout) { return true; }
virtual bool GetStats(VoiceMediaInfo* info) { return true; }
// Implement pure virtual methods of MediaChannel.
virtual void OnPacketReceived(talk_base::Buffer* packet);
virtual void OnRtcpReceived(talk_base::Buffer* packet) {}
virtual void SetSendSsrc(uint32 id) {} // TODO: change RTP packet?
virtual bool SetRtcpCName(const std::string& cname) { return true; }
virtual bool Mute(bool on) { return false; }
virtual bool SetSendBandwidth(bool autobw, int bps) { return true; }
virtual bool SetOptions(int options) { return true; }
virtual int GetMediaChannelId() { return -1; }
private:
talk_base::scoped_ptr<RtpSenderReceiver> rtp_sender_receiver_;
DISALLOW_COPY_AND_ASSIGN(FileVoiceChannel);
};
class FileVideoChannel : public VideoMediaChannel {
public:
FileVideoChannel(const std::string& in_file, const std::string& out_file);
virtual ~FileVideoChannel();
// Implement pure virtual methods of VideoMediaChannel.
virtual bool SetRecvCodecs(const std::vector<VideoCodec>& codecs) {
return true;
}
virtual bool SetSendCodecs(const std::vector<VideoCodec>& codecs);
virtual bool SetRecvRtpHeaderExtensions(
const std::vector<RtpHeaderExtension>& extensions) {
return true;
}
virtual bool SetSendRtpHeaderExtensions(
const std::vector<RtpHeaderExtension>& extensions) {
return true;
}
virtual bool SetRender(bool render) { return true; }
virtual bool SetSend(bool send);
virtual bool AddStream(uint32 ssrc, uint32 voice_ssrc) { return true; }
virtual bool RemoveStream(uint32 ssrc) { return true; }
virtual bool SetRenderer(uint32 ssrc, VideoRenderer* renderer) {
return true;
}
virtual bool SetExternalRenderer(uint32 ssrc, void* renderer) {
return true;
}
virtual bool GetStats(VideoMediaInfo* info) { return true; }
virtual bool SendIntraFrame() { return false; }
virtual bool RequestIntraFrame() { return false; }
// Implement pure virtual methods of MediaChannel.
virtual void OnPacketReceived(talk_base::Buffer* packet);
virtual void OnRtcpReceived(talk_base::Buffer* packet) {}
virtual void SetSendSsrc(uint32 id) {} // TODO: change RTP packet?
virtual bool SetRtcpCName(const std::string& cname) { return true; }
virtual bool Mute(bool on) { return false; }
virtual bool SetSendBandwidth(bool autobw, int bps) { return true; }
virtual bool SetOptions(int options) { return true; }
virtual int GetMediaChannelId() { return -1; }
private:
talk_base::scoped_ptr<RtpSenderReceiver> rtp_sender_receiver_;
DISALLOW_COPY_AND_ASSIGN(FileVideoChannel);
};
} // namespace cricket
#endif // TALK_SESSION_PHONE_FILEMEDIAENGINE_H_
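A sketch of wiring up the engine above from RTP dump files; the file names are placeholders, and the codec constructor arguments (payload type, name, parameters, preference) are assumptions about codec.h rather than values from this commit:

// Illustrative sketch, not part of this commit.
#include <vector>
#include "talk/session/phone/filemediaengine.h"

cricket::FileMediaEngine* CreateFileEngine() {
  cricket::FileMediaEngine* engine = new cricket::FileMediaEngine();
  // Must be set before channels are created.
  engine->set_voice_input_filename("voice_in.rtpdump");
  engine->set_voice_output_filename("voice_out.rtpdump");
  engine->set_video_input_filename("video_in.rtpdump");
  engine->set_video_output_filename("video_out.rtpdump");

  // Must be set before Jingle initiation, or it will fail.
  std::vector<cricket::AudioCodec> audio_codecs;
  audio_codecs.push_back(cricket::AudioCodec(0, "PCMU", 8000, 64000, 1, 0));
  engine->set_voice_codecs(audio_codecs);

  std::vector<cricket::VideoCodec> video_codecs;
  video_codecs.push_back(cricket::VideoCodec(100, "VP8", 640, 400, 30, 0));
  engine->set_video_codecs(video_codecs);

  engine->Init();  // trivially returns true for this engine
  return engine;
}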


@ -1,501 +0,0 @@
/*
* libjingle
* Copyright 2004--2010, Google Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
* EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef TALK_SESSION_PHONE_MEDIACHANNEL_H_
#define TALK_SESSION_PHONE_MEDIACHANNEL_H_
#include <string>
#include <vector>
#include "talk/base/basictypes.h"
#include "talk/base/sigslot.h"
#include "talk/base/socket.h"
#include "talk/session/phone/codec.h"
// TODO: re-evaluate this include
#include "talk/session/phone/audiomonitor.h"
namespace talk_base {
class Buffer;
}
namespace flute {
class MagicCamVideoRenderer;
}
namespace cricket {
const int kMinRtpHeaderExtensionId = 1;
const int kMaxRtpHeaderExtensionId = 255;
struct RtpHeaderExtension {
RtpHeaderExtension(const std::string& u, int i) : uri(u), id(i) {}
std::string uri;
int id;
// TODO: SendRecv direction;
};
enum VoiceMediaChannelOptions {
OPT_CONFERENCE = 0x10000, // tune the audio stream for conference mode
};
enum VideoMediaChannelOptions {
OPT_INTERPOLATE = 0x10000 // Increase the output framerate by 2x by
// interpolating frames
};
class MediaChannel : public sigslot::has_slots<> {
public:
class NetworkInterface {
public:
enum SocketType { ST_RTP, ST_RTCP };
virtual bool SendPacket(talk_base::Buffer* packet) = 0;
virtual bool SendRtcp(talk_base::Buffer* packet) = 0;
virtual int SetOption(SocketType type, talk_base::Socket::Option opt,
int option) = 0;
virtual ~NetworkInterface() {}
};
MediaChannel() : network_interface_(NULL) {}
virtual ~MediaChannel() {}
// Gets/sets the abstract interface class for sending RTP/RTCP data.
NetworkInterface *network_interface() { return network_interface_; }
virtual void SetInterface(NetworkInterface *iface) {
network_interface_ = iface;
}
// Called when an RTP packet is received.
virtual void OnPacketReceived(talk_base::Buffer* packet) = 0;
// Called when an RTCP packet is received.
virtual void OnRtcpReceived(talk_base::Buffer* packet) = 0;
// Sets the SSRC to be used for outgoing data.
virtual void SetSendSsrc(uint32 id) = 0;
// Set the CNAME of RTCP
virtual bool SetRtcpCName(const std::string& cname) = 0;
// Mutes the channel.
virtual bool Mute(bool on) = 0;
// Sets the RTP extension headers and IDs to use when sending RTP.
virtual bool SetRecvRtpHeaderExtensions(
const std::vector<RtpHeaderExtension>& extensions) = 0;
virtual bool SetSendRtpHeaderExtensions(
const std::vector<RtpHeaderExtension>& extensions) = 0;
// Sets the rate control to use when sending data.
virtual bool SetSendBandwidth(bool autobw, int bps) = 0;
// Sets the media options to use.
virtual bool SetOptions(int options) = 0;
// Gets the Rtc channel id
virtual int GetMediaChannelId() = 0;
protected:
NetworkInterface *network_interface_;
};
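// Illustrative only, not part of this header: the smallest possible
// NetworkInterface, showing the contract MediaChannel expects from its
// embedder (a real implementation would hand packets to a transport):
//
//   class NullNetworkInterface : public MediaChannel::NetworkInterface {
//    public:
//     virtual bool SendPacket(talk_base::Buffer* packet) { return false; }
//     virtual bool SendRtcp(talk_base::Buffer* packet) { return false; }
//     virtual int SetOption(SocketType type, talk_base::Socket::Option opt,
//                           int option) { return -1; }
//   };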
enum SendFlags {
SEND_NOTHING,
SEND_RINGBACKTONE,
SEND_MICROPHONE
};
struct VoiceSenderInfo {
uint32 ssrc;
int bytes_sent;
int packets_sent;
int packets_lost;
float fraction_lost;
int ext_seqnum;
int rtt_ms;
int jitter_ms;
int audio_level;
};
struct VoiceReceiverInfo {
uint32 ssrc;
int bytes_rcvd;
int packets_rcvd;
int packets_lost;
float fraction_lost;
int ext_seqnum;
int jitter_ms;
int jitter_buffer_ms;
int jitter_buffer_preferred_ms;
int delay_estimate_ms;
int audio_level;
};
struct VideoSenderInfo {
uint32 ssrc;
int bytes_sent;
int packets_sent;
int packets_cached;
int packets_lost;
float fraction_lost;
int firs_rcvd;
int nacks_rcvd;
int rtt_ms;
int frame_width;
int frame_height;
int framerate_input;
int framerate_sent;
int nominal_bitrate;
int preferred_bitrate;
};
struct VideoReceiverInfo {
uint32 ssrc;
int bytes_rcvd;
// vector<int> layer_bytes_rcvd;
int packets_rcvd;
int packets_lost;
int packets_concealed;
float fraction_lost;
int firs_sent;
int nacks_sent;
int frame_width;
int frame_height;
int framerate_rcvd;
int framerate_decoded;
int framerate_output;
};
struct BandwidthEstimationInfo {
int available_send_bandwidth;
int available_recv_bandwidth;
int target_enc_bitrate;
int actual_enc_bitrate;
int retransmit_bitrate;
int transmit_bitrate;
int bucket_delay;
};
struct VoiceMediaInfo {
void Clear() {
senders.clear();
receivers.clear();
}
std::vector<VoiceSenderInfo> senders;
std::vector<VoiceReceiverInfo> receivers;
};
struct VideoMediaInfo {
void Clear() {
senders.clear();
receivers.clear();
bw_estimations.clear();
}
std::vector<VideoSenderInfo> senders;
std::vector<VideoReceiverInfo> receivers;
std::vector<BandwidthEstimationInfo> bw_estimations;
};
class VoiceMediaChannel : public MediaChannel {
public:
enum Error {
ERROR_NONE = 0, // No error.
ERROR_OTHER, // Other errors.
ERROR_REC_DEVICE_OPEN_FAILED = 100, // Could not open mic.
ERROR_REC_DEVICE_MUTED, // Mic was muted by OS.
ERROR_REC_DEVICE_SILENT, // No background noise picked up.
ERROR_REC_DEVICE_SATURATION, // Mic input is clipping.
ERROR_REC_DEVICE_REMOVED, // Mic was removed while active.
ERROR_REC_RUNTIME_ERROR, // Processing is encountering errors.
ERROR_REC_SRTP_ERROR, // Generic SRTP failure.
ERROR_REC_SRTP_AUTH_FAILED, // Failed to authenticate packets.
ERROR_REC_TYPING_NOISE_DETECTED, // Typing noise is detected.
ERROR_PLAY_DEVICE_OPEN_FAILED = 200, // Could not open playout.
ERROR_PLAY_DEVICE_MUTED, // Playout muted by OS.
ERROR_PLAY_DEVICE_REMOVED, // Playout removed while active.
ERROR_PLAY_RUNTIME_ERROR, // Errors in voice processing.
ERROR_PLAY_SRTP_ERROR, // Generic SRTP failure.
ERROR_PLAY_SRTP_AUTH_FAILED, // Failed to authenticate packets.
ERROR_PLAY_SRTP_REPLAY, // Packet replay detected.
};
VoiceMediaChannel() {}
virtual ~VoiceMediaChannel() {}
// Sets the codecs/payload types to be used for incoming media.
virtual bool SetRecvCodecs(const std::vector<AudioCodec>& codecs) = 0;
// Sets the codecs/payload types to be used for outgoing media.
virtual bool SetSendCodecs(const std::vector<AudioCodec>& codecs) = 0;
// Starts or stops playout of received audio.
virtual bool SetPlayout(bool playout) = 0;
// Starts or stops sending (and potentially capture) of local audio.
virtual bool SetSend(SendFlags flag) = 0;
// Adds a new receive-only stream with the specified SSRC.
virtual bool AddStream(uint32 ssrc) = 0;
// Removes a stream added with AddStream.
virtual bool RemoveStream(uint32 ssrc) = 0;
// Gets current energy levels for all incoming streams.
virtual bool GetActiveStreams(AudioInfo::StreamList* actives) = 0;
// Get the current energy level for the outgoing stream.
virtual int GetOutputLevel() = 0;
// Specifies a ringback tone to be played during call setup.
virtual bool SetRingbackTone(const char *buf, int len) = 0;
// Plays or stops the aforementioned ringback tone.
virtual bool PlayRingbackTone(uint32 ssrc, bool play, bool loop) = 0;
// Sends an out-of-band DTMF signal using the specified event.
virtual bool PressDTMF(int event, bool playout) = 0;
// Gets quality stats for the channel.
virtual bool GetStats(VoiceMediaInfo* info) = 0;
// Gets last reported error for this media channel.
virtual void GetLastMediaError(uint32* ssrc,
VoiceMediaChannel::Error* error) {
ASSERT(error != NULL);
*error = ERROR_NONE;
}
// Signal errors from MediaChannel. Arguments are:
// ssrc(uint32), and error(VoiceMediaChannel::Error).
sigslot::signal2<uint32, VoiceMediaChannel::Error> SignalMediaError;
};
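// A minimal usage sketch (not from this file; the codec lists and SSRC are
// hypothetical) of how a concrete VoiceMediaChannel is typically driven:
//   channel->SetRecvCodecs(recv_codecs);
//   channel->SetSendCodecs(send_codecs);
//   channel->AddStream(remote_ssrc);    // receive-only stream
//   channel->SetPlayout(true);          // start playing received audio
//   channel->SetSend(SEND_MICROPHONE);  // start capturing and sending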
// Represents a YUV420 (a.k.a. I420) video frame.
class VideoFrame {
friend class flute::MagicCamVideoRenderer;
public:
VideoFrame() : rendered_(false) {}
virtual ~VideoFrame() {}
virtual size_t GetWidth() const = 0;
virtual size_t GetHeight() const = 0;
virtual const uint8 *GetYPlane() const = 0;
virtual const uint8 *GetUPlane() const = 0;
virtual const uint8 *GetVPlane() const = 0;
virtual uint8 *GetYPlane() = 0;
virtual uint8 *GetUPlane() = 0;
virtual uint8 *GetVPlane() = 0;
virtual int32 GetYPitch() const = 0;
virtual int32 GetUPitch() const = 0;
virtual int32 GetVPitch() const = 0;
// For retrieving the aspect ratio of each pixel. Usually this is 1x1, but
// the aspect_ratio_idc parameter of H.264 can specify non-square pixels.
virtual size_t GetPixelWidth() const = 0;
virtual size_t GetPixelHeight() const = 0;
// TODO: Add a fourcc format here and probably combine VideoFrame
// with CapturedFrame.
virtual int64 GetElapsedTime() const = 0;
virtual int64 GetTimeStamp() const = 0;
virtual void SetElapsedTime(int64 elapsed_time) = 0;
virtual void SetTimeStamp(int64 time_stamp) = 0;
// Make a copy of the frame. The frame buffer itself may not be copied,
// in which case both the current and new VideoFrame will share a single
// reference-counted frame buffer.
virtual VideoFrame *Copy() const = 0;
// Writes the frame into the given frame buffer, provided that it is of
// sufficient size. Returns the frame's actual size, regardless of whether
// it was written or not (like snprintf). If there is insufficient space,
// nothing is written.
virtual size_t CopyToBuffer(uint8 *buffer, size_t size) const = 0;
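// Hypothetical caller sketch of the snprintf-style contract above (assumes
// the implementation accepts a NULL/0 size query):
//   size_t needed = frame->CopyToBuffer(NULL, 0);  // returns size, writes nothing
//   talk_base::scoped_array<uint8> buf(new uint8[needed]);
//   frame->CopyToBuffer(buf.get(), needed);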
// Converts the I420 data to RGB of a certain type such as ARGB and ABGR.
// Returns the frame's actual size, regardless of whether it was written or
// not (like snprintf). Parameters size and pitch_rgb are in units of bytes.
// If there is insufficient space, nothing is written.
virtual size_t ConvertToRgbBuffer(uint32 to_fourcc, uint8 *buffer,
size_t size, size_t pitch_rgb) const = 0;
// Writes the frame into the given planes, stretched to the given width and
// height. The parameter "interpolate" controls whether to interpolate or just
// take the nearest-point. The parameter "crop" controls whether to crop this
// frame to the aspect ratio of the given dimensions before stretching.
virtual void StretchToPlanes(uint8 *y, uint8 *u, uint8 *v,
int32 pitchY, int32 pitchU, int32 pitchV,
size_t width, size_t height,
bool interpolate, bool crop) const = 0;
// Writes the frame into the given frame buffer, stretched to the given width
// and height, provided that it is of sufficient size. Returns the frame's
// actual size, regardless of whether it was written or not (like snprintf).
// If there is insufficient space, nothing is written. The parameter
// "interpolate" controls whether to interpolate or just take the
// nearest-point. The parameter "crop" controls whether to crop this frame to
// the aspect ratio of the given dimensions before stretching.
virtual size_t StretchToBuffer(size_t w, size_t h, uint8 *buffer, size_t size,
bool interpolate, bool crop) const = 0;
// Writes the frame into the target VideoFrame, stretched to the size of that
// frame. The parameter "interpolate" controls whether to interpolate or just
// take the nearest-point. The parameter "crop" controls whether to crop this
// frame to the aspect ratio of the target frame before stretching.
virtual void StretchToFrame(VideoFrame *target, bool interpolate,
bool crop) const = 0;
// Stretches the frame to the given size, creating a new VideoFrame object to
// hold it. The parameter "interpolate" controls whether to interpolate or
// just take the nearest-point. The parameter "crop" controls whether to crop
// this frame to the aspect ratio of the given dimensions before stretching.
virtual VideoFrame *Stretch(size_t w, size_t h, bool interpolate,
bool crop) const = 0;
// Size of an I420 image of given dimensions when stored as a frame buffer.
static size_t SizeOf(size_t w, size_t h) {
return w * h + ((w + 1) / 2) * ((h + 1) / 2) * 2;
}
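// Worked example: SizeOf(640, 480) = 640*480 + 320*240*2 = 460800 bytes,
// i.e. 1.5 bytes per pixel, as expected for I420.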
protected:
// The frame needs to be rendered to magiccam only once.
// TODO: Remove this flag once magiccam rendering is fully replaced
// by client3d rendering.
mutable bool rendered_;
};
// Simple subclass for use in mocks.
class NullVideoFrame : public VideoFrame {
public:
virtual size_t GetWidth() const { return 0; }
virtual size_t GetHeight() const { return 0; }
virtual const uint8 *GetYPlane() const { return NULL; }
virtual const uint8 *GetUPlane() const { return NULL; }
virtual const uint8 *GetVPlane() const { return NULL; }
virtual uint8 *GetYPlane() { return NULL; }
virtual uint8 *GetUPlane() { return NULL; }
virtual uint8 *GetVPlane() { return NULL; }
virtual int32 GetYPitch() const { return 0; }
virtual int32 GetUPitch() const { return 0; }
virtual int32 GetVPitch() const { return 0; }
virtual size_t GetPixelWidth() const { return 1; }
virtual size_t GetPixelHeight() const { return 1; }
virtual int64 GetElapsedTime() const { return 0; }
virtual int64 GetTimeStamp() const { return 0; }
virtual void SetElapsedTime(int64 elapsed_time) {}
virtual void SetTimeStamp(int64 time_stamp) {}
virtual VideoFrame *Copy() const {
return NULL;
}
virtual size_t CopyToBuffer(uint8 *buffer, size_t size) const {
return 0;
}
virtual size_t ConvertToRgbBuffer(uint32 to_fourcc, uint8 *buffer,
size_t size, size_t pitch_rgb) const {
return 0;
}
virtual void StretchToPlanes(uint8 *y, uint8 *u, uint8 *v,
int32 pitchY, int32 pitchU, int32 pitchV,
size_t width, size_t height,
bool interpolate, bool crop) const {
}
virtual size_t StretchToBuffer(size_t w, size_t h, uint8 *buffer, size_t size,
bool interpolate, bool crop) const {
return 0;
}
virtual void StretchToFrame(VideoFrame *target, bool interpolate,
bool crop) const {
}
virtual VideoFrame *Stretch(size_t w, size_t h, bool interpolate,
bool crop) const {
return NULL;
}
};
// Abstract interface for rendering VideoFrames.
class VideoRenderer {
public:
virtual ~VideoRenderer() {}
// Called when the video has changed size.
virtual bool SetSize(int width, int height, int reserved) = 0;
// Called when a new frame is available for display.
virtual bool RenderFrame(const VideoFrame *frame) = 0;
};
// Simple implementation for use in tests.
class NullVideoRenderer : public VideoRenderer {
virtual bool SetSize(int width, int height, int reserved) {
return true;
}
// Called when a new frame is available for display.
virtual bool RenderFrame(const VideoFrame *frame) {
return true;
}
};
class VideoMediaChannel : public MediaChannel {
public:
enum Error {
ERROR_NONE = 0, // No error.
ERROR_OTHER, // Other errors.
ERROR_REC_DEVICE_OPEN_FAILED = 100, // Could not open camera.
ERROR_REC_DEVICE_NO_DEVICE, // No camera.
ERROR_REC_DEVICE_IN_USE, // Device is already in use.
ERROR_REC_DEVICE_REMOVED, // Device is removed.
ERROR_REC_SRTP_ERROR, // Generic sender SRTP failure.
ERROR_REC_SRTP_AUTH_FAILED, // Failed to authenticate packets.
ERROR_PLAY_SRTP_ERROR = 200, // Generic receiver SRTP failure.
ERROR_PLAY_SRTP_AUTH_FAILED, // Failed to authenticate packets.
ERROR_PLAY_SRTP_REPLAY, // Packet replay detected.
};
VideoMediaChannel() { renderer_ = NULL; }
virtual ~VideoMediaChannel() {}
// Sets the codecs/payload types to be used for incoming media.
virtual bool SetRecvCodecs(const std::vector<VideoCodec> &codecs) = 0;
// Sets the codecs/payload types to be used for outgoing media.
virtual bool SetSendCodecs(const std::vector<VideoCodec> &codecs) = 0;
// Starts or stops playout of received video.
virtual bool SetRender(bool render) = 0;
// Starts or stops transmission (and potentially capture) of local video.
virtual bool SetSend(bool send) = 0;
// Adds a new receive-only stream with the specified SSRC.
virtual bool AddStream(uint32 ssrc, uint32 voice_ssrc) = 0;
// Removes a stream added with AddStream.
virtual bool RemoveStream(uint32 ssrc) = 0;
// Sets the renderer object to be used for the specified stream.
// If SSRC is 0, the renderer is used for the 'default' stream.
virtual bool SetRenderer(uint32 ssrc, VideoRenderer* renderer) = 0;
// Like SetRenderer, but takes an untyped pointer to an external renderer.
// If SSRC is 0, the renderer is used for the 'default' stream.
virtual bool SetExternalRenderer(uint32 ssrc, void* renderer) = 0;
// Gets quality stats for the channel.
virtual bool GetStats(VideoMediaInfo* info) = 0;
// Send an intra frame to the receivers.
virtual bool SendIntraFrame() = 0;
// Request that each of the remote senders send an intra frame.
virtual bool RequestIntraFrame() = 0;
sigslot::signal2<uint32, Error> SignalMediaError;
protected:
VideoRenderer *renderer_;
};
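// A minimal usage sketch (SSRCs and renderer are hypothetical): pair an
// incoming video stream with its audio stream and attach a renderer.
//   channel->AddStream(video_ssrc, voice_ssrc);  // voice_ssrc enables A/V sync
//   channel->SetRenderer(video_ssrc, renderer);
//   channel->SetRender(true);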
} // namespace cricket
#endif // TALK_SESSION_PHONE_MEDIACHANNEL_H_

View File

@@ -1,58 +0,0 @@
//
// libjingle
// Copyright 2004--2007, Google Inc.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// 3. The name of the author may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
// EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
#ifdef HAVE_WEBRTC
#include "talk/app/voicemediaengine.h"
#include "talk/app/videomediaengine.h"
#endif
#include "talk/session/phone/mediaengine.h"
#ifdef HAVE_LINPHONE
#include "talk/session/phone/linphonemediaengine.h"
#endif
namespace cricket {
#ifdef HAVE_WEBRTC
template<>
CompositeMediaEngine<webrtc::RtcVoiceEngine, webrtc::RtcVideoEngine>
::CompositeMediaEngine() : video_(&voice_) {
}
MediaEngine* MediaEngine::Create() {
return new CompositeMediaEngine<webrtc::RtcVoiceEngine,
webrtc::RtcVideoEngine>();
}
#else
MediaEngine* MediaEngine::Create() {
#ifdef HAVE_LINPHONE
return new LinphoneMediaEngine("", "");
#else
return new NullMediaEngine();
#endif
}
#endif
} // namespace cricket

View File

@@ -1,242 +0,0 @@
/*
* libjingle
* Copyright 2010, Google Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
* EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "talk/session/phone/mediamessages.h"
#include "talk/base/stringencode.h"
#include "talk/p2p/base/constants.h"
#include "talk/session/phone/mediasessionclient.h"
#include "talk/xmllite/xmlelement.h"
namespace cricket {
const NamedSource* GetFirstSourceByNick(const NamedSources& sources,
const std::string& nick) {
for (NamedSources::const_iterator source = sources.begin();
source != sources.end(); ++source) {
if (source->nick == nick) {
return &*source;
}
}
return NULL;
}
const NamedSource* GetSourceBySsrc(const NamedSources& sources, uint32 ssrc) {
for (NamedSources::const_iterator source = sources.begin();
source != sources.end(); ++source) {
if (source->ssrc == ssrc) {
return &*source;
}
}
return NULL;
}
const NamedSource* MediaSources::GetFirstAudioSourceByNick(
const std::string& nick) {
return GetFirstSourceByNick(audio, nick);
}
const NamedSource* MediaSources::GetFirstVideoSourceByNick(
const std::string& nick) {
return GetFirstSourceByNick(video, nick);
}
const NamedSource* MediaSources::GetAudioSourceBySsrc(uint32 ssrc) {
return GetSourceBySsrc(audio, ssrc);
}
const NamedSource* MediaSources::GetVideoSourceBySsrc(uint32 ssrc) {
return GetSourceBySsrc(video, ssrc);
}
// NOTE: There is no check here for duplicate sources, so check before
// adding.
void AddSource(NamedSources* sources, const NamedSource& source) {
sources->push_back(source);
}
void MediaSources::AddAudioSource(const NamedSource& source) {
AddSource(&audio, source);
}
void MediaSources::AddVideoSource(const NamedSource& source) {
AddSource(&video, source);
}
void RemoveSourceBySsrc(NamedSources* sources, uint32 ssrc) {
for (NamedSources::iterator source = sources->begin();
source != sources->end(); ) {
if (source->ssrc == ssrc) {
source = sources->erase(source);
} else {
++source;
}
}
}
void MediaSources::RemoveAudioSourceBySsrc(uint32 ssrc) {
RemoveSourceBySsrc(&audio, ssrc);
}
void MediaSources::RemoveVideoSourceBySsrc(uint32 ssrc) {
RemoveSourceBySsrc(&video, ssrc);
}
bool ParseSsrc(const std::string& string, uint32* ssrc) {
return talk_base::FromString(string, ssrc);
}
bool ParseSsrc(const buzz::XmlElement* element, uint32* ssrc) {
if (element == NULL) {
return false;
}
return ParseSsrc(element->BodyText(), ssrc);
}
bool ParseNamedSource(const buzz::XmlElement* source_elem,
NamedSource* named_source,
ParseError* error) {
named_source->nick = source_elem->Attr(QN_JINGLE_DRAFT_SOURCE_NICK);
if (named_source->nick.empty()) {
return BadParse("Missing or invalid nick.", error);
}
named_source->name = source_elem->Attr(QN_JINGLE_DRAFT_SOURCE_NAME);
named_source->usage = source_elem->Attr(QN_JINGLE_DRAFT_SOURCE_USAGE);
named_source->removed =
(STR_JINGLE_DRAFT_SOURCE_STATE_REMOVED ==
source_elem->Attr(QN_JINGLE_DRAFT_SOURCE_STATE));
const buzz::XmlElement* ssrc_elem =
source_elem->FirstNamed(QN_JINGLE_DRAFT_SOURCE_SSRC);
if (ssrc_elem != NULL && !ssrc_elem->BodyText().empty()) {
uint32 ssrc;
if (!ParseSsrc(ssrc_elem->BodyText(), &ssrc)) {
return BadParse("Missing or invalid ssrc.", error);
}
named_source->SetSsrc(ssrc);
}
return true;
}
bool IsSourcesNotify(const buzz::XmlElement* action_elem) {
return action_elem->FirstNamed(QN_JINGLE_DRAFT_NOTIFY) != NULL;
}
bool ParseSourcesNotify(const buzz::XmlElement* action_elem,
const SessionDescription* session_description,
MediaSources* sources,
ParseError* error) {
for (const buzz::XmlElement* notify_elem
= action_elem->FirstNamed(QN_JINGLE_DRAFT_NOTIFY);
notify_elem != NULL;
notify_elem = notify_elem->NextNamed(QN_JINGLE_DRAFT_NOTIFY)) {
std::string content_name = notify_elem->Attr(QN_JINGLE_DRAFT_CONTENT_NAME);
for (const buzz::XmlElement* source_elem
= notify_elem->FirstNamed(QN_JINGLE_DRAFT_SOURCE);
source_elem != NULL;
source_elem = source_elem->NextNamed(QN_JINGLE_DRAFT_SOURCE)) {
NamedSource named_source;
if (!ParseNamedSource(source_elem, &named_source, error)) {
return false;
}
if (session_description == NULL) {
return BadParse("unknown content name: " + content_name, error);
}
const ContentInfo* content =
FindContentInfoByName(session_description->contents(), content_name);
if (content == NULL) {
return BadParse("unknown content name: " + content_name, error);
}
if (IsAudioContent(content)) {
sources->audio.push_back(named_source);
} else if (IsVideoContent(content)) {
sources->video.push_back(named_source);
}
}
}
return true;
}
buzz::XmlElement* CreateViewElem(const std::string& name,
const std::string& type) {
buzz::XmlElement* view_elem =
new buzz::XmlElement(QN_JINGLE_DRAFT_VIEW, true);
view_elem->AddAttr(QN_JINGLE_DRAFT_CONTENT_NAME, name);
view_elem->SetAttr(QN_JINGLE_DRAFT_VIEW_TYPE, type);
return view_elem;
}
buzz::XmlElement* CreateVideoViewElem(const std::string& content_name,
const std::string& type) {
return CreateViewElem(content_name, type);
}
buzz::XmlElement* CreateNoneVideoViewElem(const std::string& content_name) {
return CreateVideoViewElem(content_name, STR_JINGLE_DRAFT_VIEW_TYPE_NONE);
}
buzz::XmlElement* CreateStaticVideoViewElem(const std::string& content_name,
const StaticVideoView& view) {
buzz::XmlElement* view_elem =
CreateVideoViewElem(content_name, STR_JINGLE_DRAFT_VIEW_TYPE_STATIC);
AddXmlAttr(view_elem, QN_JINGLE_DRAFT_VIEW_SSRC, view.ssrc);
buzz::XmlElement* params_elem = new buzz::XmlElement(
QN_JINGLE_DRAFT_VIEW_PARAMS);
AddXmlAttr(params_elem, QN_JINGLE_DRAFT_VIEW_PARAMS_WIDTH, view.width);
AddXmlAttr(params_elem, QN_JINGLE_DRAFT_VIEW_PARAMS_HEIGHT, view.height);
AddXmlAttr(params_elem, QN_JINGLE_DRAFT_VIEW_PARAMS_FRAMERATE,
view.framerate);
AddXmlAttr(params_elem, QN_JINGLE_DRAFT_VIEW_PARAMS_PREFERENCE,
view.preference);
view_elem->AddElement(params_elem);
return view_elem;
}
bool WriteViewRequest(const std::string& content_name,
const ViewRequest& request,
XmlElements* elems,
WriteError* error) {
if (request.static_video_views.size() == 0) {
elems->push_back(CreateNoneVideoViewElem(content_name));
} else {
for (StaticVideoViews::const_iterator view =
request.static_video_views.begin();
view != request.static_video_views.end(); ++view) {
elems->push_back(CreateStaticVideoViewElem(content_name, *view));
}
}
return true;
}
} // namespace cricket

View File

@@ -1,106 +0,0 @@
/*
* libjingle
* Copyright 2010, Google Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
* EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef TALK_SESSION_PHONE_MEDIAMESSAGES_H_
#define TALK_SESSION_PHONE_MEDIAMESSAGES_H_
#include <string>
#include <vector>
#include "talk/base/basictypes.h"
#include "talk/p2p/base/parsing.h"
#include "talk/p2p/base/sessiondescription.h"
namespace cricket {
struct NamedSource {
NamedSource() : ssrc(0), ssrc_set(false), removed(false) {}
void SetSsrc(uint32 ssrc) {
this->ssrc = ssrc;
this->ssrc_set = true;
}
std::string nick;
std::string name;
std::string usage;
uint32 ssrc;
bool ssrc_set;
bool removed;
};
typedef std::vector<NamedSource> NamedSources;
class MediaSources {
public:
const NamedSource* GetAudioSourceBySsrc(uint32 ssrc);
const NamedSource* GetVideoSourceBySsrc(uint32 ssrc);
// TODO: Remove once all senders use explicit removal by ssrc.
const NamedSource* GetFirstAudioSourceByNick(const std::string& nick);
const NamedSource* GetFirstVideoSourceByNick(const std::string& nick);
void AddAudioSource(const NamedSource& source);
void AddVideoSource(const NamedSource& source);
void RemoveAudioSourceBySsrc(uint32 ssrc);
void RemoveVideoSourceBySsrc(uint32 ssrc);
NamedSources audio;
NamedSources video;
};
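// Usage sketch (values hypothetical): track and look up a remote source.
//   NamedSource source;
//   source.nick = "alice";
//   source.SetSsrc(43633328);
//   sources.AddAudioSource(source);
//   const NamedSource* found = sources.GetAudioSourceBySsrc(43633328);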
struct StaticVideoView {
StaticVideoView(uint32 ssrc, int width, int height, int framerate)
: ssrc(ssrc),
width(width),
height(height),
framerate(framerate),
preference(0) {}
uint32 ssrc;
int width;
int height;
int framerate;
int preference;
};
typedef std::vector<StaticVideoView> StaticVideoViews;
struct ViewRequest {
StaticVideoViews static_video_views;
};
bool WriteViewRequest(const std::string& content_name,
const ViewRequest& view,
XmlElements* elems,
WriteError* error);
bool IsSourcesNotify(const buzz::XmlElement* action_elem);
// The session_description is needed to map content_name => media type.
bool ParseSourcesNotify(const buzz::XmlElement* action_elem,
const SessionDescription* session_description,
MediaSources* sources,
ParseError* error);
} // namespace cricket
#endif // TALK_SESSION_PHONE_MEDIAMESSAGES_H_

View File

@@ -1,289 +0,0 @@
/*
* libjingle
* Copyright 2004--2005, Google Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
* EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef TALK_SESSION_PHONE_MEDIASESSIONCLIENT_H_
#define TALK_SESSION_PHONE_MEDIASESSIONCLIENT_H_
#include <string>
#include <vector>
#include <map>
#include <algorithm>
#include "talk/session/phone/call.h"
#include "talk/session/phone/channelmanager.h"
#include "talk/session/phone/cryptoparams.h"
#include "talk/base/sigslot.h"
#include "talk/base/sigslotrepeater.h"
#include "talk/base/messagequeue.h"
#include "talk/base/thread.h"
#include "talk/p2p/base/sessionmanager.h"
#include "talk/p2p/base/session.h"
#include "talk/p2p/base/sessionclient.h"
#include "talk/p2p/base/sessiondescription.h"
namespace cricket {
class Call;
class SessionDescription;
typedef std::vector<AudioCodec> AudioCodecs;
typedef std::vector<VideoCodec> VideoCodecs;
// SEC_ENABLED and SEC_REQUIRED should only be used if the session
// was negotiated over TLS, to protect the inline crypto material
// exchange.
// SEC_DISABLED: No crypto in outgoing offer and answer. Fail any
// offer with crypto required.
// SEC_ENABLED: Crypto in outgoing offer and answer. Fail any offer
// with unsupported required crypto. Crypto set but not
// required in outgoing offer.
// SEC_REQUIRED: Crypto in outgoing offer and answer with
// required='true'. Fail any offer with no or
// unsupported crypto (implicit crypto required='true'
// in the offer.)
enum SecureMediaPolicy {SEC_DISABLED, SEC_ENABLED, SEC_REQUIRED};
const int kAutoBandwidth = -1;
struct CallOptions {
CallOptions() :
is_video(false),
is_muc(false),
video_bandwidth(kAutoBandwidth) {
}
bool is_video;
bool is_muc;
// bps. -1 == auto.
int video_bandwidth;
};
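// Usage sketch (illustrative): request a video call with a 512 kbps cap.
//   CallOptions options;
//   options.is_video = true;
//   options.video_bandwidth = 512000;  // bps; kAutoBandwidth (-1) for auto
//   SessionDescription* offer = session_client->CreateOffer(options);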
class MediaSessionClient: public SessionClient, public sigslot::has_slots<> {
public:
MediaSessionClient(const buzz::Jid& jid, SessionManager *manager);
// Alternative constructor, allowing injection of media_engine
// and device_manager.
MediaSessionClient(const buzz::Jid& jid, SessionManager *manager,
MediaEngine* media_engine, DeviceManager* device_manager);
~MediaSessionClient();
const buzz::Jid &jid() const { return jid_; }
SessionManager* session_manager() const { return session_manager_; }
ChannelManager* channel_manager() const { return channel_manager_; }
int GetCapabilities() { return channel_manager_->GetCapabilities(); }
Call *CreateCall();
void DestroyCall(Call *call);
Call *GetFocus();
void SetFocus(Call *call);
void JoinCalls(Call *call_to_join, Call *call);
bool GetAudioInputDevices(std::vector<std::string>* names) {
return channel_manager_->GetAudioInputDevices(names);
}
bool GetAudioOutputDevices(std::vector<std::string>* names) {
return channel_manager_->GetAudioOutputDevices(names);
}
bool GetVideoCaptureDevices(std::vector<std::string>* names) {
return channel_manager_->GetVideoCaptureDevices(names);
}
bool SetAudioOptions(const std::string& in_name, const std::string& out_name,
int opts) {
return channel_manager_->SetAudioOptions(in_name, out_name, opts);
}
bool SetOutputVolume(int level) {
return channel_manager_->SetOutputVolume(level);
}
bool SetVideoOptions(const std::string& cam_device) {
return channel_manager_->SetVideoOptions(cam_device);
}
sigslot::signal2<Call *, Call *> SignalFocus;
sigslot::signal1<Call *> SignalCallCreate;
sigslot::signal1<Call *> SignalCallDestroy;
sigslot::repeater0<> SignalDevicesChange;
SessionDescription* CreateOffer(const CallOptions& options);
SessionDescription* CreateAnswer(const SessionDescription* offer,
const CallOptions& options);
SecureMediaPolicy secure() const { return secure_; }
void set_secure(SecureMediaPolicy s) { secure_ = s; }
private:
void Construct();
void OnSessionCreate(Session *session, bool received_initiate);
void OnSessionState(BaseSession *session, BaseSession::State state);
void OnSessionDestroy(Session *session);
virtual bool ParseContent(SignalingProtocol protocol,
const buzz::XmlElement* elem,
const ContentDescription** content,
ParseError* error);
virtual bool WriteContent(SignalingProtocol protocol,
const ContentDescription* content,
buzz::XmlElement** elem,
WriteError* error);
Session *CreateSession(Call *call);
buzz::Jid jid_;
SessionManager* session_manager_;
Call *focus_call_;
ChannelManager *channel_manager_;
std::map<uint32, Call *> calls_;
std::map<std::string, Call *> session_map_;
SecureMediaPolicy secure_;
friend class Call;
};
enum MediaType {
MEDIA_TYPE_AUDIO,
MEDIA_TYPE_VIDEO
};
class MediaContentDescription : public ContentDescription {
public:
MediaContentDescription()
: ssrc_(0),
ssrc_set_(false),
rtcp_mux_(false),
bandwidth_(kAutoBandwidth),
crypto_required_(false),
rtp_header_extensions_set_(false) {
}
virtual MediaType type() const = 0;
uint32 ssrc() const { return ssrc_; }
bool ssrc_set() const { return ssrc_set_; }
void set_ssrc(uint32 ssrc) {
ssrc_ = ssrc;
ssrc_set_ = true;
}
bool rtcp_mux() const { return rtcp_mux_; }
void set_rtcp_mux(bool mux) { rtcp_mux_ = mux; }
int bandwidth() const { return bandwidth_; }
void set_bandwidth(int bandwidth) { bandwidth_ = bandwidth; }
const std::vector<CryptoParams>& cryptos() const { return cryptos_; }
void AddCrypto(const CryptoParams& params) {
cryptos_.push_back(params);
}
bool crypto_required() const { return crypto_required_; }
void set_crypto_required(bool crypto) {
crypto_required_ = crypto;
}
const std::vector<RtpHeaderExtension>& rtp_header_extensions() const {
return rtp_header_extensions_;
}
void AddRtpHeaderExtension(const RtpHeaderExtension& ext) {
rtp_header_extensions_.push_back(ext);
rtp_header_extensions_set_ = true;
}
void ClearRtpHeaderExtensions() {
rtp_header_extensions_.clear();
rtp_header_extensions_set_ = true;
}
// We can't always tell if an empty list of header extensions is
// because the other side doesn't support them, or just isn't hooked up to
// signal them. For now we assume an empty list means no signaling, but
// provide the ClearRtpHeaderExtensions method to allow "no support" to be
// clearly indicated (i.e. when derived from other information).
bool rtp_header_extensions_set() const {
return rtp_header_extensions_set_;
}
protected:
uint32 ssrc_;
bool ssrc_set_;
bool rtcp_mux_;
int bandwidth_;
std::vector<CryptoParams> cryptos_;
bool crypto_required_;
std::vector<RtpHeaderExtension> rtp_header_extensions_;
bool rtp_header_extensions_set_;
};
template <class C>
class MediaContentDescriptionImpl : public MediaContentDescription {
public:
struct PreferenceSort {
bool operator()(C a, C b) { return a.preference > b.preference; }
};
const std::vector<C>& codecs() const { return codecs_; }
void AddCodec(const C& codec) {
codecs_.push_back(codec);
}
void SortCodecs() {
std::sort(codecs_.begin(), codecs_.end(), PreferenceSort());
}
private:
std::vector<C> codecs_;
};
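// Usage sketch (codec values hypothetical): codecs are kept in descending
// preference order via PreferenceSort.
//   AudioContentDescription audio;
//   audio.AddCodec(AudioCodec(103, "ISAC", 16000, 0, 1, 1));
//   audio.AddCodec(AudioCodec(0, "PCMU", 8000, 0, 1, 0));
//   audio.SortCodecs();  // ISAC (preference 1) now sorts first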
class AudioContentDescription : public MediaContentDescriptionImpl<AudioCodec> {
public:
AudioContentDescription() :
conference_mode_(false) {}
virtual MediaType type() const { return MEDIA_TYPE_AUDIO; }
bool conference_mode() const { return conference_mode_; }
void set_conference_mode(bool enable) {
conference_mode_ = enable;
}
const std::string &lang() const { return lang_; }
void set_lang(const std::string &lang) { lang_ = lang; }
private:
bool conference_mode_;
std::string lang_;
};
class VideoContentDescription : public MediaContentDescriptionImpl<VideoCodec> {
public:
virtual MediaType type() const { return MEDIA_TYPE_VIDEO; }
};
// Convenience functions.
bool IsAudioContent(const ContentInfo* content);
bool IsVideoContent(const ContentInfo* content);
const ContentInfo* GetFirstAudioContent(const SessionDescription* sdesc);
const ContentInfo* GetFirstVideoContent(const SessionDescription* sdesc);
} // namespace cricket
#endif // TALK_SESSION_PHONE_MEDIASESSIONCLIENT_H_

View File

@@ -0,0 +1,660 @@
/*
* libjingle
* Copyright 2009, Google Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
* EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// talk's config.h, generated from mac_config_dot_h for OSX, conflicts with the
// one included by the libsrtp headers. Don't use it. Instead, we keep HAVE_SRTP
// and LOGGING defined in config.h.
#undef HAVE_CONFIG_H
#ifdef OSX
// TODO: For the XCode build, we force SRTP (b/2500074)
#ifndef HAVE_SRTP
#define HAVE_SRTP 1
#endif // HAVE_SRTP
// If LOGGING is not defined, define it to 1 (b/3245816)
#ifndef LOGGING
#define LOGGING 1
#endif // LOGGING
#endif // OSX
#include "talk/session/phone/srtpfilter.h"
#include <algorithm>
#include <cstring>
#include "talk/base/base64.h"
#include "talk/base/logging.h"
#include "talk/base/time.h"
#include "talk/session/phone/rtputils.h"
// Enable this line to turn on SRTP debugging
// #define SRTP_DEBUG
#ifdef HAVE_SRTP
#ifdef SRTP_RELATIVE_PATH
#include "srtp.h" // NOLINT
#else
#include "third_party/libsrtp/include/srtp.h"
#endif // SRTP_RELATIVE_PATH
#ifdef _DEBUG
extern "C" debug_module_t mod_srtp;
extern "C" debug_module_t mod_auth;
extern "C" debug_module_t mod_cipher;
extern "C" debug_module_t mod_stat;
extern "C" debug_module_t mod_alloc;
extern "C" debug_module_t mod_aes_icm;
extern "C" debug_module_t mod_aes_hmac;
#endif
#else
// SrtpFilter needs that constant.
#define SRTP_MASTER_KEY_LEN 30
#endif // HAVE_SRTP
namespace cricket {
const std::string CS_AES_CM_128_HMAC_SHA1_80 = "AES_CM_128_HMAC_SHA1_80";
const std::string CS_AES_CM_128_HMAC_SHA1_32 = "AES_CM_128_HMAC_SHA1_32";
// Defined after the suites above so the reference binds to an already
// constructed string (avoids a static-initialization-order hazard).
const std::string& CS_DEFAULT = CS_AES_CM_128_HMAC_SHA1_80;
const int SRTP_MASTER_KEY_BASE64_LEN = SRTP_MASTER_KEY_LEN * 4 / 3;
#ifndef HAVE_SRTP
// This helper function is used on systems that don't (yet) have SRTP,
// to log that the functions that require it won't do anything.
namespace {
bool SrtpNotAvailable(const char *func) {
LOG(LS_ERROR) << func << ": SRTP is not available on your system.";
return false;
}
} // anonymous namespace
#endif // !HAVE_SRTP
#ifdef HAVE_SRTP // Due to the cricket namespace, this can't be combined with the #ifdef above.
void EnableSrtpDebugging() {
#ifdef _DEBUG
debug_on(mod_srtp);
debug_on(mod_auth);
debug_on(mod_cipher);
debug_on(mod_stat);
debug_on(mod_alloc);
debug_on(mod_aes_icm);
// debug_on(mod_aes_cbc);
// debug_on(mod_hmac);
#endif
}
#endif
SrtpFilter::SrtpFilter()
: state_(ST_INIT),
send_session_(new SrtpSession()),
recv_session_(new SrtpSession()) {
SignalSrtpError.repeat(send_session_->SignalSrtpError);
SignalSrtpError.repeat(recv_session_->SignalSrtpError);
}
SrtpFilter::~SrtpFilter() {
}
bool SrtpFilter::IsActive() const {
return (state_ == ST_ACTIVE);
}
bool SrtpFilter::SetOffer(const std::vector<CryptoParams>& offer_params,
ContentSource source) {
bool ret = false;
if (state_ == ST_INIT) {
ret = StoreParams(offer_params, source);
} else {
LOG(LS_ERROR) << "Invalid state for SRTP offer";
}
return ret;
}
bool SrtpFilter::SetAnswer(const std::vector<CryptoParams>& answer_params,
ContentSource source) {
bool ret = false;
if ((state_ == ST_SENTOFFER && source == CS_REMOTE) ||
(state_ == ST_RECEIVEDOFFER && source == CS_LOCAL)) {
// If the answer requests crypto, finalize the parameters and apply them.
// Otherwise, complete the negotiation of an unencrypted session.
if (!answer_params.empty()) {
CryptoParams selected_params;
ret = NegotiateParams(answer_params, &selected_params);
if (ret) {
if (state_ == ST_SENTOFFER) {
ret = ApplyParams(selected_params, answer_params[0]);
} else { // ST_RECEIVEDOFFER
ret = ApplyParams(answer_params[0], selected_params);
}
}
} else {
ret = ResetParams();
}
} else {
LOG(LS_ERROR) << "Invalid state for SRTP answer";
}
return ret;
}
bool SrtpFilter::ProtectRtp(void* p, int in_len, int max_len, int* out_len) {
if (!IsActive()) {
LOG(LS_WARNING) << "Failed to ProtectRtp: SRTP not active";
return false;
}
return send_session_->ProtectRtp(p, in_len, max_len, out_len);
}
bool SrtpFilter::ProtectRtcp(void* p, int in_len, int max_len, int* out_len) {
if (!IsActive()) {
LOG(LS_WARNING) << "Failed to ProtectRtcp: SRTP not active";
return false;
}
return send_session_->ProtectRtcp(p, in_len, max_len, out_len);
}
bool SrtpFilter::UnprotectRtp(void* p, int in_len, int* out_len) {
if (!IsActive()) {
LOG(LS_WARNING) << "Failed to UnprotectRtp: SRTP not active";
return false;
}
return recv_session_->UnprotectRtp(p, in_len, out_len);
}
bool SrtpFilter::UnprotectRtcp(void* p, int in_len, int* out_len) {
if (!IsActive()) {
LOG(LS_WARNING) << "Failed to UnprotectRtcp: SRTP not active";
return false;
}
return recv_session_->UnprotectRtcp(p, in_len, out_len);
}
void SrtpFilter::set_signal_silent_time(uint32 signal_silent_time_in_ms) {
send_session_->set_signal_silent_time(signal_silent_time_in_ms);
recv_session_->set_signal_silent_time(signal_silent_time_in_ms);
}
bool SrtpFilter::StoreParams(const std::vector<CryptoParams>& params,
ContentSource source) {
offer_params_ = params;
state_ = (source == CS_LOCAL) ? ST_SENTOFFER : ST_RECEIVEDOFFER;
return true;
}
bool SrtpFilter::NegotiateParams(const std::vector<CryptoParams>& answer_params,
CryptoParams* selected_params) {
// We're processing an accept. We should have exactly one set of params,
// unless the offer didn't mention crypto, in which case we shouldn't be here.
bool ret = (answer_params.size() == 1U && !offer_params_.empty());
if (ret) {
// We should find a match between the answer params and the offered params.
std::vector<CryptoParams>::const_iterator it;
for (it = offer_params_.begin(); it != offer_params_.end(); ++it) {
if (answer_params[0].Matches(*it)) {
break;
}
}
if (it != offer_params_.end()) {
*selected_params = *it;
} else {
ret = false;
}
}
if (!ret) {
LOG(LS_WARNING) << "Invalid parameters in SRTP answer";
}
return ret;
}
bool SrtpFilter::ApplyParams(const CryptoParams& send_params,
const CryptoParams& recv_params) {
// TODO: Zero these buffers after use.
bool ret;
uint8 send_key[SRTP_MASTER_KEY_LEN], recv_key[SRTP_MASTER_KEY_LEN];
ret = (ParseKeyParams(send_params.key_params, send_key, sizeof(send_key)) &&
ParseKeyParams(recv_params.key_params, recv_key, sizeof(recv_key)));
if (ret) {
ret = (send_session_->SetSend(send_params.cipher_suite,
send_key, sizeof(send_key)) &&
recv_session_->SetRecv(recv_params.cipher_suite,
recv_key, sizeof(recv_key)));
}
if (ret) {
offer_params_.clear();
state_ = ST_ACTIVE;
LOG(LS_INFO) << "SRTP activated with negotiated parameters:"
<< " send cipher_suite " << send_params.cipher_suite
<< " recv cipher_suite " << recv_params.cipher_suite;
} else {
LOG(LS_WARNING) << "Failed to apply negotiated SRTP parameters";
}
return ret;
}
bool SrtpFilter::ResetParams() {
offer_params_.clear();
state_ = ST_INIT;
LOG(LS_INFO) << "SRTP reset to init state";
return true;
}
bool SrtpFilter::ParseKeyParams(const std::string& key_params,
uint8* key, int len) {
// example key_params: "inline:YUJDZGVmZ2hpSktMbW9QUXJzVHVWd3l6MTIzNDU2"
// Fail if key-method is wrong.
if (key_params.find("inline:") != 0) {
return false;
}
// Fail if base64 decode fails, or the key is the wrong size.
std::string key_b64(key_params.substr(7)), key_str;
if (!talk_base::Base64::Decode(key_b64, talk_base::Base64::DO_STRICT,
&key_str, NULL) ||
static_cast<int>(key_str.size()) != len) {
return false;
}
memcpy(key, key_str.c_str(), len);
return true;
}
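// Sizing note: SRTP_MASTER_KEY_LEN is 30 bytes, which base64-encodes to
// exactly 40 characters (SRTP_MASTER_KEY_BASE64_LEN = 30 * 4 / 3), so a
// valid key_params value is "inline:" followed by 40 base64 characters.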
///////////////////////////////////////////////////////////////////////////////
// SrtpSession
#ifdef HAVE_SRTP
bool SrtpSession::inited_ = false;
std::list<SrtpSession*> SrtpSession::sessions_;
SrtpSession::SrtpSession()
: session_(NULL),
rtp_auth_tag_len_(0),
rtcp_auth_tag_len_(0),
srtp_stat_(new SrtpStat()),
last_send_seq_num_(-1) {
sessions_.push_back(this);
SignalSrtpError.repeat(srtp_stat_->SignalSrtpError);
}
SrtpSession::~SrtpSession() {
sessions_.erase(std::find(sessions_.begin(), sessions_.end(), this));
if (session_) {
srtp_dealloc(session_);
}
}
bool SrtpSession::SetSend(const std::string& cs, const uint8* key, int len) {
return SetKey(ssrc_any_outbound, cs, key, len);
}
bool SrtpSession::SetRecv(const std::string& cs, const uint8* key, int len) {
return SetKey(ssrc_any_inbound, cs, key, len);
}
bool SrtpSession::ProtectRtp(void* p, int in_len, int max_len, int* out_len) {
if (!session_) {
LOG(LS_WARNING) << "Failed to protect SRTP packet: no SRTP Session";
return false;
}
int need_len = in_len + rtp_auth_tag_len_; // NOLINT
if (max_len < need_len) {
LOG(LS_WARNING) << "Failed to protect SRTP packet: The buffer length "
<< max_len << " is less than the needed " << need_len;
return false;
}
*out_len = in_len;
int err = srtp_protect(session_, p, out_len);
uint32 ssrc;
if (GetRtpSsrc(p, in_len, &ssrc)) {
srtp_stat_->AddProtectRtpResult(ssrc, err);
}
int seq_num;
GetRtpSeqNum(p, in_len, &seq_num);
if (err != err_status_ok) {
LOG(LS_WARNING) << "Failed to protect SRTP packet, seqnum="
<< seq_num << ", err=" << err << ", last seqnum="
<< last_send_seq_num_;
return false;
}
last_send_seq_num_ = seq_num;
return true;
}
bool SrtpSession::ProtectRtcp(void* p, int in_len, int max_len, int* out_len) {
if (!session_) {
LOG(LS_WARNING) << "Failed to protect SRTCP packet: no SRTP Session";
return false;
}
int need_len = in_len + sizeof(uint32) + rtcp_auth_tag_len_; // NOLINT
if (max_len < need_len) {
LOG(LS_WARNING) << "Failed to protect SRTCP packet: The buffer length "
<< max_len << " is less than the needed " << need_len;
return false;
}
*out_len = in_len;
int err = srtp_protect_rtcp(session_, p, out_len);
srtp_stat_->AddProtectRtcpResult(err);
if (err != err_status_ok) {
LOG(LS_WARNING) << "Failed to protect SRTCP packet, err=" << err;
return false;
}
return true;
}
bool SrtpSession::UnprotectRtp(void* p, int in_len, int* out_len) {
if (!session_) {
LOG(LS_WARNING) << "Failed to unprotect SRTP packet: no SRTP Session";
return false;
}
*out_len = in_len;
int err = srtp_unprotect(session_, p, out_len);
uint32 ssrc;
if (GetRtpSsrc(p, in_len, &ssrc)) {
srtp_stat_->AddUnprotectRtpResult(ssrc, err);
}
if (err != err_status_ok) {
LOG(LS_WARNING) << "Failed to unprotect SRTP packet, err=" << err;
return false;
}
return true;
}
bool SrtpSession::UnprotectRtcp(void* p, int in_len, int* out_len) {
if (!session_) {
LOG(LS_WARNING) << "Failed to unprotect SRTCP packet: no SRTP Session";
return false;
}
*out_len = in_len;
int err = srtp_unprotect_rtcp(session_, p, out_len);
srtp_stat_->AddUnprotectRtcpResult(err);
if (err != err_status_ok) {
LOG(LS_WARNING) << "Failed to unprotect SRTCP packet, err=" << err;
return false;
}
return true;
}
void SrtpSession::set_signal_silent_time(uint32 signal_silent_time_in_ms) {
srtp_stat_->set_signal_silent_time(signal_silent_time_in_ms);
}
bool SrtpSession::SetKey(int type, const std::string& cs,
const uint8* key, int len) {
if (session_) {
LOG(LS_ERROR) << "Failed to create SRTP session: "
<< "SRTP session already created";
return false;
}
if (!Init()) {
return false;
}
srtp_policy_t policy;
memset(&policy, 0, sizeof(policy));
if (cs == CS_AES_CM_128_HMAC_SHA1_80) {
crypto_policy_set_aes_cm_128_hmac_sha1_80(&policy.rtp);
crypto_policy_set_aes_cm_128_hmac_sha1_80(&policy.rtcp);
} else if (cs == CS_AES_CM_128_HMAC_SHA1_32) {
crypto_policy_set_aes_cm_128_hmac_sha1_32(&policy.rtp); // rtp is 32,
crypto_policy_set_aes_cm_128_hmac_sha1_80(&policy.rtcp); // rtcp still 80
} else {
LOG(LS_WARNING) << "Failed to create SRTP session: unsupported"
<< " cipher_suite " << cs.c_str();
return false;
}
if (!key || len != SRTP_MASTER_KEY_LEN) {
LOG(LS_WARNING) << "Failed to create SRTP session: invalid key";
return false;
}
policy.ssrc.type = static_cast<ssrc_type_t>(type);
policy.ssrc.value = 0;
policy.key = const_cast<uint8*>(key);
// TODO: Parse the window size from the WSH session-param.
policy.window_size = 1024;
policy.allow_repeat_tx = 1;
policy.next = NULL;
int err = srtp_create(&session_, &policy);
if (err != err_status_ok) {
LOG(LS_ERROR) << "Failed to create SRTP session, err=" << err;
return false;
}
rtp_auth_tag_len_ = policy.rtp.auth_tag_len;
rtcp_auth_tag_len_ = policy.rtcp.auth_tag_len;
return true;
}
bool SrtpSession::Init() {
if (!inited_) {
int err;
err = srtp_init();
if (err != err_status_ok) {
LOG(LS_ERROR) << "Failed to init SRTP, err=" << err;
return false;
}
err = srtp_install_event_handler(&SrtpSession::HandleEventThunk);
if (err != err_status_ok) {
LOG(LS_ERROR) << "Failed to install SRTP event handler, err=" << err;
return false;
}
inited_ = true;
}
return true;
}
void SrtpSession::HandleEvent(const srtp_event_data_t* ev) {
switch (ev->event) {
case event_ssrc_collision:
LOG(LS_INFO) << "SRTP event: SSRC collision";
break;
case event_key_soft_limit:
LOG(LS_INFO) << "SRTP event: reached soft key usage limit";
break;
case event_key_hard_limit:
LOG(LS_INFO) << "SRTP event: reached hard key usage limit";
break;
case event_packet_index_limit:
LOG(LS_INFO) << "SRTP event: reached hard packet limit (2^48 packets)";
break;
default:
LOG(LS_INFO) << "SRTP event: unknown " << ev->event;
break;
}
}
void SrtpSession::HandleEventThunk(srtp_event_data_t* ev) {
for (std::list<SrtpSession*>::iterator it = sessions_.begin();
it != sessions_.end(); ++it) {
if ((*it)->session_ == ev->session) {
(*it)->HandleEvent(ev);
break;
}
}
}
#else // !HAVE_SRTP
SrtpSession::SrtpSession() {
LOG(WARNING) << "SRTP implementation is missing.";
}
SrtpSession::~SrtpSession() {
}
bool SrtpSession::SetSend(const std::string& cs, const uint8* key, int len) {
return SrtpNotAvailable(__FUNCTION__);
}
bool SrtpSession::SetRecv(const std::string& cs, const uint8* key, int len) {
return SrtpNotAvailable(__FUNCTION__);
}
bool SrtpSession::ProtectRtp(void* data, int in_len, int max_len,
int* out_len) {
return SrtpNotAvailable(__FUNCTION__);
}
bool SrtpSession::ProtectRtcp(void* data, int in_len, int max_len,
int* out_len) {
return SrtpNotAvailable(__FUNCTION__);
}
bool SrtpSession::UnprotectRtp(void* data, int in_len, int* out_len) {
return SrtpNotAvailable(__FUNCTION__);
}
bool SrtpSession::UnprotectRtcp(void* data, int in_len, int* out_len) {
return SrtpNotAvailable(__FUNCTION__);
}
void SrtpSession::set_signal_silent_time(uint32 signal_silent_time) {
// Do nothing.
}
#endif // HAVE_SRTP
///////////////////////////////////////////////////////////////////////////////
// SrtpStat
#ifdef HAVE_SRTP
SrtpStat::SrtpStat()
: signal_silent_time_(1000) {
}
void SrtpStat::AddProtectRtpResult(uint32 ssrc, int result) {
FailureKey key;
key.ssrc = ssrc;
key.mode = SrtpFilter::PROTECT;
switch (result) {
case err_status_ok:
key.error = SrtpFilter::ERROR_NONE;
break;
case err_status_auth_fail:
key.error = SrtpFilter::ERROR_AUTH;
break;
default:
key.error = SrtpFilter::ERROR_FAIL;
}
HandleSrtpResult(key);
}
void SrtpStat::AddUnprotectRtpResult(uint32 ssrc, int result) {
FailureKey key;
key.ssrc = ssrc;
key.mode = SrtpFilter::UNPROTECT;
switch (result) {
case err_status_ok:
key.error = SrtpFilter::ERROR_NONE;
break;
case err_status_auth_fail:
key.error = SrtpFilter::ERROR_AUTH;
break;
case err_status_replay_fail:
case err_status_replay_old:
key.error = SrtpFilter::ERROR_REPLAY;
break;
default:
key.error = SrtpFilter::ERROR_FAIL;
}
HandleSrtpResult(key);
}
void SrtpStat::AddProtectRtcpResult(int result) {
AddProtectRtpResult(0U, result);
}
void SrtpStat::AddUnprotectRtcpResult(int result) {
AddUnprotectRtpResult(0U, result);
}
void SrtpStat::HandleSrtpResult(const SrtpStat::FailureKey& key) {
// Handle cases where the error should be signalled right away. For other
// errors, signal the first occurrence, then silence repeats of the same
// error for a certain amount of time (default 1 sec).
if (key.error != SrtpFilter::ERROR_NONE) {
// For errors, signal first time and wait for 1 sec.
FailureStat* stat = &(failures_[key]);
uint32 current_time = talk_base::Time();
if (stat->last_signal_time == 0 ||
talk_base::TimeDiff(current_time, stat->last_signal_time) >
static_cast<int>(signal_silent_time_)) {
SignalSrtpError(key.ssrc, key.mode, key.error);
stat->last_signal_time = current_time;
}
}
}
#else // !HAVE_SRTP
SrtpStat::SrtpStat()
: signal_silent_time_(1000) {
LOG(WARNING) << "SRTP implementation is missing.";
}
void SrtpStat::AddProtectRtpResult(uint32 ssrc, int result) {
SrtpNotAvailable(__FUNCTION__);
}
void SrtpStat::AddUnprotectRtpResult(uint32 ssrc, int result) {
SrtpNotAvailable(__FUNCTION__);
}
void SrtpStat::AddProtectRtcpResult(int result) {
SrtpNotAvailable(__FUNCTION__);
}
void SrtpStat::AddUnprotectRtcpResult(int result) {
SrtpNotAvailable(__FUNCTION__);
}
void SrtpStat::HandleSrtpResult(const SrtpStat::FailureKey& key) {
SrtpNotAvailable(__FUNCTION__);
}
#endif // HAVE_SRTP
} // namespace cricket

View File

@@ -0,0 +1,84 @@
/*
* libjingle
* Copyright 2004--2011, Google Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
* EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef TALK_SESSION_PHONE_WEBRTCCOMMON_H_
#define TALK_SESSION_PHONE_WEBRTCCOMMON_H_
#ifdef WEBRTC_RELATIVE_PATH
#include "common_types.h"
#include "video_engine/main/interface/vie_base.h"
#include "voice_engine/main/interface/voe_base.h"
#else
#include "third_party/webrtc/files/include/common_types.h"
#include "third_party/webrtc/files/include/voe_base.h"
#include "third_party/webrtc/files/include/vie_base.h"
#endif // WEBRTC_RELATIVE_PATH
namespace cricket {
// Tracing helpers, for easy logging when WebRTC calls fail.
// Example: "LOG_RTCERR1(StartSend, channel);" produces the trace
// "StartSend(1) failed, err=XXXX"
// The method GetLastEngineError must be defined in the calling scope.
#define LOG_RTCERR0(func) \
LOG_RTCERR0_EX(func, GetLastEngineError())
#define LOG_RTCERR1(func, a1) \
LOG_RTCERR1_EX(func, a1, GetLastEngineError())
#define LOG_RTCERR2(func, a1, a2) \
LOG_RTCERR2_EX(func, a1, a2, GetLastEngineError())
#define LOG_RTCERR3(func, a1, a2, a3) \
LOG_RTCERR3_EX(func, a1, a2, a3, GetLastEngineError())
#define LOG_RTCERR4(func, a1, a2, a3, a4) \
LOG_RTCERR4_EX(func, a1, a2, a3, a4, GetLastEngineError())
#define LOG_RTCERR5(func, a1, a2, a3, a4, a5) \
LOG_RTCERR5_EX(func, a1, a2, a3, a4, a5, GetLastEngineError())
#define LOG_RTCERR6(func, a1, a2, a3, a4, a5, a6) \
LOG_RTCERR6_EX(func, a1, a2, a3, a4, a5, a6, GetLastEngineError())
#define LOG_RTCERR0_EX(func, err) LOG(LS_WARNING) \
<< "" << #func << "() failed, err=" << err
#define LOG_RTCERR1_EX(func, a1, err) LOG(LS_WARNING) \
<< "" << #func << "(" << a1 << ") failed, err=" << err
#define LOG_RTCERR2_EX(func, a1, a2, err) LOG(LS_WARNING) \
<< "" << #func << "(" << a1 << ", " << a2 << ") failed, err=" \
<< err
#define LOG_RTCERR3_EX(func, a1, a2, a3, err) LOG(LS_WARNING) \
<< "" << #func << "(" << a1 << ", " << a2 << ", " << a3 \
<< ") failed, err=" << err
#define LOG_RTCERR4_EX(func, a1, a2, a3, a4, err) LOG(LS_WARNING) \
<< "" << #func << "(" << a1 << ", " << a2 << ", " << a3 \
<< ", " << a4 << ") failed, err=" << err
#define LOG_RTCERR5_EX(func, a1, a2, a3, a4, a5, err) LOG(LS_WARNING) \
<< "" << #func << "(" << a1 << ", " << a2 << ", " << a3 \
<< ", " << a4 << ", " << a5 << ") failed, err=" << err
#define LOG_RTCERR6_EX(func, a1, a2, a3, a4, a5, a6, err) LOG(LS_WARNING) \
<< "" << #func << "(" << a1 << ", " << a2 << ", " << a3 \
<< ", " << a4 << ", " << a5 << ", " << a6 << ") failed, err=" << err
} // namespace cricket
#endif // TALK_SESSION_PHONE_WEBRTCCOMMON_H_

View File

@@ -0,0 +1,916 @@
/*
* libjingle
* Copyright 2004--2011, Google Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
* EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifdef HAVE_WEBRTC
#include "talk/session/phone/webrtcvideoengine.h"
#include "talk/base/common.h"
#include "talk/base/buffer.h"
#include "talk/base/byteorder.h"
#include "talk/base/logging.h"
#include "talk/base/stringutils.h"
#include "talk/session/phone/webrtcvoiceengine.h"
#include "talk/session/phone/webrtcvideoframe.h"
#include "talk/session/phone/webrtcvie.h"
#include "talk/session/phone/webrtcvoe.h"
namespace cricket {
static const int kDefaultLogSeverity = talk_base::LS_WARNING;
static const int kStartVideoBitrate = 300;
static const int kMaxVideoBitrate = 1000;
class WebRtcRenderAdapter : public webrtc::ExternalRenderer {
public:
explicit WebRtcRenderAdapter(VideoRenderer* renderer)
: renderer_(renderer) {
}
virtual int FrameSizeChange(unsigned int width, unsigned int height,
unsigned int /*number_of_streams*/) {
ASSERT(renderer_ != NULL);
width_ = width;
height_ = height;
return renderer_->SetSize(width_, height_, 0) ? 0 : -1;
}
virtual int DeliverFrame(unsigned char* buffer, int buffer_size) {
ASSERT(renderer_ != NULL);
WebRtcVideoFrame video_frame;
// TODO(ronghuawu): By the time DeliverFrame is called, ViE expects the
// frame to be rendered ASAP. However, the libjingle renderer may have its
// own internal delays. Can we disable the buffering inside ViE and surface
// the timing information to this callback?
video_frame.Attach(buffer, buffer_size, width_, height_, 0, 0);
int ret = renderer_->RenderFrame(&video_frame) ? 0 : -1;
uint8* buffer_temp;
size_t buffer_size_temp;
video_frame.Detach(&buffer_temp, &buffer_size_temp);
return ret;
}
virtual ~WebRtcRenderAdapter() {}
private:
VideoRenderer* renderer_;
unsigned int width_;
unsigned int height_;
};
const WebRtcVideoEngine::VideoCodecPref
WebRtcVideoEngine::kVideoCodecPrefs[] = {
{"VP8", 104, 0},
{"H264", 105, 1}
};
WebRtcVideoEngine::WebRtcVideoEngine()
: vie_wrapper_(new ViEWrapper()),
capture_(NULL),
external_capture_(false),
capture_id_(-1),
renderer_(webrtc::VideoRender::CreateVideoRender(0, NULL,
false, webrtc::kRenderExternal)),
voice_engine_(NULL),
log_level_(kDefaultLogSeverity),
capture_started_(false) {
}
WebRtcVideoEngine::WebRtcVideoEngine(WebRtcVoiceEngine* voice_engine,
webrtc::VideoCaptureModule* capture)
: vie_wrapper_(new ViEWrapper()),
capture_(capture),
external_capture_(true),
capture_id_(-1),
renderer_(webrtc::VideoRender::CreateVideoRender(0, NULL,
false, webrtc::kRenderExternal)),
voice_engine_(voice_engine),
log_level_(kDefaultLogSeverity),
capture_started_(false) {
}
WebRtcVideoEngine::WebRtcVideoEngine(WebRtcVoiceEngine* voice_engine,
ViEWrapper* vie_wrapper)
: vie_wrapper_(vie_wrapper),
capture_(NULL),
external_capture_(false),
capture_id_(-1),
renderer_(webrtc::VideoRender::CreateVideoRender(0, NULL,
false, webrtc::kRenderExternal)),
voice_engine_(voice_engine),
log_level_(kDefaultLogSeverity),
capture_started_(false) {
}
WebRtcVideoEngine::~WebRtcVideoEngine() {
LOG(LS_INFO) << " WebRtcVideoEngine::~WebRtcVideoEngine";
vie_wrapper_->engine()->SetTraceCallback(NULL);
Terminate();
vie_wrapper_.reset();
if (capture_) {
webrtc::VideoCaptureModule::Destroy(capture_);
}
if (renderer_) {
webrtc::VideoRender::DestroyVideoRender(renderer_);
}
}
bool WebRtcVideoEngine::Init() {
LOG(LS_INFO) << "WebRtcVideoEngine::Init";
ApplyLogging();
if (vie_wrapper_->engine()->SetTraceCallback(this) != 0) {
LOG_RTCERR1(SetTraceCallback, this);
}
bool result = InitVideoEngine();
if (result) {
LOG(LS_INFO) << "VideoEngine Init done";
} else {
LOG(LS_ERROR) << "VideoEngine Init failed, releasing";
Terminate();
}
return result;
}
bool WebRtcVideoEngine::InitVideoEngine() {
LOG(LS_INFO) << "WebRtcVideoEngine::InitVideoEngine";
if (vie_wrapper_->base()->Init() != 0) {
LOG_RTCERR0(Init);
return false;
}
if (!voice_engine_) {
LOG(LS_WARNING) << "NULL voice engine";
} else if ((vie_wrapper_->base()->SetVoiceEngine(
voice_engine_->voe()->engine())) != 0) {
LOG_RTCERR0(SetVoiceEngine);
return false;
}
if ((vie_wrapper_->base()->RegisterObserver(*this)) != 0) {
LOG_RTCERR0(RegisterObserver);
return false;
}
int ncodecs = vie_wrapper_->codec()->NumberOfCodecs();
for (int i = 0; i < ncodecs; ++i) {
webrtc::VideoCodec wcodec;
if ((vie_wrapper_->codec()->GetCodec(i, wcodec) == 0) &&
(strncmp(wcodec.plName, "I420", 4) != 0) &&
(strncmp(wcodec.plName, "ULPFEC", 4) != 0) &&
(strncmp(wcodec.plName, "RED", 4) != 0)) {
// ignore I420, FEC(RED and ULPFEC)
VideoCodec codec(wcodec.plType, wcodec.plName, wcodec.width,
wcodec.height, wcodec.maxFramerate, i);
LOG(LS_INFO) << codec.ToString();
video_codecs_.push_back(codec);
}
}
if (vie_wrapper_->render()->RegisterVideoRenderModule(*renderer_) != 0) {
LOG_RTCERR0(RegisterVideoRenderModule);
return false;
}
std::sort(video_codecs_.begin(), video_codecs_.end(),
&VideoCodec::Preferable);
return true;
}
void WebRtcVideoEngine::PerformanceAlarm(const unsigned int cpu_load) {
LOG(LS_INFO) << "WebRtcVideoEngine::PerformanceAlarm";
}
// Ignore spammy trace messages, mostly from the stats API when we haven't
// gotten RTCP info yet from the remote side.
static bool ShouldIgnoreTrace(const std::string& trace) {
static const char* kTracesToIgnore[] = {
"\tfailed to GetReportBlockInformation",
NULL
};
for (const char* const* p = kTracesToIgnore; *p; ++p) {
if (trace.find(*p) == 0) {
return true;
}
}
return false;
}
void WebRtcVideoEngine::Print(const webrtc::TraceLevel level,
const char* trace, const int length) {
talk_base::LoggingSeverity sev = talk_base::LS_VERBOSE;
if (level == webrtc::kTraceError || level == webrtc::kTraceCritical)
sev = talk_base::LS_ERROR;
else if (level == webrtc::kTraceWarning)
sev = talk_base::LS_WARNING;
else if (level == webrtc::kTraceStateInfo || level == webrtc::kTraceInfo)
sev = talk_base::LS_INFO;
if (sev >= log_level_) {
// Skip past boilerplate prefix text
if (length < 72) {
std::string msg(trace, length);
LOG(LS_ERROR) << "Malformed webrtc log message: ";
LOG_V(sev) << msg;
} else {
std::string msg(trace + 71, length - 72);
if (!ShouldIgnoreTrace(msg)) {
LOG_V(sev) << "WebRtc ViE:" << msg;
}
}
}
}
int WebRtcVideoEngine::GetCodecPreference(const char* name) {
for (size_t i = 0; i < ARRAY_SIZE(kVideoCodecPrefs); ++i) {
if (strcmp(kVideoCodecPrefs[i].payload_name, name) == 0) {
return kVideoCodecPrefs[i].pref;
}
}
return -1;
}
void WebRtcVideoEngine::ApplyLogging() {
// Accumulate the trace filter; each case intentionally falls through to
// include everything below it.
int filter = 0;
switch (log_level_) {
case talk_base::LS_VERBOSE: filter |= webrtc::kTraceAll;
case talk_base::LS_INFO: filter |= webrtc::kTraceStateInfo;
case talk_base::LS_WARNING: filter |= webrtc::kTraceWarning;
case talk_base::LS_ERROR: filter |=
webrtc::kTraceError | webrtc::kTraceCritical;
}
// Apply the accumulated filter; without this call the computed value is unused.
vie_wrapper_->engine()->SetTraceFilter(filter);
}
void WebRtcVideoEngine::Terminate() {
LOG(LS_INFO) << "WebRtcVideoEngine::Terminate";
SetCapture(false);
if (local_renderer_.get()) {
// If a renderer is already set, stop it first.
if (vie_wrapper_->render()->StopRender(capture_id_) != 0)
LOG_RTCERR1(StopRender, capture_id_);
}
if (vie_wrapper_->render()->DeRegisterVideoRenderModule(*renderer_) != 0)
LOG_RTCERR0(DeRegisterVideoRenderModule);
if ((vie_wrapper_->base()->DeregisterObserver()) != 0)
LOG_RTCERR0(DeregisterObserver);
if ((vie_wrapper_->base()->SetVoiceEngine(NULL)) != 0)
LOG_RTCERR0(SetVoiceEngine);
if (vie_wrapper_->engine()->SetTraceCallback(NULL) != 0)
LOG_RTCERR0(SetTraceCallback);
}
int WebRtcVideoEngine::GetCapabilities() {
return MediaEngine::VIDEO_RECV | MediaEngine::VIDEO_SEND;
}
bool WebRtcVideoEngine::SetOptions(int options) {
return true;
}
bool WebRtcVideoEngine::ReleaseCaptureDevice() {
if (capture_id_ != -1) {
// Stop capture
SetCapture(false);
// DisconnectCaptureDevice
WebRtcVideoMediaChannel* channel;
for (VideoChannels::const_iterator it = channels_.begin();
it != channels_.end(); ++it) {
ASSERT(*it != NULL);
channel = *it;
vie_wrapper_->capture()->DisconnectCaptureDevice(
channel->video_channel());
}
// ReleaseCaptureDevice
vie_wrapper_->capture()->ReleaseCaptureDevice(capture_id_);
capture_id_ = -1;
}
return true;
}
bool WebRtcVideoEngine::SetCaptureDevice(const Device* cam) {
ASSERT(vie_wrapper_.get());
ASSERT(cam != NULL);
ReleaseCaptureDevice();
webrtc::ViECapture* vie_capture = vie_wrapper_->capture();
// There's an external VCM
if (capture_) {
if (vie_capture->AllocateCaptureDevice(*capture_, capture_id_) != 0)
ASSERT(capture_id_ == -1);
} else if (!external_capture_) {
const unsigned int KMaxDeviceNameLength = 128;
const unsigned int KMaxUniqueIdLength = 256;
char device_name[KMaxDeviceNameLength];
char device_id[KMaxUniqueIdLength];
bool found = false;
for (int i = 0; i < vie_capture->NumberOfCaptureDevices(); ++i) {
memset(device_name, 0, KMaxDeviceNameLength);
memset(device_id, 0, KMaxUniqueIdLength);
if (vie_capture->GetCaptureDevice(i, device_name, KMaxDeviceNameLength,
device_id, KMaxUniqueIdLength) == 0) {
// TODO(ronghuawu): We should only compare the device_id here,
// however the devicemanager and webrtc use different formats for the v4l2
// device id. So for now we also compare the device_name.
// For example "usb-0000:00:1d.7-6" vs "/dev/video0".
if ((cam->name.compare(reinterpret_cast<char*>(device_name)) == 0) ||
(cam->id.compare(reinterpret_cast<char*>(device_id)) == 0)) {
LOG(INFO) << "Found video capture device: " << device_name;
found = true;
break;
}
}
}
if (!found)
return false;
if (vie_capture->AllocateCaptureDevice(device_id, KMaxUniqueIdLength,
capture_id_) != 0)
ASSERT(capture_id_ == -1);
}
if (capture_id_ != -1) {
// Connect to all the channels
WebRtcVideoMediaChannel* channel;
for (VideoChannels::const_iterator it = channels_.begin();
it != channels_.end(); ++it) {
ASSERT(*it != NULL);
channel = *it;
vie_capture->ConnectCaptureDevice(capture_id_, channel->video_channel());
}
SetCapture(true);
}
return (capture_id_ != -1);
}
bool WebRtcVideoEngine::SetCaptureModule(webrtc::VideoCaptureModule* vcm) {
ReleaseCaptureDevice();
if (capture_) {
webrtc::VideoCaptureModule::Destroy(capture_);
}
capture_ = vcm;
external_capture_ = true;
return true;
}
bool WebRtcVideoEngine::SetLocalRenderer(VideoRenderer* renderer) {
if (local_renderer_.get()) {
// If a renderer is already set, stop it first.
vie_wrapper_->render()->StopRender(capture_id_);
}
local_renderer_.reset(new WebRtcRenderAdapter(renderer));
int ret;
ret = vie_wrapper_->render()->AddRenderer(capture_id_,
webrtc::kVideoI420,
local_renderer_.get());
if (ret != 0)
return false;
ret = vie_wrapper_->render()->StartRender(capture_id_);
return (ret == 0);
}
CaptureResult WebRtcVideoEngine::SetCapture(bool capture) {
if ((capture_started_ != capture) && (capture_id_ != -1)) {
int ret;
if (capture)
ret = vie_wrapper_->capture()->StartCapture(capture_id_);
else
ret = vie_wrapper_->capture()->StopCapture(capture_id_);
if (ret != 0)
return CR_NO_DEVICE;
capture_started_ = capture;
}
return CR_SUCCESS;
}
const std::vector<VideoCodec>& WebRtcVideoEngine::codecs() const {
return video_codecs_;
}
void WebRtcVideoEngine::SetLogging(int min_sev, const char* filter) {
log_level_ = min_sev;
ApplyLogging();
}
int WebRtcVideoEngine::GetLastEngineError() {
return vie_wrapper_->error();
}
bool WebRtcVideoEngine::SetDefaultEncoderConfig(
const VideoEncoderConfig& config) {
default_encoder_config_ = config;
return true;
}
WebRtcVideoMediaChannel* WebRtcVideoEngine::CreateChannel(
VoiceMediaChannel* voice_channel) {
WebRtcVideoMediaChannel* channel =
new WebRtcVideoMediaChannel(this, voice_channel);
if (channel) {
if (!channel->Init()) {
delete channel;
channel = NULL;
}
}
return channel;
}
bool WebRtcVideoEngine::FindCodec(const VideoCodec& codec) {
for (size_t i = 0; i < video_codecs_.size(); ++i) {
if (video_codecs_[i].Matches(codec)) {
return true;
}
}
return false;
}
void WebRtcVideoEngine::ConvertToCricketVideoCodec(
const webrtc::VideoCodec& in_codec, VideoCodec& out_codec) {
out_codec.id = in_codec.plType;
out_codec.name = in_codec.plName;
out_codec.width = in_codec.width;
out_codec.height = in_codec.height;
out_codec.framerate = in_codec.maxFramerate;
}
bool WebRtcVideoEngine::ConvertFromCricketVideoCodec(
const VideoCodec& in_codec, webrtc::VideoCodec& out_codec) {
bool found = false;
int ncodecs = vie_wrapper_->codec()->NumberOfCodecs();
for (int i = 0; i < ncodecs; ++i) {
if ((vie_wrapper_->codec()->GetCodec(i, out_codec) == 0) &&
(strncmp(out_codec.plName,
in_codec.name.c_str(),
webrtc::kPayloadNameSize - 1) == 0)) {
found = true;
break;
}
}
if (!found) {
LOG(LS_ERROR) << "invalid codec type";
return false;
}
if (in_codec.id != 0)
out_codec.plType = in_codec.id;
if (in_codec.width != 0)
out_codec.width = in_codec.width;
if (in_codec.height != 0)
out_codec.height = in_codec.height;
if (in_codec.framerate != 0)
out_codec.maxFramerate = in_codec.framerate;
out_codec.maxBitrate = kMaxVideoBitrate;
out_codec.startBitrate = kStartVideoBitrate;
out_codec.minBitrate = kStartVideoBitrate;
return true;
}
int WebRtcVideoEngine::GetLastVideoEngineError() {
return vie_wrapper_->base()->LastError();
}
void WebRtcVideoEngine::RegisterChannel(WebRtcVideoMediaChannel *channel) {
channels_.push_back(channel);
}
void WebRtcVideoEngine::UnregisterChannel(WebRtcVideoMediaChannel *channel) {
VideoChannels::iterator i = std::find(channels_.begin(),
channels_.end(),
channel);
if (i != channels_.end()) {
channels_.erase(i);
}
}
// WebRtcVideoMediaChannel
WebRtcVideoMediaChannel::WebRtcVideoMediaChannel(
WebRtcVideoEngine* engine, VoiceMediaChannel* channel)
: engine_(engine),
voice_channel_(channel),
vie_channel_(-1),
sending_(false),
render_started_(false),
send_codec_(NULL) {
engine->RegisterChannel(this);
}
bool WebRtcVideoMediaChannel::Init() {
bool ret = true;
if (engine_->video_engine()->base()->CreateChannel(vie_channel_) != 0) {
LOG_RTCERR1(CreateChannel, vie_channel_);
return false;
}
LOG(LS_INFO) << "WebRtcVideoMediaChannel::Init "
<< "video_channel " << vie_channel_ << " created";
// connect audio channel
if (voice_channel_) {
WebRtcVoiceMediaChannel* channel =
static_cast<WebRtcVoiceMediaChannel*> (voice_channel_);
if (engine_->video_engine()->base()->ConnectAudioChannel(
vie_channel_, channel->voe_channel()) != 0) {
LOG(LS_WARNING) << "ViE ConnectAudioChannel failed"
<< "A/V not synchronized";
// Don't set ret to false;
}
}
// Register external transport
if (engine_->video_engine()->network()->RegisterSendTransport(
vie_channel_, *this) != 0) {
ret = false;
} else {
// EnableRtcp(); // by default RTCP is disabled.
EnablePLI();
}
return ret;
}
WebRtcVideoMediaChannel::~WebRtcVideoMediaChannel() {
// Stop and remove the remote renderer.
SetRender(false);
if (engine()->video_engine()->render()->RemoveRenderer(vie_channel_)
== -1) {
LOG_RTCERR1(RemoveRenderer, vie_channel_);
}
// DeRegister external transport
if (engine()->video_engine()->network()->DeregisterSendTransport(
vie_channel_) == -1) {
LOG_RTCERR1(DeregisterSendTransport, vie_channel_);
}
// Unregister the channel from the engine.
engine()->UnregisterChannel(this);
// Delete VideoChannel
if (engine()->video_engine()->base()->DeleteChannel(vie_channel_) == -1) {
LOG_RTCERR1(DeleteChannel, vie_channel_);
}
}
bool WebRtcVideoMediaChannel::SetRecvCodecs(
const std::vector<VideoCodec>& codecs) {
bool ret = true;
for (std::vector<VideoCodec>::const_iterator iter = codecs.begin();
iter != codecs.end(); ++iter) {
if (engine()->FindCodec(*iter)) {
webrtc::VideoCodec wcodec;
if (engine()->ConvertFromCricketVideoCodec(*iter, wcodec)) {
if (engine()->video_engine()->codec()->SetReceiveCodec(
vie_channel_, wcodec) != 0) {
LOG_RTCERR2(SetReceiveCodec, vie_channel_, wcodec.plName);
ret = false;
}
}
} else {
LOG(LS_INFO) << "Unknown codec" << iter->name;
ret = false;
}
}
// make channel ready to receive packets
if (ret) {
if (engine()->video_engine()->base()->StartReceive(vie_channel_) != 0) {
LOG_RTCERR1(StartReceive, vie_channel_);
ret = false;
}
}
return ret;
}
bool WebRtcVideoMediaChannel::SetSendCodecs(
const std::vector<VideoCodec>& codecs) {
if (sending_) {
LOG(LS_ERROR) << "channel is alredy sending";
return false;
}
// match with local video codec list
std::vector<webrtc::VideoCodec> send_codecs;
for (std::vector<VideoCodec>::const_iterator iter = codecs.begin();
iter != codecs.end(); ++iter) {
if (engine()->FindCodec(*iter)) {
webrtc::VideoCodec wcodec;
if (engine()->ConvertFromCricketVideoCodec(*iter, wcodec))
send_codecs.push_back(wcodec);
}
}
// If none matched, fail without changing the send codec.
if (send_codecs.empty()) {
LOG(LS_ERROR) << "No matching codecs available";
return false;
}
// select the first matched codec
const webrtc::VideoCodec& codec(send_codecs[0]);
send_codec_.reset(new webrtc::VideoCodec(codec));
if (engine()->video_engine()->codec()->SetSendCodec(
vie_channel_, codec) != 0) {
LOG_RTCERR2(SetSendCodec, vie_channel_, codec.plName);
return false;
}
return true;
}
bool WebRtcVideoMediaChannel::SetRender(bool render) {
if (render != render_started_) {
int ret;
if (render) {
ret = engine()->video_engine()->render()->StartRender(vie_channel_);
} else {
ret = engine()->video_engine()->render()->StopRender(vie_channel_);
}
if (ret != 0) {
return false;
}
render_started_ = render;
}
return true;
}
bool WebRtcVideoMediaChannel::SetSend(bool send) {
if (send == sending()) {
return true; // no action required
}
bool ret = true;
if (send) { // enable
if (engine()->video_engine()->base()->StartSend(vie_channel_) != 0) {
LOG_RTCERR1(StartSend, vie_channel_);
ret = false;
}
} else { // disable
if (engine()->video_engine()->base()->StopSend(vie_channel_) != 0) {
LOG_RTCERR1(StopSend, vie_channel_);
ret = false;
}
}
if (ret)
sending_ = send;
return ret;
}
bool WebRtcVideoMediaChannel::AddStream(uint32 ssrc, uint32 voice_ssrc) {
return false;
}
bool WebRtcVideoMediaChannel::RemoveStream(uint32 ssrc) {
return false;
}
bool WebRtcVideoMediaChannel::SetRenderer(
uint32 ssrc, VideoRenderer* renderer) {
ASSERT(vie_channel_ != -1);
if (ssrc != 0)
return false;
if (remote_renderer_.get()) {
// If a renderer is already set, stop it first.
engine_->video_engine()->render()->StopRender(vie_channel_);
}
remote_renderer_.reset(new WebRtcRenderAdapter(renderer));
if (engine_->video_engine()->render()->AddRenderer(vie_channel_,
webrtc::kVideoI420, remote_renderer_.get()) != 0) {
LOG_RTCERR3(AddRenderer, vie_channel_, webrtc::kVideoI420,
remote_renderer_.get());
remote_renderer_.reset();
return false;
}
if (engine_->video_engine()->render()->StartRender(vie_channel_) != 0) {
LOG_RTCERR1(StartRender, vie_channel_);
return false;
}
return true;
}
bool WebRtcVideoMediaChannel::GetStats(VideoMediaInfo* info) {
VideoSenderInfo sinfo;
memset(&sinfo, 0, sizeof(sinfo));
unsigned int ssrc;
if (engine_->video_engine()->rtp()->GetLocalSSRC(vie_channel_,
ssrc) != 0) {
LOG_RTCERR2(GetLocalSSRC, vie_channel_, ssrc);
return false;
}
sinfo.ssrc = ssrc;
unsigned int cumulative_lost, extended_max, jitter;
int rtt_ms;
uint16 fraction_lost;
if (engine_->video_engine()->rtp()->GetReceivedRTCPStatistics(vie_channel_,
fraction_lost, cumulative_lost, extended_max, jitter, rtt_ms) != 0) {
LOG_RTCERR6(GetReceivedRTCPStatistics, vie_channel_,
fraction_lost, cumulative_lost, extended_max, jitter, rtt_ms);
return false;
}
sinfo.fraction_lost = fraction_lost;
sinfo.packets_lost = cumulative_lost;
sinfo.rtt_ms = rtt_ms;
unsigned int bytes_sent, packets_sent, bytes_recv, packets_recv;
if (engine_->video_engine()->rtp()->GetRTPStatistics(vie_channel_,
bytes_sent, packets_sent, bytes_recv, packets_recv) != 0) {
LOG_RTCERR5(GetRTPStatistics, vie_channel_,
bytes_sent, packets_sent, bytes_recv, packets_recv);
return false;
}
sinfo.packets_sent = packets_sent;
sinfo.bytes_sent = bytes_sent;
sinfo.packets_lost = -1;
sinfo.packets_cached = -1;
info->senders.push_back(sinfo);
// build receiver info.
// reusing the above local variables
VideoReceiverInfo rinfo;
memset(&rinfo, 0, sizeof(rinfo));
if (engine_->video_engine()->rtp()->GetSentRTCPStatistics(vie_channel_,
fraction_lost, cumulative_lost, extended_max, jitter, rtt_ms) != 0) {
LOG_RTCERR6(GetSentRTCPStatistics, vie_channel_,
fraction_lost, cumulative_lost, extended_max, jitter, rtt_ms);
return false;
}
rinfo.bytes_rcvd = bytes_recv;
rinfo.packets_rcvd = packets_recv;
rinfo.fraction_lost = fraction_lost;
rinfo.packets_lost = cumulative_lost;
if (engine_->video_engine()->rtp()->GetRemoteSSRC(vie_channel_,
ssrc) != 0) {
return false;
}
rinfo.ssrc = ssrc;
// TODO: Fill in the codec's width and height for the receiver info.
info->receivers.push_back(rinfo);
return true;
}
bool WebRtcVideoMediaChannel::SendIntraFrame() {
bool ret = true;
if (engine()->video_engine()->codec()->SendKeyFrame(vie_channel_) != 0) {
LOG_RTCERR1(SendKeyFrame, vie_channel_);
ret = false;
}
return ret;
}
bool WebRtcVideoMediaChannel::RequestIntraFrame() {
// There is no API exposed to the application to request a key frame;
// ViE requests one internally when the decoder reports errors.
return false;
}
void WebRtcVideoMediaChannel::OnPacketReceived(talk_base::Buffer* packet) {
engine()->video_engine()->network()->ReceivedRTPPacket(vie_channel_,
packet->data(),
packet->length());
}
void WebRtcVideoMediaChannel::OnRtcpReceived(talk_base::Buffer* packet) {
engine_->video_engine()->network()->ReceivedRTCPPacket(vie_channel_,
packet->data(),
packet->length());
}
void WebRtcVideoMediaChannel::SetSendSsrc(uint32 id) {
if (!sending_) {
if (engine()->video_engine()->rtp()->SetLocalSSRC(vie_channel_,
id) != 0) {
LOG_RTCERR1(SetLocalSSRC, vie_channel_);
}
} else {
LOG(LS_ERROR) << "Channel already in send state";
}
}
bool WebRtcVideoMediaChannel::SetRtcpCName(const std::string& cname) {
if (engine()->video_engine()->rtp()->SetRTCPCName(vie_channel_,
cname.c_str()) != 0) {
LOG_RTCERR2(SetRTCPCName, vie_channel_, cname.c_str());
return false;
}
return true;
}
bool WebRtcVideoMediaChannel::Mute(bool on) {
// TODO: Not implemented; possibly map mute to stopping the send stream.
return false;
}
bool WebRtcVideoMediaChannel::SetSendBandwidth(bool autobw, int bps) {
LOG(LS_INFO) << "RtcVideoMediaChanne::SetSendBandwidth";
if (!send_codec_.get()) {
LOG(LS_INFO) << "The send codec has not been set up yet.";
return true;
}
if (!autobw) {
send_codec_->startBitrate = bps;
send_codec_->minBitrate = bps;
}
send_codec_->maxBitrate = bps;
if (engine()->video_engine()->codec()->SetSendCodec(vie_channel_,
*send_codec_.get()) != 0) {
LOG_RTCERR2(SetSendCodec, vie_channel_, send_codec_->plName);
return false;
}
return true;
}
bool WebRtcVideoMediaChannel::SetOptions(int options) {
return true;
}
void WebRtcVideoMediaChannel::EnableRtcp() {
engine()->video_engine()->rtp()->SetRTCPStatus(
vie_channel_, webrtc::kRtcpCompound_RFC4585);
}
void WebRtcVideoMediaChannel::EnablePLI() {
engine_->video_engine()->rtp()->SetKeyFrameRequestMethod(
vie_channel_, webrtc::kViEKeyFrameRequestPliRtcp);
}
void WebRtcVideoMediaChannel::EnableTMMBR() {
engine_->video_engine()->rtp()->SetTMMBRStatus(vie_channel_, true);
}
int WebRtcVideoMediaChannel::SendPacket(int channel, const void* data,
int len) {
if (!network_interface_) {
return -1;
}
talk_base::Buffer packet(data, len, kMaxRtpPacketLen);
return network_interface_->SendPacket(&packet) ? len : -1;
}
int WebRtcVideoMediaChannel::SendRTCPPacket(int channel,
const void* data,
int len) {
if (!network_interface_) {
return -1;
}
talk_base::Buffer packet(data, len, kMaxRtpPacketLen);
return network_interface_->SendRtcp(&packet) ? len : -1;
}
} // namespace cricket
#endif // HAVE_WEBRTC
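
For orientation, here is how the pieces in this file fit together at runtime; a minimal sketch, assuming a hypothetical caller (StartLocalVideo and its arguments are illustrative, while the engine and channel calls are the ones defined above):

#include "talk/base/scoped_ptr.h"
#include "talk/session/phone/webrtcvideoengine.h"

bool StartLocalVideo(const cricket::Device* camera,
                     cricket::VideoRenderer* preview) {
  // Default constructor: internal capture module and no voice engine, so
  // CreateChannel(NULL) below simply skips the A/V-sync audio hookup.
  cricket::WebRtcVideoEngine engine;
  if (!engine.Init())
    return false;
  // Create the channel first: SetCaptureDevice() connects the capture
  // device to every channel registered with the engine at that point.
  talk_base::scoped_ptr<cricket::WebRtcVideoMediaChannel> channel(
      engine.CreateChannel(NULL));
  if (!channel.get())
    return false;
  if (!engine.SetCaptureDevice(camera) ||  // also starts capturing
      !engine.SetLocalRenderer(preview))   // local preview of the capture
    return false;
  // Offer everything ViE enumerated; SetSendCodecs() uses the first match.
  if (!channel->SetSendCodecs(engine.codecs()))
    return false;
  return channel->SetSend(true);
}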

View File

@ -0,0 +1,197 @@
/*
* libjingle
* Copyright 2004--2011, Google Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
* EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef TALK_SESSION_PHONE_WEBRTCVIDEOENGINE_H_
#define TALK_SESSION_PHONE_WEBRTCVIDEOENGINE_H_
#include <vector>
#include "talk/base/scoped_ptr.h"
#include "talk/session/phone/videocommon.h"
#include "talk/session/phone/codec.h"
#include "talk/session/phone/channel.h"
#include "talk/session/phone/mediaengine.h"
#include "talk/session/phone/webrtccommon.h"
namespace webrtc {
class VideoCaptureModule;
class VideoRender;
}
namespace cricket {
struct Device;
class VideoRenderer;
class ViEWrapper;
class VoiceMediaChannel;
class WebRtcRenderAdapter;
class WebRtcVideoMediaChannel;
class WebRtcVoiceEngine;
class WebRtcVideoEngine : public webrtc::ViEBaseObserver,
public webrtc::TraceCallback {
public:
// Creates the WebRtcVideoEngine with internal VideoCaptureModule.
WebRtcVideoEngine();
// Creates the WebRtcVideoEngine, and specifies the WebRtcVoiceEngine and
// external VideoCaptureModule to use.
WebRtcVideoEngine(WebRtcVoiceEngine* voice_engine,
webrtc::VideoCaptureModule* capture);
// For testing purposes. Allows the WebRtcVoiceEngine and
// ViEWrapper to be mocks.
WebRtcVideoEngine(WebRtcVoiceEngine* voice_engine, ViEWrapper* vie_wrapper);
~WebRtcVideoEngine();
bool Init();
void Terminate();
WebRtcVideoMediaChannel* CreateChannel(
VoiceMediaChannel* voice_channel);
bool FindCodec(const VideoCodec& codec);
bool SetDefaultEncoderConfig(const VideoEncoderConfig& config);
void RegisterChannel(WebRtcVideoMediaChannel* channel);
void UnregisterChannel(WebRtcVideoMediaChannel* channel);
ViEWrapper* video_engine() { return vie_wrapper_.get(); }
int GetLastVideoEngineError();
int GetCapabilities();
bool SetOptions(int options);
bool SetCaptureDevice(const Device* device);
bool SetCaptureModule(webrtc::VideoCaptureModule* vcm);
bool SetLocalRenderer(VideoRenderer* renderer);
CaptureResult SetCapture(bool capture);
const std::vector<VideoCodec>& codecs() const;
void SetLogging(int min_sev, const char* filter);
int GetLastEngineError();
VideoEncoderConfig& default_encoder_config() {
return default_encoder_config_;
}
void ConvertToCricketVideoCodec(const webrtc::VideoCodec& in_codec,
VideoCodec& out_codec);
bool ConvertFromCricketVideoCodec(const VideoCodec& in_codec,
webrtc::VideoCodec& out_codec);
sigslot::signal1<CaptureResult> SignalCaptureResult;
private:
struct VideoCodecPref {
const char* payload_name;
int payload_type;
int pref;
};
static const VideoCodecPref kVideoCodecPrefs[];
int GetCodecPreference(const char* name);
void ApplyLogging();
bool InitVideoEngine();
void PerformanceAlarm(const unsigned int cpu_load);
bool ReleaseCaptureDevice();
virtual void Print(const webrtc::TraceLevel level, const char* trace_string,
const int length);
typedef std::vector<WebRtcVideoMediaChannel*> VideoChannels;
talk_base::scoped_ptr<ViEWrapper> vie_wrapper_;
webrtc::VideoCaptureModule* capture_;
bool external_capture_;
int capture_id_;
webrtc::VideoRender* renderer_;
WebRtcVoiceEngine* voice_engine_;
std::vector<VideoCodec> video_codecs_;
VideoChannels channels_;
int log_level_;
VideoEncoderConfig default_encoder_config_;
bool capture_started_;
talk_base::scoped_ptr<WebRtcRenderAdapter> local_renderer_;
};
class WebRtcVideoMediaChannel : public VideoMediaChannel,
public webrtc::Transport {
public:
WebRtcVideoMediaChannel(
WebRtcVideoEngine* engine, VoiceMediaChannel* voice_channel);
~WebRtcVideoMediaChannel();
bool Init();
virtual bool SetRecvCodecs(const std::vector<VideoCodec> &codecs);
virtual bool SetSendCodecs(const std::vector<VideoCodec> &codecs);
virtual bool SetRender(bool render);
virtual bool SetSend(bool send);
virtual bool AddStream(uint32 ssrc, uint32 voice_ssrc);
virtual bool RemoveStream(uint32 ssrc);
virtual bool SetRenderer(uint32 ssrc, VideoRenderer* renderer);
virtual bool GetStats(VideoMediaInfo* info);
virtual bool SendIntraFrame();
virtual bool RequestIntraFrame();
virtual void OnPacketReceived(talk_base::Buffer* packet);
virtual void OnRtcpReceived(talk_base::Buffer* packet);
virtual void SetSendSsrc(uint32 id);
virtual bool SetRtcpCName(const std::string& cname);
virtual bool Mute(bool on);
virtual bool SetRecvRtpHeaderExtensions(
const std::vector<RtpHeaderExtension>& extensions) {
return false;
}
virtual bool SetSendRtpHeaderExtensions(
const std::vector<RtpHeaderExtension>& extensions) {
return false;
}
virtual bool SetSendBandwidth(bool autobw, int bps);
virtual bool SetOptions(int options);
WebRtcVideoEngine* engine() { return engine_; }
VoiceMediaChannel* voice_channel() { return voice_channel_; }
int video_channel() { return vie_channel_; }
bool sending() { return sending_; }
protected:
int GetLastEngineError() { return engine()->GetLastEngineError(); }
virtual int SendPacket(int channel, const void* data, int len);
virtual int SendRTCPPacket(int channel, const void* data, int len);
private:
void EnableRtcp();
void EnablePLI();
void EnableTMMBR();
WebRtcVideoEngine* engine_;
VoiceMediaChannel* voice_channel_;
int vie_channel_;
bool sending_;
bool render_started_;
talk_base::scoped_ptr<webrtc::VideoCodec> send_codec_;
talk_base::scoped_ptr<WebRtcRenderAdapter> remote_renderer_;
};
} // namespace cricket
#endif // TALK_SESSION_PHONE_WEBRTCVIDEOENGINE_H_
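
The receive side is driven the same way; a minimal sketch, assuming the channel was created as in the send-side example (the helper, codec list, and packet buffers are hypothetical):

#include <vector>
#include "talk/session/phone/webrtcvideoengine.h"

bool StartRemoteVideo(cricket::WebRtcVideoMediaChannel* channel,
                      const std::vector<cricket::VideoCodec>& remote_codecs,
                      cricket::VideoRenderer* remote_renderer) {
  // On success, SetRecvCodecs() also calls StartReceive() on the channel.
  if (!channel->SetRecvCodecs(remote_codecs))
    return false;
  // Only ssrc 0 is accepted by this implementation; SetRenderer() both
  // registers the render adapter and starts rendering on the channel.
  return channel->SetRenderer(0, remote_renderer);
}

// Incoming packets are then handed to the channel as they arrive:
//   channel->OnPacketReceived(&rtp_packet);   // talk_base::Buffer*
//   channel->OnRtcpReceived(&rtcp_packet);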

View File

@ -0,0 +1,238 @@
/*
* libjingle
* Copyright 2004--2011, Google Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
* EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "talk/session/phone/webrtcvideoframe.h"
#include "talk/base/logging.h"
#include "talk/session/phone/videocommon.h"
#ifdef WEBRTC_RELATIVE_PATH
#include "common_video/vplib/main/interface/vplib.h"
#else
#include "third_party/webrtc/files/include/vplib.h"
#endif
namespace cricket {
WebRtcVideoFrame::WebRtcVideoFrame() {
}
WebRtcVideoFrame::~WebRtcVideoFrame() {
}
void WebRtcVideoFrame::Attach(uint8* buffer, size_t buffer_size, size_t w,
size_t h, int64 elapsed_time, int64 time_stamp) {
video_frame_.Free();
WebRtc_UWord8* new_memory = buffer;
WebRtc_UWord32 new_length = buffer_size;
WebRtc_UWord32 new_size = buffer_size;
video_frame_.Swap(new_memory, new_length, new_size);
video_frame_.SetWidth(w);
video_frame_.SetHeight(h);
elapsed_time_ = elapsed_time;
video_frame_.SetTimeStamp(time_stamp);
}
void WebRtcVideoFrame::Detach(uint8** buffer, size_t* buffer_size) {
WebRtc_UWord8* new_memory = NULL;
WebRtc_UWord32 new_length = 0;
WebRtc_UWord32 new_size = 0;
video_frame_.Swap(new_memory, new_length, new_size);
*buffer = new_memory;
*buffer_size = new_size;
}
bool WebRtcVideoFrame::InitToBlack(size_t w, size_t h,
int64 elapsed_time, int64 time_stamp) {
size_t buffer_size = w * h * 3 / 2;
uint8* buffer = new uint8[buffer_size];
Attach(buffer, buffer_size, w, h, elapsed_time, time_stamp);
memset(GetYPlane(), 16, w * h);
memset(GetUPlane(), 128, w * h / 4);
memset(GetVPlane(), 128, w * h / 4);
return true;
}
size_t WebRtcVideoFrame::GetWidth() const {
return video_frame_.Width();
}
size_t WebRtcVideoFrame::GetHeight() const {
return video_frame_.Height();
}
const uint8* WebRtcVideoFrame::GetYPlane() const {
WebRtc_UWord8* buffer = video_frame_.Buffer();
return buffer;
}
const uint8* WebRtcVideoFrame::GetUPlane() const {
WebRtc_UWord8* buffer = video_frame_.Buffer();
if (buffer)
buffer += (video_frame_.Width() * video_frame_.Height());
return buffer;
}
const uint8* WebRtcVideoFrame::GetVPlane() const {
WebRtc_UWord8* buffer = video_frame_.Buffer();
if (buffer)
buffer += (video_frame_.Width() * video_frame_.Height() * 5 / 4);
return buffer;
}
uint8* WebRtcVideoFrame::GetYPlane() {
WebRtc_UWord8* buffer = video_frame_.Buffer();
return buffer;
}
uint8* WebRtcVideoFrame::GetUPlane() {
WebRtc_UWord8* buffer = video_frame_.Buffer();
if (buffer)
buffer += (video_frame_.Width() * video_frame_.Height());
return buffer;
}
uint8* WebRtcVideoFrame::GetVPlane() {
WebRtc_UWord8* buffer = video_frame_.Buffer();
if (buffer)
buffer += (video_frame_.Width() * video_frame_.Height() * 5 / 4);
return buffer;
}
VideoFrame* WebRtcVideoFrame::Copy() const {
WebRtc_UWord8* buffer = video_frame_.Buffer();
if (!buffer)
return NULL;
size_t new_buffer_size = video_frame_.Length();
uint8* new_buffer = new uint8[new_buffer_size];
memcpy(new_buffer, buffer, new_buffer_size);
WebRtcVideoFrame* copy = new WebRtcVideoFrame();
copy->Attach(new_buffer, new_buffer_size,
video_frame_.Width(), video_frame_.Height(),
elapsed_time_, video_frame_.TimeStamp());
return copy;
}
size_t WebRtcVideoFrame::CopyToBuffer(
uint8* buffer, size_t size) const {
if (!video_frame_.Buffer()) {
return 0;
}
size_t needed = video_frame_.Length();
if (needed <= size) {
memcpy(buffer, video_frame_.Buffer(), needed);
}
return needed;
}
size_t WebRtcVideoFrame::ConvertToRgbBuffer(uint32 to_fourcc,
uint8* buffer,
size_t size,
size_t pitch_rgb) const {
if (!video_frame_.Buffer()) {
return 0;
}
size_t width = video_frame_.Width();
size_t height = video_frame_.Height();
// See http://www.virtualdub.org/blog/pivot/entry.php?id=190 for a good
// explanation of pitch and why this is the amount of space we need.
size_t needed = pitch_rgb * (height - 1) + 4 * width;
if (needed > size) {
LOG(LS_WARNING) << "RGB buffer is not large enough";
return 0;
}
webrtc::VideoType outgoingVideoType = webrtc::kUnknown;
switch (to_fourcc) {
case FOURCC_ARGB:
outgoingVideoType = webrtc::kARGB;
break;
default:
LOG(LS_WARNING) << "RGB type not supported: " << to_fourcc;
return 0;
break;
}
if (outgoingVideoType != webrtc::kUnknown)
webrtc::ConvertFromI420(outgoingVideoType, video_frame_.Buffer(),
width, height, buffer);
return needed;
}
void WebRtcVideoFrame::StretchToPlanes(
uint8* y, uint8* u, uint8* v,
int32 dst_pitch_y, int32 dst_pitch_u, int32 dst_pitch_v,
size_t width, size_t height, bool interpolate, bool crop) const {
// TODO(ronghuawu): Implement StretchToPlanes
}
size_t WebRtcVideoFrame::StretchToBuffer(size_t w, size_t h,
uint8* buffer, size_t size,
bool interpolate,
bool crop) const {
if (!video_frame_.Buffer()) {
return 0;
}
size_t needed = video_frame_.Length();
if (needed <= size) {
uint8* bufy = buffer;
uint8* bufu = bufy + w * h;
uint8* bufv = bufu + ((w + 1) >> 1) * ((h + 1) >> 1);
StretchToPlanes(bufy, bufu, bufv, w, (w + 1) >> 1, (w + 1) >> 1, w, h,
interpolate, crop);
}
return needed;
}
void WebRtcVideoFrame::StretchToFrame(VideoFrame* target,
bool interpolate, bool crop) const {
if (!target) return;
StretchToPlanes(target->GetYPlane(),
target->GetUPlane(),
target->GetVPlane(),
target->GetYPitch(),
target->GetUPitch(),
target->GetVPitch(),
target->GetWidth(),
target->GetHeight(),
interpolate, crop);
target->SetElapsedTime(GetElapsedTime());
target->SetTimeStamp(GetTimeStamp());
}
VideoFrame* WebRtcVideoFrame::Stretch(size_t w, size_t h,
bool interpolate, bool crop) const {
// TODO(ronghuawu): implement
return NULL;
}
} // namespace cricket
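
Two details here are easy to miss: the plane accessors encode the standard I420 layout (a w x h frame occupies w * h * 3 / 2 bytes, with U at offset w * h and V at w * h * 5 / 4, which is exactly what InitToBlack() allocates), and Attach()/Detach() implement a borrow, not a copy. A minimal sketch of the borrow pattern that WebRtcRenderAdapter::DeliverFrame() relies on, with a hypothetical caller:

#include "talk/session/phone/webrtcvideoframe.h"

bool RenderBorrowedBuffer(cricket::VideoRenderer* renderer, uint8* buffer,
                          size_t buffer_size, size_t width, size_t height) {
  cricket::WebRtcVideoFrame frame;
  // The frame now points at |buffer| without copying it.
  frame.Attach(buffer, buffer_size, width, height, 0, 0);
  bool ok = renderer->RenderFrame(&frame);
  // Take the buffer back so the frame does not free it on destruction.
  uint8* returned = NULL;
  size_t returned_size = 0;
  frame.Detach(&returned, &returned_size);  // returned == buffer again
  return ok;
}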

View File

@ -0,0 +1,97 @@
/*
* libjingle
* Copyright 2004--2011, Google Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
* EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef TALK_SESSION_PHONE_WEBRTCVIDEOFRAME_H_
#define TALK_SESSION_PHONE_WEBRTCVIDEOFRAME_H_
#ifdef WEBRTC_RELATIVE_PATH
#include "common_types.h"
#include "modules/interface/module_common_types.h"
#else
#include "third_party/webrtc/files/include/common_types.h"
#include "third_party/webrtc/files/include/module_common_types.h"
#endif
#include "talk/session/phone/mediachannel.h"
namespace cricket {
// WebRtcVideoFrame only supports I420
class WebRtcVideoFrame : public VideoFrame {
public:
WebRtcVideoFrame();
~WebRtcVideoFrame();
void Attach(uint8* buffer, size_t buffer_size,
size_t w, size_t h, int64 elapsed_time, int64 time_stamp);
void Detach(uint8** buffer, size_t* buffer_size);
bool InitToBlack(size_t w, size_t h, int64 elapsed_time, int64 time_stamp);
bool HasImage() const { return video_frame_.Buffer() != NULL; }
virtual size_t GetWidth() const;
virtual size_t GetHeight() const;
virtual const uint8* GetYPlane() const;
virtual const uint8* GetUPlane() const;
virtual const uint8* GetVPlane() const;
virtual uint8* GetYPlane();
virtual uint8* GetUPlane();
virtual uint8* GetVPlane();
virtual int32 GetYPitch() const { return video_frame_.Width(); }
virtual int32 GetUPitch() const { return video_frame_.Width() / 2; }
virtual int32 GetVPitch() const { return video_frame_.Width() / 2; }
virtual size_t GetPixelWidth() const { return 1; }
virtual size_t GetPixelHeight() const { return 1; }
virtual int64 GetElapsedTime() const { return elapsed_time_; }
virtual int64 GetTimeStamp() const { return video_frame_.TimeStamp(); }
virtual void SetElapsedTime(int64 elapsed_time) {
elapsed_time_ = elapsed_time;
}
virtual void SetTimeStamp(int64 time_stamp) {
video_frame_.SetTimeStamp(time_stamp);
}
virtual VideoFrame* Copy() const;
virtual size_t CopyToBuffer(uint8* buffer, size_t size) const;
virtual size_t ConvertToRgbBuffer(uint32 to_fourcc, uint8* buffer,
size_t size, size_t pitch_rgb) const;
virtual void StretchToPlanes(uint8* y, uint8* u, uint8* v,
int32 pitchY, int32 pitchU, int32 pitchV,
size_t width, size_t height,
bool interpolate, bool crop) const;
virtual size_t StretchToBuffer(size_t w, size_t h, uint8* buffer, size_t size,
bool interpolate, bool crop) const;
virtual void StretchToFrame(VideoFrame* target, bool interpolate,
bool crop) const;
virtual VideoFrame* Stretch(size_t w, size_t h, bool interpolate,
bool crop) const;
private:
webrtc::VideoFrame video_frame_;
int64 elapsed_time_;
};
} // namespace cricket
#endif // TALK_SESSION_PHONE_WEBRTCVIDEOFRAME_H_
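
As a worked example of the destination sizing that ConvertToRgbBuffer() enforces (needed = pitch_rgb * (height - 1) + 4 * width, per the comment in the .cc file), a hypothetical helper:

#include <cstddef>

// Every row but the last advances by pitch_rgb; the final row only needs
// its 4 * width pixel bytes, hence the (height - 1) term.
size_t ArgbBufferSize(size_t width, size_t height, size_t pitch_rgb) {
  return pitch_rgb * (height - 1) + 4 * width;
}

// For a 320x240 frame with a tightly packed pitch of 4 * 320 = 1280:
//   1280 * 239 + 4 * 320 = 307200 bytes, i.e. exactly width * height * 4.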

View File

@ -26,12 +26,16 @@
*/
#ifndef TALK_APP_WEBRTC_VIDEOENGINE_H_
#define TALK_APP_WEBRTC_VIDEOENGINE_H_
#ifndef TALK_SESSION_PHONE_WEBRTCVIE_H_
#define TALK_SESSION_PHONE_WEBRTCVIE_H_
#include "talk/base/common.h"
#include "talk/session/phone/webrtccommon.h"
#ifdef WEBRTC_RELATIVE_PATH
#include "common_types.h"
#include "modules/interface/module_common_types.h"
#include "modules/video_capture/main/interface/video_capture.h"
#include "modules/video_render/main/interface/video_render.h"
#include "video_engine/main/interface/vie_base.h"
#include "video_engine/main/interface/vie_capture.h"
#include "video_engine/main/interface/vie_codec.h"
@ -40,34 +44,48 @@
#include "video_engine/main/interface/vie_network.h"
#include "video_engine/main/interface/vie_render.h"
#include "video_engine/main/interface/vie_rtp_rtcp.h"
#else
#include "third_party/webrtc/files/include/common_types.h"
#include "third_party/webrtc/files/include/module_common_types.h"
#include "third_party/webrtc/files/include/video_capture.h"
#include "third_party/webrtc/files/include/video_render.h"
#include "third_party/webrtc/files/include/vie_base.h"
#include "third_party/webrtc/files/include/vie_capture.h"
#include "third_party/webrtc/files/include/vie_codec.h"
#include "third_party/webrtc/files/include/vie_errors.h"
#include "third_party/webrtc/files/include/vie_image_process.h"
#include "third_party/webrtc/files/include/vie_network.h"
#include "third_party/webrtc/files/include/vie_render.h"
#include "third_party/webrtc/files/include/vie_rtp_rtcp.h"
#endif // WEBRTC_RELATIVE_PATH
namespace webrtc {
namespace cricket {
// all tracing macros should go to a common file
// automatically handles lifetime of VideoEngine
class scoped_video_engine {
class scoped_vie_engine {
public:
explicit scoped_video_engine(VideoEngine* e) : ptr(e) {}
explicit scoped_vie_engine(webrtc::VideoEngine* e) : ptr(e) {}
// VERIFY, to ensure that there are no leaks at shutdown
~scoped_video_engine() {
~scoped_vie_engine() {
if (ptr) {
VideoEngine::Delete(ptr);
webrtc::VideoEngine::Delete(ptr);
}
}
VideoEngine* get() const { return ptr; }
webrtc::VideoEngine* get() const { return ptr; }
private:
VideoEngine* ptr;
webrtc::VideoEngine* ptr;
};
// scoped_ptr class to handle obtaining and releasing VideoEngine
// interface pointers
template<class T> class scoped_video_ptr {
template<class T> class scoped_vie_ptr {
public:
explicit scoped_video_ptr(const scoped_video_engine& e)
explicit scoped_vie_ptr(const scoped_vie_engine& e)
: ptr(T::GetInterface(e.get())) {}
explicit scoped_video_ptr(T* p) : ptr(p) {}
~scoped_video_ptr() { if (ptr) ptr->Release(); }
explicit scoped_vie_ptr(T* p) : ptr(p) {}
~scoped_vie_ptr() { if (ptr) ptr->Release(); }
T* operator->() const { return ptr; }
T* get() const { return ptr; }
private:
@ -76,46 +94,50 @@ template<class T> class scoped_video_ptr {
// Utility class for aggregating the various WebRTC interfaces.
// Fake implementations can also be injected for testing.
class VideoEngineWrapper {
class ViEWrapper {
public:
VideoEngineWrapper()
: engine_(VideoEngine::Create()),
ViEWrapper()
: engine_(webrtc::VideoEngine::Create()),
base_(engine_), codec_(engine_), capture_(engine_),
network_(engine_), render_(engine_), rtp_(engine_),
image_(engine_) {
}
VideoEngineWrapper(ViEBase* base, ViECodec* codec, ViECapture* capture,
ViENetwork* network, ViERender* render,
ViERTP_RTCP* rtp, ViEImageProcess* image)
ViEWrapper(webrtc::ViEBase* base, webrtc::ViECodec* codec,
webrtc::ViECapture* capture, webrtc::ViENetwork* network,
webrtc::ViERender* render, webrtc::ViERTP_RTCP* rtp,
webrtc::ViEImageProcess* image)
: engine_(NULL),
base_(base), codec_(codec), capture_(capture),
network_(network), render_(render), rtp_(rtp),
base_(base),
codec_(codec),
capture_(capture),
network_(network),
render_(render),
rtp_(rtp),
image_(image) {
}
virtual ~VideoEngineWrapper() {}
VideoEngine* engine() { return engine_.get(); }
ViEBase* base() { return base_.get(); }
ViECodec* codec() { return codec_.get(); }
ViECapture* capture() { return capture_.get(); }
ViENetwork* network() { return network_.get(); }
ViERender* render() { return render_.get(); }
ViERTP_RTCP* rtp() { return rtp_.get(); }
ViEImageProcess* sync() { return image_.get(); }
virtual ~ViEWrapper() {}
webrtc::VideoEngine* engine() { return engine_.get(); }
webrtc::ViEBase* base() { return base_.get(); }
webrtc::ViECodec* codec() { return codec_.get(); }
webrtc::ViECapture* capture() { return capture_.get(); }
webrtc::ViENetwork* network() { return network_.get(); }
webrtc::ViERender* render() { return render_.get(); }
webrtc::ViERTP_RTCP* rtp() { return rtp_.get(); }
webrtc::ViEImageProcess* sync() { return image_.get(); }
int error() { return base_->LastError(); }
private:
scoped_video_engine engine_;
scoped_video_ptr<ViEBase> base_;
scoped_video_ptr<ViECodec> codec_;
scoped_video_ptr<ViECapture> capture_;
scoped_video_ptr<ViENetwork> network_;
scoped_video_ptr<ViERender> render_;
scoped_video_ptr<ViERTP_RTCP> rtp_;
scoped_video_ptr<ViEImageProcess> image_;
scoped_vie_engine engine_;
scoped_vie_ptr<webrtc::ViEBase> base_;
scoped_vie_ptr<webrtc::ViECodec> codec_;
scoped_vie_ptr<webrtc::ViECapture> capture_;
scoped_vie_ptr<webrtc::ViENetwork> network_;
scoped_vie_ptr<webrtc::ViERender> render_;
scoped_vie_ptr<webrtc::ViERTP_RTCP> rtp_;
scoped_vie_ptr<webrtc::ViEImageProcess> image_;
};
}
}  // namespace cricket
#endif // TALK_APP_WEBRTC_VOICEENGINE_H_
#endif // TALK_SESSION_PHONE_WEBRTCVIE_H_
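
The renamed scoped_vie_* helpers are plain RAII around ViE's reference-counted sub-APIs; a minimal sketch of standalone use (the function is hypothetical, the types are the ones defined above):

#include "talk/base/logging.h"
#include "talk/session/phone/webrtcvie.h"

void LogViECodecCount() {
  cricket::scoped_vie_engine engine(webrtc::VideoEngine::Create());
  // T::GetInterface() acquires a reference; ~scoped_vie_ptr Release()s it.
  cricket::scoped_vie_ptr<webrtc::ViECodec> codec(engine);
  if (codec.get()) {
    LOG(LS_INFO) << "ViE codecs: " << codec->NumberOfCodecs();
  }
  // codec releases its interface, then the engine is deleted, on return.
}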

View File

@ -0,0 +1,190 @@
/*
* libjingle
* Copyright 2004--2011, Google Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
* EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef TALK_SESSION_PHONE_WEBRTCVOE_H_
#define TALK_SESSION_PHONE_WEBRTCVOE_H_
#include "talk/base/common.h"
#include "talk/session/phone/webrtccommon.h"
#ifdef WEBRTC_RELATIVE_PATH
#include "common_types.h"
#include "modules/audio_device/main/interface/audio_device.h"
#include "voice_engine/main/interface/voe_audio_processing.h"
#include "voice_engine/main/interface/voe_base.h"
#include "voice_engine/main/interface/voe_codec.h"
#include "voice_engine/main/interface/voe_dtmf.h"
#include "voice_engine/main/interface/voe_errors.h"
#include "voice_engine/main/interface/voe_file.h"
#include "voice_engine/main/interface/voe_hardware.h"
#include "voice_engine/main/interface/voe_neteq_stats.h"
#include "voice_engine/main/interface/voe_network.h"
#include "voice_engine/main/interface/voe_rtp_rtcp.h"
#include "voice_engine/main/interface/voe_video_sync.h"
#include "voice_engine/main/interface/voe_volume_control.h"
#else
#include "third_party/webrtc/files/include/audio_device.h"
#include "third_party/webrtc/files/include/common_types.h"
#include "third_party/webrtc/files/include/voe_audio_processing.h"
#include "third_party/webrtc/files/include/voe_base.h"
#include "third_party/webrtc/files/include/voe_codec.h"
#include "third_party/webrtc/files/include/voe_dtmf.h"
#include "third_party/webrtc/files/include/voe_errors.h"
#include "third_party/webrtc/files/include/voe_file.h"
#include "third_party/webrtc/files/include/voe_hardware.h"
#include "third_party/webrtc/files/include/voe_neteq_stats.h"
#include "third_party/webrtc/files/include/voe_network.h"
#include "third_party/webrtc/files/include/voe_rtp_rtcp.h"
#include "third_party/webrtc/files/include/voe_video_sync.h"
#include "third_party/webrtc/files/include/voe_volume_control.h"
#endif // WEBRTC_RELATIVE_PATH
namespace cricket {
// automatically handles lifetime of WebRtc VoiceEngine
class scoped_voe_engine {
public:
explicit scoped_voe_engine(webrtc::VoiceEngine* e) : ptr(e) {}
// VERIFY, to ensure that there are no leaks at shutdown
~scoped_voe_engine() { if (ptr) VERIFY(webrtc::VoiceEngine::Delete(ptr)); }
// Releases the current pointer.
void reset() {
if (ptr) {
VERIFY(webrtc::VoiceEngine::Delete(ptr));
ptr = NULL;
}
}
webrtc::VoiceEngine* get() const { return ptr; }
private:
webrtc::VoiceEngine* ptr;
};
// scoped_ptr class to handle obtaining and releasing WebRTC interface pointers
template<class T>
class scoped_voe_ptr {
public:
explicit scoped_voe_ptr(const scoped_voe_engine& e)
: ptr(T::GetInterface(e.get())) {}
explicit scoped_voe_ptr(T* p) : ptr(p) {}
~scoped_voe_ptr() { if (ptr) ptr->Release(); }
T* operator->() const { return ptr; }
T* get() const { return ptr; }
// Releases the current pointer.
void reset() {
if (ptr) {
ptr->Release();
ptr = NULL;
}
}
private:
T* ptr;
};
// Utility class for aggregating the various WebRTC interfaces.
// Fake implementations can also be injected for testing.
class VoEWrapper {
public:
VoEWrapper()
: engine_(webrtc::VoiceEngine::Create()), processing_(engine_),
base_(engine_), codec_(engine_), dtmf_(engine_), file_(engine_),
hw_(engine_), neteq_(engine_), network_(engine_), rtp_(engine_),
sync_(engine_), volume_(engine_) {
}
VoEWrapper(webrtc::VoEAudioProcessing* processing,
webrtc::VoEBase* base,
webrtc::VoECodec* codec,
webrtc::VoEDtmf* dtmf,
webrtc::VoEFile* file,
webrtc::VoEHardware* hw,
webrtc::VoENetEqStats* neteq,
webrtc::VoENetwork* network,
webrtc::VoERTP_RTCP* rtp,
webrtc::VoEVideoSync* sync,
webrtc::VoEVolumeControl* volume)
: engine_(NULL),
processing_(processing),
base_(base),
codec_(codec),
dtmf_(dtmf),
file_(file),
hw_(hw),
neteq_(neteq),
network_(network),
rtp_(rtp),
sync_(sync),
volume_(volume) {
}
~VoEWrapper() {}
webrtc::VoiceEngine* engine() const { return engine_.get(); }
webrtc::VoEAudioProcessing* processing() const { return processing_.get(); }
webrtc::VoEBase* base() const { return base_.get(); }
webrtc::VoECodec* codec() const { return codec_.get(); }
webrtc::VoEDtmf* dtmf() const { return dtmf_.get(); }
webrtc::VoEFile* file() const { return file_.get(); }
webrtc::VoEHardware* hw() const { return hw_.get(); }
webrtc::VoENetEqStats* neteq() const { return neteq_.get(); }
webrtc::VoENetwork* network() const { return network_.get(); }
webrtc::VoERTP_RTCP* rtp() const { return rtp_.get(); }
webrtc::VoEVideoSync* sync() const { return sync_.get(); }
webrtc::VoEVolumeControl* volume() const { return volume_.get(); }
int error() { return base_->LastError(); }
private:
scoped_voe_engine engine_;
scoped_voe_ptr<webrtc::VoEAudioProcessing> processing_;
scoped_voe_ptr<webrtc::VoEBase> base_;
scoped_voe_ptr<webrtc::VoECodec> codec_;
scoped_voe_ptr<webrtc::VoEDtmf> dtmf_;
scoped_voe_ptr<webrtc::VoEFile> file_;
scoped_voe_ptr<webrtc::VoEHardware> hw_;
scoped_voe_ptr<webrtc::VoENetEqStats> neteq_;
scoped_voe_ptr<webrtc::VoENetwork> network_;
scoped_voe_ptr<webrtc::VoERTP_RTCP> rtp_;
scoped_voe_ptr<webrtc::VoEVideoSync> sync_;
scoped_voe_ptr<webrtc::VoEVolumeControl> volume_;
};
// Adds indirection to static WebRtc functions, allowing them to be mocked.
class VoETraceWrapper {
public:
virtual ~VoETraceWrapper() {}
virtual int SetTraceFilter(const unsigned int filter) {
return webrtc::VoiceEngine::SetTraceFilter(filter);
}
virtual int SetTraceFile(const char* fileNameUTF8) {
return webrtc::VoiceEngine::SetTraceFile(fileNameUTF8);
}
virtual int SetTraceCallback(webrtc::TraceCallback* callback) {
return webrtc::VoiceEngine::SetTraceCallback(callback);
}
};
}
#endif // TALK_SESSION_PHONE_WEBRTCVOE_H_
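
The voice side follows the same aggregation pattern; a minimal sketch, assuming the standard VoEBase::Init()/Terminate() calls of this API generation (the function itself is hypothetical):

#include "talk/session/phone/webrtcvoe.h"

int InitAndTerminateVoice() {
  cricket::VoEWrapper voe;  // creates the engine, acquires every sub-API
  if (voe.base()->Init() != 0)
    return voe.error();     // VoEBase::LastError() via the wrapper
  // ... configure via voe.codec(), voe.hw(), voe.volume(), etc. ...
  voe.base()->Terminate();
  return 0;                 // interfaces and engine released on scope exit
}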

File diff suppressed because it is too large.

View File

@ -0,0 +1,320 @@
/*
* libjingle
* Copyright 2004--2011, Google Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
* EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef TALK_SESSION_PHONE_WEBRTCVOICEENGINE_H_
#define TALK_SESSION_PHONE_WEBRTCVOICEENGINE_H_
#include <map>
#include <set>
#include <string>
#include <vector>
#include "talk/base/buffer.h"
#include "talk/base/byteorder.h"
#include "talk/base/logging.h"
#include "talk/base/scoped_ptr.h"
#include "talk/base/stream.h"
#include "talk/session/phone/channel.h"
#include "talk/session/phone/mediaengine.h"
#include "talk/session/phone/rtputils.h"
#include "talk/session/phone/webrtccommon.h"
namespace cricket {
// WebRtcSoundclipStream is an adapter object that allows a memory stream to
// be passed into WebRtc, and supports looping.
class WebRtcSoundclipStream : public webrtc::InStream {
public:
WebRtcSoundclipStream(const char* buf, size_t len)
: mem_(buf, len), loop_(true) {
}
void set_loop(bool loop) { loop_ = loop; }
virtual int Read(void* buf, int len);
virtual int Rewind();
private:
talk_base::MemoryStream mem_;
bool loop_;
};
// WebRtcMonitorStream is used to monitor a stream coming from WebRtc.
// For now we just dump the data.
class WebRtcMonitorStream : public webrtc::OutStream {
virtual bool Write(const void *buf, int len) {
return true;
}
};
class AudioDeviceModule;
class VoETraceWrapper;
class VoEWrapper;
class WebRtcSoundclipMedia;
class WebRtcVoiceMediaChannel;
// WebRtcVoiceEngine is a class to be used with CompositeMediaEngine.
// It uses the WebRtc VoiceEngine library for audio handling.
class WebRtcVoiceEngine
: public webrtc::VoiceEngineObserver,
public webrtc::TraceCallback {
public:
WebRtcVoiceEngine();
WebRtcVoiceEngine(webrtc::AudioDeviceModule* adm,
webrtc::AudioDeviceModule* adm_sc);
// Dependency injection for testing.
WebRtcVoiceEngine(VoEWrapper* voe_wrapper,
VoEWrapper* voe_wrapper_sc,
VoETraceWrapper* tracing);
~WebRtcVoiceEngine();
bool Init();
void Terminate();
int GetCapabilities();
VoiceMediaChannel* CreateChannel();
SoundclipMedia* CreateSoundclip();
bool SetOptions(int options);
bool SetDevices(const Device* in_device, const Device* out_device);
bool GetOutputVolume(int* level);
bool SetOutputVolume(int level);
int GetInputLevel();
bool SetLocalMonitor(bool enable);
const std::vector<AudioCodec>& codecs();
bool FindCodec(const AudioCodec& codec);
bool FindWebRtcCodec(const AudioCodec& codec, webrtc::CodecInst* gcodec);
void SetLogging(int min_sev, const char* filter);
// For tracking WebRtc channels. Needed because we have to pause them
// all when switching devices.
// May only be called by WebRtcVoiceMediaChannel.
void RegisterChannel(WebRtcVoiceMediaChannel *channel);
void UnregisterChannel(WebRtcVoiceMediaChannel *channel);
// May only be called by WebRtcSoundclipMedia.
void RegisterSoundclip(WebRtcSoundclipMedia *channel);
void UnregisterSoundclip(WebRtcSoundclipMedia *channel);
// Called by WebRtcVoiceMediaChannel to set a gain offset from
// the default AGC target level.
bool AdjustAgcLevel(int delta);
// Called by WebRtcVoiceMediaChannel to configure echo cancellation
// and noise suppression modes.
bool SetConferenceMode(bool enable);
VoEWrapper* voe() { return voe_wrapper_.get(); }
VoEWrapper* voe_sc() { return voe_wrapper_sc_.get(); }
int GetLastEngineError();
private:
typedef std::vector<WebRtcSoundclipMedia *> SoundclipList;
typedef std::vector<WebRtcVoiceMediaChannel *> ChannelList;
struct CodecPref {
const char* name;
int clockrate;
};
void Construct();
bool InitInternal();
void ApplyLogging();
virtual void Print(const webrtc::TraceLevel level,
const char* trace_string, const int length);
virtual void CallbackOnError(const int channel, const int errCode);
static int GetCodecPreference(const char *name, int clockrate);
// Given the device type, name, and id, find the WebRtc device id. Returns
// true and sets the output parameter rtc_id if successful.
bool FindWebRtcAudioDeviceId(
bool is_input, const std::string& dev_name, int dev_id, int* rtc_id);
bool FindChannelAndSsrc(int channel_num,
WebRtcVoiceMediaChannel** channel,
uint32* ssrc) const;
bool ChangeLocalMonitor(bool enable);
bool PauseLocalMonitor();
bool ResumeLocalMonitor();
static const int kDefaultLogSeverity = talk_base::LS_WARNING;
static const CodecPref kCodecPrefs[];
// The primary instance of WebRtc VoiceEngine.
talk_base::scoped_ptr<VoEWrapper> voe_wrapper_;
// A secondary instance, for playing out soundclips (on the 'ring' device).
talk_base::scoped_ptr<VoEWrapper> voe_wrapper_sc_;
talk_base::scoped_ptr<VoETraceWrapper> tracing_;
// The external audio device manager
webrtc::AudioDeviceModule* adm_;
webrtc::AudioDeviceModule* adm_sc_;
int log_level_;
bool is_dumping_aec_;
std::vector<AudioCodec> codecs_;
bool desired_local_monitor_enable_;
talk_base::scoped_ptr<WebRtcMonitorStream> monitor_;
SoundclipList soundclips_;
ChannelList channels_;
// channels_ can be read from the WebRtc callback thread. We need a lock on
// that callback as well as on RegisterChannel/UnregisterChannel.
talk_base::CriticalSection channels_cs_;
webrtc::AgcConfig default_agc_config_;
};
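// Illustrative usage sketch (not part of the original header; ownership of
// the returned channel by the caller is an assumption based on typical
// media-engine usage):
//
//   WebRtcVoiceEngine engine;
//   if (engine.Init()) {
//     VoiceMediaChannel* channel = engine.CreateChannel();
//     if (channel) {
//       // ... configure codecs, add streams, send/receive ...
//       delete channel;
//     }
//     engine.Terminate();
//   }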
// WebRtcMediaChannel is a class that implements the common WebRtc channel
// functionality.
template <class T, class E>
class WebRtcMediaChannel : public T, public webrtc::Transport {
public:
WebRtcMediaChannel(E *engine, int channel)
: engine_(engine), voe_channel_(channel), sequence_number_(-1) {}
E *engine() { return engine_; }
int voe_channel() const { return voe_channel_; }
bool valid() const { return voe_channel_ != -1; }
protected:
// implements Transport interface
virtual int SendPacket(int channel, const void *data, int len) {
if (!T::network_interface_) {
return -1;
}
// We need to store the sequence number so that we can resume from the same
// sequence when the device is restarted (see the GetRtpSeqNum sketch after
// this class).
// TODO(oja): Remove when WebRtc has fixed the problem.
int seq_num;
if (!GetRtpSeqNum(data, len, &seq_num)) {
return -1;
}
if (sequence_number() == -1) {
LOG(INFO) << "WebRtcVoiceMediaChannel sends first packet seqnum="
<< seq_num;
}
sequence_number_ = seq_num;
talk_base::Buffer packet(data, len, kMaxRtpPacketLen);
return T::network_interface_->SendPacket(&packet) ? len : -1;
}
virtual int SendRTCPPacket(int channel, const void *data, int len) {
if (!T::network_interface_) {
return -1;
}
talk_base::Buffer packet(data, len, kMaxRtpPacketLen);
return T::network_interface_->SendRtcp(&packet) ? len : -1;
}
int sequence_number() const {
return sequence_number_;
}
private:
E *engine_;
int voe_channel_;
int sequence_number_;
};
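// GetRtpSeqNum() used by SendPacket() above is declared elsewhere. For
// reference, a minimal sketch of what such a helper has to do, per RFC 3550:
// the sequence number occupies bytes 2-3 of the RTP header, in network
// (big-endian) byte order. The name below is hypothetical:
//
//   inline bool GetRtpSeqNumSketch(const void* data, int len, int* seq_num) {
//     if (!data || len < 12 || !seq_num)  // 12 bytes = minimum RTP header.
//       return false;
//     const uint8* header = static_cast<const uint8*>(data);
//     *seq_num = (header[2] << 8) | header[3];
//     return true;
//   }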
// WebRtcVoiceMediaChannel is an implementation of VoiceMediaChannel that uses
// WebRtc Voice Engine.
class WebRtcVoiceMediaChannel
: public WebRtcMediaChannel<VoiceMediaChannel,
WebRtcVoiceEngine> {
public:
explicit WebRtcVoiceMediaChannel(WebRtcVoiceEngine *engine);
virtual ~WebRtcVoiceMediaChannel();
virtual bool SetOptions(int options);
virtual bool SetRecvCodecs(const std::vector<AudioCodec> &codecs);
virtual bool SetSendCodecs(const std::vector<AudioCodec> &codecs);
virtual bool SetRecvRtpHeaderExtensions(
const std::vector<RtpHeaderExtension>& extensions);
virtual bool SetSendRtpHeaderExtensions(
const std::vector<RtpHeaderExtension>& extensions);
virtual bool SetPlayout(bool playout);
bool PausePlayout();
bool ResumePlayout();
virtual bool SetSend(SendFlags send);
bool PauseSend();
bool ResumeSend();
virtual bool AddStream(uint32 ssrc);
virtual bool RemoveStream(uint32 ssrc);
virtual bool GetActiveStreams(AudioInfo::StreamList* actives);
virtual int GetOutputLevel();
virtual bool SetRingbackTone(const char *buf, int len);
virtual bool PlayRingbackTone(uint32 ssrc, bool play, bool loop);
virtual bool PressDTMF(int event, bool playout);
virtual void OnPacketReceived(talk_base::Buffer* packet);
virtual void OnRtcpReceived(talk_base::Buffer* packet);
virtual void SetSendSsrc(uint32 id);
virtual bool SetRtcpCName(const std::string& cname);
virtual bool Mute(bool mute);
virtual bool SetSendBandwidth(bool autobw, int bps) { return false; }
virtual bool GetStats(VoiceMediaInfo* info);
// Gets the last reported error from the WebRtc voice engine. This should
// only be called in response to a failure.
virtual void GetLastMediaError(uint32* ssrc,
VoiceMediaChannel::Error* error);
bool FindSsrc(int channel_num, uint32* ssrc);
void OnError(uint32 ssrc, int error);
protected:
int GetLastEngineError() { return engine()->GetLastEngineError(); }
int GetChannel(uint32 ssrc);
int GetOutputLevel(int channel);
bool GetRedSendCodec(const AudioCodec& red_codec,
const std::vector<AudioCodec>& all_codecs,
webrtc::CodecInst* send_codec);
bool EnableRtcp(int channel);
bool SetPlayout(int channel, bool playout);
// Extracts the SSRC from an RTP or RTCP packet (see the wire-format sketch
// after this class).
static uint32 ParseSsrc(const void* data, size_t len, bool rtcp);
static Error WebRtcErrorToChannelError(int err_code);
private:
// Tandberg-bridged conferences require a -10dB gain adjustment, which
// corresponds to +10 in AgcConfig.targetLeveldBOv (the target is expressed
// in dB below full scale, so a lower level means a larger value).
static const int kTandbergDbAdjustment = 10;
bool ChangePlayout(bool playout);
bool ChangeSend(SendFlags send);
typedef std::map<uint32, int> ChannelMap;
talk_base::scoped_ptr<WebRtcSoundclipStream> ringback_tone_;
std::set<int> ringback_channels_; // channels playing ringback
int channel_options_;
bool agc_adjusted_;
bool dtmf_allowed_;
bool desired_playout_;
bool playout_;
SendFlags desired_send_;
SendFlags send_;
ChannelMap mux_channels_; // for multiple sources
// mux_channels_ can be read from WebRtc callback thread. Accesses off the
// WebRtc thread must be synchronized with edits on the worker thread. Reads
// on the worker thread are ok.
mutable talk_base::CriticalSection mux_channels_cs_;
};
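// For reference, ParseSsrc() (presumably defined in the corresponding .cc
// file) follows RFC 3550 framing: the SSRC field sits at byte offset 8 in an
// RTP header and at byte offset 4 in an RTCP packet, in network byte order.
// A minimal sketch with a hypothetical name:
//
//   inline uint32 ParseSsrcSketch(const void* data, size_t len, bool rtcp) {
//     const size_t ssrc_pos = rtcp ? 4 : 8;
//     if (!data || len < ssrc_pos + sizeof(uint32))
//       return 0;
//     const uint8* bytes = static_cast<const uint8*>(data);
//     return (static_cast<uint32>(bytes[ssrc_pos]) << 24) |
//            (static_cast<uint32>(bytes[ssrc_pos + 1]) << 16) |
//            (static_cast<uint32>(bytes[ssrc_pos + 2]) << 8) |
//            static_cast<uint32>(bytes[ssrc_pos + 3]);
//   }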
}  // namespace cricket
#endif // TALK_SESSION_PHONE_WEBRTCVOICEENGINE_H_