Revert this change and wait until Tommi is ready to submit the corresponding peerconnection test changes at the same time.

git-svn-id: http://webrtc.googlecode.com/svn/trunk@32 4adac7df-926f-26a2-2b94-8c16560cd09d
This commit is contained in:
ronghuawu@google.com 2011-06-01 17:14:19 +00:00
parent 3025e6d9ef
commit e8c5948b52
15 changed files with 2195 additions and 426 deletions

View File

@ -466,7 +466,7 @@
'source/talk/session/phone/devicemanager.cc', 'source/talk/session/phone/devicemanager.cc',
'source/talk/session/phone/devicemanager.h', 'source/talk/session/phone/devicemanager.h',
'source/talk/session/phone/filemediaengine.cc', 'source/talk/session/phone/filemediaengine.cc',
'source/talk/session/phone/filemediaengine.h', 'source/talk/session/phone/filemediaengine.h',
'source/talk/session/phone/mediachannel.h', 'source/talk/session/phone/mediachannel.h',
'source/talk/session/phone/mediaengine.cc', 'source/talk/session/phone/mediaengine.cc',
'source/talk/session/phone/mediaengine.h', 'source/talk/session/phone/mediaengine.h',
@ -494,17 +494,9 @@
'source/talk/session/tunnel/tunnelsessionclient.cc', 'source/talk/session/tunnel/tunnelsessionclient.cc',
'source/talk/session/tunnel/tunnelsessionclient.h', 'source/talk/session/tunnel/tunnelsessionclient.h',
], ],
'conditions': [ 'conditions': [
['OS=="win"', {
'sources': [
'source/talk/session/phone/gdivideorenderer.cc',
'source/talk/session/phone/gdivideorenderer.h',
],
}],
['OS=="linux"', { ['OS=="linux"', {
'sources': [ 'sources': [
#'source/talk/session/phone/gtkvideorenderer.cc',
#'source/talk/session/phone/gtkvideorenderer.h',
'source/talk/session/phone/libudevsymboltable.cc', 'source/talk/session/phone/libudevsymboltable.cc',
'source/talk/session/phone/libudevsymboltable.h', 'source/talk/session/phone/libudevsymboltable.h',
'source/talk/session/phone/v4llookup.cc', 'source/talk/session/phone/v4llookup.cc',
@ -547,6 +539,8 @@
'source/talk/app/voicemediaengine.h', 'source/talk/app/voicemediaengine.h',
'source/talk/app/webrtc_json.cc', 'source/talk/app/webrtc_json.cc',
'source/talk/app/webrtc_json.h', 'source/talk/app/webrtc_json.h',
'source/talk/app/webrtcchannelmanager.cc',
'source/talk/app/webrtcchannelmanager.h',
'source/talk/app/webrtcsession.cc', 'source/talk/app/webrtcsession.cc',
'source/talk/app/webrtcsession.h', 'source/talk/app/webrtcsession.h',
'source/talk/app/webrtcsessionimpl.cc', 'source/talk/app/webrtcsessionimpl.cc',
@ -562,8 +556,8 @@
], ],
},{ },{
'sources': [ 'sources': [
'source/talk/app/p2p_transport_manager.cc', 'source/talk/app/p2p_transport_manager.cc',
'source/talk/app/p2p_transport_manager.h', 'source/talk/app/p2p_transport_manager.h',
], ],
}], }],
], ],

View File

@ -115,7 +115,7 @@ bool PeerConnection::Init() {
cricket::PORTALLOCATOR_DISABLE_RELAY); cricket::PORTALLOCATOR_DISABLE_RELAY);
// create channel manager // create channel manager
channel_manager_.reset(new cricket::ChannelManager(media_thread_.get())); channel_manager_.reset(new WebRtcChannelManager(media_thread_.get()));
//start the media thread //start the media thread
media_thread_->SetPriority(talk_base::PRIORITY_HIGH); media_thread_->SetPriority(talk_base::PRIORITY_HIGH);
@ -244,22 +244,24 @@ bool PeerConnection::SetAudioDevice(const std::string& wave_in_device,
return channel_manager_->SetAudioOptions(wave_in_device, wave_out_device, opts); return channel_manager_->SetAudioOptions(wave_in_device, wave_out_device, opts);
} }
bool PeerConnection::SetLocalVideoRenderer(cricket::VideoRenderer* renderer) {
return channel_manager_->SetLocalRenderer(renderer);
}
bool PeerConnection::SetVideoRenderer(const std::string& stream_id,
cricket::VideoRenderer* renderer) {
ASSERT(session_ != NULL);
return session_->SetVideoRenderer(stream_id, renderer);
}
bool PeerConnection::SetVideoRenderer(const std::string& stream_id, bool PeerConnection::SetVideoRenderer(const std::string& stream_id,
ExternalRenderer* external_renderer) { ExternalRenderer* external_renderer) {
ASSERT(session_ != NULL); ASSERT(session_ != NULL);
return session_->SetVideoRenderer(stream_id, external_renderer); return session_->SetVideoRenderer(stream_id, external_renderer);
} }
bool PeerConnection::SetVideoRenderer(int channel_id,
void* window,
unsigned int zOrder,
float left,
float top,
float right,
float bottom) {
ASSERT(session_ != NULL);
return session_->SetVideoRenderer(channel_id, window, zOrder, left, top,
right, bottom);
}
bool PeerConnection::SetVideoCapture(const std::string& cam_device) { bool PeerConnection::SetVideoCapture(const std::string& cam_device) {
return channel_manager_->SetVideoOptions(cam_device); return channel_manager_->SetVideoOptions(cam_device);
} }

View File

@ -10,7 +10,7 @@
#include "talk/base/thread.h" #include "talk/base/thread.h"
#include "talk/base/scoped_ptr.h" #include "talk/base/scoped_ptr.h"
#include "talk/base/basicpacketsocketfactory.h" #include "talk/base/basicpacketsocketfactory.h"
#include "talk/session/phone/channelmanager.h" #include "talk/app/webrtcchannelmanager.h"
namespace Json { namespace Json {
class Value; class Value;
@ -18,8 +18,6 @@ class Value;
namespace cricket { namespace cricket {
class BasicPortAllocator; class BasicPortAllocator;
class ChannelManager;
class VideoRenderer;
} }
#ifdef PLATFORM_CHROMIUM #ifdef PLATFORM_CHROMIUM
@ -78,13 +76,16 @@ class PeerConnection : public sigslot::has_slots<> {
bool SetAudioDevice(const std::string& wave_in_device, bool SetAudioDevice(const std::string& wave_in_device,
const std::string& wave_out_device, int opts); const std::string& wave_out_device, int opts);
// Set the video renderer // Set the video renderer
bool SetLocalVideoRenderer(cricket::VideoRenderer* renderer);
bool SetVideoRenderer(const std::string& stream_id,
cricket::VideoRenderer* renderer);
bool SetVideoRenderer(const std::string& stream_id, bool SetVideoRenderer(const std::string& stream_id,
ExternalRenderer* external_renderer); ExternalRenderer* external_renderer);
// Set channel_id to -1 for the local preview
bool SetVideoRenderer(int channel_id,
void* window,
unsigned int zOrder,
float left,
float top,
float right,
float bottom);
// Set video capture device // Set video capture device
// For Chromium the cam_device should use the capture session id. // For Chromium the cam_device should use the capture session id.
// For standalone app, cam_device is the camera name. It will try to // For standalone app, cam_device is the camera name. It will try to
@ -119,7 +120,7 @@ class PeerConnection : public sigslot::has_slots<> {
std::string config_; std::string config_;
talk_base::scoped_ptr<talk_base::Thread> media_thread_; talk_base::scoped_ptr<talk_base::Thread> media_thread_;
talk_base::scoped_ptr<cricket::ChannelManager> channel_manager_; talk_base::scoped_ptr<WebRtcChannelManager> channel_manager_;
talk_base::scoped_ptr<talk_base::NetworkManager> network_manager_; talk_base::scoped_ptr<talk_base::NetworkManager> network_manager_;
talk_base::scoped_ptr<cricket::BasicPortAllocator> port_allocator_; talk_base::scoped_ptr<cricket::BasicPortAllocator> port_allocator_;
talk_base::scoped_ptr<talk_base::BasicPacketSocketFactory> socket_factory_; talk_base::scoped_ptr<talk_base::BasicPacketSocketFactory> socket_factory_;

View File

@ -20,7 +20,6 @@
#include "talk/app/peerconnection.h" #include "talk/app/peerconnection.h"
#include "talk/app/session_test/main_wnd.h" #include "talk/app/session_test/main_wnd.h"
#include "talk/base/logging.h" #include "talk/base/logging.h"
#include "talk/session/phone/videorendererfactory.h"
static const char kAudioLabel[] = "audio_label"; static const char kAudioLabel[] = "audio_label";
static const char kVideoLabel[] = "video_label"; static const char kVideoLabel[] = "video_label";
@ -218,7 +217,6 @@ class PeerConnectionClient : public sigslot::has_slots<> {
} }
void OnConnect(talk_base::AsyncSocket* socket) { void OnConnect(talk_base::AsyncSocket* socket) {
ASSERT(!onconnect_data_.empty());
int sent = socket->Send(onconnect_data_.c_str(), onconnect_data_.length()); int sent = socket->Send(onconnect_data_.c_str(), onconnect_data_.length());
ASSERT(sent == onconnect_data_.length()); ASSERT(sent == onconnect_data_.length());
onconnect_data_.clear(); onconnect_data_.clear();
@ -387,10 +385,8 @@ class PeerConnectionClient : public sigslot::has_slots<> {
notification_data_.clear(); notification_data_.clear();
} }
if (hanging_get_.GetState() == talk_base::Socket::CS_CLOSED && if (hanging_get_.GetState() == talk_base::Socket::CS_CLOSED)
state_ == CONNECTED) {
hanging_get_.Connect(server_address_); hanging_get_.Connect(server_address_);
}
} }
// Parses a single line entry in the form "<name>,<id>,<connected>" // Parses a single line entry in the form "<name>,<id>,<connected>"
@ -450,9 +446,7 @@ class PeerConnectionClient : public sigslot::has_slots<> {
void OnClose(talk_base::AsyncSocket* socket, int err) { void OnClose(talk_base::AsyncSocket* socket, int err) {
LOG(INFO) << __FUNCTION__; LOG(INFO) << __FUNCTION__;
socket->Close(); socket->Close();
if (err != WSAECONNREFUSED) { if (err != WSAECONNREFUSED) {
if (socket == &hanging_get_) { if (socket == &hanging_get_) {
if (state_ == CONNECTED) { if (state_ == CONNECTED) {
@ -559,7 +553,6 @@ class ConnectionObserver
void DeletePeerConnection() { void DeletePeerConnection() {
peer_connection_.reset(); peer_connection_.reset();
handshake_ = NONE;
} }
void StartCaptureDevice() { void StartCaptureDevice() {
@ -568,11 +561,8 @@ class ConnectionObserver
main_wnd_->SwitchToStreamingUI(); main_wnd_->SwitchToStreamingUI();
if (peer_connection_->SetVideoCapture("")) { if (peer_connection_->SetVideoCapture("")) {
if (!local_renderer_.get()) { peer_connection_->SetVideoRenderer(-1, main_wnd_->handle(), 0,
local_renderer_.reset( 0.7f, 0.7f, 0.95f, 0.95f);
cricket::VideoRendererFactory::CreateGuiVideoRenderer(176, 144));
}
peer_connection_->SetLocalVideoRenderer(local_renderer_.get());
} else { } else {
ASSERT(false); ASSERT(false);
} }
@ -622,12 +612,8 @@ class ConnectionObserver
video_channel_ = channel_id; video_channel_ = channel_id;
waiting_for_video_ = false; waiting_for_video_ = false;
LOG(INFO) << "Setting video renderer for channel: " << channel_id; LOG(INFO) << "Setting video renderer for channel: " << channel_id;
if (!remote_renderer_.get()) { bool ok = peer_connection_->SetVideoRenderer(channel_id,
remote_renderer_.reset( main_wnd_->handle(), 1, 0.0f, 0.0f, 1.0f, 1.0f);
cricket::VideoRendererFactory::CreateGuiVideoRenderer(352, 288));
}
bool ok = peer_connection_->SetVideoRenderer(stream_id,
remote_renderer_.get());
ASSERT(ok); ASSERT(ok);
} else { } else {
ASSERT(audio_channel_ == -1); ASSERT(audio_channel_ == -1);
@ -788,6 +774,7 @@ class ConnectionObserver
LOG(INFO) << "PEER_CONNECTION_CLOSED"; LOG(INFO) << "PEER_CONNECTION_CLOSED";
DeletePeerConnection(); DeletePeerConnection();
::InvalidateRect(main_wnd_->handle(), NULL, TRUE); ::InvalidateRect(main_wnd_->handle(), NULL, TRUE);
handshake_ = NONE;
waiting_for_audio_ = false; waiting_for_audio_ = false;
waiting_for_video_ = false; waiting_for_video_ = false;
peer_id_ = -1; peer_id_ = -1;
@ -803,12 +790,7 @@ class ConnectionObserver
DisconnectFromServer(); DisconnectFromServer();
} }
} else if (msg == SEND_MESSAGE_TO_PEER) { } else if (msg == SEND_MESSAGE_TO_PEER) {
bool ok = client_->SendToPeer(peer_id_, client_->SendToPeer(peer_id_, *reinterpret_cast<std::string*>(lp));
*reinterpret_cast<std::string*>(lp));
if (!ok) {
LOG(LS_ERROR) << "SendToPeer failed";
DisconnectFromServer();
}
} else { } else {
ret = false; ret = false;
} }
@ -826,8 +808,6 @@ class ConnectionObserver
MainWnd* main_wnd_; MainWnd* main_wnd_;
int video_channel_; int video_channel_;
int audio_channel_; int audio_channel_;
scoped_ptr<cricket::VideoRenderer> local_renderer_;
scoped_ptr<cricket::VideoRenderer> remote_renderer_;
}; };
int PASCAL wWinMain(HINSTANCE instance, HINSTANCE prev_instance, int PASCAL wWinMain(HINSTANCE instance, HINSTANCE prev_instance,

View File

@ -31,7 +31,6 @@
#include "talk/base/common.h" #include "talk/base/common.h"
#include "common_types.h" #include "common_types.h"
#include "modules/interface/module_common_types.h"
#include "video_engine/main/interface/vie_base.h" #include "video_engine/main/interface/vie_base.h"
#include "video_engine/main/interface/vie_capture.h" #include "video_engine/main/interface/vie_capture.h"
#include "video_engine/main/interface/vie_codec.h" #include "video_engine/main/interface/vie_codec.h"

View File

@ -14,7 +14,6 @@
#include "talk/app/voicemediaengine.h" #include "talk/app/voicemediaengine.h"
#include "modules/video_capture/main/interface/video_capture.h" #include "modules/video_capture/main/interface/video_capture.h"
#include "vplib.h"
#ifndef ARRAYSIZE #ifndef ARRAYSIZE
#define ARRAYSIZE(a) (sizeof(a) / sizeof((a)[0])) #define ARRAYSIZE(a) (sizeof(a) / sizeof((a)[0]))
@ -26,213 +25,6 @@ static const int kDefaultLogSeverity = 3;
static const int kStartVideoBitrate = 300; static const int kStartVideoBitrate = 300;
static const int kMaxVideoBitrate = 1000; static const int kMaxVideoBitrate = 1000;
CricketWebRTCVideoFrame::CricketWebRTCVideoFrame() {
}
CricketWebRTCVideoFrame::~CricketWebRTCVideoFrame() {
// TODO(ronghuawu): should the CricketWebRTCVideoFrame owns the buffer?
WebRtc_UWord8* newMemory = NULL;
WebRtc_UWord32 newLength = 0;
WebRtc_UWord32 newSize = 0;
video_frame_.Swap(newMemory, newLength, newSize);
}
void CricketWebRTCVideoFrame::Attach(unsigned char* buffer, int bufferSize,
int w, int h) {
WebRtc_UWord8* newMemory = buffer;
WebRtc_UWord32 newLength = bufferSize;
WebRtc_UWord32 newSize = bufferSize;
video_frame_.Swap(newMemory, newLength, newSize);
video_frame_.SetWidth(w);
video_frame_.SetHeight(h);
}
size_t CricketWebRTCVideoFrame::GetWidth() const {
return video_frame_.Width();
}
size_t CricketWebRTCVideoFrame::GetHeight() const {
return video_frame_.Height();
}
const uint8* CricketWebRTCVideoFrame::GetYPlane() const {
WebRtc_UWord8* buffer = video_frame_.Buffer();
return buffer;
}
const uint8* CricketWebRTCVideoFrame::GetUPlane() const {
WebRtc_UWord8* buffer = video_frame_.Buffer();
if (buffer)
buffer += (video_frame_.Width() * video_frame_.Height());
return buffer;
}
const uint8* CricketWebRTCVideoFrame::GetVPlane() const {
WebRtc_UWord8* buffer = video_frame_.Buffer();
if (buffer)
buffer += (video_frame_.Width() * video_frame_.Height() * 5 / 4);
return buffer;
}
uint8* CricketWebRTCVideoFrame::GetYPlane() {
WebRtc_UWord8* buffer = video_frame_.Buffer();
return buffer;
}
uint8* CricketWebRTCVideoFrame::GetUPlane() {
WebRtc_UWord8* buffer = video_frame_.Buffer();
if (buffer)
buffer += (video_frame_.Width() * video_frame_.Height());
return buffer;
}
uint8* CricketWebRTCVideoFrame::GetVPlane() {
WebRtc_UWord8* buffer = video_frame_.Buffer();
if (buffer)
buffer += (video_frame_.Width() * video_frame_.Height() * 3 / 2);
return buffer;
}
cricket::VideoFrame* CricketWebRTCVideoFrame::Copy() const {
WebRtc_UWord8* buffer = video_frame_.Buffer();
if (buffer) {
int new_buffer_size = video_frame_.Length();
unsigned char* new_buffer = new unsigned char[new_buffer_size];
memcpy(new_buffer, buffer, new_buffer_size);
CricketWebRTCVideoFrame* copy = new CricketWebRTCVideoFrame();
copy->Attach(new_buffer, new_buffer_size,
video_frame_.Width(), video_frame_.Height());
copy->SetTimeStamp(video_frame_.TimeStamp());
copy->SetElapsedTime(elapsed_time_);
return copy;
}
return NULL;
}
size_t CricketWebRTCVideoFrame::CopyToBuffer(
uint8* buffer, size_t size) const {
if (!video_frame_.Buffer()) {
return 0;
}
size_t needed = video_frame_.Length();
if (needed <= size) {
memcpy(buffer, video_frame_.Buffer(), needed);
}
return needed;
}
size_t CricketWebRTCVideoFrame::ConvertToRgbBuffer(uint32 to_fourcc,
uint8* buffer,
size_t size,
size_t pitch_rgb) const {
if (!video_frame_.Buffer()) {
return 0;
}
size_t width = video_frame_.Width();
size_t height = video_frame_.Height();
// See http://www.virtualdub.org/blog/pivot/entry.php?id=190 for a good
// explanation of pitch and why this is the amount of space we need.
size_t needed = pitch_rgb * (height - 1) + 4 * width;
if (needed > size) {
LOG(LS_WARNING) << "RGB buffer is not large enough";
return needed;
}
VideoType outgoingVideoType = kUnknown;
switch (to_fourcc) {
case cricket::FOURCC_ARGB:
outgoingVideoType = kARGB;
break;
default:
LOG(LS_WARNING) << "RGB type not supported: " << to_fourcc;
break;
}
if (outgoingVideoType != kUnknown)
ConvertFromI420(outgoingVideoType, video_frame_.Buffer(),
width, height, buffer);
return needed;
}
// TODO(ronghuawu): Implement StretchToPlanes
void CricketWebRTCVideoFrame::StretchToPlanes(
uint8* y, uint8* u, uint8* v,
int32 dst_pitch_y, int32 dst_pitch_u, int32 dst_pitch_v,
size_t width, size_t height, bool interpolate, bool crop) const {
}
size_t CricketWebRTCVideoFrame::StretchToBuffer(size_t w, size_t h,
uint8* buffer, size_t size,
bool interpolate,
bool crop) const {
if (!video_frame_.Buffer()) {
return 0;
}
size_t needed = video_frame_.Length();
if (needed <= size) {
uint8* bufy = buffer;
uint8* bufu = bufy + w * h;
uint8* bufv = bufu + ((w + 1) >> 1) * ((h + 1) >> 1);
StretchToPlanes(bufy, bufu, bufv, w, (w + 1) >> 1, (w + 1) >> 1, w, h,
interpolate, crop);
}
return needed;
}
void CricketWebRTCVideoFrame::StretchToFrame(cricket::VideoFrame *target,
bool interpolate, bool crop) const {
if (!target) return;
StretchToPlanes(target->GetYPlane(),
target->GetUPlane(),
target->GetVPlane(),
target->GetYPitch(),
target->GetUPitch(),
target->GetVPitch(),
target->GetWidth(),
target->GetHeight(),
interpolate, crop);
target->SetElapsedTime(GetElapsedTime());
target->SetTimeStamp(GetTimeStamp());
}
cricket::VideoFrame* CricketWebRTCVideoFrame::Stretch(size_t w, size_t h,
bool interpolate, bool crop) const {
// TODO(ronghuawu): implement
CricketWebRTCVideoFrame* frame = new CricketWebRTCVideoFrame();
return frame;
}
CricketWebRTCVideoRenderer::CricketWebRTCVideoRenderer
(cricket::VideoRenderer* renderer)
:renderer_(renderer) {
}
CricketWebRTCVideoRenderer::~CricketWebRTCVideoRenderer() {
}
int CricketWebRTCVideoRenderer::FrameSizeChange(unsigned int width,
unsigned int height,
unsigned int numberOfStreams) {
ASSERT(renderer_ != NULL);
width_ = width;
height_ = height;
number_of_streams_ = numberOfStreams;
return renderer_->SetSize(width_, height_, 0) ? 0 : -1;
}
int CricketWebRTCVideoRenderer::DeliverFrame(unsigned char* buffer,
int bufferSize) {
ASSERT(renderer_ != NULL);
video_frame_.Attach(buffer, bufferSize, width_, height_);
return renderer_->RenderFrame(&video_frame_) ? 0 : -1;
}
const RtcVideoEngine::VideoCodecPref RtcVideoEngine::kVideoCodecPrefs[] = { const RtcVideoEngine::VideoCodecPref RtcVideoEngine::kVideoCodecPrefs[] = {
{"VP8", 104, 0}, {"VP8", 104, 0},
{"H264", 105, 1} {"H264", 105, 1}
@ -439,23 +231,29 @@ bool RtcVideoEngine::SetCaptureDevice(const cricket::Device* cam) {
return (capture_id_ != -1); return (capture_id_ != -1);
} }
bool RtcVideoEngine::SetLocalRenderer(cricket::VideoRenderer* renderer) { bool RtcVideoEngine::SetVideoRenderer(int channel_id,
if (!local_renderer_.get()) { void* window,
local_renderer_.reset(new CricketWebRTCVideoRenderer(renderer)); unsigned int zOrder,
} else { float left,
// Renderer already set float top,
return true; float right,
} float bottom) {
int ret; int ret;
ret = video_engine_->render()->AddRenderer(capture_id_, if (channel_id == -1)
kVideoI420, channel_id = capture_id_;
local_renderer_.get()); ret = video_engine_->render()->AddRenderer(
if (ret != 0) channel_id, window, zOrder, left, top, right, bottom);
if (ret !=0 )
return false; return false;
ret = video_engine_->render()->StartRender(capture_id_); ret = video_engine_->render()->StartRender(channel_id);
if (ret !=0 )
return false;
return true;
}
return (ret == 0); bool RtcVideoEngine::SetLocalRenderer(cricket::VideoRenderer* renderer) {
LOG(LS_WARNING) << "Not required call SetLocalRenderer for webrtc";
return false;
} }
cricket::CaptureResult RtcVideoEngine::SetCapture(bool capture) { cricket::CaptureResult RtcVideoEngine::SetCapture(bool capture) {
@ -767,22 +565,7 @@ bool RtcVideoMediaChannel::RemoveStream(uint32 ssrc) {
bool RtcVideoMediaChannel::SetRenderer( bool RtcVideoMediaChannel::SetRenderer(
uint32 ssrc, cricket::VideoRenderer* renderer) { uint32 ssrc, cricket::VideoRenderer* renderer) {
if (!remote_renderer_.get()) { return false;
remote_renderer_.reset(new CricketWebRTCVideoRenderer(renderer));
} else {
// Renderer already set
return true;
}
int ret;
ret = engine_->video_engine()->render()->AddRenderer(video_channel_,
kVideoI420,
remote_renderer_.get());
if (ret != 0)
return false;
ret = engine_->video_engine()->render()->StartRender(video_channel_);
return (ret == 0);
} }
bool RtcVideoMediaChannel::SetExternalRenderer(uint32 ssrc, void* renderer) bool RtcVideoMediaChannel::SetExternalRenderer(uint32 ssrc, void* renderer)
@ -792,11 +575,12 @@ bool RtcVideoMediaChannel::SetExternalRenderer(uint32 ssrc, void* renderer)
video_channel_, video_channel_,
kVideoI420, kVideoI420,
static_cast<ExternalRenderer*>(renderer)); static_cast<ExternalRenderer*>(renderer));
if (ret != 0) if (ret !=0 )
return false; return false;
ret = engine_->video_engine()->render()->StartRender(video_channel_); ret = engine_->video_engine()->render()->StartRender(video_channel_);
if (ret !=0 )
return (ret == 0); return false;
return true;
} }
bool RtcVideoMediaChannel::GetStats(cricket::VideoMediaInfo* info) { bool RtcVideoMediaChannel::GetStats(cricket::VideoMediaInfo* info) {

View File

@ -49,74 +49,6 @@ class RtcVideoMediaChannel;
class RtcVoiceEngine; class RtcVoiceEngine;
class ExternalRenderer; class ExternalRenderer;
// CricketWebRTCVideoFrame only supports I420
class CricketWebRTCVideoFrame : public cricket::VideoFrame {
public:
CricketWebRTCVideoFrame();
~CricketWebRTCVideoFrame();
void Attach(unsigned char* buffer, int bufferSize, int w, int h);
virtual size_t GetWidth() const;
virtual size_t GetHeight() const;
virtual const uint8* GetYPlane() const;
virtual const uint8* GetUPlane() const;
virtual const uint8* GetVPlane() const;
virtual uint8* GetYPlane();
virtual uint8* GetUPlane();
virtual uint8* GetVPlane();
virtual int32 GetYPitch() const { return video_frame_.Width(); }
virtual int32 GetUPitch() const { return video_frame_.Width() / 2; }
virtual int32 GetVPitch() const { return video_frame_.Width() / 2; }
virtual size_t GetPixelWidth() const { return 1; }
virtual size_t GetPixelHeight() const { return 1; }
virtual int64 GetElapsedTime() const { return elapsed_time_; }
virtual int64 GetTimeStamp() const { return video_frame_.TimeStamp(); }
virtual void SetElapsedTime(int64 elapsed_time) {
elapsed_time_ = elapsed_time;
}
virtual void SetTimeStamp(int64 time_stamp) {
video_frame_.SetTimeStamp(time_stamp);
}
virtual VideoFrame* Copy() const;
virtual size_t CopyToBuffer(uint8* buffer, size_t size) const;
virtual size_t ConvertToRgbBuffer(uint32 to_fourcc, uint8* buffer,
size_t size, size_t pitch_rgb) const;
virtual void StretchToPlanes(uint8* y, uint8* u, uint8* v,
int32 pitchY, int32 pitchU, int32 pitchV,
size_t width, size_t height,
bool interpolate, bool crop) const;
virtual size_t StretchToBuffer(size_t w, size_t h, uint8* buffer, size_t size,
bool interpolate, bool crop) const;
virtual void StretchToFrame(VideoFrame* target, bool interpolate,
bool crop) const;
virtual VideoFrame* Stretch(size_t w, size_t h, bool interpolate,
bool crop) const;
private:
webrtc::VideoFrame video_frame_;
int64 elapsed_time_;
};
class CricketWebRTCVideoRenderer : public ExternalRenderer {
public:
CricketWebRTCVideoRenderer(cricket::VideoRenderer* renderer);
virtual int FrameSizeChange(unsigned int width, unsigned int height,
unsigned int numberOfStreams);
virtual int DeliverFrame(unsigned char* buffer, int bufferSize);
virtual ~CricketWebRTCVideoRenderer();
private:
cricket::VideoRenderer* renderer_;
CricketWebRTCVideoFrame video_frame_;
unsigned int width_;
unsigned int height_;
unsigned int number_of_streams_;
};
class RtcVideoEngine : public ViEBaseObserver, public TraceCallback { class RtcVideoEngine : public ViEBaseObserver, public TraceCallback {
public: public:
RtcVideoEngine(); RtcVideoEngine();
@ -140,6 +72,13 @@ class RtcVideoEngine : public ViEBaseObserver, public TraceCallback {
bool SetOptions(int options); bool SetOptions(int options);
//TODO - need to change this interface for webrtc //TODO - need to change this interface for webrtc
bool SetCaptureDevice(const cricket::Device* device); bool SetCaptureDevice(const cricket::Device* device);
bool SetVideoRenderer(int channel_id,
void* window,
unsigned int zOrder,
float left,
float top,
float right,
float bottom);
bool SetLocalRenderer(cricket::VideoRenderer* renderer); bool SetLocalRenderer(cricket::VideoRenderer* renderer);
cricket::CaptureResult SetCapture(bool capture); cricket::CaptureResult SetCapture(bool capture);
const std::vector<cricket::VideoCodec>& codecs() const; const std::vector<cricket::VideoCodec>& codecs() const;
@ -194,7 +133,6 @@ class RtcVideoEngine : public ViEBaseObserver, public TraceCallback {
cricket::VideoEncoderConfig default_encoder_config_; cricket::VideoEncoderConfig default_encoder_config_;
cricket::VideoCodec default_codec_; cricket::VideoCodec default_codec_;
bool capture_started_; bool capture_started_;
talk_base::scoped_ptr<CricketWebRTCVideoRenderer> local_renderer_;
}; };
class RtcVideoMediaChannel: public cricket::VideoMediaChannel, class RtcVideoMediaChannel: public cricket::VideoMediaChannel,
@ -250,7 +188,6 @@ class RtcVideoMediaChannel: public cricket::VideoMediaChannel,
bool sending_; bool sending_;
bool render_started_; bool render_started_;
webrtc::VideoCodec send_codec_; webrtc::VideoCodec send_codec_;
talk_base::scoped_ptr<CricketWebRTCVideoRenderer> remote_renderer_;
}; };
} }

View File

@ -0,0 +1,137 @@
// Copyright 2011 Google Inc. All Rights Reserved.
// Author: mallinath@google.com (Mallinath Bareddy)
#include "talk/app/webrtcchannelmanager.h"
namespace webrtc {
// Message payload for marshalling a camera/capture-device selection onto the
// worker thread.
// NOTE(review): not referenced anywhere in the code shown in this file —
// presumably consumed by a SetVideoOptions-style handler elsewhere; confirm.
struct VideoCaptureDeviceParams : public talk_base::MessageData {
  VideoCaptureDeviceParams(const std::string& cam_device)
      : cam_device(cam_device),
        result(false) {}
  const std::string cam_device;  // device name (or capture session id).
  bool result;  // intended to carry the worker-thread result — TODO confirm.
};
// Message payload carrying the arguments of SetVideoRenderer() to the worker
// thread; unpacked by OnMessage() and forwarded to SetVideoRenderer_w().
struct RenderParams : public talk_base::MessageData {
  RenderParams(int channel_id,
               void* window,
               unsigned int zOrder,
               float left,
               float top,
               float right,
               float bottom)
      : channel_id(channel_id),
        window(window),
        zOrder(zOrder),
        left(left),
        top(top),
        right(right),
        bottom(bottom),
        // Fix: |result| was previously left uninitialized, unlike the sibling
        // VideoCaptureDeviceParams struct. Initialize it so readers never see
        // an indeterminate bool.
        result(false) {}
  int channel_id;       // render channel; -1 selects the local capture preview.
  void* window;         // native window handle to render into.
  unsigned int zOrder;  // stacking order passed to the renderer.
  float left;           // viewport edges — presumably normalized; TODO confirm.
  float top;
  float right;
  float bottom;
  bool result;          // NOTE(review): never written by the handler shown here.
};
// Initializes the underlying cricket::ChannelManager if it is not already
// initialized. Returns true if the manager is (or was already) initialized.
bool WebRtcChannelManager::Init() {
  return MaybeInit();
}
// Lazily initializes the manager, then creates a voice channel for session
// |s|. Returns NULL if initialization fails.
cricket::VoiceChannel* WebRtcChannelManager::CreateVoiceChannel(
    cricket::BaseSession* s, const std::string& content_name, bool rtcp) {
  if (!MaybeInit())
    return NULL;
  return ChannelManager::CreateVoiceChannel(s, content_name, rtcp);
}
// Lazily initializes the manager, then creates a video channel for session
// |s|, paired with voice channel |vc|. Returns NULL if initialization fails.
cricket::VideoChannel* WebRtcChannelManager::CreateVideoChannel(
    cricket::BaseSession* s, const std::string& content_name, bool rtcp,
    cricket::VoiceChannel* vc) {
  if (!MaybeInit())
    return NULL;
  return ChannelManager::CreateVideoChannel(s, content_name, rtcp, vc);
}
// Lazily initializes the manager, then creates a soundclip object.
// Returns NULL if initialization fails.
cricket::Soundclip* WebRtcChannelManager::CreateSoundclip() {
  if (!MaybeInit())
    return NULL;
  return ChannelManager::CreateSoundclip();
}
// Destroys |vc|, then terminates the manager if no channels remain.
void WebRtcChannelManager::DestroyVoiceChannel(cricket::VoiceChannel* vc) {
  ChannelManager::DestroyVoiceChannel(vc);
  MaybeTerm();
}
// Destroys |vc|, then terminates the manager if no channels remain.
void WebRtcChannelManager::DestroyVideoChannel(cricket::VideoChannel* vc) {
  ChannelManager::DestroyVideoChannel(vc);
  MaybeTerm();
}
// Destroys soundclip |s|, then terminates the manager if no channels remain.
void WebRtcChannelManager::DestroySoundclip(cricket::Soundclip* s) {
  ChannelManager::DestroySoundclip(s);
  MaybeTerm();
}
// Performs one-time initialization of the base ChannelManager. Subsequent
// calls are no-ops that simply report success.
bool WebRtcChannelManager::MaybeInit() {
  if (initialized())
    return true;
  return ChannelManager::Init();
}
// Shuts the manager down once it is initialized and the last channel has
// been destroyed.
void WebRtcChannelManager::MaybeTerm() {
  if (!initialized() || has_channels())
    return;
  Terminate();
}
// Marshals a renderer-attach request onto the worker thread (handled by
// OnMessage / SetVideoRenderer_w). Pass channel_id == -1 for the local
// capture preview. Returns false if the manager cannot be initialized.
bool WebRtcChannelManager::SetVideoRenderer(int channel_id,
                                            void* window,
                                            unsigned int zOrder,
                                            float left,
                                            float top,
                                            float right,
                                            float bottom) {
  if (!MaybeInit())
    return false;
  RenderParams params(channel_id, window, zOrder, left, top, right, bottom);
  return cricket::ChannelManager::Send(MSG_SETRTC_VIDEORENDERER, &params);
}
// Worker-thread half of SetVideoRenderer(): forwards the window handle and
// viewport to the media engine for |channel_id|. Must run on the worker
// thread (asserted below).
// NOTE(review): the media engine call's result is discarded here, so
// RenderParams::result is never propagated back to the caller — confirm
// whether failures need to be surfaced.
void WebRtcChannelManager::SetVideoRenderer_w(int channel_id,
                                              void* window,
                                              unsigned int zOrder,
                                              float left,
                                              float top,
                                              float right,
                                              float bottom) {
  ASSERT(worker_thread() == talk_base::Thread::Current());
  ASSERT(initialized());
  media_engine()->SetVideoRenderer(channel_id, window, zOrder, left, top,
                                   right, bottom);
}
// Worker-thread message dispatcher. Our single custom message forwards a
// marshalled SetVideoRenderer() call; everything else is delegated to the
// base ChannelManager.
void WebRtcChannelManager::OnMessage(talk_base::Message *message) {
  if (message->message_id != MSG_SETRTC_VIDEORENDERER) {
    ChannelManager::OnMessage(message);
    return;
  }
  RenderParams* params = static_cast<RenderParams*>(message->pdata);
  SetVideoRenderer_w(params->channel_id, params->window, params->zOrder,
                     params->left, params->top, params->right, params->bottom);
}
} // namespace webrtc

View File

@ -0,0 +1,68 @@
// Copyright 2011 Google Inc. All Rights Reserved.
// Author: mallinath@google.com (Mallinath Bareddy)
#ifndef TALK_APP_WEBRTC_WEBRTCCHANNELMANAGER_H_
#define TALK_APP_WEBRTC_WEBRTCCHANNELMANAGER_H_
#include "talk/session/phone/channelmanager.h"
namespace webrtc {
class AudioDeviceModule;
// Message id for marshalling SetVideoRenderer onto the worker thread.
// NOTE(review): starts at 21 presumably to avoid clashing with the base
// ChannelManager's message ids — confirm against channelmanager.h.
enum {
  MSG_SETRTC_VIDEORENDERER = 21,  // Set internal video renderer
};

// A cricket::ChannelManager wrapper that initializes the underlying manager
// lazily on first use and terminates it automatically once the last channel
// or soundclip is destroyed.
class WebRtcChannelManager : public cricket::ChannelManager {
 public:
  // Constructs with the worker thread used for media processing.
  // NOTE(review): single-argument ctor is not explicit — consider marking it
  // explicit to prevent accidental Thread* -> manager conversions.
  WebRtcChannelManager(talk_base::Thread* worker_thread)
      : ChannelManager(worker_thread) {
  }
  // Constructs with an explicit media engine and device manager (used by
  // tests/custom setups).
  WebRtcChannelManager(cricket::MediaEngine* me, cricket::DeviceManager* dm,
                       talk_base::Thread* worker_thread)
      : ChannelManager(me, dm, worker_thread) {
  }

  // Ensures the underlying ChannelManager is initialized; returns false on
  // failure.
  bool Init();

  // Channel factories: each lazily initializes the manager first and returns
  // NULL if that fails.
  cricket::VoiceChannel* CreateVoiceChannel(
      cricket::BaseSession* s, const std::string& content_name, bool rtcp);
  cricket::VideoChannel* CreateVideoChannel(
      cricket::BaseSession* s, const std::string& content_name, bool rtcp,
      cricket::VoiceChannel* vc);
  cricket::Soundclip* CreateSoundclip();

  // Channel destructors: each tears the manager down when no channels remain.
  void DestroyVoiceChannel(cricket::VoiceChannel* vc);
  void DestroyVideoChannel(cricket::VideoChannel* vc);
  void DestroySoundclip(cricket::Soundclip* s);

  // Attaches a native window to the renderer for |channel_id| (use -1 for
  // the local preview). Executed on the worker thread via OnMessage.
  bool SetVideoRenderer(int channel_id,
                        void* window,
                        unsigned int zOrder,
                        float left,
                        float top,
                        float right,
                        float bottom);

 private:
  // Initializes on first use / terminates when idle (see .cc for details).
  bool MaybeInit();
  void MaybeTerm();
  // NOTE(review): declared but no definition appears in the accompanying
  // .cc file as shown — confirm it is implemented (or remove).
  void SetExternalAdm_w(AudioDeviceModule* external_adm);
  // Worker-thread implementation of SetVideoRenderer().
  void SetVideoRenderer_w(int channel_id,
                          void* window,
                          unsigned int zOrder,
                          float left,
                          float top,
                          float right,
                          float bottom);
  // Worker-thread message dispatcher; handles MSG_SETRTC_VIDEORENDERER and
  // forwards everything else to the base class.
  void OnMessage(talk_base::Message *message);
};
} // namespace webrtc
#endif /* TALK_APP_WEBRTC_WEBRTCCHANNELMANAGER_H_ */

View File

@ -29,19 +29,20 @@
#include <string> #include <string>
#include <vector> #include <vector>
#include "talk/app/pc_transport_impl.h"
#include "talk/app/peerconnection.h"
#include "talk/app/webrtc_json.h"
#include "talk/base/common.h" #include "talk/base/common.h"
#include "talk/base/json.h" #include "talk/base/json.h"
#include "talk/base/scoped_ptr.h" #include "talk/base/scoped_ptr.h"
#include "talk/p2p/base/constants.h" #include "talk/p2p/base/constants.h"
#include "talk/p2p/base/sessiondescription.h" #include "talk/p2p/base/sessiondescription.h"
#include "talk/p2p/base/p2ptransport.h" #include "talk/p2p/base/p2ptransport.h"
#include "talk/session/phone/channel.h"
#include "talk/session/phone/channelmanager.h"
#include "talk/session/phone/mediasessionclient.h" #include "talk/session/phone/mediasessionclient.h"
#include "talk/session/phone/channel.h"
#include "talk/session/phone/voicechannel.h" #include "talk/session/phone/voicechannel.h"
#include "talk/session/phone/channelmanager.h"
#include "talk/app/webrtc_json.h"
#include "talk/app/webrtcchannelmanager.h"
#include "talk/app/peerconnection.h"
#include "talk/app/pc_transport_impl.h"
using namespace cricket; using namespace cricket;
@ -54,7 +55,7 @@ enum {
MSG_RTC_SETVIDEOCAPTURE = 4, MSG_RTC_SETVIDEOCAPTURE = 4,
MSG_RTC_CANDIDATETIMEOUT = 5, MSG_RTC_CANDIDATETIMEOUT = 5,
MSG_RTC_SETEXTERNALRENDERER = 6, MSG_RTC_SETEXTERNALRENDERER = 6,
MSG_RTC_SETCRICKETRENDERER = 7, MSG_RTC_SETRENDERER = 7,
MSG_RTC_CHANNELENABLE = 8, MSG_RTC_CHANNELENABLE = 8,
MSG_RTC_SIGNALONWRITABLESTATE = 9, MSG_RTC_SIGNALONWRITABLESTATE = 9,
MSG_RTC_DESTROYVOICECHANNEL = 10, MSG_RTC_DESTROYVOICECHANNEL = 10,
@ -106,15 +107,29 @@ struct ExternalRenderParams : public talk_base::MessageData {
bool result; bool result;
}; };
struct CricketRenderParams : public talk_base::MessageData { struct RenderParams : public talk_base::MessageData {
CricketRenderParams(const std::string& stream_id, RenderParams(int channel_id,
cricket::VideoRenderer* renderer) void* window,
: stream_id(stream_id), unsigned int zOrder,
renderer(renderer), float left,
result(false) {} float top,
float right,
float bottom)
:channel_id(channel_id)
,window(window)
,zOrder(zOrder)
,left(left)
,top(top)
,right(right)
,bottom(bottom) {}
const std::string stream_id; int channel_id;
cricket::VideoRenderer* renderer; void* window;
unsigned int zOrder;
float left;
float top;
float right;
float bottom;
bool result; bool result;
}; };
@ -144,7 +159,7 @@ WebRTCSessionImpl::WebRTCSessionImpl(
const std::string& id, const std::string& id,
const std::string& direction, const std::string& direction,
cricket::PortAllocator* allocator, cricket::PortAllocator* allocator,
cricket::ChannelManager* channelmgr, WebRtcChannelManager* channelmgr,
PeerConnection* connection, PeerConnection* connection,
talk_base::Thread* signaling_thread) talk_base::Thread* signaling_thread)
: WebRTCSession(id, direction, allocator, connection, signaling_thread), : WebRTCSession(id, direction, allocator, connection, signaling_thread),
@ -188,7 +203,7 @@ bool WebRTCSessionImpl::CreateVoiceChannel(const std::string& stream_id) {
this, &WebRTCSessionImpl::OnVoiceChannelCreated); this, &WebRTCSessionImpl::OnVoiceChannelCreated);
signaling_thread_->Post(this, MSG_RTC_CREATEAUDIOCHANNEL, signaling_thread_->Post(this, MSG_RTC_CREATEAUDIOCHANNEL,
new CreateChannelParams(stream_id, true, NULL)); new CreateChannelParams(stream_id, false, NULL));
return true; return true;
} }
@ -240,7 +255,7 @@ bool WebRTCSessionImpl::CreateVideoChannel(const std::string& stream_id) {
this, &WebRTCSessionImpl::OnVideoChannelCreated); this, &WebRTCSessionImpl::OnVideoChannelCreated);
signaling_thread_->Post(this, MSG_RTC_CREATEVIDEOCHANNEL, signaling_thread_->Post(this, MSG_RTC_CREATEVIDEOCHANNEL,
new CreateChannelParams(stream_id, true, NULL)); new CreateChannelParams(stream_id, false, NULL));
return true; return true;
} }
@ -288,33 +303,6 @@ void WebRTCSessionImpl::OnVideoChannelCreated(
} }
} }
bool WebRTCSessionImpl::SetVideoRenderer(const std::string& stream_id,
cricket::VideoRenderer* renderer) {
if(signaling_thread_ != talk_base::Thread::Current()) {
signaling_thread_->Post(this, MSG_RTC_SETCRICKETRENDERER,
new CricketRenderParams(stream_id, renderer),
true);
return true;
}
ASSERT(signaling_thread_ == talk_base::Thread::Current());
bool ret = false;
StreamMap::iterator iter;
for (iter = streams_.begin(); iter != streams_.end(); ++iter) {
StreamInfo* stream_info = (*iter);
if (stream_info->stream_id.compare(stream_id) == 0) {
ASSERT(stream_info->channel != NULL);
ASSERT(stream_info->video);
cricket::VideoChannel* channel = static_cast<cricket::VideoChannel*>(
stream_info->channel);
ret = channel->SetRenderer(0, renderer);
break;
}
}
return ret;
}
bool WebRTCSessionImpl::SetVideoRenderer(const std::string& stream_id, bool WebRTCSessionImpl::SetVideoRenderer(const std::string& stream_id,
ExternalRenderer* external_renderer) { ExternalRenderer* external_renderer) {
if(signaling_thread_ != talk_base::Thread::Current()) { if(signaling_thread_ != talk_base::Thread::Current()) {
@ -342,6 +330,30 @@ bool WebRTCSessionImpl::SetVideoRenderer(const std::string& stream_id,
return ret; return ret;
} }
bool WebRTCSessionImpl::SetVideoRenderer(int channel_id,
void* window,
unsigned int zOrder,
float left,
float top,
float right,
float bottom) {
signaling_thread_->Post(this, MSG_RTC_SETRENDERER,
new RenderParams(channel_id, window, zOrder, left, top, right, bottom),
true);
return true;
}
bool WebRTCSessionImpl::SetVideoRenderer_w(int channel_id,
void* window,
unsigned int zOrder,
float left,
float top,
float right,
float bottom) {
ASSERT(signaling_thread_ == talk_base::Thread::Current());
return channel_manager_->SetVideoRenderer(channel_id, window, zOrder, left, top, right, bottom);
}
void WebRTCSessionImpl::OnMessage(talk_base::Message* message) { void WebRTCSessionImpl::OnMessage(talk_base::Message* message) {
using talk_base::TypedMessageData; using talk_base::TypedMessageData;
talk_base::MessageData* data = message->pdata; talk_base::MessageData* data = message->pdata;
@ -408,25 +420,31 @@ void WebRTCSessionImpl::OnMessage(talk_base::Message* message) {
break; break;
} }
case MSG_RTC_SETVIDEOCAPTURE : { case MSG_RTC_SETVIDEOCAPTURE : {
CaptureParams* p = reinterpret_cast<CaptureParams*>(data); CaptureParams* p = static_cast<CaptureParams*>(data);
p->result = SetVideoCapture_w(p->capture); p->result = SetVideoCapture_w(p->capture);
delete p; delete p;
break; break;
} }
case MSG_RTC_SETEXTERNALRENDERER : { case MSG_RTC_SETEXTERNALRENDERER : {
ExternalRenderParams* p = reinterpret_cast<ExternalRenderParams*>(data); ExternalRenderParams* p = static_cast<ExternalRenderParams*> (data);
p->result = SetVideoRenderer(p->stream_id, p->external_renderer); p->result = SetVideoRenderer(p->stream_id, p->external_renderer);
delete p; delete p;
break; break;
} }
case MSG_RTC_SETCRICKETRENDERER : { case MSG_RTC_SETRENDERER : {
CricketRenderParams* p = reinterpret_cast<CricketRenderParams*>(data); RenderParams* p = static_cast<RenderParams*> (data);
p->result = SetVideoRenderer(p->stream_id, p->renderer); p->result = SetVideoRenderer_w(p->channel_id,
p->window,
p->zOrder,
p->left,
p->top,
p->right,
p->bottom);
delete p; delete p;
break; break;
} }
case MSG_RTC_CHANNELENABLE : { case MSG_RTC_CHANNELENABLE : {
ChannelEnableParams* p = reinterpret_cast<ChannelEnableParams*>(data); ChannelEnableParams* p = static_cast<ChannelEnableParams*> (data);
ChannelEnable_w(p->channel, p->enable); ChannelEnable_w(p->channel, p->enable);
delete p; delete p;
break; break;
@ -689,6 +707,8 @@ void WebRTCSessionImpl::DestroyChannel(
break; break;
} }
} }
ASSERT(found);
} }
void WebRTCSessionImpl::DestroyVoiceChannel_w( void WebRTCSessionImpl::DestroyVoiceChannel_w(

View File

@ -79,6 +79,7 @@ typedef std::vector<cricket::VideoCodec> VideoCodecs;
class ExternalRenderer; class ExternalRenderer;
class PeerConnection; class PeerConnection;
class WebRtcChannelManager;
class WebRTCSessionImpl: public WebRTCSession { class WebRTCSessionImpl: public WebRTCSession {
@ -87,7 +88,7 @@ class WebRTCSessionImpl: public WebRTCSession {
WebRTCSessionImpl(const std::string& id, WebRTCSessionImpl(const std::string& id,
const std::string& direction, const std::string& direction,
cricket::PortAllocator* allocator, cricket::PortAllocator* allocator,
cricket::ChannelManager* channelmgr, WebRtcChannelManager* channelmgr,
PeerConnection* connection, PeerConnection* connection,
talk_base::Thread* signaling_thread); talk_base::Thread* signaling_thread);
@ -129,11 +130,15 @@ class WebRTCSessionImpl: public WebRTCSession {
void OnStateChange(P2PTransportClass::State state, void OnStateChange(P2PTransportClass::State state,
cricket::TransportChannel* channel); cricket::TransportChannel* channel);
void OnMessageReceived(const char* data, size_t data_size); void OnMessageReceived(const char* data, size_t data_size);
bool SetVideoRenderer(const std::string& stream_id,
cricket::VideoRenderer* renderer);
bool SetVideoRenderer(const std::string& stream_id, bool SetVideoRenderer(const std::string& stream_id,
ExternalRenderer* external_renderer); ExternalRenderer* external_renderer);
bool SetVideoRenderer(int channel_id,
void* window,
unsigned int zOrder,
float left,
float top,
float right,
float bottom);
sigslot::signal2<cricket::VideoChannel*, std::string&> SignalVideoChannel; sigslot::signal2<cricket::VideoChannel*, std::string&> SignalVideoChannel;
sigslot::signal2<cricket::VoiceChannel*, std::string&> SignalVoiceChannel; sigslot::signal2<cricket::VoiceChannel*, std::string&> SignalVoiceChannel;
sigslot::signal1<WebRTCSessionImpl*> SignalOnRemoveStream; sigslot::signal1<WebRTCSessionImpl*> SignalOnRemoveStream;
@ -150,6 +155,13 @@ class WebRTCSessionImpl: public WebRTCSession {
} }
private: private:
bool SetVideoRenderer_w(int channel_id,
void* window,
unsigned int zOrder,
float left,
float top,
float right,
float bottom);
void ChannelEnable_w(cricket::BaseChannel* channel, bool enable); void ChannelEnable_w(cricket::BaseChannel* channel, bool enable);
void OnVoiceChannelError(cricket::VoiceChannel* voice_channel, uint32 ssrc, void OnVoiceChannelError(cricket::VoiceChannel* voice_channel, uint32 ssrc,
@ -220,7 +232,7 @@ class WebRTCSessionImpl: public WebRTCSession {
void SendLocalDescription_w(); void SendLocalDescription_w();
cricket::ChannelManager* channel_manager_; WebRtcChannelManager* channel_manager_;
std::vector<StreamInfo*> streams_; std::vector<StreamInfo*> streams_;
TransportChannelMap transport_channels_; TransportChannelMap transport_channels_;
bool all_writable_; bool all_writable_;

View File

@ -0,0 +1,798 @@
/*
* libjingle
* Copyright 2004--2008, Google Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
* EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "talk/session/phone/channelmanager.h"
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif
#include <algorithm>
#include "talk/base/common.h"
#include "talk/base/logging.h"
#include "talk/base/sigslotrepeater.h"
#include "talk/base/stringencode.h"
#include "talk/session/phone/mediaengine.h"
#include "talk/session/phone/soundclip.h"
namespace cricket {
// Message IDs marshalled from the caller's thread to the worker thread via
// ChannelManager::Send()/OnMessage().  MSG_CAMERASTARTED is the exception:
// it is posted back from the worker to the main thread.
enum {
  MSG_CREATEVOICECHANNEL = 1,
  MSG_DESTROYVOICECHANNEL = 2,
  MSG_SETAUDIOOPTIONS = 3,
  MSG_GETOUTPUTVOLUME = 4,
  MSG_SETOUTPUTVOLUME = 5,
  MSG_SETLOCALMONITOR = 6,
  MSG_SETVOICELOGGING = 7,
  // Note: 8-10 are unused; video messages intentionally start at 11.
  MSG_CREATEVIDEOCHANNEL = 11,
  MSG_DESTROYVIDEOCHANNEL = 12,
  MSG_SETVIDEOOPTIONS = 13,
  MSG_SETLOCALRENDERER = 14,
  MSG_SETDEFAULTVIDEOENCODERCONFIG = 15,
  MSG_SETVIDEOLOGGING = 16,
  MSG_CREATESOUNDCLIP = 17,
  MSG_DESTROYSOUNDCLIP = 18,
  MSG_CAMERASTARTED = 19,
  MSG_SETVIDEOCAPTURE = 20,
};
// Payload for MSG_CREATEVOICECHANNEL / MSG_CREATEVIDEOCHANNEL.  The worker
// thread fills in voice_channel or video_channel with the created channel.
struct CreationParams : public talk_base::MessageData {
  CreationParams(BaseSession* session, const std::string& content_name,
                 bool rtcp, VoiceChannel* voice_channel)
      : session(session),
        content_name(content_name),
        rtcp(rtcp),
        voice_channel(voice_channel),
        video_channel(NULL) {}
  BaseSession* session;
  std::string content_name;
  bool rtcp;
  VoiceChannel* voice_channel;  // in for video creation; out for voice creation
  VideoChannel* video_channel;  // out: created channel, NULL on failure
};
// Payload for MSG_SETAUDIOOPTIONS: option flags plus input/output devices.
struct AudioOptions : public talk_base::MessageData {
  AudioOptions(int o, const Device* in, const Device* out)
      : options(o), in_device(in), out_device(out) {}
  int options;
  const Device* in_device;
  const Device* out_device;
  bool result;  // out: success of the worker-thread operation
};
// Payload for MSG_GETOUTPUTVOLUME / MSG_SETOUTPUTVOLUME.
struct VolumeLevel : public talk_base::MessageData {
  VolumeLevel() : level(-1), result(false) {}
  explicit VolumeLevel(int l) : level(l), result(false) {}
  int level;    // in for set, out for get; -1 means "not yet read"
  bool result;  // out: success of the worker-thread operation
};
// Payload for MSG_SETVIDEOOPTIONS: the capture device to select.
struct VideoOptions : public talk_base::MessageData {
  explicit VideoOptions(const Device* d) : cam_device(d), result(false) {}
  const Device* cam_device;
  bool result;  // out: success of the worker-thread operation
};
// Payload for MSG_SETDEFAULTVIDEOENCODERCONFIG.
struct DefaultVideoEncoderConfig : public talk_base::MessageData {
  explicit DefaultVideoEncoderConfig(const VideoEncoderConfig& c)
      : config(c), result(false) {}
  VideoEncoderConfig config;
  bool result;  // out: success of the worker-thread operation
};
// Payload for MSG_SETLOCALMONITOR: enable/disable the mic loopback monitor.
struct LocalMonitor : public talk_base::MessageData {
  explicit LocalMonitor(bool e) : enable(e), result(false) {}
  bool enable;
  bool result;  // out: success of the worker-thread operation
};
// Payload for MSG_SETLOCALRENDERER: renderer for the local camera preview.
struct LocalRenderer : public talk_base::MessageData {
  explicit LocalRenderer(VideoRenderer* r) : renderer(r), result(false) {}
  VideoRenderer* renderer;
  bool result;  // out: success of the worker-thread operation
};
// Payload for MSG_SETVOICELOGGING / MSG_SETVIDEOLOGGING.  The filter is
// copied into a std::string so the message can outlive the caller's buffer.
struct LoggingOptions : public talk_base::MessageData {
  explicit LoggingOptions(int lev, const char* f) : level(lev), filter(f) {}
  int level;
  std::string filter;
};
// Payload for MSG_SETVIDEOCAPTURE: start/stop camera capture.
struct CaptureParams : public talk_base::MessageData {
  explicit CaptureParams(bool c) : capture(c), result(CR_FAILURE) {}
  bool capture;
  CaptureResult result;  // out: defaults to CR_FAILURE if never processed
};
// Constructs a ChannelManager with the default media engine and device
// manager.  Must be created on the thread that will receive
// MSG_CAMERASTARTED callbacks (recorded as main_thread_).
ChannelManager::ChannelManager(talk_base::Thread* worker_thread)
    : media_engine_(MediaEngine::Create()),
      device_manager_(new DeviceManager()),
      initialized_(false),
      main_thread_(talk_base::Thread::Current()),
      worker_thread_(worker_thread),
      audio_in_device_(DeviceManager::kDefaultDeviceName),
      audio_out_device_(DeviceManager::kDefaultDeviceName),
      audio_options_(MediaEngine::DEFAULT_AUDIO_OPTIONS),
      local_renderer_(NULL),
      capturing_(false),
      monitoring_(false) {
  Construct();
}
// Injection constructor (used by tests): takes ownership of the supplied
// media engine and device manager.
ChannelManager::ChannelManager(MediaEngine* me, DeviceManager* dm,
                               talk_base::Thread* worker_thread)
    : media_engine_(me),
      device_manager_(dm),
      initialized_(false),
      main_thread_(talk_base::Thread::Current()),
      worker_thread_(worker_thread),
      audio_in_device_(DeviceManager::kDefaultDeviceName),
      audio_out_device_(DeviceManager::kDefaultDeviceName),
      audio_options_(MediaEngine::DEFAULT_AUDIO_OPTIONS),
      local_renderer_(NULL),
      capturing_(false),
      monitoring_(false) {
  Construct();
}
// Shared constructor body: initializes the device manager, picks the
// default camera, and wires up capture-start callbacks.
void ChannelManager::Construct() {
  // Init the device manager immediately, and set up our default video device.
  SignalDevicesChange.repeat(device_manager_->SignalDevicesChange);
  device_manager_->Init();
  // Set camera_device_ to the name of the default video capturer.
  SetVideoOptions(DeviceManager::kDefaultDeviceName);
  // Camera is started asynchronously, request callbacks when startup
  // completes to be able to forward them to the rendering manager.
  media_engine_->SignalVideoCaptureResult.connect(
      this, &ChannelManager::OnVideoCaptureResult);
}
// Tears down any channels/soundclips and the media engine if still running.
ChannelManager::~ChannelManager() {
  if (initialized_)
    Terminate();
}
// Returns the capability flags supported by both the media engine and the
// device manager (bitwise intersection).
int ChannelManager::GetCapabilities() {
  return media_engine_->GetCapabilities() & device_manager_->GetCapabilities();
}
void ChannelManager::GetSupportedAudioCodecs(
std::vector<AudioCodec>* codecs) const {
codecs->clear();
for (std::vector<AudioCodec>::const_iterator it =
media_engine_->audio_codecs().begin();
it != media_engine_->audio_codecs().end(); ++it) {
codecs->push_back(*it);
}
}
void ChannelManager::GetSupportedVideoCodecs(
std::vector<VideoCodec>* codecs) const {
codecs->clear();
std::vector<VideoCodec>::const_iterator it;
for (it = media_engine_->video_codecs().begin();
it != media_engine_->video_codecs().end(); ++it) {
codecs->push_back(*it);
}
}
// Initializes the media engine and re-applies all stored device/codec
// preferences.  Requires a started worker thread.  Returns true on success.
// NOTE(review): the save/restore dance around the preferred_* locals is
// deliberate — a temporarily unplugged device must not overwrite the user's
// stored preference.  Keep the ordering intact.
bool ChannelManager::Init() {
  ASSERT(!initialized_);
  if (initialized_) {
    return false;
  }
  ASSERT(worker_thread_ != NULL);
  if (worker_thread_ && worker_thread_->started()) {
    if (media_engine_->Init()) {
      initialized_ = true;
      // Now that we're initialized, apply any stored preferences. A preferred
      // device might have been unplugged. In this case, we fallback to the
      // default device but keep the user preferences. The preferences are
      // changed only when the Javascript FE changes them.
      const std::string preferred_audio_in_device = audio_in_device_;
      const std::string preferred_audio_out_device = audio_out_device_;
      const std::string preferred_camera_device = camera_device_;
      Device device;
      if (!device_manager_->GetAudioInputDevice(audio_in_device_, &device)) {
        LOG(LS_WARNING) << "The preferred microphone '" << audio_in_device_
                        << "' is unavailable. Fall back to the default.";
        audio_in_device_ = DeviceManager::kDefaultDeviceName;
      }
      if (!device_manager_->GetAudioOutputDevice(audio_out_device_, &device)) {
        LOG(LS_WARNING) << "The preferred speaker '" << audio_out_device_
                        << "' is unavailable. Fall back to the default.";
        audio_out_device_ = DeviceManager::kDefaultDeviceName;
      }
      if (!device_manager_->GetVideoCaptureDevice(camera_device_, &device)) {
        if (!camera_device_.empty()) {
          LOG(LS_WARNING) << "The preferred camera '" << camera_device_
                          << "' is unavailable. Fall back to the default.";
        }
        camera_device_ = DeviceManager::kDefaultDeviceName;
      }
      if (!SetAudioOptions(audio_in_device_, audio_out_device_,
                           audio_options_)) {
        LOG(LS_WARNING) << "Failed to SetAudioOptions with"
                        << " microphone: " << audio_in_device_
                        << " speaker: " << audio_out_device_
                        << " options: " << audio_options_;
      }
      if (!SetVideoOptions(camera_device_) && !camera_device_.empty()) {
        LOG(LS_WARNING) << "Failed to SetVideoOptions with camera: "
                        << camera_device_;
      }
      // Restore the user preferences.
      audio_in_device_ = preferred_audio_in_device;
      audio_out_device_ = preferred_audio_out_device;
      camera_device_ = preferred_camera_device;
      // Now apply the default video codec that has been set earlier.
      if (default_video_encoder_config_.max_codec.id != 0) {
        SetDefaultVideoEncoderConfig(default_video_encoder_config_);
      }
      // And the local renderer.
      if (local_renderer_) {
        SetLocalRenderer(local_renderer_);
      }
    }
  }
  return initialized_;
}
// Destroys every outstanding channel/soundclip, then shuts down the media
// engine.  Safe to call only after a successful Init().
void ChannelManager::Terminate() {
  ASSERT(initialized_);
  if (!initialized_) {
    return;
  }
  // Need to destroy the voice/video channels.  The *_w destroy functions
  // erase from the back of each vector, so these loops terminate.
  while (!video_channels_.empty()) {
    DestroyVideoChannel_w(video_channels_.back());
  }
  while (!voice_channels_.empty()) {
    DestroyVoiceChannel_w(voice_channels_.back());
  }
  while (!soundclips_.empty()) {
    DestroySoundclip_w(soundclips_.back());
  }
  media_engine_->Terminate();
  initialized_ = false;
}
// Creates a voice channel synchronously on the worker thread.
// Returns NULL if the manager is not initialized or creation fails.
VoiceChannel* ChannelManager::CreateVoiceChannel(
    BaseSession* session, const std::string& content_name, bool rtcp) {
  CreationParams params(session, content_name, rtcp, NULL);
  return (Send(MSG_CREATEVOICECHANNEL, &params)) ? params.voice_channel : NULL;
}
// Worker-thread implementation of CreateVoiceChannel.  Allocates the media
// channel from the engine and registers the new VoiceChannel.
VoiceChannel* ChannelManager::CreateVoiceChannel_w(
    BaseSession* session, const std::string& content_name, bool rtcp) {
  talk_base::CritScope cs(&crit_);
  // This is ok to alloc from a thread other than the worker thread
  ASSERT(initialized_);
  VoiceMediaChannel* media_channel = media_engine_->CreateChannel();
  if (media_channel == NULL)
    return NULL;
  VoiceChannel* voice_channel = new VoiceChannel(
      worker_thread_, media_engine_.get(), media_channel,
      session, content_name, rtcp);
  voice_channels_.push_back(voice_channel);
  return voice_channel;
}
// Destroys |voice_channel| synchronously on the worker thread.
// Passing NULL is a no-op.
void ChannelManager::DestroyVoiceChannel(VoiceChannel* voice_channel) {
  if (voice_channel == NULL) {
    return;
  }
  talk_base::TypedMessageData<VoiceChannel *> data(voice_channel);
  Send(MSG_DESTROYVOICECHANNEL, &data);
}
// Worker-thread implementation: unregisters and deletes the voice channel.
// Asserts (debug) if the channel was never registered.
void ChannelManager::DestroyVoiceChannel_w(VoiceChannel* voice_channel) {
  talk_base::CritScope cs(&crit_);
  // Destroy voice channel.
  ASSERT(initialized_);
  VoiceChannels::iterator it = std::find(voice_channels_.begin(),
      voice_channels_.end(), voice_channel);
  ASSERT(it != voice_channels_.end());
  if (it == voice_channels_.end())
    return;
  voice_channels_.erase(it);
  delete voice_channel;
}
// Creates a video channel synchronously on the worker thread, optionally
// paired with |voice_channel| for audio/video sync.  Returns NULL on failure.
VideoChannel* ChannelManager::CreateVideoChannel(
    BaseSession* session, const std::string& content_name, bool rtcp,
    VoiceChannel* voice_channel) {
  CreationParams params(session, content_name, rtcp, voice_channel);
  return (Send(MSG_CREATEVIDEOCHANNEL, &params)) ? params.video_channel : NULL;
}
// Worker-thread implementation of CreateVideoChannel.
VideoChannel* ChannelManager::CreateVideoChannel_w(
    BaseSession* session, const std::string& content_name, bool rtcp,
    VoiceChannel* voice_channel) {
  talk_base::CritScope cs(&crit_);
  // This is ok to alloc from a thread other than the worker thread
  ASSERT(initialized_);
  VideoMediaChannel* media_channel =
      // voice_channel can be NULL in case of NullVoiceEngine.
      media_engine_->CreateVideoChannel(voice_channel ?
          voice_channel->media_channel() : NULL);
  if (media_channel == NULL)
    return NULL;
  VideoChannel* video_channel = new VideoChannel(
      worker_thread_, media_engine_.get(), media_channel,
      session, content_name, rtcp, voice_channel);
  video_channels_.push_back(video_channel);
  return video_channel;
}
// Destroys |video_channel| synchronously on the worker thread.
// Passing NULL is a no-op.
void ChannelManager::DestroyVideoChannel(VideoChannel* video_channel) {
  if (video_channel == NULL) {
    return;
  }
  talk_base::TypedMessageData<VideoChannel *> data(video_channel);
  Send(MSG_DESTROYVIDEOCHANNEL, &data);
}
// Worker-thread implementation: unregisters and deletes the video channel.
// Asserts (debug) if the channel was never registered.
void ChannelManager::DestroyVideoChannel_w(VideoChannel *video_channel) {
  talk_base::CritScope cs(&crit_);
  // Destroy video channel.
  ASSERT(initialized_);
  VideoChannels::iterator it = std::find(video_channels_.begin(),
      video_channels_.end(), video_channel);
  // Match DestroyVoiceChannel_w/DestroySoundclip_w: flag an unknown channel
  // in debug builds instead of silently ignoring it.
  ASSERT(it != video_channels_.end());
  if (it == video_channels_.end())
    return;
  video_channels_.erase(it);
  delete video_channel;
}
// Creates a soundclip player synchronously on the worker thread.
// Returns NULL if the manager is not initialized or creation fails.
Soundclip* ChannelManager::CreateSoundclip() {
  talk_base::TypedMessageData<Soundclip*> data(NULL);
  Send(MSG_CREATESOUNDCLIP, &data);
  return data.data();
}
// Worker-thread implementation of CreateSoundclip.
Soundclip* ChannelManager::CreateSoundclip_w() {
  talk_base::CritScope cs(&crit_);
  ASSERT(initialized_);
  ASSERT(worker_thread_ == talk_base::Thread::Current());
  SoundclipMedia* soundclip_media = media_engine_->CreateSoundclip();
  if (!soundclip_media) {
    return NULL;
  }
  Soundclip* soundclip = new Soundclip(worker_thread_, soundclip_media);
  soundclips_.push_back(soundclip);
  return soundclip;
}
// Destroys |soundclip| synchronously on the worker thread.
// Passing NULL is a no-op.
void ChannelManager::DestroySoundclip(Soundclip* soundclip) {
  if (soundclip == NULL) {
    return;
  }
  talk_base::TypedMessageData<Soundclip*> data(soundclip);
  Send(MSG_DESTROYSOUNDCLIP, &data);
}
// Worker-thread implementation: unregisters and deletes the soundclip.
void ChannelManager::DestroySoundclip_w(Soundclip* soundclip) {
  talk_base::CritScope cs(&crit_);
  // Destroy soundclip.
  ASSERT(initialized_);
  Soundclips::iterator it = std::find(soundclips_.begin(),
      soundclips_.end(), soundclip);
  ASSERT(it != soundclips_.end());
  if (it == soundclips_.end())
    return;
  soundclips_.erase(it);
  delete soundclip;
}
// Reports the cached audio device names and option flags.  Always succeeds.
bool ChannelManager::GetAudioOptions(std::string* in_name,
                                     std::string* out_name, int* opts) {
  in_name->assign(audio_in_device_);
  out_name->assign(audio_out_device_);
  *opts = audio_options_;
  return true;
}
// Selects audio devices by name and applies option flags.  Device names are
// resolved up front; the engine is updated only if already initialized, and
// the preferences are cached only when everything succeeded.
bool ChannelManager::SetAudioOptions(const std::string& in_name,
                                     const std::string& out_name, int opts) {
  // Get device ids from DeviceManager.
  Device in_dev, out_dev;
  if (!device_manager_->GetAudioInputDevice(in_name, &in_dev)) {
    LOG(LS_WARNING) << "Failed to GetAudioInputDevice: " << in_name;
    return false;
  }
  if (!device_manager_->GetAudioOutputDevice(out_name, &out_dev)) {
    LOG(LS_WARNING) << "Failed to GetAudioOutputDevice: " << out_name;
    return false;
  }
  // If we're initialized, pass the settings to the media engine.
  bool ret = true;
  if (initialized_) {
    AudioOptions options(opts, &in_dev, &out_dev);
    ret = (Send(MSG_SETAUDIOOPTIONS, &options) && options.result);
  }
  // If all worked well, save the values for use in GetAudioOptions.
  if (ret) {
    audio_options_ = opts;
    audio_in_device_ = in_name;
    audio_out_device_ = out_name;
  }
  return ret;
}
// Worker-thread implementation of SetAudioOptions: applies the option flags,
// then (only on success) switches the sound devices.
bool ChannelManager::SetAudioOptions_w(int opts, const Device* in_dev,
                                       const Device* out_dev) {
  ASSERT(worker_thread_ == talk_base::Thread::Current());
  ASSERT(initialized_);
  // Set audio options
  bool ret = media_engine_->SetAudioOptions(opts);
  // Set the audio devices
  if (ret) {
    talk_base::CritScope cs(&crit_);
    ret = media_engine_->SetSoundDevices(in_dev, out_dev);
  }
  return ret;
}
// Reads the speaker volume via the worker thread.  On failure, |*level| is
// left untouched.
bool ChannelManager::GetOutputVolume(int* level) {
  VolumeLevel volume;
  const bool ok = Send(MSG_GETOUTPUTVOLUME, &volume) && volume.result;
  if (ok) {
    *level = volume.level;
  }
  return ok;
}
// Worker-thread implementation of GetOutputVolume.
bool ChannelManager::GetOutputVolume_w(int* level) {
  ASSERT(worker_thread_ == talk_base::Thread::Current());
  ASSERT(initialized_);
  return media_engine_->GetOutputVolume(level);
}
// Sets the speaker volume via the worker thread.
bool ChannelManager::SetOutputVolume(int level) {
  VolumeLevel volume(level);
  if (!Send(MSG_SETOUTPUTVOLUME, &volume)) {
    return false;
  }
  return volume.result;
}
// Worker-thread implementation of SetOutputVolume.
bool ChannelManager::SetOutputVolume_w(int level) {
  ASSERT(worker_thread_ == talk_base::Thread::Current());
  ASSERT(initialized_);
  return media_engine_->SetOutputVolume(level);
}
// Reports the cached camera device name.  Always succeeds.
bool ChannelManager::GetVideoOptions(std::string* cam_name) {
  cam_name->assign(camera_device_);
  return true;
}
// Selects the capture camera by name.  An empty name selects "no camera"
// (warning suppressed).  The engine is updated only when initialized; the
// resolved device name is cached on success.
bool ChannelManager::SetVideoOptions(const std::string& cam_name) {
  Device device;
  if (!device_manager_->GetVideoCaptureDevice(cam_name, &device)) {
    if (!cam_name.empty()) {
      LOG(LS_WARNING) << "Device manager can't find camera: " << cam_name;
    }
    return false;
  }
  // If we're running, tell the media engine about it.
  bool ret = true;
  if (initialized_) {
    VideoOptions options(&device);
    ret = (Send(MSG_SETVIDEOOPTIONS, &options) && options.result);
  }
  // If everything worked, retain the name of the selected camera.
  if (ret) {
    camera_device_ = device.name;
  }
  return ret;
}
// Worker-thread implementation of SetVideoOptions.
bool ChannelManager::SetVideoOptions_w(const Device* cam_device) {
  ASSERT(worker_thread_ == talk_base::Thread::Current());
  ASSERT(initialized_);
  // Set the video input device
  return media_engine_->SetVideoCaptureDevice(cam_device);
}
// Sets the default video encoder configuration.  Applied to the engine only
// when initialized; always cached on success so Init() can re-apply it.
bool ChannelManager::SetDefaultVideoEncoderConfig(const VideoEncoderConfig& c) {
  bool ret = true;
  if (initialized_) {
    DefaultVideoEncoderConfig config(c);
    ret = Send(MSG_SETDEFAULTVIDEOENCODERCONFIG, &config) && config.result;
  }
  if (ret) {
    default_video_encoder_config_ = c;
  }
  return ret;
}
// Worker-thread implementation of SetDefaultVideoEncoderConfig.
bool ChannelManager::SetDefaultVideoEncoderConfig_w(
    const VideoEncoderConfig& c) {
  ASSERT(worker_thread_ == talk_base::Thread::Current());
  ASSERT(initialized_);
  return media_engine_->SetDefaultVideoEncoderConfig(c);
}
// Enables or disables the local microphone monitor; tracks the state in
// monitoring_ on success.
bool ChannelManager::SetLocalMonitor(bool enable) {
  LocalMonitor monitor(enable);
  const bool ok = Send(MSG_SETLOCALMONITOR, &monitor) && monitor.result;
  if (ok) {
    monitoring_ = enable;
  }
  return ok;
}
// Worker-thread implementation of SetLocalMonitor.
bool ChannelManager::SetLocalMonitor_w(bool enable) {
  ASSERT(worker_thread_ == talk_base::Thread::Current());
  ASSERT(initialized_);
  return media_engine_->SetLocalMonitor(enable);
}
// Sets the renderer for the local camera preview.  Applied to the engine
// only when initialized; always cached on success for re-apply in Init().
bool ChannelManager::SetLocalRenderer(VideoRenderer* renderer) {
  bool ret = true;
  if (initialized_) {
    LocalRenderer local(renderer);
    ret = (Send(MSG_SETLOCALRENDERER, &local) && local.result);
  }
  if (ret) {
    local_renderer_ = renderer;
  }
  return ret;
}
// Worker-thread implementation of SetLocalRenderer.
bool ChannelManager::SetLocalRenderer_w(VideoRenderer* renderer) {
  ASSERT(worker_thread_ == talk_base::Thread::Current());
  ASSERT(initialized_);
  return media_engine_->SetLocalRenderer(renderer);
}
// Starts or stops camera capture via the worker thread.  Returns the
// engine's CaptureResult (CR_FAILURE if the message could not be sent);
// capturing_ is updated optimistically on any non-failure result.
CaptureResult ChannelManager::SetVideoCapture(bool capture) {
  bool ret;
  CaptureParams capture_params(capture);
  ret = (Send(MSG_SETVIDEOCAPTURE, &capture_params) &&
         (capture_params.result != CR_FAILURE));
  if (ret) {
    capturing_ = capture;
  }
  return capture_params.result;
}
// Worker-thread implementation of SetVideoCapture.
CaptureResult ChannelManager::SetVideoCapture_w(bool capture) {
  ASSERT(worker_thread_ == talk_base::Thread::Current());
  ASSERT(initialized_);
  return media_engine_->SetVideoCapture(capture);
}
// Configures voice-engine logging.  Thin wrapper over SetMediaLogging.
void ChannelManager::SetVoiceLogging(int level, const char* filter) {
  SetMediaLogging(false, level, filter);
}
// Configures video-engine logging.  Thin wrapper over SetMediaLogging.
void ChannelManager::SetVideoLogging(int level, const char* filter) {
  SetMediaLogging(true, level, filter);
}
// Routes a logging-configuration request to the worker thread when running,
// or applies it directly on the calling thread before initialization.
void ChannelManager::SetMediaLogging(bool video, int level,
                                     const char* filter) {
  // Can be called before initialization; in this case, the worker function
  // is simply called on the main thread.
  if (initialized_) {
    LoggingOptions options(level, filter);
    Send((video) ? MSG_SETVIDEOLOGGING : MSG_SETVOICELOGGING, &options);
  } else {
    SetMediaLogging_w(video, level, filter);
  }
}
// Applies logging settings to the voice or video half of the media engine.
void ChannelManager::SetMediaLogging_w(bool video, int level,
                                       const char* filter) {
  // Can be called before initialization
  ASSERT(worker_thread_ == talk_base::Thread::Current() || !initialized_);
  if (video) {
    media_engine_->SetVideoLogging(level, filter);
  } else {
    media_engine_->SetVoiceLogging(level, filter);
  }
}
// Synchronously executes message |id| on the worker thread.  Returns false
// (without touching |data|) when the manager is not running.
bool ChannelManager::Send(uint32 id, talk_base::MessageData* data) {
  if (worker_thread_ == NULL || !initialized_) {
    return false;
  }
  worker_thread_->Send(this, id, data);
  return true;
}
// Engine callback when asynchronous camera startup finishes.  Records the
// capture state, then forwards the result to the main thread (the payload
// is deleted in OnMessage's MSG_CAMERASTARTED handler).
void ChannelManager::OnVideoCaptureResult(CaptureResult result) {
  capturing_ = result == CR_SUCCESS;
  main_thread_->Post(this, MSG_CAMERASTARTED,
      new talk_base::TypedMessageData<CaptureResult>(result));
}
// Worker-thread message dispatcher.  Message payloads are stack-allocated by
// the Send() callers and must NOT be deleted here — except MSG_CAMERASTARTED,
// which was heap-allocated by OnVideoCaptureResult via Post().
void ChannelManager::OnMessage(talk_base::Message* message) {
  talk_base::MessageData* data = message->pdata;
  switch (message->message_id) {
    case MSG_CREATEVOICECHANNEL: {
      CreationParams* p = static_cast<CreationParams*>(data);
      p->voice_channel =
          CreateVoiceChannel_w(p->session, p->content_name, p->rtcp);
      break;
    }
    case MSG_DESTROYVOICECHANNEL: {
      VoiceChannel* p = static_cast<talk_base::TypedMessageData<VoiceChannel*>*>
          (data)->data();
      DestroyVoiceChannel_w(p);
      break;
    }
    case MSG_CREATEVIDEOCHANNEL: {
      CreationParams* p = static_cast<CreationParams*>(data);
      p->video_channel = CreateVideoChannel_w(p->session, p->content_name,
                                              p->rtcp, p->voice_channel);
      break;
    }
    case MSG_DESTROYVIDEOCHANNEL: {
      VideoChannel* p = static_cast<talk_base::TypedMessageData<VideoChannel*>*>
          (data)->data();
      DestroyVideoChannel_w(p);
      break;
    }
    case MSG_CREATESOUNDCLIP: {
      talk_base::TypedMessageData<Soundclip*> *p =
          static_cast<talk_base::TypedMessageData<Soundclip*>*>(data);
      p->data() = CreateSoundclip_w();
      break;
    }
    case MSG_DESTROYSOUNDCLIP: {
      talk_base::TypedMessageData<Soundclip*> *p =
          static_cast<talk_base::TypedMessageData<Soundclip*>*>(data);
      DestroySoundclip_w(p->data());
      break;
    }
    case MSG_SETAUDIOOPTIONS: {
      AudioOptions* p = static_cast<AudioOptions*>(data);
      p->result = SetAudioOptions_w(p->options,
                                    p->in_device, p->out_device);
      break;
    }
    case MSG_GETOUTPUTVOLUME: {
      VolumeLevel* p = static_cast<VolumeLevel*>(data);
      p->result = GetOutputVolume_w(&p->level);
      break;
    }
    case MSG_SETOUTPUTVOLUME: {
      VolumeLevel* p = static_cast<VolumeLevel*>(data);
      p->result = SetOutputVolume_w(p->level);
      break;
    }
    case MSG_SETLOCALMONITOR: {
      LocalMonitor* p = static_cast<LocalMonitor*>(data);
      p->result = SetLocalMonitor_w(p->enable);
      break;
    }
    case MSG_SETVIDEOOPTIONS: {
      VideoOptions* p = static_cast<VideoOptions*>(data);
      p->result = SetVideoOptions_w(p->cam_device);
      break;
    }
    case MSG_SETDEFAULTVIDEOENCODERCONFIG: {
      DefaultVideoEncoderConfig* p =
          static_cast<DefaultVideoEncoderConfig*>(data);
      p->result = SetDefaultVideoEncoderConfig_w(p->config);
      break;
    }
    case MSG_SETLOCALRENDERER: {
      LocalRenderer* p = static_cast<LocalRenderer*>(data);
      p->result = SetLocalRenderer_w(p->renderer);
      break;
    }
    case MSG_SETVIDEOCAPTURE: {
      CaptureParams* p = static_cast<CaptureParams*>(data);
      p->result = SetVideoCapture_w(p->capture);
      break;
    }
    case MSG_SETVOICELOGGING:
    case MSG_SETVIDEOLOGGING: {
      LoggingOptions* p = static_cast<LoggingOptions*>(data);
      bool video = (message->message_id == MSG_SETVIDEOLOGGING);
      SetMediaLogging_w(video, p->level, p->filter.c_str());
      break;
    }
    case MSG_CAMERASTARTED: {
      // NOTE(review): the local |data| deliberately shadows the outer one;
      // this payload is owned here and must be deleted after signalling.
      talk_base::TypedMessageData<CaptureResult>* data =
          static_cast<talk_base::TypedMessageData<CaptureResult>*>(
              message->pdata);
      SignalVideoCaptureResult(data->data());
      delete data;
      break;
    }
  }
}
static void GetDeviceNames(const std::vector<Device>& devs,
std::vector<std::string>* names) {
names->clear();
for (size_t i = 0; i < devs.size(); ++i) {
names->push_back(devs[i].name);
}
}
// Enumerates the available microphones by name.  On failure, |names| is
// left empty and false is returned.
bool ChannelManager::GetAudioInputDevices(std::vector<std::string>* names) {
  names->clear();
  std::vector<Device> devs;
  if (!device_manager_->GetAudioInputDevices(&devs)) {
    return false;
  }
  GetDeviceNames(devs, names);
  return true;
}
// Enumerates the available speakers by name.  On failure, |names| is left
// empty and false is returned.
bool ChannelManager::GetAudioOutputDevices(std::vector<std::string>* names) {
  names->clear();
  std::vector<Device> devs;
  if (!device_manager_->GetAudioOutputDevices(&devs)) {
    return false;
  }
  GetDeviceNames(devs, names);
  return true;
}
// Fills |names| with the names of the available video capture devices.
// Returns false if device enumeration fails, leaving |names| empty.
bool ChannelManager::GetVideoCaptureDevices(std::vector<std::string>* names) {
  names->clear();
  std::vector<Device> devices;
  if (!device_manager_->GetVideoCaptureDevices(&devices)) {
    return false;
  }
  GetDeviceNames(devices, names);
  return true;
}
} // namespace cricket

View File

@ -0,0 +1,208 @@
/*
* libjingle
* Copyright 2004--2008, Google Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
* EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef TALK_SESSION_PHONE_CHANNELMANAGER_H_
#define TALK_SESSION_PHONE_CHANNELMANAGER_H_
#include <string>
#include <vector>
#include "talk/base/criticalsection.h"
#include "talk/base/sigslotrepeater.h"
#include "talk/base/thread.h"
#include "talk/p2p/base/session.h"
#include "talk/session/phone/voicechannel.h"
#include "talk/session/phone/mediaengine.h"
#include "talk/session/phone/devicemanager.h"
namespace cricket {
class Soundclip;
class VoiceChannel;
// ChannelManager allows the MediaEngine to run on a separate thread, and takes
// care of marshalling calls between threads. It also creates and keeps track of
// voice and video channels; by doing so, it can temporarily pause all the
// channels when a new audio or video device is chosen. The voice and video
// channels are stored in separate vectors, to easily allow operations on just
// voice or just video channels.
// ChannelManager also allows the application to discover what devices it has
// using device manager.
class ChannelManager : public talk_base::MessageHandler,
                       public sigslot::has_slots<> {
 public:
  // Creates the channel manager, and specifies the worker thread to use.
  explicit ChannelManager(talk_base::Thread* worker);
  // For testing purposes. Allows the media engine and dev manager to be mocks.
  // The ChannelManager takes ownership of these objects.
  ChannelManager(MediaEngine* me, DeviceManager* dm, talk_base::Thread* worker);
  ~ChannelManager();

  // Accessors for the worker thread, allowing it to be set after construction,
  // but before Init. set_worker_thread will return false if called after Init.
  talk_base::Thread* worker_thread() const { return worker_thread_; }
  bool set_worker_thread(talk_base::Thread* thread) {
    if (initialized_) return false;
    worker_thread_ = thread;
    return true;
  }

  // Gets capabilities. Can be called prior to starting the media engine.
  int GetCapabilities();

  // Retrieves the list of supported audio & video codec types.
  // Can be called before starting the media engine.
  void GetSupportedAudioCodecs(std::vector<AudioCodec>* codecs) const;
  void GetSupportedVideoCodecs(std::vector<VideoCodec>* codecs) const;

  // Indicates whether the media engine is started.
  bool initialized() const { return initialized_; }
  // Starts up the media engine.
  bool Init();
  // TODO: Remove this temporary API once Flute is updated.
  // Convenience overload: sets the worker thread, then starts the engine.
  bool Init(talk_base::Thread* thread) {
    return set_worker_thread(thread) && Init();
  }
  // Shuts down the media engine.
  void Terminate();

  // The operations below all occur on the worker thread.

  // Creates a voice channel, to be associated with the specified session.
  VoiceChannel* CreateVoiceChannel(
      BaseSession* session, const std::string& content_name, bool rtcp);
  // Destroys a voice channel created with the Create API.
  void DestroyVoiceChannel(VoiceChannel* voice_channel);
  // Creates a video channel, synced with the specified voice channel, and
  // associated with the specified session.
  VideoChannel* CreateVideoChannel(
      BaseSession* session, const std::string& content_name, bool rtcp,
      VoiceChannel* voice_channel);
  // Destroys a video channel created with the Create API.
  void DestroyVideoChannel(VideoChannel* video_channel);
  // Creates a soundclip.
  Soundclip* CreateSoundclip();
  // Destroys a soundclip created with the Create API.
  void DestroySoundclip(Soundclip* soundclip);

  // Indicates whether any channels (voice, video, or soundclip) exist.
  bool has_channels() const {
    return (!voice_channels_.empty() || !video_channels_.empty() ||
            !soundclips_.empty());
  }

  // Configures the audio and video devices.
  bool GetAudioOptions(std::string* wave_in_device,
                       std::string* wave_out_device, int* opts);
  bool SetAudioOptions(const std::string& wave_in_device,
                       const std::string& wave_out_device, int opts);
  bool GetOutputVolume(int* level);
  bool SetOutputVolume(int level);
  bool GetVideoOptions(std::string* cam_device);
  bool SetVideoOptions(const std::string& cam_device);
  bool SetDefaultVideoEncoderConfig(const VideoEncoderConfig& config);

  // Starts/stops the local microphone and enables polling of the input level.
  bool SetLocalMonitor(bool enable);
  bool monitoring() const { return monitoring_; }
  // Sets the local renderer where to renderer the local camera.
  bool SetLocalRenderer(VideoRenderer* renderer);
  // Starts and stops the local camera and renders it to the local renderer.
  CaptureResult SetVideoCapture(bool capture);
  bool capturing() const { return capturing_; }

  // Configures the logging output of the mediaengine(s).
  void SetVoiceLogging(int level, const char* filter);
  void SetVideoLogging(int level, const char* filter);

  // The operations below occur on the main thread.
  bool GetAudioInputDevices(std::vector<std::string>* names);
  bool GetAudioOutputDevices(std::vector<std::string>* names);
  bool GetVideoCaptureDevices(std::vector<std::string>* names);
  sigslot::repeater0<> SignalDevicesChange;
  sigslot::signal1<CaptureResult> SignalVideoCaptureResult;

 protected:
  // Marshals a call to the worker thread (see class comment); messages are
  // dispatched in OnMessage.
  bool Send(uint32 id, talk_base::MessageData* pdata);
  void OnMessage(talk_base::Message *message);
  MediaEngine* media_engine() { return media_engine_.get(); }

 private:
  typedef std::vector<VoiceChannel*> VoiceChannels;
  typedef std::vector<VideoChannel*> VideoChannels;
  typedef std::vector<Soundclip*> Soundclips;

  void Construct();
  // Methods with a _w suffix are the worker-thread implementations of the
  // corresponding public methods above.
  VoiceChannel* CreateVoiceChannel_w(
      BaseSession* session, const std::string& content_name, bool rtcp);
  void DestroyVoiceChannel_w(VoiceChannel* voice_channel);
  VideoChannel* CreateVideoChannel_w(
      BaseSession* session, const std::string& content_name, bool rtcp,
      VoiceChannel* voice_channel);
  void DestroyVideoChannel_w(VideoChannel* video_channel);
  Soundclip* CreateSoundclip_w();
  void DestroySoundclip_w(Soundclip* soundclip);
  bool SetAudioOptions_w(int opts, const Device* in_dev,
                         const Device* out_dev);
  bool GetOutputVolume_w(int* level);
  bool SetOutputVolume_w(int level);
  bool SetLocalMonitor_w(bool enable);
  bool SetVideoOptions_w(const Device* cam_device);
  bool SetDefaultVideoEncoderConfig_w(const VideoEncoderConfig& config);
  bool SetLocalRenderer_w(VideoRenderer* renderer);
  CaptureResult SetVideoCapture_w(bool capture);
  void SetMediaLogging(bool video, int level, const char* filter);
  void SetMediaLogging_w(bool video, int level, const char* filter);
  void OnVideoCaptureResult(CaptureResult result);

  talk_base::CriticalSection crit_;
  talk_base::scoped_ptr<MediaEngine> media_engine_;
  talk_base::scoped_ptr<DeviceManager> device_manager_;
  bool initialized_;
  talk_base::Thread* main_thread_;
  talk_base::Thread* worker_thread_;

  VoiceChannels voice_channels_;
  VideoChannels video_channels_;
  Soundclips soundclips_;

  // Cached device/option settings, used when (re)applying configuration.
  std::string audio_in_device_;
  std::string audio_out_device_;
  int audio_options_;
  std::string camera_device_;
  VideoEncoderConfig default_video_encoder_config_;
  VideoRenderer* local_renderer_;

  bool capturing_;
  bool monitoring_;
};
} // namespace cricket
#endif // TALK_SESSION_PHONE_CHANNELMANAGER_H_

View File

@ -0,0 +1,501 @@
/*
* libjingle
* Copyright 2004--2010, Google Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
* EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef TALK_SESSION_PHONE_MEDIACHANNEL_H_
#define TALK_SESSION_PHONE_MEDIACHANNEL_H_
#include <string>
#include <vector>
#include "talk/base/basictypes.h"
#include "talk/base/sigslot.h"
#include "talk/base/socket.h"
#include "talk/session/phone/codec.h"
// TODO: re-evaluate this include
#include "talk/session/phone/audiomonitor.h"
namespace talk_base {
class Buffer;
}
namespace flute {
class MagicCamVideoRenderer;
}
namespace cricket {
// Inclusive bounds for valid RTP header extension IDs.
const int kMinRtpHeaderExtensionId = 1;
const int kMaxRtpHeaderExtensionId = 255;
// Associates an RTP header extension URI with its negotiated local ID.
struct RtpHeaderExtension {
  RtpHeaderExtension(const std::string& extension_uri, int extension_id)
      : uri(extension_uri),
        id(extension_id) {
  }
  std::string uri;
  int id;
  // TODO: SendRecv direction;
};
// Option bit flags for voice media channels (see MediaChannel::SetOptions).
enum VoiceMediaChannelOptions {
  OPT_CONFERENCE = 0x10000,   // tune the audio stream for conference mode
};

// Option bit flags for video media channels (see MediaChannel::SetOptions).
enum VideoMediaChannelOptions {
  OPT_INTERPOLATE = 0x10000   // Increase the output framerate by 2x by
                              // interpolating frames
};
// Base class for voice and video media channels. Packets are sent and
// received through the NetworkInterface supplied via SetInterface.
class MediaChannel : public sigslot::has_slots<> {
 public:
  // Abstraction of the transport used to send RTP/RTCP packets.
  class NetworkInterface {
   public:
    enum SocketType { ST_RTP, ST_RTCP };
    virtual bool SendPacket(talk_base::Buffer* packet) = 0;
    virtual bool SendRtcp(talk_base::Buffer* packet) = 0;
    virtual int SetOption(SocketType type, talk_base::Socket::Option opt,
                          int option) = 0;
    virtual ~NetworkInterface() {}
  };

  MediaChannel() : network_interface_(NULL) {}
  virtual ~MediaChannel() {}

  // Gets/sets the abstract interface class for sending RTP/RTCP data.
  NetworkInterface *network_interface() { return network_interface_; }
  virtual void SetInterface(NetworkInterface *iface) {
    network_interface_ = iface;
  }

  // Called when a RTP packet is received.
  virtual void OnPacketReceived(talk_base::Buffer* packet) = 0;
  // Called when a RTCP packet is received.
  virtual void OnRtcpReceived(talk_base::Buffer* packet) = 0;
  // Sets the SSRC to be used for outgoing data.
  virtual void SetSendSsrc(uint32 id) = 0;
  // Set the CNAME of RTCP
  virtual bool SetRtcpCName(const std::string& cname) = 0;
  // Mutes the channel.
  virtual bool Mute(bool on) = 0;
  // Sets the RTP extension headers and IDs to use when sending RTP.
  virtual bool SetRecvRtpHeaderExtensions(
      const std::vector<RtpHeaderExtension>& extensions) = 0;
  virtual bool SetSendRtpHeaderExtensions(
      const std::vector<RtpHeaderExtension>& extensions) = 0;
  // Sets the rate control to use when sending data.
  virtual bool SetSendBandwidth(bool autobw, int bps) = 0;
  // Sets the media options to use.
  virtual bool SetOptions(int options) = 0;
  // Gets the Rtc channel id
  virtual int GetMediaChannelId() = 0;

 protected:
  // Non-owning pointer to the transport; NULL until SetInterface is called.
  NetworkInterface *network_interface_;
};
// What a voice channel transmits, as passed to VoiceMediaChannel::SetSend.
enum SendFlags {
  SEND_NOTHING,
  SEND_RINGBACKTONE,
  SEND_MICROPHONE
};
// Per-SSRC statistics for an outgoing voice stream.
struct VoiceSenderInfo {
  uint32 ssrc;
  int bytes_sent;
  int packets_sent;
  int packets_lost;
  float fraction_lost;
  int ext_seqnum;
  int rtt_ms;
  int jitter_ms;
  int audio_level;
};
// Per-SSRC statistics for an incoming voice stream.
struct VoiceReceiverInfo {
  uint32 ssrc;
  int bytes_rcvd;
  int packets_rcvd;
  int packets_lost;
  float fraction_lost;
  int ext_seqnum;
  int jitter_ms;
  int jitter_buffer_ms;
  int jitter_buffer_preferred_ms;
  int delay_estimate_ms;
  int audio_level;
};
// Per-SSRC statistics for an outgoing video stream.
struct VideoSenderInfo {
  uint32 ssrc;
  int bytes_sent;
  int packets_sent;
  int packets_cached;
  int packets_lost;
  float fraction_lost;
  int firs_rcvd;
  int nacks_rcvd;
  int rtt_ms;
  int frame_width;
  int frame_height;
  int framerate_input;
  int framerate_sent;
  int nominal_bitrate;
  int preferred_bitrate;
};
// Per-SSRC statistics for an incoming video stream.
struct VideoReceiverInfo {
  uint32 ssrc;
  int bytes_rcvd;
  // vector<int> layer_bytes_rcvd;
  int packets_rcvd;
  int packets_lost;
  int packets_concealed;
  float fraction_lost;
  int firs_sent;
  int nacks_sent;
  int frame_width;
  int frame_height;
  int framerate_rcvd;
  int framerate_decoded;
  int framerate_output;
};
// Bandwidth estimation statistics for a video channel.
struct BandwidthEstimationInfo {
  int available_send_bandwidth;
  int available_recv_bandwidth;
  int target_enc_bitrate;
  int actual_enc_bitrate;
  int retransmit_bitrate;
  int transmit_bitrate;
  int bucket_delay;
};
// Aggregate voice statistics, as returned by VoiceMediaChannel::GetStats.
struct VoiceMediaInfo {
  // Empties both lists.
  void Clear() {
    senders.clear();
    receivers.clear();
  }
  std::vector<VoiceSenderInfo> senders;
  std::vector<VoiceReceiverInfo> receivers;
};
// Aggregate video statistics, as returned by VideoMediaChannel::GetStats.
struct VideoMediaInfo {
  // Empties all three lists.
  void Clear() {
    senders.clear();
    receivers.clear();
    bw_estimations.clear();
  }
  std::vector<VideoSenderInfo> senders;
  std::vector<VideoReceiverInfo> receivers;
  std::vector<BandwidthEstimationInfo> bw_estimations;
};
// Abstract interface for a voice media channel.
class VoiceMediaChannel : public MediaChannel {
 public:
  enum Error {
    ERROR_NONE = 0,                       // No error.
    ERROR_OTHER,                          // Other errors.
    ERROR_REC_DEVICE_OPEN_FAILED = 100,   // Could not open mic.
    ERROR_REC_DEVICE_MUTED,               // Mic was muted by OS.
    ERROR_REC_DEVICE_SILENT,              // No background noise picked up.
    ERROR_REC_DEVICE_SATURATION,          // Mic input is clipping.
    ERROR_REC_DEVICE_REMOVED,             // Mic was removed while active.
    ERROR_REC_RUNTIME_ERROR,              // Processing is encountering errors.
    ERROR_REC_SRTP_ERROR,                 // Generic SRTP failure.
    ERROR_REC_SRTP_AUTH_FAILED,           // Failed to authenticate packets.
    ERROR_REC_TYPING_NOISE_DETECTED,      // Typing noise is detected.
    ERROR_PLAY_DEVICE_OPEN_FAILED = 200,  // Could not open playout.
    ERROR_PLAY_DEVICE_MUTED,              // Playout muted by OS.
    ERROR_PLAY_DEVICE_REMOVED,            // Playout removed while active.
    ERROR_PLAY_RUNTIME_ERROR,             // Errors in voice processing.
    ERROR_PLAY_SRTP_ERROR,                // Generic SRTP failure.
    ERROR_PLAY_SRTP_AUTH_FAILED,          // Failed to authenticate packets.
    ERROR_PLAY_SRTP_REPLAY,               // Packet replay detected.
  };

  VoiceMediaChannel() {}
  virtual ~VoiceMediaChannel() {}
  // Sets the codecs/payload types to be used for incoming media.
  virtual bool SetRecvCodecs(const std::vector<AudioCodec>& codecs) = 0;
  // Sets the codecs/payload types to be used for outgoing media.
  virtual bool SetSendCodecs(const std::vector<AudioCodec>& codecs) = 0;
  // Starts or stops playout of received audio.
  virtual bool SetPlayout(bool playout) = 0;
  // Starts or stops sending (and potentially capture) of local audio.
  virtual bool SetSend(SendFlags flag) = 0;
  // Adds a new receive-only stream with the specified SSRC.
  virtual bool AddStream(uint32 ssrc) = 0;
  // Removes a stream added with AddStream.
  virtual bool RemoveStream(uint32 ssrc) = 0;
  // Gets current energy levels for all incoming streams.
  virtual bool GetActiveStreams(AudioInfo::StreamList* actives) = 0;
  // Get the current energy level for the outgoing stream.
  virtual int GetOutputLevel() = 0;
  // Specifies a ringback tone to be played during call setup.
  virtual bool SetRingbackTone(const char *buf, int len) = 0;
  // Plays or stops the aforementioned ringback tone
  virtual bool PlayRingbackTone(uint32 ssrc, bool play, bool loop) = 0;
  // Sends a out-of-band DTMF signal using the specified event.
  virtual bool PressDTMF(int event, bool playout) = 0;
  // Gets quality stats for the channel.
  virtual bool GetStats(VoiceMediaInfo* info) = 0;
  // Gets last reported error for this media channel.
  // Default implementation reports no error.
  virtual void GetLastMediaError(uint32* ssrc,
                                 VoiceMediaChannel::Error* error) {
    ASSERT(error != NULL);
    *error = ERROR_NONE;
  }
  // Signal errors from MediaChannel. Arguments are:
  //     ssrc(uint32), and error(VoiceMediaChannel::Error).
  sigslot::signal2<uint32, VoiceMediaChannel::Error> SignalMediaError;
};
// Represents a YUV420 (a.k.a. I420) video frame.
class VideoFrame {
  friend class flute::MagicCamVideoRenderer;

 public:
  VideoFrame() : rendered_(false) {}
  virtual ~VideoFrame() {}

  // Frame dimensions in pixels.
  virtual size_t GetWidth() const = 0;
  virtual size_t GetHeight() const = 0;
  // Read-only access to the Y/U/V planes.
  virtual const uint8 *GetYPlane() const = 0;
  virtual const uint8 *GetUPlane() const = 0;
  virtual const uint8 *GetVPlane() const = 0;
  // Mutable access to the Y/U/V planes.
  virtual uint8 *GetYPlane() = 0;
  virtual uint8 *GetUPlane() = 0;
  virtual uint8 *GetVPlane() = 0;
  // Per-plane strides, in bytes.
  virtual int32 GetYPitch() const = 0;
  virtual int32 GetUPitch() const = 0;
  virtual int32 GetVPitch() const = 0;

  // For retrieving the aspect ratio of each pixel. Usually this is 1x1, but
  // the aspect_ratio_idc parameter of H.264 can specify non-square pixels.
  virtual size_t GetPixelWidth() const = 0;
  virtual size_t GetPixelHeight() const = 0;

  // TODO: Add a fourcc format here and probably combine VideoFrame
  // with CapturedFrame.
  virtual int64 GetElapsedTime() const = 0;
  virtual int64 GetTimeStamp() const = 0;
  virtual void SetElapsedTime(int64 elapsed_time) = 0;
  virtual void SetTimeStamp(int64 time_stamp) = 0;

  // Make a copy of the frame. The frame buffer itself may not be copied,
  // in which case both the current and new VideoFrame will share a single
  // reference-counted frame buffer.
  virtual VideoFrame *Copy() const = 0;

  // Writes the frame into the given frame buffer, provided that it is of
  // sufficient size. Returns the frame's actual size, regardless of whether
  // it was written or not (like snprintf). If there is insufficient space,
  // nothing is written.
  virtual size_t CopyToBuffer(uint8 *buffer, size_t size) const = 0;

  // Converts the I420 data to RGB of a certain type such as ARGB and ABGR.
  // Returns the frame's actual size, regardless of whether it was written or
  // not (like snprintf). Parameters size and pitch_rgb are in units of bytes.
  // If there is insufficient space, nothing is written.
  virtual size_t ConvertToRgbBuffer(uint32 to_fourcc, uint8 *buffer,
                                    size_t size, size_t pitch_rgb) const = 0;

  // Writes the frame into the given planes, stretched to the given width and
  // height. The parameter "interpolate" controls whether to interpolate or just
  // take the nearest-point. The parameter "crop" controls whether to crop this
  // frame to the aspect ratio of the given dimensions before stretching.
  virtual void StretchToPlanes(uint8 *y, uint8 *u, uint8 *v,
                               int32 pitchY, int32 pitchU, int32 pitchV,
                               size_t width, size_t height,
                               bool interpolate, bool crop) const = 0;

  // Writes the frame into the given frame buffer, stretched to the given width
  // and height, provided that it is of sufficient size. Returns the frame's
  // actual size, regardless of whether it was written or not (like snprintf).
  // If there is insufficient space, nothing is written. The parameter
  // "interpolate" controls whether to interpolate or just take the
  // nearest-point. The parameter "crop" controls whether to crop this frame to
  // the aspect ratio of the given dimensions before stretching.
  virtual size_t StretchToBuffer(size_t w, size_t h, uint8 *buffer, size_t size,
                                 bool interpolate, bool crop) const = 0;

  // Writes the frame into the target VideoFrame, stretched to the size of that
  // frame. The parameter "interpolate" controls whether to interpolate or just
  // take the nearest-point. The parameter "crop" controls whether to crop this
  // frame to the aspect ratio of the target frame before stretching.
  virtual void StretchToFrame(VideoFrame *target, bool interpolate,
                              bool crop) const = 0;

  // Stretches the frame to the given size, creating a new VideoFrame object to
  // hold it. The parameter "interpolate" controls whether to interpolate or
  // just take the nearest-point. The parameter "crop" controls whether to crop
  // this frame to the aspect ratio of the given dimensions before stretching.
  virtual VideoFrame *Stretch(size_t w, size_t h, bool interpolate,
                              bool crop) const = 0;

  // Size of an I420 image of given dimensions when stored as a frame buffer.
  // Y plane is w*h bytes; U and V planes are each ceil(w/2)*ceil(h/2) bytes.
  static size_t SizeOf(size_t w, size_t h) {
    return w * h + ((w + 1) / 2) * ((h + 1) / 2) * 2;
  }

 protected:
  // The frame needs to be rendered to magiccam only once.
  // TODO: Remove this flag once magiccam rendering is fully replaced
  // by client3d rendering.
  mutable bool rendered_;
};
// Simple subclass for use in mocks.
class NullVideoFrame : public VideoFrame {
 public:
  // All accessors return zero/NULL; all mutating/converting operations are
  // no-ops that report zero bytes written.
  virtual size_t GetWidth() const { return 0; }
  virtual size_t GetHeight() const { return 0; }
  virtual const uint8 *GetYPlane() const { return NULL; }
  virtual const uint8 *GetUPlane() const { return NULL; }
  virtual const uint8 *GetVPlane() const { return NULL; }
  virtual uint8 *GetYPlane() { return NULL; }
  virtual uint8 *GetUPlane() { return NULL; }
  virtual uint8 *GetVPlane() { return NULL; }
  virtual int32 GetYPitch() const { return 0; }
  virtual int32 GetUPitch() const { return 0; }
  virtual int32 GetVPitch() const { return 0; }

  // Reports square pixels.
  virtual size_t GetPixelWidth() const { return 1; }
  virtual size_t GetPixelHeight() const { return 1; }

  virtual int64 GetElapsedTime() const { return 0; }
  virtual int64 GetTimeStamp() const { return 0; }
  virtual void SetElapsedTime(int64 elapsed_time) {}
  virtual void SetTimeStamp(int64 time_stamp) {}

  virtual VideoFrame *Copy() const {
    return NULL;
  }

  virtual size_t CopyToBuffer(uint8 *buffer, size_t size) const {
    return 0;
  }

  virtual size_t ConvertToRgbBuffer(uint32 to_fourcc, uint8 *buffer,
                                    size_t size, size_t pitch_rgb) const {
    return 0;
  }

  virtual void StretchToPlanes(uint8 *y, uint8 *u, uint8 *v,
                               int32 pitchY, int32 pitchU, int32 pitchV,
                               size_t width, size_t height,
                               bool interpolate, bool crop) const {
  }

  virtual size_t StretchToBuffer(size_t w, size_t h, uint8 *buffer, size_t size,
                                 bool interpolate, bool crop) const {
    return 0;
  }

  virtual void StretchToFrame(VideoFrame *target, bool interpolate,
                              bool crop) const {
  }

  virtual VideoFrame *Stretch(size_t w, size_t h, bool interpolate,
                              bool crop) const {
    return NULL;
  }
};
// Abstract interface for rendering VideoFrames.
class VideoRenderer {
 public:
  virtual ~VideoRenderer() {}
  // Called when the video has changed size.
  // NOTE(review): the meaning of |reserved| is not specified in this header.
  virtual bool SetSize(int width, int height, int reserved) = 0;
  // Called when a new frame is available for display.
  virtual bool RenderFrame(const VideoFrame *frame) = 0;
};
// Simple implementation for use in tests.
class NullVideoRenderer : public VideoRenderer {
  // Accepts any size without doing anything.
  virtual bool SetSize(int width, int height, int reserved) {
    return true;
  }
  // Called when a new frame is available for display.
  // Discards the frame and reports success.
  virtual bool RenderFrame(const VideoFrame *frame) {
    return true;
  }
};
// Abstract interface for a video media channel.
class VideoMediaChannel : public MediaChannel {
 public:
  enum Error {
    ERROR_NONE = 0,                       // No error.
    ERROR_OTHER,                          // Other errors.
    ERROR_REC_DEVICE_OPEN_FAILED = 100,   // Could not open camera.
    ERROR_REC_DEVICE_NO_DEVICE,           // No camera.
    ERROR_REC_DEVICE_IN_USE,              // Device is in already use.
    ERROR_REC_DEVICE_REMOVED,             // Device is removed.
    ERROR_REC_SRTP_ERROR,                 // Generic sender SRTP failure.
    ERROR_REC_SRTP_AUTH_FAILED,           // Failed to authenticate packets.
    ERROR_PLAY_SRTP_ERROR = 200,          // Generic receiver SRTP failure.
    ERROR_PLAY_SRTP_AUTH_FAILED,          // Failed to authenticate packets.
    ERROR_PLAY_SRTP_REPLAY,               // Packet replay detected.
  };

  VideoMediaChannel() { renderer_ = NULL; }
  virtual ~VideoMediaChannel() {}
  // Sets the codecs/payload types to be used for incoming media.
  virtual bool SetRecvCodecs(const std::vector<VideoCodec> &codecs) = 0;
  // Sets the codecs/payload types to be used for outgoing media.
  virtual bool SetSendCodecs(const std::vector<VideoCodec> &codecs) = 0;
  // Starts or stops playout of received video.
  virtual bool SetRender(bool render) = 0;
  // Starts or stops transmission (and potentially capture) of local video.
  virtual bool SetSend(bool send) = 0;
  // Adds a new receive-only stream with the specified SSRC.
  virtual bool AddStream(uint32 ssrc, uint32 voice_ssrc) = 0;
  // Removes a stream added with AddStream.
  virtual bool RemoveStream(uint32 ssrc) = 0;
  // Sets the renderer object to be used for the specified stream.
  // If SSRC is 0, the renderer is used for the 'default' stream.
  virtual bool SetRenderer(uint32 ssrc, VideoRenderer* renderer) = 0;
  // Sets the renderer object to be used for the specified stream.
  // If SSRC is 0, the renderer is used for the 'default' stream.
  virtual bool SetExternalRenderer(uint32 ssrc, void* renderer) = 0;
  // Gets quality stats for the channel.
  virtual bool GetStats(VideoMediaInfo* info) = 0;
  // Send an intra frame to the receivers.
  virtual bool SendIntraFrame() = 0;
  // Request each of the remote senders to send an intra frame.
  virtual bool RequestIntraFrame() = 0;

  // Signal errors from MediaChannel: ssrc(uint32), error(Error).
  sigslot::signal2<uint32, Error> SignalMediaError;

 protected:
  // Non-owning pointer to the renderer; NULL until set.
  VideoRenderer *renderer_;
};
} // namespace cricket
#endif // TALK_SESSION_PHONE_MEDIACHANNEL_H_

View File

@ -0,0 +1,328 @@
/*
* libjingle
* Copyright 2004--2007, Google Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
* EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef TALK_SESSION_PHONE_MEDIAENGINE_H_
#define TALK_SESSION_PHONE_MEDIAENGINE_H_
#ifdef OSX
#include <CoreAudio/CoreAudio.h>
#endif
#include <string>
#include <vector>
#include "talk/base/sigslotrepeater.h"
#include "talk/session/phone/codec.h"
#include "talk/session/phone/devicemanager.h"
#include "talk/session/phone/mediachannel.h"
#include "talk/session/phone/videocommon.h"
namespace cricket {
// A class for playing out soundclips.
class SoundclipMedia {
 public:
  // Flags accepted by PlaySound.
  enum SoundclipFlags {
    SF_LOOP = 1,
  };

  virtual ~SoundclipMedia() {}

  // Plays a sound out to the speakers with the given audio stream. The stream
  // must be 16-bit little-endian 16 kHz PCM. If a stream is already playing
  // on this SoundclipMedia, it is stopped. If clip is NULL, nothing is played.
  // Returns whether it was successful.
  virtual bool PlaySound(const char *clip, int len, int flags) = 0;
};
// MediaEngine is an abstraction of a media engine which can be subclassed
// to support different media componentry backends. It supports voice and
// video operations in the same class to facilitate proper synchronization
// between both media types.
class MediaEngine {
 public:
  // TODO: Move this to a global location (also used in DeviceManager)
  // Capabilities of the media engine.
  enum Capabilities {
    AUDIO_RECV = 1 << 0,
    AUDIO_SEND = 1 << 1,
    VIDEO_RECV = 1 << 2,
    VIDEO_SEND = 1 << 3,
  };

  // Bitmask flags for options that may be supported by the media engine
  // implementation
  enum AudioOptions {
    ECHO_CANCELLATION = 1 << 0,
    AUTO_GAIN_CONTROL = 1 << 1,
    DEFAULT_AUDIO_OPTIONS = ECHO_CANCELLATION | AUTO_GAIN_CONTROL
  };
  // No video options defined yet.
  enum VideoOptions {
  };

  virtual ~MediaEngine() {}
  // Factory for the platform's concrete engine; implemented in the .cc.
  static MediaEngine* Create();

  // Initialization
  // Starts the engine.
  virtual bool Init() = 0;
  // Shuts down the engine.
  virtual void Terminate() = 0;
  // Returns what the engine is capable of, as a set of Capabilities, above.
  virtual int GetCapabilities() = 0;

  // MediaChannel creation
  // Creates a voice media channel. Returns NULL on failure.
  virtual VoiceMediaChannel *CreateChannel() = 0;
  // Creates a video media channel, paired with the specified voice channel.
  // Returns NULL on failure.
  virtual VideoMediaChannel *CreateVideoChannel(
      VoiceMediaChannel* voice_media_channel) = 0;

  // Creates a soundclip object for playing sounds on. Returns NULL on failure.
  virtual SoundclipMedia *CreateSoundclip() = 0;

  // Configuration
  // Sets global audio options. "options" are from AudioOptions, above.
  virtual bool SetAudioOptions(int options) = 0;
  // Sets global video options. "options" are from VideoOptions, above.
  virtual bool SetVideoOptions(int options) = 0;
  // Sets the default (maximum) codec/resolution and encoder option to capture
  // and encode video.
  virtual bool SetDefaultVideoEncoderConfig(const VideoEncoderConfig& config)
      = 0;

  // Device selection
  // TODO: Add method for selecting the soundclip device.
  virtual bool SetSoundDevices(const Device* in_device,
                               const Device* out_device) = 0;
  virtual bool SetVideoCaptureDevice(const Device* cam_device) = 0;
  // Attaches a native window/surface for rendering the given channel; the
  // float coordinates describe the destination rectangle.
  virtual bool SetVideoRenderer(int channel_id,
                                void* window,
                                unsigned int zOrder,
                                float left,
                                float top,
                                float right,
                                float bottom) = 0;

  // Device configuration
  // Gets the current speaker volume, as a value between 0 and 255.
  virtual bool GetOutputVolume(int* level) = 0;
  // Sets the current speaker volume, as a value between 0 and 255.
  virtual bool SetOutputVolume(int level) = 0;

  // Local monitoring
  // Gets the current microphone level, as a value between 0 and 10.
  virtual int GetInputLevel() = 0;
  // Starts or stops the local microphone. Useful if local mic info is needed
  // prior to a call being connected; the mic will be started automatically
  // when a VoiceMediaChannel starts sending.
  virtual bool SetLocalMonitor(bool enable) = 0;
  // Installs a callback for raw frames from the local camera.
  virtual bool SetLocalRenderer(VideoRenderer* renderer) = 0;
  // Starts/stops local camera.
  virtual CaptureResult SetVideoCapture(bool capture) = 0;

  // Supported codec lists.
  virtual const std::vector<AudioCodec>& audio_codecs() = 0;
  virtual const std::vector<VideoCodec>& video_codecs() = 0;

  // Logging control
  virtual void SetVoiceLogging(int min_sev, const char* filter) = 0;
  virtual void SetVideoLogging(int min_sev, const char* filter) = 0;

  sigslot::repeater1<CaptureResult> SignalVideoCaptureResult;
};
// CompositeMediaEngine implements the MediaEngine interface by pairing a
// voice engine class with a video engine class and delegating each call to
// the engine that owns it.
template<class VOICE, class VIDEO>
class CompositeMediaEngine : public MediaEngine {
 public:
  CompositeMediaEngine() {}
  virtual ~CompositeMediaEngine() {}

  // Initializes voice first, then video. If video fails, the voice engine
  // is terminated again so the composite is left fully uninitialized.
  virtual bool Init() {
    if (!voice_.Init()) {
      return false;
    }
    if (video_.Init()) {
      // Re-emit the video engine's capture-result signal through our
      // public repeater.
      SignalVideoCaptureResult.repeat(video_.SignalCaptureResult);
      return true;
    }
    voice_.Terminate();
    return false;
  }

  // Shuts both engines down, in reverse order of initialization.
  virtual void Terminate() {
    video_.Terminate();
    voice_.Terminate();
  }

  // Union of both engines' capability bits.
  virtual int GetCapabilities() {
    return voice_.GetCapabilities() | video_.GetCapabilities();
  }

  // Channel and soundclip creation, delegated to the owning engine.
  virtual VoiceMediaChannel *CreateChannel() { return voice_.CreateChannel(); }
  virtual VideoMediaChannel *CreateVideoChannel(VoiceMediaChannel* channel) {
    return video_.CreateChannel(channel);
  }
  virtual SoundclipMedia *CreateSoundclip() {
    return voice_.CreateSoundclip();
  }

  // Global options and default encoder configuration.
  virtual bool SetAudioOptions(int o) { return voice_.SetOptions(o); }
  virtual bool SetVideoOptions(int o) { return video_.SetOptions(o); }
  virtual bool SetDefaultVideoEncoderConfig(const VideoEncoderConfig& config) {
    return video_.SetDefaultEncoderConfig(config);
  }

  // Device selection.
  virtual bool SetSoundDevices(const Device* in_device,
                               const Device* out_device) {
    return voice_.SetDevices(in_device, out_device);
  }
  virtual bool SetVideoCaptureDevice(const Device* cam_device) {
    return video_.SetCaptureDevice(cam_device);
  }
  virtual bool SetVideoRenderer(int channel_id, void* window,
                                unsigned int zOrder, float left, float top,
                                float right, float bottom) {
    return video_.SetVideoRenderer(channel_id, window, zOrder,
                                   left, top, right, bottom);
  }

  // Speaker volume and local monitoring.
  virtual bool GetOutputVolume(int* level) {
    return voice_.GetOutputVolume(level);
  }
  virtual bool SetOutputVolume(int level) {
    return voice_.SetOutputVolume(level);
  }
  virtual int GetInputLevel() { return voice_.GetInputLevel(); }
  virtual bool SetLocalMonitor(bool enable) {
    return voice_.SetLocalMonitor(enable);
  }
  virtual bool SetLocalRenderer(VideoRenderer* renderer) {
    return video_.SetLocalRenderer(renderer);
  }
  virtual CaptureResult SetVideoCapture(bool capture) {
    return video_.SetCapture(capture);
  }

  // Supported codec lists.
  virtual const std::vector<AudioCodec>& audio_codecs() {
    return voice_.codecs();
  }
  virtual const std::vector<VideoCodec>& video_codecs() {
    return video_.codecs();
  }

  // Logging.
  virtual void SetVoiceLogging(int min_sev, const char* filter) {
    voice_.SetLogging(min_sev, filter);
  }
  virtual void SetVideoLogging(int min_sev, const char* filter) {
    video_.SetLogging(min_sev, filter);
  }

 protected:
  VOICE voice_;   // Handles all audio-related calls.
  VIDEO video_;   // Handles all video-related calls.
};
// NullVoiceEngine can be used with CompositeMediaEngine in the case where only
// a video engine is desired. Every operation is a no-op stub that reports
// success.
class NullVoiceEngine {
 public:
  bool Init() {
    return true;
  }
  void Terminate() {
  }
  int GetCapabilities() {
    return 0;
  }
  // If you need this to return an actual channel, use FakeMediaEngine instead.
  VoiceMediaChannel* CreateChannel() { return NULL; }
  SoundclipMedia* CreateSoundclip() { return NULL; }
  bool SetOptions(int opts) {
    return true;
  }
  bool SetDevices(const Device* in_device, const Device* out_device) {
    return true;
  }
  // Always reports a speaker volume of zero.
  bool GetOutputVolume(int* level) {
    *level = 0;
    return true;
  }
  bool SetOutputVolume(int level) {
    return true;
  }
  int GetInputLevel() {
    return 0;
  }
  bool SetLocalMonitor(bool enable) {
    return true;
  }
  // Always empty: no codecs are supported.
  const std::vector<AudioCodec>& codecs() {
    return codecs_;
  }
  void SetLogging(int min_sev, const char* filter) {
  }

 private:
  std::vector<AudioCodec> codecs_;
};
// NullVideoEngine can be used with CompositeMediaEngine in the case where only
// a voice engine is desired. Every operation is a no-op stub that reports
// success.
class NullVideoEngine {
 public:
  bool Init() {
    return true;
  }
  void Terminate() {
  }
  int GetCapabilities() {
    return 0;
  }
  // If you need this to return an actual channel, use FakeMediaEngine instead.
  VideoMediaChannel* CreateChannel(VoiceMediaChannel* voice_media_channel) {
    return NULL;
  }
  bool SetOptions(int opts) {
    return true;
  }
  bool SetDefaultEncoderConfig(const VideoEncoderConfig& config) {
    return true;
  }
  bool SetCaptureDevice(const Device* cam_device) {
    return true;
  }
  bool SetLocalRenderer(VideoRenderer* renderer) {
    return true;
  }
  CaptureResult SetCapture(bool capture) {
    return CR_SUCCESS;
  }
  // Always empty: no codecs are supported.
  const std::vector<VideoCodec>& codecs() {
    return codecs_;
  }
  void SetLogging(int min_sev, const char* filter) {
  }
  // Emitted to report capture start/stop results; never fired by this stub.
  sigslot::signal1<CaptureResult> SignalCaptureResult;

 private:
  std::vector<VideoCodec> codecs_;
};
// A MediaEngine composed of the two null stubs above: supports neither
// voice nor video.
typedef CompositeMediaEngine<NullVoiceEngine, NullVideoEngine> NullMediaEngine;
} // namespace cricket
#endif // TALK_SESSION_PHONE_MEDIAENGINE_H_