* Push the //depotGoogle/chrome/third_party/libjingle/...@38654 to svn third_party_mods\libjingle.

* Update the peerconnection sample client accordingly.
Review URL: http://webrtc-codereview.appspot.com/60008

git-svn-id: http://webrtc.googlecode.com/svn/trunk@302 4adac7df-926f-26a2-2b94-8c16560cd09d
This commit is contained in:
ronghuawu@google.com
2011-08-04 17:44:30 +00:00
parent 88bd440ef6
commit e256187f8b
68 changed files with 8798 additions and 11865 deletions

File diff suppressed because it is too large Load Diff

View File

@@ -1,6 +1,6 @@
/*
* libjingle
* Copyright 2004--2011, Google Inc.
* Copyright 2004--2008, Google Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -31,25 +31,15 @@
#include <atlbase.h>
#include <dbt.h>
#include <strmif.h> // must come before ks.h
#include <mmsystem.h>
#include <ks.h>
#include <ksmedia.h>
#define INITGUID // For PKEY_AudioEndpoint_GUID
#include <mmdeviceapi.h>
#include <MMSystem.h>
#include <functiondiscoverykeys_devpkey.h>
#include <uuids.h>
#include "talk/base/win32.h" // ToUtf8
#include "talk/base/win32window.h"
// PKEY_AudioEndpoint_GUID isn't included in uuid.lib and we don't want
// to define INITGUID in order to define all the uuids in this object file
// as it will conflict with uuid.lib (multiply defined symbols).
// So our workaround is to define this one missing symbol here manually.
EXTERN_C const PROPERTYKEY PKEY_AudioEndpoint_GUID = { {
0x1da5d803, 0xd492, 0x4edd, {
0x8c, 0x23, 0xe0, 0xc0, 0xff, 0xee, 0x7f, 0x0e
} }, 4
};
#elif OSX
#include <CoreAudio/CoreAudio.h>
#include <QuickTime/QuickTime.h>
@@ -79,14 +69,7 @@ namespace cricket {
// Initialize to empty string.
const std::string DeviceManager::kDefaultDeviceName;
#ifdef PLATFORM_CHROMIUM
class DeviceWatcher {
public:
explicit DeviceWatcher(DeviceManager* dm);
bool Start();
void Stop();
};
#elif defined(WIN32)
#ifdef WIN32
class DeviceWatcher : public talk_base::Win32Window {
public:
explicit DeviceWatcher(DeviceManager* dm);
@@ -135,8 +118,11 @@ class DeviceWatcher {
};
#endif
#if defined(CHROMEOS)
static bool ShouldAudioDeviceBeIgnored(const std::string& device_name);
#endif
#if !defined(LINUX) && !defined(IOS)
static bool ShouldDeviceBeIgnored(const std::string& device_name);
static bool ShouldVideoDeviceBeIgnored(const std::string& device_name);
#endif
#ifndef OSX
static bool GetVideoDevices(std::vector<Device>* out);
@@ -180,7 +166,7 @@ DeviceManager::~DeviceManager() {
bool DeviceManager::Init() {
if (!initialized_) {
#if defined(WIN32) && !defined(PLATFORM_CHROMIUM)
#if defined(WIN32)
HRESULT hr = CoInitializeEx(NULL, COINIT_MULTITHREADED);
need_couninitialize_ = SUCCEEDED(hr);
if (FAILED(hr)) {
@@ -201,7 +187,7 @@ bool DeviceManager::Init() {
void DeviceManager::Terminate() {
if (initialized_) {
watcher_->Stop();
#if defined(WIN32) && !defined(PLATFORM_CHROMIUM)
#if defined(WIN32)
if (need_couninitialize_) {
CoUninitialize();
need_couninitialize_ = false;
@@ -244,16 +230,13 @@ bool DeviceManager::GetAudioOutputDevice(const std::string& name, Device* out) {
#ifdef OSX
static bool FilterDevice(const Device& d) {
return ShouldDeviceBeIgnored(d.name);
return ShouldVideoDeviceBeIgnored(d.name);
}
#endif
bool DeviceManager::GetVideoCaptureDevices(std::vector<Device>* devices) {
devices->clear();
#ifdef PLATFORM_CHROMIUM
devices->push_back(Device("", -1));
return true;
#elif OSX
#ifdef OSX
if (GetQTKitVideoDevices(devices)) {
// Now filter out any known incompatible devices
devices->erase(remove_if(devices->begin(), devices->end(), FilterDevice),
@@ -268,10 +251,7 @@ bool DeviceManager::GetVideoCaptureDevices(std::vector<Device>* devices) {
bool DeviceManager::GetDefaultVideoCaptureDevice(Device* device) {
bool ret = false;
#ifdef PLATFORM_CHROMIUM
*device = Device("", -1);
ret = true;
#elif WIN32
#if WIN32
// If there are multiple capture devices, we want the first USB one.
// This avoids issues with defaulting to virtual cameras or grabber cards.
std::vector<Device> devices;
@@ -309,10 +289,6 @@ bool DeviceManager::GetVideoCaptureDevice(const std::string& name,
return false;
}
#ifdef PLATFORM_CHROMIUM
*out = Device(name, name);
return true;
#else
for (std::vector<Device>::const_iterator it = devices.begin();
it != devices.end(); ++it) {
if (name == it->name) {
@@ -320,7 +296,6 @@ bool DeviceManager::GetVideoCaptureDevice(const std::string& name,
return true;
}
}
#endif
return false;
}
@@ -352,10 +327,7 @@ bool DeviceManager::GetAudioDevice(bool is_input, const std::string& name,
bool DeviceManager::GetAudioDevicesByPlatform(bool input,
std::vector<Device>* devs) {
devs->clear();
#ifdef PLATFORM_CHROMIUM
devs->push_back(Device("", -1));
return true;
#elif defined(LINUX_SOUND_USED)
#if defined(LINUX_SOUND_USED)
if (!sound_system_.get()) {
return false;
}
@@ -378,7 +350,14 @@ bool DeviceManager::GetAudioDevicesByPlatform(bool input,
for (SoundSystemInterface::SoundDeviceLocatorList::iterator i = list.begin();
i != list.end();
++i, ++index) {
devs->push_back(Device((*i)->name(), index));
#if defined(CHROMEOS)
// On ChromeOS, we ignore ALSA surround and S/PDIF devices.
if (!ShouldAudioDeviceBeIgnored((*i)->device_name())) {
#endif
devs->push_back(Device((*i)->name(), index));
#if defined(CHROMEOS)
}
#endif
}
SoundSystemInterface::ClearSoundDeviceLocatorList(&list);
sound_system_.release();
@@ -409,18 +388,7 @@ bool DeviceManager::GetAudioDevicesByPlatform(bool input,
#endif
}
#if defined(PLATFORM_CHROMIUM)
DeviceWatcher::DeviceWatcher(DeviceManager* manager) {
}
bool DeviceWatcher::Start() {
return true;
}
void DeviceWatcher::Stop() {
}
#elif defined(WIN32)
#if defined(WIN32)
bool GetVideoDevices(std::vector<Device>* devices) {
return GetDevices(CLSID_VideoInputDeviceCategory, devices);
}
@@ -452,7 +420,7 @@ bool GetDevices(const CLSID& catid, std::vector<Device>* devices) {
if (SUCCEEDED(bag->Read(kFriendlyName, &name, 0)) &&
name.vt == VT_BSTR) {
name_str = talk_base::ToUtf8(name.bstrVal);
if (!ShouldDeviceBeIgnored(name_str)) {
if (!ShouldVideoDeviceBeIgnored(name_str)) {
// Get the device id if one exists.
if (SUCCEEDED(bag->Read(kDevicePath, &path, 0)) &&
path.vt == VT_BSTR) {
@@ -999,11 +967,32 @@ bool DeviceWatcher::IsDescriptorClosed() {
#endif
#if defined(CHROMEOS)
// Checks if we want to ignore this audio device.
static bool ShouldAudioDeviceBeIgnored(const std::string& device_name) {
static const char* const kFilteredAudioDevicesName[] = {
"surround40:",
"surround41:",
"surround50:",
"surround51:",
"surround71:",
"iec958:" // S/PDIF
};
for (int i = 0; i < ARRAY_SIZE(kFilteredAudioDevicesName); ++i) {
if (0 == device_name.find(kFilteredAudioDevicesName[i])) {
LOG(LS_INFO) << "Ignoring device " << device_name;
return true;
}
}
return false;
}
#endif
// TODO: Try to get hold of a copy of Final Cut to understand why we
// crash while scanning their components on OS X.
#if !defined(LINUX) && !defined(IOS)
static bool ShouldDeviceBeIgnored(const std::string& device_name) {
static const char* const kFilteredDevices[] = {
static bool ShouldVideoDeviceBeIgnored(const std::string& device_name) {
static const char* const kFilteredVideoDevicesName[] = {
"Google Camera Adapter", // Our own magiccams
#ifdef WIN32
"Asus virtual Camera", // Bad Asus desktop virtual cam
@@ -1014,9 +1003,9 @@ static bool ShouldDeviceBeIgnored(const std::string& device_name) {
#endif
};
for (int i = 0; i < ARRAY_SIZE(kFilteredDevices); ++i) {
if (strnicmp(device_name.c_str(), kFilteredDevices[i],
strlen(kFilteredDevices[i])) == 0) {
for (int i = 0; i < ARRAY_SIZE(kFilteredVideoDevicesName); ++i) {
if (strnicmp(device_name.c_str(), kFilteredVideoDevicesName[i],
strlen(kFilteredVideoDevicesName[i])) == 0) {
LOG(LS_INFO) << "Ignoring device " << device_name;
return true;
}

View File

@@ -42,8 +42,7 @@ namespace cricket {
class DeviceWatcher;
// Used to represent an audio or video capture or render device.
class Device {
public:
struct Device {
Device() {}
Device(const std::string& first, int second)
: name(first),

View File

@@ -1,221 +0,0 @@
// libjingle
// Copyright 2004--2005, Google Inc.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// 3. The name of the author may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
// EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef TALK_SESSION_PHONE_FILEMEDIAENGINE_H_
#define TALK_SESSION_PHONE_FILEMEDIAENGINE_H_
#include <string>
#include <vector>
#include "talk/base/scoped_ptr.h"
#include "talk/session/phone/codec.h"
#include "talk/session/phone/mediachannel.h"
#include "talk/session/phone/mediaengine.h"
namespace talk_base {
class StreamInterface;
}
namespace cricket {
// A media engine contains a capturer, an encoder, and a sender in the sender
// side and a receiver, a decoder, and a renderer in the receiver side.
// FileMediaEngine simulates the capturer and the encoder via an input RTP dump
// stream and simulates the decoder and the renderer via an output RTP dump
// stream. Depending on the parameters of the constructor, FileMediaEngine can
// act as file voice engine, file video engine, or both. Currently, we use
// only the RTP dump packets. TODO: Enable RTCP packets.
class FileMediaEngine : public MediaEngine {
public:
FileMediaEngine() {}
virtual ~FileMediaEngine() {}
// Set the file name of the input or output RTP dump for voice or video.
// Should be called before the channel is created.
void set_voice_input_filename(const std::string& filename) {
voice_input_filename_ = filename;
}
void set_voice_output_filename(const std::string& filename) {
voice_output_filename_ = filename;
}
void set_video_input_filename(const std::string& filename) {
video_input_filename_ = filename;
}
void set_video_output_filename(const std::string& filename) {
video_output_filename_ = filename;
}
// Should be called before codecs() and video_codecs() are called. We need to
// set the voice and video codecs; otherwise, Jingle initiation will fail.
void set_voice_codecs(const std::vector<AudioCodec>& codecs) {
voice_codecs_ = codecs;
}
void set_video_codecs(const std::vector<VideoCodec>& codecs) {
video_codecs_ = codecs;
}
// Implement pure virtual methods of MediaEngine.
virtual bool Init() { return true; }
virtual void Terminate() {}
virtual int GetCapabilities();
virtual VoiceMediaChannel* CreateChannel();
virtual VideoMediaChannel* CreateVideoChannel(VoiceMediaChannel* voice_ch);
virtual SoundclipMedia* CreateSoundclip() { return NULL; }
virtual bool SetAudioOptions(int options) { return true; }
virtual bool SetVideoOptions(int options) { return true; }
virtual bool SetDefaultVideoEncoderConfig(const VideoEncoderConfig& config) {
return true;
}
virtual bool SetSoundDevices(const Device* in_dev, const Device* out_dev) {
return true;
}
virtual bool SetVideoCaptureDevice(const Device* cam_device) { return true; }
virtual bool GetOutputVolume(int* level) { *level = 0; return true; }
virtual bool SetOutputVolume(int level) { return true; }
virtual int GetInputLevel() { return 0; }
virtual bool SetLocalMonitor(bool enable) { return true; }
virtual bool SetLocalRenderer(VideoRenderer* renderer) { return true; }
// TODO: control channel send?
virtual CaptureResult SetVideoCapture(bool capture) { return CR_SUCCESS; }
virtual const std::vector<AudioCodec>& audio_codecs() {
return voice_codecs_;
}
virtual const std::vector<VideoCodec>& video_codecs() {
return video_codecs_;
}
virtual bool FindAudioCodec(const AudioCodec& codec) { return true; }
virtual bool FindVideoCodec(const VideoCodec& codec) { return true; }
virtual void SetVoiceLogging(int min_sev, const char* filter) {}
virtual void SetVideoLogging(int min_sev, const char* filter) {}
private:
std::string voice_input_filename_;
std::string voice_output_filename_;
std::string video_input_filename_;
std::string video_output_filename_;
std::vector<AudioCodec> voice_codecs_;
std::vector<VideoCodec> video_codecs_;
DISALLOW_COPY_AND_ASSIGN(FileMediaEngine);
};
class RtpSenderReceiver; // Forward declaration. Defined in the .cc file.
class FileVoiceChannel : public VoiceMediaChannel {
public:
FileVoiceChannel(const std::string& in_file, const std::string& out_file);
virtual ~FileVoiceChannel();
// Implement pure virtual methods of VoiceMediaChannel.
virtual bool SetRecvCodecs(const std::vector<AudioCodec>& codecs) {
return true;
}
virtual bool SetSendCodecs(const std::vector<AudioCodec>& codecs);
virtual bool SetRecvRtpHeaderExtensions(
const std::vector<RtpHeaderExtension>& extensions) {
return true;
}
virtual bool SetSendRtpHeaderExtensions(
const std::vector<RtpHeaderExtension>& extensions) {
return true;
}
virtual bool SetPlayout(bool playout) { return true; }
virtual bool SetSend(SendFlags flag);
virtual bool AddStream(uint32 ssrc) { return true; }
virtual bool RemoveStream(uint32 ssrc) { return true; }
virtual bool GetActiveStreams(AudioInfo::StreamList* actives) { return true; }
virtual int GetOutputLevel() { return 0; }
virtual bool SetRingbackTone(const char* buf, int len) { return true; }
virtual bool PlayRingbackTone(uint32 ssrc, bool play, bool loop) {
return true;
}
virtual bool PressDTMF(int event, bool playout) { return true; }
virtual bool GetStats(VoiceMediaInfo* info) { return true; }
// Implement pure virtual methods of MediaChannel.
virtual void OnPacketReceived(talk_base::Buffer* packet);
virtual void OnRtcpReceived(talk_base::Buffer* packet) {}
virtual void SetSendSsrc(uint32 id) {} // TODO: change RTP packet?
virtual bool SetRtcpCName(const std::string& cname) { return true; }
virtual bool Mute(bool on) { return false; }
virtual bool SetSendBandwidth(bool autobw, int bps) { return true; }
virtual bool SetOptions(int options) { return true; }
virtual int GetMediaChannelId() { return -1; }
private:
talk_base::scoped_ptr<RtpSenderReceiver> rtp_sender_receiver_;
DISALLOW_COPY_AND_ASSIGN(FileVoiceChannel);
};
class FileVideoChannel : public VideoMediaChannel {
public:
FileVideoChannel(const std::string& in_file, const std::string& out_file);
virtual ~FileVideoChannel();
// Implement pure virtual methods of VideoMediaChannel.
virtual bool SetRecvCodecs(const std::vector<VideoCodec>& codecs) {
return true;
}
virtual bool SetSendCodecs(const std::vector<VideoCodec>& codecs);
virtual bool SetRecvRtpHeaderExtensions(
const std::vector<RtpHeaderExtension>& extensions) {
return true;
}
virtual bool SetSendRtpHeaderExtensions(
const std::vector<RtpHeaderExtension>& extensions) {
return true;
}
virtual bool SetRender(bool render) { return true; }
virtual bool SetSend(bool send);
virtual bool AddStream(uint32 ssrc, uint32 voice_ssrc) { return true; }
virtual bool RemoveStream(uint32 ssrc) { return true; }
virtual bool SetRenderer(uint32 ssrc, VideoRenderer* renderer) {
return true;
}
virtual bool SetExternalRenderer(uint32 ssrc, void* renderer) {
return true;
}
virtual bool GetStats(VideoMediaInfo* info) { return true; }
virtual bool SendIntraFrame() { return false; }
virtual bool RequestIntraFrame() { return false; }
// Implement pure virtual methods of MediaChannel.
virtual void OnPacketReceived(talk_base::Buffer* packet);
virtual void OnRtcpReceived(talk_base::Buffer* packet) {}
virtual void SetSendSsrc(uint32 id) {} // TODO: change RTP packet?
virtual bool SetRtcpCName(const std::string& cname) { return true; }
virtual bool Mute(bool on) { return false; }
virtual bool SetSendBandwidth(bool autobw, int bps) { return true; }
virtual bool SetOptions(int options) { return true; }
virtual int GetMediaChannelId() { return -1; }
private:
talk_base::scoped_ptr<RtpSenderReceiver> rtp_sender_receiver_;
DISALLOW_COPY_AND_ASSIGN(FileVideoChannel);
};
} // namespace cricket
#endif // TALK_SESSION_PHONE_FILEMEDIAENGINE_H_

View File

@@ -1,501 +0,0 @@
/*
* libjingle
* Copyright 2004--2010, Google Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
* EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef TALK_SESSION_PHONE_MEDIACHANNEL_H_
#define TALK_SESSION_PHONE_MEDIACHANNEL_H_
#include <string>
#include <vector>
#include "talk/base/basictypes.h"
#include "talk/base/sigslot.h"
#include "talk/base/socket.h"
#include "talk/session/phone/codec.h"
// TODO: re-evaluate this include
#include "talk/session/phone/audiomonitor.h"
namespace talk_base {
class Buffer;
}
namespace flute {
class MagicCamVideoRenderer;
}
namespace cricket {
const int kMinRtpHeaderExtensionId = 1;
const int kMaxRtpHeaderExtensionId = 255;
struct RtpHeaderExtension {
RtpHeaderExtension(const std::string& u, int i) : uri(u), id(i) {}
std::string uri;
int id;
// TODO: SendRecv direction;
};
enum VoiceMediaChannelOptions {
OPT_CONFERENCE = 0x10000, // tune the audio stream for conference mode
};
enum VideoMediaChannelOptions {
OPT_INTERPOLATE = 0x10000 // Increase the output framerate by 2x by
// interpolating frames
};
class MediaChannel : public sigslot::has_slots<> {
public:
class NetworkInterface {
public:
enum SocketType { ST_RTP, ST_RTCP };
virtual bool SendPacket(talk_base::Buffer* packet) = 0;
virtual bool SendRtcp(talk_base::Buffer* packet) = 0;
virtual int SetOption(SocketType type, talk_base::Socket::Option opt,
int option) = 0;
virtual ~NetworkInterface() {}
};
MediaChannel() : network_interface_(NULL) {}
virtual ~MediaChannel() {}
// Gets/sets the abstract interface class for sending RTP/RTCP data.
NetworkInterface *network_interface() { return network_interface_; }
virtual void SetInterface(NetworkInterface *iface) {
network_interface_ = iface;
}
// Called when a RTP packet is received.
virtual void OnPacketReceived(talk_base::Buffer* packet) = 0;
// Called when a RTCP packet is received.
virtual void OnRtcpReceived(talk_base::Buffer* packet) = 0;
// Sets the SSRC to be used for outgoing data.
virtual void SetSendSsrc(uint32 id) = 0;
// Set the CNAME of RTCP
virtual bool SetRtcpCName(const std::string& cname) = 0;
// Mutes the channel.
virtual bool Mute(bool on) = 0;
// Sets the RTP extension headers and IDs to use when sending RTP.
virtual bool SetRecvRtpHeaderExtensions(
const std::vector<RtpHeaderExtension>& extensions) = 0;
virtual bool SetSendRtpHeaderExtensions(
const std::vector<RtpHeaderExtension>& extensions) = 0;
// Sets the rate control to use when sending data.
virtual bool SetSendBandwidth(bool autobw, int bps) = 0;
// Sets the media options to use.
virtual bool SetOptions(int options) = 0;
// Gets the Rtc channel id
virtual int GetMediaChannelId() = 0;
protected:
NetworkInterface *network_interface_;
};
enum SendFlags {
SEND_NOTHING,
SEND_RINGBACKTONE,
SEND_MICROPHONE
};
struct VoiceSenderInfo {
uint32 ssrc;
int bytes_sent;
int packets_sent;
int packets_lost;
float fraction_lost;
int ext_seqnum;
int rtt_ms;
int jitter_ms;
int audio_level;
};
struct VoiceReceiverInfo {
uint32 ssrc;
int bytes_rcvd;
int packets_rcvd;
int packets_lost;
float fraction_lost;
int ext_seqnum;
int jitter_ms;
int jitter_buffer_ms;
int jitter_buffer_preferred_ms;
int delay_estimate_ms;
int audio_level;
};
struct VideoSenderInfo {
uint32 ssrc;
int bytes_sent;
int packets_sent;
int packets_cached;
int packets_lost;
float fraction_lost;
int firs_rcvd;
int nacks_rcvd;
int rtt_ms;
int frame_width;
int frame_height;
int framerate_input;
int framerate_sent;
int nominal_bitrate;
int preferred_bitrate;
};
struct VideoReceiverInfo {
uint32 ssrc;
int bytes_rcvd;
// vector<int> layer_bytes_rcvd;
int packets_rcvd;
int packets_lost;
int packets_concealed;
float fraction_lost;
int firs_sent;
int nacks_sent;
int frame_width;
int frame_height;
int framerate_rcvd;
int framerate_decoded;
int framerate_output;
};
struct BandwidthEstimationInfo {
int available_send_bandwidth;
int available_recv_bandwidth;
int target_enc_bitrate;
int actual_enc_bitrate;
int retransmit_bitrate;
int transmit_bitrate;
int bucket_delay;
};
struct VoiceMediaInfo {
void Clear() {
senders.clear();
receivers.clear();
}
std::vector<VoiceSenderInfo> senders;
std::vector<VoiceReceiverInfo> receivers;
};
struct VideoMediaInfo {
void Clear() {
senders.clear();
receivers.clear();
bw_estimations.clear();
}
std::vector<VideoSenderInfo> senders;
std::vector<VideoReceiverInfo> receivers;
std::vector<BandwidthEstimationInfo> bw_estimations;
};
class VoiceMediaChannel : public MediaChannel {
public:
enum Error {
ERROR_NONE = 0, // No error.
ERROR_OTHER, // Other errors.
ERROR_REC_DEVICE_OPEN_FAILED = 100, // Could not open mic.
ERROR_REC_DEVICE_MUTED, // Mic was muted by OS.
ERROR_REC_DEVICE_SILENT, // No background noise picked up.
ERROR_REC_DEVICE_SATURATION, // Mic input is clipping.
ERROR_REC_DEVICE_REMOVED, // Mic was removed while active.
ERROR_REC_RUNTIME_ERROR, // Processing is encountering errors.
ERROR_REC_SRTP_ERROR, // Generic SRTP failure.
ERROR_REC_SRTP_AUTH_FAILED, // Failed to authenticate packets.
ERROR_REC_TYPING_NOISE_DETECTED, // Typing noise is detected.
ERROR_PLAY_DEVICE_OPEN_FAILED = 200, // Could not open playout.
ERROR_PLAY_DEVICE_MUTED, // Playout muted by OS.
ERROR_PLAY_DEVICE_REMOVED, // Playout removed while active.
ERROR_PLAY_RUNTIME_ERROR, // Errors in voice processing.
ERROR_PLAY_SRTP_ERROR, // Generic SRTP failure.
ERROR_PLAY_SRTP_AUTH_FAILED, // Failed to authenticate packets.
ERROR_PLAY_SRTP_REPLAY, // Packet replay detected.
};
VoiceMediaChannel() {}
virtual ~VoiceMediaChannel() {}
// Sets the codecs/payload types to be used for incoming media.
virtual bool SetRecvCodecs(const std::vector<AudioCodec>& codecs) = 0;
// Sets the codecs/payload types to be used for outgoing media.
virtual bool SetSendCodecs(const std::vector<AudioCodec>& codecs) = 0;
// Starts or stops playout of received audio.
virtual bool SetPlayout(bool playout) = 0;
// Starts or stops sending (and potentially capture) of local audio.
virtual bool SetSend(SendFlags flag) = 0;
// Adds a new receive-only stream with the specified SSRC.
virtual bool AddStream(uint32 ssrc) = 0;
// Removes a stream added with AddStream.
virtual bool RemoveStream(uint32 ssrc) = 0;
// Gets current energy levels for all incoming streams.
virtual bool GetActiveStreams(AudioInfo::StreamList* actives) = 0;
// Get the current energy level for the outgoing stream.
virtual int GetOutputLevel() = 0;
// Specifies a ringback tone to be played during call setup.
virtual bool SetRingbackTone(const char *buf, int len) = 0;
// Plays or stops the aforementioned ringback tone
virtual bool PlayRingbackTone(uint32 ssrc, bool play, bool loop) = 0;
// Sends a out-of-band DTMF signal using the specified event.
virtual bool PressDTMF(int event, bool playout) = 0;
// Gets quality stats for the channel.
virtual bool GetStats(VoiceMediaInfo* info) = 0;
// Gets last reported error for this media channel.
virtual void GetLastMediaError(uint32* ssrc,
VoiceMediaChannel::Error* error) {
ASSERT(error != NULL);
*error = ERROR_NONE;
}
// Signal errors from MediaChannel. Arguments are:
// ssrc(uint32), and error(VoiceMediaChannel::Error).
sigslot::signal2<uint32, VoiceMediaChannel::Error> SignalMediaError;
};
// Represents a YUV420 (a.k.a. I420) video frame.
class VideoFrame {
friend class flute::MagicCamVideoRenderer;
public:
VideoFrame() : rendered_(false) {}
virtual ~VideoFrame() {}
virtual size_t GetWidth() const = 0;
virtual size_t GetHeight() const = 0;
virtual const uint8 *GetYPlane() const = 0;
virtual const uint8 *GetUPlane() const = 0;
virtual const uint8 *GetVPlane() const = 0;
virtual uint8 *GetYPlane() = 0;
virtual uint8 *GetUPlane() = 0;
virtual uint8 *GetVPlane() = 0;
virtual int32 GetYPitch() const = 0;
virtual int32 GetUPitch() const = 0;
virtual int32 GetVPitch() const = 0;
// For retrieving the aspect ratio of each pixel. Usually this is 1x1, but
// the aspect_ratio_idc parameter of H.264 can specify non-square pixels.
virtual size_t GetPixelWidth() const = 0;
virtual size_t GetPixelHeight() const = 0;
// TODO: Add a fourcc format here and probably combine VideoFrame
// with CapturedFrame.
virtual int64 GetElapsedTime() const = 0;
virtual int64 GetTimeStamp() const = 0;
virtual void SetElapsedTime(int64 elapsed_time) = 0;
virtual void SetTimeStamp(int64 time_stamp) = 0;
// Make a copy of the frame. The frame buffer itself may not be copied,
// in which case both the current and new VideoFrame will share a single
// reference-counted frame buffer.
virtual VideoFrame *Copy() const = 0;
// Writes the frame into the given frame buffer, provided that it is of
// sufficient size. Returns the frame's actual size, regardless of whether
// it was written or not (like snprintf). If there is insufficient space,
// nothing is written.
virtual size_t CopyToBuffer(uint8 *buffer, size_t size) const = 0;
// Converts the I420 data to RGB of a certain type such as ARGB and ABGR.
// Returns the frame's actual size, regardless of whether it was written or
// not (like snprintf). Parameters size and pitch_rgb are in units of bytes.
// If there is insufficient space, nothing is written.
virtual size_t ConvertToRgbBuffer(uint32 to_fourcc, uint8 *buffer,
size_t size, size_t pitch_rgb) const = 0;
// Writes the frame into the given planes, stretched to the given width and
// height. The parameter "interpolate" controls whether to interpolate or just
// take the nearest-point. The parameter "crop" controls whether to crop this
// frame to the aspect ratio of the given dimensions before stretching.
virtual void StretchToPlanes(uint8 *y, uint8 *u, uint8 *v,
int32 pitchY, int32 pitchU, int32 pitchV,
size_t width, size_t height,
bool interpolate, bool crop) const = 0;
// Writes the frame into the given frame buffer, stretched to the given width
// and height, provided that it is of sufficient size. Returns the frame's
// actual size, regardless of whether it was written or not (like snprintf).
// If there is insufficient space, nothing is written. The parameter
// "interpolate" controls whether to interpolate or just take the
// nearest-point. The parameter "crop" controls whether to crop this frame to
// the aspect ratio of the given dimensions before stretching.
virtual size_t StretchToBuffer(size_t w, size_t h, uint8 *buffer, size_t size,
bool interpolate, bool crop) const = 0;
// Writes the frame into the target VideoFrame, stretched to the size of that
// frame. The parameter "interpolate" controls whether to interpolate or just
// take the nearest-point. The parameter "crop" controls whether to crop this
// frame to the aspect ratio of the target frame before stretching.
virtual void StretchToFrame(VideoFrame *target, bool interpolate,
bool crop) const = 0;
// Stretches the frame to the given size, creating a new VideoFrame object to
// hold it. The parameter "interpolate" controls whether to interpolate or
// just take the nearest-point. The parameter "crop" controls whether to crop
// this frame to the aspect ratio of the given dimensions before stretching.
virtual VideoFrame *Stretch(size_t w, size_t h, bool interpolate,
bool crop) const = 0;
// Size of an I420 image of given dimensions when stored as a frame buffer.
static size_t SizeOf(size_t w, size_t h) {
return w * h + ((w + 1) / 2) * ((h + 1) / 2) * 2;
}
protected:
// The frame needs to be rendered to magiccam only once.
// TODO: Remove this flag once magiccam rendering is fully replaced
// by client3d rendering.
mutable bool rendered_;
};
// Simple subclass for use in mocks.
class NullVideoFrame : public VideoFrame {
public:
virtual size_t GetWidth() const { return 0; }
virtual size_t GetHeight() const { return 0; }
virtual const uint8 *GetYPlane() const { return NULL; }
virtual const uint8 *GetUPlane() const { return NULL; }
virtual const uint8 *GetVPlane() const { return NULL; }
virtual uint8 *GetYPlane() { return NULL; }
virtual uint8 *GetUPlane() { return NULL; }
virtual uint8 *GetVPlane() { return NULL; }
virtual int32 GetYPitch() const { return 0; }
virtual int32 GetUPitch() const { return 0; }
virtual int32 GetVPitch() const { return 0; }
virtual size_t GetPixelWidth() const { return 1; }
virtual size_t GetPixelHeight() const { return 1; }
virtual int64 GetElapsedTime() const { return 0; }
virtual int64 GetTimeStamp() const { return 0; }
virtual void SetElapsedTime(int64 elapsed_time) {}
virtual void SetTimeStamp(int64 time_stamp) {}
virtual VideoFrame *Copy() const {
return NULL;
}
virtual size_t CopyToBuffer(uint8 *buffer, size_t size) const {
return 0;
}
virtual size_t ConvertToRgbBuffer(uint32 to_fourcc, uint8 *buffer,
size_t size, size_t pitch_rgb) const {
return 0;
}
virtual void StretchToPlanes(uint8 *y, uint8 *u, uint8 *v,
int32 pitchY, int32 pitchU, int32 pitchV,
size_t width, size_t height,
bool interpolate, bool crop) const {
}
virtual size_t StretchToBuffer(size_t w, size_t h, uint8 *buffer, size_t size,
bool interpolate, bool crop) const {
return 0;
}
virtual void StretchToFrame(VideoFrame *target, bool interpolate,
bool crop) const {
}
virtual VideoFrame *Stretch(size_t w, size_t h, bool interpolate,
bool crop) const {
return NULL;
}
};
// Abstract interface for rendering VideoFrames.
class VideoRenderer {
public:
virtual ~VideoRenderer() {}
// Called when the video has changed size.
virtual bool SetSize(int width, int height, int reserved) = 0;
// Called when a new frame is available for display.
virtual bool RenderFrame(const VideoFrame *frame) = 0;
};
// Simple implementation for use in tests.
class NullVideoRenderer : public VideoRenderer {
virtual bool SetSize(int width, int height, int reserved) {
return true;
}
// Called when a new frame is available for display.
virtual bool RenderFrame(const VideoFrame *frame) {
return true;
}
};
// Channel interface for sending and receiving video streams.
class VideoMediaChannel : public MediaChannel {
 public:
  // Errors reported through SignalMediaError.  Values from 100 are
  // send-side (REC), values from 200 are receive-side (PLAY).
  enum Error {
    ERROR_NONE = 0,                       // No error.
    ERROR_OTHER,                          // Other errors.
    ERROR_REC_DEVICE_OPEN_FAILED = 100,   // Could not open camera.
    ERROR_REC_DEVICE_NO_DEVICE,           // No camera.
    ERROR_REC_DEVICE_IN_USE,              // Device is already in use.
    ERROR_REC_DEVICE_REMOVED,             // Device is removed.
    ERROR_REC_SRTP_ERROR,                 // Generic sender SRTP failure.
    ERROR_REC_SRTP_AUTH_FAILED,           // Failed to authenticate packets.
    ERROR_PLAY_SRTP_ERROR = 200,          // Generic receiver SRTP failure.
    ERROR_PLAY_SRTP_AUTH_FAILED,          // Failed to authenticate packets.
    ERROR_PLAY_SRTP_REPLAY,               // Packet replay detected.
  };

  VideoMediaChannel() { renderer_ = NULL; }
  virtual ~VideoMediaChannel() {}
  // Sets the codecs/payload types to be used for incoming media.
  virtual bool SetRecvCodecs(const std::vector<VideoCodec> &codecs) = 0;
  // Sets the codecs/payload types to be used for outgoing media.
  virtual bool SetSendCodecs(const std::vector<VideoCodec> &codecs) = 0;
  // Starts or stops playout of received video.
  virtual bool SetRender(bool render) = 0;
  // Starts or stops transmission (and potentially capture) of local video.
  virtual bool SetSend(bool send) = 0;
  // Adds a new receive-only stream with the specified SSRC.
  virtual bool AddStream(uint32 ssrc, uint32 voice_ssrc) = 0;
  // Removes a stream added with AddStream.
  virtual bool RemoveStream(uint32 ssrc) = 0;
  // Sets the renderer object to be used for the specified stream.
  // If SSRC is 0, the renderer is used for the 'default' stream.
  virtual bool SetRenderer(uint32 ssrc, VideoRenderer* renderer) = 0;
  // Sets an external (engine-specific, opaque) renderer object to be used
  // for the specified stream.
  // If SSRC is 0, the renderer is used for the 'default' stream.
  virtual bool SetExternalRenderer(uint32 ssrc, void* renderer) = 0;
  // Gets quality stats for the channel.
  virtual bool GetStats(VideoMediaInfo* info) = 0;
  // Send an intra frame to the receivers.
  virtual bool SendIntraFrame() = 0;
  // Request each of the remote senders to send an intra frame.
  virtual bool RequestIntraFrame() = 0;
  // Fired on device or SRTP errors; carries the SSRC and the error code.
  sigslot::signal2<uint32, Error> SignalMediaError;

 protected:
  // NOTE(review): not referenced by this interface itself; presumably for
  // use by implementations — confirm.
  VideoRenderer *renderer_;
};
} // namespace cricket
#endif // TALK_SESSION_PHONE_MEDIACHANNEL_H_

View File

@@ -1,58 +0,0 @@
//
// libjingle
// Copyright 2004--2007, Google Inc.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// 3. The name of the author may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
// EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
#ifdef HAVE_WEBRTC
#include "talk/app/voicemediaengine.h"
#include "talk/app/videomediaengine.h"
#endif
#include "talk/session/phone/mediaengine.h"
#ifdef HAVE_LINPHONE
#include "talk/session/phone/linphonemediaengine.h"
#endif
namespace cricket {
#ifdef HAVE_WEBRTC
// Wire the composite engine together: the video engine is constructed with
// a pointer to the voice engine (video_(&voice_)) so they can cooperate.
template<>
CompositeMediaEngine<webrtc::RtcVoiceEngine, webrtc::RtcVideoEngine>
::CompositeMediaEngine() : video_(&voice_) {
}

// Factory: returns the WebRTC-backed media engine.  Caller owns the result.
MediaEngine* MediaEngine::Create() {
  return new CompositeMediaEngine<webrtc::RtcVoiceEngine,
                                  webrtc::RtcVideoEngine>();
}
#else
// Factory: returns the Linphone engine when built with HAVE_LINPHONE,
// otherwise a no-op engine.  Caller owns the result.
MediaEngine* MediaEngine::Create() {
#ifdef HAVE_LINPHONE
  return new LinphoneMediaEngine("", "");
#else
  return new NullMediaEngine();
#endif
}
#endif
}; // namespace cricket

View File

@@ -1,242 +0,0 @@
/*
* libjingle
* Copyright 2010, Google Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
* EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "talk/session/phone/mediamessages.h"
#include "talk/base/stringencode.h"
#include "talk/p2p/base/constants.h"
#include "talk/session/phone/mediasessionclient.h"
#include "talk/xmllite/xmlelement.h"
namespace cricket {
// Returns the first source whose nick matches, or NULL if no source in the
// list carries that nick.
const NamedSource* GetFirstSourceByNick(const NamedSources& sources,
                                        const std::string& nick) {
  for (size_t i = 0; i < sources.size(); ++i) {
    if (sources[i].nick == nick) {
      return &sources[i];
    }
  }
  return NULL;
}
// Returns the first source carrying the given ssrc, or NULL if the ssrc is
// not present in the list.
const NamedSource* GetSourceBySsrc(const NamedSources& sources, uint32 ssrc) {
  for (size_t i = 0; i < sources.size(); ++i) {
    if (sources[i].ssrc == ssrc) {
      return &sources[i];
    }
  }
  return NULL;
}
// Nick-based lookups; first match wins.  See the header TODO about
// migrating callers to ssrc-based removal.
const NamedSource* MediaSources::GetFirstAudioSourceByNick(
    const std::string& nick) {
  return GetFirstSourceByNick(audio, nick);
}

const NamedSource* MediaSources::GetFirstVideoSourceByNick(
    const std::string& nick) {
  return GetFirstSourceByNick(video, nick);
}

// Ssrc-based lookups.
const NamedSource* MediaSources::GetAudioSourceBySsrc(uint32 ssrc) {
  return GetSourceBySsrc(audio, ssrc);
}

const NamedSource* MediaSources::GetVideoSourceBySsrc(uint32 ssrc) {
  return GetSourceBySsrc(video, ssrc);
}

// NOTE: There is no check here for duplicate sources, so check before
// adding.
void AddSource(NamedSources* sources, const NamedSource& source) {
  sources->push_back(source);
}

void MediaSources::AddAudioSource(const NamedSource& source) {
  AddSource(&audio, source);
}

void MediaSources::AddVideoSource(const NamedSource& source) {
  AddSource(&video, source);
}
// Erases every source whose ssrc matches.  erase() on a vector invalidates
// iterators at and after the erased element, so advance via the iterator
// that erase() returns instead of incrementing.
void RemoveSourceBySsrc(NamedSources* sources, uint32 ssrc) {
  NamedSources::iterator it = sources->begin();
  while (it != sources->end()) {
    if (it->ssrc == ssrc) {
      it = sources->erase(it);
    } else {
      ++it;
    }
  }
}
// Removes all audio sources carrying the given ssrc.
void MediaSources::RemoveAudioSourceBySsrc(uint32 ssrc) {
  RemoveSourceBySsrc(&audio, ssrc);
}

// Removes all video sources carrying the given ssrc.
void MediaSources::RemoveVideoSourceBySsrc(uint32 ssrc) {
  RemoveSourceBySsrc(&video, ssrc);
}

// Parses an ssrc from its decimal string form; false on bad input.
bool ParseSsrc(const std::string& string, uint32* ssrc) {
  return talk_base::FromString(string, ssrc);
}

// Parses an ssrc from an XML element's body text; a NULL element fails.
bool ParseSsrc(const buzz::XmlElement* element, uint32* ssrc) {
  if (element == NULL) {
    return false;
  }
  return ParseSsrc(element->BodyText(), ssrc);
}
// Parses a <source> element into *named_source.  A missing nick is a hard
// parse error; name, usage, state, and ssrc are optional.  Returns false
// and fills *error on failure.
bool ParseNamedSource(const buzz::XmlElement* source_elem,
                      NamedSource* named_source,
                      ParseError* error) {
  named_source->nick = source_elem->Attr(QN_JINGLE_DRAFT_SOURCE_NICK);
  if (named_source->nick.empty()) {
    return BadParse("Missing or invalid nick.", error);
  }

  named_source->name = source_elem->Attr(QN_JINGLE_DRAFT_SOURCE_NAME);
  named_source->usage = source_elem->Attr(QN_JINGLE_DRAFT_SOURCE_USAGE);
  // The source is flagged removed when its state attribute says so.
  named_source->removed =
      (STR_JINGLE_DRAFT_SOURCE_STATE_REMOVED ==
       source_elem->Attr(QN_JINGLE_DRAFT_SOURCE_STATE));

  // The ssrc child is optional; when present and non-empty it must parse
  // as a uint32, and SetSsrc() records that it was explicitly signaled.
  const buzz::XmlElement* ssrc_elem =
      source_elem->FirstNamed(QN_JINGLE_DRAFT_SOURCE_SSRC);
  if (ssrc_elem != NULL && !ssrc_elem->BodyText().empty()) {
    uint32 ssrc;
    if (!ParseSsrc(ssrc_elem->BodyText(), &ssrc)) {
      return BadParse("Missing or invalid ssrc.", error);
    }
    named_source->SetSsrc(ssrc);
  }
  return true;
}
// True if the action contains at least one <notify> element.
bool IsSourcesNotify(const buzz::XmlElement* action_elem) {
  return action_elem->FirstNamed(QN_JINGLE_DRAFT_NOTIFY) != NULL;
}
// Parses every <notify> element of an action into audio/video source
// lists.  The session_description maps each notify's content name to a
// media type; sources belonging to a content that is neither audio nor
// video are silently dropped.  Returns false and sets *error on any parse
// failure.
bool ParseSourcesNotify(const buzz::XmlElement* action_elem,
                        const SessionDescription* session_description,
                        MediaSources* sources,
                        ParseError* error) {
  for (const buzz::XmlElement* notify_elem
       = action_elem->FirstNamed(QN_JINGLE_DRAFT_NOTIFY);
       notify_elem != NULL;
       notify_elem = notify_elem->NextNamed(QN_JINGLE_DRAFT_NOTIFY)) {
    std::string content_name = notify_elem->Attr(QN_JINGLE_DRAFT_CONTENT_NAME);
    for (const buzz::XmlElement* source_elem
         = notify_elem->FirstNamed(QN_JINGLE_DRAFT_SOURCE);
         source_elem != NULL;
         source_elem = source_elem->NextNamed(QN_JINGLE_DRAFT_SOURCE)) {
      NamedSource named_source;
      if (!ParseNamedSource(source_elem, &named_source, error)) {
        return false;
      }
      // Without a session description we cannot map the content name to a
      // media type.  Report that directly instead of the misleading
      // "unknown content name" message previously reused for this case.
      if (session_description == NULL) {
        return BadParse("no session description to map content name: " +
                        content_name, error);
      }
      const ContentInfo* content =
          FindContentInfoByName(session_description->contents(), content_name);
      if (content == NULL) {
        return BadParse("unknown content name: " + content_name, error);
      }
      if (IsAudioContent(content)) {
        sources->audio.push_back(named_source);
      } else if (IsVideoContent(content)) {
        sources->video.push_back(named_source);
      }
    }
  }
  return true;
}
// Builds a <view> element for the given content with the given view type.
// The caller takes ownership of the returned element.
buzz::XmlElement* CreateViewElem(const std::string& name,
                                 const std::string& type) {
  buzz::XmlElement* view_elem =
      new buzz::XmlElement(QN_JINGLE_DRAFT_VIEW, true);
  view_elem->AddAttr(QN_JINGLE_DRAFT_CONTENT_NAME, name);
  view_elem->SetAttr(QN_JINGLE_DRAFT_VIEW_TYPE, type);
  return view_elem;
}

// Video views currently share the generic view element layout.
buzz::XmlElement* CreateVideoViewElem(const std::string& content_name,
                                      const std::string& type) {
  return CreateViewElem(content_name, type);
}

// Builds a "none" view: a request for no video from this content.
buzz::XmlElement* CreateNoneVideoViewElem(const std::string& content_name) {
  return CreateVideoViewElem(content_name, STR_JINGLE_DRAFT_VIEW_TYPE_NONE);
}
// Builds a "static" video view carrying the requested ssrc plus the desired
// width/height/framerate/preference in a nested <params> element.  The
// caller takes ownership of the returned element.
buzz::XmlElement* CreateStaticVideoViewElem(const std::string& content_name,
                                            const StaticVideoView& view) {
  buzz::XmlElement* view_elem =
      CreateVideoViewElem(content_name, STR_JINGLE_DRAFT_VIEW_TYPE_STATIC);
  AddXmlAttr(view_elem, QN_JINGLE_DRAFT_VIEW_SSRC, view.ssrc);

  buzz::XmlElement* params_elem = new buzz::XmlElement(
      QN_JINGLE_DRAFT_VIEW_PARAMS);
  AddXmlAttr(params_elem, QN_JINGLE_DRAFT_VIEW_PARAMS_WIDTH, view.width);
  AddXmlAttr(params_elem, QN_JINGLE_DRAFT_VIEW_PARAMS_HEIGHT, view.height);
  AddXmlAttr(params_elem, QN_JINGLE_DRAFT_VIEW_PARAMS_FRAMERATE,
             view.framerate);
  AddXmlAttr(params_elem, QN_JINGLE_DRAFT_VIEW_PARAMS_PREFERENCE,
             view.preference);
  view_elem->AddElement(params_elem);
  return view_elem;
}
// Serializes a view request into one <view> element per requested static
// view.  An empty request serializes as a single "none" view element.
// Always returns true; 'error' is unused but kept for signature parity
// with the other writers.
bool WriteViewRequest(const std::string& content_name,
                      const ViewRequest& request,
                      XmlElements* elems,
                      WriteError* error) {
  // empty() is the idiomatic emptiness check (was: size() == 0).
  if (request.static_video_views.empty()) {
    elems->push_back(CreateNoneVideoViewElem(content_name));
  } else {
    for (StaticVideoViews::const_iterator view =
             request.static_video_views.begin();
         view != request.static_video_views.end(); ++view) {
      elems->push_back(CreateStaticVideoViewElem(content_name, *view));
    }
  }
  return true;
}
} // namespace cricket

View File

@@ -1,106 +0,0 @@
/*
* libjingle
* Copyright 2010, Google Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
* EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef TALK_SESSION_PHONE_MEDIAMESSAGES_H_
#define TALK_SESSION_PHONE_MEDIAMESSAGES_H_
#include <string>
#include <vector>
#include "talk/base/basictypes.h"
#include "talk/p2p/base/parsing.h"
#include "talk/p2p/base/sessiondescription.h"
namespace cricket {
// A media source signaled in a session, identified by the owner's nick and
// (optionally) an RTP ssrc.
struct NamedSource {
  NamedSource() : ssrc(0), ssrc_set(false), removed(false) {}

  // Records the ssrc and remembers that it was explicitly provided, so
  // callers can distinguish "ssrc 0" from "no ssrc signaled".
  void SetSsrc(uint32 ssrc) {
    this->ssrc = ssrc;
    this->ssrc_set = true;
  }

  std::string nick;   // Owner's nickname.
  std::string name;   // Source name from the signaling.
  std::string usage;  // Usage tag from the signaling.
  uint32 ssrc;        // Only meaningful when ssrc_set is true.
  bool ssrc_set;      // True once SetSsrc() has been called.
  bool removed;       // True if the source was signaled as removed.
};

typedef std::vector<NamedSource> NamedSources;
// Holds the audio and video source lists announced for a session.
class MediaSources {
 public:
  const NamedSource* GetAudioSourceBySsrc(uint32 ssrc);
  const NamedSource* GetVideoSourceBySsrc(uint32 ssrc);
  // TODO: Remove once all senders use explicit remove by ssrc.
  const NamedSource* GetFirstAudioSourceByNick(const std::string& nick);
  const NamedSource* GetFirstVideoSourceByNick(const std::string& nick);
  void AddAudioSource(const NamedSource& source);
  void AddVideoSource(const NamedSource& source);
  void RemoveAudioSourceBySsrc(uint32 ssrc);
  void RemoveVideoSourceBySsrc(uint32 ssrc);
  // Public so parsers/writers can iterate the lists directly.
  NamedSources audio;
  NamedSources video;
};
// A request for a fixed-size rendering of one video source.
struct StaticVideoView {
  StaticVideoView(uint32 ssrc, int width, int height, int framerate)
      : ssrc(ssrc),
        width(width),
        height(height),
        framerate(framerate),
        preference(0) {}

  uint32 ssrc;     // Source being viewed.
  int width;       // Desired width.
  int height;      // Desired height.
  int framerate;   // Desired frame rate.
  int preference;  // Relative preference; defaults to 0.
};

typedef std::vector<StaticVideoView> StaticVideoViews;

// The set of video views a client wants; an empty set means "no video".
struct ViewRequest {
  StaticVideoViews static_video_views;
};

// Serializes a view request into XML elements; see mediamessages.cc.
bool WriteViewRequest(const std::string& content_name,
                      const ViewRequest& view,
                      XmlElements* elems,
                      WriteError* error);

// True if the action carries a sources <notify>.
bool IsSourcesNotify(const buzz::XmlElement* action_elem);
// The session_description is needed to map content_name => media type.
bool ParseSourcesNotify(const buzz::XmlElement* action_elem,
                        const SessionDescription* session_description,
                        MediaSources* sources,
                        ParseError* error);
} // namespace cricket
#endif // TALK_SESSION_PHONE_MEDIAMESSAGES_H_

View File

@@ -1,289 +0,0 @@
/*
* libjingle
* Copyright 2004--2005, Google Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
* EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef TALK_SESSION_PHONE_MEDIASESSIONCLIENT_H_
#define TALK_SESSION_PHONE_MEDIASESSIONCLIENT_H_
#include <string>
#include <vector>
#include <map>
#include <algorithm>
#include "talk/session/phone/call.h"
#include "talk/session/phone/channelmanager.h"
#include "talk/session/phone/cryptoparams.h"
#include "talk/base/sigslot.h"
#include "talk/base/sigslotrepeater.h"
#include "talk/base/messagequeue.h"
#include "talk/base/thread.h"
#include "talk/p2p/base/sessionmanager.h"
#include "talk/p2p/base/session.h"
#include "talk/p2p/base/sessionclient.h"
#include "talk/p2p/base/sessiondescription.h"
namespace cricket {
class Call;
class SessionDescription;
typedef std::vector<AudioCodec> AudioCodecs;
typedef std::vector<VideoCodec> VideoCodecs;
// SEC_ENABLED and SEC_REQUIRED should only be used if the session
// was negotiated over TLS, to protect the inline crypto material
// exchange.
// SEC_DISABLED: No crypto in outgoing offer and answer. Fail any
// offer with crypto required.
// SEC_ENABLED: Crypto in outgoing offer and answer. Fail any offer
// with unsupported required crypto. Crypto set but not
// required in outgoing offer.
// SEC_REQUIRED: Crypto in outgoing offer and answer with
// required='true'. Fail any offer with no or
// unsupported crypto (implicit crypto required='true'
// in the offer.)
enum SecureMediaPolicy {SEC_DISABLED, SEC_ENABLED, SEC_REQUIRED};
const int kAutoBandwidth = -1;
// Per-call options used when creating offers and answers.
struct CallOptions {
  CallOptions() :
      is_video(false),
      is_muc(false),
      video_bandwidth(kAutoBandwidth) {
  }

  bool is_video;  // Whether the call includes video.
  bool is_muc;    // Whether this is a multi-user (MUC) call.
  // bps. -1 == auto.
  int video_bandwidth;
};
// Ties signaling (SessionClient) to media processing (ChannelManager):
// creates and destroys Calls, tracks which call has focus, and converts
// between session descriptions and their XML wire form.
class MediaSessionClient: public SessionClient, public sigslot::has_slots<> {
 public:
  MediaSessionClient(const buzz::Jid& jid, SessionManager *manager);
  // Alternative constructor, allowing injection of media_engine
  // and device_manager.
  MediaSessionClient(const buzz::Jid& jid, SessionManager *manager,
                     MediaEngine* media_engine, DeviceManager* device_manager);
  ~MediaSessionClient();

  const buzz::Jid &jid() const { return jid_; }
  SessionManager* session_manager() const { return session_manager_; }
  ChannelManager* channel_manager() const { return channel_manager_; }
  // Capability bits, delegated to the channel manager.
  int GetCapabilities() { return channel_manager_->GetCapabilities(); }

  // Call lifetime and focus management.
  Call *CreateCall();
  void DestroyCall(Call *call);
  Call *GetFocus();
  void SetFocus(Call *call);
  void JoinCalls(Call *call_to_join, Call *call);

  // Device enumeration and configuration, delegated to the channel manager.
  bool GetAudioInputDevices(std::vector<std::string>* names) {
    return channel_manager_->GetAudioInputDevices(names);
  }
  bool GetAudioOutputDevices(std::vector<std::string>* names) {
    return channel_manager_->GetAudioOutputDevices(names);
  }
  bool GetVideoCaptureDevices(std::vector<std::string>* names) {
    return channel_manager_->GetVideoCaptureDevices(names);
  }
  bool SetAudioOptions(const std::string& in_name, const std::string& out_name,
                       int opts) {
    return channel_manager_->SetAudioOptions(in_name, out_name, opts);
  }
  bool SetOutputVolume(int level) {
    return channel_manager_->SetOutputVolume(level);
  }
  bool SetVideoOptions(const std::string& cam_device) {
    return channel_manager_->SetVideoOptions(cam_device);
  }

  sigslot::signal2<Call *, Call *> SignalFocus;
  sigslot::signal1<Call *> SignalCallCreate;
  sigslot::signal1<Call *> SignalCallDestroy;
  sigslot::repeater0<> SignalDevicesChange;

  // Builds session descriptions for outgoing offers and answers.
  SessionDescription* CreateOffer(const CallOptions& options);
  SessionDescription* CreateAnswer(const SessionDescription* offer,
                                   const CallOptions& options);

  // Crypto policy used in negotiation; see SecureMediaPolicy above.
  SecureMediaPolicy secure() const { return secure_; }
  void set_secure(SecureMediaPolicy s) { secure_ = s; }

 private:
  void Construct();
  void OnSessionCreate(Session *session, bool received_initiate);
  void OnSessionState(BaseSession *session, BaseSession::State state);
  void OnSessionDestroy(Session *session);
  // SessionClient interface: XML <-> ContentDescription conversion.
  virtual bool ParseContent(SignalingProtocol protocol,
                            const buzz::XmlElement* elem,
                            const ContentDescription** content,
                            ParseError* error);
  virtual bool WriteContent(SignalingProtocol protocol,
                            const ContentDescription* content,
                            buzz::XmlElement** elem,
                            WriteError* error);
  Session *CreateSession(Call *call);

  buzz::Jid jid_;
  SessionManager* session_manager_;
  Call *focus_call_;
  ChannelManager *channel_manager_;
  std::map<uint32, Call *> calls_;  // NOTE(review): presumably keyed by
                                    // call id — confirm against call.cc.
  std::map<std::string, Call *> session_map_;  // Session id -> owning call.
  SecureMediaPolicy secure_;
  friend class Call;
};
// Media type of a content description.
enum MediaType {
  MEDIA_TYPE_AUDIO,
  MEDIA_TYPE_VIDEO
};
// Base class for audio/video content descriptions: carries the negotiation
// state (ssrc, rtcp-mux, bandwidth, crypto, RTP header extensions) shared
// by both media types.
class MediaContentDescription : public ContentDescription {
 public:
  MediaContentDescription()
      : ssrc_(0),
        ssrc_set_(false),
        rtcp_mux_(false),
        bandwidth_(kAutoBandwidth),
        crypto_required_(false),
        rtp_header_extensions_set_(false) {
  }

  virtual MediaType type() const = 0;

  // SSRC accessors; ssrc_set() distinguishes "ssrc 0" from "not signaled".
  uint32 ssrc() const { return ssrc_; }
  bool ssrc_set() const { return ssrc_set_; }
  void set_ssrc(uint32 ssrc) {
    ssrc_ = ssrc;
    ssrc_set_ = true;
  }

  bool rtcp_mux() const { return rtcp_mux_; }
  void set_rtcp_mux(bool mux) { rtcp_mux_ = mux; }

  // Bandwidth; kAutoBandwidth (-1) means automatic.
  int bandwidth() const { return bandwidth_; }
  void set_bandwidth(int bandwidth) { bandwidth_ = bandwidth; }

  // Offered/answered SRTP crypto parameter sets.
  const std::vector<CryptoParams>& cryptos() const { return cryptos_; }
  void AddCrypto(const CryptoParams& params) {
    cryptos_.push_back(params);
  }
  bool crypto_required() const { return crypto_required_; }
  void set_crypto_required(bool crypto) {
    crypto_required_ = crypto;
  }

  const std::vector<RtpHeaderExtension>& rtp_header_extensions() const {
    return rtp_header_extensions_;
  }
  void AddRtpHeaderExtension(const RtpHeaderExtension& ext) {
    rtp_header_extensions_.push_back(ext);
    rtp_header_extensions_set_ = true;
  }
  void ClearRtpHeaderExtensions() {
    rtp_header_extensions_.clear();
    rtp_header_extensions_set_ = true;
  }
  // We can't always tell if an empty list of header extensions is
  // because the other side doesn't support them, or just isn't hooked up to
  // signal them. For now we assume an empty list means no signaling, but
  // provide the ClearRtpHeaderExtensions method to allow "no support" to be
  // clearly indicated (i.e. when derived from other information).
  bool rtp_header_extensions_set() const {
    return rtp_header_extensions_set_;
  }

 protected:
  uint32 ssrc_;
  bool ssrc_set_;
  bool rtcp_mux_;
  int bandwidth_;
  std::vector<CryptoParams> cryptos_;
  bool crypto_required_;
  std::vector<RtpHeaderExtension> rtp_header_extensions_;
  bool rtp_header_extensions_set_;
};
// Shared implementation for audio/video content descriptions, templated on
// the codec type.  Maintains the codec list and sorts it by descending
// preference.
template <class C>
class MediaContentDescriptionImpl : public MediaContentDescription {
 public:
  // Orders codecs by descending preference.  Takes operands by const
  // reference to avoid copying a codec struct on every comparison during
  // the sort (the original took them by value).
  struct PreferenceSort {
    bool operator()(const C& a, const C& b) const {
      return a.preference > b.preference;
    }
  };

  const std::vector<C>& codecs() const { return codecs_; }
  void AddCodec(const C& codec) {
    codecs_.push_back(codec);
  }
  // Sorts codecs_ so the most-preferred codec comes first.
  void SortCodecs() {
    std::sort(codecs_.begin(), codecs_.end(), PreferenceSort());
  }

 private:
  std::vector<C> codecs_;
};
// Audio-specific content description: adds conference mode and a language
// tag on top of the shared codec handling.
class AudioContentDescription : public MediaContentDescriptionImpl<AudioCodec> {
 public:
  AudioContentDescription() :
      conference_mode_(false) {}

  virtual MediaType type() const { return MEDIA_TYPE_AUDIO; }

  bool conference_mode() const { return conference_mode_; }
  void set_conference_mode(bool enable) {
    conference_mode_ = enable;
  }

  // Language tag; empty when unspecified.
  const std::string &lang() const { return lang_; }
  void set_lang(const std::string &lang) { lang_ = lang; }

 private:
  bool conference_mode_;
  std::string lang_;
};
// Video-specific content description; all state lives in the base classes.
class VideoContentDescription : public MediaContentDescriptionImpl<VideoCodec> {
 public:
  virtual MediaType type() const { return MEDIA_TYPE_VIDEO; }
};
// Convenience functions.
bool IsAudioContent(const ContentInfo* content);
bool IsVideoContent(const ContentInfo* content);
const ContentInfo* GetFirstAudioContent(const SessionDescription* sdesc);
const ContentInfo* GetFirstVideoContent(const SessionDescription* sdesc);
} // namespace cricket
#endif // TALK_SESSION_PHONE_MEDIASESSIONCLIENT_H_

View File

@@ -0,0 +1,660 @@
/*
* libjingle
* Copyright 2009, Google Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
* EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// talk's config.h, generated from mac_config_dot_h for OSX, conflicts with the
// one included by the libsrtp headers. Don't use it. Instead, we keep HAVE_SRTP
// and LOGGING defined in config.h.
#undef HAVE_CONFIG_H
#ifdef OSX
// TODO: For the XCode build, we force SRTP (b/2500074)
#ifndef HAVE_SRTP
#define HAVE_SRTP 1
#endif // HAVE_SRTP
// If LOGGING is not defined, define it to 1 (b/3245816)
#ifndef LOGGING
#define LOGGING 1
#endif // HAVE_SRTP
#endif
#include "talk/session/phone/srtpfilter.h"
#include <algorithm>
#include <cstring>
#include "talk/base/base64.h"
#include "talk/base/logging.h"
#include "talk/base/time.h"
#include "talk/session/phone/rtputils.h"
// Enable this line to turn on SRTP debugging
// #define SRTP_DEBUG
#ifdef HAVE_SRTP
#ifdef SRTP_RELATIVE_PATH
#include "srtp.h" // NOLINT
#else
#include "third_party/libsrtp/include/srtp.h"
#endif // SRTP_RELATIVE_PATH
#ifdef _DEBUG
extern "C" debug_module_t mod_srtp;
extern "C" debug_module_t mod_auth;
extern "C" debug_module_t mod_cipher;
extern "C" debug_module_t mod_stat;
extern "C" debug_module_t mod_alloc;
extern "C" debug_module_t mod_aes_icm;
extern "C" debug_module_t mod_aes_hmac;
#endif
#else
// SrtpFilter needs that constant.
#define SRTP_MASTER_KEY_LEN 30
#endif // HAVE_SRTP
namespace cricket {
// Supported SRTP crypto suite names; the 80-bit-tag suite is the default.
// NOTE(review): CS_DEFAULT is a reference bound to a string defined below
// it in this TU — using CS_DEFAULT during static initialization of this
// file would read an unconstructed string; confirm no such use exists.
const std::string& CS_DEFAULT = CS_AES_CM_128_HMAC_SHA1_80;
const std::string CS_AES_CM_128_HMAC_SHA1_80 = "AES_CM_128_HMAC_SHA1_80";
const std::string CS_AES_CM_128_HMAC_SHA1_32 = "AES_CM_128_HMAC_SHA1_32";
// Base64 expands every 3 bytes of key material into 4 characters.
const int SRTP_MASTER_KEY_BASE64_LEN = SRTP_MASTER_KEY_LEN * 4 / 3;
#ifndef HAVE_SRTP
// This helper function is used on systems that don't (yet) have SRTP,
// to log that the functions that require it won't do anything.
namespace {
// Logs and fails any SRTP operation on builds without libsrtp.
bool SrtpNotAvailable(const char *func) {
  LOG(LS_ERROR) << func << ": SRTP is not available on your system.";
  return false;
}
}  // anonymous namespace
#endif // !HAVE_SRTP
#ifdef HAVE_SRTP //due to cricket namespace it can't be clubbed with above cond
// Turns on libsrtp's per-module debug logging.  A no-op in release builds.
void EnableSrtpDebugging() {
#ifdef _DEBUG
  debug_on(mod_srtp);
  debug_on(mod_auth);
  debug_on(mod_cipher);
  debug_on(mod_stat);
  debug_on(mod_alloc);
  debug_on(mod_aes_icm);
  // debug_on(mod_aes_cbc);
  // debug_on(mod_hmac);
#endif
}
#endif
// Starts in ST_INIT with fresh send/recv sessions; SRTP error signals from
// both sessions are re-emitted through this filter's SignalSrtpError.
SrtpFilter::SrtpFilter()
    : state_(ST_INIT),
      send_session_(new SrtpSession()),
      recv_session_(new SrtpSession()) {
  SignalSrtpError.repeat(send_session_->SignalSrtpError);
  SignalSrtpError.repeat(recv_session_->SignalSrtpError);
}

SrtpFilter::~SrtpFilter() {
}

// True once an offer/answer exchange has activated SRTP.
bool SrtpFilter::IsActive() const {
  return (state_ == ST_ACTIVE);
}
// Records a local or remote SRTP offer.  Only legal from the initial
// state; on success the state moves to ST_SENTOFFER or ST_RECEIVEDOFFER
// via StoreParams().
bool SrtpFilter::SetOffer(const std::vector<CryptoParams>& offer_params,
                          ContentSource source) {
  if (state_ != ST_INIT) {
    LOG(LS_ERROR) << "Invalid state for SRTP offer";
    return false;
  }
  return StoreParams(offer_params, source);
}
// Completes the offer/answer exchange.  Legal only while an offer is
// pending in the matching direction (we sent the offer and this is the
// remote answer, or we received the offer and this is our local answer).
// A non-empty answer selects and applies crypto params; an empty answer
// completes negotiation of an unencrypted session via ResetParams().
bool SrtpFilter::SetAnswer(const std::vector<CryptoParams>& answer_params,
                           ContentSource source) {
  bool ret = false;
  if ((state_ == ST_SENTOFFER && source == CS_REMOTE) ||
      (state_ == ST_RECEIVEDOFFER && source == CS_LOCAL)) {
    // If the answer requests crypto, finalize the parameters and apply them.
    // Otherwise, complete the negotiation of a unencrypted session.
    if (!answer_params.empty()) {
      CryptoParams selected_params;
      ret = NegotiateParams(answer_params, &selected_params);
      if (ret) {
        // Our own side's params drive the send direction: the selected
        // offered set if we sent the offer, else the single answer set.
        if (state_ == ST_SENTOFFER) {
          ret = ApplyParams(selected_params, answer_params[0]);
        } else {  // ST_RECEIVEDOFFER
          ret = ApplyParams(answer_params[0], selected_params);
        }
      }
    } else {
      ret = ResetParams();
    }
  } else {
    LOG(LS_ERROR) << "Invalid state for SRTP answer";
  }
  return ret;
}
// Protects an outgoing RTP packet in place via the send session.  Fails
// (with a warning) if SRTP has not been activated.
bool SrtpFilter::ProtectRtp(void* p, int in_len, int max_len, int* out_len) {
  if (!IsActive()) {
    LOG(LS_WARNING) << "Failed to ProtectRtp: SRTP not active";
    return false;
  }
  return send_session_->ProtectRtp(p, in_len, max_len, out_len);
}

// Protects an outgoing RTCP packet in place via the send session.
bool SrtpFilter::ProtectRtcp(void* p, int in_len, int max_len, int* out_len) {
  if (!IsActive()) {
    LOG(LS_WARNING) << "Failed to ProtectRtcp: SRTP not active";
    return false;
  }
  return send_session_->ProtectRtcp(p, in_len, max_len, out_len);
}

// Unprotects an incoming RTP packet in place via the recv session.
bool SrtpFilter::UnprotectRtp(void* p, int in_len, int* out_len) {
  if (!IsActive()) {
    LOG(LS_WARNING) << "Failed to UnprotectRtp: SRTP not active";
    return false;
  }
  return recv_session_->UnprotectRtp(p, in_len, out_len);
}

// Unprotects an incoming RTCP packet in place via the recv session.
bool SrtpFilter::UnprotectRtcp(void* p, int in_len, int* out_len) {
  if (!IsActive()) {
    LOG(LS_WARNING) << "Failed to UnprotectRtcp: SRTP not active";
    return false;
  }
  return recv_session_->UnprotectRtcp(p, in_len, out_len);
}

// Forwards the error-signal throttling interval to both sessions.
void SrtpFilter::set_signal_silent_time(uint32 signal_silent_time_in_ms) {
  send_session_->set_signal_silent_time(signal_silent_time_in_ms);
  recv_session_->set_signal_silent_time(signal_silent_time_in_ms);
}

// Stashes the offered params and moves to the matching offer state.
bool SrtpFilter::StoreParams(const std::vector<CryptoParams>& params,
                             ContentSource source) {
  offer_params_ = params;
  state_ = (source == CS_LOCAL) ? ST_SENTOFFER : ST_RECEIVEDOFFER;
  return true;
}
// Picks the offered param set matched by the (single) answer param set.
// Fails if the answer does not hold exactly one set, if no offer is
// stored, or if nothing in the offer matches the answer.
bool SrtpFilter::NegotiateParams(const std::vector<CryptoParams>& answer_params,
                                 CryptoParams* selected_params) {
  // We're processing an accept. We should have exactly one set of params,
  // unless the offer didn't mention crypto, in which case we shouldn't be here.
  bool ret = (answer_params.size() == 1U && !offer_params_.empty());
  if (ret) {
    // We should find a match between the answer params and the offered params.
    std::vector<CryptoParams>::const_iterator it;
    for (it = offer_params_.begin(); it != offer_params_.end(); ++it) {
      if (answer_params[0].Matches(*it)) {
        break;
      }
    }
    if (it != offer_params_.end()) {
      *selected_params = *it;
    } else {
      ret = false;
    }
  }

  if (!ret) {
    LOG(LS_WARNING) << "Invalid parameters in SRTP answer";
  }
  return ret;
}
// Activates SRTP with the negotiated send/recv parameters.  The base64 key
// material is decoded into stack buffers, handed to the SRTP sessions, and
// then zeroed so master keys do not linger on the stack (resolves the old
// TODO; assumes SetSend/SetRecv keep their own copy of the key material —
// libsrtp consumes the key during session setup).
bool SrtpFilter::ApplyParams(const CryptoParams& send_params,
                             const CryptoParams& recv_params) {
  bool ret;
  uint8 send_key[SRTP_MASTER_KEY_LEN], recv_key[SRTP_MASTER_KEY_LEN];
  ret = (ParseKeyParams(send_params.key_params, send_key, sizeof(send_key)) &&
         ParseKeyParams(recv_params.key_params, recv_key, sizeof(recv_key)));
  if (ret) {
    ret = (send_session_->SetSend(send_params.cipher_suite,
                                  send_key, sizeof(send_key)) &&
           recv_session_->SetRecv(recv_params.cipher_suite,
                                  recv_key, sizeof(recv_key)));
  }
  // Scrub the master keys now that the sessions have consumed them.
  memset(send_key, 0, sizeof(send_key));
  memset(recv_key, 0, sizeof(recv_key));

  if (ret) {
    offer_params_.clear();
    state_ = ST_ACTIVE;
    LOG(LS_INFO) << "SRTP activated with negotiated parameters:"
                 << " send cipher_suite " << send_params.cipher_suite
                 << " recv cipher_suite " << recv_params.cipher_suite;
  } else {
    LOG(LS_WARNING) << "Failed to apply negotiated SRTP parameters";
  }
  return ret;
}
// Returns the filter to its initial (inactive) state and discards any
// stored offer params.
bool SrtpFilter::ResetParams() {
  offer_params_.clear();
  state_ = ST_INIT;
  LOG(LS_INFO) << "SRTP reset to init state";
  return true;
}

// Extracts the base64-encoded master key+salt from a key_params string of
// the form "inline:<base64>", writing exactly |len| decoded bytes to |key|.
// Returns false on a wrong key-method, a base64 decode failure, or a
// decoded key of the wrong size.
bool SrtpFilter::ParseKeyParams(const std::string& key_params,
                                uint8* key, int len) {
  // example key_params: "inline:YUJDZGVmZ2hpSktMbW9QUXJzVHVWd3l6MTIzNDU2"
  // Fail if key-method is wrong.
  if (key_params.find("inline:") != 0) {
    return false;
  }
  // Fail if base64 decode fails, or the key is the wrong size.
  std::string key_b64(key_params.substr(7)), key_str;  // 7 == strlen("inline:")
  if (!talk_base::Base64::Decode(key_b64, talk_base::Base64::DO_STRICT,
                                 &key_str, NULL) ||
      static_cast<int>(key_str.size()) != len) {
    return false;
  }
  memcpy(key, key_str.c_str(), len);
  return true;
}
///////////////////////////////////////////////////////////////////////////////
// SrtpSession
#ifdef HAVE_SRTP
// Static state shared by all SrtpSessions: the one-time libsrtp init flag
// and the list of live sessions used to dispatch libsrtp events back to
// their owners.
bool SrtpSession::inited_ = false;
std::list<SrtpSession*> SrtpSession::sessions_;

SrtpSession::SrtpSession()
    : session_(NULL),
      rtp_auth_tag_len_(0),
      rtcp_auth_tag_len_(0),
      srtp_stat_(new SrtpStat()),
      last_send_seq_num_(-1) {
  sessions_.push_back(this);
  // Re-emit the stat tracker's error signal as this session's own signal.
  SignalSrtpError.repeat(srtp_stat_->SignalSrtpError);
}

SrtpSession::~SrtpSession() {
  // Remove ourselves from event dispatch before tearing down the libsrtp
  // session.
  sessions_.erase(std::find(sessions_.begin(), sessions_.end(), this));
  if (session_) {
    srtp_dealloc(session_);
  }
}

// Configures this session to protect outbound packets on any SSRC.
bool SrtpSession::SetSend(const std::string& cs, const uint8* key, int len) {
  return SetKey(ssrc_any_outbound, cs, key, len);
}

// Configures this session to unprotect inbound packets on any SSRC.
bool SrtpSession::SetRecv(const std::string& cs, const uint8* key, int len) {
  return SetKey(ssrc_any_inbound, cs, key, len);
}
// Protects an RTP packet in place. |in_len| is the plaintext packet size;
// on success *out_len is the protected size (plaintext plus auth tag).
// |max_len| must leave room for the auth tag.
bool SrtpSession::ProtectRtp(void* p, int in_len, int max_len, int* out_len) {
  if (!session_) {
    LOG(LS_WARNING) << "Failed to protect SRTP packet: no SRTP Session";
    return false;
  }
  int need_len = in_len + rtp_auth_tag_len_;  // NOLINT
  if (max_len < need_len) {
    LOG(LS_WARNING) << "Failed to protect SRTP packet: The buffer length "
                    << max_len << " is less than the needed " << need_len;
    return false;
  }
  *out_len = in_len;
  int err = srtp_protect(session_, p, out_len);
  // Record the per-SSRC result for error-rate tracking/signalling.
  uint32 ssrc;
  if (GetRtpSsrc(p, in_len, &ssrc)) {
    srtp_stat_->AddProtectRtpResult(ssrc, err);
  }
  int seq_num;
  GetRtpSeqNum(p, in_len, &seq_num);
  if (err != err_status_ok) {
    LOG(LS_WARNING) << "Failed to protect SRTP packet, seqnum="
                    << seq_num << ", err=" << err << ", last seqnum="
                    << last_send_seq_num_;
    return false;
  }
  last_send_seq_num_ = seq_num;
  return true;
}

// Protects an RTCP packet in place; the extra uint32 accounts for the SRTCP
// index appended alongside the auth tag.
bool SrtpSession::ProtectRtcp(void* p, int in_len, int max_len, int* out_len) {
  if (!session_) {
    LOG(LS_WARNING) << "Failed to protect SRTCP packet: no SRTP Session";
    return false;
  }
  int need_len = in_len + sizeof(uint32) + rtcp_auth_tag_len_;  // NOLINT
  if (max_len < need_len) {
    LOG(LS_WARNING) << "Failed to protect SRTCP packet: The buffer length "
                    << max_len << " is less than the needed " << need_len;
    return false;
  }
  *out_len = in_len;
  int err = srtp_protect_rtcp(session_, p, out_len);
  srtp_stat_->AddProtectRtcpResult(err);
  if (err != err_status_ok) {
    LOG(LS_WARNING) << "Failed to protect SRTCP packet, err=" << err;
    return false;
  }
  return true;
}

// Unprotects an RTP packet in place; on success *out_len is the plaintext
// size (auth tag stripped).
bool SrtpSession::UnprotectRtp(void* p, int in_len, int* out_len) {
  if (!session_) {
    LOG(LS_WARNING) << "Failed to unprotect SRTP packet: no SRTP Session";
    return false;
  }
  *out_len = in_len;
  int err = srtp_unprotect(session_, p, out_len);
  uint32 ssrc;
  if (GetRtpSsrc(p, in_len, &ssrc)) {
    srtp_stat_->AddUnprotectRtpResult(ssrc, err);
  }
  if (err != err_status_ok) {
    LOG(LS_WARNING) << "Failed to unprotect SRTP packet, err=" << err;
    return false;
  }
  return true;
}

// Unprotects an RTCP packet in place.
bool SrtpSession::UnprotectRtcp(void* p, int in_len, int* out_len) {
  if (!session_) {
    LOG(LS_WARNING) << "Failed to unprotect SRTCP packet: no SRTP Session";
    return false;
  }
  *out_len = in_len;
  int err = srtp_unprotect_rtcp(session_, p, out_len);
  srtp_stat_->AddUnprotectRtcpResult(err);
  if (err != err_status_ok) {
    LOG(LS_WARNING) << "Failed to unprotect SRTCP packet, err=" << err;
    return false;
  }
  return true;
}
// Forwards the error-signal throttle interval to the stat tracker.
void SrtpSession::set_signal_silent_time(uint32 signal_silent_time_in_ms) {
  srtp_stat_->set_signal_silent_time(signal_silent_time_in_ms);
}

// Creates the underlying libsrtp session for the given direction (|type| is
// ssrc_any_outbound or ssrc_any_inbound), cipher suite name and master
// key/salt. May only be called once per SrtpSession; lazily initializes
// libsrtp the first time any session needs it.
bool SrtpSession::SetKey(int type, const std::string& cs,
                         const uint8* key, int len) {
  if (session_) {
    LOG(LS_ERROR) << "Failed to create SRTP session: "
                  << "SRTP session already created";
    return false;
  }
  if (!Init()) {
    return false;
  }
  srtp_policy_t policy;
  memset(&policy, 0, sizeof(policy));
  if (cs == CS_AES_CM_128_HMAC_SHA1_80) {
    crypto_policy_set_aes_cm_128_hmac_sha1_80(&policy.rtp);
    crypto_policy_set_aes_cm_128_hmac_sha1_80(&policy.rtcp);
  } else if (cs == CS_AES_CM_128_HMAC_SHA1_32) {
    crypto_policy_set_aes_cm_128_hmac_sha1_32(&policy.rtp);   // rtp is 32,
    crypto_policy_set_aes_cm_128_hmac_sha1_80(&policy.rtcp);  // rtcp still 80
  } else {
    LOG(LS_WARNING) << "Failed to create SRTP session: unsupported"
                    << " cipher_suite " << cs.c_str();
    return false;
  }
  if (!key || len != SRTP_MASTER_KEY_LEN) {
    LOG(LS_WARNING) << "Failed to create SRTP session: invalid key";
    return false;
  }
  policy.ssrc.type = static_cast<ssrc_type_t>(type);
  policy.ssrc.value = 0;
  // libsrtp copies the key into the session, so passing our pointer is safe.
  policy.key = const_cast<uint8*>(key);
  // TODO parse window size from WSH session-param
  policy.window_size = 1024;
  policy.allow_repeat_tx = 1;
  policy.next = NULL;
  int err = srtp_create(&session_, &policy);
  if (err != err_status_ok) {
    LOG(LS_ERROR) << "Failed to create SRTP session, err=" << err;
    return false;
  }
  // Remember the auth tag lengths so Protect* can size-check buffers.
  rtp_auth_tag_len_ = policy.rtp.auth_tag_len;
  rtcp_auth_tag_len_ = policy.rtcp.auth_tag_len;
  return true;
}

// One-time, process-wide libsrtp initialization plus registration of our
// event handler. Safe to call repeatedly; only the first call does work.
bool SrtpSession::Init() {
  if (!inited_) {
    int err;
    err = srtp_init();
    if (err != err_status_ok) {
      LOG(LS_ERROR) << "Failed to init SRTP, err=" << err;
      return false;
    }
    err = srtp_install_event_handler(&SrtpSession::HandleEventThunk);
    if (err != err_status_ok) {
      LOG(LS_ERROR) << "Failed to install SRTP event handler, err=" << err;
      return false;
    }
    inited_ = true;
  }
  return true;
}
// Logs libsrtp events delivered to this session (SSRC collisions and key
// usage limits). Informational only; no recovery action is taken here.
void SrtpSession::HandleEvent(const srtp_event_data_t* ev) {
  switch (ev->event) {
    case event_ssrc_collision:
      LOG(LS_INFO) << "SRTP event: SSRC collision";
      break;
    case event_key_soft_limit:
      LOG(LS_INFO) << "SRTP event: reached soft key usage limit";
      break;
    case event_key_hard_limit:
      LOG(LS_INFO) << "SRTP event: reached hard key usage limit";
      break;
    case event_packet_index_limit:
      LOG(LS_INFO) << "SRTP event: reached hard packet limit (2^48 packets)";
      break;
    default:
      LOG(LS_INFO) << "SRTP event: unknown " << ev->event;
      break;
  }
}

// Static libsrtp event callback: finds the SrtpSession owning the libsrtp
// session that triggered the event and forwards the event to it.
void SrtpSession::HandleEventThunk(srtp_event_data_t* ev) {
  for (std::list<SrtpSession*>::iterator it = sessions_.begin();
       it != sessions_.end(); ++it) {
    if ((*it)->session_ == ev->session) {
      (*it)->HandleEvent(ev);
      break;
    }
  }
}
#else // !HAVE_SRTP
// Stub implementations used when libjingle is built without libsrtp; every
// operation logs via SrtpNotAvailable() and fails.
SrtpSession::SrtpSession() {
  LOG(WARNING) << "SRTP implementation is missing.";
}

SrtpSession::~SrtpSession() {
}

bool SrtpSession::SetSend(const std::string& cs, const uint8* key, int len) {
  return SrtpNotAvailable(__FUNCTION__);
}

bool SrtpSession::SetRecv(const std::string& cs, const uint8* key, int len) {
  return SrtpNotAvailable(__FUNCTION__);
}

bool SrtpSession::ProtectRtp(void* data, int in_len, int max_len,
                             int* out_len) {
  return SrtpNotAvailable(__FUNCTION__);
}

bool SrtpSession::ProtectRtcp(void* data, int in_len, int max_len,
                              int* out_len) {
  return SrtpNotAvailable(__FUNCTION__);
}

bool SrtpSession::UnprotectRtp(void* data, int in_len, int* out_len) {
  return SrtpNotAvailable(__FUNCTION__);
}

bool SrtpSession::UnprotectRtcp(void* data, int in_len, int* out_len) {
  return SrtpNotAvailable(__FUNCTION__);
}

void SrtpSession::set_signal_silent_time(uint32 signal_silent_time) {
  // Do nothing.
}
#endif // HAVE_SRTP
///////////////////////////////////////////////////////////////////////////////
// SrtpStat
#ifdef HAVE_SRTP
// Error signalling is throttled to at most once per second by default.
SrtpStat::SrtpStat()
    : signal_silent_time_(1000) {
}

// Classifies a libsrtp protect result for |ssrc| and feeds it into the
// throttled error-signalling logic.
void SrtpStat::AddProtectRtpResult(uint32 ssrc, int result) {
  FailureKey key;
  key.ssrc = ssrc;
  key.mode = SrtpFilter::PROTECT;
  switch (result) {
    case err_status_ok:
      key.error = SrtpFilter::ERROR_NONE;
      break;
    case err_status_auth_fail:
      key.error = SrtpFilter::ERROR_AUTH;
      break;
    default:
      key.error = SrtpFilter::ERROR_FAIL;
  }
  HandleSrtpResult(key);
}

// Same as AddProtectRtpResult, but for unprotect results; additionally maps
// libsrtp replay failures to ERROR_REPLAY.
void SrtpStat::AddUnprotectRtpResult(uint32 ssrc, int result) {
  FailureKey key;
  key.ssrc = ssrc;
  key.mode = SrtpFilter::UNPROTECT;
  switch (result) {
    case err_status_ok:
      key.error = SrtpFilter::ERROR_NONE;
      break;
    case err_status_auth_fail:
      key.error = SrtpFilter::ERROR_AUTH;
      break;
    case err_status_replay_fail:
    case err_status_replay_old:
      key.error = SrtpFilter::ERROR_REPLAY;
      break;
    default:
      key.error = SrtpFilter::ERROR_FAIL;
  }
  HandleSrtpResult(key);
}

// RTCP results are tracked under the reserved SSRC 0.
void SrtpStat::AddProtectRtcpResult(int result) {
  AddProtectRtpResult(0U, result);
}

void SrtpStat::AddUnprotectRtcpResult(int result) {
  AddUnprotectRtpResult(0U, result);
}
// Signals an SRTP error the first time it is seen for a given
// (ssrc, mode, error) key, then suppresses repeats of the same key for
// signal_silent_time_ ms to avoid log/signal spam.
void SrtpStat::HandleSrtpResult(const SrtpStat::FailureKey& key) {
  // Handle some cases where error should be signalled right away. For other
  // errors, trigger error for the first time seeing it. After that, silent
  // the same error for a certain amount of time (default 1 sec).
  if (key.error != SrtpFilter::ERROR_NONE) {
    // For errors, signal first time and wait for 1 sec.
    FailureStat* stat = &(failures_[key]);
    uint32 current_time = talk_base::Time();
    // last_signal_time == 0 means this key has never been signalled.
    if (stat->last_signal_time == 0 ||
        talk_base::TimeDiff(current_time, stat->last_signal_time) >
        static_cast<int>(signal_silent_time_)) {
      SignalSrtpError(key.ssrc, key.mode, key.error);
      stat->last_signal_time = current_time;
    }
  }
}
#else // !HAVE_SRTP
// Stub implementations used when libjingle is built without libsrtp.
SrtpStat::SrtpStat()
    : signal_silent_time_(1000) {
  LOG(WARNING) << "SRTP implementation is missing.";
}

void SrtpStat::AddProtectRtpResult(uint32 ssrc, int result) {
  SrtpNotAvailable(__FUNCTION__);
}

void SrtpStat::AddUnprotectRtpResult(uint32 ssrc, int result) {
  SrtpNotAvailable(__FUNCTION__);
}

void SrtpStat::AddProtectRtcpResult(int result) {
  SrtpNotAvailable(__FUNCTION__);
}

void SrtpStat::AddUnprotectRtcpResult(int result) {
  SrtpNotAvailable(__FUNCTION__);
}

void SrtpStat::HandleSrtpResult(const SrtpStat::FailureKey& key) {
  SrtpNotAvailable(__FUNCTION__);
}
#endif // HAVE_SRTP
} // namespace cricket

View File

@@ -0,0 +1,84 @@
/*
* libjingle
* Copyright 2004--2011, Google Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
* EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef TALK_SESSION_PHONE_WEBRTCCOMMON_H_
#define TALK_SESSION_PHONE_WEBRTCCOMMON_H_

#ifdef WEBRTC_RELATIVE_PATH
#include "common_types.h"
#include "video_engine/main/interface/vie_base.h"
#include "voice_engine/main/interface/voe_base.h"
#else
#include "third_party/webrtc/files/include/common_types.h"
#include "third_party/webrtc/files/include/voe_base.h"
#include "third_party/webrtc/files/include/vie_base.h"
#endif  // WEBRTC_RELATIVE_PATH

namespace cricket {

// Tracing helpers, for easy logging when WebRTC calls fail.
// Example: "LOG_RTCERR1(StartSend, channel);" produces the trace
// "StartSend(1) failed, err=XXXX"
// The method GetLastEngineError must be defined in the calling scope.
// The numeric suffix on each macro is the number of arguments the failing
// call took.
#define LOG_RTCERR0(func) \
    LOG_RTCERR0_EX(func, GetLastEngineError())
#define LOG_RTCERR1(func, a1) \
    LOG_RTCERR1_EX(func, a1, GetLastEngineError())
#define LOG_RTCERR2(func, a1, a2) \
    LOG_RTCERR2_EX(func, a1, a2, GetLastEngineError())
#define LOG_RTCERR3(func, a1, a2, a3) \
    LOG_RTCERR3_EX(func, a1, a2, a3, GetLastEngineError())
#define LOG_RTCERR4(func, a1, a2, a3, a4) \
    LOG_RTCERR4_EX(func, a1, a2, a3, a4, GetLastEngineError())
#define LOG_RTCERR5(func, a1, a2, a3, a4, a5) \
    LOG_RTCERR5_EX(func, a1, a2, a3, a4, a5, GetLastEngineError())
#define LOG_RTCERR6(func, a1, a2, a3, a4, a5, a6) \
    LOG_RTCERR6_EX(func, a1, a2, a3, a4, a5, a6, GetLastEngineError())

// _EX variants take the error code explicitly, for callers that obtain it
// some other way than GetLastEngineError().
#define LOG_RTCERR0_EX(func, err) LOG(LS_WARNING) \
    << "" << #func << "() failed, err=" << err
#define LOG_RTCERR1_EX(func, a1, err) LOG(LS_WARNING) \
    << "" << #func << "(" << a1 << ") failed, err=" << err
#define LOG_RTCERR2_EX(func, a1, a2, err) LOG(LS_WARNING) \
    << "" << #func << "(" << a1 << ", " << a2 << ") failed, err=" \
    << err
#define LOG_RTCERR3_EX(func, a1, a2, a3, err) LOG(LS_WARNING) \
    << "" << #func << "(" << a1 << ", " << a2 << ", " << a3 \
    << ") failed, err=" << err
#define LOG_RTCERR4_EX(func, a1, a2, a3, a4, err) LOG(LS_WARNING) \
    << "" << #func << "(" << a1 << ", " << a2 << ", " << a3 \
    << ", " << a4 << ") failed, err=" << err
#define LOG_RTCERR5_EX(func, a1, a2, a3, a4, a5, err) LOG(LS_WARNING) \
    << "" << #func << "(" << a1 << ", " << a2 << ", " << a3 \
    << ", " << a4 << ", " << a5 << ") failed, err=" << err
#define LOG_RTCERR6_EX(func, a1, a2, a3, a4, a5, a6, err) LOG(LS_WARNING) \
    << "" << #func << "(" << a1 << ", " << a2 << ", " << a3 \
    << ", " << a4 << ", " << a5 << ", " << a6 << ") failed, err=" << err

}  // namespace cricket

#endif  // TALK_SESSION_PHONE_WEBRTCCOMMON_H_

View File

@@ -0,0 +1,916 @@
/*
* libjingle
* Copyright 2004--2011, Google Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
* EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifdef HAVE_WEBRTC
#include "talk/session/phone/webrtcvideoengine.h"
#include "talk/base/common.h"
#include "talk/base/buffer.h"
#include "talk/base/byteorder.h"
#include "talk/base/logging.h"
#include "talk/base/stringutils.h"
#include "talk/session/phone/webrtcvoiceengine.h"
#include "talk/session/phone/webrtcvideoframe.h"
#include "talk/session/phone/webrtcvie.h"
#include "talk/session/phone/webrtcvoe.h"
namespace cricket {
// Default ViE trace verbosity, and the start/max bitrates applied to every
// send codec (presumably kbps, matching webrtc::VideoCodec's bitrate
// fields — confirm against the WebRTC headers).
static const int kDefaultLogSeverity = talk_base::LS_WARNING;
static const int kStartVideoBitrate = 300;
static const int kMaxVideoBitrate = 1000;
// Bridges ViE's ExternalRenderer callback interface to a cricket
// VideoRenderer. ViE delivers raw frame buffers; they are wrapped in a
// WebRtcVideoFrame without copying and handed to the renderer.
class WebRtcRenderAdapter : public webrtc::ExternalRenderer {
 public:
  explicit WebRtcRenderAdapter(VideoRenderer* renderer)
      // width_/height_ start at 0 so DeliverFrame is well-defined even if
      // it is invoked before the first FrameSizeChange (previously these
      // members were uninitialized).
      : renderer_(renderer), width_(0), height_(0) {
  }

  virtual ~WebRtcRenderAdapter() {}

  // Called by ViE when the incoming stream's dimensions change; forwards
  // the new size to the renderer.
  virtual int FrameSizeChange(unsigned int width, unsigned int height,
                              unsigned int /*number_of_streams*/) {
    ASSERT(renderer_ != NULL);
    width_ = width;
    height_ = height;
    return renderer_->SetSize(width_, height_, 0) ? 0 : -1;
  }

  // Called by ViE with each decoded frame. |buffer| remains owned by ViE,
  // so the frame is detached again before returning.
  virtual int DeliverFrame(unsigned char* buffer, int buffer_size) {
    ASSERT(renderer_ != NULL);
    WebRtcVideoFrame video_frame;
    // TODO(ronghuawu): Currently by the time DeliverFrame got called,
    // ViE expects the frame will be rendered ASAP. However, the libjingle
    // renderer may have its own internal delays. Can you disable the buffering
    // inside ViE and surface the timing information to this callback?
    video_frame.Attach(buffer, buffer_size, width_, height_, 0, 0);
    int ret = renderer_->RenderFrame(&video_frame) ? 0 : -1;
    // Detach so the frame's destructor doesn't free ViE's buffer.
    uint8* buffer_temp;
    size_t buffer_size_temp;
    video_frame.Detach(&buffer_temp, &buffer_size_temp);
    return ret;
  }

 private:
  VideoRenderer* renderer_;
  unsigned int width_;
  unsigned int height_;
};
// Codec preference table: payload name, default payload type, and
// preference rank (VP8 is ranked ahead of H264; see GetCodecPreference).
const WebRtcVideoEngine::VideoCodecPref
    WebRtcVideoEngine::kVideoCodecPrefs[] = {
    {"VP8", 104, 0},
    {"H264", 105, 1}
};
// Default constructor: owns its ViEWrapper, no voice engine for A/V sync,
// and discovers a capture device itself (no external capture module).
WebRtcVideoEngine::WebRtcVideoEngine()
    : vie_wrapper_(new ViEWrapper()),
      capture_(NULL),
      external_capture_(false),
      capture_id_(-1),
      renderer_(webrtc::VideoRender::CreateVideoRender(0, NULL,
          false, webrtc::kRenderExternal)),
      voice_engine_(NULL),
      log_level_(kDefaultLogSeverity),
      capture_started_(false) {
}

// Constructor taking an externally-owned-until-now capture module (we take
// ownership; see the destructor) and a voice engine for A/V sync.
WebRtcVideoEngine::WebRtcVideoEngine(WebRtcVoiceEngine* voice_engine,
                                     webrtc::VideoCaptureModule* capture)
    : vie_wrapper_(new ViEWrapper()),
      capture_(capture),
      external_capture_(true),
      capture_id_(-1),
      renderer_(webrtc::VideoRender::CreateVideoRender(0, NULL,
          false, webrtc::kRenderExternal)),
      voice_engine_(voice_engine),
      log_level_(kDefaultLogSeverity),
      capture_started_(false) {
}

// Constructor for tests: injects a (possibly mock) ViEWrapper.
WebRtcVideoEngine::WebRtcVideoEngine(WebRtcVoiceEngine* voice_engine,
                                     ViEWrapper* vie_wrapper)
    : vie_wrapper_(vie_wrapper),
      capture_(NULL),
      external_capture_(false),
      capture_id_(-1),
      renderer_(webrtc::VideoRender::CreateVideoRender(0, NULL,
          false, webrtc::kRenderExternal)),
      voice_engine_(voice_engine),
      log_level_(kDefaultLogSeverity),
      capture_started_(false) {
}

WebRtcVideoEngine::~WebRtcVideoEngine() {
  LOG(LS_INFO) << " WebRtcVideoEngine::~WebRtcVideoEngine";
  // Detach the trace sink before tearing anything down.
  vie_wrapper_->engine()->SetTraceCallback(NULL);
  Terminate();
  vie_wrapper_.reset();
  if (capture_) {
    webrtc::VideoCaptureModule::Destroy(capture_);
  }
  if (renderer_) {
    webrtc::VideoRender::DestroyVideoRender(renderer_);
  }
}
// Top-level engine initialization: wires up tracing, then delegates to
// InitVideoEngine(). On failure the partially-initialized engine is torn
// down via Terminate().
bool WebRtcVideoEngine::Init() {
  LOG(LS_INFO) << "WebRtcVideoEngine::Init";
  ApplyLogging();
  if (vie_wrapper_->engine()->SetTraceCallback(this) != 0) {
    LOG_RTCERR1(SetTraceCallback, this);
  }
  bool result = InitVideoEngine();
  if (result) {
    LOG(LS_INFO) << "VideoEngine Init done";
  } else {
    LOG(LS_ERROR) << "VideoEngine Init failed, releasing";
    Terminate();
  }
  return result;
}

// Initializes the ViE subsystems: base engine, voice-engine link for A/V
// sync, observer registration, codec enumeration, and the external render
// module. Fills video_codecs_ sorted by preference.
bool WebRtcVideoEngine::InitVideoEngine() {
  LOG(LS_INFO) << "WebRtcVideoEngine::InitVideoEngine";
  if (vie_wrapper_->base()->Init() != 0) {
    LOG_RTCERR0(Init);
    return false;
  }
  if (!voice_engine_) {
    LOG(LS_WARNING) << "NULL voice engine";
  } else if ((vie_wrapper_->base()->SetVoiceEngine(
      voice_engine_->voe()->engine())) != 0) {
    LOG_RTCERR0(SetVoiceEngine);
    return false;
  }
  if ((vie_wrapper_->base()->RegisterObserver(*this)) != 0) {
    LOG_RTCERR0(RegisterObserver);
    return false;
  }
  int ncodecs = vie_wrapper_->codec()->NumberOfCodecs();
  for (int i = 0; i < ncodecs; ++i) {
    webrtc::VideoCodec wcodec;
    // NOTE(review): the ULPFEC comparison uses length 4, so it actually
    // matches the "ULPF" prefix — looks unintentional but harmless here.
    if ((vie_wrapper_->codec()->GetCodec(i, wcodec) == 0) &&
        (strncmp(wcodec.plName, "I420", 4) != 0) &&
        (strncmp(wcodec.plName, "ULPFEC", 4) != 0) &&
        (strncmp(wcodec.plName, "RED", 4) != 0)) {
      // ignore I420, FEC(RED and ULPFEC)
      VideoCodec codec(wcodec.plType, wcodec.plName, wcodec.width,
                       wcodec.height, wcodec.maxFramerate, i);
      LOG(LS_INFO) << codec.ToString();
      video_codecs_.push_back(codec);
    }
  }
  if (vie_wrapper_->render()->RegisterVideoRenderModule(*renderer_) != 0) {
    LOG_RTCERR0(RegisterVideoRenderModule);
    return false;
  }
  std::sort(video_codecs_.begin(), video_codecs_.end(),
            &VideoCodec::Preferable);
  return true;
}
// ViE observer callback fired on high CPU load; currently log-only.
void WebRtcVideoEngine::PerformanceAlarm(const unsigned int cpu_load) {
  LOG(LS_INFO) << "WebRtcVideoEngine::PerformanceAlarm";
}
// Filters out spammy trace messages — currently just the stats-API
// complaint that fires before any RTCP info has arrived from the remote
// side. Returns true when |trace| starts with one of the ignored prefixes.
static bool ShouldIgnoreTrace(const std::string& trace) {
  static const char* const kTracesToIgnore[] = {
    "\tfailed to GetReportBlockInformation",
  };
  const size_t num_ignored =
      sizeof(kTracesToIgnore) / sizeof(kTracesToIgnore[0]);
  for (size_t i = 0; i < num_ignored; ++i) {
    const char* prefix = kTracesToIgnore[i];
    if (trace.compare(0, strlen(prefix), prefix) == 0) {
      return true;
    }
  }
  return false;
}
// ViE TraceCallback: maps WebRTC trace levels onto talk_base severities and
// re-logs the message through the libjingle logger, filtering spam.
void WebRtcVideoEngine::Print(const webrtc::TraceLevel level,
                              const char* trace, const int length) {
  talk_base::LoggingSeverity sev = talk_base::LS_VERBOSE;
  if (level == webrtc::kTraceError || level == webrtc::kTraceCritical)
    sev = talk_base::LS_ERROR;
  else if (level == webrtc::kTraceWarning)
    sev = talk_base::LS_WARNING;
  else if (level == webrtc::kTraceStateInfo || level == webrtc::kTraceInfo)
    sev = talk_base::LS_INFO;
  if (sev >= log_level_) {
    // Skip past boilerplate prefix text (the first 71 characters of a
    // well-formed WebRTC trace line, plus the trailing terminator).
    if (length < 72) {
      std::string msg(trace, length);
      LOG(LS_ERROR) << "Malformed webrtc log message: ";
      LOG_V(sev) << msg;
    } else {
      std::string msg(trace + 71, length - 72);
      if (!ShouldIgnoreTrace(msg)) {
        LOG_V(sev) << "WebRtc ViE:" << msg;
      }
    }
  }
}
int WebRtcVideoEngine::GetCodecPreference(const char* name) {
for (size_t i = 0; i < ARRAY_SIZE(kVideoCodecPrefs); ++i) {
if (strcmp(kVideoCodecPrefs[i].payload_name, name) == 0) {
return kVideoCodecPrefs[i].pref;
}
}
return -1;
}
void WebRtcVideoEngine::ApplyLogging() {
int filter = 0;
switch (log_level_) {
case talk_base::LS_VERBOSE: filter |= webrtc::kTraceAll;
case talk_base::LS_INFO: filter |= webrtc::kTraceStateInfo;
case talk_base::LS_WARNING: filter |= webrtc::kTraceWarning;
case talk_base::LS_ERROR: filter |=
webrtc::kTraceError | webrtc::kTraceCritical;
}
}
// Tears down everything Init() set up, in reverse order. Failures are
// logged but teardown continues so later steps still run.
void WebRtcVideoEngine::Terminate() {
  LOG(LS_INFO) << "WebRtcVideoEngine::Terminate";
  SetCapture(false);
  if (local_renderer_.get()) {
    // If the renderer already set, stop it first
    if (vie_wrapper_->render()->StopRender(capture_id_) != 0)
      LOG_RTCERR1(StopRender, capture_id_);
  }
  if (vie_wrapper_->render()->DeRegisterVideoRenderModule(*renderer_) != 0)
    LOG_RTCERR0(DeRegisterVideoRenderModule);
  if ((vie_wrapper_->base()->DeregisterObserver()) != 0)
    LOG_RTCERR0(DeregisterObserver);
  if ((vie_wrapper_->base()->SetVoiceEngine(NULL)) != 0)
    LOG_RTCERR0(SetVoiceEngine);
  if (vie_wrapper_->engine()->SetTraceCallback(NULL) != 0)
    LOG_RTCERR0(SetTraceCallback);
}

// This engine supports both sending and receiving video.
int WebRtcVideoEngine::GetCapabilities() {
  return MediaEngine::VIDEO_RECV | MediaEngine::VIDEO_SEND;
}

// No engine-level options are implemented yet; all options are accepted.
bool WebRtcVideoEngine::SetOptions(int options) {
  return true;
}
// Stops capture, disconnects the capture device from every registered
// channel, and releases it. Safe to call when no device is allocated.
bool WebRtcVideoEngine::ReleaseCaptureDevice() {
  if (capture_id_ != -1) {
    // Stop capture
    SetCapture(false);
    // DisconnectCaptureDevice
    WebRtcVideoMediaChannel* channel;
    for (VideoChannels::const_iterator it = channels_.begin();
         it != channels_.end(); ++it) {
      ASSERT(*it != NULL);
      channel = *it;
      vie_wrapper_->capture()->DisconnectCaptureDevice(
          channel->video_channel());
    }
    // ReleaseCaptureDevice
    vie_wrapper_->capture()->ReleaseCaptureDevice(capture_id_);
    capture_id_ = -1;
  }
  return true;
}

// Allocates the capture device matching |cam| (or the injected external
// capture module, if any), connects it to every registered channel, and
// starts capturing. Returns true if a device was successfully allocated.
bool WebRtcVideoEngine::SetCaptureDevice(const Device* cam) {
  ASSERT(vie_wrapper_.get());
  ASSERT(cam != NULL);
  ReleaseCaptureDevice();
  webrtc::ViECapture* vie_capture = vie_wrapper_->capture();
  // There's an external VCM
  if (capture_) {
    if (vie_capture->AllocateCaptureDevice(*capture_, capture_id_) != 0)
      ASSERT(capture_id_ == -1);
  } else if (!external_capture_) {
    const unsigned int KMaxDeviceNameLength = 128;
    const unsigned int KMaxUniqueIdLength = 256;
    char device_name[KMaxDeviceNameLength];
    char device_id[KMaxUniqueIdLength];
    bool found = false;
    for (int i = 0; i < vie_capture->NumberOfCaptureDevices(); ++i) {
      memset(device_name, 0, KMaxDeviceNameLength);
      memset(device_id, 0, KMaxUniqueIdLength);
      if (vie_capture->GetCaptureDevice(i, device_name, KMaxDeviceNameLength,
                                        device_id, KMaxUniqueIdLength) == 0) {
        // TODO(ronghuawu): We should only compare the device_id here,
        // however the devicemanager and webrtc use different format for th v4l2
        // device id. So here we also compare the device_name for now.
        // For example "usb-0000:00:1d.7-6" vs "/dev/video0".
        if ((cam->name.compare(reinterpret_cast<char*>(device_name)) == 0) ||
            (cam->id.compare(reinterpret_cast<char*>(device_id)) == 0)) {
          LOG(INFO) << "Found video capture device: " << device_name;
          found = true;
          break;
        }
      }
    }
    if (!found)
      return false;
    if (vie_capture->AllocateCaptureDevice(device_id, KMaxUniqueIdLength,
                                           capture_id_) != 0)
      ASSERT(capture_id_ == -1);
  }
  if (capture_id_ != -1) {
    // Connect to all the channels
    WebRtcVideoMediaChannel* channel;
    for (VideoChannels::const_iterator it = channels_.begin();
         it != channels_.end(); ++it) {
      ASSERT(*it != NULL);
      channel = *it;
      vie_capture->ConnectCaptureDevice(capture_id_, channel->video_channel());
    }
    SetCapture(true);
  }
  return (capture_id_ != -1);
}

// Replaces the engine's capture module with |vcm|, taking ownership and
// destroying any previously-owned module.
bool WebRtcVideoEngine::SetCaptureModule(webrtc::VideoCaptureModule* vcm) {
  ReleaseCaptureDevice();
  if (capture_) {
    webrtc::VideoCaptureModule::Destroy(capture_);
  }
  capture_ = vcm;
  external_capture_ = true;
  return true;
}
// Attaches |renderer| as the local-preview renderer for the capture stream,
// replacing (and stopping) any previously-set one.
bool WebRtcVideoEngine::SetLocalRenderer(VideoRenderer* renderer) {
  if (local_renderer_.get()) {
    // If the renderer already set, stop it first
    vie_wrapper_->render()->StopRender(capture_id_);
  }
  local_renderer_.reset(new WebRtcRenderAdapter(renderer));
  int ret;
  ret = vie_wrapper_->render()->AddRenderer(capture_id_,
                                            webrtc::kVideoI420,
                                            local_renderer_.get());
  if (ret != 0)
    return false;
  ret = vie_wrapper_->render()->StartRender(capture_id_);
  return (ret == 0);
}

// Starts or stops the allocated capture device. No-op (and CR_SUCCESS)
// when the state is unchanged or no device is allocated.
CaptureResult WebRtcVideoEngine::SetCapture(bool capture) {
  if ((capture_started_ != capture) && (capture_id_ != -1)) {
    int ret;
    if (capture)
      ret = vie_wrapper_->capture()->StartCapture(capture_id_);
    else
      ret = vie_wrapper_->capture()->StopCapture(capture_id_);
    if (ret != 0)
      return CR_NO_DEVICE;
    capture_started_ = capture;
  }
  return CR_SUCCESS;
}
// Returns the codecs discovered during InitVideoEngine(), sorted by
// preference.
const std::vector<VideoCodec>& WebRtcVideoEngine::codecs() const {
  return video_codecs_;
}

// Updates the minimum logged severity and re-applies the trace settings.
// |filter| is currently unused.
void WebRtcVideoEngine::SetLogging(int min_sev, const char* filter) {
  log_level_ = min_sev;
  ApplyLogging();
}

// Last error reported by the injected ViE wrapper (used by LOG_RTCERR*).
int WebRtcVideoEngine::GetLastEngineError() {
  return vie_wrapper_->error();
}

// Stores the encoder configuration applied to new channels.
bool WebRtcVideoEngine::SetDefaultEncoderConfig(
    const VideoEncoderConfig& config) {
  default_encoder_config_ = config;
  return true;
}
// Creates and initializes a new video media channel tied to
// |voice_channel| (for A/V sync). Returns NULL if initialization fails.
WebRtcVideoMediaChannel* WebRtcVideoEngine::CreateChannel(
    VoiceMediaChannel* voice_channel) {
  WebRtcVideoMediaChannel* channel =
      new WebRtcVideoMediaChannel(this, voice_channel);
  if (!channel->Init()) {
    delete channel;
    return NULL;
  }
  return channel;
}
bool WebRtcVideoEngine::FindCodec(const VideoCodec& codec) {
for (size_t i = 0; i < video_codecs_.size(); ++i) {
if (video_codecs_[i].Matches(codec)) {
return true;
}
}
return false;
}
// Copies the relevant fields of a webrtc::VideoCodec into a cricket
// VideoCodec.
void WebRtcVideoEngine::ConvertToCricketVideoCodec(
    const webrtc::VideoCodec& in_codec, VideoCodec& out_codec) {
  out_codec.id = in_codec.plType;
  out_codec.name = in_codec.plName;
  out_codec.width = in_codec.width;
  out_codec.height = in_codec.height;
  out_codec.framerate = in_codec.maxFramerate;
}

// Finds the ViE codec template matching |in_codec| by name, then overrides
// the template's fields with any non-zero values from |in_codec| and the
// engine's bitrate defaults. Returns false if the name is unknown to ViE.
bool WebRtcVideoEngine::ConvertFromCricketVideoCodec(
    const VideoCodec& in_codec, webrtc::VideoCodec& out_codec) {
  bool found = false;
  int ncodecs = vie_wrapper_->codec()->NumberOfCodecs();
  for (int i = 0; i < ncodecs; ++i) {
    if ((vie_wrapper_->codec()->GetCodec(i, out_codec) == 0) &&
        (strncmp(out_codec.plName,
                 in_codec.name.c_str(),
                 webrtc::kPayloadNameSize - 1) == 0)) {
      found = true;
      break;
    }
  }
  if (!found) {
    LOG(LS_ERROR) << "invalid codec type";
    return false;
  }
  // Zero means "keep the ViE template's value".
  if (in_codec.id != 0)
    out_codec.plType = in_codec.id;
  if (in_codec.width != 0)
    out_codec.width = in_codec.width;
  if (in_codec.height != 0)
    out_codec.height = in_codec.height;
  if (in_codec.framerate != 0)
    out_codec.maxFramerate = in_codec.framerate;
  out_codec.maxBitrate = kMaxVideoBitrate;
  out_codec.startBitrate = kStartVideoBitrate;
  out_codec.minBitrate = kStartVideoBitrate;
  return true;
}

// Last error reported by the ViE base interface.
int WebRtcVideoEngine::GetLastVideoEngineError() {
  return vie_wrapper_->base()->LastError();
}

// Channel bookkeeping: channels register on construction and unregister on
// destruction so capture-device changes can reach every channel.
void WebRtcVideoEngine::RegisterChannel(WebRtcVideoMediaChannel *channel) {
  channels_.push_back(channel);
}

void WebRtcVideoEngine::UnregisterChannel(WebRtcVideoMediaChannel *channel) {
  VideoChannels::iterator i = std::find(channels_.begin(),
                                        channels_.end(),
                                        channel);
  if (i != channels_.end()) {
    channels_.erase(i);
  }
}
// WebRtcVideoMediaChannel
// Registers the new channel with the engine; the ViE channel itself is
// created later in Init().
WebRtcVideoMediaChannel::WebRtcVideoMediaChannel(
    WebRtcVideoEngine* engine, VoiceMediaChannel* channel)
    : engine_(engine),
      voice_channel_(channel),
      vie_channel_(-1),
      sending_(false),
      render_started_(false),
      send_codec_(NULL) {
  engine->RegisterChannel(this);
}

// Creates the ViE channel, links it to the voice channel for A/V sync
// (best-effort), registers this object as the send transport, and enables
// PLI key-frame requests.
bool WebRtcVideoMediaChannel::Init() {
  bool ret = true;
  if (engine_->video_engine()->base()->CreateChannel(vie_channel_) != 0) {
    LOG_RTCERR1(CreateChannel, vie_channel_);
    return false;
  }
  LOG(LS_INFO) << "WebRtcVideoMediaChannel::Init "
               << "video_channel " << vie_channel_ << " created";
  // connect audio channel
  if (voice_channel_) {
    WebRtcVoiceMediaChannel* channel =
        static_cast<WebRtcVoiceMediaChannel*> (voice_channel_);
    if (engine_->video_engine()->base()->ConnectAudioChannel(
        vie_channel_, channel->voe_channel()) != 0) {
      LOG(LS_WARNING) << "ViE ConnectAudioChannel failed"
                      << "A/V not synchronized";
      // Don't set ret to false;
    }
  }
  // Register external transport
  if (engine_->video_engine()->network()->RegisterSendTransport(
      vie_channel_, *this) != 0) {
    ret = false;
  } else {
    // EnableRtcp();  // by default RTCP is disabled.
    EnablePLI();
  }
  return ret;
}

// Undoes Init() in reverse order, then deletes the ViE channel and
// unregisters from the engine.
WebRtcVideoMediaChannel::~WebRtcVideoMediaChannel() {
  // Stop and remote renderer
  SetRender(false);
  if (engine()->video_engine()->render()->RemoveRenderer(vie_channel_)
      == -1) {
    LOG_RTCERR1(RemoveRenderer, vie_channel_);
  }
  // DeRegister external transport
  if (engine()->video_engine()->network()->DeregisterSendTransport(
      vie_channel_) == -1) {
    LOG_RTCERR1(DeregisterSendTransport, vie_channel_);
  }
  // Unregister RtcChannel with the engine.
  engine()->UnregisterChannel(this);
  // Delete VideoChannel
  if (engine()->video_engine()->base()->DeleteChannel(vie_channel_) == -1) {
    LOG_RTCERR1(DeleteChannel, vie_channel_);
  }
}
// Applies every supported codec in |codecs| to the receive side, then
// starts receiving. Returns false if any codec is unknown or fails to
// apply, but keeps trying the remaining codecs either way.
bool WebRtcVideoMediaChannel::SetRecvCodecs(
    const std::vector<VideoCodec>& codecs) {
  bool ret = true;
  for (std::vector<VideoCodec>::const_iterator iter = codecs.begin();
       iter != codecs.end(); ++iter) {
    if (engine()->FindCodec(*iter)) {
      webrtc::VideoCodec wcodec;
      if (engine()->ConvertFromCricketVideoCodec(*iter, wcodec)) {
        if (engine()->video_engine()->codec()->SetReceiveCodec(
            vie_channel_, wcodec) != 0) {
          LOG_RTCERR2(SetReceiveCodec, vie_channel_, wcodec.plName);
          ret = false;
        }
      }
    } else {
      // Fixed log message: was "Unknown codec" with no separator before
      // the codec name.
      LOG(LS_INFO) << "Unknown codec " << iter->name;
      ret = false;
    }
  }
  // make channel ready to receive packets
  if (ret) {
    if (engine()->video_engine()->base()->StartReceive(vie_channel_) != 0) {
      LOG_RTCERR1(StartReceive, vie_channel_);
      ret = false;
    }
  }
  return ret;
}
// Picks the first codec in |codecs| that the engine supports and applies it
// as the send codec. Fails if the channel is already sending or no codec
// matches. (Log-message typos "alredy"/"avilable" fixed.)
bool WebRtcVideoMediaChannel::SetSendCodecs(
    const std::vector<VideoCodec>& codecs) {
  if (sending_) {
    LOG(LS_ERROR) << "channel is already sending";
    return false;
  }
  // match with local video codec list
  std::vector<webrtc::VideoCodec> send_codecs;
  for (std::vector<VideoCodec>::const_iterator iter = codecs.begin();
       iter != codecs.end(); ++iter) {
    if (engine()->FindCodec(*iter)) {
      webrtc::VideoCodec wcodec;
      if (engine()->ConvertFromCricketVideoCodec(*iter, wcodec))
        send_codecs.push_back(wcodec);
    }
  }
  // if none matches, return without setting anything
  if (send_codecs.empty()) {
    LOG(LS_ERROR) << "No matching codecs available";
    return false;
  }
  // select the first matched codec
  const webrtc::VideoCodec& codec(send_codecs[0]);
  send_codec_.reset(new webrtc::VideoCodec(codec));
  if (engine()->video_engine()->codec()->SetSendCodec(
      vie_channel_, codec) != 0) {
    LOG_RTCERR2(SetSendCodec, vie_channel_, codec.plName);
    return false;
  }
  return true;
}
bool WebRtcVideoMediaChannel::SetRender(bool render) {
  // Starts or stops rendering on the ViE channel. A request that matches the
  // current state is a successful no-op.
  if (render == render_started_) {
    return true;
  }
  int result;
  if (render) {
    result = engine()->video_engine()->render()->StartRender(vie_channel_);
  } else {
    result = engine()->video_engine()->render()->StopRender(vie_channel_);
  }
  if (result != 0) {
    return false;
  }
  render_started_ = render;
  return true;
}
bool WebRtcVideoMediaChannel::SetSend(bool send) {
  // Toggles sending on the ViE channel; sending_ is updated only when the
  // engine call succeeds.
  if (send == sending()) {
    return true;  // Already in the requested state.
  }
  int result;
  if (send) {
    result = engine()->video_engine()->base()->StartSend(vie_channel_);
    if (result != 0) {
      LOG_RTCERR1(StartSend, vie_channel_);
    }
  } else {
    result = engine()->video_engine()->base()->StopSend(vie_channel_);
    if (result != 0) {
      LOG_RTCERR1(StopSend, vie_channel_);
    }
  }
  if (result != 0) {
    return false;
  }
  sending_ = send;
  return true;
}
// Multiple receive streams are not supported yet; both operations always
// fail.
bool WebRtcVideoMediaChannel::AddStream(uint32 ssrc, uint32 voice_ssrc) {
  return false;
}

bool WebRtcVideoMediaChannel::RemoveStream(uint32 ssrc) {
  return false;
}
bool WebRtcVideoMediaChannel::SetRenderer(
    uint32 ssrc, VideoRenderer* renderer) {
  // Attaches |renderer| as the sink for the remote stream and starts
  // rendering. Only the default stream (ssrc == 0) is supported.
  ASSERT(vie_channel_ != -1);
  if (ssrc != 0) {
    return false;
  }
  // If a renderer was already attached, stop rendering before replacing it.
  if (remote_renderer_.get() != NULL) {
    engine_->video_engine()->render()->StopRender(vie_channel_);
  }
  remote_renderer_.reset(new WebRtcRenderAdapter(renderer));
  int result = engine_->video_engine()->render()->AddRenderer(
      vie_channel_, webrtc::kVideoI420, remote_renderer_.get());
  if (result != 0) {
    LOG_RTCERR3(AddRenderer, vie_channel_, webrtc::kVideoI420,
                remote_renderer_.get());
    remote_renderer_.reset();
    return false;
  }
  if (engine_->video_engine()->render()->StartRender(vie_channel_) != 0) {
    LOG_RTCERR1(StartRender, vie_channel_);
    return false;
  }
  return true;
}
bool WebRtcVideoMediaChannel::GetStats(VideoMediaInfo* info) {
  // Fills |info| with one sender entry and one receiver entry built from the
  // channel's RTP/RTCP statistics. Returns false if any engine query fails.
  VideoSenderInfo sinfo;
  memset(&sinfo, 0, sizeof(sinfo));
  unsigned int ssrc;
  if (engine_->video_engine()->rtp()->GetLocalSSRC(vie_channel_,
                                                   ssrc) != 0) {
    LOG_RTCERR2(GetLocalSSRC, vie_channel_, ssrc);
    return false;
  }
  sinfo.ssrc = ssrc;
  unsigned int cumulative_lost, extended_max, jitter;
  int rtt_ms;
  uint16 fraction_lost;
  // RTCP stats the remote side reported about our outgoing stream.
  if (engine_->video_engine()->rtp()->GetReceivedRTCPStatistics(vie_channel_,
          fraction_lost, cumulative_lost, extended_max, jitter,
          rtt_ms) != 0) {
    LOG_RTCERR6(GetReceivedRTCPStatistics, vie_channel_,
        fraction_lost, cumulative_lost, extended_max, jitter, rtt_ms);
    return false;
  }
  sinfo.fraction_lost = fraction_lost;
  // NOTE(review): this assignment is overwritten with -1 a few lines below;
  // confirm which value packets_lost is meant to carry for the sender.
  sinfo.packets_lost = cumulative_lost;
  sinfo.rtt_ms = rtt_ms;
  unsigned int bytes_sent, packets_sent, bytes_recv, packets_recv;
  if (engine_->video_engine()->rtp()->GetRTPStatistics(vie_channel_,
          bytes_sent, packets_sent, bytes_recv, packets_recv) != 0) {
    LOG_RTCERR5(GetRTPStatistics, vie_channel_,
        bytes_sent, packets_sent, bytes_recv, packets_recv);
    return false;
  }
  sinfo.packets_sent = packets_sent;
  sinfo.bytes_sent = bytes_sent;
  sinfo.packets_lost = -1;
  sinfo.packets_cached = -1;
  info->senders.push_back(sinfo);
  // Build receiver info, reusing the local variables declared above.
  VideoReceiverInfo rinfo;
  memset(&rinfo, 0, sizeof(rinfo));
  if (engine_->video_engine()->rtp()->GetSentRTCPStatistics(vie_channel_,
          fraction_lost, cumulative_lost, extended_max, jitter,
          rtt_ms) != 0) {
    LOG_RTCERR6(GetSentRTCPStatistics, vie_channel_,
        fraction_lost, cumulative_lost, extended_max, jitter, rtt_ms);
    return false;
  }
  rinfo.bytes_rcvd = bytes_recv;
  rinfo.packets_rcvd = packets_recv;
  rinfo.fraction_lost = fraction_lost;
  rinfo.packets_lost = cumulative_lost;
  if (engine_->video_engine()->rtp()->GetRemoteSSRC(vie_channel_,
                                                    ssrc) != 0) {
    return false;
  }
  rinfo.ssrc = ssrc;
  // Get codec for wxh
  info->receivers.push_back(rinfo);
  return true;
}
bool WebRtcVideoMediaChannel::SendIntraFrame() {
  // Asks the local encoder to produce a key frame immediately.
  if (engine()->video_engine()->codec()->SendKeyFrame(vie_channel_) != 0) {
    LOG_RTCERR1(SendKeyFrame, vie_channel_);
    return false;
  }
  return true;
}
bool WebRtcVideoMediaChannel::RequestIntraFrame() {
  // There is no API exposed to the application to request a key frame from
  // the remote side; ViE does this internally when there are errors from the
  // decoder. Always reports failure.
  return false;
}
void WebRtcVideoMediaChannel::OnPacketReceived(talk_base::Buffer* packet) {
  // Hands an incoming RTP packet to the ViE network interface for this
  // channel.
  engine()->video_engine()->network()->ReceivedRTPPacket(
      vie_channel_, packet->data(), packet->length());
}
void WebRtcVideoMediaChannel::OnRtcpReceived(talk_base::Buffer* packet) {
  // Hands an incoming RTCP packet to the ViE network interface for this
  // channel.
  engine_->video_engine()->network()->ReceivedRTCPPacket(
      vie_channel_, packet->data(), packet->length());
}
void WebRtcVideoMediaChannel::SetSendSsrc(uint32 id) {
  // The local SSRC may only be changed while the channel is not sending.
  if (sending_) {
    LOG(LS_ERROR) << "Channel already in send state";
    return;
  }
  if (engine()->video_engine()->rtp()->SetLocalSSRC(vie_channel_, id) != 0) {
    LOG_RTCERR1(SetLocalSSRC, vie_channel_);
  }
}
bool WebRtcVideoMediaChannel::SetRtcpCName(const std::string& cname) {
  // Sets the RTCP canonical name (CNAME) for this channel.
  int result = engine()->video_engine()->rtp()->SetRTCPCName(
      vie_channel_, cname.c_str());
  if (result != 0) {
    LOG_RTCERR2(SetRTCPCName, vie_channel_, cname.c_str());
  }
  return result == 0;
}
bool WebRtcVideoMediaChannel::Mute(bool on) {
  // Not implemented. TODO: decide whether muting should stop sending.
  return false;
}
bool WebRtcVideoMediaChannel::SetSendBandwidth(bool autobw, int bps) {
  // Applies |bps| as the max bitrate of the current send codec; when |autobw|
  // is false the start and min bitrates are pinned to |bps| as well.
  // Fixed the log tag, which misspelled the class name
  // ("RtcVideoMediaChanne").
  LOG(LS_INFO) << "WebRtcVideoMediaChannel::SetSendBandwidth";
  if (!send_codec_.get()) {
    // Nothing to adjust yet; the value will take effect when SetSendCodecs
    // installs a codec.
    LOG(LS_INFO) << "The send codec has not been set up yet.";
    return true;
  }
  if (!autobw) {
    send_codec_->startBitrate = bps;
    send_codec_->minBitrate = bps;
  }
  send_codec_->maxBitrate = bps;
  if (engine()->video_engine()->codec()->SetSendCodec(vie_channel_,
      *send_codec_.get()) != 0) {
    LOG_RTCERR2(SetSendCodec, vie_channel_, send_codec_->plName);
    return false;
  }
  return true;
}
bool WebRtcVideoMediaChannel::SetOptions(int options) {
  // No channel options are currently supported; accept and ignore them all.
  return true;
}
// Enables compound RTCP (RFC 4585) on the channel.
void WebRtcVideoMediaChannel::EnableRtcp() {
  engine()->video_engine()->rtp()->SetRTCPStatus(
      vie_channel_, webrtc::kRtcpCompound_RFC4585);
}

// Configures key-frame requests to be sent as RTCP PLI messages.
void WebRtcVideoMediaChannel::EnablePLI() {
  engine_->video_engine()->rtp()->SetKeyFrameRequestMethod(
      vie_channel_, webrtc::kViEKeyFrameRequestPliRtcp);
}

// Enables TMMBR (Temporary Maximum Media Stream Bit Rate) signaling.
void WebRtcVideoMediaChannel::EnableTMMBR() {
  engine_->video_engine()->rtp()->SetTMMBRStatus(vie_channel_, true);
}
int WebRtcVideoMediaChannel::SendPacket(int channel, const void* data,
int len) {
if (!network_interface_) {
return -1;
}
talk_base::Buffer packet(data, len, kMaxRtpPacketLen);
return network_interface_->SendPacket(&packet) ? len : -1;
}
int WebRtcVideoMediaChannel::SendRTCPPacket(int channel,
const void* data,
int len) {
if (!network_interface_) {
return -1;
}
talk_base::Buffer packet(data, len, kMaxRtpPacketLen);
return network_interface_->SendRtcp(&packet) ? len : -1;
}
} // namespace cricket
#endif // HAVE_WEBRTC

View File

@@ -0,0 +1,197 @@
/*
* libjingle
* Copyright 2004--2011, Google Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
* EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef TALK_SESSION_PHONE_WEBRTCVIDEOENGINE_H_
#define TALK_SESSION_PHONE_WEBRTCVIDEOENGINE_H_
#include <vector>
#include "talk/base/scoped_ptr.h"
#include "talk/session/phone/videocommon.h"
#include "talk/session/phone/codec.h"
#include "talk/session/phone/channel.h"
#include "talk/session/phone/mediaengine.h"
#include "talk/session/phone/webrtccommon.h"
namespace webrtc {
class VideoCaptureModule;
class VideoRender;
}
namespace cricket {
struct Device;
class VideoRenderer;
class ViEWrapper;
class VoiceMediaChannel;
class WebRtcRenderAdapter;
class WebRtcVideoMediaChannel;
class WebRtcVoiceEngine;
// Video engine built on WebRTC's ViE API. Owns the capture module, the local
// renderer adapter and the list of media channels it has created.
class WebRtcVideoEngine : public webrtc::ViEBaseObserver,
                          public webrtc::TraceCallback {
 public:
  // Creates the WebRtcVideoEngine with internal VideoCaptureModule.
  WebRtcVideoEngine();
  // Creates the WebRtcVideoEngine, and specifies the WebRtcVoiceEngine and
  // external VideoCaptureModule to use.
  WebRtcVideoEngine(WebRtcVoiceEngine* voice_engine,
                    webrtc::VideoCaptureModule* capture);
  // For testing purposes. Allows the WebRtcVoiceEngine and
  // ViEWrapper to be mocks.
  WebRtcVideoEngine(WebRtcVoiceEngine* voice_engine, ViEWrapper* vie_wrapper);
  ~WebRtcVideoEngine();

  // Engine lifetime.
  bool Init();
  void Terminate();

  // Creates a video media channel; |voice_channel| may be NULL.
  WebRtcVideoMediaChannel* CreateChannel(
      VoiceMediaChannel* voice_channel);
  // Returns true if |codec| is in this engine's codec list.
  bool FindCodec(const VideoCodec& codec);
  bool SetDefaultEncoderConfig(const VideoEncoderConfig& config);

  // Channel bookkeeping, called by WebRtcVideoMediaChannel.
  void RegisterChannel(WebRtcVideoMediaChannel* channel);
  void UnregisterChannel(WebRtcVideoMediaChannel* channel);

  // Access to the wrapped ViE interfaces.
  ViEWrapper* video_engine() { return vie_wrapper_.get(); }
  int GetLastVideoEngineError();
  int GetCapabilities();
  bool SetOptions(int options);

  // Capture and local-preview control.
  bool SetCaptureDevice(const Device* device);
  bool SetCaptureModule(webrtc::VideoCaptureModule* vcm);
  bool SetLocalRenderer(VideoRenderer* renderer);
  CaptureResult SetCapture(bool capture);
  const std::vector<VideoCodec>& codecs() const;
  void SetLogging(int min_sev, const char* filter);
  int GetLastEngineError();

  VideoEncoderConfig& default_encoder_config() {
    return default_encoder_config_;
  }

  // Conversions between cricket and webrtc codec representations.
  void ConvertToCricketVideoCodec(const webrtc::VideoCodec& in_codec,
                                  VideoCodec& out_codec);
  bool ConvertFromCricketVideoCodec(const VideoCodec& in_codec,
                                    webrtc::VideoCodec& out_codec);
  // Signal emitted with the result of capture operations (see SetCapture).
  sigslot::signal1<CaptureResult> SignalCaptureResult;

 private:
  // Entry in the built-in codec preference table.
  struct VideoCodecPref {
    const char* payload_name;
    int payload_type;
    int pref;
  };
  static const VideoCodecPref kVideoCodecPrefs[];
  int GetCodecPreference(const char* name);

  void ApplyLogging();
  bool InitVideoEngine();
  // ViEBaseObserver callback.
  void PerformanceAlarm(const unsigned int cpu_load);
  bool ReleaseCaptureDevice();
  // webrtc::TraceCallback implementation.
  virtual void Print(const webrtc::TraceLevel level, const char* trace_string,
                     const int length);

  typedef std::vector<WebRtcVideoMediaChannel*> VideoChannels;

  talk_base::scoped_ptr<ViEWrapper> vie_wrapper_;
  webrtc::VideoCaptureModule* capture_;
  bool external_capture_;  // True when |capture_| was supplied by the caller.
  int capture_id_;
  webrtc::VideoRender* renderer_;
  WebRtcVoiceEngine* voice_engine_;
  std::vector<VideoCodec> video_codecs_;
  VideoChannels channels_;  // All currently registered media channels.
  int log_level_;
  VideoEncoderConfig default_encoder_config_;
  bool capture_started_;
  talk_base::scoped_ptr<WebRtcRenderAdapter> local_renderer_;
};
// A VideoMediaChannel backed by a single ViE channel. Implements
// webrtc::Transport so outgoing RTP/RTCP packets are routed through the
// cricket network interface.
class WebRtcVideoMediaChannel : public VideoMediaChannel,
                                public webrtc::Transport {
 public:
  WebRtcVideoMediaChannel(
      WebRtcVideoEngine* engine, VoiceMediaChannel* voice_channel);
  ~WebRtcVideoMediaChannel();
  bool Init();

  // VideoMediaChannel implementation.
  virtual bool SetRecvCodecs(const std::vector<VideoCodec> &codecs);
  virtual bool SetSendCodecs(const std::vector<VideoCodec> &codecs);
  virtual bool SetRender(bool render);
  virtual bool SetSend(bool send);
  // Multiple streams are not supported; these always return false.
  virtual bool AddStream(uint32 ssrc, uint32 voice_ssrc);
  virtual bool RemoveStream(uint32 ssrc);
  virtual bool SetRenderer(uint32 ssrc, VideoRenderer* renderer);
  virtual bool GetStats(VideoMediaInfo* info);
  virtual bool SendIntraFrame();
  virtual bool RequestIntraFrame();
  virtual void OnPacketReceived(talk_base::Buffer* packet);
  virtual void OnRtcpReceived(talk_base::Buffer* packet);
  virtual void SetSendSsrc(uint32 id);
  virtual bool SetRtcpCName(const std::string& cname);
  virtual bool Mute(bool on);
  // RTP header extensions are not supported.
  virtual bool SetRecvRtpHeaderExtensions(
      const std::vector<RtpHeaderExtension>& extensions) {
    return false;
  }
  virtual bool SetSendRtpHeaderExtensions(
      const std::vector<RtpHeaderExtension>& extensions) {
    return false;
  }
  virtual bool SetSendBandwidth(bool autobw, int bps);
  virtual bool SetOptions(int options);

  // Accessors.
  WebRtcVideoEngine* engine() { return engine_; }
  VoiceMediaChannel* voice_channel() { return voice_channel_; }
  int video_channel() { return vie_channel_; }
  bool sending() { return sending_; }

 protected:
  int GetLastEngineError() { return engine()->GetLastEngineError(); }
  // webrtc::Transport implementation (outgoing packet callbacks).
  virtual int SendPacket(int channel, const void* data, int len);
  virtual int SendRTCPPacket(int channel, const void* data, int len);

 private:
  void EnableRtcp();
  void EnablePLI();
  void EnableTMMBR();

  WebRtcVideoEngine* engine_;
  VoiceMediaChannel* voice_channel_;
  int vie_channel_;  // ViE channel id; ASSERTed != -1 once initialized.
  bool sending_;
  bool render_started_;
  // Cached copy of the active send codec (see SetSendCodecs).
  talk_base::scoped_ptr<webrtc::VideoCodec> send_codec_;
  talk_base::scoped_ptr<WebRtcRenderAdapter> remote_renderer_;
};
} // namespace cricket
#endif // TALK_SESSION_PHONE_WEBRTCVIDEOENGINE_H_

View File

@@ -0,0 +1,238 @@
/*
* libjingle
* Copyright 2004--2011, Google Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
* EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "talk/session/phone/webrtcvideoframe.h"
#include "talk/base/logging.h"
#include "talk/session/phone/videocommon.h"
#ifdef WEBRTC_RELATIVE_PATH
#include "common_video/vplib/main/interface/vplib.h"
#else
#include "third_party/webrtc/files/include/vplib.h"
#endif
namespace cricket {
// Constructs an empty frame. elapsed_time_ is explicitly zeroed: it was
// previously left uninitialized, so GetElapsedTime() on a default-constructed
// frame returned an indeterminate value until Attach()/SetElapsedTime() ran.
WebRtcVideoFrame::WebRtcVideoFrame() : elapsed_time_(0) {
}

WebRtcVideoFrame::~WebRtcVideoFrame() {
}
void WebRtcVideoFrame::Attach(uint8* buffer, size_t buffer_size, size_t w,
                              size_t h, int64 elapsed_time, int64 time_stamp) {
  // Takes ownership of |buffer| as the frame's pixel data, releasing any
  // previously held buffer first.
  video_frame_.Free();
  WebRtc_UWord8* memory = buffer;
  WebRtc_UWord32 length = buffer_size;
  WebRtc_UWord32 capacity = buffer_size;
  video_frame_.Swap(memory, length, capacity);
  video_frame_.SetWidth(w);
  video_frame_.SetHeight(h);
  elapsed_time_ = elapsed_time;
  video_frame_.SetTimeStamp(time_stamp);
}
void WebRtcVideoFrame::Detach(uint8** buffer, size_t* buffer_size) {
  // Releases ownership of the underlying buffer to the caller, leaving the
  // frame empty.
  WebRtc_UWord8* memory = NULL;
  WebRtc_UWord32 length = 0;
  WebRtc_UWord32 capacity = 0;
  video_frame_.Swap(memory, length, capacity);
  *buffer = memory;
  *buffer_size = capacity;
}
bool WebRtcVideoFrame::InitToBlack(size_t w, size_t h,
                                   int64 elapsed_time, int64 time_stamp) {
  // Allocates a w x h I420 buffer and fills it with black (Y=16, U=V=128).
  // NOTE(review): the size math (w * h * 3 / 2 and w * h / 4) is exact only
  // for even w and h — confirm callers never pass odd dimensions.
  size_t buffer_size = w * h * 3 / 2;
  uint8* buffer = new uint8[buffer_size];
  // Attach() takes ownership of |buffer|.
  Attach(buffer, buffer_size, w, h, elapsed_time, time_stamp);
  memset(GetYPlane(), 16, w * h);
  memset(GetUPlane(), 128, w * h / 4);
  memset(GetVPlane(), 128, w * h / 4);
  return true;
}
// Frame dimensions in pixels, delegated to the wrapped webrtc::VideoFrame.
size_t WebRtcVideoFrame::GetWidth() const {
  return video_frame_.Width();
}

size_t WebRtcVideoFrame::GetHeight() const {
  return video_frame_.Height();
}
// Plane accessors. The frame stores a single packed buffer with the Y plane
// at offset 0, the U plane at width*height and the V plane at
// width*height*5/4. All accessors return NULL when no buffer is attached.
const uint8* WebRtcVideoFrame::GetYPlane() const {
  WebRtc_UWord8* buffer = video_frame_.Buffer();
  return buffer;
}

const uint8* WebRtcVideoFrame::GetUPlane() const {
  WebRtc_UWord8* buffer = video_frame_.Buffer();
  if (buffer)
    buffer += (video_frame_.Width() * video_frame_.Height());
  return buffer;
}

const uint8* WebRtcVideoFrame::GetVPlane() const {
  WebRtc_UWord8* buffer = video_frame_.Buffer();
  if (buffer)
    buffer += (video_frame_.Width() * video_frame_.Height() * 5 / 4);
  return buffer;
}

// Non-const variants of the accessors above.
uint8* WebRtcVideoFrame::GetYPlane() {
  WebRtc_UWord8* buffer = video_frame_.Buffer();
  return buffer;
}

uint8* WebRtcVideoFrame::GetUPlane() {
  WebRtc_UWord8* buffer = video_frame_.Buffer();
  if (buffer)
    buffer += (video_frame_.Width() * video_frame_.Height());
  return buffer;
}

uint8* WebRtcVideoFrame::GetVPlane() {
  WebRtc_UWord8* buffer = video_frame_.Buffer();
  if (buffer)
    buffer += (video_frame_.Width() * video_frame_.Height() * 5 / 4);
  return buffer;
}
VideoFrame* WebRtcVideoFrame::Copy() const {
  // Deep-copies the frame. Returns NULL when there is no image data; the
  // caller owns the returned frame.
  WebRtc_UWord8* src = video_frame_.Buffer();
  if (src == NULL) {
    return NULL;
  }
  const size_t length = video_frame_.Length();
  uint8* dst = new uint8[length];
  memcpy(dst, src, length);
  WebRtcVideoFrame* frame = new WebRtcVideoFrame();
  frame->Attach(dst, length, video_frame_.Width(), video_frame_.Height(),
                elapsed_time_, video_frame_.TimeStamp());
  return frame;
}
size_t WebRtcVideoFrame::CopyToBuffer(uint8* buffer, size_t size) const {
  // Copies the frame data into |buffer| when it fits. Always returns the
  // number of bytes required (0 when the frame is empty).
  if (video_frame_.Buffer() == NULL) {
    return 0;
  }
  const size_t needed = video_frame_.Length();
  if (needed <= size) {
    memcpy(buffer, video_frame_.Buffer(), needed);
  }
  return needed;
}
size_t WebRtcVideoFrame::ConvertToRgbBuffer(uint32 to_fourcc,
                                            uint8* buffer,
                                            size_t size,
                                            size_t pitch_rgb) const {
  // Converts the I420 frame to the RGB format named by |to_fourcc| into
  // |buffer|. Returns the number of bytes required, or 0 on error (empty
  // frame, buffer too small, or unsupported format).
  if (!video_frame_.Buffer()) {
    return 0;
  }
  size_t width = video_frame_.Width();
  size_t height = video_frame_.Height();
  // See http://www.virtualdub.org/blog/pivot/entry.php?id=190 for a good
  // explanation of pitch and why this is the amount of space we need.
  size_t needed = pitch_rgb * (height - 1) + 4 * width;
  if (needed > size) {
    LOG(LS_WARNING) << "RGB buffer is not large enough";
    return 0;
  }
  webrtc::VideoType outgoing_video_type;
  switch (to_fourcc) {
    case FOURCC_ARGB:
      outgoing_video_type = webrtc::kARGB;
      break;
    default:
      // Removed the unreachable `break` that followed this return.
      LOG(LS_WARNING) << "RGB type not supported: " << to_fourcc;
      return 0;
  }
  // The switch either assigned a valid type or returned, so the previous
  // redundant kUnknown check has been dropped.
  webrtc::ConvertFromI420(outgoing_video_type, video_frame_.Buffer(),
                          width, height, buffer);
  return needed;
}
void WebRtcVideoFrame::StretchToPlanes(
    uint8* y, uint8* u, uint8* v,
    int32 dst_pitch_y, int32 dst_pitch_u, int32 dst_pitch_v,
    size_t width, size_t height, bool interpolate, bool crop) const {
  // TODO(ronghuawu): Implement StretchToPlanes. Currently a no-op stub:
  // nothing is written to the destination planes.
}
size_t WebRtcVideoFrame::StretchToBuffer(size_t w, size_t h,
                                         uint8* buffer, size_t size,
                                         bool interpolate,
                                         bool crop) const {
  // Stretches into |buffer| laid out as a w x h I420 image.
  // NOTE(review): the return value is the SOURCE frame's length, not the
  // w*h*3/2 bytes a target-sized I420 image needs — confirm this is intended.
  if (!video_frame_.Buffer()) {
    return 0;
  }
  size_t needed = video_frame_.Length();
  if (needed <= size) {
    // Plane pointers for the packed destination image; chroma planes use
    // rounded-up half dimensions.
    uint8* bufy = buffer;
    uint8* bufu = bufy + w * h;
    uint8* bufv = bufu + ((w + 1) >> 1) * ((h + 1) >> 1);
    // StretchToPlanes is currently unimplemented (TODO in its body), so no
    // pixels are actually written yet.
    StretchToPlanes(bufy, bufu, bufv, w, (w + 1) >> 1, (w + 1) >> 1, w, h,
                    interpolate, crop);
  }
  return needed;
}
void WebRtcVideoFrame::StretchToFrame(VideoFrame* target,
                                      bool interpolate, bool crop) const {
  // Stretches this frame into |target|'s planes and propagates the elapsed
  // time and timestamp. A NULL target is ignored.
  if (target == NULL) {
    return;
  }
  StretchToPlanes(target->GetYPlane(), target->GetUPlane(),
                  target->GetVPlane(), target->GetYPitch(),
                  target->GetUPitch(), target->GetVPitch(),
                  target->GetWidth(), target->GetHeight(),
                  interpolate, crop);
  target->SetElapsedTime(GetElapsedTime());
  target->SetTimeStamp(GetTimeStamp());
}
VideoFrame* WebRtcVideoFrame::Stretch(size_t w, size_t h,
                                      bool interpolate, bool crop) const {
  // TODO(ronghuawu): implement. Always returns NULL for now.
  return NULL;
}
} // namespace cricket

View File

@@ -0,0 +1,97 @@
/*
* libjingle
* Copyright 2004--2011, Google Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
* EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef TALK_SESSION_PHONE_WEBRTCVIDEOFRAME_H_
#define TALK_SESSION_PHONE_WEBRTCVIDEOFRAME_H_
#ifdef WEBRTC_RELATIVE_PATH
#include "common_types.h"
#include "modules/interface/module_common_types.h"
#else
#include "third_party/webrtc/files/include/common_types.h"
#include "third_party/webrtc/files/include/module_common_types.h"
#endif
#include "talk/session/phone/mediachannel.h"
namespace cricket {
// WebRtcVideoFrame only supports I420
// A cricket::VideoFrame backed by a webrtc::VideoFrame holding a single
// packed I420 buffer.
class WebRtcVideoFrame : public VideoFrame {
 public:
  WebRtcVideoFrame();
  ~WebRtcVideoFrame();

  // Takes ownership of |buffer| as the frame's pixel data.
  void Attach(uint8* buffer, size_t buffer_size,
              size_t w, size_t h, int64 elapsed_time, int64 time_stamp);
  // Releases the buffer to the caller, leaving the frame empty.
  void Detach(uint8** buffer, size_t* buffer_size);
  // Allocates a w x h I420 frame filled with black.
  bool InitToBlack(size_t w, size_t h, int64 elapsed_time, int64 time_stamp);
  // True when the frame currently holds pixel data.
  bool HasImage() const { return video_frame_.Buffer() != NULL; }

  virtual size_t GetWidth() const;
  virtual size_t GetHeight() const;
  virtual const uint8* GetYPlane() const;
  virtual const uint8* GetUPlane() const;
  virtual const uint8* GetVPlane() const;
  virtual uint8* GetYPlane();
  virtual uint8* GetUPlane();
  virtual uint8* GetVPlane();
  // Pitches assume a packed I420 layout. NOTE(review): Width() / 2 truncates
  // for odd widths — confirm callers only use even dimensions.
  virtual int32 GetYPitch() const { return video_frame_.Width(); }
  virtual int32 GetUPitch() const { return video_frame_.Width() / 2; }
  virtual int32 GetVPitch() const { return video_frame_.Width() / 2; }
  virtual size_t GetPixelWidth() const { return 1; }
  virtual size_t GetPixelHeight() const { return 1; }
  virtual int64 GetElapsedTime() const { return elapsed_time_; }
  virtual int64 GetTimeStamp() const { return video_frame_.TimeStamp(); }
  virtual void SetElapsedTime(int64 elapsed_time) {
    elapsed_time_ = elapsed_time;
  }
  virtual void SetTimeStamp(int64 time_stamp) {
    video_frame_.SetTimeStamp(time_stamp);
  }

  virtual VideoFrame* Copy() const;
  virtual size_t CopyToBuffer(uint8* buffer, size_t size) const;
  virtual size_t ConvertToRgbBuffer(uint32 to_fourcc, uint8* buffer,
                                    size_t size, size_t pitch_rgb) const;
  virtual void StretchToPlanes(uint8* y, uint8* u, uint8* v,
                               int32 pitchY, int32 pitchU, int32 pitchV,
                               size_t width, size_t height,
                               bool interpolate, bool crop) const;
  virtual size_t StretchToBuffer(size_t w, size_t h, uint8* buffer, size_t size,
                                 bool interpolate, bool crop) const;
  virtual void StretchToFrame(VideoFrame* target, bool interpolate,
                              bool crop) const;
  virtual VideoFrame* Stretch(size_t w, size_t h, bool interpolate,
                              bool crop) const;

 private:
  webrtc::VideoFrame video_frame_;  // Owns the packed I420 pixel buffer.
  int64 elapsed_time_;
};
} // namespace cricket
#endif // TALK_SESSION_PHONE_WEBRTCVIDEOFRAME_H_

View File

@@ -0,0 +1,143 @@
/*
* libjingle
* Copyright 2004--2011, Google Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
* EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef TALK_SESSION_PHONE_WEBRTCVIE_H_
#define TALK_SESSION_PHONE_WEBRTCVIE_H_
#include "talk/base/common.h"
#include "talk/session/phone/webrtccommon.h"
#ifdef WEBRTC_RELATIVE_PATH
#include "common_types.h"
#include "modules/interface/module_common_types.h"
#include "modules/video_capture/main/interface/video_capture.h"
#include "modules/video_render/main/interface/video_render.h"
#include "video_engine/main/interface/vie_base.h"
#include "video_engine/main/interface/vie_capture.h"
#include "video_engine/main/interface/vie_codec.h"
#include "video_engine/main/interface/vie_errors.h"
#include "video_engine/main/interface/vie_image_process.h"
#include "video_engine/main/interface/vie_network.h"
#include "video_engine/main/interface/vie_render.h"
#include "video_engine/main/interface/vie_rtp_rtcp.h"
#else
#include "third_party/webrtc/files/include/common_types.h"
#include "third_party/webrtc/files/include/module_common_types.h"
#include "third_party/webrtc/files/include/video_capture.h"
#include "third_party/webrtc/files/include/video_render.h"
#include "third_party/webrtc/files/include/vie_base.h"
#include "third_party/webrtc/files/include/vie_capture.h"
#include "third_party/webrtc/files/include/vie_codec.h"
#include "third_party/webrtc/files/include/vie_errors.h"
#include "third_party/webrtc/files/include/vie_image_process.h"
#include "third_party/webrtc/files/include/vie_network.h"
#include "third_party/webrtc/files/include/vie_render.h"
#include "third_party/webrtc/files/include/vie_rtp_rtcp.h"
#endif // WEBRTC_RELATIVE_PATH
namespace cricket {
// all tracing macros should go to a common file
// automatically handles lifetime of VideoEngine
class scoped_vie_engine {
public:
explicit scoped_vie_engine(webrtc::VideoEngine* e) : ptr(e) {}
// VERIFY, to ensure that there are no leaks at shutdown
~scoped_vie_engine() {
if (ptr) {
webrtc::VideoEngine::Delete(ptr);
}
}
webrtc::VideoEngine* get() const { return ptr; }
private:
webrtc::VideoEngine* ptr;
};
// scoped_ptr class to handle obtaining and releasing VideoEngine
// interface pointers
template<class T> class scoped_vie_ptr {
public:
explicit scoped_vie_ptr(const scoped_vie_engine& e)
: ptr(T::GetInterface(e.get())) {}
explicit scoped_vie_ptr(T* p) : ptr(p) {}
~scoped_vie_ptr() { if (ptr) ptr->Release(); }
T* operator->() const { return ptr; }
T* get() const { return ptr; }
private:
T* ptr;
};
// Utility class for aggregating the various WebRTC interface.
// Fake implementations can also be injected for testing.
class ViEWrapper {
 public:
  // Creates a real VideoEngine and obtains all sub-interfaces from it.
  ViEWrapper()
      : engine_(webrtc::VideoEngine::Create()),
        base_(engine_), codec_(engine_), capture_(engine_),
        network_(engine_), render_(engine_), rtp_(engine_),
        image_(engine_) {
  }

  // Injects pre-built (e.g. fake) interfaces for testing; no engine is
  // created in this case.
  ViEWrapper(webrtc::ViEBase* base, webrtc::ViECodec* codec,
             webrtc::ViECapture* capture, webrtc::ViENetwork* network,
             webrtc::ViERender* render, webrtc::ViERTP_RTCP* rtp,
             webrtc::ViEImageProcess* image)
      : engine_(NULL),
        base_(base),
        codec_(codec),
        capture_(capture),
        network_(network),
        render_(render),
        rtp_(rtp),
        image_(image) {
  }

  virtual ~ViEWrapper() {}

  // Interface accessors.
  webrtc::VideoEngine* engine() { return engine_.get(); }
  webrtc::ViEBase* base() { return base_.get(); }
  webrtc::ViECodec* codec() { return codec_.get(); }
  webrtc::ViECapture* capture() { return capture_.get(); }
  webrtc::ViENetwork* network() { return network_.get(); }
  webrtc::ViERender* render() { return render_.get(); }
  webrtc::ViERTP_RTCP* rtp() { return rtp_.get(); }
  // NOTE(review): returns the image-process interface despite the name
  // "sync" — confirm whether this accessor was meant to be named image().
  webrtc::ViEImageProcess* sync() { return image_.get(); }
  // Last error reported by the underlying engine.
  int error() { return base_->LastError(); }

 private:
  scoped_vie_engine engine_;
  scoped_vie_ptr<webrtc::ViEBase> base_;
  scoped_vie_ptr<webrtc::ViECodec> codec_;
  scoped_vie_ptr<webrtc::ViECapture> capture_;
  scoped_vie_ptr<webrtc::ViENetwork> network_;
  scoped_vie_ptr<webrtc::ViERender> render_;
  scoped_vie_ptr<webrtc::ViERTP_RTCP> rtp_;
  scoped_vie_ptr<webrtc::ViEImageProcess> image_;
};
}
#endif // TALK_SESSION_PHONE_WEBRTCVIE_H_

View File

@@ -0,0 +1,190 @@
/*
* libjingle
* Copyright 2004--2011, Google Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
* EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef TALK_SESSION_PHONE_WEBRTCVOE_H_
#define TALK_SESSION_PHONE_WEBRTCVOE_H_
#include "talk/base/common.h"
#include "talk/session/phone/webrtccommon.h"
#ifdef WEBRTC_RELATIVE_PATH
#include "common_types.h"
#include "modules/audio_device/main/interface/audio_device.h"
#include "voice_engine/main/interface/voe_audio_processing.h"
#include "voice_engine/main/interface/voe_base.h"
#include "voice_engine/main/interface/voe_codec.h"
#include "voice_engine/main/interface/voe_dtmf.h"
#include "voice_engine/main/interface/voe_errors.h"
#include "voice_engine/main/interface/voe_file.h"
#include "voice_engine/main/interface/voe_hardware.h"
#include "voice_engine/main/interface/voe_neteq_stats.h"
#include "voice_engine/main/interface/voe_network.h"
#include "voice_engine/main/interface/voe_rtp_rtcp.h"
#include "voice_engine/main/interface/voe_video_sync.h"
#include "voice_engine/main/interface/voe_volume_control.h"
#else
#include "third_party/webrtc/files/include/audio_device.h"
#include "third_party/webrtc/files/include/common_types.h"
#include "third_party/webrtc/files/include/voe_audio_processing.h"
#include "third_party/webrtc/files/include/voe_base.h"
#include "third_party/webrtc/files/include/voe_codec.h"
#include "third_party/webrtc/files/include/voe_dtmf.h"
#include "third_party/webrtc/files/include/voe_errors.h"
#include "third_party/webrtc/files/include/voe_file.h"
#include "third_party/webrtc/files/include/voe_hardware.h"
#include "third_party/webrtc/files/include/voe_neteq_stats.h"
#include "third_party/webrtc/files/include/voe_network.h"
#include "third_party/webrtc/files/include/voe_rtp_rtcp.h"
#include "third_party/webrtc/files/include/voe_video_sync.h"
#include "third_party/webrtc/files/include/voe_volume_control.h"
#endif // WEBRTC_RELATIVE_PATH
namespace cricket {
// automatically handles lifetime of WebRtc VoiceEngine
class scoped_voe_engine {
public:
explicit scoped_voe_engine(webrtc::VoiceEngine* e) : ptr(e) {}
// VERIFY, to ensure that there are no leaks at shutdown
~scoped_voe_engine() { if (ptr) VERIFY(webrtc::VoiceEngine::Delete(ptr)); }
// Releases the current pointer.
void reset() {
if (ptr) {
VERIFY(webrtc::VoiceEngine::Delete(ptr));
ptr = NULL;
}
}
webrtc::VoiceEngine* get() const { return ptr; }
private:
webrtc::VoiceEngine* ptr;
};
// scoped_ptr class to handle obtaining and releasing WebRTC interface pointers
// scoped_ptr-style holder for a WebRTC sub-API interface pointer. The
// interface is obtained via T::GetInterface() (or supplied directly) and
// Release()d when the holder is destroyed or reset().
template<class T>
class scoped_voe_ptr {
 public:
  explicit scoped_voe_ptr(const scoped_voe_engine& e)
      : interface_(T::GetInterface(e.get())) {}
  explicit scoped_voe_ptr(T* p) : interface_(p) {}
  ~scoped_voe_ptr() { reset(); }
  T* operator->() const { return interface_; }
  T* get() const { return interface_; }
  // Releases the held interface (if any) and clears the pointer.
  void reset() {
    if (interface_ != NULL) {
      interface_->Release();
      interface_ = NULL;
    }
  }
 private:
  T* interface_;
};
// Utility class for aggregating the various WebRTC interface.
// Fake implementations can also be injected for testing.
class VoEWrapper {
 public:
  // Creates a real VoiceEngine instance and acquires all of its sub-APIs.
  VoEWrapper()
      : engine_(webrtc::VoiceEngine::Create()),
        processing_(engine_),
        base_(engine_),
        codec_(engine_),
        dtmf_(engine_),
        file_(engine_),
        hw_(engine_),
        neteq_(engine_),
        network_(engine_),
        rtp_(engine_),
        sync_(engine_),
        volume_(engine_) {
  }
  // Injects externally supplied (e.g. fake) sub-API pointers. No engine is
  // created, so engine() returns NULL for wrappers built this way.
  VoEWrapper(webrtc::VoEAudioProcessing* processing,
             webrtc::VoEBase* base,
             webrtc::VoECodec* codec,
             webrtc::VoEDtmf* dtmf,
             webrtc::VoEFile* file,
             webrtc::VoEHardware* hw,
             webrtc::VoENetEqStats* neteq,
             webrtc::VoENetwork* network,
             webrtc::VoERTP_RTCP* rtp,
             webrtc::VoEVideoSync* sync,
             webrtc::VoEVolumeControl* volume)
      : engine_(NULL),
        processing_(processing),
        base_(base),
        codec_(codec),
        dtmf_(dtmf),
        file_(file),
        hw_(hw),
        neteq_(neteq),
        network_(network),
        rtp_(rtp),
        sync_(sync),
        volume_(volume) {
  }
  ~VoEWrapper() {}

  // Accessors for the engine and each sub-API. None of these transfer
  // ownership.
  webrtc::VoiceEngine* engine() const { return engine_.get(); }
  webrtc::VoEAudioProcessing* processing() const { return processing_.get(); }
  webrtc::VoEBase* base() const { return base_.get(); }
  webrtc::VoECodec* codec() const { return codec_.get(); }
  webrtc::VoEDtmf* dtmf() const { return dtmf_.get(); }
  webrtc::VoEFile* file() const { return file_.get(); }
  webrtc::VoEHardware* hw() const { return hw_.get(); }
  webrtc::VoENetEqStats* neteq() const { return neteq_.get(); }
  webrtc::VoENetwork* network() const { return network_.get(); }
  webrtc::VoERTP_RTCP* rtp() const { return rtp_.get(); }
  webrtc::VoEVideoSync* sync() const { return sync_.get(); }
  webrtc::VoEVolumeControl* volume() const { return volume_.get(); }
  // Last error reported by the underlying VoEBase API.
  int error() { return base_->LastError(); }

 private:
  // NOTE: engine_ must remain declared before the sub-API members, because
  // the default constructor initializes each sub-API from it and members are
  // initialized in declaration order.
  scoped_voe_engine engine_;
  scoped_voe_ptr<webrtc::VoEAudioProcessing> processing_;
  scoped_voe_ptr<webrtc::VoEBase> base_;
  scoped_voe_ptr<webrtc::VoECodec> codec_;
  scoped_voe_ptr<webrtc::VoEDtmf> dtmf_;
  scoped_voe_ptr<webrtc::VoEFile> file_;
  scoped_voe_ptr<webrtc::VoEHardware> hw_;
  scoped_voe_ptr<webrtc::VoENetEqStats> neteq_;
  scoped_voe_ptr<webrtc::VoENetwork> network_;
  scoped_voe_ptr<webrtc::VoERTP_RTCP> rtp_;
  scoped_voe_ptr<webrtc::VoEVideoSync> sync_;
  scoped_voe_ptr<webrtc::VoEVolumeControl> volume_;
};
// Adds indirection to static WebRtc functions, allowing them to be mocked.
class VoETraceWrapper {
 public:
  virtual ~VoETraceWrapper() {}
  // Forwards to webrtc::VoiceEngine::SetTraceFilter().
  virtual int SetTraceFilter(const unsigned int filter) {
    return webrtc::VoiceEngine::SetTraceFilter(filter);
  }
  // Forwards to webrtc::VoiceEngine::SetTraceFile(). The file name must be
  // UTF-8 encoded.
  virtual int SetTraceFile(const char* filename_utf8) {
    return webrtc::VoiceEngine::SetTraceFile(filename_utf8);
  }
  // Forwards to webrtc::VoiceEngine::SetTraceCallback().
  virtual int SetTraceCallback(webrtc::TraceCallback* callback) {
    return webrtc::VoiceEngine::SetTraceCallback(callback);
  }
};
}
#endif // TALK_SESSION_PHONE_WEBRTCVOE_H_

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,320 @@
/*
* libjingle
* Copyright 2004--2011, Google Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
* EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef TALK_SESSION_PHONE_WEBRTCVOICEENGINE_H_
#define TALK_SESSION_PHONE_WEBRTCVOICEENGINE_H_
#include <map>
#include <set>
#include <string>
#include <vector>
#include "talk/base/buffer.h"
#include "talk/base/byteorder.h"
#include "talk/base/logging.h"
#include "talk/base/scoped_ptr.h"
#include "talk/base/stream.h"
#include "talk/session/phone/channel.h"
#include "talk/session/phone/mediaengine.h"
#include "talk/session/phone/rtputils.h"
#include "talk/session/phone/webrtccommon.h"
namespace cricket {
// WebRtcSoundclipStream is an adapter object that allows a memory stream to be
// passed into WebRtc, and support looping.
class WebRtcSoundclipStream : public webrtc::InStream {
 public:
  // Wraps |buf|/|len| in a memory stream; looping is enabled by default.
  // NOTE(review): assumes talk_base::MemoryStream does not copy the buffer —
  // confirm the caller keeps |buf| alive for the stream's lifetime.
  WebRtcSoundclipStream(const char* buf, size_t len)
      : mem_(buf, len),
        loop_(true) {
  }
  // When looping is enabled, Read() is expected to wrap back to the start of
  // the clip instead of reporting end-of-stream.
  void set_loop(bool loop) { loop_ = loop; }
  // webrtc::InStream implementation (defined out of line).
  virtual int Read(void* buf, int len);
  virtual int Rewind();

 private:
  talk_base::MemoryStream mem_;
  bool loop_;  // restart from the beginning at end-of-stream?
};
// WebRtcMonitorStream is used to monitor a stream coming from WebRtc.
// For now we just dump the data.
class WebRtcMonitorStream : public webrtc::OutStream {
  // Accepts and discards all monitor data, always reporting success.
  virtual bool Write(const void *buf, int len) {
    return true;
  }
};
class AudioDeviceModule;
class VoETraceWrapper;
class VoEWrapper;
class WebRtcSoundclipMedia;
class WebRtcVoiceMediaChannel;
// WebRtcVoiceEngine is a class to be used with CompositeMediaEngine.
// It uses the WebRtc VoiceEngine library for audio handling.
class WebRtcVoiceEngine
    : public webrtc::VoiceEngineObserver,
      public webrtc::TraceCallback {
 public:
  WebRtcVoiceEngine();
  // Constructs with externally supplied audio device modules: |adm| for the
  // primary engine and |adm_sc| for the soundclip engine.
  WebRtcVoiceEngine(webrtc::AudioDeviceModule* adm,
                    webrtc::AudioDeviceModule* adm_sc);
  // Dependency injection for testing.
  WebRtcVoiceEngine(VoEWrapper* voe_wrapper,
                    VoEWrapper* voe_wrapper_sc,
                    VoETraceWrapper* tracing);
  ~WebRtcVoiceEngine();
  // Engine setup and teardown.
  bool Init();
  void Terminate();
  int GetCapabilities();
  // Factory methods for media channels and soundclip playback objects.
  VoiceMediaChannel* CreateChannel();
  SoundclipMedia* CreateSoundclip();
  bool SetOptions(int options);
  // Selects the capture (|in_device|) and playout (|out_device|) devices.
  bool SetDevices(const Device* in_device, const Device* out_device);
  // Speaker volume control.
  bool GetOutputVolume(int* level);
  bool SetOutputVolume(int level);
  // Microphone input level.
  int GetInputLevel();
  bool SetLocalMonitor(bool enable);
  // Codec enumeration and lookup (cricket codec <-> WebRtc CodecInst).
  const std::vector<AudioCodec>& codecs();
  bool FindCodec(const AudioCodec& codec);
  bool FindWebRtcCodec(const AudioCodec& codec, webrtc::CodecInst* gcodec);
  void SetLogging(int min_sev, const char* filter);
  // For tracking WebRtc channels. Needed because we have to pause them
  // all when switching devices.
  // May only be called by WebRtcVoiceMediaChannel.
  void RegisterChannel(WebRtcVoiceMediaChannel *channel);
  void UnregisterChannel(WebRtcVoiceMediaChannel *channel);
  // May only be called by WebRtcSoundclipMedia.
  void RegisterSoundclip(WebRtcSoundclipMedia *channel);
  void UnregisterSoundclip(WebRtcSoundclipMedia *channel);
  // Called by WebRtcVoiceMediaChannel to set a gain offset from
  // the default AGC target level.
  bool AdjustAgcLevel(int delta);
  // Called by WebRtcVoiceMediaChannel to configure echo cancellation
  // and noise suppression modes.
  bool SetConferenceMode(bool enable);
  // Accessors for the primary and soundclip engine wrappers.
  VoEWrapper* voe() { return voe_wrapper_.get(); }
  VoEWrapper* voe_sc() { return voe_wrapper_sc_.get(); }
  int GetLastEngineError();
 private:
  typedef std::vector<WebRtcSoundclipMedia *> SoundclipList;
  typedef std::vector<WebRtcVoiceMediaChannel *> ChannelList;
  // Name/clockrate pair used to rank supported codecs.
  struct CodecPref {
    const char* name;
    int clockrate;
  };
  void Construct();
  bool InitInternal();
  // Applies the current log level/filter to the WebRtc trace facility.
  void ApplyLogging();
  // webrtc::TraceCallback implementation.
  virtual void Print(const webrtc::TraceLevel level,
                     const char* trace_string, const int length);
  // webrtc::VoiceEngineObserver implementation.
  virtual void CallbackOnError(const int channel, const int errCode);
  static int GetCodecPreference(const char *name, int clockrate);
  // Given the device type, name, and id, find device id. Return true and
  // set the output parameter rtc_id if successful.
  bool FindWebRtcAudioDeviceId(
      bool is_input, const std::string& dev_name, int dev_id, int* rtc_id);
  bool FindChannelAndSsrc(int channel_num,
                          WebRtcVoiceMediaChannel** channel,
                          uint32* ssrc) const;
  bool ChangeLocalMonitor(bool enable);
  bool PauseLocalMonitor();
  bool ResumeLocalMonitor();
  static const int kDefaultLogSeverity = talk_base::LS_WARNING;
  static const CodecPref kCodecPrefs[];
  // The primary instance of WebRtc VoiceEngine.
  talk_base::scoped_ptr<VoEWrapper> voe_wrapper_;
  // A secondary instance, for playing out soundclips (on the 'ring' device).
  talk_base::scoped_ptr<VoEWrapper> voe_wrapper_sc_;
  talk_base::scoped_ptr<VoETraceWrapper> tracing_;
  // The external audio device manager
  webrtc::AudioDeviceModule* adm_;
  webrtc::AudioDeviceModule* adm_sc_;
  int log_level_;
  bool is_dumping_aec_;
  std::vector<AudioCodec> codecs_;
  bool desired_local_monitor_enable_;
  talk_base::scoped_ptr<WebRtcMonitorStream> monitor_;
  SoundclipList soundclips_;
  ChannelList channels_;
  // channels_ can be read from WebRtc callback thread. We need a lock on that
  // callback as well as the RegisterChannel/UnregisterChannel.
  talk_base::CriticalSection channels_cs_;
  webrtc::AgcConfig default_agc_config_;
};
// WebRtcMediaChannel is a class that implements the common WebRtc channel
// functionality.
template <class T, class E>
class WebRtcMediaChannel : public T, public webrtc::Transport {
 public:
  WebRtcMediaChannel(E *engine, int channel)
      : engine_(engine),
        voe_channel_(channel),
        sequence_number_(-1) {
  }

  // The owning engine and the underlying VoiceEngine channel id.
  E *engine() { return engine_; }
  int voe_channel() const { return voe_channel_; }
  // True when a real VoiceEngine channel id (not the -1 sentinel) is held.
  bool valid() const { return voe_channel_ != -1; }

 protected:
  // implements Transport interface: invoked by WebRtc with an outgoing RTP
  // packet to hand to the network.
  virtual int SendPacket(int channel, const void *data, int len) {
    if (T::network_interface_ == NULL) {
      return -1;
    }

    // We need to store the sequence number to be able to pick up
    // the same sequence when the device is restarted.
    // TODO(oja): Remove when WebRtc has fixed the problem.
    int seq;
    if (!GetRtpSeqNum(data, len, &seq)) {
      return -1;
    }
    if (sequence_number() == -1) {
      LOG(INFO) << "WebRtcVoiceMediaChannel sends first packet seqnum="
                << seq;
    }
    sequence_number_ = seq;

    talk_base::Buffer packet(data, len, kMaxRtpPacketLen);
    return T::network_interface_->SendPacket(&packet) ? len : -1;
  }

  // implements Transport interface: invoked by WebRtc with an outgoing RTCP
  // packet.
  virtual int SendRTCPPacket(int channel, const void *data, int len) {
    if (T::network_interface_ == NULL) {
      return -1;
    }
    talk_base::Buffer packet(data, len, kMaxRtpPacketLen);
    return T::network_interface_->SendRtcp(&packet) ? len : -1;
  }

  // Last RTP sequence number handed to the transport, or -1 if none yet.
  int sequence_number() const { return sequence_number_; }

 private:
  E *engine_;
  int voe_channel_;
  int sequence_number_;
};
// WebRtcVoiceMediaChannel is an implementation of VoiceMediaChannel that uses
// WebRtc Voice Engine.
class WebRtcVoiceMediaChannel
    : public WebRtcMediaChannel<VoiceMediaChannel,
                                WebRtcVoiceEngine> {
 public:
  explicit WebRtcVoiceMediaChannel(WebRtcVoiceEngine *engine);
  virtual ~WebRtcVoiceMediaChannel();
  virtual bool SetOptions(int options);
  // Codec and RTP header extension negotiation.
  virtual bool SetRecvCodecs(const std::vector<AudioCodec> &codecs);
  virtual bool SetSendCodecs(const std::vector<AudioCodec> &codecs);
  virtual bool SetRecvRtpHeaderExtensions(
      const std::vector<RtpHeaderExtension>& extensions);
  virtual bool SetSendRtpHeaderExtensions(
      const std::vector<RtpHeaderExtension>& extensions);
  // Playout/send control. The Pause/Resume variants support the engine's
  // channel tracking, which pauses all channels when switching devices.
  virtual bool SetPlayout(bool playout);
  bool PausePlayout();
  bool ResumePlayout();
  virtual bool SetSend(SendFlags send);
  bool PauseSend();
  bool ResumeSend();
  // Receive-stream management for multiple sources.
  virtual bool AddStream(uint32 ssrc);
  virtual bool RemoveStream(uint32 ssrc);
  virtual bool GetActiveStreams(AudioInfo::StreamList* actives);
  virtual int GetOutputLevel();
  // Ringback tone playback.
  virtual bool SetRingbackTone(const char *buf, int len);
  virtual bool PlayRingbackTone(uint32 ssrc, bool play, bool loop);
  virtual bool PressDTMF(int event, bool playout);
  // Incoming RTP/RTCP packet delivery from the network.
  virtual void OnPacketReceived(talk_base::Buffer* packet);
  virtual void OnRtcpReceived(talk_base::Buffer* packet);
  virtual void SetSendSsrc(uint32 id);
  virtual bool SetRtcpCName(const std::string& cname);
  virtual bool Mute(bool mute);
  // Send bandwidth control is not supported; always returns false.
  virtual bool SetSendBandwidth(bool autobw, int bps) { return false; }
  virtual bool GetStats(VoiceMediaInfo* info);
  // Gets last reported error from WebRtc voice engine. This should only be
  // called in response to a failure.
  virtual void GetLastMediaError(uint32* ssrc,
                                 VoiceMediaChannel::Error* error);
  bool FindSsrc(int channel_num, uint32* ssrc);
  void OnError(uint32 ssrc, int error);
 protected:
  int GetLastEngineError() { return engine()->GetLastEngineError(); }
  int GetChannel(uint32 ssrc);
  int GetOutputLevel(int channel);
  bool GetRedSendCodec(const AudioCodec& red_codec,
                       const std::vector<AudioCodec>& all_codecs,
                       webrtc::CodecInst* send_codec);
  bool EnableRtcp(int channel);
  bool SetPlayout(int channel, bool playout);
  static uint32 ParseSsrc(const void* data, size_t len, bool rtcp);
  static Error WebRtcErrorToChannelError(int err_code);
 private:
  // Tandberg-bridged conferences require a -10dB gain adjustment,
  // which is actually +10 in AgcConfig.targetLeveldBOv
  static const int kTandbergDbAdjustment = 10;
  bool ChangePlayout(bool playout);
  bool ChangeSend(SendFlags send);
  typedef std::map<uint32, int> ChannelMap;
  talk_base::scoped_ptr<WebRtcSoundclipStream> ringback_tone_;
  std::set<int> ringback_channels_;  // channels playing ringback
  int channel_options_;
  bool agc_adjusted_;
  bool dtmf_allowed_;
  // Desired vs. actual playout/send state — presumably so Pause/Resume can
  // restore the requested state; confirm in the implementation file.
  bool desired_playout_;
  bool playout_;
  SendFlags desired_send_;
  SendFlags send_;
  ChannelMap mux_channels_;  // for multiple sources
  // mux_channels_ can be read from WebRtc callback thread. Accesses off the
  // WebRtc thread must be synchronized with edits on the worker thread. Reads
  // on the worker thread are ok.
  mutable talk_base::CriticalSection mux_channels_cs_;
};
}
#endif // TALK_SESSION_PHONE_WEBRTCVOICEENGINE_H_