commit 2643805a20
parent 3f170dd309
git-svn-id: http://webrtc.googlecode.com/svn/trunk@5590 4adac7df-926f-26a2-2b94-8c16560cd09d
@@ -58,7 +58,11 @@ void TrackHandler::OnChanged() {
 
 LocalAudioSinkAdapter::LocalAudioSinkAdapter() : sink_(NULL) {}
 
-LocalAudioSinkAdapter::~LocalAudioSinkAdapter() {}
+LocalAudioSinkAdapter::~LocalAudioSinkAdapter() {
+  talk_base::CritScope lock(&lock_);
+  if (sink_)
+    sink_->OnClose();
+}
 
 void LocalAudioSinkAdapter::OnData(const void* audio_data,
                                    int bits_per_sample,
@@ -56,6 +56,7 @@ class FakePeriodicVideoCapturer : public cricket::FakeVideoCapturer {
   virtual cricket::CaptureState Start(const cricket::VideoFormat& format) {
     cricket::CaptureState state = FakeVideoCapturer::Start(format);
     if (state != cricket::CS_FAILED) {
+      set_enable_video_adapter(false);  // Simplify testing.
       talk_base::Thread::Current()->Post(this, MSG_CREATEFRAME);
     }
     return state;
@@ -249,7 +249,11 @@ class WebRtcSessionCreateSDPObserverForTest
 
 class FakeAudioRenderer : public cricket::AudioRenderer {
  public:
-  FakeAudioRenderer() : channel_id_(-1) {}
+  FakeAudioRenderer() : channel_id_(-1), sink_(NULL) {}
+  virtual ~FakeAudioRenderer() {
+    if (sink_)
+      sink_->OnClose();
+  }
   virtual void AddChannel(int channel_id) OVERRIDE {
     ASSERT(channel_id_ == -1);
@@ -259,10 +263,15 @@ class FakeAudioRenderer : public cricket::AudioRenderer {
     ASSERT(channel_id == channel_id_);
     channel_id_ = -1;
   }
+  virtual void SetSink(Sink* sink) OVERRIDE {
+    sink_ = sink;
+  }
 
   int channel_id() const { return channel_id_; }
+  cricket::AudioRenderer::Sink* sink() const { return sink_; }
  private:
   int channel_id_;
+  cricket::AudioRenderer::Sink* sink_;
 };
 
 class WebRtcSessionTest : public testing::Test {
@@ -2187,13 +2196,39 @@ TEST_F(WebRtcSessionTest, SetAudioSend) {
   EXPECT_TRUE(channel->IsStreamMuted(send_ssrc));
   EXPECT_FALSE(channel->options().echo_cancellation.IsSet());
   EXPECT_EQ(0, renderer->channel_id());
+  EXPECT_TRUE(renderer->sink() != NULL);
 
+  // This will trigger SetSink(NULL) to the |renderer|.
   session_->SetAudioSend(send_ssrc, true, options, NULL);
   EXPECT_FALSE(channel->IsStreamMuted(send_ssrc));
   bool value;
   EXPECT_TRUE(channel->options().echo_cancellation.Get(&value));
   EXPECT_TRUE(value);
   EXPECT_EQ(-1, renderer->channel_id());
+  EXPECT_TRUE(renderer->sink() == NULL);
+}
+
+TEST_F(WebRtcSessionTest, AudioRendererForLocalStream) {
+  Init(NULL);
+  mediastream_signaling_.SendAudioVideoStream1();
+  CreateAndSetRemoteOfferAndLocalAnswer();
+  cricket::FakeVoiceMediaChannel* channel = media_engine_->GetVoiceChannel(0);
+  ASSERT_TRUE(channel != NULL);
+  ASSERT_EQ(1u, channel->send_streams().size());
+  uint32 send_ssrc = channel->send_streams()[0].first_ssrc();
+
+  talk_base::scoped_ptr<FakeAudioRenderer> renderer(new FakeAudioRenderer());
+  cricket::AudioOptions options;
+  session_->SetAudioSend(send_ssrc, true, options, renderer.get());
+  EXPECT_TRUE(renderer->sink() != NULL);
+
+  // Delete the |renderer| and it will trigger OnClose() to the sink, and this
+  // will invalidate the |renderer_| pointer in the sink and prevent getting a
+  // SetSink(NULL) callback afterwards.
+  renderer.reset();
+
+  // This will trigger SetSink(NULL) if no OnClose() callback.
+  session_->SetAudioSend(send_ssrc, true, options, NULL);
 }
 
 TEST_F(WebRtcSessionTest, SetVideoPlayout) {
@@ -49,8 +49,6 @@ struct PacketTimeUpdateParams {
   std::vector<char> srtp_auth_key;  // Authentication key.
   int srtp_auth_tag_len;            // Authentication tag length.
   int64 srtp_packet_index;          // Required for Rtp Packet authentication.
-  int payload_len;                  // Raw payload length, before any wrapping
-                                    // like TURN/GTURN.
 };
 
 // This structure holds meta information for the packet which is about to send
@@ -35,11 +35,16 @@ class AudioRenderer {
  public:
   class Sink {
    public:
+    // Callback to receive data from the AudioRenderer.
     virtual void OnData(const void* audio_data,
                         int bits_per_sample,
                         int sample_rate,
                         int number_of_channels,
                         int number_of_frames) = 0;
+
+    // Called when the AudioRenderer is going away.
+    virtual void OnClose() = 0;
+
    protected:
     virtual ~Sink() {}
   };
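For orientation only (not part of the diff): a minimal sketch of what the Sink contract above implies for an implementer — OnData() delivers PCM from the renderer, and OnClose() warns that the renderer is about to go away so the sink must stop referencing it. The CountingSink class and the include path are assumptions, not code from this change.

#include "talk/media/base/audiorenderer.h"  // assumed header for cricket::AudioRenderer

// Hypothetical sink: counts delivered frames and notes renderer teardown.
class CountingSink : public cricket::AudioRenderer::Sink {
 public:
  CountingSink() : frames_(0), closed_(false) {}

  virtual void OnData(const void* audio_data,
                      int bits_per_sample,
                      int sample_rate,
                      int number_of_channels,
                      int number_of_frames) {
    frames_ += number_of_frames;  // Forward or consume the PCM here.
  }

  // After this returns, the AudioRenderer that fed us must not be touched.
  virtual void OnClose() { closed_ = true; }

  int frames() const { return frames_; }
  bool closed() const { return closed_; }

 private:
  int frames_;
  bool closed_;
};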
@@ -316,17 +316,18 @@ class FakeVoiceMediaChannel : public RtpHelper<VoiceMediaChannel> {
     return true;
   }
   virtual bool SetLocalRenderer(uint32 ssrc, AudioRenderer* renderer) {
-    std::map<uint32, AudioRenderer*>::iterator it = local_renderers_.find(ssrc);
+    std::map<uint32, VoiceChannelAudioSink*>::iterator it =
+        local_renderers_.find(ssrc);
     if (renderer) {
       if (it != local_renderers_.end()) {
-        ASSERT(it->second == renderer);
+        ASSERT(it->second->renderer() == renderer);
       } else {
-        local_renderers_.insert(std::make_pair(ssrc, renderer));
-        renderer->AddChannel(0);
+        local_renderers_.insert(std::make_pair(
+            ssrc, new VoiceChannelAudioSink(renderer)));
       }
     } else {
       if (it != local_renderers_.end()) {
-        it->second->RemoveChannel(0);
+        delete it->second;
         local_renderers_.erase(it);
       } else {
         return false;
@@ -419,6 +420,34 @@ class FakeVoiceMediaChannel : public RtpHelper<VoiceMediaChannel> {
     double left, right;
   };
+
+  class VoiceChannelAudioSink : public AudioRenderer::Sink {
+   public:
+    explicit VoiceChannelAudioSink(AudioRenderer* renderer)
+        : renderer_(renderer) {
+      renderer_->AddChannel(0);
+      renderer_->SetSink(this);
+    }
+    virtual ~VoiceChannelAudioSink() {
+      if (renderer_) {
+        renderer_->RemoveChannel(0);
+        renderer_->SetSink(NULL);
+      }
+    }
+    virtual void OnData(const void* audio_data,
+                        int bits_per_sample,
+                        int sample_rate,
+                        int number_of_channels,
+                        int number_of_frames) OVERRIDE {}
+    virtual void OnClose() OVERRIDE {
+      renderer_ = NULL;
+    }
+    AudioRenderer* renderer() const { return renderer_; }
+
+   private:
+    AudioRenderer* renderer_;
+  };
+
 
   FakeVoiceEngine* engine_;
   std::vector<AudioCodec> recv_codecs_;
   std::vector<AudioCodec> send_codecs_;
@@ -430,7 +459,7 @@ class FakeVoiceMediaChannel : public RtpHelper<VoiceMediaChannel> {
   bool ringback_tone_loop_;
   int time_since_last_typing_;
   AudioOptions options_;
-  std::map<uint32, AudioRenderer*> local_renderers_;
+  std::map<uint32, VoiceChannelAudioSink*> local_renderers_;
   std::map<uint32, AudioRenderer*> remote_renderers_;
 };
 
@@ -421,7 +421,7 @@ CoordinatedVideoAdapter::CoordinatedVideoAdapter()
       view_desired_interval_(0),
       encoder_desired_num_pixels_(INT_MAX),
       cpu_desired_num_pixels_(INT_MAX),
-      adapt_reason_(0),
+      adapt_reason_(ADAPTREASON_NONE),
       system_load_average_(kCpuLoadInitialAverage) {
 }
 
@@ -636,7 +636,7 @@ bool CoordinatedVideoAdapter::AdaptToMinimumFormat(int* new_width,
   }
   int old_num_pixels = GetOutputNumPixels();
   int min_num_pixels = INT_MAX;
-  adapt_reason_ = 0;
+  adapt_reason_ = ADAPTREASON_NONE;
 
   // Reduce resolution based on encoder bandwidth (GD).
   if (encoder_desired_num_pixels_ &&
@@ -677,7 +677,7 @@ bool CoordinatedVideoAdapter::AdaptToMinimumFormat(int* new_width,
       static_cast<int>(input.height * scale + .5f);
   }
   if (scale == 1.0f) {
-    adapt_reason_ = 0;
+    adapt_reason_ = ADAPTREASON_NONE;
   }
   *new_width = new_output.width = static_cast<int>(input.width * scale + .5f);
   *new_height = new_output.height = static_cast<int>(input.height * scale +
@@ -111,6 +111,7 @@ class CoordinatedVideoAdapter
  public:
   enum AdaptRequest { UPGRADE, KEEP, DOWNGRADE };
   enum AdaptReasonEnum {
+    ADAPTREASON_NONE = 0,
     ADAPTREASON_CPU = 1,
     ADAPTREASON_BANDWIDTH = 2,
     ADAPTREASON_VIEW = 4
@@ -111,6 +111,7 @@ void VideoCapturer::Construct() {
   screencast_max_pixels_ = 0;
   muted_ = false;
   black_frame_count_down_ = kNumBlackFramesOnMute;
+  enable_video_adapter_ = true;
 }
 
 const std::vector<VideoFormat>* VideoCapturer::GetSupportedFormats() const {
@@ -477,9 +478,9 @@ void VideoCapturer::OnFrameCaptured(VideoCapturer*,
   }
 
   VideoFrame* adapted_frame = &i420_frame;
-  if (!SignalAdaptFrame.is_empty() && !IsScreencast()) {
+  if (enable_video_adapter_ && !IsScreencast()) {
     VideoFrame* out_frame = NULL;
-    SignalAdaptFrame(this, adapted_frame, &out_frame);
+    video_adapter_.AdaptFrame(adapted_frame, &out_frame);
     if (!out_frame) {
       return;  // VideoAdapter dropped the frame.
     }
@@ -37,6 +37,7 @@
 #include "talk/base/scoped_ptr.h"
 #include "talk/base/sigslot.h"
 #include "talk/base/thread.h"
+#include "talk/media/base/videoadapter.h"
 #include "talk/media/base/videocommon.h"
 #include "talk/media/devices/devicemanager.h"
 
@@ -97,12 +98,10 @@ struct CapturedFrame {
 // capturing. The subclasses implement the video capturer for various types of
 // capturers and various platforms.
 //
-// The captured frames may need to be adapted (for example, cropping). Adaptors
-// can be registered to the capturer or applied externally to the capturer.
-// If the adaptor is needed, it acts as the downstream of VideoCapturer, adapts
-// the captured frames, and delivers the adapted frames to other components
-// such as the encoder. Effects can also be registered to the capturer or
-// applied externally.
+// The captured frames may need to be adapted (for example, cropping).
+// Video adaptation is built into and enabled by default. After a frame has
+// been captured from the device, it is sent to the video adapter, then video
+// processors, then out to the encoder.
 //
 // Programming model:
 // Create an object of a subclass of VideoCapturer
@@ -111,6 +110,7 @@ struct CapturedFrame {
 // SignalFrameCaptured.connect()
 // Find the capture format for Start() by either calling GetSupportedFormats()
 // and selecting one of the supported or calling GetBestCaptureFormat().
+// video_adapter()->OnOutputFormatRequest(desired_encoding_format)
 // Start()
 // GetCaptureFormat() optionally
 // Stop()
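For orientation only (not part of the diff): a sketch of the programming model listed in the comment above, now that adaptation lives inside the capturer. The listener class, the GetBestCaptureFormat() signature, and the SignalVideoFrame name are assumptions based on the surrounding API, not text from this change.

// Hypothetical driver following the steps in the comment above.
class FrameListener : public sigslot::has_slots<> {
 public:
  void OnVideoFrame(cricket::VideoCapturer* capturer,
                    const cricket::VideoFrame* frame) {
    // Consume the adapted frame, e.g. hand it to an encoder.
  }
};

void RunCapture(cricket::VideoCapturer* capturer, FrameListener* listener) {
  // Connect a consumer of the (possibly adapted) frames.
  capturer->SignalVideoFrame.connect(listener, &FrameListener::OnVideoFrame);

  // Find the capture format for Start().
  cricket::VideoFormat desired(640, 480,
                               cricket::VideoFormat::FpsToInterval(30),
                               cricket::FOURCC_I420);
  cricket::VideoFormat best;
  if (!capturer->GetBestCaptureFormat(desired, &best))
    return;

  // Adaptation is on by default; ask the built-in adapter for the output
  // format so frames are delivered (step added by this change).
  capturer->video_adapter()->OnOutputFormatRequest(desired);

  capturer->Start(best);
  // ... frames flow: capture -> video adapter -> video processors -> encoder.
  capturer->Stop();
}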
@@ -255,12 +255,6 @@ class VideoCapturer
   // Signal the captured frame to downstream.
   sigslot::signal2<VideoCapturer*, const CapturedFrame*,
                    sigslot::multi_threaded_local> SignalFrameCaptured;
-  // A VideoAdapter should be hooked up to SignalAdaptFrame which will be
-  // called before forwarding the frame to SignalVideoFrame. The parameters
-  // are this capturer instance, the input video frame and output frame
-  // pointer, respectively.
-  sigslot::signal3<VideoCapturer*, const VideoFrame*, VideoFrame**,
-                   sigslot::multi_threaded_local> SignalAdaptFrame;
   // Signal the captured and possibly adapted frame to downstream consumers
   // such as the encoder.
   sigslot::signal2<VideoCapturer*, const VideoFrame*,
@@ -279,6 +273,19 @@ class VideoCapturer
     screencast_max_pixels_ = talk_base::_max(0, p);
   }
 
+  // If true, run video adaptation. By default, video adaptation is enabled
+  // and users must call video_adapter()->OnOutputFormatRequest()
+  // to receive frames.
+  bool enable_video_adapter() const { return enable_video_adapter_; }
+  void set_enable_video_adapter(bool enable_video_adapter) {
+    enable_video_adapter_ = enable_video_adapter;
+  }
+
+  CoordinatedVideoAdapter* video_adapter() { return &video_adapter_; }
+  const CoordinatedVideoAdapter* video_adapter() const {
+    return &video_adapter_;
+  }
+
  protected:
   // Callback attached to SignalFrameCaptured where SignalVideoFrames is called.
   void OnFrameCaptured(VideoCapturer* video_capturer,
@@ -299,6 +306,12 @@ class VideoCapturer
 
   void SetCaptureFormat(const VideoFormat* format) {
     capture_format_.reset(format ? new VideoFormat(*format) : NULL);
+    if (capture_format_) {
+      ASSERT(capture_format_->interval > 0 &&
+             "Capture format expected to have positive interval.");
+      // Video adapter really only cares about capture format interval.
+      video_adapter_.SetInputFormat(*capture_format_);
+    }
   }
 
   void SetSupportedFormats(const std::vector<VideoFormat>& formats);
@@ -343,6 +356,9 @@ class VideoCapturer
   bool muted_;
   int black_frame_count_down_;
 
+  bool enable_video_adapter_;
+  CoordinatedVideoAdapter video_adapter_;
+
   talk_base::CriticalSection crit_;
   VideoProcessors video_processors_;
 
@@ -94,6 +94,7 @@ class VideoCapturerTest
 };
 
 TEST_F(VideoCapturerTest, CaptureState) {
+  EXPECT_TRUE(capturer_.enable_video_adapter());
   EXPECT_EQ(cricket::CS_RUNNING, capturer_.Start(cricket::VideoFormat(
       640,
       480,
@@ -473,6 +473,7 @@ class VideoMediaChannelTest : public testing::Test,
                                  cricket::FOURCC_I420);
     EXPECT_EQ(cricket::CS_RUNNING, video_capturer_->Start(format));
     EXPECT_TRUE(channel_->SetCapturer(kSsrc, video_capturer_.get()));
+    EXPECT_TRUE(channel_->SetSendStreamFormat(kSsrc, format));
   }
   void SetUpSecondStream() {
     EXPECT_TRUE(channel_->AddRecvStream(
@@ -494,6 +495,7 @@ class VideoMediaChannelTest : public testing::Test,
     EXPECT_TRUE(channel_->SetCapturer(kSsrc + 2, video_capturer_2_.get()));
     // Make the second renderer available for use by a new stream.
     EXPECT_TRUE(channel_->SetRenderer(kSsrc + 2, &renderer2_));
+    EXPECT_TRUE(channel_->SetSendStreamFormat(kSsrc + 2, format));
   }
   virtual void TearDown() {
     channel_.reset();
@@ -524,7 +526,6 @@ class VideoMediaChannelTest : public testing::Test,
     if (video_capturer_) {
       EXPECT_EQ(cricket::CS_RUNNING, video_capturer_->Start(capture_format));
     }
-
     if (video_capturer_2_) {
       EXPECT_EQ(cricket::CS_RUNNING, video_capturer_2_->Start(capture_format));
     }
@@ -540,6 +541,12 @@ class VideoMediaChannelTest : public testing::Test,
   bool SetSend(bool send) {
     return channel_->SetSend(send);
   }
+  bool SetSendStreamFormat(uint32 ssrc, const cricket::VideoCodec& codec) {
+    return channel_->SetSendStreamFormat(ssrc, cricket::VideoFormat(
+        codec.width, codec.height,
+        cricket::VideoFormat::FpsToInterval(codec.framerate),
+        cricket::FOURCC_ANY));
+  }
   int DrainOutgoingPackets() {
     int packets = 0;
     do {
@@ -711,6 +718,7 @@ class VideoMediaChannelTest : public testing::Test,
     EXPECT_TRUE(channel_->SetCapturer(kSsrc, video_capturer_.get()));
     EXPECT_TRUE(SetOneCodec(DefaultCodec()));
     EXPECT_FALSE(channel_->sending());
+    EXPECT_TRUE(SetSendStreamFormat(kSsrc, DefaultCodec()));
     EXPECT_TRUE(SetSend(true));
     EXPECT_TRUE(channel_->sending());
     EXPECT_TRUE(SendFrame());
@@ -747,6 +755,7 @@ class VideoMediaChannelTest : public testing::Test,
   // Tests that we can send and receive frames.
   void SendAndReceive(const cricket::VideoCodec& codec) {
     EXPECT_TRUE(SetOneCodec(codec));
+    EXPECT_TRUE(SetSendStreamFormat(kSsrc, codec));
     EXPECT_TRUE(SetSend(true));
     EXPECT_TRUE(channel_->SetRender(true));
     EXPECT_EQ(0, renderer_.num_rendered_frames());
@@ -759,6 +768,7 @@ class VideoMediaChannelTest : public testing::Test,
   void SendManyResizeOnce() {
     cricket::VideoCodec codec(DefaultCodec());
     EXPECT_TRUE(SetOneCodec(codec));
+    EXPECT_TRUE(SetSendStreamFormat(kSsrc, codec));
     EXPECT_TRUE(SetSend(true));
     EXPECT_TRUE(channel_->SetRender(true));
     EXPECT_EQ(0, renderer_.num_rendered_frames());
@@ -773,6 +783,7 @@ class VideoMediaChannelTest : public testing::Test,
     codec.width /= 2;
     codec.height /= 2;
     EXPECT_TRUE(SetOneCodec(codec));
+    EXPECT_TRUE(SetSendStreamFormat(kSsrc, codec));
     EXPECT_TRUE(WaitAndSendFrame(30));
     EXPECT_FRAME_WAIT(3, codec.width, codec.height, kTimeout);
     EXPECT_EQ(2, renderer_.num_set_sizes());
@@ -882,6 +893,7 @@ class VideoMediaChannelTest : public testing::Test,
     EXPECT_TRUE(channel_->AddRecvStream(
         cricket::StreamParams::CreateLegacy(1234)));
     channel_->UpdateAspectRatio(640, 400);
+    EXPECT_TRUE(SetSendStreamFormat(kSsrc, DefaultCodec()));
     EXPECT_TRUE(SetSend(true));
     EXPECT_TRUE(channel_->SetRender(true));
     EXPECT_TRUE(SendFrame());
@@ -902,6 +914,7 @@ class VideoMediaChannelTest : public testing::Test,
     EXPECT_TRUE(channel_->AddSendStream(
         cricket::StreamParams::CreateLegacy(5678)));
     EXPECT_TRUE(channel_->SetCapturer(5678, capturer.get()));
+    EXPECT_TRUE(channel_->SetSendStreamFormat(5678, format));
     EXPECT_TRUE(channel_->AddRecvStream(
         cricket::StreamParams::CreateLegacy(5678)));
     EXPECT_TRUE(channel_->SetRenderer(5678, &renderer1));
@@ -937,6 +950,7 @@ class VideoMediaChannelTest : public testing::Test,
   // Test that we can set the SSRC for the default send source.
   void SetSendSsrc() {
     EXPECT_TRUE(SetDefaultCodec());
+    EXPECT_TRUE(SetSendStreamFormat(kSsrc, DefaultCodec()));
     EXPECT_TRUE(SetSend(true));
     EXPECT_TRUE(SendFrame());
     EXPECT_TRUE_WAIT(NumRtpPackets() > 0, kTimeout);
@@ -958,6 +972,7 @@ class VideoMediaChannelTest : public testing::Test,
     EXPECT_TRUE(channel_->AddSendStream(
         cricket::StreamParams::CreateLegacy(999)));
     EXPECT_TRUE(channel_->SetCapturer(999u, video_capturer_.get()));
+    EXPECT_TRUE(SetSendStreamFormat(999u, DefaultCodec()));
     EXPECT_TRUE(SetSend(true));
     EXPECT_TRUE(WaitAndSendFrame(0));
     EXPECT_TRUE_WAIT(NumRtpPackets() > 0, kTimeout);
@@ -982,6 +997,7 @@ class VideoMediaChannelTest : public testing::Test,
     talk_base::SetBE32(packet1.data() + 8, kSsrc);
     channel_->SetRenderer(0, NULL);
     EXPECT_TRUE(SetDefaultCodec());
+    EXPECT_TRUE(SetSendStreamFormat(kSsrc, DefaultCodec()));
     EXPECT_TRUE(SetSend(true));
     EXPECT_TRUE(channel_->SetRender(true));
     EXPECT_EQ(0, renderer_.num_rendered_frames());
@@ -1005,6 +1021,7 @@ class VideoMediaChannelTest : public testing::Test,
   // Tests setting up and configuring a send stream.
   void AddRemoveSendStreams() {
     EXPECT_TRUE(SetOneCodec(DefaultCodec()));
+    EXPECT_TRUE(SetSendStreamFormat(kSsrc, DefaultCodec()));
     EXPECT_TRUE(SetSend(true));
     EXPECT_TRUE(channel_->SetRender(true));
     EXPECT_TRUE(SendFrame());
@@ -1151,6 +1168,7 @@ class VideoMediaChannelTest : public testing::Test,
   void AddRemoveRecvStreamAndRender() {
     cricket::FakeVideoRenderer renderer1;
     EXPECT_TRUE(SetDefaultCodec());
+    EXPECT_TRUE(SetSendStreamFormat(kSsrc, DefaultCodec()));
     EXPECT_TRUE(SetSend(true));
     EXPECT_TRUE(channel_->SetRender(true));
     EXPECT_TRUE(channel_->AddRecvStream(
@@ -1195,6 +1213,7 @@ class VideoMediaChannelTest : public testing::Test,
     cricket::VideoOptions vmo;
     vmo.conference_mode.Set(true);
     EXPECT_TRUE(channel_->SetOptions(vmo));
+    EXPECT_TRUE(SetSendStreamFormat(kSsrc, DefaultCodec()));
     EXPECT_TRUE(SetSend(true));
     EXPECT_TRUE(channel_->SetRender(true));
     EXPECT_TRUE(channel_->AddRecvStream(
@@ -1232,6 +1251,7 @@ class VideoMediaChannelTest : public testing::Test,
     codec.height = 240;
     const int time_between_send = TimeBetweenSend(codec);
     EXPECT_TRUE(SetOneCodec(codec));
+    EXPECT_TRUE(SetSendStreamFormat(kSsrc, codec));
     EXPECT_TRUE(SetSend(true));
     EXPECT_TRUE(channel_->SetRender(true));
     EXPECT_EQ(0, renderer_.num_rendered_frames());
@@ -1253,6 +1273,7 @@ class VideoMediaChannelTest : public testing::Test,
     int captured_frames = 1;
     for (int iterations = 0; iterations < 2; ++iterations) {
       EXPECT_TRUE(channel_->SetCapturer(kSsrc, capturer.get()));
+      EXPECT_TRUE(SetSendStreamFormat(kSsrc, codec));
       talk_base::Thread::Current()->ProcessMessages(time_between_send);
       EXPECT_TRUE(capturer->CaptureCustomFrame(format.width, format.height,
                                                cricket::FOURCC_I420));
@@ -1292,6 +1313,7 @@ class VideoMediaChannelTest : public testing::Test,
   // added, the plugin shouldn't crash (and no black frame should be sent).
   void RemoveCapturerWithoutAdd() {
     EXPECT_TRUE(SetOneCodec(DefaultCodec()));
+    EXPECT_TRUE(SetSendStreamFormat(kSsrc, DefaultCodec()));
     EXPECT_TRUE(SetSend(true));
     EXPECT_TRUE(channel_->SetRender(true));
     EXPECT_EQ(0, renderer_.num_rendered_frames());
@@ -1353,6 +1375,8 @@ class VideoMediaChannelTest : public testing::Test,
     // TODO(hellner): this seems like an unnecessary constraint, fix it.
     EXPECT_TRUE(channel_->SetCapturer(1, capturer1.get()));
     EXPECT_TRUE(channel_->SetCapturer(2, capturer2.get()));
+    EXPECT_TRUE(SetSendStreamFormat(1, DefaultCodec()));
+    EXPECT_TRUE(SetSendStreamFormat(2, DefaultCodec()));
     EXPECT_TRUE(SetSend(true));
     EXPECT_TRUE(channel_->SetRender(true));
     // Test capturer associated with engine.
@@ -1385,6 +1409,7 @@ class VideoMediaChannelTest : public testing::Test,
 
     cricket::VideoCodec codec(DefaultCodec());
     EXPECT_TRUE(SetOneCodec(codec));
+    EXPECT_TRUE(SetSendStreamFormat(kSsrc, DefaultCodec()));
     EXPECT_TRUE(SetSend(true));
 
     cricket::FakeVideoRenderer renderer;
@@ -1410,6 +1435,7 @@ class VideoMediaChannelTest : public testing::Test,
     // Capture frame to not get same frame timestamps as previous capturer.
     capturer->CaptureFrame();
     EXPECT_TRUE(channel_->SetCapturer(kSsrc, capturer.get()));
+    EXPECT_TRUE(channel_->SetSendStreamFormat(kSsrc, capture_format));
     EXPECT_TRUE(talk_base::Thread::Current()->ProcessMessages(30));
     EXPECT_TRUE(capturer->CaptureCustomFrame(kWidth, kHeight,
                                              cricket::FOURCC_ARGB));
@@ -1429,6 +1455,7 @@ class VideoMediaChannelTest : public testing::Test,
     codec.height /= 2;
     // Adapt the resolution.
     EXPECT_TRUE(SetOneCodec(codec));
+    EXPECT_TRUE(SetSendStreamFormat(kSsrc, codec));
     EXPECT_TRUE(WaitAndSendFrame(30));
     EXPECT_FRAME_WAIT(2, codec.width, codec.height, kTimeout);
   }
@@ -1442,6 +1469,7 @@ class VideoMediaChannelTest : public testing::Test,
     codec.height /= 2;
     // Adapt the resolution.
    EXPECT_TRUE(SetOneCodec(codec));
+    EXPECT_TRUE(SetSendStreamFormat(kSsrc, codec));
     EXPECT_TRUE(WaitAndSendFrame(30));
     EXPECT_FRAME_WAIT(2, codec.width, codec.height, kTimeout);
   }
@@ -1543,6 +1571,7 @@ class VideoMediaChannelTest : public testing::Test,
   // frames being dropped.
   void SetSendStreamFormat0x0() {
     EXPECT_TRUE(SetOneCodec(DefaultCodec()));
+    EXPECT_TRUE(SetSendStreamFormat(kSsrc, DefaultCodec()));
     EXPECT_TRUE(SetSend(true));
     EXPECT_TRUE(channel_->SetRender(true));
     EXPECT_EQ(0, renderer_.num_rendered_frames());
@@ -1575,6 +1604,7 @@ class VideoMediaChannelTest : public testing::Test,
         cricket::VideoFormat::FpsToInterval(30),
         cricket::FOURCC_I420));
     EXPECT_TRUE(channel_->SetCapturer(kSsrc, &video_capturer));
+    EXPECT_TRUE(SetSendStreamFormat(kSsrc, DefaultCodec()));
     EXPECT_TRUE(SetSend(true));
     EXPECT_TRUE(channel_->SetRender(true));
     EXPECT_EQ(frame_count, renderer_.num_rendered_frames());
@@ -562,6 +562,8 @@ class WebRtcOveruseObserver : public webrtc::CpuOveruseObserver {
     enabled_ = enable;
   }
 
+  bool enabled() const { return enabled_; }
+
  private:
   CoordinatedVideoAdapter* video_adapter_;
   bool enabled_;
@@ -584,13 +586,8 @@ class WebRtcVideoChannelSendInfo : public sigslot::has_slots<> {
         external_capture_(external_capture),
         capturer_updated_(false),
         interval_(0),
-        cpu_monitor_(cpu_monitor) {
-    overuse_observer_.reset(new WebRtcOveruseObserver(&video_adapter_));
-    SignalCpuAdaptationUnable.repeat(video_adapter_.SignalCpuAdaptationUnable);
-    if (cpu_monitor) {
-      cpu_monitor->SignalUpdate.connect(
-          &video_adapter_, &CoordinatedVideoAdapter::OnCpuLoadUpdated);
-    }
+        cpu_monitor_(cpu_monitor),
+        overuse_observer_enabled_(false) {
   }
 
   int channel_id() const { return channel_id_; }
@@ -614,7 +611,10 @@ class WebRtcVideoChannelSendInfo : public sigslot::has_slots<> {
     if (video_format_ != cricket::VideoFormat()) {
       interval_ = video_format_.interval;
     }
-    video_adapter_.OnOutputFormatRequest(video_format_);
+    CoordinatedVideoAdapter* adapter = video_adapter();
+    if (adapter) {
+      adapter->OnOutputFormatRequest(video_format_);
+    }
   }
   void set_interval(int64 interval) {
     if (video_format() == cricket::VideoFormat()) {
@@ -623,17 +623,12 @@ class WebRtcVideoChannelSendInfo : public sigslot::has_slots<> {
   }
   int64 interval() { return interval_; }
 
-  void InitializeAdapterOutputFormat(const webrtc::VideoCodec& codec) {
-    VideoFormat format(codec.width, codec.height,
-                       VideoFormat::FpsToInterval(codec.maxFramerate),
-                       FOURCC_I420);
-    if (video_adapter_.output_format().IsSize0x0()) {
-      video_adapter_.SetOutputFormat(format);
-    }
-  }
-
   int CurrentAdaptReason() const {
-    return video_adapter_.adapt_reason();
+    const CoordinatedVideoAdapter* adapter = video_adapter();
+    if (!adapter) {
+      return CoordinatedVideoAdapter::ADAPTREASON_NONE;
+    }
+    return video_adapter()->adapt_reason();
   }
   webrtc::CpuOveruseObserver* overuse_observer() {
     return overuse_observer_.get();
@@ -658,69 +653,113 @@ class WebRtcVideoChannelSendInfo : public sigslot::has_slots<> {
     if (video_capturer == video_capturer_) {
       return;
     }
-    capturer_updated_ = true;
 
-    // Disconnect from the previous video capturer.
-    if (video_capturer_) {
-      video_capturer_->SignalAdaptFrame.disconnect(this);
-    }
-
-    video_capturer_ = video_capturer;
-    if (video_capturer && !video_capturer->IsScreencast()) {
-      const VideoFormat* capture_format = video_capturer->GetCaptureFormat();
-      if (capture_format) {
-        // TODO(thorcarpenter): This is broken. Video capturer doesn't have
-        // a capture format until the capturer is started. So, if
-        // the capturer is started immediately after calling set_video_capturer
-        // video adapter may not have the input format set, the interval may
-        // be zero, and all frames may be dropped.
-        // Consider fixing this by having video_adapter keep a pointer to the
-        // video capturer.
-        video_adapter_.SetInputFormat(*capture_format);
+    CoordinatedVideoAdapter* old_video_adapter = video_adapter();
+    if (old_video_adapter) {
+      // Disconnect signals from old video adapter.
+      SignalCpuAdaptationUnable.disconnect(old_video_adapter);
+      if (cpu_monitor_) {
+        cpu_monitor_->SignalUpdate.disconnect(old_video_adapter);
       }
-      // TODO(thorcarpenter): When the adapter supports "only frame dropping"
-      // mode, also hook it up to screencast capturers.
-      video_capturer->SignalAdaptFrame.connect(
-          this, &WebRtcVideoChannelSendInfo::AdaptFrame);
     }
+
+    capturer_updated_ = true;
+    video_capturer_ = video_capturer;
+
+    if (!video_capturer) {
+      overuse_observer_.reset();
+      return;
+    }
+
+    CoordinatedVideoAdapter* adapter = video_adapter();
+    ASSERT(adapter && "Video adapter should not be null here.");
+
+    UpdateAdapterCpuOptions();
+    adapter->OnOutputFormatRequest(video_format_);
+
+    overuse_observer_.reset(new WebRtcOveruseObserver(adapter));
+    // (Dis)connect the video adapter from the cpu monitor as appropriate.
+    SetCpuOveruseDetection(overuse_observer_enabled_);
+
+    SignalCpuAdaptationUnable.repeat(adapter->SignalCpuAdaptationUnable);
   }
 
-  CoordinatedVideoAdapter* video_adapter() { return &video_adapter_; }
-
-  void AdaptFrame(VideoCapturer* capturer, const VideoFrame* input,
-                  VideoFrame** adapted) {
-    video_adapter_.AdaptFrame(input, adapted);
+  CoordinatedVideoAdapter* video_adapter() {
+    if (!video_capturer_) {
+      return NULL;
+    }
+    return video_capturer_->video_adapter();
+  }
+  const CoordinatedVideoAdapter* video_adapter() const {
+    if (!video_capturer_) {
+      return NULL;
+    }
+    return video_capturer_->video_adapter();
   }
 
-  void ApplyCpuOptions(const VideoOptions& options) {
+  void ApplyCpuOptions(const VideoOptions& video_options) {
+    // Use video_options_.SetAll() instead of assignment so that unset value in
+    // video_options will not overwrite the previous option value.
+    video_options_.SetAll(video_options);
+    UpdateAdapterCpuOptions();
+  }
+
+  void UpdateAdapterCpuOptions() {
+    if (!video_capturer_) {
+      return;
+    }
+
     bool cpu_adapt, cpu_smoothing, adapt_third;
     float low, med, high;
-    if (options.adapt_input_to_cpu_usage.Get(&cpu_adapt)) {
-      video_adapter_.set_cpu_adaptation(cpu_adapt);
+    // TODO(thorcarpenter): Have VideoAdapter be responsible for setting
+    // all these video options.
+    CoordinatedVideoAdapter* video_adapter = video_capturer_->video_adapter();
+    if (video_options_.adapt_input_to_cpu_usage.Get(&cpu_adapt)) {
+      video_adapter->set_cpu_adaptation(cpu_adapt);
     }
-    if (options.adapt_cpu_with_smoothing.Get(&cpu_smoothing)) {
-      video_adapter_.set_cpu_smoothing(cpu_smoothing);
+    if (video_options_.adapt_cpu_with_smoothing.Get(&cpu_smoothing)) {
+      video_adapter->set_cpu_smoothing(cpu_smoothing);
     }
-    if (options.process_adaptation_threshhold.Get(&med)) {
-      video_adapter_.set_process_threshold(med);
+    if (video_options_.process_adaptation_threshhold.Get(&med)) {
+      video_adapter->set_process_threshold(med);
     }
-    if (options.system_low_adaptation_threshhold.Get(&low)) {
-      video_adapter_.set_low_system_threshold(low);
+    if (video_options_.system_low_adaptation_threshhold.Get(&low)) {
+      video_adapter->set_low_system_threshold(low);
    }
-    if (options.system_high_adaptation_threshhold.Get(&high)) {
-      video_adapter_.set_high_system_threshold(high);
+    if (video_options_.system_high_adaptation_threshhold.Get(&high)) {
+      video_adapter->set_high_system_threshold(high);
     }
-    if (options.video_adapt_third.Get(&adapt_third)) {
-      video_adapter_.set_scale_third(adapt_third);
+    if (video_options_.video_adapt_third.Get(&adapt_third)) {
+      video_adapter->set_scale_third(adapt_third);
     }
   }
 
   void SetCpuOveruseDetection(bool enable) {
-    if (cpu_monitor_ && enable) {
-      cpu_monitor_->SignalUpdate.disconnect(&video_adapter_);
+    overuse_observer_enabled_ = enable;
+
+    if (!overuse_observer_) {
+      // Cannot actually use the overuse detector until it is initialized
+      // with a video adapter.
+      return;
     }
     overuse_observer_->Enable(enable);
-    video_adapter_.set_cpu_adaptation(enable);
+
+    // If overuse detection is enabled, it will signal the video adapter
+    // instead of the cpu monitor. If disabled, connect the adapter to the
+    // cpu monitor.
+    CoordinatedVideoAdapter* adapter = video_adapter();
+    if (adapter) {
+      adapter->set_cpu_adaptation(enable);
+      if (cpu_monitor_) {
+        if (enable) {
+          cpu_monitor_->SignalUpdate.disconnect(adapter);
+        } else {
+          cpu_monitor_->SignalUpdate.connect(
+              adapter, &CoordinatedVideoAdapter::OnCpuLoadUpdated);
+        }
+      }
+    }
   }
 
   void ProcessFrame(const VideoFrame& original_frame, bool mute,
@@ -774,9 +813,11 @@ class WebRtcVideoChannelSendInfo : public sigslot::has_slots<> {
 
   int64 interval_;
 
-  CoordinatedVideoAdapter video_adapter_;
   talk_base::CpuMonitor* cpu_monitor_;
   talk_base::scoped_ptr<WebRtcOveruseObserver> overuse_observer_;
+  bool overuse_observer_enabled_;
+
+  VideoOptions video_options_;
 };
 
 const WebRtcVideoEngine::VideoCodecPref
@@ -1677,12 +1718,6 @@ bool WebRtcVideoMediaChannel::SetSendCodecs(
     return false;
   }
 
-  for (SendChannelMap::iterator iter = send_channels_.begin();
-       iter != send_channels_.end(); ++iter) {
-    WebRtcVideoChannelSendInfo* send_channel = iter->second;
-    send_channel->InitializeAdapterOutputFormat(codec);
-  }
-
   LogSendCodecChange("SetSendCodecs()");
 
   return true;
@@ -1698,10 +1733,6 @@ bool WebRtcVideoMediaChannel::GetSendCodec(VideoCodec* send_codec) {
 
 bool WebRtcVideoMediaChannel::SetSendStreamFormat(uint32 ssrc,
                                                   const VideoFormat& format) {
-  if (!send_codec_) {
-    LOG(LS_ERROR) << "The send codec has not been set yet.";
-    return false;
-  }
   WebRtcVideoChannelSendInfo* send_channel = GetSendChannel(ssrc);
   if (!send_channel) {
     LOG(LS_ERROR) << "The specified ssrc " << ssrc << " is not in use.";
@@ -1292,6 +1292,7 @@ TEST_F(WebRtcVideoEngineTestFake, MultipleSendStreamsWithOneCapturer) {
         cricket::StreamParams::CreateLegacy(kSsrcs2[i])));
     // Register the capturer to the ssrc.
     EXPECT_TRUE(channel_->SetCapturer(kSsrcs2[i], &capturer));
+    EXPECT_TRUE(channel_->SetSendStreamFormat(kSsrcs2[i], capture_format_vga));
   }
 
   const int channel0 = vie_.GetChannelFromLocalSsrc(kSsrcs2[0]);
@@ -1675,10 +1675,12 @@ class WebRtcVoiceMediaChannel::WebRtcVoiceChannelRenderer
 
   // Starts the rendering by setting a sink to the renderer to get data
   // callback.
+  // This method is called on the libjingle worker thread.
   // TODO(xians): Make sure Start() is called only once.
   void Start(AudioRenderer* renderer) {
+    talk_base::CritScope lock(&lock_);
     ASSERT(renderer != NULL);
-    if (renderer_) {
+    if (renderer_ != NULL) {
       ASSERT(renderer_ == renderer);
       return;
     }
@@ -1692,8 +1694,10 @@ class WebRtcVoiceMediaChannel::WebRtcVoiceChannelRenderer
 
   // Stops rendering by setting the sink of the renderer to NULL. No data
   // callback will be received after this method.
+  // This method is called on the libjingle worker thread.
   void Stop() {
-    if (!renderer_)
+    talk_base::CritScope lock(&lock_);
+    if (renderer_ == NULL)
       return;
 
     renderer_->RemoveChannel(channel_);
@@ -1702,13 +1706,29 @@ class WebRtcVoiceMediaChannel::WebRtcVoiceChannelRenderer
   }
 
   // AudioRenderer::Sink implementation.
+  // This method is called on the audio thread.
   virtual void OnData(const void* audio_data,
                       int bits_per_sample,
                       int sample_rate,
                       int number_of_channels,
                       int number_of_frames) OVERRIDE {
-    // TODO(xians): Make new interface in AudioTransport to pass the data to
-    // WebRtc VoE channel.
+#ifdef USE_WEBRTC_DEV_BRANCH
+    voe_audio_transport_->OnData(channel_,
+                                 audio_data,
+                                 bits_per_sample,
+                                 sample_rate,
+                                 number_of_channels,
+                                 number_of_frames);
+#endif
+  }
+
+  // Callback from the |renderer_| when it is going away. In case Start() has
+  // never been called, this callback won't be triggered.
+  virtual void OnClose() OVERRIDE {
+    talk_base::CritScope lock(&lock_);
+    // Set |renderer_| to NULL to make sure no more callback will get into
+    // the renderer.
+    renderer_ = NULL;
   }
 
   // Accessor to the VoE channel ID.
@@ -1722,6 +1742,9 @@ class WebRtcVoiceMediaChannel::WebRtcVoiceChannelRenderer
   // PeerConnection will make sure invalidating the pointer before the object
   // goes away.
   AudioRenderer* renderer_;
+
+  // Protects |renderer_| in Start(), Stop() and OnClose().
+  talk_base::CriticalSection lock_;
 };
 
 // WebRtcVoiceMediaChannel
186
talk/session/media/externalhmac.cc
Normal file
186
talk/session/media/externalhmac.cc
Normal file
@ -0,0 +1,186 @@
|
|||||||
|
/*
|
||||||
|
* libjingle
|
||||||
|
* Copyright 2014 Google Inc.
|
||||||
|
*
|
||||||
|
* Redistribution and use in source and binary forms, with or without
|
||||||
|
* modification, are permitted provided that the following conditions are met:
|
||||||
|
*
|
||||||
|
* 1. Redistributions of source code must retain the above copyright notice,
|
||||||
|
* this list of conditions and the following disclaimer.
|
||||||
|
* 2. Redistributions in binary form must reproduce the above copyright notice,
|
||||||
|
* this list of conditions and the following disclaimer in the documentation
|
||||||
|
* and/or other materials provided with the distribution.
|
||||||
|
* 3. The name of the author may not be used to endorse or promote products
|
||||||
|
* derived from this software without specific prior written permission.
|
||||||
|
*
|
||||||
|
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
|
||||||
|
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
|
||||||
|
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
|
||||||
|
* EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||||
|
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
|
||||||
|
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
|
||||||
|
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
|
||||||
|
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
|
||||||
|
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
|
||||||
|
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
*/
|

#if defined(HAVE_SRTP) && defined(ENABLE_EXTERNAL_AUTH)

#ifdef SRTP_RELATIVE_PATH
#include "srtp.h"  // NOLINT
#else
#include "third_party/libsrtp/include/srtp.h"
#endif  // SRTP_RELATIVE_PATH

#include "talk/session/media/external_hmac.h"

#include "talk/base/logging.h"

// The debug module for authentication.
debug_module_t mod_external_hmac = {
  0,                            // Debugging is off by default
  (char*)"external-hmac-sha-1"  // Printable name for module
};

extern auth_type_t external_hmac;

// Begin test case 0.
uint8_t
external_hmac_test_case_0_key[20] = {
  0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b,
  0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b,
  0x0b, 0x0b, 0x0b, 0x0b
};

uint8_t
external_hmac_test_case_0_data[8] = {
  0x48, 0x69, 0x20, 0x54, 0x68, 0x65, 0x72, 0x65  // "Hi There"
};

uint8_t
external_hmac_fake_tag[10] = {
  0xba, 0xdd, 0xba, 0xdd, 0xba, 0xdd, 0xba, 0xdd, 0xba, 0xdd
};

auth_test_case_t
external_hmac_test_case_0 = {
  20,                              // Octets in key
  external_hmac_test_case_0_key,   // Key
  8,                               // Octets in data
  external_hmac_test_case_0_data,  // Data
  10,                              // Octets in tag
  external_hmac_fake_tag,          // Tag
  NULL                             // Pointer to next testcase
};

err_status_t
external_hmac_alloc(auth_t** a, int key_len, int out_len) {
  uint8_t* pointer;

  // Check key length - note that we don't support keys larger
  // than 20 bytes yet.
  if (key_len > 20)
    return err_status_bad_param;

  // Check output length - should be less than 20 bytes.
  if (out_len > 20)
    return err_status_bad_param;

  // Allocate memory for auth and hmac_ctx_t structures.
  pointer = reinterpret_cast<uint8_t*>(
      crypto_alloc(sizeof(external_hmac_ctx_t) + sizeof(auth_t)));
  if (pointer == NULL)
    return err_status_alloc_fail;

  // Set pointers.
  *a = (auth_t *)pointer;
  (*a)->type = &external_hmac;
  (*a)->state = pointer + sizeof(auth_t);
  (*a)->out_len = out_len;
  (*a)->key_len = key_len;
  (*a)->prefix_len = 0;

  // Increment global count of all hmac uses.
  external_hmac.ref_count++;

  return err_status_ok;
}

err_status_t
external_hmac_dealloc(auth_t* a) {
  // Zeroize entire state.
  octet_string_set_to_zero((uint8_t *)a,
                           sizeof(external_hmac_ctx_t) + sizeof(auth_t));

  // Free memory.
  crypto_free(a);

  // Decrement global count of all hmac uses.
  external_hmac.ref_count--;

  return err_status_ok;
}

err_status_t
external_hmac_init(external_hmac_ctx_t* state,
                   const uint8_t* key, int key_len) {
  if (key_len > HMAC_KEY_LENGTH)
    return err_status_bad_param;

  memset(state->key, 0, key_len);
  memcpy(state->key, key, key_len);
  state->key_length = key_len;
  return err_status_ok;
}

err_status_t
external_hmac_start(external_hmac_ctx_t* state) {
  return err_status_ok;
}

err_status_t
external_hmac_update(external_hmac_ctx_t* state, const uint8_t* message,
                     int msg_octets) {
  return err_status_ok;
}

err_status_t
external_hmac_compute(external_hmac_ctx_t* state, const void* message,
                      int msg_octets, int tag_len, uint8_t* result) {
  memcpy(result, external_hmac_fake_tag, tag_len);
  return err_status_ok;
}
char external_hmac_description[] = "external hmac sha-1 authentication";

// auth_type_t external_hmac is the hmac metaobject.
auth_type_t
external_hmac = {
  (auth_alloc_func) external_hmac_alloc,
  (auth_dealloc_func) external_hmac_dealloc,
  (auth_init_func) external_hmac_init,
  (auth_compute_func) external_hmac_compute,
  (auth_update_func) external_hmac_update,
  (auth_start_func) external_hmac_start,
  (char *) external_hmac_description,
  (int) 0,  /* instance count */
  (auth_test_case_t *) &external_hmac_test_case_0,
  (debug_module_t *) &mod_external_hmac,
  (auth_type_id_t) EXTERNAL_HMAC_SHA1
};

err_status_t
external_crypto_init() {
  err_status_t status = crypto_kernel_replace_auth_type(
      &external_hmac, EXTERNAL_HMAC_SHA1);
  if (status) {
    LOG(LS_ERROR) << "Error in replacing default auth module, error: "
                  << status;
    return err_status_fail;
  }
  return err_status_ok;
}

#endif  // defined(HAVE_SRTP) && defined(ENABLE_EXTERNAL_AUTH)
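To make the placeholder behaviour of this module concrete, the fragment below is a rough sketch (not part of the patch, and assuming the libsrtp types included above) of the alloc/init/compute sequence libsrtp drives through the auth_type_t vtable. Note that external_hmac_compute never hashes the message; it only stamps external_hmac_fake_tag, and the real HMAC is expected to be produced outside libsrtp.

// Sketch only: exercises the module the way libsrtp's kernel would during
// its self-test of a registered auth type.
static void ExerciseExternalHmac() {
  auth_t* a = NULL;
  if (external_hmac_alloc(&a, 20, 10) != err_status_ok)
    return;
  external_hmac_ctx_t* ctx =
      reinterpret_cast<external_hmac_ctx_t*>(a->state);
  external_hmac_init(ctx, external_hmac_test_case_0_key, 20);
  uint8_t tag[10];
  // Copies external_hmac_fake_tag into |tag|; no real HMAC is computed.
  external_hmac_compute(ctx, external_hmac_test_case_0_data, 8, 10, tag);
  external_hmac_dealloc(a);
}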
91  talk/session/media/externalhmac.h  Normal file
@ -0,0 +1,91 @@
/*
 * libjingle
 * Copyright 2014 Google Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
 * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef TALK_SESSION_MEDIA_EXTERNAL_HMAC_H_
#define TALK_SESSION_MEDIA_EXTERNAL_HMAC_H_

// External libsrtp HMAC auth module which implements the methods defined in
// auth_type_t.
// The default auth module is replaced only when the ENABLE_EXTERNAL_AUTH
// flag is enabled. This gives us access to the authentication keys, which the
// default auth implementation doesn't expose, and avoids hashing each packet
// twice.
//
// How will libsrtp select this module?
// Libsrtp defines authentication function types identified by an unsigned
// integer, e.g. HMAC_SHA1 is 3. Using these authentication ids, the
// application can plug any desired authentication module into libsrtp.
// libsrtp also provides a mechanism to select different auth functions for
// individual streams. This is done by setting the right value in the
// auth_type field of srtp_policy_t. The application must first register the
// auth functions and the corresponding authentication id using the
// crypto_kernel_replace_auth_type function.
#if defined(HAVE_SRTP) && defined(ENABLE_EXTERNAL_AUTH)

#ifdef SRTP_RELATIVE_PATH
#include "crypto_types.h"  // NOLINT
#else
#include "third_party/libsrtp/crypto/include/crypto_types.h"
#endif  // SRTP_RELATIVE_PATH

#define EXTERNAL_HMAC_SHA1 HMAC_SHA1 + 1
#define HMAC_KEY_LENGTH 20

// The HMAC context structure used to store authentication keys.
// The key is copied into this struct by the external_hmac_init function.
// The context is owned by srtp_t as part of the stream template.
typedef struct {
  uint8_t key[HMAC_KEY_LENGTH];
  int key_length;
} external_hmac_ctx_t;

err_status_t
external_hmac_alloc(auth_t** a, int key_len, int out_len);

err_status_t
external_hmac_dealloc(auth_t* a);

err_status_t
external_hmac_init(external_hmac_ctx_t* state,
                   const uint8_t* key, int key_len);

err_status_t
external_hmac_start(external_hmac_ctx_t* state);

err_status_t
external_hmac_update(external_hmac_ctx_t* state, const uint8_t* message,
                     int msg_octets);

err_status_t
external_hmac_compute(external_hmac_ctx_t* state, const void* message,
                      int msg_octets, int tag_len, uint8_t* result);

err_status_t
external_crypto_init();

#endif  // defined(HAVE_SRTP) && defined(ENABLE_EXTERNAL_AUTH)
#endif  // TALK_SESSION_MEDIA_EXTERNAL_HMAC_H_
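The selection mechanism described in the header comment above boils down to two steps, sketched here for illustration only (assuming HAVE_SRTP and ENABLE_EXTERNAL_AUTH are defined; the patch itself does essentially this in SrtpSession::Init() and SrtpSession::SetKey()): register the module once with libsrtp's crypto kernel, then point the RTP half of each stream's policy at the new id.

#include "talk/session/media/external_hmac.h"

// Illustration only; not part of the patch.
bool ConfigureExternalAuth(srtp_policy_t* policy) {
  // Register the external module with libsrtp's crypto kernel under the
  // EXTERNAL_HMAC_SHA1 id (the patch does this once, in SrtpSession::Init()).
  if (external_crypto_init() != err_status_ok)
    return false;
  // Select it for the RTP direction of this stream's policy; RTCP keeps the
  // default HMAC_SHA1 auth (as SrtpSession::SetKey() does in this patch).
  policy->rtp.auth_type = EXTERNAL_HMAC_SHA1;
  return true;
}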
@ -44,9 +44,16 @@
#ifdef HAVE_SRTP
#ifdef SRTP_RELATIVE_PATH
#include "srtp.h"  // NOLINT
extern "C" srtp_stream_t srtp_get_stream(srtp_t srtp, uint32_t ssrc);
#include "srtp_priv.h"  // NOLINT
#else
#include "third_party/libsrtp/include/srtp.h"
extern "C" srtp_stream_t srtp_get_stream(srtp_t srtp, uint32_t ssrc);
#include "third_party/libsrtp/include/srtp_priv.h"
#endif  // SRTP_RELATIVE_PATH
#ifdef ENABLE_EXTERNAL_AUTH
#include "talk/session/media/external_hmac.h"
#endif  // ENABLE_EXTERNAL_AUTH
#ifdef _DEBUG
extern "C" debug_module_t mod_srtp;
extern "C" debug_module_t mod_auth;
@ -158,7 +165,6 @@ bool SrtpFilter::SetRtpParams(const std::string& send_cs,
  LOG(LS_INFO) << "SRTP activated with negotiated parameters:"
               << " send cipher_suite " << send_cs
               << " recv cipher_suite " << recv_cs;
  return true;
}
@ -208,6 +214,16 @@ bool SrtpFilter::ProtectRtp(void* p, int in_len, int max_len, int* out_len) {
  return send_session_->ProtectRtp(p, in_len, max_len, out_len);
}

bool SrtpFilter::ProtectRtp(void* p, int in_len, int max_len, int* out_len,
                            int64* index) {
  if (!IsActive()) {
    LOG(LS_WARNING) << "Failed to ProtectRtp: SRTP not active";
    return false;
  }
  return send_session_->ProtectRtp(p, in_len, max_len, out_len, index);
}

bool SrtpFilter::ProtectRtcp(void* p, int in_len, int max_len, int* out_len) {
  if (!IsActive()) {
    LOG(LS_WARNING) << "Failed to ProtectRtcp: SRTP not active";
@ -240,6 +256,15 @@ bool SrtpFilter::UnprotectRtcp(void* p, int in_len, int* out_len) {
  }
}

bool SrtpFilter::GetRtpAuthParams(uint8** key, int* key_len, int* tag_len) {
  if (!IsActive()) {
    LOG(LS_WARNING) << "Failed to GetRtpAuthParams: SRTP not active";
    return false;
  }
  return send_session_->GetRtpAuthParams(key, key_len, tag_len);
}

void SrtpFilter::set_signal_silent_time(uint32 signal_silent_time_in_ms) {
  signal_silent_time_in_ms_ = signal_silent_time_in_ms;
  if (state_ == ST_ACTIVE) {
@ -496,6 +521,14 @@ bool SrtpSession::ProtectRtp(void* p, int in_len, int max_len, int* out_len) {
  return true;
}

bool SrtpSession::ProtectRtp(void* p, int in_len, int max_len, int* out_len,
                             int64* index) {
  if (!ProtectRtp(p, in_len, max_len, out_len)) {
    return false;
  }
  return (index) ? GetSendStreamPacketIndex(p, in_len, index) : true;
}

bool SrtpSession::ProtectRtcp(void* p, int in_len, int max_len, int* out_len) {
  if (!session_) {
    LOG(LS_WARNING) << "Failed to protect SRTCP packet: no SRTP Session";
@ -554,6 +587,42 @@ bool SrtpSession::UnprotectRtcp(void* p, int in_len, int* out_len) {
  return true;
}

bool SrtpSession::GetRtpAuthParams(uint8** key, int* key_len,
                                   int* tag_len) {
#if defined(ENABLE_EXTERNAL_AUTH)
  external_hmac_ctx_t* external_hmac = NULL;
  // stream_template will be the reference context for other streams.
  // Let's use it for getting the keys.
  srtp_stream_ctx_t* srtp_context = session_->stream_template;
  if (srtp_context && srtp_context->rtp_auth) {
    external_hmac = reinterpret_cast<external_hmac_ctx_t*>(
        srtp_context->rtp_auth->state);
  }

  if (!external_hmac) {
    LOG(LS_ERROR) << "Failed to get auth keys from libsrtp.";
    return false;
  }

  *key = external_hmac->key;
  *key_len = external_hmac->key_length;
  *tag_len = rtp_auth_tag_len_;
  return true;
#else
  return false;
#endif
}

bool SrtpSession::GetSendStreamPacketIndex(void* p, int in_len, int64* index) {
  srtp_hdr_t* hdr = reinterpret_cast<srtp_hdr_t*>(p);
  srtp_stream_ctx_t* stream = srtp_get_stream(session_, hdr->ssrc);
  if (stream == NULL)
    return false;

  *index = rdbx_get_packet_index(&stream->rtp_rdbx);
  return true;
}

void SrtpSession::set_signal_silent_time(uint32 signal_silent_time_in_ms) {
  srtp_stat_->set_signal_silent_time(signal_silent_time_in_ms);
}
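Taken together, the new ProtectRtp overload and GetRtpAuthParams give a sender everything an external authenticator needs. The fragment below is a rough usage sketch, not part of the patch; ExternalAuthenticator and its Authenticate() method are hypothetical stand-ins for whatever component (e.g. the voice engine) computes the real HMAC.

// Hypothetical consumer of the auth parameters; not part of this patch.
struct ExternalAuthenticator {
  virtual bool Authenticate(uint8* key, int key_len, int tag_len,
                            void* packet, int packet_len, int64 index) = 0;
};

bool ProtectWithExternalAuth(cricket::SrtpFilter* filter,
                             ExternalAuthenticator* authenticator,
                             void* packet, int in_len, int max_len) {
  int out_len = 0;
  int64 index = 0;
  // Encrypts in place; with ENABLE_EXTERNAL_AUTH the written auth tag is only
  // the placeholder from external_hmac_fake_tag. |index| is the packet index
  // libsrtp assigned to this packet.
  if (!filter->ProtectRtp(packet, in_len, max_len, &out_len, &index))
    return false;
  uint8* key = NULL;
  int key_len = 0;
  int tag_len = 0;
  // The key comes straight out of the external_hmac_ctx_t that libsrtp stores
  // for the stream template.
  if (!filter->GetRtpAuthParams(&key, &key_len, &tag_len))
    return false;
  // The real HMAC-SHA1 is computed outside libsrtp and overwrites the
  // placeholder tag.
  return authenticator->Authenticate(key, key_len, tag_len,
                                     packet, out_len, index);
}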
@ -596,6 +665,13 @@ bool SrtpSession::SetKey(int type, const std::string& cs,
  // TODO(astor) parse window size from WSH session-param
  policy.window_size = 1024;
  policy.allow_repeat_tx = 1;
  // If the external authentication option is enabled, supply the custom auth
  // module id EXTERNAL_HMAC_SHA1 in the policy structure. We want to set this
  // option only for RTP packets; by default the policy is initialized to
  // HMAC_SHA1.
#if defined(ENABLE_EXTERNAL_AUTH)
  policy.rtp.auth_type = EXTERNAL_HMAC_SHA1;
#endif
  policy.next = NULL;

  int err = srtp_create(&session_, &policy);
@ -604,6 +680,7 @@ bool SrtpSession::SetKey(int type, const std::string& cs,
    return false;
  }

  rtp_auth_tag_len_ = policy.rtp.auth_tag_len;
  rtcp_auth_tag_len_ = policy.rtcp.auth_tag_len;
  return true;
@ -623,7 +700,13 @@ bool SrtpSession::Init() {
    LOG(LS_ERROR) << "Failed to install SRTP event handler, err=" << err;
    return false;
  }
#if defined(ENABLE_EXTERNAL_AUTH)
  err = external_crypto_init();
  if (err != err_status_ok) {
    LOG(LS_ERROR) << "Failed to initialize fake auth, err=" << err;
    return false;
  }
#endif
  inited_ = true;
}
@ -122,12 +122,18 @@ class SrtpFilter {
  // Encrypts/signs an individual RTP/RTCP packet, in-place.
  // If an HMAC is used, this will increase the packet size.
  bool ProtectRtp(void* data, int in_len, int max_len, int* out_len);
  // Overloaded version, outputs packet index.
  bool ProtectRtp(void* data, int in_len, int max_len, int* out_len,
                  int64* index);
  bool ProtectRtcp(void* data, int in_len, int max_len, int* out_len);
  // Decrypts/verifies an individual RTP/RTCP packet.
  // If an HMAC is used, this will decrease the packet size.
  bool UnprotectRtp(void* data, int in_len, int* out_len);
  bool UnprotectRtcp(void* data, int in_len, int* out_len);

  // Returns the RTP auth params from the SRTP context.
  bool GetRtpAuthParams(uint8** key, int* key_len, int* tag_len);

  // Update the silent threshold (in ms) for signaling errors.
  void set_signal_silent_time(uint32 signal_silent_time_in_ms);
@ -200,12 +206,18 @@ class SrtpSession {
  // Encrypts/signs an individual RTP/RTCP packet, in-place.
  // If an HMAC is used, this will increase the packet size.
  bool ProtectRtp(void* data, int in_len, int max_len, int* out_len);
  // Overloaded version, outputs packet index.
  bool ProtectRtp(void* data, int in_len, int max_len, int* out_len,
                  int64* index);
  bool ProtectRtcp(void* data, int in_len, int max_len, int* out_len);
  // Decrypts/verifies an individual RTP/RTCP packet.
  // If an HMAC is used, this will decrease the packet size.
  bool UnprotectRtp(void* data, int in_len, int* out_len);
  bool UnprotectRtcp(void* data, int in_len, int* out_len);

  // Helper method to get authentication params.
  bool GetRtpAuthParams(uint8** key, int* key_len, int* tag_len);

  // Update the silent threshold (in ms) for signaling errors.
  void set_signal_silent_time(uint32 signal_silent_time_in_ms);
@ -217,9 +229,13 @@ class SrtpSession {

 private:
  bool SetKey(int type, const std::string& cs, const uint8* key, int len);
  // Returns send stream current packet index from srtp db.
  bool GetSendStreamPacketIndex(void* data, int in_len, int64* index);

  static bool Init();
  void HandleEvent(const srtp_event_data_t* ev);
  static void HandleEventThunk(srtp_event_data_t* ev);

  static std::list<SrtpSession*>* sessions();

  srtp_t session_;
@ -522,6 +522,25 @@ TEST_F(SrtpFilterTest, TestSetParamsKeyTooShort) {
                               kTestKey1, kTestKeyLen - 1));
}

#if defined(ENABLE_EXTERNAL_AUTH)
TEST_F(SrtpFilterTest, TestGetSendAuthParams) {
  EXPECT_TRUE(f1_.SetRtpParams(CS_AES_CM_128_HMAC_SHA1_32,
                               kTestKey1, kTestKeyLen,
                               CS_AES_CM_128_HMAC_SHA1_32,
                               kTestKey2, kTestKeyLen));
  EXPECT_TRUE(f1_.SetRtcpParams(CS_AES_CM_128_HMAC_SHA1_32,
                                kTestKey1, kTestKeyLen,
                                CS_AES_CM_128_HMAC_SHA1_32,
                                kTestKey2, kTestKeyLen));
  uint8* auth_key = NULL;
  int auth_key_len = 0, auth_tag_len = 0;
  EXPECT_TRUE(f1_.GetRtpAuthParams(&auth_key, &auth_key_len, &auth_tag_len));
  EXPECT_TRUE(auth_key != NULL);
  EXPECT_EQ(20, auth_key_len);
  EXPECT_EQ(4, auth_tag_len);
}
#endif

class SrtpSessionTest : public testing::Test {
 protected:
  virtual void SetUp() {
@ -606,6 +625,15 @@ TEST_F(SrtpSessionTest, TestProtect_AES_CM_128_HMAC_SHA1_32) {
  TestUnprotectRtcp(CS_AES_CM_128_HMAC_SHA1_32);
}

TEST_F(SrtpSessionTest, TestGetSendStreamPacketIndex) {
  EXPECT_TRUE(s1_.SetSend(CS_AES_CM_128_HMAC_SHA1_32, kTestKey1, kTestKeyLen));
  int64 index;
  int out_len = 0;
  EXPECT_TRUE(s1_.ProtectRtp(rtp_packet_, rtp_len_,
                             sizeof(rtp_packet_), &out_len, &index));
  EXPECT_EQ(1, index);
}

// Test that we fail to unprotect if someone tampers with the RTP/RTCP payloads.
TEST_F(SrtpSessionTest, TestTamperReject) {
  int out_len;