Revert 5590 "description"

> description

TBR=henrike@webrtc.org

Review URL: https://webrtc-codereview.appspot.com/8949006

git-svn-id: http://webrtc.googlecode.com/svn/trunk@5593 4adac7df-926f-26a2-2b94-8c16560cd09d
This commit is contained in:
xians@webrtc.org 2014-02-21 10:31:29 +00:00
parent 0f2809a5ac
commit ef2215110c
20 changed files with 106 additions and 686 deletions

View File

@ -58,11 +58,7 @@ void TrackHandler::OnChanged() {
LocalAudioSinkAdapter::LocalAudioSinkAdapter() : sink_(NULL) {}
LocalAudioSinkAdapter::~LocalAudioSinkAdapter() {
talk_base::CritScope lock(&lock_);
if (sink_)
sink_->OnClose();
}
LocalAudioSinkAdapter::~LocalAudioSinkAdapter() {}
void LocalAudioSinkAdapter::OnData(const void* audio_data,
int bits_per_sample,

View File

@ -56,7 +56,6 @@ class FakePeriodicVideoCapturer : public cricket::FakeVideoCapturer {
virtual cricket::CaptureState Start(const cricket::VideoFormat& format) {
cricket::CaptureState state = FakeVideoCapturer::Start(format);
if (state != cricket::CS_FAILED) {
set_enable_video_adapter(false); // Simplify testing.
talk_base::Thread::Current()->Post(this, MSG_CREATEFRAME);
}
return state;

View File

@ -249,11 +249,7 @@ class WebRtcSessionCreateSDPObserverForTest
class FakeAudioRenderer : public cricket::AudioRenderer {
public:
FakeAudioRenderer() : channel_id_(-1), sink_(NULL) {}
virtual ~FakeAudioRenderer() {
if (sink_)
sink_->OnClose();
}
FakeAudioRenderer() : channel_id_(-1) {}
virtual void AddChannel(int channel_id) OVERRIDE {
ASSERT(channel_id_ == -1);
@ -263,15 +259,10 @@ class FakeAudioRenderer : public cricket::AudioRenderer {
ASSERT(channel_id == channel_id_);
channel_id_ = -1;
}
virtual void SetSink(Sink* sink) OVERRIDE {
sink_ = sink;
}
int channel_id() const { return channel_id_; }
cricket::AudioRenderer::Sink* sink() const { return sink_; }
private:
int channel_id_;
cricket::AudioRenderer::Sink* sink_;
};
class WebRtcSessionTest : public testing::Test {
@ -2196,39 +2187,13 @@ TEST_F(WebRtcSessionTest, SetAudioSend) {
EXPECT_TRUE(channel->IsStreamMuted(send_ssrc));
EXPECT_FALSE(channel->options().echo_cancellation.IsSet());
EXPECT_EQ(0, renderer->channel_id());
EXPECT_TRUE(renderer->sink() != NULL);
// This will trigger SetSink(NULL) to the |renderer|.
session_->SetAudioSend(send_ssrc, true, options, NULL);
EXPECT_FALSE(channel->IsStreamMuted(send_ssrc));
bool value;
EXPECT_TRUE(channel->options().echo_cancellation.Get(&value));
EXPECT_TRUE(value);
EXPECT_EQ(-1, renderer->channel_id());
EXPECT_TRUE(renderer->sink() == NULL);
}
TEST_F(WebRtcSessionTest, AudioRendererForLocalStream) {
Init(NULL);
mediastream_signaling_.SendAudioVideoStream1();
CreateAndSetRemoteOfferAndLocalAnswer();
cricket::FakeVoiceMediaChannel* channel = media_engine_->GetVoiceChannel(0);
ASSERT_TRUE(channel != NULL);
ASSERT_EQ(1u, channel->send_streams().size());
uint32 send_ssrc = channel->send_streams()[0].first_ssrc();
talk_base::scoped_ptr<FakeAudioRenderer> renderer(new FakeAudioRenderer());
cricket::AudioOptions options;
session_->SetAudioSend(send_ssrc, true, options, renderer.get());
EXPECT_TRUE(renderer->sink() != NULL);
// Delete the |renderer| and it will trigger OnClose() to the sink, and this
// will invalidate the |renderer_| pointer in the sink and prevent getting a
// SetSink(NULL) callback afterwards.
renderer.reset();
// This will trigger SetSink(NULL) if no OnClose() callback.
session_->SetAudioSend(send_ssrc, true, options, NULL);
}
TEST_F(WebRtcSessionTest, SetVideoPlayout) {

View File

@ -49,6 +49,8 @@ struct PacketTimeUpdateParams {
std::vector<char> srtp_auth_key; // Authentication key.
int srtp_auth_tag_len; // Authentication tag length.
int64 srtp_packet_index; // Required for Rtp Packet authentication.
int payload_len; // Raw payload length, before any wrapping
// like TURN/GTURN.
};
// This structure holds meta information for the packet which is about to be sent

View File

@ -35,16 +35,11 @@ class AudioRenderer {
public:
class Sink {
public:
// Callback to receive data from the AudioRenderer.
virtual void OnData(const void* audio_data,
int bits_per_sample,
int sample_rate,
int number_of_channels,
int number_of_frames) = 0;
// Called when the AudioRenderer is going away.
virtual void OnClose() = 0;
protected:
virtual ~Sink() {}
};

View File

@ -316,18 +316,17 @@ class FakeVoiceMediaChannel : public RtpHelper<VoiceMediaChannel> {
return true;
}
virtual bool SetLocalRenderer(uint32 ssrc, AudioRenderer* renderer) {
std::map<uint32, VoiceChannelAudioSink*>::iterator it =
local_renderers_.find(ssrc);
std::map<uint32, AudioRenderer*>::iterator it = local_renderers_.find(ssrc);
if (renderer) {
if (it != local_renderers_.end()) {
ASSERT(it->second->renderer() == renderer);
ASSERT(it->second == renderer);
} else {
local_renderers_.insert(std::make_pair(
ssrc, new VoiceChannelAudioSink(renderer)));
local_renderers_.insert(std::make_pair(ssrc, renderer));
renderer->AddChannel(0);
}
} else {
if (it != local_renderers_.end()) {
delete it->second;
it->second->RemoveChannel(0);
local_renderers_.erase(it);
} else {
return false;
@ -420,34 +419,6 @@ class FakeVoiceMediaChannel : public RtpHelper<VoiceMediaChannel> {
double left, right;
};
class VoiceChannelAudioSink : public AudioRenderer::Sink {
public:
explicit VoiceChannelAudioSink(AudioRenderer* renderer)
: renderer_(renderer) {
renderer_->AddChannel(0);
renderer_->SetSink(this);
}
virtual ~VoiceChannelAudioSink() {
if (renderer_) {
renderer_->RemoveChannel(0);
renderer_->SetSink(NULL);
}
}
virtual void OnData(const void* audio_data,
int bits_per_sample,
int sample_rate,
int number_of_channels,
int number_of_frames) OVERRIDE {}
virtual void OnClose() OVERRIDE {
renderer_ = NULL;
}
AudioRenderer* renderer() const { return renderer_; }
private:
AudioRenderer* renderer_;
};
FakeVoiceEngine* engine_;
std::vector<AudioCodec> recv_codecs_;
std::vector<AudioCodec> send_codecs_;
@ -459,7 +430,7 @@ class FakeVoiceMediaChannel : public RtpHelper<VoiceMediaChannel> {
bool ringback_tone_loop_;
int time_since_last_typing_;
AudioOptions options_;
std::map<uint32, VoiceChannelAudioSink*> local_renderers_;
std::map<uint32, AudioRenderer*> local_renderers_;
std::map<uint32, AudioRenderer*> remote_renderers_;
};

View File

@ -421,7 +421,7 @@ CoordinatedVideoAdapter::CoordinatedVideoAdapter()
view_desired_interval_(0),
encoder_desired_num_pixels_(INT_MAX),
cpu_desired_num_pixels_(INT_MAX),
adapt_reason_(ADAPTREASON_NONE),
adapt_reason_(0),
system_load_average_(kCpuLoadInitialAverage) {
}
@ -636,7 +636,7 @@ bool CoordinatedVideoAdapter::AdaptToMinimumFormat(int* new_width,
}
int old_num_pixels = GetOutputNumPixels();
int min_num_pixels = INT_MAX;
adapt_reason_ = ADAPTREASON_NONE;
adapt_reason_ = 0;
// Reduce resolution based on encoder bandwidth (GD).
if (encoder_desired_num_pixels_ &&
@ -677,7 +677,7 @@ bool CoordinatedVideoAdapter::AdaptToMinimumFormat(int* new_width,
static_cast<int>(input.height * scale + .5f);
}
if (scale == 1.0f) {
adapt_reason_ = ADAPTREASON_NONE;
adapt_reason_ = 0;
}
*new_width = new_output.width = static_cast<int>(input.width * scale + .5f);
*new_height = new_output.height = static_cast<int>(input.height * scale +

View File

@ -111,7 +111,6 @@ class CoordinatedVideoAdapter
public:
enum AdaptRequest { UPGRADE, KEEP, DOWNGRADE };
enum AdaptReasonEnum {
ADAPTREASON_NONE = 0,
ADAPTREASON_CPU = 1,
ADAPTREASON_BANDWIDTH = 2,
ADAPTREASON_VIEW = 4

View File

@ -111,7 +111,6 @@ void VideoCapturer::Construct() {
screencast_max_pixels_ = 0;
muted_ = false;
black_frame_count_down_ = kNumBlackFramesOnMute;
enable_video_adapter_ = true;
}
const std::vector<VideoFormat>* VideoCapturer::GetSupportedFormats() const {
@ -478,9 +477,9 @@ void VideoCapturer::OnFrameCaptured(VideoCapturer*,
}
VideoFrame* adapted_frame = &i420_frame;
if (enable_video_adapter_ && !IsScreencast()) {
if (!SignalAdaptFrame.is_empty() && !IsScreencast()) {
VideoFrame* out_frame = NULL;
video_adapter_.AdaptFrame(adapted_frame, &out_frame);
SignalAdaptFrame(this, adapted_frame, &out_frame);
if (!out_frame) {
return; // VideoAdapter dropped the frame.
}

View File

@ -37,7 +37,6 @@
#include "talk/base/scoped_ptr.h"
#include "talk/base/sigslot.h"
#include "talk/base/thread.h"
#include "talk/media/base/videoadapter.h"
#include "talk/media/base/videocommon.h"
#include "talk/media/devices/devicemanager.h"
@ -98,10 +97,12 @@ struct CapturedFrame {
// capturing. The subclasses implement the video capturer for various types of
// capturers and various platforms.
//
// The captured frames may need to be adapted (for example, cropping).
// Video adaptation is built into and enabled by default. After a frame has
// been captured from the device, it is sent to the video adapter, then video
// processors, then out to the encoder.
// The captured frames may need to be adapted (for example, cropping). Adaptors
// can be registered to the capturer or applied externally to the capturer.
// If the adaptor is needed, it acts as the downstream of VideoCapturer, adapts
// the captured frames, and delivers the adapted frames to other components
// such as the encoder. Effects can also be registered to the capturer or
// applied externally.
//
// Programming model:
// Create an object of a subclass of VideoCapturer
@ -110,7 +111,6 @@ struct CapturedFrame {
// SignalFrameCaptured.connect()
// Find the capture format for Start() by either calling GetSupportedFormats()
// and selecting one of the supported or calling GetBestCaptureFormat().
// video_adapter()->OnOutputFormatRequest(desired_encoding_format)
// Start()
// GetCaptureFormat() optionally
// Stop()
@ -255,6 +255,12 @@ class VideoCapturer
// Signal the captured frame to downstream.
sigslot::signal2<VideoCapturer*, const CapturedFrame*,
sigslot::multi_threaded_local> SignalFrameCaptured;
// A VideoAdapter should be hooked up to SignalAdaptFrame which will be
// called before forwarding the frame to SignalVideoFrame. The parameters
// are this capturer instance, the input video frame and output frame
// pointer, respectively.
sigslot::signal3<VideoCapturer*, const VideoFrame*, VideoFrame**,
sigslot::multi_threaded_local> SignalAdaptFrame;
// Signal the captured and possibly adapted frame to downstream consumers
// such as the encoder.
sigslot::signal2<VideoCapturer*, const VideoFrame*,
@ -273,19 +279,6 @@ class VideoCapturer
screencast_max_pixels_ = talk_base::_max(0, p);
}
// If true, run video adaptation. By default, video adaptation is enabled
// and users must call video_adapter()->OnOutputFormatRequest()
// to receive frames.
bool enable_video_adapter() const { return enable_video_adapter_; }
void set_enable_video_adapter(bool enable_video_adapter) {
enable_video_adapter_ = enable_video_adapter;
}
CoordinatedVideoAdapter* video_adapter() { return &video_adapter_; }
const CoordinatedVideoAdapter* video_adapter() const {
return &video_adapter_;
}
protected:
// Callback attached to SignalFrameCaptured where SignalVideoFrames is called.
void OnFrameCaptured(VideoCapturer* video_capturer,
@ -306,12 +299,6 @@ class VideoCapturer
void SetCaptureFormat(const VideoFormat* format) {
capture_format_.reset(format ? new VideoFormat(*format) : NULL);
if (capture_format_) {
ASSERT(capture_format_->interval > 0 &&
"Capture format expected to have positive interval.");
// Video adapter really only cares about capture format interval.
video_adapter_.SetInputFormat(*capture_format_);
}
}
void SetSupportedFormats(const std::vector<VideoFormat>& formats);
@ -356,9 +343,6 @@ class VideoCapturer
bool muted_;
int black_frame_count_down_;
bool enable_video_adapter_;
CoordinatedVideoAdapter video_adapter_;
talk_base::CriticalSection crit_;
VideoProcessors video_processors_;

View File

@ -94,7 +94,6 @@ class VideoCapturerTest
};
TEST_F(VideoCapturerTest, CaptureState) {
EXPECT_TRUE(capturer_.enable_video_adapter());
EXPECT_EQ(cricket::CS_RUNNING, capturer_.Start(cricket::VideoFormat(
640,
480,

View File

@ -473,7 +473,6 @@ class VideoMediaChannelTest : public testing::Test,
cricket::FOURCC_I420);
EXPECT_EQ(cricket::CS_RUNNING, video_capturer_->Start(format));
EXPECT_TRUE(channel_->SetCapturer(kSsrc, video_capturer_.get()));
EXPECT_TRUE(channel_->SetSendStreamFormat(kSsrc, format));
}
void SetUpSecondStream() {
EXPECT_TRUE(channel_->AddRecvStream(
@ -495,7 +494,6 @@ class VideoMediaChannelTest : public testing::Test,
EXPECT_TRUE(channel_->SetCapturer(kSsrc + 2, video_capturer_2_.get()));
// Make the second renderer available for use by a new stream.
EXPECT_TRUE(channel_->SetRenderer(kSsrc + 2, &renderer2_));
EXPECT_TRUE(channel_->SetSendStreamFormat(kSsrc + 2, format));
}
virtual void TearDown() {
channel_.reset();
@ -526,6 +524,7 @@ class VideoMediaChannelTest : public testing::Test,
if (video_capturer_) {
EXPECT_EQ(cricket::CS_RUNNING, video_capturer_->Start(capture_format));
}
if (video_capturer_2_) {
EXPECT_EQ(cricket::CS_RUNNING, video_capturer_2_->Start(capture_format));
}
@ -541,12 +540,6 @@ class VideoMediaChannelTest : public testing::Test,
bool SetSend(bool send) {
return channel_->SetSend(send);
}
bool SetSendStreamFormat(uint32 ssrc, const cricket::VideoCodec& codec) {
return channel_->SetSendStreamFormat(ssrc, cricket::VideoFormat(
codec.width, codec.height,
cricket::VideoFormat::FpsToInterval(codec.framerate),
cricket::FOURCC_ANY));
}
int DrainOutgoingPackets() {
int packets = 0;
do {
@ -718,7 +711,6 @@ class VideoMediaChannelTest : public testing::Test,
EXPECT_TRUE(channel_->SetCapturer(kSsrc, video_capturer_.get()));
EXPECT_TRUE(SetOneCodec(DefaultCodec()));
EXPECT_FALSE(channel_->sending());
EXPECT_TRUE(SetSendStreamFormat(kSsrc, DefaultCodec()));
EXPECT_TRUE(SetSend(true));
EXPECT_TRUE(channel_->sending());
EXPECT_TRUE(SendFrame());
@ -755,7 +747,6 @@ class VideoMediaChannelTest : public testing::Test,
// Tests that we can send and receive frames.
void SendAndReceive(const cricket::VideoCodec& codec) {
EXPECT_TRUE(SetOneCodec(codec));
EXPECT_TRUE(SetSendStreamFormat(kSsrc, codec));
EXPECT_TRUE(SetSend(true));
EXPECT_TRUE(channel_->SetRender(true));
EXPECT_EQ(0, renderer_.num_rendered_frames());
@ -768,7 +759,6 @@ class VideoMediaChannelTest : public testing::Test,
void SendManyResizeOnce() {
cricket::VideoCodec codec(DefaultCodec());
EXPECT_TRUE(SetOneCodec(codec));
EXPECT_TRUE(SetSendStreamFormat(kSsrc, codec));
EXPECT_TRUE(SetSend(true));
EXPECT_TRUE(channel_->SetRender(true));
EXPECT_EQ(0, renderer_.num_rendered_frames());
@ -783,7 +773,6 @@ class VideoMediaChannelTest : public testing::Test,
codec.width /= 2;
codec.height /= 2;
EXPECT_TRUE(SetOneCodec(codec));
EXPECT_TRUE(SetSendStreamFormat(kSsrc, codec));
EXPECT_TRUE(WaitAndSendFrame(30));
EXPECT_FRAME_WAIT(3, codec.width, codec.height, kTimeout);
EXPECT_EQ(2, renderer_.num_set_sizes());
@ -893,7 +882,6 @@ class VideoMediaChannelTest : public testing::Test,
EXPECT_TRUE(channel_->AddRecvStream(
cricket::StreamParams::CreateLegacy(1234)));
channel_->UpdateAspectRatio(640, 400);
EXPECT_TRUE(SetSendStreamFormat(kSsrc, DefaultCodec()));
EXPECT_TRUE(SetSend(true));
EXPECT_TRUE(channel_->SetRender(true));
EXPECT_TRUE(SendFrame());
@ -914,7 +902,6 @@ class VideoMediaChannelTest : public testing::Test,
EXPECT_TRUE(channel_->AddSendStream(
cricket::StreamParams::CreateLegacy(5678)));
EXPECT_TRUE(channel_->SetCapturer(5678, capturer.get()));
EXPECT_TRUE(channel_->SetSendStreamFormat(5678, format));
EXPECT_TRUE(channel_->AddRecvStream(
cricket::StreamParams::CreateLegacy(5678)));
EXPECT_TRUE(channel_->SetRenderer(5678, &renderer1));
@ -950,7 +937,6 @@ class VideoMediaChannelTest : public testing::Test,
// Test that we can set the SSRC for the default send source.
void SetSendSsrc() {
EXPECT_TRUE(SetDefaultCodec());
EXPECT_TRUE(SetSendStreamFormat(kSsrc, DefaultCodec()));
EXPECT_TRUE(SetSend(true));
EXPECT_TRUE(SendFrame());
EXPECT_TRUE_WAIT(NumRtpPackets() > 0, kTimeout);
@ -972,7 +958,6 @@ class VideoMediaChannelTest : public testing::Test,
EXPECT_TRUE(channel_->AddSendStream(
cricket::StreamParams::CreateLegacy(999)));
EXPECT_TRUE(channel_->SetCapturer(999u, video_capturer_.get()));
EXPECT_TRUE(SetSendStreamFormat(999u, DefaultCodec()));
EXPECT_TRUE(SetSend(true));
EXPECT_TRUE(WaitAndSendFrame(0));
EXPECT_TRUE_WAIT(NumRtpPackets() > 0, kTimeout);
@ -997,7 +982,6 @@ class VideoMediaChannelTest : public testing::Test,
talk_base::SetBE32(packet1.data() + 8, kSsrc);
channel_->SetRenderer(0, NULL);
EXPECT_TRUE(SetDefaultCodec());
EXPECT_TRUE(SetSendStreamFormat(kSsrc, DefaultCodec()));
EXPECT_TRUE(SetSend(true));
EXPECT_TRUE(channel_->SetRender(true));
EXPECT_EQ(0, renderer_.num_rendered_frames());
@ -1021,7 +1005,6 @@ class VideoMediaChannelTest : public testing::Test,
// Tests setting up and configuring a send stream.
void AddRemoveSendStreams() {
EXPECT_TRUE(SetOneCodec(DefaultCodec()));
EXPECT_TRUE(SetSendStreamFormat(kSsrc, DefaultCodec()));
EXPECT_TRUE(SetSend(true));
EXPECT_TRUE(channel_->SetRender(true));
EXPECT_TRUE(SendFrame());
@ -1168,7 +1151,6 @@ class VideoMediaChannelTest : public testing::Test,
void AddRemoveRecvStreamAndRender() {
cricket::FakeVideoRenderer renderer1;
EXPECT_TRUE(SetDefaultCodec());
EXPECT_TRUE(SetSendStreamFormat(kSsrc, DefaultCodec()));
EXPECT_TRUE(SetSend(true));
EXPECT_TRUE(channel_->SetRender(true));
EXPECT_TRUE(channel_->AddRecvStream(
@ -1213,7 +1195,6 @@ class VideoMediaChannelTest : public testing::Test,
cricket::VideoOptions vmo;
vmo.conference_mode.Set(true);
EXPECT_TRUE(channel_->SetOptions(vmo));
EXPECT_TRUE(SetSendStreamFormat(kSsrc, DefaultCodec()));
EXPECT_TRUE(SetSend(true));
EXPECT_TRUE(channel_->SetRender(true));
EXPECT_TRUE(channel_->AddRecvStream(
@ -1251,7 +1232,6 @@ class VideoMediaChannelTest : public testing::Test,
codec.height = 240;
const int time_between_send = TimeBetweenSend(codec);
EXPECT_TRUE(SetOneCodec(codec));
EXPECT_TRUE(SetSendStreamFormat(kSsrc, codec));
EXPECT_TRUE(SetSend(true));
EXPECT_TRUE(channel_->SetRender(true));
EXPECT_EQ(0, renderer_.num_rendered_frames());
@ -1273,7 +1253,6 @@ class VideoMediaChannelTest : public testing::Test,
int captured_frames = 1;
for (int iterations = 0; iterations < 2; ++iterations) {
EXPECT_TRUE(channel_->SetCapturer(kSsrc, capturer.get()));
EXPECT_TRUE(SetSendStreamFormat(kSsrc, codec));
talk_base::Thread::Current()->ProcessMessages(time_between_send);
EXPECT_TRUE(capturer->CaptureCustomFrame(format.width, format.height,
cricket::FOURCC_I420));
@ -1313,7 +1292,6 @@ class VideoMediaChannelTest : public testing::Test,
// added, the plugin shouldn't crash (and no black frame should be sent).
void RemoveCapturerWithoutAdd() {
EXPECT_TRUE(SetOneCodec(DefaultCodec()));
EXPECT_TRUE(SetSendStreamFormat(kSsrc, DefaultCodec()));
EXPECT_TRUE(SetSend(true));
EXPECT_TRUE(channel_->SetRender(true));
EXPECT_EQ(0, renderer_.num_rendered_frames());
@ -1375,8 +1353,6 @@ class VideoMediaChannelTest : public testing::Test,
// TODO(hellner): this seems like an unnecessary constraint, fix it.
EXPECT_TRUE(channel_->SetCapturer(1, capturer1.get()));
EXPECT_TRUE(channel_->SetCapturer(2, capturer2.get()));
EXPECT_TRUE(SetSendStreamFormat(1, DefaultCodec()));
EXPECT_TRUE(SetSendStreamFormat(2, DefaultCodec()));
EXPECT_TRUE(SetSend(true));
EXPECT_TRUE(channel_->SetRender(true));
// Test capturer associated with engine.
@ -1409,7 +1385,6 @@ class VideoMediaChannelTest : public testing::Test,
cricket::VideoCodec codec(DefaultCodec());
EXPECT_TRUE(SetOneCodec(codec));
EXPECT_TRUE(SetSendStreamFormat(kSsrc, DefaultCodec()));
EXPECT_TRUE(SetSend(true));
cricket::FakeVideoRenderer renderer;
@ -1435,7 +1410,6 @@ class VideoMediaChannelTest : public testing::Test,
// Capture frame to not get same frame timestamps as previous capturer.
capturer->CaptureFrame();
EXPECT_TRUE(channel_->SetCapturer(kSsrc, capturer.get()));
EXPECT_TRUE(channel_->SetSendStreamFormat(kSsrc, capture_format));
EXPECT_TRUE(talk_base::Thread::Current()->ProcessMessages(30));
EXPECT_TRUE(capturer->CaptureCustomFrame(kWidth, kHeight,
cricket::FOURCC_ARGB));
@ -1455,7 +1429,6 @@ class VideoMediaChannelTest : public testing::Test,
codec.height /= 2;
// Adapt the resolution.
EXPECT_TRUE(SetOneCodec(codec));
EXPECT_TRUE(SetSendStreamFormat(kSsrc, codec));
EXPECT_TRUE(WaitAndSendFrame(30));
EXPECT_FRAME_WAIT(2, codec.width, codec.height, kTimeout);
}
@ -1469,7 +1442,6 @@ class VideoMediaChannelTest : public testing::Test,
codec.height /= 2;
// Adapt the resolution.
EXPECT_TRUE(SetOneCodec(codec));
EXPECT_TRUE(SetSendStreamFormat(kSsrc, codec));
EXPECT_TRUE(WaitAndSendFrame(30));
EXPECT_FRAME_WAIT(2, codec.width, codec.height, kTimeout);
}
@ -1571,7 +1543,6 @@ class VideoMediaChannelTest : public testing::Test,
// frames being dropped.
void SetSendStreamFormat0x0() {
EXPECT_TRUE(SetOneCodec(DefaultCodec()));
EXPECT_TRUE(SetSendStreamFormat(kSsrc, DefaultCodec()));
EXPECT_TRUE(SetSend(true));
EXPECT_TRUE(channel_->SetRender(true));
EXPECT_EQ(0, renderer_.num_rendered_frames());
@ -1604,7 +1575,6 @@ class VideoMediaChannelTest : public testing::Test,
cricket::VideoFormat::FpsToInterval(30),
cricket::FOURCC_I420));
EXPECT_TRUE(channel_->SetCapturer(kSsrc, &video_capturer));
EXPECT_TRUE(SetSendStreamFormat(kSsrc, DefaultCodec()));
EXPECT_TRUE(SetSend(true));
EXPECT_TRUE(channel_->SetRender(true));
EXPECT_EQ(frame_count, renderer_.num_rendered_frames());

View File

@ -562,8 +562,6 @@ class WebRtcOveruseObserver : public webrtc::CpuOveruseObserver {
enabled_ = enable;
}
bool enabled() const { return enabled_; }
private:
CoordinatedVideoAdapter* video_adapter_;
bool enabled_;
@ -586,8 +584,13 @@ class WebRtcVideoChannelSendInfo : public sigslot::has_slots<> {
external_capture_(external_capture),
capturer_updated_(false),
interval_(0),
cpu_monitor_(cpu_monitor),
overuse_observer_enabled_(false) {
cpu_monitor_(cpu_monitor) {
overuse_observer_.reset(new WebRtcOveruseObserver(&video_adapter_));
SignalCpuAdaptationUnable.repeat(video_adapter_.SignalCpuAdaptationUnable);
if (cpu_monitor) {
cpu_monitor->SignalUpdate.connect(
&video_adapter_, &CoordinatedVideoAdapter::OnCpuLoadUpdated);
}
}
int channel_id() const { return channel_id_; }
@ -611,10 +614,7 @@ class WebRtcVideoChannelSendInfo : public sigslot::has_slots<> {
if (video_format_ != cricket::VideoFormat()) {
interval_ = video_format_.interval;
}
CoordinatedVideoAdapter* adapter = video_adapter();
if (adapter) {
adapter->OnOutputFormatRequest(video_format_);
}
video_adapter_.OnOutputFormatRequest(video_format_);
}
void set_interval(int64 interval) {
if (video_format() == cricket::VideoFormat()) {
@ -623,12 +623,17 @@ class WebRtcVideoChannelSendInfo : public sigslot::has_slots<> {
}
int64 interval() { return interval_; }
int CurrentAdaptReason() const {
const CoordinatedVideoAdapter* adapter = video_adapter();
if (!adapter) {
return CoordinatedVideoAdapter::ADAPTREASON_NONE;
void InitializeAdapterOutputFormat(const webrtc::VideoCodec& codec) {
VideoFormat format(codec.width, codec.height,
VideoFormat::FpsToInterval(codec.maxFramerate),
FOURCC_I420);
if (video_adapter_.output_format().IsSize0x0()) {
video_adapter_.SetOutputFormat(format);
}
return video_adapter()->adapt_reason();
}
int CurrentAdaptReason() const {
return video_adapter_.adapt_reason();
}
webrtc::CpuOveruseObserver* overuse_observer() {
return overuse_observer_.get();
@ -653,113 +658,69 @@ class WebRtcVideoChannelSendInfo : public sigslot::has_slots<> {
if (video_capturer == video_capturer_) {
return;
}
CoordinatedVideoAdapter* old_video_adapter = video_adapter();
if (old_video_adapter) {
// Disconnect signals from old video adapter.
SignalCpuAdaptationUnable.disconnect(old_video_adapter);
if (cpu_monitor_) {
cpu_monitor_->SignalUpdate.disconnect(old_video_adapter);
}
}
capturer_updated_ = true;
// Disconnect from the previous video capturer.
if (video_capturer_) {
video_capturer_->SignalAdaptFrame.disconnect(this);
}
video_capturer_ = video_capturer;
if (!video_capturer) {
overuse_observer_.reset();
return;
if (video_capturer && !video_capturer->IsScreencast()) {
const VideoFormat* capture_format = video_capturer->GetCaptureFormat();
if (capture_format) {
// TODO(thorcarpenter): This is broken. Video capturer doesn't have
// a capture format until the capturer is started. So, if
// the capturer is started immediately after calling set_video_capturer
// video adapter may not have the input format set, the interval may
// be zero, and all frames may be dropped.
// Consider fixing this by having video_adapter keep a pointer to the
// video capturer.
video_adapter_.SetInputFormat(*capture_format);
}
// TODO(thorcarpenter): When the adapter supports "only frame dropping"
// mode, also hook it up to screencast capturers.
video_capturer->SignalAdaptFrame.connect(
this, &WebRtcVideoChannelSendInfo::AdaptFrame);
}
CoordinatedVideoAdapter* adapter = video_adapter();
ASSERT(adapter && "Video adapter should not be null here.");
UpdateAdapterCpuOptions();
adapter->OnOutputFormatRequest(video_format_);
overuse_observer_.reset(new WebRtcOveruseObserver(adapter));
// (Dis)connect the video adapter from the cpu monitor as appropriate.
SetCpuOveruseDetection(overuse_observer_enabled_);
SignalCpuAdaptationUnable.repeat(adapter->SignalCpuAdaptationUnable);
}
CoordinatedVideoAdapter* video_adapter() {
if (!video_capturer_) {
return NULL;
}
return video_capturer_->video_adapter();
}
const CoordinatedVideoAdapter* video_adapter() const {
if (!video_capturer_) {
return NULL;
}
return video_capturer_->video_adapter();
CoordinatedVideoAdapter* video_adapter() { return &video_adapter_; }
void AdaptFrame(VideoCapturer* capturer, const VideoFrame* input,
VideoFrame** adapted) {
video_adapter_.AdaptFrame(input, adapted);
}
void ApplyCpuOptions(const VideoOptions& video_options) {
// Use video_options_.SetAll() instead of assignment so that unset value in
// video_options will not overwrite the previous option value.
video_options_.SetAll(video_options);
UpdateAdapterCpuOptions();
}
void UpdateAdapterCpuOptions() {
if (!video_capturer_) {
return;
}
void ApplyCpuOptions(const VideoOptions& options) {
bool cpu_adapt, cpu_smoothing, adapt_third;
float low, med, high;
// TODO(thorcarpenter): Have VideoAdapter be responsible for setting
// all these video options.
CoordinatedVideoAdapter* video_adapter = video_capturer_->video_adapter();
if (video_options_.adapt_input_to_cpu_usage.Get(&cpu_adapt)) {
video_adapter->set_cpu_adaptation(cpu_adapt);
if (options.adapt_input_to_cpu_usage.Get(&cpu_adapt)) {
video_adapter_.set_cpu_adaptation(cpu_adapt);
}
if (video_options_.adapt_cpu_with_smoothing.Get(&cpu_smoothing)) {
video_adapter->set_cpu_smoothing(cpu_smoothing);
if (options.adapt_cpu_with_smoothing.Get(&cpu_smoothing)) {
video_adapter_.set_cpu_smoothing(cpu_smoothing);
}
if (video_options_.process_adaptation_threshhold.Get(&med)) {
video_adapter->set_process_threshold(med);
if (options.process_adaptation_threshhold.Get(&med)) {
video_adapter_.set_process_threshold(med);
}
if (video_options_.system_low_adaptation_threshhold.Get(&low)) {
video_adapter->set_low_system_threshold(low);
if (options.system_low_adaptation_threshhold.Get(&low)) {
video_adapter_.set_low_system_threshold(low);
}
if (video_options_.system_high_adaptation_threshhold.Get(&high)) {
video_adapter->set_high_system_threshold(high);
if (options.system_high_adaptation_threshhold.Get(&high)) {
video_adapter_.set_high_system_threshold(high);
}
if (video_options_.video_adapt_third.Get(&adapt_third)) {
video_adapter->set_scale_third(adapt_third);
if (options.video_adapt_third.Get(&adapt_third)) {
video_adapter_.set_scale_third(adapt_third);
}
}
void SetCpuOveruseDetection(bool enable) {
overuse_observer_enabled_ = enable;
if (!overuse_observer_) {
// Cannot actually use the overuse detector until it is initialized
// with a video adapter.
return;
if (cpu_monitor_ && enable) {
cpu_monitor_->SignalUpdate.disconnect(&video_adapter_);
}
overuse_observer_->Enable(enable);
// If overuse detection is enabled, it will signal the video adapter
// instead of the cpu monitor. If disabled, connect the adapter to the
// cpu monitor.
CoordinatedVideoAdapter* adapter = video_adapter();
if (adapter) {
adapter->set_cpu_adaptation(enable);
if (cpu_monitor_) {
if (enable) {
cpu_monitor_->SignalUpdate.disconnect(adapter);
} else {
cpu_monitor_->SignalUpdate.connect(
adapter, &CoordinatedVideoAdapter::OnCpuLoadUpdated);
}
}
}
video_adapter_.set_cpu_adaptation(enable);
}
void ProcessFrame(const VideoFrame& original_frame, bool mute,
@ -813,11 +774,9 @@ class WebRtcVideoChannelSendInfo : public sigslot::has_slots<> {
int64 interval_;
CoordinatedVideoAdapter video_adapter_;
talk_base::CpuMonitor* cpu_monitor_;
talk_base::scoped_ptr<WebRtcOveruseObserver> overuse_observer_;
bool overuse_observer_enabled_;
VideoOptions video_options_;
};
const WebRtcVideoEngine::VideoCodecPref
@ -1718,6 +1677,12 @@ bool WebRtcVideoMediaChannel::SetSendCodecs(
return false;
}
for (SendChannelMap::iterator iter = send_channels_.begin();
iter != send_channels_.end(); ++iter) {
WebRtcVideoChannelSendInfo* send_channel = iter->second;
send_channel->InitializeAdapterOutputFormat(codec);
}
LogSendCodecChange("SetSendCodecs()");
return true;
@ -1733,6 +1698,10 @@ bool WebRtcVideoMediaChannel::GetSendCodec(VideoCodec* send_codec) {
bool WebRtcVideoMediaChannel::SetSendStreamFormat(uint32 ssrc,
const VideoFormat& format) {
if (!send_codec_) {
LOG(LS_ERROR) << "The send codec has not been set yet.";
return false;
}
WebRtcVideoChannelSendInfo* send_channel = GetSendChannel(ssrc);
if (!send_channel) {
LOG(LS_ERROR) << "The specified ssrc " << ssrc << " is not in use.";

View File

@ -1292,7 +1292,6 @@ TEST_F(WebRtcVideoEngineTestFake, MultipleSendStreamsWithOneCapturer) {
cricket::StreamParams::CreateLegacy(kSsrcs2[i])));
// Register the capturer to the ssrc.
EXPECT_TRUE(channel_->SetCapturer(kSsrcs2[i], &capturer));
EXPECT_TRUE(channel_->SetSendStreamFormat(kSsrcs2[i], capture_format_vga));
}
const int channel0 = vie_.GetChannelFromLocalSsrc(kSsrcs2[0]);

View File

@ -1675,12 +1675,10 @@ class WebRtcVoiceMediaChannel::WebRtcVoiceChannelRenderer
// Starts the rendering by setting a sink to the renderer to get data
// callback.
// This method is called on the libjingle worker thread.
// TODO(xians): Make sure Start() is called only once.
void Start(AudioRenderer* renderer) {
talk_base::CritScope lock(&lock_);
ASSERT(renderer != NULL);
if (renderer_ != NULL) {
if (renderer_) {
ASSERT(renderer_ == renderer);
return;
}
@ -1694,10 +1692,8 @@ class WebRtcVoiceMediaChannel::WebRtcVoiceChannelRenderer
// Stops rendering by setting the sink of the renderer to NULL. No data
// callback will be received after this method.
// This method is called on the libjingle worker thread.
void Stop() {
talk_base::CritScope lock(&lock_);
if (renderer_ == NULL)
if (!renderer_)
return;
renderer_->RemoveChannel(channel_);
@ -1706,29 +1702,13 @@ class WebRtcVoiceMediaChannel::WebRtcVoiceChannelRenderer
}
// AudioRenderer::Sink implementation.
// This method is called on the audio thread.
virtual void OnData(const void* audio_data,
int bits_per_sample,
int sample_rate,
int number_of_channels,
int number_of_frames) OVERRIDE {
#ifdef USE_WEBRTC_DEV_BRANCH
voe_audio_transport_->OnData(channel_,
audio_data,
bits_per_sample,
sample_rate,
number_of_channels,
number_of_frames);
#endif
}
// Callback from the |renderer_| when it is going away. In case Start() has
// never been called, this callback won't be triggered.
virtual void OnClose() OVERRIDE {
talk_base::CritScope lock(&lock_);
// Set |renderer_| to NULL to make sure no more callback will get into
// the renderer.
renderer_ = NULL;
// TODO(xians): Make new interface in AudioTransport to pass the data to
// WebRtc VoE channel.
}
// Accessor to the VoE channel ID.
@ -1742,9 +1722,6 @@ class WebRtcVoiceMediaChannel::WebRtcVoiceChannelRenderer
// PeerConnection will make sure invalidating the pointer before the object
// goes away.
AudioRenderer* renderer_;
// Protects |renderer_| in Start(), Stop() and OnClose().
talk_base::CriticalSection lock_;
};
// WebRtcVoiceMediaChannel

View File

@ -1,186 +0,0 @@
/*
* libjingle
* Copyright 2014 Google Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
* EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#if defined(HAVE_SRTP) && defined(ENABLE_EXTERNAL_AUTH)
#ifdef SRTP_RELATIVE_PATH
#include "srtp.h" // NOLINT
#else
#include "third_party/libsrtp/include/srtp.h"
#endif // SRTP_RELATIVE_PATH
#include "talk/session/media/external_hmac.h"
#include "talk/base/logging.h"
// The libsrtp debug module for the external HMAC authentication module.
debug_module_t mod_external_hmac = {
0, // Debugging is off by default
(char*)"external-hmac-sha-1" // Printable name for module
};
extern auth_type_t external_hmac;
// Begin test case 0.
// Key and message of libsrtp's built-in HMAC-SHA-1 self-test vector
// (20 x 0x0b key, "Hi There" message). Because this module emits a fixed
// placeholder tag instead of a real digest, the expected tag below is
// |external_hmac_fake_tag| rather than the real HMAC of the message.
uint8_t
external_hmac_test_case_0_key[20] = {
0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b,
0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b,
0x0b, 0x0b, 0x0b, 0x0b
};
uint8_t
external_hmac_test_case_0_data[8] = {
0x48, 0x69, 0x20, 0x54, 0x68, 0x65, 0x72, 0x65 // "Hi There"
};
// The fixed tag external_hmac_compute() copies into every packet.
uint8_t
external_hmac_fake_tag[10] = {
0xba, 0xdd, 0xba, 0xdd, 0xba, 0xdd, 0xba, 0xdd, 0xba, 0xdd
};
auth_test_case_t
external_hmac_test_case_0 = {
20, // Octets in key
external_hmac_test_case_0_key, // Key
8, // Octets in data
external_hmac_test_case_0_data, // Data
10, // Octets in tag
external_hmac_fake_tag, // Tag
NULL // Pointer to next testcase
};
// Allocation hook required by auth_type_t. Allocates an auth_t and its
// external_hmac_ctx_t state as one contiguous crypto_alloc() block (context
// immediately after the auth_t) and wires up the auth_t fields.
// Returns err_status_bad_param for oversized key/tag lengths and
// err_status_alloc_fail when crypto_alloc() fails.
err_status_t
external_hmac_alloc(auth_t** a, int key_len, int out_len) {
uint8_t* pointer;
// Check key length - note that we don't support keys larger
// than 20 bytes yet
if (key_len > 20)
return err_status_bad_param;
// Check output length - should be less than 20 bytes.
if (out_len > 20)
return err_status_bad_param;
// Allocate memory for auth and hmac_ctx_t structures.
pointer = reinterpret_cast<uint8_t*>(
crypto_alloc(sizeof(external_hmac_ctx_t) + sizeof(auth_t)));
if (pointer == NULL)
return err_status_alloc_fail;
// Set pointers: the context state lives right behind the auth_t header.
*a = (auth_t *)pointer;
(*a)->type = &external_hmac;
(*a)->state = pointer + sizeof(auth_t);
(*a)->out_len = out_len;
(*a)->key_len = key_len;
(*a)->prefix_len = 0;
// Increment global count of all hmac uses.
external_hmac.ref_count++;
return err_status_ok;
}
// Deallocation hook required by auth_type_t. Zeroizes the combined
// auth_t + context block (so key material does not linger in freed memory),
// frees it, and drops the module-wide reference count.
err_status_t
external_hmac_dealloc(auth_t* a) {
// Zeroize entire state
octet_string_set_to_zero((uint8_t *)a,
sizeof(external_hmac_ctx_t) + sizeof(auth_t));
// Free memory
crypto_free(a);
// Decrement global count of all hmac uses.
external_hmac.ref_count--;
return err_status_ok;
}
// Key-init hook required by auth_type_t. Stores |key| in the context so it
// can later be exposed (e.g. via SrtpSession::GetRtpAuthParams).
// Returns err_status_bad_param when |key_len| is negative or exceeds the
// fixed HMAC_KEY_LENGTH buffer.
err_status_t
external_hmac_init(external_hmac_ctx_t* state,
                   const uint8_t* key, int key_len) {
  if (key_len < 0 || key_len > HMAC_KEY_LENGTH)
    return err_status_bad_param;
  // Zero the whole fixed-size buffer, not just |key_len| bytes: the original
  // memset(state->key, 0, key_len) was dead code (immediately overwritten by
  // the memcpy below) and left the tail of |key| uninitialized for short
  // keys. Zeroing the full buffer makes the context contents deterministic.
  memset(state->key, 0, HMAC_KEY_LENGTH);
  memcpy(state->key, key, key_len);
  state->key_length = key_len;
  return err_status_ok;
}
// Per-packet start hook required by auth_type_t. Intentionally a no-op:
// this module never computes a digest itself (see external_hmac_compute).
err_status_t
external_hmac_start(external_hmac_ctx_t* state) {
return err_status_ok;
}
// Message-absorb hook required by auth_type_t. Intentionally a no-op: the
// real HMAC is presumably computed outside libsrtp (see file header).
err_status_t
external_hmac_update(external_hmac_ctx_t* state, const uint8_t* message,
int msg_octets) {
return err_status_ok;
}
// Tag-emit hook required by auth_type_t. Copies the first |tag_len| bytes
// of the fixed placeholder tag into |result| instead of computing a digest;
// the authenticator is expected to be produced externally using the key
// exposed from this context — TODO confirm against the consuming code.
// Note: |tag_len| is trusted to be <= sizeof(external_hmac_fake_tag) (10),
// as negotiated via out_len in external_hmac_alloc().
err_status_t
external_hmac_compute(external_hmac_ctx_t* state, const void* message,
int msg_octets, int tag_len, uint8_t* result) {
memcpy(result, external_hmac_fake_tag, tag_len);
return err_status_ok;
}
char external_hmac_description[] = "external hmac sha-1 authentication";
// auth_type_t external_hmac is the hmac metaobject: the function table,
// self-test data, and debug module libsrtp uses to drive this module once
// it is registered under the EXTERNAL_HMAC_SHA1 id.
auth_type_t
external_hmac = {
(auth_alloc_func) external_hmac_alloc,
(auth_dealloc_func) external_hmac_dealloc,
(auth_init_func) external_hmac_init,
(auth_compute_func) external_hmac_compute,
(auth_update_func) external_hmac_update,
(auth_start_func) external_hmac_start,
(char *) external_hmac_description,
(int) 0, /* instance count */
(auth_test_case_t *) &external_hmac_test_case_0,
(debug_module_t *) &mod_external_hmac,
(auth_type_id_t) EXTERNAL_HMAC_SHA1
};
// Registers |external_hmac| with libsrtp's crypto kernel under the
// EXTERNAL_HMAC_SHA1 id so srtp_policy_t can select it per stream.
// Returns err_status_ok on success, err_status_fail (after logging the
// kernel's error code) if the registration is rejected.
err_status_t
external_crypto_init() {
  const err_status_t replace_status =
      crypto_kernel_replace_auth_type(&external_hmac, EXTERNAL_HMAC_SHA1);
  if (!replace_status)
    return err_status_ok;
  LOG(LS_ERROR) << "Error in replacing default auth module, error: "
                << replace_status;
  return err_status_fail;
}
#endif // defined(HAVE_SRTP) && defined(ENABLE_EXTERNAL_AUTH)

View File

@ -1,91 +0,0 @@
/*
* libjingle
* Copyright 2014 Google Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
* EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef TALK_SESSION_MEDIA_EXTERNAL_HMAC_H_
#define TALK_SESSION_MEDIA_EXTERNAL_HMAC_H_
// External libsrtp HMAC auth module which implements methods defined in
// auth_type_t.
// The default auth module will be replaced only when the ENABLE_EXTERNAL_AUTH
// flag is enabled. This allows us to access to authentication keys,
// as the default auth implementation doesn't provide access and avoids
// hashing each packet twice.
// How will libsrtp select this module?
// Libsrtp defines authentication function types identified by an unsigned
// integer, e.g. HMAC_SHA1 is 3. Using authentication ids, the application
// can plug any desired authentication modules into libsrtp.
// libsrtp also provides a mechanism to select different auth functions for
// individual streams. This can be done by setting the right value in
// the auth_type of srtp_policy_t. The application must first register auth
// functions and the corresponding authentication id using
// crypto_kernel_replace_auth_type function.
#if defined(HAVE_SRTP) && defined(ENABLE_EXTERNAL_AUTH)
#ifdef SRTP_RELATIVE_PATH
#include "crypto_types.h" // NOLINT
#else
#include "third_party/libsrtp/crypto/include/crypto_types.h"
#endif // SRTP_RELATIVE_PATH
// Auth-type id used to register this module with libsrtp: one past the
// built-in HMAC_SHA1 id so both can coexist. Parenthesized so the macro
// expands safely inside larger expressions (the unparenthesized
// HMAC_SHA1 + 1 form is an operator-precedence hazard).
#define EXTERNAL_HMAC_SHA1 (HMAC_SHA1 + 1)
#define HMAC_KEY_LENGTH 20
// The HMAC context structure used to store authentication keys.
// The key is stored inline in |key| and filled in by external_hmac_init();
// |key_length| holds the number of valid bytes (<= HMAC_KEY_LENGTH).
// This context is owned by srtp_t in a template context.
typedef struct {
uint8_t key[HMAC_KEY_LENGTH];
int key_length;
} external_hmac_ctx_t;
err_status_t
external_hmac_alloc(auth_t** a, int key_len, int out_len);
err_status_t
external_hmac_dealloc(auth_t* a);
err_status_t
external_hmac_init(external_hmac_ctx_t* state,
const uint8_t* key, int key_len);
err_status_t
external_hmac_start(external_hmac_ctx_t* state);
err_status_t
external_hmac_update(external_hmac_ctx_t* state, const uint8_t* message,
int msg_octets);
err_status_t
external_hmac_compute(external_hmac_ctx_t* state, const void* message,
int msg_octets, int tag_len, uint8_t* result);
err_status_t
external_crypto_init();
#endif // defined(HAVE_SRTP) && defined(ENABLE_EXTERNAL_AUTH)
#endif // TALK_SESSION_MEDIA_EXTERNAL_HMAC_H_

View File

@ -44,16 +44,9 @@
#ifdef HAVE_SRTP
#ifdef SRTP_RELATIVE_PATH
#include "srtp.h" // NOLINT
extern "C" srtp_stream_t srtp_get_stream(srtp_t srtp, uint32_t ssrc);
#include "srtp_priv.h" // NOLINT
#else
#include "third_party/libsrtp/include/srtp.h"
extern "C" srtp_stream_t srtp_get_stream(srtp_t srtp, uint32_t ssrc);
#include "third_party/libsrtp/include/srtp_priv.h"
#endif // SRTP_RELATIVE_PATH
#ifdef ENABLE_EXTERNAL_AUTH
#include "talk/session/media/external_hmac.h"
#endif // ENABLE_EXTERNAL_AUTH
#ifdef _DEBUG
extern "C" debug_module_t mod_srtp;
extern "C" debug_module_t mod_auth;
@ -165,6 +158,7 @@ bool SrtpFilter::SetRtpParams(const std::string& send_cs,
LOG(LS_INFO) << "SRTP activated with negotiated parameters:"
<< " send cipher_suite " << send_cs
<< " recv cipher_suite " << recv_cs;
return true;
}
@ -214,16 +208,6 @@ bool SrtpFilter::ProtectRtp(void* p, int in_len, int max_len, int* out_len) {
return send_session_->ProtectRtp(p, in_len, max_len, out_len);
}
// Overload of ProtectRtp() that additionally reports, via |index|, the SRTP
// packet index assigned to the outgoing packet. Fails (with a warning) when
// SRTP has not been activated yet.
bool SrtpFilter::ProtectRtp(void* p, int in_len, int max_len, int* out_len,
int64* index) {
if (!IsActive()) {
LOG(LS_WARNING) << "Failed to ProtectRtp: SRTP not active";
return false;
}
// Delegate to the send session; it fills |index| when it is non-NULL.
return send_session_->ProtectRtp(p, in_len, max_len, out_len, index);
}
bool SrtpFilter::ProtectRtcp(void* p, int in_len, int max_len, int* out_len) {
if (!IsActive()) {
LOG(LS_WARNING) << "Failed to ProtectRtcp: SRTP not active";
@ -256,15 +240,6 @@ bool SrtpFilter::UnprotectRtcp(void* p, int in_len, int* out_len) {
}
}
// Fetches the RTP authentication key and tag length from the active send
// session (see SrtpSession::GetRtpAuthParams for ownership caveats).
// Fails (with a warning) when SRTP has not been activated yet.
bool SrtpFilter::GetRtpAuthParams(uint8** key, int* key_len, int* tag_len) {
if (!IsActive()) {
LOG(LS_WARNING) << "Failed to GetRtpAuthParams: SRTP not active";
return false;
}
return send_session_->GetRtpAuthParams(key, key_len, tag_len);
}
void SrtpFilter::set_signal_silent_time(uint32 signal_silent_time_in_ms) {
signal_silent_time_in_ms_ = signal_silent_time_in_ms;
if (state_ == ST_ACTIVE) {
@ -521,14 +496,6 @@ bool SrtpSession::ProtectRtp(void* p, int in_len, int max_len, int* out_len) {
return true;
}
// Protects an RTP packet in-place and, when |index| is non-NULL, also
// returns the packet index recorded for it in the send stream's replay
// database. Fails if protection fails or (with non-NULL |index|) if the
// packet's SSRC has no stream in this session.
bool SrtpSession::ProtectRtp(void* p, int in_len, int max_len, int* out_len,
int64* index) {
if (!ProtectRtp(p, in_len, max_len, out_len)) {
return false;
}
return (index) ? GetSendStreamPacketIndex(p, in_len, index) : true;
}
bool SrtpSession::ProtectRtcp(void* p, int in_len, int max_len, int* out_len) {
if (!session_) {
LOG(LS_WARNING) << "Failed to protect SRTCP packet: no SRTP Session";
@ -587,42 +554,6 @@ bool SrtpSession::UnprotectRtcp(void* p, int in_len, int* out_len) {
return true;
}
// Exposes the RTP authentication parameters from libsrtp's stream-template
// context: the external-HMAC key bytes, their length, and the negotiated
// RTP auth tag length. Only functional when built with ENABLE_EXTERNAL_AUTH
// (the auth state is then an external_hmac_ctx_t); otherwise always fails.
bool SrtpSession::GetRtpAuthParams(uint8** key, int* key_len,
int* tag_len) {
#if defined(ENABLE_EXTERNAL_AUTH)
external_hmac_ctx_t* external_hmac = NULL;
// stream_template will be the reference context for other streams.
// Let's use it for getting the keys.
srtp_stream_ctx_t* srtp_context = session_->stream_template;
if (srtp_context && srtp_context->rtp_auth) {
external_hmac = reinterpret_cast<external_hmac_ctx_t*>(
srtp_context->rtp_auth->state);
}
if (!external_hmac) {
LOG(LS_ERROR) << "Failed to get auth keys from libsrtp!.";
return false;
}
// NOTE: |*key| points into memory owned by libsrtp's auth context; the
// caller must not free it or use it after this session is destroyed.
*key = external_hmac->key;
*key_len = external_hmac->key_length;
*tag_len = rtp_auth_tag_len_;
return true;
#else
return false;
#endif
}
// Looks up the send stream for the packet's SSRC (read straight from the
// SRTP header at |p|) and reads the current packet index from its replay
// database. Returns false if no stream with that SSRC exists.
bool SrtpSession::GetSendStreamPacketIndex(void* p, int in_len, int64* index) {
srtp_hdr_t* hdr = reinterpret_cast<srtp_hdr_t*>(p);
srtp_stream_ctx_t* stream = srtp_get_stream(session_, hdr->ssrc);
if (stream == NULL)
return false;
*index = rdbx_get_packet_index(&stream->rtp_rdbx);
return true;
}
void SrtpSession::set_signal_silent_time(uint32 signal_silent_time_in_ms) {
srtp_stat_->set_signal_silent_time(signal_silent_time_in_ms);
}
@ -665,13 +596,6 @@ bool SrtpSession::SetKey(int type, const std::string& cs,
// TODO(astor) parse window size from WSH session-param
policy.window_size = 1024;
policy.allow_repeat_tx = 1;
// If external authentication option is enabled, supply custom auth module
// id EXTERNAL_HMAC_SHA1 in the policy structure.
// We want to set this option only for rtp packets.
// By default policy structure is initialized to HMAC_SHA1.
#if defined(ENABLE_EXTERNAL_AUTH)
policy.rtp.auth_type = EXTERNAL_HMAC_SHA1;
#endif
policy.next = NULL;
int err = srtp_create(&session_, &policy);
@ -680,7 +604,6 @@ bool SrtpSession::SetKey(int type, const std::string& cs,
return false;
}
rtp_auth_tag_len_ = policy.rtp.auth_tag_len;
rtcp_auth_tag_len_ = policy.rtcp.auth_tag_len;
return true;
@ -700,13 +623,7 @@ bool SrtpSession::Init() {
LOG(LS_ERROR) << "Failed to install SRTP event handler, err=" << err;
return false;
}
#if defined(ENABLE_EXTERNAL_AUTH)
err = external_crypto_init();
if (err != err_status_ok) {
LOG(LS_ERROR) << "Failed to initialize fake auth, err=" << err;
return false;
}
#endif
inited_ = true;
}

View File

@ -122,18 +122,12 @@ class SrtpFilter {
// Encrypts/signs an individual RTP/RTCP packet, in-place.
// If an HMAC is used, this will increase the packet size.
bool ProtectRtp(void* data, int in_len, int max_len, int* out_len);
// Overloaded version, outputs packet index.
bool ProtectRtp(void* data, int in_len, int max_len, int* out_len,
int64* index);
bool ProtectRtcp(void* data, int in_len, int max_len, int* out_len);
// Decrypts/verifies an invidiual RTP/RTCP packet.
// If an HMAC is used, this will decrease the packet size.
bool UnprotectRtp(void* data, int in_len, int* out_len);
bool UnprotectRtcp(void* data, int in_len, int* out_len);
// Returns rtp auth params from srtp context.
bool GetRtpAuthParams(uint8** key, int* key_len, int* tag_len);
// Update the silent threshold (in ms) for signaling errors.
void set_signal_silent_time(uint32 signal_silent_time_in_ms);
@ -206,18 +200,12 @@ class SrtpSession {
// Encrypts/signs an individual RTP/RTCP packet, in-place.
// If an HMAC is used, this will increase the packet size.
bool ProtectRtp(void* data, int in_len, int max_len, int* out_len);
// Overloaded version, outputs packet index.
bool ProtectRtp(void* data, int in_len, int max_len, int* out_len,
int64* index);
bool ProtectRtcp(void* data, int in_len, int max_len, int* out_len);
// Decrypts/verifies an invidiual RTP/RTCP packet.
// If an HMAC is used, this will decrease the packet size.
bool UnprotectRtp(void* data, int in_len, int* out_len);
bool UnprotectRtcp(void* data, int in_len, int* out_len);
// Helper method to get authentication params.
bool GetRtpAuthParams(uint8** key, int* key_len, int* tag_len);
// Update the silent threshold (in ms) for signaling errors.
void set_signal_silent_time(uint32 signal_silent_time_in_ms);
@ -229,13 +217,9 @@ class SrtpSession {
private:
bool SetKey(int type, const std::string& cs, const uint8* key, int len);
// Returns send stream current packet index from srtp db.
bool GetSendStreamPacketIndex(void* data, int in_len, int64* index);
static bool Init();
void HandleEvent(const srtp_event_data_t* ev);
static void HandleEventThunk(srtp_event_data_t* ev);
static std::list<SrtpSession*>* sessions();
srtp_t session_;

View File

@ -522,25 +522,6 @@ TEST_F(SrtpFilterTest, TestSetParamsKeyTooShort) {
kTestKey1, kTestKeyLen - 1));
}
#if defined(ENABLE_EXTERNAL_AUTH)
// After negotiating CS_AES_CM_128_HMAC_SHA1_32 for both RTP and RTCP,
// GetRtpAuthParams must expose a non-NULL 20-byte auth key and the 4-byte
// (32-bit) tag length implied by the _32 cipher suite.
TEST_F(SrtpFilterTest, TestGetSendAuthParams) {
EXPECT_TRUE(f1_.SetRtpParams(CS_AES_CM_128_HMAC_SHA1_32,
kTestKey1, kTestKeyLen,
CS_AES_CM_128_HMAC_SHA1_32,
kTestKey2, kTestKeyLen));
EXPECT_TRUE(f1_.SetRtcpParams(CS_AES_CM_128_HMAC_SHA1_32,
kTestKey1, kTestKeyLen,
CS_AES_CM_128_HMAC_SHA1_32,
kTestKey2, kTestKeyLen));
uint8* auth_key = NULL;
int auth_key_len = 0, auth_tag_len = 0;
EXPECT_TRUE(f1_.GetRtpAuthParams(&auth_key, &auth_key_len, &auth_tag_len));
EXPECT_TRUE(auth_key != NULL);
EXPECT_EQ(20, auth_key_len);
EXPECT_EQ(4, auth_tag_len);
}
#endif
class SrtpSessionTest : public testing::Test {
protected:
virtual void SetUp() {
@ -625,15 +606,6 @@ TEST_F(SrtpSessionTest, TestProtect_AES_CM_128_HMAC_SHA1_32) {
TestUnprotectRtcp(CS_AES_CM_128_HMAC_SHA1_32);
}
// The index-reporting ProtectRtp overload must report packet index 1 for
// the first packet protected on a freshly keyed send session.
TEST_F(SrtpSessionTest, TestGetSendStreamPacketIndex) {
EXPECT_TRUE(s1_.SetSend(CS_AES_CM_128_HMAC_SHA1_32, kTestKey1, kTestKeyLen));
int64 index;
int out_len = 0;
EXPECT_TRUE(s1_.ProtectRtp(rtp_packet_, rtp_len_,
sizeof(rtp_packet_), &out_len, &index));
EXPECT_EQ(1, index);
}
// Test that we fail to unprotect if someone tampers with the RTP/RTCP paylaods.
TEST_F(SrtpSessionTest, TestTamperReject) {
int out_len;