* Update libjingle to 50389769.

* Together with "Add texture support for i420 video frame." from
wuchengli@chromium.org.
https://webrtc-codereview.appspot.com/1413004

RISK=P1
TESTED=try bots
R=fischman@webrtc.org

Review URL: https://webrtc-codereview.appspot.com/1967004

git-svn-id: http://webrtc.googlecode.com/svn/trunk@4489 4adac7df-926f-26a2-2b94-8c16560cd09d
wu@webrtc.org 2013-08-05 20:36:57 +00:00
parent f696f253b2
commit 9dba525627
54 changed files with 1797 additions and 518 deletions

View File

@@ -235,17 +235,6 @@ bool CanAddLocalMediaStream(webrtc::StreamCollectionInterface* current_streams,
return false;
}
bool audio_track_exist = false;
for (size_t j = 0; j < current_streams->count(); ++j) {
if (!audio_track_exist) {
audio_track_exist = current_streams->at(j)->GetAudioTracks().size() > 0;
}
}
if (audio_track_exist && (new_stream->GetAudioTracks().size() > 0)) {
LOG(LS_ERROR) << "AddStream - Currently only one audio track is supported"
<< "per PeerConnection.";
return false;
}
return true;
}

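The hunk above removes the guard that allowed only one audio track per PeerConnection. With it gone, several audio-bearing local streams can be attached to the same connection, which the updated test below exercises. A minimal sketch using the same factory calls as the test (stream and track labels are illustrative):

// Sketch: two local streams, each with an audio track, on one
// PeerConnection. Assumes pc_factory_ and pc_ exist as in the test.
talk_base::scoped_refptr<webrtc::MediaStreamInterface> stream1(
    pc_factory_->CreateLocalMediaStream("stream1"));
talk_base::scoped_refptr<webrtc::AudioTrackInterface> track1(
    pc_factory_->CreateAudioTrack(
        "audio1", static_cast<webrtc::AudioSourceInterface*>(NULL)));
stream1->AddTrack(track1.get());
EXPECT_TRUE(pc_->AddStream(stream1, NULL));

talk_base::scoped_refptr<webrtc::MediaStreamInterface> stream2(
    pc_factory_->CreateLocalMediaStream("stream2"));
talk_base::scoped_refptr<webrtc::AudioTrackInterface> track2(
    pc_factory_->CreateAudioTrack(
        "audio2", static_cast<webrtc::AudioSourceInterface*>(NULL)));
stream2->AddTrack(track2.get());
EXPECT_TRUE(pc_->AddStream(stream2, NULL));  // Rejected before this change.
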
View File

@@ -551,20 +551,27 @@ TEST_F(PeerConnectionInterfaceTest, AddStreams) {
AddVoiceStream(kStreamLabel2);
ASSERT_EQ(2u, pc_->local_streams()->count());
// Fail to add another stream with audio since we already have an audio track.
// Test we can add multiple local streams to one peerconnection.
scoped_refptr<MediaStreamInterface> stream(
pc_factory_->CreateLocalMediaStream(kStreamLabel3));
scoped_refptr<AudioTrackInterface> audio_track(
pc_factory_->CreateAudioTrack(
kStreamLabel3, static_cast<AudioSourceInterface*>(NULL)));
stream->AddTrack(audio_track.get());
EXPECT_FALSE(pc_->AddStream(stream, NULL));
// Remove the stream with the audio track.
pc_->RemoveStream(pc_->local_streams()->at(1));
// Test that we now can add the stream with the audio track.
EXPECT_TRUE(pc_->AddStream(stream, NULL));
EXPECT_EQ(3u, pc_->local_streams()->count());
// Remove the third stream.
pc_->RemoveStream(pc_->local_streams()->at(2));
EXPECT_EQ(2u, pc_->local_streams()->count());
// Remove the second stream.
pc_->RemoveStream(pc_->local_streams()->at(1));
EXPECT_EQ(1u, pc_->local_streams()->count());
// Remove the first stream.
pc_->RemoveStream(pc_->local_streams()->at(0));
EXPECT_EQ(0u, pc_->local_streams()->count());
}
TEST_F(PeerConnectionInterfaceTest, RemoveStream) {

View File

@@ -835,6 +835,8 @@
'media/webrtc/webrtcmediaengine.h',
'media/webrtc/webrtcpassthroughrender.cc',
'media/webrtc/webrtcpassthroughrender.h',
'media/webrtc/webrtctexturevideoframe.cc',
'media/webrtc/webrtctexturevideoframe.h',
'media/webrtc/webrtcvideocapturer.cc',
'media/webrtc/webrtcvideocapturer.h',
'media/webrtc/webrtcvideodecoderfactory.h',

View File

@@ -402,7 +402,7 @@
}, # target libjingle_peerconnection_unittest
],
'conditions': [
['OS=="linux" or OS=="android"', {
['OS=="linux"', {
'targets': [
{
'target_name': 'libjingle_peerconnection_test_jar',

View File

@@ -57,6 +57,7 @@ class NullVideoFrame : public VideoFrame {
virtual int32 GetYPitch() const { return 0; }
virtual int32 GetUPitch() const { return 0; }
virtual int32 GetVPitch() const { return 0; }
virtual void* GetNativeHandle() const { return NULL; }
virtual size_t GetPixelWidth() const { return 1; }
virtual size_t GetPixelHeight() const { return 1; }

View File

@@ -66,16 +66,23 @@ class VideoFrame {
size_t GetChromaWidth() const { return (GetWidth() + 1) / 2; }
size_t GetChromaHeight() const { return (GetHeight() + 1) / 2; }
size_t GetChromaSize() const { return GetUPitch() * GetChromaHeight(); }
// These can return NULL if the object is not backed by a buffer.
virtual const uint8 *GetYPlane() const = 0;
virtual const uint8 *GetUPlane() const = 0;
virtual const uint8 *GetVPlane() const = 0;
virtual uint8 *GetYPlane() = 0;
virtual uint8 *GetUPlane() = 0;
virtual uint8 *GetVPlane() = 0;
virtual int32 GetYPitch() const = 0;
virtual int32 GetUPitch() const = 0;
virtual int32 GetVPitch() const = 0;
// Returns the handle of the underlying video frame. This is used when the
// frame is backed by a texture. The object should be destroyed when it is no
// longer in use, so the underlying resource can be freed.
virtual void* GetNativeHandle() const = 0;
// For retrieving the aspect ratio of each pixel. Usually this is 1x1, but
// the aspect_ratio_idc parameter of H.264 can specify non-square pixels.
virtual size_t GetPixelWidth() const = 0;
@@ -165,7 +172,7 @@ class VideoFrame {
bool crop) const;
// Sets the video frame to black.
bool SetToBlack();
virtual bool SetToBlack();
// Tests if sample is valid. Returns true if valid.
static bool Validate(uint32 fourcc, int w, int h, const uint8 *sample,

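The new GetNativeHandle() accessor effectively splits frames into two kinds: texture-backed frames, whose plane getters may return NULL per the comment above, and buffer-backed frames with valid planes. A minimal consumer-side sketch of the implied dispatch (RenderTexture and RenderI420 are hypothetical helpers, not part of this change):

// Sketch: dispatch on the backing type of a cricket::VideoFrame.
void RenderVideoFrame(const cricket::VideoFrame* frame) {
  if (frame->GetNativeHandle() != NULL) {
    // Texture-backed: pixel data is not in system memory, so the plane
    // accessors must not be used. Hand the opaque handle to a GPU path.
    RenderTexture(frame->GetNativeHandle(),
                  frame->GetWidth(), frame->GetHeight());
  } else {
    // Buffer-backed: Y/U/V planes and pitches are valid.
    RenderI420(frame->GetYPlane(), frame->GetUPlane(), frame->GetVPlane(),
               frame->GetYPitch(), frame->GetUPitch(), frame->GetVPitch(),
               frame->GetWidth(), frame->GetHeight());
  }
}
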
View File

@@ -597,10 +597,8 @@ class FakeWebRtcVideoEngine
channels_.erase(channel);
return 0;
}
#ifdef USE_WEBRTC_DEV_BRANCH
WEBRTC_STUB(RegisterCpuOveruseObserver,
(int channel, webrtc::CpuOveruseObserver* observer));
#endif
WEBRTC_STUB(ConnectAudioChannel, (const int, const int));
WEBRTC_STUB(DisconnectAudioChannel, (const int));
WEBRTC_FUNC(StartSend, (const int channel)) {

View File

@@ -161,6 +161,14 @@ class FakeWebRtcVoiceEngine
}
bool IsInited() const { return inited_; }
int GetLastChannel() const { return last_channel_; }
int GetChannelFromLocalSsrc(uint32 local_ssrc) const {
for (std::map<int, Channel*>::const_iterator iter = channels_.begin();
iter != channels_.end(); ++iter) {
if (local_ssrc == iter->second->send_ssrc)
return iter->first;
}
return -1;
}
int GetNumChannels() const { return channels_.size(); }
bool GetPlayout(int channel) {
return channels_[channel]->playout;

View File

@@ -0,0 +1,183 @@
/*
* libjingle
* Copyright 2013 Google Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
* EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "talk/media/webrtc/webrtctexturevideoframe.h"
#include "talk/base/common.h"
#include "talk/base/logging.h"
#include "talk/base/stream.h"
#define UNIMPLEMENTED \
LOG(LS_ERROR) << "Call to unimplemented function "<< __FUNCTION__; \
ASSERT(false)
namespace cricket {
WebRtcTextureVideoFrame::WebRtcTextureVideoFrame(
webrtc::NativeHandle* handle, int width, int height, int64 elapsed_time,
int64 time_stamp)
: handle_(handle), width_(width), height_(height),
elapsed_time_(elapsed_time), time_stamp_(time_stamp) {}
WebRtcTextureVideoFrame::~WebRtcTextureVideoFrame() {}
bool WebRtcTextureVideoFrame::InitToBlack(
int w, int h, size_t pixel_width, size_t pixel_height, int64 elapsed_time,
int64 time_stamp) {
UNIMPLEMENTED;
return false;
}
bool WebRtcTextureVideoFrame::Reset(
uint32 fourcc, int w, int h, int dw, int dh, uint8* sample,
size_t sample_size, size_t pixel_width, size_t pixel_height,
int64 elapsed_time, int64 time_stamp, int rotation) {
UNIMPLEMENTED;
return false;
}
const uint8* WebRtcTextureVideoFrame::GetYPlane() const {
UNIMPLEMENTED;
return NULL;
}
const uint8* WebRtcTextureVideoFrame::GetUPlane() const {
UNIMPLEMENTED;
return NULL;
}
const uint8* WebRtcTextureVideoFrame::GetVPlane() const {
UNIMPLEMENTED;
return NULL;
}
uint8* WebRtcTextureVideoFrame::GetYPlane() {
UNIMPLEMENTED;
return NULL;
}
uint8* WebRtcTextureVideoFrame::GetUPlane() {
UNIMPLEMENTED;
return NULL;
}
uint8* WebRtcTextureVideoFrame::GetVPlane() {
UNIMPLEMENTED;
return NULL;
}
int32 WebRtcTextureVideoFrame::GetYPitch() const {
UNIMPLEMENTED;
return width_;
}
int32 WebRtcTextureVideoFrame::GetUPitch() const {
UNIMPLEMENTED;
return (width_ + 1) / 2;
}
int32 WebRtcTextureVideoFrame::GetVPitch() const {
UNIMPLEMENTED;
return (width_ + 1) / 2;
}
VideoFrame* WebRtcTextureVideoFrame::Copy() const {
return new WebRtcTextureVideoFrame(
handle_, width_, height_, elapsed_time_, time_stamp_);
}
bool WebRtcTextureVideoFrame::MakeExclusive() {
UNIMPLEMENTED;
return false;
}
size_t WebRtcTextureVideoFrame::CopyToBuffer(uint8* buffer, size_t size) const {
UNIMPLEMENTED;
return 0;
}
size_t WebRtcTextureVideoFrame::ConvertToRgbBuffer(
uint32 to_fourcc, uint8* buffer, size_t size, int stride_rgb) const {
UNIMPLEMENTED;
return 0;
}
bool WebRtcTextureVideoFrame::CopyToPlanes(
uint8* dst_y, uint8* dst_u, uint8* dst_v, int32 dst_pitch_y,
int32 dst_pitch_u, int32 dst_pitch_v) const {
UNIMPLEMENTED;
return false;
}
void WebRtcTextureVideoFrame::CopyToFrame(VideoFrame* dst) const {
UNIMPLEMENTED;
}
talk_base::StreamResult WebRtcTextureVideoFrame::Write(
talk_base::StreamInterface* stream, int* error) {
UNIMPLEMENTED;
return talk_base::SR_ERROR;
}
void WebRtcTextureVideoFrame::StretchToPlanes(
uint8* dst_y, uint8* dst_u, uint8* dst_v, int32 dst_pitch_y,
int32 dst_pitch_u, int32 dst_pitch_v, size_t width, size_t height,
bool interpolate, bool vert_crop) const {
UNIMPLEMENTED;
}
size_t WebRtcTextureVideoFrame::StretchToBuffer(
size_t dst_width, size_t dst_height, uint8* dst_buffer, size_t size,
bool interpolate, bool vert_crop) const {
UNIMPLEMENTED;
return 0;
}
void WebRtcTextureVideoFrame::StretchToFrame(
VideoFrame* dst, bool interpolate, bool vert_crop) const {
UNIMPLEMENTED;
}
VideoFrame* WebRtcTextureVideoFrame::Stretch(
size_t dst_width, size_t dst_height, bool interpolate,
bool vert_crop) const {
UNIMPLEMENTED;
return NULL;
}
bool WebRtcTextureVideoFrame::SetToBlack() {
UNIMPLEMENTED;
return false;
}
VideoFrame* WebRtcTextureVideoFrame::CreateEmptyFrame(
int w, int h, size_t pixel_width, size_t pixel_height, int64 elapsed_time,
int64 time_stamp) const {
UNIMPLEMENTED;
return NULL;
}
} // namespace cricket

View File

@@ -0,0 +1,120 @@
/*
* libjingle
* Copyright 2013 Google Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
* EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef TALK_MEDIA_WEBRTC_WEBRTCTEXTUREVIDEOFRAME_H_
#define TALK_MEDIA_WEBRTC_WEBRTCTEXTUREVIDEOFRAME_H_
#include "talk/base/refcount.h"
#include "talk/base/scoped_ref_ptr.h"
#include "talk/media/base/videoframe.h"
#ifdef USE_WEBRTC_DEV_BRANCH
#include "webrtc/common_video/interface/native_handle.h"
#else
#include "webrtc/common_video/interface/i420_video_frame.h"
// Define NativeHandle as an existing type so we don't need to add lots of
// USE_WEBRTC_DEV_BRANCH ifdefs.
#define NativeHandle I420VideoFrame
#endif
namespace cricket {
// A video frame backed by a texture, accessed via a native handle.
class WebRtcTextureVideoFrame : public VideoFrame {
public:
WebRtcTextureVideoFrame(webrtc::NativeHandle* handle, int width, int height,
int64 elapsed_time, int64 time_stamp);
virtual ~WebRtcTextureVideoFrame();
// From base class VideoFrame.
virtual bool InitToBlack(int w, int h, size_t pixel_width,
size_t pixel_height, int64 elapsed_time,
int64 time_stamp);
virtual bool Reset(uint32 fourcc, int w, int h, int dw, int dh, uint8* sample,
size_t sample_size, size_t pixel_width,
size_t pixel_height, int64 elapsed_time, int64 time_stamp,
int rotation);
virtual size_t GetWidth() const { return width_; }
virtual size_t GetHeight() const { return height_; }
virtual const uint8* GetYPlane() const;
virtual const uint8* GetUPlane() const;
virtual const uint8* GetVPlane() const;
virtual uint8* GetYPlane();
virtual uint8* GetUPlane();
virtual uint8* GetVPlane();
virtual int32 GetYPitch() const;
virtual int32 GetUPitch() const;
virtual int32 GetVPitch() const;
virtual size_t GetPixelWidth() const { return 1; }
virtual size_t GetPixelHeight() const { return 1; }
virtual int64 GetElapsedTime() const { return elapsed_time_; }
virtual int64 GetTimeStamp() const { return time_stamp_; }
virtual void SetElapsedTime(int64 elapsed_time) {
elapsed_time_ = elapsed_time;
}
virtual void SetTimeStamp(int64 time_stamp) { time_stamp_ = time_stamp; }
virtual int GetRotation() const { return 0; }
virtual VideoFrame* Copy() const;
virtual bool MakeExclusive();
virtual size_t CopyToBuffer(uint8* buffer, size_t size) const;
virtual size_t ConvertToRgbBuffer(uint32 to_fourcc, uint8* buffer,
size_t size, int stride_rgb) const;
virtual void* GetNativeHandle() const { return handle_.get(); }
virtual bool CopyToPlanes(
uint8* dst_y, uint8* dst_u, uint8* dst_v,
int32 dst_pitch_y, int32 dst_pitch_u, int32 dst_pitch_v) const;
virtual void CopyToFrame(VideoFrame* target) const;
virtual talk_base::StreamResult Write(talk_base::StreamInterface* stream,
int* error);
virtual void StretchToPlanes(
uint8* y, uint8* u, uint8* v, int32 pitchY, int32 pitchU, int32 pitchV,
size_t width, size_t height, bool interpolate, bool crop) const;
virtual size_t StretchToBuffer(size_t w, size_t h, uint8* buffer, size_t size,
bool interpolate, bool crop) const;
virtual void StretchToFrame(VideoFrame* target, bool interpolate,
bool crop) const;
virtual VideoFrame* Stretch(size_t w, size_t h, bool interpolate,
bool crop) const;
virtual bool SetToBlack();
protected:
virtual VideoFrame* CreateEmptyFrame(int w, int h, size_t pixel_width,
size_t pixel_height, int64 elapsed_time,
int64 time_stamp) const;
private:
// The handle of the underlying video frame.
talk_base::scoped_refptr<webrtc::NativeHandle> handle_;
int width_;
int height_;
int64 elapsed_time_;
int64 time_stamp_;
};
} // namespace cricket
#endif // TALK_MEDIA_WEBRTC_WEBRTCTEXTUREVIDEOFRAME_H_

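On the producer side, a decoder that outputs GPU textures would wrap its handle in this class before pushing the frame down the render path. A sketch under the assumption that MyTextureHandle is an application-defined, ref-counted webrtc::NativeHandle implementation (texture_id, width, height and the timestamps come from the decoder):

// Sketch: wrapping a decoder's output texture. MyTextureHandle is a
// hypothetical webrtc::NativeHandle subclass; the frame's scoped_refptr
// AddRef()s it on construction and Release()s it on destruction.
webrtc::NativeHandle* handle = new MyTextureHandle(texture_id);
cricket::WebRtcTextureVideoFrame frame(
    handle, width, height, elapsed_time_ns, time_stamp_ns);
renderer->RenderFrame(&frame);  // The receiver must not touch Y/U/V planes.
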
View File

@@ -0,0 +1,84 @@
/*
* libjingle
* Copyright 2013 Google Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
* EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "talk/media/webrtc/webrtctexturevideoframe.h"
#include "talk/base/gunit.h"
#include "talk/media/base/videocommon.h"
class NativeHandleImpl : public webrtc::NativeHandle {
public:
NativeHandleImpl() : ref_count_(0) {}
virtual ~NativeHandleImpl() {}
virtual int32_t AddRef() { return ++ref_count_; }
virtual int32_t Release() { return --ref_count_; }
virtual void* GetHandle() { return NULL; }
int32_t ref_count() { return ref_count_; }
private:
int32_t ref_count_;
};
TEST(WebRtcTextureVideoFrameTest, InitialValues) {
NativeHandleImpl handle;
cricket::WebRtcTextureVideoFrame frame(&handle, 640, 480, 100, 200);
EXPECT_EQ(&handle, frame.GetNativeHandle());
EXPECT_EQ(640u, frame.GetWidth());
EXPECT_EQ(480u, frame.GetHeight());
EXPECT_EQ(100, frame.GetElapsedTime());
EXPECT_EQ(200, frame.GetTimeStamp());
frame.SetElapsedTime(300);
EXPECT_EQ(300, frame.GetElapsedTime());
frame.SetTimeStamp(400);
EXPECT_EQ(400, frame.GetTimeStamp());
}
TEST(WebRtcTextureVideoFrameTest, CopyFrame) {
NativeHandleImpl handle;
cricket::WebRtcTextureVideoFrame frame1(&handle, 640, 480, 100, 200);
cricket::VideoFrame* frame2 = frame1.Copy();
EXPECT_EQ(frame1.GetNativeHandle(), frame2->GetNativeHandle());
EXPECT_EQ(frame1.GetWidth(), frame2->GetWidth());
EXPECT_EQ(frame1.GetHeight(), frame2->GetHeight());
EXPECT_EQ(frame1.GetElapsedTime(), frame2->GetElapsedTime());
EXPECT_EQ(frame1.GetTimeStamp(), frame2->GetTimeStamp());
delete frame2;
}
TEST(WebRtcTextureVideoFrameTest, RefCount) {
NativeHandleImpl handle;
EXPECT_EQ(0, handle.ref_count());
cricket::WebRtcTextureVideoFrame* frame1 =
new cricket::WebRtcTextureVideoFrame(&handle, 640, 480, 100, 200);
EXPECT_EQ(1, handle.ref_count());
cricket::VideoFrame* frame2 = frame1->Copy();
EXPECT_EQ(2, handle.ref_count());
delete frame2;
EXPECT_EQ(1, handle.ref_count());
delete frame1;
EXPECT_EQ(0, handle.ref_count());
}

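The RefCount test above pins down the ownership contract: because handle_ is held in a talk_base::scoped_refptr (see the header), constructing a frame calls AddRef() on the native handle and destroying it calls Release(). Copy() therefore shares the texture rather than duplicating it, and the underlying GPU resource can be freed once the last frame referencing it is deleted.
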
View File

@@ -51,10 +51,11 @@
#include "talk/media/base/videocapturer.h"
#include "talk/media/base/videorenderer.h"
#include "talk/media/devices/filevideocapturer.h"
#include "talk/media/webrtc/webrtcpassthroughrender.h"
#include "talk/media/webrtc/webrtctexturevideoframe.h"
#include "talk/media/webrtc/webrtcvideocapturer.h"
#include "talk/media/webrtc/webrtcvideodecoderfactory.h"
#include "talk/media/webrtc/webrtcvideoencoderfactory.h"
#include "talk/media/webrtc/webrtcpassthroughrender.h"
#include "talk/media/webrtc/webrtcvideocapturer.h"
#include "talk/media/webrtc/webrtcvideoframe.h"
#include "talk/media/webrtc/webrtcvie.h"
#include "talk/media/webrtc/webrtcvoe.h"
@@ -176,12 +177,15 @@ class WebRtcRenderAdapter : public webrtc::ExternalRenderer {
explicit WebRtcRenderAdapter(VideoRenderer* renderer)
: renderer_(renderer), width_(0), height_(0), watermark_enabled_(false) {
}
virtual ~WebRtcRenderAdapter() {
}
void set_watermark_enabled(bool enable) {
talk_base::CritScope cs(&crit_);
watermark_enabled_ = enable;
}
void SetRenderer(VideoRenderer* renderer) {
talk_base::CritScope cs(&crit_);
renderer_ = renderer;
@@ -198,6 +202,7 @@ class WebRtcRenderAdapter : public webrtc::ExternalRenderer {
}
}
}
// Implementation of webrtc::ExternalRenderer.
virtual int FrameSizeChange(unsigned int width, unsigned int height,
unsigned int /*number_of_streams*/) {
@@ -213,14 +218,18 @@ class WebRtcRenderAdapter : public webrtc::ExternalRenderer {
}
return renderer_->SetSize(width_, height_, 0) ? 0 : -1;
}
virtual int DeliverFrame(unsigned char* buffer, int buffer_size,
uint32_t time_stamp, int64_t render_time) {
uint32_t time_stamp, int64_t render_time
#ifdef USE_WEBRTC_DEV_BRANCH
, void* handle
#endif
) {
talk_base::CritScope cs(&crit_);
frame_rate_tracker_.Update(1);
if (renderer_ == NULL) {
return 0;
}
WebRtcVideoFrame video_frame;
// Convert 90K rtp timestamp to ns timestamp.
int64 rtp_time_stamp_in_ns = (time_stamp / 90) *
talk_base::kNumNanosecsPerMillisec;
@@ -229,9 +238,26 @@ class WebRtcRenderAdapter : public webrtc::ExternalRenderer {
talk_base::kNumNanosecsPerMillisec;
// Send the rtp timestamp to the renderer as the VideoFrame timestamp,
// and the render timestamp as the VideoFrame elapsed_time.
#ifdef USE_WEBRTC_DEV_BRANCH
if (handle == NULL) {
#endif
return DeliverBufferFrame(buffer, buffer_size, render_time_stamp_in_ns,
rtp_time_stamp_in_ns);
#ifdef USE_WEBRTC_DEV_BRANCH
} else {
return DeliverTextureFrame(handle, render_time_stamp_in_ns,
rtp_time_stamp_in_ns);
}
#endif
}
virtual bool IsTextureSupported() { return true; }
int DeliverBufferFrame(unsigned char* buffer, int buffer_size,
int64 elapsed_time, int64 time_stamp) {
WebRtcVideoFrame video_frame;
video_frame.Attach(buffer, buffer_size, width_, height_,
1, 1, render_time_stamp_in_ns,
rtp_time_stamp_in_ns, 0);
1, 1, elapsed_time, time_stamp, 0);
// Sanity check on decoded frame size.
@@ -247,18 +273,28 @@ class WebRtcRenderAdapter : public webrtc::ExternalRenderer {
return ret;
}
int DeliverTextureFrame(void* handle, int64 elapsed_time, int64 time_stamp) {
WebRtcTextureVideoFrame video_frame(
static_cast<webrtc::NativeHandle*>(handle), width_, height_,
elapsed_time, time_stamp);
return renderer_->RenderFrame(&video_frame);
}
unsigned int width() {
talk_base::CritScope cs(&crit_);
return width_;
}
unsigned int height() {
talk_base::CritScope cs(&crit_);
return height_;
}
int framerate() {
talk_base::CritScope cs(&crit_);
return static_cast<int>(frame_rate_tracker_.units_second());
}
VideoRenderer* renderer() {
talk_base::CritScope cs(&crit_);
return renderer_;

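A note on the timestamp math in DeliverFrame above: the RTP timestamp ticks at 90 kHz, so dividing by 90 yields milliseconds, which kNumNanosecsPerMillisec then scales to nanoseconds. For example, an RTP timestamp of 180000 becomes 180000 / 90 = 2000 ms, i.e. 2 * 10^9 ns. Since the division is integral, the result is truncated to millisecond granularity.
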
View File

@@ -106,6 +106,7 @@ class WebRtcVideoFrame : public VideoFrame {
virtual int32 GetYPitch() const { return frame()->Width(); }
virtual int32 GetUPitch() const { return (frame()->Width() + 1) / 2; }
virtual int32 GetVPitch() const { return (frame()->Width() + 1) / 2; }
virtual void* GetNativeHandle() const { return NULL; }
virtual size_t GetPixelWidth() const { return pixel_width_; }
virtual size_t GetPixelHeight() const { return pixel_height_; }

View File

@@ -548,6 +548,12 @@ bool WebRtcVoiceEngine::InitInternal() {
}
#endif
// Disable the DTMF playout when a tone is sent.
// PlayDtmfTone will be used if local playout is needed.
if (voe_wrapper_->dtmf()->SetDtmfFeedbackStatus(false) == -1) {
LOG_RTCERR1(SetDtmfFeedbackStatus, false);
}
initialized_ = true;
return true;
}
@@ -675,6 +681,7 @@ bool WebRtcVoiceEngine::ApplyOptions(const AudioOptions& options_in) {
options.experimental_aec.Set(false);
#endif
LOG(LS_INFO) << "Applying audio options: " << options.ToString();
webrtc::VoEAudioProcessing* voep = voe_wrapper_->processing();
@@ -1490,41 +1497,22 @@ WebRtcVoiceMediaChannel::WebRtcVoiceMediaChannel(WebRtcVoiceEngine *engine)
playout_(false),
desired_send_(SEND_NOTHING),
send_(SEND_NOTHING),
send_ssrc_(0),
local_renderer_(NULL),
default_receive_ssrc_(0) {
engine->RegisterChannel(this);
LOG(LS_VERBOSE) << "WebRtcVoiceMediaChannel::WebRtcVoiceMediaChannel "
<< voe_channel();
// Register external transport
if (engine->voe()->network()->RegisterExternalTransport(
voe_channel(), *static_cast<Transport*>(this)) == -1) {
LOG_RTCERR2(RegisterExternalTransport, voe_channel(), this);
}
// Enable RTCP (for quality stats and feedback messages)
EnableRtcp(voe_channel());
// Reset all recv codecs; they will be enabled via SetRecvCodecs.
ResetRecvCodecs(voe_channel());
// Disable the DTMF playout when a tone is sent.
// PlayDtmfTone will be used if local playout is needed.
if (engine->voe()->dtmf()->SetDtmfFeedbackStatus(false) == -1) {
LOG_RTCERR1(SetDtmfFeedbackStatus, false);
}
ConfigureSendChannel(voe_channel());
}
WebRtcVoiceMediaChannel::~WebRtcVoiceMediaChannel() {
LOG(LS_VERBOSE) << "WebRtcVoiceMediaChannel::~WebRtcVoiceMediaChannel "
<< voe_channel();
// DeRegister external transport
if (engine()->voe()->network()->DeRegisterExternalTransport(
voe_channel()) == -1) {
LOG_RTCERR1(DeRegisterExternalTransport, voe_channel());
}
// Remove any remaining send streams; the default channel will be deleted
// later.
while (!send_channels_.empty())
RemoveSendStream(send_channels_.begin()->first);
// Unregister ourselves from the engine.
engine()->UnregisterChannel(this);
@@ -1533,16 +1521,17 @@ WebRtcVoiceMediaChannel::~WebRtcVoiceMediaChannel() {
RemoveRecvStream(receive_channels_.begin()->first);
}
// Delete the primary channel.
if (engine()->voe()->base()->DeleteChannel(voe_channel()) == -1) {
LOG_RTCERR1(DeleteChannel, voe_channel());
}
// Delete the default channel.
DeleteChannel(voe_channel());
}
bool WebRtcVoiceMediaChannel::SetOptions(const AudioOptions& options) {
LOG(LS_INFO) << "Setting voice channel options: "
<< options.ToString();
// TODO(xians): Add support to set different options for different send
// streams after we support multiple APMs.
// We retain all of the existing options, and apply the given ones
// on top. This means there is no way to "clear" options such that
// they go back to the engine default.
@@ -1644,11 +1633,17 @@ bool WebRtcVoiceMediaChannel::SetRecvCodecs(
bool WebRtcVoiceMediaChannel::SetSendCodecs(
const std::vector<AudioCodec>& codecs) {
// TODO(xians): Break down this function into SetSendCodecs(channel, codecs)
// to support per-channel codecs.
// Disable DTMF, VAD, and FEC unless we know the other side wants them.
dtmf_allowed_ = false;
engine()->voe()->codec()->SetVADStatus(voe_channel(), false);
engine()->voe()->rtp()->SetNACKStatus(voe_channel(), false, 0);
engine()->voe()->rtp()->SetFECStatus(voe_channel(), false);
for (ChannelMap::iterator iter = send_channels_.begin();
iter != send_channels_.end(); ++iter) {
engine()->voe()->codec()->SetVADStatus(iter->second.channel, false);
engine()->voe()->rtp()->SetNACKStatus(iter->second.channel, false, 0);
engine()->voe()->rtp()->SetFECStatus(iter->second.channel, false);
}
// Scan through the list to figure out the codec to use for sending, along
// with the proper configuration for VAD and DTMF.
@@ -1701,14 +1696,19 @@ bool WebRtcVoiceMediaChannel::SetSendCodecs(
}
}
// Find the DTMF telephone event "codec" and tell VoiceEngine about it.
// Find the DTMF telephone event "codec" and tell VoiceEngine channels
// about it.
if (_stricmp(it->name.c_str(), "telephone-event") == 0 ||
_stricmp(it->name.c_str(), "audio/telephone-event") == 0) {
for (ChannelMap::iterator iter = send_channels_.begin();
iter != send_channels_.end(); ++iter) {
if (engine()->voe()->dtmf()->SetSendTelephoneEventPayloadType(
voe_channel(), it->id) == -1) {
LOG_RTCERR2(SetSendTelephoneEventPayloadType, voe_channel(), it->id);
iter->second.channel, it->id) == -1) {
LOG_RTCERR2(SetSendTelephoneEventPayloadType,
iter->second.channel, it->id);
return false;
}
}
dtmf_allowed_ = true;
}
@@ -1732,11 +1732,16 @@ bool WebRtcVoiceMediaChannel::SetSendCodecs(
<< " not supported.";
continue;
}
// Loop through the existing send channels and set the CN payload type
// and the VAD status.
for (ChannelMap::iterator iter = send_channels_.begin();
iter != send_channels_.end(); ++iter) {
int channel = iter->second.channel;
// The CN payload type for 8000 Hz clockrate is fixed at 13.
if (cn_freq != webrtc::kFreq8000Hz) {
if (engine()->voe()->codec()->SetSendCNPayloadType(voe_channel(),
it->id, cn_freq) == -1) {
LOG_RTCERR3(SetSendCNPayloadType, voe_channel(), it->id, cn_freq);
if (engine()->voe()->codec()->SetSendCNPayloadType(
channel, it->id, cn_freq) == -1) {
LOG_RTCERR3(SetSendCNPayloadType, channel, it->id, cn_freq);
// TODO(ajm): This failure condition will be removed from VoE.
// Restore the return here when we update to a new enough webrtc.
//
@@ -1747,16 +1752,18 @@ bool WebRtcVoiceMediaChannel::SetSendCodecs(
// send the offer.
}
}
// Only turn on VAD if we have a CN payload type that matches the
// clockrate for the codec we are going to use.
if (it->clockrate == send_codec.plfreq) {
LOG(LS_INFO) << "Enabling VAD";
if (engine()->voe()->codec()->SetVADStatus(voe_channel(), true) == -1) {
LOG_RTCERR2(SetVADStatus, voe_channel(), true);
if (engine()->voe()->codec()->SetVADStatus(channel, true) == -1) {
LOG_RTCERR2(SetVADStatus, channel, true);
return false;
}
}
}
}
// We'll use the first codec in the list to actually send audio data.
// Be sure to use the payload type requested by the remote side.
@@ -1773,15 +1780,18 @@ bool WebRtcVoiceMediaChannel::SetSendCodecs(
// Enable redundant encoding of the specified codec. Treat any
// failure as a fatal internal error.
LOG(LS_INFO) << "Enabling FEC";
if (engine()->voe()->rtp()->SetFECStatus(voe_channel(),
for (ChannelMap::iterator iter = send_channels_.begin();
iter != send_channels_.end(); ++iter) {
if (engine()->voe()->rtp()->SetFECStatus(iter->second.channel,
true, it->id) == -1) {
LOG_RTCERR3(SetFECStatus, voe_channel(), true, it->id);
LOG_RTCERR3(SetFECStatus, iter->second.channel, true, it->id);
return false;
}
}
} else {
send_codec = voe_codec;
nack_enabled_ = IsNackEnabled(*it);
SetNack(send_ssrc_, voe_channel(), nack_enabled_);
SetNack(send_channels_, nack_enabled_);
}
first = false;
// Set the codec immediately, since SetVADStatus() depends on whether
@@ -1790,10 +1800,7 @@ bool WebRtcVoiceMediaChannel::SetSendCodecs(
return false;
}
}
for (ChannelMap::iterator it = receive_channels_.begin();
it != receive_channels_.end(); ++it) {
SetNack(it->first, it->second.channel, nack_enabled_);
}
SetNack(receive_channels_, nack_enabled_);
// If we're being asked to set an empty list of codecs, due to a buggy client,
@@ -1808,6 +1815,15 @@ bool WebRtcVoiceMediaChannel::SetSendCodecs(
return true;
}
void WebRtcVoiceMediaChannel::SetNack(const ChannelMap& channels,
bool nack_enabled) {
for (ChannelMap::const_iterator it = channels.begin();
it != channels.end(); ++it) {
SetNack(it->first, it->second.channel, nack_enabled);
}
}
void WebRtcVoiceMediaChannel::SetNack(uint32 ssrc, int channel,
bool nack_enabled) {
if (nack_enabled) {
@@ -1819,17 +1835,32 @@ void WebRtcVoiceMediaChannel::SetNack(uint32 ssrc, int channel,
}
}
bool WebRtcVoiceMediaChannel::SetSendCodec(
const webrtc::CodecInst& send_codec) {
LOG(LS_INFO) << "Selected voice codec " << ToString(send_codec)
<< ", bitrate=" << send_codec.rate;
if (engine()->voe()->codec()->SetSendCodec(voe_channel(),
send_codec) == -1) {
LOG_RTCERR2(SetSendCodec, voe_channel(), ToString(send_codec));
for (ChannelMap::iterator iter = send_channels_.begin();
iter != send_channels_.end(); ++iter) {
if (!SetSendCodec(iter->second.channel, send_codec))
return false;
}
// All SetSendCodec calls were successful. Update the global state
// accordingly.
send_codec_.reset(new webrtc::CodecInst(send_codec));
return true;
}
bool WebRtcVoiceMediaChannel::SetSendCodec(
int channel, const webrtc::CodecInst& send_codec) {
LOG(LS_INFO) << "Send channel " << channel << " selected voice codec "
<< ToString(send_codec) << ", bitrate=" << send_codec.rate;
if (engine()->voe()->codec()->SetSendCodec(channel, send_codec) == -1) {
LOG_RTCERR2(SetSendCodec, channel, ToString(send_codec));
return false;
}
return true;
}
@@ -1862,11 +1893,15 @@ bool WebRtcVoiceMediaChannel::SetSendRtpHeaderExtensions(
}
LOG(LS_INFO) << "Enabling audio level header extension with ID " << id;
for (ChannelMap::const_iterator iter = send_channels_.begin();
iter != send_channels_.end(); ++iter) {
if (engine()->voe()->rtp()->SetRTPAudioLevelIndicationStatus(
voe_channel(), enable, id) == -1) {
LOG_RTCERR3(SetRTPAudioLevelIndicationStatus, voe_channel(), enable, id);
iter->second.channel, enable, id) == -1) {
LOG_RTCERR3(SetRTPAudioLevelIndicationStatus,
iter->second.channel, enable, id);
return false;
}
}
return true;
}
@@ -1912,7 +1947,7 @@ bool WebRtcVoiceMediaChannel::ChangePlayout(bool playout) {
bool WebRtcVoiceMediaChannel::SetSend(SendFlags send) {
desired_send_ = send;
if (send_ssrc_ != 0)
if (!send_channels_.empty())
return ChangeSend(desired_send_);
return true;
}
@@ -1930,131 +1965,177 @@ bool WebRtcVoiceMediaChannel::ChangeSend(SendFlags send) {
return true;
}
if (send == SEND_MICROPHONE) {
// Change the settings on each send channel.
if (send == SEND_MICROPHONE)
engine()->SetOptionOverrides(options_);
// VoiceEngine resets sequence number when StopSend is called. This
// sometimes causes libSRTP to complain about packets being
// replayed. To get around this we store the last sent sequence
// number and initialize the channel with the next one to continue on
// the same sequence.
if (sequence_number() != -1) {
LOG(LS_INFO) << "WebRtcVoiceMediaChannel restores seqnum="
<< sequence_number() + 1;
if (engine()->voe()->sync()->SetInitSequenceNumber(
voe_channel(), sequence_number() + 1) == -1) {
LOG_RTCERR2(SetInitSequenceNumber, voe_channel(),
sequence_number() + 1);
}
}
if (engine()->voe()->base()->StartSend(voe_channel()) == -1) {
LOG_RTCERR1(StartSend, voe_channel());
// Change the settings on each send channel.
for (ChannelMap::iterator iter = send_channels_.begin();
iter != send_channels_.end(); ++iter) {
if (!ChangeSend(iter->second.channel, send))
return false;
}
// It's OK not to have file() here, since we don't need to call Stop if
// no file is playing.
if (engine()->voe()->file() &&
engine()->voe()->file()->StopPlayingFileAsMicrophone(
voe_channel()) == -1) {
LOG_RTCERR1(StopPlayingFileAsMicrophone, voe_channel());
return false;
}
} else if (send == SEND_RINGBACKTONE) {
ASSERT(ringback_tone_);
if (!ringback_tone_) {
return false;
}
if (engine()->voe()->file() &&
engine()->voe()->file()->StartPlayingFileAsMicrophone(
voe_channel(), ringback_tone_.get(), false) != -1) {
LOG(LS_INFO) << "File StartPlayingFileAsMicrophone Succeeded. channel:"
<< voe_channel();
} else {
LOG_RTCERR3(StartPlayingFileAsMicrophone, voe_channel(),
ringback_tone_.get(), false);
return false;
}
// VoiceEngine resets sequence number when StopSend is called. This
// sometimes causes libSRTP to complain about packets being
// replayed. To get around this we store the last sent sequence
// number and initialize the channel with the next one to continue on
// the same sequence.
if (sequence_number() != -1) {
LOG(LS_INFO) << "WebRtcVoiceMediaChannel restores seqnum="
<< sequence_number() + 1;
if (engine()->voe()->sync()->SetInitSequenceNumber(
voe_channel(), sequence_number() + 1) == -1) {
LOG_RTCERR2(SetInitSequenceNumber, voe_channel(),
sequence_number() + 1);
}
}
if (engine()->voe()->base()->StartSend(voe_channel()) == -1) {
LOG_RTCERR1(StartSend, voe_channel());
return false;
}
} else { // SEND_NOTHING
if (engine()->voe()->base()->StopSend(voe_channel()) == -1) {
LOG_RTCERR1(StopSend, voe_channel());
}
// Clear the option overrides after sending has stopped.
if (send == SEND_NOTHING)
engine()->ClearOptionOverrides();
}
send_ = send;
return true;
}
bool WebRtcVoiceMediaChannel::AddSendStream(const StreamParams& sp) {
if (send_ssrc_ != 0) {
LOG(LS_ERROR) << "WebRtcVoiceMediaChannel supports one sending channel.";
bool WebRtcVoiceMediaChannel::ChangeSend(int channel, SendFlags send) {
if (send == SEND_MICROPHONE) {
if (engine()->voe()->base()->StartSend(channel) == -1) {
LOG_RTCERR1(StartSend, channel);
return false;
}
if (engine()->voe()->rtp()->SetLocalSSRC(voe_channel(), sp.first_ssrc())
== -1) {
LOG_RTCERR2(SetSendSSRC, voe_channel(), sp.first_ssrc());
if (engine()->voe()->file() &&
engine()->voe()->file()->StopPlayingFileAsMicrophone(channel) == -1) {
LOG_RTCERR1(StopPlayingFileAsMicrophone, channel);
return false;
}
// Set the SSRC on the receive channels.
// Receive channels have to have the same SSRC in order to send receiver
// reports with this SSRC.
for (ChannelMap::const_iterator it = receive_channels_.begin();
it != receive_channels_.end(); ++it) {
int channel_id = it->second.channel;
if (channel_id != voe_channel()) {
if (engine()->voe()->rtp()->SetLocalSSRC(channel_id,
sp.first_ssrc()) != 0) {
LOG_RTCERR1(SetLocalSSRC, it->first);
} else { // SEND_NOTHING
ASSERT(send == SEND_NOTHING);
if (engine()->voe()->base()->StopSend(channel) == -1) {
LOG_RTCERR1(StopSend, channel);
return false;
}
}
}
if (engine()->voe()->rtp()->SetRTCP_CNAME(voe_channel(),
sp.cname.c_str()) == -1) {
LOG_RTCERR2(SetRTCP_CNAME, voe_channel(), sp.cname);
return false;
}
send_ssrc_ = sp.first_ssrc();
if (desired_send_ != send_)
return ChangeSend(desired_send_);
if (local_renderer_)
local_renderer_->AddChannel(voe_channel());
return true;
}
bool WebRtcVoiceMediaChannel::RemoveSendStream(uint32 ssrc) {
if (ssrc != send_ssrc_) {
void WebRtcVoiceMediaChannel::ConfigureSendChannel(int channel) {
if (engine()->voe()->network()->RegisterExternalTransport(
channel, *this) == -1) {
LOG_RTCERR2(RegisterExternalTransport, channel, this);
}
// Enable RTCP (for quality stats and feedback messages)
EnableRtcp(channel);
// Reset all recv codecs; they will be enabled via SetRecvCodecs.
ResetRecvCodecs(channel);
}
bool WebRtcVoiceMediaChannel::DeleteChannel(int channel) {
if (engine()->voe()->network()->DeRegisterExternalTransport(channel) == -1) {
LOG_RTCERR1(DeRegisterExternalTransport, channel);
}
if (engine()->voe()->base()->DeleteChannel(channel) == -1) {
LOG_RTCERR1(DeleteChannel, channel);
return false;
}
if (local_renderer_)
local_renderer_->RemoveChannel(voe_channel());
return true;
}
send_ssrc_ = 0;
bool WebRtcVoiceMediaChannel::AddSendStream(const StreamParams& sp) {
// If the default channel is already used for sending, create a new channel;
// otherwise use the default channel for sending.
int channel = GetSendChannelNum(sp.first_ssrc());
if (channel != -1) {
LOG(LS_ERROR) << "Stream already exists with ssrc " << sp.first_ssrc();
return false;
}
bool default_channel_is_available = true;
for (ChannelMap::const_iterator iter = send_channels_.begin();
iter != send_channels_.end(); ++iter) {
if (IsDefaultChannel(iter->second.channel)) {
default_channel_is_available = false;
break;
}
}
if (default_channel_is_available) {
channel = voe_channel();
} else {
// Create a new channel for sending audio data.
channel = engine()->voe()->base()->CreateChannel();
if (channel == -1) {
LOG_RTCERR0(CreateChannel);
return false;
}
ConfigureSendChannel(channel);
}
// Save the channel to send_channels_, so that RemoveSendStream() can still
// delete the channel if a failure happens below.
send_channels_[sp.first_ssrc()] = WebRtcVoiceChannelInfo(channel, NULL);
// Set the send (local) SSRC.
// If there are multiple send SSRCs, we can only set the first one here; the
// rest need to be set after SetSendCodec has been called (when a codec
// requires multiple SSRCs).
if (engine()->voe()->rtp()->SetLocalSSRC(channel, sp.first_ssrc()) == -1) {
LOG_RTCERR2(SetSendSSRC, channel, sp.first_ssrc());
return false;
}
// At this point the channel's local SSRC has been updated. If the channel is
// the default channel, make sure that all the receive channels are updated as
// well. Receive channels have to have the same SSRC as the default channel in
// order to send receiver reports with this SSRC.
if (IsDefaultChannel(channel)) {
for (ChannelMap::const_iterator it = receive_channels_.begin();
it != receive_channels_.end(); ++it) {
// Only update the SSRC for non-default channels.
if (!IsDefaultChannel(it->second.channel)) {
if (engine()->voe()->rtp()->SetLocalSSRC(it->second.channel,
sp.first_ssrc()) != 0) {
LOG_RTCERR2(SetLocalSSRC, it->second.channel, sp.first_ssrc());
return false;
}
}
}
}
if (engine()->voe()->rtp()->SetRTCP_CNAME(channel, sp.cname.c_str()) == -1) {
LOG_RTCERR2(SetRTCP_CNAME, channel, sp.cname);
return false;
}
// Set the current codec to be used for the new channel.
if (send_codec_ && !SetSendCodec(channel, *send_codec_))
return false;
return ChangeSend(channel, desired_send_);
}
bool WebRtcVoiceMediaChannel::RemoveSendStream(uint32 ssrc) {
ChannelMap::iterator it = send_channels_.find(ssrc);
if (it == send_channels_.end()) {
LOG(LS_WARNING) << "Try to remove stream with ssrc " << ssrc
<< " which doesn't exist.";
return false;
}
int channel = it->second.channel;
ChangeSend(channel, SEND_NOTHING);
// Notify the audio renderer that the send channel is going away.
if (it->second.renderer)
it->second.renderer->RemoveChannel(channel);
if (IsDefaultChannel(channel)) {
// Do not delete the default channel, since the receive channels depend on
// it; recycle it instead.
ChangeSend(channel, SEND_NOTHING);
} else {
// Clean up and delete the send channel.
LOG(LS_INFO) << "Removing audio send stream " << ssrc
<< " with VoiceEngine channel #" << channel << ".";
if (!DeleteChannel(channel))
return false;
}
send_channels_.erase(it);
if (send_channels_.empty())
ChangeSend(SEND_NOTHING);
return true;
}
@@ -2157,11 +2238,14 @@ bool WebRtcVoiceMediaChannel::AddRecvStream(const StreamParams& sp) {
bool WebRtcVoiceMediaChannel::RemoveRecvStream(uint32 ssrc) {
talk_base::CritScope lock(&receive_channels_cs_);
ChannelMap::iterator it = receive_channels_.find(ssrc);
if (it == receive_channels_.end())
if (it == receive_channels_.end()) {
LOG(LS_WARNING) << "Try to remove stream with ssrc " << ssrc
<< " which doesn't exist.";
return false;
}
if (ssrc == default_receive_ssrc_) {
ASSERT(voe_channel() == it->second.channel);
ASSERT(IsDefaultChannel(it->second.channel));
// Recycle the default channel, which is used for the recv stream.
if (playout_)
SetPlayout(voe_channel(), false);
@@ -2179,16 +2263,9 @@ bool WebRtcVoiceMediaChannel::RemoveRecvStream(uint32 ssrc) {
if (it->second.renderer)
it->second.renderer->RemoveChannel(it->second.channel);
if (engine()->voe()->network()->DeRegisterExternalTransport(
it->second.channel) == -1) {
LOG_RTCERR1(DeRegisterExternalTransport, it->second.channel);
}
LOG(LS_INFO) << "Removing audio stream " << ssrc
<< " with VoiceEngine channel #"
<< it->second.channel << ".";
if (engine()->voe()->base()->DeleteChannel(it->second.channel) == -1) {
LOG_RTCERR1(DeleteChannel, voe_channel());
<< " with VoiceEngine channel #" << it->second.channel << ".";
if (!DeleteChannel(it->second.channel)) {
// Erase the entry anyhow.
receive_channels_.erase(it);
return false;
@@ -2224,7 +2301,7 @@ bool WebRtcVoiceMediaChannel::SetRemoteRenderer(uint32 ssrc,
if (it == receive_channels_.end()) {
if (renderer) {
// Return an error if trying to set a valid renderer with an invalid ssrc.
LOG_RTCERR1(SetRemoteRenderer, ssrc);
LOG(LS_ERROR) << "SetRemoteRenderer failed with ssrc "<< ssrc;
return false;
}
@@ -2232,43 +2309,47 @@
return true;
}
AudioRenderer* remote_renderer = it->second.renderer;
if (renderer) {
ASSERT(it->second.renderer == NULL || it->second.renderer == renderer);
if (!it->second.renderer) {
ASSERT(remote_renderer == NULL || remote_renderer == renderer);
if (!remote_renderer) {
renderer->AddChannel(it->second.channel);
}
} else if (it->second.renderer) {
} else if (remote_renderer) {
// |renderer| == NULL, remove the channel from the renderer.
it->second.renderer->RemoveChannel(it->second.channel);
remote_renderer->RemoveChannel(it->second.channel);
}
// Assign the new value to the struct.
it->second.renderer = renderer;
return true;
}
bool WebRtcVoiceMediaChannel::SetLocalRenderer(uint32 ssrc,
AudioRenderer* renderer) {
if (!renderer && !local_renderer_)
return true;
int channel = GetSendChannelNum(ssrc);
if (channel == -1) {
// Invalidate the |local_renderer_| before quitting.
if (!renderer)
local_renderer_ = NULL;
ChannelMap::iterator it = send_channels_.find(ssrc);
if (it == send_channels_.end()) {
if (renderer) {
// Return an error if trying to set a valid renderer with an invalid ssrc.
LOG(LS_ERROR) << "SetLocalRenderer failed with ssrc "<< ssrc;
return false;
}
if (renderer) {
ASSERT(local_renderer_ == NULL || local_renderer_ == renderer);
if (!local_renderer_)
renderer->AddChannel(channel);
} else {
local_renderer_->RemoveChannel(channel);
// The channel has likely gone away; do nothing.
return true;
}
local_renderer_ = renderer;
AudioRenderer* local_renderer = it->second.renderer;
if (renderer) {
ASSERT(local_renderer == NULL || local_renderer == renderer);
if (!local_renderer)
renderer->AddChannel(it->second.channel);
} else if (local_renderer) {
local_renderer->RemoveChannel(it->second.channel);
}
// Assign the new value to the struct.
it->second.renderer = renderer;
return true;
}
@@ -2466,15 +2547,16 @@ bool WebRtcVoiceMediaChannel::InsertDtmf(uint32 ssrc, int event,
// Send the event.
if (flags & cricket::DF_SEND) {
if (send_ssrc_ != ssrc && ssrc != 0) {
int channel = (ssrc == 0) ? voe_channel() : GetSendChannelNum(ssrc);
if (channel == -1) {
LOG(LS_WARNING) << "InsertDtmf - The specified ssrc "
<< ssrc << " is not in use.";
return false;
}
// Send DTMF using out-of-band DTMF. ("true", as 3rd arg)
if (engine()->voe()->dtmf()->SendTelephoneEvent(voe_channel(),
event, true, duration) == -1) {
LOG_RTCERR4(SendTelephoneEvent, voe_channel(), event, true, duration);
if (engine()->voe()->dtmf()->SendTelephoneEvent(
channel, event, true, duration) == -1) {
LOG_RTCERR4(SendTelephoneEvent, channel, event, true, duration);
return false;
}
}
@@ -2525,27 +2607,56 @@ void WebRtcVoiceMediaChannel::OnPacketReceived(talk_base::Buffer* packet) {
}
void WebRtcVoiceMediaChannel::OnRtcpReceived(talk_base::Buffer* packet) {
// See above.
int which_channel = GetReceiveChannelNum(
ParseSsrc(packet->data(), packet->length(), true));
if (which_channel == -1) {
which_channel = voe_channel();
// Sending channels need all RTCP packets with feedback information.
// Even sender reports can contain attached report blocks.
// Receiving channels need sender reports in order to create
// correct receiver reports.
int type = 0;
if (!GetRtcpType(packet->data(), packet->length(), &type)) {
LOG(LS_WARNING) << "Failed to parse type from received RTCP packet";
return;
}
// If it is a sender report, find the channel that is listening.
bool has_sent_to_default_channel = false;
if (type == kRtcpTypeSR) {
int which_channel = GetReceiveChannelNum(
ParseSsrc(packet->data(), packet->length(), true));
if (which_channel != -1) {
engine()->voe()->network()->ReceivedRTCPPacket(
which_channel,
packet->data(),
static_cast<unsigned int>(packet->length()));
if (IsDefaultChannel(which_channel))
has_sent_to_default_channel = true;
}
}
// An SR may contain RRs, and any RR entry may correspond to any one of the
// send channels. So all RTCP packets must be forwarded to all send channels.
// VoE will filter out RRs internally.
for (ChannelMap::iterator iter = send_channels_.begin();
iter != send_channels_.end(); ++iter) {
// Make sure we don't send the same packet to the default channel more than
// once.
if (IsDefaultChannel(iter->second.channel) && has_sent_to_default_channel)
continue;
engine()->voe()->network()->ReceivedRTCPPacket(
iter->second.channel,
packet->data(),
static_cast<unsigned int>(packet->length()));
}
}
bool WebRtcVoiceMediaChannel::MuteStream(uint32 ssrc, bool muted) {
if (send_ssrc_ != ssrc && ssrc != 0) {
int channel = (ssrc == 0) ? voe_channel() : GetSendChannelNum(ssrc);
if (channel == -1) {
LOG(LS_WARNING) << "The specified ssrc " << ssrc << " is not in use.";
return false;
}
if (engine()->voe()->volume()->SetInputMute(voe_channel(),
muted) == -1) {
LOG_RTCERR2(SetInputMute, voe_channel(), muted);
if (engine()->voe()->volume()->SetInputMute(channel, muted) == -1) {
LOG_RTCERR2(SetInputMute, channel, muted);
return false;
}
return true;
@@ -2590,24 +2701,51 @@ bool WebRtcVoiceMediaChannel::SetSendBandwidth(bool autobw, int bps) {
}
bool WebRtcVoiceMediaChannel::GetStats(VoiceMediaInfo* info) {
// In VoiceEngine 3.5, GetRTCPStatistics will return 0 even when it fails,
// causing the stats to contain garbage information. To prevent this, we
// zero the stats structure before calling this API.
// TODO(juberti): Remove this workaround.
bool echo_metrics_on = false;
// These can take on valid negative values, so use the lowest possible level
// as default rather than -1.
int echo_return_loss = -100;
int echo_return_loss_enhancement = -100;
// These can also be negative, but in practice -1 is only used to signal
// insufficient data, since the resolution is limited to multiples of 4 ms.
int echo_delay_median_ms = -1;
int echo_delay_std_ms = -1;
if (engine()->voe()->processing()->GetEcMetricsStatus(
echo_metrics_on) != -1 && echo_metrics_on) {
// TODO(ajm): we may want to use VoECallReport::GetEchoMetricsSummary
// here, but it appears to be unsuitable currently. Revisit after this is
// investigated: http://b/issue?id=5666755
int erl, erle, rerl, anlp;
if (engine()->voe()->processing()->GetEchoMetrics(
erl, erle, rerl, anlp) != -1) {
echo_return_loss = erl;
echo_return_loss_enhancement = erle;
}
int median, std;
if (engine()->voe()->processing()->GetEcDelayMetrics(median, std) != -1) {
echo_delay_median_ms = median;
echo_delay_std_ms = std;
}
}
webrtc::CallStatistics cs;
unsigned int ssrc;
webrtc::CodecInst codec;
unsigned int level;
for (ChannelMap::const_iterator channel_iter = send_channels_.begin();
channel_iter != send_channels_.end(); ++channel_iter) {
const int channel = channel_iter->second.channel;
// Fill in the sender info, based on what we know, and what the
// remote side told us it got from its RTCP report.
VoiceSenderInfo sinfo;
// Data we obtain locally.
memset(&cs, 0, sizeof(cs));
if (engine()->voe()->rtp()->GetRTCPStatistics(voe_channel(), cs) == -1 ||
engine()->voe()->rtp()->GetLocalSSRC(voe_channel(), ssrc) == -1) {
return false;
if (engine()->voe()->rtp()->GetRTCPStatistics(channel, cs) == -1 ||
engine()->voe()->rtp()->GetLocalSSRC(channel, ssrc) == -1) {
continue;
}
sinfo.ssrc = ssrc;
@@ -2626,11 +2764,11 @@ bool WebRtcVoiceMediaChannel::GetStats(VoiceMediaInfo* info) {
sinfo.ext_seqnum = -1;
std::vector<webrtc::ReportBlock> receive_blocks;
if (engine()->voe()->rtp()->GetRemoteRTCPReportBlocks(
voe_channel(), &receive_blocks) != -1 &&
engine()->voe()->codec()->GetSendCodec(voe_channel(),
codec) != -1) {
channel, &receive_blocks) != -1 &&
engine()->voe()->codec()->GetSendCodec(channel, codec) != -1) {
std::vector<webrtc::ReportBlock>::iterator iter;
for (iter = receive_blocks.begin(); iter != receive_blocks.end(); ++iter) {
for (iter = receive_blocks.begin(); iter != receive_blocks.end();
++iter) {
// Lookup report for send ssrc only.
if (iter->source_SSRC == sinfo.ssrc) {
// Convert Q8 to floating point.
@@ -2650,35 +2788,18 @@ bool WebRtcVoiceMediaChannel::GetStats(VoiceMediaInfo* info) {
sinfo.audio_level = (engine()->voe()->volume()->
GetSpeechInputLevelFullRange(level) != -1) ? level : -1;
bool echo_metrics_on = false;
// These can take on valid negative values, so use the lowest possible level
// as default rather than -1.
sinfo.echo_return_loss = -100;
sinfo.echo_return_loss_enhancement = -100;
// These can also be negative, but in practice -1 is only used to signal
// insufficient data, since the resolution is limited to multiples of 4 ms.
sinfo.echo_delay_median_ms = -1;
sinfo.echo_delay_std_ms = -1;
if (engine()->voe()->processing()->GetEcMetricsStatus(echo_metrics_on) !=
-1 && echo_metrics_on) {
// TODO(ajm): we may want to use VoECallReport::GetEchoMetricsSummary
// here, but it appears to be unsuitable currently. Revisit after this is
// investigated: http://b/issue?id=5666755
int erl, erle, rerl, anlp;
if (engine()->voe()->processing()->GetEchoMetrics(erl, erle, rerl, anlp) !=
-1) {
sinfo.echo_return_loss = erl;
sinfo.echo_return_loss_enhancement = erle;
}
int median, std;
if (engine()->voe()->processing()->GetEcDelayMetrics(median, std) != -1) {
sinfo.echo_delay_median_ms = median;
sinfo.echo_delay_std_ms = std;
}
}
// TODO(xians): We are injecting the same APM logging to all the send
// channels here because there is no good way to know which send channel
// is using the APM. The correct fix is to allow the send channels to have
// their own APM so that we can feed the correct APM logging to different
// send channels. See issue crbug/264611.
sinfo.echo_return_loss = echo_return_loss;
sinfo.echo_return_loss_enhancement = echo_return_loss_enhancement;
sinfo.echo_delay_median_ms = echo_delay_median_ms;
sinfo.echo_delay_std_ms = echo_delay_std_ms;
info->senders.push_back(sinfo);
}
// Build the list of receivers, one for each receiving channel, or 1 in
// a 1:1 call.
@@ -2749,15 +2870,7 @@ void WebRtcVoiceMediaChannel::GetLastMediaError(
bool WebRtcVoiceMediaChannel::FindSsrc(int channel_num, uint32* ssrc) {
talk_base::CritScope lock(&receive_channels_cs_);
ASSERT(ssrc != NULL);
if (channel_num == voe_channel()) {
unsigned local_ssrc = 0;
// This is a sending channel.
if (engine()->voe()->rtp()->GetLocalSSRC(
channel_num, local_ssrc) != -1) {
*ssrc = local_ssrc;
}
return true;
} else if (channel_num == -1 && send_ != SEND_NOTHING) {
if (channel_num == -1 && send_ != SEND_NOTHING) {
// Sometimes the VoiceEngine core will throw an error with channel_num = -1.
// This means the error is not limited to a specific channel. Signal the
// message using ssrc=0. If the current channel is sending, use this
@@ -2765,6 +2878,20 @@ bool WebRtcVoiceMediaChannel::FindSsrc(int channel_num, uint32* ssrc) {
*ssrc = 0;
return true;
} else {
// Check whether this is a sending channel.
for (ChannelMap::const_iterator it = send_channels_.begin();
it != send_channels_.end(); ++it) {
if (it->second.channel == channel_num) {
// This is a sending channel.
uint32 local_ssrc = 0;
if (engine()->voe()->rtp()->GetLocalSSRC(
channel_num, local_ssrc) != -1) {
*ssrc = local_ssrc;
}
return true;
}
}
// Check whether this is a receiving channel.
for (ChannelMap::const_iterator it = receive_channels_.begin();
it != receive_channels_.end(); ++it) {
@@ -2796,7 +2923,11 @@ int WebRtcVoiceMediaChannel::GetReceiveChannelNum(uint32 ssrc) {
}
int WebRtcVoiceMediaChannel::GetSendChannelNum(uint32 ssrc) {
return (ssrc == send_ssrc_) ? voe_channel() : -1;
ChannelMap::iterator it = send_channels_.find(ssrc);
if (it != send_channels_.end())
return it->second.channel;
return -1;
}
bool WebRtcVoiceMediaChannel::GetRedSendCodec(const AudioCodec& red_codec,
@@ -2848,7 +2979,7 @@ bool WebRtcVoiceMediaChannel::GetRedSendCodec(const AudioCodec& red_codec,
bool WebRtcVoiceMediaChannel::EnableRtcp(int channel) {
if (engine()->voe()->rtp()->SetRTCPStatus(channel, true) == -1) {
LOG_RTCERR2(SetRTCPStatus, voe_channel(), 1);
LOG_RTCERR2(SetRTCPStatus, channel, 1);
return false;
}
// TODO(juberti): Enable VQMon and RTCP XR reports, once we know what

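The reworked OnRtcpReceived above keys its dispatch on the RTCP payload type (kRtcpTypeSR denotes a sender report). In RFC 3550 the payload type is the second byte of the RTCP common header (200 = SR, 201 = RR), so a parser consistent with the GetRtcpType(data, len, &type) call could look like this sketch (illustrative only; libjingle's real helper lives in its RTP utilities):

// Illustrative sketch of an RTCP payload-type parser.
bool GetRtcpTypeSketch(const void* data, size_t len, int* type) {
  if (data == NULL || len < 4)  // Need the 4-byte RTCP common header.
    return false;
  // Byte 1 of the common header is the packet type: 200 = SR, 201 = RR.
  *type = static_cast<const unsigned char*>(data)[1];
  return true;
}
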
View File

@@ -275,7 +275,7 @@ template <class T, class E>
class WebRtcMediaChannel : public T, public webrtc::Transport {
public:
WebRtcMediaChannel(E *engine, int channel)
: engine_(engine), voe_channel_(channel), sequence_number_(-1) {}
: engine_(engine), voe_channel_(channel) {}
E *engine() { return engine_; }
int voe_channel() const { return voe_channel_; }
bool valid() const { return voe_channel_ != -1; }
@@ -283,23 +283,10 @@ class WebRtcMediaChannel : public T, public webrtc::Transport {
protected:
// implements Transport interface
virtual int SendPacket(int channel, const void *data, int len) {
// We need to store the sequence number to be able to pick up
// the same sequence when the device is restarted.
// TODO(oja): Remove when WebRtc has fixed the problem.
int seq_num;
if (!GetRtpSeqNum(data, len, &seq_num)) {
return -1;
}
if (sequence_number() == -1) {
LOG(INFO) << "WebRtcVoiceMediaChannel sends first packet seqnum="
<< seq_num;
}
talk_base::Buffer packet(data, len, kMaxRtpPacketLen);
if (!T::SendPacket(&packet)) {
return -1;
}
sequence_number_ = seq_num;
return len;
}
@@ -308,14 +295,9 @@ class WebRtcMediaChannel : public T, public webrtc::Transport {
return T::SendRtcp(&packet) ? len : -1;
}
int sequence_number() const {
return sequence_number_;
}
private:
E *engine_;
int voe_channel_;
int sequence_number_;
};
// WebRtcVoiceMediaChannel is an implementation of VoiceMediaChannel that uses
@@ -393,16 +375,24 @@ class WebRtcVoiceMediaChannel
private:
struct WebRtcVoiceChannelInfo;
typedef std::map<uint32, WebRtcVoiceChannelInfo> ChannelMap;
void SetNack(uint32 ssrc, int channel, bool nack_enabled);
void SetNack(const ChannelMap& channels, bool nack_enabled);
bool SetSendCodec(const webrtc::CodecInst& send_codec);
bool SetSendCodec(int channel, const webrtc::CodecInst& send_codec);
bool ChangePlayout(bool playout);
bool ChangeSend(SendFlags send);
bool ChangeSend(int channel, SendFlags send);
void ConfigureSendChannel(int channel);
bool DeleteChannel(int channel);
bool InConferenceMode() const {
return options_.conference_mode.GetWithDefaultIfUnset(false);
}
bool IsDefaultChannel(int channel_id) const {
return channel_id == voe_channel();
}
typedef std::map<uint32, WebRtcVoiceChannelInfo> ChannelMap;
talk_base::scoped_ptr<WebRtcSoundclipStream> ringback_tone_;
std::set<int> ringback_channels_; // channels playing ringback
std::vector<AudioCodec> recv_codecs_;
@ -415,17 +405,14 @@ class WebRtcVoiceMediaChannel
SendFlags desired_send_;
SendFlags send_;
// TODO(xians): Add support for multiple send channels.
uint32 send_ssrc_;
// Weak pointer to the renderer of the local audio track. It is owned by the
// track and will be set to NULL when the track goes away or the channel is
// deleted. Used to notify the audio track when the media channel is added or
// removed.
AudioRenderer* local_renderer_;
// send_channels_ contains the channels which are being used for sending.
// When the default channel (voe_channel) is used for sending, it is
// contained in send_channels_, otherwise not.
ChannelMap send_channels_;
uint32 default_receive_ssrc_;
// Note the default channel (voe_channel()) can reside in both
// receive_channels_ and send channel in non-conference mode and in that case
// it will only be there if a non-zero default_receive_ssrc_ is set.
// receive_channels_ and send_channels_ in non-conference mode and in that
// case it will only be there if a non-zero default_receive_ssrc_ is set.
ChannelMap receive_channels_; // for multiple sources
// receive_channels_ can be read from WebRtc callback thread. Access from
// the WebRtc thread must be synchronized with edits on the worker thread.

View File

@ -121,6 +121,18 @@ class WebRtcVoiceEngineTestFake : public testing::Test {
}
return result;
}
void SetupForMultiSendStream() {
EXPECT_TRUE(SetupEngine());
// Remove the stream added in Setup, which corresponds to the default channel.
int default_channel_num = voe_.GetLastChannel();
uint32 default_send_ssrc;
EXPECT_EQ(0, voe_.GetLocalSSRC(default_channel_num, default_send_ssrc));
EXPECT_EQ(kSsrc1, default_send_ssrc);
EXPECT_TRUE(channel_->RemoveSendStream(default_send_ssrc));
// Verify the default channel still exists.
EXPECT_EQ(0, voe_.GetLocalSSRC(default_channel_num, default_send_ssrc));
}
void DeliverPacket(const void* data, int len) {
talk_base::Buffer packet(data, len);
channel_->OnPacketReceived(&packet);
@ -205,6 +217,47 @@ class WebRtcVoiceEngineTestFake : public testing::Test {
}
void TestSetSendRtpHeaderExtensions(int channel_id) {
std::vector<cricket::RtpHeaderExtension> extensions;
bool enable = false;
unsigned char id = 0;
// Ensure audio levels are off by default.
EXPECT_EQ(0, voe_.GetRTPAudioLevelIndicationStatus(
channel_id, enable, id));
EXPECT_FALSE(enable);
// Ensure unknown extensions won't cause an error.
extensions.push_back(cricket::RtpHeaderExtension(
"urn:ietf:params:unknowextention", 1));
EXPECT_TRUE(channel_->SetSendRtpHeaderExtensions(extensions));
EXPECT_EQ(0, voe_.GetRTPAudioLevelIndicationStatus(
channel_id, enable, id));
EXPECT_FALSE(enable);
// Ensure audio levels stay off with an empty list of headers.
EXPECT_TRUE(channel_->SetSendRtpHeaderExtensions(extensions));
EXPECT_EQ(0, voe_.GetRTPAudioLevelIndicationStatus(
channel_id, enable, id));
EXPECT_FALSE(enable);
// Ensure audio levels are enabled if the audio-level header is specified.
extensions.push_back(cricket::RtpHeaderExtension(
"urn:ietf:params:rtp-hdrext:ssrc-audio-level", 8));
EXPECT_TRUE(channel_->SetSendRtpHeaderExtensions(extensions));
EXPECT_EQ(0, voe_.GetRTPAudioLevelIndicationStatus(
channel_id, enable, id));
EXPECT_TRUE(enable);
EXPECT_EQ(8, id);
// Ensure audio levels go back off with an empty list.
extensions.clear();
EXPECT_TRUE(channel_->SetSendRtpHeaderExtensions(extensions));
EXPECT_EQ(0, voe_.GetRTPAudioLevelIndicationStatus(
channel_id, enable, id));
EXPECT_FALSE(enable);
}
protected:
cricket::FakeWebRtcVoiceEngine voe_;
cricket::FakeWebRtcVoiceEngine voe_sc_;
@ -1336,43 +1389,7 @@ TEST_F(WebRtcVoiceEngineTestFake, SetSendRtpHeaderExtensions) {
EXPECT_TRUE(SetupEngine());
std::vector<cricket::RtpHeaderExtension> extensions;
int channel_num = voe_.GetLastChannel();
bool enable = false;
unsigned char id = 0;
// Ensure audio levels are off by default.
EXPECT_EQ(0, voe_.GetRTPAudioLevelIndicationStatus(
channel_num, enable, id));
EXPECT_FALSE(enable);
// Ensure unknown extensions won't cause an error.
extensions.push_back(cricket::RtpHeaderExtension(
"urn:ietf:params:unknowextention", 1));
EXPECT_TRUE(channel_->SetSendRtpHeaderExtensions(extensions));
EXPECT_EQ(0, voe_.GetRTPAudioLevelIndicationStatus(
channel_num, enable, id));
EXPECT_FALSE(enable);
// Ensure audio levels stay off with an empty list of headers.
EXPECT_TRUE(channel_->SetSendRtpHeaderExtensions(extensions));
EXPECT_EQ(0, voe_.GetRTPAudioLevelIndicationStatus(
channel_num, enable, id));
EXPECT_FALSE(enable);
// Ensure audio levels are enabled if the audio-level header is specified.
extensions.push_back(cricket::RtpHeaderExtension(
"urn:ietf:params:rtp-hdrext:ssrc-audio-level", 8));
EXPECT_TRUE(channel_->SetSendRtpHeaderExtensions(extensions));
EXPECT_EQ(0, voe_.GetRTPAudioLevelIndicationStatus(
channel_num, enable, id));
EXPECT_TRUE(enable);
EXPECT_EQ(8, id);
// Ensure audio levels go back off with an empty list.
extensions.clear();
EXPECT_TRUE(channel_->SetSendRtpHeaderExtensions(extensions));
EXPECT_EQ(0, voe_.GetRTPAudioLevelIndicationStatus(
channel_num, enable, id));
EXPECT_FALSE(enable);
TestSetSendRtpHeaderExtensions(channel_num);
}
// Test that we can create a channel and start sending/playing out on it.
@ -1392,9 +1409,169 @@ TEST_F(WebRtcVoiceEngineTestFake, SendAndPlayout) {
EXPECT_FALSE(voe_.GetPlayout(channel_num));
}
// Test that we can add and remove streams, and do proper send/playout.
// We can receive on multiple streams, but will only send on one.
TEST_F(WebRtcVoiceEngineTestFake, SendAndPlayoutWithMultipleStreams) {
// Test that we can add and remove send streams.
TEST_F(WebRtcVoiceEngineTestFake, CreateAndDeleteMultipleSendStreams) {
SetupForMultiSendStream();
static const uint32 kSsrcs4[] = {1, 2, 3, 4};
// Set the global state for sending.
EXPECT_TRUE(channel_->SetSend(cricket::SEND_MICROPHONE));
for (unsigned int i = 0; i < ARRAY_SIZE(kSsrcs4); ++i) {
EXPECT_TRUE(channel_->AddSendStream(
cricket::StreamParams::CreateLegacy(kSsrcs4[i])));
// Verify that we are in a sending state for all the created streams.
int channel_num = voe_.GetChannelFromLocalSsrc(kSsrcs4[i]);
EXPECT_TRUE(voe_.GetSend(channel_num));
}
// Remove the first send channel, which is the default channel. It will only
// recycle the default channel but not delete it.
EXPECT_TRUE(channel_->RemoveSendStream(kSsrcs4[0]));
// Stream should already be removed from the send stream list.
EXPECT_FALSE(channel_->RemoveSendStream(kSsrcs4[0]));
// But the default still exists.
EXPECT_EQ(0, voe_.GetChannelFromLocalSsrc(kSsrcs4[0]));
// Delete the rest of send channel streams.
for (unsigned int i = 1; i < ARRAY_SIZE(kSsrcs4); ++i) {
EXPECT_TRUE(channel_->RemoveSendStream(kSsrcs4[i]));
// Stream should already be deleted.
EXPECT_FALSE(channel_->RemoveSendStream(kSsrcs4[i]));
EXPECT_EQ(-1, voe_.GetChannelFromLocalSsrc(kSsrcs4[i]));
}
}
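As a usage sketch, the flow this test exercises looks roughly like the following from the API side (assumes a ready cricket::VoiceMediaChannel* channel; error handling omitted):

// Create two independent send streams, identified by SSRC.
channel->AddSendStream(cricket::StreamParams::CreateLegacy(1));
channel->AddSendStream(cricket::StreamParams::CreateLegacy(2));
// SetSend is global: it starts or stops sending on every send stream.
channel->SetSend(cricket::SEND_MICROPHONE);
channel->SetSend(cricket::SEND_NOTHING);
// Removing a stream deletes its channel; the default channel is only
// recycled, never deleted.
channel->RemoveSendStream(1);
channel->RemoveSendStream(2);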
// Test that SetSendCodecs correctly configures the codecs in all send streams.
TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecsWithMultipleSendStreams) {
SetupForMultiSendStream();
static const uint32 kSsrcs4[] = {1, 2, 3, 4};
// Create send streams.
for (unsigned int i = 0; i < ARRAY_SIZE(kSsrcs4); ++i) {
EXPECT_TRUE(channel_->AddSendStream(
cricket::StreamParams::CreateLegacy(kSsrcs4[i])));
}
std::vector<cricket::AudioCodec> codecs;
// Set ISAC(16K) and CN(16K). VAD should be activated.
codecs.push_back(kIsacCodec);
codecs.push_back(kCn16000Codec);
codecs[1].id = 97;
EXPECT_TRUE(channel_->SetSendCodecs(codecs));
// Verify ISAC and VAD are correctly configured on all send channels.
webrtc::CodecInst gcodec;
for (unsigned int i = 0; i < ARRAY_SIZE(kSsrcs4); ++i) {
int channel_num = voe_.GetChannelFromLocalSsrc(kSsrcs4[i]);
EXPECT_EQ(0, voe_.GetSendCodec(channel_num, gcodec));
EXPECT_STREQ("ISAC", gcodec.plname);
EXPECT_TRUE(voe_.GetVAD(channel_num));
EXPECT_EQ(97, voe_.GetSendCNPayloadType(channel_num, true));
}
// Change to PCMU(8K) and CN(16K). VAD should not be activated.
codecs[0] = kPcmuCodec;
EXPECT_TRUE(channel_->SetSendCodecs(codecs));
for (unsigned int i = 0; i < ARRAY_SIZE(kSsrcs4); ++i) {
int channel_num = voe_.GetChannelFromLocalSsrc(kSsrcs4[i]);
EXPECT_EQ(0, voe_.GetSendCodec(channel_num, gcodec));
EXPECT_STREQ("PCMU", gcodec.plname);
EXPECT_FALSE(voe_.GetVAD(channel_num));
}
}
// Test we can SetSend on all send streams correctly.
TEST_F(WebRtcVoiceEngineTestFake, SetSendWithMultipleSendStreams) {
SetupForMultiSendStream();
static const uint32 kSsrcs4[] = {1, 2, 3, 4};
// Create the send channels; they should start in the SEND_NOTHING state.
for (unsigned int i = 0; i < ARRAY_SIZE(kSsrcs4); ++i) {
EXPECT_TRUE(channel_->AddSendStream(
cricket::StreamParams::CreateLegacy(kSsrcs4[i])));
int channel_num = voe_.GetLastChannel();
EXPECT_FALSE(voe_.GetSend(channel_num));
}
// Set the global state to start sending.
EXPECT_TRUE(channel_->SetSend(cricket::SEND_MICROPHONE));
for (unsigned int i = 0; i < ARRAY_SIZE(kSsrcs4); ++i) {
// Verify that we are in a sending state for all the send streams.
int channel_num = voe_.GetChannelFromLocalSsrc(kSsrcs4[i]);
EXPECT_TRUE(voe_.GetSend(channel_num));
}
// Set the global state to stop sending.
EXPECT_TRUE(channel_->SetSend(cricket::SEND_NOTHING));
for (unsigned int i = 1; i < ARRAY_SIZE(kSsrcs4); ++i) {
// Verify that sending has stopped on all the send streams.
int channel_num = voe_.GetChannelFromLocalSsrc(kSsrcs4[i]);
EXPECT_FALSE(voe_.GetSend(channel_num));
}
}
// Test that we get correct statistics from all the send streams.
TEST_F(WebRtcVoiceEngineTestFake, GetStatsWithMultipleSendStreams) {
SetupForMultiSendStream();
static const uint32 kSsrcs4[] = {1, 2, 3, 4};
// Create send streams.
for (unsigned int i = 0; i < ARRAY_SIZE(kSsrcs4); ++i) {
EXPECT_TRUE(channel_->AddSendStream(
cricket::StreamParams::CreateLegacy(kSsrcs4[i])));
}
// We need a send codec to be set to get all stats.
std::vector<cricket::AudioCodec> codecs;
codecs.push_back(kPcmuCodec);
EXPECT_TRUE(channel_->SetSendCodecs(codecs));
cricket::VoiceMediaInfo info;
EXPECT_EQ(true, channel_->GetStats(&info));
EXPECT_EQ(static_cast<size_t>(ARRAY_SIZE(kSsrcs4)), info.senders.size());
// Verify the statistic information is correct.
for (unsigned int i = 0; i < ARRAY_SIZE(kSsrcs4); ++i) {
EXPECT_EQ(kSsrcs4[i], info.senders[i].ssrc);
EXPECT_EQ(kPcmuCodec.name, info.senders[i].codec_name);
EXPECT_EQ(cricket::kIntStatValue, info.senders[i].bytes_sent);
EXPECT_EQ(cricket::kIntStatValue, info.senders[i].packets_sent);
EXPECT_EQ(cricket::kIntStatValue, info.senders[i].packets_lost);
EXPECT_EQ(cricket::kFractionLostStatValue, info.senders[i].fraction_lost);
EXPECT_EQ(cricket::kIntStatValue, info.senders[i].ext_seqnum);
EXPECT_EQ(cricket::kIntStatValue, info.senders[i].rtt_ms);
EXPECT_EQ(cricket::kIntStatValue, info.senders[i].jitter_ms);
}
EXPECT_EQ(1u, info.receivers.size());
}
// Test that we support setting certain send header extensions on multiple
// send streams.
TEST_F(WebRtcVoiceEngineTestFake,
SetSendRtpHeaderExtensionsWithMultipleSendStreams) {
SetupForMultiSendStream();
static const uint32 kSsrcs4[] = {1, 2, 3, 4};
// Create send streams.
for (unsigned int i = 0; i < ARRAY_SIZE(kSsrcs4); ++i) {
EXPECT_TRUE(channel_->AddSendStream(
cricket::StreamParams::CreateLegacy(kSsrcs4[i])));
}
// Test SendRtpHeaderExtensions on each send channel.
for (unsigned int i = 0; i < ARRAY_SIZE(kSsrcs4); ++i) {
int channel_num = voe_.GetChannelFromLocalSsrc(kSsrcs4[i]);
TestSetSendRtpHeaderExtensions(channel_num);
}
}
// Test that we can add and remove receive streams, and do proper send/playout.
// We can receive on multiple streams while sending one stream.
TEST_F(WebRtcVoiceEngineTestFake, PlayoutWithMultipleStreams) {
EXPECT_TRUE(SetupEngine());
int channel_num1 = voe_.GetLastChannel();

View File

@ -79,9 +79,16 @@ SessionClient* SessionManager::GetClient(const std::string& content_type) {
Session* SessionManager::CreateSession(const std::string& local_name,
const std::string& content_type) {
return CreateSession(local_name, local_name,
talk_base::ToString(talk_base::CreateRandomId64()),
content_type, false);
std::string id;
return CreateSession(id, local_name, content_type);
}
Session* SessionManager::CreateSession(const std::string& id,
const std::string& local_name,
const std::string& content_type) {
std::string sid =
id.empty() ? talk_base::ToString(talk_base::CreateRandomId64()) : id;
return CreateSession(local_name, local_name, sid, content_type, false);
}
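A minimal sketch of the two entry points after this change; an empty id falls back to a random 64-bit id, while a non-empty id is used verbatim:

// Legacy overload: random session id.
Session* a = session_manager->CreateSession(local_name, content_type);
// New overload: caller-supplied id (e.g. for correlating with an
// out-of-band invite).
Session* b = session_manager->CreateSession("12345", local_name,
                                            content_type);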
Session* SessionManager::CreateSession(

View File

@ -92,6 +92,10 @@ class SessionManager : public sigslot::has_slots<> {
Session *CreateSession(const std::string& local_name,
const std::string& content_type);
Session *CreateSession(const std::string& id,
const std::string& local_name,
const std::string& content_type);
// Destroys the given session.
void DestroySession(Session *session);

View File

@ -59,6 +59,19 @@ V FindOrNull(const std::map<K, V>& map,
return (it != map.end()) ? it->second : NULL;
}
bool ContentContainsCrypto(const cricket::ContentInfo* content) {
if (content != NULL) {
const cricket::MediaContentDescription* desc =
static_cast<const cricket::MediaContentDescription*>(
content->description);
if (!desc || desc->cryptos().empty()) {
return false;
}
}
return true;
}
}
Call::Call(MediaSessionClient* session_client)
@ -85,22 +98,16 @@ Call::~Call() {
Session* Call::InitiateSession(const buzz::Jid& to,
const buzz::Jid& initiator,
const CallOptions& options) {
const SessionDescription* offer = session_client_->CreateOffer(options);
std::string id;
std::string initiator_name = initiator.Str();
return InternalInitiateSession(id, to, initiator_name, options);
}
Session* session = session_client_->CreateSession(this);
session->set_initiator_name(initiator.Str());
AddSession(session, offer);
session->Initiate(to.Str(), offer);
// After this timeout, terminate the call because the callee isn't
// answering
session_client_->session_manager()->signaling_thread()->Clear(this,
MSG_TERMINATECALL);
session_client_->session_manager()->signaling_thread()->PostDelayed(
send_to_voicemail_ ? kSendToVoicemailTimeout : kNoVoicemailTimeout,
this, MSG_TERMINATECALL);
return session;
Session *Call::InitiateSession(const std::string& id,
const buzz::Jid& to,
const CallOptions& options) {
std::string initiator_name;
return InternalInitiateSession(id, to, initiator_name, options);
}
void Call::IncomingSession(Session* session, const SessionDescription* offer) {
@ -1025,4 +1032,66 @@ void Call::OnReceivedTerminateReason(Session* session,
SignalReceivedTerminateReason(this, session, reason);
}
// TODO(mdodd): Get rid of this method since all Hangouts are using a secure
// connection.
bool Call::secure() const {
if (session_client_->secure() == SEC_DISABLED) {
return false;
}
bool ret = true;
int i = 0;
MediaSessionMap::const_iterator it;
for (it = media_session_map_.begin(); it != media_session_map_.end(); ++it) {
LOG_F(LS_VERBOSE) << "session[" << i
<< "], check local and remote descriptions";
i++;
if (!SessionDescriptionContainsCrypto(
it->second.session->local_description()) ||
!SessionDescriptionContainsCrypto(
it->second.session->remote_description())) {
ret = false;
break;
}
}
LOG_F(LS_VERBOSE) << "secure=" << ret;
return ret;
}
bool Call::SessionDescriptionContainsCrypto(
const SessionDescription* sdesc) const {
if (sdesc == NULL) {
LOG_F(LS_VERBOSE) << "sessionDescription is NULL";
return false;
}
return ContentContainsCrypto(sdesc->GetContentByName(CN_AUDIO)) &&
ContentContainsCrypto(sdesc->GetContentByName(CN_VIDEO));
}
Session* Call::InternalInitiateSession(const std::string& id,
const buzz::Jid& to,
const std::string& initiator_name,
const CallOptions& options) {
const SessionDescription* offer = session_client_->CreateOffer(options);
Session* session = session_client_->CreateSession(id, this);
session->set_initiator_name(initiator_name);
AddSession(session, offer);
session->Initiate(to.Str(), offer);
// After this timeout, terminate the call because the callee isn't
// answering
session_client_->session_manager()->signaling_thread()->Clear(this,
MSG_TERMINATECALL);
session_client_->session_manager()->signaling_thread()->PostDelayed(
send_to_voicemail_ ? kSendToVoicemailTimeout : kNoVoicemailTimeout,
this, MSG_TERMINATECALL);
return session;
}
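Both public overloads now funnel into InternalInitiateSession; a usage sketch, assuming jids and options are already set up:

// Legacy overload: random id, initiator name taken from |initiator|.
Session* s1 = call->InitiateSession(to_jid, initiator_jid, options);
// New overload: caller-supplied id, empty initiator name.
Session* s2 = call->InitiateSession("0123456789abcdef", to_jid, options);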
} // namespace cricket

View File

@ -66,6 +66,8 @@ class Call : public talk_base::MessageHandler, public sigslot::has_slots<> {
// |initiator| can be empty.
Session* InitiateSession(const buzz::Jid& to, const buzz::Jid& initiator,
const CallOptions& options);
Session* InitiateSession(const std::string& id, const buzz::Jid& to,
const CallOptions& options);
void AcceptSession(Session* session, const CallOptions& options);
void RejectSession(Session* session);
void TerminateSession(Session* session);
@ -100,6 +102,8 @@ class Call : public talk_base::MessageHandler, public sigslot::has_slots<> {
bool has_video() const { return has_video_; }
bool has_data() const { return has_data_; }
bool muted() const { return muted_; }
bool video() const { return has_video_; }
bool secure() const;
bool video_muted() const { return video_muted_; }
const std::vector<StreamParams>* GetDataRecvStreams(Session* session) const {
MediaStreams* recv_streams = GetMediaStreams(session);
@ -222,6 +226,11 @@ class Call : public talk_base::MessageHandler, public sigslot::has_slots<> {
void ContinuePlayDTMF();
bool StopScreencastWithoutSendingUpdate(Session* session, uint32 ssrc);
bool StopAllScreencastsWithoutSendingUpdate(Session* session);
bool SessionDescriptionContainsCrypto(const SessionDescription* sdesc) const;
Session* InternalInitiateSession(const std::string& id,
const buzz::Jid& to,
const std::string& initiator_name,
const CallOptions& options);
uint32 id_;
MediaSessionClient* session_client_;

View File

@ -48,6 +48,7 @@
#include "talk/media/sctp/sctpdataengine.h"
#endif
#include "talk/session/media/soundclip.h"
#include "talk/session/media/srtpfilter.h"
namespace cricket {
@ -138,8 +139,15 @@ void ChannelManager::Construct(MediaEngineInterface* me,
}
ChannelManager::~ChannelManager() {
if (initialized_)
if (initialized_) {
Terminate();
// If srtp is initialized (done by the Channel), we must call srtp_shutdown
// to free all crypto kernel lists. We need to make sure shutdown is always
// called at the end, after the channels are destroyed. The ChannelManager
// d'tor is always called last, so it is a safe place to call shutdown.
ShutdownSrtp();
}
}
bool ChannelManager::SetVideoRtxEnabled(bool enable) {

View File

@ -220,8 +220,13 @@ void MediaSessionClient::JoinCalls(Call *call_to_join, Call *call) {
}
Session *MediaSessionClient::CreateSession(Call *call) {
std::string id;
return CreateSession(id, call);
}
Session *MediaSessionClient::CreateSession(const std::string& id, Call* call) {
const std::string& type = NS_JINGLE_RTP;
Session *session = session_manager_->CreateSession(jid().Str(), type);
Session *session = session_manager_->CreateSession(id, jid().Str(), type);
session_map_[session->id()] = call;
return session;
}

View File

@ -152,6 +152,7 @@ class MediaSessionClient : public SessionClient, public sigslot::has_slots<> {
void OnSessionState(BaseSession *session, BaseSession::State state);
void OnSessionDestroy(Session *session);
Session *CreateSession(Call *call);
Session *CreateSession(const std::string& id, Call* call);
Call *FindCallByRemoteName(const std::string &remote_name);
buzz::Jid jid_;

View File

@ -97,6 +97,15 @@ void EnableSrtpDebugging() {
#endif // HAVE_SRTP
}
// NOTE: This is called from ChannelManager D'tor.
void ShutdownSrtp() {
#ifdef HAVE_SRTP
// If srtp_dealloc was not executed for some sessions, this will clear them
// all. This should be called when the application is shutting down.
SrtpSession::Terminate();
#endif
}
SrtpFilter::SrtpFilter()
: state_(ST_INIT),
signal_silent_time_in_ms_(0) {
@ -621,6 +630,17 @@ bool SrtpSession::Init() {
return true;
}
void SrtpSession::Terminate() {
if (inited_) {
int err = srtp_shutdown();
if (err) {
LOG(LS_ERROR) << "srtp_shutdown failed. err=" << err;
return;
}
inited_ = false;
}
}
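The ordering requirement is easiest to see as a teardown sketch; the channel pointers are hypothetical, and the sequence mirrors what the ChannelManager destructor now does:

delete voice_channel;     // Channels release their live SRTP sessions first.
delete video_channel;
cricket::ShutdownSrtp();  // Then free libsrtp's crypto kernel lists once.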
void SrtpSession::HandleEvent(const srtp_event_data_t* ev) {
switch (ev->event) {
case event_ssrc_collision:

View File

@ -65,6 +65,7 @@ class SrtpSession;
class SrtpStat;
void EnableSrtpDebugging();
void ShutdownSrtp();
// Class to transform SRTP to/from RTP.
// Initialize by calling SetSend with the local security params, then call
@ -208,6 +209,9 @@ class SrtpSession {
// Update the silent threshold (in ms) for signaling errors.
void set_signal_silent_time(uint32 signal_silent_time_in_ms);
// Calls srtp_shutdown if it's initialized.
static void Terminate();
sigslot::repeater3<uint32, SrtpFilter::Mode, SrtpFilter::Error>
SignalSrtpError;

View File

@ -119,6 +119,27 @@ const char STR_MUC_ROOM_FEATURE_HANGOUT_LITE[] = "muc_lite";
const char STR_MUC_ROOM_FEATURE_BROADCAST[] = "broadcast";
const char STR_MUC_ROOM_FEATURE_MULTI_USER_VC[] = "muc_muvc";
const char STR_ID_TYPE_CONVERSATION[] = "conversation";
const char NS_GOOGLE_MUC_HANGOUT[] = "google:muc#hangout";
const StaticQName QN_GOOGLE_MUC_HANGOUT_INVITE =
{ NS_GOOGLE_MUC_HANGOUT, "invite" };
const StaticQName QN_GOOGLE_MUC_HANGOUT_INVITE_TYPE =
{ NS_GOOGLE_MUC_HANGOUT, "invite-type" };
const StaticQName QN_ATTR_CREATE_ACTIVITY =
{ STR_EMPTY, "create-activity" };
const StaticQName QN_GOOGLE_MUC_HANGOUT_PUBLIC =
{ NS_GOOGLE_MUC_HANGOUT, "public" };
const StaticQName QN_GOOGLE_MUC_HANGOUT_INVITEE =
{ NS_GOOGLE_MUC_HANGOUT, "invitee" };
const StaticQName QN_GOOGLE_MUC_HANGOUT_NOTIFICATION_STATUS =
{ NS_GOOGLE_MUC_HANGOUT, "notification-status" };
const StaticQName QN_GOOGLE_MUC_HANGOUT_NOTIFICATION_TYPE = {
NS_GOOGLE_MUC_HANGOUT, "notification-type" };
const StaticQName QN_GOOGLE_MUC_HANGOUT_HANGOUT_START_CONTEXT = {
NS_GOOGLE_MUC_HANGOUT, "hangout-start-context" };
const StaticQName QN_GOOGLE_MUC_HANGOUT_CONVERSATION_ID = {
NS_GOOGLE_MUC_HANGOUT, "conversation-id" };
const StaticQName QN_STREAM_STREAM = { NS_STREAM, STR_STREAM };
const StaticQName QN_STREAM_FEATURES = { NS_STREAM, "features" };
const StaticQName QN_STREAM_ERROR = { NS_STREAM, "error" };

View File

@ -112,6 +112,17 @@ extern const char STR_MUC_ROOM_FEATURE_HANGOUT_LITE[];
extern const char STR_MUC_ROOM_FEATURE_BROADCAST[];
extern const char STR_MUC_ROOM_FEATURE_MULTI_USER_VC[];
extern const char STR_ID_TYPE_CONVERSATION[];
extern const char NS_GOOGLE_MUC_HANGOUT[];
extern const StaticQName QN_GOOGLE_MUC_HANGOUT_INVITE;
extern const StaticQName QN_GOOGLE_MUC_HANGOUT_INVITE_TYPE;
extern const StaticQName QN_ATTR_CREATE_ACTIVITY;
extern const StaticQName QN_GOOGLE_MUC_HANGOUT_PUBLIC;
extern const StaticQName QN_GOOGLE_MUC_HANGOUT_INVITEE;
extern const StaticQName QN_GOOGLE_MUC_HANGOUT_NOTIFICATION_TYPE;
extern const StaticQName QN_GOOGLE_MUC_HANGOUT_HANGOUT_START_CONTEXT;
extern const StaticQName QN_GOOGLE_MUC_HANGOUT_CONVERSATION_ID;
extern const StaticQName QN_STREAM_STREAM;
extern const StaticQName QN_STREAM_FEATURES;
extern const StaticQName QN_STREAM_ERROR;

View File

@ -49,12 +49,20 @@ void MucRoomDiscoveryTask::HandleResult(const XmlElement* stanza) {
std::map<std::string, std::string> extended_info;
const XmlElement* identity = query->FirstNamed(QN_DISCO_IDENTITY);
if (identity == NULL || !identity->HasAttr(QN_NAME)) {
SignalResult(this, false, "", features, extended_info);
SignalResult(this, false, "", "", features, extended_info);
return;
}
const std::string name(identity->Attr(QN_NAME));
// Get the conversation id
const XmlElement* convIdElement =
identity->FirstNamed(QN_GOOGLE_MUC_HANGOUT_CONVERSATION_ID);
std::string conversation_id;
if (convIdElement != NULL) {
conversation_id = convIdElement->BodyText();
}
for (const XmlElement* feature = query->FirstNamed(QN_DISCO_FEATURE);
feature != NULL; feature = feature->NextNamed(QN_DISCO_FEATURE)) {
features.insert(feature->Attr(QN_VAR));
@ -69,7 +77,7 @@ void MucRoomDiscoveryTask::HandleResult(const XmlElement* stanza) {
}
}
SignalResult(this, true, name, features, extended_info);
SignalResult(this, true, name, conversation_id, features, extended_info);
}
} // namespace buzz

View File

@ -41,10 +41,11 @@ class MucRoomDiscoveryTask : public IqTask {
MucRoomDiscoveryTask(XmppTaskParentInterface* parent,
const Jid& room_jid);
// Signal (exists, name, features, extended_info)
sigslot::signal5<MucRoomDiscoveryTask*,
// Signal (exists, name, conversationId, features, extended_info)
sigslot::signal6<MucRoomDiscoveryTask*,
bool,
const std::string&,
const std::string&,
const std::set<std::string>&,
const std::map<std::string, std::string>& > SignalResult;

View File

@ -43,10 +43,12 @@ class MucRoomDiscoveryListener : public sigslot::has_slots<> {
void OnResult(buzz::MucRoomDiscoveryTask* task,
bool exists,
const std::string& name,
const std::string& conversation_id,
const std::set<std::string>& features,
const std::map<std::string, std::string>& extended_info) {
last_exists = exists;
last_name = name;
last_conversation_id = conversation_id;
last_features = features;
last_extended_info = extended_info;
}
@ -58,6 +60,7 @@ class MucRoomDiscoveryListener : public sigslot::has_slots<> {
bool last_exists;
std::string last_name;
std::string last_conversation_id;
std::set<std::string> last_features;
std::map<std::string, std::string> last_extended_info;
int error_count;
@ -67,7 +70,8 @@ class MucRoomDiscoveryTaskTest : public testing::Test {
public:
MucRoomDiscoveryTaskTest() :
room_jid("muc-jid-ponies@domain.com"),
room_name("ponies") {
room_name("ponies"),
conversation_id("test_conversation_id") {
}
virtual void SetUp() {
@ -87,6 +91,7 @@ class MucRoomDiscoveryTaskTest : public testing::Test {
MucRoomDiscoveryListener* listener;
buzz::Jid room_jid;
std::string room_name;
std::string conversation_id;
};
TEST_F(MucRoomDiscoveryTaskTest, TestDiscovery) {
@ -107,12 +112,16 @@ TEST_F(MucRoomDiscoveryTaskTest, TestDiscovery) {
EXPECT_EQ(expected_iq, xmpp_client->sent_stanzas()[0]->Str());
EXPECT_EQ("", listener->last_name);
EXPECT_EQ("", listener->last_conversation_id);
std::string response_iq =
"<iq xmlns='jabber:client'"
" from='muc-jid-ponies@domain.com' id='0' type='result'>"
" <info:query xmlns:info='http://jabber.org/protocol/disco#info'>"
" <info:identity name='ponies'/>"
" <info:identity name='ponies'>"
" <han:conversation-id xmlns:han='google:muc#hangout'>"
"test_conversation_id</han:conversation-id>"
" </info:identity>"
" <info:feature var='feature1'/>"
" <info:feature var='feature2'/>"
" <data:x xmlns:data='jabber:x:data'>"
@ -126,6 +135,7 @@ TEST_F(MucRoomDiscoveryTaskTest, TestDiscovery) {
EXPECT_EQ(true, listener->last_exists);
EXPECT_EQ(room_name, listener->last_name);
EXPECT_EQ(conversation_id, listener->last_conversation_id);
EXPECT_EQ(2U, listener->last_features.size());
EXPECT_EQ(1U, listener->last_features.count("feature1"));
EXPECT_EQ(2U, listener->last_extended_info.size());

View File

@ -46,6 +46,11 @@ struct MucRoomInfo {
class MucRoomLookupTask : public IqTask {
public:
enum IdType {
ID_TYPE_CONVERSATION,
ID_TYPE_HANGOUT
};
static MucRoomLookupTask*
CreateLookupTaskForRoomName(XmppTaskParentInterface* parent,
const Jid& lookup_server_jid,

View File

@ -54,6 +54,7 @@
],
'sources': [
'interface/i420_video_frame.h',
'interface/texture_video_frame.h',
'i420_video_frame.cc',
'jpeg/include/jpeg.h',
'jpeg/data_manager.cc',
@ -65,6 +66,7 @@
'libyuv/scaler.cc',
'plane.h',
'plane.cc',
'texture_video_frame.cc'
],
# Silence jpeg struct padding warnings.
'msvs_disabled_warnings': [ 4324, ],
@ -88,6 +90,7 @@
'libyuv/libyuv_unittest.cc',
'libyuv/scaler_unittest.cc',
'plane_unittest.cc',
'texture_video_frame_unittest.cc'
],
# Disable warnings to enable Win64 build, issue 1323.
'msvs_disabled_warnings': [

View File

@ -142,6 +142,8 @@ void I420VideoFrame::ResetSize() {
v_plane_.ResetSize();
}
void* I420VideoFrame::native_handle() const { return NULL; }
int I420VideoFrame::CheckDimensions(int width, int height,
int stride_y, int stride_u, int stride_v) {
int half_width = (width + 1) / 2;
@ -179,5 +181,4 @@ Plane* I420VideoFrame::GetPlane(PlaneType type) {
return NULL;
}
} // namespace webrtc
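Returning NULL here is what lets consumers branch on the frame type; the dispatch pattern used throughout this commit, as a sketch:

void HandleIncomingFrame(webrtc::I420VideoFrame* frame) {
  if (frame->native_handle() != NULL) {
    // Texture-backed frame (TextureVideoFrame): carry the opaque handle
    // through untouched; the I420 pixel accessors must not be used.
  } else {
    // Plain I420 frame: buffer()/stride() are valid and may be copied.
  }
}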

View File

@ -16,6 +16,7 @@
// Storing and handling of YUV (I420) video frames.
#include "webrtc/common_video/plane.h"
#include "webrtc/system_wrappers/interface/scoped_refptr.h"
#include "webrtc/typedefs.h"
/*
@ -49,13 +50,13 @@ class I420VideoFrame {
// If required size is bigger than the allocated one, new buffers of adequate
// size will be allocated.
// Return value: 0 on success, -1 on error.
int CreateEmptyFrame(int width, int height,
virtual int CreateEmptyFrame(int width, int height,
int stride_y, int stride_u, int stride_v);
// CreateFrame: Sets the frame's members and buffers. If required size is
// bigger than allocated one, new buffers of adequate size will be allocated.
// Return value: 0 on success, -1 on error.
int CreateFrame(int size_y, const uint8_t* buffer_y,
virtual int CreateFrame(int size_y, const uint8_t* buffer_y,
int size_u, const uint8_t* buffer_u,
int size_v, const uint8_t* buffer_v,
int width, int height,
@ -64,59 +65,66 @@ class I420VideoFrame {
// Copy frame: If required size is bigger than allocated one, new buffers of
// adequate size will be allocated.
// Return value: 0 on success, -1 on error.
int CopyFrame(const I420VideoFrame& videoFrame);
virtual int CopyFrame(const I420VideoFrame& videoFrame);
// Swap Frame.
void SwapFrame(I420VideoFrame* videoFrame);
virtual void SwapFrame(I420VideoFrame* videoFrame);
// Get pointer to buffer per plane.
uint8_t* buffer(PlaneType type);
virtual uint8_t* buffer(PlaneType type);
// Overloading with const.
const uint8_t* buffer(PlaneType type) const;
virtual const uint8_t* buffer(PlaneType type) const;
// Get allocated size per plane.
int allocated_size(PlaneType type) const;
virtual int allocated_size(PlaneType type) const;
// Get allocated stride per plane.
int stride(PlaneType type) const;
virtual int stride(PlaneType type) const;
// Set frame width.
int set_width(int width);
virtual int set_width(int width);
// Set frame height.
int set_height(int height);
virtual int set_height(int height);
// Get frame width.
int width() const {return width_;}
virtual int width() const {return width_;}
// Get frame height.
int height() const {return height_;}
virtual int height() const {return height_;}
// Set frame timestamp (90kHz).
void set_timestamp(uint32_t timestamp) {timestamp_ = timestamp;}
virtual void set_timestamp(uint32_t timestamp) {timestamp_ = timestamp;}
// Get frame timestamp (90kHz).
uint32_t timestamp() const {return timestamp_;}
virtual uint32_t timestamp() const {return timestamp_;}
// Set render time in milliseconds.
void set_render_time_ms(int64_t render_time_ms) {render_time_ms_ =
virtual void set_render_time_ms(int64_t render_time_ms) {render_time_ms_ =
render_time_ms;}
// Get render time in milliseconds.
int64_t render_time_ms() const {return render_time_ms_;}
virtual int64_t render_time_ms() const {return render_time_ms_;}
// Return true if underlying plane buffers are of zero size, false if not.
bool IsZeroSize() const;
virtual bool IsZeroSize() const;
// Reset underlying plane buffers sizes to 0. This function doesn't
// clear memory.
void ResetSize();
virtual void ResetSize();
// Return the handle of the underlying video frame. This is used when the
// frame is backed by a texture. The object should be destroyed when it is no
// longer in use, so the underlying resource can be freed.
virtual void* native_handle() const;
protected:
// Verifies legality of parameters.
// Return value: 0 on success, -1 on error.
virtual int CheckDimensions(int width, int height,
int stride_y, int stride_u, int stride_v);
private:
// Verifies legality of parameters.
// Return value: 0 on success, -1 on error.
int CheckDimensions(int width, int height,
int stride_y, int stride_u, int stride_v);
// Get the pointer to a specific plane.
const Plane* GetPlane(PlaneType type) const;
// Overloading with non-const.

View File

@ -0,0 +1,36 @@
/*
* Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef COMMON_VIDEO_INTERFACE_NATIVEHANDLE_H_
#define COMMON_VIDEO_INTERFACE_NATIVEHANDLE_H_
#include "webrtc/typedefs.h"
namespace webrtc {
// A class to store an opaque handle of the underlying video frame. This is used
// when the frame is backed by a texture. WebRTC carries the handle in
// TextureVideoFrame. This object keeps a reference to the handle. The reference
// is cleared when the object is destroyed. It is important to destroy the
// object as soon as possible so the texture can be recycled.
class NativeHandle {
public:
virtual ~NativeHandle() {}
// For scoped_refptr
virtual int32_t AddRef() = 0;
virtual int32_t Release() = 0;
// Gets the handle.
virtual void* GetHandle() = 0;
};
} // namespace webrtc
#endif // COMMON_VIDEO_INTERFACE_NATIVEHANDLE_H_

View File

@ -0,0 +1,72 @@
/*
* Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef COMMON_VIDEO_INTERFACE_TEXTURE_VIDEO_FRAME_H
#define COMMON_VIDEO_INTERFACE_TEXTURE_VIDEO_FRAME_H
// TextureVideoFrame class
//
// Storing and handling of video frames backed by textures.
#include "webrtc/common_video/interface/i420_video_frame.h"
#include "webrtc/common_video/interface/native_handle.h"
#include "webrtc/system_wrappers/interface/scoped_refptr.h"
#include "webrtc/typedefs.h"
namespace webrtc {
class TextureVideoFrame : public I420VideoFrame {
public:
TextureVideoFrame(NativeHandle* handle,
int width,
int height,
uint32_t timestamp,
int64_t render_time_ms);
virtual ~TextureVideoFrame();
// I420VideoFrame implementation
virtual int CreateEmptyFrame(int width,
int height,
int stride_y,
int stride_u,
int stride_v) OVERRIDE;
virtual int CreateFrame(int size_y,
const uint8_t* buffer_y,
int size_u,
const uint8_t* buffer_u,
int size_v,
const uint8_t* buffer_v,
int width,
int height,
int stride_y,
int stride_u,
int stride_v) OVERRIDE;
virtual int CopyFrame(const I420VideoFrame& videoFrame) OVERRIDE;
virtual void SwapFrame(I420VideoFrame* videoFrame) OVERRIDE;
virtual uint8_t* buffer(PlaneType type) OVERRIDE;
virtual const uint8_t* buffer(PlaneType type) const OVERRIDE;
virtual int allocated_size(PlaneType type) const OVERRIDE;
virtual int stride(PlaneType type) const OVERRIDE;
virtual bool IsZeroSize() const OVERRIDE;
virtual void ResetSize() OVERRIDE;
virtual void* native_handle() const OVERRIDE;
protected:
virtual int CheckDimensions(
int width, int height, int stride_y, int stride_u, int stride_v) OVERRIDE;
private:
// An opaque handle that stores the underlying video frame.
scoped_refptr<NativeHandle> handle_;
};
} // namespace webrtc
#endif // COMMON_VIDEO_INTERFACE_TEXTURE_VIDEO_FRAME_H
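A minimal creation sketch; MyTextureHandle is a hypothetical NativeHandle implementation that ref-counts an underlying GPU texture:

webrtc::scoped_refptr<webrtc::NativeHandle> handle(
    new MyTextureHandle(texture_id));  // Hypothetical implementation.
webrtc::TextureVideoFrame frame(
    handle.get(), 1280, 720, rtp_timestamp, render_time_ms);
// Only the metadata accessors and native_handle() are usable; the pixel
// buffer operations deliberately fail (see the NOTREACHED() stubs below).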

View File

@ -0,0 +1,108 @@
/*
* Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "webrtc/common_video/interface/texture_video_frame.h"
#include <assert.h>
#include "webrtc/system_wrappers/interface/trace.h"
#define NOTREACHED() \
do { \
WEBRTC_TRACE(kTraceError, kTraceVideoRenderer, -1, "Not reached"); \
assert(false); \
} while (0)
namespace webrtc {
TextureVideoFrame::TextureVideoFrame(NativeHandle* handle,
int width,
int height,
uint32_t timestamp,
int64_t render_time_ms)
: handle_(handle) {
set_width(width);
set_height(height);
set_timestamp(timestamp);
set_render_time_ms(render_time_ms);
}
TextureVideoFrame::~TextureVideoFrame() {}
int TextureVideoFrame::CreateEmptyFrame(int width,
int height,
int stride_y,
int stride_u,
int stride_v) {
NOTREACHED();
return -1;
}
int TextureVideoFrame::CreateFrame(int size_y,
const uint8_t* buffer_y,
int size_u,
const uint8_t* buffer_u,
int size_v,
const uint8_t* buffer_v,
int width,
int height,
int stride_y,
int stride_u,
int stride_v) {
NOTREACHED();
return -1;
}
int TextureVideoFrame::CopyFrame(const I420VideoFrame& videoFrame) {
NOTREACHED();
return -1;
}
void TextureVideoFrame::SwapFrame(I420VideoFrame* videoFrame) {
NOTREACHED();
}
uint8_t* TextureVideoFrame::buffer(PlaneType type) {
NOTREACHED();
return NULL;
}
const uint8_t* TextureVideoFrame::buffer(PlaneType type) const {
NOTREACHED();
return NULL;
}
int TextureVideoFrame::allocated_size(PlaneType type) const {
NOTREACHED();
return -1;
}
int TextureVideoFrame::stride(PlaneType type) const {
NOTREACHED();
return -1;
}
bool TextureVideoFrame::IsZeroSize() const {
NOTREACHED();
return true;
}
void TextureVideoFrame::ResetSize() {
NOTREACHED();
}
void* TextureVideoFrame::native_handle() const { return handle_.get(); }
int TextureVideoFrame::CheckDimensions(
int width, int height, int stride_y, int stride_u, int stride_v) {
return 0;
}
} // namespace webrtc

View File

@ -0,0 +1,58 @@
/*
* Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "testing/gtest/include/gtest/gtest.h"
#include "webrtc/common_video/interface/native_handle.h"
#include "webrtc/common_video/interface/texture_video_frame.h"
namespace webrtc {
class NativeHandleImpl : public NativeHandle {
public:
NativeHandleImpl() : ref_count_(0) {}
virtual ~NativeHandleImpl() {}
virtual int32_t AddRef() { return ++ref_count_; }
virtual int32_t Release() { return --ref_count_; }
virtual void* GetHandle() { return NULL; }
int32_t ref_count() { return ref_count_; }
private:
int32_t ref_count_;
};
TEST(TestTextureVideoFrame, InitialValues) {
NativeHandleImpl handle;
TextureVideoFrame frame(&handle, 640, 480, 100, 10);
EXPECT_EQ(640, frame.width());
EXPECT_EQ(480, frame.height());
EXPECT_EQ(100u, frame.timestamp());
EXPECT_EQ(10, frame.render_time_ms());
EXPECT_EQ(&handle, frame.native_handle());
EXPECT_EQ(0, frame.set_width(320));
EXPECT_EQ(320, frame.width());
EXPECT_EQ(0, frame.set_height(240));
EXPECT_EQ(240, frame.height());
frame.set_timestamp(200);
EXPECT_EQ(200u, frame.timestamp());
frame.set_render_time_ms(20);
EXPECT_EQ(20, frame.render_time_ms());
}
TEST(TestTextureVideoFrame, RefCount) {
NativeHandleImpl handle;
EXPECT_EQ(0, handle.ref_count());
TextureVideoFrame *frame = new TextureVideoFrame(&handle, 640, 480, 100, 200);
EXPECT_EQ(1, handle.ref_count());
delete frame;
EXPECT_EQ(0, handle.ref_count());
}
} // namespace webrtc

View File

@ -14,6 +14,7 @@
#include <assert.h>
#include "webrtc/common_video/interface/texture_video_frame.h"
#include "webrtc/modules/interface/module_common_types.h"
#include "webrtc/system_wrappers/interface/tick_util.h"
#include "webrtc/system_wrappers/interface/trace.h"
@ -48,6 +49,16 @@ VideoFramesQueue::~VideoFramesQueue() {
}
int32_t VideoFramesQueue::AddFrame(const I420VideoFrame& newFrame) {
if (newFrame.native_handle() != NULL) {
_incomingFrames.PushBack(new TextureVideoFrame(
static_cast<NativeHandle*>(newFrame.native_handle()),
newFrame.width(),
newFrame.height(),
newFrame.timestamp(),
newFrame.render_time_ms()));
return 0;
}
I420VideoFrame* ptrFrameToAdd = NULL;
// Try to re-use a VideoFrame. Only allocate new memory if it is necessary.
if (!_emptyFrames.Empty()) {
@ -113,12 +124,17 @@ I420VideoFrame* VideoFramesQueue::FrameToRecord() {
}
int32_t VideoFramesQueue::ReturnFrame(I420VideoFrame* ptrOldFrame) {
// No need to reuse texture frames because they do not allocate memory.
if (ptrOldFrame->native_handle() == NULL) {
ptrOldFrame->set_timestamp(0);
ptrOldFrame->set_width(0);
ptrOldFrame->set_height(0);
ptrOldFrame->set_render_time_ms(0);
ptrOldFrame->ResetSize();
_emptyFrames.PushBack(ptrOldFrame);
} else {
delete ptrOldFrame;
}
return 0;
}

View File

@ -101,7 +101,8 @@ int32_t IncomingVideoStream::RenderFrame(const uint32_t stream_id,
return -1;
}
if (true == mirror_frames_enabled_) {
// Mirroring is not supported if the frame is backed by a texture.
if (true == mirror_frames_enabled_ && video_frame.native_handle() == NULL) {
transformed_video_frame_.CreateEmptyFrame(video_frame.width(),
video_frame.height(),
video_frame.stride(kYPlane),

View File

@ -12,6 +12,7 @@
#include <assert.h>
#include "webrtc/common_video/interface/texture_video_frame.h"
#include "webrtc/modules/interface/module_common_types.h"
#include "webrtc/system_wrappers/interface/tick_util.h"
#include "webrtc/system_wrappers/interface/trace.h"
@ -47,6 +48,16 @@ int32_t VideoRenderFrames::AddFrame(I420VideoFrame* new_frame) {
return -1;
}
if (new_frame->native_handle() != NULL) {
incoming_frames_.PushBack(new TextureVideoFrame(
static_cast<NativeHandle*>(new_frame->native_handle()),
new_frame->width(),
new_frame->height(),
new_frame->timestamp(),
new_frame->render_time_ms()));
return incoming_frames_.GetSize();
}
// Get an empty frame
I420VideoFrame* frame_to_add = NULL;
if (!empty_frames_.Empty()) {
@ -103,10 +114,7 @@ I420VideoFrame* VideoRenderFrames::FrameToRender() {
// This is the oldest one so far and it's OK to render.
if (render_frame) {
// This one is older than the newly found frame, remove this one.
render_frame->ResetSize();
render_frame->set_timestamp(0);
render_frame->set_render_time_ms(0);
empty_frames_.PushFront(render_frame);
ReturnFrame(render_frame);
}
render_frame = oldest_frame_in_list;
incoming_frames_.Erase(item);
@ -122,10 +130,15 @@ I420VideoFrame* VideoRenderFrames::FrameToRender() {
}
int32_t VideoRenderFrames::ReturnFrame(I420VideoFrame* old_frame) {
// No need to reuse texture frames because they do not allocate memory.
if (old_frame->native_handle() == NULL) {
old_frame->ResetSize();
old_frame->set_timestamp(0);
old_frame->set_render_time_ms(0);
empty_frames_.PushBack(old_frame);
} else {
delete old_frame;
}
return 0;
}

View File

@ -39,7 +39,13 @@ class WEBRTC_DLLEXPORT ExternalRenderer {
// RTP timestamp in 90kHz.
uint32_t time_stamp,
// Wallclock render time in milliseconds
int64_t render_time) = 0;
int64_t render_time,
// Handle of the underlying video frame.
void* handle) = 0;
// Returns true if the renderer supports textures. DeliverFrame can be called
// with NULL |buffer| and non-NULL |handle|.
virtual bool IsTextureSupported() = 0;
protected:
virtual ~ExternalRenderer() {}
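A minimal ExternalRenderer sketch that advertises texture support, per the extended interface above (the class name and rendering internals are hypothetical):

class MyTextureRenderer : public webrtc::ExternalRenderer {
 public:
  virtual int FrameSizeChange(unsigned int width, unsigned int height,
                              unsigned int number_of_streams) {
    return 0;
  }
  virtual int DeliverFrame(unsigned char* buffer, int buffer_size,
                           uint32_t time_stamp, int64_t render_time,
                           void* handle) {
    if (handle != NULL) {
      // Texture path: |buffer| is NULL and |buffer_size| is 0; render from
      // the native handle instead (platform-specific).
      return 0;
    }
    // Regular I420 buffer path.
    return 0;
  }
  virtual bool IsTextureSupported() { return true; }
};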

View File

@ -117,7 +117,8 @@ int VideoReceiveStream::FrameSizeChange(unsigned int width, unsigned int height,
}
int VideoReceiveStream::DeliverFrame(uint8_t* frame, int buffer_size,
uint32_t timestamp, int64_t render_time) {
uint32_t timestamp, int64_t render_time,
void* /*handle*/) {
if (config_.renderer == NULL) {
return 0;
}
@ -142,6 +143,8 @@ int VideoReceiveStream::DeliverFrame(uint8_t* frame, int buffer_size,
return 0;
}
bool VideoReceiveStream::IsTextureSupported() { return false; }
int VideoReceiveStream::SendPacket(int /*channel*/,
const void* packet,
int length) {

View File

@ -46,7 +46,9 @@ class VideoReceiveStream : public newapi::VideoReceiveStream,
virtual int FrameSizeChange(unsigned int width, unsigned int height,
unsigned int /*number_of_streams*/) OVERRIDE;
virtual int DeliverFrame(uint8_t* frame, int buffer_size, uint32_t timestamp,
int64_t render_time) OVERRIDE;
int64_t render_time, void* /*handle*/) OVERRIDE;
virtual bool IsTextureSupported() OVERRIDE;
virtual int SendPacket(int /*channel*/, const void* packet, int length)
OVERRIDE;

View File

@ -588,7 +588,7 @@ int FrameDropDetector::GetNumberOfFramesDroppedAt(State state) {
int FrameDropMonitoringRemoteFileRenderer::DeliverFrame(
unsigned char *buffer, int buffer_size, uint32_t time_stamp,
int64_t render_time) {
int64_t render_time, void* /*handle*/) {
// |render_time| provides the ideal render time for this frame. If that time
// has already passed we will render it immediately.
int64_t report_render_time_us = render_time * 1000;
@ -600,7 +600,7 @@ int FrameDropMonitoringRemoteFileRenderer::DeliverFrame(
frame_drop_detector_->ReportFrameState(FrameDropDetector::kRendered,
time_stamp, report_render_time_us);
return ViEToFileRenderer::DeliverFrame(buffer, buffer_size,
time_stamp, render_time);
time_stamp, render_time, NULL);
}
int FrameDropMonitoringRemoteFileRenderer::FrameSizeChange(

View File

@ -223,10 +223,11 @@ class FrameDropMonitoringRemoteFileRenderer : public ViEToFileRenderer {
// Implementation of ExternalRenderer:
int FrameSizeChange(unsigned int width, unsigned int height,
unsigned int number_of_streams);
unsigned int number_of_streams) OVERRIDE;
int DeliverFrame(unsigned char* buffer, int buffer_size,
uint32_t time_stamp,
int64_t render_time);
int64_t render_time,
void* handle) OVERRIDE;
private:
FrameDropDetector* frame_drop_detector_;
};

View File

@ -58,7 +58,8 @@ public:
virtual int DeliverFrame(unsigned char* buffer, int bufferSize,
uint32_t time_stamp,
int64_t render_time) {
int64_t render_time,
void* /*handle*/) {
if (bufferSize != CalcBufferSize(webrtc::kI420, _width, _height)) {
ViETest::Log("Incorrect render buffer received, of length = %d\n",
bufferSize);
@ -67,6 +68,8 @@ public:
return 0;
}
virtual bool IsTextureSupported() { return false; }
public:
virtual ~ViEAutoTestExternalRenderer()
{

View File

@ -123,7 +123,8 @@ void ViEToFileRenderer::ForgetOutputFile() {
int ViEToFileRenderer::DeliverFrame(unsigned char *buffer,
int buffer_size,
uint32_t time_stamp,
int64_t render_time) {
int64_t render_time,
void* /*handle*/) {
webrtc::CriticalSectionScoped lock(frame_queue_cs_.get());
test::Frame* frame;
if (free_frame_queue_.empty()) {
@ -146,6 +147,8 @@ int ViEToFileRenderer::DeliverFrame(unsigned char *buffer,
return 0;
}
bool ViEToFileRenderer::IsTextureSupported() { return false; }
int ViEToFileRenderer::FrameSizeChange(unsigned int width,
unsigned int height,
unsigned int number_of_streams) {

View File

@ -35,7 +35,8 @@ class ExternalRendererEffectFilter : public webrtc::ViEEffectFilter {
return renderer_->DeliverFrame(frame_buffer,
size,
time_stamp90KHz,
webrtc::TickTime::MillisecondTimestamp());
webrtc::TickTime::MillisecondTimestamp(),
NULL);
}
private:

View File

@ -55,12 +55,15 @@ class ViEToFileRenderer: public webrtc::ExternalRenderer {
// Implementation of ExternalRenderer:
int FrameSizeChange(unsigned int width, unsigned int height,
unsigned int number_of_streams);
unsigned int number_of_streams) OVERRIDE;
int DeliverFrame(unsigned char* buffer,
int buffer_size,
uint32_t time_stamp,
int64_t render_time);
int64_t render_time,
void* handle) OVERRIDE;
bool IsTextureSupported() OVERRIDE;
const std::string GetFullOutputPath() const;

View File

@ -1684,6 +1684,8 @@ int32_t ViEChannel::FrameToRender(
}
decoder_reset_ = false;
}
// Post processing is not supported if the frame is backed by a texture.
if (video_frame.native_handle() == NULL) {
if (effect_filter_) {
unsigned int length = CalcBufferSize(kI420,
video_frame.width(),
@ -1697,6 +1699,7 @@ int32_t ViEChannel::FrameToRender(
if (color_enhancement_) {
VideoProcessingModule::ColorEnhancement(&video_frame);
}
}
uint32_t arr_ofCSRC[kRtpCsrcSize];
int32_t no_of_csrcs = rtp_rtcp_->RemoteCSRCs(arr_ofCSRC);

View File

@ -56,7 +56,7 @@ void ViEFrameProviderBase::DeliverFrame(
// Deliver the frame to all registered callbacks.
if (frame_callbacks_.size() > 0) {
if (frame_callbacks_.size() == 1) {
if (frame_callbacks_.size() == 1 || video_frame->native_handle() != NULL) {
// We don't have to copy the frame.
frame_callbacks_.front()->DeliverFrame(id_, video_frame, num_csrcs, CSRC);
} else {

View File

@ -169,6 +169,21 @@ int ViEExternalRendererImpl::SetViEExternalRenderer(
int32_t ViEExternalRendererImpl::RenderFrame(
const uint32_t stream_id,
I420VideoFrame& video_frame) {
if (video_frame.native_handle() != NULL) {
NotifyFrameSizeChange(stream_id, video_frame);
if (external_renderer_->IsTextureSupported()) {
external_renderer_->DeliverFrame(NULL,
0,
video_frame.timestamp(),
video_frame.render_time_ms(),
video_frame.native_handle());
} else {
// TODO(wuchengli): read back the pixels and deliver the frame.
}
return 0;
}
VideoFrame* out_frame = converted_frame_.get();
// Convert to requested format.
@ -218,21 +233,28 @@ int32_t ViEExternalRendererImpl::RenderFrame(
break;
}
if (external_renderer_width_ != video_frame.width() ||
external_renderer_height_ != video_frame.height()) {
external_renderer_width_ = video_frame.width();
external_renderer_height_ = video_frame.height();
external_renderer_->FrameSizeChange(external_renderer_width_,
external_renderer_height_, stream_id);
}
NotifyFrameSizeChange(stream_id, video_frame);
if (out_frame) {
external_renderer_->DeliverFrame(out_frame->Buffer(),
out_frame->Length(),
video_frame.timestamp(),
video_frame.render_time_ms());
video_frame.render_time_ms(),
NULL);
}
return 0;
}
void ViEExternalRendererImpl::NotifyFrameSizeChange(
const uint32_t stream_id,
I420VideoFrame& video_frame) {
if (external_renderer_width_ != video_frame.width() ||
external_renderer_height_ != video_frame.height()) {
external_renderer_width_ = video_frame.width();
external_renderer_height_ = video_frame.height();
external_renderer_->FrameSizeChange(
external_renderer_width_, external_renderer_height_, stream_id);
}
}
} // namespace webrtc

View File

@ -36,6 +36,8 @@ class ViEExternalRendererImpl : public VideoRenderCallback {
I420VideoFrame& video_frame);
private:
void NotifyFrameSizeChange(const uint32_t stream_id,
I420VideoFrame& video_frame);
ExternalRenderer* external_renderer_;
RawVideoType external_renderer_format_;
int external_renderer_width_;