diff --git a/talk/media/base/videocapturer.cc b/talk/media/base/videocapturer.cc
index 900a6f1a0..0d2a20f90 100644
--- a/talk/media/base/videocapturer.cc
+++ b/talk/media/base/videocapturer.cc
@@ -468,7 +468,7 @@ void VideoCapturer::OnFrameCaptured(VideoCapturer*,
   }
 
   VIDEO_FRAME_NAME i420_frame;
-  if (!i420_frame.Init(captured_frame, desired_width, desired_height)) {
+  if (!i420_frame.Alias(captured_frame, desired_width, desired_height)) {
     // TODO(fbarchard): LOG more information about captured frame attributes.
     LOG(LS_ERROR) << "Couldn't convert to I420! "
                   << "From " << ToString(captured_frame) << " To "
diff --git a/talk/media/base/videoframe_unittest.h b/talk/media/base/videoframe_unittest.h
index 361c195c2..42485f155 100644
--- a/talk/media/base/videoframe_unittest.h
+++ b/talk/media/base/videoframe_unittest.h
@@ -1154,6 +1154,7 @@ void Construct##FOURCC##Rotate##ROTATE() { \
                   int size_adjust, bool expected_result) {
     T frame;
     talk_base::scoped_ptr<talk_base::MemoryStream> ms(LoadSample(name));
+    ASSERT_TRUE(ms.get() != NULL);
    const uint8* sample = reinterpret_cast<const uint8*>(ms.get()->GetBuffer());
    size_t sample_size;
    ms->GetSize(&sample_size);
@@ -1393,6 +1394,7 @@ void Construct##FOURCC##Rotate##ROTATE() { \
    T frame1, frame2;
    talk_base::scoped_ptr<talk_base::MemoryStream> ms(
        LoadSample(kImageFilename));
+    ASSERT_TRUE(ms.get() != NULL);
    size_t data_size;
    ms->GetSize(&data_size);
    EXPECT_TRUE(frame1.InitToBlack(kWidth, kHeight, 1, 1, 0, 0));
@@ -2007,6 +2009,7 @@ void Construct##FOURCC##Rotate##ROTATE() { \
    T frame;
    talk_base::scoped_ptr<talk_base::MemoryStream> ms(
        LoadSample(kImageFilename));
+    ASSERT_TRUE(ms.get() != NULL);
    ASSERT_TRUE(LoadFrame(ms.get(), cricket::FOURCC_I420,
                          kWidth, kHeight, &frame));
    size_t out_size = kWidth * kHeight * 3 / 2;
@@ -2021,6 +2024,7 @@ void Construct##FOURCC##Rotate##ROTATE() { \
    T source;
    talk_base::scoped_ptr<talk_base::MemoryStream> ms(
        LoadSample(kImageFilename));
+    ASSERT_TRUE(ms.get() != NULL);
    ASSERT_TRUE(LoadFrame(ms.get(), cricket::FOURCC_I420,
                          kWidth, kHeight, &source));
@@ -2039,6 +2043,7 @@ void Construct##FOURCC##Rotate##ROTATE() { \
    T frame;
    talk_base::scoped_ptr<talk_base::MemoryStream> ms(
        LoadSample(kImageFilename));
+    ASSERT_TRUE(ms.get() != NULL);
    talk_base::MemoryStream ms2;
    size_t size;
    ASSERT_TRUE(ms->GetSize(&size));
diff --git a/talk/media/webrtc/webrtcvideocapturer.cc b/talk/media/webrtc/webrtcvideocapturer.cc
index bcfda4e84..6e81b4016 100644
--- a/talk/media/webrtc/webrtcvideocapturer.cc
+++ b/talk/media/webrtc/webrtcvideocapturer.cc
@@ -328,14 +328,10 @@ void WebRtcVideoCapturer::OnIncomingCapturedFrame(const int32_t id,
   // to one block for it.
   int length = webrtc::CalcBufferSize(webrtc::kI420,
                                       sample.width(), sample.height());
-  if (!captured_frame_.get() ||
-      captured_frame_->length() != static_cast<size_t>(length)) {
-    captured_frame_.reset(new FrameBuffer(length));
-  }
-  // TODO(ronghuawu): Refactor the WebRtcVideoFrame to avoid memory copy.
-  webrtc::ExtractBuffer(sample, length,
-                        reinterpret_cast<uint8_t*>(captured_frame_->data()));
-  WebRtcCapturedFrame frame(sample, captured_frame_->data(), length);
+  capture_buffer_.resize(length);
+  // TODO(ronghuawu): Refactor the WebRtcCapturedFrame to avoid memory copy.
+  webrtc::ExtractBuffer(sample, length, &capture_buffer_[0]);
+  WebRtcCapturedFrame frame(sample, &capture_buffer_[0], length);
   SignalFrameCaptured(this, &frame);
 }
diff --git a/talk/media/webrtc/webrtcvideocapturer.h b/talk/media/webrtc/webrtcvideocapturer.h
index eb9995644..c20a05919 100644
--- a/talk/media/webrtc/webrtcvideocapturer.h
+++ b/talk/media/webrtc/webrtcvideocapturer.h
@@ -88,7 +88,7 @@ class WebRtcVideoCapturer : public VideoCapturer,
   talk_base::scoped_ptr<WebRtcVcmFactoryInterface> factory_;
   webrtc::VideoCaptureModule* module_;
   int captured_frames_;
-  talk_base::scoped_ptr<FrameBuffer> captured_frame_;
+  std::vector<uint8> capture_buffer_;
 };
 
 struct WebRtcCapturedFrame : public CapturedFrame {
diff --git a/talk/media/webrtc/webrtcvideoengine.cc b/talk/media/webrtc/webrtcvideoengine.cc
index 6067507df..ec38eeacf 100644
--- a/talk/media/webrtc/webrtcvideoengine.cc
+++ b/talk/media/webrtc/webrtcvideoengine.cc
@@ -258,8 +258,8 @@ class WebRtcRenderAdapter : public webrtc::ExternalRenderer {
   int DeliverBufferFrame(unsigned char* buffer, int buffer_size,
                          int64 elapsed_time, int64 time_stamp) {
     WebRtcVideoFrame video_frame;
-    video_frame.Attach(buffer, buffer_size, width_, height_,
-                       1, 1, elapsed_time, time_stamp, 0);
+    video_frame.Alias(buffer, buffer_size, width_, height_,
+                      1, 1, elapsed_time, time_stamp, 0);
 
     // Sanity check on decoded frame size.
@@ -269,9 +269,6 @@ class WebRtcRenderAdapter : public webrtc::ExternalRenderer {
     }
 
     int ret = renderer_->RenderFrame(&video_frame) ? 0 : -1;
-    uint8* buffer_temp;
-    size_t buffer_size_temp;
-    video_frame.Detach(&buffer_temp, &buffer_size_temp);
     return ret;
   }
diff --git a/talk/media/webrtc/webrtcvideoframe.cc b/talk/media/webrtc/webrtcvideoframe.cc
index 0938ae6ab..16aa4cdce 100644
--- a/talk/media/webrtc/webrtcvideoframe.cc
+++ b/talk/media/webrtc/webrtcvideoframe.cc
@@ -42,42 +42,74 @@ static const int kWatermarkOffsetFromLeft = 8;
 static const int kWatermarkOffsetFromBottom = 8;
 static const unsigned char kWatermarkMaxYValue = 64;
 
-FrameBuffer::FrameBuffer() {}
+// Class that wraps ownership semantics of a buffer passed to it.
+// * Buffers passed using Attach() become owned by this FrameBuffer and will be
+//   destroyed on FrameBuffer destruction.
+// * Buffers passed using Alias() are not owned and will not be destroyed on
+//   FrameBuffer destruction. The buffer then must outlive the FrameBuffer.
+class WebRtcVideoFrame::FrameBuffer {
+ public:
+  FrameBuffer();
+  explicit FrameBuffer(size_t length);
+  ~FrameBuffer();
 
-FrameBuffer::FrameBuffer(size_t length) {
-  char* buffer = new char[length];
-  SetData(buffer, length);
+  void Attach(uint8* data, size_t length);
+  void Alias(uint8* data, size_t length);
+  uint8* data();
+  size_t length() const;
+
+  webrtc::VideoFrame* frame();
+  const webrtc::VideoFrame* frame() const;
+
+ private:
+  talk_base::scoped_ptr<uint8[]> owned_data_;
+  webrtc::VideoFrame video_frame_;
+};
+
+WebRtcVideoFrame::FrameBuffer::FrameBuffer() {}
+
+WebRtcVideoFrame::FrameBuffer::FrameBuffer(size_t length) {
+  uint8* buffer = new uint8[length];
+  Attach(buffer, length);
 }
 
-FrameBuffer::~FrameBuffer() {}
+WebRtcVideoFrame::FrameBuffer::~FrameBuffer() {
+  // Make sure that |video_frame_| doesn't delete the buffer, as |owned_data_|
+  // will release the buffer if this FrameBuffer owns it.
+  uint8_t* new_memory = NULL;
+  uint32_t new_length = 0;
+  uint32_t new_size = 0;
+  video_frame_.Swap(new_memory, new_length, new_size);
+}
 
-void FrameBuffer::SetData(char* data, size_t length) {
+void WebRtcVideoFrame::FrameBuffer::Attach(uint8* data, size_t length) {
+  Alias(data, length);
+  owned_data_.reset(data);
+}
+
+void WebRtcVideoFrame::FrameBuffer::Alias(uint8* data, size_t length) {
+  owned_data_.reset();
   uint8_t* new_memory = reinterpret_cast<uint8_t*>(data);
   uint32_t new_length = static_cast<uint32_t>(length);
   uint32_t new_size = static_cast<uint32_t>(length);
   video_frame_.Swap(new_memory, new_length, new_size);
 }
 
-void FrameBuffer::ReturnData(char** data, size_t* length) {
-  *data = NULL;
-  uint32_t old_length = 0;
-  uint32_t old_size = 0;
-  video_frame_.Swap(reinterpret_cast<uint8_t*&>(*data),
-                    old_length, old_size);
-  *length = old_length;
+uint8* WebRtcVideoFrame::FrameBuffer::data() {
+  return video_frame_.Buffer();
 }
 
-char* FrameBuffer::data() {
-  return reinterpret_cast<char*>(video_frame_.Buffer());
+size_t WebRtcVideoFrame::FrameBuffer::length() const {
+  return video_frame_.Length();
 }
 
-size_t FrameBuffer::length() const {
-  return static_cast<size_t>(video_frame_.Length());
+webrtc::VideoFrame* WebRtcVideoFrame::FrameBuffer::frame() {
+  return &video_frame_;
 }
 
-webrtc::VideoFrame* FrameBuffer::frame() { return &video_frame_; }
-
-const webrtc::VideoFrame* FrameBuffer::frame() const { return &video_frame_; }
+const webrtc::VideoFrame* WebRtcVideoFrame::FrameBuffer::frame() const {
+  return &video_frame_;
+}
 
 WebRtcVideoFrame::WebRtcVideoFrame()
     : video_buffer_(new RefCountedBuffer()), is_black_(false) {}
@@ -99,6 +131,25 @@ bool WebRtcVideoFrame::Init(const CapturedFrame* frame, int dw, int dh) {
               frame->time_stamp, frame->rotation);
 }
 
+bool WebRtcVideoFrame::Alias(const CapturedFrame* frame, int dw, int dh) {
+  if (CanonicalFourCC(frame->fourcc) != FOURCC_I420 || frame->rotation != 0 ||
+      frame->width != dw || frame->height != dh) {
+    // TODO(fbarchard): Enable aliasing of more formats.
+    return Init(frame, dw, dh);
+  } else {
+    Alias(static_cast<uint8*>(frame->data),
+          frame->data_size,
+          frame->width,
+          frame->height,
+          frame->pixel_width,
+          frame->pixel_height,
+          frame->elapsed_time,
+          frame->time_stamp,
+          frame->rotation);
+    return true;
+  }
+}
+
 bool WebRtcVideoFrame::InitToBlack(int w, int h, size_t pixel_width,
                                    size_t pixel_height, int64 elapsed_time,
                                    int64 time_stamp) {
@@ -109,20 +160,16 @@
   return true;
 }
 
-void WebRtcVideoFrame::Attach(
+void WebRtcVideoFrame::Alias(
     uint8* buffer, size_t buffer_size, int w, int h, size_t pixel_width,
     size_t pixel_height, int64 elapsed_time, int64 time_stamp, int rotation) {
   talk_base::scoped_refptr<RefCountedBuffer> video_buffer(
       new RefCountedBuffer());
-  video_buffer->SetData(reinterpret_cast<char*>(buffer), buffer_size);
+  video_buffer->Alias(buffer, buffer_size);
   Attach(video_buffer.get(), buffer_size, w, h, pixel_width, pixel_height,
          elapsed_time, time_stamp, rotation);
 }
 
-void WebRtcVideoFrame::Detach(uint8** data, size_t* length) {
-  video_buffer_->ReturnData(reinterpret_cast<char**>(data), length);
-}
-
 size_t WebRtcVideoFrame::GetWidth() const { return frame()->Width(); }
 
 size_t WebRtcVideoFrame::GetHeight() const { return frame()->Height(); }
 
@@ -172,7 +219,7 @@ uint8* WebRtcVideoFrame::GetVPlane() {
 }
 
 VideoFrame* WebRtcVideoFrame::Copy() const {
-  const char* old_buffer = video_buffer_->data();
+  uint8* old_buffer = video_buffer_->data();
   if (!old_buffer)
     return NULL;
   size_t new_buffer_size = video_buffer_->length();
@@ -185,7 +232,7 @@ VideoFrame* WebRtcVideoFrame::Copy() const {
 }
 
 bool WebRtcVideoFrame::MakeExclusive() {
-  const int length = static_cast<int>(video_buffer_->length());
+  const size_t length = video_buffer_->length();
   RefCountedBuffer* exclusive_buffer = new RefCountedBuffer(length);
   memcpy(exclusive_buffer->data(), video_buffer_->data(), length);
   Attach(exclusive_buffer, length, frame()->Width(), frame()->Height(),
@@ -272,6 +319,14 @@ bool WebRtcVideoFrame::AddWatermark() {
   return true;
 }
 
+webrtc::VideoFrame* WebRtcVideoFrame::frame() {
+  return video_buffer_->frame();
+}
+
+const webrtc::VideoFrame* WebRtcVideoFrame::frame() const {
+  return video_buffer_->frame();
+}
+
 bool WebRtcVideoFrame::Reset(
     uint32 format, int w, int h, int dw, int dh, uint8* sample,
     size_t sample_size, size_t pixel_width, size_t pixel_height,
diff --git a/talk/media/webrtc/webrtcvideoframe.h b/talk/media/webrtc/webrtcvideoframe.h
index ce1317053..8191a58d8 100644
--- a/talk/media/webrtc/webrtcvideoframe.h
+++ b/talk/media/webrtc/webrtcvideoframe.h
@@ -39,29 +39,8 @@ namespace cricket {
 
 struct CapturedFrame;
 
-// Class that takes ownership of the frame passed to it.
-class FrameBuffer {
- public:
-  FrameBuffer();
-  explicit FrameBuffer(size_t length);
-  ~FrameBuffer();
-
-  void SetData(char* data, size_t length);
-  void ReturnData(char** data, size_t* length);
-  char* data();
-  size_t length() const;
-
-  webrtc::VideoFrame* frame();
-  const webrtc::VideoFrame* frame() const;
-
- private:
-  webrtc::VideoFrame video_frame_;
-};
-
 class WebRtcVideoFrame : public VideoFrame {
  public:
-  typedef talk_base::RefCountedObject<FrameBuffer> RefCountedBuffer;
-
   WebRtcVideoFrame();
   ~WebRtcVideoFrame();
 
@@ -75,17 +54,22 @@ class WebRtcVideoFrame : public VideoFrame {
 
   bool Init(const CapturedFrame* frame, int dw, int dh);
 
+  // Aliases this WebRtcVideoFrame to a CapturedFrame. |frame| must outlive
+  // this WebRtcVideoFrame.
+  bool Alias(const CapturedFrame* frame, int dw, int dh);
+
   bool InitToBlack(int w, int h, size_t pixel_width, size_t pixel_height,
                    int64 elapsed_time, int64 time_stamp);
-  void Attach(uint8* buffer, size_t buffer_size, int w, int h,
-              size_t pixel_width, size_t pixel_height, int64 elapsed_time,
-              int64 time_stamp, int rotation);
+  // Aliases this WebRtcVideoFrame to a memory buffer. |buffer| must outlive
+  // this WebRtcVideoFrame.
+  void Alias(uint8* buffer, size_t buffer_size, int w, int h,
+             size_t pixel_width, size_t pixel_height, int64 elapsed_time,
+             int64 time_stamp, int rotation);
 
-  void Detach(uint8** data, size_t* length);
   bool AddWatermark();
-  webrtc::VideoFrame* frame() { return video_buffer_->frame(); }
-  webrtc::VideoFrame* frame() const { return video_buffer_->frame(); }
+  webrtc::VideoFrame* frame();
+  const webrtc::VideoFrame* frame() const;
 
   // From base class VideoFrame.
   virtual bool Reset(uint32 format, int w, int h, int dw, int dh, uint8* sample,
@@ -124,6 +108,9 @@ class WebRtcVideoFrame : public VideoFrame {
                     size_t size, int stride_rgb) const;
 
  private:
+  class FrameBuffer;
+  typedef talk_base::RefCountedObject<FrameBuffer> RefCountedBuffer;
+
   void Attach(RefCountedBuffer* video_buffer, size_t buffer_size, int w, int h,
               size_t pixel_width, size_t pixel_height, int64 elapsed_time,
               int64 time_stamp, int rotation);
diff --git a/talk/media/webrtc/webrtcvideoframe_unittest.cc b/talk/media/webrtc/webrtcvideoframe_unittest.cc
index ebc345e65..e63c5d5e5 100644
--- a/talk/media/webrtc/webrtcvideoframe_unittest.cc
+++ b/talk/media/webrtc/webrtcvideoframe_unittest.cc
@@ -264,35 +264,16 @@ TEST_WEBRTCVIDEOFRAME(CopyIsRef)
 TEST_WEBRTCVIDEOFRAME(MakeExclusive)
 
 // These functions test implementation-specific details.
-TEST_F(WebRtcVideoFrameTest, AttachAndRelease) {
+TEST_F(WebRtcVideoFrameTest, Alias) {
   cricket::WebRtcVideoFrame frame1, frame2;
   ASSERT_TRUE(LoadFrameNoRepeat(&frame1));
-  const int64 time_stamp = 0x7FFFFFFFFFFFFFF0LL;
+  const int64 time_stamp = INT64_C(0x7FFFFFFFFFFFFFF0);
   frame1.SetTimeStamp(time_stamp);
   EXPECT_EQ(time_stamp, frame1.GetTimeStamp());
-  frame2.Attach(frame1.frame()->Buffer(), frame1.frame()->Size(),
-                kWidth, kHeight, 1, 1,
-                frame1.GetElapsedTime(), frame1.GetTimeStamp(), 0);
+  frame2.Alias(frame1.frame()->Buffer(), frame1.frame()->Size(),
+               kWidth, kHeight, 1, 1,
+               frame1.GetElapsedTime(), frame1.GetTimeStamp(), 0);
   EXPECT_TRUE(IsEqual(frame1, frame2, 0));
-  uint8* buffer;
-  size_t size;
-  frame2.Detach(&buffer, &size);
-  EXPECT_EQ(frame1.frame()->Buffer(), buffer);
-  EXPECT_EQ(frame1.frame()->Size(), size);
-  EXPECT_TRUE(IsNull(frame2));
-  EXPECT_TRUE(IsSize(frame1, kWidth, kHeight));
-}
-
-TEST_F(WebRtcVideoFrameTest, Transfer) {
-  cricket::WebRtcVideoFrame frame1, frame2;
-  ASSERT_TRUE(LoadFrameNoRepeat(&frame1));
-  uint8* buffer;
-  size_t size;
-  frame1.Detach(&buffer, &size);
-  frame2.Attach(buffer, size, kWidth, kHeight, 1, 1,
-                frame1.GetElapsedTime(), frame1.GetTimeStamp(), 0);
-  EXPECT_TRUE(IsNull(frame1));
-  EXPECT_TRUE(IsSize(frame2, kWidth, kHeight));
 }
 
 // Tests the Init function with different cropped size.
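
Editorial note (not part of the patch): the FrameBuffer comment above distinguishes Attach(), which transfers buffer ownership, from Alias(), which only points at caller-owned memory that must outlive the frame. The standalone sketch below is illustrative only; "OwnershipBuffer" is a hypothetical stand-in for WebRtcVideoFrame::FrameBuffer, written with std::unique_ptr instead of talk_base::scoped_ptr so it compiles on its own.

// Minimal sketch of the Attach()/Alias() ownership contract (assumptions:
// OwnershipBuffer is hypothetical and not part of the patched code).
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <memory>
#include <vector>

class OwnershipBuffer {
 public:
  // Takes ownership: |data| is freed when this object is destroyed.
  void Attach(uint8_t* data, size_t length) {
    Alias(data, length);
    owned_data_.reset(data);
  }
  // Does not take ownership: |data| must outlive this object.
  void Alias(uint8_t* data, size_t length) {
    owned_data_.reset();
    data_ = data;
    length_ = length;
  }
  uint8_t* data() const { return data_; }
  size_t length() const { return length_; }

 private:
  std::unique_ptr<uint8_t[]> owned_data_;  // set only by Attach()
  uint8_t* data_ = nullptr;
  size_t length_ = 0;
};

int main() {
  // Alias(): the caller keeps ownership, as the capturer does with its
  // std::vector<uint8> capture_buffer_ in this patch.
  std::vector<uint8_t> capture_buffer(640 * 480 * 3 / 2);
  OwnershipBuffer aliased;
  aliased.Alias(capture_buffer.data(), capture_buffer.size());

  // Attach(): the buffer is handed over and freed by OwnershipBuffer,
  // mirroring FrameBuffer(size_t length), which news and then Attaches.
  uint8_t* copy = new uint8_t[capture_buffer.size()];
  std::memcpy(copy, capture_buffer.data(), capture_buffer.size());
  OwnershipBuffer owned;
  owned.Attach(copy, capture_buffer.size());
  return 0;
}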