Update talk to 56183333.

TEST=try bots
R=sheu@chromium.org

Review URL: https://webrtc-codereview.appspot.com/3469004

git-svn-id: http://webrtc.googlecode.com/svn/trunk@5087 4adac7df-926f-26a2-2b94-8c16560cd09d
This commit is contained in:
wu@webrtc.org
2013-11-05 23:45:14 +00:00
parent d16d307218
commit 16d6254e8c
8 changed files with 115 additions and 94 deletions

View File

@@ -468,7 +468,7 @@ void VideoCapturer::OnFrameCaptured(VideoCapturer*,
} }
VIDEO_FRAME_NAME i420_frame; VIDEO_FRAME_NAME i420_frame;
if (!i420_frame.Init(captured_frame, desired_width, desired_height)) { if (!i420_frame.Alias(captured_frame, desired_width, desired_height)) {
// TODO(fbarchard): LOG more information about captured frame attributes. // TODO(fbarchard): LOG more information about captured frame attributes.
LOG(LS_ERROR) << "Couldn't convert to I420! " LOG(LS_ERROR) << "Couldn't convert to I420! "
<< "From " << ToString(captured_frame) << " To " << "From " << ToString(captured_frame) << " To "

View File

@@ -1154,6 +1154,7 @@ void Construct##FOURCC##Rotate##ROTATE() { \
int size_adjust, bool expected_result) { int size_adjust, bool expected_result) {
T frame; T frame;
talk_base::scoped_ptr<talk_base::MemoryStream> ms(LoadSample(name)); talk_base::scoped_ptr<talk_base::MemoryStream> ms(LoadSample(name));
ASSERT_TRUE(ms.get() != NULL);
const uint8* sample = reinterpret_cast<const uint8*>(ms.get()->GetBuffer()); const uint8* sample = reinterpret_cast<const uint8*>(ms.get()->GetBuffer());
size_t sample_size; size_t sample_size;
ms->GetSize(&sample_size); ms->GetSize(&sample_size);
@@ -1393,6 +1394,7 @@ void Construct##FOURCC##Rotate##ROTATE() { \
T frame1, frame2; T frame1, frame2;
talk_base::scoped_ptr<talk_base::MemoryStream> ms( talk_base::scoped_ptr<talk_base::MemoryStream> ms(
LoadSample(kImageFilename)); LoadSample(kImageFilename));
ASSERT_TRUE(ms.get() != NULL);
size_t data_size; size_t data_size;
ms->GetSize(&data_size); ms->GetSize(&data_size);
EXPECT_TRUE(frame1.InitToBlack(kWidth, kHeight, 1, 1, 0, 0)); EXPECT_TRUE(frame1.InitToBlack(kWidth, kHeight, 1, 1, 0, 0));
@@ -2007,6 +2009,7 @@ void Construct##FOURCC##Rotate##ROTATE() { \
T frame; T frame;
talk_base::scoped_ptr<talk_base::MemoryStream> ms( talk_base::scoped_ptr<talk_base::MemoryStream> ms(
LoadSample(kImageFilename)); LoadSample(kImageFilename));
ASSERT_TRUE(ms.get() != NULL);
ASSERT_TRUE(LoadFrame(ms.get(), cricket::FOURCC_I420, kWidth, kHeight, ASSERT_TRUE(LoadFrame(ms.get(), cricket::FOURCC_I420, kWidth, kHeight,
&frame)); &frame));
size_t out_size = kWidth * kHeight * 3 / 2; size_t out_size = kWidth * kHeight * 3 / 2;
@@ -2021,6 +2024,7 @@ void Construct##FOURCC##Rotate##ROTATE() { \
T source; T source;
talk_base::scoped_ptr<talk_base::MemoryStream> ms( talk_base::scoped_ptr<talk_base::MemoryStream> ms(
LoadSample(kImageFilename)); LoadSample(kImageFilename));
ASSERT_TRUE(ms.get() != NULL);
ASSERT_TRUE(LoadFrame(ms.get(), cricket::FOURCC_I420, kWidth, kHeight, ASSERT_TRUE(LoadFrame(ms.get(), cricket::FOURCC_I420, kWidth, kHeight,
&source)); &source));
@@ -2039,6 +2043,7 @@ void Construct##FOURCC##Rotate##ROTATE() { \
T frame; T frame;
talk_base::scoped_ptr<talk_base::MemoryStream> ms( talk_base::scoped_ptr<talk_base::MemoryStream> ms(
LoadSample(kImageFilename)); LoadSample(kImageFilename));
ASSERT_TRUE(ms.get() != NULL);
talk_base::MemoryStream ms2; talk_base::MemoryStream ms2;
size_t size; size_t size;
ASSERT_TRUE(ms->GetSize(&size)); ASSERT_TRUE(ms->GetSize(&size));

View File

@@ -328,14 +328,10 @@ void WebRtcVideoCapturer::OnIncomingCapturedFrame(const int32_t id,
// to one block for it. // to one block for it.
int length = webrtc::CalcBufferSize(webrtc::kI420, int length = webrtc::CalcBufferSize(webrtc::kI420,
sample.width(), sample.height()); sample.width(), sample.height());
if (!captured_frame_.get() || capture_buffer_.resize(length);
captured_frame_->length() != static_cast<size_t>(length)) { // TODO(ronghuawu): Refactor the WebRtcCapturedFrame to avoid memory copy.
captured_frame_.reset(new FrameBuffer(length)); webrtc::ExtractBuffer(sample, length, &capture_buffer_[0]);
} WebRtcCapturedFrame frame(sample, &capture_buffer_[0], length);
// TODO(ronghuawu): Refactor the WebRtcVideoFrame to avoid memory copy.
webrtc::ExtractBuffer(sample, length,
reinterpret_cast<uint8_t*>(captured_frame_->data()));
WebRtcCapturedFrame frame(sample, captured_frame_->data(), length);
SignalFrameCaptured(this, &frame); SignalFrameCaptured(this, &frame);
} }

View File

@@ -88,7 +88,7 @@ class WebRtcVideoCapturer : public VideoCapturer,
talk_base::scoped_ptr<WebRtcVcmFactoryInterface> factory_; talk_base::scoped_ptr<WebRtcVcmFactoryInterface> factory_;
webrtc::VideoCaptureModule* module_; webrtc::VideoCaptureModule* module_;
int captured_frames_; int captured_frames_;
talk_base::scoped_ptr<FrameBuffer> captured_frame_; std::vector<uint8_t> capture_buffer_;
}; };
struct WebRtcCapturedFrame : public CapturedFrame { struct WebRtcCapturedFrame : public CapturedFrame {

View File

@@ -258,8 +258,8 @@ class WebRtcRenderAdapter : public webrtc::ExternalRenderer {
int DeliverBufferFrame(unsigned char* buffer, int buffer_size, int DeliverBufferFrame(unsigned char* buffer, int buffer_size,
int64 elapsed_time, int64 time_stamp) { int64 elapsed_time, int64 time_stamp) {
WebRtcVideoFrame video_frame; WebRtcVideoFrame video_frame;
video_frame.Attach(buffer, buffer_size, width_, height_, video_frame.Alias(buffer, buffer_size, width_, height_,
1, 1, elapsed_time, time_stamp, 0); 1, 1, elapsed_time, time_stamp, 0);
// Sanity check on decoded frame size. // Sanity check on decoded frame size.
@@ -269,9 +269,6 @@ class WebRtcRenderAdapter : public webrtc::ExternalRenderer {
} }
int ret = renderer_->RenderFrame(&video_frame) ? 0 : -1; int ret = renderer_->RenderFrame(&video_frame) ? 0 : -1;
uint8* buffer_temp;
size_t buffer_size_temp;
video_frame.Detach(&buffer_temp, &buffer_size_temp);
return ret; return ret;
} }

View File

@@ -42,42 +42,74 @@ static const int kWatermarkOffsetFromLeft = 8;
static const int kWatermarkOffsetFromBottom = 8; static const int kWatermarkOffsetFromBottom = 8;
static const unsigned char kWatermarkMaxYValue = 64; static const unsigned char kWatermarkMaxYValue = 64;
FrameBuffer::FrameBuffer() {} // Class that wraps ownership semantics of a buffer passed to it.
// * Buffers passed using Attach() become owned by this FrameBuffer and will be
// destroyed on FrameBuffer destruction.
// * Buffers passed using Alias() are not owned and will not be destroyed on
// FrameBuffer destruction. The buffer then must outlive the FrameBuffer.
class WebRtcVideoFrame::FrameBuffer {
public:
FrameBuffer();
explicit FrameBuffer(size_t length);
~FrameBuffer();
FrameBuffer::FrameBuffer(size_t length) { void Attach(uint8* data, size_t length);
char* buffer = new char[length]; void Alias(uint8* data, size_t length);
SetData(buffer, length); uint8* data();
size_t length() const;
webrtc::VideoFrame* frame();
const webrtc::VideoFrame* frame() const;
private:
talk_base::scoped_ptr<uint8[]> owned_data_;
webrtc::VideoFrame video_frame_;
};
WebRtcVideoFrame::FrameBuffer::FrameBuffer() {}
WebRtcVideoFrame::FrameBuffer::FrameBuffer(size_t length) {
uint8* buffer = new uint8[length];
Attach(buffer, length);
} }
FrameBuffer::~FrameBuffer() {} WebRtcVideoFrame::FrameBuffer::~FrameBuffer() {
// Make sure that |video_frame_| doesn't delete the buffer, as |owned_data_|
// will release the buffer if this FrameBuffer owns it.
uint8_t* new_memory = NULL;
uint32_t new_length = 0;
uint32_t new_size = 0;
video_frame_.Swap(new_memory, new_length, new_size);
}
void FrameBuffer::SetData(char* data, size_t length) { void WebRtcVideoFrame::FrameBuffer::Attach(uint8* data, size_t length) {
Alias(data, length);
owned_data_.reset(data);
}
void WebRtcVideoFrame::FrameBuffer::Alias(uint8* data, size_t length) {
owned_data_.reset();
uint8_t* new_memory = reinterpret_cast<uint8_t*>(data); uint8_t* new_memory = reinterpret_cast<uint8_t*>(data);
uint32_t new_length = static_cast<uint32_t>(length); uint32_t new_length = static_cast<uint32_t>(length);
uint32_t new_size = static_cast<uint32_t>(length); uint32_t new_size = static_cast<uint32_t>(length);
video_frame_.Swap(new_memory, new_length, new_size); video_frame_.Swap(new_memory, new_length, new_size);
} }
void FrameBuffer::ReturnData(char** data, size_t* length) { uint8* WebRtcVideoFrame::FrameBuffer::data() {
*data = NULL; return video_frame_.Buffer();
uint32_t old_length = 0;
uint32_t old_size = 0;
video_frame_.Swap(reinterpret_cast<uint8_t*&>(*data),
old_length, old_size);
*length = old_length;
} }
char* FrameBuffer::data() { size_t WebRtcVideoFrame::FrameBuffer::length() const {
return reinterpret_cast<char*>(video_frame_.Buffer()); return video_frame_.Length();
} }
size_t FrameBuffer::length() const { webrtc::VideoFrame* WebRtcVideoFrame::FrameBuffer::frame() {
return static_cast<size_t>(video_frame_.Length()); return &video_frame_;
} }
webrtc::VideoFrame* FrameBuffer::frame() { return &video_frame_; } const webrtc::VideoFrame* WebRtcVideoFrame::FrameBuffer::frame() const {
return &video_frame_;
const webrtc::VideoFrame* FrameBuffer::frame() const { return &video_frame_; } }
WebRtcVideoFrame::WebRtcVideoFrame() WebRtcVideoFrame::WebRtcVideoFrame()
: video_buffer_(new RefCountedBuffer()), is_black_(false) {} : video_buffer_(new RefCountedBuffer()), is_black_(false) {}
@@ -99,6 +131,25 @@ bool WebRtcVideoFrame::Init(const CapturedFrame* frame, int dw, int dh) {
frame->time_stamp, frame->rotation); frame->time_stamp, frame->rotation);
} }
bool WebRtcVideoFrame::Alias(const CapturedFrame* frame, int dw, int dh) {
if (CanonicalFourCC(frame->fourcc) != FOURCC_I420 || frame->rotation != 0 ||
frame->width != dw || frame->height != dh) {
// TODO(fbarchard): Enable aliasing of more formats.
return Init(frame, dw, dh);
} else {
Alias(static_cast<uint8*>(frame->data),
frame->data_size,
frame->width,
frame->height,
frame->pixel_width,
frame->pixel_height,
frame->elapsed_time,
frame->time_stamp,
frame->rotation);
return true;
}
}
bool WebRtcVideoFrame::InitToBlack(int w, int h, size_t pixel_width, bool WebRtcVideoFrame::InitToBlack(int w, int h, size_t pixel_width,
size_t pixel_height, int64 elapsed_time, size_t pixel_height, int64 elapsed_time,
int64 time_stamp) { int64 time_stamp) {
@@ -109,20 +160,16 @@ bool WebRtcVideoFrame::InitToBlack(int w, int h, size_t pixel_width,
return true; return true;
} }
void WebRtcVideoFrame::Attach( void WebRtcVideoFrame::Alias(
uint8* buffer, size_t buffer_size, int w, int h, size_t pixel_width, uint8* buffer, size_t buffer_size, int w, int h, size_t pixel_width,
size_t pixel_height, int64 elapsed_time, int64 time_stamp, int rotation) { size_t pixel_height, int64 elapsed_time, int64 time_stamp, int rotation) {
talk_base::scoped_refptr<RefCountedBuffer> video_buffer( talk_base::scoped_refptr<RefCountedBuffer> video_buffer(
new RefCountedBuffer()); new RefCountedBuffer());
video_buffer->SetData(reinterpret_cast<char*>(buffer), buffer_size); video_buffer->Alias(buffer, buffer_size);
Attach(video_buffer.get(), buffer_size, w, h, pixel_width, pixel_height, Attach(video_buffer.get(), buffer_size, w, h, pixel_width, pixel_height,
elapsed_time, time_stamp, rotation); elapsed_time, time_stamp, rotation);
} }
void WebRtcVideoFrame::Detach(uint8** data, size_t* length) {
video_buffer_->ReturnData(reinterpret_cast<char**>(data), length);
}
size_t WebRtcVideoFrame::GetWidth() const { return frame()->Width(); } size_t WebRtcVideoFrame::GetWidth() const { return frame()->Width(); }
size_t WebRtcVideoFrame::GetHeight() const { return frame()->Height(); } size_t WebRtcVideoFrame::GetHeight() const { return frame()->Height(); }
@@ -172,7 +219,7 @@ uint8* WebRtcVideoFrame::GetVPlane() {
} }
VideoFrame* WebRtcVideoFrame::Copy() const { VideoFrame* WebRtcVideoFrame::Copy() const {
const char* old_buffer = video_buffer_->data(); uint8* old_buffer = video_buffer_->data();
if (!old_buffer) if (!old_buffer)
return NULL; return NULL;
size_t new_buffer_size = video_buffer_->length(); size_t new_buffer_size = video_buffer_->length();
@@ -185,7 +232,7 @@ VideoFrame* WebRtcVideoFrame::Copy() const {
} }
bool WebRtcVideoFrame::MakeExclusive() { bool WebRtcVideoFrame::MakeExclusive() {
const int length = static_cast<int>(video_buffer_->length()); const size_t length = video_buffer_->length();
RefCountedBuffer* exclusive_buffer = new RefCountedBuffer(length); RefCountedBuffer* exclusive_buffer = new RefCountedBuffer(length);
memcpy(exclusive_buffer->data(), video_buffer_->data(), length); memcpy(exclusive_buffer->data(), video_buffer_->data(), length);
Attach(exclusive_buffer, length, frame()->Width(), frame()->Height(), Attach(exclusive_buffer, length, frame()->Width(), frame()->Height(),
@@ -272,6 +319,14 @@ bool WebRtcVideoFrame::AddWatermark() {
return true; return true;
} }
webrtc::VideoFrame* WebRtcVideoFrame::frame() {
return video_buffer_->frame();
}
const webrtc::VideoFrame* WebRtcVideoFrame::frame() const {
return video_buffer_->frame();
}
bool WebRtcVideoFrame::Reset( bool WebRtcVideoFrame::Reset(
uint32 format, int w, int h, int dw, int dh, uint8* sample, uint32 format, int w, int h, int dw, int dh, uint8* sample,
size_t sample_size, size_t pixel_width, size_t pixel_height, size_t sample_size, size_t pixel_width, size_t pixel_height,

View File

@@ -39,29 +39,8 @@ namespace cricket {
struct CapturedFrame; struct CapturedFrame;
// Class that takes ownership of the frame passed to it.
class FrameBuffer {
public:
FrameBuffer();
explicit FrameBuffer(size_t length);
~FrameBuffer();
void SetData(char* data, size_t length);
void ReturnData(char** data, size_t* length);
char* data();
size_t length() const;
webrtc::VideoFrame* frame();
const webrtc::VideoFrame* frame() const;
private:
webrtc::VideoFrame video_frame_;
};
class WebRtcVideoFrame : public VideoFrame { class WebRtcVideoFrame : public VideoFrame {
public: public:
typedef talk_base::RefCountedObject<FrameBuffer> RefCountedBuffer;
WebRtcVideoFrame(); WebRtcVideoFrame();
~WebRtcVideoFrame(); ~WebRtcVideoFrame();
@@ -75,17 +54,22 @@ class WebRtcVideoFrame : public VideoFrame {
bool Init(const CapturedFrame* frame, int dw, int dh); bool Init(const CapturedFrame* frame, int dw, int dh);
// Aliases this WebRtcVideoFrame to a CapturedFrame. |frame| must outlive
// this WebRtcVideoFrame.
bool Alias(const CapturedFrame* frame, int dw, int dh);
bool InitToBlack(int w, int h, size_t pixel_width, size_t pixel_height, bool InitToBlack(int w, int h, size_t pixel_width, size_t pixel_height,
int64 elapsed_time, int64 time_stamp); int64 elapsed_time, int64 time_stamp);
void Attach(uint8* buffer, size_t buffer_size, int w, int h, // Aliases this WebRtcVideoFrame to a memory buffer. |buffer| must outlive
size_t pixel_width, size_t pixel_height, int64 elapsed_time, // this WebRtcVideoFrame.
int64 time_stamp, int rotation); void Alias(uint8* buffer, size_t buffer_size, int w, int h,
size_t pixel_width, size_t pixel_height, int64 elapsed_time,
int64 time_stamp, int rotation);
void Detach(uint8** data, size_t* length);
bool AddWatermark(); bool AddWatermark();
webrtc::VideoFrame* frame() { return video_buffer_->frame(); } webrtc::VideoFrame* frame();
webrtc::VideoFrame* frame() const { return video_buffer_->frame(); } const webrtc::VideoFrame* frame() const;
// From base class VideoFrame. // From base class VideoFrame.
virtual bool Reset(uint32 format, int w, int h, int dw, int dh, uint8* sample, virtual bool Reset(uint32 format, int w, int h, int dw, int dh, uint8* sample,
@@ -124,6 +108,9 @@ class WebRtcVideoFrame : public VideoFrame {
size_t size, int stride_rgb) const; size_t size, int stride_rgb) const;
private: private:
class FrameBuffer;
typedef talk_base::RefCountedObject<FrameBuffer> RefCountedBuffer;
void Attach(RefCountedBuffer* video_buffer, size_t buffer_size, int w, int h, void Attach(RefCountedBuffer* video_buffer, size_t buffer_size, int w, int h,
size_t pixel_width, size_t pixel_height, int64 elapsed_time, size_t pixel_width, size_t pixel_height, int64 elapsed_time,
int64 time_stamp, int rotation); int64 time_stamp, int rotation);

View File

@@ -264,35 +264,16 @@ TEST_WEBRTCVIDEOFRAME(CopyIsRef)
TEST_WEBRTCVIDEOFRAME(MakeExclusive) TEST_WEBRTCVIDEOFRAME(MakeExclusive)
// These functions test implementation-specific details. // These functions test implementation-specific details.
TEST_F(WebRtcVideoFrameTest, AttachAndRelease) { TEST_F(WebRtcVideoFrameTest, Alias) {
cricket::WebRtcVideoFrame frame1, frame2; cricket::WebRtcVideoFrame frame1, frame2;
ASSERT_TRUE(LoadFrameNoRepeat(&frame1)); ASSERT_TRUE(LoadFrameNoRepeat(&frame1));
const int64 time_stamp = 0x7FFFFFFFFFFFFFF0LL; const int64 time_stamp = INT64_C(0x7FFFFFFFFFFFFFF0);
frame1.SetTimeStamp(time_stamp); frame1.SetTimeStamp(time_stamp);
EXPECT_EQ(time_stamp, frame1.GetTimeStamp()); EXPECT_EQ(time_stamp, frame1.GetTimeStamp());
frame2.Attach(frame1.frame()->Buffer(), frame1.frame()->Size(), frame2.Alias(frame1.frame()->Buffer(), frame1.frame()->Size(),
kWidth, kHeight, 1, 1, kWidth, kHeight, 1, 1,
frame1.GetElapsedTime(), frame1.GetTimeStamp(), 0); frame1.GetElapsedTime(), frame1.GetTimeStamp(), 0);
EXPECT_TRUE(IsEqual(frame1, frame2, 0)); EXPECT_TRUE(IsEqual(frame1, frame2, 0));
uint8* buffer;
size_t size;
frame2.Detach(&buffer, &size);
EXPECT_EQ(frame1.frame()->Buffer(), buffer);
EXPECT_EQ(frame1.frame()->Size(), size);
EXPECT_TRUE(IsNull(frame2));
EXPECT_TRUE(IsSize(frame1, kWidth, kHeight));
}
TEST_F(WebRtcVideoFrameTest, Transfer) {
cricket::WebRtcVideoFrame frame1, frame2;
ASSERT_TRUE(LoadFrameNoRepeat(&frame1));
uint8* buffer;
size_t size;
frame1.Detach(&buffer, &size);
frame2.Attach(buffer, size, kWidth, kHeight, 1, 1,
frame1.GetElapsedTime(), frame1.GetTimeStamp(), 0);
EXPECT_TRUE(IsNull(frame1));
EXPECT_TRUE(IsSize(frame2, kWidth, kHeight));
} }
// Tests the Init function with different cropped size. // Tests the Init function with different cropped size.