Add H.264 packetization.

This also includes:
- Creating new packetizer and depacketizer interfaces.
- Moved VP8 packetization as well as H264 packetization and depacketization to these interfaces. This is a work in progress and should be continued to get this 100% generic. This also required changing the return type of RtpFormatVp8::NextPacket(), which now returns bool instead of the index of the first partition.
- Created a Create() factory method for packetizers and depacketizers.

R=niklas.enbom@webrtc.org, pbos@webrtc.org

Review URL: https://webrtc-codereview.appspot.com/21009004

git-svn-id: http://webrtc.googlecode.com/svn/trunk@6804 4adac7df-926f-26a2-2b94-8c16560cd09d
This commit is contained in:
stefan@webrtc.org 2014-07-31 14:59:24 +00:00
parent bfe6e08195
commit 2ec560606b
28 changed files with 1405 additions and 377 deletions

View File

@ -61,8 +61,8 @@ struct RTPVideoHeaderVP8 {
};
struct RTPVideoHeaderH264 {
uint8_t nalu_header;
bool single_nalu;
bool stap_a;
bool single_nalu;
};
union RTPVideoTypeHeader {

View File

@ -206,6 +206,7 @@
'rtp_rtcp/source/rtcp_receiver_unittest.cc',
'rtp_rtcp/source/rtcp_sender_unittest.cc',
'rtp_rtcp/source/rtp_fec_unittest.cc',
'rtp_rtcp/source/rtp_format_h264_unittest.cc',
'rtp_rtcp/source/rtp_format_vp8_unittest.cc',
'rtp_rtcp/source/rtp_format_vp8_test_helper.cc',
'rtp_rtcp/source/rtp_format_vp8_test_helper.h',

View File

@ -79,6 +79,10 @@ source_set("rtp_rtcp") {
"source/rtp_sender_video.cc",
"source/rtp_sender_video.h",
"source/video_codec_information.h",
# GN string literals must be double-quoted (single quotes are gyp syntax and
# are rejected by the GN parser); match the surrounding entries.
"source/rtp_format.cc",
"source/rtp_format.h",
"source/rtp_format_h264.cc",
"source/rtp_format_h264.h",
"source/rtp_format_vp8.cc",
"source/rtp_format_vp8.h",
"source/rtp_format_video_generic.h",

View File

@ -19,6 +19,17 @@
namespace webrtc {
// Mock of the RtpData payload callback interface, shared by unit tests that
// need to verify the payloads a depacketizer or FEC receiver delivers.
class MockRtpData : public RtpData {
 public:
  MOCK_METHOD3(OnReceivedPayloadData,
               int32_t(const uint8_t* payloadData,
                       const uint16_t payloadSize,
                       const WebRtcRTPHeader* rtpHeader));
  MOCK_METHOD2(OnRecoveredPacket,
               bool(const uint8_t* packet, int packet_length));
};
class MockRtpRtcp : public RtpRtcp {
public:
MOCK_METHOD1(ChangeUniqueId,

View File

@ -15,6 +15,7 @@
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
#include "webrtc/modules/rtp_rtcp/interface/fec_receiver.h"
#include "webrtc/modules/rtp_rtcp/mocks/mock_rtp_rtcp.h"
#include "webrtc/modules/rtp_rtcp/source/fec_test_helper.h"
#include "webrtc/modules/rtp_rtcp/source/forward_error_correction.h"
@ -25,17 +26,6 @@ using ::testing::Return;
namespace webrtc {
class MockRtpData : public RtpData {
public:
MOCK_METHOD3(OnReceivedPayloadData,
int32_t(const uint8_t* payloadData,
const uint16_t payloadSize,
const WebRtcRTPHeader* rtpHeader));
MOCK_METHOD2(OnRecoveredPacket,
bool(const uint8_t* packet, int packet_length));
};
class ReceiverFecTest : public ::testing::Test {
protected:
virtual void SetUp() {

View File

@ -0,0 +1,41 @@
/*
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "webrtc/modules/rtp_rtcp/source/rtp_format.h"
#include "webrtc/modules/rtp_rtcp/source/rtp_format_h264.h"
namespace webrtc {
// Factory for codec-specific packetizers.
// Only H.264 is implemented so far; requesting any other codec type is a
// programming error (asserted) and yields NULL.
RtpPacketizer* RtpPacketizer::Create(RtpVideoCodecTypes type,
                                     size_t max_payload_len) {
  if (type == kRtpVideoH264)
    return new RtpPacketizerH264(max_payload_len);
  // kRtpVideoNone, kRtpVideoGeneric and kRtpVideoVp8 are not yet routed
  // through this factory.
  assert(false);
  return NULL;
}
// Factory for codec-specific depacketizers; |callback| receives the
// depacketized payloads. Only H.264 is implemented so far; any other codec
// type is a programming error (asserted) and yields NULL.
RtpDepacketizer* RtpDepacketizer::Create(RtpVideoCodecTypes type,
                                         RtpData* const callback) {
  if (type == kRtpVideoH264)
    return new RtpDepacketizerH264(callback);
  // kRtpVideoNone, kRtpVideoGeneric and kRtpVideoVp8 are not yet routed
  // through this factory.
  assert(false);
  return NULL;
}
} // namespace webrtc

View File

@ -0,0 +1,54 @@
/*
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_MODULES_RTP_RTCP_SOURCE_RTP_FORMAT_H_
#define WEBRTC_MODULES_RTP_RTCP_SOURCE_RTP_FORMAT_H_
#include "webrtc/base/constructormagic.h"
#include "webrtc/modules/interface/module_common_types.h"
#include "webrtc/modules/rtp_rtcp/interface/rtp_rtcp_defines.h"
namespace webrtc {
// Generic interface for media-specific RTP packetizers. A packetizer splits
// one encoded frame into RTP payloads no larger than the configured maximum.
class RtpPacketizer {
 public:
  // Factory method; |type| selects the codec-specific implementation and
  // |max_payload_len| bounds the size of each produced payload.
  static RtpPacketizer* Create(RtpVideoCodecTypes type, size_t max_payload_len);
  virtual ~RtpPacketizer() {}
  // Hands one complete encoded frame, together with its fragmentation
  // information (e.g. NALU boundaries), to the packetizer.
  virtual void SetPayloadData(const uint8_t* payload_data,
                              size_t payload_size,
                              const RTPFragmentationHeader* fragmentation) = 0;
  // Get the next payload with payload header.
  // buffer is a pointer to where the output will be written.
  // bytes_to_send is an output variable that will contain number of bytes
  // written to buffer. The parameter last_packet is true for the last packet of
  // the frame, false otherwise (i.e., call the function again to get the
  // next packet).
  // Returns true on success or false if there was no payload to packetize.
  virtual bool NextPacket(uint8_t* buffer,
                          size_t* bytes_to_send,
                          bool* last_packet) = 0;
};
// Generic interface for media-specific RTP depacketizers. Parsed payloads are
// delivered to the RtpData |callback| supplied at creation.
class RtpDepacketizer {
 public:
  // Factory method; |type| selects the codec-specific implementation.
  static RtpDepacketizer* Create(RtpVideoCodecTypes type,
                                 RtpData* const callback);
  virtual ~RtpDepacketizer() {}
  // Parses one RTP payload, fills in codec-specific fields of |rtp_header|
  // and forwards the media data to the callback. Returns false if the
  // callback rejected the payload.
  virtual bool Parse(WebRtcRTPHeader* rtp_header,
                     const uint8_t* payload_data,
                     size_t payload_data_length) = 0;
};
} // namespace webrtc
#endif // WEBRTC_MODULES_RTP_RTCP_SOURCE_RTP_FORMAT_H_

View File

@ -0,0 +1,293 @@
/*
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include <string.h>
#include "webrtc/modules/interface/module_common_types.h"
#include "webrtc/modules/rtp_rtcp/source/rtp_format_h264.h"
#include "webrtc/modules/rtp_rtcp/source/rtp_utility.h"
namespace webrtc {
namespace {
enum Nalu {
kSlice = 1,
kIdr = 5,
kSei = 6,
kSps = 7,
kPps = 8,
kStapA = 24,
kFuA = 28
};
static const size_t kNalHeaderSize = 1;
static const size_t kFuAHeaderSize = 2;
// Bit masks for FU (A and B) indicators.
enum NalDefs { kFBit = 0x80, kNriMask = 0x60, kTypeMask = 0x1F };
// Bit masks for FU (A and B) headers.
enum FuDefs { kSBit = 0x80, kEBit = 0x40, kRBit = 0x20 };
// Fills in |rtp_header| for a single NAL unit packet or a STAP-A packet.
// For STAP-A the frame type is derived from the first aggregated NAL unit.
// The payload itself is passed through untouched; the jitter buffer
// depacketizes STAP-A later.
void ParseSingleNalu(WebRtcRTPHeader* rtp_header,
                     const uint8_t* payload_data,
                     size_t payload_data_length) {
  rtp_header->type.Video.codec = kRtpVideoH264;
  rtp_header->type.Video.isFirstPacket = true;
  RTPVideoHeaderH264* h264_header = &rtp_header->type.Video.codecHeader.H264;
  h264_header->single_nalu = true;
  h264_header->stap_a = false;
  uint8_t nal_type = payload_data[0] & NalDefs::kTypeMask;
  if (nal_type == Nalu::kStapA) {
    h264_header->stap_a = true;
    // The header of the first aggregated NAL unit follows the 1-byte STAP-A
    // NALU header and the first 2-byte NALU length field. Guard against
    // truncated packets (the original code read payload_data[3]
    // unconditionally, an out-of-bounds read for malformed input); if the
    // packet is too short, nal_type stays kStapA and falls into the default
    // (delta frame) branch below.
    if (payload_data_length > 3)
      nal_type = payload_data[3] & NalDefs::kTypeMask;
  }
  switch (nal_type) {
    case Nalu::kSps:
    case Nalu::kPps:
    case Nalu::kIdr:
      // Parameter sets and IDR slices start key frames.
      rtp_header->frameType = kVideoFrameKey;
      break;
    default:
      rtp_header->frameType = kVideoFrameDelta;
      break;
  }
}
// Fills in |rtp_header| for an FU-A (fragmentation unit) packet and computes
// the |offset| at which media data starts within |payload_data|.
// NOTE: for the first fragment this rewrites the input buffer in place via
// const_cast: the FU header byte is overwritten with the reconstructed
// original NALU header so the callback receives a well-formed NAL unit.
void ParseFuaNalu(WebRtcRTPHeader* rtp_header,
                  const uint8_t* payload_data,
                  size_t payload_data_length,
                  size_t* offset) {
  // F and NRI bits of the original NAL unit, taken from the FU indicator.
  uint8_t fnri = payload_data[0] & (NalDefs::kFBit | NalDefs::kNriMask);
  // Original NAL unit type, carried in the low bits of the FU header.
  uint8_t original_nal_type = payload_data[1] & NalDefs::kTypeMask;
  bool first_fragment = (payload_data[1] & FuDefs::kSBit) > 0;
  uint8_t original_nal_header = fnri | original_nal_type;
  if (first_fragment) {
    // Skip only the FU indicator; the FU header position is overwritten with
    // the reconstructed NALU header, so one header byte is delivered.
    *offset = kNalHeaderSize;
    uint8_t* payload = const_cast<uint8_t*>(payload_data + *offset);
    payload[0] = original_nal_header;
  } else {
    // Later fragments: strip both the FU indicator and the FU header.
    *offset = kFuAHeaderSize;
  }
  // Only fragments of an IDR NAL unit start key frames.
  if (original_nal_type == Nalu::kIdr) {
    rtp_header->frameType = kVideoFrameKey;
  } else {
    rtp_header->frameType = kVideoFrameDelta;
  }
  rtp_header->type.Video.codec = kRtpVideoH264;
  rtp_header->type.Video.isFirstPacket = first_fragment;
  RTPVideoHeaderH264* h264_header = &rtp_header->type.Video.codecHeader.H264;
  h264_header->single_nalu = false;
  h264_header->stap_a = false;
}
} // namespace
// Constructs a packetizer producing payloads of at most |max_payload_len|
// bytes. Payload data is supplied later via SetPayloadData().
RtpPacketizerH264::RtpPacketizerH264(size_t max_payload_len)
    : payload_data_(NULL), payload_size_(0), max_payload_len_(max_payload_len) {
}

RtpPacketizerH264::~RtpPacketizerH264() {
}
// Stores one encoded frame and eagerly computes the whole packet queue.
// |fragmentation| must describe the NAL unit boundaries of |payload_data|
// and must outlast this call only (it is copied). May only be called once
// per packetizer instance (packets_ must be empty).
void RtpPacketizerH264::SetPayloadData(
    const uint8_t* payload_data,
    size_t payload_size,
    const RTPFragmentationHeader* fragmentation) {
  assert(packets_.empty());
  assert(fragmentation);
  payload_data_ = payload_data;
  payload_size_ = payload_size;
  fragmentation_.CopyFrom(*fragmentation);
  GeneratePackets();
}
// Walks the NAL units of the frame: units larger than the maximum payload
// are fragmented (FU-A); consecutive smaller units are aggregated (STAP-A).
// PacketizeStapA consumes one or more fragments and returns the index of
// the next unconsumed one, hence the manual index handling.
void RtpPacketizerH264::GeneratePackets() {
  for (size_t i = 0; i < fragmentation_.fragmentationVectorSize;) {
    size_t fragment_offset = fragmentation_.fragmentationOffset[i];
    size_t fragment_length = fragmentation_.fragmentationLength[i];
    if (fragment_length > max_payload_len_) {
      PacketizeFuA(fragment_offset, fragment_length);
      ++i;
    } else {
      i = PacketizeStapA(i, fragment_offset, fragment_length);
    }
  }
}
// Splits one oversized NAL unit into FU-A packets of roughly equal size,
// each fitting in max_payload_len_ including the 2-byte FU-A header.
void RtpPacketizerH264::PacketizeFuA(size_t fragment_offset,
                                     size_t fragment_length) {
  // Fragment payload into packets (FU-A).
  // Strip out the original header and leave room for the FU-A header.
  fragment_length -= kNalHeaderSize;
  size_t offset = fragment_offset + kNalHeaderSize;
  size_t bytes_available = max_payload_len_ - kFuAHeaderSize;
  // Ceiling division: minimum number of packets needed.
  size_t fragments =
      (fragment_length + (bytes_available - 1)) / bytes_available;
  // Balance packet sizes instead of filling all but the last packet:
  // ceiling of fragment_length / fragments.
  size_t avg_size = (fragment_length + fragments - 1) / fragments;
  while (fragment_length > 0) {
    size_t packet_length = avg_size;
    if (fragment_length < avg_size)
      packet_length = fragment_length;
    // Original NALU header; its F/NRI/type bits are needed to build the FU
    // indicator and FU header when the packet is emitted.
    uint8_t header = payload_data_[fragment_offset];
    packets_.push(Packet(offset,
                         packet_length,
                         offset - kNalHeaderSize == fragment_offset,
                         fragment_length == packet_length,
                         false,
                         header));
    offset += packet_length;
    fragment_length -= packet_length;
  }
}
int RtpPacketizerH264::PacketizeStapA(size_t fragment_index,
size_t fragment_offset,
size_t fragment_length) {
// Aggregate fragments into one packet (STAP-A).
size_t payload_size_left = max_payload_len_;
int aggregated_fragments = 0;
assert(payload_size_left >= fragment_length);
while (payload_size_left >= fragment_length) {
if (fragment_length > 0) {
assert(fragment_length > 0);
uint8_t header = payload_data_[fragment_offset];
packets_.push(Packet(fragment_offset,
fragment_length,
aggregated_fragments == 0,
false,
true,
header));
// If we are going to try to aggregate more fragments into this packet
// we need to add the STAP-A NALU header.
if (aggregated_fragments == 0)
payload_size_left -= kNalHeaderSize;
payload_size_left -= fragment_length;
++aggregated_fragments;
}
// Next fragment.
++fragment_index;
if (fragment_index == fragmentation_.fragmentationVectorSize)
break;
fragment_offset = fragmentation_.fragmentationOffset[fragment_index];
fragment_length = fragmentation_.fragmentationLength[fragment_index];
}
packets_.back().last_fragment = true;
return fragment_index;
}
// Emits the next RTP payload into |buffer|. Writes the payload length to
// |bytes_to_send| and sets |last_packet| when the queue is exhausted.
// Returns false when there is nothing left to send.
bool RtpPacketizerH264::NextPacket(uint8_t* buffer,
                                   size_t* bytes_to_send,
                                   bool* last_packet) {
  // Zero once up front; the aggregate/fragment helpers accumulate into it.
  // (The original redundantly zeroed it again in the empty-queue branch.)
  *bytes_to_send = 0;
  if (packets_.empty()) {
    *last_packet = true;
    return false;
  }
  Packet packet = packets_.front();
  if (packet.first_fragment && packet.last_fragment) {
    // Single NAL unit packet: copied verbatim, no extra header.
    *bytes_to_send = packet.size;
    memcpy(buffer, &payload_data_[packet.offset], packet.size);
    packets_.pop();
  } else if (packet.aggregated) {
    NextAggregatePacket(buffer, bytes_to_send);
  } else {
    NextFragmentPacket(buffer, bytes_to_send);
  }
  *last_packet = packets_.empty();
  assert(*bytes_to_send <= max_payload_len_);
  return true;
}
// Writes one STAP-A packet: the STAP-A NALU header followed by a
// (2-byte length, NAL unit) pair for each queued aggregated fragment up to
// and including the one marked last_fragment. The caller (NextPacket) has
// already zeroed *bytes_to_send; this function accumulates into it.
void RtpPacketizerH264::NextAggregatePacket(uint8_t* buffer,
                                            size_t* bytes_to_send) {
  Packet packet = packets_.front();
  assert(packet.first_fragment);
  // STAP-A NALU header: F/NRI bits reused from the first NAL unit, type 24.
  buffer[0] = (packet.header & (kFBit | kNriMask)) | kStapA;
  int index = kNalHeaderSize;
  *bytes_to_send += kNalHeaderSize;
  while (packet.aggregated) {
    // Add NAL unit length field (big-endian 16 bit).
    RtpUtility::AssignUWord16ToBuffer(&buffer[index], packet.size);
    index += 2;
    *bytes_to_send += 2;
    // Add NAL unit.
    memcpy(&buffer[index], &payload_data_[packet.offset], packet.size);
    index += packet.size;
    *bytes_to_send += packet.size;
    packets_.pop();
    if (packet.last_fragment)
      break;
    packet = packets_.front();
  }
  // The loop must have terminated on the packet marked as last.
  assert(packet.last_fragment);
}
// Writes one FU-A packet: a 2-byte FU-A header (FU indicator + FU header)
// followed by this fragment of the NAL unit payload.
void RtpPacketizerH264::NextFragmentPacket(uint8_t* buffer,
                                           size_t* bytes_to_send) {
  Packet packet = packets_.front();
  // NAL unit fragmented over multiple packets (FU-A).
  // We do not send original NALU header, so it will be replaced by the
  // FU indicator header of the first packet.
  uint8_t fu_indicator = (packet.header & (kFBit | kNriMask)) | kFuA;
  uint8_t fu_header = 0;
  // S | E | R | 5 bit type.
  fu_header |= (packet.first_fragment ? kSBit : 0);
  fu_header |= (packet.last_fragment ? kEBit : 0);
  uint8_t type = packet.header & kTypeMask;
  fu_header |= type;
  buffer[0] = fu_indicator;
  buffer[1] = fu_header;
  // The original if/else on last_fragment had two identical branches; the
  // copy is the same for every fragment.
  *bytes_to_send = packet.size + kFuAHeaderSize;
  memcpy(buffer + kFuAHeaderSize, &payload_data_[packet.offset], packet.size);
  packets_.pop();
}
// |callback| receives every depacketized payload; not owned, must outlive
// this depacketizer.
RtpDepacketizerH264::RtpDepacketizerH264(RtpData* const callback)
    : callback_(callback) {
}
// Parses one H.264 RTP payload, dispatching on the NAL unit type of the
// first byte, and forwards the media bytes (minus any FU-A header) to the
// callback. Returns false if the callback rejects the payload.
bool RtpDepacketizerH264::Parse(WebRtcRTPHeader* rtp_header,
                                const uint8_t* payload_data,
                                size_t payload_data_length) {
  size_t offset = 0;
  const uint8_t nal_type = payload_data[0] & NalDefs::kTypeMask;
  if (nal_type == Nalu::kFuA) {
    // Fragmented NAL units (FU-A).
    ParseFuaNalu(rtp_header, payload_data, payload_data_length, &offset);
  } else {
    // We handle STAP-A and single NALU's the same way here. The jitter buffer
    // will depacketize the STAP-A into NAL units later.
    ParseSingleNalu(rtp_header, payload_data, payload_data_length);
  }
  return callback_->OnReceivedPayloadData(
             payload_data + offset, payload_data_length - offset, rtp_header) ==
         0;
}
} // namespace webrtc

View File

@ -0,0 +1,102 @@
/*
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_MODULES_RTP_RTCP_SOURCE_RTP_FORMAT_H264_H_
#define WEBRTC_MODULES_RTP_RTCP_SOURCE_RTP_FORMAT_H264_H_
#include <queue>
#include "webrtc/modules/rtp_rtcp/source/rtp_format.h"
namespace webrtc {
// H.264 packetizer. Splits one encoded frame into single NAL unit packets,
// STAP-A aggregates and FU-A fragments per RFC 6184.
class RtpPacketizerH264 : public RtpPacketizer {
 public:
  // Initialize with payload from encoder.
  // The payload_data must be exactly one encoded H264 frame.
  explicit RtpPacketizerH264(size_t max_payload_len);

  virtual ~RtpPacketizerH264();

  virtual void SetPayloadData(
      const uint8_t* payload_data,
      size_t payload_size,
      const RTPFragmentationHeader* fragmentation) OVERRIDE;

  // Get the next payload with H264 payload header.
  // buffer is a pointer to where the output will be written.
  // bytes_to_send is an output variable that will contain number of bytes
  // written to buffer. The parameter last_packet is true for the last packet of
  // the frame, false otherwise (i.e., call the function again to get the
  // next packet).
  // Returns true on success or false if there was no payload to packetize.
  virtual bool NextPacket(uint8_t* buffer,
                          size_t* bytes_to_send,
                          bool* last_packet) OVERRIDE;

 private:
  // One queued output unit: a slice [offset, offset + size) of the payload
  // plus flags describing how it is framed on the wire.
  struct Packet {
    Packet(size_t offset,
           size_t size,
           bool first_fragment,
           bool last_fragment,
           bool aggregated,
           uint8_t header)
        : offset(offset),
          size(size),
          first_fragment(first_fragment),
          last_fragment(last_fragment),
          aggregated(aggregated),
          header(header) {}

    size_t offset;        // Byte offset into payload_data_.
    size_t size;          // Number of payload bytes.
    bool first_fragment;  // First packet of its NALU / STAP-A group.
    bool last_fragment;   // Last packet of its NALU / STAP-A group.
    bool aggregated;      // True if part of a STAP-A aggregate.
    uint8_t header;       // Original NALU header byte.
  };
  typedef std::queue<Packet> PacketQueue;

  // Fills packets_ from fragmentation_ (called once by SetPayloadData).
  void GeneratePackets();
  // Queues FU-A fragments for one NALU larger than max_payload_len_.
  void PacketizeFuA(size_t fragment_offset, size_t fragment_length);
  // Queues a STAP-A aggregate starting at fragment_index; returns the index
  // of the first fragment not consumed.
  int PacketizeStapA(size_t fragment_index,
                     size_t fragment_offset,
                     size_t fragment_length);
  // Emits the STAP-A / FU-A packet at the head of packets_.
  void NextAggregatePacket(uint8_t* buffer, size_t* bytes_to_send);
  void NextFragmentPacket(uint8_t* buffer, size_t* bytes_to_send);

  const uint8_t* payload_data_;       // Not owned; set by SetPayloadData.
  size_t payload_size_;
  const size_t max_payload_len_;
  RTPFragmentationHeader fragmentation_;
  PacketQueue packets_;

  DISALLOW_COPY_AND_ASSIGN(RtpPacketizerH264);
};
// Depacketizer for H264.
// Depacketizer for H264.
// Parses single NALU, STAP-A and FU-A payloads and forwards the media bytes
// to the RtpData callback.
class RtpDepacketizerH264 : public RtpDepacketizer {
 public:
  // |callback| is not owned and must outlive this object.
  explicit RtpDepacketizerH264(RtpData* const callback);

  virtual ~RtpDepacketizerH264() {}

  virtual bool Parse(WebRtcRTPHeader* rtp_header,
                     const uint8_t* payload_data,
                     size_t payload_data_length) OVERRIDE;

 private:
  RtpData* const callback_;

  DISALLOW_COPY_AND_ASSIGN(RtpDepacketizerH264);
};
} // namespace webrtc
#endif // WEBRTC_MODULES_RTP_RTCP_SOURCE_RTP_FORMAT_H264_H_

View File

@ -0,0 +1,412 @@
/*
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include <vector>
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
#include "webrtc/modules/interface/module_common_types.h"
#include "webrtc/modules/rtp_rtcp/mocks/mock_rtp_rtcp.h"
#include "webrtc/modules/rtp_rtcp/source/rtp_format.h"
#include "webrtc/system_wrappers/interface/scoped_ptr.h"
using ::testing::_;
using ::testing::Args;
using ::testing::ElementsAreArray;
using ::testing::Return;
using ::testing::SaveArgPointee;
namespace webrtc {
namespace {
const size_t kMaxPayloadSize = 1200;
const size_t kLengthFieldLength = 2;
enum Nalu {
kSlice = 1,
kIdr = 5,
kSei = 6,
kSps = 7,
kPps = 8,
kStapA = 24,
kFuA = 28
};
static const size_t kNalHeaderSize = 1;
static const size_t kFuAHeaderSize = 2;
// Bit masks for FU (A and B) indicators.
enum NalDefs { kFBit = 0x80, kNriMask = 0x60, kTypeMask = 0x1F };
// Bit masks for FU (A and B) headers.
enum FuDefs { kSBit = 0x80, kEBit = 0x40, kRBit = 0x20 };
// Checks one FU-A packet: expected length, FU indicator byte, FU header
// (S/E bits derived from the packet's position in the sequence) and payload
// bytes against the original frame.
void VerifyFua(size_t fua_index,
               const uint8_t* expected_payload,
               int offset,
               const uint8_t* packet,
               size_t length,
               const std::vector<size_t>& expected_sizes) {
  ASSERT_EQ(expected_sizes[fua_index] + kFuAHeaderSize, length)
      << "FUA index: " << fua_index;
  const uint8_t kFuIndicator = 0x1C;  // F=0, NRI=0, Type=28.
  EXPECT_EQ(kFuIndicator, packet[0]) << "FUA index: " << fua_index;
  bool should_be_last_fua = (fua_index == expected_sizes.size() - 1);
  uint8_t fu_header = 0;
  if (fua_index == 0)
    fu_header = 0x85;  // S=1, E=0, R=0, Type=5.
  else if (should_be_last_fua)
    fu_header = 0x45;  // S=0, E=1, R=0, Type=5.
  else
    fu_header = 0x05;  // S=0, E=0, R=0, Type=5.
  EXPECT_EQ(fu_header, packet[1]) << "FUA index: " << fua_index;
  std::vector<uint8_t> expected_packet_payload(
      &expected_payload[offset],
      &expected_payload[offset + expected_sizes[fua_index]]);
  EXPECT_THAT(
      expected_packet_payload,
      ::testing::ElementsAreArray(&packet[2], expected_sizes[fua_index]))
      << "FUA index: " << fua_index;
}
// Packetizes a synthetic single-NALU frame of |frame_size| bytes with the
// given maximum payload size and verifies that the resulting FU-A packets
// have exactly the |expected_sizes| payload lengths and contents.
void TestFua(size_t frame_size,
             size_t max_payload_size,
             const std::vector<size_t>& expected_sizes) {
  scoped_ptr<uint8_t[]> frame;
  frame.reset(new uint8_t[frame_size]);
  frame[0] = 0x05;  // F=0, NRI=0, Type=5.
  // Fill the payload with a predictable ramp so offsets can be verified.
  for (size_t i = 0; i < frame_size - kNalHeaderSize; ++i) {
    frame[i + kNalHeaderSize] = i;
  }
  RTPFragmentationHeader fragmentation;
  fragmentation.VerifyAndAllocateFragmentationHeader(1);
  fragmentation.fragmentationOffset[0] = 0;
  fragmentation.fragmentationLength[0] = frame_size;
  scoped_ptr<RtpPacketizer> packetizer(
      RtpPacketizer::Create(kRtpVideoH264, max_payload_size));
  packetizer->SetPayloadData(frame.get(), frame_size, &fragmentation);
  scoped_ptr<uint8_t[]> packet(new uint8_t[max_payload_size]);
  size_t length = 0;
  bool last = false;
  size_t offset = kNalHeaderSize;
  for (size_t i = 0; i < expected_sizes.size(); ++i) {
    ASSERT_TRUE(packetizer->NextPacket(packet.get(), &length, &last));
    VerifyFua(i, frame.get(), offset, packet.get(), length, expected_sizes);
    EXPECT_EQ(i == expected_sizes.size() - 1, last) << "FUA index: " << i;
    offset += expected_sizes[i];
  }
  // Queue must be exhausted after the expected number of packets.
  EXPECT_FALSE(packetizer->NextPacket(packet.get(), &length, &last));
}
// Returns the byte offset of NALU |nalu_index| inside a STAP-A packet whose
// first aggregated NALU is |start_index|: the STAP-A header plus, for every
// preceding NALU, its length field and payload.
size_t GetExpectedNaluOffset(const RTPFragmentationHeader& fragmentation,
                             size_t start_index,
                             size_t nalu_index) {
  assert(nalu_index < fragmentation.fragmentationVectorSize);
  size_t offset = kNalHeaderSize;  // STAP-A header.
  for (size_t idx = start_index; idx < nalu_index; ++idx)
    offset += kLengthFieldLength + fragmentation.fragmentationLength[idx];
  return offset;
}
// Verifies that NALU |nalu_index| of |frame| appears at its expected offset
// inside the STAP-A |packet| (whose first aggregated NALU is
// |first_stapa_index|), with the right length and byte content.
void VerifyStapAPayload(const RTPFragmentationHeader& fragmentation,
                        size_t first_stapa_index,
                        size_t nalu_index,
                        const uint8_t* frame,
                        size_t frame_length,
                        const uint8_t* packet,
                        size_t packet_length) {
  size_t expected_payload_offset =
      GetExpectedNaluOffset(fragmentation, first_stapa_index, nalu_index) +
      kLengthFieldLength;
  size_t offset = fragmentation.fragmentationOffset[nalu_index];
  const uint8_t* expected_payload = &frame[offset];
  size_t expected_payload_length =
      fragmentation.fragmentationLength[nalu_index];
  // Bounds checks before comparing contents.
  ASSERT_LE(offset + expected_payload_length, frame_length);
  ASSERT_LE(expected_payload_offset + expected_payload_length, packet_length);
  std::vector<uint8_t> expected_payload_vector(
      expected_payload, &expected_payload[expected_payload_length]);
  EXPECT_THAT(expected_payload_vector,
              ::testing::ElementsAreArray(&packet[expected_payload_offset],
                                          expected_payload_length));
}
} // namespace
// A frame with one small NALU must be emitted verbatim as a single NAL unit
// packet, with no packetization header added.
TEST(RtpPacketizerH264Test, TestSingleNalu) {
  const uint8_t frame[2] = {0x05, 0xFF};  // F=0, NRI=0, Type=5.
  RTPFragmentationHeader fragmentation;
  fragmentation.VerifyAndAllocateFragmentationHeader(1);
  fragmentation.fragmentationOffset[0] = 0;
  fragmentation.fragmentationLength[0] = sizeof(frame);
  scoped_ptr<RtpPacketizer> packetizer(
      RtpPacketizer::Create(kRtpVideoH264, kMaxPayloadSize));
  packetizer->SetPayloadData(frame, sizeof(frame), &fragmentation);
  uint8_t packet[kMaxPayloadSize] = {0};
  size_t length = 0;
  bool last = false;
  ASSERT_TRUE(packetizer->NextPacket(packet, &length, &last));
  EXPECT_EQ(2u, length);
  EXPECT_TRUE(last);
  EXPECT_EQ(frame[0], packet[0]);
  EXPECT_EQ(frame[1], packet[1]);
  EXPECT_FALSE(packetizer->NextPacket(packet, &length, &last));
}
// Three NALUs that fit together within the maximum payload must be
// aggregated into a single STAP-A packet.
TEST(RtpPacketizerH264Test, TestStapA) {
  const size_t kFrameSize = kMaxPayloadSize - 100;
  uint8_t frame[kFrameSize] = {0x07, 0xFF,  // F=0, NRI=0, Type=7.
                               0x08, 0xFF,  // F=0, NRI=0, Type=8.
                               0x05};       // F=0, NRI=0, Type=5.
  const size_t kPayloadOffset = 5;
  for (size_t i = 0; i < kFrameSize - kPayloadOffset; ++i)
    frame[i + kPayloadOffset] = i;
  RTPFragmentationHeader fragmentation;
  fragmentation.VerifyAndAllocateFragmentationHeader(3);
  fragmentation.fragmentationOffset[0] = 0;
  fragmentation.fragmentationLength[0] = 2;
  fragmentation.fragmentationOffset[1] = 2;
  fragmentation.fragmentationLength[1] = 2;
  fragmentation.fragmentationOffset[2] = 4;
  fragmentation.fragmentationLength[2] =
      kNalHeaderSize + kFrameSize - kPayloadOffset;
  scoped_ptr<RtpPacketizer> packetizer(
      RtpPacketizer::Create(kRtpVideoH264, kMaxPayloadSize));
  packetizer->SetPayloadData(frame, kFrameSize, &fragmentation);
  uint8_t packet[kMaxPayloadSize] = {0};
  size_t length = 0;
  bool last = false;
  ASSERT_TRUE(packetizer->NextPacket(packet, &length, &last));
  // STAP-A header + one length field per NALU + all NALU bytes.
  size_t expected_packet_size =
      kNalHeaderSize + 3 * kLengthFieldLength + kFrameSize;
  ASSERT_EQ(expected_packet_size, length);
  EXPECT_TRUE(last);
  for (size_t i = 0; i < fragmentation.fragmentationVectorSize; ++i)
    VerifyStapAPayload(fragmentation, 0, i, frame, kFrameSize, packet, length);
  EXPECT_FALSE(packetizer->NextPacket(packet, &length, &last));
}
// A frame with one oversized NALU followed by two small ones must yield two
// FU-A packets followed by one STAP-A packet aggregating the small NALUs.
TEST(RtpPacketizerH264Test, TestMixedStapA_FUA) {
  const size_t kFuaNaluSize = 2 * (kMaxPayloadSize - 100);
  const size_t kStapANaluSize = 100;
  RTPFragmentationHeader fragmentation;
  fragmentation.VerifyAndAllocateFragmentationHeader(3);
  fragmentation.fragmentationOffset[0] = 0;
  fragmentation.fragmentationLength[0] = kFuaNaluSize;
  fragmentation.fragmentationOffset[1] = kFuaNaluSize;
  fragmentation.fragmentationLength[1] = kStapANaluSize;
  fragmentation.fragmentationOffset[2] = kFuaNaluSize + kStapANaluSize;
  fragmentation.fragmentationLength[2] = kStapANaluSize;
  const size_t kFrameSize = kFuaNaluSize + 2 * kStapANaluSize;
  uint8_t frame[kFrameSize];
  size_t nalu_offset = 0;
  // Each NALU starts with a type-5 header followed by a predictable ramp.
  for (size_t i = 0; i < fragmentation.fragmentationVectorSize; ++i) {
    nalu_offset = fragmentation.fragmentationOffset[i];
    frame[nalu_offset] = 0x05;  // F=0, NRI=0, Type=5.
    for (size_t j = 1; j < fragmentation.fragmentationLength[i]; ++j) {
      frame[nalu_offset + j] = i + j;
    }
  }
  scoped_ptr<RtpPacketizer> packetizer(
      RtpPacketizer::Create(kRtpVideoH264, kMaxPayloadSize));
  packetizer->SetPayloadData(frame, kFrameSize, &fragmentation);
  // First expecting two FU-A packets.
  std::vector<size_t> fua_sizes;
  fua_sizes.push_back(1100);
  fua_sizes.push_back(1099);
  uint8_t packet[kMaxPayloadSize] = {0};
  size_t length = 0;
  bool last = false;
  int fua_offset = kNalHeaderSize;
  for (size_t i = 0; i < 2; ++i) {
    ASSERT_TRUE(packetizer->NextPacket(packet, &length, &last));
    VerifyFua(i, frame, fua_offset, packet, length, fua_sizes);
    EXPECT_FALSE(last);
    fua_offset += fua_sizes[i];
  }
  // Then expecting one STAP-A packet with two nal units.
  ASSERT_TRUE(packetizer->NextPacket(packet, &length, &last));
  size_t expected_packet_size =
      kNalHeaderSize + 2 * kLengthFieldLength + 2 * kStapANaluSize;
  ASSERT_EQ(expected_packet_size, length);
  EXPECT_TRUE(last);
  for (size_t i = 1; i < fragmentation.fragmentationVectorSize; ++i)
    VerifyStapAPayload(fragmentation, 1, i, frame, kFrameSize, packet, length);
  EXPECT_FALSE(packetizer->NextPacket(packet, &length, &last));
}
// An odd-sized payload one byte over the maximum splits into two balanced
// FU-A packets.
TEST(RtpPacketizerH264Test, TestFUAOddSize) {
  const size_t kExpectedPayloadSizes[2] = {600, 600};
  TestFua(
      kMaxPayloadSize + 1,
      kMaxPayloadSize,
      std::vector<size_t>(kExpectedPayloadSizes,
                          kExpectedPayloadSizes +
                              sizeof(kExpectedPayloadSizes) / sizeof(size_t)));
}
// An even split that doesn't divide evenly puts the extra byte in the first
// FU-A packet.
TEST(RtpPacketizerH264Test, TestFUAEvenSize) {
  const size_t kExpectedPayloadSizes[2] = {601, 600};
  TestFua(
      kMaxPayloadSize + 2,
      kMaxPayloadSize,
      std::vector<size_t>(kExpectedPayloadSizes,
                          kExpectedPayloadSizes +
                              sizeof(kExpectedPayloadSizes) / sizeof(size_t)));
}
// Regression test for the balancing arithmetic: eight FU-A packets with only
// the last one shorter.
TEST(RtpPacketizerH264Test, TestFUARounding) {
  const size_t kExpectedPayloadSizes[8] = {1266, 1266, 1266, 1266,
                                           1266, 1266, 1266, 1261};
  TestFua(
      10124,
      1448,
      std::vector<size_t>(kExpectedPayloadSizes,
                          kExpectedPayloadSizes +
                              sizeof(kExpectedPayloadSizes) / sizeof(size_t)));
}
// A payload sized to exactly fill ten maximum-size FU-A packets.
TEST(RtpPacketizerH264Test, TestFUABig) {
  const size_t kExpectedPayloadSizes[10] = {1198, 1198, 1198, 1198, 1198,
                                            1198, 1198, 1198, 1198, 1198};
  // Generate 10 full sized packets, leave room for FU-A headers minus the NALU
  // header.
  TestFua(
      10 * (kMaxPayloadSize - kFuAHeaderSize) + kNalHeaderSize,
      kMaxPayloadSize,
      std::vector<size_t>(kExpectedPayloadSizes,
                          kExpectedPayloadSizes +
                              sizeof(kExpectedPayloadSizes) / sizeof(size_t)));
}
// Fixture owning a mock payload callback and an H264 depacketizer wired to
// it; the RTP header of the last delivered payload is captured for
// inspection.
class RtpDepacketizerH264Test : public ::testing::Test {
 protected:
  RtpDepacketizerH264Test()
      : callback_(),
        depacketizer_(RtpDepacketizer::Create(kRtpVideoH264, &callback_)) {
    memset(&last_header_, 0, sizeof(last_header_));
  }

  // Expects |data|/|length| to be delivered to the callback exactly once,
  // saving the accompanying RTP header into last_header_.
  void ExpectPacket(const uint8_t* data, size_t length) {
    EXPECT_CALL(callback_, OnReceivedPayloadData(_, length, _))
        .With(Args<0, 1>(ElementsAreArray(data, length)))
        .Times(1)
        .WillRepeatedly(DoAll(SaveArgPointee<2>(&last_header_), Return(0)));
  }

  MockRtpData callback_;
  scoped_ptr<RtpDepacketizer> depacketizer_;
  // RTP header captured from the most recent callback invocation.
  WebRtcRTPHeader last_header_;
};
// A single IDR NALU must be forwarded untouched and flagged as a key frame,
// first packet, single NALU, not STAP-A.
TEST_F(RtpDepacketizerH264Test, TestSingleNalu) {
  uint8_t packet[2] = {0x05, 0xFF};  // F=0, NRI=0, Type=5.
  WebRtcRTPHeader expected_header;
  memset(&expected_header, 0, sizeof(expected_header));
  ExpectPacket(packet, sizeof(packet));
  EXPECT_TRUE(depacketizer_->Parse(&expected_header, packet, sizeof(packet)));
  EXPECT_EQ(kVideoFrameKey, last_header_.frameType);
  EXPECT_TRUE(last_header_.type.Video.isFirstPacket);
  EXPECT_TRUE(last_header_.type.Video.codecHeader.H264.single_nalu);
  EXPECT_FALSE(last_header_.type.Video.codecHeader.H264.stap_a);
}
// A STAP-A whose first aggregated NALU is an IDR must be classified as a key
// frame and flagged stap_a; the raw packet is forwarded intact.
TEST_F(RtpDepacketizerH264Test, TestStapAKey) {
  uint8_t packet[16] = {Nalu::kStapA,  // F=0, NRI=0, Type=24.
                        // Length, nal header, payload.
                        0, 0x02, Nalu::kIdr, 0xFF, 0,
                        0x03, Nalu::kIdr, 0xFF, 0x00, 0,
                        0x04, Nalu::kIdr, 0xFF, 0x00, 0x11};
  WebRtcRTPHeader expected_header;
  memset(&expected_header, 0, sizeof(expected_header));
  ExpectPacket(packet, sizeof(packet));
  EXPECT_TRUE(depacketizer_->Parse(&expected_header, packet, sizeof(packet)));
  EXPECT_EQ(kVideoFrameKey, last_header_.frameType);
  EXPECT_TRUE(last_header_.type.Video.isFirstPacket);
  EXPECT_TRUE(last_header_.type.Video.codecHeader.H264.single_nalu);
  EXPECT_TRUE(last_header_.type.Video.codecHeader.H264.stap_a);
}
// A STAP-A made of non-IDR slices must be classified as a delta frame.
TEST_F(RtpDepacketizerH264Test, TestStapADelta) {
  uint8_t packet[16] = {Nalu::kStapA,  // F=0, NRI=0, Type=24.
                        // Length, nal header, payload.
                        0, 0x02, Nalu::kSlice, 0xFF, 0,
                        0x03, Nalu::kSlice, 0xFF, 0x00, 0,
                        0x04, Nalu::kSlice, 0xFF, 0x00, 0x11};
  WebRtcRTPHeader expected_header;
  memset(&expected_header, 0, sizeof(expected_header));
  ExpectPacket(packet, sizeof(packet));
  EXPECT_TRUE(depacketizer_->Parse(&expected_header, packet, sizeof(packet)));
  EXPECT_EQ(kVideoFrameDelta, last_header_.frameType);
  EXPECT_TRUE(last_header_.type.Video.isFirstPacket);
  EXPECT_TRUE(last_header_.type.Video.codecHeader.H264.single_nalu);
  EXPECT_TRUE(last_header_.type.Video.codecHeader.H264.stap_a);
}
// Three FU-A fragments of one IDR NALU: the first fragment must be delivered
// with the reconstructed NALU header (one byte shorter than the packet),
// later fragments with both FU-A header bytes stripped.
TEST_F(RtpDepacketizerH264Test, TestFuA) {
  uint8_t packet1[3] = {
      Nalu::kFuA,                  // F=0, NRI=0, Type=28.
      FuDefs::kSBit | Nalu::kIdr,  // FU header.
      0x01                         // Payload.
  };
  const uint8_t kExpected1[2] = {Nalu::kIdr, 0x01};
  uint8_t packet2[3] = {
      Nalu::kFuA,  // F=0, NRI=0, Type=28.
      Nalu::kIdr,  // FU header.
      0x02         // Payload.
  };
  const uint8_t kExpected2[1] = {0x02};
  uint8_t packet3[3] = {
      Nalu::kFuA,                  // F=0, NRI=0, Type=28.
      FuDefs::kEBit | Nalu::kIdr,  // FU header.
      0x03                         // Payload.
  };
  const uint8_t kExpected3[1] = {0x03};
  WebRtcRTPHeader expected_header;
  memset(&expected_header, 0, sizeof(expected_header));
  // We expect that the first packet is one byte shorter since the FU-A header
  // has been replaced by the original nal header.
  ExpectPacket(kExpected1, sizeof(kExpected1));
  EXPECT_TRUE(depacketizer_->Parse(&expected_header, packet1, sizeof(packet1)));
  EXPECT_EQ(kVideoFrameKey, last_header_.frameType);
  EXPECT_TRUE(last_header_.type.Video.isFirstPacket);
  EXPECT_FALSE(last_header_.type.Video.codecHeader.H264.single_nalu);
  EXPECT_FALSE(last_header_.type.Video.codecHeader.H264.stap_a);
  // Following packets will be 2 bytes shorter since they will only be appended
  // onto the first packet.
  ExpectPacket(kExpected2, sizeof(kExpected2));
  EXPECT_TRUE(depacketizer_->Parse(&expected_header, packet2, sizeof(packet2)));
  EXPECT_EQ(kVideoFrameKey, last_header_.frameType);
  EXPECT_FALSE(last_header_.type.Video.isFirstPacket);
  EXPECT_FALSE(last_header_.type.Video.codecHeader.H264.single_nalu);
  EXPECT_FALSE(last_header_.type.Video.codecHeader.H264.stap_a);
  ExpectPacket(kExpected3, sizeof(kExpected3));
  EXPECT_TRUE(depacketizer_->Parse(&expected_header, packet3, sizeof(packet3)));
  EXPECT_EQ(kVideoFrameKey, last_header_.frameType);
  EXPECT_FALSE(last_header_.type.Video.isFirstPacket);
  EXPECT_FALSE(last_header_.type.Video.codecHeader.H264.single_nalu);
  EXPECT_FALSE(last_header_.type.Video.codecHeader.H264.stap_a);
}
} // namespace webrtc

View File

@ -21,57 +21,65 @@ namespace webrtc {
// Define how the VP8PacketizerModes are implemented.
// Modes are: kStrict, kAggregate, kEqualSize.
const RtpFormatVp8::AggregationMode RtpFormatVp8::aggr_modes_[kNumModes] =
{ kAggrNone, kAggrPartitions, kAggrFragments };
const bool RtpFormatVp8::balance_modes_[kNumModes] =
{ true, true, true };
const bool RtpFormatVp8::separate_first_modes_[kNumModes] =
{ true, false, false };
const RtpPacketizerVp8::AggregationMode RtpPacketizerVp8::aggr_modes_
[kNumModes] = {kAggrNone, kAggrPartitions, kAggrFragments};
const bool RtpPacketizerVp8::balance_modes_[kNumModes] = {true, true, true};
const bool RtpPacketizerVp8::separate_first_modes_[kNumModes] = {true, false,
false};
RtpFormatVp8::RtpFormatVp8(const uint8_t* payload_data,
uint32_t payload_size,
const RTPVideoHeaderVP8& hdr_info,
int max_payload_len,
const RTPFragmentationHeader& fragmentation,
VP8PacketizerMode mode)
: payload_data_(payload_data),
payload_size_(static_cast<int>(payload_size)),
RtpPacketizerVp8::RtpPacketizerVp8(const RTPVideoHeaderVP8& hdr_info,
int max_payload_len,
VP8PacketizerMode mode)
: payload_data_(NULL),
payload_size_(0),
vp8_fixed_payload_descriptor_bytes_(1),
aggr_mode_(aggr_modes_[mode]),
balance_(balance_modes_[mode]),
separate_first_(separate_first_modes_[mode]),
hdr_info_(hdr_info),
num_partitions_(fragmentation.fragmentationVectorSize),
num_partitions_(0),
max_payload_len_(max_payload_len),
packets_calculated_(false) {
part_info_.CopyFrom(fragmentation);
}
RtpFormatVp8::RtpFormatVp8(const uint8_t* payload_data,
uint32_t payload_size,
const RTPVideoHeaderVP8& hdr_info,
int max_payload_len)
: payload_data_(payload_data),
payload_size_(static_cast<int>(payload_size)),
RtpPacketizerVp8::RtpPacketizerVp8(const RTPVideoHeaderVP8& hdr_info,
int max_payload_len)
: payload_data_(NULL),
payload_size_(0),
part_info_(),
vp8_fixed_payload_descriptor_bytes_(1),
aggr_mode_(aggr_modes_[kEqualSize]),
balance_(balance_modes_[kEqualSize]),
separate_first_(separate_first_modes_[kEqualSize]),
hdr_info_(hdr_info),
num_partitions_(1),
num_partitions_(0),
max_payload_len_(max_payload_len),
packets_calculated_(false) {
}
RtpPacketizerVp8::~RtpPacketizerVp8() {
}
void RtpPacketizerVp8::SetPayloadData(
const uint8_t* payload_data,
size_t payload_size,
const RTPFragmentationHeader* fragmentation) {
payload_data_ = payload_data;
payload_size_ = payload_size;
if (fragmentation) {
part_info_.CopyFrom(*fragmentation);
num_partitions_ = fragmentation->fragmentationVectorSize;
} else {
part_info_.VerifyAndAllocateFragmentationHeader(1);
part_info_.fragmentationLength[0] = payload_size;
part_info_.fragmentationOffset[0] = 0;
num_partitions_ = part_info_.fragmentationVectorSize;
}
}
RtpFormatVp8::~RtpFormatVp8() {}
int RtpFormatVp8::NextPacket(uint8_t* buffer,
int* bytes_to_send,
bool* last_packet) {
bool RtpPacketizerVp8::NextPacket(uint8_t* buffer,
size_t* bytes_to_send,
bool* last_packet) {
if (!packets_calculated_) {
int ret = 0;
if (aggr_mode_ == kAggrPartitions && balance_) {
@ -80,26 +88,28 @@ int RtpFormatVp8::NextPacket(uint8_t* buffer,
ret = GeneratePackets();
}
if (ret < 0) {
return ret;
return false;
}
}
if (packets_.empty()) {
return -1;
return false;
}
InfoStruct packet_info = packets_.front();
packets_.pop();
*bytes_to_send = WriteHeaderAndPayload(packet_info, buffer, max_payload_len_);
if (*bytes_to_send < 0) {
return -1;
int bytes = WriteHeaderAndPayload(packet_info, buffer, max_payload_len_);
if (bytes < 0) {
return false;
}
*bytes_to_send = bytes;
*last_packet = packets_.empty();
return packet_info.first_partition_ix;
return true;
}
int RtpFormatVp8::CalcNextSize(int max_payload_len, int remaining_bytes,
bool split_payload) const {
int RtpPacketizerVp8::CalcNextSize(int max_payload_len,
int remaining_bytes,
bool split_payload) const {
if (max_payload_len == 0 || remaining_bytes == 0) {
return 0;
}
@ -121,7 +131,7 @@ int RtpFormatVp8::CalcNextSize(int max_payload_len, int remaining_bytes,
}
}
int RtpFormatVp8::GeneratePackets() {
int RtpPacketizerVp8::GeneratePackets() {
if (max_payload_len_ < vp8_fixed_payload_descriptor_bytes_
+ PayloadDescriptorExtraLength() + 1) {
// The provided payload length is not long enough for the payload
@ -182,7 +192,7 @@ int RtpFormatVp8::GeneratePackets() {
return 0;
}
int RtpFormatVp8::GeneratePacketsBalancedAggregates() {
int RtpPacketizerVp8::GeneratePacketsBalancedAggregates() {
if (max_payload_len_ < vp8_fixed_payload_descriptor_bytes_
+ PayloadDescriptorExtraLength() + 1) {
// The provided payload length is not long enough for the payload
@ -241,9 +251,9 @@ int RtpFormatVp8::GeneratePacketsBalancedAggregates() {
return 0;
}
void RtpFormatVp8::AggregateSmallPartitions(std::vector<int>* partition_vec,
int* min_size,
int* max_size) {
void RtpPacketizerVp8::AggregateSmallPartitions(std::vector<int>* partition_vec,
int* min_size,
int* max_size) {
assert(min_size && max_size);
*min_size = -1;
*max_size = -1;
@ -285,10 +295,10 @@ void RtpFormatVp8::AggregateSmallPartitions(std::vector<int>* partition_vec,
}
}
void RtpFormatVp8::QueuePacket(int start_pos,
int packet_size,
int first_partition_in_packet,
bool start_on_new_fragment) {
void RtpPacketizerVp8::QueuePacket(int start_pos,
int packet_size,
int first_partition_in_packet,
bool start_on_new_fragment) {
// Write info to packet info struct and store in packet info queue.
InfoStruct packet_info;
packet_info.payload_start_pos = start_pos;
@ -298,9 +308,9 @@ void RtpFormatVp8::QueuePacket(int start_pos,
packets_.push(packet_info);
}
int RtpFormatVp8::WriteHeaderAndPayload(const InfoStruct& packet_info,
uint8_t* buffer,
int buffer_length) const {
int RtpPacketizerVp8::WriteHeaderAndPayload(const InfoStruct& packet_info,
uint8_t* buffer,
int buffer_length) const {
// Write the VP8 payload descriptor.
// 0
// 0 1 2 3 4 5 6 7 8
@ -333,8 +343,8 @@ int RtpFormatVp8::WriteHeaderAndPayload(const InfoStruct& packet_info,
+ extension_length;
}
int RtpFormatVp8::WriteExtensionFields(uint8_t* buffer,
int buffer_length) const {
int RtpPacketizerVp8::WriteExtensionFields(uint8_t* buffer,
int buffer_length) const {
int extension_length = 0;
if (XFieldPresent()) {
uint8_t* x_field = buffer + vp8_fixed_payload_descriptor_bytes_;
@ -363,10 +373,10 @@ int RtpFormatVp8::WriteExtensionFields(uint8_t* buffer,
return extension_length;
}
int RtpFormatVp8::WritePictureIDFields(uint8_t* x_field,
uint8_t* buffer,
int buffer_length,
int* extension_length) const {
int RtpPacketizerVp8::WritePictureIDFields(uint8_t* x_field,
uint8_t* buffer,
int buffer_length,
int* extension_length) const {
*x_field |= kIBit;
const int pic_id_length = WritePictureID(
buffer + vp8_fixed_payload_descriptor_bytes_ + *extension_length,
@ -377,8 +387,7 @@ int RtpFormatVp8::WritePictureIDFields(uint8_t* x_field,
return 0;
}
int RtpFormatVp8::WritePictureID(uint8_t* buffer,
int buffer_length) const {
int RtpPacketizerVp8::WritePictureID(uint8_t* buffer, int buffer_length) const {
const uint16_t pic_id =
static_cast<uint16_t> (hdr_info_.pictureId);
int picture_id_len = PictureIdLength();
@ -392,10 +401,10 @@ int RtpFormatVp8::WritePictureID(uint8_t* buffer,
return picture_id_len;
}
int RtpFormatVp8::WriteTl0PicIdxFields(uint8_t* x_field,
uint8_t* buffer,
int buffer_length,
int* extension_length) const {
int RtpPacketizerVp8::WriteTl0PicIdxFields(uint8_t* x_field,
uint8_t* buffer,
int buffer_length,
int* extension_length) const {
if (buffer_length < vp8_fixed_payload_descriptor_bytes_ + *extension_length
+ 1) {
return -1;
@ -407,10 +416,10 @@ int RtpFormatVp8::WriteTl0PicIdxFields(uint8_t* x_field,
return 0;
}
int RtpFormatVp8::WriteTIDAndKeyIdxFields(uint8_t* x_field,
uint8_t* buffer,
int buffer_length,
int* extension_length) const {
int RtpPacketizerVp8::WriteTIDAndKeyIdxFields(uint8_t* x_field,
uint8_t* buffer,
int buffer_length,
int* extension_length) const {
if (buffer_length < vp8_fixed_payload_descriptor_bytes_ + *extension_length
+ 1) {
return -1;
@ -432,7 +441,7 @@ int RtpFormatVp8::WriteTIDAndKeyIdxFields(uint8_t* x_field,
return 0;
}
int RtpFormatVp8::PayloadDescriptorExtraLength() const {
int RtpPacketizerVp8::PayloadDescriptorExtraLength() const {
int length_bytes = PictureIdLength();
if (TL0PicIdxFieldPresent()) ++length_bytes;
if (TIDFieldPresent() || KeyIdxFieldPresent()) ++length_bytes;
@ -440,7 +449,7 @@ int RtpFormatVp8::PayloadDescriptorExtraLength() const {
return length_bytes;
}
int RtpFormatVp8::PictureIdLength() const {
int RtpPacketizerVp8::PictureIdLength() const {
if (hdr_info_.pictureId == kNoPictureId) {
return 0;
}
@ -450,22 +459,22 @@ int RtpFormatVp8::PictureIdLength() const {
return 2;
}
bool RtpFormatVp8::XFieldPresent() const {
bool RtpPacketizerVp8::XFieldPresent() const {
return (TIDFieldPresent() || TL0PicIdxFieldPresent() || PictureIdPresent()
|| KeyIdxFieldPresent());
}
bool RtpFormatVp8::TIDFieldPresent() const {
bool RtpPacketizerVp8::TIDFieldPresent() const {
assert((hdr_info_.layerSync == false) ||
(hdr_info_.temporalIdx != kNoTemporalIdx));
return (hdr_info_.temporalIdx != kNoTemporalIdx);
}
bool RtpFormatVp8::KeyIdxFieldPresent() const {
bool RtpPacketizerVp8::KeyIdxFieldPresent() const {
return (hdr_info_.keyIdx != kNoKeyIdx);
}
bool RtpFormatVp8::TL0PicIdxFieldPresent() const {
bool RtpPacketizerVp8::TL0PicIdxFieldPresent() const {
return (hdr_info_.tl0PicIdx != kNoTl0PicIdx);
}
} // namespace webrtc

View File

@ -30,6 +30,7 @@
#include "webrtc/base/constructormagic.h"
#include "webrtc/modules/interface/module_common_types.h"
#include "webrtc/modules/rtp_rtcp/source/rtp_format.h"
#include "webrtc/typedefs.h"
namespace webrtc {
@ -44,25 +45,24 @@ enum VP8PacketizerMode {
};
// Packetizer for VP8.
class RtpFormatVp8 {
class RtpPacketizerVp8 : public RtpPacketizer {
public:
// Initialize with payload from encoder and fragmentation info.
// The payload_data must be exactly one encoded VP8 frame.
RtpFormatVp8(const uint8_t* payload_data,
uint32_t payload_size,
const RTPVideoHeaderVP8& hdr_info,
int max_payload_len,
const RTPFragmentationHeader& fragmentation,
VP8PacketizerMode mode);
RtpPacketizerVp8(const RTPVideoHeaderVP8& hdr_info,
int max_payload_len,
VP8PacketizerMode mode);
// Initialize without fragmentation info. Mode kEqualSize will be used.
// The payload_data must be exactly one encoded VP8 frame.
RtpFormatVp8(const uint8_t* payload_data,
uint32_t payload_size,
const RTPVideoHeaderVP8& hdr_info,
int max_payload_len);
RtpPacketizerVp8(const RTPVideoHeaderVP8& hdr_info, int max_payload_len);
~RtpFormatVp8();
virtual ~RtpPacketizerVp8();
virtual void SetPayloadData(
const uint8_t* payload_data,
size_t payload_size,
const RTPFragmentationHeader* fragmentation) OVERRIDE;
// Get the next payload with VP8 payload header.
// max_payload_len limits the sum length of payload and VP8 payload header.
@ -75,9 +75,9 @@ class RtpFormatVp8 {
// the first payload byte in the packet is taken, with the first partition
// having index 0; returns negative on error.
// For the kEqualSize mode: returns 0 on success, return negative on error.
int NextPacket(uint8_t* buffer,
int* bytes_to_send,
bool* last_packet);
virtual bool NextPacket(uint8_t* buffer,
size_t* bytes_to_send,
bool* last_packet) OVERRIDE;
private:
typedef struct {
@ -187,7 +187,7 @@ class RtpFormatVp8 {
bool PictureIdPresent() const { return (PictureIdLength() > 0); }
const uint8_t* payload_data_;
const int payload_size_;
int payload_size_;
RTPFragmentationHeader part_info_;
const int vp8_fixed_payload_descriptor_bytes_; // Length of VP8 payload
// descriptors's fixed part.
@ -195,14 +195,12 @@ class RtpFormatVp8 {
const bool balance_;
const bool separate_first_;
const RTPVideoHeaderVP8 hdr_info_;
const int num_partitions_;
int num_partitions_;
const int max_payload_len_;
InfoQueue packets_;
bool packets_calculated_;
DISALLOW_COPY_AND_ASSIGN(RtpFormatVp8);
DISALLOW_COPY_AND_ASSIGN(RtpPacketizerVp8);
};
} // namespace
#endif // WEBRTC_MODULES_RTP_RTCP_SOURCE_RTP_FORMAT_VP8_H_

View File

@ -64,20 +64,19 @@ bool RtpFormatVp8TestHelper::Init(const int* partition_sizes,
}
void RtpFormatVp8TestHelper::GetAllPacketsAndCheck(
RtpFormatVp8* packetizer,
RtpPacketizerVp8* packetizer,
const int* expected_sizes,
const int* expected_part,
const bool* expected_frag_start,
int expected_num_packets) {
ASSERT_TRUE(inited_);
int send_bytes = 0;
size_t send_bytes = 0;
bool last = false;
for (int i = 0; i < expected_num_packets; ++i) {
std::ostringstream ss;
ss << "Checking packet " << i;
SCOPED_TRACE(ss.str());
EXPECT_EQ(expected_part[i],
packetizer->NextPacket(buffer_, &send_bytes, &last));
EXPECT_TRUE(packetizer->NextPacket(buffer_, &send_bytes, &last));
CheckPacket(send_bytes, expected_sizes[i], last,
expected_frag_start[i]);
}

View File

@ -32,7 +32,7 @@ class RtpFormatVp8TestHelper {
explicit RtpFormatVp8TestHelper(const RTPVideoHeaderVP8* hdr);
~RtpFormatVp8TestHelper();
bool Init(const int* partition_sizes, int num_partitions);
void GetAllPacketsAndCheck(RtpFormatVp8* packetizer,
void GetAllPacketsAndCheck(RtpPacketizerVp8* packetizer,
const int* expected_sizes,
const int* expected_part,
const bool* expected_frag_start,

View File

@ -24,9 +24,9 @@
namespace webrtc {
class RtpFormatVp8Test : public ::testing::Test {
class RtpPacketizerVp8Test : public ::testing::Test {
protected:
RtpFormatVp8Test() : helper_(NULL) {}
RtpPacketizerVp8Test() : helper_(NULL) {}
virtual void TearDown() { delete helper_; }
bool Init(const int* partition_sizes, int num_partitions) {
hdr_info_.pictureId = kNoPictureId;
@ -44,19 +44,17 @@ class RtpFormatVp8Test : public ::testing::Test {
test::RtpFormatVp8TestHelper* helper_;
};
TEST_F(RtpFormatVp8Test, TestStrictMode) {
TEST_F(RtpPacketizerVp8Test, TestStrictMode) {
const int kSizeVector[] = {10, 8, 27};
const int kNumPartitions = sizeof(kSizeVector) / sizeof(kSizeVector[0]);
ASSERT_TRUE(Init(kSizeVector, kNumPartitions));
hdr_info_.pictureId = 200; // > 0x7F should produce 2-byte PictureID.
const int kMaxSize = 13;
RtpFormatVp8 packetizer(helper_->payload_data(),
helper_->payload_size(),
hdr_info_,
kMaxSize,
*(helper_->fragmentation()),
kStrict);
RtpPacketizerVp8 packetizer(hdr_info_, kMaxSize, kStrict);
packetizer.SetPayloadData(helper_->payload_data(),
helper_->payload_size(),
helper_->fragmentation());
// The expected sizes are obtained by running a verified good implementation.
const int kExpectedSizes[] = {9, 9, 12, 11, 11, 11, 10};
@ -71,19 +69,17 @@ TEST_F(RtpFormatVp8Test, TestStrictMode) {
kExpectedFragStart, kExpectedNum);
}
TEST_F(RtpFormatVp8Test, TestAggregateMode) {
TEST_F(RtpPacketizerVp8Test, TestAggregateMode) {
const int kSizeVector[] = {60, 10, 10};
const int kNumPartitions = sizeof(kSizeVector) / sizeof(kSizeVector[0]);
ASSERT_TRUE(Init(kSizeVector, kNumPartitions));
hdr_info_.pictureId = 20; // <= 0x7F should produce 1-byte PictureID.
const int kMaxSize = 25;
RtpFormatVp8 packetizer(helper_->payload_data(),
helper_->payload_size(),
hdr_info_,
kMaxSize,
*(helper_->fragmentation()),
kAggregate);
RtpPacketizerVp8 packetizer(hdr_info_, kMaxSize, kAggregate);
packetizer.SetPayloadData(helper_->payload_data(),
helper_->payload_size(),
helper_->fragmentation());
// The expected sizes are obtained by running a verified good implementation.
const int kExpectedSizes[] = {23, 23, 23, 23};
@ -97,19 +93,17 @@ TEST_F(RtpFormatVp8Test, TestAggregateMode) {
kExpectedFragStart, kExpectedNum);
}
TEST_F(RtpFormatVp8Test, TestAggregateModeManyPartitions1) {
TEST_F(RtpPacketizerVp8Test, TestAggregateModeManyPartitions1) {
const int kSizeVector[] = {1600, 200, 200, 200, 200, 200, 200, 200, 200};
const int kNumPartitions = sizeof(kSizeVector) / sizeof(kSizeVector[0]);
ASSERT_TRUE(Init(kSizeVector, kNumPartitions));
hdr_info_.pictureId = 20; // <= 0x7F should produce 1-byte PictureID.
const int kMaxSize = 1500;
RtpFormatVp8 packetizer(helper_->payload_data(),
helper_->payload_size(),
hdr_info_,
kMaxSize,
*(helper_->fragmentation()),
kAggregate);
RtpPacketizerVp8 packetizer(hdr_info_, kMaxSize, kAggregate);
packetizer.SetPayloadData(helper_->payload_data(),
helper_->payload_size(),
helper_->fragmentation());
// The expected sizes are obtained by running a verified good implementation.
const int kExpectedSizes[] = {803, 803, 803, 803};
@ -123,19 +117,17 @@ TEST_F(RtpFormatVp8Test, TestAggregateModeManyPartitions1) {
kExpectedFragStart, kExpectedNum);
}
TEST_F(RtpFormatVp8Test, TestAggregateModeManyPartitions2) {
TEST_F(RtpPacketizerVp8Test, TestAggregateModeManyPartitions2) {
const int kSizeVector[] = {1599, 200, 200, 200, 1600, 200, 200, 200, 200};
const int kNumPartitions = sizeof(kSizeVector) / sizeof(kSizeVector[0]);
ASSERT_TRUE(Init(kSizeVector, kNumPartitions));
hdr_info_.pictureId = 20; // <= 0x7F should produce 1-byte PictureID.
const int kMaxSize = 1500;
RtpFormatVp8 packetizer(helper_->payload_data(),
helper_->payload_size(),
hdr_info_,
kMaxSize,
*(helper_->fragmentation()),
kAggregate);
RtpPacketizerVp8 packetizer(hdr_info_, kMaxSize, kAggregate);
packetizer.SetPayloadData(helper_->payload_data(),
helper_->payload_size(),
helper_->fragmentation());
// The expected sizes are obtained by running a verified good implementation.
const int kExpectedSizes[] = {803, 802, 603, 803, 803, 803};
@ -149,19 +141,17 @@ TEST_F(RtpFormatVp8Test, TestAggregateModeManyPartitions2) {
kExpectedFragStart, kExpectedNum);
}
TEST_F(RtpFormatVp8Test, TestAggregateModeTwoLargePartitions) {
TEST_F(RtpPacketizerVp8Test, TestAggregateModeTwoLargePartitions) {
const int kSizeVector[] = {1654, 2268};
const int kNumPartitions = sizeof(kSizeVector) / sizeof(kSizeVector[0]);
ASSERT_TRUE(Init(kSizeVector, kNumPartitions));
hdr_info_.pictureId = 20; // <= 0x7F should produce 1-byte PictureID.
const int kMaxSize = 1460;
RtpFormatVp8 packetizer(helper_->payload_data(),
helper_->payload_size(),
hdr_info_,
kMaxSize,
*(helper_->fragmentation()),
kAggregate);
RtpPacketizerVp8 packetizer(hdr_info_, kMaxSize, kAggregate);
packetizer.SetPayloadData(helper_->payload_data(),
helper_->payload_size(),
helper_->fragmentation());
// The expected sizes are obtained by running a verified good implementation.
const int kExpectedSizes[] = {830, 830, 1137, 1137};
@ -176,17 +166,16 @@ TEST_F(RtpFormatVp8Test, TestAggregateModeTwoLargePartitions) {
}
// Verify that EqualSize mode is forced if fragmentation info is missing.
TEST_F(RtpFormatVp8Test, TestEqualSizeModeFallback) {
TEST_F(RtpPacketizerVp8Test, TestEqualSizeModeFallback) {
const int kSizeVector[] = {10, 10, 10};
const int kNumPartitions = sizeof(kSizeVector) / sizeof(kSizeVector[0]);
ASSERT_TRUE(Init(kSizeVector, kNumPartitions));
hdr_info_.pictureId = 200; // > 0x7F should produce 2-byte PictureID
const int kMaxSize = 12; // Small enough to produce 4 packets.
RtpFormatVp8 packetizer(helper_->payload_data(),
helper_->payload_size(),
hdr_info_,
kMaxSize);
RtpPacketizerVp8 packetizer(hdr_info_, kMaxSize);
packetizer.SetPayloadData(
helper_->payload_data(), helper_->payload_size(), NULL);
// Expecting three full packets, and one with the remainder.
const int kExpectedSizes[] = {12, 11, 12, 11};
@ -203,17 +192,16 @@ TEST_F(RtpFormatVp8Test, TestEqualSizeModeFallback) {
}
// Verify that non-reference bit is set. EqualSize mode fallback is expected.
TEST_F(RtpFormatVp8Test, TestNonReferenceBit) {
TEST_F(RtpPacketizerVp8Test, TestNonReferenceBit) {
const int kSizeVector[] = {10, 10, 10};
const int kNumPartitions = sizeof(kSizeVector) / sizeof(kSizeVector[0]);
ASSERT_TRUE(Init(kSizeVector, kNumPartitions));
hdr_info_.nonReference = true;
const int kMaxSize = 25; // Small enough to produce two packets.
RtpFormatVp8 packetizer(helper_->payload_data(),
helper_->payload_size(),
hdr_info_,
kMaxSize);
RtpPacketizerVp8 packetizer(hdr_info_, kMaxSize);
packetizer.SetPayloadData(
helper_->payload_data(), helper_->payload_size(), NULL);
// EqualSize mode => First packet full; other not.
const int kExpectedSizes[] = {16, 16};
@ -230,7 +218,7 @@ TEST_F(RtpFormatVp8Test, TestNonReferenceBit) {
}
// Verify Tl0PicIdx and TID fields, and layerSync bit.
TEST_F(RtpFormatVp8Test, TestTl0PicIdxAndTID) {
TEST_F(RtpPacketizerVp8Test, TestTl0PicIdxAndTID) {
const int kSizeVector[] = {10, 10, 10};
const int kNumPartitions = sizeof(kSizeVector) / sizeof(kSizeVector[0]);
ASSERT_TRUE(Init(kSizeVector, kNumPartitions));
@ -240,12 +228,10 @@ TEST_F(RtpFormatVp8Test, TestTl0PicIdxAndTID) {
hdr_info_.layerSync = true;
// kMaxSize is only limited by allocated buffer size.
const int kMaxSize = helper_->buffer_size();
RtpFormatVp8 packetizer(helper_->payload_data(),
helper_->payload_size(),
hdr_info_,
kMaxSize,
*(helper_->fragmentation()),
kAggregate);
RtpPacketizerVp8 packetizer(hdr_info_, kMaxSize, kAggregate);
packetizer.SetPayloadData(helper_->payload_data(),
helper_->payload_size(),
helper_->fragmentation());
// Expect one single packet of payload_size() + 4 bytes header.
const int kExpectedSizes[1] = {helper_->payload_size() + 4};
@ -260,7 +246,7 @@ TEST_F(RtpFormatVp8Test, TestTl0PicIdxAndTID) {
}
// Verify KeyIdx field.
TEST_F(RtpFormatVp8Test, TestKeyIdx) {
TEST_F(RtpPacketizerVp8Test, TestKeyIdx) {
const int kSizeVector[] = {10, 10, 10};
const int kNumPartitions = sizeof(kSizeVector) / sizeof(kSizeVector[0]);
ASSERT_TRUE(Init(kSizeVector, kNumPartitions));
@ -268,12 +254,10 @@ TEST_F(RtpFormatVp8Test, TestKeyIdx) {
hdr_info_.keyIdx = 17;
// kMaxSize is only limited by allocated buffer size.
const int kMaxSize = helper_->buffer_size();
RtpFormatVp8 packetizer(helper_->payload_data(),
helper_->payload_size(),
hdr_info_,
kMaxSize,
*(helper_->fragmentation()),
kAggregate);
RtpPacketizerVp8 packetizer(hdr_info_, kMaxSize, kAggregate);
packetizer.SetPayloadData(helper_->payload_data(),
helper_->payload_size(),
helper_->fragmentation());
// Expect one single packet of payload_size() + 3 bytes header.
const int kExpectedSizes[1] = {helper_->payload_size() + 3};
@ -288,7 +272,7 @@ TEST_F(RtpFormatVp8Test, TestKeyIdx) {
}
// Verify TID field and KeyIdx field in combination.
TEST_F(RtpFormatVp8Test, TestTIDAndKeyIdx) {
TEST_F(RtpPacketizerVp8Test, TestTIDAndKeyIdx) {
const int kSizeVector[] = {10, 10, 10};
const int kNumPartitions = sizeof(kSizeVector) / sizeof(kSizeVector[0]);
ASSERT_TRUE(Init(kSizeVector, kNumPartitions));
@ -297,12 +281,10 @@ TEST_F(RtpFormatVp8Test, TestTIDAndKeyIdx) {
hdr_info_.keyIdx = 5;
// kMaxSize is only limited by allocated buffer size.
const int kMaxSize = helper_->buffer_size();
RtpFormatVp8 packetizer(helper_->payload_data(),
helper_->payload_size(),
hdr_info_,
kMaxSize,
*(helper_->fragmentation()),
kAggregate);
RtpPacketizerVp8 packetizer(hdr_info_, kMaxSize, kAggregate);
packetizer.SetPayloadData(helper_->payload_data(),
helper_->payload_size(),
helper_->fragmentation());
// Expect one single packet of payload_size() + 3 bytes header.
const int kExpectedSizes[1] = {helper_->payload_size() + 3};

View File

@ -14,6 +14,7 @@
#include <string.h>
#include "webrtc/modules/rtp_rtcp/interface/rtp_payload_registry.h"
#include "webrtc/modules/rtp_rtcp/source/rtp_format.h"
#include "webrtc/modules/rtp_rtcp/source/rtp_format_video_generic.h"
#include "webrtc/modules/rtp_rtcp/source/rtp_utility.h"
#include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
@ -113,8 +114,13 @@ int32_t RTPReceiverVideo::ParseVideoCodecSpecific(
return ReceiveGenericCodec(rtp_header, payload_data, payload_data_length);
case kRtpVideoVp8:
return ReceiveVp8Codec(rtp_header, payload_data, payload_data_length);
case kRtpVideoH264:
assert(false); // Not yet supported.
case kRtpVideoH264: {
scoped_ptr<RtpDepacketizer> depacketizer(RtpDepacketizer::Create(
rtp_header->type.Video.codec, data_callback_));
return depacketizer->Parse(rtp_header, payload_data, payload_data_length)
? 0
: -1;
}
case kRtpVideoNone:
break;
}

View File

@ -60,7 +60,7 @@ class RTPReceiverVideo : public RTPReceiverStrategy {
void SetPacketOverHead(uint16_t packet_over_head);
protected:
private:
int32_t ReceiveGenericCodec(WebRtcRTPHeader* rtp_header,
const uint8_t* payload_data,
uint16_t payload_data_length);
@ -69,10 +69,13 @@ class RTPReceiverVideo : public RTPReceiverStrategy {
const uint8_t* payload_data,
uint16_t payload_data_length);
int32_t ReceiveH264Codec(WebRtcRTPHeader* rtp_header,
const uint8_t* payload_data,
uint16_t payload_data_length);
int32_t BuildRTPheader(const WebRtcRTPHeader* rtp_header,
uint8_t* data_buffer) const;
private:
int32_t ParseVideoCodecSpecific(
WebRtcRTPHeader* rtp_header,
const uint8_t* payload_data,

View File

@ -86,6 +86,10 @@
'rtp_sender_video.cc',
'rtp_sender_video.h',
'video_codec_information.h',
'rtp_format.cc',
'rtp_format.h',
'rtp_format_h264.cc',
'rtp_format_h264.h',
'rtp_format_vp8.cc',
'rtp_format_vp8.h',
'rtp_format_video_generic.h',

View File

@ -275,61 +275,63 @@ int32_t RTPSenderVideo::SetFecParameters(
return 0;
}
int32_t
RTPSenderVideo::SendVideo(const RtpVideoCodecTypes videoType,
const FrameType frameType,
const int8_t payloadType,
const uint32_t captureTimeStamp,
int64_t capture_time_ms,
const uint8_t* payloadData,
const uint32_t payloadSize,
const RTPFragmentationHeader* fragmentation,
VideoCodecInformation* codecInfo,
const RTPVideoTypeHeader* rtpTypeHdr)
{
if( payloadSize == 0)
{
return -1;
}
int32_t RTPSenderVideo::SendVideo(const RtpVideoCodecTypes videoType,
const FrameType frameType,
const int8_t payloadType,
const uint32_t captureTimeStamp,
int64_t capture_time_ms,
const uint8_t* payloadData,
const uint32_t payloadSize,
const RTPFragmentationHeader* fragmentation,
VideoCodecInformation* codecInfo,
const RTPVideoTypeHeader* rtpTypeHdr) {
if (payloadSize == 0) {
return -1;
}
if (frameType == kVideoFrameKey) {
producer_fec_.SetFecParameters(&key_fec_params_,
_numberFirstPartition);
} else {
producer_fec_.SetFecParameters(&delta_fec_params_,
_numberFirstPartition);
}
if (frameType == kVideoFrameKey) {
producer_fec_.SetFecParameters(&key_fec_params_, _numberFirstPartition);
} else {
producer_fec_.SetFecParameters(&delta_fec_params_, _numberFirstPartition);
}
// Default setting for number of first partition packets:
// Will be extracted in SendVP8 for VP8 codec; other codecs use 0
_numberFirstPartition = 0;
// Default setting for number of first partition packets:
// Will be extracted in SendVP8 for VP8 codec; other codecs use 0
_numberFirstPartition = 0;
int32_t retVal = -1;
switch(videoType)
{
switch (videoType) {
case kRtpVideoGeneric:
retVal = SendGeneric(frameType, payloadType, captureTimeStamp,
capture_time_ms, payloadData, payloadSize);
break;
case kRtpVideoVp8:
retVal = SendVP8(frameType,
return SendGeneric(frameType,
payloadType,
captureTimeStamp,
capture_time_ms,
payloadData,
payloadSize,
fragmentation,
rtpTypeHdr);
break;
payloadSize);
case kRtpVideoVp8:
return SendVP8(frameType,
payloadType,
captureTimeStamp,
capture_time_ms,
payloadData,
payloadSize,
fragmentation,
rtpTypeHdr);
case kRtpVideoH264:
return SendH264(frameType,
payloadType,
captureTimeStamp,
capture_time_ms,
payloadData,
payloadSize,
fragmentation,
rtpTypeHdr)
? 0
: -1;
default:
assert(false);
break;
}
if(retVal <= 0)
{
return retVal;
}
return 0;
assert(false);
break;
}
return 0;
}
int32_t RTPSenderVideo::SendGeneric(const FrameType frame_type,
@ -427,8 +429,8 @@ RTPSenderVideo::SendVP8(const FrameType frameType,
assert(rtpTypeHdr);
// Initialize disregarding partition boundaries: this will use kEqualSize
// packetization mode, which produces ~equal size packets for each frame.
RtpFormatVp8 packetizer(data, payloadBytesToSend, rtpTypeHdr->VP8,
maxPayloadLengthVP8);
RtpPacketizerVp8 packetizer(rtpTypeHdr->VP8, maxPayloadLengthVP8);
packetizer.SetPayloadData(data, payloadBytesToSend, NULL);
StorageType storage = kAllowRetransmission;
if (rtpTypeHdr->VP8.temporalIdx == 0 &&
@ -450,22 +452,10 @@ RTPSenderVideo::SendVP8(const FrameType frameType,
{
// Write VP8 Payload Descriptor and VP8 payload.
uint8_t dataBuffer[IP_PACKET_SIZE] = {0};
int payloadBytesInPacket = 0;
int packetStartPartition =
packetizer.NextPacket(&dataBuffer[rtpHeaderLength],
&payloadBytesInPacket, &last);
// TODO(holmer): Temporarily disable first partition packet counting
// to avoid a bug in ProducerFec which doesn't properly handle
// important packets.
// if (packetStartPartition == 0)
// {
// ++_numberFirstPartition;
// }
// else
if (packetStartPartition < 0)
{
return -1;
}
size_t payloadBytesInPacket = 0;
if (!packetizer.NextPacket(
&dataBuffer[rtpHeaderLength], &payloadBytesInPacket, &last))
return -1;
// Write RTP header.
// Set marker bit true if this is the last packet in frame.
@ -485,6 +475,55 @@ RTPSenderVideo::SendVP8(const FrameType frameType,
return 0;
}
bool RTPSenderVideo::SendH264(const FrameType frameType,
const int8_t payloadType,
const uint32_t captureTimeStamp,
int64_t capture_time_ms,
const uint8_t* payloadData,
const uint32_t payloadSize,
const RTPFragmentationHeader* fragmentation,
const RTPVideoTypeHeader* rtpTypeHdr) {
size_t rtp_header_length = _rtpSender.RTPHeaderLength();
int32_t payload_bytes_to_send = payloadSize;
const uint8_t* data = payloadData;
size_t max_payload_length = _rtpSender.MaxDataPayloadLength();
scoped_ptr<RtpPacketizer> packetizer(
RtpPacketizer::Create(kRtpVideoH264, max_payload_length));
packetizer->SetPayloadData(data, payload_bytes_to_send, fragmentation);
StorageType storage = kAllowRetransmission;
bool protect = (frameType == kVideoFrameKey);
bool last = false;
while (!last) {
// Write H264 payload.
uint8_t dataBuffer[IP_PACKET_SIZE] = {0};
size_t payload_bytes_in_packet = 0;
if (!packetizer->NextPacket(
&dataBuffer[rtp_header_length], &payload_bytes_in_packet, &last)) {
return false;
}
// Write RTP header.
// Set marker bit true if this is the last packet in frame.
_rtpSender.BuildRTPheader(
dataBuffer, payloadType, last, captureTimeStamp, capture_time_ms);
if (SendVideoPacket(dataBuffer,
payload_bytes_in_packet,
rtp_header_length,
captureTimeStamp,
capture_time_ms,
storage,
protect)) {
LOG(LS_WARNING)
<< "RTPSenderVideo::SendH264 failed to send packet number "
<< _rtpSender.SequenceNumber();
}
}
return true;
}
void RTPSenderVideo::ProcessBitrate() {
_videoBitrate.Process();
_fecOverheadRate.Process();

View File

@ -110,6 +110,15 @@ private:
const RTPFragmentationHeader* fragmentation,
const RTPVideoTypeHeader* rtpTypeHdr);
bool SendH264(const FrameType frameType,
const int8_t payloadType,
const uint32_t captureTimeStamp,
int64_t capture_time_ms,
const uint8_t* payloadData,
const uint32_t payloadSize,
const RTPFragmentationHeader* fragmentation,
const RTPVideoTypeHeader* rtpTypeHdr);
private:
RTPSenderInterface& _rtpSender;

View File

@ -251,10 +251,11 @@ TEST(ParseVP8Test, TestWithPacketizer) {
inputHeader.layerSync = false;
inputHeader.tl0PicIdx = kNoTl0PicIdx; // Disable.
inputHeader.keyIdx = 31;
RtpFormatVp8 packetizer(payload, 10, inputHeader, 20);
RtpPacketizerVp8 packetizer(inputHeader, 20);
packetizer.SetPayloadData(payload, 10, NULL);
bool last;
int send_bytes;
ASSERT_EQ(0, packetizer.NextPacket(packet, &send_bytes, &last));
size_t send_bytes;
ASSERT_TRUE(packetizer.NextPacket(packet, &send_bytes, &last));
ASSERT_TRUE(last);
RTPPayloadParser rtpPayloadParser(kRtpVideoVp8, packet, send_bytes);

View File

@ -28,29 +28,30 @@ class RTPFragmentationHeader; // forward declaration
// Note: if any pointers are added to this struct, it must be fitted
// with a copy-constructor. See below.
struct CodecSpecificInfoVP8
{
bool hasReceivedSLI;
uint8_t pictureIdSLI;
bool hasReceivedRPSI;
uint64_t pictureIdRPSI;
int16_t pictureId; // negative value to skip pictureId
bool nonReference;
uint8_t simulcastIdx;
uint8_t temporalIdx;
bool layerSync;
int tl0PicIdx; // Negative value to skip tl0PicIdx
int8_t keyIdx; // negative value to skip keyIdx
struct CodecSpecificInfoVP8 {
bool hasReceivedSLI;
uint8_t pictureIdSLI;
bool hasReceivedRPSI;
uint64_t pictureIdRPSI;
int16_t pictureId; // Negative value to skip pictureId.
bool nonReference;
uint8_t simulcastIdx;
uint8_t temporalIdx;
bool layerSync;
int tl0PicIdx; // Negative value to skip tl0PicIdx.
int8_t keyIdx; // Negative value to skip keyIdx.
};
struct CodecSpecificInfoGeneric {
uint8_t simulcast_idx;
};
union CodecSpecificInfoUnion
{
CodecSpecificInfoGeneric generic;
CodecSpecificInfoVP8 VP8;
struct CodecSpecificInfoH264 {};
union CodecSpecificInfoUnion {
CodecSpecificInfoGeneric generic;
CodecSpecificInfoVP8 VP8;
CodecSpecificInfoH264 H264;
};
// Note: if any pointers are added to this struct or its sub-structs, it

View File

@ -100,49 +100,45 @@ void VCMEncodedFrame::Reset()
void VCMEncodedFrame::CopyCodecSpecific(const RTPVideoHeader* header)
{
if (header)
{
switch (header->codec)
{
case kRtpVideoVp8:
{
if (_codecSpecificInfo.codecType != kVideoCodecVP8)
{
// This is the first packet for this frame.
_codecSpecificInfo.codecSpecific.VP8.pictureId = -1;
_codecSpecificInfo.codecSpecific.VP8.temporalIdx = 0;
_codecSpecificInfo.codecSpecific.VP8.layerSync = false;
_codecSpecificInfo.codecSpecific.VP8.keyIdx = -1;
_codecSpecificInfo.codecType = kVideoCodecVP8;
}
_codecSpecificInfo.codecSpecific.VP8.nonReference =
header->codecHeader.VP8.nonReference;
if (header->codecHeader.VP8.pictureId != kNoPictureId)
{
_codecSpecificInfo.codecSpecific.VP8.pictureId =
header->codecHeader.VP8.pictureId;
}
if (header->codecHeader.VP8.temporalIdx != kNoTemporalIdx)
{
_codecSpecificInfo.codecSpecific.VP8.temporalIdx =
header->codecHeader.VP8.temporalIdx;
_codecSpecificInfo.codecSpecific.VP8.layerSync =
header->codecHeader.VP8.layerSync;
}
if (header->codecHeader.VP8.keyIdx != kNoKeyIdx)
{
_codecSpecificInfo.codecSpecific.VP8.keyIdx =
header->codecHeader.VP8.keyIdx;
}
break;
}
default:
{
_codecSpecificInfo.codecType = kVideoCodecUnknown;
break;
}
if (header) {
switch (header->codec) {
case kRtpVideoVp8: {
if (_codecSpecificInfo.codecType != kVideoCodecVP8) {
// This is the first packet for this frame.
_codecSpecificInfo.codecSpecific.VP8.pictureId = -1;
_codecSpecificInfo.codecSpecific.VP8.temporalIdx = 0;
_codecSpecificInfo.codecSpecific.VP8.layerSync = false;
_codecSpecificInfo.codecSpecific.VP8.keyIdx = -1;
_codecSpecificInfo.codecType = kVideoCodecVP8;
}
_codecSpecificInfo.codecSpecific.VP8.nonReference =
header->codecHeader.VP8.nonReference;
if (header->codecHeader.VP8.pictureId != kNoPictureId) {
_codecSpecificInfo.codecSpecific.VP8.pictureId =
header->codecHeader.VP8.pictureId;
}
if (header->codecHeader.VP8.temporalIdx != kNoTemporalIdx) {
_codecSpecificInfo.codecSpecific.VP8.temporalIdx =
header->codecHeader.VP8.temporalIdx;
_codecSpecificInfo.codecSpecific.VP8.layerSync =
header->codecHeader.VP8.layerSync;
}
if (header->codecHeader.VP8.keyIdx != kNoKeyIdx) {
_codecSpecificInfo.codecSpecific.VP8.keyIdx =
header->codecHeader.VP8.keyIdx;
}
break;
}
case kRtpVideoH264: {
_codecSpecificInfo.codecType = kVideoCodecH264;
break;
}
default: {
_codecSpecificInfo.codecType = kVideoCodecUnknown;
break;
}
}
}
}
const RTPFragmentationHeader* VCMEncodedFrame::FragmentationHeader() const {

View File

@ -38,6 +38,9 @@ void CopyCodecSpecific(const CodecSpecificInfo* info, RTPVideoHeader** rtp) {
(*rtp)->simulcastIdx = info->codecSpecific.VP8.simulcastIdx;
return;
}
case kVideoCodecH264:
(*rtp)->codec = kRtpVideoH264;
return;
case kVideoCodecGeneric:
(*rtp)->codec = kRtpVideoGeneric;
(*rtp)->simulcastIdx = info->codecSpecific.generic.simulcast_idx;

View File

@ -482,7 +482,6 @@ bool VCMJitterBuffer::NextMaybeIncompleteTimestamp(uint32_t* timestamp) {
VCMEncodedFrame* VCMJitterBuffer::ExtractAndSetDecode(uint32_t timestamp) {
CriticalSectionScoped cs(crit_sect_);
if (!running_) {
return NULL;
}
@ -611,7 +610,6 @@ VCMFrameBufferEnum VCMJitterBuffer::InsertPacket(const VCMPacket& packet,
if (error != kNoError && frame == NULL) {
return error;
}
int64_t now_ms = clock_->TimeInMilliseconds();
// We are keeping track of the first and latest seq numbers, and
// the number of wraps to be able to calculate how many packets we expect.

View File

@ -94,33 +94,44 @@ void VCMPacket::Reset() {
memset(&codecSpecificHeader, 0, sizeof(RTPVideoHeader));
}
void VCMPacket::CopyCodecSpecifics(const RTPVideoHeader& videoHeader)
{
switch(videoHeader.codec)
{
case kRtpVideoVp8:
{
// Handle all packets within a frame as depending on the previous packet
// TODO(holmer): This should be changed to make fragments independent
// when the VP8 RTP receiver supports fragments.
if (isFirstPacket && markerBit)
completeNALU = kNaluComplete;
else if (isFirstPacket)
completeNALU = kNaluStart;
else if (markerBit)
completeNALU = kNaluEnd;
else
completeNALU = kNaluIncomplete;
void VCMPacket::CopyCodecSpecifics(const RTPVideoHeader& videoHeader) {
switch (videoHeader.codec) {
case kRtpVideoVp8:
// Handle all packets within a frame as depending on the previous packet
// TODO(holmer): This should be changed to make fragments independent
// when the VP8 RTP receiver supports fragments.
if (isFirstPacket && markerBit)
completeNALU = kNaluComplete;
else if (isFirstPacket)
completeNALU = kNaluStart;
else if (markerBit)
completeNALU = kNaluEnd;
else
completeNALU = kNaluIncomplete;
codec = kVideoCodecVP8;
break;
}
default:
{
codec = kVideoCodecUnknown;
break;
}
}
codec = kVideoCodecVP8;
return;
case kRtpVideoH264:
isFirstPacket = videoHeader.isFirstPacket;
if (isFirstPacket)
insertStartCode = true;
if (videoHeader.codecHeader.H264.single_nalu) {
completeNALU = kNaluComplete;
} else if (isFirstPacket) {
completeNALU = kNaluStart;
} else if (markerBit) {
completeNALU = kNaluEnd;
} else {
completeNALU = kNaluIncomplete;
}
codec = kVideoCodecH264;
return;
case kRtpVideoGeneric:
case kRtpVideoNone:
codec = kVideoCodecUnknown;
return;
}
}
}
} // namespace webrtc

View File

@ -14,7 +14,7 @@
#include "webrtc/system_wrappers/interface/logging.h"
namespace webrtc {
namespace {
// Used in determining whether a frame is decodable.
enum {kRttThreshold = 100}; // Not decodable if Rtt is lower than this.
@ -23,6 +23,11 @@ enum {kRttThreshold = 100}; // Not decodable if Rtt is lower than this.
static const float kLowPacketPercentageThreshold = 0.2f;
static const float kHighPacketPercentageThreshold = 0.8f;
uint16_t BufferToUWord16(const uint8_t* dataBuffer) {
return (dataBuffer[0] << 8) | dataBuffer[1];
}
} // namespace
VCMSessionInfo::VCMSessionInfo()
: session_nack_(false),
complete_(false),
@ -121,9 +126,6 @@ int VCMSessionInfo::InsertBuffer(uint8_t* frame_buffer,
VCMPacket& packet = *packet_it;
PacketIterator it;
int packet_size = packet.sizeBytes;
packet_size += (packet.insertStartCode ? kH264StartCodeLengthBytes : 0);
// Calculate the offset into the frame buffer for this packet.
int offset = 0;
for (it = packets_.begin(); it != packet_it; ++it)
@ -131,23 +133,63 @@ int VCMSessionInfo::InsertBuffer(uint8_t* frame_buffer,
// Set the data pointer to pointing to the start of this packet in the
// frame buffer.
const uint8_t* data = packet.dataPtr;
const uint8_t* packet_buffer = packet.dataPtr;
packet.dataPtr = frame_buffer + offset;
packet.sizeBytes = packet_size;
ShiftSubsequentPackets(packet_it, packet_size);
const unsigned char startCode[] = {0, 0, 0, 1};
if (packet.insertStartCode) {
memcpy(const_cast<uint8_t*>(packet.dataPtr), startCode,
kH264StartCodeLengthBytes);
// We handle H.264 STAP-A packets in a special way as we need to remove the
// two length bytes between each NAL unit, and potentially add start codes.
const size_t kH264NALHeaderLengthInBytes = 1;
const size_t kLengthFieldLength = 2;
if (packet.codecSpecificHeader.codecHeader.H264.stap_a) {
size_t required_length = 0;
const uint8_t* nalu_ptr = packet_buffer + kH264NALHeaderLengthInBytes;
while (nalu_ptr < packet_buffer + packet.sizeBytes) {
uint32_t length = BufferToUWord16(nalu_ptr);
required_length +=
length + (packet.insertStartCode ? kH264StartCodeLengthBytes : 0);
nalu_ptr += kLengthFieldLength + length;
}
ShiftSubsequentPackets(packet_it, required_length);
nalu_ptr = packet_buffer + kH264NALHeaderLengthInBytes;
uint8_t* frame_buffer_ptr = frame_buffer + offset;
while (nalu_ptr < packet_buffer + packet.sizeBytes) {
uint32_t length = BufferToUWord16(nalu_ptr);
nalu_ptr += kLengthFieldLength;
frame_buffer_ptr += Insert(nalu_ptr,
length,
packet.insertStartCode,
const_cast<uint8_t*>(frame_buffer_ptr));
nalu_ptr += length;
}
packet.sizeBytes = required_length;
return packet.sizeBytes;
}
memcpy(const_cast<uint8_t*>(packet.dataPtr
+ (packet.insertStartCode ? kH264StartCodeLengthBytes : 0)),
data,
packet.sizeBytes);
ShiftSubsequentPackets(
packet_it,
packet.sizeBytes +
(packet.insertStartCode ? kH264StartCodeLengthBytes : 0));
return packet_size;
packet.sizeBytes = Insert(packet_buffer,
packet.sizeBytes,
packet.insertStartCode,
const_cast<uint8_t*>(packet.dataPtr));
return packet.sizeBytes;
}
size_t VCMSessionInfo::Insert(const uint8_t* buffer,
size_t length,
bool insert_start_code,
uint8_t* frame_buffer) {
if (insert_start_code) {
const unsigned char startCode[] = {0, 0, 0, 1};
memcpy(frame_buffer, startCode, kH264StartCodeLengthBytes);
}
memcpy(frame_buffer + (insert_start_code ? kH264StartCodeLengthBytes : 0),
buffer,
length);
length += (insert_start_code ? kH264StartCodeLengthBytes : 0);
return length;
}
void VCMSessionInfo::ShiftSubsequentPackets(PacketIterator it,
@ -420,34 +462,49 @@ int VCMSessionInfo::InsertPacket(const VCMPacket& packet,
(*rit).seqNum == packet.seqNum && (*rit).sizeBytes > 0)
return -2;
// Only insert media packets between first and last packets (when available).
// Placing check here, as to properly account for duplicate packets.
// Check if this is first packet (only valid for some codecs)
// Should only be set for one packet per session.
if (packet.isFirstPacket && first_packet_seq_num_ == -1) {
// The first packet in a frame signals the frame type.
if (packet.codec == kVideoCodecH264) {
frame_type_ = packet.frameType;
// Store the sequence number for the first packet.
first_packet_seq_num_ = static_cast<int>(packet.seqNum);
} else if (first_packet_seq_num_ != -1 &&
!IsNewerSequenceNumber(packet.seqNum, first_packet_seq_num_)) {
LOG(LS_WARNING) << "Received packet with a sequence number which is out of"
"frame boundaries";
return -3;
} else if (frame_type_ == kFrameEmpty && packet.frameType != kFrameEmpty) {
// Update the frame type with the type of the first media packet.
// TODO(mikhal): Can this trigger?
frame_type_ = packet.frameType;
}
if (packet.isFirstPacket &&
(first_packet_seq_num_ == -1 ||
IsNewerSequenceNumber(first_packet_seq_num_, packet.seqNum))) {
first_packet_seq_num_ = packet.seqNum;
}
if (packet.markerBit &&
(last_packet_seq_num_ == -1 ||
IsNewerSequenceNumber(packet.seqNum, last_packet_seq_num_))) {
last_packet_seq_num_ = packet.seqNum;
}
} else {
// Only insert media packets between first and last packets (when
// available).
// Placing check here, as to properly account for duplicate packets.
// Check if this is first packet (only valid for some codecs)
// Should only be set for one packet per session.
if (packet.isFirstPacket && first_packet_seq_num_ == -1) {
// The first packet in a frame signals the frame type.
frame_type_ = packet.frameType;
// Store the sequence number for the first packet.
first_packet_seq_num_ = static_cast<int>(packet.seqNum);
} else if (first_packet_seq_num_ != -1 &&
!IsNewerSequenceNumber(packet.seqNum, first_packet_seq_num_)) {
LOG(LS_WARNING) << "Received packet with a sequence number which is out "
"of frame boundaries";
return -3;
} else if (frame_type_ == kFrameEmpty && packet.frameType != kFrameEmpty) {
// Update the frame type with the type of the first media packet.
// TODO(mikhal): Can this trigger?
frame_type_ = packet.frameType;
}
// Track the marker bit, should only be set for one packet per session.
if (packet.markerBit && last_packet_seq_num_ == -1) {
last_packet_seq_num_ = static_cast<int>(packet.seqNum);
} else if (last_packet_seq_num_ != -1 &&
IsNewerSequenceNumber(packet.seqNum, last_packet_seq_num_)) {
LOG(LS_WARNING) << "Received packet with a sequence number which is out of"
"frame boundaries";
return -3;
// Track the marker bit, should only be set for one packet per session.
if (packet.markerBit && last_packet_seq_num_ == -1) {
last_packet_seq_num_ = static_cast<int>(packet.seqNum);
} else if (last_packet_seq_num_ != -1 &&
IsNewerSequenceNumber(packet.seqNum, last_packet_seq_num_)) {
LOG(LS_WARNING) << "Received packet with a sequence number which is out "
"of frame boundaries";
return -3;
}
}
// The insert operation invalidates the iterator |rit|.

View File

@ -116,6 +116,10 @@ class VCMSessionInfo {
const PacketIterator& prev_it);
int InsertBuffer(uint8_t* frame_buffer,
PacketIterator packetIterator);
size_t Insert(const uint8_t* buffer,
size_t length,
bool insert_start_code,
uint8_t* frame_buffer);
void ShiftSubsequentPackets(PacketIterator it, int steps_to_shift);
PacketIterator FindNaluEnd(PacketIterator packet_iter) const;
// Deletes the data of all packets between |start| and |end|, inclusively.