Remove partially defined WebRtcRTPHeader from Parse().

It is a bit ugly that RtpDepacketizer::ParsedPayload carries a partially defined WebRtcRTPHeader, which is then sent to the Parse() function to be filled in internally.
To make this clearer, this CL gets rid of the partially defined WebRtcRTPHeader and has ParsedPayload hold the parsed fields (frame_type and type) directly.

BUG=
R=pbos@webrtc.org, stefan@webrtc.org

Review URL: https://webrtc-codereview.appspot.com/28919004

Patch from Changbin Shao <changbin.shao@intel.com>.

git-svn-id: http://webrtc.googlecode.com/svn/trunk@7660 4adac7df-926f-26a2-2b94-8c16560cd09d
pbos@webrtc.org 2014-11-07 11:02:12 +00:00
parent a2ef4fe9c3
commit d42a3adf42
7 changed files with 141 additions and 156 deletions

@ -53,12 +53,10 @@ class RtpPacketizer {
class RtpDepacketizer {
public:
struct ParsedPayload {
explicit ParsedPayload(WebRtcRTPHeader* rtp_header)
: payload(NULL), payload_length(0), header(rtp_header) {}
const uint8_t* payload;
size_t payload_length;
WebRtcRTPHeader* header;
FrameType frame_type;
RTPTypeHeader type;
};
static RtpDepacketizer* Create(RtpVideoCodecTypes type);
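With this change ParsedPayload no longer holds a pointer to a caller-provided WebRtcRTPHeader: the constructor and the WebRtcRTPHeader* header member are removed, and the struct keeps only payload, payload_length, frame_type and type. A minimal sketch of the resulting caller pattern (type names taken from this diff, everything else illustrative; the RTPReceiverVideo hunk at the end does exactly this):

    RtpDepacketizer::ParsedPayload parsed_payload;
    if (!depacketizer->Parse(&parsed_payload, payload_data, payload_data_length))
      return -1;
    // Copy the parsed results back into the caller-owned RTP header.
    rtp_header->frameType = parsed_payload.frame_type;
    rtp_header->type = parsed_payload.type;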

@ -37,12 +37,15 @@ enum NalDefs { kFBit = 0x80, kNriMask = 0x60, kTypeMask = 0x1F };
// Bit masks for FU (A and B) headers.
enum FuDefs { kSBit = 0x80, kEBit = 0x40, kRBit = 0x20 };
void ParseSingleNalu(WebRtcRTPHeader* rtp_header,
void ParseSingleNalu(RtpDepacketizer::ParsedPayload* parsed_payload,
const uint8_t* payload_data,
size_t payload_data_length) {
rtp_header->type.Video.codec = kRtpVideoH264;
rtp_header->type.Video.isFirstPacket = true;
RTPVideoHeaderH264* h264_header = &rtp_header->type.Video.codecHeader.H264;
parsed_payload->type.Video.width = 0;
parsed_payload->type.Video.height = 0;
parsed_payload->type.Video.codec = kRtpVideoH264;
parsed_payload->type.Video.isFirstPacket = true;
RTPVideoHeaderH264* h264_header =
&parsed_payload->type.Video.codecHeader.H264;
h264_header->single_nalu = true;
h264_header->stap_a = false;
@ -56,15 +59,15 @@ void ParseSingleNalu(WebRtcRTPHeader* rtp_header,
case kSps:
case kPps:
case kIdr:
rtp_header->frameType = kVideoFrameKey;
parsed_payload->frame_type = kVideoFrameKey;
break;
default:
rtp_header->frameType = kVideoFrameDelta;
parsed_payload->frame_type = kVideoFrameDelta;
break;
}
}
void ParseFuaNalu(WebRtcRTPHeader* rtp_header,
void ParseFuaNalu(RtpDepacketizer::ParsedPayload* parsed_payload,
const uint8_t* payload_data,
size_t payload_data_length,
size_t* offset) {
@ -82,13 +85,16 @@ void ParseFuaNalu(WebRtcRTPHeader* rtp_header,
}
if (original_nal_type == kIdr) {
rtp_header->frameType = kVideoFrameKey;
parsed_payload->frame_type = kVideoFrameKey;
} else {
rtp_header->frameType = kVideoFrameDelta;
parsed_payload->frame_type = kVideoFrameDelta;
}
rtp_header->type.Video.codec = kRtpVideoH264;
rtp_header->type.Video.isFirstPacket = first_fragment;
RTPVideoHeaderH264* h264_header = &rtp_header->type.Video.codecHeader.H264;
parsed_payload->type.Video.width = 0;
parsed_payload->type.Video.height = 0;
parsed_payload->type.Video.codec = kRtpVideoH264;
parsed_payload->type.Video.isFirstPacket = first_fragment;
RTPVideoHeaderH264* h264_header =
&parsed_payload->type.Video.codecHeader.H264;
h264_header->single_nalu = false;
h264_header->stap_a = false;
}
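For reference, the byte layout these two parsers rely on (per RFC 6184; a simplified sketch using the bit masks from the enums at the top of this file, not the depacketizer's exact code). A plain NAL header byte is F(1)|NRI(2)|Type(5); for FU-A the first payload byte is the FU indicator and the second byte is the FU header carrying the original NAL type:

    uint8_t fu_indicator = payload_data[0];             // F|NRI|Type, Type == kFuA
    uint8_t fu_header = payload_data[1];                 // S|E|R|Type of the original NALU
    bool first_fragment = (fu_header & kSBit) != 0;      // becomes isFirstPacket above
    uint8_t original_nal_type = fu_header & kTypeMask;   // kIdr => key frame, else delta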
@ -298,12 +304,11 @@ bool RtpDepacketizerH264::Parse(ParsedPayload* parsed_payload,
size_t offset = 0;
if (nal_type == kFuA) {
// Fragmented NAL units (FU-A).
ParseFuaNalu(
parsed_payload->header, payload_data, payload_data_length, &offset);
ParseFuaNalu(parsed_payload, payload_data, payload_data_length, &offset);
} else {
// We handle STAP-A and single NALU's the same way here. The jitter buffer
// will depacketize the STAP-A into NAL units later.
ParseSingleNalu(parsed_payload->header, payload_data, payload_data_length);
ParseSingleNalu(parsed_payload, payload_data, payload_data_length);
}
parsed_payload->payload = payload_data + offset;

@ -399,17 +399,15 @@ class RtpDepacketizerH264Test : public ::testing::Test {
TEST_F(RtpDepacketizerH264Test, TestSingleNalu) {
uint8_t packet[2] = {0x05, 0xFF}; // F=0, NRI=0, Type=5.
WebRtcRTPHeader expected_header;
memset(&expected_header, 0, sizeof(expected_header));
RtpDepacketizer::ParsedPayload payload(&expected_header);
RtpDepacketizer::ParsedPayload payload;
ASSERT_TRUE(depacketizer_->Parse(&payload, packet, sizeof(packet)));
ExpectPacket(&payload, packet, sizeof(packet));
EXPECT_EQ(kVideoFrameKey, payload.header->frameType);
EXPECT_TRUE(payload.header->type.Video.isFirstPacket);
EXPECT_TRUE(payload.header->type.Video.codecHeader.H264.single_nalu);
EXPECT_FALSE(payload.header->type.Video.codecHeader.H264.stap_a);
EXPECT_EQ(kVideoFrameKey, payload.frame_type);
EXPECT_EQ(kRtpVideoH264, payload.type.Video.codec);
EXPECT_TRUE(payload.type.Video.isFirstPacket);
EXPECT_TRUE(payload.type.Video.codecHeader.H264.single_nalu);
EXPECT_FALSE(payload.type.Video.codecHeader.H264.stap_a);
}
TEST_F(RtpDepacketizerH264Test, TestStapAKey) {
@ -417,17 +415,15 @@ TEST_F(RtpDepacketizerH264Test, TestStapAKey) {
// Length, nal header, payload.
0, 0x02, kIdr, 0xFF, 0, 0x03, kIdr, 0xFF,
0x00, 0, 0x04, kIdr, 0xFF, 0x00, 0x11};
WebRtcRTPHeader expected_header;
memset(&expected_header, 0, sizeof(expected_header));
RtpDepacketizer::ParsedPayload payload(&expected_header);
RtpDepacketizer::ParsedPayload payload;
ASSERT_TRUE(depacketizer_->Parse(&payload, packet, sizeof(packet)));
ExpectPacket(&payload, packet, sizeof(packet));
EXPECT_EQ(kVideoFrameKey, payload.header->frameType);
EXPECT_TRUE(payload.header->type.Video.isFirstPacket);
EXPECT_TRUE(payload.header->type.Video.codecHeader.H264.single_nalu);
EXPECT_TRUE(payload.header->type.Video.codecHeader.H264.stap_a);
EXPECT_EQ(kVideoFrameKey, payload.frame_type);
EXPECT_EQ(kRtpVideoH264, payload.type.Video.codec);
EXPECT_TRUE(payload.type.Video.isFirstPacket);
EXPECT_TRUE(payload.type.Video.codecHeader.H264.single_nalu);
EXPECT_TRUE(payload.type.Video.codecHeader.H264.stap_a);
}
TEST_F(RtpDepacketizerH264Test, TestStapADelta) {
@ -435,17 +431,15 @@ TEST_F(RtpDepacketizerH264Test, TestStapADelta) {
// Length, nal header, payload.
0, 0x02, kSlice, 0xFF, 0, 0x03, kSlice, 0xFF,
0x00, 0, 0x04, kSlice, 0xFF, 0x00, 0x11};
WebRtcRTPHeader expected_header;
memset(&expected_header, 0, sizeof(expected_header));
RtpDepacketizer::ParsedPayload payload(&expected_header);
RtpDepacketizer::ParsedPayload payload;
ASSERT_TRUE(depacketizer_->Parse(&payload, packet, sizeof(packet)));
ExpectPacket(&payload, packet, sizeof(packet));
EXPECT_EQ(kVideoFrameDelta, payload.header->frameType);
EXPECT_TRUE(payload.header->type.Video.isFirstPacket);
EXPECT_TRUE(payload.header->type.Video.codecHeader.H264.single_nalu);
EXPECT_TRUE(payload.header->type.Video.codecHeader.H264.stap_a);
EXPECT_EQ(kVideoFrameDelta, payload.frame_type);
EXPECT_EQ(kRtpVideoH264, payload.type.Video.codec);
EXPECT_TRUE(payload.type.Video.isFirstPacket);
EXPECT_TRUE(payload.type.Video.codecHeader.H264.single_nalu);
EXPECT_TRUE(payload.type.Video.codecHeader.H264.stap_a);
}
TEST_F(RtpDepacketizerH264Test, TestFuA) {
@ -470,33 +464,36 @@ TEST_F(RtpDepacketizerH264Test, TestFuA) {
};
const uint8_t kExpected3[1] = {0x03};
WebRtcRTPHeader expected_header;
memset(&expected_header, 0, sizeof(expected_header));
RtpDepacketizer::ParsedPayload payload(&expected_header);
RtpDepacketizer::ParsedPayload payload;
// We expect that the first packet is one byte shorter since the FU-A header
// has been replaced by the original nal header.
ASSERT_TRUE(depacketizer_->Parse(&payload, packet1, sizeof(packet1)));
ExpectPacket(&payload, kExpected1, sizeof(kExpected1));
EXPECT_EQ(kVideoFrameKey, payload.header->frameType);
EXPECT_TRUE(payload.header->type.Video.isFirstPacket);
EXPECT_FALSE(payload.header->type.Video.codecHeader.H264.single_nalu);
EXPECT_FALSE(payload.header->type.Video.codecHeader.H264.stap_a);
EXPECT_EQ(kVideoFrameKey, payload.frame_type);
EXPECT_EQ(kRtpVideoH264, payload.type.Video.codec);
EXPECT_TRUE(payload.type.Video.isFirstPacket);
EXPECT_FALSE(payload.type.Video.codecHeader.H264.single_nalu);
EXPECT_FALSE(payload.type.Video.codecHeader.H264.stap_a);
// Following packets will be 2 bytes shorter since they will only be appended
// onto the first packet.
payload = RtpDepacketizer::ParsedPayload();
ASSERT_TRUE(depacketizer_->Parse(&payload, packet2, sizeof(packet2)));
ExpectPacket(&payload, kExpected2, sizeof(kExpected2));
EXPECT_EQ(kVideoFrameKey, payload.header->frameType);
EXPECT_FALSE(payload.header->type.Video.isFirstPacket);
EXPECT_FALSE(payload.header->type.Video.codecHeader.H264.single_nalu);
EXPECT_FALSE(payload.header->type.Video.codecHeader.H264.stap_a);
EXPECT_EQ(kVideoFrameKey, payload.frame_type);
EXPECT_EQ(kRtpVideoH264, payload.type.Video.codec);
EXPECT_FALSE(payload.type.Video.isFirstPacket);
EXPECT_FALSE(payload.type.Video.codecHeader.H264.single_nalu);
EXPECT_FALSE(payload.type.Video.codecHeader.H264.stap_a);
payload = RtpDepacketizer::ParsedPayload();
ASSERT_TRUE(depacketizer_->Parse(&payload, packet3, sizeof(packet3)));
ExpectPacket(&payload, kExpected3, sizeof(kExpected3));
EXPECT_EQ(kVideoFrameKey, payload.header->frameType);
EXPECT_FALSE(payload.header->type.Video.isFirstPacket);
EXPECT_FALSE(payload.header->type.Video.codecHeader.H264.single_nalu);
EXPECT_FALSE(payload.header->type.Video.codecHeader.H264.stap_a);
EXPECT_EQ(kVideoFrameKey, payload.frame_type);
EXPECT_EQ(kRtpVideoH264, payload.type.Video.codec);
EXPECT_FALSE(payload.type.Video.isFirstPacket);
EXPECT_FALSE(payload.type.Video.codecHeader.H264.single_nalu);
EXPECT_FALSE(payload.type.Video.codecHeader.H264.stap_a);
}
} // namespace webrtc

@ -90,17 +90,19 @@ bool RtpDepacketizerGeneric::Parse(ParsedPayload* parsed_payload,
const uint8_t* payload_data,
size_t payload_data_length) {
assert(parsed_payload != NULL);
assert(parsed_payload->header != NULL);
uint8_t generic_header = *payload_data++;
--payload_data_length;
parsed_payload->header->frameType =
parsed_payload->frame_type =
((generic_header & RtpFormatVideoGeneric::kKeyFrameBit) != 0)
? kVideoFrameKey
: kVideoFrameDelta;
parsed_payload->header->type.Video.isFirstPacket =
parsed_payload->type.Video.isFirstPacket =
(generic_header & RtpFormatVideoGeneric::kFirstPacketBit) != 0;
parsed_payload->type.Video.codec = kRtpVideoGeneric;
parsed_payload->type.Video.width = 0;
parsed_payload->type.Video.height = 0;
parsed_payload->payload = payload_data;
parsed_payload->payload_length = payload_data_length;

@ -121,11 +121,11 @@ int ParseVP8Extension(RTPVideoHeaderVP8* vp8,
return parsed_bytes;
}
int ParseVP8FrameSize(WebRtcRTPHeader* rtp_header,
int ParseVP8FrameSize(RtpDepacketizer::ParsedPayload* parsed_payload,
const uint8_t* data,
int data_length) {
assert(rtp_header != NULL);
if (rtp_header->frameType != kVideoFrameKey) {
assert(parsed_payload != NULL);
if (parsed_payload->frame_type != kVideoFrameKey) {
// Included in payload header for I-frames.
return 0;
}
@ -134,8 +134,8 @@ int ParseVP8FrameSize(WebRtcRTPHeader* rtp_header,
// in the beginning of the partition.
return -1;
}
rtp_header->type.Video.width = ((data[7] << 8) + data[6]) & 0x3FFF;
rtp_header->type.Video.height = ((data[9] << 8) + data[8]) & 0x3FFF;
parsed_payload->type.Video.width = ((data[7] << 8) + data[6]) & 0x3FFF;
parsed_payload->type.Video.height = ((data[9] << 8) + data[8]) & 0x3FFF;
return 0;
}
} // namespace
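ParseVP8FrameSize() above reads bytes 6-9 of the first partition because a VP8 key frame begins with a fixed uncompressed header; a simplified sketch of that layout (per RFC 6386, for illustration only):

    // data[0..2]: 3-byte frame tag (key-frame flag, version, show_frame, partition size)
    // data[3..5]: start code 0x9d 0x01 0x2a
    // data[6..7]: little-endian 16 bits = 14-bit width  + 2-bit horizontal scale
    // data[8..9]: little-endian 16 bits = 14-bit height + 2-bit vertical scale
    int width = ((data[7] << 8) | data[6]) & 0x3FFF;
    int height = ((data[9] << 8) | data[8]) & 0x3FFF;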
@ -664,27 +664,27 @@ bool RtpDepacketizerVp8::Parse(ParsedPayload* parsed_payload,
const uint8_t* payload_data,
size_t payload_data_length) {
assert(parsed_payload != NULL);
assert(parsed_payload->header != NULL);
// Parse mandatory first byte of payload descriptor.
bool extension = (*payload_data & 0x80) ? true : false; // X bit
bool beginning_of_partition = (*payload_data & 0x10) ? true : false; // S bit
int partition_id = (*payload_data & 0x0F); // PartID field
parsed_payload->header->type.Video.isFirstPacket =
parsed_payload->type.Video.width = 0;
parsed_payload->type.Video.height = 0;
parsed_payload->type.Video.isFirstPacket =
beginning_of_partition && (partition_id == 0);
parsed_payload->header->type.Video.codecHeader.VP8.nonReference =
parsed_payload->type.Video.codec = kRtpVideoVp8;
parsed_payload->type.Video.codecHeader.VP8.nonReference =
(*payload_data & 0x20) ? true : false; // N bit
parsed_payload->header->type.Video.codecHeader.VP8.partitionId = partition_id;
parsed_payload->header->type.Video.codecHeader.VP8.beginningOfPartition =
parsed_payload->type.Video.codecHeader.VP8.partitionId = partition_id;
parsed_payload->type.Video.codecHeader.VP8.beginningOfPartition =
beginning_of_partition;
parsed_payload->header->type.Video.codecHeader.VP8.pictureId = kNoPictureId;
parsed_payload->header->type.Video.codecHeader.VP8.tl0PicIdx = kNoTl0PicIdx;
parsed_payload->header->type.Video.codecHeader.VP8.temporalIdx =
kNoTemporalIdx;
parsed_payload->header->type.Video.codecHeader.VP8.layerSync = false;
parsed_payload->header->type.Video.codecHeader.VP8.keyIdx = kNoKeyIdx;
parsed_payload->type.Video.codecHeader.VP8.pictureId = kNoPictureId;
parsed_payload->type.Video.codecHeader.VP8.tl0PicIdx = kNoTl0PicIdx;
parsed_payload->type.Video.codecHeader.VP8.temporalIdx = kNoTemporalIdx;
parsed_payload->type.Video.codecHeader.VP8.layerSync = false;
parsed_payload->type.Video.codecHeader.VP8.keyIdx = kNoKeyIdx;
if (partition_id > 8) {
// Weak check for corrupt payload_data: PartID MUST NOT be larger than 8.
@ -697,7 +697,7 @@ bool RtpDepacketizerVp8::Parse(ParsedPayload* parsed_payload,
if (extension) {
const int parsed_bytes =
ParseVP8Extension(&parsed_payload->header->type.Video.codecHeader.VP8,
ParseVP8Extension(&parsed_payload->type.Video.codecHeader.VP8,
payload_data,
payload_data_length);
if (parsed_bytes < 0)
@ -713,14 +713,14 @@ bool RtpDepacketizerVp8::Parse(ParsedPayload* parsed_payload,
// Read P bit from payload header (only at beginning of first partition).
if (payload_data_length > 0 && beginning_of_partition && partition_id == 0) {
parsed_payload->header->frameType =
parsed_payload->frame_type =
(*payload_data & 0x01) ? kVideoFrameDelta : kVideoFrameKey;
} else {
parsed_payload->header->frameType = kVideoFrameDelta;
parsed_payload->frame_type = kVideoFrameDelta;
}
if (0 != ParseVP8FrameSize(
parsed_payload->header, payload_data, payload_data_length)) {
if (ParseVP8FrameSize(parsed_payload, payload_data, payload_data_length) !=
0) {
return false;
}
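For reference, the mandatory first byte of the VP8 payload descriptor that Parse() decodes above, restated with the same masks the code uses (per the VP8 RTP payload draft of the time; a sketch, not the actual implementation):

    //  bit:   7   6   5   4   3..0
    //         X   R   N   S   PartID
    uint8_t descriptor = payload_data[0];
    bool extension = (descriptor & 0x80) != 0;               // X: extended control bits follow
    bool non_reference = (descriptor & 0x20) != 0;           // N: frame is not a reference
    bool beginning_of_partition = (descriptor & 0x10) != 0;  // S: start of a VP8 partition
    int partition_id = descriptor & 0x0F;                    // PartID, MUST NOT exceed 8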

@ -56,24 +56,23 @@ namespace {
// | padding |
// : :
// +-+-+-+-+-+-+-+-+
void VerifyBasicHeader(WebRtcRTPHeader* header, bool N, bool S, int part_id) {
ASSERT_TRUE(header != NULL);
EXPECT_EQ(N, header->type.Video.codecHeader.VP8.nonReference);
EXPECT_EQ(S, header->type.Video.codecHeader.VP8.beginningOfPartition);
EXPECT_EQ(part_id, header->type.Video.codecHeader.VP8.partitionId);
void VerifyBasicHeader(RTPTypeHeader* type, bool N, bool S, int part_id) {
ASSERT_TRUE(type != NULL);
EXPECT_EQ(N, type->Video.codecHeader.VP8.nonReference);
EXPECT_EQ(S, type->Video.codecHeader.VP8.beginningOfPartition);
EXPECT_EQ(part_id, type->Video.codecHeader.VP8.partitionId);
}
void VerifyExtensions(WebRtcRTPHeader* header,
void VerifyExtensions(RTPTypeHeader* type,
int16_t picture_id, /* I */
int16_t tl0_pic_idx, /* L */
uint8_t temporal_idx, /* T */
int key_idx /* K */) {
ASSERT_TRUE(header != NULL);
EXPECT_EQ(picture_id, header->type.Video.codecHeader.VP8.pictureId);
EXPECT_EQ(tl0_pic_idx, header->type.Video.codecHeader.VP8.tl0PicIdx);
EXPECT_EQ(temporal_idx, header->type.Video.codecHeader.VP8.temporalIdx);
EXPECT_EQ(key_idx, header->type.Video.codecHeader.VP8.keyIdx);
ASSERT_TRUE(type != NULL);
EXPECT_EQ(picture_id, type->Video.codecHeader.VP8.pictureId);
EXPECT_EQ(tl0_pic_idx, type->Video.codecHeader.VP8.tl0PicIdx);
EXPECT_EQ(temporal_idx, type->Video.codecHeader.VP8.temporalIdx);
EXPECT_EQ(key_idx, type->Video.codecHeader.VP8.keyIdx);
}
} // namespace
@ -405,18 +404,16 @@ TEST_F(RtpDepacketizerVp8Test, BasicHeader) {
uint8_t packet[4] = {0};
packet[0] = 0x14; // Binary 0001 0100; S = 1, PartID = 4.
packet[1] = 0x01; // P frame.
WebRtcRTPHeader rtp_header;
memset(&rtp_header, 0, sizeof(rtp_header));
RtpDepacketizer::ParsedPayload payload(&rtp_header);
RtpDepacketizer::ParsedPayload payload;
ASSERT_TRUE(depacketizer_->Parse(&payload, packet, sizeof(packet)));
ExpectPacket(
&payload, packet + kHeaderLength, sizeof(packet) - kHeaderLength);
EXPECT_EQ(kVideoFrameDelta, payload.header->frameType);
VerifyBasicHeader(payload.header, 0, 1, 4);
EXPECT_EQ(kVideoFrameDelta, payload.frame_type);
EXPECT_EQ(kRtpVideoVp8, payload.type.Video.codec);
VerifyBasicHeader(&payload.type, 0, 1, 4);
VerifyExtensions(
payload.header, kNoPictureId, kNoTl0PicIdx, kNoTemporalIdx, kNoKeyIdx);
&payload.type, kNoPictureId, kNoTl0PicIdx, kNoTemporalIdx, kNoKeyIdx);
}
TEST_F(RtpDepacketizerVp8Test, PictureID) {
@ -427,29 +424,27 @@ TEST_F(RtpDepacketizerVp8Test, PictureID) {
packet[0] = 0xA0;
packet[1] = 0x80;
packet[2] = kPictureId;
WebRtcRTPHeader rtp_header;
memset(&rtp_header, 0, sizeof(rtp_header));
RtpDepacketizer::ParsedPayload payload(&rtp_header);
RtpDepacketizer::ParsedPayload payload;
ASSERT_TRUE(depacketizer_->Parse(&payload, packet, sizeof(packet)));
ExpectPacket(
&payload, packet + kHeaderLength1, sizeof(packet) - kHeaderLength1);
EXPECT_EQ(kVideoFrameDelta, payload.header->frameType);
VerifyBasicHeader(payload.header, 1, 0, 0);
EXPECT_EQ(kVideoFrameDelta, payload.frame_type);
EXPECT_EQ(kRtpVideoVp8, payload.type.Video.codec);
VerifyBasicHeader(&payload.type, 1, 0, 0);
VerifyExtensions(
payload.header, kPictureId, kNoTl0PicIdx, kNoTemporalIdx, kNoKeyIdx);
&payload.type, kPictureId, kNoTl0PicIdx, kNoTemporalIdx, kNoKeyIdx);
// Re-use packet, but change to long PictureID.
packet[2] = 0x80 | kPictureId;
packet[3] = kPictureId;
memset(payload.header, 0, sizeof(rtp_header));
payload = RtpDepacketizer::ParsedPayload();
ASSERT_TRUE(depacketizer_->Parse(&payload, packet, sizeof(packet)));
ExpectPacket(
&payload, packet + kHeaderLength2, sizeof(packet) - kHeaderLength2);
VerifyBasicHeader(payload.header, 1, 0, 0);
VerifyExtensions(payload.header,
VerifyBasicHeader(&payload.type, 1, 0, 0);
VerifyExtensions(&payload.type,
(kPictureId << 8) + kPictureId,
kNoTl0PicIdx,
kNoTemporalIdx,
@ -463,18 +458,16 @@ TEST_F(RtpDepacketizerVp8Test, Tl0PicIdx) {
packet[0] = 0x90;
packet[1] = 0x40;
packet[2] = kTl0PicIdx;
WebRtcRTPHeader rtp_header;
memset(&rtp_header, 0, sizeof(rtp_header));
RtpDepacketizer::ParsedPayload payload(&rtp_header);
RtpDepacketizer::ParsedPayload payload;
ASSERT_TRUE(depacketizer_->Parse(&payload, packet, sizeof(packet)));
ExpectPacket(
&payload, packet + kHeaderLength, sizeof(packet) - kHeaderLength);
EXPECT_EQ(kVideoFrameKey, payload.header->frameType);
VerifyBasicHeader(payload.header, 0, 1, 0);
EXPECT_EQ(kVideoFrameKey, payload.frame_type);
EXPECT_EQ(kRtpVideoVp8, payload.type.Video.codec);
VerifyBasicHeader(&payload.type, 0, 1, 0);
VerifyExtensions(
payload.header, kNoPictureId, kTl0PicIdx, kNoTemporalIdx, kNoKeyIdx);
&payload.type, kNoPictureId, kTl0PicIdx, kNoTemporalIdx, kNoKeyIdx);
}
TEST_F(RtpDepacketizerVp8Test, TIDAndLayerSync) {
@ -483,18 +476,16 @@ TEST_F(RtpDepacketizerVp8Test, TIDAndLayerSync) {
packet[0] = 0x88;
packet[1] = 0x20;
packet[2] = 0x80; // TID(2) + LayerSync(false)
WebRtcRTPHeader rtp_header;
memset(&rtp_header, 0, sizeof(rtp_header));
RtpDepacketizer::ParsedPayload payload(&rtp_header);
RtpDepacketizer::ParsedPayload payload;
ASSERT_TRUE(depacketizer_->Parse(&payload, packet, sizeof(packet)));
ExpectPacket(
&payload, packet + kHeaderLength, sizeof(packet) - kHeaderLength);
EXPECT_EQ(kVideoFrameDelta, payload.header->frameType);
VerifyBasicHeader(payload.header, 0, 0, 8);
VerifyExtensions(payload.header, kNoPictureId, kNoTl0PicIdx, 2, kNoKeyIdx);
EXPECT_FALSE(payload.header->type.Video.codecHeader.VP8.layerSync);
EXPECT_EQ(kVideoFrameDelta, payload.frame_type);
EXPECT_EQ(kRtpVideoVp8, payload.type.Video.codec);
VerifyBasicHeader(&payload.type, 0, 0, 8);
VerifyExtensions(&payload.type, kNoPictureId, kNoTl0PicIdx, 2, kNoKeyIdx);
EXPECT_FALSE(payload.type.Video.codecHeader.VP8.layerSync);
}
TEST_F(RtpDepacketizerVp8Test, KeyIdx) {
@ -504,18 +495,16 @@ TEST_F(RtpDepacketizerVp8Test, KeyIdx) {
packet[0] = 0x88;
packet[1] = 0x10; // K = 1.
packet[2] = kKeyIdx;
WebRtcRTPHeader rtp_header;
memset(&rtp_header, 0, sizeof(rtp_header));
RtpDepacketizer::ParsedPayload payload(&rtp_header);
RtpDepacketizer::ParsedPayload payload;
ASSERT_TRUE(depacketizer_->Parse(&payload, packet, sizeof(packet)));
ExpectPacket(
&payload, packet + kHeaderLength, sizeof(packet) - kHeaderLength);
EXPECT_EQ(kVideoFrameDelta, payload.header->frameType);
VerifyBasicHeader(payload.header, 0, 0, 8);
EXPECT_EQ(kVideoFrameDelta, payload.frame_type);
EXPECT_EQ(kRtpVideoVp8, payload.type.Video.codec);
VerifyBasicHeader(&payload.type, 0, 0, 8);
VerifyExtensions(
payload.header, kNoPictureId, kNoTl0PicIdx, kNoTemporalIdx, kKeyIdx);
&payload.type, kNoPictureId, kNoTl0PicIdx, kNoTemporalIdx, kKeyIdx);
}
TEST_F(RtpDepacketizerVp8Test, MultipleExtensions) {
@ -527,17 +516,15 @@ TEST_F(RtpDepacketizerVp8Test, MultipleExtensions) {
packet[3] = 17; // PictureID, low 8 bits.
packet[4] = 42; // Tl0PicIdx.
packet[5] = 0x40 | 0x20 | 0x11; // TID(1) + LayerSync(true) + KEYIDX(17).
WebRtcRTPHeader rtp_header;
memset(&rtp_header, 0, sizeof(rtp_header));
RtpDepacketizer::ParsedPayload payload(&rtp_header);
RtpDepacketizer::ParsedPayload payload;
ASSERT_TRUE(depacketizer_->Parse(&payload, packet, sizeof(packet)));
ExpectPacket(
&payload, packet + kHeaderLength, sizeof(packet) - kHeaderLength);
EXPECT_EQ(kVideoFrameDelta, payload.header->frameType);
VerifyBasicHeader(payload.header, 0, 0, 8);
VerifyExtensions(payload.header, (17 << 8) + 17, 42, 1, 17);
EXPECT_EQ(kVideoFrameDelta, payload.frame_type);
EXPECT_EQ(kRtpVideoVp8, payload.type.Video.codec);
VerifyBasicHeader(&payload.type, 0, 0, 8);
VerifyExtensions(&payload.type, (17 << 8) + 17, 42, 1, 17);
}
TEST_F(RtpDepacketizerVp8Test, TooShortHeader) {
@ -546,10 +533,7 @@ TEST_F(RtpDepacketizerVp8Test, TooShortHeader) {
packet[1] = 0x80 | 0x40 | 0x20 | 0x10; // All extensions are enabled...
packet[2] = 0x80 | 17; // ... but only 2 bytes PictureID is provided.
packet[3] = 17; // PictureID, low 8 bits.
WebRtcRTPHeader rtp_header;
memset(&rtp_header, 0, sizeof(rtp_header));
RtpDepacketizer::ParsedPayload payload(&rtp_header);
RtpDepacketizer::ParsedPayload payload;
EXPECT_FALSE(depacketizer_->Parse(&payload, packet, sizeof(packet)));
}
@ -571,23 +555,20 @@ TEST_F(RtpDepacketizerVp8Test, TestWithPacketizer) {
size_t send_bytes;
ASSERT_TRUE(packetizer.NextPacket(packet, &send_bytes, &last));
ASSERT_TRUE(last);
WebRtcRTPHeader rtp_header;
memset(&rtp_header, 0, sizeof(rtp_header));
RtpDepacketizer::ParsedPayload payload(&rtp_header);
RtpDepacketizer::ParsedPayload payload;
ASSERT_TRUE(depacketizer_->Parse(&payload, packet, sizeof(packet)));
ExpectPacket(
&payload, packet + kHeaderLength, sizeof(packet) - kHeaderLength);
EXPECT_EQ(kVideoFrameKey, payload.header->frameType);
VerifyBasicHeader(payload.header, 1, 1, 0);
VerifyExtensions(payload.header,
EXPECT_EQ(kVideoFrameKey, payload.frame_type);
EXPECT_EQ(kRtpVideoVp8, payload.type.Video.codec);
VerifyBasicHeader(&payload.type, 1, 1, 0);
VerifyExtensions(&payload.type,
input_header.pictureId,
input_header.tl0PicIdx,
input_header.temporalIdx,
input_header.keyIdx);
EXPECT_EQ(payload.header->type.Video.codecHeader.VP8.layerSync,
EXPECT_EQ(payload.type.Video.codecHeader.VP8.layerSync,
input_header.layerSync);
}
} // namespace webrtc

@ -79,13 +79,15 @@ int32_t RTPReceiverVideo::ParseRtpPacket(WebRtcRTPHeader* rtp_header,
}
rtp_header->type.Video.isFirstPacket = is_first_packet;
RtpDepacketizer::ParsedPayload parsed_payload(rtp_header);
RtpDepacketizer::ParsedPayload parsed_payload;
if (!depacketizer->Parse(&parsed_payload, payload, payload_data_length))
return -1;
rtp_header->frameType = parsed_payload.frame_type;
rtp_header->type = parsed_payload.type;
return data_callback_->OnReceivedPayloadData(parsed_payload.payload,
parsed_payload.payload_length,
parsed_payload.header) == 0
rtp_header) == 0
? 0
: -1;
}