Sets up a framework for decoding with errors: collects frame sizes (in number of packets) in the jitter buffer (JB) and passes this information, together with rtt_ms, to VCMSessionInfo as a FrameData struct.
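
A minimal, self-contained sketch of the data flow this introduces (the FrameData fields are copied from session_info.h below; the stub function and the sample values are illustrative only, not the real VCM classes):

#include <cstdio>

// Copied from session_info.h below: data handed from the jitter buffer to
// VCMSessionInfo when a packet is inserted.
struct FrameData {
  int rtt_ms;
  float rolling_average_packets_per_frame;
};

// Stand-in for VCMSessionInfo::InsertPacket(): previously took a bare
// rtt value, now takes the whole FrameData struct.
void InsertPacketStub(const FrameData& frame_data) {
  std::printf("rtt=%d ms, avg packets/frame=%.1f\n",
              frame_data.rtt_ms,
              frame_data.rolling_average_packets_per_frame);
}

int main() {
  // Jitter-buffer side (see VCMJitterBuffer::InsertPacket below): fill the
  // struct from the JB's own state and pass it down.
  FrameData frame_data;
  frame_data.rtt_ms = 120;                               // rtt_ms_
  frame_data.rolling_average_packets_per_frame = 10.0f;  // average_packets_per_frame_
  InsertPacketStub(frame_data);
  return 0;
}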

R=marpan@google.com, mikhal@webrtc.org

Review URL: https://webrtc-codereview.appspot.com/1841004

git-svn-id: http://webrtc.googlecode.com/svn/trunk@4424 4adac7df-926f-26a2-2b94-8c16560cd09d
agalusza@google.com 2013-07-29 21:48:11 +00:00
parent a0b2f1794b
commit d818dcb939
10 changed files with 309 additions and 233 deletions

View File

@ -39,38 +39,41 @@ TEST(TestDecodingState, FrameContinuity) {
packet->frameType = kVideoFrameDelta;
packet->codecSpecificHeader.codec = kRTPVideoVP8;
packet->codecSpecificHeader.codecHeader.VP8.pictureId = 0x007F;
frame.InsertPacket(*packet, 0, false, 0);
FrameData frame_data;
frame_data.rtt_ms = 0;
frame_data.rolling_average_packets_per_frame = -1;
frame.InsertPacket(*packet, 0, false, frame_data);
// Always start with a key frame.
dec_state.Reset();
EXPECT_FALSE(dec_state.ContinuousFrame(&frame));
packet->frameType = kVideoFrameKey;
frame_key.InsertPacket(*packet, 0, false, 0);
frame_key.InsertPacket(*packet, 0, false, frame_data);
EXPECT_TRUE(dec_state.ContinuousFrame(&frame_key));
dec_state.SetState(&frame);
frame.Reset();
packet->frameType = kVideoFrameDelta;
// Use pictureId
packet->codecSpecificHeader.codecHeader.VP8.pictureId = 0x0002;
frame.InsertPacket(*packet, 0, false, 0);
frame.InsertPacket(*packet, 0, false, frame_data);
EXPECT_FALSE(dec_state.ContinuousFrame(&frame));
frame.Reset();
packet->codecSpecificHeader.codecHeader.VP8.pictureId = 0;
packet->seqNum = 10;
frame.InsertPacket(*packet, 0, false, 0);
frame.InsertPacket(*packet, 0, false, frame_data);
EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
// Use sequence numbers.
packet->codecSpecificHeader.codecHeader.VP8.pictureId = kNoPictureId;
frame.Reset();
packet->seqNum = dec_state.sequence_num() - 1u;
frame.InsertPacket(*packet, 0, false, 0);
frame.InsertPacket(*packet, 0, false, frame_data);
EXPECT_FALSE(dec_state.ContinuousFrame(&frame));
frame.Reset();
packet->seqNum = dec_state.sequence_num() + 1u;
frame.InsertPacket(*packet, 0, false, 0);
frame.InsertPacket(*packet, 0, false, frame_data);
// Insert another packet to this frame
packet->seqNum++;
frame.InsertPacket(*packet, 0, false, 0);
frame.InsertPacket(*packet, 0, false, frame_data);
// Verify wrap.
EXPECT_EQ(dec_state.sequence_num(), 0xffff);
EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
@ -85,7 +88,7 @@ TEST(TestDecodingState, FrameContinuity) {
packet->seqNum = 1;
packet->timestamp = 1;
EXPECT_TRUE(dec_state.full_sync());
frame.InsertPacket(*packet, 0, false, 0);
frame.InsertPacket(*packet, 0, false, frame_data);
dec_state.SetState(&frame);
EXPECT_TRUE(dec_state.full_sync());
frame.Reset();
@ -95,7 +98,7 @@ TEST(TestDecodingState, FrameContinuity) {
packet->codecSpecificHeader.codecHeader.VP8.pictureId = 1;
packet->seqNum = 2;
packet->timestamp = 2;
frame.InsertPacket(*packet, 0, false, 0);
frame.InsertPacket(*packet, 0, false, frame_data);
EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
dec_state.SetState(&frame);
EXPECT_TRUE(dec_state.full_sync());
@ -106,7 +109,7 @@ TEST(TestDecodingState, FrameContinuity) {
packet->codecSpecificHeader.codecHeader.VP8.pictureId = 3;
packet->seqNum = 4;
packet->timestamp = 4;
frame.InsertPacket(*packet, 0, false, 0);
frame.InsertPacket(*packet, 0, false, frame_data);
EXPECT_FALSE(dec_state.ContinuousFrame(&frame));
// Now insert the next non-base layer (belonging to a next tl0PicId).
frame.Reset();
@ -115,7 +118,7 @@ TEST(TestDecodingState, FrameContinuity) {
packet->codecSpecificHeader.codecHeader.VP8.pictureId = 4;
packet->seqNum = 5;
packet->timestamp = 5;
frame.InsertPacket(*packet, 0, false, 0);
frame.InsertPacket(*packet, 0, false, frame_data);
// Checking continuity and not updating the state - this should not trigger
// an update of sync state.
EXPECT_FALSE(dec_state.ContinuousFrame(&frame));
@ -127,7 +130,7 @@ TEST(TestDecodingState, FrameContinuity) {
packet->codecSpecificHeader.codecHeader.VP8.pictureId = 5;
packet->seqNum = 6;
packet->timestamp = 6;
frame.InsertPacket(*packet, 0, false, 0);
frame.InsertPacket(*packet, 0, false, frame_data);
EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
dec_state.SetState(&frame);
EXPECT_FALSE(dec_state.full_sync());
@ -139,7 +142,7 @@ TEST(TestDecodingState, FrameContinuity) {
packet->codecSpecificHeader.codecHeader.VP8.pictureId = 6;
packet->seqNum = 7;
packet->timestamp = 7;
frame.InsertPacket(*packet, 0, false, 0);
frame.InsertPacket(*packet, 0, false, frame_data);
dec_state.SetState(&frame);
EXPECT_FALSE(dec_state.full_sync());
frame.Reset();
@ -148,7 +151,7 @@ TEST(TestDecodingState, FrameContinuity) {
packet->codecSpecificHeader.codecHeader.VP8.pictureId = 7;
packet->seqNum = 8;
packet->timestamp = 8;
frame.InsertPacket(*packet, 0, false, 0);
frame.InsertPacket(*packet, 0, false, frame_data);
EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
// The current frame is not continuous
dec_state.SetState(&frame);
@ -165,7 +168,10 @@ TEST(TestDecodingState, UpdateOldPacket) {
packet->timestamp = 1;
packet->seqNum = 1;
packet->frameType = kVideoFrameDelta;
frame.InsertPacket(*packet, 0, false, 0);
FrameData frame_data;
frame_data.rtt_ms = 0;
frame_data.rolling_average_packets_per_frame = -1;
frame.InsertPacket(*packet, 0, false, frame_data);
dec_state.SetState(&frame);
EXPECT_EQ(dec_state.sequence_num(), 1);
// Insert an empty packet that does not belong to the same frame.
@ -214,7 +220,10 @@ TEST(TestDecodingState, MultiLayerBehavior) {
packet->codecSpecificHeader.codecHeader.VP8.tl0PicIdx = 0;
packet->codecSpecificHeader.codecHeader.VP8.temporalIdx = 0;
packet->codecSpecificHeader.codecHeader.VP8.pictureId = 0;
frame.InsertPacket(*packet, 0, false, 0);
FrameData frame_data;
frame_data.rtt_ms = 0;
frame_data.rolling_average_packets_per_frame = -1;
frame.InsertPacket(*packet, 0, false, frame_data);
dec_state.SetState(&frame);
// tl0PicIdx 0, temporal id 1.
frame.Reset();
@ -223,7 +232,7 @@ TEST(TestDecodingState, MultiLayerBehavior) {
packet->codecSpecificHeader.codecHeader.VP8.tl0PicIdx = 0;
packet->codecSpecificHeader.codecHeader.VP8.temporalIdx = 1;
packet->codecSpecificHeader.codecHeader.VP8.pictureId = 1;
frame.InsertPacket(*packet, 0, false, 0);
frame.InsertPacket(*packet, 0, false, frame_data);
EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
dec_state.SetState(&frame);
EXPECT_TRUE(dec_state.full_sync());
@ -235,7 +244,7 @@ TEST(TestDecodingState, MultiLayerBehavior) {
packet->codecSpecificHeader.codecHeader.VP8.tl0PicIdx = 0;
packet->codecSpecificHeader.codecHeader.VP8.temporalIdx = 3;
packet->codecSpecificHeader.codecHeader.VP8.pictureId = 3;
frame.InsertPacket(*packet, 0, false, 0);
frame.InsertPacket(*packet, 0, false, frame_data);
EXPECT_FALSE(dec_state.ContinuousFrame(&frame));
dec_state.SetState(&frame);
EXPECT_FALSE(dec_state.full_sync());
@ -246,7 +255,7 @@ TEST(TestDecodingState, MultiLayerBehavior) {
packet->codecSpecificHeader.codecHeader.VP8.tl0PicIdx = 1;
packet->codecSpecificHeader.codecHeader.VP8.temporalIdx = 0;
packet->codecSpecificHeader.codecHeader.VP8.pictureId = 4;
frame.InsertPacket(*packet, 0, false, 0);
frame.InsertPacket(*packet, 0, false, frame_data);
EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
dec_state.SetState(&frame);
EXPECT_FALSE(dec_state.full_sync());
@ -260,7 +269,7 @@ TEST(TestDecodingState, MultiLayerBehavior) {
packet->codecSpecificHeader.codecHeader.VP8.tl0PicIdx = 2;
packet->codecSpecificHeader.codecHeader.VP8.temporalIdx = 0;
packet->codecSpecificHeader.codecHeader.VP8.pictureId = 5;
frame.InsertPacket(*packet, 0, false, 0);
frame.InsertPacket(*packet, 0, false, frame_data);
EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
dec_state.SetState(&frame);
EXPECT_TRUE(dec_state.full_sync());
@ -273,7 +282,7 @@ TEST(TestDecodingState, MultiLayerBehavior) {
packet->codecSpecificHeader.codecHeader.VP8.tl0PicIdx = 3;
packet->codecSpecificHeader.codecHeader.VP8.temporalIdx = 0;
packet->codecSpecificHeader.codecHeader.VP8.pictureId = 6;
frame.InsertPacket(*packet, 0, false, 0);
frame.InsertPacket(*packet, 0, false, frame_data);
EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
EXPECT_TRUE(dec_state.full_sync());
frame.Reset();
@ -284,7 +293,7 @@ TEST(TestDecodingState, MultiLayerBehavior) {
packet->codecSpecificHeader.codecHeader.VP8.tl0PicIdx = 4;
packet->codecSpecificHeader.codecHeader.VP8.temporalIdx = 0;
packet->codecSpecificHeader.codecHeader.VP8.pictureId = 8;
frame.InsertPacket(*packet, 0, false, 0);
frame.InsertPacket(*packet, 0, false, frame_data);
EXPECT_FALSE(dec_state.ContinuousFrame(&frame));
EXPECT_TRUE(dec_state.full_sync());
dec_state.SetState(&frame);
@ -300,7 +309,7 @@ TEST(TestDecodingState, MultiLayerBehavior) {
packet->codecSpecificHeader.codecHeader.VP8.temporalIdx = 2;
packet->codecSpecificHeader.codecHeader.VP8.pictureId = 9;
packet->codecSpecificHeader.codecHeader.VP8.layerSync = true;
frame.InsertPacket(*packet, 0, false, 0);
frame.InsertPacket(*packet, 0, false, frame_data);
dec_state.SetState(&frame);
EXPECT_TRUE(dec_state.full_sync());
@ -321,7 +330,7 @@ TEST(TestDecodingState, MultiLayerBehavior) {
packet->codecSpecificHeader.codecHeader.VP8.temporalIdx = 0;
packet->codecSpecificHeader.codecHeader.VP8.pictureId = 0;
packet->codecSpecificHeader.codecHeader.VP8.layerSync = false;
frame.InsertPacket(*packet, 0, false, 0);
frame.InsertPacket(*packet, 0, false, frame_data);
dec_state.SetState(&frame);
EXPECT_TRUE(dec_state.full_sync());
// Layer 2 - 2 packets (insert one, lose one).
@ -335,7 +344,7 @@ TEST(TestDecodingState, MultiLayerBehavior) {
packet->codecSpecificHeader.codecHeader.VP8.temporalIdx = 2;
packet->codecSpecificHeader.codecHeader.VP8.pictureId = 1;
packet->codecSpecificHeader.codecHeader.VP8.layerSync = true;
frame.InsertPacket(*packet, 0, false, 0);
frame.InsertPacket(*packet, 0, false, frame_data);
EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
// Layer 1
frame.Reset();
@ -348,7 +357,7 @@ TEST(TestDecodingState, MultiLayerBehavior) {
packet->codecSpecificHeader.codecHeader.VP8.temporalIdx = 1;
packet->codecSpecificHeader.codecHeader.VP8.pictureId = 2;
packet->codecSpecificHeader.codecHeader.VP8.layerSync = true;
frame.InsertPacket(*packet, 0, false, 0);
frame.InsertPacket(*packet, 0, false, frame_data);
EXPECT_FALSE(dec_state.ContinuousFrame(&frame));
EXPECT_TRUE(dec_state.full_sync());
@ -367,7 +376,10 @@ TEST(TestDecodingState, DiscontinuousPicIdContinuousSeqNum) {
packet.codecSpecificHeader.codecHeader.VP8.tl0PicIdx = 0;
packet.codecSpecificHeader.codecHeader.VP8.temporalIdx = 0;
packet.codecSpecificHeader.codecHeader.VP8.pictureId = 0;
frame.InsertPacket(packet, 0, false, 0);
FrameData frame_data;
frame_data.rtt_ms = 0;
frame_data.rolling_average_packets_per_frame = -1;
frame.InsertPacket(packet, 0, false, frame_data);
dec_state.SetState(&frame);
EXPECT_TRUE(dec_state.full_sync());
@ -379,7 +391,7 @@ TEST(TestDecodingState, DiscontinuousPicIdContinuousSeqNum) {
++packet.seqNum;
packet.codecSpecificHeader.codecHeader.VP8.temporalIdx = 1;
packet.codecSpecificHeader.codecHeader.VP8.pictureId = 2;
frame.InsertPacket(packet, 0, false, 0);
frame.InsertPacket(packet, 0, false, frame_data);
EXPECT_FALSE(dec_state.ContinuousFrame(&frame));
dec_state.SetState(&frame);
EXPECT_FALSE(dec_state.full_sync());
@ -393,13 +405,16 @@ TEST(TestDecodingState, OldInput) {
VCMPacket* packet = new VCMPacket();
packet->timestamp = 10;
packet->seqNum = 1;
frame.InsertPacket(*packet, 0, false, 0);
FrameData frame_data;
frame_data.rtt_ms = 0;
frame_data.rolling_average_packets_per_frame = -1;
frame.InsertPacket(*packet, 0, false, frame_data);
dec_state.SetState(&frame);
packet->timestamp = 9;
EXPECT_TRUE(dec_state.IsOldPacket(packet));
// Check for old frame
frame.Reset();
frame.InsertPacket(*packet, 0, false, 0);
frame.InsertPacket(*packet, 0, false, frame_data);
EXPECT_TRUE(dec_state.IsOldFrame(&frame));

View File

@ -9,11 +9,12 @@
*/
#include "webrtc/modules/video_coding/main/source/frame_buffer.h"
#include "webrtc/modules/video_coding/main/source/packet.h"
#include <cassert>
#include <string.h>
#include "webrtc/modules/video_coding/main/source/packet.h"
namespace webrtc {
VCMFrameBuffer::VCMFrameBuffer()
@ -27,34 +28,30 @@ VCMFrameBuffer::VCMFrameBuffer()
VCMFrameBuffer::~VCMFrameBuffer() {
}
VCMFrameBuffer::VCMFrameBuffer(VCMFrameBuffer& rhs)
VCMFrameBuffer::VCMFrameBuffer(const VCMFrameBuffer& rhs)
:
VCMEncodedFrame(rhs),
_state(rhs._state),
_frameCounted(rhs._frameCounted),
_sessionInfo(),
_nackCount(rhs._nackCount),
_latestPacketTimeMs(rhs._latestPacketTimeMs)
{
_latestPacketTimeMs(rhs._latestPacketTimeMs) {
_sessionInfo = rhs._sessionInfo;
_sessionInfo.UpdateDataPointers(rhs._buffer, _buffer);
}
webrtc::FrameType
VCMFrameBuffer::FrameType() const
{
VCMFrameBuffer::FrameType() const {
return _sessionInfo.FrameType();
}
int32_t
VCMFrameBuffer::GetLowSeqNum() const
{
VCMFrameBuffer::GetLowSeqNum() const {
return _sessionInfo.LowSequenceNumber();
}
int32_t
VCMFrameBuffer::GetHighSeqNum() const
{
VCMFrameBuffer::GetHighSeqNum() const {
return _sessionInfo.HighSequenceNumber();
}
@ -79,46 +76,39 @@ bool VCMFrameBuffer::NonReference() const {
}
bool
VCMFrameBuffer::IsSessionComplete() const
{
VCMFrameBuffer::IsSessionComplete() const {
return _sessionInfo.complete();
}
// Insert packet
VCMFrameBufferEnum
VCMFrameBuffer::InsertPacket(const VCMPacket& packet, int64_t timeInMs,
bool enableDecodableState, uint32_t rttMS)
{
bool enableDecodableState,
const FrameData& frame_data) {
// is this packet part of this frame
if (TimeStamp() && (TimeStamp() != packet.timestamp))
{
if (TimeStamp() && (TimeStamp() != packet.timestamp)) {
return kTimeStampError;
}
// sanity checks
if (_size + packet.sizeBytes +
(packet.insertStartCode ? kH264StartCodeLengthBytes : 0 )
> kMaxJBFrameSizeBytes)
{
> kMaxJBFrameSizeBytes) {
return kSizeError;
}
if (NULL == packet.dataPtr && packet.sizeBytes > 0)
{
if (NULL == packet.dataPtr && packet.sizeBytes > 0) {
return kSizeError;
}
if (packet.dataPtr != NULL)
{
if (packet.dataPtr != NULL) {
_payloadType = packet.payloadType;
}
if (kStateEmpty == _state)
{
if (kStateEmpty == _state) {
// First packet (empty and/or media) inserted into this frame.
// store some info and set some initial values.
_timeStamp = packet.timestamp;
_codec = packet.codec;
if (packet.frameType != kFrameEmpty)
{
if (packet.frameType != kFrameEmpty) {
// first media packet
SetState(kStateIncomplete);
}
@ -126,8 +116,7 @@ VCMFrameBuffer::InsertPacket(const VCMPacket& packet, int64_t timeInMs,
uint32_t requiredSizeBytes = Length() + packet.sizeBytes +
(packet.insertStartCode ? kH264StartCodeLengthBytes : 0);
if (requiredSizeBytes >= _size)
{
if (requiredSizeBytes >= _size) {
const uint8_t* prevBuffer = _buffer;
const uint32_t increments = requiredSizeBytes /
kBufferIncStepSizeBytes +
@ -135,12 +124,10 @@ VCMFrameBuffer::InsertPacket(const VCMPacket& packet, int64_t timeInMs,
kBufferIncStepSizeBytes > 0);
const uint32_t newSize = _size +
increments * kBufferIncStepSizeBytes;
if (newSize > kMaxJBFrameSizeBytes)
{
if (newSize > kMaxJBFrameSizeBytes) {
return kSizeError;
}
if (VerifyAndAllocate(newSize) == -1)
{
if (VerifyAndAllocate(newSize) == -1) {
return kSizeError;
}
_sessionInfo.UpdateDataPointers(prevBuffer, _buffer);
@ -155,13 +142,10 @@ VCMFrameBuffer::InsertPacket(const VCMPacket& packet, int64_t timeInMs,
int retVal = _sessionInfo.InsertPacket(packet, _buffer,
enableDecodableState,
rttMS);
if (retVal == -1)
{
frame_data);
if (retVal == -1) {
return kSizeError;
}
else if (retVal == -2)
{
} else if (retVal == -2) {
return kDuplicatePacket;
}
// update length
@ -180,38 +164,37 @@ VCMFrameBuffer::InsertPacket(const VCMPacket& packet, int64_t timeInMs,
}
int64_t
VCMFrameBuffer::LatestPacketTimeMs() const
{
VCMFrameBuffer::LatestPacketTimeMs() const {
return _latestPacketTimeMs;
}
void
VCMFrameBuffer::IncrementNackCount()
{
VCMFrameBuffer::IncrementNackCount() {
_nackCount++;
}
int16_t
VCMFrameBuffer::GetNackCount() const
{
VCMFrameBuffer::GetNackCount() const {
return _nackCount;
}
bool
VCMFrameBuffer::HaveFirstPacket() const
{
VCMFrameBuffer::HaveFirstPacket() const {
return _sessionInfo.HaveFirstPacket();
}
bool
VCMFrameBuffer::HaveLastPacket() const
{
VCMFrameBuffer::HaveLastPacket() const {
return _sessionInfo.HaveLastPacket();
}
int
VCMFrameBuffer::NumPackets() const {
return _sessionInfo.NumPackets();
}
void
VCMFrameBuffer::Reset()
{
VCMFrameBuffer::Reset() {
_length = 0;
_timeStamp = 0;
_sessionInfo.Reset();
@ -225,14 +208,11 @@ VCMFrameBuffer::Reset()
// Set state of frame
void
VCMFrameBuffer::SetState(VCMFrameBufferStateEnum state)
{
if (_state == state)
{
VCMFrameBuffer::SetState(VCMFrameBufferStateEnum state) {
if (_state == state) {
return;
}
switch (state)
{
switch (state) {
case kStateIncomplete:
// we can go to this state from state kStateEmpty
assert(_state == kStateEmpty);
@ -261,8 +241,7 @@ VCMFrameBuffer::SetState(VCMFrameBufferStateEnum state)
}
int32_t
VCMFrameBuffer::ExtractFromStorage(const EncodedVideoData& frameFromStorage)
{
VCMFrameBuffer::ExtractFromStorage(const EncodedVideoData& frameFromStorage) {
_frameType = ConvertFrameType(frameFromStorage.frameType);
_timeStamp = frameFromStorage.timeStamp;
_payloadType = frameFromStorage.payloadType;
@ -273,8 +252,7 @@ VCMFrameBuffer::ExtractFromStorage(const EncodedVideoData& frameFromStorage)
_renderTimeMs = frameFromStorage.renderTimeMs;
_codec = frameFromStorage.codec;
const uint8_t *prevBuffer = _buffer;
if (VerifyAndAllocate(frameFromStorage.payloadSize) < 0)
{
if (VerifyAndAllocate(frameFromStorage.payloadSize) < 0) {
return VCM_MEMORY;
}
_sessionInfo.UpdateDataPointers(prevBuffer, _buffer);
@ -288,43 +266,36 @@ int VCMFrameBuffer::NotDecodablePackets() const {
}
// Set counted status (as counted by JB or not)
void VCMFrameBuffer::SetCountedFrame(bool frameCounted)
{
void VCMFrameBuffer::SetCountedFrame(bool frameCounted) {
_frameCounted = frameCounted;
}
bool VCMFrameBuffer::GetCountedFrame() const
{
bool VCMFrameBuffer::GetCountedFrame() const {
return _frameCounted;
}
// Get current state of frame
VCMFrameBufferStateEnum
VCMFrameBuffer::GetState() const
{
VCMFrameBuffer::GetState() const {
return _state;
}
// Get current state of frame
VCMFrameBufferStateEnum
VCMFrameBuffer::GetState(uint32_t& timeStamp) const
{
VCMFrameBuffer::GetState(uint32_t& timeStamp) const {
timeStamp = TimeStamp();
return GetState();
}
bool
VCMFrameBuffer::IsRetransmitted() const
{
VCMFrameBuffer::IsRetransmitted() const {
return _sessionInfo.session_nack();
}
void
VCMFrameBuffer::PrepareForDecode(bool continuous)
{
VCMFrameBuffer::PrepareForDecode(bool continuous) {
#ifdef INDEPENDENT_PARTITIONS
if (_codec == kVideoCodecVP8)
{
if (_codec == kVideoCodecVP8) {
_length =
_sessionInfo.BuildVP8FragmentationHeader(_buffer, _length,
&_fragmentation);
@ -343,4 +314,4 @@ VCMFrameBuffer::PrepareForDecode(bool continuous)
_missingFrame = !continuous;
}
}
} // namespace webrtc

View File

@ -8,8 +8,8 @@
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_MODULES_VIDEO_CODING_FRAME_BUFFER_H_
#define WEBRTC_MODULES_VIDEO_CODING_FRAME_BUFFER_H_
#ifndef WEBRTC_MODULES_VIDEO_CODING_MAIN_SOURCE_FRAME_BUFFER_H_
#define WEBRTC_MODULES_VIDEO_CODING_MAIN_SOURCE_FRAME_BUFFER_H_
#include "webrtc/modules/interface/module_common_types.h"
#include "webrtc/modules/video_coding/main/source/encoded_frame.h"
@ -17,82 +17,81 @@
#include "webrtc/modules/video_coding/main/source/session_info.h"
#include "webrtc/typedefs.h"
namespace webrtc
{
namespace webrtc {
class VCMFrameBuffer : public VCMEncodedFrame
{
public:
VCMFrameBuffer();
virtual ~VCMFrameBuffer();
class VCMFrameBuffer : public VCMEncodedFrame {
public:
VCMFrameBuffer();
virtual ~VCMFrameBuffer();
VCMFrameBuffer(VCMFrameBuffer& rhs);
VCMFrameBuffer(const VCMFrameBuffer& rhs);
virtual void Reset();
virtual void Reset();
VCMFrameBufferEnum InsertPacket(const VCMPacket& packet,
int64_t timeInMs,
bool enableDecodableState,
uint32_t rttMs);
VCMFrameBufferEnum InsertPacket(const VCMPacket& packet,
int64_t timeInMs,
bool enableDecodableState,
const FrameData& frame_data);
// State
// Get current state of frame
VCMFrameBufferStateEnum GetState() const;
// Get current state and timestamp of frame
VCMFrameBufferStateEnum GetState(uint32_t& timeStamp) const;
void PrepareForDecode(bool continuous);
// State
// Get current state of frame
VCMFrameBufferStateEnum GetState() const;
// Get current state and timestamp of frame
VCMFrameBufferStateEnum GetState(uint32_t& timeStamp) const;
void PrepareForDecode(bool continuous);
bool IsRetransmitted() const;
bool IsSessionComplete() const;
bool HaveFirstPacket() const;
bool HaveLastPacket() const;
// Makes sure the session contain a decodable stream.
void MakeSessionDecodable();
bool IsRetransmitted() const;
bool IsSessionComplete() const;
bool HaveFirstPacket() const;
bool HaveLastPacket() const;
int NumPackets() const;
// Makes sure the session contain a decodable stream.
void MakeSessionDecodable();
// Sequence numbers
// Get lowest packet sequence number in frame
int32_t GetLowSeqNum() const;
// Get highest packet sequence number in frame
int32_t GetHighSeqNum() const;
// Sequence numbers
// Get lowest packet sequence number in frame
int32_t GetLowSeqNum() const;
// Get highest packet sequence number in frame
int32_t GetHighSeqNum() const;
int PictureId() const;
int TemporalId() const;
bool LayerSync() const;
int Tl0PicId() const;
bool NonReference() const;
int PictureId() const;
int TemporalId() const;
bool LayerSync() const;
int Tl0PicId() const;
bool NonReference() const;
// Set counted status (as counted by JB or not)
void SetCountedFrame(bool frameCounted);
bool GetCountedFrame() const;
// Set counted status (as counted by JB or not)
void SetCountedFrame(bool frameCounted);
bool GetCountedFrame() const;
// Increments a counter to keep track of the number of packets of this frame
// which were NACKed before they arrived.
void IncrementNackCount();
// Returns the number of packets of this frame which were NACKed before they
// arrived.
int16_t GetNackCount() const;
// Increments a counter to keep track of the number of packets of this frame
// which were NACKed before they arrived.
void IncrementNackCount();
// Returns the number of packets of this frame which were NACKed before they
// arrived.
int16_t GetNackCount() const;
int64_t LatestPacketTimeMs() const;
int64_t LatestPacketTimeMs() const;
webrtc::FrameType FrameType() const;
void SetPreviousFrameLoss();
webrtc::FrameType FrameType() const;
void SetPreviousFrameLoss();
int32_t ExtractFromStorage(const EncodedVideoData& frameFromStorage);
int32_t ExtractFromStorage(const EncodedVideoData& frameFromStorage);
// The number of packets discarded because the decoder can't make use of
// them.
int NotDecodablePackets() const;
// The number of packets discarded because the decoder can't make use of
// them.
int NotDecodablePackets() const;
private:
void SetState(VCMFrameBufferStateEnum state); // Set state of frame
private:
void SetState(VCMFrameBufferStateEnum state); // Set state of frame
VCMFrameBufferStateEnum _state; // Current state of the frame
bool _frameCounted; // Was this frame counted by JB?
VCMSessionInfo _sessionInfo;
uint16_t _nackCount;
int64_t _latestPacketTimeMs;
VCMFrameBufferStateEnum _state; // Current state of the frame
bool _frameCounted; // Was this frame counted by JB?
VCMSessionInfo _sessionInfo;
uint16_t _nackCount;
int64_t _latestPacketTimeMs;
};
} // namespace webrtc
#endif // WEBRTC_MODULES_VIDEO_CODING_FRAME_BUFFER_H_
#endif // WEBRTC_MODULES_VIDEO_CODING_MAIN_SOURCE_FRAME_BUFFER_H_

View File

@ -11,6 +11,7 @@
#include <algorithm>
#include <cassert>
#include <utility>
#include "webrtc/modules/video_coding/main/interface/video_coding.h"
#include "webrtc/modules/video_coding/main/source/frame_buffer.h"
@ -167,7 +168,9 @@ VCMJitterBuffer::VCMJitterBuffer(Clock* clock,
max_nack_list_size_(0),
max_packet_age_to_nack_(0),
max_incomplete_time_ms_(0),
decode_with_errors_(false) {
decode_with_errors_(false),
average_packets_per_frame_(0.0f),
frame_counter_(0) {
memset(frame_buffers_, 0, sizeof(frame_buffers_));
memset(receive_statistics_, 0, sizeof(receive_statistics_));
@ -221,6 +224,7 @@ void VCMJitterBuffer::CopyFrom(const VCMJitterBuffer& rhs) {
nack_seq_nums_.resize(rhs.nack_seq_nums_.size());
missing_sequence_numbers_ = rhs.missing_sequence_numbers_;
latest_received_sequence_number_ = rhs.latest_received_sequence_number_;
average_packets_per_frame_ = rhs.average_packets_per_frame_;
for (int i = 0; i < kMaxNumberOfFrames; i++) {
if (frame_buffers_[i] != NULL) {
delete frame_buffers_[i];
@ -561,6 +565,10 @@ VCMEncodedFrame* VCMJitterBuffer::ExtractAndSetDecode(uint32_t timestamp) {
// We have a frame - update the last decoded state and nack list.
last_decoded_state_.SetState(frame);
DropPacketsFromNackList(last_decoded_state_.sequence_num());
if ((*frame).IsSessionComplete())
UpdateAveragePacketsPerFrame(frame->NumPackets());
return frame;
}
@ -693,9 +701,12 @@ VCMFrameBufferEnum VCMJitterBuffer::InsertPacket(const VCMPacket& packet,
// Note: Under current version, a decodable frame will never be
// triggered, as the body of the function is empty.
// TODO(mikhal): Update when decodable is enabled.
FrameData frame_data;
frame_data.rtt_ms = rtt_ms_;
frame_data.rolling_average_packets_per_frame = average_packets_per_frame_;
buffer_return = frame->InsertPacket(packet, now_ms,
decode_with_errors_,
rtt_ms_);
frame_data);
if (!frame->GetCountedFrame()) {
TRACE_EVENT_ASYNC_BEGIN1("webrtc", "Video", frame->TimeStamp(),
"timestamp", frame->TimeStamp());
@ -1210,6 +1221,22 @@ void VCMJitterBuffer::CountFrame(const VCMFrameBuffer& frame) {
}
}
void VCMJitterBuffer::UpdateAveragePacketsPerFrame(int current_number_packets) {
if (frame_counter_ > kFastConvergeThreshold) {
average_packets_per_frame_ = average_packets_per_frame_
* (1 - kNormalConvergeMultiplier)
+ current_number_packets * kNormalConvergeMultiplier;
} else if (frame_counter_ > 0) {
average_packets_per_frame_ = average_packets_per_frame_
* (1 - kFastConvergeMultiplier)
+ current_number_packets * kFastConvergeMultiplier;
frame_counter_++;
} else {
average_packets_per_frame_ = current_number_packets;
frame_counter_++;
}
}
// Must be called under the critical section |crit_sect_|.
void VCMJitterBuffer::CleanUpOldOrEmptyFrames() {
drop_count_ +=

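The UpdateAveragePacketsPerFrame() logic above is an exponentially weighted moving average: the first complete frame seeds the average, the next kFastConvergeThreshold frames blend in with weight kFastConvergeMultiplier = 0.4, and after that new frames are blended with weight kNormalConvergeMultiplier = 0.2. Only frames that arrive complete feed it (see ExtractAndSetDecode above). A small self-contained sketch of the same update rule, with made-up frame sizes:

#include <cstdio>

// Constants copied from jitter_buffer_common.h in this change.
static const float kFastConvergeMultiplier = 0.4f;    // early frames
static const float kNormalConvergeMultiplier = 0.2f;  // steady state
static const int kFastConvergeThreshold = 5;

struct PacketsPerFrameAverage {
  float average = 0.0f;
  int frame_counter = 0;

  void Update(int current_number_packets) {
    if (frame_counter > kFastConvergeThreshold) {
      // Steady state: slow exponentially weighted moving average.
      average = average * (1 - kNormalConvergeMultiplier) +
                current_number_packets * kNormalConvergeMultiplier;
    } else if (frame_counter > 0) {
      // Early frames: converge faster towards the observed sizes.
      average = average * (1 - kFastConvergeMultiplier) +
                current_number_packets * kFastConvergeMultiplier;
      frame_counter++;
    } else {
      // The first complete frame seeds the average directly.
      average = current_number_packets;
      frame_counter++;
    }
  }
};

int main() {
  PacketsPerFrameAverage avg;
  const int frames[] = {10, 10, 12, 8, 10, 10, 30, 10};  // illustrative sizes
  for (int packets : frames) {
    avg.Update(packets);
    std::printf("frame of %2d packets -> average %.2f\n", packets, avg.average);
  }
  return 0;
}
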
View File

@ -58,8 +58,8 @@ class TimestampLessThan {
}
};
class FrameList :
public std::map<uint32_t, VCMFrameBuffer*, TimestampLessThan> {
class FrameList
: public std::map<uint32_t, VCMFrameBuffer*, TimestampLessThan> {
public:
void InsertFrame(VCMFrameBuffer* frame);
VCMFrameBuffer* FindFrame(uint32_t timestamp) const;
@ -244,6 +244,9 @@ class VCMJitterBuffer {
// Updates the frame statistics.
void CountFrame(const VCMFrameBuffer& frame);
// Update rolling average of packets per frame.
void UpdateAveragePacketsPerFrame(int current_number_packets_);
// Cleans the frame list in the JB from old/empty frames.
// Should only be called prior to actual use.
void CleanUpOldOrEmptyFrames();
@ -328,6 +331,11 @@ class VCMJitterBuffer {
int max_incomplete_time_ms_;
bool decode_with_errors_;
// Estimated rolling average of packets per frame
float average_packets_per_frame_;
// average_packets_per_frame converges fast if we have fewer than this many
// frames.
int frame_counter_;
DISALLOW_COPY_AND_ASSIGN(VCMJitterBuffer);
};
} // namespace webrtc

View File

@ -15,9 +15,15 @@
namespace webrtc {
// Used to estimate rolling average of packets per frame.
static const float kFastConvergeMultiplier = 0.4f;
static const float kNormalConvergeMultiplier = 0.2f;
enum { kMaxNumberOfFrames = 300 };
enum { kStartNumberOfFrames = 6 };
enum { kMaxVideoDelayMs = 10000 };
enum { kPacketsPerFrameMultiplier = 5 };
enum { kFastConvergeThreshold = 5};
enum VCMJitterBufferEnum {
kMaxConsecutiveOldFrames = 60,

View File

@ -586,7 +586,7 @@ TEST_F(TestBasicJitterBuffer, PacketLoss) {
bool retransmitted = false;
EXPECT_EQ(kFirstPacket, jitter_buffer_->InsertPacket(*packet_,
&retransmitted));
int insert_return_val;
for (int i = 0; i < 11; ++i) {
webrtc::FrameType frametype = kVideoFrameDelta;
seq_num_++;
@ -612,9 +612,9 @@ TEST_F(TestBasicJitterBuffer, PacketLoss) {
packet_->seqNum = seq_num_;
packet_->completeNALU = kNaluEnd;
EXPECT_EQ(kIncomplete, jitter_buffer_->InsertPacket(*packet_,
&retransmitted));
insert_return_val = jitter_buffer_->InsertPacket(*packet_, &retransmitted);
EXPECT_TRUE(insert_return_val == kIncomplete
|| insert_return_val == kDecodableSession);
// Insert an empty (non-media) packet.
seq_num_++;
@ -624,8 +624,9 @@ TEST_F(TestBasicJitterBuffer, PacketLoss) {
packet_->completeNALU = kNaluEnd;
packet_->frameType = kFrameEmpty;
EXPECT_EQ(kIncomplete, jitter_buffer_->InsertPacket(*packet_,
&retransmitted));
insert_return_val = jitter_buffer_->InsertPacket(*packet_, &retransmitted);
EXPECT_TRUE(insert_return_val == kIncomplete
|| insert_return_val == kDecodableSession);
frame_out = DecodeIncompleteFrame();

View File

@ -14,6 +14,14 @@
namespace webrtc {
// Used in determining whether a frame is decodable.
enum {kRttThreshold = 100}; // Not decodable if Rtt is lower than this.
// Do not decode frames if the number of packets is between these two
// thresholds.
static const float kLowPacketPercentageThreshold = 0.2f;
static const float kHighPacketPercentageThreshold = 0.8f;
VCMSessionInfo::VCMSessionInfo()
: session_nack_(false),
complete_(false),
@ -101,6 +109,10 @@ int VCMSessionInfo::SessionLength() const {
return length;
}
int VCMSessionInfo::NumPackets() const {
return packets_.size();
}
int VCMSessionInfo::InsertBuffer(uint8_t* frame_buffer,
PacketIterator packet_it) {
VCMPacket& packet = *packet_it;
@ -169,11 +181,23 @@ void VCMSessionInfo::UpdateCompleteSession() {
}
}
void VCMSessionInfo::UpdateDecodableSession(int rttMs) {
void VCMSessionInfo::UpdateDecodableSession(const FrameData& frame_data) {
// Irrelevant if session is already complete or decodable
if (complete_ || decodable_)
return;
// First iteration - do nothing
// TODO(agalusza): Account for bursty loss.
// TODO(agalusza): Refine these values to better approximate optimal ones.
if (frame_data.rtt_ms < kRttThreshold
|| frame_type_ == kVideoFrameKey
|| !HaveFirstPacket()
|| (NumPackets() <= kHighPacketPercentageThreshold
* frame_data.rolling_average_packets_per_frame
&& NumPackets() > kLowPacketPercentageThreshold
* frame_data.rolling_average_packets_per_frame))
return;
decodable_ = true;
}
bool VCMSessionInfo::complete() const {
@ -369,7 +393,7 @@ VCMSessionInfo::session_nack() const {
int VCMSessionInfo::InsertPacket(const VCMPacket& packet,
uint8_t* frame_buffer,
bool enable_decodable_state,
int rtt_ms) {
const FrameData& frame_data) {
// Check if this is first packet (only valid for some codecs)
if (packet.isFirstPacket) {
// The first packet in a frame signals the frame type.
@ -406,7 +430,7 @@ int VCMSessionInfo::InsertPacket(const VCMPacket& packet,
int returnLength = InsertBuffer(frame_buffer, packet_list_it);
UpdateCompleteSession();
if (enable_decodable_state)
UpdateDecodableSession(rtt_ms);
UpdateDecodableSession(frame_data);
return returnLength;
}
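
To make the new decodable-session heuristic concrete: with the thresholds above, an incomplete delta frame is handed to the decoder only when the RTT is at least kRttThreshold (100 ms), the first packet of the frame is present, and the number of received packets is either very small (at most 20% of the rolling average) or nearly complete (more than 80% of it). A self-contained sketch that mirrors that early-return condition (the sample numbers are illustrative, not part of the change):

#include <cstdio>

// Thresholds copied from session_info.cc in this change.
static const int kRttThreshold = 100;  // ms
static const float kLowPacketPercentageThreshold = 0.2f;
static const float kHighPacketPercentageThreshold = 0.8f;

// Returns true if an incomplete delta frame would be handed to the decoder
// anyway, mirroring the early-return logic in UpdateDecodableSession().
bool IncompleteFrameIsDecodable(int rtt_ms,
                                bool is_key_frame,
                                bool have_first_packet,
                                int num_packets,
                                float rolling_average_packets_per_frame) {
  if (rtt_ms < kRttThreshold || is_key_frame || !have_first_packet)
    return false;
  const float low = kLowPacketPercentageThreshold *
                    rolling_average_packets_per_frame;
  const float high = kHighPacketPercentageThreshold *
                     rolling_average_packets_per_frame;
  // Frames in the middle band (more than 20% but at most 80% of the usual
  // packet count) are held back; very small or nearly complete frames pass.
  return num_packets <= low || num_packets > high;
}

int main() {
  // With a rolling average of 10 packets/frame, RTT 150 ms, a delta frame,
  // and the first packet present:
  for (int n = 1; n <= 10; ++n) {
    std::printf("%2d packets received -> %s\n", n,
                IncompleteFrameIsDecodable(150, false, true, n, 10.0f)
                    ? "decodable" : "wait / not decodable");
  }
  return 0;
}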

View File

@ -8,8 +8,8 @@
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_MODULES_VIDEO_CODING_SESSION_INFO_H_
#define WEBRTC_MODULES_VIDEO_CODING_SESSION_INFO_H_
#ifndef WEBRTC_MODULES_VIDEO_CODING_MAIN_SOURCE_SESSION_INFO_H_
#define WEBRTC_MODULES_VIDEO_CODING_MAIN_SOURCE_SESSION_INFO_H_
#include <list>
@ -18,6 +18,12 @@
#include "webrtc/typedefs.h"
namespace webrtc {
// Used to pass data from jitter buffer to session info.
// This data is then used in determining whether a frame is decodable.
struct FrameData {
int rtt_ms;
float rolling_average_packets_per_frame;
};
class VCMSessionInfo {
public:
@ -42,7 +48,7 @@ class VCMSessionInfo {
int InsertPacket(const VCMPacket& packet,
uint8_t* frame_buffer,
bool enable_decodable_state,
int rtt_ms);
const FrameData& frame_data);
bool complete() const;
bool decodable() const;
@ -59,6 +65,7 @@ class VCMSessionInfo {
// Returns the number of bytes deleted from the session.
int MakeDecodable();
int SessionLength() const;
int NumPackets() const;
bool HaveFirstPacket() const;
bool HaveLastPacket() const;
bool session_nack() const;
@ -112,7 +119,21 @@ class VCMSessionInfo {
// When enabled, determine if session is decodable, i.e. incomplete but
// would be sent to the decoder.
void UpdateDecodableSession(int rtt_ms);
// Note: definition assumes random loss.
// A frame is defined to be decodable when:
// Round trip time is higher than threshold
// It is not a key frame
// It has the first packet: In VP8 the first packet contains all or part of
// the first partition, which consists of the most relevant information for
// decoding.
// Either more than the upper threshold of the average number of packets per
// frame is present
// or less than the lower threshold of the average number of packets per
// frame is present: suggests a small frame. Such a frame is unlikely
// to contain many motion vectors, so having the first packet will
// likely suffice. Once we have more than the lower threshold of the
// frame, we know that the frame is medium or large-sized.
void UpdateDecodableSession(const FrameData& frame_data);
// If this session has been NACKed by the jitter buffer.
bool session_nack_;
@ -130,4 +151,4 @@ class VCMSessionInfo {
} // namespace webrtc
#endif // WEBRTC_MODULES_VIDEO_CODING_SESSION_INFO_H_
#endif // WEBRTC_MODULES_VIDEO_CODING_MAIN_SOURCE_SESSION_INFO_H_

View File

@ -29,6 +29,8 @@ class TestSessionInfo : public ::testing::Test {
packet_.dataPtr = packet_buffer_;
packet_.seqNum = 0;
packet_.timestamp = 0;
frame_data.rtt_ms = 0;
frame_data.rolling_average_packets_per_frame = -1;
}
void FillPacket(uint8_t start_value) {
@ -56,6 +58,7 @@ class TestSessionInfo : public ::testing::Test {
VCMSessionInfo session_;
VCMPacket packet_;
FrameData frame_data;
};
class TestVP8Partitions : public TestSessionInfo {
@ -151,7 +154,7 @@ TEST_F(TestSessionInfo, TestSimpleAPIs) {
packet_.frameType = kVideoFrameKey;
FillPacket(0);
ASSERT_EQ(packet_buffer_size(),
session_.InsertPacket(packet_, frame_buffer_, false, 0));
session_.InsertPacket(packet_, frame_buffer_, false, frame_data));
EXPECT_FALSE(session_.HaveLastPacket());
EXPECT_EQ(kVideoFrameKey, session_.FrameType());
@ -159,7 +162,7 @@ TEST_F(TestSessionInfo, TestSimpleAPIs) {
packet_.markerBit = true;
packet_.seqNum += 1;
ASSERT_EQ(packet_buffer_size(),
session_.InsertPacket(packet_, frame_buffer_, false, 0));
session_.InsertPacket(packet_, frame_buffer_, false, frame_data));
EXPECT_TRUE(session_.HaveLastPacket());
EXPECT_EQ(packet_.seqNum, session_.HighSequenceNumber());
EXPECT_EQ(0xFFFE, session_.LowSequenceNumber());
@ -172,7 +175,7 @@ TEST_F(TestSessionInfo, TestSimpleAPIs) {
packet_.sizeBytes = 0;
packet_.frameType = kFrameEmpty;
ASSERT_EQ(0,
session_.InsertPacket(packet_, frame_buffer_, false, 0));
session_.InsertPacket(packet_, frame_buffer_, false, frame_data));
EXPECT_EQ(packet_.seqNum, session_.HighSequenceNumber());
}
@ -181,21 +184,21 @@ TEST_F(TestSessionInfo, NormalOperation) {
packet_.isFirstPacket = true;
packet_.markerBit = false;
FillPacket(0);
ASSERT_EQ(session_.InsertPacket(packet_, frame_buffer_, false, 0),
ASSERT_EQ(session_.InsertPacket(packet_, frame_buffer_, false, frame_data),
packet_buffer_size());
packet_.isFirstPacket = false;
for (int i = 1; i < 9; ++i) {
packet_.seqNum += 1;
FillPacket(i);
ASSERT_EQ(session_.InsertPacket(packet_, frame_buffer_, false, 0),
ASSERT_EQ(session_.InsertPacket(packet_, frame_buffer_, false, frame_data),
packet_buffer_size());
}
packet_.seqNum += 1;
packet_.markerBit = true;
FillPacket(9);
ASSERT_EQ(session_.InsertPacket(packet_, frame_buffer_, false, 0),
ASSERT_EQ(session_.InsertPacket(packet_, frame_buffer_, false, frame_data),
packet_buffer_size());
EXPECT_EQ(0, session_.packets_not_decodable());
@ -217,7 +220,7 @@ TEST_F(TestVP8Partitions, TwoPartitionsOneLoss) {
FillPacket(0);
VCMPacket* packet = new VCMPacket(packet_buffer_, packet_buffer_size(),
packet_header_);
ASSERT_EQ(session_.InsertPacket(*packet, frame_buffer_, false, 0),
ASSERT_EQ(session_.InsertPacket(*packet, frame_buffer_, false, frame_data),
packet_buffer_size());
delete packet;
@ -228,7 +231,7 @@ TEST_F(TestVP8Partitions, TwoPartitionsOneLoss) {
packet_header_.header.sequenceNumber += 2;
FillPacket(2);
packet = new VCMPacket(packet_buffer_, packet_buffer_size(), packet_header_);
ASSERT_EQ(session_.InsertPacket(*packet, frame_buffer_, false, 0),
ASSERT_EQ(session_.InsertPacket(*packet, frame_buffer_, false, frame_data),
packet_buffer_size());
delete packet;
@ -239,7 +242,7 @@ TEST_F(TestVP8Partitions, TwoPartitionsOneLoss) {
packet_header_.header.sequenceNumber += 1;
FillPacket(3);
packet = new VCMPacket(packet_buffer_, packet_buffer_size(), packet_header_);
ASSERT_EQ(session_.InsertPacket(*packet, frame_buffer_, false, 0),
ASSERT_EQ(session_.InsertPacket(*packet, frame_buffer_, false, frame_data),
packet_buffer_size());
delete packet;
@ -265,7 +268,7 @@ TEST_F(TestVP8Partitions, TwoPartitionsOneLoss2) {
FillPacket(1);
VCMPacket* packet = new VCMPacket(packet_buffer_, packet_buffer_size(),
packet_header_);
ASSERT_EQ(session_.InsertPacket(*packet, frame_buffer_, false, 0),
ASSERT_EQ(session_.InsertPacket(*packet, frame_buffer_, false, frame_data),
packet_buffer_size());
delete packet;
@ -276,7 +279,7 @@ TEST_F(TestVP8Partitions, TwoPartitionsOneLoss2) {
packet_header_.header.sequenceNumber += 1;
FillPacket(2);
packet = new VCMPacket(packet_buffer_, packet_buffer_size(), packet_header_);
ASSERT_EQ(session_.InsertPacket(*packet, frame_buffer_, false, 0),
ASSERT_EQ(session_.InsertPacket(*packet, frame_buffer_, false, frame_data),
packet_buffer_size());
delete packet;
@ -287,7 +290,7 @@ TEST_F(TestVP8Partitions, TwoPartitionsOneLoss2) {
packet_header_.header.sequenceNumber += 1;
FillPacket(3);
packet = new VCMPacket(packet_buffer_, packet_buffer_size(), packet_header_);
ASSERT_EQ(session_.InsertPacket(*packet, frame_buffer_, false, 0),
ASSERT_EQ(session_.InsertPacket(*packet, frame_buffer_, false, frame_data),
packet_buffer_size());
delete packet;
@ -298,7 +301,7 @@ TEST_F(TestVP8Partitions, TwoPartitionsOneLoss2) {
packet_header_.header.sequenceNumber += 2;
FillPacket(5);
packet = new VCMPacket(packet_buffer_, packet_buffer_size(), packet_header_);
ASSERT_EQ(session_.InsertPacket(*packet, frame_buffer_, false, 0),
ASSERT_EQ(session_.InsertPacket(*packet, frame_buffer_, false, frame_data),
packet_buffer_size());
delete packet;
@ -325,7 +328,7 @@ TEST_F(TestVP8Partitions, TwoPartitionsNoLossWrap) {
FillPacket(0);
VCMPacket* packet = new VCMPacket(packet_buffer_, packet_buffer_size(),
packet_header_);
ASSERT_EQ(session_.InsertPacket(*packet, frame_buffer_, false, 0),
ASSERT_EQ(session_.InsertPacket(*packet, frame_buffer_, false, frame_data),
packet_buffer_size());
delete packet;
@ -336,7 +339,7 @@ TEST_F(TestVP8Partitions, TwoPartitionsNoLossWrap) {
packet_header_.header.sequenceNumber += 1;
FillPacket(1);
packet = new VCMPacket(packet_buffer_, packet_buffer_size(), packet_header_);
ASSERT_EQ(session_.InsertPacket(*packet, frame_buffer_, false, 0),
ASSERT_EQ(session_.InsertPacket(*packet, frame_buffer_, false, frame_data),
packet_buffer_size());
delete packet;
@ -347,7 +350,7 @@ TEST_F(TestVP8Partitions, TwoPartitionsNoLossWrap) {
packet_header_.header.sequenceNumber += 1;
FillPacket(2);
packet = new VCMPacket(packet_buffer_, packet_buffer_size(), packet_header_);
ASSERT_EQ(session_.InsertPacket(*packet, frame_buffer_, false, 0),
ASSERT_EQ(session_.InsertPacket(*packet, frame_buffer_, false, frame_data),
packet_buffer_size());
delete packet;
@ -358,7 +361,7 @@ TEST_F(TestVP8Partitions, TwoPartitionsNoLossWrap) {
packet_header_.header.sequenceNumber += 1;
FillPacket(3);
packet = new VCMPacket(packet_buffer_, packet_buffer_size(), packet_header_);
ASSERT_EQ(session_.InsertPacket(*packet, frame_buffer_, false, 0),
ASSERT_EQ(session_.InsertPacket(*packet, frame_buffer_, false, frame_data),
packet_buffer_size());
delete packet;
@ -385,7 +388,7 @@ TEST_F(TestVP8Partitions, TwoPartitionsLossWrap) {
FillPacket(0);
VCMPacket* packet = new VCMPacket(packet_buffer_, packet_buffer_size(),
packet_header_);
ASSERT_EQ(session_.InsertPacket(*packet, frame_buffer_, false, 0),
ASSERT_EQ(session_.InsertPacket(*packet, frame_buffer_, false, frame_data),
packet_buffer_size());
delete packet;
@ -396,7 +399,7 @@ TEST_F(TestVP8Partitions, TwoPartitionsLossWrap) {
packet_header_.header.sequenceNumber += 1;
FillPacket(1);
packet = new VCMPacket(packet_buffer_, packet_buffer_size(), packet_header_);
ASSERT_EQ(session_.InsertPacket(*packet, frame_buffer_, false, 0),
ASSERT_EQ(session_.InsertPacket(*packet, frame_buffer_, false, frame_data),
packet_buffer_size());
delete packet;
@ -407,7 +410,7 @@ TEST_F(TestVP8Partitions, TwoPartitionsLossWrap) {
packet_header_.header.sequenceNumber += 1;
FillPacket(2);
packet = new VCMPacket(packet_buffer_, packet_buffer_size(), packet_header_);
ASSERT_EQ(session_.InsertPacket(*packet, frame_buffer_, false, 0),
ASSERT_EQ(session_.InsertPacket(*packet, frame_buffer_, false, frame_data),
packet_buffer_size());
delete packet;
@ -418,7 +421,7 @@ TEST_F(TestVP8Partitions, TwoPartitionsLossWrap) {
packet_header_.header.sequenceNumber += 2;
FillPacket(3);
packet = new VCMPacket(packet_buffer_, packet_buffer_size(), packet_header_);
ASSERT_EQ(session_.InsertPacket(*packet, frame_buffer_, false, 0),
ASSERT_EQ(session_.InsertPacket(*packet, frame_buffer_, false, frame_data),
packet_buffer_size());
delete packet;
@ -446,7 +449,7 @@ TEST_F(TestVP8Partitions, ThreePartitionsOneMissing) {
FillPacket(1);
VCMPacket* packet = new VCMPacket(packet_buffer_, packet_buffer_size(),
packet_header_);
ASSERT_EQ(session_.InsertPacket(*packet, frame_buffer_, false, 0),
ASSERT_EQ(session_.InsertPacket(*packet, frame_buffer_, false, frame_data),
packet_buffer_size());
delete packet;
@ -457,7 +460,7 @@ TEST_F(TestVP8Partitions, ThreePartitionsOneMissing) {
packet_header_.header.sequenceNumber += 1;
FillPacket(2);
packet = new VCMPacket(packet_buffer_, packet_buffer_size(), packet_header_);
ASSERT_EQ(session_.InsertPacket(*packet, frame_buffer_, false, 0),
ASSERT_EQ(session_.InsertPacket(*packet, frame_buffer_, false, frame_data),
packet_buffer_size());
delete packet;
@ -468,7 +471,7 @@ TEST_F(TestVP8Partitions, ThreePartitionsOneMissing) {
packet_header_.header.sequenceNumber += 3;
FillPacket(5);
packet = new VCMPacket(packet_buffer_, packet_buffer_size(), packet_header_);
ASSERT_EQ(session_.InsertPacket(*packet, frame_buffer_, false, 0),
ASSERT_EQ(session_.InsertPacket(*packet, frame_buffer_, false, frame_data),
packet_buffer_size());
delete packet;
@ -479,7 +482,7 @@ TEST_F(TestVP8Partitions, ThreePartitionsOneMissing) {
packet_header_.header.sequenceNumber += 1;
FillPacket(6);
packet = new VCMPacket(packet_buffer_, packet_buffer_size(), packet_header_);
ASSERT_EQ(session_.InsertPacket(*packet, frame_buffer_, false, 0),
ASSERT_EQ(session_.InsertPacket(*packet, frame_buffer_, false, frame_data),
packet_buffer_size());
delete packet;
@ -506,7 +509,7 @@ TEST_F(TestVP8Partitions, ThreePartitionsLossInSecond) {
FillPacket(1);
VCMPacket* packet = new VCMPacket(packet_buffer_, packet_buffer_size(),
packet_header_);
ASSERT_EQ(session_.InsertPacket(*packet, frame_buffer_, false, 0),
ASSERT_EQ(session_.InsertPacket(*packet, frame_buffer_, false, frame_data),
packet_buffer_size());
delete packet;
@ -518,7 +521,7 @@ TEST_F(TestVP8Partitions, ThreePartitionsLossInSecond) {
FillPacket(2);
packet = new VCMPacket(packet_buffer_, packet_buffer_size(),
packet_header_);
ASSERT_EQ(session_.InsertPacket(*packet, frame_buffer_, false, 0),
ASSERT_EQ(session_.InsertPacket(*packet, frame_buffer_, false, frame_data),
packet_buffer_size());
delete packet;
@ -529,7 +532,7 @@ TEST_F(TestVP8Partitions, ThreePartitionsLossInSecond) {
packet_header_.header.sequenceNumber += 2;
FillPacket(4);
packet = new VCMPacket(packet_buffer_, packet_buffer_size(), packet_header_);
ASSERT_EQ(session_.InsertPacket(*packet, frame_buffer_, false, 0),
ASSERT_EQ(session_.InsertPacket(*packet, frame_buffer_, false, frame_data),
packet_buffer_size());
delete packet;
@ -540,7 +543,7 @@ TEST_F(TestVP8Partitions, ThreePartitionsLossInSecond) {
packet_header_.header.sequenceNumber += 1;
FillPacket(5);
packet = new VCMPacket(packet_buffer_, packet_buffer_size(), packet_header_);
ASSERT_EQ(session_.InsertPacket(*packet, frame_buffer_, false, 0),
ASSERT_EQ(session_.InsertPacket(*packet, frame_buffer_, false, frame_data),
packet_buffer_size());
delete packet;
@ -551,7 +554,7 @@ TEST_F(TestVP8Partitions, ThreePartitionsLossInSecond) {
packet_header_.header.sequenceNumber += 1;
FillPacket(6);
packet = new VCMPacket(packet_buffer_, packet_buffer_size(), packet_header_);
ASSERT_EQ(session_.InsertPacket(*packet, frame_buffer_, false, 0),
ASSERT_EQ(session_.InsertPacket(*packet, frame_buffer_, false, frame_data),
packet_buffer_size());
delete packet;
@ -562,7 +565,7 @@ TEST_F(TestVP8Partitions, ThreePartitionsLossInSecond) {
packet_header_.header.sequenceNumber += 1;
FillPacket(7);
packet = new VCMPacket(packet_buffer_, packet_buffer_size(), packet_header_);
ASSERT_EQ(session_.InsertPacket(*packet, frame_buffer_, false, 0),
ASSERT_EQ(session_.InsertPacket(*packet, frame_buffer_, false, frame_data),
packet_buffer_size());
delete packet;
@ -589,7 +592,7 @@ TEST_F(TestVP8Partitions, AggregationOverTwoPackets) {
FillPacket(0);
VCMPacket* packet = new VCMPacket(packet_buffer_, packet_buffer_size(),
packet_header_);
ASSERT_EQ(session_.InsertPacket(*packet, frame_buffer_, false, 0),
ASSERT_EQ(session_.InsertPacket(*packet, frame_buffer_, false, frame_data),
packet_buffer_size());
delete packet;
@ -600,7 +603,7 @@ TEST_F(TestVP8Partitions, AggregationOverTwoPackets) {
packet_header_.header.sequenceNumber += 1;
FillPacket(1);
packet = new VCMPacket(packet_buffer_, packet_buffer_size(), packet_header_);
ASSERT_EQ(session_.InsertPacket(*packet, frame_buffer_, false, 0),
ASSERT_EQ(session_.InsertPacket(*packet, frame_buffer_, false, frame_data),
packet_buffer_size());
delete packet;
@ -611,7 +614,7 @@ TEST_F(TestVP8Partitions, AggregationOverTwoPackets) {
packet_header_.header.sequenceNumber += 1;
FillPacket(2);
packet = new VCMPacket(packet_buffer_, packet_buffer_size(), packet_header_);
ASSERT_EQ(session_.InsertPacket(*packet, frame_buffer_, false, 0),
ASSERT_EQ(session_.InsertPacket(*packet, frame_buffer_, false, frame_data),
packet_buffer_size());
delete packet;
@ -637,7 +640,8 @@ TEST_F(TestNalUnits, OnlyReceivedEmptyPacket) {
packet_.sizeBytes = 0;
packet_.seqNum = 0;
packet_.markerBit = false;
ASSERT_EQ(0, session_.InsertPacket(packet_, frame_buffer_, false, 0));
ASSERT_EQ(0,
session_.InsertPacket(packet_, frame_buffer_, false, frame_data));
EXPECT_EQ(0, session_.MakeDecodable());
EXPECT_EQ(0, session_.SessionLength());
@ -650,7 +654,7 @@ TEST_F(TestNalUnits, OneIsolatedNaluLoss) {
packet_.seqNum = 0;
packet_.markerBit = false;
FillPacket(0);
ASSERT_EQ(session_.InsertPacket(packet_, frame_buffer_, false, 0),
ASSERT_EQ(session_.InsertPacket(packet_, frame_buffer_, false, frame_data),
packet_buffer_size());
packet_.isFirstPacket = false;
@ -658,7 +662,7 @@ TEST_F(TestNalUnits, OneIsolatedNaluLoss) {
packet_.seqNum += 2;
packet_.markerBit = true;
FillPacket(2);
ASSERT_EQ(session_.InsertPacket(packet_, frame_buffer_, false, 0),
ASSERT_EQ(session_.InsertPacket(packet_, frame_buffer_, false, frame_data),
packet_buffer_size());
EXPECT_EQ(0, session_.MakeDecodable());
@ -676,7 +680,7 @@ TEST_F(TestNalUnits, LossInMiddleOfNalu) {
packet_.seqNum = 0;
packet_.markerBit = false;
FillPacket(0);
ASSERT_EQ(session_.InsertPacket(packet_, frame_buffer_, false, 0),
ASSERT_EQ(session_.InsertPacket(packet_, frame_buffer_, false, frame_data),
packet_buffer_size());
packet_.isFirstPacket = false;
@ -684,7 +688,7 @@ TEST_F(TestNalUnits, LossInMiddleOfNalu) {
packet_.seqNum += 2;
packet_.markerBit = true;
FillPacket(2);
ASSERT_EQ(session_.InsertPacket(packet_, frame_buffer_, false, 0),
ASSERT_EQ(session_.InsertPacket(packet_, frame_buffer_, false, frame_data),
packet_buffer_size());
EXPECT_EQ(packet_buffer_size(), session_.MakeDecodable());
@ -700,7 +704,7 @@ TEST_F(TestNalUnits, StartAndEndOfLastNalUnitLost) {
packet_.seqNum = 0;
packet_.markerBit = false;
FillPacket(0);
ASSERT_EQ(session_.InsertPacket(packet_, frame_buffer_, false, 0),
ASSERT_EQ(session_.InsertPacket(packet_, frame_buffer_, false, frame_data),
packet_buffer_size());
packet_.isFirstPacket = false;
@ -708,7 +712,7 @@ TEST_F(TestNalUnits, StartAndEndOfLastNalUnitLost) {
packet_.seqNum += 2;
packet_.markerBit = false;
FillPacket(1);
ASSERT_EQ(session_.InsertPacket(packet_, frame_buffer_, false, 0),
ASSERT_EQ(session_.InsertPacket(packet_, frame_buffer_, false, frame_data),
packet_buffer_size());
EXPECT_EQ(packet_buffer_size(), session_.MakeDecodable());
@ -725,7 +729,7 @@ TEST_F(TestNalUnits, ReorderWrapNoLoss) {
packet_.seqNum += 1;
packet_.markerBit = false;
FillPacket(1);
ASSERT_EQ(session_.InsertPacket(packet_, frame_buffer_, false, 0),
ASSERT_EQ(session_.InsertPacket(packet_, frame_buffer_, false, frame_data),
packet_buffer_size());
packet_.isFirstPacket = true;
@ -733,7 +737,7 @@ TEST_F(TestNalUnits, ReorderWrapNoLoss) {
packet_.seqNum -= 1;
packet_.markerBit = false;
FillPacket(0);
ASSERT_EQ(session_.InsertPacket(packet_, frame_buffer_, false, 0),
ASSERT_EQ(session_.InsertPacket(packet_, frame_buffer_, false, frame_data),
packet_buffer_size());
packet_.isFirstPacket = false;
@ -741,7 +745,7 @@ TEST_F(TestNalUnits, ReorderWrapNoLoss) {
packet_.seqNum += 2;
packet_.markerBit = true;
FillPacket(2);
ASSERT_EQ(session_.InsertPacket(packet_, frame_buffer_, false, 0),
ASSERT_EQ(session_.InsertPacket(packet_, frame_buffer_, false, frame_data),
packet_buffer_size());
EXPECT_EQ(0, session_.MakeDecodable());
@ -757,7 +761,7 @@ TEST_F(TestNalUnits, WrapLosses) {
packet_.completeNALU = kNaluIncomplete;
packet_.markerBit = false;
FillPacket(1);
ASSERT_EQ(session_.InsertPacket(packet_, frame_buffer_, false, 0),
ASSERT_EQ(session_.InsertPacket(packet_, frame_buffer_, false, frame_data),
packet_buffer_size());
packet_.isFirstPacket = false;
@ -765,7 +769,7 @@ TEST_F(TestNalUnits, WrapLosses) {
packet_.seqNum += 2;
packet_.markerBit = true;
FillPacket(2);
ASSERT_EQ(session_.InsertPacket(packet_, frame_buffer_, false, 0),
ASSERT_EQ(session_.InsertPacket(packet_, frame_buffer_, false, frame_data),
packet_buffer_size());
EXPECT_EQ(2 * packet_buffer_size(), session_.MakeDecodable());
@ -781,7 +785,7 @@ TEST_F(TestNalUnits, ReorderWrapLosses) {
packet_.seqNum += 2;
packet_.markerBit = true;
FillPacket(2);
ASSERT_EQ(session_.InsertPacket(packet_, frame_buffer_, false, 0),
ASSERT_EQ(session_.InsertPacket(packet_, frame_buffer_, false, frame_data),
packet_buffer_size());
packet_.seqNum -= 2;
@ -789,7 +793,7 @@ TEST_F(TestNalUnits, ReorderWrapLosses) {
packet_.completeNALU = kNaluIncomplete;
packet_.markerBit = false;
FillPacket(1);
ASSERT_EQ(session_.InsertPacket(packet_, frame_buffer_, false, 0),
ASSERT_EQ(session_.InsertPacket(packet_, frame_buffer_, false, frame_data),
packet_buffer_size());
EXPECT_EQ(2 * packet_buffer_size(), session_.MakeDecodable());