Adds support for VP8 partitions

This change adds support for VP8 partitions in the video jitter buffer and 
the VP8 encoder and decoder wrappers. The feature is currently disabled by
default since it requires a later version of libvpx.

With this change the jitter buffer also keeps track of each packet header
until decoding, and the VCMSessionInfo and VCMPacket objects keep pointers
into the encoded frame buffers.
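For illustration only (not part of the change): a minimal sketch of how the
fragmentation-aware Decode() added here is meant to be called. It assumes the
VideoDecoder interface declared in the codec interface header shown below; the
decoder instance, first_partition_size and render_time_ms are placeholders.

// Sketch: describe two VP8 partitions inside encoded_image._buffer and pass
// them through the new fragmentation parameter. All names are illustrative.
WebRtc_Word32 DecodeWithPartitions(webrtc::VideoDecoder* decoder,
                                   const webrtc::EncodedImage& encoded_image,
                                   WebRtc_UWord32 first_partition_size,
                                   WebRtc_Word64 render_time_ms) {
  webrtc::RTPFragmentationHeader frag;
  frag.VerifyAndAllocateFragmentationHeader(2);
  frag.fragmentationOffset[0] = 0;                     // first partition (modes/MVs)
  frag.fragmentationLength[0] = first_partition_size;
  frag.fragmentationOffset[1] = first_partition_size;  // token data follows directly
  frag.fragmentationLength[1] = encoded_image._length - first_partition_size;
  return decoder->Decode(encoded_image,
                         false,   // missingFrames
                         &frag,   // new parameter added by this change
                         NULL,    // codecSpecificInfo
                         render_time_ms);
}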
Review URL: http://webrtc-codereview.appspot.com/137021

git-svn-id: http://webrtc.googlecode.com/svn/trunk@558 4adac7df-926f-26a2-2b94-8c16560cd09d
stefan@webrtc.org 2011-09-08 06:50:28 +00:00
parent b6fc9f1d6f
commit c3d891059e
19 changed files with 940 additions and 205 deletions


@ -127,10 +127,12 @@ public:
//
// Return value : WEBRTC_VIDEO_CODEC_OK if OK
// <0 - Error
virtual WebRtc_Word32 Decode(const EncodedImage& inputImage,
bool missingFrames,
const CodecSpecificInfo* /*codecSpecificInfo*/,
WebRtc_Word64 /*renderTimeMs*/);
virtual WebRtc_Word32 Decode(
const EncodedImage& inputImage,
bool missingFrames,
const RTPFragmentationHeader* /*fragmentation*/,
const CodecSpecificInfo* /*codecSpecificInfo*/,
WebRtc_Word64 /*renderTimeMs*/);
// Register a decode complete callback object.
//


@ -210,6 +210,7 @@ I420Decoder::InitDecode(const VideoCodec* codecSettings, WebRtc_Word32 /*numberO
WebRtc_Word32
I420Decoder::Decode(const EncodedImage& inputImage,
bool /*missingFrames*/,
const RTPFragmentationHeader* /*fragmentation*/,
const CodecSpecificInfo* /*codecSpecificInfo*/,
WebRtc_Word64 /*renderTimeMs*/)
{


@ -201,6 +201,11 @@ public:
// - inputImage : Encoded image to be decoded
// - missingFrames : True if one or more frames have been lost
// since the previous decode call.
// - fragmentation : Specifies where the encoded frame can be
// split into separate fragments. The meaning
// of fragment is codec specific, but often
// means that each fragment is decodable by
// itself.
// - codecSpecificInfo : Pointer to codec specific data
// - renderTimeMs : System time to render in milliseconds. Only
// used by decoders with internal rendering.
@ -209,6 +214,7 @@ public:
virtual WebRtc_Word32
Decode(const EncodedImage& inputImage,
bool missingFrames,
const RTPFragmentationHeader* fragmentation,
const CodecSpecificInfo* codecSpecificInfo = NULL,
WebRtc_Word64 renderTimeMs = -1) = 0;


@ -479,8 +479,8 @@ NormalAsyncTest::Decode(int lossValue)
if (!_waitForKey || encodedImage._frameType == kKeyFrame)
{
_waitForKey = false;
ret = _decoder->Decode(encodedImage, _missingFrames,
_frameToDecode->_codecSpecificInfo);
ret = _decoder->Decode(encodedImage, _missingFrames, NULL,
_frameToDecode->_codecSpecificInfo);
if (ret >= 0)
{


@ -283,7 +283,8 @@ int PerformanceTest::Decode(int lossValue)
EncodedImage encodedImage;
VideoEncodedBufferToEncodedImage(*(_frameToDecode->_frame), encodedImage);
encodedImage._completeFrame = !lossValue;
int ret = _decoder->Decode(encodedImage, _missingFrames, _frameToDecode->_codecSpecificInfo);
int ret = _decoder->Decode(encodedImage, _missingFrames, NULL,
_frameToDecode->_codecSpecificInfo);
_missingFrames = false;
return ret;
}


@ -25,6 +25,7 @@ typedef struct vpx_codec_ctx vpx_dec_ctx_t;
typedef struct vpx_codec_enc_cfg vpx_codec_enc_cfg_t;
typedef struct vpx_image vpx_image_t;
typedef struct vpx_ref_frame vpx_ref_frame_t;
struct vpx_codec_cx_pkt;
namespace webrtc
{
@ -136,6 +137,13 @@ private:
// Call encoder initialize function and set control settings.
WebRtc_Word32 InitAndSetControlSettings();
void PopulateCodecSpecific(CodecSpecificInfo* codec_specific,
const vpx_codec_cx_pkt& pkt);
WebRtc_Word32 GetEncodedFrame(const RawImage& input_image);
WebRtc_Word32 GetEncodedPartitions(const RawImage& input_image);
// Determine maximum target for Intra frames
//
// Input:
@ -162,6 +170,7 @@ private:
WebRtc_UWord16 _pictureIDLastAcknowledgedRef;
int _cpuSpeed;
WebRtc_UWord32 _rcMaxIntraTarget;
int _tokenPartitions;
vpx_codec_ctx_t* _encoder;
vpx_codec_enc_cfg_t* _cfg;
@ -192,6 +201,8 @@ public:
// - inputImage : Encoded image to be decoded
// - missingFrames : True if one or more frames have been lost
// since the previous decode call.
// - fragmentation : Specifies the start and length of each VP8
// partition.
// - codecSpecificInfo : pointer to specific codec data
// - renderTimeMs : Render time in Ms
//
@ -201,6 +212,7 @@ public:
// WEBRTC_VIDEO_CODEC_ERR_PARAMETER
virtual WebRtc_Word32 Decode(const EncodedImage& inputImage,
bool missingFrames,
const RTPFragmentationHeader* fragmentation,
const CodecSpecificInfo* codecSpecificInfo,
WebRtc_Word64 /*renderTimeMs*/);
@ -238,6 +250,9 @@ private:
// frame type to copy in _refFrame->frame_type before the call to this function.
int CopyReference(VP8Decoder* copyTo);
WebRtc_Word32 DecodePartitions(const EncodedImage& input_image,
const RTPFragmentationHeader* fragmentation);
RawImage _decodedImage;
DecodedImageCallback* _decodeCompleteCallback;
bool _inited;


@ -52,6 +52,7 @@ VP8Encoder::VP8Encoder():
_pictureIDLastAcknowledgedRef(0),
_cpuSpeed(-6), // default value
_rcMaxIntraTarget(0),
_tokenPartitions(VP8_ONE_TOKENPARTITION),
_encoder(NULL),
_cfg(NULL),
_raw(NULL)
@ -271,7 +272,11 @@ VP8Encoder::InitEncode(const VideoCodec* inst,
_cfg->g_timebase.num = 1;
_cfg->g_timebase.den = _maxFrameRate;
_cfg->g_error_resilient = 1; //enabled
#ifdef INDEPENDENT_PARTITIONS
_cfg->g_error_resilient = VPX_ERROR_RESILIENT_DEFAULT | VPX_ERROR_RESILIENT_PARTITIONS;
#else
_cfg->g_error_resilient = 1;
#endif
_cfg->g_lag_in_frames = 0; // 0- no frame lagging
_cfg->g_threads = numberOfCores;
@ -339,12 +344,22 @@ VP8Encoder::InitAndSetControlSettings()
{
// construct encoder context
vpx_codec_enc_cfg_t cfg_copy = *_cfg;
if (vpx_codec_enc_init(_encoder, vpx_codec_vp8_cx(), _cfg, 0))
vpx_codec_flags_t flags = 0;
// TODO(holmer): We should make a smarter decision on the number of
// partitions. Eight is probably not the optimal number for low resolution
// video.
_tokenPartitions = VP8_EIGHT_TOKENPARTITION;
#if WEBRTC_LIBVPX_VERSION >= 971
flags |= VPX_CODEC_USE_OUTPUT_PARTITION;
#endif
if (vpx_codec_enc_init(_encoder, vpx_codec_vp8_cx(), _cfg, flags))
{
return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
}
vpx_codec_control(_encoder, VP8E_SET_STATIC_THRESHOLD, 800);
vpx_codec_control(_encoder, VP8E_SET_CPUUSED, _cpuSpeed);
vpx_codec_control(_encoder, VP8E_SET_TOKEN_PARTITIONS,
static_cast<vp8e_token_partitions>(_tokenPartitions));
#if WEBRTC_LIBVPX_VERSION >= 971
vpx_codec_control(_encoder, VP8E_SET_MAX_INTRA_BITRATE_PCT,
_rcMaxIntraTarget);
@ -394,8 +409,6 @@ VP8Encoder::Encode(const RawImage& inputImage,
return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
}
vpx_codec_iter_t iter = NULL;
// image in vpx_image_t format
_raw->planes[PLANE_Y] = inputImage._buffer;
_raw->planes[PLANE_U] = &inputImage._buffer[_height * _width];
@ -529,6 +542,27 @@ VP8Encoder::Encode(const RawImage& inputImage,
}
_timeStamp++;
#if WEBRTC_LIBVPX_VERSION >= 971
return GetEncodedPartitions(inputImage);
#else
return GetEncodedFrame(inputImage);
#endif
}
void VP8Encoder::PopulateCodecSpecific(CodecSpecificInfo* codec_specific,
const vpx_codec_cx_pkt& pkt) {
assert(codec_specific != NULL);
codec_specific->codecType = kVideoCodecVP8;
CodecSpecificInfoVP8 *vp8Info = &(codec_specific->codecSpecific.VP8);
vp8Info->pictureId = _pictureID;
vp8Info->nonReference = (pkt.data.frame.flags & VPX_FRAME_IS_DROPPABLE);
_pictureID = (_pictureID + 1) % 0x7FFF; // prepare next
}
WebRtc_Word32
VP8Encoder::GetEncodedFrame(const RawImage& input_image)
{
vpx_codec_iter_t iter = NULL;
const vpx_codec_cx_pkt_t *pkt= vpx_codec_get_cx_data(_encoder, &iter); // no lagging => 1 frame at a time
if (pkt == NULL && !_encoder->err)
{
@ -538,13 +572,9 @@ VP8Encoder::Encode(const RawImage& inputImage,
else if (pkt->kind == VPX_CODEC_CX_FRAME_PKT)
{
CodecSpecificInfo codecSpecific;
codecSpecific.codecType = kVideoCodecVP8;
CodecSpecificInfoVP8 *vp8Info = &(codecSpecific.codecSpecific.VP8);
vp8Info->pictureId = _pictureID;
vp8Info->nonReference
= (pkt->data.frame.flags & VPX_FRAME_IS_DROPPABLE);
PopulateCodecSpecific(&codecSpecific, *pkt);
assert(pkt->data.frame.sz <= _encodedImage._size);
memcpy(_encodedImage._buffer, pkt->data.frame.buf, pkt->data.frame.sz);
_encodedImage._length = WebRtc_UWord32(pkt->data.frame.sz);
_encodedImage._encodedHeight = _raw->h;
@ -558,7 +588,7 @@ VP8Encoder::Encode(const RawImage& inputImage,
if (_encodedImage._length > 0)
{
_encodedImage._timeStamp = inputImage._timeStamp;
_encodedImage._timeStamp = input_image._timeStamp;
// Figure out where partition boundaries are located.
RTPFragmentationHeader fragInfo;
@ -583,13 +613,60 @@ VP8Encoder::Encode(const RawImage& inputImage,
_encodedCompleteCallback->Encoded(_encodedImage, &codecSpecific,
&fragInfo);
}
_pictureID = (_pictureID + 1) % 0x7FFF; // prepare next
return WEBRTC_VIDEO_CODEC_OK;
}
return WEBRTC_VIDEO_CODEC_ERROR;
}
WebRtc_Word32
VP8Encoder::GetEncodedPartitions(const RawImage& input_image) {
vpx_codec_iter_t iter = NULL;
int part_idx = 0;
_encodedImage._length = 0;
RTPFragmentationHeader frag_info;
frag_info.VerifyAndAllocateFragmentationHeader((1 << _tokenPartitions) + 1);
CodecSpecificInfo codecSpecific;
const vpx_codec_cx_pkt_t *pkt = NULL;
while ((pkt = vpx_codec_get_cx_data(_encoder, &iter)) != NULL) {
switch(pkt->kind) {
case VPX_CODEC_CX_FRAME_PKT: {
memcpy(&_encodedImage._buffer[_encodedImage._length],
pkt->data.frame.buf,
pkt->data.frame.sz);
frag_info.fragmentationOffset[part_idx] = _encodedImage._length;
frag_info.fragmentationLength[part_idx] = pkt->data.frame.sz;
frag_info.fragmentationPlType[part_idx] = 0; // not known here
frag_info.fragmentationTimeDiff[part_idx] = 0;
_encodedImage._length += pkt->data.frame.sz;
assert(_encodedImage._length <= _encodedImage._size);
++part_idx;
break;
}
default: {
break;
}
}
// End of frame
if ((pkt->data.frame.flags & VPX_FRAME_IS_FRAGMENT) == 0) {
// check if encoded frame is a key frame
if (pkt->data.frame.flags & VPX_FRAME_IS_KEY)
{
_encodedImage._frameType = kKeyFrame;
}
PopulateCodecSpecific(&codecSpecific, *pkt);
break;
}
}
if (_encodedImage._length == 0)
return WEBRTC_VIDEO_CODEC_ERROR;
_encodedImage._timeStamp = input_image._timeStamp;
_encodedImage._encodedHeight = _raw->h;
_encodedImage._encodedWidth = _raw->w;
_encodedCompleteCallback->Encoded(_encodedImage, &codecSpecific,
&frag_info);
return WEBRTC_VIDEO_CODEC_OK;
}
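A note on the frag_info sizing above (an aside, not part of the change): the
vp8e_token_partitions enumerators encode the base-2 logarithm of the token
partition count, so VP8_EIGHT_TOKENPARTITION requests 2^3 = 8 token partitions.
Together with the mandatory first partition (modes and motion vectors) this gives
(1 << _tokenPartitions) + 1 = 9 fragments, which matches the jitter buffer's
kMaxVP8Partitions = 9. A minimal sketch of that arithmetic, assuming the enumerator
values run from VP8_ONE_TOKENPARTITION = 0 to VP8_EIGHT_TOKENPARTITION = 3:

// Sketch only: expected fragment count for a given token-partition setting.
int ExpectedVp8Fragments(int token_partitions_log2) {
  // One first partition plus 2^log2 token partitions.
  return (1 << token_partitions_log2) + 1;
}
// ExpectedVp8Fragments(3 /* VP8_EIGHT_TOKENPARTITION */) == 9 == kMaxVP8Partitions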
WebRtc_Word32
VP8Encoder::SetPacketLoss(WebRtc_UWord32 packetLoss)
{
@ -661,6 +738,9 @@ VP8Decoder::InitDecode(const VideoCodec* inst,
vpx_codec_flags_t flags = 0;
#if WEBRTC_LIBVPX_VERSION >= 971
flags = VPX_CODEC_USE_ERROR_CONCEALMENT;
#ifdef INDEPENDENT_PARTITIONS
flags |= VPX_CODEC_USE_INPUT_PARTITION;
#endif
#endif
if (vpx_codec_dec_init(_decoder, vpx_codec_vp8_dx(), NULL, flags))
@ -694,6 +774,7 @@ VP8Decoder::InitDecode(const VideoCodec* inst,
WebRtc_Word32
VP8Decoder::Decode(const EncodedImage& inputImage,
bool missingFrames,
const RTPFragmentationHeader* fragmentation,
const CodecSpecificInfo* codecSpecificInfo,
WebRtc_Word64 /*renderTimeMs*/)
{
@ -723,6 +804,12 @@ VP8Decoder::Decode(const EncodedImage& inputImage,
}
// otherwise allow for incomplete frames to be decoded.
}
#ifdef INDEPENDENT_PARTITIONS
if (fragmentation == NULL)
{
return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
}
#endif
vpx_dec_iter_t _iter = NULL;
vpx_image_t* img;
@ -737,7 +824,12 @@ VP8Decoder::Decode(const EncodedImage& inputImage,
}
}
// we remove the picture ID here
#ifdef INDEPENDENT_PARTITIONS
if (DecodePartitions(inputImage, fragmentation))
{
return WEBRTC_VIDEO_CODEC_ERROR;
}
#else
if (vpx_codec_decode(_decoder,
inputImage._buffer,
inputImage._length,
@ -746,6 +838,7 @@ VP8Decoder::Decode(const EncodedImage& inputImage,
{
return WEBRTC_VIDEO_CODEC_ERROR;
}
#endif
// Store encoded frame if key frame. (Used in Copy method.)
if (inputImage._frameType == kKeyFrame)
@ -873,6 +966,29 @@ VP8Decoder::Decode(const EncodedImage& inputImage,
return WEBRTC_VIDEO_CODEC_OK;
}
WebRtc_Word32
VP8Decoder::DecodePartitions(const EncodedImage& input_image,
const RTPFragmentationHeader* fragmentation) {
for (int i = 0; i < fragmentation->fragmentationVectorSize; ++i) {
const WebRtc_UWord8* partition = input_image._buffer +
fragmentation->fragmentationOffset[i];
const WebRtc_UWord32 partition_length =
fragmentation->fragmentationLength[i];
if (vpx_codec_decode(_decoder,
partition,
partition_length,
0,
VPX_DL_REALTIME)) {
return WEBRTC_VIDEO_CODEC_ERROR;
}
}
// Signal end of frame data
if (vpx_codec_decode(_decoder, NULL, 0, 0, VPX_DL_REALTIME))
return WEBRTC_VIDEO_CODEC_ERROR;
return WEBRTC_VIDEO_CODEC_OK;
}
WebRtc_Word32
VP8Decoder::RegisterDecodeCompleteCallback(DecodedImageCallback* callback)
{


@ -58,8 +58,14 @@
'../../../../../../../third_party/libvpx/source/libvpx',
],
'defines': [
'WEBRTC_LIBVPX_VERSION=971' # Cayuga
'WEBRTC_LIBVPX_VERSION=971', # Cayuga
'INDEPENDENT_PARTITIONS',
],
'direct_dependent_settings': {
'defines': [
'INDEPENDENT_PARTITIONS',
],
},
}],
],
'direct_dependent_settings': {
@ -76,7 +82,7 @@
},
], # targets
# Exclude the test target when building with chromium.
'conditions': [
'conditions': [
['build_with_chromium==0', {
'targets': [
{


@ -21,7 +21,7 @@ VP8NormalAsyncTest(bitRate)
_decoder2 = NULL;
}
VP8DualDecoderTest::VP8DualDecoderTest()
VP8DualDecoderTest::VP8DualDecoderTest()
:
VP8NormalAsyncTest("VP8 Dual Decoder Test", "Tests VP8 dual decoder", 1),
_decoder2(NULL)
@ -86,7 +86,7 @@ VP8DualDecoderTest::Perform()
if (!frameQueue.Empty() || complete)
{
while (!frameQueue.Empty())
{
{
_frameToDecode =
static_cast<FrameQueueTuple *>(frameQueue.PopFrame());
int lost = DoPacketLoss();
@ -153,13 +153,14 @@ VP8DualDecoderTest::Decode(int lossValue)
encodedImage._completeFrame = !lossValue;
_decodeCompleteTime = 0;
_decodeTimes[encodedImage._timeStamp] = clock()/(double)CLOCKS_PER_SEC;
int ret = _decoder->Decode(encodedImage, _missingFrames,
_frameToDecode->_codecSpecificInfo);
int ret = _decoder->Decode(encodedImage, _missingFrames, NULL,
_frameToDecode->_codecSpecificInfo);
// second decoder
if (_decoder2)
{
int ret2 = _decoder2->Decode(encodedImage, _missingFrames,
_frameToDecode->_codecSpecificInfo, 0 /* dummy */);
int ret2 = _decoder2->Decode(encodedImage, _missingFrames, NULL,
_frameToDecode->_codecSpecificInfo,
0 /* dummy */);
// check return values
if (ret < 0 || ret2 < 0 || ret2 != ret)
@ -184,7 +185,7 @@ VP8DualDecoderTest::Decode(int lossValue)
bool
VP8DualDecoderTest::CheckIfBitExact(const void* ptrA, unsigned int aLengthBytes,
VP8DualDecoderTest::CheckIfBitExact(const void* ptrA, unsigned int aLengthBytes,
const void* ptrB, unsigned int bLengthBytes)
{
if (aLengthBytes != bLengthBytes)


@ -21,7 +21,8 @@ webrtc::EncodedImage(),
_renderTimeMs(-1),
_payloadType(0),
_missingFrame(false),
_codec(kVideoCodecUnknown)
_codec(kVideoCodecUnknown),
_fragmentation()
{
_codecSpecificInfo.codecType = kVideoCodecUnknown;
}
@ -32,7 +33,8 @@ webrtc::EncodedImage(rhs),
_renderTimeMs(-1),
_payloadType(0),
_missingFrame(false),
_codec(kVideoCodecUnknown)
_codec(kVideoCodecUnknown),
_fragmentation()
{
_codecSpecificInfo.codecType = kVideoCodecUnknown;
_buffer = NULL;
@ -46,22 +48,24 @@ _codec(kVideoCodecUnknown)
}
VCMEncodedFrame::VCMEncodedFrame(const VCMEncodedFrame& rhs)
:
webrtc::EncodedImage(rhs),
_renderTimeMs(rhs._renderTimeMs),
_payloadType(rhs._payloadType),
_missingFrame(rhs._missingFrame),
_codecSpecificInfo(rhs._codecSpecificInfo),
_codec(rhs._codec)
{
_buffer = NULL;
_size = 0;
_length = 0;
if (rhs._buffer != NULL)
{
VerifyAndAllocate(rhs._size);
memcpy(_buffer, rhs._buffer, rhs._length);
}
:
webrtc::EncodedImage(rhs),
_renderTimeMs(rhs._renderTimeMs),
_payloadType(rhs._payloadType),
_missingFrame(rhs._missingFrame),
_codecSpecificInfo(rhs._codecSpecificInfo),
_codec(rhs._codec),
_fragmentation() {
_buffer = NULL;
_size = 0;
_length = 0;
if (rhs._buffer != NULL)
{
VerifyAndAllocate(rhs._size);
memcpy(_buffer, rhs._buffer, rhs._length);
}
// Deep operator=
_fragmentation = rhs._fragmentation;
}
VCMEncodedFrame::~VCMEncodedFrame()
@ -126,6 +130,10 @@ void VCMEncodedFrame::CopyCodecSpecific(const RTPVideoHeader* header)
}
}
const RTPFragmentationHeader* VCMEncodedFrame::FragmentationHeader() const {
return &_fragmentation;
}
WebRtc_Word32
VCMEncodedFrame::Store(VCMFrameStorageCallback& storeCallback) const
{


@ -87,6 +87,8 @@ public:
*/
const CodecSpecificInfo* CodecSpecific() const {return &_codecSpecificInfo;}
const RTPFragmentationHeader* FragmentationHeader() const;
WebRtc_Word32 Store(VCMFrameStorageCallback& storeCallback) const;
static webrtc::FrameType ConvertFrameType(VideoFrameType frameType);
@ -110,6 +112,7 @@ protected:
bool _missingFrame;
CodecSpecificInfo _codecSpecificInfo;
webrtc::VideoCodecType _codec;
RTPFragmentationHeader _fragmentation;
};
} // namespace webrtc


@ -22,19 +22,15 @@
namespace webrtc {
// Constructor
VCMFrameBuffer::VCMFrameBuffer() :
VCMFrameBuffer::VCMFrameBuffer()
:
_state(kStateFree),
_frameCounted(false),
_nackCount(0),
_latestPacketTimeMs(-1)
{
_latestPacketTimeMs(-1) {
}
// Destructor
VCMFrameBuffer::~VCMFrameBuffer()
{
Reset();
VCMFrameBuffer::~VCMFrameBuffer() {
}
VCMFrameBuffer::VCMFrameBuffer(VCMFrameBuffer& rhs)
@ -139,6 +135,7 @@ VCMFrameBuffer::InsertPacket(const VCMPacket& packet, WebRtc_Word64 timeInMs)
(packet.insertStartCode ? kH264StartCodeLengthBytes : 0);
if (requiredSizeBytes >= _size)
{
const WebRtc_UWord8* prevBuffer = _buffer;
const WebRtc_UWord32 increments = requiredSizeBytes /
kBufferIncStepSizeBytes +
(requiredSizeBytes %
@ -153,9 +150,10 @@ VCMFrameBuffer::InsertPacket(const VCMPacket& packet, WebRtc_Word64 timeInMs)
{
return kSizeError;
}
_sessionInfo.UpdateDataPointers(_buffer, prevBuffer);
}
CopyCodecSpecific(packet.codecSpecificHeader);
CopyCodecSpecific(&packet.codecSpecificHeader);
WebRtc_Word64 retVal = _sessionInfo.InsertPacket(packet, _buffer);
if (retVal == -1)
@ -261,9 +259,16 @@ VCMFrameBuffer::Reset()
void
VCMFrameBuffer::MakeSessionDecodable()
{
WebRtc_Word32 retVal = _sessionInfo.MakeSessionDecodable(_buffer);
// update length
WebRtc_UWord32 retVal;
#ifdef INDEPENDENT_PARTITIONS
if (_codec != kVideoCodecVP8) {
retVal = _sessionInfo.MakeDecodable(_buffer);
_length -= retVal;
}
#else
retVal = _sessionInfo.MakeDecodable(_buffer);
_length -= retVal;
#endif
}
// Set state of frame
@ -397,7 +402,20 @@ VCMFrameBuffer::IsRetransmitted()
void
VCMFrameBuffer::PrepareForDecode()
{
#ifdef INDEPENDENT_PARTITIONS
if (_codec == kVideoCodecVP8)
{
_length =
_sessionInfo.BuildVP8FragmentationHeader(_buffer, _length,
&_fragmentation);
}
else
{
_length = _sessionInfo.PrepareForDecode(_buffer, _codec);
}
#else
_length = _sessionInfo.PrepareForDecode(_buffer, _codec);
#endif
}
}


@ -157,6 +157,7 @@ WebRtc_Word32 VCMGenericDecoder::Decode(const VCMEncodedFrame& frame)
WebRtc_Word32 ret = _decoder.Decode(frame.EncodedImage(),
frame.MissingFrame(),
frame.FragmentationHeader(),
frame.CodecSpecific(),
frame.RenderTimeMs());


@ -15,6 +15,23 @@
namespace webrtc {
VCMPacket::VCMPacket()
:
payloadType(0),
timestamp(0),
seqNum(0),
dataPtr(NULL),
sizeBytes(0),
markerBit(false),
frameType(kFrameEmpty),
codec(kVideoCodecUnknown),
isFirstPacket(false),
completeNALU(kNaluUnset),
insertStartCode(false),
bits(false),
codecSpecificHeader() {
}
VCMPacket::VCMPacket(const WebRtc_UWord8* ptr,
const WebRtc_UWord32 size,
const WebRtcRTPHeader& rtpHeader) :
@ -31,7 +48,7 @@ VCMPacket::VCMPacket(const WebRtc_UWord8* ptr,
completeNALU(kNaluComplete),
insertStartCode(false),
bits(false),
codecSpecificHeader(&rtpHeader.type.Video)
codecSpecificHeader(rtpHeader.type.Video)
{
CopyCodecSpecifics(rtpHeader.type.Video);
}
@ -50,9 +67,25 @@ VCMPacket::VCMPacket(const WebRtc_UWord8* ptr, WebRtc_UWord32 size, WebRtc_UWord
completeNALU(kNaluComplete),
insertStartCode(false),
bits(false),
codecSpecificHeader(NULL)
codecSpecificHeader()
{}
void VCMPacket::Reset() {
payloadType = 0;
timestamp = 0;
seqNum = 0;
dataPtr = NULL;
sizeBytes = 0;
markerBit = false;
frameType = kFrameEmpty;
codec = kVideoCodecUnknown;
isFirstPacket = false;
completeNALU = kNaluUnset;
insertStartCode = false;
bits = false;
memset(&codecSpecificHeader, 0, sizeof(RTPVideoHeader));
}
void VCMPacket::CopyCodecSpecifics(const RTPVideoHeader& videoHeader)
{
switch(videoHeader.codec)


@ -21,6 +21,7 @@ namespace webrtc
class VCMPacket
{
public:
VCMPacket();
VCMPacket(const WebRtc_UWord8* ptr,
const WebRtc_UWord32 size,
const WebRtcRTPHeader& rtpHeader);
@ -30,6 +31,8 @@ public:
WebRtc_UWord32 timestamp,
bool markerBit);
void Reset();
WebRtc_UWord8 payloadType;
WebRtc_UWord32 timestamp;
WebRtc_UWord16 seqNum;
@ -48,7 +51,7 @@ public:
// first
// byte should be ORed with the last packet of the
// previous frame.
const RTPVideoHeader *codecSpecificHeader;
RTPVideoHeader codecSpecificHeader;
protected:
void CopyCodecSpecifics(const RTPVideoHeader& videoHeader);


@ -17,7 +17,6 @@
namespace webrtc {
VCMSessionInfo::VCMSessionInfo():
_haveFirstPacket(false),
_markerBit(false),
_sessionNACK(false),
_completeSession(false),
@ -30,15 +29,20 @@ VCMSessionInfo::VCMSessionInfo():
_emptySeqNumHigh(-1),
_markerSeqNum(-1)
{
memset(_packetSizeBytes, 0, sizeof(_packetSizeBytes));
memset(_naluCompleteness, kNaluUnset, sizeof(_naluCompleteness));
memset(_ORwithPrevByte, 0, sizeof(_ORwithPrevByte));
}
VCMSessionInfo::~VCMSessionInfo()
{
}
void
VCMSessionInfo::UpdateDataPointers(const WebRtc_UWord8* frame_buffer,
const WebRtc_UWord8* prev_buffer_address) {
for (int i = 0; i <= _highestPacketIndex; ++i)
_packets[i].dataPtr = frame_buffer + (_packets[i].dataPtr -
prev_buffer_address);
}
WebRtc_Word32
VCMSessionInfo::GetLowSeqNum() const
{
@ -56,23 +60,20 @@ VCMSessionInfo::GetHighSeqNum() const
}
void
VCMSessionInfo::Reset()
{
_lowSeqNum = -1;
_highSeqNum = -1;
_emptySeqNumLow = -1;
_emptySeqNumHigh = -1;
_markerBit = false;
_haveFirstPacket = false;
_completeSession = false;
_frameType = kVideoFrameDelta;
_previousFrameLoss = false;
_sessionNACK = false;
_highestPacketIndex = 0;
_markerSeqNum = -1;
memset(_packetSizeBytes, 0, sizeof(_packetSizeBytes));
memset(_naluCompleteness, kNaluUnset, sizeof(_naluCompleteness));
memset(_ORwithPrevByte, 0, sizeof(_ORwithPrevByte));
VCMSessionInfo::Reset() {
for (int i = 0; i <= _highestPacketIndex; ++i)
_packets[i].Reset();
_lowSeqNum = -1;
_highSeqNum = -1;
_emptySeqNumLow = -1;
_emptySeqNumHigh = -1;
_markerBit = false;
_completeSession = false;
_frameType = kVideoFrameDelta;
_previousFrameLoss = false;
_sessionNACK = false;
_highestPacketIndex = 0;
_markerSeqNum = -1;
}
WebRtc_UWord32
@ -81,7 +82,7 @@ VCMSessionInfo::GetSessionLength()
WebRtc_UWord32 length = 0;
for (WebRtc_Word32 i = 0; i <= _highestPacketIndex; ++i)
{
length += _packetSizeBytes[i];
length += _packets[i].sizeBytes;
}
return length;
}
@ -116,78 +117,66 @@ VCMSessionInfo::InsertBuffer(WebRtc_UWord8* ptrStartOfLayer,
WebRtc_UWord32 offset = 0;
WebRtc_UWord32 packetSize = 0;
// Shallow copy without overwriting the dataPtr and the sizeBytes
const WebRtc_UWord8* dataPtr = _packets[packetIndex].dataPtr;
const WebRtc_UWord32 sizeBytes = _packets[packetIndex].sizeBytes;
_packets[packetIndex] = packet;
_packets[packetIndex].dataPtr = dataPtr;
_packets[packetIndex].sizeBytes = sizeBytes;
// Store this packet length. Add length since we could have data present
// already (e.g. multicall case).
if (packet.bits)
packetSize = packet.sizeBytes;
if (!packet.bits)
{
packetSize = packet.sizeBytes;
packetSize += (packet.insertStartCode ? kH264StartCodeLengthBytes : 0);
}
else
{
packetSize = packet.sizeBytes +
(packet.insertStartCode ? kH264StartCodeLengthBytes : 0);
}
_packetSizeBytes[packetIndex] += packetSize;
// count only the one in our layer
for (i = 0; i < packetIndex; ++i)
{
offset += _packetSizeBytes[i];
offset += _packets[i].sizeBytes;
}
// Set the data pointer to pointing to the first part of this packet.
if (_packets[packetIndex].dataPtr == NULL)
_packets[packetIndex].dataPtr = ptrStartOfLayer + offset;
_packets[packetIndex].sizeBytes += packetSize;
// Calculate the total move length and move the data pointers in advance.
for (i = packetIndex + 1; i <= _highestPacketIndex; ++i)
{
moveLength += _packetSizeBytes[i];
moveLength += _packets[i].sizeBytes;
if (_packets[i].dataPtr != NULL)
_packets[i].dataPtr += packetSize;
}
if (moveLength > 0)
{
memmove((void*)(ptrStartOfLayer + offset + packetSize),
ptrStartOfLayer + offset, moveLength);
memmove((void*)(_packets[packetIndex].dataPtr + packetSize),
_packets[packetIndex].dataPtr, moveLength);
}
if (packet.bits)
if (packet.dataPtr != NULL)
{
// Add the packet without ORing end and start bytes together.
// This is done when the frame is fetched for decoding by calling
// GlueTogether().
_ORwithPrevByte[packetIndex] = true;
if (packet.dataPtr != NULL)
const unsigned char startCode[] = {0, 0, 0, 1};
if (packet.insertStartCode)
{
memcpy((void*)(ptrStartOfLayer + offset), packet.dataPtr,
packetSize);
memcpy((void*)(_packets[packetIndex].dataPtr), startCode,
kH264StartCodeLengthBytes);
}
returnLength = packetSize;
}
else
{
_ORwithPrevByte[packetIndex] = false;
if (packet.dataPtr != NULL)
{
const unsigned char startCode[] = {0, 0, 0, 1};
if (packet.insertStartCode)
{
memcpy((void*)(ptrStartOfLayer + offset), startCode,
kH264StartCodeLengthBytes);
}
memcpy((void*)(ptrStartOfLayer + offset
+ (packet.insertStartCode ? kH264StartCodeLengthBytes : 0)),
packet.dataPtr,
packet.sizeBytes);
}
returnLength = packetSize;
memcpy((void*)(_packets[packetIndex].dataPtr
+ (packet.insertStartCode ? kH264StartCodeLengthBytes : 0)),
packet.dataPtr,
packet.sizeBytes);
}
returnLength = packetSize;
if (packet.isFirstPacket)
{
_haveFirstPacket = true;
}
if (packet.markerBit)
{
_markerBit = true;
_markerSeqNum = packet.seqNum;
}
// Store information about if the packet is decodable as is or not.
_naluCompleteness[packetIndex] = packet.completeNALU;
UpdateCompleteSession();
@ -197,14 +186,14 @@ VCMSessionInfo::InsertBuffer(WebRtc_UWord8* ptrStartOfLayer,
void
VCMSessionInfo::UpdateCompleteSession()
{
if (_haveFirstPacket && _markerBit)
if (_packets[0].isFirstPacket && _markerBit)
{
// Do we have all the packets in this session?
bool completeSession = true;
for (int i = 0; i <= _highestPacketIndex; ++i)
{
if (_naluCompleteness[i] == kNaluUnset)
if (_packets[i].completeNALU == kNaluUnset)
{
completeSession = false;
break;
@ -226,8 +215,8 @@ VCMSessionInfo::FindNaluBorder(WebRtc_Word32 packetIndex,
WebRtc_Word32& startIndex,
WebRtc_Word32& endIndex)
{
if (_naluCompleteness[packetIndex] == kNaluStart ||
_naluCompleteness[packetIndex] == kNaluComplete)
if (_packets[packetIndex].completeNALU == kNaluStart ||
_packets[packetIndex].completeNALU == kNaluComplete)
{
startIndex = packetIndex;
}
@ -236,25 +225,25 @@ VCMSessionInfo::FindNaluBorder(WebRtc_Word32 packetIndex,
for (startIndex = packetIndex - 1; startIndex >= 0; --startIndex)
{
if ((_naluCompleteness[startIndex] == kNaluComplete &&
_packetSizeBytes[startIndex] > 0) ||
if ((_packets[startIndex].completeNALU == kNaluComplete &&
_packets[startIndex].sizeBytes > 0) ||
// Found previous NALU.
(_naluCompleteness[startIndex] == kNaluEnd &&
(_packets[startIndex].completeNALU == kNaluEnd &&
startIndex > 0))
{
startIndex++;
break;
}
// This is where the NALU start.
if (_naluCompleteness[startIndex] == kNaluStart)
if (_packets[startIndex].completeNALU == kNaluStart)
{
break;
}
}
}
if (_naluCompleteness[packetIndex] == kNaluEnd ||
_naluCompleteness[packetIndex] == kNaluComplete)
if (_packets[packetIndex].completeNALU == kNaluEnd ||
_packets[packetIndex].completeNALU == kNaluComplete)
{
endIndex = packetIndex;
}
@ -264,15 +253,15 @@ VCMSessionInfo::FindNaluBorder(WebRtc_Word32 packetIndex,
for (endIndex = packetIndex + 1; endIndex <= _highestPacketIndex;
++endIndex)
{
if ((_naluCompleteness[endIndex] == kNaluComplete &&
_packetSizeBytes[endIndex] > 0) ||
if ((_packets[endIndex].completeNALU == kNaluComplete &&
_packets[endIndex].completeNALU > 0) ||
// Found next NALU.
_naluCompleteness[endIndex] == kNaluStart)
_packets[endIndex].completeNALU == kNaluStart)
{
endIndex--;
break;
}
if (_naluCompleteness[endIndex] == kNaluEnd)
if (_packets[endIndex].completeNALU == kNaluEnd)
{
// This is where the NALU end.
break;
@ -297,8 +286,8 @@ VCMSessionInfo::DeletePackets(WebRtc_UWord8* ptrStartOfLayer,
WebRtc_UWord32 bytesToDelete = 0; /// The number of bytes to delete.
for (int j = startIndex;j <= endIndex; ++j)
{
bytesToDelete += _packetSizeBytes[j];
_packetSizeBytes[j] = 0;
bytesToDelete += _packets[j].sizeBytes;
_packets[j].Reset();
}
if (bytesToDelete > 0)
{
@ -306,28 +295,119 @@ VCMSessionInfo::DeletePackets(WebRtc_UWord8* ptrStartOfLayer,
int destOffset = 0;
for (int j = 0;j < startIndex;j++)
{
destOffset += _packetSizeBytes[j];
destOffset += _packets[j].sizeBytes;
}
//Get the number of bytes to move
// Get the number of bytes to move and move the data pointers in advance
WebRtc_UWord32 numberOfBytesToMove = 0;
for (int j = endIndex + 1; j <= _highestPacketIndex; ++j)
{
numberOfBytesToMove += _packetSizeBytes[j];
if (_packets[j].dataPtr != NULL)
_packets[j].dataPtr -= bytesToDelete;
numberOfBytesToMove += _packets[j].sizeBytes;
}
memmove((void*)(ptrStartOfLayer + destOffset),(void*)(ptrStartOfLayer +
destOffset+bytesToDelete), numberOfBytesToMove);
destOffset + bytesToDelete), numberOfBytesToMove);
}
return bytesToDelete;
}
// Makes the layer decodable. Ie only contain decodable NALU
// return the number of bytes deleted from the session. -1 if an error occurs
int
VCMSessionInfo::BuildVP8FragmentationHeader(
WebRtc_UWord8* frame_buffer,
int frame_buffer_length,
RTPFragmentationHeader* fragmentation) {
int new_length = 0;
// Allocate space for max number of partitions
fragmentation->VerifyAndAllocateFragmentationHeader(kMaxVP8Partitions);
fragmentation->fragmentationVectorSize = 0;
memset(fragmentation->fragmentationLength, 0,
kMaxVP8Partitions * sizeof(WebRtc_UWord32));
if (_lowSeqNum < 0)
return new_length;
int i = FindNextPartitionBeginning(0);
while (i <= _highestPacketIndex) {
const int partition_id =
_packets[i].codecSpecificHeader.codecHeader.VP8.partitionId;
const int partition_end = FindPartitionEnd(i);
fragmentation->fragmentationOffset[partition_id] =
_packets[i].dataPtr - frame_buffer;
assert(fragmentation->fragmentationOffset[partition_id] <
static_cast<WebRtc_UWord32>(frame_buffer_length));
fragmentation->fragmentationLength[partition_id] =
_packets[partition_end].dataPtr + _packets[partition_end].sizeBytes -
_packets[i].dataPtr;
assert(fragmentation->fragmentationLength[partition_id] <=
static_cast<WebRtc_UWord32>(frame_buffer_length));
new_length += fragmentation->fragmentationLength[partition_id];
i = FindNextPartitionBeginning(partition_end + 1);
if (partition_id + 1 > fragmentation->fragmentationVectorSize)
fragmentation->fragmentationVectorSize = partition_id + 1;
}
// Set all empty fragments to start where the previous fragment ends,
// and have zero length.
if (fragmentation->fragmentationLength[0] == 0)
fragmentation->fragmentationOffset[0] = 0;
for (i = 1; i < fragmentation->fragmentationVectorSize; ++i) {
if (fragmentation->fragmentationLength[i] == 0)
fragmentation->fragmentationOffset[i] =
fragmentation->fragmentationOffset[i - 1] +
fragmentation->fragmentationLength[i - 1];
assert(i == 0 ||
fragmentation->fragmentationOffset[i] >=
fragmentation->fragmentationOffset[i - 1]);
}
assert(new_length <= frame_buffer_length);
return new_length;
}
int VCMSessionInfo::FindNextPartitionBeginning(int packet_index) const {
while (packet_index <= _highestPacketIndex) {
if (_packets[packet_index].completeNALU == kNaluUnset) {
// Missing packet
++packet_index;
continue;
}
const bool beginning = _packets[packet_index].codecSpecificHeader.
codecHeader.VP8.beginningOfPartition;
if (beginning)
return packet_index;
++packet_index;
}
return packet_index;
}
int VCMSessionInfo::FindPartitionEnd(int packet_index) const {
const int partition_id = _packets[packet_index].codecSpecificHeader.
codecHeader.VP8.partitionId;
while (packet_index <= _highestPacketIndex) {
const bool beginning = _packets[packet_index].codecSpecificHeader.
codecHeader.VP8.beginningOfPartition;
const bool packet_loss_found =
(_packets[packet_index].completeNALU == kNaluUnset || (!beginning &&
!InSequence(_packets[packet_index].seqNum,
_packets[packet_index - 1].seqNum)));
const int current_partition_id = _packets[packet_index].codecSpecificHeader.
codecHeader.VP8.partitionId;
if (packet_loss_found || current_partition_id != partition_id) {
// Missing packet, the previous packet was the last in sequence.
return packet_index - 1;
}
++packet_index;
}
return packet_index - 1;
}
bool VCMSessionInfo::InSequence(WebRtc_UWord16 seqNum,
WebRtc_UWord16 prevSeqNum) {
// prevSeqNum is allowed to wrap around here
return (static_cast<WebRtc_UWord16>(prevSeqNum + 1) == seqNum);
}
WebRtc_UWord32
VCMSessionInfo::MakeSessionDecodable(WebRtc_UWord8* ptrStartOfLayer)
VCMSessionInfo::MakeDecodable(WebRtc_UWord8* ptrStartOfLayer)
{
if (_lowSeqNum < 0) // No packets in this session
{
@ -340,7 +420,7 @@ VCMSessionInfo::MakeSessionDecodable(WebRtc_UWord8* ptrStartOfLayer)
WebRtc_UWord32 returnLength = 0;
for (packetIndex = 0; packetIndex <= _highestPacketIndex; ++packetIndex)
{
if (_naluCompleteness[packetIndex] == kNaluUnset) // Found a lost packet
if (_packets[packetIndex].completeNALU == kNaluUnset) // Found a lost packet
{
FindNaluBorder(packetIndex, startIndex, endIndex);
if (startIndex == -1)
@ -360,9 +440,9 @@ VCMSessionInfo::MakeSessionDecodable(WebRtc_UWord8* ptrStartOfLayer)
// Make sure the first packet is decodable (Either complete nalu or start
// of NALU)
if (_packetSizeBytes[0] > 0)
if (_packets[0].sizeBytes > 0)
{
switch (_naluCompleteness[0])
switch (_packets[0].completeNALU)
{
case kNaluComplete: // Packet can be decoded as is.
break;
@ -372,7 +452,7 @@ VCMSessionInfo::MakeSessionDecodable(WebRtc_UWord8* ptrStartOfLayer)
break;
case kNaluIncomplete: //Packet is not beginning or end of NALU
// Need to find the end of this NALU and delete all packets.
FindNaluBorder(0,startIndex,endIndex);
FindNaluBorder(0, startIndex, endIndex);
if (endIndex == -1) // No end found. Delete
{
endIndex = _highestPacketIndex;
@ -421,7 +501,7 @@ VCMSessionInfo::ZeroOutSeqNum(WebRtc_Word32* list,
int i = 0;
while ( i <= _highestPacketIndex && index < numberOfSeqNum)
{
if (_naluCompleteness[i] != kNaluUnset)
if (_packets[i].completeNALU != kNaluUnset)
{
list[index] = -1;
}
@ -432,7 +512,7 @@ VCMSessionInfo::ZeroOutSeqNum(WebRtc_Word32* list,
i++;
index++;
}
if (!_haveFirstPacket)
if (!_packets[0].isFirstPacket)
{
_sessionNACK = true;
}
@ -478,7 +558,7 @@ VCMSessionInfo::ZeroOutSeqNumHybrid(WebRtc_Word32* list,
}
}
bool allowNack = false;
if (!_haveFirstPacket || !isBaseAvailable)
if (!_packets[0].isFirstPacket || !isBaseAvailable)
{
allowNack = true;
}
@ -502,7 +582,7 @@ VCMSessionInfo::ZeroOutSeqNumHybrid(WebRtc_Word32* list,
while (list[index] <= highMediaPacket && index < numberOfSeqNum)
{
if (_naluCompleteness[i] != kNaluUnset)
if (_packets[i].completeNALU != kNaluUnset)
{
list[index] = -1;
}
@ -585,7 +665,7 @@ VCMSessionInfo::UpdatePacketSize(WebRtc_Word32 packetIndex,
assert(!"SessionInfo::UpdatePacketSize Error: invalid packetIndex");
return;
}
_packetSizeBytes[packetIndex] = length;
_packets[packetIndex].sizeBytes = length;
}
WebRtc_Word64
@ -663,24 +743,11 @@ VCMSessionInfo::InsertPacket(const VCMPacket& packet,
return -1;
}
// Shift _ORwithPrevByte array
memmove(&_ORwithPrevByte[positionsToShift],
&_ORwithPrevByte[0], numOfPacketsToMove*sizeof(bool));
memset(&_ORwithPrevByte[0], false, positionsToShift*sizeof(bool));
// Shift _packetSizeBytes array
memmove(&_packetSizeBytes[positionsToShift],
&_packetSizeBytes[0],
numOfPacketsToMove * sizeof(WebRtc_UWord32));
memset(&_packetSizeBytes[0], 0,
positionsToShift * sizeof(WebRtc_UWord32));
// Shift _naluCompleteness
memmove(&_naluCompleteness[positionsToShift],
&_naluCompleteness[0],
numOfPacketsToMove * sizeof(WebRtc_UWord8));
memset(&_naluCompleteness[0], kNaluUnset,
positionsToShift * sizeof(WebRtc_UWord8));
memmove(&_packets[positionsToShift],
&_packets[0], numOfPacketsToMove * sizeof(VCMPacket));
for (int i = 0; i < positionsToShift; ++i)
_packets[i].Reset();
_highestPacketIndex += positionsToShift;
_lowSeqNum = packet.seqNum;
@ -699,7 +766,7 @@ VCMSessionInfo::InsertPacket(const VCMPacket& packet,
}
// Check for duplicate packets
if (_packetSizeBytes[packetIndex] != 0)
if (_packets[packetIndex].sizeBytes != 0)
{
// We have already received a packet with this seq number, ignore it.
return -2;
@ -768,21 +835,21 @@ VCMSessionInfo::PrepareForDecode(WebRtc_UWord8* ptrStartOfLayer,
bool previousLost = false;
for (int i = 0; i <= _highestPacketIndex; i++)
{
if (_ORwithPrevByte[i])
if (_packets[i].bits)
{
if (currentPacketOffset > 0)
{
WebRtc_UWord8* ptrFirstByte = ptrStartOfLayer +
currentPacketOffset;
if (_packetSizeBytes[i-1] == 0 || previousLost)
if (_packets[i - 1].sizeBytes == 0 || previousLost)
{
// It is better to throw away this packet if we are
// missing the previous packet.
memset(ptrFirstByte, 0, _packetSizeBytes[i]);
memset(ptrFirstByte, 0, _packets[i].sizeBytes);
previousLost = true;
}
else if (_packetSizeBytes[i] > 0) // Ignore if empty packet
else if (_packets[i].sizeBytes > 0) // Ignore if empty packet
{
// Glue with previous byte
// Move everything from [this packet start + 1,
@ -793,40 +860,41 @@ VCMSessionInfo::PrepareForDecode(WebRtc_UWord8* ptrStartOfLayer,
(currentPacketOffset + 1);
memmove((void*)ptrFirstByte, (void*)(ptrFirstByte + 1),
lengthToEnd);
_packetSizeBytes[i]--;
_packets[i].sizeBytes--;
length--;
previousLost = false;
realDataBytes += _packetSizeBytes[i];
realDataBytes += _packets[i].sizeBytes;
}
}
else
{
memset(ptrStartOfLayer, 0, _packetSizeBytes[i]);
memset(ptrStartOfLayer, 0, _packets[i].sizeBytes);
previousLost = true;
}
}
else if (_packetSizeBytes[i] == 0 && codec == kVideoCodecH263)
else if (_packets[i].sizeBytes == 0 && codec == kVideoCodecH263)
{
WebRtc_UWord8* ptrFirstByte = ptrStartOfLayer + currentPacketOffset;
memmove(ptrFirstByte + 10, ptrFirstByte,
length - currentPacketOffset);
memset(ptrFirstByte, 0, 10);
_packetSizeBytes[i] = 10;
length += _packetSizeBytes[i];
_packets[i].sizeBytes = 10;
length += _packets[i].sizeBytes;
previousLost = true;
}
else
{
realDataBytes += _packetSizeBytes[i];
realDataBytes += _packets[i].sizeBytes;
previousLost = false;
}
currentPacketOffset += _packetSizeBytes[i];
currentPacketOffset += _packets[i].sizeBytes;
}
if (realDataBytes == 0)
{
// Drop the frame since all it contains are zeros
length = 0;
memset(_packetSizeBytes, 0, sizeof(_packetSizeBytes));
for (int i = 0; i <= _highestPacketIndex; ++i)
_packets[i].Reset();
}
return length;
}


@ -18,6 +18,8 @@
namespace webrtc
{
enum { kMaxVP8Partitions = 9 };
class VCMSessionInfo
{
public:
@ -26,6 +28,9 @@ public:
VCMSessionInfo(const VCMSessionInfo& rhs);
void UpdateDataPointers(const WebRtc_UWord8* frame_buffer,
const WebRtc_UWord8* prev_buffer_address);
WebRtc_Word32 ZeroOutSeqNum(WebRtc_Word32* list,
WebRtc_Word32 numberOfSeqNum);
// Hybrid version: Zero out seq num for NACK list
@ -40,7 +45,19 @@ public:
WebRtc_Word32 InformOfEmptyPacket(const WebRtc_UWord16 seqNum);
virtual bool IsSessionComplete();
WebRtc_UWord32 MakeSessionDecodable(WebRtc_UWord8* ptrStartOfLayer);
// Builds fragmentation headers for VP8, each fragment being a decodable
// VP8 partition. Returns the total number of bytes which are decodable. Is
// used instead of MakeDecodable for VP8.
int BuildVP8FragmentationHeader(WebRtc_UWord8* frame_buffer,
int frame_buffer_length,
RTPFragmentationHeader* fragmentation);
// Makes the frame decodable. I.e., only contain decodable NALUs. All
// non-decodable NALUs will be deleted and packets will be moved in
// memory to remove any empty space.
// Returns the number of bytes deleted from the session.
WebRtc_UWord32 MakeDecodable(WebRtc_UWord8* ptrStartOfLayer);
WebRtc_UWord32 GetSessionLength();
bool HaveLastPacket();
@ -67,6 +84,13 @@ public:
bool PreviousFrameLoss() const { return _previousFrameLoss; }
protected:
// Finds the packet index of the next VP8 partition. If none is found
// _highestPacketIndex + 1 is returned.
int FindNextPartitionBeginning(int packet_index) const;
// Finds the packet index of the end of the partition with index
// partitionIndex.
int FindPartitionEnd(int packet_index) const;
static bool InSequence(WebRtc_UWord16 seqNum, WebRtc_UWord16 prevSeqNum);
WebRtc_UWord32 InsertBuffer(WebRtc_UWord8* ptrStartOfLayer,
WebRtc_Word32 packetIndex,
const VCMPacket& packet);
@ -77,13 +101,11 @@ protected:
WebRtc_Word32 startIndex,
WebRtc_Word32 endIndex);
void UpdateCompleteSession();
// If we have inserted the first packet into this frame
bool _haveFirstPacket;
// If we have inserted a packet with markerbit into this frame
bool _markerBit;
bool _markerBit;
// If this session has been NACKed by JB
bool _sessionNACK;
bool _completeSession;
bool _sessionNACK;
bool _completeSession;
webrtc::FrameType _frameType;
bool _previousFrameLoss;
// Lowest/Highest packet sequence number in a session
@ -92,15 +114,15 @@ protected:
// Highest packet index in this frame
WebRtc_UWord16 _highestPacketIndex;
// Length of packet (used for reordering)
WebRtc_UWord32 _packetSizeBytes[kMaxPacketsInJitterBuffer];
// Completeness of packets. Used for deciding if the frame is decodable.
WebRtc_UWord8 _naluCompleteness[kMaxPacketsInJitterBuffer];
// Packets in this frame.
// TODO(holmer): Replace this with a std::list<VCMPacket*>. Doing that will
// make it possible to get rid of _markerBit, _lowSeqNum,
// _highSeqNum, _highestPacketIndex, etc.
VCMPacket _packets[kMaxPacketsInJitterBuffer];
WebRtc_Word32 _emptySeqNumLow;
WebRtc_Word32 _emptySeqNumHigh;
// Store the sequence number that marks the last media packet
WebRtc_Word32 _markerSeqNum;
bool _ORwithPrevByte[kMaxPacketsInJitterBuffer];
};
} // namespace webrtc


@ -0,0 +1,415 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include <string.h>
#include "gtest/gtest.h"
#include "module_common_types.h"
#include "packet.h"
#include "session_info.h"
using webrtc::RTPFragmentationHeader;
using webrtc::RTPVideoHeaderVP8;
using webrtc::VCMPacket;
using webrtc::VCMSessionInfo;
using webrtc::WebRtcRTPHeader;
enum { kPacketBufferSize = 10 };
enum { kFrameBufferSize = 10*kPacketBufferSize };
class TestVP8MakeDecodable : public ::testing::Test {
protected:
virtual void SetUp() {
memset(packet_buffer_, 0, kPacketBufferSize);
memset(frame_buffer_, 0, kFrameBufferSize);
vp8_header_ = &packet_header_.type.Video.codecHeader.VP8;
packet_header_.frameType = webrtc::kVideoFrameDelta;
packet_header_.type.Video.codec = webrtc::kRTPVideoVP8;
vp8_header_->InitRTPVideoHeaderVP8();
fragmentation_.VerifyAndAllocateFragmentationHeader(
webrtc::kMaxVP8Partitions);
}
void FillPacket(WebRtc_UWord8 start_value) {
for (int i = 0; i < kPacketBufferSize; ++i)
packet_buffer_[i] = start_value + i;
}
bool VerifyPartition(int partition_id,
int packets_expected,
int start_value) {
EXPECT_EQ(static_cast<WebRtc_UWord32>(packets_expected * kPacketBufferSize),
fragmentation_.fragmentationLength[partition_id]);
for (int i = 0; i < packets_expected; ++i) {
int packet_index = fragmentation_.fragmentationOffset[partition_id] +
i * kPacketBufferSize;
for (int j = 0; j < kPacketBufferSize; ++j) {
if (packet_index + j > kFrameBufferSize)
return false;
EXPECT_EQ(start_value + i + j, frame_buffer_[packet_index + j]);
}
}
return true;
}
WebRtc_UWord8 packet_buffer_[kPacketBufferSize];
WebRtc_UWord8 frame_buffer_[kFrameBufferSize];
WebRtcRTPHeader packet_header_;
VCMSessionInfo session_;
RTPVideoHeaderVP8* vp8_header_;
RTPFragmentationHeader fragmentation_;
};
TEST_F(TestVP8MakeDecodable, TwoPartitionsOneLoss) {
// Partition 0 | Partition 1
// [ 0 ] [ 2 ] | [ 3 ]
packet_header_.type.Video.isFirstPacket = true;
vp8_header_->beginningOfPartition = true;
vp8_header_->partitionId = 0;
packet_header_.header.markerBit = false;
packet_header_.header.sequenceNumber = 0;
FillPacket(0);
VCMPacket* packet = new VCMPacket(packet_buffer_, kPacketBufferSize,
packet_header_);
session_.SetStartSeqNumber(packet_header_.header.sequenceNumber);
ASSERT_EQ(session_.InsertPacket(*packet, frame_buffer_), kPacketBufferSize);
delete packet;
packet_header_.type.Video.isFirstPacket = false;
vp8_header_->partitionId = 0;
vp8_header_->beginningOfPartition = false;
packet_header_.header.markerBit = false;
packet_header_.header.sequenceNumber += 2;
FillPacket(2);
packet = new VCMPacket(packet_buffer_, kPacketBufferSize, packet_header_);
ASSERT_EQ(session_.InsertPacket(*packet, frame_buffer_), kPacketBufferSize);
delete packet;
packet_header_.type.Video.isFirstPacket = false;
vp8_header_->partitionId = 1;
vp8_header_->beginningOfPartition = true;
packet_header_.header.markerBit = true;
packet_header_.header.sequenceNumber += 1;
FillPacket(3);
packet = new VCMPacket(packet_buffer_, kPacketBufferSize, packet_header_);
ASSERT_EQ(session_.InsertPacket(*packet, frame_buffer_), kPacketBufferSize);
delete packet;
// One packet should be removed (end of partition 0).
ASSERT_EQ(session_.BuildVP8FragmentationHeader(frame_buffer_,
kFrameBufferSize,
&fragmentation_),
2*kPacketBufferSize);
SCOPED_TRACE("Calling VerifyPartition");
EXPECT_TRUE(VerifyPartition(0, 1, 0));
SCOPED_TRACE("Calling VerifyPartition");
EXPECT_TRUE(VerifyPartition(1, 1, 3));
}
TEST_F(TestVP8MakeDecodable, TwoPartitionsOneLoss2) {
// Partition 0 | Partition 1
// [ 1 ] [ 2 ] | [ 3 ] [ 5 ]
packet_header_.type.Video.isFirstPacket = true;
vp8_header_->beginningOfPartition = true;
vp8_header_->partitionId = 0;
packet_header_.header.markerBit = false;
packet_header_.header.sequenceNumber = 1;
FillPacket(1);
VCMPacket* packet = new VCMPacket(packet_buffer_, kPacketBufferSize,
packet_header_);
session_.SetStartSeqNumber(packet_header_.header.sequenceNumber);
ASSERT_EQ(session_.InsertPacket(*packet, frame_buffer_), kPacketBufferSize);
delete packet;
packet_header_.type.Video.isFirstPacket = false;
vp8_header_->partitionId = 0;
vp8_header_->beginningOfPartition = false;
packet_header_.header.markerBit = false;
packet_header_.header.sequenceNumber += 1;
FillPacket(2);
packet = new VCMPacket(packet_buffer_, kPacketBufferSize, packet_header_);
ASSERT_EQ(session_.InsertPacket(*packet, frame_buffer_), kPacketBufferSize);
delete packet;
packet_header_.type.Video.isFirstPacket = false;
vp8_header_->partitionId = 1;
vp8_header_->beginningOfPartition = true;
packet_header_.header.markerBit = false;
packet_header_.header.sequenceNumber += 1;
FillPacket(3);
packet = new VCMPacket(packet_buffer_, kPacketBufferSize, packet_header_);
ASSERT_EQ(session_.InsertPacket(*packet, frame_buffer_), kPacketBufferSize);
delete packet;
packet_header_.type.Video.isFirstPacket = false;
vp8_header_->partitionId = 1;
vp8_header_->beginningOfPartition = false;
packet_header_.header.markerBit = true;
packet_header_.header.sequenceNumber += 2;
FillPacket(5);
packet = new VCMPacket(packet_buffer_, kPacketBufferSize, packet_header_);
ASSERT_EQ(session_.InsertPacket(*packet, frame_buffer_), kPacketBufferSize);
delete packet;
// One packet should be removed (end of partition 2), 3 left.
ASSERT_EQ(session_.BuildVP8FragmentationHeader(frame_buffer_,
kFrameBufferSize,
&fragmentation_),
3*kPacketBufferSize);
SCOPED_TRACE("Calling VerifyPartition");
EXPECT_TRUE(VerifyPartition(0, 2, 1));
SCOPED_TRACE("Calling VerifyPartition");
EXPECT_TRUE(VerifyPartition(1, 1, 3));
}
TEST_F(TestVP8MakeDecodable, TwoPartitionsNoLossWrap) {
// Partition 0 | Partition 1
// [ fffd ] [ fffe ] | [ ffff ] [ 0 ]
packet_header_.type.Video.isFirstPacket = true;
vp8_header_->beginningOfPartition = true;
vp8_header_->partitionId = 0;
packet_header_.header.markerBit = false;
packet_header_.header.sequenceNumber = 0xfffd;
FillPacket(0);
VCMPacket* packet = new VCMPacket(packet_buffer_, kPacketBufferSize,
packet_header_);
session_.SetStartSeqNumber(packet_header_.header.sequenceNumber);
ASSERT_EQ(session_.InsertPacket(*packet, frame_buffer_), kPacketBufferSize);
delete packet;
packet_header_.type.Video.isFirstPacket = false;
vp8_header_->partitionId = 0;
vp8_header_->beginningOfPartition = false;
packet_header_.header.markerBit = false;
packet_header_.header.sequenceNumber += 1;
FillPacket(1);
packet = new VCMPacket(packet_buffer_, kPacketBufferSize, packet_header_);
ASSERT_EQ(session_.InsertPacket(*packet, frame_buffer_), kPacketBufferSize);
delete packet;
packet_header_.type.Video.isFirstPacket = false;
vp8_header_->partitionId = 1;
vp8_header_->beginningOfPartition = true;
packet_header_.header.markerBit = false;
packet_header_.header.sequenceNumber += 1;
FillPacket(2);
packet = new VCMPacket(packet_buffer_, kPacketBufferSize, packet_header_);
ASSERT_EQ(session_.InsertPacket(*packet, frame_buffer_), kPacketBufferSize);
delete packet;
packet_header_.type.Video.isFirstPacket = false;
vp8_header_->partitionId = 1;
vp8_header_->beginningOfPartition = false;
packet_header_.header.markerBit = true;
packet_header_.header.sequenceNumber += 1;
FillPacket(3);
packet = new VCMPacket(packet_buffer_, kPacketBufferSize, packet_header_);
ASSERT_EQ(session_.InsertPacket(*packet, frame_buffer_), kPacketBufferSize);
delete packet;
// No packet should be removed.
ASSERT_EQ(session_.BuildVP8FragmentationHeader(frame_buffer_,
kFrameBufferSize,
&fragmentation_),
4*kPacketBufferSize);
SCOPED_TRACE("Calling VerifyPartition");
EXPECT_TRUE(VerifyPartition(0, 2, 0));
SCOPED_TRACE("Calling VerifyPartition");
EXPECT_TRUE(VerifyPartition(1, 2, 2));
}
TEST_F(TestVP8MakeDecodable, TwoPartitionsLossWrap) {
// Partition 0 | Partition 1
// [ fffd ] [ fffe ] | [ ffff ] [ 1 ]
packet_header_.type.Video.isFirstPacket = true;
vp8_header_->beginningOfPartition = true;
vp8_header_->partitionId = 0;
packet_header_.header.markerBit = false;
packet_header_.header.sequenceNumber = 0xfffd;
FillPacket(0);
VCMPacket* packet = new VCMPacket(packet_buffer_, kPacketBufferSize,
packet_header_);
session_.SetStartSeqNumber(packet_header_.header.sequenceNumber);
ASSERT_EQ(session_.InsertPacket(*packet, frame_buffer_), kPacketBufferSize);
delete packet;
packet_header_.type.Video.isFirstPacket = false;
vp8_header_->partitionId = 0;
vp8_header_->beginningOfPartition = false;
packet_header_.header.markerBit = false;
packet_header_.header.sequenceNumber += 1;
FillPacket(1);
packet = new VCMPacket(packet_buffer_, kPacketBufferSize, packet_header_);
ASSERT_EQ(session_.InsertPacket(*packet, frame_buffer_), kPacketBufferSize);
delete packet;
packet_header_.type.Video.isFirstPacket = false;
vp8_header_->partitionId = 1;
vp8_header_->beginningOfPartition = true;
packet_header_.header.markerBit = false;
packet_header_.header.sequenceNumber += 1;
FillPacket(2);
packet = new VCMPacket(packet_buffer_, kPacketBufferSize, packet_header_);
ASSERT_EQ(session_.InsertPacket(*packet, frame_buffer_), kPacketBufferSize);
delete packet;
packet_header_.type.Video.isFirstPacket = false;
vp8_header_->partitionId = 1;
vp8_header_->beginningOfPartition = false;
packet_header_.header.markerBit = true;
packet_header_.header.sequenceNumber += 2;
FillPacket(3);
packet = new VCMPacket(packet_buffer_, kPacketBufferSize, packet_header_);
ASSERT_EQ(session_.InsertPacket(*packet, frame_buffer_), kPacketBufferSize);
delete packet;
// One packet should be removed from the last partition
ASSERT_EQ(session_.BuildVP8FragmentationHeader(frame_buffer_,
kFrameBufferSize,
&fragmentation_),
3*kPacketBufferSize);
SCOPED_TRACE("Calling VerifyPartition");
EXPECT_TRUE(VerifyPartition(0, 2, 0));
SCOPED_TRACE("Calling VerifyPartition");
EXPECT_TRUE(VerifyPartition(1, 1, 2));
}
TEST_F(TestVP8MakeDecodable, ThreePartitionsOneMissing) {
// Partition 1 |Partition 2 | Partition 3
// [ 1 ] [ 2 ] | | [ 5 ] | [ 6 ]
packet_header_.type.Video.isFirstPacket = true;
vp8_header_->beginningOfPartition = true;
vp8_header_->partitionId = 0;
packet_header_.header.markerBit = false;
packet_header_.header.sequenceNumber = 1;
FillPacket(1);
VCMPacket* packet = new VCMPacket(packet_buffer_, kPacketBufferSize,
packet_header_);
session_.SetStartSeqNumber(packet_header_.header.sequenceNumber);
ASSERT_EQ(session_.InsertPacket(*packet, frame_buffer_), kPacketBufferSize);
delete packet;
packet_header_.type.Video.isFirstPacket = false;
vp8_header_->partitionId = 0;
vp8_header_->beginningOfPartition = false;
packet_header_.header.markerBit = false;
packet_header_.header.sequenceNumber += 1;
FillPacket(2);
packet = new VCMPacket(packet_buffer_, kPacketBufferSize, packet_header_);
ASSERT_EQ(session_.InsertPacket(*packet, frame_buffer_), kPacketBufferSize);
delete packet;
packet_header_.type.Video.isFirstPacket = false;
vp8_header_->partitionId = 2;
vp8_header_->beginningOfPartition = true;
packet_header_.header.markerBit = false;
packet_header_.header.sequenceNumber += 3;
FillPacket(5);
packet = new VCMPacket(packet_buffer_, kPacketBufferSize, packet_header_);
ASSERT_EQ(session_.InsertPacket(*packet, frame_buffer_), kPacketBufferSize);
delete packet;
packet_header_.type.Video.isFirstPacket = false;
vp8_header_->partitionId = 2;
vp8_header_->beginningOfPartition = false;
packet_header_.header.markerBit = true;
packet_header_.header.sequenceNumber += 1;
FillPacket(6);
packet = new VCMPacket(packet_buffer_, kPacketBufferSize, packet_header_);
ASSERT_EQ(session_.InsertPacket(*packet, frame_buffer_), kPacketBufferSize);
delete packet;
// No packet should be removed.
ASSERT_EQ(session_.BuildVP8FragmentationHeader(frame_buffer_,
kFrameBufferSize,
&fragmentation_),
4*kPacketBufferSize);
SCOPED_TRACE("Calling VerifyPartition");
EXPECT_TRUE(VerifyPartition(0, 2, 1));
SCOPED_TRACE("Calling VerifyPartition");
EXPECT_TRUE(VerifyPartition(2, 2, 5));
}
TEST_F(TestVP8MakeDecodable, ThreePartitionsLossInSecond) {
// Partition 0 |Partition 1 | Partition 2
// [ 1 ] [ 2 ] | [ 4 ] [ 5 ] | [ 6 ] [ 7 ]
packet_header_.type.Video.isFirstPacket = true;
vp8_header_->beginningOfPartition = true;
vp8_header_->partitionId = 0;
packet_header_.header.markerBit = false;
packet_header_.header.sequenceNumber = 1;
FillPacket(1);
VCMPacket* packet = new VCMPacket(packet_buffer_, kPacketBufferSize,
packet_header_);
session_.SetStartSeqNumber(packet_header_.header.sequenceNumber);
ASSERT_EQ(session_.InsertPacket(*packet, frame_buffer_), kPacketBufferSize);
delete packet;
packet_header_.type.Video.isFirstPacket = false;
vp8_header_->partitionId = 0;
vp8_header_->beginningOfPartition = false;
packet_header_.header.markerBit = false;
packet_header_.header.sequenceNumber += 1;
FillPacket(2);
packet = new VCMPacket(packet_buffer_, kPacketBufferSize, packet_header_);
ASSERT_EQ(session_.InsertPacket(*packet, frame_buffer_), kPacketBufferSize);
delete packet;
packet_header_.type.Video.isFirstPacket = false;
vp8_header_->partitionId = 1;
vp8_header_->beginningOfPartition = false;
packet_header_.header.markerBit = false;
packet_header_.header.sequenceNumber += 2;
FillPacket(4);
packet = new VCMPacket(packet_buffer_, kPacketBufferSize, packet_header_);
ASSERT_EQ(session_.InsertPacket(*packet, frame_buffer_), kPacketBufferSize);
delete packet;
packet_header_.type.Video.isFirstPacket = false;
vp8_header_->partitionId = 1;
vp8_header_->beginningOfPartition = false;
packet_header_.header.markerBit = false;
packet_header_.header.sequenceNumber += 1;
FillPacket(5);
packet = new VCMPacket(packet_buffer_, kPacketBufferSize, packet_header_);
ASSERT_EQ(session_.InsertPacket(*packet, frame_buffer_), kPacketBufferSize);
delete packet;
packet_header_.type.Video.isFirstPacket = false;
vp8_header_->partitionId = 2;
vp8_header_->beginningOfPartition = true;
packet_header_.header.markerBit = false;
packet_header_.header.sequenceNumber += 1;
FillPacket(6);
packet = new VCMPacket(packet_buffer_, kPacketBufferSize, packet_header_);
ASSERT_EQ(session_.InsertPacket(*packet, frame_buffer_), kPacketBufferSize);
delete packet;
packet_header_.type.Video.isFirstPacket = false;
vp8_header_->partitionId = 2;
vp8_header_->beginningOfPartition = false;
packet_header_.header.markerBit = true;
packet_header_.header.sequenceNumber += 1;
FillPacket(7);
packet = new VCMPacket(packet_buffer_, kPacketBufferSize, packet_header_);
ASSERT_EQ(session_.InsertPacket(*packet, frame_buffer_), kPacketBufferSize);
delete packet;
// 2 partitions left. 2 packets removed from second partition
ASSERT_EQ(session_.BuildVP8FragmentationHeader(frame_buffer_,
kFrameBufferSize,
&fragmentation_),
4*kPacketBufferSize);
SCOPED_TRACE("Calling VerifyPartition");
EXPECT_TRUE(VerifyPartition(0, 2, 1));
SCOPED_TRACE("Calling VerifyPartition");
EXPECT_TRUE(VerifyPartition(2, 2, 6));
}


@ -70,6 +70,22 @@
], # conditions
},
{
'target_name': 'video_coding_unit_test',
'type': 'executable',
'dependencies': [
'../../../../system_wrappers/source/system_wrappers.gyp:system_wrappers',
'../../../../../testing/gtest.gyp:gtest',
'../../../../../testing/gtest.gyp:gtest_main',
'video_coding.gyp:webrtc_video_coding',
],
'include_dirs': [
'../../../interface',
],
'sources': [
'session_info_unittest.cc',
],
},
],
}