git-svn-id: http://webrtc.googlecode.com/svn/trunk@4 4adac7df-926f-26a2-2b94-8c16560cd09d

This commit is contained in:
niklase@google.com
2011-05-30 11:22:19 +00:00
parent 01813fe945
commit 77ae29bc81
1153 changed files with 404089 additions and 0 deletions

View File

@@ -0,0 +1,799 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "codec_database.h"
#include "../../../../engine_configurations.h"
#include "internal_defines.h"
#include "trace.h"
#if defined(_WIN32)
// VS 2005: Don't warn for default initialized arrays. See help for more info.
// Don't warn for strncpy being unsecure.
// switch statement contains 'default' but no 'case' labels
#pragma warning(disable:4351; disable:4996; disable:4065)
#endif
// Supported codecs
#ifdef VIDEOCODEC_VP8
#include "vp8.h"
#endif
#ifdef VIDEOCODEC_I420
#include "i420.h"
#endif
namespace webrtc
{
// Bundles a receive codec's settings with its decode configuration.
// The map that stores this item takes ownership of |settings| and frees it
// when the entry is erased (see DeRegisterReceiveCodec / ResetReceiver).
VCMDecoderMapItem::VCMDecoderMapItem(VideoCodec* settings,
                                     WebRtc_UWord32 numberOfCores,
                                     bool requireKeyFrame)
    : _settings(settings),
      _numberOfCores(numberOfCores),
      _requireKeyFrame(requireKeyFrame)
{
}
// Associates an application-owned external decoder with a payload type.
// The decoder instance itself is NOT owned by this item; only the wrapper
// object is freed by the database.
VCMExtDecoderMapItem::VCMExtDecoderMapItem(VideoDecoder* externalDecoderInstance,
                                           WebRtc_UWord8 payloadType,
                                           bool internalRenderTiming)
    : _payloadType(payloadType),
      _externalDecoderInstance(externalDecoderInstance),
      _internalRenderTiming(internalRenderTiming)
{
}
// Construct an empty database: no encoder or decoder instantiated, no
// external codecs registered, default payload size.
VCMCodecDataBase::VCMCodecDataBase(WebRtc_Word32 id):
_id(id),
_numberOfCores(0),
_maxPayloadSize(kDefaultPayloadSize),
_periodicKeyFrames(false),
_currentEncIsExternal(false),
_sendCodec(),
_receiveCodec(),
_externalPayloadType(0),
_externalEncoder(NULL),
_internalSource(false),
_ptrEncoder(NULL),
_ptrDecoder(NULL),
_currentDecIsExternal(false),
_decMap(),
_decExternalMap()
{
//
}
// Releases all send- and receive-side state (see Reset()).
VCMCodecDataBase::~VCMCodecDataBase()
{
Reset();
}
// Appends the version strings of all built-in codecs to |version| at
// |position|, updating |position| and |remainingBufferInBytes| as it goes.
// Returns VCM_OK, VCM_MEMORY on allocation failure, or the codec's negative
// error code.
WebRtc_Word32
VCMCodecDataBase::Version(WebRtc_Word8* version,
                          WebRtc_UWord32& remainingBufferInBytes,
                          WebRtc_UWord32& position) const
{
    VideoCodec settings;
    for (int i = 0; i < VCMCodecDataBase::NumberOfCodecs(); i++)
    {
        WebRtc_Word32 ret = VCMCodecDataBase::Codec(i, &settings);
        if (ret < 0)
        {
            return ret;
        }
        VCMGenericEncoder* encoder = CreateEncoder(settings.codecType);
        if (encoder == NULL)
        {
            return VCM_MEMORY;
        }
        ret = encoder->_encoder.Version(&version[position], remainingBufferInBytes);
        // Free the wrapped encoder and its wrapper unconditionally; the
        // original code skipped this on the error path and leaked both.
        delete &encoder->_encoder;
        delete encoder;
        if (ret < 0)
        {
            return ret;
        }
        remainingBufferInBytes -= ret;
        position += ret;
    }
    return VCM_OK;
}
// Releases all memory on both the receive and the send side.
// Returns the first error encountered, otherwise VCM_OK.
WebRtc_Word32
VCMCodecDataBase::Reset()
{
    WebRtc_Word32 result = ResetReceiver();
    if (result >= 0)
    {
        result = ResetSender();
    }
    return (result < 0) ? result : VCM_OK;
}
// Resets the send side: drops any encoder instance and restores the
// default periodic key frame setting.
WebRtc_Word32
VCMCodecDataBase::ResetSender()
{
    _periodicKeyFrames = false;
    DeleteEncoder();
    return VCM_OK;
}
// Factory for internal (built-in) encoders. The returned wrapper owns the
// underlying encoder instance; callers must free both (see DeleteEncoder()).
// Returns NULL for codec types that are unknown or not compiled in.
VCMGenericEncoder*
VCMCodecDataBase::CreateEncoder(VideoCodecType type) const
{
    switch (type)
    {
#ifdef VIDEOCODEC_VP8
    case kVideoCodecVP8:
        return new VCMGenericEncoder(*(new VP8Encoder));
#endif
#ifdef VIDEOCODEC_I420
    case kVideoCodecI420:
        return new VCMGenericEncoder(*(new I420Encoder));
#endif
    default:
        // Removed the unreachable break statements that followed each return.
        return NULL;
    }
}
// Releases and frees the current encoder, if any.
void
VCMCodecDataBase::DeleteEncoder()
{
if (_ptrEncoder)
{
_ptrEncoder->Release();
if (!_currentEncIsExternal)
{
// Internal encoders are owned by the wrapper; external encoder
// instances belong to the application and must not be deleted here.
delete &_ptrEncoder->_encoder;
}
delete _ptrEncoder;
_ptrEncoder = NULL;
}
}
// Number of codecs compiled into this build.
WebRtc_UWord8
VCMCodecDataBase::NumberOfCodecs()
{
    return VCM_NUM_VIDEO_CODECS_AVAILABLE;
}
// Fills |settings| with the default configuration for the codec at index
// |listId| in the static codec list. Returns VCM_PARAMETER_ERROR for a NULL
// pointer or an out-of-range / not-compiled-in index.
WebRtc_Word32
VCMCodecDataBase::Codec(WebRtc_UWord8 listId, VideoCodec *settings)
{
if (settings == NULL)
{
return VCM_PARAMETER_ERROR;
}
if (listId >= VCM_NUM_VIDEO_CODECS_AVAILABLE)
{
return VCM_PARAMETER_ERROR;
}
// Zero-fill first; this also guarantees plName is NUL-terminated after the
// strncpy calls below (they copy exactly strlen characters).
memset(settings, 0, sizeof(VideoCodec));
switch (listId)
{
#ifdef VIDEOCODEC_VP8
case VCM_VP8_IDX:
{
strncpy(settings->plName, "VP8", 3);
settings->codecType = kVideoCodecVP8;
// 96 to 127 dynamic payload types for video codecs
settings->plType = VCM_VP8_PAYLOAD_TYPE;
settings->startBitrate = 100;
settings->minBitrate = VCM_MIN_BITRATE;
// 0 means "no max"; RegisterSendCodec derives one from the resolution.
settings->maxBitrate = 0;
settings->maxFramerate = VCM_DEFAULT_FRAME_RATE;
settings->width = VCM_DEFAULT_CODEC_WIDTH;
settings->height = VCM_DEFAULT_CODEC_HEIGHT;
break;
}
#endif
#ifdef VIDEOCODEC_I420
case VCM_I420_IDX:
{
strncpy(settings->plName, "I420", 4);
settings->codecType = kVideoCodecI420;
// 96 to 127 dynamic payload types for video codecs
settings->plType = VCM_I420_PAYLOAD_TYPE;
// Bitrate needed for this size and framerate
settings->startBitrate = 3*VCM_DEFAULT_CODEC_WIDTH*
VCM_DEFAULT_CODEC_HEIGHT*8*
VCM_DEFAULT_FRAME_RATE/1000/2;
settings->maxBitrate = settings->startBitrate;
settings->maxFramerate = VCM_DEFAULT_FRAME_RATE;
settings->width = VCM_DEFAULT_CODEC_WIDTH;
settings->height = VCM_DEFAULT_CODEC_HEIGHT;
settings->minBitrate = VCM_MIN_BITRATE;
break;
}
#endif
default:
{
return VCM_PARAMETER_ERROR;
}
}
return VCM_OK;
}
// Fills |settings| with the defaults for the codec whose type is |codecType|.
// Performs a linear scan of the static codec list; returns
// VCM_PARAMETER_ERROR when no codec of that type is available in this build.
WebRtc_Word32
VCMCodecDataBase::Codec(VideoCodecType codecType, VideoCodec* settings)
{
    const WebRtc_UWord8 numCodecs = VCMCodecDataBase::NumberOfCodecs();
    for (WebRtc_UWord8 idx = 0; idx < numCodecs; ++idx)
    {
        const WebRtc_Word32 result = VCMCodecDataBase::Codec(idx, settings);
        if (result != VCM_OK)
        {
            return result;
        }
        if (settings->codecType == codecType)
        {
            return VCM_OK;
        }
    }
    return VCM_PARAMETER_ERROR;
}
// assuming only one registered encoder - since only one used, no need for more
// Validates and stores the send-side codec settings. Does not instantiate an
// encoder (see SetEncoder()). Returns VCM_UNINITIALIZED for a NULL codec,
// VCM_PARAMETER_ERROR / WEBRTC_VIDEO_CODEC_ERR_PARAMETER on invalid settings.
WebRtc_Word32
VCMCodecDataBase::RegisterSendCodec(const VideoCodec* sendCodec,
WebRtc_UWord32 numberOfCores,
WebRtc_UWord32 maxPayloadSize)
{
if (sendCodec == NULL)
{
return VCM_UNINITIALIZED;
}
if (maxPayloadSize == 0)
{
maxPayloadSize = kDefaultPayloadSize;
}
if (numberOfCores > 32)
{
return VCM_PARAMETER_ERROR;
}
// H.263 must use its static RTP payload type (34).
if (strcmp(sendCodec->plName, "H263") == 0 &&
(sendCodec->plType != 34))
{
return VCM_PARAMETER_ERROR;
}
// NOTE(review): if plType is an unsigned type this only rejects 0 — confirm.
if (sendCodec->plType <= 0)
{
return VCM_PARAMETER_ERROR;
}
// Make sure the start bit rate is sane...
if (sendCodec->startBitrate > 1000000)
{
return VCM_PARAMETER_ERROR;
}
if (sendCodec->codecType == kVideoCodecUnknown)
{
return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
}
_numberOfCores = numberOfCores;
_maxPayloadSize = maxPayloadSize;
memcpy(&_sendCodec, sendCodec, sizeof(VideoCodec));
if (_sendCodec.maxBitrate == 0)
{
// max is one bit per pixel
_sendCodec.maxBitrate = ((WebRtc_Word32)_sendCodec.height *
(WebRtc_Word32)_sendCodec.width *
(WebRtc_Word32)_sendCodec.maxFramerate) / 1000;
if (_sendCodec.startBitrate > _sendCodec.maxBitrate)
{
// but if the customer tries to set a higher start bit rate we will increase
// the max accordingly
_sendCodec.maxBitrate = _sendCodec.startBitrate;
}
}
return VCM_OK;
}
// Copies the current send codec settings into |currentSendCodec|.
// Returns VCM_PARAMETER_ERROR for a NULL output pointer and
// VCM_UNINITIALIZED when no encoder has been instantiated.
WebRtc_Word32
VCMCodecDataBase::SendCodec(VideoCodec* currentSendCodec) const
{
    WEBRTC_TRACE(webrtc::kTraceApiCall, webrtc::kTraceVideoCoding, VCMId(_id), "SendCodec");
    if (currentSendCodec == NULL)
    {
        // The original dereferenced a NULL output pointer in memcpy below.
        return VCM_PARAMETER_ERROR;
    }
    if (_ptrEncoder == NULL)
    {
        return VCM_UNINITIALIZED;
    }
    memcpy(currentSendCodec, &_sendCodec, sizeof(VideoCodec));
    return VCM_OK;
}
// Returns the type of the current send codec, or kVideoCodecUnknown when
// no encoder has been instantiated.
VideoCodecType
VCMCodecDataBase::SendCodec() const
{
    WEBRTC_TRACE(webrtc::kTraceApiCall, webrtc::kTraceVideoCoding, VCMId(_id),
                 "SendCodec type");
    return (_ptrEncoder == NULL) ? kVideoCodecUnknown : _sendCodec.codecType;
}
// Removes the registered external encoder matching |payloadType|. Sets
// |wasSendCodec| to true if that encoder was also the active send codec
// (in which case the send side is torn down as well).
WebRtc_Word32
VCMCodecDataBase::DeRegisterExternalEncoder(WebRtc_UWord8 payloadType, bool& wasSendCodec)
{
wasSendCodec = false;
if (_externalPayloadType != payloadType)
{
// No external encoder registered under this payload type.
return VCM_PARAMETER_ERROR;
}
if (_sendCodec.plType == payloadType)
{
//De-register as send codec if needed
DeleteEncoder();
memset(&_sendCodec, 0, sizeof(VideoCodec));
_currentEncIsExternal = false;
wasSendCodec = true;
}
_externalPayloadType = 0;
_externalEncoder = NULL;
_internalSource = false;
return VCM_OK;
}
// Registers an application-owned encoder. Only one external encoder is
// supported at a time, so the new registration simply replaces any
// previous one. Always succeeds.
WebRtc_Word32
VCMCodecDataBase::RegisterExternalEncoder(VideoEncoder* externalEncoder,
                                          WebRtc_UWord8 payloadType,
                                          bool internalSource)
{
    _externalPayloadType = payloadType;
    _externalEncoder = externalEncoder;
    _internalSource = internalSource;
    return VCM_OK;
}
// Instantiates the encoder for |settings|, replacing any existing one.
// Chooses the registered external encoder when the payload type matches,
// otherwise creates an internal encoder. Returns the encoder, or NULL on
// creation/initialization/callback-registration failure.
VCMGenericEncoder*
VCMCodecDataBase::SetEncoder(const VideoCodec* settings,
VCMEncodedFrameCallback* VCMencodedFrameCallback)
{
// if encoder exists, will destroy it and create new one
DeleteEncoder();
if (settings->plType == _externalPayloadType)
{
// External encoder
_ptrEncoder = new VCMGenericEncoder(*_externalEncoder, _internalSource);
_currentEncIsExternal = true;
}
else
{
_ptrEncoder = CreateEncoder(settings->codecType);
_currentEncIsExternal = false;
}
VCMencodedFrameCallback->SetPayloadType(settings->plType);
if (_ptrEncoder == NULL)
{
return NULL;
}
if (_ptrEncoder->InitEncode(settings, _numberOfCores, _maxPayloadSize) < 0)
{
DeleteEncoder();
return NULL;
}
else if (_ptrEncoder->RegisterEncodeCallback(VCMencodedFrameCallback) < 0)
{
DeleteEncoder();
return NULL;
}
// Intentionally don't check return value since the encoder registration
// shouldn't fail because the codec doesn't support changing the
// periodic key frame setting.
_ptrEncoder->SetPeriodicKeyFrames(_periodicKeyFrames);
return _ptrEncoder;
}
// Stores the periodic key frame setting so it can be re-applied to future
// encoders; forwards it immediately when an encoder already exists.
WebRtc_Word32
VCMCodecDataBase::SetPeriodicKeyFrames(bool enable)
{
    _periodicKeyFrames = enable;
    if (_ptrEncoder == NULL)
    {
        return VCM_OK;
    }
    return _ptrEncoder->SetPeriodicKeyFrames(_periodicKeyFrames);
}
// Registers |receiveCodec| as a decodable payload type, replacing any
// previous registration for the same type. The settings are copied; the
// copy is owned by the map. Returns VCM_PARAMETER_ERROR for NULL input and
// WEBRTC_VIDEO_CODEC_ERR_PARAMETER for an unknown codec type.
WebRtc_Word32
VCMCodecDataBase::RegisterReceiveCodec(const VideoCodec* receiveCodec,
                                       WebRtc_UWord32 numberOfCores,
                                       bool requireKeyFrame)
{
    if (receiveCodec == NULL)
    {
        // The original dereferenced a NULL pointer in the trace below.
        return VCM_PARAMETER_ERROR;
    }
    WEBRTC_TRACE(webrtc::kTraceStateInfo, webrtc::kTraceVideoCoding, VCMId(_id),
                 "Codec: %s, Payload type %d, Height %d, Width %d, Bitrate %d, Framerate %d.",
                 receiveCodec->plName, receiveCodec->plType,
                 receiveCodec->height, receiveCodec->width,
                 receiveCodec->startBitrate, receiveCodec->maxFramerate);
    // Validate BEFORE deregistering, so a bad codec does not destroy an
    // existing valid registration (the original deregistered first).
    if (receiveCodec->codecType == kVideoCodecUnknown)
    {
        return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
    }
    // check if payload value already exists, if so - erase old and insert new
    DeRegisterReceiveCodec(receiveCodec->plType);
    VideoCodec* newReceiveCodec = new VideoCodec(*receiveCodec);
    _decMap.Insert(receiveCodec->plType,
                   new VCMDecoderMapItem(newReceiveCodec, numberOfCores, requireKeyFrame));
    return VCM_OK;
}
// Removes the receive codec registered for |payloadType|, freeing the stored
// settings copy. If it is the codec currently in use, the active receive
// codec state is cleared (the decoder itself is not released here).
WebRtc_Word32 VCMCodecDataBase::DeRegisterReceiveCodec(WebRtc_UWord8 payloadType)
{
MapItem* item = _decMap.Find(payloadType);
if (item == NULL)
{
return VCM_PARAMETER_ERROR;
}
VCMDecoderMapItem* decItem = static_cast<VCMDecoderMapItem*>(item->GetItem());
// The map item owns the VideoCodec copy made in RegisterReceiveCodec.
delete decItem->_settings;
delete decItem;
_decMap.Erase(item);
if (_receiveCodec.plType == payloadType)
{
// This codec is currently in use.
memset(&_receiveCodec, 0, sizeof(VideoCodec));
_currentDecIsExternal = false;
}
return VCM_OK;
}
// Tears down the receive side: releases the active decoder, clears the
// active receive codec, and frees every entry in both the internal and the
// external decoder maps.
WebRtc_Word32
VCMCodecDataBase::ResetReceiver()
{
ReleaseDecoder(_ptrDecoder);
_ptrDecoder = NULL;
memset(&_receiveCodec, 0, sizeof(VideoCodec));
// Drain the internal decoder map, freeing each item's settings copy.
MapItem* item = _decMap.First();
while (item != NULL)
{
VCMDecoderMapItem* decItem = static_cast<VCMDecoderMapItem*>(item->GetItem());
if (decItem != NULL)
{
if (decItem->_settings != NULL)
{
delete decItem->_settings;
}
delete decItem;
}
_decMap.Erase(item);
item = _decMap.First();
}
// Drain the external decoder map. The external decoder instances are
// application-owned and are not deleted, only the wrapper items.
item = _decExternalMap.First();
while (item != NULL)
{
VCMExtDecoderMapItem* decItem = static_cast<VCMExtDecoderMapItem*>(item->GetItem());
if (decItem != NULL)
{
delete decItem;
}
_decExternalMap.Erase(item);
item = _decExternalMap.First();
}
_currentDecIsExternal = false;
return VCM_OK;
}
// Removes the external decoder registered for |payloadType|. Releases the
// active decoder first if that payload type is currently in use, and also
// removes the matching receive codec registration.
WebRtc_Word32
VCMCodecDataBase::DeRegisterExternalDecoder(WebRtc_UWord8 payloadType)
{
MapItem* item = _decExternalMap.Find(payloadType);
if (item == NULL)
{
// Not found
return VCM_PARAMETER_ERROR;
}
if (_receiveCodec.plType == payloadType)
{
// Release it if it was registered and in use
ReleaseDecoder(_ptrDecoder);
_ptrDecoder = NULL;
}
DeRegisterReceiveCodec(payloadType);
VCMExtDecoderMapItem* decItem = static_cast<VCMExtDecoderMapItem*>(item->GetItem());
delete decItem;
_decExternalMap.Erase(item);
return VCM_OK;
}
// Add the external decoder object to the list of external decoders.
// Won't be registered as a receive codec until RegisterReceiveCodec is called.
WebRtc_Word32
VCMCodecDataBase::RegisterExternalDecoder(VideoDecoder* externalDecoder,
WebRtc_UWord8 payloadType,
bool internalRenderTiming)
{
// check if payload value already exists, if so - erase old and insert new
VCMExtDecoderMapItem* extDecoder = new VCMExtDecoderMapItem(externalDecoder,
payloadType,
internalRenderTiming);
// NOTE(review): plain `new` normally throws rather than returning NULL;
// this check only helps on nothrow/new-handler configurations — confirm.
if (extDecoder == NULL)
{
return VCM_MEMORY;
}
DeRegisterExternalDecoder(payloadType);
_decExternalMap.Insert(payloadType, extDecoder);
return VCM_OK;
}
// True when at least one receive codec has been registered.
bool
VCMCodecDataBase::DecoderRegistered() const
{
    return _decMap.Size() > 0;
}
// Copies the active receive codec settings into |currentReceiveCodec|.
// Returns VCM_NO_FRAME_DECODED while no decoder has been created.
WebRtc_Word32
VCMCodecDataBase::ReceiveCodec(VideoCodec* currentReceiveCodec) const
{
    if (_ptrDecoder == NULL)
    {
        return VCM_NO_FRAME_DECODED;
    }
    memcpy(currentReceiveCodec, &_receiveCodec, sizeof(VideoCodec));
    return VCM_OK;
}
// Type of the active receive codec, or kVideoCodecUnknown while no decoder
// has been created.
VideoCodecType
VCMCodecDataBase::ReceiveCodec() const
{
    return (_ptrDecoder == NULL) ? kVideoCodecUnknown : _receiveCodec.codecType;
}
// Returns a decoder for |payloadType|. Reuses the current decoder when the
// payload type matches (or is 0); otherwise releases it and creates a new
// one, registering |callback| on it. Returns NULL on failure.
VCMGenericDecoder*
VCMCodecDataBase::SetDecoder(WebRtc_UWord8 payloadType, VCMDecodedFrameCallback& callback)
{
if (payloadType == _receiveCodec.plType || payloadType == 0)
{
return _ptrDecoder;
}
// check for existing decoder, if exists - delete
if (_ptrDecoder)
{
ReleaseDecoder(_ptrDecoder);
_ptrDecoder = NULL;
memset(&_receiveCodec, 0, sizeof(VideoCodec));
}
// Also updates _receiveCodec and _currentDecIsExternal on success.
_ptrDecoder = CreateAndInitDecoder(payloadType, _receiveCodec, _currentDecIsExternal);
if (_ptrDecoder == NULL)
{
return NULL;
}
if (_ptrDecoder->RegisterDecodeCompleteCallback(&callback) < 0)
{
ReleaseDecoder(_ptrDecoder);
_ptrDecoder = NULL;
memset(&_receiveCodec, 0, sizeof(VideoCodec));
return NULL;
}
return _ptrDecoder;
}
// Creates and initializes a decoder for |payloadType| from the registered
// receive codec settings. Prefers a registered external decoder over an
// internal one; |external| reports which kind was chosen. On success copies
// the codec settings into |newCodec|. Returns NULL on failure.
VCMGenericDecoder*
VCMCodecDataBase::CreateAndInitDecoder(WebRtc_UWord8 payloadType,
VideoCodec& newCodec,
bool &external) const
{
VCMDecoderMapItem* decoderItem = FindDecoderItem(payloadType);
if (decoderItem == NULL)
{
WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceVideoCoding, VCMId(_id),
"Unknown payload type: %u", payloadType);
return NULL;
}
VCMGenericDecoder* ptrDecoder = NULL;
VCMExtDecoderMapItem* externalDecItem = FindExternalDecoderItem(payloadType);
if (externalDecItem != NULL)
{
// External codec
ptrDecoder = new VCMGenericDecoder(*externalDecItem->_externalDecoderInstance, _id,
true);
external = true;
}
else
{
// create decoder
ptrDecoder = CreateDecoder(decoderItem->_settings->codecType);
external = false;
}
if (ptrDecoder == NULL)
{
return NULL;
}
if (ptrDecoder->InitDecode(decoderItem->_settings,
decoderItem->_numberOfCores,
decoderItem->_requireKeyFrame) < 0)
{
ReleaseDecoder(ptrDecoder);
return NULL;
}
// Apply any stored out-of-band codec configuration (e.g. MPEG-4).
SetCodecConfigParameters(*ptrDecoder, *decoderItem->_settings);
memcpy(&newCodec, decoderItem->_settings, sizeof(VideoCodec));
return ptrDecoder;
}
// Returns a new decoder wrapping a copy of the current decoder's state,
// or NULL when there is no current decoder or copying is unsupported.
VCMGenericDecoder*
VCMCodecDataBase::CreateDecoderCopy() const
{
if (_ptrDecoder == NULL)
{
return NULL;
}
VideoDecoder* decoderCopy = _ptrDecoder->_decoder.Copy();
if (decoderCopy == NULL)
{
return NULL;
}
return new VCMGenericDecoder(*decoderCopy, _id, _ptrDecoder->External());
}
// Replaces the current decoder with a copy of |decoder|. A no-op when the
// copy fails (the current decoder is kept in that case).
void
VCMCodecDataBase::CopyDecoder(const VCMGenericDecoder& decoder)
{
VideoDecoder* decoderCopy = decoder._decoder.Copy();
if (decoderCopy != NULL)
{
ReleaseDecoder(_ptrDecoder);
_ptrDecoder = new VCMGenericDecoder(*decoderCopy, _id, decoder.External());
}
}
bool
VCMCodecDataBase::RenderTiming() const
{
bool renderTiming = true;
if (_currentDecIsExternal)
{
VCMExtDecoderMapItem* extItem = FindExternalDecoderItem(_receiveCodec.plType);
renderTiming = extItem->_internalRenderTiming;
}
return renderTiming;
}
// Releases and frees a decoder wrapper. For internal decoders the wrapped
// VideoDecoder is owned by the wrapper and is freed too; external decoder
// instances are owned by the application and left alone.
void
VCMCodecDataBase::ReleaseDecoder(VCMGenericDecoder* decoder) const
{
    if (decoder != NULL)
    {
        decoder->Release();
        if (!decoder->External())
        {
            // The original additionally tested "&decoder->_decoder != NULL",
            // which is always true (address of a reference member); the dead
            // condition has been removed.
            delete &decoder->_decoder;
        }
        delete decoder;
    }
}
// Stores out-of-band codec configuration data for the receive codec
// registered under |payloadType|, and forwards it to the active decoder if
// that codec is currently in use. Only MPEG-4 carries such data here.
WebRtc_Word32
VCMCodecDataBase::SetCodecConfigParameters(WebRtc_UWord8 payloadType,
const WebRtc_UWord8* buffer,
WebRtc_Word32 length)
{
VCMDecoderMapItem* decItem = FindDecoderItem(payloadType);
if (decItem == NULL)
{
return VCM_PARAMETER_ERROR;
}
switch (decItem->_settings->codecType)
{
case kVideoCodecMPEG4:
{
// NOTE(review): |length| is not validated against the size of the
// configParameters array before this memcpy — possible overflow for
// oversized input; confirm against the VideoCodec declaration.
memcpy(decItem->_settings->codecSpecific.MPEG4.configParameters, buffer, length);
decItem->_settings->codecSpecific.MPEG4.configParametersSize =
static_cast<WebRtc_UWord8>(length);
break;
}
default:
// This codec doesn't have codec config parameters
return VCM_GENERAL_ERROR;
}
if (_ptrDecoder != NULL && _receiveCodec.plType == decItem->_settings->plType)
{
return _ptrDecoder->SetCodecConfigParameters(buffer, length);
}
return VCM_OK;
}
// Looks up the receive codec registration for |payloadType|; NULL when
// nothing is registered under that payload type.
VCMDecoderMapItem*
VCMCodecDataBase::FindDecoderItem(WebRtc_UWord8 payloadType) const
{
    MapItem* mapItem = _decMap.Find(payloadType);
    if (mapItem == NULL)
    {
        return NULL;
    }
    return static_cast<VCMDecoderMapItem*>(mapItem->GetItem());
}
// Looks up the external decoder registration for |payloadType|; NULL when
// nothing is registered under that payload type.
VCMExtDecoderMapItem*
VCMCodecDataBase::FindExternalDecoderItem(WebRtc_UWord8 payloadType) const
{
    MapItem* mapItem = _decExternalMap.Find(payloadType);
    if (mapItem == NULL)
    {
        return NULL;
    }
    return static_cast<VCMExtDecoderMapItem*>(mapItem->GetItem());
}
// Factory for internal (built-in) decoders. The returned wrapper owns the
// underlying decoder; ReleaseDecoder() frees both. Returns NULL for codec
// types that are unknown or not compiled in.
VCMGenericDecoder*
VCMCodecDataBase::CreateDecoder(VideoCodecType type) const
{
switch(type)
{
#ifdef VIDEOCODEC_VP8
case kVideoCodecVP8:
return new VCMGenericDecoder(*(new VP8Decoder), _id);
#endif
#ifdef VIDEOCODEC_I420
case kVideoCodecI420:
return new VCMGenericDecoder(*(new I420Decoder), _id);
#endif
default:
return NULL;
}
}
// Forwards any stored out-of-band configuration data in |settings| to
// |decoder|. Only MPEG-4 carries such data; a no-op for other codecs or
// when no data is stored.
void
VCMCodecDataBase::SetCodecConfigParameters(VCMGenericDecoder& decoder,
const VideoCodec& settings)
{
switch (settings.codecType)
{
case kVideoCodecMPEG4:
{
if (settings.codecSpecific.MPEG4.configParametersSize > 0)
{
decoder.SetCodecConfigParameters(
settings.codecSpecific.MPEG4.configParameters,
settings.codecSpecific.MPEG4.configParametersSize);
}
break;
}
default:
// No codec config parameters for this codec
return;
}
return;
}
}

View File

@@ -0,0 +1,221 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_MODULES_VIDEO_CODING_CODEC_DATABASE_H_
#define WEBRTC_MODULES_VIDEO_CODING_CODEC_DATABASE_H_
#include "video_coding.h"
#include "video_codec_interface.h"
#include "generic_decoder.h"
#include "generic_encoder.h"
#include "typedefs.h"
#include "map_wrapper.h"
namespace webrtc
{
// Database-wide constants.
enum VCMCodecDBProperties
{
// Default max RTP payload size in bytes, used when the caller passes 0.
kDefaultPayloadSize = 1440
};
// Stored per registered receive codec: the codec settings (owned by this
// item's container) plus the configuration used when creating its decoder.
class VCMDecoderMapItem {
public:
VCMDecoderMapItem(VideoCodec* settings,
WebRtc_UWord32 numberOfCores,
bool requireKeyFrame);
// Owned codec settings copy; freed when the map entry is erased.
VideoCodec* _settings;
WebRtc_UWord32 _numberOfCores;
bool _requireKeyFrame;
};
// Stored per registered external decoder: the application-owned decoder
// instance (not freed by the database), its payload type, and whether the
// decoder performs its own render timing.
class VCMExtDecoderMapItem {
public:
VCMExtDecoderMapItem(VideoDecoder* externalDecoderInstance,
WebRtc_UWord8 payloadType,
bool internalRenderTiming);
WebRtc_UWord8 _payloadType;
// Not owned; lifetime is managed by the application.
VideoDecoder* _externalDecoderInstance;
bool _internalRenderTiming;
};
/*******************************/
/* VCMCodecDataBase class */
/*******************************/
// Keeps track of the send and receive codecs known to the video coding
// module: the static list of built-in codecs, the currently selected
// send/receive codec, and any registered external encoder/decoders.
class VCMCodecDataBase
{
public:
VCMCodecDataBase(WebRtc_Word32 id);
~VCMCodecDataBase();
/**
* Fills "version" with the version of all codecs supported.
*/
WebRtc_Word32 Version(WebRtc_Word8* version,
WebRtc_UWord32& remainingBufferInBytes,
WebRtc_UWord32& position) const;
/**
* Release codec database - release all memory for both send and receive side
*/
WebRtc_Word32 Reset();
/**
* Sender Side
*/
/**
* Returns the number of supported codecs (or -1 in case of error).
*/
static WebRtc_UWord8 NumberOfCodecs();
/**
* Get supported codecs with ID
* Input Values:
* listId : Requested codec id number
* settings : Pointer to the struct in which the returned codec information is copied
* Return Values: 0 if successful, otherwise
*/
static WebRtc_Word32 Codec(WebRtc_UWord8 listId, VideoCodec* settings);
static WebRtc_Word32 Codec(VideoCodecType codecType, VideoCodec* settings);
/**
* Reset Sender side
*/
WebRtc_Word32 ResetSender();
/**
* Setting the sender side codec and initiating the desired codec given the VideoCodec
* struct.
* Return Value: 0 if the codec and the settings are supported, otherwise
*/
WebRtc_Word32 RegisterSendCodec(const VideoCodec* sendCodec,
WebRtc_UWord32 numberOfCores,
WebRtc_UWord32 maxPayloadSize);
/**
* Get current send side codec. Relevant for internal codecs only.
*/
WebRtc_Word32 SendCodec(VideoCodec* currentSendCodec) const;
/**
* Get current send side codec type. Relevant for internal codecs only.
*/
VideoCodecType SendCodec() const;
/**
* Register external encoder - current assumption - if one is registered then it will also
* be used, and therefore it is also initialized
* Return value: A pointer to the encoder on success, or null, in case of an error.
*/
WebRtc_Word32 DeRegisterExternalEncoder(WebRtc_UWord8 payloadType, bool& wasSendCodec);
WebRtc_Word32 RegisterExternalEncoder(VideoEncoder* externalEncoder,
WebRtc_UWord8 payloadType,
bool internalSource);
/**
* Returns an encoder for the given settings - to be used with internal encoders only.
* Special cases:
* Encoder exists - If payload matches, returns existing one, otherwise,
* deletes existing one and creates new one.
* No match found / Error - returns NULL.
*/
VCMGenericEncoder* SetEncoder(const VideoCodec* settings,
VCMEncodedFrameCallback* VCMencodedFrameCallback);
WebRtc_Word32 SetPeriodicKeyFrames(bool enable);
bool InternalSource() const;
/*
* Receiver Side
*/
WebRtc_Word32 ResetReceiver();
/**
* Register external decoder/render object
*/
WebRtc_Word32 DeRegisterExternalDecoder(WebRtc_UWord8 payloadType);
WebRtc_Word32 RegisterExternalDecoder(VideoDecoder* externalDecoder,
WebRtc_UWord8 payloadType,
bool internalRenderTiming);
bool DecoderRegistered() const;
/**
* Register receive codec
*/
WebRtc_Word32 RegisterReceiveCodec(const VideoCodec* receiveCodec,
WebRtc_UWord32 numberOfCores,
bool requireKeyFrame);
WebRtc_Word32 DeRegisterReceiveCodec(WebRtc_UWord8 payloadType);
/**
* Get current receive side codec. Relevant for internal codecs only.
*/
WebRtc_Word32 ReceiveCodec(VideoCodec* currentReceiveCodec) const;
/**
* Get current receive side codec type. Relevant for internal codecs only.
*/
VideoCodecType ReceiveCodec() const;
/**
* Returns a decoder which matches a payload type.
* Special cases:
* Decoder exists - If payload matches, returns existing one, otherwise, deletes
* existing one, and creates new one.
* No match found / Error - returns NULL.
*/
VCMGenericDecoder* SetDecoder(WebRtc_UWord8 payloadType, VCMDecodedFrameCallback& callback);
VCMGenericDecoder* CreateAndInitDecoder(WebRtc_UWord8 payloadType,
VideoCodec& newCodec,
bool &external) const;
VCMGenericDecoder* CreateDecoderCopy() const;
void ReleaseDecoder(VCMGenericDecoder* decoder) const;
void CopyDecoder(const VCMGenericDecoder& decoder);
bool RenderTiming() const;
WebRtc_Word32 SetCodecConfigParameters(WebRtc_UWord8 payloadType,
const WebRtc_UWord8* buffer,
WebRtc_Word32 length);
protected:
/**
* Create an internal Encoder given a codec type
*/
VCMGenericEncoder* CreateEncoder(VideoCodecType type) const;
void DeleteEncoder();
/*
* Create an internal Decoder given a codec type
*/
VCMGenericDecoder* CreateDecoder(VideoCodecType type) const;
static void SetCodecConfigParameters(VCMGenericDecoder& decoder,
const VideoCodec& settings);
VCMDecoderMapItem* FindDecoderItem(WebRtc_UWord8 payloadType) const;
VCMExtDecoderMapItem* FindExternalDecoderItem(WebRtc_UWord8 payloadType) const;
private:
WebRtc_Word32 _id;
WebRtc_UWord32 _numberOfCores;
WebRtc_UWord32 _maxPayloadSize;
bool _periodicKeyFrames;
bool _currentEncIsExternal;
VideoCodec _sendCodec;
VideoCodec _receiveCodec;
WebRtc_UWord8 _externalPayloadType;
// Not owned; registered by the application.
VideoEncoder* _externalEncoder;
bool _internalSource;
VCMGenericEncoder* _ptrEncoder;
VCMGenericDecoder* _ptrDecoder;
bool _currentDecIsExternal;
// payload type -> VCMDecoderMapItem* (items owned).
MapWrapper _decMap;
// payload type -> VCMExtDecoderMapItem* (items owned, decoders not).
MapWrapper _decExternalMap;
}; // end of VCMCodecDataBase class definition
} // namespace webrtc
#endif // WEBRTC_MODULES_VIDEO_CODING_CODEC_DATABASE_H_

View File

@@ -0,0 +1,133 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "codec_timer.h"
#include <assert.h>
namespace webrtc
{
// Initializes an empty filter; Reset() re-initializes the same fields and
// clears the history array.
VCMCodecTimer::VCMCodecTimer()
:
_filteredMax(0),
_firstDecodeTime(true),
_shortMax(0),
_history()
{
Reset();
}
// Computes the elapsed decode time, feeds it into the max filter, and
// returns it.
WebRtc_Word32 VCMCodecTimer::StopTimer(WebRtc_Word64 startTimeMs, WebRtc_Word64 nowMs)
{
    const WebRtc_Word32 elapsedMs = static_cast<WebRtc_Word32>(nowMs - startTimeMs);
    MaxFilter(elapsedMs, nowMs);
    return elapsedMs;
}
// Clears the filter state and invalidates every history slot
// (timeMs == -1 marks a slot as unused).
void VCMCodecTimer::Reset()
{
    _filteredMax = 0;
    _shortMax = 0;
    _firstDecodeTime = true;
    for (int idx = 0; idx < MAX_HISTORY_SIZE; idx++)
    {
        _history[idx].shortMax = 0;
        _history[idx].timeMs = -1;
    }
}
// Update the max-value filter with a new decode-time sample.
// The very first sample only clears the first-time flag and is not filtered.
void VCMCodecTimer::MaxFilter(WebRtc_Word32 decodeTime, WebRtc_Word64 nowMs)
{
    if (_firstDecodeTime)
    {
        _firstDecodeTime = false;
        return;
    }
    UpdateMaxHistory(decodeTime, nowMs);
    ProcessHistory(nowMs);
}
// Feed a decode-time sample into the history. Samples within the current
// SHORT_FILTER_MS window only raise the running short max; when the window
// expires, the short max is pushed into _history[0] (shifting older windows
// back) and the running max restarts.
void
VCMCodecTimer::UpdateMaxHistory(WebRtc_Word32 decodeTime, WebRtc_Word64 now)
{
if (_history[0].timeMs >= 0 &&
now - _history[0].timeMs < SHORT_FILTER_MS)
{
// Still inside the current window: track its max.
if (decodeTime > _shortMax)
{
_shortMax = decodeTime;
}
}
else
{
// Only add a new value to the history once a second
if(_history[0].timeMs == -1)
{
// First, no shift
_shortMax = decodeTime;
}
else
{
// Shift
for(int i = (MAX_HISTORY_SIZE - 2); i >= 0 ; i--)
{
_history[i+1].shortMax = _history[i].shortMax;
_history[i+1].timeMs = _history[i].timeMs;
}
}
if (_shortMax == 0)
{
_shortMax = decodeTime;
}
// Start a new window seeded with the current sample's max.
_history[0].shortMax = _shortMax;
_history[0].timeMs = now;
_shortMax = 0;
}
}
// Recomputes _filteredMax as the largest short-window max over all history
// entries that are still inside the MAX_HISTORY_SIZE * SHORT_FILTER_MS
// window, starting from the current (not yet stored) _shortMax.
void
VCMCodecTimer::ProcessHistory(WebRtc_Word64 nowMs)
{
_filteredMax = _shortMax;
if (_history[0].timeMs == -1)
{
// Empty history: only the running short max applies.
return;
}
for (int i=0; i < MAX_HISTORY_SIZE; i++)
{
if (_history[i].timeMs == -1)
{
// Entries are ordered newest-first; the rest are unused.
break;
}
if (nowMs - _history[i].timeMs > MAX_HISTORY_SIZE * SHORT_FILTER_MS)
{
// This sample (and all samples after this) is too old
break;
}
if (_history[i].shortMax > _filteredMax)
{
// This sample is the largest one this far into the history
_filteredMax = _history[i].shortMax;
}
}
}
// Get the maximum observed decode time within the filter window.
// The frame type is currently ignored: the same estimate applies to all.
WebRtc_Word32 VCMCodecTimer::RequiredDecodeTimeMs(FrameType /*frameType*/) const
{
    return _filteredMax;
}
}

View File

@@ -0,0 +1,61 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_MODULES_VIDEO_CODING_CODEC_TIMER_H_
#define WEBRTC_MODULES_VIDEO_CODING_CODEC_TIMER_H_
#include "typedefs.h"
#include "module_common_types.h"
namespace webrtc
{
// MAX_HISTORY_SIZE * SHORT_FILTER_MS defines the window size in milliseconds
#define MAX_HISTORY_SIZE 20
#define SHORT_FILTER_MS 1000
// One history slot of the max filter: the largest decode time observed in a
// SHORT_FILTER_MS window and the window's start timestamp (-1 when unused).
class VCMShortMaxSample
{
public:
VCMShortMaxSample() : shortMax(0), timeMs(-1) {};
WebRtc_Word32 shortMax;
WebRtc_Word64 timeMs;
};
// Tracks decode times and exposes a windowed-maximum estimate of the time
// the decoder needs per frame.
class VCMCodecTimer
{
public:
VCMCodecTimer();
// Updates and returns the max filtered decode time.
WebRtc_Word32 StopTimer(WebRtc_Word64 startTimeMs, WebRtc_Word64 nowMs);
// Empty the list of timers.
void Reset();
// Get the required decode time in ms.
WebRtc_Word32 RequiredDecodeTimeMs(FrameType frameType) const;
private:
// Add a sample to the short-window history.
void UpdateMaxHistory(WebRtc_Word32 decodeTime, WebRtc_Word64 now);
// Feed a new sample into the filter (drops the very first sample).
void MaxFilter(WebRtc_Word32 newTime, WebRtc_Word64 nowMs);
// Recompute the windowed maximum from the history.
void ProcessHistory(WebRtc_Word64 nowMs);
WebRtc_Word32 _filteredMax;
bool _firstDecodeTime;
WebRtc_Word32 _shortMax;
VCMShortMaxSample _history[MAX_HISTORY_SIZE];
};
} // namespace webrtc
#endif // WEBRTC_MODULES_VIDEO_CODING_CODEC_TIMER_H_

View File

@@ -0,0 +1,225 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "content_metrics_processing.h"
#include "tick_time.h"
#include "module_common_types.h"
#include "video_coding_defines.h"
#include <math.h>
namespace webrtc {
//////////////////////////////////
/// VCMContentMetricsProcessing //
//////////////////////////////////
// Initializes all averages to zero and allocates the recursively-averaged
// global metrics object (freed in the destructor).
VCMContentMetricsProcessing::VCMContentMetricsProcessing():
_frameRate(0),
_recAvgFactor(1 / 150.0f), // matched to 30fps
_frameCnt(0),
_prevAvgSizeZeroMotion(0),
_avgSizeZeroMotion(0),
_prevAvgSpatialPredErr(0),
_avgSpatialPredErr(0),
_frameCntForCC(0),
_lastCCpdateTime(0)
{
_globalRecursiveAvg = new VideoContentMetrics();
}
// Frees the global metrics object allocated in the constructor.
VCMContentMetricsProcessing::~VCMContentMetricsProcessing()
{
delete _globalRecursiveAvg;
}
// Clears all accumulated metrics and counters. The recursive averaging
// factor is intentionally kept (see commented-out line). Always VCM_OK.
WebRtc_Word32
VCMContentMetricsProcessing::Reset()
{
_globalRecursiveAvg->Reset();
_frameCnt = 0;
_frameRate = 0;
//_recAvgFactor = 1 / 150.0f; // matched to 30 fps
_prevAvgSizeZeroMotion = 0;
_avgSizeZeroMotion = 0;
_prevAvgSpatialPredErr = 0;
_avgSpatialPredErr = 0;
_frameCntForCC = 0;
return VCM_OK;
}
// Stores the new frame rate and recomputes the recursive averaging factor
// from it. A zero frame rate is stored but the old factor is kept — the
// original divided by zero in that case.
void
VCMContentMetricsProcessing::UpdateFrameRate(WebRtc_UWord32 frameRate)
{
    _frameRate = frameRate;
    if (_frameRate > 0)
    {
        //Update recursive avg factor
        _recAvgFactor = (float) 1000 / ((float)(_frameRate * kQmMinIntervalMs));
    }
}
// Processes a new set of content metrics; a NULL update is silently
// ignored and reported as success.
WebRtc_Word32
VCMContentMetricsProcessing::UpdateContentData(const VideoContentMetrics *contentMetrics)
{
    return (contentMetrics == NULL) ? VCM_OK : ProcessContent(contentMetrics);
}
// Returns the recursively averaged global metrics, or NULL while no frame
// has been processed yet.
VideoContentMetrics*
VCMContentMetricsProcessing::Data()
{
    return (_frameCnt == 0) ? NULL : _globalRecursiveAvg;
}
// Folds one frame's metrics into the recursive global average. The local
// content-change window update is currently disabled (see commented call).
WebRtc_UWord32
VCMContentMetricsProcessing::ProcessContent(const VideoContentMetrics *contentMetrics)
{
// update global metric
UpdateGlobalMetric(contentMetrics);
//Update metrics over local window for content change (CC) detection:
//two metrics are used for CC detection: size of zero motion, and spatial prediction error
//Not currently used:
//UpdateLocalMetricCC(contentMetrics->sizeZeroMotion, contentMetrics->spatialPredErr);
return VCM_OK;
}
// Compares the averages of the current window against the previous window
// and reports whether the content changed significantly. Only evaluates
// once per kCcMinIntervalMs; otherwise keeps averaging and returns false.
bool
VCMContentMetricsProcessing::ContentChangeCheck()
{
    bool result = false;
    // Thresholds for bitrate and content change detection
    float qmContentChangePercMotion = 0.4f;
    float qmContentChangePercSpatial = 0.4f;
    WebRtc_Word64 now = VCMTickTime::MillisecondTimestamp();
    if ( (now - _lastCCpdateTime) < kCcMinIntervalMs)
    {
        //keep averaging
        return result;
    }
    // Guard against division by zero when no samples were accumulated
    // (the original divided by _frameCntForCC unconditionally).
    if (_frameCntForCC > 0)
    {
        //normalize the current window's sums into averages
        _avgSizeZeroMotion = _avgSizeZeroMotion / (float)(_frameCntForCC);
        // The original normalized _prevAvgSpatialPredErr (the previous
        // window's value) here instead of the current window's average.
        _avgSpatialPredErr = _avgSpatialPredErr / (float)(_frameCntForCC);
        //check for content change
        float diffMotion = fabs(_avgSizeZeroMotion - _prevAvgSizeZeroMotion);
        float diffSpatial = fabs(_avgSpatialPredErr - _prevAvgSpatialPredErr);
        if ((diffMotion > (_avgSizeZeroMotion * qmContentChangePercMotion)) ||
            (diffSpatial > (_prevAvgSpatialPredErr * qmContentChangePercSpatial)))
        {
            result = true;
        }
    }
    //copy to previous
    _prevAvgSizeZeroMotion = _avgSizeZeroMotion;
    _prevAvgSpatialPredErr = _avgSpatialPredErr;
    //reset accumulators for the next window
    _avgSizeZeroMotion = 0.;
    _avgSpatialPredErr = 0.;
    _frameCntForCC = 0;
    _lastCCpdateTime = now;
    return result;
}
// Update metrics for content change detection: accumulate a uniform average
// over the current time window.
void VCMContentMetricsProcessing::UpdateLocalMetricCC(float motionVal, float spatialVal)
{
    ++_frameCntForCC;
    _avgSizeZeroMotion += motionVal;
    _avgSpatialPredErr += spatialVal;
}
// Folds one frame's content metrics into the recursive (exponentially
// weighted) global average with weight _recAvgFactor. For the very first
// frame the factor is temporarily forced to 1 so the average is seeded
// with the raw sample, then restored at the end.
void
VCMContentMetricsProcessing::UpdateGlobalMetric(const VideoContentMetrics *contentMetrics)
{
    // Threshold for size of zero motion cluster: for updating 3 metrics:
    // motion magnitude, cluster distortion, and horizontalness
    float nonZeroMvThr = 0.1f;
    // first zero and one: take value as is (no motion search in frame zero).
    float tmpRecAvgFactor = _recAvgFactor;
    if (_frameCnt < 1)
    {
        _recAvgFactor = 1;
    }
    _globalRecursiveAvg->motionPredErr = (1 - _recAvgFactor) * _globalRecursiveAvg->motionPredErr +
        _recAvgFactor * contentMetrics->motionPredErr;
    _globalRecursiveAvg->sizeZeroMotion = (1 - _recAvgFactor) * _globalRecursiveAvg->sizeZeroMotion +
        _recAvgFactor * contentMetrics->sizeZeroMotion;
    _globalRecursiveAvg->spatialPredErr = (1 - _recAvgFactor) * _globalRecursiveAvg->spatialPredErr +
        _recAvgFactor * contentMetrics->spatialPredErr;
    _globalRecursiveAvg->spatialPredErrH = (1 - _recAvgFactor) * _globalRecursiveAvg->spatialPredErrH +
        _recAvgFactor * contentMetrics->spatialPredErrH;
    _globalRecursiveAvg->spatialPredErrV = (1 - _recAvgFactor) * _globalRecursiveAvg->spatialPredErrV +
        _recAvgFactor * contentMetrics->spatialPredErrV;
    //motionMag metric is derived from NFD (normalized frame difference)
    if (kNfdMetric == 1)
    {
        _globalRecursiveAvg->motionMagnitudeNZ = (1 - _recAvgFactor) * _globalRecursiveAvg->motionMagnitudeNZ +
            _recAvgFactor * contentMetrics->motionMagnitudeNZ;
    }
    // Cluster metrics are only meaningful when enough of the frame has
    // non-zero motion, i.e. when the zero-motion size exceeds the threshold.
    if (contentMetrics->sizeZeroMotion > nonZeroMvThr)
    {
        _globalRecursiveAvg->motionClusterDistortion = (1 - _recAvgFactor) * _globalRecursiveAvg->motionClusterDistortion +
            _recAvgFactor *contentMetrics->motionClusterDistortion;
        _globalRecursiveAvg->motionHorizontalness = (1 - _recAvgFactor) * _globalRecursiveAvg->motionHorizontalness +
            _recAvgFactor * contentMetrics->motionHorizontalness;
        //motionMag metric is derived from motion vectors
        if (kNfdMetric == 0)
        {
            _globalRecursiveAvg->motionMagnitudeNZ = (1 - _recAvgFactor) * _globalRecursiveAvg->motionMagnitudeNZ +
                _recAvgFactor * contentMetrics->motionMagnitudeNZ;
        }
    }
    // update native values: these are copied as-is, not averaged.
    _globalRecursiveAvg->nativeHeight = contentMetrics->nativeHeight;
    _globalRecursiveAvg->nativeWidth = contentMetrics->nativeWidth;
    _globalRecursiveAvg->nativeFrameRate = contentMetrics->nativeFrameRate;
    if (_frameCnt < 1)
    {
        // Restore the configured factor after seeding the average.
        _recAvgFactor = tmpRecAvgFactor;
    }
    _frameCnt++;
    return;
}
}

View File

@@ -0,0 +1,77 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_MODULES_VIDEO_CODING_CONTENT_METRICS_PROCESSING_H_
#define WEBRTC_MODULES_VIDEO_CODING_CONTENT_METRICS_PROCESSING_H_
#include "typedefs.h"
namespace webrtc
{
struct VideoContentMetrics;
// QM interval time
enum { kQmMinIntervalMs = 10000 };
enum { kCcMinIntervalMs = 5000 };
//Flag for NFD metric vs motion metric
enum { kNfdMetric = 1 };
/**********************************/
/* Content Metrics Processing */
/**********************************/
// Maintains a recursive (exponentially weighted) running average of the
// per-frame content metrics, used for quality mode (QM) selection.
// Also keeps a (currently unused) local-window average for content
// change detection.
class VCMContentMetricsProcessing
{
public:
    VCMContentMetricsProcessing();
    ~VCMContentMetricsProcessing();
    // Update class with latest metrics
    WebRtc_Word32 UpdateContentData(const VideoContentMetrics *contentMetrics);
    // Check for content change detection
    bool ContentChangeCheck();
    // Initialize/reset to the initial state
    WebRtc_Word32 Reset();
    // Inform class of current frame rate
    void UpdateFrameRate(WebRtc_UWord32 frameRate);
    // Get working (avg) value
    VideoContentMetrics* Data();
private:
    // Compute working avg
    WebRtc_UWord32 ProcessContent(const VideoContentMetrics *contentMetrics);
    // Computation of global metric
    void UpdateGlobalMetric(const VideoContentMetrics *contentMetrics);
    // Compute local average of certain metrics for content change detection
    void UpdateLocalMetricCC(float motionVal, float spatialVal);
    VideoContentMetrics* _globalRecursiveAvg; // recursive running average
    WebRtc_UWord32 _frameRate;       // last frame rate from UpdateFrameRate()
    float _recAvgFactor;             // recursive averaging weight
    WebRtc_UWord32 _frameCnt;        // frames folded into the global average
    float _prevAvgSizeZeroMotion;    // previous-window average (CC detection)
    float _avgSizeZeroMotion;        // current-window sum/average (CC detection)
    float _prevAvgSpatialPredErr;    // previous-window average (CC detection)
    float _avgSpatialPredErr;        // current-window sum/average (CC detection)
    WebRtc_UWord32 _frameCntForCC;   // frames accumulated in the current window
    WebRtc_UWord64 _lastCCpdateTime; // time (ms) of the last CC check
};
} // namespace webrtc
#endif // WEBRTC_MODULES_VIDEO_CODING_CONTENT_METRICS_PROCESSING_H_

View File

@@ -0,0 +1,201 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "encoded_frame.h"
#include "generic_encoder.h"
#include "jitter_buffer_common.h"
#include "video_coding_defines.h"
namespace webrtc {
// Default constructor: empty frame with no payload, unknown codec and
// an unset (-1) render time.
VCMEncodedFrame::VCMEncodedFrame()
:
webrtc::EncodedImage(),
_renderTimeMs(-1),
_payloadType(0),
_missingFrame(false),
_codecSpecificInfo(NULL),
_codecSpecificInfoLength(0),
_codec(kVideoCodecUnknown)
{
}
// Constructs a frame from a raw EncodedImage, deep-copying the payload
// so this frame never aliases rhs's buffer.
VCMEncodedFrame::VCMEncodedFrame(const webrtc::EncodedImage& rhs)
:
webrtc::EncodedImage(rhs),
_renderTimeMs(-1),
_payloadType(0),
_missingFrame(false),
_codecSpecificInfo(NULL),
_codecSpecificInfoLength(0),
_codec(kVideoCodecUnknown)
{
    // The base-class copy shallow-copied rhs's buffer pointer and byte
    // counts; reset them before allocating our own copy. _size and
    // _length are byte counts, so initialize with 0, not the pointer
    // constant NULL (the original code assigned NULL to both).
    _buffer = NULL;
    _size = 0;
    _length = 0;
    if (rhs._buffer != NULL)
    {
        VerifyAndAllocate(rhs._length);
        memcpy(_buffer, rhs._buffer, rhs._length);
    }
}
// Copy constructor: deep-copies the payload buffer (allocating rhs's
// full capacity) and all frame metadata.
VCMEncodedFrame::VCMEncodedFrame(const VCMEncodedFrame& rhs)
:
webrtc::EncodedImage(rhs),
_renderTimeMs(rhs._renderTimeMs),
_payloadType(rhs._payloadType),
_missingFrame(rhs._missingFrame),
_codecSpecificInfo(NULL),
_codecSpecificInfoLength(0),
_codec(rhs._codec)
{
    // Reset the shallow-copied buffer state before deep-copying.
    // _size/_length are byte counts: initialize with 0, not NULL.
    _buffer = NULL;
    _size = 0;
    _length = 0;
    if (rhs._buffer != NULL)
    {
        VerifyAndAllocate(rhs._size);
        memcpy(_buffer, rhs._buffer, rhs._length);
    }
}
VCMEncodedFrame::~VCMEncodedFrame()
{
    // Releases the payload buffer and resets all metadata.
    Free();
}
void VCMEncodedFrame::Free()
{
    // Reset metadata first, then release the payload buffer.
    // delete[] on a null pointer is a no-op, so no guard is needed.
    Reset();
    delete [] _buffer;
    _buffer = NULL;
}
// Resets all frame metadata to its default-constructed state. The
// payload buffer itself is kept allocated (only _length is zeroed);
// Free() releases it.
void VCMEncodedFrame::Reset()
{
    _renderTimeMs = -1;
    _timeStamp = 0;
    _payloadType = 0;
    _codecSpecificInfo = NULL;
    _codecSpecificInfoLength = 0;
    _frameType = kDeltaFrame;
    _encodedWidth = 0;
    _encodedHeight = 0;
    _completeFrame = false;
    _missingFrame = false;
    _length = 0;
    _codec = kVideoCodecUnknown;
}
// Copies this frame's payload and metadata into an EncodedVideoData and
// hands it to the storage callback. Always returns VCM_OK; the
// callback's own return value is ignored.
WebRtc_Word32
VCMEncodedFrame::Store(VCMFrameStorageCallback& storeCallback) const
{
    EncodedVideoData frameToStore;
    frameToStore.codec = _codec;
    if (_buffer != NULL)
    {
        frameToStore.VerifyAndAllocate(_length);
        memcpy(frameToStore.payloadData, _buffer, _length);
        frameToStore.payloadSize = _length;
    }
    frameToStore.completeFrame = _completeFrame;
    frameToStore.encodedWidth = _encodedWidth;
    frameToStore.encodedHeight = _encodedHeight;
    frameToStore.frameType = ConvertFrameType(_frameType);
    frameToStore.missingFrame = _missingFrame;
    frameToStore.payloadType = _payloadType;
    frameToStore.renderTimeMs = _renderTimeMs;
    frameToStore.timeStamp = _timeStamp;
    storeCallback.StoreReceivedFrame(frameToStore);
    return VCM_OK;
}
// Ensures the payload buffer can hold at least minimumSize bytes,
// reallocating and copying the old contents if it cannot. Returns 0 on
// success, -1 on allocation failure.
// NOTE(review): the NULL check after `new` only triggers if the build
// uses a non-throwing operator new — standard `new` throws instead.
WebRtc_Word32
VCMEncodedFrame::VerifyAndAllocate(const WebRtc_UWord32 minimumSize)
{
    if(minimumSize > _size)
    {
        // create buffer of sufficient size
        WebRtc_UWord8* newBuffer = new WebRtc_UWord8[minimumSize];
        if (newBuffer == NULL)
        {
            return -1;
        }
        if(_buffer)
        {
            // copy old data
            memcpy(newBuffer, _buffer, _size);
            delete [] _buffer;
        }
        _buffer = newBuffer;
        _size = minimumSize;
    }
    return 0;
}
// Maps a codec-level VideoFrameType onto the transport-level
// webrtc::FrameType; unknown values fall back to delta.
webrtc::FrameType VCMEncodedFrame::ConvertFrameType(VideoFrameType frameType)
{
    switch(frameType)
    {
    case kKeyFrame:
        return kVideoFrameKey;
    case kGoldenFrame:
        return kVideoFrameGolden;
    case kAltRefFrame:
        return kVideoFrameAltRef;
    case kDeltaFrame:
    default:
        return kVideoFrameDelta;
    }
}
// Inverse of the conversion above: maps a transport-level
// webrtc::FrameType back to a codec-level VideoFrameType; unknown
// values fall back to delta.
VideoFrameType VCMEncodedFrame::ConvertFrameType(webrtc::FrameType frameType)
{
    switch (frameType)
    {
    case kVideoFrameKey:
        return kKeyFrame;
    case kVideoFrameGolden:
        return kGoldenFrame;
    case kVideoFrameAltRef:
        return kAltRefFrame;
    case kVideoFrameDelta:
    default:
        return kDeltaFrame;
    }
}
}

View File

@@ -0,0 +1,112 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_MODULES_VIDEO_CODING_ENCODED_FRAME_H_
#define WEBRTC_MODULES_VIDEO_CODING_ENCODED_FRAME_H_
#include "module_common_types.h"
#include "common_types.h"
#include "video_coding_defines.h"
#include "video_image.h"
namespace webrtc
{
// An encoded video frame with an owned payload buffer plus VCM-level
// metadata (render time, payload type, missing-frame flag, codec type).
// Inherits EncodedImage privately and re-exposes read access through
// accessors; VerifyAndAllocate manages buffer growth for subclasses.
class VCMEncodedFrame : protected EncodedImage
{
public:
    VCMEncodedFrame();
    VCMEncodedFrame(const webrtc::EncodedImage& rhs);
    VCMEncodedFrame(const VCMEncodedFrame& rhs);
    ~VCMEncodedFrame();
    /**
    *   Delete VideoFrame and resets members to zero
    */
    void Free();
    /**
    *   Set render time in milliseconds
    */
    void SetRenderTime(const WebRtc_Word64 renderTimeMs) {_renderTimeMs = renderTimeMs;}
    /**
    *   Set the encoded frame size
    */
    void SetEncodedSize(WebRtc_UWord32 width, WebRtc_UWord32 height)
                       { _encodedWidth = width; _encodedHeight = height; }
    /**
    *   Get the encoded image
    */
    const webrtc::EncodedImage& EncodedImage() const
                       { return static_cast<const webrtc::EncodedImage&>(*this); }
    /**
    *   Get pointer to frame buffer
    */
    const WebRtc_UWord8* Buffer() const {return _buffer;}
    /**
    *   Get frame length
    */
    WebRtc_UWord32 Length() const {return _length;}
    /**
    *   Get frame timestamp (90kHz)
    */
    WebRtc_UWord32 TimeStamp() const {return _timeStamp;}
    /**
    *   Get render time in milliseconds
    */
    WebRtc_Word64 RenderTimeMs() const {return _renderTimeMs;}
    /**
    *   Get frame type
    */
    webrtc::FrameType FrameType() const {return ConvertFrameType(_frameType);}
    /**
    *   True if this frame is complete, false otherwise
    */
    bool Complete() const { return _completeFrame; }
    /**
    *   True if there's a frame missing before this frame
    */
    bool MissingFrame() const { return _missingFrame; }
    /**
    *   Payload type of the encoded payload
    */
    WebRtc_UWord8 PayloadType() const { return _payloadType; }
    /**
    *   Get codec specific info
    */
    const void* CodecSpecificInfo() const {return _codecSpecificInfo;}
    WebRtc_Word32 Store(VCMFrameStorageCallback& storeCallback) const;
    // Conversions between the codec-level and transport-level frame
    // type enums (unknown values map to delta in both directions).
    static webrtc::FrameType ConvertFrameType(VideoFrameType frameType);
    static VideoFrameType ConvertFrameType(webrtc::FrameType frameType);
protected:
    /**
    *   Verifies that current allocated buffer size is larger than or equal to the input size.
    *   If the current buffer size is smaller, a new allocation is made and the old buffer data
    *   is copied to the new buffer.
    *   Buffer size is updated to minimumSize.
    */
    WebRtc_Word32 VerifyAndAllocate(const WebRtc_UWord32 minimumSize);
    // Resets metadata to defaults; keeps the buffer allocated.
    void Reset();
    WebRtc_Word64                 _renderTimeMs;   // -1 when unset
    WebRtc_UWord8                 _payloadType;
    bool                          _missingFrame;
    void*                         _codecSpecificInfo; // not owned
    WebRtc_UWord32                _codecSpecificInfoLength;
    webrtc::VideoCodecType        _codec;
};
} // namespace webrtc
#endif // WEBRTC_MODULES_VIDEO_CODING_ENCODED_FRAME_H_

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,63 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_MODULES_VIDEO_CODING_EVENT_H_
#define WEBRTC_MODULES_VIDEO_CODING_EVENT_H_
#include "event_wrapper.h"
namespace webrtc
{
//#define EVENT_DEBUG
// Thin owning wrapper around a heap-allocated EventWrapper, forwarding
// all operations. When EVENT_DEBUG is defined, Wait() short-circuits to
// kEventTimeout without blocking.
class VCMEvent : public EventWrapper
{
public:
    VCMEvent() : _event(*EventWrapper::Create()) {};
    virtual ~VCMEvent() { delete &_event; };
    /**
    *   Release waiting threads
    */
    bool Set() { return _event.Set(); };
    bool Reset() { return _event.Reset(); };
    /**
    *   Wait for this event, at most maxTime milliseconds
    */
    EventTypeWrapper Wait(unsigned long maxTime)
    {
#ifdef EVENT_DEBUG
        return kEventTimeout;
#else
        return _event.Wait(maxTime);
#endif
    };
    /**
    *   Start a timer
    */
    bool StartTimer(bool periodic, unsigned long time)
                   { return _event.StartTimer(periodic, time); };
    /**
    *   Stop the timer
    */
    bool StopTimer() { return _event.StopTimer(); };
private:
    EventWrapper& _event; // owned; released in the destructor
};
} // namespace webrtc
#endif // WEBRTC_MODULES_VIDEO_CODING_EVENT_H_

View File

@@ -0,0 +1,60 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "exp_filter.h"
#include <math.h>
namespace webrtc {
// Resets the filter to its uninitialized state (-1 marks "no sample
// seen yet") and installs a new filter factor base.
void
VCMExpFilter::Reset(float alpha)
{
    _alpha = alpha;
    _filtered = -1.0;
}
// Applies y(k) = alpha^exp * y(k-1) + (1 - alpha^exp) * x(k).
// The first sample seeds the filter directly; exp == 1 avoids the pow()
// call; the output is clamped to _max when a cap was configured.
float
VCMExpFilter::Apply(float exp, float sample)
{
    if (_filtered == -1.0)
    {
        // Initialize filtered bit rates
        _filtered = sample;
    }
    else if (exp == 1.0)
    {
        _filtered = _alpha * _filtered + (1 - _alpha) * sample;
    }
    else
    {
        float alpha = pow(_alpha, exp);
        _filtered = alpha * _filtered + (1 - alpha) * sample;
    }
    if (_max != -1 && _filtered > _max)
    {
        _filtered = _max;
    }
    return _filtered;
}
// Changes the filter factor base without resetting the filter state.
void
VCMExpFilter::UpdateBase(float alpha)
{
    _alpha = alpha;
}
// Returns the current filter output y(k); -1.0 before the first sample.
float
VCMExpFilter::Value() const
{
    return _filtered;
}
}

View File

@@ -0,0 +1,58 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_MODULES_VIDEO_CODING_EXP_FILTER_H_
#define WEBRTC_MODULES_VIDEO_CODING_EXP_FILTER_H_
namespace webrtc
{
/**********************/
/* ExpFilter class */
/**********************/
// Exponential filter: y(k) = alpha^T * y(k-1) + (1 - alpha^T) * x(k),
// with an optional upper bound on the output.
class VCMExpFilter
{
public:
    // max = -1.0 means the output is unbounded.
    VCMExpFilter(float alpha, float max = -1.0) : _alpha(alpha), _filtered(-1.0), _max(max) {}
    // Resets the filter to its initial state, and resets alpha to the given value
    //
    // Input:
    //          - alpha             : the new value of the filter factor base.
    void Reset(float alpha);
    // Applies the filter with the given exponent on the provided sample
    //
    // Input:
    //          - exp               : Exponent T in y(k) = alpha^T * y(k-1) + (1 - alpha^T) * x(k)
    //          - sample            : x(k) in the above filter equation
    float Apply(float exp, float sample);
    // Return current filtered value: y(k)
    //
    // Return value                 : The current filter output
    float Value() const;
    // Change the filter factor base
    //
    // Input:
    //          - alpha             : The new filter factor base.
    void UpdateBase(float alpha);
private:
    float          _alpha;     // Filter factor base
    float          _filtered;  // Current filter output; -1.0 = uninitialized
    const float    _max;       // Output cap; -1.0 = no cap
}; // end of ExpFilter class
} // namespace webrtc
#endif // WEBRTC_MODULES_VIDEO_CODING_EXP_FILTER_H_

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,370 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "../../../../engine_configurations.h"
#include "frame_buffer.h"
#include "packet.h"
#include <cassert>
#include <string.h>
#if defined(_WIN32)
// VS 2005: Don't warn for default initialized arrays. See help for more info.
#pragma warning(disable:4351)
#endif
namespace webrtc {
// Constructor: empty frame buffer in the free state, with no packet
// received yet (_latestPacketTimeMs == -1).
VCMFrameBuffer::VCMFrameBuffer() :
    _state(kStateFree),
    _frameCounted(false),
    _nackCount(0),
    _latestPacketTimeMs(-1)
{
}
// Destructor: returns the buffer to its initial state; the payload
// memory itself is released by the VCMEncodedFrame base destructor.
VCMFrameBuffer::~VCMFrameBuffer()
{
    Reset();
}
// Copy constructor. The session info is default-constructed in the
// initializer list and then copy-assigned in the body.
// NOTE(review): takes a non-const reference because the base copy
// constructor does — confirm callers rely on this signature.
VCMFrameBuffer::VCMFrameBuffer(VCMFrameBuffer& rhs)
:
VCMEncodedFrame(rhs),
_state(rhs._state),
_frameCounted(rhs._frameCounted),
_sessionInfo(),
_nackCount(rhs._nackCount),
_latestPacketTimeMs(rhs._latestPacketTimeMs)
{
    _sessionInfo = rhs._sessionInfo;
}
// Frame type as tracked by the packet session info.
webrtc::FrameType
VCMFrameBuffer::FrameType() const
{
    return _sessionInfo.FrameType();
}
// Marks that a frame preceding this one was lost.
void
VCMFrameBuffer::SetPreviousFrameLoss()
{
    _sessionInfo.SetPreviousFrameLoss();
}
// Lowest packet sequence number received for this frame.
WebRtc_Word32
VCMFrameBuffer::GetLowSeqNum()
{
    return _sessionInfo.GetLowSeqNum();
}
// Get highest sequence number for complete sessions; -1 when the
// session is not yet complete.
WebRtc_Word32
VCMFrameBuffer::GetHighSeqNumComplete()
{
    if (!_sessionInfo.IsSessionComplete())
    {
        return -1;
    }
    return _sessionInfo.GetHighSeqNum();
}
// Highest packet sequence number received for this frame.
WebRtc_Word32
VCMFrameBuffer::GetHighSeqNum()
{
    return _sessionInfo.GetHighSeqNum();
}
// True when all packets of the frame have been received.
bool
VCMFrameBuffer::IsSessionComplete()
{
    return _sessionInfo.IsSessionComplete();
}
// Insert packet
// Appends one packet's payload to this frame, growing the buffer in
// kBufferIncStepSizeBytes increments, and updates session bookkeeping.
// Returns a VCMFrameBufferEnum describing the outcome/frame state.
VCMFrameBufferEnum
VCMFrameBuffer::InsertPacket(const VCMPacket& packet, WebRtc_Word64 timeInMs)
{
    if (_state == kStateDecoding)
    {
        // Do not insert packet
        return kIncomplete;
    }
    // Sanity to check if the frame has been freed. (Too old for example)
    if(_state == kStateFree)
    {
        return kStateError;
    }
    // is this packet part of this frame
    if (TimeStamp() && (TimeStamp() != packet.timestamp))
    {
        return kTimeStampError;
    }
    // sanity checks
    if (_size + packet.sizeBytes + (packet.insertStartCode?kH264StartCodeLengthBytes:0) >
        kMaxJBFrameSizeBytes)
    {
        return kSizeError;
    }
    if (NULL == packet.dataPtr && packet.sizeBytes > 0)
    {
        return kSizeError;
    }
    if(!_sessionInfo.HaveStartSeqNumber())
    {
        _sessionInfo.SetStartSeqNumber(packet.seqNum);
    }
    if (packet.dataPtr != NULL)
    {
        _payloadType = packet.payloadType;
    }
    if (kStateEmpty == _state)
    {
        // This is the first packet inserted into this frame,
        // store some info and set some initial values.
        _timeStamp = packet.timestamp;
        _codec = packet.codec;
        SetState(kStateIncomplete);
    }
    // Grow the payload buffer in fixed-size steps so that repeated
    // inserts do not reallocate for every packet.
    WebRtc_UWord32 requiredSizeBytes = Length() + packet.sizeBytes + (packet.insertStartCode?kH264StartCodeLengthBytes:0);
    if (requiredSizeBytes >= _size)
    {
        const WebRtc_UWord32 increments = requiredSizeBytes / kBufferIncStepSizeBytes +
                                        (requiredSizeBytes % kBufferIncStepSizeBytes > 0);
        const WebRtc_UWord32 newSize = _size + increments * kBufferIncStepSizeBytes;
        if (newSize > kMaxJBFrameSizeBytes)
        {
            return kSizeError;
        }
        if (VerifyAndAllocate(newSize) == -1)
        {
            return kSizeError;
        }
    }
    // InsertPacket returns bytes written on success, -1 on size error,
    // -2 for a duplicate packet.
    WebRtc_Word64 retVal = _sessionInfo.InsertPacket(packet, _buffer);
    if(retVal == -1)
    {
        return kSizeError;
    }
    else if (retVal == -2)
    {
        return kDuplicatePacket;
    }
    // update length
    _length = Length() + static_cast<WebRtc_UWord32>(retVal);
    _latestPacketTimeMs = timeInMs;
    if(_sessionInfo.IsSessionComplete())
    {
        return kCompleteSession;
    }
    else
    {
        // this layer is not complete
        if (_state == kStateComplete)
        {
            // we already have a complete layer
            // wait for all independent layers belonging to the same frame
            _state = kStateIncomplete;
        }
    }
    return kIncomplete;
}
// Wall-clock arrival time (ms) of the most recently inserted packet;
// -1 if no packet has been inserted.
WebRtc_Word64 VCMFrameBuffer::LatestPacketTimeMs()
{
    return _latestPacketTimeMs;
}
// Zero out all entries in list up to and including the (first) entry
// equal to _lowSeqNum. Returns 0 on success, -1 if the session info
// rejects the request.
WebRtc_Word32 VCMFrameBuffer::ZeroOutSeqNum(WebRtc_Word32* list, WebRtc_Word32 num)
{
    return (_sessionInfo.ZeroOutSeqNum(list, num) != 0) ? -1 : 0;
}
// Counts one more NACK issued for this frame.
void VCMFrameBuffer::IncrementNackCount()
{
    _nackCount++;
}
// Number of NACKs issued for this frame.
// NOTE(review): _nackCount is declared unsigned in the header but
// returned as signed here — confirm the count stays within range.
WebRtc_Word16 VCMFrameBuffer::GetNackCount() const
{
    return _nackCount;
}
// True when the marker/last packet of the frame has been received.
bool VCMFrameBuffer::HaveLastPacket()
{
    return _sessionInfo.HaveLastPacket();
}
// Forces the session to treat the highest received packet as the last
// one; returns whether the session is then complete.
bool
VCMFrameBuffer::ForceSetHaveLastPacket()
{
    _sessionInfo.ForceSetHaveLastPacket();
    return _sessionInfo.IsSessionComplete();
}
// Returns the buffer to the free state: clears session info, counters
// and base-class metadata. The payload allocation is kept for reuse.
void VCMFrameBuffer::Reset()
{
    _length = 0;
    _timeStamp = 0;
    _sessionInfo.Reset();
    _frameCounted = false;
    _payloadType = 0;
    _nackCount = 0;
    _latestPacketTimeMs = -1;
    _state = kStateFree;
    VCMEncodedFrame::Reset();
}
// Makes sure the session contains a decodable stream by letting the
// session info strip undecodable data; the number of removed bytes is
// subtracted from the frame length.
void
VCMFrameBuffer::MakeSessionDecodable()
{
    WebRtc_Word32 retVal = _sessionInfo.MakeSessionDecodable(_buffer);
    // update length
    _length -= retVal;
}
// Set state of frame. Enforces the legal state transitions with asserts
// and performs the per-transition side effects (full reset when going
// to kStateFree, frame restructuring when entering kStateDecoding).
void
VCMFrameBuffer::SetState(VCMFrameBufferStateEnum state)
{
    if(_state == state)
    {
        return;
    }
    switch (state)
    {
    case kStateFree:
        // Reset everything
        // We can go to this state from all other states.
        // The one setting the state to free must ensure
        // that the frame is removed from the timestamp
        // ordered frame list in the jb.
        Reset();
        break;
    case kStateIncomplete:
        // we can go to this state from state kStateEmpty
        assert(_state == kStateEmpty ||
            _state == kStateDecoding);
        // Do nothing, we received a packet
        break;
    case kStateComplete:
        assert(_state == kStateEmpty ||
               _state == kStateIncomplete);
        break;
    case kStateEmpty:
        assert(_state == kStateFree);
        // Do nothing
        break;
    case kStateDecoding:
        // we can go to this state from state kStateComplete kStateIncomplete
        assert(_state == kStateComplete || _state == kStateIncomplete);
        // Transfer frame information to EncodedFrame and create any codec specific information
        RestructureFrameInformation();
        break;
    default:
        // Should never happen
        assert(!"FrameBuffer::SetState Incorrect frame buffer state as input");
        return;
    }
    _state = state;
}
// Copies session-level information (frame type, completeness, previous
// frame loss) into the base-class frame fields and prepares the payload
// for the decoder.
void
VCMFrameBuffer::RestructureFrameInformation()
{
    PrepareForDecode();
    _frameType = ConvertFrameType(_sessionInfo.FrameType());
    _completeFrame = _sessionInfo.IsSessionComplete();
    _missingFrame = _sessionInfo.PreviousFrameLoss();
}
// Populates this frame from a previously stored EncodedVideoData,
// deep-copying the payload. Returns VCM_MEMORY on allocation failure,
// VCM_OK on success.
WebRtc_Word32
VCMFrameBuffer::ExtractFromStorage(const EncodedVideoData& frameFromStorage)
{
    _frameType = ConvertFrameType(frameFromStorage.frameType);
    _timeStamp = frameFromStorage.timeStamp;
    _payloadType = frameFromStorage.payloadType;
    _encodedWidth = frameFromStorage.encodedWidth;
    _encodedHeight = frameFromStorage.encodedHeight;
    _missingFrame = frameFromStorage.missingFrame;
    _completeFrame = frameFromStorage.completeFrame;
    _renderTimeMs = frameFromStorage.renderTimeMs;
    _codec = frameFromStorage.codec;
    if (VerifyAndAllocate(frameFromStorage.payloadSize) < 0)
    {
        return VCM_MEMORY;
    }
    memcpy(_buffer, frameFromStorage.payloadData, frameFromStorage.payloadSize);
    _length = frameFromStorage.payloadSize;
    return VCM_OK;
}
// Set counted status (as counted by JB or not)
void VCMFrameBuffer::SetCountedFrame(bool frameCounted)
{
    _frameCounted = frameCounted;
}
// Whether the jitter buffer has counted this frame.
bool VCMFrameBuffer::GetCountedFrame()
{
    return _frameCounted;
}
// Get current state of frame
VCMFrameBufferStateEnum
VCMFrameBuffer::GetState() const
{
    return _state;
}
// Get current state of frame, and return its timestamp via the
// out-parameter in the same call.
VCMFrameBufferStateEnum
VCMFrameBuffer::GetState(WebRtc_UWord32& timeStamp) const
{
    timeStamp = TimeStamp();
    return GetState();
}
// True if any packet of this frame was a retransmission.
bool
VCMFrameBuffer::IsRetransmitted()
{
    return _sessionInfo.IsRetransmitted();
}
// Lets the session info rewrite the payload into decoder-ready form;
// the session returns the resulting payload length.
void
VCMFrameBuffer::PrepareForDecode()
{
    _length = _sessionInfo.PrepareForDecode(_buffer, _codec);
}
}

View File

@@ -0,0 +1,91 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_MODULES_VIDEO_CODING_FRAME_BUFFER_H_
#define WEBRTC_MODULES_VIDEO_CODING_FRAME_BUFFER_H_
#include "typedefs.h"
#include "module_common_types.h"
#include "encoded_frame.h"
#include "frame_list.h"
#include "jitter_buffer_common.h"
#include "session_info.h"
namespace webrtc
{
// A jitter-buffer frame: an encoded frame assembled from RTP packets,
// with a state machine (free/empty/incomplete/complete/decoding),
// per-frame NACK bookkeeping and session (packet) tracking.
class VCMFrameBuffer : public VCMEncodedFrame
{
public:
    VCMFrameBuffer();
    virtual ~VCMFrameBuffer();
    VCMFrameBuffer(VCMFrameBuffer& rhs);
    virtual void Reset();
    VCMFrameBufferEnum InsertPacket(const VCMPacket& packet, WebRtc_Word64 timeInMs);
    // State
    // Get current state of frame
    VCMFrameBufferStateEnum GetState() const;
    // Get current state and timestamp of frame
    VCMFrameBufferStateEnum GetState(WebRtc_UWord32& timeStamp) const;
    void SetState(VCMFrameBufferStateEnum state); // Set state of frame
    bool IsRetransmitted();
    bool IsSessionComplete();
    bool HaveLastPacket();
    bool ForceSetHaveLastPacket();
    // Makes sure the session contain a decodable stream.
    void MakeSessionDecodable();
    // Sequence numbers
    // Get lowest packet sequence number in frame
    WebRtc_Word32 GetLowSeqNum();
    // Get highest packet sequence number in frame
    WebRtc_Word32 GetHighSeqNum();
    // Get highest sequence number of complete session
    WebRtc_Word32 GetHighSeqNumComplete();
    // Set counted status (as counted by JB or not)
    void SetCountedFrame(bool frameCounted);
    bool GetCountedFrame();
    // NACK
    // Zero out all entries in list up to and including the entry equal to _lowSeqNum
    WebRtc_Word32 ZeroOutSeqNum(WebRtc_Word32* list, WebRtc_Word32 num);
    void IncrementNackCount();
    WebRtc_Word16 GetNackCount() const;
    WebRtc_Word64 LatestPacketTimeMs();
    webrtc::FrameType FrameType() const;
    void SetPreviousFrameLoss();
    WebRtc_Word32 ExtractFromStorage(const EncodedVideoData& frameFromStorage);
protected:
    void RestructureFrameInformation();
    void PrepareForDecode();
private:
    VCMFrameBufferStateEnum    _state;         // Current state of the frame
    bool                       _frameCounted;  // If this frame has been counted by JB
    VCMSessionInfo             _sessionInfo;   // packet/session bookkeeping
    WebRtc_UWord16             _nackCount;     // NACKs issued for this frame
    WebRtc_Word64              _latestPacketTimeMs; // arrival of last packet; -1 = none
};
} // namespace webrtc
#endif // WEBRTC_MODULES_VIDEO_CODING_FRAME_BUFFER_H_

View File

@@ -0,0 +1,331 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "frame_dropper.h"
#include "internal_defines.h"
#include "trace.h"
namespace webrtc
{
// Constructs the dropper with its exponential-filter bases for key
// frame size, key frame ratio and drop ratio, then applies the full
// Reset() to establish the remaining defaults.
VCMFrameDropper::VCMFrameDropper(WebRtc_Word32 vcmId)
:
_vcmId(vcmId),
_keyFrameSizeAvgKbits(0.9f),
_keyFrameRatio(0.99f),
_dropRatio(0.9f, 0.96f)
{
    Reset();
}
// Restores every member to its initial value: a 300 kb/s target at
// 30 fps with a 0.5 s leaky-bucket window, dropping enabled and
// aggressive (fast) mode off.
void
VCMFrameDropper::Reset()
{
    _keyFrameRatio.Reset(0.99f);
    _keyFrameRatio.Apply(1.0f, 1.0f/300.0f); // 1 key frame every 10th second in 30 fps
    _keyFrameSizeAvgKbits.Reset(0.9f);
    _keyFrameCount = 0;
    _accumulator = 0.0f;
    _accumulatorMax = 150.0f; // assume 300 kb/s and 0.5 s window
    _targetBitRate = 300.0f;
    _userFrameRate = 30;
    _keyFrameSpreadFrames = 0.5f * _userFrameRate;
    _dropNext = false;
    _dropRatio.Reset(0.9f);
    _dropRatio.Apply(0.0f, 0.0f); // Initialize to 0
    _dropCount = 0;
    _windowSize = 0.5f;
    _wasBelowMax = true;
    _enabled = true;
    _fastMode = false; // start with normal (non-aggressive) mode
}
// Enables or disables the frame dropper; when disabled all updates and
// DropFrame() become no-ops.
void
VCMFrameDropper::Enable(bool enable)
{
    _enabled = enable;
}
// Adds one encoded frame's size (in kbits) to the leaky bucket. Key
// frames (outside fast mode) update the key-frame size/ratio filters
// and schedule compensation over the following _keyFrameCount frames
// instead of charging their full size at once.
void
VCMFrameDropper::Fill(WebRtc_UWord32 frameSizeBytes, bool deltaFrame)
{
    if (!_enabled)
    {
        return;
    }
    float frameSizeKbits = 8.0f * static_cast<float>(frameSizeBytes) / 1000.0f;
    if (!deltaFrame && !_fastMode) // fast mode does not treat key-frames any different
    {
        _keyFrameSizeAvgKbits.Apply(1, frameSizeKbits);
        _keyFrameRatio.Apply(1.0, 1.0);
        if (frameSizeKbits > _keyFrameSizeAvgKbits.Value())
        {
            // Remove the average key frame size since we
            // compensate for key frames when adding delta
            // frames.
            frameSizeKbits -= _keyFrameSizeAvgKbits.Value();
        }
        else
        {
            // Shouldn't be negative, so zero is the lower bound.
            frameSizeKbits = 0;
        }
        if (_keyFrameRatio.Value() > 1e-5 && 1 / _keyFrameRatio.Value() < _keyFrameSpreadFrames)
        {
            // We are sending key frames more often than our upper bound for
            // how much we allow the key frame compensation to be spread
            // out in time. Therefor we must use the key frame ratio rather
            // than keyFrameSpreadFrames.
            _keyFrameCount = static_cast<WebRtc_Word32>(1 / _keyFrameRatio.Value() + 0.5);
        }
        else
        {
            // Compensate for the key frame the following frames
            _keyFrameCount = static_cast<WebRtc_Word32>(_keyFrameSpreadFrames + 0.5);
        }
    }
    else
    {
        // Decrease the keyFrameRatio
        _keyFrameRatio.Apply(1.0, 0.0);
    }
    // Change the level of the accumulator (bucket)
    _accumulator += frameSizeKbits;
}
// Drains the leaky bucket by one frame interval's worth of target bits
// (T = target bitrate / frame rate), with a per-frame deduction that
// spreads out pending key-frame compensation, then recomputes the drop
// ratio.
void
VCMFrameDropper::Leak(WebRtc_UWord32 inputFrameRate)
{
    if (!_enabled)
    {
        return;
    }
    if (inputFrameRate < 1)
    {
        return;
    }
    if (_targetBitRate < 0.0f)
    {
        return;
    }
    _keyFrameSpreadFrames = 0.5f * inputFrameRate;
    // T is the expected bits per frame (target). If all frames were the same size,
    // we would get T bits per frame. Notice that T is also weighted to be able to
    // force a lower frame rate if wanted.
    float T = _targetBitRate / inputFrameRate;
    if (_keyFrameCount > 0)
    {
        // Perform the key frame compensation
        if (_keyFrameRatio.Value() > 0 && 1 / _keyFrameRatio.Value() < _keyFrameSpreadFrames)
        {
            T -= _keyFrameSizeAvgKbits.Value() * _keyFrameRatio.Value();
        }
        else
        {
            T -= _keyFrameSizeAvgKbits.Value() / _keyFrameSpreadFrames;
        }
        _keyFrameCount--;
    }
    _accumulator -= T;
    UpdateRatio();
}
// Charges retransmitted (NACKed) bytes to the leaky bucket, converted
// from bytes to kbits.
void
VCMFrameDropper::UpdateNack(WebRtc_UWord32 nackBytes)
{
    if (!_enabled)
    {
        return;
    }
    _accumulator += static_cast<float>(nackBytes) * 8.0f / 1000.0f;
}
// Adjusts the bucket level directly by (inflow - outflow) in kbits.
void
VCMFrameDropper::FillBucket(float inKbits, float outKbits)
{
    _accumulator += (inKbits - outKbits);
}
// Recomputes the filtered drop ratio from the bucket level: overflow
// above the max increases the ratio (and schedules a drop on the
// crossing, or always in fast mode); otherwise the ratio decays. The
// filter base is tightened when far above the max for faster reaction.
void
VCMFrameDropper::UpdateRatio()
{
    if (_accumulator > 1.3f * _accumulatorMax)
    {
        // Too far above accumulator max, react faster
        _dropRatio.UpdateBase(0.8f);
    }
    else
    {
        // Go back to normal reaction
        _dropRatio.UpdateBase(0.9f);
    }
    if (_accumulator > _accumulatorMax)
    {
        // We are above accumulator max, and should ideally
        // drop a frame. Increase the dropRatio and drop
        // the frame later.
        if (_wasBelowMax)
        {
            _dropNext = true;
        }
        if (_fastMode)
        {
            // always drop in aggressive mode
            _dropNext = true;
        }
        _dropRatio.Apply(1.0f, 1.0f);
        _dropRatio.UpdateBase(0.9f);
    }
    else
    {
        _dropRatio.Apply(1.0f, 0.0f);
    }
    if (_accumulator < 0.0f)
    {
        _accumulator = 0.0f;
    }
    _wasBelowMax = _accumulator < _accumulatorMax;
    WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCoding, VCMId(_vcmId), "FrameDropper: dropRatio = %f accumulator = %f, accumulatorMax = %f", _dropRatio.Value(), _accumulator, _accumulatorMax);
}
// This function signals when to drop frames to the caller. It makes use of the dropRatio
// to smooth out the drops over time.
// With ratio >= 0.5 it drops `limit` frames per kept frame (positive
// _dropCount); with 0 < ratio < 0.5 it keeps |limit| frames per dropped
// frame (negative _dropCount). The sign-fix branches re-orient
// _dropCount when the ratio crosses 0.5 between calls.
bool
VCMFrameDropper::DropFrame()
{
    if (!_enabled)
    {
        return false;
    }
    if (_dropNext)
    {
        _dropNext = false;
        _dropCount = 0;
    }
    if (_dropRatio.Value() >= 0.5f) // Drops per keep
    {
        // limit is the number of frames we should drop between each kept frame
        // to keep our drop ratio. limit is positive in this case.
        float denom = 1.0f - _dropRatio.Value();
        if (denom < 1e-5)
        {
            denom = (float)1e-5;
        }
        WebRtc_Word32 limit = static_cast<WebRtc_Word32>(1.0f / denom - 1.0f + 0.5f);
        if (_dropCount < 0)
        {
            // Reset the _dropCount since it was negative and should be positive.
            if (_dropRatio.Value() > 0.4f)
            {
                _dropCount = -_dropCount;
            }
            else
            {
                _dropCount = 0;
            }
        }
        if (_dropCount < limit)
        {
            // As long we are below the limit we should drop frames.
            _dropCount++;
            return true;
        }
        else
        {
            // Only when we reset _dropCount a frame should be kept.
            _dropCount = 0;
            return false;
        }
    }
    else if (_dropRatio.Value() > 0.0f && _dropRatio.Value() < 0.5f) // Keeps per drop
    {
        // limit is the number of frames we should keep between each drop
        // in order to keep the drop ratio. limit is negative in this case,
        // and the _dropCount is also negative.
        float denom = _dropRatio.Value();
        if (denom < 1e-5)
        {
            denom = (float)1e-5;
        }
        WebRtc_Word32 limit = -static_cast<WebRtc_Word32>(1.0f / denom - 1.0f + 0.5f);
        if (_dropCount > 0)
        {
            // Reset the _dropCount since we have a positive
            // _dropCount, and it should be negative.
            if (_dropRatio.Value() < 0.6f)
            {
                _dropCount = -_dropCount;
            }
            else
            {
                _dropCount = 0;
            }
        }
        if (_dropCount > limit)
        {
            if (_dropCount == 0)
            {
                // Drop frames when we reset _dropCount.
                _dropCount--;
                return true;
            }
            else
            {
                // Keep frames as long as we haven't reached limit.
                _dropCount--;
                return false;
            }
        }
        else
        {
            _dropCount = 0;
            return false;
        }
    }
    // Drop ratio is zero: never drop.
    _dropCount = 0;
    return false;
    // A simpler version, unfiltered and quicker
    //bool dropNext = _dropNext;
    //_dropNext = false;
    //return dropNext;
}
// Updates the target bit rate and the camera frame rate. The leaky-bucket
// ceiling (_accumulatorMax) tracks one window's worth of the new bit budget,
// and the current bucket level is rescaled when the budget shrinks.
void
VCMFrameDropper::SetRates(float bitRate, float userFrameRate)
{
    // Bit rate of -1 means infinite bandwidth.
    _accumulatorMax = bitRate * _windowSize; // bitRate * windowSize (in seconds)
    const bool budgetShrank = (_targetBitRate > 0.0f && bitRate < _targetBitRate);
    if (budgetShrank && _accumulator > _accumulatorMax)
    {
        // Rescale the accumulator level if the accumulator max decreases
        _accumulator *= bitRate / _targetBitRate;
    }
    _targetBitRate = bitRate;
    if (userFrameRate > 0.0f)
    {
        _userFrameRate = userFrameRate;
    }
}
// Reports the frame rate that results from following DropFrame()'s advice:
// the input rate scaled by the fraction of frames that are kept.
float
VCMFrameDropper::ActualFrameRate(WebRtc_UWord32 inputFrameRate) const
{
    // When dropping is disabled every incoming frame is kept.
    if (!_enabled)
    {
        return static_cast<float>(inputFrameRate);
    }
    const float keptFraction = 1.0f - _dropRatio.Value();
    return inputFrameRate * keptFraction;
}
}

View File

@@ -0,0 +1,94 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_MODULES_VIDEO_CODING_FRAME_DROPPER_H_
#define WEBRTC_MODULES_VIDEO_CODING_FRAME_DROPPER_H_
#include "exp_filter.h"
#include "typedefs.h"
namespace webrtc
{
/******************************/
/* VCMFrameDropper class */
/****************************/
// The Frame Dropper implements a variant of the leaky bucket algorithm
// for keeping track of when to drop frames to avoid bit rate
// over use when the encoder can't keep its bit rate.
class VCMFrameDropper
{
public:
    // vcmId is only used as a trace identifier.
    VCMFrameDropper(WebRtc_Word32 vcmId = 0);
    // Resets the FrameDropper to its initial state.
    // This means that the frameRateWeight is set to its
    // default value as well.
    void Reset();
    // Enables or disables the dropper. When disabled, DropFrame()
    // always answers false.
    void Enable(bool enable);
    // Answers the question if it's time to drop a frame
    // if we want to reach a given frame rate. Must be
    // called for every frame.
    //
    // Return value : True if we should drop the current frame
    bool DropFrame();
    // Updates the FrameDropper with the size of the latest encoded
    // frame. The FrameDropper calculates a new drop ratio (can be
    // seen as the probability to drop a frame) and updates its
    // internal statistics.
    //
    // Input:
    //          - frameSizeBytes    : The size of the latest frame
    //                                returned from the encoder.
    //          - deltaFrame        : True if the encoder returned
    //                                a key frame.
    //                                NOTE(review): the parameter name says
    //                                "delta" but this comment says "key" —
    //                                verify against Fill()'s implementation.
    void Fill(WebRtc_UWord32 frameSizeBytes, bool deltaFrame);
    // Drains the leaky bucket; presumably called once per frame interval
    // with the measured input frame rate (implementation not in view).
    void Leak(WebRtc_UWord32 inputFrameRate);
    // Accounts for bytes sent due to NACK-triggered retransmissions.
    void UpdateNack(WebRtc_UWord32 nackBytes);
    // Sets the target bit rate and the frame rate produced by
    // the camera.
    //
    // Input:
    //          - bitRate       : The target bit rate
    void SetRates(float bitRate, float userFrameRate);
    // Return value    : The current average frame rate produced
    //                   if the DropFrame() function is used as
    //                   instruction of when to drop frames.
    float ActualFrameRate(WebRtc_UWord32 inputFrameRate) const;
private:
    void FillBucket(float inKbits, float outKbits);
    void UpdateRatio();
    WebRtc_Word32 _vcmId; // Trace identifier used in WEBRTC_TRACE logging.
    VCMExpFilter _keyFrameSizeAvgKbits; // Filtered average key frame size.
    VCMExpFilter _keyFrameRatio;
    float _keyFrameSpreadFrames;
    WebRtc_Word32 _keyFrameCount;
    float _accumulator; // Leaky-bucket level, in kbits.
    float _accumulatorMax; // Bucket ceiling: target bit rate * window size.
    float _targetBitRate;
    bool _dropNext; // Force-drop flag consumed by DropFrame().
    VCMExpFilter _dropRatio; // Filtered fraction of frames to drop.
    WebRtc_Word32 _dropCount; // Position in the drop/keep cycle (sign follows the mode).
    float _windowSize; // Accumulator window length, in seconds.
    float _userFrameRate;
    bool _wasBelowMax; // Whether the accumulator was below its ceiling after last update.
    bool _enabled;
    bool _fastMode;
}; // end of VCMFrameDropper class
} // namespace webrtc
#endif // WEBRTC_MODULES_VIDEO_CODING_FRAME_DROPPER_H_

View File

@@ -0,0 +1,113 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "frame_list.h"
#include "frame_buffer.h"
#include "jitter_buffer.h"
#include <cstdlib>
namespace webrtc {
VCMFrameListTimestampOrderAsc::~VCMFrameListTimestampOrderAsc()
{
    // Release all list items still held by the list.
    Flush();
}
void
VCMFrameListTimestampOrderAsc::Flush()
{
    // Erase returns -1 once the list is empty (First() yields NULL).
    while(Erase(First()) != -1) { }
}
// Inserts frame in timestamp order, with the oldest timestamp first. Takes wrap arounds into account
WebRtc_Word32
VCMFrameListTimestampOrderAsc::Insert(VCMFrameBuffer* frame)
{
    VCMFrameListItem* item = static_cast<VCMFrameListItem*>(First());
    VCMFrameListItem* newItem = new VCMFrameListItem(frame);
    bool inserted = false;
    if (newItem == NULL)
    {
        // NOTE(review): operator new only yields NULL in no-exception
        // builds; in standard builds this check is dead. Kept for safety.
        return -1;
    }
    while (item != NULL)
    {
        const WebRtc_UWord32 itemTimestamp = item->GetItem()->TimeStamp();
        if (VCMJitterBuffer::LatestTimestamp(itemTimestamp, frame->TimeStamp()) == itemTimestamp)
        {
            // item is newer (wrap-around aware) than the frame being inserted:
            // place the new item immediately before it to keep ascending order.
            if (InsertBefore(item, newItem) < 0)
            {
                delete newItem;
                return -1;
            }
            inserted = true;
            break;
        }
        item = Next(item);
    }
    // Frame is the newest seen so far (or the list was empty): append at the tail.
    if (!inserted && ListWrapper::Insert(ListWrapper::Last(), newItem) < 0)
    {
        delete newItem;
        return -1;
    }
    return 0;
}
// Returns the frame with the oldest timestamp, or NULL when the list is empty.
VCMFrameBuffer*
VCMFrameListTimestampOrderAsc::FirstFrame() const
{
    VCMFrameListItem* head = First();
    return (head != NULL) ? head->GetItem() : NULL;
}
// Walks the list starting at startItem (or the head when startItem is NULL)
// and returns the first item whose frame satisfies the given criteria,
// or NULL when no item matches or no criteria was supplied.
VCMFrameListItem*
VCMFrameListTimestampOrderAsc::FindFrameListItem(FindFrameCriteria criteria,
                                                 const void* compareWith,
                                                 VCMFrameListItem* startItem) const
{
    if (criteria == NULL)
    {
        // No predicate: nothing can match.
        return NULL;
    }
    for (VCMFrameListItem* item = (startItem != NULL) ? startItem : First();
         item != NULL;
         item = Next(item))
    {
        if (criteria(item->GetItem(), compareWith))
        {
            return item;
        }
    }
    // No frame found
    return NULL;
}
// Convenience wrapper around FindFrameListItem that unwraps the matching
// item to its frame buffer; NULL when no frame matches.
VCMFrameBuffer*
VCMFrameListTimestampOrderAsc::FindFrame(FindFrameCriteria criteria,
                                         const void* compareWith,
                                         VCMFrameListItem* startItem) const
{
    const VCMFrameListItem* match = FindFrameListItem(criteria, compareWith, startItem);
    return (match != NULL) ? match->GetItem() : NULL;
}
}

View File

@@ -0,0 +1,66 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_MODULES_VIDEO_CODING_FRAME_LIST_H_
#define WEBRTC_MODULES_VIDEO_CODING_FRAME_LIST_H_
#include "list_wrapper.h"
#include "typedefs.h"
#include <stdlib.h>
namespace webrtc
{
class VCMFrameBuffer;
typedef bool (*FindFrameCriteria)(VCMFrameBuffer*, const void*);
// List node wrapping a VCMFrameBuffer pointer for use in ListWrapper-based
// lists. Does not own the frame it points to.
class VCMFrameListItem : public ListItem
{
    friend class VCMFrameListTimestampOrderAsc;
public:
    VCMFrameListItem(const VCMFrameBuffer* ptr) : ListItem(ptr) {}
    ~VCMFrameListItem() {};
    // Returns the wrapped frame buffer (stored type-erased by ListItem).
    VCMFrameBuffer* GetItem() const
    { return static_cast<VCMFrameBuffer*>(ListItem::GetItem()); }
};
// Linked list of frame buffers kept sorted by RTP timestamp, oldest first,
// with RTP timestamp wrap-around taken into account on insert.
class VCMFrameListTimestampOrderAsc : public ListWrapper
{
public:
    VCMFrameListTimestampOrderAsc() : ListWrapper() {};
    ~VCMFrameListTimestampOrderAsc();
    // Erases all items from the list.
    void Flush();
    // Inserts frame in timestamp order, with the oldest timestamp first.
    // Takes wrap arounds into account.
    WebRtc_Word32 Insert(VCMFrameBuffer* frame);
    // Returns the frame with the oldest timestamp, or NULL if empty.
    VCMFrameBuffer* FirstFrame() const;
    // Typed wrappers around the base class traversal methods.
    VCMFrameListItem* Next(VCMFrameListItem* item) const
    { return static_cast<VCMFrameListItem*>(ListWrapper::Next(item)); }
    VCMFrameListItem* Previous(VCMFrameListItem* item) const
    { return static_cast<VCMFrameListItem*>(ListWrapper::Previous(item)); }
    VCMFrameListItem* First() const
    { return static_cast<VCMFrameListItem*>(ListWrapper::First()); }
    VCMFrameListItem* Last() const
    { return static_cast<VCMFrameListItem*>(ListWrapper::Last()); }
    // Linear search for the first item/frame satisfying criteria,
    // optionally starting from startItem.
    VCMFrameListItem* FindFrameListItem(FindFrameCriteria criteria,
                                        const void* compareWith = NULL,
                                        VCMFrameListItem* startItem = NULL) const;
    VCMFrameBuffer* FindFrame(FindFrameCriteria criteria,
                              const void* compareWith = NULL,
                              VCMFrameListItem* startItem = NULL) const;
};
} // namespace webrtc
#endif // WEBRTC_MODULES_VIDEO_CODING_FRAME_LIST_H_

View File

@@ -0,0 +1,203 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "video_coding.h"
#include "trace.h"
#include "generic_decoder.h"
#include "internal_defines.h"
#include "tick_time.h"
namespace webrtc {
// Constructs the callback; owns the critical section it creates here.
// Fix: _lastReceivedPictureID was previously left uninitialized, so
// LastReceivedPictureID() returned an indeterminate value until the first
// ReceivedDecodedFrame() call.
VCMDecodedFrameCallback::VCMDecodedFrameCallback(VCMTiming& timing)
:
_critSect(*CriticalSectionWrapper::CreateCriticalSection()),
_receiveCallback(NULL),
_timing(timing),
_timestampMap(kDecoderFrameMemoryLength),
_lastReceivedPictureID(0)
{
}
VCMDecodedFrameCallback::~VCMDecodedFrameCallback()
{
    // The critical section was created in the constructor and is owned here.
    delete &_critSect;
}
void VCMDecodedFrameCallback::SetUserReceiveCallback(VCMReceiveCallback* receiveCallback)
{
    // Thread-safe setter; the callback pointer is read under the same lock
    // in Decoded().
    CriticalSectionScoped cs(_critSect);
    _receiveCallback = receiveCallback;
}
// Called by the decoder when an image has been decoded. Stops the decode
// timer for the frame and forwards it to the registered receive callback.
WebRtc_Word32 VCMDecodedFrameCallback::Decoded(RawImage& decodedImage)
{
    CriticalSectionScoped cs(_critSect);
    // Recover the bookkeeping info registered in Map() when decoding started.
    VCMFrameInformation* frameInfo = static_cast<VCMFrameInformation*>(_timestampMap.Pop(decodedImage._timeStamp));
    if (frameInfo == NULL)
    {
        // No matching Map() entry; the frame cannot be timed or rendered.
        return WEBRTC_VIDEO_CODEC_ERROR;
    }
    WebRtc_Word32 ret = _timing.StopDecodeTimer(decodedImage._timeStamp, frameInfo->decodeStartTimeMs, VCMTickTime::MillisecondTimestamp());
    if (_receiveCallback != NULL)
    {
        // Hand the decoded buffer over to _frame without copying.
        _frame.Swap(decodedImage._buffer, decodedImage._length, decodedImage._size);
        _frame.SetWidth(decodedImage._width);
        _frame.SetHeight(decodedImage._height);
        _frame.SetTimeStamp(decodedImage._timeStamp);
        _frame.SetRenderTime(frameInfo->renderTimeMs);
        // Convert raw image to video frame
        WebRtc_Word32 callbackReturn = _receiveCallback->FrameToRender(_frame);
        if (callbackReturn < 0)
        {
            // A render-callback failure takes precedence over timer errors.
            return callbackReturn;
        }
    }
    if (ret < 0)
    {
        // Propagate a StopDecodeTimer failure after the frame was delivered.
        return ret;
    }
    return WEBRTC_VIDEO_CODEC_OK;
}
WebRtc_Word32
VCMDecodedFrameCallback::ReceivedDecodedReferenceFrame(const WebRtc_UWord64 pictureId)
{
    // Forward the notification to the registered receive callback, if any.
    CriticalSectionScoped cs(_critSect);
    if (_receiveCallback != NULL)
    {
        return _receiveCallback->ReceivedDecodedReferenceFrame(pictureId);
    }
    // No callback registered.
    return -1;
}
WebRtc_Word32
VCMDecodedFrameCallback::ReceivedDecodedFrame(const WebRtc_UWord64 pictureId)
{
    // NOTE(review): written without holding _critSect while sibling methods
    // lock — confirm single-threaded access is intended here.
    _lastReceivedPictureID = pictureId;
    return 0;
}
WebRtc_UWord64 VCMDecodedFrameCallback::LastReceivedPictureID() const
{
    // Picture ID most recently passed to ReceivedDecodedFrame().
    return _lastReceivedPictureID;
}
WebRtc_Word32 VCMDecodedFrameCallback::Map(WebRtc_UWord32 timestamp, VCMFrameInformation* frameInfo)
{
    // Associate decode bookkeeping with the frame's RTP timestamp.
    CriticalSectionScoped cs(_critSect);
    return _timestampMap.Add(timestamp, frameInfo);
}
WebRtc_Word32 VCMDecodedFrameCallback::Pop(WebRtc_UWord32 timestamp)
{
    // Remove a previously mapped timestamp, e.g. after a decoder failure.
    CriticalSectionScoped cs(_critSect);
    if (_timestampMap.Pop(timestamp) == NULL)
    {
        return VCM_GENERAL_ERROR;
    }
    return VCM_OK;
}
VCMGenericDecoder::VCMGenericDecoder(VideoDecoder& decoder, WebRtc_Word32 id, bool isExternal)
:
_id(id),
_callback(NULL),
_frameInfos(),
_nextFrameInfoIdx(0),
_decoder(decoder),
_codecType(kVideoCodecUnknown),
_isExternal(isExternal),
_requireKeyFrame(false),
_keyFrameDecoded(false)
{
}
VCMGenericDecoder::~VCMGenericDecoder()
{
    // _decoder is a reference owned by the caller; nothing to release here.
}
// Initializes the wrapped decoder and (re)arms the key-frame gate used by
// Decode() when requireKeyFrame is set.
WebRtc_Word32 VCMGenericDecoder::InitDecode(const VideoCodec* settings, WebRtc_Word32 numberOfCores, bool requireKeyFrame)
{
    _requireKeyFrame = requireKeyFrame;
    _keyFrameDecoded = false;
    _codecType = settings->codecType;
    return _decoder.InitDecode(settings, numberOfCores);
}
// Decodes one encoded frame. Registers per-frame timing info with the decode
// callback (keyed by RTP timestamp) before invoking the decoder, and
// unregisters it again on decoder failure.
WebRtc_Word32 VCMGenericDecoder::Decode(const VCMEncodedFrame& frame)
{
    if (_requireKeyFrame &&
        !_keyFrameDecoded &&
        frame.FrameType() != kVideoFrameKey &&
        frame.FrameType() != kVideoFrameGolden)
    {
        // Require key frame is enabled, meaning that one key frame must be decoded
        // before we can decode delta frames.
        return VCM_CODEC_ERROR;
    }
    // Store timing info in a fixed-size ring buffer; Decoded() recovers it
    // through the callback's timestamp map.
    _frameInfos[_nextFrameInfoIdx].decodeStartTimeMs = VCMTickTime::MillisecondTimestamp();
    _frameInfos[_nextFrameInfoIdx].renderTimeMs = frame.RenderTimeMs();
    _callback->Map(frame.TimeStamp(), &_frameInfos[_nextFrameInfoIdx]);
    WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCoding, VCMId(_id),
               "Decoding timestamp %u",
               frame.TimeStamp());
    _nextFrameInfoIdx = (_nextFrameInfoIdx + 1) % kDecoderFrameMemoryLength;
    WebRtc_Word32 ret = _decoder.Decode(frame.EncodedImage(),
                                        frame.MissingFrame(),
                                        frame.CodecSpecificInfo(),
                                        frame.RenderTimeMs());
    if (ret < WEBRTC_VIDEO_CODEC_OK)
    {
        WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceVideoCoding, VCMId(_id), "Decoder error: %d\n", ret);
        // Unregister the timing info so the map does not keep a stale entry.
        _callback->Pop(frame.TimeStamp());
        return ret;
    }
    // Update the key frame decoded variable so that we know whether or not we've decoded a key frame since reset.
    _keyFrameDecoded = (frame.FrameType() == kVideoFrameKey || frame.FrameType() == kVideoFrameGolden);
    return ret;
}
WebRtc_Word32
VCMGenericDecoder::Release()
{
    // A key frame will be required again after release (see Decode()).
    _keyFrameDecoded = false;
    return _decoder.Release();
}
WebRtc_Word32 VCMGenericDecoder::Reset()
{
    // A key frame will be required again after reset (see Decode()).
    _keyFrameDecoded = false;
    return _decoder.Reset();
}
WebRtc_Word32 VCMGenericDecoder::SetCodecConfigParameters(const WebRtc_UWord8* buffer, WebRtc_Word32 size)
{
    // Forward out-of-band configuration data (e.g. from call setup) to the decoder.
    return _decoder.SetCodecConfigParameters(buffer, size);
}
WebRtc_Word32 VCMGenericDecoder::RegisterDecodeCompleteCallback(VCMDecodedFrameCallback* callback)
{
    // Keep a local pointer for Map()/Pop() and register it with the decoder.
    _callback = callback;
    return _decoder.RegisterDecodeCompleteCallback(callback);
}
bool VCMGenericDecoder::External() const
{
    // True when this wrapper was constructed around an externally supplied
    // decoder instance (isExternal constructor argument).
    return _isExternal;
}
}

View File

@@ -0,0 +1,120 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_MODULES_VIDEO_CODING_GENERIC_DECODER_H_
#define WEBRTC_MODULES_VIDEO_CODING_GENERIC_DECODER_H_
#include "timing.h"
#include "timestamp_map.h"
#include "video_codec_interface.h"
#include "encoded_frame.h"
#include "module_common_types.h"
namespace webrtc
{
class VCMReceiveCallback;
enum { kDecoderFrameMemoryLength = 10 };
// Timing bookkeeping attached to each frame handed to the decoder;
// recovered in VCMDecodedFrameCallback::Decoded() via the timestamp map.
struct VCMFrameInformation
{
    WebRtc_Word64 renderTimeMs;      // Target render time of the frame.
    WebRtc_Word64 decodeStartTimeMs; // Wall-clock time when the decode call started.
    void* userData;
};
// Receives decoded images from the decoder, stops the decode timer for the
// frame and forwards it to the registered VCMReceiveCallback.
class VCMDecodedFrameCallback : public DecodedImageCallback
{
public:
    VCMDecodedFrameCallback(VCMTiming& timing);
    virtual ~VCMDecodedFrameCallback();
    void SetUserReceiveCallback(VCMReceiveCallback* receiveCallback);
    virtual WebRtc_Word32 Decoded(RawImage& decodedImage);
    virtual WebRtc_Word32 ReceivedDecodedReferenceFrame(const WebRtc_UWord64 pictureId);
    virtual WebRtc_Word32 ReceivedDecodedFrame(const WebRtc_UWord64 pictureId);
    WebRtc_UWord64 LastReceivedPictureID() const;
    // Associates / removes timing info for a frame, keyed by RTP timestamp.
    WebRtc_Word32 Map(WebRtc_UWord32 timestamp, VCMFrameInformation* frameInfo);
    WebRtc_Word32 Pop(WebRtc_UWord32 timestamp);
private:
    CriticalSectionWrapper& _critSect; // Owned: created in ctor, deleted in dtor.
    VideoFrame _frame; // Reused frame object handed to the receive callback.
    VCMReceiveCallback* _receiveCallback;
    VCMTiming& _timing;
    VCMTimestampMap _timestampMap; // timestamp -> VCMFrameInformation*.
    WebRtc_UWord64 _lastReceivedPictureID;
};
// Thin wrapper around a VideoDecoder implementation that adds per-frame
// timing bookkeeping and an optional "key frame required" gate.
class VCMGenericDecoder
{
    friend class VCMCodecDataBase;
public:
    VCMGenericDecoder(VideoDecoder& decoder, WebRtc_Word32 id = 0, bool isExternal = false);
    ~VCMGenericDecoder();
    /**
    *	Initialize the decoder with the information from the VideoCodec
    */
    WebRtc_Word32 InitDecode(const VideoCodec* settings,
                             WebRtc_Word32 numberOfCores,
                             bool requireKeyFrame);
    /**
    *	Decode to a raw I420 frame,
    *
    *	inputVideoBuffer	reference to encoded video frame
    */
    WebRtc_Word32 Decode(const VCMEncodedFrame& inputFrame);
    /**
    *	Free the decoder memory
    */
    WebRtc_Word32 Release();
    /**
    *	Reset the decoder state, prepare for a new call
    */
    WebRtc_Word32 Reset();
    /**
    *	Codec configuration data sent out-of-band, i.e. in SIP call setup
    *
    *	buffer pointer to the configuration data
    *	size the size of the configuration data in bytes
    */
    WebRtc_Word32 SetCodecConfigParameters(const WebRtc_UWord8* /*buffer*/,
                                           WebRtc_Word32 /*size*/);
    WebRtc_Word32 RegisterDecodeCompleteCallback(VCMDecodedFrameCallback* callback);
    // True when constructed around an externally supplied decoder instance.
    bool External() const;
protected:
    WebRtc_Word32 _id; // Trace identifier.
    VCMDecodedFrameCallback* _callback; // Set via RegisterDecodeCompleteCallback().
    VCMFrameInformation _frameInfos[kDecoderFrameMemoryLength]; // Ring buffer of timing info.
    WebRtc_UWord32 _nextFrameInfoIdx; // Next write slot in _frameInfos.
    VideoDecoder& _decoder; // Not owned.
    VideoCodecType _codecType;
    bool _isExternal;
    bool _requireKeyFrame; // When set, delta frames are rejected until a key frame decodes.
    bool _keyFrameDecoded;
};
} // namespace webrtc
#endif // WEBRTC_MODULES_VIDEO_CODING_GENERIC_DECODER_H_

View File

@@ -0,0 +1,229 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "encoded_frame.h"
#include "generic_encoder.h"
#include "media_optimization.h"
#include "../../../../engine_configurations.h"
namespace webrtc {
//#define DEBUG_ENCODER_BIT_STREAM
// Constructs the wrapper around an encoder implementation.
// Fix: the internalSource argument was previously ignored
// (_internalSource was hard-coded to false), so InternalSource() and the
// flag propagated in RegisterEncodeCallback() were always false even for
// internal-source encoders.
VCMGenericEncoder::VCMGenericEncoder(VideoEncoder& encoder, bool internalSource /*= false*/)
:
_encoder(encoder),
_codecType(kVideoCodecUnknown),
_VCMencodedFrameCallback(NULL),
_bitRate(0),
_frameRate(0),
_internalSource(internalSource)
{
}
VCMGenericEncoder::~VCMGenericEncoder()
{
    // _encoder is a reference owned by the caller; nothing to release here.
}
WebRtc_Word32
VCMGenericEncoder::Reset()
{
    // Clear cached rate state and drop the callback before resetting the encoder.
    _bitRate = 0;
    _frameRate = 0;
    _VCMencodedFrameCallback = NULL;
    return _encoder.Reset();
}
WebRtc_Word32 VCMGenericEncoder::Release()
{
    // Same local cleanup as Reset(), then release the encoder's resources.
    _bitRate = 0;
    _frameRate = 0;
    _VCMencodedFrameCallback = NULL;
    return _encoder.Release();
}
// Initializes the encoder and caches its rate/codec settings; propagates the
// codec type to the encode callback if one is already registered.
WebRtc_Word32
VCMGenericEncoder::InitEncode(const VideoCodec* settings, WebRtc_Word32 numberOfCores, WebRtc_UWord32 maxPayloadSize)
{
    _bitRate = settings->startBitrate;
    _frameRate = settings->maxFramerate;
    _codecType = settings->codecType;
    if (_VCMencodedFrameCallback != NULL)
    {
        _VCMencodedFrameCallback->SetCodecType(_codecType);
    }
    return _encoder.InitEncode(settings, numberOfCores, maxPayloadSize);
}
// Encodes one raw frame. Wraps the input frame's buffer in a RawImage
// (no copy) and forwards it together with the translated frame type.
WebRtc_Word32
VCMGenericEncoder::Encode(const VideoFrame& inputFrame, const void* codecSpecificInfo, FrameType frameType)
{
    RawImage rawImage(inputFrame.Buffer(), inputFrame.Length(), inputFrame.Size());
    rawImage._width = inputFrame.Width();
    rawImage._height = inputFrame.Height();
    rawImage._timeStamp = inputFrame.TimeStamp();
    WebRtc_Word32 ret = _encoder.Encode(rawImage, codecSpecificInfo, VCMEncodedFrame::ConvertFrameType(frameType));
    return ret;
}
WebRtc_Word32
VCMGenericEncoder::SetPacketLoss(WebRtc_Word32 packetLoss)
{
    // Forward the current packet loss estimate to the encoder.
    return _encoder.SetPacketLoss(packetLoss);
}
// Updates the encoder's target bit rate and frame rate; the cached values
// are only updated when the encoder accepts the new settings.
WebRtc_Word32
VCMGenericEncoder::SetRates(WebRtc_UWord32 newBitRate, WebRtc_UWord32 frameRate)
{
    WebRtc_Word32 ret = _encoder.SetRates(newBitRate, frameRate);
    if (ret < 0)
    {
        return ret;
    }
    _bitRate = newBitRate;
    _frameRate = frameRate;
    return VCM_OK;
}
// Retrieves out-of-band codec configuration data from the encoder.
//
// Input:
//          - buffer : destination for the configuration data.
//          - size   : capacity of buffer in bytes.
// Return value      : forwarded from the encoder implementation
//                     (<0 on failure).
WebRtc_Word32
VCMGenericEncoder::CodecConfigParameters(WebRtc_UWord8* buffer, WebRtc_Word32 size)
{
    // The previous version branched on ret < 0 and then returned ret on both
    // paths; forwarding the encoder's return value directly is equivalent.
    return _encoder.CodecConfigParameters(buffer, size);
}
WebRtc_UWord32 VCMGenericEncoder::BitRate() const
{
    // Last bit rate accepted by SetRates()/InitEncode().
    return _bitRate;
}
WebRtc_UWord32 VCMGenericEncoder::FrameRate() const
{
    // Last frame rate accepted by SetRates()/InitEncode().
    return _frameRate;
}
WebRtc_Word32
VCMGenericEncoder::SetPeriodicKeyFrames(bool enable)
{
    return _encoder.SetPeriodicKeyFrames(enable);
}
// Requests a frame of the given type from the encoder without providing an
// input picture (the default-constructed RawImage carries no buffer).
WebRtc_Word32
VCMGenericEncoder::RequestFrame(FrameType frameType)
{
    RawImage image;
    return _encoder.Encode(image, NULL, VCMEncodedFrame::ConvertFrameType(frameType));
}
// Caches the encode-complete callback, seeds it with the current codec type
// and internal-source flag, then registers it with the encoder.
WebRtc_Word32
VCMGenericEncoder::RegisterEncodeCallback(VCMEncodedFrameCallback* VCMencodedFrameCallback)
{
    _VCMencodedFrameCallback = VCMencodedFrameCallback;
    _VCMencodedFrameCallback->SetCodecType(_codecType);
    _VCMencodedFrameCallback->SetInternalSource(_internalSource);
    return _encoder.RegisterEncodeCompleteCallback(_VCMencodedFrameCallback);
}
bool
VCMGenericEncoder::InternalSource() const
{
    return _internalSource;
}
/***************************
 * Callback Implementation
 ***************************/
// Fix: _mediaOpt, _codecType and _internalSource were previously left
// uninitialized. _mediaOpt is dereferenced unconditionally in Encoded() and
// _internalSource is read there, so an Encoded() call before SetMediaOpt()/
// SetInternalSource() read indeterminate values.
VCMEncodedFrameCallback::VCMEncodedFrameCallback():
_sendCallback(),
_mediaOpt(NULL),
_encodedBytes(0),
_payloadType(0),
_codecType(kVideoCodecUnknown),
_internalSource(false),
_bitStreamAfterEncoder(NULL)
{
#ifdef DEBUG_ENCODER_BIT_STREAM
    _bitStreamAfterEncoder = fopen("encoderBitStream.bit", "wb");
#endif
}
VCMEncodedFrameCallback::~VCMEncodedFrameCallback()
{
#ifdef DEBUG_ENCODER_BIT_STREAM
    // Close the debug bit-stream dump opened in the constructor.
    fclose(_bitStreamAfterEncoder);
#endif
}
WebRtc_Word32
VCMEncodedFrameCallback::SetTransportCallback(VCMPacketizationCallback* transport)
{
    // Register the transport that will receive encoded data in Encoded().
    _sendCallback = transport;
    return VCM_OK;
}
// Called by the encoder when a frame has been encoded. Delivers the encoded
// data to the registered transport and updates media optimization statistics.
WebRtc_Word32
VCMEncodedFrameCallback::Encoded(EncodedImage &encodedImage, const void* codecSpecificInfo,
                                 const RTPFragmentationHeader* fragmentationHeader)
{
    FrameType frameType = VCMEncodedFrame::ConvertFrameType(encodedImage._frameType);
    WebRtc_UWord32 encodedBytes = 0;
    if (_sendCallback != NULL)
    {
        encodedBytes = encodedImage._length;
        if (_bitStreamAfterEncoder != NULL)
        {
            // Debug-only dump of the raw bit stream (DEBUG_ENCODER_BIT_STREAM).
            fwrite(encodedImage._buffer, 1, encodedImage._length, _bitStreamAfterEncoder);
        }
        // NOTE(review): fragmentationHeader is dereferenced here although the
        // declaration defaults it to NULL — confirm all encoders supply a
        // valid header.
        WebRtc_Word32 callbackReturn = _sendCallback->SendData(frameType,
                                                               _payloadType,
                                                               encodedImage._timeStamp,
                                                               encodedImage._buffer,
                                                               encodedBytes,
                                                               *fragmentationHeader);
        if (callbackReturn < 0)
        {
            return callbackReturn;
        }
    }
    else
    {
        // No transport registered; the encoded frame cannot be delivered.
        return VCM_UNINITIALIZED;
    }
    _encodedBytes = encodedBytes;
    // NOTE(review): assumes SetMediaOpt() has been called — _mediaOpt is
    // dereferenced unconditionally here.
    _mediaOpt->UpdateWithEncodedData(_encodedBytes, frameType);
    if (_internalSource)
    {
        return _mediaOpt->DropFrame(); // Signal to encoder to drop next frame
    }
    return VCM_OK;
}
WebRtc_UWord32
VCMEncodedFrameCallback::EncodedBytes()
{
    // Size of the most recently delivered encoded frame.
    return _encodedBytes;
}
void
VCMEncodedFrameCallback::SetMediaOpt(VCMMediaOptimization *mediaOpt)
{
    // Must be set before the first Encoded() call, which dereferences it.
    _mediaOpt = mediaOpt;
}
}

View File

@@ -0,0 +1,139 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_MODULES_VIDEO_CODING_GENERIC_ENCODER_H_
#define WEBRTC_MODULES_VIDEO_CODING_GENERIC_ENCODER_H_
#include "video_codec_interface.h"
#include <stdio.h>
namespace webrtc
{
class VCMMediaOptimization;
/*************************************/
/* VCMEncodeFrameCallback class */
/***********************************/
// Receives encoded images from the encoder, forwards them to the registered
// VCMPacketizationCallback, and feeds frame sizes to media optimization.
class VCMEncodedFrameCallback : public EncodedImageCallback
{
public:
    VCMEncodedFrameCallback();
    virtual ~VCMEncodedFrameCallback();
    /*
    * Callback implementation - codec encode complete
    */
    WebRtc_Word32 Encoded(EncodedImage& encodedImage, const void* codecSpecificInfo = NULL,
                          const RTPFragmentationHeader* fragmentationHeader = NULL);
    /*
    * Get number of encoded bytes
    */
    WebRtc_UWord32 EncodedBytes();
    /*
    * Callback implementation - generic encoder encode complete
    */
    WebRtc_Word32 SetTransportCallback(VCMPacketizationCallback* transport);
    /**
    * Set media Optimization
    */
    void SetMediaOpt (VCMMediaOptimization* mediaOpt);
    void SetPayloadType(WebRtc_UWord8 payloadType) { _payloadType = payloadType; };
    void SetCodecType(VideoCodecType codecType) {_codecType = codecType;};
    void SetInternalSource(bool internalSource) { _internalSource = internalSource; };
private:
    VCMPacketizationCallback* _sendCallback; // Transport for encoded data.
    VCMMediaOptimization* _mediaOpt; // Dereferenced in Encoded(); set via SetMediaOpt().
    WebRtc_UWord32 _encodedBytes; // Size of the last delivered frame.
    WebRtc_UWord8 _payloadType; // Payload type passed to SendData().
    VideoCodecType _codecType;
    bool _internalSource; // Mirrors VCMGenericEncoder's internal-source flag.
    FILE* _bitStreamAfterEncoder; // Debug dump file (DEBUG_ENCODER_BIT_STREAM only).
};// end of VCMEncodeFrameCallback class
/******************************/
/* VCMGenericEncoder class */
/******************************/
// Thin wrapper around a VideoEncoder implementation that caches rate
// settings and wires up the encode-complete callback.
class VCMGenericEncoder
{
    friend class VCMCodecDataBase;
public:
    VCMGenericEncoder(VideoEncoder& encoder, bool internalSource = false);
    ~VCMGenericEncoder();
    /**
    *	Reset the encoder state, prepare for a new call
    */
    WebRtc_Word32 Reset();
    /**
    *	Free encoder memory
    */
    WebRtc_Word32 Release();
    /**
    *	Initialize the encoder with the information from the VideoCodec
    */
    WebRtc_Word32 InitEncode(const VideoCodec* settings,
                             WebRtc_Word32 numberOfCores,
                             WebRtc_UWord32 maxPayloadSize);
    /**
    *	Encode raw image
    *	inputFrame        : Frame containing raw image
    *	codecSpecificInfo : Specific codec data
    *	cameraFrameRate   : request or information from the remote side
    *	frameType         : The requested frame type to encode
    *	NOTE(review): cameraFrameRate is documented but not part of the
    *	signature — stale comment.
    */
    WebRtc_Word32 Encode(const VideoFrame& inputFrame,
                         const void* codecSpecificInfo,
                         FrameType frameType);
    /**
    *	Set new target bit rate and frame rate
    *	Return Value: new bit rate if OK, otherwise <0s
    */
    WebRtc_Word32 SetRates(WebRtc_UWord32 newBitRate, WebRtc_UWord32 frameRate);
    /**
    *	Set a new packet loss rate
    */
    WebRtc_Word32 SetPacketLoss(WebRtc_Word32 packetLoss);
    WebRtc_Word32 CodecConfigParameters(WebRtc_UWord8* buffer, WebRtc_Word32 size);
    /**
    *	Register a transport callback which will be called to deliver the encoded buffers
    */
    WebRtc_Word32 RegisterEncodeCallback(VCMEncodedFrameCallback* VCMencodedFrameCallback);
    /**
    *	Get encoder bit rate
    */
    WebRtc_UWord32 BitRate() const;
    /**
    *	Get encoder frame rate
    */
    WebRtc_UWord32 FrameRate() const;
    WebRtc_Word32 SetPeriodicKeyFrames(bool enable);
    // Request a frame of the given type without supplying an input picture.
    WebRtc_Word32 RequestFrame(FrameType frameType);
    bool InternalSource() const;
private:
    VideoEncoder& _encoder; // Not owned.
    VideoCodecType _codecType;
    VCMEncodedFrameCallback* _VCMencodedFrameCallback;
    WebRtc_UWord32 _bitRate; // Last accepted target bit rate.
    WebRtc_UWord32 _frameRate; // Last accepted target frame rate.
    bool _internalSource; // True when the encoder produces frames on its own.
}; // end of VCMGenericEncoder class
} // namespace webrtc
#endif // WEBRTC_MODULES_VIDEO_CODING_GENERIC_ENCODER_H_

View File

@@ -0,0 +1,120 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "inter_frame_delay.h"
#include "tick_time.h"
namespace webrtc {
VCMInterFrameDelay::VCMInterFrameDelay()
{
    Reset();
}
// Resets the delay estimate
void
VCMInterFrameDelay::Reset()
{
    // Anchor the wall-clock zero point and clear all per-frame state.
    // _prevWallClock == 0 marks "no frame seen yet" for CalculateDelay().
    _zeroWallClock = VCMTickTime::MillisecondTimestamp();
    _wrapArounds = 0;
    _prevWallClock = 0;
    _prevTimestamp = 0;
    _dTS = 0;
}
// Calculates the delay of a frame with the given timestamp.
// This method is called when the frame is complete.
bool
VCMInterFrameDelay::CalculateDelay(WebRtc_UWord32 timestamp,
                                   WebRtc_Word64 *delay,
                                   WebRtc_Word64 currentWallClock /* = -1 */)
{
    if (currentWallClock <= -1)
    {
        // No test time supplied; sample the real clock.
        currentWallClock = VCMTickTime::MillisecondTimestamp();
    }
    if (_prevWallClock == 0)
    {
        // First set of data, initialization, wait for next frame
        _prevWallClock = currentWallClock;
        _prevTimestamp = timestamp;
        *delay = 0;
        return true;
    }
    WebRtc_Word32 prevWrapArounds = _wrapArounds;
    CheckForWrapArounds(timestamp);
    // This will be -1 for backward wrap arounds and +1 for forward wrap arounds
    WebRtc_Word32 wrapAroundsSincePrev = _wrapArounds - prevWrapArounds;
    // Account for reordering in jitter variance estimate in the future?
    // Note that this also captures incomplete frames which are grabbed
    // for decoding after a later frame has been complete, i.e. real
    // packet losses.
    if ((wrapAroundsSincePrev == 0 && timestamp < _prevTimestamp) || wrapAroundsSincePrev < 0)
    {
        // Reordered (older) timestamp: report zero and do not update state.
        *delay = 0;
        return false;
    }
    // Compute the compensated timestamp difference and convert it to ms and
    // round it to closest integer.
    // Division by 90 converts RTP ticks to ms (assumes a 90 kHz clock).
    _dTS = static_cast<WebRtc_Word64>((timestamp + wrapAroundsSincePrev *
                (static_cast<WebRtc_Word64>(1)<<32) - _prevTimestamp) / 90.0 + 0.5);
    // frameDelay is the difference of dT and dTS -- i.e. the difference of
    // the wall clock time difference and the timestamp difference between
    // two following frames.
    *delay = static_cast<WebRtc_Word64>(currentWallClock - _prevWallClock - _dTS);
    _prevTimestamp = timestamp;
    _prevWallClock = currentWallClock;
    return true;
}
// Returns the current difference between incoming timestamps, in ms.
// _dTS can be negative right after reordering was detected; zero is
// reported in that case.
WebRtc_UWord32 VCMInterFrameDelay::CurrentTimeStampDiffMs() const
{
    return (_dTS < 0) ? 0u : static_cast<WebRtc_UWord32>(_dTS);
}
// Investigates if the timestamp clock has overflowed since the last timestamp and
// keeps track of the number of wrap arounds since reset.
void
VCMInterFrameDelay::CheckForWrapArounds(WebRtc_UWord32 timestamp)
{
    if (timestamp < _prevTimestamp)
    {
        // This difference will probably be less than -2^31 if we have had a wrap around
        // (e.g. timestamp = 1, _previousTimestamp = 2^32 - 1). Since it is cast to a Word32,
        // it should be positive.
        if (static_cast<WebRtc_Word32>(timestamp - _prevTimestamp) > 0)
        {
            // Forward wrap around
            _wrapArounds++;
        }
    }
    // This difference will probably be less than -2^31 if we have had a backward wrap around.
    // Since it is cast to a Word32, it should be positive.
    else if (static_cast<WebRtc_Word32>(_prevTimestamp - timestamp) > 0)
    {
        // Backward wrap around
        _wrapArounds--;
    }
}
}

View File

@@ -0,0 +1,66 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_MODULES_VIDEO_CODING_INTER_FRAME_DELAY_H_
#define WEBRTC_MODULES_VIDEO_CODING_INTER_FRAME_DELAY_H_
#include "typedefs.h"
namespace webrtc
{
// Estimates the inter-frame delay: the difference between the wall-clock
// arrival spacing and the RTP timestamp spacing of consecutive frames,
// compensating for RTP timestamp wrap-around.
class VCMInterFrameDelay
{
public:
    VCMInterFrameDelay();
    // Resets the estimate. Zeros are given as parameters.
    void Reset();
    // Calculates the delay of a frame with the given timestamp.
    // This method is called when the frame is complete.
    //
    // Input:
    //          - timestamp         : RTP timestamp of a received frame
    //          - *delay            : Pointer to memory where the result should be stored
    //          - currentWallClock  : The current time in milliseconds.
    //                                Should be -1 for normal operation, only used for testing.
    // Return value                 : true if OK, false when reordered timestamps
    bool CalculateDelay(WebRtc_UWord32 timestamp,
                        WebRtc_Word64 *delay,
                        WebRtc_Word64 currentWallClock = -1);
    // Returns the current difference between incoming timestamps
    //
    // Return value                 : Wrap-around compensated difference between incoming
    //                                timestamps.
    WebRtc_UWord32 CurrentTimeStampDiffMs() const;
private:
    // Controls if the RTP timestamp counter has had a wrap around
    // between the current and the previously received frame.
    //
    // Input:
    //          - timestmap         : RTP timestamp of the current frame.
    void CheckForWrapArounds(WebRtc_UWord32 timestamp);
    // Local timestamp of the first video packet received.
    // NOTE(review): set in Reset() but not read in this class — verify it is
    // still needed.
    WebRtc_Word64 _zeroWallClock;
    WebRtc_Word32 _wrapArounds; // Number of wrapArounds detected
    // The previous timestamp passed to the delay estimate
    WebRtc_UWord32 _prevTimestamp;
    // The previous wall clock timestamp used by the delay estimate
    WebRtc_Word64 _prevWallClock;
    // Wrap-around compensated difference between incoming timestamps
    WebRtc_Word64 _dTS;
};
} // namespace webrtc
#endif // WEBRTC_MODULES_VIDEO_CODING_INTER_FRAME_DELAY_H_

View File

@@ -0,0 +1,57 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_MODULES_VIDEO_CODING_SOURCE_INTERNAL_DEFINES_H_
#define WEBRTC_MODULES_VIDEO_CODING_SOURCE_INTERNAL_DEFINES_H_
#include "typedefs.h"
namespace webrtc
{

// Truncates a 64-bit value to its 32 least significant bits.
#define MASK_32_BITS(x) (0xFFFFFFFF & (x))

inline WebRtc_UWord32 MaskWord64ToUWord32(WebRtc_Word64 w64)
{
    return static_cast<WebRtc_UWord32>(MASK_32_BITS(w64));
}

// Fully parenthesized so the macros expand safely inside larger
// expressions; the previous definitions lacked the outer parentheses and
// mis-parsed in contexts such as `VCM_MAX(a, b) + 1`.
#define VCM_MAX(a, b) (((a) > (b)) ? (a) : (b))
#define VCM_MIN(a, b) (((a) < (b)) ? (a) : (b))

#define VCM_DEFAULT_CODEC_WIDTH 352
#define VCM_DEFAULT_CODEC_HEIGHT 288
#define VCM_DEFAULT_FRAME_RATE 30
#define VCM_MIN_BITRATE 30

// Helper macros for creating the static codec list.
// Each enabled codec claims the next consecutive index; the index macros
// are parenthesized for the same expansion-safety reason as above.
#define VCM_NO_CODEC_IDX (-1)
#ifdef VIDEOCODEC_VP8
    #define VCM_VP8_IDX (VCM_NO_CODEC_IDX + 1)
#else
    #define VCM_VP8_IDX VCM_NO_CODEC_IDX
#endif
#ifdef VIDEOCODEC_I420
    #define VCM_I420_IDX (VCM_VP8_IDX + 1)
#else
    #define VCM_I420_IDX VCM_VP8_IDX
#endif
#define VCM_NUM_VIDEO_CODECS_AVAILABLE (VCM_I420_IDX + 1)

#define VCM_NO_RECEIVER_ID 0

// Combines a VCM instance id and a receiver id into a single 32-bit trace
// id. NOTE(review): assumes receiverId fits in the low 16 bits.
inline WebRtc_Word32 VCMId(const WebRtc_Word32 vcmId, const WebRtc_Word32 receiverId = 0)
{
    return static_cast<WebRtc_Word32>((vcmId << 16) + receiverId);
}

} // namespace webrtc
#endif // WEBRTC_MODULES_VIDEO_CODING_SOURCE_INTERNAL_DEFINES_H_

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,221 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_MODULES_VIDEO_CODING_JITTER_BUFFER_H_
#define WEBRTC_MODULES_VIDEO_CODING_JITTER_BUFFER_H_
#include "typedefs.h"
#include "critical_section_wrapper.h"
#include "module_common_types.h"
#include "video_coding_defines.h"
#include "inter_frame_delay.h"
#include "event.h"
#include "frame_list.h"
#include "jitter_buffer_common.h"
#include "jitter_estimator.h"
namespace webrtc
{
// forward declarations
class VCMFrameBuffer;
class VCMPacket;
class VCMEncodedFrame;
// One measurement fed to the jitter estimator: the RTP timestamp and the
// accumulated size of a frame, together with the wall-clock arrival time
// of the frame's most recent packet.
class VCMJitterSample
{
public:
    VCMJitterSample()
    :
    timestamp(0),
    frameSize(0),
    latestPacketTime(-1)
    {
    }

    WebRtc_UWord32 timestamp;        // RTP timestamp of the sampled frame
    WebRtc_UWord32 frameSize;        // Frame size in bytes
    WebRtc_Word64 latestPacketTime;  // Arrival time (ms) of the latest packet; -1 = unset
};
// Receiver-side buffer that assembles incoming RTP packets into frames,
// keeps them in timestamp order, estimates network jitter, and hands
// complete (or, on request, incomplete) frames to the decoder. Also
// maintains the NACK list of missing sequence numbers.
class VCMJitterBuffer
{
public:
    VCMJitterBuffer(WebRtc_Word32 vcmId = -1,
                    WebRtc_Word32 receiverId = -1,
                    bool master = true);
    virtual ~VCMJitterBuffer();

    VCMJitterBuffer& operator=(const VCMJitterBuffer& rhs);

    // We need a start and stop to break out of the wait event
    // used in GetCompleteFrameForDecoding
    void Start();
    void Stop();
    bool Running() const;

    // Empty the Jitter buffer of all its data
    void Flush();

    // Statistics, Get received key and delta frames
    WebRtc_Word32 GetFrameStatistics(WebRtc_UWord32& receivedDeltaFrames,
                                     WebRtc_UWord32& receivedKeyFrames) const;

    // Statistics, Calculate frame and bit rates
    WebRtc_Word32 GetUpdate(WebRtc_UWord32& frameRate, WebRtc_UWord32& bitRate);

    // Wait for the first packet in the next frame to arrive, blocks for <= maxWaitTimeMS ms
    WebRtc_Word64 GetNextTimeStamp(WebRtc_UWord32 maxWaitTimeMS,
                                   FrameType& incomingFrameType,
                                   WebRtc_Word64& renderTimeMs);

    // Will the packet sequence be complete if the next frame is grabbed
    // for decoding right now? That is, have we lost a frame between the
    // last decoded frame and the next, or is the next frame missing one
    // or more packets?
    bool CompleteSequenceWithNextFrame();

    // Wait maxWaitTimeMS for a complete frame to arrive. After timeout NULL is returned.
    VCMEncodedFrame* GetCompleteFrameForDecoding(WebRtc_UWord32 maxWaitTimeMS);

    // Get a frame for decoding (even an incomplete) without delay.
    VCMEncodedFrame* GetFrameForDecoding();

    VCMEncodedFrame* GetFrameForDecodingNACK();

    // Release frame (when done with decoding)
    void ReleaseFrame(VCMEncodedFrame* frame);

    // Get frame to use for this timestamp
    WebRtc_Word32 GetFrame(const VCMPacket& packet, VCMEncodedFrame*&);
    VCMEncodedFrame* GetFrame(const VCMPacket& packet); // deprecated

    // Returns the time in ms when the latest packet was inserted into the frame.
    // Retransmitted is set to true if any of the packets belonging to the frame
    // has been retransmitted.
    WebRtc_Word64 LastPacketTime(VCMEncodedFrame* frame, bool& retransmitted) const;

    // Insert a packet into a frame
    VCMFrameBufferEnum InsertPacket(VCMEncodedFrame* frame, const VCMPacket& packet);

    // Sync
    WebRtc_UWord32 GetEstimatedJitterMS();
    void UpdateRtt(WebRtc_UWord32 rttMs);

    // NACK
    void SetNackStatus(bool enable); // Enable/disable nack
    bool GetNackStatus(); // Get nack status (enabled/disabled)

    // Get list of missing sequence numbers (size in number of elements)
    WebRtc_UWord16* GetNackList(WebRtc_UWord16& nackSize, bool& listExtended);

    WebRtc_Word64 LastDecodedTimestamp() const;

    // Returns the newer of the two RTP timestamps, compensating for wrap-around.
    static WebRtc_UWord32 LatestTimestamp(const WebRtc_UWord32 existingTimestamp,
                                          const WebRtc_UWord32 newTimestamp);

protected:
    // Misc help functions
    // Recycle (release) frame, used if we didn't receive whole frame
    void RecycleFrame(VCMFrameBuffer* frame);
    void ReleaseFrameInternal(VCMFrameBuffer* frame);
    // Flush and reset the jitter buffer. Call under critical section.
    void FlushInternal();

    VCMFrameListItem* FindOldestSequenceNum() const;

    // Help functions for insert packet
    // Get empty frame, creates new (i.e. increases JB size) if necessary
    VCMFrameBuffer* GetEmptyFrame();
    // Recycle oldest frames up to a key frame, used if JB is completely full
    bool RecycleFramesUntilKeyFrame();
    // Update frame state (set as complete or reconstructable if conditions are met)
    void UpdateFrameState(VCMFrameBuffer* frameListItem);

    // Help functions for getting a frame
    // Find oldest complete frame, used for getting next frame to decode
    VCMFrameListItem* FindOldestCompleteContinuousFrame();
    // Check if a frame is missing the markerbit but is complete
    bool CheckForCompleteFrame(VCMFrameListItem* oldestFrameItem);

    void CleanUpOldFrames();
    void CleanUpSizeZeroFrames();

    void VerifyAndSetPreviousFrameLost(VCMFrameBuffer& frame);
    bool IsPacketRetransmitted(const VCMPacket& packet) const;

    void UpdateJitterAndDelayEstimates(VCMJitterSample& sample, bool incompleteFrame);
    void UpdateJitterAndDelayEstimates(VCMFrameBuffer& frame, bool incompleteFrame);
    void UpdateJitterAndDelayEstimates(WebRtc_Word64 latestPacketTimeMs,
                                       WebRtc_UWord32 timestamp,
                                       WebRtc_UWord32 frameSize,
                                       bool incompleteFrame);
    void UpdateOldJitterSample(const VCMPacket& packet);
    WebRtc_UWord32 GetEstimatedJitterMsInternal();

    // NACK help
    WebRtc_UWord16* CreateNackList(WebRtc_UWord16& nackSize, bool& listExtended);
    WebRtc_Word32 GetLowHighSequenceNumbers(WebRtc_Word32& lowSeqNum,
                                            WebRtc_Word32& highSeqNum) const;
    void UpdateLastDecodedWithFiller(const VCMPacket& packet);

private:
    // Predicates used when searching the ordered frame list.
    static bool FrameEqualTimestamp(VCMFrameBuffer* frame, const void* timestamp);
    static bool CompleteKeyFrameCriteria(VCMFrameBuffer* frame, const void* notUsed);

    WebRtc_Word32 _vcmId;       // Module id, used for tracing
    WebRtc_Word32 _receiverId;  // Receiver id, used for tracing
    bool _running; // If we are running (have started) or not
    CriticalSectionWrapper& _critSect;
    // NOTE(review): presumably distinguishes a master buffer from a slave
    // in dual-decode scenarios — confirm against the .cc.
    bool _master;
    // Event to signal when we have a frame ready for decoder
    VCMEvent _frameEvent;
    // Event to signal when we have received a packet
    VCMEvent _packetEvent;
    WebRtc_Word32 _maxNumberOfFrames; // Number of allocated frames
    // Array of pointers to the frames in JB
    VCMFrameBuffer* _frameBuffers[kMaxNumberOfFrames];
    VCMFrameListTimestampOrderAsc _frameBuffersTSOrder;

    // timing
    // Sequence number of last frame that was given to decoder
    WebRtc_Word32 _lastDecodedSeqNum;
    // Timestamp of last frame that was given to decoder
    WebRtc_Word64 _lastDecodedTimeStamp;

    // Statistics
    // Frame counter for each type (key, delta, golden, key-delta)
    WebRtc_UWord8 _receiveStatistics[4];
    // Latest calculated frame rates of incoming stream
    WebRtc_UWord8 _incomingFrameRate;
    WebRtc_UWord32 _incomingFrameCount; // Frame counter, reset in GetUpdate
    // Real time for last _frameCount reset
    WebRtc_Word64 _timeLastIncomingFrameCount;
    WebRtc_UWord32 _incomingBitCount; // Received bits counter, reset in GetUpdate
    WebRtc_UWord32 _incomingBitRate;
    WebRtc_UWord32 _dropCount; // Frame drop counter
    // Number of frames in a row that have been too old
    WebRtc_UWord32 _numConsecutiveOldFrames;
    // Number of packets in a row that have been too old
    WebRtc_UWord32 _numConsecutiveOldPackets;

    // Filters for estimating jitter
    VCMJitterEstimator _jitterEstimate;
    // Calculates network delays used for jitter calculations
    VCMInterFrameDelay _delayEstimate;
    VCMJitterSample _waitingForCompletion;

    // NACK
    bool _usingNACK; // If we are using nack
    // Holds the internal nack list (the missing sequence numbers)
    WebRtc_Word32 _NACKSeqNumInternal[kNackHistoryLength];
    // External list of missing sequence numbers, handed out by GetNackList()
    WebRtc_UWord16 _NACKSeqNum[kNackHistoryLength];
    WebRtc_UWord32 _NACKSeqNumLength; // Number of valid entries in the NACK lists

    // NOTE(review): appears to flag streams whose frames arrive without
    // RTP marker bits set — confirm against the .cc.
    bool _missingMarkerBits;
    bool _firstPacket; // True until the first packet has been received
};
} // namespace webrtc
#endif // WEBRTC_MODULES_VIDEO_CODING_JITTER_BUFFER_H_

View File

@@ -0,0 +1,66 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_MODULES_VIDEO_CODING_JITTER_BUFFER_COMMON_H_
#define WEBRTC_MODULES_VIDEO_CODING_JITTER_BUFFER_COMMON_H_
namespace webrtc
{
enum { kMaxNumberOfFrames = 100 };
enum { kStartNumberOfFrames = 6 }; // in packets, 6 packets are approximately 198 ms,
// we need at least one more for process
enum { kMaxVideoDelayMs = 2000 }; // in ms
enum VCMJitterBufferEnum
{
kMaxConsecutiveOldFrames = 60,
kMaxConsecutiveOldPackets = 300,
kMaxPacketsInJitterBuffer = 800,
kBufferIncStepSizeBytes = 30000, // >20 packets
kMaxJBFrameSizeBytes = 4000000 // sanity don't go above 4Mbyte
};
enum VCMFrameBufferEnum
{
kStateError = -4,
kTimeStampError = -2,
kSizeError = -1,
kNoError = 0,
kIncomplete = 1, // Frame incomplete
kFirstPacket = 2,
kCompleteSession = 3, // at least one layer in the frame complete
kDuplicatePacket = 5 // We're receiving a duplicate packet.
};
enum VCMFrameBufferStateEnum
{
kStateFree, // Unused frame in the JB
kStateEmpty, // frame popped by the RTP receiver
kStateIncomplete, // frame that have one or more packet(s) stored
kStateComplete, // frame that have all packets
kStateDecoding // frame popped by the decoding thread
};
enum { kH264StartCodeLengthBytes = 4};
// Used to indicate if a received packet contain a complete NALU (or equivalent)
enum VCMNaluCompleteness
{
kNaluUnset=0, //Packet has not been filled.
kNaluComplete=1, //Packet can be decoded as is.
kNaluStart, // Packet contain beginning of NALU
kNaluIncomplete, //Packet is not beginning or end of NALU
kNaluEnd // Packet is the end of a NALU
};
} // namespace webrtc
#endif // WEBRTC_MODULES_VIDEO_CODING_JITTER_BUFFER_COMMON_H_

View File

@@ -0,0 +1,439 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "trace.h"
#include "internal_defines.h"
#include "jitter_estimator.h"
#include "rtt_filter.h"
#include "tick_time.h"
#include <math.h>
#include <stdlib.h>
#include <string.h>
namespace webrtc {
// Constructs the estimator. |vcmId| and |receiverId| are only used for
// tracing. The initializer list sets the const filter tuning constants;
// all mutable filter state is initialized by Reset().
VCMJitterEstimator::VCMJitterEstimator(WebRtc_Word32 vcmId, WebRtc_Word32 receiverId) :
    _vcmId(vcmId),
    _receiverId(receiverId),
    _phi(0.97),
    _psi(0.9999),
    _alphaCountMax(400),
    _beta(0.9994),
    _thetaLow(0.000001),
    _nackLimit(3),              // This should be 1 if the old
                                // retransmition estimate is used.
    _nackWindowMS(15000),
    _numStdDevDelayOutlier(15),
    _numStdDevFrameSizeOutlier(3),
    _noiseStdDevs(2.33),        // ~Less than 1% chance
                                // (look up in normal distribution table)...
    _noiseStdDevOffset(30.0),   // ...of getting 30 ms freezes
    _rttFilter(vcmId, receiverId)
{
    Reset();
}
// Copies the filter state from |rhs| (self-assignment safe).
// NOTE(review): _theta and _varNoise are NOT copied here even though they
// are part of the estimator state — confirm this is intentional.
VCMJitterEstimator&
VCMJitterEstimator::operator=(const VCMJitterEstimator& rhs)
{
    if (this != &rhs)
    {
        memcpy(_thetaCov, rhs._thetaCov, sizeof(_thetaCov));
        memcpy(_Qcov, rhs._Qcov, sizeof(_Qcov));

        _vcmId = rhs._vcmId;
        _receiverId = rhs._receiverId;
        _avgFrameSize = rhs._avgFrameSize;
        _varFrameSize = rhs._varFrameSize;
        _maxFrameSize = rhs._maxFrameSize;
        _fsSum = rhs._fsSum;
        _fsCount = rhs._fsCount;
        _lastUpdateT = rhs._lastUpdateT;
        _prevEstimate = rhs._prevEstimate;
        _prevFrameSize = rhs._prevFrameSize;
        _avgNoise = rhs._avgNoise;
        _alphaCount = rhs._alphaCount;
        _filterJitterEstimate = rhs._filterJitterEstimate;
        _startupCount = rhs._startupCount;
        _latestNackTimestamp = rhs._latestNackTimestamp;
        _nackCount = rhs._nackCount;
        _rttFilter = rhs._rttFilter;
    }
    return *this;
}
// Resets the JitterEstimate to its initial state.
void
VCMJitterEstimator::Reset()
{
    // Initial line estimate: slope 1/(512e3/8) (i.e. a 512 kbit/s = 64000
    // byte/s channel), zero offset; initial noise variance 4.0.
    _theta[0] = 1/(512e3/8);
    _theta[1] = 0;
    _varNoise = 4.0;

    // Initial estimate covariance and process noise covariance.
    _thetaCov[0][0] = 1e-4;
    _thetaCov[1][1] = 1e2;
    _thetaCov[0][1] = _thetaCov[1][0] = 0;
    _Qcov[0][0] = 2.5e-10;
    _Qcov[1][1] = 1e-10;
    _Qcov[0][1] = _Qcov[1][0] = 0;

    // Frame size statistics (bytes).
    _avgFrameSize = 500;
    _maxFrameSize = 500;
    _varFrameSize = 100;

    _lastUpdateT = -1;
    _prevEstimate = -1.0;
    _prevFrameSize = 0;
    _avgNoise = 0.0;
    _alphaCount = 1;        // Must stay > 0; EstimateRandomJitter divides by it.
    _filterJitterEstimate = 0.0;
    _latestNackTimestamp = 0;
    _nackCount = 0;
    _fsSum = 0;
    _fsCount = 0;
    _startupCount = 0;
    _rttFilter.Reset();
}
// Clears the retransmission counter, so the RTT term is no longer added
// to the jitter estimate (see GetJitterEstimate()).
void
VCMJitterEstimator::ResetNackCount()
{
    _nackCount = 0;
}
// Updates the estimates with the new measurements.
//
// Input:
//        - frameDelayMS    : Delay-delta for this frame in milliseconds.
//        - frameSizeBytes  : Size of the current frame in bytes.
//        - incompleteFrame : True if the frame is used to update the
//                            estimate before it was complete.
void
VCMJitterEstimator::UpdateEstimate(WebRtc_Word64 frameDelayMS, WebRtc_UWord32 frameSizeBytes,
                                   bool incompleteFrame /* = false */)
{
    // NOTE(review): frameDelayMS is 64-bit but traced with %d — confirm
    // the trace macro handles this correctly on 64-bit platforms.
    WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCoding,
                 VCMId(_vcmId, _receiverId),
                 "Jitter estimate updated with: frameSize=%d frameDelayMS=%d",
                 frameSizeBytes, frameDelayMS);
    if (frameSizeBytes == 0)
    {
        // Nothing can be learned from an empty frame.
        return;
    }
    int deltaFS = frameSizeBytes - _prevFrameSize;
    if (_fsCount < kFsAccuStartupSamples)
    {
        // Accumulate the first few frame sizes...
        _fsSum += frameSizeBytes;
        _fsCount++;
    }
    else if (_fsCount == kFsAccuStartupSamples)
    {
        // ...and seed the frame size filter with their mean.
        _avgFrameSize = static_cast<double>(_fsSum) /
                        static_cast<double>(_fsCount);
        _fsCount++;
    }
    if (!incompleteFrame || frameSizeBytes > _avgFrameSize)
    {
        double avgFrameSize = _phi * _avgFrameSize +
                              (1 - _phi) * frameSizeBytes;
        if (frameSizeBytes < _avgFrameSize + 2 * sqrt(_varFrameSize))
        {
            // Only update the average frame size if this sample wasn't a
            // key frame
            _avgFrameSize = avgFrameSize;
        }
        // Update the variance anyway since we want to capture cases where we only get
        // key frames.
        _varFrameSize = VCM_MAX(_phi * _varFrameSize + (1 - _phi) *
                                (frameSizeBytes - avgFrameSize) *
                                (frameSizeBytes - avgFrameSize), 1.0);
    }

    // Update max frameSize estimate
    _maxFrameSize = VCM_MAX(_psi * _maxFrameSize, static_cast<double>(frameSizeBytes));

    if (_prevFrameSize == 0)
    {
        // First frame; just remember its size, no delta available yet.
        _prevFrameSize = frameSizeBytes;
        return;
    }
    _prevFrameSize = frameSizeBytes;

    // Only update the Kalman filter if the sample is not considered
    // an extreme outlier. Even if it is an extreme outlier from a
    // delay point of view, if the frame size also is large the
    // deviation is probably due to an incorrect line slope.
    double deviation = DeviationFromExpectedDelay(frameDelayMS, deltaFS);
    // BUGFIX: use fabs() here — deviation is a double, and with only
    // <stdlib.h>/<math.h> included the previous abs() call could resolve
    // to the int overload and truncate the value before comparison.
    if (fabs(deviation) < _numStdDevDelayOutlier * sqrt(_varNoise) ||
        frameSizeBytes > _avgFrameSize + _numStdDevFrameSizeOutlier * sqrt(_varFrameSize))
    {
        // Update the variance of the deviation from the
        // line given by the Kalman filter
        EstimateRandomJitter(deviation, incompleteFrame);
        // Prevent updating with frames which have been congested by a large
        // frame, and therefore arrives almost at the same time as that frame.
        // This can occur when we receive a large frame (key frame) which
        // has been delayed. The next frame is of normal size (delta frame),
        // and thus deltaFS will be << 0. This removes all frame samples
        // which arrives after a key frame.
        if ((!incompleteFrame || deviation >= 0.0) &&
            static_cast<double>(deltaFS) > - 0.25 * _maxFrameSize)
        {
            // Update the Kalman filter with the new data
            KalmanEstimateChannel(frameDelayMS, deltaFS);
        }
    }
    else
    {
        // Outlier: clamp the deviation to +/- N std devs before feeding
        // the noise filter, so a single spike cannot blow up the variance.
        int nStdDev = (deviation >= 0) ? _numStdDevDelayOutlier : -_numStdDevDelayOutlier;
        EstimateRandomJitter(nStdDev * sqrt(_varNoise), incompleteFrame);
    }

    // Post process the total estimated jitter
    if (_startupCount >= kStartupDelaySamples)
    {
        PostProcessEstimate();
    }
    else
    {
        _startupCount++;
    }

    WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCoding, VCMId(_vcmId, _receiverId),
                 "Framesize statistics: max=%f average=%f", _maxFrameSize, _avgFrameSize);
    WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCoding, VCMId(_vcmId, _receiverId),
                 "The estimated slope is: theta=(%f, %f)", _theta[0], _theta[1]);
    WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCoding, VCMId(_vcmId, _receiverId),
                 "Random jitter: mean=%f variance=%f", _avgNoise, _varNoise);
    WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCoding, VCMId(_vcmId, _receiverId),
                 "Current jitter estimate: %f", _filterJitterEstimate);
    WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCoding, VCMId(_vcmId, _receiverId),
                 "Current max RTT: %u", _rttFilter.RttMs());
}
// Updates the nack/packet ratio.
//
// Input:
//        - retransmitted : True if the frame contained retransmitted packet(s).
//        - wallClockMS   : Unused by the simplified implementation below.
void
VCMJitterEstimator::UpdateNackEstimate(bool retransmitted, WebRtc_Word64 /*wallClockMS = -1*/)
{
    // Simplified since it seems to be hard to be sure if a
    // packet actually has been retransmitted or not, resulting
    // in a delay which varies up and down with one RTT.
    // The solution is to wait until _nackLimit retransmitts
    // has been received, then always add an RTT to the estimate.
    if (retransmitted && _nackCount < _nackLimit)
    {
        _nackCount++;
    }
    // The windowed variant below is kept for reference; it would decay the
    // nack count after _nackWindowMS without retransmissions.
    //if (wallClockMS == -1)
    //{
    //    wallClockMS = VCMTickTime::MillisecondTimestamp();
    //}
    //if (retransmitted)
    //{
    //    if (_nackCount < _nackLimit)
    //    {
    //        _nackCount++;
    //    }
    //    _latestNackTimestamp = wallClockMS;
    //}
    //else if (_nackCount > 0 && wallClockMS - _latestNackTimestamp > _nackWindowMS)
    //{
    //    _nackCount = 0;
    //}
}
// Updates Kalman estimate of the channel.
// The caller is expected to sanity check the inputs.
//
// Input:
//        - frameDelayMS : Observed delay-delta for this frame (ms).
//        - deltaFSBytes : Frame size delta, i.e. frame size at time T
//                         minus frame size at time T-1.
void
VCMJitterEstimator::KalmanEstimateChannel(WebRtc_Word64 frameDelayMS,
                                          WebRtc_Word32 deltaFSBytes)
{
    double Mh[2];
    double hMh_sigma;
    double kalmanGain[2];
    double measureRes;
    double t00, t01;

    // Kalman filtering

    // Prediction
    // M = M + Q
    _thetaCov[0][0] += _Qcov[0][0];
    _thetaCov[0][1] += _Qcov[0][1];
    _thetaCov[1][0] += _Qcov[1][0];
    _thetaCov[1][1] += _Qcov[1][1];

    // Kalman gain
    // K = M*h'/(sigma2n + h*M*h') = M*h'/(1 + h*M*h')
    // h = [dFS 1]
    // Mh = M*h'
    // hMh_sigma = h*M*h' + R
    Mh[0] = _thetaCov[0][0] * deltaFSBytes + _thetaCov[0][1];
    Mh[1] = _thetaCov[1][0] * deltaFSBytes + _thetaCov[1][1];
    // sigma weights measurements with a small deltaFS as noisy and
    // measurements with large deltaFS as good
    if (_maxFrameSize < 1.0)
    {
        return;
    }
    // BUGFIX: use fabs() — with only <stdlib.h>/<math.h> included the
    // previous abs() call could resolve to the int overload and truncate
    // the double argument.
    double sigma = (300.0 * exp(-fabs(static_cast<double>(deltaFSBytes)) /
                   (1e0 * _maxFrameSize)) + 1) * sqrt(_varNoise);
    if (sigma < 1.0)
    {
        sigma = 1.0;
    }
    hMh_sigma = deltaFSBytes * Mh[0] + Mh[1] + sigma;
    if ((hMh_sigma < 1e-9 && hMh_sigma >= 0) || (hMh_sigma > -1e-9 && hMh_sigma <= 0))
    {
        // Never divide by a (near) zero denominator.
        assert(false);
        return;
    }
    kalmanGain[0] = Mh[0] / hMh_sigma;
    kalmanGain[1] = Mh[1] / hMh_sigma;

    // Correction
    // theta = theta + K*(dT - h*theta)
    measureRes = frameDelayMS - (deltaFSBytes * _theta[0] + _theta[1]);
    _theta[0] += kalmanGain[0] * measureRes;
    _theta[1] += kalmanGain[1] * measureRes;
    if (_theta[0] < _thetaLow)
    {
        // Keep the slope above its lower bound.
        _theta[0] = _thetaLow;
    }

    // M = (I - K*h)*M
    t00 = _thetaCov[0][0];
    t01 = _thetaCov[0][1];
    _thetaCov[0][0] = (1 - kalmanGain[0] * deltaFSBytes) * t00 -
                      kalmanGain[0] * _thetaCov[1][0];
    _thetaCov[0][1] = (1 - kalmanGain[0] * deltaFSBytes) * t01 -
                      kalmanGain[0] * _thetaCov[1][1];
    _thetaCov[1][0] = _thetaCov[1][0] * (1 - kalmanGain[1]) -
                      kalmanGain[1] * deltaFSBytes * t00;
    _thetaCov[1][1] = _thetaCov[1][1] * (1 - kalmanGain[1]) -
                      kalmanGain[1] * deltaFSBytes * t01;

    // Covariance matrix, must be positive semi-definite
    assert(_thetaCov[0][0] + _thetaCov[1][1] >= 0 &&
           _thetaCov[0][0] * _thetaCov[1][1] - _thetaCov[0][1] * _thetaCov[1][0] >= 0 &&
           _thetaCov[0][0] >= 0);
}
// Returns how much the measured frame delay deviates from the delay
// predicted by the Kalman-filtered line for this frame size delta.
double
VCMJitterEstimator::DeviationFromExpectedDelay(WebRtc_Word64 frameDelayMS,
                                               WebRtc_Word32 deltaFSBytes) const
{
    const double expectedDelayMS = _theta[0] * deltaFSBytes + _theta[1];
    return frameDelayMS - expectedDelayMS;
}
// Estimates the random jitter by calculating the variance of the
// sample distance from the line given by theta.
void
VCMJitterEstimator::EstimateRandomJitter(double d_dT, bool incompleteFrame)
{
double alpha;
if (_alphaCount == 0)
{
assert(_alphaCount > 0);
return;
}
alpha = static_cast<double>(_alphaCount - 1) / static_cast<double>(_alphaCount);
_alphaCount++;
if (_alphaCount > _alphaCountMax)
{
_alphaCount = _alphaCountMax;
}
double avgNoise = alpha * _avgNoise + (1 - alpha) * d_dT;
double varNoise = alpha * _varNoise +
(1 - alpha) * (d_dT - _avgNoise) * (d_dT - _avgNoise);
if (!incompleteFrame || varNoise > _varNoise)
{
_avgNoise = avgNoise;
_varNoise = varNoise;
}
if (_varNoise < 1.0)
{
// The variance should never be zero, since we might get
// stuck and consider all samples as outliers.
_varNoise = 1.0;
}
}
// Returns the noise contribution to the jitter estimate: the configured
// number of noise standard deviations minus a fixed offset, floored at 1 ms.
double
VCMJitterEstimator::NoiseThreshold() const
{
    const double threshold = _noiseStdDevs * sqrt(_varNoise) - _noiseStdDevOffset;
    return (threshold < 1.0) ? 1.0 : threshold;
}
// Calculates the current jitter estimate (ms) from the filtered state:
// the frame-size-dependent term plus the noise threshold. Stores and
// returns the result; out-of-range values fall back to the previous
// estimate or are clamped.
double
VCMJitterEstimator::CalculateEstimate()
{
    double estimateMS = _theta[0] * (_maxFrameSize - _avgFrameSize) + NoiseThreshold();

    if (estimateMS < 1.0)
    {
        // A very low (or negative) estimate is neglected; reuse the
        // previous estimate when one is available.
        estimateMS = (_prevEstimate <= 0.01) ? 1.0 : _prevEstimate;
    }
    if (estimateMS > 10000.0) // Sanity
    {
        estimateMS = 10000.0;
    }
    _prevEstimate = estimateMS;
    return estimateMS;
}
// Refreshes the published (filtered) jitter estimate from the current
// filter state; called once the startup period has passed.
void
VCMJitterEstimator::PostProcessEstimate()
{
    _filterJitterEstimate = CalculateEstimate();
}
// Feeds a new round-trip time sample (ms) to the max-RTT filter used when
// adding the retransmission term to the jitter estimate.
void
VCMJitterEstimator::UpdateRtt(WebRtc_UWord32 rttMs)
{
    _rttFilter.Update(rttMs);
}
// Raises the max frame size estimate when |frameSizeBytes| exceeds it;
// smaller frames leave the estimate untouched (it decays elsewhere via _psi).
void
VCMJitterEstimator::UpdateMaxFrameSize(WebRtc_UWord32 frameSizeBytes)
{
    const double frameSize = static_cast<double>(frameSizeBytes);
    if (frameSize > _maxFrameSize)
    {
        _maxFrameSize = frameSize;
    }
}
// Returns the current filtered estimate if available, otherwise the
// freshly calculated one, adding an RTT term once enough retransmissions
// have been observed.
double
VCMJitterEstimator::GetJitterEstimate()
{
    // Always recompute; CalculateEstimate() also refreshes _prevEstimate.
    const double calculatedMS = CalculateEstimate();
    double jitterMS =
        (_filterJitterEstimate > calculatedMS) ? _filterJitterEstimate : calculatedMS;
    if (_nackCount >= _nackLimit)
    {
        // Retransmissions add up to one RTT of extra delay.
        jitterMS += _rttFilter.RttMs();
    }
    return jitterMS;
}
}

View File

@@ -0,0 +1,158 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_MODULES_VIDEO_CODING_JITTER_ESTIMATOR_H_
#define WEBRTC_MODULES_VIDEO_CODING_JITTER_ESTIMATOR_H_
#include "typedefs.h"
#include "rtt_filter.h"
namespace webrtc
{
// Estimates the jitter of an incoming video stream. A Kalman filter
// tracks the line (slope, offset) relating frame-size deltas to
// frame-delay deltas; the variance of the deviations from that line
// gives the random jitter component.
class VCMJitterEstimator
{
public:
    VCMJitterEstimator(WebRtc_Word32 vcmId = 0, WebRtc_Word32 receiverId = 0);

    VCMJitterEstimator& operator=(const VCMJitterEstimator& rhs);

    // Resets the estimate to the initial state
    void Reset();
    // Resets only the retransmission (nack) counter.
    void ResetNackCount();

    // Updates the jitter estimate with the new data.
    //
    // Input:
    //        - frameDelayMS    : Delay-delta calculated by UTILDelayEstimate in milliseconds
    //        - frameSizeBytes  : Frame size of the current frame.
    //        - incompleteFrame : Flags if the frame is used to update the estimate before it
    //                            was complete. Default is false.
    void UpdateEstimate(WebRtc_Word64 frameDelayMS,
                        WebRtc_UWord32 frameSizeBytes,
                        bool incompleteFrame = false);

    // Returns the current jitter estimate in milliseconds and also adds
    // an RTT dependent term in cases of retransmission.
    //
    // Return value          : Jitter estimate in milliseconds
    double GetJitterEstimate();

    // Updates the nack counter/timer.
    //
    // Input:
    //        - retransmitted : True for a nacked frames, false otherwise
    //        - wallClockMS   : Used for testing
    void UpdateNackEstimate(bool retransmitted, WebRtc_Word64 wallClockMS = -1);

    // Updates the RTT filter.
    //
    // Input:
    //        - rttMs         : RTT in ms
    void UpdateRtt(WebRtc_UWord32 rttMs);

    // Raises the max frame size estimate if exceeded by frameSizeBytes.
    void UpdateMaxFrameSize(WebRtc_UWord32 frameSizeBytes);

    // A constant describing the delay from the jitter buffer
    // to the delay on the receiving side which is not accounted
    // for by the jitter buffer nor the decoding delay estimate.
    static const WebRtc_UWord32 OPERATING_SYSTEM_JITTER = 10;

protected:
    // These are protected for better testing possibilities
    double _theta[2]; // Estimated line parameters (slope, offset)
    double _varNoise; // Variance of the time-deviation from the line

private:
    // Updates the Kalman filter for the line describing
    // the frame size dependent jitter.
    //
    // Input:
    //        - frameDelayMS  : Delay-delta calculated by UTILDelayEstimate in milliseconds
    //        - deltaFSBytes  : Frame size delta, i.e.
    //                        : frame size at time T minus frame size at time T-1
    void KalmanEstimateChannel(WebRtc_Word64 frameDelayMS, WebRtc_Word32 deltaFSBytes);

    // Updates the random jitter estimate, i.e. the variance
    // of the time deviations from the line given by the Kalman filter.
    //
    // Input:
    //        - d_dT            : The deviation from the kalman estimate
    //        - incompleteFrame : True if the frame used to update the estimate
    //                            with was incomplete
    void EstimateRandomJitter(double d_dT, bool incompleteFrame);

    double NoiseThreshold() const;

    // Calculates the current jitter estimate.
    //
    // Return value          : The current jitter estimate in milliseconds
    double CalculateEstimate();

    // Post process the calculated estimate
    void PostProcessEstimate();

    // Calculates the difference in delay between a sample and the
    // expected delay estimated by the Kalman filter.
    //
    // Input:
    //        - frameDelayMS  : Delay-delta calculated by UTILDelayEstimate in milliseconds
    //        - deltaFS       : Frame size delta, i.e. frame size at time
    //                          T minus frame size at time T-1
    //
    // Return value           : The difference in milliseconds
    double DeviationFromExpectedDelay(WebRtc_Word64 frameDelayMS,
                                      WebRtc_Word32 deltaFSBytes) const;

    // Constants, filter parameters
    WebRtc_Word32 _vcmId;                 // Module id, used for tracing only
    WebRtc_Word32 _receiverId;            // Receiver id, used for tracing only
    const double _phi;                    // Average frame size filter factor
    const double _psi;                    // Max frame size decay factor
    const WebRtc_UWord32 _alphaCountMax;  // Upper bound on the noise filter sample count
    const double _beta;
    const double _thetaLow;               // Lower bound on the estimated line slope
    const WebRtc_UWord32 _nackLimit;      // Nacks needed before RTT is added to the estimate
    const WebRtc_UWord32 _nackWindowMS;
    const WebRtc_Word32 _numStdDevDelayOutlier;     // Delay outlier threshold (std devs)
    const WebRtc_Word32 _numStdDevFrameSizeOutlier; // Frame size outlier threshold (std devs)
    const double _noiseStdDevs;           // Noise std devs included in the estimate
    const double _noiseStdDevOffset;      // Subtracted from the noise threshold

    double _thetaCov[2][2]; // Estimate covariance
    double _Qcov[2][2];     // Process noise covariance
    double _avgFrameSize;   // Average frame size
    double _varFrameSize;   // Frame size variance
    double _maxFrameSize;   // Largest frame size received (descending
                            // with a factor _psi)
    WebRtc_UWord32 _fsSum;   // Accumulated frame sizes during startup
    WebRtc_UWord32 _fsCount; // Number of startup frame size samples
    WebRtc_Word64 _lastUpdateT;
    double _prevEstimate;          // The previously returned jitter estimate
    WebRtc_UWord32 _prevFrameSize; // Frame size of the previous frame
    double _avgNoise;              // Average of the random jitter
    WebRtc_UWord32 _alphaCount;    // Sample count driving the noise filter weight
    double _filterJitterEstimate;  // The filtered sum of jitter estimates
    WebRtc_UWord32 _startupCount;  // Samples seen before publishing estimates
    WebRtc_Word64 _latestNackTimestamp; // Timestamp in ms when the latest nack was seen
    WebRtc_UWord32 _nackCount;          // Keeps track of the number of nacks received,
                                        // but never goes above _nackLimit
    VCMRttFilter _rttFilter;            // Tracks the (max) round-trip time

    enum { kStartupDelaySamples = 30 }; // Samples before estimates are post-processed
    enum { kFsAccuStartupSamples = 5 }; // Samples used to seed the average frame size
};
} // namespace webrtc
#endif // WEBRTC_MODULES_VIDEO_CODING_JITTER_ESTIMATOR_H_

View File

@@ -0,0 +1,850 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "video_coding_defines.h"
#include "fec_tables_xor.h"
#include "er_tables_xor.h"
#include "nack_fec_tables.h"
#include "qm_select_data.h"
#include "media_opt_util.h"
#include <math.h>
#include <float.h>
#include <limits.h>
#include <stdio.h>
namespace webrtc {
// Compares this protection method with |pm| using their scores; a NULL
// candidate always loses. _score holds the method's bit cost (_efficiency)
// as set in UpdateParameters().
// NOTE(review): verify the intended comparison direction against call sites.
bool
VCMProtectionMethod::BetterThan(VCMProtectionMethod *pm)
{
    if (pm != NULL)
    {
        return pm->_score > _score;
    }
    return true;
}
// Protection factor for the combined NACK/FEC method. Intentionally a
// no-op: the FEC model (with an RTT modification) is applied in
// UpdateParameters() instead.
bool
VCMNackFecMethod::ProtectionFactor(const VCMProtectionParameters* /*parameters*/)
{
    //use FEC model with modification with RTT for now
    return true;
}
// Effective packet loss for the combined NACK/FEC method. Intentionally a
// no-op: the FEC model (with an RTT modification) is applied in
// UpdateParameters() instead.
bool
VCMNackFecMethod::EffectivePacketLoss(const VCMProtectionParameters* /*parameters*/)
{
    //use FEC model with modification with RTT for now
    return true;
}
// Recomputes protection factors, effective packet loss, bit cost and
// score for the combined NACK/FEC method: first runs the plain FEC model,
// then softens FEC/ER with an RTT-dependent table while NACK is usable
// (rtt < rttMax), and finally converts the factors to the convention the
// RTP module expects.
bool
VCMNackFecMethod::UpdateParameters(const VCMProtectionParameters* parameters)
{
    VCMFecMethod fecMethod;
    VCMNackMethod nackMethod;
    const WebRtc_UWord8 plossMax = 129;
    WebRtc_UWord16 rttMax = nackMethod.MaxRttNack();
    // We should reduce the NACK threshold for NackFec protection method,
    // with FEC and ER, we should only use NACK for small RTT, to avoid delay
    //But this parameter change should be shared with RTP and JB
    //rttMax = (WebRtc_UWord16) 0.5*rttMax;

    //Compute the protection factor
    fecMethod.ProtectionFactor(parameters);
    //Compute the effective packet loss
    fecMethod.EffectivePacketLoss(parameters);

    WebRtc_UWord8 protFactorK = fecMethod._protectionFactorK;
    WebRtc_UWord8 protFactorD = fecMethod._protectionFactorD;
    WebRtc_UWord8 effPacketLoss = fecMethod._effectivePacketLoss;
    float resPacketLoss = fecMethod._residualPacketLoss;

    // NOTE(review): rttIndex is signed 16-bit and only used below when
    // rtt < rttMax, which presumably keeps it within bounds — confirm
    // VCMNackFecTable covers [0, rttMax).
    WebRtc_Word16 rttIndex= (WebRtc_UWord16) parameters->rtt;
    float softnessRtt = 1.0;
    if (parameters->rtt < rttMax)
    {
        softnessRtt = (float)VCMNackFecTable[rttIndex]/(float)4096.0;
        //soften ER with NACK on
        //table depends on roundtrip time relative to rttMax (NACK Threshold)
        _effectivePacketLoss = (WebRtc_UWord8)(effPacketLoss*softnessRtt);
        //soften FEC with NACK on
        //table depends on roundtrip time relative to rttMax (NACK Threshold)
        _protectionFactorK = (WebRtc_UWord8) (protFactorK * softnessRtt);
        _protectionFactorD = (WebRtc_UWord8) (protFactorD * softnessRtt);
    }

    //make sure I frame protection is at least larger than P frame protection, and at least as high as received loss
    WebRtc_UWord8 packetLoss = (WebRtc_UWord8)(255* parameters->lossPr);
    _protectionFactorK = static_cast<WebRtc_UWord8>(VCM_MAX(packetLoss,VCM_MAX(_scaleProtKey*protFactorD,protFactorK)));
    //check limit on amount of protection for I frame: 50% is max
    if (_protectionFactorK >= plossMax) _protectionFactorK = plossMax - 1;

    //Bit cost for NackFec
    // NACK cost: based on residual packet loss (since we should only NACK packet not recovered by FEC)
    _efficiency = 0.0f;
    if (parameters->rtt < rttMax)
        _efficiency = parameters->bitRate * resPacketLoss / (1.0f + resPacketLoss);
    //add FEC cost: ignore I frames for now
    float fecRate = static_cast<float>(_protectionFactorD) / 255.0f;
    if (fecRate >= 0.0f)
        _efficiency += parameters->bitRate * fecRate;
    _score = _efficiency;

    //Protection/fec rates obtained above is defined relative to total number of packets (total rate: source+fec)
    //FEC in RTP module assumes protection factor is defined relative to source number of packets
    //so we should convert the factor to reduce mismatch between mediaOpt suggested rate and the actual rate
    WebRtc_UWord8 codeRate = protFactorK;
    _protectionFactorK = fecMethod.ConvertFECRate(codeRate);
    codeRate = protFactorD;
    _protectionFactorD = fecMethod.ConvertFECRate(codeRate);
    return true;
}
// Determines the packet loss the encoder should protect against. With
// NACK enabled and the RTT below the NACK threshold, retransmissions
// recover losses, so the encoder-side loss is set to zero.
bool
VCMNackMethod::EffectivePacketLoss(WebRtc_UWord8 effPacketLoss, WebRtc_UWord16 rttTime)
{
    const WebRtc_UWord16 maxRttForNack = MaxRttNack();
    if (rttTime < maxRttForNack)
    {
        // May want a softer transition here.
        effPacketLoss = 0;
    }
    _effectivePacketLoss = effPacketLoss;
    return true;
}
bool
VCMNackMethod::UpdateParameters(const VCMProtectionParameters* parameters)
{
//Compute the effective packet loss for ER
WebRtc_UWord8 effPacketLoss = (WebRtc_UWord8)(255* parameters->lossPr);
WebRtc_UWord16 rttTime = (WebRtc_UWord16) parameters->rtt;
EffectivePacketLoss(effPacketLoss, rttTime);
//
//Compute the NACK bit cost
_efficiency = parameters->bitRate * parameters->lossPr / (1.0f + parameters->lossPr);
_score = _efficiency;
if (parameters->rtt > _NACK_MAX_RTT)
{
_score = 0.0f;
return false;
}
return true;
}
// Returns the multiplicative boost applied to the FEC rate of key (I) frames.
//
// Input:
//    - packetFrameDelta : average number of packets per delta frame
//    - packetFrameKey   : average number of packets per key frame
//
// Return value : boost factor, at least 2 (the default), otherwise the
//                key/delta packet-count ratio.
WebRtc_UWord8
VCMFecMethod::BoostCodeRateKey(WebRtc_UWord8 packetFrameDelta, WebRtc_UWord8 packetFrameKey) const
{
    const WebRtc_UWord8 boostRateKey = 2;
    // Default: the ratio scales the FEC protection up for I frames.
    WebRtc_UWord8 ratio = 1;
    if (packetFrameDelta > 0)
    {
        // Bug fix: cast through the unsigned type. The previous
        // (WebRtc_Word8) cast overflowed for ratios above 127.
        ratio = (WebRtc_UWord8)(packetFrameKey / packetFrameDelta);
    }
    ratio = VCM_MAX(boostRateKey, ratio);
    return ratio;
}
// Converts a protection factor defined relative to the total number of packets
// (source + FEC) into one defined relative to the source packet count, which
// is what the RTP/FEC module expects. The result is capped at 255.
WebRtc_UWord8
VCMFecMethod::ConvertFECRate(WebRtc_UWord8 codeRateRTP) const
{
    // Bug fix: guard the division — a code rate of 255 (100% of the total
    // rate) would otherwise divide by zero; saturate to the maximum instead.
    if (codeRateRTP >= 255)
    {
        return 255;
    }
    return static_cast<WebRtc_UWord8>(
        VCM_MIN(255, (0.5 + 255.0 * codeRateRTP / (float)(255 - codeRateRTP))));
}
//AvgRecoveryFEC: average recovery from FEC, assuming random packet loss model
//Computed offline for a range of FEC code parameters and loss rates
float
VCMFecMethod::AvgRecoveryFEC(const VCMProtectionParameters* parameters) const
{
//Total (avg) bits available per frame: total rate over actual/sent frame rate
//units are kbits/frame
const WebRtc_UWord16 bitRatePerFrame = static_cast<WebRtc_UWord16>(parameters->bitRate/(parameters->frameRate));
//Total (avg) number of packets per frame (source and fec):
const WebRtc_UWord8 avgTotPackets = 1 + (WebRtc_UWord8)((float)bitRatePerFrame*1000.0/(float)(8.0*_maxPayloadSize) + 0.5);
//parameters for tables
const WebRtc_UWord8 codeSize = 24;
const WebRtc_UWord8 plossMax = 129;
const WebRtc_UWord16 maxErTableSize = 38700;
//
//
//Get index for table
const float protectionFactor = (float)_protectionFactorD/(float)255;
WebRtc_UWord8 fecPacketsPerFrame = (WebRtc_UWord8)(0.5 + protectionFactor*avgTotPackets);
WebRtc_UWord8 sourcePacketsPerFrame = avgTotPackets - fecPacketsPerFrame;
if (fecPacketsPerFrame == 0)
{
return 0.0; //no protection, so avg. recov from FEC == 0
}
//table defined up to codeSizexcodeSize code
if (sourcePacketsPerFrame > codeSize)
{
sourcePacketsPerFrame = codeSize;
}
//check: protection factor is maxed at 50%, so this should never happen
if (sourcePacketsPerFrame < 1)
{
assert("average number of source packets below 1\n");
}
//index for ER tables: up to codeSizexcodeSize mask
WebRtc_UWord16 codeIndexTable[codeSize*codeSize];
WebRtc_UWord16 k = -1;
for(WebRtc_UWord8 i=1;i<=codeSize;i++)
{
for(WebRtc_UWord8 j=1;j<=i;j++)
{
k += 1;
codeIndexTable[(j-1)*codeSize + i - 1] = k;
}
}
const WebRtc_UWord8 lossRate = (WebRtc_UWord8) (255.0*parameters->lossPr + 0.5f);
const WebRtc_UWord16 codeIndex = (fecPacketsPerFrame - 1)*codeSize + (sourcePacketsPerFrame - 1);
const WebRtc_UWord16 indexTable = codeIndexTable[codeIndex] * plossMax + lossRate;
const WebRtc_UWord16 codeIndex2 = (fecPacketsPerFrame)*codeSize + (sourcePacketsPerFrame);
WebRtc_UWord16 indexTable2 = codeIndexTable[codeIndex2] * plossMax + lossRate;
//checks on table index
if (indexTable >= maxErTableSize)
{
assert("ER table index too large\n");
}
if (indexTable2 >= maxErTableSize)
{
indexTable2 = indexTable;
}
//
//Get the average effective packet loss recovery from FEC
//this is from tables, computed using random loss model
WebRtc_UWord8 avgFecRecov1 = 0;
WebRtc_UWord8 avgFecRecov2 = 0;
float avgFecRecov = 0;
if (fecPacketsPerFrame > 0)
{
avgFecRecov1 = VCMAvgFECRecoveryXOR[indexTable];
avgFecRecov2 = VCMAvgFECRecoveryXOR[indexTable2];
}
//interpolate over two FEC codes
const float weightRpl = (float)(0.5 + protectionFactor*avgTotPackets) - (float)fecPacketsPerFrame;
avgFecRecov = (float)weightRpl * (float)avgFecRecov2 + (float)(1.0 - weightRpl) * (float)avgFecRecov1;
return avgFecRecov;
}
// Computes the FEC protection factors for key (I) and delta (P) frames from
// offline-computed XOR-FEC tables, as a function of packet loss and bit rate.
// Results are stored in _protectionFactorK / _protectionFactorD (relative to
// the total packet count; conversion happens in UpdateParameters()).
bool
VCMFecMethod::ProtectionFactor(const VCMProtectionParameters* parameters)
{
    // FEC PROTECTION SETTINGS: varies with packet loss and bit rate.
    const float bitRate = parameters->bitRate;
    WebRtc_UWord8 packetLoss = (WebRtc_UWord8)(255 * parameters->lossPr);
    // Size of the code rate table.
    const WebRtc_UWord16 maxFecTableSize = 6450;
    // Parameters for the rate and packet-loss ranges of the table.
    const WebRtc_UWord8 ratePar1 = 5;
    const WebRtc_UWord8 ratePar2 = 49;
    const WebRtc_UWord8 plossMax = 129;
    // Just for testing: for the case where we randomly lose slices instead of
    // RTP packets and use SingleMode packetization in the RTP module:
    // const WebRtc_UWord16 slice_size = 3000/6; // rate=1000k with 4 cores
    // float slice_mtu = (float)_maxPayloadSize/(float)slice_size;
    const float slice_mtu = 1.0;
    // Total (avg) bits available per frame: total rate over actual/sent frame
    // rate; units are kbits/frame.
    const WebRtc_UWord16 bitRatePerFrame = static_cast<WebRtc_UWord16>(slice_mtu * bitRate / (parameters->frameRate));
    // Total (avg) number of packets per frame (source and FEC):
    const WebRtc_UWord8 avgTotPackets = 1 + (WebRtc_UWord8)((float)bitRatePerFrame * 1000.0 / (float)(8.0 * _maxPayloadSize) + 0.5);
    // TODO(marpan): Tune model for FEC protection: better modulation of
    // protection with available bits/frame (or avgTotPackets) using weight
    // factors. The FEC tables include this effect already, but the model
    // needs off-line tuning.
    float weight1 = 0.5;
    float weight2 = 0.5;
    if (avgTotPackets > 4)
    {
        weight1 = 1.0;
        weight2 = 0.;
    }
    if (avgTotPackets > 6)
    {
        weight1 = 1.5;
        weight2 = 0.;
    }
    // FEC rate parameters for P (delta) and I (key) frames.
    WebRtc_UWord8 codeRateDelta = 0;
    WebRtc_UWord8 codeRateKey = 0;
    // Rate index for the table: the FEC protection depends on the (average)
    // available bits/frame; the index range corresponds to rates (bps) from
    // 200k to 8000k, for 30 fps.
    WebRtc_UWord8 rateIndexTable = (WebRtc_UWord8) VCM_MAX(VCM_MIN((bitRatePerFrame - ratePar1) / ratePar1, ratePar2), 0);
    // Restrict the packet loss range to 50% for now: the current tables are
    // defined only up to 50% loss.
    if (packetLoss >= plossMax)
    {
        packetLoss = plossMax - 1;
    }
    WebRtc_UWord16 indexTable = rateIndexTable * plossMax + packetLoss;
    // Check on the table index.
    if (indexTable >= maxFecTableSize)
    {
        // Bug fix: assert on a string literal is always true and never fires;
        // assert(false && "...") actually aborts in debug builds.
        assert(false && "FEC table index too large");
    }
    // For key frames: effectively at a higher rate, so we scale/boost the
    // rate index. The boost factor may depend on several factors: ratio of
    // packet numbers of I to P frames, how much protection is placed on P
    // frames, etc. The default is 2.
    const WebRtc_UWord8 packetFrameDelta = (WebRtc_UWord8)(0.5 + parameters->packetsPerFrame);
    const WebRtc_UWord8 packetFrameKey = (WebRtc_UWord8)(0.5 + parameters->packetsPerFrameKey);
    const WebRtc_UWord8 boostKey = BoostCodeRateKey(packetFrameDelta, packetFrameKey);
    rateIndexTable = (WebRtc_UWord8) VCM_MAX(VCM_MIN(1 + (boostKey * bitRatePerFrame - ratePar1) / ratePar1, ratePar2), 0);
    WebRtc_UWord16 indexTableKey = rateIndexTable * plossMax + packetLoss;
    // Bug fix: clamp to the LAST VALID index; clamping to maxFecTableSize
    // itself allowed a one-past-the-end read of VCMCodeRateXORTable.
    indexTableKey = VCM_MIN(indexTableKey, maxFecTableSize - 1);
    codeRateDelta = VCMCodeRateXORTable[indexTable];   // protection factor for P frames
    codeRateKey = VCMCodeRateXORTable[indexTableKey];  // protection factor for I frames
    // Average with the minimum protection level given by the (average) total
    // number of packets.
    if (packetLoss > 0)
    {
        codeRateDelta = static_cast<WebRtc_UWord8>((weight1 * (float)codeRateDelta + weight2 * 255.0 / (float)avgTotPackets));
    }
    // Limit on the amount of protection for P frames; 50% is max.
    if (codeRateDelta >= plossMax)
    {
        codeRateDelta = plossMax - 1;
    }
    // Make sure the I frame protection is at least as large as the P frame
    // protection, and at least as high as the received loss.
    codeRateKey = static_cast<WebRtc_UWord8>(VCM_MAX(packetLoss, VCM_MAX(_scaleProtKey * codeRateDelta, codeRateKey)));
    // Limit on the amount of protection for I frames: 50% is max.
    if (codeRateKey >= plossMax)
    {
        codeRateKey = plossMax - 1;
    }
    _protectionFactorK = codeRateKey;
    _protectionFactorD = codeRateDelta;
    // DONE WITH FEC PROTECTION SETTINGS
    return true;
}
// Computes the effective packet loss forwarded to the encoder when FEC is
// the protection method. This is a soft setting based on the degree of FEC
// protection: the residual packet loss (RPL) is the received/input loss
// minus the average FEC recovery (from the offline tables).
// Note: the received/input packet loss may be filtered (see FilteredLoss()).
bool
VCMFecMethod::EffectivePacketLoss(const VCMProtectionParameters* parameters)
{
    // ER SETTINGS:
    // The input packet loss, on the [0, 255] scale:
    WebRtc_UWord8 effPacketLoss = (WebRtc_UWord8)(255 * parameters->lossPr);
    // Scale applied to the FEC recovery and floor on the effective loss.
    // (Removed the unused Reed-Solomon scale 'scaleErRS' and the dead
    // commented-out alternative values; only XOR FEC is computed here.)
    const float scaleErXOR = 0.5;
    const float minErLevel = (float)0.025;
    const float scaleEr = scaleErXOR;
    // Average recovery from (XOR) FEC, from the offline tables.
    const float avgFecRecov = AvgRecoveryFEC(parameters);
    // Residual packet loss after FEC recovery:
    _residualPacketLoss = (float)(effPacketLoss - avgFecRecov) / (float)255.0;
    // Effective packet loss for the encoder: input loss reduced by the scaled
    // FEC recovery, floored at minErLevel.
    _effectivePacketLoss = 0;
    if (effPacketLoss > 0)
    {
        _effectivePacketLoss = VCM_MAX((effPacketLoss - (WebRtc_UWord8)(scaleEr * avgFecRecov)), static_cast<WebRtc_UWord8>(minErLevel * 255));
    }
    // DONE WITH ER SETTING
    return true;
}
// Recomputes the FEC method's robustness settings (protection factors and
// effective packet loss) and its bit cost, then converts the protection
// factors to the convention expected by the RTP module.
bool
VCMFecMethod::UpdateParameters(const VCMProtectionParameters* parameters)
{
    // Derive the FEC protection factors for key/delta frames.
    ProtectionFactor(parameters);
    // Derive the effective packet loss passed on to the encoder.
    EffectivePacketLoss(parameters);
    // Bit cost of the FEC overhead; key frames are ignored for now.
    const float fecRate = static_cast<float>(_protectionFactorD) / 255.0f;
    _efficiency = 0.0f;
    if (fecRate >= 0.0f)
    {
        // In the current tables the FEC rate is defined relative to the total
        // number of packets (total rate), so the overhead cost is simply:
        _efficiency = parameters->bitRate * fecRate;
        // (With a rate defined relative to the source packet count it would
        //  be: parameters->bitRate * (1.0 - 1.0 / (1.0 + fecRate)).)
    }
    _score = _efficiency;
    // The protection factors above are defined relative to the total packet
    // count (source + FEC); the RTP module expects them relative to the
    // source packet count, so convert to reduce the mismatch between the
    // mediaOpt-suggested rate and the actual rate.
    _protectionFactorK = ConvertFECRate(_protectionFactorK);
    _protectionFactorD = ConvertFECRate(_protectionFactorD);
    return true;
}
// Evaluates the key-frame-request method. Recommended only when key frames
// are small relative to the loss rate and the RTT allows a timely request.
bool
VCMIntraReqMethod::UpdateParameters(const VCMProtectionParameters* parameters)
{
    // Without a key frame size estimate the method cannot be scored.
    if (parameters->keyFrameSize <= 1e-3)
    {
        _score = FLT_MAX;
        return false;
    }
    // Assume that all lost packets belong to different frames.
    const float packetRate = parameters->packetsPerFrame * parameters->frameRate;
    const float frameLossRate = parameters->lossPr * packetRate;
    _efficiency = frameLossRate * parameters->keyFrameSize;
    _score = _efficiency;
    // Not recommended when nearly every key frame would itself be hit by
    // loss, or when the round-trip time exceeds the request threshold.
    return parameters->lossPr < 1.0f / parameters->keyFrameSize &&
           parameters->rtt <= _IREQ_MAX_RTT;
}
// Periodic I-frames: the method of last resort. It is marked available
// (returns true) but given the worst possible score and zero efficiency.
bool
VCMPeriodicIntraMethod::UpdateParameters(const VCMProtectionParameters* /*parameters*/)
{
    _score = FLT_MAX;
    _efficiency = 0.0f;
    return true;
}
// Evaluates macro-block intra refresh. The cost model mirrors the NACK one
// (assumed optimal for now); the method is only recommended above a minimum
// bit rate.
bool
VCMMbIntraRefreshMethod::UpdateParameters(const VCMProtectionParameters* parameters)
{
    _efficiency = parameters->bitRate * parameters->lossPr / (1.0f + parameters->lossPr);
    _score = _efficiency;
    return parameters->bitRate >= _MBREF_MIN_BITRATE;
}
// Returns the RTT threshold for NACK (in the same units as parameters->rtt;
// compared against it in EffectivePacketLoss/UpdateParameters).
WebRtc_UWord16
VCMNackMethod::MaxRttNack() const
{
    return _NACK_MAX_RTT;
}
// Destructor: releases all registered protection methods (the logic owns
// the method objects added via AddMethod).
VCMLossProtectionLogic::~VCMLossProtectionLogic()
{
    ClearLossProtections();
}
// Deletes every registered protection method, empties the method list and
// deselects the current method.
void
VCMLossProtectionLogic::ClearLossProtections()
{
    for (ListItem* item = _availableMethods.First(); item != NULL;
         item = _availableMethods.First())
    {
        // delete on NULL is a no-op, so no separate check is needed.
        delete static_cast<VCMProtectionMethod*>(item->GetItem());
        _availableMethods.PopFront();
    }
    _selectedMethod = NULL;
}
// Registers a protection method, taking ownership of it. At most one method
// per protection type may be registered; NULL and duplicates are rejected.
bool
VCMLossProtectionLogic::AddMethod(VCMProtectionMethod *newMethod)
{
    if (newMethod == NULL)
    {
        return false;
    }
    // Reject a second method of the same type.
    if (FindMethod(newMethod->Type()) != NULL)
    {
        return false;
    }
    _availableMethods.PushBack(newMethod);
    return true;
}
// Removes and deletes the registered method of the given type, deselecting
// it first if it is the currently selected method.
//
// Return value : true if a method of that type was found and removed.
bool
VCMLossProtectionLogic::RemoveMethod(VCMProtectionMethodEnum methodType)
{
    // At most one method per type can be registered (AddMethod rejects
    // duplicates), so we can stop at the first match. Bug fix: the original
    // kept iterating by calling Next() on the just-erased (NULLed) item.
    for (ListItem* item = _availableMethods.First(); item != NULL;
         item = _availableMethods.Next(item))
    {
        VCMProtectionMethod* method = static_cast<VCMProtectionMethod*>(item->GetItem());
        if (method != NULL && method->Type() == methodType)
        {
            if (_selectedMethod != NULL && _selectedMethod->Type() == method->Type())
            {
                _selectedMethod = NULL;
            }
            _availableMethods.Erase(item);
            delete method;
            return true;
        }
    }
    return false;
}
// Returns the registered method of the given type, or NULL if none exists.
// Ownership stays with the logic object.
VCMProtectionMethod*
VCMLossProtectionLogic::FindMethod(VCMProtectionMethodEnum methodType) const
{
    for (ListItem* item = _availableMethods.First(); item != NULL;
         item = _availableMethods.Next(item))
    {
        VCMProtectionMethod* candidate =
            static_cast<VCMProtectionMethod*>(item->GetItem());
        if (candidate != NULL && candidate->Type() == methodType)
        {
            return candidate;
        }
    }
    return NULL;
}
// Returns the largest RequiredBitRate() among all registered methods
// (0.0f when no methods are registered).
float
VCMLossProtectionLogic::HighestOverhead() const
{
    float highestOverhead = 0.0f;
    for (ListItem* item = _availableMethods.First(); item != NULL;
         item = _availableMethods.Next(item))
    {
        VCMProtectionMethod* method =
            static_cast<VCMProtectionMethod*>(item->GetItem());
        if (method == NULL)
        {
            continue;
        }
        const float overhead = method->RequiredBitRate();
        if (overhead > highestOverhead)
        {
            highestOverhead = overhead;
        }
    }
    return highestOverhead;
}
// Updates the stored round-trip time estimate (callers pass milliseconds —
// see SetTargetRates in media_optimization).
void
VCMLossProtectionLogic::UpdateRtt(WebRtc_UWord32 rtt)
{
    _rtt = rtt;
}
// Updates the residual packet loss estimate (loss remaining after FEC
// recovery).
void
VCMLossProtectionLogic::UpdateResidualPacketLoss(float residualPacketLoss)
{
    _residualPacketLoss = residualPacketLoss;
}
// Updates the FEC type used by the protection parameters (kXORFec is the
// only type defined).
void
VCMLossProtectionLogic::UpdateFecType(VCMFecTypes fecType)
{
    _fecType = fecType;
}
// Folds a new RTCP-reported loss sample into both loss filters.
//
// Input:
//    - lossPr255 : packet loss probability on the [0, 255] scale.
void
VCMLossProtectionLogic::UpdateLossPr(WebRtc_UWord8 lossPr255)
{
    WebRtc_UWord32 now = static_cast<WebRtc_UWord32>(VCMTickTime::MillisecondTimestamp());
    // Maintain the windowed-max history before updating the average filter.
    UpdateMaxLossHistory(lossPr255, now);
    // Exponential filter weighted by the time since the previous update.
    _lossPr255.Apply(static_cast<float>(now - _lastPrUpdateT), static_cast<float>(lossPr255));
    _lastPrUpdateT = now;
    _lossPr = _lossPr255.Value() / 255.0f;
}
// Maintains the short-window maximum-loss history used by MaxFilteredLossPr.
// Within the current window (kLossPrShortFilterWinMs) only the running max
// is tracked; once the window expires the max is pushed into the history
// (newest first) and a new window starts.
void
VCMLossProtectionLogic::UpdateMaxLossHistory(WebRtc_UWord8 lossPr255, WebRtc_Word64 now)
{
    if (_lossPrHistory[0].timeMs >= 0 &&
        now - _lossPrHistory[0].timeMs < kLossPrShortFilterWinMs)
    {
        // Still inside the current window: just track the maximum.
        if (lossPr255 > _shortMaxLossPr255)
        {
            _shortMaxLossPr255 = lossPr255;
        }
    }
    else
    {
        // Only add a new value to the history once a second.
        if(_lossPrHistory[0].timeMs == -1)
        {
            // First sample ever: no shift needed.
            _shortMaxLossPr255 = lossPr255;
        }
        else
        {
            // Shift the history one slot towards the back (oldest falls off).
            for(WebRtc_Word32 i = (kLossPrHistorySize - 2); i >= 0 ; i--)
            {
                _lossPrHistory[i+1].lossPr255 = _lossPrHistory[i].lossPr255;
                _lossPrHistory[i+1].timeMs = _lossPrHistory[i].timeMs;
            }
        }
        if (_shortMaxLossPr255 == 0)
        {
            // No max recorded in the expired window: use the new sample.
            _shortMaxLossPr255 = lossPr255;
        }
        // Store the expired window's max at the front and start a new window.
        _lossPrHistory[0].lossPr255 = _shortMaxLossPr255;
        _lossPrHistory[0].timeMs = now;
        _shortMaxLossPr255 = 0;
    }
}
// Returns the maximum loss value (scale [0, 255]) over the current window
// and all history samples that are still within the total filter span.
WebRtc_UWord8
VCMLossProtectionLogic::MaxFilteredLossPr(WebRtc_Word64 nowMs) const
{
    WebRtc_UWord8 maxFound = _shortMaxLossPr255;
    if (_lossPrHistory[0].timeMs == -1)
    {
        return maxFound; // history is empty
    }
    for (WebRtc_Word32 i = 0; i < kLossPrHistorySize; i++)
    {
        // Samples are stored newest first: stop at the first unused slot or
        // the first sample that is too old (all later ones are older still).
        if (_lossPrHistory[i].timeMs == -1 ||
            nowMs - _lossPrHistory[i].timeMs > kLossPrHistorySize * kLossPrShortFilterWinMs)
        {
            break;
        }
        if (_lossPrHistory[i].lossPr255 > maxFound)
        {
            maxFound = _lossPrHistory[i].lossPr255;
        }
    }
    return maxFound;
}
// Returns the filtered loss value ([0, 255]) used for the robustness
// settings: the windowed max when FEC is the selected method, otherwise the
// rounded exponential-average value.
WebRtc_UWord8
VCMLossProtectionLogic::FilteredLoss() const
{
    const bool fecSelected =
        _selectedMethod != NULL && _selectedMethod->Type() == kFEC;
    if (fecSelected)
    {
        return MaxFilteredLossPr(static_cast<WebRtc_UWord32>(VCMTickTime::MillisecondTimestamp()));
    }
    return static_cast<WebRtc_UWord8>(_lossPr255.Value() + 0.5);
}
// Overrides the loss probability with an externally filtered value
// (max-window or average), converted from the [0, 255] scale.
void
VCMLossProtectionLogic::UpdateFilteredLossPr(WebRtc_UWord8 packetLossEnc)
{
    _lossPr = (float)packetLossEnc/(float)255.0;
}
// Updates the current target bit rate (kbit/s, per the header contract).
void
VCMLossProtectionLogic::UpdateBitRate(float bitRate)
{
    _bitRate = bitRate;
}
// Folds a new packets-per-delta-frame sample into its exponential filter,
// weighted by the time since the previous update.
void
VCMLossProtectionLogic::UpdatePacketsPerFrame(float nPackets)
{
    WebRtc_UWord32 now = static_cast<WebRtc_UWord32>(VCMTickTime::MillisecondTimestamp());
    _packetsPerFrame.Apply(static_cast<float>(now - _lastPacketPerFrameUpdateT), nPackets);
    _lastPacketPerFrameUpdateT = now;
}
// Folds a new packets-per-key-frame sample into its exponential filter,
// weighted by the time since the previous key-frame update.
void
VCMLossProtectionLogic::UpdatePacketsPerFrameKey(float nPackets)
{
    WebRtc_UWord32 now = static_cast<WebRtc_UWord32>(VCMTickTime::MillisecondTimestamp());
    _packetsPerFrameKey.Apply(static_cast<float>(now - _lastPacketPerFrameUpdateTKey), nPackets);
    _lastPacketPerFrameUpdateTKey = now;
}
// Updates the estimate of the latest sent key frame's size.
void
VCMLossProtectionLogic::UpdateKeyFrameSize(float keyFrameSize)
{
    _keyFrameSize = keyFrameSize;
}
// Updates all protection methods with the current parameter snapshot and
// selects one: the forced method if newMethod is given, otherwise the FEC
// method if registered, otherwise the last method in the list.
//
// Input:
//    - newMethod : if non-NULL, this method is selected by force.
//
// Return value : always true in the current implementation.
bool
VCMLossProtectionLogic::UpdateMethod(VCMProtectionMethod *newMethod /*=NULL */)
{
    // Snapshot of the current estimates handed to each method.
    _currentParameters.rtt = _rtt;
    _currentParameters.lossPr = _lossPr;
    _currentParameters.bitRate = _bitRate;
    _currentParameters.frameRate = _frameRate; // actual/sent frame rate
    _currentParameters.keyFrameSize = _keyFrameSize;
    _currentParameters.fecRateDelta = _fecRateDelta;
    _currentParameters.fecRateKey = _fecRateKey;
    _currentParameters.packetsPerFrame = _packetsPerFrame.Value();
    _currentParameters.packetsPerFrameKey = _packetsPerFrameKey.Value();
    _currentParameters.residualPacketLoss = _residualPacketLoss;
    _currentParameters.fecType = _fecType;
    if (newMethod == NULL)
    {
        // Bug fix: initialize 'method' — the original left it uninitialized,
        // and it was read below when the method list was empty.
        VCMProtectionMethod *method = NULL;
        ListItem *item;
        for (item = _availableMethods.First(); item != NULL; item = _availableMethods.Next(item))
        {
            method = static_cast<VCMProtectionMethod *>(item->GetItem());
            if (method != NULL)
            {
                if (method->Type() == kFEC)
                {
                    _selectedMethod = method;
                }
                method->UpdateParameters(&_currentParameters);
            }
        }
        // If FEC was not among the methods, fall back to the last one seen
        // (guarded so an empty list leaves the selection untouched).
        if (_selectedMethod != NULL && _selectedMethod->Type() != kFEC &&
            method != NULL)
        {
            _selectedMethod = method;
        }
    }
    else
    {
        _selectedMethod = newMethod;
        _selectedMethod->UpdateParameters(&_currentParameters);
    }
    return true;
}
// Returns the currently selected protection method (NULL if none is
// selected). Ownership stays with the logic object.
VCMProtectionMethod*
VCMLossProtectionLogic::SelectedMethod() const
{
    return _selectedMethod;
}
void
VCMLossProtectionLogic::Reset()
{
_lastPrUpdateT = static_cast<WebRtc_UWord32>(VCMTickTime::MillisecondTimestamp());
_lastPacketPerFrameUpdateT = static_cast<WebRtc_UWord32>(VCMTickTime::MillisecondTimestamp());
_lossPr255.Reset(0.9999f);
_packetsPerFrame.Reset(0.9999f);
_fecRateDelta = _fecRateKey = 0;
for (WebRtc_Word32 i=0; i < kLossPrHistorySize; i++)
{
_lossPrHistory[i].lossPr255 = 0;
_lossPrHistory[i].timeMs = -1;
}
_shortMaxLossPr255 = 0;
ClearLossProtections();
}
}

View File

@@ -0,0 +1,372 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_MODULES_VIDEO_CODING_MEDIA_OPT_UTIL_H_
#define WEBRTC_MODULES_VIDEO_CODING_MEDIA_OPT_UTIL_H_
#include "typedefs.h"
#include "list_wrapper.h"
#include "trace.h"
#include "exp_filter.h"
#include "internal_defines.h"
#include "tick_time.h"
#include <cmath>
#include <cstdlib>
namespace webrtc
{
class ListWrapper;
enum { kLossPrHistorySize = 30 }; // 30 time periods
enum { kLossPrShortFilterWinMs = 1000 }; // 1000 ms, total filter length is 30 000 ms
enum VCMFecTypes
{
kXORFec
};
// Snapshot of the channel/encoder state handed to each protection method's
// UpdateParameters() call.
struct VCMProtectionParameters
{
    // Bug fix: packetsPerFrameKey was missing from the initializer list and
    // was left uninitialized.
    VCMProtectionParameters() : rtt(0), lossPr(0), bitRate(0), packetsPerFrame(0),
        packetsPerFrameKey(0), frameRate(0), keyFrameSize(0), fecRateDelta(0),
        fecRateKey(0), residualPacketLoss(0.0), fecType(kXORFec) {}
    WebRtc_UWord32 rtt;                // round-trip time (as passed to UpdateRtt)
    float lossPr;                      // packet loss probability [0, 1]
    float bitRate;                     // target bit rate
    float packetsPerFrame;             // avg packets per delta frame
    float packetsPerFrameKey;          // avg packets per key frame
    float frameRate;                   // actual/sent frame rate
    float keyFrameSize;                // size estimate of latest key frame
    WebRtc_UWord8 fecRateDelta;        // FEC rate for delta frames [0, 255]
    WebRtc_UWord8 fecRateKey;          // FEC rate for key frames [0, 255]
    float residualPacketLoss;          // loss remaining after FEC recovery
    VCMFecTypes fecType;               // FEC scheme (kXORFec only)
};
/******************************/
/* VCMProtectionMethod class */
/****************************/
enum VCMProtectionMethodEnum
{
kNACK,
kFEC,
kNackFec,
kIntraRequest, // I-frame request
kPeriodicIntra, // I-frame refresh
kMBIntraRefresh, // Macro block refresh
kNone
};
// One sample of the windowed-max loss history: a loss value on the [0, 255]
// scale and the timestamp of its window (-1 marks an unused slot).
class VCMLossProbabilitySample
{
public:
    VCMLossProbabilitySample() : lossPr255(0), timeMs(-1) {};

    WebRtc_UWord8 lossPr255;  // max loss observed in the window, [0, 255]
    WebRtc_Word64 timeMs;     // window start time in ms; -1 if slot unused
};
// Abstract base class for a loss protection method (NACK, FEC, ...).
// Subclasses fill in the robustness settings (_effectivePacketLoss,
// _protectionFactorK/D) and cost (_efficiency, _score) in UpdateParameters().
class VCMProtectionMethod
{
public:
    // Bug fix: _effectivePacketLoss was missing from the initializer list
    // and was left uninitialized.
    VCMProtectionMethod(VCMProtectionMethodEnum type) : _effectivePacketLoss(0),
        _protectionFactorK(0),
        _protectionFactorD(0), _residualPacketLoss(0.0), _scaleProtKey(2.0),
        _maxPayloadSize(1460), _efficiency(0), _score(0), _type(type) {}
    virtual ~VCMProtectionMethod() {}

    // Updates the efficiency of the method using the parameters provided
    //
    // Input:
    //         - parameters     : Parameters used to calculate the efficiency
    //
    // Return value             : True if this method is recommended in
    //                            the given conditions.
    virtual bool UpdateParameters(const VCMProtectionParameters* parameters) = 0;

    // Returns the protection type
    //
    // Return value             : The protection type
    enum VCMProtectionMethodEnum Type() const { return _type; }

    // Evaluates if this protection method is considered
    // better than the provided method.
    //
    // Input:
    //          - pm            : The protection method to compare with
    bool BetterThan(VCMProtectionMethod *pm);

    // Returns the bit rate required by this protection method
    // during these conditions.
    //
    // Return value             : Required bit rate
    virtual float RequiredBitRate() { return _efficiency; }

    // Returns the effective packet loss for ER, required by this protection method
    //
    // Return value             : Required effective packet loss
    virtual WebRtc_UWord8 RequiredPacketLossER() { return _effectivePacketLoss; }

    // Extracts the FEC protection factor for Key frame, required by this protection method
    //
    // Return value             : Required protectionFactor for Key frame
    virtual WebRtc_UWord8 RequiredProtectionFactorK() { return _protectionFactorK; }

    // Extracts the FEC protection factor for Delta frame, required by this protection method
    //
    // Return value             : Required protectionFactor for delta frame
    virtual WebRtc_UWord8 RequiredProtectionFactorD() { return _protectionFactorD; }

    // NOTE(review): these are public so subclasses and the logic class can
    // write them directly; consider accessors in a follow-up.
    WebRtc_UWord8 _effectivePacketLoss;  // loss forwarded to the encoder [0, 255]
    WebRtc_UWord8 _protectionFactorK;    // FEC factor for key frames [0, 255]
    WebRtc_UWord8 _protectionFactorD;    // FEC factor for delta frames [0, 255]
    float _residualPacketLoss;           // loss remaining after recovery [0, 1]
    float _scaleProtKey;                 // scale of key- vs delta-frame protection
    WebRtc_Word32 _maxPayloadSize;       // max RTP payload size in bytes

protected:
    float _efficiency;                   // bit cost of the method
    float _score;                        // selection score (lower is better)

private:
    const enum VCMProtectionMethodEnum _type;
};
// NACK-based protection: relies on retransmission; recommended only for
// round-trip times below _NACK_MAX_RTT.
class VCMNackMethod : public VCMProtectionMethod
{
public:
    VCMNackMethod() : VCMProtectionMethod(kNACK), _NACK_MAX_RTT(200) {}
    virtual ~VCMNackMethod() {}
    virtual bool UpdateParameters(const VCMProtectionParameters* parameters);
    // Get the effective packet loss used for ER.
    bool EffectivePacketLoss(WebRtc_UWord8 effPacketLoss, WebRtc_UWord16 rttTime);
    // Get the RTT threshold for NACK.
    WebRtc_UWord16 MaxRttNack() const;
private:
    // RTT threshold above which NACK is not recommended.
    const WebRtc_UWord16 _NACK_MAX_RTT;
};
// FEC-based protection: derives XOR-FEC protection factors and the encoder's
// effective packet loss from offline-computed tables.
class VCMFecMethod : public VCMProtectionMethod
{
public:
    VCMFecMethod() : VCMProtectionMethod(kFEC) {}
    virtual ~VCMFecMethod() {}
    virtual bool UpdateParameters(const VCMProtectionParameters* parameters);
    // Get the effective packet loss for ER.
    bool EffectivePacketLoss(const VCMProtectionParameters* parameters);
    // Get the FEC protection factors (key and delta frames).
    bool ProtectionFactor(const VCMProtectionParameters* parameters);
    // Get the boost applied to key frame protection.
    WebRtc_UWord8 BoostCodeRateKey(WebRtc_UWord8 packetFrameDelta,
                                   WebRtc_UWord8 packetFrameKey) const;
    // Convert the rate between the two conventions: relative to the total
    // packet count vs. relative to the source packet count.
    WebRtc_UWord8 ConvertFECRate(WebRtc_UWord8 codeRate) const;
    // Get the average effective recovery from FEC, for a random loss model.
    float AvgRecoveryFEC(const VCMProtectionParameters* parameters) const;
};
// Hybrid NACK + FEC protection: FEC covers part of the loss, NACK handles
// the residual loss.
class VCMNackFecMethod : public VCMProtectionMethod
{
public:
    VCMNackFecMethod() : VCMProtectionMethod(kNackFec) {}
    // Consistency fix: declare a virtual destructor like the sibling method
    // classes (VCMNackMethod, VCMFecMethod).
    virtual ~VCMNackFecMethod() {}
    virtual bool UpdateParameters(const VCMProtectionParameters* parameters);
    // Get the effective packet loss for ER.
    bool EffectivePacketLoss(const VCMProtectionParameters* parameters);
    // Get the FEC protection factors.
    bool ProtectionFactor(const VCMProtectionParameters* parameters);
};
// Key-frame request protection: asks for an I-frame on loss; recommended
// only when the RTT is below _IREQ_MAX_RTT.
class VCMIntraReqMethod : public VCMProtectionMethod
{
public:
    VCMIntraReqMethod() : VCMProtectionMethod(kIntraRequest), _IREQ_MAX_RTT(150) {}
    virtual bool UpdateParameters(const VCMProtectionParameters* parameters);
private:
    // RTT threshold above which an intra request is not recommended.
    const WebRtc_UWord32 _IREQ_MAX_RTT;
};
// Periodic I-frame protection: the fallback of last resort (always scored
// worst by UpdateParameters).
class VCMPeriodicIntraMethod : public VCMProtectionMethod
{
public:
    VCMPeriodicIntraMethod() : VCMProtectionMethod(kPeriodicIntra) {}
    virtual bool UpdateParameters(const VCMProtectionParameters* parameters);
};
// Macro-block intra refresh protection: recommended only above a minimum
// bit rate; reports zero required bit rate.
class VCMMbIntraRefreshMethod : public VCMProtectionMethod
{
public:
    VCMMbIntraRefreshMethod() :
        VCMProtectionMethod(kMBIntraRefresh), _MBREF_MIN_BITRATE(150) {}
    virtual bool UpdateParameters(const VCMProtectionParameters* parameters);
    virtual float RequiredBitRate() { return 0.0; }
private:
    // Minimum bit rate (kbit/s) for MB intra refresh to be recommended.
    const WebRtc_UWord32 _MBREF_MIN_BITRATE;
};
class VCMLossProtectionLogic
{
public:
VCMLossProtectionLogic() : _availableMethods(), _selectedMethod(NULL),
_bestNotOkMethod(NULL), _rtt(0), _lossPr(0.0f), _bitRate(0.0f), _frameRate(0.0f),
_keyFrameSize(0.0f), _fecRateKey(0), _fecRateDelta(0), _lastPrUpdateT(0),
_lossPr255(0.9999f), _lossPrHistory(), _shortMaxLossPr255(0),
_packetsPerFrame(0.9999f), _packetsPerFrameKey(0.9999f), _residualPacketLoss(0),
_boostRateKey(2)
{ Reset(); }
~VCMLossProtectionLogic();
void ClearLossProtections();
bool AddMethod(VCMProtectionMethod *newMethod);
bool RemoveMethod(VCMProtectionMethodEnum methodType);
VCMProtectionMethod* FindMethod(VCMProtectionMethodEnum methodType) const;
float HighestOverhead() const;
// Update the round-trip time
//
// Input:
// - rtt : Round-trip time in seconds.
void UpdateRtt(WebRtc_UWord32 rtt);
// Update residual packet loss
//
// Input:
// - residualPacketLoss : residual packet loss: effective loss after FEC recovery
void UpdateResidualPacketLoss(float residualPacketLoss);
// Update fecType
//
// Input:
// - fecType : kXORFec for generic XOR FEC
void UpdateFecType(VCMFecTypes fecType);
// Update the loss probability.
//
// Input:
// - lossPr255 : The packet loss probability in the interval [0, 255],
// reported by RTCP.
void UpdateLossPr(WebRtc_UWord8 lossPr255);
// Update the filtered packet loss.
//
// Input:
// - packetLossEnc : The reported packet loss filtered (max window or average)
void UpdateFilteredLossPr(WebRtc_UWord8 packetLossEnc);
// Update the current target bit rate.
//
// Input:
// - bitRate : The current target bit rate in kbits/s
void UpdateBitRate(float bitRate);
// Update the number of packets per frame estimate, for delta frames
//
// Input:
// - nPackets : Number of packets used to send the latest frame.
void UpdatePacketsPerFrame(float nPackets);
// Update the number of packets per frame estimate, for key frames
//
// Input:
// - nPackets : Number of packets used to send the latest frame.
void UpdatePacketsPerFrameKey(float nPackets);
// Update the keyFrameSize estimate
//
// Input:
// - keyFrameSize : The size of the latest sent key frame.
void UpdateKeyFrameSize(float keyFrameSize);
// Update the frame rate
//
// Input:
// - frameRate : The current target frame rate.
void UpdateFrameRate(float frameRate) { _frameRate = frameRate; }
// The amount of packet loss to cover for with FEC.
//
// Input:
// - fecRateKey : Packet loss to cover for with FEC when sending key frames.
// - fecRateDelta : Packet loss to cover for with FEC when sending delta frames.
void UpdateFECRates(WebRtc_UWord8 fecRateKey, WebRtc_UWord8 fecRateDelta)
{ _fecRateKey = fecRateKey; _fecRateDelta = fecRateDelta; }
// Update the protection methods with the current VCMProtectionParameters and
// choose the best method available. The update involves computing the robustness settings
// for the protection method.
//
// Input:
// - newMethod : If not NULL, this is method will be selected by force.
//
// Return value : True if the selected method is recommended using these settings,
// false if it's the best method, but still not recommended to be used.
// E.g. if NACK is the best available, but the RTT is too large, false
// will be returned.
bool UpdateMethod(VCMProtectionMethod *newMethod = NULL);
// Returns the method currently selected.
//
// Return value : The protection method currently selected.
VCMProtectionMethod* SelectedMethod() const;
// Returns the filtered loss probability in the interval [0, 255].
//
// Return value : The filtered loss probability
WebRtc_UWord8 FilteredLoss() const;
// Get constraint on NACK
//
// return value : RTT threshold for using NACK
WebRtc_UWord8 GetNackThreshold() const;
void Reset();
private:
// Sets the available loss protection methods.
void UpdateMaxLossHistory(WebRtc_UWord8 lossPr255, WebRtc_Word64 now);
WebRtc_UWord8 MaxFilteredLossPr(WebRtc_Word64 nowMs) const;
ListWrapper _availableMethods;
VCMProtectionMethod* _selectedMethod;
VCMProtectionMethod* _bestNotOkMethod;
VCMProtectionParameters _currentParameters;
WebRtc_UWord32 _rtt;
float _lossPr;
float _bitRate;
float _frameRate;
float _keyFrameSize;
WebRtc_UWord8 _fecRateKey;
WebRtc_UWord8 _fecRateDelta;
WebRtc_UWord32 _lastPrUpdateT;
WebRtc_UWord32 _lastPacketPerFrameUpdateT;
WebRtc_UWord32 _lastPacketPerFrameUpdateTKey;
VCMExpFilter _lossPr255;
VCMLossProbabilitySample _lossPrHistory[kLossPrHistorySize];
WebRtc_UWord8 _shortMaxLossPr255;
VCMExpFilter _packetsPerFrame;
VCMExpFilter _packetsPerFrameKey;
float _residualPacketLoss;
WebRtc_UWord8 _boostRateKey;
VCMFecTypes _fecType;
};
} // namespace webrtc
#endif // WEBRTC_MODULES_VIDEO_CODING_MEDIA_OPT_UTIL_H_

View File

@@ -0,0 +1,687 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "media_optimization.h"
#include "content_metrics_processing.h"
#include "frame_dropper.h"
#include "qm_select.h"
namespace webrtc {
// Constructor: zeroes the send statistics and creates the four helper
// objects (frame dropper, loss protection logic, content metrics processing
// and QM selection), all owned and deleted by the destructor.
VCMMediaOptimization::VCMMediaOptimization(WebRtc_Word32 id):
_id(id),
_maxBitRate(0),
_sendCodecType(kVideoCodecUnknown),
_codecWidth(0),
_codecHeight(0),
_userFrameRate(0),
_lossProtOverhead(0),
_packetLossEnc(0),
_fractionLost(0),
_sendStatisticsZeroEncode(0),
_maxPayloadSize(1460),
_lastBitRate(0),
_targetBitRate(0),
_enableQm(false),
_videoProtectionCallback(NULL),
_videoQMSettingsCallback(NULL),
_encodedFrameSamples(),
_avgSentBitRateBps(0.0f),
_keyFrameCnt(0),
_deltaFrameCnt(0),
_lastQMUpdateTime(0),
_lastChangeTime(0)
{
    memset(_sendStatistics, 0, sizeof(_sendStatistics));
    // Helper objects owned by this instance (freed in the destructor).
    _frameDropper = new VCMFrameDropper(_id);
    _lossProtLogic = new VCMLossProtectionLogic();
    _content = new VCMContentMetricsProcessing();
    _qms = new VCMQmSelect();
}
// Destructor: frees the registered protection methods first, then the four
// helper objects created in the constructor.
VCMMediaOptimization::~VCMMediaOptimization(void)
{
    _lossProtLogic->ClearLossProtections();
    delete _lossProtLogic;
    delete _frameDropper;
    delete _content;
    delete _qms;
}
// Resets the media optimization state: all helper objects, rate/statistics
// counters and the encoded-frame sample window.
//
// Return value : VCM_OK.
WebRtc_Word32
VCMMediaOptimization::Reset()
{
    _frameDropper->Reset();
    _lossProtLogic->Reset();
    _frameDropper->SetRates(0, 0);
    _content->Reset();
    _qms->Reset();
    // Seed the protection logic with the current input frame rate.
    _lossProtLogic->UpdateFrameRate(static_cast<float>(InputFrameRate()));
    // Bug fix: removed a second, redundant _lossProtLogic->Reset() call that
    // immediately followed the frame-rate update.
    _sendStatisticsZeroEncode = 0;
    _lastBitRate = 0;
    _targetBitRate = 0;
    _lossProtOverhead = 0;
    _codecWidth = 0;
    _codecHeight = 0;
    _userFrameRate = 0;
    _keyFrameCnt = 0;
    _deltaFrameCnt = 0;
    _lastQMUpdateTime = 0;
    _lastChangeTime = 0;
    // Invalidate the encoded-frame sample window.
    for (WebRtc_Word32 i = 0; i < kBitrateMaxFrameSamples; i++)
    {
        _encodedFrameSamples[i]._sizeBytes = -1;
        _encodedFrameSamples[i]._timeCompleteMs = -1;
    }
    _avgSentBitRateBps = 0.0f;
    return VCM_OK;
}
// Updates the loss protection logic with the latest channel parameters
// (total bit rate, packet loss, RTT), computes the protection overhead,
// and derives the source-coding target rate.
// fractionLost is an in/out parameter: on return it holds the effective
// packet loss the encoder should assume (possibly filtered/adjusted).
// Returns the target bit rate left for source coding (total - overhead).
WebRtc_UWord32
VCMMediaOptimization::SetTargetRates(WebRtc_UWord32 bitRate,
                                     WebRtc_UWord8 &fractionLost,
                                     WebRtc_UWord32 roundTripTimeMs)
{
    VCMProtectionMethod *selectedMethod = _lossProtLogic->SelectedMethod();
    _lossProtLogic->UpdateBitRate(static_cast<float>(bitRate));
    _lossProtLogic->UpdateLossPr(fractionLost);
    _lossProtLogic->UpdateRtt(roundTripTimeMs);
    _lossProtLogic->UpdateResidualPacketLoss(static_cast<float>(fractionLost));
    VCMFecTypes fecType = kXORFec; // generic FEC
    _lossProtLogic->UpdateFecType(fecType);
    // Frame rate for the protection logic: the actual/sent frame rate.
    float actualFrameRate = SentFrameRate();
    // Sanity: never report less than 1 fps.
    if (actualFrameRate < 1.0)
    {
        actualFrameRate = 1.0;
    }
    _lossProtLogic->UpdateFrameRate(actualFrameRate);
    _fractionLost = fractionLost;
    // The effective packet loss may be the received loss or a filtered
    // value (average or max filter), depending on the logic configuration.
    WebRtc_UWord8 packetLossEnc = _lossProtLogic->FilteredLoss();
    // Use the filtered loss for computing the robustness settings.
    _lossProtLogic->UpdateFilteredLossPr(packetLossEnc);
    // Rate cost of the protection methods.
    _lossProtOverhead = 0;
    if(selectedMethod)
    {
        // UpdateMethod computes the robustness settings (effective packet
        // loss for ER and the FEC protection factors) and the overhead cost
        // for the protection method chosen via SetVideoProtection.
        _lossProtLogic->UpdateMethod();
        // FEC code rate for key frames.
        const WebRtc_UWord8 codeRateKeyRTP = selectedMethod->RequiredProtectionFactorK();
        // FEC code rate for delta frames.
        const WebRtc_UWord8 codeRateDeltaRTP = selectedMethod->RequiredProtectionFactorD();
        // Effective packet loss for error resilience.
        packetLossEnc = selectedMethod->RequiredPacketLossER();
        // Bit cost of the protection method.
        _lossProtOverhead = static_cast<WebRtc_UWord32>(_lossProtLogic->HighestOverhead() + 0.5f);
        // NACK is on for the NACK and NackFec methods; off for plain FEC.
        bool nackStatus = true;
        if (selectedMethod->Type() == kFEC)
        {
            nackStatus = false;
        }
        if(_videoProtectionCallback)
        {
            _videoProtectionCallback->ProtectionRequest(codeRateDeltaRTP ,codeRateKeyRTP, nackStatus);
        }
    }
    // Report the effective loss back to the caller (by-reference out-param).
    fractionLost = packetLossEnc;
    WebRtc_UWord32 nackBitRate=0;
    if(selectedMethod && _lossProtLogic->FindMethod(kNACK) != NULL)
    {
        // Make sure we don't over-use the channel momentarily. This is
        // necessary for NACK since it can be very bursty.
        // Note: uses _lastBitRate/_targetBitRate from the PREVIOUS call,
        // which are only updated further below.
        nackBitRate = (_lastBitRate * fractionLost) / 255;
        if (nackBitRate > _targetBitRate)
        {
            nackBitRate = _targetBitRate;
        }
        _frameDropper->SetRates(static_cast<float>(bitRate - nackBitRate), 0);
    }
    else
    {
        _frameDropper->SetRates(static_cast<float>(bitRate - _lossProtOverhead), 0);
    }
    // _lastBitRate is the previous total target rate (before compensation);
    // it may be used by UpdateEncoderBitRate.
    _lastBitRate = _targetBitRate;
    // Source coding rate: total rate minus protection overhead.
    _targetBitRate = bitRate - _lossProtOverhead;
    if (_enableQm)
    {
        // Update QM with the new rates and check whether to re-select quality.
        _qms->UpdateRates((float)_targetBitRate, _avgSentBitRateBps,_incomingFrameRate);
        bool selectQM = checkStatusForQMchange();
        if (selectQM)
        {
            SelectQuality();
        }
    }
    return _targetBitRate;
}
bool
VCMMediaOptimization::DropFrame()
{
_frameDropper->Leak((WebRtc_UWord32)(InputFrameRate() + 0.5f)); // leak appropriate number of bytes
return _frameDropper->DropFrame();
}
// Reports the number of key and delta frames encoded so far.
// Always succeeds and returns VCM_OK.
WebRtc_Word32
VCMMediaOptimization::SentFrameCount(VCMFrameCount &frameCount) const
{
    frameCount.numKeyFrames = _keyFrameCnt;
    frameCount.numDeltaFrames = _deltaFrameCnt;
    return VCM_OK;
}
// Informs the module of a (re)configured encoder. Everything codec
// specific is reset here since the codec has changed. If the native
// dimensions changed, it was either a user-initiated or a QM-initiated
// change; which one can only be determined after the first frame.
// Returns the result of re-initializing the QM selector.
WebRtc_Word32
VCMMediaOptimization::SetEncodingData(VideoCodecType sendCodecType, WebRtc_Word32 maxBitRate,
                                      WebRtc_UWord32 frameRate, WebRtc_UWord32 bitRate,
                                      WebRtc_UWord16 width, WebRtc_UWord16 height)
{
    // Remember when the configuration changed (used to rate-limit QM).
    _lastChangeTime = VCMTickTime::MillisecondTimestamp();
    // Cache the new encoder settings.
    _maxBitRate = maxBitRate;
    _sendCodecType = sendCodecType;
    _targetBitRate = bitRate;
    _userFrameRate = (float)frameRate;
    _codecWidth = width;
    _codecHeight = height;
    // Restart content metrics collection at the new frame rate.
    _content->Reset();
    _content->UpdateFrameRate(frameRate);
    // Push the new rates into loss protection and the frame dropper.
    _lossProtLogic->UpdateBitRate(static_cast<float>(bitRate));
    _lossProtLogic->UpdateFrameRate(static_cast<float>(frameRate));
    _frameDropper->Reset();
    _frameDropper->SetRates(static_cast<float>(bitRate), static_cast<float>(frameRate));
    // Re-initialize quality-mode selection with the new operating point.
    return _qms->Initialize((float)_targetBitRate, _userFrameRate, _codecWidth, _codecHeight);
}
// Registers the callback used to report protection settings (FEC factors,
// NACK on/off) computed in SetTargetRates. May be NULL to unregister.
WebRtc_Word32
VCMMediaOptimization::RegisterProtectionCallback(VCMProtectionCallback* protectionCallback)
{
    _videoProtectionCallback = protectionCallback;
    return VCM_OK;
}
// Enables or disables the frame dropper.
void
VCMMediaOptimization::EnableFrameDropper(bool enable)
{
    // Simple pass-through to the owned dropper instance.
    _frameDropper->Enable(enable);
}
void
VCMMediaOptimization::EnableNack(bool enable)
{
// Add NACK to the list of loss protection methods
bool updated = false;
if (enable)
{
VCMProtectionMethod *nackMethod = new VCMNackMethod();
updated = _lossProtLogic->AddMethod(nackMethod);
if (!updated)
{
delete nackMethod;
}
}
else
{
updated = _lossProtLogic->RemoveMethod(kNACK);
}
if (updated)
{
_lossProtLogic->UpdateMethod();
}
}
bool
VCMMediaOptimization::IsNackEnabled()
{
return (_lossProtLogic->FindMethod(kFEC) != NULL);
}
void
VCMMediaOptimization::EnableFEC(bool enable)
{
// Add FEC to the list of loss protection methods
bool updated = false;
if (enable)
{
VCMProtectionMethod *fecMethod = new VCMFecMethod();
updated = _lossProtLogic->AddMethod(fecMethod);
if (!updated)
{
delete fecMethod;
}
}
else
{
updated = _lossProtLogic->RemoveMethod(kFEC);
}
if (updated)
{
_lossProtLogic->UpdateMethod();
}
}
void
VCMMediaOptimization::EnableNackFEC(bool enable)
{
// Add NackFec to the list of loss protection methods
bool updated = false;
if (enable)
{
VCMProtectionMethod *nackfecMethod = new VCMNackFecMethod();
updated = _lossProtLogic->AddMethod(nackfecMethod);
if (!updated)
{
delete nackfecMethod;
}
}
else
{
updated = _lossProtLogic->RemoveMethod(kNackFec);
}
if (updated)
{
_lossProtLogic->UpdateMethod();
}
}
// Returns whether FEC is currently enabled as a protection method.
bool
VCMMediaOptimization::IsFecEnabled()
{
    return _lossProtLogic->FindMethod(kFEC) != NULL;
}
// Returns whether the hybrid NACK+FEC method is currently enabled.
bool
VCMMediaOptimization::IsNackFecEnabled()
{
    return _lossProtLogic->FindMethod(kNackFec) != NULL;
}
// Sets the maximum payload size (MTU) used when estimating the minimum
// number of packets per frame.
void
VCMMediaOptimization::SetMtu(WebRtc_Word32 mtu)
{
    _maxPayloadSize = mtu;
}
// Returns the actual sent frame rate as reported by the frame dropper.
// NOTE(review): on the (unexpected) NULL-dropper path this returns the
// integer error code VCM_CODEC_ERROR converted to float — callers cannot
// easily distinguish it from a valid rate; confirm this is intended.
float
VCMMediaOptimization::SentFrameRate()
{
    if(_frameDropper)
    {
        return _frameDropper->ActualFrameRate((WebRtc_UWord32)(InputFrameRate() + 0.5f));
    }
    return VCM_CODEC_ERROR;
}
float
VCMMediaOptimization::SentBitRate()
{
UpdateBitRateEstimate(-1, VCMTickTime::MillisecondTimestamp());
return _avgSentBitRateBps / 1000.0f;
}
// Returns the maximum allowed bit rate configured via SetEncodingData().
WebRtc_Word32
VCMMediaOptimization::MaxBitRate()
{
    return _maxBitRate;
}
// Informs the module of one encoder output: feeds the sent-bitrate
// estimate, the frame dropper, loss protection packet statistics, QM,
// and the key/delta frame counters.
// Cleanup: the inner `encodedLength > 0` re-checks were redundant — the
// whole section is already guarded by the outer `if (encodedLength > 0)`.
WebRtc_Word32
VCMMediaOptimization::UpdateWithEncodedData(WebRtc_Word32 encodedLength,
                                            FrameType encodedFrameType)
{
    // Always update the bit rate estimate (length <= 0 only refreshes it).
    UpdateBitRateEstimate(encodedLength, VCMTickTime::MillisecondTimestamp());
    if (encodedLength > 0)
    {
        const bool deltaFrame = (encodedFrameType != kVideoFrameKey &&
                                 encodedFrameType != kVideoFrameGolden);
        _frameDropper->Fill(encodedLength, deltaFrame);
        if (_maxPayloadSize > 0)
        {
            // Lower bound on packets per frame, assuming max-size packets.
            const float minPacketsPerFrame = encodedLength /
                                             static_cast<float>(_maxPayloadSize);
            if (deltaFrame)
            {
                _lossProtLogic->UpdatePacketsPerFrame(minPacketsPerFrame);
            }
            else
            {
                _lossProtLogic->UpdatePacketsPerFrameKey(minPacketsPerFrame);
            }
            if (_enableQm)
            {
                // Update quality selection with the encoded length.
                _qms->UpdateEncodedSize(encodedLength, encodedFrameType);
            }
        }
        if (!deltaFrame)
        {
            _lossProtLogic->UpdateKeyFrameSize(static_cast<float>(encodedLength));
        }
        // Update key/delta frame counters.
        if (deltaFrame)
        {
            _deltaFrameCnt++;
        }
        else
        {
            _keyFrameCnt++;
        }
    }
    return VCM_OK;
}
// Maintains a sliding window (kBitrateAverageWinMs) of encoded-frame
// samples and recomputes _avgSentBitRateBps. A negative encodedLength
// only refreshes the average without inserting a new sample.
void VCMMediaOptimization::UpdateBitRateEstimate(WebRtc_Word64 encodedLength,
                                                 WebRtc_Word64 nowMs)
{
    int i = kBitrateMaxFrameSamples - 1;
    WebRtc_UWord32 frameSizeSum = 0;
    WebRtc_Word64 timeOldest = -1;
    // Find an empty slot for storing the new sample and at the same time
    // accumulate the history. Slots are scanned newest-last; _sizeBytes == -1
    // marks an unused slot.
    for (; i >= 0; i--)
    {
        if (_encodedFrameSamples[i]._sizeBytes == -1)
        {
            // Found empty slot.
            break;
        }
        // Only samples inside the averaging window contribute.
        if (nowMs - _encodedFrameSamples[i]._timeCompleteMs < kBitrateAverageWinMs)
        {
            frameSizeSum += static_cast<WebRtc_UWord32>(_encodedFrameSamples[i]._sizeBytes);
            if (timeOldest == -1)
            {
                // First in-window sample seen is the oldest one.
                timeOldest = _encodedFrameSamples[i]._timeCompleteMs;
            }
        }
    }
    if (encodedLength > 0)
    {
        if (i < 0)
        {
            // No empty slot: shift the history, dropping the oldest sample.
            for (i = kBitrateMaxFrameSamples - 2; i >= 0; i--)
            {
                _encodedFrameSamples[i + 1] = _encodedFrameSamples[i];
            }
            i++;
        }
        // Insert the new sample at the free slot.
        _encodedFrameSamples[i]._sizeBytes = encodedLength;
        _encodedFrameSamples[i]._timeCompleteMs = nowMs;
    }
    if (timeOldest > -1)
    {
        // Average over the elapsed window; clamp the denominator to >= 1 ms
        // to avoid division by zero.
        float denom = static_cast<float>(nowMs - timeOldest);
        if (denom < 1.0)
        {
            denom = 1.0;
        }
        _avgSentBitRateBps = (frameSizeSum + encodedLength) * 8 * 1000 / denom;
    }
    else if (encodedLength > 0)
    {
        // Only the brand-new sample exists; report its size in bits.
        _avgSentBitRateBps = static_cast<float>(encodedLength * 8);
    }
    else
    {
        // No data in the window at all.
        _avgSentBitRateBps = 0;
    }
}
// Registers the callback used to push new QM settings (frame rate and
// dimensions) to the VPM/user. QM is active exactly when a callback is set.
WebRtc_Word32
VCMMediaOptimization::RegisterVideoQMCallback(VCMQMSettingsCallback *videoQMSettings)
{
    _videoQMSettingsCallback = videoQMSettings;
    _enableQm = (_videoQMSettingsCallback != NULL);
    return VCM_OK;
}
// Feeds new content metrics into the content processor. A NULL pointer
// disables QM and clears its state, since QM cannot run without metrics.
void
VCMMediaOptimization::updateContentData(const VideoContentMetrics *contentMetrics)
{
    if (contentMetrics != NULL)
    {
        _content->UpdateContentData(contentMetrics);
        return;
    }
    // No metrics available: switch QM off and reset the selector.
    _enableQm = false;
    _qms->Reset();
}
// Runs one quality-mode selection pass: asks the QM selector for a new
// mode based on the accumulated content metrics, applies it via QMUpdate,
// and resets the per-interval counters.
// Returns VCM_OK, or a negative error code from the selector.
WebRtc_Word32
VCMMediaOptimization::SelectQuality()
{
    // Reset quantities for QM select.
    _qms->ResetQM();
    // Select quality mode.
    VCMQualityMode* qm = NULL;
    WebRtc_Word32 ret = _qms->SelectQuality(_content->Data(), &qm);
    if (ret < 0)
    {
        return ret;
    }
    // Check for updates to spatial/temporal modes and apply them.
    QMUpdate(qm);
    // Reset all the rate and related frame counter quantities.
    _qms->ResetRates();
    // Remember when we last updated QM (used to rate-limit selection).
    _lastQMUpdateTime = VCMTickTime::MillisecondTimestamp();
    // Reset content metrics for the next interval.
    _content->Reset();
    return VCM_OK;
}
// Check timing constraints and look for significant change in:
// (1) scene content
// (2) target bit rate
bool
VCMMediaOptimization::checkStatusForQMchange()
{
bool status = true;
// Check that we do not call QMSelect too often, and that we waited some time (to sample the metrics) from the event lastChangeTime
// lastChangeTime is the time where user changed the size/rate/frame rate (via SetEncodingData)
WebRtc_Word64 now = VCMTickTime::MillisecondTimestamp();
if ((now - _lastQMUpdateTime) < kQmMinIntervalMs ||
(now - _lastChangeTime) < kQmMinIntervalMs)
{
status = false;
}
return status;
}
// Applies a quality-mode decision: computes the actual frame rate and
// dimensions to send to the encoder pipeline and pushes them through the
// QM settings callback. Returns false if the mode is the no-change mode
// (all factors == 1), true otherwise.
bool
VCMMediaOptimization::QMUpdate(VCMQualityMode* qm)
{
    // No change requested: all factors at unity.
    if (qm->spatialHeightFact == 1 &&
        qm->spatialWidthFact == 1 &&
        qm->temporalFact == 1)
    {
        return false;
    }
    // Content metrics hold the native (pre-downsampling) values.
    VideoContentMetrics* cm = _content->Data();
    // Temporal: start from the rounded incoming frame rate.
    WebRtc_UWord32 frameRate = static_cast<WebRtc_UWord32>(_incomingFrameRate + 0.5f);
    // Factor 0 means "go back up" (double the temporal resolution).
    if (qm->temporalFact == 0)
    {
        // NOTE(review): the cast binds to the literal 2 only, so this is
        // 2u * float truncated on assignment — likely intended as
        // (WebRtc_UWord32)(2 * _incomingFrameRate); confirm.
        frameRate = (WebRtc_UWord32) 2 * _incomingFrameRate;
    }
    // Otherwise go down in temporal resolution by the given factor.
    else
    {
        frameRate = (WebRtc_UWord32)(_incomingFrameRate / qm->temporalFact + 1);
    }
    // Spatial: factors 0/0 mean scale back up to the native size.
    WebRtc_UWord32 height = _codecHeight;
    WebRtc_UWord32 width = _codecWidth;
    if (qm->spatialHeightFact == 0 && qm->spatialWidthFact == 0)
    {
        height = cm->nativeHeight;
        width = cm->nativeWidth;
    }
    else
    {
        // Assumes non-zero factors here; the selector only emits 0 for
        // both dimensions together — TODO confirm against VCMQmSelect.
        height = _codecHeight / qm->spatialHeightFact;
        width = _codecWidth / qm->spatialWidthFact;
    }
    WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCoding, _id,
                 "Quality Mode Update: W = %d, H = %d, FR = %f",
                 width, height, frameRate);
    // Push the new target frame rate and size to the VPM via the callback.
    // Presumably non-NULL here since QM only runs with a registered
    // callback (see RegisterVideoQMCallback) — verify.
    _videoQMSettingsCallback->SetVideoQMSettings(frameRate, width, height);
    return true;
}
void
VCMMediaOptimization::UpdateIncomingFrameRate()
{
WebRtc_Word64 now = VCMTickTime::MillisecondTimestamp();
if(_incomingFrameTimes[0] == 0)
{
// first no shift
} else
{
// shift
for(WebRtc_Word32 i = (kFrameCountHistorySize - 2); i >= 0 ; i--)
{
_incomingFrameTimes[i+1] = _incomingFrameTimes[i];
}
}
_incomingFrameTimes[0] = now;
ProcessIncomingFrameRate(now);
}
// Recomputes _incomingFrameRate (fps) from the recorded frame arrival
// times, considering only timestamps within the last kFrameHistoryWinMs.
void
VCMMediaOptimization::ProcessIncomingFrameRate(WebRtc_Word64 now)
{
    WebRtc_Word32 num = 0;
    WebRtc_Word32 nrOfFrames = 0;
    // Count usable history entries starting at index 1 (index 0 is the
    // most recent frame). Stop at an unset slot or at data older than the
    // history window (2 s).
    for(num = 1; num < (kFrameCountHistorySize - 1); num++)
    {
        if (_incomingFrameTimes[num] <= 0 ||
            // don't use data older than 2 s
            now - _incomingFrameTimes[num] > kFrameHistoryWinMs)
        {
            break;
        } else
        {
            nrOfFrames++;
        }
    }
    if (num > 1)
    {
        // Rate = frames observed over the span back to the oldest usable
        // timestamp; guard against a zero/negative time span.
        const WebRtc_Word64 diff = now - _incomingFrameTimes[num-1];
        _incomingFrameRate = 1.0;
        if(diff >0)
        {
            _incomingFrameRate = nrOfFrames * 1000.0f / static_cast<float>(diff);
        }
    }
    else
    {
        // Not enough history: report the raw count (0).
        _incomingFrameRate = static_cast<float>(nrOfFrames);
    }
}
// Returns the current incoming frame rate, rounded to the nearest integer.
WebRtc_UWord32
VCMMediaOptimization::InputFrameRate()
{
    // Refresh the estimate before reading it.
    ProcessIncomingFrameRate(VCMTickTime::MillisecondTimestamp());
    return static_cast<WebRtc_UWord32>(_incomingFrameRate + 0.5f);
}
}

View File

@@ -0,0 +1,220 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_MODULES_VIDEO_CODING_MEDIA_OPTIMIZATION_H_
#define WEBRTC_MODULES_VIDEO_CODING_MEDIA_OPTIMIZATION_H_
#include "list_wrapper.h"
#include "module_common_types.h"
#include "video_coding.h"
#include "trace.h"
#include "media_opt_util.h"
#include "qm_select.h"
namespace webrtc
{
enum { kBitrateMaxFrameSamples = 60 };
enum { kBitrateAverageWinMs = 1000 };
class VCMContentMetricsProcessing;
class VCMFrameDropper;
struct VCMEncodedFrameSample
{
VCMEncodedFrameSample() : _sizeBytes(-1), _timeCompleteMs(-1) {}
WebRtc_Word64 _sizeBytes;
WebRtc_Word64 _timeCompleteMs;
};
// Media optimization module: tracks send-side rates, drives frame
// dropping, loss protection (NACK/FEC/NackFec) and quality-mode (QM)
// selection for the encoder.
class VCMMediaOptimization
{
public:
    VCMMediaOptimization(WebRtc_Word32 id);
    ~VCMMediaOptimization(void);
    /*
    * Reset the Media Optimization module to its initial state
    */
    WebRtc_Word32 Reset();
    /**
    * Set target rates for the encoder given the channel parameters
    * Inputs:  bitRate - target bitRate; in the conference case this is the rate
    *                    between the sending client and the server
    *          fractionLost - packet loss in % in the network; updated in place
    *                         with the effective loss the encoder should assume
    *          roundTripTimeMs - round trip time in milliseconds
    * Returns the bit rate left for source coding (total minus protection
    * overhead).
    */
    WebRtc_UWord32 SetTargetRates(WebRtc_UWord32 bitRate,
                                  WebRtc_UWord8 &fractionLost,
                                  WebRtc_UWord32 roundTripTimeMs);
    /**
    * Inform media optimization of the initial encoding state
    */
    WebRtc_Word32 SetEncodingData(VideoCodecType sendCodecType,
                                  WebRtc_Word32 maxBitRate,
                                  WebRtc_UWord32 frameRate,
                                  WebRtc_UWord32 bitRate,
                                  WebRtc_UWord16 width,
                                  WebRtc_UWord16 height);
    /**
    * Enable/disable NACK and update error resilience parameters
    */
    void EnableNack(bool enable);
    /**
    * Returns whether or not NACK is enabled
    */
    bool IsNackEnabled();
    /**
    * Enable/disable FEC and update error resilience parameters
    */
    void EnableFEC(bool enable);
    /**
    * Returns whether or not FEC is enabled
    */
    bool IsFecEnabled();
    /**
    * Returns whether or not NackFec is enabled
    */
    bool IsNackFecEnabled();
    /**
    * Enable/disable hybrid NackFec and update error resilience parameters
    */
    void EnableNackFEC(bool enable);
    /**
    * Update the max payload size (MTU)
    */
    void SetMtu(WebRtc_Word32 mtu);
    /*
    * Get actual input frame rate (rounded)
    */
    WebRtc_UWord32 InputFrameRate();
    /*
    * Get actual sent frame rate
    */
    float SentFrameRate();
    /*
    * Get actual sent bit rate (kbps)
    */
    float SentBitRate();
    /*
    * Get maximum allowed bit rate
    */
    WebRtc_Word32 MaxBitRate();
    /*
    * Inform Media Optimization of encoding output: length and frame type
    */
    WebRtc_Word32 UpdateWithEncodedData(WebRtc_Word32 encodedLength,
                                        FrameType encodedFrameType);
    /*
    * Register a protection callback used to inform the user about the
    * protection methods selected
    */
    WebRtc_Word32 RegisterProtectionCallback(VCMProtectionCallback* protectionCallback);
    /*
    * Register a quality settings callback used to inform VPM/user about the
    * optimal quality settings (frame rate/dimensions). Registering a
    * non-NULL callback enables QM.
    */
    WebRtc_Word32 RegisterVideoQMCallback(VCMQMSettingsCallback* videoQMSettings);
    void EnableFrameDropper(bool enable);
    bool DropFrame();
    /*
    * Get number of key/delta frames encoded
    */
    WebRtc_Word32 SentFrameCount(VCMFrameCount &frameCount) const;
    /*
    * Update the incoming frame rate estimate with a new frame arrival
    */
    void UpdateIncomingFrameRate();
    /**
    * Update content metric data (NULL disables QM)
    */
    void updateContentData(const VideoContentMetrics* contentMetrics);
    /**
    * Compute a new quality mode and apply it
    */
    WebRtc_Word32 SelectQuality();
private:
    // Maintains the sliding-window sent-bitrate average.
    void UpdateBitRateEstimate(WebRtc_Word64 encodedLength, WebRtc_Word64 nowMs);
    /*
    * Verify if QM settings differ from default (i.e. an update is required)
    * and compute the actual values to send to the encoder.
    */
    bool QMUpdate(VCMQualityMode* qm);
    /**
    * Check if we should make a QM change now (rate-limited).
    * Returns true if yes, false otherwise.
    */
    bool checkStatusForQMchange();
    // Recompute the incoming frame rate from the arrival-time history.
    void ProcessIncomingFrameRate(WebRtc_Word64 now);
    enum { kFrameCountHistorySize = 90};
    enum { kFrameHistoryWinMs = 2000};
    WebRtc_Word32 _id;
    WebRtc_Word32 _maxBitRate;
    VideoCodecType _sendCodecType;
    WebRtc_UWord16 _codecWidth;
    WebRtc_UWord16 _codecHeight;
    float _userFrameRate;
    VCMFrameDropper* _frameDropper;          // owned
    VCMLossProtectionLogic* _lossProtLogic;  // owned
    WebRtc_UWord32 _lossProtOverhead;        // bit cost of protection
    WebRtc_UWord8 _packetLossEnc;
    WebRtc_UWord8 _fractionLost;
    WebRtc_UWord32 _sendStatistics[4];
    WebRtc_UWord32 _sendStatisticsZeroEncode;
    WebRtc_Word32 _maxPayloadSize;
    WebRtc_UWord32 _lastBitRate;             // previous total target rate
    WebRtc_UWord32 _targetBitRate;           // source-coding rate
    float _incomingFrameRate;
    WebRtc_Word64 _incomingFrameTimes[kFrameCountHistorySize];
    bool _enableQm;
    VCMProtectionCallback* _videoProtectionCallback;   // not owned
    VCMQMSettingsCallback* _videoQMSettingsCallback;   // not owned
    VCMEncodedFrameSample _encodedFrameSamples[kBitrateMaxFrameSamples];
    float _avgSentBitRateBps;
    WebRtc_UWord32 _keyFrameCnt;
    WebRtc_UWord32 _deltaFrameCnt;
    VCMContentMetricsProcessing* _content;   // owned
    VCMQmSelect* _qms;                       // owned
    WebRtc_Word64 _lastQMUpdateTime;
    WebRtc_Word64 _lastChangeTime; // content or user triggered
}; // end of VCMMediaOptimization class definition
} // namespace webrtc
#endif // WEBRTC_MODULES_VIDEO_CODING_MEDIA_OPTIMIZATION_H_

View File

@@ -0,0 +1,226 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_MODULES_VIDEO_CODING_SOURCE_NACK_FEC_TABLES_H_
#define WEBRTC_MODULES_VIDEO_CODING_SOURCE_NACK_FEC_TABLES_H_
namespace webrtc
{
// Table for softening the FEC rate used by the hybrid NACK/FEC protection
// method. 200 monotonically increasing entries; presumably indexed by a
// loss/rate-derived quantity and Q-scaled — TODO confirm against the
// NackFec method implementation in media_opt_util.
const WebRtc_UWord16 VCMNackFecTable[200] = {
    27, 28, 30, 31, 33, 35, 36, 38, 40, 42,
    45, 47, 49, 52, 54, 57, 60, 63, 66, 70,
    73, 77, 81, 85, 89, 94, 98, 103, 108, 114,
    120, 126, 132, 138, 145, 152, 160, 168, 176, 185,
    194, 203, 213, 223, 234, 246, 257, 270, 283, 296,
    310, 325, 340, 356, 373, 390, 408, 427, 446, 467,
    488, 510, 532, 556, 581, 606, 632, 659, 688, 717,
    747, 778, 810, 843, 877, 912, 948, 985, 1022, 1061,
    1101, 1142, 1183, 1226, 1269, 1314, 1359, 1404, 1451, 1498,
    1546, 1594, 1643, 1693, 1743, 1793, 1843, 1894, 1945, 1996,
    2048, 2099, 2150, 2201, 2252, 2302, 2352, 2402, 2452, 2501,
    2549, 2597, 2644, 2691, 2736, 2781, 2826, 2869, 2912, 2953,
    2994, 3034, 3073, 3110, 3147, 3183, 3218, 3252, 3285, 3317,
    3348, 3378, 3407, 3436, 3463, 3489, 3514, 3539, 3563, 3585,
    3607, 3628, 3649, 3668, 3687, 3705, 3722, 3739, 3755, 3770,
    3785, 3799, 3812, 3825, 3838, 3849, 3861, 3872, 3882, 3892,
    3901, 3910, 3919, 3927, 3935, 3943, 3950, 3957, 3963, 3969,
    3975, 3981, 3987, 3992, 3997, 4001, 4006, 4010, 4014, 4018,
    4022, 4025, 4029, 4032, 4035, 4038, 4041, 4043, 4046, 4048,
    4050, 4053, 4055, 4057, 4059, 4060, 4062, 4064, 4065, 4067,
};
} // namespace webrtc
#endif // WEBRTC_MODULES_VIDEO_CODING_SOURCE_NACK_FEC_TABLES_H_

View File

@@ -0,0 +1,77 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "packet.h"
#include "module_common_types.h"
#include <assert.h>
namespace webrtc {
// Constructs a packet from a received RTP packet: copies the relevant RTP
// header fields and derives the codec type from the video payload header.
// Note: dataPtr is a non-owning pointer to the caller's payload buffer.
VCMPacket::VCMPacket(const WebRtc_UWord8* ptr,
                     const WebRtc_UWord32 size,
                     const WebRtcRTPHeader& rtpHeader) :
    payloadType(rtpHeader.header.payloadType),
    timestamp(rtpHeader.header.timestamp),
    seqNum(rtpHeader.header.sequenceNumber),
    dataPtr(ptr),
    sizeBytes(size),
    markerBit(rtpHeader.header.markerBit),
    frameType(rtpHeader.frameType),
    codec(kVideoCodecUnknown),
    isFirstPacket(rtpHeader.type.Video.isFirstPacket),
    completeNALU(kNaluComplete),
    insertStartCode(false),
    bits(false)
{
    CopyCodecSpecifics(rtpHeader.type.Video);
}
// Constructs a packet from raw fields (no RTP header available); the codec
// is left unknown and the frame type defaults to delta.
// Note: dataPtr is a non-owning pointer to the caller's payload buffer.
VCMPacket::VCMPacket(const WebRtc_UWord8* ptr, WebRtc_UWord32 size, WebRtc_UWord16 seq, WebRtc_UWord32 ts, bool mBit) :
    payloadType(0),
    timestamp(ts),
    seqNum(seq),
    dataPtr(ptr),
    sizeBytes(size),
    markerBit(mBit),
    frameType(kVideoFrameDelta),
    codec(kVideoCodecUnknown),
    isFirstPacket(false),
    completeNALU(kNaluComplete),
    insertStartCode(false),
    bits(false)
{}
// Maps the RTP-level video codec type to the VCM codec enum and stores it
// in this->codec. Unrecognized types map to kVideoCodecUnknown.
// Cleanup: removed the unused local copy of videoHeader.codecHeader (it
// was copied by value and never read).
void VCMPacket::CopyCodecSpecifics(const RTPVideoHeader& videoHeader)
{
    switch(videoHeader.codec)
    {
        case kRTPVideoVP8:
        {
            codec = kVideoCodecVP8;
            break;
        }
        case kRTPVideoI420:
        {
            codec = kVideoCodecI420;
            break;
        }
        default:
        {
            codec = kVideoCodecUnknown;
            break;
        }
    }
}
}

View File

@@ -0,0 +1,58 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_MODULES_VIDEO_CODING_PACKET_H_
#define WEBRTC_MODULES_VIDEO_CODING_PACKET_H_
#include "typedefs.h"
#include "module_common_types.h"
#include "jitter_buffer_common.h"
namespace webrtc
{
// A single media packet as seen by the jitter buffer: RTP-derived metadata
// plus a non-owning pointer to the payload bytes.
class VCMPacket
{
public:
    // Construct from a received RTP packet (codec derived from the header).
    VCMPacket(const WebRtc_UWord8* ptr,
              const WebRtc_UWord32 size,
              const WebRtcRTPHeader& rtpHeader);
    // Construct from raw fields; codec unknown, frame type defaults to delta.
    VCMPacket(const WebRtc_UWord8* ptr,
              WebRtc_UWord32 size,
              WebRtc_UWord16 seqNum,
              WebRtc_UWord32 timestamp,
              bool markerBit);
    WebRtc_UWord8 payloadType;
    WebRtc_UWord32 timestamp;
    WebRtc_UWord16 seqNum;
    const WebRtc_UWord8* dataPtr;        // Non-owning payload pointer.
    WebRtc_UWord32 sizeBytes;
    bool markerBit;
    FrameType frameType;
    webrtc::VideoCodecType codec;
    bool isFirstPacket;                 // Is this the first packet in a frame.
    VCMNaluCompleteness completeNALU;   // Default is kNaluComplete here;
                                        // kNaluIncomplete set by depacketizers.
    bool insertStartCode;               // True if a start code should be
                                        // inserted before this packet.
    bool bits;                          // The first bits of this packet are
                                        // zero and the first byte should be
                                        // ORed with the last packet of the
                                        // previous frame.
protected:
    // Maps the RTP-level codec type onto this->codec.
    void CopyCodecSpecifics(const RTPVideoHeader& videoHeader);
};
} // namespace webrtc
#endif // WEBRTC_MODULES_VIDEO_CODING_PACKET_H_

View File

@@ -0,0 +1,684 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "qm_select.h"
#include "internal_defines.h"
#include "qm_select_data.h"
#include "module_common_types.h"
#include "video_coding_defines.h"
#include "trace.h"
#include <math.h>
namespace webrtc {
// Constructs the quality-mode selector; owns the quality-mode object for
// its whole lifetime (deleted in the destructor) and starts fully reset.
VCMQmSelect::VCMQmSelect()
{
    _qm = new VCMQualityMode();
    Reset();
}
// Releases the owned quality-mode object.
VCMQmSelect::~VCMQmSelect()
{
    delete _qm;
}
// Restores the per-selection QM state (content metrics, aspect ratio,
// rate/image classification and user preference) to defaults.
void
VCMQmSelect::ResetQM()
{
    _motion.Reset();
    _spatial.Reset();
    _coherence.Reset();
    _stationaryMotion = 0;
    _aspectRatio = 1;
    _maxRateQM = 0;
    _imageType = 1;
    _userResolutionPref = 50; // Neutral preference
    _qm->Reset();
}
// Clears all per-interval rate accumulators and counters.
void
VCMQmSelect::ResetRates()
{
    _sumEncodedBytes = 0;
    _sumTargetRate = 0;
    _sumIncomingFrameRate = 0;
    _sumFrameRateMM = 0;
    _sumSeqRateMM = 0;
    _updateRateCnt = 0;
    _frameCnt = 0;
    _frameCntDelta = 0;
    _lowBufferCnt = 0;
}
// Full reset: down-sampling state, encoder operating point, and all
// QM/rate accumulators.
void
VCMQmSelect::Reset()
{
    _stateDecFactorSpatial = 1;
    _stateDecFactorTemp = 1;
    _bufferLevel = 0;
    _targetBitRate = 0;
    _incomingFrameRate = 0;
    _userFrameRate = 0;
    _perFrameBandwidth =0;
    ResetQM();
    ResetRates();
}
// Initialize after a reset of the encoder: records the new operating point
// (rate, frame rate, dimensions) and seeds the virtual buffer level.
// Returns VCM_PARAMETER_ERROR on invalid inputs, else VCM_OK.
WebRtc_Word32
VCMQmSelect::Initialize(float bitRate, float userFrameRate, WebRtc_UWord32 width, WebRtc_UWord32 height)
{
    // Guard against division by zero / degenerate dimensions below.
    if (userFrameRate == 0.0f || width == 0 || height == 0)
    {
        return VCM_PARAMETER_ERROR;
    }
    _targetBitRate = bitRate;
    _userFrameRate = userFrameRate;
    // Encoder width and height.
    _width = width;
    _height = height;
    // Initial virtual buffer level.
    _bufferLevel = INIT_BUFFER_LEVEL * _targetBitRate;
    if ( _incomingFrameRate == 0 )
    {
        _perFrameBandwidth = _targetBitRate / _userFrameRate;
        _incomingFrameRate = _userFrameRate;
    }
    else
    {
        // Take the average of the two estimates: due to delay in the encoder
        // picking up the new frame rate, userFrameRate is the new value and
        // _incomingFrameRate the old one (based on the previous ~1 sec).
        _perFrameBandwidth = 0.5 *( _targetBitRate / _userFrameRate + _targetBitRate / _incomingFrameRate );
    }
    _init = true;
    return VCM_OK;
}
// Sets the user preference for temporal vs. spatial resolution:
// 100 = favor temporal, 0 = favor spatial, 50 = neutral (selector decides).
WebRtc_Word32
VCMQmSelect::SetPreferences(WebRtc_Word8 resolPref)
{
    _userResolutionPref = resolPref;
    return VCM_OK;
}
// Update after every encoded frame: accumulates encoded bytes, updates the
// virtual buffer level and counts low-buffer occurrences.
// Cleanup: the `deltaFrame` local was only referenced by the disabled
// per-frame mismatch code below, producing an unused-variable warning; its
// computation was moved into the disabled block.
void
VCMQmSelect::UpdateEncodedSize(WebRtc_Word64 encodedSize, FrameType encodedFrameType)
{
    // Only used by the disabled per-frame mismatch code below.
    (void) encodedFrameType;
    // Update encoded size and frame count.
    _sumEncodedBytes += encodedSize;
    _frameCnt++;
    // Convert to kbits.
    float encodedSizeKbits = (float)((encodedSize * 8.0) / 1000.0);
    // Update the buffer level: _perFrameBandwidth is refreshed when the
    // encoder is updated, every ~1 sec.
    _bufferLevel += _perFrameBandwidth - encodedSizeKbits;
    // Per-frame rate mismatch (difference between actual encoded frame size
    // and per-frame bandwidth, for delta frames). A much stronger condition
    // on rate mismatch than _sumSeqRateMM. Not used in this version:
    /*
    const bool deltaFrame = (encodedFrameType != kVideoFrameKey &&
                             encodedFrameType != kVideoFrameGolden);
    if (deltaFrame)
    {
        _frameCntDelta++;
        if (encodedSizeKbits > 0)
            _sumFrameRateMM += (float) (fabs(encodedSizeKbits - _perFrameBandwidth) / encodedSizeKbits);
    }
    */
    // Count occurrences of low buffer level.
    if (_bufferLevel <= PERC_BUFFER_THR * INIT_BUFFER_LEVEL * _targetBitRate)
    {
        _lowBufferCnt++;
    }
}
// Update after SetTargetRates in MediaOpt (every ~1 sec): folds the
// previous interval's rates into the accumulators, then switches to the
// new target/frame rates and recomputes the per-frame bandwidth.
void
VCMQmSelect::UpdateRates(float targetBitRate, float avgSentBitRate, float incomingFrameRate)
{
    // Accumulate the OLD member values first: they are the rates the encoder
    // actually used during the previous ~1 sec, before this update.
    _sumTargetRate += _targetBitRate;
    _sumIncomingFrameRate += _incomingFrameRate;
    _updateRateCnt++;
    // Convert the measured average sent rate to kbps.
    float avgSentBitRatekbps = avgSentBitRate / 1000.0f;
    // Sequence rate mismatch: difference between the target rate the encoder
    // used (previous ~1 sec) and the average actual encoding rate now.
    // Samples beyond THRESH_SUM_MM are treated as outliers and skipped.
    if (fabs(_targetBitRate - avgSentBitRatekbps) < THRESH_SUM_MM && _targetBitRate > 0.0 )
        _sumSeqRateMM += (float) (fabs(_targetBitRate - avgSentBitRatekbps) / _targetBitRate );
    // Now switch to the rates the encoder will use for the current/next ~1 sec.
    _targetBitRate = targetBitRate;
    _incomingFrameRate = incomingFrameRate;
    // Average per-frame bandwidth for the next ~1 sec (0 if no frames).
    _perFrameBandwidth = 0.0f;
    if (_incomingFrameRate > 0.0f)
    {
        _perFrameBandwidth = _targetBitRate / _incomingFrameRate;
    }
}
WebRtc_Word32
VCMQmSelect::SelectQuality(const VideoContentMetrics* contentMetrics, VCMQualityMode** qm)
{
if (!_init)
{
return VCM_UNINITIALIZED;
}
if (contentMetrics == NULL)
{
Reset(); //default values
*qm = _qm;
return VCM_OK;
}
//Default settings
_qm->spatialWidthFact = 1;
_qm->spatialHeightFact = 1;
_qm->temporalFact = 1;
_contentMetrics = contentMetrics;
//Update native values
_nativeWidth = _contentMetrics->nativeWidth;
_nativeHeight = _contentMetrics->nativeHeight;
_nativeFrameRate = _contentMetrics->nativeFrameRate;
//Aspect ratio: used for selection of 1x2,2x1,2x2
_aspectRatio = (float)_width / (float)_height;
float avgTargetRate = 0.0f;
float avgIncomingFrameRate = 0.0f;
float ratioBufferLow = 0.0f;
float rateMisMatch = 0.0f;
if (_frameCnt > 0)
{
ratioBufferLow = (float)_lowBufferCnt / (float)_frameCnt;
}
if (_updateRateCnt > 0)
{
//use seq-rate mismatch for now
rateMisMatch = (float)_sumSeqRateMM / (float)_updateRateCnt;
//rateMisMatch = (float)_sumFrameRateMM / (float)_frameCntDelta;
//average target and incoming frame rates
avgTargetRate = (float)_sumTargetRate / (float)_updateRateCnt;
avgIncomingFrameRate = (float)_sumIncomingFrameRate / (float)_updateRateCnt;
}
//For qm selection below, may want to weight the average encoder rates with the current (for next ~1sec) rate values
//uniform average for now:
float w1 = 0.5f;
float w2 = 0.5f;
avgTargetRate = w1 * avgTargetRate + w2 * _targetBitRate;
avgIncomingFrameRate = w1 * avgIncomingFrameRate + w2 * _incomingFrameRate;
//Set the maximum transitional rate and image type: for up-sampled spatial dimensions
//Needed to get the transRate for going back up in spatial resolution (only 2x2 allowed in this version)
SetMaxRateForQM(2 * _width, 2 * _height);
WebRtc_UWord8 imageType2 = _imageType;
WebRtc_UWord32 maxRateQM2 = _maxRateQM;
//Set the maximum transitional rate and image type: for the input/encoder spatial dimensions
SetMaxRateForQM(_width, _height);
//Compute metric features
MotionNFD();
Spatial();
//
//Get transitional rate from table, based on image type and content class
//
//Get image size class: map _imageType to 2 classes
WebRtc_UWord8 imageClass = 1;
if (_imageType <= 3) imageClass = 0;
WebRtc_UWord8 contentClass = 3 * _motion.level + _spatial.level;
WebRtc_UWord8 tableIndex = imageClass * 9 + contentClass;
float scaleTransRate = kScaleTransRateQm[tableIndex];
// for transRate for going back up spatially
WebRtc_UWord8 imageClass2 = 1;
if (imageType2 <= 3) imageClass2 = 0;
WebRtc_UWord8 tableIndex2 = imageClass2 * 9 + contentClass;
float scaleTransRate2 = kScaleTransRateQm[tableIndex2];
//
WebRtc_UWord32 estimatedTransRateDown = (WebRtc_UWord32) (_incomingFrameRate * scaleTransRate * _maxRateQM / 30);
WebRtc_UWord32 estimatedTransRateUpT = (WebRtc_UWord32) (TRANS_RATE_SCALE_UP_TEMP * 2 * _incomingFrameRate * scaleTransRate * _maxRateQM / 30);
WebRtc_UWord32 estimatedTransRateUpS = (WebRtc_UWord32) (TRANS_RATE_SCALE_UP_SPATIAL * _incomingFrameRate * scaleTransRate2 * maxRateQM2 / 30);
//
//done with transitional rate
//
WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideo, -1,
"Content Metrics: Motion = %d , Spatial = %d, Est. Trans. BR = %d",
_motion.level, _spatial.level, estimatedTransRateDown);
//
//CHECK FOR GOING BACK UP IN RESOLUTION
//
bool selectedUp = false;
//Check if native has been spatially down-sampled
if (_stateDecFactorSpatial > 1)
{
//check conditions on frame_skip and rate_mismatch
if ( (avgTargetRate > estimatedTransRateUpS) &&
(ratioBufferLow < MAX_BUFFER_LOW) && (rateMisMatch < MAX_RATE_MM) )
{
//width/height scaled back up: setting 0 indicates scaling back to native
_qm->spatialHeightFact = 0;
_qm->spatialWidthFact = 0;
selectedUp = true;
}
}
//Check if native has been temporally down-sampled
if (_stateDecFactorTemp > 1)
{
if ( (avgTargetRate > estimatedTransRateUpT) &&
(ratioBufferLow < MAX_BUFFER_LOW) && (rateMisMatch < MAX_RATE_MM) )
{
//temporal scale back up: setting 0 indicates scaling back to native
_qm->temporalFact = 0;
selectedUp = true;
}
}
//leave QM if we selected to go back up in either spatial or temporal resolution
if (selectedUp == true)
{
//Update down-sampling state
//Note: only temp reduction by 2 is allowed
if (_qm->temporalFact == 0)
{
_stateDecFactorTemp = _stateDecFactorTemp / 2;
}
//Update down-sampling state
//Note: only spatial reduction by 2x2 is allowed
if (_qm->spatialHeightFact == 0 && _qm->spatialWidthFact == 0 )
{
_stateDecFactorSpatial = _stateDecFactorSpatial / 4;
}
*qm = _qm;
return VCM_OK;
}
//
//done with checking for going back up
//
//
//CHECK FOR RESOLUTION REDUCTION
//
//ST QM extraction if:
// (1) target rate is lower than transitional rate (with safety margin), or
// (2) frame skip is larger than threshold, or
// (3) rate mismatch is larger than threshold
if ( (avgTargetRate < estimatedTransRateDown ) || (ratioBufferLow > MAX_BUFFER_LOW)
|| (rateMisMatch > MAX_RATE_MM) )
{
WebRtc_UWord8 spatialFact = 1;
WebRtc_UWord8 tempFact = 1;
//Get the Action:
//Note: only consider spatial by 2x2 OR temporal reduction by 2 in this version
if (_motion.level == kLow && _spatial.level == kLow)
{
spatialFact = 1;
tempFact = 1;
}
else if (_motion.level == kLow && _spatial.level == kHigh)
{
spatialFact = 1;
tempFact = 2;
}
else if (_motion.level == kLow && _spatial.level == kDefault)
{
spatialFact = 1;
tempFact = 2;
}
else if (_motion.level == kHigh && _spatial.level == kLow)
{
spatialFact = 4;
tempFact = 1;
}
else if (_motion.level == kHigh && _spatial.level == kHigh)
{
spatialFact = 1;
tempFact = 2;
}
else if (_motion.level == kHigh && _spatial.level == kDefault)
{
spatialFact = 4;
tempFact = 1;
}
else if (_motion.level == kDefault && _spatial.level == kLow)
{
spatialFact = 4;
tempFact = 1;
}
else if (_motion.level == kDefault && _spatial.level == kHigh)
{
spatialFact = 1;
tempFact = 2;
}
else if (_motion.level == kDefault && _spatial.level == kDefault)
{
spatialFact = 1;
tempFact = 1;
}
//
switch(spatialFact)
{
case 4:
_qm->spatialWidthFact = 2;
_qm->spatialHeightFact = 2;
break;
case 2:
//default is 1x2 (H)
_qm->spatialWidthFact = 2;
_qm->spatialHeightFact = 1;
//Select 1x2,2x1, or back to 2x2: depends on prediction errors, aspect ratio, and horizontalness of motion
//Note: directional selection not used in this version
//SelectSpatialDirectionMode((float) estimatedTransRateDown);
break;
default:
_qm->spatialWidthFact = 1;
_qm->spatialHeightFact = 1;
break;
}
_qm->temporalFact = tempFact;
//Sanity check on ST QM selection: override the settings for too small image size and frame rate
//Also check limit the current down-sampling state
//No spatial sampling if image size is too small (QCIF)
if ( (_width * _height) <= MIN_IMAGE_SIZE || _stateDecFactorSpatial >= MAX_SPATIAL_DOWN_FACT)
{
_qm->spatialWidthFact = 1;
_qm->spatialHeightFact = 1;
}
//No frame rate reduction below some point: use the (average) incoming frame rate
if ( avgIncomingFrameRate <= MIN_FRAME_RATE_QM || _stateDecFactorTemp >= MAX_TEMP_DOWN_FACT)
{
_qm->temporalFact = 1;
}
//No down-sampling if current spatial-temporal downsampling state is above threshold
if (_stateDecFactorTemp * _stateDecFactorSpatial >= MAX_SPATIAL_TEMP_DOWN_FACT)
{
_qm->spatialWidthFact = 1;
_qm->spatialHeightFact = 1;
_qm->temporalFact = 1;
}
//
//done with sanity checks on ST QM selection
//
//Note: to disable spatial down-sampling
// _qm->spatialWidthFact = 1;
// _qm->spatialHeightFact = 1;
//Update down-sampling states
_stateDecFactorSpatial = _stateDecFactorSpatial * _qm->spatialWidthFact * _qm->spatialHeightFact;
_stateDecFactorTemp = _stateDecFactorTemp * _qm->temporalFact;
}
else
{
*qm = _qm;
return VCM_OK;
}
// done with checking for resolution reduction
*qm = _qm;
return VCM_OK;
}
// Chooses the direction of spatial down-sampling by writing
// _qm->spatialWidthFact / _qm->spatialHeightFact. If none of the
// conditions below fire, the factors already set by the caller
// (default 1x2 horizontal) are left unchanged. Always returns VCM_OK.
// transRate: the (down) transitional rate the target rate is compared to.
WebRtc_Word32
VCMQmSelect::SelectSpatialDirectionMode(float transRate)
{
//Default is 1x2 (H)
//For bit rates well below transitional rate, we select 2x2
if ( _targetBitRate < transRate * RATE_RED_SPATIAL_2X2 )
{
_qm->spatialWidthFact = 2;
_qm->spatialHeightFact = 2;
return VCM_OK;
}
//Otherwise check prediction errors, aspect ratio, horizonalness of motion
float spatialErr = _contentMetrics->spatialPredErr;
float spatialErrH = _contentMetrics->spatialPredErrH;
float spatialErrV = _contentMetrics->spatialPredErrV;
//favor 1x2 if aspect_ratio is 16:9
if (_aspectRatio >= 16.0f / 9.0f )
{
//check if 1x2 has lowest prediction error
if (spatialErrH < spatialErr && spatialErrH < spatialErrV)
{
// Keep the caller's 1x2 default.
return VCM_OK;
}
}
//check for 2x2 selection: favor 2x2 over 1x2 and 2x1
if (spatialErr < spatialErrH * (1.0f + SPATIAL_ERR_2X2_VS_H) &&
spatialErr < spatialErrV * (1.0f + SPATIAL_ERR_2X2_VS_V))
{
_qm->spatialWidthFact = 2;
_qm->spatialHeightFact = 2;
return VCM_OK;
}
//check for 2x1 selection:
if (spatialErrV < spatialErrH * (1.0f - SPATIAL_ERR_V_VS_H) &&
spatialErrV < spatialErr * (1.0f - SPATIAL_ERR_2X2_VS_V))
{
_qm->spatialWidthFact = 1;
_qm->spatialHeightFact = 2;
return VCM_OK;
}
// No condition matched: keep the caller's 1x2 default.
return VCM_OK;
}
// Computes the motion-coherence feature: horizontalness of the motion
// field relative to the cluster distortion, saturated at COH_MAX, and
// classifies it as low/high against COHERENCE_THR.
void
VCMQmSelect::Coherence()
{
    const float horizontalness = _contentMetrics->motionHorizontalness;
    const float clusterDistortion = _contentMetrics->motionClusterDistortion;
    // Ratio is only meaningful for a positive distortion; otherwise the
    // coherence saturates at its maximum.
    float coherenceValue = COH_MAX;
    if (clusterDistortion > 0.)
    {
        coherenceValue = VCM_MIN(COH_MAX, horizontalness / clusterDistortion);
    }
    _coherence.value = coherenceValue;
    // Two-level classification only (no kDefault band for this feature).
    _coherence.level = (coherenceValue < COHERENCE_THR) ? kLow : kHigh;
}
// Computes the motion feature from the NFD-based metric
// (motionMagnitudeNZ) and classifies it into low/high/default using the
// NFD-specific thresholds.
void
VCMQmSelect::MotionNFD()
{
    const float magnitude = _contentMetrics->motionMagnitudeNZ;
    _motion.value = magnitude;
    // Three-level classification: values inside [LOW, HIGH] map to default.
    if (magnitude > HIGH_MOTION_NFD)
    {
        _motion.level = kHigh;
    }
    else if (magnitude < LOW_MOTION_NFD)
    {
        _motion.level = kLow;
    }
    else
    {
        _motion.level = kDefault;
    }
}
// Computes the motion feature from the motion-vector metrics: the
// fraction of moving blocks weighted (equally, for now) by the average
// non-zero motion magnitude, then classifies it into low/high/default.
void
VCMQmSelect::Motion()
{
    const float zeroMotionSize = _contentMetrics->sizeZeroMotion;
    const float nonZeroMagnitude = _contentMetrics->motionMagnitudeNZ;
    _motion.value = (1.0f - zeroMotionSize) * nonZeroMagnitude;
    _stationaryMotion = false;
    // Stabilize: the non-zero magnitude can be large when only a handful
    // of blocks actually move, so treat an almost-entirely-static frame
    // as stationary with zero motion.
    if (zeroMotionSize > HIGH_ZERO_MOTION_SIZE)
    {
        _stationaryMotion = true;
        _motion.value = 0.0f;
    }
    // Three-level classification.
    if (_motion.value > HIGH_MOTION)
    {
        _motion.level = kHigh;
    }
    else if (_motion.value < LOW_MOTION)
    {
        _motion.level = kLow;
    }
    else
    {
        _motion.level = kDefault;
    }
}
// Computes the spatial texture feature as the mean of the three spatial
// prediction errors and classifies it into low/high/default. Thresholds
// are scaled down for HD content (image types above 3), whose higher
// pixel correlation naturally lowers the prediction error.
void
VCMQmSelect::Spatial()
{
    const float err = _contentMetrics->spatialPredErr;
    const float errHoriz = _contentMetrics->spatialPredErrH;
    const float errVert = _contentMetrics->spatialPredErrV;
    _spatial.value = (err + errHoriz + errVert) / 3.0f;
    float scale = 1.0f;
    if (_imageType > 3)
    {
        // Kept as a stand-alone assignment: the SCALE_TEXTURE_HD macro
        // definition carries its own statement terminator.
        scale = (float)SCALE_TEXTURE_HD;
    }
    if (_spatial.value < scale * LOW_TEXTURE)
    {
        _spatial.level = kLow;
    }
    else if (_spatial.value > scale * HIGH_TEXTURE)
    {
        _spatial.level = kHigh;
    }
    else
    {
        _spatial.level = kDefault;
    }
}
// Maps the frame dimensions to an image-type class (0 = smallest/QCIF-like
// up to 6 = 1080HD) by comparing the pixel count against the standard
// frame-size boundaries in kFrameSizeTh, then looks up the matching
// maximum (saturation) rate for the transitional-rate computation.
// Side effects: sets _imageType and _maxRateQM. Always returns VCM_OK.
//
// Rewritten from a 7-branch if/else ladder that duplicated the
// kFrameSizeTh table into a single data-driven scan of the table.
WebRtc_Word32
VCMQmSelect::SetMaxRateForQM(WebRtc_UWord32 width, WebRtc_UWord32 height)
{
    const WebRtc_UWord32 imageSize = width * height;
    // kFrameSizeTh holds 6 ascending boundaries separating the 7 classes;
    // the first boundary the size falls under gives the class, otherwise
    // the size belongs to the largest class.
    WebRtc_UWord8 imageType = 6;
    for (WebRtc_UWord8 i = 0; i < 6; i++)
    {
        if (imageSize < kFrameSizeTh[i])
        {
            imageType = i;
            break;
        }
    }
    _imageType = imageType;
    // Max rate (kbps, for 30fps) based on image size class.
    _maxRateQM = kMaxRateQm[_imageType];
    return VCM_OK;
}
}

View File

@@ -0,0 +1,166 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_MODULES_VIDEO_CODING_QM_SELECT_H_
#define WEBRTC_MODULES_VIDEO_CODING_QM_SELECT_H_
#include "typedefs.h"
#include "common_types.h"
/************************/
/* Quality Modes */
/**********************/
namespace webrtc
{
struct VideoContentMetrics;
// Spatial/temporal down-sampling action selected by VCMQmSelect.
// Factors of 1 mean "no change"; VCMQmSelect uses a factor of 0 to
// signal "revert to the native (original) resolution / frame rate".
struct VCMQualityMode
{
VCMQualityMode():spatialWidthFact(1), spatialHeightFact(1), temporalFact(1){}
// Restore the default "no down-sampling" state.
void Reset()
{
spatialWidthFact = 1;
spatialHeightFact = 1;
temporalFact = 1;
}
WebRtc_UWord16 spatialWidthFact; // horizontal down-sampling factor
WebRtc_UWord16 spatialHeightFact; // vertical down-sampling factor
WebRtc_UWord16 temporalFact; // frame-rate down-sampling factor
};
// Three-level classification used for the content features.
enum VCMMagValues
{
kLow,
kHigh,
kDefault //default do nothing mode
};
// One content feature: its raw value plus its low/high/default level.
struct VCMContFeature
{
VCMContFeature(): value(0.0f), level(kDefault){}
void Reset()
{
value = 0.0f;
level = kDefault;
}
float value;
VCMMagValues level;
};
// Quality-mode (QM) selector: combines per-frame content metrics (motion,
// spatial texture), averaged over time, with encoder rate statistics to
// decide whether the stream should be down-sampled spatially and/or
// temporally, or scaled back up toward its native resolution/frame rate.
class VCMQmSelect
{
public:
VCMQmSelect();
~VCMQmSelect();
// Initialize with the target rate, user frame rate and frame dimensions.
WebRtc_Word32 Initialize(float bitRate, float userFrameRate, WebRtc_UWord32 width, WebRtc_UWord32 height);
// Allow the user to set preferences: favor frame rate/resolution
WebRtc_Word32 SetPreferences(WebRtc_Word8 resolPref);
// Extract ST (spatial-temporal) QM behavior and make decision
// Inputs: Content Metrics per frame (averaged over time)
// qm: Reference to the quality modes pointer
WebRtc_Word32 SelectQuality(const VideoContentMetrics* contentMetrics, VCMQualityMode** qm);
// Update QMselect with actual bit rate (size of the latest encoded frame) and frame type
// -> update buffer level and frame-mismatch
void UpdateEncodedSize(WebRtc_Word64 encodedSize, FrameType encodedFrameType);
// Update QM with new rates from SetTargetRates
void UpdateRates(float targetBitRate, float avgSentRate, float incomingFrameRate);
// Select the 1x2, 2x1, or 2x2 spatial down-sampling mode
WebRtc_Word32 SelectSpatialDirectionMode(float transRate);
// Reset values prior to QMSelect
void ResetQM();
// Reset rate quantities and counter values after every QMSelect call
void ResetRates();
// Reset all
void Reset();
private:
// Compute spatial texture magnitude and level
void Spatial();
// Compute motion magnitude and level
void Motion();
// Compute motion magnitude and level for NFD metric
void MotionNFD();
// Compute coherence magnitude and level
void Coherence();
// Set the max rate for QM selection
WebRtc_Word32 SetMaxRateForQM(WebRtc_UWord32 width, WebRtc_UWord32 height);
// Content Data
const VideoContentMetrics* _contentMetrics;
// Encoder stats/rate-control metrics
float _targetBitRate;
float _userFrameRate;
float _incomingFrameRate;
float _perFrameBandwidth;
float _bufferLevel;
float _sumTargetRate;
float _sumIncomingFrameRate;
float _sumSeqRateMM;
float _sumFrameRateMM;
WebRtc_Word64 _sumEncodedBytes;
//Encoder and native frame sizes
WebRtc_UWord32 _width;
WebRtc_UWord32 _height;
WebRtc_UWord32 _nativeWidth;
WebRtc_UWord32 _nativeHeight;
WebRtc_UWord8 _stateDecFactorSpatial; // accumulated spatial down-sampling factor
WebRtc_UWord32 _nativeFrameRate;
WebRtc_UWord8 _stateDecFactorTemp; // accumulated temporal down-sampling factor
//Counters
WebRtc_UWord32 _frameCnt;
WebRtc_UWord32 _frameCntDelta;
WebRtc_UWord32 _updateRateCnt;
WebRtc_UWord32 _lowBufferCnt;
//Content L/M/H values
VCMContFeature _motion;
VCMContFeature _spatial;
VCMContFeature _coherence;
bool _stationaryMotion; // true when nearly all blocks have zero motion
//aspect ratio
float _aspectRatio;
//Max rate to saturate the transitionalRate
WebRtc_UWord32 _maxRateQM;
WebRtc_UWord8 _imageType; // image-size class, 0 (smallest) .. 6 (1080HD)
//User preference for resolution or qmax change
WebRtc_UWord8 _userResolutionPref;
bool _init; // set once Initialize() has completed
VCMQualityMode* _qm;
};
} // namespace webrtc
#endif // WEBRTC_MODULES_VIDEO_CODING_QM_SELECT_H_

View File

@@ -0,0 +1,144 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_MODULES_VIDEO_CODING_SOURCE_QM_SELECT_DATA_H_
#define WEBRTC_MODULES_VIDEO_CODING_SOURCE_QM_SELECT_DATA_H_
/***************************************************************
*QMSelectData.h
* This file includes parameters used by the Quality Modes selection process
****************************************************************/
#include "typedefs.h"
namespace webrtc
{
// Initial buffer level in seconds; should correspond to the wrapper settings.
#define INIT_BUFFER_LEVEL 0.5
//
// PARAMETERS FOR QM SELECTION
//
// Fraction of the (max) buffer size below which the level is considered
// too low (close to underflow).
#define PERC_BUFFER_THR  0.10
// Threshold on the rate mismatch.
#define MAX_RATE_MM  0.5
// Threshold on the occurrences of low buffer levels.
#define MAX_BUFFER_LOW 0.5
// Scale factors on the transitional rate for going back up in resolution.
#define TRANS_RATE_SCALE_UP_SPATIAL 1.25
#define TRANS_RATE_SCALE_UP_TEMP 1.25
// Maximum possible transitional rate per image class (kbps, for 30fps).
const WebRtc_UWord16 kMaxRateQm[7] = {
    100,  // QCIF
    500,  // CIF
    800,  // VGA
    1500, // 4CIF
    2000, // 720 HD 4:3,
    2500, // 720 HD 16:9
    3000  // 1080HD
};
// Scale for the transitional rate, indexed by content class:
// contentClass = 3 * motionLevel + spatialLevel, with the enum order
// L(kLow) = 0, H(kHigh) = 1, D(kDefault) = 2 -- see
// VCMQmSelect::SelectQuality.
// NOTE(review): under that indexing, entry 7 of each group is (D, H) and
// entry 8 is (D, D), but the original labels below list (D, D) before
// (D, H). Either the labels or the 0.625f/0.25f values are swapped --
// values kept as-is; confirm intent before relabeling.
const float kScaleTransRateQm[18] = {
    // 4CIF and lower
    0.25f,  // L, L
    0.75f,  // L, H
    0.75f,  // L, D
    0.75f,  // H, L
    0.50f,  // H, H
    0.50f,  // H, D
    0.50f,  // D, L
    0.625f, // D, D
    0.25f,  // D, H
    // over 4CIF: WHD, HD
    0.25f,  // L, L
    0.75f,  // L, H
    0.75f,  // L, D
    0.75f,  // H, L
    0.50f,  // H, H
    0.50f,  // H, D
    0.50f,  // D, L
    0.625f, // D, D
    0.25f   // D, H
};
// Control the total amount of down-sampling allowed.
#define MAX_SPATIAL_DOWN_FACT 4
#define MAX_TEMP_DOWN_FACT 4
#define MAX_SPATIAL_TEMP_DOWN_FACT 8
//
//
//
// PARAMETERS FOR SETTING LOW/HIGH VALUES OF METRICS:
//
// Threshold to determine if there is a high amount of zero motion.
#define HIGH_ZERO_MOTION_SIZE 0.95
// Motion thresholds, for the motion-vector based metric:
// motion = size_nz * magn_nz.
#define HIGH_MOTION 0.7
#define LOW_MOTION 0.4
// Motion thresholds, for the NFD-based metric.
#define HIGH_MOTION_NFD 0.075
#define LOW_MOTION_NFD 0.04
// Spatial prediction-error thresholds, applied on min(2x2, 1x2, 2x1).
#define HIGH_TEXTURE 0.035
#define LOW_TEXTURE 0.025
// Correction factor to reduce the texture thresholds for HD scenes, since
// the higher correlation in HD content means a lower spatial prediction
// error. BUG FIX: the original definition carried a stray trailing ';',
// which would break any expression-context use of the macro; removed.
#define SCALE_TEXTURE_HD 0.9
// Thresholds for distortion and horizontalness, applied on the product
// horiz_nz / dist_nz.
#define COHERENCE_THR  1.0
#define COH_MAX 10
//
//
// Fractional reduction of the transitional bitrate below which 2x2 is
// selected over 1x2/2x1.
#define RATE_RED_SPATIAL_2X2 0.6
#define SPATIAL_ERR_2X2_VS_H 0.1 // margin to favor 2x2 over H (1x2)
#define SPATIAL_ERR_2X2_VS_V 0.1 // margin to favor 2x2 over V
#define SPATIAL_ERR_V_VS_H  0.1  // margin to favor H over V
// Minimum image size for spatial mode selection: no spatial down-sampling
// if the input size is <= MIN_IMAGE_SIZE.
#define MIN_IMAGE_SIZE  25344 // 176*144
// Minimum frame rate for temporal mode: no frame-rate reduction if the
// (average) incoming frame rate is <= MIN_FRAME_RATE_QM.
#define MIN_FRAME_RATE_QM  8
// Avoid outliers in the sequence rate mismatch.
#define THRESH_SUM_MM 1000
// Pixel-count boundaries to the closest standard frame size, used to
// classify the image type (see VCMQmSelect::SetMaxRateForQM).
const WebRtc_UWord32 kFrameSizeTh[6] = {
    63360,   // between 176*144 and 352*288
    204288,  // between 352*288 and 640*480
    356352,  // between 640*480 and 704*576
    548352,  // between 704*576 and 960*720
    806400,  // between 960*720 and 1280*720
    1497600, // between 1280*720 and 1920*1080
};
} // namespace webrtc
#endif // WEBRTC_MODULES_VIDEO_CODING_SOURCE_QM_SELECT_DATA_H_

View File

@@ -0,0 +1,472 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "video_coding.h"
#include "trace.h"
#include "encoded_frame.h"
#include "internal_defines.h"
#include "receiver.h"
#include "tick_time.h"
#include <assert.h>
namespace webrtc {
// Constructs a receiver bound to the shared timing module (held by
// reference, not owned). The critical section and the render-wait event
// are heap-allocated here and released in the destructor. "master"
// distinguishes the primary receiver from the dual (slave) receiver.
// The receiver starts passive with NACK disabled.
VCMReceiver::VCMReceiver(VCMTiming& timing,
WebRtc_Word32 vcmId,
WebRtc_Word32 receiverId,
bool master)
:
_critSect(*CriticalSectionWrapper::CreateCriticalSection()),
_vcmId(vcmId),
_receiverId(receiverId),
_master(master),
_jitterBuffer(vcmId, receiverId, master),
_timing(timing),
_renderWaitEvent(*new VCMEvent()),
_nackMode(kNoNack),
_state(kPassive)
{
}
// Wakes any thread blocked in FrameForRendering's wait before tearing
// down the event and critical section allocated in the constructor
// (both are held by reference, hence the delete-through-address form).
VCMReceiver::~VCMReceiver()
{
_renderWaitEvent.Set();
delete &_renderWaitEvent;
delete &_critSect;
}
// (Re)initializes the receiver: an idle jitter buffer is started, a
// running one is flushed so no stale frames survive re-initialization.
// The primary receiver starts receiving; a dual (slave) receiver stays
// passive with NACK disabled until explicitly enabled.
WebRtc_Word32
VCMReceiver::Initialize()
{
    CriticalSectionScoped cs(_critSect);
    if (_jitterBuffer.Running())
    {
        _jitterBuffer.Flush();
    }
    else
    {
        _jitterBuffer.Start();
    }
    _renderWaitEvent.Reset();
    if (!_master)
    {
        _state = kPassive;
        SetNackMode(kNoNack);
    }
    else
    {
        _state = kReceiving;
    }
    return VCM_OK;
}
// Forwards the latest round-trip-time estimate (ms) to the jitter buffer.
void VCMReceiver::UpdateRtt(WebRtc_UWord32 rtt)
{
_jitterBuffer.UpdateRtt(rtt);
}
// Hands one incoming packet to the jitter buffer.
// frameWidth/frameHeight, when non-zero, set the encoded size on the
// frame the packet belongs to. Returns VCM_OK on success (old packets
// are dropped silently with VCM_OK), a negative error code when no
// frame buffer could be obtained, or VCM_JITTER_BUFFER_ERROR when the
// jitter buffer rejects the packet.
//
// Change vs. original: removed the unused local `emptyFrame`
// (its intent is covered by the `buffer->Length() == 0` check below).
WebRtc_Word32
VCMReceiver::InsertPacket(const VCMPacket& packet,
                          WebRtc_UWord16 frameWidth,
                          WebRtc_UWord16 frameHeight)
{
    // Find an empty frame for this packet.
    VCMEncodedFrame *buffer = NULL;
    const WebRtc_Word32 error = _jitterBuffer.GetFrame(packet, buffer);
    if (error == VCM_OLD_PACKET_ERROR)
    {
        // Packets older than what the jitter buffer tracks are harmless.
        return VCM_OK;
    }
    else if (error < 0)
    {
        return error;
    }
    {
        CriticalSectionScoped cs(_critSect);
        if (frameWidth && frameHeight)
        {
            buffer->SetEncodedSize(static_cast<WebRtc_UWord32>(frameWidth),
                                   static_cast<WebRtc_UWord32>(frameHeight));
        }
        if (_master)
        {
            // Only trace the primary receiver to make it possible
            // to parse and plot the trace file.
            WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCoding, VCMId(_vcmId, _receiverId),
                         "Packet seqNo %u of frame %u at %u",
                         packet.seqNum, packet.timestamp,
                         MaskWord64ToUWord32(VCMTickTime::MillisecondTimestamp()));
        }
        // Sanity-check the render time derived from this packet's
        // timestamp; pathological values mean the stream or the timing
        // changed, in which case both are reset.
        const WebRtc_Word64 nowMs = VCMTickTime::MillisecondTimestamp();
        WebRtc_Word64 renderTimeMs = _timing.RenderTimeMs(packet.timestamp, nowMs);
        if(renderTimeMs < 0)
        {
            // Render time error. Assume that this is due to some change in
            // the incoming video stream and reset the JB and the timing.
            _jitterBuffer.Flush();
            _timing.Reset();
            return VCM_OK;
        }
        else if (renderTimeMs < nowMs - kMaxVideoDelayMs)
        {
            WEBRTC_TRACE(webrtc::kTraceWarning, webrtc::kTraceVideoCoding, VCMId(_vcmId, _receiverId),
                         "This frame should have been rendered more than %u ms ago."
                         "Flushing jitter buffer and resetting timing.", kMaxVideoDelayMs);
            _jitterBuffer.Flush();
            _timing.Reset();
            return VCM_OK;
        }
        else if (_timing.TargetVideoDelay() > kMaxVideoDelayMs)
        {
            WEBRTC_TRACE(webrtc::kTraceWarning, webrtc::kTraceVideoCoding, VCMId(_vcmId, _receiverId),
                         "More than %u ms target delay. Flushing jitter buffer and resetting timing.",
                         kMaxVideoDelayMs);
            _jitterBuffer.Flush();
            _timing.Reset();
            return VCM_OK;
        }
        // First packet received belonging to this frame.
        if (buffer->Length() == 0)
        {
            // Note: intentionally shadows the outer nowMs with a fresh
            // sample for the frame's render-time computation.
            const WebRtc_Word64 nowMs = VCMTickTime::MillisecondTimestamp();
            if (_master)
            {
                // Only trace the primary receiver to make it possible to parse and plot the trace file.
                WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCoding, VCMId(_vcmId, _receiverId),
                             "First packet of frame %u at %u", packet.timestamp,
                             MaskWord64ToUWord32(nowMs));
            }
            renderTimeMs = _timing.RenderTimeMs(packet.timestamp, nowMs);
            if (renderTimeMs >= 0)
            {
                buffer->SetRenderTime(renderTimeMs);
            }
            else
            {
                // Fall back to "render now" on a render-time error.
                buffer->SetRenderTime(nowMs);
            }
        }
        // Insert packet into jitter buffer
        const VCMFrameBufferEnum ret = _jitterBuffer.InsertPacket(buffer, packet);
        if (ret < 0)
        {
            WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceVideoCoding, VCMId(_vcmId, _receiverId),
                         "Error inserting packet seqNo=%u, timeStamp=%u",
                         packet.seqNum, packet.timestamp);
            return VCM_JITTER_BUFFER_ERROR;
        }
    }
    return VCM_OK;
}
// Fetches the next frame for decoding, blocking at most maxWaitTimeMs.
// nextRenderTimeMs is set to the frame's render time (-1 when no
// timestamp is available yet). renderTiming selects the fetch path:
// true takes the decode-timing path (private FrameForDecoding); false
// takes FrameForRendering, which itself blocks until the frame's render
// time. When a dual receiver is supplied, its state is updated with
// every frame returned. Returns NULL when no frame is available in time.
VCMEncodedFrame*
VCMReceiver::FrameForDecoding(WebRtc_UWord16 maxWaitTimeMs, WebRtc_Word64& nextRenderTimeMs, bool renderTiming, VCMReceiver* dualReceiver)
{
// No need to enter the critical section here since the jitter buffer
// is thread-safe.
FrameType incomingFrameType = kVideoFrameDelta;
nextRenderTimeMs = -1;
const WebRtc_Word64 startTimeMs = VCMTickTime::MillisecondTimestamp();
WebRtc_Word64 ret = _jitterBuffer.GetNextTimeStamp(maxWaitTimeMs,
incomingFrameType,
nextRenderTimeMs);
if (ret < 0)
{
// No timestamp in jitter buffer at the moment
return NULL;
}
const WebRtc_UWord32 timeStamp = static_cast<WebRtc_UWord32>(ret);
// Update the timing
_timing.SetRequiredDelay(_jitterBuffer.GetEstimatedJitterMS());
_timing.UpdateCurrentDelay(timeStamp);
// Reduce the remaining wait budget by the time already spent waiting
// for a timestamp above (clamped at zero).
const WebRtc_Word32 tempWaitTime = maxWaitTimeMs -
static_cast<WebRtc_Word32>(VCMTickTime::MillisecondTimestamp() - startTimeMs);
WebRtc_UWord16 newMaxWaitTime = static_cast<WebRtc_UWord16>(VCM_MAX(tempWaitTime, 0));
VCMEncodedFrame* frame = NULL;
if (renderTiming)
{
frame = FrameForDecoding(newMaxWaitTime, nextRenderTimeMs, dualReceiver);
}
else
{
frame = FrameForRendering(newMaxWaitTime, nextRenderTimeMs, dualReceiver);
}
if (frame != NULL)
{
bool retransmitted = false;
const WebRtc_Word64 lastPacketTimeMs =
_jitterBuffer.LastPacketTime(frame, retransmitted);
if (lastPacketTimeMs >= 0 && !retransmitted)
{
// We don't want to include timestamps which have suffered from retransmission
// here, since we compensate with extra retransmission delay within
// the jitter estimate.
_timing.IncomingTimestamp(timeStamp, lastPacketTimeMs);
}
if (dualReceiver != NULL)
{
dualReceiver->UpdateState(*frame);
}
}
return frame;
}
// Decode-timing fetch path: prefers a complete frame, waiting up to
// min(decode-deadline wait, maxWaitTimeMs) for one; once the deadline
// has passed, falls back to an incomplete frame. Before handing out a
// frame that would break the decodable sequence, the jitter buffer
// state is copied to an enabled-but-passive dual receiver so the losses
// can be recovered there. Returns NULL while there is still time to
// wait for a complete frame.
VCMEncodedFrame*
VCMReceiver::FrameForDecoding(WebRtc_UWord16 maxWaitTimeMs,
WebRtc_Word64 nextRenderTimeMs,
VCMReceiver* dualReceiver)
{
// How long can we wait until we must decode the next frame
WebRtc_UWord32 waitTimeMs = _timing.MaxWaitingTime(nextRenderTimeMs,
VCMTickTime::MillisecondTimestamp());
// Try to get a complete frame from the jitter buffer
VCMEncodedFrame* frame = _jitterBuffer.GetCompleteFrameForDecoding(0);
if (frame == NULL && maxWaitTimeMs == 0 && waitTimeMs > 0)
{
// If we're not allowed to wait for frames to get complete we must calculate if
// it's time to decode, and if it's not we will just return for now.
return NULL;
}
if (frame == NULL)
{
// Wait for a complete frame
waitTimeMs = VCM_MIN(waitTimeMs, maxWaitTimeMs);
frame = _jitterBuffer.GetCompleteFrameForDecoding(waitTimeMs);
}
if (frame == NULL)
{
// Get an incomplete frame
if (_timing.MaxWaitingTime(nextRenderTimeMs, VCMTickTime::MillisecondTimestamp()) > 0)
{
// Still time to wait for a complete frame
return NULL;
}
// No time left to wait, we must decode this frame now.
// Dual receiver is "enabled" when it exists, is passive, and has
// infinite NACK -- only then can it take over loss recovery.
const bool dualReceiverEnabledAndPassive = dualReceiver != NULL &&
dualReceiver->State() == kPassive &&
dualReceiver->NackMode() == kNackInfinite;
if (dualReceiverEnabledAndPassive && !_jitterBuffer.CompleteSequenceWithNextFrame())
{
// Jitter buffer state might get corrupt with this frame.
dualReceiver->CopyJitterBufferStateFromReceiver(*this);
}
frame = _jitterBuffer.GetFrameForDecoding();
}
return frame;
}
// Render-timing fetch path: used when the frame is rendered as soon as
// it is decoded, so this function holds the frame back (via the render
// wait event, which the destructor signals on teardown) until just
// before its render time. Returns NULL if the allowed wait is shorter
// than the required wait.
VCMEncodedFrame*
VCMReceiver::FrameForRendering(WebRtc_UWord16 maxWaitTimeMs,
WebRtc_Word64 nextRenderTimeMs,
VCMReceiver* dualReceiver)
{
// How long MUST we wait until we must decode the next frame. This is different for the case
// where we have a renderer which can render at a specified time. Here we must wait as long
// as possible before giving the frame to the decoder, which will render the frame as soon
// as it has been decoded.
WebRtc_UWord32 waitTimeMs = _timing.MaxWaitingTime(nextRenderTimeMs,
VCMTickTime::MillisecondTimestamp());
if (maxWaitTimeMs < waitTimeMs)
{
// If we're not allowed to wait until the frame is supposed to be rendered
// we will have to return NULL for now.
return NULL;
}
// Wait until it's time to render
_renderWaitEvent.Wait(waitTimeMs);
// Get a complete frame if possible
VCMEncodedFrame* frame = _jitterBuffer.GetCompleteFrameForDecoding(0);
if (frame == NULL)
{
// Get an incomplete frame
// Same dual-receiver hand-over as in the decode-timing path.
const bool dualReceiverEnabledAndPassive = dualReceiver != NULL &&
dualReceiver->State() == kPassive &&
dualReceiver->NackMode() == kNackInfinite;
if (dualReceiverEnabledAndPassive && !_jitterBuffer.CompleteSequenceWithNextFrame())
{
// Jitter buffer state might get corrupt with this frame.
dualReceiver->CopyJitterBufferStateFromReceiver(*this);
}
frame = _jitterBuffer.GetFrameForDecoding();
}
return frame;
}
// Hands a frame obtained from FrameForDecoding() back to the jitter
// buffer once the caller is done with it.
void
VCMReceiver::ReleaseFrame(VCMEncodedFrame* frame)
{
_jitterBuffer.ReleaseFrame(frame);
}
// Fetches the current receive-side frame-rate and bit-rate estimates
// from the jitter buffer; the bit rate is reported to callers in kbps.
WebRtc_Word32
VCMReceiver::ReceiveStatistics(WebRtc_UWord32& bitRate, WebRtc_UWord32& frameRate)
{
    const WebRtc_Word32 status = _jitterBuffer.GetUpdate(frameRate, bitRate);
    bitRate /= 1000; // convert to kbps for the caller
    return status;
}
// Fills frameCount with the jitter buffer's delta/key frame statistics.
WebRtc_Word32
VCMReceiver::ReceivedFrameCount(VCMFrameCount& frameCount) const
{
return _jitterBuffer.GetFrameStatistics(frameCount.numDeltaFrames,
frameCount.numKeyFrames);
}
// Switches the NACK mode and propagates it to the jitter buffer (NACK
// is active only in kNackInfinite mode). A dual (slave) receiver always
// drops back to the passive state when its NACK mode changes.
void
VCMReceiver::SetNackMode(VCMNackMode nackMode)
{
    CriticalSectionScoped cs(_critSect);
    _nackMode = nackMode;
    _jitterBuffer.SetNackStatus(_nackMode == kNackInfinite);
    if (!_master)
    {
        _state = kPassive; // The dual decoder defaults to passive
    }
}
// Thread-safe accessor for the current NACK mode.
VCMNackMode
VCMReceiver::NackMode() const
{
CriticalSectionScoped cs(_critSect);
return _nackMode;
}
// Copies the jitter buffer's current NACK list into nackList. On input,
// size is the capacity of nackList; on output, the number of entries
// written. Returns kNackKeyFrameRequest when the jitter buffer returns
// the special NULL/0xffff combination that asks for a key frame
// instead, kNackNeedMoreMemory when the caller's buffer is too small
// (size is then set to the required capacity), and kNackOk otherwise.
VCMNackStatus
VCMReceiver::NackList(WebRtc_UWord16* nackList, WebRtc_UWord16& size)
{
// NOTE(review): 'extended' is received from the jitter buffer but
// currently unused here.
bool extended = false;
WebRtc_UWord16 nackListSize = 0;
WebRtc_UWord16* internalNackList = _jitterBuffer.GetNackList(nackListSize, extended);
if (internalNackList == NULL && nackListSize == 0xffff)
{
// This combination is used to trigger key frame requests.
size = 0;
return kNackKeyFrameRequest;
}
if (nackListSize > size)
{
size = nackListSize;
return kNackNeedMoreMemory;
}
memcpy(nackList, internalNackList, nackListSize * sizeof(WebRtc_UWord16));
size = nackListSize;
return kNackOk;
}
// Decide whether we should change decoder state. The dual decoder has
// caught up once it is about to decode the same timestamp the primary
// decoder last decoded; in that case the dual receiver is moved to
// kWaitForPrimaryDecode and true is returned.
bool
VCMReceiver::DualDecoderCaughtUp(VCMEncodedFrame* dualFrame, VCMReceiver& dualReceiver) const
{
    if (dualFrame == NULL)
    {
        return false;
    }
    if (_jitterBuffer.LastDecodedTimestamp() != dualFrame->TimeStamp())
    {
        return false;
    }
    // Caught up: hand over and wait for the primary to decode.
    dualReceiver.UpdateState(kWaitForPrimaryDecode);
    return true;
}
// Replaces this receiver's jitter buffer state with a copy of the given
// receiver's (used right before the primary decodes a frame that may
// corrupt its jitter buffer state). After the copy, NACK is re-asserted
// on the buffer whenever this receiver's own mode requires it.
void
VCMReceiver::CopyJitterBufferStateFromReceiver(const VCMReceiver& receiver)
{
_jitterBuffer = receiver._jitterBuffer;
{
CriticalSectionScoped cs(_critSect);
if (_nackMode != kNoNack)
{
_jitterBuffer.SetNackStatus(true);
}
}
}
// Thread-safe accessor for the receiver state.
VCMReceiverState
VCMReceiver::State() const
{
CriticalSectionScoped cs(_critSect);
return _state;
}
// Thread-safe state transition. A passive receiver must not jump
// straight to kWaitForPrimaryDecode (asserted below); the receiving ->
// passive check is intentionally left disabled.
void
VCMReceiver::UpdateState(VCMReceiverState newState)
{
CriticalSectionScoped cs(_critSect);
assert(!(_state == kPassive && newState == kWaitForPrimaryDecode));
// assert(!(_state == kReceiving && newState == kPassive));
_state = newState;
}
// Updates the dual receiver's state from a frame handed to the primary
// decoder. A no-op unless NACK (dual decoder mode) is enabled. The
// checks run in order, and the last one wins: an incomplete or missing
// frame always (re-)activates the dual receiver.
void
VCMReceiver::UpdateState(VCMEncodedFrame& frame)
{
if (_nackMode == kNoNack)
{
// Dual decoder mode has not been enabled.
return;
}
// Update the dual receiver state
// A complete key frame: drop back to passive.
if (frame.Complete() && frame.FrameType() == kVideoFrameKey)
{
UpdateState(kPassive);
}
// The primary got a complete, non-missing frame while we were waiting
// for it to decode: drop back to passive.
if (State() == kWaitForPrimaryDecode &&
frame.Complete() && !frame.MissingFrame())
{
UpdateState(kPassive);
}
if (frame.MissingFrame() || !frame.Complete())
{
// State was corrupted, enable dual receiver.
UpdateState(kReceiving);
}
}
}

View File

@@ -0,0 +1,102 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_MODULES_VIDEO_CODING_RECEIVER_H_
#define WEBRTC_MODULES_VIDEO_CODING_RECEIVER_H_
#include "critical_section_wrapper.h"
#include "jitter_buffer.h"
#include "timing.h"
#include "packet.h"
namespace webrtc
{
class VCMEncodedFrame;
// Result codes for VCMReceiver::NackList().
enum VCMNackStatus
{
kNackOk,
kNackNeedMoreMemory, // caller's list buffer is too small
kNackKeyFrameRequest // retransmission list unavailable; request a key frame
};
// NACK operating mode: retransmissions requested (indefinitely) or not at all.
enum VCMNackMode
{
kNackInfinite,
kNoNack
};
// Receiver state, used primarily by the dual-decoder (recovery) logic.
enum VCMReceiverState
{
kReceiving,
kPassive,
kWaitForPrimaryDecode
};
// Receive side of the VCM: feeds incoming packets into the jitter buffer
// and serves decodable frames to the decode thread, using the shared
// VCMTiming instance for decode/render scheduling. A second, non-master
// ("dual") receiver can be used for loss recovery.
class VCMReceiver
{
public:
VCMReceiver(VCMTiming& timing,
WebRtc_Word32 vcmId = -1,
WebRtc_Word32 receiverId = -1,
bool master = true);
~VCMReceiver();
// Starts (or flushes) the jitter buffer and resets the receiver state.
WebRtc_Word32 Initialize();
// Forwards the latest round-trip-time estimate (ms) to the jitter buffer.
void UpdateRtt(WebRtc_UWord32 rtt);
// Inserts one packet; frameWidth/frameHeight, when non-zero, set the
// encoded size on the frame the packet belongs to.
WebRtc_Word32 InsertPacket(const VCMPacket& packet,
WebRtc_UWord16 frameWidth,
WebRtc_UWord16 frameHeight);
// Returns the next frame to decode (NULL if none in time), waiting at
// most maxWaitTimeMs; nextRenderTimeMs receives the frame's render time.
VCMEncodedFrame* FrameForDecoding(WebRtc_UWord16 maxWaitTimeMs,
WebRtc_Word64& nextRenderTimeMs,
bool renderTiming = true,
VCMReceiver* dualReceiver = NULL);
// Returns a frame delivered by FrameForDecoding to the jitter buffer.
void ReleaseFrame(VCMEncodedFrame* frame);
// Current receive-side bit rate (kbps) and frame rate estimates.
WebRtc_Word32 ReceiveStatistics(WebRtc_UWord32& bitRate, WebRtc_UWord32& frameRate);
// Number of received delta/key frames.
WebRtc_Word32 ReceivedFrameCount(VCMFrameCount& frameCount) const;
// NACK
void SetNackMode(VCMNackMode nackMode);
VCMNackMode NackMode() const;
// Copies the NACK list into nackList; size is in/out (capacity in,
// number of entries out).
VCMNackStatus NackList(WebRtc_UWord16* nackList, WebRtc_UWord16& size);
// Dual decoder
bool DualDecoderCaughtUp(VCMEncodedFrame* dualFrame, VCMReceiver& dualReceiver) const;
VCMReceiverState State() const;
private:
VCMEncodedFrame* FrameForDecoding(WebRtc_UWord16 maxWaitTimeMs,
WebRtc_Word64 nextrenderTimeMs,
VCMReceiver* dualReceiver);
VCMEncodedFrame* FrameForRendering(WebRtc_UWord16 maxWaitTimeMs,
WebRtc_Word64 nextrenderTimeMs,
VCMReceiver* dualReceiver);
void CopyJitterBufferStateFromReceiver(const VCMReceiver& receiver);
void UpdateState(VCMReceiverState newState);
void UpdateState(VCMEncodedFrame& frame);
static WebRtc_Word32 GenerateReceiverId();
CriticalSectionWrapper& _critSect; // owned; guards _nackMode and _state
WebRtc_Word32 _vcmId;
WebRtc_Word32 _receiverId;
bool _master; // true for the primary receiver, false for the dual one
VCMJitterBuffer _jitterBuffer;
VCMTiming& _timing; // shared timing module, not owned
VCMEvent& _renderWaitEvent; // owned; signalled on destruction
VCMNackMode _nackMode;
VCMReceiverState _state;
static WebRtc_Word32 _receiverIdCounter;
};
#endif // WEBRTC_MODULES_VIDEO_CODING_RECEIVER_H_

View File

@@ -0,0 +1,214 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "trace.h"
#include "internal_defines.h"
#include "rtt_filter.h"
#include <cmath>
#include <stdlib.h>
#include <string.h>
namespace webrtc {
// Constructs the RTT filter with fixed tuning: the filter factor
// saturates after 35 samples, a jump is flagged at 2.5 standard
// deviations from the average and drift at 3.5, with the detection
// threshold set to kMaxDriftJumpCount samples. All mutable state is
// cleared via Reset().
VCMRttFilter::VCMRttFilter(WebRtc_Word32 vcmId, WebRtc_Word32 receiverId)
:
_vcmId(vcmId),
_receiverId(receiverId),
_filtFactMax(35),
_jumpStdDevs(2.5),
_driftStdDevs(3.5),
_detectThreshold(kMaxDriftJumpCount)
{
Reset();
}
// Copies the filter state (flags, average, variance, max, counters and
// the jump/drift sample buffers) from rhs, with a self-assignment guard.
// The id and tuning members (_vcmId, _receiverId, _filtFactMax,
// _jumpStdDevs, _driftStdDevs, _detectThreshold) are not copied; they
// are set at construction.
VCMRttFilter&
VCMRttFilter::operator=(const VCMRttFilter& rhs)
{
if (this != &rhs)
{
_gotNonZeroUpdate = rhs._gotNonZeroUpdate;
_avgRtt = rhs._avgRtt;
_varRtt = rhs._varRtt;
_maxRtt = rhs._maxRtt;
_filtFactCount = rhs._filtFactCount;
_jumpCount = rhs._jumpCount;
_driftCount = rhs._driftCount;
memcpy(_jumpBuf, rhs._jumpBuf, sizeof(_jumpBuf));
memcpy(_driftBuf, rhs._driftBuf, sizeof(_driftBuf));
}
return *this;
}
// Clears all filter state, including the short-time jump/drift sample
// buffers, returning the filter to its freshly-constructed state.
void
VCMRttFilter::Reset()
{
    _gotNonZeroUpdate = false;
    _avgRtt = 0;
    _varRtt = 0;
    _maxRtt = 0;
    _filtFactCount = 1;
    _jumpCount = 0;
    _driftCount = 0;
    // BUG FIX: the original cleared only kMaxDriftJumpCount *bytes*,
    // which zeroes just part of each buffer since the elements are wider
    // than one byte (operator= copies them with sizeof(_jumpBuf), and
    // JumpDetection stores WebRtc_UWord32 samples). Clear the full
    // arrays instead.
    memset(_jumpBuf, 0, sizeof(_jumpBuf));
    memset(_driftBuf, 0, sizeof(_driftBuf));
}
// Feeds one RTT sample (ms) into the exponential filter, updating
// _avgRtt, _varRtt and _maxRtt. Zero samples are ignored until the
// first non-zero RTT arrives; samples are capped at 3000 ms. If the
// sample triggers the jump or drift detector, the average/variance
// update is rolled back (the detectors keep their own short-time
// statistics).
void
VCMRttFilter::Update(WebRtc_UWord32 rttMs)
{
if (!_gotNonZeroUpdate)
{
if (rttMs == 0)
{
return;
}
_gotNonZeroUpdate = true;
}
// Sanity check
if (rttMs > 3000)
{
rttMs = 3000;
}
// The filter factor ramps up with the sample count, so early samples
// carry more weight; _filtFactCount is clamped below so the factor
// saturates at (_filtFactMax - 1) / _filtFactMax.
double filtFactor = 0;
if (_filtFactCount > 1)
{
filtFactor = static_cast<double>(_filtFactCount - 1) / _filtFactCount;
}
_filtFactCount++;
if (_filtFactCount > _filtFactMax)
{
// This prevents filtFactor from going above
// (_filtFactMax - 1) / _filtFactMax,
// e.g., _filtFactMax = 50 => filtFactor = 49/50 = 0.98
_filtFactCount = _filtFactMax;
}
// Keep the previous statistics so they can be restored if the sample
// is classified as part of a jump/drift in progress.
double oldAvg = _avgRtt;
double oldVar = _varRtt;
_avgRtt = filtFactor * _avgRtt + (1 - filtFactor) * rttMs;
_varRtt = filtFactor * _varRtt + (1 - filtFactor) *
(rttMs - _avgRtt) * (rttMs - _avgRtt);
_maxRtt = VCM_MAX(rttMs, _maxRtt);
if (!JumpDetection(rttMs) || !DriftDetection(rttMs))
{
// In some cases we don't want to update the statistics
_avgRtt = oldAvg;
_varRtt = oldVar;
}
WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCoding, VCMId(_vcmId, _receiverId),
"RttFilter Update: sample=%u avgRtt=%f varRtt=%f maxRtt=%u",
rttMs, _avgRtt, _varRtt, _maxRtt);
}
// Detects a sudden RTT jump: a sample deviating from the running average by
// more than _jumpStdDevs standard deviations. Deviating samples in the same
// direction accumulate in _jumpBuf; once _detectThreshold of them are seen,
// the estimate is re-seeded from that buffer via ShortRttFilter().
// Returns true if the long-term statistics should be kept, false if the
// caller should roll them back (a jump is suspected but not yet confirmed).
bool
VCMRttFilter::JumpDetection(WebRtc_UWord32 rttMs)
{
double diffFromAvg = _avgRtt - rttMs;
// BUG FIX: use fabs() for the double-valued deviation. Plain abs() can
// resolve to the C int overload, silently truncating the magnitude before
// the comparison.
if (fabs(diffFromAvg) > _jumpStdDevs * sqrt(_varRtt))
{
    int diffSign = (diffFromAvg >= 0) ? 1 : -1;
    int jumpCountSign = (_jumpCount >= 0) ? 1 : -1;
    if (diffSign != jumpCountSign)
    {
        // Since the signs differ the samples currently
        // in the buffer is useless as they represent a
        // jump in a different direction.
        _jumpCount = 0;
    }
    if (abs(_jumpCount) < kMaxDriftJumpCount)
    {
        // Update the buffer used for the short time
        // statistics.
        // The sign of the diff is used for updating the counter since
        // we want to use the same buffer for keeping track of when
        // the RTT jumps down and up.
        _jumpBuf[abs(_jumpCount)] = rttMs;
        _jumpCount += diffSign;
    }
    if (abs(_jumpCount) >= _detectThreshold)
    {
        // Detected an RTT jump
        ShortRttFilter(_jumpBuf, abs(_jumpCount));
        _filtFactCount = _detectThreshold + 1;
        _jumpCount = 0;
        WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCoding, VCMId(_vcmId, _receiverId),
                     "Detected an RTT jump");
    }
    else
    {
        return false;
    }
}
else
{
    _jumpCount = 0;
}
return true;
}
// Detects a slow RTT drift: the tracked maximum exceeding the average by
// more than _driftStdDevs standard deviations. Drifting samples accumulate
// in _driftBuf; after _detectThreshold of them the estimate is re-seeded
// via ShortRttFilter(). Always returns true — drift detection never vetoes
// the long-term statistics update (only JumpDetection() can).
bool
VCMRttFilter::DriftDetection(WebRtc_UWord32 rttMs)
{
if (_maxRtt - _avgRtt > _driftStdDevs * sqrt(_varRtt))
{
if (_driftCount < kMaxDriftJumpCount)
{
// Update the buffer used for the short time
// statistics.
_driftBuf[_driftCount] = rttMs;
_driftCount++;
}
if (_driftCount >= _detectThreshold)
{
// Detected an RTT drift
ShortRttFilter(_driftBuf, _driftCount);
_filtFactCount = _detectThreshold + 1;
_driftCount = 0;
WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCoding, VCMId(_vcmId, _receiverId),
"Detected an RTT drift");
}
}
else
{
_driftCount = 0;
}
return true;
}
// Re-seeds _maxRtt and _avgRtt from the first `length` entries of `buf`
// (the short-term jump/drift sample buffer). A zero length leaves the
// current state untouched.
void
VCMRttFilter::ShortRttFilter(WebRtc_UWord32* buf, WebRtc_UWord32 length)
{
    if (length == 0)
    {
        return;
    }
    WebRtc_UWord32 peak = 0;
    double total = 0;
    for (WebRtc_UWord32 idx = 0; idx < length; idx++)
    {
        const WebRtc_UWord32 sample = buf[idx];
        if (sample > peak)
        {
            peak = sample;
        }
        total += sample;
    }
    _maxRtt = peak;
    _avgRtt = total / static_cast<double>(length);
}
// Returns the current RTT estimate in ms: the tracked maximum RTT rounded
// to the nearest integer.
WebRtc_UWord32
VCMRttFilter::RttMs() const
{
return static_cast<WebRtc_UWord32>(_maxRtt + 0.5);
}
}

View File

@@ -0,0 +1,70 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_MODULES_VIDEO_CODING_RTT_FILTER_H_
#define WEBRTC_MODULES_VIDEO_CODING_RTT_FILTER_H_
#include "typedefs.h"
namespace webrtc
{
// Estimates round-trip time from a stream of RTT samples. Keeps an
// exponentially filtered average and variance, and re-seeds the estimate
// from a short sample buffer when a sudden jump or slow drift of the RTT
// is detected. RttMs() reports the tracked maximum, rounded to whole ms.
class VCMRttFilter
{
public:
VCMRttFilter(WebRtc_Word32 vcmId = 0, WebRtc_Word32 receiverId = 0);
// Copies the running filter state only; ids and tuning constants of the
// destination are not assigned.
VCMRttFilter& operator=(const VCMRttFilter& rhs);
// Resets the filter.
void Reset();
// Updates the filter with a new sample.
void Update(WebRtc_UWord32 rttMs);
// A getter function for the current RTT level in ms.
WebRtc_UWord32 RttMs() const;
private:
// The size of the drift and jump memory buffers
// and thus also the detection threshold for these
// detectors in number of samples.
enum { kMaxDriftJumpCount = 5 };
// Detects RTT jumps by comparing the difference between
// samples and average to the standard deviation.
// Returns true if the long time statistics should be updated
// and false otherwise
bool JumpDetection(WebRtc_UWord32 rttMs);
// Detects RTT drifts by comparing the difference between
// max and average to the standard deviation.
// Returns true if the long time statistics should be updated
// and false otherwise
bool DriftDetection(WebRtc_UWord32 rttMs);
// Computes the short time average and maximum of the vector buf.
void ShortRttFilter(WebRtc_UWord32* buf, WebRtc_UWord32 length);
WebRtc_Word32 _vcmId;
WebRtc_Word32 _receiverId;
bool _gotNonZeroUpdate;  // True once a non-zero sample has been seen.
double _avgRtt;          // Exponentially filtered average RTT (ms).
double _varRtt;          // Exponentially filtered RTT variance.
WebRtc_UWord32 _maxRtt;  // Largest RTT seen since the last re-seed (ms).
WebRtc_UWord32 _filtFactCount;       // Ramp counter for the filter factor.
const WebRtc_UWord32 _filtFactMax;   // Cap for _filtFactCount.
const double _jumpStdDevs;           // Jump detection threshold (std devs).
const double _driftStdDevs;          // Drift detection threshold (std devs).
WebRtc_Word32 _jumpCount;            // Signed count of buffered jump samples.
WebRtc_Word32 _driftCount;           // Count of buffered drift samples.
const WebRtc_Word32 _detectThreshold;
WebRtc_UWord32 _jumpBuf[kMaxDriftJumpCount];
WebRtc_UWord32 _driftBuf[kMaxDriftJumpCount];
};
} // namespace webrtc
#endif // WEBRTC_MODULES_VIDEO_CODING_RTT_FILTER_H_

View File

@@ -0,0 +1,636 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "packet.h"
#include "session_info.h"
#include <string.h>
#include <cassert>
namespace webrtc {
// Creates an empty session (the packets making up one video frame); all
// per-packet bookkeeping arrays start zeroed/unset and the sequence-number
// range is marked invalid (-1).
VCMSessionInfo::VCMSessionInfo():
_haveFirstPacket(false),
_markerBit(false),
_sessionNACK(false),
_completeSession(false),
_frameType(kVideoFrameDelta),
_previousFrameLoss(false),
_lowSeqNum(-1),
_highSeqNum(-1),
_highestPacketIndex(0)
{
memset(_packetSizeBytes, 0, sizeof(_packetSizeBytes));
memset(_naluCompleteness, kNaluUnset, sizeof(_naluCompleteness));
memset(_ORwithPrevByte, 0, sizeof(_ORwithPrevByte));
}
VCMSessionInfo::~VCMSessionInfo()
{
}
// Lowest RTP sequence number seen in this session, or -1 if empty.
WebRtc_Word32 VCMSessionInfo::GetLowSeqNum() const
{
return _lowSeqNum;
}
// Highest RTP sequence number seen in this session, or -1 if empty.
WebRtc_Word32 VCMSessionInfo::GetHighSeqNum() const
{
return _highSeqNum;
}
// Returns the session to its freshly constructed state so it can be reused
// for a new frame.
void VCMSessionInfo::Reset()
{
_lowSeqNum = -1;
_highSeqNum = -1;
_markerBit = false;
_haveFirstPacket = false;
_completeSession = false;
_frameType = kVideoFrameDelta;
_previousFrameLoss = false;
_sessionNACK = false;
_highestPacketIndex = 0;
memset(_packetSizeBytes, 0, sizeof(_packetSizeBytes));
memset(_naluCompleteness, kNaluUnset, sizeof(_naluCompleteness));
memset(_ORwithPrevByte, 0, sizeof(_ORwithPrevByte));
}
// Returns the total number of payload bytes currently stored for this
// session: the sum of all packet sizes up to the highest packet index.
WebRtc_UWord32 VCMSessionInfo::GetSessionLength()
{
    WebRtc_UWord32 total = 0;
    WebRtc_Word32 idx = 0;
    while (idx <= _highestPacketIndex)
    {
        total += _packetSizeBytes[idx];
        ++idx;
    }
    return total;
}
// Seeds both ends of the session's sequence-number range with the given
// sequence number (used when the first packet of the frame is known).
void
VCMSessionInfo::SetStartSeqNumber(WebRtc_UWord16 seqNumber)
{
_lowSeqNum = seqNumber;
_highSeqNum = seqNumber;
}
// True once both the low and high sequence numbers of this session have
// been established (via SetStartSeqNumber() or packet insertion).
bool
VCMSessionInfo::HaveStartSeqNumber()
{
    return (_lowSeqNum != -1) && (_highSeqNum != -1);
}
// Copies one packet's payload into the contiguous frame buffer at the byte
// offset corresponding to packetIndex, shifting any later packets' data to
// make room. Optionally prepends an H.264 start code. Updates per-packet
// bookkeeping (_packetSizeBytes, _ORwithPrevByte, _naluCompleteness) and the
// session completeness flags. Returns the number of bytes stored.
WebRtc_UWord32 VCMSessionInfo::InsertBuffer(WebRtc_UWord8* ptrStartOfLayer, WebRtc_Word32 packetIndex, const VCMPacket& packet)
{
WebRtc_UWord32 moveLength = 0;
WebRtc_UWord32 returnLength = 0;
int i = 0;
// need to calc offset before updating _packetSizeBytes
WebRtc_UWord32 offset = 0;
WebRtc_UWord32 packetSize = 0;
// Store this packet length. Add length since we could have data present already (e.g. multicall case).
if (packet.bits)
{
packetSize = packet.sizeBytes;
}
else
{
packetSize = packet.sizeBytes + (packet.insertStartCode?kH264StartCodeLengthBytes:0);
}
_packetSizeBytes[packetIndex] += packetSize;
// count only the one in our layer
for (i=0; i<packetIndex; ++i)
{
offset += _packetSizeBytes[i];
}
for (i=packetIndex+1; i<=_highestPacketIndex; ++i)
{
moveLength += _packetSizeBytes[i];
}
// Shift the data of all later packets up to make room at `offset`.
if (moveLength > 0)
{
memmove((void*)(ptrStartOfLayer + offset + packetSize), ptrStartOfLayer + offset, moveLength);
}
if (packet.bits)
{
// Add the packet without ORing end and start bytes together.
// This is done when the frame is fetched for decoding by calling
// GlueTogether().
_ORwithPrevByte[packetIndex] = true;
if (packet.dataPtr != NULL)
{
memcpy((void*)(ptrStartOfLayer + offset), packet.dataPtr, packetSize);
}
returnLength = packetSize;
}
else
{
_ORwithPrevByte[packetIndex] = false;
if (packet.dataPtr != NULL)
{
// Optionally write a 4-byte Annex-B start code before the payload.
const unsigned char startCode[] = {0, 0, 0, 1};
if(packet.insertStartCode)
{
memcpy((void*)(ptrStartOfLayer + offset), startCode, kH264StartCodeLengthBytes);
}
memcpy((void*)(ptrStartOfLayer + offset
+ (packet.insertStartCode?kH264StartCodeLengthBytes:0)),
packet.dataPtr,
packet.sizeBytes);
}
returnLength = packetSize;
}
if (packet.isFirstPacket)
{
_haveFirstPacket = true;
}
if (packet.markerBit)
{
_markerBit = true;
}
// Store information about if the packet is decodable as is or not.
_naluCompleteness[packetIndex]=packet.completeNALU;
UpdateCompleteSession();
return returnLength;
}
void VCMSessionInfo::UpdateCompleteSession()
{
if (_haveFirstPacket && _markerBit)
{
// do we have all packets in this session?
bool completeSession = true;
for (int i=0; i<= _highestPacketIndex; ++i)
{
if (_naluCompleteness[i] == kNaluUnset)
{
completeSession = false;
break;
}
}
_completeSession = completeSession;
}
}
// True when all packets of the frame (first through marker bit) have been
// inserted, as computed by UpdateCompleteSession().
bool VCMSessionInfo::IsSessionComplete()
{
return _completeSession;
}
// Find the start and end packet index of the NALU that packetIndex belongs
// to, by scanning backwards for a start/complete marker and forwards for an
// end/complete marker.
// startIndex -1 if start not found endIndex=-1 if end index not found
void VCMSessionInfo::FindNaluBorder(WebRtc_Word32 packetIndex,WebRtc_Word32& startIndex, WebRtc_Word32& endIndex)
{
if(_naluCompleteness[packetIndex]==kNaluStart ||
_naluCompleteness[packetIndex]==kNaluComplete)
{
startIndex=packetIndex;
}
else // Need to find the start
{
for(startIndex=packetIndex-1;startIndex>=0;--startIndex)
{
// A complete NALU (with data) or the end of the previous NALU means the
// NALU we are in starts just after it.
if( (_naluCompleteness[startIndex]==kNaluComplete && _packetSizeBytes[startIndex]>0) ||(_naluCompleteness[startIndex]==kNaluEnd && startIndex>0)) // Found previous NALU.
{
startIndex++;
break;
}
if( _naluCompleteness[startIndex]==kNaluStart) // This is where the NALU start.
{
break;
}
}
// NOTE(review): if the loop runs off the front, startIndex is left at -1
// ("start not found") per the contract above.
}
if(_naluCompleteness[packetIndex]==kNaluEnd ||
_naluCompleteness[packetIndex]==kNaluComplete)
{
endIndex=packetIndex;
}
else
{
// Find the next NALU
for(endIndex=packetIndex+1;endIndex<=_highestPacketIndex;++endIndex)
{
// The next NALU's start (or a complete NALU with data) means our NALU
// ended just before it.
if((_naluCompleteness[endIndex]==kNaluComplete && _packetSizeBytes[endIndex]>0) || _naluCompleteness[endIndex]==kNaluStart) // Found next NALU.
{
endIndex--;
break;
}
if( _naluCompleteness[endIndex]==kNaluEnd) // This is where the NALU end.
{
break;
}
}
if(endIndex>_highestPacketIndex)
endIndex=-1;
}
}
// Deletes all packets between startIndex and endIndex (inclusive) from the
// contiguous frame buffer, compacting the remaining data and zeroing the
// deleted packets' size entries. Returns the number of bytes removed.
WebRtc_UWord32 VCMSessionInfo::DeletePackets(WebRtc_UWord8* ptrStartOfLayer,WebRtc_Word32 startIndex,WebRtc_Word32 endIndex)
{
//Get the number of bytes to delete.
//Clear the size of these packets.
WebRtc_UWord32 bytesToDelete=0; /// The number of bytes to delete.
for(int j=startIndex;j<=endIndex;++j)
{
bytesToDelete+=_packetSizeBytes[j];
_packetSizeBytes[j]=0;
}
if (bytesToDelete > 0)
{
// Get the offset we want to move to.
int destOffset=0;
for(int j=0;j<startIndex;j++)
{
destOffset+=_packetSizeBytes[j];
}
//Get the number of bytes to move
WebRtc_UWord32 numberOfBytesToMove=0;
for (int j=endIndex+1; j<=_highestPacketIndex; ++j)
{
numberOfBytesToMove += _packetSizeBytes[j];
}
// Close the gap: move all data that followed the deleted range down.
memmove((void*)(ptrStartOfLayer + destOffset),(void*)(ptrStartOfLayer + destOffset+bytesToDelete), numberOfBytesToMove);
}
return bytesToDelete;
}
// Makes the layer decodable. Ie only contain decodable NALU
// Every NALU touched by a lost packet is deleted in full, and a leading
// partial NALU (one that does not begin with a NALU start) is removed.
// return the number of bytes deleted from the session. -1 if an error occurs
WebRtc_UWord32 VCMSessionInfo::MakeSessionDecodable(WebRtc_UWord8* ptrStartOfLayer)
{
if(_lowSeqNum<0) // No packets in this session
return 0;
WebRtc_Word32 startIndex=0;
WebRtc_Word32 endIndex=0;
int packetIndex=0;
WebRtc_UWord32 returnLength=0;
for (packetIndex=0; packetIndex<= _highestPacketIndex; ++packetIndex)
{
if (_naluCompleteness[packetIndex] == kNaluUnset) // Found a lost packet
{
// Delete from the lost packet to the end of the NALU it belongs to.
FindNaluBorder(packetIndex,startIndex,endIndex);
if(startIndex==-1)
startIndex=0;
if(endIndex==-1)
endIndex=_highestPacketIndex;
returnLength+=DeletePackets(ptrStartOfLayer,packetIndex,endIndex);
packetIndex=endIndex;
}// end lost packet
}
//Make sure the first packet is decodable (Either complete nalu or start of NALU)
if(_packetSizeBytes[0]>0)
{
switch(_naluCompleteness[0])
{
case kNaluComplete: //Packet can be decoded as is.
break;
case kNaluStart: // Packet contain beginning of NALU- No need to do anything.
break;
case kNaluIncomplete: //Packet is not beginning or end of NALU
//Need to find the end of this fua NALU and delete all packets.
FindNaluBorder(0,startIndex,endIndex);
if(endIndex==-1) // No end found. Delete
{
endIndex=_highestPacketIndex;
}
returnLength+=DeletePackets(ptrStartOfLayer,0,endIndex);//Delete this NALU.
break;
case kNaluEnd: // Packet is the end of a NALU
//Need to delete this packet
returnLength+=DeletePackets(ptrStartOfLayer,0,0);//Delete this NALU.
break;
default:
assert(false);
}
}
return returnLength;
}
// Clears (sets to -1) the entries of a NACK sequence-number list that
// correspond to packets this session has already received, starting from the
// entry equal to _lowSeqNum. Missing packets are left in the list and mark
// the session as NACKed. Returns 0 on success, -1 on invalid arguments.
WebRtc_Word32 VCMSessionInfo::ZeroOutSeqNum(WebRtc_Word32* list, WebRtc_Word32 num)
{
if ((NULL == list) || (num < 1))
{
return -1;
}
if (_lowSeqNum == -1)
{
// no packets in this frame
return 0;
}
// Find end point (index of entry that equals _lowSeqNum)
int index = 0;
for (; index <num; index++)
{
if (list[index] == _lowSeqNum)
{
list[index] = -1;
break;
}
}
// Zero out between first entry and end point
int i = 0;
while ( i <= _highestPacketIndex && index < num)
{
if (_naluCompleteness[i] != kNaluUnset)
{
// Packet received — no need to NACK it.
list[index] = -1;
}
else
{
// Packet still missing — it stays in the list and this session is
// marked as retransmitted.
_sessionNACK = true;
}
i++;
index++;
}
if(!_haveFirstPacket)
{
_sessionNACK = true;
}
return 0;
}
// Highest packet index inserted into this session so far.
WebRtc_Word32 VCMSessionInfo::GetHighestPacketIndex()
{
return _highestPacketIndex;
}
// True once a packet carrying the RTP marker bit (frame end) was inserted.
bool VCMSessionInfo::HaveLastPacket()
{
return _markerBit;
}
// Forces the session to be treated as if the marker-bit packet arrived and
// re-evaluates completeness.
void VCMSessionInfo::ForceSetHaveLastPacket()
{
_markerBit = true;
UpdateCompleteSession();
}
// True if this session has been NACKed (see ZeroOutSeqNum()).
bool VCMSessionInfo::IsRetransmitted()
{
return _sessionNACK;
}
// Overwrites the stored byte size of the packet at packetIndex.
// Out-of-range indices assert and are ignored.
void VCMSessionInfo::UpdatePacketSize(WebRtc_Word32 packetIndex, WebRtc_UWord32 length)
{
// sanity
if(packetIndex >= kMaxPacketsInJitterBuffer || packetIndex < 0)
{
//not allowed
assert(!"SessionInfo::UpdatePacketSize Error: invalid packetIndex");
return;
}
_packetSizeBytes[packetIndex] = length;
}
// Makes room for numberOfPacketIndices packet slots in front of index 0 by
// shifting the stored packet sizes up and zero-filling the new slots; used
// when packets with sequence numbers below the current lowest must be
// accommodated. Out-of-range counts assert and are ignored.
void VCMSessionInfo::PrependPacketIndices(WebRtc_Word32 numberOfPacketIndices)
{
// sanity
if((numberOfPacketIndices + GetHighestPacketIndex() >= kMaxPacketsInJitterBuffer) || numberOfPacketIndices < 0)
{
//not allowed
assert(!"SessionInfo::PrependPacketIndexes Error: invalid packetIndex");
return;
}
// Works if we have new packets before packetIndex = 0
int numOfPacketsToMove = GetHighestPacketIndex()+1;
// BUG FIX: _packetSizeBytes is an array of WebRtc_UWord32, but the move and
// clear below used sizeof(WebRtc_UWord16), shifting and zeroing only half of
// the data. Use the array's actual element size.
memmove(&_packetSizeBytes[numberOfPacketIndices], &_packetSizeBytes[0], (numOfPacketsToMove)*sizeof(_packetSizeBytes[0]));
memset(&_packetSizeBytes[0], 0, numberOfPacketIndices*sizeof(_packetSizeBytes[0]));
// NOTE(review): _naluCompleteness and _ORwithPrevByte are not shifted here —
// confirm callers re-insert those packets afterwards.
_highestPacketIndex += (WebRtc_UWord16)numberOfPacketIndices;
}
// Zeroes the stored byte size of the packet at packetIndex.
// Out-of-range indices assert and are ignored.
void VCMSessionInfo::ClearPacketSize(WebRtc_Word32 packetIndex)
{
// sanity
if(packetIndex >= kMaxPacketsInJitterBuffer || packetIndex < 0)
{
//not allowed
assert(!"SessionInfo::ClearPacketSize Error: invalid packetIndex");
return;
}
_packetSizeBytes[packetIndex] =0;
}
// Returns the stored byte size of the packet at packetIndex, or 0 (after
// asserting) for out-of-range indices.
WebRtc_UWord32 VCMSessionInfo::GetPacketSize(WebRtc_Word32 packetIndex)
{
// sanity
if(packetIndex >= kMaxPacketsInJitterBuffer || packetIndex < 0)
{
//not allowed
assert(!"SessionInfo::GetPacketSize Error: invalid packetIndex");
return 0;
}
return _packetSizeBytes[packetIndex];
}
// Inserts one RTP packet into the session. Maintains the low/high sequence
// number range (with 16-bit wrap-around handling), computes the packet's
// index relative to _lowSeqNum, shifts all bookkeeping arrays when a packet
// older than the current lowest arrives, drops duplicates, and finally
// copies the payload via InsertBuffer().
// Returns the number of bytes stored, -1 on overflow/error, -2 for a
// duplicate packet.
WebRtc_Word64
VCMSessionInfo::InsertPacket(const VCMPacket& packet, WebRtc_UWord8* ptrStartOfLayer)
{
//not allowed
assert(!packet.insertStartCode || !packet.bits);
// Check if this is first packet (only valid for some codecs)
if (packet.isFirstPacket)
{
// the first packet in the frame always signals the frametype
_frameType = packet.frameType;
}
// Check sequence number and update highest and lowest sequence numbers received.
// Move data if this seq num is lower than previously lowest.
if (packet.seqNum > _highSeqNum)
{
// This packet's seq num is higher than previously highest seq num; normal case
// if we have a wrap, only update with wrapped values
if (!(_highSeqNum < 0x00ff && packet.seqNum > 0xff00))
{
_highSeqNum = packet.seqNum;
}
} else if (_highSeqNum > 0xff00 && packet.seqNum < 0x00ff)
{
// wrap
_highSeqNum = packet.seqNum;
}
int packetIndex = packet.seqNum - (WebRtc_UWord16)_lowSeqNum;
if(_lowSeqNum < 0x00ff && packet.seqNum > 0xff00)
{
// negative wrap
packetIndex = packet.seqNum - 0x10000 - _lowSeqNum;
}
if (packetIndex < 0)
{
if (_lowSeqNum > 0xff00 && packet.seqNum < 0x00ff)
{
// we have a false detect due to the wrap
packetIndex = (0xffff - (WebRtc_UWord16)_lowSeqNum) + packet.seqNum + (WebRtc_UWord16)1;
} else
{
// This packet's seq num is lower than previously lowest seq num, but no wrap
// We need to move the data in all arrays indexed by packetIndex and insert the new
// packet's info
// How many packets should we leave room for (positions to shift)?
// Example - this seq num is 3 lower than previously lowest seq num
// Before: |--prev packet with lowest seq num--|--|...|
// After: |--new lowest seq num--|--|--|--prev packet with lowest seq num--|--|...|
WebRtc_UWord16 positionsToShift = (WebRtc_UWord16)_lowSeqNum - packet.seqNum;
WebRtc_UWord16 numOfPacketsToMove = _highestPacketIndex + 1;
// sanity, do we have room for the shift?
if ((positionsToShift + numOfPacketsToMove) > kMaxPacketsInJitterBuffer)
{
return -1;
}
// Shift _ORwithPrevByte array
memmove(&_ORwithPrevByte[positionsToShift],
&_ORwithPrevByte[0], numOfPacketsToMove*sizeof(bool));
memset(&_ORwithPrevByte[0], false, positionsToShift*sizeof(bool));
// Shift _packetSizeBytes array
memmove(&_packetSizeBytes[positionsToShift],
&_packetSizeBytes[0], numOfPacketsToMove*sizeof(WebRtc_UWord32));
memset(&_packetSizeBytes[0], 0, positionsToShift*sizeof(WebRtc_UWord32));
//Shift _naluCompleteness
memmove(&_naluCompleteness[positionsToShift],
&_naluCompleteness[0], numOfPacketsToMove*sizeof(WebRtc_UWord8));
memset(&_naluCompleteness[0], kNaluUnset, positionsToShift*sizeof(WebRtc_UWord8));
_highestPacketIndex += positionsToShift;
_lowSeqNum = packet.seqNum;
packetIndex = 0; // (seqNum - _lowSeqNum) = 0
}
} // if (_lowSeqNum > seqNum)
// sanity
if (packetIndex >= kMaxPacketsInJitterBuffer )
{
return -1;
}
if (packetIndex < 0 )
{
return -1;
}
// Check for duplicate packets
if (_packetSizeBytes[packetIndex] != 0)
{
// We have already received a packet with this sequence number, ignore it.
return -2;
}
// update highest packet index
_highestPacketIndex = packetIndex > _highestPacketIndex ? packetIndex :_highestPacketIndex;
return InsertBuffer(ptrStartOfLayer, packetIndex, packet);
}
// Final fix-up of the frame buffer before decoding: glues split packets
// together by ORing the last byte of the previous packet with the first byte
// of the current one (when _ORwithPrevByte is set), zeroes data that follows
// a lost packet, and for H.263 pads each missing packet with 10 zero bytes.
// Returns the resulting frame length in bytes (0 if only zeros remain).
WebRtc_UWord32 VCMSessionInfo::PrepareForDecode(WebRtc_UWord8* ptrStartOfLayer, VideoCodecType codec)
{
WebRtc_UWord32 currentPacketOffset = 0;
WebRtc_UWord32 length = GetSessionLength();
WebRtc_UWord32 idSum = 0;
WebRtc_UWord32 realDataBytes = 0;
if (length == 0)
{
return length;
}
bool previousLost = false;
for (int i=0; i <= _highestPacketIndex; i++)
{
if (_ORwithPrevByte[i])
{
if (currentPacketOffset > 0)
{
WebRtc_UWord8* ptrFirstByte = ptrStartOfLayer + currentPacketOffset;
if (_packetSizeBytes[i-1] == 0 || previousLost)
{
// It is be better to throw away this packet if we are missing the
// previous packet.
memset(ptrFirstByte, 0, _packetSizeBytes[i]);
previousLost = true;
}
else if (_packetSizeBytes[i] > 0) // Ignore if empty packet
{
// Glue with previous byte
// Move everything from [this packet start + 1, end of buffer] one byte to the left
WebRtc_UWord8* ptrPrevByte = ptrFirstByte - 1;
*ptrPrevByte = (*ptrPrevByte) | (*ptrFirstByte);
WebRtc_UWord32 lengthToEnd = length - (currentPacketOffset + 1);
memmove((void*)ptrFirstByte, (void*)(ptrFirstByte + 1), lengthToEnd);
_packetSizeBytes[i]--;
length--;
previousLost = false;
realDataBytes += _packetSizeBytes[i];
}
}
else
{
// A continuation packet at offset 0 has nothing to glue to — zero it.
memset(ptrStartOfLayer, 0, _packetSizeBytes[i]);
previousLost = true;
}
}
else if (_packetSizeBytes[i] == 0 && codec == kVideoCodecH263)
{
// H.263: substitute a lost packet with 10 zero bytes of padding.
WebRtc_UWord8* ptrFirstByte = ptrStartOfLayer + currentPacketOffset;
memmove(ptrFirstByte + 10, ptrFirstByte, length - currentPacketOffset);
memset(ptrFirstByte, 0, 10);
_packetSizeBytes[i] = 10;
length += _packetSizeBytes[i];
previousLost = true;
}
else
{
realDataBytes += _packetSizeBytes[i];
previousLost = false;
}
currentPacketOffset += _packetSizeBytes[i];
}
if (realDataBytes == 0)
{
// Drop the frame since all it contains are zeros
length = 0;
memset(_packetSizeBytes, 0, sizeof(_packetSizeBytes));
}
return length;
}
}

View File

@@ -0,0 +1,94 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_MODULES_VIDEO_CODING_SESSION_INFO_H_
#define WEBRTC_MODULES_VIDEO_CODING_SESSION_INFO_H_
#include "typedefs.h"
#include "module_common_types.h"
#include "packet.h"
namespace webrtc
{
// Book-keeping for all packets belonging to one video frame ("session")
// stored in a contiguous frame buffer: per-packet sizes, NALU completeness
// and the sequence-number range. Supports out-of-order insertion, duplicate
// detection and repairing a partially received frame for decoding.
class VCMSessionInfo
{
public:
VCMSessionInfo();
virtual ~VCMSessionInfo();
// NOTE(review): copy constructor is declared but its definition is not in
// the visible part of session_info.cc — confirm where it is implemented.
VCMSessionInfo(const VCMSessionInfo& rhs);
// Clears received packets from a NACK sequence-number list; marks the
// session as NACKed when packets are still missing.
WebRtc_Word32 ZeroOutSeqNum(WebRtc_Word32* list, WebRtc_Word32 num);
virtual void Reset();
// Inserts a packet into the frame buffer; returns stored bytes, -1 on
// error, -2 for duplicates.
WebRtc_Word64 InsertPacket(const VCMPacket& packet, WebRtc_UWord8* ptrStartOfLayer);
virtual bool IsSessionComplete();
// Removes NALUs broken by packet loss; returns the bytes deleted.
WebRtc_UWord32 MakeSessionDecodable(WebRtc_UWord8* ptrStartOfLayer);
WebRtc_UWord32 GetSessionLength();
bool HaveLastPacket();
void ForceSetHaveLastPacket();
bool IsRetransmitted();
webrtc::FrameType FrameType() const { return _frameType; }
virtual WebRtc_Word32 GetHighestPacketIndex();
virtual WebRtc_UWord32 GetPacketSize(WebRtc_Word32 packetIndex);
virtual void ClearPacketSize(WebRtc_Word32 packetIndex);
virtual void UpdatePacketSize(WebRtc_Word32 packetIndex, WebRtc_UWord32 length);
virtual void PrependPacketIndices(WebRtc_Word32 numberOfPacketIndexes);
void SetStartSeqNumber(WebRtc_UWord16 seqNumber);
bool HaveStartSeqNumber();
WebRtc_Word32 GetLowSeqNum() const;
WebRtc_Word32 GetHighSeqNum() const;
// Glues split packets and patches up losses before decoding; returns the
// final frame length.
WebRtc_UWord32 PrepareForDecode(WebRtc_UWord8* ptrStartOfLayer, VideoCodecType codec);
void SetPreviousFrameLoss() { _previousFrameLoss = true; }
bool PreviousFrameLoss() const { return _previousFrameLoss; }
protected:
// Copies one packet's payload into the frame buffer at its index offset.
WebRtc_UWord32 InsertBuffer(WebRtc_UWord8* ptrStartOfLayer,
WebRtc_Word32 packetIndex,
const VCMPacket& packet);
// Finds the packet-index borders of the NALU containing packetIndex.
void FindNaluBorder(WebRtc_Word32 packetIndex,
WebRtc_Word32& startIndex,
WebRtc_Word32& endIndex);
// Deletes a packet range from the buffer, compacting the remainder.
WebRtc_UWord32 DeletePackets(WebRtc_UWord8* ptrStartOfLayer,
WebRtc_Word32 startIndex,
WebRtc_Word32 endIndex);
void UpdateCompleteSession();
bool _haveFirstPacket; // If we have inserted the first packet into this frame
bool _markerBit; // If we have inserted a packet with markerbit into this frame
bool _sessionNACK; // If this session has been NACKed by JB
bool _completeSession;
webrtc::FrameType _frameType;
bool _previousFrameLoss;
WebRtc_Word32 _lowSeqNum; // Lowest packet sequence number in a session
WebRtc_Word32 _highSeqNum; // Highest packet sequence number in a session
// Highest packet index in this frame
WebRtc_UWord16 _highestPacketIndex;
// Length of packet (used for reordering)
WebRtc_UWord32 _packetSizeBytes[kMaxPacketsInJitterBuffer];
// Completness of packets. Used for deciding if the frame is decodable.
WebRtc_UWord8 _naluCompleteness[kMaxPacketsInJitterBuffer];
// True when a packet's first byte must be ORed with the previous byte when
// the frame is prepared for decode (split-byte packetization).
bool _ORwithPrevByte[kMaxPacketsInJitterBuffer];
};
} // namespace webrtc
#endif // WEBRTC_MODULES_VIDEO_CODING_SESSION_INFO_H_

View File

@@ -0,0 +1,55 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_MODULES_VIDEO_CODING_TICK_TIME_H_
#define WEBRTC_MODULES_VIDEO_CODING_TICK_TIME_H_
#include "tick_util.h"
#include <assert.h>
namespace webrtc
{
//#define TICK_TIME_DEBUG
// Clock used by the video coding module. When TICK_TIME_DEBUG is defined the
// real clock is replaced by a manually stepped millisecond counter so tests
// can advance time deterministically via IncrementDebugClock(); in normal
// builds the TickTime base class provides the real clock and
// IncrementDebugClock() must not be called.
class VCMTickTime : public TickTime
{
#ifdef TICK_TIME_DEBUG
public:
/*
* Get current time
*/
static TickTime Now() { assert(false); }; // Not available in debug mode.
/*
* Get time in milli seconds
*/
static WebRtc_Word64 MillisecondTimestamp() { return _timeNowDebug; };
/*
* Get time in micro seconds
*/
static WebRtc_Word64 MicrosecondTimestamp() { return _timeNowDebug * 1000LL; };
/*
* Step the debug clock forward by one millisecond.
*/
static void IncrementDebugClock() { _timeNowDebug++; };
private:
// Current debug time in milliseconds.
static WebRtc_Word64 _timeNowDebug;
#else
public:
static void IncrementDebugClock() { assert(false); }; // Debug builds only.
#endif
};
} // namespace webrtc
#endif // WEBRTC_MODULES_VIDEO_CODING_TICK_TIME_H_

View File

@@ -0,0 +1,259 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "internal_defines.h"
#include "timestamp_extrapolator.h"
#include "tick_time.h"
#include "trace.h"
namespace webrtc {
// Recursive-least-squares mapping between 90 kHz RTP timestamps and local
// wall-clock time. The filter state is the clock rate _w[0] (nominally 90
// ticks/ms) and offset _w[1]; _P is the state covariance, initialized with
// large offset uncertainty (_P11).
VCMTimestampExtrapolator::VCMTimestampExtrapolator(WebRtc_Word32 vcmId, WebRtc_Word32 id)
:
_rwLock(*RWLockWrapper::CreateRWLock()),
_vcmId(vcmId),
_id(id),
_startMs(0),
_firstTimestamp(0),
_wrapArounds(0),
_prevTs90khz(0),
_lambda(1),
_firstAfterReset(true),
_packetCount(0),
_startUpFilterDelayInPackets(2),
_detectorAccumulatorPos(0),
_detectorAccumulatorNeg(0),
_alarmThreshold(60e3),
_accDrift(6600), // in timestamp ticks, i.e. 15 ms
_accMaxError(7000),
_P11(1e10)
{
Reset(VCMTickTime::MillisecondTimestamp());
}
VCMTimestampExtrapolator::~VCMTimestampExtrapolator()
{
// The lock was created via RWLockWrapper::CreateRWLock() in the
// constructor and is owned through this reference.
delete &_rwLock;
}
// Resets the filter to its initial state. The time origin _startMs is set
// to nowMs when given (>-1), otherwise to the current clock; the next
// Update() re-seeds the timestamp offset.
void
VCMTimestampExtrapolator::Reset(const WebRtc_Word64 nowMs /* = -1 */)
{
WriteLockScoped wl(_rwLock);
if (nowMs > -1)
{
_startMs = nowMs;
}
else
{
_startMs = VCMTickTime::MillisecondTimestamp();
}
_prevMs = _startMs;
_firstTimestamp = 0;
// Nominal 90 kHz clock: 90 ticks per millisecond, zero offset.
_w[0] = 90.0;
_w[1] = 0;
// Small slope uncertainty, very large offset uncertainty.
_P[0][0] = 1;
_P[1][1] = _P11;
_P[0][1] = _P[1][0] = 0;
_firstAfterReset = true;
_prevTs90khz = 0;
_wrapArounds = 0;
_packetCount = 0;
_detectorAccumulatorPos = 0;
_detectorAccumulatorNeg = 0;
}
// Feeds one (local time tMs, RTP timestamp ts90khz) observation into the
// recursive least-squares filter. Resets after a >10 s gap, ignores
// out-of-order timestamps, compensates the offset for 32-bit timestamp
// wrap-arounds, and re-inflates the offset uncertainty when a sudden
// network-delay change is detected.
void
VCMTimestampExtrapolator::Update(WebRtc_Word64 tMs, WebRtc_UWord32 ts90khz, bool trace)
{
_rwLock.AcquireLockExclusive();
if (tMs - _prevMs > 10e3)
{
// Ten seconds without a complete frame.
// Reset the extrapolator
_rwLock.ReleaseLockExclusive();
Reset();
_rwLock.AcquireLockExclusive();
}
else
{
_prevMs = tMs;
}
// Remove offset to prevent badly scaled matrices
tMs -= _startMs;
WebRtc_Word32 prevWrapArounds = _wrapArounds;
CheckForWrapArounds(ts90khz);
WebRtc_Word32 wrapAroundsSincePrev = _wrapArounds - prevWrapArounds;
if (wrapAroundsSincePrev == 0 && ts90khz < _prevTs90khz)
{
// Out-of-order timestamp without a wrap — skip the update.
_rwLock.ReleaseLockExclusive();
return;
}
if (_firstAfterReset)
{
// Make an initial guess of the offset,
// should be almost correct since tMs - _startMs
// should about zero at this time.
_w[1] = -_w[0] * tMs;
_firstTimestamp = ts90khz;
_firstAfterReset = false;
}
// Compensate for wraparounds by changing the line offset
_w[1] = _w[1] - wrapAroundsSincePrev * ((static_cast<WebRtc_Word64>(1)<<32) - 1);
// Prediction residual: observed timestamp minus the filter's prediction.
double residual = (static_cast<double>(ts90khz) - _firstTimestamp) - static_cast<double>(tMs) * _w[0] - _w[1];
if (DelayChangeDetection(residual, trace) &&
_packetCount >= _startUpFilterDelayInPackets)
{
// A sudden change of average network delay has been detected.
// Force the filter to adjust its offset parameter by changing
// the offset uncertainty. Don't do this during startup.
_P[1][1] = _P11;
}
//T = [t(k) 1]';
//that = T'*w;
//K = P*T/(lambda + T'*P*T);
double K[2];
K[0] = _P[0][0] * tMs + _P[0][1];
K[1] = _P[1][0] * tMs + _P[1][1];
double TPT = _lambda + tMs * K[0] + K[1];
K[0] /= TPT;
K[1] /= TPT;
//w = w + K*(ts(k) - that);
_w[0] = _w[0] + K[0] * residual;
_w[1] = _w[1] + K[1] * residual;
//P = 1/lambda*(P - K*T'*P);
double p00 = 1 / _lambda * (_P[0][0] - (K[0] * tMs * _P[0][0] + K[0] * _P[1][0]));
double p01 = 1 / _lambda * (_P[0][1] - (K[0] * tMs * _P[0][1] + K[0] * _P[1][1]));
_P[1][0] = 1 / _lambda * (_P[1][0] - (K[1] * tMs * _P[0][0] + K[1] * _P[1][0]));
_P[1][1] = 1 / _lambda * (_P[1][1] - (K[1] * tMs * _P[0][1] + K[1] * _P[1][1]));
_P[0][0] = p00;
_P[0][1] = p01;
if (_packetCount < _startUpFilterDelayInPackets)
{
_packetCount++;
}
if (trace)
{
// NOTE(review): "%u" does not match the WebRtc_Word64 tMs argument —
// confirm against the trace formatter.
WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCoding, VCMId(_vcmId, _id), "w[0]=%f w[1]=%f ts=%u tMs=%u", _w[0], _w[1], ts90khz, tMs);
}
_rwLock.ReleaseLockExclusive();
}
// Predicts the 90 kHz RTP timestamp for local time tMs. Returns 0 before
// any update; uses the nominal 90 ticks/ms rate during startup, then the
// filtered rate/offset.
WebRtc_UWord32
VCMTimestampExtrapolator::ExtrapolateTimestamp(WebRtc_Word64 tMs) const
{
ReadLockScoped rl(_rwLock);
WebRtc_UWord32 timestamp = 0;
if (_packetCount == 0)
{
timestamp = 0;
}
else if (_packetCount < _startUpFilterDelayInPackets)
{
timestamp = static_cast<WebRtc_UWord32>(90.0 * (tMs - _prevMs) + _prevTs90khz + 0.5);
}
else
{
timestamp = static_cast<WebRtc_UWord32>(_w[0] * (tMs - _startMs) + _w[1] + _firstTimestamp + 0.5);
}
return timestamp;
}
// Predicts the local wall-clock time (ms) for the given 90 kHz RTP
// timestamp. Returns -1 before any update; uses the nominal rate during
// startup and guards against a degenerate (near-zero) filtered rate.
WebRtc_Word64
VCMTimestampExtrapolator::ExtrapolateLocalTime(WebRtc_UWord32 timestamp90khz) const
{
ReadLockScoped rl(_rwLock);
WebRtc_Word64 localTimeMs = 0;
if (_packetCount == 0)
{
localTimeMs = -1;
}
else if (_packetCount < _startUpFilterDelayInPackets)
{
localTimeMs = _prevMs + static_cast<WebRtc_Word64>(static_cast<double>(timestamp90khz - _prevTs90khz) / 90.0 + 0.5);
}
else
{
if (_w[0] < 1e-3)
{
localTimeMs = _startMs;
}
else
{
double timestampDiff = static_cast<double>(timestamp90khz) - static_cast<double>(_firstTimestamp);
localTimeMs = static_cast<WebRtc_Word64>(static_cast<double>(_startMs) + (timestampDiff - _w[1]) / _w[0] + 0.5);
}
}
return localTimeMs;
}
// Investigates if the timestamp clock has overflowed since the last timestamp and
// keeps track of the number of wrap arounds since reset.
void
VCMTimestampExtrapolator::CheckForWrapArounds(WebRtc_UWord32 ts90khz)
{
if (_prevTs90khz == 0)
{
// NOTE(review): 0 doubles as the "no previous timestamp" sentinel, so a
// genuine timestamp of exactly 0 is treated as the first sample.
_prevTs90khz = ts90khz;
return;
}
if (ts90khz < _prevTs90khz)
{
// This difference will probably be less than -2^31 if we have had a wrap around
// (e.g. timestamp = 1, _previousTimestamp = 2^32 - 1). Since it is casted to a Word32,
// it should be positive.
if (static_cast<WebRtc_Word32>(ts90khz - _prevTs90khz) > 0)
{
// Forward wrap around
_wrapArounds++;
}
}
// This difference will probably be less than -2^31 if we have had a backward wrap around.
// Since it is casted to a Word32, it should be positive.
else if (static_cast<WebRtc_Word32>(_prevTs90khz - ts90khz) > 0)
{
// Backward wrap around
_wrapArounds--;
}
_prevTs90khz = ts90khz;
}
// CUSUM detector for sudden network-delay changes. The clamped prediction
// residual is accumulated (with drift term _accDrift) in positive and
// negative accumulators; when either exceeds _alarmThreshold an alarm is
// raised, both accumulators are reset, and true is returned.
bool
VCMTimestampExtrapolator::DelayChangeDetection(double error, bool trace)
{
// CUSUM detection of sudden delay changes
error = (error > 0) ? VCM_MIN(error, _accMaxError) : VCM_MAX(error, -_accMaxError);
_detectorAccumulatorPos = VCM_MAX(_detectorAccumulatorPos + error - _accDrift, (double)0);
_detectorAccumulatorNeg = VCM_MIN(_detectorAccumulatorNeg + error + _accDrift, (double)0);
if (_detectorAccumulatorPos > _alarmThreshold || _detectorAccumulatorNeg < -_alarmThreshold)
{
// Alarm
if (trace)
{
WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCoding, VCMId(_vcmId, _id), "g1=%f g2=%f alarm=1", _detectorAccumulatorPos, _detectorAccumulatorNeg);
}
_detectorAccumulatorPos = _detectorAccumulatorNeg = 0;
return true;
}
if (trace)
{
WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCoding, VCMId(_vcmId, _id), "g1=%f g2=%f alarm=0", _detectorAccumulatorPos, _detectorAccumulatorNeg);
}
return false;
}
}

View File

@@ -0,0 +1,59 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_MODULES_VIDEO_CODING_TIMESTAMP_EXTRAPOLATOR_H_
#define WEBRTC_MODULES_VIDEO_CODING_TIMESTAMP_EXTRAPOLATOR_H_
#include "typedefs.h"
#include "rw_lock_wrapper.h"
namespace webrtc
{
// Recursively estimates a linear relation between incoming 90 kHz RTP
// timestamps and the local clock (state _w with covariance _P), so that a
// local receive/complete time can be extrapolated for any timestamp.
class VCMTimestampExtrapolator
{
public:
    VCMTimestampExtrapolator(WebRtc_Word32 vcmId = 0, WebRtc_Word32 receiverId = 0);
    ~VCMTimestampExtrapolator();
    // Adds one observation: local time tMs paired with RTP timestamp ts90khz.
    void Update(WebRtc_Word64 tMs, WebRtc_UWord32 ts90khz, bool trace = true);
    // Predicts the RTP timestamp corresponding to local time tMs.
    WebRtc_UWord32 ExtrapolateTimestamp(WebRtc_Word64 tMs) const;
    // Predicts the local time (ms) corresponding to timestamp90khz.
    WebRtc_Word64 ExtrapolateLocalTime(WebRtc_UWord32 timestamp90khz) const;
    // Clears all state; nowMs < 0 lets the implementation pick the time.
    void Reset(WebRtc_Word64 nowMs = -1);

private:
    // Tracks 32-bit timestamp wrap arounds in _wrapArounds.
    void CheckForWrapArounds(WebRtc_UWord32 ts90khz);
    // CUSUM detector for sudden delay changes; returns true on alarm.
    bool DelayChangeDetection(double error, bool trace = true);

    RWLockWrapper& _rwLock;        // guards the filter state
    WebRtc_Word32 _vcmId;
    WebRtc_Word32 _id;
    bool _trace;
    // Model state: ExtrapolateLocalTime uses _w[0] as the tick rate
    // (90 kHz ticks per ms) and _w[1] as the timestamp offset.
    double _w[2];
    double _P[2][2];               // estimate covariance
    WebRtc_Word64 _startMs;        // local-time base for extrapolation
    WebRtc_Word64 _prevMs;
    WebRtc_UWord32 _firstTimestamp; // timestamp base for extrapolation
    WebRtc_Word32 _wrapArounds;    // net number of 2^32 timestamp wraps
    WebRtc_UWord32 _prevTs90khz;
    const double _lambda;          // forgetting factor -- TODO confirm in .cc
    bool _firstAfterReset;
    WebRtc_UWord32 _packetCount;
    const WebRtc_UWord32 _startUpFilterDelayInPackets;
    double _detectorAccumulatorPos; // CUSUM positive accumulator
    double _detectorAccumulatorNeg; // CUSUM negative accumulator
    const double _alarmThreshold;
    const double _accDrift;
    const double _accMaxError;
    const double _P11;
};
} // namespace webrtc
#endif // WEBRTC_MODULES_VIDEO_CODING_TIMESTAMP_EXTRAPOLATOR_H_

View File

@@ -0,0 +1,99 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "timestamp_map.h"
#include <stdlib.h>
#include <assert.h>
namespace webrtc {
// Constructor. Optional parameter specifies maximum number of coexisting
// timers; non-positive values fall back to the default capacity of 10.
VCMTimestampMap::VCMTimestampMap(WebRtc_Word32 length)
    : _nextAddIx(0),
      _nextPopIx(0)
{
    const WebRtc_Word32 capacity = (length > 0) ? length : 10;
    _map = new VCMTimestampDataTuple[capacity];
    _length = capacity;
}
// Destructor.
VCMTimestampMap::~VCMTimestampMap()
{
    // Only the tuple array is owned; the void* payloads stored in it are
    // never deleted by the map and remain the caller's responsibility.
    delete [] _map;
}
// Empty the list of timers.
void
VCMTimestampMap::Reset()
{
    // Making both indices equal makes IsEmpty() true; stale tuple contents
    // are simply overwritten by later Add() calls.
    _nextAddIx = 0;
    _nextPopIx = 0;
}
// Stores a timestamp/data tuple in the circular buffer. Returns 0 on
// success and -1 when the buffer was full and the oldest entry had to be
// dropped to make room.
WebRtc_Word32
VCMTimestampMap::Add(WebRtc_UWord32 timestamp, void* data)
{
    VCMTimestampDataTuple& slot = _map[_nextAddIx];
    slot.timestamp = timestamp;
    slot.data = data;
    _nextAddIx = (_nextAddIx + 1) % _length;
    if (_nextAddIx != _nextPopIx)
    {
        return 0;
    }
    // Circular list full; forget the oldest entry.
    _nextPopIx = (_nextPopIx + 1) % _length;
    return -1;
}
// Returns (and removes) the data stored under timestamp, or NULL if it is
// not present. Scans forward from the oldest entry; entries skipped over
// (older than the requested timestamp) are discarded.
void*
VCMTimestampMap::Pop(WebRtc_UWord32 timestamp)
{
    while (!IsEmpty())
    {
        if (_map[_nextPopIx].timestamp == timestamp)
        {
            // found start time for this timestamp
            void* data = _map[_nextPopIx].data;
            _map[_nextPopIx].data = NULL;
            _nextPopIx = (_nextPopIx + 1) % _length;
            return data;
        }
        else if (_map[_nextPopIx].timestamp > timestamp)
        {
            // the timestamp we are looking for is not in the list
            // NOTE(review): a plain > comparison assumes timestamps grow
            // monotonically; around a 32-bit RTP timestamp wrap this would
            // misclassify entries -- confirm callers reset across wraps.
            assert(_nextPopIx < _length && _nextPopIx >= 0);
            return NULL;
        }
        // not in this position, check next (and forget this position)
        _nextPopIx = (_nextPopIx + 1) % _length;
    }
    // could not find matching timestamp in list
    assert(_nextPopIx < _length && _nextPopIx >= 0);
    return NULL;
}
// Check if no timers are currently running
bool
VCMTimestampMap::IsEmpty() const
{
    // The buffer is empty exactly when both indices coincide; Add() never
    // lets the add index catch up to the pop index without bumping it.
    return (_nextAddIx == _nextPopIx);
}
}

View File

@@ -0,0 +1,52 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_MODULES_VIDEO_CODING_TIMESTAMP_MAP_H_
#define WEBRTC_MODULES_VIDEO_CODING_TIMESTAMP_MAP_H_
#include "typedefs.h"
namespace webrtc
{
// One map entry: an RTP timestamp paired with a caller-owned opaque
// data pointer.
struct VCMTimestampDataTuple
{
    WebRtc_UWord32    timestamp;
    void*             data;
};
// Fixed-capacity circular map from RTP timestamps to opaque data pointers.
// Entries are added in arrival order and popped (or discarded) in the same
// order. No internal locking.
class VCMTimestampMap
{
public:
    // Constructor. Optional parameter specifies maximum number of
    // timestamps in map.
    VCMTimestampMap(const WebRtc_Word32 length = 10);
    // Destructor.
    ~VCMTimestampMap();
    // Empty the map
    void Reset();
    // Stores data under timestamp; returns -1 if the oldest entry had to
    // be dropped to make room, otherwise 0.
    WebRtc_Word32 Add(WebRtc_UWord32 timestamp, void* data);
    // Returns (and removes) the data stored under timestamp, or NULL if it
    // is not present; older entries passed on the way are discarded.
    void* Pop(WebRtc_UWord32 timestamp);

private:
    bool IsEmpty() const;

    VCMTimestampDataTuple* _map;   // circular buffer, owned
    WebRtc_Word32 _nextAddIx;      // next write position
    WebRtc_Word32 _nextPopIx;      // oldest (next read) position
    WebRtc_Word32 _length;         // buffer capacity
};
} // namespace webrtc
#endif // WEBRTC_MODULES_VIDEO_CODING_TIMESTAMP_MAP_H_

View File

@@ -0,0 +1,333 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "trace.h"
#include "internal_defines.h"
#include "jitter_buffer_common.h"
#include "timing.h"
#include "timestamp_extrapolator.h"
namespace webrtc {
// Constructs a timing object. When masterTiming is NULL this instance is
// the master and creates (and later owns) its own timestamp extrapolator;
// otherwise it is the dual timing component and shares the master's
// extrapolator without taking ownership.
VCMTiming::VCMTiming(WebRtc_Word32 vcmId, WebRtc_Word32 timingId, VCMTiming* masterTiming)
:
_critSect(*CriticalSectionWrapper::CreateCriticalSection()),
_vcmId(vcmId),
_timingId(timingId),
_master(false),
_tsExtrapolator(),
_codecTimer(),
_renderDelayMs(kDefaultRenderDelayMs),
_minTotalDelayMs(0),
_requiredDelayMs(0),
_currentDelayMs(0),
_prevFrameTimestamp(0)
{
    if (masterTiming == NULL)
    {
        _master = true;
        _tsExtrapolator = new VCMTimestampExtrapolator(vcmId, timingId);
    }
    else
    {
        // Borrow the master's extrapolator; the destructor only deletes it
        // when _master is true.
        _tsExtrapolator = masterTiming->_tsExtrapolator;
    }
}
VCMTiming::~VCMTiming()
{
    // The extrapolator may be shared with a dual timing object; only the
    // master instance owns and deletes it.
    if (_master)
    {
        delete _tsExtrapolator;
    }
    // The critical section was created in the constructor and is held by
    // reference, so it is released here.
    delete &_critSect;
}
// Resets all timing state to its defaults. A non-negative nowMs is used as
// the extrapolator's reset time; otherwise the extrapolator's default
// argument applies.
void
VCMTiming::Reset(WebRtc_Word64 nowMs /* = -1 */)
{
    CriticalSectionScoped cs(_critSect);
    if (nowMs > -1)
    {
        _tsExtrapolator->Reset(nowMs);
    }
    else
    {
        _tsExtrapolator->Reset();
    }
    _codecTimer.Reset();
    _renderDelayMs = kDefaultRenderDelayMs;
    _minTotalDelayMs = 0;
    _requiredDelayMs = 0;
    _currentDelayMs = 0;
    _prevFrameTimestamp = 0;
}
// Clears the decode-time statistics only.
// NOTE(review): unlike the other mutators in this file, this does not take
// _critSect -- confirm all callers already hold the lock or that the codec
// timer is otherwise safe to reset concurrently.
void VCMTiming::ResetDecodeTime()
{
    _codecTimer.Reset();
}
// Sets the expected time (ms) needed to render a frame once decoded.
void
VCMTiming::SetRenderDelay(WebRtc_UWord32 renderDelayMs)
{
    CriticalSectionScoped cs(_critSect);
    _renderDelayMs = renderDelayMs;
}
// Sets the minimum total delay (ms) used to keep video in sync with audio.
void
VCMTiming::SetMinimumTotalDelay(WebRtc_UWord32 minTotalDelayMs)
{
    CriticalSectionScoped cs(_critSect);
    _minTotalDelayMs = minTotalDelayMs;
}
// Updates the required jitter-buffer delay; the value is stored (and, for
// the master instance, traced) only when it actually changes.
void
VCMTiming::SetRequiredDelay(WebRtc_UWord32 requiredDelayMs)
{
    CriticalSectionScoped cs(_critSect);
    if (requiredDelayMs == _requiredDelayMs)
    {
        return;
    }
    if (_master)
    {
        WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCoding, VCMId(_vcmId, _timingId),
                     "Desired jitter buffer level: %u ms", requiredDelayMs);
    }
    _requiredDelayMs = requiredDelayMs;
}
// Moves _currentDelayMs towards the target delay, rate-limiting the change
// to kDelayMaxChangeMsPerS per second of stream time (derived from the
// 90 kHz timestamp difference between consecutive frames).
void VCMTiming::UpdateCurrentDelay(WebRtc_UWord32 frameTimestamp)
{
    CriticalSectionScoped cs(_critSect);
    WebRtc_UWord32 targetDelayMs = TargetDelayInternal();
    // Make sure we try to sync with audio
    if (targetDelayMs < _minTotalDelayMs)
    {
        targetDelayMs = _minTotalDelayMs;
    }
    if (_currentDelayMs == 0)
    {
        // Not initialized, set current delay to target.
        _currentDelayMs = targetDelayMs;
    }
    else if (targetDelayMs != _currentDelayMs)
    {
        WebRtc_Word64 delayDiffMs = static_cast<WebRtc_Word64>(targetDelayMs) -
            _currentDelayMs;
        // Never change the delay with more than 100 ms every second. If we're changing the
        // delay in too large steps we will get noticable freezes. By limiting the change we
        // can increase the delay in smaller steps, which will be experienced as the video is
        // played in slow motion. When lowering the delay the video will be played at a faster
        // pace.
        WebRtc_Word64 maxChangeMs = 0;
        // Detect a 32-bit RTP timestamp wrap between the previous frame and
        // this one, and unwrap by adding 2^32 ticks before converting the
        // elapsed stream time to milliseconds (ticks / 90000 = seconds).
        if (frameTimestamp < 0x0000ffff && _prevFrameTimestamp > 0xffff0000)
        {
            // wrap
            maxChangeMs = kDelayMaxChangeMsPerS * (frameTimestamp +
                (static_cast<WebRtc_Word64>(1)<<32) - _prevFrameTimestamp) / 90000;
        }
        else
        {
            maxChangeMs = kDelayMaxChangeMsPerS *
                (frameTimestamp - _prevFrameTimestamp) / 90000;
        }
        if (maxChangeMs <= 0)
        {
            // Any changes less than 1 ms are truncated and
            // will be postponed. Negative change will be due
            // to reordering and should be ignored.
            // (Note: _prevFrameTimestamp is deliberately left unchanged here.)
            return;
        }
        else if (delayDiffMs < -maxChangeMs)
        {
            delayDiffMs = -maxChangeMs;
        }
        else if (delayDiffMs > maxChangeMs)
        {
            delayDiffMs = maxChangeMs;
        }
        _currentDelayMs = _currentDelayMs + static_cast<WebRtc_Word32>(delayDiffMs);
    }
    _prevFrameTimestamp = frameTimestamp;
}
// Grows the current delay when a frame started decoding later than its
// deadline (render time minus decode and render delays), capping the total
// at the target delay. Frames decoded on time or early leave it untouched.
void VCMTiming::UpdateCurrentDelay(WebRtc_Word64 renderTimeMs,
                                   WebRtc_Word64 actualDecodeTimeMs)
{
    CriticalSectionScoped cs(_critSect);
    WebRtc_UWord32 targetDelayMs = TargetDelayInternal();
    if (targetDelayMs < _minTotalDelayMs)
    {
        // Make sure we try to sync with audio.
        targetDelayMs = _minTotalDelayMs;
    }
    const WebRtc_Word64 decodeDeadlineMs =
        renderTimeMs - MaxDecodeTimeMs() - _renderDelayMs;
    const WebRtc_Word64 delayedMs = actualDecodeTimeMs - decodeDeadlineMs;
    if (delayedMs < 0)
    {
        // Decoding started ahead of schedule; nothing to compensate for.
        return;
    }
    if (_currentDelayMs + delayedMs <= targetDelayMs)
    {
        _currentDelayMs += static_cast<WebRtc_UWord32>(delayedMs);
    }
    else
    {
        _currentDelayMs = targetDelayMs;
    }
}
// Stops the decode timer for one frame and folds the measured decode time
// into the codec timer statistics. Returns 0 on success or the negative
// codec timer error.
WebRtc_Word32
VCMTiming::StopDecodeTimer(WebRtc_UWord32 timeStamp,
                           WebRtc_Word64 startTimeMs,
                           WebRtc_Word64 nowMs)
{
    CriticalSectionScoped cs(_critSect);
    // Sampled before StopTimer -- presumably so the trace below shows the
    // estimate that was in effect while this frame decoded; confirm intent.
    const WebRtc_Word32 maxDecTime = MaxDecodeTimeMs();
    WebRtc_Word32 timeDiffMs = _codecTimer.StopTimer(startTimeMs, nowMs);
    if (timeDiffMs < 0)
    {
        WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceVideoCoding, VCMId(_vcmId, _timingId),
            "Codec timer error: %d", timeDiffMs);
        return timeDiffMs;
    }
    if (_master)
    {
        WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCoding, VCMId(_vcmId, _timingId),
            "Frame decoded: timeStamp=%u decTime=%d maxDecTime=%u, at %u",
            timeStamp, timeDiffMs, maxDecTime, MaskWord64ToUWord32(nowMs));
    }
    return 0;
}
// Feeds a new (arrival time, RTP timestamp) observation into the shared
// extrapolator. Tracing inside the extrapolator is only enabled for the
// master instance.
void
VCMTiming::IncomingTimestamp(WebRtc_UWord32 timeStamp, WebRtc_Word64 nowMs)
{
    CriticalSectionScoped cs(_critSect);
    _tsExtrapolator->Update(nowMs, timeStamp, _master);
}
// Public wrapper around RenderTimeMsInternal: takes the lock and, for the
// master instance, traces the computed render schedule. A negative return
// value means the frame's timestamp was judged invalid.
WebRtc_Word64
VCMTiming::RenderTimeMs(WebRtc_UWord32 frameTimestamp, WebRtc_Word64 nowMs) const
{
    CriticalSectionScoped cs(_critSect);
    const WebRtc_Word64 renderTimeMs = RenderTimeMsInternal(frameTimestamp, nowMs);
    if (renderTimeMs < 0)
    {
        // Propagate the error code without tracing a render time.
        return renderTimeMs;
    }
    if (_master)
    {
        WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCoding, VCMId(_vcmId, _timingId),
            "Render frame %u at %u. Render delay %u, required delay %u,"
            " max decode time %u, min total delay %u",
            frameTimestamp, MaskWord64ToUWord32(renderTimeMs), _renderDelayMs,
            _requiredDelayMs, MaxDecodeTimeMs(),_minTotalDelayMs);
    }
    return renderTimeMs;
}
// Computes the local render time for frameTimestamp: the extrapolated
// completion time plus the current delay. Returns -1 when the timestamp
// maps more than kMaxVideoDelayMs into the future, which is treated as a
// broken timestamp so the caller can reset its statistics.
WebRtc_Word64
VCMTiming::RenderTimeMsInternal(WebRtc_UWord32 frameTimestamp, WebRtc_Word64 nowMs) const
{
    WebRtc_Word64 estimatedCompleteTimeMs =
        _tsExtrapolator->ExtrapolateLocalTime(frameTimestamp);
    // Handle the extrapolator's -1 "no estimate" sentinel before using the
    // value in any arithmetic: render as soon as possible.
    if (estimatedCompleteTimeMs == -1)
    {
        estimatedCompleteTimeMs = nowMs;
    }
    else if (estimatedCompleteTimeMs - nowMs > kMaxVideoDelayMs)
    {
        if (_master)
        {
            // Include the offending timestamp and how early it is; the old
            // format string had no conversion specifiers, so its arguments
            // were silently dropped.
            WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCoding, VCMId(_vcmId, _timingId),
                "Timestamp %u arrived %u ms early, reset statistics",
                frameTimestamp,
                MaskWord64ToUWord32(estimatedCompleteTimeMs - nowMs));
        }
        return -1;
    }
    if (_master)
    {
        WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCoding, VCMId(_vcmId, _timingId),
            "ExtrapolateLocalTime(%u)=%u ms",
            frameTimestamp, MaskWord64ToUWord32(estimatedCompleteTimeMs));
    }
    return estimatedCompleteTimeMs + _currentDelayMs;
}
// Must be called from inside a critical section
// Returns the codec timer's required decode time (ms) for the given frame
// type, or -1 (after tracing an error) if the timer reports a negative
// value.
WebRtc_Word32
VCMTiming::MaxDecodeTimeMs(FrameType frameType /*= kVideoFrameDelta*/) const
{
    const WebRtc_Word32 decodeTimeMs = _codecTimer.RequiredDecodeTimeMs(frameType);
    if (decodeTimeMs < 0)
    {
        WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceVideoCoding, VCMId(_vcmId, _timingId),
            "Negative maximum decode time: %d", decodeTimeMs);
        return -1;
    }
    return decodeTimeMs;
}
// Returns how long (ms) the receiver may wait for a frame to become
// complete before it must be handed to the decoder to make its render
// deadline; never negative.
WebRtc_UWord32
VCMTiming::MaxWaitingTime(WebRtc_Word64 renderTimeMs, WebRtc_Word64 nowMs) const
{
    CriticalSectionScoped cs(_critSect);
    // Deadline for starting the decode: render time minus the expected
    // decode and render durations.
    const WebRtc_Word64 decodeDeadlineMs =
        renderTimeMs - MaxDecodeTimeMs() - _renderDelayMs;
    const WebRtc_Word64 maxWaitTimeMs = decodeDeadlineMs - nowMs;
    if (maxWaitTimeMs < 0)
    {
        return 0;
    }
    return static_cast<WebRtc_UWord32>(maxWaitTimeMs);
}
// Returns true if a frame is expected to decode within
// availableProcessingTimeMs, based on the current decode-time estimate.
bool
VCMTiming::EnoughTimeToDecode(WebRtc_UWord32 availableProcessingTimeMs) const
{
    CriticalSectionScoped cs(_critSect);
    WebRtc_Word32 maxDecodeTimeMs = MaxDecodeTimeMs();
    if (maxDecodeTimeMs < 0)
    {
        // Haven't decoded any frames yet, try decoding one to get an estimate
        // of the decode time.
        return true;
    }
    else if (maxDecodeTimeMs == 0)
    {
        // Decode time is less than 1, set to 1 for now since
        // we don't have any better precision. Count ticks later?
        maxDecodeTimeMs = 1;
    }
    return static_cast<WebRtc_Word32>(availableProcessingTimeMs) - maxDecodeTimeMs > 0;
}
// Thread-safe accessor for the total target delay.
WebRtc_UWord32
VCMTiming::TargetVideoDelay() const
{
    CriticalSectionScoped cs(_critSect);
    return TargetDelayInternal();
}
// Must be called from inside a critical section.
// Target delay = jitter buffer delay + decode time + render delay.
// NOTE(review): MaxDecodeTimeMs() can return -1, which would wrap in this
// unsigned sum -- confirm it is non-negative whenever this is reached.
WebRtc_UWord32
VCMTiming::TargetDelayInternal() const
{
    return _requiredDelayMs + MaxDecodeTimeMs() + _renderDelayMs;
}
}

View File

@@ -0,0 +1,110 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_MODULES_VIDEO_CODING_TIMING_H_
#define WEBRTC_MODULES_VIDEO_CODING_TIMING_H_
#include "typedefs.h"
#include "critical_section_wrapper.h"
#include "codec_timer.h"
namespace webrtc
{
class VCMTimestampExtrapolator;
// Tracks receive-side timing: maps RTP timestamps to local render times
// via a (possibly shared) timestamp extrapolator and adapts the playout
// delay to jitter-buffer, decode and render requirements.
class VCMTiming
{
public:
    // The primary timing component should be passed
    // if this is the dual timing component.
    VCMTiming(WebRtc_Word32 vcmId = 0,
              WebRtc_Word32 timingId = 0,
              VCMTiming* masterTiming = NULL);
    ~VCMTiming();
    // Resets the timing to the initial state.
    void Reset(WebRtc_Word64 nowMs = -1);
    void ResetDecodeTime();
    // The amount of time needed to render an image. Defaults to 10 ms.
    void SetRenderDelay(WebRtc_UWord32 renderDelayMs);
    // The minimum time the video must be delayed on the receiver to
    // get the desired jitter buffer level.
    void SetRequiredDelay(WebRtc_UWord32 requiredDelayMs);
    // Minimum total delay required to sync video with audio.
    void SetMinimumTotalDelay(WebRtc_UWord32 minTotalDelayMs);
    // Increases or decreases the current delay to get closer to the target delay.
    // Calculates how long it has been since the previous call to this function,
    // and increases/decreases the delay in proportion to the time difference.
    void UpdateCurrentDelay(WebRtc_UWord32 frameTimestamp);
    // Increases or decreases the current delay to get closer to the target delay.
    // Given the actual decode time in ms and the render time in ms for a frame, this
    // function calculates how late the frame is and increases the delay accordingly.
    void UpdateCurrentDelay(WebRtc_Word64 renderTimeMs, WebRtc_Word64 actualDecodeTimeMs);
    // Stops the decoder timer, should be called when the decoder returns a frame
    // or when the decoded frame callback is called.
    WebRtc_Word32 StopDecodeTimer(WebRtc_UWord32 timeStamp,
                                  WebRtc_Word64 startTimeMs,
                                  WebRtc_Word64 nowMs);
    // Used to report that a frame is passed to decoding. Updates the timestamp filter
    // which is used to map between timestamps and receiver system time.
    void IncomingTimestamp(WebRtc_UWord32 timeStamp, WebRtc_Word64 lastPacketTimeMs);
    // Returns the receiver system time when the frame with timestamp frameTimestamp
    // should be rendered, assuming that the system time currently is nowMs.
    WebRtc_Word64 RenderTimeMs(WebRtc_UWord32 frameTimestamp, WebRtc_Word64 nowMs) const;
    // Returns the maximum time in ms that we can wait for a frame to become complete
    // before we must pass it to the decoder.
    WebRtc_UWord32 MaxWaitingTime(WebRtc_Word64 renderTimeMs, WebRtc_Word64 nowMs) const;
    // Returns the current target delay which is required delay + decode time + render
    // delay.
    WebRtc_UWord32 TargetVideoDelay() const;
    // Calculates whether or not there is enough time to decode a frame given a
    // certain amount of processing time.
    bool EnoughTimeToDecode(WebRtc_UWord32 availableProcessingTimeMs) const;

    enum { kDefaultRenderDelayMs = 10 };
    enum { kDelayMaxChangeMsPerS = 100 };

protected:
    WebRtc_Word32 MaxDecodeTimeMs(FrameType frameType = kVideoFrameDelta) const;
    WebRtc_Word64 RenderTimeMsInternal(WebRtc_UWord32 frameTimestamp,
                                       WebRtc_Word64 nowMs) const;
    WebRtc_UWord32 TargetDelayInternal() const;

private:
    CriticalSectionWrapper& _critSect;   // guards all members below
    WebRtc_Word32 _vcmId;
    WebRtc_Word32 _timingId;
    bool _master;                        // true => owns _tsExtrapolator
    VCMTimestampExtrapolator* _tsExtrapolator;
    VCMCodecTimer _codecTimer;
    WebRtc_UWord32 _renderDelayMs;
    WebRtc_UWord32 _minTotalDelayMs;
    WebRtc_UWord32 _requiredDelayMs;
    WebRtc_UWord32 _currentDelayMs;
    WebRtc_UWord32 _prevFrameTimestamp;
    // NOTE(review): the two members below are not initialized by the
    // constructor in timing.cc and are not referenced there -- confirm they
    // are set elsewhere before use.
    WebRtc_Word64 _startStoragePlaybackMs;
    WebRtc_Word64 _firstStoredRenderTimeMs;
};
} // namespace webrtc
#endif // WEBRTC_MODULES_VIDEO_CODING_TIMING_H_

View File

@@ -0,0 +1,103 @@
# Copyright (c) 2009 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'includes': [
'../../../../common_settings.gypi', # Common settings
],
'targets': [
{
'target_name': 'webrtc_video_coding',
'type': '<(library)',
'dependencies': [
'../../codecs/i420/main/source/i420.gyp:webrtc_i420',
'../../codecs/vp8/main/source/vp8.gyp:webrtc_vp8',
'../../../../common_video/vplib/main/source/vplib.gyp:webrtc_vplib',
'../../../../system_wrappers/source/system_wrappers.gyp:system_wrappers',
],
'include_dirs': [
'../interface',
'../../../interface',
'../../codecs/interface',
],
'direct_dependent_settings': {
'include_dirs': [
'../interface',
'../../codecs/interface',
],
},
'sources': [
# interfaces
'../interface/video_coding.h',
'../interface/video_coding_defines.h',
# headers
'codec_database.h',
'codec_timer.h',
'content_metrics_processing.h',
'encoded_frame.h',
'er_tables_xor.h',
'event.h',
'exp_filter.h',
'fec_tables_xor.h',
'frame_buffer.h',
'frame_dropper.h',
'frame_list.h',
'generic_decoder.h',
'generic_encoder.h',
'inter_frame_delay.h',
'internal_defines.h',
'jitter_buffer_common.h',
'jitter_buffer.h',
'jitter_estimator.h',
'media_opt_util.h',
'media_optimization.h',
'nack_fec_tables.h',
'packet.h',
'qm_select_data.h',
'qm_select.h',
'receiver.h',
'rtt_filter.h',
'session_info.h',
'tick_time.h',
'timestamp_extrapolator.h',
'timestamp_map.h',
'timing.h',
'video_coding_impl.h',
# sources
'codec_database.cc',
'codec_timer.cc',
'content_metrics_processing.cc',
'encoded_frame.cc',
'exp_filter.cc',
'frame_buffer.cc',
'frame_dropper.cc',
'frame_list.cc',
'generic_decoder.cc',
'generic_encoder.cc',
'inter_frame_delay.cc',
'jitter_buffer.cc',
'jitter_estimator.cc',
'media_opt_util.cc',
'media_optimization.cc',
'packet.cc',
'qm_select.cc',
'receiver.cc',
'rtt_filter.cc',
'session_info.cc',
'timestamp_extrapolator.cc',
'timestamp_map.cc',
'timing.cc',
'video_coding_impl.cc',
], # source
},
],
}
# Local Variables:
# tab-width:2
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=2 shiftwidth=2:

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,275 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_MODULES_VIDEO_CODING_VIDEO_CODING_IMPL_H_
#define WEBRTC_MODULES_VIDEO_CODING_VIDEO_CODING_IMPL_H_
#include "video_coding.h"
#include "critical_section_wrapper.h"
#include "frame_buffer.h"
#include "receiver.h"
#include "timing.h"
#include "jitter_buffer.h"
#include "codec_database.h"
#include "generic_decoder.h"
#include "generic_encoder.h"
#include "media_optimization.h"
#include <stdio.h>
namespace webrtc
{
// Bookkeeping helper for a periodic task driven by the module's Process()
// call: records when the task last ran and its desired period.
class VCMProcessTimer
{
public:
    VCMProcessTimer(WebRtc_UWord32 periodMs) :
        _periodMs(periodMs), _latestMs(VCMTickTime::MillisecondTimestamp()) {}
    // Returns the configured period in milliseconds.
    WebRtc_UWord32 Period() const;
    // Returns the time in milliseconds until the task is due again.
    WebRtc_UWord32 TimeUntilProcess() const;
    // Marks the task as having just run.
    void Processed();

private:
    WebRtc_UWord32 _periodMs;
    WebRtc_Word64 _latestMs;   // timestamp (ms) of the latest run
};
// Controls under which conditions the receiver requests a new key frame
// from the sender.
enum VCMKeyRequestMode
{
    kKeyOnError,    // Normal mode, request key frames on decoder error
    kKeyOnKeyLoss,  // Request key frames on decoder error and on packet loss in key frames
    kKeyOnLoss,     // Request key frames on decoder error and on packet loss in any frame
};
// Implementation of the VideoCodingModule interface. Combines the send
// side (encoder, media optimization) and the receive side (receivers,
// decoders, timing), each guarded by its own critical section, plus a
// dual receiver/decoder/timing path.
class VideoCodingModuleImpl : public VideoCodingModule
{
public:
    VideoCodingModuleImpl(const WebRtc_Word32 id);
    virtual ~VideoCodingModuleImpl();
    // Returns version of the module and its components
    WebRtc_Word32 Version(WebRtc_Word8* version,
                          WebRtc_UWord32& remainingBufferInBytes,
                          WebRtc_UWord32& position) const;
    WebRtc_Word32 Id() const;
    // Change the unique identifier of this object
    virtual WebRtc_Word32 ChangeUniqueId(const WebRtc_Word32 id);
    // Returns the number of milliseconds until the module want a worker thread to call Process
    virtual WebRtc_Word32 TimeUntilNextProcess();
    virtual WebRtc_Word32 Process();
    /*
    *   Sender
    */
    // Initialize send codec
    virtual WebRtc_Word32 InitializeSender();
    // Makes sure the encoder is in its initial state.
    virtual WebRtc_Word32 ResetEncoder();
    // Register the send codec to be used.
    virtual WebRtc_Word32 RegisterSendCodec(const VideoCodec* sendCodec,
                                            WebRtc_UWord32 numberOfCores,
                                            WebRtc_UWord32 maxPayloadSize);
    // Get current send codec
    virtual WebRtc_Word32 SendCodec(VideoCodec* currentSendCodec) const;
    // Get current send codec type
    virtual VideoCodecType SendCodec() const;
    // Register an external encoder object.
    virtual WebRtc_Word32 RegisterExternalEncoder(VideoEncoder* externalEncoder,
                                                  WebRtc_UWord8 payloadType,
                                                  bool internalSource = false);
    // Get codec config parameters
    virtual WebRtc_Word32 CodecConfigParameters(WebRtc_UWord8* buffer, WebRtc_Word32 size);
    // Get encode bitrate
    virtual WebRtc_UWord32 Bitrate() const;
    // Get encode frame rate
    virtual WebRtc_UWord32 FrameRate() const;
    // Set channel parameters
    virtual WebRtc_Word32 SetChannelParameters(WebRtc_UWord32 availableBandWidth,
                                               WebRtc_UWord8 lossRate,
                                               WebRtc_UWord32 RTT);
    // Set receive channel parameters
    virtual WebRtc_Word32 SetReceiveChannelParameters(WebRtc_UWord32 RTT);
    // Register a transport callback which will be called to deliver the encoded buffers
    virtual WebRtc_Word32 RegisterTransportCallback(VCMPacketizationCallback* transport);
    // Register a send statistics callback which will be called to deliver information
    // about the video stream produced by the encoder,
    // for instance the average frame rate and bit rate.
    virtual WebRtc_Word32 RegisterSendStatisticsCallback(VCMSendStatisticsCallback* sendStats);
    // Register a video quality settings callback which will be called when
    // frame rate/dimensions need to be updated for video quality optimization
    virtual WebRtc_Word32 RegisterVideoQMCallback(VCMQMSettingsCallback* videoQMSettings);
    // Register a video protection callback which will be called to deliver
    // the requested FEC rate and NACK status (on/off).
    virtual WebRtc_Word32 RegisterProtectionCallback(VCMProtectionCallback* protection);
    // Enable or disable a video protection method.
    virtual WebRtc_Word32 SetVideoProtection(VCMVideoProtection videoProtection, bool enable);
    // Add one raw video frame to the encoder, blocking.
    virtual WebRtc_Word32 AddVideoFrame(const VideoFrame& videoFrame,
                                        const VideoContentMetrics* _contentMetrics = NULL,
                                        const void* codecSpecificInfo = NULL);
    // Next frame encoded should be of the type frameType.
    virtual WebRtc_Word32 FrameTypeRequest(FrameType frameType);
    // Enable frame dropper
    virtual WebRtc_Word32 EnableFrameDropper(bool enable);
    // Sent frame counters
    virtual WebRtc_Word32 SentFrameCount(VCMFrameCount& frameCount) const;
    /*
    *   Receiver
    */
    // Initialize receiver, resets codec database etc
    virtual WebRtc_Word32 InitializeReceiver();
    // Register possible receive codecs, can be called multiple times
    virtual WebRtc_Word32 RegisterReceiveCodec(const VideoCodec* receiveCodec,
                                               WebRtc_Word32 numberOfCores,
                                               bool requireKeyFrame = false);
    // Register an externally defined decoder/render object.
    // Can be a decoder only or a decoder coupled with a renderer.
    virtual WebRtc_Word32 RegisterExternalDecoder(VideoDecoder* externalDecoder,
                                                  WebRtc_UWord8 payloadType,
                                                  bool internalRenderTiming);
    // Register a receive callback. Will be called whenever there is a new frame ready
    // for rendering.
    virtual WebRtc_Word32 RegisterReceiveCallback(VCMReceiveCallback* receiveCallback);
    // Register a receive statistics callback which will be called to deliver information
    // about the video stream received by the receiving side of the VCM, for instance
    // the average frame rate and bit rate.
    virtual WebRtc_Word32 RegisterReceiveStatisticsCallback(
        VCMReceiveStatisticsCallback* receiveStats);
    // Register a frame type request callback.
    virtual WebRtc_Word32 RegisterFrameTypeCallback(VCMFrameTypeCallback* frameTypeCallback);
    // Register a frame storage callback.
    virtual WebRtc_Word32 RegisterFrameStorageCallback(
        VCMFrameStorageCallback* frameStorageCallback);
    // Nack callback
    virtual WebRtc_Word32 RegisterPacketRequestCallback(VCMPacketRequestCallback* callback);
    // Decode next frame, blocks for a maximum of maxWaitTimeMs milliseconds.
    // Should be called as often as possible to get the most out of the decoder.
    virtual WebRtc_Word32 Decode(WebRtc_UWord16 maxWaitTimeMs = 200);
    // Decode next dual frame, blocks for a maximum of maxWaitTimeMs milliseconds.
    virtual WebRtc_Word32 DecodeDualFrame(WebRtc_UWord16 maxWaitTimeMs = 200);
    // Reset the decoder state
    virtual WebRtc_Word32 ResetDecoder();
    // Get current received codec
    virtual WebRtc_Word32 ReceiveCodec(VideoCodec* currentReceiveCodec) const;
    // Get current received codec type
    virtual VideoCodecType ReceiveCodec() const;
    // Incoming packet from network parsed and ready for decode, non blocking.
    virtual WebRtc_Word32 IncomingPacket(const WebRtc_UWord8* incomingPayload,
                                         WebRtc_UWord32 payloadLength,
                                         const WebRtcRTPHeader& rtpInfo);
    // A part of an encoded frame to be decoded.
    // Used in conjunction with VCMFrameStorageCallback.
    virtual WebRtc_Word32 DecodeFromStorage(const EncodedVideoData& frameFromStorage);
    // Set codec config parameters
    virtual WebRtc_Word32 SetCodecConfigParameters(WebRtc_UWord8 payloadType,
                                                   const WebRtc_UWord8* buffer,
                                                   WebRtc_Word32 length);
    // Minimum playout delay (Used for lip-sync). This is the minimum delay required
    // to sync with audio. Not included in VideoCodingModule::Delay()
    // Defaults to 0 ms.
    virtual WebRtc_Word32 SetMinimumPlayoutDelay(WebRtc_UWord32 minPlayoutDelayMs);
    // The estimated delay caused by rendering
    virtual WebRtc_Word32 SetRenderDelay(WebRtc_UWord32 timeMS);
    // Current delay
    virtual WebRtc_Word32 Delay() const;
    // Received frame counters
    virtual WebRtc_Word32 ReceivedFrameCount(VCMFrameCount& frameCount) const;

protected:
    WebRtc_Word32 Decode(const webrtc::VCMEncodedFrame& frame);
    WebRtc_Word32 RequestKeyFrame();
    WebRtc_Word32 RequestSliceLossIndication(const WebRtc_UWord64 pictureID) const;
    WebRtc_Word32 NackList(WebRtc_UWord16* nackList, WebRtc_UWord16& size);

private:
    WebRtc_Word32 _id;
    // Receive-side state, guarded by _receiveCritSect.
    CriticalSectionWrapper& _receiveCritSect; // Critical section for receive side
    bool _receiverInited;
    VCMTiming _timing;
    VCMTiming _dualTiming;       // dual decoder timing, shares _timing's extrapolator
    VCMReceiver _receiver;
    VCMReceiver _dualReceiver;
    VCMDecodedFrameCallback _decodedFrameCallback;
    VCMDecodedFrameCallback _dualDecodedFrameCallback;
    VCMFrameTypeCallback* _frameTypeCallback;
    VCMFrameStorageCallback* _frameStorageCallback;
    VCMReceiveStatisticsCallback* _receiveStatsCallback;
    VCMPacketRequestCallback* _packetRequestCallback;
    VCMGenericDecoder* _decoder;
    VCMGenericDecoder* _dualDecoder;
    FILE* _bitStreamBeforeDecoder;  // debug dump file, may be NULL
    VCMFrameBuffer _frameFromFile;
    VCMKeyRequestMode _keyRequestMode;
    bool _scheduleKeyRequest;
    // Send-side state, guarded by _sendCritSect.
    CriticalSectionWrapper& _sendCritSect; // Critical section for send side
    VCMGenericEncoder* _encoder;
    VCMEncodedFrameCallback _encodedFrameCallback;
    FrameType _nextFrameType;
    VCMMediaOptimization _mediaOpt;
    VideoCodecType _sendCodecType;
    VCMSendStatisticsCallback* _sendStatsCallback;
    FILE* _encoderInputFile;        // debug dump file, may be NULL
    VCMCodecDataBase _codecDataBase;
    // Periodic tasks driven from Process().
    VCMProcessTimer _receiveStatsTimer;
    VCMProcessTimer _sendStatsTimer;
    VCMProcessTimer _retransmissionTimer;
    VCMProcessTimer _keyRequestTimer;
};
} // namespace webrtc
#endif // WEBRTC_MODULES_VIDEO_CODING_VIDEO_CODING_IMPL_H_

View File

@@ -0,0 +1,77 @@
# Copyright (c) 2009 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'includes': [
'../../../../common_settings.gypi', # Common settings
],
'targets': [
{
'target_name': 'video_coding_test',
'type': 'executable',
'dependencies': [
'video_coding.gyp:webrtc_video_coding',
'../../../rtp_rtcp/source/rtp_rtcp.gyp:rtp_rtcp',
'../../../utility/source/utility.gyp:webrtc_utility',
'../../../video_processing/main/source/video_processing.gyp:video_processing',
'../../../../common_video/vplib/main/source/vplib.gyp:webrtc_vplib',
],
'include_dirs': [
'../../../interface',
'../../codecs/vp8/main/interface',
'../../../../system_wrappers/interface',
'../source',
],
'sources': [
# headers
'../test/codec_database_test.h',
'../test/generic_codec_test.h',
'../test/jitter_estimate_test.h',
'../test/media_opt_test.h',
'../test/normal_test.h',
'../test/quality_modes_test.h',
'../test/receiver_tests.h',
'../test/release_test.h',
'../test/rtp_player.h',
'../test/test_util.h',
'../test/video_source.h',
# sources
'../test/codec_database_test.cc',
'../test/decode_from_storage_test.cc',
'../test/generic_codec_test.cc',
'../test/jitter_buffer_test.cc',
'../test/media_opt_test.cc',
'../test/mt_rx_tx_test.cc',
'../test/normal_test.cc',
'../test/quality_modes_test.cc',
'../test/receiver_timing_tests.cc',
'../test/rtp_player.cc',
'../test/test_util.cc',
'../test/tester_main.cc',
'../test/video_rtp_play_mt.cc',
'../test/video_rtp_play.cc',
'../test/video_source.cc',
], # source
'conditions': [
['OS=="linux"', {
'cflags': [
'-fexceptions',
],
}],
], # conditions
},
],
}
# Local Variables:
# tab-width:2
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=2 shiftwidth=2: